From 0a7f2b1414522847af63e20bf1632b5efc8d6892 Mon Sep 17 00:00:00 2001 From: David Salami Date: Fri, 1 Apr 2022 17:00:30 +0100 Subject: [PATCH 01/96] implementing beefy client consensus state --- .cargo/config.toml | 3 + Cargo.lock | 1803 +++++++++++++++-- Cargo.toml | 222 ++ modules/Cargo.toml | 25 +- modules/src/clients/ics11_beefy/client_def.rs | 1 + .../src/clients/ics11_beefy/client_state.rs | 226 +++ .../clients/ics11_beefy/consensus_state.rs | 243 +++ modules/src/clients/ics11_beefy/error.rs | 231 +++ modules/src/clients/ics11_beefy/header.rs | 403 ++++ .../src/clients/ics11_beefy/misbehaviour.rs | 1 + modules/src/clients/ics11_beefy/mod.rs | 9 + modules/src/clients/mod.rs | 1 + .../src/core/ics02_client/client_consensus.rs | 24 +- modules/src/core/ics02_client/client_state.rs | 40 +- modules/src/core/ics02_client/client_type.rs | 8 +- modules/src/core/ics02_client/error.rs | 4 + modules/src/core/ics02_client/header.rs | 23 +- proto/src/lib.rs | 5 + proto/src/prost/ibc.lightclients.beefy.v1.rs | 198 ++ 19 files changed, 3273 insertions(+), 197 deletions(-) create mode 100644 .cargo/config.toml create mode 100644 modules/src/clients/ics11_beefy/client_def.rs create mode 100644 modules/src/clients/ics11_beefy/client_state.rs create mode 100644 modules/src/clients/ics11_beefy/consensus_state.rs create mode 100644 modules/src/clients/ics11_beefy/error.rs create mode 100644 modules/src/clients/ics11_beefy/header.rs create mode 100644 modules/src/clients/ics11_beefy/misbehaviour.rs create mode 100644 modules/src/clients/ics11_beefy/mod.rs create mode 100644 proto/src/prost/ibc.lightclients.beefy.v1.rs diff --git a/.cargo/config.toml b/.cargo/config.toml new file mode 100644 index 0000000000..78014a91b1 --- /dev/null +++ b/.cargo/config.toml @@ -0,0 +1,3 @@ +paths = [ + "/Users/davidsalami/Documents/open-source/beefy-generic-client" +] \ No newline at end of file diff --git a/Cargo.lock b/Cargo.lock index fc8906a7c2..bcd590f5c2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2,6 +2,16 @@ # It is not intended for manual editing. 
version = 3 +[[package]] +name = "Inflector" +version = "0.11.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe438c63458706e03479442743baae6c88256498e6431708f6dfc520a26515d3" +dependencies = [ + "lazy_static", + "regex", +] + [[package]] name = "abscissa_core" version = "0.6.0" @@ -24,7 +34,7 @@ dependencies = [ "toml", "tracing", "tracing-log", - "tracing-subscriber", + "tracing-subscriber 0.3.10", "wait-timeout", ] @@ -62,6 +72,17 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aae1277d39aeec15cb388266ecc24b11c80469deae6067e17a1a7aa9e5c1f234" +[[package]] +name = "ahash" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47" +dependencies = [ + "getrandom 0.2.5", + "once_cell", + "version_check", +] + [[package]] name = "aho-corasick" version = "0.7.18" @@ -101,12 +122,48 @@ version = "1.0.57" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "08f9b8508dccb7687a1d6c4ce66b2b0ecef467c94667de27d8d7fe1f8d2a9cdc" +[[package]] +name = "approx" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cab112f0a86d568ea0e627cc1d6be74a1e9cd55214684db5561995f6dad897c6" +dependencies = [ + "num-traits", +] + [[package]] name = "arc-swap" version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c5d78ce20460b82d3fa150275ed9d55e21064fc7951177baacf86a145c4a4b1f" +[[package]] +name = "arrayref" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544" + +[[package]] +name = "arrayvec" +version = "0.4.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd9fd44efafa8690358b7408d253adf110036b88f55672a933f01d616ad9b1b9" +dependencies = [ + "nodrop", +] + +[[package]] +name = "arrayvec" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" + +[[package]] +name = "arrayvec" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6" + [[package]] name = "ascii" version = "1.0.0" @@ -242,6 +299,12 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "349a06037c7bf932dd7e7d1f653678b2038b9ad46a74102f1fc7bd7872678cce" +[[package]] +name = "base58" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6107fe1be6682a68940da878d9e9f5e90ca5745b3dec9fd1bb393c8777d4f581" + [[package]] name = "base64" version = "0.13.0" @@ -293,14 +356,48 @@ version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" +[[package]] +name = "bitvec" +version = "0.20.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7774144344a4faa177370406a7ff5f1da24303817368584c6206c8303eb07848" +dependencies = [ + "funty", + "radium", + "tap", + "wyz", +] + +[[package]] +name = "blake2-rfc" +version = "0.2.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d6d530bdd2d52966a6d03b7a964add7ae1a288d25214066fd4b600f0f796400" +dependencies = [ + "arrayvec 0.4.12", + 
"constant_time_eq", +] + +[[package]] +name = "block-buffer" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0940dc441f31689269e10ac70eb1002a3a1d3ad1390e030043662eb7fe4688b" +dependencies = [ + "block-padding 0.1.5", + "byte-tools", + "byteorder", + "generic-array 0.12.4", +] + [[package]] name = "block-buffer" version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" dependencies = [ - "block-padding", - "generic-array", + "block-padding 0.2.1", + "generic-array 0.14.5", ] [[package]] @@ -309,7 +406,16 @@ version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0bf7fe51849ea569fd452f37822f606a5cabb684dc918707a0193fd4664ff324" dependencies = [ - "generic-array", + "generic-array 0.14.5", +] + +[[package]] +name = "block-padding" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa79dedbb091f449f1f39e53edf88d5dbe95f895dae6135a8d7b881fb5af73f5" +dependencies = [ + "byte-tools", ] [[package]] @@ -355,6 +461,18 @@ version = "3.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4a45a46ab1f2412e53d3a0ade76ffad2025804294569aae387231a0cd6e0899" +[[package]] +name = "byte-slice-cast" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87c5fdd0166095e1d463fc6cc01aa8ce547ad77a4e84d42eb6762b084e28067e" + +[[package]] +name = "byte-tools" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" + [[package]] name = "bytecount" version = "0.6.2" @@ -467,6 +585,15 @@ version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fff857943da45f546682664a79488be82e69e43c1a7a2307679ab9afb3a66d2e" +[[package]] +name = "ckb-merkle-mountain-range" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4f061f97d64fd1822664bdfb722f7ae5469a97b77567390f7442be5b5dc82a5b" +dependencies = [ + "cfg-if 0.1.10", +] + [[package]] name = "clap" version = "3.1.18" @@ -563,6 +690,12 @@ version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e4c78c047431fee22c1a7bb92e00ad095a02a983affe4d8a72e2a2c62c1b94f3" +[[package]] +name = "constant_time_eq" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" + [[package]] name = "contracts" version = "0.6.3" @@ -574,6 +707,12 @@ dependencies = [ "syn", ] +[[package]] +name = "convert_case" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" + [[package]] name = "core-foundation" version = "0.9.3" @@ -701,7 +840,7 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "03c6a1d5fa1de37e071642dfa44ec552ca5b299adb128fab16138e24b548fd21" dependencies = [ - "generic-array", + "generic-array 0.14.5", "rand_core 0.6.3", "subtle", "zeroize", @@ -713,7 +852,7 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "57952ca27b5e3606ff4dd79b0020231aaf9d6aa76dc05fd30137538c50bd3ce8" dependencies = [ - "generic-array", + "generic-array 0.14.5", "typenum", ] @@ -723,7 
+862,7 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b584a330336237c1eecd3e94266efb216c56ed91225d634cb2991c5f3fd1aeab" dependencies = [ - "generic-array", + "generic-array 0.14.5", "subtle", ] @@ -733,7 +872,7 @@ version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b1d1a86f49236c215f271d40892d5fc950490551400b02ef360692c29815c714" dependencies = [ - "generic-array", + "generic-array 0.14.5", "subtle", ] @@ -746,6 +885,19 @@ dependencies = [ "sct 0.6.1", ] +[[package]] +name = "curve25519-dalek" +version = "2.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a9b85542f99a2dfa2a1b8e192662741c9859a846b296bef1c92ef9b58b5a216" +dependencies = [ + "byteorder", + "digest 0.8.1", + "rand_core 0.5.1", + "subtle", + "zeroize", +] + [[package]] name = "curve25519-dalek" version = "3.2.0" @@ -794,8 +946,10 @@ version = "0.99.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" dependencies = [ + "convert_case", "proc-macro2", "quote", + "rustc_version", "syn", ] @@ -810,13 +964,22 @@ dependencies = [ "zeroize", ] +[[package]] +name = "digest" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3d0c8c8752312f9713efd397ff63acb9f85585afbf179282e720e7704954dd5" +dependencies = [ + "generic-array 0.12.4", +] + [[package]] name = "digest" version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" dependencies = [ - "generic-array", + "generic-array 0.14.5", ] [[package]] @@ -870,6 +1033,33 @@ dependencies = [ "winapi", ] +[[package]] +name = "downcast-rs" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ea835d29036a4087793836fa931b08837ad5e957da9e23886b29586fb9b6650" + +[[package]] +name = "dyn-clonable" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4e9232f0e607a262ceb9bd5141a3dfb3e4db6994b31989bbfd845878cba59fd4" +dependencies = [ + "dyn-clonable-impl", + "dyn-clone", +] + +[[package]] +name = "dyn-clonable-impl" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "558e40ea573c374cf53507fd240b7ee2f5477df7cfebdb97323ec61c719399c5" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "dyn-clone" version = "1.0.5" @@ -903,8 +1093,10 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c762bae6dcaf24c4c84667b8579785430908723d5c889f469d76a41d59cc7a9d" dependencies = [ - "curve25519-dalek", + "curve25519-dalek 3.2.0", "ed25519", + "rand 0.7.3", + "serde", "sha2 0.9.9", "zeroize", ] @@ -925,7 +1117,7 @@ dependencies = [ "crypto-bigint", "der", "ff", - "generic-array", + "generic-array 0.14.5", "group", "rand_core 0.6.3", "sec1", @@ -952,6 +1144,12 @@ dependencies = [ "termcolor", ] +[[package]] +name = "environmental" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68b91989ae21441195d7d9b9993a2f9295c7e1a8c96255d8b729accddc124797" + [[package]] name = "error-chain" version = "0.12.4" @@ -971,6 +1169,12 @@ dependencies = [ "once_cell", ] +[[package]] +name = "fake-simd" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"e88a8acf291dafb59c2d96e8f59828f3838bb1a70398823ade51a84de6a6deed" + [[package]] name = "fastrand" version = "1.7.0" @@ -1002,6 +1206,18 @@ dependencies = [ "winapi", ] +[[package]] +name = "fixed-hash" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfcf0ed7fe52a17a03854ec54a9f76d6d84508d1c0e66bc1793301c73fc8493c" +dependencies = [ + "byteorder", + "rand 0.8.5", + "rustc-hex", + "static_assertions", +] + [[package]] name = "flate2" version = "1.0.23" @@ -1041,12 +1257,131 @@ dependencies = [ "percent-encoding", ] +[[package]] +name = "frame-benchmarking" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +dependencies = [ + "frame-support", + "frame-system", + "linregress", + "log", + "parity-scale-codec", + "paste", + "scale-info", + "sp-api", + "sp-application-crypto", + "sp-io", + "sp-runtime", + "sp-runtime-interface", + "sp-std 4.0.0", + "sp-storage", +] + +[[package]] +name = "frame-metadata" +version = "14.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37ed5e5c346de62ca5c184b4325a6600d1eaca210666e4606fe4e449574978d0" +dependencies = [ + "cfg-if 1.0.0", + "parity-scale-codec", + "scale-info", + "serde", +] + +[[package]] +name = "frame-support" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +dependencies = [ + "bitflags", + "frame-metadata", + "frame-support-procedural", + "impl-trait-for-tuples", + "log", + "once_cell", + "parity-scale-codec", + "paste", + "scale-info", + "serde", + "smallvec", + "sp-arithmetic", + "sp-core", + "sp-core-hashing-proc-macro", + "sp-inherents", + "sp-io", + "sp-runtime", + "sp-staking", + "sp-state-machine", + "sp-std 4.0.0", + "sp-tracing", + "tt-call", +] + +[[package]] +name = "frame-support-procedural" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +dependencies = [ + "Inflector", + "frame-support-procedural-tools", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "frame-support-procedural-tools" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +dependencies = [ + "frame-support-procedural-tools-derive", + "proc-macro-crate", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "frame-support-procedural-tools-derive" +version = "3.0.0" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "frame-system" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +dependencies = [ + "frame-support", + "log", + "parity-scale-codec", + "scale-info", + "serde", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std 4.0.0", + "sp-version", +] + [[package]] name = "fs-err" version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5bd79fa345a495d3ae89fb7165fec01c0e72f41821d642dda363a1e97975652e" +[[package]] +name = "funty" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"fed34cd105917e91daa4da6b3728c47b068749d6a62c59811f06ed2ac71d9da7" + [[package]] name = "futures" version = "0.3.21" @@ -1087,6 +1422,7 @@ dependencies = [ "futures-core", "futures-task", "futures-util", + "num_cpus", ] [[package]] @@ -1118,6 +1454,12 @@ version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "57c66a976bf5909d801bbef33416c41372779507e7a6b3a5e25e4749c58f776a" +[[package]] +name = "futures-timer" +version = "3.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c" + [[package]] name = "futures-util" version = "0.3.21" @@ -1136,6 +1478,15 @@ dependencies = [ "slab", ] +[[package]] +name = "generic-array" +version = "0.12.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffdf9f34f1447443d37393cc6c2b8313aebddcd96906caf34e54c68d8e57d7bd" +dependencies = [ + "typenum", +] + [[package]] name = "generic-array" version = "0.14.5" @@ -1249,11 +1600,38 @@ version = "1.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eabb4a44450da02c90444cf74558da904edde8fb4e9035a9a6a4e15445af0bd7" +[[package]] +name = "hash-db" +version = "0.15.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d23bd4e7b5eda0d0f3a307e8b381fdc8ba9000f26fbe912250c0a4cc3956364a" + +[[package]] +name = "hash256-std-hasher" +version = "0.15.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92c171d55b98633f4ed3860808f004099b36c1cc29c42cfc53aa8591b21efcf2" +dependencies = [ + "crunchy", +] + [[package]] name = "hashbrown" version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e" +dependencies = [ + "ahash", +] + +[[package]] +name = "hashbrown" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8c21d40587b92fa6a6c6e3c1bdbf87d75511db5672f9c93175574b3a00df1758" +dependencies = [ + "ahash", +] [[package]] name = "hdpath" @@ -1330,6 +1708,17 @@ dependencies = [ "digest 0.9.0", ] +[[package]] +name = "hmac-drbg" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17ea0a1394df5b6574da6e0c1ade9e78868c9fb0a4e5ef4428e32da4676b85b1" +dependencies = [ + "digest 0.9.0", + "generic-array 0.14.5", + "hmac 0.8.1", +] + [[package]] name = "http" version = "0.2.7" @@ -1338,7 +1727,7 @@ checksum = "ff8670570af52249509a86f5e3e18a08c60b177071826898fde8997cf5f6bfbb" dependencies = [ "bytes", "fnv", - "itoa", + "itoa 1.0.1", ] [[package]] @@ -1401,7 +1790,7 @@ dependencies = [ "http-body", "httparse", "httpdate", - "itoa", + "itoa 1.0.1", "pin-project-lite", "socket2", "tokio", @@ -1463,6 +1852,8 @@ dependencies = [ name = "ibc" version = "0.15.0" dependencies = [ + "beefy-generic-client", + "beefy-primitives", "bytes", "derive_more", "env_logger", @@ -1471,6 +1862,8 @@ dependencies = [ "ics23", "modelator 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", "num-traits", + "pallet-mmr-primitives", + "parity-scale-codec", "prost", "prost-types", "safe-regex", @@ -1478,6 +1871,9 @@ dependencies = [ "serde_derive", "serde_json", "sha2 0.10.2", + "sp-core", + "sp-runtime", + "sp-trie", "subtle-encoding", "tendermint", "tendermint-light-client-verifier", @@ -1549,8 +1945,8 @@ dependencies = [ "k256", "moka", "nanoid", - "num-bigint", - "num-rational", + "num-bigint 0.4.3", + "num-rational 
0.4.0", "prost", "prost-types", "regex", @@ -1622,7 +2018,7 @@ dependencies = [ "tokio", "toml", "tracing", - "tracing-subscriber", + "tracing-subscriber 0.3.10", ] [[package]] @@ -1684,7 +2080,7 @@ dependencies = [ "tokio", "toml", "tracing", - "tracing-subscriber", + "tracing-subscriber 0.3.10", ] [[package]] @@ -1700,7 +2096,7 @@ dependencies = [ "ripemd160", "sha2 0.9.9", "sha3", - "sp-std", + "sp-std 3.0.0", ] [[package]] @@ -1721,10 +2117,13 @@ dependencies = [ ] [[package]] -name = "indenter" -version = "0.3.3" +name = "impl-codec" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce23b50ad8242c51a442f3ff322d56b02f08852c77e4c0b4d3fd684abc89c683" +checksum = "161ebdfec3c8e3b52bf61c4f3550a1eea4f9579d10dc1b936f3171ebdcd6c443" +dependencies = [ + "parity-scale-codec", +] [[package]] name = "indexmap" @@ -1733,7 +2132,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0f647032dfaa1f8b6dc29bd3edb7bbef4861b8b8007ebb118d6db284fd59f6ee" dependencies = [ "autocfg", - "hashbrown", + "hashbrown 0.11.2", ] [[package]] @@ -1754,6 +2153,15 @@ dependencies = [ "cfg-if 1.0.0", ] +[[package]] +name = "integer-sqrt" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "276ec31bcb4a9ee45f58bec6f9ec700ae4cf4f4f8f2fa7e06cb406bd5ffdd770" +dependencies = [ + "num-traits", +] + [[package]] name = "itertools" version = "0.10.3" @@ -1810,10 +2218,74 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "349d5a591cd28b49e1d1037471617a32ddcda5731b99419008085f72d5a53836" [[package]] -name = "linked-hash-map" -version = "0.5.4" +name = "libm" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fb9b38af92608140b86b693604b9ffcc5824240a484d1ecd4795bacb2fe88f3" +checksum = "33a33a362ce288760ec6a508b94caaec573ae7d3bbbd91b87aa0bad4456839db" + +[[package]] +name = "libsecp256k1" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0452aac8bab02242429380e9b2f94ea20cea2b37e2c1777a1358799bbe97f37" +dependencies = [ + "arrayref", + "base64", + "digest 0.9.0", + "hmac-drbg", + "libsecp256k1-core", + "libsecp256k1-gen-ecmult", + "libsecp256k1-gen-genmult", + "rand 0.8.5", + "serde", + "sha2 0.9.9", + "typenum", +] + +[[package]] +name = "libsecp256k1-core" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5be9b9bb642d8522a44d533eab56c16c738301965504753b03ad1de3425d5451" +dependencies = [ + "crunchy", + "digest 0.9.0", + "subtle", +] + +[[package]] +name = "libsecp256k1-gen-ecmult" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3038c808c55c87e8a172643a7d87187fc6c4174468159cb3090659d55bcb4809" +dependencies = [ + "libsecp256k1-core", +] + +[[package]] +name = "libsecp256k1-gen-genmult" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3db8d6ba2cec9eacc40e6e8ccc98931840301f1006e95647ceb2dd5c3aa06f7c" +dependencies = [ + "libsecp256k1-core", +] + +[[package]] +name = "linked-hash-map" +version = "0.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7fb9b38af92608140b86b693604b9ffcc5824240a484d1ecd4795bacb2fe88f3" + +[[package]] +name = "linregress" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6c601a85f5ecd1aba625247bca0031585fb1c446461b142878a16f8245ddeb8" 
+dependencies = [ + "nalgebra", + "statrs", +] [[package]] name = "lock_api" @@ -1843,6 +2315,15 @@ dependencies = [ "libc", ] +[[package]] +name = "matchers" +version = "0.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f099785f7595cc4b4553a174ce30dd7589ef93391ff414dbb67f62392b9e0ce1" +dependencies = [ + "regex-automata", +] + [[package]] name = "matchers" version = "0.1.0" @@ -1894,6 +2375,41 @@ dependencies = [ "autocfg", ] +[[package]] +name = "memory-db" +version = "0.28.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d505169b746dacf02f7d14d8c80b34edfd8212159c63d23c977739a0d960c626" +dependencies = [ + "hash-db", + "hashbrown 0.11.2", + "parity-util-mem", +] + +[[package]] +name = "memory_units" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "71d96e3f3c0b6325d8ccd83c33b28acb183edcb6c67938ba104ec546854b0882" + +[[package]] +name = "merlin" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4e261cf0f8b3c42ded9f7d2bb59dea03aa52bc8a1cbc7482f9fc3fd1229d3b42" +dependencies = [ + "byteorder", + "keccak", + "rand_core 0.5.1", + "zeroize", +] + +[[package]] +name = "micromath" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "39617bc909d64b068dcffd0e3e31679195b5576d0c83fadc52690268cc2b2b55" + [[package]] name = "mime" version = "0.3.16" @@ -1982,7 +2498,7 @@ dependencies = [ "tempfile", "thiserror", "tracing", - "tracing-subscriber", + "tracing-subscriber 0.3.10", "ureq", "zip", ] @@ -2027,6 +2543,35 @@ dependencies = [ "twoway", ] +[[package]] +name = "nalgebra" +version = "0.27.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "462fffe4002f4f2e1f6a9dcf12cc1a6fc0e15989014efc02a941d3e0f5dc2120" +dependencies = [ + "approx", + "matrixmultiply", + "nalgebra-macros", + "num-complex", + "num-rational 0.4.0", + "num-traits", + "rand 0.8.5", + "rand_distr", + "simba", + "typenum", +] + +[[package]] +name = "nalgebra-macros" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "01fcc0b8149b4632adc89ac3b7b31a12fb6099a0317a4eb2ebff574ef7de7218" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "nanoid" version = "0.4.0" @@ -2036,6 +2581,12 @@ dependencies = [ "rand 0.8.5", ] +[[package]] +name = "nodrop" +version = "0.1.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72ef4a56884ca558e5ddb05a1d1e7e1bfd9a68d9ed024c21704cc98872dae1bb" + [[package]] name = "nom" version = "7.1.1" @@ -2046,6 +2597,17 @@ dependencies = [ "minimal-lexical", ] +[[package]] +name = "num-bigint" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "090c7f9998ee0ff65aa5b723e4009f7b217707f1fb5ea551329cc4d6231fb304" +dependencies = [ + "autocfg", + "num-integer", + "num-traits", +] + [[package]] name = "num-bigint" version = "0.4.3" @@ -2058,6 +2620,15 @@ dependencies = [ "serde", ] +[[package]] +name = "num-complex" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26873667bbbb7c5182d4a37c1add32cdf09f841af72da53318fdb81543c15085" +dependencies = [ + "num-traits", +] + [[package]] name = "num-derive" version = "0.3.3" @@ -2069,6 +2640,16 @@ dependencies = [ "syn", ] +[[package]] +name = "num-format" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"bafe4179722c2894288ee77a9f044f02811c86af699344c498b0840c698a2465" +dependencies = [ + "arrayvec 0.4.12", + "itoa 0.4.8", +] + [[package]] name = "num-integer" version = "0.1.45" @@ -2079,6 +2660,18 @@ dependencies = [ "num-traits", ] +[[package]] +name = "num-rational" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c000134b5dbf44adc5cb772486d335293351644b801551abe8f75c84cfa4aef" +dependencies = [ + "autocfg", + "num-bigint 0.2.6", + "num-integer", + "num-traits", +] + [[package]] name = "num-rational" version = "0.4.0" @@ -2086,7 +2679,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d41702bd167c2df5520b384281bc111a4b5efcf7fbc4c9c222c815b07e0a6a6a" dependencies = [ "autocfg", - "num-bigint", + "num-bigint 0.4.3", "num-integer", "num-traits", "serde", @@ -2099,6 +2692,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd" dependencies = [ "autocfg", + "libm", ] [[package]] @@ -2144,6 +2738,12 @@ dependencies = [ "eyre", ] +[[package]] +name = "opaque-debug" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2839e79665f131bdb5782e51f2c6c9599c133c6098982a54c794358bf432529c" + [[package]] name = "opaque-debug" version = "0.3.0" @@ -2200,6 +2800,177 @@ version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "decf7381921fea4dcb2549c5667eda59b3ec297ab7e2b5fc33eac69d2e7da87b" +[[package]] +name = "pallet-beefy" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +dependencies = [ + "beefy-primitives", + "frame-support", + "frame-system", + "pallet-session", + "parity-scale-codec", + "scale-info", + "serde", + "sp-runtime", + "sp-std 4.0.0", +] + +[[package]] +name = "pallet-beefy-mmr" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +dependencies = [ + "beefy-merkle-tree", + "beefy-primitives", + "frame-support", + "frame-system", + "hex", + "libsecp256k1", + "log", + "pallet-beefy", + "pallet-mmr", + "pallet-mmr-primitives", + "pallet-session", + "parity-scale-codec", + "scale-info", + "serde", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std 4.0.0", +] + +[[package]] +name = "pallet-mmr" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +dependencies = [ + "ckb-merkle-mountain-range", + "frame-benchmarking", + "frame-support", + "frame-system", + "pallet-mmr-primitives", + "parity-scale-codec", + "scale-info", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std 4.0.0", +] + +[[package]] +name = "pallet-mmr-primitives" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +dependencies = [ + "frame-support", + "frame-system", + "log", + "parity-scale-codec", + "serde", + "sp-api", + "sp-core", + "sp-runtime", + "sp-std 4.0.0", +] + +[[package]] +name = "pallet-session" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +dependencies = [ + "frame-support", + "frame-system", + "impl-trait-for-tuples", + "log", + "pallet-timestamp", + "parity-scale-codec", 
+ "scale-info", + "sp-core", + "sp-io", + "sp-runtime", + "sp-session", + "sp-staking", + "sp-std 4.0.0", + "sp-trie", +] + +[[package]] +name = "pallet-timestamp" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +dependencies = [ + "frame-benchmarking", + "frame-support", + "frame-system", + "log", + "parity-scale-codec", + "scale-info", + "sp-inherents", + "sp-runtime", + "sp-std 4.0.0", + "sp-timestamp", +] + +[[package]] +name = "parity-scale-codec" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "373b1a4c1338d9cd3d1fa53b3a11bdab5ab6bd80a20f7f7becd76953ae2be909" +dependencies = [ + "arrayvec 0.7.2", + "bitvec", + "byte-slice-cast", + "impl-trait-for-tuples", + "parity-scale-codec-derive", + "serde", +] + +[[package]] +name = "parity-scale-codec-derive" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1557010476e0595c9b568d16dcfb81b93cdeb157612726f5170d31aa707bed27" +dependencies = [ + "proc-macro-crate", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "parity-util-mem" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f4cb4e169446179cbc6b8b6320cc9fca49bd2e94e8db25f25f200a8ea774770" +dependencies = [ + "cfg-if 1.0.0", + "hashbrown 0.11.2", + "impl-trait-for-tuples", + "parity-util-mem-derive", + "parking_lot 0.11.2", + "primitive-types", + "winapi", +] + +[[package]] +name = "parity-util-mem-derive" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f557c32c6d268a07c921471619c0295f5efad3a0e76d4f97a05c091a51d110b2" +dependencies = [ + "proc-macro2", + "syn", + "synstructure", +] + +[[package]] +name = "parity-wasm" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be5e13c266502aadf83426d87d81a0f5d1ef45b8027f5a471c360abfe4bfae92" + [[package]] name = "parking_lot" version = "0.11.2" @@ -2263,6 +3034,15 @@ dependencies = [ "crypto-mac 0.8.0", ] +[[package]] +name = "pbkdf2" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d95f5254224e617595d2cc3cc73ff0a5eaf2637519e25f03388154e9378b6ffa" +dependencies = [ + "crypto-mac 0.11.1", +] + [[package]] name = "peg" version = "0.7.0" @@ -2351,6 +3131,29 @@ version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eb9f9e6e233e5c4a35559a617bf40a4ec447db2e84c20b55a6f83167b7e57872" +[[package]] +name = "primitive-types" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05e4722c697a58a99d5d06a08c30821d7c082a4632198de1eaa5a6c22ef42373" +dependencies = [ + "fixed-hash", + "impl-codec", + "impl-serde", + "scale-info", + "uint", +] + +[[package]] +name = "proc-macro-crate" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e17d47ce914bf4de440332250b0edd23ce48c005f59fab39d3335866b114f11a" +dependencies = [ + "thiserror", + "toml", +] + [[package]] name = "proc-macro-error" version = "1.0.4" @@ -2480,6 +3283,12 @@ dependencies = [ "proc-macro2", ] +[[package]] +name = "radium" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "643f8f41a8ebc4c5dc4515c82bb8abd397b527fc20fd681b7c011c2aee5d44fb" + [[package]] name = "rand" version = "0.7.3" @@ -2491,6 +3300,7 @@ dependencies = [ 
"rand_chacha 0.2.2", "rand_core 0.5.1", "rand_hc", + "rand_pcg", ] [[package]] @@ -2542,6 +3352,16 @@ dependencies = [ "getrandom 0.2.6", ] +[[package]] +name = "rand_distr" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32cb0b9bc82b0a0876c2dd994a7e7a2683d3e7390ca40e6886785ef0c7e3ee31" +dependencies = [ + "num-traits", + "rand 0.8.5", +] + [[package]] name = "rand_hc" version = "0.2.0" @@ -2551,6 +3371,15 @@ dependencies = [ "rand_core 0.5.1", ] +[[package]] +name = "rand_pcg" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "16abd0c1b639e9eb4d7c50c0b8100b0d0f849be2349829c740fe8e6eb4816429" +dependencies = [ + "rand_core 0.5.1", +] + [[package]] name = "raw-cpuid" version = "10.3.0" @@ -2560,6 +3389,12 @@ dependencies = [ "bitflags", ] +[[package]] +name = "rawpointer" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60a357793950651c4ed0f3f52338f53b2f809f32d83a07f72909fa13e4c6c1e3" + [[package]] name = "rayon" version = "1.5.3" @@ -2605,25 +3440,45 @@ dependencies = [ ] [[package]] -name = "regex" -version = "1.5.6" +name = "ref-cast" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d83f127d94bdbcda4c8cc2e50f6f84f4b611f69c902699ca385a39c3a75f9ff1" +checksum = "300f2a835d808734ee295d45007adacb9ebb29dd3ae2424acfa17930cae541da" dependencies = [ - "aho-corasick", - "memchr", - "regex-syntax", + "ref-cast-impl", ] [[package]] -name = "regex-automata" -version = "0.1.10" +name = "ref-cast-impl" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" +checksum = "4c38e3aecd2b21cb3959637b883bb3714bc7e43f0268b9a29d3743ee3e55cdd2" dependencies = [ - "regex-syntax", -] - + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "regex" +version = "1.5.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d83f127d94bdbcda4c8cc2e50f6f84f4b611f69c902699ca385a39c3a75f9ff1" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax", +] + +[[package]] +name = "regex-automata" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" +dependencies = [ + "regex-syntax", +] + [[package]] name = "regex-syntax" version = "0.6.26" @@ -2679,7 +3534,7 @@ checksum = "2eca4ecc81b7f313189bf73ce724400a07da2a6dac19588b03c8bd76a2dcc251" dependencies = [ "block-buffer 0.9.0", "digest 0.9.0", - "opaque-debug", + "opaque-debug 0.3.0", ] [[package]] @@ -2707,6 +3562,16 @@ dependencies = [ "url", ] +[[package]] +name = "rs_merkle" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a632a43487c1332be8e183588079f89b6820fab24e04db49521eacd536837372" +dependencies = [ + "micromath", + "sha2 0.10.2", +] + [[package]] name = "rustc-demangle" version = "0.1.21" @@ -2719,6 +3584,21 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" +[[package]] +name = "rustc-hex" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6" + +[[package]] +name = "rustc_version" +version = "0.4.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" +dependencies = [ + "semver", +] + [[package]] name = "rustls" version = "0.19.1" @@ -2851,6 +3731,32 @@ dependencies = [ "winapi-util", ] +[[package]] +name = "scale-info" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c55b744399c25532d63a0d2789b109df8d46fc93752d46b0782991a931a782f" +dependencies = [ + "bitvec", + "cfg-if 1.0.0", + "derive_more", + "parity-scale-codec", + "scale-info-derive", + "serde", +] + +[[package]] +name = "scale-info-derive" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baeb2780690380592f86205aa4ee49815feb2acad8c2f59e6dd207148c3f1fcd" +dependencies = [ + "proc-macro-crate", + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "schannel" version = "0.1.20" @@ -2894,6 +3800,24 @@ dependencies = [ "syn", ] +[[package]] +name = "schnorrkel" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "021b403afe70d81eea68f6ea12f6b3c9588e5d536a94c3bf80f15e7faa267862" +dependencies = [ + "arrayref", + "arrayvec 0.5.2", + "curve25519-dalek 2.1.3", + "getrandom 0.1.16", + "merlin", + "rand 0.7.3", + "rand_core 0.5.1", + "sha2 0.8.2", + "subtle", + "zeroize", +] + [[package]] name = "scopeguard" version = "1.1.0" @@ -2927,7 +3851,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "08da66b8b0965a5555b6bd6639e68ccba85e1e2506f5fbb089e93f8a04e1a2d1" dependencies = [ "der", - "generic-array", + "generic-array 0.14.5", "pkcs8", "subtle", "zeroize", @@ -3050,141 +3974,527 @@ version = "1.0.81" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b7ce2b32a1aed03c558dc61a5cd328f15aff2dbc17daad8fb8af04d2100e15c" dependencies = [ - "itoa", - "ryu", - "serde", + "itoa 1.0.1", + "ryu", + "serde", +] + +[[package]] +name = "serde_repr" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2ad84e47328a31223de7fed7a4f5087f2d6ddfe586cf3ca25b7a165bc0a5aed" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "serde_yaml" +version = "0.8.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "707d15895415db6628332b737c838b88c598522e4dc70647e59b72312924aebc" +dependencies = [ + "indexmap", + "ryu", + "serde", + "yaml-rust", +] + +[[package]] +name = "serial_test" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d19dbfb999a147cedbfe82f042eb9555f5b0fa4ef95ee4570b74349103d9c9f4" +dependencies = [ + "lazy_static", + "log", + "parking_lot 0.12.0", + "serial_test_derive", +] + +[[package]] +name = "serial_test_derive" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb9e2050b2be1d681f8f1c1a528bcfe4e00afa2d8995f713974f5333288659f2" +dependencies = [ + "proc-macro-error", + "proc-macro2", + "quote", + "rustversion", + "syn", +] + +[[package]] +name = "sha-1" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "99cd6713db3cf16b6c84e06321e049a9b9f699826e16096d23bbcc44d15d51a6" +dependencies = [ + "block-buffer 0.9.0", + "cfg-if 1.0.0", + "cpufeatures", + "digest 0.9.0", + "opaque-debug 0.3.0", +] + +[[package]] +name = "sha-1" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"028f48d513f9678cda28f6e4064755b3fbb2af6acd672f2c209b62323f7aea0f" +dependencies = [ + "cfg-if 1.0.0", + "cpufeatures", + "digest 0.10.3", +] + +[[package]] +name = "sha1" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1da05c97445caa12d05e848c4a4fcbbea29e748ac28f7e80e9b010392063770" +dependencies = [ + "sha1_smol", +] + +[[package]] +name = "sha1_smol" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae1a47186c03a32177042e55dbc5fd5aee900b8e0069a8d70fba96a9375cd012" + +[[package]] +name = "sha2" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a256f46ea78a0c0d9ff00077504903ac881a1dafdc20da66545699e7776b3e69" +dependencies = [ + "block-buffer 0.7.3", + "digest 0.8.1", + "fake-simd", + "opaque-debug 0.2.3", +] + +[[package]] +name = "sha2" +version = "0.9.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4d58a1e1bf39749807d89cf2d98ac2dfa0ff1cb3faa38fbb64dd88ac8013d800" +dependencies = [ + "block-buffer 0.9.0", + "cfg-if 1.0.0", + "cpufeatures", + "digest 0.9.0", + "opaque-debug 0.3.0", +] + +[[package]] +name = "sha2" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55deaec60f81eefe3cce0dc50bda92d6d8e88f2a27df7c5033b42afeb1ed2676" +dependencies = [ + "cfg-if 1.0.0", + "cpufeatures", + "digest 0.10.3", +] + +[[package]] +name = "sha3" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f81199417d4e5de3f04b1e871023acea7389672c4135918f05aa9cbf2f2fa809" +dependencies = [ + "block-buffer 0.9.0", + "digest 0.9.0", + "keccak", + "opaque-debug 0.3.0", +] + +[[package]] +name = "sharded-slab" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "900fba806f70c630b0a382d0d825e17a0f19fcd059a2ade1ff237bcddf446b31" +dependencies = [ + "lazy_static", +] + +[[package]] +name = "signal-hook" +version = "0.3.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "647c97df271007dcea485bb74ffdb57f2e683f1306c854f468a0c244badabf2d" +dependencies = [ + "libc", + "signal-hook-registry", +] + +[[package]] +name = "signal-hook-registry" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e51e73328dc4ac0c7ccbda3a494dfa03df1de2f46018127f60c693f2648455b0" +dependencies = [ + "libc", +] + +[[package]] +name = "signature" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02658e48d89f2bec991f9a78e69cfa4c316f8d6a6c4ec12fae1aeb263d486788" +dependencies = [ + "digest 0.9.0", + "rand_core 0.6.3", +] + +[[package]] +name = "simba" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e82063457853d00243beda9952e910b82593e4b07ae9f721b9278a99a0d3d5c" +dependencies = [ + "approx", + "num-complex", + "num-traits", + "paste", +] + +[[package]] +name = "simple-error" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc47a29ce97772ca5c927f75bac34866b16d64e07f330c3248e2d7226623901b" + +[[package]] +name = "skeptic" +version = "0.13.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "16d23b015676c90a0f01c197bfdc786c20342c73a0afdda9025adb0bc42940a8" +dependencies = [ + "bytecount", + "cargo_metadata", + "error-chain", + "glob", + "pulldown-cmark", + "tempfile", + "walkdir", +] + 
+[[package]] +name = "slab" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9def91fd1e018fe007022791f865d0ccc9b3a0d5001e01aabb8b40e46000afb5" + +[[package]] +name = "smallvec" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2dd574626839106c320a323308629dcb1acfc96e32a8cba364ddc61ac23ee83" + +[[package]] +name = "socket2" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "66d72b759436ae32898a2af0a14218dbf55efde3feeb170eb623637db85ee1e0" +dependencies = [ + "libc", + "winapi", +] + +[[package]] +name = "sp-api" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +dependencies = [ + "hash-db", + "log", + "parity-scale-codec", + "sp-api-proc-macro", + "sp-core", + "sp-runtime", + "sp-state-machine", + "sp-std 4.0.0", + "sp-version", + "thiserror", +] + +[[package]] +name = "sp-api-proc-macro" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +dependencies = [ + "blake2-rfc", + "proc-macro-crate", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "sp-application-crypto" +version = "4.0.0" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +dependencies = [ + "parity-scale-codec", + "scale-info", + "serde", + "sp-core", + "sp-io", + "sp-std 4.0.0", +] + +[[package]] +name = "sp-arithmetic" +version = "4.0.0" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +dependencies = [ + "integer-sqrt", + "num-traits", + "parity-scale-codec", + "scale-info", + "serde", + "sp-debug-derive", + "sp-std 4.0.0", + "static_assertions", +] + +[[package]] +name = "sp-core" +version = "4.1.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +dependencies = [ + "base58", + "bitflags", + "blake2-rfc", + "byteorder", + "dyn-clonable", + "ed25519-dalek", + "futures", + "hash-db", + "hash256-std-hasher", + "hex", + "impl-serde", + "lazy_static", + "libsecp256k1", + "log", + "merlin", + "num-traits", + "parity-scale-codec", + "parity-util-mem", + "parking_lot 0.11.2", + "primitive-types", + "rand 0.7.3", + "regex", + "scale-info", + "schnorrkel", + "secrecy", + "serde", + "sha2 0.10.2", + "sp-core-hashing", + "sp-debug-derive", + "sp-externalities", + "sp-runtime-interface", + "sp-std 4.0.0", + "sp-storage", + "ss58-registry", + "substrate-bip39", + "thiserror", + "tiny-bip39", + "tiny-keccak", + "twox-hash", + "wasmi", + "zeroize", +] + +[[package]] +name = "sp-core-hashing" +version = "4.0.0" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +dependencies = [ + "blake2-rfc", + "byteorder", + "sha2 0.10.2", + "sp-std 4.0.0", + "tiny-keccak", + "twox-hash", ] [[package]] -name = "serde_repr" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2ad84e47328a31223de7fed7a4f5087f2d6ddfe586cf3ca25b7a165bc0a5aed" +name = "sp-core-hashing-proc-macro" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" dependencies = [ 
"proc-macro2", "quote", + "sp-core-hashing", "syn", ] [[package]] -name = "serde_yaml" -version = "0.8.24" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "707d15895415db6628332b737c838b88c598522e4dc70647e59b72312924aebc" +name = "sp-debug-derive" +version = "4.0.0" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" dependencies = [ - "indexmap", - "ryu", - "serde", - "yaml-rust", + "proc-macro2", + "quote", + "syn", ] [[package]] -name = "serial_test" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d19dbfb999a147cedbfe82f042eb9555f5b0fa4ef95ee4570b74349103d9c9f4" +name = "sp-externalities" +version = "0.10.0" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" dependencies = [ - "lazy_static", - "log", - "parking_lot 0.12.0", - "serial_test_derive", + "environmental", + "parity-scale-codec", + "sp-std 4.0.0", + "sp-storage", ] [[package]] -name = "serial_test_derive" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb9e2050b2be1d681f8f1c1a528bcfe4e00afa2d8995f713974f5333288659f2" +name = "sp-inherents" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" dependencies = [ - "proc-macro-error", - "proc-macro2", - "quote", - "rustversion", - "syn", + "async-trait", + "impl-trait-for-tuples", + "parity-scale-codec", + "sp-core", + "sp-runtime", + "sp-std 4.0.0", + "thiserror", ] [[package]] -name = "sha-1" -version = "0.9.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99cd6713db3cf16b6c84e06321e049a9b9f699826e16096d23bbcc44d15d51a6" +name = "sp-io" +version = "4.0.0" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" dependencies = [ - "block-buffer 0.9.0", - "cfg-if 1.0.0", - "cpufeatures", - "digest 0.9.0", - "opaque-debug", + "futures", + "hash-db", + "libsecp256k1", + "log", + "parity-scale-codec", + "parking_lot 0.11.2", + "sp-core", + "sp-externalities", + "sp-keystore", + "sp-runtime-interface", + "sp-state-machine", + "sp-std 4.0.0", + "sp-tracing", + "sp-trie", + "sp-wasm-interface", + "tracing", + "tracing-core", ] [[package]] -name = "sha-1" +name = "sp-keystore" version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "028f48d513f9678cda28f6e4064755b3fbb2af6acd672f2c209b62323f7aea0f" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" dependencies = [ - "cfg-if 1.0.0", - "cpufeatures", - "digest 0.10.3", + "async-trait", + "derive_more", + "futures", + "merlin", + "parity-scale-codec", + "parking_lot 0.11.2", + "schnorrkel", + "sp-core", + "sp-externalities", ] [[package]] -name = "sha1" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1da05c97445caa12d05e848c4a4fcbbea29e748ac28f7e80e9b010392063770" +name = "sp-panic-handler" +version = "4.0.0" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" dependencies = [ - "sha1_smol", + "backtrace", + "lazy_static", + "regex", ] [[package]] -name = "sha1_smol" -version = "1.0.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae1a47186c03a32177042e55dbc5fd5aee900b8e0069a8d70fba96a9375cd012" - -[[package]] -name = "sha2" -version = "0.9.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d58a1e1bf39749807d89cf2d98ac2dfa0ff1cb3faa38fbb64dd88ac8013d800" +name = "sp-runtime" +version = "4.1.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" dependencies = [ - "block-buffer 0.9.0", - "cfg-if 1.0.0", - "cpufeatures", - "digest 0.9.0", - "opaque-debug", + "either", + "hash256-std-hasher", + "impl-trait-for-tuples", + "log", + "parity-scale-codec", + "parity-util-mem", + "paste", + "rand 0.7.3", + "scale-info", + "serde", + "sp-application-crypto", + "sp-arithmetic", + "sp-core", + "sp-io", + "sp-std 4.0.0", +] + +[[package]] +name = "sp-runtime-interface" +version = "4.1.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +dependencies = [ + "impl-trait-for-tuples", + "parity-scale-codec", + "primitive-types", + "sp-externalities", + "sp-runtime-interface-proc-macro", + "sp-std 4.0.0", + "sp-storage", + "sp-tracing", + "sp-wasm-interface", + "static_assertions", ] [[package]] -name = "sha2" -version = "0.10.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55deaec60f81eefe3cce0dc50bda92d6d8e88f2a27df7c5033b42afeb1ed2676" +name = "sp-runtime-interface-proc-macro" +version = "4.0.0" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" dependencies = [ - "cfg-if 1.0.0", - "cpufeatures", - "digest 0.10.3", + "Inflector", + "proc-macro-crate", + "proc-macro2", + "quote", + "syn", ] [[package]] -name = "sha3" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f81199417d4e5de3f04b1e871023acea7389672c4135918f05aa9cbf2f2fa809" +name = "sp-session" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" dependencies = [ - "block-buffer 0.9.0", - "digest 0.9.0", - "keccak", - "opaque-debug", + "parity-scale-codec", + "scale-info", + "sp-api", + "sp-core", + "sp-runtime", + "sp-staking", + "sp-std 4.0.0", ] [[package]] -name = "sharded-slab" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "900fba806f70c630b0a382d0d825e17a0f19fcd059a2ade1ff237bcddf446b31" +name = "sp-staking" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" dependencies = [ - "lazy_static", + "parity-scale-codec", + "scale-info", + "sp-runtime", + "sp-std 4.0.0", ] [[package]] @@ -3193,48 +4503,74 @@ version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a253b5e89e2698464fc26b545c9edceb338e18a89effeeecfea192c3025be29d" dependencies = [ - "libc", - "signal-hook-registry", + "hash-db", + "log", + "num-traits", + "parity-scale-codec", + "parking_lot 0.11.2", + "rand 0.7.3", + "smallvec", + "sp-core", + "sp-externalities", + "sp-panic-handler", + "sp-std 4.0.0", + "sp-trie", + "thiserror", + "tracing", + "trie-db", + "trie-root", ] [[package]] -name = "signal-hook-registry" -version = "1.4.0" +name = "sp-std" +version = "3.0.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "e51e73328dc4ac0c7ccbda3a494dfa03df1de2f46018127f60c693f2648455b0" -dependencies = [ - "libc", -] +checksum = "35391ea974fa5ee869cb094d5b437688fbf3d8127d64d1b9fed5822a1ed39b12" [[package]] -name = "signature" -version = "1.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02658e48d89f2bec991f9a78e69cfa4c316f8d6a6c4ec12fae1aeb263d486788" +name = "sp-std" +version = "4.0.0" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[package]] +name = "sp-storage" +version = "4.0.0" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" dependencies = [ - "digest 0.9.0", - "rand_core 0.6.3", + "impl-serde", + "parity-scale-codec", + "ref-cast", + "serde", + "sp-debug-derive", + "sp-std 4.0.0", ] [[package]] -name = "simple-error" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc47a29ce97772ca5c927f75bac34866b16d64e07f330c3248e2d7226623901b" +name = "sp-timestamp" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +dependencies = [ + "async-trait", + "futures-timer", + "log", + "parity-scale-codec", + "sp-api", + "sp-inherents", + "sp-runtime", + "sp-std 4.0.0", + "thiserror", +] [[package]] -name = "skeptic" -version = "0.13.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16d23b015676c90a0f01c197bfdc786c20342c73a0afdda9025adb0bc42940a8" +name = "sp-tracing" +version = "4.0.0" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" dependencies = [ - "bytecount", - "cargo_metadata", - "error-chain", - "glob", - "pulldown-cmark", - "tempfile", - "walkdir", + "parity-scale-codec", + "sp-std 4.0.0", + "tracing", + "tracing-core", + "tracing-subscriber 0.2.25", ] [[package]] @@ -3244,26 +4580,44 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eb703cfe953bccee95685111adeedb76fabe4e97549a58d16f03ea7b9367bb32" [[package]] -name = "smallvec" -version = "1.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2dd574626839106c320a323308629dcb1acfc96e32a8cba364ddc61ac23ee83" +name = "sp-version" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +dependencies = [ + "impl-serde", + "parity-scale-codec", + "parity-wasm", + "scale-info", + "serde", + "sp-core-hashing-proc-macro", + "sp-runtime", + "sp-std 4.0.0", + "sp-version-proc-macro", + "thiserror", +] [[package]] -name = "socket2" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66d72b759436ae32898a2af0a14218dbf55efde3feeb170eb623637db85ee1e0" +name = "sp-version-proc-macro" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" dependencies = [ - "libc", - "winapi", + "parity-scale-codec", + "proc-macro2", + "quote", + "syn", ] [[package]] -name = "sp-std" -version = "3.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35391ea974fa5ee869cb094d5b437688fbf3d8127d64d1b9fed5822a1ed39b12" +name = "sp-wasm-interface" +version = 
"4.1.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +dependencies = [ + "impl-trait-for-tuples", + "log", + "parity-scale-codec", + "sp-std 4.0.0", + "wasmi", +] [[package]] name = "spin" @@ -3287,12 +4641,38 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" +[[package]] +name = "statrs" +version = "0.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05bdbb8e4e78216a85785a85d3ec3183144f98d0097b9281802c019bb07a6f05" +dependencies = [ + "approx", + "lazy_static", + "nalgebra", + "num-traits", + "rand 0.8.5", +] + [[package]] name = "strsim" version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" +[[package]] +name = "substrate-bip39" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49eee6965196b32f882dd2ee85a92b1dbead41b04e53907f269de3b0dc04733c" +dependencies = [ + "hmac 0.11.0", + "pbkdf2 0.8.0", + "schnorrkel", + "sha2 0.9.9", + "zeroize", +] + [[package]] name = "subtle" version = "2.4.1" @@ -3343,6 +4723,12 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7b2093cf4c8eb1e67749a6762251bc9cd836b6fc171623bd0a9d324d37af2417" +[[package]] +name = "tap" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" + [[package]] name = "tempfile" version = "3.3.0" @@ -3615,7 +5001,7 @@ dependencies = [ "anyhow", "hmac 0.8.1", "once_cell", - "pbkdf2", + "pbkdf2 0.4.0", "rand 0.7.3", "rustc-hash", "sha2 0.9.9", @@ -3886,7 +5272,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d686ec1c0f384b1277f097b2f279a2ecc11afe8c133c1aabf036a27cb4cd206e" dependencies = [ "tracing", - "tracing-subscriber", + "tracing-subscriber 0.3.10", ] [[package]] @@ -3928,7 +5314,7 @@ checksum = "4bc28f93baff38037f64e6f43d34cfa1605f27a49c34e8a04c5e78b0babf2596" dependencies = [ "ansi_term", "lazy_static", - "matchers", + "matchers 0.1.0", "regex", "serde", "serde_json", @@ -3941,6 +5327,28 @@ dependencies = [ "tracing-serde", ] +[[package]] +name = "trie-db" +version = "0.23.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d32d034c0d3db64b43c31de38e945f15b40cd4ca6d2dcfc26d4798ce8de4ab83" +dependencies = [ + "hash-db", + "hashbrown 0.12.0", + "log", + "rustc-hex", + "smallvec", +] + +[[package]] +name = "trie-root" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a36c5ca3911ed3c9a5416ee6c679042064b93fc637ded67e25f92e68d783891" +dependencies = [ + "hash-db", +] + [[package]] name = "triomphe" version = "0.1.5" @@ -3956,6 +5364,12 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" +[[package]] +name = "tt-call" +version = "1.0.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e66dcbec4290c69dd03c57e76c2469ea5c7ce109c6dd4351c13055cf71ea055" + [[package]] name = "tungstenite" version = "0.12.0" @@ -3984,6 +5398,17 @@ dependencies = [ "memchr", ] +[[package]] +name = "twox-hash" +version = "1.6.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ee73e6e4924fe940354b8d4d98cad5231175d615cd855b758adc658c0aac6a0" +dependencies = [ + "cfg-if 1.0.0", + "rand 0.8.5", + "static_assertions", +] + [[package]] name = "typenum" version = "1.15.0" @@ -4217,6 +5642,30 @@ version = "0.2.80" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d554b7f530dee5964d9a9468d95c1f8b8acae4f282807e7d27d4b03099a46744" +[[package]] +name = "wasmi" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca00c5147c319a8ec91ec1a0edbec31e566ce2c9cc93b3f9bb86a9efd0eb795d" +dependencies = [ + "downcast-rs", + "libc", + "memory_units", + "num-rational 0.2.4", + "num-traits", + "parity-wasm", + "wasmi-validation", +] + +[[package]] +name = "wasmi-validation" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "165343ecd6c018fc09ebcae280752702c9a2ef3e6f8d02f1cfcbdb53ef6d7937" +dependencies = [ + "parity-wasm", +] + [[package]] name = "web-sys" version = "0.3.57" @@ -4339,6 +5788,12 @@ version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c811ca4a8c853ef420abd8592ba53ddbbac90410fab6903b3e79972a631f7680" +[[package]] +name = "wyz" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85e60b0d1b5f99db2556934e21937020776a5d31520bf169e851ac44e6420214" + [[package]] name = "yaml-rust" version = "0.4.5" diff --git a/Cargo.toml b/Cargo.toml index eaf4e09dfc..bb2edb8e41 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -25,3 +25,225 @@ exclude = [ # tendermint-light-client = { git = "https://github.com/informalsystems/tendermint-rs", branch = "v0.23.x" } # tendermint-light-client-verifier = { git = "https://github.com/informalsystems/tendermint-rs", branch = "v0.23.x" } # tendermint-testgen = { git = "https://github.com/informalsystems/tendermint-rs", branch = "v0.23.x" } + + +[patch."https://github.com/paritytech/substrate"] +node-template ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +frame-benchmarking ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +frame-support ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +frame-support-procedural ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +frame-support-procedural-tools ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +frame-support-procedural-tools-derive ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sp-arithmetic ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sp-debug-derive ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sp-std ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sp-core ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sp-core-hashing ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sp-externalities ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sp-storage ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sp-runtime-interface ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sp-runtime-interface-proc-macro ={git 
= "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sp-tracing ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sp-wasm-interface ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sp-io ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sp-keystore ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sp-state-machine ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sp-panic-handler ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sp-trie ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sp-runtime ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sp-application-crypto ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sp-api ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sp-api-proc-macro ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sp-version ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sp-core-hashing-proc-macro ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sp-version-proc-macro ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sp-test-primitives ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +substrate-test-runtime-client ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sc-block-builder ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sc-client-api ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +substrate-prometheus-endpoint ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sc-executor ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sc-executor-common ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sc-allocator ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sp-maybe-compressed-blob ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sp-serializer ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sc-executor-wasmi ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sc-executor-wasmtime ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sc-runtime-test ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sp-sandbox ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sp-tasks ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +substrate-wasm-builder ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sc-tracing ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sc-rpc-server ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sc-tracing-proc-macro ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sp-blockchain ={git = 
"https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sp-consensus ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sp-inherents ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sp-database ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sp-rpc ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +substrate-test-runtime ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +frame-system ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +frame-system-rpc-runtime-api ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +pallet-babe ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +pallet-authorship ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sp-authorship ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +pallet-session ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +pallet-timestamp ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sp-timestamp ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sp-session ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sp-staking ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sp-consensus-babe ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sp-consensus-slots ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sp-consensus-vrf ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +frame-election-provider-support ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sp-npos-elections ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sp-npos-elections-solution-type ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +substrate-test-utils ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +substrate-test-utils-derive ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sc-service ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sc-chain-spec ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sc-chain-spec-derive ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sc-network ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +fork-tree ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sc-consensus ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sc-utils ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sc-peerset ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sp-finality-grandpa ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sc-telemetry ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sc-client-db ={git = 
"https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sc-state-db ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sc-informant ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sc-transaction-pool-api ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sc-keystore ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sc-offchain ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sp-offchain ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sc-transaction-pool ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sp-transaction-pool ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +substrate-test-runtime-transaction-pool ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sc-rpc ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sc-rpc-api ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sp-block-builder ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sp-transaction-storage-proof ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +pallet-balances ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +pallet-transaction-payment ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +pallet-offences ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +pallet-staking ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +pallet-bags-list ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +pallet-staking-reward-curve ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sp-consensus-aura ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sp-keyring ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +substrate-test-client ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sp-runtime-interface-test-wasm ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +frame-benchmarking-cli ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sc-cli ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +node-template-runtime ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +frame-executive ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +frame-system-benchmarking ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +pallet-aura ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +pallet-grandpa ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +pallet-randomness-collective-flip ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +pallet-sudo ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +pallet-template ={git = 
"https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +pallet-transaction-payment-rpc-runtime-api ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +pallet-transaction-payment-rpc ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sc-basic-authorship ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sc-proposer-metrics ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sc-consensus-aura ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sc-consensus-slots ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sc-network-test ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sc-finality-grandpa ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sc-network-gossip ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +substrate-frame-rpc-system ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +substrate-build-script-utils ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +node-bench ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +node-primitives ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +node-runtime ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +frame-try-runtime ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +pallet-asset-tx-payment ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +pallet-assets ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +pallet-authority-discovery ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sp-authority-discovery ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +pallet-bounties ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +pallet-treasury ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +pallet-child-bounties ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +pallet-collective ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +pallet-contracts ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +pallet-contracts-primitives ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +pallet-contracts-proc-macro ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +pallet-utility ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +pallet-contracts-rpc-runtime-api ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +pallet-democracy ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +pallet-scheduler ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +pallet-preimage ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +pallet-election-provider-multi-phase ={git = "https://github.com/ComposableFi/substrate", branch 
= "mmr-polkadot-v0.9.16" } +pallet-elections-phragmen ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +pallet-gilt ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +pallet-identity ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +pallet-im-online ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +pallet-indices ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +pallet-lottery ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +frame-support-test ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +frame-support-test-pallet ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +pallet-membership ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +pallet-mmr ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +pallet-mmr-primitives ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +pallet-multisig ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +pallet-offences-benchmarking ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +pallet-proxy ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +pallet-recovery ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +pallet-session-benchmarking ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +pallet-society ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +pallet-tips ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +pallet-transaction-storage ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +pallet-uniques ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +pallet-vesting ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +node-testing ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +node-executor ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +node-cli ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +node-inspect ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +node-rpc ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +pallet-contracts-rpc ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +pallet-mmr-rpc ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sc-consensus-babe ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sc-consensus-epochs ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sc-consensus-babe-rpc ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sc-finality-grandpa-rpc ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sc-sync-state-rpc ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sc-authority-discovery ={git = 
"https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sc-consensus-uncles ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +try-runtime-cli ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +remote-externalities ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sc-service-test ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +substrate-frame-cli ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +chain-spec-builder ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +subkey ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +beefy-gadget ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +beefy-primitives ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +beefy-gadget-rpc ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sc-consensus-manual-seal ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sc-consensus-pow ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sp-consensus-pow ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +pallet-atomic-swap ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +pallet-beefy ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +pallet-beefy-mmr ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +beefy-merkle-tree ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +pallet-example-basic ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +pallet-example-offchain-worker ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +pallet-example-parallel ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +pallet-nicks ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +pallet-node-authorization ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +pallet-scored-pool ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +pallet-staking-reward-fn ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +frame-support-test-compile-pass ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +pallet-bags-list-remote-tests ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +pallet-bags-list-fuzzer ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sp-api-test ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sp-application-crypto-test ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sp-arithmetic-fuzzer ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sp-npos-elections-fuzzer ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +sp-runtime-interface-test ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } 
+sp-runtime-interface-test-wasm-deprecated ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +substrate-test-utils-test-crate ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +substrate-frame-rpc-support ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +generate-bags ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } +node-runtime-generate-bags ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } diff --git a/modules/Cargo.toml b/modules/Cargo.toml index 9a6b3374e1..db5a08ae50 100644 --- a/modules/Cargo.toml +++ b/modules/Cargo.toml @@ -18,7 +18,19 @@ all-features = true [features] default = ["std"] -std = ["flex-error/std", "flex-error/eyre_tracer", "ibc-proto/std", "clock"] +std = [ + "flex-error/std", + "flex-error/eyre_tracer", + "ibc-proto/std", + "clock", + "beefy-client/std", + "sp-runtime/std", + "sp-core/std", + "codec/std", + "pallet-mmr-primitives/std", + "beefy-primitives/std", + "sp-trie/std" +] clock = ["tendermint/clock", "time/std"] # This feature grants access to development-time mocking libraries, such as `MockContext` or `MockHeader`. @@ -41,9 +53,16 @@ safe-regex = { version = "0.2.5", default-features = false } subtle-encoding = { version = "0.5", default-features = false } sha2 = { version = "0.10.2", default-features = false } flex-error = { version = "0.4.4", default-features = false } -num-traits = { version = "0.2.15", default-features = false } -derive_more = { version = "0.99.17", default-features = false, features = ["from", "into", "display"] } +num-traits = { version = "0.2.14", default-features = false } +derive_more = { version = "0.99.17", default-features = false, features = ["from", "display"] } uint = { version = "0.9", default-features = false } +beefy-client = { package = "beefy-generic-client", git = "https://github.com/ComposableFi/beefy-client", branch = "david/verify_parachain_headers", default-features = false } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.17", default-features = false } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.17", default-features = false } +pallet-mmr-primitives = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.17", default-features = false } +beefy-primitives = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.17", default-features = false } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } +sp-trie = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.17", default-features = false } [dependencies.tendermint] version = "=0.23.7" diff --git a/modules/src/clients/ics11_beefy/client_def.rs b/modules/src/clients/ics11_beefy/client_def.rs new file mode 100644 index 0000000000..8b13789179 --- /dev/null +++ b/modules/src/clients/ics11_beefy/client_def.rs @@ -0,0 +1 @@ + diff --git a/modules/src/clients/ics11_beefy/client_state.rs b/modules/src/clients/ics11_beefy/client_state.rs new file mode 100644 index 0000000000..fd5240717b --- /dev/null +++ b/modules/src/clients/ics11_beefy/client_state.rs @@ -0,0 +1,226 @@ +use crate::prelude::*; + +use beefy_primitives::mmr::BeefyNextAuthoritySet; +use codec::{Decode, Encode}; +use core::convert::TryFrom; +use primitive_types::H256; +use sp_runtime::SaturatedConversion; +use tendermint_proto::Protobuf; + +use 
ibc_proto::ibc::lightclients::beefy::v1::{BeefyAuthoritySet, ClientState as RawClientState}; + +use crate::clients::ics11_beefy::error::Error; +use crate::clients::ics11_beefy::header::BeefyHeader; +use crate::core::ics02_client::client_state::AnyClientState; +use crate::core::ics02_client::client_type::ClientType; +use crate::core::ics24_host::identifier::ChainId; +use crate::Height; + +pub const REVISION_NUMBER: u64 = 0; +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct ClientState { + /// Latest mmr root hash + pub mmr_root_hash: H256, + /// block number for the latest mmr_root_hash + pub latest_beefy_height: Height, + /// Block height when the client was frozen due to a misbehaviour + pub frozen_height: Option, + /// block number that the beefy protocol was activated on the relay chain. + /// This shoould be the first block in the merkle-mountain-range tree. + pub beefy_activation_block: u32, + /// authorities for the current round + pub authority: BeefyNextAuthoritySet, + /// authorities for the next round + pub next_authority_set: BeefyNextAuthoritySet, + /// Latest parachain height + pub latest_parachain_height: Option, +} + +impl Protobuf for ClientState {} + +impl ClientState { + #[allow(clippy::too_many_arguments)] + pub fn new( + mmr_root_hash: H256, + beefy_activation_block: u32, + latest_beefy_height: u32, + authority_set: BeefyNextAuthoritySet, + next_authority_set: BeefyNextAuthoritySet, + ) -> Result { + // Basic validation for the latest beefy height parameter. + if latest_beefy_height < 0 { + return Err(Error::validation( + "ClientState latest beefy height must be greater than or equal to zero".to_string(), + )); + } + + Ok(Self { + mmr_root_hash, + latest_beefy_height: Height::new(REVISION_NUMBER, latest_beefy_height.into()), + frozen_height: None, + beefy_activation_block, + authority: authority_set, + next_authority_set, + latest_parachain_height: None, + }) + } + + pub fn latest_beefy_height(&self) -> Height { + self.latest_beefy_height + } + + pub fn latest_para_height(&self) -> Height { + self.latest_parachain_height.unwrap_or_default() + } + + pub fn with_frozen_height(self, h: Height) -> Result { + if h == Height::zero() { + return Err(Error::validation( + "ClientState frozen height must be greater than zero".to_string(), + )); + } + Ok(Self { + frozen_height: Some(h), + ..self + }) + } + + /// Verify that the client is at a sufficient height and unfrozen at the given height + pub fn verify_height(&self, height: Height) -> Result<(), Error> { + if self.latest_beefy_height < height { + return Err(Error::insufficient_height( + self.latest_beefy_height(), + height, + )); + } + + match self.frozen_height { + Some(frozen_height) if frozen_height <= height => { + Err(Error::client_frozen(frozen_height, height)) + } + _ => Ok(()), + } + } +} + +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +pub struct UpgradeOptions; + +impl crate::core::ics02_client::client_state::ClientState for ClientState { + type UpgradeOptions = UpgradeOptions; + + fn chain_id(&self) -> ChainId { + Default::default() + } + + fn client_type(&self) -> ClientType { + ClientType::Beefy + } + + fn latest_height(&self) -> Height { + self.latest_para_height() + } + + fn frozen_height(&self) -> Option { + self.frozen_height + } + + fn upgrade( + mut self, + upgrade_height: Height, + upgrade_options: UpgradeOptions, + _chain_id: ChainId, + ) -> Self { + self.frozen_height = None; + // Upgrade the client state + self.latest_beefy_height = upgrade_height; + + self + } + + fn wrap_any(self) 
-> AnyClientState { + AnyClientState::Beefy(self) + } +} + +impl TryFrom for ClientState { + type Error = Error; + + fn try_from(raw: RawClientState) -> Result { + let frozen_height = Some(Height::new(REVISION_NUMBER, raw.frozen_height)); + let latest_beefy_height = Height::new(REVISION_NUMBER, raw.latest_beefy_height.into()); + + let authority_set = raw + .authority + .and_then(|set| { + Some(BeefyNextAuthoritySet { + id: set.id, + len: set.len, + root: H256::decode(&mut &set.authority_root).ok()?, + }) + }) + .ok_or(Error::missing_validator_set())?; + + let next_authority_set = raw + .next_authority_set + .and_then(|set| { + Some(BeefyNextAuthoritySet { + id: set.id, + len: set.len, + root: H256::decode(&mut &set.authority_root).ok()?, + }) + }) + .ok_or(Error::missing_validator_set())?; + + let mmr_root_hash = H256::decode(&mut &raw.mmr_root_hash).map_err(|_| Error::decode())?; + + Ok(Self { + mmr_root_hash, + latest_beefy_height, + frozen_height, + beefy_activation_block: raw.beefy_activation_block, + authority: authority_set, + next_authority_set, + latest_parachain_height: None, + }) + } +} + +impl From for RawClientState { + fn from(client_state: ClientState) -> Self { + RawClientState { + mmr_root_hash: client_state.mmr_root_hash.encode(), + latest_beefy_height: client_state + .latest_beefy_height + .revision_height + .saturated_into::(), + frozen_height: client_state + .frozen_height + .unwrap_or_default() + .revision_height, + beefy_activation_block: client_state.beefy_activation_block, + authority: Some(BeefyAuthoritySet { + id: client_state.authority.id, + len: client_state.authority.len, + authority_root: client_state.authority.root.encode(), + }), + next_authority_set: Some(BeefyAuthoritySet { + id: client_state.next_authority_set.id, + len: client_state.next_authority_set.len, + authority_root: client_state.next_authority_set.root.encode(), + }), + } + } +} + +#[cfg(test)] +mod tests { + #[test] + fn client_state_new() {} + + #[test] + fn client_state_verify_delay_passed() {} + + #[test] + fn client_state_verify_height() {} +} diff --git a/modules/src/clients/ics11_beefy/consensus_state.rs b/modules/src/clients/ics11_beefy/consensus_state.rs new file mode 100644 index 0000000000..966fc12326 --- /dev/null +++ b/modules/src/clients/ics11_beefy/consensus_state.rs @@ -0,0 +1,243 @@ +use crate::prelude::*; + +use beefy_client::primitives::PartialMmrLeaf; +use beefy_primitives::mmr::{BeefyNextAuthoritySet, MmrLeafVersion}; +use codec::Encode; +use core::convert::Infallible; + +use serde::Serialize; +use sp_core::H256; +use sp_runtime::SaturatedConversion; +use tendermint::{hash::Algorithm, time::Time, Hash}; +use tendermint_proto::google::protobuf as tpb; +use tendermint_proto::Protobuf; + +use ibc_proto::ibc::lightclients::beefy::v1::{ + BeefyAuthoritySet, BeefyMmrLeafPartial as RawPartialMmrLeaf, + ConsensusState as RawConsensusState, ParachainHeader as RawParachainHeader, +}; + +use crate::clients::ics11_beefy::error::Error; +use crate::clients::ics11_beefy::header::{ + decode_parachain_header, merge_leaf_version, split_leaf_version, ParachainHeader, +}; +use crate::core::ics02_client::client_consensus::AnyConsensusState; +use crate::core::ics02_client::client_type::ClientType; +use crate::core::ics23_commitment::commitment::CommitmentRoot; + +pub const IBC_CONSENSUS_ID: [u8; 4] = *b"/IBC"; +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct ConsensusState { + pub timestamp: Time, + pub root: Vec, + pub parachain_header: ParachainHeader, +} + +impl ConsensusState { + pub fn 
new(root: Vec, timestamp: Time, parachain_header: ParachainHeader) -> Self { + Self { + timestamp, + root, + parachain_header, + } + } +} + +impl crate::core::ics02_client::client_consensus::ConsensusState for ConsensusState { + type Error = Infallible; + + fn client_type(&self) -> ClientType { + ClientType::Beefy + } + + fn root(&self) -> &CommitmentRoot { + &self.root.into() + } + + fn wrap_any(self) -> AnyConsensusState { + AnyConsensusState::Beefy(self) + } +} + +impl Protobuf for ConsensusState {} + +impl TryFrom for ConsensusState { + type Error = Error; + + fn try_from(raw: RawConsensusState) -> Result { + let ibc_proto::google::protobuf::Timestamp { seconds, nanos } = raw + .timestamp + .ok_or_else(|| Error::invalid_raw_consensus_state("missing timestamp".into()))?; + let proto_timestamp = tpb::Timestamp { seconds, nanos }; + let timestamp = proto_timestamp + .try_into() + .map_err(|e| Error::invalid_raw_consensus_state(format!("invalid timestamp: {}", e)))?; + + let parachain_header = raw + .parachain_header + .ok_or_else(|| Error::invalid_raw_consensus_state("missing parachain header".into()))?; + + let parachain_header = ParachainHeader { + parachain_header: decode_parachain_header(parachain_header.parachain_header).map_err( + |_| Error::invalid_raw_consensus_state("invalid parachain header".into()), + )?, + partial_mmr_leaf: { + let partial_leaf = parachain_header.mmr_leaf_partial.ok_or_else( + Error::invalid_raw_consensus_state("missing mmr leaf".into()), + )?; + PartialMmrLeaf { + version: { + let (major, minor) = + split_leaf_version(partial_leaf.version.saturated_into::()); + MmrLeafVersion::new(major, minor) + }, + parent_number_and_hash: ( + partial_leaf.parent_number, + H256::from_slice(&partial_leaf.parent_hash), + ), + beefy_next_authority_set: { + let next_set = partial_leaf.beefy_next_authority_set.ok_or_else( + Error::invalid_raw_consensus_state("missing next authority set".into()), + )?; + BeefyNextAuthoritySet { + id: next_set.id, + len: next_set.len, + root: H256::from_slice(&next_set.authority_root), + } + }, + } + }, + para_id: parachain_header.para_id, + parachain_heads_proof: parachain_header + .parachain_heads_proof + .into_iter() + .map(|item| { + let mut dest = [0u8; 32]; + dest.copy_from_slice(&*item); + dest + }) + .collect(), + heads_leaf_index: parachain_header.heads_leaf_index, + heads_total_count: parachain_header.heads_total_count, + extrinsic_proof: parachain_header.extrinsic_proof, + }; + Ok(Self { + root: raw.root, + timestamp, + parachain_header, + }) + } +} + +impl From for RawConsensusState { + fn from(value: ConsensusState) -> Self { + // FIXME: shunts like this are necessary due to + // https://github.com/informalsystems/tendermint-rs/issues/1053 + let tpb::Timestamp { seconds, nanos } = value.timestamp.into(); + let timestamp = ibc_proto::google::protobuf::Timestamp { seconds, nanos }; + + RawConsensusState { + timestamp: Some(timestamp), + root: value.root, + parachain_header: Some(RawParachainHeader { + parachain_header: value.parachain_header.encode(), + mmr_leaf_partial: Some(RawPartialMmrLeaf { + version: { + let (major, minor) = + value.parachain_header.partial_mmr_leaf.version.split(); + merge_leaf_version(major, minor) as u32 + }, + parent_number: value + .parachain_header + .partial_mmr_leaf + .parent_number_and_hash + .0, + parent_hash: value + .parachain_header + .partial_mmr_leaf + .parent_number_and_hash + .1 + .encode(), + beefy_next_authority_set: Some(BeefyAuthoritySet { + id: value + .parachain_header + .partial_mmr_leaf + 
.beefy_next_authority_set + .id, + len: value + .parachain_header + .partial_mmr_leaf + .beefy_next_authority_set + .len, + authority_root: value + .parachain_header + .partial_mmr_leaf + .beefy_next_authority_set + .root + .encode(), + }), + }), + para_id: value.parachain_header.para_id, + parachain_heads_proof: value + .parachain_header + .parachain_heads_proof + .into_iter() + .map(|item| item.encode()) + .collect(), + heads_leaf_index: value.parachain_header.heads_leaf_index, + heads_total_count: value.parachain_header.heads_total_count, + extrinsic_proof: value.parachain_header.extrinsic_proof, + }), + } + } +} + +impl From for ConsensusState { + fn from(header: ParachainHeader) -> Self { + let root = { + header + .parachain_header + .digest + .logs + .into_iter() + .filter_map(|digest| digest.as_consensus()) + .find(|(id, value)| id == &IBC_CONSENSUS_ID) + .map(|(.., root)| root.to_vec()) + .unwrap_or_default() + }; + + Self { + root, + timestamp: header.time, + next_validators_hash: header.next_validators_hash, + } + } +} + +impl From
for ConsensusState { + fn from(header: Header) -> Self { + Self::from(header.signed_header.header) + } +} + +#[cfg(test)] +mod tests { + use tendermint_rpc::endpoint::abci_query::AbciQuery; + use test_log::test; + + use crate::test::test_serialization_roundtrip; + + #[test] + fn serialization_roundtrip_no_proof() { + let json_data = + include_str!("../../../tests/support/query/serialization/consensus_state.json"); + test_serialization_roundtrip::(json_data); + } + + #[test] + fn serialization_roundtrip_with_proof() { + let json_data = + include_str!("../../../tests/support/query/serialization/consensus_state_proof.json"); + test_serialization_roundtrip::(json_data); + } +} diff --git a/modules/src/clients/ics11_beefy/error.rs b/modules/src/clients/ics11_beefy/error.rs new file mode 100644 index 0000000000..66257af1bc --- /dev/null +++ b/modules/src/clients/ics11_beefy/error.rs @@ -0,0 +1,231 @@ +use crate::prelude::*; + +use flex_error::{define_error, TraceError}; + +use crate::core::ics23_commitment::error::Error as Ics23Error; +use crate::core::ics24_host::error::ValidationError; +use crate::core::ics24_host::identifier::ClientId; +use crate::timestamp::{Timestamp, TimestampOverflowError}; +use beefy_client::error::BeefyClientError; +use sp_core::H256; + +use crate::Height; + +define_error! { + #[derive(Debug, PartialEq, Eq)] + Error { + InvalidAddress + |_| { "invalid address" }, + + InvalidHeader + { reason: String } + |e| { format_args!("invalid header, failed basic validation: {}", e.reason) }, + + Validation + { reason: String } + |e| { format_args!("invalid header, failed basic validation: {}", e.reason) }, + + InvalidRawClientState + { reason: String } + |e| { format_args!("invalid raw client state: {}", e.reason) }, + + MissingValidatorSet + |_| { "missing validator set" }, + + MissingTrustedValidatorSet + |_| { "missing trusted validator set" }, + + MissingTrustedHeight + |_| { "missing trusted height" }, + + InvalidChainIdentifier + [ ValidationError ] + |_| { "invalid chain identifier" }, + + MissingLatestHeight + |_| { "missing latest height" }, + + MissingFrozenHeight + |_| { "missing frozen height" }, + + InvalidChainId + { raw_value: String } + [ ValidationError ] + |e| { format_args!("invalid chain identifier: {}", e.raw_value) }, + + InvalidRawHeight + { raw_height: u64 } + |e| { format_args!("invalid raw height: {}", e.raw_height) }, + + InvalidRawConsensusState + { reason: String } + | e | { format_args!("invalid raw client consensus state: {}", e.reason) }, + + InvalidRawHeader + | _ | { "invalid raw header" }, + + InvalidRawMisbehaviour + { reason: String } + | e | { format_args!("invalid raw misbehaviour: {}", e.reason) }, + + Decode + [ TraceError ] + | _ | { "decode error" }, + + LowUpdateTimestamp + { + low: String, + high: String + } + | e | { + format_args!("header timestamp {0} must be greater than current client consensus state timestamp {1}", e.low, e.high) + }, + + HeaderTimestampOutsideTrustingTime + { + low: String, + high: String + } + | e | { + format_args!("header timestamp {0} is outside the trusting period w.r.t. 
consensus state timestamp {1}", e.low, e.high) + }, + + HeaderTimestampTooHigh + { + actual: String, + max: String, + } + | e | { + format_args!("given other previous updates, header timestamp should be at most {0}, but was {1}", e.max, e.actual) + }, + + HeaderTimestampTooLow + { + actual: String, + min: String, + } + | e | { + format_args!("given other previous updates, header timestamp should be at least {0}, but was {1}", e.min, e.actual) + }, + + TimestampOverflow + [ TimestampOverflowError ] + |_| { "timestamp overflowed" }, + + NotEnoughTimeElapsed + { + current_time: Timestamp, + earliest_time: Timestamp, + } + | e | { + format_args!("not enough time elapsed, current timestamp {0} is still less than earliest acceptable timestamp {1}", e.current_time, e.earliest_time) + }, + + NotEnoughBlocksElapsed + { + current_height: Height, + earliest_height: Height, + } + | e | { + format_args!("not enough blocks elapsed, current height {0} is still less than earliest acceptable height {1}", e.current_height, e.earliest_height) + }, + + InvalidHeaderHeight + { height: Height } + | e | { + format_args!("header height = {0} is invalid", e.height) + }, + + InvalidTrustedHeaderHeight + { + trusted_header_height: Height, + height_header: Height + } + | e | { + format_args!("header height is {0} and is lower than the trusted header height, which is {1} ", e.height_header, e.trusted_header_height) + }, + + LowUpdateHeight + { + low: Height, + high: Height + } + | e | { + format_args!("header height is {0} but it must be greater than the current client height which is {1}", e.low, e.high) + }, + + MismatchedRevisions + { + current_revision: u64, + update_revision: u64, + } + | e | { + format_args!("the header's current/trusted revision number ({0}) and the update's revision number ({1}) should be the same", e.current_revision, e.update_revision) + }, + + InvalidValidatorSet + { + hash1: H256, + hash2: H256, + } + | e | { + format_args!("invalid validator set: header_validators_hash={} and validators_hash={}", e.hash1, e.hash2) + }, + + NotEnoughTrustedValsSigned + { reason: String } + | e | { + format_args!("not enough trust because insufficient validators overlap: {}", e.reason) + }, + + VerificationError + { reason: BeefyClientError } + | e | { + format_args!("verification failed: {}", e.reason) + }, + + ProcessedTimeNotFound + { + client_id: ClientId, + height: Height, + } + | e | { + format_args!( + "Processed time for the client {0} at height {1} not found", + e.client_id, e.height) + }, + + ProcessedHeightNotFound + { + client_id: ClientId, + height: Height, + } + | e | { + format_args!( + "Processed height for the client {0} at height {1} not found", + e.client_id, e.height) + }, + + Ics23Error + [ Ics23Error ] + | _ | { "ics23 commitment error" }, + + InsufficientHeight + { + latest_height: Height, + target_height: Height, + } + | e | { + format_args!("the height is insufficient: latest_height={0} target_height={1}", e.latest_height, e.target_height) + }, + + ClientFrozen + { + frozen_height: Height, + target_height: Height, + } + | e | { + format_args!("the client is frozen: frozen_height={0} target_height={1}", e.frozen_height, e.target_height) + }, + } +} diff --git a/modules/src/clients/ics11_beefy/header.rs b/modules/src/clients/ics11_beefy/header.rs new file mode 100644 index 0000000000..4b02683d8b --- /dev/null +++ b/modules/src/clients/ics11_beefy/header.rs @@ -0,0 +1,403 @@ +use prost::Message; +use serde_derive::{Deserialize, Serialize}; +use tendermint_proto::Protobuf; + +use 
crate::alloc::string::ToString; +use crate::clients::ics11_beefy::client_state::REVISION_NUMBER; +use crate::clients::ics11_beefy::error::Error; +use crate::core::ics02_client::client_type::ClientType; +use crate::core::ics02_client::header::AnyHeader; +use crate::core::ics24_host::identifier::ChainId; +use crate::timestamp::Timestamp; +use crate::Height; +use alloc::vec; +use alloc::vec::Vec; +use beefy_client::primitives::{ + BeefyNextAuthoritySet, Hash, MmrUpdateProof, PartialMmrLeaf, SignatureWithAuthorityIndex, + SignedCommitment, +}; +use beefy_primitives::known_payload_ids::MMR_ROOT_ID; +use beefy_primitives::mmr::{MmrLeaf, MmrLeafVersion}; +use beefy_primitives::{Commitment, Payload}; +use bytes::Buf; +use codec::{Decode, Encode}; +use ibc_proto::ibc::lightclients::beefy::v1::{ + BeefyAuthoritySet as RawBeefyAuthoritySet, BeefyMmrLeaf as RawBeefyMmrLeaf, + BeefyMmrLeafPartial as RawBeefyMmrLeafPartial, Commitment as RawCommitment, + CommitmentSignature, Header as RawBeefyHeader, MmrUpdateProof as RawMmrUpdateProof, + ParachainHeader as RawParachainHeader, PayloadItem as RawPayloadItem, PayloadItem, + SignedCommitment as RawSignedCommitment, +}; +use pallet_mmr_primitives::{BatchProof, Proof}; +use sp_core::H256; +use sp_runtime::generic::Header as SubstrateHeader; +use sp_runtime::traits::{BlakeTwo256, SaturatedConversion}; +use sp_runtime::Digest; +use sp_trie::TrieDBMut; + +/// Beefy consensus header +#[derive(Clone, PartialEq, Eq)] +pub struct BeefyHeader { + pub parachain_headers: Vec, // contains the parachain headers + pub mmr_proofs: Vec>, // mmr proofs for these headers + pub mmr_size: u64, // The latest mmr size + pub mmr_update_proof: Option, // Proof for updating the latest mmr root hash +} + +#[derive(Clone, PartialEq, Eq, codec::Encode, codec::Decode)] +pub struct ParachainHeader { + pub parachain_header: SubstrateHeader, + /// Reconstructed mmr leaf + pub partial_mmr_leaf: PartialMmrLeaf, + /// parachain id + pub para_id: u32, + /// Proof for our parachain header inclusion in the parachain headers root + pub parachain_heads_proof: Vec, + /// leaf index for parachain heads proof + pub heads_leaf_index: u32, + /// Total number of parachain heads + pub heads_total_count: u32, + /// Trie merkle proof of inclusion of the set timestamp extrinsic in header.extrinsic_root + /// this already encodes the actual extrinsic + pub extrinsic_proof: Vec>, +} + +pub fn split_leaf_version(version: u8) -> (u8, u8) { + let major = version >> 5; + let minor = version & 0b11111; + (major, minor) +} + +pub fn merge_leaf_version(major: u8, minor: u8) -> u8 { + (major << 5) + minor +} + +impl TryFrom for BeefyHeader { + type Error = Error; + + fn try_from(raw_header: RawBeefyHeader) -> Result { + let parachain_headers = raw_header + .parachain_headers + .into_iter() + .map(|raw_para_header| { + let parent_hash = + H256::decode(&mut raw_para_header.mmr_leaf_partial.parent_hash.as_slice()) + .unwrap(); + let beefy_next_authority_set = if let Some(next_set) = + raw_para_header.mmr_leaf_partial.beefy_next_authority_set + { + BeefyNextAuthoritySet { + id: next_set.id, + len: next_set.len, + root: H256::decode(&mut next_set.root.as_slice()).unwrap(), + } + } else { + Default::default() + }; + Ok(ParachainHeader { + parachain_header: decode_parachain_header(raw_para_header.parachain_header) + .map_err(|err| Error::invalid_header(err))?, + partial_mmr_leaf: PartialMmrLeaf { + version: { + let (major, minor) = split_leaf_version( + raw_para_header + .mmr_leaf_partial + .version + .saturated_into::(), 
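+                                    // NB: the protobuf `version` field is a u32, but only its low
+                                    // byte carries the packed (3-bit major, 5-bit minor) MMR leaf
+                                    // version, which `split_leaf_version` unpacks.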
+ ); + MmrLeafVersion::new(major, minor) + }, + parent_number_and_hash: ( + raw_para_header.mmr_leaf_partial.parent_number, + parent_hash, + ), + beefy_next_authority_set, + }, + para_id: raw_para_header.para_id, + parachain_heads_proof: raw_para_header + .parachain_heads_proof + .into_iter() + .map(|item| { + let mut dest = [0u8; 32]; + dest.copy_from_slice(&*item); + dest + }) + .collect(), + heads_leaf_index: raw_para_header.heads_leaf_index, + heads_total_count: raw_para_header.heads_total_count, + extrinsic_proof: raw_para_header.extrinsic_proof, + }) + }) + .collect::, Error>>()?; + + let mmr_update_proof = if let Some(mmr_update) = raw_header.mmr_update_proof { + let payload = { + mmr_update + .signed_commitment + .as_ref() + .unwrap() + .commitment + .unwrap() + .payload + .iter() + .map(|item| { + let mut payload_id = [0u8; 2]; + payload_id.copy_from_slice(&item.payload_id); + Payload::new(payload_id, item.payload_data.clone()) + }) + .collect() + }; + let block_number = mmr_update + .signed_commitment + .as_ref() + .unwrap() + .commitment + .unwrap() + .block_numer; + let validator_set_id = mmr_update + .signed_commitment + .as_ref() + .unwrap() + .commitment + .unwrap() + .validator_set_id; + let signatures = mmr_update + .signed_commitment + .unwrap() + .signatures + .into_iter() + .map(|commitment_sig| SignatureWithAuthorityIndex { + signature: { + let mut sig = [0u8; 65]; + sig.copy_from_slice(&commitment_sig.signature); + sig + }, + index: commitment_sig.authority_index, + }) + .collect(); + Some(MmrUpdateProof { + signed_commitment: SignedCommitment { + commitment: Commitment { + payload, + block_number, + validator_set_id, + }, + signatures, + }, + latest_mmr_leaf: MmrLeaf { + version: { + let (major, minor) = split_leaf_version( + mmr_update + .mmr_leaf + .as_ref() + .unwrap() + .version + .saturated_into::(), + ); + MmrLeafVersion::new(major, minor) + }, + parent_number_and_hash: { + let parent_number = mmr_update.mmr_leaf.as_ref().unwrap().parent_number; + let parent_hash = H256::decode( + &mut mmr_update.mmr_leaf.as_ref().unwrap().parent_hash.as_slice(), + ) + .unwrap(); + (parent_number, parent_hash) + }, + beefy_next_authority_set: BeefyNextAuthoritySet { + id: mmr_update + .mmr_leaf + .as_ref() + .unwrap() + .beefy_next_authority_set + .unwrap() + .id, + len: mmr_update + .mmr_leaf + .as_ref() + .unwrap() + .beefy_next_authority_set + .unwrap() + .len, + root: H256::decode( + &mut &mmr_update + .mmr_leaf + .as_ref() + .unwrap() + .beefy_next_authority_set + .unwrap() + .authority_root, + ) + .unwrap(), + }, + parachain_heads: H256::decode( + &mut &mmr_update.mmr_leaf.as_ref().unwrap().parachain_heads, + ) + .unwrap(), + }, + mmr_proof: Proof { + leaf_index: mmr_update.mmr_leaf_index, + leaf_count: mmr_update.mmr_leaf_index + 1, + items: mmr_update + .mmr_proof + .into_iter() + .map(|item| H256::decode(&mut &item).unwrap()) + .collect(), + }, + authority_proof: mmr_update + .authorities_proof + .into_iter() + .map(|item| { + let mut dest = [0u8; 32]; + dest.copy_from_slice(&item); + dest + }) + .collect(), + }) + } else { + None + }; + + Ok(Self { + parachain_headers, + mmr_proofs: raw_header.mmr_proofs, + mmr_size: raw_header.mmr_size, + mmr_update_proof, + }) + } +} + +impl From for RawBeefyHeader { + fn from(beefy_header: BeefyHeader) -> Self { + Self { + parachain_headers: beefy_header + .parachain_headers + .into_iter() + .map( + |para_header| ibc_proto::ibc::lightclients::beefy::v1::ParachainHeader { + parachain_header: para_header.parachain_header.encode(), + 
mmr_leaf_partial: Some(RawBeefyMmrLeafPartial { + version: { + let (major, minor) = para_header.partial_mmr_leaf.version.split(); + merge_leaf_version(major, minor) as u32 + }, + parent_number: para_header.partial_mmr_leaf.parent_number_and_hash.0, + parent_hash: para_header + .partial_mmr_leaf + .parent_number_and_hash + .1 + .encode(), + beefy_next_authority_set: Some(RawBeefyAuthoritySet { + id: para_header.partial_mmr_leaf.beefy_next_authority_set.id, + len: para_header.partial_mmr_leaf.beefy_next_authority_set.len, + authority_root: para_header + .partial_mmr_leaf + .beefy_next_authority_set + .root + .encode(), + }), + }), + para_id: para_header.para_id, + parachain_heads_proof: para_header + .parachain_heads_proof + .into_iter() + .map(|item| item.to_vec()) + .collect(), + heads_leaf_index: para_header.heads_leaf_index, + heads_total_count: para_header.heads_total_count, + extrinsic_proof: para_header.extrinsic_proof, + }, + ) + .collect(), + mmr_proofs: beefy_header.mmr_proofs, + mmr_size: beefy_header.mmr_size, + mmr_update_proof: if let Some(mmr_update) = beefy_header.mmr_update_proof { + Some(RawMmrUpdateProof { + mmr_leaf: Some(RawBeefyMmrLeaf { + version: { + let (major, minor) = mmr_update.latest_mmr_leaf.version.split(); + merge_leaf_version(major, minor) as u32 + }, + parent_number: mmr_update.latest_mmr_leaf.parent_number_and_hash.0, + parent_hash: mmr_update.latest_mmr_leaf.parent_number_and_hash.1.encode(), + beefy_next_authority_set: Some(RawBeefyAuthoritySet { + id: mmr_update.latest_mmr_leaf.beefy_next_authority_set.id, + len: mmr_update.latest_mmr_leaf.beefy_next_authority_set.len, + authority_root: mmr_update + .latest_mmr_leaf + .beefy_next_authority_set + .root + .encode(), + }), + parachain_heads: mmr_update.latest_mmr_leaf.parachain_heads.encode(), + }), + mmr_leaf_index: mmr_update.mmr_proof.leaf_index, + mmr_proof: mmr_update + .mmr_proof + .items + .into_iter() + .map(|item| item.encode()) + .collect(), + signed_commitment: Some(RawSignedCommitment { + commitment: Some(RawCommitment { + payload: vec![PayloadItem { + payload_id: MMR_ROOT_ID.to_vec(), + payload_data: mmr_update + .signed_commitment + .commitment + .payload + .get_raw(&MMR_ROOT_ID) + .unwrap() + .clone(), + }], + block_numer: mmr_update.signed_commitment.commitment.block_number, + validator_set_id: mmr_update + .signed_commitment + .commitment + .validator_set_id, + }), + signatures: mmr_update + .signed_commitment + .signatures + .into_iter() + .map(|item| CommitmentSignature { + signature: item.signature.to_vec(), + authority_index: item.index, + }) + .collect(), + }), + authorities_proof: mmr_update + .authority_proof + .into_iter() + .map(|item| item.to_vec()) + .collect(), + }) + } else { + None + }, + } + } +} + +impl Protobuf for BeefyHeader {} + +pub fn decode_parachain_header(raw_header: Vec) -> Result, Error> { + SubstrateHeader::decode(&mut &*raw_header) + .map_err(|_| Error::invalid_header("failed to decode parachain header")) +} + +pub fn decode_header(buf: B) -> Result { + RawBeefyHeader::decode(buf) + .map_err(Error::decode)? 
+        .try_into()
+}
+
+pub fn decode_timestamp_extrinsic(header: ParachainHeader) -> Result<u64, Error> {
+    let mut db = sp_trie::MemoryDB::<BlakeTwo256>::default();
+    let mut root = Default::default();
+
+    Ok(0)
+}
+
+#[cfg(test)]
+pub mod test_util {}
diff --git a/modules/src/clients/ics11_beefy/misbehaviour.rs b/modules/src/clients/ics11_beefy/misbehaviour.rs
new file mode 100644
index 0000000000..8b13789179
--- /dev/null
+++ b/modules/src/clients/ics11_beefy/misbehaviour.rs
@@ -0,0 +1 @@
+
diff --git a/modules/src/clients/ics11_beefy/mod.rs b/modules/src/clients/ics11_beefy/mod.rs
new file mode 100644
index 0000000000..2b16db780e
--- /dev/null
+++ b/modules/src/clients/ics11_beefy/mod.rs
@@ -0,0 +1,9 @@
+//! ICS 11: Beefy Client implements a client verification algorithm for blockchains which use
+//! the Beefy consensus algorithm.
+
+pub mod client_def;
+pub mod client_state;
+pub mod consensus_state;
+pub mod error;
+pub mod header;
+pub mod misbehaviour;
diff --git a/modules/src/clients/mod.rs b/modules/src/clients/mod.rs
index 65ea910b18..aaf80babb9 100644
--- a/modules/src/clients/mod.rs
+++ b/modules/src/clients/mod.rs
@@ -1,3 +1,4 @@
//! Implementations of client verification algorithms for specific types of chains.

pub mod ics07_tendermint;
+pub mod ics11_beefy;
diff --git a/modules/src/core/ics02_client/client_consensus.rs b/modules/src/core/ics02_client/client_consensus.rs
index e1ee92eaac..68842d013c 100644
--- a/modules/src/core/ics02_client/client_consensus.rs
+++ b/modules/src/core/ics02_client/client_consensus.rs
@@ -9,6 +9,7 @@ use serde::Serialize;
use tendermint_proto::Protobuf;

use crate::clients::ics07_tendermint::consensus_state;
+use crate::clients::ics11_beefy::consensus_state as beefy_consensus_state;
use crate::core::ics02_client::client_type::ClientType;
use crate::core::ics02_client::error::Error;
use crate::core::ics02_client::height::Height;
@@ -23,6 +24,8 @@ use crate::mock::client_state::MockConsensusState;
pub const TENDERMINT_CONSENSUS_STATE_TYPE_URL: &str =
"/ibc.lightclients.tendermint.v1.ConsensusState";

+pub const BEEFY_CONSENSUS_STATE_TYPE_URL: &str = "/ibc.lightclients.beefy.v1.ConsensusState";
+
pub const MOCK_CONSENSUS_STATE_TYPE_URL: &str = "/ibc.mock.ConsensusState";

pub trait ConsensusState: Clone + core::fmt::Debug + Send + Sync {
@@ -42,7 +45,8 @@ pub trait ConsensusState: Clone + core::fmt::Debug + Send + Sync {
#[serde(tag = "type")]
pub enum AnyConsensusState {
Tendermint(consensus_state::ConsensusState),
-
+    #[serde(skip)]
+    Beefy(beefy_consensus_state::ConsensusState),
#[cfg(any(test, feature = "mocks"))]
Mock(MockConsensusState),
}
@@ -51,7 +55,7 @@ impl AnyConsensusState {
pub fn timestamp(&self) -> Timestamp {
match self {
Self::Tendermint(cs_state) => cs_state.timestamp.into(),
-
+            Self::Beefy(cs_state) => cs_state.timestamp.into(),
#[cfg(any(test, feature = "mocks"))]
Self::Mock(mock_state) => mock_state.timestamp(),
}
@@ -60,7 +64,7 @@ impl AnyConsensusState {
pub fn client_type(&self) -> ClientType {
match self {
AnyConsensusState::Tendermint(_cs) => ClientType::Tendermint,
-
+            AnyConsensusState::Beefy(_) => ClientType::Beefy,
#[cfg(any(test, feature = "mocks"))]
AnyConsensusState::Mock(_cs) => ClientType::Mock,
}
@@ -81,6 +85,11 @@ impl TryFrom<Any> for AnyConsensusState {
.map_err(Error::decode_raw_client_state)?,
)),

+        BEEFY_CONSENSUS_STATE_TYPE_URL => Ok(AnyConsensusState::Beefy(
+            beefy_consensus_state::ConsensusState::decode_vec(&value.value)
+                .map_err(Error::decode_raw_client_state)?,
+        )),
+
#[cfg(any(test, feature = "mocks"))]
MOCK_CONSENSUS_STATE_TYPE_URL => Ok(AnyConsensusState::Mock( MockConsensusState::decode_vec(&value.value) @@ -101,6 +110,13 @@ impl From for Any { .encode_vec() .expect("encoding to `Any` from `AnyConsensusState::Tendermint`"), }, + + AnyConsensusState::Beefy(value) => Any { + type_url: BEEFY_CONSENSUS_STATE_TYPE_URL.to_string(), + value: value + .encode_vec() + .expect("encoding to `Any` from `AnyConsensusState::Beefy`"), + }, #[cfg(any(test, feature = "mocks"))] AnyConsensusState::Mock(value) => Any { type_url: MOCK_CONSENSUS_STATE_TYPE_URL.to_string(), @@ -156,7 +172,7 @@ impl ConsensusState for AnyConsensusState { fn root(&self) -> &CommitmentRoot { match self { Self::Tendermint(cs_state) => cs_state.root(), - + Self::Beefy(cs_state) => cs_state.root(), #[cfg(any(test, feature = "mocks"))] Self::Mock(mock_state) => mock_state.root(), } diff --git a/modules/src/core/ics02_client/client_state.rs b/modules/src/core/ics02_client/client_state.rs index 4c086b6e6a..0aa1625552 100644 --- a/modules/src/core/ics02_client/client_state.rs +++ b/modules/src/core/ics02_client/client_state.rs @@ -8,6 +8,7 @@ use tendermint_proto::Protobuf; use ibc_proto::ibc::core::client::v1::IdentifiedClientState; use crate::clients::ics07_tendermint::client_state; +use crate::clients::ics11_beefy::client_state as beefy_client_state; use crate::core::ics02_client::client_type::ClientType; use crate::core::ics02_client::error::Error; use crate::core::ics02_client::trust_threshold::TrustThreshold; @@ -19,6 +20,7 @@ use crate::prelude::*; use crate::Height; pub const TENDERMINT_CLIENT_STATE_TYPE_URL: &str = "/ibc.lightclients.tendermint.v1.ClientState"; +pub const BEEFY_CLIENT_STATE_TYPE_URL: &str = "/ibc.lightclients.beefy.v1.ClientState"; pub const MOCK_CLIENT_STATE_TYPE_URL: &str = "/ibc.mock.ClientState"; pub trait ClientState: Clone + core::fmt::Debug + Send + Sync { @@ -61,7 +63,7 @@ pub trait ClientState: Clone + core::fmt::Debug + Send + Sync { #[serde(tag = "type")] pub enum AnyUpgradeOptions { Tendermint(client_state::UpgradeOptions), - + Beefy(beefy_client_state::UpgradeOptions), #[cfg(any(test, feature = "mocks"))] Mock(()), } @@ -70,7 +72,22 @@ impl AnyUpgradeOptions { fn into_tendermint(self) -> client_state::UpgradeOptions { match self { Self::Tendermint(options) => options, + Self::Beefy(_) => { + panic!("cannot downcast AnyUpgradeOptions::Beefy to Tendermint::UpgradeOptions") + } + #[cfg(any(test, feature = "mocks"))] + Self::Mock(_) => { + panic!("cannot downcast AnyUpgradeOptions::Mock to Tendermint::UpgradeOptions") + } + } + } + fn into_beefy(self) -> beefy_client_state::UpgradeOptions { + match self { + Self::Tendermint(_) => { + panic!("cannot downcast AnyUpgradeOptions::Tendermint to Beefy::UpgradeOptions") + } + Self::Beefy(options) => options, #[cfg(any(test, feature = "mocks"))] Self::Mock(_) => { panic!("cannot downcast AnyUpgradeOptions::Mock to Tendermint::UpgradeOptions") @@ -83,7 +100,7 @@ impl AnyUpgradeOptions { #[serde(tag = "type")] pub enum AnyClientState { Tendermint(client_state::ClientState), - + Beefy(beefy_client_state::ClientState), #[cfg(any(test, feature = "mocks"))] Mock(MockClientState), } @@ -92,7 +109,7 @@ impl AnyClientState { pub fn latest_height(&self) -> Height { match self { Self::Tendermint(tm_state) => tm_state.latest_height(), - + Self::Beefy(bf_state) => bf_state.latest_para_height(), #[cfg(any(test, feature = "mocks"))] Self::Mock(mock_state) => mock_state.latest_height(), } @@ -101,7 +118,7 @@ impl AnyClientState { pub fn frozen_height(&self) -> Option { 
match self { Self::Tendermint(tm_state) => tm_state.frozen_height(), - + Self::Beefy(bf_state) => bf_state.frozen_height(), #[cfg(any(test, feature = "mocks"))] Self::Mock(mock_state) => mock_state.frozen_height(), } @@ -110,7 +127,7 @@ impl AnyClientState { pub fn trust_threshold(&self) -> Option { match self { AnyClientState::Tendermint(state) => Some(state.trust_level), - + AnyClientState::Beefy(_) => None, #[cfg(any(test, feature = "mocks"))] AnyClientState::Mock(_) => None, } @@ -119,7 +136,7 @@ impl AnyClientState { pub fn max_clock_drift(&self) -> Duration { match self { AnyClientState::Tendermint(state) => state.max_clock_drift, - + AnyClientState::Beefy(_) => None, #[cfg(any(test, feature = "mocks"))] AnyClientState::Mock(_) => Duration::new(0, 0), } @@ -128,7 +145,7 @@ impl AnyClientState { pub fn client_type(&self) -> ClientType { match self { Self::Tendermint(state) => state.client_type(), - + Self::Beefy(state) => state.client_type(), #[cfg(any(test, feature = "mocks"))] Self::Mock(state) => state.client_type(), } @@ -137,7 +154,7 @@ impl AnyClientState { pub fn refresh_period(&self) -> Option { match self { AnyClientState::Tendermint(tm_state) => tm_state.refresh_time(), - + AnyClientState::Beefy(_) => None, #[cfg(any(test, feature = "mocks"))] AnyClientState::Mock(mock_state) => mock_state.refresh_time(), } @@ -146,7 +163,7 @@ impl AnyClientState { pub fn expired(&self, elapsed_since_latest: Duration) -> bool { match self { AnyClientState::Tendermint(tm_state) => tm_state.expired(elapsed_since_latest), - + AnyClientState::Beefy(state) => false, #[cfg(any(test, feature = "mocks"))] AnyClientState::Mock(mock_state) => mock_state.expired(elapsed_since_latest), } @@ -167,6 +184,11 @@ impl TryFrom for AnyClientState { .map_err(Error::decode_raw_client_state)?, )), + BEEFY_CLIENT_STATE_TYPE_URL => Ok(AnyClientState::Beefy( + beefy_client_state::ClientState::decode_vec(&raw.value), + )) + .map_err(Error::decode_raw_client_state)?, + #[cfg(any(test, feature = "mocks"))] MOCK_CLIENT_STATE_TYPE_URL => Ok(AnyClientState::Mock( MockClientState::decode_vec(&raw.value).map_err(Error::decode_raw_client_state)?, diff --git a/modules/src/core/ics02_client/client_type.rs b/modules/src/core/ics02_client/client_type.rs index 78973c800e..1fb05edf14 100644 --- a/modules/src/core/ics02_client/client_type.rs +++ b/modules/src/core/ics02_client/client_type.rs @@ -8,13 +8,14 @@ use super::error::Error; #[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] pub enum ClientType { Tendermint = 1, - + Beefy = 2, #[cfg(any(test, feature = "mocks"))] Mock = 9999, } impl ClientType { const TENDERMINT_STR: &'static str = "07-tendermint"; + const BEEFY_STR: &'static str = "11-beefy"; #[cfg_attr(not(test), allow(dead_code))] const MOCK_STR: &'static str = "9999-mock"; @@ -23,7 +24,7 @@ impl ClientType { pub fn as_str(&self) -> &'static str { match self { Self::Tendermint => Self::TENDERMINT_STR, - + Self::Beefy => Self::BEEFY_STR, #[cfg(any(test, feature = "mocks"))] Self::Mock => Self::MOCK_STR, } @@ -42,10 +43,9 @@ impl core::str::FromStr for ClientType { fn from_str(s: &str) -> Result { match s { Self::TENDERMINT_STR => Ok(Self::Tendermint), - + Self::BEEFY_STR => Ok(Self::Beefy), #[cfg(any(test, feature = "mocks"))] Self::MOCK_STR => Ok(Self::Mock), - _ => Err(Error::unknown_client_type(s.to_string())), } } diff --git a/modules/src/core/ics02_client/error.rs b/modules/src/core/ics02_client/error.rs index fee8717689..568a0b5959 100644 --- 
a/modules/src/core/ics02_client/error.rs +++ b/modules/src/core/ics02_client/error.rs @@ -180,6 +180,10 @@ define_error! { [ Ics07Error ] | _ | { "tendermint error" }, + Beefy + [ Ics011Error ] + | _ | { "tendermint error" }, + InvalidPacketTimestamp [ crate::timestamp::ParseTimestampError ] | _ | { "invalid packet timeout timestamp value" }, diff --git a/modules/src/core/ics02_client/header.rs b/modules/src/core/ics02_client/header.rs index cd2de5e9d7..f757ee8a9f 100644 --- a/modules/src/core/ics02_client/header.rs +++ b/modules/src/core/ics02_client/header.rs @@ -6,6 +6,7 @@ use subtle_encoding::hex; use tendermint_proto::Protobuf; use crate::clients::ics07_tendermint::header::{decode_header, Header as TendermintHeader}; +use crate::clients::ics11_beefy::header::{decode_header as decode_beefy_header, BeefyHeader}; use crate::core::ics02_client::client_type::ClientType; use crate::core::ics02_client::error::Error; #[cfg(any(test, feature = "mocks"))] @@ -15,6 +16,7 @@ use crate::timestamp::Timestamp; use crate::Height; pub const TENDERMINT_HEADER_TYPE_URL: &str = "/ibc.lightclients.tendermint.v1.Header"; +pub const BEEFY_HEADER_TYPE_URL: &str = "/ibc.lightclients.beefy.v1.Header"; pub const MOCK_HEADER_TYPE_URL: &str = "/ibc.mock.Header"; /// Abstract of consensus state update information @@ -36,7 +38,8 @@ pub trait Header: Clone + core::fmt::Debug + Send + Sync { #[allow(clippy::large_enum_variant)] pub enum AnyHeader { Tendermint(TendermintHeader), - + #[serde(skip)] + Beefy(BeefyHeader), #[cfg(any(test, feature = "mocks"))] Mock(MockHeader), } @@ -45,7 +48,7 @@ impl Header for AnyHeader { fn client_type(&self) -> ClientType { match self { Self::Tendermint(header) => header.client_type(), - + Self::Beefy(header) => header.client_type(), #[cfg(any(test, feature = "mocks"))] Self::Mock(header) => header.client_type(), } @@ -54,7 +57,7 @@ impl Header for AnyHeader { fn height(&self) -> Height { match self { Self::Tendermint(header) => header.height(), - + Self::Beefy(header) => Default::default(), #[cfg(any(test, feature = "mocks"))] Self::Mock(header) => header.height(), } @@ -63,6 +66,7 @@ impl Header for AnyHeader { fn timestamp(&self) -> Timestamp { match self { Self::Tendermint(header) => header.timestamp(), + Self::Beefy(header) => Default::default(), #[cfg(any(test, feature = "mocks"))] Self::Mock(header) => header.timestamp(), } @@ -99,6 +103,11 @@ impl TryFrom for AnyHeader { Ok(AnyHeader::Tendermint(val)) } + BEEFY_HEADER_TYPE_URL => { + let val = decode_beefy_header(&raw.value).map_err(Error::beefy)?; + Ok(AnyHeader::Beefy(val)) + } + #[cfg(any(test, feature = "mocks"))] MOCK_HEADER_TYPE_URL => Ok(AnyHeader::Mock( MockHeader::decode_vec(&raw.value).map_err(Error::invalid_raw_header)?, @@ -118,6 +127,14 @@ impl From for Any { .encode_vec() .expect("encoding to `Any` from `AnyHeader::Tendermint`"), }, + + AnyHeader::Beefy(header) => Any { + type_url: BEEFY_HEADER_TYPE_URL.to_string(), + value: header + .encode_vec() + .expect("encoding to `Any` from `AnyHeader::Beefy`"), + }, + #[cfg(any(test, feature = "mocks"))] AnyHeader::Mock(header) => Any { type_url: MOCK_HEADER_TYPE_URL.to_string(), diff --git a/proto/src/lib.rs b/proto/src/lib.rs index 7e54ed83c4..aa059a219e 100644 --- a/proto/src/lib.rs +++ b/proto/src/lib.rs @@ -196,6 +196,11 @@ pub mod ibc { include_proto!("ibc.lightclients.tendermint.v1.rs"); } } + pub mod beefy { + pub mod v1 { + include_proto!("ibc.lightclients.beefy.v1.rs"); + } + } } pub mod mock { include_proto!("ibc.mock.rs"); diff --git 
a/proto/src/prost/ibc.lightclients.beefy.v1.rs b/proto/src/prost/ibc.lightclients.beefy.v1.rs
new file mode 100644
index 0000000000..defe33d827
--- /dev/null
+++ b/proto/src/prost/ibc.lightclients.beefy.v1.rs
@@ -0,0 +1,198 @@
+/// ClientState for the Beefy light client tracks the latest mmr root hash, the latest
+/// beefy height, the current and next authority sets, and a possible frozen height.
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ClientState {
+    /// Latest mmr root hash
+    #[prost(bytes = "vec", tag = "1")]
+    pub mmr_root_hash: ::prost::alloc::vec::Vec<u8>,
+    /// block number for the latest mmr_root_hash
+    #[prost(uint32, tag = "2")]
+    pub latest_beefy_height: u32,
+    /// Block height when the client was frozen due to a misbehaviour
+    #[prost(uint64, tag = "3")]
+    pub frozen_height: u64,
+    /// block number that the beefy protocol was activated on the relay chain.
+    /// This should be the first block in the merkle-mountain-range tree.
+    #[prost(uint32, tag = "4")]
+    pub beefy_activation_block: u32,
+    /// authorities for the current round
+    #[prost(message, optional, tag = "5")]
+    pub authority: ::core::option::Option<BeefyAuthoritySet>,
+    /// authorities for the next round
+    #[prost(message, optional, tag = "6")]
+    pub next_authority_set: ::core::option::Option<BeefyAuthoritySet>,
+}
+/// Actual payload items
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct PayloadItem {
+    /// 2-byte payload id
+    #[prost(bytes = "vec", tag = "1")]
+    pub payload_id: ::prost::alloc::vec::Vec<u8>,
+    /// arbitrary length payload data, e.g. mmr_root_hash
+    #[prost(bytes = "vec", tag = "2")]
+    pub payload_data: ::prost::alloc::vec::Vec<u8>,
+}
+/// Commitment message signed by beefy validators
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct Commitment {
+    /// array of payload items signed by Beefy validators
+    #[prost(message, repeated, tag = "1")]
+    pub payload: ::prost::alloc::vec::Vec<PayloadItem>,
+    /// block number for this commitment
+    #[prost(uint32, tag = "2")]
+    pub block_numer: u32,
+    /// validator set that signed this commitment
+    #[prost(uint64, tag = "3")]
+    pub validator_set_id: u64,
+}
+/// Signature belonging to a single validator
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct CommitmentSignature {
+    /// actual signature bytes
+    #[prost(bytes = "vec", tag = "1")]
+    pub signature: ::prost::alloc::vec::Vec<u8>,
+    /// authority leaf index in the merkle tree.
+    #[prost(uint32, tag = "2")]
+    pub authority_index: u32,
+}
+/// signed commitment data
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct SignedCommitment {
+    /// commitment data being signed
+    #[prost(message, optional, tag = "1")]
+    pub commitment: ::core::option::Option<Commitment>,
+    /// gotten from rpc subscription
+    #[prost(message, repeated, tag = "2")]
+    pub signatures: ::prost::alloc::vec::Vec<CommitmentSignature>,
+}
+/// data needed to update the client
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct MmrUpdateProof {
+    /// the new mmr leaf SCALE encoded.
+    #[prost(message, optional, tag = "1")]
+    pub mmr_leaf: ::core::option::Option<BeefyMmrLeaf>,
+    /// leaf index for the mmr_leaf
+    #[prost(uint64, tag = "2")]
+    pub mmr_leaf_index: u64,
+    /// proof that this mmr_leaf index is valid.
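For context on the PayloadItem/Commitment messages above: a BEEFY commitment carries a list of payload entries keyed by a 2-byte id, and the entry this light client cares about is the MMR root, stored under MMR_ROOT_ID. A minimal sketch of pulling it out of a decoded payload, reusing the Payload::get_raw and MMR_ROOT_ID calls that ics11_beefy/header.rs already relies on; the helper name and exact import paths here are illustrative assumptions, not part of the patch:

    use beefy_primitives::known_payload_ids::MMR_ROOT_ID;
    use beefy_primitives::Payload;
    use codec::Decode;
    use sp_core::H256;

    /// Extract the MMR root hash carried by a BEEFY commitment payload, if present.
    fn mmr_root_from_payload(payload: &Payload) -> Option<H256> {
        // `get_raw` returns the raw bytes stored under the given 2-byte payload id.
        let raw = payload.get_raw(&MMR_ROOT_ID)?;
        H256::decode(&mut &raw[..]).ok()
    }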
+    #[prost(bytes = "vec", repeated, tag = "3")]
+    pub mmr_proof: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec<u8>>,
+    /// signed commitment data
+    #[prost(message, optional, tag = "4")]
+    pub signed_commitment: ::core::option::Option<SignedCommitment>,
+    /// generated using full authority list from runtime
+    #[prost(bytes = "vec", repeated, tag = "5")]
+    pub authorities_proof: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec<u8>>,
+}
+/// ConsensusState defines the consensus state of the Beefy light client.
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ConsensusState {
+    /// timestamp that corresponds to the block height in which the ConsensusState
+    /// was stored.
+    #[prost(message, optional, tag = "1")]
+    pub timestamp: ::core::option::Option<super::super::super::super::google::protobuf::Timestamp>,
+    /// packet commitment root
+    #[prost(bytes = "vec", tag = "2")]
+    pub root: ::prost::alloc::vec::Vec<u8>,
+    /// proof of inclusion for this parachain header in the Mmr.
+    #[prost(message, optional, tag = "4")]
+    pub parachain_header: ::core::option::Option<ParachainHeader>,
+}
+/// Misbehaviour is a wrapper over two conflicting Headers
+/// that implements Misbehaviour interface expected by ICS-02
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct Misbehaviour {
+    #[prost(message, optional, tag = "2")]
+    pub header_1: ::core::option::Option<Header>
,
+    #[prost(message, optional, tag = "3")]
+    pub header_2: ::core::option::Option<Header>
,
+}
+/// Header contains the necessary data to prove finality about IBC commitments
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct Header {
+    /// parachain headers needed for proofs and ConsensusState
+    #[prost(message, repeated, tag = "1")]
+    pub parachain_headers: ::prost::alloc::vec::Vec<ParachainHeader>,
+    /// mmr proofs for the headers gotten from rpc "mmr_generateProofs"
+    #[prost(bytes = "vec", repeated, tag = "2")]
+    pub mmr_proofs: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec<u8>>,
+    /// size of the mmr for the given proof
+    #[prost(uint64, tag = "3")]
+    pub mmr_size: u64,
+    /// optional payload to update the mmr root hash.
+    #[prost(message, optional, tag = "4")]
+    pub mmr_update_proof: ::core::option::Option<MmrUpdateProof>,
+}
+/// data needed to prove parachain header inclusion in mmr.
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ParachainHeader {
+    /// scale-encoded parachain header bytes
+    #[prost(bytes = "vec", tag = "1")]
+    pub parachain_header: ::prost::alloc::vec::Vec<u8>,
+    /// reconstructed MmrLeaf, see beefy-go spec
+    #[prost(message, optional, tag = "2")]
+    pub mmr_leaf_partial: ::core::option::Option<BeefyMmrLeafPartial>,
+    /// para_id of the header.
+    #[prost(uint32, tag = "3")]
+    pub para_id: u32,
+    /// proofs for our header in the parachain heads root
+    #[prost(bytes = "vec", repeated, tag = "4")]
+    pub parachain_heads_proof: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec<u8>>,
+    /// leaf index for parachain heads proof
+    #[prost(uint32, tag = "5")]
+    pub heads_leaf_index: u32,
+    /// total number of para heads in parachain_heads_root
+    #[prost(uint32, tag = "6")]
+    pub heads_total_count: u32,
+    /// trie merkle proof of inclusion in header.extrinsic_root
+    /// this already encodes the actual extrinsic
+    #[prost(bytes = "vec", repeated, tag = "7")]
+    pub extrinsic_proof: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec<u8>>,
+}
+/// Partial data for MmrLeaf
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct BeefyMmrLeafPartial {
+    /// leaf version
+    #[prost(uint32, tag = "1")]
+    pub version: u32,
+    /// parent block for this leaf
+    #[prost(uint32, tag = "2")]
+    pub parent_number: u32,
+    /// parent hash for this leaf
+    #[prost(bytes = "vec", tag = "3")]
+    pub parent_hash: ::prost::alloc::vec::Vec<u8>,
+    /// next authority set.
+    #[prost(message, optional, tag = "4")]
+    pub beefy_next_authority_set: ::core::option::Option<BeefyAuthoritySet>,
+}
+/// Beefy Authority Info
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct BeefyAuthoritySet {
+    /// Id of the authority set, it should be strictly increasing
+    #[prost(uint64, tag = "1")]
+    pub id: u64,
+    /// size of the authority set
+    #[prost(uint32, tag = "2")]
+    pub len: u32,
+    /// merkle root of the sorted authority public keys.
+    #[prost(bytes = "vec", tag = "3")]
+    pub authority_root: ::prost::alloc::vec::Vec<u8>,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct BeefyMmrLeaf {
+    /// leaf version
+    #[prost(uint32, tag = "1")]
+    pub version: u32,
+    /// parent block for this leaf
+    #[prost(uint32, tag = "2")]
+    pub parent_number: u32,
+    /// parent hash for this leaf
+    #[prost(bytes = "vec", tag = "3")]
+    pub parent_hash: ::prost::alloc::vec::Vec<u8>,
+    /// beefy next authority set.
+    #[prost(message, optional, tag = "4")]
+    pub beefy_next_authority_set: ::core::option::Option<BeefyAuthoritySet>,
+    /// merkle root hash of parachain heads included in the leaf.
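The single-byte MMR leaf version (carried in these messages as a u32) packs a major and a minor version; the split_leaf_version/merge_leaf_version helpers in ics11_beefy/header.rs convert between the packed byte and the (major, minor) pair. A minimal sketch of that packing, assuming the standard BEEFY layout of 3 most significant bits for major and 5 least significant bits for minor; the bodies below are illustrative, not the patch's exact implementation:

    /// Split a packed leaf version byte into (major, minor).
    pub fn split_leaf_version(version: u8) -> (u8, u8) {
        let major = version >> 5;
        let minor = version & 0b0001_1111;
        (major, minor)
    }

    /// Merge (major, minor) back into the packed single-byte representation.
    pub fn merge_leaf_version(major: u8, minor: u8) -> u8 {
        (major << 5) | minor
    }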
+ #[prost(bytes = "vec", tag = "5")] + pub parachain_heads: ::prost::alloc::vec::Vec, +} From ba60e858ee107c81c4e3925c9159d5a63c6e2750 Mon Sep 17 00:00:00 2001 From: David Salami Date: Tue, 5 Apr 2022 19:39:40 +0100 Subject: [PATCH 02/96] implemented client def trait --- modules/src/clients/ics11_beefy/client_def.rs | 530 ++++++++++++++++++ .../src/clients/ics11_beefy/client_state.rs | 112 +++- .../clients/ics11_beefy/consensus_state.rs | 42 +- modules/src/clients/ics11_beefy/error.rs | 25 +- modules/src/clients/ics11_beefy/header.rs | 31 +- modules/src/core/ics02_client/error.rs | 4 +- 6 files changed, 666 insertions(+), 78 deletions(-) diff --git a/modules/src/clients/ics11_beefy/client_def.rs b/modules/src/clients/ics11_beefy/client_def.rs index 8b13789179..96c894c24c 100644 --- a/modules/src/clients/ics11_beefy/client_def.rs +++ b/modules/src/clients/ics11_beefy/client_def.rs @@ -1 +1,531 @@ +use beefy_client::primitives::{MmrUpdateProof, ParachainHeader, ParachainsUpdateProof}; +use beefy_client::traits::{StorageRead, StorageWrite}; +use beefy_client::BeefyLightClient; +use codec::Encode; +use core::convert::TryInto; +use pallet_mmr_primitives::BatchProof; +use prost::Message; +use sp_core::H256; +use sp_runtime::traits::BlakeTwo256; +use tendermint_proto::Protobuf; +use crate::clients::ics11_beefy::client_state::ClientState; +use crate::clients::ics11_beefy::consensus_state::ConsensusState; +use crate::clients::ics11_beefy::error::Error; +use crate::clients::ics11_beefy::header::BeefyHeader; +use crate::core::ics02_client::client_consensus::AnyConsensusState; +use crate::core::ics02_client::client_def::ClientDef; +use crate::core::ics02_client::client_state::AnyClientState; +use crate::core::ics02_client::client_type::ClientType; +use crate::core::ics02_client::context::ClientReader; +use crate::core::ics02_client::error::Error as Ics02Error; +use crate::core::ics03_connection::connection::ConnectionEnd; +use crate::core::ics04_channel::channel::ChannelEnd; +use crate::core::ics04_channel::context::ChannelReader; +use crate::core::ics04_channel::packet::Sequence; + +use crate::core::ics23_commitment::commitment::{ + CommitmentPrefix, CommitmentProofBytes, CommitmentRoot, +}; + +use crate::core::ics24_host::identifier::ConnectionId; +use crate::core::ics24_host::identifier::{ChannelId, ClientId, PortId}; +use crate::core::ics24_host::Path; +use crate::prelude::*; +use crate::Height; + +use crate::core::ics24_host::path::{ + AcksPath, ChannelEndsPath, ClientConsensusStatePath, ClientStatePath, CommitmentsPath, + ConnectionsPath, ReceiptsPath, SeqRecvsPath, +}; +use crate::downcast; + +#[derive(Clone, Debug, Default, PartialEq, Eq)] +pub struct BeefyClient { + store: Store, +} + +impl ClientDef for BeefyClient { + type Header = BeefyHeader; + type ClientState = ClientState; + type ConsensusState = ConsensusState; + + fn check_header_and_update_state( + &self, + ctx: &dyn ClientReader, + client_id: ClientId, + client_state: Self::ClientState, + header: Self::Header, + ) -> Result<(Self::ClientState, Self::ConsensusState), Ics02Error> { + let mut light_client = BeefyLightClient::new(self.store.clone()); + if let Some(mmr_update) = header.mmr_update_proof { + light_client + .ingest_mmr_root_with_proof(mmr_update) + .map_err(|e| Ics02Error::Beefy(Error::invalid_mmmr_update(format!("{:?}", e))))?; + } + + let mut leaf_indices = vec![]; + let parachain_headers = header + .parachain_headers + .clone() + .into_iter() + .map(|header| { + let leaf_index = + 
client_state.to_leaf_index(header.partial_mmr_leaf.parent_number_and_hash.0); + leaf_indices.push(leaf_index as u64); + ParachainHeader { + parachain_header: header.parachain_header.encode(), + partial_mmr_leaf: header.partial_mmr_leaf, + para_id: header.para_id, + parachain_heads_proof: header.parachain_heads_proof, + heads_leaf_index: header.heads_leaf_index, + heads_total_count: header.heads_total_count, + extrinsic_proof: header.extrinsic_proof, + } + }) + .collect::>(); + let leaf_count = (client_state.to_leaf_index(client_state.latest_beefy_height) + 1) as u64; + + let parachain_update_proof = ParachainsUpdateProof { + parachain_headers, + mmr_proof: BatchProof { + leaf_indices, + leaf_count, + items: header + .mmr_proofs + .into_iter() + .map(|item| H256::from_slice(&item)), + }, + }; + + light_client + .verify_parachain_headers(parachain_update_proof) + .map_err(|e| Ics02Error::Beefy(Error::invalid_mmmr_update(format!("{:?}", e))))?; + // Check if a consensus state is already installed; if so it should + // match the untrusted header + let mut consensus_states = header + .parachain_headers + .into_iter() + .map(ConsensusState::from) + .collect::>(); + consensus_states.sort_by(|a, b| { + a.parachain_header + .parachain_header + .number + .cmp(&b.parachain_header.parachain_header.number) + }); + + let mut latest_para_height = client_state.latest_height(); + let trusted_consensus_state = + downcast_consensus_state(ctx.consensus_state(&client_id, latest_para_height)?)?; + let mut last_seen_cs = None; + let mut last_seen_height = None; + for cs_state in consensus_states { + let height = Height::new( + client_state.para_id as u64, + cs_state.parachain_header.parachain_header.number as u64, + ); + let existing_consensus_state = match ctx.maybe_consensus_state(&client_id, height)? { + Some(cs) => { + let cs = downcast_consensus_state(cs)?; + // If this consensus state matches, skip verification + // (optimization) + if cs == cs_state { + // Header is already installed and matches the incoming + // header (already verified) + continue; + } + Some(cs) + } + None => None, + }; + + // If the header has verified, but its corresponding consensus state + // differs from the existing consensus state for that height, freeze the + // client and return the installed consensus state. + if let Some(cs) = existing_consensus_state { + if cs != cs_state { + let frozen_height = Height::new( + client_state.para_id as u64, + client_state.latest_beefy_height as u64, + ); + return Ok((client_state.with_frozen_height(frozen_height)?, cs)); + } + } + + // Monotonicity checks for timestamps for in-the-middle updates + // (cs-new, cs-next, cs-latest) + if height < latest_para_height { + let maybe_next_cs = ctx + .next_consensus_state(&client_id, height)? + .map(downcast_consensus_state) + .transpose()?; + + if let Some(next_cs) = maybe_next_cs { + // New (untrusted) header timestamp cannot occur after next + // consensus state's height + if cs_state.timestamp > next_cs.timestamp { + // return Err(Ics02Error::beefy( + // Error::header_timestamp_too_high( + // cs_state.timestamp.to_string(), + // next_cs.timestamp.to_string(), + // ), + // )); + continue; + } + } + } + // (cs-trusted, cs-prev, cs-new) + if height > latest_para_height { + let maybe_prev_cs = ctx + .prev_consensus_state(&client_id, header.height())? 
+ .map(downcast_consensus_state) + .transpose()?; + + if let Some(maybe_prev_cs) = maybe_prev_cs { + // New (untrusted) header timestamp cannot occur before the + // previous consensus state's height + if cs_state.timestamp < maybe_prev_cs.timestamp { + // return Err(Ics02Error::beefy( + // Error::header_timestamp_too_low( + // header.signed_header.header().time.to_string(), + // prev_cs.timestamp.to_string(), + // ), + // )); + continue; + } + } + } + last_seen_height = Some(height); + last_seen_cs = Some(cs_state) + } + + let best_cs_state = if let Some(cs_state) = last_seen_cs { + cs_state + } else { + trusted_consensus_state + }; + + let best_para_height = if let Some(best_height) = last_seen_height { + best_height + } else { + latest_para_height + }; + let mmr_state = self + .store + .mmr_state() + .map_err(|e| Ics02Error::Beefy(Error::implementation_specific(format!("{:?}", e))))?; + let authorities = self + .store + .authority_set() + .map_err(|e| Ics02Error::Beefy(Error::implementation_specific(format!("{:?}", e))))?; + Ok(( + client_state.with_updates(mmr_state, authorities, latest_para_height), + best_cs_state, + )) + } + + fn verify_client_consensus_state( + &self, + client_state: &Self::ClientState, + height: Height, + prefix: &CommitmentPrefix, + proof: &CommitmentProofBytes, + root: &CommitmentRoot, + client_id: &ClientId, + consensus_height: Height, + expected_consensus_state: &AnyConsensusState, + ) -> Result<(), Ics02Error> { + client_state.verify_height(height)?; + + let path = ClientConsensusStatePath { + client_id: client_id.clone(), + epoch: consensus_height.revision_number, + height: consensus_height.revision_height, + }; + let value = expected_consensus_state.encode_vec().unwrap(); + verify_membership(client_state, prefix, proof, root, path, value) + } + + fn verify_connection_state( + &self, + client_state: &Self::ClientState, + height: Height, + prefix: &CommitmentPrefix, + proof: &CommitmentProofBytes, + root: &CommitmentRoot, + connection_id: &ConnectionId, + expected_connection_end: &ConnectionEnd, + ) -> Result<(), Ics02Error> { + client_state.verify_height(height)?; + + let path = ConnectionsPath(connection_id.clone()); + let value = expected_connection_end.encode_vec().unwrap(); + verify_membership(client_state, prefix, proof, root, path, value) + } + + fn verify_channel_state( + &self, + client_state: &Self::ClientState, + height: Height, + prefix: &CommitmentPrefix, + proof: &CommitmentProofBytes, + root: &CommitmentRoot, + port_id: &PortId, + channel_id: &ChannelId, + expected_channel_end: &ChannelEnd, + ) -> Result<(), Ics02Error> { + client_state.verify_height(height)?; + + let path = ChannelEndsPath(port_id.clone(), channel_id.clone()); + let value = expected_channel_end.encode_vec().unwrap(); + verify_membership(client_state, prefix, proof, root, path, value) + } + + fn verify_client_full_state( + &self, + client_state: &Self::ClientState, + height: Height, + prefix: &CommitmentPrefix, + proof: &CommitmentProofBytes, + root: &CommitmentRoot, + client_id: &ClientId, + expected_client_state: &AnyClientState, + ) -> Result<(), Ics02Error> { + client_state.verify_height(height)?; + + let path = ClientStatePath(client_id.clone()); + let value = expected_client_state.encode_vec().unwrap(); + verify_membership(client_state, prefix, proof, root, path, value) + } + + fn verify_packet_data( + &self, + ctx: &dyn ChannelReader, + client_state: &Self::ClientState, + height: Height, + connection_end: &ConnectionEnd, + proof: &CommitmentProofBytes, + root: 
&CommitmentRoot, + port_id: &PortId, + channel_id: &ChannelId, + sequence: Sequence, + commitment: String, + ) -> Result<(), Ics02Error> { + client_state.verify_height(height)?; + verify_delay_passed(ctx, height, connection_end)?; + + let commitment_path = CommitmentsPath { + port_id: port_id.clone(), + channel_id: channel_id.clone(), + sequence, + }; + + let mut commitment_bytes = Vec::new(); + commitment + .encode(&mut commitment_bytes) + .expect("buffer size too small"); + + verify_membership( + client_state, + connection_end.counterparty().prefix(), + proof, + root, + commitment_path, + commitment_bytes, + ) + } + + fn verify_packet_acknowledgement( + &self, + ctx: &dyn ChannelReader, + client_state: &Self::ClientState, + height: Height, + connection_end: &ConnectionEnd, + proof: &CommitmentProofBytes, + root: &CommitmentRoot, + port_id: &PortId, + channel_id: &ChannelId, + sequence: Sequence, + ack: Vec, + ) -> Result<(), Ics02Error> { + client_state.verify_height(height)?; + verify_delay_passed(ctx, height, connection_end)?; + + let ack_path = AcksPath { + port_id: port_id.clone(), + channel_id: channel_id.clone(), + sequence, + }; + verify_membership( + client_state, + connection_end.counterparty().prefix(), + proof, + root, + ack_path, + ack, + ) + } + + fn verify_next_sequence_recv( + &self, + ctx: &dyn ChannelReader, + client_state: &Self::ClientState, + height: Height, + connection_end: &ConnectionEnd, + proof: &CommitmentProofBytes, + root: &CommitmentRoot, + port_id: &PortId, + channel_id: &ChannelId, + sequence: Sequence, + ) -> Result<(), Ics02Error> { + client_state.verify_height(height)?; + verify_delay_passed(ctx, height, connection_end)?; + + let mut seq_bytes = Vec::new(); + u64::from(sequence) + .encode(&mut seq_bytes) + .expect("buffer size too small"); + + let seq_path = SeqRecvsPath(port_id.clone(), channel_id.clone()); + verify_membership( + client_state, + connection_end.counterparty().prefix(), + proof, + root, + seq_path, + seq_bytes, + ) + } + + fn verify_packet_receipt_absence( + &self, + ctx: &dyn ChannelReader, + client_state: &Self::ClientState, + height: Height, + connection_end: &ConnectionEnd, + proof: &CommitmentProofBytes, + root: &CommitmentRoot, + port_id: &PortId, + channel_id: &ChannelId, + sequence: Sequence, + ) -> Result<(), Ics02Error> { + client_state.verify_height(height)?; + verify_delay_passed(ctx, height, connection_end)?; + + let receipt_path = ReceiptsPath { + port_id: port_id.clone(), + channel_id: channel_id.clone(), + sequence, + }; + verify_non_membership( + client_state, + connection_end.counterparty().prefix(), + proof, + root, + receipt_path, + ) + } + + fn verify_upgrade_and_update_state( + &self, + _client_state: &Self::ClientState, + _consensus_state: &Self::ConsensusState, + _proof_upgrade_client: RawMerkleProof, + _proof_upgrade_consensus_state: RawMerkleProof, + ) -> Result<(Self::ClientState, Self::ConsensusState), Ics02Error> { + todo!() + } +} + +fn verify_membership( + client_state: &ClientState, + prefix: &CommitmentPrefix, + proof: &CommitmentProofBytes, + root: &CommitmentRoot, + path: impl Into, + value: Vec, +) -> Result<(), Ics02Error> { + if root.as_bytes().len() != 32 { + return Err(Ics02Error::beefy(Error::invalid_commitment_root)); + } + let path: Path = path.into(); + let path = path.to_string(); + let mut prefix = prefix.as_bytes().to_vec(); + prefix.extend_from_slice(path.as_bytes()); + let key = codec::Encode::encode(&prefix); + let trie_proof: Vec = proof.clone().into(); + let trie_proof: Vec> = 
codec::Decode::decode(&mut &*trie_proof) + .map_err(|e| Ics02Error::beefy(Error::scale_decode(e)))?; + let root = H256::from_slice(root.into_vec().as_slice()); + sp_trie::verify_trie_proof::, _, _, _>( + &root, + &trie_proof, + vec![&(key, Some(value))], + ) + .map_err(|e| Ics02Error::beefy(Error::ics23_error(e))) +} + +fn verify_non_membership( + _client_state: &ClientState, + prefix: &CommitmentPrefix, + proof: &CommitmentProofBytes, + root: &CommitmentRoot, + path: impl Into, +) -> Result<(), Ics02Error> { + if root.as_bytes().len() != 32 { + return Err(Ics02Error::beefy(Error::invalid_commitment_root)); + } + let path: Path = path.into(); + let path = path.to_string(); + let mut prefix = prefix.as_bytes().to_vec(); + prefix.extend_from_slice(path.as_bytes()); + let key = codec::Encode::encode(&prefix); + let trie_proof: Vec = proof.clone().into(); + let trie_proof: Vec> = codec::Decode::decode(&mut &*trie_proof) + .map_err(|e| Ics02Error::beefy(Error::scale_decode(e)))?; + let root = H256::from_slice(root.into_vec().as_slice()); + sp_trie::verify_trie_proof::, _, _, _>( + &root, + &trie_proof, + vec![&(key, None)], + ) + .map_err(|e| Ics02Error::beefy(Error::ics23_error(e))) +} + +fn verify_delay_passed( + ctx: &dyn ChannelReader, + height: Height, + connection_end: &ConnectionEnd, +) -> Result<(), Ics02Error> { + let current_timestamp = ctx.host_timestamp(); + let current_height = ctx.host_height(); + + let client_id = connection_end.client_id(); + let processed_time = ctx + .client_update_time(client_id, height) + .map_err(|_| Error::processed_time_not_found(client_id.clone(), height))?; + let processed_height = ctx + .client_update_height(client_id, height) + .map_err(|_| Error::processed_height_not_found(client_id.clone(), height))?; + + let delay_period_time = connection_end.delay_period(); + let delay_period_height = ctx.block_delay(delay_period_time); + + ClientState::verify_delay_passed( + current_timestamp, + current_height, + processed_time, + processed_height, + delay_period_time, + delay_period_height, + ) + .map_err(|e| e.into()) +} + +fn downcast_consensus_state(cs: AnyConsensusState) -> Result { + downcast!( + cs => AnyConsensusState::Beefy + ) + .ok_or_else(|| Ics02Error::client_args_type_mismatch(ClientType::Beefy)) +} diff --git a/modules/src/clients/ics11_beefy/client_state.rs b/modules/src/clients/ics11_beefy/client_state.rs index fd5240717b..41bedd421d 100644 --- a/modules/src/clients/ics11_beefy/client_state.rs +++ b/modules/src/clients/ics11_beefy/client_state.rs @@ -1,9 +1,11 @@ use crate::prelude::*; +use beefy_client::traits::{AuthoritySet, MmrState}; use beefy_primitives::mmr::BeefyNextAuthoritySet; use codec::{Decode, Encode}; use core::convert::TryFrom; -use primitive_types::H256; +use core::time::Duration; +use sp_core::H256; use sp_runtime::SaturatedConversion; use tendermint_proto::Protobuf; @@ -14,26 +16,31 @@ use crate::clients::ics11_beefy::header::BeefyHeader; use crate::core::ics02_client::client_state::AnyClientState; use crate::core::ics02_client::client_type::ClientType; use crate::core::ics24_host::identifier::ChainId; +use crate::timestamp::Timestamp; use crate::Height; pub const REVISION_NUMBER: u64 = 0; #[derive(Clone, Debug, PartialEq, Eq)] pub struct ClientState { + /// The chain id which encapsulates the para id + pub chain_id: ChainId, /// Latest mmr root hash pub mmr_root_hash: H256, /// block number for the latest mmr_root_hash - pub latest_beefy_height: Height, + pub latest_beefy_height: u32, /// Block height when the client was frozen 
due to a misbehaviour pub frozen_height: Option, - /// block number that the beefy protocol was activated on the relay chain. - /// This shoould be the first block in the merkle-mountain-range tree. + /// Block number that the beefy protocol was activated on the relay chain. + /// This should be the first block in the merkle-mountain-range tree. pub beefy_activation_block: u32, /// authorities for the current round pub authority: BeefyNextAuthoritySet, /// authorities for the next round pub next_authority_set: BeefyNextAuthoritySet, /// Latest parachain height - pub latest_parachain_height: Option, + pub latest_para_height: Option, + /// Parachain id + pub para_id: u32, } impl Protobuf for ClientState {} @@ -41,6 +48,7 @@ impl Protobuf for ClientState {} impl ClientState { #[allow(clippy::too_many_arguments)] pub fn new( + chain_id: ChainId, mmr_root_hash: H256, beefy_activation_block: u32, latest_beefy_height: u32, @@ -48,29 +56,77 @@ impl ClientState { next_authority_set: BeefyNextAuthoritySet, ) -> Result { // Basic validation for the latest beefy height parameter. - if latest_beefy_height < 0 { + if latest_beefy_height < 0 || latest_para_height < 0 { return Err(Error::validation( - "ClientState latest beefy height must be greater than or equal to zero".to_string(), + "ClientState latest beefy height and latest parachain height must be greater than or equal to zero".to_string(), )); } Ok(Self { + chain_id, mmr_root_hash, - latest_beefy_height: Height::new(REVISION_NUMBER, latest_beefy_height.into()), + latest_beefy_height, frozen_height: None, beefy_activation_block, authority: authority_set, next_authority_set, - latest_parachain_height: None, + latest_para_height: None, + para_id: chain_id.version().saturated_into::(), }) } - pub fn latest_beefy_height(&self) -> Height { - self.latest_beefy_height + pub fn latest_height(&self) -> Height { + self.latest_para_height.unwrap_or_default() + } + + pub fn to_leaf_index(&self, block_number: u32) -> u32 { + if self.beefy_activation_block == 0 { + return block_number - 1; + } + self.beefy_activation_block - (block_number + 1) + } + + pub fn with_updates( + &self, + mmr_state: MmrState, + authorities: AuthoritySet, + latest_para_height: Height, + ) -> Self { + let clone = self.clone(); + Self { + mmr_root_hash: mmr_state.mmr_root_hash, + latest_beefy_height: mmr_state.latest_beefy_height, + next_authority_set: authorities.next_authorities, + authority: authorities.current_authorities, + latest_para_height: Some(latest_para_height), + ..clone + } } - pub fn latest_para_height(&self) -> Height { - self.latest_parachain_height.unwrap_or_default() + /// Verify the time and height delays + pub fn verify_delay_passed( + current_time: Timestamp, + current_height: Height, + processed_time: Timestamp, + processed_height: Height, + delay_period_time: Duration, + delay_period_blocks: u64, + ) -> Result<(), Error> { + let earliest_time = + (processed_time + delay_period_time).map_err(Error::timestamp_overflow)?; + if !(current_time == earliest_time || current_time.after(&earliest_time)) { + return Err(Error::not_enough_time_elapsed(current_time, earliest_time)); + } + + let earliest_height = processed_height.add(delay_period_blocks); + if current_height < earliest_height { + return Err(Error::not_enough_blocks_elapsed( + current_height, + earliest_height, + )); + } + + Ok(()) } pub fn with_frozen_height(self, h: Height) -> Result { @@ -87,7 +143,7 @@ impl ClientState { /// Verify that the client is at a sufficient height and unfrozen at the given height pub 
fn verify_height(&self, height: Height) -> Result<(), Error> { - if self.latest_beefy_height < height { + if (self.latest_beefy_height as u64) < height.revision_height { return Err(Error::insufficient_height( self.latest_beefy_height(), height, @@ -110,7 +166,7 @@ impl crate::core::ics02_client::client_state::ClientState for ClientState { type UpgradeOptions = UpgradeOptions; fn chain_id(&self) -> ChainId { - Default::default() + self.chain_id.clone() } fn client_type(&self) -> ClientType { @@ -118,7 +174,7 @@ impl crate::core::ics02_client::client_state::ClientState for ClientState { } fn latest_height(&self) -> Height { - self.latest_para_height() + self.latest_height() } fn frozen_height(&self) -> Option { @@ -128,12 +184,16 @@ impl crate::core::ics02_client::client_state::ClientState for ClientState { fn upgrade( mut self, upgrade_height: Height, - upgrade_options: UpgradeOptions, - _chain_id: ChainId, + _upgrade_options: UpgradeOptions, + chain_id: ChainId, ) -> Self { self.frozen_height = None; // Upgrade the client state - self.latest_beefy_height = upgrade_height; + self.latest_beefy_height = upgrade_height.revision_height.saturated_into::(); + + self.latest_para_height = None; + + self.para_id = chain_id.version().saturated_into::(); self } @@ -147,8 +207,8 @@ impl TryFrom for ClientState { type Error = Error; fn try_from(raw: RawClientState) -> Result { + // TODO: Change Revison number to para id when chain id is added to the beefy spec let frozen_height = Some(Height::new(REVISION_NUMBER, raw.frozen_height)); - let latest_beefy_height = Height::new(REVISION_NUMBER, raw.latest_beefy_height.into()); let authority_set = raw .authority @@ -175,13 +235,16 @@ impl TryFrom for ClientState { let mmr_root_hash = H256::decode(&mut &raw.mmr_root_hash).map_err(|_| Error::decode())?; Ok(Self { + chain_id: ChainId::default(), mmr_root_hash, - latest_beefy_height, + latest_beefy_height: raw.latest_beefy_height, frozen_height, beefy_activation_block: raw.beefy_activation_block, authority: authority_set, next_authority_set, - latest_parachain_height: None, + latest_para_height: None, + // TODO Para Id should be added to the client state spec + para_id: ChainId::default().version().saturated_into::(), }) } } @@ -190,10 +253,7 @@ impl From for RawClientState { fn from(client_state: ClientState) -> Self { RawClientState { mmr_root_hash: client_state.mmr_root_hash.encode(), - latest_beefy_height: client_state - .latest_beefy_height - .revision_height - .saturated_into::(), + latest_beefy_height: client_state.latest_beefy_height, frozen_height: client_state .frozen_height .unwrap_or_default() diff --git a/modules/src/clients/ics11_beefy/consensus_state.rs b/modules/src/clients/ics11_beefy/consensus_state.rs index 966fc12326..fb7e1bffb0 100644 --- a/modules/src/clients/ics11_beefy/consensus_state.rs +++ b/modules/src/clients/ics11_beefy/consensus_state.rs @@ -19,11 +19,13 @@ use ibc_proto::ibc::lightclients::beefy::v1::{ use crate::clients::ics11_beefy::error::Error; use crate::clients::ics11_beefy::header::{ - decode_parachain_header, merge_leaf_version, split_leaf_version, ParachainHeader, + decode_parachain_header, decode_timestamp_extrinsic, merge_leaf_version, split_leaf_version, + ParachainHeader, }; use crate::core::ics02_client::client_consensus::AnyConsensusState; use crate::core::ics02_client::client_type::ClientType; use crate::core::ics23_commitment::commitment::CommitmentRoot; +use crate::timestamp::Timestamp; pub const IBC_CONSENSUS_ID: [u8; 4] = *b"/IBC"; #[derive(Clone, Debug, 
PartialEq, Eq)] @@ -131,8 +133,6 @@ impl TryFrom for ConsensusState { impl From for RawConsensusState { fn from(value: ConsensusState) -> Self { - // FIXME: shunts like this are necessary due to - // https://github.com/informalsystems/tendermint-rs/issues/1053 let tpb::Timestamp { seconds, nanos } = value.timestamp.into(); let timestamp = ibc_proto::google::protobuf::Timestamp { seconds, nanos }; @@ -206,38 +206,18 @@ impl From for ConsensusState { .unwrap_or_default() }; + let timestamp = decode_timestamp_extrinsic(&header).unwrap_or_default(); + let duration = core::time::Duration::from_millis(timestamp); + let timestamp = Timestamp::from_nanoseconds(duration.as_nanos().saturated_into::()) + .unwrap_or_default(); + Self { root, - timestamp: header.time, - next_validators_hash: header.next_validators_hash, + timestamp: timestamp.into(), + parachain_header: header, } } } -impl From
for ConsensusState { - fn from(header: Header) -> Self { - Self::from(header.signed_header.header) - } -} - #[cfg(test)] -mod tests { - use tendermint_rpc::endpoint::abci_query::AbciQuery; - use test_log::test; - - use crate::test::test_serialization_roundtrip; - - #[test] - fn serialization_roundtrip_no_proof() { - let json_data = - include_str!("../../../tests/support/query/serialization/consensus_state.json"); - test_serialization_roundtrip::(json_data); - } - - #[test] - fn serialization_roundtrip_with_proof() { - let json_data = - include_str!("../../../tests/support/query/serialization/consensus_state_proof.json"); - test_serialization_roundtrip::(json_data); - } -} +mod tests {} diff --git a/modules/src/clients/ics11_beefy/error.rs b/modules/src/clients/ics11_beefy/error.rs index 66257af1bc..126de42051 100644 --- a/modules/src/clients/ics11_beefy/error.rs +++ b/modules/src/clients/ics11_beefy/error.rs @@ -7,6 +7,7 @@ use crate::core::ics24_host::error::ValidationError; use crate::core::ics24_host::identifier::ClientId; use crate::timestamp::{Timestamp, TimestampOverflowError}; use beefy_client::error::BeefyClientError; +use codec::Error as ScaleCodecError; use sp_core::H256; use crate::Height; @@ -16,11 +17,19 @@ define_error! { Error { InvalidAddress |_| { "invalid address" }, - + InvalidMmrUpdate + { reason: String } + |e| { "invalid address {}", e.reason }, + InvalidCommitmentRoot + |_| { "invalid commitment root" } + TimestampExtrinsic + |_| { "error decoding timestamp extrinsic" }, InvalidHeader { reason: String } |e| { format_args!("invalid header, failed basic validation: {}", e.reason) }, - + ImplementationSpecific + { reason: String } + |e| { format_args!("Implementation specific error: {}", e.reason) }, Validation { reason: String } |e| { format_args!("invalid header, failed basic validation: {}", e.reason) }, @@ -68,6 +77,9 @@ define_error! { { reason: String } | e | { format_args!("invalid raw misbehaviour: {}", e.reason) }, + ScaleDecode + [ TraceError ] + | _ | { "Scale decode error" }, Decode [ TraceError ] | _ | { "decode error" }, @@ -81,15 +93,6 @@ define_error! { format_args!("header timestamp {0} must be greater than current client consensus state timestamp {1}", e.low, e.high) }, - HeaderTimestampOutsideTrustingTime - { - low: String, - high: String - } - | e | { - format_args!("header timestamp {0} is outside the trusting period w.r.t. 
consensus state timestamp {1}", e.low, e.high) - }, - HeaderTimestampTooHigh { actual: String, diff --git a/modules/src/clients/ics11_beefy/header.rs b/modules/src/clients/ics11_beefy/header.rs index 4b02683d8b..b193dca8db 100644 --- a/modules/src/clients/ics11_beefy/header.rs +++ b/modules/src/clients/ics11_beefy/header.rs @@ -30,10 +30,10 @@ use ibc_proto::ibc::lightclients::beefy::v1::{ }; use pallet_mmr_primitives::{BatchProof, Proof}; use sp_core::H256; -use sp_runtime::generic::Header as SubstrateHeader; +use sp_runtime::generic::{Header as SubstrateHeader, UncheckedExtrinsic}; use sp_runtime::traits::{BlakeTwo256, SaturatedConversion}; use sp_runtime::Digest; -use sp_trie::TrieDBMut; +use sp_trie::{StorageProof, Trie, TrieDBMut}; /// Beefy consensus header #[derive(Clone, PartialEq, Eq)] @@ -46,7 +46,7 @@ pub struct BeefyHeader { #[derive(Clone, PartialEq, Eq, codec::Encode, codec::Decode)] pub struct ParachainHeader { - pub parachain_header: SubstrateHeader, + pub parachain_header: SubstrateHeader, /// Reconstructed mmr leaf pub partial_mmr_leaf: PartialMmrLeaf, /// parachain id @@ -381,7 +381,9 @@ impl From for RawBeefyHeader { impl Protobuf for BeefyHeader {} -pub fn decode_parachain_header(raw_header: Vec) -> Result, Error> { +pub fn decode_parachain_header( + raw_header: Vec, +) -> Result, Error> { SubstrateHeader::decode(&mut &*raw_header) .map_err(|_| Error::invalid_header("failed to decode parachain header")) } @@ -392,11 +394,24 @@ pub fn decode_header(buf: B) -> Result { .try_into() } -pub fn decode_timestamp_extrinsic(header: ParachainHeader) -> Result { - let mut db = sp_trie::MemoryDB::::default(); - let mut root = Default::default(); +pub fn decode_timestamp_extrinsic(header: &ParachainHeader) -> Result { + let proof = header.extrinsic_proof.clone(); + let extrinsic_root = header.parachain_header.extrinsics_root; + let db = StorageProof::new(proof).into_memory_db::(); + let trie = + sp_trie::TrieDB::>::new(&db, &extrinsic_root).unwrap(); + // Timestamp extrinsic should be the first inherent and hence the first extrinsic + let key = codec::Compact(0u32).encode(); + let ext_bytes = trie + .get(&key) + .map_err(|_| Error::timestamp_extrinsic())? + .ok_or(Error::timestamp_extrinsic())?; - Ok(0) + // Decoding from the [2..] because the timestamp inmherent has two extra bytes before the call that represents the + // call length and the extrinsic version. + let (_, _, timestamp): (u8, u8, codec::Compact) = + codec::Decode::decode(&mut &*ext_bytes[2..]).map_err(|_| Error::timestamp_extrinsic())?; + Ok(timestamp.into()) } #[cfg(test)] diff --git a/modules/src/core/ics02_client/error.rs b/modules/src/core/ics02_client/error.rs index 568a0b5959..21f3bb339c 100644 --- a/modules/src/core/ics02_client/error.rs +++ b/modules/src/core/ics02_client/error.rs @@ -180,9 +180,9 @@ define_error! 
{ [ Ics07Error ] | _ | { "tendermint error" }, - Beefy + Beefy [ Ics011Error ] - | _ | { "tendermint error" }, + | _ | { "Beefy error" }, InvalidPacketTimestamp [ crate::timestamp::ParseTimestampError ] From f08b49b22f8a14190e889a32386ab5e6a8bb97f9 Mon Sep 17 00:00:00 2001 From: David Salami Date: Wed, 6 Apr 2022 11:30:12 +0100 Subject: [PATCH 03/96] fix --- modules/src/clients/ics11_beefy/client_def.rs | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/modules/src/clients/ics11_beefy/client_def.rs b/modules/src/clients/ics11_beefy/client_def.rs index 96c894c24c..e28e69fd0f 100644 --- a/modules/src/clients/ics11_beefy/client_def.rs +++ b/modules/src/clients/ics11_beefy/client_def.rs @@ -40,12 +40,14 @@ use crate::core::ics24_host::path::{ }; use crate::downcast; +pub trait BeefyLCStore: StorageRead + StorageWrite + Clone {} + #[derive(Clone, Debug, Default, PartialEq, Eq)] -pub struct BeefyClient { +pub struct BeefyClient { store: Store, } -impl ClientDef for BeefyClient { +impl ClientDef for BeefyClient { type Header = BeefyHeader; type ClientState = ClientState; type ConsensusState = ConsensusState; @@ -439,7 +441,7 @@ impl ClientDef for BeefyClient } fn verify_membership( - client_state: &ClientState, + _client_state: &ClientState, prefix: &CommitmentPrefix, proof: &CommitmentProofBytes, root: &CommitmentRoot, From a906dfa78a6e862bf5894f8bd08ca0ea20fb5be2 Mon Sep 17 00:00:00 2001 From: David Salami Date: Wed, 6 Apr 2022 13:35:32 +0100 Subject: [PATCH 04/96] client state creation validation --- .../src/clients/ics11_beefy/client_state.rs | 25 ++++++++++++++++--- 1 file changed, 21 insertions(+), 4 deletions(-) diff --git a/modules/src/clients/ics11_beefy/client_state.rs b/modules/src/clients/ics11_beefy/client_state.rs index 41bedd421d..e83aa336e9 100644 --- a/modules/src/clients/ics11_beefy/client_state.rs +++ b/modules/src/clients/ics11_beefy/client_state.rs @@ -55,13 +55,30 @@ impl ClientState { authority_set: BeefyNextAuthoritySet, next_authority_set: BeefyNextAuthoritySet, ) -> Result { - // Basic validation for the latest beefy height parameter. 
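As a rough usage sketch of the constructor contract enforced by this validation (all values below are placeholders, not taken from the patch): the parachain id travels in the ChainId version, the activation block may not exceed the latest BEEFY height, and the next authority set id must be strictly greater than the current one.

    use beefy_primitives::mmr::BeefyNextAuthoritySet;
    use sp_core::H256;

    use crate::clients::ics11_beefy::client_state::ClientState;
    use crate::clients::ics11_beefy::error::Error;
    use crate::core::ics24_host::identifier::ChainId;

    fn example_client_state() -> Result<ClientState, Error> {
        ClientState::new(
            ChainId::new("parachain".to_string(), 2000), // version() doubles as the para id
            H256::zero(),                                // latest mmr root hash (placeholder)
            0,                                           // beefy_activation_block
            120,                                         // latest_beefy_height
            BeefyNextAuthoritySet { id: 0, len: 5, root: H256::zero() },
            BeefyNextAuthoritySet { id: 1, len: 5, root: H256::zero() },
        )
    }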
- if latest_beefy_height < 0 || latest_para_height < 0 { + if chain_id.version() <= 0 { + return Err(Error::validation( + "ClientState Chain id version must be the parachain id which cannot be less or equal to zero ".to_string(), + )); + } + + if latest_beefy_height < 0 { return Err(Error::validation( "ClientState latest beefy height and latest parachain height must be greater than or equal to zero".to_string(), )); } + if beefy_activation_block > latest_beefy_height { + return Err(Error::validation( + "ClientState beefy activation block cannot be greater than latest_beefy_height".to_string(), + )); + } + + if authority_set.id >= next_authority_set.id { + return Err(Error::validation( + "ClientState next authority set id must be greater than current authority set id".to_string(), + )); + } + Ok(Self { chain_id, mmr_root_hash, @@ -143,9 +160,9 @@ impl ClientState { /// Verify that the client is at a sufficient height and unfrozen at the given height pub fn verify_height(&self, height: Height) -> Result<(), Error> { - if (self.latest_beefy_height as u64) < height.revision_height { + if (self.latest_height() as u64) < height.revision_height { return Err(Error::insufficient_height( - self.latest_beefy_height(), + self.latest_height(), height, )); } From e191a944a15968d1c2131baa0899118360945e4f Mon Sep 17 00:00:00 2001 From: David Salami Date: Tue, 10 May 2022 21:22:08 +0100 Subject: [PATCH 05/96] initial refactor --- Cargo.lock | 4151 ++++++++++++----- Cargo.toml | 8 +- .../clients/ics07_tendermint/client_def.rs | 98 +- .../src/clients/ics07_tendermint/header.rs | 12 +- modules/src/clients/ics11_beefy/client_def.rs | 365 +- .../src/clients/ics11_beefy/client_state.rs | 61 +- .../clients/ics11_beefy/consensus_state.rs | 95 +- modules/src/clients/ics11_beefy/error.rs | 34 +- modules/src/clients/ics11_beefy/header.rs | 199 +- .../src/core/ics02_client/client_consensus.rs | 1 - modules/src/core/ics02_client/client_def.rs | 451 +- modules/src/core/ics02_client/client_state.rs | 25 +- modules/src/core/ics02_client/context.rs | 81 +- modules/src/core/ics02_client/error.rs | 9 +- modules/src/core/ics02_client/handler.rs | 8 +- .../ics02_client/handler/create_client.rs | 2 +- .../ics02_client/handler/update_client.rs | 92 +- .../ics02_client/handler/upgrade_client.rs | 12 +- modules/src/core/ics02_client/header.rs | 36 +- .../core/ics02_client/msgs/create_client.rs | 31 +- .../core/ics02_client/msgs/upgrade_client.rs | 35 +- modules/src/core/ics03_connection/handler.rs | 10 +- .../ics03_connection/handler/conn_open_ack.rs | 5 +- .../handler/conn_open_confirm.rs | 5 +- .../ics03_connection/handler/conn_open_try.rs | 5 +- .../core/ics03_connection/handler/verify.rs | 26 +- modules/src/core/ics04_channel/handler.rs | 23 +- .../ics04_channel/handler/acknowledgement.rs | 5 +- .../handler/chan_close_confirm.rs | 5 +- .../ics04_channel/handler/chan_open_ack.rs | 5 +- .../handler/chan_open_confirm.rs | 5 +- .../ics04_channel/handler/chan_open_try.rs | 5 +- .../core/ics04_channel/handler/recv_packet.rs | 8 +- .../src/core/ics04_channel/handler/timeout.rs | 10 +- .../ics04_channel/handler/timeout_on_close.rs | 9 +- .../src/core/ics04_channel/handler/verify.rs | 21 +- modules/src/core/ics24_host/identifier.rs | 2 +- modules/src/core/ics26_routing/handler.rs | 23 +- modules/src/mock/client_def.rs | 44 +- modules/src/relayer/ics18_relayer/utils.rs | 2 +- proto-compiler/src/cmd/clone.rs | 2 +- proto/src/IBC_GO_COMMIT | 2 +- scripts/sync-protobuf.sh | 2 +- 43 files changed, 4058 insertions(+), 1972 deletions(-) diff 
--git a/Cargo.lock b/Cargo.lock index bcd590f5c2..29df546011 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5,6 +5,7 @@ version = 3 [[package]] name = "Inflector" version = "0.11.4" +<<<<<<< HEAD source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fe438c63458706e03479442743baae6c88256498e6431708f6dfc520a26515d3" dependencies = [ @@ -15,18 +16,14 @@ dependencies = [ [[package]] name = "abscissa_core" version = "0.6.0" +======= +>>>>>>> e05319ec (initial refactor) source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6750843603bf31a83accd3c8177f9dbf53a7d64275688fc7371e0a4d9f8628b5" +checksum = "fe438c63458706e03479442743baae6c88256498e6431708f6dfc520a26515d3" dependencies = [ - "abscissa_derive", - "arc-swap", - "backtrace", - "canonical-path", - "clap", - "color-eyre", - "fs-err", - "once_cell", + "lazy_static", "regex", +<<<<<<< HEAD "secrecy", "semver", "serde", @@ -49,6 +46,8 @@ dependencies = [ "quote", "syn", "synstructure", +======= +>>>>>>> e05319ec (initial refactor) ] [[package]] @@ -67,10 +66,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" [[package]] -name = "adler32" -version = "1.2.0" +name = "ahash" +version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aae1277d39aeec15cb388266ecc24b11c80469deae6067e17a1a7aa9e5c1f234" +checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47" +dependencies = [ + "getrandom 0.2.6", + "once_cell", + "version_check", +] [[package]] name = "ahash" @@ -92,21 +96,6 @@ dependencies = [ "memchr", ] -[[package]] -name = "alloc-no-stdlib" -version = "2.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35ef4730490ad1c4eae5c4325b2a95f521d023e5c885853ff7aca0a6a1631db3" - -[[package]] -name = "alloc-stdlib" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "697ed7edc0f1711de49ce108c541623a0af97c6c60b2f6e2b65229847ac843c2" -dependencies = [ - "alloc-no-stdlib", -] - [[package]] name = "ansi_term" version = "0.12.1" @@ -132,18 +121,26 @@ dependencies = [ ] [[package]] +<<<<<<< HEAD name = "arc-swap" version = "1.5.0" +======= +name = "arrayref" +version = "0.3.6" +>>>>>>> e05319ec (initial refactor) source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5d78ce20460b82d3fa150275ed9d55e21064fc7951177baacf86a145c4a4b1f" +checksum = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544" [[package]] +<<<<<<< HEAD name = "arrayref" version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544" [[package]] +======= +>>>>>>> e05319ec (initial refactor) name = "arrayvec" version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -161,14 +158,17 @@ checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" [[package]] name = "arrayvec" version = "0.7.2" +<<<<<<< HEAD source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6" [[package]] name = "ascii" version = "1.0.0" +======= +>>>>>>> e05319ec (initial refactor) source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbf56136a5198c7b01a49e3afcbef6cf84597273d298f54432926024107b0109" +checksum = 
"8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6" [[package]] name = "async-stream" @@ -294,10 +294,10 @@ dependencies = [ ] [[package]] -name = "base16ct" -version = "0.1.1" +name = "base58" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "349a06037c7bf932dd7e7d1f653678b2038b9ad46a74102f1fc7bd7872678cce" +checksum = "6107fe1be6682a68940da878d9e9f5e90ca5745b3dec9fd1bb393c8777d4f581" [[package]] name = "base58" @@ -312,18 +312,49 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" [[package]] -name = "base64ct" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dea908e7347a8c64e378c17e30ef880ad73e3b4498346b055c2c00ea342f3179" +name = "beefy-generic-client" +version = "0.1.0" +source = "git+https://github.com/ComposableFi/beefy-client?branch=master#9a79987cdb3e1b9f90d4151325521608e4ebb506" +dependencies = [ + "beefy-primitives", + "pallet-beefy-mmr", + "pallet-mmr", + "pallet-mmr-primitives", + "parity-scale-codec", + "rs_merkle", + "sp-core", + "sp-core-hashing", + "sp-runtime", + "sp-std 4.0.0", +] [[package]] -name = "bech32" -version = "0.8.1" +name = "beefy-merkle-tree" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[package]] +name = "beefy-primitives" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +dependencies = [ + "parity-scale-codec", + "scale-info", + "sp-api", + "sp-application-crypto", + "sp-core", + "sp-runtime", + "sp-std 4.0.0", +] + +[[package]] +name = "bitflags" +version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf9ff0bbfd639f15c74af777d81383cf53efb7c93613f6cab67c6c11e05bbf8b" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] +<<<<<<< HEAD name = "bech32" version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -339,22 +370,40 @@ dependencies = [ "bitcoin_hashes", "secp256k1", "serde", +======= +name = "bitvec" +version = "0.20.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7774144344a4faa177370406a7ff5f1da24303817368584c6206c8303eb07848" +dependencies = [ + "funty", + "radium", + "tap", + "wyz", +>>>>>>> e05319ec (initial refactor) ] [[package]] -name = "bitcoin_hashes" -version = "0.10.0" +name = "blake2-rfc" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "006cc91e1a1d99819bc5b8214be3555c1f0611b169f527a1fdc54ed1f2b745b0" +checksum = "5d6d530bdd2d52966a6d03b7a964add7ae1a288d25214066fd4b600f0f796400" dependencies = [ - "serde", + "arrayvec 0.4.12", + "constant_time_eq", ] [[package]] -name = "bitflags" -version = "1.3.2" +name = "block-buffer" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" +checksum = "c0940dc441f31689269e10ac70eb1002a3a1d3ad1390e030043662eb7fe4688b" +dependencies = [ + "block-padding 0.1.5", + "byte-tools", + "byteorder", + "generic-array 0.12.4", +] [[package]] name = "bitvec" @@ -407,6 +456,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"0bf7fe51849ea569fd452f37822f606a5cabb684dc918707a0193fd4664ff324" dependencies = [ "generic-array 0.14.5", +<<<<<<< HEAD ] [[package]] @@ -416,44 +466,24 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fa79dedbb091f449f1f39e53edf88d5dbe95f895dae6135a8d7b881fb5af73f5" dependencies = [ "byte-tools", +======= +>>>>>>> e05319ec (initial refactor) ] [[package]] name = "block-padding" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d696c370c750c948ada61c69a0ee2cbbb9c50b1019ddb86d9317157a99c2cae" - -[[package]] -name = "brotli" -version = "3.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1a0b1dbcc8ae29329621f8d4f0d835787c1c38bb1401979b49d13b0b305ff68" -dependencies = [ - "alloc-no-stdlib", - "alloc-stdlib", - "brotli-decompressor", -] - -[[package]] -name = "brotli-decompressor" -version = "2.3.2" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59ad2d4653bf5ca36ae797b1f4bb4dbddb60ce49ca4aed8a2ce4829f60425b80" +checksum = "fa79dedbb091f449f1f39e53edf88d5dbe95f895dae6135a8d7b881fb5af73f5" dependencies = [ - "alloc-no-stdlib", - "alloc-stdlib", + "byte-tools", ] [[package]] -name = "buf_redux" -version = "0.8.4" +name = "block-padding" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b953a6887648bb07a535631f2bc00fbdb2a2216f135552cb3f534ed136b9c07f" -dependencies = [ - "memchr", - "safemem", -] +checksum = "8d696c370c750c948ada61c69a0ee2cbbb9c50b1019ddb86d9317157a99c2cae" [[package]] name = "bumpalo" @@ -470,14 +500,17 @@ checksum = "87c5fdd0166095e1d463fc6cc01aa8ce547ad77a4e84d42eb6762b084e28067e" [[package]] name = "byte-tools" version = "0.3.1" +<<<<<<< HEAD source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" [[package]] name = "bytecount" version = "0.6.2" +======= +>>>>>>> e05319ec (initial refactor) source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72feb31ffc86498dacdbd0fcebb56138e7177a8cc5cea4516031d15ae85a742e" +checksum = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" [[package]] name = "byteorder" @@ -492,6 +525,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c4872d67bab6358e59559027aa3b9157c53d9358c51423c17554809a8858e0f8" [[package]] +<<<<<<< HEAD name = "bzip2" version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -550,6 +584,8 @@ dependencies = [ ] [[package]] +======= +>>>>>>> e05319ec (initial refactor) name = "cc" version = "1.0.73" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -643,54 +679,13 @@ dependencies = [ ] [[package]] -name = "color-eyre" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ebf286c900a6d5867aeff75cfee3192857bb7f24b547d4f0df2ed6baa812c90" -dependencies = [ - "backtrace", - "color-spantrace", - "eyre", - "indenter", - "once_cell", - "owo-colors", - "tracing-error", -] - -[[package]] -name = "color-spantrace" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ba75b3d9449ecdccb27ecbc479fdc0b87fa2dd43d2f8298f9bf0e59aacc8dce" -dependencies = [ - "once_cell", - "owo-colors", - "tracing-core", - "tracing-error", -] - -[[package]] -name = "console" -version = "0.15.0" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "a28b32d32ca44b70c3e4acd7db1babf555fa026e385fb95f18028f88848b3c31" -dependencies = [ - "encode_unicode", - "libc", - "once_cell", - "regex", - "terminal_size", - "unicode-width", - "winapi", -] - -[[package]] -name = "const-oid" -version = "0.7.1" +name = "constant_time_eq" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4c78c047431fee22c1a7bb92e00ad095a02a983affe4d8a72e2a2c62c1b94f3" +checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" [[package]] +<<<<<<< HEAD name = "constant_time_eq" version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -699,13 +694,12 @@ checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" [[package]] name = "contracts" version = "0.6.3" +======= +name = "convert_case" +version = "0.4.0" +>>>>>>> e05319ec (initial refactor) source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1d1429e3bd78171c65aa010eabcdf8f863ba3254728dbfb0ad4b1545beac15c" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] +checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" [[package]] name = "convert_case" @@ -747,16 +741,6 @@ dependencies = [ "cfg-if 1.0.0", ] -[[package]] -name = "crossbeam-channel" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b153fe7cbef478c567df0f972e02e6d736db11affe43dfc9c56a9374d1adfb87" -dependencies = [ - "crossbeam-utils 0.7.2", - "maybe-uninit", -] - [[package]] name = "crossbeam-channel" version = "0.5.4" @@ -764,7 +748,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5aaa7bd5fb665c6864b5f963dd9097905c54125909c7aa94c9e18507cdbe6c53" dependencies = [ "cfg-if 1.0.0", - "crossbeam-utils 0.8.8", + "crossbeam-utils", ] [[package]] @@ -774,23 +758,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6455c0ca19f0d2fbf751b908d5c55c1f5cbc65e03c4225427254b46890bdde1e" dependencies = [ "cfg-if 1.0.0", - "crossbeam-epoch 0.9.8", - "crossbeam-utils 0.8.8", -] - -[[package]] -name = "crossbeam-epoch" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "058ed274caafc1f60c4997b5fc07bf7dc7cca454af7c6e81edffe5f33f70dace" -dependencies = [ - "autocfg", - "cfg-if 0.1.10", - "crossbeam-utils 0.7.2", - "lazy_static", - "maybe-uninit", - "memoffset 0.5.6", - "scopeguard", + "crossbeam-epoch", + "crossbeam-utils", ] [[package]] @@ -801,23 +770,12 @@ checksum = "1145cf131a2c6ba0615079ab6a638f7e1973ac9c2634fcbeaaad6114246efe8c" dependencies = [ "autocfg", "cfg-if 1.0.0", - "crossbeam-utils 0.8.8", + "crossbeam-utils", "lazy_static", - "memoffset 0.6.5", + "memoffset", "scopeguard", ] -[[package]] -name = "crossbeam-utils" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3c7c73a2d1e9fc0886a08b93e98eb643461230d5f1925e4036204d5f2e261a8" -dependencies = [ - "autocfg", - "cfg-if 0.1.10", - "lazy_static", -] - [[package]] name = "crossbeam-utils" version = "0.8.8" @@ -835,6 +793,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" [[package]] +<<<<<<< HEAD name = "crypto-bigint" version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -847,6 +806,8 @@ dependencies = [ ] [[package]] +======= +>>>>>>> e05319ec (initial refactor) name = "crypto-common" version = "0.1.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" @@ -888,6 +849,7 @@ dependencies = [ [[package]] name = "curve25519-dalek" version = "2.1.3" +<<<<<<< HEAD source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4a9b85542f99a2dfa2a1b8e192662741c9859a846b296bef1c92ef9b58b5a216" dependencies = [ @@ -901,43 +863,29 @@ dependencies = [ [[package]] name = "curve25519-dalek" version = "3.2.0" +======= +>>>>>>> e05319ec (initial refactor) source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b9fdf9972b2bd6af2d913799d9ebc165ea4d2e65878e329d9c6b372c4491b61" +checksum = "4a9b85542f99a2dfa2a1b8e192662741c9859a846b296bef1c92ef9b58b5a216" dependencies = [ "byteorder", - "digest 0.9.0", + "digest 0.8.1", "rand_core 0.5.1", "subtle", "zeroize", ] [[package]] -name = "dashmap" -version = "4.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e77a43b28d0668df09411cb0bc9a8c2adc40f9a048afe863e05fd43251e8e39c" -dependencies = [ - "cfg-if 1.0.0", - "num_cpus", -] - -[[package]] -name = "deflate" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f95bf05dffba6e6cce8dfbb30def788154949ccd9aed761b472119c21e01c70" -dependencies = [ - "adler32", - "gzip-header", -] - -[[package]] -name = "der" -version = "0.5.1" +name = "curve25519-dalek" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6919815d73839e7ad218de758883aae3a257ba6759ce7a9992501efbb53d705c" +checksum = "0b9fdf9972b2bd6af2d913799d9ebc165ea4d2e65878e329d9c6b372c4491b61" dependencies = [ - "const-oid", + "byteorder", + "digest 0.9.0", + "rand_core 0.5.1", + "subtle", + "zeroize", ] [[package]] @@ -954,14 +902,19 @@ dependencies = [ ] [[package]] +<<<<<<< HEAD name = "dialoguer" version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d8c8ae48e400addc32a8710c8d62d55cb84249a7d58ac4cd959daecfbaddc545" +======= +name = "digest" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3d0c8c8752312f9713efd397ff63acb9f85585afbf179282e720e7704954dd5" +>>>>>>> e05319ec (initial refactor) dependencies = [ - "console", - "tempfile", - "zeroize", + "generic-array 0.12.4", ] [[package]] @@ -1001,16 +954,6 @@ dependencies = [ "dirs-sys", ] -[[package]] -name = "dirs-next" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b98cf8ebf19c3d1b223e151f99a4f9f0690dca41414773390fc824184ac833e1" -dependencies = [ - "cfg-if 1.0.0", - "dirs-sys-next", -] - [[package]] name = "dirs-sys" version = "0.3.7" @@ -1023,17 +966,13 @@ dependencies = [ ] [[package]] -name = "dirs-sys-next" -version = "0.1.2" +name = "downcast-rs" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ebda144c4fe02d1f7ea1a7d9641b6fc6b580adcfa024ae48797ecdeb6825b4d" -dependencies = [ - "libc", - "redox_users", - "winapi", -] +checksum = "9ea835d29036a4087793836fa931b08837ad5e957da9e23886b29586fb9b6650" [[package]] +<<<<<<< HEAD name = "downcast-rs" version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -1063,25 +1002,38 @@ dependencies = [ [[package]] name = "dyn-clone" version = "1.0.5" +======= +name = "dyn-clonable" +version = "0.9.0" +>>>>>>> e05319ec (initial refactor) source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21e50f3adc76d6a43f5ed73b698a87d0760ca74617f60f7c3b879003536fdd28" +checksum = 
"4e9232f0e607a262ceb9bd5141a3dfb3e4db6994b31989bbfd845878cba59fd4" +dependencies = [ + "dyn-clonable-impl", + "dyn-clone", +] [[package]] -name = "ecdsa" -version = "0.13.4" +name = "dyn-clonable-impl" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0d69ae62e0ce582d56380743515fefaf1a8c70cec685d9677636d7e30ae9dc9" +checksum = "558e40ea573c374cf53507fd240b7ee2f5477df7cfebdb97323ec61c719399c5" dependencies = [ - "der", - "elliptic-curve", - "rfc6979", - "signature", + "proc-macro2", + "quote", + "syn", ] [[package]] -name = "ed25519" -version = "1.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" +name = "dyn-clone" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "21e50f3adc76d6a43f5ed73b698a87d0760ca74617f60f7c3b879003536fdd28" + +[[package]] +name = "ed25519" +version = "1.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e9c280362032ea4203659fc489832d0204ef09f247a0506f170dafcac08c369" dependencies = [ "signature", @@ -1108,6 +1060,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" [[package]] +<<<<<<< HEAD name = "elliptic-curve" version = "0.11.12" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -1132,6 +1085,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f" [[package]] +======= +>>>>>>> e05319ec (initial refactor) name = "env_logger" version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -1147,17 +1102,17 @@ dependencies = [ [[package]] name = "environmental" version = "1.1.3" +<<<<<<< HEAD source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "68b91989ae21441195d7d9b9993a2f9295c7e1a8c96255d8b729accddc124797" [[package]] name = "error-chain" version = "0.12.4" +======= +>>>>>>> e05319ec (initial refactor) source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d2f06b9cac1506ece98fe3231e3cc9c4410ec3d5b1f24ae1c8946f0742cdefc" -dependencies = [ - "version_check", -] +checksum = "68b91989ae21441195d7d9b9993a2f9295c7e1a8c96255d8b729accddc124797" [[package]] name = "eyre" @@ -1185,6 +1140,7 @@ dependencies = [ ] [[package]] +<<<<<<< HEAD name = "ff" version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -1197,13 +1153,17 @@ dependencies = [ [[package]] name = "filetime" version = "0.2.16" +======= +name = "fixed-hash" +version = "0.7.0" +>>>>>>> e05319ec (initial refactor) source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0408e2626025178a6a7f7ffc05a25bc47103229f19c113755de7bf63816290c" +checksum = "cfcf0ed7fe52a17a03854ec54a9f76d6d84508d1c0e66bc1793301c73fc8493c" dependencies = [ - "cfg-if 1.0.0", - "libc", - "redox_syscall", - "winapi", + "byteorder", + "rand 0.8.5", + "rustc-hex", + "static_assertions", ] [[package]] @@ -1236,7 +1196,6 @@ version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c606d892c9de11507fa0dcffc116434f94e105d0bbdc4e405b61519464c49d7b" dependencies = [ - "anyhow", "eyre", "paste", ] @@ -1277,6 +1236,107 @@ dependencies = [ "sp-std 4.0.0", "sp-storage", ] +<<<<<<< HEAD +======= + +[[package]] +name = "frame-metadata" +version = "14.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"37ed5e5c346de62ca5c184b4325a6600d1eaca210666e4606fe4e449574978d0" +dependencies = [ + "cfg-if 1.0.0", + "parity-scale-codec", + "scale-info", + "serde", +] + +[[package]] +name = "frame-support" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +dependencies = [ + "bitflags", + "frame-metadata", + "frame-support-procedural", + "impl-trait-for-tuples", + "log", + "once_cell", + "parity-scale-codec", + "paste", + "scale-info", + "serde", + "smallvec", + "sp-arithmetic", + "sp-core", + "sp-core-hashing-proc-macro", + "sp-inherents", + "sp-io", + "sp-runtime", + "sp-staking", + "sp-state-machine", + "sp-std 4.0.0", + "sp-tracing", + "tt-call", +] + +[[package]] +name = "frame-support-procedural" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +dependencies = [ + "Inflector", + "frame-support-procedural-tools", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "frame-support-procedural-tools" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +dependencies = [ + "frame-support-procedural-tools-derive", + "proc-macro-crate", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "frame-support-procedural-tools-derive" +version = "3.0.0" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "frame-system" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +dependencies = [ + "frame-support", + "log", + "parity-scale-codec", + "scale-info", + "serde", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std 4.0.0", + "sp-version", +] + +[[package]] +name = "funty" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fed34cd105917e91daa4da6b3728c47b068749d6a62c59811f06ed2ac71d9da7" +>>>>>>> e05319ec (initial refactor) [[package]] name = "frame-metadata" @@ -1529,23 +1589,6 @@ version = "0.26.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "78cc372d058dcf6d5ecd98510e7fbc9e5aec4d21de70f65fea8fecebcd881bd4" -[[package]] -name = "glob" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" - -[[package]] -name = "group" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc5ac374b108929de78460075f3dc439fa66df9d8fc77e8f12caa5165fcf0c89" -dependencies = [ - "ff", - "rand_core 0.6.3", - "subtle", -] - [[package]] name = "gumdrop" version = "0.8.1" @@ -1566,15 +1609,6 @@ dependencies = [ "syn", ] -[[package]] -name = "gzip-header" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0131feb3d3bb2a5a238d8a4d09f6353b7ebfdc52e77bccbf4ea6eaa751dde639" -dependencies = [ - "crc32fast", -] - [[package]] name = "h2" version = "0.3.13" @@ -1595,10 +1629,19 @@ dependencies = [ ] [[package]] -name = "half" -version = "1.8.2" +name = "hash-db" +version = "0.15.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"d23bd4e7b5eda0d0f3a307e8b381fdc8ba9000f26fbe912250c0a4cc3956364a" + +[[package]] +name = "hash256-std-hasher" +version = "0.15.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eabb4a44450da02c90444cf74558da904edde8fb4e9035a9a6a4e15445af0bd7" +checksum = "92c171d55b98633f4ed3860808f004099b36c1cc29c42cfc53aa8591b21efcf2" +dependencies = [ + "crunchy", +] [[package]] name = "hash-db" @@ -1623,6 +1666,7 @@ checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e" dependencies = [ "ahash", ] +<<<<<<< HEAD [[package]] name = "hashbrown" @@ -1632,14 +1676,16 @@ checksum = "8c21d40587b92fa6a6c6e3c1bdbf87d75511db5672f9c93175574b3a00df1758" dependencies = [ "ahash", ] +======= +>>>>>>> e05319ec (initial refactor) [[package]] -name = "hdpath" -version = "0.6.0" +name = "hashbrown" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72adf5a17a0952ecfcddf8d46d071271d5ee52e78443f07ba0b2dcfe3063a132" +checksum = "db0d4cf898abf0081f964436dc980e96670a0f36863e4b83aaacdb65c9d7ccc3" dependencies = [ - "byteorder", + "ahash", ] [[package]] @@ -1765,16 +1811,6 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" -[[package]] -name = "humantime-serde" -version = "1.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57a3db5ea5923d99402c94e9feb261dc5ee9b4efa158b0315f788cf549cc200c" -dependencies = [ - "humantime", - "serde", -] - [[package]] name = "hyper" version = "0.14.18" @@ -1883,6 +1919,7 @@ dependencies = [ "test-log", "time 0.3.9", "tracing", +<<<<<<< HEAD "tracing-subscriber", "uint", ] @@ -1903,6 +1940,9 @@ dependencies = [ "tendermint", "tendermint-rpc", "time 0.3.9", +======= + "tracing-subscriber 0.3.11", +>>>>>>> e05319ec (initial refactor) ] [[package]] @@ -1920,6 +1960,7 @@ dependencies = [ ] [[package]] +<<<<<<< HEAD name = "ibc-relayer" version = "0.15.0" dependencies = [ @@ -1927,14 +1968,17 @@ dependencies = [ "async-stream", "bech32 0.9.0", "bitcoin", +======= +name = "ics23" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d454cc0a22bd556cc3d3c69f9d75a392a36244634840697a4b9eb81bc5c8ae0" +dependencies = [ + "anyhow", +>>>>>>> e05319ec (initial refactor) "bytes", - "crossbeam-channel 0.5.4", - "dirs-next", - "env_logger", - "flex-error", - "futures", - "hdpath", "hex", +<<<<<<< HEAD "http", "humantime", "humantime-serde", @@ -1947,11 +1991,11 @@ dependencies = [ "nanoid", "num-bigint 0.4.3", "num-rational 0.4.0", +======= +>>>>>>> e05319ec (initial refactor) "prost", - "prost-types", - "regex", - "retry", "ripemd160", +<<<<<<< HEAD "semver", "serde", "serde_derive", @@ -2097,14 +2141,12 @@ dependencies = [ "sha2 0.9.9", "sha3", "sp-std 3.0.0", +======= + "sha2 0.9.9", + "sha3", + "sp-std 3.0.0", ] -[[package]] -name = "ident_case" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" - [[package]] name = "idna" version = "0.2.3" @@ -2125,6 +2167,36 @@ dependencies = [ "parity-scale-codec", ] +[[package]] +name = "impl-serde" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4551f042f3438e64dbd6226b20527fc84a6e1fe65688b58746a2f53623f25f5c" +dependencies = [ + "serde", +>>>>>>> e05319ec (initial refactor) +] + +[[package]] +name = "impl-trait-for-tuples" 
+version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "impl-codec" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "161ebdfec3c8e3b52bf61c4f3550a1eea4f9579d10dc1b936f3171ebdcd6c443" +dependencies = [ + "parity-scale-codec", +] + [[package]] name = "indexmap" version = "1.8.1" @@ -2173,7 +2245,17 @@ dependencies = [ [[package]] name = "itoa" +<<<<<<< HEAD version = "1.0.2" +======= +version = "0.4.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4" + +[[package]] +name = "itoa" +version = "1.0.1" +>>>>>>> e05319ec (initial refactor) source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "112c678d4050afce233f4f2852bb2eb519230b3cf12f33585275537d7e41578d" @@ -2186,19 +2268,6 @@ dependencies = [ "wasm-bindgen", ] -[[package]] -name = "k256" -version = "0.10.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19c3a5e0a0b8450278feda242592512e09f61c72e018b8cd5c859482802daf2d" -dependencies = [ - "cfg-if 1.0.0", - "ecdsa", - "elliptic-curve", - "sec1", - "sha2 0.9.9", -] - [[package]] name = "keccak" version = "0.1.0" @@ -2272,29 +2341,87 @@ dependencies = [ ] [[package]] -name = "linked-hash-map" -version = "0.5.4" +name = "libm" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fb9b38af92608140b86b693604b9ffcc5824240a484d1ecd4795bacb2fe88f3" +checksum = "33a33a362ce288760ec6a508b94caaec573ae7d3bbbd91b87aa0bad4456839db" [[package]] -name = "linregress" -version = "0.4.4" +name = "libsecp256k1" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6c601a85f5ecd1aba625247bca0031585fb1c446461b142878a16f8245ddeb8" +checksum = "b0452aac8bab02242429380e9b2f94ea20cea2b37e2c1777a1358799bbe97f37" dependencies = [ - "nalgebra", - "statrs", + "arrayref", + "base64", + "digest 0.9.0", + "hmac-drbg", + "libsecp256k1-core", + "libsecp256k1-gen-ecmult", + "libsecp256k1-gen-genmult", + "rand 0.8.5", + "serde", + "sha2 0.9.9", + "typenum", ] [[package]] -name = "lock_api" -version = "0.4.7" +name = "libsecp256k1-core" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "327fa5b6a6940e4699ec49a9beae1ea4845c6bab9314e4f84ac68742139d8c53" +checksum = "5be9b9bb642d8522a44d533eab56c16c738301965504753b03ad1de3425d5451" dependencies = [ - "autocfg", - "scopeguard", + "crunchy", + "digest 0.9.0", + "subtle", +] + +[[package]] +name = "libsecp256k1-gen-ecmult" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3038c808c55c87e8a172643a7d87187fc6c4174468159cb3090659d55bcb4809" +dependencies = [ + "libsecp256k1-core", +] + +[[package]] +name = "libsecp256k1-gen-genmult" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3db8d6ba2cec9eacc40e6e8ccc98931840301f1006e95647ceb2dd5c3aa06f7c" +dependencies = [ + "libsecp256k1-core", +] + +[[package]] +name = "linregress" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6c601a85f5ecd1aba625247bca0031585fb1c446461b142878a16f8245ddeb8" +dependencies = [ + "nalgebra", + "statrs", +] + +[[package]] +name = "linregress" +version = 
"0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6c601a85f5ecd1aba625247bca0031585fb1c446461b142878a16f8245ddeb8" +dependencies = [ + "nalgebra", + "statrs", +] + +[[package]] +name = "lock_api" +version = "0.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "327fa5b6a6940e4699ec49a9beae1ea4845c6bab9314e4f84ac68742139d8c53" +dependencies = [ + "autocfg", + "scopeguard", ] [[package]] @@ -2307,12 +2434,12 @@ dependencies = [ ] [[package]] -name = "mach" -version = "0.3.2" +name = "matchers" +version = "0.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b823e83b2affd8f40a9ee8c29dbc56404c1e34cd2710921f2801e2cf29527afa" +checksum = "f099785f7595cc4b4553a174ce30dd7589ef93391ff414dbb67f62392b9e0ce1" dependencies = [ - "libc", + "regex-automata", ] [[package]] @@ -2340,6 +2467,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a3e378b66a060d48947b590737b30a1be76706c8dd7b8ba0f2fe3989c68a853f" [[package]] +<<<<<<< HEAD name = "matchit" version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -2348,8 +2476,15 @@ checksum = "73cbba799671b762df5a175adf59ce145165747bb891505c43d09aefbbf38beb" [[package]] name = "maybe-uninit" version = "2.0.0" +======= +name = "matrixmultiply" +version = "0.3.2" +>>>>>>> e05319ec (initial refactor) source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" +checksum = "add85d4dd35074e6fedc608f8c8f513a3548619a9024b751949ef0e8e45a4d84" +dependencies = [ + "rawpointer", +] [[package]] name = "memchr" @@ -2359,23 +2494,26 @@ checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" [[package]] name = "memoffset" -version = "0.5.6" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "043175f069eda7b85febe4a74abbaeff828d9f8b448515d3151a14a3542811aa" +checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce" dependencies = [ "autocfg", ] [[package]] -name = "memoffset" -version = "0.6.5" +name = "memory-db" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce" +checksum = "d505169b746dacf02f7d14d8c80b34edfd8212159c63d23c977739a0d960c626" dependencies = [ - "autocfg", + "hash-db", + "hashbrown 0.11.2", + "parity-util-mem", ] [[package]] +<<<<<<< HEAD name = "memory-db" version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -2413,19 +2551,37 @@ checksum = "39617bc909d64b068dcffd0e3e31679195b5576d0c83fadc52690268cc2b2b55" [[package]] name = "mime" version = "0.3.16" +======= +name = "memory_units" +version = "0.3.0" +>>>>>>> e05319ec (initial refactor) source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" +checksum = "71d96e3f3c0b6325d8ccd83c33b28acb183edcb6c67938ba104ec546854b0882" [[package]] -name = "mime_guess" -version = "2.0.4" +name = "merlin" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4192263c238a5f0d0c6bfd21f336a313a4ce1c450542449ca191bb657b4642ef" +checksum = "4e261cf0f8b3c42ded9f7d2bb59dea03aa52bc8a1cbc7482f9fc3fd1229d3b42" dependencies = [ - "mime", - "unicase", + "byteorder", + "keccak", + "rand_core 0.5.1", + "zeroize", ] +[[package]] +name = "micromath" 
+version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "39617bc909d64b068dcffd0e3e31679195b5576d0c83fadc52690268cc2b2b55" + +[[package]] +name = "mime" +version = "0.3.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" + [[package]] name = "minimal-lexical" version = "0.2.1" @@ -2498,12 +2654,17 @@ dependencies = [ "tempfile", "thiserror", "tracing", +<<<<<<< HEAD "tracing-subscriber 0.3.10", +======= + "tracing-subscriber 0.3.11", +>>>>>>> e05319ec (initial refactor) "ureq", "zip", ] [[package]] +<<<<<<< HEAD name = "moka" version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -2523,27 +2684,38 @@ dependencies = [ "thiserror", "triomphe", "uuid 0.8.2", +======= +name = "nalgebra" +version = "0.27.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "462fffe4002f4f2e1f6a9dcf12cc1a6fc0e15989014efc02a941d3e0f5dc2120" +dependencies = [ + "approx", + "matrixmultiply", + "nalgebra-macros", + "num-complex", + "num-rational 0.4.0", + "num-traits", + "rand 0.8.5", + "rand_distr", + "simba", + "typenum", +>>>>>>> e05319ec (initial refactor) ] [[package]] -name = "multipart" -version = "0.18.0" +name = "nalgebra-macros" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00dec633863867f29cb39df64a397cdf4a6354708ddd7759f70c7fb51c5f9182" +checksum = "01fcc0b8149b4632adc89ac3b7b31a12fb6099a0317a4eb2ebff574ef7de7218" dependencies = [ - "buf_redux", - "httparse", - "log", - "mime", - "mime_guess", - "quick-error", - "rand 0.8.5", - "safemem", - "tempfile", - "twoway", + "proc-macro2", + "quote", + "syn", ] [[package]] +<<<<<<< HEAD name = "nalgebra" version = "0.27.1" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -2560,6 +2732,12 @@ dependencies = [ "simba", "typenum", ] +======= +name = "nodrop" +version = "0.1.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72ef4a56884ca558e5ddb05a1d1e7e1bfd9a68d9ed024c21704cc98872dae1bb" +>>>>>>> e05319ec (initial refactor) [[package]] name = "nalgebra-macros" @@ -2610,14 +2788,22 @@ dependencies = [ [[package]] name = "num-bigint" -version = "0.4.3" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f93ab6289c7b344a8a9f60f88d80aa20032336fe78da341afc91c8a2341fc75f" +checksum = "090c7f9998ee0ff65aa5b723e4009f7b217707f1fb5ea551329cc4d6231fb304" dependencies = [ "autocfg", "num-integer", "num-traits", - "serde", +] + +[[package]] +name = "num-complex" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97fbc387afefefd5e9e39493299f3069e14a140dd34dc19b4c1c1a8fddb6a790" +dependencies = [ + "num-traits", ] [[package]] @@ -2667,7 +2853,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c000134b5dbf44adc5cb772486d335293351644b801551abe8f75c84cfa4aef" dependencies = [ "autocfg", +<<<<<<< HEAD "num-bigint 0.2.6", +======= + "num-bigint", +>>>>>>> e05319ec (initial refactor) "num-integer", "num-traits", ] @@ -2679,10 +2869,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d41702bd167c2df5520b384281bc111a4b5efcf7fbc4c9c222c815b07e0a6a6a" dependencies = [ "autocfg", +<<<<<<< HEAD "num-bigint 0.4.3", +======= +>>>>>>> e05319ec (initial refactor) "num-integer", "num-traits", - "serde", ] [[package]] @@ -2730,13 +2922,10 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "7709cef83f0c1f58f666e746a08b21e0085f7440fa6a29cc194d68aac97a4225" [[package]] -name = "oneline-eyre" -version = "0.1.0" +name = "opaque-debug" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "862f17a1e689c0ce8ca158ea48e776c5101c5d14fdfbed3e01c15f89604c3097" -dependencies = [ - "eyre", -] +checksum = "2839e79665f131bdb5782e51f2c6c9599c133c6098982a54c794358bf432529c" [[package]] name = "opaque-debug" @@ -2757,50 +2946,95 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] -name = "opentelemetry" -version = "0.17.0" +name = "os_str_bytes" +version = "6.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6105e89802af13fdf48c49d7646d3b533a70e536d818aae7e78ba0433d01acb8" +checksum = "8e22443d1643a904602595ba1cd8f7d896afe56d26712531c5ff73a15b2fbf64" + +[[package]] +name = "pallet-beefy" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" dependencies = [ - "async-trait", - "crossbeam-channel 0.5.4", - "dashmap", - "fnv", - "futures-channel", - "futures-executor", - "futures-util", - "js-sys", - "lazy_static", - "percent-encoding", - "pin-project", - "rand 0.8.5", - "thiserror", + "beefy-primitives", + "frame-support", + "frame-system", + "pallet-session", + "parity-scale-codec", + "scale-info", + "serde", + "sp-runtime", + "sp-std 4.0.0", ] [[package]] -name = "opentelemetry-prometheus" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9328977e479cebe12ce0d3fcecdaea4721d234895a9440c5b5dfd113f0594ac6" +name = "pallet-beefy-mmr" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" dependencies = [ - "opentelemetry", - "prometheus", - "protobuf", + "beefy-merkle-tree", + "beefy-primitives", + "frame-support", + "frame-system", + "hex", + "libsecp256k1", + "log", + "pallet-beefy", + "pallet-mmr", + "pallet-mmr-primitives", + "pallet-session", + "parity-scale-codec", + "scale-info", + "serde", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std 4.0.0", ] [[package]] +<<<<<<< HEAD name = "os_str_bytes" version = "6.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "029d8d0b2f198229de29dca79676f2738ff952edf3fde542eb8bf94d8c21b435" +======= +name = "pallet-mmr" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +dependencies = [ + "ckb-merkle-mountain-range", + "frame-benchmarking", + "frame-support", + "frame-system", + "pallet-mmr-primitives", + "parity-scale-codec", + "scale-info", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std 4.0.0", +] +>>>>>>> e05319ec (initial refactor) [[package]] -name = "owo-colors" -version = "3.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "decf7381921fea4dcb2549c5667eda59b3ec297ab7e2b5fc33eac69d2e7da87b" +name = "pallet-mmr-primitives" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +dependencies = [ + "frame-support", + "frame-system", + "log", + "parity-scale-codec", + "serde", + "sp-api", + "sp-core", + "sp-runtime", + 
"sp-std 4.0.0", +] [[package]] +<<<<<<< HEAD name = "pallet-beefy" version = "4.0.0-dev" source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" @@ -2976,47 +3210,126 @@ name = "parking_lot" version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99" +======= +name = "pallet-session" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +>>>>>>> e05319ec (initial refactor) dependencies = [ - "instant", - "lock_api", - "parking_lot_core 0.8.5", + "frame-support", + "frame-system", + "impl-trait-for-tuples", + "log", + "pallet-timestamp", + "parity-scale-codec", + "scale-info", + "sp-core", + "sp-io", + "sp-runtime", + "sp-session", + "sp-staking", + "sp-std 4.0.0", + "sp-trie", ] [[package]] -name = "parking_lot" -version = "0.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87f5ec2493a61ac0506c0f4199f99070cbe83857b0337006a30f3e6719b8ef58" +name = "pallet-timestamp" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" dependencies = [ - "lock_api", - "parking_lot_core 0.9.3", + "frame-benchmarking", + "frame-support", + "frame-system", + "log", + "parity-scale-codec", + "scale-info", + "sp-inherents", + "sp-runtime", + "sp-std 4.0.0", + "sp-timestamp", ] [[package]] -name = "parking_lot_core" -version = "0.8.5" +name = "parity-scale-codec" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d76e8e1493bcac0d2766c42737f34458f1c8c50c0d23bcb24ea953affb273216" +checksum = "373b1a4c1338d9cd3d1fa53b3a11bdab5ab6bd80a20f7f7becd76953ae2be909" +dependencies = [ + "arrayvec 0.7.2", + "bitvec", + "byte-slice-cast", + "impl-trait-for-tuples", + "parity-scale-codec-derive", + "serde", +] + +[[package]] +name = "parity-scale-codec-derive" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1557010476e0595c9b568d16dcfb81b93cdeb157612726f5170d31aa707bed27" +dependencies = [ + "proc-macro-crate", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "parity-util-mem" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f4cb4e169446179cbc6b8b6320cc9fca49bd2e94e8db25f25f200a8ea774770" dependencies = [ "cfg-if 1.0.0", - "instant", - "libc", - "redox_syscall", - "smallvec", + "hashbrown 0.11.2", + "impl-trait-for-tuples", + "parity-util-mem-derive", + "parking_lot", + "primitive-types", "winapi", ] +[[package]] +name = "parity-util-mem-derive" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f557c32c6d268a07c921471619c0295f5efad3a0e76d4f97a05c091a51d110b2" +dependencies = [ + "proc-macro2", + "syn", + "synstructure", +] + +[[package]] +name = "parity-wasm" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be5e13c266502aadf83426d87d81a0f5d1ef45b8027f5a471c360abfe4bfae92" + +[[package]] +name = "parking_lot" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99" +dependencies = [ + "instant", + "lock_api", + "parking_lot_core", +] + [[package]] name = "parking_lot_core" 
-version = "0.9.3" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09a279cbf25cb0757810394fbc1e359949b59e348145c643a939a525692e6929" +checksum = "d76e8e1493bcac0d2766c42737f34458f1c8c50c0d23bcb24ea953affb273216" dependencies = [ "cfg-if 1.0.0", + "instant", "libc", "redox_syscall", "smallvec", - "windows-sys", + "winapi", ] [[package]] @@ -3109,17 +3422,26 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] -name = "pkcs8" -version = "0.8.0" +name = "ppv-lite86" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eb9f9e6e233e5c4a35559a617bf40a4ec447db2e84c20b55a6f83167b7e57872" + +[[package]] +name = "primitive-types" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cabda3fb821068a9a4fab19a683eac3af12edf0f34b94a8be53c4972b8149d0" +checksum = "05e4722c697a58a99d5d06a08c30821d7c082a4632198de1eaa5a6c22ef42373" dependencies = [ - "der", - "spki", - "zeroize", + "fixed-hash", + "impl-codec", + "impl-serde", + "scale-info", + "uint", ] [[package]] +<<<<<<< HEAD name = "pkg-config" version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -3128,8 +3450,16 @@ checksum = "1df8c4ec4b0627e53bdf214615ad287367e482558cf84b109250b37464dc03ae" [[package]] name = "ppv-lite86" version = "0.2.16" +======= +name = "proc-macro-crate" +version = "1.1.3" +>>>>>>> e05319ec (initial refactor) source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb9f9e6e233e5c4a35559a617bf40a4ec447db2e84c20b55a6f83167b7e57872" +checksum = "e17d47ce914bf4de440332250b0edd23ce48c005f59fab39d3335866b114f11a" +dependencies = [ + "thiserror", + "toml", +] [[package]] name = "primitive-types" @@ -3188,6 +3518,7 @@ dependencies = [ ] [[package]] +<<<<<<< HEAD name = "prometheus" version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -3203,6 +3534,8 @@ dependencies = [ ] [[package]] +======= +>>>>>>> e05319ec (initial refactor) name = "prost" version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -3236,6 +3569,7 @@ dependencies = [ ] [[package]] +<<<<<<< HEAD name = "protobuf" version = "2.27.1" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -3275,6 +3609,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] +======= +>>>>>>> e05319ec (initial refactor) name = "quote" version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -3374,6 +3710,7 @@ dependencies = [ [[package]] name = "rand_pcg" version = "0.2.1" +<<<<<<< HEAD source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "16abd0c1b639e9eb4d7c50c0b8100b0d0f849be2349829c740fe8e6eb4816429" dependencies = [ @@ -3383,10 +3720,12 @@ dependencies = [ [[package]] name = "raw-cpuid" version = "10.3.0" +======= +>>>>>>> e05319ec (initial refactor) source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "738bc47119e3eeccc7e94c4a506901aea5e7b4944ecd0829cbebf4af04ceda12" +checksum = "16abd0c1b639e9eb4d7c50c0b8100b0d0f849be2349829c740fe8e6eb4816429" dependencies = [ - "bitflags", + "rand_core 0.5.1", ] [[package]] @@ -3413,9 +3752,9 @@ version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"258bcdb5ac6dad48491bb2992db6b7cf74878b0384908af124823d118c99683f" dependencies = [ - "crossbeam-channel 0.5.4", + "crossbeam-channel", "crossbeam-deque", - "crossbeam-utils 0.8.8", + "crossbeam-utils", "num_cpus", ] @@ -3441,18 +3780,30 @@ dependencies = [ [[package]] name = "ref-cast" +<<<<<<< HEAD version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "300f2a835d808734ee295d45007adacb9ebb29dd3ae2424acfa17930cae541da" +======= +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "685d58625b6c2b83e4cc88a27c4bf65adb7b6b16dbdc413e515c9405b47432ab" +>>>>>>> e05319ec (initial refactor) dependencies = [ "ref-cast-impl", ] [[package]] name = "ref-cast-impl" +<<<<<<< HEAD version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4c38e3aecd2b21cb3959637b883bb3714bc7e43f0268b9a29d3743ee3e55cdd2" +======= +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a043824e29c94169374ac5183ac0ed43f5724dc4556b19568007486bd840fa1f" +>>>>>>> e05319ec (initial refactor) dependencies = [ "proc-macro2", "quote", @@ -3494,23 +3845,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "retry" -version = "1.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac95c60a949a63fd2822f4964939662d8f2c16c4fa0624fd954bc6e703b9a3f6" - -[[package]] -name = "rfc6979" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96ef608575f6392792f9ecf7890c00086591d29a83910939d430753f7c050525" -dependencies = [ - "crypto-bigint", - "hmac 0.11.0", - "zeroize", -] - [[package]] name = "ring" version = "0.16.20" @@ -3538,11 +3872,12 @@ dependencies = [ ] [[package]] -name = "rouille" -version = "3.5.0" +name = "rs_merkle" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18b2380c42510ef4a28b5f228a174c801e0dec590103e215e60812e2e2f34d05" +checksum = "a632a43487c1332be8e183588079f89b6820fab24e04db49521eacd536837372" dependencies = [ +<<<<<<< HEAD "base64", "brotli", "chrono", @@ -3560,6 +3895,10 @@ dependencies = [ "time 0.3.9", "tiny_http", "url", +======= + "micromath", + "sha2 0.10.2", +>>>>>>> e05319ec (initial refactor) ] [[package]] @@ -3637,6 +3976,7 @@ dependencies = [ ] [[package]] +<<<<<<< HEAD name = "rustls-native-certs" version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -3664,6 +4004,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f2cc38e8fa666e2de3c4aba7edeb5ffc5246c1c2ed0e3d17e560aeeba736b23f" [[package]] +======= +>>>>>>> e05319ec (initial refactor) name = "ryu" version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -3716,12 +4058,6 @@ dependencies = [ "safe-regex-compiler", ] -[[package]] -name = "safemem" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef703b7cb59335eae2eb93ceb664c0eb7ea6bf567079d843e09420219668e072" - [[package]] name = "same-file" version = "1.0.6" @@ -3748,6 +4084,15 @@ dependencies = [ [[package]] name = "scale-info-derive" version = "1.0.0" +<<<<<<< HEAD +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baeb2780690380592f86205aa4ee49815feb2acad8c2f59e6dd207148c3f1fcd" +dependencies = [ + "proc-macro-crate", + "proc-macro2", + "quote", + "syn", +======= source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"baeb2780690380592f86205aa4ee49815feb2acad8c2f59e6dd207148c3f1fcd" dependencies = [ @@ -3757,6 +4102,17 @@ dependencies = [ "syn", ] +[[package]] +name = "schannel" +version = "0.1.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f05ba609c234e60bee0d547fe94a4c7e9da733d1c962cf6e59efa4cd9c8bc75" +dependencies = [ + "lazy_static", + "winapi", +>>>>>>> e05319ec (initial refactor) +] + [[package]] name = "schannel" version = "0.1.20" @@ -3845,6 +4201,7 @@ dependencies = [ ] [[package]] +<<<<<<< HEAD name = "sec1" version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -3877,12 +4234,13 @@ dependencies = [ ] [[package]] +======= +>>>>>>> e05319ec (initial refactor) name = "secrecy" version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9bd1c54ea06cfd2f6b63219704de0b9b4f72dcc2b8fdef820be6cd799780e91e" dependencies = [ - "serde", "zeroize", ] @@ -3914,9 +4272,6 @@ name = "semver" version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8cb243bdfdb5936c8dc3c45762a19d12ab4550cdc753bc247637d4ec35a040fd" -dependencies = [ - "serde", -] [[package]] name = "serde" @@ -3936,16 +4291,6 @@ dependencies = [ "serde", ] -[[package]] -name = "serde_cbor" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bef2ebfde456fb76bbcf9f59315333decc4fda0b2b44b420243c11e0f5ec1f5" -dependencies = [ - "half", - "serde", -] - [[package]] name = "serde_derive" version = "1.0.137" @@ -3991,6 +4336,7 @@ dependencies = [ ] [[package]] +<<<<<<< HEAD name = "serde_yaml" version = "0.8.24" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -4028,6 +4374,8 @@ dependencies = [ ] [[package]] +======= +>>>>>>> e05319ec (initial refactor) name = "sha-1" version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -4052,15 +4400,19 @@ dependencies = [ ] [[package]] -name = "sha1" -version = "0.6.1" +name = "sha2" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1da05c97445caa12d05e848c4a4fcbbea29e748ac28f7e80e9b010392063770" +checksum = "a256f46ea78a0c0d9ff00077504903ac881a1dafdc20da66545699e7776b3e69" dependencies = [ - "sha1_smol", + "block-buffer 0.7.3", + "digest 0.8.1", + "fake-simd", + "opaque-debug 0.2.3", ] [[package]] +<<<<<<< HEAD name = "sha1_smol" version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -4079,10 +4431,13 @@ dependencies = [ ] [[package]] +======= +>>>>>>> e05319ec (initial refactor) name = "sha2" version = "0.9.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4d58a1e1bf39749807d89cf2d98ac2dfa0ff1cb3faa38fbb64dd88ac8013d800" +<<<<<<< HEAD dependencies = [ "block-buffer 0.9.0", "cfg-if 1.0.0", @@ -4458,6 +4813,14 @@ dependencies = [ "sp-tracing", "sp-wasm-interface", "static_assertions", +======= +dependencies = [ + "block-buffer 0.9.0", + "cfg-if 1.0.0", + "cpufeatures", + "digest 0.9.0", + "opaque-debug 0.3.0", +>>>>>>> e05319ec (initial refactor) ] [[package]] @@ -4477,6 +4840,7 @@ name = "sp-session" version = "4.0.0-dev" source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" dependencies = [ +<<<<<<< HEAD "parity-scale-codec", "scale-info", "sp-api", @@ -4484,6 +4848,12 @@ dependencies = [ "sp-runtime", "sp-staking", "sp-std 4.0.0", +======= + "block-buffer 0.9.0", + "digest 0.9.0", + "keccak", + 
"opaque-debug 0.3.0", +>>>>>>> e05319ec (initial refactor) ] [[package]] @@ -4498,6 +4868,7 @@ dependencies = [ ] [[package]] +<<<<<<< HEAD name = "signal-hook" version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -4543,6 +4914,23 @@ dependencies = [ "serde", "sp-debug-derive", "sp-std 4.0.0", +======= +name = "signature" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02658e48d89f2bec991f9a78e69cfa4c316f8d6a6c4ec12fae1aeb263d486788" + +[[package]] +name = "simba" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e82063457853d00243beda9952e910b82593e4b07ae9f721b9278a99a0d3d5c" +dependencies = [ + "approx", + "num-complex", + "num-traits", + "paste", +>>>>>>> e05319ec (initial refactor) ] [[package]] @@ -4562,6 +4950,7 @@ dependencies = [ ] [[package]] +<<<<<<< HEAD name = "sp-tracing" version = "4.0.0" source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" @@ -4574,6 +4963,8 @@ dependencies = [ ] [[package]] +======= +>>>>>>> e05319ec (initial refactor) name = "slab" version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -4597,6 +4988,7 @@ dependencies = [ ] [[package]] +<<<<<<< HEAD name = "sp-version-proc-macro" version = "4.0.0-dev" source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" @@ -4608,647 +5000,729 @@ dependencies = [ ] [[package]] -name = "sp-wasm-interface" -version = "4.1.0-dev" +======= +name = "socket2" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "66d72b759436ae32898a2af0a14218dbf55efde3feeb170eb623637db85ee1e0" +dependencies = [ + "libc", + "winapi", +] + +[[package]] +name = "sp-api" +version = "4.0.0-dev" source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" dependencies = [ - "impl-trait-for-tuples", + "hash-db", "log", "parity-scale-codec", + "sp-api-proc-macro", + "sp-core", + "sp-runtime", + "sp-state-machine", "sp-std 4.0.0", - "wasmi", + "sp-version", + "thiserror", ] [[package]] -name = "spin" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" +name = "sp-api-proc-macro" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +dependencies = [ + "blake2-rfc", + "proc-macro-crate", + "proc-macro2", + "quote", + "syn", +] [[package]] -name = "spki" -version = "0.5.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44d01ac02a6ccf3e07db148d2be087da624fea0221a16152ed01f0496a6b0a27" +name = "sp-application-crypto" +version = "4.0.0" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" dependencies = [ - "base64ct", - "der", + "parity-scale-codec", + "scale-info", + "serde", + "sp-core", + "sp-io", + "sp-std 4.0.0", ] [[package]] -name = "static_assertions" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" - -[[package]] -name = "statrs" -version = "0.15.0" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "05bdbb8e4e78216a85785a85d3ec3183144f98d0097b9281802c019bb07a6f05" +name = "sp-arithmetic" +version = "4.0.0" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" dependencies = [ - "approx", - "lazy_static", - "nalgebra", + "integer-sqrt", "num-traits", - "rand 0.8.5", + "parity-scale-codec", + "scale-info", + "serde", + "sp-debug-derive", + "sp-std 4.0.0", + "static_assertions", ] [[package]] -name = "strsim" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" - -[[package]] -name = "substrate-bip39" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49eee6965196b32f882dd2ee85a92b1dbead41b04e53907f269de3b0dc04733c" +name = "sp-core" +version = "4.1.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" dependencies = [ - "hmac 0.11.0", - "pbkdf2 0.8.0", + "base58", + "bitflags", + "blake2-rfc", + "byteorder", + "dyn-clonable", + "ed25519-dalek", + "futures", + "hash-db", + "hash256-std-hasher", + "hex", + "impl-serde", + "lazy_static", + "libsecp256k1", + "log", + "merlin", + "num-traits", + "parity-scale-codec", + "parity-util-mem", + "parking_lot", + "primitive-types", + "rand 0.7.3", + "regex", + "scale-info", "schnorrkel", - "sha2 0.9.9", + "secrecy", + "serde", + "sha2 0.10.2", + "sp-core-hashing", + "sp-debug-derive", + "sp-externalities", + "sp-runtime-interface", + "sp-std 4.0.0", + "sp-storage", + "ss58-registry", + "substrate-bip39", + "thiserror", + "tiny-bip39", + "tiny-keccak", + "twox-hash", + "wasmi", "zeroize", ] [[package]] -name = "subtle" -version = "2.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" - -[[package]] -name = "subtle-encoding" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7dcb1ed7b8330c5eed5441052651dd7a12c75e2ed88f2ec024ae1fa3a5e59945" +name = "sp-core-hashing" +version = "4.0.0" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" dependencies = [ - "zeroize", + "blake2-rfc", + "byteorder", + "sha2 0.10.2", + "sp-std 4.0.0", + "tiny-keccak", + "twox-hash", ] [[package]] -name = "syn" -version = "1.0.96" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0748dd251e24453cb8717f0354206b91557e4ec8703673a4b30208f2abaf1ebf" +name = "sp-core-hashing-proc-macro" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" dependencies = [ "proc-macro2", "quote", - "unicode-ident", + "sp-core-hashing", + "syn", ] [[package]] -name = "sync_wrapper" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20518fe4a4c9acf048008599e464deb21beeae3d3578418951a189c235a7a9a8" - -[[package]] -name = "synstructure" -version = "0.12.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f" +name = "sp-debug-derive" +version = "4.0.0" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" dependencies = [ 
"proc-macro2", "quote", "syn", - "unicode-xid", ] [[package]] -name = "tagptr" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b2093cf4c8eb1e67749a6762251bc9cd836b6fc171623bd0a9d324d37af2417" - -[[package]] -name = "tap" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" - -[[package]] -name = "tempfile" -version = "3.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cdb1ef4eaeeaddc8fbd371e5017057064af0911902ef36b39801f67cc6d79e4" +name = "sp-externalities" +version = "0.10.0" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" dependencies = [ - "cfg-if 1.0.0", - "fastrand", - "libc", - "redox_syscall", - "remove_dir_all", - "winapi", + "environmental", + "parity-scale-codec", + "sp-std 4.0.0", + "sp-storage", ] [[package]] -name = "tendermint" -version = "0.23.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ca881fa4dedd2b46334f13be7fbc8cc1549ba4be5a833fe4e73d1a1baaf7949" +name = "sp-inherents" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" dependencies = [ "async-trait", - "bytes", - "ed25519", - "ed25519-dalek", - "flex-error", - "futures", - "k256", - "num-traits", - "once_cell", - "prost", - "prost-types", - "ripemd160", - "serde", - "serde_bytes", - "serde_json", - "serde_repr", - "sha2 0.9.9", - "signature", - "subtle", - "subtle-encoding", - "tendermint-proto", - "time 0.3.9", - "zeroize", + "impl-trait-for-tuples", + "parity-scale-codec", + "sp-core", + "sp-runtime", + "sp-std 4.0.0", + "thiserror", ] [[package]] -name = "tendermint-config" -version = "0.23.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6c56ee93f4e9b7e7daba86d171f44572e91b741084384d0ae00df7991873dfd" +name = "sp-io" +version = "4.0.0" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" dependencies = [ - "flex-error", - "serde", - "serde_json", - "tendermint", - "toml", - "url", + "futures", + "hash-db", + "libsecp256k1", + "log", + "parity-scale-codec", + "parking_lot", + "sp-core", + "sp-externalities", + "sp-keystore", + "sp-runtime-interface", + "sp-state-machine", + "sp-std 4.0.0", + "sp-tracing", + "sp-trie", + "sp-wasm-interface", + "tracing", + "tracing-core", ] [[package]] -name = "tendermint-light-client" -version = "0.23.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e30a20da1169069a8629b9535bcee669be8b07480c696b5eb2f7d9cd4e4c431" +name = "sp-keystore" +version = "0.10.0" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" dependencies = [ - "contracts", - "crossbeam-channel 0.4.4", + "async-trait", "derive_more", - "flex-error", "futures", - "serde", - "serde_cbor", - "serde_derive", - "static_assertions", - "tendermint", - "tendermint-light-client-verifier", - "tendermint-rpc", - "time 0.3.9", - "tokio", + "merlin", + "parity-scale-codec", + "parking_lot", + "schnorrkel", + "sp-core", + "sp-externalities", ] [[package]] -name = "tendermint-light-client-verifier" -version = "0.23.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5ae030a759b89cca84860d497d4d4e491615d8a9243cc04c61cd89335ba9b593" +name = "sp-panic-handler" +version = "4.0.0" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" dependencies = [ - "derive_more", - "flex-error", - "serde", - "tendermint", - "time 0.3.9", + "backtrace", + "lazy_static", + "regex", ] [[package]] -name = "tendermint-proto" -version = "0.23.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b71f925d74903f4abbdc4af0110635a307b3cb05b175fdff4a7247c14a4d0874" +name = "sp-runtime" +version = "4.1.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" dependencies = [ - "bytes", - "flex-error", - "num-derive", - "num-traits", - "prost", - "prost-types", + "either", + "hash256-std-hasher", + "impl-trait-for-tuples", + "log", + "parity-scale-codec", + "parity-util-mem", + "paste", + "rand 0.7.3", + "scale-info", "serde", - "serde_bytes", - "subtle-encoding", - "time 0.3.9", + "sp-application-crypto", + "sp-arithmetic", + "sp-core", + "sp-io", + "sp-std 4.0.0", ] [[package]] -name = "tendermint-rpc" -version = "0.23.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a13e63f57ee05a1e927887191c76d1b139de9fa40c180b9f8727ee44377242a6" +name = "sp-runtime-interface" +version = "4.1.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" dependencies = [ - "async-trait", - "async-tungstenite", - "bytes", - "flex-error", - "futures", - "getrandom 0.2.6", - "http", - "hyper", - "hyper-proxy", - "hyper-rustls", - "peg", - "pin-project", - "serde", - "serde_bytes", - "serde_json", - "subtle-encoding", - "tendermint", - "tendermint-config", - "tendermint-proto", - "thiserror", - "time 0.3.9", - "tokio", - "tracing", - "url", - "uuid 0.8.2", - "walkdir", + "impl-trait-for-tuples", + "parity-scale-codec", + "primitive-types", + "sp-externalities", + "sp-runtime-interface-proc-macro", + "sp-std 4.0.0", + "sp-storage", + "sp-tracing", + "sp-wasm-interface", + "static_assertions", ] [[package]] -name = "tendermint-testgen" -version = "0.23.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "442ede2d01e61466e515fd7f1d0aac7c3c86b3066535479caa86a43afb5e2e17" +name = "sp-runtime-interface-proc-macro" +version = "4.0.0" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" dependencies = [ - "ed25519-dalek", - "gumdrop", - "serde", - "serde_json", - "simple-error", - "tempfile", - "tendermint", - "time 0.3.9", + "Inflector", + "proc-macro-crate", + "proc-macro2", + "quote", + "syn", ] [[package]] -name = "termcolor" -version = "1.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bab24d30b911b2376f3a13cc2cd443142f0c81dda04c118693e35b3835757755" +name = "sp-session" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" dependencies = [ - "winapi-util", + "parity-scale-codec", + "scale-info", + "sp-api", + "sp-core", + "sp-runtime", + "sp-staking", + "sp-std 4.0.0", ] [[package]] -name = "terminal_size" -version = "0.1.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "633c1a546cee861a1a6d0dc69ebeca693bf4296661ba7852b9d21d159e0506df" +name = "sp-staking" 
+version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" dependencies = [ - "libc", - "winapi", + "parity-scale-codec", + "scale-info", + "sp-runtime", + "sp-std 4.0.0", ] [[package]] -name = "test-log" -version = "0.2.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4235dbf7ea878b3ef12dea20a59c134b405a66aafc4fc2c7b9935916e289e735" +name = "sp-state-machine" +version = "0.10.0" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" dependencies = [ - "proc-macro2", - "quote", - "syn", + "hash-db", + "log", + "num-traits", + "parity-scale-codec", + "parking_lot", + "rand 0.7.3", + "smallvec", + "sp-core", + "sp-externalities", + "sp-panic-handler", + "sp-std 4.0.0", + "sp-trie", + "thiserror", + "tracing", + "trie-db", + "trie-root", ] [[package]] -name = "textwrap" -version = "0.15.0" +name = "sp-std" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1141d4d61095b28419e22cb0bbf02755f5e54e0526f97f1e3d1d160e60885fb" +checksum = "35391ea974fa5ee869cb094d5b437688fbf3d8127d64d1b9fed5822a1ed39b12" [[package]] -name = "thiserror" -version = "1.0.31" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd829fe32373d27f76265620b5309d0340cb8550f523c1dda251d6298069069a" +name = "sp-std" +version = "4.0.0" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[package]] +name = "sp-storage" +version = "4.0.0" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" dependencies = [ - "thiserror-impl", + "impl-serde", + "parity-scale-codec", + "ref-cast", + "serde", + "sp-debug-derive", + "sp-std 4.0.0", ] [[package]] -name = "thiserror-impl" -version = "1.0.31" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0396bc89e626244658bef819e22d0cc459e795a5ebe878e6ec336d1674a8d79a" +name = "sp-timestamp" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" dependencies = [ - "proc-macro2", - "quote", - "syn", + "async-trait", + "futures-timer", + "log", + "parity-scale-codec", + "sp-api", + "sp-inherents", + "sp-runtime", + "sp-std 4.0.0", + "thiserror", ] [[package]] -name = "thread_local" -version = "1.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5516c27b78311c50bf42c071425c560ac799b11c30b31f87e3081965fe5e0180" +name = "sp-tracing" +version = "4.0.0" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" dependencies = [ - "once_cell", + "parity-scale-codec", + "sp-std 4.0.0", + "tracing", + "tracing-core", + "tracing-subscriber 0.2.25", ] [[package]] -name = "threadpool" -version = "1.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d050e60b33d41c19108b32cea32164033a9013fe3b46cbd4457559bfbf77afaa" +name = "sp-trie" +version = "4.0.0" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" dependencies = [ - "num_cpus", + "hash-db", + "memory-db", + "parity-scale-codec", + "scale-info", + "sp-core", + "sp-std 4.0.0", + "trie-db", + "trie-root", ] [[package]] 
-name = "time" -version = "0.1.43" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca8a50ef2360fbd1eeb0ecd46795a87a19024eb4b53c5dc916ca1fd95fe62438" +name = "sp-version" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" dependencies = [ - "libc", - "winapi", + "impl-serde", + "parity-scale-codec", + "parity-wasm", + "scale-info", + "serde", + "sp-core-hashing-proc-macro", + "sp-runtime", + "sp-std 4.0.0", + "sp-version-proc-macro", + "thiserror", ] [[package]] -name = "time" -version = "0.3.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2702e08a7a860f005826c6815dcac101b19b5eb330c27fe4a5928fec1d20ddd" +name = "sp-version-proc-macro" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" dependencies = [ - "libc", - "num_threads", - "time-macros", + "parity-scale-codec", + "proc-macro2", + "quote", + "syn", ] [[package]] -name = "time-macros" -version = "0.2.4" +>>>>>>> e05319ec (initial refactor) +name = "sp-wasm-interface" +version = "4.1.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +dependencies = [ + "impl-trait-for-tuples", + "log", + "parity-scale-codec", + "sp-std 4.0.0", + "wasmi", +] + +[[package]] +name = "spin" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42657b1a6f4d817cda8e7a0ace261fe0cc946cf3a80314390b22cc61ae080792" +checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" [[package]] -name = "tiny-bip39" -version = "0.8.2" +name = "ss58-registry" +version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffc59cb9dfc85bb312c3a78fd6aa8a8582e310b0fa885d5bb877f6dcc601839d" +checksum = "7b84a70894df7a73666e0694f44b41a9571625e9546fb58a0818a565d2c7e084" dependencies = [ - "anyhow", - "hmac 0.8.1", - "once_cell", - "pbkdf2 0.4.0", - "rand 0.7.3", - "rustc-hash", - "sha2 0.9.9", - "thiserror", - "unicode-normalization", - "wasm-bindgen", - "zeroize", + "Inflector", + "num-format", + "proc-macro2", + "quote", + "serde", + "serde_json", + "unicode-xid", ] [[package]] -name = "tiny-keccak" -version = "2.0.2" +name = "static_assertions" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237" -dependencies = [ - "crunchy", -] +checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" [[package]] -name = "tiny_http" -version = "0.8.2" +name = "statrs" +version = "0.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ce51b50006056f590c9b7c3808c3bd70f0d1101666629713866c227d6e58d39" +checksum = "05bdbb8e4e78216a85785a85d3ec3183144f98d0097b9281802c019bb07a6f05" dependencies = [ - "ascii", - "chrono", - "chunked_transfer", - "log", - "url", + "approx", + "lazy_static", + "nalgebra", + "num-traits", + "rand 0.8.5", ] [[package]] -name = "tinyvec" -version = "1.6.0" +name = "strsim" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" +checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" + +[[package]] +name = "substrate-bip39" +version = "0.4.4" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49eee6965196b32f882dd2ee85a92b1dbead41b04e53907f269de3b0dc04733c" dependencies = [ - "tinyvec_macros", + "hmac 0.11.0", + "pbkdf2 0.8.0", + "schnorrkel", + "sha2 0.9.9", + "zeroize", ] [[package]] -name = "tinyvec_macros" -version = "0.1.0" +name = "subtle" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" +checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" [[package]] -name = "tokio" -version = "1.18.2" +name = "subtle-encoding" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4903bf0427cf68dddd5aa6a93220756f8be0c34fcfa9f5e6191e103e15a31395" +checksum = "7dcb1ed7b8330c5eed5441052651dd7a12c75e2ed88f2ec024ae1fa3a5e59945" dependencies = [ - "bytes", - "libc", - "memchr", - "mio", - "num_cpus", - "once_cell", - "parking_lot 0.12.0", - "pin-project-lite", - "signal-hook-registry", - "socket2", - "tokio-macros", - "winapi", + "zeroize", ] [[package]] -name = "tokio-io-timeout" -version = "1.2.0" +name = "syn" +version = "1.0.96" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30b74022ada614a1b4834de765f9bb43877f910cc8ce4be40e89042c9223a8bf" +checksum = "0748dd251e24453cb8717f0354206b91557e4ec8703673a4b30208f2abaf1ebf" dependencies = [ - "pin-project-lite", - "tokio", + "proc-macro2", + "quote", + "unicode-ident", ] [[package]] -name = "tokio-macros" -version = "1.7.0" +name = "sync_wrapper" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b557f72f448c511a979e2564e55d74e6c4432fc96ff4f6241bc6bded342643b7" +checksum = "20518fe4a4c9acf048008599e464deb21beeae3d3578418951a189c235a7a9a8" + +[[package]] +name = "synstructure" +version = "0.12.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f" dependencies = [ "proc-macro2", "quote", "syn", + "unicode-xid", ] [[package]] -name = "tokio-rustls" -version = "0.22.0" +name = "tap" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc6844de72e57df1980054b38be3a9f4702aba4858be64dd700181a8a6d0e1b6" +checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" + +[[package]] +name = "tap" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" + +[[package]] +name = "tempfile" +version = "3.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5cdb1ef4eaeeaddc8fbd371e5017057064af0911902ef36b39801f67cc6d79e4" dependencies = [ - "rustls 0.19.1", - "tokio", - "webpki 0.21.4", + "cfg-if 1.0.0", + "fastrand", + "libc", + "redox_syscall", + "remove_dir_all", + "winapi", ] [[package]] -name = "tokio-rustls" -version = "0.23.4" +name = "tendermint" +version = "0.23.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c43ee83903113e03984cb9e5cebe6c04a5116269e900e3ddba8f068a62adda59" +checksum = "3ca881fa4dedd2b46334f13be7fbc8cc1549ba4be5a833fe4e73d1a1baaf7949" dependencies = [ - "rustls 0.20.6", - "tokio", - "webpki 0.22.0", + "async-trait", + "bytes", + "ed25519", + "ed25519-dalek", + "flex-error", + "futures", + "num-traits", + "once_cell", + "prost", + "prost-types", + "serde", + "serde_bytes", + 
"serde_json", + "serde_repr", + "sha2 0.9.9", + "signature", + "subtle", + "subtle-encoding", + "tendermint-proto", + "time 0.3.9", + "zeroize", ] [[package]] -name = "tokio-stream" -version = "0.1.8" +name = "tendermint-config" +version = "0.23.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50145484efff8818b5ccd256697f36863f587da82cf8b409c53adf1e840798e3" +checksum = "f6c56ee93f4e9b7e7daba86d171f44572e91b741084384d0ae00df7991873dfd" dependencies = [ - "futures-core", - "pin-project-lite", - "tokio", + "flex-error", + "serde", + "serde_json", + "tendermint", + "toml", + "url", ] [[package]] -name = "tokio-util" -version = "0.7.2" +<<<<<<< HEAD +name = "tendermint-light-client" +version = "0.23.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f988a1a1adc2fb21f9c12aa96441da33a1728193ae0b95d2be22dbd17fcb4e5c" +checksum = "7e30a20da1169069a8629b9535bcee669be8b07480c696b5eb2f7d9cd4e4c431" dependencies = [ - "bytes", - "futures-core", - "futures-sink", - "pin-project-lite", + "contracts", + "crossbeam-channel 0.4.4", + "derive_more", + "flex-error", + "futures", + "serde", + "serde_cbor", + "serde_derive", + "static_assertions", + "tendermint", + "tendermint-light-client-verifier", + "tendermint-rpc", + "time 0.3.9", "tokio", - "tracing", ] [[package]] -name = "toml" -version = "0.5.9" +======= +>>>>>>> e05319ec (initial refactor) +name = "tendermint-light-client-verifier" +version = "0.23.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d82e1a7758622a465f8cee077614c73484dac5b836c02ff6a40d5d1010324d7" +checksum = "5ae030a759b89cca84860d497d4d4e491615d8a9243cc04c61cd89335ba9b593" dependencies = [ + "derive_more", + "flex-error", "serde", + "tendermint", + "time 0.3.9", ] [[package]] -name = "tonic" -version = "0.7.2" +name = "tendermint-proto" +version = "0.23.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5be9d60db39854b30b835107500cf0aca0b0d14d6e1c3de124217c23a29c2ddb" +checksum = "b71f925d74903f4abbdc4af0110635a307b3cb05b175fdff4a7247c14a4d0874" dependencies = [ - "async-stream", - "async-trait", - "axum", - "base64", "bytes", - "futures-core", - "futures-util", - "h2", - "http", - "http-body", - "hyper", - "hyper-timeout", - "percent-encoding", - "pin-project", + "flex-error", + "num-derive", + "num-traits", "prost", - "prost-derive", - "rustls-native-certs 0.6.2", - "rustls-pemfile", - "tokio", - "tokio-rustls 0.23.4", - "tokio-stream", - "tokio-util", - "tower", - "tower-layer", - "tower-service", - "tracing", - "tracing-futures", + "prost-types", + "serde", + "serde_bytes", + "subtle-encoding", + "time 0.3.9", ] [[package]] -name = "tower" -version = "0.4.12" +name = "tendermint-rpc" +version = "0.23.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a89fd63ad6adf737582df5db40d286574513c69a11dac5214dc3b5603d6713e" +checksum = "a13e63f57ee05a1e927887191c76d1b139de9fa40c180b9f8727ee44377242a6" dependencies = [ - "futures-core", - "futures-util", - "indexmap", + "async-trait", + "async-tungstenite", + "bytes", + "flex-error", + "futures", + "getrandom 0.2.6", + "http", + "hyper", + "hyper-proxy", + "hyper-rustls", + "peg", "pin-project", - "pin-project-lite", - "rand 0.8.5", - "slab", + "serde", + "serde_bytes", + "serde_json", + "subtle-encoding", + "tendermint", + "tendermint-config", + "tendermint-proto", + "thiserror", + "time 0.3.9", "tokio", - "tokio-util", - "tower-layer", - "tower-service", "tracing", + "url", + "uuid 
0.8.2", + "walkdir", ] [[package]] -name = "tower-http" -version = "0.3.3" +name = "tendermint-testgen" +version = "0.23.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d342c6d58709c0a6d48d48dabbb62d4ef955cf5f0f3bbfd845838e7ae88dbae" +checksum = "442ede2d01e61466e515fd7f1d0aac7c3c86b3066535479caa86a43afb5e2e17" dependencies = [ - "bitflags", - "bytes", - "futures-core", - "futures-util", - "http", - "http-body", - "http-range-header", - "pin-project-lite", - "tower", - "tower-layer", - "tower-service", + "ed25519-dalek", + "gumdrop", + "serde", + "serde_json", + "simple-error", + "tempfile", + "tendermint", + "time 0.3.9", ] [[package]] -name = "tower-layer" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "343bc9466d3fe6b0f960ef45960509f84480bf4fd96f92901afe7ff3df9d3a62" - -[[package]] -name = "tower-service" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6" - -[[package]] -name = "tracing" -version = "0.1.34" +name = "termcolor" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d0ecdcb44a79f0fe9844f0c4f33a342cbcbb5117de8001e6ba0dc2351327d09" +checksum = "bab24d30b911b2376f3a13cc2cd443142f0c81dda04c118693e35b3835757755" dependencies = [ - "cfg-if 1.0.0", - "log", - "pin-project-lite", - "tracing-attributes", - "tracing-core", + "winapi-util", ] [[package]] -name = "tracing-attributes" -version = "0.1.21" +name = "test-log" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc6b8ad3567499f98a1db7a752b07a7c8c7c7c34c332ec00effb2b0027974b7c" +checksum = "4235dbf7ea878b3ef12dea20a59c134b405a66aafc4fc2c7b9935916e289e735" dependencies = [ "proc-macro2", "quote", @@ -5256,584 +5730,1799 @@ dependencies = [ ] [[package]] -name = "tracing-core" -version = "0.1.26" +name = "textwrap" +version = "0.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f54c8ca710e81886d498c2fd3331b56c93aa248d49de2222ad2742247c60072f" -dependencies = [ - "lazy_static", - "valuable", -] +checksum = "b1141d4d61095b28419e22cb0bbf02755f5e54e0526f97f1e3d1d160e60885fb" [[package]] -name = "tracing-error" -version = "0.2.0" +name = "thiserror" +version = "1.0.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d686ec1c0f384b1277f097b2f279a2ecc11afe8c133c1aabf036a27cb4cd206e" +checksum = "bd829fe32373d27f76265620b5309d0340cb8550f523c1dda251d6298069069a" dependencies = [ - "tracing", - "tracing-subscriber 0.3.10", + "thiserror-impl", ] [[package]] -name = "tracing-futures" -version = "0.2.5" +name = "thiserror-impl" +version = "1.0.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97d095ae15e245a057c8e8451bab9b3ee1e1f68e9ba2b4fbc18d0ac5237835f2" +checksum = "0396bc89e626244658bef819e22d0cc459e795a5ebe878e6ec336d1674a8d79a" dependencies = [ - "pin-project", - "tracing", + "proc-macro2", + "quote", + "syn", ] [[package]] -name = "tracing-log" -version = "0.1.3" +name = "thread_local" +version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78ddad33d2d10b1ed7eb9d1f518a5674713876e97e5bb9b7345a7984fbb4f922" +checksum = "5516c27b78311c50bf42c071425c560ac799b11c30b31f87e3081965fe5e0180" dependencies = [ - "lazy_static", - "log", - "tracing-core", + "once_cell", ] [[package]] -name = "tracing-serde" -version = "0.1.3" 
+name = "time" +version = "0.1.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc6b213177105856957181934e4920de57730fc69bf42c37ee5bb664d406d9e1" +checksum = "ca8a50ef2360fbd1eeb0ecd46795a87a19024eb4b53c5dc916ca1fd95fe62438" dependencies = [ - "serde", - "tracing-core", + "libc", + "winapi", ] [[package]] -name = "tracing-subscriber" -version = "0.3.11" +name = "time" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bc28f93baff38037f64e6f43d34cfa1605f27a49c34e8a04c5e78b0babf2596" +checksum = "c2702e08a7a860f005826c6815dcac101b19b5eb330c27fe4a5928fec1d20ddd" dependencies = [ - "ansi_term", - "lazy_static", - "matchers 0.1.0", - "regex", - "serde", - "serde_json", - "sharded-slab", - "smallvec", - "thread_local", - "tracing", - "tracing-core", - "tracing-log", - "tracing-serde", + "libc", + "num_threads", + "time-macros", ] [[package]] -name = "trie-db" -version = "0.23.1" +name = "time-macros" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d32d034c0d3db64b43c31de38e945f15b40cd4ca6d2dcfc26d4798ce8de4ab83" -dependencies = [ - "hash-db", - "hashbrown 0.12.0", - "log", - "rustc-hex", - "smallvec", -] +checksum = "42657b1a6f4d817cda8e7a0ace261fe0cc946cf3a80314390b22cc61ae080792" [[package]] -name = "trie-root" -version = "0.17.0" +name = "tiny-bip39" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a36c5ca3911ed3c9a5416ee6c679042064b93fc637ded67e25f92e68d783891" +checksum = "ffc59cb9dfc85bb312c3a78fd6aa8a8582e310b0fa885d5bb877f6dcc601839d" dependencies = [ - "hash-db", + "anyhow", + "hmac 0.8.1", + "once_cell", + "pbkdf2 0.4.0", + "rand 0.7.3", + "rustc-hash", + "sha2 0.9.9", + "thiserror", + "unicode-normalization", + "wasm-bindgen", + "zeroize", ] [[package]] -name = "triomphe" -version = "0.1.5" +name = "tiny-keccak" +version = "2.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c45e322b26410d7260e00f64234810c2f17d7ece356182af4df8f7ff07890f09" +checksum = "2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237" dependencies = [ - "memoffset 0.6.5", + "crunchy", ] [[package]] -name = "try-lock" -version = "0.2.3" +name = "tinyvec" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" +checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" +dependencies = [ + "tinyvec_macros", +] [[package]] -name = "tt-call" -version = "1.0.8" +name = "tinyvec_macros" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e66dcbec4290c69dd03c57e76c2469ea5c7ce109c6dd4351c13055cf71ea055" +checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] -name = "tungstenite" -version = "0.12.0" +name = "tokio" +version = "1.18.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ada8297e8d70872fa9a551d93250a9f407beb9f37ef86494eb20012a2ff7c24" +checksum = "4903bf0427cf68dddd5aa6a93220756f8be0c34fcfa9f5e6191e103e15a31395" dependencies = [ - "base64", - "byteorder", "bytes", - "http", - "httparse", - "input_buffer", - "log", - "rand 0.8.5", - "sha-1 0.9.8", - "url", - "utf-8", + "libc", + "memchr", + "mio", + "num_cpus", + "once_cell", + "pin-project-lite", + "socket2", + "tokio-macros", + "winapi", ] [[package]] -name = "twoway" -version = "0.1.8" +name = 
"tokio-io-timeout" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59b11b2b5241ba34be09c3cc85a36e56e48f9888862e19cedf23336d35316ed1" +checksum = "30b74022ada614a1b4834de765f9bb43877f910cc8ce4be40e89042c9223a8bf" dependencies = [ - "memchr", + "pin-project-lite", + "tokio", ] [[package]] -name = "twox-hash" -version = "1.6.2" +name = "tokio-macros" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ee73e6e4924fe940354b8d4d98cad5231175d615cd855b758adc658c0aac6a0" +checksum = "b557f72f448c511a979e2564e55d74e6c4432fc96ff4f6241bc6bded342643b7" dependencies = [ - "cfg-if 1.0.0", - "rand 0.8.5", - "static_assertions", -] - -[[package]] -name = "typenum" -version = "1.15.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987" + "proc-macro2", + "quote", + "syn", +] [[package]] -name = "uint" -version = "0.9.3" +name = "tokio-rustls" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12f03af7ccf01dd611cc450a0d10dbc9b745770d096473e2faf0ca6e2d66d1e0" +checksum = "bc6844de72e57df1980054b38be3a9f4702aba4858be64dd700181a8a6d0e1b6" dependencies = [ - "byteorder", - "crunchy", - "hex", - "static_assertions", + "rustls 0.19.1", + "tokio", + "webpki 0.21.4", ] [[package]] -name = "unicase" -version = "2.6.0" +name = "tokio-rustls" +version = "0.23.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50f37be617794602aabbeee0be4f259dc1778fabe05e2d67ee8f79326d5cb4f6" +checksum = "c43ee83903113e03984cb9e5cebe6c04a5116269e900e3ddba8f068a62adda59" dependencies = [ - "version_check", + "rustls 0.20.6", + "tokio", + "webpki 0.22.0", ] [[package]] -name = "unicode-bidi" -version = "0.3.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "099b7128301d285f79ddd55b9a83d5e6b9e97c92e0ea0daebee7263e932de992" - -[[package]] -name = "unicode-ident" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d22af068fba1eb5edcb4aea19d382b2a3deb4c8f9d475c589b6ada9e0fd493ee" - -[[package]] -name = "unicode-normalization" -version = "0.1.19" +name = "tokio-stream" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d54590932941a9e9266f0832deed84ebe1bf2e4c9e4a3554d393d18f5e854bf9" +checksum = "50145484efff8818b5ccd256697f36863f587da82cf8b409c53adf1e840798e3" dependencies = [ - "tinyvec", + "futures-core", + "pin-project-lite", + "tokio", ] [[package]] -name = "unicode-width" -version = "0.1.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ed742d4ea2bd1176e236172c8429aaf54486e7ac098db29ffe6529e0ce50973" - -[[package]] -name = "unicode-xid" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "957e51f3646910546462e67d5f7599b9e4fb8acdd304b087a6494730f9eebf04" - -[[package]] -name = "untrusted" -version = "0.7.1" +name = "tokio-util" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" +checksum = "f988a1a1adc2fb21f9c12aa96441da33a1728193ae0b95d2be22dbd17fcb4e5c" +dependencies = [ + "bytes", + "futures-core", + "futures-sink", + "pin-project-lite", + "tokio", + "tracing", +] [[package]] -name = "ureq" -version = "2.4.0" +name = "toml" +version = "0.5.9" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "9399fa2f927a3d327187cbd201480cee55bee6ac5d3c77dd27f0c6814cff16d5" +checksum = "8d82e1a7758622a465f8cee077614c73484dac5b836c02ff6a40d5d1010324d7" dependencies = [ - "base64", - "chunked_transfer", - "flate2", - "log", - "once_cell", - "rustls 0.20.6", - "url", - "webpki 0.22.0", - "webpki-roots 0.22.3", + "serde", ] [[package]] -name = "url" -version = "2.2.2" +name = "tonic" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a507c383b2d33b5fc35d1861e77e6b383d158b2da5e14fe51b83dfedf6fd578c" +checksum = "5be9d60db39854b30b835107500cf0aca0b0d14d6e1c3de124217c23a29c2ddb" dependencies = [ - "form_urlencoded", - "idna", - "matches", + "async-stream", + "async-trait", + "axum", + "base64", + "bytes", + "futures-core", + "futures-util", + "h2", + "http", + "http-body", + "hyper", + "hyper-timeout", "percent-encoding", + "pin-project", + "prost", + "prost-derive", +<<<<<<< HEAD + "rustls-native-certs 0.6.2", + "rustls-pemfile", + "tokio", + "tokio-rustls 0.23.4", +======= + "tokio", +>>>>>>> e05319ec (initial refactor) + "tokio-stream", + "tokio-util", + "tower", + "tower-layer", + "tower-service", + "tracing", + "tracing-futures", ] [[package]] -name = "utf-8" -version = "0.7.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" - -[[package]] -name = "uuid" -version = "0.8.2" +name = "tower" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" +checksum = "9a89fd63ad6adf737582df5db40d286574513c69a11dac5214dc3b5603d6713e" dependencies = [ - "getrandom 0.2.6", + "futures-core", + "futures-util", + "indexmap", + "pin-project", + "pin-project-lite", + "rand 0.8.5", + "slab", + "tokio", + "tokio-util", + "tower-layer", + "tower-service", + "tracing", ] [[package]] -name = "uuid" -version = "1.1.1" +name = "tower-http" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6d5d669b51467dcf7b2f1a796ce0f955f05f01cafda6c19d6e95f730df29238" +checksum = "7d342c6d58709c0a6d48d48dabbb62d4ef955cf5f0f3bbfd845838e7ae88dbae" dependencies = [ - "getrandom 0.2.6", + "bitflags", + "bytes", + "futures-core", + "futures-util", + "http", + "http-body", + "http-range-header", + "pin-project-lite", + "tower", + "tower-layer", + "tower-service", ] [[package]] -name = "valuable" -version = "0.1.0" +name = "tower-layer" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" +checksum = "343bc9466d3fe6b0f960ef45960509f84480bf4fd96f92901afe7ff3df9d3a62" [[package]] -name = "version_check" -version = "0.9.4" +name = "tower-service" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" +checksum = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6" [[package]] -name = "wait-timeout" -version = "0.2.0" +name = "tracing" +version = "0.1.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f200f5b12eb75f8c1ed65abd4b2db8a6e1b138a20de009dacee265a2498f3f6" +checksum = "5d0ecdcb44a79f0fe9844f0c4f33a342cbcbb5117de8001e6ba0dc2351327d09" dependencies = [ - "libc", + "cfg-if 1.0.0", + "log", + "pin-project-lite", + 
"tracing-attributes", + "tracing-core", ] [[package]] -name = "walkdir" -version = "2.3.2" +name = "tracing-attributes" +version = "0.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "808cf2735cd4b6866113f648b791c6adc5714537bc222d9347bb203386ffda56" +checksum = "cc6b8ad3567499f98a1db7a752b07a7c8c7c7c34c332ec00effb2b0027974b7c" dependencies = [ - "same-file", - "winapi", - "winapi-util", + "proc-macro2", + "quote", + "syn", ] [[package]] -name = "want" -version = "0.3.0" +name = "tracing-core" +version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ce8a968cb1cd110d136ff8b819a556d6fb6d919363c61534f6860c7eb172ba0" +checksum = "f54c8ca710e81886d498c2fd3331b56c93aa248d49de2222ad2742247c60072f" dependencies = [ - "log", - "try-lock", + "lazy_static", + "valuable", ] [[package]] -name = "wasi" -version = "0.9.0+wasi-snapshot-preview1" +<<<<<<< HEAD +name = "tracing-error" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" +checksum = "d686ec1c0f384b1277f097b2f279a2ecc11afe8c133c1aabf036a27cb4cd206e" +dependencies = [ + "tracing", + "tracing-subscriber 0.3.10", +] [[package]] -name = "wasi" -version = "0.10.2+wasi-snapshot-preview1" +======= +>>>>>>> e05319ec (initial refactor) +name = "tracing-futures" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" +checksum = "97d095ae15e245a057c8e8451bab9b3ee1e1f68e9ba2b4fbc18d0ac5237835f2" +dependencies = [ + "pin-project", + "tracing", +] [[package]] -name = "wasi" -version = "0.11.0+wasi-snapshot-preview1" +name = "tracing-log" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" +checksum = "78ddad33d2d10b1ed7eb9d1f518a5674713876e97e5bb9b7345a7984fbb4f922" +dependencies = [ + "lazy_static", + "log", + "tracing-core", +] [[package]] -name = "wasm-bindgen" -version = "0.2.80" +name = "tracing-serde" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "27370197c907c55e3f1a9fbe26f44e937fe6451368324e009cba39e139dc08ad" +checksum = "bc6b213177105856957181934e4920de57730fc69bf42c37ee5bb664d406d9e1" dependencies = [ - "cfg-if 1.0.0", - "wasm-bindgen-macro", + "serde", + "tracing-core", ] [[package]] -name = "wasm-bindgen-backend" -version = "0.2.80" +name = "tracing-subscriber" +version = "0.2.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53e04185bfa3a779273da532f5025e33398409573f348985af9a1cbf3774d3f4" +checksum = "0e0d2eaa99c3c2e41547cfa109e910a68ea03823cccad4a0525dcbc9b01e8c71" dependencies = [ - "bumpalo", + "ansi_term", + "chrono", "lazy_static", - "log", - "proc-macro2", - "quote", - "syn", - "wasm-bindgen-shared", + "matchers 0.0.1", + "regex", + "serde", + "serde_json", + "sharded-slab", + "smallvec", + "thread_local", + "tracing", + "tracing-core", + "tracing-log", + "tracing-serde", ] [[package]] -name = "wasm-bindgen-macro" -version = "0.2.80" +name = "tracing-subscriber" +version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17cae7ff784d7e83a2fe7611cfe766ecf034111b49deb850a3dc7699c08251f5" +checksum = "4bc28f93baff38037f64e6f43d34cfa1605f27a49c34e8a04c5e78b0babf2596" dependencies = [ - "quote", - "wasm-bindgen-macro-support", 
+ "ansi_term", + "lazy_static", + "matchers 0.1.0", + "regex", + "serde", + "serde_json", + "sharded-slab", + "smallvec", + "thread_local", + "tracing", + "tracing-core", + "tracing-log", + "tracing-serde", ] [[package]] -name = "wasm-bindgen-macro-support" -version = "0.2.80" +name = "trie-db" +version = "0.23.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99ec0dc7a4756fffc231aab1b9f2f578d23cd391390ab27f952ae0c9b3ece20b" +checksum = "d32d034c0d3db64b43c31de38e945f15b40cd4ca6d2dcfc26d4798ce8de4ab83" dependencies = [ - "proc-macro2", - "quote", - "syn", - "wasm-bindgen-backend", - "wasm-bindgen-shared", + "hash-db", +<<<<<<< HEAD + "hashbrown 0.12.0", +======= + "hashbrown 0.12.1", +>>>>>>> e05319ec (initial refactor) + "log", + "rustc-hex", + "smallvec", ] [[package]] -name = "wasm-bindgen-shared" -version = "0.2.80" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d554b7f530dee5964d9a9468d95c1f8b8acae4f282807e7d27d4b03099a46744" - -[[package]] -name = "wasmi" -version = "0.9.1" +name = "trie-root" +version = "0.17.0" +<<<<<<< HEAD source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca00c5147c319a8ec91ec1a0edbec31e566ce2c9cc93b3f9bb86a9efd0eb795d" +checksum = "9a36c5ca3911ed3c9a5416ee6c679042064b93fc637ded67e25f92e68d783891" dependencies = [ - "downcast-rs", - "libc", - "memory_units", - "num-rational 0.2.4", - "num-traits", - "parity-wasm", - "wasmi-validation", + "hash-db", ] [[package]] -name = "wasmi-validation" -version = "0.4.1" +name = "triomphe" +version = "0.1.5" +======= +>>>>>>> e05319ec (initial refactor) source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "165343ecd6c018fc09ebcae280752702c9a2ef3e6f8d02f1cfcbdb53ef6d7937" +checksum = "9a36c5ca3911ed3c9a5416ee6c679042064b93fc637ded67e25f92e68d783891" dependencies = [ - "parity-wasm", + "hash-db", ] [[package]] -name = "web-sys" -version = "0.3.57" +name = "try-lock" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b17e741662c70c8bd24ac5c5b18de314a2c26c32bf8346ee1e6f53de919c283" -dependencies = [ - "js-sys", - "wasm-bindgen", -] +checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" [[package]] -name = "webpki" -version = "0.21.4" +name = "tt-call" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8e38c0608262c46d4a56202ebabdeb094cef7e560ca7a226c6bf055188aa4ea" -dependencies = [ - "ring", - "untrusted", -] +checksum = "5e66dcbec4290c69dd03c57e76c2469ea5c7ce109c6dd4351c13055cf71ea055" [[package]] -name = "webpki" -version = "0.22.0" +name = "tungstenite" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f095d78192e208183081cc07bc5515ef55216397af48b873e5edcd72637fa1bd" +checksum = "8ada8297e8d70872fa9a551d93250a9f407beb9f37ef86494eb20012a2ff7c24" dependencies = [ - "ring", - "untrusted", + "base64", + "byteorder", + "bytes", + "http", + "httparse", + "input_buffer", + "log", + "rand 0.8.5", + "sha-1 0.9.8", + "url", + "utf-8", ] [[package]] -name = "webpki-roots" -version = "0.21.1" +name = "twox-hash" +version = "1.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aabe153544e473b775453675851ecc86863d2a81d786d741f6b76778f2a48940" +checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675" dependencies = [ - "webpki 0.21.4", + "cfg-if 1.0.0", + "rand 0.8.5", + "static_assertions", ] [[package]] -name = 
"webpki-roots" -version = "0.22.3" +name = "twox-hash" +version = "1.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44d8de8415c823c8abd270ad483c6feeac771fad964890779f9a8cb24fbbc1bf" +checksum = "4ee73e6e4924fe940354b8d4d98cad5231175d615cd855b758adc658c0aac6a0" dependencies = [ - "webpki 0.22.0", + "cfg-if 1.0.0", + "rand 0.8.5", + "static_assertions", ] [[package]] -name = "winapi" -version = "0.3.9" +name = "typenum" +version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" -dependencies = [ - "winapi-i686-pc-windows-gnu", - "winapi-x86_64-pc-windows-gnu", -] +checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987" [[package]] -name = "winapi-i686-pc-windows-gnu" -version = "0.4.0" +name = "uint" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" +checksum = "12f03af7ccf01dd611cc450a0d10dbc9b745770d096473e2faf0ca6e2d66d1e0" +dependencies = [ + "byteorder", + "crunchy", + "hex", + "static_assertions", +] [[package]] -name = "winapi-util" -version = "0.1.5" +name = "unicode-bidi" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" -dependencies = [ - "winapi", -] +checksum = "099b7128301d285f79ddd55b9a83d5e6b9e97c92e0ea0daebee7263e932de992" [[package]] -name = "winapi-x86_64-pc-windows-gnu" -version = "0.4.0" +name = "unicode-ident" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" +checksum = "d22af068fba1eb5edcb4aea19d382b2a3deb4c8f9d475c589b6ada9e0fd493ee" [[package]] -name = "windows-sys" -version = "0.36.1" +name = "unicode-normalization" +version = "0.1.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea04155a16a59f9eab786fe12a4a450e75cdb175f9e0d80da1e17db09f55b8d2" +checksum = "d54590932941a9e9266f0832deed84ebe1bf2e4c9e4a3554d393d18f5e854bf9" dependencies = [ - "windows_aarch64_msvc", - "windows_i686_gnu", - "windows_i686_msvc", - "windows_x86_64_gnu", - "windows_x86_64_msvc", + "tinyvec", ] [[package]] -name = "windows_aarch64_msvc" -version = "0.36.1" +name = "unicode-xid" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9bb8c3fd39ade2d67e9874ac4f3db21f0d710bee00fe7cab16949ec184eeaa47" +checksum = "957e51f3646910546462e67d5f7599b9e4fb8acdd304b087a6494730f9eebf04" [[package]] -name = "windows_i686_gnu" -version = "0.36.1" +name = "untrusted" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "180e6ccf01daf4c426b846dfc66db1fc518f074baa793aa7d9b9aaeffad6a3b6" +checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" [[package]] -name = "windows_i686_msvc" -version = "0.36.1" +name = "ureq" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2e7917148b2812d1eeafaeb22a97e4813dfa60a3f8f78ebe204bcc88f12f024" +checksum = "9399fa2f927a3d327187cbd201480cee55bee6ac5d3c77dd27f0c6814cff16d5" +dependencies = [ + "base64", + "chunked_transfer", + "flate2", + "log", + "once_cell", + "rustls 0.20.6", + "url", + "webpki 0.22.0", + "webpki-roots 0.22.3", +] [[package]] -name = "windows_x86_64_gnu" -version = "0.36.1" 
+name = "url" +version = "2.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4dcd171b8776c41b97521e5da127a2d86ad280114807d0b2ab1e462bc764d9e1" +checksum = "a507c383b2d33b5fc35d1861e77e6b383d158b2da5e14fe51b83dfedf6fd578c" +dependencies = [ + "form_urlencoded", + "idna", + "matches", + "percent-encoding", +] [[package]] -name = "windows_x86_64_msvc" -version = "0.36.1" +name = "utf-8" +version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c811ca4a8c853ef420abd8592ba53ddbbac90410fab6903b3e79972a631f7680" +checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" [[package]] -name = "wyz" -version = "0.2.0" +name = "uuid" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85e60b0d1b5f99db2556934e21937020776a5d31520bf169e851ac44e6420214" +checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" [[package]] -name = "yaml-rust" -version = "0.4.5" +name = "uuid" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56c1936c4cc7a1c9ab21a1ebb602eb942ba868cbd44a99cb7cdc5892335e1c85" +checksum = "c6d5d669b51467dcf7b2f1a796ce0f955f05f01cafda6c19d6e95f730df29238" dependencies = [ - "linked-hash-map", + "getrandom 0.2.6", ] [[package]] -name = "zeroize" -version = "1.5.5" +name = "valuable" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94693807d016b2f2d2e14420eb3bfcca689311ff775dcf113d74ea624b7cdf07" +checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" + +[[package]] +name = "version_check" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" + +[[package]] +name = "walkdir" +version = "2.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "808cf2735cd4b6866113f648b791c6adc5714537bc222d9347bb203386ffda56" dependencies = [ - "zeroize_derive", + "same-file", + "winapi", + "winapi-util", ] [[package]] -name = "zeroize_derive" -version = "1.3.2" +name = "want" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f8f187641dad4f680d25c4bfc4225b418165984179f26ca76ec4fb6441d3a17" +checksum = "1ce8a968cb1cd110d136ff8b819a556d6fb6d919363c61534f6860c7eb172ba0" +dependencies = [ + "log", + "try-lock", +] + +[[package]] +name = "wasi" +version = "0.9.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" + +[[package]] +name = "wasi" +version = "0.10.2+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" + +[[package]] +name = "wasi" +version = "0.11.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" + +[[package]] +name = "wasm-bindgen" +version = "0.2.80" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "27370197c907c55e3f1a9fbe26f44e937fe6451368324e009cba39e139dc08ad" +dependencies = [ + "cfg-if 1.0.0", + "wasm-bindgen-macro", +] + +[[package]] +name = "wasm-bindgen-backend" +version = "0.2.80" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"53e04185bfa3a779273da532f5025e33398409573f348985af9a1cbf3774d3f4" +dependencies = [ + "bumpalo", + "lazy_static", + "log", + "proc-macro2", + "quote", + "syn", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.80" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17cae7ff784d7e83a2fe7611cfe766ecf034111b49deb850a3dc7699c08251f5" +dependencies = [ + "quote", + "wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.80" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "99ec0dc7a4756fffc231aab1b9f2f578d23cd391390ab27f952ae0c9b3ece20b" dependencies = [ "proc-macro2", "quote", "syn", - "synstructure", + "wasm-bindgen-backend", + "wasm-bindgen-shared", ] [[package]] -name = "zip" -version = "0.5.13" +name = "wasm-bindgen-shared" +version = "0.2.80" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93ab48844d61251bb3835145c521d88aa4031d7139e8485990f60ca911fa0815" +checksum = "d554b7f530dee5964d9a9468d95c1f8b8acae4f282807e7d27d4b03099a46744" + +[[package]] +name = "wasmi" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca00c5147c319a8ec91ec1a0edbec31e566ce2c9cc93b3f9bb86a9efd0eb795d" dependencies = [ - "byteorder", - "bzip2", - "crc32fast", - "flate2", - "thiserror", - "time 0.1.43", + "downcast-rs", + "libc", + "memory_units", + "num-rational 0.2.4", + "num-traits", + "parity-wasm", + "wasmi-validation", +] + +[[package]] +name = "wasmi-validation" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "165343ecd6c018fc09ebcae280752702c9a2ef3e6f8d02f1cfcbdb53ef6d7937" +dependencies = [ + "parity-wasm", +] + +[[package]] +name = "web-sys" +version = "0.3.57" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b17e741662c70c8bd24ac5c5b18de314a2c26c32bf8346ee1e6f53de919c283" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "webpki" +version = "0.21.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8e38c0608262c46d4a56202ebabdeb094cef7e560ca7a226c6bf055188aa4ea" +dependencies = [ + "ring", + "untrusted", +] + +[[package]] +name = "webpki" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f095d78192e208183081cc07bc5515ef55216397af48b873e5edcd72637fa1bd" +dependencies = [ + "ring", + "untrusted", +] + +[[package]] +name = "webpki-roots" +version = "0.21.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aabe153544e473b775453675851ecc86863d2a81d786d741f6b76778f2a48940" +dependencies = [ + "webpki 0.21.4", +] + +[[package]] +name = "webpki-roots" +version = "0.22.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44d8de8415c823c8abd270ad483c6feeac771fad964890779f9a8cb24fbbc1bf" +dependencies = [ + "webpki 0.22.0", +] + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-util" +version = 
"0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" +dependencies = [ + "winapi", +] + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + +[[package]] +<<<<<<< HEAD +name = "windows-sys" +version = "0.36.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea04155a16a59f9eab786fe12a4a450e75cdb175f9e0d80da1e17db09f55b8d2" +dependencies = [ + "windows_aarch64_msvc", + "windows_i686_gnu", + "windows_i686_msvc", + "windows_x86_64_gnu", + "windows_x86_64_msvc", ] + +[[package]] +name = "windows_aarch64_msvc" +version = "0.36.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9bb8c3fd39ade2d67e9874ac4f3db21f0d710bee00fe7cab16949ec184eeaa47" + +[[package]] +name = "windows_i686_gnu" +version = "0.36.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "180e6ccf01daf4c426b846dfc66db1fc518f074baa793aa7d9b9aaeffad6a3b6" + +[[package]] +name = "windows_i686_msvc" +version = "0.36.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2e7917148b2812d1eeafaeb22a97e4813dfa60a3f8f78ebe204bcc88f12f024" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.36.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4dcd171b8776c41b97521e5da127a2d86ad280114807d0b2ab1e462bc764d9e1" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.36.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c811ca4a8c853ef420abd8592ba53ddbbac90410fab6903b3e79972a631f7680" + +[[package]] +name = "wyz" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85e60b0d1b5f99db2556934e21937020776a5d31520bf169e851ac44e6420214" + +[[package]] +name = "yaml-rust" +version = "0.4.5" +======= +name = "wyz" +version = "0.2.0" +>>>>>>> e05319ec (initial refactor) +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85e60b0d1b5f99db2556934e21937020776a5d31520bf169e851ac44e6420214" + +[[package]] +name = "zeroize" +version = "1.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94693807d016b2f2d2e14420eb3bfcca689311ff775dcf113d74ea624b7cdf07" +dependencies = [ + "zeroize_derive", +] + +[[package]] +name = "zeroize_derive" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f8f187641dad4f680d25c4bfc4225b418165984179f26ca76ec4fb6441d3a17" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "synstructure", +] + +<<<<<<< HEAD +[[package]] +name = "zip" +version = "0.5.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93ab48844d61251bb3835145c521d88aa4031d7139e8485990f60ca911fa0815" +dependencies = [ + "byteorder", + "bzip2", + "crc32fast", + "flate2", + "thiserror", + "time 0.1.43", +] +======= +[[patch.unused]] +name = "beefy-gadget" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "beefy-gadget-rpc" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "chain-spec-builder" 
+version = "2.0.0" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "fork-tree" +version = "3.0.0" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "frame-benchmarking-cli" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "frame-election-provider-support" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "frame-executive" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "frame-support-test" +version = "3.0.0" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "frame-support-test-compile-pass" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "frame-support-test-pallet" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "frame-system-benchmarking" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "frame-system-rpc-runtime-api" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "frame-try-runtime" +version = "0.10.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "generate-bags" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "node-bench" +version = "0.9.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "node-cli" +version = "3.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "node-executor" +version = "3.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "node-inspect" +version = "0.9.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "node-primitives" +version = "2.0.0" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "node-rpc" +version = "3.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "node-runtime" 
+version = "3.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "node-runtime-generate-bags" +version = "3.0.0" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "node-template" +version = "3.0.0" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "node-template-runtime" +version = "3.0.0" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "node-testing" +version = "3.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "pallet-asset-tx-payment" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "pallet-assets" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "pallet-atomic-swap" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "pallet-aura" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "pallet-authority-discovery" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "pallet-authorship" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "pallet-babe" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "pallet-bags-list" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "pallet-bags-list-fuzzer" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "pallet-bags-list-remote-tests" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "pallet-balances" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "pallet-bounties" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "pallet-child-bounties" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = 
"pallet-collective" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "pallet-contracts" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "pallet-contracts-primitives" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "pallet-contracts-proc-macro" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "pallet-contracts-rpc" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "pallet-contracts-rpc-runtime-api" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "pallet-democracy" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "pallet-election-provider-multi-phase" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "pallet-elections-phragmen" +version = "5.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "pallet-example-basic" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "pallet-example-offchain-worker" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "pallet-example-parallel" +version = "3.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "pallet-gilt" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "pallet-grandpa" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "pallet-identity" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "pallet-im-online" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "pallet-indices" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "pallet-lottery" +version = "4.0.0-dev" +source = 
"git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "pallet-membership" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "pallet-mmr-rpc" +version = "3.0.0" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "pallet-multisig" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "pallet-nicks" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "pallet-node-authorization" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "pallet-offences" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "pallet-offences-benchmarking" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "pallet-preimage" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "pallet-proxy" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "pallet-randomness-collective-flip" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "pallet-recovery" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "pallet-scheduler" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "pallet-scored-pool" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "pallet-session-benchmarking" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "pallet-society" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "pallet-staking" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "pallet-staking-reward-curve" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = 
"pallet-staking-reward-fn" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "pallet-sudo" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "pallet-template" +version = "3.0.0" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "pallet-tips" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "pallet-transaction-payment" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "pallet-transaction-payment-rpc" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "pallet-transaction-payment-rpc-runtime-api" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "pallet-transaction-storage" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "pallet-treasury" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "pallet-uniques" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "pallet-utility" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "pallet-vesting" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "remote-externalities" +version = "0.10.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "sc-allocator" +version = "4.1.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "sc-authority-discovery" +version = "0.10.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "sc-basic-authorship" +version = "0.10.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "sc-block-builder" +version = "0.10.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "sc-chain-spec" +version = "4.0.0-dev" +source = 
"git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "sc-chain-spec-derive" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "sc-cli" +version = "0.10.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "sc-client-api" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "sc-client-db" +version = "0.10.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "sc-consensus" +version = "0.10.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "sc-consensus-aura" +version = "0.10.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "sc-consensus-babe" +version = "0.10.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "sc-consensus-babe-rpc" +version = "0.10.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "sc-consensus-epochs" +version = "0.10.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "sc-consensus-manual-seal" +version = "0.10.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "sc-consensus-pow" +version = "0.10.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "sc-consensus-slots" +version = "0.10.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "sc-consensus-uncles" +version = "0.10.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "sc-executor" +version = "0.10.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "sc-executor-common" +version = "0.10.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "sc-executor-wasmi" +version = "0.10.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "sc-executor-wasmtime" +version = "0.10.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "sc-finality-grandpa" +version = 
"0.10.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "sc-finality-grandpa-rpc" +version = "0.10.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "sc-informant" +version = "0.10.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "sc-keystore" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "sc-network" +version = "0.10.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "sc-network-gossip" +version = "0.10.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "sc-network-test" +version = "0.8.0" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "sc-offchain" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "sc-peerset" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "sc-proposer-metrics" +version = "0.10.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "sc-rpc" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "sc-rpc-api" +version = "0.10.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "sc-rpc-server" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "sc-runtime-test" +version = "2.0.0" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "sc-service" +version = "0.10.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "sc-service-test" +version = "2.0.0" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "sc-state-db" +version = "0.10.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "sc-sync-state-rpc" +version = "0.10.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "sc-telemetry" +version = "4.0.0-dev" +source = 
"git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "sc-tracing" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "sc-tracing-proc-macro" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "sc-transaction-pool" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "sc-transaction-pool-api" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "sc-utils" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "sp-api-test" +version = "2.0.1" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "sp-application-crypto-test" +version = "2.0.0" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "sp-arithmetic-fuzzer" +version = "2.0.0" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "sp-authority-discovery" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "sp-authorship" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "sp-block-builder" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "sp-blockchain" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "sp-consensus" +version = "0.10.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "sp-consensus-aura" +version = "0.10.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "sp-consensus-babe" +version = "0.10.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "sp-consensus-pow" +version = "0.10.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "sp-consensus-slots" +version = "0.10.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "sp-consensus-vrf" +version = "0.10.0-dev" +source = 
"git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "sp-database" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "sp-finality-grandpa" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "sp-keyring" +version = "4.1.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "sp-maybe-compressed-blob" +version = "4.1.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "sp-npos-elections" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "sp-npos-elections-fuzzer" +version = "2.0.0-alpha.5" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "sp-npos-elections-solution-type" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "sp-offchain" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "sp-rpc" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "sp-runtime-interface-test" +version = "2.0.0" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "sp-runtime-interface-test-wasm" +version = "2.0.0" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "sp-runtime-interface-test-wasm-deprecated" +version = "2.0.0" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "sp-sandbox" +version = "0.10.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "sp-serializer" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "sp-tasks" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "sp-test-primitives" +version = "2.0.0" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "sp-transaction-pool" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = 
"sp-transaction-storage-proof" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "subkey" +version = "2.0.1" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "substrate-build-script-utils" +version = "3.0.0" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "substrate-frame-cli" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "substrate-frame-rpc-support" +version = "3.0.0" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "substrate-frame-rpc-system" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "substrate-prometheus-endpoint" +version = "0.10.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "substrate-test-client" +version = "2.0.1" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "substrate-test-runtime" +version = "2.0.0" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "substrate-test-runtime-client" +version = "2.0.0" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "substrate-test-runtime-transaction-pool" +version = "2.0.0" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "substrate-test-utils" +version = "4.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "substrate-test-utils-derive" +version = "0.10.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "substrate-test-utils-test-crate" +version = "0.1.0" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "substrate-wasm-builder" +version = "5.0.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + +[[patch.unused]] +name = "try-runtime-cli" +version = "0.10.0-dev" +source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +>>>>>>> e05319ec (initial refactor) diff --git a/Cargo.toml b/Cargo.toml index bb2edb8e41..1676ee2110 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -4,13 +4,7 @@ resolver = "2" members = [ "modules", - "relayer", - "relayer-cli", - "relayer-rest", - "telemetry", - "proto", - "tools/integration-test", - "tools/test-framework", 
+ "proto" ] exclude = [ diff --git a/modules/src/clients/ics07_tendermint/client_def.rs b/modules/src/clients/ics07_tendermint/client_def.rs index 9f81f1c2d7..cb23019ce6 100644 --- a/modules/src/clients/ics07_tendermint/client_def.rs +++ b/modules/src/clients/ics07_tendermint/client_def.rs @@ -11,7 +11,7 @@ use crate::clients::ics07_tendermint::consensus_state::ConsensusState; use crate::clients::ics07_tendermint::error::Error; use crate::clients::ics07_tendermint::header::Header; use crate::core::ics02_client::client_consensus::AnyConsensusState; -use crate::core::ics02_client::client_def::ClientDef; +use crate::core::ics02_client::client_def::{ClientDef, ConsensusUpdateResult}; use crate::core::ics02_client::client_state::AnyClientState; use crate::core::ics02_client::client_type::ClientType; use crate::core::ics02_client::context::ClientReader; @@ -46,13 +46,13 @@ impl ClientDef for TendermintClient { type ClientState = ClientState; type ConsensusState = ConsensusState; - fn check_header_and_update_state( + fn verify_header( &self, ctx: &dyn ClientReader, client_id: ClientId, client_state: Self::ClientState, header: Self::Header, - ) -> Result<(Self::ClientState, Self::ConsensusState), Ics02Error> { + ) -> Result<(), Ics02Error> { if header.height().revision_number != client_state.chain_id.version() { return Err(Ics02Error::tendermint_handler_error( Error::mismatched_revisions( @@ -62,25 +62,6 @@ impl ClientDef for TendermintClient { )); } - // Check if a consensus state is already installed; if so it should - // match the untrusted header. - let header_consensus_state = ConsensusState::from(header.clone()); - let existing_consensus_state = - match ctx.maybe_consensus_state(&client_id, header.height())? { - Some(cs) => { - let cs = downcast_consensus_state(cs)?; - // If this consensus state matches, skip verification - // (optimization) - if cs == header_consensus_state { - // Header is already installed and matches the incoming - // header (already verified) - return Ok((client_state, cs)); - } - Some(cs) - } - None => None, - }; - let trusted_consensus_state = downcast_consensus_state(ctx.consensus_state(&client_id, header.trusted_height)?)?; @@ -133,12 +114,68 @@ impl ClientDef for TendermintClient { } } + Ok(()) + } + + fn update_state( + &self, + _ctx: &dyn ClientReader, + _client_id: ClientId, + client_state: Self::ClientState, + header: Self::Header, + ) -> Result<(Self::ClientState, ConsensusUpdateResult), Ics02Error> { + let header_consensus_state = ConsensusState::from(header.clone()); + Ok(( + client_state.with_header(header), + ConsensusUpdateResult::Single(AnyConsensusState::Tendermint( + header_consensus_state.into(), + )), + )) + } + + fn update_state_on_misbehaviour( + &self, + client_state: Self::ClientState, + header: Self::Header, + ) -> Result { + client_state + .with_frozen_height(header.height()) + .map_err(|e| e.into()) + } + + fn check_for_misbehaviour( + &self, + ctx: &dyn ClientReader, + client_id: ClientId, + client_state: Self::ClientState, + header: Self::Header, + ) -> Result { + // Check if a consensus state is already installed; if so it should + // match the untrusted header. + let header_consensus_state = ConsensusState::from(header.clone()); + + let existing_consensus_state = + match ctx.maybe_consensus_state(&client_id, header.height())? 
{ + Some(cs) => { + let cs = downcast_consensus_state(cs)?; + // If this consensus state matches, skip verification + // (optimization) + if cs == header_consensus_state { + // Header is already installed and matches the incoming + // header (already verified) + return Ok(false); + } + Some(cs) + } + None => None, + }; + // If the header has verified, but its corresponding consensus state // differs from the existing consensus state for that height, freeze the // client and return the installed consensus state. if let Some(cs) = existing_consensus_state { if cs != header_consensus_state { - return Ok((client_state.with_frozen_height(header.height())?, cs)); + return Ok(true); } } @@ -146,7 +183,7 @@ impl ClientDef for TendermintClient { // (cs-new, cs-next, cs-latest) if header.height() < client_state.latest_height() { let maybe_next_cs = ctx - .next_consensus_state(&client_id, header.height())? + .next_consensus_state(&client_id, header.height(), None)? .map(downcast_consensus_state) .transpose()?; @@ -166,7 +203,7 @@ impl ClientDef for TendermintClient { // (cs-trusted, cs-prev, cs-new) if header.trusted_height < header.height() { let maybe_prev_cs = ctx - .prev_consensus_state(&client_id, header.height())? + .prev_consensus_state(&client_id, header.height(), None)? .map(downcast_consensus_state) .transpose()?; @@ -184,10 +221,7 @@ impl ClientDef for TendermintClient { } } - Ok(( - client_state.with_header(header.clone()), - ConsensusState::from(header), - )) + Ok(false) } fn verify_client_consensus_state( @@ -399,9 +433,9 @@ impl ClientDef for TendermintClient { &self, _client_state: &Self::ClientState, _consensus_state: &Self::ConsensusState, - _proof_upgrade_client: RawMerkleProof, - _proof_upgrade_consensus_state: RawMerkleProof, - ) -> Result<(Self::ClientState, Self::ConsensusState), Ics02Error> { + _proof_upgrade_client: Vec, + _proof_upgrade_consensus_state: Vec, + ) -> Result<(Self::ClientState, ConsensusUpdateResult), Ics02Error> { todo!() } } diff --git a/modules/src/clients/ics07_tendermint/header.rs b/modules/src/clients/ics07_tendermint/header.rs index 8fe32105a5..121fdc944e 100644 --- a/modules/src/clients/ics07_tendermint/header.rs +++ b/modules/src/clients/ics07_tendermint/header.rs @@ -42,6 +42,10 @@ impl Header { ) } + pub fn timestamp(&self) -> Timestamp { + self.signed_header.header.time.into() + } + pub fn compatible_with(&self, other_header: &Header) -> bool { headers_compatible(&self.signed_header, &other_header.signed_header) } @@ -72,14 +76,6 @@ impl crate::core::ics02_client::header::Header for Header { ClientType::Tendermint } - fn height(&self) -> Height { - self.height() - } - - fn timestamp(&self) -> Timestamp { - self.signed_header.header.time.into() - } - fn wrap_any(self) -> AnyHeader { AnyHeader::Tendermint(self) } diff --git a/modules/src/clients/ics11_beefy/client_def.rs b/modules/src/clients/ics11_beefy/client_def.rs index e28e69fd0f..7f54c00641 100644 --- a/modules/src/clients/ics11_beefy/client_def.rs +++ b/modules/src/clients/ics11_beefy/client_def.rs @@ -1,26 +1,25 @@ -use beefy_client::primitives::{MmrUpdateProof, ParachainHeader, ParachainsUpdateProof}; -use beefy_client::traits::{StorageRead, StorageWrite}; +use beefy_client::primitives::{ParachainHeader, ParachainsUpdateProof}; +use beefy_client::traits::{HostFunctions, StorageRead, StorageWrite}; use beefy_client::BeefyLightClient; use codec::Encode; use core::convert::TryInto; use pallet_mmr_primitives::BatchProof; -use prost::Message; use sp_core::H256; -use 
sp_runtime::traits::BlakeTwo256; use tendermint_proto::Protobuf; use crate::clients::ics11_beefy::client_state::ClientState; use crate::clients::ics11_beefy::consensus_state::ConsensusState; -use crate::clients::ics11_beefy::error::Error; +use crate::clients::ics11_beefy::error::Error as BeefyError; use crate::clients::ics11_beefy::header::BeefyHeader; use crate::core::ics02_client::client_consensus::AnyConsensusState; -use crate::core::ics02_client::client_def::ClientDef; +use crate::core::ics02_client::client_def::{ClientDef, ConsensusUpdateResult}; use crate::core::ics02_client::client_state::AnyClientState; use crate::core::ics02_client::client_type::ClientType; use crate::core::ics02_client::context::ClientReader; -use crate::core::ics02_client::error::Error as Ics02Error; +use crate::core::ics02_client::error::Error; use crate::core::ics03_connection::connection::ConnectionEnd; use crate::core::ics04_channel::channel::ChannelEnd; +use crate::core::ics04_channel::commitment::{AcknowledgementCommitment, PacketCommitment}; use crate::core::ics04_channel::context::ChannelReader; use crate::core::ics04_channel::packet::Sequence; @@ -40,7 +39,26 @@ use crate::core::ics24_host::path::{ }; use crate::downcast; -pub trait BeefyLCStore: StorageRead + StorageWrite + Clone {} +/// Methods definitions specific to Beefy Light Client operation +pub trait BeefyLCStore: StorageRead + StorageWrite + HostFunctions + Clone + Default { + /// This function should verify membership in a trie proof using parity's sp-trie package + /// with a BlakeTwo256 Hasher + fn verify_membership_trie_proof( + root: &H256, + proof: &Vec>, + key: &[u8], + value: &[u8], + ) -> Result<(), Error>; + /// This function should verify non membership in a trie proof using parity's sp-trie package + /// with a BlakeTwo256 Hasher + fn verify_non_membership_trie_proof( + root: &H256, + proof: &Vec>, + key: &[u8], + ) -> Result<(), Error>; + fn store_latest_parachains_height(para_id_and_heights: Vec<(u64, u64)>) -> Result<(), Error>; + fn get_parachain_latest_height(para_id: u64) -> Result; +} #[derive(Clone, Debug, Default, PartialEq, Eq)] pub struct BeefyClient { @@ -52,18 +70,18 @@ impl ClientDef for BeefyClient { type ClientState = ClientState; type ConsensusState = ConsensusState; - fn check_header_and_update_state( + fn verify_header( &self, - ctx: &dyn ClientReader, - client_id: ClientId, + _ctx: &dyn ClientReader, + _client_id: ClientId, client_state: Self::ClientState, header: Self::Header, - ) -> Result<(Self::ClientState, Self::ConsensusState), Ics02Error> { - let mut light_client = BeefyLightClient::new(self.store.clone()); + ) -> Result<(), Error> { + let mut light_client = BeefyLightClient::<_, Store>::new(self.store.clone()); if let Some(mmr_update) = header.mmr_update_proof { light_client .ingest_mmr_root_with_proof(mmr_update) - .map_err(|e| Ics02Error::Beefy(Error::invalid_mmmr_update(format!("{:?}", e))))?; + .map_err(|e| Error::beefy(BeefyError::invalid_mmr_update(format!("{:?}", e))))?; } let mut leaf_indices = vec![]; @@ -96,110 +114,34 @@ impl ClientDef for BeefyClient { items: header .mmr_proofs .into_iter() - .map(|item| H256::from_slice(&item)), + .map(|item| H256::from_slice(&item)) + .collect(), }, }; light_client .verify_parachain_headers(parachain_update_proof) - .map_err(|e| Ics02Error::Beefy(Error::invalid_mmmr_update(format!("{:?}", e))))?; - // Check if a consensus state is already installed; if so it should - // match the untrusted header - let mut consensus_states = header - .parachain_headers - 
.into_iter() - .map(ConsensusState::from) - .collect::>(); - consensus_states.sort_by(|a, b| { - a.parachain_header - .parachain_header - .number - .cmp(&b.parachain_header.parachain_header.number) - }); - - let mut latest_para_height = client_state.latest_height(); - let trusted_consensus_state = - downcast_consensus_state(ctx.consensus_state(&client_id, latest_para_height)?)?; - let mut last_seen_cs = None; - let mut last_seen_height = None; - for cs_state in consensus_states { - let height = Height::new( - client_state.para_id as u64, - cs_state.parachain_header.parachain_header.number as u64, - ); - let existing_consensus_state = match ctx.maybe_consensus_state(&client_id, height)? { - Some(cs) => { - let cs = downcast_consensus_state(cs)?; - // If this consensus state matches, skip verification - // (optimization) - if cs == cs_state { - // Header is already installed and matches the incoming - // header (already verified) - continue; - } - Some(cs) - } - None => None, - }; - - // If the header has verified, but its corresponding consensus state - // differs from the existing consensus state for that height, freeze the - // client and return the installed consensus state. - if let Some(cs) = existing_consensus_state { - if cs != cs_state { - let frozen_height = Height::new( - client_state.para_id as u64, - client_state.latest_beefy_height as u64, - ); - return Ok((client_state.with_frozen_height(frozen_height)?, cs)); - } - } + .map_err(|e| Error::beefy(BeefyError::invalid_mmr_update(format!("{:?}", e)))) + } - // Monotonicity checks for timestamps for in-the-middle updates - // (cs-new, cs-next, cs-latest) - if height < latest_para_height { - let maybe_next_cs = ctx - .next_consensus_state(&client_id, height)? - .map(downcast_consensus_state) - .transpose()?; - - if let Some(next_cs) = maybe_next_cs { - // New (untrusted) header timestamp cannot occur after next - // consensus state's height - if cs_state.timestamp > next_cs.timestamp { - // return Err(Ics02Error::beefy( - // Error::header_timestamp_too_high( - // cs_state.timestamp.to_string(), - // next_cs.timestamp.to_string(), - // ), - // )); - continue; - } - } - } - // (cs-trusted, cs-prev, cs-new) - if height > latest_para_height { - let maybe_prev_cs = ctx - .prev_consensus_state(&client_id, header.height())? 
- .map(downcast_consensus_state) - .transpose()?; - - if let Some(maybe_prev_cs) = maybe_prev_cs { - // New (untrusted) header timestamp cannot occur before the - // previous consensus state's height - if cs_state.timestamp < maybe_prev_cs.timestamp { - // return Err(Ics02Error::beefy( - // Error::header_timestamp_too_low( - // header.signed_header.header().time.to_string(), - // prev_cs.timestamp.to_string(), - // ), - // )); - continue; - } - } + fn update_state( + &self, + ctx: &dyn ClientReader, + client_id: ClientId, + client_state: Self::ClientState, + header: Self::Header, + ) -> Result<(Self::ClientState, ConsensusUpdateResult), Error> { + let mut parachain_cs_states = vec![]; + for header in header.parachain_headers { + let height = Height::new(header.para_id as u64, header.parachain_header.number as u64); + // Skip duplicate consensus states + if let Ok(_) = ctx.consensus_state(&client_id, height) { + continue; } - last_seen_height = Some(height); - last_seen_cs = Some(cs_state) + parachain_cs_states.push(( + height, + AnyConsensusState::Beefy(ConsensusState::from(header)), + )) } let best_cs_state = if let Some(cs_state) = last_seen_cs { @@ -216,17 +158,44 @@ impl ClientDef for BeefyClient { let mmr_state = self .store .mmr_state() - .map_err(|e| Ics02Error::Beefy(Error::implementation_specific(format!("{:?}", e))))?; + .map_err(|e| Error::beefy(BeefyError::implementation_specific(format!("{:?}", e))))?; let authorities = self .store .authority_set() - .map_err(|e| Ics02Error::Beefy(Error::implementation_specific(format!("{:?}", e))))?; + .map_err(|e| Error::beefy(BeefyError::implementation_specific(format!("{:?}", e))))?; + + let client_state = client_state.with_updates(mmr_state, authorities); + Ok(( - client_state.with_updates(mmr_state, authorities, latest_para_height), - best_cs_state, + client_state, + ConsensusUpdateResult::Batch(parachain_cs_states), )) } + fn update_state_on_misbehaviour( + &self, + client_state: Self::ClientState, + _header: Self::Header, + ) -> Result { + let mmr_state = self + .store + .mmr_state() + .map_err(|_| Error::beefy(BeefyError::missing_latest_height()))?; + client_state + .with_frozen_height(Height::new(0, mmr_state.latest_beefy_height as u64)) + .map_err(|e| Error::beefy(BeefyError::implementation_specific(e.to_string()))) + } + + fn check_for_misbehaviour( + &self, + _ctx: &dyn ClientReader, + _client_id: ClientId, + _client_state: Self::ClientState, + _header: Self::Header, + ) -> Result { + todo!() + } + fn verify_client_consensus_state( &self, client_state: &Self::ClientState, @@ -237,8 +206,10 @@ impl ClientDef for BeefyClient { client_id: &ClientId, consensus_height: Height, expected_consensus_state: &AnyConsensusState, - ) -> Result<(), Ics02Error> { - client_state.verify_height(height)?; + ) -> Result<(), Error> { + client_state + .verify_parachain_height::(height) + .map_err(|e| Error::beefy(e))?; let path = ClientConsensusStatePath { client_id: client_id.clone(), @@ -246,7 +217,7 @@ impl ClientDef for BeefyClient { height: consensus_height.revision_height, }; let value = expected_consensus_state.encode_vec().unwrap(); - verify_membership(client_state, prefix, proof, root, path, value) + verify_membership::(prefix, proof, root, path, value) } fn verify_connection_state( @@ -258,12 +229,14 @@ impl ClientDef for BeefyClient { root: &CommitmentRoot, connection_id: &ConnectionId, expected_connection_end: &ConnectionEnd, - ) -> Result<(), Ics02Error> { - client_state.verify_height(height)?; + ) -> Result<(), Error> { + client_state + 
.verify_parachain_height::(height) + .map_err(|e| Error::beefy(e))?; let path = ConnectionsPath(connection_id.clone()); let value = expected_connection_end.encode_vec().unwrap(); - verify_membership(client_state, prefix, proof, root, path, value) + verify_membership::(prefix, proof, root, path, value) } fn verify_channel_state( @@ -276,12 +249,14 @@ impl ClientDef for BeefyClient { port_id: &PortId, channel_id: &ChannelId, expected_channel_end: &ChannelEnd, - ) -> Result<(), Ics02Error> { - client_state.verify_height(height)?; + ) -> Result<(), Error> { + client_state + .verify_parachain_height::(height) + .map_err(|e| Error::beefy(e))?; let path = ChannelEndsPath(port_id.clone(), channel_id.clone()); let value = expected_channel_end.encode_vec().unwrap(); - verify_membership(client_state, prefix, proof, root, path, value) + verify_membership::(prefix, proof, root, path, value) } fn verify_client_full_state( @@ -293,12 +268,14 @@ impl ClientDef for BeefyClient { root: &CommitmentRoot, client_id: &ClientId, expected_client_state: &AnyClientState, - ) -> Result<(), Ics02Error> { - client_state.verify_height(height)?; + ) -> Result<(), Error> { + client_state + .verify_parachain_height::(height) + .map_err(|e| Error::beefy(e))?; let path = ClientStatePath(client_id.clone()); let value = expected_client_state.encode_vec().unwrap(); - verify_membership(client_state, prefix, proof, root, path, value) + verify_membership::(prefix, proof, root, path, value) } fn verify_packet_data( @@ -312,9 +289,11 @@ impl ClientDef for BeefyClient { port_id: &PortId, channel_id: &ChannelId, sequence: Sequence, - commitment: String, - ) -> Result<(), Ics02Error> { - client_state.verify_height(height)?; + commitment: PacketCommitment, + ) -> Result<(), Error> { + client_state + .verify_parachain_height::(height) + .map_err(|e| Error::beefy(e))?; verify_delay_passed(ctx, height, connection_end)?; let commitment_path = CommitmentsPath { @@ -323,18 +302,12 @@ impl ClientDef for BeefyClient { sequence, }; - let mut commitment_bytes = Vec::new(); - commitment - .encode(&mut commitment_bytes) - .expect("buffer size too small"); - - verify_membership( - client_state, + verify_membership::( connection_end.counterparty().prefix(), proof, root, commitment_path, - commitment_bytes, + commitment.into_vec(), ) } @@ -349,9 +322,11 @@ impl ClientDef for BeefyClient { port_id: &PortId, channel_id: &ChannelId, sequence: Sequence, - ack: Vec, - ) -> Result<(), Ics02Error> { - client_state.verify_height(height)?; + ack: AcknowledgementCommitment, + ) -> Result<(), Error> { + client_state + .verify_parachain_height::(height) + .map_err(|e| Error::beefy(e))?; verify_delay_passed(ctx, height, connection_end)?; let ack_path = AcksPath { @@ -359,13 +334,12 @@ impl ClientDef for BeefyClient { channel_id: channel_id.clone(), sequence, }; - verify_membership( - client_state, + verify_membership::( connection_end.counterparty().prefix(), proof, root, ack_path, - ack, + ack.into_vec(), ) } @@ -380,18 +354,16 @@ impl ClientDef for BeefyClient { port_id: &PortId, channel_id: &ChannelId, sequence: Sequence, - ) -> Result<(), Ics02Error> { - client_state.verify_height(height)?; + ) -> Result<(), Error> { + client_state + .verify_parachain_height::(height) + .map_err(|e| Error::beefy(e))?; verify_delay_passed(ctx, height, connection_end)?; - let mut seq_bytes = Vec::new(); - u64::from(sequence) - .encode(&mut seq_bytes) - .expect("buffer size too small"); + let seq_bytes = codec::Encode::encode(&u64::from(sequence)); let seq_path = 
SeqRecvsPath(port_id.clone(), channel_id.clone()); - verify_membership( - client_state, + verify_membership::( connection_end.counterparty().prefix(), proof, root, @@ -411,8 +383,10 @@ impl ClientDef for BeefyClient { port_id: &PortId, channel_id: &ChannelId, sequence: Sequence, - ) -> Result<(), Ics02Error> { - client_state.verify_height(height)?; + ) -> Result<(), Error> { + client_state + .verify_parachain_height::(height) + .map_err(|e| Error::beefy(e))?; verify_delay_passed(ctx, height, connection_end)?; let receipt_path = ReceiptsPath { @@ -420,8 +394,7 @@ impl ClientDef for BeefyClient { channel_id: channel_id.clone(), sequence, }; - verify_non_membership( - client_state, + verify_non_membership::( connection_end.counterparty().prefix(), proof, root, @@ -433,83 +406,75 @@ impl ClientDef for BeefyClient { &self, _client_state: &Self::ClientState, _consensus_state: &Self::ConsensusState, - _proof_upgrade_client: RawMerkleProof, - _proof_upgrade_consensus_state: RawMerkleProof, - ) -> Result<(Self::ClientState, Self::ConsensusState), Ics02Error> { + _proof_upgrade_client: Vec, + _proof_upgrade_consensus_state: Vec, + ) -> Result<(Self::ClientState, ConsensusUpdateResult), Error> { todo!() } } -fn verify_membership( - _client_state: &ClientState, +fn verify_membership>( prefix: &CommitmentPrefix, proof: &CommitmentProofBytes, root: &CommitmentRoot, - path: impl Into, + path: P, value: Vec, -) -> Result<(), Ics02Error> { +) -> Result<(), Error> { if root.as_bytes().len() != 32 { - return Err(Ics02Error::beefy(Error::invalid_commitment_root)); + return Err(Error::beefy(BeefyError::invalid_commitment_root())); } let path: Path = path.into(); let path = path.to_string(); - let mut prefix = prefix.as_bytes().to_vec(); - prefix.extend_from_slice(path.as_bytes()); - let key = codec::Encode::encode(&prefix); + let path = vec![prefix.as_bytes(), path.as_bytes()]; + let key = codec::Encode::encode(&path); let trie_proof: Vec = proof.clone().into(); let trie_proof: Vec> = codec::Decode::decode(&mut &*trie_proof) - .map_err(|e| Ics02Error::beefy(Error::scale_decode(e)))?; - let root = H256::from_slice(root.into_vec().as_slice()); - sp_trie::verify_trie_proof::, _, _, _>( - &root, - &trie_proof, - vec![&(key, Some(value))], - ) - .map_err(|e| Ics02Error::beefy(Error::ics23_error(e))) + .map_err(|e| Error::beefy(BeefyError::scale_decode(e)))?; + let root = H256::from_slice(root.as_bytes()); + Verifier::verify_membership_trie_proof(&root, &trie_proof, &key, &value) } -fn verify_non_membership( - _client_state: &ClientState, +fn verify_non_membership>( prefix: &CommitmentPrefix, proof: &CommitmentProofBytes, root: &CommitmentRoot, - path: impl Into, -) -> Result<(), Ics02Error> { + path: P, +) -> Result<(), Error> { if root.as_bytes().len() != 32 { - return Err(Ics02Error::beefy(Error::invalid_commitment_root)); + return Err(Error::beefy(BeefyError::invalid_commitment_root())); } let path: Path = path.into(); let path = path.to_string(); - let mut prefix = prefix.as_bytes().to_vec(); - prefix.extend_from_slice(path.as_bytes()); - let key = codec::Encode::encode(&prefix); + let path = vec![prefix.as_bytes(), path.as_bytes()]; + let key = codec::Encode::encode(&path); let trie_proof: Vec = proof.clone().into(); let trie_proof: Vec> = codec::Decode::decode(&mut &*trie_proof) - .map_err(|e| Ics02Error::beefy(Error::scale_decode(e)))?; - let root = H256::from_slice(root.into_vec().as_slice()); - sp_trie::verify_trie_proof::, _, _, _>( - &root, - &trie_proof, - vec![&(key, None)], - ) - .map_err(|e| 
Ics02Error::beefy(Error::ics23_error(e))) + .map_err(|e| Error::beefy(BeefyError::scale_decode(e)))?; + let root = H256::from_slice(root.as_bytes()); + Verifier::verify_non_membership_trie_proof(&root, &trie_proof, &key) } fn verify_delay_passed( ctx: &dyn ChannelReader, height: Height, connection_end: &ConnectionEnd, -) -> Result<(), Ics02Error> { +) -> Result<(), Error> { let current_timestamp = ctx.host_timestamp(); let current_height = ctx.host_height(); let client_id = connection_end.client_id(); - let processed_time = ctx - .client_update_time(client_id, height) - .map_err(|_| Error::processed_time_not_found(client_id.clone(), height))?; - let processed_height = ctx - .client_update_height(client_id, height) - .map_err(|_| Error::processed_height_not_found(client_id.clone(), height))?; + let processed_time = ctx.client_update_time(client_id, height).map_err(|_| { + Error::beefy(BeefyError::processed_time_not_found( + client_id.clone(), + height, + )) + })?; + let processed_height = ctx.client_update_height(client_id, height).map_err(|_| { + Error::beefy(BeefyError::processed_height_not_found( + client_id.clone(), + height, + )) + })?; let delay_period_time = connection_end.delay_period(); let delay_period_height = ctx.block_delay(delay_period_time); @@ -525,9 +490,9 @@ fn verify_delay_passed( .map_err(|e| e.into()) } -fn downcast_consensus_state(cs: AnyConsensusState) -> Result { +pub fn downcast_consensus_state(cs: AnyConsensusState) -> Result { downcast!( cs => AnyConsensusState::Beefy ) - .ok_or_else(|| Ics02Error::client_args_type_mismatch(ClientType::Beefy)) + .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Beefy)) } diff --git a/modules/src/clients/ics11_beefy/client_state.rs b/modules/src/clients/ics11_beefy/client_state.rs index e83aa336e9..fea32a4c50 100644 --- a/modules/src/clients/ics11_beefy/client_state.rs +++ b/modules/src/clients/ics11_beefy/client_state.rs @@ -5,14 +5,15 @@ use beefy_primitives::mmr::BeefyNextAuthoritySet; use codec::{Decode, Encode}; use core::convert::TryFrom; use core::time::Duration; +use serde::{Deserialize, Serialize}; use sp_core::H256; use sp_runtime::SaturatedConversion; use tendermint_proto::Protobuf; +use crate::clients::ics11_beefy::client_def::BeefyLCStore; use ibc_proto::ibc::lightclients::beefy::v1::{BeefyAuthoritySet, ClientState as RawClientState}; use crate::clients::ics11_beefy::error::Error; -use crate::clients::ics11_beefy::header::BeefyHeader; use crate::core::ics02_client::client_state::AnyClientState; use crate::core::ics02_client::client_type::ClientType; use crate::core::ics24_host::identifier::ChainId; @@ -55,27 +56,23 @@ impl ClientState { authority_set: BeefyNextAuthoritySet, next_authority_set: BeefyNextAuthoritySet, ) -> Result { - if chain_id.version() <= 0 { + if chain_id.version() == 0 { return Err(Error::validation( - "ClientState Chain id version must be the parachain id which cannot be less or equal to zero ".to_string(), - )); - } - - if latest_beefy_height < 0 { - return Err(Error::validation( - "ClientState latest beefy height and latest parachain height must be greater than or equal to zero".to_string(), + "ClientState Chain id cannot be equal to zero ".to_string(), )); } if beefy_activation_block > latest_beefy_height { return Err(Error::validation( - "ClientState beefy activation block cannot be greater than latest_beefy_height".to_string(), + "ClientState beefy activation block cannot be greater than latest_beefy_height" + .to_string(), )); } if authority_set.id >= next_authority_set.id { return 
Err(Error::validation( - "ClientState next authority set id must be greater than current authority set id".to_string(), + "ClientState next authority set id must be greater than current authority set id" + .to_string(), )); } @@ -87,8 +84,6 @@ impl ClientState { beefy_activation_block, authority: authority_set, next_authority_set, - latest_para_height: None, - para_id: chain_id.version().saturated_into::(), }) } @@ -103,12 +98,7 @@ impl ClientState { self.beefy_activation_block - (block_number + 1) } - pub fn with_updates( - &self, - mmr_state: MmrState, - authorities: AuthoritySet, - latest_para_height: Height, - ) -> Self { + pub fn with_updates(&self, mmr_state: MmrState, authorities: AuthoritySet) -> Self { let clone = self.clone(); Self { mmr_root_hash: mmr_state.mmr_root_hash, @@ -162,7 +152,7 @@ impl ClientState { pub fn verify_height(&self, height: Height) -> Result<(), Error> { if (self.latest_height() as u64) < height.revision_height { return Err(Error::insufficient_height( - self.latest_height(), + Height::new(0, self.latest_beefy_height.into()), height, )); } @@ -174,6 +164,20 @@ impl ClientState { _ => Ok(()), } } + + pub fn verify_parachain_height( + &self, + height: Height, + ) -> Result<(), Error> { + let para_id = height.revision_number; + let trusted_para_height = LCStore::get_parachain_latest_height(para_id) + .map_err(|e| Error::implementation_specific(e.to_string()))?; + let latest_para_height = Height::new(para_id, trusted_para_height); + if latest_para_height < height { + return Err(Error::insufficient_height(latest_para_height, height)); + } + Ok(()) + } } #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] @@ -224,8 +228,7 @@ impl TryFrom for ClientState { type Error = Error; fn try_from(raw: RawClientState) -> Result { - // TODO: Change Revison number to para id when chain id is added to the beefy spec - let frozen_height = Some(Height::new(REVISION_NUMBER, raw.frozen_height)); + let frozen_height = Some(Height::new(0, raw.frozen_height)); let authority_set = raw .authority @@ -233,10 +236,10 @@ impl TryFrom for ClientState { Some(BeefyNextAuthoritySet { id: set.id, len: set.len, - root: H256::decode(&mut &set.authority_root).ok()?, + root: H256::decode(&mut &*set.authority_root).ok()?, }) }) - .ok_or(Error::missing_validator_set())?; + .ok_or(Error::missing_beefy_authority_set())?; let next_authority_set = raw .next_authority_set @@ -244,12 +247,13 @@ impl TryFrom for ClientState { Some(BeefyNextAuthoritySet { id: set.id, len: set.len, - root: H256::decode(&mut &set.authority_root).ok()?, + root: H256::decode(&mut &*set.authority_root).ok()?, }) }) - .ok_or(Error::missing_validator_set())?; + .ok_or(Error::missing_beefy_authority_set())?; - let mmr_root_hash = H256::decode(&mut &raw.mmr_root_hash).map_err(|_| Error::decode())?; + let mmr_root_hash = + H256::decode(&mut &*raw.mmr_root_hash).map_err(|e| Error::scale_decode(e))?; Ok(Self { chain_id: ChainId::default(), @@ -259,9 +263,6 @@ impl TryFrom for ClientState { beefy_activation_block: raw.beefy_activation_block, authority: authority_set, next_authority_set, - latest_para_height: None, - // TODO Para Id should be added to the client state spec - para_id: ChainId::default().version().saturated_into::(), }) } } diff --git a/modules/src/clients/ics11_beefy/consensus_state.rs b/modules/src/clients/ics11_beefy/consensus_state.rs index fb7e1bffb0..b8429a7110 100644 --- a/modules/src/clients/ics11_beefy/consensus_state.rs +++ b/modules/src/clients/ics11_beefy/consensus_state.rs @@ -1,46 +1,34 @@ use 
crate::prelude::*; -use beefy_client::primitives::PartialMmrLeaf; -use beefy_primitives::mmr::{BeefyNextAuthoritySet, MmrLeafVersion}; -use codec::Encode; use core::convert::Infallible; - use serde::Serialize; -use sp_core::H256; use sp_runtime::SaturatedConversion; -use tendermint::{hash::Algorithm, time::Time, Hash}; +use tendermint::time::Time; use tendermint_proto::google::protobuf as tpb; use tendermint_proto::Protobuf; -use ibc_proto::ibc::lightclients::beefy::v1::{ - BeefyAuthoritySet, BeefyMmrLeafPartial as RawPartialMmrLeaf, - ConsensusState as RawConsensusState, ParachainHeader as RawParachainHeader, -}; +use ibc_proto::ibc::lightclients::beefy::v1::ConsensusState as RawConsensusState; use crate::clients::ics11_beefy::error::Error; -use crate::clients::ics11_beefy::header::{ - decode_parachain_header, decode_timestamp_extrinsic, merge_leaf_version, split_leaf_version, - ParachainHeader, -}; +use crate::clients::ics11_beefy::header::{decode_timestamp_extrinsic, ParachainHeader}; use crate::core::ics02_client::client_consensus::AnyConsensusState; use crate::core::ics02_client::client_type::ClientType; use crate::core::ics23_commitment::commitment::CommitmentRoot; use crate::timestamp::Timestamp; +// This is a constant that comes from pallet-ibc pub const IBC_CONSENSUS_ID: [u8; 4] = *b"/IBC"; -#[derive(Clone, Debug, PartialEq, Eq)] +#[derive(Clone, Debug, PartialEq, Eq, Serialize)] pub struct ConsensusState { pub timestamp: Time, - pub root: Vec, - pub parachain_header: ParachainHeader, + pub root: CommitmentRoot, } impl ConsensusState { pub fn new(root: Vec, timestamp: Time, parachain_header: ParachainHeader) -> Self { Self { timestamp, - root, - parachain_header, + root: root.into(), } } } @@ -53,7 +41,7 @@ impl crate::core::ics02_client::client_consensus::ConsensusState for ConsensusSt } fn root(&self) -> &CommitmentRoot { - &self.root.into() + &self.root } fn wrap_any(self) -> AnyConsensusState { @@ -124,9 +112,8 @@ impl TryFrom for ConsensusState { extrinsic_proof: parachain_header.extrinsic_proof, }; Ok(Self { - root: raw.root, + root: raw.root.into(), timestamp, - parachain_header, }) } } @@ -138,56 +125,7 @@ impl From for RawConsensusState { RawConsensusState { timestamp: Some(timestamp), - root: value.root, - parachain_header: Some(RawParachainHeader { - parachain_header: value.parachain_header.encode(), - mmr_leaf_partial: Some(RawPartialMmrLeaf { - version: { - let (major, minor) = - value.parachain_header.partial_mmr_leaf.version.split(); - merge_leaf_version(major, minor) as u32 - }, - parent_number: value - .parachain_header - .partial_mmr_leaf - .parent_number_and_hash - .0, - parent_hash: value - .parachain_header - .partial_mmr_leaf - .parent_number_and_hash - .1 - .encode(), - beefy_next_authority_set: Some(BeefyAuthoritySet { - id: value - .parachain_header - .partial_mmr_leaf - .beefy_next_authority_set - .id, - len: value - .parachain_header - .partial_mmr_leaf - .beefy_next_authority_set - .len, - authority_root: value - .parachain_header - .partial_mmr_leaf - .beefy_next_authority_set - .root - .encode(), - }), - }), - para_id: value.parachain_header.para_id, - parachain_heads_proof: value - .parachain_header - .parachain_heads_proof - .into_iter() - .map(|item| item.encode()) - .collect(), - heads_leaf_index: value.parachain_header.heads_leaf_index, - heads_total_count: value.parachain_header.heads_total_count, - extrinsic_proof: value.parachain_header.extrinsic_proof, - }), + root: value.root.into_vec(), } } } @@ -199,9 +137,9 @@ impl From for ConsensusState 
{ .parachain_header .digest .logs - .into_iter() + .iter() .filter_map(|digest| digest.as_consensus()) - .find(|(id, value)| id == &IBC_CONSENSUS_ID) + .find(|(id, _value)| id == &IBC_CONSENSUS_ID) .map(|(.., root)| root.to_vec()) .unwrap_or_default() }; @@ -209,12 +147,13 @@ impl From for ConsensusState { let timestamp = decode_timestamp_extrinsic(&header).unwrap_or_default(); let duration = core::time::Duration::from_millis(timestamp); let timestamp = Timestamp::from_nanoseconds(duration.as_nanos().saturated_into::()) - .unwrap_or_default(); + .unwrap_or_default() + .into_tm_time() + .unwrap(); Self { - root, - timestamp: timestamp.into(), - parachain_header: header, + root: root.into(), + timestamp, } } } diff --git a/modules/src/clients/ics11_beefy/error.rs b/modules/src/clients/ics11_beefy/error.rs index 126de42051..ee3f85935c 100644 --- a/modules/src/clients/ics11_beefy/error.rs +++ b/modules/src/clients/ics11_beefy/error.rs @@ -8,7 +8,6 @@ use crate::core::ics24_host::identifier::ClientId; use crate::timestamp::{Timestamp, TimestampOverflowError}; use beefy_client::error::BeefyClientError; use codec::Error as ScaleCodecError; -use sp_core::H256; use crate::Height; @@ -19,9 +18,9 @@ define_error! { |_| { "invalid address" }, InvalidMmrUpdate { reason: String } - |e| { "invalid address {}", e.reason }, + |e| { format_args!("invalid mmr update {}", e.reason) }, InvalidCommitmentRoot - |_| { "invalid commitment root" } + |_| { "invalid commitment root" }, TimestampExtrinsic |_| { "error decoding timestamp extrinsic" }, InvalidHeader @@ -38,15 +37,6 @@ define_error! { { reason: String } |e| { format_args!("invalid raw client state: {}", e.reason) }, - MissingValidatorSet - |_| { "missing validator set" }, - - MissingTrustedValidatorSet - |_| { "missing trusted validator set" }, - - MissingTrustedHeight - |_| { "missing trusted height" }, - InvalidChainIdentifier [ ValidationError ] |_| { "invalid chain identifier" }, @@ -54,6 +44,9 @@ define_error! { MissingLatestHeight |_| { "missing latest height" }, + MissingBeefyAuthoritySet + |_| { "missing beefy authority set" }, + MissingFrozenHeight |_| { "missing frozen height" }, @@ -166,25 +159,10 @@ define_error! 
{ format_args!("the header's current/trusted revision number ({0}) and the update's revision number ({1}) should be the same", e.current_revision, e.update_revision) }, - InvalidValidatorSet - { - hash1: H256, - hash2: H256, - } - | e | { - format_args!("invalid validator set: header_validators_hash={} and validators_hash={}", e.hash1, e.hash2) - }, - - NotEnoughTrustedValsSigned - { reason: String } - | e | { - format_args!("not enough trust because insufficient validators overlap: {}", e.reason) - }, - VerificationError { reason: BeefyClientError } | e | { - format_args!("verification failed: {}", e.reason) + format_args!("verification failed: {:?}", e.reason) }, ProcessedTimeNotFound diff --git a/modules/src/clients/ics11_beefy/header.rs b/modules/src/clients/ics11_beefy/header.rs index b193dca8db..c76c8651c1 100644 --- a/modules/src/clients/ics11_beefy/header.rs +++ b/modules/src/clients/ics11_beefy/header.rs @@ -1,15 +1,10 @@ use prost::Message; -use serde_derive::{Deserialize, Serialize}; use tendermint_proto::Protobuf; -use crate::alloc::string::ToString; -use crate::clients::ics11_beefy::client_state::REVISION_NUMBER; use crate::clients::ics11_beefy::error::Error; use crate::core::ics02_client::client_type::ClientType; use crate::core::ics02_client::header::AnyHeader; -use crate::core::ics24_host::identifier::ChainId; -use crate::timestamp::Timestamp; -use crate::Height; +use alloc::string::ToString; use alloc::vec; use alloc::vec::Vec; use beefy_client::primitives::{ @@ -25,18 +20,16 @@ use ibc_proto::ibc::lightclients::beefy::v1::{ BeefyAuthoritySet as RawBeefyAuthoritySet, BeefyMmrLeaf as RawBeefyMmrLeaf, BeefyMmrLeafPartial as RawBeefyMmrLeafPartial, Commitment as RawCommitment, CommitmentSignature, Header as RawBeefyHeader, MmrUpdateProof as RawMmrUpdateProof, - ParachainHeader as RawParachainHeader, PayloadItem as RawPayloadItem, PayloadItem, - SignedCommitment as RawSignedCommitment, + PayloadItem, SignedCommitment as RawSignedCommitment, }; -use pallet_mmr_primitives::{BatchProof, Proof}; +use pallet_mmr_primitives::Proof; use sp_core::H256; -use sp_runtime::generic::{Header as SubstrateHeader, UncheckedExtrinsic}; +use sp_runtime::generic::Header as SubstrateHeader; use sp_runtime::traits::{BlakeTwo256, SaturatedConversion}; -use sp_runtime::Digest; -use sp_trie::{StorageProof, Trie, TrieDBMut}; +use sp_trie::{StorageProof, Trie}; /// Beefy consensus header -#[derive(Clone, PartialEq, Eq)] +#[derive(Clone, PartialEq, Eq, Debug)] pub struct BeefyHeader { pub parachain_headers: Vec, // contains the parachain headers pub mmr_proofs: Vec>, // mmr proofs for these headers @@ -44,7 +37,17 @@ pub struct BeefyHeader { pub mmr_update_proof: Option, // Proof for updating the latest mmr root hash } -#[derive(Clone, PartialEq, Eq, codec::Encode, codec::Decode)] +impl crate::core::ics02_client::header::Header for BeefyHeader { + fn client_type(&self) -> ClientType { + ClientType::Beefy + } + + fn wrap_any(self) -> AnyHeader { + AnyHeader::Beefy(self) + } +} + +#[derive(Clone, PartialEq, Eq, Debug, codec::Encode, codec::Decode)] pub struct ParachainHeader { pub parachain_header: SubstrateHeader, /// Reconstructed mmr leaf @@ -80,37 +83,32 @@ impl TryFrom for BeefyHeader { .parachain_headers .into_iter() .map(|raw_para_header| { + let mmr_partial_leaf = raw_para_header + .mmr_leaf_partial + .ok_or(Error::invalid_raw_header())?; let parent_hash = - H256::decode(&mut raw_para_header.mmr_leaf_partial.parent_hash.as_slice()) - .unwrap(); - let beefy_next_authority_set = if let Some(next_set) 
= - raw_para_header.mmr_leaf_partial.beefy_next_authority_set - { - BeefyNextAuthoritySet { - id: next_set.id, - len: next_set.len, - root: H256::decode(&mut next_set.root.as_slice()).unwrap(), - } - } else { - Default::default() - }; + H256::decode(&mut mmr_partial_leaf.parent_hash.as_slice()).unwrap(); + let beefy_next_authority_set = + if let Some(next_set) = mmr_partial_leaf.beefy_next_authority_set { + BeefyNextAuthoritySet { + id: next_set.id, + len: next_set.len, + root: H256::decode(&mut next_set.authority_root.as_slice()) + .map_err(|e| Error::invalid_mmr_update(e.to_string()))?, + } + } else { + Default::default() + }; Ok(ParachainHeader { parachain_header: decode_parachain_header(raw_para_header.parachain_header) - .map_err(|err| Error::invalid_header(err))?, + .map_err(|_| Error::invalid_raw_header())?, partial_mmr_leaf: PartialMmrLeaf { version: { - let (major, minor) = split_leaf_version( - raw_para_header - .mmr_leaf_partial - .version - .saturated_into::(), - ); + let (major, minor) = + split_leaf_version(mmr_partial_leaf.version.saturated_into::()); MmrLeafVersion::new(major, minor) }, - parent_number_and_hash: ( - raw_para_header.mmr_leaf_partial.parent_number, - parent_hash, - ), + parent_number_and_hash: (mmr_partial_leaf.parent_number, parent_hash), beefy_next_authority_set, }, para_id: raw_para_header.para_id, @@ -119,10 +117,13 @@ impl TryFrom for BeefyHeader { .into_iter() .map(|item| { let mut dest = [0u8; 32]; + if item.len() != 32 { + return Err(Error::invalid_raw_header()); + } dest.copy_from_slice(&*item); - dest + Ok(dest) }) - .collect(), + .collect::, Error>>()?, heads_leaf_index: raw_para_header.heads_leaf_index, heads_total_count: raw_para_header.heads_total_count, extrinsic_proof: raw_para_header.extrinsic_proof, @@ -135,46 +136,60 @@ impl TryFrom for BeefyHeader { mmr_update .signed_commitment .as_ref() - .unwrap() + .ok_or(Error::invalid_mmr_update("".to_string()))? .commitment - .unwrap() + .as_ref() + .ok_or(Error::invalid_mmr_update("".to_string()))? .payload .iter() - .map(|item| { + .filter_map(|item| { + if item.payload_id.as_slice() != &MMR_ROOT_ID { + return None; + } let mut payload_id = [0u8; 2]; payload_id.copy_from_slice(&item.payload_id); - Payload::new(payload_id, item.payload_data.clone()) + Some(Payload::new(payload_id, item.payload_data.clone())) }) - .collect() + .collect::>() + .get(0) + .ok_or(Error::invalid_mmr_update("".to_string()))? + .clone() }; let block_number = mmr_update .signed_commitment .as_ref() - .unwrap() + .ok_or(Error::invalid_mmr_update("".to_string()))? .commitment - .unwrap() + .as_ref() + .ok_or(Error::invalid_mmr_update("".to_string()))? .block_numer; let validator_set_id = mmr_update .signed_commitment .as_ref() - .unwrap() + .ok_or(Error::invalid_mmr_update("".to_string()))? .commitment - .unwrap() + .as_ref() + .ok_or(Error::invalid_mmr_update("".to_string()))? .validator_set_id; let signatures = mmr_update .signed_commitment - .unwrap() + .ok_or(Error::invalid_mmr_update("".to_string()))? 
.signatures .into_iter() - .map(|commitment_sig| SignatureWithAuthorityIndex { - signature: { - let mut sig = [0u8; 65]; - sig.copy_from_slice(&commitment_sig.signature); - sig - }, - index: commitment_sig.authority_index, + .map(|commitment_sig| { + if commitment_sig.signature.len() != 65 { + return Err(Error::invalid_mmr_update("".to_string())); + } + Ok(SignatureWithAuthorityIndex { + signature: { + let mut sig = [0u8; 65]; + sig.copy_from_slice(&commitment_sig.signature); + sig + }, + index: commitment_sig.authority_index, + }) }) - .collect(); + .collect::, Error>>()?; Some(MmrUpdateProof { signed_commitment: SignedCommitment { commitment: Commitment { @@ -190,50 +205,68 @@ impl TryFrom for BeefyHeader { mmr_update .mmr_leaf .as_ref() - .unwrap() + .ok_or(Error::invalid_mmr_update("".to_string()))? .version .saturated_into::(), ); MmrLeafVersion::new(major, minor) }, parent_number_and_hash: { - let parent_number = mmr_update.mmr_leaf.as_ref().unwrap().parent_number; + let parent_number = mmr_update + .mmr_leaf + .as_ref() + .ok_or(Error::invalid_mmr_update("".to_string()))? + .parent_number; let parent_hash = H256::decode( - &mut mmr_update.mmr_leaf.as_ref().unwrap().parent_hash.as_slice(), + &mut mmr_update + .mmr_leaf + .as_ref() + .ok_or(Error::invalid_mmr_update("".to_string()))? + .parent_hash + .as_slice(), ) - .unwrap(); + .map_err(|e| Error::invalid_mmr_update(e.to_string()))?; (parent_number, parent_hash) }, beefy_next_authority_set: BeefyNextAuthoritySet { id: mmr_update .mmr_leaf .as_ref() - .unwrap() + .ok_or(Error::invalid_mmr_update("".to_string()))? .beefy_next_authority_set - .unwrap() + .as_ref() + .ok_or(Error::invalid_mmr_update("".to_string()))? .id, len: mmr_update .mmr_leaf .as_ref() - .unwrap() + .ok_or(Error::invalid_mmr_update("".to_string()))? .beefy_next_authority_set - .unwrap() + .as_ref() + .ok_or(Error::invalid_mmr_update("".to_string()))? .len, root: H256::decode( - &mut &mmr_update + &mut mmr_update .mmr_leaf .as_ref() - .unwrap() + .ok_or(Error::invalid_mmr_update("".to_string()))? .beefy_next_authority_set - .unwrap() - .authority_root, + .as_ref() + .ok_or(Error::invalid_mmr_update("".to_string()))? + .authority_root + .as_slice(), ) - .unwrap(), + .map_err(|e| Error::invalid_mmr_update(e.to_string()))?, }, parachain_heads: H256::decode( - &mut &mmr_update.mmr_leaf.as_ref().unwrap().parachain_heads, + &mut mmr_update + .mmr_leaf + .as_ref() + .ok_or(Error::invalid_mmr_update("".to_string()))? 
+ .parachain_heads + .as_slice(), ) - .unwrap(), + .map_err(|e| Error::invalid_mmr_update(e.to_string()))?, }, mmr_proof: Proof { leaf_index: mmr_update.mmr_leaf_index, @@ -241,18 +274,24 @@ impl TryFrom for BeefyHeader { items: mmr_update .mmr_proof .into_iter() - .map(|item| H256::decode(&mut &item).unwrap()) - .collect(), + .map(|item| { + H256::decode(&mut &*item) + .map_err(|e| Error::invalid_mmr_update(e.to_string())) + }) + .collect::, Error>>()?, }, authority_proof: mmr_update .authorities_proof .into_iter() .map(|item| { + if item.len() != 32 { + return Err(Error::invalid_mmr_update("".to_string())); + } let mut dest = [0u8; 32]; dest.copy_from_slice(&item); - dest + Ok(dest) }) - .collect(), + .collect::, Error>>()?, }) } else { None @@ -385,10 +424,10 @@ pub fn decode_parachain_header( raw_header: Vec, ) -> Result, Error> { SubstrateHeader::decode(&mut &*raw_header) - .map_err(|_| Error::invalid_header("failed to decode parachain header")) + .map_err(|_| Error::invalid_header("failed to decode parachain header".to_string())) } -pub fn decode_header(buf: B) -> Result { +pub fn decode_header(buf: B) -> Result { RawBeefyHeader::decode(buf) .map_err(Error::decode)? .try_into() @@ -410,7 +449,7 @@ pub fn decode_timestamp_extrinsic(header: &ParachainHeader) -> Result) = - codec::Decode::decode(&mut &*ext_bytes[2..]).map_err(|_| Error::timestamp_extrinsic())?; + codec::Decode::decode(&mut &ext_bytes[2..]).map_err(|_| Error::timestamp_extrinsic())?; Ok(timestamp.into()) } diff --git a/modules/src/core/ics02_client/client_consensus.rs b/modules/src/core/ics02_client/client_consensus.rs index 68842d013c..013e7147dd 100644 --- a/modules/src/core/ics02_client/client_consensus.rs +++ b/modules/src/core/ics02_client/client_consensus.rs @@ -45,7 +45,6 @@ pub trait ConsensusState: Clone + core::fmt::Debug + Send + Sync { #[serde(tag = "type")] pub enum AnyConsensusState { Tendermint(consensus_state::ConsensusState), - #[serde(skip)] Beefy(beefy_consensus_state::ConsensusState), #[cfg(any(test, feature = "mocks"))] Mock(MockConsensusState), diff --git a/modules/src/core/ics02_client/client_def.rs b/modules/src/core/ics02_client/client_def.rs index d0ced043c3..0d75a10e6f 100644 --- a/modules/src/core/ics02_client/client_def.rs +++ b/modules/src/core/ics02_client/client_def.rs @@ -1,6 +1,5 @@ -use ibc_proto::ibc::core::commitment::v1::MerkleProof; - use crate::clients::ics07_tendermint::client_def::TendermintClient; +use crate::clients::ics11_beefy::client_def::{BeefyClient, BeefyLCStore}; use crate::core::ics02_client::client_consensus::{AnyConsensusState, ConsensusState}; use crate::core::ics02_client::client_state::{AnyClientState, ClientState}; use crate::core::ics02_client::client_type::ClientType; @@ -23,27 +22,55 @@ use crate::Height; #[cfg(any(test, feature = "mocks"))] use crate::mock::client_def::MockClient; +#[derive(PartialEq, Eq, Clone, Debug)] +pub enum ConsensusUpdateResult { + Single(AnyConsensusState), + Batch(Vec<(Height, AnyConsensusState)>), +} + pub trait ClientDef: Clone { type Header: Header; type ClientState: ClientState; type ConsensusState: ConsensusState; - fn check_header_and_update_state( + fn verify_header( + &self, + ctx: &dyn ClientReader, + client_id: ClientId, + client_state: Self::ClientState, + header: Self::Header, + ) -> Result<(), Error>; + + fn update_state( + &self, + _ctx: &dyn ClientReader, + _client_id: ClientId, + client_state: Self::ClientState, + header: Self::Header, + ) -> Result<(Self::ClientState, ConsensusUpdateResult), Error>; + + fn 
update_state_on_misbehaviour( + &self, + client_state: Self::ClientState, + header: Self::Header, + ) -> Result; + + fn check_for_misbehaviour( &self, ctx: &dyn ClientReader, client_id: ClientId, client_state: Self::ClientState, header: Self::Header, - ) -> Result<(Self::ClientState, Self::ConsensusState), Error>; + ) -> Result; /// TODO fn verify_upgrade_and_update_state( &self, client_state: &Self::ClientState, consensus_state: &Self::ConsensusState, - proof_upgrade_client: MerkleProof, - proof_upgrade_consensus_state: MerkleProof, - ) -> Result<(Self::ClientState, Self::ConsensusState), Error>; + proof_upgrade_client: Vec, + proof_upgrade_consensus_state: Vec, + ) -> Result<(Self::ClientState, ConsensusUpdateResult), Error>; /// Verification functions as specified in: /// @@ -169,18 +196,18 @@ pub trait ClientDef: Clone { } #[derive(Clone, Debug, PartialEq, Eq)] -pub enum AnyClient { +pub enum AnyClient { Tendermint(TendermintClient), - + Beefy(BeefyClient), #[cfg(any(test, feature = "mocks"))] Mock(MockClient), } -impl AnyClient { - pub fn from_client_type(client_type: ClientType) -> AnyClient { +impl AnyClient { + pub fn from_client_type(client_type: ClientType) -> Self { match client_type { ClientType::Tendermint => Self::Tendermint(TendermintClient::default()), - + ClientType::Beefy => Self::Beefy(BeefyClient::default()), #[cfg(any(test, feature = "mocks"))] ClientType::Mock => Self::Mock(MockClient), } @@ -188,19 +215,61 @@ impl AnyClient { } // ⚠️ Beware of the awful boilerplate below ⚠️ -impl ClientDef for AnyClient { +impl ClientDef for AnyClient { type Header = AnyHeader; type ClientState = AnyClientState; type ConsensusState = AnyConsensusState; + /// Validate an incoming header + fn verify_header( + &self, + ctx: &dyn ClientReader, + client_id: ClientId, + client_state: Self::ClientState, + header: Self::Header, + ) -> Result<(), Error> { + match self { + Self::Tendermint(client) => { + let (client_state, header) = downcast!( + client_state => AnyClientState::Tendermint, + header => AnyHeader::Tendermint, + ) + .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Tendermint))?; + + client.verify_header(ctx, client_id, client_state, header) + } + + Self::Beefy(client) => { + let (client_state, header) = downcast!( + client_state => AnyClientState::Beefy, + header => AnyHeader::Beefy, + ) + .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Beefy))?; + + client.verify_header(ctx, client_id, client_state, header) + } + + #[cfg(any(test, feature = "mocks"))] + Self::Mock(client) => { + let (client_state, header) = downcast!( + client_state => AnyClientState::Mock, + header => AnyHeader::Mock, + ) + .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Mock))?; + + client.verify_header(ctx, client_id, client_state, header) + } + } + } + /// Validates an incoming `header` against the latest consensus state of this client. 
- fn check_header_and_update_state( + fn update_state( &self, ctx: &dyn ClientReader, client_id: ClientId, client_state: AnyClientState, header: AnyHeader, - ) -> Result<(AnyClientState, AnyConsensusState), Error> { + ) -> Result<(AnyClientState, ConsensusUpdateResult), Error> { match self { Self::Tendermint(client) => { let (client_state, header) = downcast!( @@ -210,12 +279,22 @@ impl ClientDef for AnyClient { .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Tendermint))?; let (new_state, new_consensus) = - client.check_header_and_update_state(ctx, client_id, client_state, header)?; + client.update_state(ctx, client_id, client_state, header)?; + + Ok((AnyClientState::Tendermint(new_state), new_consensus)) + } + + Self::Beefy(client) => { + let (client_state, header) = downcast!( + client_state => AnyClientState::Beefy, + header => AnyHeader::Beefy, + ) + .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Beefy))?; + + let (new_state, new_consensus) = + client.update_state(ctx, client_id, client_state, header)?; - Ok(( - AnyClientState::Tendermint(new_state), - AnyConsensusState::Tendermint(new_consensus), - )) + Ok((AnyClientState::Beefy(new_state), new_consensus)) } #[cfg(any(test, feature = "mocks"))] @@ -227,12 +306,149 @@ impl ClientDef for AnyClient { .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Mock))?; let (new_state, new_consensus) = - client.check_header_and_update_state(ctx, client_id, client_state, header)?; + client.update_state(ctx, client_id, client_state, header)?; + + Ok((AnyClientState::Mock(new_state), new_consensus)) + } + } + } + + fn update_state_on_misbehaviour( + &self, + client_state: Self::ClientState, + header: Self::Header, + ) -> Result { + match self { + AnyClient::Tendermint(client) => { + let (client_state, header) = downcast!( + client_state => AnyClientState::Tendermint, + header => AnyHeader::Tendermint, + ) + .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Tendermint))?; + let client_state = client.update_state_on_misbehaviour(client_state, header)?; + Ok(Self::ClientState::Tendermint(client_state)) + } + AnyClient::Beefy(client) => { + let (client_state, header) = downcast!( + client_state => AnyClientState::Beefy, + header => AnyHeader::Beefy, + ) + .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Beefy))?; + + let client_state = client.update_state_on_misbehaviour(client_state, header)?; + Ok(Self::ClientState::Beefy(client_state)) + } + #[cfg(any(test, feature = "mocks"))] + AnyClient::Mock(_) => { + let (client_state, header) = downcast!( + client_state => AnyClientState::Mock, + header => AnyHeader::Mock, + ) + .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Mock))?; + + let client_state = client.update_state_on_misbehaviour(client_state, header)?; + Ok(Self::ClientState::Mock(client_state)) + } + } + } + + /// Checks for misbehaviour in an incoming header + fn check_for_misbehaviour( + &self, + ctx: &dyn ClientReader, + client_id: ClientId, + client_state: Self::ClientState, + header: Self::Header, + ) -> Result { + match self { + AnyClient::Tendermint(client) => { + let (client_state, header) = downcast!( + client_state => AnyClientState::Tendermint, + header => AnyHeader::Tendermint, + ) + .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Tendermint))?; + client.check_for_misbehaviour(ctx, client_id, client_state, header) + } + AnyClient::Beefy(client) => { + let (client_state, header) = downcast!( + client_state => AnyClientState::Beefy, + header => 
AnyHeader::Beefy, + ) + .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Beefy))?; + + client.check_for_misbehaviour(ctx, client_id, client_state, header) + } + #[cfg(any(test, feature = "mocks"))] + AnyClient::Mock(_) => { + let (client_state, header) = downcast!( + client_state => AnyClientState::Mock, + header => AnyHeader::Mock, + ) + .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Mock))?; + + client.check_for_misbehaviour(ctx, client_id, client_state, header) + } + } + } + + fn verify_upgrade_and_update_state( + &self, + client_state: &Self::ClientState, + consensus_state: &Self::ConsensusState, + proof_upgrade_client: Vec, + proof_upgrade_consensus_state: Vec, + ) -> Result<(Self::ClientState, ConsensusUpdateResult), Error> { + match self { + Self::Tendermint(client) => { + let (client_state, consensus_state) = downcast!( + client_state => AnyClientState::Tendermint, + consensus_state => AnyConsensusState::Tendermint, + ) + .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Tendermint))?; + + let (new_state, new_consensus) = client.verify_upgrade_and_update_state( + client_state, + consensus_state, + proof_upgrade_client, + proof_upgrade_consensus_state, + )?; + + Ok((AnyClientState::Tendermint(new_state), new_consensus)) + } + + Self::Beefy(client) => { + let (client_state, consensus_state) = downcast!( + client_state => AnyClientState::Beefy, + consensus_state => AnyConsensusState::Beefy, + ) + .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Beefy))?; + + let (new_state, new_consensus) = client.verify_upgrade_and_update_state( + client_state, + consensus_state, + proof_upgrade_client, + proof_upgrade_consensus_state, + )?; + + Ok((AnyClientState::Beefy(new_state), new_consensus)) + } + + #[cfg(any(test, feature = "mocks"))] + Self::Mock(client) => { + let (client_state, consensus_state) = downcast!( + client_state => AnyClientState::Mock, + consensus_state => AnyConsensusState::Mock, + ) + .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Mock))?; - Ok(( - AnyClientState::Mock(new_state), - AnyConsensusState::Mock(new_consensus), - )) + let (new_state, new_consensus) = client.verify_upgrade_and_update_state( + client_state, + consensus_state, + proof_upgrade_client, + proof_upgrade_consensus_state, + )?; + + Ok((AnyClientState::Mock(new_state), new_consensus)) } } } @@ -267,6 +483,23 @@ impl ClientDef for AnyClient { ) } + Self::Beefy(client) => { + let client_state = downcast!( + client_state => AnyClientState::Beefy + ) + .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Beefy))?; + + client.verify_client_consensus_state( + client_state, + height, + prefix, + proof, + root, + client_id, + consensus_height, + expected_consensus_state, + ) + } #[cfg(any(test, feature = "mocks"))] Self::Mock(client) => { let client_state = downcast!( @@ -313,7 +546,20 @@ impl ClientDef for AnyClient { expected_connection_end, ) } + Self::Beefy(client) => { + let client_state = downcast!(client_state => AnyClientState::Beefy) + .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Beefy))?; + client.verify_connection_state( + client_state, + height, + prefix, + proof, + root, + connection_id, + expected_connection_end, + ) + } #[cfg(any(test, feature = "mocks"))] Self::Mock(client) => { let client_state = downcast!(client_state => AnyClientState::Mock) @@ -360,6 +606,22 @@ impl ClientDef for AnyClient { ) } + Self::Beefy(client) => { + let client_state = downcast!(client_state => AnyClientState::Beefy) + .ok_or_else(|| 
Error::client_args_type_mismatch(ClientType::Beefy))?; + + client.verify_channel_state( + client_state, + height, + prefix, + proof, + root, + port_id, + channel_id, + expected_channel_end, + ) + } + #[cfg(any(test, feature = "mocks"))] Self::Mock(client) => { let client_state = downcast!(client_state => AnyClientState::Mock) @@ -378,7 +640,6 @@ impl ClientDef for AnyClient { } } } - fn verify_client_full_state( &self, client_state: &Self::ClientState, @@ -406,7 +667,22 @@ impl ClientDef for AnyClient { client_state_on_counterparty, ) } + Self::Beefy(client) => { + let client_state = downcast!( + client_state => AnyClientState::Beefy + ) + .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Beefy))?; + client.verify_client_full_state( + client_state, + height, + prefix, + proof, + root, + client_id, + client_state_on_counterparty, + ) + } #[cfg(any(test, feature = "mocks"))] Self::Mock(client) => { let client_state = downcast!( @@ -426,6 +702,7 @@ impl ClientDef for AnyClient { } } } + fn verify_packet_data( &self, ctx: &dyn ChannelReader, @@ -460,6 +737,26 @@ impl ClientDef for AnyClient { ) } + Self::Beefy(client) => { + let client_state = downcast!( + client_state => AnyClientState::Beefy + ) + .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Beefy))?; + + client.verify_packet_data( + ctx, + client_state, + height, + connection_end, + proof, + root, + port_id, + channel_id, + sequence, + commitment, + ) + } + #[cfg(any(test, feature = "mocks"))] Self::Mock(client) => { let client_state = downcast!( @@ -517,6 +814,25 @@ impl ClientDef for AnyClient { ) } + Self::Beefy(client) => { + let client_state = downcast!( + client_state => AnyClientState::Beefy + ) + .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Beefy))?; + + client.verify_packet_acknowledgement( + ctx, + client_state, + height, + connection_end, + proof, + root, + port_id, + channel_id, + sequence, + ack_commitment, + ) + } #[cfg(any(test, feature = "mocks"))] Self::Mock(client) => { let client_state = downcast!( @@ -539,7 +855,6 @@ impl ClientDef for AnyClient { } } } - fn verify_next_sequence_recv( &self, ctx: &dyn ChannelReader, @@ -572,6 +887,25 @@ impl ClientDef for AnyClient { ) } + Self::Beefy(client) => { + let client_state = downcast!( + client_state => AnyClientState::Beefy + ) + .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Beefy))?; + + client.verify_next_sequence_recv( + ctx, + client_state, + height, + connection_end, + proof, + root, + port_id, + channel_id, + sequence, + ) + } + #[cfg(any(test, feature = "mocks"))] Self::Mock(client) => { let client_state = downcast!( @@ -593,6 +927,7 @@ impl ClientDef for AnyClient { } } } + fn verify_packet_receipt_absence( &self, ctx: &dyn ChannelReader, @@ -625,12 +960,11 @@ impl ClientDef for AnyClient { ) } - #[cfg(any(test, feature = "mocks"))] - Self::Mock(client) => { + Self::Beefy(client) => { let client_state = downcast!( - client_state => AnyClientState::Mock + client_state => AnyClientState::Beefy ) - .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Mock))?; + .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Beefy))?; client.verify_packet_receipt_absence( ctx, @@ -644,56 +978,25 @@ impl ClientDef for AnyClient { sequence, ) } - } - } - - fn verify_upgrade_and_update_state( - &self, - client_state: &Self::ClientState, - consensus_state: &Self::ConsensusState, - proof_upgrade_client: MerkleProof, - proof_upgrade_consensus_state: MerkleProof, - ) -> Result<(Self::ClientState, Self::ConsensusState), 
Error> { - match self { - Self::Tendermint(client) => { - let (client_state, consensus_state) = downcast!( - client_state => AnyClientState::Tendermint, - consensus_state => AnyConsensusState::Tendermint, - ) - .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Tendermint))?; - - let (new_state, new_consensus) = client.verify_upgrade_and_update_state( - client_state, - consensus_state, - proof_upgrade_client, - proof_upgrade_consensus_state, - )?; - - Ok(( - AnyClientState::Tendermint(new_state), - AnyConsensusState::Tendermint(new_consensus), - )) - } #[cfg(any(test, feature = "mocks"))] Self::Mock(client) => { - let (client_state, consensus_state) = downcast!( - client_state => AnyClientState::Mock, - consensus_state => AnyConsensusState::Mock, + let client_state = downcast!( + client_state => AnyClientState::Mock ) .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Mock))?; - let (new_state, new_consensus) = client.verify_upgrade_and_update_state( + client.verify_packet_receipt_absence( + ctx, client_state, - consensus_state, - proof_upgrade_client, - proof_upgrade_consensus_state, - )?; - - Ok(( - AnyClientState::Mock(new_state), - AnyConsensusState::Mock(new_consensus), - )) + height, + connection_end, + proof, + root, + port_id, + channel_id, + sequence, + ) } } } diff --git a/modules/src/core/ics02_client/client_state.rs b/modules/src/core/ics02_client/client_state.rs index 0aa1625552..735608e662 100644 --- a/modules/src/core/ics02_client/client_state.rs +++ b/modules/src/core/ics02_client/client_state.rs @@ -100,6 +100,7 @@ impl AnyUpgradeOptions { #[serde(tag = "type")] pub enum AnyClientState { Tendermint(client_state::ClientState), + #[serde(skip)] Beefy(beefy_client_state::ClientState), #[cfg(any(test, feature = "mocks"))] Mock(MockClientState), @@ -109,7 +110,7 @@ impl AnyClientState { pub fn latest_height(&self) -> Height { match self { Self::Tendermint(tm_state) => tm_state.latest_height(), - Self::Beefy(bf_state) => bf_state.latest_para_height(), + Self::Beefy(bf_state) => bf_state.latest_height(), #[cfg(any(test, feature = "mocks"))] Self::Mock(mock_state) => mock_state.latest_height(), } @@ -136,7 +137,7 @@ impl AnyClientState { pub fn max_clock_drift(&self) -> Duration { match self { AnyClientState::Tendermint(state) => state.max_clock_drift, - AnyClientState::Beefy(_) => None, + AnyClientState::Beefy(_) => Duration::new(0, 0), #[cfg(any(test, feature = "mocks"))] AnyClientState::Mock(_) => Duration::new(0, 0), } @@ -163,7 +164,7 @@ impl AnyClientState { pub fn expired(&self, elapsed_since_latest: Duration) -> bool { match self { AnyClientState::Tendermint(tm_state) => tm_state.expired(elapsed_since_latest), - AnyClientState::Beefy(state) => false, + AnyClientState::Beefy(_) => false, #[cfg(any(test, feature = "mocks"))] AnyClientState::Mock(mock_state) => mock_state.expired(elapsed_since_latest), } @@ -185,9 +186,9 @@ impl TryFrom for AnyClientState { )), BEEFY_CLIENT_STATE_TYPE_URL => Ok(AnyClientState::Beefy( - beefy_client_state::ClientState::decode_vec(&raw.value), - )) - .map_err(Error::decode_raw_client_state)?, + beefy_client_state::ClientState::decode_vec(&raw.value) + .map_err(Error::decode_raw_client_state)?, + )), #[cfg(any(test, feature = "mocks"))] MOCK_CLIENT_STATE_TYPE_URL => Ok(AnyClientState::Mock( @@ -208,6 +209,12 @@ impl From for Any { .encode_vec() .expect("encoding to `Any` from `AnyClientState::Tendermint`"), }, + AnyClientState::Beefy(value) => Any { + type_url: BEEFY_CLIENT_STATE_TYPE_URL.to_string(), + value: value + 
.encode_vec() + .expect("encoding to `Any` from `AnyClientState::Tendermint`"), + }, #[cfg(any(test, feature = "mocks"))] AnyClientState::Mock(value) => Any { type_url: MOCK_CLIENT_STATE_TYPE_URL.to_string(), @@ -225,7 +232,7 @@ impl ClientState for AnyClientState { fn chain_id(&self) -> ChainId { match self { AnyClientState::Tendermint(tm_state) => tm_state.chain_id(), - + AnyClientState::Beefy(bf_state) => bf_state.chain_id(), #[cfg(any(test, feature = "mocks"))] AnyClientState::Mock(mock_state) => mock_state.chain_id(), } @@ -253,7 +260,9 @@ impl ClientState for AnyClientState { AnyClientState::Tendermint(tm_state) => tm_state .upgrade(upgrade_height, upgrade_options.into_tendermint(), chain_id) .wrap_any(), - + AnyClientState::Beefy(bf_state) => bf_state + .upgrade(upgrade_height, upgrade_options.into_beefy(), chain_id) + .wrap_any(), #[cfg(any(test, feature = "mocks"))] AnyClientState::Mock(mock_state) => { mock_state.upgrade(upgrade_height, (), chain_id).wrap_any() diff --git a/modules/src/core/ics02_client/context.rs b/modules/src/core/ics02_client/context.rs index b8c0ce4741..cc808c020c 100644 --- a/modules/src/core/ics02_client/context.rs +++ b/modules/src/core/ics02_client/context.rs @@ -3,6 +3,7 @@ //! "ADR 003: IBC protocol implementation" for more details. use crate::core::ics02_client::client_consensus::AnyConsensusState; +use crate::core::ics02_client::client_def::ConsensusUpdateResult; use crate::core::ics02_client::client_state::AnyClientState; use crate::core::ics02_client::client_type::ClientType; use crate::core::ics02_client::error::{Error, ErrorDetail}; @@ -10,6 +11,7 @@ use crate::core::ics02_client::handler::ClientResult::{self, Create, Update, Upg use crate::core::ics24_host::identifier::ClientId; use crate::timestamp::Timestamp; use crate::Height; +use alloc::boxed::Box; /// Defines the read-only part of ICS2 (client functions) context. pub trait ClientReader { @@ -47,6 +49,7 @@ pub trait ClientReader { &self, client_id: &ClientId, height: Height, + filter_fn: Option bool>>, ) -> Result, Error>; /// Search for the highest consensus state lower than `height`. @@ -54,6 +57,7 @@ pub trait ClientReader { &self, client_id: &ClientId, height: Height, + filter_fn: Option bool>>, ) -> Result, Error>; /// Returns the current height of the local chain. 
@@ -87,11 +91,16 @@ pub trait ClientKeeper { self.store_client_type(client_id.clone(), res.client_type)?; self.store_client_state(client_id.clone(), res.client_state.clone())?; - self.store_consensus_state( - client_id, - res.client_state.latest_height(), - res.consensus_state, - )?; + match res.consensus_state { + None => {} + Some(consensus_state) => { + self.store_consensus_state( + client_id, + res.client_state.latest_height(), + consensus_state, + )?; + } + } self.increase_client_counter(); self.store_update_time( res.client_id.clone(), @@ -107,11 +116,28 @@ pub trait ClientKeeper { } Update(res) => { self.store_client_state(res.client_id.clone(), res.client_state.clone())?; - self.store_consensus_state( - res.client_id.clone(), - res.client_state.latest_height(), - res.consensus_state, - )?; + match res.consensus_state { + None => {} + Some(cs_state_update) => match cs_state_update { + ConsensusUpdateResult::Single(cs_state) => { + self.store_consensus_state( + res.client_id.clone(), + res.client_state.latest_height(), + cs_state, + )?; + } + ConsensusUpdateResult::Batch(cs_states) => { + for (height, cs_state) in cs_states { + self.store_consensus_state( + res.client_id.clone(), + height, + cs_state, + )?; + } + } + }, + } + self.store_update_time( res.client_id.clone(), res.client_state.latest_height(), @@ -126,11 +152,27 @@ pub trait ClientKeeper { } Upgrade(res) => { self.store_client_state(res.client_id.clone(), res.client_state.clone())?; - self.store_consensus_state( - res.client_id.clone(), - res.client_state.latest_height(), - res.consensus_state, - )?; + match res.consensus_state { + None => {} + Some(cs_state_update) => match cs_state_update { + ConsensusUpdateResult::Single(cs_state) => { + self.store_consensus_state( + res.client_id.clone(), + res.client_state.latest_height(), + cs_state, + )?; + } + ConsensusUpdateResult::Batch(cs_states) => { + for (height, cs_state) in cs_states { + self.store_consensus_state( + res.client_id.clone(), + height, + cs_state, + )?; + } + } + }, + } Ok(()) } } @@ -158,6 +200,15 @@ pub trait ClientKeeper { consensus_state: AnyConsensusState, ) -> Result<(), Error>; + /// Called upon successful client creation and update for beefy light client + fn store_parachain_consensus_state( + &mut self, + client_id: ClientId, + para_id: u64, + height: Height, + consensus_state: AnyConsensusState, + ) -> Result<(), Error>; + /// Called upon client creation. /// Increases the counter which keeps track of how many clients have been created. /// Should never fail. diff --git a/modules/src/core/ics02_client/error.rs b/modules/src/core/ics02_client/error.rs index 21f3bb339c..626c730f4d 100644 --- a/modules/src/core/ics02_client/error.rs +++ b/modules/src/core/ics02_client/error.rs @@ -5,6 +5,7 @@ use tendermint::Error as TendermintError; use tendermint_proto::Error as TendermintProtoError; use crate::clients::ics07_tendermint::error::Error as Ics07Error; +use crate::clients::ics11_beefy::error::Error as Ics11Error; use crate::core::ics02_client::client_type::ClientType; use crate::core::ics02_client::height::HeightError; use crate::core::ics23_commitment::error::Error as Ics23Error; @@ -181,7 +182,7 @@ define_error! 
{ | _ | { "tendermint error" }, Beefy - [ Ics011Error ] + [ Ics11Error ] | _ | { "Beefy error" }, InvalidPacketTimestamp @@ -284,3 +285,9 @@ impl From for Error { Error::tendermint_handler_error(e) } } + +impl From for Error { + fn from(e: Ics11Error) -> Error { + Error::beefy(e) + } +} diff --git a/modules/src/core/ics02_client/handler.rs b/modules/src/core/ics02_client/handler.rs index 9b2980e917..a641d38c8f 100644 --- a/modules/src/core/ics02_client/handler.rs +++ b/modules/src/core/ics02_client/handler.rs @@ -1,5 +1,6 @@ //! This module implements the processing logic for ICS2 (client abstractions and functions) msgs. +use crate::clients::ics11_beefy::client_def::BeefyLCStore; use crate::core::ics02_client::context::ClientReader; use crate::core::ics02_client::error::Error; use crate::core::ics02_client::msgs::ClientMsg; @@ -17,14 +18,15 @@ pub enum ClientResult { } /// General entry point for processing any message related to ICS2 (client functions) protocols. -pub fn dispatch(ctx: &Ctx, msg: ClientMsg) -> Result, Error> +pub fn dispatch(ctx: &Ctx, msg: ClientMsg) -> Result, Error> where Ctx: ClientReader, + Beefy: BeefyLCStore, { match msg { ClientMsg::CreateClient(msg) => create_client::process(ctx, msg), - ClientMsg::UpdateClient(msg) => update_client::process(ctx, msg), - ClientMsg::UpgradeClient(msg) => upgrade_client::process(ctx, msg), + ClientMsg::UpdateClient(msg) => update_client::process::(ctx, msg), + ClientMsg::UpgradeClient(msg) => upgrade_client::process::(ctx, msg), _ => { unimplemented!() } diff --git a/modules/src/core/ics02_client/handler/create_client.rs b/modules/src/core/ics02_client/handler/create_client.rs index 6175147694..deb1f5df45 100644 --- a/modules/src/core/ics02_client/handler/create_client.rs +++ b/modules/src/core/ics02_client/handler/create_client.rs @@ -23,7 +23,7 @@ pub struct Result { pub client_id: ClientId, pub client_type: ClientType, pub client_state: AnyClientState, - pub consensus_state: AnyConsensusState, + pub consensus_state: Option, pub processed_time: Timestamp, pub processed_height: Height, } diff --git a/modules/src/core/ics02_client/handler/update_client.rs b/modules/src/core/ics02_client/handler/update_client.rs index fd4e7a5fb3..1e445c5cfe 100644 --- a/modules/src/core/ics02_client/handler/update_client.rs +++ b/modules/src/core/ics02_client/handler/update_client.rs @@ -1,15 +1,15 @@ //! Protocol logic specific to processing ICS2 messages of type `MsgUpdateAnyClient`. 
+use crate::clients::ics11_beefy::client_def::BeefyLCStore; use tracing::debug; -use crate::core::ics02_client::client_consensus::AnyConsensusState; -use crate::core::ics02_client::client_def::{AnyClient, ClientDef}; +use crate::core::ics02_client::client_def::{AnyClient, ClientDef, ConsensusUpdateResult}; use crate::core::ics02_client::client_state::{AnyClientState, ClientState}; +use crate::core::ics02_client::client_type::ClientType; use crate::core::ics02_client::context::ClientReader; use crate::core::ics02_client::error::Error; use crate::core::ics02_client::events::Attributes; use crate::core::ics02_client::handler::ClientResult; -use crate::core::ics02_client::header::Header; use crate::core::ics02_client::height::Height; use crate::core::ics02_client::msgs::update_client::MsgUpdateAnyClient; use crate::core::ics24_host::identifier::ClientId; @@ -24,12 +24,12 @@ use crate::timestamp::Timestamp; pub struct Result { pub client_id: ClientId, pub client_state: AnyClientState, - pub consensus_state: AnyConsensusState, + pub consensus_state: Option, pub processed_time: Timestamp, pub processed_height: Height, } -pub fn process( +pub fn process( ctx: &dyn ClientReader, msg: MsgUpdateAnyClient, ) -> HandlerResult { @@ -44,7 +44,7 @@ pub fn process( // Read client type from the host chain store. The client should already exist. let client_type = ctx.client_type(&client_id)?; - let client_def = AnyClient::from_client_type(client_type); + let client_def = AnyClient::::from_client_type(client_type); // Read client state from the host chain store. let client_state = ctx.client_state(&client_id)?; @@ -53,49 +53,73 @@ pub fn process( return Err(Error::client_frozen(client_id)); } - // Read consensus state from the host chain store. - let latest_consensus_state = ctx - .consensus_state(&client_id, client_state.latest_height()) - .map_err(|_| { - Error::consensus_state_not_found(client_id.clone(), client_state.latest_height()) - })?; - - debug!("latest consensus state: {:?}", latest_consensus_state); - - let now = ctx.host_timestamp(); - let duration = now - .duration_since(&latest_consensus_state.timestamp()) - .ok_or_else(|| { - Error::invalid_consensus_state_timestamp(latest_consensus_state.timestamp(), now) - })?; - - if client_state.expired(duration) { - return Err(Error::header_not_within_trust_period( - latest_consensus_state.timestamp(), - header.timestamp(), - )); + if client_type != ClientType::Beefy { + // Read consensus state from the host chain store. 
+ let latest_consensus_state = ctx + .consensus_state(&client_id, client_state.latest_height()) + .map_err(|_| { + Error::consensus_state_not_found(client_id.clone(), client_state.latest_height()) + })?; + + debug!("latest consensus state: {:?}", latest_consensus_state); + + let now = ctx.host_timestamp(); + let duration = now + .duration_since(&latest_consensus_state.timestamp()) + .ok_or_else(|| { + Error::invalid_consensus_state_timestamp(latest_consensus_state.timestamp(), now) + })?; + + if client_state.expired(duration) { + return Err(Error::header_not_within_trust_period( + latest_consensus_state.timestamp(), + header.timestamp(), + )); + } } + client_def + .verify_header(ctx, client_id.clone(), client_state.clone(), header.clone()) + .map_err(|e| Error::header_verification_failure(e.to_string()))?; + + let found_misbehaviour = client_def + .check_for_misbehaviour(ctx, client_id.clone(), client_state.clone(), header.clone()) + .map_err(|e| Error::header_verification_failure(e.to_string()))?; + + let event_attributes = Attributes { + client_id: client_id.clone(), + height: ctx.host_height(), + ..Default::default() + }; + + if found_misbehaviour { + let client_state = + client_def.update_state_on_misbehaviour(client_state.clone(), header.clone())?; + let result = ClientResult::Update(Result { + client_id: client_id.clone(), + client_state, + consensus_state: None, + processed_time: ctx.host_timestamp(), + processed_height: ctx.host_height(), + }); + output.emit(IbcEvent::ClientMisbehaviour(event_attributes.into())); + return Ok(output.with_result(result)); + } // Use client_state to validate the new header against the latest consensus_state. // This function will return the new client_state (its latest_height changed) and a // consensus_state obtained from header. These will be later persisted by the keeper. let (new_client_state, new_consensus_state) = client_def - .check_header_and_update_state(ctx, client_id.clone(), client_state, header) + .update_state(ctx, client_id.clone(), client_state, header) .map_err(|e| Error::header_verification_failure(e.to_string()))?; let result = ClientResult::Update(Result { client_id: client_id.clone(), client_state: new_client_state, - consensus_state: new_consensus_state, + consensus_state: Some(new_consensus_state), processed_time: ctx.host_timestamp(), processed_height: ctx.host_height(), }); - let event_attributes = Attributes { - client_id, - height: ctx.host_height(), - ..Default::default() - }; output.emit(IbcEvent::UpdateClient(event_attributes.into())); Ok(output.with_result(result)) diff --git a/modules/src/core/ics02_client/handler/upgrade_client.rs b/modules/src/core/ics02_client/handler/upgrade_client.rs index 54e2d5caa6..48ad38fa38 100644 --- a/modules/src/core/ics02_client/handler/upgrade_client.rs +++ b/modules/src/core/ics02_client/handler/upgrade_client.rs @@ -1,7 +1,7 @@ //! Protocol logic specific to processing ICS2 messages of type `MsgUpgradeAnyClient`. //! 
-use crate::core::ics02_client::client_consensus::AnyConsensusState; -use crate::core::ics02_client::client_def::{AnyClient, ClientDef}; +use crate::clients::ics11_beefy::client_def::BeefyLCStore; +use crate::core::ics02_client::client_def::{AnyClient, ClientDef, ConsensusUpdateResult}; use crate::core::ics02_client::client_state::{AnyClientState, ClientState}; use crate::core::ics02_client::context::ClientReader; use crate::core::ics02_client::error::Error; @@ -19,10 +19,10 @@ use crate::prelude::*; pub struct Result { pub client_id: ClientId, pub client_state: AnyClientState, - pub consensus_state: AnyConsensusState, + pub consensus_state: Option, } -pub fn process( +pub fn process( ctx: &dyn ClientReader, msg: MsgUpgradeAnyClient, ) -> HandlerResult { @@ -47,7 +47,7 @@ pub fn process( let client_type = ctx.client_type(&client_id)?; - let client_def = AnyClient::from_client_type(client_type); + let client_def = AnyClient::::from_client_type(client_type); let (new_client_state, new_consensus_state) = client_def.verify_upgrade_and_update_state( &upgrade_client_state, @@ -62,7 +62,7 @@ pub fn process( let result = ClientResult::Upgrade(Result { client_id: client_id.clone(), client_state: new_client_state, - consensus_state: new_consensus_state, + consensus_state: Some(new_consensus_state), }); let event_attributes = Attributes { client_id, diff --git a/modules/src/core/ics02_client/header.rs b/modules/src/core/ics02_client/header.rs index f757ee8a9f..346d80dfcd 100644 --- a/modules/src/core/ics02_client/header.rs +++ b/modules/src/core/ics02_client/header.rs @@ -24,12 +24,6 @@ pub trait Header: Clone + core::fmt::Debug + Send + Sync { /// The type of client (eg. Tendermint) fn client_type(&self) -> ClientType; - /// The height of the consensus state - fn height(&self) -> Height; - - /// The timestamp of the consensus state - fn timestamp(&self) -> Timestamp; - /// Wrap into an `AnyHeader` fn wrap_any(self) -> AnyHeader; } @@ -44,31 +38,33 @@ pub enum AnyHeader { Mock(MockHeader), } -impl Header for AnyHeader { - fn client_type(&self) -> ClientType { +impl AnyHeader { + pub fn height(&self) -> Height { match self { - Self::Tendermint(header) => header.client_type(), - Self::Beefy(header) => header.client_type(), + Self::Tendermint(header) => header.height(), + Self::Beefy(_header) => Default::default(), #[cfg(any(test, feature = "mocks"))] - Self::Mock(header) => header.client_type(), + Self::Mock(header) => header.height(), } } - fn height(&self) -> Height { + pub fn timestamp(&self) -> Timestamp { match self { - Self::Tendermint(header) => header.height(), - Self::Beefy(header) => Default::default(), + Self::Tendermint(header) => header.timestamp(), + Self::Beefy(_header) => Default::default(), #[cfg(any(test, feature = "mocks"))] - Self::Mock(header) => header.height(), + Self::Mock(header) => header.timestamp(), } } +} - fn timestamp(&self) -> Timestamp { +impl Header for AnyHeader { + fn client_type(&self) -> ClientType { match self { - Self::Tendermint(header) => header.timestamp(), - Self::Beefy(header) => Default::default(), + Self::Tendermint(header) => header.client_type(), + Self::Beefy(header) => header.client_type(), #[cfg(any(test, feature = "mocks"))] - Self::Mock(header) => header.timestamp(), + Self::Mock(header) => header.client_type(), } } @@ -104,7 +100,7 @@ impl TryFrom for AnyHeader { } BEEFY_HEADER_TYPE_URL => { - let val = decode_beefy_header(&raw.value).map_err(Error::beefy)?; + let val = decode_beefy_header(&*raw.value).map_err(Error::beefy)?; 
Ok(AnyHeader::Beefy(val)) } diff --git a/modules/src/core/ics02_client/msgs/create_client.rs b/modules/src/core/ics02_client/msgs/create_client.rs index 469bdc27e0..275de7b51e 100644 --- a/modules/src/core/ics02_client/msgs/create_client.rs +++ b/modules/src/core/ics02_client/msgs/create_client.rs @@ -18,22 +18,28 @@ pub const TYPE_URL: &str = "/ibc.core.client.v1.MsgCreateClient"; #[derive(Clone, Debug, PartialEq, Eq)] pub struct MsgCreateAnyClient { pub client_state: AnyClientState, - pub consensus_state: AnyConsensusState, + pub consensus_state: Option, pub signer: Signer, } impl MsgCreateAnyClient { pub fn new( client_state: AnyClientState, - consensus_state: AnyConsensusState, + consensus_state: Option, signer: Signer, ) -> Result { - if client_state.client_type() != consensus_state.client_type() { - return Err(Error::raw_client_and_consensus_state_types_mismatch( - client_state.client_type(), - consensus_state.client_type(), - )); + match consensus_state.as_ref() { + Some(consensus_state) + if client_state.client_type() != consensus_state.client_type() => + { + return Err(Error::raw_client_and_consensus_state_types_mismatch( + client_state.client_type(), + consensus_state.client_type(), + )) + } + _ => {} } + Ok(MsgCreateAnyClient { client_state, consensus_state, @@ -65,14 +71,15 @@ impl TryFrom for MsgCreateAnyClient { .client_state .ok_or_else(Error::missing_raw_client_state)?; - let raw_consensus_state = raw + let consensus_state = raw .consensus_state - .ok_or_else(Error::missing_raw_client_state)?; + .map(|cs| AnyConsensusState::try_from(cs).ok()) + .flatten(); MsgCreateAnyClient::new( AnyClientState::try_from(raw_client_state)?, - AnyConsensusState::try_from(raw_consensus_state)?, - raw.signer.parse().map_err(Error::signer)?, + consensus_state, + raw.signer.into(), ) } } @@ -81,7 +88,7 @@ impl From for RawMsgCreateClient { fn from(ics_msg: MsgCreateAnyClient) -> Self { RawMsgCreateClient { client_state: Some(ics_msg.client_state.into()), - consensus_state: Some(ics_msg.consensus_state.into()), + consensus_state: ics_msg.consensus_state.and_then(|cs| Some(cs.into())), signer: ics_msg.signer.to_string(), } } diff --git a/modules/src/core/ics02_client/msgs/upgrade_client.rs b/modules/src/core/ics02_client/msgs/upgrade_client.rs index 34ed86ea83..e4f5b4cabd 100644 --- a/modules/src/core/ics02_client/msgs/upgrade_client.rs +++ b/modules/src/core/ics02_client/msgs/upgrade_client.rs @@ -6,13 +6,10 @@ use core::str::FromStr; use tendermint_proto::Protobuf; use ibc_proto::ibc::core::client::v1::MsgUpgradeClient as RawMsgUpgradeClient; -use ibc_proto::ibc::core::commitment::v1::MerkleProof as RawMerkleProof; use crate::core::ics02_client::client_consensus::AnyConsensusState; use crate::core::ics02_client::client_state::AnyClientState; use crate::core::ics02_client::error::Error; -use crate::core::ics23_commitment::commitment::CommitmentProofBytes; -use crate::core::ics23_commitment::error::Error as Ics23Error; use crate::core::ics24_host::identifier::ClientId; use crate::signer::Signer; use crate::tx_msg::Msg; @@ -25,8 +22,8 @@ pub struct MsgUpgradeAnyClient { pub client_id: ClientId, pub client_state: AnyClientState, pub consensus_state: AnyConsensusState, - pub proof_upgrade_client: RawMerkleProof, - pub proof_upgrade_consensus_state: RawMerkleProof, + pub proof_upgrade_client: Vec, + pub proof_upgrade_consensus_state: Vec, pub signer: Signer, } impl MsgUpgradeAnyClient { @@ -34,8 +31,8 @@ impl MsgUpgradeAnyClient { client_id: ClientId, client_state: AnyClientState, consensus_state: 
AnyConsensusState, - proof_upgrade_client: RawMerkleProof, - proof_upgrade_consensus_state: RawMerkleProof, + proof_upgrade_client: Vec, + proof_upgrade_consensus_state: Vec, signer: Signer, ) -> Self { MsgUpgradeAnyClient { @@ -66,17 +63,12 @@ impl Protobuf for MsgUpgradeAnyClient {} impl From for RawMsgUpgradeClient { fn from(dm_msg: MsgUpgradeAnyClient) -> RawMsgUpgradeClient { - let c_bytes = CommitmentProofBytes::try_from(dm_msg.proof_upgrade_client) - .map_or(vec![], |c| c.into()); - let cs_bytes = CommitmentProofBytes::try_from(dm_msg.proof_upgrade_consensus_state) - .map_or(vec![], |c| c.into()); - RawMsgUpgradeClient { client_id: dm_msg.client_id.to_string(), client_state: Some(dm_msg.client_state.into()), consensus_state: Some(dm_msg.consensus_state.into()), - proof_upgrade_client: c_bytes, - proof_upgrade_consensus_state: cs_bytes, + proof_upgrade_client: dm_msg.proof_upgrade_client, + proof_upgrade_consensus_state: dm_msg.proof_upgrade_consensus_state, signer: dm_msg.signer.to_string(), } } @@ -94,23 +86,14 @@ impl TryFrom for MsgUpgradeAnyClient { .consensus_state .ok_or_else(Error::missing_raw_client_state)?; - let c_bytes = CommitmentProofBytes::try_from(proto_msg.proof_upgrade_client) - .map_err(|_| Error::invalid_upgrade_client_proof(Ics23Error::empty_merkle_proof()))?; - let cs_bytes = CommitmentProofBytes::try_from(proto_msg.proof_upgrade_consensus_state) - .map_err(|_| { - Error::invalid_upgrade_consensus_state_proof(Ics23Error::empty_merkle_proof()) - })?; - Ok(MsgUpgradeAnyClient { client_id: ClientId::from_str(&proto_msg.client_id) .map_err(Error::invalid_client_identifier)?, client_state: AnyClientState::try_from(raw_client_state)?, consensus_state: AnyConsensusState::try_from(raw_consensus_state)?, - proof_upgrade_client: RawMerkleProof::try_from(c_bytes) - .map_err(Error::invalid_upgrade_client_proof)?, - proof_upgrade_consensus_state: RawMerkleProof::try_from(cs_bytes) - .map_err(Error::invalid_upgrade_consensus_state_proof)?, - signer: proto_msg.signer.parse().map_err(Error::signer)?, + proof_upgrade_client: proto_msg.proof_upgrade_client, + proof_upgrade_consensus_state: proto_msg.proof_upgrade_consensus_state, + signer: proto_msg.signer.into(), }) } } diff --git a/modules/src/core/ics03_connection/handler.rs b/modules/src/core/ics03_connection/handler.rs index b096c32e1d..d6b4a77166 100644 --- a/modules/src/core/ics03_connection/handler.rs +++ b/modules/src/core/ics03_connection/handler.rs @@ -1,5 +1,6 @@ //! This module implements the processing logic for ICS3 (connection open handshake) messages. +use crate::clients::ics11_beefy::client_def::BeefyLCStore; use crate::core::ics03_connection::connection::ConnectionEnd; use crate::core::ics03_connection::context::ConnectionReader; use crate::core::ics03_connection::error::Error; @@ -41,17 +42,18 @@ pub struct ConnectionResult { /// General entry point for processing any type of message related to the ICS3 connection open /// handshake protocol. 
-pub fn dispatch( +pub fn dispatch( ctx: &Ctx, msg: ConnectionMsg, ) -> Result, Error> where Ctx: ConnectionReader, + Beefy: BeefyLCStore, { match msg { ConnectionMsg::ConnectionOpenInit(msg) => conn_open_init::process(ctx, msg), - ConnectionMsg::ConnectionOpenTry(msg) => conn_open_try::process(ctx, *msg), - ConnectionMsg::ConnectionOpenAck(msg) => conn_open_ack::process(ctx, *msg), - ConnectionMsg::ConnectionOpenConfirm(msg) => conn_open_confirm::process(ctx, msg), + ConnectionMsg::ConnectionOpenTry(msg) => conn_open_try::process::(ctx, *msg), + ConnectionMsg::ConnectionOpenAck(msg) => conn_open_ack::process::(ctx, *msg), + ConnectionMsg::ConnectionOpenConfirm(msg) => conn_open_confirm::process::(ctx, msg), } } diff --git a/modules/src/core/ics03_connection/handler/conn_open_ack.rs b/modules/src/core/ics03_connection/handler/conn_open_ack.rs index 086a8a9d12..7ab89a9f53 100644 --- a/modules/src/core/ics03_connection/handler/conn_open_ack.rs +++ b/modules/src/core/ics03_connection/handler/conn_open_ack.rs @@ -1,5 +1,6 @@ //! Protocol logic specific to processing ICS3 messages of type `MsgConnectionOpenAck`. +use crate::clients::ics11_beefy::client_def::BeefyLCStore; use crate::core::ics03_connection::connection::{ConnectionEnd, Counterparty, State}; use crate::core::ics03_connection::context::ConnectionReader; use crate::core::ics03_connection::error::Error; @@ -13,7 +14,7 @@ use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; -pub(crate) fn process( +pub(crate) fn process( ctx: &dyn ConnectionReader, msg: MsgConnectionOpenAck, ) -> HandlerResult { @@ -65,7 +66,7 @@ pub(crate) fn process( }; // 2. Pass the details to the verification function. - verify_proofs( + verify_proofs::( ctx, msg.client_state.clone(), msg.proofs.height(), diff --git a/modules/src/core/ics03_connection/handler/conn_open_confirm.rs b/modules/src/core/ics03_connection/handler/conn_open_confirm.rs index 3f32cc3fad..c64e26cbbd 100644 --- a/modules/src/core/ics03_connection/handler/conn_open_confirm.rs +++ b/modules/src/core/ics03_connection/handler/conn_open_confirm.rs @@ -1,5 +1,6 @@ //! Protocol logic specific to processing ICS3 messages of type `MsgConnectionOpenConfirm`. +use crate::clients::ics11_beefy::client_def::BeefyLCStore; use crate::core::ics03_connection::connection::{ConnectionEnd, Counterparty, State}; use crate::core::ics03_connection::context::ConnectionReader; use crate::core::ics03_connection::error::Error; @@ -11,7 +12,7 @@ use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; -pub(crate) fn process( +pub(crate) fn process( ctx: &dyn ConnectionReader, msg: MsgConnectionOpenConfirm, ) -> HandlerResult { @@ -40,7 +41,7 @@ pub(crate) fn process( ); // 2. Pass the details to the verification function. - verify_proofs( + verify_proofs::( ctx, None, msg.proofs.height(), diff --git a/modules/src/core/ics03_connection/handler/conn_open_try.rs b/modules/src/core/ics03_connection/handler/conn_open_try.rs index b9b2132029..530005bb28 100644 --- a/modules/src/core/ics03_connection/handler/conn_open_try.rs +++ b/modules/src/core/ics03_connection/handler/conn_open_try.rs @@ -1,5 +1,6 @@ //! Protocol logic specific to processing ICS3 messages of type `MsgConnectionOpenTry`. 
+use crate::clients::ics11_beefy::client_def::BeefyLCStore; use crate::core::ics03_connection::connection::{ConnectionEnd, Counterparty, State}; use crate::core::ics03_connection::context::ConnectionReader; use crate::core::ics03_connection::error::Error; @@ -14,7 +15,7 @@ use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; -pub(crate) fn process( +pub(crate) fn process( ctx: &dyn ConnectionReader, msg: MsgConnectionOpenTry, ) -> HandlerResult { @@ -78,7 +79,7 @@ pub(crate) fn process( ); // 2. Pass the details to the verification function. - verify_proofs( + verify_proofs::( ctx, msg.client_state.clone(), msg.proofs.height(), diff --git a/modules/src/core/ics03_connection/handler/verify.rs b/modules/src/core/ics03_connection/handler/verify.rs index 71e6ee65d3..b97e04a8ee 100644 --- a/modules/src/core/ics03_connection/handler/verify.rs +++ b/modules/src/core/ics03_connection/handler/verify.rs @@ -1,5 +1,6 @@ //! ICS3 verification functions, common across all four handlers of ICS3. +use crate::clients::ics11_beefy::client_def::BeefyLCStore; use crate::core::ics02_client::client_consensus::ConsensusState; use crate::core::ics02_client::client_state::{AnyClientState, ClientState}; use crate::core::ics02_client::{client_def::AnyClient, client_def::ClientDef}; @@ -11,7 +12,7 @@ use crate::proofs::{ConsensusProof, Proofs}; use crate::Height; /// Entry point for verifying all proofs bundled in any ICS3 message. -pub fn verify_proofs( +pub fn verify_proofs( ctx: &dyn ConnectionReader, client_state: Option, height: Height, @@ -19,7 +20,7 @@ pub fn verify_proofs( expected_conn: &ConnectionEnd, proofs: &Proofs, ) -> Result<(), Error> { - verify_connection_proof( + verify_connection_proof::( ctx, height, connection_end, @@ -30,7 +31,7 @@ pub fn verify_proofs( // If the message includes a client state, then verify the proof for that state. if let Some(expected_client_state) = client_state { - verify_client_proof( + verify_client_proof::( ctx, height, connection_end, @@ -45,7 +46,12 @@ pub fn verify_proofs( // If a consensus proof is attached to the message, then verify it. if let Some(proof) = proofs.consensus_proof() { - Ok(verify_consensus_proof(ctx, height, connection_end, &proof)?) + Ok(verify_consensus_proof::( + ctx, + height, + connection_end, + &proof, + )?) } else { Ok(()) } @@ -54,7 +60,7 @@ pub fn verify_proofs( /// Verifies the authenticity and semantic correctness of a commitment `proof`. The commitment /// claims to prove that an object of type connection exists on the source chain (i.e., the chain /// which created this proof). This object must match the state of `expected_conn`. -pub fn verify_connection_proof( +pub fn verify_connection_proof( ctx: &dyn ConnectionReader, height: Height, connection_end: &ConnectionEnd, @@ -80,7 +86,7 @@ pub fn verify_connection_proof( .connection_id() .ok_or_else(Error::invalid_counterparty)?; - let client_def = AnyClient::from_client_type(client_state.client_type()); + let client_def = AnyClient::::from_client_type(client_state.client_type()); // Verify the proof for the connection state against the expected connection end. client_def @@ -103,7 +109,7 @@ pub fn verify_connection_proof( /// complete verification: that the client state the counterparty stores is valid (i.e., not frozen, /// at the same revision as the current chain, with matching chain identifiers, etc) and that the /// `proof` is correct. 
-pub fn verify_client_proof( +pub fn verify_client_proof( ctx: &dyn ConnectionReader, height: Height, connection_end: &ConnectionEnd, @@ -120,7 +126,7 @@ pub fn verify_client_proof( let consensus_state = ctx.client_consensus_state(connection_end.client_id(), proof_height)?; - let client_def = AnyClient::from_client_type(client_state.client_type()); + let client_def = AnyClient::::from_client_type(client_state.client_type()); client_def .verify_client_full_state( @@ -137,7 +143,7 @@ pub fn verify_client_proof( }) } -pub fn verify_consensus_proof( +pub fn verify_consensus_proof( ctx: &dyn ConnectionReader, height: Height, connection_end: &ConnectionEnd, @@ -155,7 +161,7 @@ pub fn verify_consensus_proof( let consensus_state = ctx.client_consensus_state(connection_end.client_id(), height)?; - let client = AnyClient::from_client_type(client_state.client_type()); + let client = AnyClient::::from_client_type(client_state.client_type()); client .verify_client_consensus_state( diff --git a/modules/src/core/ics04_channel/handler.rs b/modules/src/core/ics04_channel/handler.rs index 0f428ea3cd..4b000aee2b 100644 --- a/modules/src/core/ics04_channel/handler.rs +++ b/modules/src/core/ics04_channel/handler.rs @@ -1,5 +1,6 @@ //! This module implements the processing logic for ICS4 (channel) messages. +use crate::clients::ics11_beefy::client_def::BeefyLCStore; use crate::core::ics04_channel::channel::ChannelEnd; use crate::core::ics04_channel::context::ChannelReader; use crate::core::ics04_channel::error::Error; @@ -58,20 +59,21 @@ where /// General entry point for processing any type of message related to the ICS4 channel open and /// channel close handshake protocols. -pub fn channel_dispatch( +pub fn channel_dispatch( ctx: &Ctx, msg: &ChannelMsg, ) -> Result<(HandlerOutputBuilder<()>, ChannelResult), Error> where Ctx: ChannelReader, + Beefy: BeefyLCStore, { let output = match msg { ChannelMsg::ChannelOpenInit(msg) => chan_open_init::process(ctx, msg), - ChannelMsg::ChannelOpenTry(msg) => chan_open_try::process(ctx, msg), - ChannelMsg::ChannelOpenAck(msg) => chan_open_ack::process(ctx, msg), - ChannelMsg::ChannelOpenConfirm(msg) => chan_open_confirm::process(ctx, msg), + ChannelMsg::ChannelOpenTry(msg) => chan_open_try::process::(ctx, msg), + ChannelMsg::ChannelOpenAck(msg) => chan_open_ack::process::(ctx, msg), + ChannelMsg::ChannelOpenConfirm(msg) => chan_open_confirm::process::(ctx, msg), ChannelMsg::ChannelCloseInit(msg) => chan_close_init::process(ctx, msg), - ChannelMsg::ChannelCloseConfirm(msg) => chan_close_confirm::process(ctx, msg), + ChannelMsg::ChannelCloseConfirm(msg) => chan_close_confirm::process::(ctx, msg), }?; let HandlerOutput { result, @@ -166,18 +168,19 @@ where } /// Dispatcher for processing any type of message related to the ICS4 packet protocols. 
-pub fn packet_dispatch( +pub fn packet_dispatch( ctx: &Ctx, msg: &PacketMsg, ) -> Result<(HandlerOutputBuilder<()>, PacketResult), Error> where Ctx: ChannelReader, + Beefy: BeefyLCStore, { let output = match msg { - PacketMsg::RecvPacket(msg) => recv_packet::process(ctx, msg), - PacketMsg::AckPacket(msg) => acknowledgement::process(ctx, msg), - PacketMsg::ToPacket(msg) => timeout::process(ctx, msg), - PacketMsg::ToClosePacket(msg) => timeout_on_close::process(ctx, msg), + PacketMsg::RecvPacket(msg) => recv_packet::process::(ctx, msg), + PacketMsg::AckPacket(msg) => acknowledgement::process::(ctx, msg), + PacketMsg::ToPacket(msg) => timeout::process::(ctx, msg), + PacketMsg::ToClosePacket(msg) => timeout_on_close::process::(ctx, msg), }?; let HandlerOutput { result, diff --git a/modules/src/core/ics04_channel/handler/acknowledgement.rs b/modules/src/core/ics04_channel/handler/acknowledgement.rs index 5246953b8d..67df501221 100644 --- a/modules/src/core/ics04_channel/handler/acknowledgement.rs +++ b/modules/src/core/ics04_channel/handler/acknowledgement.rs @@ -1,3 +1,4 @@ +use crate::clients::ics11_beefy::client_def::BeefyLCStore; use crate::core::ics03_connection::connection::State as ConnectionState; use crate::core::ics04_channel::channel::State; use crate::core::ics04_channel::channel::{Counterparty, Order}; @@ -19,7 +20,7 @@ pub struct AckPacketResult { pub seq_number: Option, } -pub fn process( +pub fn process( ctx: &dyn ChannelReader, msg: &MsgAcknowledgement, ) -> HandlerResult { @@ -72,7 +73,7 @@ pub fn process( } // Verify the acknowledgement proof - verify_packet_acknowledgement_proofs( + verify_packet_acknowledgement_proofs::( ctx, msg.proofs.height(), packet, diff --git a/modules/src/core/ics04_channel/handler/chan_close_confirm.rs b/modules/src/core/ics04_channel/handler/chan_close_confirm.rs index fb47d41ac0..dae79b1b6d 100644 --- a/modules/src/core/ics04_channel/handler/chan_close_confirm.rs +++ b/modules/src/core/ics04_channel/handler/chan_close_confirm.rs @@ -1,4 +1,5 @@ //! Protocol logic specific to ICS4 messages of type `MsgChannelCloseConfirm`. +use crate::clients::ics11_beefy::client_def::BeefyLCStore; use crate::core::ics03_connection::connection::State as ConnectionState; use crate::core::ics04_channel::channel::{ChannelEnd, Counterparty, State}; use crate::core::ics04_channel::context::ChannelReader; @@ -11,7 +12,7 @@ use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; -pub(crate) fn process( +pub(crate) fn process( ctx: &dyn ChannelReader, msg: &MsgChannelCloseConfirm, ) -> HandlerResult { @@ -61,7 +62,7 @@ pub(crate) fn process( channel_end.version().clone(), ); - verify_channel_proofs( + verify_channel_proofs::( ctx, msg.proofs.height(), &channel_end, diff --git a/modules/src/core/ics04_channel/handler/chan_open_ack.rs b/modules/src/core/ics04_channel/handler/chan_open_ack.rs index e883694e3e..e44e0d7052 100644 --- a/modules/src/core/ics04_channel/handler/chan_open_ack.rs +++ b/modules/src/core/ics04_channel/handler/chan_open_ack.rs @@ -1,4 +1,5 @@ //! Protocol logic specific to ICS4 messages of type `MsgChannelOpenAck`. 
+use crate::clients::ics11_beefy::client_def::BeefyLCStore; use crate::core::ics03_connection::connection::State as ConnectionState; use crate::core::ics04_channel::channel::{ChannelEnd, Counterparty, State}; use crate::core::ics04_channel::context::ChannelReader; @@ -11,7 +12,7 @@ use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; -pub(crate) fn process( +pub(crate) fn process( ctx: &dyn ChannelReader, msg: &MsgChannelOpenAck, ) -> HandlerResult { @@ -69,7 +70,7 @@ pub(crate) fn process( channel_end.set_counterparty_channel_id(msg.counterparty_channel_id); //2. Verify proofs - verify_channel_proofs( + verify_channel_proofs::( ctx, msg.proofs.height(), &channel_end, diff --git a/modules/src/core/ics04_channel/handler/chan_open_confirm.rs b/modules/src/core/ics04_channel/handler/chan_open_confirm.rs index 23420c4fa5..d21d53e900 100644 --- a/modules/src/core/ics04_channel/handler/chan_open_confirm.rs +++ b/modules/src/core/ics04_channel/handler/chan_open_confirm.rs @@ -1,4 +1,5 @@ //! Protocol logic specific to ICS4 messages of type `MsgChannelOpenConfirm`. +use crate::clients::ics11_beefy::client_def::BeefyLCStore; use crate::core::ics03_connection::connection::State as ConnectionState; use crate::core::ics04_channel::channel::{ChannelEnd, Counterparty, State}; use crate::core::ics04_channel::context::ChannelReader; @@ -11,7 +12,7 @@ use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; -pub(crate) fn process( +pub(crate) fn process( ctx: &dyn ChannelReader, msg: &MsgChannelOpenConfirm, ) -> HandlerResult { @@ -64,7 +65,7 @@ pub(crate) fn process( channel_end.version().clone(), ); //2. Verify proofs - verify_channel_proofs( + verify_channel_proofs::( ctx, msg.proofs.height(), &channel_end, diff --git a/modules/src/core/ics04_channel/handler/chan_open_try.rs b/modules/src/core/ics04_channel/handler/chan_open_try.rs index 126a7ad8e9..d319d0ce85 100644 --- a/modules/src/core/ics04_channel/handler/chan_open_try.rs +++ b/modules/src/core/ics04_channel/handler/chan_open_try.rs @@ -1,5 +1,6 @@ //! Protocol logic specific to ICS4 messages of type `MsgChannelOpenTry`. +use crate::clients::ics11_beefy::client_def::BeefyLCStore; use crate::core::ics03_connection::connection::State as ConnectionState; use crate::core::ics04_channel::channel::{ChannelEnd, Counterparty, State}; use crate::core::ics04_channel::context::ChannelReader; @@ -13,7 +14,7 @@ use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; -pub(crate) fn process( +pub(crate) fn process( ctx: &dyn ChannelReader, msg: &MsgChannelOpenTry, ) -> HandlerResult { @@ -108,7 +109,7 @@ pub(crate) fn process( ); // 2. Actual proofs are verified now. 
- verify_channel_proofs( + verify_channel_proofs::( ctx, msg.proofs.height(), &new_channel_end, diff --git a/modules/src/core/ics04_channel/handler/recv_packet.rs b/modules/src/core/ics04_channel/handler/recv_packet.rs index 8352f7e245..a8e1b33f07 100644 --- a/modules/src/core/ics04_channel/handler/recv_packet.rs +++ b/modules/src/core/ics04_channel/handler/recv_packet.rs @@ -1,3 +1,4 @@ +use crate::clients::ics11_beefy::client_def::BeefyLCStore; use crate::core::ics03_connection::connection::State as ConnectionState; use crate::core::ics04_channel::channel::{Counterparty, Order, State}; use crate::core::ics04_channel::context::ChannelReader; @@ -27,7 +28,10 @@ pub enum RecvPacketResult { NoOp, } -pub fn process(ctx: &dyn ChannelReader, msg: &MsgRecvPacket) -> HandlerResult { +pub fn process( + ctx: &dyn ChannelReader, + msg: &MsgRecvPacket, +) -> HandlerResult { let mut output = HandlerOutput::builder(); let packet = &msg.packet; @@ -72,7 +76,7 @@ pub fn process(ctx: &dyn ChannelReader, msg: &MsgRecvPacket) -> HandlerResult( ctx, msg.proofs.height(), packet, diff --git a/modules/src/core/ics04_channel/handler/timeout.rs b/modules/src/core/ics04_channel/handler/timeout.rs index c6153aa3cb..01c828a7a5 100644 --- a/modules/src/core/ics04_channel/handler/timeout.rs +++ b/modules/src/core/ics04_channel/handler/timeout.rs @@ -1,3 +1,4 @@ +use crate::clients::ics11_beefy::client_def::BeefyLCStore; use crate::core::ics04_channel::channel::State; use crate::core::ics04_channel::channel::{ChannelEnd, Counterparty, Order}; use crate::core::ics04_channel::events::TimeoutPacket; @@ -21,7 +22,10 @@ pub struct TimeoutPacketResult { pub channel: Option, } -pub fn process(ctx: &dyn ChannelReader, msg: &MsgTimeout) -> HandlerResult { +pub fn process( + ctx: &dyn ChannelReader, + msg: &MsgTimeout, +) -> HandlerResult { let mut output = HandlerOutput::builder(); let packet = &msg.packet; @@ -95,7 +99,7 @@ pub fn process(ctx: &dyn ChannelReader, msg: &MsgTimeout) -> HandlerResult( ctx, msg.proofs.height(), &connection_end, @@ -112,7 +116,7 @@ pub fn process(ctx: &dyn ChannelReader, msg: &MsgTimeout) -> HandlerResult( ctx, msg.proofs.height(), &connection_end, diff --git a/modules/src/core/ics04_channel/handler/timeout_on_close.rs b/modules/src/core/ics04_channel/handler/timeout_on_close.rs index 04a7550ec6..ce76cfdd24 100644 --- a/modules/src/core/ics04_channel/handler/timeout_on_close.rs +++ b/modules/src/core/ics04_channel/handler/timeout_on_close.rs @@ -1,3 +1,4 @@ +use crate::clients::ics11_beefy::client_def::BeefyLCStore; use crate::core::ics04_channel::channel::State; use crate::core::ics04_channel::channel::{ChannelEnd, Counterparty, Order}; use crate::core::ics04_channel::events::TimeoutOnClosePacket; @@ -14,7 +15,7 @@ use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; -pub fn process( +pub fn process( ctx: &dyn ChannelReader, msg: &MsgTimeoutOnClose, ) -> HandlerResult { @@ -73,7 +74,7 @@ pub fn process( source_channel_end.version().clone(), ); - verify_channel_proofs( + verify_channel_proofs::( ctx, msg.proofs.height(), &source_channel_end, @@ -89,7 +90,7 @@ pub fn process( msg.next_sequence_recv, )); } - verify_next_sequence_recv( + verify_next_sequence_recv::( ctx, msg.proofs.height(), &connection_end, @@ -105,7 +106,7 @@ pub fn process( channel: Some(source_channel_end), }) } else { - verify_packet_receipt_absence( + verify_packet_receipt_absence::( ctx, msg.proofs.height(), &connection_end, diff --git 
a/modules/src/core/ics04_channel/handler/verify.rs b/modules/src/core/ics04_channel/handler/verify.rs index 96735f4938..9a91032353 100644 --- a/modules/src/core/ics04_channel/handler/verify.rs +++ b/modules/src/core/ics04_channel/handler/verify.rs @@ -1,3 +1,4 @@ +use crate::clients::ics11_beefy::client_def::BeefyLCStore; use crate::core::ics02_client::client_consensus::ConsensusState; use crate::core::ics02_client::client_state::ClientState; use crate::core::ics02_client::{client_def::AnyClient, client_def::ClientDef}; @@ -12,7 +13,7 @@ use crate::proofs::Proofs; use crate::Height; /// Entry point for verifying all proofs bundled in any ICS4 message for channel protocols. -pub fn verify_channel_proofs( +pub fn verify_channel_proofs( ctx: &dyn ChannelReader, height: Height, channel_end: &ChannelEnd, @@ -32,7 +33,7 @@ pub fn verify_channel_proofs( let consensus_state = ctx.client_consensus_state(&client_id, proofs.height())?; - let client_def = AnyClient::from_client_type(client_state.client_type()); + let client_def = AnyClient::::from_client_type(client_state.client_type()); // Verify the proof for the channel state against the expected channel end. // A counterparty channel id of None in not possible, and is checked by validate_basic in msg. @@ -51,7 +52,7 @@ pub fn verify_channel_proofs( } /// Entry point for verifying all proofs bundled in a ICS4 packet recv. message. -pub fn verify_packet_recv_proofs( +pub fn verify_packet_recv_proofs( ctx: &dyn ChannelReader, height: Height, packet: &Packet, @@ -68,7 +69,7 @@ pub fn verify_packet_recv_proofs( let consensus_state = ctx.client_consensus_state(client_id, proofs.height())?; - let client_def = AnyClient::from_client_type(client_state.client_type()); + let client_def = AnyClient::::from_client_type(client_state.client_type()); let commitment = ctx.packet_commitment( packet.data.clone(), @@ -96,7 +97,7 @@ pub fn verify_packet_recv_proofs( } /// Entry point for verifying all proofs bundled in an ICS4 packet ack message. -pub fn verify_packet_acknowledgement_proofs( +pub fn verify_packet_acknowledgement_proofs( ctx: &dyn ChannelReader, height: Height, packet: &Packet, @@ -116,7 +117,7 @@ pub fn verify_packet_acknowledgement_proofs( let ack_commitment = ctx.ack_commitment(acknowledgement); - let client_def = AnyClient::from_client_type(client_state.client_type()); + let client_def = AnyClient::::from_client_type(client_state.client_type()); // Verify the proof for the packet against the chain store. client_def @@ -138,7 +139,7 @@ pub fn verify_packet_acknowledgement_proofs( } /// Entry point for verifying all timeout proofs. -pub fn verify_next_sequence_recv( +pub fn verify_next_sequence_recv( ctx: &dyn ChannelReader, height: Height, connection_end: &ConnectionEnd, @@ -156,7 +157,7 @@ pub fn verify_next_sequence_recv( let consensus_state = ctx.client_consensus_state(client_id, proofs.height())?; - let client_def = AnyClient::from_client_type(client_state.client_type()); + let client_def = AnyClient::::from_client_type(client_state.client_type()); // Verify the proof for the packet against the chain store. 
client_def @@ -176,7 +177,7 @@ pub fn verify_next_sequence_recv( Ok(()) } -pub fn verify_packet_receipt_absence( +pub fn verify_packet_receipt_absence( ctx: &dyn ChannelReader, height: Height, connection_end: &ConnectionEnd, @@ -193,7 +194,7 @@ pub fn verify_packet_receipt_absence( let consensus_state = ctx.client_consensus_state(client_id, proofs.height())?; - let client_def = AnyClient::from_client_type(client_state.client_type()); + let client_def = AnyClient::::from_client_type(client_state.client_type()); // Verify the proof for the packet against the chain store. client_def diff --git a/modules/src/core/ics24_host/identifier.rs b/modules/src/core/ics24_host/identifier.rs index 21f53b9363..efe9a8f613 100644 --- a/modules/src/core/ics24_host/identifier.rs +++ b/modules/src/core/ics24_host/identifier.rs @@ -171,7 +171,7 @@ impl ClientId { pub fn prefix(client_type: ClientType) -> &'static str { match client_type { ClientType::Tendermint => ClientType::Tendermint.as_str(), - + ClientType::Beefy => ClientType::Beefy.as_str(), #[cfg(any(test, feature = "mocks"))] ClientType::Mock => ClientType::Mock.as_str(), } diff --git a/modules/src/core/ics26_routing/handler.rs b/modules/src/core/ics26_routing/handler.rs index b5d5362ec5..03ca610512 100644 --- a/modules/src/core/ics26_routing/handler.rs +++ b/modules/src/core/ics26_routing/handler.rs @@ -2,6 +2,8 @@ use crate::prelude::*; use ibc_proto::google::protobuf::Any; +use crate::applications::ics20_fungible_token_transfer::relay_application_logic::send_transfer::send_transfer as ics20_msg_dispatcher; +use crate::clients::ics11_beefy::client_def::BeefyLCStore; use crate::core::ics02_client::handler::dispatch as ics2_msg_dispatcher; use crate::core::ics03_connection::handler::dispatch as ics3_msg_dispatcher; use crate::core::ics04_channel::handler::{ @@ -30,15 +32,19 @@ pub struct MsgReceipt { /// Mimics the DeliverTx ABCI interface, but for a single message and at a slightly lower level. /// No need for authentication info or signature checks here. /// Returns a vector of all events that got generated as a byproduct of processing `message`. -pub fn deliver(ctx: &mut Ctx, message: Any) -> Result +pub fn deliver( + ctx: &mut Ctx, + message: Any, +) -> Result<(Vec, Vec), Error> where Ctx: Ics26Context, + Beefy: BeefyLCStore, { // Decode the proto message into a domain message, creating an ICS26 envelope. let envelope = decode(message)?; // Process the envelope, and accumulate any events that were generated. - let HandlerOutput { log, events, .. } = dispatch(ctx, envelope)?; + let output = dispatch::<_, Beefy>(ctx, envelope)?; Ok(MsgReceipt { events, log }) } @@ -53,13 +59,15 @@ pub fn decode(message: Any) -> Result { /// and events produced after processing the input `msg`. /// If this method returns an error, the runtime is expected to rollback all state modifications to /// the `Ctx` caused by all messages from the transaction that this `msg` is a part of. -pub fn dispatch(ctx: &mut Ctx, msg: Ics26Envelope) -> Result, Error> +pub fn dispatch(ctx: &mut Ctx, msg: Ics26Envelope) -> Result, Error> where Ctx: Ics26Context, + Beefy: BeefyLCStore, { let output = match msg { Ics2Msg(msg) => { - let handler_output = ics2_msg_dispatcher(ctx, msg).map_err(Error::ics02_client)?; + let handler_output = + ics2_msg_dispatcher::<_, Beefy>(ctx, msg).map_err(Error::ics02_client)?; // Apply the result to the context (host chain store). 
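// --- Illustrative sketch, not part of this patch -----------------------------
// With the `Beefy` parameter threaded through `deliver` above, a host chain
// picks a concrete `BeefyLCStore` implementation and supplies it via turbofish,
// mirroring the `dispatch::<_, Beefy>(ctx, envelope)` call in this hunk.
// `HostBeefy` is a hypothetical host-side type, and the `MsgReceipt { events, log }`
// return shape is assumed from the surrounding code.
fn relay_one_message<Ctx: Ics26Context>(ctx: &mut Ctx, msg: Any) -> Result<Vec<IbcEvent>, Error> {
    let receipt = deliver::<_, HostBeefy>(ctx, msg)?;
    Ok(receipt.events)
}
// ------------------------------------------------------------------------------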
ctx.store_client_result(handler_output.result) @@ -72,7 +80,8 @@ where } Ics3Msg(msg) => { - let handler_output = ics3_msg_dispatcher(ctx, msg).map_err(Error::ics03_connection)?; + let handler_output = + ics3_msg_dispatcher::<_, Beefy>(ctx, msg).map_err(Error::ics03_connection)?; // Apply any results to the host chain store. ctx.store_connection_result(handler_output.result) @@ -87,7 +96,7 @@ where Ics4ChannelMsg(msg) => { let module_id = ics4_validate(ctx, &msg).map_err(Error::ics04_channel)?; let (mut handler_builder, channel_result) = - ics4_msg_dispatcher(ctx, &msg).map_err(Error::ics04_channel)?; + ics4_msg_dispatcher::<_, Beefy>(ctx, &msg).map_err(Error::ics04_channel)?; let mut module_output = ModuleOutputBuilder::new(); let cb_result = @@ -105,7 +114,7 @@ where Ics4PacketMsg(msg) => { let module_id = get_module_for_packet_msg(ctx, &msg).map_err(Error::ics04_channel)?; let (mut handler_builder, packet_result) = - ics4_packet_msg_dispatcher(ctx, &msg).map_err(Error::ics04_channel)?; + ics4_packet_msg_dispatcher::<_, Beefy>(ctx, &msg).map_err(Error::ics04_channel)?; if matches!(packet_result, PacketResult::Recv(RecvPacketResult::NoOp)) { return Ok(handler_builder.with_result(())); diff --git a/modules/src/mock/client_def.rs b/modules/src/mock/client_def.rs index 427d7f3e29..413de852f5 100644 --- a/modules/src/mock/client_def.rs +++ b/modules/src/mock/client_def.rs @@ -1,7 +1,7 @@ use ibc_proto::ibc::core::commitment::v1::MerkleProof; use crate::core::ics02_client::client_consensus::AnyConsensusState; -use crate::core::ics02_client::client_def::ClientDef; +use crate::core::ics02_client::client_def::{ClientDef, ConsensusUpdateResult}; use crate::core::ics02_client::client_state::AnyClientState; use crate::core::ics02_client::context::ClientReader; use crate::core::ics02_client::error::Error; @@ -30,22 +30,23 @@ impl ClientDef for MockClient { type ClientState = MockClientState; type ConsensusState = MockConsensusState; - fn check_header_and_update_state( + fn update_state( &self, _ctx: &dyn ClientReader, _client_id: ClientId, client_state: Self::ClientState, header: Self::Header, - ) -> Result<(Self::ClientState, Self::ConsensusState), Error> { + ) -> Result<(Self::ClientState, ConsensusUpdateResult), Error> { if client_state.latest_height() >= header.height() { return Err(Error::low_header_height( header.height(), client_state.latest_height(), )); } + Ok(( MockClientState::new(header), - MockConsensusState::new(header), + ConsensusUpdateResult::Single(AnyConsensusState::Mock(MockConsensusState::new(header))), )) } @@ -180,7 +181,38 @@ impl ClientDef for MockClient { consensus_state: &Self::ConsensusState, _proof_upgrade_client: MerkleProof, _proof_upgrade_consensus_state: MerkleProof, - ) -> Result<(Self::ClientState, Self::ConsensusState), Error> { - Ok((*client_state, consensus_state.clone())) + ) -> Result<(Self::ClientState, ConsensusUpdateResult), Error> { + Ok(( + *client_state, + ConsensusUpdateResult::Single(AnyConsensusState::Mock(consensus_state.clone())), + )) + } + + fn verify_header( + &self, + _ctx: &dyn ClientReader, + _client_id: ClientId, + _client_state: Self::ClientState, + _header: Self::Header, + ) -> Result<(), Error> { + Ok(()) + } + + fn update_state_on_misbehaviour( + &self, + client_state: Self::ClientState, + _header: Self::Header, + ) -> Result { + Ok(client_state) + } + + fn check_for_misbehaviour( + &self, + _ctx: &dyn ClientReader, + _client_id: ClientId, + _client_state: Self::ClientState, + _header: Self::Header, + ) -> Result { + Ok(false) } } diff 
--git a/modules/src/relayer/ics18_relayer/utils.rs b/modules/src/relayer/ics18_relayer/utils.rs index 34cd79cd7a..51c0680b62 100644 --- a/modules/src/relayer/ics18_relayer/utils.rs +++ b/modules/src/relayer/ics18_relayer/utils.rs @@ -1,4 +1,4 @@ -use crate::core::ics02_client::header::{AnyHeader, Header}; +use crate::core::ics02_client::header::AnyHeader; use crate::core::ics02_client::msgs::update_client::MsgUpdateAnyClient; use crate::core::ics02_client::msgs::ClientMsg; use crate::core::ics24_host::identifier::ClientId; diff --git a/proto-compiler/src/cmd/clone.rs b/proto-compiler/src/cmd/clone.rs index 76556f5610..ce8ff132d6 100644 --- a/proto-compiler/src/cmd/clone.rs +++ b/proto-compiler/src/cmd/clone.rs @@ -26,7 +26,7 @@ pub struct CloneCmd { } pub const COSMOS_SDK_URL: &str = "https://github.com/cosmos/cosmos-sdk"; -pub const IBC_GO_URL: &str = "https://github.com/cosmos/ibc-go"; +pub const IBC_GO_URL: &str = "https://github.com/ComposableFi/ibc-go.git"; impl CloneCmd { pub fn validate(&self) { diff --git a/proto/src/IBC_GO_COMMIT b/proto/src/IBC_GO_COMMIT index 28c3c08558..96598685c4 100644 --- a/proto/src/IBC_GO_COMMIT +++ b/proto/src/IBC_GO_COMMIT @@ -1 +1 @@ -55344184b3a5a5eb2ad2a96a7b0f715a210494f9 +2923e0a7b627a0437ce6eea805120f1c31e3b525 diff --git a/scripts/sync-protobuf.sh b/scripts/sync-protobuf.sh index abb8b1ab94..d2143b642b 100755 --- a/scripts/sync-protobuf.sh +++ b/scripts/sync-protobuf.sh @@ -58,7 +58,7 @@ fi if [[ ! -e "$IBC_GO_GIT" ]] then echo "Cloning ibc-go source code to as bare git repository to $IBC_GO_GIT" - git clone --mirror https://github.com/cosmos/ibc-go.git "$IBC_GO_GIT" + git clone --mirror https://github.com/ComposableFi/ibc-go.git "$IBC_GO_GIT" else echo "Using existing ibc-go bare git repository at $IBC_GO_GIT" fi From 0ce15f3991b283ce7ac01daad0a16b44b1449c40 Mon Sep 17 00:00:00 2001 From: David Salami Date: Wed, 11 May 2022 10:19:32 +0100 Subject: [PATCH 06/96] store parachain latest heights --- modules/src/clients/ics11_beefy/client_def.rs | 22 +++++++++---------- modules/src/core/ics02_client/client_def.rs | 4 ++-- modules/src/core/ics02_client/context.rs | 9 -------- .../ics02_client/handler/update_client.rs | 2 +- modules/src/mock/client_def.rs | 6 ++--- modules/src/mock/context.rs | 2 ++ modules/src/mock/header.rs | 12 ++++------ 7 files changed, 22 insertions(+), 35 deletions(-) diff --git a/modules/src/clients/ics11_beefy/client_def.rs b/modules/src/clients/ics11_beefy/client_def.rs index 7f54c00641..f0c7149ad5 100644 --- a/modules/src/clients/ics11_beefy/client_def.rs +++ b/modules/src/clients/ics11_beefy/client_def.rs @@ -32,6 +32,7 @@ use crate::core::ics24_host::identifier::{ChannelId, ClientId, PortId}; use crate::core::ics24_host::Path; use crate::prelude::*; use crate::Height; +use alloc::collections::BTreeMap; use crate::core::ics24_host::path::{ AcksPath, ChannelEndsPath, ClientConsensusStatePath, ClientStatePath, CommitmentsPath, @@ -131,8 +132,13 @@ impl ClientDef for BeefyClient { client_state: Self::ClientState, header: Self::Header, ) -> Result<(Self::ClientState, ConsensusUpdateResult), Error> { + let mut parachain_latest_heights = BTreeMap::new(); let mut parachain_cs_states = vec![]; for header in header.parachain_headers { + let parachain_height = parachain_latest_heights.entry(header.para_id).or_default(); + if header.parachain_header.number > *parachain_height { + *parachain_height = header.parachain_header.number; + } let height = Height::new(header.para_id as u64, header.parachain_header.number as u64); // Skip 
duplicate consensus states if let Ok(_) = ctx.consensus_state(&client_id, height) { @@ -144,17 +150,11 @@ impl ClientDef for BeefyClient { )) } - let best_cs_state = if let Some(cs_state) = last_seen_cs { - cs_state - } else { - trusted_consensus_state - }; - - let best_para_height = if let Some(best_height) = last_seen_height { - best_height - } else { - latest_para_height - }; + let parachain_latest_heights = parachain_latest_heights + .into_iter() + .map(|(para_id, height)| (para_id as u64, height as u64)) + .collect(); + Store::store_latest_parachains_height(parachain_latest_heights)?; let mmr_state = self .store .mmr_state() diff --git a/modules/src/core/ics02_client/client_def.rs b/modules/src/core/ics02_client/client_def.rs index 0d75a10e6f..86b44ab769 100644 --- a/modules/src/core/ics02_client/client_def.rs +++ b/modules/src/core/ics02_client/client_def.rs @@ -339,7 +339,7 @@ impl ClientDef for AnyClient { Ok(Self::ClientState::Beefy(client_state)) } #[cfg(any(test, feature = "mocks"))] - AnyClient::Mock(_) => { + AnyClient::Mock(client) => { let (client_state, header) = downcast!( client_state => AnyClientState::Mock, header => AnyHeader::Mock, @@ -379,7 +379,7 @@ impl ClientDef for AnyClient { client.check_for_misbehaviour(ctx, client_id, client_state, header) } #[cfg(any(test, feature = "mocks"))] - AnyClient::Mock(_) => { + AnyClient::Mock(client ) => { let (client_state, header) = downcast!( client_state => AnyClientState::Mock, header => AnyHeader::Mock, diff --git a/modules/src/core/ics02_client/context.rs b/modules/src/core/ics02_client/context.rs index cc808c020c..2d9075187d 100644 --- a/modules/src/core/ics02_client/context.rs +++ b/modules/src/core/ics02_client/context.rs @@ -200,15 +200,6 @@ pub trait ClientKeeper { consensus_state: AnyConsensusState, ) -> Result<(), Error>; - /// Called upon successful client creation and update for beefy light client - fn store_parachain_consensus_state( - &mut self, - client_id: ClientId, - para_id: u64, - height: Height, - consensus_state: AnyConsensusState, - ) -> Result<(), Error>; - /// Called upon client creation. /// Increases the counter which keeps track of how many clients have been created. /// Should never fail. 
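A minimal sketch of the bookkeeping added in the client_def.rs hunk above (illustrative only, not part of the patch): the latest height per parachain is a simple fold over the submitted parachain headers, keyed by para_id. The `ParachainHeader` below is a simplified stand-in carrying only the two fields that the fold uses.

use std::collections::BTreeMap;

// Simplified stand-in for the real parachain header type; only the fields
// used by the height bookkeeping are modelled.
struct ParachainHeader {
    para_id: u32,
    number: u32,
}

// Fold a batch of finalized parachain headers into (para_id, latest_height)
// pairs, mirroring the BTreeMap accumulation in the hunk above.
fn latest_heights(headers: &[ParachainHeader]) -> Vec<(u64, u64)> {
    let mut latest: BTreeMap<u32, u32> = BTreeMap::new();
    for header in headers {
        let entry = latest.entry(header.para_id).or_default();
        if header.number > *entry {
            *entry = header.number;
        }
    }
    latest
        .into_iter()
        .map(|(para_id, height)| (para_id as u64, height as u64))
        .collect()
}

Using a BTreeMap keeps the resulting (para_id, height) pairs deterministically ordered by para_id; note that the next patch in the series drops this accumulation again in favour of deriving the new client state directly from the header via `from_header`.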
diff --git a/modules/src/core/ics02_client/handler/update_client.rs b/modules/src/core/ics02_client/handler/update_client.rs index 1e445c5cfe..0f7020aae7 100644 --- a/modules/src/core/ics02_client/handler/update_client.rs +++ b/modules/src/core/ics02_client/handler/update_client.rs @@ -137,7 +137,7 @@ mod tests { use crate::core::ics02_client::error::{Error, ErrorDetail}; use crate::core::ics02_client::handler::dispatch; use crate::core::ics02_client::handler::ClientResult::Update; - use crate::core::ics02_client::header::{AnyHeader, Header}; + use crate::core::ics02_client::header::AnyHeader; use crate::core::ics02_client::msgs::update_client::MsgUpdateAnyClient; use crate::core::ics02_client::msgs::ClientMsg; use crate::core::ics24_host::identifier::{ChainId, ClientId}; diff --git a/modules/src/mock/client_def.rs b/modules/src/mock/client_def.rs index 413de852f5..5c7f3eab1c 100644 --- a/modules/src/mock/client_def.rs +++ b/modules/src/mock/client_def.rs @@ -1,5 +1,3 @@ -use ibc_proto::ibc::core::commitment::v1::MerkleProof; - use crate::core::ics02_client::client_consensus::AnyConsensusState; use crate::core::ics02_client::client_def::{ClientDef, ConsensusUpdateResult}; use crate::core::ics02_client::client_state::AnyClientState; @@ -179,8 +177,8 @@ impl ClientDef for MockClient { &self, client_state: &Self::ClientState, consensus_state: &Self::ConsensusState, - _proof_upgrade_client: MerkleProof, - _proof_upgrade_consensus_state: MerkleProof, + _proof_upgrade_client: Vec, + _proof_upgrade_consensus_state: Vec, ) -> Result<(Self::ClientState, ConsensusUpdateResult), Error> { Ok(( *client_state, diff --git a/modules/src/mock/context.rs b/modules/src/mock/context.rs index 4c59475803..fdc1f5e7e9 100644 --- a/modules/src/mock/context.rs +++ b/modules/src/mock/context.rs @@ -1092,6 +1092,7 @@ impl ClientReader for MockContext { &self, client_id: &ClientId, height: Height, + _filter_fn: Option bool>> ) -> Result, Ics02Error> { let ibc_store = self.ibc_store.lock().unwrap(); let client_record = ibc_store @@ -1120,6 +1121,7 @@ impl ClientReader for MockContext { &self, client_id: &ClientId, height: Height, + _filter_fn: Option bool>> ) -> Result, Ics02Error> { let ibc_store = self.ibc_store.lock().unwrap(); let client_record = ibc_store diff --git a/modules/src/mock/header.rs b/modules/src/mock/header.rs index 381975ae48..cc3c38b092 100644 --- a/modules/src/mock/header.rs +++ b/modules/src/mock/header.rs @@ -54,6 +54,10 @@ impl MockHeader { } } + pub fn timestamp(&self) -> Timestamp { + self.timestamp + } + pub fn with_timestamp(self, timestamp: Timestamp) -> Self { Self { timestamp, ..self } } @@ -70,14 +74,6 @@ impl Header for MockHeader { ClientType::Mock } - fn height(&self) -> Height { - self.height - } - - fn timestamp(&self) -> Timestamp { - self.timestamp - } - fn wrap_any(self) -> AnyHeader { AnyHeader::Mock(self) } From 2fdbe63797f28fd14f11ee806695be6e7cd0e03c Mon Sep 17 00:00:00 2001 From: David Salami Date: Thu, 12 May 2022 08:49:36 +0100 Subject: [PATCH 07/96] refactor client trait definition --- .gitignore | 3 + .../clients/ics07_tendermint/client_def.rs | 12 ++ modules/src/clients/ics11_beefy/client_def.rs | 167 +++++++++--------- .../src/clients/ics11_beefy/client_state.rs | 63 ++++--- modules/src/core/ics02_client/client_def.rs | 67 ++++++- modules/src/core/ics02_client/handler.rs | 4 +- .../ics02_client/handler/update_client.rs | 4 +- .../ics02_client/handler/upgrade_client.rs | 4 +- modules/src/core/ics03_connection/handler.rs | 4 +- 
.../ics03_connection/handler/conn_open_ack.rs | 4 +- .../handler/conn_open_confirm.rs | 4 +- .../ics03_connection/handler/conn_open_try.rs | 4 +- .../core/ics03_connection/handler/verify.rs | 14 +- modules/src/core/ics04_channel/handler.rs | 6 +- .../ics04_channel/handler/acknowledgement.rs | 4 +- .../handler/chan_close_confirm.rs | 4 +- .../ics04_channel/handler/chan_open_ack.rs | 4 +- .../handler/chan_open_confirm.rs | 4 +- .../ics04_channel/handler/chan_open_try.rs | 4 +- .../core/ics04_channel/handler/recv_packet.rs | 4 +- .../src/core/ics04_channel/handler/timeout.rs | 4 +- .../ics04_channel/handler/timeout_on_close.rs | 4 +- .../src/core/ics04_channel/handler/verify.rs | 18 +- modules/src/core/ics26_routing/handler.rs | 6 +- modules/src/mock/client_def.rs | 11 ++ modules/src/mock/context.rs | 4 +- 26 files changed, 263 insertions(+), 168 deletions(-) diff --git a/.gitignore b/.gitignore index 2e9b0e8816..13a9c9349b 100644 --- a/.gitignore +++ b/.gitignore @@ -2,6 +2,9 @@ # will have compiled files and executables target/ +# Ignore cargo patch +.cargo + # These are backup files generated by rustfmt **/*.rs.bk diff --git a/modules/src/clients/ics07_tendermint/client_def.rs b/modules/src/clients/ics07_tendermint/client_def.rs index cb23019ce6..7a8555cfbc 100644 --- a/modules/src/clients/ics07_tendermint/client_def.rs +++ b/modules/src/clients/ics07_tendermint/client_def.rs @@ -17,6 +17,7 @@ use crate::core::ics02_client::client_type::ClientType; use crate::core::ics02_client::context::ClientReader; use crate::core::ics02_client::error::Error as Ics02Error; use crate::core::ics03_connection::connection::ConnectionEnd; +use crate::core::ics03_connection::context::ConnectionReader; use crate::core::ics04_channel::channel::ChannelEnd; use crate::core::ics04_channel::commitment::{AcknowledgementCommitment, PacketCommitment}; use crate::core::ics04_channel::context::ChannelReader; @@ -226,6 +227,7 @@ impl ClientDef for TendermintClient { fn verify_client_consensus_state( &self, + _ctx: &dyn ConnectionReader, client_state: &Self::ClientState, height: Height, prefix: &CommitmentPrefix, @@ -250,6 +252,8 @@ impl ClientDef for TendermintClient { fn verify_connection_state( &self, + _ctx: &dyn ConnectionReader, + _client_id: &ClientId, client_state: &Self::ClientState, height: Height, prefix: &CommitmentPrefix, @@ -269,6 +273,8 @@ impl ClientDef for TendermintClient { fn verify_channel_state( &self, + _ctx: &dyn ChannelReader, + _client_id: &ClientId, client_state: &Self::ClientState, height: Height, prefix: &CommitmentPrefix, @@ -289,6 +295,7 @@ impl ClientDef for TendermintClient { fn verify_client_full_state( &self, + _ctx: &dyn ConnectionReader, client_state: &Self::ClientState, height: Height, prefix: &CommitmentPrefix, @@ -309,6 +316,7 @@ impl ClientDef for TendermintClient { fn verify_packet_data( &self, ctx: &dyn ChannelReader, + _client_id: &ClientId, client_state: &Self::ClientState, height: Height, connection_end: &ConnectionEnd, @@ -341,6 +349,7 @@ impl ClientDef for TendermintClient { fn verify_packet_acknowledgement( &self, ctx: &dyn ChannelReader, + _client_id: &ClientId, client_state: &Self::ClientState, height: Height, connection_end: &ConnectionEnd, @@ -351,6 +360,7 @@ impl ClientDef for TendermintClient { sequence: Sequence, ack_commitment: AcknowledgementCommitment, ) -> Result<(), Ics02Error> { + // client state height = consensus state height client_state.verify_height(height)?; verify_delay_passed(ctx, height, connection_end)?; @@ -372,6 +382,7 @@ impl ClientDef for 
TendermintClient { fn verify_next_sequence_recv( &self, ctx: &dyn ChannelReader, + _client_id: &ClientId, client_state: &Self::ClientState, height: Height, connection_end: &ConnectionEnd, @@ -403,6 +414,7 @@ impl ClientDef for TendermintClient { fn verify_packet_receipt_absence( &self, ctx: &dyn ChannelReader, + _client_id: &ClientId, client_state: &Self::ClientState, height: Height, connection_end: &ConnectionEnd, diff --git a/modules/src/clients/ics11_beefy/client_def.rs b/modules/src/clients/ics11_beefy/client_def.rs index f0c7149ad5..37efab6f45 100644 --- a/modules/src/clients/ics11_beefy/client_def.rs +++ b/modules/src/clients/ics11_beefy/client_def.rs @@ -1,5 +1,5 @@ use beefy_client::primitives::{ParachainHeader, ParachainsUpdateProof}; -use beefy_client::traits::{HostFunctions, StorageRead, StorageWrite}; +use beefy_client::traits::{ClientState as LightClientState, HostFunctions as BeefyHostFunctions}; use beefy_client::BeefyLightClient; use codec::Encode; use core::convert::TryInto; @@ -18,6 +18,7 @@ use crate::core::ics02_client::client_type::ClientType; use crate::core::ics02_client::context::ClientReader; use crate::core::ics02_client::error::Error; use crate::core::ics03_connection::connection::ConnectionEnd; +use crate::core::ics03_connection::context::ConnectionReader; use crate::core::ics04_channel::channel::ChannelEnd; use crate::core::ics04_channel::commitment::{AcknowledgementCommitment, PacketCommitment}; use crate::core::ics04_channel::context::ChannelReader; @@ -32,7 +33,6 @@ use crate::core::ics24_host::identifier::{ChannelId, ClientId, PortId}; use crate::core::ics24_host::Path; use crate::prelude::*; use crate::Height; -use alloc::collections::BTreeMap; use crate::core::ics24_host::path::{ AcksPath, ChannelEndsPath, ClientConsensusStatePath, ClientStatePath, CommitmentsPath, @@ -41,7 +41,7 @@ use crate::core::ics24_host::path::{ use crate::downcast; /// Methods definitions specific to Beefy Light Client operation -pub trait BeefyLCStore: StorageRead + StorageWrite + HostFunctions + Clone + Default { +pub trait BeefyTraits: BeefyHostFunctions + Clone + Default { /// This function should verify membership in a trie proof using parity's sp-trie package /// with a BlakeTwo256 Hasher fn verify_membership_trie_proof( @@ -57,16 +57,12 @@ pub trait BeefyLCStore: StorageRead + StorageWrite + HostFunctions + Clone + Def proof: &Vec>, key: &[u8], ) -> Result<(), Error>; - fn store_latest_parachains_height(para_id_and_heights: Vec<(u64, u64)>) -> Result<(), Error>; - fn get_parachain_latest_height(para_id: u64) -> Result; } #[derive(Clone, Debug, Default, PartialEq, Eq)] -pub struct BeefyClient { - store: Store, -} +pub struct BeefyClient(core::marker::PhantomData); -impl ClientDef for BeefyClient { +impl ClientDef for BeefyClient { type Header = BeefyHeader; type ClientState = ClientState; type ConsensusState = ConsensusState; @@ -78,12 +74,21 @@ impl ClientDef for BeefyClient { client_state: Self::ClientState, header: Self::Header, ) -> Result<(), Error> { - let mut light_client = BeefyLightClient::<_, Store>::new(self.store.clone()); - if let Some(mmr_update) = header.mmr_update_proof { + let light_client_state = LightClientState { + latest_beefy_height: client_state.latest_beefy_height, + mmr_root_hash: client_state.mmr_root_hash, + current_authorities: client_state.authority.clone(), + next_authorities: client_state.next_authority_set.clone(), + }; + let mut light_client = BeefyLightClient::::new(); + // If mmr update exists verify it and return the new light client state 
+ let light_client_state = if let Some(mmr_update) = header.mmr_update_proof { light_client - .ingest_mmr_root_with_proof(mmr_update) - .map_err(|e| Error::beefy(BeefyError::invalid_mmr_update(format!("{:?}", e))))?; - } + .ingest_mmr_root_with_proof(light_client_state, mmr_update) + .map_err(|e| Error::beefy(BeefyError::invalid_mmr_update(format!("{:?}", e))))? + } else { + light_client_state + }; let mut leaf_indices = vec![]; let parachain_headers = header @@ -105,7 +110,9 @@ impl ClientDef for BeefyClient { } }) .collect::>(); - let leaf_count = (client_state.to_leaf_index(client_state.latest_beefy_height) + 1) as u64; + + let leaf_count = + (client_state.to_leaf_index(light_client_state.latest_beefy_height) + 1) as u64; let parachain_update_proof = ParachainsUpdateProof { parachain_headers, @@ -121,7 +128,7 @@ impl ClientDef for BeefyClient { }; light_client - .verify_parachain_headers(parachain_update_proof) + .verify_parachain_headers(light_client_state, parachain_update_proof) .map_err(|e| Error::beefy(BeefyError::invalid_mmr_update(format!("{:?}", e)))) } @@ -132,13 +139,11 @@ impl ClientDef for BeefyClient { client_state: Self::ClientState, header: Self::Header, ) -> Result<(Self::ClientState, ConsensusUpdateResult), Error> { - let mut parachain_latest_heights = BTreeMap::new(); let mut parachain_cs_states = vec![]; + let client_state = client_state + .from_header(header.clone()) + .map_err(|e| Error::beefy(e))?; for header in header.parachain_headers { - let parachain_height = parachain_latest_heights.entry(header.para_id).or_default(); - if header.parachain_header.number > *parachain_height { - *parachain_height = header.parachain_header.number; - } let height = Height::new(header.para_id as u64, header.parachain_header.number as u64); // Skip duplicate consensus states if let Ok(_) = ctx.consensus_state(&client_id, height) { @@ -150,22 +155,6 @@ impl ClientDef for BeefyClient { )) } - let parachain_latest_heights = parachain_latest_heights - .into_iter() - .map(|(para_id, height)| (para_id as u64, height as u64)) - .collect(); - Store::store_latest_parachains_height(parachain_latest_heights)?; - let mmr_state = self - .store - .mmr_state() - .map_err(|e| Error::beefy(BeefyError::implementation_specific(format!("{:?}", e))))?; - let authorities = self - .store - .authority_set() - .map_err(|e| Error::beefy(BeefyError::implementation_specific(format!("{:?}", e))))?; - - let client_state = client_state.with_updates(mmr_state, authorities); - Ok(( client_state, ConsensusUpdateResult::Batch(parachain_cs_states), @@ -175,14 +164,18 @@ impl ClientDef for BeefyClient { fn update_state_on_misbehaviour( &self, client_state: Self::ClientState, - _header: Self::Header, + header: Self::Header, ) -> Result { - let mmr_state = self - .store - .mmr_state() - .map_err(|_| Error::beefy(BeefyError::missing_latest_height()))?; + let height = if let Some(mmr_update) = header.mmr_update_proof { + Height::new( + 0, + mmr_update.signed_commitment.commitment.block_number as u64, + ) + } else { + Height::new(0, client_state.latest_beefy_height as u64) + }; client_state - .with_frozen_height(Height::new(0, mmr_state.latest_beefy_height as u64)) + .with_frozen_height(height) .map_err(|e| Error::beefy(BeefyError::implementation_specific(e.to_string()))) } @@ -198,7 +191,8 @@ impl ClientDef for BeefyClient { fn verify_client_consensus_state( &self, - client_state: &Self::ClientState, + ctx: &dyn ConnectionReader, + _client_state: &Self::ClientState, height: Height, prefix: &CommitmentPrefix, proof: 
&CommitmentProofBytes, @@ -207,9 +201,8 @@ impl ClientDef for BeefyClient { consensus_height: Height, expected_consensus_state: &AnyConsensusState, ) -> Result<(), Error> { - client_state - .verify_parachain_height::(height) - .map_err(|e| Error::beefy(e))?; + ctx.client_consensus_state(client_id, height) + .map_err(|_| Error::consensus_state_not_found(client_id.clone(), height))?; let path = ClientConsensusStatePath { client_id: client_id.clone(), @@ -217,12 +210,14 @@ impl ClientDef for BeefyClient { height: consensus_height.revision_height, }; let value = expected_consensus_state.encode_vec().unwrap(); - verify_membership::(prefix, proof, root, path, value) + verify_membership::(prefix, proof, root, path, value) } fn verify_connection_state( &self, - client_state: &Self::ClientState, + ctx: &dyn ConnectionReader, + client_id: &ClientId, + _client_state: &Self::ClientState, height: Height, prefix: &CommitmentPrefix, proof: &CommitmentProofBytes, @@ -230,18 +225,19 @@ impl ClientDef for BeefyClient { connection_id: &ConnectionId, expected_connection_end: &ConnectionEnd, ) -> Result<(), Error> { - client_state - .verify_parachain_height::(height) - .map_err(|e| Error::beefy(e))?; + ctx.client_consensus_state(client_id, height) + .map_err(|_| Error::consensus_state_not_found(client_id.clone(), height))?; let path = ConnectionsPath(connection_id.clone()); let value = expected_connection_end.encode_vec().unwrap(); - verify_membership::(prefix, proof, root, path, value) + verify_membership::(prefix, proof, root, path, value) } fn verify_channel_state( &self, - client_state: &Self::ClientState, + ctx: &dyn ChannelReader, + client_id: &ClientId, + _client_state: &Self::ClientState, height: Height, prefix: &CommitmentPrefix, proof: &CommitmentProofBytes, @@ -250,18 +246,18 @@ impl ClientDef for BeefyClient { channel_id: &ChannelId, expected_channel_end: &ChannelEnd, ) -> Result<(), Error> { - client_state - .verify_parachain_height::(height) - .map_err(|e| Error::beefy(e))?; + ctx.client_consensus_state(client_id, height) + .map_err(|_| Error::consensus_state_not_found(client_id.clone(), height))?; let path = ChannelEndsPath(port_id.clone(), channel_id.clone()); let value = expected_channel_end.encode_vec().unwrap(); - verify_membership::(prefix, proof, root, path, value) + verify_membership::(prefix, proof, root, path, value) } fn verify_client_full_state( &self, - client_state: &Self::ClientState, + ctx: &dyn ConnectionReader, + _client_state: &Self::ClientState, height: Height, prefix: &CommitmentPrefix, proof: &CommitmentProofBytes, @@ -269,19 +265,19 @@ impl ClientDef for BeefyClient { client_id: &ClientId, expected_client_state: &AnyClientState, ) -> Result<(), Error> { - client_state - .verify_parachain_height::(height) - .map_err(|e| Error::beefy(e))?; + ctx.client_consensus_state(client_id, height) + .map_err(|_| Error::consensus_state_not_found(client_id.clone(), height))?; let path = ClientStatePath(client_id.clone()); let value = expected_client_state.encode_vec().unwrap(); - verify_membership::(prefix, proof, root, path, value) + verify_membership::(prefix, proof, root, path, value) } fn verify_packet_data( &self, ctx: &dyn ChannelReader, - client_state: &Self::ClientState, + client_id: &ClientId, + _client_state: &Self::ClientState, height: Height, connection_end: &ConnectionEnd, proof: &CommitmentProofBytes, @@ -291,9 +287,8 @@ impl ClientDef for BeefyClient { sequence: Sequence, commitment: PacketCommitment, ) -> Result<(), Error> { - client_state - 
.verify_parachain_height::(height) - .map_err(|e| Error::beefy(e))?; + ctx.client_consensus_state(client_id, height) + .map_err(|_| Error::consensus_state_not_found(client_id.clone(), height))?; verify_delay_passed(ctx, height, connection_end)?; let commitment_path = CommitmentsPath { @@ -302,7 +297,7 @@ impl ClientDef for BeefyClient { sequence, }; - verify_membership::( + verify_membership::( connection_end.counterparty().prefix(), proof, root, @@ -314,7 +309,8 @@ impl ClientDef for BeefyClient { fn verify_packet_acknowledgement( &self, ctx: &dyn ChannelReader, - client_state: &Self::ClientState, + client_id: &ClientId, + _client_state: &Self::ClientState, height: Height, connection_end: &ConnectionEnd, proof: &CommitmentProofBytes, @@ -324,9 +320,8 @@ impl ClientDef for BeefyClient { sequence: Sequence, ack: AcknowledgementCommitment, ) -> Result<(), Error> { - client_state - .verify_parachain_height::(height) - .map_err(|e| Error::beefy(e))?; + ctx.client_consensus_state(client_id, height) + .map_err(|_| Error::consensus_state_not_found(client_id.clone(), height))?; verify_delay_passed(ctx, height, connection_end)?; let ack_path = AcksPath { @@ -334,7 +329,7 @@ impl ClientDef for BeefyClient { channel_id: channel_id.clone(), sequence, }; - verify_membership::( + verify_membership::( connection_end.counterparty().prefix(), proof, root, @@ -346,7 +341,8 @@ impl ClientDef for BeefyClient { fn verify_next_sequence_recv( &self, ctx: &dyn ChannelReader, - client_state: &Self::ClientState, + client_id: &ClientId, + _client_state: &Self::ClientState, height: Height, connection_end: &ConnectionEnd, proof: &CommitmentProofBytes, @@ -355,15 +351,14 @@ impl ClientDef for BeefyClient { channel_id: &ChannelId, sequence: Sequence, ) -> Result<(), Error> { - client_state - .verify_parachain_height::(height) - .map_err(|e| Error::beefy(e))?; + ctx.client_consensus_state(client_id, height) + .map_err(|_| Error::consensus_state_not_found(client_id.clone(), height))?; verify_delay_passed(ctx, height, connection_end)?; let seq_bytes = codec::Encode::encode(&u64::from(sequence)); let seq_path = SeqRecvsPath(port_id.clone(), channel_id.clone()); - verify_membership::( + verify_membership::( connection_end.counterparty().prefix(), proof, root, @@ -375,7 +370,8 @@ impl ClientDef for BeefyClient { fn verify_packet_receipt_absence( &self, ctx: &dyn ChannelReader, - client_state: &Self::ClientState, + client_id: &ClientId, + _client_state: &Self::ClientState, height: Height, connection_end: &ConnectionEnd, proof: &CommitmentProofBytes, @@ -384,9 +380,8 @@ impl ClientDef for BeefyClient { channel_id: &ChannelId, sequence: Sequence, ) -> Result<(), Error> { - client_state - .verify_parachain_height::(height) - .map_err(|e| Error::beefy(e))?; + ctx.client_consensus_state(client_id, height) + .map_err(|_| Error::consensus_state_not_found(client_id.clone(), height))?; verify_delay_passed(ctx, height, connection_end)?; let receipt_path = ReceiptsPath { @@ -394,7 +389,7 @@ impl ClientDef for BeefyClient { channel_id: channel_id.clone(), sequence, }; - verify_non_membership::( + verify_non_membership::( connection_end.counterparty().prefix(), proof, root, @@ -413,7 +408,7 @@ impl ClientDef for BeefyClient { } } -fn verify_membership>( +fn verify_membership>( prefix: &CommitmentPrefix, proof: &CommitmentProofBytes, root: &CommitmentRoot, @@ -434,7 +429,7 @@ fn verify_membership>( Verifier::verify_membership_trie_proof(&root, &trie_proof, &key, &value) } -fn verify_non_membership>( +fn verify_non_membership>( prefix: 
&CommitmentPrefix, proof: &CommitmentProofBytes, root: &CommitmentRoot, @@ -494,5 +489,5 @@ pub fn downcast_consensus_state(cs: AnyConsensusState) -> Result AnyConsensusState::Beefy ) - .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Beefy)) + .ok_or(Error::client_args_type_mismatch(ClientType::Beefy)) } diff --git a/modules/src/clients/ics11_beefy/client_state.rs b/modules/src/clients/ics11_beefy/client_state.rs index fea32a4c50..5676500ea4 100644 --- a/modules/src/clients/ics11_beefy/client_state.rs +++ b/modules/src/clients/ics11_beefy/client_state.rs @@ -1,6 +1,6 @@ use crate::prelude::*; -use beefy_client::traits::{AuthoritySet, MmrState}; +use beefy_primitives::known_payload_ids::MMR_ROOT_ID; use beefy_primitives::mmr::BeefyNextAuthoritySet; use codec::{Decode, Encode}; use core::convert::TryFrom; @@ -10,10 +10,10 @@ use sp_core::H256; use sp_runtime::SaturatedConversion; use tendermint_proto::Protobuf; -use crate::clients::ics11_beefy::client_def::BeefyLCStore; use ibc_proto::ibc::lightclients::beefy::v1::{BeefyAuthoritySet, ClientState as RawClientState}; use crate::clients::ics11_beefy::error::Error; +use crate::clients::ics11_beefy::header::BeefyHeader; use crate::core::ics02_client::client_state::AnyClientState; use crate::core::ics02_client::client_type::ClientType; use crate::core::ics24_host::identifier::ChainId; @@ -98,16 +98,43 @@ impl ClientState { self.beefy_activation_block - (block_number + 1) } - pub fn with_updates(&self, mmr_state: MmrState, authorities: AuthoritySet) -> Self { - let clone = self.clone(); - Self { - mmr_root_hash: mmr_state.mmr_root_hash, - latest_beefy_height: mmr_state.latest_beefy_height, - next_authority_set: authorities.next_authorities, - authority: authorities.current_authorities, - latest_para_height: Some(latest_para_height), - ..clone + /// Should only be called if this header has been verified successfully + pub fn from_header(self, header: BeefyHeader) -> Result { + let mut clone = self.clone(); + let mut authority_changed = false; + let (mmr_root_hash, latest_beefy_height, next_authority_set) = + if let Some(mmr_update) = header.mmr_update_proof { + if mmr_update.signed_commitment.commitment.validator_set_id + != self.next_authority_set.id + { + authority_changed = true; + } + ( + H256::from_slice( + mmr_update + .signed_commitment + .commitment + .payload + .get_raw(&MMR_ROOT_ID) + .ok_or(Error::invalid_raw_header())?, + ), + mmr_update.signed_commitment.commitment.block_number, + mmr_update.latest_mmr_leaf.beefy_next_authority_set, + ) + } else { + ( + self.mmr_root_hash, + self.latest_beefy_height, + self.next_authority_set, + ) + }; + clone.mmr_root_hash = mmr_root_hash; + clone.latest_beefy_height = latest_beefy_height; + if authority_changed { + clone.authority = clone.next_authority_set; + clone.next_authority_set = next_authority_set; } + Ok(clone) } /// Verify the time and height delays @@ -164,20 +191,6 @@ impl ClientState { _ => Ok(()), } } - - pub fn verify_parachain_height( - &self, - height: Height, - ) -> Result<(), Error> { - let para_id = height.revision_number; - let trusted_para_height = LCStore::get_parachain_latest_height(para_id) - .map_err(|e| Error::implementation_specific(e.to_string()))?; - let latest_para_height = Height::new(para_id, trusted_para_height); - if latest_para_height < height { - return Err(Error::insufficient_height(latest_para_height, height)); - } - Ok(()) - } } #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] diff --git a/modules/src/core/ics02_client/client_def.rs 
b/modules/src/core/ics02_client/client_def.rs index 86b44ab769..237e5f8e05 100644 --- a/modules/src/core/ics02_client/client_def.rs +++ b/modules/src/core/ics02_client/client_def.rs @@ -1,5 +1,5 @@ use crate::clients::ics07_tendermint::client_def::TendermintClient; -use crate::clients::ics11_beefy::client_def::{BeefyClient, BeefyLCStore}; +use crate::clients::ics11_beefy::client_def::{BeefyClient, BeefyTraits}; use crate::core::ics02_client::client_consensus::{AnyConsensusState, ConsensusState}; use crate::core::ics02_client::client_state::{AnyClientState, ClientState}; use crate::core::ics02_client::client_type::ClientType; @@ -7,6 +7,7 @@ use crate::core::ics02_client::context::ClientReader; use crate::core::ics02_client::error::Error; use crate::core::ics02_client::header::{AnyHeader, Header}; use crate::core::ics03_connection::connection::ConnectionEnd; +use crate::core::ics03_connection::context::ConnectionReader; use crate::core::ics04_channel::channel::ChannelEnd; use crate::core::ics04_channel::commitment::{AcknowledgementCommitment, PacketCommitment}; use crate::core::ics04_channel::context::ChannelReader; @@ -43,8 +44,8 @@ pub trait ClientDef: Clone { fn update_state( &self, - _ctx: &dyn ClientReader, - _client_id: ClientId, + ctx: &dyn ClientReader, + client_id: ClientId, client_state: Self::ClientState, header: Self::Header, ) -> Result<(Self::ClientState, ConsensusUpdateResult), Error>; @@ -82,6 +83,7 @@ pub trait ClientDef: Clone { #[allow(clippy::too_many_arguments)] fn verify_client_consensus_state( &self, + ctx: &dyn ConnectionReader, client_state: &Self::ClientState, height: Height, prefix: &CommitmentPrefix, @@ -96,6 +98,8 @@ pub trait ClientDef: Clone { #[allow(clippy::too_many_arguments)] fn verify_connection_state( &self, + ctx: &dyn ConnectionReader, + client_id: &ClientId, client_state: &Self::ClientState, height: Height, prefix: &CommitmentPrefix, @@ -109,6 +113,8 @@ pub trait ClientDef: Clone { #[allow(clippy::too_many_arguments)] fn verify_channel_state( &self, + ctx: &dyn ChannelReader, + client_id: &ClientId, client_state: &Self::ClientState, height: Height, prefix: &CommitmentPrefix, @@ -123,6 +129,7 @@ pub trait ClientDef: Clone { #[allow(clippy::too_many_arguments)] fn verify_client_full_state( &self, + ctx: &dyn ConnectionReader, client_state: &Self::ClientState, height: Height, prefix: &CommitmentPrefix, @@ -137,6 +144,7 @@ pub trait ClientDef: Clone { fn verify_packet_data( &self, ctx: &dyn ChannelReader, + client_id: &ClientId, client_state: &Self::ClientState, height: Height, connection_end: &ConnectionEnd, @@ -153,6 +161,7 @@ pub trait ClientDef: Clone { fn verify_packet_acknowledgement( &self, ctx: &dyn ChannelReader, + client_id: &ClientId, client_state: &Self::ClientState, height: Height, connection_end: &ConnectionEnd, @@ -169,6 +178,7 @@ pub trait ClientDef: Clone { fn verify_next_sequence_recv( &self, ctx: &dyn ChannelReader, + client_id: &ClientId, client_state: &Self::ClientState, height: Height, connection_end: &ConnectionEnd, @@ -184,6 +194,7 @@ pub trait ClientDef: Clone { fn verify_packet_receipt_absence( &self, ctx: &dyn ChannelReader, + client_id: &ClientId, client_state: &Self::ClientState, height: Height, connection_end: &ConnectionEnd, @@ -196,14 +207,14 @@ pub trait ClientDef: Clone { } #[derive(Clone, Debug, PartialEq, Eq)] -pub enum AnyClient { +pub enum AnyClient { Tendermint(TendermintClient), - Beefy(BeefyClient), + Beefy(BeefyClient), #[cfg(any(test, feature = "mocks"))] Mock(MockClient), } -impl AnyClient { +impl AnyClient { 
pub fn from_client_type(client_type: ClientType) -> Self { match client_type { ClientType::Tendermint => Self::Tendermint(TendermintClient::default()), @@ -215,7 +226,7 @@ impl AnyClient { } // ⚠️ Beware of the awful boilerplate below ⚠️ -impl ClientDef for AnyClient { +impl ClientDef for AnyClient { type Header = AnyHeader; type ClientState = AnyClientState; type ConsensusState = AnyConsensusState; @@ -379,7 +390,7 @@ impl ClientDef for AnyClient { client.check_for_misbehaviour(ctx, client_id, client_state, header) } #[cfg(any(test, feature = "mocks"))] - AnyClient::Mock(client ) => { + AnyClient::Mock(client) => { let (client_state, header) = downcast!( client_state => AnyClientState::Mock, header => AnyHeader::Mock, @@ -455,6 +466,7 @@ impl ClientDef for AnyClient { fn verify_client_consensus_state( &self, + ctx: &dyn ConnectionReader, client_state: &Self::ClientState, height: Height, prefix: &CommitmentPrefix, @@ -472,6 +484,7 @@ impl ClientDef for AnyClient { .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Tendermint))?; client.verify_client_consensus_state( + ctx, client_state, height, prefix, @@ -490,6 +503,7 @@ impl ClientDef for AnyClient { .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Beefy))?; client.verify_client_consensus_state( + ctx, client_state, height, prefix, @@ -508,6 +522,7 @@ impl ClientDef for AnyClient { .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Mock))?; client.verify_client_consensus_state( + ctx, client_state, height, prefix, @@ -523,6 +538,8 @@ impl ClientDef for AnyClient { fn verify_connection_state( &self, + ctx: &dyn ConnectionReader, + client_id: &ClientId, client_state: &AnyClientState, height: Height, prefix: &CommitmentPrefix, @@ -537,6 +554,8 @@ impl ClientDef for AnyClient { .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Tendermint))?; client.verify_connection_state( + ctx, + client_id, client_state, height, prefix, @@ -551,6 +570,8 @@ impl ClientDef for AnyClient { .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Beefy))?; client.verify_connection_state( + ctx, + client_id, client_state, height, prefix, @@ -566,6 +587,8 @@ impl ClientDef for AnyClient { .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Mock))?; client.verify_connection_state( + ctx, + client_id, client_state, height, prefix, @@ -580,6 +603,8 @@ impl ClientDef for AnyClient { fn verify_channel_state( &self, + ctx: &dyn ChannelReader, + client_id: &ClientId, client_state: &AnyClientState, height: Height, prefix: &CommitmentPrefix, @@ -595,6 +620,8 @@ impl ClientDef for AnyClient { .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Tendermint))?; client.verify_channel_state( + ctx, + client_id, client_state, height, prefix, @@ -611,6 +638,8 @@ impl ClientDef for AnyClient { .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Beefy))?; client.verify_channel_state( + ctx, + client_id, client_state, height, prefix, @@ -628,6 +657,8 @@ impl ClientDef for AnyClient { .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Mock))?; client.verify_channel_state( + ctx, + client_id, client_state, height, prefix, @@ -642,6 +673,7 @@ impl ClientDef for AnyClient { } fn verify_client_full_state( &self, + ctx: &dyn ConnectionReader, client_state: &Self::ClientState, height: Height, prefix: &CommitmentPrefix, @@ -658,6 +690,7 @@ impl ClientDef for AnyClient { .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Tendermint))?; client.verify_client_full_state( + ctx, client_state, height, prefix, @@ 
-674,6 +707,7 @@ impl ClientDef for AnyClient { .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Beefy))?; client.verify_client_full_state( + ctx, client_state, height, prefix, @@ -691,6 +725,7 @@ impl ClientDef for AnyClient { .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Mock))?; client.verify_client_full_state( + ctx, client_state, height, prefix, @@ -706,6 +741,7 @@ impl ClientDef for AnyClient { fn verify_packet_data( &self, ctx: &dyn ChannelReader, + client_id: &ClientId, client_state: &Self::ClientState, height: Height, connection_end: &ConnectionEnd, @@ -725,6 +761,7 @@ impl ClientDef for AnyClient { client.verify_packet_data( ctx, + client_id, client_state, height, connection_end, @@ -745,6 +782,7 @@ impl ClientDef for AnyClient { client.verify_packet_data( ctx, + client_id, client_state, height, connection_end, @@ -766,6 +804,7 @@ impl ClientDef for AnyClient { client.verify_packet_data( ctx, + client_id, client_state, height, connection_end, @@ -783,6 +822,7 @@ impl ClientDef for AnyClient { fn verify_packet_acknowledgement( &self, ctx: &dyn ChannelReader, + client_id: &ClientId, client_state: &Self::ClientState, height: Height, connection_end: &ConnectionEnd, @@ -802,6 +842,7 @@ impl ClientDef for AnyClient { client.verify_packet_acknowledgement( ctx, + client_id, client_state, height, connection_end, @@ -822,6 +863,7 @@ impl ClientDef for AnyClient { client.verify_packet_acknowledgement( ctx, + client_id, client_state, height, connection_end, @@ -842,6 +884,7 @@ impl ClientDef for AnyClient { client.verify_packet_acknowledgement( ctx, + client_id, client_state, height, connection_end, @@ -858,6 +901,7 @@ impl ClientDef for AnyClient { fn verify_next_sequence_recv( &self, ctx: &dyn ChannelReader, + client_id: &ClientId, client_state: &Self::ClientState, height: Height, connection_end: &ConnectionEnd, @@ -876,6 +920,7 @@ impl ClientDef for AnyClient { client.verify_next_sequence_recv( ctx, + client_id, client_state, height, connection_end, @@ -895,6 +940,7 @@ impl ClientDef for AnyClient { client.verify_next_sequence_recv( ctx, + client_id, client_state, height, connection_end, @@ -915,6 +961,7 @@ impl ClientDef for AnyClient { client.verify_next_sequence_recv( ctx, + client_id, client_state, height, connection_end, @@ -931,6 +978,7 @@ impl ClientDef for AnyClient { fn verify_packet_receipt_absence( &self, ctx: &dyn ChannelReader, + client_id: &ClientId, client_state: &Self::ClientState, height: Height, connection_end: &ConnectionEnd, @@ -949,6 +997,7 @@ impl ClientDef for AnyClient { client.verify_packet_receipt_absence( ctx, + client_id, client_state, height, connection_end, @@ -968,6 +1017,7 @@ impl ClientDef for AnyClient { client.verify_packet_receipt_absence( ctx, + client_id, client_state, height, connection_end, @@ -988,6 +1038,7 @@ impl ClientDef for AnyClient { client.verify_packet_receipt_absence( ctx, + client_id, client_state, height, connection_end, diff --git a/modules/src/core/ics02_client/handler.rs b/modules/src/core/ics02_client/handler.rs index a641d38c8f..3c3971b4fe 100644 --- a/modules/src/core/ics02_client/handler.rs +++ b/modules/src/core/ics02_client/handler.rs @@ -1,6 +1,6 @@ //! This module implements the processing logic for ICS2 (client abstractions and functions) msgs. 
-use crate::clients::ics11_beefy::client_def::BeefyLCStore; +use crate::clients::ics11_beefy::client_def::BeefyTraits; use crate::core::ics02_client::context::ClientReader; use crate::core::ics02_client::error::Error; use crate::core::ics02_client::msgs::ClientMsg; @@ -21,7 +21,7 @@ pub enum ClientResult { pub fn dispatch(ctx: &Ctx, msg: ClientMsg) -> Result, Error> where Ctx: ClientReader, - Beefy: BeefyLCStore, + Beefy: BeefyTraits, { match msg { ClientMsg::CreateClient(msg) => create_client::process(ctx, msg), diff --git a/modules/src/core/ics02_client/handler/update_client.rs b/modules/src/core/ics02_client/handler/update_client.rs index 0f7020aae7..bd936d6f3f 100644 --- a/modules/src/core/ics02_client/handler/update_client.rs +++ b/modules/src/core/ics02_client/handler/update_client.rs @@ -1,6 +1,6 @@ //! Protocol logic specific to processing ICS2 messages of type `MsgUpdateAnyClient`. -use crate::clients::ics11_beefy::client_def::BeefyLCStore; +use crate::clients::ics11_beefy::client_def::BeefyTraits; use tracing::debug; use crate::core::ics02_client::client_def::{AnyClient, ClientDef, ConsensusUpdateResult}; @@ -29,7 +29,7 @@ pub struct Result { pub processed_height: Height, } -pub fn process( +pub fn process( ctx: &dyn ClientReader, msg: MsgUpdateAnyClient, ) -> HandlerResult { diff --git a/modules/src/core/ics02_client/handler/upgrade_client.rs b/modules/src/core/ics02_client/handler/upgrade_client.rs index 48ad38fa38..4790a657eb 100644 --- a/modules/src/core/ics02_client/handler/upgrade_client.rs +++ b/modules/src/core/ics02_client/handler/upgrade_client.rs @@ -1,6 +1,6 @@ //! Protocol logic specific to processing ICS2 messages of type `MsgUpgradeAnyClient`. //! -use crate::clients::ics11_beefy::client_def::BeefyLCStore; +use crate::clients::ics11_beefy::client_def::BeefyTraits; use crate::core::ics02_client::client_def::{AnyClient, ClientDef, ConsensusUpdateResult}; use crate::core::ics02_client::client_state::{AnyClientState, ClientState}; use crate::core::ics02_client::context::ClientReader; @@ -22,7 +22,7 @@ pub struct Result { pub consensus_state: Option, } -pub fn process( +pub fn process( ctx: &dyn ClientReader, msg: MsgUpgradeAnyClient, ) -> HandlerResult { diff --git a/modules/src/core/ics03_connection/handler.rs b/modules/src/core/ics03_connection/handler.rs index d6b4a77166..ead48067aa 100644 --- a/modules/src/core/ics03_connection/handler.rs +++ b/modules/src/core/ics03_connection/handler.rs @@ -1,6 +1,6 @@ //! This module implements the processing logic for ICS3 (connection open handshake) messages. -use crate::clients::ics11_beefy::client_def::BeefyLCStore; +use crate::clients::ics11_beefy::client_def::BeefyTraits; use crate::core::ics03_connection::connection::ConnectionEnd; use crate::core::ics03_connection::context::ConnectionReader; use crate::core::ics03_connection::error::Error; @@ -48,7 +48,7 @@ pub fn dispatch( ) -> Result, Error> where Ctx: ConnectionReader, - Beefy: BeefyLCStore, + Beefy: BeefyTraits, { match msg { ConnectionMsg::ConnectionOpenInit(msg) => conn_open_init::process(ctx, msg), diff --git a/modules/src/core/ics03_connection/handler/conn_open_ack.rs b/modules/src/core/ics03_connection/handler/conn_open_ack.rs index 7ab89a9f53..ec2efe411c 100644 --- a/modules/src/core/ics03_connection/handler/conn_open_ack.rs +++ b/modules/src/core/ics03_connection/handler/conn_open_ack.rs @@ -1,6 +1,6 @@ //! Protocol logic specific to processing ICS3 messages of type `MsgConnectionOpenAck`. 
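// A minimal sketch of how downstream code might call the ICS02 `dispatch` entry
// point shown above, now that the handlers are generic over a `BeefyTraits`
// host-function type instead of holding a `BeefyLCStore` value. The patch text
// renders with the contents of angle brackets stripped, so the exact generic
// arity of `dispatch` is an assumption here, and `route_client_msg` is a
// hypothetical wrapper rather than an item from this repository.
use crate::clients::ics11_beefy::client_def::BeefyTraits;
use crate::core::ics02_client::context::ClientReader;
use crate::core::ics02_client::error::Error;
use crate::core::ics02_client::handler::dispatch;
use crate::core::ics02_client::msgs::ClientMsg;

fn route_client_msg<Ctx, Beefy>(ctx: &Ctx, msg: ClientMsg) -> Result<(), Error>
where
    Ctx: ClientReader,
    Beefy: BeefyTraits,
{
    // `Beefy` is used purely at the type level: it selects the host's trie-proof
    // and hashing helpers for the Beefy light client, replacing the storage
    // handle that the removed `BeefyLCStore` trait carried.
    let _output = dispatch::<_, Beefy>(ctx, msg)?;
    Ok(())
}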
-use crate::clients::ics11_beefy::client_def::BeefyLCStore; +use crate::clients::ics11_beefy::client_def::BeefyTraits; use crate::core::ics03_connection::connection::{ConnectionEnd, Counterparty, State}; use crate::core::ics03_connection::context::ConnectionReader; use crate::core::ics03_connection::error::Error; @@ -14,7 +14,7 @@ use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; -pub(crate) fn process( +pub(crate) fn process( ctx: &dyn ConnectionReader, msg: MsgConnectionOpenAck, ) -> HandlerResult { diff --git a/modules/src/core/ics03_connection/handler/conn_open_confirm.rs b/modules/src/core/ics03_connection/handler/conn_open_confirm.rs index c64e26cbbd..3a790214f4 100644 --- a/modules/src/core/ics03_connection/handler/conn_open_confirm.rs +++ b/modules/src/core/ics03_connection/handler/conn_open_confirm.rs @@ -1,6 +1,6 @@ //! Protocol logic specific to processing ICS3 messages of type `MsgConnectionOpenConfirm`. -use crate::clients::ics11_beefy::client_def::BeefyLCStore; +use crate::clients::ics11_beefy::client_def::BeefyTraits; use crate::core::ics03_connection::connection::{ConnectionEnd, Counterparty, State}; use crate::core::ics03_connection::context::ConnectionReader; use crate::core::ics03_connection::error::Error; @@ -12,7 +12,7 @@ use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; -pub(crate) fn process( +pub(crate) fn process( ctx: &dyn ConnectionReader, msg: MsgConnectionOpenConfirm, ) -> HandlerResult { diff --git a/modules/src/core/ics03_connection/handler/conn_open_try.rs b/modules/src/core/ics03_connection/handler/conn_open_try.rs index 530005bb28..6a61bd313a 100644 --- a/modules/src/core/ics03_connection/handler/conn_open_try.rs +++ b/modules/src/core/ics03_connection/handler/conn_open_try.rs @@ -1,6 +1,6 @@ //! Protocol logic specific to processing ICS3 messages of type `MsgConnectionOpenTry`. -use crate::clients::ics11_beefy::client_def::BeefyLCStore; +use crate::clients::ics11_beefy::client_def::BeefyTraits; use crate::core::ics03_connection::connection::{ConnectionEnd, Counterparty, State}; use crate::core::ics03_connection::context::ConnectionReader; use crate::core::ics03_connection::error::Error; @@ -15,7 +15,7 @@ use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; -pub(crate) fn process( +pub(crate) fn process( ctx: &dyn ConnectionReader, msg: MsgConnectionOpenTry, ) -> HandlerResult { diff --git a/modules/src/core/ics03_connection/handler/verify.rs b/modules/src/core/ics03_connection/handler/verify.rs index b97e04a8ee..3eefd81391 100644 --- a/modules/src/core/ics03_connection/handler/verify.rs +++ b/modules/src/core/ics03_connection/handler/verify.rs @@ -1,6 +1,6 @@ //! ICS3 verification functions, common across all four handlers of ICS3. -use crate::clients::ics11_beefy::client_def::BeefyLCStore; +use crate::clients::ics11_beefy::client_def::BeefyTraits; use crate::core::ics02_client::client_consensus::ConsensusState; use crate::core::ics02_client::client_state::{AnyClientState, ClientState}; use crate::core::ics02_client::{client_def::AnyClient, client_def::ClientDef}; @@ -12,7 +12,7 @@ use crate::proofs::{ConsensusProof, Proofs}; use crate::Height; /// Entry point for verifying all proofs bundled in any ICS3 message. 
-pub fn verify_proofs( +pub fn verify_proofs( ctx: &dyn ConnectionReader, client_state: Option, height: Height, @@ -60,7 +60,7 @@ pub fn verify_proofs( /// Verifies the authenticity and semantic correctness of a commitment `proof`. The commitment /// claims to prove that an object of type connection exists on the source chain (i.e., the chain /// which created this proof). This object must match the state of `expected_conn`. -pub fn verify_connection_proof( +pub fn verify_connection_proof( ctx: &dyn ConnectionReader, height: Height, connection_end: &ConnectionEnd, @@ -91,6 +91,8 @@ pub fn verify_connection_proof( // Verify the proof for the connection state against the expected connection end. client_def .verify_connection_state( + ctx, + connection_end.client_id(), &client_state, height, connection_end.counterparty().prefix(), @@ -109,7 +111,7 @@ pub fn verify_connection_proof( /// complete verification: that the client state the counterparty stores is valid (i.e., not frozen, /// at the same revision as the current chain, with matching chain identifiers, etc) and that the /// `proof` is correct. -pub fn verify_client_proof( +pub fn verify_client_proof( ctx: &dyn ConnectionReader, height: Height, connection_end: &ConnectionEnd, @@ -130,6 +132,7 @@ pub fn verify_client_proof( client_def .verify_client_full_state( + ctx, &client_state, height, connection_end.counterparty().prefix(), @@ -143,7 +146,7 @@ pub fn verify_client_proof( }) } -pub fn verify_consensus_proof( +pub fn verify_consensus_proof( ctx: &dyn ConnectionReader, height: Height, connection_end: &ConnectionEnd, @@ -165,6 +168,7 @@ pub fn verify_consensus_proof( client .verify_client_consensus_state( + ctx, &client_state, height, connection_end.counterparty().prefix(), diff --git a/modules/src/core/ics04_channel/handler.rs b/modules/src/core/ics04_channel/handler.rs index 4b000aee2b..49d0aa8506 100644 --- a/modules/src/core/ics04_channel/handler.rs +++ b/modules/src/core/ics04_channel/handler.rs @@ -1,6 +1,6 @@ //! This module implements the processing logic for ICS4 (channel) messages. 
-use crate::clients::ics11_beefy::client_def::BeefyLCStore; +use crate::clients::ics11_beefy::client_def::BeefyTraits; use crate::core::ics04_channel::channel::ChannelEnd; use crate::core::ics04_channel::context::ChannelReader; use crate::core::ics04_channel::error::Error; @@ -65,7 +65,7 @@ pub fn channel_dispatch( ) -> Result<(HandlerOutputBuilder<()>, ChannelResult), Error> where Ctx: ChannelReader, - Beefy: BeefyLCStore, + Beefy: BeefyTraits, { let output = match msg { ChannelMsg::ChannelOpenInit(msg) => chan_open_init::process(ctx, msg), @@ -174,7 +174,7 @@ pub fn packet_dispatch( ) -> Result<(HandlerOutputBuilder<()>, PacketResult), Error> where Ctx: ChannelReader, - Beefy: BeefyLCStore, + Beefy: BeefyTraits, { let output = match msg { PacketMsg::RecvPacket(msg) => recv_packet::process::(ctx, msg), diff --git a/modules/src/core/ics04_channel/handler/acknowledgement.rs b/modules/src/core/ics04_channel/handler/acknowledgement.rs index 67df501221..632c4159bd 100644 --- a/modules/src/core/ics04_channel/handler/acknowledgement.rs +++ b/modules/src/core/ics04_channel/handler/acknowledgement.rs @@ -1,4 +1,4 @@ -use crate::clients::ics11_beefy::client_def::BeefyLCStore; +use crate::clients::ics11_beefy::client_def::BeefyTraits; use crate::core::ics03_connection::connection::State as ConnectionState; use crate::core::ics04_channel::channel::State; use crate::core::ics04_channel::channel::{Counterparty, Order}; @@ -20,7 +20,7 @@ pub struct AckPacketResult { pub seq_number: Option, } -pub fn process( +pub fn process( ctx: &dyn ChannelReader, msg: &MsgAcknowledgement, ) -> HandlerResult { diff --git a/modules/src/core/ics04_channel/handler/chan_close_confirm.rs b/modules/src/core/ics04_channel/handler/chan_close_confirm.rs index dae79b1b6d..87b71442eb 100644 --- a/modules/src/core/ics04_channel/handler/chan_close_confirm.rs +++ b/modules/src/core/ics04_channel/handler/chan_close_confirm.rs @@ -1,5 +1,5 @@ //! Protocol logic specific to ICS4 messages of type `MsgChannelCloseConfirm`. -use crate::clients::ics11_beefy::client_def::BeefyLCStore; +use crate::clients::ics11_beefy::client_def::BeefyTraits; use crate::core::ics03_connection::connection::State as ConnectionState; use crate::core::ics04_channel::channel::{ChannelEnd, Counterparty, State}; use crate::core::ics04_channel::context::ChannelReader; @@ -12,7 +12,7 @@ use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; -pub(crate) fn process( +pub(crate) fn process( ctx: &dyn ChannelReader, msg: &MsgChannelCloseConfirm, ) -> HandlerResult { diff --git a/modules/src/core/ics04_channel/handler/chan_open_ack.rs b/modules/src/core/ics04_channel/handler/chan_open_ack.rs index e44e0d7052..38654bdfe3 100644 --- a/modules/src/core/ics04_channel/handler/chan_open_ack.rs +++ b/modules/src/core/ics04_channel/handler/chan_open_ack.rs @@ -1,5 +1,5 @@ //! Protocol logic specific to ICS4 messages of type `MsgChannelOpenAck`. 
-use crate::clients::ics11_beefy::client_def::BeefyLCStore; +use crate::clients::ics11_beefy::client_def::BeefyTraits; use crate::core::ics03_connection::connection::State as ConnectionState; use crate::core::ics04_channel::channel::{ChannelEnd, Counterparty, State}; use crate::core::ics04_channel::context::ChannelReader; @@ -12,7 +12,7 @@ use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; -pub(crate) fn process( +pub(crate) fn process( ctx: &dyn ChannelReader, msg: &MsgChannelOpenAck, ) -> HandlerResult { diff --git a/modules/src/core/ics04_channel/handler/chan_open_confirm.rs b/modules/src/core/ics04_channel/handler/chan_open_confirm.rs index d21d53e900..20751cdfc6 100644 --- a/modules/src/core/ics04_channel/handler/chan_open_confirm.rs +++ b/modules/src/core/ics04_channel/handler/chan_open_confirm.rs @@ -1,5 +1,5 @@ //! Protocol logic specific to ICS4 messages of type `MsgChannelOpenConfirm`. -use crate::clients::ics11_beefy::client_def::BeefyLCStore; +use crate::clients::ics11_beefy::client_def::BeefyTraits; use crate::core::ics03_connection::connection::State as ConnectionState; use crate::core::ics04_channel::channel::{ChannelEnd, Counterparty, State}; use crate::core::ics04_channel::context::ChannelReader; @@ -12,7 +12,7 @@ use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; -pub(crate) fn process( +pub(crate) fn process( ctx: &dyn ChannelReader, msg: &MsgChannelOpenConfirm, ) -> HandlerResult { diff --git a/modules/src/core/ics04_channel/handler/chan_open_try.rs b/modules/src/core/ics04_channel/handler/chan_open_try.rs index d319d0ce85..c8ddd5a745 100644 --- a/modules/src/core/ics04_channel/handler/chan_open_try.rs +++ b/modules/src/core/ics04_channel/handler/chan_open_try.rs @@ -1,6 +1,6 @@ //! Protocol logic specific to ICS4 messages of type `MsgChannelOpenTry`. 
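// A sketch of the shape a `BeefyTraits` implementor takes after this refactor:
// with the `BeefyLCStore` storage methods gone, the host only supplies stateless
// trie-proof helpers (plus the Beefy light client's `HostFunctions` super-trait,
// elided here). The parameter types are reconstructed from the call sites shown
// above (the patch text drops the contents of angle brackets), so treat the
// signatures as assumptions; the bodies are placeholders because the trait docs
// only require that sp-trie proof verification with a BlakeTwo256 hasher be used.
use crate::prelude::*;

use sp_core::H256;

use crate::core::ics02_client::error::Error;

#[derive(Clone, Debug, Default, PartialEq, Eq)]
pub struct HostFunctionsSketch;

impl HostFunctionsSketch {
    /// Membership: check that `value` is stored under `key` in the trie
    /// committed to by `root`, given the proof nodes in `proof`.
    pub fn verify_membership_trie_proof(
        _root: &H256,
        _proof: &[Vec<u8>],
        _key: &[u8],
        _value: &[u8],
    ) -> Result<(), Error> {
        unimplemented!("host-specific sp-trie membership verification")
    }

    /// Non-membership: check that no value is stored under `key`.
    pub fn verify_non_membership_trie_proof(
        _root: &H256,
        _proof: &[Vec<u8>],
        _key: &[u8],
    ) -> Result<(), Error> {
        unimplemented!("host-specific sp-trie non-membership verification")
    }
}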
-use crate::clients::ics11_beefy::client_def::BeefyLCStore; +use crate::clients::ics11_beefy::client_def::BeefyTraits; use crate::core::ics03_connection::connection::State as ConnectionState; use crate::core::ics04_channel::channel::{ChannelEnd, Counterparty, State}; use crate::core::ics04_channel::context::ChannelReader; @@ -14,7 +14,7 @@ use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; -pub(crate) fn process( +pub(crate) fn process( ctx: &dyn ChannelReader, msg: &MsgChannelOpenTry, ) -> HandlerResult { diff --git a/modules/src/core/ics04_channel/handler/recv_packet.rs b/modules/src/core/ics04_channel/handler/recv_packet.rs index a8e1b33f07..340fb5a7e7 100644 --- a/modules/src/core/ics04_channel/handler/recv_packet.rs +++ b/modules/src/core/ics04_channel/handler/recv_packet.rs @@ -1,4 +1,4 @@ -use crate::clients::ics11_beefy::client_def::BeefyLCStore; +use crate::clients::ics11_beefy::client_def::BeefyTraits; use crate::core::ics03_connection::connection::State as ConnectionState; use crate::core::ics04_channel::channel::{Counterparty, Order, State}; use crate::core::ics04_channel::context::ChannelReader; @@ -28,7 +28,7 @@ pub enum RecvPacketResult { NoOp, } -pub fn process( +pub fn process( ctx: &dyn ChannelReader, msg: &MsgRecvPacket, ) -> HandlerResult { diff --git a/modules/src/core/ics04_channel/handler/timeout.rs b/modules/src/core/ics04_channel/handler/timeout.rs index 01c828a7a5..829a14b8d4 100644 --- a/modules/src/core/ics04_channel/handler/timeout.rs +++ b/modules/src/core/ics04_channel/handler/timeout.rs @@ -1,4 +1,4 @@ -use crate::clients::ics11_beefy::client_def::BeefyLCStore; +use crate::clients::ics11_beefy::client_def::BeefyTraits; use crate::core::ics04_channel::channel::State; use crate::core::ics04_channel::channel::{ChannelEnd, Counterparty, Order}; use crate::core::ics04_channel::events::TimeoutPacket; @@ -22,7 +22,7 @@ pub struct TimeoutPacketResult { pub channel: Option, } -pub fn process( +pub fn process( ctx: &dyn ChannelReader, msg: &MsgTimeout, ) -> HandlerResult { diff --git a/modules/src/core/ics04_channel/handler/timeout_on_close.rs b/modules/src/core/ics04_channel/handler/timeout_on_close.rs index ce76cfdd24..4cc2063516 100644 --- a/modules/src/core/ics04_channel/handler/timeout_on_close.rs +++ b/modules/src/core/ics04_channel/handler/timeout_on_close.rs @@ -1,4 +1,4 @@ -use crate::clients::ics11_beefy::client_def::BeefyLCStore; +use crate::clients::ics11_beefy::client_def::BeefyTraits; use crate::core::ics04_channel::channel::State; use crate::core::ics04_channel::channel::{ChannelEnd, Counterparty, Order}; use crate::core::ics04_channel::events::TimeoutOnClosePacket; @@ -15,7 +15,7 @@ use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; -pub fn process( +pub fn process( ctx: &dyn ChannelReader, msg: &MsgTimeoutOnClose, ) -> HandlerResult { diff --git a/modules/src/core/ics04_channel/handler/verify.rs b/modules/src/core/ics04_channel/handler/verify.rs index 9a91032353..ee77fbd837 100644 --- a/modules/src/core/ics04_channel/handler/verify.rs +++ b/modules/src/core/ics04_channel/handler/verify.rs @@ -1,4 +1,4 @@ -use crate::clients::ics11_beefy::client_def::BeefyLCStore; +use crate::clients::ics11_beefy::client_def::BeefyTraits; use crate::core::ics02_client::client_consensus::ConsensusState; use crate::core::ics02_client::client_state::ClientState; use crate::core::ics02_client::{client_def::AnyClient, client_def::ClientDef}; @@ -13,7 +13,7 @@ use 
crate::proofs::Proofs; use crate::Height; /// Entry point for verifying all proofs bundled in any ICS4 message for channel protocols. -pub fn verify_channel_proofs( +pub fn verify_channel_proofs( ctx: &dyn ChannelReader, height: Height, channel_end: &ChannelEnd, @@ -39,6 +39,8 @@ pub fn verify_channel_proofs( // A counterparty channel id of None in not possible, and is checked by validate_basic in msg. client_def .verify_channel_state( + ctx, + &client_id, &client_state, height, connection_end.counterparty().prefix(), @@ -52,7 +54,7 @@ pub fn verify_channel_proofs( } /// Entry point for verifying all proofs bundled in a ICS4 packet recv. message. -pub fn verify_packet_recv_proofs( +pub fn verify_packet_recv_proofs( ctx: &dyn ChannelReader, height: Height, packet: &Packet, @@ -81,6 +83,7 @@ pub fn verify_packet_recv_proofs( client_def .verify_packet_data( ctx, + client_id, &client_state, height, connection_end, @@ -97,7 +100,7 @@ pub fn verify_packet_recv_proofs( } /// Entry point for verifying all proofs bundled in an ICS4 packet ack message. -pub fn verify_packet_acknowledgement_proofs( +pub fn verify_packet_acknowledgement_proofs( ctx: &dyn ChannelReader, height: Height, packet: &Packet, @@ -123,6 +126,7 @@ pub fn verify_packet_acknowledgement_proofs( client_def .verify_packet_acknowledgement( ctx, + client_id, &client_state, height, connection_end, @@ -139,7 +143,7 @@ pub fn verify_packet_acknowledgement_proofs( } /// Entry point for verifying all timeout proofs. -pub fn verify_next_sequence_recv( +pub fn verify_next_sequence_recv( ctx: &dyn ChannelReader, height: Height, connection_end: &ConnectionEnd, @@ -163,6 +167,7 @@ pub fn verify_next_sequence_recv( client_def .verify_next_sequence_recv( ctx, + client_id, &client_state, height, connection_end, @@ -177,7 +182,7 @@ pub fn verify_next_sequence_recv( Ok(()) } -pub fn verify_packet_receipt_absence( +pub fn verify_packet_receipt_absence( ctx: &dyn ChannelReader, height: Height, connection_end: &ConnectionEnd, @@ -200,6 +205,7 @@ pub fn verify_packet_receipt_absence( client_def .verify_packet_receipt_absence( ctx, + client_id, &client_state, height, connection_end, diff --git a/modules/src/core/ics26_routing/handler.rs b/modules/src/core/ics26_routing/handler.rs index 03ca610512..16c5bb4031 100644 --- a/modules/src/core/ics26_routing/handler.rs +++ b/modules/src/core/ics26_routing/handler.rs @@ -3,7 +3,7 @@ use crate::prelude::*; use ibc_proto::google::protobuf::Any; use crate::applications::ics20_fungible_token_transfer::relay_application_logic::send_transfer::send_transfer as ics20_msg_dispatcher; -use crate::clients::ics11_beefy::client_def::BeefyLCStore; +use crate::clients::ics11_beefy::client_def::BeefyTraits; use crate::core::ics02_client::handler::dispatch as ics2_msg_dispatcher; use crate::core::ics03_connection::handler::dispatch as ics3_msg_dispatcher; use crate::core::ics04_channel::handler::{ @@ -38,7 +38,7 @@ pub fn deliver( ) -> Result<(Vec, Vec), Error> where Ctx: Ics26Context, - Beefy: BeefyLCStore, + Beefy: BeefyTraits, { // Decode the proto message into a domain message, creating an ICS26 envelope. 
let envelope = decode(message)?; @@ -62,7 +62,7 @@ pub fn decode(message: Any) -> Result { pub fn dispatch(ctx: &mut Ctx, msg: Ics26Envelope) -> Result, Error> where Ctx: Ics26Context, - Beefy: BeefyLCStore, + Beefy: BeefyTraits, { let output = match msg { Ics2Msg(msg) => { diff --git a/modules/src/mock/client_def.rs b/modules/src/mock/client_def.rs index 5c7f3eab1c..ee20c970d1 100644 --- a/modules/src/mock/client_def.rs +++ b/modules/src/mock/client_def.rs @@ -4,6 +4,7 @@ use crate::core::ics02_client::client_state::AnyClientState; use crate::core::ics02_client::context::ClientReader; use crate::core::ics02_client::error::Error; use crate::core::ics03_connection::connection::ConnectionEnd; +use crate::core::ics03_connection::context::ConnectionReader; use crate::core::ics04_channel::channel::ChannelEnd; use crate::core::ics04_channel::commitment::{AcknowledgementCommitment, PacketCommitment}; use crate::core::ics04_channel::context::ChannelReader; @@ -50,6 +51,7 @@ impl ClientDef for MockClient { fn verify_client_consensus_state( &self, + _ctx: &dyn ConnectionReader, _client_state: &Self::ClientState, _height: Height, prefix: &CommitmentPrefix, @@ -73,6 +75,8 @@ impl ClientDef for MockClient { fn verify_connection_state( &self, + _ctx: &dyn ConnectionReader, + _client_id: &ClientId, _client_state: &Self::ClientState, _height: Height, _prefix: &CommitmentPrefix, @@ -86,6 +90,8 @@ impl ClientDef for MockClient { fn verify_channel_state( &self, + _ctx: &dyn ClientReader, + _client_id: &ClientId, _client_state: &Self::ClientState, _height: Height, _prefix: &CommitmentPrefix, @@ -100,6 +106,7 @@ impl ClientDef for MockClient { fn verify_client_full_state( &self, + _ctx: &dyn ConnectionReader, _client_state: &Self::ClientState, _height: Height, _prefix: &CommitmentPrefix, @@ -114,6 +121,7 @@ impl ClientDef for MockClient { fn verify_packet_data( &self, _ctx: &dyn ChannelReader, + _client_id: &ClientId, _client_state: &Self::ClientState, _height: Height, _connection_end: &ConnectionEnd, @@ -130,6 +138,7 @@ impl ClientDef for MockClient { fn verify_packet_acknowledgement( &self, _ctx: &dyn ChannelReader, + _client_id: &ClientId, _client_state: &Self::ClientState, _height: Height, _connection_end: &ConnectionEnd, @@ -146,6 +155,7 @@ impl ClientDef for MockClient { fn verify_next_sequence_recv( &self, _ctx: &dyn ChannelReader, + _client_id: &ClientId, _client_state: &Self::ClientState, _height: Height, _connection_end: &ConnectionEnd, @@ -161,6 +171,7 @@ impl ClientDef for MockClient { fn verify_packet_receipt_absence( &self, _ctx: &dyn ChannelReader, + _client_id: &ClientId, _client_state: &Self::ClientState, _height: Height, _connection_end: &ConnectionEnd, diff --git a/modules/src/mock/context.rs b/modules/src/mock/context.rs index fdc1f5e7e9..d191d428b2 100644 --- a/modules/src/mock/context.rs +++ b/modules/src/mock/context.rs @@ -1092,7 +1092,7 @@ impl ClientReader for MockContext { &self, client_id: &ClientId, height: Height, - _filter_fn: Option bool>> + _filter_fn: Option bool>>, ) -> Result, Ics02Error> { let ibc_store = self.ibc_store.lock().unwrap(); let client_record = ibc_store @@ -1121,7 +1121,7 @@ impl ClientReader for MockContext { &self, client_id: &ClientId, height: Height, - _filter_fn: Option bool>> + _filter_fn: Option bool>>, ) -> Result, Ics02Error> { let ibc_store = self.ibc_store.lock().unwrap(); let client_record = ibc_store From c4ba3ee1cefed60656a97d7ce9b99cd9868c0a2a Mon Sep 17 00:00:00 2001 From: Seun Lanlege Date: Wed, 11 May 2022 19:05:39 +0000 Subject: [PATCH 08/96] 
some code corrections --- modules/src/clients/ics11_beefy/header.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/src/clients/ics11_beefy/header.rs b/modules/src/clients/ics11_beefy/header.rs index c76c8651c1..ec854f6070 100644 --- a/modules/src/clients/ics11_beefy/header.rs +++ b/modules/src/clients/ics11_beefy/header.rs @@ -440,7 +440,7 @@ pub fn decode_timestamp_extrinsic(header: &ParachainHeader) -> Result>::new(&db, &extrinsic_root).unwrap(); // Timestamp extrinsic should be the first inherent and hence the first extrinsic - let key = codec::Compact(0u32).encode(); + let key = 0_u32.to_be_bytes().to_vec(); let ext_bytes = trie .get(&key) .map_err(|_| Error::timestamp_extrinsic())? From 5372e9f85bfaf0b38c84688688a83e2ac6802fa8 Mon Sep 17 00:00:00 2001 From: David Salami Date: Thu, 12 May 2022 08:58:54 +0100 Subject: [PATCH 09/96] revert changes to cargo.toml --- Cargo.lock | 2212 +++++++++++++++++++++++++++++++++++++++++++++------- Cargo.toml | 230 +----- 2 files changed, 1956 insertions(+), 486 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 29df546011..fe7c4a7471 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5,7 +5,6 @@ version = 3 [[package]] name = "Inflector" version = "0.11.4" -<<<<<<< HEAD source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fe438c63458706e03479442743baae6c88256498e6431708f6dfc520a26515d3" dependencies = [ @@ -16,14 +15,11 @@ dependencies = [ [[package]] name = "abscissa_core" version = "0.6.0" -======= ->>>>>>> e05319ec (initial refactor) source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fe438c63458706e03479442743baae6c88256498e6431708f6dfc520a26515d3" dependencies = [ "lazy_static", "regex", -<<<<<<< HEAD "secrecy", "semver", "serde", @@ -47,7 +43,7 @@ dependencies = [ "syn", "synstructure", ======= ->>>>>>> e05319ec (initial refactor) +>>>>>>> 8be3170b (revert changes to cargo.toml) ] [[package]] @@ -65,6 +61,12 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" +[[package]] +name = "adler32" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aae1277d39aeec15cb388266ecc24b11c80469deae6067e17a1a7aa9e5c1f234" + [[package]] name = "ahash" version = "0.7.6" @@ -96,6 +98,21 @@ dependencies = [ "memchr", ] +[[package]] +name = "alloc-no-stdlib" +version = "2.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "35ef4730490ad1c4eae5c4325b2a95f521d023e5c885853ff7aca0a6a1631db3" + +[[package]] +name = "alloc-stdlib" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "697ed7edc0f1711de49ce108c541623a0af97c6c60b2f6e2b65229847ac843c2" +dependencies = [ + "alloc-no-stdlib", +] + [[package]] name = "ansi_term" version = "0.12.1" @@ -122,6 +139,7 @@ dependencies = [ [[package]] <<<<<<< HEAD +<<<<<<< HEAD name = "arc-swap" version = "1.5.0" ======= @@ -133,6 +151,14 @@ checksum = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544" [[package]] <<<<<<< HEAD +======= +name = "arc-swap" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c5d78ce20460b82d3fa150275ed9d55e21064fc7951177baacf86a145c4a4b1f" + +[[package]] +>>>>>>> 8be3170b (revert changes to cargo.toml) name = "arrayref" version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -170,6 +196,12 @@ version = "1.0.0" 
source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6" +[[package]] +name = "ascii" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbf56136a5198c7b01a49e3afcbef6cf84597273d298f54432926024107b0109" + [[package]] name = "async-stream" version = "0.3.3" @@ -293,6 +325,12 @@ dependencies = [ "rustc-demangle", ] +[[package]] +name = "base16ct" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "349a06037c7bf932dd7e7d1f653678b2038b9ad46a74102f1fc7bd7872678cce" + [[package]] name = "base58" version = "0.2.0" @@ -311,40 +349,87 @@ version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" +[[package]] +name = "base64ct" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dea908e7347a8c64e378c17e30ef880ad73e3b4498346b055c2c00ea342f3179" + +[[package]] +name = "bech32" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf9ff0bbfd639f15c74af777d81383cf53efb7c93613f6cab67c6c11e05bbf8b" + [[package]] name = "beefy-generic-client" version = "0.1.0" source = "git+https://github.com/ComposableFi/beefy-client?branch=master#9a79987cdb3e1b9f90d4151325521608e4ebb506" dependencies = [ - "beefy-primitives", + "beefy-primitives 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", "pallet-beefy-mmr", "pallet-mmr", - "pallet-mmr-primitives", + "pallet-mmr-primitives 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", "parity-scale-codec", "rs_merkle", - "sp-core", - "sp-core-hashing", - "sp-runtime", - "sp-std 4.0.0", + "sp-core 4.1.0-dev", + "sp-core-hashing 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", + "sp-runtime 4.1.0-dev", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", ] [[package]] name = "beefy-merkle-tree" version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16#19162e43be45817b44c7d48e50d03f074f60fbf4" [[package]] name = "beefy-primitives" version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16#19162e43be45817b44c7d48e50d03f074f60fbf4" dependencies = [ "parity-scale-codec", "scale-info", - "sp-api", - "sp-application-crypto", - "sp-core", - "sp-runtime", - "sp-std 4.0.0", + "sp-api 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", + "sp-application-crypto 4.0.0", + "sp-core 4.1.0-dev", + "sp-runtime 4.1.0-dev", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", +] + +[[package]] +name = "beefy-primitives" +version = "4.0.0-dev" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17#22d40c761a985482f93bbbea5ba4199bdba74f8e" +dependencies = [ + "parity-scale-codec", + "scale-info", + "sp-api 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", + "sp-application-crypto 5.0.0", + "sp-core 5.0.0", + "sp-runtime 5.0.0", + "sp-std 4.0.0 
(git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", +] + +[[package]] +name = "bitcoin" +version = "0.28.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05bba324e6baf655b882df672453dbbc527bc938cadd27750ae510aaccc3a66a" +dependencies = [ + "bech32", + "bitcoin_hashes", + "secp256k1", + "serde", +] + +[[package]] +name = "bitcoin_hashes" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "006cc91e1a1d99819bc5b8214be3555c1f0611b169f527a1fdc54ed1f2b745b0" +dependencies = [ + "serde", ] [[package]] @@ -485,6 +570,37 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8d696c370c750c948ada61c69a0ee2cbbb9c50b1019ddb86d9317157a99c2cae" +[[package]] +name = "brotli" +version = "3.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1a0b1dbcc8ae29329621f8d4f0d835787c1c38bb1401979b49d13b0b305ff68" +dependencies = [ + "alloc-no-stdlib", + "alloc-stdlib", + "brotli-decompressor", +] + +[[package]] +name = "brotli-decompressor" +version = "2.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59ad2d4653bf5ca36ae797b1f4bb4dbddb60ce49ca4aed8a2ce4829f60425b80" +dependencies = [ + "alloc-no-stdlib", + "alloc-stdlib", +] + +[[package]] +name = "buf_redux" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b953a6887648bb07a535631f2bc00fbdb2a2216f135552cb3f534ed136b9c07f" +dependencies = [ + "memchr", + "safemem", +] + [[package]] name = "bumpalo" version = "3.9.1" @@ -512,6 +628,12 @@ version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" +[[package]] +name = "bytecount" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72feb31ffc86498dacdbd0fcebb56138e7177a8cc5cea4516031d15ae85a742e" + [[package]] name = "byteorder" version = "1.4.3" @@ -526,6 +648,7 @@ checksum = "c4872d67bab6358e59559027aa3b9157c53d9358c51423c17554809a8858e0f8" [[package]] <<<<<<< HEAD +<<<<<<< HEAD name = "bzip2" version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -551,6 +674,12 @@ name = "camino" version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "869119e97797867fd90f5e22af7d0bd274bd4635ebb9eb68c04f3f513ae6c412" +======= +name = "camino" +version = "1.0.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07fd178c5af4d59e83498ef15cf3f154e1a6f9d091270cb86283c65ef44e9ef0" +>>>>>>> 8be3170b (revert changes to cargo.toml) dependencies = [ "serde", ] @@ -584,8 +713,11 @@ dependencies = [ ] [[package]] +<<<<<<< HEAD ======= >>>>>>> e05319ec (initial refactor) +======= +>>>>>>> 8be3170b (revert changes to cargo.toml) name = "cc" version = "1.0.73" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -678,6 +810,54 @@ dependencies = [ "os_str_bytes", ] +[[package]] +name = "color-eyre" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ebf286c900a6d5867aeff75cfee3192857bb7f24b547d4f0df2ed6baa812c90" +dependencies = [ + "backtrace", + "color-spantrace", + "eyre", + "indenter", + "once_cell", + "owo-colors", + "tracing-error", +] + +[[package]] +name = "color-spantrace" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"1ba75b3d9449ecdccb27ecbc479fdc0b87fa2dd43d2f8298f9bf0e59aacc8dce" +dependencies = [ + "once_cell", + "owo-colors", + "tracing-core", + "tracing-error", +] + +[[package]] +name = "console" +version = "0.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a28b32d32ca44b70c3e4acd7db1babf555fa026e385fb95f18028f88848b3c31" +dependencies = [ + "encode_unicode", + "libc", + "once_cell", + "regex", + "terminal_size", + "unicode-width", + "winapi", +] + +[[package]] +name = "const-oid" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e4c78c047431fee22c1a7bb92e00ad095a02a983affe4d8a72e2a2c62c1b94f3" + [[package]] name = "constant_time_eq" version = "0.1.5" @@ -686,6 +866,7 @@ checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" [[package]] <<<<<<< HEAD +<<<<<<< HEAD name = "constant_time_eq" version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -700,6 +881,17 @@ version = "0.4.0" >>>>>>> e05319ec (initial refactor) source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" +======= +name = "contracts" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1d1429e3bd78171c65aa010eabcdf8f863ba3254728dbfb0ad4b1545beac15c" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] +>>>>>>> 8be3170b (revert changes to cargo.toml) [[package]] name = "convert_case" @@ -741,6 +933,16 @@ dependencies = [ "cfg-if 1.0.0", ] +[[package]] +name = "crossbeam-channel" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b153fe7cbef478c567df0f972e02e6d736db11affe43dfc9c56a9374d1adfb87" +dependencies = [ + "crossbeam-utils 0.7.2", + "maybe-uninit", +] + [[package]] name = "crossbeam-channel" version = "0.5.4" @@ -748,7 +950,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5aaa7bd5fb665c6864b5f963dd9097905c54125909c7aa94c9e18507cdbe6c53" dependencies = [ "cfg-if 1.0.0", - "crossbeam-utils", + "crossbeam-utils 0.8.8", ] [[package]] @@ -758,8 +960,23 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6455c0ca19f0d2fbf751b908d5c55c1f5cbc65e03c4225427254b46890bdde1e" dependencies = [ "cfg-if 1.0.0", - "crossbeam-epoch", - "crossbeam-utils", + "crossbeam-epoch 0.9.8", + "crossbeam-utils 0.8.8", +] + +[[package]] +name = "crossbeam-epoch" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "058ed274caafc1f60c4997b5fc07bf7dc7cca454af7c6e81edffe5f33f70dace" +dependencies = [ + "autocfg", + "cfg-if 0.1.10", + "crossbeam-utils 0.7.2", + "lazy_static", + "maybe-uninit", + "memoffset 0.5.6", + "scopeguard", ] [[package]] @@ -770,12 +987,23 @@ checksum = "1145cf131a2c6ba0615079ab6a638f7e1973ac9c2634fcbeaaad6114246efe8c" dependencies = [ "autocfg", "cfg-if 1.0.0", - "crossbeam-utils", + "crossbeam-utils 0.8.8", "lazy_static", - "memoffset", + "memoffset 0.6.5", "scopeguard", ] +[[package]] +name = "crossbeam-utils" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3c7c73a2d1e9fc0886a08b93e98eb643461230d5f1925e4036204d5f2e261a8" +dependencies = [ + "autocfg", + "cfg-if 0.1.10", + "lazy_static", +] + [[package]] name = "crossbeam-utils" version = "0.8.8" @@ -794,6 +1022,9 @@ checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" [[package]] <<<<<<< 
HEAD +<<<<<<< HEAD +======= +>>>>>>> 8be3170b (revert changes to cargo.toml) name = "crypto-bigint" version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -806,8 +1037,11 @@ dependencies = [ ] [[package]] +<<<<<<< HEAD ======= >>>>>>> e05319ec (initial refactor) +======= +>>>>>>> 8be3170b (revert changes to cargo.toml) name = "crypto-common" version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -888,6 +1122,35 @@ dependencies = [ "zeroize", ] +[[package]] +name = "dashmap" +version = "4.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e77a43b28d0668df09411cb0bc9a8c2adc40f9a048afe863e05fd43251e8e39c" +dependencies = [ + "cfg-if 1.0.0", + "num_cpus", +] + +[[package]] +name = "deflate" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f95bf05dffba6e6cce8dfbb30def788154949ccd9aed761b472119c21e01c70" +dependencies = [ + "adler32", + "gzip-header", +] + +[[package]] +name = "der" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6919815d73839e7ad218de758883aae3a257ba6759ce7a9992501efbb53d705c" +dependencies = [ + "const-oid", +] + [[package]] name = "derive_more" version = "0.99.17" @@ -903,10 +1166,14 @@ dependencies = [ [[package]] <<<<<<< HEAD +<<<<<<< HEAD +======= +>>>>>>> 8be3170b (revert changes to cargo.toml) name = "dialoguer" version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d8c8ae48e400addc32a8710c8d62d55cb84249a7d58ac4cd959daecfbaddc545" +<<<<<<< HEAD ======= name = "digest" version = "0.8.1" @@ -915,6 +1182,12 @@ checksum = "f3d0c8c8752312f9713efd397ff63acb9f85585afbf179282e720e7704954dd5" >>>>>>> e05319ec (initial refactor) dependencies = [ "generic-array 0.12.4", +======= +dependencies = [ + "console", + "tempfile", + "zeroize", +>>>>>>> 8be3170b (revert changes to cargo.toml) ] [[package]] @@ -954,6 +1227,16 @@ dependencies = [ "dirs-sys", ] +[[package]] +name = "dirs-next" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b98cf8ebf19c3d1b223e151f99a4f9f0690dca41414773390fc824184ac833e1" +dependencies = [ + "cfg-if 1.0.0", + "dirs-sys-next", +] + [[package]] name = "dirs-sys" version = "0.3.7" @@ -965,6 +1248,17 @@ dependencies = [ "winapi", ] +[[package]] +name = "dirs-sys-next" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ebda144c4fe02d1f7ea1a7d9641b6fc6b580adcfa024ae48797ecdeb6825b4d" +dependencies = [ + "libc", + "redox_users", + "winapi", +] + [[package]] name = "downcast-rs" version = "1.2.0" @@ -1030,6 +1324,18 @@ version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "21e50f3adc76d6a43f5ed73b698a87d0760ca74617f60f7c3b879003536fdd28" +[[package]] +name = "ecdsa" +version = "0.13.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0d69ae62e0ce582d56380743515fefaf1a8c70cec685d9677636d7e30ae9dc9" +dependencies = [ + "der", + "elliptic-curve", + "rfc6979", + "signature", +] + [[package]] name = "ed25519" version = "1.5.2" @@ -1061,6 +1367,7 @@ checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" [[package]] <<<<<<< HEAD +<<<<<<< HEAD name = "elliptic-curve" version = "0.11.12" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -1089,21 +1396,49 @@ checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f" 
>>>>>>> e05319ec (initial refactor) name = "env_logger" version = "0.9.0" +======= +name = "elliptic-curve" +version = "0.11.12" +>>>>>>> 8be3170b (revert changes to cargo.toml) source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b2cf0344971ee6c64c31be0d530793fba457d322dfec2810c453d0ef228f9c3" +checksum = "25b477563c2bfed38a3b7a60964c49e058b2510ad3f12ba3483fd8f62c2306d6" dependencies = [ - "atty", - "humantime", - "log", - "regex", - "termcolor", -] - -[[package]] -name = "environmental" -version = "1.1.3" -<<<<<<< HEAD -source = "registry+https://github.com/rust-lang/crates.io-index" + "base16ct", + "crypto-bigint", + "der", + "ff", + "generic-array 0.14.5", + "group", + "rand_core 0.6.3", + "sec1", + "subtle", + "zeroize", +] + +[[package]] +name = "encode_unicode" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f" + +[[package]] +name = "env_logger" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b2cf0344971ee6c64c31be0d530793fba457d322dfec2810c453d0ef228f9c3" +dependencies = [ + "atty", + "humantime", + "log", + "regex", + "termcolor", +] + +[[package]] +name = "environmental" +version = "1.1.3" +<<<<<<< HEAD +source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "68b91989ae21441195d7d9b9993a2f9295c7e1a8c96255d8b729accddc124797" [[package]] @@ -1114,6 +1449,15 @@ version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "68b91989ae21441195d7d9b9993a2f9295c7e1a8c96255d8b729accddc124797" +[[package]] +name = "error-chain" +version = "0.12.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d2f06b9cac1506ece98fe3231e3cc9c4410ec3d5b1f24ae1c8946f0742cdefc" +dependencies = [ + "version_check", +] + [[package]] name = "eyre" version = "0.6.8" @@ -1141,6 +1485,9 @@ dependencies = [ [[package]] <<<<<<< HEAD +<<<<<<< HEAD +======= +>>>>>>> 8be3170b (revert changes to cargo.toml) name = "ff" version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -1153,6 +1500,7 @@ dependencies = [ [[package]] name = "filetime" version = "0.2.16" +<<<<<<< HEAD ======= name = "fixed-hash" version = "0.7.0" @@ -1164,6 +1512,15 @@ dependencies = [ "rand 0.8.5", "rustc-hex", "static_assertions", +======= +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0408e2626025178a6a7f7ffc05a25bc47103229f19c113755de7bf63816290c" +dependencies = [ + "cfg-if 1.0.0", + "libc", + "redox_syscall", + "winapi", +>>>>>>> 8be3170b (revert changes to cargo.toml) ] [[package]] @@ -1196,6 +1553,7 @@ version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c606d892c9de11507fa0dcffc116434f94e105d0bbdc4e405b61519464c49d7b" dependencies = [ + "anyhow", "eyre", "paste", ] @@ -1219,22 +1577,22 @@ dependencies = [ [[package]] name = "frame-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16#19162e43be45817b44c7d48e50d03f074f60fbf4" dependencies = [ - "frame-support", - "frame-system", + "frame-support 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", + "frame-system 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", 
"linregress", "log", "parity-scale-codec", "paste", "scale-info", - "sp-api", - "sp-application-crypto", - "sp-io", - "sp-runtime", - "sp-runtime-interface", - "sp-std 4.0.0", - "sp-storage", + "sp-api 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", + "sp-application-crypto 4.0.0", + "sp-io 4.0.0", + "sp-runtime 4.1.0-dev", + "sp-runtime-interface 4.1.0-dev", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", + "sp-storage 4.0.0", ] <<<<<<< HEAD ======= @@ -1353,11 +1711,11 @@ dependencies = [ [[package]] name = "frame-support" version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16#19162e43be45817b44c7d48e50d03f074f60fbf4" dependencies = [ "bitflags", "frame-metadata", - "frame-support-procedural", + "frame-support-procedural 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", "impl-trait-for-tuples", "log", "once_cell", @@ -1366,26 +1724,67 @@ dependencies = [ "scale-info", "serde", "smallvec", - "sp-arithmetic", - "sp-core", - "sp-core-hashing-proc-macro", - "sp-inherents", - "sp-io", - "sp-runtime", - "sp-staking", - "sp-state-machine", - "sp-std 4.0.0", - "sp-tracing", + "sp-arithmetic 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", + "sp-core 4.1.0-dev", + "sp-core-hashing-proc-macro 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", + "sp-inherents 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", + "sp-io 4.0.0", + "sp-runtime 4.1.0-dev", + "sp-staking 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", + "sp-state-machine 0.10.0", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", + "sp-tracing 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", + "tt-call", +] + +[[package]] +name = "frame-support" +version = "4.0.0-dev" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17#22d40c761a985482f93bbbea5ba4199bdba74f8e" +dependencies = [ + "bitflags", + "frame-metadata", + "frame-support-procedural 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", + "impl-trait-for-tuples", + "log", + "once_cell", + "parity-scale-codec", + "paste", + "scale-info", + "serde", + "smallvec", + "sp-arithmetic 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", + "sp-core 5.0.0", + "sp-core-hashing-proc-macro 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", + "sp-inherents 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", + "sp-io 5.0.0", + "sp-runtime 5.0.0", + "sp-staking 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", + "sp-state-machine 0.11.0", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", + "sp-tracing 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", "tt-call", ] [[package]] name = "frame-support-procedural" version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16#19162e43be45817b44c7d48e50d03f074f60fbf4" dependencies = [ "Inflector", 
- "frame-support-procedural-tools", + "frame-support-procedural-tools 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "frame-support-procedural" +version = "4.0.0-dev" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17#22d40c761a985482f93bbbea5ba4199bdba74f8e" +dependencies = [ + "Inflector", + "frame-support-procedural-tools 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", "proc-macro2", "quote", "syn", @@ -1394,9 +1793,21 @@ dependencies = [ [[package]] name = "frame-support-procedural-tools" version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16#19162e43be45817b44c7d48e50d03f074f60fbf4" dependencies = [ - "frame-support-procedural-tools-derive", + "frame-support-procedural-tools-derive 3.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", + "proc-macro-crate", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "frame-support-procedural-tools" +version = "4.0.0-dev" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17#22d40c761a985482f93bbbea5ba4199bdba74f8e" +dependencies = [ + "frame-support-procedural-tools-derive 3.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", "proc-macro-crate", "proc-macro2", "quote", @@ -1406,7 +1817,17 @@ dependencies = [ [[package]] name = "frame-support-procedural-tools-derive" version = "3.0.0" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16#19162e43be45817b44c7d48e50d03f074f60fbf4" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "frame-support-procedural-tools-derive" +version = "3.0.0" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17#22d40c761a985482f93bbbea5ba4199bdba74f8e" dependencies = [ "proc-macro2", "quote", @@ -1416,18 +1837,35 @@ dependencies = [ [[package]] name = "frame-system" version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16#19162e43be45817b44c7d48e50d03f074f60fbf4" dependencies = [ - "frame-support", + "frame-support 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", "log", "parity-scale-codec", "scale-info", "serde", - "sp-core", - "sp-io", - "sp-runtime", - "sp-std 4.0.0", - "sp-version", + "sp-core 4.1.0-dev", + "sp-io 4.0.0", + "sp-runtime 4.1.0-dev", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", + "sp-version 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", +] + +[[package]] +name = "frame-system" +version = "4.0.0-dev" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17#22d40c761a985482f93bbbea5ba4199bdba74f8e" +dependencies = [ + "frame-support 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", + "log", + "parity-scale-codec", + "scale-info", + "serde", + "sp-core 5.0.0", + "sp-io 5.0.0", + "sp-runtime 5.0.0", + "sp-std 4.0.0 
(git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", + "sp-version 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", ] [[package]] @@ -1589,6 +2027,23 @@ version = "0.26.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "78cc372d058dcf6d5ecd98510e7fbc9e5aec4d21de70f65fea8fecebcd881bd4" +[[package]] +name = "glob" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" + +[[package]] +name = "group" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc5ac374b108929de78460075f3dc439fa66df9d8fc77e8f12caa5165fcf0c89" +dependencies = [ + "ff", + "rand_core 0.6.3", + "subtle", +] + [[package]] name = "gumdrop" version = "0.8.1" @@ -1609,6 +2064,15 @@ dependencies = [ "syn", ] +[[package]] +name = "gzip-header" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0131feb3d3bb2a5a238d8a4d09f6353b7ebfdc52e77bccbf4ea6eaa751dde639" +dependencies = [ + "crc32fast", +] + [[package]] name = "h2" version = "0.3.13" @@ -1628,6 +2092,12 @@ dependencies = [ "tracing", ] +[[package]] +name = "half" +version = "1.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eabb4a44450da02c90444cf74558da904edde8fb4e9035a9a6a4e15445af0bd7" + [[package]] name = "hash-db" version = "0.15.2" @@ -1688,6 +2158,15 @@ dependencies = [ "ahash", ] +[[package]] +name = "hdpath" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72adf5a17a0952ecfcddf8d46d071271d5ee52e78443f07ba0b2dcfe3063a132" +dependencies = [ + "byteorder", +] + [[package]] name = "headers" version = "0.3.7" @@ -1811,6 +2290,16 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" +[[package]] +name = "humantime-serde" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57a3db5ea5923d99402c94e9feb261dc5ee9b4efa158b0315f788cf549cc200c" +dependencies = [ + "humantime", + "serde", +] + [[package]] name = "hyper" version = "0.14.18" @@ -1889,7 +2378,7 @@ name = "ibc" version = "0.15.0" dependencies = [ "beefy-generic-client", - "beefy-primitives", + "beefy-primitives 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", "bytes", "derive_more", "env_logger", @@ -1898,7 +2387,7 @@ dependencies = [ "ics23", "modelator 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", "num-traits", - "pallet-mmr-primitives", + "pallet-mmr-primitives 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", "parity-scale-codec", "prost", "prost-types", @@ -1907,9 +2396,9 @@ dependencies = [ "serde_derive", "serde_json", "sha2 0.10.2", - "sp-core", - "sp-runtime", - "sp-trie", + "sp-core 5.0.0", + "sp-runtime 5.0.0", + "sp-trie 5.0.0", "subtle-encoding", "tendermint", "tendermint-light-client-verifier", @@ -1945,6 +2434,22 @@ dependencies = [ >>>>>>> e05319ec (initial refactor) ] +[[package]] +name = "ibc-integration-test" +version = "0.14.1" +dependencies = [ + "ibc", + "ibc-proto", + "ibc-relayer", + "ibc-relayer-cli", + "ibc-test-framework", + "serde", + "serde_json", + "tendermint", + "tendermint-rpc", + "time", +] + [[package]] name = "ibc-proto" version = "0.18.0" @@ -1961,6 +2466,7 @@ dependencies = [ 
[[package]] <<<<<<< HEAD +<<<<<<< HEAD name = "ibc-relayer" version = "0.15.0" dependencies = [ @@ -1976,9 +2482,27 @@ checksum = "9d454cc0a22bd556cc3d3c69f9d75a392a36244634840697a4b9eb81bc5c8ae0" dependencies = [ "anyhow", >>>>>>> e05319ec (initial refactor) +======= +name = "ibc-relayer" +version = "0.14.1" +dependencies = [ + "anyhow", + "async-stream", + "bech32", + "bitcoin", +>>>>>>> 8be3170b (revert changes to cargo.toml) "bytes", + "crossbeam-channel 0.5.4", + "dirs-next", + "env_logger", + "flex-error", + "futures", + "hdpath", "hex", <<<<<<< HEAD +<<<<<<< HEAD +======= +>>>>>>> 8be3170b (revert changes to cargo.toml) "http", "humantime", "humantime-serde", @@ -1991,11 +2515,20 @@ dependencies = [ "nanoid", "num-bigint 0.4.3", "num-rational 0.4.0", +<<<<<<< HEAD ======= >>>>>>> e05319ec (initial refactor) +======= +>>>>>>> 8be3170b (revert changes to cargo.toml) "prost", + "prost-types", + "regex", + "retry", "ripemd160", <<<<<<< HEAD +<<<<<<< HEAD +======= +>>>>>>> 8be3170b (revert changes to cargo.toml) "semver", "serde", "serde_derive", @@ -2018,6 +2551,7 @@ dependencies = [ "toml", "tonic", "tracing", +<<<<<<< HEAD "tracing-subscriber", "uuid 1.1.1", ] @@ -2172,39 +2706,203 @@ name = "impl-serde" version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4551f042f3438e64dbd6226b20527fc84a6e1fe65688b58746a2f53623f25f5c" -dependencies = [ - "serde", ->>>>>>> e05319ec (initial refactor) -] - -[[package]] -name = "impl-trait-for-tuples" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "impl-codec" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "161ebdfec3c8e3b52bf61c4f3550a1eea4f9579d10dc1b936f3171ebdcd6c443" -dependencies = [ - "parity-scale-codec", +======= + "tracing-subscriber 0.3.11", + "uint", ] [[package]] -name = "indexmap" -version = "1.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f647032dfaa1f8b6dc29bd3edb7bbef4861b8b8007ebb118d6db284fd59f6ee" +name = "ibc-relayer-cli" +version = "0.14.1" dependencies = [ - "autocfg", - "hashbrown 0.11.2", + "abscissa_core", + "atty", + "clap", + "clap_complete", + "color-eyre", + "console", + "crossbeam-channel 0.5.4", + "dialoguer", + "dirs-next", + "eyre", + "flex-error", + "futures", + "hex", + "humantime", + "ibc", + "ibc-proto", + "ibc-relayer", + "ibc-relayer-rest", + "ibc-telemetry", + "itertools", + "once_cell", + "oneline-eyre", + "prost", + "prost-types", + "regex", + "serde", + "serde_derive", + "serde_json", + "signal-hook", + "subtle-encoding", + "tendermint", + "tendermint-light-client", + "tendermint-light-client-verifier", + "tendermint-proto", + "tendermint-rpc", + "tokio", + "toml", + "tracing", + "tracing-subscriber 0.3.11", +] + +[[package]] +name = "ibc-relayer-rest" +version = "0.14.1" +>>>>>>> 8be3170b (revert changes to cargo.toml) +dependencies = [ + "crossbeam-channel 0.5.4", + "ibc", + "ibc-relayer", + "rouille", + "serde", +<<<<<<< HEAD +>>>>>>> e05319ec (initial refactor) +======= + "serde_json", + "toml", + "tracing", + "ureq", +>>>>>>> 8be3170b (revert changes to cargo.toml) +] + +[[package]] +name = "ibc-telemetry" +version = "0.14.1" +dependencies = [ + "crossbeam-channel 0.5.4", + "ibc", + "once_cell", + "opentelemetry", + "opentelemetry-prometheus", + "prometheus", + "rouille", +] + 
+[[package]] +<<<<<<< HEAD +======= +name = "ibc-test-framework" +version = "0.14.1" +dependencies = [ + "color-eyre", + "crossbeam-channel 0.5.4", + "env_logger", + "eyre", + "flex-error", + "hex", + "ibc", + "ibc-proto", + "ibc-relayer", + "ibc-relayer-cli", + "itertools", + "prost", + "prost-types", + "rand 0.8.5", + "semver", + "serde", + "serde_json", + "serde_yaml", + "sha2 0.10.2", + "subtle-encoding", + "tendermint", + "tendermint-rpc", + "tokio", + "toml", + "tracing", + "tracing-subscriber 0.3.11", +] + +[[package]] +name = "ics23" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d454cc0a22bd556cc3d3c69f9d75a392a36244634840697a4b9eb81bc5c8ae0" +dependencies = [ + "anyhow", + "bytes", + "hex", + "prost", + "ripemd160", + "sha2 0.9.9", + "sha3", + "sp-std 3.0.0", +] + +[[package]] +name = "ident_case" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" + +[[package]] +name = "idna" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "418a0a6fab821475f634efe3ccc45c013f742efe03d853e8d3355d5cb850ecf8" +dependencies = [ + "matches", + "unicode-bidi", + "unicode-normalization", +] + +[[package]] +>>>>>>> 8be3170b (revert changes to cargo.toml) +name = "impl-codec" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "161ebdfec3c8e3b52bf61c4f3550a1eea4f9579d10dc1b936f3171ebdcd6c443" +dependencies = [ + "parity-scale-codec", +] + +[[package]] +<<<<<<< HEAD +======= +name = "impl-serde" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4551f042f3438e64dbd6226b20527fc84a6e1fe65688b58746a2f53623f25f5c" +dependencies = [ + "serde", +] + +[[package]] +name = "impl-trait-for-tuples" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "indenter" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce23b50ad8242c51a442f3ff322d56b02f08852c77e4c0b4d3fd684abc89c683" + +[[package]] +>>>>>>> 8be3170b (revert changes to cargo.toml) +name = "indexmap" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f647032dfaa1f8b6dc29bd3edb7bbef4861b8b8007ebb118d6db284fd59f6ee" +dependencies = [ + "autocfg", + "hashbrown 0.11.2", ] [[package]] @@ -2268,6 +2966,19 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "k256" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19c3a5e0a0b8450278feda242592512e09f61c72e018b8cd5c859482802daf2d" +dependencies = [ + "cfg-if 1.0.0", + "ecdsa", + "elliptic-curve", + "sec1", + "sha2 0.9.9", +] + [[package]] name = "keccak" version = "0.1.0" @@ -2394,6 +3105,12 @@ dependencies = [ "libsecp256k1-core", ] +[[package]] +name = "linked-hash-map" +version = "0.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7fb9b38af92608140b86b693604b9ffcc5824240a484d1ecd4795bacb2fe88f3" + [[package]] name = "linregress" version = "0.4.4" @@ -2433,6 +3150,15 @@ dependencies = [ "cfg-if 1.0.0", ] +[[package]] +name = "mach" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"b823e83b2affd8f40a9ee8c29dbc56404c1e34cd2710921f2801e2cf29527afa" +dependencies = [ + "libc", +] + [[package]] name = "matchers" version = "0.0.1" @@ -2486,12 +3212,27 @@ dependencies = [ "rawpointer", ] +[[package]] +name = "maybe-uninit" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" + [[package]] name = "memchr" version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" +[[package]] +name = "memoffset" +version = "0.5.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "043175f069eda7b85febe4a74abbaeff828d9f8b448515d3151a14a3542811aa" +dependencies = [ + "autocfg", +] + [[package]] name = "memoffset" version = "0.6.5" @@ -2582,6 +3323,16 @@ version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" +[[package]] +name = "mime_guess" +version = "2.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4192263c238a5f0d0c6bfd21f336a313a4ce1c450542449ca191bb657b4642ef" +dependencies = [ + "mime", + "unicase", +] + [[package]] name = "minimal-lexical" version = "0.2.1" @@ -2665,10 +3416,17 @@ dependencies = [ [[package]] <<<<<<< HEAD +<<<<<<< HEAD name = "moka" version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "df72b50274c0988d9f4a6e808e06d9d926f265db6f8bbda1576bcaa658e72763" +======= +name = "moka" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef9d038ced38770a867f2300ef21e1193b5e26d8e8e060fa5c034d1dddc57452" +>>>>>>> 8be3170b (revert changes to cargo.toml) dependencies = [ "crossbeam-channel 0.5.4", "crossbeam-epoch 0.8.2", @@ -2683,8 +3441,33 @@ dependencies = [ "tagptr", "thiserror", "triomphe", +<<<<<<< HEAD "uuid 0.8.2", ======= +======= + "uuid", +] + +[[package]] +name = "multipart" +version = "0.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00dec633863867f29cb39df64a397cdf4a6354708ddd7759f70c7fb51c5f9182" +dependencies = [ + "buf_redux", + "httparse", + "log", + "mime", + "mime_guess", + "quick-error", + "rand 0.8.5", + "safemem", + "tempfile", + "twoway", +] + +[[package]] +>>>>>>> 8be3170b (revert changes to cargo.toml) name = "nalgebra" version = "0.27.1" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -2716,6 +3499,7 @@ dependencies = [ [[package]] <<<<<<< HEAD +<<<<<<< HEAD name = "nalgebra" version = "0.27.1" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -2733,6 +3517,17 @@ dependencies = [ "typenum", ] ======= +======= +name = "nanoid" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ffa00dec017b5b1a8b7cf5e2c008bfda1aa7e0697ac1508b491fdf2622fb4d8" +dependencies = [ + "rand 0.8.5", +] + +[[package]] +>>>>>>> 8be3170b (revert changes to cargo.toml) name = "nodrop" version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -2798,10 +3593,22 @@ dependencies = [ ] [[package]] -name = "num-complex" -version = "0.4.1" +name = "num-bigint" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97fbc387afefefd5e9e39493299f3069e14a140dd34dc19b4c1c1a8fddb6a790" +checksum = 
"f93ab6289c7b344a8a9f60f88d80aa20032336fe78da341afc91c8a2341fc75f" +dependencies = [ + "autocfg", + "num-integer", + "num-traits", + "serde", +] + +[[package]] +name = "num-complex" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97fbc387afefefd5e9e39493299f3069e14a140dd34dc19b4c1c1a8fddb6a790" dependencies = [ "num-traits", ] @@ -2853,11 +3660,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c000134b5dbf44adc5cb772486d335293351644b801551abe8f75c84cfa4aef" dependencies = [ "autocfg", +<<<<<<< HEAD <<<<<<< HEAD "num-bigint 0.2.6", ======= "num-bigint", >>>>>>> e05319ec (initial refactor) +======= + "num-bigint 0.2.6", +>>>>>>> 8be3170b (revert changes to cargo.toml) "num-integer", "num-traits", ] @@ -2869,12 +3680,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d41702bd167c2df5520b384281bc111a4b5efcf7fbc4c9c222c815b07e0a6a6a" dependencies = [ "autocfg", +<<<<<<< HEAD <<<<<<< HEAD "num-bigint 0.4.3", ======= >>>>>>> e05319ec (initial refactor) +======= + "num-bigint 0.4.3", +>>>>>>> 8be3170b (revert changes to cargo.toml) "num-integer", "num-traits", + "serde", ] [[package]] @@ -2927,6 +3743,15 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2839e79665f131bdb5782e51f2c6c9599c133c6098982a54c794358bf432529c" +[[package]] +name = "oneline-eyre" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "862f17a1e689c0ce8ca158ea48e776c5101c5d14fdfbed3e01c15f89604c3097" +dependencies = [ + "eyre", +] + [[package]] name = "opaque-debug" version = "0.2.3" @@ -2945,51 +3770,89 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" +[[package]] +name = "opentelemetry" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6105e89802af13fdf48c49d7646d3b533a70e536d818aae7e78ba0433d01acb8" +dependencies = [ + "async-trait", + "crossbeam-channel 0.5.4", + "dashmap", + "fnv", + "futures-channel", + "futures-executor", + "futures-util", + "js-sys", + "lazy_static", + "percent-encoding", + "pin-project", + "rand 0.8.5", + "thiserror", +] + +[[package]] +name = "opentelemetry-prometheus" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9328977e479cebe12ce0d3fcecdaea4721d234895a9440c5b5dfd113f0594ac6" +dependencies = [ + "opentelemetry", + "prometheus", + "protobuf", +] + [[package]] name = "os_str_bytes" version = "6.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e22443d1643a904602595ba1cd8f7d896afe56d26712531c5ff73a15b2fbf64" +[[package]] +name = "owo-colors" +version = "3.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "decf7381921fea4dcb2549c5667eda59b3ec297ab7e2b5fc33eac69d2e7da87b" + [[package]] name = "pallet-beefy" version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16#19162e43be45817b44c7d48e50d03f074f60fbf4" dependencies = [ - "beefy-primitives", - "frame-support", - "frame-system", + "beefy-primitives 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", + "frame-support 4.0.0-dev 
(git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", + "frame-system 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", "pallet-session", "parity-scale-codec", "scale-info", "serde", - "sp-runtime", - "sp-std 4.0.0", + "sp-runtime 4.1.0-dev", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", ] [[package]] name = "pallet-beefy-mmr" version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16#19162e43be45817b44c7d48e50d03f074f60fbf4" dependencies = [ "beefy-merkle-tree", - "beefy-primitives", - "frame-support", - "frame-system", + "beefy-primitives 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", + "frame-support 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", + "frame-system 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", "hex", "libsecp256k1", "log", "pallet-beefy", "pallet-mmr", - "pallet-mmr-primitives", + "pallet-mmr-primitives 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", "pallet-session", "parity-scale-codec", "scale-info", "serde", - "sp-core", - "sp-io", - "sp-runtime", - "sp-std 4.0.0", + "sp-core 4.1.0-dev", + "sp-io 4.0.0", + "sp-runtime 4.1.0-dev", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", ] [[package]] @@ -3078,72 +3941,88 @@ dependencies = [ [[package]] name = "pallet-mmr" version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16#19162e43be45817b44c7d48e50d03f074f60fbf4" dependencies = [ "ckb-merkle-mountain-range", "frame-benchmarking", - "frame-support", - "frame-system", - "pallet-mmr-primitives", + "frame-support 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", + "frame-system 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", + "pallet-mmr-primitives 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", "parity-scale-codec", "scale-info", - "sp-core", - "sp-io", - "sp-runtime", - "sp-std 4.0.0", + "sp-core 4.1.0-dev", + "sp-io 4.0.0", + "sp-runtime 4.1.0-dev", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", ] [[package]] name = "pallet-mmr-primitives" version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16#19162e43be45817b44c7d48e50d03f074f60fbf4" dependencies = [ - "frame-support", - "frame-system", + "frame-support 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", + "frame-system 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", "log", "parity-scale-codec", "serde", - "sp-api", - "sp-core", - "sp-runtime", - "sp-std 4.0.0", + "sp-api 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", + "sp-core 4.1.0-dev", + "sp-runtime 4.1.0-dev", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", +] + +[[package]] +name = "pallet-mmr-primitives" +version = 
"4.0.0-dev" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17#22d40c761a985482f93bbbea5ba4199bdba74f8e" +dependencies = [ + "frame-support 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", + "frame-system 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", + "log", + "parity-scale-codec", + "serde", + "sp-api 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", + "sp-core 5.0.0", + "sp-runtime 5.0.0", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", ] [[package]] name = "pallet-session" version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16#19162e43be45817b44c7d48e50d03f074f60fbf4" dependencies = [ - "frame-support", - "frame-system", + "frame-support 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", + "frame-system 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", "impl-trait-for-tuples", "log", "pallet-timestamp", "parity-scale-codec", "scale-info", - "sp-core", - "sp-io", - "sp-runtime", + "sp-core 4.1.0-dev", + "sp-io 4.0.0", + "sp-runtime 4.1.0-dev", "sp-session", - "sp-staking", - "sp-std 4.0.0", - "sp-trie", + "sp-staking 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", + "sp-trie 4.0.0", ] [[package]] name = "pallet-timestamp" version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16#19162e43be45817b44c7d48e50d03f074f60fbf4" dependencies = [ "frame-benchmarking", - "frame-support", - "frame-system", + "frame-support 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", + "frame-system 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", "log", "parity-scale-codec", "scale-info", - "sp-inherents", - "sp-runtime", - "sp-std 4.0.0", + "sp-inherents 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", + "sp-runtime 4.1.0-dev", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", "sp-timestamp", ] @@ -3184,6 +4063,7 @@ dependencies = [ "impl-trait-for-tuples", "parity-util-mem-derive", "parking_lot 0.11.2", +<<<<<<< HEAD "primitive-types", "winapi", ] @@ -3286,6 +4166,8 @@ dependencies = [ "impl-trait-for-tuples", "parity-util-mem-derive", "parking_lot", +======= +>>>>>>> 8be3170b (revert changes to cargo.toml) "primitive-types", "winapi", ] @@ -3315,7 +4197,17 @@ checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99" dependencies = [ "instant", "lock_api", - "parking_lot_core", + "parking_lot_core 0.8.5", +] + +[[package]] +name = "parking_lot" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87f5ec2493a61ac0506c0f4199f99070cbe83857b0337006a30f3e6719b8ef58" +dependencies = [ + "lock_api", + "parking_lot_core 0.9.3", ] [[package]] @@ -3332,6 +4224,19 @@ dependencies = [ "winapi", ] +[[package]] +name = "parking_lot_core" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"09a279cbf25cb0757810394fbc1e359949b59e348145c643a939a525692e6929" +dependencies = [ + "cfg-if 1.0.0", + "libc", + "redox_syscall", + "smallvec", + "windows-sys", +] + [[package]] name = "paste" version = "1.0.7" @@ -3421,6 +4326,17 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" +[[package]] +name = "pkcs8" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7cabda3fb821068a9a4fab19a683eac3af12edf0f34b94a8be53c4972b8149d0" +dependencies = [ + "der", + "spki", + "zeroize", +] + [[package]] name = "ppv-lite86" version = "0.2.16" @@ -3519,8 +4435,28 @@ dependencies = [ [[package]] <<<<<<< HEAD +<<<<<<< HEAD name = "prometheus" version = "0.13.1" +======= +name = "prometheus" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b7f64969ffd5dd8f39bd57a68ac53c163a095ed9d0fb707146da1b27025a3504" +dependencies = [ + "cfg-if 1.0.0", + "fnv", + "lazy_static", + "memchr", + "parking_lot 0.11.2", + "protobuf", + "thiserror", +] + +[[package]] +name = "prost" +version = "0.9.0" +>>>>>>> 8be3170b (revert changes to cargo.toml) source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cface98dfa6d645ea4c789839f176e4b072265d085bfcc48eaa8d137f58d3c39" dependencies = [ @@ -3570,6 +4506,7 @@ dependencies = [ [[package]] <<<<<<< HEAD +<<<<<<< HEAD name = "protobuf" version = "2.27.1" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -3613,6 +4550,49 @@ checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" >>>>>>> e05319ec (initial refactor) name = "quote" version = "1.0.18" +======= +name = "protobuf" +version = "2.27.1" +>>>>>>> 8be3170b (revert changes to cargo.toml) +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf7e6d18738ecd0902d30d1ad232c9125985a3422929b16c65517b38adc14f96" + +[[package]] +name = "pulldown-cmark" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34f197a544b0c9ab3ae46c359a7ec9cbbb5c7bf97054266fecb7ead794a181d6" +dependencies = [ + "bitflags", + "memchr", + "unicase", +] + +[[package]] +name = "quanta" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "20afe714292d5e879d8b12740aa223c6a88f118af41870e8b6196e39a02238a8" +dependencies = [ + "crossbeam-utils 0.8.8", + "libc", + "mach", + "once_cell", + "raw-cpuid", + "wasi 0.10.2+wasi-snapshot-preview1", + "web-sys", + "winapi", +] + +[[package]] +name = "quick-error" +version = "1.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" + +[[package]] +name = "quote" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1feb54ed693b93a84e14094943b84b7c4eae204c512b7ccb95ab0c66d278ad1" dependencies = [ @@ -3728,6 +4708,15 @@ dependencies = [ "rand_core 0.5.1", ] +[[package]] +name = "raw-cpuid" +version = "10.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "738bc47119e3eeccc7e94c4a506901aea5e7b4944ecd0829cbebf4af04ceda12" +dependencies = [ + "bitflags", +] + [[package]] name = "rawpointer" version = "0.2.1" @@ -3752,9 +4741,9 @@ version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "258bcdb5ac6dad48491bb2992db6b7cf74878b0384908af124823d118c99683f" 
dependencies = [ - "crossbeam-channel", + "crossbeam-channel 0.5.4", "crossbeam-deque", - "crossbeam-utils", + "crossbeam-utils 0.8.8", "num_cpus", ] @@ -3845,6 +4834,23 @@ dependencies = [ "winapi", ] +[[package]] +name = "retry" +version = "1.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac95c60a949a63fd2822f4964939662d8f2c16c4fa0624fd954bc6e703b9a3f6" + +[[package]] +name = "rfc6979" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96ef608575f6392792f9ecf7890c00086591d29a83910939d430753f7c050525" +dependencies = [ + "crypto-bigint", + "hmac 0.11.0", + "zeroize", +] + [[package]] name = "ring" version = "0.16.20" @@ -3871,6 +4877,31 @@ dependencies = [ "opaque-debug 0.3.0", ] +[[package]] +name = "rouille" +version = "3.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "18b2380c42510ef4a28b5f228a174c801e0dec590103e215e60812e2e2f34d05" +dependencies = [ + "base64", + "brotli", + "chrono", + "deflate", + "filetime", + "multipart", + "num_cpus", + "percent-encoding", + "rand 0.8.5", + "serde", + "serde_derive", + "serde_json", + "sha1", + "threadpool", + "time", + "tiny_http", + "url", +] + [[package]] name = "rs_merkle" version = "1.2.0" @@ -3977,6 +5008,7 @@ dependencies = [ [[package]] <<<<<<< HEAD +<<<<<<< HEAD name = "rustls-native-certs" version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -3998,14 +5030,19 @@ dependencies = [ ] [[package]] +======= +>>>>>>> 8be3170b (revert changes to cargo.toml) name = "rustversion" version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f2cc38e8fa666e2de3c4aba7edeb5ffc5246c1c2ed0e3d17e560aeeba736b23f" [[package]] +<<<<<<< HEAD ======= >>>>>>> e05319ec (initial refactor) +======= +>>>>>>> 8be3170b (revert changes to cargo.toml) name = "ryu" version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -4058,6 +5095,12 @@ dependencies = [ "safe-regex-compiler", ] +[[package]] +name = "safemem" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef703b7cb59335eae2eb93ceb664c0eb7ea6bf567079d843e09420219668e072" + [[package]] name = "same-file" version = "1.0.6" @@ -4133,13 +5176,22 @@ dependencies = [ ] [[package]] -name = "schemars" -version = "0.8.10" +name = "scheduled-thread-pool" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1847b767a3d62d95cbf3d8a9f0e421cf57a0d8aa4f411d4b16525afb0284d4ed" +checksum = "dc6f74fd1204073fa02d5d5d68bec8021be4c38690b61264b2fdb48083d0e7d7" dependencies = [ - "dyn-clone", - "schemars_derive", + "parking_lot 0.11.2", +] + +[[package]] +name = "schemars" +version = "0.8.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1847b767a3d62d95cbf3d8a9f0e421cf57a0d8aa4f411d4b16525afb0284d4ed" +dependencies = [ + "dyn-clone", + "schemars_derive", "serde", "serde_json", ] @@ -4202,6 +5254,9 @@ dependencies = [ [[package]] <<<<<<< HEAD +<<<<<<< HEAD +======= +>>>>>>> 8be3170b (revert changes to cargo.toml) name = "sec1" version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -4234,13 +5289,17 @@ dependencies = [ ] [[package]] +<<<<<<< HEAD ======= >>>>>>> e05319ec (initial refactor) +======= +>>>>>>> 8be3170b (revert changes to cargo.toml) name = "secrecy" version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"9bd1c54ea06cfd2f6b63219704de0b9b4f72dcc2b8fdef820be6cd799780e91e" dependencies = [ + "serde", "zeroize", ] @@ -4272,6 +5331,9 @@ name = "semver" version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8cb243bdfdb5936c8dc3c45762a19d12ab4550cdc753bc247637d4ec35a040fd" +dependencies = [ + "serde", +] [[package]] name = "serde" @@ -4291,6 +5353,16 @@ dependencies = [ "serde", ] +[[package]] +name = "serde_cbor" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2bef2ebfde456fb76bbcf9f59315333decc4fda0b2b44b420243c11e0f5ec1f5" +dependencies = [ + "half", + "serde", +] + [[package]] name = "serde_derive" version = "1.0.137" @@ -4337,6 +5409,9 @@ dependencies = [ [[package]] <<<<<<< HEAD +<<<<<<< HEAD +======= +>>>>>>> 8be3170b (revert changes to cargo.toml) name = "serde_yaml" version = "0.8.24" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -4350,6 +5425,7 @@ dependencies = [ [[package]] name = "serial_test" +<<<<<<< HEAD version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d19dbfb999a147cedbfe82f042eb9555f5b0fa4ef95ee4570b74349103d9c9f4" @@ -4357,14 +5433,28 @@ dependencies = [ "lazy_static", "log", "parking_lot 0.12.0", +======= +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5bcc41d18f7a1d50525d080fd3e953be87c4f9f1a974f3c21798ca00d54ec15" +dependencies = [ + "lazy_static", + "parking_lot 0.11.2", +>>>>>>> 8be3170b (revert changes to cargo.toml) "serial_test_derive", ] [[package]] name = "serial_test_derive" +<<<<<<< HEAD version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb9e2050b2be1d681f8f1c1a528bcfe4e00afa2d8995f713974f5333288659f2" +======= +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2881bccd7d60fb32dfa3d7b3136385312f8ad75e2674aab2852867a09790cae8" +>>>>>>> 8be3170b (revert changes to cargo.toml) dependencies = [ "proc-macro-error", "proc-macro2", @@ -4374,8 +5464,11 @@ dependencies = [ ] [[package]] +<<<<<<< HEAD ======= >>>>>>> e05319ec (initial refactor) +======= +>>>>>>> 8be3170b (revert changes to cargo.toml) name = "sha-1" version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -4399,6 +5492,21 @@ dependencies = [ "digest 0.10.3", ] +[[package]] +name = "sha1" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1da05c97445caa12d05e848c4a4fcbbea29e748ac28f7e80e9b010392063770" +dependencies = [ + "sha1_smol", +] + +[[package]] +name = "sha1_smol" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae1a47186c03a32177042e55dbc5fd5aee900b8e0069a8d70fba96a9375cd012" + [[package]] name = "sha2" version = "0.8.2" @@ -4904,6 +6012,7 @@ version = "4.0.0" source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" [[package]] +<<<<<<< HEAD name = "sp-storage" version = "4.0.0" source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" @@ -4915,10 +6024,35 @@ dependencies = [ "sp-debug-derive", "sp-std 4.0.0", ======= +======= +name = "signal-hook" +version = "0.3.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "647c97df271007dcea485bb74ffdb57f2e683f1306c854f468a0c244badabf2d" +dependencies = [ + "libc", + 
"signal-hook-registry", +] + +[[package]] +name = "signal-hook-registry" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e51e73328dc4ac0c7ccbda3a494dfa03df1de2f46018127f60c693f2648455b0" +dependencies = [ + "libc", +] + +[[package]] +>>>>>>> 8be3170b (revert changes to cargo.toml) name = "signature" version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "02658e48d89f2bec991f9a78e69cfa4c316f8d6a6c4ec12fae1aeb263d486788" +dependencies = [ + "digest 0.9.0", + "rand_core 0.6.3", +] [[package]] name = "simba" @@ -4963,8 +6097,26 @@ dependencies = [ ] [[package]] +<<<<<<< HEAD ======= >>>>>>> e05319ec (initial refactor) +======= +name = "skeptic" +version = "0.13.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "16d23b015676c90a0f01c197bfdc786c20342c73a0afdda9025adb0bc42940a8" +dependencies = [ + "bytecount", + "cargo_metadata", + "error-chain", + "glob", + "pulldown-cmark", + "tempfile", + "walkdir", +] + +[[package]] +>>>>>>> 8be3170b (revert changes to cargo.toml) name = "slab" version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -5013,24 +6165,53 @@ dependencies = [ [[package]] name = "sp-api" version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16#19162e43be45817b44c7d48e50d03f074f60fbf4" dependencies = [ "hash-db", "log", "parity-scale-codec", - "sp-api-proc-macro", - "sp-core", - "sp-runtime", - "sp-state-machine", - "sp-std 4.0.0", - "sp-version", + "sp-api-proc-macro 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", + "sp-core 4.1.0-dev", + "sp-runtime 4.1.0-dev", + "sp-state-machine 0.10.0", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", + "sp-version 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", + "thiserror", +] + +[[package]] +name = "sp-api" +version = "4.0.0-dev" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17#22d40c761a985482f93bbbea5ba4199bdba74f8e" +dependencies = [ + "hash-db", + "log", + "parity-scale-codec", + "sp-api-proc-macro 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", + "sp-core 5.0.0", + "sp-runtime 5.0.0", + "sp-state-machine 0.11.0", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", + "sp-version 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", "thiserror", ] [[package]] name = "sp-api-proc-macro" version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16#19162e43be45817b44c7d48e50d03f074f60fbf4" +dependencies = [ + "blake2-rfc", + "proc-macro-crate", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "sp-api-proc-macro" +version = "4.0.0-dev" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17#22d40c761a985482f93bbbea5ba4199bdba74f8e" dependencies = [ "blake2-rfc", "proc-macro-crate", @@ -5042,35 +6223,63 @@ dependencies = [ [[package]] name = "sp-application-crypto" version = "4.0.0" -source = 
"git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16#19162e43be45817b44c7d48e50d03f074f60fbf4" dependencies = [ "parity-scale-codec", "scale-info", "serde", - "sp-core", - "sp-io", - "sp-std 4.0.0", + "sp-core 4.1.0-dev", + "sp-io 4.0.0", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", +] + +[[package]] +name = "sp-application-crypto" +version = "5.0.0" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17#22d40c761a985482f93bbbea5ba4199bdba74f8e" +dependencies = [ + "parity-scale-codec", + "scale-info", + "serde", + "sp-core 5.0.0", + "sp-io 5.0.0", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", ] [[package]] name = "sp-arithmetic" version = "4.0.0" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16#19162e43be45817b44c7d48e50d03f074f60fbf4" dependencies = [ "integer-sqrt", "num-traits", "parity-scale-codec", "scale-info", "serde", - "sp-debug-derive", - "sp-std 4.0.0", + "sp-debug-derive 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", + "static_assertions", +] + +[[package]] +name = "sp-arithmetic" +version = "4.0.0" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17#22d40c761a985482f93bbbea5ba4199bdba74f8e" +dependencies = [ + "integer-sqrt", + "num-traits", + "parity-scale-codec", + "scale-info", + "serde", + "sp-debug-derive 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", "static_assertions", ] [[package]] name = "sp-core" version = "4.1.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16#19162e43be45817b44c7d48e50d03f074f60fbf4" dependencies = [ "base58", "bitflags", @@ -5090,7 +6299,7 @@ dependencies = [ "num-traits", "parity-scale-codec", "parity-util-mem", - "parking_lot", + "parking_lot 0.11.2", "primitive-types", "rand 0.7.3", "regex", @@ -5099,12 +6308,60 @@ dependencies = [ "secrecy", "serde", "sha2 0.10.2", - "sp-core-hashing", - "sp-debug-derive", - "sp-externalities", - "sp-runtime-interface", - "sp-std 4.0.0", - "sp-storage", + "sp-core-hashing 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", + "sp-debug-derive 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", + "sp-externalities 0.10.0", + "sp-runtime-interface 4.1.0-dev", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", + "sp-storage 4.0.0", + "ss58-registry", + "substrate-bip39", + "thiserror", + "tiny-bip39", + "tiny-keccak", + "twox-hash", + "wasmi", + "zeroize", +] + +[[package]] +name = "sp-core" +version = "5.0.0" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17#22d40c761a985482f93bbbea5ba4199bdba74f8e" +dependencies = [ + "base58", + "bitflags", + "blake2-rfc", + "byteorder", + "dyn-clonable", + "ed25519-dalek", + "futures", + "hash-db", + "hash256-std-hasher", + "hex", + 
"impl-serde", + "lazy_static", + "libsecp256k1", + "log", + "merlin", + "num-traits", + "parity-scale-codec", + "parity-util-mem", + "parking_lot 0.11.2", + "primitive-types", + "rand 0.7.3", + "regex", + "scale-info", + "schnorrkel", + "secrecy", + "serde", + "sha2 0.10.2", + "sp-core-hashing 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", + "sp-debug-derive 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", + "sp-externalities 0.11.0", + "sp-runtime-interface 5.0.0", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", + "sp-storage 5.0.0", "ss58-registry", "substrate-bip39", "thiserror", @@ -5118,12 +6375,25 @@ dependencies = [ [[package]] name = "sp-core-hashing" version = "4.0.0" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16#19162e43be45817b44c7d48e50d03f074f60fbf4" dependencies = [ "blake2-rfc", "byteorder", "sha2 0.10.2", - "sp-std 4.0.0", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", + "tiny-keccak", + "twox-hash", +] + +[[package]] +name = "sp-core-hashing" +version = "4.0.0" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17#22d40c761a985482f93bbbea5ba4199bdba74f8e" +dependencies = [ + "blake2-rfc", + "byteorder", + "sha2 0.10.2", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", "tiny-keccak", "twox-hash", ] @@ -5131,18 +6401,39 @@ dependencies = [ [[package]] name = "sp-core-hashing-proc-macro" version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16#19162e43be45817b44c7d48e50d03f074f60fbf4" dependencies = [ "proc-macro2", "quote", - "sp-core-hashing", + "sp-core-hashing 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", + "syn", +] + +[[package]] +name = "sp-core-hashing-proc-macro" +version = "4.0.0-dev" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17#22d40c761a985482f93bbbea5ba4199bdba74f8e" +dependencies = [ + "proc-macro2", + "quote", + "sp-core-hashing 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", "syn", ] [[package]] name = "sp-debug-derive" version = "4.0.0" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16#19162e43be45817b44c7d48e50d03f074f60fbf4" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "sp-debug-derive" +version = "4.0.0" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17#22d40c761a985482f93bbbea5ba4199bdba74f8e" dependencies = [ "proc-macro2", "quote", @@ -5152,48 +6443,97 @@ dependencies = [ [[package]] name = "sp-externalities" version = "0.10.0" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16#19162e43be45817b44c7d48e50d03f074f60fbf4" dependencies = [ "environmental", "parity-scale-codec", - "sp-std 4.0.0", - "sp-storage", + "sp-std 4.0.0 
(git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", + "sp-storage 4.0.0", +] + +[[package]] +name = "sp-externalities" +version = "0.11.0" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17#22d40c761a985482f93bbbea5ba4199bdba74f8e" +dependencies = [ + "environmental", + "parity-scale-codec", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", + "sp-storage 5.0.0", ] [[package]] name = "sp-inherents" version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16#19162e43be45817b44c7d48e50d03f074f60fbf4" dependencies = [ "async-trait", "impl-trait-for-tuples", "parity-scale-codec", - "sp-core", - "sp-runtime", - "sp-std 4.0.0", + "sp-core 4.1.0-dev", + "sp-runtime 4.1.0-dev", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", + "thiserror", +] + +[[package]] +name = "sp-inherents" +version = "4.0.0-dev" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17#22d40c761a985482f93bbbea5ba4199bdba74f8e" +dependencies = [ + "async-trait", + "impl-trait-for-tuples", + "parity-scale-codec", + "sp-core 5.0.0", + "sp-runtime 5.0.0", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", "thiserror", ] [[package]] name = "sp-io" version = "4.0.0" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16#19162e43be45817b44c7d48e50d03f074f60fbf4" dependencies = [ "futures", "hash-db", "libsecp256k1", "log", "parity-scale-codec", - "parking_lot", - "sp-core", - "sp-externalities", - "sp-keystore", - "sp-runtime-interface", - "sp-state-machine", - "sp-std 4.0.0", - "sp-tracing", - "sp-trie", - "sp-wasm-interface", + "parking_lot 0.11.2", + "sp-core 4.1.0-dev", + "sp-externalities 0.10.0", + "sp-keystore 0.10.0", + "sp-runtime-interface 4.1.0-dev", + "sp-state-machine 0.10.0", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", + "sp-tracing 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", + "sp-trie 4.0.0", + "sp-wasm-interface 4.1.0-dev", + "tracing", + "tracing-core", +] + +[[package]] +name = "sp-io" +version = "5.0.0" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17#22d40c761a985482f93bbbea5ba4199bdba74f8e" +dependencies = [ + "futures", + "hash-db", + "libsecp256k1", + "log", + "parity-scale-codec", + "parking_lot 0.11.2", + "sp-core 5.0.0", + "sp-externalities 0.11.0", + "sp-keystore 0.11.0", + "sp-runtime-interface 5.0.0", + "sp-state-machine 0.11.0", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", + "sp-tracing 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", + "sp-trie 5.0.0", + "sp-wasm-interface 5.0.0", "tracing", "tracing-core", ] @@ -5201,23 +6541,49 @@ dependencies = [ [[package]] name = "sp-keystore" version = "0.10.0" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16#19162e43be45817b44c7d48e50d03f074f60fbf4" dependencies = [ "async-trait", "derive_more", "futures", "merlin", 
"parity-scale-codec", - "parking_lot", + "parking_lot 0.11.2", "schnorrkel", - "sp-core", - "sp-externalities", + "sp-core 4.1.0-dev", + "sp-externalities 0.10.0", +] + +[[package]] +name = "sp-keystore" +version = "0.11.0" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17#22d40c761a985482f93bbbea5ba4199bdba74f8e" +dependencies = [ + "async-trait", + "futures", + "merlin", + "parity-scale-codec", + "parking_lot 0.11.2", + "schnorrkel", + "sp-core 5.0.0", + "sp-externalities 0.11.0", + "thiserror", ] [[package]] name = "sp-panic-handler" version = "4.0.0" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16#19162e43be45817b44c7d48e50d03f074f60fbf4" +dependencies = [ + "backtrace", + "lazy_static", + "regex", +] + +[[package]] +name = "sp-panic-handler" +version = "4.0.0" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17#22d40c761a985482f93bbbea5ba4199bdba74f8e" dependencies = [ "backtrace", "lazy_static", @@ -5227,7 +6593,7 @@ dependencies = [ [[package]] name = "sp-runtime" version = "4.1.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16#19162e43be45817b44c7d48e50d03f074f60fbf4" dependencies = [ "either", "hash256-std-hasher", @@ -5239,34 +6605,85 @@ dependencies = [ "rand 0.7.3", "scale-info", "serde", - "sp-application-crypto", - "sp-arithmetic", - "sp-core", - "sp-io", - "sp-std 4.0.0", + "sp-application-crypto 4.0.0", + "sp-arithmetic 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", + "sp-core 4.1.0-dev", + "sp-io 4.0.0", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", +] + +[[package]] +name = "sp-runtime" +version = "5.0.0" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17#22d40c761a985482f93bbbea5ba4199bdba74f8e" +dependencies = [ + "either", + "hash256-std-hasher", + "impl-trait-for-tuples", + "log", + "parity-scale-codec", + "parity-util-mem", + "paste", + "rand 0.7.3", + "scale-info", + "serde", + "sp-application-crypto 5.0.0", + "sp-arithmetic 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", + "sp-core 5.0.0", + "sp-io 5.0.0", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", ] [[package]] name = "sp-runtime-interface" version = "4.1.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16#19162e43be45817b44c7d48e50d03f074f60fbf4" dependencies = [ "impl-trait-for-tuples", "parity-scale-codec", "primitive-types", - "sp-externalities", - "sp-runtime-interface-proc-macro", - "sp-std 4.0.0", - "sp-storage", - "sp-tracing", - "sp-wasm-interface", + "sp-externalities 0.10.0", + "sp-runtime-interface-proc-macro 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", + "sp-storage 4.0.0", + "sp-tracing 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", + "sp-wasm-interface 4.1.0-dev", + "static_assertions", +] + +[[package]] +name = "sp-runtime-interface" +version = 
"5.0.0" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17#22d40c761a985482f93bbbea5ba4199bdba74f8e" +dependencies = [ + "impl-trait-for-tuples", + "parity-scale-codec", + "primitive-types", + "sp-externalities 0.11.0", + "sp-runtime-interface-proc-macro 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", + "sp-storage 5.0.0", + "sp-tracing 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", + "sp-wasm-interface 5.0.0", "static_assertions", ] [[package]] name = "sp-runtime-interface-proc-macro" version = "4.0.0" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16#19162e43be45817b44c7d48e50d03f074f60fbf4" +dependencies = [ + "Inflector", + "proc-macro-crate", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "sp-runtime-interface-proc-macro" +version = "4.0.0" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17#22d40c761a985482f93bbbea5ba4199bdba74f8e" dependencies = [ "Inflector", "proc-macro-crate", @@ -5278,45 +6695,79 @@ dependencies = [ [[package]] name = "sp-session" version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16#19162e43be45817b44c7d48e50d03f074f60fbf4" dependencies = [ "parity-scale-codec", "scale-info", - "sp-api", - "sp-core", - "sp-runtime", - "sp-staking", - "sp-std 4.0.0", + "sp-api 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", + "sp-core 4.1.0-dev", + "sp-runtime 4.1.0-dev", + "sp-staking 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", ] [[package]] name = "sp-staking" version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16#19162e43be45817b44c7d48e50d03f074f60fbf4" dependencies = [ "parity-scale-codec", "scale-info", - "sp-runtime", - "sp-std 4.0.0", + "sp-runtime 4.1.0-dev", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", +] + +[[package]] +name = "sp-staking" +version = "4.0.0-dev" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17#22d40c761a985482f93bbbea5ba4199bdba74f8e" +dependencies = [ + "parity-scale-codec", + "scale-info", + "sp-runtime 5.0.0", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", ] [[package]] name = "sp-state-machine" version = "0.10.0" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16#19162e43be45817b44c7d48e50d03f074f60fbf4" dependencies = [ "hash-db", "log", "num-traits", "parity-scale-codec", - "parking_lot", + "parking_lot 0.11.2", "rand 0.7.3", "smallvec", - "sp-core", - "sp-externalities", - "sp-panic-handler", - "sp-std 4.0.0", - "sp-trie", + "sp-core 4.1.0-dev", + "sp-externalities 0.10.0", + "sp-panic-handler 4.0.0 
(git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", + "sp-trie 4.0.0", + "thiserror", + "tracing", + "trie-db", + "trie-root", +] + +[[package]] +name = "sp-state-machine" +version = "0.11.0" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17#22d40c761a985482f93bbbea5ba4199bdba74f8e" +dependencies = [ + "hash-db", + "log", + "num-traits", + "parity-scale-codec", + "parking_lot 0.11.2", + "rand 0.7.3", + "smallvec", + "sp-core 5.0.0", + "sp-externalities 0.11.0", + "sp-panic-handler 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", + "sp-trie 5.0.0", "thiserror", "tracing", "trie-db", @@ -5332,44 +6783,74 @@ checksum = "35391ea974fa5ee869cb094d5b437688fbf3d8127d64d1b9fed5822a1ed39b12" [[package]] name = "sp-std" version = "4.0.0" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16#19162e43be45817b44c7d48e50d03f074f60fbf4" + +[[package]] +name = "sp-std" +version = "4.0.0" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17#22d40c761a985482f93bbbea5ba4199bdba74f8e" [[package]] name = "sp-storage" version = "4.0.0" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16#19162e43be45817b44c7d48e50d03f074f60fbf4" dependencies = [ "impl-serde", "parity-scale-codec", "ref-cast", "serde", - "sp-debug-derive", - "sp-std 4.0.0", + "sp-debug-derive 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", +] + +[[package]] +name = "sp-storage" +version = "5.0.0" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17#22d40c761a985482f93bbbea5ba4199bdba74f8e" +dependencies = [ + "impl-serde", + "parity-scale-codec", + "ref-cast", + "serde", + "sp-debug-derive 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", ] [[package]] name = "sp-timestamp" version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16#19162e43be45817b44c7d48e50d03f074f60fbf4" dependencies = [ "async-trait", "futures-timer", "log", "parity-scale-codec", - "sp-api", - "sp-inherents", - "sp-runtime", - "sp-std 4.0.0", + "sp-api 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", + "sp-inherents 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", + "sp-runtime 4.1.0-dev", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", "thiserror", ] [[package]] name = "sp-tracing" version = "4.0.0" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16#19162e43be45817b44c7d48e50d03f074f60fbf4" dependencies = [ 
"parity-scale-codec", - "sp-std 4.0.0", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", + "tracing", + "tracing-core", + "tracing-subscriber 0.2.25", +] + +[[package]] +name = "sp-tracing" +version = "4.0.0" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17#22d40c761a985482f93bbbea5ba4199bdba74f8e" +dependencies = [ + "parity-scale-codec", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", "tracing", "tracing-core", "tracing-subscriber 0.2.25", @@ -5378,14 +6859,29 @@ dependencies = [ [[package]] name = "sp-trie" version = "4.0.0" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16#19162e43be45817b44c7d48e50d03f074f60fbf4" dependencies = [ "hash-db", "memory-db", "parity-scale-codec", "scale-info", - "sp-core", - "sp-std 4.0.0", + "sp-core 4.1.0-dev", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", + "trie-db", + "trie-root", +] + +[[package]] +name = "sp-trie" +version = "5.0.0" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17#22d40c761a985482f93bbbea5ba4199bdba74f8e" +dependencies = [ + "hash-db", + "memory-db", + "parity-scale-codec", + "scale-info", + "sp-core 5.0.0", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", "trie-db", "trie-root", ] @@ -5393,24 +6889,52 @@ dependencies = [ [[package]] name = "sp-version" version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16#19162e43be45817b44c7d48e50d03f074f60fbf4" dependencies = [ "impl-serde", "parity-scale-codec", "parity-wasm", "scale-info", "serde", - "sp-core-hashing-proc-macro", - "sp-runtime", - "sp-std 4.0.0", - "sp-version-proc-macro", + "sp-core-hashing-proc-macro 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", + "sp-runtime 4.1.0-dev", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", + "sp-version-proc-macro 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", + "thiserror", +] + +[[package]] +name = "sp-version" +version = "4.0.0-dev" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17#22d40c761a985482f93bbbea5ba4199bdba74f8e" +dependencies = [ + "impl-serde", + "parity-scale-codec", + "parity-wasm", + "scale-info", + "serde", + "sp-core-hashing-proc-macro 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", + "sp-runtime 5.0.0", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", + "sp-version-proc-macro 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", "thiserror", ] [[package]] name = "sp-version-proc-macro" version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16#19162e43be45817b44c7d48e50d03f074f60fbf4" +dependencies = [ + "parity-scale-codec", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "sp-version-proc-macro" +version = "4.0.0-dev" +source = 
"git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17#22d40c761a985482f93bbbea5ba4199bdba74f8e" dependencies = [ "parity-scale-codec", "proc-macro2", @@ -5422,12 +6946,24 @@ dependencies = [ >>>>>>> e05319ec (initial refactor) name = "sp-wasm-interface" version = "4.1.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16#19162e43be45817b44c7d48e50d03f074f60fbf4" dependencies = [ "impl-trait-for-tuples", "log", "parity-scale-codec", - "sp-std 4.0.0", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", + "wasmi", +] + +[[package]] +name = "sp-wasm-interface" +version = "5.0.0" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17#22d40c761a985482f93bbbea5ba4199bdba74f8e" +dependencies = [ + "impl-trait-for-tuples", + "log", + "parity-scale-codec", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", "wasmi", ] @@ -5437,6 +6973,16 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" +[[package]] +name = "spki" +version = "0.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44d01ac02a6ccf3e07db148d2be087da624fea0221a16152ed01f0496a6b0a27" +dependencies = [ + "base64ct", + "der", +] + [[package]] name = "ss58-registry" version = "1.17.0" @@ -5534,6 +7080,12 @@ dependencies = [ "unicode-xid", ] +[[package]] +name = "tagptr" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b2093cf4c8eb1e67749a6762251bc9cd836b6fc171623bd0a9d324d37af2417" + [[package]] name = "tap" version = "1.0.1" @@ -5572,10 +7124,12 @@ dependencies = [ "ed25519-dalek", "flex-error", "futures", + "k256", "num-traits", "once_cell", "prost", "prost-types", + "ripemd160", "serde", "serde_bytes", "serde_json", @@ -5605,10 +7159,17 @@ dependencies = [ [[package]] <<<<<<< HEAD +<<<<<<< HEAD name = "tendermint-light-client" version = "0.23.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7e30a20da1169069a8629b9535bcee669be8b07480c696b5eb2f7d9cd4e4c431" +======= +name = "tendermint-light-client" +version = "0.23.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "571098c7da376550f19554227e16e00e91b8f5b99438615340e4679343ff18d1" +>>>>>>> 8be3170b (revert changes to cargo.toml) dependencies = [ "contracts", "crossbeam-channel 0.4.4", @@ -5622,13 +7183,20 @@ dependencies = [ "tendermint", "tendermint-light-client-verifier", "tendermint-rpc", +<<<<<<< HEAD "time 0.3.9", +======= + "time", +>>>>>>> 8be3170b (revert changes to cargo.toml) "tokio", ] [[package]] +<<<<<<< HEAD ======= >>>>>>> e05319ec (initial refactor) +======= +>>>>>>> 8be3170b (revert changes to cargo.toml) name = "tendermint-light-client-verifier" version = "0.23.7" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -5718,6 +7286,16 @@ dependencies = [ "winapi-util", ] +[[package]] +name = "terminal_size" +version = "0.1.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "633c1a546cee861a1a6d0dc69ebeca693bf4296661ba7852b9d21d159e0506df" +dependencies = [ + "libc", + "winapi", +] + [[package]] name = "test-log" version = "0.2.10" @@ -5764,6 +7342,15 @@ dependencies = [ "once_cell", ] +[[package]] +name = 
"threadpool" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d050e60b33d41c19108b32cea32164033a9013fe3b46cbd4457559bfbf77afaa" +dependencies = [ + "num_cpus", +] + [[package]] name = "time" version = "0.1.43" @@ -5819,6 +7406,19 @@ dependencies = [ "crunchy", ] +[[package]] +name = "tiny_http" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ce51b50006056f590c9b7c3808c3bd70f0d1101666629713866c227d6e58d39" +dependencies = [ + "ascii", + "chrono", + "chunked_transfer", + "log", + "url", +] + [[package]] name = "tinyvec" version = "1.6.0" @@ -5846,7 +7446,9 @@ dependencies = [ "mio", "num_cpus", "once_cell", + "parking_lot 0.12.0", "pin-project-lite", + "signal-hook-registry", "socket2", "tokio-macros", "winapi", @@ -5951,6 +7553,7 @@ dependencies = [ "pin-project", "prost", "prost-derive", +<<<<<<< HEAD <<<<<<< HEAD "rustls-native-certs 0.6.2", "rustls-pemfile", @@ -5959,6 +7562,11 @@ dependencies = [ ======= "tokio", >>>>>>> e05319ec (initial refactor) +======= + "rustls-native-certs", + "tokio", + "tokio-rustls", +>>>>>>> 8be3170b (revert changes to cargo.toml) "tokio-stream", "tokio-util", "tower", @@ -6055,6 +7663,7 @@ dependencies = [ [[package]] <<<<<<< HEAD +<<<<<<< HEAD name = "tracing-error" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -6069,6 +7678,20 @@ dependencies = [ >>>>>>> e05319ec (initial refactor) name = "tracing-futures" version = "0.2.5" +======= +name = "tracing-error" +version = "0.2.0" +>>>>>>> 8be3170b (revert changes to cargo.toml) +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d686ec1c0f384b1277f097b2f279a2ecc11afe8c133c1aabf036a27cb4cd206e" +dependencies = [ + "tracing", + "tracing-subscriber 0.3.11", +] + +[[package]] +name = "tracing-futures" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97d095ae15e245a057c8e8451bab9b3ee1e1f68e9ba2b4fbc18d0ac5237835f2" dependencies = [ @@ -6178,6 +7801,15 @@ dependencies = [ "hash-db", ] +[[package]] +name = "triomphe" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c45e322b26410d7260e00f64234810c2f17d7ece356182af4df8f7ff07890f09" +dependencies = [ + "memoffset 0.6.5", +] + [[package]] name = "try-lock" version = "0.2.3" @@ -6209,6 +7841,15 @@ dependencies = [ "utf-8", ] +[[package]] +name = "twoway" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59b11b2b5241ba34be09c3cc85a36e56e48f9888862e19cedf23336d35316ed1" +dependencies = [ + "memchr", +] + [[package]] name = "twox-hash" version = "1.6.3" @@ -6249,6 +7890,15 @@ dependencies = [ "static_assertions", ] +[[package]] +name = "unicase" +version = "2.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50f37be617794602aabbeee0be4f259dc1778fabe05e2d67ee8f79326d5cb4f6" +dependencies = [ + "version_check", +] + [[package]] name = "unicode-bidi" version = "0.3.8" @@ -6270,6 +7920,12 @@ dependencies = [ "tinyvec", ] +[[package]] +name = "unicode-width" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ed742d4ea2bd1176e236172c8429aaf54486e7ac098db29ffe6529e0ce50973" + [[package]] name = "unicode-xid" version = "0.2.3" @@ -6322,6 +7978,9 @@ name = "uuid" version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" +dependencies = [ + "getrandom 0.2.6", +] [[package]] name = "uuid" @@ -6344,6 +8003,15 @@ version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" +[[package]] +name = "wait-timeout" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f200f5b12eb75f8c1ed65abd4b2db8a6e1b138a20de009dacee265a2498f3f6" +dependencies = [ + "libc", +] + [[package]] name = "walkdir" version = "2.3.2" @@ -6542,6 +8210,9 @@ checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] <<<<<<< HEAD +<<<<<<< HEAD +======= +>>>>>>> 8be3170b (revert changes to cargo.toml) name = "windows-sys" version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -6585,6 +8256,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c811ca4a8c853ef420abd8592ba53ddbbac90410fab6903b3e79972a631f7680" [[package]] +<<<<<<< HEAD name = "wyz" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -6594,12 +8266,23 @@ checksum = "85e60b0d1b5f99db2556934e21937020776a5d31520bf169e851ac44e6420214" name = "yaml-rust" version = "0.4.5" ======= +======= +>>>>>>> 8be3170b (revert changes to cargo.toml) name = "wyz" version = "0.2.0" >>>>>>> e05319ec (initial refactor) source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85e60b0d1b5f99db2556934e21937020776a5d31520bf169e851ac44e6420214" +[[package]] +name = "yaml-rust" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56c1936c4cc7a1c9ab21a1ebb602eb942ba868cbd44a99cb7cdc5892335e1c85" +dependencies = [ + "linked-hash-map", +] + [[package]] name = "zeroize" version = "1.5.5" @@ -6620,6 +8303,7 @@ dependencies = [ "syn", "synstructure", ] +<<<<<<< HEAD <<<<<<< HEAD [[package]] @@ -7526,3 +9210,5 @@ name = "try-runtime-cli" version = "0.10.0-dev" source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" >>>>>>> e05319ec (initial refactor) +======= +>>>>>>> 8be3170b (revert changes to cargo.toml) diff --git a/Cargo.toml b/Cargo.toml index 1676ee2110..eaf4e09dfc 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -4,7 +4,13 @@ resolver = "2" members = [ "modules", - "proto" + "relayer", + "relayer-cli", + "relayer-rest", + "telemetry", + "proto", + "tools/integration-test", + "tools/test-framework", ] exclude = [ @@ -19,225 +25,3 @@ exclude = [ # tendermint-light-client = { git = "https://github.com/informalsystems/tendermint-rs", branch = "v0.23.x" } # tendermint-light-client-verifier = { git = "https://github.com/informalsystems/tendermint-rs", branch = "v0.23.x" } # tendermint-testgen = { git = "https://github.com/informalsystems/tendermint-rs", branch = "v0.23.x" } - - -[patch."https://github.com/paritytech/substrate"] -node-template ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -frame-benchmarking ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -frame-support ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -frame-support-procedural ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -frame-support-procedural-tools ={git = "https://github.com/ComposableFi/substrate", branch = 
"mmr-polkadot-v0.9.16" } -frame-support-procedural-tools-derive ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sp-arithmetic ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sp-debug-derive ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sp-std ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sp-core ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sp-core-hashing ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sp-externalities ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sp-storage ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sp-runtime-interface ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sp-runtime-interface-proc-macro ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sp-tracing ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sp-wasm-interface ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sp-io ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sp-keystore ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sp-state-machine ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sp-panic-handler ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sp-trie ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sp-runtime ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sp-application-crypto ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sp-api ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sp-api-proc-macro ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sp-version ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sp-core-hashing-proc-macro ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sp-version-proc-macro ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sp-test-primitives ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -substrate-test-runtime-client ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sc-block-builder ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sc-client-api ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -substrate-prometheus-endpoint ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sc-executor ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sc-executor-common ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sc-allocator ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sp-maybe-compressed-blob ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sp-serializer ={git = 
"https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sc-executor-wasmi ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sc-executor-wasmtime ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sc-runtime-test ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sp-sandbox ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sp-tasks ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -substrate-wasm-builder ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sc-tracing ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sc-rpc-server ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sc-tracing-proc-macro ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sp-blockchain ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sp-consensus ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sp-inherents ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sp-database ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sp-rpc ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -substrate-test-runtime ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -frame-system ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -frame-system-rpc-runtime-api ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -pallet-babe ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -pallet-authorship ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sp-authorship ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -pallet-session ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -pallet-timestamp ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sp-timestamp ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sp-session ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sp-staking ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sp-consensus-babe ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sp-consensus-slots ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sp-consensus-vrf ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -frame-election-provider-support ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sp-npos-elections ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sp-npos-elections-solution-type ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -substrate-test-utils ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -substrate-test-utils-derive ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } 
-sc-service ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sc-chain-spec ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sc-chain-spec-derive ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sc-network ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -fork-tree ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sc-consensus ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sc-utils ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sc-peerset ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sp-finality-grandpa ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sc-telemetry ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sc-client-db ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sc-state-db ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sc-informant ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sc-transaction-pool-api ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sc-keystore ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sc-offchain ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sp-offchain ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sc-transaction-pool ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sp-transaction-pool ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -substrate-test-runtime-transaction-pool ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sc-rpc ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sc-rpc-api ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sp-block-builder ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sp-transaction-storage-proof ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -pallet-balances ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -pallet-transaction-payment ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -pallet-offences ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -pallet-staking ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -pallet-bags-list ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -pallet-staking-reward-curve ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sp-consensus-aura ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sp-keyring ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -substrate-test-client ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sp-runtime-interface-test-wasm ={git = "https://github.com/ComposableFi/substrate", branch = 
"mmr-polkadot-v0.9.16" } -frame-benchmarking-cli ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sc-cli ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -node-template-runtime ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -frame-executive ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -frame-system-benchmarking ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -pallet-aura ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -pallet-grandpa ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -pallet-randomness-collective-flip ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -pallet-sudo ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -pallet-template ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -pallet-transaction-payment-rpc-runtime-api ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -pallet-transaction-payment-rpc ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sc-basic-authorship ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sc-proposer-metrics ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sc-consensus-aura ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sc-consensus-slots ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sc-network-test ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sc-finality-grandpa ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sc-network-gossip ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -substrate-frame-rpc-system ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -substrate-build-script-utils ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -node-bench ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -node-primitives ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -node-runtime ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -frame-try-runtime ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -pallet-asset-tx-payment ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -pallet-assets ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -pallet-authority-discovery ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sp-authority-discovery ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -pallet-bounties ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -pallet-treasury ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -pallet-child-bounties ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -pallet-collective ={git = 
"https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -pallet-contracts ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -pallet-contracts-primitives ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -pallet-contracts-proc-macro ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -pallet-utility ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -pallet-contracts-rpc-runtime-api ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -pallet-democracy ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -pallet-scheduler ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -pallet-preimage ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -pallet-election-provider-multi-phase ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -pallet-elections-phragmen ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -pallet-gilt ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -pallet-identity ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -pallet-im-online ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -pallet-indices ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -pallet-lottery ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -frame-support-test ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -frame-support-test-pallet ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -pallet-membership ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -pallet-mmr ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -pallet-mmr-primitives ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -pallet-multisig ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -pallet-offences-benchmarking ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -pallet-proxy ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -pallet-recovery ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -pallet-session-benchmarking ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -pallet-society ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -pallet-tips ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -pallet-transaction-storage ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -pallet-uniques ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -pallet-vesting ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -node-testing ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -node-executor ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -node-cli ={git = 
"https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -node-inspect ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -node-rpc ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -pallet-contracts-rpc ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -pallet-mmr-rpc ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sc-consensus-babe ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sc-consensus-epochs ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sc-consensus-babe-rpc ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sc-finality-grandpa-rpc ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sc-sync-state-rpc ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sc-authority-discovery ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sc-consensus-uncles ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -try-runtime-cli ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -remote-externalities ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sc-service-test ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -substrate-frame-cli ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -chain-spec-builder ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -subkey ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -beefy-gadget ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -beefy-primitives ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -beefy-gadget-rpc ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sc-consensus-manual-seal ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sc-consensus-pow ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sp-consensus-pow ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -pallet-atomic-swap ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -pallet-beefy ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -pallet-beefy-mmr ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -beefy-merkle-tree ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -pallet-example-basic ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -pallet-example-offchain-worker ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -pallet-example-parallel ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -pallet-nicks ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -pallet-node-authorization ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -pallet-scored-pool ={git = 
"https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -pallet-staking-reward-fn ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -frame-support-test-compile-pass ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -pallet-bags-list-remote-tests ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -pallet-bags-list-fuzzer ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sp-api-test ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sp-application-crypto-test ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sp-arithmetic-fuzzer ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sp-npos-elections-fuzzer ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sp-runtime-interface-test ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -sp-runtime-interface-test-wasm-deprecated ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -substrate-test-utils-test-crate ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -substrate-frame-rpc-support ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -generate-bags ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } -node-runtime-generate-bags ={git = "https://github.com/ComposableFi/substrate", branch = "mmr-polkadot-v0.9.16" } From ef8b9c47c8766b7643ac42f95b7a77233a092fd4 Mon Sep 17 00:00:00 2001 From: David Salami Date: Thu, 12 May 2022 11:05:11 +0100 Subject: [PATCH 10/96] minor fix --- modules/src/clients/ics11_beefy/client_state.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/src/clients/ics11_beefy/client_state.rs b/modules/src/clients/ics11_beefy/client_state.rs index 5676500ea4..11dc2e6ae9 100644 --- a/modules/src/clients/ics11_beefy/client_state.rs +++ b/modules/src/clients/ics11_beefy/client_state.rs @@ -105,7 +105,7 @@ impl ClientState { let (mmr_root_hash, latest_beefy_height, next_authority_set) = if let Some(mmr_update) = header.mmr_update_proof { if mmr_update.signed_commitment.commitment.validator_set_id - != self.next_authority_set.id + == self.next_authority_set.id { authority_changed = true; } From ee6906991a9c483d692139e2556c44365549dfb1 Mon Sep 17 00:00:00 2001 From: David Salami Date: Fri, 13 May 2022 15:26:20 +0100 Subject: [PATCH 11/96] seperate crypto functions into a trait --- Cargo.lock | 7219 ++++------------- modules/Cargo.toml | 14 +- .../relay_application_logic/send_transfer.rs | 51 + modules/src/clients/crypto_ops/crypto.rs | 22 + modules/src/clients/crypto_ops/mod.rs | 1 + .../clients/ics07_tendermint/client_def.rs | 28 +- modules/src/clients/ics11_beefy/client_def.rs | 130 +- .../src/clients/ics11_beefy/client_state.rs | 19 + .../clients/ics11_beefy/consensus_state.rs | 11 + modules/src/clients/ics11_beefy/header.rs | 12 +- modules/src/clients/mod.rs | 1 + modules/src/core/ics02_client/client_def.rs | 61 +- modules/src/core/ics02_client/handler.rs | 18 +- .../ics02_client/handler/create_client.rs | 4 +- .../ics02_client/handler/update_client.rs | 11 +- .../ics02_client/handler/upgrade_client.rs | 10 +- .../core/ics02_client/msgs/create_client.rs | 2 +- modules/src/core/ics03_connection/context.rs | 18 - 
modules/src/core/ics03_connection/handler.rs | 17 +- .../ics03_connection/handler/conn_open_ack.rs | 14 +- .../handler/conn_open_confirm.rs | 14 +- .../handler/conn_open_init.rs | 11 +- .../ics03_connection/handler/conn_open_try.rs | 14 +- .../core/ics03_connection/handler/verify.rs | 67 +- modules/src/core/ics04_channel/context.rs | 33 - modules/src/core/ics04_channel/handler.rs | 33 +- .../ics04_channel/handler/acknowledgement.rs | 15 +- .../handler/chan_close_confirm.rs | 16 +- .../ics04_channel/handler/chan_close_init.rs | 9 +- .../ics04_channel/handler/chan_open_ack.rs | 14 +- .../handler/chan_open_confirm.rs | 17 +- .../ics04_channel/handler/chan_open_init.rs | 9 +- .../ics04_channel/handler/chan_open_try.rs | 14 +- .../core/ics04_channel/handler/recv_packet.rs | 15 +- .../core/ics04_channel/handler/send_packet.rs | 21 +- .../src/core/ics04_channel/handler/timeout.rs | 21 +- .../ics04_channel/handler/timeout_on_close.rs | 21 +- .../src/core/ics04_channel/handler/verify.rs | 74 +- .../handler/write_acknowledgement.rs | 6 +- modules/src/core/ics05_port/error.rs | 2 +- modules/src/core/ics26_routing/context.rs | 5 + modules/src/core/ics26_routing/handler.rs | 20 +- modules/src/mock/context.rs | 120 +- 43 files changed, 2314 insertions(+), 5920 deletions(-) create mode 100644 modules/src/applications/ics20_fungible_token_transfer/relay_application_logic/send_transfer.rs create mode 100644 modules/src/clients/crypto_ops/crypto.rs create mode 100644 modules/src/clients/crypto_ops/mod.rs diff --git a/Cargo.lock b/Cargo.lock index fe7c4a7471..c79c1573a4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -12,40 +12,6 @@ dependencies = [ "regex", ] -[[package]] -name = "abscissa_core" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe438c63458706e03479442743baae6c88256498e6431708f6dfc520a26515d3" -dependencies = [ - "lazy_static", - "regex", - "secrecy", - "semver", - "serde", - "termcolor", - "toml", - "tracing", - "tracing-log", - "tracing-subscriber 0.3.10", - "wait-timeout", -] - -[[package]] -name = "abscissa_derive" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a3473aa652e90865a06b723102aaa4a54a7d9f2092dbf4582497a61d0537d3f" -dependencies = [ - "ident_case", - "proc-macro2", - "quote", - "syn", - "synstructure", -======= ->>>>>>> 8be3170b (revert changes to cargo.toml) -] - [[package]] name = "addr2line" version = "0.17.0" @@ -61,12 +27,6 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" -[[package]] -name = "adler32" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aae1277d39aeec15cb388266ecc24b11c80469deae6067e17a1a7aa9e5c1f234" - [[package]] name = "ahash" version = "0.7.6" @@ -78,17 +38,6 @@ dependencies = [ "version_check", ] -[[package]] -name = "ahash" -version = "0.7.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47" -dependencies = [ - "getrandom 0.2.5", - "once_cell", - "version_check", -] - [[package]] name = "aho-corasick" version = "0.7.18" @@ -98,21 +47,6 @@ dependencies = [ "memchr", ] -[[package]] -name = "alloc-no-stdlib" -version = "2.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35ef4730490ad1c4eae5c4325b2a95f521d023e5c885853ff7aca0a6a1631db3" - -[[package]] -name = "alloc-stdlib" 
-version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "697ed7edc0f1711de49ce108c541623a0af97c6c60b2f6e2b65229847ac843c2" -dependencies = [ - "alloc-no-stdlib", -] - [[package]] name = "ansi_term" version = "0.12.1" @@ -138,35 +72,12 @@ dependencies = [ ] [[package]] -<<<<<<< HEAD -<<<<<<< HEAD -name = "arc-swap" -version = "1.5.0" -======= -name = "arrayref" -version = "0.3.6" ->>>>>>> e05319ec (initial refactor) -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544" - -[[package]] -<<<<<<< HEAD -======= -name = "arc-swap" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5d78ce20460b82d3fa150275ed9d55e21064fc7951177baacf86a145c4a4b1f" - -[[package]] ->>>>>>> 8be3170b (revert changes to cargo.toml) name = "arrayref" version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544" [[package]] -======= ->>>>>>> e05319ec (initial refactor) name = "arrayvec" version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -184,24 +95,9 @@ checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" [[package]] name = "arrayvec" version = "0.7.2" -<<<<<<< HEAD -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6" - -[[package]] -name = "ascii" -version = "1.0.0" -======= ->>>>>>> e05319ec (initial refactor) source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6" -[[package]] -name = "ascii" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbf56136a5198c7b01a49e3afcbef6cf84597273d298f54432926024107b0109" - [[package]] name = "async-stream" version = "0.3.3" @@ -225,9 +121,9 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.56" +version = "0.1.53" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96cf8829f67d2eab0b2dfa42c5d0ef737e0724e4a82b01b3e292456202b19716" +checksum = "ed6aa3524a2dfcf9fe180c51eae2b58738348d819517ceadf95789c51fff7600" dependencies = [ "proc-macro2", "quote", @@ -281,7 +177,7 @@ dependencies = [ "http", "http-body", "hyper", - "itoa", + "itoa 1.0.2", "matchit", "memchr", "mime", @@ -337,12 +233,6 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6107fe1be6682a68940da878d9e9f5e90ca5745b3dec9fd1bb393c8777d4f581" -[[package]] -name = "base58" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6107fe1be6682a68940da878d9e9f5e90ca5745b3dec9fd1bb393c8777d4f581" - [[package]] name = "base64" version = "0.13.0" @@ -350,86 +240,57 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" [[package]] -name = "base64ct" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dea908e7347a8c64e378c17e30ef880ad73e3b4498346b055c2c00ea342f3179" - -[[package]] -name = "bech32" -version = "0.8.1" +name = "beef" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf9ff0bbfd639f15c74af777d81383cf53efb7c93613f6cab67c6c11e05bbf8b" +checksum = 
"bed554bd50246729a1ec158d08aa3235d1b69d94ad120ebe187e28894787e736" +dependencies = [ + "serde", +] [[package]] name = "beefy-generic-client" version = "0.1.0" -source = "git+https://github.com/ComposableFi/beefy-client?branch=master#9a79987cdb3e1b9f90d4151325521608e4ebb506" +source = "git+https://github.com/ComposableFi/beefy-client?branch=master#c8ef22e646f9eb33d66ac83e6fdad808b76aa4ed" dependencies = [ - "beefy-primitives 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", + "beefy-primitives", + "ckb-merkle-mountain-range", + "derive_more", + "frame-support", + "hex-literal", "pallet-beefy-mmr", "pallet-mmr", - "pallet-mmr-primitives 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", + "pallet-mmr-rpc", "parity-scale-codec", "rs_merkle", - "sp-core 4.1.0-dev", - "sp-core-hashing 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", - "sp-runtime 4.1.0-dev", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", + "serde_json", + "sp-core 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-core-hashing 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-io 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-mmr-primitives", + "sp-runtime 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-trie 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "subxt", ] [[package]] name = "beefy-merkle-tree" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16#19162e43be45817b44c7d48e50d03f074f60fbf4" - -[[package]] -name = "beefy-primitives" -version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16#19162e43be45817b44c7d48e50d03f074f60fbf4" -dependencies = [ - "parity-scale-codec", - "scale-info", - "sp-api 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", - "sp-application-crypto 4.0.0", - "sp-core 4.1.0-dev", - "sp-runtime 4.1.0-dev", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", -] +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" [[package]] name = "beefy-primitives" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17#22d40c761a985482f93bbbea5ba4199bdba74f8e" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" dependencies = [ "parity-scale-codec", "scale-info", - "sp-api 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", - "sp-application-crypto 5.0.0", - "sp-core 5.0.0", - "sp-runtime 5.0.0", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", -] - -[[package]] -name = "bitcoin" -version = "0.28.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05bba324e6baf655b882df672453dbbc527bc938cadd27750ae510aaccc3a66a" -dependencies = [ - "bech32", - "bitcoin_hashes", - "secp256k1", - "serde", -] - -[[package]] -name = "bitcoin_hashes" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "006cc91e1a1d99819bc5b8214be3555c1f0611b169f527a1fdc54ed1f2b745b0" 
-dependencies = [ - "serde", + "sp-api", + "sp-application-crypto 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-core 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-runtime 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", ] [[package]] @@ -439,67 +300,24 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] -<<<<<<< HEAD -name = "bech32" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5738be7561b0eeb501ef1d5c5db3f24e01ceb55fededd9b00039aada34966ad" - -[[package]] -name = "bitcoin" -version = "0.28.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05bba324e6baf655b882df672453dbbc527bc938cadd27750ae510aaccc3a66a" -dependencies = [ - "bech32 0.8.1", - "bitcoin_hashes", - "secp256k1", - "serde", -======= name = "bitvec" -version = "0.20.4" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7774144344a4faa177370406a7ff5f1da24303817368584c6206c8303eb07848" +checksum = "1489fcb93a5bb47da0462ca93ad252ad6af2145cce58d10d46a83931ba9f016b" dependencies = [ "funty", "radium", "tap", "wyz", ->>>>>>> e05319ec (initial refactor) -] - -[[package]] -name = "blake2-rfc" -version = "0.2.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d6d530bdd2d52966a6d03b7a964add7ae1a288d25214066fd4b600f0f796400" -dependencies = [ - "arrayvec 0.4.12", - "constant_time_eq", ] [[package]] -name = "block-buffer" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0940dc441f31689269e10ac70eb1002a3a1d3ad1390e030043662eb7fe4688b" -dependencies = [ - "block-padding 0.1.5", - "byte-tools", - "byteorder", - "generic-array 0.12.4", -] - -[[package]] -name = "bitvec" -version = "0.20.4" +name = "blake2" +version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7774144344a4faa177370406a7ff5f1da24303817368584c6206c8303eb07848" +checksum = "b9cf849ee05b2ee5fba5e36f97ff8ec2533916700fc0758d40d92136a42f3388" dependencies = [ - "funty", - "radium", - "tap", - "wyz", + "digest 0.10.3", ] [[package]] @@ -518,7 +336,7 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c0940dc441f31689269e10ac70eb1002a3a1d3ad1390e030043662eb7fe4688b" dependencies = [ - "block-padding 0.1.5", + "block-padding", "byte-tools", "byteorder", "generic-array 0.12.4", @@ -530,7 +348,6 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" dependencies = [ - "block-padding 0.2.1", "generic-array 0.14.5", ] @@ -541,7 +358,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0bf7fe51849ea569fd452f37822f606a5cabb684dc918707a0193fd4664ff324" dependencies = [ "generic-array 0.14.5", -<<<<<<< HEAD ] [[package]] @@ -551,54 +367,60 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fa79dedbb091f449f1f39e53edf88d5dbe95f895dae6135a8d7b881fb5af73f5" dependencies = [ "byte-tools", -======= ->>>>>>> e05319ec (initial refactor) ] [[package]] -name = "block-padding" -version = "0.1.5" +name = "borsh" +version = "0.9.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa79dedbb091f449f1f39e53edf88d5dbe95f895dae6135a8d7b881fb5af73f5" +checksum = "15bf3650200d8bffa99015595e10f1fbd17de07abbc25bb067da79e769939bfa" dependencies = [ - "byte-tools", + "borsh-derive", + "hashbrown 0.11.2", ] [[package]] -name = "block-padding" -version = "0.2.1" +name = "borsh-derive" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d696c370c750c948ada61c69a0ee2cbbb9c50b1019ddb86d9317157a99c2cae" +checksum = "6441c552f230375d18e3cc377677914d2ca2b0d36e52129fe15450a2dce46775" +dependencies = [ + "borsh-derive-internal", + "borsh-schema-derive-internal", + "proc-macro-crate 0.1.5", + "proc-macro2", + "syn", +] [[package]] -name = "brotli" -version = "3.3.4" +name = "borsh-derive-internal" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1a0b1dbcc8ae29329621f8d4f0d835787c1c38bb1401979b49d13b0b305ff68" +checksum = "5449c28a7b352f2d1e592a8a28bf139bc71afb0764a14f3c02500935d8c44065" dependencies = [ - "alloc-no-stdlib", - "alloc-stdlib", - "brotli-decompressor", + "proc-macro2", + "quote", + "syn", ] [[package]] -name = "brotli-decompressor" -version = "2.3.2" +name = "borsh-schema-derive-internal" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59ad2d4653bf5ca36ae797b1f4bb4dbddb60ce49ca4aed8a2ce4829f60425b80" +checksum = "cdbd5696d8bfa21d53d9fe39a714a18538bad11492a42d066dbbc395fb1951c0" dependencies = [ - "alloc-no-stdlib", - "alloc-stdlib", + "proc-macro2", + "quote", + "syn", ] [[package]] -name = "buf_redux" -version = "0.8.4" +name = "bstr" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b953a6887648bb07a535631f2bc00fbdb2a2216f135552cb3f534ed136b9c07f" +checksum = "ba3569f383e8f1598449f1a423e72e99569137b47740b1da11ef19af3d5c3223" dependencies = [ "memchr", - "safemem", ] [[package]] @@ -616,24 +438,9 @@ checksum = "87c5fdd0166095e1d463fc6cc01aa8ce547ad77a4e84d42eb6762b084e28067e" [[package]] name = "byte-tools" version = "0.3.1" -<<<<<<< HEAD source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" -[[package]] -name = "bytecount" -version = "0.6.2" -======= ->>>>>>> e05319ec (initial refactor) -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" - -[[package]] -name = "bytecount" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72feb31ffc86498dacdbd0fcebb56138e7177a8cc5cea4516031d15ae85a742e" - [[package]] name = "byteorder" version = "1.4.3" @@ -647,77 +454,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c4872d67bab6358e59559027aa3b9157c53d9358c51423c17554809a8858e0f8" [[package]] -<<<<<<< HEAD -<<<<<<< HEAD -name = "bzip2" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6afcd980b5f3a45017c57e57a2fcccbb351cc43a356ce117ef760ef8052b89b0" -dependencies = [ - "bzip2-sys", - "libc", -] - -[[package]] -name = "bzip2-sys" -version = "0.1.11+1.0.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "736a955f3fa7875102d57c82b8cac37ec45224a07fd32d58f9f7a186b6cd4cdc" -dependencies = [ - "cc", - "libc", - "pkg-config", -] - -[[package]] -name = "camino" -version = "1.0.9" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "869119e97797867fd90f5e22af7d0bd274bd4635ebb9eb68c04f3f513ae6c412" -======= -name = "camino" -version = "1.0.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07fd178c5af4d59e83498ef15cf3f154e1a6f9d091270cb86283c65ef44e9ef0" ->>>>>>> 8be3170b (revert changes to cargo.toml) -dependencies = [ - "serde", -] - -[[package]] -name = "canonical-path" -version = "2.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6e9e01327e6c86e92ec72b1c798d4a94810f147209bbe3ffab6a86954937a6f" - -[[package]] -name = "cargo-platform" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cbdb825da8a5df079a43676dbe042702f1707b1109f713a01420fbb4cc71fa27" -dependencies = [ - "serde", -] - -[[package]] -name = "cargo_metadata" -version = "0.14.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4acbb09d9ee8e23699b9634375c72795d095bf268439da88562cf9b501f181fa" -dependencies = [ - "camino", - "cargo-platform", - "semver", - "serde", - "serde_json", -] - -[[package]] -<<<<<<< HEAD -======= ->>>>>>> e05319ec (initial refactor) -======= ->>>>>>> 8be3170b (revert changes to cargo.toml) name = "cc" version = "1.0.73" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -811,113 +547,36 @@ dependencies = [ ] [[package]] -name = "color-eyre" -version = "0.6.1" +name = "const-oid" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ebf286c900a6d5867aeff75cfee3192857bb7f24b547d4f0df2ed6baa812c90" -dependencies = [ - "backtrace", - "color-spantrace", - "eyre", - "indenter", - "once_cell", - "owo-colors", - "tracing-error", -] +checksum = "e4c78c047431fee22c1a7bb92e00ad095a02a983affe4d8a72e2a2c62c1b94f3" [[package]] -name = "color-spantrace" -version = "0.2.0" +name = "constant_time_eq" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ba75b3d9449ecdccb27ecbc479fdc0b87fa2dd43d2f8298f9bf0e59aacc8dce" -dependencies = [ - "once_cell", - "owo-colors", - "tracing-core", - "tracing-error", -] +checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" [[package]] -name = "console" -version = "0.15.0" +name = "core-foundation" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a28b32d32ca44b70c3e4acd7db1babf555fa026e385fb95f18028f88848b3c31" +checksum = "194a7a9e6de53fa55116934067c844d9d749312f75c6f6d0980e8c252f8c2146" dependencies = [ - "encode_unicode", + "core-foundation-sys", "libc", - "once_cell", - "regex", - "terminal_size", - "unicode-width", - "winapi", ] [[package]] -name = "const-oid" -version = "0.7.1" +name = "core-foundation-sys" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4c78c047431fee22c1a7bb92e00ad095a02a983affe4d8a72e2a2c62c1b94f3" +checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc" [[package]] -name = "constant_time_eq" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" - -[[package]] -<<<<<<< HEAD -<<<<<<< HEAD -name = "constant_time_eq" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" - -[[package]] -name = "contracts" -version = "0.6.3" 
-======= -name = "convert_case" -version = "0.4.0" ->>>>>>> e05319ec (initial refactor) -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" -======= -name = "contracts" -version = "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1d1429e3bd78171c65aa010eabcdf8f863ba3254728dbfb0ad4b1545beac15c" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] ->>>>>>> 8be3170b (revert changes to cargo.toml) - -[[package]] -name = "convert_case" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" - -[[package]] -name = "core-foundation" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "194a7a9e6de53fa55116934067c844d9d749312f75c6f6d0980e8c252f8c2146" -dependencies = [ - "core-foundation-sys", - "libc", -] - -[[package]] -name = "core-foundation-sys" -version = "0.8.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc" - -[[package]] -name = "cpufeatures" -version = "0.2.2" +name = "cpufeatures" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "59a6001667ab124aebae2a495118e11d30984c3a653e99d86d58971708cf5e4b" dependencies = [ @@ -933,16 +592,6 @@ dependencies = [ "cfg-if 1.0.0", ] -[[package]] -name = "crossbeam-channel" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b153fe7cbef478c567df0f972e02e6d736db11affe43dfc9c56a9374d1adfb87" -dependencies = [ - "crossbeam-utils 0.7.2", - "maybe-uninit", -] - [[package]] name = "crossbeam-channel" version = "0.5.4" @@ -950,7 +599,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5aaa7bd5fb665c6864b5f963dd9097905c54125909c7aa94c9e18507cdbe6c53" dependencies = [ "cfg-if 1.0.0", - "crossbeam-utils 0.8.8", + "crossbeam-utils", ] [[package]] @@ -960,23 +609,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6455c0ca19f0d2fbf751b908d5c55c1f5cbc65e03c4225427254b46890bdde1e" dependencies = [ "cfg-if 1.0.0", - "crossbeam-epoch 0.9.8", - "crossbeam-utils 0.8.8", -] - -[[package]] -name = "crossbeam-epoch" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "058ed274caafc1f60c4997b5fc07bf7dc7cca454af7c6e81edffe5f33f70dace" -dependencies = [ - "autocfg", - "cfg-if 0.1.10", - "crossbeam-utils 0.7.2", - "lazy_static", - "maybe-uninit", - "memoffset 0.5.6", - "scopeguard", + "crossbeam-epoch", + "crossbeam-utils", ] [[package]] @@ -987,23 +621,12 @@ checksum = "1145cf131a2c6ba0615079ab6a638f7e1973ac9c2634fcbeaaad6114246efe8c" dependencies = [ "autocfg", "cfg-if 1.0.0", - "crossbeam-utils 0.8.8", + "crossbeam-utils", "lazy_static", - "memoffset 0.6.5", + "memoffset", "scopeguard", ] -[[package]] -name = "crossbeam-utils" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3c7c73a2d1e9fc0886a08b93e98eb643461230d5f1925e4036204d5f2e261a8" -dependencies = [ - "autocfg", - "cfg-if 0.1.10", - "lazy_static", -] - [[package]] name = "crossbeam-utils" version = "0.8.8" @@ -1021,10 +644,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" [[package]] -<<<<<<< HEAD 
-<<<<<<< HEAD -======= ->>>>>>> 8be3170b (revert changes to cargo.toml) name = "crypto-bigint" version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -1037,11 +656,6 @@ dependencies = [ ] [[package]] -<<<<<<< HEAD -======= ->>>>>>> e05319ec (initial refactor) -======= ->>>>>>> 8be3170b (revert changes to cargo.toml) name = "crypto-common" version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -1083,7 +697,6 @@ dependencies = [ [[package]] name = "curve25519-dalek" version = "2.1.3" -<<<<<<< HEAD source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4a9b85542f99a2dfa2a1b8e192662741c9859a846b296bef1c92ef9b58b5a216" dependencies = [ @@ -1097,49 +710,49 @@ dependencies = [ [[package]] name = "curve25519-dalek" version = "3.2.0" -======= ->>>>>>> e05319ec (initial refactor) source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a9b85542f99a2dfa2a1b8e192662741c9859a846b296bef1c92ef9b58b5a216" +checksum = "0b9fdf9972b2bd6af2d913799d9ebc165ea4d2e65878e329d9c6b372c4491b61" dependencies = [ "byteorder", - "digest 0.8.1", + "digest 0.9.0", "rand_core 0.5.1", "subtle", "zeroize", ] [[package]] -name = "curve25519-dalek" -version = "3.2.0" +name = "darling" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b9fdf9972b2bd6af2d913799d9ebc165ea4d2e65878e329d9c6b372c4491b61" +checksum = "4529658bdda7fd6769b8614be250cdcfc3aeb0ee72fe66f9e41e5e5eb73eac02" dependencies = [ - "byteorder", - "digest 0.9.0", - "rand_core 0.5.1", - "subtle", - "zeroize", + "darling_core", + "darling_macro", ] [[package]] -name = "dashmap" -version = "4.0.2" +name = "darling_core" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e77a43b28d0668df09411cb0bc9a8c2adc40f9a048afe863e05fd43251e8e39c" +checksum = "649c91bc01e8b1eac09fb91e8dbc7d517684ca6be8ebc75bb9cafc894f9fdb6f" dependencies = [ - "cfg-if 1.0.0", - "num_cpus", + "fnv", + "ident_case", + "proc-macro2", + "quote", + "strsim", + "syn", ] [[package]] -name = "deflate" -version = "0.9.1" +name = "darling_macro" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f95bf05dffba6e6cce8dfbb30def788154949ccd9aed761b472119c21e01c70" +checksum = "ddfc69c5bfcbd2fc09a0f38451d2daf0e372e367986a83906d1b0dbc88134fb5" dependencies = [ - "adler32", - "gzip-header", + "darling_core", + "quote", + "syn", ] [[package]] @@ -1152,42 +765,25 @@ dependencies = [ ] [[package]] -name = "derive_more" -version = "0.99.17" +name = "derivative" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" +checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" dependencies = [ - "convert_case", "proc-macro2", "quote", - "rustc_version", "syn", ] [[package]] -<<<<<<< HEAD -<<<<<<< HEAD -======= ->>>>>>> 8be3170b (revert changes to cargo.toml) -name = "dialoguer" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8c8ae48e400addc32a8710c8d62d55cb84249a7d58ac4cd959daecfbaddc545" -<<<<<<< HEAD -======= -name = "digest" -version = "0.8.1" +name = "derive_more" +version = "0.99.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3d0c8c8752312f9713efd397ff63acb9f85585afbf179282e720e7704954dd5" ->>>>>>> e05319ec (initial refactor) -dependencies = [ - "generic-array 
0.12.4", -======= +checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" dependencies = [ - "console", - "tempfile", - "zeroize", ->>>>>>> 8be3170b (revert changes to cargo.toml) + "proc-macro2", + "quote", + "syn", ] [[package]] @@ -1216,6 +812,7 @@ checksum = "f2fb860ca6fafa5552fb6d0e816a69c8e49f0908bf524e30a90d97c85892d506" dependencies = [ "block-buffer 0.10.2", "crypto-common", + "subtle", ] [[package]] @@ -1227,16 +824,6 @@ dependencies = [ "dirs-sys", ] -[[package]] -name = "dirs-next" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b98cf8ebf19c3d1b223e151f99a4f9f0690dca41414773390fc824184ac833e1" -dependencies = [ - "cfg-if 1.0.0", - "dirs-sys-next", -] - [[package]] name = "dirs-sys" version = "0.3.7" @@ -1249,24 +836,6 @@ dependencies = [ ] [[package]] -name = "dirs-sys-next" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ebda144c4fe02d1f7ea1a7d9641b6fc6b580adcfa024ae48797ecdeb6825b4d" -dependencies = [ - "libc", - "redox_users", - "winapi", -] - -[[package]] -name = "downcast-rs" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ea835d29036a4087793836fa931b08837ad5e957da9e23886b29586fb9b6650" - -[[package]] -<<<<<<< HEAD name = "downcast-rs" version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -1293,31 +862,6 @@ dependencies = [ "syn", ] -[[package]] -name = "dyn-clone" -version = "1.0.5" -======= -name = "dyn-clonable" -version = "0.9.0" ->>>>>>> e05319ec (initial refactor) -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e9232f0e607a262ceb9bd5141a3dfb3e4db6994b31989bbfd845878cba59fd4" -dependencies = [ - "dyn-clonable-impl", - "dyn-clone", -] - -[[package]] -name = "dyn-clonable-impl" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "558e40ea573c374cf53507fd240b7ee2f5477df7cfebdb97323ec61c719399c5" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "dyn-clone" version = "1.0.5" @@ -1366,40 +910,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" [[package]] -<<<<<<< HEAD -<<<<<<< HEAD -name = "elliptic-curve" -version = "0.11.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25b477563c2bfed38a3b7a60964c49e058b2510ad3f12ba3483fd8f62c2306d6" -dependencies = [ - "base16ct", - "crypto-bigint", - "der", - "ff", - "generic-array 0.14.5", - "group", - "rand_core 0.6.3", - "sec1", - "subtle", - "zeroize", -] - -[[package]] -name = "encode_unicode" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f" - -[[package]] -======= ->>>>>>> e05319ec (initial refactor) -name = "env_logger" -version = "0.9.0" -======= name = "elliptic-curve" version = "0.11.12" ->>>>>>> 8be3170b (revert changes to cargo.toml) source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "25b477563c2bfed38a3b7a60964c49e058b2510ad3f12ba3483fd8f62c2306d6" dependencies = [ @@ -1415,12 +927,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "encode_unicode" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f" - [[package]] name = "env_logger" 
version = "0.9.0" @@ -1437,27 +943,9 @@ dependencies = [ [[package]] name = "environmental" version = "1.1.3" -<<<<<<< HEAD -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68b91989ae21441195d7d9b9993a2f9295c7e1a8c96255d8b729accddc124797" - -[[package]] -name = "error-chain" -version = "0.12.4" -======= ->>>>>>> e05319ec (initial refactor) source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "68b91989ae21441195d7d9b9993a2f9295c7e1a8c96255d8b729accddc124797" -[[package]] -name = "error-chain" -version = "0.12.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d2f06b9cac1506ece98fe3231e3cc9c4410ec3d5b1f24ae1c8946f0742cdefc" -dependencies = [ - "version_check", -] - [[package]] name = "eyre" version = "0.6.8" @@ -1484,10 +972,6 @@ dependencies = [ ] [[package]] -<<<<<<< HEAD -<<<<<<< HEAD -======= ->>>>>>> 8be3170b (revert changes to cargo.toml) name = "ff" version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -1497,32 +981,6 @@ dependencies = [ "subtle", ] -[[package]] -name = "filetime" -version = "0.2.16" -<<<<<<< HEAD -======= -name = "fixed-hash" -version = "0.7.0" ->>>>>>> e05319ec (initial refactor) -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfcf0ed7fe52a17a03854ec54a9f76d6d84508d1c0e66bc1793301c73fc8493c" -dependencies = [ - "byteorder", - "rand 0.8.5", - "rustc-hex", - "static_assertions", -======= -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0408e2626025178a6a7f7ffc05a25bc47103229f19c113755de7bf63816290c" -dependencies = [ - "cfg-if 1.0.0", - "libc", - "redox_syscall", - "winapi", ->>>>>>> 8be3170b (revert changes to cargo.toml) -] - [[package]] name = "fixed-hash" version = "0.7.0" @@ -1553,7 +1011,6 @@ version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c606d892c9de11507fa0dcffc116434f94e105d0bbdc4e405b61519464c49d7b" dependencies = [ - "anyhow", "eyre", "paste", ] @@ -1577,31 +1034,30 @@ dependencies = [ [[package]] name = "frame-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16#19162e43be45817b44c7d48e50d03f074f60fbf4" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" dependencies = [ - "frame-support 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", - "frame-system 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", + "frame-support", + "frame-system", "linregress", "log", "parity-scale-codec", "paste", "scale-info", - "sp-api 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", - "sp-application-crypto 4.0.0", - "sp-io 4.0.0", - "sp-runtime 4.1.0-dev", - "sp-runtime-interface 4.1.0-dev", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", - "sp-storage 4.0.0", + "serde", + "sp-api", + "sp-application-crypto 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-io 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-runtime 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-runtime-interface 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-storage 6.0.0 
(git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", ] -<<<<<<< HEAD -======= [[package]] name = "frame-metadata" -version = "14.2.0" +version = "15.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37ed5e5c346de62ca5c184b4325a6600d1eaca210666e4606fe4e449574978d0" +checksum = "df6bb8542ef006ef0de09a5c4420787d79823c0ed7924225822362fd2bf2ff2d" dependencies = [ "cfg-if 1.0.0", "parity-scale-codec", @@ -1612,12 +1068,13 @@ dependencies = [ [[package]] name = "frame-support" version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" dependencies = [ "bitflags", "frame-metadata", "frame-support-procedural", "impl-trait-for-tuples", + "k256", "log", "once_cell", "parity-scale-codec", @@ -1625,23 +1082,23 @@ dependencies = [ "scale-info", "serde", "smallvec", - "sp-arithmetic", - "sp-core", + "sp-arithmetic 5.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-core 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", "sp-core-hashing-proc-macro", "sp-inherents", - "sp-io", - "sp-runtime", + "sp-io 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-runtime 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", "sp-staking", - "sp-state-machine", - "sp-std 4.0.0", - "sp-tracing", + "sp-state-machine 0.12.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-tracing 5.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", "tt-call", ] [[package]] name = "frame-support-procedural" version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" dependencies = [ "Inflector", "frame-support-procedural-tools", @@ -1653,10 +1110,10 @@ dependencies = [ [[package]] name = "frame-support-procedural-tools" version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" dependencies = [ "frame-support-procedural-tools-derive", - "proc-macro-crate", + "proc-macro-crate 1.1.3", "proc-macro2", "quote", "syn", @@ -1665,7 +1122,7 @@ dependencies = [ [[package]] name = "frame-support-procedural-tools-derive" version = "3.0.0" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" dependencies = [ "proc-macro2", "quote", @@ -1675,231 +1132,46 @@ dependencies = [ [[package]] name = "frame-system" version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" dependencies = [ "frame-support", "log", "parity-scale-codec", 
"scale-info", "serde", - "sp-core", - "sp-io", - "sp-runtime", - "sp-std 4.0.0", + "sp-core 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-io 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-runtime 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", "sp-version", ] [[package]] name = "funty" -version = "1.1.0" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fed34cd105917e91daa4da6b3728c47b068749d6a62c59811f06ed2ac71d9da7" ->>>>>>> e05319ec (initial refactor) +checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" [[package]] -name = "frame-metadata" -version = "14.2.0" +name = "futures" +version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37ed5e5c346de62ca5c184b4325a6600d1eaca210666e4606fe4e449574978d0" +checksum = "f73fe65f54d1e12b726f517d3e2135ca3125a437b6d998caf1962961f7172d9e" dependencies = [ - "cfg-if 1.0.0", - "parity-scale-codec", - "scale-info", - "serde", + "futures-channel", + "futures-core", + "futures-executor", + "futures-io", + "futures-sink", + "futures-task", + "futures-util", ] [[package]] -name = "frame-support" -version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16#19162e43be45817b44c7d48e50d03f074f60fbf4" -dependencies = [ - "bitflags", - "frame-metadata", - "frame-support-procedural 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", - "impl-trait-for-tuples", - "log", - "once_cell", - "parity-scale-codec", - "paste", - "scale-info", - "serde", - "smallvec", - "sp-arithmetic 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", - "sp-core 4.1.0-dev", - "sp-core-hashing-proc-macro 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", - "sp-inherents 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", - "sp-io 4.0.0", - "sp-runtime 4.1.0-dev", - "sp-staking 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", - "sp-state-machine 0.10.0", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", - "sp-tracing 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", - "tt-call", -] - -[[package]] -name = "frame-support" -version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17#22d40c761a985482f93bbbea5ba4199bdba74f8e" -dependencies = [ - "bitflags", - "frame-metadata", - "frame-support-procedural 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", - "impl-trait-for-tuples", - "log", - "once_cell", - "parity-scale-codec", - "paste", - "scale-info", - "serde", - "smallvec", - "sp-arithmetic 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", - "sp-core 5.0.0", - "sp-core-hashing-proc-macro 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", - "sp-inherents 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", - "sp-io 5.0.0", - "sp-runtime 5.0.0", - "sp-staking 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", - "sp-state-machine 0.11.0", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", - "sp-tracing 4.0.0 
(git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", - "tt-call", -] - -[[package]] -name = "frame-support-procedural" -version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16#19162e43be45817b44c7d48e50d03f074f60fbf4" -dependencies = [ - "Inflector", - "frame-support-procedural-tools 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "frame-support-procedural" -version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17#22d40c761a985482f93bbbea5ba4199bdba74f8e" -dependencies = [ - "Inflector", - "frame-support-procedural-tools 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "frame-support-procedural-tools" -version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16#19162e43be45817b44c7d48e50d03f074f60fbf4" -dependencies = [ - "frame-support-procedural-tools-derive 3.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", - "proc-macro-crate", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "frame-support-procedural-tools" -version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17#22d40c761a985482f93bbbea5ba4199bdba74f8e" -dependencies = [ - "frame-support-procedural-tools-derive 3.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", - "proc-macro-crate", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "frame-support-procedural-tools-derive" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16#19162e43be45817b44c7d48e50d03f074f60fbf4" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "frame-support-procedural-tools-derive" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17#22d40c761a985482f93bbbea5ba4199bdba74f8e" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "frame-system" -version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16#19162e43be45817b44c7d48e50d03f074f60fbf4" -dependencies = [ - "frame-support 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", - "log", - "parity-scale-codec", - "scale-info", - "serde", - "sp-core 4.1.0-dev", - "sp-io 4.0.0", - "sp-runtime 4.1.0-dev", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", - "sp-version 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", -] - -[[package]] -name = "frame-system" -version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17#22d40c761a985482f93bbbea5ba4199bdba74f8e" -dependencies = [ - "frame-support 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", - "log", - "parity-scale-codec", - "scale-info", - "serde", - "sp-core 5.0.0", - "sp-io 5.0.0", - "sp-runtime 5.0.0", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", - "sp-version 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", -] - -[[package]] -name = "fs-err" -version = "2.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5bd79fa345a495d3ae89fb7165fec01c0e72f41821d642dda363a1e97975652e" - -[[package]] -name = "funty" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fed34cd105917e91daa4da6b3728c47b068749d6a62c59811f06ed2ac71d9da7" - -[[package]] -name = "futures" -version = "0.3.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f73fe65f54d1e12b726f517d3e2135ca3125a437b6d998caf1962961f7172d9e" -dependencies = [ - "futures-channel", - "futures-core", - "futures-executor", - "futures-io", - "futures-sink", - "futures-task", - "futures-util", -] - -[[package]] -name = "futures-channel" -version = "0.3.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3083ce4b914124575708913bca19bfe887522d6e2e6d0952943f5eac4a74010" +name = "futures-channel" +version = "0.3.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3083ce4b914124575708913bca19bfe887522d6e2e6d0952943f5eac4a74010" dependencies = [ "futures-core", "futures-sink", @@ -2028,10 +1300,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "78cc372d058dcf6d5ecd98510e7fbc9e5aec4d21de70f65fea8fecebcd881bd4" [[package]] -name = "glob" -version = "0.3.0" +name = "globset" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" +checksum = "10463d9ff00a2a068db14231982f5132edebad0d7660cd956a1c30292dbcbfbd" +dependencies = [ + "aho-corasick", + "bstr", + "fnv", + "log", + "regex", +] [[package]] name = "group" @@ -2064,15 +1343,6 @@ dependencies = [ "syn", ] -[[package]] -name = "gzip-header" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0131feb3d3bb2a5a238d8a4d09f6353b7ebfdc52e77bccbf4ea6eaa751dde639" -dependencies = [ - "crc32fast", -] - [[package]] name = "h2" version = "0.3.13" @@ -2092,27 +1362,6 @@ dependencies = [ "tracing", ] -[[package]] -name = "half" -version = "1.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eabb4a44450da02c90444cf74558da904edde8fb4e9035a9a6a4e15445af0bd7" - -[[package]] -name = "hash-db" -version = "0.15.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d23bd4e7b5eda0d0f3a307e8b381fdc8ba9000f26fbe912250c0a4cc3956364a" - -[[package]] -name = "hash256-std-hasher" -version = "0.15.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92c171d55b98633f4ed3860808f004099b36c1cc29c42cfc53aa8591b21efcf2" -dependencies = [ - "crunchy", -] - [[package]] name = "hash-db" version = "0.15.2" @@ -2136,18 +1385,6 @@ checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e" dependencies = [ "ahash", ] -<<<<<<< HEAD - -[[package]] -name = "hashbrown" -version = "0.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c21d40587b92fa6a6c6e3c1bdbf87d75511db5672f9c93175574b3a00df1758" -dependencies = [ - "ahash", -] -======= ->>>>>>> e05319ec (initial refactor) [[package]] name = "hashbrown" @@ -2158,15 +1395,6 @@ dependencies = [ "ahash", ] -[[package]] -name = "hdpath" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72adf5a17a0952ecfcddf8d46d071271d5ee52e78443f07ba0b2dcfe3063a132" -dependencies = [ - "byteorder", -] - [[package]] name = "headers" version = "0.3.7" @@ -2213,6 +1441,12 @@ version = "0.4.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" +[[package]] +name = "hex-literal" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ebdb29d2ea9ed0083cd8cece49bbd968021bd99b0849edb4a9a7ee0fdf6a4e0" + [[package]] name = "hmac" version = "0.8.1" @@ -2252,7 +1486,7 @@ checksum = "ff8670570af52249509a86f5e3e18a08c60b177071826898fde8997cf5f6bfbb" dependencies = [ "bytes", "fnv", - "itoa 1.0.1", + "itoa 1.0.2", ] [[package]] @@ -2290,16 +1524,6 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" -[[package]] -name = "humantime-serde" -version = "1.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57a3db5ea5923d99402c94e9feb261dc5ee9b4efa158b0315f788cf549cc200c" -dependencies = [ - "humantime", - "serde", -] - [[package]] name = "hyper" version = "0.14.18" @@ -2315,7 +1539,7 @@ dependencies = [ "http-body", "httparse", "httpdate", - "itoa 1.0.1", + "itoa 1.0.2", "pin-project-lite", "socket2", "tokio", @@ -2378,76 +1602,45 @@ name = "ibc" version = "0.15.0" dependencies = [ "beefy-generic-client", - "beefy-primitives 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", + "beefy-primitives", + "borsh", "bytes", "derive_more", "env_logger", "flex-error", "ibc-proto", "ics23", - "modelator 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "modelator", "num-traits", - "pallet-mmr-primitives 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", "parity-scale-codec", "prost", "prost-types", + "ripemd", "safe-regex", "serde", "serde_derive", "serde_json", "sha2 0.10.2", - "sp-core 5.0.0", - "sp-runtime 5.0.0", - "sp-trie 5.0.0", + "sha3", + "sp-core 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-io 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-mmr-primitives", + "sp-runtime 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-trie 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", "subtle-encoding", + "subxt", "tendermint", "tendermint-light-client-verifier", "tendermint-proto", "tendermint-rpc", "tendermint-testgen", "test-log", - "time 0.3.9", + "time", + "tokio", "tracing", -<<<<<<< HEAD - "tracing-subscriber", - "uint", -] - -[[package]] -name = "ibc-integration-test" -version = "0.15.0" -dependencies = [ - "ibc", - "ibc-proto", - "ibc-relayer", - "ibc-relayer-cli", - "ibc-test-framework", - "modelator 0.4.2 (git+https://github.com/informalsystems/modelator)", - "serde", - "serde_json", - "tempfile", - "tendermint", - "tendermint-rpc", - "time 0.3.9", -======= "tracing-subscriber 0.3.11", ->>>>>>> e05319ec (initial refactor) -] - -[[package]] -name = "ibc-integration-test" -version = "0.14.1" -dependencies = [ - "ibc", - "ibc-proto", - "ibc-relayer", - "ibc-relayer-cli", - "ibc-test-framework", - "serde", - "serde_json", - "tendermint", - "tendermint-rpc", - "time", + "uint", ] [[package]] @@ -2465,378 +1658,15 @@ dependencies = [ ] [[package]] -<<<<<<< HEAD -<<<<<<< HEAD -name = "ibc-relayer" -version = "0.15.0" -dependencies = [ - "anyhow", - "async-stream", - "bech32 0.9.0", - "bitcoin", -======= name = "ics23" -version = "0.7.0" 
-source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d454cc0a22bd556cc3d3c69f9d75a392a36244634840697a4b9eb81bc5c8ae0" -dependencies = [ - "anyhow", ->>>>>>> e05319ec (initial refactor) -======= -name = "ibc-relayer" -version = "0.14.1" +version = "0.8.0-alpha" +source = "git+https://github.com/composablefi/ics23?branch=seun-v0.8.0-alpha#c79df0901ddfdfb539dae1e451e7beadc01694f4" dependencies = [ "anyhow", - "async-stream", - "bech32", - "bitcoin", ->>>>>>> 8be3170b (revert changes to cargo.toml) "bytes", - "crossbeam-channel 0.5.4", - "dirs-next", - "env_logger", - "flex-error", - "futures", - "hdpath", "hex", -<<<<<<< HEAD -<<<<<<< HEAD -======= ->>>>>>> 8be3170b (revert changes to cargo.toml) - "http", - "humantime", - "humantime-serde", - "ibc", - "ibc-proto", - "ibc-telemetry", - "itertools", - "k256", - "moka", - "nanoid", - "num-bigint 0.4.3", - "num-rational 0.4.0", -<<<<<<< HEAD -======= ->>>>>>> e05319ec (initial refactor) -======= ->>>>>>> 8be3170b (revert changes to cargo.toml) "prost", - "prost-types", - "regex", - "retry", - "ripemd160", -<<<<<<< HEAD -<<<<<<< HEAD -======= ->>>>>>> 8be3170b (revert changes to cargo.toml) - "semver", - "serde", - "serde_derive", - "serde_json", - "serial_test", - "sha2 0.10.2", - "signature", - "subtle-encoding", - "tendermint", - "tendermint-light-client", - "tendermint-light-client-verifier", - "tendermint-proto", - "tendermint-rpc", - "tendermint-testgen", - "test-log", - "thiserror", - "tiny-bip39", - "tiny-keccak", - "tokio", - "toml", - "tonic", - "tracing", -<<<<<<< HEAD - "tracing-subscriber", - "uuid 1.1.1", -] - -[[package]] -name = "ibc-relayer-cli" -version = "0.15.0" -dependencies = [ - "abscissa_core", - "atty", - "clap", - "clap_complete", - "color-eyre", - "console", - "crossbeam-channel 0.5.4", - "dialoguer", - "dirs-next", - "eyre", - "flex-error", - "futures", - "hex", - "humantime", - "ibc", - "ibc-proto", - "ibc-relayer", - "ibc-relayer-rest", - "ibc-telemetry", - "itertools", - "once_cell", - "oneline-eyre", - "regex", - "serde", - "serde_derive", - "serde_json", - "signal-hook", - "subtle-encoding", - "tendermint", - "tendermint-light-client", - "tendermint-light-client-verifier", - "tendermint-proto", - "tendermint-rpc", - "tokio", - "toml", - "tracing", - "tracing-subscriber 0.3.10", -] - -[[package]] -name = "ibc-relayer-rest" -version = "0.15.0" -dependencies = [ - "crossbeam-channel 0.5.4", - "ibc", - "ibc-relayer", - "rouille", - "serde", - "serde_json", - "toml", - "tracing", - "ureq", -] - -[[package]] -name = "ibc-telemetry" -version = "0.15.0" -dependencies = [ - "crossbeam-channel 0.5.4", - "ibc", - "moka", - "once_cell", - "opentelemetry", - "opentelemetry-prometheus", - "prometheus", - "rouille", - "uuid 1.1.1", -] - -[[package]] -name = "ibc-test-framework" -version = "0.15.0" -dependencies = [ - "async-trait", - "color-eyre", - "crossbeam-channel 0.5.4", - "env_logger", - "eyre", - "flex-error", - "hex", - "http", - "ibc", - "ibc-proto", - "ibc-relayer", - "ibc-relayer-cli", - "itertools", - "rand 0.8.5", - "semver", - "serde", - "serde_json", - "serde_yaml", - "sha2 0.10.2", - "subtle-encoding", - "tendermint", - "tendermint-rpc", - "tokio", - "toml", - "tracing", - "tracing-subscriber 0.3.10", -] - -[[package]] -name = "ics23" -version = "0.8.0-alpha" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18a435f2471c1b2ce14771da465d47321c5905fac866d0effa9e0a3eb5d94fcf" -dependencies = [ - "anyhow", - "bytes", - "hex", - "prost", - "ripemd160", - "sha2 
0.9.9", - "sha3", - "sp-std 3.0.0", -======= - "sha2 0.9.9", - "sha3", - "sp-std 3.0.0", -] - -[[package]] -name = "idna" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "418a0a6fab821475f634efe3ccc45c013f742efe03d853e8d3355d5cb850ecf8" -dependencies = [ - "matches", - "unicode-bidi", - "unicode-normalization", -] - -[[package]] -name = "impl-codec" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "161ebdfec3c8e3b52bf61c4f3550a1eea4f9579d10dc1b936f3171ebdcd6c443" -dependencies = [ - "parity-scale-codec", -] - -[[package]] -name = "impl-serde" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4551f042f3438e64dbd6226b20527fc84a6e1fe65688b58746a2f53623f25f5c" -======= - "tracing-subscriber 0.3.11", - "uint", -] - -[[package]] -name = "ibc-relayer-cli" -version = "0.14.1" -dependencies = [ - "abscissa_core", - "atty", - "clap", - "clap_complete", - "color-eyre", - "console", - "crossbeam-channel 0.5.4", - "dialoguer", - "dirs-next", - "eyre", - "flex-error", - "futures", - "hex", - "humantime", - "ibc", - "ibc-proto", - "ibc-relayer", - "ibc-relayer-rest", - "ibc-telemetry", - "itertools", - "once_cell", - "oneline-eyre", - "prost", - "prost-types", - "regex", - "serde", - "serde_derive", - "serde_json", - "signal-hook", - "subtle-encoding", - "tendermint", - "tendermint-light-client", - "tendermint-light-client-verifier", - "tendermint-proto", - "tendermint-rpc", - "tokio", - "toml", - "tracing", - "tracing-subscriber 0.3.11", -] - -[[package]] -name = "ibc-relayer-rest" -version = "0.14.1" ->>>>>>> 8be3170b (revert changes to cargo.toml) -dependencies = [ - "crossbeam-channel 0.5.4", - "ibc", - "ibc-relayer", - "rouille", - "serde", -<<<<<<< HEAD ->>>>>>> e05319ec (initial refactor) -======= - "serde_json", - "toml", - "tracing", - "ureq", ->>>>>>> 8be3170b (revert changes to cargo.toml) -] - -[[package]] -name = "ibc-telemetry" -version = "0.14.1" -dependencies = [ - "crossbeam-channel 0.5.4", - "ibc", - "once_cell", - "opentelemetry", - "opentelemetry-prometheus", - "prometheus", - "rouille", -] - -[[package]] -<<<<<<< HEAD -======= -name = "ibc-test-framework" -version = "0.14.1" -dependencies = [ - "color-eyre", - "crossbeam-channel 0.5.4", - "env_logger", - "eyre", - "flex-error", - "hex", - "ibc", - "ibc-proto", - "ibc-relayer", - "ibc-relayer-cli", - "itertools", - "prost", - "prost-types", - "rand 0.8.5", - "semver", - "serde", - "serde_json", - "serde_yaml", - "sha2 0.10.2", - "subtle-encoding", - "tendermint", - "tendermint-rpc", - "tokio", - "toml", - "tracing", - "tracing-subscriber 0.3.11", -] - -[[package]] -name = "ics23" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d454cc0a22bd556cc3d3c69f9d75a392a36244634840697a4b9eb81bc5c8ae0" -dependencies = [ - "anyhow", - "bytes", - "hex", - "prost", - "ripemd160", - "sha2 0.9.9", - "sha3", - "sp-std 3.0.0", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", ] [[package]] @@ -2857,18 +1687,15 @@ dependencies = [ ] [[package]] ->>>>>>> 8be3170b (revert changes to cargo.toml) name = "impl-codec" -version = "0.5.1" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "161ebdfec3c8e3b52bf61c4f3550a1eea4f9579d10dc1b936f3171ebdcd6c443" +checksum = "ba6a270039626615617f3f36d15fc827041df3b78c439da2cadfa47455a77f2f" dependencies = [ "parity-scale-codec", ] [[package]] 
-<<<<<<< HEAD -======= name = "impl-serde" version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -2895,7 +1722,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ce23b50ad8242c51a442f3ff322d56b02f08852c77e4c0b4d3fd684abc89c683" [[package]] ->>>>>>> 8be3170b (revert changes to cargo.toml) name = "indexmap" version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -2943,17 +1769,13 @@ dependencies = [ [[package]] name = "itoa" -<<<<<<< HEAD -version = "1.0.2" -======= version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4" [[package]] name = "itoa" -version = "1.0.1" ->>>>>>> e05319ec (initial refactor) +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "112c678d4050afce233f4f2852bb2eb519230b3cf12f33585275537d7e41578d" @@ -2967,90 +1789,214 @@ dependencies = [ ] [[package]] -name = "k256" -version = "0.10.4" +name = "jsonrpsee" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19c3a5e0a0b8450278feda242592512e09f61c72e018b8cd5c859482802daf2d" +checksum = "91dc760c341fa81173f9a434931aaf32baad5552b0230cc6c93e8fb7eaad4c19" dependencies = [ - "cfg-if 1.0.0", - "ecdsa", - "elliptic-curve", - "sec1", - "sha2 0.9.9", + "jsonrpsee-client-transport", + "jsonrpsee-core 0.10.1", ] [[package]] -name = "keccak" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67c21572b4949434e4fc1e1978b99c5f77064153c59d998bf13ecd96fb5ecba7" - -[[package]] -name = "lazy_static" -version = "1.4.0" +name = "jsonrpsee" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" +checksum = "a1f2ab5a60e558e74ea93bcf5164ebc47939a7fff8938fa9b5233bbc63e16061" +dependencies = [ + "jsonrpsee-core 0.13.1", + "jsonrpsee-http-server", + "jsonrpsee-proc-macros", + "jsonrpsee-types 0.13.1", + "jsonrpsee-ws-server", + "tracing", +] [[package]] -name = "libc" -version = "0.2.126" +name = "jsonrpsee-client-transport" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "349d5a591cd28b49e1d1037471617a32ddcda5731b99419008085f72d5a53836" +checksum = "765f7a36d5087f74e3b3b47805c2188fef8eb54afcb587b078d9f8ebfe9c7220" +dependencies = [ + "futures", + "http", + "jsonrpsee-core 0.10.1", + "jsonrpsee-types 0.10.1", + "pin-project", + "rustls-native-certs 0.6.2", + "soketto", + "thiserror", + "tokio", + "tokio-rustls 0.23.4", + "tokio-util", + "tracing", + "webpki-roots 0.22.3", +] [[package]] -name = "libm" -version = "0.2.2" +name = "jsonrpsee-core" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33a33a362ce288760ec6a508b94caaec573ae7d3bbbd91b87aa0bad4456839db" +checksum = "82ef77ecd20c2254d54f5da8c0738eacca61e6b6511268a8f2753e3148c6c706" +dependencies = [ + "anyhow", + "arrayvec 0.7.2", + "async-trait", + "beef", + "futures-channel", + "futures-util", + "hyper", + "jsonrpsee-types 0.10.1", + "rustc-hash", + "serde", + "serde_json", + "soketto", + "thiserror", + "tokio", + "tracing", +] [[package]] -name = "libsecp256k1" -version = "0.7.0" +name = "jsonrpsee-core" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"b0452aac8bab02242429380e9b2f94ea20cea2b37e2c1777a1358799bbe97f37" +checksum = "6e27462b21279edf9a6a91f46ffbe125e9cdc58b901d2e08bf59b31a47d7d0ab" dependencies = [ - "arrayref", - "base64", - "digest 0.9.0", - "hmac-drbg", - "libsecp256k1-core", - "libsecp256k1-gen-ecmult", - "libsecp256k1-gen-genmult", + "anyhow", + "arrayvec 0.7.2", + "async-trait", + "beef", + "futures-channel", + "futures-util", + "hyper", + "jsonrpsee-types 0.13.1", + "parking_lot", "rand 0.8.5", + "rustc-hash", "serde", - "sha2 0.9.9", - "typenum", + "serde_json", + "soketto", + "thiserror", + "tokio", + "tracing", ] [[package]] -name = "libsecp256k1-core" -version = "0.3.0" +name = "jsonrpsee-http-server" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5be9b9bb642d8522a44d533eab56c16c738301965504753b03ad1de3425d5451" +checksum = "7178f16eabd7154c094e24d295b9ee355ec1e5f24c328759c56255ff7bbd4548" dependencies = [ - "crunchy", - "digest 0.9.0", - "subtle", + "futures-channel", + "futures-util", + "globset", + "hyper", + "jsonrpsee-core 0.13.1", + "jsonrpsee-types 0.13.1", + "lazy_static", + "serde_json", + "tokio", + "tracing", + "unicase", ] [[package]] -name = "libsecp256k1-gen-ecmult" -version = "0.3.0" +name = "jsonrpsee-proc-macros" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3038c808c55c87e8a172643a7d87187fc6c4174468159cb3090659d55bcb4809" +checksum = "8b8d7f449cab3b747f12c3efc27f5cad537f3b597c6a3838b0fac628f4bf730a" dependencies = [ - "libsecp256k1-core", + "proc-macro-crate 1.1.3", + "proc-macro2", + "quote", + "syn", ] [[package]] -name = "libsecp256k1-gen-genmult" -version = "0.3.0" +name = "jsonrpsee-types" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3db8d6ba2cec9eacc40e6e8ccc98931840301f1006e95647ceb2dd5c3aa06f7c" +checksum = "38b6aa52f322cbf20c762407629b8300f39bcc0cf0619840d9252a2f65fd2dd9" dependencies = [ - "libsecp256k1-core", + "anyhow", + "beef", + "serde", + "serde_json", + "thiserror", + "tracing", +] + +[[package]] +name = "jsonrpsee-types" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8fd11763134104122ddeb0f97e4bbe393058017dfb077db63fbf44b4dd0dd86e" +dependencies = [ + "anyhow", + "beef", + "serde", + "serde_json", + "thiserror", + "tracing", +] + +[[package]] +name = "jsonrpsee-ws-server" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dfb6c21556c551582b56e4e8e6e6249b0bbdb69bb7fa39efe9b9a6b54af9f206" +dependencies = [ + "futures-channel", + "futures-util", + "jsonrpsee-core 0.13.1", + "jsonrpsee-types 0.13.1", + "serde_json", + "soketto", + "tokio", + "tokio-util", + "tracing", +] + +[[package]] +name = "k256" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19c3a5e0a0b8450278feda242592512e09f61c72e018b8cd5c859482802daf2d" +dependencies = [ + "cfg-if 1.0.0", + "ecdsa", + "elliptic-curve", + "sec1", +] + +[[package]] +name = "keccak" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67c21572b4949434e4fc1e1978b99c5f77064153c59d998bf13ecd96fb5ecba7" + +[[package]] +name = "kvdb" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a301d8ecb7989d4a6e2c57a49baca77d353bdbf879909debe3f375fe25d61f86" +dependencies = [ + "parity-util-mem", + "smallvec", ] +[[package]] +name = "lazy_static" +version = "1.4.0" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" + +[[package]] +name = "libc" +version = "0.2.126" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "349d5a591cd28b49e1d1037471617a32ddcda5731b99419008085f72d5a53836" + [[package]] name = "libm" version = "0.2.2" @@ -3105,22 +2051,6 @@ dependencies = [ "libsecp256k1-core", ] -[[package]] -name = "linked-hash-map" -version = "0.5.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fb9b38af92608140b86b693604b9ffcc5824240a484d1ecd4795bacb2fe88f3" - -[[package]] -name = "linregress" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6c601a85f5ecd1aba625247bca0031585fb1c446461b142878a16f8245ddeb8" -dependencies = [ - "nalgebra", - "statrs", -] - [[package]] name = "linregress" version = "0.4.4" @@ -3151,21 +2081,12 @@ dependencies = [ ] [[package]] -name = "mach" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b823e83b2affd8f40a9ee8c29dbc56404c1e34cd2710921f2801e2cf29527afa" -dependencies = [ - "libc", -] - -[[package]] -name = "matchers" -version = "0.0.1" +name = "lru" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f099785f7595cc4b4553a174ce30dd7589ef93391ff414dbb67f62392b9e0ce1" +checksum = "32613e41de4c47ab04970c348ca7ae7382cf116625755af070b008a15516a889" dependencies = [ - "regex-automata", + "hashbrown 0.11.2", ] [[package]] @@ -3193,46 +2114,26 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a3e378b66a060d48947b590737b30a1be76706c8dd7b8ba0f2fe3989c68a853f" [[package]] -<<<<<<< HEAD name = "matchit" version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "73cbba799671b762df5a175adf59ce145165747bb891505c43d09aefbbf38beb" [[package]] -name = "maybe-uninit" -version = "2.0.0" -======= name = "matrixmultiply" version = "0.3.2" ->>>>>>> e05319ec (initial refactor) source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "add85d4dd35074e6fedc608f8c8f513a3548619a9024b751949ef0e8e45a4d84" dependencies = [ "rawpointer", ] -[[package]] -name = "maybe-uninit" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" - [[package]] name = "memchr" version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" -[[package]] -name = "memoffset" -version = "0.5.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "043175f069eda7b85febe4a74abbaeff828d9f8b448515d3151a14a3542811aa" -dependencies = [ - "autocfg", -] - [[package]] name = "memoffset" version = "0.6.5" @@ -3244,24 +2145,12 @@ dependencies = [ [[package]] name = "memory-db" -version = "0.28.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d505169b746dacf02f7d14d8c80b34edfd8212159c63d23c977739a0d960c626" -dependencies = [ - "hash-db", - "hashbrown 0.11.2", - "parity-util-mem", -] - -[[package]] -<<<<<<< HEAD -name = "memory-db" -version = "0.28.0" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d505169b746dacf02f7d14d8c80b34edfd8212159c63d23c977739a0d960c626" +checksum = 
"6566c70c1016f525ced45d7b7f97730a2bafb037c788211d0c186ef5b2189f0a" dependencies = [ "hash-db", - "hashbrown 0.11.2", + "hashbrown 0.12.1", "parity-util-mem", ] @@ -3289,50 +2178,12 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "39617bc909d64b068dcffd0e3e31679195b5576d0c83fadc52690268cc2b2b55" -[[package]] -name = "mime" -version = "0.3.16" -======= -name = "memory_units" -version = "0.3.0" ->>>>>>> e05319ec (initial refactor) -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71d96e3f3c0b6325d8ccd83c33b28acb183edcb6c67938ba104ec546854b0882" - -[[package]] -name = "merlin" -version = "2.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e261cf0f8b3c42ded9f7d2bb59dea03aa52bc8a1cbc7482f9fc3fd1229d3b42" -dependencies = [ - "byteorder", - "keccak", - "rand_core 0.5.1", - "zeroize", -] - -[[package]] -name = "micromath" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39617bc909d64b068dcffd0e3e31679195b5576d0c83fadc52690268cc2b2b55" - [[package]] name = "mime" version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" -[[package]] -name = "mime_guess" -version = "2.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4192263c238a5f0d0c6bfd21f336a313a4ce1c450542449ca191bb657b4642ef" -dependencies = [ - "mime", - "unicase", -] - [[package]] name = "minimal-lexical" version = "0.2.1" @@ -3381,93 +2232,11 @@ dependencies = [ "tempfile", "thiserror", "tracing", - "tracing-subscriber", - "ureq", -] - -[[package]] -name = "modelator" -version = "0.4.2" -source = "git+https://github.com/informalsystems/modelator#414fdeb192c165803639363808d88c14e6f972ad" -dependencies = [ - "clap", - "clap_complete", - "directories", - "hex", - "lazy_static", - "nom", - "once_cell", - "rayon", - "regex", - "serde", - "serde_json", - "sha2 0.10.2", - "tempfile", - "thiserror", - "tracing", -<<<<<<< HEAD - "tracing-subscriber 0.3.10", -======= "tracing-subscriber 0.3.11", ->>>>>>> e05319ec (initial refactor) "ureq", - "zip", -] - -[[package]] -<<<<<<< HEAD -<<<<<<< HEAD -name = "moka" -version = "0.8.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df72b50274c0988d9f4a6e808e06d9d926f265db6f8bbda1576bcaa658e72763" -======= -name = "moka" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef9d038ced38770a867f2300ef21e1193b5e26d8e8e060fa5c034d1dddc57452" ->>>>>>> 8be3170b (revert changes to cargo.toml) -dependencies = [ - "crossbeam-channel 0.5.4", - "crossbeam-epoch 0.8.2", - "crossbeam-utils 0.8.8", - "num_cpus", - "once_cell", - "parking_lot 0.12.0", - "quanta", - "scheduled-thread-pool", - "skeptic", - "smallvec", - "tagptr", - "thiserror", - "triomphe", -<<<<<<< HEAD - "uuid 0.8.2", -======= -======= - "uuid", -] - -[[package]] -name = "multipart" -version = "0.18.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00dec633863867f29cb39df64a397cdf4a6354708ddd7759f70c7fb51c5f9182" -dependencies = [ - "buf_redux", - "httparse", - "log", - "mime", - "mime_guess", - "quick-error", - "rand 0.8.5", - "safemem", - "tempfile", - "twoway", ] [[package]] ->>>>>>> 8be3170b (revert changes to cargo.toml) name = "nalgebra" version = "0.27.1" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -3483,7 +2252,6 @@ 
dependencies = [ "rand_distr", "simba", "typenum", ->>>>>>> e05319ec (initial refactor) ] [[package]] @@ -3498,73 +2266,16 @@ dependencies = [ ] [[package]] -<<<<<<< HEAD -<<<<<<< HEAD -name = "nalgebra" -version = "0.27.1" +name = "nodrop" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "462fffe4002f4f2e1f6a9dcf12cc1a6fc0e15989014efc02a941d3e0f5dc2120" -dependencies = [ - "approx", - "matrixmultiply", - "nalgebra-macros", - "num-complex", - "num-rational 0.4.0", - "num-traits", - "rand 0.8.5", - "rand_distr", - "simba", - "typenum", -] -======= -======= -name = "nanoid" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ffa00dec017b5b1a8b7cf5e2c008bfda1aa7e0697ac1508b491fdf2622fb4d8" -dependencies = [ - "rand 0.8.5", -] - -[[package]] ->>>>>>> 8be3170b (revert changes to cargo.toml) -name = "nodrop" -version = "0.1.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72ef4a56884ca558e5ddb05a1d1e7e1bfd9a68d9ed024c21704cc98872dae1bb" ->>>>>>> e05319ec (initial refactor) - -[[package]] -name = "nalgebra-macros" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01fcc0b8149b4632adc89ac3b7b31a12fb6099a0317a4eb2ebff574ef7de7218" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "nanoid" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ffa00dec017b5b1a8b7cf5e2c008bfda1aa7e0697ac1508b491fdf2622fb4d8" -dependencies = [ - "rand 0.8.5", -] - -[[package]] -name = "nodrop" -version = "0.1.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72ef4a56884ca558e5ddb05a1d1e7e1bfd9a68d9ed024c21704cc98872dae1bb" - -[[package]] -name = "nom" -version = "7.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8903e5a29a317527874d0402f867152a3d21c908bb0b933e416c65e301d4c36" +checksum = "72ef4a56884ca558e5ddb05a1d1e7e1bfd9a68d9ed024c21704cc98872dae1bb" + +[[package]] +name = "nom" +version = "7.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8903e5a29a317527874d0402f867152a3d21c908bb0b933e416c65e301d4c36" dependencies = [ "memchr", "minimal-lexical", @@ -3581,29 +2292,6 @@ dependencies = [ "num-traits", ] -[[package]] -name = "num-bigint" -version = "0.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "090c7f9998ee0ff65aa5b723e4009f7b217707f1fb5ea551329cc4d6231fb304" -dependencies = [ - "autocfg", - "num-integer", - "num-traits", -] - -[[package]] -name = "num-bigint" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f93ab6289c7b344a8a9f60f88d80aa20032336fe78da341afc91c8a2341fc75f" -dependencies = [ - "autocfg", - "num-integer", - "num-traits", - "serde", -] - [[package]] name = "num-complex" version = "0.4.1" @@ -3613,15 +2301,6 @@ dependencies = [ "num-traits", ] -[[package]] -name = "num-complex" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26873667bbbb7c5182d4a37c1add32cdf09f841af72da53318fdb81543c15085" -dependencies = [ - "num-traits", -] - [[package]] name = "num-derive" version = "0.3.3" @@ -3660,15 +2339,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c000134b5dbf44adc5cb772486d335293351644b801551abe8f75c84cfa4aef" dependencies = [ "autocfg", -<<<<<<< HEAD -<<<<<<< HEAD - "num-bigint 0.2.6", 
-======= "num-bigint", ->>>>>>> e05319ec (initial refactor) -======= - "num-bigint 0.2.6", ->>>>>>> 8be3170b (revert changes to cargo.toml) "num-integer", "num-traits", ] @@ -3680,17 +2351,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d41702bd167c2df5520b384281bc111a4b5efcf7fbc4c9c222c815b07e0a6a6a" dependencies = [ "autocfg", -<<<<<<< HEAD -<<<<<<< HEAD - "num-bigint 0.4.3", -======= ->>>>>>> e05319ec (initial refactor) -======= - "num-bigint 0.4.3", ->>>>>>> 8be3170b (revert changes to cargo.toml) "num-integer", "num-traits", - "serde", ] [[package]] @@ -3743,21 +2405,6 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2839e79665f131bdb5782e51f2c6c9599c133c6098982a54c794358bf432529c" -[[package]] -name = "oneline-eyre" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "862f17a1e689c0ce8ca158ea48e776c5101c5d14fdfbed3e01c15f89604c3097" -dependencies = [ - "eyre", -] - -[[package]] -name = "opaque-debug" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2839e79665f131bdb5782e51f2c6c9599c133c6098982a54c794358bf432529c" - [[package]] name = "opaque-debug" version = "0.3.0" @@ -3771,136 +2418,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] -name = "opentelemetry" -version = "0.17.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6105e89802af13fdf48c49d7646d3b533a70e536d818aae7e78ba0433d01acb8" -dependencies = [ - "async-trait", - "crossbeam-channel 0.5.4", - "dashmap", - "fnv", - "futures-channel", - "futures-executor", - "futures-util", - "js-sys", - "lazy_static", - "percent-encoding", - "pin-project", - "rand 0.8.5", - "thiserror", -] - -[[package]] -name = "opentelemetry-prometheus" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9328977e479cebe12ce0d3fcecdaea4721d234895a9440c5b5dfd113f0594ac6" -dependencies = [ - "opentelemetry", - "prometheus", - "protobuf", -] - -[[package]] -name = "os_str_bytes" -version = "6.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e22443d1643a904602595ba1cd8f7d896afe56d26712531c5ff73a15b2fbf64" - -[[package]] -name = "owo-colors" -version = "3.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "decf7381921fea4dcb2549c5667eda59b3ec297ab7e2b5fc33eac69d2e7da87b" - -[[package]] -name = "pallet-beefy" -version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16#19162e43be45817b44c7d48e50d03f074f60fbf4" -dependencies = [ - "beefy-primitives 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", - "frame-support 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", - "frame-system 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", - "pallet-session", - "parity-scale-codec", - "scale-info", - "serde", - "sp-runtime 4.1.0-dev", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", -] - -[[package]] -name = "pallet-beefy-mmr" -version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16#19162e43be45817b44c7d48e50d03f074f60fbf4" -dependencies = [ - "beefy-merkle-tree", - "beefy-primitives 4.0.0-dev 
(git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", - "frame-support 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", - "frame-system 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", - "hex", - "libsecp256k1", - "log", - "pallet-beefy", - "pallet-mmr", - "pallet-mmr-primitives 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", - "pallet-session", - "parity-scale-codec", - "scale-info", - "serde", - "sp-core 4.1.0-dev", - "sp-io 4.0.0", - "sp-runtime 4.1.0-dev", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", -] - -[[package]] -<<<<<<< HEAD name = "os_str_bytes" version = "6.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "029d8d0b2f198229de29dca79676f2738ff952edf3fde542eb8bf94d8c21b435" -======= -name = "pallet-mmr" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" -dependencies = [ - "ckb-merkle-mountain-range", - "frame-benchmarking", - "frame-support", - "frame-system", - "pallet-mmr-primitives", - "parity-scale-codec", - "scale-info", - "sp-core", - "sp-io", - "sp-runtime", - "sp-std 4.0.0", -] ->>>>>>> e05319ec (initial refactor) - -[[package]] -name = "pallet-mmr-primitives" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" -dependencies = [ - "frame-support", - "frame-system", - "log", - "parity-scale-codec", - "serde", - "sp-api", - "sp-core", - "sp-runtime", - "sp-std 4.0.0", -] [[package]] -<<<<<<< HEAD name = "pallet-beefy" version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" dependencies = [ "beefy-primitives", "frame-support", @@ -3909,128 +2435,109 @@ dependencies = [ "parity-scale-codec", "scale-info", "serde", - "sp-runtime", - "sp-std 4.0.0", + "sp-runtime 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", ] [[package]] name = "pallet-beefy-mmr" version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" dependencies = [ "beefy-merkle-tree", "beefy-primitives", "frame-support", "frame-system", "hex", - "libsecp256k1", "log", "pallet-beefy", "pallet-mmr", - "pallet-mmr-primitives", "pallet-session", "parity-scale-codec", "scale-info", "serde", - "sp-core", - "sp-io", - "sp-runtime", - "sp-std 4.0.0", + "sp-core 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-io 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-runtime 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", ] [[package]] name = "pallet-mmr" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16#19162e43be45817b44c7d48e50d03f074f60fbf4" +source = 
"git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" dependencies = [ "ckb-merkle-mountain-range", "frame-benchmarking", - "frame-support 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", - "frame-system 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", - "pallet-mmr-primitives 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", + "frame-support", + "frame-system", "parity-scale-codec", "scale-info", - "sp-core 4.1.0-dev", - "sp-io 4.0.0", - "sp-runtime 4.1.0-dev", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", -] - -[[package]] -name = "pallet-mmr-primitives" -version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16#19162e43be45817b44c7d48e50d03f074f60fbf4" -dependencies = [ - "frame-support 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", - "frame-system 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", - "log", - "parity-scale-codec", - "serde", - "sp-api 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", - "sp-core 4.1.0-dev", - "sp-runtime 4.1.0-dev", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", + "sp-core 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-io 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-mmr-primitives", + "sp-runtime 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", ] [[package]] -name = "pallet-mmr-primitives" -version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17#22d40c761a985482f93bbbea5ba4199bdba74f8e" +name = "pallet-mmr-rpc" +version = "3.0.0" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" dependencies = [ - "frame-support 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", - "frame-system 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", - "log", + "jsonrpsee 0.13.1", "parity-scale-codec", "serde", - "sp-api 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", - "sp-core 5.0.0", - "sp-runtime 5.0.0", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", + "sp-api", + "sp-blockchain", + "sp-core 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-mmr-primitives", + "sp-runtime 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", ] [[package]] name = "pallet-session" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16#19162e43be45817b44c7d48e50d03f074f60fbf4" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" dependencies = [ - "frame-support 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", - "frame-system 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", + "frame-support", + "frame-system", "impl-trait-for-tuples", "log", "pallet-timestamp", "parity-scale-codec", "scale-info", - "sp-core 4.1.0-dev", - "sp-io 4.0.0", - "sp-runtime 
4.1.0-dev", + "sp-core 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-io 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-runtime 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", "sp-session", - "sp-staking 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", - "sp-trie 4.0.0", + "sp-staking", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-trie 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", ] [[package]] name = "pallet-timestamp" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16#19162e43be45817b44c7d48e50d03f074f60fbf4" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" dependencies = [ "frame-benchmarking", - "frame-support 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", - "frame-system 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", + "frame-support", + "frame-system", "log", "parity-scale-codec", "scale-info", - "sp-inherents 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", - "sp-runtime 4.1.0-dev", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", + "sp-inherents", + "sp-runtime 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", "sp-timestamp", ] [[package]] name = "parity-scale-codec" -version = "2.3.1" +version = "3.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "373b1a4c1338d9cd3d1fa53b3a11bdab5ab6bd80a20f7f7becd76953ae2be909" +checksum = "e8b44461635bbb1a0300f100a841e571e7d919c81c73075ef5d152ffdb521066" dependencies = [ "arrayvec 0.7.2", "bitvec", @@ -4042,11 +2549,11 @@ dependencies = [ [[package]] name = "parity-scale-codec-derive" -version = "2.3.1" +version = "3.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1557010476e0595c9b568d16dcfb81b93cdeb157612726f5170d31aa707bed27" +checksum = "c45ed1f39709f5a89338fab50e59816b2e8815f5bb58276e7ddf9afd495f73f8" dependencies = [ - "proc-macro-crate", + "proc-macro-crate 1.1.3", "proc-macro2", "quote", "syn", @@ -4054,16 +2561,15 @@ dependencies = [ [[package]] name = "parity-util-mem" -version = "0.10.2" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f4cb4e169446179cbc6b8b6320cc9fca49bd2e94e8db25f25f200a8ea774770" +checksum = "c32561d248d352148124f036cac253a644685a21dc9fea383eb4907d7bd35a8f" dependencies = [ "cfg-if 1.0.0", - "hashbrown 0.11.2", + "hashbrown 0.12.1", "impl-trait-for-tuples", "parity-util-mem-derive", - "parking_lot 0.11.2", -<<<<<<< HEAD + "parking_lot", "primitive-types", "winapi", ] @@ -4087,169 +2593,40 @@ checksum = "be5e13c266502aadf83426d87d81a0f5d1ef45b8027f5a471c360abfe4bfae92" [[package]] name = "parking_lot" -version = "0.11.2" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99" -======= -name = "pallet-session" -version = "4.0.0-dev" -source = 
"git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" ->>>>>>> e05319ec (initial refactor) +checksum = "87f5ec2493a61ac0506c0f4199f99070cbe83857b0337006a30f3e6719b8ef58" dependencies = [ - "frame-support", - "frame-system", - "impl-trait-for-tuples", - "log", - "pallet-timestamp", - "parity-scale-codec", - "scale-info", - "sp-core", - "sp-io", - "sp-runtime", - "sp-session", - "sp-staking", - "sp-std 4.0.0", - "sp-trie", + "lock_api", + "parking_lot_core", ] [[package]] -name = "pallet-timestamp" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +name = "parking_lot_core" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09a279cbf25cb0757810394fbc1e359949b59e348145c643a939a525692e6929" dependencies = [ - "frame-benchmarking", - "frame-support", - "frame-system", - "log", - "parity-scale-codec", - "scale-info", - "sp-inherents", - "sp-runtime", - "sp-std 4.0.0", - "sp-timestamp", + "cfg-if 1.0.0", + "libc", + "redox_syscall", + "smallvec", + "windows-sys", ] [[package]] -name = "parity-scale-codec" -version = "2.3.1" +name = "paste" +version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "373b1a4c1338d9cd3d1fa53b3a11bdab5ab6bd80a20f7f7becd76953ae2be909" -dependencies = [ - "arrayvec 0.7.2", - "bitvec", - "byte-slice-cast", - "impl-trait-for-tuples", - "parity-scale-codec-derive", - "serde", -] +checksum = "0c520e05135d6e763148b6426a837e239041653ba7becd2e538c076c738025fc" [[package]] -name = "parity-scale-codec-derive" -version = "2.3.1" +name = "pbkdf2" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1557010476e0595c9b568d16dcfb81b93cdeb157612726f5170d31aa707bed27" +checksum = "216eaa586a190f0a738f2f918511eecfa90f13295abec0e457cdebcceda80cbd" dependencies = [ - "proc-macro-crate", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "parity-util-mem" -version = "0.10.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f4cb4e169446179cbc6b8b6320cc9fca49bd2e94e8db25f25f200a8ea774770" -dependencies = [ - "cfg-if 1.0.0", - "hashbrown 0.11.2", - "impl-trait-for-tuples", - "parity-util-mem-derive", - "parking_lot", -======= ->>>>>>> 8be3170b (revert changes to cargo.toml) - "primitive-types", - "winapi", -] - -[[package]] -name = "parity-util-mem-derive" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f557c32c6d268a07c921471619c0295f5efad3a0e76d4f97a05c091a51d110b2" -dependencies = [ - "proc-macro2", - "syn", - "synstructure", -] - -[[package]] -name = "parity-wasm" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be5e13c266502aadf83426d87d81a0f5d1ef45b8027f5a471c360abfe4bfae92" - -[[package]] -name = "parking_lot" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99" -dependencies = [ - "instant", - "lock_api", - "parking_lot_core 0.8.5", -] - -[[package]] -name = "parking_lot" -version = "0.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87f5ec2493a61ac0506c0f4199f99070cbe83857b0337006a30f3e6719b8ef58" -dependencies = [ - "lock_api", - "parking_lot_core 0.9.3", -] - -[[package]] -name = "parking_lot_core" -version = 
"0.8.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d76e8e1493bcac0d2766c42737f34458f1c8c50c0d23bcb24ea953affb273216" -dependencies = [ - "cfg-if 1.0.0", - "instant", - "libc", - "redox_syscall", - "smallvec", - "winapi", -] - -[[package]] -name = "parking_lot_core" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09a279cbf25cb0757810394fbc1e359949b59e348145c643a939a525692e6929" -dependencies = [ - "cfg-if 1.0.0", - "libc", - "redox_syscall", - "smallvec", - "windows-sys", -] - -[[package]] -name = "paste" -version = "1.0.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c520e05135d6e763148b6426a837e239041653ba7becd2e538c076c738025fc" - -[[package]] -name = "pbkdf2" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "216eaa586a190f0a738f2f918511eecfa90f13295abec0e457cdebcceda80cbd" -dependencies = [ - "crypto-mac 0.8.0", + "crypto-mac 0.8.0", ] [[package]] @@ -4326,17 +2703,6 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" -[[package]] -name = "pkcs8" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cabda3fb821068a9a4fab19a683eac3af12edf0f34b94a8be53c4972b8149d0" -dependencies = [ - "der", - "spki", - "zeroize", -] - [[package]] name = "ppv-lite86" version = "0.2.16" @@ -4345,9 +2711,9 @@ checksum = "eb9f9e6e233e5c4a35559a617bf40a4ec447db2e84c20b55a6f83167b7e57872" [[package]] name = "primitive-types" -version = "0.10.1" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05e4722c697a58a99d5d06a08c30821d7c082a4632198de1eaa5a6c22ef42373" +checksum = "e28720988bff275df1f51b171e1b2a18c30d194c4d2b61defdacecd625a5d94a" dependencies = [ "fixed-hash", "impl-codec", @@ -4357,39 +2723,14 @@ dependencies = [ ] [[package]] -<<<<<<< HEAD -name = "pkg-config" -version = "0.3.25" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1df8c4ec4b0627e53bdf214615ad287367e482558cf84b109250b37464dc03ae" - -[[package]] -name = "ppv-lite86" -version = "0.2.16" -======= name = "proc-macro-crate" -version = "1.1.3" ->>>>>>> e05319ec (initial refactor) +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e17d47ce914bf4de440332250b0edd23ce48c005f59fab39d3335866b114f11a" +checksum = "1d6ea3c4595b96363c13943497db34af4460fb474a95c43f4446ad341b8c9785" dependencies = [ - "thiserror", "toml", ] -[[package]] -name = "primitive-types" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05e4722c697a58a99d5d06a08c30821d7c082a4632198de1eaa5a6c22ef42373" -dependencies = [ - "fixed-hash", - "impl-codec", - "impl-serde", - "scale-info", - "uint", -] - [[package]] name = "proc-macro-crate" version = "1.1.3" @@ -4434,44 +2775,6 @@ dependencies = [ ] [[package]] -<<<<<<< HEAD -<<<<<<< HEAD -name = "prometheus" -version = "0.13.1" -======= -name = "prometheus" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7f64969ffd5dd8f39bd57a68ac53c163a095ed9d0fb707146da1b27025a3504" -dependencies = [ - "cfg-if 1.0.0", - "fnv", - "lazy_static", - "memchr", - "parking_lot 0.11.2", - "protobuf", - "thiserror", -] - -[[package]] -name = "prost" -version = "0.9.0" ->>>>>>> 8be3170b (revert changes to cargo.toml) 
-source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cface98dfa6d645ea4c789839f176e4b072265d085bfcc48eaa8d137f58d3c39" -dependencies = [ - "cfg-if 1.0.0", - "fnv", - "lazy_static", - "memchr", - "parking_lot 0.12.0", - "protobuf", - "thiserror", -] - -[[package]] -======= ->>>>>>> e05319ec (initial refactor) name = "prost" version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -4504,92 +2807,6 @@ dependencies = [ "prost", ] -[[package]] -<<<<<<< HEAD -<<<<<<< HEAD -name = "protobuf" -version = "2.27.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf7e6d18738ecd0902d30d1ad232c9125985a3422929b16c65517b38adc14f96" - -[[package]] -name = "pulldown-cmark" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34f197a544b0c9ab3ae46c359a7ec9cbbb5c7bf97054266fecb7ead794a181d6" -dependencies = [ - "bitflags", - "memchr", - "unicase", -] - -[[package]] -name = "quanta" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bafd74c340a0a7e79415981ede3460df16b530fd071541901a57416eea950b17" -dependencies = [ - "crossbeam-utils 0.8.8", - "libc", - "mach", - "once_cell", - "raw-cpuid", - "wasi 0.10.2+wasi-snapshot-preview1", - "web-sys", - "winapi", -] - -[[package]] -name = "quick-error" -version = "1.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" - -[[package]] -======= ->>>>>>> e05319ec (initial refactor) -name = "quote" -version = "1.0.18" -======= -name = "protobuf" -version = "2.27.1" ->>>>>>> 8be3170b (revert changes to cargo.toml) -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf7e6d18738ecd0902d30d1ad232c9125985a3422929b16c65517b38adc14f96" - -[[package]] -name = "pulldown-cmark" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34f197a544b0c9ab3ae46c359a7ec9cbbb5c7bf97054266fecb7ead794a181d6" -dependencies = [ - "bitflags", - "memchr", - "unicase", -] - -[[package]] -name = "quanta" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20afe714292d5e879d8b12740aa223c6a88f118af41870e8b6196e39a02238a8" -dependencies = [ - "crossbeam-utils 0.8.8", - "libc", - "mach", - "once_cell", - "raw-cpuid", - "wasi 0.10.2+wasi-snapshot-preview1", - "web-sys", - "winapi", -] - -[[package]] -name = "quick-error" -version = "1.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" - [[package]] name = "quote" version = "1.0.18" @@ -4601,9 +2818,9 @@ dependencies = [ [[package]] name = "radium" -version = "0.6.2" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "643f8f41a8ebc4c5dc4515c82bb8abd397b527fc20fd681b7c011c2aee5d44fb" +checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" [[package]] name = "rand" @@ -4690,33 +2907,12 @@ dependencies = [ [[package]] name = "rand_pcg" version = "0.2.1" -<<<<<<< HEAD -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16abd0c1b639e9eb4d7c50c0b8100b0d0f849be2349829c740fe8e6eb4816429" -dependencies = [ - "rand_core 0.5.1", -] - -[[package]] -name = "raw-cpuid" -version = "10.3.0" -======= ->>>>>>> e05319ec (initial refactor) source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "16abd0c1b639e9eb4d7c50c0b8100b0d0f849be2349829c740fe8e6eb4816429" dependencies = [ "rand_core 0.5.1", ] -[[package]] -name = "raw-cpuid" -version = "10.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "738bc47119e3eeccc7e94c4a506901aea5e7b4944ecd0829cbebf4af04ceda12" -dependencies = [ - "bitflags", -] - [[package]] name = "rawpointer" version = "0.2.1" @@ -4741,9 +2937,9 @@ version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "258bcdb5ac6dad48491bb2992db6b7cf74878b0384908af124823d118c99683f" dependencies = [ - "crossbeam-channel 0.5.4", + "crossbeam-channel", "crossbeam-deque", - "crossbeam-utils 0.8.8", + "crossbeam-utils", "num_cpus", ] @@ -4769,30 +2965,18 @@ dependencies = [ [[package]] name = "ref-cast" -<<<<<<< HEAD -version = "1.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "300f2a835d808734ee295d45007adacb9ebb29dd3ae2424acfa17930cae541da" -======= version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "685d58625b6c2b83e4cc88a27c4bf65adb7b6b16dbdc413e515c9405b47432ab" ->>>>>>> e05319ec (initial refactor) dependencies = [ "ref-cast-impl", ] [[package]] name = "ref-cast-impl" -<<<<<<< HEAD -version = "1.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c38e3aecd2b21cb3959637b883bb3714bc7e43f0268b9a29d3743ee3e55cdd2" -======= version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a043824e29c94169374ac5183ac0ed43f5724dc4556b19568007486bd840fa1f" ->>>>>>> e05319ec (initial refactor) dependencies = [ "proc-macro2", "quote", @@ -4834,12 +3018,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "retry" -version = "1.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac95c60a949a63fd2822f4964939662d8f2c16c4fa0624fd954bc6e703b9a3f6" - [[package]] name = "rfc6979" version = "0.1.0" @@ -4867,39 +3045,12 @@ dependencies = [ ] [[package]] -name = "ripemd160" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2eca4ecc81b7f313189bf73ce724400a07da2a6dac19588b03c8bd76a2dcc251" -dependencies = [ - "block-buffer 0.9.0", - "digest 0.9.0", - "opaque-debug 0.3.0", -] - -[[package]] -name = "rouille" -version = "3.5.0" +name = "ripemd" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18b2380c42510ef4a28b5f228a174c801e0dec590103e215e60812e2e2f34d05" +checksum = "1facec54cb5e0dc08553501fa740091086d0259ad0067e0d4103448e4cb22ed3" dependencies = [ - "base64", - "brotli", - "chrono", - "deflate", - "filetime", - "multipart", - "num_cpus", - "percent-encoding", - "rand 0.8.5", - "serde", - "serde_derive", - "serde_json", - "sha1", - "threadpool", - "time", - "tiny_http", - "url", + "digest 0.10.3", ] [[package]] @@ -4908,43 +3059,13 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a632a43487c1332be8e183588079f89b6820fab24e04db49521eacd536837372" dependencies = [ -<<<<<<< HEAD - "base64", - "brotli", - "chrono", - "deflate", - "filetime", - "multipart", - "num_cpus", - "percent-encoding", - "rand 0.8.5", - "serde", - "serde_derive", - "serde_json", - "sha1", - "threadpool", - "time 0.3.9", - "tiny_http", - "url", -======= "micromath", "sha2 0.10.2", ->>>>>>> e05319ec (initial refactor) ] [[package]] -name = "rs_merkle" -version = "1.2.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "a632a43487c1332be8e183588079f89b6820fab24e04db49521eacd536837372" -dependencies = [ - "micromath", - "sha2 0.10.2", -] - -[[package]] -name = "rustc-demangle" -version = "0.1.21" +name = "rustc-demangle" +version = "0.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ef03e0a2b150c7a90d01faf6254c9c48a41e95fb2a8c2ac1c6f0d2b9aefc342" @@ -4960,15 +3081,6 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6" -[[package]] -name = "rustc_version" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" -dependencies = [ - "semver", -] - [[package]] name = "rustls" version = "0.19.1" @@ -5007,8 +3119,6 @@ dependencies = [ ] [[package]] -<<<<<<< HEAD -<<<<<<< HEAD name = "rustls-native-certs" version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -5030,19 +3140,6 @@ dependencies = [ ] [[package]] -======= ->>>>>>> 8be3170b (revert changes to cargo.toml) -name = "rustversion" -version = "1.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2cc38e8fa666e2de3c4aba7edeb5ffc5246c1c2ed0e3d17e560aeeba736b23f" - -[[package]] -<<<<<<< HEAD -======= ->>>>>>> e05319ec (initial refactor) -======= ->>>>>>> 8be3170b (revert changes to cargo.toml) name = "ryu" version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -5095,12 +3192,6 @@ dependencies = [ "safe-regex-compiler", ] -[[package]] -name = "safemem" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef703b7cb59335eae2eb93ceb664c0eb7ea6bf567079d843e09420219668e072" - [[package]] name = "same-file" version = "1.0.6" @@ -5112,9 +3203,9 @@ dependencies = [ [[package]] name = "scale-info" -version = "1.0.0" +version = "2.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c55b744399c25532d63a0d2789b109df8d46fc93752d46b0782991a931a782f" +checksum = "c46be926081c9f4dd5dd9b6f1d3e3229f2360bc6502dd8836f84a93b7c75e99a" dependencies = [ "bitvec", "cfg-if 1.0.0", @@ -5126,36 +3217,16 @@ dependencies = [ [[package]] name = "scale-info-derive" -version = "1.0.0" -<<<<<<< HEAD -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baeb2780690380592f86205aa4ee49815feb2acad8c2f59e6dd207148c3f1fcd" -dependencies = [ - "proc-macro-crate", - "proc-macro2", - "quote", - "syn", -======= +version = "2.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baeb2780690380592f86205aa4ee49815feb2acad8c2f59e6dd207148c3f1fcd" +checksum = "50e334bb10a245e28e5fd755cabcafd96cfcd167c99ae63a46924ca8d8703a3c" dependencies = [ - "proc-macro-crate", + "proc-macro-crate 1.1.3", "proc-macro2", "quote", "syn", ] -[[package]] -name = "schannel" -version = "0.1.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f05ba609c234e60bee0d547fe94a4c7e9da733d1c962cf6e59efa4cd9c8bc75" -dependencies = [ - "lazy_static", - "winapi", ->>>>>>> e05319ec (initial refactor) -] - [[package]] name = "schannel" version = "0.1.20" @@ -5166,24 +3237,6 @@ dependencies = [ "windows-sys", ] -[[package]] -name = "scheduled-thread-pool" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"dc6f74fd1204073fa02d5d5d68bec8021be4c38690b61264b2fdb48083d0e7d7" -dependencies = [ - "parking_lot 0.11.2", -] - -[[package]] -name = "scheduled-thread-pool" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc6f74fd1204073fa02d5d5d68bec8021be4c38690b61264b2fdb48083d0e7d7" -dependencies = [ - "parking_lot 0.11.2", -] - [[package]] name = "schemars" version = "0.8.10" @@ -5253,10 +3306,6 @@ dependencies = [ ] [[package]] -<<<<<<< HEAD -<<<<<<< HEAD -======= ->>>>>>> 8be3170b (revert changes to cargo.toml) name = "sec1" version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -5264,42 +3313,34 @@ checksum = "08da66b8b0965a5555b6bd6639e68ccba85e1e2506f5fbb089e93f8a04e1a2d1" dependencies = [ "der", "generic-array 0.14.5", - "pkcs8", "subtle", "zeroize", ] [[package]] name = "secp256k1" -version = "0.22.1" +version = "0.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26947345339603ae8395f68e2f3d85a6b0a8ddfe6315818e80b8504415099db0" +checksum = "9c42e6f1735c5f00f51e43e28d6634141f2bcad10931b2609ddd74a86d751260" dependencies = [ "secp256k1-sys", - "serde", ] [[package]] name = "secp256k1-sys" -version = "0.5.2" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "152e20a0fd0519390fc43ab404663af8a0b794273d2a91d60ad4a39f13ffe110" +checksum = "957da2573cde917463ece3570eab4a0b3f19de6f1646cde62e6fd3868f566036" dependencies = [ "cc", ] [[package]] -<<<<<<< HEAD -======= ->>>>>>> e05319ec (initial refactor) -======= ->>>>>>> 8be3170b (revert changes to cargo.toml) name = "secrecy" version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9bd1c54ea06cfd2f6b63219704de0b9b4f72dcc2b8fdef820be6cd799780e91e" dependencies = [ - "serde", "zeroize", ] @@ -5326,15 +3367,6 @@ dependencies = [ "libc", ] -[[package]] -name = "semver" -version = "1.0.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8cb243bdfdb5936c8dc3c45762a19d12ab4550cdc753bc247637d4ec35a040fd" -dependencies = [ - "serde", -] - [[package]] name = "serde" version = "1.0.137" @@ -5353,16 +3385,6 @@ dependencies = [ "serde", ] -[[package]] -name = "serde_cbor" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bef2ebfde456fb76bbcf9f59315333decc4fda0b2b44b420243c11e0f5ec1f5" -dependencies = [ - "half", - "serde", -] - [[package]] name = "serde_derive" version = "1.0.137" @@ -5391,7 +3413,7 @@ version = "1.0.81" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b7ce2b32a1aed03c558dc61a5cd328f15aff2dbc17daad8fb8af04d2100e15c" dependencies = [ - "itoa 1.0.1", + "itoa 1.0.2", "ryu", "serde", ] @@ -5408,67 +3430,6 @@ dependencies = [ ] [[package]] -<<<<<<< HEAD -<<<<<<< HEAD -======= ->>>>>>> 8be3170b (revert changes to cargo.toml) -name = "serde_yaml" -version = "0.8.24" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "707d15895415db6628332b737c838b88c598522e4dc70647e59b72312924aebc" -dependencies = [ - "indexmap", - "ryu", - "serde", - "yaml-rust", -] - -[[package]] -name = "serial_test" -<<<<<<< HEAD -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d19dbfb999a147cedbfe82f042eb9555f5b0fa4ef95ee4570b74349103d9c9f4" -dependencies = [ - "lazy_static", - "log", - "parking_lot 0.12.0", -======= -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "e5bcc41d18f7a1d50525d080fd3e953be87c4f9f1a974f3c21798ca00d54ec15" -dependencies = [ - "lazy_static", - "parking_lot 0.11.2", ->>>>>>> 8be3170b (revert changes to cargo.toml) - "serial_test_derive", -] - -[[package]] -name = "serial_test_derive" -<<<<<<< HEAD -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb9e2050b2be1d681f8f1c1a528bcfe4e00afa2d8995f713974f5333288659f2" -======= -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2881bccd7d60fb32dfa3d7b3136385312f8ad75e2674aab2852867a09790cae8" ->>>>>>> 8be3170b (revert changes to cargo.toml) -dependencies = [ - "proc-macro-error", - "proc-macro2", - "quote", - "rustversion", - "syn", -] - -[[package]] -<<<<<<< HEAD -======= ->>>>>>> e05319ec (initial refactor) -======= ->>>>>>> 8be3170b (revert changes to cargo.toml) name = "sha-1" version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -5492,40 +3453,6 @@ dependencies = [ "digest 0.10.3", ] -[[package]] -name = "sha1" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1da05c97445caa12d05e848c4a4fcbbea29e748ac28f7e80e9b010392063770" -dependencies = [ - "sha1_smol", -] - -[[package]] -name = "sha1_smol" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae1a47186c03a32177042e55dbc5fd5aee900b8e0069a8d70fba96a9375cd012" - -[[package]] -name = "sha2" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a256f46ea78a0c0d9ff00077504903ac881a1dafdc20da66545699e7776b3e69" -dependencies = [ - "block-buffer 0.7.3", - "digest 0.8.1", - "fake-simd", - "opaque-debug 0.2.3", -] - -[[package]] -<<<<<<< HEAD -name = "sha1_smol" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae1a47186c03a32177042e55dbc5fd5aee900b8e0069a8d70fba96a9375cd012" - [[package]] name = "sha2" version = "0.8.2" @@ -5539,13 +3466,10 @@ dependencies = [ ] [[package]] -======= ->>>>>>> e05319ec (initial refactor) name = "sha2" version = "0.9.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4d58a1e1bf39749807d89cf2d98ac2dfa0ff1cb3faa38fbb64dd88ac8013d800" -<<<<<<< HEAD dependencies = [ "block-buffer 0.9.0", "cfg-if 1.0.0", @@ -5567,14 +3491,12 @@ dependencies = [ [[package]] name = "sha3" -version = "0.9.1" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f81199417d4e5de3f04b1e871023acea7389672c4135918f05aa9cbf2f2fa809" +checksum = "881bf8156c87b6301fc5ca6b27f11eeb2761224c7081e69b409d5a1951a70c86" dependencies = [ - "block-buffer 0.9.0", - "digest 0.9.0", + "digest 0.10.3", "keccak", - "opaque-debug 0.3.0", ] [[package]] @@ -5586,16 +3508,6 @@ dependencies = [ "lazy_static", ] -[[package]] -name = "signal-hook" -version = "0.3.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "647c97df271007dcea485bb74ffdb57f2e683f1306c854f468a0c244badabf2d" -dependencies = [ - "libc", - "signal-hook-registry", -] - [[package]] name = "signal-hook-registry" version = "1.4.0" @@ -5633,26 +3545,11 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cc47a29ce97772ca5c927f75bac34866b16d64e07f330c3248e2d7226623901b" -[[package]] -name = "skeptic" -version = "0.13.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"16d23b015676c90a0f01c197bfdc786c20342c73a0afdda9025adb0bc42940a8" -dependencies = [ - "bytecount", - "cargo_metadata", - "error-chain", - "glob", - "pulldown-cmark", - "tempfile", - "walkdir", -] - [[package]] name = "slab" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9def91fd1e018fe007022791f865d0ccc9b3a0d5001e01aabb8b40e46000afb5" +checksum = "eb703cfe953bccee95685111adeedb76fabe4e97549a58d16f03ea7b9367bb32" [[package]] name = "smallvec" @@ -5670,19 +3567,34 @@ dependencies = [ "winapi", ] +[[package]] +name = "soketto" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41d1c5305e39e09653383c2c7244f2f78b3bcae37cf50c64cb4789c9f5096ec2" +dependencies = [ + "base64", + "bytes", + "futures", + "httparse", + "log", + "rand 0.8.5", + "sha-1 0.9.8", +] + [[package]] name = "sp-api" version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" dependencies = [ "hash-db", "log", "parity-scale-codec", "sp-api-proc-macro", - "sp-core", - "sp-runtime", - "sp-state-machine", - "sp-std 4.0.0", + "sp-core 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-runtime 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-state-machine 0.12.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", "sp-version", "thiserror", ] @@ -5690,10 +3602,10 @@ dependencies = [ [[package]] name = "sp-api-proc-macro" version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" dependencies = [ - "blake2-rfc", - "proc-macro-crate", + "blake2", + "proc-macro-crate 1.1.3", "proc-macro2", "quote", "syn", @@ -5701,36 +3613,104 @@ dependencies = [ [[package]] name = "sp-application-crypto" -version = "4.0.0" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +version = "6.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "acb4490364cb3b097a6755343e552495b0013778152300714be4647d107e9a2e" +dependencies = [ + "parity-scale-codec", + "scale-info", + "serde", + "sp-core 6.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-io 6.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-std 4.0.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "sp-application-crypto" +version = "6.0.0" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" dependencies = [ "parity-scale-codec", "scale-info", "serde", - "sp-core", - "sp-io", - "sp-std 4.0.0", + "sp-core 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-io 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", ] [[package]] name = "sp-arithmetic" -version = "4.0.0" -source = 
"git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +version = "5.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "31ef21f82cc10f75ed046b65e2f8048080ee76e59f1b8aed55c7150daebfd35b" +dependencies = [ + "integer-sqrt", + "num-traits", + "parity-scale-codec", + "scale-info", + "serde", + "sp-debug-derive 4.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-std 4.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "static_assertions", +] + +[[package]] +name = "sp-arithmetic" +version = "5.0.0" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" dependencies = [ "integer-sqrt", "num-traits", "parity-scale-codec", "scale-info", "serde", - "sp-debug-derive", - "sp-std 4.0.0", + "sp-debug-derive 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", "static_assertions", ] +[[package]] +name = "sp-blockchain" +version = "4.0.0-dev" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" +dependencies = [ + "futures", + "log", + "lru", + "parity-scale-codec", + "parking_lot", + "sp-api", + "sp-consensus", + "sp-database", + "sp-runtime 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-state-machine 0.12.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "thiserror", +] + +[[package]] +name = "sp-consensus" +version = "0.10.0-dev" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" +dependencies = [ + "async-trait", + "futures", + "futures-timer", + "log", + "parity-scale-codec", + "sp-core 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-inherents", + "sp-runtime 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-state-machine 0.12.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-version", + "thiserror", +] + [[package]] name = "sp-core" -version = "4.1.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +version = "6.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77963e2aa8fadb589118c3aede2e78b6c4bcf1c01d588fbf33e915b390825fbd" dependencies = [ "base58", "bitflags", @@ -5750,134 +3730,295 @@ dependencies = [ "num-traits", "parity-scale-codec", "parity-util-mem", - "parking_lot 0.11.2", + "parking_lot", "primitive-types", "rand 0.7.3", "regex", "scale-info", "schnorrkel", + "secp256k1", "secrecy", "serde", - "sha2 0.10.2", - "sp-core-hashing", - "sp-debug-derive", - "sp-externalities", - "sp-runtime-interface", - "sp-std 4.0.0", - "sp-storage", + "sp-core-hashing 4.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-debug-derive 4.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-externalities 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-runtime-interface 6.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-std 4.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-storage 6.0.0 
(registry+https://github.com/rust-lang/crates.io-index)", "ss58-registry", "substrate-bip39", "thiserror", "tiny-bip39", - "tiny-keccak", - "twox-hash", "wasmi", "zeroize", ] [[package]] -name = "sp-core-hashing" -version = "4.0.0" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +name = "sp-core" +version = "6.0.0" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" dependencies = [ + "base58", + "bitflags", "blake2-rfc", "byteorder", - "sha2 0.10.2", - "sp-std 4.0.0", - "tiny-keccak", - "twox-hash", -] - -[[package]] -name = "sp-core-hashing-proc-macro" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" -dependencies = [ - "proc-macro2", - "quote", - "sp-core-hashing", - "syn", -] - -[[package]] -name = "sp-debug-derive" -version = "4.0.0" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "sp-externalities" -version = "0.10.0" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" + "dyn-clonable", + "ed25519-dalek", + "futures", + "hash-db", + "hash256-std-hasher", + "hex", + "impl-serde", + "lazy_static", + "libsecp256k1", + "log", + "merlin", + "num-traits", + "parity-scale-codec", + "parity-util-mem", + "parking_lot", + "primitive-types", + "rand 0.7.3", + "regex", + "scale-info", + "schnorrkel", + "secp256k1", + "secrecy", + "serde", + "sp-core-hashing 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-debug-derive 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-externalities 0.12.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-runtime-interface 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-storage 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "ss58-registry", + "substrate-bip39", + "thiserror", + "tiny-bip39", + "wasmi", + "zeroize", +] + +[[package]] +name = "sp-core-hashing" +version = "4.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec864a6a67249f0c8dd3d5acab43623a61677e85ff4f2f9b04b802d2fe780e83" +dependencies = [ + "blake2-rfc", + "byteorder", + "sha2 0.9.9", + "sp-std 4.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "tiny-keccak", + "twox-hash", +] + +[[package]] +name = "sp-core-hashing" +version = "4.0.0" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" +dependencies = [ + "blake2", + "byteorder", + "digest 0.10.3", + "sha2 0.10.2", + "sha3", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "twox-hash", +] + +[[package]] +name = "sp-core-hashing-proc-macro" +version = "5.0.0" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" +dependencies = [ + "proc-macro2", + "quote", + "sp-core-hashing 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "syn", +] + +[[package]] +name = "sp-database" 
+version = "4.0.0-dev" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" +dependencies = [ + "kvdb", + "parking_lot", +] + +[[package]] +name = "sp-debug-derive" +version = "4.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d676664972e22a0796176e81e7bec41df461d1edf52090955cdab55f2c956ff2" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "sp-debug-derive" +version = "4.0.0" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "sp-externalities" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fcfd91f92a2a59224230a77c4a5d6f51709620c0aab4e51f108ccece6adc56f" +dependencies = [ + "environmental", + "parity-scale-codec", + "sp-std 4.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-storage 6.0.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "sp-externalities" +version = "0.12.0" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" dependencies = [ "environmental", "parity-scale-codec", - "sp-std 4.0.0", - "sp-storage", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-storage 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", ] [[package]] name = "sp-inherents" version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" dependencies = [ "async-trait", "impl-trait-for-tuples", "parity-scale-codec", - "sp-core", - "sp-runtime", - "sp-std 4.0.0", + "sp-core 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-runtime 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", "thiserror", ] [[package]] name = "sp-io" -version = "4.0.0" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +version = "6.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "935fd3c71bad6811a7984cabb74d323b8ca3107024024c3eabb610e0182ba8d3" +dependencies = [ + "futures", + "hash-db", + "libsecp256k1", + "log", + "parity-scale-codec", + "parking_lot", + "secp256k1", + "sp-core 6.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-externalities 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-keystore 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-runtime-interface 6.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-state-machine 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-std 4.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-tracing 5.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-trie 6.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-wasm-interface 6.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "tracing", + "tracing-core", +] + +[[package]] +name = "sp-io" +version = 
"6.0.0" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" dependencies = [ "futures", "hash-db", "libsecp256k1", "log", "parity-scale-codec", - "parking_lot 0.11.2", - "sp-core", - "sp-externalities", - "sp-keystore", - "sp-runtime-interface", - "sp-state-machine", - "sp-std 4.0.0", - "sp-tracing", - "sp-trie", - "sp-wasm-interface", + "parking_lot", + "secp256k1", + "sp-core 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-externalities 0.12.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-keystore 0.12.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-runtime-interface 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-state-machine 0.12.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-tracing 5.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-trie 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-wasm-interface 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", "tracing", "tracing-core", ] [[package]] name = "sp-keystore" -version = "0.10.0" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3261eddca8c8926e3e1de136a7980cb3afc3455247d9d6f3119d9b292f73aaee" +dependencies = [ + "async-trait", + "futures", + "merlin", + "parity-scale-codec", + "parking_lot", + "schnorrkel", + "sp-core 6.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-externalities 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)", + "thiserror", +] + +[[package]] +name = "sp-keystore" +version = "0.12.0" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" dependencies = [ "async-trait", - "derive_more", "futures", "merlin", "parity-scale-codec", - "parking_lot 0.11.2", + "parking_lot", "schnorrkel", - "sp-core", - "sp-externalities", + "sp-core 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-externalities 0.12.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "thiserror", +] + +[[package]] +name = "sp-mmr-primitives" +version = "4.0.0-dev" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" +dependencies = [ + "log", + "parity-scale-codec", + "serde", + "sp-api", + "sp-core 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-debug-derive 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-runtime 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", +] + +[[package]] +name = "sp-panic-handler" +version = "4.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2101f3c555fceafcfcfb0e61c55ea9ed80dc60bd77d54d9f25b369edb029e9a4" +dependencies = [ + "backtrace", + "lazy_static", + "regex", ] [[package]] name = "sp-panic-handler" version = "4.0.0" -source = 
"git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" dependencies = [ "backtrace", "lazy_static", @@ -5886,8 +4027,31 @@ dependencies = [ [[package]] name = "sp-runtime" -version = "4.1.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +version = "6.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb7d8a8d5ab5d349c6cf9300af1721b7b6446ba63401dbb11c10a1d65197aa5f" +dependencies = [ + "either", + "hash256-std-hasher", + "impl-trait-for-tuples", + "log", + "parity-scale-codec", + "parity-util-mem", + "paste", + "rand 0.7.3", + "scale-info", + "serde", + "sp-application-crypto 6.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-arithmetic 5.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-core 6.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-io 6.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-std 4.0.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "sp-runtime" +version = "6.0.0" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" dependencies = [ "either", "hash256-std-hasher", @@ -5899,45 +4063,68 @@ dependencies = [ "rand 0.7.3", "scale-info", "serde", - "sp-application-crypto", - "sp-arithmetic", - "sp-core", - "sp-io", - "sp-std 4.0.0", + "sp-application-crypto 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-arithmetic 5.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-core 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-io 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", ] [[package]] name = "sp-runtime-interface" -version = "4.1.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +version = "6.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "158bf0305c75a50fc0e334b889568f519a126e32b87900c3f4251202dece7b4b" dependencies = [ "impl-trait-for-tuples", "parity-scale-codec", "primitive-types", - "sp-externalities", - "sp-runtime-interface-proc-macro", - "sp-std 4.0.0", - "sp-storage", - "sp-tracing", - "sp-wasm-interface", + "sp-externalities 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-runtime-interface-proc-macro 5.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-std 4.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-storage 6.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-tracing 5.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-wasm-interface 6.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "static_assertions", -======= +] + +[[package]] +name = "sp-runtime-interface" +version = "6.0.0" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" dependencies = [ - "block-buffer 0.9.0", - "cfg-if 1.0.0", - "cpufeatures", - "digest 0.9.0", - "opaque-debug 0.3.0", ->>>>>>> e05319ec (initial refactor) + 
"impl-trait-for-tuples", + "parity-scale-codec", + "primitive-types", + "sp-externalities 0.12.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-runtime-interface-proc-macro 5.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-storage 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-tracing 5.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-wasm-interface 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "static_assertions", ] [[package]] name = "sp-runtime-interface-proc-macro" -version = "4.0.0" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +version = "5.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22ecb916b9664ed9f90abef0ff5a3e61454c1efea5861b2997e03f39b59b955f" +dependencies = [ + "Inflector", + "proc-macro-crate 1.1.3", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "sp-runtime-interface-proc-macro" +version = "5.0.0" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" dependencies = [ "Inflector", - "proc-macro-crate", + "proc-macro-crate 1.1.3", "proc-macro2", "quote", "syn", @@ -5946,131 +4133,116 @@ dependencies = [ [[package]] name = "sp-session" version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" dependencies = [ -<<<<<<< HEAD "parity-scale-codec", "scale-info", "sp-api", - "sp-core", - "sp-runtime", + "sp-core 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-runtime 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", "sp-staking", - "sp-std 4.0.0", -======= - "block-buffer 0.9.0", - "digest 0.9.0", - "keccak", - "opaque-debug 0.3.0", ->>>>>>> e05319ec (initial refactor) + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", ] [[package]] name = "sp-staking" version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" dependencies = [ "parity-scale-codec", "scale-info", - "sp-runtime", - "sp-std 4.0.0", + "sp-runtime 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", ] [[package]] -<<<<<<< HEAD -name = "signal-hook" -version = "0.3.14" +name = "sp-state-machine" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a253b5e89e2698464fc26b545c9edceb338e18a89effeeecfea192c3025be29d" +checksum = "ecee3b33eb78c99997676a571656bcc35db6886abecfddd13e76a73b5871c6c1" dependencies = [ "hash-db", "log", "num-traits", "parity-scale-codec", - "parking_lot 0.11.2", + "parking_lot", "rand 0.7.3", "smallvec", - "sp-core", - "sp-externalities", - "sp-panic-handler", - "sp-std 4.0.0", - "sp-trie", + "sp-core 6.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + 
"sp-externalities 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-panic-handler 4.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-std 4.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-trie 6.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "thiserror", "tracing", "trie-db", "trie-root", ] +[[package]] +name = "sp-state-machine" +version = "0.12.0" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" +dependencies = [ + "hash-db", + "log", + "num-traits", + "parity-scale-codec", + "parking_lot", + "rand 0.7.3", + "smallvec", + "sp-core 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-externalities 0.12.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-panic-handler 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-trie 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "thiserror", + "tracing", + "trie-root", +] + [[package]] name = "sp-std" -version = "3.0.0" +version = "4.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35391ea974fa5ee869cb094d5b437688fbf3d8127d64d1b9fed5822a1ed39b12" +checksum = "14804d6069ee7a388240b665f17908d98386ffb0b5d39f89a4099fc7a2a4c03f" [[package]] name = "sp-std" version = "4.0.0" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" [[package]] -<<<<<<< HEAD name = "sp-storage" -version = "4.0.0" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +version = "6.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5dab53af846068e3e0716d3ccc70ea0db44035c79b2ed5821aaa6635039efa37" dependencies = [ "impl-serde", "parity-scale-codec", "ref-cast", "serde", - "sp-debug-derive", - "sp-std 4.0.0", -======= -======= -name = "signal-hook" -version = "0.3.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "647c97df271007dcea485bb74ffdb57f2e683f1306c854f468a0c244badabf2d" -dependencies = [ - "libc", - "signal-hook-registry", + "sp-debug-derive 4.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-std 4.0.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] -name = "signal-hook-registry" -version = "1.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e51e73328dc4ac0c7ccbda3a494dfa03df1de2f46018127f60c693f2648455b0" +name = "sp-storage" +version = "6.0.0" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" dependencies = [ - "libc", -] - -[[package]] ->>>>>>> 8be3170b (revert changes to cargo.toml) -name = "signature" -version = "1.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02658e48d89f2bec991f9a78e69cfa4c316f8d6a6c4ec12fae1aeb263d486788" -dependencies = [ - "digest 0.9.0", - "rand_core 0.6.3", -] - -[[package]] -name = "simba" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8e82063457853d00243beda9952e910b82593e4b07ae9f721b9278a99a0d3d5c" -dependencies = [ - "approx", - "num-complex", - "num-traits", - "paste", ->>>>>>> e05319ec (initial refactor) + "impl-serde", + "parity-scale-codec", + "ref-cast", + "serde", + "sp-debug-derive 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", ] [[package]] name = "sp-timestamp" version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" dependencies = [ "async-trait", "futures-timer", @@ -6078,54 +4250,72 @@ dependencies = [ "parity-scale-codec", "sp-api", "sp-inherents", - "sp-runtime", - "sp-std 4.0.0", + "sp-runtime 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", "thiserror", ] [[package]] -<<<<<<< HEAD name = "sp-tracing" -version = "4.0.0" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +version = "5.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69a67e555d171c4238bd223393cda747dd20ec7d4f5fe5c042c056cb7fde9eda" dependencies = [ "parity-scale-codec", - "sp-std 4.0.0", + "sp-std 4.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "tracing", "tracing-core", "tracing-subscriber 0.2.25", ] [[package]] -<<<<<<< HEAD -======= ->>>>>>> e05319ec (initial refactor) -======= -name = "skeptic" -version = "0.13.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16d23b015676c90a0f01c197bfdc786c20342c73a0afdda9025adb0bc42940a8" +name = "sp-tracing" +version = "5.0.0" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" dependencies = [ - "bytecount", - "cargo_metadata", - "error-chain", - "glob", - "pulldown-cmark", - "tempfile", - "walkdir", + "parity-scale-codec", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "tracing", + "tracing-core", + "tracing-subscriber 0.2.25", ] [[package]] ->>>>>>> 8be3170b (revert changes to cargo.toml) -name = "slab" -version = "0.4.6" +name = "sp-trie" +version = "6.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb703cfe953bccee95685111adeedb76fabe4e97549a58d16f03ea7b9367bb32" +checksum = "d6fc34f4f291886914733e083b62708d829f3e6b8d7a7ca7fa8a55a3d7640b0b" +dependencies = [ + "hash-db", + "memory-db", + "parity-scale-codec", + "scale-info", + "sp-core 6.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-std 4.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "trie-db", + "trie-root", +] + +[[package]] +name = "sp-trie" +version = "6.0.0" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" +dependencies = [ + "hash-db", + "memory-db", + "parity-scale-codec", + "scale-info", + "sp-core 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "thiserror", + "trie-db", + "trie-root", +] [[package]] name = "sp-version" -version = "4.0.0-dev" -source = 
"git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +version = "5.0.0" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" dependencies = [ "impl-serde", "parity-scale-codec", @@ -6133,17 +4323,16 @@ dependencies = [ "scale-info", "serde", "sp-core-hashing-proc-macro", - "sp-runtime", - "sp-std 4.0.0", + "sp-runtime 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", "sp-version-proc-macro", "thiserror", ] [[package]] -<<<<<<< HEAD name = "sp-version-proc-macro" version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" dependencies = [ "parity-scale-codec", "proc-macro2", @@ -6152,3063 +4341,1185 @@ dependencies = [ ] [[package]] -======= -name = "socket2" -version = "0.4.4" +name = "sp-wasm-interface" +version = "6.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66d72b759436ae32898a2af0a14218dbf55efde3feeb170eb623637db85ee1e0" -dependencies = [ - "libc", - "winapi", -] - -[[package]] -name = "sp-api" -version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16#19162e43be45817b44c7d48e50d03f074f60fbf4" +checksum = "10d88debe690c2b24eaa9536a150334fcef2ae184c21a0e5b3e80135407a7d52" dependencies = [ - "hash-db", + "impl-trait-for-tuples", "log", "parity-scale-codec", - "sp-api-proc-macro 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", - "sp-core 4.1.0-dev", - "sp-runtime 4.1.0-dev", - "sp-state-machine 0.10.0", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", - "sp-version 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", - "thiserror", + "sp-std 4.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "wasmi", ] [[package]] -name = "sp-api" -version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17#22d40c761a985482f93bbbea5ba4199bdba74f8e" +name = "sp-wasm-interface" +version = "6.0.0" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" dependencies = [ - "hash-db", + "impl-trait-for-tuples", "log", "parity-scale-codec", - "sp-api-proc-macro 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", - "sp-core 5.0.0", - "sp-runtime 5.0.0", - "sp-state-machine 0.11.0", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", - "sp-version 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", - "thiserror", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "wasmi", ] [[package]] -name = "sp-api-proc-macro" -version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16#19162e43be45817b44c7d48e50d03f074f60fbf4" -dependencies = [ - "blake2-rfc", - "proc-macro-crate", - "proc-macro2", - "quote", - "syn", -] +name = "spin" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" 
[[package]] -name = "sp-api-proc-macro" -version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17#22d40c761a985482f93bbbea5ba4199bdba74f8e" +name = "ss58-registry" +version = "1.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ceb8b72a924ccfe7882d0e26144c114503760a4d1248bb5cd06c8ab2d55404cc" dependencies = [ - "blake2-rfc", - "proc-macro-crate", + "Inflector", + "num-format", "proc-macro2", "quote", - "syn", + "serde", + "serde_json", + "unicode-xid", ] [[package]] -name = "sp-application-crypto" -version = "4.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16#19162e43be45817b44c7d48e50d03f074f60fbf4" -dependencies = [ - "parity-scale-codec", - "scale-info", - "serde", - "sp-core 4.1.0-dev", - "sp-io 4.0.0", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", -] +name = "static_assertions" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" [[package]] -name = "sp-application-crypto" -version = "5.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17#22d40c761a985482f93bbbea5ba4199bdba74f8e" +name = "statrs" +version = "0.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05bdbb8e4e78216a85785a85d3ec3183144f98d0097b9281802c019bb07a6f05" dependencies = [ - "parity-scale-codec", - "scale-info", - "serde", - "sp-core 5.0.0", - "sp-io 5.0.0", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", + "approx", + "lazy_static", + "nalgebra", + "num-traits", + "rand 0.8.5", ] [[package]] -name = "sp-arithmetic" -version = "4.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16#19162e43be45817b44c7d48e50d03f074f60fbf4" +name = "strsim" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" + +[[package]] +name = "substrate-bip39" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49eee6965196b32f882dd2ee85a92b1dbead41b04e53907f269de3b0dc04733c" dependencies = [ - "integer-sqrt", - "num-traits", - "parity-scale-codec", - "scale-info", - "serde", - "sp-debug-derive 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", - "static_assertions", + "hmac 0.11.0", + "pbkdf2 0.8.0", + "schnorrkel", + "sha2 0.9.9", + "zeroize", ] [[package]] -name = "sp-arithmetic" -version = "4.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17#22d40c761a985482f93bbbea5ba4199bdba74f8e" +name = "subtle" +version = "2.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" + +[[package]] +name = "subtle-encoding" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7dcb1ed7b8330c5eed5441052651dd7a12c75e2ed88f2ec024ae1fa3a5e59945" dependencies = [ - "integer-sqrt", - "num-traits", - "parity-scale-codec", - "scale-info", - "serde", - "sp-debug-derive 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", - 
"static_assertions", + "zeroize", ] [[package]] -name = "sp-core" -version = "4.1.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16#19162e43be45817b44c7d48e50d03f074f60fbf4" +name = "subxt" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ab902b8d1bf5ecdc16c78e1f7fdef77665d5ce77004b2be1f6ac4b4f679d987" dependencies = [ - "base58", - "bitflags", - "blake2-rfc", - "byteorder", - "dyn-clonable", - "ed25519-dalek", + "bitvec", + "derivative", + "frame-metadata", "futures", - "hash-db", - "hash256-std-hasher", "hex", - "impl-serde", - "lazy_static", - "libsecp256k1", + "jsonrpsee 0.10.1", "log", - "merlin", - "num-traits", "parity-scale-codec", - "parity-util-mem", - "parking_lot 0.11.2", - "primitive-types", - "rand 0.7.3", - "regex", + "parking_lot", "scale-info", - "schnorrkel", - "secrecy", "serde", - "sha2 0.10.2", - "sp-core-hashing 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", - "sp-debug-derive 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", - "sp-externalities 0.10.0", - "sp-runtime-interface 4.1.0-dev", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", - "sp-storage 4.0.0", - "ss58-registry", - "substrate-bip39", + "serde_json", + "sp-core 6.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-runtime 6.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "subxt-macro", + "subxt-metadata", "thiserror", - "tiny-bip39", - "tiny-keccak", - "twox-hash", - "wasmi", - "zeroize", ] [[package]] -name = "sp-core" -version = "5.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17#22d40c761a985482f93bbbea5ba4199bdba74f8e" +name = "subxt-codegen" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2ac3c4e3565338616f009bc40419f45fa7d9472a5717fa8cce129777c709d1a1" dependencies = [ - "base58", - "bitflags", - "blake2-rfc", - "byteorder", - "dyn-clonable", - "ed25519-dalek", - "futures", - "hash-db", - "hash256-std-hasher", - "hex", - "impl-serde", - "lazy_static", - "libsecp256k1", - "log", - "merlin", - "num-traits", + "darling", + "frame-metadata", + "heck", "parity-scale-codec", - "parity-util-mem", - "parking_lot 0.11.2", - "primitive-types", - "rand 0.7.3", - "regex", + "proc-macro-error", + "proc-macro2", + "quote", "scale-info", - "schnorrkel", - "secrecy", - "serde", - "sha2 0.10.2", - "sp-core-hashing 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", - "sp-debug-derive 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", - "sp-externalities 0.11.0", - "sp-runtime-interface 5.0.0", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", - "sp-storage 5.0.0", - "ss58-registry", - "substrate-bip39", - "thiserror", - "tiny-bip39", - "tiny-keccak", - "twox-hash", - "wasmi", - "zeroize", + "subxt-metadata", + "syn", ] [[package]] -name = "sp-core-hashing" -version = "4.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16#19162e43be45817b44c7d48e50d03f074f60fbf4" +name = "subxt-macro" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "078edfe8f06cb00848e2e64e923fe809f345042c3d7ec13edcd7f0e617656a9b" dependencies = [ - "blake2-rfc", - "byteorder", - "sha2 0.10.2", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", - 
"tiny-keccak", - "twox-hash", + "darling", + "proc-macro-error", + "subxt-codegen", + "syn", ] [[package]] -name = "sp-core-hashing" -version = "4.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17#22d40c761a985482f93bbbea5ba4199bdba74f8e" +name = "subxt-metadata" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2819a10a1a13bd9645419f59ac9d7cc8deb51052566b9d0c2157354ea44513d5" dependencies = [ - "blake2-rfc", - "byteorder", - "sha2 0.10.2", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", - "tiny-keccak", - "twox-hash", + "frame-metadata", + "parity-scale-codec", + "scale-info", + "sp-core 6.0.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] -name = "sp-core-hashing-proc-macro" -version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16#19162e43be45817b44c7d48e50d03f074f60fbf4" +name = "syn" +version = "1.0.95" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fbaf6116ab8924f39d52792136fb74fd60a80194cf1b1c6ffa6453eef1c3f942" dependencies = [ "proc-macro2", "quote", - "sp-core-hashing 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", - "syn", + "unicode-ident", ] [[package]] -name = "sp-core-hashing-proc-macro" -version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17#22d40c761a985482f93bbbea5ba4199bdba74f8e" -dependencies = [ - "proc-macro2", - "quote", - "sp-core-hashing 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", - "syn", -] +name = "sync_wrapper" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "20518fe4a4c9acf048008599e464deb21beeae3d3578418951a189c235a7a9a8" [[package]] -name = "sp-debug-derive" -version = "4.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16#19162e43be45817b44c7d48e50d03f074f60fbf4" +name = "synstructure" +version = "0.12.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f" dependencies = [ "proc-macro2", "quote", "syn", + "unicode-xid", ] [[package]] -name = "sp-debug-derive" -version = "4.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17#22d40c761a985482f93bbbea5ba4199bdba74f8e" +name = "tap" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" + +[[package]] +name = "tempfile" +version = "3.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5cdb1ef4eaeeaddc8fbd371e5017057064af0911902ef36b39801f67cc6d79e4" dependencies = [ - "proc-macro2", - "quote", - "syn", + "cfg-if 1.0.0", + "fastrand", + "libc", + "redox_syscall", + "remove_dir_all", + "winapi", ] [[package]] -name = "sp-externalities" -version = "0.10.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16#19162e43be45817b44c7d48e50d03f074f60fbf4" +name = "tendermint" +version = "0.23.7" +source = "git+https://github.com/composableFi/tendermint-rs?branch=seun-0.23.7#cb79c164e84c3d9176b374f2b227393fe000e691" dependencies = [ - "environmental", - "parity-scale-codec", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", - "sp-storage 4.0.0", + "async-trait", + "bytes", + "ed25519", + 
"ed25519-dalek", + "flex-error", + "futures", + "num-traits", + "once_cell", + "prost", + "prost-types", + "serde", + "serde_bytes", + "serde_json", + "serde_repr", + "sha2 0.9.9", + "signature", + "subtle", + "subtle-encoding", + "tendermint-proto", + "time", + "zeroize", ] [[package]] -name = "sp-externalities" -version = "0.11.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17#22d40c761a985482f93bbbea5ba4199bdba74f8e" +name = "tendermint-config" +version = "0.23.7" +source = "git+https://github.com/composableFi/tendermint-rs?branch=seun-0.23.7#cb79c164e84c3d9176b374f2b227393fe000e691" dependencies = [ - "environmental", - "parity-scale-codec", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", - "sp-storage 5.0.0", + "flex-error", + "serde", + "serde_json", + "tendermint", + "toml", + "url", ] [[package]] -name = "sp-inherents" -version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16#19162e43be45817b44c7d48e50d03f074f60fbf4" +name = "tendermint-light-client-verifier" +version = "0.23.7" +source = "git+https://github.com/composableFi/tendermint-rs?branch=seun-0.23.7#cb79c164e84c3d9176b374f2b227393fe000e691" dependencies = [ - "async-trait", - "impl-trait-for-tuples", - "parity-scale-codec", - "sp-core 4.1.0-dev", - "sp-runtime 4.1.0-dev", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", - "thiserror", + "derive_more", + "flex-error", + "serde", + "tendermint", + "time", ] [[package]] -name = "sp-inherents" -version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17#22d40c761a985482f93bbbea5ba4199bdba74f8e" +name = "tendermint-proto" +version = "0.23.7" +source = "git+https://github.com/composableFi/tendermint-rs?branch=seun-0.23.7#cb79c164e84c3d9176b374f2b227393fe000e691" dependencies = [ - "async-trait", - "impl-trait-for-tuples", - "parity-scale-codec", - "sp-core 5.0.0", - "sp-runtime 5.0.0", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", - "thiserror", + "bytes", + "flex-error", + "num-derive", + "num-traits", + "prost", + "prost-types", + "serde", + "serde_bytes", + "subtle-encoding", + "time", ] [[package]] -name = "sp-io" -version = "4.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16#19162e43be45817b44c7d48e50d03f074f60fbf4" +name = "tendermint-rpc" +version = "0.23.7" +source = "git+https://github.com/composableFi/tendermint-rs?branch=seun-0.23.7#cb79c164e84c3d9176b374f2b227393fe000e691" dependencies = [ + "async-trait", + "async-tungstenite", + "bytes", + "flex-error", "futures", - "hash-db", - "libsecp256k1", - "log", - "parity-scale-codec", - "parking_lot 0.11.2", - "sp-core 4.1.0-dev", - "sp-externalities 0.10.0", - "sp-keystore 0.10.0", - "sp-runtime-interface 4.1.0-dev", - "sp-state-machine 0.10.0", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", - "sp-tracing 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", - "sp-trie 4.0.0", - "sp-wasm-interface 4.1.0-dev", + "getrandom 0.2.6", + "http", + "hyper", + "hyper-proxy", + "hyper-rustls", + "peg", + "pin-project", + "serde", + "serde_bytes", + "serde_json", + "subtle-encoding", + "tendermint", + "tendermint-config", + "tendermint-proto", + "thiserror", + "time", + "tokio", "tracing", - "tracing-core", + "url", + "uuid", + "walkdir", ] [[package]] -name = "sp-io" -version = "5.0.0" -source = 
"git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17#22d40c761a985482f93bbbea5ba4199bdba74f8e" +name = "tendermint-testgen" +version = "0.23.7" +source = "git+https://github.com/composableFi/tendermint-rs?branch=seun-0.23.7#cb79c164e84c3d9176b374f2b227393fe000e691" dependencies = [ - "futures", - "hash-db", - "libsecp256k1", - "log", - "parity-scale-codec", - "parking_lot 0.11.2", - "sp-core 5.0.0", - "sp-externalities 0.11.0", - "sp-keystore 0.11.0", - "sp-runtime-interface 5.0.0", - "sp-state-machine 0.11.0", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", - "sp-tracing 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", - "sp-trie 5.0.0", - "sp-wasm-interface 5.0.0", - "tracing", - "tracing-core", + "ed25519-dalek", + "gumdrop", + "serde", + "serde_json", + "simple-error", + "tempfile", + "tendermint", + "time", ] [[package]] -name = "sp-keystore" -version = "0.10.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16#19162e43be45817b44c7d48e50d03f074f60fbf4" +name = "termcolor" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bab24d30b911b2376f3a13cc2cd443142f0c81dda04c118693e35b3835757755" dependencies = [ - "async-trait", - "derive_more", - "futures", - "merlin", - "parity-scale-codec", - "parking_lot 0.11.2", - "schnorrkel", - "sp-core 4.1.0-dev", - "sp-externalities 0.10.0", + "winapi-util", ] [[package]] -name = "sp-keystore" -version = "0.11.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17#22d40c761a985482f93bbbea5ba4199bdba74f8e" +name = "test-log" +version = "0.2.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4235dbf7ea878b3ef12dea20a59c134b405a66aafc4fc2c7b9935916e289e735" dependencies = [ - "async-trait", - "futures", - "merlin", - "parity-scale-codec", - "parking_lot 0.11.2", - "schnorrkel", - "sp-core 5.0.0", - "sp-externalities 0.11.0", - "thiserror", + "proc-macro2", + "quote", + "syn", ] [[package]] -name = "sp-panic-handler" -version = "4.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16#19162e43be45817b44c7d48e50d03f074f60fbf4" +name = "textwrap" +version = "0.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1141d4d61095b28419e22cb0bbf02755f5e54e0526f97f1e3d1d160e60885fb" + +[[package]] +name = "thiserror" +version = "1.0.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd829fe32373d27f76265620b5309d0340cb8550f523c1dda251d6298069069a" dependencies = [ - "backtrace", - "lazy_static", - "regex", + "thiserror-impl", ] [[package]] -name = "sp-panic-handler" -version = "4.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17#22d40c761a985482f93bbbea5ba4199bdba74f8e" +name = "thiserror-impl" +version = "1.0.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0396bc89e626244658bef819e22d0cc459e795a5ebe878e6ec336d1674a8d79a" dependencies = [ - "backtrace", - "lazy_static", - "regex", + "proc-macro2", + "quote", + "syn", ] [[package]] -name = "sp-runtime" -version = "4.1.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16#19162e43be45817b44c7d48e50d03f074f60fbf4" +name = "thread_local" +version = "1.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5516c27b78311c50bf42c071425c560ac799b11c30b31f87e3081965fe5e0180" dependencies 
= [ - "either", - "hash256-std-hasher", - "impl-trait-for-tuples", - "log", - "parity-scale-codec", - "parity-util-mem", - "paste", - "rand 0.7.3", - "scale-info", - "serde", - "sp-application-crypto 4.0.0", - "sp-arithmetic 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", - "sp-core 4.1.0-dev", - "sp-io 4.0.0", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", + "once_cell", ] [[package]] -name = "sp-runtime" -version = "5.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17#22d40c761a985482f93bbbea5ba4199bdba74f8e" +name = "time" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c2702e08a7a860f005826c6815dcac101b19b5eb330c27fe4a5928fec1d20ddd" dependencies = [ - "either", - "hash256-std-hasher", - "impl-trait-for-tuples", - "log", - "parity-scale-codec", - "parity-util-mem", - "paste", - "rand 0.7.3", - "scale-info", - "serde", - "sp-application-crypto 5.0.0", - "sp-arithmetic 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", - "sp-core 5.0.0", - "sp-io 5.0.0", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", + "libc", + "num_threads", + "time-macros", ] [[package]] -name = "sp-runtime-interface" -version = "4.1.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16#19162e43be45817b44c7d48e50d03f074f60fbf4" -dependencies = [ - "impl-trait-for-tuples", - "parity-scale-codec", - "primitive-types", - "sp-externalities 0.10.0", - "sp-runtime-interface-proc-macro 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", - "sp-storage 4.0.0", - "sp-tracing 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", - "sp-wasm-interface 4.1.0-dev", - "static_assertions", -] +name = "time-macros" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42657b1a6f4d817cda8e7a0ace261fe0cc946cf3a80314390b22cc61ae080792" [[package]] -name = "sp-runtime-interface" -version = "5.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17#22d40c761a985482f93bbbea5ba4199bdba74f8e" +name = "tiny-bip39" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffc59cb9dfc85bb312c3a78fd6aa8a8582e310b0fa885d5bb877f6dcc601839d" dependencies = [ - "impl-trait-for-tuples", - "parity-scale-codec", - "primitive-types", - "sp-externalities 0.11.0", - "sp-runtime-interface-proc-macro 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", - "sp-storage 5.0.0", - "sp-tracing 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", - "sp-wasm-interface 5.0.0", - "static_assertions", + "anyhow", + "hmac 0.8.1", + "once_cell", + "pbkdf2 0.4.0", + "rand 0.7.3", + "rustc-hash", + "sha2 0.9.9", + "thiserror", + "unicode-normalization", + "wasm-bindgen", + "zeroize", ] [[package]] -name = "sp-runtime-interface-proc-macro" -version = "4.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16#19162e43be45817b44c7d48e50d03f074f60fbf4" +name = "tiny-keccak" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237" dependencies = [ - "Inflector", - "proc-macro-crate", - "proc-macro2", - "quote", - "syn", + "crunchy", ] [[package]] -name = "sp-runtime-interface-proc-macro" -version = "4.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17#22d40c761a985482f93bbbea5ba4199bdba74f8e" +name = "tinyvec" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" dependencies = [ - "Inflector", - "proc-macro-crate", - "proc-macro2", - "quote", - "syn", + "tinyvec_macros", ] [[package]] -name = "sp-session" -version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16#19162e43be45817b44c7d48e50d03f074f60fbf4" -dependencies = [ - "parity-scale-codec", - "scale-info", - "sp-api 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", - "sp-core 4.1.0-dev", - "sp-runtime 4.1.0-dev", - "sp-staking 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", -] +name = "tinyvec_macros" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] -name = "sp-staking" -version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16#19162e43be45817b44c7d48e50d03f074f60fbf4" +name = "tokio" +version = "1.18.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4903bf0427cf68dddd5aa6a93220756f8be0c34fcfa9f5e6191e103e15a31395" dependencies = [ - "parity-scale-codec", - "scale-info", - "sp-runtime 4.1.0-dev", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", + "bytes", + "libc", + "memchr", + "mio", + "num_cpus", + "once_cell", + "parking_lot", + "pin-project-lite", + "signal-hook-registry", + "socket2", + "tokio-macros", + "winapi", ] [[package]] -name = "sp-staking" -version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17#22d40c761a985482f93bbbea5ba4199bdba74f8e" +name = "tokio-io-timeout" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30b74022ada614a1b4834de765f9bb43877f910cc8ce4be40e89042c9223a8bf" dependencies = [ - "parity-scale-codec", - "scale-info", - "sp-runtime 5.0.0", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", + "pin-project-lite", + "tokio", ] [[package]] -name = "sp-state-machine" -version = "0.10.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16#19162e43be45817b44c7d48e50d03f074f60fbf4" +name = "tokio-macros" +version = "1.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b557f72f448c511a979e2564e55d74e6c4432fc96ff4f6241bc6bded342643b7" dependencies = [ - "hash-db", - "log", - "num-traits", - "parity-scale-codec", - "parking_lot 0.11.2", - "rand 0.7.3", - "smallvec", - "sp-core 4.1.0-dev", - "sp-externalities 0.10.0", - "sp-panic-handler 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", - "sp-trie 4.0.0", - "thiserror", - "tracing", - "trie-db", - "trie-root", + "proc-macro2", + "quote", + "syn", ] 
[[package]] -name = "sp-state-machine" -version = "0.11.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17#22d40c761a985482f93bbbea5ba4199bdba74f8e" +name = "tokio-rustls" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc6844de72e57df1980054b38be3a9f4702aba4858be64dd700181a8a6d0e1b6" dependencies = [ - "hash-db", - "log", - "num-traits", - "parity-scale-codec", - "parking_lot 0.11.2", - "rand 0.7.3", - "smallvec", - "sp-core 5.0.0", - "sp-externalities 0.11.0", - "sp-panic-handler 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", - "sp-trie 5.0.0", - "thiserror", - "tracing", - "trie-db", - "trie-root", + "rustls 0.19.1", + "tokio", + "webpki 0.21.4", ] [[package]] -name = "sp-std" -version = "3.0.0" +name = "tokio-rustls" +version = "0.23.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35391ea974fa5ee869cb094d5b437688fbf3d8127d64d1b9fed5822a1ed39b12" - -[[package]] -name = "sp-std" -version = "4.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16#19162e43be45817b44c7d48e50d03f074f60fbf4" +checksum = "c43ee83903113e03984cb9e5cebe6c04a5116269e900e3ddba8f068a62adda59" +dependencies = [ + "rustls 0.20.6", + "tokio", + "webpki 0.22.0", +] [[package]] -name = "sp-std" -version = "4.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17#22d40c761a985482f93bbbea5ba4199bdba74f8e" +name = "tokio-stream" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50145484efff8818b5ccd256697f36863f587da82cf8b409c53adf1e840798e3" +dependencies = [ + "futures-core", + "pin-project-lite", + "tokio", +] [[package]] -name = "sp-storage" -version = "4.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16#19162e43be45817b44c7d48e50d03f074f60fbf4" +name = "tokio-util" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f988a1a1adc2fb21f9c12aa96441da33a1728193ae0b95d2be22dbd17fcb4e5c" dependencies = [ - "impl-serde", - "parity-scale-codec", - "ref-cast", - "serde", - "sp-debug-derive 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", + "bytes", + "futures-core", + "futures-io", + "futures-sink", + "pin-project-lite", + "tokio", + "tracing", ] [[package]] -name = "sp-storage" -version = "5.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17#22d40c761a985482f93bbbea5ba4199bdba74f8e" +name = "toml" +version = "0.5.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d82e1a7758622a465f8cee077614c73484dac5b836c02ff6a40d5d1010324d7" dependencies = [ - "impl-serde", - "parity-scale-codec", - "ref-cast", "serde", - "sp-debug-derive 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", ] [[package]] -name = "sp-timestamp" -version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16#19162e43be45817b44c7d48e50d03f074f60fbf4" +name = "tonic" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"5be9d60db39854b30b835107500cf0aca0b0d14d6e1c3de124217c23a29c2ddb" dependencies = [ + "async-stream", "async-trait", - "futures-timer", - "log", - "parity-scale-codec", - "sp-api 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", - "sp-inherents 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", - "sp-runtime 4.1.0-dev", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", - "thiserror", + "axum", + "base64", + "bytes", + "futures-core", + "futures-util", + "h2", + "http", + "http-body", + "hyper", + "hyper-timeout", + "percent-encoding", + "pin-project", + "prost", + "prost-derive", + "tokio", + "tokio-stream", + "tokio-util", + "tower", + "tower-layer", + "tower-service", + "tracing", + "tracing-futures", ] [[package]] -name = "sp-tracing" -version = "4.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16#19162e43be45817b44c7d48e50d03f074f60fbf4" +name = "tower" +version = "0.4.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a89fd63ad6adf737582df5db40d286574513c69a11dac5214dc3b5603d6713e" dependencies = [ - "parity-scale-codec", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", + "futures-core", + "futures-util", + "indexmap", + "pin-project", + "pin-project-lite", + "rand 0.8.5", + "slab", + "tokio", + "tokio-util", + "tower-layer", + "tower-service", "tracing", - "tracing-core", - "tracing-subscriber 0.2.25", ] [[package]] -name = "sp-tracing" -version = "4.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17#22d40c761a985482f93bbbea5ba4199bdba74f8e" +name = "tower-http" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d342c6d58709c0a6d48d48dabbb62d4ef955cf5f0f3bbfd845838e7ae88dbae" dependencies = [ - "parity-scale-codec", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", - "tracing", - "tracing-core", - "tracing-subscriber 0.2.25", -] - -[[package]] -name = "sp-trie" -version = "4.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16#19162e43be45817b44c7d48e50d03f074f60fbf4" -dependencies = [ - "hash-db", - "memory-db", - "parity-scale-codec", - "scale-info", - "sp-core 4.1.0-dev", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", - "trie-db", - "trie-root", + "bitflags", + "bytes", + "futures-core", + "futures-util", + "http", + "http-body", + "http-range-header", + "pin-project-lite", + "tower", + "tower-layer", + "tower-service", ] [[package]] -name = "sp-trie" -version = "5.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17#22d40c761a985482f93bbbea5ba4199bdba74f8e" -dependencies = [ - "hash-db", - "memory-db", - "parity-scale-codec", - "scale-info", - "sp-core 5.0.0", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", - "trie-db", - "trie-root", -] +name = "tower-layer" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "343bc9466d3fe6b0f960ef45960509f84480bf4fd96f92901afe7ff3df9d3a62" [[package]] -name = "sp-version" -version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16#19162e43be45817b44c7d48e50d03f074f60fbf4" -dependencies = [ - "impl-serde", - "parity-scale-codec", - "parity-wasm", - "scale-info", - "serde", - 
"sp-core-hashing-proc-macro 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", - "sp-runtime 4.1.0-dev", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", - "sp-version-proc-macro 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", - "thiserror", -] +name = "tower-service" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6" [[package]] -name = "sp-version" -version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17#22d40c761a985482f93bbbea5ba4199bdba74f8e" +name = "tracing" +version = "0.1.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d0ecdcb44a79f0fe9844f0c4f33a342cbcbb5117de8001e6ba0dc2351327d09" dependencies = [ - "impl-serde", - "parity-scale-codec", - "parity-wasm", - "scale-info", - "serde", - "sp-core-hashing-proc-macro 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", - "sp-runtime 5.0.0", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", - "sp-version-proc-macro 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", - "thiserror", + "cfg-if 1.0.0", + "log", + "pin-project-lite", + "tracing-attributes", + "tracing-core", ] [[package]] -name = "sp-version-proc-macro" -version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16#19162e43be45817b44c7d48e50d03f074f60fbf4" +name = "tracing-attributes" +version = "0.1.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc6b8ad3567499f98a1db7a752b07a7c8c7c7c34c332ec00effb2b0027974b7c" dependencies = [ - "parity-scale-codec", "proc-macro2", "quote", "syn", ] [[package]] -name = "sp-version-proc-macro" -version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17#22d40c761a985482f93bbbea5ba4199bdba74f8e" +name = "tracing-core" +version = "0.1.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f54c8ca710e81886d498c2fd3331b56c93aa248d49de2222ad2742247c60072f" dependencies = [ - "parity-scale-codec", - "proc-macro2", - "quote", - "syn", + "lazy_static", + "valuable", ] [[package]] ->>>>>>> e05319ec (initial refactor) -name = "sp-wasm-interface" -version = "4.1.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16#19162e43be45817b44c7d48e50d03f074f60fbf4" +name = "tracing-futures" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97d095ae15e245a057c8e8451bab9b3ee1e1f68e9ba2b4fbc18d0ac5237835f2" dependencies = [ - "impl-trait-for-tuples", - "log", - "parity-scale-codec", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.16)", - "wasmi", + "pin-project", + "tracing", ] [[package]] -name = "sp-wasm-interface" -version = "5.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17#22d40c761a985482f93bbbea5ba4199bdba74f8e" +name = "tracing-log" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78ddad33d2d10b1ed7eb9d1f518a5674713876e97e5bb9b7345a7984fbb4f922" dependencies = [ - "impl-trait-for-tuples", + "lazy_static", "log", - "parity-scale-codec", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.17)", - "wasmi", 
+ "tracing-core", ] [[package]] -name = "spin" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" - -[[package]] -name = "spki" -version = "0.5.4" +name = "tracing-serde" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44d01ac02a6ccf3e07db148d2be087da624fea0221a16152ed01f0496a6b0a27" +checksum = "bc6b213177105856957181934e4920de57730fc69bf42c37ee5bb664d406d9e1" dependencies = [ - "base64ct", - "der", + "serde", + "tracing-core", ] [[package]] -name = "ss58-registry" -version = "1.17.0" +name = "tracing-subscriber" +version = "0.2.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b84a70894df7a73666e0694f44b41a9571625e9546fb58a0818a565d2c7e084" +checksum = "0e0d2eaa99c3c2e41547cfa109e910a68ea03823cccad4a0525dcbc9b01e8c71" dependencies = [ - "Inflector", - "num-format", - "proc-macro2", - "quote", + "ansi_term", + "chrono", + "lazy_static", + "matchers 0.0.1", + "regex", "serde", "serde_json", - "unicode-xid", + "sharded-slab", + "smallvec", + "thread_local", + "tracing", + "tracing-core", + "tracing-log", + "tracing-serde", ] [[package]] -name = "static_assertions" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" - -[[package]] -name = "statrs" -version = "0.15.0" +name = "tracing-subscriber" +version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05bdbb8e4e78216a85785a85d3ec3183144f98d0097b9281802c019bb07a6f05" +checksum = "4bc28f93baff38037f64e6f43d34cfa1605f27a49c34e8a04c5e78b0babf2596" dependencies = [ - "approx", + "ansi_term", "lazy_static", - "nalgebra", - "num-traits", - "rand 0.8.5", + "matchers 0.1.0", + "regex", + "serde", + "serde_json", + "sharded-slab", + "smallvec", + "thread_local", + "tracing", + "tracing-core", + "tracing-log", + "tracing-serde", ] [[package]] -name = "strsim" -version = "0.10.0" +name = "trie-db" +version = "0.23.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" +checksum = "d32d034c0d3db64b43c31de38e945f15b40cd4ca6d2dcfc26d4798ce8de4ab83" +dependencies = [ + "hash-db", + "hashbrown 0.12.1", + "log", + "rustc-hex", + "smallvec", +] [[package]] -name = "substrate-bip39" -version = "0.4.4" +name = "trie-root" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49eee6965196b32f882dd2ee85a92b1dbead41b04e53907f269de3b0dc04733c" +checksum = "9a36c5ca3911ed3c9a5416ee6c679042064b93fc637ded67e25f92e68d783891" dependencies = [ - "hmac 0.11.0", - "pbkdf2 0.8.0", - "schnorrkel", - "sha2 0.9.9", - "zeroize", + "hash-db", ] [[package]] -name = "subtle" -version = "2.4.1" +name = "try-lock" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" +checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" [[package]] -name = "subtle-encoding" -version = "0.5.1" +name = "tt-call" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7dcb1ed7b8330c5eed5441052651dd7a12c75e2ed88f2ec024ae1fa3a5e59945" -dependencies = [ - "zeroize", -] +checksum = "5e66dcbec4290c69dd03c57e76c2469ea5c7ce109c6dd4351c13055cf71ea055" [[package]] 
-name = "syn" -version = "1.0.96" +name = "tungstenite" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0748dd251e24453cb8717f0354206b91557e4ec8703673a4b30208f2abaf1ebf" +checksum = "8ada8297e8d70872fa9a551d93250a9f407beb9f37ef86494eb20012a2ff7c24" dependencies = [ - "proc-macro2", - "quote", - "unicode-ident", + "base64", + "byteorder", + "bytes", + "http", + "httparse", + "input_buffer", + "log", + "rand 0.8.5", + "sha-1 0.9.8", + "url", + "utf-8", ] [[package]] -name = "sync_wrapper" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20518fe4a4c9acf048008599e464deb21beeae3d3578418951a189c235a7a9a8" - -[[package]] -name = "synstructure" -version = "0.12.6" +name = "twox-hash" +version = "1.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f" +checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675" dependencies = [ - "proc-macro2", - "quote", - "syn", - "unicode-xid", + "cfg-if 1.0.0", + "digest 0.10.3", + "rand 0.8.5", + "static_assertions", ] [[package]] -name = "tagptr" -version = "0.2.0" +name = "typenum" +version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b2093cf4c8eb1e67749a6762251bc9cd836b6fc171623bd0a9d324d37af2417" +checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987" [[package]] -name = "tap" -version = "1.0.1" +name = "uint" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" +checksum = "12f03af7ccf01dd611cc450a0d10dbc9b745770d096473e2faf0ca6e2d66d1e0" +dependencies = [ + "byteorder", + "crunchy", + "hex", + "static_assertions", +] [[package]] -name = "tap" -version = "1.0.1" +name = "unicase" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" +checksum = "50f37be617794602aabbeee0be4f259dc1778fabe05e2d67ee8f79326d5cb4f6" +dependencies = [ + "version_check", +] [[package]] -name = "tempfile" -version = "3.3.0" +name = "unicode-bidi" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cdb1ef4eaeeaddc8fbd371e5017057064af0911902ef36b39801f67cc6d79e4" -dependencies = [ - "cfg-if 1.0.0", - "fastrand", - "libc", - "redox_syscall", - "remove_dir_all", - "winapi", -] +checksum = "099b7128301d285f79ddd55b9a83d5e6b9e97c92e0ea0daebee7263e932de992" [[package]] -name = "tendermint" -version = "0.23.7" +name = "unicode-ident" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ca881fa4dedd2b46334f13be7fbc8cc1549ba4be5a833fe4e73d1a1baaf7949" -dependencies = [ - "async-trait", - "bytes", - "ed25519", - "ed25519-dalek", - "flex-error", - "futures", - "k256", - "num-traits", - "once_cell", - "prost", - "prost-types", - "ripemd160", - "serde", - "serde_bytes", - "serde_json", - "serde_repr", - "sha2 0.9.9", - "signature", - "subtle", - "subtle-encoding", - "tendermint-proto", - "time 0.3.9", - "zeroize", -] +checksum = "d22af068fba1eb5edcb4aea19d382b2a3deb4c8f9d475c589b6ada9e0fd493ee" [[package]] -name = "tendermint-config" -version = "0.23.7" +name = "unicode-normalization" +version = "0.1.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f6c56ee93f4e9b7e7daba86d171f44572e91b741084384d0ae00df7991873dfd" +checksum = "d54590932941a9e9266f0832deed84ebe1bf2e4c9e4a3554d393d18f5e854bf9" dependencies = [ - "flex-error", - "serde", - "serde_json", - "tendermint", - "toml", - "url", + "tinyvec", ] [[package]] -<<<<<<< HEAD -<<<<<<< HEAD -name = "tendermint-light-client" -version = "0.23.7" +name = "unicode-xid" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e30a20da1169069a8629b9535bcee669be8b07480c696b5eb2f7d9cd4e4c431" -======= -name = "tendermint-light-client" -version = "0.23.6" +checksum = "957e51f3646910546462e67d5f7599b9e4fb8acdd304b087a6494730f9eebf04" + +[[package]] +name = "untrusted" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "571098c7da376550f19554227e16e00e91b8f5b99438615340e4679343ff18d1" ->>>>>>> 8be3170b (revert changes to cargo.toml) -dependencies = [ - "contracts", - "crossbeam-channel 0.4.4", - "derive_more", - "flex-error", - "futures", - "serde", - "serde_cbor", - "serde_derive", - "static_assertions", - "tendermint", - "tendermint-light-client-verifier", - "tendermint-rpc", -<<<<<<< HEAD - "time 0.3.9", -======= - "time", ->>>>>>> 8be3170b (revert changes to cargo.toml) - "tokio", -] +checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" [[package]] -<<<<<<< HEAD -======= ->>>>>>> e05319ec (initial refactor) -======= ->>>>>>> 8be3170b (revert changes to cargo.toml) -name = "tendermint-light-client-verifier" -version = "0.23.7" +name = "ureq" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ae030a759b89cca84860d497d4d4e491615d8a9243cc04c61cd89335ba9b593" +checksum = "9399fa2f927a3d327187cbd201480cee55bee6ac5d3c77dd27f0c6814cff16d5" dependencies = [ - "derive_more", - "flex-error", - "serde", - "tendermint", - "time 0.3.9", + "base64", + "chunked_transfer", + "flate2", + "log", + "once_cell", + "rustls 0.20.6", + "url", + "webpki 0.22.0", + "webpki-roots 0.22.3", ] [[package]] -name = "tendermint-proto" -version = "0.23.7" +name = "url" +version = "2.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b71f925d74903f4abbdc4af0110635a307b3cb05b175fdff4a7247c14a4d0874" +checksum = "a507c383b2d33b5fc35d1861e77e6b383d158b2da5e14fe51b83dfedf6fd578c" dependencies = [ - "bytes", - "flex-error", - "num-derive", - "num-traits", - "prost", - "prost-types", - "serde", - "serde_bytes", - "subtle-encoding", - "time 0.3.9", + "form_urlencoded", + "idna", + "matches", + "percent-encoding", ] [[package]] -name = "tendermint-rpc" -version = "0.23.7" +name = "utf-8" +version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a13e63f57ee05a1e927887191c76d1b139de9fa40c180b9f8727ee44377242a6" -dependencies = [ - "async-trait", - "async-tungstenite", - "bytes", - "flex-error", - "futures", - "getrandom 0.2.6", - "http", - "hyper", - "hyper-proxy", - "hyper-rustls", - "peg", - "pin-project", - "serde", - "serde_bytes", - "serde_json", - "subtle-encoding", - "tendermint", - "tendermint-config", - "tendermint-proto", - "thiserror", - "time 0.3.9", - "tokio", - "tracing", - "url", - "uuid 0.8.2", - "walkdir", -] +checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" [[package]] -name = "tendermint-testgen" -version = "0.23.7" +name = "uuid" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"442ede2d01e61466e515fd7f1d0aac7c3c86b3066535479caa86a43afb5e2e17" -dependencies = [ - "ed25519-dalek", - "gumdrop", - "serde", - "serde_json", - "simple-error", - "tempfile", - "tendermint", - "time 0.3.9", -] +checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" [[package]] -name = "termcolor" -version = "1.1.3" +name = "valuable" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bab24d30b911b2376f3a13cc2cd443142f0c81dda04c118693e35b3835757755" -dependencies = [ - "winapi-util", -] +checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" + +[[package]] +name = "version_check" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" [[package]] -name = "terminal_size" -version = "0.1.17" +name = "walkdir" +version = "2.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "633c1a546cee861a1a6d0dc69ebeca693bf4296661ba7852b9d21d159e0506df" +checksum = "808cf2735cd4b6866113f648b791c6adc5714537bc222d9347bb203386ffda56" dependencies = [ - "libc", + "same-file", "winapi", + "winapi-util", ] [[package]] -name = "test-log" -version = "0.2.10" +name = "want" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4235dbf7ea878b3ef12dea20a59c134b405a66aafc4fc2c7b9935916e289e735" +checksum = "1ce8a968cb1cd110d136ff8b819a556d6fb6d919363c61534f6860c7eb172ba0" dependencies = [ - "proc-macro2", - "quote", - "syn", + "log", + "try-lock", ] [[package]] -name = "textwrap" -version = "0.15.0" +name = "wasi" +version = "0.9.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1141d4d61095b28419e22cb0bbf02755f5e54e0526f97f1e3d1d160e60885fb" +checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" [[package]] -name = "thiserror" -version = "1.0.31" +name = "wasi" +version = "0.10.2+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd829fe32373d27f76265620b5309d0340cb8550f523c1dda251d6298069069a" +checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" + +[[package]] +name = "wasi" +version = "0.11.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" + +[[package]] +name = "wasm-bindgen" +version = "0.2.80" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "27370197c907c55e3f1a9fbe26f44e937fe6451368324e009cba39e139dc08ad" dependencies = [ - "thiserror-impl", + "cfg-if 1.0.0", + "wasm-bindgen-macro", ] [[package]] -name = "thiserror-impl" -version = "1.0.31" +name = "wasm-bindgen-backend" +version = "0.2.80" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0396bc89e626244658bef819e22d0cc459e795a5ebe878e6ec336d1674a8d79a" +checksum = "53e04185bfa3a779273da532f5025e33398409573f348985af9a1cbf3774d3f4" dependencies = [ + "bumpalo", + "lazy_static", + "log", "proc-macro2", "quote", "syn", + "wasm-bindgen-shared", ] [[package]] -name = "thread_local" -version = "1.1.4" +name = "wasm-bindgen-macro" +version = "0.2.80" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5516c27b78311c50bf42c071425c560ac799b11c30b31f87e3081965fe5e0180" +checksum = "17cae7ff784d7e83a2fe7611cfe766ecf034111b49deb850a3dc7699c08251f5" 
dependencies = [ - "once_cell", + "quote", + "wasm-bindgen-macro-support", ] [[package]] -name = "threadpool" -version = "1.8.1" +name = "wasm-bindgen-macro-support" +version = "0.2.80" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d050e60b33d41c19108b32cea32164033a9013fe3b46cbd4457559bfbf77afaa" +checksum = "99ec0dc7a4756fffc231aab1b9f2f578d23cd391390ab27f952ae0c9b3ece20b" dependencies = [ - "num_cpus", + "proc-macro2", + "quote", + "syn", + "wasm-bindgen-backend", + "wasm-bindgen-shared", ] [[package]] -name = "time" -version = "0.1.43" +name = "wasm-bindgen-shared" +version = "0.2.80" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca8a50ef2360fbd1eeb0ecd46795a87a19024eb4b53c5dc916ca1fd95fe62438" -dependencies = [ - "libc", - "winapi", -] +checksum = "d554b7f530dee5964d9a9468d95c1f8b8acae4f282807e7d27d4b03099a46744" [[package]] -name = "time" -version = "0.3.9" +name = "wasmi" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2702e08a7a860f005826c6815dcac101b19b5eb330c27fe4a5928fec1d20ddd" +checksum = "ca00c5147c319a8ec91ec1a0edbec31e566ce2c9cc93b3f9bb86a9efd0eb795d" dependencies = [ + "downcast-rs", "libc", - "num_threads", - "time-macros", + "memory_units", + "num-rational 0.2.4", + "num-traits", + "parity-wasm", + "wasmi-validation", ] [[package]] -name = "time-macros" -version = "0.2.4" +name = "wasmi-validation" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42657b1a6f4d817cda8e7a0ace261fe0cc946cf3a80314390b22cc61ae080792" +checksum = "165343ecd6c018fc09ebcae280752702c9a2ef3e6f8d02f1cfcbdb53ef6d7937" +dependencies = [ + "parity-wasm", +] [[package]] -name = "tiny-bip39" -version = "0.8.2" +name = "web-sys" +version = "0.3.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffc59cb9dfc85bb312c3a78fd6aa8a8582e310b0fa885d5bb877f6dcc601839d" +checksum = "7b17e741662c70c8bd24ac5c5b18de314a2c26c32bf8346ee1e6f53de919c283" dependencies = [ - "anyhow", - "hmac 0.8.1", - "once_cell", - "pbkdf2 0.4.0", - "rand 0.7.3", - "rustc-hash", - "sha2 0.9.9", - "thiserror", - "unicode-normalization", + "js-sys", "wasm-bindgen", - "zeroize", ] [[package]] -name = "tiny-keccak" -version = "2.0.2" +name = "webpki" +version = "0.21.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237" +checksum = "b8e38c0608262c46d4a56202ebabdeb094cef7e560ca7a226c6bf055188aa4ea" dependencies = [ - "crunchy", + "ring", + "untrusted", ] [[package]] -name = "tiny_http" -version = "0.8.2" +name = "webpki" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ce51b50006056f590c9b7c3808c3bd70f0d1101666629713866c227d6e58d39" +checksum = "f095d78192e208183081cc07bc5515ef55216397af48b873e5edcd72637fa1bd" dependencies = [ - "ascii", - "chrono", - "chunked_transfer", - "log", - "url", + "ring", + "untrusted", ] [[package]] -name = "tinyvec" -version = "1.6.0" +name = "webpki-roots" +version = "0.21.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" +checksum = "aabe153544e473b775453675851ecc86863d2a81d786d741f6b76778f2a48940" dependencies = [ - "tinyvec_macros", + "webpki 0.21.4", ] [[package]] -name = "tinyvec_macros" -version = "0.1.0" +name = "webpki-roots" +version = "0.22.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" +checksum = "44d8de8415c823c8abd270ad483c6feeac771fad964890779f9a8cb24fbbc1bf" +dependencies = [ + "webpki 0.22.0", +] [[package]] -name = "tokio" -version = "1.18.2" +name = "winapi" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4903bf0427cf68dddd5aa6a93220756f8be0c34fcfa9f5e6191e103e15a31395" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" dependencies = [ - "bytes", - "libc", - "memchr", - "mio", - "num_cpus", - "once_cell", - "parking_lot 0.12.0", - "pin-project-lite", - "signal-hook-registry", - "socket2", - "tokio-macros", - "winapi", + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", ] [[package]] -name = "tokio-io-timeout" -version = "1.2.0" +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30b74022ada614a1b4834de765f9bb43877f910cc8ce4be40e89042c9223a8bf" -dependencies = [ - "pin-project-lite", - "tokio", -] +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] -name = "tokio-macros" -version = "1.7.0" +name = "winapi-util" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b557f72f448c511a979e2564e55d74e6c4432fc96ff4f6241bc6bded342643b7" +checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" dependencies = [ - "proc-macro2", - "quote", - "syn", + "winapi", ] [[package]] -name = "tokio-rustls" -version = "0.22.0" +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc6844de72e57df1980054b38be3a9f4702aba4858be64dd700181a8a6d0e1b6" -dependencies = [ - "rustls 0.19.1", - "tokio", - "webpki 0.21.4", -] +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] -name = "tokio-rustls" -version = "0.23.4" +name = "windows-sys" +version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c43ee83903113e03984cb9e5cebe6c04a5116269e900e3ddba8f068a62adda59" +checksum = "ea04155a16a59f9eab786fe12a4a450e75cdb175f9e0d80da1e17db09f55b8d2" dependencies = [ - "rustls 0.20.6", - "tokio", - "webpki 0.22.0", + "windows_aarch64_msvc", + "windows_i686_gnu", + "windows_i686_msvc", + "windows_x86_64_gnu", + "windows_x86_64_msvc", ] [[package]] -name = "tokio-stream" -version = "0.1.8" +name = "windows_aarch64_msvc" +version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50145484efff8818b5ccd256697f36863f587da82cf8b409c53adf1e840798e3" -dependencies = [ - "futures-core", - "pin-project-lite", - "tokio", -] +checksum = "9bb8c3fd39ade2d67e9874ac4f3db21f0d710bee00fe7cab16949ec184eeaa47" [[package]] -name = "tokio-util" -version = "0.7.2" +name = "windows_i686_gnu" +version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f988a1a1adc2fb21f9c12aa96441da33a1728193ae0b95d2be22dbd17fcb4e5c" -dependencies = [ - "bytes", - "futures-core", - "futures-sink", - "pin-project-lite", - "tokio", - "tracing", -] +checksum = "180e6ccf01daf4c426b846dfc66db1fc518f074baa793aa7d9b9aaeffad6a3b6" [[package]] -name = "toml" -version = "0.5.9" +name = "windows_i686_msvc" +version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8d82e1a7758622a465f8cee077614c73484dac5b836c02ff6a40d5d1010324d7" +checksum = "e2e7917148b2812d1eeafaeb22a97e4813dfa60a3f8f78ebe204bcc88f12f024" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.36.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4dcd171b8776c41b97521e5da127a2d86ad280114807d0b2ab1e462bc764d9e1" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.36.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c811ca4a8c853ef420abd8592ba53ddbbac90410fab6903b3e79972a631f7680" + +[[package]] +name = "wyz" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30b31594f29d27036c383b53b59ed3476874d518f0efb151b27a4c275141390e" dependencies = [ - "serde", + "tap", ] [[package]] -name = "tonic" -version = "0.7.2" +name = "zeroize" +version = "1.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5be9d60db39854b30b835107500cf0aca0b0d14d6e1c3de124217c23a29c2ddb" +checksum = "94693807d016b2f2d2e14420eb3bfcca689311ff775dcf113d74ea624b7cdf07" dependencies = [ - "async-stream", - "async-trait", - "axum", - "base64", - "bytes", - "futures-core", - "futures-util", - "h2", - "http", - "http-body", - "hyper", - "hyper-timeout", - "percent-encoding", - "pin-project", - "prost", - "prost-derive", -<<<<<<< HEAD -<<<<<<< HEAD - "rustls-native-certs 0.6.2", - "rustls-pemfile", - "tokio", - "tokio-rustls 0.23.4", -======= - "tokio", ->>>>>>> e05319ec (initial refactor) -======= - "rustls-native-certs", - "tokio", - "tokio-rustls", ->>>>>>> 8be3170b (revert changes to cargo.toml) - "tokio-stream", - "tokio-util", - "tower", - "tower-layer", - "tower-service", - "tracing", - "tracing-futures", + "zeroize_derive", ] [[package]] -name = "tower" -version = "0.4.12" +name = "zeroize_derive" +version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a89fd63ad6adf737582df5db40d286574513c69a11dac5214dc3b5603d6713e" -dependencies = [ - "futures-core", - "futures-util", - "indexmap", - "pin-project", - "pin-project-lite", - "rand 0.8.5", - "slab", - "tokio", - "tokio-util", - "tower-layer", - "tower-service", - "tracing", -] - -[[package]] -name = "tower-http" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d342c6d58709c0a6d48d48dabbb62d4ef955cf5f0f3bbfd845838e7ae88dbae" -dependencies = [ - "bitflags", - "bytes", - "futures-core", - "futures-util", - "http", - "http-body", - "http-range-header", - "pin-project-lite", - "tower", - "tower-layer", - "tower-service", -] - -[[package]] -name = "tower-layer" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "343bc9466d3fe6b0f960ef45960509f84480bf4fd96f92901afe7ff3df9d3a62" - -[[package]] -name = "tower-service" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6" - -[[package]] -name = "tracing" -version = "0.1.34" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d0ecdcb44a79f0fe9844f0c4f33a342cbcbb5117de8001e6ba0dc2351327d09" -dependencies = [ - "cfg-if 1.0.0", - "log", - "pin-project-lite", - "tracing-attributes", - "tracing-core", -] - -[[package]] -name = "tracing-attributes" -version = "0.1.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc6b8ad3567499f98a1db7a752b07a7c8c7c7c34c332ec00effb2b0027974b7c" 
+checksum = "3f8f187641dad4f680d25c4bfc4225b418165984179f26ca76ec4fb6441d3a17" dependencies = [ "proc-macro2", "quote", "syn", + "synstructure", ] - -[[package]] -name = "tracing-core" -version = "0.1.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f54c8ca710e81886d498c2fd3331b56c93aa248d49de2222ad2742247c60072f" -dependencies = [ - "lazy_static", - "valuable", -] - -[[package]] -<<<<<<< HEAD -<<<<<<< HEAD -name = "tracing-error" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d686ec1c0f384b1277f097b2f279a2ecc11afe8c133c1aabf036a27cb4cd206e" -dependencies = [ - "tracing", - "tracing-subscriber 0.3.10", -] - -[[package]] -======= ->>>>>>> e05319ec (initial refactor) -name = "tracing-futures" -version = "0.2.5" -======= -name = "tracing-error" -version = "0.2.0" ->>>>>>> 8be3170b (revert changes to cargo.toml) -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d686ec1c0f384b1277f097b2f279a2ecc11afe8c133c1aabf036a27cb4cd206e" -dependencies = [ - "tracing", - "tracing-subscriber 0.3.11", -] - -[[package]] -name = "tracing-futures" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97d095ae15e245a057c8e8451bab9b3ee1e1f68e9ba2b4fbc18d0ac5237835f2" -dependencies = [ - "pin-project", - "tracing", -] - -[[package]] -name = "tracing-log" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78ddad33d2d10b1ed7eb9d1f518a5674713876e97e5bb9b7345a7984fbb4f922" -dependencies = [ - "lazy_static", - "log", - "tracing-core", -] - -[[package]] -name = "tracing-serde" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc6b213177105856957181934e4920de57730fc69bf42c37ee5bb664d406d9e1" -dependencies = [ - "serde", - "tracing-core", -] - -[[package]] -name = "tracing-subscriber" -version = "0.2.25" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e0d2eaa99c3c2e41547cfa109e910a68ea03823cccad4a0525dcbc9b01e8c71" -dependencies = [ - "ansi_term", - "chrono", - "lazy_static", - "matchers 0.0.1", - "regex", - "serde", - "serde_json", - "sharded-slab", - "smallvec", - "thread_local", - "tracing", - "tracing-core", - "tracing-log", - "tracing-serde", -] - -[[package]] -name = "tracing-subscriber" -version = "0.3.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bc28f93baff38037f64e6f43d34cfa1605f27a49c34e8a04c5e78b0babf2596" -dependencies = [ - "ansi_term", - "lazy_static", - "matchers 0.1.0", - "regex", - "serde", - "serde_json", - "sharded-slab", - "smallvec", - "thread_local", - "tracing", - "tracing-core", - "tracing-log", - "tracing-serde", -] - -[[package]] -name = "trie-db" -version = "0.23.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d32d034c0d3db64b43c31de38e945f15b40cd4ca6d2dcfc26d4798ce8de4ab83" -dependencies = [ - "hash-db", -<<<<<<< HEAD - "hashbrown 0.12.0", -======= - "hashbrown 0.12.1", ->>>>>>> e05319ec (initial refactor) - "log", - "rustc-hex", - "smallvec", -] - -[[package]] -name = "trie-root" -version = "0.17.0" -<<<<<<< HEAD -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a36c5ca3911ed3c9a5416ee6c679042064b93fc637ded67e25f92e68d783891" -dependencies = [ - "hash-db", -] - -[[package]] -name = "triomphe" -version = "0.1.5" -======= ->>>>>>> e05319ec (initial refactor) -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a36c5ca3911ed3c9a5416ee6c679042064b93fc637ded67e25f92e68d783891" -dependencies = [ - "hash-db", -] - -[[package]] -name = "triomphe" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c45e322b26410d7260e00f64234810c2f17d7ece356182af4df8f7ff07890f09" -dependencies = [ - "memoffset 0.6.5", -] - -[[package]] -name = "try-lock" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" - -[[package]] -name = "tt-call" -version = "1.0.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e66dcbec4290c69dd03c57e76c2469ea5c7ce109c6dd4351c13055cf71ea055" - -[[package]] -name = "tungstenite" -version = "0.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ada8297e8d70872fa9a551d93250a9f407beb9f37ef86494eb20012a2ff7c24" -dependencies = [ - "base64", - "byteorder", - "bytes", - "http", - "httparse", - "input_buffer", - "log", - "rand 0.8.5", - "sha-1 0.9.8", - "url", - "utf-8", -] - -[[package]] -name = "twoway" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59b11b2b5241ba34be09c3cc85a36e56e48f9888862e19cedf23336d35316ed1" -dependencies = [ - "memchr", -] - -[[package]] -name = "twox-hash" -version = "1.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675" -dependencies = [ - "cfg-if 1.0.0", - "rand 0.8.5", - "static_assertions", -] - -[[package]] -name = "twox-hash" -version = "1.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ee73e6e4924fe940354b8d4d98cad5231175d615cd855b758adc658c0aac6a0" -dependencies = [ - "cfg-if 1.0.0", - "rand 0.8.5", - "static_assertions", -] - -[[package]] -name = "typenum" -version = "1.15.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987" - -[[package]] -name = "uint" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12f03af7ccf01dd611cc450a0d10dbc9b745770d096473e2faf0ca6e2d66d1e0" -dependencies = [ - "byteorder", - "crunchy", - "hex", - "static_assertions", -] - -[[package]] -name = "unicase" -version = "2.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50f37be617794602aabbeee0be4f259dc1778fabe05e2d67ee8f79326d5cb4f6" -dependencies = [ - "version_check", -] - -[[package]] -name = "unicode-bidi" -version = "0.3.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "099b7128301d285f79ddd55b9a83d5e6b9e97c92e0ea0daebee7263e932de992" - -[[package]] -name = "unicode-ident" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d22af068fba1eb5edcb4aea19d382b2a3deb4c8f9d475c589b6ada9e0fd493ee" - -[[package]] -name = "unicode-normalization" -version = "0.1.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d54590932941a9e9266f0832deed84ebe1bf2e4c9e4a3554d393d18f5e854bf9" -dependencies = [ - "tinyvec", -] - -[[package]] -name = "unicode-width" -version = "0.1.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ed742d4ea2bd1176e236172c8429aaf54486e7ac098db29ffe6529e0ce50973" - -[[package]] -name = "unicode-xid" 
-version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "957e51f3646910546462e67d5f7599b9e4fb8acdd304b087a6494730f9eebf04" - -[[package]] -name = "untrusted" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" - -[[package]] -name = "ureq" -version = "2.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9399fa2f927a3d327187cbd201480cee55bee6ac5d3c77dd27f0c6814cff16d5" -dependencies = [ - "base64", - "chunked_transfer", - "flate2", - "log", - "once_cell", - "rustls 0.20.6", - "url", - "webpki 0.22.0", - "webpki-roots 0.22.3", -] - -[[package]] -name = "url" -version = "2.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a507c383b2d33b5fc35d1861e77e6b383d158b2da5e14fe51b83dfedf6fd578c" -dependencies = [ - "form_urlencoded", - "idna", - "matches", - "percent-encoding", -] - -[[package]] -name = "utf-8" -version = "0.7.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" - -[[package]] -name = "uuid" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" -dependencies = [ - "getrandom 0.2.6", -] - -[[package]] -name = "uuid" -version = "1.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6d5d669b51467dcf7b2f1a796ce0f955f05f01cafda6c19d6e95f730df29238" -dependencies = [ - "getrandom 0.2.6", -] - -[[package]] -name = "valuable" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" - -[[package]] -name = "version_check" -version = "0.9.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" - -[[package]] -name = "wait-timeout" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f200f5b12eb75f8c1ed65abd4b2db8a6e1b138a20de009dacee265a2498f3f6" -dependencies = [ - "libc", -] - -[[package]] -name = "walkdir" -version = "2.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "808cf2735cd4b6866113f648b791c6adc5714537bc222d9347bb203386ffda56" -dependencies = [ - "same-file", - "winapi", - "winapi-util", -] - -[[package]] -name = "want" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ce8a968cb1cd110d136ff8b819a556d6fb6d919363c61534f6860c7eb172ba0" -dependencies = [ - "log", - "try-lock", -] - -[[package]] -name = "wasi" -version = "0.9.0+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" - -[[package]] -name = "wasi" -version = "0.10.2+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" - -[[package]] -name = "wasi" -version = "0.11.0+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" - -[[package]] -name = "wasm-bindgen" -version = "0.2.80" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "27370197c907c55e3f1a9fbe26f44e937fe6451368324e009cba39e139dc08ad" -dependencies = [ - "cfg-if 1.0.0", - "wasm-bindgen-macro", -] - -[[package]] -name = "wasm-bindgen-backend" -version = "0.2.80" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53e04185bfa3a779273da532f5025e33398409573f348985af9a1cbf3774d3f4" -dependencies = [ - "bumpalo", - "lazy_static", - "log", - "proc-macro2", - "quote", - "syn", - "wasm-bindgen-shared", -] - -[[package]] -name = "wasm-bindgen-macro" -version = "0.2.80" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17cae7ff784d7e83a2fe7611cfe766ecf034111b49deb850a3dc7699c08251f5" -dependencies = [ - "quote", - "wasm-bindgen-macro-support", -] - -[[package]] -name = "wasm-bindgen-macro-support" -version = "0.2.80" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99ec0dc7a4756fffc231aab1b9f2f578d23cd391390ab27f952ae0c9b3ece20b" -dependencies = [ - "proc-macro2", - "quote", - "syn", - "wasm-bindgen-backend", - "wasm-bindgen-shared", -] - -[[package]] -name = "wasm-bindgen-shared" -version = "0.2.80" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d554b7f530dee5964d9a9468d95c1f8b8acae4f282807e7d27d4b03099a46744" - -[[package]] -name = "wasmi" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca00c5147c319a8ec91ec1a0edbec31e566ce2c9cc93b3f9bb86a9efd0eb795d" -dependencies = [ - "downcast-rs", - "libc", - "memory_units", - "num-rational 0.2.4", - "num-traits", - "parity-wasm", - "wasmi-validation", -] - -[[package]] -name = "wasmi-validation" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "165343ecd6c018fc09ebcae280752702c9a2ef3e6f8d02f1cfcbdb53ef6d7937" -dependencies = [ - "parity-wasm", -] - -[[package]] -name = "web-sys" -version = "0.3.57" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b17e741662c70c8bd24ac5c5b18de314a2c26c32bf8346ee1e6f53de919c283" -dependencies = [ - "js-sys", - "wasm-bindgen", -] - -[[package]] -name = "webpki" -version = "0.21.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8e38c0608262c46d4a56202ebabdeb094cef7e560ca7a226c6bf055188aa4ea" -dependencies = [ - "ring", - "untrusted", -] - -[[package]] -name = "webpki" -version = "0.22.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f095d78192e208183081cc07bc5515ef55216397af48b873e5edcd72637fa1bd" -dependencies = [ - "ring", - "untrusted", -] - -[[package]] -name = "webpki-roots" -version = "0.21.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aabe153544e473b775453675851ecc86863d2a81d786d741f6b76778f2a48940" -dependencies = [ - "webpki 0.21.4", -] - -[[package]] -name = "webpki-roots" -version = "0.22.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44d8de8415c823c8abd270ad483c6feeac771fad964890779f9a8cb24fbbc1bf" -dependencies = [ - "webpki 0.22.0", -] - -[[package]] -name = "winapi" -version = "0.3.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" -dependencies = [ - "winapi-i686-pc-windows-gnu", - "winapi-x86_64-pc-windows-gnu", -] - -[[package]] -name = "winapi-i686-pc-windows-gnu" -version = "0.4.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" - -[[package]] -name = "winapi-util" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" -dependencies = [ - "winapi", -] - -[[package]] -name = "winapi-x86_64-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" - -[[package]] -<<<<<<< HEAD -<<<<<<< HEAD -======= ->>>>>>> 8be3170b (revert changes to cargo.toml) -name = "windows-sys" -version = "0.36.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea04155a16a59f9eab786fe12a4a450e75cdb175f9e0d80da1e17db09f55b8d2" -dependencies = [ - "windows_aarch64_msvc", - "windows_i686_gnu", - "windows_i686_msvc", - "windows_x86_64_gnu", - "windows_x86_64_msvc", -] - -[[package]] -name = "windows_aarch64_msvc" -version = "0.36.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9bb8c3fd39ade2d67e9874ac4f3db21f0d710bee00fe7cab16949ec184eeaa47" - -[[package]] -name = "windows_i686_gnu" -version = "0.36.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "180e6ccf01daf4c426b846dfc66db1fc518f074baa793aa7d9b9aaeffad6a3b6" - -[[package]] -name = "windows_i686_msvc" -version = "0.36.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2e7917148b2812d1eeafaeb22a97e4813dfa60a3f8f78ebe204bcc88f12f024" - -[[package]] -name = "windows_x86_64_gnu" -version = "0.36.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4dcd171b8776c41b97521e5da127a2d86ad280114807d0b2ab1e462bc764d9e1" - -[[package]] -name = "windows_x86_64_msvc" -version = "0.36.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c811ca4a8c853ef420abd8592ba53ddbbac90410fab6903b3e79972a631f7680" - -[[package]] -<<<<<<< HEAD -name = "wyz" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85e60b0d1b5f99db2556934e21937020776a5d31520bf169e851ac44e6420214" - -[[package]] -name = "yaml-rust" -version = "0.4.5" -======= -======= ->>>>>>> 8be3170b (revert changes to cargo.toml) -name = "wyz" -version = "0.2.0" ->>>>>>> e05319ec (initial refactor) -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85e60b0d1b5f99db2556934e21937020776a5d31520bf169e851ac44e6420214" - -[[package]] -name = "yaml-rust" -version = "0.4.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56c1936c4cc7a1c9ab21a1ebb602eb942ba868cbd44a99cb7cdc5892335e1c85" -dependencies = [ - "linked-hash-map", -] - -[[package]] -name = "zeroize" -version = "1.5.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94693807d016b2f2d2e14420eb3bfcca689311ff775dcf113d74ea624b7cdf07" -dependencies = [ - "zeroize_derive", -] - -[[package]] -name = "zeroize_derive" -version = "1.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f8f187641dad4f680d25c4bfc4225b418165984179f26ca76ec4fb6441d3a17" -dependencies = [ - "proc-macro2", - "quote", - "syn", - "synstructure", -] -<<<<<<< HEAD - -<<<<<<< HEAD -[[package]] -name = "zip" -version = "0.5.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"93ab48844d61251bb3835145c521d88aa4031d7139e8485990f60ca911fa0815" -dependencies = [ - "byteorder", - "bzip2", - "crc32fast", - "flate2", - "thiserror", - "time 0.1.43", -] -======= -[[patch.unused]] -name = "beefy-gadget" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "beefy-gadget-rpc" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "chain-spec-builder" -version = "2.0.0" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "fork-tree" -version = "3.0.0" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "frame-benchmarking-cli" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "frame-election-provider-support" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "frame-executive" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "frame-support-test" -version = "3.0.0" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "frame-support-test-compile-pass" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "frame-support-test-pallet" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "frame-system-benchmarking" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "frame-system-rpc-runtime-api" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "frame-try-runtime" -version = "0.10.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "generate-bags" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "node-bench" -version = "0.9.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "node-cli" -version = "3.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "node-executor" -version = "3.0.0-dev" -source = 
"git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "node-inspect" -version = "0.9.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "node-primitives" -version = "2.0.0" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "node-rpc" -version = "3.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "node-runtime" -version = "3.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "node-runtime-generate-bags" -version = "3.0.0" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "node-template" -version = "3.0.0" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "node-template-runtime" -version = "3.0.0" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "node-testing" -version = "3.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "pallet-asset-tx-payment" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "pallet-assets" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "pallet-atomic-swap" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "pallet-aura" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "pallet-authority-discovery" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "pallet-authorship" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "pallet-babe" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "pallet-bags-list" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "pallet-bags-list-fuzzer" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "pallet-bags-list-remote-tests" -version = "4.0.0-dev" -source = 
"git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "pallet-balances" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "pallet-bounties" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "pallet-child-bounties" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "pallet-collective" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "pallet-contracts" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "pallet-contracts-primitives" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "pallet-contracts-proc-macro" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "pallet-contracts-rpc" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "pallet-contracts-rpc-runtime-api" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "pallet-democracy" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "pallet-election-provider-multi-phase" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "pallet-elections-phragmen" -version = "5.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "pallet-example-basic" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "pallet-example-offchain-worker" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "pallet-example-parallel" -version = "3.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "pallet-gilt" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "pallet-grandpa" -version = "4.0.0-dev" -source = 
"git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "pallet-identity" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "pallet-im-online" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "pallet-indices" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "pallet-lottery" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "pallet-membership" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "pallet-mmr-rpc" -version = "3.0.0" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "pallet-multisig" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "pallet-nicks" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "pallet-node-authorization" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "pallet-offences" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "pallet-offences-benchmarking" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "pallet-preimage" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "pallet-proxy" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "pallet-randomness-collective-flip" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "pallet-recovery" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "pallet-scheduler" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "pallet-scored-pool" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "pallet-session-benchmarking" -version = 
"4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "pallet-society" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "pallet-staking" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "pallet-staking-reward-curve" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "pallet-staking-reward-fn" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "pallet-sudo" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "pallet-template" -version = "3.0.0" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "pallet-tips" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "pallet-transaction-payment" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "pallet-transaction-payment-rpc" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "pallet-transaction-payment-rpc-runtime-api" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "pallet-transaction-storage" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "pallet-treasury" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "pallet-uniques" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "pallet-utility" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "pallet-vesting" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "remote-externalities" -version = "0.10.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "sc-allocator" -version = "4.1.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - 
-[[patch.unused]] -name = "sc-authority-discovery" -version = "0.10.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "sc-basic-authorship" -version = "0.10.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "sc-block-builder" -version = "0.10.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "sc-chain-spec" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "sc-chain-spec-derive" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "sc-cli" -version = "0.10.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "sc-client-api" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "sc-client-db" -version = "0.10.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "sc-consensus" -version = "0.10.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "sc-consensus-aura" -version = "0.10.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "sc-consensus-babe" -version = "0.10.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "sc-consensus-babe-rpc" -version = "0.10.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "sc-consensus-epochs" -version = "0.10.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "sc-consensus-manual-seal" -version = "0.10.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "sc-consensus-pow" -version = "0.10.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "sc-consensus-slots" -version = "0.10.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "sc-consensus-uncles" -version = "0.10.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "sc-executor" -version = "0.10.0-dev" -source = 
"git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "sc-executor-common" -version = "0.10.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "sc-executor-wasmi" -version = "0.10.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "sc-executor-wasmtime" -version = "0.10.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "sc-finality-grandpa" -version = "0.10.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "sc-finality-grandpa-rpc" -version = "0.10.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "sc-informant" -version = "0.10.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "sc-keystore" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "sc-network" -version = "0.10.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "sc-network-gossip" -version = "0.10.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "sc-network-test" -version = "0.8.0" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "sc-offchain" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "sc-peerset" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "sc-proposer-metrics" -version = "0.10.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "sc-rpc" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "sc-rpc-api" -version = "0.10.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "sc-rpc-server" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "sc-runtime-test" -version = "2.0.0" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "sc-service" -version = "0.10.0-dev" -source = 
"git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "sc-service-test" -version = "2.0.0" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "sc-state-db" -version = "0.10.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "sc-sync-state-rpc" -version = "0.10.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "sc-telemetry" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "sc-tracing" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "sc-tracing-proc-macro" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "sc-transaction-pool" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "sc-transaction-pool-api" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "sc-utils" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "sp-api-test" -version = "2.0.1" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "sp-application-crypto-test" -version = "2.0.0" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "sp-arithmetic-fuzzer" -version = "2.0.0" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "sp-authority-discovery" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "sp-authorship" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "sp-block-builder" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "sp-blockchain" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "sp-consensus" -version = "0.10.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "sp-consensus-aura" -version = "0.10.0-dev" -source = 
"git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "sp-consensus-babe" -version = "0.10.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "sp-consensus-pow" -version = "0.10.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "sp-consensus-slots" -version = "0.10.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "sp-consensus-vrf" -version = "0.10.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "sp-database" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "sp-finality-grandpa" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "sp-keyring" -version = "4.1.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "sp-maybe-compressed-blob" -version = "4.1.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "sp-npos-elections" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "sp-npos-elections-fuzzer" -version = "2.0.0-alpha.5" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "sp-npos-elections-solution-type" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "sp-offchain" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "sp-rpc" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "sp-runtime-interface-test" -version = "2.0.0" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "sp-runtime-interface-test-wasm" -version = "2.0.0" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "sp-runtime-interface-test-wasm-deprecated" -version = "2.0.0" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "sp-sandbox" -version = "0.10.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = 
"sp-serializer" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "sp-tasks" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "sp-test-primitives" -version = "2.0.0" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "sp-transaction-pool" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "sp-transaction-storage-proof" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "subkey" -version = "2.0.1" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "substrate-build-script-utils" -version = "3.0.0" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "substrate-frame-cli" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "substrate-frame-rpc-support" -version = "3.0.0" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "substrate-frame-rpc-system" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "substrate-prometheus-endpoint" -version = "0.10.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "substrate-test-client" -version = "2.0.1" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "substrate-test-runtime" -version = "2.0.0" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "substrate-test-runtime-client" -version = "2.0.0" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "substrate-test-runtime-transaction-pool" -version = "2.0.0" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "substrate-test-utils" -version = "4.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "substrate-test-utils-derive" -version = "0.10.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "substrate-test-utils-test-crate" -version = "0.1.0" -source = 
"git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "substrate-wasm-builder" -version = "5.0.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" - -[[patch.unused]] -name = "try-runtime-cli" -version = "0.10.0-dev" -source = "git+https://github.com/ComposableFi/substrate?branch=mmr-polkadot-v0.9.16#11ce421f78949728ae2ce08fb0fd4f71c84d8acf" ->>>>>>> e05319ec (initial refactor) -======= ->>>>>>> 8be3170b (revert changes to cargo.toml) diff --git a/modules/Cargo.toml b/modules/Cargo.toml index db5a08ae50..d6264705a4 100644 --- a/modules/Cargo.toml +++ b/modules/Cargo.toml @@ -56,13 +56,13 @@ flex-error = { version = "0.4.4", default-features = false } num-traits = { version = "0.2.14", default-features = false } derive_more = { version = "0.99.17", default-features = false, features = ["from", "display"] } uint = { version = "0.9", default-features = false } -beefy-client = { package = "beefy-generic-client", git = "https://github.com/ComposableFi/beefy-client", branch = "david/verify_parachain_headers", default-features = false } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.17", default-features = false } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.17", default-features = false } -pallet-mmr-primitives = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.17", default-features = false } -beefy-primitives = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.17", default-features = false } -codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } -sp-trie = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.17", default-features = false } +beefy-client = { package = "beefy-generic-client", git = "https://github.com/ComposableFi/beefy-client", branch = "david/refactor-traits", default-features = false } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.22" , default-features = false } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.22" , default-features = false } +pallet-mmr-primitives = { package = "sp-mmr-primitives", git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.22" , default-features = false } +beefy-primitives = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.22" , default-features = false } +codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false } +sp-trie = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.22" , default-features = false } [dependencies.tendermint] version = "=0.23.7" diff --git a/modules/src/applications/ics20_fungible_token_transfer/relay_application_logic/send_transfer.rs b/modules/src/applications/ics20_fungible_token_transfer/relay_application_logic/send_transfer.rs new file mode 100644 index 0000000000..761a4c9f00 --- /dev/null +++ b/modules/src/applications/ics20_fungible_token_transfer/relay_application_logic/send_transfer.rs @@ -0,0 +1,51 @@ +use crate::applications::ics20_fungible_token_transfer::error::Error; +use crate::applications::ics20_fungible_token_transfer::msgs::transfer::MsgTransfer; +use crate::core::ics04_channel::handler::send_packet::send_packet; +use crate::core::ics04_channel::packet::Packet; +use 
crate::core::ics04_channel::packet::PacketResult;
+use crate::core::ics26_routing::context::LightClientContext;
+use crate::handler::HandlerOutput;
+use crate::prelude::*;
+
+pub(crate) fn send_transfer<Ctx>(
+    ctx: &Ctx,
+    msg: MsgTransfer,
+) -> Result<HandlerOutput<PacketResult>, Error>
+where
+    Ctx: LightClientContext,
+{
+    let source_channel_end = ctx
+        .channel_end(&(msg.source_port.clone(), msg.source_channel))
+        .map_err(Error::ics04_channel)?;
+
+    let destination_port = source_channel_end.counterparty().port_id().clone();
+    let destination_channel = source_channel_end
+        .counterparty()
+        .channel_id()
+        .ok_or_else(|| {
+            Error::destination_channel_not_found(msg.source_port.clone(), msg.source_channel)
+        })?;
+
+    // get the next sequence
+    let sequence = ctx
+        .get_next_sequence_send(&(msg.source_port.clone(), msg.source_channel))
+        .map_err(Error::ics04_channel)?;
+
+    // TODO: application logic.
+
+    let packet = Packet {
+        sequence,
+        source_port: msg.source_port,
+        source_channel: msg.source_channel,
+        destination_port,
+        destination_channel: *destination_channel,
+        data: vec![0],
+        timeout_height: msg.timeout_height,
+        timeout_timestamp: msg.timeout_timestamp,
+    };
+
+    let handler_output = send_packet(ctx, packet).map_err(Error::ics04_channel)?;
+
+    // TODO: add events/attributes and writes to the store issued by the application logic for packet sending.
+    Ok(handler_output)
+}
diff --git a/modules/src/clients/crypto_ops/crypto.rs b/modules/src/clients/crypto_ops/crypto.rs
new file mode 100644
index 0000000000..492bdfb7fc
--- /dev/null
+++ b/modules/src/clients/crypto_ops/crypto.rs
@@ -0,0 +1,22 @@
+use crate::core::ics02_client::error::Error;
+use crate::prelude::*;
+use beefy_client::traits::HostFunctions;
+use sp_core::H256;
+
+pub trait CryptoOps: HostFunctions + Clone {
+    /// This function should verify membership in a trie proof using parity's sp-trie package
+    /// with a BlakeTwo256 hasher
+    fn verify_membership_trie_proof(
+        root: &H256,
+        proof: &Vec<Vec<u8>>,
+        key: &[u8],
+        value: &[u8],
+    ) -> Result<(), Error>;
+    /// This function should verify non-membership in a trie proof using parity's sp-trie package
+    /// with a BlakeTwo256 hasher
+    fn verify_non_membership_trie_proof(
+        root: &H256,
+        proof: &Vec<Vec<u8>>,
+        key: &[u8],
+    ) -> Result<(), Error>;
+}
diff --git a/modules/src/clients/crypto_ops/mod.rs b/modules/src/clients/crypto_ops/mod.rs
new file mode 100644
index 0000000000..274f0edcd3
--- /dev/null
+++ b/modules/src/clients/crypto_ops/mod.rs
@@ -0,0 +1 @@
+pub mod crypto;
diff --git a/modules/src/clients/ics07_tendermint/client_def.rs b/modules/src/clients/ics07_tendermint/client_def.rs
index 7a8555cfbc..d1d5a17999 100644
--- a/modules/src/clients/ics07_tendermint/client_def.rs
+++ b/modules/src/clients/ics07_tendermint/client_def.rs
@@ -14,13 +14,10 @@ use crate::core::ics02_client::client_consensus::AnyConsensusState;
 use crate::core::ics02_client::client_def::{ClientDef, ConsensusUpdateResult};
 use crate::core::ics02_client::client_state::AnyClientState;
 use crate::core::ics02_client::client_type::ClientType;
-use crate::core::ics02_client::context::ClientReader;
 use crate::core::ics02_client::error::Error as Ics02Error;
 use crate::core::ics03_connection::connection::ConnectionEnd;
-use crate::core::ics03_connection::context::ConnectionReader;
 use crate::core::ics04_channel::channel::ChannelEnd;
 use crate::core::ics04_channel::commitment::{AcknowledgementCommitment, PacketCommitment};
-use crate::core::ics04_channel::context::ChannelReader;
 use crate::core::ics04_channel::packet::Sequence;
 use
crate::core::ics23_commitment::commitment::{ CommitmentPrefix, CommitmentProofBytes, CommitmentRoot, @@ -33,6 +30,7 @@ use crate::core::ics24_host::path::{ ConnectionsPath, ReceiptsPath, SeqRecvsPath, }; use crate::core::ics24_host::Path; +use crate::core::ics26_routing::context::LightClientContext; use crate::downcast; use crate::prelude::*; use crate::Height; @@ -49,7 +47,7 @@ impl ClientDef for TendermintClient { fn verify_header( &self, - ctx: &dyn ClientReader, + ctx: &dyn LightClientContext, client_id: ClientId, client_state: Self::ClientState, header: Self::Header, @@ -120,7 +118,7 @@ impl ClientDef for TendermintClient { fn update_state( &self, - _ctx: &dyn ClientReader, + _ctx: &dyn LightClientContext, _client_id: ClientId, client_state: Self::ClientState, header: Self::Header, @@ -146,7 +144,7 @@ impl ClientDef for TendermintClient { fn check_for_misbehaviour( &self, - ctx: &dyn ClientReader, + ctx: &dyn LightClientContext, client_id: ClientId, client_state: Self::ClientState, header: Self::Header, @@ -227,7 +225,7 @@ impl ClientDef for TendermintClient { fn verify_client_consensus_state( &self, - _ctx: &dyn ConnectionReader, + _ctx: &dyn LightClientContext, client_state: &Self::ClientState, height: Height, prefix: &CommitmentPrefix, @@ -252,7 +250,7 @@ impl ClientDef for TendermintClient { fn verify_connection_state( &self, - _ctx: &dyn ConnectionReader, + _ctx: &dyn LightClientContext, _client_id: &ClientId, client_state: &Self::ClientState, height: Height, @@ -273,7 +271,7 @@ impl ClientDef for TendermintClient { fn verify_channel_state( &self, - _ctx: &dyn ChannelReader, + _ctx: &dyn LightClientContext, _client_id: &ClientId, client_state: &Self::ClientState, height: Height, @@ -295,7 +293,7 @@ impl ClientDef for TendermintClient { fn verify_client_full_state( &self, - _ctx: &dyn ConnectionReader, + _ctx: &dyn LightClientContext, client_state: &Self::ClientState, height: Height, prefix: &CommitmentPrefix, @@ -315,7 +313,7 @@ impl ClientDef for TendermintClient { fn verify_packet_data( &self, - ctx: &dyn ChannelReader, + ctx: &dyn LightClientContext, _client_id: &ClientId, client_state: &Self::ClientState, height: Height, @@ -348,7 +346,7 @@ impl ClientDef for TendermintClient { fn verify_packet_acknowledgement( &self, - ctx: &dyn ChannelReader, + ctx: &dyn LightClientContext, _client_id: &ClientId, client_state: &Self::ClientState, height: Height, @@ -381,7 +379,7 @@ impl ClientDef for TendermintClient { fn verify_next_sequence_recv( &self, - ctx: &dyn ChannelReader, + ctx: &dyn LightClientContext, _client_id: &ClientId, client_state: &Self::ClientState, height: Height, @@ -413,7 +411,7 @@ impl ClientDef for TendermintClient { fn verify_packet_receipt_absence( &self, - ctx: &dyn ChannelReader, + ctx: &dyn LightClientContext, _client_id: &ClientId, client_state: &Self::ClientState, height: Height, @@ -494,7 +492,7 @@ fn verify_non_membership( } fn verify_delay_passed( - ctx: &dyn ChannelReader, + ctx: &dyn LightClientContext, height: Height, connection_end: &ConnectionEnd, ) -> Result<(), Ics02Error> { diff --git a/modules/src/clients/ics11_beefy/client_def.rs b/modules/src/clients/ics11_beefy/client_def.rs index 37efab6f45..be9d1dab5a 100644 --- a/modules/src/clients/ics11_beefy/client_def.rs +++ b/modules/src/clients/ics11_beefy/client_def.rs @@ -7,6 +7,7 @@ use pallet_mmr_primitives::BatchProof; use sp_core::H256; use tendermint_proto::Protobuf; +use crate::clients::crypto_ops::crypto::CryptoOps; use crate::clients::ics11_beefy::client_state::ClientState; use 
crate::clients::ics11_beefy::consensus_state::ConsensusState; use crate::clients::ics11_beefy::error::Error as BeefyError; @@ -15,13 +16,10 @@ use crate::core::ics02_client::client_consensus::AnyConsensusState; use crate::core::ics02_client::client_def::{ClientDef, ConsensusUpdateResult}; use crate::core::ics02_client::client_state::AnyClientState; use crate::core::ics02_client::client_type::ClientType; -use crate::core::ics02_client::context::ClientReader; use crate::core::ics02_client::error::Error; use crate::core::ics03_connection::connection::ConnectionEnd; -use crate::core::ics03_connection::context::ConnectionReader; use crate::core::ics04_channel::channel::ChannelEnd; use crate::core::ics04_channel::commitment::{AcknowledgementCommitment, PacketCommitment}; -use crate::core::ics04_channel::context::ChannelReader; use crate::core::ics04_channel::packet::Sequence; use crate::core::ics23_commitment::commitment::{ @@ -31,8 +29,10 @@ use crate::core::ics23_commitment::commitment::{ use crate::core::ics24_host::identifier::ConnectionId; use crate::core::ics24_host::identifier::{ChannelId, ClientId, PortId}; use crate::core::ics24_host::Path; +use crate::core::ics26_routing::context::LightClientContext; use crate::prelude::*; use crate::Height; +use core::marker::PhantomData; use crate::core::ics24_host::path::{ AcksPath, ChannelEndsPath, ClientConsensusStatePath, ClientStatePath, CommitmentsPath, @@ -41,35 +41,25 @@ use crate::core::ics24_host::path::{ use crate::downcast; /// Methods definitions specific to Beefy Light Client operation -pub trait BeefyTraits: BeefyHostFunctions + Clone + Default { - /// This function should verify membership in a trie proof using parity's sp-trie package - /// with a BlakeTwo256 Hasher - fn verify_membership_trie_proof( - root: &H256, - proof: &Vec>, - key: &[u8], - value: &[u8], - ) -> Result<(), Error>; - /// This function should verify non membership in a trie proof using parity's sp-trie package - /// with a BlakeTwo256 Hasher - fn verify_non_membership_trie_proof( - root: &H256, - proof: &Vec>, - key: &[u8], - ) -> Result<(), Error>; -} +pub trait BeefyTraits: BeefyHostFunctions + Clone + Default {} -#[derive(Clone, Debug, Default, PartialEq, Eq)] -pub struct BeefyClient(core::marker::PhantomData); +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct BeefyClient(PhantomData); -impl ClientDef for BeefyClient { +impl Default for BeefyClient { + fn default() -> Self { + Self(PhantomData::default()) + } +} + +impl ClientDef for BeefyClient { type Header = BeefyHeader; type ClientState = ClientState; type ConsensusState = ConsensusState; fn verify_header( &self, - _ctx: &dyn ClientReader, + _ctx: &dyn LightClientContext, _client_id: ClientId, client_state: Self::ClientState, header: Self::Header, @@ -79,12 +69,13 @@ impl ClientDef for BeefyClient { mmr_root_hash: client_state.mmr_root_hash, current_authorities: client_state.authority.clone(), next_authorities: client_state.next_authority_set.clone(), + beefy_activation_block: client_state.beefy_activation_block, }; - let mut light_client = BeefyLightClient::::new(); + let mut light_client = BeefyLightClient::::new(); // If mmr update exists verify it and return the new light client state let light_client_state = if let Some(mmr_update) = header.mmr_update_proof { light_client - .ingest_mmr_root_with_proof(light_client_state, mmr_update) + .verify_mmr_root_with_proof(light_client_state, mmr_update) .map_err(|e| Error::beefy(BeefyError::invalid_mmr_update(format!("{:?}", e))))? 
} else { light_client_state @@ -134,7 +125,7 @@ impl ClientDef for BeefyClient { fn update_state( &self, - ctx: &dyn ClientReader, + ctx: &dyn LightClientContext, client_id: ClientId, client_state: Self::ClientState, header: Self::Header, @@ -181,7 +172,7 @@ impl ClientDef for BeefyClient { fn check_for_misbehaviour( &self, - _ctx: &dyn ClientReader, + _ctx: &dyn LightClientContext, _client_id: ClientId, _client_state: Self::ClientState, _header: Self::Header, @@ -191,9 +182,9 @@ impl ClientDef for BeefyClient { fn verify_client_consensus_state( &self, - ctx: &dyn ConnectionReader, + _ctx: &dyn LightClientContext, _client_state: &Self::ClientState, - height: Height, + _height: Height, prefix: &CommitmentPrefix, proof: &CommitmentProofBytes, root: &CommitmentRoot, @@ -201,44 +192,39 @@ impl ClientDef for BeefyClient { consensus_height: Height, expected_consensus_state: &AnyConsensusState, ) -> Result<(), Error> { - ctx.client_consensus_state(client_id, height) - .map_err(|_| Error::consensus_state_not_found(client_id.clone(), height))?; - let path = ClientConsensusStatePath { client_id: client_id.clone(), epoch: consensus_height.revision_number, height: consensus_height.revision_height, }; let value = expected_consensus_state.encode_vec().unwrap(); - verify_membership::(prefix, proof, root, path, value) + verify_membership::(prefix, proof, root, path, value) } + // Consensus state will be verified in the verification functions before these are called fn verify_connection_state( &self, - ctx: &dyn ConnectionReader, - client_id: &ClientId, + _ctx: &dyn LightClientContext, + _client_id: &ClientId, _client_state: &Self::ClientState, - height: Height, + _height: Height, prefix: &CommitmentPrefix, proof: &CommitmentProofBytes, root: &CommitmentRoot, connection_id: &ConnectionId, expected_connection_end: &ConnectionEnd, ) -> Result<(), Error> { - ctx.client_consensus_state(client_id, height) - .map_err(|_| Error::consensus_state_not_found(client_id.clone(), height))?; - let path = ConnectionsPath(connection_id.clone()); let value = expected_connection_end.encode_vec().unwrap(); - verify_membership::(prefix, proof, root, path, value) + verify_membership::(prefix, proof, root, path, value) } fn verify_channel_state( &self, - ctx: &dyn ChannelReader, - client_id: &ClientId, + _ctx: &dyn LightClientContext, + _client_id: &ClientId, _client_state: &Self::ClientState, - height: Height, + _height: Height, prefix: &CommitmentPrefix, proof: &CommitmentProofBytes, root: &CommitmentRoot, @@ -246,37 +232,31 @@ impl ClientDef for BeefyClient { channel_id: &ChannelId, expected_channel_end: &ChannelEnd, ) -> Result<(), Error> { - ctx.client_consensus_state(client_id, height) - .map_err(|_| Error::consensus_state_not_found(client_id.clone(), height))?; - let path = ChannelEndsPath(port_id.clone(), channel_id.clone()); let value = expected_channel_end.encode_vec().unwrap(); - verify_membership::(prefix, proof, root, path, value) + verify_membership::(prefix, proof, root, path, value) } fn verify_client_full_state( &self, - ctx: &dyn ConnectionReader, + _ctx: &dyn LightClientContext, _client_state: &Self::ClientState, - height: Height, + _height: Height, prefix: &CommitmentPrefix, proof: &CommitmentProofBytes, root: &CommitmentRoot, client_id: &ClientId, expected_client_state: &AnyClientState, ) -> Result<(), Error> { - ctx.client_consensus_state(client_id, height) - .map_err(|_| Error::consensus_state_not_found(client_id.clone(), height))?; - let path = ClientStatePath(client_id.clone()); let value = 
expected_client_state.encode_vec().unwrap(); - verify_membership::(prefix, proof, root, path, value) + verify_membership::(prefix, proof, root, path, value) } fn verify_packet_data( &self, - ctx: &dyn ChannelReader, - client_id: &ClientId, + ctx: &dyn LightClientContext, + _client_id: &ClientId, _client_state: &Self::ClientState, height: Height, connection_end: &ConnectionEnd, @@ -287,8 +267,6 @@ impl ClientDef for BeefyClient { sequence: Sequence, commitment: PacketCommitment, ) -> Result<(), Error> { - ctx.client_consensus_state(client_id, height) - .map_err(|_| Error::consensus_state_not_found(client_id.clone(), height))?; verify_delay_passed(ctx, height, connection_end)?; let commitment_path = CommitmentsPath { @@ -297,7 +275,7 @@ impl ClientDef for BeefyClient { sequence, }; - verify_membership::( + verify_membership::( connection_end.counterparty().prefix(), proof, root, @@ -308,8 +286,8 @@ impl ClientDef for BeefyClient { fn verify_packet_acknowledgement( &self, - ctx: &dyn ChannelReader, - client_id: &ClientId, + ctx: &dyn LightClientContext, + _client_id: &ClientId, _client_state: &Self::ClientState, height: Height, connection_end: &ConnectionEnd, @@ -320,8 +298,6 @@ impl ClientDef for BeefyClient { sequence: Sequence, ack: AcknowledgementCommitment, ) -> Result<(), Error> { - ctx.client_consensus_state(client_id, height) - .map_err(|_| Error::consensus_state_not_found(client_id.clone(), height))?; verify_delay_passed(ctx, height, connection_end)?; let ack_path = AcksPath { @@ -329,7 +305,7 @@ impl ClientDef for BeefyClient { channel_id: channel_id.clone(), sequence, }; - verify_membership::( + verify_membership::( connection_end.counterparty().prefix(), proof, root, @@ -340,8 +316,8 @@ impl ClientDef for BeefyClient { fn verify_next_sequence_recv( &self, - ctx: &dyn ChannelReader, - client_id: &ClientId, + ctx: &dyn LightClientContext, + _client_id: &ClientId, _client_state: &Self::ClientState, height: Height, connection_end: &ConnectionEnd, @@ -351,14 +327,12 @@ impl ClientDef for BeefyClient { channel_id: &ChannelId, sequence: Sequence, ) -> Result<(), Error> { - ctx.client_consensus_state(client_id, height) - .map_err(|_| Error::consensus_state_not_found(client_id.clone(), height))?; verify_delay_passed(ctx, height, connection_end)?; let seq_bytes = codec::Encode::encode(&u64::from(sequence)); let seq_path = SeqRecvsPath(port_id.clone(), channel_id.clone()); - verify_membership::( + verify_membership::( connection_end.counterparty().prefix(), proof, root, @@ -369,8 +343,8 @@ impl ClientDef for BeefyClient { fn verify_packet_receipt_absence( &self, - ctx: &dyn ChannelReader, - client_id: &ClientId, + ctx: &dyn LightClientContext, + _client_id: &ClientId, _client_state: &Self::ClientState, height: Height, connection_end: &ConnectionEnd, @@ -380,8 +354,6 @@ impl ClientDef for BeefyClient { channel_id: &ChannelId, sequence: Sequence, ) -> Result<(), Error> { - ctx.client_consensus_state(client_id, height) - .map_err(|_| Error::consensus_state_not_found(client_id.clone(), height))?; verify_delay_passed(ctx, height, connection_end)?; let receipt_path = ReceiptsPath { @@ -389,7 +361,7 @@ impl ClientDef for BeefyClient { channel_id: channel_id.clone(), sequence, }; - verify_non_membership::( + verify_non_membership::( connection_end.counterparty().prefix(), proof, root, @@ -408,7 +380,7 @@ impl ClientDef for BeefyClient { } } -fn verify_membership>( +fn verify_membership>( prefix: &CommitmentPrefix, proof: &CommitmentProofBytes, root: &CommitmentRoot, @@ -426,10 +398,10 @@ fn 
verify_membership>( let trie_proof: Vec> = codec::Decode::decode(&mut &*trie_proof) .map_err(|e| Error::beefy(BeefyError::scale_decode(e)))?; let root = H256::from_slice(root.as_bytes()); - Verifier::verify_membership_trie_proof(&root, &trie_proof, &key, &value) + Crypto::verify_membership_trie_proof(&root, &trie_proof, &key, &value) } -fn verify_non_membership>( +fn verify_non_membership>( prefix: &CommitmentPrefix, proof: &CommitmentProofBytes, root: &CommitmentRoot, @@ -446,11 +418,11 @@ fn verify_non_membership>( let trie_proof: Vec> = codec::Decode::decode(&mut &*trie_proof) .map_err(|e| Error::beefy(BeefyError::scale_decode(e)))?; let root = H256::from_slice(root.as_bytes()); - Verifier::verify_non_membership_trie_proof(&root, &trie_proof, &key) + Crypto::verify_non_membership_trie_proof(&root, &trie_proof, &key) } fn verify_delay_passed( - ctx: &dyn ChannelReader, + ctx: &dyn LightClientContext, height: Height, connection_end: &ConnectionEnd, ) -> Result<(), Error> { diff --git a/modules/src/clients/ics11_beefy/client_state.rs b/modules/src/clients/ics11_beefy/client_state.rs index 11dc2e6ae9..6d708a41ae 100644 --- a/modules/src/clients/ics11_beefy/client_state.rs +++ b/modules/src/clients/ics11_beefy/client_state.rs @@ -304,6 +304,25 @@ impl From for RawClientState { } } +#[cfg(any(test, feature = "mocks"))] +pub mod test_util { + use super::*; + use crate::core::ics02_client::client_state::AnyClientState; + + pub fn get_dummy_beefy_state() -> AnyClientState { + AnyClientState::Beefy( + ClientState::new( + ChainId::new("polkadot".to_string(), 1), + Default::default(), + 0, + 1, + Default::default(), + Default::default(), + ) + .unwrap(), + ) + } +} #[cfg(test)] mod tests { #[test] diff --git a/modules/src/clients/ics11_beefy/consensus_state.rs b/modules/src/clients/ics11_beefy/consensus_state.rs index b8429a7110..b42d728fed 100644 --- a/modules/src/clients/ics11_beefy/consensus_state.rs +++ b/modules/src/clients/ics11_beefy/consensus_state.rs @@ -158,5 +158,16 @@ impl From for ConsensusState { } } +#[cfg(any(test, feature = "mocks"))] +pub mod test_util { + use super::*; + + pub fn get_dummy_beefy_consensus_state() -> AnyConsensusState { + AnyConsensusState::Beefy(ConsensusState { + timestamp: Time::now(), + root: vec![0; 32].into(), + }) + } +} #[cfg(test)] mod tests {} diff --git a/modules/src/clients/ics11_beefy/header.rs b/modules/src/clients/ics11_beefy/header.rs index ec854f6070..c58ced57e4 100644 --- a/modules/src/clients/ics11_beefy/header.rs +++ b/modules/src/clients/ics11_beefy/header.rs @@ -15,7 +15,7 @@ use beefy_primitives::known_payload_ids::MMR_ROOT_ID; use beefy_primitives::mmr::{MmrLeaf, MmrLeafVersion}; use beefy_primitives::{Commitment, Payload}; use bytes::Buf; -use codec::{Decode, Encode}; +use codec::{Compact, Decode, Encode}; use ibc_proto::ibc::lightclients::beefy::v1::{ BeefyAuthoritySet as RawBeefyAuthoritySet, BeefyMmrLeaf as RawBeefyMmrLeaf, BeefyMmrLeafPartial as RawBeefyMmrLeafPartial, Commitment as RawCommitment, @@ -258,7 +258,7 @@ impl TryFrom for BeefyHeader { ) .map_err(|e| Error::invalid_mmr_update(e.to_string()))?, }, - parachain_heads: H256::decode( + leaf_extra: H256::decode( &mut mmr_update .mmr_leaf .as_ref() @@ -368,7 +368,7 @@ impl From for RawBeefyHeader { .root .encode(), }), - parachain_heads: mmr_update.latest_mmr_leaf.parachain_heads.encode(), + parachain_heads: mmr_update.latest_mmr_leaf.leaf_extra.encode(), }), mmr_leaf_index: mmr_update.mmr_proof.leaf_index, mmr_proof: mmr_update @@ -433,6 +433,7 @@ pub fn decode_header(buf: B) -> 
Result { .try_into() } +/// Attempt to extract the timestamp extrinsic from the parachain header pub fn decode_timestamp_extrinsic(header: &ParachainHeader) -> Result { let proof = header.extrinsic_proof.clone(); let extrinsic_root = header.parachain_header.extrinsics_root; @@ -440,7 +441,8 @@ pub fn decode_timestamp_extrinsic(header: &ParachainHeader) -> Result>::new(&db, &extrinsic_root).unwrap(); // Timestamp extrinsic should be the first inherent and hence the first extrinsic - let key = 0_u32.to_be_bytes().to_vec(); + // https://github.com/paritytech/substrate/blob/d602397a0bbb24b5d627795b797259a44a5e29e9/primitives/trie/src/lib.rs#L99-L101 + let key = codec::Encode::encode(&Compact(0u32)); let ext_bytes = trie .get(&key) .map_err(|_| Error::timestamp_extrinsic())? @@ -448,7 +450,7 @@ pub fn decode_timestamp_extrinsic(header: &ParachainHeader) -> Result) = + let (_, _, timestamp): (u8, u8, Compact) = codec::Decode::decode(&mut &ext_bytes[2..]).map_err(|_| Error::timestamp_extrinsic())?; Ok(timestamp.into()) } diff --git a/modules/src/clients/mod.rs b/modules/src/clients/mod.rs index aaf80babb9..a4fe74ad6e 100644 --- a/modules/src/clients/mod.rs +++ b/modules/src/clients/mod.rs @@ -1,4 +1,5 @@ //! Implementations of client verification algorithms for specific types of chains. +pub mod crypto_ops; pub mod ics07_tendermint; pub mod ics11_beefy; diff --git a/modules/src/core/ics02_client/client_def.rs b/modules/src/core/ics02_client/client_def.rs index 237e5f8e05..64cc9bd8ae 100644 --- a/modules/src/core/ics02_client/client_def.rs +++ b/modules/src/core/ics02_client/client_def.rs @@ -1,21 +1,20 @@ +use crate::clients::crypto_ops::crypto::CryptoOps; use crate::clients::ics07_tendermint::client_def::TendermintClient; -use crate::clients::ics11_beefy::client_def::{BeefyClient, BeefyTraits}; +use crate::clients::ics11_beefy::client_def::BeefyClient; use crate::core::ics02_client::client_consensus::{AnyConsensusState, ConsensusState}; use crate::core::ics02_client::client_state::{AnyClientState, ClientState}; use crate::core::ics02_client::client_type::ClientType; -use crate::core::ics02_client::context::ClientReader; use crate::core::ics02_client::error::Error; use crate::core::ics02_client::header::{AnyHeader, Header}; use crate::core::ics03_connection::connection::ConnectionEnd; -use crate::core::ics03_connection::context::ConnectionReader; use crate::core::ics04_channel::channel::ChannelEnd; use crate::core::ics04_channel::commitment::{AcknowledgementCommitment, PacketCommitment}; -use crate::core::ics04_channel::context::ChannelReader; use crate::core::ics04_channel::packet::Sequence; use crate::core::ics23_commitment::commitment::{ CommitmentPrefix, CommitmentProofBytes, CommitmentRoot, }; use crate::core::ics24_host::identifier::{ChannelId, ClientId, ConnectionId, PortId}; +use crate::core::ics26_routing::context::LightClientContext; use crate::downcast; use crate::prelude::*; use crate::Height; @@ -36,7 +35,7 @@ pub trait ClientDef: Clone { fn verify_header( &self, - ctx: &dyn ClientReader, + ctx: &dyn LightClientContext, client_id: ClientId, client_state: Self::ClientState, header: Self::Header, @@ -44,7 +43,7 @@ pub trait ClientDef: Clone { fn update_state( &self, - ctx: &dyn ClientReader, + ctx: &dyn LightClientContext, client_id: ClientId, client_state: Self::ClientState, header: Self::Header, @@ -58,7 +57,7 @@ pub trait ClientDef: Clone { fn check_for_misbehaviour( &self, - ctx: &dyn ClientReader, + ctx: &dyn LightClientContext, client_id: ClientId, client_state: 
Self::ClientState, header: Self::Header, @@ -83,7 +82,7 @@ pub trait ClientDef: Clone { #[allow(clippy::too_many_arguments)] fn verify_client_consensus_state( &self, - ctx: &dyn ConnectionReader, + ctx: &dyn LightClientContext, client_state: &Self::ClientState, height: Height, prefix: &CommitmentPrefix, @@ -98,7 +97,7 @@ pub trait ClientDef: Clone { #[allow(clippy::too_many_arguments)] fn verify_connection_state( &self, - ctx: &dyn ConnectionReader, + ctx: &dyn LightClientContext, client_id: &ClientId, client_state: &Self::ClientState, height: Height, @@ -113,7 +112,7 @@ pub trait ClientDef: Clone { #[allow(clippy::too_many_arguments)] fn verify_channel_state( &self, - ctx: &dyn ChannelReader, + ctx: &dyn LightClientContext, client_id: &ClientId, client_state: &Self::ClientState, height: Height, @@ -129,7 +128,7 @@ pub trait ClientDef: Clone { #[allow(clippy::too_many_arguments)] fn verify_client_full_state( &self, - ctx: &dyn ConnectionReader, + ctx: &dyn LightClientContext, client_state: &Self::ClientState, height: Height, prefix: &CommitmentPrefix, @@ -143,7 +142,7 @@ pub trait ClientDef: Clone { #[allow(clippy::too_many_arguments)] fn verify_packet_data( &self, - ctx: &dyn ChannelReader, + ctx: &dyn LightClientContext, client_id: &ClientId, client_state: &Self::ClientState, height: Height, @@ -160,7 +159,7 @@ pub trait ClientDef: Clone { #[allow(clippy::too_many_arguments)] fn verify_packet_acknowledgement( &self, - ctx: &dyn ChannelReader, + ctx: &dyn LightClientContext, client_id: &ClientId, client_state: &Self::ClientState, height: Height, @@ -177,7 +176,7 @@ pub trait ClientDef: Clone { #[allow(clippy::too_many_arguments)] fn verify_next_sequence_recv( &self, - ctx: &dyn ChannelReader, + ctx: &dyn LightClientContext, client_id: &ClientId, client_state: &Self::ClientState, height: Height, @@ -193,7 +192,7 @@ pub trait ClientDef: Clone { #[allow(clippy::too_many_arguments)] fn verify_packet_receipt_absence( &self, - ctx: &dyn ChannelReader, + ctx: &dyn LightClientContext, client_id: &ClientId, client_state: &Self::ClientState, height: Height, @@ -207,18 +206,18 @@ pub trait ClientDef: Clone { } #[derive(Clone, Debug, PartialEq, Eq)] -pub enum AnyClient { +pub enum AnyClient { Tendermint(TendermintClient), - Beefy(BeefyClient), + Beefy(BeefyClient), #[cfg(any(test, feature = "mocks"))] Mock(MockClient), } -impl AnyClient { +impl AnyClient { pub fn from_client_type(client_type: ClientType) -> Self { match client_type { ClientType::Tendermint => Self::Tendermint(TendermintClient::default()), - ClientType::Beefy => Self::Beefy(BeefyClient::default()), + ClientType::Beefy => Self::Beefy(BeefyClient::::default()), #[cfg(any(test, feature = "mocks"))] ClientType::Mock => Self::Mock(MockClient), } @@ -226,7 +225,7 @@ impl AnyClient { } // ⚠️ Beware of the awful boilerplate below ⚠️ -impl ClientDef for AnyClient { +impl ClientDef for AnyClient { type Header = AnyHeader; type ClientState = AnyClientState; type ConsensusState = AnyConsensusState; @@ -234,7 +233,7 @@ impl ClientDef for AnyClient { /// Validate an incoming header fn verify_header( &self, - ctx: &dyn ClientReader, + ctx: &dyn LightClientContext, client_id: ClientId, client_state: Self::ClientState, header: Self::Header, @@ -276,7 +275,7 @@ impl ClientDef for AnyClient { /// Validates an incoming `header` against the latest consensus state of this client. 
fn update_state( &self, - ctx: &dyn ClientReader, + ctx: &dyn LightClientContext, client_id: ClientId, client_state: AnyClientState, header: AnyHeader, @@ -366,7 +365,7 @@ impl ClientDef for AnyClient { /// Checks for misbehaviour in an incoming header fn check_for_misbehaviour( &self, - ctx: &dyn ClientReader, + ctx: &dyn LightClientContext, client_id: ClientId, client_state: Self::ClientState, header: Self::Header, @@ -466,7 +465,7 @@ impl ClientDef for AnyClient { fn verify_client_consensus_state( &self, - ctx: &dyn ConnectionReader, + ctx: &dyn LightClientContext, client_state: &Self::ClientState, height: Height, prefix: &CommitmentPrefix, @@ -538,7 +537,7 @@ impl ClientDef for AnyClient { fn verify_connection_state( &self, - ctx: &dyn ConnectionReader, + ctx: &dyn LightClientContext, client_id: &ClientId, client_state: &AnyClientState, height: Height, @@ -603,7 +602,7 @@ impl ClientDef for AnyClient { fn verify_channel_state( &self, - ctx: &dyn ChannelReader, + ctx: &dyn LightClientContext, client_id: &ClientId, client_state: &AnyClientState, height: Height, @@ -673,7 +672,7 @@ impl ClientDef for AnyClient { } fn verify_client_full_state( &self, - ctx: &dyn ConnectionReader, + ctx: &dyn LightClientContext, client_state: &Self::ClientState, height: Height, prefix: &CommitmentPrefix, @@ -740,7 +739,7 @@ impl ClientDef for AnyClient { fn verify_packet_data( &self, - ctx: &dyn ChannelReader, + ctx: &dyn LightClientContext, client_id: &ClientId, client_state: &Self::ClientState, height: Height, @@ -821,7 +820,7 @@ impl ClientDef for AnyClient { fn verify_packet_acknowledgement( &self, - ctx: &dyn ChannelReader, + ctx: &dyn LightClientContext, client_id: &ClientId, client_state: &Self::ClientState, height: Height, @@ -900,7 +899,7 @@ impl ClientDef for AnyClient { } fn verify_next_sequence_recv( &self, - ctx: &dyn ChannelReader, + ctx: &dyn LightClientContext, client_id: &ClientId, client_state: &Self::ClientState, height: Height, @@ -977,7 +976,7 @@ impl ClientDef for AnyClient { fn verify_packet_receipt_absence( &self, - ctx: &dyn ChannelReader, + ctx: &dyn LightClientContext, client_id: &ClientId, client_state: &Self::ClientState, height: Height, diff --git a/modules/src/core/ics02_client/handler.rs b/modules/src/core/ics02_client/handler.rs index 3c3971b4fe..4eaf41113a 100644 --- a/modules/src/core/ics02_client/handler.rs +++ b/modules/src/core/ics02_client/handler.rs @@ -1,9 +1,8 @@ //! This module implements the processing logic for ICS2 (client abstractions and functions) msgs. - -use crate::clients::ics11_beefy::client_def::BeefyTraits; -use crate::core::ics02_client::context::ClientReader; +use crate::clients::crypto_ops::crypto::CryptoOps; use crate::core::ics02_client::error::Error; use crate::core::ics02_client::msgs::ClientMsg; +use crate::core::ics26_routing::context::LightClientContext; use crate::handler::HandlerOutput; pub mod create_client; @@ -18,15 +17,18 @@ pub enum ClientResult { } /// General entry point for processing any message related to ICS2 (client functions) protocols. 
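The `Crypto: CryptoOps` bound that this hunk threads through the client handlers has to be satisfied by the host crate with a concrete provider for the two trie-proof methods declared in modules/src/clients/crypto_ops/crypto.rs. Below is a minimal host-side sketch, not part of the patch: the `HostCrypto` type, the `Error::implementation_specific()` constructor and the exact sp-trie import paths are assumptions, and the `beefy_client::traits::HostFunctions` half of the bound is elided.

    // Hypothetical `CryptoOps` provider built on sp-trie (BlakeTwo256 trie layout).
    use ibc::clients::crypto_ops::crypto::CryptoOps;
    use ibc::core::ics02_client::error::Error;
    use sp_core::H256;
    use sp_runtime::traits::BlakeTwo256;
    use sp_trie::{LayoutV0, StorageProof, Trie, TrieDB};

    #[derive(Clone, Default)]
    pub struct HostCrypto;

    // `CryptoOps: HostFunctions + Clone`, so a real provider would also implement
    // `beefy_client::traits::HostFunctions` (keccak256, signature recovery, ...); elided here.
    impl CryptoOps for HostCrypto {
        fn verify_membership_trie_proof(
            root: &H256,
            proof: &Vec<Vec<u8>>,
            key: &[u8],
            value: &[u8],
        ) -> Result<(), Error> {
            // Rebuild an in-memory db from the proof nodes and look the key up under `root`.
            let db = StorageProof::new(proof.clone()).into_memory_db::<BlakeTwo256>();
            let trie = TrieDB::<LayoutV0<BlakeTwo256>>::new(&db, root)
                .map_err(|_| Error::implementation_specific())?;
            match trie.get(key) {
                Ok(Some(stored)) if stored == value => Ok(()),
                _ => Err(Error::implementation_specific()),
            }
        }

        fn verify_non_membership_trie_proof(
            root: &H256,
            proof: &Vec<Vec<u8>>,
            key: &[u8],
        ) -> Result<(), Error> {
            let db = StorageProof::new(proof.clone()).into_memory_db::<BlakeTwo256>();
            let trie = TrieDB::<LayoutV0<BlakeTwo256>>::new(&db, root)
                .map_err(|_| Error::implementation_specific())?;
            // Success here means the key is provably absent under the given root.
            match trie.get(key) {
                Ok(None) => Ok(()),
                _ => Err(Error::implementation_specific()),
            }
        }
    }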
-pub fn dispatch(ctx: &Ctx, msg: ClientMsg) -> Result, Error> +pub fn dispatch( + ctx: &Ctx, + msg: ClientMsg, +) -> Result, Error> where - Ctx: ClientReader, - Beefy: BeefyTraits, + Ctx: LightClientContext, + Crypto: CryptoOps, { match msg { ClientMsg::CreateClient(msg) => create_client::process(ctx, msg), - ClientMsg::UpdateClient(msg) => update_client::process::(ctx, msg), - ClientMsg::UpgradeClient(msg) => upgrade_client::process::(ctx, msg), + ClientMsg::UpdateClient(msg) => update_client::process::(ctx, msg), + ClientMsg::UpgradeClient(msg) => upgrade_client::process::(ctx, msg), _ => { unimplemented!() } diff --git a/modules/src/core/ics02_client/handler/create_client.rs b/modules/src/core/ics02_client/handler/create_client.rs index deb1f5df45..8218ad65bd 100644 --- a/modules/src/core/ics02_client/handler/create_client.rs +++ b/modules/src/core/ics02_client/handler/create_client.rs @@ -1,11 +1,11 @@ //! Protocol logic specific to processing ICS2 messages of type `MsgCreateAnyClient`. +use crate::core::ics26_routing::context::LightClientContext; use crate::prelude::*; use crate::core::ics02_client::client_consensus::AnyConsensusState; use crate::core::ics02_client::client_state::AnyClientState; use crate::core::ics02_client::client_type::ClientType; -use crate::core::ics02_client::context::ClientReader; use crate::core::ics02_client::error::Error; use crate::core::ics02_client::events::Attributes; use crate::core::ics02_client::handler::ClientResult; @@ -29,7 +29,7 @@ pub struct Result { } pub fn process( - ctx: &dyn ClientReader, + ctx: &dyn LightClientContext, msg: MsgCreateAnyClient, ) -> HandlerResult { let mut output = HandlerOutput::builder(); diff --git a/modules/src/core/ics02_client/handler/update_client.rs b/modules/src/core/ics02_client/handler/update_client.rs index bd936d6f3f..58843f15c9 100644 --- a/modules/src/core/ics02_client/handler/update_client.rs +++ b/modules/src/core/ics02_client/handler/update_client.rs @@ -1,18 +1,17 @@ //! Protocol logic specific to processing ICS2 messages of type `MsgUpdateAnyClient`. - -use crate::clients::ics11_beefy::client_def::BeefyTraits; use tracing::debug; +use crate::clients::crypto_ops::crypto::CryptoOps; use crate::core::ics02_client::client_def::{AnyClient, ClientDef, ConsensusUpdateResult}; use crate::core::ics02_client::client_state::{AnyClientState, ClientState}; use crate::core::ics02_client::client_type::ClientType; -use crate::core::ics02_client::context::ClientReader; use crate::core::ics02_client::error::Error; use crate::core::ics02_client::events::Attributes; use crate::core::ics02_client::handler::ClientResult; use crate::core::ics02_client::height::Height; use crate::core::ics02_client::msgs::update_client::MsgUpdateAnyClient; use crate::core::ics24_host::identifier::ClientId; +use crate::core::ics26_routing::context::LightClientContext; use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; @@ -29,8 +28,8 @@ pub struct Result { pub processed_height: Height, } -pub fn process( - ctx: &dyn ClientReader, +pub fn process( + ctx: &dyn LightClientContext, msg: MsgUpdateAnyClient, ) -> HandlerResult { let mut output = HandlerOutput::builder(); @@ -44,7 +43,7 @@ pub fn process( // Read client type from the host chain store. The client should already exist. let client_type = ctx.client_type(&client_id)?; - let client_def = AnyClient::::from_client_type(client_type); + let client_def = AnyClient::::from_client_type(client_type); // Read client state from the host chain store. 
let client_state = ctx.client_state(&client_id)?; diff --git a/modules/src/core/ics02_client/handler/upgrade_client.rs b/modules/src/core/ics02_client/handler/upgrade_client.rs index 4790a657eb..c05de32dbc 100644 --- a/modules/src/core/ics02_client/handler/upgrade_client.rs +++ b/modules/src/core/ics02_client/handler/upgrade_client.rs @@ -1,14 +1,14 @@ //! Protocol logic specific to processing ICS2 messages of type `MsgUpgradeAnyClient`. //! -use crate::clients::ics11_beefy::client_def::BeefyTraits; +use crate::clients::crypto_ops::crypto::CryptoOps; use crate::core::ics02_client::client_def::{AnyClient, ClientDef, ConsensusUpdateResult}; use crate::core::ics02_client::client_state::{AnyClientState, ClientState}; -use crate::core::ics02_client::context::ClientReader; use crate::core::ics02_client::error::Error; use crate::core::ics02_client::events::Attributes; use crate::core::ics02_client::handler::ClientResult; use crate::core::ics02_client::msgs::upgrade_client::MsgUpgradeAnyClient; use crate::core::ics24_host::identifier::ClientId; +use crate::core::ics26_routing::context::LightClientContext; use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; @@ -22,8 +22,8 @@ pub struct Result { pub consensus_state: Option, } -pub fn process( - ctx: &dyn ClientReader, +pub fn process( + ctx: &dyn LightClientContext, msg: MsgUpgradeAnyClient, ) -> HandlerResult { let mut output = HandlerOutput::builder(); @@ -47,7 +47,7 @@ pub fn process( let client_type = ctx.client_type(&client_id)?; - let client_def = AnyClient::::from_client_type(client_type); + let client_def = AnyClient::::from_client_type(client_type); let (new_client_state, new_consensus_state) = client_def.verify_upgrade_and_update_state( &upgrade_client_state, diff --git a/modules/src/core/ics02_client/msgs/create_client.rs b/modules/src/core/ics02_client/msgs/create_client.rs index 275de7b51e..f79bdc0541 100644 --- a/modules/src/core/ics02_client/msgs/create_client.rs +++ b/modules/src/core/ics02_client/msgs/create_client.rs @@ -116,7 +116,7 @@ mod tests { let msg = MsgCreateAnyClient::new( tm_client_state, - AnyConsensusState::Tendermint(tm_header.try_into().unwrap()), + Some(AnyConsensusState::Tendermint(tm_header.try_into().unwrap())), signer, ) .unwrap(); diff --git a/modules/src/core/ics03_connection/context.rs b/modules/src/core/ics03_connection/context.rs index 3a29f23a6d..4ec14c2983 100644 --- a/modules/src/core/ics03_connection/context.rs +++ b/modules/src/core/ics03_connection/context.rs @@ -2,8 +2,6 @@ //! the interface that any host chain must implement to be able to process any `ConnectionMsg`. //! See "ADR 003: IBC protocol implementation" for more details. -use crate::core::ics02_client::client_consensus::AnyConsensusState; -use crate::core::ics02_client::client_state::AnyClientState; use crate::core::ics03_connection::connection::ConnectionEnd; use crate::core::ics03_connection::error::Error; use crate::core::ics03_connection::handler::{ConnectionIdState, ConnectionResult}; @@ -18,28 +16,12 @@ pub trait ConnectionReader { /// Returns the ConnectionEnd for the given identifier `conn_id`. fn connection_end(&self, conn_id: &ConnectionId) -> Result; - /// Returns the ClientState for the given identifier `client_id`. - fn client_state(&self, client_id: &ClientId) -> Result; - - /// Returns the current height of the local chain. - fn host_current_height(&self) -> Height; - /// Returns the oldest height available on the local chain. 
fn host_oldest_height(&self) -> Height; /// Returns the prefix that the local chain uses in the KV store. fn commitment_prefix(&self) -> CommitmentPrefix; - /// Returns the ConsensusState that the given client stores at a specific height. - fn client_consensus_state( - &self, - client_id: &ClientId, - height: Height, - ) -> Result; - - /// Returns the ConsensusState of the host (local) chain at a specific height. - fn host_consensus_state(&self, height: Height) -> Result; - /// Function required by ICS 03. Returns the list of all possible versions that the connection /// handshake protocol supports. fn get_compatible_versions(&self) -> Vec { diff --git a/modules/src/core/ics03_connection/handler.rs b/modules/src/core/ics03_connection/handler.rs index ead48067aa..7e2323a7b1 100644 --- a/modules/src/core/ics03_connection/handler.rs +++ b/modules/src/core/ics03_connection/handler.rs @@ -1,11 +1,10 @@ //! This module implements the processing logic for ICS3 (connection open handshake) messages. - -use crate::clients::ics11_beefy::client_def::BeefyTraits; +use crate::clients::crypto_ops::crypto::CryptoOps; use crate::core::ics03_connection::connection::ConnectionEnd; -use crate::core::ics03_connection::context::ConnectionReader; use crate::core::ics03_connection::error::Error; use crate::core::ics03_connection::msgs::ConnectionMsg; use crate::core::ics24_host::identifier::ConnectionId; +use crate::core::ics26_routing::context::LightClientContext; use crate::handler::HandlerOutput; pub mod conn_open_ack; @@ -42,18 +41,18 @@ pub struct ConnectionResult { /// General entry point for processing any type of message related to the ICS3 connection open /// handshake protocol. -pub fn dispatch( +pub fn dispatch( ctx: &Ctx, msg: ConnectionMsg, ) -> Result, Error> where - Ctx: ConnectionReader, - Beefy: BeefyTraits, + Ctx: LightClientContext, + Crypto: CryptoOps, { match msg { ConnectionMsg::ConnectionOpenInit(msg) => conn_open_init::process(ctx, msg), - ConnectionMsg::ConnectionOpenTry(msg) => conn_open_try::process::(ctx, *msg), - ConnectionMsg::ConnectionOpenAck(msg) => conn_open_ack::process::(ctx, *msg), - ConnectionMsg::ConnectionOpenConfirm(msg) => conn_open_confirm::process::(ctx, msg), + ConnectionMsg::ConnectionOpenTry(msg) => conn_open_try::process::(ctx, *msg), + ConnectionMsg::ConnectionOpenAck(msg) => conn_open_ack::process::(ctx, *msg), + ConnectionMsg::ConnectionOpenConfirm(msg) => conn_open_confirm::process::(ctx, msg), } } diff --git a/modules/src/core/ics03_connection/handler/conn_open_ack.rs b/modules/src/core/ics03_connection/handler/conn_open_ack.rs index ec2efe411c..9648074d4d 100644 --- a/modules/src/core/ics03_connection/handler/conn_open_ack.rs +++ b/modules/src/core/ics03_connection/handler/conn_open_ack.rs @@ -1,8 +1,7 @@ //! Protocol logic specific to processing ICS3 messages of type `MsgConnectionOpenAck`. 
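// Illustrative sketch (not part of the patch): the queries removed from `ConnectionReader`
// above are still reachable, but now come from `ClientReader` through the umbrella
// `LightClientContext` trait, so the ICS02 error has to be wrapped explicitly. A hypothetical
// helper showing the shape used throughout the connection handlers:
fn host_view(
    ctx: &dyn LightClientContext,
    client_id: &ClientId,
) -> Result<(Height, AnyClientState), Error> {
    let height = ctx.host_height(); // formerly ConnectionReader::host_current_height()
    let client_state = ctx
        .client_state(client_id)
        .map_err(Error::ics02_client)?; // ClientReader reports an ICS02 error
    Ok((height, client_state))
}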
-use crate::clients::ics11_beefy::client_def::BeefyTraits; +use crate::clients::crypto_ops::crypto::CryptoOps; use crate::core::ics03_connection::connection::{ConnectionEnd, Counterparty, State}; -use crate::core::ics03_connection::context::ConnectionReader; use crate::core::ics03_connection::error::Error; use crate::core::ics03_connection::events::Attributes; use crate::core::ics03_connection::handler::verify::{ @@ -10,12 +9,13 @@ use crate::core::ics03_connection::handler::verify::{ }; use crate::core::ics03_connection::handler::{ConnectionIdState, ConnectionResult}; use crate::core::ics03_connection::msgs::conn_open_ack::MsgConnectionOpenAck; +use crate::core::ics26_routing::context::LightClientContext; use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; -pub(crate) fn process( - ctx: &dyn ConnectionReader, +pub(crate) fn process( + ctx: &dyn LightClientContext, msg: MsgConnectionOpenAck, ) -> HandlerResult { let mut output = HandlerOutput::builder(); @@ -66,7 +66,7 @@ pub(crate) fn process( }; // 2. Pass the details to the verification function. - verify_proofs::( + verify_proofs::( ctx, msg.client_state.clone(), msg.proofs.height(), @@ -85,7 +85,7 @@ pub(crate) fn process( let event_attributes = Attributes { connection_id: Some(result.connection_id.clone()), - height: ctx.host_current_height(), + height: ctx.host_height(), ..Default::default() }; output.emit(IbcEvent::OpenAckConnection(event_attributes.into())); @@ -250,7 +250,7 @@ mod tests { for e in proto_output.events.iter() { assert!(matches!(e, &IbcEvent::OpenAckConnection(_))); - assert_eq!(e.height(), test.ctx.host_current_height()); + assert_eq!(e.height(), test.ctx.host_height()); } } Err(e) => { diff --git a/modules/src/core/ics03_connection/handler/conn_open_confirm.rs b/modules/src/core/ics03_connection/handler/conn_open_confirm.rs index 3a790214f4..a39608e3ca 100644 --- a/modules/src/core/ics03_connection/handler/conn_open_confirm.rs +++ b/modules/src/core/ics03_connection/handler/conn_open_confirm.rs @@ -1,19 +1,19 @@ //! Protocol logic specific to processing ICS3 messages of type `MsgConnectionOpenConfirm`. -use crate::clients::ics11_beefy::client_def::BeefyTraits; +use crate::clients::crypto_ops::crypto::CryptoOps; use crate::core::ics03_connection::connection::{ConnectionEnd, Counterparty, State}; -use crate::core::ics03_connection::context::ConnectionReader; use crate::core::ics03_connection::error::Error; use crate::core::ics03_connection::events::Attributes; use crate::core::ics03_connection::handler::verify::verify_proofs; use crate::core::ics03_connection::handler::{ConnectionIdState, ConnectionResult}; use crate::core::ics03_connection::msgs::conn_open_confirm::MsgConnectionOpenConfirm; +use crate::core::ics26_routing::context::LightClientContext; use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; -pub(crate) fn process( - ctx: &dyn ConnectionReader, +pub(crate) fn process( + ctx: &dyn LightClientContext, msg: MsgConnectionOpenConfirm, ) -> HandlerResult { let mut output = HandlerOutput::builder(); @@ -41,7 +41,7 @@ pub(crate) fn process( ); // 2. Pass the details to the verification function. 
- verify_proofs::( + verify_proofs::( ctx, None, msg.proofs.height(), @@ -63,7 +63,7 @@ pub(crate) fn process( let event_attributes = Attributes { connection_id: Some(result.connection_id.clone()), - height: ctx.host_current_height(), + height: ctx.host_height(), ..Default::default() }; output.emit(IbcEvent::OpenConfirmConnection(event_attributes.into())); @@ -171,7 +171,7 @@ mod tests { for e in proto_output.events.iter() { assert!(matches!(e, &IbcEvent::OpenConfirmConnection(_))); - assert_eq!(e.height(), test.ctx.host_current_height()); + assert_eq!(e.height(), test.ctx.host_height()); } } Err(e) => { diff --git a/modules/src/core/ics03_connection/handler/conn_open_init.rs b/modules/src/core/ics03_connection/handler/conn_open_init.rs index 4cfd3d59ae..d91ac2f11a 100644 --- a/modules/src/core/ics03_connection/handler/conn_open_init.rs +++ b/modules/src/core/ics03_connection/handler/conn_open_init.rs @@ -1,24 +1,25 @@ //! Protocol logic specific to ICS3 messages of type `MsgConnectionOpenInit`. use crate::core::ics03_connection::connection::{ConnectionEnd, State}; -use crate::core::ics03_connection::context::ConnectionReader; use crate::core::ics03_connection::error::Error; use crate::core::ics03_connection::events::Attributes; use crate::core::ics03_connection::handler::{ConnectionIdState, ConnectionResult}; use crate::core::ics03_connection::msgs::conn_open_init::MsgConnectionOpenInit; use crate::core::ics24_host::identifier::ConnectionId; +use crate::core::ics26_routing::context::LightClientContext; use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; pub(crate) fn process( - ctx: &dyn ConnectionReader, + ctx: &dyn LightClientContext, msg: MsgConnectionOpenInit, ) -> HandlerResult { let mut output = HandlerOutput::builder(); // An IBC client running on the local (host) chain should exist. - ctx.client_state(&msg.client_id)?; + ctx.client_state(&msg.client_id) + .map_err(|e| Error::ics02_client(e))?; let versions = match msg.version { Some(version) => { @@ -56,7 +57,7 @@ pub(crate) fn process( let event_attributes = Attributes { connection_id: Some(conn_id), - height: ctx.host_current_height(), + height: ctx.host_height(), ..Default::default() }; output.emit(IbcEvent::OpenInitConnection(event_attributes.into())); @@ -158,7 +159,7 @@ mod tests { for e in proto_output.events.iter() { assert!(matches!(e, &IbcEvent::OpenInitConnection(_))); - assert_eq!(e.height(), test.ctx.host_current_height()); + assert_eq!(e.height(), test.ctx.host_height()); } assert_eq!(res.connection_end.versions(), test.expected_versions); diff --git a/modules/src/core/ics03_connection/handler/conn_open_try.rs b/modules/src/core/ics03_connection/handler/conn_open_try.rs index 6a61bd313a..89a150062f 100644 --- a/modules/src/core/ics03_connection/handler/conn_open_try.rs +++ b/modules/src/core/ics03_connection/handler/conn_open_try.rs @@ -1,8 +1,7 @@ //! Protocol logic specific to processing ICS3 messages of type `MsgConnectionOpenTry`. 
-use crate::clients::ics11_beefy::client_def::BeefyTraits; +use crate::clients::crypto_ops::crypto::CryptoOps; use crate::core::ics03_connection::connection::{ConnectionEnd, Counterparty, State}; -use crate::core::ics03_connection::context::ConnectionReader; use crate::core::ics03_connection::error::Error; use crate::core::ics03_connection::events::Attributes; use crate::core::ics03_connection::handler::verify::{ @@ -11,12 +10,13 @@ use crate::core::ics03_connection::handler::verify::{ use crate::core::ics03_connection::handler::{ConnectionIdState, ConnectionResult}; use crate::core::ics03_connection::msgs::conn_open_try::MsgConnectionOpenTry; use crate::core::ics24_host::identifier::ConnectionId; +use crate::core::ics26_routing::context::LightClientContext; use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; -pub(crate) fn process( - ctx: &dyn ConnectionReader, +pub(crate) fn process( + ctx: &dyn LightClientContext, msg: MsgConnectionOpenTry, ) -> HandlerResult { let mut output = HandlerOutput::builder(); @@ -79,7 +79,7 @@ pub(crate) fn process( ); // 2. Pass the details to the verification function. - verify_proofs::( + verify_proofs::( ctx, msg.client_state.clone(), msg.proofs.height(), @@ -113,7 +113,7 @@ pub(crate) fn process( let event_attributes = Attributes { connection_id: Some(conn_id), - height: ctx.host_current_height(), + height: ctx.host_height(), ..Default::default() }; output.emit(IbcEvent::OpenTryConnection(event_attributes.into())); @@ -251,7 +251,7 @@ mod tests { for e in proto_output.events.iter() { assert!(matches!(e, &IbcEvent::OpenTryConnection(_))); - assert_eq!(e.height(), test.ctx.host_current_height()); + assert_eq!(e.height(), test.ctx.host_height()); } } Err(e) => { diff --git a/modules/src/core/ics03_connection/handler/verify.rs b/modules/src/core/ics03_connection/handler/verify.rs index 3eefd81391..9e61191c28 100644 --- a/modules/src/core/ics03_connection/handler/verify.rs +++ b/modules/src/core/ics03_connection/handler/verify.rs @@ -1,26 +1,25 @@ //! ICS3 verification functions, common across all four handlers of ICS3. - -use crate::clients::ics11_beefy::client_def::BeefyTraits; +use crate::clients::crypto_ops::crypto::CryptoOps; use crate::core::ics02_client::client_consensus::ConsensusState; use crate::core::ics02_client::client_state::{AnyClientState, ClientState}; use crate::core::ics02_client::{client_def::AnyClient, client_def::ClientDef}; use crate::core::ics03_connection::connection::ConnectionEnd; -use crate::core::ics03_connection::context::ConnectionReader; use crate::core::ics03_connection::error::Error; use crate::core::ics23_commitment::commitment::CommitmentProofBytes; +use crate::core::ics26_routing::context::LightClientContext; use crate::proofs::{ConsensusProof, Proofs}; use crate::Height; /// Entry point for verifying all proofs bundled in any ICS3 message. -pub fn verify_proofs( - ctx: &dyn ConnectionReader, +pub fn verify_proofs( + ctx: &dyn LightClientContext, client_state: Option, height: Height, connection_end: &ConnectionEnd, expected_conn: &ConnectionEnd, proofs: &Proofs, ) -> Result<(), Error> { - verify_connection_proof::( + verify_connection_proof::( ctx, height, connection_end, @@ -31,7 +30,7 @@ pub fn verify_proofs( // If the message includes a client state, then verify the proof for that state. 
if let Some(expected_client_state) = client_state { - verify_client_proof::( + verify_client_proof::( ctx, height, connection_end, @@ -46,7 +45,7 @@ pub fn verify_proofs( // If a consensus proof is attached to the message, then verify it. if let Some(proof) = proofs.consensus_proof() { - Ok(verify_consensus_proof::( + Ok(verify_consensus_proof::( ctx, height, connection_end, @@ -60,8 +59,8 @@ pub fn verify_proofs( /// Verifies the authenticity and semantic correctness of a commitment `proof`. The commitment /// claims to prove that an object of type connection exists on the source chain (i.e., the chain /// which created this proof). This object must match the state of `expected_conn`. -pub fn verify_connection_proof( - ctx: &dyn ConnectionReader, +pub fn verify_connection_proof( + ctx: &dyn LightClientContext, height: Height, connection_end: &ConnectionEnd, expected_conn: &ConnectionEnd, @@ -69,7 +68,9 @@ pub fn verify_connection_proof( proof: &CommitmentProofBytes, ) -> Result<(), Error> { // Fetch the client state (IBC client on the local/host chain). - let client_state = ctx.client_state(connection_end.client_id())?; + let client_state = ctx + .client_state(connection_end.client_id()) + .map_err(|e| Error::ics02_client(e))?; // The client must not be frozen. if client_state.is_frozen() { @@ -77,7 +78,9 @@ pub fn verify_connection_proof( } // The client must have the consensus state for the height where this proof was created. - let consensus_state = ctx.client_consensus_state(connection_end.client_id(), proof_height)?; + let consensus_state = ctx + .consensus_state(connection_end.client_id(), proof_height) + .map_err(|e| Error::consensus_state_verification_failure(proof_height, e))?; // A counterparty connection id of None causes `unwrap()` below and indicates an internal // error as this is the connection id on the counterparty chain that must always be present. @@ -86,7 +89,7 @@ pub fn verify_connection_proof( .connection_id() .ok_or_else(Error::invalid_counterparty)?; - let client_def = AnyClient::::from_client_type(client_state.client_type()); + let client_def = AnyClient::::from_client_type(client_state.client_type()); // Verify the proof for the connection state against the expected connection end. client_def @@ -111,8 +114,8 @@ pub fn verify_connection_proof( /// complete verification: that the client state the counterparty stores is valid (i.e., not frozen, /// at the same revision as the current chain, with matching chain identifiers, etc) and that the /// `proof` is correct. -pub fn verify_client_proof( - ctx: &dyn ConnectionReader, +pub fn verify_client_proof( + ctx: &dyn LightClientContext, height: Height, connection_end: &ConnectionEnd, expected_client_state: AnyClientState, @@ -120,15 +123,19 @@ pub fn verify_client_proof( proof: &CommitmentProofBytes, ) -> Result<(), Error> { // Fetch the local client state (IBC client running on the host chain). 
- let client_state = ctx.client_state(connection_end.client_id())?; + let client_state = ctx + .client_state(connection_end.client_id()) + .map_err(|e| Error::ics02_client(e))?; if client_state.is_frozen() { return Err(Error::frozen_client(connection_end.client_id().clone())); } - let consensus_state = ctx.client_consensus_state(connection_end.client_id(), proof_height)?; + let consensus_state = ctx + .consensus_state(connection_end.client_id(), proof_height) + .map_err(|e| Error::consensus_state_verification_failure(proof_height, e))?; - let client_def = AnyClient::::from_client_type(client_state.client_type()); + let client_def = AnyClient::::from_client_type(client_state.client_type()); client_def .verify_client_full_state( @@ -146,25 +153,31 @@ pub fn verify_client_proof( }) } -pub fn verify_consensus_proof( - ctx: &dyn ConnectionReader, +pub fn verify_consensus_proof( + ctx: &dyn LightClientContext, height: Height, connection_end: &ConnectionEnd, proof: &ConsensusProof, ) -> Result<(), Error> { // Fetch the client state (IBC client on the local chain). - let client_state = ctx.client_state(connection_end.client_id())?; + let client_state = ctx + .client_state(connection_end.client_id()) + .map_err(|e| Error::ics02_client(e))?; if client_state.is_frozen() { return Err(Error::frozen_client(connection_end.client_id().clone())); } // Fetch the expected consensus state from the historical (local) header data. - let expected_consensus = ctx.host_consensus_state(proof.height())?; + let expected_consensus = ctx + .host_consensus_state(proof.height()) + .map_err(|e| Error::consensus_state_verification_failure(proof.height(), e))?; - let consensus_state = ctx.client_consensus_state(connection_end.client_id(), height)?; + let consensus_state = ctx + .consensus_state(connection_end.client_id(), height) + .map_err(|e| Error::consensus_state_verification_failure(height, e))?; - let client = AnyClient::::from_client_type(client_state.client_type()); + let client = AnyClient::::from_client_type(client_state.client_type()); client .verify_client_consensus_state( @@ -184,14 +197,14 @@ pub fn verify_consensus_proof( /// Checks that `claimed_height` is within normal bounds, i.e., fresh enough so that the chain has /// not pruned it yet, but not newer than the current (actual) height of the local chain. pub fn check_client_consensus_height( - ctx: &dyn ConnectionReader, + ctx: &dyn LightClientContext, claimed_height: Height, ) -> Result<(), Error> { - if claimed_height > ctx.host_current_height() { + if claimed_height > ctx.host_height() { // Fail if the consensus height is too advanced. 
return Err(Error::invalid_consensus_height( claimed_height, - ctx.host_current_height(), + ctx.host_height(), )); } diff --git a/modules/src/core/ics04_channel/context.rs b/modules/src/core/ics04_channel/context.rs index 2f42208af3..13f2df1ffc 100644 --- a/modules/src/core/ics04_channel/context.rs +++ b/modules/src/core/ics04_channel/context.rs @@ -4,9 +4,6 @@ use core::time::Duration; use num_traits::float::FloatCore; -use crate::core::ics02_client::client_consensus::AnyConsensusState; -use crate::core::ics02_client::client_state::AnyClientState; -use crate::core::ics03_connection::connection::ConnectionEnd; use crate::core::ics04_channel::channel::ChannelEnd; use crate::core::ics04_channel::commitment::{AcknowledgementCommitment, PacketCommitment}; use crate::core::ics04_channel::handler::recv_packet::RecvPacketResult; @@ -25,21 +22,8 @@ pub trait ChannelReader { /// Returns the ChannelEnd for the given `port_id` and `chan_id`. fn channel_end(&self, port_channel_id: &(PortId, ChannelId)) -> Result; - /// Returns the ConnectionState for the given identifier `connection_id`. - fn connection_end(&self, connection_id: &ConnectionId) -> Result; - fn connection_channels(&self, cid: &ConnectionId) -> Result, Error>; - /// Returns the ClientState for the given identifier `client_id`. Necessary dependency towards - /// proof verification. - fn client_state(&self, client_id: &ClientId) -> Result; - - fn client_consensus_state( - &self, - client_id: &ClientId, - height: Height, - ) -> Result; - fn get_next_sequence_send( &self, port_channel_id: &(PortId, ChannelId), @@ -90,23 +74,6 @@ pub trait ChannelReader { /// A hashing function for packet commitments fn hash(&self, value: Vec) -> Vec; - /// Returns the current height of the local chain. - fn host_height(&self) -> Height; - - /// Returns the current timestamp of the local chain. - fn host_timestamp(&self) -> Timestamp { - let pending_consensus_state = self - .pending_host_consensus_state() - .expect("host must have pending consensus state"); - pending_consensus_state.timestamp() - } - - /// Returns the `ConsensusState` of the host (local) chain at a specific height. - fn host_consensus_state(&self, height: Height) -> Result; - - /// Returns the pending `ConsensusState` of the host (local) chain. - fn pending_host_consensus_state(&self) -> Result; - /// Returns the time when the client state for the given [`ClientId`] was updated with a header for the given [`Height`] fn client_update_time(&self, client_id: &ClientId, height: Height) -> Result; diff --git a/modules/src/core/ics04_channel/handler.rs b/modules/src/core/ics04_channel/handler.rs index 49d0aa8506..752fae0091 100644 --- a/modules/src/core/ics04_channel/handler.rs +++ b/modules/src/core/ics04_channel/handler.rs @@ -1,14 +1,13 @@ //! This module implements the processing logic for ICS4 (channel) messages. 
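// Illustrative sketch (not part of the patch): with the host and client queries stripped from
// `ChannelReader`, packet handlers read them from the other traits bundled in
// `LightClientContext` (the `ClientReader` side still provides `host_height` and
// `host_timestamp`). A hypothetical timeout check written in that style:
fn packet_not_yet_expired(
    ctx: &dyn LightClientContext,
    timeout_height: Height,
    timeout_timestamp: &Timestamp,
) -> bool {
    let latest_height = ctx.host_height();
    let latest_timestamp = ctx.host_timestamp();
    // Treat an `Expired` verdict from the timestamp comparison as a failed check.
    let timestamp_ok = !matches!(
        latest_timestamp.check_expiry(timeout_timestamp),
        Expiry::Expired
    );
    latest_height < timeout_height && timestamp_ok
}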
-use crate::clients::ics11_beefy::client_def::BeefyTraits; +use crate::clients::crypto_ops::crypto::CryptoOps; use crate::core::ics04_channel::channel::ChannelEnd; -use crate::core::ics04_channel::context::ChannelReader; use crate::core::ics04_channel::error::Error; use crate::core::ics04_channel::msgs::ChannelMsg; use crate::core::ics04_channel::{msgs::PacketMsg, packet::PacketResult}; use crate::core::ics24_host::identifier::{ChannelId, PortId}; use crate::core::ics26_routing::context::{ - Ics26Context, ModuleId, ModuleOutputBuilder, OnRecvPacketAck, Router, + Ics26Context, LightClientContext, ModuleId, ModuleOutput, OnRecvPacketAck, Router, }; use crate::handler::{HandlerOutput, HandlerOutputBuilder}; @@ -59,21 +58,21 @@ where /// General entry point for processing any type of message related to the ICS4 channel open and /// channel close handshake protocols. -pub fn channel_dispatch( +pub fn channel_dispatch( ctx: &Ctx, msg: &ChannelMsg, ) -> Result<(HandlerOutputBuilder<()>, ChannelResult), Error> where - Ctx: ChannelReader, - Beefy: BeefyTraits, + Ctx: LightClientContext, + Crypto: CryptoOps, { let output = match msg { ChannelMsg::ChannelOpenInit(msg) => chan_open_init::process(ctx, msg), - ChannelMsg::ChannelOpenTry(msg) => chan_open_try::process::(ctx, msg), - ChannelMsg::ChannelOpenAck(msg) => chan_open_ack::process::(ctx, msg), - ChannelMsg::ChannelOpenConfirm(msg) => chan_open_confirm::process::(ctx, msg), + ChannelMsg::ChannelOpenTry(msg) => chan_open_try::process::(ctx, msg), + ChannelMsg::ChannelOpenAck(msg) => chan_open_ack::process::(ctx, msg), + ChannelMsg::ChannelOpenConfirm(msg) => chan_open_confirm::process::(ctx, msg), ChannelMsg::ChannelCloseInit(msg) => chan_close_init::process(ctx, msg), - ChannelMsg::ChannelCloseConfirm(msg) => chan_close_confirm::process::(ctx, msg), + ChannelMsg::ChannelCloseConfirm(msg) => chan_close_confirm::process::(ctx, msg), }?; let HandlerOutput { result, @@ -168,19 +167,19 @@ where } /// Dispatcher for processing any type of message related to the ICS4 packet protocols. 
-pub fn packet_dispatch( +pub fn packet_dispatch( ctx: &Ctx, msg: &PacketMsg, ) -> Result<(HandlerOutputBuilder<()>, PacketResult), Error> where - Ctx: ChannelReader, - Beefy: BeefyTraits, + Ctx: LightClientContext, + Crypto: CryptoOps, { let output = match msg { - PacketMsg::RecvPacket(msg) => recv_packet::process::(ctx, msg), - PacketMsg::AckPacket(msg) => acknowledgement::process::(ctx, msg), - PacketMsg::ToPacket(msg) => timeout::process::(ctx, msg), - PacketMsg::ToClosePacket(msg) => timeout_on_close::process::(ctx, msg), + PacketMsg::RecvPacket(msg) => recv_packet::process::(ctx, msg), + PacketMsg::AckPacket(msg) => acknowledgement::process::(ctx, msg), + PacketMsg::ToPacket(msg) => timeout::process::(ctx, msg), + PacketMsg::ToClosePacket(msg) => timeout_on_close::process::(ctx, msg), }?; let HandlerOutput { result, diff --git a/modules/src/core/ics04_channel/handler/acknowledgement.rs b/modules/src/core/ics04_channel/handler/acknowledgement.rs index 632c4159bd..032a73386b 100644 --- a/modules/src/core/ics04_channel/handler/acknowledgement.rs +++ b/modules/src/core/ics04_channel/handler/acknowledgement.rs @@ -1,13 +1,14 @@ -use crate::clients::ics11_beefy::client_def::BeefyTraits; +use crate::clients::crypto_ops::crypto::CryptoOps; use crate::core::ics03_connection::connection::State as ConnectionState; use crate::core::ics04_channel::channel::State; use crate::core::ics04_channel::channel::{Counterparty, Order}; +use crate::core::ics04_channel::error::Error; use crate::core::ics04_channel::events::AcknowledgePacket; use crate::core::ics04_channel::handler::verify::verify_packet_acknowledgement_proofs; use crate::core::ics04_channel::msgs::acknowledgement::MsgAcknowledgement; use crate::core::ics04_channel::packet::{PacketResult, Sequence}; -use crate::core::ics04_channel::{context::ChannelReader, error::Error}; use crate::core::ics24_host::identifier::{ChannelId, PortId}; +use crate::core::ics26_routing::context::LightClientContext; use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; @@ -20,8 +21,8 @@ pub struct AckPacketResult { pub seq_number: Option, } -pub fn process( - ctx: &dyn ChannelReader, +pub fn process( + ctx: &dyn LightClientContext, msg: &MsgAcknowledgement, ) -> HandlerResult { let mut output = HandlerOutput::builder(); @@ -47,7 +48,9 @@ pub fn process( )); } - let connection_end = ctx.connection_end(&source_channel_end.connection_hops()[0])?; + let connection_end = ctx + .connection_end(&source_channel_end.connection_hops()[0]) + .map_err(|_| Error::connection_not_open(source_channel_end.connection_hops()[0].clone()))?; if !connection_end.state_matches(&ConnectionState::Open) { return Err(Error::connection_not_open( @@ -73,7 +76,7 @@ pub fn process( } // Verify the acknowledgement proof - verify_packet_acknowledgement_proofs::( + verify_packet_acknowledgement_proofs::( ctx, msg.proofs.height(), packet, diff --git a/modules/src/core/ics04_channel/handler/chan_close_confirm.rs b/modules/src/core/ics04_channel/handler/chan_close_confirm.rs index 87b71442eb..bad22ff279 100644 --- a/modules/src/core/ics04_channel/handler/chan_close_confirm.rs +++ b/modules/src/core/ics04_channel/handler/chan_close_confirm.rs @@ -1,19 +1,20 @@ //! Protocol logic specific to ICS4 messages of type `MsgChannelCloseConfirm`. 
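// Illustrative sketch (not part of the patch): the channel and packet dispatchers above are
// driven the same way as the ICS02/ICS03 ones, with the crypto provider named explicitly at
// the call site. `HostContext` is again a hypothetical type implementing both traits.
fn route_channel_msg<HostContext>(
    ctx: &HostContext,
    msg: &ChannelMsg,
) -> Result<(HandlerOutputBuilder<()>, ChannelResult), Error>
where
    HostContext: LightClientContext + CryptoOps,
{
    channel_dispatch::<HostContext, HostContext>(ctx, msg)
}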
-use crate::clients::ics11_beefy::client_def::BeefyTraits; + +use crate::clients::crypto_ops::crypto::CryptoOps; use crate::core::ics03_connection::connection::State as ConnectionState; use crate::core::ics04_channel::channel::{ChannelEnd, Counterparty, State}; -use crate::core::ics04_channel::context::ChannelReader; use crate::core::ics04_channel::error::Error; use crate::core::ics04_channel::events::Attributes; use crate::core::ics04_channel::handler::verify::verify_channel_proofs; use crate::core::ics04_channel::handler::{ChannelIdState, ChannelResult}; use crate::core::ics04_channel::msgs::chan_close_confirm::MsgChannelCloseConfirm; +use crate::core::ics26_routing::context::LightClientContext; use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; -pub(crate) fn process( - ctx: &dyn ChannelReader, +pub(crate) fn process( + ctx: &dyn LightClientContext, msg: &MsgChannelCloseConfirm, ) -> HandlerResult { let mut output = HandlerOutput::builder(); @@ -34,7 +35,9 @@ pub(crate) fn process( )); } - let conn = ctx.connection_end(&channel_end.connection_hops()[0])?; + let conn = ctx + .connection_end(&channel_end.connection_hops()[0]) + .map_err(|_| Error::connection_not_open(channel_end.connection_hops()[0].clone()))?; if !conn.state_matches(&ConnectionState::Open) { return Err(Error::connection_not_open( @@ -62,7 +65,7 @@ pub(crate) fn process( channel_end.version().clone(), ); - verify_channel_proofs::( + verify_channel_proofs::( ctx, msg.proofs.height(), &channel_end, @@ -99,7 +102,6 @@ pub(crate) fn process( #[cfg(test)] mod tests { - use crate::core::ics04_channel::context::ChannelReader; use crate::core::ics04_channel::msgs::chan_close_confirm::test_util::get_dummy_raw_msg_chan_close_confirm; use crate::core::ics04_channel::msgs::chan_close_confirm::MsgChannelCloseConfirm; use crate::core::ics04_channel::msgs::ChannelMsg; diff --git a/modules/src/core/ics04_channel/handler/chan_close_init.rs b/modules/src/core/ics04_channel/handler/chan_close_init.rs index 3e503a7753..07a34e9eb7 100644 --- a/modules/src/core/ics04_channel/handler/chan_close_init.rs +++ b/modules/src/core/ics04_channel/handler/chan_close_init.rs @@ -1,16 +1,16 @@ //! Protocol logic specific to ICS4 messages of type `MsgChannelCloseInit`. 
use crate::core::ics03_connection::connection::State as ConnectionState; use crate::core::ics04_channel::channel::State; -use crate::core::ics04_channel::context::ChannelReader; use crate::core::ics04_channel::error::Error; use crate::core::ics04_channel::events::Attributes; use crate::core::ics04_channel::handler::{ChannelIdState, ChannelResult}; use crate::core::ics04_channel::msgs::chan_close_init::MsgChannelCloseInit; +use crate::core::ics26_routing::context::LightClientContext; use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; pub(crate) fn process( - ctx: &dyn ChannelReader, + ctx: &dyn LightClientContext, msg: &MsgChannelCloseInit, ) -> HandlerResult { let mut output = HandlerOutput::builder(); @@ -34,7 +34,9 @@ pub(crate) fn process( )); } - let conn = ctx.connection_end(&channel_end.connection_hops()[0])?; + let conn = ctx + .connection_end(&channel_end.connection_hops()[0]) + .map_err(|_| Error::connection_not_open(channel_end.connection_hops()[0].clone()))?; if !conn.state_matches(&ConnectionState::Open) { return Err(Error::connection_not_open( @@ -70,7 +72,6 @@ pub(crate) fn process( #[cfg(test)] mod tests { - use crate::core::ics04_channel::context::ChannelReader; use crate::core::ics04_channel::msgs::chan_close_init::test_util::get_dummy_raw_msg_chan_close_init; use crate::core::ics04_channel::msgs::chan_close_init::MsgChannelCloseInit; use crate::core::ics04_channel::msgs::ChannelMsg; diff --git a/modules/src/core/ics04_channel/handler/chan_open_ack.rs b/modules/src/core/ics04_channel/handler/chan_open_ack.rs index 38654bdfe3..690d3cee20 100644 --- a/modules/src/core/ics04_channel/handler/chan_open_ack.rs +++ b/modules/src/core/ics04_channel/handler/chan_open_ack.rs @@ -1,19 +1,19 @@ //! Protocol logic specific to ICS4 messages of type `MsgChannelOpenAck`. -use crate::clients::ics11_beefy::client_def::BeefyTraits; +use crate::clients::crypto_ops::crypto::CryptoOps; use crate::core::ics03_connection::connection::State as ConnectionState; use crate::core::ics04_channel::channel::{ChannelEnd, Counterparty, State}; -use crate::core::ics04_channel::context::ChannelReader; use crate::core::ics04_channel::error::Error; use crate::core::ics04_channel::events::Attributes; use crate::core::ics04_channel::handler::verify::verify_channel_proofs; use crate::core::ics04_channel::handler::{ChannelIdState, ChannelResult}; use crate::core::ics04_channel::msgs::chan_open_ack::MsgChannelOpenAck; +use crate::core::ics26_routing::context::LightClientContext; use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; -pub(crate) fn process( - ctx: &dyn ChannelReader, +pub(crate) fn process( + ctx: &dyn LightClientContext, msg: &MsgChannelOpenAck, ) -> HandlerResult { let mut output = HandlerOutput::builder(); @@ -38,7 +38,9 @@ pub(crate) fn process( )); } - let conn = ctx.connection_end(&channel_end.connection_hops()[0])?; + let conn = ctx + .connection_end(&channel_end.connection_hops()[0]) + .map_err(|_| Error::connection_not_open(channel_end.connection_hops()[0].clone()))?; if !conn.state_matches(&ConnectionState::Open) { return Err(Error::connection_not_open( @@ -70,7 +72,7 @@ pub(crate) fn process( channel_end.set_counterparty_channel_id(msg.counterparty_channel_id); //2. 
Verify proofs - verify_channel_proofs::( + verify_channel_proofs::( ctx, msg.proofs.height(), &channel_end, diff --git a/modules/src/core/ics04_channel/handler/chan_open_confirm.rs b/modules/src/core/ics04_channel/handler/chan_open_confirm.rs index 20751cdfc6..140425c6d2 100644 --- a/modules/src/core/ics04_channel/handler/chan_open_confirm.rs +++ b/modules/src/core/ics04_channel/handler/chan_open_confirm.rs @@ -1,19 +1,19 @@ //! Protocol logic specific to ICS4 messages of type `MsgChannelOpenConfirm`. -use crate::clients::ics11_beefy::client_def::BeefyTraits; +use crate::clients::crypto_ops::crypto::CryptoOps; use crate::core::ics03_connection::connection::State as ConnectionState; use crate::core::ics04_channel::channel::{ChannelEnd, Counterparty, State}; -use crate::core::ics04_channel::context::ChannelReader; use crate::core::ics04_channel::error::Error; use crate::core::ics04_channel::events::Attributes; use crate::core::ics04_channel::handler::verify::verify_channel_proofs; use crate::core::ics04_channel::handler::{ChannelIdState, ChannelResult}; use crate::core::ics04_channel::msgs::chan_open_confirm::MsgChannelOpenConfirm; +use crate::core::ics26_routing::context::LightClientContext; use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; -pub(crate) fn process( - ctx: &dyn ChannelReader, +pub(crate) fn process( + ctx: &dyn LightClientContext, msg: &MsgChannelOpenConfirm, ) -> HandlerResult { let mut output = HandlerOutput::builder(); @@ -37,7 +37,9 @@ pub(crate) fn process( )); } - let conn = ctx.connection_end(&channel_end.connection_hops()[0])?; + let conn = ctx + .connection_end(&channel_end.connection_hops()[0]) + .map_err(|_| Error::connection_not_open(channel_end.connection_hops()[0].clone()))?; if !conn.state_matches(&ConnectionState::Open) { return Err(Error::connection_not_open( @@ -65,7 +67,7 @@ pub(crate) fn process( channel_end.version().clone(), ); //2. Verify proofs - verify_channel_proofs::( + verify_channel_proofs::( ctx, msg.proofs.height(), &channel_end, @@ -111,7 +113,6 @@ mod tests { use crate::core::ics03_connection::connection::ConnectionEnd; use crate::core::ics03_connection::connection::Counterparty as ConnectionCounterparty; use crate::core::ics03_connection::connection::State as ConnectionState; - use crate::core::ics03_connection::context::ConnectionReader; use crate::core::ics03_connection::msgs::test_util::get_dummy_raw_counterparty; use crate::core::ics03_connection::version::get_compatible_versions; use crate::core::ics04_channel::channel::{ChannelEnd, Counterparty, Order, State}; @@ -139,7 +140,7 @@ mod tests { let client_id = ClientId::new(ClientType::Mock, 24).unwrap(); let conn_id = ConnectionId::new(2); let context = MockContext::default(); - let client_consensus_state_height = context.host_current_height().revision_height; + let client_consensus_state_height = context.host_height().revision_height; // The connection underlying the channel we're trying to open. let conn_end = ConnectionEnd::new( diff --git a/modules/src/core/ics04_channel/handler/chan_open_init.rs b/modules/src/core/ics04_channel/handler/chan_open_init.rs index 9a7bff9ff7..f055a7b534 100644 --- a/modules/src/core/ics04_channel/handler/chan_open_init.rs +++ b/modules/src/core/ics04_channel/handler/chan_open_init.rs @@ -1,18 +1,18 @@ //! Protocol logic specific to ICS4 messages of type `MsgChannelOpenInit`. 
use crate::core::ics04_channel::channel::{ChannelEnd, State}; -use crate::core::ics04_channel::context::ChannelReader; use crate::core::ics04_channel::error::Error; use crate::core::ics04_channel::events::Attributes; use crate::core::ics04_channel::handler::{ChannelIdState, ChannelResult}; use crate::core::ics04_channel::msgs::chan_open_init::MsgChannelOpenInit; use crate::core::ics24_host::identifier::ChannelId; +use crate::core::ics26_routing::context::LightClientContext; use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; pub(crate) fn process( - ctx: &dyn ChannelReader, + ctx: &dyn LightClientContext, msg: &MsgChannelOpenInit, ) -> HandlerResult { let mut output = HandlerOutput::builder(); @@ -25,7 +25,9 @@ pub(crate) fn process( } // An IBC connection running on the local (host) chain should exist. - let conn = ctx.connection_end(&msg.channel.connection_hops()[0])?; + let conn = ctx + .connection_end(&msg.channel.connection_hops()[0]) + .map_err(|_| Error::connection_not_open(msg.channel.connection_hops()[0].clone()))?; let get_versions = conn.versions(); let version = match get_versions { [version] => version, @@ -89,7 +91,6 @@ mod tests { use crate::core::ics03_connection::msgs::conn_open_init::MsgConnectionOpenInit; use crate::core::ics03_connection::version::get_compatible_versions; use crate::core::ics04_channel::channel::State; - use crate::core::ics04_channel::context::ChannelReader; use crate::core::ics04_channel::handler::channel_dispatch; use crate::core::ics04_channel::msgs::chan_open_init::test_util::get_dummy_raw_msg_chan_open_init; use crate::core::ics04_channel::msgs::chan_open_init::MsgChannelOpenInit; diff --git a/modules/src/core/ics04_channel/handler/chan_open_try.rs b/modules/src/core/ics04_channel/handler/chan_open_try.rs index c8ddd5a745..fc71016a30 100644 --- a/modules/src/core/ics04_channel/handler/chan_open_try.rs +++ b/modules/src/core/ics04_channel/handler/chan_open_try.rs @@ -1,21 +1,21 @@ //! Protocol logic specific to ICS4 messages of type `MsgChannelOpenTry`. 
-use crate::clients::ics11_beefy::client_def::BeefyTraits; +use crate::clients::crypto_ops::crypto::CryptoOps; use crate::core::ics03_connection::connection::State as ConnectionState; use crate::core::ics04_channel::channel::{ChannelEnd, Counterparty, State}; -use crate::core::ics04_channel::context::ChannelReader; use crate::core::ics04_channel::error::Error; use crate::core::ics04_channel::events::Attributes; use crate::core::ics04_channel::handler::verify::verify_channel_proofs; use crate::core::ics04_channel::handler::{ChannelIdState, ChannelResult}; use crate::core::ics04_channel::msgs::chan_open_try::MsgChannelOpenTry; use crate::core::ics24_host::identifier::ChannelId; +use crate::core::ics26_routing::context::LightClientContext; use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; -pub(crate) fn process( - ctx: &dyn ChannelReader, +pub(crate) fn process( + ctx: &dyn LightClientContext, msg: &MsgChannelOpenTry, ) -> HandlerResult { let mut output = HandlerOutput::builder(); @@ -70,7 +70,9 @@ pub(crate) fn process( )); } - let conn = ctx.connection_end(&msg.channel.connection_hops()[0])?; + let conn = ctx + .connection_end(&msg.channel.connection_hops()[0]) + .map_err(|_| Error::connection_not_open(msg.channel.connection_hops()[0].clone()))?; if !conn.state_matches(&ConnectionState::Open) { return Err(Error::connection_not_open( msg.channel.connection_hops()[0].clone(), @@ -109,7 +111,7 @@ pub(crate) fn process( ); // 2. Actual proofs are verified now. - verify_channel_proofs::( + verify_channel_proofs::( ctx, msg.proofs.height(), &new_channel_end, diff --git a/modules/src/core/ics04_channel/handler/recv_packet.rs b/modules/src/core/ics04_channel/handler/recv_packet.rs index 340fb5a7e7..3a63611328 100644 --- a/modules/src/core/ics04_channel/handler/recv_packet.rs +++ b/modules/src/core/ics04_channel/handler/recv_packet.rs @@ -1,13 +1,13 @@ -use crate::clients::ics11_beefy::client_def::BeefyTraits; +use crate::clients::crypto_ops::crypto::CryptoOps; use crate::core::ics03_connection::connection::State as ConnectionState; use crate::core::ics04_channel::channel::{Counterparty, Order, State}; -use crate::core::ics04_channel::context::ChannelReader; use crate::core::ics04_channel::error::Error; use crate::core::ics04_channel::events::ReceivePacket; use crate::core::ics04_channel::handler::verify::verify_packet_recv_proofs; use crate::core::ics04_channel::msgs::recv_packet::MsgRecvPacket; use crate::core::ics04_channel::packet::{PacketResult, Receipt, Sequence}; use crate::core::ics24_host::identifier::{ChannelId, PortId}; +use crate::core::ics26_routing::context::LightClientContext; use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::timestamp::Expiry; @@ -28,8 +28,8 @@ pub enum RecvPacketResult { NoOp, } -pub fn process( - ctx: &dyn ChannelReader, +pub fn process( + ctx: &dyn LightClientContext, msg: &MsgRecvPacket, ) -> HandlerResult { let mut output = HandlerOutput::builder(); @@ -55,7 +55,9 @@ pub fn process( )); } - let connection_end = ctx.connection_end(&dest_channel_end.connection_hops()[0])?; + let connection_end = ctx + .connection_end(&dest_channel_end.connection_hops()[0]) + .map_err(|_| Error::connection_not_open(dest_channel_end.connection_hops()[0].clone()))?; if !connection_end.state_matches(&ConnectionState::Open) { return Err(Error::connection_not_open( @@ -76,7 +78,7 @@ pub fn process( return Err(Error::low_packet_timestamp()); } - verify_packet_recv_proofs::( + 
verify_packet_recv_proofs::( ctx, msg.proofs.height(), packet, @@ -149,7 +151,6 @@ pub fn process( #[cfg(test)] mod tests { - use crate::core::ics04_channel::context::ChannelReader; use crate::prelude::*; use test_log::test; diff --git a/modules/src/core/ics04_channel/handler/send_packet.rs b/modules/src/core/ics04_channel/handler/send_packet.rs index 00e573aef2..4fdd48617a 100644 --- a/modules/src/core/ics04_channel/handler/send_packet.rs +++ b/modules/src/core/ics04_channel/handler/send_packet.rs @@ -4,8 +4,9 @@ use crate::core::ics04_channel::channel::State; use crate::core::ics04_channel::commitment::PacketCommitment; use crate::core::ics04_channel::events::SendPacket; use crate::core::ics04_channel::packet::{PacketResult, Sequence}; -use crate::core::ics04_channel::{context::ChannelReader, error::Error, packet::Packet}; +use crate::core::ics04_channel::{error::Error, packet::Packet}; use crate::core::ics24_host::identifier::{ChannelId, PortId}; +use crate::core::ics26_routing::context::LightClientContext; use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; @@ -20,7 +21,10 @@ pub struct SendPacketResult { pub commitment: PacketCommitment, } -pub fn send_packet(ctx: &dyn ChannelReader, packet: Packet) -> HandlerResult { +pub fn send_packet( + ctx: &dyn LightClientContext, + packet: Packet, +) -> HandlerResult { let mut output = HandlerOutput::builder(); let source_channel_end = @@ -42,11 +46,15 @@ pub fn send_packet(ctx: &dyn ChannelReader, packet: Packet) -> HandlerResult HandlerResult, } -pub fn process( - ctx: &dyn ChannelReader, +pub fn process( + ctx: &dyn LightClientContext, msg: &MsgTimeout, ) -> HandlerResult { let mut output = HandlerOutput::builder(); @@ -49,7 +50,9 @@ pub fn process( )); } - let connection_end = ctx.connection_end(&source_channel_end.connection_hops()[0])?; + let connection_end = ctx + .connection_end(&source_channel_end.connection_hops()[0]) + .map_err(|_| Error::connection_not_open(source_channel_end.connection_hops()[0].clone()))?; let client_id = connection_end.client_id().clone(); @@ -64,7 +67,9 @@ pub fn process( )); } - let consensus_state = ctx.client_consensus_state(&client_id, proof_height)?; + let consensus_state = ctx + .consensus_state(&client_id, proof_height) + .map_err(|_| Error::error_invalid_consensus_state())?; let proof_timestamp = consensus_state.timestamp(); @@ -99,7 +104,7 @@ pub fn process( msg.next_sequence_recv, )); } - verify_next_sequence_recv::( + verify_next_sequence_recv::( ctx, msg.proofs.height(), &connection_end, @@ -116,7 +121,7 @@ pub fn process( channel: Some(source_channel_end), }) } else { - verify_packet_receipt_absence::( + verify_packet_receipt_absence::( ctx, msg.proofs.height(), &connection_end, diff --git a/modules/src/core/ics04_channel/handler/timeout_on_close.rs b/modules/src/core/ics04_channel/handler/timeout_on_close.rs index 4cc2063516..f1ff655be4 100644 --- a/modules/src/core/ics04_channel/handler/timeout_on_close.rs +++ b/modules/src/core/ics04_channel/handler/timeout_on_close.rs @@ -1,4 +1,4 @@ -use crate::clients::ics11_beefy::client_def::BeefyTraits; +use crate::clients::crypto_ops::crypto::CryptoOps; use crate::core::ics04_channel::channel::State; use crate::core::ics04_channel::channel::{ChannelEnd, Counterparty, Order}; use crate::core::ics04_channel::events::TimeoutOnClosePacket; @@ -8,15 +8,14 @@ use crate::core::ics04_channel::handler::verify::{ }; use crate::core::ics04_channel::msgs::timeout_on_close::MsgTimeoutOnClose; use 
crate::core::ics04_channel::packet::PacketResult; -use crate::core::ics04_channel::{ - context::ChannelReader, error::Error, handler::timeout::TimeoutPacketResult, -}; +use crate::core::ics04_channel::{error::Error, handler::timeout::TimeoutPacketResult}; +use crate::core::ics26_routing::context::LightClientContext; use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; -pub fn process( - ctx: &dyn ChannelReader, +pub fn process( + ctx: &dyn LightClientContext, msg: &MsgTimeoutOnClose, ) -> HandlerResult { let mut output = HandlerOutput::builder(); @@ -38,7 +37,9 @@ pub fn process( )); } - let connection_end = ctx.connection_end(&source_channel_end.connection_hops()[0])?; + let connection_end = ctx + .connection_end(&source_channel_end.connection_hops()[0]) + .map_err(|_| Error::connection_not_open(source_channel_end.connection_hops()[0].clone()))?; //verify the packet was sent, check the store let packet_commitment = ctx.get_packet_commitment(&( @@ -74,7 +75,7 @@ pub fn process( source_channel_end.version().clone(), ); - verify_channel_proofs::( + verify_channel_proofs::( ctx, msg.proofs.height(), &source_channel_end, @@ -90,7 +91,7 @@ pub fn process( msg.next_sequence_recv, )); } - verify_next_sequence_recv::( + verify_next_sequence_recv::( ctx, msg.proofs.height(), &connection_end, @@ -106,7 +107,7 @@ pub fn process( channel: Some(source_channel_end), }) } else { - verify_packet_receipt_absence::( + verify_packet_receipt_absence::( ctx, msg.proofs.height(), &connection_end, diff --git a/modules/src/core/ics04_channel/handler/verify.rs b/modules/src/core/ics04_channel/handler/verify.rs index ee77fbd837..664f30928c 100644 --- a/modules/src/core/ics04_channel/handler/verify.rs +++ b/modules/src/core/ics04_channel/handler/verify.rs @@ -1,20 +1,20 @@ -use crate::clients::ics11_beefy::client_def::BeefyTraits; +use crate::clients::crypto_ops::crypto::CryptoOps; use crate::core::ics02_client::client_consensus::ConsensusState; use crate::core::ics02_client::client_state::ClientState; use crate::core::ics02_client::{client_def::AnyClient, client_def::ClientDef}; use crate::core::ics03_connection::connection::ConnectionEnd; use crate::core::ics04_channel::channel::ChannelEnd; -use crate::core::ics04_channel::context::ChannelReader; use crate::core::ics04_channel::error::Error; use crate::core::ics04_channel::msgs::acknowledgement::Acknowledgement; use crate::core::ics04_channel::packet::{Packet, Sequence}; +use crate::core::ics26_routing::context::LightClientContext; use crate::prelude::*; use crate::proofs::Proofs; use crate::Height; /// Entry point for verifying all proofs bundled in any ICS4 message for channel protocols. -pub fn verify_channel_proofs( - ctx: &dyn ChannelReader, +pub fn verify_channel_proofs( + ctx: &dyn LightClientContext, height: Height, channel_end: &ChannelEnd, connection_end: &ConnectionEnd, @@ -24,16 +24,20 @@ pub fn verify_channel_proofs( // This is the client which will perform proof verification. let client_id = connection_end.client_id().clone(); - let client_state = ctx.client_state(&client_id)?; + let client_state = ctx + .client_state(&client_id) + .map_err(|_| Error::implementation_specific())?; // The client must not be frozen. 
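// Illustration only (not part of the patch): both timeout handlers above choose the proof
// obligation from the channel ordering, mirroring `timeout.rs` and `timeout_on_close.rs`.
// The argument lists follow the `verify_*` signatures further below; the helper itself is
// hypothetical.
fn verify_timeout_proof<Crypto: CryptoOps>(
    ctx: &dyn LightClientContext,
    channel: &ChannelEnd,
    connection_end: &ConnectionEnd,
    packet: Packet,
    next_sequence_recv: Sequence,
    proofs: &Proofs,
) -> Result<(), Error> {
    if channel.order_matches(&Order::Ordered) {
        // Ordered channel: prove the counterparty's receive sequence moved past this packet.
        verify_next_sequence_recv::<Crypto>(
            ctx,
            proofs.height(),
            connection_end,
            packet,
            next_sequence_recv,
            proofs,
        )
    } else {
        // Unordered channel: prove the counterparty never stored a receipt for this packet.
        verify_packet_receipt_absence::<Crypto>(
            ctx,
            proofs.height(),
            connection_end,
            packet,
            proofs,
        )
    }
}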
if client_state.is_frozen() { return Err(Error::frozen_client(client_id)); } - let consensus_state = ctx.client_consensus_state(&client_id, proofs.height())?; + let consensus_state = ctx + .consensus_state(&client_id, proofs.height()) + .map_err(|_| Error::error_invalid_consensus_state())?; - let client_def = AnyClient::::from_client_type(client_state.client_type()); + let client_def = AnyClient::::from_client_type(client_state.client_type()); // Verify the proof for the channel state against the expected channel end. // A counterparty channel id of None in not possible, and is checked by validate_basic in msg. @@ -54,24 +58,28 @@ pub fn verify_channel_proofs( } /// Entry point for verifying all proofs bundled in a ICS4 packet recv. message. -pub fn verify_packet_recv_proofs( - ctx: &dyn ChannelReader, +pub fn verify_packet_recv_proofs( + ctx: &dyn LightClientContext, height: Height, packet: &Packet, connection_end: &ConnectionEnd, proofs: &Proofs, ) -> Result<(), Error> { let client_id = connection_end.client_id(); - let client_state = ctx.client_state(client_id)?; + let client_state = ctx + .client_state(client_id) + .map_err(|_| Error::implementation_specific())?; // The client must not be frozen. if client_state.is_frozen() { return Err(Error::frozen_client(client_id.clone())); } - let consensus_state = ctx.client_consensus_state(client_id, proofs.height())?; + let consensus_state = ctx + .consensus_state(client_id, proofs.height()) + .map_err(|_| Error::error_invalid_consensus_state())?; - let client_def = AnyClient::::from_client_type(client_state.client_type()); + let client_def = AnyClient::::from_client_type(client_state.client_type()); let commitment = ctx.packet_commitment( packet.data.clone(), @@ -100,8 +108,8 @@ pub fn verify_packet_recv_proofs( } /// Entry point for verifying all proofs bundled in an ICS4 packet ack message. -pub fn verify_packet_acknowledgement_proofs( - ctx: &dyn ChannelReader, +pub fn verify_packet_acknowledgement_proofs( + ctx: &dyn LightClientContext, height: Height, packet: &Packet, acknowledgement: Acknowledgement, @@ -109,18 +117,22 @@ pub fn verify_packet_acknowledgement_proofs( proofs: &Proofs, ) -> Result<(), Error> { let client_id = connection_end.client_id(); - let client_state = ctx.client_state(client_id)?; + let client_state = ctx + .client_state(client_id) + .map_err(|_| Error::implementation_specific())?; // The client must not be frozen. if client_state.is_frozen() { return Err(Error::frozen_client(client_id.clone())); } - let consensus_state = ctx.client_consensus_state(client_id, proofs.height())?; + let consensus_state = ctx + .consensus_state(client_id, proofs.height()) + .map_err(|_| Error::error_invalid_consensus_state())?; let ack_commitment = ctx.ack_commitment(acknowledgement); - let client_def = AnyClient::::from_client_type(client_state.client_type()); + let client_def = AnyClient::::from_client_type(client_state.client_type()); // Verify the proof for the packet against the chain store. client_def @@ -143,8 +155,8 @@ pub fn verify_packet_acknowledgement_proofs( } /// Entry point for verifying all timeout proofs. 
-pub fn verify_next_sequence_recv( - ctx: &dyn ChannelReader, +pub fn verify_next_sequence_recv( + ctx: &dyn LightClientContext, height: Height, connection_end: &ConnectionEnd, packet: Packet, @@ -152,16 +164,20 @@ pub fn verify_next_sequence_recv( proofs: &Proofs, ) -> Result<(), Error> { let client_id = connection_end.client_id(); - let client_state = ctx.client_state(client_id)?; + let client_state = ctx + .client_state(client_id) + .map_err(|_| Error::implementation_specific())?; // The client must not be frozen. if client_state.is_frozen() { return Err(Error::frozen_client(client_id.clone())); } - let consensus_state = ctx.client_consensus_state(client_id, proofs.height())?; + let consensus_state = ctx + .consensus_state(client_id, proofs.height()) + .map_err(|_| Error::error_invalid_consensus_state())?; - let client_def = AnyClient::::from_client_type(client_state.client_type()); + let client_def = AnyClient::::from_client_type(client_state.client_type()); // Verify the proof for the packet against the chain store. client_def @@ -182,24 +198,28 @@ pub fn verify_next_sequence_recv( Ok(()) } -pub fn verify_packet_receipt_absence( - ctx: &dyn ChannelReader, +pub fn verify_packet_receipt_absence( + ctx: &dyn LightClientContext, height: Height, connection_end: &ConnectionEnd, packet: Packet, proofs: &Proofs, ) -> Result<(), Error> { let client_id = connection_end.client_id(); - let client_state = ctx.client_state(client_id)?; + let client_state = ctx + .client_state(client_id) + .map_err(|_| Error::implementation_specific())?; // The client must not be frozen. if client_state.is_frozen() { return Err(Error::frozen_client(client_id.clone())); } - let consensus_state = ctx.client_consensus_state(client_id, proofs.height())?; + let consensus_state = ctx + .consensus_state(client_id, proofs.height()) + .map_err(|_| Error::error_invalid_consensus_state())?; - let client_def = AnyClient::::from_client_type(client_state.client_type()); + let client_def = AnyClient::::from_client_type(client_state.client_type()); // Verify the proof for the packet against the chain store. 
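// Illustration only (not part of the patch): each ICS04 `verify_*` function above now repeats
// the same prelude: fetch the client state, reject frozen clients, fetch the consensus state
// at the proof height, then build the `client_def` for `Crypto`. A hypothetical helper
// capturing that shared shape (return types follow the `Any*` enums used in these hunks):
fn verification_prelude<Crypto: CryptoOps>(
    ctx: &dyn LightClientContext,
    connection_end: &ConnectionEnd,
    proof_height: Height,
) -> Result<(AnyClientState, AnyConsensusState, AnyClient<Crypto>), Error> {
    let client_id = connection_end.client_id();
    let client_state = ctx
        .client_state(client_id)
        .map_err(|_| Error::implementation_specific())?;
    if client_state.is_frozen() {
        return Err(Error::frozen_client(client_id.clone()));
    }
    let consensus_state = ctx
        .consensus_state(client_id, proof_height)
        .map_err(|_| Error::error_invalid_consensus_state())?;
    let client_def = AnyClient::<Crypto>::from_client_type(client_state.client_type());
    Ok((client_state, consensus_state, client_def))
}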
client_def diff --git a/modules/src/core/ics04_channel/handler/write_acknowledgement.rs b/modules/src/core/ics04_channel/handler/write_acknowledgement.rs index 3bf30c57c0..79e1bad6c8 100644 --- a/modules/src/core/ics04_channel/handler/write_acknowledgement.rs +++ b/modules/src/core/ics04_channel/handler/write_acknowledgement.rs @@ -1,9 +1,10 @@ use crate::core::ics04_channel::channel::State; use crate::core::ics04_channel::commitment::AcknowledgementCommitment; +use crate::core::ics04_channel::error::Error; use crate::core::ics04_channel::events::WriteAcknowledgement; use crate::core::ics04_channel::packet::{Packet, PacketResult, Sequence}; -use crate::core::ics04_channel::{context::ChannelReader, error::Error}; use crate::core::ics24_host::identifier::{ChannelId, PortId}; +use crate::core::ics26_routing::context::LightClientContext; use crate::prelude::*; use crate::{ events::IbcEvent, @@ -19,7 +20,7 @@ pub struct WriteAckPacketResult { } pub fn process( - ctx: &dyn ChannelReader, + ctx: &dyn LightClientContext, packet: Packet, ack: Vec, ) -> HandlerResult { @@ -73,7 +74,6 @@ pub fn process( #[cfg(test)] mod tests { - use crate::core::ics04_channel::context::ChannelReader; use crate::prelude::*; use test_log::test; diff --git a/modules/src/core/ics05_port/error.rs b/modules/src/core/ics05_port/error.rs index 4c49d3d800..e06ffc9081 100644 --- a/modules/src/core/ics05_port/error.rs +++ b/modules/src/core/ics05_port/error.rs @@ -2,7 +2,7 @@ use crate::core::ics24_host::identifier::PortId; use flex_error::define_error; define_error! { - #[derive(Debug, PartialEq, Eq)] + #[derive(Debug, PartialEq, Eq, derive_more::From)] Error { UnknownPort { port_id: PortId } diff --git a/modules/src/core/ics26_routing/context.rs b/modules/src/core/ics26_routing/context.rs index c22cc0a14c..aac61fb214 100644 --- a/modules/src/core/ics26_routing/context.rs +++ b/modules/src/core/ics26_routing/context.rs @@ -21,6 +21,9 @@ use crate::events::ModuleEvent; use crate::handler::HandlerOutputBuilder; use crate::signer::Signer; +/// This trait captures all the functional dependencies of needed in light client implementations +pub trait LightClientContext: ClientReader + ConnectionReader + ChannelReader {} + /// This trait captures all the functional dependencies (i.e., context) which the ICS26 module /// requires to be able to dispatch and process IBC messages. In other words, this is the /// representation of a chain from the perspective of the IBC module of that chain. @@ -32,6 +35,8 @@ pub trait Ics26Context: + ChannelKeeper + ChannelReader + PortReader + + Ics20Context + + LightClientContext { type Router: Router; diff --git a/modules/src/core/ics26_routing/handler.rs b/modules/src/core/ics26_routing/handler.rs index 16c5bb4031..b1f5186c1e 100644 --- a/modules/src/core/ics26_routing/handler.rs +++ b/modules/src/core/ics26_routing/handler.rs @@ -1,9 +1,9 @@ +use crate::clients::crypto_ops::crypto::CryptoOps; use crate::prelude::*; use ibc_proto::google::protobuf::Any; use crate::applications::ics20_fungible_token_transfer::relay_application_logic::send_transfer::send_transfer as ics20_msg_dispatcher; -use crate::clients::ics11_beefy::client_def::BeefyTraits; use crate::core::ics02_client::handler::dispatch as ics2_msg_dispatcher; use crate::core::ics03_connection::handler::dispatch as ics3_msg_dispatcher; use crate::core::ics04_channel::handler::{ @@ -32,19 +32,19 @@ pub struct MsgReceipt { /// Mimics the DeliverTx ABCI interface, but for a single message and at a slightly lower level. 
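// Illustrative sketch (not part of the patch): `LightClientContext` adds no methods of its
// own, so a host that already implements the three reader traits opts in with an empty impl,
// exactly as the mock context does further below. `MyChainContext` is hypothetical. Because
// `Ics26Context` now lists `LightClientContext` among its supertraits, this impl is required
// before `deliver`/`dispatch` will compile for that host.
impl LightClientContext for MyChainContext {}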
/// No need for authentication info or signature checks here. /// Returns a vector of all events that got generated as a byproduct of processing `message`. -pub fn deliver( +pub fn deliver( ctx: &mut Ctx, message: Any, ) -> Result<(Vec, Vec), Error> where Ctx: Ics26Context, - Beefy: BeefyTraits, + Crypto: CryptoOps, { // Decode the proto message into a domain message, creating an ICS26 envelope. let envelope = decode(message)?; // Process the envelope, and accumulate any events that were generated. - let output = dispatch::<_, Beefy>(ctx, envelope)?; + let output = dispatch::<_, Crypto>(ctx, envelope)?; Ok(MsgReceipt { events, log }) } @@ -59,15 +59,15 @@ pub fn decode(message: Any) -> Result { /// and events produced after processing the input `msg`. /// If this method returns an error, the runtime is expected to rollback all state modifications to /// the `Ctx` caused by all messages from the transaction that this `msg` is a part of. -pub fn dispatch(ctx: &mut Ctx, msg: Ics26Envelope) -> Result, Error> +pub fn dispatch(ctx: &mut Ctx, msg: Ics26Envelope) -> Result, Error> where Ctx: Ics26Context, - Beefy: BeefyTraits, + Crypto: CryptoOps, { let output = match msg { Ics2Msg(msg) => { let handler_output = - ics2_msg_dispatcher::<_, Beefy>(ctx, msg).map_err(Error::ics02_client)?; + ics2_msg_dispatcher::<_, Crypto>(ctx, msg).map_err(Error::ics02_client)?; // Apply the result to the context (host chain store). ctx.store_client_result(handler_output.result) @@ -81,7 +81,7 @@ where Ics3Msg(msg) => { let handler_output = - ics3_msg_dispatcher::<_, Beefy>(ctx, msg).map_err(Error::ics03_connection)?; + ics3_msg_dispatcher::<_, Crypto>(ctx, msg).map_err(Error::ics03_connection)?; // Apply any results to the host chain store. ctx.store_connection_result(handler_output.result) @@ -96,7 +96,7 @@ where Ics4ChannelMsg(msg) => { let module_id = ics4_validate(ctx, &msg).map_err(Error::ics04_channel)?; let (mut handler_builder, channel_result) = - ics4_msg_dispatcher::<_, Beefy>(ctx, &msg).map_err(Error::ics04_channel)?; + ics4_msg_dispatcher::<_, Crypto>(ctx, &msg).map_err(Error::ics04_channel)?; let mut module_output = ModuleOutputBuilder::new(); let cb_result = @@ -114,7 +114,7 @@ where Ics4PacketMsg(msg) => { let module_id = get_module_for_packet_msg(ctx, &msg).map_err(Error::ics04_channel)?; let (mut handler_builder, packet_result) = - ics4_packet_msg_dispatcher::<_, Beefy>(ctx, &msg).map_err(Error::ics04_channel)?; + ics4_packet_msg_dispatcher::<_, Crypto>(ctx, &msg).map_err(Error::ics04_channel)?; if matches!(packet_result, PacketResult::Recv(RecvPacketResult::NoOp)) { return Ok(handler_builder.with_result(())); diff --git a/modules/src/mock/context.rs b/modules/src/mock/context.rs index d191d428b2..12a117e2e1 100644 --- a/modules/src/mock/context.rs +++ b/modules/src/mock/context.rs @@ -1,9 +1,11 @@ //! Implementation of a global context mock. Used in testing handlers of all IBC modules. 
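// Illustrative sketch (not part of the patch): a runtime entry point now names the crypto
// provider when delivering a raw protobuf message. `HostContext` (an `Ics26Context` impl) and
// `HostCrypto` (a `CryptoOps` impl) are hypothetical; the receipt type is the `MsgReceipt`
// defined in this file.
fn deliver_tx(ctx: &mut HostContext, msg: Any) -> Result<MsgReceipt, Error> {
    deliver::<HostContext, HostCrypto>(ctx, msg)
}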
+use crate::clients::crypto_ops::crypto::CryptoOps; use crate::prelude::*; use alloc::collections::btree_map::BTreeMap; use alloc::sync::Arc; +use beefy_client::traits::HostFunctions; use core::borrow::Borrow; use core::cmp::min; use core::fmt::Debug; @@ -16,6 +18,8 @@ use sha2::Digest; use tracing::debug; use crate::clients::ics07_tendermint::client_state::test_util::get_dummy_tendermint_client_state; +use crate::clients::ics11_beefy::client_state::test_util::get_dummy_beefy_state; +use crate::clients::ics11_beefy::consensus_state::test_util::get_dummy_beefy_consensus_state; use crate::core::ics02_client::client_consensus::{AnyConsensusState, AnyConsensusStateWithHeight}; use crate::core::ics02_client::client_state::AnyClientState; use crate::core::ics02_client::client_type::ClientType; @@ -35,7 +39,9 @@ use crate::core::ics05_port::error::Error as Ics05Error; use crate::core::ics05_port::error::Error; use crate::core::ics23_commitment::commitment::CommitmentPrefix; use crate::core::ics24_host::identifier::{ChainId, ChannelId, ClientId, ConnectionId, PortId}; -use crate::core::ics26_routing::context::{Ics26Context, Module, ModuleId, Router, RouterBuilder}; +use crate::core::ics26_routing::context::{ + Ics26Context, LightClientContext, Module, ModuleId, Router, RouterBuilder, +}; use crate::core::ics26_routing::handler::{deliver, dispatch, MsgReceipt}; use crate::core::ics26_routing::msgs::Ics26Envelope; use crate::events::IbcEvent; @@ -198,6 +204,10 @@ impl MockContext { Some(MockClientState::new(MockHeader::new(client_state_height)).into()), MockConsensusState::new(MockHeader::new(cs_height)).into(), ), + ClientType::Beefy => ( + Some(get_dummy_beefy_state()), + get_dummy_beefy_consensus_state(), + ), // If it's a Tendermint client, we need TM states. ClientType::Tendermint => { let light_block = HostBlock::generate_tm_block( @@ -250,6 +260,11 @@ impl MockContext { Some(MockClientState::new(MockHeader::new(client_state_height)).into()), MockConsensusState::new(MockHeader::new(cs_height)).into(), ), + + ClientType::Beefy => ( + Some(get_dummy_beefy_state()), + get_dummy_beefy_consensus_state(), + ), // If it's a Tendermint client, we need TM states. ClientType::Tendermint => { let light_block = HostBlock::generate_tm_block( @@ -271,6 +286,7 @@ impl MockContext { // If it's a mock client, create the corresponding mock states. ClientType::Mock => MockConsensusState::new(MockHeader::new(prev_cs_height)).into(), // If it's a Tendermint client, we need TM states. + ClientType::Beefy => get_dummy_beefy_consensus_state(), ClientType::Tendermint => { let light_block = HostBlock::generate_tm_block( self.host_chain_id.clone(), @@ -444,7 +460,7 @@ impl MockContext { /// Alternative method to `Ics18Context::send` that does not exercise any serialization. /// Used in testing the Ics18 algorithms, hence this may return a Ics18Error. pub fn deliver(&mut self, msg: Ics26Envelope) -> Result<(), Ics18Error> { - dispatch(self, msg).map_err(Ics18Error::transaction_failed)?; + dispatch::<_, Self>(self, msg).map_err(Ics18Error::transaction_failed)?; // Create a new block. 
self.advance_host_chain_height(); Ok(()) @@ -625,6 +641,40 @@ impl Router for MockRouter { } } +impl HostFunctions for MockContext { + fn keccak_256(input: &[u8]) -> [u8; 32] { + todo!() + } + + fn secp256k1_ecdsa_recover_compressed( + signature: &[u8; 65], + value: &[u8; 32], + ) -> Option> { + todo!() + } +} + +impl LightClientContext for MockContext {} + +impl CryptoOps for MockContext { + fn verify_membership_trie_proof( + root: &sp_core::H256, + proof: &Vec>, + key: &[u8], + value: &[u8], + ) -> Result<(), Ics02Error> { + todo!() + } + + fn verify_non_membership_trie_proof( + root: &sp_core::H256, + proof: &Vec>, + key: &[u8], + ) -> Result<(), Ics02Error> { + todo!() + } +} + impl Ics26Context for MockContext { type Router = MockRouter; @@ -654,10 +704,6 @@ impl ChannelReader for MockContext { } } - fn connection_end(&self, cid: &ConnectionId) -> Result { - ConnectionReader::connection_end(self, cid).map_err(Ics04Error::ics03_connection) - } - fn connection_channels( &self, cid: &ConnectionId, @@ -668,20 +714,6 @@ impl ChannelReader for MockContext { } } - fn client_state(&self, client_id: &ClientId) -> Result { - ClientReader::client_state(self, client_id) - .map_err(|e| Ics04Error::ics03_connection(Ics03Error::ics02_client(e))) - } - - fn client_consensus_state( - &self, - client_id: &ClientId, - height: Height, - ) -> Result { - ClientReader::consensus_state(self, client_id, height) - .map_err(|e| Ics04Error::ics03_connection(Ics03Error::ics02_client(e))) - } - fn get_next_sequence_send( &self, port_channel_id: &(PortId, ChannelId), @@ -770,23 +802,6 @@ impl ChannelReader for MockContext { sha2::Sha256::digest(value).to_vec() } - fn host_height(&self) -> Height { - self.latest_height() - } - - fn host_timestamp(&self) -> Timestamp { - ClientReader::host_timestamp(self) - } - - fn host_consensus_state(&self, height: Height) -> Result { - ConnectionReader::host_consensus_state(self, height).map_err(Ics04Error::ics03_connection) - } - - fn pending_host_consensus_state(&self) -> Result { - ClientReader::pending_host_consensus_state(self) - .map_err(|e| Ics04Error::ics03_connection(Ics03Error::ics02_client(e))) - } - fn client_update_time( &self, client_id: &ClientId, @@ -980,15 +995,6 @@ impl ConnectionReader for MockContext { } } - fn client_state(&self, client_id: &ClientId) -> Result { - // Forward method call to the Ics2 Client-specific method. - ClientReader::client_state(self, client_id).map_err(Ics03Error::ics02_client) - } - - fn host_current_height(&self) -> Height { - self.latest_height() - } - fn host_oldest_height(&self) -> Height { // history must be non-empty, so `self.history[0]` is valid self.history[0].height() @@ -998,20 +1004,6 @@ impl ConnectionReader for MockContext { CommitmentPrefix::try_from(b"mock".to_vec()).unwrap() } - fn client_consensus_state( - &self, - client_id: &ClientId, - height: Height, - ) -> Result { - // Forward method call to the Ics2Client-specific method. 
- self.consensus_state(client_id, height) - .map_err(Ics03Error::ics02_client) - } - - fn host_consensus_state(&self, height: Height) -> Result { - ClientReader::host_consensus_state(self, height).map_err(Ics03Error::ics02_client) - } - fn connection_counter(&self) -> Result { Ok(self.ibc_store.lock().unwrap().connection_ids_counter) } @@ -1272,7 +1264,7 @@ impl ClientKeeper for MockContext { impl Ics18Context for MockContext { fn query_latest_height(&self) -> Height { - self.host_current_height() + self.host_height() } fn query_client_full_state(&self, client_id: &ClientId) -> Option { @@ -1281,7 +1273,7 @@ impl Ics18Context for MockContext { } fn query_latest_header(&self) -> Option { - let block_ref = self.host_block(self.host_current_height()); + let block_ref = self.host_block(self.host_height()); block_ref.cloned().map(Into::into) } @@ -1289,8 +1281,8 @@ impl Ics18Context for MockContext { // Forward call to Ics26 delivery method. let mut all_events = vec![]; for msg in msgs { - let MsgReceipt { mut events, .. } = - deliver(self, msg).map_err(Ics18Error::transaction_failed)?; + let (mut events, _) = + deliver::<_, Self>(self, msg).map_err(Ics18Error::transaction_failed)?; all_events.append(&mut events); } self.advance_host_chain_height(); // Advance chain height From 5e03e10556027f7c8501e8bc4c89e7ce8ba7f844 Mon Sep 17 00:00:00 2001 From: David Salami Date: Fri, 13 May 2022 17:25:54 +0100 Subject: [PATCH 12/96] fix tests --- modules/Cargo.toml | 12 +- modules/src/clients/ics11_beefy/error.rs | 2 + .../ics02_client/handler/create_client.rs | 48 +-- .../ics02_client/handler/update_client.rs | 19 +- .../ics02_client/handler/upgrade_client.rs | 8 +- .../core/ics02_client/msgs/upgrade_client.rs | 8 +- .../ics03_connection/handler/conn_open_ack.rs | 5 +- .../handler/conn_open_confirm.rs | 4 +- .../handler/conn_open_init.rs | 4 +- .../ics03_connection/handler/conn_open_try.rs | 5 +- modules/src/core/ics04_channel/error.rs | 4 + .../ics04_channel/handler/acknowledgement.rs | 6 +- .../handler/chan_close_confirm.rs | 6 +- .../ics04_channel/handler/chan_close_init.rs | 11 +- .../ics04_channel/handler/chan_open_ack.rs | 7 +- .../handler/chan_open_confirm.rs | 7 +- .../ics04_channel/handler/chan_open_init.rs | 6 +- .../ics04_channel/handler/chan_open_try.rs | 7 +- .../core/ics04_channel/handler/recv_packet.rs | 6 +- .../core/ics04_channel/handler/send_packet.rs | 3 +- .../src/core/ics04_channel/handler/timeout.rs | 6 +- .../ics04_channel/handler/timeout_on_close.rs | 6 +- .../src/core/ics04_channel/handler/verify.rs | 10 +- .../handler/write_acknowledgement.rs | 1 + modules/src/core/ics26_routing/handler.rs | 38 +- modules/src/mock/client_def.rs | 26 +- modules/src/mock/context.rs | 40 +-- modules/src/relayer/ics18_relayer/utils.rs | 1 + modules/src/test_utils.rs | 339 +++--------------- modules/tests/runner/mod.rs | 4 +- 30 files changed, 197 insertions(+), 452 deletions(-) diff --git a/modules/Cargo.toml b/modules/Cargo.toml index d6264705a4..f2d1b9de0e 100644 --- a/modules/Cargo.toml +++ b/modules/Cargo.toml @@ -35,7 +35,7 @@ clock = ["tendermint/clock", "time/std"] # This feature grants access to development-time mocking libraries, such as `MockContext` or `MockHeader`. # Depends on the `testgen` suite for generating Tendermint light blocks. -mocks = ["tendermint-testgen", "clock", "std"] +mocks = ["tendermint-testgen", "clock", "std", "sp-io", "sp-io/std"] [dependencies] # Proto definitions for all IBC-related interfaces, e.g., connections or channels. 
@@ -57,12 +57,13 @@ num-traits = { version = "0.2.14", default-features = false } derive_more = { version = "0.99.17", default-features = false, features = ["from", "display"] } uint = { version = "0.9", default-features = false } beefy-client = { package = "beefy-generic-client", git = "https://github.com/ComposableFi/beefy-client", branch = "david/refactor-traits", default-features = false } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.22" , default-features = false } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.22" , default-features = false } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.22", default-features = false } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.22", default-features = false } pallet-mmr-primitives = { package = "sp-mmr-primitives", git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.22" , default-features = false } -beefy-primitives = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.22" , default-features = false } +beefy-primitives = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.22", default-features = false } codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false } -sp-trie = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.22" , default-features = false } +sp-trie = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.22", default-features = false } +sp-io = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.22", optional = true } [dependencies.tendermint] version = "=0.23.7" @@ -89,6 +90,7 @@ modelator = "0.4.2" sha2 = { version = "0.10.2" } tendermint-rpc = { version = "=0.23.7", features = ["http-client", "websocket-client"] } tendermint-testgen = { version = "=0.23.7" } # Needed for generating (synthetic) light blocks. +sp-io = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.22"} [[test]] name = "mbt" diff --git a/modules/src/clients/ics11_beefy/error.rs b/modules/src/clients/ics11_beefy/error.rs index ee3f85935c..7007efa8ac 100644 --- a/modules/src/clients/ics11_beefy/error.rs +++ b/modules/src/clients/ics11_beefy/error.rs @@ -16,6 +16,8 @@ define_error! 
{ Error { InvalidAddress |_| { "invalid address" }, + InvalidTrieProof + |_| { "invalid trie proof" }, InvalidMmrUpdate { reason: String } |e| { format_args!("invalid mmr update {}", e.reason) }, diff --git a/modules/src/core/ics02_client/handler/create_client.rs b/modules/src/core/ics02_client/handler/create_client.rs index 8218ad65bd..bd8abaacce 100644 --- a/modules/src/core/ics02_client/handler/create_client.rs +++ b/modules/src/core/ics02_client/handler/create_client.rs @@ -90,7 +90,7 @@ mod tests { use crate::mock::client_state::{MockClientState, MockConsensusState}; use crate::mock::context::MockContext; use crate::mock::header::MockHeader; - use crate::test_utils::get_dummy_account_id; + use crate::test_utils::{get_dummy_account_id, Crypto}; use crate::Height; #[test] @@ -101,12 +101,12 @@ mod tests { let msg = MsgCreateAnyClient::new( MockClientState::new(MockHeader::new(height)).into(), - MockConsensusState::new(MockHeader::new(height)).into(), + Some(MockConsensusState::new(MockHeader::new(height)).into()), signer, ) .unwrap(); - let output = dispatch(&ctx, ClientMsg::CreateClient(msg.clone())); + let output = dispatch::<_, Crypto>(&ctx, ClientMsg::CreateClient(msg.clone())); match output { Ok(HandlerOutput { @@ -152,11 +152,13 @@ mod tests { ..height })) .into(), - MockConsensusState::new(MockHeader::new(Height { - revision_height: 42, - ..height - })) - .into(), + Some( + MockConsensusState::new(MockHeader::new(Height { + revision_height: 42, + ..height + })) + .into(), + ), signer.clone(), ) .unwrap(), @@ -166,11 +168,13 @@ mod tests { ..height })) .into(), - MockConsensusState::new(MockHeader::new(Height { - revision_height: 42, - ..height - })) - .into(), + Some( + MockConsensusState::new(MockHeader::new(Height { + revision_height: 42, + ..height + })) + .into(), + ), signer.clone(), ) .unwrap(), @@ -180,11 +184,13 @@ mod tests { ..height })) .into(), - MockConsensusState::new(MockHeader::new(Height { - revision_height: 50, - ..height - })) - .into(), + Some( + MockConsensusState::new(MockHeader::new(Height { + revision_height: 50, + ..height + })) + .into(), + ), signer, ) .unwrap(), @@ -198,7 +204,7 @@ mod tests { let expected_client_id = ClientId::new(ClientType::Mock, 0).unwrap(); for msg in create_client_msgs { - let output = dispatch(&ctx, ClientMsg::CreateClient(msg.clone())); + let output = dispatch::<_, Crypto>(&ctx, ClientMsg::CreateClient(msg.clone())); match output { Ok(HandlerOutput { @@ -256,12 +262,12 @@ mod tests { let msg = MsgCreateAnyClient::new( tm_client_state, - AnyConsensusState::Tendermint(tm_header.try_into().unwrap()), + Some(AnyConsensusState::Tendermint(tm_header.try_into().unwrap())), signer, ) .unwrap(); - let output = dispatch(&ctx, ClientMsg::CreateClient(msg.clone())); + let output = dispatch::<_, Crypto>(&ctx, ClientMsg::CreateClient(msg.clone())); match output { Ok(HandlerOutput { diff --git a/modules/src/core/ics02_client/handler/update_client.rs b/modules/src/core/ics02_client/handler/update_client.rs index 58843f15c9..7bdd1a5894 100644 --- a/modules/src/core/ics02_client/handler/update_client.rs +++ b/modules/src/core/ics02_client/handler/update_client.rs @@ -147,7 +147,7 @@ mod tests { use crate::mock::header::MockHeader; use crate::mock::host::HostType; use crate::prelude::*; - use crate::test_utils::get_dummy_account_id; + use crate::test_utils::{get_dummy_account_id, Crypto}; use crate::timestamp::Timestamp; use crate::Height; @@ -167,7 +167,7 @@ mod tests { signer, }; - let output = dispatch(&ctx, 
ClientMsg::UpdateClient(msg.clone())); + let output = dispatch::<_, Crypto>(&ctx, ClientMsg::UpdateClient(msg.clone())); match output { Ok(HandlerOutput { @@ -215,7 +215,7 @@ mod tests { signer, }; - let output = dispatch(&ctx, ClientMsg::UpdateClient(msg.clone())); + let output = dispatch::<_, Crypto>(&ctx, ClientMsg::UpdateClient(msg.clone())); match output { Err(Error(ErrorDetail::ClientNotFound(e), _)) => { @@ -251,7 +251,7 @@ mod tests { signer: signer.clone(), }; - let output = dispatch(&ctx, ClientMsg::UpdateClient(msg.clone())); + let output = dispatch::<_, Crypto>(&ctx, ClientMsg::UpdateClient(msg.clone())); match output { Ok(HandlerOutput { @@ -310,6 +310,7 @@ mod tests { theader.trusted_height = client_height; AnyHeader::Tendermint(theader) } + AnyHeader::Beefy(h) => AnyHeader::Beefy(h), AnyHeader::Mock(m) => AnyHeader::Mock(m), }; @@ -319,7 +320,7 @@ mod tests { signer, }; - let output = dispatch(&ctx, ClientMsg::UpdateClient(msg.clone())); + let output = dispatch::<_, Crypto>(&ctx, ClientMsg::UpdateClient(msg.clone())); match output { Ok(HandlerOutput { @@ -388,6 +389,7 @@ mod tests { theader.trusted_height = trusted_height; AnyHeader::Tendermint(theader) } + AnyHeader::Beefy(h) => AnyHeader::Beefy(h), AnyHeader::Mock(m) => AnyHeader::Mock(m), }; @@ -397,7 +399,7 @@ mod tests { signer, }; - let output = dispatch(&ctx, ClientMsg::UpdateClient(msg.clone())); + let output = dispatch::<_, Crypto>(&ctx, ClientMsg::UpdateClient(msg.clone())); match output { Ok(HandlerOutput { @@ -467,6 +469,7 @@ mod tests { } AnyHeader::Tendermint(theader) } + AnyHeader::Beefy(h) => AnyHeader::Beefy(h), AnyHeader::Mock(header) => AnyHeader::Mock(header), }; @@ -476,7 +479,7 @@ mod tests { signer, }; - let output = dispatch(&ctx, ClientMsg::UpdateClient(msg.clone())); + let output = dispatch::<_, Crypto>(&ctx, ClientMsg::UpdateClient(msg.clone())); match output { Ok(HandlerOutput { @@ -548,7 +551,7 @@ mod tests { signer, }; - let output = dispatch(&ctx, ClientMsg::UpdateClient(msg)); + let output = dispatch::<_, Crypto>(&ctx, ClientMsg::UpdateClient(msg)); match output { Ok(_) => { diff --git a/modules/src/core/ics02_client/handler/upgrade_client.rs b/modules/src/core/ics02_client/handler/upgrade_client.rs index c05de32dbc..61a8a91e99 100644 --- a/modules/src/core/ics02_client/handler/upgrade_client.rs +++ b/modules/src/core/ics02_client/handler/upgrade_client.rs @@ -92,7 +92,7 @@ mod tests { use crate::mock::client_state::{MockClientState, MockConsensusState}; use crate::mock::context::MockContext; use crate::mock::header::MockHeader; - use crate::test_utils::get_dummy_account_id; + use crate::test_utils::{get_dummy_account_id, Crypto}; use crate::Height; #[test] @@ -111,7 +111,7 @@ mod tests { signer, }; - let output = dispatch(&ctx, ClientMsg::UpgradeClient(msg.clone())); + let output = dispatch::<_, Crypto>(&ctx, ClientMsg::UpgradeClient(msg.clone())); match output { Ok(HandlerOutput { @@ -157,7 +157,7 @@ mod tests { signer, }; - let output = dispatch(&ctx, ClientMsg::UpgradeClient(msg.clone())); + let output = dispatch::<_, Crypto>(&ctx, ClientMsg::UpgradeClient(msg.clone())); match output { Err(Error(ErrorDetail::ClientNotFound(e), _)) => { @@ -185,7 +185,7 @@ mod tests { signer, }; - let output = dispatch(&ctx, ClientMsg::UpgradeClient(msg.clone())); + let output = dispatch::<_, Crypto>(&ctx, ClientMsg::UpgradeClient(msg.clone())); match output { Err(Error(ErrorDetail::LowUpgradeHeight(e), _)) => { diff --git a/modules/src/core/ics02_client/msgs/upgrade_client.rs 
b/modules/src/core/ics02_client/msgs/upgrade_client.rs index e4f5b4cabd..db7907dfed 100644 --- a/modules/src/core/ics02_client/msgs/upgrade_client.rs +++ b/modules/src/core/ics02_client/msgs/upgrade_client.rs @@ -146,6 +146,7 @@ pub mod test_util { #[cfg(test)] mod tests { + use alloc::vec::Vec; use ibc_proto::ibc::core::client::v1::MsgUpgradeClient as RawMsgUpgradeClient; use crate::{ @@ -176,13 +177,14 @@ mod tests { AnyConsensusState::Mock(MockConsensusState::new(MockHeader::new(height))); let proof = get_dummy_merkle_proof(); - + let mut proof_buf = Vec::new(); + prost::Message::encode(&proof, &mut proof_buf).unwrap(); let msg = MsgUpgradeAnyClient::new( client_id, client_state, consensus_state, - proof.clone(), - proof, + proof_buf.clone(), + proof_buf, signer, ); let raw: RawMsgUpgradeClient = RawMsgUpgradeClient::from(msg.clone()); diff --git a/modules/src/core/ics03_connection/handler/conn_open_ack.rs b/modules/src/core/ics03_connection/handler/conn_open_ack.rs index 9648074d4d..4c9fe1e47a 100644 --- a/modules/src/core/ics03_connection/handler/conn_open_ack.rs +++ b/modules/src/core/ics03_connection/handler/conn_open_ack.rs @@ -100,8 +100,8 @@ mod tests { use core::str::FromStr; use test_log::test; + use crate::core::ics02_client::context::ClientReader; use crate::core::ics03_connection::connection::{ConnectionEnd, Counterparty, State}; - use crate::core::ics03_connection::context::ConnectionReader; use crate::core::ics03_connection::error; use crate::core::ics03_connection::handler::{dispatch, ConnectionResult}; use crate::core::ics03_connection::msgs::conn_open_ack::test_util::get_dummy_raw_msg_conn_open_ack; @@ -112,6 +112,7 @@ mod tests { use crate::events::IbcEvent; use crate::mock::context::MockContext; use crate::mock::host::HostType; + use crate::test_utils::Crypto; use crate::timestamp::ZERO_DURATION; #[test] @@ -224,7 +225,7 @@ mod tests { ]; for test in tests { - let res = dispatch(&test.ctx, test.msg.clone()); + let res = dispatch::<_, Crypto>(&test.ctx, test.msg.clone()); // Additionally check the events and the output objects in the result. match res { Ok(proto_output) => { diff --git a/modules/src/core/ics03_connection/handler/conn_open_confirm.rs b/modules/src/core/ics03_connection/handler/conn_open_confirm.rs index a39608e3ca..693caa2b7d 100644 --- a/modules/src/core/ics03_connection/handler/conn_open_confirm.rs +++ b/modules/src/core/ics03_connection/handler/conn_open_confirm.rs @@ -78,6 +78,7 @@ mod tests { use core::str::FromStr; use test_log::test; + use crate::core::ics02_client::context::ClientReader; use crate::core::ics03_connection::connection::{ConnectionEnd, Counterparty, State}; use crate::core::ics03_connection::context::ConnectionReader; use crate::core::ics03_connection::handler::{dispatch, ConnectionResult}; @@ -88,6 +89,7 @@ mod tests { use crate::core::ics24_host::identifier::ClientId; use crate::events::IbcEvent; use crate::mock::context::MockContext; + use crate::test_utils::Crypto; use crate::timestamp::ZERO_DURATION; use crate::Height; @@ -151,7 +153,7 @@ mod tests { .collect(); for test in tests { - let res = dispatch(&test.ctx, test.msg.clone()); + let res = dispatch::<_, Crypto>(&test.ctx, test.msg.clone()); // Additionally check the events and the output objects in the result. 
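// A small sketch of the proof-encoding step added to the upgrade_client test above:
// the message fields now carry raw proof bytes, so the proof (any `prost::Message`)
// is encoded into a buffer first. `DummyProof` is a hypothetical stand-in for the
// real commitment MerkleProof, used only to keep the example self-contained.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct DummyProof {
    #[prost(bytes = "vec", tag = "1")]
    pub data: Vec<u8>,
}

fn main() {
    let proof = DummyProof { data: vec![1, 2, 3] };
    let mut proof_buf = Vec::new();
    prost::Message::encode(&proof, &mut proof_buf).unwrap();
    assert!(!proof_buf.is_empty());
}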
match res { Ok(proto_output) => { diff --git a/modules/src/core/ics03_connection/handler/conn_open_init.rs b/modules/src/core/ics03_connection/handler/conn_open_init.rs index d91ac2f11a..126b2f0c09 100644 --- a/modules/src/core/ics03_connection/handler/conn_open_init.rs +++ b/modules/src/core/ics03_connection/handler/conn_open_init.rs @@ -69,6 +69,7 @@ pub(crate) fn process( mod tests { use test_log::test; + use crate::core::ics02_client::context::ClientReader; use crate::core::ics03_connection::connection::State; use crate::core::ics03_connection::context::ConnectionReader; use crate::core::ics03_connection::handler::{dispatch, ConnectionResult}; @@ -79,6 +80,7 @@ mod tests { use crate::events::IbcEvent; use crate::mock::context::MockContext; use crate::prelude::*; + use crate::test_utils::Crypto; use crate::Height; use ibc_proto::ibc::core::connection::v1::Version as RawVersion; @@ -147,7 +149,7 @@ mod tests { .collect(); for test in tests { - let res = dispatch(&test.ctx, test.msg.clone()); + let res = dispatch::<_, Crypto>(&test.ctx, test.msg.clone()); // Additionally check the events and the output objects in the result. match res { Ok(proto_output) => { diff --git a/modules/src/core/ics03_connection/handler/conn_open_try.rs b/modules/src/core/ics03_connection/handler/conn_open_try.rs index 89a150062f..75165da65c 100644 --- a/modules/src/core/ics03_connection/handler/conn_open_try.rs +++ b/modules/src/core/ics03_connection/handler/conn_open_try.rs @@ -127,8 +127,8 @@ mod tests { use test_log::test; + use crate::core::ics02_client::context::ClientReader; use crate::core::ics03_connection::connection::State; - use crate::core::ics03_connection::context::ConnectionReader; use crate::core::ics03_connection::handler::{dispatch, ConnectionResult}; use crate::core::ics03_connection::msgs::conn_open_try::test_util::get_dummy_raw_msg_conn_open_try; use crate::core::ics03_connection::msgs::conn_open_try::MsgConnectionOpenTry; @@ -137,6 +137,7 @@ mod tests { use crate::events::IbcEvent; use crate::mock::context::MockContext; use crate::mock::host::HostType; + use crate::test_utils::Crypto; use crate::Height; #[test] @@ -231,7 +232,7 @@ mod tests { .collect(); for test in tests { - let res = dispatch(&test.ctx, test.msg.clone()); + let res = dispatch::<_, Crypto>(&test.ctx, test.msg.clone()); // Additionally check the events and the output objects in the result. match res { Ok(proto_output) => { diff --git a/modules/src/core/ics04_channel/error.rs b/modules/src/core/ics04_channel/error.rs index 18bf45aa97..2f5451e23d 100644 --- a/modules/src/core/ics04_channel/error.rs +++ b/modules/src/core/ics04_channel/error.rs @@ -21,6 +21,10 @@ define_error! 
{ [ connection_error::Error ] | _ | { "ics03 connection error" }, + Ics02Client + [ client_error::Error ] + | _ | { "ics02 client error" }, + Ics05Port [ port_error::Error ] | _ | { "ics05 port error" }, diff --git a/modules/src/core/ics04_channel/handler/acknowledgement.rs b/modules/src/core/ics04_channel/handler/acknowledgement.rs index 032a73386b..0f4b88bc61 100644 --- a/modules/src/core/ics04_channel/handler/acknowledgement.rs +++ b/modules/src/core/ics04_channel/handler/acknowledgement.rs @@ -50,7 +50,7 @@ pub fn process( let connection_end = ctx .connection_end(&source_channel_end.connection_hops()[0]) - .map_err(|_| Error::connection_not_open(source_channel_end.connection_hops()[0].clone()))?; + .map_err(|e| Error::ics03_connection(e))?; if !connection_end.state_matches(&ConnectionState::Open) { return Err(Error::connection_not_open( @@ -125,6 +125,7 @@ pub fn process( mod tests { use test_log::test; + use crate::core::ics02_client::context::ClientReader; use crate::core::ics02_client::height::Height; use crate::core::ics03_connection::connection::ConnectionEnd; use crate::core::ics03_connection::connection::Counterparty as ConnectionCounterparty; @@ -140,6 +141,7 @@ mod tests { use crate::events::IbcEvent; use crate::mock::context::MockContext; use crate::prelude::*; + use crate::test_utils::Crypto; use crate::timestamp::ZERO_DURATION; #[test] @@ -226,7 +228,7 @@ mod tests { .collect(); for test in tests { - let res = process(&test.ctx, &test.msg); + let res = process::(&test.ctx, &test.msg); // Additionally check the events and the output objects in the result. match res { Ok(proto_output) => { diff --git a/modules/src/core/ics04_channel/handler/chan_close_confirm.rs b/modules/src/core/ics04_channel/handler/chan_close_confirm.rs index bad22ff279..073047d990 100644 --- a/modules/src/core/ics04_channel/handler/chan_close_confirm.rs +++ b/modules/src/core/ics04_channel/handler/chan_close_confirm.rs @@ -37,7 +37,7 @@ pub(crate) fn process( let conn = ctx .connection_end(&channel_end.connection_hops()[0]) - .map_err(|_| Error::connection_not_open(channel_end.connection_hops()[0].clone()))?; + .map_err(|e| Error::ics03_connection(e))?; if !conn.state_matches(&ConnectionState::Open) { return Err(Error::connection_not_open( @@ -109,6 +109,7 @@ mod tests { use crate::prelude::*; use crate::core::ics02_client::client_type::ClientType; + use crate::core::ics02_client::context::ClientReader; use crate::core::ics03_connection::connection::ConnectionEnd; use crate::core::ics03_connection::connection::Counterparty as ConnectionCounterparty; use crate::core::ics03_connection::connection::State as ConnectionState; @@ -120,6 +121,7 @@ mod tests { use crate::core::ics04_channel::handler::channel_dispatch; use crate::core::ics04_channel::Version; use crate::core::ics24_host::identifier::{ClientId, ConnectionId}; + use crate::test_utils::Crypto; use crate::mock::context::MockContext; use crate::timestamp::ZERO_DURATION; @@ -164,7 +166,7 @@ mod tests { chan_end, ); - let (handler_output_builder, _) = channel_dispatch( + let (handler_output_builder, _) = channel_dispatch::<_, Crypto>( &context, &ChannelMsg::ChannelCloseConfirm(msg_chan_close_confirm), ) diff --git a/modules/src/core/ics04_channel/handler/chan_close_init.rs b/modules/src/core/ics04_channel/handler/chan_close_init.rs index 07a34e9eb7..45d5b3aac5 100644 --- a/modules/src/core/ics04_channel/handler/chan_close_init.rs +++ b/modules/src/core/ics04_channel/handler/chan_close_init.rs @@ -36,7 +36,7 @@ pub(crate) fn process( let conn = ctx 
.connection_end(&channel_end.connection_hops()[0]) - .map_err(|_| Error::connection_not_open(channel_end.connection_hops()[0].clone()))?; + .map_err(|e| Error::ics03_connection(e))?; if !conn.state_matches(&ConnectionState::Open) { return Err(Error::connection_not_open( @@ -91,7 +91,9 @@ mod tests { use crate::core::ics04_channel::Version; use crate::core::ics24_host::identifier::{ClientId, ConnectionId}; + use crate::core::ics02_client::context::ClientReader; use crate::mock::context::MockContext; + use crate::test_utils::Crypto; use crate::timestamp::ZERO_DURATION; #[test] @@ -135,8 +137,11 @@ mod tests { ) }; - let (handler_output_builder, _) = - channel_dispatch(&context, &ChannelMsg::ChannelCloseInit(msg_chan_close_init)).unwrap(); + let (handler_output_builder, _) = channel_dispatch::<_, Crypto>( + &context, + &ChannelMsg::ChannelCloseInit(msg_chan_close_init), + ) + .unwrap(); let handler_output = handler_output_builder.with_result(()); assert!(!handler_output.events.is_empty()); // Some events must exist. diff --git a/modules/src/core/ics04_channel/handler/chan_open_ack.rs b/modules/src/core/ics04_channel/handler/chan_open_ack.rs index 690d3cee20..addc4f1f14 100644 --- a/modules/src/core/ics04_channel/handler/chan_open_ack.rs +++ b/modules/src/core/ics04_channel/handler/chan_open_ack.rs @@ -40,7 +40,7 @@ pub(crate) fn process( let conn = ctx .connection_end(&channel_end.connection_hops()[0]) - .map_err(|_| Error::connection_not_open(channel_end.connection_hops()[0].clone()))?; + .map_err(|e| Error::ics03_connection(e))?; if !conn.state_matches(&ConnectionState::Open) { return Err(Error::connection_not_open( @@ -114,6 +114,7 @@ mod tests { use test_log::test; + use crate::core::ics02_client::context::ClientReader; use crate::core::ics03_connection::connection::ConnectionEnd; use crate::core::ics03_connection::connection::Counterparty as ConnectionCounterparty; use crate::core::ics03_connection::connection::State as ConnectionState; @@ -123,7 +124,6 @@ mod tests { use crate::core::ics03_connection::msgs::conn_open_try::MsgConnectionOpenTry; use crate::core::ics03_connection::version::get_compatible_versions; use crate::core::ics04_channel::channel::{ChannelEnd, Counterparty, State}; - use crate::core::ics04_channel::context::ChannelReader; use crate::core::ics04_channel::handler::channel_dispatch; use crate::core::ics04_channel::msgs::chan_open_ack::test_util::get_dummy_raw_msg_chan_open_ack; use crate::core::ics04_channel::msgs::chan_open_ack::MsgChannelOpenAck; @@ -134,6 +134,7 @@ mod tests { use crate::events::IbcEvent; use crate::mock::context::MockContext; use crate::prelude::*; + use crate::test_utils::Crypto; use crate::Height; // TODO: The tests here are very fragile and complex. @@ -283,7 +284,7 @@ mod tests { .collect(); for test in tests { - let res = channel_dispatch(&test.ctx, &test.msg); + let res = channel_dispatch::<_, Crypto>(&test.ctx, &test.msg); // Additionally check the events and the output objects in the result. 
match res { Ok((proto_output, res)) => { diff --git a/modules/src/core/ics04_channel/handler/chan_open_confirm.rs b/modules/src/core/ics04_channel/handler/chan_open_confirm.rs index 140425c6d2..c2c9643050 100644 --- a/modules/src/core/ics04_channel/handler/chan_open_confirm.rs +++ b/modules/src/core/ics04_channel/handler/chan_open_confirm.rs @@ -39,7 +39,7 @@ pub(crate) fn process( let conn = ctx .connection_end(&channel_end.connection_hops()[0]) - .map_err(|_| Error::connection_not_open(channel_end.connection_hops()[0].clone()))?; + .map_err(|e| Error::ics03_connection(e))?; if !conn.state_matches(&ConnectionState::Open) { return Err(Error::connection_not_open( @@ -110,13 +110,13 @@ mod tests { use test_log::test; use crate::core::ics02_client::client_type::ClientType; + use crate::core::ics02_client::context::ClientReader; use crate::core::ics03_connection::connection::ConnectionEnd; use crate::core::ics03_connection::connection::Counterparty as ConnectionCounterparty; use crate::core::ics03_connection::connection::State as ConnectionState; use crate::core::ics03_connection::msgs::test_util::get_dummy_raw_counterparty; use crate::core::ics03_connection::version::get_compatible_versions; use crate::core::ics04_channel::channel::{ChannelEnd, Counterparty, Order, State}; - use crate::core::ics04_channel::context::ChannelReader; use crate::core::ics04_channel::handler::channel_dispatch; use crate::core::ics04_channel::msgs::chan_open_confirm::test_util::get_dummy_raw_msg_chan_open_confirm; use crate::core::ics04_channel::msgs::chan_open_confirm::MsgChannelOpenConfirm; @@ -125,6 +125,7 @@ mod tests { use crate::core::ics24_host::identifier::{ClientId, ConnectionId}; use crate::events::IbcEvent; use crate::mock::context::MockContext; + use crate::test_utils::Crypto; use crate::timestamp::ZERO_DURATION; use crate::Height; @@ -184,7 +185,7 @@ mod tests { .collect(); for test in tests { - let res = channel_dispatch(&test.ctx, &test.msg); + let res = channel_dispatch::<_, Crypto>(&test.ctx, &test.msg); // Additionally check the events and the output objects in the result. match res { Ok((proto_output, res)) => { diff --git a/modules/src/core/ics04_channel/handler/chan_open_init.rs b/modules/src/core/ics04_channel/handler/chan_open_init.rs index f055a7b534..f3a84a0b08 100644 --- a/modules/src/core/ics04_channel/handler/chan_open_init.rs +++ b/modules/src/core/ics04_channel/handler/chan_open_init.rs @@ -27,7 +27,7 @@ pub(crate) fn process( // An IBC connection running on the local (host) chain should exist. 
let conn = ctx .connection_end(&msg.channel.connection_hops()[0]) - .map_err(|_| Error::connection_not_open(msg.channel.connection_hops()[0].clone()))?; + .map_err(|e| Error::ics03_connection(e))?; let get_versions = conn.versions(); let version = match get_versions { [version] => version, @@ -85,6 +85,7 @@ mod tests { use test_log::test; + use crate::core::ics02_client::context::ClientReader; use crate::core::ics03_connection::connection::ConnectionEnd; use crate::core::ics03_connection::connection::State as ConnectionState; use crate::core::ics03_connection::msgs::conn_open_init::test_util::get_dummy_raw_msg_conn_open_init; @@ -98,6 +99,7 @@ mod tests { use crate::core::ics24_host::identifier::ConnectionId; use crate::events::IbcEvent; use crate::mock::context::MockContext; + use crate::test_utils::Crypto; #[test] fn chan_open_init_msg_processing() { @@ -144,7 +146,7 @@ mod tests { .collect(); for test in tests { - let res = channel_dispatch(&test.ctx, &test.msg); + let res = channel_dispatch::<_, Crypto>(&test.ctx, &test.msg); // Additionally check the events and the output objects in the result. match res { Ok((proto_output, res)) => { diff --git a/modules/src/core/ics04_channel/handler/chan_open_try.rs b/modules/src/core/ics04_channel/handler/chan_open_try.rs index fc71016a30..814c818acc 100644 --- a/modules/src/core/ics04_channel/handler/chan_open_try.rs +++ b/modules/src/core/ics04_channel/handler/chan_open_try.rs @@ -72,7 +72,7 @@ pub(crate) fn process( let conn = ctx .connection_end(&msg.channel.connection_hops()[0]) - .map_err(|_| Error::connection_not_open(msg.channel.connection_hops()[0].clone()))?; + .map_err(|e| Error::ics03_connection(e))?; if !conn.state_matches(&ConnectionState::Open) { return Err(Error::connection_not_open( msg.channel.connection_hops()[0].clone(), @@ -157,6 +157,7 @@ mod tests { use test_log::test; use crate::core::ics02_client::client_type::ClientType; + use crate::core::ics02_client::context::ClientReader; use crate::core::ics02_client::error as ics02_error; use crate::core::ics03_connection::connection::ConnectionEnd; use crate::core::ics03_connection::connection::Counterparty as ConnectionCounterparty; @@ -165,7 +166,6 @@ mod tests { use crate::core::ics03_connection::msgs::test_util::get_dummy_raw_counterparty; use crate::core::ics03_connection::version::get_compatible_versions; use crate::core::ics04_channel::channel::{ChannelEnd, State}; - use crate::core::ics04_channel::context::ChannelReader; use crate::core::ics04_channel::handler::channel_dispatch; use crate::core::ics04_channel::msgs::chan_open_try::test_util::get_dummy_raw_msg_chan_open_try; use crate::core::ics04_channel::msgs::chan_open_try::MsgChannelOpenTry; @@ -174,6 +174,7 @@ mod tests { use crate::core::ics24_host::identifier::{ChannelId, ClientId, ConnectionId}; use crate::events::IbcEvent; use crate::mock::context::MockContext; + use crate::test_utils::Crypto; use crate::timestamp::ZERO_DURATION; use crate::Height; @@ -388,7 +389,7 @@ mod tests { .collect(); for test in tests { - let res = channel_dispatch(&test.ctx, &test.msg); + let res = channel_dispatch::<_, Crypto>(&test.ctx, &test.msg); // Additionally check the events and the output objects in the result. 
match res { Ok((proto_output, res)) => { diff --git a/modules/src/core/ics04_channel/handler/recv_packet.rs b/modules/src/core/ics04_channel/handler/recv_packet.rs index 3a63611328..ab1ab95db3 100644 --- a/modules/src/core/ics04_channel/handler/recv_packet.rs +++ b/modules/src/core/ics04_channel/handler/recv_packet.rs @@ -57,7 +57,7 @@ pub fn process( let connection_end = ctx .connection_end(&dest_channel_end.connection_hops()[0]) - .map_err(|_| Error::connection_not_open(dest_channel_end.connection_hops()[0].clone()))?; + .map_err(|e| Error::ics03_connection(e))?; if !connection_end.state_matches(&ConnectionState::Open) { return Err(Error::connection_not_open( @@ -155,6 +155,7 @@ mod tests { use test_log::test; + use crate::core::ics02_client::context::ClientReader; use crate::core::ics03_connection::connection::ConnectionEnd; use crate::core::ics03_connection::connection::Counterparty as ConnectionCounterparty; use crate::core::ics03_connection::connection::State as ConnectionState; @@ -168,6 +169,7 @@ mod tests { use crate::mock::context::MockContext; use crate::relayer::ics18_relayer::context::Ics18Context; use crate::test_utils::get_dummy_account_id; + use crate::test_utils::Crypto; use crate::timestamp::Timestamp; use crate::timestamp::ZERO_DURATION; use crate::{core::ics04_channel::packet::Packet, events::IbcEvent}; @@ -276,7 +278,7 @@ mod tests { .collect(); for test in tests { - let res = process(&test.ctx, &test.msg); + let res = process::(&test.ctx, &test.msg); // Additionally check the events and the output objects in the result. match res { Ok(proto_output) => { diff --git a/modules/src/core/ics04_channel/handler/send_packet.rs b/modules/src/core/ics04_channel/handler/send_packet.rs index 4fdd48617a..2c6a1f2451 100644 --- a/modules/src/core/ics04_channel/handler/send_packet.rs +++ b/modules/src/core/ics04_channel/handler/send_packet.rs @@ -48,7 +48,7 @@ pub fn send_packet( let connection_end = ctx .connection_end(&source_channel_end.connection_hops()[0]) - .map_err(|_| Error::connection_not_open(source_channel_end.connection_hops()[0].clone()))?; + .map_err(|e| Error::ics03_connection(e))?; let client_id = connection_end.client_id().clone(); @@ -118,6 +118,7 @@ mod tests { use test_log::test; + use crate::core::ics02_client::context::ClientReader; use crate::core::ics02_client::height::Height; use crate::core::ics03_connection::connection::ConnectionEnd; use crate::core::ics03_connection::connection::Counterparty as ConnectionCounterparty; diff --git a/modules/src/core/ics04_channel/handler/timeout.rs b/modules/src/core/ics04_channel/handler/timeout.rs index 1cfe7733e0..ce8ac55f14 100644 --- a/modules/src/core/ics04_channel/handler/timeout.rs +++ b/modules/src/core/ics04_channel/handler/timeout.rs @@ -52,7 +52,7 @@ pub fn process( let connection_end = ctx .connection_end(&source_channel_end.connection_hops()[0]) - .map_err(|_| Error::connection_not_open(source_channel_end.connection_hops()[0].clone()))?; + .map_err(|e| Error::ics03_connection(e))?; let client_id = connection_end.client_id().clone(); @@ -151,6 +151,7 @@ pub fn process( mod tests { use test_log::test; + use crate::core::ics02_client::context::ClientReader; use crate::core::ics02_client::height::Height; use crate::core::ics03_connection::connection::ConnectionEnd; use crate::core::ics03_connection::connection::Counterparty as ConnectionCounterparty; @@ -166,6 +167,7 @@ mod tests { use crate::events::IbcEvent; use crate::mock::context::MockContext; use crate::prelude::*; + use crate::test_utils::Crypto; use 
crate::timestamp::ZERO_DURATION; #[test] @@ -303,7 +305,7 @@ mod tests { .collect(); for test in tests { - let res = process(&test.ctx, &test.msg); + let res = process::(&test.ctx, &test.msg); // Additionally check the events and the output objects in the result. match res { Ok(proto_output) => { diff --git a/modules/src/core/ics04_channel/handler/timeout_on_close.rs b/modules/src/core/ics04_channel/handler/timeout_on_close.rs index f1ff655be4..5bcc4c6282 100644 --- a/modules/src/core/ics04_channel/handler/timeout_on_close.rs +++ b/modules/src/core/ics04_channel/handler/timeout_on_close.rs @@ -39,7 +39,7 @@ pub fn process( let connection_end = ctx .connection_end(&source_channel_end.connection_hops()[0]) - .map_err(|_| Error::connection_not_open(source_channel_end.connection_hops()[0].clone()))?; + .map_err(|e| Error::ics03_connection(e))?; //verify the packet was sent, check the store let packet_commitment = ctx.get_packet_commitment(&( @@ -137,6 +137,7 @@ pub fn process( mod tests { use test_log::test; + use crate::core::ics02_client::context::ClientReader; use crate::core::ics02_client::height::Height; use crate::core::ics03_connection::connection::ConnectionEnd; use crate::core::ics03_connection::connection::Counterparty as ConnectionCounterparty; @@ -152,6 +153,7 @@ mod tests { use crate::events::IbcEvent; use crate::mock::context::MockContext; use crate::prelude::*; + use crate::test_utils::Crypto; use crate::timestamp::ZERO_DURATION; #[test] @@ -250,7 +252,7 @@ mod tests { .collect(); for test in tests { - let res = process(&test.ctx, &test.msg); + let res = process::(&test.ctx, &test.msg); // Additionally check the events and the output objects in the result. match res { Ok(proto_output) => { diff --git a/modules/src/core/ics04_channel/handler/verify.rs b/modules/src/core/ics04_channel/handler/verify.rs index 664f30928c..ca53d1a3e2 100644 --- a/modules/src/core/ics04_channel/handler/verify.rs +++ b/modules/src/core/ics04_channel/handler/verify.rs @@ -26,7 +26,7 @@ pub fn verify_channel_proofs( let client_state = ctx .client_state(&client_id) - .map_err(|_| Error::implementation_specific())?; + .map_err(|e| Error::ics02_client(e))?; // The client must not be frozen. if client_state.is_frozen() { @@ -68,7 +68,7 @@ pub fn verify_packet_recv_proofs( let client_id = connection_end.client_id(); let client_state = ctx .client_state(client_id) - .map_err(|_| Error::implementation_specific())?; + .map_err(|e| Error::ics02_client(e))?; // The client must not be frozen. if client_state.is_frozen() { @@ -119,7 +119,7 @@ pub fn verify_packet_acknowledgement_proofs( let client_id = connection_end.client_id(); let client_state = ctx .client_state(client_id) - .map_err(|_| Error::implementation_specific())?; + .map_err(|e| Error::ics02_client(e))?; // The client must not be frozen. if client_state.is_frozen() { @@ -166,7 +166,7 @@ pub fn verify_next_sequence_recv( let client_id = connection_end.client_id(); let client_state = ctx .client_state(client_id) - .map_err(|_| Error::implementation_specific())?; + .map_err(|e| Error::ics02_client(e))?; // The client must not be frozen. if client_state.is_frozen() { @@ -208,7 +208,7 @@ pub fn verify_packet_receipt_absence( let client_id = connection_end.client_id(); let client_state = ctx .client_state(client_id) - .map_err(|_| Error::implementation_specific())?; + .map_err(|e| Error::ics02_client(e))?; // The client must not be frozen. 
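// A self-contained sketch of the error-mapping change repeated across these
// handlers: rather than collapsing every lookup failure into `connection_not_open`
// or `implementation_specific`, the underlying error is wrapped so its cause
// survives. The stub enums below are assumptions standing in for the real
// flex_error-generated types in ics03_connection and ics04_channel.
#[derive(Debug)]
enum Ics03Error {
    ConnectionNotFound(String),
}

#[derive(Debug)]
enum ChannelError {
    Ics03Connection(Ics03Error),
}

fn connection_end(id: &str) -> Result<(), Ics03Error> {
    Err(Ics03Error::ConnectionNotFound(id.to_string()))
}

fn main() {
    // Before: `.map_err(|_| connection_not_open(..))` threw the original error away.
    // After: `.map_err(ChannelError::Ics03Connection)` keeps it for diagnostics.
    let res = connection_end("connection-0").map_err(ChannelError::Ics03Connection);
    println!("{:?}", res);
}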
if client_state.is_frozen() { diff --git a/modules/src/core/ics04_channel/handler/write_acknowledgement.rs b/modules/src/core/ics04_channel/handler/write_acknowledgement.rs index 79e1bad6c8..e86f1f5821 100644 --- a/modules/src/core/ics04_channel/handler/write_acknowledgement.rs +++ b/modules/src/core/ics04_channel/handler/write_acknowledgement.rs @@ -78,6 +78,7 @@ mod tests { use test_log::test; + use crate::core::ics02_client::context::ClientReader; use crate::core::ics02_client::height::Height; use crate::core::ics03_connection::connection::ConnectionEnd; use crate::core::ics03_connection::connection::Counterparty as ConnectionCounterparty; diff --git a/modules/src/core/ics26_routing/handler.rs b/modules/src/core/ics26_routing/handler.rs index b1f5186c1e..5928d2e397 100644 --- a/modules/src/core/ics26_routing/handler.rs +++ b/modules/src/core/ics26_routing/handler.rs @@ -142,10 +142,12 @@ mod tests { use test_log::test; - use crate::applications::transfer::context::test::deliver as ics20_deliver; - use crate::applications::transfer::PrefixedCoin; + use crate::applications::ics20_fungible_token_transfer::msgs::transfer::test_util::get_dummy_msg_transfer; use crate::core::ics02_client::client_consensus::AnyConsensusState; use crate::core::ics02_client::client_state::AnyClientState; + use crate::events::IbcEvent; + use crate::test_utils::Crypto; + use crate::core::ics02_client::msgs::{ create_client::MsgCreateAnyClient, update_client::MsgUpdateAnyClient, upgrade_client::MsgUpgradeAnyClient, ClientMsg, @@ -246,8 +248,8 @@ mod tests { let create_client_msg = MsgCreateAnyClient::new( AnyClientState::from(MockClientState::new(MockHeader::new(start_client_height))), - AnyConsensusState::Mock(MockConsensusState::new(MockHeader::new( - start_client_height, + Some(AnyConsensusState::Mock(MockConsensusState::new( + MockHeader::new(start_client_height), ))), default_signer.clone(), ) @@ -326,7 +328,7 @@ mod tests { let msg_recv_packet = MsgRecvPacket::try_from(get_dummy_raw_msg_recv_packet(35)).unwrap(); // First, create a client.. 
- let res = dispatch( + let res = dispatch::<_, Crypto>( &mut ctx, Ics26Envelope::Ics2Msg(ClientMsg::CreateClient(create_client_msg.clone())), ); @@ -512,8 +514,8 @@ mod tests { AnyConsensusState::Mock(MockConsensusState::new(MockHeader::new( upgrade_client_height, ))), - get_dummy_merkle_proof(), - get_dummy_merkle_proof(), + Vec::new(), + Vec::new(), default_signer.clone(), ))) .into(), @@ -529,8 +531,8 @@ mod tests { AnyConsensusState::Mock(MockConsensusState::new(MockHeader::new( upgrade_client_height_second, ))), - get_dummy_merkle_proof(), - get_dummy_merkle_proof(), + Vec::new(), + Vec::new(), default_signer, ))) .into(), @@ -541,23 +543,7 @@ mod tests { .collect(); for test in tests { - let res = match test.msg.clone() { - TestMsg::Ics26(msg) => dispatch(&mut ctx, msg).map(|_| ()), - TestMsg::Ics20(msg) => { - let transfer_module = - ctx.router_mut().get_route_mut(&transfer_module_id).unwrap(); - ics20_deliver( - transfer_module - .as_any_mut() - .downcast_mut::() - .unwrap(), - &mut HandlerOutputBuilder::new(), - msg, - ) - .map(|_| ()) - .map_err(Error::ics04_channel) - } - }; + let res = dispatch::<_, Crypto>(&mut ctx, test.msg.clone()); assert_eq!( test.want_pass, diff --git a/modules/src/mock/client_def.rs b/modules/src/mock/client_def.rs index ee20c970d1..8b57e6bc4d 100644 --- a/modules/src/mock/client_def.rs +++ b/modules/src/mock/client_def.rs @@ -1,13 +1,10 @@ use crate::core::ics02_client::client_consensus::AnyConsensusState; use crate::core::ics02_client::client_def::{ClientDef, ConsensusUpdateResult}; use crate::core::ics02_client::client_state::AnyClientState; -use crate::core::ics02_client::context::ClientReader; use crate::core::ics02_client::error::Error; use crate::core::ics03_connection::connection::ConnectionEnd; -use crate::core::ics03_connection::context::ConnectionReader; use crate::core::ics04_channel::channel::ChannelEnd; use crate::core::ics04_channel::commitment::{AcknowledgementCommitment, PacketCommitment}; -use crate::core::ics04_channel::context::ChannelReader; use crate::core::ics04_channel::packet::Sequence; use crate::core::ics23_commitment::commitment::{ CommitmentPrefix, CommitmentProofBytes, CommitmentRoot, @@ -16,6 +13,7 @@ use crate::core::ics23_commitment::merkle::apply_prefix; use crate::core::ics24_host::identifier::{ChannelId, ClientId, ConnectionId, PortId}; use crate::core::ics24_host::path::ClientConsensusStatePath; use crate::core::ics24_host::Path; +use crate::core::ics26_routing::context::LightClientContext; use crate::mock::client_state::{MockClientState, MockConsensusState}; use crate::mock::header::MockHeader; use crate::prelude::*; @@ -31,7 +29,7 @@ impl ClientDef for MockClient { fn update_state( &self, - _ctx: &dyn ClientReader, + _ctx: &dyn LightClientContext, _client_id: ClientId, client_state: Self::ClientState, header: Self::Header, @@ -51,7 +49,7 @@ impl ClientDef for MockClient { fn verify_client_consensus_state( &self, - _ctx: &dyn ConnectionReader, + _ctx: &dyn LightClientContext, _client_state: &Self::ClientState, _height: Height, prefix: &CommitmentPrefix, @@ -75,7 +73,7 @@ impl ClientDef for MockClient { fn verify_connection_state( &self, - _ctx: &dyn ConnectionReader, + _ctx: &dyn LightClientContext, _client_id: &ClientId, _client_state: &Self::ClientState, _height: Height, @@ -90,7 +88,7 @@ impl ClientDef for MockClient { fn verify_channel_state( &self, - _ctx: &dyn ClientReader, + _ctx: &dyn LightClientContext, _client_id: &ClientId, _client_state: &Self::ClientState, _height: Height, @@ -106,7 +104,7 @@ impl 
ClientDef for MockClient { fn verify_client_full_state( &self, - _ctx: &dyn ConnectionReader, + _ctx: &dyn LightClientContext, _client_state: &Self::ClientState, _height: Height, _prefix: &CommitmentPrefix, @@ -120,7 +118,7 @@ impl ClientDef for MockClient { fn verify_packet_data( &self, - _ctx: &dyn ChannelReader, + _ctx: &dyn LightClientContext, _client_id: &ClientId, _client_state: &Self::ClientState, _height: Height, @@ -137,7 +135,7 @@ impl ClientDef for MockClient { fn verify_packet_acknowledgement( &self, - _ctx: &dyn ChannelReader, + _ctx: &dyn LightClientContext, _client_id: &ClientId, _client_state: &Self::ClientState, _height: Height, @@ -154,7 +152,7 @@ impl ClientDef for MockClient { fn verify_next_sequence_recv( &self, - _ctx: &dyn ChannelReader, + _ctx: &dyn LightClientContext, _client_id: &ClientId, _client_state: &Self::ClientState, _height: Height, @@ -170,7 +168,7 @@ impl ClientDef for MockClient { fn verify_packet_receipt_absence( &self, - _ctx: &dyn ChannelReader, + _ctx: &dyn LightClientContext, _client_id: &ClientId, _client_state: &Self::ClientState, _height: Height, @@ -199,7 +197,7 @@ impl ClientDef for MockClient { fn verify_header( &self, - _ctx: &dyn ClientReader, + _ctx: &dyn LightClientContext, _client_id: ClientId, _client_state: Self::ClientState, _header: Self::Header, @@ -217,7 +215,7 @@ impl ClientDef for MockClient { fn check_for_misbehaviour( &self, - _ctx: &dyn ClientReader, + _ctx: &dyn LightClientContext, _client_id: ClientId, _client_state: Self::ClientState, _header: Self::Header, diff --git a/modules/src/mock/context.rs b/modules/src/mock/context.rs index 12a117e2e1..4c95d88b0b 100644 --- a/modules/src/mock/context.rs +++ b/modules/src/mock/context.rs @@ -1,11 +1,8 @@ //! Implementation of a global context mock. Used in testing handlers of all IBC modules. - -use crate::clients::crypto_ops::crypto::CryptoOps; use crate::prelude::*; use alloc::collections::btree_map::BTreeMap; use alloc::sync::Arc; -use beefy_client::traits::HostFunctions; use core::borrow::Borrow; use core::cmp::min; use core::fmt::Debug; @@ -51,6 +48,7 @@ use crate::mock::host::{HostBlock, HostType}; use crate::relayer::ics18_relayer::context::Ics18Context; use crate::relayer::ics18_relayer::error::Error as Ics18Error; use crate::signer::Signer; +use crate::test_utils::Crypto; use crate::timestamp::Timestamp; use crate::Height; @@ -460,7 +458,7 @@ impl MockContext { /// Alternative method to `Ics18Context::send` that does not exercise any serialization. /// Used in testing the Ics18 algorithms, hence this may return a Ics18Error. pub fn deliver(&mut self, msg: Ics26Envelope) -> Result<(), Ics18Error> { - dispatch::<_, Self>(self, msg).map_err(Ics18Error::transaction_failed)?; + dispatch::<_, Crypto>(self, msg).map_err(Ics18Error::transaction_failed)?; // Create a new block. 
self.advance_host_chain_height(); Ok(()) @@ -641,40 +639,8 @@ impl Router for MockRouter { } } -impl HostFunctions for MockContext { - fn keccak_256(input: &[u8]) -> [u8; 32] { - todo!() - } - - fn secp256k1_ecdsa_recover_compressed( - signature: &[u8; 65], - value: &[u8; 32], - ) -> Option> { - todo!() - } -} - impl LightClientContext for MockContext {} -impl CryptoOps for MockContext { - fn verify_membership_trie_proof( - root: &sp_core::H256, - proof: &Vec>, - key: &[u8], - value: &[u8], - ) -> Result<(), Ics02Error> { - todo!() - } - - fn verify_non_membership_trie_proof( - root: &sp_core::H256, - proof: &Vec>, - key: &[u8], - ) -> Result<(), Ics02Error> { - todo!() - } -} - impl Ics26Context for MockContext { type Router = MockRouter; @@ -1282,7 +1248,7 @@ impl Ics18Context for MockContext { let mut all_events = vec![]; for msg in msgs { let (mut events, _) = - deliver::<_, Self>(self, msg).map_err(Ics18Error::transaction_failed)?; + deliver::<_, Crypto>(self, msg).map_err(Ics18Error::transaction_failed)?; all_events.append(&mut events); } self.advance_host_chain_height(); // Advance chain height diff --git a/modules/src/relayer/ics18_relayer/utils.rs b/modules/src/relayer/ics18_relayer/utils.rs index 51c0680b62..060cef55b5 100644 --- a/modules/src/relayer/ics18_relayer/utils.rs +++ b/modules/src/relayer/ics18_relayer/utils.rs @@ -159,6 +159,7 @@ mod tests { hheader.trusted_height = th.decrement().unwrap(); hheader.wrap_any() } + AnyHeader::Beefy(h) => h.wrap_any(), AnyHeader::Mock(header) => header.wrap_any(), }; diff --git a/modules/src/test_utils.rs b/modules/src/test_utils.rs index 9dbbe0596b..c94cd61078 100644 --- a/modules/src/test_utils.rs +++ b/modules/src/test_utils.rs @@ -1,18 +1,16 @@ use std::sync::{Arc, Mutex}; use std::time::Duration; +use crate::clients::crypto_ops::crypto::CryptoOps; +use crate::prelude::*; +use beefy_client::traits::HostFunctions; +use sp_core::keccak_256; +use sp_trie::LayoutV0; use tendermint::{block, consensus, evidence, public_key::Algorithm}; -use crate::applications::transfer::context::{BankKeeper, Ics20Context, Ics20Keeper, Ics20Reader}; -use crate::applications::transfer::{error::Error as Ics20Error, PrefixedCoin}; -use crate::core::ics02_client::client_consensus::AnyConsensusState; -use crate::core::ics02_client::client_state::AnyClientState; +use crate::clients::ics11_beefy::error::Error as BeefyError; use crate::core::ics02_client::error::Error as Ics02Error; -use crate::core::ics03_connection::connection::ConnectionEnd; -use crate::core::ics03_connection::error::Error as Ics03Error; -use crate::core::ics04_channel::channel::{ChannelEnd, Counterparty, Order}; -use crate::core::ics04_channel::commitment::{AcknowledgementCommitment, PacketCommitment}; -use crate::core::ics04_channel::context::{ChannelKeeper, ChannelReader}; +use crate::core::ics04_channel::channel::{Counterparty, Order}; use crate::core::ics04_channel::error::Error; use crate::core::ics04_channel::packet::{Receipt, Sequence}; use crate::core::ics04_channel::Version; @@ -87,298 +85,47 @@ impl Module for DummyTransferModule { } } -impl Ics20Keeper for DummyTransferModule { - type AccountId = Signer; -} - -impl ChannelKeeper for DummyTransferModule { - fn store_packet_commitment( - &mut self, - key: (PortId, ChannelId, Sequence), - commitment: PacketCommitment, - ) -> Result<(), Error> { - self.ibc_store - .lock() - .unwrap() - .packet_commitment - .insert(key, commitment); - Ok(()) - } - - fn delete_packet_commitment( - &mut self, - _key: (PortId, ChannelId, Sequence), - ) -> 
Result<(), Error> { - unimplemented!() - } - - fn store_packet_receipt( - &mut self, - _key: (PortId, ChannelId, Sequence), - _receipt: Receipt, - ) -> Result<(), Error> { - unimplemented!() - } - - fn store_packet_acknowledgement( - &mut self, - _key: (PortId, ChannelId, Sequence), - _ack: AcknowledgementCommitment, - ) -> Result<(), Error> { - unimplemented!() - } - - fn delete_packet_acknowledgement( - &mut self, - _key: (PortId, ChannelId, Sequence), - ) -> Result<(), Error> { - unimplemented!() - } - - fn store_connection_channels( - &mut self, - _conn_id: ConnectionId, - _port_channel_id: &(PortId, ChannelId), - ) -> Result<(), Error> { - unimplemented!() - } - - fn store_channel( - &mut self, - _port_channel_id: (PortId, ChannelId), - _channel_end: &ChannelEnd, - ) -> Result<(), Error> { - unimplemented!() - } - - fn store_next_sequence_send( - &mut self, - port_channel_id: (PortId, ChannelId), - seq: Sequence, - ) -> Result<(), Error> { - self.ibc_store - .lock() - .unwrap() - .next_sequence_send - .insert(port_channel_id, seq); - Ok(()) - } - - fn store_next_sequence_recv( - &mut self, - _port_channel_id: (PortId, ChannelId), - _seq: Sequence, - ) -> Result<(), Error> { - unimplemented!() - } - - fn store_next_sequence_ack( - &mut self, - _port_channel_id: (PortId, ChannelId), - _seq: Sequence, - ) -> Result<(), Error> { - unimplemented!() - } - - fn increase_channel_counter(&mut self) { - unimplemented!() - } -} - -impl PortReader for DummyTransferModule { - fn lookup_module_by_port(&self, _port_id: &PortId) -> Result { - unimplemented!() - } -} - -impl BankKeeper for DummyTransferModule { - type AccountId = Signer; +#[derive(Clone)] +pub struct Crypto; - fn send_coins( - &mut self, - _from: &Self::AccountId, - _to: &Self::AccountId, - _amt: &PrefixedCoin, - ) -> Result<(), Ics20Error> { - Ok(()) +impl HostFunctions for Crypto { + fn keccak_256(input: &[u8]) -> [u8; 32] { + keccak_256(input) } - fn mint_coins( - &mut self, - _account: &Self::AccountId, - _amt: &PrefixedCoin, - ) -> Result<(), Ics20Error> { - Ok(()) - } - - fn burn_coins( - &mut self, - _account: &Self::AccountId, - _amt: &PrefixedCoin, - ) -> Result<(), Ics20Error> { - Ok(()) + fn secp256k1_ecdsa_recover_compressed( + signature: &[u8; 65], + value: &[u8; 32], + ) -> Option> { + sp_io::crypto::secp256k1_ecdsa_recover_compressed(signature, value) + .ok() + .map(|val| val.to_vec()) } } -impl Ics20Reader for DummyTransferModule { - type AccountId = Signer; - - fn get_port(&self) -> Result { - Ok(PortId::transfer()) +impl CryptoOps for Crypto { + fn verify_membership_trie_proof( + root: &sp_core::H256, + proof: &Vec>, + key: &[u8], + value: &[u8], + ) -> Result<(), Ics02Error> { + let item = vec![(key, Some(value))]; + sp_trie::verify_trie_proof::, _, _, _>( + root, proof, &item, + ) + .map_err(|_| Ics02Error::beefy(BeefyError::invalid_trie_proof())) + } + + fn verify_non_membership_trie_proof( + root: &sp_core::H256, + proof: &Vec>, + key: &[u8], + ) -> Result<(), Ics02Error> { + let item: Vec<(&[u8], Option<&[u8]>)> = vec![(key, None)]; + sp_trie::verify_trie_proof::, _, _, _>( + root, proof, &item, + ) + .map_err(|_| Ics02Error::beefy(BeefyError::invalid_trie_proof())) } - - fn is_send_enabled(&self) -> bool { - true - } - - fn is_receive_enabled(&self) -> bool { - true - } -} - -impl ChannelReader for DummyTransferModule { - fn channel_end(&self, pcid: &(PortId, ChannelId)) -> Result { - match self.ibc_store.lock().unwrap().channels.get(pcid) { - Some(channel_end) => Ok(channel_end.clone()), - None => 
Err(Error::channel_not_found(pcid.0.clone(), pcid.1)), - } - } - - fn connection_end(&self, cid: &ConnectionId) -> Result { - match self.ibc_store.lock().unwrap().connections.get(cid) { - Some(connection_end) => Ok(connection_end.clone()), - None => Err(Ics03Error::connection_not_found(cid.clone())), - } - .map_err(Error::ics03_connection) - } - - fn connection_channels(&self, _cid: &ConnectionId) -> Result, Error> { - unimplemented!() - } - - fn client_state(&self, client_id: &ClientId) -> Result { - match self.ibc_store.lock().unwrap().clients.get(client_id) { - Some(client_record) => client_record - .client_state - .clone() - .ok_or_else(|| Ics02Error::client_not_found(client_id.clone())), - None => Err(Ics02Error::client_not_found(client_id.clone())), - } - .map_err(|e| Error::ics03_connection(Ics03Error::ics02_client(e))) - } - - fn client_consensus_state( - &self, - client_id: &ClientId, - height: Height, - ) -> Result { - match self.ibc_store.lock().unwrap().clients.get(client_id) { - Some(client_record) => match client_record.consensus_states.get(&height) { - Some(consensus_state) => Ok(consensus_state.clone()), - None => Err(Ics02Error::consensus_state_not_found( - client_id.clone(), - height, - )), - }, - None => Err(Ics02Error::consensus_state_not_found( - client_id.clone(), - height, - )), - } - .map_err(|e| Error::ics03_connection(Ics03Error::ics02_client(e))) - } - - fn get_next_sequence_send( - &self, - port_channel_id: &(PortId, ChannelId), - ) -> Result { - match self - .ibc_store - .lock() - .unwrap() - .next_sequence_send - .get(port_channel_id) - { - Some(sequence) => Ok(*sequence), - None => Err(Error::missing_next_send_seq(port_channel_id.clone())), - } - } - - fn get_next_sequence_recv( - &self, - _port_channel_id: &(PortId, ChannelId), - ) -> Result { - unimplemented!() - } - - fn get_next_sequence_ack( - &self, - _port_channel_id: &(PortId, ChannelId), - ) -> Result { - unimplemented!() - } - - fn get_packet_commitment( - &self, - _key: &(PortId, ChannelId, Sequence), - ) -> Result { - unimplemented!() - } - - fn get_packet_receipt(&self, _key: &(PortId, ChannelId, Sequence)) -> Result { - unimplemented!() - } - - fn get_packet_acknowledgement( - &self, - _key: &(PortId, ChannelId, Sequence), - ) -> Result { - unimplemented!() - } - - fn hash(&self, value: Vec) -> Vec { - use sha2::Digest; - - sha2::Sha256::digest(value).to_vec() - } - - fn host_height(&self) -> Height { - Height::zero() - } - - fn host_consensus_state(&self, _height: Height) -> Result { - unimplemented!() - } - - fn pending_host_consensus_state(&self) -> Result { - unimplemented!() - } - - fn client_update_time( - &self, - _client_id: &ClientId, - _height: Height, - ) -> Result { - unimplemented!() - } - - fn client_update_height( - &self, - _client_id: &ClientId, - _height: Height, - ) -> Result { - unimplemented!() - } - - fn channel_counter(&self) -> Result { - unimplemented!() - } - - fn max_expected_time_per_block(&self) -> Duration { - unimplemented!() - } -} - -impl Ics20Context for DummyTransferModule { - type AccountId = Signer; } diff --git a/modules/tests/runner/mod.rs b/modules/tests/runner/mod.rs index 061b1ef81f..96a4e0ce11 100644 --- a/modules/tests/runner/mod.rs +++ b/modules/tests/runner/mod.rs @@ -17,6 +17,7 @@ use ibc::core::ics02_client::msgs::update_client::MsgUpdateAnyClient; use ibc::core::ics02_client::msgs::upgrade_client::MsgUpgradeAnyClient; use ibc::core::ics02_client::msgs::ClientMsg; use ibc::core::ics03_connection::connection::{Counterparty, State as 
ConnectionState}; +use ibc::core::ics03_connection::context::ConnectionReader; use ibc::core::ics03_connection::error as connection_error; use ibc::core::ics03_connection::msgs::conn_open_ack::MsgConnectionOpenAck; use ibc::core::ics03_connection::msgs::conn_open_confirm::MsgConnectionOpenConfirm; @@ -24,7 +25,6 @@ use ibc::core::ics03_connection::msgs::conn_open_init::MsgConnectionOpenInit; use ibc::core::ics03_connection::msgs::conn_open_try::MsgConnectionOpenTry; use ibc::core::ics03_connection::msgs::ConnectionMsg; use ibc::core::ics03_connection::version::Version; -use ibc::core::ics04_channel::context::ChannelReader; use ibc::core::ics23_commitment::commitment::{CommitmentPrefix, CommitmentProofBytes}; use ibc::core::ics24_host::identifier::{ChainId, ClientId, ConnectionId}; use ibc::core::ics26_routing::error as routing_error; @@ -311,7 +311,7 @@ impl IbcTestRunner { // create ICS26 message and deliver it let msg = Ics26Envelope::Ics2Msg(ClientMsg::CreateClient(MsgCreateAnyClient { client_state: Self::client_state(client_state), - consensus_state: Self::consensus_state(consensus_state), + consensus_state: Some(Self::consensus_state(consensus_state)), signer: Self::signer(), })); ctx.deliver(msg) From 2357be21448668d416ac7817102e6b2122379f73 Mon Sep 17 00:00:00 2001 From: David Salami Date: Fri, 13 May 2022 19:27:59 +0100 Subject: [PATCH 13/96] clippy fixes --- modules/src/clients/crypto_ops/crypto.rs | 6 ++- .../clients/ics07_tendermint/client_def.rs | 4 +- modules/src/clients/ics11_beefy/client_def.rs | 21 ++++------ .../src/clients/ics11_beefy/client_state.rs | 9 ++-- modules/src/clients/ics11_beefy/header.rs | 42 +++++++++---------- .../ics02_client/handler/update_client.rs | 7 ++-- .../core/ics02_client/msgs/create_client.rs | 5 +-- .../handler/conn_open_init.rs | 2 +- .../core/ics03_connection/handler/verify.rs | 6 +-- .../ics04_channel/handler/acknowledgement.rs | 2 +- .../handler/chan_close_confirm.rs | 2 +- .../ics04_channel/handler/chan_close_init.rs | 2 +- .../ics04_channel/handler/chan_open_ack.rs | 2 +- .../handler/chan_open_confirm.rs | 2 +- .../ics04_channel/handler/chan_open_init.rs | 2 +- .../ics04_channel/handler/chan_open_try.rs | 2 +- .../core/ics04_channel/handler/recv_packet.rs | 2 +- .../core/ics04_channel/handler/send_packet.rs | 2 +- .../src/core/ics04_channel/handler/timeout.rs | 2 +- .../ics04_channel/handler/timeout_on_close.rs | 2 +- .../src/core/ics04_channel/handler/verify.rs | 20 +++------ modules/src/test_utils.rs | 4 +- 22 files changed, 66 insertions(+), 82 deletions(-) diff --git a/modules/src/clients/crypto_ops/crypto.rs b/modules/src/clients/crypto_ops/crypto.rs index 492bdfb7fc..8e178f72a0 100644 --- a/modules/src/clients/crypto_ops/crypto.rs +++ b/modules/src/clients/crypto_ops/crypto.rs @@ -3,12 +3,14 @@ use crate::prelude::*; use beefy_client::traits::HostFunctions; use sp_core::H256; +/// This trait captures all the functions that the host chain should provide for +/// crypto operations. 
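// Presumably: in-tree, the test-only `Crypto` type in `test_utils.rs` backs these with `sp_io`
// and `sp_trie`, while a runtime host would supply its own native implementations.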
pub trait CryptoOps: HostFunctions + Clone { /// This function should verify membership in a trie proof using parity's sp-trie package /// with a BlakeTwo256 Hasher fn verify_membership_trie_proof( root: &H256, - proof: &Vec>, + proof: &[Vec], key: &[u8], value: &[u8], ) -> Result<(), Error>; @@ -16,7 +18,7 @@ pub trait CryptoOps: HostFunctions + Clone { /// with a BlakeTwo256 Hasher fn verify_non_membership_trie_proof( root: &H256, - proof: &Vec>, + proof: &[Vec], key: &[u8], ) -> Result<(), Error>; } diff --git a/modules/src/clients/ics07_tendermint/client_def.rs b/modules/src/clients/ics07_tendermint/client_def.rs index d1d5a17999..3acef4eabd 100644 --- a/modules/src/clients/ics07_tendermint/client_def.rs +++ b/modules/src/clients/ics07_tendermint/client_def.rs @@ -126,9 +126,7 @@ impl ClientDef for TendermintClient { let header_consensus_state = ConsensusState::from(header.clone()); Ok(( client_state.with_header(header), - ConsensusUpdateResult::Single(AnyConsensusState::Tendermint( - header_consensus_state.into(), - )), + ConsensusUpdateResult::Single(AnyConsensusState::Tendermint(header_consensus_state)), )) } diff --git a/modules/src/clients/ics11_beefy/client_def.rs b/modules/src/clients/ics11_beefy/client_def.rs index be9d1dab5a..9fc8be7365 100644 --- a/modules/src/clients/ics11_beefy/client_def.rs +++ b/modules/src/clients/ics11_beefy/client_def.rs @@ -1,5 +1,5 @@ use beefy_client::primitives::{ParachainHeader, ParachainsUpdateProof}; -use beefy_client::traits::{ClientState as LightClientState, HostFunctions as BeefyHostFunctions}; +use beefy_client::traits::ClientState as LightClientState; use beefy_client::BeefyLightClient; use codec::Encode; use core::convert::TryInto; @@ -40,9 +40,6 @@ use crate::core::ics24_host::path::{ }; use crate::downcast; -/// Methods definitions specific to Beefy Light Client operation -pub trait BeefyTraits: BeefyHostFunctions + Clone + Default {} - #[derive(Clone, Debug, PartialEq, Eq)] pub struct BeefyClient(PhantomData); @@ -133,11 +130,11 @@ impl ClientDef for BeefyClient { let mut parachain_cs_states = vec![]; let client_state = client_state .from_header(header.clone()) - .map_err(|e| Error::beefy(e))?; + .map_err(Error::beefy)?; for header in header.parachain_headers { let height = Height::new(header.para_id as u64, header.parachain_header.number as u64); // Skip duplicate consensus states - if let Ok(_) = ctx.consensus_state(&client_id, height) { + if ctx.consensus_state(&client_id, height).is_ok() { continue; } parachain_cs_states.push(( @@ -232,7 +229,7 @@ impl ClientDef for BeefyClient { channel_id: &ChannelId, expected_channel_end: &ChannelEnd, ) -> Result<(), Error> { - let path = ChannelEndsPath(port_id.clone(), channel_id.clone()); + let path = ChannelEndsPath(port_id.clone(), *channel_id); let value = expected_channel_end.encode_vec().unwrap(); verify_membership::(prefix, proof, root, path, value) } @@ -271,7 +268,7 @@ impl ClientDef for BeefyClient { let commitment_path = CommitmentsPath { port_id: port_id.clone(), - channel_id: channel_id.clone(), + channel_id: *channel_id, sequence, }; @@ -302,7 +299,7 @@ impl ClientDef for BeefyClient { let ack_path = AcksPath { port_id: port_id.clone(), - channel_id: channel_id.clone(), + channel_id: *channel_id, sequence, }; verify_membership::( @@ -331,7 +328,7 @@ impl ClientDef for BeefyClient { let seq_bytes = codec::Encode::encode(&u64::from(sequence)); - let seq_path = SeqRecvsPath(port_id.clone(), channel_id.clone()); + let seq_path = SeqRecvsPath(port_id.clone(), *channel_id); 
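// `SeqRecvsPath` renders to the ICS24 `nextSequenceRecv/ports/{port_id}/channels/{channel_id}`
// path, so this check reduces to an ordinary membership proof of the SCALE-encoded `u64` above.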
verify_membership::( connection_end.counterparty().prefix(), proof, @@ -358,7 +355,7 @@ impl ClientDef for BeefyClient { let receipt_path = ReceiptsPath { port_id: port_id.clone(), - channel_id: channel_id.clone(), + channel_id: *channel_id, sequence, }; verify_non_membership::( @@ -461,5 +458,5 @@ pub fn downcast_consensus_state(cs: AnyConsensusState) -> Result AnyConsensusState::Beefy ) - .ok_or(Error::client_args_type_mismatch(ClientType::Beefy)) + .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Beefy)) } diff --git a/modules/src/clients/ics11_beefy/client_state.rs b/modules/src/clients/ics11_beefy/client_state.rs index 6d708a41ae..227cea1ed7 100644 --- a/modules/src/clients/ics11_beefy/client_state.rs +++ b/modules/src/clients/ics11_beefy/client_state.rs @@ -116,7 +116,7 @@ impl ClientState { .commitment .payload .get_raw(&MMR_ROOT_ID) - .ok_or(Error::invalid_raw_header())?, + .ok_or_else(Error::invalid_raw_header)?, ), mmr_update.signed_commitment.commitment.block_number, mmr_update.latest_mmr_leaf.beefy_next_authority_set, @@ -252,7 +252,7 @@ impl TryFrom for ClientState { root: H256::decode(&mut &*set.authority_root).ok()?, }) }) - .ok_or(Error::missing_beefy_authority_set())?; + .ok_or_else(Error::missing_beefy_authority_set)?; let next_authority_set = raw .next_authority_set @@ -263,10 +263,9 @@ impl TryFrom for ClientState { root: H256::decode(&mut &*set.authority_root).ok()?, }) }) - .ok_or(Error::missing_beefy_authority_set())?; + .ok_or_else(Error::missing_beefy_authority_set)?; - let mmr_root_hash = - H256::decode(&mut &*raw.mmr_root_hash).map_err(|e| Error::scale_decode(e))?; + let mmr_root_hash = H256::decode(&mut &*raw.mmr_root_hash).map_err(Error::scale_decode)?; Ok(Self { chain_id: ChainId::default(), diff --git a/modules/src/clients/ics11_beefy/header.rs b/modules/src/clients/ics11_beefy/header.rs index c58ced57e4..a37b5c35ea 100644 --- a/modules/src/clients/ics11_beefy/header.rs +++ b/modules/src/clients/ics11_beefy/header.rs @@ -85,7 +85,7 @@ impl TryFrom for BeefyHeader { .map(|raw_para_header| { let mmr_partial_leaf = raw_para_header .mmr_leaf_partial - .ok_or(Error::invalid_raw_header())?; + .ok_or_else(Error::invalid_raw_header)?; let parent_hash = H256::decode(&mut mmr_partial_leaf.parent_hash.as_slice()).unwrap(); let beefy_next_authority_set = @@ -136,14 +136,14 @@ impl TryFrom for BeefyHeader { mmr_update .signed_commitment .as_ref() - .ok_or(Error::invalid_mmr_update("".to_string()))? + .ok_or_else(|| Error::invalid_mmr_update("".to_string()))? .commitment .as_ref() - .ok_or(Error::invalid_mmr_update("".to_string()))? + .ok_or_else(|| Error::invalid_mmr_update("".to_string()))? .payload .iter() .filter_map(|item| { - if item.payload_id.as_slice() != &MMR_ROOT_ID { + if item.payload_id.as_slice() != MMR_ROOT_ID { return None; } let mut payload_id = [0u8; 2]; @@ -152,28 +152,28 @@ impl TryFrom for BeefyHeader { }) .collect::>() .get(0) - .ok_or(Error::invalid_mmr_update("".to_string()))? + .ok_or_else(|| Error::invalid_mmr_update("".to_string()))? .clone() }; let block_number = mmr_update .signed_commitment .as_ref() - .ok_or(Error::invalid_mmr_update("".to_string()))? + .ok_or_else(|| Error::invalid_mmr_update("".to_string()))? .commitment .as_ref() - .ok_or(Error::invalid_mmr_update("".to_string()))? + .ok_or_else(|| Error::invalid_mmr_update("".to_string()))? .block_numer; let validator_set_id = mmr_update .signed_commitment .as_ref() - .ok_or(Error::invalid_mmr_update("".to_string()))? + .ok_or_else(|| Error::invalid_mmr_update("".to_string()))? 
.commitment .as_ref() - .ok_or(Error::invalid_mmr_update("".to_string()))? + .ok_or_else(|| Error::invalid_mmr_update("".to_string()))? .validator_set_id; let signatures = mmr_update .signed_commitment - .ok_or(Error::invalid_mmr_update("".to_string()))? + .ok_or_else(|| Error::invalid_mmr_update("".to_string()))? .signatures .into_iter() .map(|commitment_sig| { @@ -205,7 +205,7 @@ impl TryFrom for BeefyHeader { mmr_update .mmr_leaf .as_ref() - .ok_or(Error::invalid_mmr_update("".to_string()))? + .ok_or_else(|| Error::invalid_mmr_update("".to_string()))? .version .saturated_into::(), ); @@ -215,13 +215,13 @@ impl TryFrom for BeefyHeader { let parent_number = mmr_update .mmr_leaf .as_ref() - .ok_or(Error::invalid_mmr_update("".to_string()))? + .ok_or_else(|| Error::invalid_mmr_update("".to_string()))? .parent_number; let parent_hash = H256::decode( &mut mmr_update .mmr_leaf .as_ref() - .ok_or(Error::invalid_mmr_update("".to_string()))? + .ok_or_else(|| Error::invalid_mmr_update("".to_string()))? .parent_hash .as_slice(), ) @@ -232,27 +232,27 @@ impl TryFrom for BeefyHeader { id: mmr_update .mmr_leaf .as_ref() - .ok_or(Error::invalid_mmr_update("".to_string()))? + .ok_or_else(|| Error::invalid_mmr_update("".to_string()))? .beefy_next_authority_set .as_ref() - .ok_or(Error::invalid_mmr_update("".to_string()))? + .ok_or_else(|| Error::invalid_mmr_update("".to_string()))? .id, len: mmr_update .mmr_leaf .as_ref() - .ok_or(Error::invalid_mmr_update("".to_string()))? + .ok_or_else(|| Error::invalid_mmr_update("".to_string()))? .beefy_next_authority_set .as_ref() - .ok_or(Error::invalid_mmr_update("".to_string()))? + .ok_or_else(|| Error::invalid_mmr_update("".to_string()))? .len, root: H256::decode( &mut mmr_update .mmr_leaf .as_ref() - .ok_or(Error::invalid_mmr_update("".to_string()))? + .ok_or_else(|| Error::invalid_mmr_update("".to_string()))? .beefy_next_authority_set .as_ref() - .ok_or(Error::invalid_mmr_update("".to_string()))? + .ok_or_else(|| Error::invalid_mmr_update("".to_string()))? .authority_root .as_slice(), ) @@ -262,7 +262,7 @@ impl TryFrom for BeefyHeader { &mut mmr_update .mmr_leaf .as_ref() - .ok_or(Error::invalid_mmr_update("".to_string()))? + .ok_or_else(|| Error::invalid_mmr_update("".to_string()))? 
.parachain_heads .as_slice(), ) @@ -446,7 +446,7 @@ pub fn decode_timestamp_extrinsic(header: &ParachainHeader) -> Result( }; if found_misbehaviour { - let client_state = - client_def.update_state_on_misbehaviour(client_state.clone(), header.clone())?; + let client_state = client_def.update_state_on_misbehaviour(client_state, header)?; let result = ClientResult::Update(Result { - client_id: client_id.clone(), + client_id, client_state, consensus_state: None, processed_time: ctx.host_timestamp(), @@ -112,7 +111,7 @@ pub fn process( .map_err(|e| Error::header_verification_failure(e.to_string()))?; let result = ClientResult::Update(Result { - client_id: client_id.clone(), + client_id, client_state: new_client_state, consensus_state: Some(new_consensus_state), processed_time: ctx.host_timestamp(), diff --git a/modules/src/core/ics02_client/msgs/create_client.rs b/modules/src/core/ics02_client/msgs/create_client.rs index f79bdc0541..effd6bec72 100644 --- a/modules/src/core/ics02_client/msgs/create_client.rs +++ b/modules/src/core/ics02_client/msgs/create_client.rs @@ -73,8 +73,7 @@ impl TryFrom for MsgCreateAnyClient { let consensus_state = raw .consensus_state - .map(|cs| AnyConsensusState::try_from(cs).ok()) - .flatten(); + .and_then(|cs| AnyConsensusState::try_from(cs).ok()); MsgCreateAnyClient::new( AnyClientState::try_from(raw_client_state)?, @@ -88,7 +87,7 @@ impl From for RawMsgCreateClient { fn from(ics_msg: MsgCreateAnyClient) -> Self { RawMsgCreateClient { client_state: Some(ics_msg.client_state.into()), - consensus_state: ics_msg.consensus_state.and_then(|cs| Some(cs.into())), + consensus_state: ics_msg.consensus_state.map(|cs| cs.into()), signer: ics_msg.signer.to_string(), } } diff --git a/modules/src/core/ics03_connection/handler/conn_open_init.rs b/modules/src/core/ics03_connection/handler/conn_open_init.rs index 126b2f0c09..89b8efbb68 100644 --- a/modules/src/core/ics03_connection/handler/conn_open_init.rs +++ b/modules/src/core/ics03_connection/handler/conn_open_init.rs @@ -19,7 +19,7 @@ pub(crate) fn process( // An IBC client running on the local (host) chain should exist. ctx.client_state(&msg.client_id) - .map_err(|e| Error::ics02_client(e))?; + .map_err(Error::ics02_client)?; let versions = match msg.version { Some(version) => { diff --git a/modules/src/core/ics03_connection/handler/verify.rs b/modules/src/core/ics03_connection/handler/verify.rs index 9e61191c28..072688476f 100644 --- a/modules/src/core/ics03_connection/handler/verify.rs +++ b/modules/src/core/ics03_connection/handler/verify.rs @@ -70,7 +70,7 @@ pub fn verify_connection_proof( // Fetch the client state (IBC client on the local/host chain). let client_state = ctx .client_state(connection_end.client_id()) - .map_err(|e| Error::ics02_client(e))?; + .map_err(Error::ics02_client)?; // The client must not be frozen. if client_state.is_frozen() { @@ -125,7 +125,7 @@ pub fn verify_client_proof( // Fetch the local client state (IBC client running on the host chain). let client_state = ctx .client_state(connection_end.client_id()) - .map_err(|e| Error::ics02_client(e))?; + .map_err(Error::ics02_client)?; if client_state.is_frozen() { return Err(Error::frozen_client(connection_end.client_id().clone())); @@ -162,7 +162,7 @@ pub fn verify_consensus_proof( // Fetch the client state (IBC client on the local chain). 
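// As with the connection and client proofs above, the consensus proof is rejected outright
// if the local client has been frozen.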
let client_state = ctx .client_state(connection_end.client_id()) - .map_err(|e| Error::ics02_client(e))?; + .map_err(Error::ics02_client)?; if client_state.is_frozen() { return Err(Error::frozen_client(connection_end.client_id().clone())); diff --git a/modules/src/core/ics04_channel/handler/acknowledgement.rs b/modules/src/core/ics04_channel/handler/acknowledgement.rs index 0f4b88bc61..58f0ead9a7 100644 --- a/modules/src/core/ics04_channel/handler/acknowledgement.rs +++ b/modules/src/core/ics04_channel/handler/acknowledgement.rs @@ -50,7 +50,7 @@ pub fn process( let connection_end = ctx .connection_end(&source_channel_end.connection_hops()[0]) - .map_err(|e| Error::ics03_connection(e))?; + .map_err(Error::ics03_connection)?; if !connection_end.state_matches(&ConnectionState::Open) { return Err(Error::connection_not_open( diff --git a/modules/src/core/ics04_channel/handler/chan_close_confirm.rs b/modules/src/core/ics04_channel/handler/chan_close_confirm.rs index 073047d990..8aa92bb568 100644 --- a/modules/src/core/ics04_channel/handler/chan_close_confirm.rs +++ b/modules/src/core/ics04_channel/handler/chan_close_confirm.rs @@ -37,7 +37,7 @@ pub(crate) fn process( let conn = ctx .connection_end(&channel_end.connection_hops()[0]) - .map_err(|e| Error::ics03_connection(e))?; + .map_err(Error::ics03_connection)?; if !conn.state_matches(&ConnectionState::Open) { return Err(Error::connection_not_open( diff --git a/modules/src/core/ics04_channel/handler/chan_close_init.rs b/modules/src/core/ics04_channel/handler/chan_close_init.rs index 45d5b3aac5..634d661c51 100644 --- a/modules/src/core/ics04_channel/handler/chan_close_init.rs +++ b/modules/src/core/ics04_channel/handler/chan_close_init.rs @@ -36,7 +36,7 @@ pub(crate) fn process( let conn = ctx .connection_end(&channel_end.connection_hops()[0]) - .map_err(|e| Error::ics03_connection(e))?; + .map_err(Error::ics03_connection)?; if !conn.state_matches(&ConnectionState::Open) { return Err(Error::connection_not_open( diff --git a/modules/src/core/ics04_channel/handler/chan_open_ack.rs b/modules/src/core/ics04_channel/handler/chan_open_ack.rs index addc4f1f14..20109cafe0 100644 --- a/modules/src/core/ics04_channel/handler/chan_open_ack.rs +++ b/modules/src/core/ics04_channel/handler/chan_open_ack.rs @@ -40,7 +40,7 @@ pub(crate) fn process( let conn = ctx .connection_end(&channel_end.connection_hops()[0]) - .map_err(|e| Error::ics03_connection(e))?; + .map_err(Error::ics03_connection)?; if !conn.state_matches(&ConnectionState::Open) { return Err(Error::connection_not_open( diff --git a/modules/src/core/ics04_channel/handler/chan_open_confirm.rs b/modules/src/core/ics04_channel/handler/chan_open_confirm.rs index c2c9643050..fc955886ae 100644 --- a/modules/src/core/ics04_channel/handler/chan_open_confirm.rs +++ b/modules/src/core/ics04_channel/handler/chan_open_confirm.rs @@ -39,7 +39,7 @@ pub(crate) fn process( let conn = ctx .connection_end(&channel_end.connection_hops()[0]) - .map_err(|e| Error::ics03_connection(e))?; + .map_err(Error::ics03_connection)?; if !conn.state_matches(&ConnectionState::Open) { return Err(Error::connection_not_open( diff --git a/modules/src/core/ics04_channel/handler/chan_open_init.rs b/modules/src/core/ics04_channel/handler/chan_open_init.rs index f3a84a0b08..4a1e9ecc7a 100644 --- a/modules/src/core/ics04_channel/handler/chan_open_init.rs +++ b/modules/src/core/ics04_channel/handler/chan_open_init.rs @@ -27,7 +27,7 @@ pub(crate) fn process( // An IBC connection running on the local (host) chain should exist. 
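// Channel handshakes only ever use the first connection hop, hence the direct
// `connection_hops()[0]` index below.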
let conn = ctx .connection_end(&msg.channel.connection_hops()[0]) - .map_err(|e| Error::ics03_connection(e))?; + .map_err(Error::ics03_connection)?; let get_versions = conn.versions(); let version = match get_versions { [version] => version, diff --git a/modules/src/core/ics04_channel/handler/chan_open_try.rs b/modules/src/core/ics04_channel/handler/chan_open_try.rs index 814c818acc..2ce309dc7f 100644 --- a/modules/src/core/ics04_channel/handler/chan_open_try.rs +++ b/modules/src/core/ics04_channel/handler/chan_open_try.rs @@ -72,7 +72,7 @@ pub(crate) fn process( let conn = ctx .connection_end(&msg.channel.connection_hops()[0]) - .map_err(|e| Error::ics03_connection(e))?; + .map_err(Error::ics03_connection)?; if !conn.state_matches(&ConnectionState::Open) { return Err(Error::connection_not_open( msg.channel.connection_hops()[0].clone(), diff --git a/modules/src/core/ics04_channel/handler/recv_packet.rs b/modules/src/core/ics04_channel/handler/recv_packet.rs index ab1ab95db3..b316807e4e 100644 --- a/modules/src/core/ics04_channel/handler/recv_packet.rs +++ b/modules/src/core/ics04_channel/handler/recv_packet.rs @@ -57,7 +57,7 @@ pub fn process( let connection_end = ctx .connection_end(&dest_channel_end.connection_hops()[0]) - .map_err(|e| Error::ics03_connection(e))?; + .map_err(Error::ics03_connection)?; if !connection_end.state_matches(&ConnectionState::Open) { return Err(Error::connection_not_open( diff --git a/modules/src/core/ics04_channel/handler/send_packet.rs b/modules/src/core/ics04_channel/handler/send_packet.rs index 2c6a1f2451..6023f79ee0 100644 --- a/modules/src/core/ics04_channel/handler/send_packet.rs +++ b/modules/src/core/ics04_channel/handler/send_packet.rs @@ -48,7 +48,7 @@ pub fn send_packet( let connection_end = ctx .connection_end(&source_channel_end.connection_hops()[0]) - .map_err(|e| Error::ics03_connection(e))?; + .map_err(Error::ics03_connection)?; let client_id = connection_end.client_id().clone(); diff --git a/modules/src/core/ics04_channel/handler/timeout.rs b/modules/src/core/ics04_channel/handler/timeout.rs index ce8ac55f14..0c45e2709e 100644 --- a/modules/src/core/ics04_channel/handler/timeout.rs +++ b/modules/src/core/ics04_channel/handler/timeout.rs @@ -52,7 +52,7 @@ pub fn process( let connection_end = ctx .connection_end(&source_channel_end.connection_hops()[0]) - .map_err(|e| Error::ics03_connection(e))?; + .map_err(Error::ics03_connection)?; let client_id = connection_end.client_id().clone(); diff --git a/modules/src/core/ics04_channel/handler/timeout_on_close.rs b/modules/src/core/ics04_channel/handler/timeout_on_close.rs index 5bcc4c6282..0e944e7d19 100644 --- a/modules/src/core/ics04_channel/handler/timeout_on_close.rs +++ b/modules/src/core/ics04_channel/handler/timeout_on_close.rs @@ -39,7 +39,7 @@ pub fn process( let connection_end = ctx .connection_end(&source_channel_end.connection_hops()[0]) - .map_err(|e| Error::ics03_connection(e))?; + .map_err(Error::ics03_connection)?; //verify the packet was sent, check the store let packet_commitment = ctx.get_packet_commitment(&( diff --git a/modules/src/core/ics04_channel/handler/verify.rs b/modules/src/core/ics04_channel/handler/verify.rs index ca53d1a3e2..b5e9d29106 100644 --- a/modules/src/core/ics04_channel/handler/verify.rs +++ b/modules/src/core/ics04_channel/handler/verify.rs @@ -24,9 +24,7 @@ pub fn verify_channel_proofs( // This is the client which will perform proof verification. 
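// The verifying client is the one referenced by the channel's underlying connection end.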
let client_id = connection_end.client_id().clone(); - let client_state = ctx - .client_state(&client_id) - .map_err(|e| Error::ics02_client(e))?; + let client_state = ctx.client_state(&client_id).map_err(Error::ics02_client)?; // The client must not be frozen. if client_state.is_frozen() { @@ -66,9 +64,7 @@ pub fn verify_packet_recv_proofs( proofs: &Proofs, ) -> Result<(), Error> { let client_id = connection_end.client_id(); - let client_state = ctx - .client_state(client_id) - .map_err(|e| Error::ics02_client(e))?; + let client_state = ctx.client_state(client_id).map_err(Error::ics02_client)?; // The client must not be frozen. if client_state.is_frozen() { @@ -117,9 +113,7 @@ pub fn verify_packet_acknowledgement_proofs( proofs: &Proofs, ) -> Result<(), Error> { let client_id = connection_end.client_id(); - let client_state = ctx - .client_state(client_id) - .map_err(|e| Error::ics02_client(e))?; + let client_state = ctx.client_state(client_id).map_err(Error::ics02_client)?; // The client must not be frozen. if client_state.is_frozen() { @@ -164,9 +158,7 @@ pub fn verify_next_sequence_recv( proofs: &Proofs, ) -> Result<(), Error> { let client_id = connection_end.client_id(); - let client_state = ctx - .client_state(client_id) - .map_err(|e| Error::ics02_client(e))?; + let client_state = ctx.client_state(client_id).map_err(Error::ics02_client)?; // The client must not be frozen. if client_state.is_frozen() { @@ -206,9 +198,7 @@ pub fn verify_packet_receipt_absence( proofs: &Proofs, ) -> Result<(), Error> { let client_id = connection_end.client_id(); - let client_state = ctx - .client_state(client_id) - .map_err(|e| Error::ics02_client(e))?; + let client_state = ctx.client_state(client_id).map_err(Error::ics02_client)?; // The client must not be frozen. 
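// A frozen client (one with recorded misbehaviour) must not be used for proof verification,
// so each verify_* helper repeats this check before touching the light client.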
if client_state.is_frozen() { diff --git a/modules/src/test_utils.rs b/modules/src/test_utils.rs index c94cd61078..219249018f 100644 --- a/modules/src/test_utils.rs +++ b/modules/src/test_utils.rs @@ -106,7 +106,7 @@ impl HostFunctions for Crypto { impl CryptoOps for Crypto { fn verify_membership_trie_proof( root: &sp_core::H256, - proof: &Vec>, + proof: &[Vec], key: &[u8], value: &[u8], ) -> Result<(), Ics02Error> { @@ -119,7 +119,7 @@ impl CryptoOps for Crypto { fn verify_non_membership_trie_proof( root: &sp_core::H256, - proof: &Vec>, + proof: &[Vec], key: &[u8], ) -> Result<(), Ics02Error> { let item: Vec<(&[u8], Option<&[u8]>)> = vec![(key, None)]; From b001191725eed090682271e496edf769c6af1f74 Mon Sep 17 00:00:00 2001 From: David Salami Date: Mon, 16 May 2022 15:56:35 +0100 Subject: [PATCH 14/96] fix failing tests --- Cargo.lock | 26 ++++--------------- Cargo.toml | 12 ++++----- .../clients/ics07_tendermint/client_def.rs | 18 +++++++++++++ modules/src/clients/ics11_beefy/client_def.rs | 6 ++++- .../ics04_channel/handler/chan_open_try.rs | 12 +++------ 5 files changed, 38 insertions(+), 36 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c79c1573a4..47f600fe8c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -141,7 +141,7 @@ dependencies = [ "log", "pin-project-lite", "tokio", - "tokio-rustls 0.22.0", + "tokio-rustls", "tungstenite", "webpki-roots 0.21.1", ] @@ -1560,9 +1560,9 @@ dependencies = [ "http", "hyper", "hyper-rustls", - "rustls-native-certs 0.5.0", + "rustls-native-certs", "tokio", - "tokio-rustls 0.22.0", + "tokio-rustls", "tower-service", "webpki 0.21.4", ] @@ -1578,9 +1578,9 @@ dependencies = [ "hyper", "log", "rustls 0.19.1", - "rustls-native-certs 0.5.0", + "rustls-native-certs", "tokio", - "tokio-rustls 0.22.0", + "tokio-rustls", "webpki 0.21.4", "webpki-roots 0.21.1", ] @@ -1669,12 +1669,6 @@ dependencies = [ "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", ] -[[package]] -name = "ident_case" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" - [[package]] name = "idna" version = "0.2.3" @@ -4802,7 +4796,6 @@ dependencies = [ "once_cell", "parking_lot", "pin-project-lite", - "signal-hook-registry", "socket2", "tokio-macros", "winapi", @@ -5160,15 +5153,6 @@ dependencies = [ "static_assertions", ] -[[package]] -name = "unicase" -version = "2.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50f37be617794602aabbeee0be4f259dc1778fabe05e2d67ee8f79326d5cb4f6" -dependencies = [ - "version_check", -] - [[package]] name = "unicode-bidi" version = "0.3.8" diff --git a/Cargo.toml b/Cargo.toml index eaf4e09dfc..ec9bd9cbf0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -4,13 +4,13 @@ resolver = "2" members = [ "modules", - "relayer", - "relayer-cli", - "relayer-rest", - "telemetry", + # "relayer", + # "relayer-cli", + # "relayer-rest", + # "telemetry", "proto", - "tools/integration-test", - "tools/test-framework", + # "tools/integration-test", + # "tools/test-framework", ] exclude = [ diff --git a/modules/src/clients/ics07_tendermint/client_def.rs b/modules/src/clients/ics07_tendermint/client_def.rs index 3acef4eabd..9d7997f354 100644 --- a/modules/src/clients/ics07_tendermint/client_def.rs +++ b/modules/src/clients/ics07_tendermint/client_def.rs @@ -61,6 +61,24 @@ impl ClientDef for TendermintClient { )); } + // Check if a consensus state is already installed; if so skip + let header_consensus_state 
= ConsensusState::from(header.clone()); + + let _ = match ctx.maybe_consensus_state(&client_id, header.height())? { + Some(cs) => { + let cs = downcast_consensus_state(cs)?; + // If this consensus state matches, skip verification + // (optimization) + if cs == header_consensus_state { + // Header is already installed and matches the incoming + // header (already verified) + return Ok(()); + } + Some(cs) + } + None => None, + }; + let trusted_consensus_state = downcast_consensus_state(ctx.consensus_state(&client_id, header.trusted_height)?)?; diff --git a/modules/src/clients/ics11_beefy/client_def.rs b/modules/src/clients/ics11_beefy/client_def.rs index 9fc8be7365..dd406ab2b2 100644 --- a/modules/src/clients/ics11_beefy/client_def.rs +++ b/modules/src/clients/ics11_beefy/client_def.rs @@ -70,6 +70,7 @@ impl ClientDef for BeefyClient { }; let mut light_client = BeefyLightClient::::new(); // If mmr update exists verify it and return the new light client state + // or else return existing light client state let light_client_state = if let Some(mmr_update) = header.mmr_update_proof { light_client .verify_mmr_root_with_proof(light_client_state, mmr_update) @@ -78,6 +79,7 @@ impl ClientDef for BeefyClient { light_client_state }; + // Extract parachain headers from the beefy header if they exist let mut leaf_indices = vec![]; let parachain_headers = header .parachain_headers @@ -115,6 +117,7 @@ impl ClientDef for BeefyClient { }, }; + // Perform the parachain header verification light_client .verify_parachain_headers(light_client_state, parachain_update_proof) .map_err(|e| Error::beefy(BeefyError::invalid_mmr_update(format!("{:?}", e)))) @@ -128,6 +131,7 @@ impl ClientDef for BeefyClient { header: Self::Header, ) -> Result<(Self::ClientState, ConsensusUpdateResult), Error> { let mut parachain_cs_states = vec![]; + // Extract the new client state from the verified header let client_state = client_state .from_header(header.clone()) .map_err(Error::beefy)?; @@ -174,7 +178,7 @@ impl ClientDef for BeefyClient { _client_state: Self::ClientState, _header: Self::Header, ) -> Result { - todo!() + Ok(false) } fn verify_client_consensus_state( diff --git a/modules/src/core/ics04_channel/handler/chan_open_try.rs b/modules/src/core/ics04_channel/handler/chan_open_try.rs index 2ce309dc7f..6e38c4d100 100644 --- a/modules/src/core/ics04_channel/handler/chan_open_try.rs +++ b/modules/src/core/ics04_channel/handler/chan_open_try.rs @@ -344,16 +344,12 @@ mod tests { msg: ChannelMsg::ChannelOpenTry(msg.clone()), want_pass: false, match_error: Box::new(|e| match e { - error::ErrorDetail::Ics03Connection(e) => { + error::ErrorDetail::Ics02Client(e) => { assert_eq!( e.source, - ics03_error::ErrorDetail::Ics02Client( - ics03_error::Ics02ClientSubdetail { - source: ics02_error::ErrorDetail::ClientNotFound( - ics02_error::ClientNotFoundSubdetail { - client_id: ClientId::new(ClientType::Mock, 45).unwrap() - } - ) + ics02_error::ErrorDetail::ClientNotFound( + ics02_error::ClientNotFoundSubdetail { + client_id: ClientId::new(ClientType::Mock, 45).unwrap() } ) ); From 03d72a1bd41fd5d94283be0254cfb8eb6553ddbd Mon Sep 17 00:00:00 2001 From: David Salami Date: Tue, 17 May 2022 10:29:35 +0100 Subject: [PATCH 15/96] add failing beefy client update test --- Cargo.lock | 41 +- modules/Cargo.toml | 11 +- modules/src/clients/ics11_beefy/client_def.rs | 4 +- .../src/clients/ics11_beefy/client_state.rs | 17 +- .../clients/ics11_beefy/consensus_state.rs | 13 +- modules/src/clients/ics11_beefy/mod.rs | 3 + 
.../clients/ics11_beefy/polkadot_runtime.rs | 32110 ++++++++++++++++ .../ics02_client/handler/update_client.rs | 424 +- modules/src/lib.rs | 4 +- 9 files changed, 32594 insertions(+), 33 deletions(-) create mode 100644 modules/src/clients/ics11_beefy/polkadot_runtime.rs diff --git a/Cargo.lock b/Cargo.lock index 47f600fe8c..fb9c11724a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -141,7 +141,7 @@ dependencies = [ "log", "pin-project-lite", "tokio", - "tokio-rustls", + "tokio-rustls 0.22.0", "tungstenite", "webpki-roots 0.21.1", ] @@ -423,6 +423,15 @@ dependencies = [ "memchr", ] +[[package]] +name = "bstr" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba3569f383e8f1598449f1a423e72e99569137b47740b1da11ef19af3d5c3223" +dependencies = [ + "memchr", +] + [[package]] name = "bumpalo" version = "3.9.1" @@ -1560,9 +1569,9 @@ dependencies = [ "http", "hyper", "hyper-rustls", - "rustls-native-certs", + "rustls-native-certs 0.5.0", "tokio", - "tokio-rustls", + "tokio-rustls 0.22.0", "tower-service", "webpki 0.21.4", ] @@ -1578,9 +1587,9 @@ dependencies = [ "hyper", "log", "rustls 0.19.1", - "rustls-native-certs", + "rustls-native-certs 0.5.0", "tokio", - "tokio-rustls", + "tokio-rustls 0.22.0", "webpki 0.21.4", "webpki-roots 0.21.1", ] @@ -1608,10 +1617,14 @@ dependencies = [ "derive_more", "env_logger", "flex-error", + "frame-support", + "hex-literal", "ibc-proto", "ics23", "modelator", "num-traits", + "pallet-beefy-mmr", + "pallet-mmr-rpc", "parity-scale-codec", "prost", "prost-types", @@ -1669,6 +1682,12 @@ dependencies = [ "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", ] +[[package]] +name = "ident_case" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" + [[package]] name = "idna" version = "0.2.3" @@ -3653,8 +3672,6 @@ name = "sp-arithmetic" version = "5.0.0" source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" dependencies = [ - "integer-sqrt", - "num-traits", "parity-scale-codec", "scale-info", "serde", @@ -4796,6 +4813,7 @@ dependencies = [ "once_cell", "parking_lot", "pin-project-lite", + "signal-hook-registry", "socket2", "tokio-macros", "winapi", @@ -5153,6 +5171,15 @@ dependencies = [ "static_assertions", ] +[[package]] +name = "unicase" +version = "2.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50f37be617794602aabbeee0be4f259dc1778fabe05e2d67ee8f79326d5cb4f6" +dependencies = [ + "version_check", +] + [[package]] name = "unicode-bidi" version = "0.3.8" diff --git a/modules/Cargo.toml b/modules/Cargo.toml index f2d1b9de0e..50b176207d 100644 --- a/modules/Cargo.toml +++ b/modules/Cargo.toml @@ -59,7 +59,7 @@ uint = { version = "0.9", default-features = false } beefy-client = { package = "beefy-generic-client", git = "https://github.com/ComposableFi/beefy-client", branch = "david/refactor-traits", default-features = false } sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.22", default-features = false } sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.22", default-features = false } -pallet-mmr-primitives = { package = "sp-mmr-primitives", git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.22" , default-features = false } +pallet-mmr-primitives = { package = "sp-mmr-primitives", git = 
"https://github.com/paritytech/substrate", branch = "polkadot-v0.9.22", default-features = false } beefy-primitives = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.22", default-features = false } codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false } sp-trie = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.22", default-features = false } @@ -90,7 +90,16 @@ modelator = "0.4.2" sha2 = { version = "0.10.2" } tendermint-rpc = { version = "=0.23.7", features = ["http-client", "websocket-client"] } tendermint-testgen = { version = "=0.23.7" } # Needed for generating (synthetic) light blocks. +# Beefy Light Client testing dependencies sp-io = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.22"} +subxt = "0.21.0" +tokio = { version = "1.17.0", features = ["full"] } +hex-literal = "0.3.4" +serde_json = "1.0.74" +pallet-mmr-rpc = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.22" } +frame-support = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.22" } +rs_merkle = { version = "1.2.0" } +beefy-mmr = { package = "pallet-beefy-mmr", git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.22" } [[test]] name = "mbt" diff --git a/modules/src/clients/ics11_beefy/client_def.rs b/modules/src/clients/ics11_beefy/client_def.rs index dd406ab2b2..e4679eb05d 100644 --- a/modules/src/clients/ics11_beefy/client_def.rs +++ b/modules/src/clients/ics11_beefy/client_def.rs @@ -87,7 +87,7 @@ impl ClientDef for BeefyClient { .into_iter() .map(|header| { let leaf_index = - client_state.to_leaf_index(header.partial_mmr_leaf.parent_number_and_hash.0); + client_state.to_leaf_index(header.partial_mmr_leaf.parent_number_and_hash.0 + 1); leaf_indices.push(leaf_index as u64); ParachainHeader { parachain_header: header.parachain_header.encode(), @@ -143,7 +143,7 @@ impl ClientDef for BeefyClient { } parachain_cs_states.push(( height, - AnyConsensusState::Beefy(ConsensusState::from(header)), + AnyConsensusState::Beefy(ConsensusState::try_from(header)?), )) } diff --git a/modules/src/clients/ics11_beefy/client_state.rs b/modules/src/clients/ics11_beefy/client_state.rs index 227cea1ed7..7a42ea8f40 100644 --- a/modules/src/clients/ics11_beefy/client_state.rs +++ b/modules/src/clients/ics11_beefy/client_state.rs @@ -93,9 +93,9 @@ impl ClientState { pub fn to_leaf_index(&self, block_number: u32) -> u32 { if self.beefy_activation_block == 0 { - return block_number - 1; + return block_number.saturating_sub(1); } - self.beefy_activation_block - (block_number + 1) + self.beefy_activation_block.saturating_sub(block_number + 1) } /// Should only be called if this header has been verified successfully @@ -314,7 +314,7 @@ pub mod test_util { ChainId::new("polkadot".to_string(), 1), Default::default(), 0, - 1, + 0, Default::default(), Default::default(), ) @@ -322,14 +322,3 @@ pub mod test_util { ) } } -#[cfg(test)] -mod tests { - #[test] - fn client_state_new() {} - - #[test] - fn client_state_verify_delay_passed() {} - - #[test] - fn client_state_verify_height() {} -} diff --git a/modules/src/clients/ics11_beefy/consensus_state.rs b/modules/src/clients/ics11_beefy/consensus_state.rs index b42d728fed..f6f3c4b3d6 100644 --- a/modules/src/clients/ics11_beefy/consensus_state.rs +++ b/modules/src/clients/ics11_beefy/consensus_state.rs @@ -130,8 +130,9 @@ impl From for RawConsensusState { } } -impl From for ConsensusState { - fn from(header: ParachainHeader) -> 
Self { +impl TryFrom for ConsensusState { + type Error = Error; + fn try_from(header: ParachainHeader) -> Result { let root = { header .parachain_header @@ -141,7 +142,7 @@ impl From for ConsensusState { .filter_map(|digest| digest.as_consensus()) .find(|(id, _value)| id == &IBC_CONSENSUS_ID) .map(|(.., root)| root.to_vec()) - .unwrap_or_default() + .ok_or(Error::invalid_header("cannot find ibc commitment root".to_string()))? }; let timestamp = decode_timestamp_extrinsic(&header).unwrap_or_default(); @@ -149,12 +150,12 @@ impl From for ConsensusState { let timestamp = Timestamp::from_nanoseconds(duration.as_nanos().saturated_into::()) .unwrap_or_default() .into_tm_time() - .unwrap(); + .ok_or(Error::invalid_header("cannot decode timestamp extrinsic".to_string()))?; - Self { + Ok(Self { root: root.into(), timestamp, - } + }) } } diff --git a/modules/src/clients/ics11_beefy/mod.rs b/modules/src/clients/ics11_beefy/mod.rs index 2b16db780e..430166e55d 100644 --- a/modules/src/clients/ics11_beefy/mod.rs +++ b/modules/src/clients/ics11_beefy/mod.rs @@ -7,3 +7,6 @@ pub mod consensus_state; pub mod error; pub mod header; pub mod misbehaviour; + +#[cfg(test)] +pub mod polkadot_runtime; diff --git a/modules/src/clients/ics11_beefy/polkadot_runtime.rs b/modules/src/clients/ics11_beefy/polkadot_runtime.rs new file mode 100644 index 0000000000..13e74d0809 --- /dev/null +++ b/modules/src/clients/ics11_beefy/polkadot_runtime.rs @@ -0,0 +1,32110 @@ +#[allow(dead_code, unused_imports, non_camel_case_types, unused_qualifications)] +pub mod api { + use super::api as root_mod; + pub static PALLETS: [&str; 49usize] = [ + "System", + "Babe", + "Timestamp", + "Indices", + "Balances", + "TransactionPayment", + "Authorship", + "Offences", + "Historical", + "Session", + "Grandpa", + "ImOnline", + "AuthorityDiscovery", + "ParachainsOrigin", + "Configuration", + "ParasShared", + "ParaInclusion", + "ParaInherent", + "ParaScheduler", + "Paras", + "Initializer", + "Dmp", + "Ump", + "Hrmp", + "ParaSessionInfo", + "ParasDisputes", + "Registrar", + "Auctions", + "Crowdloan", + "Slots", + "ParasSudoWrapper", + "AssignedSlots", + "Sudo", + "Mmr", + "Beefy", + "MmrLeaf", + "ValidatorManager", + "BridgeRococoGrandpa", + "BridgeWococoGrandpa", + "BridgeRococoMessages", + "BridgeWococoMessages", + "BridgeRococoMessagesDispatch", + "BridgeWococoMessagesDispatch", + "Collective", + "Membership", + "Utility", + "Proxy", + "Multisig", + "XcmPallet", + ]; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Event { + #[codec(index = 0)] + System(system::Event), + #[codec(index = 3)] + Indices(indices::Event), + #[codec(index = 4)] + Balances(balances::Event), + #[codec(index = 7)] + Offences(offences::Event), + #[codec(index = 9)] + Session(session::Event), + #[codec(index = 10)] + Grandpa(grandpa::Event), + #[codec(index = 11)] + ImOnline(im_online::Event), + #[codec(index = 16)] + ParaInclusion(para_inclusion::Event), + #[codec(index = 19)] + Paras(paras::Event), + #[codec(index = 22)] + Ump(ump::Event), + #[codec(index = 23)] + Hrmp(hrmp::Event), + #[codec(index = 25)] + ParasDisputes(paras_disputes::Event), + #[codec(index = 26)] + Registrar(registrar::Event), + #[codec(index = 27)] + Auctions(auctions::Event), + #[codec(index = 28)] + Crowdloan(crowdloan::Event), + #[codec(index = 29)] + Slots(slots::Event), + #[codec(index = 31)] + AssignedSlots(assigned_slots::Event), + #[codec(index = 32)] + Sudo(sudo::Event), + #[codec(index = 36)] + ValidatorManager(validator_manager::Event), + 
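// Note: the `#[codec(index = ...)]` values mirror the pallet indices of the Rococo runtime,
// which is why the variant numbering in this generated enum is sparse.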
#[codec(index = 43)] + BridgeRococoMessages(bridge_rococo_messages::Event), + #[codec(index = 44)] + BridgeWococoMessages(bridge_wococo_messages::Event), + #[codec(index = 45)] + BridgeRococoMessagesDispatch(bridge_rococo_messages_dispatch::Event), + #[codec(index = 46)] + BridgeWococoMessagesDispatch(bridge_wococo_messages_dispatch::Event), + #[codec(index = 80)] + Collective(collective::Event), + #[codec(index = 81)] + Membership(membership::Event), + #[codec(index = 90)] + Utility(utility::Event), + #[codec(index = 91)] + Proxy(proxy::Event), + #[codec(index = 92)] + Multisig(multisig::Event), + #[codec(index = 99)] + XcmPallet(xcm_pallet::Event), + } + pub mod system { + use super::root_mod; + use super::runtime_types; + pub mod calls { + use super::root_mod; + use super::runtime_types; + type DispatchError = runtime_types::sp_runtime::DispatchError; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct FillBlock { + pub ratio: runtime_types::sp_arithmetic::per_things::Perbill, + } + impl ::subxt::Call for FillBlock { + const PALLET: &'static str = "System"; + const FUNCTION: &'static str = "fill_block"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct Remark { + pub remark: ::std::vec::Vec<::core::primitive::u8>, + } + impl ::subxt::Call for Remark { + const PALLET: &'static str = "System"; + const FUNCTION: &'static str = "remark"; + } + #[derive( + :: subxt :: codec :: CompactAs, + :: subxt :: codec :: Decode, + :: subxt :: codec :: Encode, + Debug, + )] + pub struct SetHeapPages { + pub pages: ::core::primitive::u64, + } + impl ::subxt::Call for SetHeapPages { + const PALLET: &'static str = "System"; + const FUNCTION: &'static str = "set_heap_pages"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct SetCode { + pub code: ::std::vec::Vec<::core::primitive::u8>, + } + impl ::subxt::Call for SetCode { + const PALLET: &'static str = "System"; + const FUNCTION: &'static str = "set_code"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct SetCodeWithoutChecks { + pub code: ::std::vec::Vec<::core::primitive::u8>, + } + impl ::subxt::Call for SetCodeWithoutChecks { + const PALLET: &'static str = "System"; + const FUNCTION: &'static str = "set_code_without_checks"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct SetStorage { + pub items: ::std::vec::Vec<( + ::std::vec::Vec<::core::primitive::u8>, + ::std::vec::Vec<::core::primitive::u8>, + )>, + } + impl ::subxt::Call for SetStorage { + const PALLET: &'static str = "System"; + const FUNCTION: &'static str = "set_storage"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct KillStorage { + pub keys: ::std::vec::Vec<::std::vec::Vec<::core::primitive::u8>>, + } + impl ::subxt::Call for KillStorage { + const PALLET: &'static str = "System"; + const FUNCTION: &'static str = "kill_storage"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct KillPrefix { + pub prefix: ::std::vec::Vec<::core::primitive::u8>, + pub subkeys: ::core::primitive::u32, + } + impl ::subxt::Call for KillPrefix { + const PALLET: &'static str = "System"; + const FUNCTION: &'static str = "kill_prefix"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct RemarkWithEvent { + pub remark: ::std::vec::Vec<::core::primitive::u8>, + } + impl 
::subxt::Call for RemarkWithEvent { + const PALLET: &'static str = "System"; + const FUNCTION: &'static str = "remark_with_event"; + } + pub struct TransactionApi<'a, T: ::subxt::Config, X> { + client: &'a ::subxt::Client, + marker: ::core::marker::PhantomData, + } + impl<'a, T, X> TransactionApi<'a, T, X> + where + T: ::subxt::Config, + X: ::subxt::extrinsic::ExtrinsicParams, + { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { + client, + marker: ::core::marker::PhantomData, + } + } + #[doc = "A dispatch that will fill the block weight up to the given ratio."] + pub fn fill_block( + &self, + ratio: runtime_types::sp_arithmetic::per_things::Perbill, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + FillBlock, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 19u8, 117u8, 154u8, 157u8, 101u8, 132u8, 50u8, 77u8, 245u8, 5u8, 230u8, + 232u8, 2u8, 69u8, 26u8, 239u8, 159u8, 26u8, 226u8, 121u8, 106u8, 82u8, + 67u8, 53u8, 138u8, 184u8, 39u8, 83u8, 220u8, 251u8, 206u8, 16u8, + ] + { + let call = FillBlock { ratio }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Make some on-chain remark."] + #[doc = ""] + #[doc = "# "] + #[doc = "- `O(1)`"] + #[doc = "# "] + pub fn remark( + &self, + remark: ::std::vec::Vec<::core::primitive::u8>, + ) -> Result< + ::subxt::SubmittableExtrinsic<'a, T, X, Remark, DispatchError, root_mod::Event>, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 186u8, 79u8, 33u8, 199u8, 216u8, 115u8, 19u8, 146u8, 220u8, 174u8, + 98u8, 61u8, 179u8, 230u8, 40u8, 70u8, 22u8, 251u8, 77u8, 62u8, 133u8, + 80u8, 186u8, 70u8, 135u8, 172u8, 178u8, 241u8, 69u8, 106u8, 235u8, + 140u8, + ] + { + let call = Remark { remark }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Set the number of pages in the WebAssembly environment's heap."] + pub fn set_heap_pages( + &self, + pages: ::core::primitive::u64, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + SetHeapPages, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 77u8, 138u8, 122u8, 55u8, 179u8, 101u8, 60u8, 137u8, 173u8, 39u8, 28u8, + 36u8, 237u8, 243u8, 232u8, 162u8, 76u8, 176u8, 135u8, 58u8, 60u8, + 177u8, 105u8, 136u8, 94u8, 53u8, 26u8, 31u8, 41u8, 156u8, 228u8, 241u8, + ] + { + let call = SetHeapPages { pages }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Set the new runtime code."] + #[doc = ""] + #[doc = "# "] + #[doc = "- `O(C + S)` where `C` length of `code` and `S` complexity of `can_set_code`"] + #[doc = "- 1 call to `can_set_code`: `O(S)` (calls `sp_io::misc::runtime_version` which is"] + #[doc = " expensive)."] + #[doc = "- 1 storage write (codec `O(C)`)."] + #[doc = "- 1 digest item."] + #[doc = "- 1 event."] + #[doc = "The weight of this function is dependent on the runtime, but generally this is very"] + #[doc = "expensive. 
We will treat this as a full block."] + #[doc = "# "] + pub fn set_code( + &self, + code: ::std::vec::Vec<::core::primitive::u8>, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + SetCode, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 35u8, 75u8, 103u8, 203u8, 91u8, 141u8, 77u8, 95u8, 37u8, 157u8, 107u8, + 240u8, 54u8, 242u8, 245u8, 205u8, 104u8, 165u8, 177u8, 37u8, 86u8, + 197u8, 28u8, 202u8, 121u8, 159u8, 18u8, 204u8, 237u8, 117u8, 141u8, + 131u8, + ] + { + let call = SetCode { code }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Set the new runtime code without doing any checks of the given `code`."] + #[doc = ""] + #[doc = "# "] + #[doc = "- `O(C)` where `C` length of `code`"] + #[doc = "- 1 storage write (codec `O(C)`)."] + #[doc = "- 1 digest item."] + #[doc = "- 1 event."] + #[doc = "The weight of this function is dependent on the runtime. We will treat this as a full"] + #[doc = "block. # "] + pub fn set_code_without_checks( + &self, + code: ::std::vec::Vec<::core::primitive::u8>, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + SetCodeWithoutChecks, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 150u8, 148u8, 119u8, 129u8, 77u8, 216u8, 135u8, 187u8, 127u8, 24u8, + 238u8, 15u8, 227u8, 229u8, 191u8, 217u8, 106u8, 129u8, 149u8, 79u8, + 154u8, 78u8, 53u8, 159u8, 89u8, 69u8, 103u8, 197u8, 93u8, 161u8, 134u8, + 17u8, + ] + { + let call = SetCodeWithoutChecks { code }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Set some items of storage."] + pub fn set_storage( + &self, + items: ::std::vec::Vec<( + ::std::vec::Vec<::core::primitive::u8>, + ::std::vec::Vec<::core::primitive::u8>, + )>, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + SetStorage, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 197u8, 12u8, 119u8, 205u8, 152u8, 103u8, 211u8, 170u8, 146u8, 253u8, + 25u8, 56u8, 180u8, 146u8, 74u8, 75u8, 38u8, 108u8, 212u8, 154u8, 23u8, + 22u8, 148u8, 175u8, 107u8, 186u8, 222u8, 13u8, 149u8, 132u8, 204u8, + 217u8, + ] + { + let call = SetStorage { items }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Kill some items from storage."] + pub fn kill_storage( + &self, + keys: ::std::vec::Vec<::std::vec::Vec<::core::primitive::u8>>, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + KillStorage, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? 
+ == [ + 154u8, 115u8, 185u8, 20u8, 126u8, 90u8, 222u8, 131u8, 199u8, 57u8, + 184u8, 226u8, 43u8, 245u8, 161u8, 176u8, 194u8, 123u8, 139u8, 97u8, + 97u8, 94u8, 47u8, 64u8, 204u8, 96u8, 190u8, 94u8, 216u8, 237u8, 69u8, + 51u8, + ] + { + let call = KillStorage { keys }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Kill all storage items with a key that starts with the given prefix."] + #[doc = ""] + #[doc = "**NOTE:** We rely on the Root origin to provide us the number of subkeys under"] + #[doc = "the prefix we are removing to accurately calculate the weight of this function."] + pub fn kill_prefix( + &self, + prefix: ::std::vec::Vec<::core::primitive::u8>, + subkeys: ::core::primitive::u32, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + KillPrefix, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 214u8, 101u8, 191u8, 241u8, 1u8, 241u8, 144u8, 116u8, 246u8, 199u8, + 159u8, 249u8, 155u8, 164u8, 220u8, 221u8, 75u8, 33u8, 204u8, 3u8, + 255u8, 201u8, 187u8, 238u8, 181u8, 213u8, 41u8, 105u8, 234u8, 120u8, + 202u8, 115u8, + ] + { + let call = KillPrefix { prefix, subkeys }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Make some on-chain remark and emit event."] + pub fn remark_with_event( + &self, + remark: ::std::vec::Vec<::core::primitive::u8>, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + RemarkWithEvent, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 171u8, 82u8, 75u8, 237u8, 69u8, 197u8, 223u8, 125u8, 123u8, 51u8, + 241u8, 35u8, 202u8, 210u8, 227u8, 109u8, 1u8, 241u8, 255u8, 63u8, 33u8, + 115u8, 156u8, 239u8, 97u8, 76u8, 193u8, 35u8, 74u8, 199u8, 43u8, 255u8, + ] + { + let call = RemarkWithEvent { remark }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + pub type Event = runtime_types::frame_system::pallet::Event; + pub mod events { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "An extrinsic completed successfully."] + pub struct ExtrinsicSuccess { + pub dispatch_info: runtime_types::frame_support::weights::DispatchInfo, + } + impl ::subxt::Event for ExtrinsicSuccess { + const PALLET: &'static str = "System"; + const EVENT: &'static str = "ExtrinsicSuccess"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "An extrinsic failed."] + pub struct ExtrinsicFailed { + pub dispatch_error: runtime_types::sp_runtime::DispatchError, + pub dispatch_info: runtime_types::frame_support::weights::DispatchInfo, + } + impl ::subxt::Event for ExtrinsicFailed { + const PALLET: &'static str = "System"; + const EVENT: &'static str = "ExtrinsicFailed"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "`:code` was updated."] + pub struct CodeUpdated; + impl ::subxt::Event for CodeUpdated { + const PALLET: &'static str = "System"; + const EVENT: &'static str = "CodeUpdated"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "A new account was created."] + pub struct NewAccount { + pub account: ::subxt::sp_core::crypto::AccountId32, + } + 
impl ::subxt::Event for NewAccount { + const PALLET: &'static str = "System"; + const EVENT: &'static str = "NewAccount"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "An account was reaped."] + pub struct KilledAccount { + pub account: ::subxt::sp_core::crypto::AccountId32, + } + impl ::subxt::Event for KilledAccount { + const PALLET: &'static str = "System"; + const EVENT: &'static str = "KilledAccount"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "On on-chain remark happened."] + pub struct Remarked { + pub sender: ::subxt::sp_core::crypto::AccountId32, + pub hash: ::subxt::sp_core::H256, + } + impl ::subxt::Event for Remarked { + const PALLET: &'static str = "System"; + const EVENT: &'static str = "Remarked"; + } + } + pub mod storage { + use super::runtime_types; + pub struct Account<'a>(pub &'a ::subxt::sp_core::crypto::AccountId32); + impl ::subxt::StorageEntry for Account<'_> { + const PALLET: &'static str = "System"; + const STORAGE: &'static str = "Account"; + type Value = runtime_types::frame_system::AccountInfo< + ::core::primitive::u32, + runtime_types::pallet_balances::AccountData<::core::primitive::u128>, + >; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( + &self.0, + ::subxt::StorageHasher::Blake2_128Concat, + )]) + } + } + pub struct ExtrinsicCount; + impl ::subxt::StorageEntry for ExtrinsicCount { + const PALLET: &'static str = "System"; + const STORAGE: &'static str = "ExtrinsicCount"; + type Value = ::core::primitive::u32; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct BlockWeight; + impl ::subxt::StorageEntry for BlockWeight { + const PALLET: &'static str = "System"; + const STORAGE: &'static str = "BlockWeight"; + type Value = + runtime_types::frame_support::weights::PerDispatchClass<::core::primitive::u64>; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct AllExtrinsicsLen; + impl ::subxt::StorageEntry for AllExtrinsicsLen { + const PALLET: &'static str = "System"; + const STORAGE: &'static str = "AllExtrinsicsLen"; + type Value = ::core::primitive::u32; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct BlockHash<'a>(pub &'a ::core::primitive::u32); + impl ::subxt::StorageEntry for BlockHash<'_> { + const PALLET: &'static str = "System"; + const STORAGE: &'static str = "BlockHash"; + type Value = ::subxt::sp_core::H256; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( + &self.0, + ::subxt::StorageHasher::Twox64Concat, + )]) + } + } + pub struct ExtrinsicData<'a>(pub &'a ::core::primitive::u32); + impl ::subxt::StorageEntry for ExtrinsicData<'_> { + const PALLET: &'static str = "System"; + const STORAGE: &'static str = "ExtrinsicData"; + type Value = ::std::vec::Vec<::core::primitive::u8>; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( + &self.0, + ::subxt::StorageHasher::Twox64Concat, + )]) + } + } + pub struct Number; + impl ::subxt::StorageEntry for Number { + const PALLET: &'static str = "System"; + const STORAGE: &'static str = "Number"; + type Value = ::core::primitive::u32; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct ParentHash; + impl ::subxt::StorageEntry for 
ParentHash { + const PALLET: &'static str = "System"; + const STORAGE: &'static str = "ParentHash"; + type Value = ::subxt::sp_core::H256; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct Digest; + impl ::subxt::StorageEntry for Digest { + const PALLET: &'static str = "System"; + const STORAGE: &'static str = "Digest"; + type Value = runtime_types::sp_runtime::generic::digest::Digest; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct Events; + impl ::subxt::StorageEntry for Events { + const PALLET: &'static str = "System"; + const STORAGE: &'static str = "Events"; + type Value = ::std::vec::Vec< + runtime_types::frame_system::EventRecord< + runtime_types::rococo_runtime::Event, + ::subxt::sp_core::H256, + >, + >; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct EventCount; + impl ::subxt::StorageEntry for EventCount { + const PALLET: &'static str = "System"; + const STORAGE: &'static str = "EventCount"; + type Value = ::core::primitive::u32; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct EventTopics<'a>(pub &'a ::subxt::sp_core::H256); + impl ::subxt::StorageEntry for EventTopics<'_> { + const PALLET: &'static str = "System"; + const STORAGE: &'static str = "EventTopics"; + type Value = ::std::vec::Vec<(::core::primitive::u32, ::core::primitive::u32)>; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( + &self.0, + ::subxt::StorageHasher::Blake2_128Concat, + )]) + } + } + pub struct LastRuntimeUpgrade; + impl ::subxt::StorageEntry for LastRuntimeUpgrade { + const PALLET: &'static str = "System"; + const STORAGE: &'static str = "LastRuntimeUpgrade"; + type Value = runtime_types::frame_system::LastRuntimeUpgradeInfo; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct UpgradedToU32RefCount; + impl ::subxt::StorageEntry for UpgradedToU32RefCount { + const PALLET: &'static str = "System"; + const STORAGE: &'static str = "UpgradedToU32RefCount"; + type Value = ::core::primitive::bool; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct UpgradedToTripleRefCount; + impl ::subxt::StorageEntry for UpgradedToTripleRefCount { + const PALLET: &'static str = "System"; + const STORAGE: &'static str = "UpgradedToTripleRefCount"; + type Value = ::core::primitive::bool; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct ExecutionPhase; + impl ::subxt::StorageEntry for ExecutionPhase { + const PALLET: &'static str = "System"; + const STORAGE: &'static str = "ExecutionPhase"; + type Value = runtime_types::frame_system::Phase; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct StorageApi<'a, T: ::subxt::Config> { + client: &'a ::subxt::Client, + } + impl<'a, T: ::subxt::Config> StorageApi<'a, T> { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { client } + } + #[doc = " The full account information for a particular account ID."] + pub async fn account( + &self, + _0: &::subxt::sp_core::crypto::AccountId32, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + runtime_types::frame_system::AccountInfo< + ::core::primitive::u32, + runtime_types::pallet_balances::AccountData<::core::primitive::u128>, + >, + 
::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 224u8, 184u8, 2u8, 14u8, 38u8, 177u8, 223u8, 98u8, 223u8, 15u8, 130u8, + 23u8, 212u8, 69u8, 61u8, 165u8, 171u8, 61u8, 171u8, 57u8, 88u8, 71u8, + 168u8, 172u8, 54u8, 91u8, 109u8, 231u8, 169u8, 167u8, 195u8, 46u8, + ] + { + let entry = Account(_0); + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The full account information for a particular account ID."] + pub async fn account_iter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result<::subxt::KeyIter<'a, T, Account<'a>>, ::subxt::BasicError> + { + if self.client.metadata().storage_hash::()? + == [ + 224u8, 184u8, 2u8, 14u8, 38u8, 177u8, 223u8, 98u8, 223u8, 15u8, 130u8, + 23u8, 212u8, 69u8, 61u8, 165u8, 171u8, 61u8, 171u8, 57u8, 88u8, 71u8, + 168u8, 172u8, 54u8, 91u8, 109u8, 231u8, 169u8, 167u8, 195u8, 46u8, + ] + { + self.client.storage().iter(block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Total extrinsics count for the current block."] + pub async fn extrinsic_count( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::core::option::Option<::core::primitive::u32>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 223u8, 60u8, 201u8, 120u8, 36u8, 44u8, 180u8, 210u8, 242u8, 53u8, + 222u8, 154u8, 123u8, 176u8, 249u8, 8u8, 225u8, 28u8, 232u8, 4u8, 136u8, + 41u8, 151u8, 82u8, 189u8, 149u8, 49u8, 166u8, 139u8, 9u8, 163u8, 231u8, + ] + { + let entry = ExtrinsicCount; + self.client.storage().fetch(&entry, block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The current weight for the block."] + pub async fn block_weight( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + runtime_types::frame_support::weights::PerDispatchClass<::core::primitive::u64>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 2u8, 236u8, 190u8, 174u8, 244u8, 98u8, 194u8, 168u8, 89u8, 208u8, 7u8, + 45u8, 175u8, 171u8, 177u8, 121u8, 215u8, 190u8, 184u8, 195u8, 49u8, + 133u8, 44u8, 1u8, 181u8, 215u8, 89u8, 84u8, 255u8, 16u8, 57u8, 152u8, + ] + { + let entry = BlockWeight; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Total length (in bytes) for all extrinsics put together, for the current block."] + pub async fn all_extrinsics_len( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::core::option::Option<::core::primitive::u32>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 202u8, 145u8, 209u8, 225u8, 40u8, 220u8, 174u8, 74u8, 93u8, 164u8, + 254u8, 248u8, 254u8, 192u8, 32u8, 117u8, 96u8, 149u8, 53u8, 145u8, + 219u8, 64u8, 234u8, 18u8, 217u8, 200u8, 203u8, 141u8, 145u8, 28u8, + 134u8, 60u8, + ] + { + let entry = AllExtrinsicsLen; + self.client.storage().fetch(&entry, block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Map of block numbers to block hashes."] + pub async fn block_hash( + &self, + _0: &::core::primitive::u32, + block_hash: ::core::option::Option, + ) -> ::core::result::Result<::subxt::sp_core::H256, ::subxt::BasicError> + { + if self.client.metadata().storage_hash::()? 
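// Illustrative sketch, not part of the generated bindings: each StorageEntry
// impl above fixes the pallet and storage names, the decoded Value type and the
// key hashing (Blake2_128Concat for Account, Twox64Concat for BlockHash and
// ExtrinsicData), and every async accessor repeats the storage-hash guard before
// querying the node. Assuming `storage` is the System StorageApi above and
// `alice` an AccountId32 (both assumptions):
//
//     // Read System::Account for one key at the node's best block.
//     let info = storage.account(&alice, None).await?;
//     // AccountInfo has an on-chain default, hence fetch_or_default above;
//     // optional entries such as ExtrinsicCount go through fetch and come
//     // back wrapped in an Option instead.
//     let maybe_count = storage.extrinsic_count(None).await?;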
+ == [ + 111u8, 201u8, 1u8, 177u8, 247u8, 64u8, 190u8, 182u8, 232u8, 51u8, + 217u8, 13u8, 155u8, 248u8, 105u8, 99u8, 205u8, 215u8, 155u8, 66u8, + 132u8, 103u8, 79u8, 100u8, 63u8, 118u8, 106u8, 195u8, 134u8, 237u8, + 236u8, 148u8, + ] + { + let entry = BlockHash(_0); + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Map of block numbers to block hashes."] + pub async fn block_hash_iter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::subxt::KeyIter<'a, T, BlockHash<'a>>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 111u8, 201u8, 1u8, 177u8, 247u8, 64u8, 190u8, 182u8, 232u8, 51u8, + 217u8, 13u8, 155u8, 248u8, 105u8, 99u8, 205u8, 215u8, 155u8, 66u8, + 132u8, 103u8, 79u8, 100u8, 63u8, 118u8, 106u8, 195u8, 134u8, 237u8, + 236u8, 148u8, + ] + { + self.client.storage().iter(block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Extrinsics data for the current block (maps an extrinsic's index to its data)."] + pub async fn extrinsic_data( + &self, + _0: &::core::primitive::u32, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::std::vec::Vec<::core::primitive::u8>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 210u8, 224u8, 211u8, 186u8, 118u8, 210u8, 185u8, 194u8, 238u8, 211u8, + 254u8, 73u8, 67u8, 184u8, 31u8, 229u8, 168u8, 125u8, 98u8, 23u8, 241u8, + 59u8, 49u8, 86u8, 126u8, 9u8, 114u8, 163u8, 160u8, 62u8, 50u8, 67u8, + ] + { + let entry = ExtrinsicData(_0); + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Extrinsics data for the current block (maps an extrinsic's index to its data)."] + pub async fn extrinsic_data_iter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::subxt::KeyIter<'a, T, ExtrinsicData<'a>>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 210u8, 224u8, 211u8, 186u8, 118u8, 210u8, 185u8, 194u8, 238u8, 211u8, + 254u8, 73u8, 67u8, 184u8, 31u8, 229u8, 168u8, 125u8, 98u8, 23u8, 241u8, + 59u8, 49u8, 86u8, 126u8, 9u8, 114u8, 163u8, 160u8, 62u8, 50u8, 67u8, + ] + { + self.client.storage().iter(block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The current block number being processed. Set by `execute_block`."] + pub async fn number( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> + { + if self.client.metadata().storage_hash::()? + == [ + 228u8, 96u8, 102u8, 190u8, 252u8, 130u8, 239u8, 172u8, 126u8, 235u8, + 246u8, 139u8, 208u8, 15u8, 88u8, 245u8, 141u8, 232u8, 43u8, 204u8, + 36u8, 87u8, 211u8, 141u8, 187u8, 68u8, 236u8, 70u8, 193u8, 235u8, + 164u8, 191u8, + ] + { + let entry = Number; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Hash of the previous block."] + pub async fn parent_hash( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result<::subxt::sp_core::H256, ::subxt::BasicError> + { + if self.client.metadata().storage_hash::()? 
+ == [ + 181u8, 119u8, 0u8, 41u8, 126u8, 107u8, 154u8, 144u8, 248u8, 193u8, + 101u8, 89u8, 243u8, 242u8, 70u8, 202u8, 1u8, 65u8, 77u8, 38u8, 35u8, + 175u8, 192u8, 103u8, 250u8, 36u8, 51u8, 175u8, 71u8, 74u8, 117u8, 93u8, + ] + { + let entry = ParentHash; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Digest of the current block, also part of the block header."] + pub async fn digest( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + runtime_types::sp_runtime::generic::digest::Digest, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 10u8, 176u8, 13u8, 228u8, 226u8, 42u8, 210u8, 151u8, 107u8, 212u8, + 136u8, 15u8, 38u8, 182u8, 225u8, 12u8, 250u8, 56u8, 193u8, 243u8, + 219u8, 113u8, 95u8, 233u8, 21u8, 229u8, 125u8, 146u8, 92u8, 250u8, + 32u8, 168u8, + ] + { + let entry = Digest; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Events deposited for the current block."] + #[doc = ""] + #[doc = " NOTE: The item is unbound and should therefore never be read on chain."] + #[doc = " It could otherwise inflate the PoV size of a block."] + #[doc = ""] + #[doc = " Events have a large in-memory size. Box the events to not go out-of-memory"] + #[doc = " just in case someone still reads them from within the runtime."] + pub async fn events( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::std::vec::Vec< + runtime_types::frame_system::EventRecord< + runtime_types::rococo_runtime::Event, + ::subxt::sp_core::H256, + >, + >, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 170u8, 108u8, 138u8, 175u8, 31u8, 58u8, 48u8, 44u8, 240u8, 15u8, 234u8, + 219u8, 45u8, 212u8, 124u8, 19u8, 128u8, 188u8, 18u8, 149u8, 64u8, + 237u8, 155u8, 101u8, 87u8, 89u8, 70u8, 180u8, 60u8, 201u8, 90u8, 25u8, + ] + { + let entry = Events; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The number of events in the `Events` list."] + pub async fn event_count( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> + { + if self.client.metadata().storage_hash::()? + == [ + 236u8, 93u8, 90u8, 177u8, 250u8, 211u8, 138u8, 187u8, 26u8, 208u8, + 203u8, 113u8, 221u8, 233u8, 227u8, 9u8, 249u8, 25u8, 202u8, 185u8, + 161u8, 144u8, 167u8, 104u8, 127u8, 187u8, 38u8, 18u8, 52u8, 61u8, 66u8, + 112u8, + ] + { + let entry = EventCount; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Mapping between a topic (represented by T::Hash) and a vector of indexes"] + #[doc = " of events in the `>` list."] + #[doc = ""] + #[doc = " All topic vectors have deterministic storage locations depending on the topic. 
This"] + #[doc = " allows light-clients to leverage the changes trie storage tracking mechanism and"] + #[doc = " in case of changes fetch the list of events of interest."] + #[doc = ""] + #[doc = " The value has the type `(T::BlockNumber, EventIndex)` because if we used only just"] + #[doc = " the `EventIndex` then in case if the topic has the same contents on the next block"] + #[doc = " no notification will be triggered thus the event might be lost."] + pub async fn event_topics( + &self, + _0: &::subxt::sp_core::H256, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::std::vec::Vec<(::core::primitive::u32, ::core::primitive::u32)>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 144u8, 227u8, 63u8, 224u8, 232u8, 119u8, 45u8, 240u8, 123u8, 241u8, + 77u8, 214u8, 215u8, 164u8, 35u8, 64u8, 51u8, 235u8, 122u8, 146u8, + 182u8, 88u8, 109u8, 61u8, 43u8, 105u8, 84u8, 230u8, 166u8, 187u8, + 239u8, 95u8, + ] + { + let entry = EventTopics(_0); + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Mapping between a topic (represented by T::Hash) and a vector of indexes"] + #[doc = " of events in the `>` list."] + #[doc = ""] + #[doc = " All topic vectors have deterministic storage locations depending on the topic. This"] + #[doc = " allows light-clients to leverage the changes trie storage tracking mechanism and"] + #[doc = " in case of changes fetch the list of events of interest."] + #[doc = ""] + #[doc = " The value has the type `(T::BlockNumber, EventIndex)` because if we used only just"] + #[doc = " the `EventIndex` then in case if the topic has the same contents on the next block"] + #[doc = " no notification will be triggered thus the event might be lost."] + pub async fn event_topics_iter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::subxt::KeyIter<'a, T, EventTopics<'a>>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 144u8, 227u8, 63u8, 224u8, 232u8, 119u8, 45u8, 240u8, 123u8, 241u8, + 77u8, 214u8, 215u8, 164u8, 35u8, 64u8, 51u8, 235u8, 122u8, 146u8, + 182u8, 88u8, 109u8, 61u8, 43u8, 105u8, 84u8, 230u8, 166u8, 187u8, + 239u8, 95u8, + ] + { + self.client.storage().iter(block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Stores the `spec_version` and `spec_name` of when the last runtime upgrade happened."] + pub async fn last_runtime_upgrade( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::core::option::Option, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .storage_hash::()? + == [ + 219u8, 153u8, 158u8, 38u8, 45u8, 65u8, 151u8, 137u8, 53u8, 76u8, 11u8, + 181u8, 218u8, 248u8, 125u8, 190u8, 100u8, 240u8, 173u8, 75u8, 179u8, + 137u8, 198u8, 197u8, 248u8, 185u8, 118u8, 58u8, 42u8, 165u8, 125u8, + 119u8, + ] + { + let entry = LastRuntimeUpgrade; + self.client.storage().fetch(&entry, block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " True if we have upgraded so that `type RefCount` is `u32`. False (default) if not."] + pub async fn upgraded_to_u32_ref_count( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result<::core::primitive::bool, ::subxt::BasicError> + { + if self + .client + .metadata() + .storage_hash::()? 
+ == [ + 171u8, 88u8, 244u8, 92u8, 122u8, 67u8, 27u8, 18u8, 59u8, 175u8, 175u8, + 178u8, 20u8, 150u8, 213u8, 59u8, 222u8, 141u8, 32u8, 107u8, 3u8, 114u8, + 83u8, 250u8, 180u8, 233u8, 152u8, 54u8, 187u8, 99u8, 131u8, 204u8, + ] + { + let entry = UpgradedToU32RefCount; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " True if we have upgraded so that AccountInfo contains three types of `RefCount`. False"] + #[doc = " (default) if not."] + pub async fn upgraded_to_triple_ref_count( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result<::core::primitive::bool, ::subxt::BasicError> + { + if self + .client + .metadata() + .storage_hash::()? + == [ + 90u8, 33u8, 56u8, 86u8, 90u8, 101u8, 89u8, 133u8, 203u8, 56u8, 201u8, + 210u8, 244u8, 232u8, 150u8, 18u8, 51u8, 105u8, 14u8, 230u8, 103u8, + 155u8, 246u8, 99u8, 53u8, 207u8, 225u8, 128u8, 186u8, 76u8, 40u8, + 185u8, + ] + { + let entry = UpgradedToTripleRefCount; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The execution phase of the block."] + pub async fn execution_phase( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::core::option::Option, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 174u8, 13u8, 230u8, 220u8, 239u8, 161u8, 172u8, 122u8, 188u8, 95u8, + 141u8, 118u8, 91u8, 158u8, 111u8, 145u8, 243u8, 173u8, 226u8, 212u8, + 187u8, 118u8, 94u8, 132u8, 221u8, 244u8, 61u8, 148u8, 217u8, 30u8, + 238u8, 225u8, + ] + { + let entry = ExecutionPhase; + self.client.storage().fetch(&entry, block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + pub mod constants { + use super::runtime_types; + pub struct ConstantsApi<'a, T: ::subxt::Config> { + client: &'a ::subxt::Client, + } + impl<'a, T: ::subxt::Config> ConstantsApi<'a, T> { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { client } + } + #[doc = " Block & extrinsics weights: base values and limits."] + pub fn block_weights( + &self, + ) -> ::core::result::Result< + runtime_types::frame_system::limits::BlockWeights, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .constant_hash("System", "BlockWeights")? + == [ + 215u8, 15u8, 243u8, 205u8, 15u8, 204u8, 67u8, 181u8, 5u8, 25u8, 77u8, + 32u8, 15u8, 69u8, 250u8, 90u8, 118u8, 42u8, 116u8, 3u8, 231u8, 203u8, + 152u8, 28u8, 230u8, 136u8, 184u8, 234u8, 38u8, 25u8, 58u8, 181u8, + ] + { + let pallet = self.client.metadata().pallet("System")?; + let constant = pallet.constant("BlockWeights")?; + let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; + Ok(value) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The maximum length of a block (in bytes)."] + pub fn block_length( + &self, + ) -> ::core::result::Result< + runtime_types::frame_system::limits::BlockLength, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .constant_hash("System", "BlockLength")? 
+ == [ + 120u8, 249u8, 182u8, 103u8, 246u8, 214u8, 149u8, 44u8, 42u8, 64u8, 2u8, + 56u8, 157u8, 184u8, 43u8, 195u8, 214u8, 251u8, 207u8, 207u8, 249u8, + 105u8, 203u8, 108u8, 179u8, 93u8, 93u8, 246u8, 40u8, 175u8, 160u8, + 114u8, + ] + { + let pallet = self.client.metadata().pallet("System")?; + let constant = pallet.constant("BlockLength")?; + let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; + Ok(value) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Maximum number of block number to block hash mappings to keep (oldest pruned first)."] + pub fn block_hash_count( + &self, + ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> + { + if self + .client + .metadata() + .constant_hash("System", "BlockHashCount")? + == [ + 123u8, 126u8, 182u8, 103u8, 71u8, 187u8, 233u8, 8u8, 47u8, 226u8, + 159u8, 139u8, 0u8, 59u8, 190u8, 135u8, 189u8, 77u8, 190u8, 81u8, 39u8, + 198u8, 224u8, 219u8, 70u8, 143u8, 6u8, 132u8, 196u8, 61u8, 117u8, + 194u8, + ] + { + let pallet = self.client.metadata().pallet("System")?; + let constant = pallet.constant("BlockHashCount")?; + let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; + Ok(value) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The weight of runtime database operations the runtime can invoke."] + pub fn db_weight( + &self, + ) -> ::core::result::Result< + runtime_types::frame_support::weights::RuntimeDbWeight, + ::subxt::BasicError, + > { + if self.client.metadata().constant_hash("System", "DbWeight")? + == [ + 203u8, 8u8, 106u8, 152u8, 74u8, 132u8, 2u8, 132u8, 244u8, 106u8, 147u8, + 12u8, 93u8, 80u8, 61u8, 158u8, 172u8, 178u8, 228u8, 125u8, 213u8, + 102u8, 75u8, 210u8, 64u8, 185u8, 204u8, 84u8, 10u8, 164u8, 204u8, 62u8, + ] + { + let pallet = self.client.metadata().pallet("System")?; + let constant = pallet.constant("DbWeight")?; + let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; + Ok(value) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Get the chain's current version."] + pub fn version( + &self, + ) -> ::core::result::Result< + runtime_types::sp_version::RuntimeVersion, + ::subxt::BasicError, + > { + if self.client.metadata().constant_hash("System", "Version")? + == [ + 204u8, 182u8, 166u8, 232u8, 201u8, 27u8, 210u8, 58u8, 156u8, 104u8, + 233u8, 214u8, 202u8, 35u8, 247u8, 203u8, 119u8, 118u8, 106u8, 249u8, + 73u8, 145u8, 104u8, 122u8, 34u8, 30u8, 41u8, 131u8, 209u8, 223u8, + 165u8, 89u8, + ] + { + let pallet = self.client.metadata().pallet("System")?; + let constant = pallet.constant("Version")?; + let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; + Ok(value) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The designated SS85 prefix of this chain."] + #[doc = ""] + #[doc = " This replaces the \"ss58Format\" property declared in the chain spec. Reason is"] + #[doc = " that the runtime should know about the prefix in order to make use of it as"] + #[doc = " an identifier of the chain."] + pub fn ss58_prefix( + &self, + ) -> ::core::result::Result<::core::primitive::u16, ::subxt::BasicError> + { + if self + .client + .metadata() + .constant_hash("System", "SS58Prefix")? 
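// Illustrative sketch, not part of the generated bindings: the constants
// accessors above never issue a storage query; the value is SCALE-encoded inside
// the metadata itself, so after the constant_hash guard the generated code just
// looks up the pallet constant and decodes its bytes. Assuming `constants` is
// the System ConstantsApi above:
//
//     let hash_count: u32 = constants.block_hash_count()?;
//     let db_weight = constants.db_weight()?; // per-operation read/write weights
//     // As with calls and storage, a metadata mismatch yields
//     // MetadataError::IncompatibleMetadata rather than a silently wrong decode.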
+ == [ + 197u8, 217u8, 49u8, 68u8, 82u8, 238u8, 120u8, 50u8, 91u8, 58u8, 6u8, + 156u8, 40u8, 1u8, 241u8, 213u8, 141u8, 74u8, 83u8, 115u8, 117u8, 41u8, + 119u8, 50u8, 140u8, 136u8, 163u8, 185u8, 34u8, 190u8, 60u8, 97u8, + ] + { + let pallet = self.client.metadata().pallet("System")?; + let constant = pallet.constant("SS58Prefix")?; + let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; + Ok(value) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + } + pub mod babe { + use super::root_mod; + use super::runtime_types; + pub mod calls { + use super::root_mod; + use super::runtime_types; + type DispatchError = runtime_types::sp_runtime::DispatchError; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct ReportEquivocation { + pub equivocation_proof: ::std::boxed::Box< + runtime_types::sp_consensus_slots::EquivocationProof< + runtime_types::sp_runtime::generic::header::Header< + ::core::primitive::u32, + runtime_types::sp_runtime::traits::BlakeTwo256, + >, + runtime_types::sp_consensus_babe::app::Public, + >, + >, + pub key_owner_proof: runtime_types::sp_session::MembershipProof, + } + impl ::subxt::Call for ReportEquivocation { + const PALLET: &'static str = "Babe"; + const FUNCTION: &'static str = "report_equivocation"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct ReportEquivocationUnsigned { + pub equivocation_proof: ::std::boxed::Box< + runtime_types::sp_consensus_slots::EquivocationProof< + runtime_types::sp_runtime::generic::header::Header< + ::core::primitive::u32, + runtime_types::sp_runtime::traits::BlakeTwo256, + >, + runtime_types::sp_consensus_babe::app::Public, + >, + >, + pub key_owner_proof: runtime_types::sp_session::MembershipProof, + } + impl ::subxt::Call for ReportEquivocationUnsigned { + const PALLET: &'static str = "Babe"; + const FUNCTION: &'static str = "report_equivocation_unsigned"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct PlanConfigChange { + pub config: runtime_types::sp_consensus_babe::digests::NextConfigDescriptor, + } + impl ::subxt::Call for PlanConfigChange { + const PALLET: &'static str = "Babe"; + const FUNCTION: &'static str = "plan_config_change"; + } + pub struct TransactionApi<'a, T: ::subxt::Config, X> { + client: &'a ::subxt::Client, + marker: ::core::marker::PhantomData, + } + impl<'a, T, X> TransactionApi<'a, T, X> + where + T: ::subxt::Config, + X: ::subxt::extrinsic::ExtrinsicParams, + { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { + client, + marker: ::core::marker::PhantomData, + } + } + #[doc = "Report authority equivocation/misbehavior. This method will verify"] + #[doc = "the equivocation proof and validate the given key ownership proof"] + #[doc = "against the extracted offender. If both are valid, the offence will"] + #[doc = "be reported."] + pub fn report_equivocation( + &self, + equivocation_proof: runtime_types::sp_consensus_slots::EquivocationProof< + runtime_types::sp_runtime::generic::header::Header< + ::core::primitive::u32, + runtime_types::sp_runtime::traits::BlakeTwo256, + >, + runtime_types::sp_consensus_babe::app::Public, + >, + key_owner_proof: runtime_types::sp_session::MembershipProof, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + ReportEquivocation, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? 
+ == [ + 232u8, 217u8, 226u8, 60u8, 92u8, 214u8, 213u8, 240u8, 11u8, 38u8, + 145u8, 190u8, 54u8, 204u8, 114u8, 237u8, 217u8, 125u8, 134u8, 160u8, + 46u8, 159u8, 183u8, 227u8, 28u8, 35u8, 223u8, 54u8, 160u8, 75u8, 26u8, + 236u8, + ] + { + let call = ReportEquivocation { + equivocation_proof: ::std::boxed::Box::new(equivocation_proof), + key_owner_proof, + }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Report authority equivocation/misbehavior. This method will verify"] + #[doc = "the equivocation proof and validate the given key ownership proof"] + #[doc = "against the extracted offender. If both are valid, the offence will"] + #[doc = "be reported."] + #[doc = "This extrinsic must be called unsigned and it is expected that only"] + #[doc = "block authors will call it (validated in `ValidateUnsigned`), as such"] + #[doc = "if the block author is defined it will be defined as the equivocation"] + #[doc = "reporter."] + pub fn report_equivocation_unsigned( + &self, + equivocation_proof: runtime_types::sp_consensus_slots::EquivocationProof< + runtime_types::sp_runtime::generic::header::Header< + ::core::primitive::u32, + runtime_types::sp_runtime::traits::BlakeTwo256, + >, + runtime_types::sp_consensus_babe::app::Public, + >, + key_owner_proof: runtime_types::sp_session::MembershipProof, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + ReportEquivocationUnsigned, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .call_hash::()? + == [ + 2u8, 244u8, 199u8, 230u8, 31u8, 46u8, 172u8, 21u8, 66u8, 34u8, 136u8, + 7u8, 58u8, 92u8, 103u8, 61u8, 1u8, 149u8, 243u8, 88u8, 247u8, 209u8, + 37u8, 52u8, 54u8, 100u8, 84u8, 59u8, 77u8, 176u8, 246u8, 246u8, + ] + { + let call = ReportEquivocationUnsigned { + equivocation_proof: ::std::boxed::Box::new(equivocation_proof), + key_owner_proof, + }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Plan an epoch config change. The epoch config change is recorded and will be enacted on"] + #[doc = "the next call to `enact_epoch_change`. The config will be activated one epoch after."] + #[doc = "Multiple calls to this method will replace any existing planned config change that had"] + #[doc = "not been enacted yet."] + pub fn plan_config_change( + &self, + config: runtime_types::sp_consensus_babe::digests::NextConfigDescriptor, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + PlanConfigChange, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? 
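// Illustrative note, not part of the generated bindings: unlike the System
// calls, the Babe equivocation builders above take the EquivocationProof by
// value and box it themselves (::std::boxed::Box::new(equivocation_proof)) to
// match the boxed field on the call struct, presumably the usual FRAME device
// for keeping the outer call enum small. A hedged call-site sketch, assuming
// `babe_tx` is the Babe TransactionApi and the two proofs were produced
// elsewhere (how they are obtained is outside this hunk):
//
//     let xt = babe_tx.report_equivocation_unsigned(equivocation_proof, key_owner_proof)?;
//     // The same metadata guard applies: a stale call hash short-circuits to
//     // MetadataError::IncompatibleMetadata before anything is signed.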
+ == [ + 215u8, 121u8, 90u8, 87u8, 178u8, 247u8, 114u8, 53u8, 174u8, 28u8, 20u8, + 33u8, 139u8, 216u8, 13u8, 187u8, 74u8, 198u8, 38u8, 28u8, 175u8, 13u8, + 73u8, 132u8, 103u8, 78u8, 217u8, 207u8, 113u8, 169u8, 42u8, 103u8, + ] + { + let call = PlanConfigChange { config }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + pub mod storage { + use super::runtime_types; + pub struct EpochIndex; + impl ::subxt::StorageEntry for EpochIndex { + const PALLET: &'static str = "Babe"; + const STORAGE: &'static str = "EpochIndex"; + type Value = ::core::primitive::u64; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct Authorities; + impl ::subxt::StorageEntry for Authorities { + const PALLET: &'static str = "Babe"; + const STORAGE: &'static str = "Authorities"; + type Value = + runtime_types::frame_support::storage::weak_bounded_vec::WeakBoundedVec<( + runtime_types::sp_consensus_babe::app::Public, + ::core::primitive::u64, + )>; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct GenesisSlot; + impl ::subxt::StorageEntry for GenesisSlot { + const PALLET: &'static str = "Babe"; + const STORAGE: &'static str = "GenesisSlot"; + type Value = runtime_types::sp_consensus_slots::Slot; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct CurrentSlot; + impl ::subxt::StorageEntry for CurrentSlot { + const PALLET: &'static str = "Babe"; + const STORAGE: &'static str = "CurrentSlot"; + type Value = runtime_types::sp_consensus_slots::Slot; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct Randomness; + impl ::subxt::StorageEntry for Randomness { + const PALLET: &'static str = "Babe"; + const STORAGE: &'static str = "Randomness"; + type Value = [::core::primitive::u8; 32usize]; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct PendingEpochConfigChange; + impl ::subxt::StorageEntry for PendingEpochConfigChange { + const PALLET: &'static str = "Babe"; + const STORAGE: &'static str = "PendingEpochConfigChange"; + type Value = runtime_types::sp_consensus_babe::digests::NextConfigDescriptor; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct NextRandomness; + impl ::subxt::StorageEntry for NextRandomness { + const PALLET: &'static str = "Babe"; + const STORAGE: &'static str = "NextRandomness"; + type Value = [::core::primitive::u8; 32usize]; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct NextAuthorities; + impl ::subxt::StorageEntry for NextAuthorities { + const PALLET: &'static str = "Babe"; + const STORAGE: &'static str = "NextAuthorities"; + type Value = + runtime_types::frame_support::storage::weak_bounded_vec::WeakBoundedVec<( + runtime_types::sp_consensus_babe::app::Public, + ::core::primitive::u64, + )>; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct SegmentIndex; + impl ::subxt::StorageEntry for SegmentIndex { + const PALLET: &'static str = "Babe"; + const STORAGE: &'static str = "SegmentIndex"; + type Value = ::core::primitive::u32; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct UnderConstruction<'a>(pub &'a ::core::primitive::u32); + impl 
::subxt::StorageEntry for UnderConstruction<'_> { + const PALLET: &'static str = "Babe"; + const STORAGE: &'static str = "UnderConstruction"; + type Value = runtime_types::frame_support::storage::bounded_vec::BoundedVec< + [::core::primitive::u8; 32usize], + >; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( + &self.0, + ::subxt::StorageHasher::Twox64Concat, + )]) + } + } + pub struct Initialized; + impl ::subxt::StorageEntry for Initialized { + const PALLET: &'static str = "Babe"; + const STORAGE: &'static str = "Initialized"; + type Value = + ::core::option::Option; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct AuthorVrfRandomness; + impl ::subxt::StorageEntry for AuthorVrfRandomness { + const PALLET: &'static str = "Babe"; + const STORAGE: &'static str = "AuthorVrfRandomness"; + type Value = ::core::option::Option<[::core::primitive::u8; 32usize]>; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct EpochStart; + impl ::subxt::StorageEntry for EpochStart { + const PALLET: &'static str = "Babe"; + const STORAGE: &'static str = "EpochStart"; + type Value = (::core::primitive::u32, ::core::primitive::u32); + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct Lateness; + impl ::subxt::StorageEntry for Lateness { + const PALLET: &'static str = "Babe"; + const STORAGE: &'static str = "Lateness"; + type Value = ::core::primitive::u32; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct EpochConfig; + impl ::subxt::StorageEntry for EpochConfig { + const PALLET: &'static str = "Babe"; + const STORAGE: &'static str = "EpochConfig"; + type Value = runtime_types::sp_consensus_babe::BabeEpochConfiguration; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct NextEpochConfig; + impl ::subxt::StorageEntry for NextEpochConfig { + const PALLET: &'static str = "Babe"; + const STORAGE: &'static str = "NextEpochConfig"; + type Value = runtime_types::sp_consensus_babe::BabeEpochConfiguration; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct StorageApi<'a, T: ::subxt::Config> { + client: &'a ::subxt::Client, + } + impl<'a, T: ::subxt::Config> StorageApi<'a, T> { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { client } + } + #[doc = " Current epoch index."] + pub async fn epoch_index( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result<::core::primitive::u64, ::subxt::BasicError> + { + if self.client.metadata().storage_hash::()? + == [ + 51u8, 27u8, 91u8, 156u8, 118u8, 99u8, 46u8, 219u8, 190u8, 147u8, 205u8, + 23u8, 106u8, 169u8, 121u8, 218u8, 208u8, 235u8, 135u8, 127u8, 243u8, + 41u8, 55u8, 243u8, 235u8, 122u8, 57u8, 86u8, 37u8, 90u8, 208u8, 71u8, + ] + { + let entry = EpochIndex; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Current epoch authorities."] + pub async fn authorities( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + runtime_types::frame_support::storage::weak_bounded_vec::WeakBoundedVec<( + runtime_types::sp_consensus_babe::app::Public, + ::core::primitive::u64, + )>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? 
+ == [ + 7u8, 239u8, 126u8, 115u8, 17u8, 7u8, 136u8, 192u8, 194u8, 197u8, 14u8, + 87u8, 34u8, 22u8, 170u8, 159u8, 63u8, 35u8, 206u8, 74u8, 18u8, 243u8, + 242u8, 250u8, 226u8, 214u8, 230u8, 55u8, 169u8, 195u8, 97u8, 88u8, + ] + { + let entry = Authorities; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The slot at which the first epoch actually started. This is 0"] + #[doc = " until the first block of the chain."] + pub async fn genesis_slot( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + runtime_types::sp_consensus_slots::Slot, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 136u8, 244u8, 7u8, 142u8, 224u8, 33u8, 144u8, 186u8, 155u8, 144u8, + 68u8, 81u8, 241u8, 57u8, 40u8, 207u8, 35u8, 39u8, 28u8, 41u8, 210u8, + 213u8, 53u8, 195u8, 175u8, 119u8, 6u8, 175u8, 100u8, 192u8, 180u8, + 73u8, + ] + { + let entry = GenesisSlot; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Current slot number."] + pub async fn current_slot( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + runtime_types::sp_consensus_slots::Slot, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 233u8, 102u8, 77u8, 99u8, 103u8, 50u8, 151u8, 229u8, 46u8, 226u8, + 181u8, 37u8, 117u8, 204u8, 234u8, 120u8, 116u8, 166u8, 80u8, 188u8, + 92u8, 154u8, 137u8, 150u8, 79u8, 164u8, 29u8, 203u8, 2u8, 51u8, 123u8, + 104u8, + ] + { + let entry = CurrentSlot; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The epoch randomness for the *current* epoch."] + #[doc = ""] + #[doc = " # Security"] + #[doc = ""] + #[doc = " This MUST NOT be used for gambling, as it can be influenced by a"] + #[doc = " malicious validator in the short term. It MAY be used in many"] + #[doc = " cryptographic protocols, however, so long as one remembers that this"] + #[doc = " (like everything else on-chain) it is public. For example, it can be"] + #[doc = " used where a number is needed that cannot have been chosen by an"] + #[doc = " adversary, for purposes such as public-coin zero-knowledge proofs."] + pub async fn randomness( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result<[::core::primitive::u8; 32usize], ::subxt::BasicError> + { + if self.client.metadata().storage_hash::()? + == [ + 191u8, 197u8, 25u8, 164u8, 104u8, 248u8, 247u8, 193u8, 244u8, 60u8, + 181u8, 195u8, 248u8, 90u8, 41u8, 199u8, 82u8, 123u8, 72u8, 126u8, 18u8, + 17u8, 128u8, 215u8, 34u8, 251u8, 227u8, 70u8, 166u8, 10u8, 104u8, + 140u8, + ] + { + let entry = Randomness; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Pending epoch configuration change that will be applied when the next epoch is enacted."] + pub async fn pending_epoch_config_change( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::core::option::Option< + runtime_types::sp_consensus_babe::digests::NextConfigDescriptor, + >, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .storage_hash::()? 
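// Illustrative sketch, not part of the generated bindings: the Babe storage
// accessors follow the same shape as the System ones, and the optional
// block-hash argument pins the read to a specific block instead of the node's
// best block. Assuming `babe_storage` is the Babe StorageApi above and `at` an
// optional block hash:
//
//     let epoch = babe_storage.epoch_index(at).await?;
//     let slot = babe_storage.current_slot(at).await?;
//     // Entries without an on-chain default (e.g. PendingEpochConfigChange)
//     // use fetch and therefore come back wrapped in a further Option.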
+ == [ + 98u8, 52u8, 22u8, 32u8, 76u8, 196u8, 89u8, 78u8, 119u8, 181u8, 17u8, + 49u8, 220u8, 159u8, 195u8, 74u8, 33u8, 59u8, 15u8, 104u8, 26u8, 111u8, + 165u8, 68u8, 147u8, 14u8, 86u8, 94u8, 250u8, 167u8, 146u8, 82u8, + ] + { + let entry = PendingEpochConfigChange; + self.client.storage().fetch(&entry, block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Next epoch randomness."] + pub async fn next_randomness( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result<[::core::primitive::u8; 32usize], ::subxt::BasicError> + { + if self.client.metadata().storage_hash::()? + == [ + 185u8, 98u8, 45u8, 109u8, 253u8, 38u8, 238u8, 221u8, 240u8, 29u8, 38u8, + 107u8, 118u8, 117u8, 131u8, 115u8, 21u8, 255u8, 203u8, 81u8, 243u8, + 251u8, 91u8, 60u8, 163u8, 202u8, 125u8, 193u8, 173u8, 234u8, 166u8, + 92u8, + ] + { + let entry = NextRandomness; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Next epoch authorities."] + pub async fn next_authorities( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + runtime_types::frame_support::storage::weak_bounded_vec::WeakBoundedVec<( + runtime_types::sp_consensus_babe::app::Public, + ::core::primitive::u64, + )>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 243u8, 38u8, 95u8, 14u8, 35u8, 130u8, 244u8, 182u8, 82u8, 19u8, 22u8, + 151u8, 193u8, 183u8, 153u8, 206u8, 24u8, 13u8, 166u8, 75u8, 242u8, + 217u8, 247u8, 87u8, 47u8, 58u8, 87u8, 109u8, 83u8, 208u8, 222u8, 241u8, + ] + { + let entry = NextAuthorities; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Randomness under construction."] + #[doc = ""] + #[doc = " We make a trade-off between storage accesses and list length."] + #[doc = " We store the under-construction randomness in segments of up to"] + #[doc = " `UNDER_CONSTRUCTION_SEGMENT_LENGTH`."] + #[doc = ""] + #[doc = " Once a segment reaches this length, we begin the next one."] + #[doc = " We reset all segments and return to `0` at the beginning of every"] + #[doc = " epoch."] + pub async fn segment_index( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> + { + if self.client.metadata().storage_hash::()? + == [ + 128u8, 45u8, 87u8, 58u8, 174u8, 152u8, 241u8, 156u8, 56u8, 192u8, 19u8, + 45u8, 75u8, 160u8, 35u8, 253u8, 145u8, 11u8, 178u8, 81u8, 114u8, 117u8, + 112u8, 107u8, 163u8, 208u8, 240u8, 151u8, 102u8, 176u8, 246u8, 5u8, + ] + { + let entry = SegmentIndex; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " TWOX-NOTE: `SegmentIndex` is an increasing integer, so this is okay."] + pub async fn under_construction( + &self, + _0: &::core::primitive::u32, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + runtime_types::frame_support::storage::bounded_vec::BoundedVec< + [::core::primitive::u8; 32usize], + >, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? 
+ == [ + 12u8, 167u8, 30u8, 96u8, 161u8, 63u8, 210u8, 63u8, 91u8, 199u8, 188u8, + 78u8, 254u8, 255u8, 253u8, 202u8, 203u8, 26u8, 4u8, 105u8, 76u8, 125u8, + 191u8, 245u8, 32u8, 97u8, 127u8, 129u8, 167u8, 80u8, 210u8, 123u8, + ] + { + let entry = UnderConstruction(_0); + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " TWOX-NOTE: `SegmentIndex` is an increasing integer, so this is okay."] + pub async fn under_construction_iter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::subxt::KeyIter<'a, T, UnderConstruction<'a>>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 12u8, 167u8, 30u8, 96u8, 161u8, 63u8, 210u8, 63u8, 91u8, 199u8, 188u8, + 78u8, 254u8, 255u8, 253u8, 202u8, 203u8, 26u8, 4u8, 105u8, 76u8, 125u8, + 191u8, 245u8, 32u8, 97u8, 127u8, 129u8, 167u8, 80u8, 210u8, 123u8, + ] + { + self.client.storage().iter(block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Temporary value (cleared at block finalization) which is `Some`"] + #[doc = " if per-block initialization has already been called for current block."] + pub async fn initialized( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::core::option::Option< + ::core::option::Option< + runtime_types::sp_consensus_babe::digests::PreDigest, + >, + >, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 174u8, 23u8, 254u8, 52u8, 114u8, 235u8, 65u8, 46u8, 39u8, 97u8, 238u8, + 243u8, 237u8, 138u8, 142u8, 85u8, 114u8, 69u8, 58u8, 172u8, 7u8, 238u8, + 110u8, 153u8, 22u8, 122u8, 117u8, 149u8, 113u8, 221u8, 127u8, 225u8, + ] + { + let entry = Initialized; + self.client.storage().fetch(&entry, block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " This field should always be populated during block processing unless"] + #[doc = " secondary plain slots are enabled (which don't contain a VRF output)."] + #[doc = ""] + #[doc = " It is set in `on_finalize`, before it will contain the value from the last block."] + pub async fn author_vrf_randomness( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::core::option::Option<[::core::primitive::u8; 32usize]>, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .storage_hash::()? + == [ + 66u8, 235u8, 74u8, 252u8, 222u8, 135u8, 19u8, 28u8, 74u8, 191u8, 170u8, + 197u8, 207u8, 127u8, 77u8, 121u8, 138u8, 138u8, 110u8, 187u8, 34u8, + 14u8, 230u8, 43u8, 241u8, 241u8, 63u8, 163u8, 53u8, 179u8, 250u8, + 247u8, + ] + { + let entry = AuthorVrfRandomness; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The block numbers when the last and current epoch have started, respectively `N-1` and"] + #[doc = " `N`."] + #[doc = " NOTE: We track this is in order to annotate the block number when a given pool of"] + #[doc = " entropy was fixed (i.e. it was known to chain observers). 
Since epochs are defined in"] + #[doc = " slots, which may be skipped, the block numbers may not line up with the slot numbers."] + pub async fn epoch_start( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + (::core::primitive::u32, ::core::primitive::u32), + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 196u8, 39u8, 241u8, 20u8, 150u8, 180u8, 136u8, 4u8, 195u8, 205u8, + 218u8, 10u8, 130u8, 131u8, 168u8, 243u8, 207u8, 249u8, 58u8, 195u8, + 177u8, 119u8, 110u8, 243u8, 241u8, 3u8, 245u8, 56u8, 157u8, 5u8, 68u8, + 60u8, + ] + { + let entry = EpochStart; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " How late the current block is compared to its parent."] + #[doc = ""] + #[doc = " This entry is populated as part of block execution and is cleaned up"] + #[doc = " on block finalization. Querying this storage entry outside of block"] + #[doc = " execution context should always yield zero."] + pub async fn lateness( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> + { + if self.client.metadata().storage_hash::()? + == [ + 229u8, 230u8, 224u8, 89u8, 49u8, 213u8, 198u8, 236u8, 144u8, 56u8, + 193u8, 234u8, 62u8, 242u8, 191u8, 199u8, 105u8, 131u8, 74u8, 63u8, + 75u8, 1u8, 210u8, 49u8, 3u8, 128u8, 18u8, 77u8, 219u8, 146u8, 60u8, + 88u8, + ] + { + let entry = Lateness; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The configuration for the current epoch. Should never be `None` as it is initialized in"] + #[doc = " genesis."] + pub async fn epoch_config( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::core::option::Option< + runtime_types::sp_consensus_babe::BabeEpochConfiguration, + >, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 169u8, 189u8, 214u8, 159u8, 181u8, 232u8, 243u8, 4u8, 113u8, 24u8, + 221u8, 229u8, 27u8, 35u8, 3u8, 121u8, 136u8, 88u8, 187u8, 193u8, 207u8, + 153u8, 223u8, 225u8, 166u8, 183u8, 53u8, 3u8, 162u8, 207u8, 88u8, + 133u8, + ] + { + let entry = EpochConfig; + self.client.storage().fetch(&entry, block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The configuration for the next epoch, `None` if the config will not change"] + #[doc = " (you can fallback to `EpochConfig` instead in that case)."] + pub async fn next_epoch_config( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::core::option::Option< + runtime_types::sp_consensus_babe::BabeEpochConfiguration, + >, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? 
+ == [ + 239u8, 125u8, 203u8, 223u8, 161u8, 107u8, 232u8, 54u8, 158u8, 100u8, + 244u8, 140u8, 119u8, 58u8, 253u8, 245u8, 73u8, 236u8, 50u8, 67u8, + 228u8, 162u8, 166u8, 168u8, 162u8, 152u8, 239u8, 246u8, 153u8, 223u8, + 109u8, 121u8, + ] + { + let entry = NextEpochConfig; + self.client.storage().fetch(&entry, block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + pub mod constants { + use super::runtime_types; + pub struct ConstantsApi<'a, T: ::subxt::Config> { + client: &'a ::subxt::Client, + } + impl<'a, T: ::subxt::Config> ConstantsApi<'a, T> { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { client } + } + #[doc = " The amount of time, in slots, that each epoch should last."] + #[doc = " NOTE: Currently it is not possible to change the epoch duration after"] + #[doc = " the chain has started. Attempting to do so will brick block production."] + pub fn epoch_duration( + &self, + ) -> ::core::result::Result<::core::primitive::u64, ::subxt::BasicError> + { + if self + .client + .metadata() + .constant_hash("Babe", "EpochDuration")? + == [ + 59u8, 175u8, 230u8, 66u8, 80u8, 146u8, 114u8, 61u8, 39u8, 30u8, 164u8, + 158u8, 155u8, 71u8, 224u8, 229u8, 68u8, 52u8, 30u8, 195u8, 39u8, 8u8, + 6u8, 196u8, 21u8, 54u8, 163u8, 187u8, 4u8, 42u8, 47u8, 92u8, + ] + { + let pallet = self.client.metadata().pallet("Babe")?; + let constant = pallet.constant("EpochDuration")?; + let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; + Ok(value) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The expected average block time at which BABE should be creating"] + #[doc = " blocks. Since BABE is probabilistic it is not trivial to figure out"] + #[doc = " what the expected average block time should be based on the slot"] + #[doc = " duration and the security parameter `c` (where `1 - c` represents"] + #[doc = " the probability of a slot being empty)."] + pub fn expected_block_time( + &self, + ) -> ::core::result::Result<::core::primitive::u64, ::subxt::BasicError> + { + if self + .client + .metadata() + .constant_hash("Babe", "ExpectedBlockTime")? + == [ + 249u8, 170u8, 37u8, 7u8, 132u8, 115u8, 106u8, 71u8, 116u8, 166u8, 78u8, + 251u8, 242u8, 146u8, 99u8, 207u8, 204u8, 225u8, 157u8, 57u8, 19u8, + 17u8, 202u8, 231u8, 50u8, 67u8, 17u8, 205u8, 238u8, 80u8, 154u8, 125u8, + ] + { + let pallet = self.client.metadata().pallet("Babe")?; + let constant = pallet.constant("ExpectedBlockTime")?; + let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; + Ok(value) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Max number of authorities allowed"] + pub fn max_authorities( + &self, + ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> + { + if self + .client + .metadata() + .constant_hash("Babe", "MaxAuthorities")? 
+ == [ + 248u8, 195u8, 131u8, 166u8, 10u8, 50u8, 71u8, 223u8, 41u8, 49u8, 43u8, + 99u8, 251u8, 113u8, 75u8, 193u8, 159u8, 15u8, 77u8, 217u8, 147u8, + 205u8, 165u8, 50u8, 6u8, 166u8, 77u8, 189u8, 102u8, 22u8, 201u8, 19u8, + ] + { + let pallet = self.client.metadata().pallet("Babe")?; + let constant = pallet.constant("MaxAuthorities")?; + let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; + Ok(value) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + } + pub mod timestamp { + use super::root_mod; + use super::runtime_types; + pub mod calls { + use super::root_mod; + use super::runtime_types; + type DispatchError = runtime_types::sp_runtime::DispatchError; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct Set { + #[codec(compact)] + pub now: ::core::primitive::u64, + } + impl ::subxt::Call for Set { + const PALLET: &'static str = "Timestamp"; + const FUNCTION: &'static str = "set"; + } + pub struct TransactionApi<'a, T: ::subxt::Config, X> { + client: &'a ::subxt::Client, + marker: ::core::marker::PhantomData, + } + impl<'a, T, X> TransactionApi<'a, T, X> + where + T: ::subxt::Config, + X: ::subxt::extrinsic::ExtrinsicParams, + { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { + client, + marker: ::core::marker::PhantomData, + } + } + #[doc = "Set the current time."] + #[doc = ""] + #[doc = "This call should be invoked exactly once per block. It will panic at the finalization"] + #[doc = "phase, if this call hasn't been invoked by that time."] + #[doc = ""] + #[doc = "The timestamp should be greater than the previous one by the amount specified by"] + #[doc = "`MinimumPeriod`."] + #[doc = ""] + #[doc = "The dispatch origin for this call must be `Inherent`."] + #[doc = ""] + #[doc = "# "] + #[doc = "- `O(1)` (Note that implementations of `OnTimestampSet` must also be `O(1)`)"] + #[doc = "- 1 storage read and 1 storage mutation (codec `O(1)`). (because of `DidUpdate::take` in"] + #[doc = " `on_finalize`)"] + #[doc = "- 1 event handler `on_timestamp_set`. Must be `O(1)`."] + #[doc = "# "] + pub fn set( + &self, + now: ::core::primitive::u64, + ) -> Result< + ::subxt::SubmittableExtrinsic<'a, T, X, Set, DispatchError, root_mod::Event>, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? 
+ == [ + 191u8, 73u8, 102u8, 150u8, 65u8, 157u8, 172u8, 194u8, 7u8, 72u8, 1u8, + 35u8, 54u8, 99u8, 245u8, 139u8, 40u8, 136u8, 245u8, 53u8, 167u8, 100u8, + 143u8, 244u8, 160u8, 5u8, 18u8, 130u8, 77u8, 160u8, 227u8, 51u8, + ] + { + let call = Set { now }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + pub mod storage { + use super::runtime_types; + pub struct Now; + impl ::subxt::StorageEntry for Now { + const PALLET: &'static str = "Timestamp"; + const STORAGE: &'static str = "Now"; + type Value = ::core::primitive::u64; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct DidUpdate; + impl ::subxt::StorageEntry for DidUpdate { + const PALLET: &'static str = "Timestamp"; + const STORAGE: &'static str = "DidUpdate"; + type Value = ::core::primitive::bool; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct StorageApi<'a, T: ::subxt::Config> { + client: &'a ::subxt::Client, + } + impl<'a, T: ::subxt::Config> StorageApi<'a, T> { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { client } + } + #[doc = " Current time for the current block."] + pub async fn now( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result<::core::primitive::u64, ::subxt::BasicError> + { + if self.client.metadata().storage_hash::()? + == [ + 148u8, 53u8, 50u8, 54u8, 13u8, 161u8, 57u8, 150u8, 16u8, 83u8, 144u8, + 221u8, 59u8, 75u8, 158u8, 130u8, 39u8, 123u8, 106u8, 134u8, 202u8, + 185u8, 83u8, 85u8, 60u8, 41u8, 120u8, 96u8, 210u8, 34u8, 2u8, 250u8, + ] + { + let entry = Now; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Did the timestamp get updated in this block?"] + pub async fn did_update( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result<::core::primitive::bool, ::subxt::BasicError> + { + if self.client.metadata().storage_hash::()? + == [ + 70u8, 13u8, 92u8, 186u8, 80u8, 151u8, 167u8, 90u8, 158u8, 232u8, 175u8, + 13u8, 103u8, 135u8, 2u8, 78u8, 16u8, 6u8, 39u8, 158u8, 167u8, 85u8, + 27u8, 47u8, 122u8, 73u8, 127u8, 26u8, 35u8, 168u8, 72u8, 204u8, + ] + { + let entry = DidUpdate; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + pub mod constants { + use super::runtime_types; + pub struct ConstantsApi<'a, T: ::subxt::Config> { + client: &'a ::subxt::Client, + } + impl<'a, T: ::subxt::Config> ConstantsApi<'a, T> { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { client } + } + #[doc = " The minimum period between blocks. Beware that this is different to the *expected*"] + #[doc = " period that the block production apparatus provides. Your chosen consensus system will"] + #[doc = " generally work with this to determine a sensible block time. e.g. For Aura, it will be"] + #[doc = " double this period on default settings."] + pub fn minimum_period( + &self, + ) -> ::core::result::Result<::core::primitive::u64, ::subxt::BasicError> + { + if self + .client + .metadata() + .constant_hash("Timestamp", "MinimumPeriod")? 
+ == [ + 141u8, 242u8, 40u8, 24u8, 83u8, 43u8, 33u8, 194u8, 156u8, 149u8, 219u8, + 61u8, 10u8, 123u8, 120u8, 247u8, 228u8, 22u8, 25u8, 24u8, 214u8, 188u8, + 54u8, 135u8, 240u8, 162u8, 41u8, 216u8, 3u8, 58u8, 238u8, 39u8, + ] + { + let pallet = self.client.metadata().pallet("Timestamp")?; + let constant = pallet.constant("MinimumPeriod")?; + let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; + Ok(value) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + } + pub mod indices { + use super::root_mod; + use super::runtime_types; + pub mod calls { + use super::root_mod; + use super::runtime_types; + type DispatchError = runtime_types::sp_runtime::DispatchError; + #[derive( + :: subxt :: codec :: CompactAs, + :: subxt :: codec :: Decode, + :: subxt :: codec :: Encode, + Debug, + )] + pub struct Claim { + pub index: ::core::primitive::u32, + } + impl ::subxt::Call for Claim { + const PALLET: &'static str = "Indices"; + const FUNCTION: &'static str = "claim"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct Transfer { + pub new: ::subxt::sp_core::crypto::AccountId32, + pub index: ::core::primitive::u32, + } + impl ::subxt::Call for Transfer { + const PALLET: &'static str = "Indices"; + const FUNCTION: &'static str = "transfer"; + } + #[derive( + :: subxt :: codec :: CompactAs, + :: subxt :: codec :: Decode, + :: subxt :: codec :: Encode, + Debug, + )] + pub struct Free { + pub index: ::core::primitive::u32, + } + impl ::subxt::Call for Free { + const PALLET: &'static str = "Indices"; + const FUNCTION: &'static str = "free"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct ForceTransfer { + pub new: ::subxt::sp_core::crypto::AccountId32, + pub index: ::core::primitive::u32, + pub freeze: ::core::primitive::bool, + } + impl ::subxt::Call for ForceTransfer { + const PALLET: &'static str = "Indices"; + const FUNCTION: &'static str = "force_transfer"; + } + #[derive( + :: subxt :: codec :: CompactAs, + :: subxt :: codec :: Decode, + :: subxt :: codec :: Encode, + Debug, + )] + pub struct Freeze { + pub index: ::core::primitive::u32, + } + impl ::subxt::Call for Freeze { + const PALLET: &'static str = "Indices"; + const FUNCTION: &'static str = "freeze"; + } + pub struct TransactionApi<'a, T: ::subxt::Config, X> { + client: &'a ::subxt::Client, + marker: ::core::marker::PhantomData, + } + impl<'a, T, X> TransactionApi<'a, T, X> + where + T: ::subxt::Config, + X: ::subxt::extrinsic::ExtrinsicParams, + { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { + client, + marker: ::core::marker::PhantomData, + } + } + #[doc = "Assign an previously unassigned index."] + #[doc = ""] + #[doc = "Payment: `Deposit` is reserved from the sender account."] + #[doc = ""] + #[doc = "The dispatch origin for this call must be _Signed_."] + #[doc = ""] + #[doc = "- `index`: the index to be claimed. 
This must not be in use."] + #[doc = ""] + #[doc = "Emits `IndexAssigned` if successful."] + #[doc = ""] + #[doc = "# "] + #[doc = "- `O(1)`."] + #[doc = "- One storage mutation (codec `O(1)`)."] + #[doc = "- One reserve operation."] + #[doc = "- One event."] + #[doc = "-------------------"] + #[doc = "- DB Weight: 1 Read/Write (Accounts)"] + #[doc = "# "] + pub fn claim( + &self, + index: ::core::primitive::u32, + ) -> Result< + ::subxt::SubmittableExtrinsic<'a, T, X, Claim, DispatchError, root_mod::Event>, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 27u8, 4u8, 108u8, 55u8, 23u8, 109u8, 175u8, 25u8, 201u8, 230u8, 228u8, + 51u8, 164u8, 15u8, 79u8, 10u8, 219u8, 182u8, 242u8, 102u8, 164u8, + 148u8, 39u8, 91u8, 106u8, 197u8, 29u8, 190u8, 178u8, 221u8, 16u8, 87u8, + ] + { + let call = Claim { index }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Assign an index already owned by the sender to another account. The balance reservation"] + #[doc = "is effectively transferred to the new account."] + #[doc = ""] + #[doc = "The dispatch origin for this call must be _Signed_."] + #[doc = ""] + #[doc = "- `index`: the index to be re-assigned. This must be owned by the sender."] + #[doc = "- `new`: the new owner of the index. This function is a no-op if it is equal to sender."] + #[doc = ""] + #[doc = "Emits `IndexAssigned` if successful."] + #[doc = ""] + #[doc = "# "] + #[doc = "- `O(1)`."] + #[doc = "- One storage mutation (codec `O(1)`)."] + #[doc = "- One transfer operation."] + #[doc = "- One event."] + #[doc = "-------------------"] + #[doc = "- DB Weight:"] + #[doc = " - Reads: Indices Accounts, System Account (recipient)"] + #[doc = " - Writes: Indices Accounts, System Account (recipient)"] + #[doc = "# "] + pub fn transfer( + &self, + new: ::subxt::sp_core::crypto::AccountId32, + index: ::core::primitive::u32, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + Transfer, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 124u8, 83u8, 33u8, 230u8, 23u8, 70u8, 83u8, 59u8, 76u8, 100u8, 219u8, + 100u8, 165u8, 163u8, 102u8, 193u8, 11u8, 22u8, 30u8, 125u8, 114u8, + 28u8, 61u8, 156u8, 38u8, 170u8, 129u8, 74u8, 187u8, 28u8, 33u8, 65u8, + ] + { + let call = Transfer { new, index }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Free up an index owned by the sender."] + #[doc = ""] + #[doc = "Payment: Any previous deposit placed for the index is unreserved in the sender account."] + #[doc = ""] + #[doc = "The dispatch origin for this call must be _Signed_ and the sender must own the index."] + #[doc = ""] + #[doc = "- `index`: the index to be freed. This must be owned by the sender."] + #[doc = ""] + #[doc = "Emits `IndexFreed` if successful."] + #[doc = ""] + #[doc = "# "] + #[doc = "- `O(1)`."] + #[doc = "- One storage mutation (codec `O(1)`)."] + #[doc = "- One reserve operation."] + #[doc = "- One event."] + #[doc = "-------------------"] + #[doc = "- DB Weight: 1 Read/Write (Accounts)"] + #[doc = "# "] + pub fn free( + &self, + index: ::core::primitive::u32, + ) -> Result< + ::subxt::SubmittableExtrinsic<'a, T, X, Free, DispatchError, root_mod::Event>, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? 
+ == [ + 153u8, 143u8, 162u8, 33u8, 229u8, 3u8, 159u8, 153u8, 111u8, 100u8, + 160u8, 250u8, 227u8, 24u8, 157u8, 226u8, 173u8, 39u8, 25u8, 200u8, + 137u8, 147u8, 232u8, 213u8, 182u8, 49u8, 142u8, 250u8, 139u8, 155u8, + 84u8, 214u8, + ] + { + let call = Free { index }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Force an index to an account. This doesn't require a deposit. If the index is already"] + #[doc = "held, then any deposit is reimbursed to its current owner."] + #[doc = ""] + #[doc = "The dispatch origin for this call must be _Root_."] + #[doc = ""] + #[doc = "- `index`: the index to be (re-)assigned."] + #[doc = "- `new`: the new owner of the index. This function is a no-op if it is equal to sender."] + #[doc = "- `freeze`: if set to `true`, will freeze the index so it cannot be transferred."] + #[doc = ""] + #[doc = "Emits `IndexAssigned` if successful."] + #[doc = ""] + #[doc = "# "] + #[doc = "- `O(1)`."] + #[doc = "- One storage mutation (codec `O(1)`)."] + #[doc = "- Up to one reserve operation."] + #[doc = "- One event."] + #[doc = "-------------------"] + #[doc = "- DB Weight:"] + #[doc = " - Reads: Indices Accounts, System Account (original owner)"] + #[doc = " - Writes: Indices Accounts, System Account (original owner)"] + #[doc = "# "] + pub fn force_transfer( + &self, + new: ::subxt::sp_core::crypto::AccountId32, + index: ::core::primitive::u32, + freeze: ::core::primitive::bool, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + ForceTransfer, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 181u8, 143u8, 90u8, 135u8, 132u8, 11u8, 145u8, 85u8, 4u8, 211u8, 56u8, + 110u8, 213u8, 153u8, 224u8, 106u8, 198u8, 250u8, 130u8, 253u8, 72u8, + 58u8, 133u8, 150u8, 102u8, 119u8, 177u8, 175u8, 77u8, 106u8, 253u8, + 99u8, + ] + { + let call = ForceTransfer { new, index, freeze }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Freeze an index so it will always point to the sender account. This consumes the"] + #[doc = "deposit."] + #[doc = ""] + #[doc = "The dispatch origin for this call must be _Signed_ and the signing account must have a"] + #[doc = "non-frozen account `index`."] + #[doc = ""] + #[doc = "- `index`: the index to be frozen in place."] + #[doc = ""] + #[doc = "Emits `IndexFrozen` if successful."] + #[doc = ""] + #[doc = "# "] + #[doc = "- `O(1)`."] + #[doc = "- One storage mutation (codec `O(1)`)."] + #[doc = "- Up to one slash operation."] + #[doc = "- One event."] + #[doc = "-------------------"] + #[doc = "- DB Weight: 1 Read/Write (Accounts)"] + #[doc = "# "] + pub fn freeze( + &self, + index: ::core::primitive::u32, + ) -> Result< + ::subxt::SubmittableExtrinsic<'a, T, X, Freeze, DispatchError, root_mod::Event>, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? 
+ == [ + 204u8, 127u8, 214u8, 137u8, 138u8, 28u8, 171u8, 169u8, 184u8, 164u8, + 235u8, 114u8, 132u8, 176u8, 14u8, 207u8, 72u8, 39u8, 179u8, 231u8, + 137u8, 243u8, 242u8, 57u8, 89u8, 57u8, 213u8, 210u8, 87u8, 12u8, 253u8, + 159u8, + ] + { + let call = Freeze { index }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + pub type Event = runtime_types::pallet_indices::pallet::Event; + pub mod events { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "A account index was assigned."] + pub struct IndexAssigned { + pub who: ::subxt::sp_core::crypto::AccountId32, + pub index: ::core::primitive::u32, + } + impl ::subxt::Event for IndexAssigned { + const PALLET: &'static str = "Indices"; + const EVENT: &'static str = "IndexAssigned"; + } + #[derive( + :: subxt :: codec :: CompactAs, + :: subxt :: codec :: Decode, + :: subxt :: codec :: Encode, + Debug, + )] + #[doc = "A account index has been freed up (unassigned)."] + pub struct IndexFreed { + pub index: ::core::primitive::u32, + } + impl ::subxt::Event for IndexFreed { + const PALLET: &'static str = "Indices"; + const EVENT: &'static str = "IndexFreed"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "A account index has been frozen to its current account ID."] + pub struct IndexFrozen { + pub index: ::core::primitive::u32, + pub who: ::subxt::sp_core::crypto::AccountId32, + } + impl ::subxt::Event for IndexFrozen { + const PALLET: &'static str = "Indices"; + const EVENT: &'static str = "IndexFrozen"; + } + } + pub mod storage { + use super::runtime_types; + pub struct Accounts<'a>(pub &'a ::core::primitive::u32); + impl ::subxt::StorageEntry for Accounts<'_> { + const PALLET: &'static str = "Indices"; + const STORAGE: &'static str = "Accounts"; + type Value = ( + ::subxt::sp_core::crypto::AccountId32, + ::core::primitive::u128, + ::core::primitive::bool, + ); + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( + &self.0, + ::subxt::StorageHasher::Blake2_128Concat, + )]) + } + } + pub struct StorageApi<'a, T: ::subxt::Config> { + client: &'a ::subxt::Client, + } + impl<'a, T: ::subxt::Config> StorageApi<'a, T> { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { client } + } + #[doc = " The lookup from index to account."] + pub async fn accounts( + &self, + _0: &::core::primitive::u32, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::core::option::Option<( + ::subxt::sp_core::crypto::AccountId32, + ::core::primitive::u128, + ::core::primitive::bool, + )>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 105u8, 208u8, 81u8, 30u8, 157u8, 108u8, 22u8, 122u8, 152u8, 220u8, + 40u8, 97u8, 255u8, 166u8, 222u8, 11u8, 81u8, 245u8, 143u8, 79u8, 57u8, + 19u8, 174u8, 164u8, 220u8, 59u8, 77u8, 117u8, 39u8, 72u8, 251u8, 234u8, + ] + { + let entry = Accounts(_0); + self.client.storage().fetch(&entry, block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The lookup from index to account."] + pub async fn accounts_iter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::subxt::KeyIter<'a, T, Accounts<'a>>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? 
+ == [ + 105u8, 208u8, 81u8, 30u8, 157u8, 108u8, 22u8, 122u8, 152u8, 220u8, + 40u8, 97u8, 255u8, 166u8, 222u8, 11u8, 81u8, 245u8, 143u8, 79u8, 57u8, + 19u8, 174u8, 164u8, 220u8, 59u8, 77u8, 117u8, 39u8, 72u8, 251u8, 234u8, + ] + { + self.client.storage().iter(block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + pub mod constants { + use super::runtime_types; + pub struct ConstantsApi<'a, T: ::subxt::Config> { + client: &'a ::subxt::Client, + } + impl<'a, T: ::subxt::Config> ConstantsApi<'a, T> { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { client } + } + #[doc = " The deposit needed for reserving an index."] + pub fn deposit( + &self, + ) -> ::core::result::Result<::core::primitive::u128, ::subxt::BasicError> + { + if self.client.metadata().constant_hash("Indices", "Deposit")? + == [ + 217u8, 97u8, 70u8, 109u8, 180u8, 214u8, 183u8, 67u8, 253u8, 148u8, + 245u8, 108u8, 187u8, 95u8, 0u8, 15u8, 167u8, 149u8, 163u8, 194u8, + 206u8, 220u8, 164u8, 101u8, 1u8, 99u8, 206u8, 165u8, 63u8, 141u8, + 109u8, 1u8, + ] + { + let pallet = self.client.metadata().pallet("Indices")?; + let constant = pallet.constant("Deposit")?; + let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; + Ok(value) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + } + pub mod balances { + use super::root_mod; + use super::runtime_types; + pub mod calls { + use super::root_mod; + use super::runtime_types; + type DispatchError = runtime_types::sp_runtime::DispatchError; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct Transfer { + pub dest: + ::subxt::sp_runtime::MultiAddress<::subxt::sp_core::crypto::AccountId32, ()>, + #[codec(compact)] + pub value: ::core::primitive::u128, + } + impl ::subxt::Call for Transfer { + const PALLET: &'static str = "Balances"; + const FUNCTION: &'static str = "transfer"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct SetBalance { + pub who: + ::subxt::sp_runtime::MultiAddress<::subxt::sp_core::crypto::AccountId32, ()>, + #[codec(compact)] + pub new_free: ::core::primitive::u128, + #[codec(compact)] + pub new_reserved: ::core::primitive::u128, + } + impl ::subxt::Call for SetBalance { + const PALLET: &'static str = "Balances"; + const FUNCTION: &'static str = "set_balance"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct ForceTransfer { + pub source: + ::subxt::sp_runtime::MultiAddress<::subxt::sp_core::crypto::AccountId32, ()>, + pub dest: + ::subxt::sp_runtime::MultiAddress<::subxt::sp_core::crypto::AccountId32, ()>, + #[codec(compact)] + pub value: ::core::primitive::u128, + } + impl ::subxt::Call for ForceTransfer { + const PALLET: &'static str = "Balances"; + const FUNCTION: &'static str = "force_transfer"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct TransferKeepAlive { + pub dest: + ::subxt::sp_runtime::MultiAddress<::subxt::sp_core::crypto::AccountId32, ()>, + #[codec(compact)] + pub value: ::core::primitive::u128, + } + impl ::subxt::Call for TransferKeepAlive { + const PALLET: &'static str = "Balances"; + const FUNCTION: &'static str = "transfer_keep_alive"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct TransferAll { + pub dest: + ::subxt::sp_runtime::MultiAddress<::subxt::sp_core::crypto::AccountId32, ()>, + pub keep_alive: 
::core::primitive::bool, + } + impl ::subxt::Call for TransferAll { + const PALLET: &'static str = "Balances"; + const FUNCTION: &'static str = "transfer_all"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct ForceUnreserve { + pub who: + ::subxt::sp_runtime::MultiAddress<::subxt::sp_core::crypto::AccountId32, ()>, + pub amount: ::core::primitive::u128, + } + impl ::subxt::Call for ForceUnreserve { + const PALLET: &'static str = "Balances"; + const FUNCTION: &'static str = "force_unreserve"; + } + pub struct TransactionApi<'a, T: ::subxt::Config, X> { + client: &'a ::subxt::Client, + marker: ::core::marker::PhantomData, + } + impl<'a, T, X> TransactionApi<'a, T, X> + where + T: ::subxt::Config, + X: ::subxt::extrinsic::ExtrinsicParams, + { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { + client, + marker: ::core::marker::PhantomData, + } + } + #[doc = "Transfer some liquid free balance to another account."] + #[doc = ""] + #[doc = "`transfer` will set the `FreeBalance` of the sender and receiver."] + #[doc = "If the sender's account is below the existential deposit as a result"] + #[doc = "of the transfer, the account will be reaped."] + #[doc = ""] + #[doc = "The dispatch origin for this call must be `Signed` by the transactor."] + #[doc = ""] + #[doc = "# "] + #[doc = "- Dependent on arguments but not critical, given proper implementations for input config"] + #[doc = " types. See related functions below."] + #[doc = "- It contains a limited number of reads and writes internally and no complex"] + #[doc = " computation."] + #[doc = ""] + #[doc = "Related functions:"] + #[doc = ""] + #[doc = " - `ensure_can_withdraw` is always called internally but has a bounded complexity."] + #[doc = " - Transferring balances to accounts that did not exist before will cause"] + #[doc = " `T::OnNewAccount::on_new_account` to be called."] + #[doc = " - Removing enough funds from an account will trigger `T::DustRemoval::on_unbalanced`."] + #[doc = " - `transfer_keep_alive` works the same way as `transfer`, but has an additional check"] + #[doc = " that the transfer will not kill the origin account."] + #[doc = "---------------------------------"] + #[doc = "- Origin account is already in memory, so no DB operations for them."] + #[doc = "# "] + pub fn transfer( + &self, + dest: ::subxt::sp_runtime::MultiAddress< + ::subxt::sp_core::crypto::AccountId32, + (), + >, + value: ::core::primitive::u128, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + Transfer, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 250u8, 8u8, 164u8, 186u8, 80u8, 220u8, 134u8, 247u8, 142u8, 121u8, + 34u8, 22u8, 169u8, 39u8, 6u8, 93u8, 72u8, 47u8, 44u8, 107u8, 9u8, 98u8, + 203u8, 190u8, 136u8, 55u8, 251u8, 78u8, 216u8, 150u8, 98u8, 118u8, + ] + { + let call = Transfer { dest, value }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Set the balances of a given account."] + #[doc = ""] + #[doc = "This will alter `FreeBalance` and `ReservedBalance` in storage. 
it will"] + #[doc = "also alter the total issuance of the system (`TotalIssuance`) appropriately."] + #[doc = "If the new free or reserved balance is below the existential deposit,"] + #[doc = "it will reset the account nonce (`frame_system::AccountNonce`)."] + #[doc = ""] + #[doc = "The dispatch origin for this call is `root`."] + pub fn set_balance( + &self, + who: ::subxt::sp_runtime::MultiAddress< + ::subxt::sp_core::crypto::AccountId32, + (), + >, + new_free: ::core::primitive::u128, + new_reserved: ::core::primitive::u128, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + SetBalance, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 232u8, 6u8, 27u8, 131u8, 163u8, 72u8, 148u8, 197u8, 14u8, 239u8, 94u8, + 1u8, 32u8, 94u8, 17u8, 14u8, 123u8, 82u8, 39u8, 233u8, 77u8, 20u8, + 40u8, 139u8, 222u8, 137u8, 103u8, 18u8, 126u8, 63u8, 200u8, 149u8, + ] + { + let call = SetBalance { + who, + new_free, + new_reserved, + }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Exactly as `transfer`, except the origin must be root and the source account may be"] + #[doc = "specified."] + #[doc = "# "] + #[doc = "- Same as transfer, but additional read and write because the source account is not"] + #[doc = " assumed to be in the overlay."] + #[doc = "# "] + pub fn force_transfer( + &self, + source: ::subxt::sp_runtime::MultiAddress< + ::subxt::sp_core::crypto::AccountId32, + (), + >, + dest: ::subxt::sp_runtime::MultiAddress< + ::subxt::sp_core::crypto::AccountId32, + (), + >, + value: ::core::primitive::u128, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + ForceTransfer, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 120u8, 66u8, 111u8, 84u8, 176u8, 241u8, 214u8, 118u8, 219u8, 75u8, + 127u8, 222u8, 45u8, 33u8, 204u8, 147u8, 126u8, 214u8, 101u8, 190u8, + 37u8, 37u8, 159u8, 166u8, 61u8, 143u8, 22u8, 32u8, 15u8, 83u8, 221u8, + 230u8, + ] + { + let call = ForceTransfer { + source, + dest, + value, + }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Same as the [`transfer`] call, but with a check that the transfer will not kill the"] + #[doc = "origin account."] + #[doc = ""] + #[doc = "99% of the time you want [`transfer`] instead."] + #[doc = ""] + #[doc = "[`transfer`]: struct.Pallet.html#method.transfer"] + pub fn transfer_keep_alive( + &self, + dest: ::subxt::sp_runtime::MultiAddress< + ::subxt::sp_core::crypto::AccountId32, + (), + >, + value: ::core::primitive::u128, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + TransferKeepAlive, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? 
+ == [ + 111u8, 233u8, 125u8, 71u8, 223u8, 141u8, 112u8, 94u8, 157u8, 11u8, + 88u8, 7u8, 239u8, 145u8, 247u8, 183u8, 245u8, 87u8, 157u8, 35u8, 49u8, + 91u8, 54u8, 103u8, 101u8, 76u8, 110u8, 94u8, 81u8, 170u8, 153u8, 209u8, + ] + { + let call = TransferKeepAlive { dest, value }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Transfer the entire transferable balance from the caller account."] + #[doc = ""] + #[doc = "NOTE: This function only attempts to transfer _transferable_ balances. This means that"] + #[doc = "any locked, reserved, or existential deposits (when `keep_alive` is `true`), will not be"] + #[doc = "transferred by this function. To ensure that this function results in a killed account,"] + #[doc = "you might need to prepare the account by removing any reference counters, storage"] + #[doc = "deposits, etc..."] + #[doc = ""] + #[doc = "The dispatch origin of this call must be Signed."] + #[doc = ""] + #[doc = "- `dest`: The recipient of the transfer."] + #[doc = "- `keep_alive`: A boolean to determine if the `transfer_all` operation should send all"] + #[doc = " of the funds the account has, causing the sender account to be killed (false), or"] + #[doc = " transfer everything except at least the existential deposit, which will guarantee to"] + #[doc = " keep the sender account alive (true). # "] + #[doc = "- O(1). Just like transfer, but reading the user's transferable balance first."] + #[doc = " #"] + pub fn transfer_all( + &self, + dest: ::subxt::sp_runtime::MultiAddress< + ::subxt::sp_core::crypto::AccountId32, + (), + >, + keep_alive: ::core::primitive::bool, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + TransferAll, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 240u8, 165u8, 185u8, 144u8, 24u8, 149u8, 15u8, 46u8, 60u8, 147u8, 19u8, + 187u8, 96u8, 24u8, 150u8, 53u8, 151u8, 232u8, 200u8, 164u8, 176u8, + 167u8, 8u8, 23u8, 63u8, 135u8, 68u8, 110u8, 5u8, 21u8, 35u8, 78u8, + ] + { + let call = TransferAll { dest, keep_alive }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Unreserve some balance from a user by force."] + #[doc = ""] + #[doc = "Can only be called by ROOT."] + pub fn force_unreserve( + &self, + who: ::subxt::sp_runtime::MultiAddress< + ::subxt::sp_core::crypto::AccountId32, + (), + >, + amount: ::core::primitive::u128, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + ForceUnreserve, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? 
+ == [ + 106u8, 42u8, 48u8, 136u8, 41u8, 155u8, 214u8, 112u8, 99u8, 122u8, + 202u8, 250u8, 95u8, 60u8, 182u8, 13u8, 25u8, 149u8, 212u8, 212u8, + 247u8, 191u8, 130u8, 95u8, 84u8, 252u8, 252u8, 197u8, 244u8, 149u8, + 103u8, 67u8, + ] + { + let call = ForceUnreserve { who, amount }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + pub type Event = runtime_types::pallet_balances::pallet::Event; + pub mod events { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "An account was created with some free balance."] + pub struct Endowed { + pub account: ::subxt::sp_core::crypto::AccountId32, + pub free_balance: ::core::primitive::u128, + } + impl ::subxt::Event for Endowed { + const PALLET: &'static str = "Balances"; + const EVENT: &'static str = "Endowed"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "An account was removed whose balance was non-zero but below ExistentialDeposit,"] + #[doc = "resulting in an outright loss."] + pub struct DustLost { + pub account: ::subxt::sp_core::crypto::AccountId32, + pub amount: ::core::primitive::u128, + } + impl ::subxt::Event for DustLost { + const PALLET: &'static str = "Balances"; + const EVENT: &'static str = "DustLost"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "Transfer succeeded."] + pub struct Transfer { + pub from: ::subxt::sp_core::crypto::AccountId32, + pub to: ::subxt::sp_core::crypto::AccountId32, + pub amount: ::core::primitive::u128, + } + impl ::subxt::Event for Transfer { + const PALLET: &'static str = "Balances"; + const EVENT: &'static str = "Transfer"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "A balance was set by root."] + pub struct BalanceSet { + pub who: ::subxt::sp_core::crypto::AccountId32, + pub free: ::core::primitive::u128, + pub reserved: ::core::primitive::u128, + } + impl ::subxt::Event for BalanceSet { + const PALLET: &'static str = "Balances"; + const EVENT: &'static str = "BalanceSet"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "Some balance was reserved (moved from free to reserved)."] + pub struct Reserved { + pub who: ::subxt::sp_core::crypto::AccountId32, + pub amount: ::core::primitive::u128, + } + impl ::subxt::Event for Reserved { + const PALLET: &'static str = "Balances"; + const EVENT: &'static str = "Reserved"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "Some balance was unreserved (moved from reserved to free)."] + pub struct Unreserved { + pub who: ::subxt::sp_core::crypto::AccountId32, + pub amount: ::core::primitive::u128, + } + impl ::subxt::Event for Unreserved { + const PALLET: &'static str = "Balances"; + const EVENT: &'static str = "Unreserved"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "Some balance was moved from the reserve of the first account to the second account."] + #[doc = "Final argument indicates the destination balance type."] + pub struct ReserveRepatriated { + pub from: ::subxt::sp_core::crypto::AccountId32, + pub to: ::subxt::sp_core::crypto::AccountId32, + pub amount: ::core::primitive::u128, + pub destination_status: + runtime_types::frame_support::traits::tokens::misc::BalanceStatus, + } + impl ::subxt::Event for ReserveRepatriated { + const 
PALLET: &'static str = "Balances"; + const EVENT: &'static str = "ReserveRepatriated"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "Some amount was deposited (e.g. for transaction fees)."] + pub struct Deposit { + pub who: ::subxt::sp_core::crypto::AccountId32, + pub amount: ::core::primitive::u128, + } + impl ::subxt::Event for Deposit { + const PALLET: &'static str = "Balances"; + const EVENT: &'static str = "Deposit"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "Some amount was withdrawn from the account (e.g. for transaction fees)."] + pub struct Withdraw { + pub who: ::subxt::sp_core::crypto::AccountId32, + pub amount: ::core::primitive::u128, + } + impl ::subxt::Event for Withdraw { + const PALLET: &'static str = "Balances"; + const EVENT: &'static str = "Withdraw"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "Some amount was removed from the account (e.g. for misbehavior)."] + pub struct Slashed { + pub who: ::subxt::sp_core::crypto::AccountId32, + pub amount: ::core::primitive::u128, + } + impl ::subxt::Event for Slashed { + const PALLET: &'static str = "Balances"; + const EVENT: &'static str = "Slashed"; + } + } + pub mod storage { + use super::runtime_types; + pub struct TotalIssuance; + impl ::subxt::StorageEntry for TotalIssuance { + const PALLET: &'static str = "Balances"; + const STORAGE: &'static str = "TotalIssuance"; + type Value = ::core::primitive::u128; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct Account<'a>(pub &'a ::subxt::sp_core::crypto::AccountId32); + impl ::subxt::StorageEntry for Account<'_> { + const PALLET: &'static str = "Balances"; + const STORAGE: &'static str = "Account"; + type Value = runtime_types::pallet_balances::AccountData<::core::primitive::u128>; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( + &self.0, + ::subxt::StorageHasher::Blake2_128Concat, + )]) + } + } + pub struct Locks<'a>(pub &'a ::subxt::sp_core::crypto::AccountId32); + impl ::subxt::StorageEntry for Locks<'_> { + const PALLET: &'static str = "Balances"; + const STORAGE: &'static str = "Locks"; + type Value = + runtime_types::frame_support::storage::weak_bounded_vec::WeakBoundedVec< + runtime_types::pallet_balances::BalanceLock<::core::primitive::u128>, + >; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( + &self.0, + ::subxt::StorageHasher::Blake2_128Concat, + )]) + } + } + pub struct Reserves<'a>(pub &'a ::subxt::sp_core::crypto::AccountId32); + impl ::subxt::StorageEntry for Reserves<'_> { + const PALLET: &'static str = "Balances"; + const STORAGE: &'static str = "Reserves"; + type Value = runtime_types::frame_support::storage::bounded_vec::BoundedVec< + runtime_types::pallet_balances::ReserveData< + [::core::primitive::u8; 8usize], + ::core::primitive::u128, + >, + >; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( + &self.0, + ::subxt::StorageHasher::Blake2_128Concat, + )]) + } + } + pub struct StorageVersion; + impl ::subxt::StorageEntry for StorageVersion { + const PALLET: &'static str = "Balances"; + const STORAGE: &'static str = "StorageVersion"; + type Value = runtime_types::pallet_balances::Releases; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + 
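+ // --- Illustrative note (not part of the generated bindings) ---
+ // The storage entry types above (`TotalIssuance`, `Account`, `Locks`,
+ // `Reserves`, `StorageVersion`) are plain keys or Blake2_128Concat maps;
+ // the `StorageApi` defined below wraps them so callers can fetch decoded
+ // values. A minimal usage sketch, assuming a connected `::subxt::Client`
+ // bound to `client` and an `AccountId32` bound to `account_id` (both
+ // hypothetical bindings, not defined anywhere in this patch):
+ //
+ //     let storage = StorageApi::new(&client);
+ //     let total_issuance = storage.total_issuance(None).await?;
+ //     let account_data = storage.account(&account_id, None).await?;
+ //
+ // Passing `None` as the block hash queries state at the node's latest
+ // block. The single-key getters below delegate to `fetch_or_default` on
+ // the storage client, while the `*_iter` variants return a `KeyIter`
+ // over every key under the same storage prefix.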
pub struct StorageApi<'a, T: ::subxt::Config> { + client: &'a ::subxt::Client, + } + impl<'a, T: ::subxt::Config> StorageApi<'a, T> { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { client } + } + #[doc = " The total units issued in the system."] + pub async fn total_issuance( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result<::core::primitive::u128, ::subxt::BasicError> + { + if self.client.metadata().storage_hash::()? + == [ + 1u8, 206u8, 252u8, 237u8, 6u8, 30u8, 20u8, 232u8, 164u8, 115u8, 51u8, + 156u8, 156u8, 206u8, 241u8, 187u8, 44u8, 84u8, 25u8, 164u8, 235u8, + 20u8, 86u8, 242u8, 124u8, 23u8, 28u8, 140u8, 26u8, 73u8, 231u8, 51u8, + ] + { + let entry = TotalIssuance; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The Balances pallet example of storing the balance of an account."] + #[doc = ""] + #[doc = " # Example"] + #[doc = ""] + #[doc = " ```nocompile"] + #[doc = " impl pallet_balances::Config for Runtime {"] + #[doc = " type AccountStore = StorageMapShim, frame_system::Provider, AccountId, Self::AccountData>"] + #[doc = " }"] + #[doc = " ```"] + #[doc = ""] + #[doc = " You can also store the balance of an account in the `System` pallet."] + #[doc = ""] + #[doc = " # Example"] + #[doc = ""] + #[doc = " ```nocompile"] + #[doc = " impl pallet_balances::Config for Runtime {"] + #[doc = " type AccountStore = System"] + #[doc = " }"] + #[doc = " ```"] + #[doc = ""] + #[doc = " But this comes with tradeoffs, storing account balances in the system pallet stores"] + #[doc = " `frame_system` data alongside the account data contrary to storing account balances in the"] + #[doc = " `Balances` pallet, which uses a `StorageMap` to store balances data only."] + #[doc = " NOTE: This is only used in the case that this pallet is used to store balances."] + pub async fn account( + &self, + _0: &::subxt::sp_core::crypto::AccountId32, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + runtime_types::pallet_balances::AccountData<::core::primitive::u128>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? 
+ == [ + 129u8, 169u8, 171u8, 206u8, 229u8, 178u8, 69u8, 118u8, 199u8, 64u8, + 254u8, 67u8, 16u8, 154u8, 160u8, 197u8, 177u8, 161u8, 148u8, 199u8, + 78u8, 219u8, 187u8, 83u8, 99u8, 110u8, 207u8, 252u8, 243u8, 39u8, 46u8, + 106u8, + ] + { + let entry = Account(_0); + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The Balances pallet example of storing the balance of an account."] + #[doc = ""] + #[doc = " # Example"] + #[doc = ""] + #[doc = " ```nocompile"] + #[doc = " impl pallet_balances::Config for Runtime {"] + #[doc = " type AccountStore = StorageMapShim, frame_system::Provider, AccountId, Self::AccountData>"] + #[doc = " }"] + #[doc = " ```"] + #[doc = ""] + #[doc = " You can also store the balance of an account in the `System` pallet."] + #[doc = ""] + #[doc = " # Example"] + #[doc = ""] + #[doc = " ```nocompile"] + #[doc = " impl pallet_balances::Config for Runtime {"] + #[doc = " type AccountStore = System"] + #[doc = " }"] + #[doc = " ```"] + #[doc = ""] + #[doc = " But this comes with tradeoffs, storing account balances in the system pallet stores"] + #[doc = " `frame_system` data alongside the account data contrary to storing account balances in the"] + #[doc = " `Balances` pallet, which uses a `StorageMap` to store balances data only."] + #[doc = " NOTE: This is only used in the case that this pallet is used to store balances."] + pub async fn account_iter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result<::subxt::KeyIter<'a, T, Account<'a>>, ::subxt::BasicError> + { + if self.client.metadata().storage_hash::()? + == [ + 129u8, 169u8, 171u8, 206u8, 229u8, 178u8, 69u8, 118u8, 199u8, 64u8, + 254u8, 67u8, 16u8, 154u8, 160u8, 197u8, 177u8, 161u8, 148u8, 199u8, + 78u8, 219u8, 187u8, 83u8, 99u8, 110u8, 207u8, 252u8, 243u8, 39u8, 46u8, + 106u8, + ] + { + self.client.storage().iter(block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Any liquidity locks on some account balances."] + #[doc = " NOTE: Should only be accessed when setting, changing and freeing a lock."] + pub async fn locks( + &self, + _0: &::subxt::sp_core::crypto::AccountId32, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + runtime_types::frame_support::storage::weak_bounded_vec::WeakBoundedVec< + runtime_types::pallet_balances::BalanceLock<::core::primitive::u128>, + >, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 31u8, 76u8, 213u8, 60u8, 86u8, 11u8, 155u8, 151u8, 33u8, 212u8, 74u8, + 89u8, 174u8, 74u8, 195u8, 107u8, 29u8, 163u8, 178u8, 34u8, 209u8, 8u8, + 201u8, 237u8, 77u8, 99u8, 205u8, 212u8, 236u8, 132u8, 2u8, 252u8, + ] + { + let entry = Locks(_0); + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Any liquidity locks on some account balances."] + #[doc = " NOTE: Should only be accessed when setting, changing and freeing a lock."] + pub async fn locks_iter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result<::subxt::KeyIter<'a, T, Locks<'a>>, ::subxt::BasicError> + { + if self.client.metadata().storage_hash::()? 
+ == [ + 31u8, 76u8, 213u8, 60u8, 86u8, 11u8, 155u8, 151u8, 33u8, 212u8, 74u8, + 89u8, 174u8, 74u8, 195u8, 107u8, 29u8, 163u8, 178u8, 34u8, 209u8, 8u8, + 201u8, 237u8, 77u8, 99u8, 205u8, 212u8, 236u8, 132u8, 2u8, 252u8, + ] + { + self.client.storage().iter(block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Named reserves on some account balances."] + pub async fn reserves( + &self, + _0: &::subxt::sp_core::crypto::AccountId32, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + runtime_types::frame_support::storage::bounded_vec::BoundedVec< + runtime_types::pallet_balances::ReserveData< + [::core::primitive::u8; 8usize], + ::core::primitive::u128, + >, + >, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 103u8, 6u8, 69u8, 151u8, 81u8, 40u8, 146u8, 113u8, 56u8, 239u8, 104u8, + 31u8, 168u8, 242u8, 141u8, 121u8, 213u8, 213u8, 114u8, 63u8, 62u8, + 47u8, 91u8, 119u8, 57u8, 91u8, 95u8, 81u8, 19u8, 208u8, 59u8, 146u8, + ] + { + let entry = Reserves(_0); + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Named reserves on some account balances."] + pub async fn reserves_iter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::subxt::KeyIter<'a, T, Reserves<'a>>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 103u8, 6u8, 69u8, 151u8, 81u8, 40u8, 146u8, 113u8, 56u8, 239u8, 104u8, + 31u8, 168u8, 242u8, 141u8, 121u8, 213u8, 213u8, 114u8, 63u8, 62u8, + 47u8, 91u8, 119u8, 57u8, 91u8, 95u8, 81u8, 19u8, 208u8, 59u8, 146u8, + ] + { + self.client.storage().iter(block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Storage version of the pallet."] + #[doc = ""] + #[doc = " This is set to v2.0.0 for new networks."] + pub async fn storage_version( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + runtime_types::pallet_balances::Releases, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 135u8, 96u8, 28u8, 234u8, 124u8, 212u8, 56u8, 140u8, 40u8, 101u8, + 235u8, 128u8, 136u8, 221u8, 182u8, 81u8, 17u8, 9u8, 184u8, 228u8, + 174u8, 165u8, 200u8, 162u8, 214u8, 178u8, 227u8, 72u8, 34u8, 5u8, + 173u8, 96u8, + ] + { + let entry = StorageVersion; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + pub mod constants { + use super::runtime_types; + pub struct ConstantsApi<'a, T: ::subxt::Config> { + client: &'a ::subxt::Client, + } + impl<'a, T: ::subxt::Config> ConstantsApi<'a, T> { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { client } + } + #[doc = " The minimum amount required to keep an account open."] + pub fn existential_deposit( + &self, + ) -> ::core::result::Result<::core::primitive::u128, ::subxt::BasicError> + { + if self + .client + .metadata() + .constant_hash("Balances", "ExistentialDeposit")? 
+ == [ + 100u8, 197u8, 144u8, 241u8, 166u8, 142u8, 204u8, 246u8, 114u8, 229u8, + 145u8, 5u8, 133u8, 180u8, 23u8, 117u8, 117u8, 204u8, 228u8, 32u8, 70u8, + 243u8, 110u8, 36u8, 218u8, 106u8, 47u8, 136u8, 193u8, 46u8, 121u8, + 242u8, + ] + { + let pallet = self.client.metadata().pallet("Balances")?; + let constant = pallet.constant("ExistentialDeposit")?; + let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; + Ok(value) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The maximum number of locks that should exist on an account."] + #[doc = " Not strictly enforced, but used for weight estimation."] + pub fn max_locks( + &self, + ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> + { + if self + .client + .metadata() + .constant_hash("Balances", "MaxLocks")? + == [ + 250u8, 58u8, 19u8, 15u8, 35u8, 113u8, 227u8, 89u8, 39u8, 75u8, 21u8, + 108u8, 202u8, 32u8, 163u8, 167u8, 207u8, 233u8, 69u8, 151u8, 53u8, + 164u8, 230u8, 16u8, 14u8, 22u8, 172u8, 46u8, 36u8, 216u8, 29u8, 1u8, + ] + { + let pallet = self.client.metadata().pallet("Balances")?; + let constant = pallet.constant("MaxLocks")?; + let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; + Ok(value) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The maximum number of named reserves that can exist on an account."] + pub fn max_reserves( + &self, + ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> + { + if self + .client + .metadata() + .constant_hash("Balances", "MaxReserves")? + == [ + 24u8, 30u8, 77u8, 89u8, 216u8, 114u8, 140u8, 11u8, 127u8, 252u8, 130u8, + 203u8, 4u8, 55u8, 62u8, 240u8, 65u8, 182u8, 187u8, 189u8, 140u8, 6u8, + 177u8, 216u8, 159u8, 108u8, 18u8, 73u8, 95u8, 67u8, 62u8, 50u8, + ] + { + let pallet = self.client.metadata().pallet("Balances")?; + let constant = pallet.constant("MaxReserves")?; + let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; + Ok(value) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + } + pub mod transaction_payment { + use super::root_mod; + use super::runtime_types; + pub mod storage { + use super::runtime_types; + pub struct NextFeeMultiplier; + impl ::subxt::StorageEntry for NextFeeMultiplier { + const PALLET: &'static str = "TransactionPayment"; + const STORAGE: &'static str = "NextFeeMultiplier"; + type Value = runtime_types::sp_arithmetic::fixed_point::FixedU128; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct StorageVersion; + impl ::subxt::StorageEntry for StorageVersion { + const PALLET: &'static str = "TransactionPayment"; + const STORAGE: &'static str = "StorageVersion"; + type Value = runtime_types::pallet_transaction_payment::Releases; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct StorageApi<'a, T: ::subxt::Config> { + client: &'a ::subxt::Client, + } + impl<'a, T: ::subxt::Config> StorageApi<'a, T> { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { client } + } + pub async fn next_fee_multiplier( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + runtime_types::sp_arithmetic::fixed_point::FixedU128, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? 
+ == [ + 88u8, 50u8, 72u8, 23u8, 241u8, 137u8, 135u8, 135u8, 33u8, 57u8, 241u8, + 247u8, 212u8, 19u8, 116u8, 144u8, 60u8, 2u8, 6u8, 191u8, 190u8, 96u8, + 133u8, 199u8, 29u8, 132u8, 49u8, 121u8, 73u8, 116u8, 104u8, 141u8, + ] + { + let entry = NextFeeMultiplier; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + pub async fn storage_version( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + runtime_types::pallet_transaction_payment::Releases, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 219u8, 243u8, 82u8, 176u8, 65u8, 5u8, 132u8, 114u8, 8u8, 82u8, 176u8, + 200u8, 97u8, 150u8, 177u8, 164u8, 166u8, 11u8, 34u8, 12u8, 12u8, 198u8, + 58u8, 191u8, 186u8, 221u8, 221u8, 119u8, 181u8, 253u8, 154u8, 228u8, + ] + { + let entry = StorageVersion; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + pub mod constants { + use super::runtime_types; + pub struct ConstantsApi<'a, T: ::subxt::Config> { + client: &'a ::subxt::Client, + } + impl<'a, T: ::subxt::Config> ConstantsApi<'a, T> { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { client } + } + #[doc = " A fee mulitplier for `Operational` extrinsics to compute \"virtual tip\" to boost their"] + #[doc = " `priority`"] + #[doc = ""] + #[doc = " This value is multipled by the `final_fee` to obtain a \"virtual tip\" that is later"] + #[doc = " added to a tip component in regular `priority` calculations."] + #[doc = " It means that a `Normal` transaction can front-run a similarly-sized `Operational`"] + #[doc = " extrinsic (with no tip), by including a tip value greater than the virtual tip."] + #[doc = ""] + #[doc = " ```rust,ignore"] + #[doc = " // For `Normal`"] + #[doc = " let priority = priority_calc(tip);"] + #[doc = ""] + #[doc = " // For `Operational`"] + #[doc = " let virtual_tip = (inclusion_fee + tip) * OperationalFeeMultiplier;"] + #[doc = " let priority = priority_calc(tip + virtual_tip);"] + #[doc = " ```"] + #[doc = ""] + #[doc = " Note that since we use `final_fee` the multiplier applies also to the regular `tip`"] + #[doc = " sent with the transaction. So, not only does the transaction get a priority bump based"] + #[doc = " on the `inclusion_fee`, but we also amplify the impact of tips applied to `Operational`"] + #[doc = " transactions."] + pub fn operational_fee_multiplier( + &self, + ) -> ::core::result::Result<::core::primitive::u8, ::subxt::BasicError> + { + if self + .client + .metadata() + .constant_hash("TransactionPayment", "OperationalFeeMultiplier")? 
+ == [ + 161u8, 232u8, 150u8, 43u8, 106u8, 83u8, 56u8, 248u8, 54u8, 123u8, + 244u8, 73u8, 5u8, 49u8, 245u8, 150u8, 70u8, 92u8, 158u8, 207u8, 127u8, + 115u8, 211u8, 21u8, 24u8, 136u8, 89u8, 44u8, 151u8, 211u8, 235u8, + 196u8, + ] + { + let pallet = self.client.metadata().pallet("TransactionPayment")?; + let constant = pallet.constant("OperationalFeeMultiplier")?; + let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; + Ok(value) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The polynomial that is applied in order to derive fee from weight."] + pub fn weight_to_fee( + &self, + ) -> ::core::result::Result< + ::std::vec::Vec< + runtime_types::frame_support::weights::WeightToFeeCoefficient< + ::core::primitive::u128, + >, + >, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .constant_hash("TransactionPayment", "WeightToFee")? + == [ + 45u8, 79u8, 182u8, 151u8, 56u8, 94u8, 151u8, 17u8, 186u8, 52u8, 33u8, + 209u8, 168u8, 84u8, 55u8, 203u8, 54u8, 162u8, 132u8, 64u8, 111u8, + 141u8, 19u8, 218u8, 142u8, 4u8, 246u8, 166u8, 126u8, 79u8, 11u8, 132u8, + ] + { + let pallet = self.client.metadata().pallet("TransactionPayment")?; + let constant = pallet.constant("WeightToFee")?; + let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; + Ok(value) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The polynomial that is applied in order to derive fee from length."] + pub fn length_to_fee( + &self, + ) -> ::core::result::Result< + ::std::vec::Vec< + runtime_types::frame_support::weights::WeightToFeeCoefficient< + ::core::primitive::u128, + >, + >, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .constant_hash("TransactionPayment", "LengthToFee")? 
+ == [ + 247u8, 235u8, 15u8, 82u8, 189u8, 42u8, 103u8, 179u8, 146u8, 133u8, + 145u8, 191u8, 59u8, 45u8, 132u8, 195u8, 181u8, 238u8, 176u8, 137u8, + 82u8, 126u8, 92u8, 175u8, 9u8, 189u8, 137u8, 94u8, 165u8, 150u8, 25u8, + 81u8, + ] + { + let pallet = self.client.metadata().pallet("TransactionPayment")?; + let constant = pallet.constant("LengthToFee")?; + let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; + Ok(value) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + } + pub mod authorship { + use super::root_mod; + use super::runtime_types; + pub mod calls { + use super::root_mod; + use super::runtime_types; + type DispatchError = runtime_types::sp_runtime::DispatchError; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct SetUncles { + pub new_uncles: ::std::vec::Vec< + runtime_types::sp_runtime::generic::header::Header< + ::core::primitive::u32, + runtime_types::sp_runtime::traits::BlakeTwo256, + >, + >, + } + impl ::subxt::Call for SetUncles { + const PALLET: &'static str = "Authorship"; + const FUNCTION: &'static str = "set_uncles"; + } + pub struct TransactionApi<'a, T: ::subxt::Config, X> { + client: &'a ::subxt::Client, + marker: ::core::marker::PhantomData, + } + impl<'a, T, X> TransactionApi<'a, T, X> + where + T: ::subxt::Config, + X: ::subxt::extrinsic::ExtrinsicParams, + { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { + client, + marker: ::core::marker::PhantomData, + } + } + #[doc = "Provide a set of uncles."] + pub fn set_uncles( + &self, + new_uncles: ::std::vec::Vec< + runtime_types::sp_runtime::generic::header::Header< + ::core::primitive::u32, + runtime_types::sp_runtime::traits::BlakeTwo256, + >, + >, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + SetUncles, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? 
+ == [ + 77u8, 73u8, 220u8, 106u8, 126u8, 48u8, 20u8, 254u8, 87u8, 185u8, 110u8, + 253u8, 250u8, 10u8, 89u8, 77u8, 72u8, 90u8, 244u8, 27u8, 125u8, 43u8, + 58u8, 217u8, 112u8, 98u8, 233u8, 35u8, 194u8, 214u8, 183u8, 36u8, + ] + { + let call = SetUncles { new_uncles }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + pub mod storage { + use super::runtime_types; + pub struct Uncles; + impl ::subxt::StorageEntry for Uncles { + const PALLET: &'static str = "Authorship"; + const STORAGE: &'static str = "Uncles"; + type Value = ::std::vec::Vec< + runtime_types::pallet_authorship::UncleEntryItem< + ::core::primitive::u32, + ::subxt::sp_core::H256, + ::subxt::sp_core::crypto::AccountId32, + >, + >; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct Author; + impl ::subxt::StorageEntry for Author { + const PALLET: &'static str = "Authorship"; + const STORAGE: &'static str = "Author"; + type Value = ::subxt::sp_core::crypto::AccountId32; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct DidSetUncles; + impl ::subxt::StorageEntry for DidSetUncles { + const PALLET: &'static str = "Authorship"; + const STORAGE: &'static str = "DidSetUncles"; + type Value = ::core::primitive::bool; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct StorageApi<'a, T: ::subxt::Config> { + client: &'a ::subxt::Client, + } + impl<'a, T: ::subxt::Config> StorageApi<'a, T> { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { client } + } + #[doc = " Uncles"] + pub async fn uncles( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::std::vec::Vec< + runtime_types::pallet_authorship::UncleEntryItem< + ::core::primitive::u32, + ::subxt::sp_core::H256, + ::subxt::sp_core::crypto::AccountId32, + >, + >, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 234u8, 33u8, 171u8, 180u8, 154u8, 148u8, 191u8, 3u8, 191u8, 250u8, + 235u8, 39u8, 70u8, 41u8, 146u8, 155u8, 118u8, 154u8, 122u8, 27u8, + 126u8, 251u8, 2u8, 157u8, 187u8, 222u8, 120u8, 240u8, 21u8, 45u8, + 222u8, 13u8, + ] + { + let entry = Uncles; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Author of current block."] + pub async fn author( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::core::option::Option<::subxt::sp_core::crypto::AccountId32>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 191u8, 57u8, 3u8, 242u8, 220u8, 123u8, 103u8, 215u8, 149u8, 120u8, + 20u8, 139u8, 146u8, 234u8, 180u8, 105u8, 129u8, 128u8, 114u8, 147u8, + 114u8, 236u8, 23u8, 21u8, 15u8, 250u8, 180u8, 19u8, 177u8, 145u8, 77u8, + 228u8, + ] + { + let entry = Author; + self.client.storage().fetch(&entry, block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Whether uncles were already set in this block."] + pub async fn did_set_uncles( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result<::core::primitive::bool, ::subxt::BasicError> + { + if self.client.metadata().storage_hash::()? 
+ == [ + 64u8, 3u8, 208u8, 187u8, 50u8, 45u8, 37u8, 88u8, 163u8, 226u8, 37u8, + 126u8, 232u8, 107u8, 156u8, 187u8, 29u8, 15u8, 53u8, 46u8, 28u8, 73u8, + 83u8, 123u8, 14u8, 244u8, 243u8, 43u8, 245u8, 143u8, 15u8, 115u8, + ] + { + let entry = DidSetUncles; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + pub mod constants { + use super::runtime_types; + pub struct ConstantsApi<'a, T: ::subxt::Config> { + client: &'a ::subxt::Client, + } + impl<'a, T: ::subxt::Config> ConstantsApi<'a, T> { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { client } + } + #[doc = " The number of blocks back we should accept uncles."] + #[doc = " This means that we will deal with uncle-parents that are"] + #[doc = " `UncleGenerations + 1` before `now`."] + pub fn uncle_generations( + &self, + ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> + { + if self + .client + .metadata() + .constant_hash("Authorship", "UncleGenerations")? + == [ + 0u8, 72u8, 57u8, 175u8, 222u8, 143u8, 191u8, 33u8, 163u8, 157u8, 202u8, + 83u8, 186u8, 103u8, 162u8, 103u8, 227u8, 158u8, 239u8, 212u8, 205u8, + 193u8, 226u8, 138u8, 5u8, 220u8, 221u8, 42u8, 7u8, 146u8, 173u8, 205u8, + ] + { + let pallet = self.client.metadata().pallet("Authorship")?; + let constant = pallet.constant("UncleGenerations")?; + let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; + Ok(value) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + } + pub mod offences { + use super::root_mod; + use super::runtime_types; + pub type Event = runtime_types::pallet_offences::pallet::Event; + pub mod events { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "There is an offence reported of the given `kind` happened at the `session_index` and"] + #[doc = "(kind-specific) time slot. 
This event is not deposited for duplicate slashes."] + #[doc = "\\[kind, timeslot\\]."] + pub struct Offence { + pub kind: [::core::primitive::u8; 16usize], + pub timeslot: ::std::vec::Vec<::core::primitive::u8>, + } + impl ::subxt::Event for Offence { + const PALLET: &'static str = "Offences"; + const EVENT: &'static str = "Offence"; + } + } + pub mod storage { + use super::runtime_types; + pub struct Reports<'a>(pub &'a ::subxt::sp_core::H256); + impl ::subxt::StorageEntry for Reports<'_> { + const PALLET: &'static str = "Offences"; + const STORAGE: &'static str = "Reports"; + type Value = runtime_types::sp_staking::offence::OffenceDetails< + ::subxt::sp_core::crypto::AccountId32, + (::subxt::sp_core::crypto::AccountId32, ()), + >; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( + &self.0, + ::subxt::StorageHasher::Twox64Concat, + )]) + } + } + pub struct ConcurrentReportsIndex<'a>( + pub &'a [::core::primitive::u8; 16usize], + pub &'a [::core::primitive::u8], + ); + impl ::subxt::StorageEntry for ConcurrentReportsIndex<'_> { + const PALLET: &'static str = "Offences"; + const STORAGE: &'static str = "ConcurrentReportsIndex"; + type Value = ::std::vec::Vec<::subxt::sp_core::H256>; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Map(vec![ + ::subxt::StorageMapKey::new(&self.0, ::subxt::StorageHasher::Twox64Concat), + ::subxt::StorageMapKey::new(&self.1, ::subxt::StorageHasher::Twox64Concat), + ]) + } + } + pub struct ReportsByKindIndex<'a>(pub &'a [::core::primitive::u8; 16usize]); + impl ::subxt::StorageEntry for ReportsByKindIndex<'_> { + const PALLET: &'static str = "Offences"; + const STORAGE: &'static str = "ReportsByKindIndex"; + type Value = ::std::vec::Vec<::core::primitive::u8>; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( + &self.0, + ::subxt::StorageHasher::Twox64Concat, + )]) + } + } + pub struct StorageApi<'a, T: ::subxt::Config> { + client: &'a ::subxt::Client, + } + impl<'a, T: ::subxt::Config> StorageApi<'a, T> { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { client } + } + #[doc = " The primary structure that holds all offence records keyed by report identifiers."] + pub async fn reports( + &self, + _0: &::subxt::sp_core::H256, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::core::option::Option< + runtime_types::sp_staking::offence::OffenceDetails< + ::subxt::sp_core::crypto::AccountId32, + (::subxt::sp_core::crypto::AccountId32, ()), + >, + >, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 232u8, 29u8, 235u8, 182u8, 72u8, 179u8, 168u8, 231u8, 177u8, 122u8, + 225u8, 193u8, 172u8, 163u8, 228u8, 219u8, 59u8, 210u8, 1u8, 11u8, + 181u8, 218u8, 26u8, 187u8, 176u8, 101u8, 212u8, 178u8, 70u8, 229u8, + 85u8, 205u8, + ] + { + let entry = Reports(_0); + self.client.storage().fetch(&entry, block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The primary structure that holds all offence records keyed by report identifiers."] + pub async fn reports_iter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result<::subxt::KeyIter<'a, T, Reports<'a>>, ::subxt::BasicError> + { + if self.client.metadata().storage_hash::()? 
+ == [ + 232u8, 29u8, 235u8, 182u8, 72u8, 179u8, 168u8, 231u8, 177u8, 122u8, + 225u8, 193u8, 172u8, 163u8, 228u8, 219u8, 59u8, 210u8, 1u8, 11u8, + 181u8, 218u8, 26u8, 187u8, 176u8, 101u8, 212u8, 178u8, 70u8, 229u8, + 85u8, 205u8, + ] + { + self.client.storage().iter(block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " A vector of reports of the same kind that happened at the same time slot."] + pub async fn concurrent_reports_index( + &self, + _0: &[::core::primitive::u8; 16usize], + _1: &[::core::primitive::u8], + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::std::vec::Vec<::subxt::sp_core::H256>, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .storage_hash::()? + == [ + 110u8, 182u8, 108u8, 15u8, 22u8, 226u8, 241u8, 98u8, 191u8, 37u8, + 135u8, 119u8, 88u8, 238u8, 202u8, 216u8, 221u8, 165u8, 144u8, 236u8, + 113u8, 49u8, 55u8, 18u8, 238u8, 238u8, 128u8, 210u8, 161u8, 134u8, + 130u8, 195u8, + ] + { + let entry = ConcurrentReportsIndex(_0, _1); + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " A vector of reports of the same kind that happened at the same time slot."] + pub async fn concurrent_reports_index_iter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::subxt::KeyIter<'a, T, ConcurrentReportsIndex<'a>>, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .storage_hash::()? + == [ + 110u8, 182u8, 108u8, 15u8, 22u8, 226u8, 241u8, 98u8, 191u8, 37u8, + 135u8, 119u8, 88u8, 238u8, 202u8, 216u8, 221u8, 165u8, 144u8, 236u8, + 113u8, 49u8, 55u8, 18u8, 238u8, 238u8, 128u8, 210u8, 161u8, 134u8, + 130u8, 195u8, + ] + { + self.client.storage().iter(block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Enumerates all reports of a kind along with the time they happened."] + #[doc = ""] + #[doc = " All reports are sorted by the time of offence."] + #[doc = ""] + #[doc = " Note that the actual type of this mapping is `Vec`, this is because values of"] + #[doc = " different types are not supported at the moment so we are doing the manual serialization."] + pub async fn reports_by_kind_index( + &self, + _0: &[::core::primitive::u8; 16usize], + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::std::vec::Vec<::core::primitive::u8>, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .storage_hash::()? 
+ == [ + 162u8, 66u8, 131u8, 48u8, 250u8, 237u8, 179u8, 214u8, 36u8, 137u8, + 226u8, 136u8, 120u8, 61u8, 215u8, 43u8, 164u8, 50u8, 91u8, 164u8, 20u8, + 96u8, 189u8, 100u8, 242u8, 106u8, 21u8, 136u8, 98u8, 215u8, 180u8, + 145u8, + ] + { + let entry = ReportsByKindIndex(_0); + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Enumerates all reports of a kind along with the time they happened."] + #[doc = ""] + #[doc = " All reports are sorted by the time of offence."] + #[doc = ""] + #[doc = " Note that the actual type of this mapping is `Vec`, this is because values of"] + #[doc = " different types are not supported at the moment so we are doing the manual serialization."] + pub async fn reports_by_kind_index_iter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::subxt::KeyIter<'a, T, ReportsByKindIndex<'a>>, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .storage_hash::()? + == [ + 162u8, 66u8, 131u8, 48u8, 250u8, 237u8, 179u8, 214u8, 36u8, 137u8, + 226u8, 136u8, 120u8, 61u8, 215u8, 43u8, 164u8, 50u8, 91u8, 164u8, 20u8, + 96u8, 189u8, 100u8, 242u8, 106u8, 21u8, 136u8, 98u8, 215u8, 180u8, + 145u8, + ] + { + self.client.storage().iter(block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + } + pub mod historical { + use super::root_mod; + use super::runtime_types; + pub mod storage { + use super::runtime_types; + pub struct HistoricalSessions<'a>(pub &'a ::core::primitive::u32); + impl ::subxt::StorageEntry for HistoricalSessions<'_> { + const PALLET: &'static str = "Historical"; + const STORAGE: &'static str = "HistoricalSessions"; + type Value = (::subxt::sp_core::H256, ::core::primitive::u32); + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( + &self.0, + ::subxt::StorageHasher::Twox64Concat, + )]) + } + } + pub struct StoredRange; + impl ::subxt::StorageEntry for StoredRange { + const PALLET: &'static str = "Historical"; + const STORAGE: &'static str = "StoredRange"; + type Value = (::core::primitive::u32, ::core::primitive::u32); + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct StorageApi<'a, T: ::subxt::Config> { + client: &'a ::subxt::Client, + } + impl<'a, T: ::subxt::Config> StorageApi<'a, T> { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { client } + } + #[doc = " Mapping from historical session indices to session-data root hash and validator count."] + pub async fn historical_sessions( + &self, + _0: &::core::primitive::u32, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::core::option::Option<(::subxt::sp_core::H256, ::core::primitive::u32)>, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .storage_hash::()? 
+ == [ + 221u8, 75u8, 4u8, 83u8, 130u8, 251u8, 43u8, 26u8, 173u8, 40u8, 222u8, + 39u8, 228u8, 129u8, 201u8, 246u8, 81u8, 147u8, 64u8, 150u8, 147u8, + 165u8, 5u8, 44u8, 153u8, 125u8, 128u8, 222u8, 79u8, 16u8, 252u8, 93u8, + ] + { + let entry = HistoricalSessions(_0); + self.client.storage().fetch(&entry, block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Mapping from historical session indices to session-data root hash and validator count."] + pub async fn historical_sessions_iter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::subxt::KeyIter<'a, T, HistoricalSessions<'a>>, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .storage_hash::()? + == [ + 221u8, 75u8, 4u8, 83u8, 130u8, 251u8, 43u8, 26u8, 173u8, 40u8, 222u8, + 39u8, 228u8, 129u8, 201u8, 246u8, 81u8, 147u8, 64u8, 150u8, 147u8, + 165u8, 5u8, 44u8, 153u8, 125u8, 128u8, 222u8, 79u8, 16u8, 252u8, 93u8, + ] + { + self.client.storage().iter(block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The range of historical sessions we store. [first, last)"] + pub async fn stored_range( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::core::option::Option<(::core::primitive::u32, ::core::primitive::u32)>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 89u8, 239u8, 197u8, 93u8, 135u8, 62u8, 142u8, 237u8, 64u8, 200u8, + 164u8, 4u8, 130u8, 233u8, 16u8, 238u8, 166u8, 206u8, 71u8, 42u8, 171u8, + 84u8, 8u8, 245u8, 183u8, 216u8, 212u8, 16u8, 190u8, 3u8, 167u8, 189u8, + ] + { + let entry = StoredRange; + self.client.storage().fetch(&entry, block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + } + pub mod session { + use super::root_mod; + use super::runtime_types; + pub mod calls { + use super::root_mod; + use super::runtime_types; + type DispatchError = runtime_types::sp_runtime::DispatchError; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct SetKeys { + pub keys: runtime_types::rococo_runtime::SessionKeys, + pub proof: ::std::vec::Vec<::core::primitive::u8>, + } + impl ::subxt::Call for SetKeys { + const PALLET: &'static str = "Session"; + const FUNCTION: &'static str = "set_keys"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct PurgeKeys; + impl ::subxt::Call for PurgeKeys { + const PALLET: &'static str = "Session"; + const FUNCTION: &'static str = "purge_keys"; + } + pub struct TransactionApi<'a, T: ::subxt::Config, X> { + client: &'a ::subxt::Client, + marker: ::core::marker::PhantomData, + } + impl<'a, T, X> TransactionApi<'a, T, X> + where + T: ::subxt::Config, + X: ::subxt::extrinsic::ExtrinsicParams, + { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { + client, + marker: ::core::marker::PhantomData, + } + } + #[doc = "Sets the session key(s) of the function caller to `keys`."] + #[doc = "Allows an account to set its session key prior to becoming a validator."] + #[doc = "This doesn't take effect until the next session."] + #[doc = ""] + #[doc = "The dispatch origin of this function must be signed."] + #[doc = ""] + #[doc = "# "] + #[doc = "- Complexity: `O(1)`. 
Actual cost depends on the number of length of"] + #[doc = " `T::Keys::key_ids()` which is fixed."] + #[doc = "- DbReads: `origin account`, `T::ValidatorIdOf`, `NextKeys`"] + #[doc = "- DbWrites: `origin account`, `NextKeys`"] + #[doc = "- DbReads per key id: `KeyOwner`"] + #[doc = "- DbWrites per key id: `KeyOwner`"] + #[doc = "# "] + pub fn set_keys( + &self, + keys: runtime_types::rococo_runtime::SessionKeys, + proof: ::std::vec::Vec<::core::primitive::u8>, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + SetKeys, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 11u8, 191u8, 79u8, 20u8, 252u8, 226u8, 99u8, 96u8, 217u8, 230u8, 232u8, + 139u8, 135u8, 196u8, 199u8, 148u8, 127u8, 60u8, 118u8, 169u8, 252u8, + 78u8, 184u8, 44u8, 240u8, 62u8, 134u8, 192u8, 96u8, 31u8, 207u8, 125u8, + ] + { + let call = SetKeys { keys, proof }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Removes any session key(s) of the function caller."] + #[doc = ""] + #[doc = "This doesn't take effect until the next session."] + #[doc = ""] + #[doc = "The dispatch origin of this function must be Signed and the account must be either be"] + #[doc = "convertible to a validator ID using the chain's typical addressing system (this usually"] + #[doc = "means being a controller account) or directly convertible into a validator ID (which"] + #[doc = "usually means being a stash account)."] + #[doc = ""] + #[doc = "# "] + #[doc = "- Complexity: `O(1)` in number of key types. Actual cost depends on the number of length"] + #[doc = " of `T::Keys::key_ids()` which is fixed."] + #[doc = "- DbReads: `T::ValidatorIdOf`, `NextKeys`, `origin account`"] + #[doc = "- DbWrites: `NextKeys`, `origin account`"] + #[doc = "- DbWrites per key id: `KeyOwner`"] + #[doc = "# "] + pub fn purge_keys( + &self, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + PurgeKeys, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 200u8, 255u8, 4u8, 213u8, 188u8, 92u8, 99u8, 116u8, 163u8, 152u8, 29u8, + 35u8, 133u8, 119u8, 246u8, 44u8, 91u8, 31u8, 145u8, 23u8, 213u8, 64u8, + 71u8, 242u8, 207u8, 239u8, 231u8, 37u8, 61u8, 63u8, 190u8, 35u8, + ] + { + let call = PurgeKeys {}; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + pub type Event = runtime_types::pallet_session::pallet::Event; + pub mod events { + use super::runtime_types; + #[derive( + :: subxt :: codec :: CompactAs, + :: subxt :: codec :: Decode, + :: subxt :: codec :: Encode, + Debug, + )] + #[doc = "New session has happened. 
Note that the argument is the session index, not the"] + #[doc = "block number as the type might suggest."] + pub struct NewSession { + pub session_index: ::core::primitive::u32, + } + impl ::subxt::Event for NewSession { + const PALLET: &'static str = "Session"; + const EVENT: &'static str = "NewSession"; + } + } + pub mod storage { + use super::runtime_types; + pub struct Validators; + impl ::subxt::StorageEntry for Validators { + const PALLET: &'static str = "Session"; + const STORAGE: &'static str = "Validators"; + type Value = ::std::vec::Vec<::subxt::sp_core::crypto::AccountId32>; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct CurrentIndex; + impl ::subxt::StorageEntry for CurrentIndex { + const PALLET: &'static str = "Session"; + const STORAGE: &'static str = "CurrentIndex"; + type Value = ::core::primitive::u32; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct QueuedChanged; + impl ::subxt::StorageEntry for QueuedChanged { + const PALLET: &'static str = "Session"; + const STORAGE: &'static str = "QueuedChanged"; + type Value = ::core::primitive::bool; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct QueuedKeys; + impl ::subxt::StorageEntry for QueuedKeys { + const PALLET: &'static str = "Session"; + const STORAGE: &'static str = "QueuedKeys"; + type Value = ::std::vec::Vec<( + ::subxt::sp_core::crypto::AccountId32, + runtime_types::rococo_runtime::SessionKeys, + )>; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct DisabledValidators; + impl ::subxt::StorageEntry for DisabledValidators { + const PALLET: &'static str = "Session"; + const STORAGE: &'static str = "DisabledValidators"; + type Value = ::std::vec::Vec<::core::primitive::u32>; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct NextKeys<'a>(pub &'a ::subxt::sp_core::crypto::AccountId32); + impl ::subxt::StorageEntry for NextKeys<'_> { + const PALLET: &'static str = "Session"; + const STORAGE: &'static str = "NextKeys"; + type Value = runtime_types::rococo_runtime::SessionKeys; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( + &self.0, + ::subxt::StorageHasher::Twox64Concat, + )]) + } + } + pub struct KeyOwner<'a>( + pub &'a runtime_types::sp_core::crypto::KeyTypeId, + pub &'a [::core::primitive::u8], + ); + impl ::subxt::StorageEntry for KeyOwner<'_> { + const PALLET: &'static str = "Session"; + const STORAGE: &'static str = "KeyOwner"; + type Value = ::subxt::sp_core::crypto::AccountId32; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( + &(&self.0, &self.1), + ::subxt::StorageHasher::Twox64Concat, + )]) + } + } + pub struct StorageApi<'a, T: ::subxt::Config> { + client: &'a ::subxt::Client, + } + impl<'a, T: ::subxt::Config> StorageApi<'a, T> { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { client } + } + #[doc = " The current set of validators."] + pub async fn validators( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::std::vec::Vec<::subxt::sp_core::crypto::AccountId32>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? 
+ == [ + 186u8, 248u8, 234u8, 74u8, 245u8, 141u8, 90u8, 152u8, 226u8, 220u8, + 255u8, 104u8, 174u8, 1u8, 37u8, 152u8, 23u8, 208u8, 25u8, 49u8, 33u8, + 253u8, 254u8, 251u8, 141u8, 16u8, 18u8, 175u8, 196u8, 188u8, 163u8, + 209u8, + ] + { + let entry = Validators; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Current index of the session."] + pub async fn current_index( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> + { + if self.client.metadata().storage_hash::()? + == [ + 148u8, 179u8, 159u8, 15u8, 197u8, 95u8, 214u8, 30u8, 209u8, 251u8, + 183u8, 231u8, 91u8, 25u8, 181u8, 191u8, 143u8, 252u8, 227u8, 80u8, + 159u8, 66u8, 194u8, 67u8, 113u8, 74u8, 111u8, 91u8, 218u8, 187u8, + 130u8, 40u8, + ] + { + let entry = CurrentIndex; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " True if the underlying economic identities or weighting behind the validators"] + #[doc = " has changed in the queued validator set."] + pub async fn queued_changed( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result<::core::primitive::bool, ::subxt::BasicError> + { + if self.client.metadata().storage_hash::()? + == [ + 105u8, 140u8, 235u8, 218u8, 96u8, 100u8, 252u8, 10u8, 58u8, 221u8, + 244u8, 251u8, 67u8, 91u8, 80u8, 202u8, 152u8, 42u8, 50u8, 113u8, 200u8, + 247u8, 59u8, 213u8, 77u8, 195u8, 1u8, 150u8, 220u8, 18u8, 245u8, 46u8, + ] + { + let entry = QueuedChanged; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The queued keys for the next session. When the next session begins, these keys"] + #[doc = " will be used to determine the validator's session keys."] + pub async fn queued_keys( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::std::vec::Vec<( + ::subxt::sp_core::crypto::AccountId32, + runtime_types::rococo_runtime::SessionKeys, + )>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 72u8, 58u8, 252u8, 252u8, 133u8, 178u8, 214u8, 96u8, 221u8, 140u8, + 221u8, 249u8, 229u8, 136u8, 231u8, 167u8, 96u8, 223u8, 182u8, 175u8, + 235u8, 246u8, 80u8, 99u8, 246u8, 37u8, 141u8, 88u8, 213u8, 189u8, + 165u8, 181u8, + ] + { + let entry = QueuedKeys; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Indices of disabled validators."] + #[doc = ""] + #[doc = " The vec is always kept sorted so that we can find whether a given validator is"] + #[doc = " disabled using binary search. It gets cleared when `on_session_ending` returns"] + #[doc = " a new set of identities."] + pub async fn disabled_validators( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::std::vec::Vec<::core::primitive::u32>, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .storage_hash::()? 
+ == [ + 135u8, 22u8, 22u8, 97u8, 82u8, 217u8, 144u8, 141u8, 121u8, 240u8, + 189u8, 16u8, 176u8, 88u8, 177u8, 31u8, 20u8, 242u8, 73u8, 104u8, 11u8, + 110u8, 214u8, 34u8, 52u8, 217u8, 106u8, 33u8, 174u8, 174u8, 198u8, + 84u8, + ] + { + let entry = DisabledValidators; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The next session keys for a validator."] + pub async fn next_keys( + &self, + _0: &::subxt::sp_core::crypto::AccountId32, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::core::option::Option, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 186u8, 156u8, 115u8, 10u8, 142u8, 35u8, 26u8, 253u8, 76u8, 189u8, + 201u8, 63u8, 248u8, 4u8, 63u8, 54u8, 149u8, 171u8, 41u8, 24u8, 230u8, + 63u8, 152u8, 177u8, 134u8, 22u8, 84u8, 199u8, 198u8, 154u8, 137u8, + 38u8, + ] + { + let entry = NextKeys(_0); + self.client.storage().fetch(&entry, block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The next session keys for a validator."] + pub async fn next_keys_iter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::subxt::KeyIter<'a, T, NextKeys<'a>>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 186u8, 156u8, 115u8, 10u8, 142u8, 35u8, 26u8, 253u8, 76u8, 189u8, + 201u8, 63u8, 248u8, 4u8, 63u8, 54u8, 149u8, 171u8, 41u8, 24u8, 230u8, + 63u8, 152u8, 177u8, 134u8, 22u8, 84u8, 199u8, 198u8, 154u8, 137u8, + 38u8, + ] + { + self.client.storage().iter(block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The owner of a key. The key is the `KeyTypeId` + the encoded key."] + pub async fn key_owner( + &self, + _0: &runtime_types::sp_core::crypto::KeyTypeId, + _1: &[::core::primitive::u8], + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::core::option::Option<::subxt::sp_core::crypto::AccountId32>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 49u8, 245u8, 212u8, 141u8, 211u8, 208u8, 109u8, 102u8, 249u8, 161u8, + 41u8, 93u8, 220u8, 230u8, 14u8, 59u8, 251u8, 176u8, 33u8, 127u8, 93u8, + 149u8, 205u8, 229u8, 113u8, 129u8, 162u8, 177u8, 155u8, 216u8, 151u8, + 57u8, + ] + { + let entry = KeyOwner(_0, _1); + self.client.storage().fetch(&entry, block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The owner of a key. The key is the `KeyTypeId` + the encoded key."] + pub async fn key_owner_iter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::subxt::KeyIter<'a, T, KeyOwner<'a>>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? 
+ == [ + 49u8, 245u8, 212u8, 141u8, 211u8, 208u8, 109u8, 102u8, 249u8, 161u8, + 41u8, 93u8, 220u8, 230u8, 14u8, 59u8, 251u8, 176u8, 33u8, 127u8, 93u8, + 149u8, 205u8, 229u8, 113u8, 129u8, 162u8, 177u8, 155u8, 216u8, 151u8, + 57u8, + ] + { + self.client.storage().iter(block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + } + pub mod grandpa { + use super::root_mod; + use super::runtime_types; + pub mod calls { + use super::root_mod; + use super::runtime_types; + type DispatchError = runtime_types::sp_runtime::DispatchError; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct ReportEquivocation { + pub equivocation_proof: ::std::boxed::Box< + runtime_types::sp_finality_grandpa::EquivocationProof< + ::subxt::sp_core::H256, + ::core::primitive::u32, + >, + >, + pub key_owner_proof: runtime_types::sp_session::MembershipProof, + } + impl ::subxt::Call for ReportEquivocation { + const PALLET: &'static str = "Grandpa"; + const FUNCTION: &'static str = "report_equivocation"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct ReportEquivocationUnsigned { + pub equivocation_proof: ::std::boxed::Box< + runtime_types::sp_finality_grandpa::EquivocationProof< + ::subxt::sp_core::H256, + ::core::primitive::u32, + >, + >, + pub key_owner_proof: runtime_types::sp_session::MembershipProof, + } + impl ::subxt::Call for ReportEquivocationUnsigned { + const PALLET: &'static str = "Grandpa"; + const FUNCTION: &'static str = "report_equivocation_unsigned"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct NoteStalled { + pub delay: ::core::primitive::u32, + pub best_finalized_block_number: ::core::primitive::u32, + } + impl ::subxt::Call for NoteStalled { + const PALLET: &'static str = "Grandpa"; + const FUNCTION: &'static str = "note_stalled"; + } + pub struct TransactionApi<'a, T: ::subxt::Config, X> { + client: &'a ::subxt::Client, + marker: ::core::marker::PhantomData, + } + impl<'a, T, X> TransactionApi<'a, T, X> + where + T: ::subxt::Config, + X: ::subxt::extrinsic::ExtrinsicParams, + { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { + client, + marker: ::core::marker::PhantomData, + } + } + #[doc = "Report voter equivocation/misbehavior. This method will verify the"] + #[doc = "equivocation proof and validate the given key ownership proof"] + #[doc = "against the extracted offender. If both are valid, the offence"] + #[doc = "will be reported."] + pub fn report_equivocation( + &self, + equivocation_proof: runtime_types::sp_finality_grandpa::EquivocationProof< + ::subxt::sp_core::H256, + ::core::primitive::u32, + >, + key_owner_proof: runtime_types::sp_session::MembershipProof, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + ReportEquivocation, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 64u8, 99u8, 188u8, 190u8, 206u8, 120u8, 44u8, 136u8, 56u8, 142u8, + 221u8, 12u8, 124u8, 245u8, 168u8, 204u8, 39u8, 141u8, 189u8, 189u8, + 218u8, 162u8, 202u8, 220u8, 101u8, 136u8, 66u8, 195u8, 136u8, 4u8, + 66u8, 152u8, + ] + { + let call = ReportEquivocation { + equivocation_proof: ::std::boxed::Box::new(equivocation_proof), + key_owner_proof, + }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Report voter equivocation/misbehavior. 
This method will verify the"] + #[doc = "equivocation proof and validate the given key ownership proof"] + #[doc = "against the extracted offender. If both are valid, the offence"] + #[doc = "will be reported."] + #[doc = ""] + #[doc = "This extrinsic must be called unsigned and it is expected that only"] + #[doc = "block authors will call it (validated in `ValidateUnsigned`), as such"] + #[doc = "if the block author is defined it will be defined as the equivocation"] + #[doc = "reporter."] + pub fn report_equivocation_unsigned( + &self, + equivocation_proof: runtime_types::sp_finality_grandpa::EquivocationProof< + ::subxt::sp_core::H256, + ::core::primitive::u32, + >, + key_owner_proof: runtime_types::sp_session::MembershipProof, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + ReportEquivocationUnsigned, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .call_hash::()? + == [ + 235u8, 164u8, 157u8, 180u8, 230u8, 16u8, 125u8, 74u8, 171u8, 208u8, + 158u8, 179u8, 175u8, 87u8, 111u8, 32u8, 33u8, 72u8, 74u8, 113u8, 113u8, + 113u8, 65u8, 234u8, 184u8, 224u8, 206u8, 126u8, 254u8, 49u8, 6u8, 44u8, + ] + { + let call = ReportEquivocationUnsigned { + equivocation_proof: ::std::boxed::Box::new(equivocation_proof), + key_owner_proof, + }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Note that the current authority set of the GRANDPA finality gadget has"] + #[doc = "stalled. This will trigger a forced authority set change at the beginning"] + #[doc = "of the next session, to be enacted `delay` blocks after that. The delay"] + #[doc = "should be high enough to safely assume that the block signalling the"] + #[doc = "forced change will not be re-orged (e.g. 1000 blocks). The GRANDPA voters"] + #[doc = "will start the new authority set using the given finalized block as base."] + #[doc = "Only callable by root."] + pub fn note_stalled( + &self, + delay: ::core::primitive::u32, + best_finalized_block_number: ::core::primitive::u32, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + NoteStalled, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? 
+ == [ + 227u8, 98u8, 249u8, 158u8, 96u8, 124u8, 72u8, 188u8, 27u8, 215u8, 73u8, + 62u8, 103u8, 79u8, 38u8, 48u8, 212u8, 88u8, 233u8, 187u8, 11u8, 95u8, + 39u8, 247u8, 55u8, 184u8, 228u8, 102u8, 13u8, 251u8, 52u8, 206u8, + ] + { + let call = NoteStalled { + delay, + best_finalized_block_number, + }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + pub type Event = runtime_types::pallet_grandpa::pallet::Event; + pub mod events { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "New authority set has been applied."] + pub struct NewAuthorities { + pub authority_set: ::std::vec::Vec<( + runtime_types::sp_finality_grandpa::app::Public, + ::core::primitive::u64, + )>, + } + impl ::subxt::Event for NewAuthorities { + const PALLET: &'static str = "Grandpa"; + const EVENT: &'static str = "NewAuthorities"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "Current authority set has been paused."] + pub struct Paused; + impl ::subxt::Event for Paused { + const PALLET: &'static str = "Grandpa"; + const EVENT: &'static str = "Paused"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "Current authority set has been resumed."] + pub struct Resumed; + impl ::subxt::Event for Resumed { + const PALLET: &'static str = "Grandpa"; + const EVENT: &'static str = "Resumed"; + } + } + pub mod storage { + use super::runtime_types; + pub struct State; + impl ::subxt::StorageEntry for State { + const PALLET: &'static str = "Grandpa"; + const STORAGE: &'static str = "State"; + type Value = runtime_types::pallet_grandpa::StoredState<::core::primitive::u32>; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct PendingChange; + impl ::subxt::StorageEntry for PendingChange { + const PALLET: &'static str = "Grandpa"; + const STORAGE: &'static str = "PendingChange"; + type Value = + runtime_types::pallet_grandpa::StoredPendingChange<::core::primitive::u32>; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct NextForced; + impl ::subxt::StorageEntry for NextForced { + const PALLET: &'static str = "Grandpa"; + const STORAGE: &'static str = "NextForced"; + type Value = ::core::primitive::u32; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct Stalled; + impl ::subxt::StorageEntry for Stalled { + const PALLET: &'static str = "Grandpa"; + const STORAGE: &'static str = "Stalled"; + type Value = (::core::primitive::u32, ::core::primitive::u32); + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct CurrentSetId; + impl ::subxt::StorageEntry for CurrentSetId { + const PALLET: &'static str = "Grandpa"; + const STORAGE: &'static str = "CurrentSetId"; + type Value = ::core::primitive::u64; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct SetIdSession<'a>(pub &'a ::core::primitive::u64); + impl ::subxt::StorageEntry for SetIdSession<'_> { + const PALLET: &'static str = "Grandpa"; + const STORAGE: &'static str = "SetIdSession"; + type Value = ::core::primitive::u32; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( + &self.0, + ::subxt::StorageHasher::Twox64Concat, + )]) + } + } + pub struct 
StorageApi<'a, T: ::subxt::Config> { + client: &'a ::subxt::Client, + } + impl<'a, T: ::subxt::Config> StorageApi<'a, T> { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { client } + } + #[doc = " State of the current authority set."] + pub async fn state( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + runtime_types::pallet_grandpa::StoredState<::core::primitive::u32>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 159u8, 75u8, 78u8, 23u8, 98u8, 89u8, 239u8, 230u8, 192u8, 67u8, 139u8, + 222u8, 151u8, 237u8, 216u8, 20u8, 235u8, 247u8, 180u8, 24u8, 64u8, + 160u8, 58u8, 15u8, 205u8, 191u8, 120u8, 68u8, 32u8, 5u8, 161u8, 106u8, + ] + { + let entry = State; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Pending change: (signaled at, scheduled change)."] + pub async fn pending_change( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::core::option::Option< + runtime_types::pallet_grandpa::StoredPendingChange<::core::primitive::u32>, + >, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 83u8, 71u8, 180u8, 126u8, 51u8, 111u8, 238u8, 160u8, 5u8, 100u8, 152u8, + 23u8, 138u8, 228u8, 46u8, 67u8, 145u8, 183u8, 100u8, 97u8, 153u8, + 140u8, 244u8, 179u8, 157u8, 150u8, 75u8, 236u8, 73u8, 209u8, 106u8, + 147u8, + ] + { + let entry = PendingChange; + self.client.storage().fetch(&entry, block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " next block number where we can force a change."] + pub async fn next_forced( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::core::option::Option<::core::primitive::u32>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 99u8, 43u8, 245u8, 201u8, 60u8, 9u8, 122u8, 99u8, 188u8, 29u8, 67u8, + 6u8, 193u8, 133u8, 179u8, 67u8, 202u8, 208u8, 62u8, 179u8, 19u8, 169u8, + 196u8, 119u8, 107u8, 75u8, 100u8, 3u8, 121u8, 18u8, 80u8, 156u8, + ] + { + let entry = NextForced; + self.client.storage().fetch(&entry, block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " `true` if we are currently stalled."] + pub async fn stalled( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::core::option::Option<(::core::primitive::u32, ::core::primitive::u32)>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 219u8, 8u8, 37u8, 78u8, 150u8, 55u8, 0u8, 57u8, 201u8, 170u8, 186u8, + 189u8, 56u8, 161u8, 44u8, 15u8, 53u8, 178u8, 224u8, 208u8, 231u8, + 109u8, 14u8, 209u8, 57u8, 205u8, 237u8, 153u8, 231u8, 156u8, 24u8, + 185u8, + ] + { + let entry = Stalled; + self.client.storage().fetch(&entry, block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The number of changes (both in terms of keys and underlying economic responsibilities)"] + #[doc = " in the \"set\" of Grandpa validators from genesis."] + pub async fn current_set_id( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result<::core::primitive::u64, ::subxt::BasicError> + { + if self.client.metadata().storage_hash::()? 
+ == [ + 129u8, 7u8, 62u8, 101u8, 199u8, 60u8, 56u8, 33u8, 54u8, 158u8, 20u8, + 178u8, 244u8, 145u8, 189u8, 197u8, 157u8, 163u8, 116u8, 36u8, 105u8, + 52u8, 149u8, 244u8, 108u8, 94u8, 109u8, 111u8, 244u8, 137u8, 7u8, + 108u8, + ] + { + let entry = CurrentSetId; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " A mapping from grandpa set ID to the index of the *most recent* session for which its"] + #[doc = " members were responsible."] + #[doc = ""] + #[doc = " TWOX-NOTE: `SetId` is not under user control."] + pub async fn set_id_session( + &self, + _0: &::core::primitive::u64, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::core::option::Option<::core::primitive::u32>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 91u8, 175u8, 145u8, 127u8, 242u8, 81u8, 13u8, 231u8, 110u8, 11u8, + 166u8, 169u8, 103u8, 146u8, 123u8, 133u8, 157u8, 15u8, 33u8, 234u8, + 108u8, 13u8, 88u8, 115u8, 254u8, 9u8, 145u8, 199u8, 102u8, 47u8, 53u8, + 134u8, + ] + { + let entry = SetIdSession(_0); + self.client.storage().fetch(&entry, block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " A mapping from grandpa set ID to the index of the *most recent* session for which its"] + #[doc = " members were responsible."] + #[doc = ""] + #[doc = " TWOX-NOTE: `SetId` is not under user control."] + pub async fn set_id_session_iter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::subxt::KeyIter<'a, T, SetIdSession<'a>>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 91u8, 175u8, 145u8, 127u8, 242u8, 81u8, 13u8, 231u8, 110u8, 11u8, + 166u8, 169u8, 103u8, 146u8, 123u8, 133u8, 157u8, 15u8, 33u8, 234u8, + 108u8, 13u8, 88u8, 115u8, 254u8, 9u8, 145u8, 199u8, 102u8, 47u8, 53u8, + 134u8, + ] + { + self.client.storage().iter(block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + pub mod constants { + use super::runtime_types; + pub struct ConstantsApi<'a, T: ::subxt::Config> { + client: &'a ::subxt::Client, + } + impl<'a, T: ::subxt::Config> ConstantsApi<'a, T> { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { client } + } + #[doc = " Max Authorities in use"] + pub fn max_authorities( + &self, + ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> + { + if self + .client + .metadata() + .constant_hash("Grandpa", "MaxAuthorities")? 
+ == [ + 248u8, 195u8, 131u8, 166u8, 10u8, 50u8, 71u8, 223u8, 41u8, 49u8, 43u8, + 99u8, 251u8, 113u8, 75u8, 193u8, 159u8, 15u8, 77u8, 217u8, 147u8, + 205u8, 165u8, 50u8, 6u8, 166u8, 77u8, 189u8, 102u8, 22u8, 201u8, 19u8, + ] + { + let pallet = self.client.metadata().pallet("Grandpa")?; + let constant = pallet.constant("MaxAuthorities")?; + let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; + Ok(value) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + } + pub mod im_online { + use super::root_mod; + use super::runtime_types; + pub mod calls { + use super::root_mod; + use super::runtime_types; + type DispatchError = runtime_types::sp_runtime::DispatchError; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct Heartbeat { + pub heartbeat: runtime_types::pallet_im_online::Heartbeat<::core::primitive::u32>, + pub signature: runtime_types::pallet_im_online::sr25519::app_sr25519::Signature, + } + impl ::subxt::Call for Heartbeat { + const PALLET: &'static str = "ImOnline"; + const FUNCTION: &'static str = "heartbeat"; + } + pub struct TransactionApi<'a, T: ::subxt::Config, X> { + client: &'a ::subxt::Client, + marker: ::core::marker::PhantomData, + } + impl<'a, T, X> TransactionApi<'a, T, X> + where + T: ::subxt::Config, + X: ::subxt::extrinsic::ExtrinsicParams, + { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { + client, + marker: ::core::marker::PhantomData, + } + } + #[doc = "# "] + #[doc = "- Complexity: `O(K + E)` where K is length of `Keys` (heartbeat.validators_len) and E is"] + #[doc = " length of `heartbeat.network_state.external_address`"] + #[doc = " - `O(K)`: decoding of length `K`"] + #[doc = " - `O(E)`: decoding/encoding of length `E`"] + #[doc = "- DbReads: pallet_session `Validators`, pallet_session `CurrentIndex`, `Keys`,"] + #[doc = " `ReceivedHeartbeats`"] + #[doc = "- DbWrites: `ReceivedHeartbeats`"] + #[doc = "# "] + pub fn heartbeat( + &self, + heartbeat: runtime_types::pallet_im_online::Heartbeat<::core::primitive::u32>, + signature: runtime_types::pallet_im_online::sr25519::app_sr25519::Signature, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + Heartbeat, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? 
+ == [ + 92u8, 180u8, 156u8, 19u8, 58u8, 158u8, 191u8, 159u8, 72u8, 119u8, + 227u8, 229u8, 8u8, 139u8, 185u8, 101u8, 114u8, 161u8, 206u8, 77u8, + 110u8, 41u8, 12u8, 154u8, 147u8, 12u8, 227u8, 46u8, 61u8, 114u8, 166u8, + 2u8, + ] + { + let call = Heartbeat { + heartbeat, + signature, + }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + pub type Event = runtime_types::pallet_im_online::pallet::Event; + pub mod events { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "A new heartbeat was received from `AuthorityId`."] + pub struct HeartbeatReceived { + pub authority_id: runtime_types::pallet_im_online::sr25519::app_sr25519::Public, + } + impl ::subxt::Event for HeartbeatReceived { + const PALLET: &'static str = "ImOnline"; + const EVENT: &'static str = "HeartbeatReceived"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "At the end of the session, no offence was committed."] + pub struct AllGood; + impl ::subxt::Event for AllGood { + const PALLET: &'static str = "ImOnline"; + const EVENT: &'static str = "AllGood"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "At the end of the session, at least one validator was found to be offline."] + pub struct SomeOffline { + pub offline: ::std::vec::Vec<(::subxt::sp_core::crypto::AccountId32, ())>, + } + impl ::subxt::Event for SomeOffline { + const PALLET: &'static str = "ImOnline"; + const EVENT: &'static str = "SomeOffline"; + } + } + pub mod storage { + use super::runtime_types; + pub struct HeartbeatAfter; + impl ::subxt::StorageEntry for HeartbeatAfter { + const PALLET: &'static str = "ImOnline"; + const STORAGE: &'static str = "HeartbeatAfter"; + type Value = ::core::primitive::u32; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct Keys; + impl ::subxt::StorageEntry for Keys { + const PALLET: &'static str = "ImOnline"; + const STORAGE: &'static str = "Keys"; + type Value = + runtime_types::frame_support::storage::weak_bounded_vec::WeakBoundedVec< + runtime_types::pallet_im_online::sr25519::app_sr25519::Public, + >; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct ReceivedHeartbeats<'a>( + pub &'a ::core::primitive::u32, + pub &'a ::core::primitive::u32, + ); + impl ::subxt::StorageEntry for ReceivedHeartbeats<'_> { + const PALLET: &'static str = "ImOnline"; + const STORAGE: &'static str = "ReceivedHeartbeats"; + type Value = runtime_types::frame_support::traits::misc::WrapperOpaque< + runtime_types::pallet_im_online::BoundedOpaqueNetworkState, + >; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Map(vec![ + ::subxt::StorageMapKey::new(&self.0, ::subxt::StorageHasher::Twox64Concat), + ::subxt::StorageMapKey::new(&self.1, ::subxt::StorageHasher::Twox64Concat), + ]) + } + } + pub struct AuthoredBlocks<'a>( + pub &'a ::core::primitive::u32, + pub &'a ::subxt::sp_core::crypto::AccountId32, + ); + impl ::subxt::StorageEntry for AuthoredBlocks<'_> { + const PALLET: &'static str = "ImOnline"; + const STORAGE: &'static str = "AuthoredBlocks"; + type Value = ::core::primitive::u32; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Map(vec![ + ::subxt::StorageMapKey::new(&self.0, ::subxt::StorageHasher::Twox64Concat), + ::subxt::StorageMapKey::new(&self.1, 
::subxt::StorageHasher::Twox64Concat), + ]) + } + } + pub struct StorageApi<'a, T: ::subxt::Config> { + client: &'a ::subxt::Client, + } + impl<'a, T: ::subxt::Config> StorageApi<'a, T> { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { client } + } + #[doc = " The block number after which it's ok to send heartbeats in the current"] + #[doc = " session."] + #[doc = ""] + #[doc = " At the beginning of each session we set this to a value that should fall"] + #[doc = " roughly in the middle of the session duration. The idea is to first wait for"] + #[doc = " the validators to produce a block in the current session, so that the"] + #[doc = " heartbeat later on will not be necessary."] + #[doc = ""] + #[doc = " This value will only be used as a fallback if we fail to get a proper session"] + #[doc = " progress estimate from `NextSessionRotation`, as those estimates should be"] + #[doc = " more accurate then the value we calculate for `HeartbeatAfter`."] + pub async fn heartbeat_after( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> + { + if self.client.metadata().storage_hash::()? + == [ + 108u8, 100u8, 85u8, 198u8, 226u8, 122u8, 94u8, 225u8, 97u8, 154u8, + 135u8, 95u8, 106u8, 28u8, 185u8, 78u8, 192u8, 196u8, 35u8, 191u8, 12u8, + 19u8, 163u8, 46u8, 232u8, 235u8, 193u8, 81u8, 126u8, 204u8, 25u8, + 228u8, + ] + { + let entry = HeartbeatAfter; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The current set of keys that may issue a heartbeat."] + pub async fn keys( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + runtime_types::frame_support::storage::weak_bounded_vec::WeakBoundedVec< + runtime_types::pallet_im_online::sr25519::app_sr25519::Public, + >, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 88u8, 127u8, 143u8, 47u8, 75u8, 103u8, 131u8, 82u8, 76u8, 17u8, 255u8, + 108u8, 83u8, 251u8, 44u8, 225u8, 190u8, 66u8, 26u8, 168u8, 61u8, 87u8, + 92u8, 209u8, 147u8, 10u8, 204u8, 48u8, 214u8, 28u8, 60u8, 222u8, + ] + { + let entry = Keys; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " For each session index, we keep a mapping of `SessionIndex` and `AuthIndex` to"] + #[doc = " `WrapperOpaque`."] + pub async fn received_heartbeats( + &self, + _0: &::core::primitive::u32, + _1: &::core::primitive::u32, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::core::option::Option< + runtime_types::frame_support::traits::misc::WrapperOpaque< + runtime_types::pallet_im_online::BoundedOpaqueNetworkState, + >, + >, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .storage_hash::()? 
+ == [ + 29u8, 40u8, 67u8, 222u8, 59u8, 104u8, 24u8, 193u8, 249u8, 200u8, 152u8, + 225u8, 72u8, 243u8, 140u8, 114u8, 121u8, 216u8, 54u8, 145u8, 205u8, + 82u8, 133u8, 128u8, 109u8, 54u8, 153u8, 118u8, 66u8, 147u8, 251u8, + 148u8, + ] + { + let entry = ReceivedHeartbeats(_0, _1); + self.client.storage().fetch(&entry, block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " For each session index, we keep a mapping of `SessionIndex` and `AuthIndex` to"] + #[doc = " `WrapperOpaque`."] + pub async fn received_heartbeats_iter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::subxt::KeyIter<'a, T, ReceivedHeartbeats<'a>>, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .storage_hash::()? + == [ + 29u8, 40u8, 67u8, 222u8, 59u8, 104u8, 24u8, 193u8, 249u8, 200u8, 152u8, + 225u8, 72u8, 243u8, 140u8, 114u8, 121u8, 216u8, 54u8, 145u8, 205u8, + 82u8, 133u8, 128u8, 109u8, 54u8, 153u8, 118u8, 66u8, 147u8, 251u8, + 148u8, + ] + { + self.client.storage().iter(block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " For each session index, we keep a mapping of `ValidatorId` to the"] + #[doc = " number of blocks authored by the given authority."] + pub async fn authored_blocks( + &self, + _0: &::core::primitive::u32, + _1: &::subxt::sp_core::crypto::AccountId32, + block_hash: ::core::option::Option, + ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> + { + if self.client.metadata().storage_hash::()? + == [ + 94u8, 193u8, 107u8, 126u8, 3u8, 13u8, 28u8, 151u8, 197u8, 226u8, 224u8, + 48u8, 138u8, 113u8, 31u8, 57u8, 111u8, 184u8, 218u8, 215u8, 185u8, + 83u8, 209u8, 139u8, 114u8, 241u8, 68u8, 110u8, 157u8, 208u8, 16u8, + 22u8, + ] + { + let entry = AuthoredBlocks(_0, _1); + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " For each session index, we keep a mapping of `ValidatorId` to the"] + #[doc = " number of blocks authored by the given authority."] + pub async fn authored_blocks_iter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::subxt::KeyIter<'a, T, AuthoredBlocks<'a>>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 94u8, 193u8, 107u8, 126u8, 3u8, 13u8, 28u8, 151u8, 197u8, 226u8, 224u8, + 48u8, 138u8, 113u8, 31u8, 57u8, 111u8, 184u8, 218u8, 215u8, 185u8, + 83u8, 209u8, 139u8, 114u8, 241u8, 68u8, 110u8, 157u8, 208u8, 16u8, + 22u8, + ] + { + self.client.storage().iter(block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + pub mod constants { + use super::runtime_types; + pub struct ConstantsApi<'a, T: ::subxt::Config> { + client: &'a ::subxt::Client, + } + impl<'a, T: ::subxt::Config> ConstantsApi<'a, T> { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { client } + } + #[doc = " A configuration for base priority of unsigned transactions."] + #[doc = ""] + #[doc = " This is exposed so that it can be tuned for particular runtime, when"] + #[doc = " multiple pallets send unsigned transactions."] + pub fn unsigned_priority( + &self, + ) -> ::core::result::Result<::core::primitive::u64, ::subxt::BasicError> + { + if self + .client + .metadata() + .constant_hash("ImOnline", "UnsignedPriority")? 
+ == [ + 78u8, 226u8, 84u8, 70u8, 162u8, 23u8, 167u8, 100u8, 156u8, 228u8, + 119u8, 16u8, 28u8, 202u8, 21u8, 71u8, 72u8, 244u8, 3u8, 255u8, 243u8, + 55u8, 109u8, 238u8, 26u8, 180u8, 207u8, 175u8, 221u8, 27u8, 213u8, + 217u8, + ] + { + let pallet = self.client.metadata().pallet("ImOnline")?; + let constant = pallet.constant("UnsignedPriority")?; + let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; + Ok(value) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + } + pub mod authority_discovery { + use super::root_mod; + use super::runtime_types; + pub mod storage { + use super::runtime_types; + pub struct Keys; + impl ::subxt::StorageEntry for Keys { + const PALLET: &'static str = "AuthorityDiscovery"; + const STORAGE: &'static str = "Keys"; + type Value = + runtime_types::frame_support::storage::weak_bounded_vec::WeakBoundedVec< + runtime_types::sp_authority_discovery::app::Public, + >; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct NextKeys; + impl ::subxt::StorageEntry for NextKeys { + const PALLET: &'static str = "AuthorityDiscovery"; + const STORAGE: &'static str = "NextKeys"; + type Value = + runtime_types::frame_support::storage::weak_bounded_vec::WeakBoundedVec< + runtime_types::sp_authority_discovery::app::Public, + >; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct StorageApi<'a, T: ::subxt::Config> { + client: &'a ::subxt::Client, + } + impl<'a, T: ::subxt::Config> StorageApi<'a, T> { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { client } + } + #[doc = " Keys of the current authority set."] + pub async fn keys( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + runtime_types::frame_support::storage::weak_bounded_vec::WeakBoundedVec< + runtime_types::sp_authority_discovery::app::Public, + >, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 88u8, 127u8, 143u8, 47u8, 75u8, 103u8, 131u8, 82u8, 76u8, 17u8, 255u8, + 108u8, 83u8, 251u8, 44u8, 225u8, 190u8, 66u8, 26u8, 168u8, 61u8, 87u8, + 92u8, 209u8, 147u8, 10u8, 204u8, 48u8, 214u8, 28u8, 60u8, 222u8, + ] + { + let entry = Keys; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Keys of the next authority set."] + pub async fn next_keys( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + runtime_types::frame_support::storage::weak_bounded_vec::WeakBoundedVec< + runtime_types::sp_authority_discovery::app::Public, + >, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? 
+ == [ + 139u8, 231u8, 99u8, 138u8, 194u8, 192u8, 123u8, 163u8, 239u8, 10u8, + 211u8, 7u8, 154u8, 1u8, 182u8, 43u8, 203u8, 128u8, 55u8, 150u8, 108u8, + 94u8, 163u8, 49u8, 230u8, 18u8, 208u8, 240u8, 83u8, 226u8, 125u8, 36u8, + ] + { + let entry = NextKeys; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + } + pub mod parachains_origin { + use super::root_mod; + use super::runtime_types; + } + pub mod configuration { + use super::root_mod; + use super::runtime_types; + pub mod calls { + use super::root_mod; + use super::runtime_types; + type DispatchError = runtime_types::sp_runtime::DispatchError; + #[derive( + :: subxt :: codec :: CompactAs, + :: subxt :: codec :: Decode, + :: subxt :: codec :: Encode, + Debug, + )] + pub struct SetValidationUpgradeCooldown { + pub new: ::core::primitive::u32, + } + impl ::subxt::Call for SetValidationUpgradeCooldown { + const PALLET: &'static str = "Configuration"; + const FUNCTION: &'static str = "set_validation_upgrade_cooldown"; + } + #[derive( + :: subxt :: codec :: CompactAs, + :: subxt :: codec :: Decode, + :: subxt :: codec :: Encode, + Debug, + )] + pub struct SetValidationUpgradeDelay { + pub new: ::core::primitive::u32, + } + impl ::subxt::Call for SetValidationUpgradeDelay { + const PALLET: &'static str = "Configuration"; + const FUNCTION: &'static str = "set_validation_upgrade_delay"; + } + #[derive( + :: subxt :: codec :: CompactAs, + :: subxt :: codec :: Decode, + :: subxt :: codec :: Encode, + Debug, + )] + pub struct SetCodeRetentionPeriod { + pub new: ::core::primitive::u32, + } + impl ::subxt::Call for SetCodeRetentionPeriod { + const PALLET: &'static str = "Configuration"; + const FUNCTION: &'static str = "set_code_retention_period"; + } + #[derive( + :: subxt :: codec :: CompactAs, + :: subxt :: codec :: Decode, + :: subxt :: codec :: Encode, + Debug, + )] + pub struct SetMaxCodeSize { + pub new: ::core::primitive::u32, + } + impl ::subxt::Call for SetMaxCodeSize { + const PALLET: &'static str = "Configuration"; + const FUNCTION: &'static str = "set_max_code_size"; + } + #[derive( + :: subxt :: codec :: CompactAs, + :: subxt :: codec :: Decode, + :: subxt :: codec :: Encode, + Debug, + )] + pub struct SetMaxPovSize { + pub new: ::core::primitive::u32, + } + impl ::subxt::Call for SetMaxPovSize { + const PALLET: &'static str = "Configuration"; + const FUNCTION: &'static str = "set_max_pov_size"; + } + #[derive( + :: subxt :: codec :: CompactAs, + :: subxt :: codec :: Decode, + :: subxt :: codec :: Encode, + Debug, + )] + pub struct SetMaxHeadDataSize { + pub new: ::core::primitive::u32, + } + impl ::subxt::Call for SetMaxHeadDataSize { + const PALLET: &'static str = "Configuration"; + const FUNCTION: &'static str = "set_max_head_data_size"; + } + #[derive( + :: subxt :: codec :: CompactAs, + :: subxt :: codec :: Decode, + :: subxt :: codec :: Encode, + Debug, + )] + pub struct SetParathreadCores { + pub new: ::core::primitive::u32, + } + impl ::subxt::Call for SetParathreadCores { + const PALLET: &'static str = "Configuration"; + const FUNCTION: &'static str = "set_parathread_cores"; + } + #[derive( + :: subxt :: codec :: CompactAs, + :: subxt :: codec :: Decode, + :: subxt :: codec :: Encode, + Debug, + )] + pub struct SetParathreadRetries { + pub new: ::core::primitive::u32, + } + impl ::subxt::Call for SetParathreadRetries { + const PALLET: &'static str = "Configuration"; + const FUNCTION: &'static str = 
"set_parathread_retries"; + } + #[derive( + :: subxt :: codec :: CompactAs, + :: subxt :: codec :: Decode, + :: subxt :: codec :: Encode, + Debug, + )] + pub struct SetGroupRotationFrequency { + pub new: ::core::primitive::u32, + } + impl ::subxt::Call for SetGroupRotationFrequency { + const PALLET: &'static str = "Configuration"; + const FUNCTION: &'static str = "set_group_rotation_frequency"; + } + #[derive( + :: subxt :: codec :: CompactAs, + :: subxt :: codec :: Decode, + :: subxt :: codec :: Encode, + Debug, + )] + pub struct SetChainAvailabilityPeriod { + pub new: ::core::primitive::u32, + } + impl ::subxt::Call for SetChainAvailabilityPeriod { + const PALLET: &'static str = "Configuration"; + const FUNCTION: &'static str = "set_chain_availability_period"; + } + #[derive( + :: subxt :: codec :: CompactAs, + :: subxt :: codec :: Decode, + :: subxt :: codec :: Encode, + Debug, + )] + pub struct SetThreadAvailabilityPeriod { + pub new: ::core::primitive::u32, + } + impl ::subxt::Call for SetThreadAvailabilityPeriod { + const PALLET: &'static str = "Configuration"; + const FUNCTION: &'static str = "set_thread_availability_period"; + } + #[derive( + :: subxt :: codec :: CompactAs, + :: subxt :: codec :: Decode, + :: subxt :: codec :: Encode, + Debug, + )] + pub struct SetSchedulingLookahead { + pub new: ::core::primitive::u32, + } + impl ::subxt::Call for SetSchedulingLookahead { + const PALLET: &'static str = "Configuration"; + const FUNCTION: &'static str = "set_scheduling_lookahead"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct SetMaxValidatorsPerCore { + pub new: ::core::option::Option<::core::primitive::u32>, + } + impl ::subxt::Call for SetMaxValidatorsPerCore { + const PALLET: &'static str = "Configuration"; + const FUNCTION: &'static str = "set_max_validators_per_core"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct SetMaxValidators { + pub new: ::core::option::Option<::core::primitive::u32>, + } + impl ::subxt::Call for SetMaxValidators { + const PALLET: &'static str = "Configuration"; + const FUNCTION: &'static str = "set_max_validators"; + } + #[derive( + :: subxt :: codec :: CompactAs, + :: subxt :: codec :: Decode, + :: subxt :: codec :: Encode, + Debug, + )] + pub struct SetDisputePeriod { + pub new: ::core::primitive::u32, + } + impl ::subxt::Call for SetDisputePeriod { + const PALLET: &'static str = "Configuration"; + const FUNCTION: &'static str = "set_dispute_period"; + } + #[derive( + :: subxt :: codec :: CompactAs, + :: subxt :: codec :: Decode, + :: subxt :: codec :: Encode, + Debug, + )] + pub struct SetDisputePostConclusionAcceptancePeriod { + pub new: ::core::primitive::u32, + } + impl ::subxt::Call for SetDisputePostConclusionAcceptancePeriod { + const PALLET: &'static str = "Configuration"; + const FUNCTION: &'static str = "set_dispute_post_conclusion_acceptance_period"; + } + #[derive( + :: subxt :: codec :: CompactAs, + :: subxt :: codec :: Decode, + :: subxt :: codec :: Encode, + Debug, + )] + pub struct SetDisputeMaxSpamSlots { + pub new: ::core::primitive::u32, + } + impl ::subxt::Call for SetDisputeMaxSpamSlots { + const PALLET: &'static str = "Configuration"; + const FUNCTION: &'static str = "set_dispute_max_spam_slots"; + } + #[derive( + :: subxt :: codec :: CompactAs, + :: subxt :: codec :: Decode, + :: subxt :: codec :: Encode, + Debug, + )] + pub struct SetDisputeConclusionByTimeOutPeriod { + pub new: ::core::primitive::u32, + } + impl ::subxt::Call for 
SetDisputeConclusionByTimeOutPeriod { + const PALLET: &'static str = "Configuration"; + const FUNCTION: &'static str = "set_dispute_conclusion_by_time_out_period"; + } + #[derive( + :: subxt :: codec :: CompactAs, + :: subxt :: codec :: Decode, + :: subxt :: codec :: Encode, + Debug, + )] + pub struct SetNoShowSlots { + pub new: ::core::primitive::u32, + } + impl ::subxt::Call for SetNoShowSlots { + const PALLET: &'static str = "Configuration"; + const FUNCTION: &'static str = "set_no_show_slots"; + } + #[derive( + :: subxt :: codec :: CompactAs, + :: subxt :: codec :: Decode, + :: subxt :: codec :: Encode, + Debug, + )] + pub struct SetNDelayTranches { + pub new: ::core::primitive::u32, + } + impl ::subxt::Call for SetNDelayTranches { + const PALLET: &'static str = "Configuration"; + const FUNCTION: &'static str = "set_n_delay_tranches"; + } + #[derive( + :: subxt :: codec :: CompactAs, + :: subxt :: codec :: Decode, + :: subxt :: codec :: Encode, + Debug, + )] + pub struct SetZerothDelayTrancheWidth { + pub new: ::core::primitive::u32, + } + impl ::subxt::Call for SetZerothDelayTrancheWidth { + const PALLET: &'static str = "Configuration"; + const FUNCTION: &'static str = "set_zeroth_delay_tranche_width"; + } + #[derive( + :: subxt :: codec :: CompactAs, + :: subxt :: codec :: Decode, + :: subxt :: codec :: Encode, + Debug, + )] + pub struct SetNeededApprovals { + pub new: ::core::primitive::u32, + } + impl ::subxt::Call for SetNeededApprovals { + const PALLET: &'static str = "Configuration"; + const FUNCTION: &'static str = "set_needed_approvals"; + } + #[derive( + :: subxt :: codec :: CompactAs, + :: subxt :: codec :: Decode, + :: subxt :: codec :: Encode, + Debug, + )] + pub struct SetRelayVrfModuloSamples { + pub new: ::core::primitive::u32, + } + impl ::subxt::Call for SetRelayVrfModuloSamples { + const PALLET: &'static str = "Configuration"; + const FUNCTION: &'static str = "set_relay_vrf_modulo_samples"; + } + #[derive( + :: subxt :: codec :: CompactAs, + :: subxt :: codec :: Decode, + :: subxt :: codec :: Encode, + Debug, + )] + pub struct SetMaxUpwardQueueCount { + pub new: ::core::primitive::u32, + } + impl ::subxt::Call for SetMaxUpwardQueueCount { + const PALLET: &'static str = "Configuration"; + const FUNCTION: &'static str = "set_max_upward_queue_count"; + } + #[derive( + :: subxt :: codec :: CompactAs, + :: subxt :: codec :: Decode, + :: subxt :: codec :: Encode, + Debug, + )] + pub struct SetMaxUpwardQueueSize { + pub new: ::core::primitive::u32, + } + impl ::subxt::Call for SetMaxUpwardQueueSize { + const PALLET: &'static str = "Configuration"; + const FUNCTION: &'static str = "set_max_upward_queue_size"; + } + #[derive( + :: subxt :: codec :: CompactAs, + :: subxt :: codec :: Decode, + :: subxt :: codec :: Encode, + Debug, + )] + pub struct SetMaxDownwardMessageSize { + pub new: ::core::primitive::u32, + } + impl ::subxt::Call for SetMaxDownwardMessageSize { + const PALLET: &'static str = "Configuration"; + const FUNCTION: &'static str = "set_max_downward_message_size"; + } + #[derive( + :: subxt :: codec :: CompactAs, + :: subxt :: codec :: Decode, + :: subxt :: codec :: Encode, + Debug, + )] + pub struct SetUmpServiceTotalWeight { + pub new: ::core::primitive::u64, + } + impl ::subxt::Call for SetUmpServiceTotalWeight { + const PALLET: &'static str = "Configuration"; + const FUNCTION: &'static str = "set_ump_service_total_weight"; + } + #[derive( + :: subxt :: codec :: CompactAs, + :: subxt :: codec :: Decode, + :: subxt :: codec :: Encode, + Debug, + )] + pub struct 
SetMaxUpwardMessageSize { + pub new: ::core::primitive::u32, + } + impl ::subxt::Call for SetMaxUpwardMessageSize { + const PALLET: &'static str = "Configuration"; + const FUNCTION: &'static str = "set_max_upward_message_size"; + } + #[derive( + :: subxt :: codec :: CompactAs, + :: subxt :: codec :: Decode, + :: subxt :: codec :: Encode, + Debug, + )] + pub struct SetMaxUpwardMessageNumPerCandidate { + pub new: ::core::primitive::u32, + } + impl ::subxt::Call for SetMaxUpwardMessageNumPerCandidate { + const PALLET: &'static str = "Configuration"; + const FUNCTION: &'static str = "set_max_upward_message_num_per_candidate"; + } + #[derive( + :: subxt :: codec :: CompactAs, + :: subxt :: codec :: Decode, + :: subxt :: codec :: Encode, + Debug, + )] + pub struct SetHrmpOpenRequestTtl { + pub new: ::core::primitive::u32, + } + impl ::subxt::Call for SetHrmpOpenRequestTtl { + const PALLET: &'static str = "Configuration"; + const FUNCTION: &'static str = "set_hrmp_open_request_ttl"; + } + #[derive( + :: subxt :: codec :: CompactAs, + :: subxt :: codec :: Decode, + :: subxt :: codec :: Encode, + Debug, + )] + pub struct SetHrmpSenderDeposit { + pub new: ::core::primitive::u128, + } + impl ::subxt::Call for SetHrmpSenderDeposit { + const PALLET: &'static str = "Configuration"; + const FUNCTION: &'static str = "set_hrmp_sender_deposit"; + } + #[derive( + :: subxt :: codec :: CompactAs, + :: subxt :: codec :: Decode, + :: subxt :: codec :: Encode, + Debug, + )] + pub struct SetHrmpRecipientDeposit { + pub new: ::core::primitive::u128, + } + impl ::subxt::Call for SetHrmpRecipientDeposit { + const PALLET: &'static str = "Configuration"; + const FUNCTION: &'static str = "set_hrmp_recipient_deposit"; + } + #[derive( + :: subxt :: codec :: CompactAs, + :: subxt :: codec :: Decode, + :: subxt :: codec :: Encode, + Debug, + )] + pub struct SetHrmpChannelMaxCapacity { + pub new: ::core::primitive::u32, + } + impl ::subxt::Call for SetHrmpChannelMaxCapacity { + const PALLET: &'static str = "Configuration"; + const FUNCTION: &'static str = "set_hrmp_channel_max_capacity"; + } + #[derive( + :: subxt :: codec :: CompactAs, + :: subxt :: codec :: Decode, + :: subxt :: codec :: Encode, + Debug, + )] + pub struct SetHrmpChannelMaxTotalSize { + pub new: ::core::primitive::u32, + } + impl ::subxt::Call for SetHrmpChannelMaxTotalSize { + const PALLET: &'static str = "Configuration"; + const FUNCTION: &'static str = "set_hrmp_channel_max_total_size"; + } + #[derive( + :: subxt :: codec :: CompactAs, + :: subxt :: codec :: Decode, + :: subxt :: codec :: Encode, + Debug, + )] + pub struct SetHrmpMaxParachainInboundChannels { + pub new: ::core::primitive::u32, + } + impl ::subxt::Call for SetHrmpMaxParachainInboundChannels { + const PALLET: &'static str = "Configuration"; + const FUNCTION: &'static str = "set_hrmp_max_parachain_inbound_channels"; + } + #[derive( + :: subxt :: codec :: CompactAs, + :: subxt :: codec :: Decode, + :: subxt :: codec :: Encode, + Debug, + )] + pub struct SetHrmpMaxParathreadInboundChannels { + pub new: ::core::primitive::u32, + } + impl ::subxt::Call for SetHrmpMaxParathreadInboundChannels { + const PALLET: &'static str = "Configuration"; + const FUNCTION: &'static str = "set_hrmp_max_parathread_inbound_channels"; + } + #[derive( + :: subxt :: codec :: CompactAs, + :: subxt :: codec :: Decode, + :: subxt :: codec :: Encode, + Debug, + )] + pub struct SetHrmpChannelMaxMessageSize { + pub new: ::core::primitive::u32, + } + impl ::subxt::Call for SetHrmpChannelMaxMessageSize { + const PALLET: 
&'static str = "Configuration"; + const FUNCTION: &'static str = "set_hrmp_channel_max_message_size"; + } + #[derive( + :: subxt :: codec :: CompactAs, + :: subxt :: codec :: Decode, + :: subxt :: codec :: Encode, + Debug, + )] + pub struct SetHrmpMaxParachainOutboundChannels { + pub new: ::core::primitive::u32, + } + impl ::subxt::Call for SetHrmpMaxParachainOutboundChannels { + const PALLET: &'static str = "Configuration"; + const FUNCTION: &'static str = "set_hrmp_max_parachain_outbound_channels"; + } + #[derive( + :: subxt :: codec :: CompactAs, + :: subxt :: codec :: Decode, + :: subxt :: codec :: Encode, + Debug, + )] + pub struct SetHrmpMaxParathreadOutboundChannels { + pub new: ::core::primitive::u32, + } + impl ::subxt::Call for SetHrmpMaxParathreadOutboundChannels { + const PALLET: &'static str = "Configuration"; + const FUNCTION: &'static str = "set_hrmp_max_parathread_outbound_channels"; + } + #[derive( + :: subxt :: codec :: CompactAs, + :: subxt :: codec :: Decode, + :: subxt :: codec :: Encode, + Debug, + )] + pub struct SetHrmpMaxMessageNumPerCandidate { + pub new: ::core::primitive::u32, + } + impl ::subxt::Call for SetHrmpMaxMessageNumPerCandidate { + const PALLET: &'static str = "Configuration"; + const FUNCTION: &'static str = "set_hrmp_max_message_num_per_candidate"; + } + #[derive( + :: subxt :: codec :: CompactAs, + :: subxt :: codec :: Decode, + :: subxt :: codec :: Encode, + Debug, + )] + pub struct SetUmpMaxIndividualWeight { + pub new: ::core::primitive::u64, + } + impl ::subxt::Call for SetUmpMaxIndividualWeight { + const PALLET: &'static str = "Configuration"; + const FUNCTION: &'static str = "set_ump_max_individual_weight"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct SetPvfCheckingEnabled { + pub new: ::core::primitive::bool, + } + impl ::subxt::Call for SetPvfCheckingEnabled { + const PALLET: &'static str = "Configuration"; + const FUNCTION: &'static str = "set_pvf_checking_enabled"; + } + #[derive( + :: subxt :: codec :: CompactAs, + :: subxt :: codec :: Decode, + :: subxt :: codec :: Encode, + Debug, + )] + pub struct SetPvfVotingTtl { + pub new: ::core::primitive::u32, + } + impl ::subxt::Call for SetPvfVotingTtl { + const PALLET: &'static str = "Configuration"; + const FUNCTION: &'static str = "set_pvf_voting_ttl"; + } + #[derive( + :: subxt :: codec :: CompactAs, + :: subxt :: codec :: Decode, + :: subxt :: codec :: Encode, + Debug, + )] + pub struct SetMinimumValidationUpgradeDelay { + pub new: ::core::primitive::u32, + } + impl ::subxt::Call for SetMinimumValidationUpgradeDelay { + const PALLET: &'static str = "Configuration"; + const FUNCTION: &'static str = "set_minimum_validation_upgrade_delay"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct SetBypassConsistencyCheck { + pub new: ::core::primitive::bool, + } + impl ::subxt::Call for SetBypassConsistencyCheck { + const PALLET: &'static str = "Configuration"; + const FUNCTION: &'static str = "set_bypass_consistency_check"; + } + pub struct TransactionApi<'a, T: ::subxt::Config, X> { + client: &'a ::subxt::Client, + marker: ::core::marker::PhantomData, + } + impl<'a, T, X> TransactionApi<'a, T, X> + where + T: ::subxt::Config, + X: ::subxt::extrinsic::ExtrinsicParams, + { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { + client, + marker: ::core::marker::PhantomData, + } + } + #[doc = "Set the validation upgrade cooldown."] + pub fn set_validation_upgrade_cooldown( + &self, + new: 
::core::primitive::u32, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + SetValidationUpgradeCooldown, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .call_hash::()? + == [ + 153u8, 60u8, 171u8, 164u8, 241u8, 214u8, 235u8, 141u8, 4u8, 32u8, + 129u8, 253u8, 128u8, 148u8, 185u8, 51u8, 65u8, 34u8, 68u8, 72u8, 202u8, + 159u8, 74u8, 243u8, 35u8, 138u8, 208u8, 26u8, 182u8, 189u8, 41u8, 11u8, + ] + { + let call = SetValidationUpgradeCooldown { new }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Set the validation upgrade delay."] + pub fn set_validation_upgrade_delay( + &self, + new: ::core::primitive::u32, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + SetValidationUpgradeDelay, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .call_hash::()? + == [ + 136u8, 220u8, 63u8, 166u8, 202u8, 19u8, 241u8, 32u8, 100u8, 14u8, + 101u8, 244u8, 241u8, 141u8, 144u8, 213u8, 185u8, 88u8, 193u8, 2u8, + 55u8, 154u8, 24u8, 77u8, 66u8, 167u8, 69u8, 245u8, 224u8, 63u8, 196u8, + 200u8, + ] + { + let call = SetValidationUpgradeDelay { new }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Set the acceptance period for an included candidate."] + pub fn set_code_retention_period( + &self, + new: ::core::primitive::u32, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + SetCodeRetentionPeriod, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .call_hash::()? + == [ + 94u8, 104u8, 13u8, 127u8, 95u8, 137u8, 66u8, 224u8, 22u8, 53u8, 14u8, + 161u8, 67u8, 85u8, 78u8, 161u8, 92u8, 81u8, 190u8, 213u8, 113u8, 235u8, + 64u8, 19u8, 112u8, 164u8, 71u8, 88u8, 183u8, 234u8, 237u8, 9u8, + ] + { + let call = SetCodeRetentionPeriod { new }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Set the max validation code size for incoming upgrades."] + pub fn set_max_code_size( + &self, + new: ::core::primitive::u32, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + SetMaxCodeSize, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 74u8, 39u8, 190u8, 155u8, 121u8, 60u8, 233u8, 95u8, 177u8, 57u8, 116u8, + 107u8, 200u8, 44u8, 2u8, 215u8, 209u8, 50u8, 37u8, 112u8, 136u8, 107u8, + 202u8, 142u8, 114u8, 25u8, 43u8, 134u8, 250u8, 15u8, 81u8, 13u8, + ] + { + let call = SetMaxCodeSize { new }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Set the max POV block size for incoming upgrades."] + pub fn set_max_pov_size( + &self, + new: ::core::primitive::u32, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + SetMaxPovSize, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? 
+ == [ + 77u8, 199u8, 18u8, 53u8, 223u8, 107u8, 57u8, 141u8, 8u8, 138u8, 180u8, + 175u8, 73u8, 88u8, 205u8, 185u8, 56u8, 106u8, 43u8, 87u8, 109u8, 9u8, + 103u8, 103u8, 50u8, 158u8, 11u8, 77u8, 162u8, 38u8, 57u8, 27u8, + ] + { + let call = SetMaxPovSize { new }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Set the max head data size for paras."] + pub fn set_max_head_data_size( + &self, + new: ::core::primitive::u32, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + SetMaxHeadDataSize, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 30u8, 132u8, 5u8, 207u8, 126u8, 145u8, 187u8, 129u8, 36u8, 235u8, + 179u8, 61u8, 243u8, 87u8, 178u8, 107u8, 8u8, 21u8, 43u8, 39u8, 119u8, + 138u8, 146u8, 146u8, 109u8, 189u8, 56u8, 160u8, 14u8, 78u8, 230u8, + 149u8, + ] + { + let call = SetMaxHeadDataSize { new }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Set the number of parathread execution cores."] + pub fn set_parathread_cores( + &self, + new: ::core::primitive::u32, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + SetParathreadCores, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 5u8, 198u8, 156u8, 226u8, 125u8, 16u8, 2u8, 64u8, 28u8, 189u8, 213u8, + 85u8, 6u8, 112u8, 173u8, 183u8, 174u8, 207u8, 129u8, 110u8, 201u8, + 161u8, 163u8, 191u8, 20u8, 14u8, 65u8, 106u8, 234u8, 203u8, 39u8, 75u8, + ] + { + let call = SetParathreadCores { new }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Set the number of retries for a particular parathread."] + pub fn set_parathread_retries( + &self, + new: ::core::primitive::u32, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + SetParathreadRetries, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 146u8, 134u8, 204u8, 109u8, 167u8, 35u8, 255u8, 245u8, 98u8, 24u8, + 213u8, 33u8, 144u8, 194u8, 196u8, 196u8, 66u8, 220u8, 168u8, 156u8, + 171u8, 179u8, 154u8, 30u8, 221u8, 45u8, 65u8, 192u8, 194u8, 130u8, + 87u8, 100u8, + ] + { + let call = SetParathreadRetries { new }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Set the parachain validator-group rotation frequency"] + pub fn set_group_rotation_frequency( + &self, + new: ::core::primitive::u32, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + SetGroupRotationFrequency, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .call_hash::()? 
+ == [ + 102u8, 192u8, 226u8, 120u8, 69u8, 117u8, 239u8, 156u8, 111u8, 239u8, + 197u8, 191u8, 221u8, 18u8, 140u8, 214u8, 154u8, 212u8, 151u8, 35u8, + 176u8, 2u8, 162u8, 131u8, 115u8, 102u8, 177u8, 106u8, 35u8, 214u8, + 151u8, 227u8, + ] + { + let call = SetGroupRotationFrequency { new }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Set the availability period for parachains."] + pub fn set_chain_availability_period( + &self, + new: ::core::primitive::u32, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + SetChainAvailabilityPeriod, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .call_hash::()? + == [ + 3u8, 83u8, 31u8, 241u8, 73u8, 137u8, 18u8, 95u8, 119u8, 143u8, 28u8, + 110u8, 151u8, 229u8, 172u8, 208u8, 50u8, 25u8, 89u8, 222u8, 128u8, + 125u8, 112u8, 25u8, 204u8, 141u8, 175u8, 69u8, 57u8, 161u8, 189u8, + 167u8, + ] + { + let call = SetChainAvailabilityPeriod { new }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Set the availability period for parathreads."] + pub fn set_thread_availability_period( + &self, + new: ::core::primitive::u32, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + SetThreadAvailabilityPeriod, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .call_hash::()? + == [ + 242u8, 204u8, 158u8, 5u8, 123u8, 163u8, 6u8, 209u8, 44u8, 73u8, 112u8, + 249u8, 96u8, 160u8, 188u8, 151u8, 107u8, 21u8, 9u8, 100u8, 104u8, + 184u8, 97u8, 77u8, 122u8, 254u8, 88u8, 94u8, 22u8, 15u8, 57u8, 44u8, + ] + { + let call = SetThreadAvailabilityPeriod { new }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Set the scheduling lookahead, in expected number of blocks at peak throughput."] + pub fn set_scheduling_lookahead( + &self, + new: ::core::primitive::u32, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + SetSchedulingLookahead, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .call_hash::()? + == [ + 146u8, 149u8, 10u8, 57u8, 122u8, 116u8, 61u8, 181u8, 97u8, 240u8, 87u8, + 37u8, 227u8, 233u8, 123u8, 26u8, 243u8, 58u8, 54u8, 93u8, 111u8, 204u8, + 108u8, 18u8, 167u8, 20u8, 255u8, 173u8, 46u8, 212u8, 246u8, 201u8, + ] + { + let call = SetSchedulingLookahead { new }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Set the maximum number of validators to assign to any core."] + pub fn set_max_validators_per_core( + &self, + new: ::core::option::Option<::core::primitive::u32>, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + SetMaxValidatorsPerCore, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .call_hash::()? 
+ == [ + 27u8, 160u8, 153u8, 252u8, 121u8, 42u8, 94u8, 131u8, 199u8, 216u8, + 15u8, 65u8, 94u8, 69u8, 127u8, 130u8, 179u8, 236u8, 49u8, 32u8, 239u8, + 37u8, 58u8, 0u8, 50u8, 5u8, 255u8, 30u8, 203u8, 230u8, 135u8, 202u8, + ] + { + let call = SetMaxValidatorsPerCore { new }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Set the maximum number of validators to use in parachain consensus."] + pub fn set_max_validators( + &self, + new: ::core::option::Option<::core::primitive::u32>, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + SetMaxValidators, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 192u8, 156u8, 115u8, 10u8, 225u8, 94u8, 190u8, 180u8, 242u8, 131u8, + 202u8, 13u8, 82u8, 27u8, 8u8, 144u8, 70u8, 92u8, 136u8, 206u8, 205u8, + 3u8, 242u8, 130u8, 77u8, 114u8, 242u8, 111u8, 99u8, 24u8, 238u8, 55u8, + ] + { + let call = SetMaxValidators { new }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Set the dispute period, in number of sessions to keep for disputes."] + pub fn set_dispute_period( + &self, + new: ::core::primitive::u32, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + SetDisputePeriod, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 232u8, 96u8, 104u8, 249u8, 183u8, 148u8, 126u8, 80u8, 64u8, 39u8, 2u8, + 208u8, 183u8, 189u8, 139u8, 201u8, 61u8, 63u8, 42u8, 155u8, 215u8, + 32u8, 212u8, 158u8, 90u8, 80u8, 159u8, 23u8, 249u8, 204u8, 218u8, + 217u8, + ] + { + let call = SetDisputePeriod { new }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Set the dispute post conclusion acceptance period."] + pub fn set_dispute_post_conclusion_acceptance_period( + &self, + new: ::core::primitive::u32, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + SetDisputePostConclusionAcceptancePeriod, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .call_hash::()? + == [ + 45u8, 140u8, 213u8, 62u8, 212u8, 31u8, 126u8, 94u8, 102u8, 176u8, + 203u8, 240u8, 28u8, 25u8, 116u8, 77u8, 187u8, 147u8, 32u8, 20u8, 25u8, + 124u8, 164u8, 162u8, 246u8, 223u8, 146u8, 28u8, 35u8, 4u8, 174u8, 47u8, + ] + { + let call = SetDisputePostConclusionAcceptancePeriod { new }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Set the maximum number of dispute spam slots."] + pub fn set_dispute_max_spam_slots( + &self, + new: ::core::primitive::u32, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + SetDisputeMaxSpamSlots, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .call_hash::()? 
+ == [ + 180u8, 195u8, 6u8, 141u8, 89u8, 252u8, 245u8, 202u8, 36u8, 123u8, + 105u8, 35u8, 161u8, 60u8, 233u8, 213u8, 191u8, 65u8, 68u8, 4u8, 19u8, + 201u8, 226u8, 103u8, 124u8, 181u8, 201u8, 91u8, 84u8, 170u8, 48u8, + 154u8, + ] + { + let call = SetDisputeMaxSpamSlots { new }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Set the dispute conclusion by time out period."] + pub fn set_dispute_conclusion_by_time_out_period( + &self, + new: ::core::primitive::u32, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + SetDisputeConclusionByTimeOutPeriod, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .call_hash::()? + == [ + 50u8, 221u8, 129u8, 199u8, 147u8, 98u8, 11u8, 104u8, 133u8, 161u8, + 53u8, 163u8, 100u8, 155u8, 228u8, 167u8, 146u8, 87u8, 186u8, 228u8, + 147u8, 44u8, 142u8, 160u8, 119u8, 146u8, 10u8, 155u8, 5u8, 35u8, 8u8, + 165u8, + ] + { + let call = SetDisputeConclusionByTimeOutPeriod { new }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Set the no show slots, in number of number of consensus slots."] + #[doc = "Must be at least 1."] + pub fn set_no_show_slots( + &self, + new: ::core::primitive::u32, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + SetNoShowSlots, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 235u8, 5u8, 35u8, 159u8, 200u8, 58u8, 171u8, 179u8, 78u8, 70u8, 161u8, + 47u8, 237u8, 245u8, 77u8, 81u8, 1u8, 138u8, 145u8, 137u8, 45u8, 126u8, + 255u8, 227u8, 130u8, 217u8, 36u8, 251u8, 72u8, 235u8, 16u8, 231u8, + ] + { + let call = SetNoShowSlots { new }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Set the total number of delay tranches."] + pub fn set_n_delay_tranches( + &self, + new: ::core::primitive::u32, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + SetNDelayTranches, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 109u8, 208u8, 13u8, 18u8, 178u8, 117u8, 101u8, 169u8, 162u8, 255u8, + 28u8, 88u8, 199u8, 89u8, 83u8, 59u8, 46u8, 105u8, 186u8, 4u8, 7u8, + 171u8, 78u8, 122u8, 197u8, 110u8, 63u8, 164u8, 140u8, 59u8, 179u8, + 236u8, + ] + { + let call = SetNDelayTranches { new }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Set the zeroth delay tranche width."] + pub fn set_zeroth_delay_tranche_width( + &self, + new: ::core::primitive::u32, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + SetZerothDelayTrancheWidth, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .call_hash::()? 
+ == [ + 162u8, 20u8, 162u8, 90u8, 59u8, 194u8, 147u8, 255u8, 198u8, 203u8, + 50u8, 13u8, 134u8, 142u8, 6u8, 156u8, 205u8, 128u8, 222u8, 225u8, + 150u8, 68u8, 198u8, 212u8, 198u8, 238u8, 3u8, 209u8, 224u8, 19u8, + 118u8, 147u8, + ] + { + let call = SetZerothDelayTrancheWidth { new }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Set the number of validators needed to approve a block."] + pub fn set_needed_approvals( + &self, + new: ::core::primitive::u32, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + SetNeededApprovals, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 83u8, 164u8, 204u8, 168u8, 93u8, 165u8, 118u8, 111u8, 149u8, 129u8, + 126u8, 250u8, 95u8, 148u8, 193u8, 173u8, 239u8, 1u8, 14u8, 102u8, 77u8, + 150u8, 149u8, 55u8, 82u8, 179u8, 2u8, 117u8, 19u8, 34u8, 223u8, 173u8, + ] + { + let call = SetNeededApprovals { new }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Set the number of samples to do of the `RelayVRFModulo` approval assignment criterion."] + pub fn set_relay_vrf_modulo_samples( + &self, + new: ::core::primitive::u32, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + SetRelayVrfModuloSamples, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .call_hash::()? + == [ + 22u8, 11u8, 132u8, 96u8, 58u8, 253u8, 183u8, 31u8, 137u8, 231u8, 187u8, + 145u8, 119u8, 164u8, 55u8, 142u8, 37u8, 151u8, 227u8, 112u8, 113u8, + 18u8, 200u8, 247u8, 238u8, 10u8, 223u8, 74u8, 4u8, 132u8, 115u8, 119u8, + ] + { + let call = SetRelayVrfModuloSamples { new }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Sets the maximum items that can present in a upward dispatch queue at once."] + pub fn set_max_upward_queue_count( + &self, + new: ::core::primitive::u32, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + SetMaxUpwardQueueCount, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .call_hash::()? + == [ + 16u8, 31u8, 245u8, 94u8, 243u8, 122u8, 55u8, 155u8, 161u8, 239u8, 5u8, + 59u8, 186u8, 207u8, 136u8, 253u8, 255u8, 176u8, 135u8, 242u8, 199u8, + 96u8, 226u8, 150u8, 15u8, 160u8, 60u8, 101u8, 66u8, 143u8, 93u8, 104u8, + ] + { + let call = SetMaxUpwardQueueCount { new }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Sets the maximum total size of items that can present in a upward dispatch queue at once."] + pub fn set_max_upward_queue_size( + &self, + new: ::core::primitive::u32, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + SetMaxUpwardQueueSize, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .call_hash::()? 
+ == [ + 203u8, 170u8, 21u8, 149u8, 170u8, 246u8, 91u8, 54u8, 197u8, 91u8, 41u8, + 114u8, 210u8, 239u8, 73u8, 236u8, 68u8, 194u8, 157u8, 116u8, 229u8, + 1u8, 34u8, 135u8, 144u8, 191u8, 56u8, 77u8, 13u8, 92u8, 221u8, 4u8, + ] + { + let call = SetMaxUpwardQueueSize { new }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Set the critical downward message size."] + pub fn set_max_downward_message_size( + &self, + new: ::core::primitive::u32, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + SetMaxDownwardMessageSize, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .call_hash::()? + == [ + 55u8, 181u8, 6u8, 126u8, 31u8, 154u8, 42u8, 194u8, 64u8, 23u8, 34u8, + 255u8, 151u8, 186u8, 52u8, 32u8, 168u8, 233u8, 44u8, 35u8, 152u8, 78u8, + 230u8, 242u8, 169u8, 85u8, 103u8, 133u8, 177u8, 239u8, 175u8, 119u8, + ] + { + let call = SetMaxDownwardMessageSize { new }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Sets the soft limit for the phase of dispatching dispatchable upward messages."] + pub fn set_ump_service_total_weight( + &self, + new: ::core::primitive::u64, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + SetUmpServiceTotalWeight, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .call_hash::()? + == [ + 14u8, 179u8, 217u8, 169u8, 84u8, 45u8, 193u8, 3u8, 7u8, 196u8, 56u8, + 209u8, 50u8, 148u8, 32u8, 205u8, 99u8, 202u8, 72u8, 246u8, 151u8, + 230u8, 145u8, 98u8, 188u8, 1u8, 136u8, 241u8, 217u8, 37u8, 6u8, 101u8, + ] + { + let call = SetUmpServiceTotalWeight { new }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Sets the maximum size of an upward message that can be sent by a candidate."] + pub fn set_max_upward_message_size( + &self, + new: ::core::primitive::u32, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + SetMaxUpwardMessageSize, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .call_hash::()? + == [ + 134u8, 232u8, 5u8, 70u8, 81u8, 177u8, 81u8, 235u8, 93u8, 145u8, 193u8, + 42u8, 150u8, 61u8, 236u8, 20u8, 38u8, 176u8, 124u8, 170u8, 248u8, + 149u8, 57u8, 88u8, 17u8, 46u8, 202u8, 74u8, 35u8, 82u8, 190u8, 223u8, + ] + { + let call = SetMaxUpwardMessageSize { new }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Sets the maximum number of messages that a candidate can contain."] + pub fn set_max_upward_message_num_per_candidate( + &self, + new: ::core::primitive::u32, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + SetMaxUpwardMessageNumPerCandidate, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .call_hash::()? 
+ == [ + 14u8, 79u8, 128u8, 66u8, 119u8, 24u8, 26u8, 116u8, 249u8, 254u8, 86u8, + 228u8, 248u8, 75u8, 111u8, 90u8, 101u8, 96u8, 124u8, 25u8, 245u8, + 115u8, 119u8, 14u8, 213u8, 180u8, 224u8, 224u8, 188u8, 172u8, 152u8, + 16u8, + ] + { + let call = SetMaxUpwardMessageNumPerCandidate { new }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Sets the number of sessions after which an HRMP open channel request expires."] + pub fn set_hrmp_open_request_ttl( + &self, + new: ::core::primitive::u32, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + SetHrmpOpenRequestTtl, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .call_hash::()? + == [ + 168u8, 254u8, 189u8, 22u8, 61u8, 90u8, 131u8, 1u8, 103u8, 208u8, 179u8, + 85u8, 80u8, 215u8, 9u8, 3u8, 34u8, 73u8, 130u8, 19u8, 166u8, 77u8, + 131u8, 148u8, 183u8, 86u8, 186u8, 148u8, 109u8, 173u8, 74u8, 94u8, + ] + { + let call = SetHrmpOpenRequestTtl { new }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Sets the amount of funds that the sender should provide for opening an HRMP channel."] + pub fn set_hrmp_sender_deposit( + &self, + new: ::core::primitive::u128, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + SetHrmpSenderDeposit, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 250u8, 23u8, 196u8, 206u8, 34u8, 86u8, 28u8, 14u8, 110u8, 189u8, 38u8, + 39u8, 2u8, 16u8, 212u8, 32u8, 65u8, 249u8, 120u8, 163u8, 89u8, 232u8, + 3u8, 49u8, 155u8, 174u8, 96u8, 21u8, 240u8, 185u8, 140u8, 243u8, + ] + { + let call = SetHrmpSenderDeposit { new }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Sets the amount of funds that the recipient should provide for accepting opening an HRMP"] + #[doc = "channel."] + pub fn set_hrmp_recipient_deposit( + &self, + new: ::core::primitive::u128, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + SetHrmpRecipientDeposit, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .call_hash::()? + == [ + 104u8, 35u8, 129u8, 31u8, 111u8, 57u8, 190u8, 42u8, 159u8, 220u8, 86u8, + 136u8, 200u8, 4u8, 62u8, 241u8, 141u8, 90u8, 200u8, 132u8, 141u8, + 154u8, 117u8, 206u8, 79u8, 160u8, 124u8, 186u8, 231u8, 250u8, 86u8, + 87u8, + ] + { + let call = SetHrmpRecipientDeposit { new }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Sets the maximum number of messages allowed in an HRMP channel at once."] + pub fn set_hrmp_channel_max_capacity( + &self, + new: ::core::primitive::u32, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + SetHrmpChannelMaxCapacity, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .call_hash::()? 
+ == [ + 211u8, 49u8, 82u8, 59u8, 16u8, 97u8, 253u8, 64u8, 185u8, 216u8, 235u8, + 10u8, 84u8, 194u8, 231u8, 115u8, 153u8, 20u8, 31u8, 86u8, 47u8, 226u8, + 245u8, 214u8, 134u8, 194u8, 13u8, 254u8, 230u8, 66u8, 54u8, 240u8, + ] + { + let call = SetHrmpChannelMaxCapacity { new }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Sets the maximum total size of messages in bytes allowed in an HRMP channel at once."] + pub fn set_hrmp_channel_max_total_size( + &self, + new: ::core::primitive::u32, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + SetHrmpChannelMaxTotalSize, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .call_hash::()? + == [ + 254u8, 196u8, 171u8, 29u8, 208u8, 179u8, 204u8, 58u8, 64u8, 41u8, 52u8, + 73u8, 153u8, 245u8, 29u8, 132u8, 129u8, 29u8, 94u8, 241u8, 136u8, 20u8, + 12u8, 20u8, 255u8, 244u8, 252u8, 98u8, 136u8, 222u8, 7u8, 19u8, + ] + { + let call = SetHrmpChannelMaxTotalSize { new }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Sets the maximum number of inbound HRMP channels a parachain is allowed to accept."] + pub fn set_hrmp_max_parachain_inbound_channels( + &self, + new: ::core::primitive::u32, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + SetHrmpMaxParachainInboundChannels, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .call_hash::()? + == [ + 219u8, 88u8, 3u8, 249u8, 16u8, 182u8, 182u8, 233u8, 152u8, 24u8, 29u8, + 96u8, 227u8, 50u8, 156u8, 98u8, 71u8, 196u8, 158u8, 103u8, 114u8, 55u8, + 65u8, 199u8, 211u8, 225u8, 235u8, 172u8, 218u8, 123u8, 158u8, 57u8, + ] + { + let call = SetHrmpMaxParachainInboundChannels { new }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Sets the maximum number of inbound HRMP channels a parathread is allowed to accept."] + pub fn set_hrmp_max_parathread_inbound_channels( + &self, + new: ::core::primitive::u32, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + SetHrmpMaxParathreadInboundChannels, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .call_hash::()? + == [ + 153u8, 169u8, 153u8, 141u8, 45u8, 21u8, 26u8, 33u8, 207u8, 234u8, + 186u8, 154u8, 12u8, 148u8, 2u8, 226u8, 55u8, 125u8, 58u8, 127u8, 154u8, + 176u8, 3u8, 47u8, 164u8, 63u8, 25u8, 42u8, 66u8, 131u8, 143u8, 254u8, + ] + { + let call = SetHrmpMaxParathreadInboundChannels { new }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Sets the maximum size of a message that could ever be put into an HRMP channel."] + pub fn set_hrmp_channel_max_message_size( + &self, + new: ::core::primitive::u32, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + SetHrmpChannelMaxMessageSize, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .call_hash::()? 
+ == [ + 237u8, 103u8, 126u8, 197u8, 164u8, 247u8, 67u8, 144u8, 30u8, 192u8, + 161u8, 243u8, 254u8, 26u8, 254u8, 33u8, 59u8, 216u8, 159u8, 105u8, + 166u8, 138u8, 38u8, 124u8, 248u8, 81u8, 11u8, 223u8, 120u8, 75u8, + 176u8, 177u8, + ] + { + let call = SetHrmpChannelMaxMessageSize { new }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Sets the maximum number of outbound HRMP channels a parachain is allowed to open."] + pub fn set_hrmp_max_parachain_outbound_channels( + &self, + new: ::core::primitive::u32, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + SetHrmpMaxParachainOutboundChannels, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .call_hash::()? + == [ + 173u8, 184u8, 49u8, 66u8, 158u8, 142u8, 95u8, 225u8, 90u8, 171u8, 4u8, + 20u8, 210u8, 180u8, 54u8, 236u8, 60u8, 5u8, 76u8, 173u8, 226u8, 203u8, + 7u8, 156u8, 54u8, 9u8, 198u8, 171u8, 250u8, 1u8, 120u8, 240u8, + ] + { + let call = SetHrmpMaxParachainOutboundChannels { new }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Sets the maximum number of outbound HRMP channels a parathread is allowed to open."] + pub fn set_hrmp_max_parathread_outbound_channels( + &self, + new: ::core::primitive::u32, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + SetHrmpMaxParathreadOutboundChannels, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .call_hash::()? + == [ + 166u8, 73u8, 121u8, 53u8, 27u8, 77u8, 150u8, 115u8, 29u8, 202u8, 34u8, + 4u8, 35u8, 161u8, 113u8, 15u8, 66u8, 60u8, 214u8, 129u8, 157u8, 143u8, + 227u8, 134u8, 213u8, 9u8, 231u8, 224u8, 187u8, 36u8, 16u8, 68u8, + ] + { + let call = SetHrmpMaxParathreadOutboundChannels { new }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Sets the maximum number of outbound HRMP messages can be sent by a candidate."] + pub fn set_hrmp_max_message_num_per_candidate( + &self, + new: ::core::primitive::u32, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + SetHrmpMaxMessageNumPerCandidate, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .call_hash::()? + == [ + 235u8, 47u8, 114u8, 29u8, 87u8, 198u8, 62u8, 200u8, 235u8, 184u8, + 204u8, 35u8, 251u8, 210u8, 88u8, 150u8, 22u8, 61u8, 242u8, 196u8, + 240u8, 76u8, 45u8, 54u8, 155u8, 111u8, 244u8, 31u8, 158u8, 48u8, 68u8, + 233u8, + ] + { + let call = SetHrmpMaxMessageNumPerCandidate { new }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Sets the maximum amount of weight any individual upward message may consume."] + pub fn set_ump_max_individual_weight( + &self, + new: ::core::primitive::u64, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + SetUmpMaxIndividualWeight, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .call_hash::()? 
+ == [ + 61u8, 174u8, 42u8, 53u8, 120u8, 56u8, 252u8, 117u8, 173u8, 223u8, + 100u8, 141u8, 209u8, 29u8, 173u8, 240u8, 180u8, 113u8, 27u8, 24u8, 4u8, + 157u8, 107u8, 247u8, 235u8, 121u8, 152u8, 6u8, 176u8, 254u8, 18u8, + 70u8, + ] + { + let call = SetUmpMaxIndividualWeight { new }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Enable or disable PVF pre-checking. Consult the field documentation prior executing."] + pub fn set_pvf_checking_enabled( + &self, + new: ::core::primitive::bool, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + SetPvfCheckingEnabled, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .call_hash::()? + == [ + 224u8, 199u8, 197u8, 208u8, 178u8, 211u8, 14u8, 102u8, 174u8, 205u8, + 207u8, 181u8, 75u8, 125u8, 209u8, 69u8, 85u8, 1u8, 98u8, 251u8, 17u8, + 42u8, 73u8, 9u8, 252u8, 184u8, 81u8, 202u8, 132u8, 236u8, 97u8, 121u8, + ] + { + let call = SetPvfCheckingEnabled { new }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Set the number of session changes after which a PVF pre-checking voting is rejected."] + pub fn set_pvf_voting_ttl( + &self, + new: ::core::primitive::u32, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + SetPvfVotingTtl, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 179u8, 71u8, 42u8, 140u8, 187u8, 43u8, 138u8, 16u8, 104u8, 41u8, 30u8, + 220u8, 131u8, 179u8, 200u8, 184u8, 105u8, 58u8, 131u8, 225u8, 169u8, + 253u8, 46u8, 186u8, 102u8, 52u8, 147u8, 244u8, 22u8, 255u8, 41u8, 6u8, + ] + { + let call = SetPvfVotingTtl { new }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Sets the minimum delay between announcing the upgrade block for a parachain until the"] + #[doc = "upgrade taking place."] + #[doc = ""] + #[doc = "See the field documentation for information and constraints for the new value."] + pub fn set_minimum_validation_upgrade_delay( + &self, + new: ::core::primitive::u32, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + SetMinimumValidationUpgradeDelay, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .call_hash::()? + == [ + 225u8, 178u8, 41u8, 194u8, 154u8, 222u8, 247u8, 129u8, 35u8, 102u8, + 248u8, 144u8, 21u8, 74u8, 42u8, 239u8, 135u8, 205u8, 173u8, 190u8, + 112u8, 30u8, 240u8, 106u8, 10u8, 217u8, 208u8, 11u8, 79u8, 47u8, 198u8, + 37u8, + ] + { + let call = SetMinimumValidationUpgradeDelay { new }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Setting this to true will disable consistency checks for the configuration setters."] + #[doc = "Use with caution."] + pub fn set_bypass_consistency_check( + &self, + new: ::core::primitive::bool, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + SetBypassConsistencyCheck, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .call_hash::()? 
+                        == [
+                            5u8, 54u8, 178u8, 218u8, 46u8, 61u8, 99u8, 23u8, 227u8, 202u8, 201u8,
+                            164u8, 121u8, 226u8, 65u8, 253u8, 29u8, 164u8, 170u8, 130u8, 32u8,
+                            85u8, 222u8, 10u8, 232u8, 252u8, 73u8, 23u8, 69u8, 30u8, 1u8, 87u8,
+                        ]
+                    {
+                        let call = SetBypassConsistencyCheck { new };
+                        Ok(::subxt::SubmittableExtrinsic::new(self.client, call))
+                    } else {
+                        Err(::subxt::MetadataError::IncompatibleMetadata.into())
+                    }
+                }
+            }
+        }
+        pub mod storage {
+            use super::runtime_types;
+            pub struct ActiveConfig;
+            impl ::subxt::StorageEntry for ActiveConfig {
+                const PALLET: &'static str = "Configuration";
+                const STORAGE: &'static str = "ActiveConfig";
+                type Value =
+                    runtime_types::polkadot_runtime_parachains::configuration::HostConfiguration<
+                        ::core::primitive::u32,
+                    >;
+                fn key(&self) -> ::subxt::StorageEntryKey {
+                    ::subxt::StorageEntryKey::Plain
+                }
+            }
+            pub struct PendingConfigs;
+            impl ::subxt::StorageEntry for PendingConfigs {
+                const PALLET: &'static str = "Configuration";
+                const STORAGE: &'static str = "PendingConfigs";
+                type Value = ::std::vec::Vec<(
+                    ::core::primitive::u32,
+                    runtime_types::polkadot_runtime_parachains::configuration::HostConfiguration<
+                        ::core::primitive::u32,
+                    >,
+                )>;
+                fn key(&self) -> ::subxt::StorageEntryKey {
+                    ::subxt::StorageEntryKey::Plain
+                }
+            }
+            pub struct BypassConsistencyCheck;
+            impl ::subxt::StorageEntry for BypassConsistencyCheck {
+                const PALLET: &'static str = "Configuration";
+                const STORAGE: &'static str = "BypassConsistencyCheck";
+                type Value = ::core::primitive::bool;
+                fn key(&self) -> ::subxt::StorageEntryKey {
+                    ::subxt::StorageEntryKey::Plain
+                }
+            }
+            pub struct StorageApi<'a, T: ::subxt::Config> {
+                client: &'a ::subxt::Client<T>,
+            }
+            impl<'a, T: ::subxt::Config> StorageApi<'a, T> {
+                pub fn new(client: &'a ::subxt::Client<T>) -> Self {
+                    Self { client }
+                }
+                #[doc = " The active configuration for the current session."]
+                pub async fn active_config(
+                    &self,
+                    block_hash: ::core::option::Option<T::Hash>,
+                ) -> ::core::result::Result<
+                    runtime_types::polkadot_runtime_parachains::configuration::HostConfiguration<
+                        ::core::primitive::u32,
+                    >,
+                    ::subxt::BasicError,
+                > {
+                    if self.client.metadata().storage_hash::<ActiveConfig>()?
+                        == [
+                            6u8, 31u8, 218u8, 51u8, 202u8, 166u8, 183u8, 192u8, 151u8, 184u8,
+                            103u8, 73u8, 239u8, 78u8, 183u8, 38u8, 192u8, 201u8, 27u8, 128u8, 59u8,
+                            48u8, 197u8, 23u8, 43u8, 39u8, 158u8, 35u8, 194u8, 23u8, 151u8, 145u8,
+                        ]
+                    {
+                        let entry = ActiveConfig;
+                        self.client
+                            .storage()
+                            .fetch_or_default(&entry, block_hash)
+                            .await
+                    } else {
+                        Err(::subxt::MetadataError::IncompatibleMetadata.into())
+                    }
+                }
+                #[doc = " Pending configuration changes."]
+                #[doc = ""]
+                #[doc = " This is a list of configuration changes, each with a session index at which it should"]
+                #[doc = " be applied."]
+                #[doc = ""]
+                #[doc = " The list is sorted ascending by session index. Also, this list can only contain at most"]
+                #[doc = " 2 items: for the next session and for the `scheduled_session`."]
+                pub async fn pending_configs(
+                    &self,
+                    block_hash: ::core::option::Option<T::Hash>,
+                ) -> ::core::result::Result<
+                    ::std::vec::Vec<(
+                        ::core::primitive::u32,
+                        runtime_types::polkadot_runtime_parachains::configuration::HostConfiguration<
+                            ::core::primitive::u32,
+                        >,
+                    )>,
+                    ::subxt::BasicError,
+                > {
+                    if self.client.metadata().storage_hash::<PendingConfigs>()?
+                        == [
+                            198u8, 168u8, 227u8, 228u8, 110u8, 98u8, 34u8, 21u8, 159u8, 114u8,
+                            202u8, 135u8, 39u8, 190u8, 40u8, 214u8, 170u8, 126u8, 203u8, 10u8,
+                            44u8, 114u8, 254u8, 208u8, 133u8, 129u8, 8u8, 112u8, 168u8, 135u8,
+                            196u8, 43u8,
+                        ]
+                    {
+                        let entry = PendingConfigs;
+                        self.client
+                            .storage()
+                            .fetch_or_default(&entry, block_hash)
+                            .await
+                    } else {
+                        Err(::subxt::MetadataError::IncompatibleMetadata.into())
+                    }
+                }
+                #[doc = " If this is set, then the configuration setters will bypass the consistency checks. This"]
+                #[doc = " is meant to be used only as the last resort."]
+                pub async fn bypass_consistency_check(
+                    &self,
+                    block_hash: ::core::option::Option<T::Hash>,
+                ) -> ::core::result::Result<::core::primitive::bool, ::subxt::BasicError>
+                {
+                    if self
+                        .client
+                        .metadata()
+                        .storage_hash::<BypassConsistencyCheck>()?
+                        == [
+                            42u8, 191u8, 122u8, 163u8, 112u8, 2u8, 148u8, 59u8, 79u8, 219u8, 184u8,
+                            172u8, 246u8, 136u8, 185u8, 251u8, 189u8, 226u8, 83u8, 129u8, 162u8,
+                            109u8, 148u8, 75u8, 120u8, 216u8, 44u8, 28u8, 221u8, 78u8, 177u8, 94u8,
+                        ]
+                    {
+                        let entry = BypassConsistencyCheck;
+                        self.client
+                            .storage()
+                            .fetch_or_default(&entry, block_hash)
+                            .await
+                    } else {
+                        Err(::subxt::MetadataError::IncompatibleMetadata.into())
+                    }
+                }
+            }
+        }
+    }
+    pub mod paras_shared {
+        use super::root_mod;
+        use super::runtime_types;
+        pub mod calls {
+            use super::root_mod;
+            use super::runtime_types;
+            type DispatchError = runtime_types::sp_runtime::DispatchError;
+            pub struct TransactionApi<'a, T: ::subxt::Config, X> {
+                client: &'a ::subxt::Client<T>,
+                marker: ::core::marker::PhantomData<X>,
+            }
+            impl<'a, T, X> TransactionApi<'a, T, X>
+            where
+                T: ::subxt::Config,
+                X: ::subxt::extrinsic::ExtrinsicParams<T>,
+            {
+                pub fn new(client: &'a ::subxt::Client<T>) -> Self {
+                    Self {
+                        client,
+                        marker: ::core::marker::PhantomData,
+                    }
+                }
+            }
+        }
+        pub mod storage {
+            use super::runtime_types;
+            pub struct CurrentSessionIndex;
+            impl ::subxt::StorageEntry for CurrentSessionIndex {
+                const PALLET: &'static str = "ParasShared";
+                const STORAGE: &'static str = "CurrentSessionIndex";
+                type Value = ::core::primitive::u32;
+                fn key(&self) -> ::subxt::StorageEntryKey {
+                    ::subxt::StorageEntryKey::Plain
+                }
+            }
+            pub struct ActiveValidatorIndices;
+            impl ::subxt::StorageEntry for ActiveValidatorIndices {
+                const PALLET: &'static str = "ParasShared";
+                const STORAGE: &'static str = "ActiveValidatorIndices";
+                type Value =
+                    ::std::vec::Vec<runtime_types::polkadot_primitives::v2::ValidatorIndex>;
+                fn key(&self) -> ::subxt::StorageEntryKey {
+                    ::subxt::StorageEntryKey::Plain
+                }
+            }
+            pub struct ActiveValidatorKeys;
+            impl ::subxt::StorageEntry for ActiveValidatorKeys {
+                const PALLET: &'static str = "ParasShared";
+                const STORAGE: &'static str = "ActiveValidatorKeys";
+                type Value =
+                    ::std::vec::Vec<runtime_types::polkadot_primitives::v2::validator_app::Public>;
+                fn key(&self) -> ::subxt::StorageEntryKey {
+                    ::subxt::StorageEntryKey::Plain
+                }
+            }
+            pub struct StorageApi<'a, T: ::subxt::Config> {
+                client: &'a ::subxt::Client<T>,
+            }
+            impl<'a, T: ::subxt::Config> StorageApi<'a, T> {
+                pub fn new(client: &'a ::subxt::Client<T>) -> Self {
+                    Self { client }
+                }
+                #[doc = " The current session index."]
+                pub async fn current_session_index(
+                    &self,
+                    block_hash: ::core::option::Option<T::Hash>,
+                ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError>
+                {
+                    if self
+                        .client
+                        .metadata()
+                        .storage_hash::<CurrentSessionIndex>()?
+ == [ + 83u8, 15u8, 20u8, 55u8, 103u8, 65u8, 76u8, 202u8, 69u8, 14u8, 221u8, + 93u8, 38u8, 163u8, 167u8, 83u8, 18u8, 245u8, 33u8, 175u8, 7u8, 97u8, + 67u8, 186u8, 96u8, 57u8, 147u8, 120u8, 107u8, 91u8, 147u8, 64u8, + ] + { + let entry = CurrentSessionIndex; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " All the validators actively participating in parachain consensus."] + #[doc = " Indices are into the broader validator set."] + pub async fn active_validator_indices( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::std::vec::Vec, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .storage_hash::()? + == [ + 128u8, 98u8, 186u8, 22u8, 178u8, 51u8, 151u8, 235u8, 201u8, 2u8, 245u8, + 177u8, 4u8, 125u8, 1u8, 245u8, 56u8, 102u8, 166u8, 129u8, 211u8, 189u8, + 137u8, 149u8, 234u8, 252u8, 97u8, 139u8, 151u8, 16u8, 129u8, 24u8, + ] + { + let entry = ActiveValidatorIndices; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The parachain attestation keys of the validators actively participating in parachain consensus."] + #[doc = " This should be the same length as `ActiveValidatorIndices`."] + pub async fn active_validator_keys( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::std::vec::Vec, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .storage_hash::()? + == [ + 231u8, 46u8, 32u8, 152u8, 56u8, 71u8, 153u8, 56u8, 241u8, 29u8, 64u8, + 70u8, 19u8, 31u8, 220u8, 139u8, 58u8, 212u8, 221u8, 140u8, 87u8, 140u8, + 218u8, 50u8, 204u8, 221u8, 214u8, 168u8, 135u8, 118u8, 94u8, 21u8, + ] + { + let entry = ActiveValidatorKeys; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + } + pub mod para_inclusion { + use super::root_mod; + use super::runtime_types; + pub mod calls { + use super::root_mod; + use super::runtime_types; + type DispatchError = runtime_types::sp_runtime::DispatchError; + pub struct TransactionApi<'a, T: ::subxt::Config, X> { + client: &'a ::subxt::Client, + marker: ::core::marker::PhantomData, + } + impl<'a, T, X> TransactionApi<'a, T, X> + where + T: ::subxt::Config, + X: ::subxt::extrinsic::ExtrinsicParams, + { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { + client, + marker: ::core::marker::PhantomData, + } + } + } + } + pub type Event = runtime_types::polkadot_runtime_parachains::inclusion::pallet::Event; + pub mod events { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "A candidate was backed. `[candidate, head_data]`"] + pub struct CandidateBacked( + pub runtime_types::polkadot_primitives::v2::CandidateReceipt<::subxt::sp_core::H256>, + pub runtime_types::polkadot_parachain::primitives::HeadData, + pub runtime_types::polkadot_primitives::v2::CoreIndex, + pub runtime_types::polkadot_primitives::v2::GroupIndex, + ); + impl ::subxt::Event for CandidateBacked { + const PALLET: &'static str = "ParaInclusion"; + const EVENT: &'static str = "CandidateBacked"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "A candidate was included. 
`[candidate, head_data]`"]
+ pub struct CandidateIncluded(
+ pub runtime_types::polkadot_primitives::v2::CandidateReceipt<::subxt::sp_core::H256>,
+ pub runtime_types::polkadot_parachain::primitives::HeadData,
+ pub runtime_types::polkadot_primitives::v2::CoreIndex,
+ pub runtime_types::polkadot_primitives::v2::GroupIndex,
+ );
+ impl ::subxt::Event for CandidateIncluded {
+ const PALLET: &'static str = "ParaInclusion";
+ const EVENT: &'static str = "CandidateIncluded";
+ }
+ #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)]
+ #[doc = "A candidate timed out. `[candidate, head_data]`"]
+ pub struct CandidateTimedOut(
+ pub runtime_types::polkadot_primitives::v2::CandidateReceipt<::subxt::sp_core::H256>,
+ pub runtime_types::polkadot_parachain::primitives::HeadData,
+ pub runtime_types::polkadot_primitives::v2::CoreIndex,
+ );
+ impl ::subxt::Event for CandidateTimedOut {
+ const PALLET: &'static str = "ParaInclusion";
+ const EVENT: &'static str = "CandidateTimedOut";
+ }
+ }
+ pub mod storage {
+ use super::runtime_types;
+ pub struct AvailabilityBitfields<'a>(
+ pub &'a runtime_types::polkadot_primitives::v2::ValidatorIndex,
+ );
+ impl ::subxt::StorageEntry for AvailabilityBitfields<'_> {
+ const PALLET: &'static str = "ParaInclusion";
+ const STORAGE: &'static str = "AvailabilityBitfields";
+ type Value = runtime_types :: polkadot_runtime_parachains :: inclusion :: AvailabilityBitfieldRecord < :: core :: primitive :: u32 > ;
+ fn key(&self) -> ::subxt::StorageEntryKey {
+ ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new(
+ &self.0,
+ ::subxt::StorageHasher::Twox64Concat,
+ )])
+ }
+ }
+ pub struct PendingAvailability<'a>(
+ pub &'a runtime_types::polkadot_parachain::primitives::Id,
+ );
+ impl ::subxt::StorageEntry for PendingAvailability<'_> {
+ const PALLET: &'static str = "ParaInclusion";
+ const STORAGE: &'static str = "PendingAvailability";
+ type Value = runtime_types :: polkadot_runtime_parachains :: inclusion :: CandidatePendingAvailability < :: subxt :: sp_core :: H256 , :: core :: primitive :: u32 > ;
+ fn key(&self) -> ::subxt::StorageEntryKey {
+ ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new(
+ &self.0,
+ ::subxt::StorageHasher::Twox64Concat,
+ )])
+ }
+ }
+ pub struct PendingAvailabilityCommitments<'a>(
+ pub &'a runtime_types::polkadot_parachain::primitives::Id,
+ );
+ impl ::subxt::StorageEntry for PendingAvailabilityCommitments<'_> {
+ const PALLET: &'static str = "ParaInclusion";
+ const STORAGE: &'static str = "PendingAvailabilityCommitments";
+ type Value = runtime_types::polkadot_primitives::v2::CandidateCommitments<
+ ::core::primitive::u32,
+ >;
+ fn key(&self) -> ::subxt::StorageEntryKey {
+ ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new(
+ &self.0,
+ ::subxt::StorageHasher::Twox64Concat,
+ )])
+ }
+ }
+ pub struct StorageApi<'a, T: ::subxt::Config> {
+ client: &'a ::subxt::Client<T>,
+ }
+ impl<'a, T: ::subxt::Config> StorageApi<'a, T> {
+ pub fn new(client: &'a ::subxt::Client<T>) -> Self {
+ Self { client }
+ }
+ #[doc = " The latest bitfield for each validator, referred to by their index in the validator set."] pub async fn availability_bitfields (& self , _0 : & runtime_types :: polkadot_primitives :: v2 :: ValidatorIndex , block_hash : :: core :: option :: Option < T :: Hash > ,) -> :: core :: result :: Result < :: core :: option :: Option < runtime_types :: polkadot_runtime_parachains :: inclusion :: AvailabilityBitfieldRecord < :: core :: primitive :: u32 > > , :: subxt :: BasicError >{
+ if
self + .client + .metadata() + .storage_hash::()? + == [ + 223u8, 74u8, 17u8, 152u8, 136u8, 20u8, 241u8, 47u8, 169u8, 34u8, 128u8, + 78u8, 121u8, 47u8, 165u8, 35u8, 222u8, 15u8, 236u8, 90u8, 215u8, 160u8, + 10u8, 18u8, 152u8, 69u8, 38u8, 97u8, 122u8, 247u8, 241u8, 255u8, + ] + { + let entry = AvailabilityBitfields(_0); + self.client.storage().fetch(&entry, block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The latest bitfield for each validator, referred to by their index in the validator set."] + pub async fn availability_bitfields_iter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::subxt::KeyIter<'a, T, AvailabilityBitfields<'a>>, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .storage_hash::()? + == [ + 223u8, 74u8, 17u8, 152u8, 136u8, 20u8, 241u8, 47u8, 169u8, 34u8, 128u8, + 78u8, 121u8, 47u8, 165u8, 35u8, 222u8, 15u8, 236u8, 90u8, 215u8, 160u8, + 10u8, 18u8, 152u8, 69u8, 38u8, 97u8, 122u8, 247u8, 241u8, 255u8, + ] + { + self.client.storage().iter(block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Candidates pending availability by `ParaId`."] pub async fn pending_availability (& self , _0 : & runtime_types :: polkadot_parachain :: primitives :: Id , block_hash : :: core :: option :: Option < T :: Hash > ,) -> :: core :: result :: Result < :: core :: option :: Option < runtime_types :: polkadot_runtime_parachains :: inclusion :: CandidatePendingAvailability < :: subxt :: sp_core :: H256 , :: core :: primitive :: u32 > > , :: subxt :: BasicError >{ + if self + .client + .metadata() + .storage_hash::()? + == [ + 87u8, 140u8, 64u8, 234u8, 110u8, 229u8, 7u8, 83u8, 100u8, 45u8, 125u8, + 76u8, 72u8, 179u8, 132u8, 190u8, 38u8, 22u8, 112u8, 85u8, 241u8, 82u8, + 208u8, 133u8, 187u8, 130u8, 6u8, 121u8, 100u8, 43u8, 106u8, 121u8, + ] + { + let entry = PendingAvailability(_0); + self.client.storage().fetch(&entry, block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Candidates pending availability by `ParaId`."] + pub async fn pending_availability_iter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::subxt::KeyIter<'a, T, PendingAvailability<'a>>, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .storage_hash::()? + == [ + 87u8, 140u8, 64u8, 234u8, 110u8, 229u8, 7u8, 83u8, 100u8, 45u8, 125u8, + 76u8, 72u8, 179u8, 132u8, 190u8, 38u8, 22u8, 112u8, 85u8, 241u8, 82u8, + 208u8, 133u8, 187u8, 130u8, 6u8, 121u8, 100u8, 43u8, 106u8, 121u8, + ] + { + self.client.storage().iter(block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The commitments of candidates pending availability, by `ParaId`."] + pub async fn pending_availability_commitments( + &self, + _0: &runtime_types::polkadot_parachain::primitives::Id, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::core::option::Option< + runtime_types::polkadot_primitives::v2::CandidateCommitments< + ::core::primitive::u32, + >, + >, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .storage_hash::()? 
+ == [ + 164u8, 245u8, 130u8, 208u8, 141u8, 88u8, 99u8, 247u8, 90u8, 215u8, + 40u8, 99u8, 239u8, 7u8, 231u8, 13u8, 233u8, 204u8, 223u8, 137u8, 158u8, + 250u8, 24u8, 107u8, 152u8, 240u8, 195u8, 28u8, 170u8, 219u8, 174u8, + 213u8, + ] + { + let entry = PendingAvailabilityCommitments(_0); + self.client.storage().fetch(&entry, block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The commitments of candidates pending availability, by `ParaId`."] + pub async fn pending_availability_commitments_iter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::subxt::KeyIter<'a, T, PendingAvailabilityCommitments<'a>>, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .storage_hash::()? + == [ + 164u8, 245u8, 130u8, 208u8, 141u8, 88u8, 99u8, 247u8, 90u8, 215u8, + 40u8, 99u8, 239u8, 7u8, 231u8, 13u8, 233u8, 204u8, 223u8, 137u8, 158u8, + 250u8, 24u8, 107u8, 152u8, 240u8, 195u8, 28u8, 170u8, 219u8, 174u8, + 213u8, + ] + { + self.client.storage().iter(block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + } + pub mod para_inherent { + use super::root_mod; + use super::runtime_types; + pub mod calls { + use super::root_mod; + use super::runtime_types; + type DispatchError = runtime_types::sp_runtime::DispatchError; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct Enter { + pub data: runtime_types::polkadot_primitives::v2::InherentData< + runtime_types::sp_runtime::generic::header::Header< + ::core::primitive::u32, + runtime_types::sp_runtime::traits::BlakeTwo256, + >, + >, + } + impl ::subxt::Call for Enter { + const PALLET: &'static str = "ParaInherent"; + const FUNCTION: &'static str = "enter"; + } + pub struct TransactionApi<'a, T: ::subxt::Config, X> { + client: &'a ::subxt::Client, + marker: ::core::marker::PhantomData, + } + impl<'a, T, X> TransactionApi<'a, T, X> + where + T: ::subxt::Config, + X: ::subxt::extrinsic::ExtrinsicParams, + { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { + client, + marker: ::core::marker::PhantomData, + } + } + #[doc = "Enter the paras inherent. This will process bitfields and backed candidates."] + pub fn enter( + &self, + data: runtime_types::polkadot_primitives::v2::InherentData< + runtime_types::sp_runtime::generic::header::Header< + ::core::primitive::u32, + runtime_types::sp_runtime::traits::BlakeTwo256, + >, + >, + ) -> Result< + ::subxt::SubmittableExtrinsic<'a, T, X, Enter, DispatchError, root_mod::Event>, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? 
+ == [ + 15u8, 12u8, 250u8, 143u8, 7u8, 146u8, 79u8, 126u8, 205u8, 10u8, 135u8, + 108u8, 193u8, 195u8, 225u8, 117u8, 58u8, 45u8, 16u8, 229u8, 6u8, 122u8, + 92u8, 75u8, 174u8, 150u8, 109u8, 176u8, 54u8, 199u8, 236u8, 38u8, + ] + { + let call = Enter { data }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + pub mod storage { + use super::runtime_types; + pub struct Included; + impl ::subxt::StorageEntry for Included { + const PALLET: &'static str = "ParaInherent"; + const STORAGE: &'static str = "Included"; + type Value = (); + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct OnChainVotes; + impl ::subxt::StorageEntry for OnChainVotes { + const PALLET: &'static str = "ParaInherent"; + const STORAGE: &'static str = "OnChainVotes"; + type Value = runtime_types::polkadot_primitives::v2::ScrapedOnChainVotes< + ::subxt::sp_core::H256, + >; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct StorageApi<'a, T: ::subxt::Config> { + client: &'a ::subxt::Client, + } + impl<'a, T: ::subxt::Config> StorageApi<'a, T> { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { client } + } + #[doc = " Whether the paras inherent was included within this block."] + #[doc = ""] + #[doc = " The `Option<()>` is effectively a `bool`, but it never hits storage in the `None` variant"] + #[doc = " due to the guarantees of FRAME's storage APIs."] + #[doc = ""] + #[doc = " If this is `None` at the end of the block, we panic and render the block invalid."] + pub async fn included( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result<::core::option::Option<()>, ::subxt::BasicError> + { + if self.client.metadata().storage_hash::()? + == [ + 208u8, 213u8, 76u8, 64u8, 90u8, 141u8, 144u8, 52u8, 220u8, 35u8, 143u8, + 171u8, 45u8, 59u8, 9u8, 218u8, 29u8, 186u8, 139u8, 203u8, 205u8, 12u8, + 10u8, 2u8, 27u8, 167u8, 182u8, 244u8, 167u8, 220u8, 44u8, 16u8, + ] + { + let entry = Included; + self.client.storage().fetch(&entry, block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Scraped on chain data for extracting resolved disputes as well as backing votes."] + pub async fn on_chain_votes( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::core::option::Option< + runtime_types::polkadot_primitives::v2::ScrapedOnChainVotes< + ::subxt::sp_core::H256, + >, + >, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? 
+ == [ + 245u8, 37u8, 52u8, 78u8, 128u8, 131u8, 93u8, 38u8, 210u8, 78u8, 218u8, + 171u8, 131u8, 175u8, 215u8, 91u8, 122u8, 134u8, 127u8, 79u8, 7u8, + 165u8, 122u8, 184u8, 122u8, 168u8, 218u8, 207u8, 15u8, 23u8, 162u8, + 4u8, + ] + { + let entry = OnChainVotes; + self.client.storage().fetch(&entry, block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + } + pub mod para_scheduler { + use super::root_mod; + use super::runtime_types; + pub mod storage { + use super::runtime_types; + pub struct ValidatorGroups; + impl ::subxt::StorageEntry for ValidatorGroups { + const PALLET: &'static str = "ParaScheduler"; + const STORAGE: &'static str = "ValidatorGroups"; + type Value = ::std::vec::Vec< + ::std::vec::Vec, + >; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct ParathreadQueue; + impl ::subxt::StorageEntry for ParathreadQueue { + const PALLET: &'static str = "ParaScheduler"; + const STORAGE: &'static str = "ParathreadQueue"; + type Value = + runtime_types::polkadot_runtime_parachains::scheduler::ParathreadClaimQueue; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct AvailabilityCores; + impl ::subxt::StorageEntry for AvailabilityCores { + const PALLET: &'static str = "ParaScheduler"; + const STORAGE: &'static str = "AvailabilityCores"; + type Value = ::std::vec::Vec< + ::core::option::Option, + >; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct ParathreadClaimIndex; + impl ::subxt::StorageEntry for ParathreadClaimIndex { + const PALLET: &'static str = "ParaScheduler"; + const STORAGE: &'static str = "ParathreadClaimIndex"; + type Value = ::std::vec::Vec; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct SessionStartBlock; + impl ::subxt::StorageEntry for SessionStartBlock { + const PALLET: &'static str = "ParaScheduler"; + const STORAGE: &'static str = "SessionStartBlock"; + type Value = ::core::primitive::u32; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct Scheduled; + impl ::subxt::StorageEntry for Scheduled { + const PALLET: &'static str = "ParaScheduler"; + const STORAGE: &'static str = "Scheduled"; + type Value = ::std::vec::Vec< + runtime_types::polkadot_runtime_parachains::scheduler::CoreAssignment, + >; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct StorageApi<'a, T: ::subxt::Config> { + client: &'a ::subxt::Client, + } + impl<'a, T: ::subxt::Config> StorageApi<'a, T> { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { client } + } + #[doc = " All the validator groups. One for each core. Indices are into `ActiveValidators` - not the"] + #[doc = " broader set of Polkadot validators, but instead just the subset used for parachains during"] + #[doc = " this session."] + #[doc = ""] + #[doc = " Bound: The number of cores is the sum of the numbers of parachains and parathread multiplexers."] + #[doc = " Reasonably, 100-1000. The dominant factor is the number of validators: safe upper bound at 10k."] + pub async fn validator_groups( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::std::vec::Vec< + ::std::vec::Vec, + >, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? 
+ == [ + 84u8, 195u8, 53u8, 111u8, 186u8, 61u8, 3u8, 36u8, 10u8, 9u8, 66u8, + 119u8, 116u8, 213u8, 86u8, 153u8, 18u8, 149u8, 83u8, 92u8, 232u8, + 212u8, 175u8, 52u8, 74u8, 135u8, 137u8, 34u8, 123u8, 232u8, 131u8, + 22u8, + ] + { + let entry = ValidatorGroups; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " A queue of upcoming claims and which core they should be mapped onto."] + #[doc = ""] + #[doc = " The number of queued claims is bounded at the `scheduling_lookahead`"] + #[doc = " multiplied by the number of parathread multiplexer cores. Reasonably, 10 * 50 = 500."] + pub async fn parathread_queue( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + runtime_types::polkadot_runtime_parachains::scheduler::ParathreadClaimQueue, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 72u8, 99u8, 70u8, 216u8, 91u8, 118u8, 160u8, 100u8, 20u8, 192u8, 78u8, + 214u8, 165u8, 200u8, 223u8, 166u8, 50u8, 214u8, 41u8, 241u8, 84u8, + 68u8, 21u8, 86u8, 130u8, 13u8, 124u8, 128u8, 104u8, 194u8, 23u8, 223u8, + ] + { + let entry = ParathreadQueue; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " One entry for each availability core. Entries are `None` if the core is not currently occupied. Can be"] + #[doc = " temporarily `Some` if scheduled but not occupied."] + #[doc = " The i'th parachain belongs to the i'th core, with the remaining cores all being"] + #[doc = " parathread-multiplexers."] + #[doc = ""] + #[doc = " Bounded by the maximum of either of these two values:"] + #[doc = " * The number of parachains and parathread multiplexers"] + #[doc = " * The number of validators divided by `configuration.max_validators_per_core`."] + pub async fn availability_cores( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::std::vec::Vec< + ::core::option::Option< + runtime_types::polkadot_primitives::v2::CoreOccupied, + >, + >, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 190u8, 79u8, 55u8, 188u8, 40u8, 219u8, 187u8, 11u8, 142u8, 67u8, 86u8, + 242u8, 107u8, 26u8, 63u8, 138u8, 169u8, 24u8, 36u8, 112u8, 61u8, 206u8, + 32u8, 168u8, 167u8, 236u8, 133u8, 90u8, 16u8, 130u8, 121u8, 113u8, + ] + { + let entry = AvailabilityCores; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " An index used to ensure that only one claim on a parathread exists in the queue or is"] + #[doc = " currently being handled by an occupied core."] + #[doc = ""] + #[doc = " Bounded by the number of parathread cores and scheduling lookahead. Reasonably, 10 * 50 = 500."] + pub async fn parathread_claim_index( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::std::vec::Vec, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .storage_hash::()? 
+ == [ + 187u8, 105u8, 221u8, 0u8, 103u8, 9u8, 52u8, 127u8, 47u8, 155u8, 147u8, + 84u8, 249u8, 213u8, 140u8, 75u8, 99u8, 238u8, 220u8, 242u8, 220u8, + 99u8, 204u8, 178u8, 153u8, 170u8, 72u8, 34u8, 83u8, 238u8, 211u8, + 150u8, + ] + { + let entry = ParathreadClaimIndex; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The block number where the session start occurred. Used to track how many group rotations have occurred."] + #[doc = ""] + #[doc = " Note that in the context of parachains modules the session change is signaled during"] + #[doc = " the block and enacted at the end of the block (at the finalization stage, to be exact)."] + #[doc = " Thus for all intents and purposes the effect of the session change is observed at the"] + #[doc = " block following the session change, block number of which we save in this storage value."] + pub async fn session_start_block( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> + { + if self.client.metadata().storage_hash::()? + == [ + 122u8, 37u8, 150u8, 1u8, 185u8, 201u8, 168u8, 67u8, 55u8, 17u8, 101u8, + 18u8, 133u8, 212u8, 6u8, 73u8, 191u8, 204u8, 229u8, 22u8, 185u8, 120u8, + 24u8, 245u8, 121u8, 215u8, 124u8, 210u8, 49u8, 28u8, 26u8, 80u8, + ] + { + let entry = SessionStartBlock; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Currently scheduled cores - free but up to be occupied."] + #[doc = ""] + #[doc = " Bounded by the number of cores: one for each parachain and parathread multiplexer."] + #[doc = ""] + #[doc = " The value contained here will not be valid after the end of a block. Runtime APIs should be used to determine scheduled cores/"] + #[doc = " for the upcoming block."] + pub async fn scheduled( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::std::vec::Vec< + runtime_types::polkadot_runtime_parachains::scheduler::CoreAssignment, + >, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? 
+ == [ + 167u8, 69u8, 107u8, 76u8, 233u8, 106u8, 232u8, 95u8, 167u8, 73u8, 93u8, + 110u8, 43u8, 94u8, 27u8, 207u8, 152u8, 184u8, 43u8, 245u8, 200u8, + 141u8, 65u8, 32u8, 201u8, 80u8, 73u8, 155u8, 26u8, 82u8, 121u8, 194u8, + ] + { + let entry = Scheduled; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + } + pub mod paras { + use super::root_mod; + use super::runtime_types; + pub mod calls { + use super::root_mod; + use super::runtime_types; + type DispatchError = runtime_types::sp_runtime::DispatchError; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct ForceSetCurrentCode { + pub para: runtime_types::polkadot_parachain::primitives::Id, + pub new_code: runtime_types::polkadot_parachain::primitives::ValidationCode, + } + impl ::subxt::Call for ForceSetCurrentCode { + const PALLET: &'static str = "Paras"; + const FUNCTION: &'static str = "force_set_current_code"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct ForceSetCurrentHead { + pub para: runtime_types::polkadot_parachain::primitives::Id, + pub new_head: runtime_types::polkadot_parachain::primitives::HeadData, + } + impl ::subxt::Call for ForceSetCurrentHead { + const PALLET: &'static str = "Paras"; + const FUNCTION: &'static str = "force_set_current_head"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct ForceScheduleCodeUpgrade { + pub para: runtime_types::polkadot_parachain::primitives::Id, + pub new_code: runtime_types::polkadot_parachain::primitives::ValidationCode, + pub relay_parent_number: ::core::primitive::u32, + } + impl ::subxt::Call for ForceScheduleCodeUpgrade { + const PALLET: &'static str = "Paras"; + const FUNCTION: &'static str = "force_schedule_code_upgrade"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct ForceNoteNewHead { + pub para: runtime_types::polkadot_parachain::primitives::Id, + pub new_head: runtime_types::polkadot_parachain::primitives::HeadData, + } + impl ::subxt::Call for ForceNoteNewHead { + const PALLET: &'static str = "Paras"; + const FUNCTION: &'static str = "force_note_new_head"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct ForceQueueAction { + pub para: runtime_types::polkadot_parachain::primitives::Id, + } + impl ::subxt::Call for ForceQueueAction { + const PALLET: &'static str = "Paras"; + const FUNCTION: &'static str = "force_queue_action"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct AddTrustedValidationCode { + pub validation_code: runtime_types::polkadot_parachain::primitives::ValidationCode, + } + impl ::subxt::Call for AddTrustedValidationCode { + const PALLET: &'static str = "Paras"; + const FUNCTION: &'static str = "add_trusted_validation_code"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct PokeUnusedValidationCode { + pub validation_code_hash: + runtime_types::polkadot_parachain::primitives::ValidationCodeHash, + } + impl ::subxt::Call for PokeUnusedValidationCode { + const PALLET: &'static str = "Paras"; + const FUNCTION: &'static str = "poke_unused_validation_code"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct IncludePvfCheckStatement { + pub stmt: runtime_types::polkadot_primitives::v2::PvfCheckStatement, + 
pub signature: runtime_types::polkadot_primitives::v2::validator_app::Signature, + } + impl ::subxt::Call for IncludePvfCheckStatement { + const PALLET: &'static str = "Paras"; + const FUNCTION: &'static str = "include_pvf_check_statement"; + } + pub struct TransactionApi<'a, T: ::subxt::Config, X> { + client: &'a ::subxt::Client, + marker: ::core::marker::PhantomData, + } + impl<'a, T, X> TransactionApi<'a, T, X> + where + T: ::subxt::Config, + X: ::subxt::extrinsic::ExtrinsicParams, + { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { + client, + marker: ::core::marker::PhantomData, + } + } + #[doc = "Set the storage for the parachain validation code immediately."] + pub fn force_set_current_code( + &self, + para: runtime_types::polkadot_parachain::primitives::Id, + new_code: runtime_types::polkadot_parachain::primitives::ValidationCode, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + ForceSetCurrentCode, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 100u8, 36u8, 105u8, 246u8, 77u8, 252u8, 162u8, 139u8, 60u8, 37u8, 12u8, + 148u8, 206u8, 160u8, 134u8, 105u8, 50u8, 52u8, 156u8, 252u8, 217u8, + 174u8, 211u8, 208u8, 88u8, 81u8, 236u8, 66u8, 27u8, 59u8, 126u8, 5u8, + ] + { + let call = ForceSetCurrentCode { para, new_code }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Set the storage for the current parachain head data immediately."] + pub fn force_set_current_head( + &self, + para: runtime_types::polkadot_parachain::primitives::Id, + new_head: runtime_types::polkadot_parachain::primitives::HeadData, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + ForceSetCurrentHead, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 119u8, 46u8, 120u8, 202u8, 138u8, 190u8, 179u8, 78u8, 155u8, 167u8, + 220u8, 233u8, 170u8, 248u8, 202u8, 92u8, 73u8, 246u8, 224u8, 56u8, + 208u8, 124u8, 215u8, 19u8, 235u8, 246u8, 89u8, 189u8, 19u8, 205u8, + 22u8, 70u8, + ] + { + let call = ForceSetCurrentHead { para, new_head }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Schedule an upgrade as if it was scheduled in the given relay parent block."] + pub fn force_schedule_code_upgrade( + &self, + para: runtime_types::polkadot_parachain::primitives::Id, + new_code: runtime_types::polkadot_parachain::primitives::ValidationCode, + relay_parent_number: ::core::primitive::u32, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + ForceScheduleCodeUpgrade, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .call_hash::()? 
+ == [ + 254u8, 60u8, 105u8, 37u8, 116u8, 190u8, 30u8, 255u8, 210u8, 24u8, + 120u8, 99u8, 174u8, 215u8, 233u8, 83u8, 57u8, 200u8, 24u8, 49u8, 220u8, + 12u8, 103u8, 30u8, 165u8, 10u8, 125u8, 255u8, 88u8, 134u8, 199u8, 3u8, + ] + { + let call = ForceScheduleCodeUpgrade { + para, + new_code, + relay_parent_number, + }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Note a new block head for para within the context of the current block."] + pub fn force_note_new_head( + &self, + para: runtime_types::polkadot_parachain::primitives::Id, + new_head: runtime_types::polkadot_parachain::primitives::HeadData, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + ForceNoteNewHead, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 203u8, 31u8, 68u8, 125u8, 105u8, 218u8, 177u8, 205u8, 248u8, 131u8, + 25u8, 170u8, 140u8, 56u8, 183u8, 106u8, 2u8, 118u8, 79u8, 22u8, 228u8, + 91u8, 33u8, 66u8, 245u8, 144u8, 147u8, 142u8, 14u8, 171u8, 125u8, + 233u8, + ] + { + let call = ForceNoteNewHead { para, new_head }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Put a parachain directly into the next session's action queue."] + #[doc = "We can't queue it any sooner than this without going into the"] + #[doc = "initializer..."] + pub fn force_queue_action( + &self, + para: runtime_types::polkadot_parachain::primitives::Id, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + ForceQueueAction, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 141u8, 235u8, 245u8, 93u8, 24u8, 155u8, 106u8, 136u8, 190u8, 236u8, + 216u8, 131u8, 245u8, 5u8, 186u8, 131u8, 159u8, 240u8, 95u8, 139u8, + 231u8, 12u8, 255u8, 74u8, 194u8, 13u8, 112u8, 78u8, 110u8, 95u8, 26u8, + 133u8, + ] + { + let call = ForceQueueAction { para }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Adds the validation code to the storage."] + #[doc = ""] + #[doc = "The code will not be added if it is already present. Additionally, if PVF pre-checking"] + #[doc = "is running for that code, it will be instantly accepted."] + #[doc = ""] + #[doc = "Otherwise, the code will be added into the storage. Note that the code will be added"] + #[doc = "into storage with reference count 0. This is to account the fact that there are no users"] + #[doc = "for this code yet. The caller will have to make sure that this code eventually gets"] + #[doc = "used by some parachain or removed from the storage to avoid storage leaks. For the latter"] + #[doc = "prefer to use the `poke_unused_validation_code` dispatchable to raw storage manipulation."] + #[doc = ""] + #[doc = "This function is mainly meant to be used for upgrading parachains that do not follow"] + #[doc = "the go-ahead signal while the PVF pre-checking feature is enabled."] + pub fn add_trusted_validation_code( + &self, + validation_code: runtime_types::polkadot_parachain::primitives::ValidationCode, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + AddTrustedValidationCode, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .call_hash::()? 
+ == [ + 110u8, 255u8, 249u8, 176u8, 109u8, 54u8, 87u8, 19u8, 7u8, 62u8, 220u8, + 143u8, 196u8, 99u8, 66u8, 49u8, 18u8, 225u8, 14u8, 42u8, 243u8, 228u8, + 232u8, 207u8, 246u8, 34u8, 179u8, 127u8, 246u8, 239u8, 30u8, 214u8, + ] + { + let call = AddTrustedValidationCode { validation_code }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Remove the validation code from the storage iff the reference count is 0."] + #[doc = ""] + #[doc = "This is better than removing the storage directly, because it will not remove the code"] + #[doc = "that was suddenly got used by some parachain while this dispatchable was pending"] + #[doc = "dispatching."] + pub fn poke_unused_validation_code( + &self, + validation_code_hash : runtime_types :: polkadot_parachain :: primitives :: ValidationCodeHash, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + PokeUnusedValidationCode, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .call_hash::()? + == [ + 128u8, 49u8, 50u8, 239u8, 119u8, 116u8, 110u8, 52u8, 85u8, 66u8, 127u8, + 118u8, 206u8, 191u8, 206u8, 84u8, 255u8, 88u8, 179u8, 43u8, 163u8, + 185u8, 237u8, 191u8, 34u8, 135u8, 44u8, 231u8, 199u8, 5u8, 183u8, 5u8, + ] + { + let call = PokeUnusedValidationCode { + validation_code_hash, + }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Includes a statement for a PVF pre-checking vote. Potentially, finalizes the vote and"] + #[doc = "enacts the results if that was the last vote before achieving the supermajority."] + pub fn include_pvf_check_statement( + &self, + stmt: runtime_types::polkadot_primitives::v2::PvfCheckStatement, + signature: runtime_types::polkadot_primitives::v2::validator_app::Signature, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + IncludePvfCheckStatement, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .call_hash::()? + == [ + 138u8, 112u8, 12u8, 226u8, 95u8, 253u8, 48u8, 219u8, 9u8, 35u8, 99u8, + 122u8, 35u8, 194u8, 79u8, 103u8, 52u8, 242u8, 39u8, 110u8, 166u8, + 212u8, 80u8, 105u8, 3u8, 242u8, 59u8, 13u8, 161u8, 32u8, 224u8, 25u8, + ] + { + let call = IncludePvfCheckStatement { stmt, signature }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + pub type Event = runtime_types::polkadot_runtime_parachains::paras::pallet::Event; + pub mod events { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "Current code has been updated for a Para. `para_id`"] + pub struct CurrentCodeUpdated(pub runtime_types::polkadot_parachain::primitives::Id); + impl ::subxt::Event for CurrentCodeUpdated { + const PALLET: &'static str = "Paras"; + const EVENT: &'static str = "CurrentCodeUpdated"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "Current head has been updated for a Para. 
`para_id`"] + pub struct CurrentHeadUpdated(pub runtime_types::polkadot_parachain::primitives::Id); + impl ::subxt::Event for CurrentHeadUpdated { + const PALLET: &'static str = "Paras"; + const EVENT: &'static str = "CurrentHeadUpdated"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "A code upgrade has been scheduled for a Para. `para_id`"] + pub struct CodeUpgradeScheduled(pub runtime_types::polkadot_parachain::primitives::Id); + impl ::subxt::Event for CodeUpgradeScheduled { + const PALLET: &'static str = "Paras"; + const EVENT: &'static str = "CodeUpgradeScheduled"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "A new head has been noted for a Para. `para_id`"] + pub struct NewHeadNoted(pub runtime_types::polkadot_parachain::primitives::Id); + impl ::subxt::Event for NewHeadNoted { + const PALLET: &'static str = "Paras"; + const EVENT: &'static str = "NewHeadNoted"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "A para has been queued to execute pending actions. `para_id`"] + pub struct ActionQueued( + pub runtime_types::polkadot_parachain::primitives::Id, + pub ::core::primitive::u32, + ); + impl ::subxt::Event for ActionQueued { + const PALLET: &'static str = "Paras"; + const EVENT: &'static str = "ActionQueued"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "The given para either initiated or subscribed to a PVF check for the given validation"] + #[doc = "code. `code_hash` `para_id`"] + pub struct PvfCheckStarted( + pub runtime_types::polkadot_parachain::primitives::ValidationCodeHash, + pub runtime_types::polkadot_parachain::primitives::Id, + ); + impl ::subxt::Event for PvfCheckStarted { + const PALLET: &'static str = "Paras"; + const EVENT: &'static str = "PvfCheckStarted"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "The given validation code was accepted by the PVF pre-checking vote."] + #[doc = "`code_hash` `para_id`"] + pub struct PvfCheckAccepted( + pub runtime_types::polkadot_parachain::primitives::ValidationCodeHash, + pub runtime_types::polkadot_parachain::primitives::Id, + ); + impl ::subxt::Event for PvfCheckAccepted { + const PALLET: &'static str = "Paras"; + const EVENT: &'static str = "PvfCheckAccepted"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "The given validation code was rejected by the PVF pre-checking vote."] + #[doc = "`code_hash` `para_id`"] + pub struct PvfCheckRejected( + pub runtime_types::polkadot_parachain::primitives::ValidationCodeHash, + pub runtime_types::polkadot_parachain::primitives::Id, + ); + impl ::subxt::Event for PvfCheckRejected { + const PALLET: &'static str = "Paras"; + const EVENT: &'static str = "PvfCheckRejected"; + } + } + pub mod storage { + use super::runtime_types; + pub struct PvfActiveVoteMap<'a>( + pub &'a runtime_types::polkadot_parachain::primitives::ValidationCodeHash, + ); + impl ::subxt::StorageEntry for PvfActiveVoteMap<'_> { + const PALLET: &'static str = "Paras"; + const STORAGE: &'static str = "PvfActiveVoteMap"; + type Value = + runtime_types::polkadot_runtime_parachains::paras::PvfCheckActiveVoteState< + ::core::primitive::u32, + >; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( + &self.0, + ::subxt::StorageHasher::Twox64Concat, + )]) + } + } + pub struct PvfActiveVoteList; + impl 
::subxt::StorageEntry for PvfActiveVoteList {
+ const PALLET: &'static str = "Paras";
+ const STORAGE: &'static str = "PvfActiveVoteList";
+ type Value = ::std::vec::Vec<
+ runtime_types::polkadot_parachain::primitives::ValidationCodeHash,
+ >;
+ fn key(&self) -> ::subxt::StorageEntryKey {
+ ::subxt::StorageEntryKey::Plain
+ }
+ }
+ pub struct Parachains;
+ impl ::subxt::StorageEntry for Parachains {
+ const PALLET: &'static str = "Paras";
+ const STORAGE: &'static str = "Parachains";
+ type Value = ::std::vec::Vec<runtime_types::polkadot_parachain::primitives::Id>;
+ fn key(&self) -> ::subxt::StorageEntryKey {
+ ::subxt::StorageEntryKey::Plain
+ }
+ }
+ pub struct ParaLifecycles<'a>(
+ pub &'a runtime_types::polkadot_parachain::primitives::Id,
+ );
+ impl ::subxt::StorageEntry for ParaLifecycles<'_> {
+ const PALLET: &'static str = "Paras";
+ const STORAGE: &'static str = "ParaLifecycles";
+ type Value = runtime_types::polkadot_runtime_parachains::paras::ParaLifecycle;
+ fn key(&self) -> ::subxt::StorageEntryKey {
+ ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new(
+ &self.0,
+ ::subxt::StorageHasher::Twox64Concat,
+ )])
+ }
+ }
+ pub struct Heads<'a>(pub &'a runtime_types::polkadot_parachain::primitives::Id);
+ impl ::subxt::StorageEntry for Heads<'_> {
+ const PALLET: &'static str = "Paras";
+ const STORAGE: &'static str = "Heads";
+ type Value = runtime_types::polkadot_parachain::primitives::HeadData;
+ fn key(&self) -> ::subxt::StorageEntryKey {
+ ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new(
+ &self.0,
+ ::subxt::StorageHasher::Twox64Concat,
+ )])
+ }
+ }
+ pub struct CurrentCodeHash<'a>(
+ pub &'a runtime_types::polkadot_parachain::primitives::Id,
+ );
+ impl ::subxt::StorageEntry for CurrentCodeHash<'_> {
+ const PALLET: &'static str = "Paras";
+ const STORAGE: &'static str = "CurrentCodeHash";
+ type Value = runtime_types::polkadot_parachain::primitives::ValidationCodeHash;
+ fn key(&self) -> ::subxt::StorageEntryKey {
+ ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new(
+ &self.0,
+ ::subxt::StorageHasher::Twox64Concat,
+ )])
+ }
+ }
+ pub struct PastCodeHash<'a>(
+ pub &'a runtime_types::polkadot_parachain::primitives::Id,
+ pub &'a ::core::primitive::u32,
+ );
+ impl ::subxt::StorageEntry for PastCodeHash<'_> {
+ const PALLET: &'static str = "Paras";
+ const STORAGE: &'static str = "PastCodeHash";
+ type Value = runtime_types::polkadot_parachain::primitives::ValidationCodeHash;
+ fn key(&self) -> ::subxt::StorageEntryKey {
+ ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new(
+ &(&self.0, &self.1),
+ ::subxt::StorageHasher::Twox64Concat,
+ )])
+ }
+ }
+ pub struct PastCodeMeta<'a>(pub &'a runtime_types::polkadot_parachain::primitives::Id);
+ impl ::subxt::StorageEntry for PastCodeMeta<'_> {
+ const PALLET: &'static str = "Paras";
+ const STORAGE: &'static str = "PastCodeMeta";
+ type Value = runtime_types::polkadot_runtime_parachains::paras::ParaPastCodeMeta<
+ ::core::primitive::u32,
+ >;
+ fn key(&self) -> ::subxt::StorageEntryKey {
+ ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new(
+ &self.0,
+ ::subxt::StorageHasher::Twox64Concat,
+ )])
+ }
+ }
+ pub struct PastCodePruning;
+ impl ::subxt::StorageEntry for PastCodePruning {
+ const PALLET: &'static str = "Paras";
+ const STORAGE: &'static str = "PastCodePruning";
+ type Value = ::std::vec::Vec<(
+ runtime_types::polkadot_parachain::primitives::Id,
+ ::core::primitive::u32,
+ )>;
+ fn key(&self) -> ::subxt::StorageEntryKey {
+ ::subxt::StorageEntryKey::Plain
+ }
+ }
+ pub struct FutureCodeUpgrades<'a>(
+ pub &'a runtime_types::polkadot_parachain::primitives::Id,
+ );
+ impl ::subxt::StorageEntry for FutureCodeUpgrades<'_> {
+ const PALLET: &'static str = "Paras";
+ const STORAGE: &'static str = "FutureCodeUpgrades";
+ type Value = ::core::primitive::u32;
+ fn key(&self) -> ::subxt::StorageEntryKey {
+ ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new(
+ &self.0,
+ ::subxt::StorageHasher::Twox64Concat,
+ )])
+ }
+ }
+ pub struct FutureCodeHash<'a>(
+ pub &'a runtime_types::polkadot_parachain::primitives::Id,
+ );
+ impl ::subxt::StorageEntry for FutureCodeHash<'_> {
+ const PALLET: &'static str = "Paras";
+ const STORAGE: &'static str = "FutureCodeHash";
+ type Value = runtime_types::polkadot_parachain::primitives::ValidationCodeHash;
+ fn key(&self) -> ::subxt::StorageEntryKey {
+ ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new(
+ &self.0,
+ ::subxt::StorageHasher::Twox64Concat,
+ )])
+ }
+ }
+ pub struct UpgradeGoAheadSignal<'a>(
+ pub &'a runtime_types::polkadot_parachain::primitives::Id,
+ );
+ impl ::subxt::StorageEntry for UpgradeGoAheadSignal<'_> {
+ const PALLET: &'static str = "Paras";
+ const STORAGE: &'static str = "UpgradeGoAheadSignal";
+ type Value = runtime_types::polkadot_primitives::v2::UpgradeGoAhead;
+ fn key(&self) -> ::subxt::StorageEntryKey {
+ ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new(
+ &self.0,
+ ::subxt::StorageHasher::Twox64Concat,
+ )])
+ }
+ }
+ pub struct UpgradeRestrictionSignal<'a>(
+ pub &'a runtime_types::polkadot_parachain::primitives::Id,
+ );
+ impl ::subxt::StorageEntry for UpgradeRestrictionSignal<'_> {
+ const PALLET: &'static str = "Paras";
+ const STORAGE: &'static str = "UpgradeRestrictionSignal";
+ type Value = runtime_types::polkadot_primitives::v2::UpgradeRestriction;
+ fn key(&self) -> ::subxt::StorageEntryKey {
+ ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new(
+ &self.0,
+ ::subxt::StorageHasher::Twox64Concat,
+ )])
+ }
+ }
+ pub struct UpgradeCooldowns;
+ impl ::subxt::StorageEntry for UpgradeCooldowns {
+ const PALLET: &'static str = "Paras";
+ const STORAGE: &'static str = "UpgradeCooldowns";
+ type Value = ::std::vec::Vec<(
+ runtime_types::polkadot_parachain::primitives::Id,
+ ::core::primitive::u32,
+ )>;
+ fn key(&self) -> ::subxt::StorageEntryKey {
+ ::subxt::StorageEntryKey::Plain
+ }
+ }
+ pub struct UpcomingUpgrades;
+ impl ::subxt::StorageEntry for UpcomingUpgrades {
+ const PALLET: &'static str = "Paras";
+ const STORAGE: &'static str = "UpcomingUpgrades";
+ type Value = ::std::vec::Vec<(
+ runtime_types::polkadot_parachain::primitives::Id,
+ ::core::primitive::u32,
+ )>;
+ fn key(&self) -> ::subxt::StorageEntryKey {
+ ::subxt::StorageEntryKey::Plain
+ }
+ }
+ pub struct ActionsQueue<'a>(pub &'a ::core::primitive::u32);
+ impl ::subxt::StorageEntry for ActionsQueue<'_> {
+ const PALLET: &'static str = "Paras";
+ const STORAGE: &'static str = "ActionsQueue";
+ type Value = ::std::vec::Vec<runtime_types::polkadot_parachain::primitives::Id>;
+ fn key(&self) -> ::subxt::StorageEntryKey {
+ ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new(
+ &self.0,
+ ::subxt::StorageHasher::Twox64Concat,
+ )])
+ }
+ }
+ pub struct UpcomingParasGenesis<'a>(
+ pub &'a runtime_types::polkadot_parachain::primitives::Id,
+ );
+ impl ::subxt::StorageEntry for UpcomingParasGenesis<'_> {
+ const PALLET: &'static str = "Paras";
+ const STORAGE: &'static str = "UpcomingParasGenesis";
+ type Value = runtime_types::polkadot_runtime_parachains::paras::ParaGenesisArgs;
+ fn key(&self) -> ::subxt::StorageEntryKey {
+
::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( + &self.0, + ::subxt::StorageHasher::Twox64Concat, + )]) + } + } + pub struct CodeByHashRefs<'a>( + pub &'a runtime_types::polkadot_parachain::primitives::ValidationCodeHash, + ); + impl ::subxt::StorageEntry for CodeByHashRefs<'_> { + const PALLET: &'static str = "Paras"; + const STORAGE: &'static str = "CodeByHashRefs"; + type Value = ::core::primitive::u32; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( + &self.0, + ::subxt::StorageHasher::Identity, + )]) + } + } + pub struct CodeByHash<'a>( + pub &'a runtime_types::polkadot_parachain::primitives::ValidationCodeHash, + ); + impl ::subxt::StorageEntry for CodeByHash<'_> { + const PALLET: &'static str = "Paras"; + const STORAGE: &'static str = "CodeByHash"; + type Value = runtime_types::polkadot_parachain::primitives::ValidationCode; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( + &self.0, + ::subxt::StorageHasher::Identity, + )]) + } + } + pub struct StorageApi<'a, T: ::subxt::Config> { + client: &'a ::subxt::Client, + } + impl<'a, T: ::subxt::Config> StorageApi<'a, T> { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { client } + } + #[doc = " All currently active PVF pre-checking votes."] + #[doc = ""] + #[doc = " Invariant:"] + #[doc = " - There are no PVF pre-checking votes that exists in list but not in the set and vice versa."] + pub async fn pvf_active_vote_map( + &self, + _0: &runtime_types::polkadot_parachain::primitives::ValidationCodeHash, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::core::option::Option< + runtime_types::polkadot_runtime_parachains::paras::PvfCheckActiveVoteState< + ::core::primitive::u32, + >, + >, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 245u8, 158u8, 140u8, 115u8, 132u8, 96u8, 139u8, 158u8, 209u8, 44u8, + 247u8, 149u8, 226u8, 150u8, 181u8, 35u8, 22u8, 89u8, 106u8, 100u8, + 88u8, 72u8, 43u8, 31u8, 91u8, 210u8, 130u8, 38u8, 171u8, 192u8, 173u8, + 15u8, + ] + { + let entry = PvfActiveVoteMap(_0); + self.client.storage().fetch(&entry, block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " All currently active PVF pre-checking votes."] + #[doc = ""] + #[doc = " Invariant:"] + #[doc = " - There are no PVF pre-checking votes that exists in list but not in the set and vice versa."] + pub async fn pvf_active_vote_map_iter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::subxt::KeyIter<'a, T, PvfActiveVoteMap<'a>>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 245u8, 158u8, 140u8, 115u8, 132u8, 96u8, 139u8, 158u8, 209u8, 44u8, + 247u8, 149u8, 226u8, 150u8, 181u8, 35u8, 22u8, 89u8, 106u8, 100u8, + 88u8, 72u8, 43u8, 31u8, 91u8, 210u8, 130u8, 38u8, 171u8, 192u8, 173u8, + 15u8, + ] + { + self.client.storage().iter(block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The list of all currently active PVF votes. Auxiliary to `PvfActiveVoteMap`."] + pub async fn pvf_active_vote_list( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::std::vec::Vec< + runtime_types::polkadot_parachain::primitives::ValidationCodeHash, + >, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? 
+ == [ + 154u8, 101u8, 96u8, 29u8, 134u8, 220u8, 111u8, 135u8, 207u8, 255u8, + 121u8, 52u8, 188u8, 108u8, 101u8, 7u8, 138u8, 255u8, 13u8, 58u8, 211u8, + 131u8, 66u8, 126u8, 53u8, 207u8, 119u8, 13u8, 39u8, 177u8, 89u8, 33u8, + ] + { + let entry = PvfActiveVoteList; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " All parachains. Ordered ascending by `ParaId`. Parathreads are not included."] + #[doc = ""] + #[doc = " Consider using the [`ParachainsCache`] type of modifying."] + pub async fn parachains( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::std::vec::Vec, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 174u8, 146u8, 170u8, 102u8, 125u8, 176u8, 74u8, 177u8, 28u8, 54u8, + 13u8, 73u8, 188u8, 248u8, 78u8, 144u8, 88u8, 183u8, 224u8, 69u8, 224u8, + 31u8, 30u8, 115u8, 191u8, 166u8, 252u8, 218u8, 114u8, 241u8, 110u8, + 39u8, + ] + { + let entry = Parachains; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The current lifecycle of a all known Para IDs."] + pub async fn para_lifecycles( + &self, + _0: &runtime_types::polkadot_parachain::primitives::Id, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::core::option::Option< + runtime_types::polkadot_runtime_parachains::paras::ParaLifecycle, + >, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 38u8, 31u8, 0u8, 253u8, 63u8, 27u8, 13u8, 12u8, 247u8, 34u8, 21u8, + 166u8, 166u8, 236u8, 178u8, 217u8, 230u8, 117u8, 215u8, 8u8, 149u8, + 37u8, 231u8, 160u8, 226u8, 89u8, 12u8, 162u8, 197u8, 237u8, 235u8, + 127u8, + ] + { + let entry = ParaLifecycles(_0); + self.client.storage().fetch(&entry, block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The current lifecycle of a all known Para IDs."] + pub async fn para_lifecycles_iter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::subxt::KeyIter<'a, T, ParaLifecycles<'a>>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 38u8, 31u8, 0u8, 253u8, 63u8, 27u8, 13u8, 12u8, 247u8, 34u8, 21u8, + 166u8, 166u8, 236u8, 178u8, 217u8, 230u8, 117u8, 215u8, 8u8, 149u8, + 37u8, 231u8, 160u8, 226u8, 89u8, 12u8, 162u8, 197u8, 237u8, 235u8, + 127u8, + ] + { + self.client.storage().iter(block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The head-data of every registered para."] + pub async fn heads( + &self, + _0: &runtime_types::polkadot_parachain::primitives::Id, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::core::option::Option, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? 
+ == [ + 242u8, 145u8, 237u8, 33u8, 204u8, 183u8, 18u8, 135u8, 182u8, 47u8, + 220u8, 187u8, 118u8, 79u8, 163u8, 122u8, 227u8, 215u8, 43u8, 70u8, + 24u8, 33u8, 74u8, 113u8, 67u8, 25u8, 47u8, 210u8, 136u8, 236u8, 83u8, + 148u8, + ] + { + let entry = Heads(_0); + self.client.storage().fetch(&entry, block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The head-data of every registered para."] + pub async fn heads_iter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result<::subxt::KeyIter<'a, T, Heads<'a>>, ::subxt::BasicError> + { + if self.client.metadata().storage_hash::()? + == [ + 242u8, 145u8, 237u8, 33u8, 204u8, 183u8, 18u8, 135u8, 182u8, 47u8, + 220u8, 187u8, 118u8, 79u8, 163u8, 122u8, 227u8, 215u8, 43u8, 70u8, + 24u8, 33u8, 74u8, 113u8, 67u8, 25u8, 47u8, 210u8, 136u8, 236u8, 83u8, + 148u8, + ] + { + self.client.storage().iter(block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The validation code hash of every live para."] + #[doc = ""] + #[doc = " Corresponding code can be retrieved with [`CodeByHash`]."] + pub async fn current_code_hash( + &self, + _0: &runtime_types::polkadot_parachain::primitives::Id, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::core::option::Option< + runtime_types::polkadot_parachain::primitives::ValidationCodeHash, + >, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 22u8, 155u8, 81u8, 176u8, 112u8, 20u8, 205u8, 107u8, 87u8, 40u8, 219u8, + 0u8, 112u8, 111u8, 97u8, 196u8, 161u8, 111u8, 207u8, 247u8, 91u8, 47u8, + 163u8, 209u8, 188u8, 144u8, 37u8, 102u8, 240u8, 21u8, 33u8, 77u8, + ] + { + let entry = CurrentCodeHash(_0); + self.client.storage().fetch(&entry, block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The validation code hash of every live para."] + #[doc = ""] + #[doc = " Corresponding code can be retrieved with [`CodeByHash`]."] + pub async fn current_code_hash_iter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::subxt::KeyIter<'a, T, CurrentCodeHash<'a>>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 22u8, 155u8, 81u8, 176u8, 112u8, 20u8, 205u8, 107u8, 87u8, 40u8, 219u8, + 0u8, 112u8, 111u8, 97u8, 196u8, 161u8, 111u8, 207u8, 247u8, 91u8, 47u8, + 163u8, 209u8, 188u8, 144u8, 37u8, 102u8, 240u8, 21u8, 33u8, 77u8, + ] + { + self.client.storage().iter(block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Actual past code hash, indicated by the para id as well as the block number at which it"] + #[doc = " became outdated."] + #[doc = ""] + #[doc = " Corresponding code can be retrieved with [`CodeByHash`]."] + pub async fn past_code_hash( + &self, + _0: &runtime_types::polkadot_parachain::primitives::Id, + _1: &::core::primitive::u32, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::core::option::Option< + runtime_types::polkadot_parachain::primitives::ValidationCodeHash, + >, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? 
+ == [ + 26u8, 56u8, 165u8, 239u8, 180u8, 241u8, 183u8, 26u8, 106u8, 71u8, + 213u8, 114u8, 124u8, 190u8, 69u8, 128u8, 159u8, 119u8, 145u8, 47u8, + 93u8, 64u8, 74u8, 76u8, 220u8, 121u8, 144u8, 162u8, 163u8, 149u8, + 132u8, 6u8, + ] + { + let entry = PastCodeHash(_0, _1); + self.client.storage().fetch(&entry, block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Actual past code hash, indicated by the para id as well as the block number at which it"] + #[doc = " became outdated."] + #[doc = ""] + #[doc = " Corresponding code can be retrieved with [`CodeByHash`]."] + pub async fn past_code_hash_iter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::subxt::KeyIter<'a, T, PastCodeHash<'a>>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 26u8, 56u8, 165u8, 239u8, 180u8, 241u8, 183u8, 26u8, 106u8, 71u8, + 213u8, 114u8, 124u8, 190u8, 69u8, 128u8, 159u8, 119u8, 145u8, 47u8, + 93u8, 64u8, 74u8, 76u8, 220u8, 121u8, 144u8, 162u8, 163u8, 149u8, + 132u8, 6u8, + ] + { + self.client.storage().iter(block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Past code of parachains. The parachains themselves may not be registered anymore,"] + #[doc = " but we also keep their code on-chain for the same amount of time as outdated code"] + #[doc = " to keep it available for secondary checkers."] + pub async fn past_code_meta( + &self, + _0: &runtime_types::polkadot_parachain::primitives::Id, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + runtime_types::polkadot_runtime_parachains::paras::ParaPastCodeMeta< + ::core::primitive::u32, + >, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 121u8, 14u8, 91u8, 135u8, 231u8, 67u8, 189u8, 66u8, 108u8, 27u8, 241u8, + 117u8, 101u8, 34u8, 24u8, 16u8, 52u8, 198u8, 205u8, 155u8, 138u8, 9u8, + 140u8, 207u8, 27u8, 172u8, 212u8, 217u8, 47u8, 134u8, 122u8, 162u8, + ] + { + let entry = PastCodeMeta(_0); + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Past code of parachains. The parachains themselves may not be registered anymore,"] + #[doc = " but we also keep their code on-chain for the same amount of time as outdated code"] + #[doc = " to keep it available for secondary checkers."] + pub async fn past_code_meta_iter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::subxt::KeyIter<'a, T, PastCodeMeta<'a>>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? 
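+ // The `*_iter` variants take no map key: after the same metadata check they call
+ // `storage().iter(block_hash)`, which returns a `KeyIter` that walks every
+ // `(key, value)` pair of the `PastCodeMeta` map rather than fetching one para's
+ // entry.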
+ == [ + 121u8, 14u8, 91u8, 135u8, 231u8, 67u8, 189u8, 66u8, 108u8, 27u8, 241u8, + 117u8, 101u8, 34u8, 24u8, 16u8, 52u8, 198u8, 205u8, 155u8, 138u8, 9u8, + 140u8, 207u8, 27u8, 172u8, 212u8, 217u8, 47u8, 134u8, 122u8, 162u8, + ] + { + self.client.storage().iter(block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Which paras have past code that needs pruning and the relay-chain block at which the code was replaced."] + #[doc = " Note that this is the actual height of the included block, not the expected height at which the"] + #[doc = " code upgrade would be applied, although they may be equal."] + #[doc = " This is to ensure the entire acceptance period is covered, not an offset acceptance period starting"] + #[doc = " from the time at which the parachain perceives a code upgrade as having occurred."] + #[doc = " Multiple entries for a single para are permitted. Ordered ascending by block number."] + pub async fn past_code_pruning( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::std::vec::Vec<( + runtime_types::polkadot_parachain::primitives::Id, + ::core::primitive::u32, + )>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 142u8, 32u8, 134u8, 51u8, 34u8, 214u8, 75u8, 69u8, 77u8, 178u8, 103u8, + 117u8, 180u8, 105u8, 249u8, 178u8, 143u8, 25u8, 212u8, 207u8, 28u8, + 28u8, 175u8, 193u8, 43u8, 58u8, 51u8, 149u8, 155u8, 204u8, 37u8, 153u8, + ] + { + let entry = PastCodePruning; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The block number at which the planned code change is expected for a para."] + #[doc = " The change will be applied after the first parablock for this ID included which executes"] + #[doc = " in the context of a relay chain block with a number >= `expected_at`."] + pub async fn future_code_upgrades( + &self, + _0: &runtime_types::polkadot_parachain::primitives::Id, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::core::option::Option<::core::primitive::u32>, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .storage_hash::()? + == [ + 211u8, 254u8, 201u8, 63u8, 89u8, 112u8, 57u8, 82u8, 255u8, 163u8, 49u8, + 246u8, 197u8, 154u8, 55u8, 10u8, 65u8, 188u8, 172u8, 110u8, 194u8, + 155u8, 37u8, 44u8, 250u8, 154u8, 4u8, 184u8, 225u8, 79u8, 248u8, 80u8, + ] + { + let entry = FutureCodeUpgrades(_0); + self.client.storage().fetch(&entry, block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The block number at which the planned code change is expected for a para."] + #[doc = " The change will be applied after the first parablock for this ID included which executes"] + #[doc = " in the context of a relay chain block with a number >= `expected_at`."] + pub async fn future_code_upgrades_iter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::subxt::KeyIter<'a, T, FutureCodeUpgrades<'a>>, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .storage_hash::()? 
+ == [ + 211u8, 254u8, 201u8, 63u8, 89u8, 112u8, 57u8, 82u8, 255u8, 163u8, 49u8, + 246u8, 197u8, 154u8, 55u8, 10u8, 65u8, 188u8, 172u8, 110u8, 194u8, + 155u8, 37u8, 44u8, 250u8, 154u8, 4u8, 184u8, 225u8, 79u8, 248u8, 80u8, + ] + { + self.client.storage().iter(block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The actual future code hash of a para."] + #[doc = ""] + #[doc = " Corresponding code can be retrieved with [`CodeByHash`]."] + pub async fn future_code_hash( + &self, + _0: &runtime_types::polkadot_parachain::primitives::Id, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::core::option::Option< + runtime_types::polkadot_parachain::primitives::ValidationCodeHash, + >, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 89u8, 18u8, 35u8, 84u8, 61u8, 191u8, 189u8, 140u8, 223u8, 6u8, 38u8, + 238u8, 22u8, 72u8, 221u8, 168u8, 239u8, 113u8, 33u8, 254u8, 41u8, 96u8, + 102u8, 173u8, 131u8, 111u8, 11u8, 112u8, 65u8, 71u8, 189u8, 121u8, + ] + { + let entry = FutureCodeHash(_0); + self.client.storage().fetch(&entry, block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The actual future code hash of a para."] + #[doc = ""] + #[doc = " Corresponding code can be retrieved with [`CodeByHash`]."] + pub async fn future_code_hash_iter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::subxt::KeyIter<'a, T, FutureCodeHash<'a>>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 89u8, 18u8, 35u8, 84u8, 61u8, 191u8, 189u8, 140u8, 223u8, 6u8, 38u8, + 238u8, 22u8, 72u8, 221u8, 168u8, 239u8, 113u8, 33u8, 254u8, 41u8, 96u8, + 102u8, 173u8, 131u8, 111u8, 11u8, 112u8, 65u8, 71u8, 189u8, 121u8, + ] + { + self.client.storage().iter(block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " This is used by the relay-chain to communicate to a parachain a go-ahead with in the upgrade procedure."] + #[doc = ""] + #[doc = " This value is absent when there are no upgrades scheduled or during the time the relay chain"] + #[doc = " performs the checks. It is set at the first relay-chain block when the corresponding parachain"] + #[doc = " can switch its upgrade function. As soon as the parachain's block is included, the value"] + #[doc = " gets reset to `None`."] + #[doc = ""] + #[doc = " NOTE that this field is used by parachains via merkle storage proofs, therefore changing"] + #[doc = " the format will require migration of parachains."] + pub async fn upgrade_go_ahead_signal( + &self, + _0: &runtime_types::polkadot_parachain::primitives::Id, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::core::option::Option, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .storage_hash::()? 
+ == [ + 100u8, 87u8, 135u8, 185u8, 95u8, 13u8, 74u8, 134u8, 19u8, 97u8, 80u8, + 104u8, 177u8, 30u8, 82u8, 145u8, 171u8, 250u8, 99u8, 214u8, 26u8, + 243u8, 118u8, 118u8, 19u8, 188u8, 187u8, 142u8, 138u8, 68u8, 54u8, + 114u8, + ] + { + let entry = UpgradeGoAheadSignal(_0); + self.client.storage().fetch(&entry, block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " This is used by the relay-chain to communicate to a parachain a go-ahead with in the upgrade procedure."] + #[doc = ""] + #[doc = " This value is absent when there are no upgrades scheduled or during the time the relay chain"] + #[doc = " performs the checks. It is set at the first relay-chain block when the corresponding parachain"] + #[doc = " can switch its upgrade function. As soon as the parachain's block is included, the value"] + #[doc = " gets reset to `None`."] + #[doc = ""] + #[doc = " NOTE that this field is used by parachains via merkle storage proofs, therefore changing"] + #[doc = " the format will require migration of parachains."] + pub async fn upgrade_go_ahead_signal_iter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::subxt::KeyIter<'a, T, UpgradeGoAheadSignal<'a>>, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .storage_hash::()? + == [ + 100u8, 87u8, 135u8, 185u8, 95u8, 13u8, 74u8, 134u8, 19u8, 97u8, 80u8, + 104u8, 177u8, 30u8, 82u8, 145u8, 171u8, 250u8, 99u8, 214u8, 26u8, + 243u8, 118u8, 118u8, 19u8, 188u8, 187u8, 142u8, 138u8, 68u8, 54u8, + 114u8, + ] + { + self.client.storage().iter(block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " This is used by the relay-chain to communicate that there are restrictions for performing"] + #[doc = " an upgrade for this parachain."] + #[doc = ""] + #[doc = " This may be a because the parachain waits for the upgrade cooldown to expire. Another"] + #[doc = " potential use case is when we want to perform some maintenance (such as storage migration)"] + #[doc = " we could restrict upgrades to make the process simpler."] + #[doc = ""] + #[doc = " NOTE that this field is used by parachains via merkle storage proofs, therefore changing"] + #[doc = " the format will require migration of parachains."] + pub async fn upgrade_restriction_signal( + &self, + _0: &runtime_types::polkadot_parachain::primitives::Id, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::core::option::Option< + runtime_types::polkadot_primitives::v2::UpgradeRestriction, + >, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .storage_hash::()? + == [ + 173u8, 198u8, 89u8, 108u8, 43u8, 93u8, 143u8, 224u8, 141u8, 248u8, + 238u8, 221u8, 237u8, 220u8, 140u8, 24u8, 7u8, 14u8, 136u8, 251u8, + 159u8, 190u8, 70u8, 98u8, 100u8, 118u8, 24u8, 212u8, 82u8, 96u8, 120u8, + 206u8, + ] + { + let entry = UpgradeRestrictionSignal(_0); + self.client.storage().fetch(&entry, block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " This is used by the relay-chain to communicate that there are restrictions for performing"] + #[doc = " an upgrade for this parachain."] + #[doc = ""] + #[doc = " This may be a because the parachain waits for the upgrade cooldown to expire. 
Another"] + #[doc = " potential use case is when we want to perform some maintenance (such as storage migration)"] + #[doc = " we could restrict upgrades to make the process simpler."] + #[doc = ""] + #[doc = " NOTE that this field is used by parachains via merkle storage proofs, therefore changing"] + #[doc = " the format will require migration of parachains."] + pub async fn upgrade_restriction_signal_iter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::subxt::KeyIter<'a, T, UpgradeRestrictionSignal<'a>>, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .storage_hash::()? + == [ + 173u8, 198u8, 89u8, 108u8, 43u8, 93u8, 143u8, 224u8, 141u8, 248u8, + 238u8, 221u8, 237u8, 220u8, 140u8, 24u8, 7u8, 14u8, 136u8, 251u8, + 159u8, 190u8, 70u8, 98u8, 100u8, 118u8, 24u8, 212u8, 82u8, 96u8, 120u8, + 206u8, + ] + { + self.client.storage().iter(block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The list of parachains that are awaiting for their upgrade restriction to cooldown."] + #[doc = ""] + #[doc = " Ordered ascending by block number."] + pub async fn upgrade_cooldowns( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::std::vec::Vec<( + runtime_types::polkadot_parachain::primitives::Id, + ::core::primitive::u32, + )>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 120u8, 214u8, 165u8, 35u8, 125u8, 56u8, 152u8, 76u8, 124u8, 159u8, + 160u8, 93u8, 16u8, 30u8, 208u8, 199u8, 162u8, 74u8, 124u8, 141u8, + 137u8, 237u8, 229u8, 61u8, 62u8, 71u8, 54u8, 92u8, 243u8, 208u8, 114u8, + 19u8, + ] + { + let entry = UpgradeCooldowns; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The list of upcoming code upgrades. Each item is a pair of which para performs a code"] + #[doc = " upgrade and at which relay-chain block it is expected at."] + #[doc = ""] + #[doc = " Ordered ascending by block number."] + pub async fn upcoming_upgrades( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::std::vec::Vec<( + runtime_types::polkadot_parachain::primitives::Id, + ::core::primitive::u32, + )>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 16u8, 74u8, 254u8, 39u8, 241u8, 98u8, 106u8, 203u8, 189u8, 157u8, 66u8, + 99u8, 164u8, 176u8, 20u8, 206u8, 15u8, 212u8, 229u8, 9u8, 117u8, 214u8, + 250u8, 8u8, 51u8, 80u8, 35u8, 236u8, 120u8, 4u8, 246u8, 62u8, + ] + { + let entry = UpcomingUpgrades; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The actions to perform during the start of a specific session index."] + pub async fn actions_queue( + &self, + _0: &::core::primitive::u32, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::std::vec::Vec, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? 
+ == [ + 103u8, 197u8, 76u8, 84u8, 133u8, 3u8, 67u8, 57u8, 107u8, 31u8, 87u8, + 33u8, 196u8, 130u8, 119u8, 93u8, 171u8, 173u8, 76u8, 242u8, 22u8, 15u8, + 133u8, 193u8, 122u8, 0u8, 112u8, 121u8, 233u8, 29u8, 17u8, 185u8, + ] + { + let entry = ActionsQueue(_0); + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The actions to perform during the start of a specific session index."] + pub async fn actions_queue_iter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::subxt::KeyIter<'a, T, ActionsQueue<'a>>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 103u8, 197u8, 76u8, 84u8, 133u8, 3u8, 67u8, 57u8, 107u8, 31u8, 87u8, + 33u8, 196u8, 130u8, 119u8, 93u8, 171u8, 173u8, 76u8, 242u8, 22u8, 15u8, + 133u8, 193u8, 122u8, 0u8, 112u8, 121u8, 233u8, 29u8, 17u8, 185u8, + ] + { + self.client.storage().iter(block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Upcoming paras instantiation arguments."] + #[doc = ""] + #[doc = " NOTE that after PVF pre-checking is enabled the para genesis arg will have it's code set"] + #[doc = " to empty. Instead, the code will be saved into the storage right away via `CodeByHash`."] + pub async fn upcoming_paras_genesis( + &self, + _0: &runtime_types::polkadot_parachain::primitives::Id, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::core::option::Option< + runtime_types::polkadot_runtime_parachains::paras::ParaGenesisArgs, + >, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .storage_hash::()? + == [ + 98u8, 249u8, 92u8, 177u8, 21u8, 84u8, 199u8, 194u8, 150u8, 213u8, + 143u8, 107u8, 99u8, 194u8, 141u8, 225u8, 55u8, 94u8, 44u8, 147u8, + 209u8, 144u8, 118u8, 66u8, 139u8, 170u8, 68u8, 62u8, 45u8, 137u8, 91u8, + 8u8, + ] + { + let entry = UpcomingParasGenesis(_0); + self.client.storage().fetch(&entry, block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Upcoming paras instantiation arguments."] + #[doc = ""] + #[doc = " NOTE that after PVF pre-checking is enabled the para genesis arg will have it's code set"] + #[doc = " to empty. Instead, the code will be saved into the storage right away via `CodeByHash`."] + pub async fn upcoming_paras_genesis_iter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::subxt::KeyIter<'a, T, UpcomingParasGenesis<'a>>, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .storage_hash::()? + == [ + 98u8, 249u8, 92u8, 177u8, 21u8, 84u8, 199u8, 194u8, 150u8, 213u8, + 143u8, 107u8, 99u8, 194u8, 141u8, 225u8, 55u8, 94u8, 44u8, 147u8, + 209u8, 144u8, 118u8, 66u8, 139u8, 170u8, 68u8, 62u8, 45u8, 137u8, 91u8, + 8u8, + ] + { + self.client.storage().iter(block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The number of reference on the validation code in [`CodeByHash`] storage."] + pub async fn code_by_hash_refs( + &self, + _0: &runtime_types::polkadot_parachain::primitives::ValidationCodeHash, + block_hash: ::core::option::Option, + ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> + { + if self.client.metadata().storage_hash::()? 
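+ // Accessors with an `Option<..>` return type (such as `code_by_hash` further down)
+ // use `storage().fetch(..)`, so a missing key surfaces as `None`; plain-valued
+ // entries like `CodeByHashRefs` here use `fetch_or_default(..)`, so a code hash
+ // that was never referenced simply decodes as the default `u32` reference count.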
+ == [ + 70u8, 116u8, 27u8, 141u8, 242u8, 54u8, 32u8, 253u8, 176u8, 224u8, + 241u8, 171u8, 22u8, 45u8, 189u8, 95u8, 137u8, 24u8, 211u8, 181u8, + 123u8, 141u8, 200u8, 49u8, 214u8, 177u8, 176u8, 219u8, 178u8, 101u8, + 69u8, 204u8, + ] + { + let entry = CodeByHashRefs(_0); + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The number of reference on the validation code in [`CodeByHash`] storage."] + pub async fn code_by_hash_refs_iter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::subxt::KeyIter<'a, T, CodeByHashRefs<'a>>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 70u8, 116u8, 27u8, 141u8, 242u8, 54u8, 32u8, 253u8, 176u8, 224u8, + 241u8, 171u8, 22u8, 45u8, 189u8, 95u8, 137u8, 24u8, 211u8, 181u8, + 123u8, 141u8, 200u8, 49u8, 214u8, 177u8, 176u8, 219u8, 178u8, 101u8, + 69u8, 204u8, + ] + { + self.client.storage().iter(block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Validation code stored by its hash."] + #[doc = ""] + #[doc = " This storage is consistent with [`FutureCodeHash`], [`CurrentCodeHash`] and"] + #[doc = " [`PastCodeHash`]."] + pub async fn code_by_hash( + &self, + _0: &runtime_types::polkadot_parachain::primitives::ValidationCodeHash, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::core::option::Option< + runtime_types::polkadot_parachain::primitives::ValidationCode, + >, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 173u8, 226u8, 170u8, 98u8, 93u8, 151u8, 151u8, 250u8, 215u8, 64u8, + 137u8, 97u8, 15u8, 200u8, 188u8, 113u8, 192u8, 195u8, 179u8, 229u8, + 141u8, 239u8, 97u8, 95u8, 100u8, 47u8, 202u8, 135u8, 110u8, 225u8, + 243u8, 153u8, + ] + { + let entry = CodeByHash(_0); + self.client.storage().fetch(&entry, block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Validation code stored by its hash."] + #[doc = ""] + #[doc = " This storage is consistent with [`FutureCodeHash`], [`CurrentCodeHash`] and"] + #[doc = " [`PastCodeHash`]."] + pub async fn code_by_hash_iter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::subxt::KeyIter<'a, T, CodeByHash<'a>>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 173u8, 226u8, 170u8, 98u8, 93u8, 151u8, 151u8, 250u8, 215u8, 64u8, + 137u8, 97u8, 15u8, 200u8, 188u8, 113u8, 192u8, 195u8, 179u8, 229u8, + 141u8, 239u8, 97u8, 95u8, 100u8, 47u8, 202u8, 135u8, 110u8, 225u8, + 243u8, 153u8, + ] + { + self.client.storage().iter(block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + pub mod constants { + use super::runtime_types; + pub struct ConstantsApi<'a, T: ::subxt::Config> { + client: &'a ::subxt::Client, + } + impl<'a, T: ::subxt::Config> ConstantsApi<'a, T> { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { client } + } + pub fn unsigned_priority( + &self, + ) -> ::core::result::Result<::core::primitive::u64, ::subxt::BasicError> + { + if self + .client + .metadata() + .constant_hash("Paras", "UnsignedPriority")? 
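+ // Pallet constants follow the same scheme as storage entries and calls: the hash
+ // of the constant's metadata is checked first, and only then is the raw
+ // `constant.value` byte blob SCALE-decoded into the expected type (`u64` for
+ // `UnsignedPriority`).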
+ == [ + 78u8, 226u8, 84u8, 70u8, 162u8, 23u8, 167u8, 100u8, 156u8, 228u8, + 119u8, 16u8, 28u8, 202u8, 21u8, 71u8, 72u8, 244u8, 3u8, 255u8, 243u8, + 55u8, 109u8, 238u8, 26u8, 180u8, 207u8, 175u8, 221u8, 27u8, 213u8, + 217u8, + ] + { + let pallet = self.client.metadata().pallet("Paras")?; + let constant = pallet.constant("UnsignedPriority")?; + let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; + Ok(value) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + } + pub mod initializer { + use super::root_mod; + use super::runtime_types; + pub mod calls { + use super::root_mod; + use super::runtime_types; + type DispatchError = runtime_types::sp_runtime::DispatchError; + #[derive( + :: subxt :: codec :: CompactAs, + :: subxt :: codec :: Decode, + :: subxt :: codec :: Encode, + Debug, + )] + pub struct ForceApprove { + pub up_to: ::core::primitive::u32, + } + impl ::subxt::Call for ForceApprove { + const PALLET: &'static str = "Initializer"; + const FUNCTION: &'static str = "force_approve"; + } + pub struct TransactionApi<'a, T: ::subxt::Config, X> { + client: &'a ::subxt::Client, + marker: ::core::marker::PhantomData, + } + impl<'a, T, X> TransactionApi<'a, T, X> + where + T: ::subxt::Config, + X: ::subxt::extrinsic::ExtrinsicParams, + { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { + client, + marker: ::core::marker::PhantomData, + } + } + #[doc = "Issue a signal to the consensus engine to forcibly act as though all parachain"] + #[doc = "blocks in all relay chain blocks up to and including the given number in the current"] + #[doc = "chain are valid and should be finalized."] + pub fn force_approve( + &self, + up_to: ::core::primitive::u32, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + ForceApprove, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? 
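+ // Call wrappers perform the equivalent check against `call_hash` and, on success,
+ // only construct the extrinsic: the returned `SubmittableExtrinsic` wraps the
+ // encoded `ForceApprove { up_to }` call and still has to be signed and submitted
+ // by the caller; nothing is sent to the node from inside this method.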
+ == [ + 61u8, 29u8, 75u8, 222u8, 82u8, 250u8, 124u8, 164u8, 70u8, 114u8, 150u8, + 28u8, 103u8, 53u8, 185u8, 147u8, 168u8, 239u8, 207u8, 197u8, 23u8, + 158u8, 16u8, 255u8, 139u8, 18u8, 214u8, 174u8, 53u8, 191u8, 49u8, 73u8, + ] + { + let call = ForceApprove { up_to }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + pub mod storage { + use super::runtime_types; + pub struct HasInitialized; + impl ::subxt::StorageEntry for HasInitialized { + const PALLET: &'static str = "Initializer"; + const STORAGE: &'static str = "HasInitialized"; + type Value = (); + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct BufferedSessionChanges; + impl ::subxt::StorageEntry for BufferedSessionChanges { + const PALLET: &'static str = "Initializer"; + const STORAGE: &'static str = "BufferedSessionChanges"; + type Value = ::std::vec::Vec< + runtime_types::polkadot_runtime_parachains::initializer::BufferedSessionChange, + >; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct StorageApi<'a, T: ::subxt::Config> { + client: &'a ::subxt::Client, + } + impl<'a, T: ::subxt::Config> StorageApi<'a, T> { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { client } + } + #[doc = " Whether the parachains modules have been initialized within this block."] + #[doc = ""] + #[doc = " Semantically a `bool`, but this guarantees it should never hit the trie,"] + #[doc = " as this is cleared in `on_finalize` and Frame optimizes `None` values to be empty values."] + #[doc = ""] + #[doc = " As a `bool`, `set(false)` and `remove()` both lead to the next `get()` being false, but one of"] + #[doc = " them writes to the trie and one does not. This confusion makes `Option<()>` more suitable for"] + #[doc = " the semantics of this variable."] + pub async fn has_initialized( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result<::core::option::Option<()>, ::subxt::BasicError> + { + if self.client.metadata().storage_hash::()? + == [ + 251u8, 135u8, 247u8, 61u8, 139u8, 102u8, 12u8, 122u8, 227u8, 123u8, + 11u8, 232u8, 120u8, 80u8, 81u8, 48u8, 216u8, 115u8, 159u8, 131u8, + 133u8, 105u8, 200u8, 122u8, 114u8, 6u8, 109u8, 4u8, 164u8, 204u8, + 214u8, 111u8, + ] + { + let entry = HasInitialized; + self.client.storage().fetch(&entry, block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Buffered session changes along with the block number at which they should be applied."] + #[doc = ""] + #[doc = " Typically this will be empty or one element long. Apart from that this item never hits"] + #[doc = " the storage."] + #[doc = ""] + #[doc = " However this is a `Vec` regardless to handle various edge cases that may occur at runtime"] + #[doc = " upgrade boundaries or if governance intervenes."] pub async fn buffered_session_changes (& self , block_hash : :: core :: option :: Option < T :: Hash > ,) -> :: core :: result :: Result < :: std :: vec :: Vec < runtime_types :: polkadot_runtime_parachains :: initializer :: BufferedSessionChange > , :: subxt :: BasicError >{ + if self + .client + .metadata() + .storage_hash::()? 
+ == [ + 78u8, 99u8, 243u8, 162u8, 81u8, 154u8, 54u8, 67u8, 201u8, 223u8, 231u8, + 45u8, 78u8, 146u8, 170u8, 176u8, 55u8, 109u8, 35u8, 214u8, 246u8, + 112u8, 26u8, 150u8, 216u8, 140u8, 67u8, 125u8, 112u8, 43u8, 40u8, + 156u8, + ] + { + let entry = BufferedSessionChanges; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + } + pub mod dmp { + use super::root_mod; + use super::runtime_types; + pub mod calls { + use super::root_mod; + use super::runtime_types; + type DispatchError = runtime_types::sp_runtime::DispatchError; + pub struct TransactionApi<'a, T: ::subxt::Config, X> { + client: &'a ::subxt::Client, + marker: ::core::marker::PhantomData, + } + impl<'a, T, X> TransactionApi<'a, T, X> + where + T: ::subxt::Config, + X: ::subxt::extrinsic::ExtrinsicParams, + { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { + client, + marker: ::core::marker::PhantomData, + } + } + } + } + pub mod storage { + use super::runtime_types; + pub struct DownwardMessageQueues<'a>( + pub &'a runtime_types::polkadot_parachain::primitives::Id, + ); + impl ::subxt::StorageEntry for DownwardMessageQueues<'_> { + const PALLET: &'static str = "Dmp"; + const STORAGE: &'static str = "DownwardMessageQueues"; + type Value = ::std::vec::Vec< + runtime_types::polkadot_core_primitives::InboundDownwardMessage< + ::core::primitive::u32, + >, + >; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( + &self.0, + ::subxt::StorageHasher::Twox64Concat, + )]) + } + } + pub struct DownwardMessageQueueHeads<'a>( + pub &'a runtime_types::polkadot_parachain::primitives::Id, + ); + impl ::subxt::StorageEntry for DownwardMessageQueueHeads<'_> { + const PALLET: &'static str = "Dmp"; + const STORAGE: &'static str = "DownwardMessageQueueHeads"; + type Value = ::subxt::sp_core::H256; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( + &self.0, + ::subxt::StorageHasher::Twox64Concat, + )]) + } + } + pub struct StorageApi<'a, T: ::subxt::Config> { + client: &'a ::subxt::Client, + } + impl<'a, T: ::subxt::Config> StorageApi<'a, T> { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { client } + } + #[doc = " The downward messages addressed for a certain para."] + pub async fn downward_message_queues( + &self, + _0: &runtime_types::polkadot_parachain::primitives::Id, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::std::vec::Vec< + runtime_types::polkadot_core_primitives::InboundDownwardMessage< + ::core::primitive::u32, + >, + >, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .storage_hash::()? + == [ + 104u8, 117u8, 177u8, 125u8, 208u8, 212u8, 216u8, 171u8, 212u8, 235u8, + 43u8, 255u8, 146u8, 230u8, 243u8, 27u8, 133u8, 109u8, 129u8, 162u8, + 247u8, 23u8, 195u8, 9u8, 219u8, 235u8, 119u8, 220u8, 179u8, 198u8, + 130u8, 4u8, + ] + { + let entry = DownwardMessageQueues(_0); + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The downward messages addressed for a certain para."] + pub async fn downward_message_queues_iter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::subxt::KeyIter<'a, T, DownwardMessageQueues<'a>>, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .storage_hash::()? 
+ == [ + 104u8, 117u8, 177u8, 125u8, 208u8, 212u8, 216u8, 171u8, 212u8, 235u8, + 43u8, 255u8, 146u8, 230u8, 243u8, 27u8, 133u8, 109u8, 129u8, 162u8, + 247u8, 23u8, 195u8, 9u8, 219u8, 235u8, 119u8, 220u8, 179u8, 198u8, + 130u8, 4u8, + ] + { + self.client.storage().iter(block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " A mapping that stores the downward message queue MQC head for each para."] + #[doc = ""] + #[doc = " Each link in this chain has a form:"] + #[doc = " `(prev_head, B, H(M))`, where"] + #[doc = " - `prev_head`: is the previous head hash or zero if none."] + #[doc = " - `B`: is the relay-chain block number in which a message was appended."] + #[doc = " - `H(M)`: is the hash of the message being appended."] + pub async fn downward_message_queue_heads( + &self, + _0: &runtime_types::polkadot_parachain::primitives::Id, + block_hash: ::core::option::Option, + ) -> ::core::result::Result<::subxt::sp_core::H256, ::subxt::BasicError> + { + if self + .client + .metadata() + .storage_hash::()? + == [ + 47u8, 135u8, 173u8, 197u8, 128u8, 135u8, 195u8, 174u8, 186u8, 62u8, + 136u8, 160u8, 75u8, 221u8, 166u8, 13u8, 82u8, 131u8, 124u8, 197u8, + 88u8, 86u8, 205u8, 48u8, 170u8, 230u8, 71u8, 238u8, 146u8, 189u8, 3u8, + 154u8, + ] + { + let entry = DownwardMessageQueueHeads(_0); + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " A mapping that stores the downward message queue MQC head for each para."] + #[doc = ""] + #[doc = " Each link in this chain has a form:"] + #[doc = " `(prev_head, B, H(M))`, where"] + #[doc = " - `prev_head`: is the previous head hash or zero if none."] + #[doc = " - `B`: is the relay-chain block number in which a message was appended."] + #[doc = " - `H(M)`: is the hash of the message being appended."] + pub async fn downward_message_queue_heads_iter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::subxt::KeyIter<'a, T, DownwardMessageQueueHeads<'a>>, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .storage_hash::()? 
+ == [ + 47u8, 135u8, 173u8, 197u8, 128u8, 135u8, 195u8, 174u8, 186u8, 62u8, + 136u8, 160u8, 75u8, 221u8, 166u8, 13u8, 82u8, 131u8, 124u8, 197u8, + 88u8, 86u8, 205u8, 48u8, 170u8, 230u8, 71u8, 238u8, 146u8, 189u8, 3u8, + 154u8, + ] + { + self.client.storage().iter(block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + } + pub mod ump { + use super::root_mod; + use super::runtime_types; + pub mod calls { + use super::root_mod; + use super::runtime_types; + type DispatchError = runtime_types::sp_runtime::DispatchError; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct ServiceOverweight { + pub index: ::core::primitive::u64, + pub weight_limit: ::core::primitive::u64, + } + impl ::subxt::Call for ServiceOverweight { + const PALLET: &'static str = "Ump"; + const FUNCTION: &'static str = "service_overweight"; + } + pub struct TransactionApi<'a, T: ::subxt::Config, X> { + client: &'a ::subxt::Client, + marker: ::core::marker::PhantomData, + } + impl<'a, T, X> TransactionApi<'a, T, X> + where + T: ::subxt::Config, + X: ::subxt::extrinsic::ExtrinsicParams, + { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { + client, + marker: ::core::marker::PhantomData, + } + } + #[doc = "Service a single overweight upward message."] + #[doc = ""] + #[doc = "- `origin`: Must pass `ExecuteOverweightOrigin`."] + #[doc = "- `index`: The index of the overweight message to service."] + #[doc = "- `weight_limit`: The amount of weight that message execution may take."] + #[doc = ""] + #[doc = "Errors:"] + #[doc = "- `UnknownMessageIndex`: Message of `index` is unknown."] + #[doc = "- `WeightOverLimit`: Message execution may use greater than `weight_limit`."] + #[doc = ""] + #[doc = "Events:"] + #[doc = "- `OverweightServiced`: On success."] + pub fn service_overweight( + &self, + index: ::core::primitive::u64, + weight_limit: ::core::primitive::u64, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + ServiceOverweight, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? 
+ == [ + 229u8, 167u8, 106u8, 63u8, 141u8, 80u8, 8u8, 201u8, 156u8, 34u8, 47u8, + 104u8, 116u8, 57u8, 35u8, 216u8, 132u8, 3u8, 201u8, 169u8, 38u8, 107u8, + 149u8, 120u8, 42u8, 130u8, 100u8, 133u8, 214u8, 48u8, 99u8, 146u8, + ] + { + let call = ServiceOverweight { + index, + weight_limit, + }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + pub type Event = runtime_types::polkadot_runtime_parachains::ump::pallet::Event; + pub mod events { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "Upward message is invalid XCM."] + #[doc = "\\[ id \\]"] + pub struct InvalidFormat(pub [::core::primitive::u8; 32usize]); + impl ::subxt::Event for InvalidFormat { + const PALLET: &'static str = "Ump"; + const EVENT: &'static str = "InvalidFormat"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "Upward message is unsupported version of XCM."] + #[doc = "\\[ id \\]"] + pub struct UnsupportedVersion(pub [::core::primitive::u8; 32usize]); + impl ::subxt::Event for UnsupportedVersion { + const PALLET: &'static str = "Ump"; + const EVENT: &'static str = "UnsupportedVersion"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "Upward message executed with the given outcome."] + #[doc = "\\[ id, outcome \\]"] + pub struct ExecutedUpward( + pub [::core::primitive::u8; 32usize], + pub runtime_types::xcm::v2::traits::Outcome, + ); + impl ::subxt::Event for ExecutedUpward { + const PALLET: &'static str = "Ump"; + const EVENT: &'static str = "ExecutedUpward"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "The weight limit for handling upward messages was reached."] + #[doc = "\\[ id, remaining, required \\]"] + pub struct WeightExhausted( + pub [::core::primitive::u8; 32usize], + pub ::core::primitive::u64, + pub ::core::primitive::u64, + ); + impl ::subxt::Event for WeightExhausted { + const PALLET: &'static str = "Ump"; + const EVENT: &'static str = "WeightExhausted"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "Some upward messages have been received and will be processed."] + #[doc = "\\[ para, count, size \\]"] + pub struct UpwardMessagesReceived( + pub runtime_types::polkadot_parachain::primitives::Id, + pub ::core::primitive::u32, + pub ::core::primitive::u32, + ); + impl ::subxt::Event for UpwardMessagesReceived { + const PALLET: &'static str = "Ump"; + const EVENT: &'static str = "UpwardMessagesReceived"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "The weight budget was exceeded for an individual upward message."] + #[doc = ""] + #[doc = "This message can be later dispatched manually using `service_overweight` dispatchable"] + #[doc = "using the assigned `overweight_index`."] + #[doc = ""] + #[doc = "\\[ para, id, overweight_index, required \\]"] + pub struct OverweightEnqueued( + pub runtime_types::polkadot_parachain::primitives::Id, + pub [::core::primitive::u8; 32usize], + pub ::core::primitive::u64, + pub ::core::primitive::u64, + ); + impl ::subxt::Event for OverweightEnqueued { + const PALLET: &'static str = "Ump"; + const EVENT: &'static str = "OverweightEnqueued"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "Upward message from the overweight queue was executed with the 
given actual weight"] + #[doc = "used."] + #[doc = ""] + #[doc = "\\[ overweight_index, used \\]"] + pub struct OverweightServiced(pub ::core::primitive::u64, pub ::core::primitive::u64); + impl ::subxt::Event for OverweightServiced { + const PALLET: &'static str = "Ump"; + const EVENT: &'static str = "OverweightServiced"; + } + } + pub mod storage { + use super::runtime_types; + pub struct RelayDispatchQueues<'a>( + pub &'a runtime_types::polkadot_parachain::primitives::Id, + ); + impl ::subxt::StorageEntry for RelayDispatchQueues<'_> { + const PALLET: &'static str = "Ump"; + const STORAGE: &'static str = "RelayDispatchQueues"; + type Value = ::std::vec::Vec<::std::vec::Vec<::core::primitive::u8>>; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( + &self.0, + ::subxt::StorageHasher::Twox64Concat, + )]) + } + } + pub struct RelayDispatchQueueSize<'a>( + pub &'a runtime_types::polkadot_parachain::primitives::Id, + ); + impl ::subxt::StorageEntry for RelayDispatchQueueSize<'_> { + const PALLET: &'static str = "Ump"; + const STORAGE: &'static str = "RelayDispatchQueueSize"; + type Value = (::core::primitive::u32, ::core::primitive::u32); + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( + &self.0, + ::subxt::StorageHasher::Twox64Concat, + )]) + } + } + pub struct NeedsDispatch; + impl ::subxt::StorageEntry for NeedsDispatch { + const PALLET: &'static str = "Ump"; + const STORAGE: &'static str = "NeedsDispatch"; + type Value = ::std::vec::Vec; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct NextDispatchRoundStartWith; + impl ::subxt::StorageEntry for NextDispatchRoundStartWith { + const PALLET: &'static str = "Ump"; + const STORAGE: &'static str = "NextDispatchRoundStartWith"; + type Value = runtime_types::polkadot_parachain::primitives::Id; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct Overweight<'a>(pub &'a ::core::primitive::u64); + impl ::subxt::StorageEntry for Overweight<'_> { + const PALLET: &'static str = "Ump"; + const STORAGE: &'static str = "Overweight"; + type Value = ( + runtime_types::polkadot_parachain::primitives::Id, + ::std::vec::Vec<::core::primitive::u8>, + ); + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( + &self.0, + ::subxt::StorageHasher::Twox64Concat, + )]) + } + } + pub struct OverweightCount; + impl ::subxt::StorageEntry for OverweightCount { + const PALLET: &'static str = "Ump"; + const STORAGE: &'static str = "OverweightCount"; + type Value = ::core::primitive::u64; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct StorageApi<'a, T: ::subxt::Config> { + client: &'a ::subxt::Client, + } + impl<'a, T: ::subxt::Config> StorageApi<'a, T> { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { client } + } + #[doc = " The messages waiting to be handled by the relay-chain originating from a certain parachain."] + #[doc = ""] + #[doc = " Note that some upward messages might have been already processed by the inclusion logic. 
E.g."] + #[doc = " channel management messages."] + #[doc = ""] + #[doc = " The messages are processed in FIFO order."] + pub async fn relay_dispatch_queues( + &self, + _0: &runtime_types::polkadot_parachain::primitives::Id, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::std::vec::Vec<::std::vec::Vec<::core::primitive::u8>>, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .storage_hash::()? + == [ + 22u8, 48u8, 215u8, 37u8, 42u8, 115u8, 27u8, 8u8, 249u8, 65u8, 47u8, + 61u8, 96u8, 1u8, 196u8, 143u8, 53u8, 7u8, 241u8, 126u8, 4u8, 242u8, + 42u8, 171u8, 66u8, 162u8, 203u8, 200u8, 239u8, 50u8, 87u8, 72u8, + ] + { + let entry = RelayDispatchQueues(_0); + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The messages waiting to be handled by the relay-chain originating from a certain parachain."] + #[doc = ""] + #[doc = " Note that some upward messages might have been already processed by the inclusion logic. E.g."] + #[doc = " channel management messages."] + #[doc = ""] + #[doc = " The messages are processed in FIFO order."] + pub async fn relay_dispatch_queues_iter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::subxt::KeyIter<'a, T, RelayDispatchQueues<'a>>, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .storage_hash::()? + == [ + 22u8, 48u8, 215u8, 37u8, 42u8, 115u8, 27u8, 8u8, 249u8, 65u8, 47u8, + 61u8, 96u8, 1u8, 196u8, 143u8, 53u8, 7u8, 241u8, 126u8, 4u8, 242u8, + 42u8, 171u8, 66u8, 162u8, 203u8, 200u8, 239u8, 50u8, 87u8, 72u8, + ] + { + self.client.storage().iter(block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Size of the dispatch queues. Caches sizes of the queues in `RelayDispatchQueue`."] + #[doc = ""] + #[doc = " First item in the tuple is the count of messages and second"] + #[doc = " is the total length (in bytes) of the message payloads."] + #[doc = ""] + #[doc = " Note that this is an auxiliary mapping: it's possible to tell the byte size and the number of"] + #[doc = " messages only looking at `RelayDispatchQueues`. This mapping is separate to avoid the cost of"] + #[doc = " loading the whole message queue if only the total size and count are required."] + #[doc = ""] + #[doc = " Invariant:"] + #[doc = " - The set of keys should exactly match the set of keys of `RelayDispatchQueues`."] + pub async fn relay_dispatch_queue_size( + &self, + _0: &runtime_types::polkadot_parachain::primitives::Id, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + (::core::primitive::u32, ::core::primitive::u32), + ::subxt::BasicError, + > { + if self + .client + .metadata() + .storage_hash::()? + == [ + 8u8, 0u8, 54u8, 33u8, 185u8, 112u8, 21u8, 174u8, 15u8, 147u8, 134u8, + 184u8, 108u8, 144u8, 55u8, 138u8, 24u8, 66u8, 255u8, 197u8, 131u8, + 229u8, 35u8, 107u8, 251u8, 226u8, 78u8, 218u8, 41u8, 251u8, 155u8, + 79u8, + ] + { + let entry = RelayDispatchQueueSize(_0); + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Size of the dispatch queues. 
Caches sizes of the queues in `RelayDispatchQueue`."] + #[doc = ""] + #[doc = " First item in the tuple is the count of messages and second"] + #[doc = " is the total length (in bytes) of the message payloads."] + #[doc = ""] + #[doc = " Note that this is an auxiliary mapping: it's possible to tell the byte size and the number of"] + #[doc = " messages only looking at `RelayDispatchQueues`. This mapping is separate to avoid the cost of"] + #[doc = " loading the whole message queue if only the total size and count are required."] + #[doc = ""] + #[doc = " Invariant:"] + #[doc = " - The set of keys should exactly match the set of keys of `RelayDispatchQueues`."] + pub async fn relay_dispatch_queue_size_iter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::subxt::KeyIter<'a, T, RelayDispatchQueueSize<'a>>, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .storage_hash::()? + == [ + 8u8, 0u8, 54u8, 33u8, 185u8, 112u8, 21u8, 174u8, 15u8, 147u8, 134u8, + 184u8, 108u8, 144u8, 55u8, 138u8, 24u8, 66u8, 255u8, 197u8, 131u8, + 229u8, 35u8, 107u8, 251u8, 226u8, 78u8, 218u8, 41u8, 251u8, 155u8, + 79u8, + ] + { + self.client.storage().iter(block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The ordered list of `ParaId`s that have a `RelayDispatchQueue` entry."] + #[doc = ""] + #[doc = " Invariant:"] + #[doc = " - The set of items from this vector should be exactly the set of the keys in"] + #[doc = " `RelayDispatchQueues` and `RelayDispatchQueueSize`."] + pub async fn needs_dispatch( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::std::vec::Vec, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 75u8, 38u8, 232u8, 83u8, 71u8, 101u8, 248u8, 170u8, 5u8, 32u8, 209u8, + 97u8, 190u8, 31u8, 241u8, 1u8, 98u8, 87u8, 64u8, 208u8, 26u8, 100u8, + 93u8, 79u8, 61u8, 114u8, 11u8, 172u8, 112u8, 164u8, 171u8, 237u8, + ] + { + let entry = NeedsDispatch; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " This is the para that gets will get dispatched first during the next upward dispatchable queue"] + #[doc = " execution round."] + #[doc = ""] + #[doc = " Invariant:"] + #[doc = " - If `Some(para)`, then `para` must be present in `NeedsDispatch`."] + pub async fn next_dispatch_round_start_with( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::core::option::Option, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .storage_hash::()? 
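+ // `NextDispatchRoundStartWith` is a plain (unkeyed) storage entry, so this accessor
+ // takes no map key and uses `fetch`, returning `None` whenever the cursor is unset.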
+ == [ + 102u8, 165u8, 118u8, 140u8, 84u8, 122u8, 91u8, 169u8, 232u8, 125u8, + 52u8, 228u8, 15u8, 228u8, 91u8, 79u8, 218u8, 62u8, 93u8, 42u8, 204u8, + 6u8, 34u8, 185u8, 218u8, 150u8, 7u8, 250u8, 79u8, 142u8, 211u8, 0u8, + ] + { + let entry = NextDispatchRoundStartWith; + self.client.storage().fetch(&entry, block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The messages that exceeded max individual message weight budget."] + #[doc = ""] + #[doc = " These messages stay there until manually dispatched."] + pub async fn overweight( + &self, + _0: &::core::primitive::u64, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::core::option::Option<( + runtime_types::polkadot_parachain::primitives::Id, + ::std::vec::Vec<::core::primitive::u8>, + )>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 223u8, 155u8, 1u8, 100u8, 77u8, 13u8, 92u8, 235u8, 64u8, 30u8, 199u8, + 178u8, 149u8, 66u8, 155u8, 201u8, 84u8, 26u8, 81u8, 183u8, 0u8, 113u8, + 182u8, 37u8, 69u8, 66u8, 240u8, 151u8, 254u8, 249u8, 134u8, 51u8, + ] + { + let entry = Overweight(_0); + self.client.storage().fetch(&entry, block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The messages that exceeded max individual message weight budget."] + #[doc = ""] + #[doc = " These messages stay there until manually dispatched."] + pub async fn overweight_iter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::subxt::KeyIter<'a, T, Overweight<'a>>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 223u8, 155u8, 1u8, 100u8, 77u8, 13u8, 92u8, 235u8, 64u8, 30u8, 199u8, + 178u8, 149u8, 66u8, 155u8, 201u8, 84u8, 26u8, 81u8, 183u8, 0u8, 113u8, + 182u8, 37u8, 69u8, 66u8, 240u8, 151u8, 254u8, 249u8, 134u8, 51u8, + ] + { + self.client.storage().iter(block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The number of overweight messages ever recorded in `Overweight` (and thus the lowest free"] + #[doc = " index)."] + pub async fn overweight_count( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result<::core::primitive::u64, ::subxt::BasicError> + { + if self.client.metadata().storage_hash::()? 
+ == [ + 102u8, 180u8, 196u8, 148u8, 115u8, 62u8, 46u8, 238u8, 97u8, 116u8, + 117u8, 42u8, 14u8, 5u8, 72u8, 237u8, 230u8, 46u8, 150u8, 126u8, 89u8, + 64u8, 233u8, 166u8, 180u8, 137u8, 52u8, 233u8, 252u8, 255u8, 36u8, + 20u8, + ] + { + let entry = OverweightCount; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + } + pub mod hrmp { + use super::root_mod; + use super::runtime_types; + pub mod calls { + use super::root_mod; + use super::runtime_types; + type DispatchError = runtime_types::sp_runtime::DispatchError; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct HrmpInitOpenChannel { + pub recipient: runtime_types::polkadot_parachain::primitives::Id, + pub proposed_max_capacity: ::core::primitive::u32, + pub proposed_max_message_size: ::core::primitive::u32, + } + impl ::subxt::Call for HrmpInitOpenChannel { + const PALLET: &'static str = "Hrmp"; + const FUNCTION: &'static str = "hrmp_init_open_channel"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct HrmpAcceptOpenChannel { + pub sender: runtime_types::polkadot_parachain::primitives::Id, + } + impl ::subxt::Call for HrmpAcceptOpenChannel { + const PALLET: &'static str = "Hrmp"; + const FUNCTION: &'static str = "hrmp_accept_open_channel"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct HrmpCloseChannel { + pub channel_id: runtime_types::polkadot_parachain::primitives::HrmpChannelId, + } + impl ::subxt::Call for HrmpCloseChannel { + const PALLET: &'static str = "Hrmp"; + const FUNCTION: &'static str = "hrmp_close_channel"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct ForceCleanHrmp { + pub para: runtime_types::polkadot_parachain::primitives::Id, + pub inbound: ::core::primitive::u32, + pub outbound: ::core::primitive::u32, + } + impl ::subxt::Call for ForceCleanHrmp { + const PALLET: &'static str = "Hrmp"; + const FUNCTION: &'static str = "force_clean_hrmp"; + } + #[derive( + :: subxt :: codec :: CompactAs, + :: subxt :: codec :: Decode, + :: subxt :: codec :: Encode, + Debug, + )] + pub struct ForceProcessHrmpOpen { + pub channels: ::core::primitive::u32, + } + impl ::subxt::Call for ForceProcessHrmpOpen { + const PALLET: &'static str = "Hrmp"; + const FUNCTION: &'static str = "force_process_hrmp_open"; + } + #[derive( + :: subxt :: codec :: CompactAs, + :: subxt :: codec :: Decode, + :: subxt :: codec :: Encode, + Debug, + )] + pub struct ForceProcessHrmpClose { + pub channels: ::core::primitive::u32, + } + impl ::subxt::Call for ForceProcessHrmpClose { + const PALLET: &'static str = "Hrmp"; + const FUNCTION: &'static str = "force_process_hrmp_close"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct HrmpCancelOpenRequest { + pub channel_id: runtime_types::polkadot_parachain::primitives::HrmpChannelId, + pub open_requests: ::core::primitive::u32, + } + impl ::subxt::Call for HrmpCancelOpenRequest { + const PALLET: &'static str = "Hrmp"; + const FUNCTION: &'static str = "hrmp_cancel_open_request"; + } + pub struct TransactionApi<'a, T: ::subxt::Config, X> { + client: &'a ::subxt::Client, + marker: ::core::marker::PhantomData, + } + impl<'a, T, X> TransactionApi<'a, T, X> + where + T: ::subxt::Config, + X: ::subxt::extrinsic::ExtrinsicParams, + { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { + 
client, + marker: ::core::marker::PhantomData, + } + } + #[doc = "Initiate opening a channel from a parachain to a given recipient with given channel"] + #[doc = "parameters."] + #[doc = ""] + #[doc = "- `proposed_max_capacity` - specifies how many messages can be in the channel at once."] + #[doc = "- `proposed_max_message_size` - specifies the maximum size of the messages."] + #[doc = ""] + #[doc = "These numbers are a subject to the relay-chain configuration limits."] + #[doc = ""] + #[doc = "The channel can be opened only after the recipient confirms it and only on a session"] + #[doc = "change."] + pub fn hrmp_init_open_channel( + &self, + recipient: runtime_types::polkadot_parachain::primitives::Id, + proposed_max_capacity: ::core::primitive::u32, + proposed_max_message_size: ::core::primitive::u32, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + HrmpInitOpenChannel, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 244u8, 142u8, 161u8, 144u8, 109u8, 104u8, 164u8, 198u8, 201u8, 79u8, + 178u8, 136u8, 107u8, 104u8, 83u8, 11u8, 167u8, 164u8, 223u8, 147u8, + 135u8, 35u8, 133u8, 176u8, 236u8, 112u8, 107u8, 131u8, 184u8, 105u8, + 174u8, 12u8, + ] + { + let call = HrmpInitOpenChannel { + recipient, + proposed_max_capacity, + proposed_max_message_size, + }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Accept a pending open channel request from the given sender."] + #[doc = ""] + #[doc = "The channel will be opened only on the next session boundary."] + pub fn hrmp_accept_open_channel( + &self, + sender: runtime_types::polkadot_parachain::primitives::Id, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + HrmpAcceptOpenChannel, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .call_hash::()? + == [ + 95u8, 196u8, 155u8, 220u8, 235u8, 120u8, 67u8, 247u8, 245u8, 20u8, + 162u8, 41u8, 4u8, 204u8, 125u8, 16u8, 224u8, 72u8, 198u8, 237u8, 84u8, + 46u8, 201u8, 17u8, 172u8, 55u8, 115u8, 51u8, 16u8, 140u8, 4u8, 253u8, + ] + { + let call = HrmpAcceptOpenChannel { sender }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Initiate unilateral closing of a channel. The origin must be either the sender or the"] + #[doc = "recipient in the channel being closed."] + #[doc = ""] + #[doc = "The closure can only happen on a session change."] + pub fn hrmp_close_channel( + &self, + channel_id: runtime_types::polkadot_parachain::primitives::HrmpChannelId, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + HrmpCloseChannel, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 199u8, 9u8, 55u8, 184u8, 196u8, 45u8, 46u8, 251u8, 48u8, 23u8, 132u8, + 74u8, 188u8, 121u8, 41u8, 18u8, 71u8, 65u8, 129u8, 14u8, 38u8, 48u8, + 253u8, 119u8, 171u8, 202u8, 9u8, 65u8, 250u8, 98u8, 185u8, 220u8, + ] + { + let call = HrmpCloseChannel { channel_id }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "This extrinsic triggers the cleanup of all the HRMP storage items that"] + #[doc = "a para may have. 
Normally this happens once per session, but this allows"] + #[doc = "you to trigger the cleanup immediately for a specific parachain."] + #[doc = ""] + #[doc = "Origin must be Root."] + #[doc = ""] + #[doc = "Number of inbound and outbound channels for `para` must be provided as witness data of weighing."] + pub fn force_clean_hrmp( + &self, + para: runtime_types::polkadot_parachain::primitives::Id, + inbound: ::core::primitive::u32, + outbound: ::core::primitive::u32, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + ForceCleanHrmp, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 182u8, 231u8, 99u8, 129u8, 130u8, 109u8, 97u8, 108u8, 37u8, 107u8, + 203u8, 70u8, 133u8, 106u8, 226u8, 77u8, 110u8, 189u8, 227u8, 26u8, + 129u8, 189u8, 234u8, 215u8, 112u8, 22u8, 127u8, 185u8, 152u8, 157u8, + 14u8, 66u8, + ] + { + let call = ForceCleanHrmp { + para, + inbound, + outbound, + }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Force process HRMP open channel requests."] + #[doc = ""] + #[doc = "If there are pending HRMP open channel requests, you can use this"] + #[doc = "function process all of those requests immediately."] + #[doc = ""] + #[doc = "Total number of opening channels must be provided as witness data of weighing."] + pub fn force_process_hrmp_open( + &self, + channels: ::core::primitive::u32, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + ForceProcessHrmpOpen, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 162u8, 53u8, 194u8, 175u8, 117u8, 32u8, 217u8, 177u8, 9u8, 255u8, 88u8, + 40u8, 8u8, 174u8, 8u8, 11u8, 26u8, 82u8, 213u8, 40u8, 20u8, 89u8, + 227u8, 209u8, 95u8, 162u8, 221u8, 97u8, 230u8, 98u8, 110u8, 85u8, + ] + { + let call = ForceProcessHrmpOpen { channels }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Force process HRMP close channel requests."] + #[doc = ""] + #[doc = "If there are pending HRMP close channel requests, you can use this"] + #[doc = "function process all of those requests immediately."] + #[doc = ""] + #[doc = "Total number of closing channels must be provided as witness data of weighing."] + pub fn force_process_hrmp_close( + &self, + channels: ::core::primitive::u32, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + ForceProcessHrmpClose, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .call_hash::()? + == [ + 128u8, 141u8, 191u8, 255u8, 204u8, 137u8, 27u8, 170u8, 180u8, 166u8, + 93u8, 144u8, 70u8, 56u8, 132u8, 100u8, 5u8, 114u8, 252u8, 163u8, 164u8, + 246u8, 234u8, 152u8, 193u8, 79u8, 89u8, 137u8, 46u8, 171u8, 32u8, + 119u8, + ] + { + let call = ForceProcessHrmpClose { channels }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "This cancels a pending open channel request. It can be canceled by either of the sender"] + #[doc = "or the recipient for that request. The origin must be either of those."] + #[doc = ""] + #[doc = "The cancellation happens immediately. It is not possible to cancel the request if it is"] + #[doc = "already accepted."] + #[doc = ""] + #[doc = "Total number of open requests (i.e. 
`HrmpOpenChannelRequestsList`) must be provided as"] + #[doc = "witness data."] + pub fn hrmp_cancel_open_request( + &self, + channel_id: runtime_types::polkadot_parachain::primitives::HrmpChannelId, + open_requests: ::core::primitive::u32, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + HrmpCancelOpenRequest, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .call_hash::()? + == [ + 8u8, 83u8, 32u8, 187u8, 220u8, 1u8, 212u8, 226u8, 72u8, 61u8, 110u8, + 211u8, 238u8, 119u8, 95u8, 48u8, 150u8, 51u8, 177u8, 182u8, 209u8, + 174u8, 245u8, 25u8, 194u8, 199u8, 212u8, 131u8, 77u8, 72u8, 9u8, 120u8, + ] + { + let call = HrmpCancelOpenRequest { + channel_id, + open_requests, + }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + pub type Event = runtime_types::polkadot_runtime_parachains::hrmp::pallet::Event; + pub mod events { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "Open HRMP channel requested."] + #[doc = "`[sender, recipient, proposed_max_capacity, proposed_max_message_size]`"] + pub struct OpenChannelRequested( + pub runtime_types::polkadot_parachain::primitives::Id, + pub runtime_types::polkadot_parachain::primitives::Id, + pub ::core::primitive::u32, + pub ::core::primitive::u32, + ); + impl ::subxt::Event for OpenChannelRequested { + const PALLET: &'static str = "Hrmp"; + const EVENT: &'static str = "OpenChannelRequested"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "An HRMP channel request sent by the receiver was canceled by either party."] + #[doc = "`[by_parachain, channel_id]`"] + pub struct OpenChannelCanceled( + pub runtime_types::polkadot_parachain::primitives::Id, + pub runtime_types::polkadot_parachain::primitives::HrmpChannelId, + ); + impl ::subxt::Event for OpenChannelCanceled { + const PALLET: &'static str = "Hrmp"; + const EVENT: &'static str = "OpenChannelCanceled"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "Open HRMP channel accepted. `[sender, recipient]`"] + pub struct OpenChannelAccepted( + pub runtime_types::polkadot_parachain::primitives::Id, + pub runtime_types::polkadot_parachain::primitives::Id, + ); + impl ::subxt::Event for OpenChannelAccepted { + const PALLET: &'static str = "Hrmp"; + const EVENT: &'static str = "OpenChannelAccepted"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "HRMP channel closed. 
`[by_parachain, channel_id]`"] + pub struct ChannelClosed( + pub runtime_types::polkadot_parachain::primitives::Id, + pub runtime_types::polkadot_parachain::primitives::HrmpChannelId, + ); + impl ::subxt::Event for ChannelClosed { + const PALLET: &'static str = "Hrmp"; + const EVENT: &'static str = "ChannelClosed"; + } + } + pub mod storage { + use super::runtime_types; + pub struct HrmpOpenChannelRequests<'a>( + pub &'a runtime_types::polkadot_parachain::primitives::HrmpChannelId, + ); + impl ::subxt::StorageEntry for HrmpOpenChannelRequests<'_> { + const PALLET: &'static str = "Hrmp"; + const STORAGE: &'static str = "HrmpOpenChannelRequests"; + type Value = + runtime_types::polkadot_runtime_parachains::hrmp::HrmpOpenChannelRequest; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( + &self.0, + ::subxt::StorageHasher::Twox64Concat, + )]) + } + } + pub struct HrmpOpenChannelRequestsList; + impl ::subxt::StorageEntry for HrmpOpenChannelRequestsList { + const PALLET: &'static str = "Hrmp"; + const STORAGE: &'static str = "HrmpOpenChannelRequestsList"; + type Value = + ::std::vec::Vec; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct HrmpOpenChannelRequestCount<'a>( + pub &'a runtime_types::polkadot_parachain::primitives::Id, + ); + impl ::subxt::StorageEntry for HrmpOpenChannelRequestCount<'_> { + const PALLET: &'static str = "Hrmp"; + const STORAGE: &'static str = "HrmpOpenChannelRequestCount"; + type Value = ::core::primitive::u32; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( + &self.0, + ::subxt::StorageHasher::Twox64Concat, + )]) + } + } + pub struct HrmpAcceptedChannelRequestCount<'a>( + pub &'a runtime_types::polkadot_parachain::primitives::Id, + ); + impl ::subxt::StorageEntry for HrmpAcceptedChannelRequestCount<'_> { + const PALLET: &'static str = "Hrmp"; + const STORAGE: &'static str = "HrmpAcceptedChannelRequestCount"; + type Value = ::core::primitive::u32; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( + &self.0, + ::subxt::StorageHasher::Twox64Concat, + )]) + } + } + pub struct HrmpCloseChannelRequests<'a>( + pub &'a runtime_types::polkadot_parachain::primitives::HrmpChannelId, + ); + impl ::subxt::StorageEntry for HrmpCloseChannelRequests<'_> { + const PALLET: &'static str = "Hrmp"; + const STORAGE: &'static str = "HrmpCloseChannelRequests"; + type Value = (); + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( + &self.0, + ::subxt::StorageHasher::Twox64Concat, + )]) + } + } + pub struct HrmpCloseChannelRequestsList; + impl ::subxt::StorageEntry for HrmpCloseChannelRequestsList { + const PALLET: &'static str = "Hrmp"; + const STORAGE: &'static str = "HrmpCloseChannelRequestsList"; + type Value = + ::std::vec::Vec; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct HrmpWatermarks<'a>( + pub &'a runtime_types::polkadot_parachain::primitives::Id, + ); + impl ::subxt::StorageEntry for HrmpWatermarks<'_> { + const PALLET: &'static str = "Hrmp"; + const STORAGE: &'static str = "HrmpWatermarks"; + type Value = ::core::primitive::u32; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( + &self.0, + ::subxt::StorageHasher::Twox64Concat, + )]) + } + } + pub struct 
HrmpChannels<'a>( + pub &'a runtime_types::polkadot_parachain::primitives::HrmpChannelId, + ); + impl ::subxt::StorageEntry for HrmpChannels<'_> { + const PALLET: &'static str = "Hrmp"; + const STORAGE: &'static str = "HrmpChannels"; + type Value = runtime_types::polkadot_runtime_parachains::hrmp::HrmpChannel; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( + &self.0, + ::subxt::StorageHasher::Twox64Concat, + )]) + } + } + pub struct HrmpIngressChannelsIndex<'a>( + pub &'a runtime_types::polkadot_parachain::primitives::Id, + ); + impl ::subxt::StorageEntry for HrmpIngressChannelsIndex<'_> { + const PALLET: &'static str = "Hrmp"; + const STORAGE: &'static str = "HrmpIngressChannelsIndex"; + type Value = ::std::vec::Vec; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( + &self.0, + ::subxt::StorageHasher::Twox64Concat, + )]) + } + } + pub struct HrmpEgressChannelsIndex<'a>( + pub &'a runtime_types::polkadot_parachain::primitives::Id, + ); + impl ::subxt::StorageEntry for HrmpEgressChannelsIndex<'_> { + const PALLET: &'static str = "Hrmp"; + const STORAGE: &'static str = "HrmpEgressChannelsIndex"; + type Value = ::std::vec::Vec; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( + &self.0, + ::subxt::StorageHasher::Twox64Concat, + )]) + } + } + pub struct HrmpChannelContents<'a>( + pub &'a runtime_types::polkadot_parachain::primitives::HrmpChannelId, + ); + impl ::subxt::StorageEntry for HrmpChannelContents<'_> { + const PALLET: &'static str = "Hrmp"; + const STORAGE: &'static str = "HrmpChannelContents"; + type Value = ::std::vec::Vec< + runtime_types::polkadot_core_primitives::InboundHrmpMessage< + ::core::primitive::u32, + >, + >; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( + &self.0, + ::subxt::StorageHasher::Twox64Concat, + )]) + } + } + pub struct HrmpChannelDigests<'a>( + pub &'a runtime_types::polkadot_parachain::primitives::Id, + ); + impl ::subxt::StorageEntry for HrmpChannelDigests<'_> { + const PALLET: &'static str = "Hrmp"; + const STORAGE: &'static str = "HrmpChannelDigests"; + type Value = ::std::vec::Vec<( + ::core::primitive::u32, + ::std::vec::Vec, + )>; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( + &self.0, + ::subxt::StorageHasher::Twox64Concat, + )]) + } + } + pub struct StorageApi<'a, T: ::subxt::Config> { + client: &'a ::subxt::Client, + } + impl<'a, T: ::subxt::Config> StorageApi<'a, T> { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { client } + } + #[doc = " The set of pending HRMP open channel requests."] + #[doc = ""] + #[doc = " The set is accompanied by a list for iteration."] + #[doc = ""] + #[doc = " Invariant:"] + #[doc = " - There are no channels that exists in list but not in the set and vice versa."] + pub async fn hrmp_open_channel_requests( + &self, + _0: &runtime_types::polkadot_parachain::primitives::HrmpChannelId, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::core::option::Option< + runtime_types::polkadot_runtime_parachains::hrmp::HrmpOpenChannelRequest, + >, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .storage_hash::()? 
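+ // [editor's note] The 32-byte array that follows -- and every similar literal in this
+ // generated module -- is the hash of the corresponding call/storage definition as it
+ // appeared in the metadata these bindings were generated from. subxt re-derives the same
+ // hash from the connected node's metadata at call time; if a runtime upgrade has changed
+ // the item, the hashes differ and the method returns
+ // `::subxt::MetadataError::IncompatibleMetadata` instead of decoding stale data.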
+ == [ + 58u8, 216u8, 106u8, 4u8, 117u8, 77u8, 168u8, 230u8, 50u8, 6u8, 175u8, + 26u8, 110u8, 45u8, 143u8, 207u8, 174u8, 77u8, 5u8, 245u8, 172u8, 114u8, + 20u8, 229u8, 153u8, 137u8, 220u8, 189u8, 155u8, 5u8, 116u8, 236u8, + ] + { + let entry = HrmpOpenChannelRequests(_0); + self.client.storage().fetch(&entry, block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The set of pending HRMP open channel requests."] + #[doc = ""] + #[doc = " The set is accompanied by a list for iteration."] + #[doc = ""] + #[doc = " Invariant:"] + #[doc = " - There are no channels that exists in list but not in the set and vice versa."] + pub async fn hrmp_open_channel_requests_iter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::subxt::KeyIter<'a, T, HrmpOpenChannelRequests<'a>>, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .storage_hash::()? + == [ + 58u8, 216u8, 106u8, 4u8, 117u8, 77u8, 168u8, 230u8, 50u8, 6u8, 175u8, + 26u8, 110u8, 45u8, 143u8, 207u8, 174u8, 77u8, 5u8, 245u8, 172u8, 114u8, + 20u8, 229u8, 153u8, 137u8, 220u8, 189u8, 155u8, 5u8, 116u8, 236u8, + ] + { + self.client.storage().iter(block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + pub async fn hrmp_open_channel_requests_list( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::std::vec::Vec, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .storage_hash::()? + == [ + 176u8, 22u8, 136u8, 206u8, 243u8, 208u8, 67u8, 150u8, 187u8, 163u8, + 141u8, 37u8, 235u8, 84u8, 176u8, 63u8, 55u8, 38u8, 215u8, 185u8, 206u8, + 127u8, 37u8, 108u8, 245u8, 237u8, 154u8, 151u8, 111u8, 33u8, 39u8, + 102u8, + ] + { + let entry = HrmpOpenChannelRequestsList; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " This mapping tracks how many open channel requests are initiated by a given sender para."] + #[doc = " Invariant: `HrmpOpenChannelRequests` should contain the same number of items that has"] + #[doc = " `(X, _)` as the number of `HrmpOpenChannelRequestCount` for `X`."] + pub async fn hrmp_open_channel_request_count( + &self, + _0: &runtime_types::polkadot_parachain::primitives::Id, + block_hash: ::core::option::Option, + ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> + { + if self + .client + .metadata() + .storage_hash::()? + == [ + 103u8, 47u8, 152u8, 1u8, 119u8, 244u8, 62u8, 249u8, 141u8, 194u8, + 157u8, 149u8, 58u8, 208u8, 113u8, 77u8, 4u8, 248u8, 114u8, 94u8, 153u8, + 20u8, 179u8, 4u8, 43u8, 32u8, 248u8, 118u8, 115u8, 206u8, 228u8, 28u8, + ] + { + let entry = HrmpOpenChannelRequestCount(_0); + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " This mapping tracks how many open channel requests are initiated by a given sender para."] + #[doc = " Invariant: `HrmpOpenChannelRequests` should contain the same number of items that has"] + #[doc = " `(X, _)` as the number of `HrmpOpenChannelRequestCount` for `X`."] + pub async fn hrmp_open_channel_request_count_iter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::subxt::KeyIter<'a, T, HrmpOpenChannelRequestCount<'a>>, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .storage_hash::()? 
+ == [ + 103u8, 47u8, 152u8, 1u8, 119u8, 244u8, 62u8, 249u8, 141u8, 194u8, + 157u8, 149u8, 58u8, 208u8, 113u8, 77u8, 4u8, 248u8, 114u8, 94u8, 153u8, + 20u8, 179u8, 4u8, 43u8, 32u8, 248u8, 118u8, 115u8, 206u8, 228u8, 28u8, + ] + { + self.client.storage().iter(block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " This mapping tracks how many open channel requests were accepted by a given recipient para."] + #[doc = " Invariant: `HrmpOpenChannelRequests` should contain the same number of items `(_, X)` with"] + #[doc = " `confirmed` set to true, as the number of `HrmpAcceptedChannelRequestCount` for `X`."] + pub async fn hrmp_accepted_channel_request_count( + &self, + _0: &runtime_types::polkadot_parachain::primitives::Id, + block_hash: ::core::option::Option, + ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> + { + if self + .client + .metadata() + .storage_hash::()? + == [ + 166u8, 207u8, 97u8, 222u8, 30u8, 204u8, 203u8, 122u8, 72u8, 66u8, + 247u8, 169u8, 128u8, 122u8, 145u8, 124u8, 214u8, 183u8, 251u8, 85u8, + 93u8, 37u8, 143u8, 71u8, 45u8, 61u8, 168u8, 211u8, 222u8, 58u8, 91u8, + 202u8, + ] + { + let entry = HrmpAcceptedChannelRequestCount(_0); + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " This mapping tracks how many open channel requests were accepted by a given recipient para."] + #[doc = " Invariant: `HrmpOpenChannelRequests` should contain the same number of items `(_, X)` with"] + #[doc = " `confirmed` set to true, as the number of `HrmpAcceptedChannelRequestCount` for `X`."] + pub async fn hrmp_accepted_channel_request_count_iter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::subxt::KeyIter<'a, T, HrmpAcceptedChannelRequestCount<'a>>, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .storage_hash::()? + == [ + 166u8, 207u8, 97u8, 222u8, 30u8, 204u8, 203u8, 122u8, 72u8, 66u8, + 247u8, 169u8, 128u8, 122u8, 145u8, 124u8, 214u8, 183u8, 251u8, 85u8, + 93u8, 37u8, 143u8, 71u8, 45u8, 61u8, 168u8, 211u8, 222u8, 58u8, 91u8, + 202u8, + ] + { + self.client.storage().iter(block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " A set of pending HRMP close channel requests that are going to be closed during the session"] + #[doc = " change. Used for checking if a given channel is registered for closure."] + #[doc = ""] + #[doc = " The set is accompanied by a list for iteration."] + #[doc = ""] + #[doc = " Invariant:"] + #[doc = " - There are no channels that exists in list but not in the set and vice versa."] + pub async fn hrmp_close_channel_requests( + &self, + _0: &runtime_types::polkadot_parachain::primitives::HrmpChannelId, + block_hash: ::core::option::Option, + ) -> ::core::result::Result<::core::option::Option<()>, ::subxt::BasicError> + { + if self + .client + .metadata() + .storage_hash::()? 
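+ // [editor's note] Illustrative only, not part of the generated code: each `_iter`
+ // variant above returns a `::subxt::KeyIter` that pages through every entry of the map
+ // instead of fetching a single key. Given a `StorageApi` handle `storage`, a relayer
+ // could walk all pending open requests roughly like this (the `.next()` shape is an
+ // assumption about this subxt version's `KeyIter` API):
+ //
+ //     let mut requests = storage.hrmp_open_channel_requests_iter(None).await?;
+ //     while let Some((key, request)) = requests.next().await? {
+ //         // inspect `request.confirmed`, etc.
+ //     }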
+ == [ + 118u8, 8u8, 142u8, 158u8, 184u8, 200u8, 38u8, 112u8, 217u8, 69u8, + 161u8, 255u8, 116u8, 143u8, 94u8, 185u8, 95u8, 247u8, 227u8, 101u8, + 107u8, 55u8, 172u8, 164u8, 58u8, 182u8, 193u8, 140u8, 142u8, 118u8, + 223u8, 240u8, + ] + { + let entry = HrmpCloseChannelRequests(_0); + self.client.storage().fetch(&entry, block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " A set of pending HRMP close channel requests that are going to be closed during the session"] + #[doc = " change. Used for checking if a given channel is registered for closure."] + #[doc = ""] + #[doc = " The set is accompanied by a list for iteration."] + #[doc = ""] + #[doc = " Invariant:"] + #[doc = " - There are no channels that exists in list but not in the set and vice versa."] + pub async fn hrmp_close_channel_requests_iter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::subxt::KeyIter<'a, T, HrmpCloseChannelRequests<'a>>, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .storage_hash::()? + == [ + 118u8, 8u8, 142u8, 158u8, 184u8, 200u8, 38u8, 112u8, 217u8, 69u8, + 161u8, 255u8, 116u8, 143u8, 94u8, 185u8, 95u8, 247u8, 227u8, 101u8, + 107u8, 55u8, 172u8, 164u8, 58u8, 182u8, 193u8, 140u8, 142u8, 118u8, + 223u8, 240u8, + ] + { + self.client.storage().iter(block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + pub async fn hrmp_close_channel_requests_list( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::std::vec::Vec, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .storage_hash::()? + == [ + 203u8, 46u8, 200u8, 63u8, 120u8, 238u8, 88u8, 170u8, 239u8, 27u8, 99u8, + 104u8, 254u8, 194u8, 152u8, 221u8, 126u8, 188u8, 2u8, 153u8, 79u8, + 183u8, 236u8, 145u8, 120u8, 151u8, 235u8, 56u8, 130u8, 240u8, 74u8, + 211u8, + ] + { + let entry = HrmpCloseChannelRequestsList; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The HRMP watermark associated with each para."] + #[doc = " Invariant:"] + #[doc = " - each para `P` used here as a key should satisfy `Paras::is_valid_para(P)` within a session."] + pub async fn hrmp_watermarks( + &self, + _0: &runtime_types::polkadot_parachain::primitives::Id, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::core::option::Option<::core::primitive::u32>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 28u8, 187u8, 5u8, 0u8, 130u8, 11u8, 241u8, 171u8, 141u8, 109u8, 236u8, + 151u8, 194u8, 124u8, 172u8, 180u8, 36u8, 144u8, 134u8, 53u8, 162u8, + 247u8, 138u8, 209u8, 99u8, 194u8, 213u8, 100u8, 254u8, 15u8, 51u8, + 94u8, + ] + { + let entry = HrmpWatermarks(_0); + self.client.storage().fetch(&entry, block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The HRMP watermark associated with each para."] + #[doc = " Invariant:"] + #[doc = " - each para `P` used here as a key should satisfy `Paras::is_valid_para(P)` within a session."] + pub async fn hrmp_watermarks_iter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::subxt::KeyIter<'a, T, HrmpWatermarks<'a>>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? 
+ == [ + 28u8, 187u8, 5u8, 0u8, 130u8, 11u8, 241u8, 171u8, 141u8, 109u8, 236u8, + 151u8, 194u8, 124u8, 172u8, 180u8, 36u8, 144u8, 134u8, 53u8, 162u8, + 247u8, 138u8, 209u8, 99u8, 194u8, 213u8, 100u8, 254u8, 15u8, 51u8, + 94u8, + ] + { + self.client.storage().iter(block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " HRMP channel data associated with each para."] + #[doc = " Invariant:"] + #[doc = " - each participant in the channel should satisfy `Paras::is_valid_para(P)` within a session."] + pub async fn hrmp_channels( + &self, + _0: &runtime_types::polkadot_parachain::primitives::HrmpChannelId, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::core::option::Option< + runtime_types::polkadot_runtime_parachains::hrmp::HrmpChannel, + >, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 146u8, 253u8, 102u8, 91u8, 69u8, 206u8, 61u8, 201u8, 63u8, 22u8, 119u8, + 249u8, 119u8, 232u8, 154u8, 132u8, 123u8, 244u8, 12u8, 61u8, 95u8, + 138u8, 104u8, 112u8, 157u8, 31u8, 39u8, 126u8, 184u8, 15u8, 33u8, + 171u8, + ] + { + let entry = HrmpChannels(_0); + self.client.storage().fetch(&entry, block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " HRMP channel data associated with each para."] + #[doc = " Invariant:"] + #[doc = " - each participant in the channel should satisfy `Paras::is_valid_para(P)` within a session."] + pub async fn hrmp_channels_iter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::subxt::KeyIter<'a, T, HrmpChannels<'a>>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 146u8, 253u8, 102u8, 91u8, 69u8, 206u8, 61u8, 201u8, 63u8, 22u8, 119u8, + 249u8, 119u8, 232u8, 154u8, 132u8, 123u8, 244u8, 12u8, 61u8, 95u8, + 138u8, 104u8, 112u8, 157u8, 31u8, 39u8, 126u8, 184u8, 15u8, 33u8, + 171u8, + ] + { + self.client.storage().iter(block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Ingress/egress indexes allow to find all the senders and receivers given the opposite side."] + #[doc = " I.e."] + #[doc = ""] + #[doc = " (a) ingress index allows to find all the senders for a given recipient."] + #[doc = " (b) egress index allows to find all the recipients for a given sender."] + #[doc = ""] + #[doc = " Invariants:"] + #[doc = " - for each ingress index entry for `P` each item `I` in the index should present in"] + #[doc = " `HrmpChannels` as `(I, P)`."] + #[doc = " - for each egress index entry for `P` each item `E` in the index should present in"] + #[doc = " `HrmpChannels` as `(P, E)`."] + #[doc = " - there should be no other dangling channels in `HrmpChannels`."] + #[doc = " - the vectors are sorted."] + pub async fn hrmp_ingress_channels_index( + &self, + _0: &runtime_types::polkadot_parachain::primitives::Id, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::std::vec::Vec, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .storage_hash::()? 
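+ // [editor's note] Hypothetical usage, not part of the generated bindings: with a
+ // `StorageApi` handle `storage` (built via `StorageApi::new(&client)` as above), the
+ // channel metadata for one sender/recipient pair can be read at the latest block by
+ // passing `None` as the block hash. The `HrmpChannelId { sender, recipient }` field
+ // layout is assumed here.
+ //
+ //     let id = runtime_types::polkadot_parachain::primitives::HrmpChannelId {
+ //         sender: para_a,
+ //         recipient: para_b,
+ //     };
+ //     let channel = storage.hrmp_channels(&id, None).await?; // Option<HrmpChannel>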
+ == [ + 193u8, 185u8, 164u8, 194u8, 89u8, 218u8, 214u8, 184u8, 100u8, 238u8, + 232u8, 90u8, 243u8, 230u8, 93u8, 191u8, 197u8, 182u8, 215u8, 254u8, + 192u8, 11u8, 171u8, 211u8, 150u8, 210u8, 75u8, 216u8, 149u8, 60u8, + 49u8, 166u8, + ] + { + let entry = HrmpIngressChannelsIndex(_0); + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Ingress/egress indexes allow to find all the senders and receivers given the opposite side."] + #[doc = " I.e."] + #[doc = ""] + #[doc = " (a) ingress index allows to find all the senders for a given recipient."] + #[doc = " (b) egress index allows to find all the recipients for a given sender."] + #[doc = ""] + #[doc = " Invariants:"] + #[doc = " - for each ingress index entry for `P` each item `I` in the index should present in"] + #[doc = " `HrmpChannels` as `(I, P)`."] + #[doc = " - for each egress index entry for `P` each item `E` in the index should present in"] + #[doc = " `HrmpChannels` as `(P, E)`."] + #[doc = " - there should be no other dangling channels in `HrmpChannels`."] + #[doc = " - the vectors are sorted."] + pub async fn hrmp_ingress_channels_index_iter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::subxt::KeyIter<'a, T, HrmpIngressChannelsIndex<'a>>, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .storage_hash::()? + == [ + 193u8, 185u8, 164u8, 194u8, 89u8, 218u8, 214u8, 184u8, 100u8, 238u8, + 232u8, 90u8, 243u8, 230u8, 93u8, 191u8, 197u8, 182u8, 215u8, 254u8, + 192u8, 11u8, 171u8, 211u8, 150u8, 210u8, 75u8, 216u8, 149u8, 60u8, + 49u8, 166u8, + ] + { + self.client.storage().iter(block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + pub async fn hrmp_egress_channels_index( + &self, + _0: &runtime_types::polkadot_parachain::primitives::Id, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::std::vec::Vec, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .storage_hash::()? + == [ + 242u8, 138u8, 89u8, 201u8, 60u8, 216u8, 73u8, 66u8, 167u8, 82u8, 225u8, + 42u8, 61u8, 50u8, 54u8, 187u8, 212u8, 8u8, 255u8, 183u8, 85u8, 180u8, + 176u8, 0u8, 226u8, 173u8, 45u8, 155u8, 172u8, 28u8, 229u8, 157u8, + ] + { + let entry = HrmpEgressChannelsIndex(_0); + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + pub async fn hrmp_egress_channels_index_iter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::subxt::KeyIter<'a, T, HrmpEgressChannelsIndex<'a>>, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .storage_hash::()? 
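+ // [editor's note] The two index maps above are what make channel discovery cheap: the
+ // egress index for a para lists every recipient it has an open channel to, and the
+ // ingress index lists every sender, so a relayer can enumerate a chain's channels
+ // without scanning the whole `HrmpChannels` map.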
+ == [ + 242u8, 138u8, 89u8, 201u8, 60u8, 216u8, 73u8, 66u8, 167u8, 82u8, 225u8, + 42u8, 61u8, 50u8, 54u8, 187u8, 212u8, 8u8, 255u8, 183u8, 85u8, 180u8, + 176u8, 0u8, 226u8, 173u8, 45u8, 155u8, 172u8, 28u8, 229u8, 157u8, + ] + { + self.client.storage().iter(block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Storage for the messages for each channel."] + #[doc = " Invariant: cannot be non-empty if the corresponding channel in `HrmpChannels` is `None`."] + pub async fn hrmp_channel_contents( + &self, + _0: &runtime_types::polkadot_parachain::primitives::HrmpChannelId, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::std::vec::Vec< + runtime_types::polkadot_core_primitives::InboundHrmpMessage< + ::core::primitive::u32, + >, + >, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .storage_hash::()? + == [ + 71u8, 246u8, 41u8, 12u8, 125u8, 10u8, 60u8, 209u8, 14u8, 254u8, 125u8, + 217u8, 251u8, 172u8, 243u8, 73u8, 33u8, 230u8, 242u8, 16u8, 207u8, + 165u8, 33u8, 136u8, 78u8, 83u8, 206u8, 134u8, 65u8, 115u8, 166u8, + 192u8, + ] + { + let entry = HrmpChannelContents(_0); + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Storage for the messages for each channel."] + #[doc = " Invariant: cannot be non-empty if the corresponding channel in `HrmpChannels` is `None`."] + pub async fn hrmp_channel_contents_iter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::subxt::KeyIter<'a, T, HrmpChannelContents<'a>>, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .storage_hash::()? + == [ + 71u8, 246u8, 41u8, 12u8, 125u8, 10u8, 60u8, 209u8, 14u8, 254u8, 125u8, + 217u8, 251u8, 172u8, 243u8, 73u8, 33u8, 230u8, 242u8, 16u8, 207u8, + 165u8, 33u8, 136u8, 78u8, 83u8, 206u8, 134u8, 65u8, 115u8, 166u8, + 192u8, + ] + { + self.client.storage().iter(block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Maintains a mapping that can be used to answer the question: What paras sent a message at"] + #[doc = " the given block number for a given receiver. Invariants:"] + #[doc = " - The inner `Vec` is never empty."] + #[doc = " - The inner `Vec` cannot store two same `ParaId`."] + #[doc = " - The outer vector is sorted ascending by block number and cannot store two items with the"] + #[doc = " same block number."] + pub async fn hrmp_channel_digests( + &self, + _0: &runtime_types::polkadot_parachain::primitives::Id, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::std::vec::Vec<( + ::core::primitive::u32, + ::std::vec::Vec, + )>, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .storage_hash::()? + == [ + 54u8, 106u8, 76u8, 21u8, 18u8, 49u8, 1u8, 34u8, 247u8, 101u8, 150u8, + 142u8, 214u8, 137u8, 193u8, 100u8, 208u8, 162u8, 55u8, 229u8, 203u8, + 36u8, 154u8, 138u8, 48u8, 204u8, 114u8, 243u8, 54u8, 185u8, 27u8, + 173u8, + ] + { + let entry = HrmpChannelDigests(_0); + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Maintains a mapping that can be used to answer the question: What paras sent a message at"] + #[doc = " the given block number for a given receiver. 
Invariants:"] + #[doc = " - The inner `Vec` is never empty."] + #[doc = " - The inner `Vec` cannot store two same `ParaId`."] + #[doc = " - The outer vector is sorted ascending by block number and cannot store two items with the"] + #[doc = " same block number."] + pub async fn hrmp_channel_digests_iter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::subxt::KeyIter<'a, T, HrmpChannelDigests<'a>>, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .storage_hash::()? + == [ + 54u8, 106u8, 76u8, 21u8, 18u8, 49u8, 1u8, 34u8, 247u8, 101u8, 150u8, + 142u8, 214u8, 137u8, 193u8, 100u8, 208u8, 162u8, 55u8, 229u8, 203u8, + 36u8, 154u8, 138u8, 48u8, 204u8, 114u8, 243u8, 54u8, 185u8, 27u8, + 173u8, + ] + { + self.client.storage().iter(block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + } + pub mod para_session_info { + use super::root_mod; + use super::runtime_types; + pub mod storage { + use super::runtime_types; + pub struct AssignmentKeysUnsafe; + impl ::subxt::StorageEntry for AssignmentKeysUnsafe { + const PALLET: &'static str = "ParaSessionInfo"; + const STORAGE: &'static str = "AssignmentKeysUnsafe"; + type Value = + ::std::vec::Vec; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct EarliestStoredSession; + impl ::subxt::StorageEntry for EarliestStoredSession { + const PALLET: &'static str = "ParaSessionInfo"; + const STORAGE: &'static str = "EarliestStoredSession"; + type Value = ::core::primitive::u32; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct Sessions<'a>(pub &'a ::core::primitive::u32); + impl ::subxt::StorageEntry for Sessions<'_> { + const PALLET: &'static str = "ParaSessionInfo"; + const STORAGE: &'static str = "Sessions"; + type Value = runtime_types::polkadot_primitives::v2::SessionInfo; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( + &self.0, + ::subxt::StorageHasher::Identity, + )]) + } + } + pub struct StorageApi<'a, T: ::subxt::Config> { + client: &'a ::subxt::Client, + } + impl<'a, T: ::subxt::Config> StorageApi<'a, T> { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { client } + } + #[doc = " Assignment keys for the current session."] + #[doc = " Note that this API is private due to it being prone to 'off-by-one' at session boundaries."] + #[doc = " When in doubt, use `Sessions` API instead."] + pub async fn assignment_keys_unsafe( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::std::vec::Vec, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .storage_hash::()? + == [ + 150u8, 56u8, 43u8, 74u8, 246u8, 13u8, 148u8, 80u8, 105u8, 17u8, 36u8, + 246u8, 229u8, 105u8, 156u8, 206u8, 206u8, 77u8, 240u8, 24u8, 127u8, + 200u8, 14u8, 144u8, 246u8, 88u8, 173u8, 111u8, 176u8, 208u8, 31u8, + 248u8, + ] + { + let entry = AssignmentKeysUnsafe; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The earliest session for which previous session info is stored."] + pub async fn earliest_stored_session( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> + { + if self + .client + .metadata() + .storage_hash::()? 
+ == [ + 25u8, 143u8, 246u8, 184u8, 35u8, 166u8, 140u8, 147u8, 171u8, 5u8, + 164u8, 159u8, 228u8, 21u8, 248u8, 236u8, 48u8, 210u8, 133u8, 140u8, + 171u8, 3u8, 85u8, 250u8, 160u8, 102u8, 95u8, 46u8, 33u8, 81u8, 102u8, + 241u8, + ] + { + let entry = EarliestStoredSession; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Session information in a rolling window."] + #[doc = " Should have an entry in range `EarliestStoredSession..=CurrentSessionIndex`."] + #[doc = " Does not have any entries before the session index in the first session change notification."] + pub async fn sessions( + &self, + _0: &::core::primitive::u32, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::core::option::Option, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 163u8, 206u8, 134u8, 169u8, 87u8, 219u8, 254u8, 50u8, 16u8, 32u8, + 247u8, 205u8, 100u8, 140u8, 177u8, 89u8, 128u8, 178u8, 126u8, 175u8, + 198u8, 39u8, 251u8, 145u8, 92u8, 90u8, 10u8, 27u8, 62u8, 95u8, 128u8, + 168u8, + ] + { + let entry = Sessions(_0); + self.client.storage().fetch(&entry, block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Session information in a rolling window."] + #[doc = " Should have an entry in range `EarliestStoredSession..=CurrentSessionIndex`."] + #[doc = " Does not have any entries before the session index in the first session change notification."] + pub async fn sessions_iter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::subxt::KeyIter<'a, T, Sessions<'a>>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 163u8, 206u8, 134u8, 169u8, 87u8, 219u8, 254u8, 50u8, 16u8, 32u8, + 247u8, 205u8, 100u8, 140u8, 177u8, 89u8, 128u8, 178u8, 126u8, 175u8, + 198u8, 39u8, 251u8, 145u8, 92u8, 90u8, 10u8, 27u8, 62u8, 95u8, 128u8, + 168u8, + ] + { + self.client.storage().iter(block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + } + pub mod paras_disputes { + use super::root_mod; + use super::runtime_types; + pub mod calls { + use super::root_mod; + use super::runtime_types; + type DispatchError = runtime_types::sp_runtime::DispatchError; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct ForceUnfreeze; + impl ::subxt::Call for ForceUnfreeze { + const PALLET: &'static str = "ParasDisputes"; + const FUNCTION: &'static str = "force_unfreeze"; + } + pub struct TransactionApi<'a, T: ::subxt::Config, X> { + client: &'a ::subxt::Client, + marker: ::core::marker::PhantomData, + } + impl<'a, T, X> TransactionApi<'a, T, X> + where + T: ::subxt::Config, + X: ::subxt::extrinsic::ExtrinsicParams, + { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { + client, + marker: ::core::marker::PhantomData, + } + } + pub fn force_unfreeze( + &self, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + ForceUnfreeze, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? 
+ == [ + 212u8, 211u8, 58u8, 159u8, 23u8, 220u8, 64u8, 175u8, 65u8, 50u8, 192u8, + 122u8, 113u8, 189u8, 74u8, 191u8, 48u8, 93u8, 251u8, 50u8, 237u8, + 240u8, 91u8, 139u8, 193u8, 114u8, 131u8, 125u8, 124u8, 236u8, 191u8, + 190u8, + ] + { + let call = ForceUnfreeze {}; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + pub type Event = runtime_types::polkadot_runtime_parachains::disputes::pallet::Event; + pub mod events { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "A dispute has been initiated. \\[candidate hash, dispute location\\]"] + pub struct DisputeInitiated( + pub runtime_types::polkadot_core_primitives::CandidateHash, + pub runtime_types::polkadot_runtime_parachains::disputes::DisputeLocation, + ); + impl ::subxt::Event for DisputeInitiated { + const PALLET: &'static str = "ParasDisputes"; + const EVENT: &'static str = "DisputeInitiated"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "A dispute has concluded for or against a candidate."] + #[doc = "`\\[para id, candidate hash, dispute result\\]`"] + pub struct DisputeConcluded( + pub runtime_types::polkadot_core_primitives::CandidateHash, + pub runtime_types::polkadot_runtime_parachains::disputes::DisputeResult, + ); + impl ::subxt::Event for DisputeConcluded { + const PALLET: &'static str = "ParasDisputes"; + const EVENT: &'static str = "DisputeConcluded"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "A dispute has timed out due to insufficient participation."] + #[doc = "`\\[para id, candidate hash\\]`"] + pub struct DisputeTimedOut(pub runtime_types::polkadot_core_primitives::CandidateHash); + impl ::subxt::Event for DisputeTimedOut { + const PALLET: &'static str = "ParasDisputes"; + const EVENT: &'static str = "DisputeTimedOut"; + } + #[derive( + :: subxt :: codec :: CompactAs, + :: subxt :: codec :: Decode, + :: subxt :: codec :: Encode, + Debug, + )] + #[doc = "A dispute has concluded with supermajority against a candidate."] + #[doc = "Block authors should no longer build on top of this head and should"] + #[doc = "instead revert the block at the given height. 
This should be the"] + #[doc = "number of the child of the last known valid block in the chain."] + pub struct Revert(pub ::core::primitive::u32); + impl ::subxt::Event for Revert { + const PALLET: &'static str = "ParasDisputes"; + const EVENT: &'static str = "Revert"; + } + } + pub mod storage { + use super::runtime_types; + pub struct LastPrunedSession; + impl ::subxt::StorageEntry for LastPrunedSession { + const PALLET: &'static str = "ParasDisputes"; + const STORAGE: &'static str = "LastPrunedSession"; + type Value = ::core::primitive::u32; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct Disputes<'a>( + pub &'a ::core::primitive::u32, + pub &'a runtime_types::polkadot_core_primitives::CandidateHash, + ); + impl ::subxt::StorageEntry for Disputes<'_> { + const PALLET: &'static str = "ParasDisputes"; + const STORAGE: &'static str = "Disputes"; + type Value = + runtime_types::polkadot_primitives::v2::DisputeState<::core::primitive::u32>; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Map(vec![ + ::subxt::StorageMapKey::new(&self.0, ::subxt::StorageHasher::Twox64Concat), + ::subxt::StorageMapKey::new( + &self.1, + ::subxt::StorageHasher::Blake2_128Concat, + ), + ]) + } + } + pub struct Included<'a>( + pub &'a ::core::primitive::u32, + pub &'a runtime_types::polkadot_core_primitives::CandidateHash, + ); + impl ::subxt::StorageEntry for Included<'_> { + const PALLET: &'static str = "ParasDisputes"; + const STORAGE: &'static str = "Included"; + type Value = ::core::primitive::u32; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Map(vec![ + ::subxt::StorageMapKey::new(&self.0, ::subxt::StorageHasher::Twox64Concat), + ::subxt::StorageMapKey::new( + &self.1, + ::subxt::StorageHasher::Blake2_128Concat, + ), + ]) + } + } + pub struct SpamSlots<'a>(pub &'a ::core::primitive::u32); + impl ::subxt::StorageEntry for SpamSlots<'_> { + const PALLET: &'static str = "ParasDisputes"; + const STORAGE: &'static str = "SpamSlots"; + type Value = ::std::vec::Vec<::core::primitive::u32>; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( + &self.0, + ::subxt::StorageHasher::Twox64Concat, + )]) + } + } + pub struct Frozen; + impl ::subxt::StorageEntry for Frozen { + const PALLET: &'static str = "ParasDisputes"; + const STORAGE: &'static str = "Frozen"; + type Value = ::core::option::Option<::core::primitive::u32>; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct StorageApi<'a, T: ::subxt::Config> { + client: &'a ::subxt::Client, + } + impl<'a, T: ::subxt::Config> StorageApi<'a, T> { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { client } + } + #[doc = " The last pruned session, if any. All data stored by this module"] + #[doc = " references sessions."] + pub async fn last_pruned_session( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::core::option::Option<::core::primitive::u32>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? 
+ == [ + 125u8, 138u8, 99u8, 242u8, 9u8, 246u8, 215u8, 246u8, 141u8, 6u8, 129u8, + 87u8, 27u8, 58u8, 53u8, 121u8, 61u8, 119u8, 35u8, 104u8, 33u8, 43u8, + 179u8, 82u8, 244u8, 121u8, 174u8, 135u8, 87u8, 119u8, 236u8, 105u8, + ] + { + let entry = LastPrunedSession; + self.client.storage().fetch(&entry, block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " All ongoing or concluded disputes for the last several sessions."] + pub async fn disputes( + &self, + _0: &::core::primitive::u32, + _1: &runtime_types::polkadot_core_primitives::CandidateHash, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::core::option::Option< + runtime_types::polkadot_primitives::v2::DisputeState< + ::core::primitive::u32, + >, + >, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 157u8, 84u8, 172u8, 11u8, 64u8, 109u8, 34u8, 117u8, 91u8, 57u8, 117u8, + 163u8, 65u8, 172u8, 97u8, 39u8, 27u8, 10u8, 125u8, 194u8, 12u8, 252u8, + 180u8, 223u8, 118u8, 150u8, 160u8, 143u8, 217u8, 178u8, 28u8, 93u8, + ] + { + let entry = Disputes(_0, _1); + self.client.storage().fetch(&entry, block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " All ongoing or concluded disputes for the last several sessions."] + pub async fn disputes_iter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::subxt::KeyIter<'a, T, Disputes<'a>>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 157u8, 84u8, 172u8, 11u8, 64u8, 109u8, 34u8, 117u8, 91u8, 57u8, 117u8, + 163u8, 65u8, 172u8, 97u8, 39u8, 27u8, 10u8, 125u8, 194u8, 12u8, 252u8, + 180u8, 223u8, 118u8, 150u8, 160u8, 143u8, 217u8, 178u8, 28u8, 93u8, + ] + { + self.client.storage().iter(block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " All included blocks on the chain, as well as the block number in this chain that"] + #[doc = " should be reverted back to if the candidate is disputed and determined to be invalid."] + pub async fn included( + &self, + _0: &::core::primitive::u32, + _1: &runtime_types::polkadot_core_primitives::CandidateHash, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::core::option::Option<::core::primitive::u32>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 152u8, 13u8, 87u8, 4u8, 129u8, 181u8, 136u8, 38u8, 235u8, 70u8, 0u8, + 166u8, 190u8, 30u8, 247u8, 188u8, 192u8, 114u8, 13u8, 125u8, 254u8, + 120u8, 57u8, 91u8, 28u8, 160u8, 194u8, 242u8, 116u8, 146u8, 217u8, + 91u8, + ] + { + let entry = Included(_0, _1); + self.client.storage().fetch(&entry, block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " All included blocks on the chain, as well as the block number in this chain that"] + #[doc = " should be reverted back to if the candidate is disputed and determined to be invalid."] + pub async fn included_iter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::subxt::KeyIter<'a, T, Included<'a>>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? 
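+ // [editor's note] `Disputes` and `Included` are double maps keyed by
+ // `(session_index, CandidateHash)`, which is why their fetchers take two key arguments.
+ // A sketch of looking up one dispute, assuming a `StorageApi` handle `storage`:
+ //
+ //     let state = storage.disputes(&session_index, &candidate_hash, None).await?;
+ //     // `state` is an `Option<DisputeState<u32>>`; `None` means no dispute was recorded.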
+ == [ + 152u8, 13u8, 87u8, 4u8, 129u8, 181u8, 136u8, 38u8, 235u8, 70u8, 0u8, + 166u8, 190u8, 30u8, 247u8, 188u8, 192u8, 114u8, 13u8, 125u8, 254u8, + 120u8, 57u8, 91u8, 28u8, 160u8, 194u8, 242u8, 116u8, 146u8, 217u8, + 91u8, + ] + { + self.client.storage().iter(block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Maps session indices to a vector indicating the number of potentially-spam disputes"] + #[doc = " each validator is participating in. Potentially-spam disputes are remote disputes which have"] + #[doc = " fewer than `byzantine_threshold + 1` validators."] + #[doc = ""] + #[doc = " The i'th entry of the vector corresponds to the i'th validator in the session."] + pub async fn spam_slots( + &self, + _0: &::core::primitive::u32, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::core::option::Option<::std::vec::Vec<::core::primitive::u32>>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 172u8, 23u8, 120u8, 188u8, 71u8, 248u8, 252u8, 41u8, 132u8, 221u8, + 98u8, 215u8, 33u8, 242u8, 168u8, 196u8, 90u8, 123u8, 190u8, 27u8, + 147u8, 6u8, 196u8, 175u8, 198u8, 216u8, 50u8, 74u8, 138u8, 122u8, + 251u8, 238u8, + ] + { + let entry = SpamSlots(_0); + self.client.storage().fetch(&entry, block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Maps session indices to a vector indicating the number of potentially-spam disputes"] + #[doc = " each validator is participating in. Potentially-spam disputes are remote disputes which have"] + #[doc = " fewer than `byzantine_threshold + 1` validators."] + #[doc = ""] + #[doc = " The i'th entry of the vector corresponds to the i'th validator in the session."] + pub async fn spam_slots_iter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::subxt::KeyIter<'a, T, SpamSlots<'a>>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 172u8, 23u8, 120u8, 188u8, 71u8, 248u8, 252u8, 41u8, 132u8, 221u8, + 98u8, 215u8, 33u8, 242u8, 168u8, 196u8, 90u8, 123u8, 190u8, 27u8, + 147u8, 6u8, 196u8, 175u8, 198u8, 216u8, 50u8, 74u8, 138u8, 122u8, + 251u8, 238u8, + ] + { + self.client.storage().iter(block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Whether the chain is frozen. Starts as `None`. When this is `Some`,"] + #[doc = " the chain will not accept any new parachain blocks for backing or inclusion,"] + #[doc = " and its value indicates the last valid block number in the chain."] + #[doc = " It can only be set back to `None` by governance intervention."] + pub async fn frozen( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::core::option::Option<::core::primitive::u32>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? 
+ == [ + 133u8, 100u8, 86u8, 220u8, 180u8, 189u8, 65u8, 131u8, 64u8, 56u8, + 219u8, 47u8, 130u8, 167u8, 210u8, 125u8, 49u8, 7u8, 153u8, 254u8, 20u8, + 53u8, 218u8, 177u8, 122u8, 148u8, 16u8, 198u8, 251u8, 50u8, 194u8, + 128u8, + ] + { + let entry = Frozen; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + } + pub mod registrar { + use super::root_mod; + use super::runtime_types; + pub mod calls { + use super::root_mod; + use super::runtime_types; + type DispatchError = runtime_types::sp_runtime::DispatchError; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct Register { + pub id: runtime_types::polkadot_parachain::primitives::Id, + pub genesis_head: runtime_types::polkadot_parachain::primitives::HeadData, + pub validation_code: runtime_types::polkadot_parachain::primitives::ValidationCode, + } + impl ::subxt::Call for Register { + const PALLET: &'static str = "Registrar"; + const FUNCTION: &'static str = "register"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct ForceRegister { + pub who: ::subxt::sp_core::crypto::AccountId32, + pub deposit: ::core::primitive::u128, + pub id: runtime_types::polkadot_parachain::primitives::Id, + pub genesis_head: runtime_types::polkadot_parachain::primitives::HeadData, + pub validation_code: runtime_types::polkadot_parachain::primitives::ValidationCode, + } + impl ::subxt::Call for ForceRegister { + const PALLET: &'static str = "Registrar"; + const FUNCTION: &'static str = "force_register"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct Deregister { + pub id: runtime_types::polkadot_parachain::primitives::Id, + } + impl ::subxt::Call for Deregister { + const PALLET: &'static str = "Registrar"; + const FUNCTION: &'static str = "deregister"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct Swap { + pub id: runtime_types::polkadot_parachain::primitives::Id, + pub other: runtime_types::polkadot_parachain::primitives::Id, + } + impl ::subxt::Call for Swap { + const PALLET: &'static str = "Registrar"; + const FUNCTION: &'static str = "swap"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct ForceRemoveLock { + pub para: runtime_types::polkadot_parachain::primitives::Id, + } + impl ::subxt::Call for ForceRemoveLock { + const PALLET: &'static str = "Registrar"; + const FUNCTION: &'static str = "force_remove_lock"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct Reserve; + impl ::subxt::Call for Reserve { + const PALLET: &'static str = "Registrar"; + const FUNCTION: &'static str = "reserve"; + } + pub struct TransactionApi<'a, T: ::subxt::Config, X> { + client: &'a ::subxt::Client, + marker: ::core::marker::PhantomData, + } + impl<'a, T, X> TransactionApi<'a, T, X> + where + T: ::subxt::Config, + X: ::subxt::extrinsic::ExtrinsicParams, + { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { + client, + marker: ::core::marker::PhantomData, + } + } + #[doc = "Register head data and validation code for a reserved Para Id."] + #[doc = ""] + #[doc = "## Arguments"] + #[doc = "- `origin`: Must be called by a `Signed` origin."] + #[doc = "- `id`: The para ID. 
Must be owned/managed by the `origin` signing account."] + #[doc = "- `genesis_head`: The genesis head data of the parachain/thread."] + #[doc = "- `validation_code`: The initial validation code of the parachain/thread."] + #[doc = ""] + #[doc = "## Deposits/Fees"] + #[doc = "The origin signed account must reserve a corresponding deposit for the registration. Anything already"] + #[doc = "reserved previously for this para ID is accounted for."] + #[doc = ""] + #[doc = "## Events"] + #[doc = "The `Registered` event is emitted in case of success."] + pub fn register( + &self, + id: runtime_types::polkadot_parachain::primitives::Id, + genesis_head: runtime_types::polkadot_parachain::primitives::HeadData, + validation_code: runtime_types::polkadot_parachain::primitives::ValidationCode, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + Register, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 180u8, 21u8, 142u8, 73u8, 21u8, 31u8, 64u8, 210u8, 196u8, 4u8, 142u8, + 153u8, 172u8, 207u8, 95u8, 209u8, 177u8, 75u8, 202u8, 85u8, 95u8, + 208u8, 123u8, 237u8, 190u8, 148u8, 5u8, 64u8, 65u8, 191u8, 221u8, + 203u8, + ] + { + let call = Register { + id, + genesis_head, + validation_code, + }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Force the registration of a Para Id on the relay chain."] + #[doc = ""] + #[doc = "This function must be called by a Root origin."] + #[doc = ""] + #[doc = "The deposit taken can be specified for this registration. Any `ParaId`"] + #[doc = "can be registered, including sub-1000 IDs which are System Parachains."] + pub fn force_register( + &self, + who: ::subxt::sp_core::crypto::AccountId32, + deposit: ::core::primitive::u128, + id: runtime_types::polkadot_parachain::primitives::Id, + genesis_head: runtime_types::polkadot_parachain::primitives::HeadData, + validation_code: runtime_types::polkadot_parachain::primitives::ValidationCode, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + ForceRegister, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 191u8, 198u8, 172u8, 68u8, 118u8, 126u8, 110u8, 47u8, 193u8, 147u8, + 61u8, 27u8, 122u8, 107u8, 49u8, 222u8, 87u8, 199u8, 184u8, 247u8, + 153u8, 137u8, 205u8, 153u8, 6u8, 15u8, 246u8, 8u8, 36u8, 76u8, 54u8, + 63u8, + ] + { + let call = ForceRegister { + who, + deposit, + id, + genesis_head, + validation_code, + }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Deregister a Para Id, freeing all data and returning any deposit."] + #[doc = ""] + #[doc = "The caller must be Root, the `para` owner, or the `para` itself. The para must be a parathread."] + pub fn deregister( + &self, + id: runtime_types::polkadot_parachain::primitives::Id, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + Deregister, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? 
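+ // [editor's note] Hypothetical end-to-end sketch, not taken from this patch: in subxt
+ // these call builders are normally reached through the generated runtime API wrapper
+ // (assumed to live elsewhere in this generated file) rather than by constructing
+ // `TransactionApi` by hand, roughly:
+ //
+ //     let ext = api.tx().registrar().register(id, genesis_head, validation_code)?;
+ //     // `ext` is a `SubmittableExtrinsic`; it must still be signed and submitted
+ //     // (e.g. `sign_and_submit_then_watch(&signer)` -- the method name is an
+ //     // assumption about this subxt version).
+ //
+ // The `?` propagates the `IncompatibleMetadata` error from the hash guard these
+ // builders perform.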
+ == [ + 147u8, 4u8, 172u8, 215u8, 67u8, 142u8, 93u8, 245u8, 108u8, 83u8, 5u8, + 250u8, 87u8, 138u8, 231u8, 10u8, 159u8, 216u8, 85u8, 233u8, 244u8, + 200u8, 37u8, 33u8, 160u8, 143u8, 119u8, 11u8, 70u8, 177u8, 8u8, 123u8, + ] + { + let call = Deregister { id }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Swap a parachain with another parachain or parathread."] + #[doc = ""] + #[doc = "The origin must be Root, the `para` owner, or the `para` itself."] + #[doc = ""] + #[doc = "The swap will happen only if there is already an opposite swap pending. If there is not,"] + #[doc = "the swap will be stored in the pending swaps map, ready for a later confirmatory swap."] + #[doc = ""] + #[doc = "The `ParaId`s remain mapped to the same head data and code so external code can rely on"] + #[doc = "`ParaId` to be a long-term identifier of a notional \"parachain\". However, their"] + #[doc = "scheduling info (i.e. whether they're a parathread or parachain), auction information"] + #[doc = "and the auction deposit are switched."] + pub fn swap( + &self, + id: runtime_types::polkadot_parachain::primitives::Id, + other: runtime_types::polkadot_parachain::primitives::Id, + ) -> Result< + ::subxt::SubmittableExtrinsic<'a, T, X, Swap, DispatchError, root_mod::Event>, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 145u8, 163u8, 246u8, 239u8, 241u8, 209u8, 58u8, 241u8, 63u8, 134u8, + 102u8, 55u8, 217u8, 125u8, 176u8, 91u8, 27u8, 32u8, 220u8, 236u8, 18u8, + 20u8, 7u8, 187u8, 100u8, 116u8, 161u8, 133u8, 127u8, 187u8, 86u8, + 109u8, + ] + { + let call = Swap { id, other }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Remove a manager lock from a para. This will allow the manager of a"] + #[doc = "previously locked para to deregister or swap a para without using governance."] + #[doc = ""] + #[doc = "Can only be called by the Root origin."] + pub fn force_remove_lock( + &self, + para: runtime_types::polkadot_parachain::primitives::Id, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + ForceRemoveLock, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 205u8, 174u8, 132u8, 188u8, 1u8, 59u8, 82u8, 135u8, 123u8, 55u8, 144u8, + 39u8, 205u8, 171u8, 13u8, 252u8, 65u8, 56u8, 98u8, 216u8, 23u8, 175u8, + 16u8, 200u8, 198u8, 252u8, 133u8, 238u8, 81u8, 142u8, 254u8, 124u8, + ] + { + let call = ForceRemoveLock { para }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Reserve a Para Id on the relay chain."] + #[doc = ""] + #[doc = "This function will reserve a new Para Id to be owned/managed by the origin account."] + #[doc = "The origin account is able to register head data and validation code using `register` to create"] + #[doc = "a parathread. Using the Slots pallet, a parathread can then be upgraded to get a parachain slot."] + #[doc = ""] + #[doc = "## Arguments"] + #[doc = "- `origin`: Must be called by a `Signed` origin. 
Becomes the manager/owner of the new para ID."] + #[doc = ""] + #[doc = "## Deposits/Fees"] + #[doc = "The origin must reserve a deposit of `ParaDeposit` for the registration."] + #[doc = ""] + #[doc = "## Events"] + #[doc = "The `Reserved` event is emitted in case of success, which provides the ID reserved for use."] + pub fn reserve( + &self, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + Reserve, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 22u8, 210u8, 13u8, 54u8, 253u8, 13u8, 89u8, 174u8, 232u8, 119u8, 148u8, + 206u8, 130u8, 133u8, 199u8, 127u8, 201u8, 205u8, 8u8, 213u8, 108u8, + 93u8, 135u8, 88u8, 238u8, 171u8, 31u8, 193u8, 23u8, 113u8, 106u8, + 135u8, + ] + { + let call = Reserve {}; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + pub type Event = runtime_types::polkadot_runtime_common::paras_registrar::pallet::Event; + pub mod events { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct Registered( + pub runtime_types::polkadot_parachain::primitives::Id, + pub ::subxt::sp_core::crypto::AccountId32, + ); + impl ::subxt::Event for Registered { + const PALLET: &'static str = "Registrar"; + const EVENT: &'static str = "Registered"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct Deregistered(pub runtime_types::polkadot_parachain::primitives::Id); + impl ::subxt::Event for Deregistered { + const PALLET: &'static str = "Registrar"; + const EVENT: &'static str = "Deregistered"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct Reserved( + pub runtime_types::polkadot_parachain::primitives::Id, + pub ::subxt::sp_core::crypto::AccountId32, + ); + impl ::subxt::Event for Reserved { + const PALLET: &'static str = "Registrar"; + const EVENT: &'static str = "Reserved"; + } + } + pub mod storage { + use super::runtime_types; + pub struct PendingSwap<'a>(pub &'a runtime_types::polkadot_parachain::primitives::Id); + impl ::subxt::StorageEntry for PendingSwap<'_> { + const PALLET: &'static str = "Registrar"; + const STORAGE: &'static str = "PendingSwap"; + type Value = runtime_types::polkadot_parachain::primitives::Id; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( + &self.0, + ::subxt::StorageHasher::Twox64Concat, + )]) + } + } + pub struct Paras<'a>(pub &'a runtime_types::polkadot_parachain::primitives::Id); + impl ::subxt::StorageEntry for Paras<'_> { + const PALLET: &'static str = "Registrar"; + const STORAGE: &'static str = "Paras"; + type Value = runtime_types::polkadot_runtime_common::paras_registrar::ParaInfo< + ::subxt::sp_core::crypto::AccountId32, + ::core::primitive::u128, + >; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( + &self.0, + ::subxt::StorageHasher::Twox64Concat, + )]) + } + } + pub struct NextFreeParaId; + impl ::subxt::StorageEntry for NextFreeParaId { + const PALLET: &'static str = "Registrar"; + const STORAGE: &'static str = "NextFreeParaId"; + type Value = runtime_types::polkadot_parachain::primitives::Id; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct StorageApi<'a, T: ::subxt::Config> { + client: &'a ::subxt::Client, + } + impl<'a, T: 
::subxt::Config> StorageApi<'a, T> { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { client } + } + #[doc = " Pending swap operations."] + pub async fn pending_swap( + &self, + _0: &runtime_types::polkadot_parachain::primitives::Id, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::core::option::Option, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 130u8, 4u8, 116u8, 91u8, 196u8, 41u8, 66u8, 48u8, 17u8, 2u8, 255u8, + 189u8, 132u8, 10u8, 129u8, 102u8, 117u8, 56u8, 114u8, 231u8, 78u8, + 112u8, 11u8, 76u8, 152u8, 41u8, 70u8, 232u8, 212u8, 71u8, 193u8, 107u8, + ] + { + let entry = PendingSwap(_0); + self.client.storage().fetch(&entry, block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Pending swap operations."] + pub async fn pending_swap_iter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::subxt::KeyIter<'a, T, PendingSwap<'a>>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 130u8, 4u8, 116u8, 91u8, 196u8, 41u8, 66u8, 48u8, 17u8, 2u8, 255u8, + 189u8, 132u8, 10u8, 129u8, 102u8, 117u8, 56u8, 114u8, 231u8, 78u8, + 112u8, 11u8, 76u8, 152u8, 41u8, 70u8, 232u8, 212u8, 71u8, 193u8, 107u8, + ] + { + self.client.storage().iter(block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Amount held on deposit for each para and the original depositor."] + #[doc = ""] + #[doc = " The given account ID is responsible for registering the code and initial head data, but may only do"] + #[doc = " so if it isn't yet registered. (After that, it's up to governance to do so.)"] + pub async fn paras( + &self, + _0: &runtime_types::polkadot_parachain::primitives::Id, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::core::option::Option< + runtime_types::polkadot_runtime_common::paras_registrar::ParaInfo< + ::subxt::sp_core::crypto::AccountId32, + ::core::primitive::u128, + >, + >, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 180u8, 146u8, 122u8, 242u8, 222u8, 203u8, 19u8, 110u8, 22u8, 53u8, + 147u8, 127u8, 165u8, 158u8, 113u8, 196u8, 105u8, 209u8, 45u8, 250u8, + 163u8, 78u8, 120u8, 129u8, 180u8, 128u8, 63u8, 195u8, 71u8, 176u8, + 247u8, 206u8, + ] + { + let entry = Paras(_0); + self.client.storage().fetch(&entry, block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Amount held on deposit for each para and the original depositor."] + #[doc = ""] + #[doc = " The given account ID is responsible for registering the code and initial head data, but may only do"] + #[doc = " so if it isn't yet registered. (After that, it's up to governance to do so.)"] + pub async fn paras_iter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result<::subxt::KeyIter<'a, T, Paras<'a>>, ::subxt::BasicError> + { + if self.client.metadata().storage_hash::()? 
+ == [ + 180u8, 146u8, 122u8, 242u8, 222u8, 203u8, 19u8, 110u8, 22u8, 53u8, + 147u8, 127u8, 165u8, 158u8, 113u8, 196u8, 105u8, 209u8, 45u8, 250u8, + 163u8, 78u8, 120u8, 129u8, 180u8, 128u8, 63u8, 195u8, 71u8, 176u8, + 247u8, 206u8, + ] + { + self.client.storage().iter(block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The next free `ParaId`."] + pub async fn next_free_para_id( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + runtime_types::polkadot_parachain::primitives::Id, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 112u8, 52u8, 84u8, 181u8, 132u8, 61u8, 46u8, 69u8, 165u8, 85u8, 253u8, + 243u8, 228u8, 151u8, 15u8, 239u8, 172u8, 28u8, 102u8, 38u8, 155u8, + 90u8, 55u8, 162u8, 254u8, 139u8, 59u8, 186u8, 152u8, 239u8, 53u8, + 216u8, + ] + { + let entry = NextFreeParaId; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + pub mod constants { + use super::runtime_types; + pub struct ConstantsApi<'a, T: ::subxt::Config> { + client: &'a ::subxt::Client, + } + impl<'a, T: ::subxt::Config> ConstantsApi<'a, T> { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { client } + } + #[doc = " The deposit to be paid to run a parathread."] + #[doc = " This should include the cost for storing the genesis head and validation code."] + pub fn para_deposit( + &self, + ) -> ::core::result::Result<::core::primitive::u128, ::subxt::BasicError> + { + if self + .client + .metadata() + .constant_hash("Registrar", "ParaDeposit")? + == [ + 177u8, 138u8, 242u8, 166u8, 12u8, 97u8, 93u8, 2u8, 123u8, 45u8, 85u8, + 25u8, 46u8, 14u8, 221u8, 50u8, 157u8, 45u8, 243u8, 106u8, 171u8, 191u8, + 36u8, 192u8, 126u8, 91u8, 2u8, 240u8, 187u8, 201u8, 39u8, 110u8, + ] + { + let pallet = self.client.metadata().pallet("Registrar")?; + let constant = pallet.constant("ParaDeposit")?; + let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; + Ok(value) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The deposit to be paid per byte stored on chain."] + pub fn data_deposit_per_byte( + &self, + ) -> ::core::result::Result<::core::primitive::u128, ::subxt::BasicError> + { + if self + .client + .metadata() + .constant_hash("Registrar", "DataDepositPerByte")? 
+ == [ + 75u8, 45u8, 63u8, 192u8, 73u8, 118u8, 130u8, 12u8, 38u8, 42u8, 196u8, + 189u8, 156u8, 218u8, 152u8, 165u8, 124u8, 253u8, 108u8, 113u8, 3u8, + 149u8, 83u8, 27u8, 234u8, 163u8, 225u8, 231u8, 179u8, 212u8, 26u8, + 156u8, + ] + { + let pallet = self.client.metadata().pallet("Registrar")?; + let constant = pallet.constant("DataDepositPerByte")?; + let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; + Ok(value) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + } + pub mod auctions { + use super::root_mod; + use super::runtime_types; + pub mod calls { + use super::root_mod; + use super::runtime_types; + type DispatchError = runtime_types::sp_runtime::DispatchError; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct NewAuction { + #[codec(compact)] + pub duration: ::core::primitive::u32, + #[codec(compact)] + pub lease_period_index: ::core::primitive::u32, + } + impl ::subxt::Call for NewAuction { + const PALLET: &'static str = "Auctions"; + const FUNCTION: &'static str = "new_auction"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct Bid { + #[codec(compact)] + pub para: runtime_types::polkadot_parachain::primitives::Id, + #[codec(compact)] + pub auction_index: ::core::primitive::u32, + #[codec(compact)] + pub first_slot: ::core::primitive::u32, + #[codec(compact)] + pub last_slot: ::core::primitive::u32, + #[codec(compact)] + pub amount: ::core::primitive::u128, + } + impl ::subxt::Call for Bid { + const PALLET: &'static str = "Auctions"; + const FUNCTION: &'static str = "bid"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct CancelAuction; + impl ::subxt::Call for CancelAuction { + const PALLET: &'static str = "Auctions"; + const FUNCTION: &'static str = "cancel_auction"; + } + pub struct TransactionApi<'a, T: ::subxt::Config, X> { + client: &'a ::subxt::Client, + marker: ::core::marker::PhantomData, + } + impl<'a, T, X> TransactionApi<'a, T, X> + where + T: ::subxt::Config, + X: ::subxt::extrinsic::ExtrinsicParams, + { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { + client, + marker: ::core::marker::PhantomData, + } + } + #[doc = "Create a new auction."] + #[doc = ""] + #[doc = "This can only happen when there isn't already an auction in progress and may only be"] + #[doc = "called by the root origin. Accepts the `duration` of this auction and the"] + #[doc = "`lease_period_index` of the initial lease period of the four that are to be auctioned."] + pub fn new_auction( + &self, + duration: ::core::primitive::u32, + lease_period_index: ::core::primitive::u32, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + NewAuction, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? 
+ == [ + 12u8, 43u8, 152u8, 0u8, 229u8, 15u8, 32u8, 205u8, 208u8, 71u8, 57u8, + 169u8, 201u8, 177u8, 52u8, 10u8, 93u8, 183u8, 5u8, 156u8, 231u8, 188u8, + 77u8, 238u8, 119u8, 238u8, 87u8, 251u8, 121u8, 199u8, 18u8, 129u8, + ] + { + let call = NewAuction { + duration, + lease_period_index, + }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Make a new bid from an account (including a parachain account) for deploying a new"] + #[doc = "parachain."] + #[doc = ""] + #[doc = "Multiple simultaneous bids from the same bidder are allowed only as long as all active"] + #[doc = "bids overlap each other (i.e. are mutually exclusive). Bids cannot be redacted."] + #[doc = ""] + #[doc = "- `sub` is the sub-bidder ID, allowing for multiple competing bids to be made by (and"] + #[doc = "funded by) the same account."] + #[doc = "- `auction_index` is the index of the auction to bid on. Should just be the present"] + #[doc = "value of `AuctionCounter`."] + #[doc = "- `first_slot` is the first lease period index of the range to bid on. This is the"] + #[doc = "absolute lease period index value, not an auction-specific offset."] + #[doc = "- `last_slot` is the last lease period index of the range to bid on. This is the"] + #[doc = "absolute lease period index value, not an auction-specific offset."] + #[doc = "- `amount` is the amount to bid to be held as deposit for the parachain should the"] + #[doc = "bid win. This amount is held throughout the range."] + pub fn bid( + &self, + para: runtime_types::polkadot_parachain::primitives::Id, + auction_index: ::core::primitive::u32, + first_slot: ::core::primitive::u32, + last_slot: ::core::primitive::u32, + amount: ::core::primitive::u128, + ) -> Result< + ::subxt::SubmittableExtrinsic<'a, T, X, Bid, DispatchError, root_mod::Event>, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 206u8, 22u8, 15u8, 251u8, 222u8, 193u8, 192u8, 125u8, 160u8, 131u8, + 209u8, 129u8, 105u8, 46u8, 77u8, 204u8, 107u8, 112u8, 13u8, 188u8, + 193u8, 73u8, 225u8, 232u8, 179u8, 205u8, 39u8, 69u8, 242u8, 79u8, 36u8, + 121u8, + ] + { + let call = Bid { + para, + auction_index, + first_slot, + last_slot, + amount, + }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Cancel an in-progress auction."] + #[doc = ""] + #[doc = "Can only be called by Root origin."] + pub fn cancel_auction( + &self, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + CancelAuction, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 182u8, 223u8, 178u8, 136u8, 1u8, 115u8, 229u8, 78u8, 166u8, 128u8, + 28u8, 106u8, 6u8, 248u8, 46u8, 55u8, 110u8, 120u8, 213u8, 11u8, 90u8, + 217u8, 42u8, 120u8, 47u8, 83u8, 126u8, 216u8, 236u8, 251u8, 255u8, + 50u8, + ] + { + let call = CancelAuction {}; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + pub type Event = runtime_types::polkadot_runtime_common::auctions::pallet::Event; + pub mod events { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "An auction started. 
Provides its index and the block number where it will begin to"] + #[doc = "close and the first lease period of the quadruplet that is auctioned."] + #[doc = "`[auction_index, lease_period, ending]`"] + pub struct AuctionStarted( + pub ::core::primitive::u32, + pub ::core::primitive::u32, + pub ::core::primitive::u32, + ); + impl ::subxt::Event for AuctionStarted { + const PALLET: &'static str = "Auctions"; + const EVENT: &'static str = "AuctionStarted"; + } + #[derive( + :: subxt :: codec :: CompactAs, + :: subxt :: codec :: Decode, + :: subxt :: codec :: Encode, + Debug, + )] + #[doc = "An auction ended. All funds become unreserved. `[auction_index]`"] + pub struct AuctionClosed(pub ::core::primitive::u32); + impl ::subxt::Event for AuctionClosed { + const PALLET: &'static str = "Auctions"; + const EVENT: &'static str = "AuctionClosed"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "Funds were reserved for a winning bid. First balance is the extra amount reserved."] + #[doc = "Second is the total. `[bidder, extra_reserved, total_amount]`"] + pub struct Reserved( + pub ::subxt::sp_core::crypto::AccountId32, + pub ::core::primitive::u128, + pub ::core::primitive::u128, + ); + impl ::subxt::Event for Reserved { + const PALLET: &'static str = "Auctions"; + const EVENT: &'static str = "Reserved"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "Funds were unreserved since bidder is no longer active. `[bidder, amount]`"] + pub struct Unreserved( + pub ::subxt::sp_core::crypto::AccountId32, + pub ::core::primitive::u128, + ); + impl ::subxt::Event for Unreserved { + const PALLET: &'static str = "Auctions"; + const EVENT: &'static str = "Unreserved"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "Someone attempted to lease the same slot twice for a parachain. The amount is held in reserve"] + #[doc = "but no parachain slot has been leased."] + #[doc = "`[parachain_id, leaser, amount]`"] + pub struct ReserveConfiscated( + pub runtime_types::polkadot_parachain::primitives::Id, + pub ::subxt::sp_core::crypto::AccountId32, + pub ::core::primitive::u128, + ); + impl ::subxt::Event for ReserveConfiscated { + const PALLET: &'static str = "Auctions"; + const EVENT: &'static str = "ReserveConfiscated"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "A new bid has been accepted as the current winner."] + #[doc = "`[who, para_id, amount, first_slot, last_slot]`"] + pub struct BidAccepted( + pub ::subxt::sp_core::crypto::AccountId32, + pub runtime_types::polkadot_parachain::primitives::Id, + pub ::core::primitive::u128, + pub ::core::primitive::u32, + pub ::core::primitive::u32, + ); + impl ::subxt::Event for BidAccepted { + const PALLET: &'static str = "Auctions"; + const EVENT: &'static str = "BidAccepted"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "The winning offset was chosen for an auction. 
This will map into the `Winning` storage map."] + #[doc = "`[auction_index, block_number]`"] + pub struct WinningOffset(pub ::core::primitive::u32, pub ::core::primitive::u32); + impl ::subxt::Event for WinningOffset { + const PALLET: &'static str = "Auctions"; + const EVENT: &'static str = "WinningOffset"; + } + } + pub mod storage { + use super::runtime_types; + pub struct AuctionCounter; + impl ::subxt::StorageEntry for AuctionCounter { + const PALLET: &'static str = "Auctions"; + const STORAGE: &'static str = "AuctionCounter"; + type Value = ::core::primitive::u32; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct AuctionInfo; + impl ::subxt::StorageEntry for AuctionInfo { + const PALLET: &'static str = "Auctions"; + const STORAGE: &'static str = "AuctionInfo"; + type Value = (::core::primitive::u32, ::core::primitive::u32); + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct ReservedAmounts<'a>( + pub &'a ::subxt::sp_core::crypto::AccountId32, + pub &'a runtime_types::polkadot_parachain::primitives::Id, + ); + impl ::subxt::StorageEntry for ReservedAmounts<'_> { + const PALLET: &'static str = "Auctions"; + const STORAGE: &'static str = "ReservedAmounts"; + type Value = ::core::primitive::u128; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( + &(&self.0, &self.1), + ::subxt::StorageHasher::Twox64Concat, + )]) + } + } + pub struct Winning<'a>(pub &'a ::core::primitive::u32); + impl ::subxt::StorageEntry for Winning<'_> { + const PALLET: &'static str = "Auctions"; + const STORAGE: &'static str = "Winning"; + type Value = [::core::option::Option<( + ::subxt::sp_core::crypto::AccountId32, + runtime_types::polkadot_parachain::primitives::Id, + ::core::primitive::u128, + )>; 36usize]; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( + &self.0, + ::subxt::StorageHasher::Twox64Concat, + )]) + } + } + pub struct StorageApi<'a, T: ::subxt::Config> { + client: &'a ::subxt::Client, + } + impl<'a, T: ::subxt::Config> StorageApi<'a, T> { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { client } + } + #[doc = " Number of auctions started so far."] + pub async fn auction_counter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> + { + if self.client.metadata().storage_hash::()? + == [ + 67u8, 247u8, 96u8, 152u8, 0u8, 224u8, 230u8, 98u8, 194u8, 107u8, 3u8, + 203u8, 51u8, 201u8, 149u8, 22u8, 184u8, 80u8, 251u8, 239u8, 253u8, + 19u8, 58u8, 192u8, 65u8, 96u8, 189u8, 54u8, 175u8, 130u8, 143u8, 181u8, + ] + { + let entry = AuctionCounter; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Information relating to the current auction, if there is one."] + #[doc = ""] + #[doc = " The first item in the tuple is the lease period index that the first of the four"] + #[doc = " contiguous lease periods on auction is for. The second is the block number when the"] + #[doc = " auction will \"begin to end\", i.e. 
the first block of the Ending Period of the auction."] + pub async fn auction_info( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::core::option::Option<(::core::primitive::u32, ::core::primitive::u32)>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 73u8, 216u8, 173u8, 230u8, 132u8, 78u8, 83u8, 62u8, 200u8, 69u8, 17u8, + 73u8, 57u8, 107u8, 160u8, 90u8, 147u8, 84u8, 29u8, 110u8, 144u8, 215u8, + 169u8, 110u8, 217u8, 77u8, 109u8, 204u8, 1u8, 164u8, 95u8, 83u8, + ] + { + let entry = AuctionInfo; + self.client.storage().fetch(&entry, block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Amounts currently reserved in the accounts of the bidders currently winning"] + #[doc = " (sub-)ranges."] + pub async fn reserved_amounts( + &self, + _0: &::subxt::sp_core::crypto::AccountId32, + _1: &runtime_types::polkadot_parachain::primitives::Id, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::core::option::Option<::core::primitive::u128>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 195u8, 56u8, 142u8, 154u8, 193u8, 115u8, 13u8, 64u8, 101u8, 179u8, + 69u8, 175u8, 185u8, 12u8, 31u8, 65u8, 147u8, 211u8, 74u8, 40u8, 190u8, + 254u8, 190u8, 176u8, 117u8, 159u8, 234u8, 214u8, 157u8, 83u8, 56u8, + 192u8, + ] + { + let entry = ReservedAmounts(_0, _1); + self.client.storage().fetch(&entry, block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Amounts currently reserved in the accounts of the bidders currently winning"] + #[doc = " (sub-)ranges."] + pub async fn reserved_amounts_iter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::subxt::KeyIter<'a, T, ReservedAmounts<'a>>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 195u8, 56u8, 142u8, 154u8, 193u8, 115u8, 13u8, 64u8, 101u8, 179u8, + 69u8, 175u8, 185u8, 12u8, 31u8, 65u8, 147u8, 211u8, 74u8, 40u8, 190u8, + 254u8, 190u8, 176u8, 117u8, 159u8, 234u8, 214u8, 157u8, 83u8, 56u8, + 192u8, + ] + { + self.client.storage().iter(block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The winning bids for each of the 10 ranges at each sample in the final Ending Period of"] + #[doc = " the current auction. The map's key is the 0-based index into the Sample Size. The"] + #[doc = " first sample of the ending period is 0; the last is `Sample Size - 1`."] + pub async fn winning( + &self, + _0: &::core::primitive::u32, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::core::option::Option< + [::core::option::Option<( + ::subxt::sp_core::crypto::AccountId32, + runtime_types::polkadot_parachain::primitives::Id, + ::core::primitive::u128, + )>; 36usize], + >, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 152u8, 246u8, 158u8, 193u8, 21u8, 56u8, 204u8, 29u8, 146u8, 90u8, + 133u8, 246u8, 75u8, 111u8, 157u8, 150u8, 175u8, 33u8, 127u8, 215u8, + 158u8, 55u8, 231u8, 78u8, 143u8, 128u8, 92u8, 70u8, 61u8, 23u8, 43u8, + 68u8, + ] + { + let entry = Winning(_0); + self.client.storage().fetch(&entry, block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The winning bids for each of the 10 ranges at each sample in the final Ending Period of"] + #[doc = " the current auction. 
The map's key is the 0-based index into the Sample Size. The"] + #[doc = " first sample of the ending period is 0; the last is `Sample Size - 1`."] + pub async fn winning_iter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result<::subxt::KeyIter<'a, T, Winning<'a>>, ::subxt::BasicError> + { + if self.client.metadata().storage_hash::()? + == [ + 152u8, 246u8, 158u8, 193u8, 21u8, 56u8, 204u8, 29u8, 146u8, 90u8, + 133u8, 246u8, 75u8, 111u8, 157u8, 150u8, 175u8, 33u8, 127u8, 215u8, + 158u8, 55u8, 231u8, 78u8, 143u8, 128u8, 92u8, 70u8, 61u8, 23u8, 43u8, + 68u8, + ] + { + self.client.storage().iter(block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + pub mod constants { + use super::runtime_types; + pub struct ConstantsApi<'a, T: ::subxt::Config> { + client: &'a ::subxt::Client, + } + impl<'a, T: ::subxt::Config> ConstantsApi<'a, T> { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { client } + } + #[doc = " The number of blocks over which an auction may be retroactively ended."] + pub fn ending_period( + &self, + ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> + { + if self + .client + .metadata() + .constant_hash("Auctions", "EndingPeriod")? + == [ + 41u8, 212u8, 17u8, 243u8, 76u8, 205u8, 95u8, 195u8, 181u8, 1u8, 59u8, + 31u8, 204u8, 20u8, 83u8, 117u8, 69u8, 25u8, 74u8, 59u8, 18u8, 11u8, + 110u8, 123u8, 62u8, 254u8, 188u8, 62u8, 89u8, 80u8, 213u8, 97u8, + ] + { + let pallet = self.client.metadata().pallet("Auctions")?; + let constant = pallet.constant("EndingPeriod")?; + let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; + Ok(value) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The length of each sample to take during the ending period."] + #[doc = ""] + #[doc = " `EndingPeriod` / `SampleLength` = Total # of Samples"] + pub fn sample_length( + &self, + ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> + { + if self + .client + .metadata() + .constant_hash("Auctions", "SampleLength")? + == [ + 120u8, 204u8, 79u8, 231u8, 92u8, 177u8, 250u8, 183u8, 207u8, 218u8, + 171u8, 81u8, 94u8, 92u8, 233u8, 87u8, 74u8, 212u8, 178u8, 104u8, 137u8, + 187u8, 31u8, 163u8, 157u8, 136u8, 111u8, 129u8, 149u8, 85u8, 122u8, + 181u8, + ] + { + let pallet = self.client.metadata().pallet("Auctions")?; + let constant = pallet.constant("SampleLength")?; + let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; + Ok(value) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + pub fn slot_range_count( + &self, + ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> + { + if self + .client + .metadata() + .constant_hash("Auctions", "SlotRangeCount")? + == [ + 32u8, 147u8, 38u8, 54u8, 172u8, 189u8, 240u8, 136u8, 216u8, 182u8, + 191u8, 129u8, 122u8, 1u8, 129u8, 244u8, 180u8, 210u8, 219u8, 142u8, + 224u8, 151u8, 237u8, 192u8, 103u8, 206u8, 101u8, 131u8, 78u8, 181u8, + 163u8, 44u8, + ] + { + let pallet = self.client.metadata().pallet("Auctions")?; + let constant = pallet.constant("SlotRangeCount")?; + let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; + Ok(value) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + pub fn lease_periods_per_slot( + &self, + ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> + { + if self + .client + .metadata() + .constant_hash("Auctions", "LeasePeriodsPerSlot")? 
+ == [ + 174u8, 18u8, 150u8, 44u8, 219u8, 36u8, 218u8, 28u8, 34u8, 132u8, 235u8, + 161u8, 23u8, 173u8, 80u8, 175u8, 93u8, 163u8, 6u8, 226u8, 11u8, 212u8, + 186u8, 119u8, 185u8, 85u8, 111u8, 216u8, 214u8, 111u8, 148u8, 28u8, + ] + { + let pallet = self.client.metadata().pallet("Auctions")?; + let constant = pallet.constant("LeasePeriodsPerSlot")?; + let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; + Ok(value) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + } + pub mod crowdloan { + use super::root_mod; + use super::runtime_types; + pub mod calls { + use super::root_mod; + use super::runtime_types; + type DispatchError = runtime_types::sp_runtime::DispatchError; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct Create { + #[codec(compact)] + pub index: runtime_types::polkadot_parachain::primitives::Id, + #[codec(compact)] + pub cap: ::core::primitive::u128, + #[codec(compact)] + pub first_period: ::core::primitive::u32, + #[codec(compact)] + pub last_period: ::core::primitive::u32, + #[codec(compact)] + pub end: ::core::primitive::u32, + pub verifier: ::core::option::Option, + } + impl ::subxt::Call for Create { + const PALLET: &'static str = "Crowdloan"; + const FUNCTION: &'static str = "create"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct Contribute { + #[codec(compact)] + pub index: runtime_types::polkadot_parachain::primitives::Id, + #[codec(compact)] + pub value: ::core::primitive::u128, + pub signature: ::core::option::Option, + } + impl ::subxt::Call for Contribute { + const PALLET: &'static str = "Crowdloan"; + const FUNCTION: &'static str = "contribute"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct Withdraw { + pub who: ::subxt::sp_core::crypto::AccountId32, + #[codec(compact)] + pub index: runtime_types::polkadot_parachain::primitives::Id, + } + impl ::subxt::Call for Withdraw { + const PALLET: &'static str = "Crowdloan"; + const FUNCTION: &'static str = "withdraw"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct Refund { + #[codec(compact)] + pub index: runtime_types::polkadot_parachain::primitives::Id, + } + impl ::subxt::Call for Refund { + const PALLET: &'static str = "Crowdloan"; + const FUNCTION: &'static str = "refund"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct Dissolve { + #[codec(compact)] + pub index: runtime_types::polkadot_parachain::primitives::Id, + } + impl ::subxt::Call for Dissolve { + const PALLET: &'static str = "Crowdloan"; + const FUNCTION: &'static str = "dissolve"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct Edit { + #[codec(compact)] + pub index: runtime_types::polkadot_parachain::primitives::Id, + #[codec(compact)] + pub cap: ::core::primitive::u128, + #[codec(compact)] + pub first_period: ::core::primitive::u32, + #[codec(compact)] + pub last_period: ::core::primitive::u32, + #[codec(compact)] + pub end: ::core::primitive::u32, + pub verifier: ::core::option::Option, + } + impl ::subxt::Call for Edit { + const PALLET: &'static str = "Crowdloan"; + const FUNCTION: &'static str = "edit"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct AddMemo { + pub index: runtime_types::polkadot_parachain::primitives::Id, + pub memo: ::std::vec::Vec<::core::primitive::u8>, + } + impl 
::subxt::Call for AddMemo { + const PALLET: &'static str = "Crowdloan"; + const FUNCTION: &'static str = "add_memo"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct Poke { + pub index: runtime_types::polkadot_parachain::primitives::Id, + } + impl ::subxt::Call for Poke { + const PALLET: &'static str = "Crowdloan"; + const FUNCTION: &'static str = "poke"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct ContributeAll { + #[codec(compact)] + pub index: runtime_types::polkadot_parachain::primitives::Id, + pub signature: ::core::option::Option, + } + impl ::subxt::Call for ContributeAll { + const PALLET: &'static str = "Crowdloan"; + const FUNCTION: &'static str = "contribute_all"; + } + pub struct TransactionApi<'a, T: ::subxt::Config, X> { + client: &'a ::subxt::Client, + marker: ::core::marker::PhantomData, + } + impl<'a, T, X> TransactionApi<'a, T, X> + where + T: ::subxt::Config, + X: ::subxt::extrinsic::ExtrinsicParams, + { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { + client, + marker: ::core::marker::PhantomData, + } + } + #[doc = "Create a new crowdloaning campaign for a parachain slot with the given lease period range."] + #[doc = ""] + #[doc = "This applies a lock to your parachain configuration, ensuring that it cannot be changed"] + #[doc = "by the parachain manager."] + pub fn create( + &self, + index: runtime_types::polkadot_parachain::primitives::Id, + cap: ::core::primitive::u128, + first_period: ::core::primitive::u32, + last_period: ::core::primitive::u32, + end: ::core::primitive::u32, + verifier: ::core::option::Option, + ) -> Result< + ::subxt::SubmittableExtrinsic<'a, T, X, Create, DispatchError, root_mod::Event>, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 94u8, 115u8, 154u8, 239u8, 215u8, 180u8, 175u8, 240u8, 137u8, 240u8, + 74u8, 159u8, 67u8, 54u8, 69u8, 199u8, 161u8, 155u8, 243u8, 222u8, + 205u8, 163u8, 142u8, 251u8, 156u8, 94u8, 65u8, 153u8, 39u8, 226u8, + 79u8, 195u8, + ] + { + let call = Create { + index, + cap, + first_period, + last_period, + end, + verifier, + }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Contribute to a crowd sale. This will transfer some balance over to fund a parachain"] + #[doc = "slot. It will be withdrawable when the crowdloan has ended and the funds are unused."] + pub fn contribute( + &self, + index: runtime_types::polkadot_parachain::primitives::Id, + value: ::core::primitive::u128, + signature: ::core::option::Option, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + Contribute, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 95u8, 255u8, 35u8, 30u8, 44u8, 150u8, 10u8, 166u8, 0u8, 204u8, 106u8, + 59u8, 150u8, 254u8, 216u8, 128u8, 232u8, 129u8, 30u8, 101u8, 196u8, + 198u8, 180u8, 156u8, 122u8, 252u8, 139u8, 28u8, 164u8, 115u8, 153u8, + 109u8, + ] + { + let call = Contribute { + index, + value, + signature, + }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Withdraw full balance of a specific contributor."] + #[doc = ""] + #[doc = "Origin must be signed, but can come from anyone."] + #[doc = ""] + #[doc = "The fund must be either in, or ready for, retirement. 
For a fund to be *in* retirement, then the retirement"] + #[doc = "flag must be set. For a fund to be ready for retirement, then:"] + #[doc = "- it must not already be in retirement;"] + #[doc = "- the amount of raised funds must be bigger than the _free_ balance of the account;"] + #[doc = "- and either:"] + #[doc = " - the block number must be at least `end`; or"] + #[doc = " - the current lease period must be greater than the fund's `last_period`."] + #[doc = ""] + #[doc = "In this case, the fund's retirement flag is set and its `end` is reset to the current block"] + #[doc = "number."] + #[doc = ""] + #[doc = "- `who`: The account whose contribution should be withdrawn."] + #[doc = "- `index`: The parachain to whose crowdloan the contribution was made."] + pub fn withdraw( + &self, + who: ::subxt::sp_core::crypto::AccountId32, + index: runtime_types::polkadot_parachain::primitives::Id, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + Withdraw, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 67u8, 65u8, 89u8, 108u8, 193u8, 99u8, 74u8, 32u8, 163u8, 13u8, 81u8, + 131u8, 64u8, 107u8, 72u8, 23u8, 35u8, 177u8, 130u8, 171u8, 70u8, 232u8, + 246u8, 254u8, 67u8, 219u8, 84u8, 96u8, 165u8, 20u8, 183u8, 209u8, + ] + { + let call = Withdraw { who, index }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Automatically refund contributors of an ended crowdloan."] + #[doc = "Due to weight restrictions, this function may need to be called multiple"] + #[doc = "times to fully refund all users. We will refund `RemoveKeysLimit` users at a time."] + #[doc = ""] + #[doc = "Origin must be signed, but can come from anyone."] + pub fn refund( + &self, + index: runtime_types::polkadot_parachain::primitives::Id, + ) -> Result< + ::subxt::SubmittableExtrinsic<'a, T, X, Refund, DispatchError, root_mod::Event>, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 202u8, 206u8, 79u8, 226u8, 114u8, 228u8, 110u8, 18u8, 178u8, 173u8, + 23u8, 83u8, 64u8, 11u8, 201u8, 19u8, 57u8, 75u8, 181u8, 241u8, 231u8, + 189u8, 211u8, 48u8, 82u8, 64u8, 220u8, 22u8, 247u8, 7u8, 68u8, 211u8, + ] + { + let call = Refund { index }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Remove a fund after the retirement period has ended and all funds have been returned."] + pub fn dissolve( + &self, + index: runtime_types::polkadot_parachain::primitives::Id, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + Dissolve, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? 
+ == [ + 210u8, 3u8, 221u8, 185u8, 64u8, 178u8, 56u8, 132u8, 72u8, 127u8, 105u8, + 31u8, 167u8, 107u8, 127u8, 224u8, 174u8, 221u8, 111u8, 105u8, 47u8, + 247u8, 10u8, 5u8, 37u8, 180u8, 61u8, 180u8, 3u8, 164u8, 196u8, 194u8, + ] + { + let call = Dissolve { index }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Edit the configuration for an in-progress crowdloan."] + #[doc = ""] + #[doc = "Can only be called by Root origin."] + pub fn edit( + &self, + index: runtime_types::polkadot_parachain::primitives::Id, + cap: ::core::primitive::u128, + first_period: ::core::primitive::u32, + last_period: ::core::primitive::u32, + end: ::core::primitive::u32, + verifier: ::core::option::Option, + ) -> Result< + ::subxt::SubmittableExtrinsic<'a, T, X, Edit, DispatchError, root_mod::Event>, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 34u8, 43u8, 47u8, 39u8, 106u8, 245u8, 49u8, 40u8, 191u8, 195u8, 202u8, + 113u8, 137u8, 98u8, 143u8, 172u8, 191u8, 55u8, 240u8, 75u8, 234u8, + 180u8, 90u8, 206u8, 93u8, 214u8, 115u8, 215u8, 140u8, 144u8, 105u8, + 89u8, + ] + { + let call = Edit { + index, + cap, + first_period, + last_period, + end, + verifier, + }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Add an optional memo to an existing crowdloan contribution."] + #[doc = ""] + #[doc = "Origin must be Signed, and the user must have contributed to the crowdloan."] + pub fn add_memo( + &self, + index: runtime_types::polkadot_parachain::primitives::Id, + memo: ::std::vec::Vec<::core::primitive::u8>, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + AddMemo, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 97u8, 218u8, 115u8, 187u8, 167u8, 70u8, 229u8, 231u8, 148u8, 77u8, + 169u8, 139u8, 16u8, 15u8, 116u8, 128u8, 32u8, 59u8, 154u8, 146u8, 12u8, + 65u8, 36u8, 36u8, 69u8, 19u8, 74u8, 79u8, 66u8, 25u8, 215u8, 57u8, + ] + { + let call = AddMemo { index, memo }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Poke the fund into `NewRaise`"] + #[doc = ""] + #[doc = "Origin must be Signed, and the fund has non-zero raise."] + pub fn poke( + &self, + index: runtime_types::polkadot_parachain::primitives::Id, + ) -> Result< + ::subxt::SubmittableExtrinsic<'a, T, X, Poke, DispatchError, root_mod::Event>, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 99u8, 158u8, 48u8, 3u8, 228u8, 210u8, 249u8, 42u8, 44u8, 49u8, 24u8, + 212u8, 69u8, 69u8, 189u8, 194u8, 124u8, 251u8, 25u8, 123u8, 234u8, 3u8, + 184u8, 227u8, 1u8, 195u8, 219u8, 118u8, 235u8, 237u8, 11u8, 159u8, + ] + { + let call = Poke { index }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Contribute your entire balance to a crowd sale. This will transfer the entire balance of a user over to fund a parachain"] + #[doc = "slot. 
It will be withdrawable when the crowdloan has ended and the funds are unused."] + pub fn contribute_all( + &self, + index: runtime_types::polkadot_parachain::primitives::Id, + signature: ::core::option::Option, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + ContributeAll, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 64u8, 224u8, 233u8, 196u8, 182u8, 109u8, 69u8, 220u8, 46u8, 60u8, + 189u8, 125u8, 17u8, 28u8, 207u8, 63u8, 129u8, 56u8, 32u8, 239u8, 182u8, + 214u8, 237u8, 95u8, 228u8, 171u8, 209u8, 233u8, 205u8, 212u8, 147u8, + 176u8, + ] + { + let call = ContributeAll { index, signature }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + pub type Event = runtime_types::polkadot_runtime_common::crowdloan::pallet::Event; + pub mod events { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "Create a new crowdloaning campaign. `[fund_index]`"] + pub struct Created(pub runtime_types::polkadot_parachain::primitives::Id); + impl ::subxt::Event for Created { + const PALLET: &'static str = "Crowdloan"; + const EVENT: &'static str = "Created"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "Contributed to a crowd sale. `[who, fund_index, amount]`"] + pub struct Contributed( + pub ::subxt::sp_core::crypto::AccountId32, + pub runtime_types::polkadot_parachain::primitives::Id, + pub ::core::primitive::u128, + ); + impl ::subxt::Event for Contributed { + const PALLET: &'static str = "Crowdloan"; + const EVENT: &'static str = "Contributed"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "Withdrew full balance of a contributor. `[who, fund_index, amount]`"] + pub struct Withdrew( + pub ::subxt::sp_core::crypto::AccountId32, + pub runtime_types::polkadot_parachain::primitives::Id, + pub ::core::primitive::u128, + ); + impl ::subxt::Event for Withdrew { + const PALLET: &'static str = "Crowdloan"; + const EVENT: &'static str = "Withdrew"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "The loans in a fund have been partially dissolved, i.e. there are some left"] + #[doc = "over child keys that still need to be killed. `[fund_index]`"] + pub struct PartiallyRefunded(pub runtime_types::polkadot_parachain::primitives::Id); + impl ::subxt::Event for PartiallyRefunded { + const PALLET: &'static str = "Crowdloan"; + const EVENT: &'static str = "PartiallyRefunded"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "All loans in a fund have been refunded. `[fund_index]`"] + pub struct AllRefunded(pub runtime_types::polkadot_parachain::primitives::Id); + impl ::subxt::Event for AllRefunded { + const PALLET: &'static str = "Crowdloan"; + const EVENT: &'static str = "AllRefunded"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "Fund is dissolved. 
`[fund_index]`"] + pub struct Dissolved(pub runtime_types::polkadot_parachain::primitives::Id); + impl ::subxt::Event for Dissolved { + const PALLET: &'static str = "Crowdloan"; + const EVENT: &'static str = "Dissolved"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "The result of trying to submit a new bid to the Slots pallet."] + pub struct HandleBidResult( + pub runtime_types::polkadot_parachain::primitives::Id, + pub ::core::result::Result<(), runtime_types::sp_runtime::DispatchError>, + ); + impl ::subxt::Event for HandleBidResult { + const PALLET: &'static str = "Crowdloan"; + const EVENT: &'static str = "HandleBidResult"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "The configuration to a crowdloan has been edited. `[fund_index]`"] + pub struct Edited(pub runtime_types::polkadot_parachain::primitives::Id); + impl ::subxt::Event for Edited { + const PALLET: &'static str = "Crowdloan"; + const EVENT: &'static str = "Edited"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "A memo has been updated. `[who, fund_index, memo]`"] + pub struct MemoUpdated( + pub ::subxt::sp_core::crypto::AccountId32, + pub runtime_types::polkadot_parachain::primitives::Id, + pub ::std::vec::Vec<::core::primitive::u8>, + ); + impl ::subxt::Event for MemoUpdated { + const PALLET: &'static str = "Crowdloan"; + const EVENT: &'static str = "MemoUpdated"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "A parachain has been moved to `NewRaise`"] + pub struct AddedToNewRaise(pub runtime_types::polkadot_parachain::primitives::Id); + impl ::subxt::Event for AddedToNewRaise { + const PALLET: &'static str = "Crowdloan"; + const EVENT: &'static str = "AddedToNewRaise"; + } + } + pub mod storage { + use super::runtime_types; + pub struct Funds<'a>(pub &'a runtime_types::polkadot_parachain::primitives::Id); + impl ::subxt::StorageEntry for Funds<'_> { + const PALLET: &'static str = "Crowdloan"; + const STORAGE: &'static str = "Funds"; + type Value = runtime_types::polkadot_runtime_common::crowdloan::FundInfo< + ::subxt::sp_core::crypto::AccountId32, + ::core::primitive::u128, + ::core::primitive::u32, + ::core::primitive::u32, + >; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( + &self.0, + ::subxt::StorageHasher::Twox64Concat, + )]) + } + } + pub struct NewRaise; + impl ::subxt::StorageEntry for NewRaise { + const PALLET: &'static str = "Crowdloan"; + const STORAGE: &'static str = "NewRaise"; + type Value = ::std::vec::Vec; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct EndingsCount; + impl ::subxt::StorageEntry for EndingsCount { + const PALLET: &'static str = "Crowdloan"; + const STORAGE: &'static str = "EndingsCount"; + type Value = ::core::primitive::u32; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct NextFundIndex; + impl ::subxt::StorageEntry for NextFundIndex { + const PALLET: &'static str = "Crowdloan"; + const STORAGE: &'static str = "NextFundIndex"; + type Value = ::core::primitive::u32; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct StorageApi<'a, T: ::subxt::Config> { + client: &'a ::subxt::Client, + } + impl<'a, T: ::subxt::Config> StorageApi<'a, T> { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self 
{ client } + } + #[doc = " Info on all of the funds."] + pub async fn funds( + &self, + _0: &runtime_types::polkadot_parachain::primitives::Id, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::core::option::Option< + runtime_types::polkadot_runtime_common::crowdloan::FundInfo< + ::subxt::sp_core::crypto::AccountId32, + ::core::primitive::u128, + ::core::primitive::u32, + ::core::primitive::u32, + >, + >, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 13u8, 211u8, 240u8, 138u8, 231u8, 78u8, 123u8, 252u8, 210u8, 27u8, + 202u8, 82u8, 157u8, 118u8, 209u8, 218u8, 160u8, 183u8, 225u8, 77u8, + 230u8, 131u8, 180u8, 238u8, 83u8, 202u8, 29u8, 106u8, 114u8, 223u8, + 250u8, 3u8, + ] + { + let entry = Funds(_0); + self.client.storage().fetch(&entry, block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Info on all of the funds."] + pub async fn funds_iter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result<::subxt::KeyIter<'a, T, Funds<'a>>, ::subxt::BasicError> + { + if self.client.metadata().storage_hash::()? + == [ + 13u8, 211u8, 240u8, 138u8, 231u8, 78u8, 123u8, 252u8, 210u8, 27u8, + 202u8, 82u8, 157u8, 118u8, 209u8, 218u8, 160u8, 183u8, 225u8, 77u8, + 230u8, 131u8, 180u8, 238u8, 83u8, 202u8, 29u8, 106u8, 114u8, 223u8, + 250u8, 3u8, + ] + { + self.client.storage().iter(block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The funds that have had additional contributions during the last block. This is used"] + #[doc = " in order to determine which funds should submit new or updated bids."] + pub async fn new_raise( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::std::vec::Vec, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 243u8, 204u8, 121u8, 230u8, 151u8, 223u8, 248u8, 199u8, 68u8, 209u8, + 226u8, 159u8, 217u8, 105u8, 39u8, 127u8, 162u8, 133u8, 56u8, 1u8, 70u8, + 7u8, 176u8, 56u8, 81u8, 49u8, 155u8, 143u8, 100u8, 153u8, 59u8, 86u8, + ] + { + let entry = NewRaise; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The number of auctions that have entered into their ending period so far."] + pub async fn endings_count( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> + { + if self.client.metadata().storage_hash::()? + == [ + 12u8, 159u8, 166u8, 75u8, 192u8, 33u8, 21u8, 244u8, 149u8, 200u8, 49u8, + 54u8, 191u8, 174u8, 202u8, 86u8, 76u8, 115u8, 189u8, 35u8, 192u8, + 175u8, 156u8, 188u8, 41u8, 23u8, 92u8, 36u8, 141u8, 235u8, 248u8, + 143u8, + ] + { + let entry = EndingsCount; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Tracker for the next available fund index"] + pub async fn next_fund_index( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> + { + if self.client.metadata().storage_hash::()? 
+ == [ + 1u8, 215u8, 164u8, 194u8, 231u8, 34u8, 207u8, 19u8, 149u8, 187u8, 3u8, + 176u8, 194u8, 240u8, 180u8, 169u8, 214u8, 194u8, 202u8, 240u8, 209u8, + 6u8, 244u8, 46u8, 54u8, 142u8, 61u8, 220u8, 240u8, 96u8, 10u8, 168u8, + ] + { + let entry = NextFundIndex; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + pub mod constants { + use super::runtime_types; + pub struct ConstantsApi<'a, T: ::subxt::Config> { + client: &'a ::subxt::Client, + } + impl<'a, T: ::subxt::Config> ConstantsApi<'a, T> { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { client } + } + #[doc = " `PalletId` for the crowdloan pallet. An appropriate value could be `PalletId(*b\"py/cfund\")`"] + pub fn pallet_id( + &self, + ) -> ::core::result::Result< + runtime_types::frame_support::PalletId, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .constant_hash("Crowdloan", "PalletId")? + == [ + 190u8, 62u8, 112u8, 88u8, 48u8, 222u8, 234u8, 76u8, 230u8, 81u8, 205u8, + 113u8, 202u8, 11u8, 184u8, 229u8, 189u8, 124u8, 132u8, 255u8, 46u8, + 202u8, 80u8, 86u8, 182u8, 212u8, 149u8, 200u8, 57u8, 215u8, 195u8, + 132u8, + ] + { + let pallet = self.client.metadata().pallet("Crowdloan")?; + let constant = pallet.constant("PalletId")?; + let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; + Ok(value) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The minimum amount that may be contributed into a crowdloan. Should almost certainly be at"] + #[doc = " least `ExistentialDeposit`."] + pub fn min_contribution( + &self, + ) -> ::core::result::Result<::core::primitive::u128, ::subxt::BasicError> + { + if self + .client + .metadata() + .constant_hash("Crowdloan", "MinContribution")? + == [ + 202u8, 28u8, 7u8, 249u8, 127u8, 100u8, 197u8, 70u8, 224u8, 205u8, 34u8, + 128u8, 198u8, 242u8, 54u8, 124u8, 230u8, 52u8, 142u8, 219u8, 30u8, + 229u8, 65u8, 136u8, 5u8, 244u8, 26u8, 9u8, 162u8, 58u8, 172u8, 141u8, + ] + { + let pallet = self.client.metadata().pallet("Crowdloan")?; + let constant = pallet.constant("MinContribution")?; + let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; + Ok(value) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Max number of storage keys to remove per extrinsic call."] + pub fn remove_keys_limit( + &self, + ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> + { + if self + .client + .metadata() + .constant_hash("Crowdloan", "RemoveKeysLimit")? 
+ == [ + 199u8, 136u8, 0u8, 136u8, 48u8, 93u8, 45u8, 100u8, 156u8, 106u8, 111u8, + 137u8, 126u8, 251u8, 185u8, 76u8, 37u8, 112u8, 241u8, 98u8, 237u8, 6u8, + 157u8, 204u8, 211u8, 246u8, 183u8, 101u8, 3u8, 214u8, 44u8, 135u8, + ] + { + let pallet = self.client.metadata().pallet("Crowdloan")?; + let constant = pallet.constant("RemoveKeysLimit")?; + let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; + Ok(value) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + } + pub mod slots { + use super::root_mod; + use super::runtime_types; + pub mod calls { + use super::root_mod; + use super::runtime_types; + type DispatchError = runtime_types::sp_runtime::DispatchError; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct ForceLease { + pub para: runtime_types::polkadot_parachain::primitives::Id, + pub leaser: ::subxt::sp_core::crypto::AccountId32, + pub amount: ::core::primitive::u128, + pub period_begin: ::core::primitive::u32, + pub period_count: ::core::primitive::u32, + } + impl ::subxt::Call for ForceLease { + const PALLET: &'static str = "Slots"; + const FUNCTION: &'static str = "force_lease"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct ClearAllLeases { + pub para: runtime_types::polkadot_parachain::primitives::Id, + } + impl ::subxt::Call for ClearAllLeases { + const PALLET: &'static str = "Slots"; + const FUNCTION: &'static str = "clear_all_leases"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct TriggerOnboard { + pub para: runtime_types::polkadot_parachain::primitives::Id, + } + impl ::subxt::Call for TriggerOnboard { + const PALLET: &'static str = "Slots"; + const FUNCTION: &'static str = "trigger_onboard"; + } + pub struct TransactionApi<'a, T: ::subxt::Config, X> { + client: &'a ::subxt::Client, + marker: ::core::marker::PhantomData, + } + impl<'a, T, X> TransactionApi<'a, T, X> + where + T: ::subxt::Config, + X: ::subxt::extrinsic::ExtrinsicParams, + { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { + client, + marker: ::core::marker::PhantomData, + } + } + #[doc = "Just a connect into the `lease_out` call, in case Root wants to force some lease to happen"] + #[doc = "independently of any other on-chain mechanism to use it."] + #[doc = ""] + #[doc = "The dispatch origin for this call must match `T::ForceOrigin`."] + pub fn force_lease( + &self, + para: runtime_types::polkadot_parachain::primitives::Id, + leaser: ::subxt::sp_core::crypto::AccountId32, + amount: ::core::primitive::u128, + period_begin: ::core::primitive::u32, + period_count: ::core::primitive::u32, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + ForceLease, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? 
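+ // Usage sketch (hand-written, illustrative): each extrinsic gets a payload struct
+ // implementing `::subxt::Call` (pallet + function name) plus a builder on
+ // `TransactionApi` that checks the call hash and wraps the payload into a
+ // `SubmittableExtrinsic`. The generic bounds below mirror the generated impl;
+ // `force_lease` dispatches from `T::ForceOrigin`, so on a live chain it is normally
+ // wrapped in `Sudo::sudo`. The para id, deposit and period numbers are placeholders:
+ //
+ //     fn build_force_lease<T, X>(
+ //         client: &::subxt::Client<T>,
+ //         leaser: ::subxt::sp_core::crypto::AccountId32,
+ //     ) -> Result<(), ::subxt::BasicError>
+ //     where
+ //         T: ::subxt::Config,
+ //         X: ::subxt::extrinsic::ExtrinsicParams<T>,
+ //     {
+ //         let tx = TransactionApi::<T, X>::new(client);
+ //         let para = runtime_types::polkadot_parachain::primitives::Id(2000);
+ //         let _ext = tx.force_lease(para, leaser, 10_000_000_000_000u128, 1, 2)?;
+ //         Ok(())
+ //     }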
+ == [ + 110u8, 205u8, 106u8, 226u8, 3u8, 177u8, 198u8, 116u8, 52u8, 161u8, + 90u8, 240u8, 43u8, 160u8, 144u8, 63u8, 97u8, 231u8, 232u8, 176u8, 92u8, + 253u8, 16u8, 243u8, 187u8, 94u8, 20u8, 114u8, 23u8, 46u8, 231u8, 249u8, + ] + { + let call = ForceLease { + para, + leaser, + amount, + period_begin, + period_count, + }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Clear all leases for a Para Id, refunding any deposits back to the original owners."] + #[doc = ""] + #[doc = "The dispatch origin for this call must match `T::ForceOrigin`."] + pub fn clear_all_leases( + &self, + para: runtime_types::polkadot_parachain::primitives::Id, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + ClearAllLeases, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 101u8, 225u8, 10u8, 139u8, 34u8, 12u8, 48u8, 76u8, 97u8, 178u8, 5u8, + 110u8, 19u8, 3u8, 237u8, 183u8, 54u8, 113u8, 7u8, 138u8, 180u8, 201u8, + 245u8, 151u8, 61u8, 40u8, 69u8, 31u8, 28u8, 172u8, 253u8, 227u8, + ] + { + let call = ClearAllLeases { para }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Try to onboard a parachain that has a lease for the current lease period."] + #[doc = ""] + #[doc = "This function can be useful if there was some state issue with a para that should"] + #[doc = "have onboarded, but was unable to. As long as they have a lease period, we can"] + #[doc = "let them onboard from here."] + #[doc = ""] + #[doc = "Origin must be signed, but can be called by anyone."] + pub fn trigger_onboard( + &self, + para: runtime_types::polkadot_parachain::primitives::Id, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + TriggerOnboard, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? 
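+ // Usage sketch (hand-written, illustrative): unlike `force_lease`, `trigger_onboard`
+ // only needs a signed origin, so any account can submit it for a para that already
+ // holds a lease. The bounds mirror the generated impl; the para id is a placeholder:
+ //
+ //     fn build_trigger_onboard<T, X>(
+ //         client: &::subxt::Client<T>,
+ //     ) -> Result<(), ::subxt::BasicError>
+ //     where
+ //         T: ::subxt::Config,
+ //         X: ::subxt::extrinsic::ExtrinsicParams<T>,
+ //     {
+ //         let tx = TransactionApi::<T, X>::new(client);
+ //         let para = runtime_types::polkadot_parachain::primitives::Id(2000);
+ //         // The returned `SubmittableExtrinsic` can then be signed and submitted
+ //         // with whichever signer / submit method the subxt version in use provides.
+ //         let _ext = tx.trigger_onboard(para)?;
+ //         Ok(())
+ //     }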
+ == [ + 85u8, 246u8, 247u8, 252u8, 46u8, 143u8, 200u8, 102u8, 105u8, 51u8, + 148u8, 164u8, 27u8, 25u8, 139u8, 167u8, 150u8, 129u8, 131u8, 187u8, + 153u8, 6u8, 169u8, 153u8, 192u8, 116u8, 130u8, 12u8, 22u8, 199u8, 52u8, + 8u8, + ] + { + let call = TriggerOnboard { para }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + pub type Event = runtime_types::polkadot_runtime_common::slots::pallet::Event; + pub mod events { + use super::runtime_types; + #[derive( + :: subxt :: codec :: CompactAs, + :: subxt :: codec :: Decode, + :: subxt :: codec :: Encode, + Debug, + )] + #[doc = "A new `[lease_period]` is beginning."] + pub struct NewLeasePeriod(pub ::core::primitive::u32); + impl ::subxt::Event for NewLeasePeriod { + const PALLET: &'static str = "Slots"; + const EVENT: &'static str = "NewLeasePeriod"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "A para has won the right to a continuous set of lease periods as a parachain."] + #[doc = "First balance is any extra amount reserved on top of the para's existing deposit."] + #[doc = "Second balance is the total amount reserved."] + #[doc = "`[parachain_id, leaser, period_begin, period_count, extra_reserved, total_amount]`"] + pub struct Leased( + pub runtime_types::polkadot_parachain::primitives::Id, + pub ::subxt::sp_core::crypto::AccountId32, + pub ::core::primitive::u32, + pub ::core::primitive::u32, + pub ::core::primitive::u128, + pub ::core::primitive::u128, + ); + impl ::subxt::Event for Leased { + const PALLET: &'static str = "Slots"; + const EVENT: &'static str = "Leased"; + } + } + pub mod storage { + use super::runtime_types; + pub struct Leases<'a>(pub &'a runtime_types::polkadot_parachain::primitives::Id); + impl ::subxt::StorageEntry for Leases<'_> { + const PALLET: &'static str = "Slots"; + const STORAGE: &'static str = "Leases"; + type Value = ::std::vec::Vec< + ::core::option::Option<( + ::subxt::sp_core::crypto::AccountId32, + ::core::primitive::u128, + )>, + >; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( + &self.0, + ::subxt::StorageHasher::Twox64Concat, + )]) + } + } + pub struct StorageApi<'a, T: ::subxt::Config> { + client: &'a ::subxt::Client, + } + impl<'a, T: ::subxt::Config> StorageApi<'a, T> { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { client } + } + #[doc = " Amounts held on deposit for each (possibly future) leased parachain."] + #[doc = ""] + #[doc = " The actual amount locked on its behalf by any account at any time is the maximum of the second values"] + #[doc = " of the items in this list whose first value is the account."] + #[doc = ""] + #[doc = " The first item in the list is the amount locked for the current Lease Period. 
Following"] + #[doc = " items are for the subsequent lease periods."] + #[doc = ""] + #[doc = " The default value (an empty list) implies that the parachain no longer exists (or never"] + #[doc = " existed) as far as this pallet is concerned."] + #[doc = ""] + #[doc = " If a parachain doesn't exist *yet* but is scheduled to exist in the future, then it"] + #[doc = " will be left-padded with one or more `None`s to denote the fact that nothing is held on"] + #[doc = " deposit for the non-existent chain currently, but is held at some point in the future."] + #[doc = ""] + #[doc = " It is illegal for a `None` value to trail in the list."] + pub async fn leases( + &self, + _0: &runtime_types::polkadot_parachain::primitives::Id, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::std::vec::Vec< + ::core::option::Option<( + ::subxt::sp_core::crypto::AccountId32, + ::core::primitive::u128, + )>, + >, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 83u8, 145u8, 119u8, 74u8, 166u8, 90u8, 141u8, 47u8, 125u8, 250u8, + 173u8, 63u8, 193u8, 78u8, 96u8, 119u8, 111u8, 126u8, 83u8, 83u8, 80u8, + 32u8, 43u8, 173u8, 123u8, 126u8, 132u8, 166u8, 252u8, 39u8, 18u8, 39u8, + ] + { + let entry = Leases(_0); + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Amounts held on deposit for each (possibly future) leased parachain."] + #[doc = ""] + #[doc = " The actual amount locked on its behalf by any account at any time is the maximum of the second values"] + #[doc = " of the items in this list whose first value is the account."] + #[doc = ""] + #[doc = " The first item in the list is the amount locked for the current Lease Period. Following"] + #[doc = " items are for the subsequent lease periods."] + #[doc = ""] + #[doc = " The default value (an empty list) implies that the parachain no longer exists (or never"] + #[doc = " existed) as far as this pallet is concerned."] + #[doc = ""] + #[doc = " If a parachain doesn't exist *yet* but is scheduled to exist in the future, then it"] + #[doc = " will be left-padded with one or more `None`s to denote the fact that nothing is held on"] + #[doc = " deposit for the non-existent chain currently, but is held at some point in the future."] + #[doc = ""] + #[doc = " It is illegal for a `None` value to trail in the list."] + pub async fn leases_iter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result<::subxt::KeyIter<'a, T, Leases<'a>>, ::subxt::BasicError> + { + if self.client.metadata().storage_hash::()? + == [ + 83u8, 145u8, 119u8, 74u8, 166u8, 90u8, 141u8, 47u8, 125u8, 250u8, + 173u8, 63u8, 193u8, 78u8, 96u8, 119u8, 111u8, 126u8, 83u8, 83u8, 80u8, + 32u8, 43u8, 173u8, 123u8, 126u8, 132u8, 166u8, 252u8, 39u8, 18u8, 39u8, + ] + { + self.client.storage().iter(block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + pub mod constants { + use super::runtime_types; + pub struct ConstantsApi<'a, T: ::subxt::Config> { + client: &'a ::subxt::Client, + } + impl<'a, T: ::subxt::Config> ConstantsApi<'a, T> { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { client } + } + #[doc = " The number of blocks over which a single period lasts."] + pub fn lease_period( + &self, + ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> + { + if self + .client + .metadata() + .constant_hash("Slots", "LeasePeriod")? 
+ == [ + 199u8, 146u8, 34u8, 83u8, 56u8, 115u8, 56u8, 28u8, 80u8, 78u8, 80u8, + 106u8, 53u8, 187u8, 228u8, 50u8, 192u8, 147u8, 102u8, 175u8, 145u8, + 103u8, 186u8, 172u8, 235u8, 174u8, 247u8, 121u8, 47u8, 193u8, 44u8, + 60u8, + ] + { + let pallet = self.client.metadata().pallet("Slots")?; + let constant = pallet.constant("LeasePeriod")?; + let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; + Ok(value) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The number of blocks to offset each lease period by."] + pub fn lease_offset( + &self, + ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> + { + if self + .client + .metadata() + .constant_hash("Slots", "LeaseOffset")? + == [ + 236u8, 50u8, 129u8, 231u8, 6u8, 181u8, 238u8, 115u8, 32u8, 62u8, 217u8, + 32u8, 198u8, 36u8, 84u8, 223u8, 239u8, 223u8, 53u8, 13u8, 21u8, 33u8, + 230u8, 17u8, 103u8, 37u8, 154u8, 230u8, 240u8, 143u8, 9u8, 179u8, + ] + { + let pallet = self.client.metadata().pallet("Slots")?; + let constant = pallet.constant("LeaseOffset")?; + let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; + Ok(value) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + } + pub mod paras_sudo_wrapper { + use super::root_mod; + use super::runtime_types; + pub mod calls { + use super::root_mod; + use super::runtime_types; + type DispatchError = runtime_types::sp_runtime::DispatchError; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct SudoScheduleParaInitialize { + pub id: runtime_types::polkadot_parachain::primitives::Id, + pub genesis: runtime_types::polkadot_runtime_parachains::paras::ParaGenesisArgs, + } + impl ::subxt::Call for SudoScheduleParaInitialize { + const PALLET: &'static str = "ParasSudoWrapper"; + const FUNCTION: &'static str = "sudo_schedule_para_initialize"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct SudoScheduleParaCleanup { + pub id: runtime_types::polkadot_parachain::primitives::Id, + } + impl ::subxt::Call for SudoScheduleParaCleanup { + const PALLET: &'static str = "ParasSudoWrapper"; + const FUNCTION: &'static str = "sudo_schedule_para_cleanup"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct SudoScheduleParathreadUpgrade { + pub id: runtime_types::polkadot_parachain::primitives::Id, + } + impl ::subxt::Call for SudoScheduleParathreadUpgrade { + const PALLET: &'static str = "ParasSudoWrapper"; + const FUNCTION: &'static str = "sudo_schedule_parathread_upgrade"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct SudoScheduleParachainDowngrade { + pub id: runtime_types::polkadot_parachain::primitives::Id, + } + impl ::subxt::Call for SudoScheduleParachainDowngrade { + const PALLET: &'static str = "ParasSudoWrapper"; + const FUNCTION: &'static str = "sudo_schedule_parachain_downgrade"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct SudoQueueDownwardXcm { + pub id: runtime_types::polkadot_parachain::primitives::Id, + pub xcm: ::std::boxed::Box, + } + impl ::subxt::Call for SudoQueueDownwardXcm { + const PALLET: &'static str = "ParasSudoWrapper"; + const FUNCTION: &'static str = "sudo_queue_downward_xcm"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct SudoEstablishHrmpChannel { + pub sender: 
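+ // Usage sketch (hand-written, illustrative; refers to the Slots constants API
+ // above): `lease_period` and `lease_offset` are enough to place a block number
+ // inside a lease period, roughly the way the Slots pallet derives the index
+ // (only meaningful for blocks at or after the offset, and assuming a non-zero
+ // lease period):
+ //
+ //     fn lease_period_index_of<T: ::subxt::Config>(
+ //         client: &::subxt::Client<T>,
+ //         block_number: u32,
+ //     ) -> Result<u32, ::subxt::BasicError> {
+ //         let constants = ConstantsApi::new(client);
+ //         let period = constants.lease_period()?;  // blocks per lease period
+ //         let offset = constants.lease_offset()?;  // block at which period 0 starts
+ //         Ok(block_number.saturating_sub(offset) / period)
+ //     }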
runtime_types::polkadot_parachain::primitives::Id, + pub recipient: runtime_types::polkadot_parachain::primitives::Id, + pub max_capacity: ::core::primitive::u32, + pub max_message_size: ::core::primitive::u32, + } + impl ::subxt::Call for SudoEstablishHrmpChannel { + const PALLET: &'static str = "ParasSudoWrapper"; + const FUNCTION: &'static str = "sudo_establish_hrmp_channel"; + } + pub struct TransactionApi<'a, T: ::subxt::Config, X> { + client: &'a ::subxt::Client, + marker: ::core::marker::PhantomData, + } + impl<'a, T, X> TransactionApi<'a, T, X> + where + T: ::subxt::Config, + X: ::subxt::extrinsic::ExtrinsicParams, + { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { + client, + marker: ::core::marker::PhantomData, + } + } + #[doc = "Schedule a para to be initialized at the start of the next session."] + pub fn sudo_schedule_para_initialize( + &self, + id: runtime_types::polkadot_parachain::primitives::Id, + genesis: runtime_types::polkadot_runtime_parachains::paras::ParaGenesisArgs, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + SudoScheduleParaInitialize, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .call_hash::()? + == [ + 86u8, 164u8, 77u8, 56u8, 96u8, 214u8, 248u8, 7u8, 89u8, 247u8, 201u8, + 39u8, 212u8, 110u8, 252u8, 13u8, 2u8, 157u8, 243u8, 243u8, 22u8, 0u8, + 105u8, 138u8, 14u8, 232u8, 50u8, 121u8, 110u8, 222u8, 86u8, 47u8, + ] + { + let call = SudoScheduleParaInitialize { id, genesis }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Schedule a para to be cleaned up at the start of the next session."] + pub fn sudo_schedule_para_cleanup( + &self, + id: runtime_types::polkadot_parachain::primitives::Id, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + SudoScheduleParaCleanup, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .call_hash::()? + == [ + 108u8, 244u8, 138u8, 161u8, 12u8, 207u8, 245u8, 145u8, 139u8, 18u8, + 37u8, 156u8, 86u8, 114u8, 183u8, 19u8, 172u8, 209u8, 127u8, 255u8, + 217u8, 189u8, 24u8, 79u8, 93u8, 121u8, 9u8, 163u8, 84u8, 20u8, 212u8, + 222u8, + ] + { + let call = SudoScheduleParaCleanup { id }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Upgrade a parathread to a parachain"] + pub fn sudo_schedule_parathread_upgrade( + &self, + id: runtime_types::polkadot_parachain::primitives::Id, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + SudoScheduleParathreadUpgrade, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .call_hash::()? 
+ == [ + 169u8, 58u8, 222u8, 27u8, 223u8, 115u8, 47u8, 226u8, 148u8, 82u8, 2u8, + 86u8, 135u8, 202u8, 102u8, 191u8, 40u8, 221u8, 170u8, 13u8, 225u8, + 131u8, 121u8, 27u8, 165u8, 179u8, 175u8, 34u8, 209u8, 115u8, 93u8, + 85u8, + ] + { + let call = SudoScheduleParathreadUpgrade { id }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Downgrade a parachain to a parathread"] + pub fn sudo_schedule_parachain_downgrade( + &self, + id: runtime_types::polkadot_parachain::primitives::Id, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + SudoScheduleParachainDowngrade, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .call_hash::()? + == [ + 183u8, 209u8, 11u8, 52u8, 110u8, 163u8, 61u8, 191u8, 87u8, 84u8, 179u8, + 101u8, 251u8, 145u8, 158u8, 249u8, 48u8, 229u8, 84u8, 247u8, 21u8, 4u8, + 181u8, 104u8, 224u8, 128u8, 126u8, 249u8, 146u8, 158u8, 233u8, 128u8, + ] + { + let call = SudoScheduleParachainDowngrade { id }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Send a downward XCM to the given para."] + #[doc = ""] + #[doc = "The given parachain should exist and the payload should not exceed the preconfigured size"] + #[doc = "`config.max_downward_message_size`."] + pub fn sudo_queue_downward_xcm( + &self, + id: runtime_types::polkadot_parachain::primitives::Id, + xcm: runtime_types::xcm::VersionedXcm, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + SudoQueueDownwardXcm, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 81u8, 30u8, 40u8, 17u8, 248u8, 225u8, 213u8, 76u8, 11u8, 167u8, 196u8, + 12u8, 113u8, 152u8, 98u8, 196u8, 204u8, 166u8, 103u8, 199u8, 146u8, + 98u8, 73u8, 188u8, 128u8, 100u8, 77u8, 203u8, 103u8, 139u8, 105u8, + 50u8, + ] + { + let call = SudoQueueDownwardXcm { + id, + xcm: ::std::boxed::Box::new(xcm), + }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Forcefully establish a channel from the sender to the recipient."] + #[doc = ""] + #[doc = "This is equivalent to sending an `Hrmp::hrmp_init_open_channel` extrinsic followed by"] + #[doc = "`Hrmp::hrmp_accept_open_channel`."] + pub fn sudo_establish_hrmp_channel( + &self, + sender: runtime_types::polkadot_parachain::primitives::Id, + recipient: runtime_types::polkadot_parachain::primitives::Id, + max_capacity: ::core::primitive::u32, + max_message_size: ::core::primitive::u32, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + SudoEstablishHrmpChannel, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .call_hash::()? 
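+ // Usage sketch (hand-written, illustrative): the `ParasSudoWrapper` calls require
+ // the Root origin, so on Rococo they are normally wrapped in `Sudo::sudo` before
+ // submission. Building the HRMP-channel call itself (ids, capacity and message
+ // size are placeholder values):
+ //
+ //     fn build_hrmp_channel<T, X>(
+ //         client: &::subxt::Client<T>,
+ //     ) -> Result<(), ::subxt::BasicError>
+ //     where
+ //         T: ::subxt::Config,
+ //         X: ::subxt::extrinsic::ExtrinsicParams<T>,
+ //     {
+ //         let tx = TransactionApi::<T, X>::new(client);
+ //         let sender = runtime_types::polkadot_parachain::primitives::Id(2000);
+ //         let recipient = runtime_types::polkadot_parachain::primitives::Id(2001);
+ //         // capacity of 8 messages, 1 KiB max message size
+ //         let _ext = tx.sudo_establish_hrmp_channel(sender, recipient, 8, 1024)?;
+ //         Ok(())
+ //     }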
+ == [ + 37u8, 236u8, 235u8, 162u8, 207u8, 3u8, 97u8, 139u8, 72u8, 211u8, 203u8, + 78u8, 188u8, 159u8, 108u8, 13u8, 149u8, 224u8, 51u8, 96u8, 14u8, 60u8, + 124u8, 249u8, 48u8, 30u8, 6u8, 211u8, 205u8, 230u8, 252u8, 77u8, + ] + { + let call = SudoEstablishHrmpChannel { + sender, + recipient, + max_capacity, + max_message_size, + }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + } + pub mod assigned_slots { + use super::root_mod; + use super::runtime_types; + pub mod calls { + use super::root_mod; + use super::runtime_types; + type DispatchError = runtime_types::sp_runtime::DispatchError; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct AssignPermParachainSlot { + pub id: runtime_types::polkadot_parachain::primitives::Id, + } + impl ::subxt::Call for AssignPermParachainSlot { + const PALLET: &'static str = "AssignedSlots"; + const FUNCTION: &'static str = "assign_perm_parachain_slot"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct AssignTempParachainSlot { + pub id: runtime_types::polkadot_parachain::primitives::Id, + pub lease_period_start: + runtime_types::polkadot_runtime_common::assigned_slots::SlotLeasePeriodStart, + } + impl ::subxt::Call for AssignTempParachainSlot { + const PALLET: &'static str = "AssignedSlots"; + const FUNCTION: &'static str = "assign_temp_parachain_slot"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct UnassignParachainSlot { + pub id: runtime_types::polkadot_parachain::primitives::Id, + } + impl ::subxt::Call for UnassignParachainSlot { + const PALLET: &'static str = "AssignedSlots"; + const FUNCTION: &'static str = "unassign_parachain_slot"; + } + pub struct TransactionApi<'a, T: ::subxt::Config, X> { + client: &'a ::subxt::Client, + marker: ::core::marker::PhantomData, + } + impl<'a, T, X> TransactionApi<'a, T, X> + where + T: ::subxt::Config, + X: ::subxt::extrinsic::ExtrinsicParams, + { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { + client, + marker: ::core::marker::PhantomData, + } + } + #[doc = "Assign a permanent parachain slot and immediately create a lease for it."] + pub fn assign_perm_parachain_slot( + &self, + id: runtime_types::polkadot_parachain::primitives::Id, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + AssignPermParachainSlot, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .call_hash::()? + == [ + 182u8, 103u8, 59u8, 125u8, 140u8, 208u8, 37u8, 240u8, 39u8, 40u8, 34u8, + 213u8, 245u8, 19u8, 51u8, 202u8, 153u8, 174u8, 151u8, 229u8, 26u8, + 252u8, 91u8, 36u8, 67u8, 87u8, 249u8, 89u8, 149u8, 178u8, 87u8, 212u8, + ] + { + let call = AssignPermParachainSlot { id }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Assign a temporary parachain slot. 
The function tries to create a lease for it"] + #[doc = "immediately if `SlotLeasePeriodStart::Current` is specified, and if the number"] + #[doc = "of currently active temporary slots is below `MaxTemporarySlotPerLeasePeriod`."] + pub fn assign_temp_parachain_slot( + &self, + id: runtime_types::polkadot_parachain::primitives::Id, + lease_period_start : runtime_types :: polkadot_runtime_common :: assigned_slots :: SlotLeasePeriodStart, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + AssignTempParachainSlot, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .call_hash::()? + == [ + 166u8, 193u8, 161u8, 214u8, 110u8, 114u8, 94u8, 122u8, 247u8, 90u8, + 4u8, 153u8, 252u8, 215u8, 19u8, 80u8, 91u8, 82u8, 153u8, 101u8, 174u8, + 205u8, 41u8, 117u8, 144u8, 243u8, 206u8, 146u8, 170u8, 124u8, 53u8, + 109u8, + ] + { + let call = AssignTempParachainSlot { + id, + lease_period_start, + }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Unassign a permanent or temporary parachain slot"] + pub fn unassign_parachain_slot( + &self, + id: runtime_types::polkadot_parachain::primitives::Id, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + UnassignParachainSlot, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .call_hash::()? + == [ + 70u8, 155u8, 13u8, 223u8, 35u8, 4u8, 112u8, 133u8, 100u8, 136u8, 68u8, + 253u8, 52u8, 210u8, 70u8, 60u8, 13u8, 73u8, 39u8, 5u8, 163u8, 39u8, + 143u8, 187u8, 46u8, 54u8, 107u8, 160u8, 48u8, 227u8, 107u8, 106u8, + ] + { + let call = UnassignParachainSlot { id }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + pub type Event = runtime_types::polkadot_runtime_common::assigned_slots::pallet::Event; + pub mod events { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "A para was assigned a permanent parachain slot"] + pub struct PermanentSlotAssigned(pub runtime_types::polkadot_parachain::primitives::Id); + impl ::subxt::Event for PermanentSlotAssigned { + const PALLET: &'static str = "AssignedSlots"; + const EVENT: &'static str = "PermanentSlotAssigned"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "A para was assigned a temporary parachain slot"] + pub struct TemporarySlotAssigned(pub runtime_types::polkadot_parachain::primitives::Id); + impl ::subxt::Event for TemporarySlotAssigned { + const PALLET: &'static str = "AssignedSlots"; + const EVENT: &'static str = "TemporarySlotAssigned"; + } + } + pub mod storage { + use super::runtime_types; + pub struct PermanentSlots<'a>( + pub &'a runtime_types::polkadot_parachain::primitives::Id, + ); + impl ::subxt::StorageEntry for PermanentSlots<'_> { + const PALLET: &'static str = "AssignedSlots"; + const STORAGE: &'static str = "PermanentSlots"; + type Value = (::core::primitive::u32, ::core::primitive::u32); + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( + &self.0, + ::subxt::StorageHasher::Twox64Concat, + )]) + } + } + pub struct PermanentSlotCount; + impl ::subxt::StorageEntry for PermanentSlotCount { + const PALLET: &'static str = "AssignedSlots"; + const STORAGE: &'static str = "PermanentSlotCount"; + type Value = 
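+ // Usage sketch (hand-written, illustrative; refers to the AssignedSlots
+ // TransactionApi above): a temporary slot can be requested for the current lease
+ // period, subject to the `MaxTemporarySlotPerLeasePeriod` limit mentioned in the
+ // doc comment. The para id is a placeholder:
+ //
+ //     fn build_temp_slot<T, X>(
+ //         client: &::subxt::Client<T>,
+ //     ) -> Result<(), ::subxt::BasicError>
+ //     where
+ //         T: ::subxt::Config,
+ //         X: ::subxt::extrinsic::ExtrinsicParams<T>,
+ //     {
+ //         let tx = TransactionApi::<T, X>::new(client);
+ //         let id = runtime_types::polkadot_parachain::primitives::Id(2000);
+ //         let start =
+ //             runtime_types::polkadot_runtime_common::assigned_slots::SlotLeasePeriodStart::Current;
+ //         let _ext = tx.assign_temp_parachain_slot(id, start)?;
+ //         Ok(())
+ //     }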
::core::primitive::u32; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct TemporarySlots<'a>( + pub &'a runtime_types::polkadot_parachain::primitives::Id, + ); + impl ::subxt::StorageEntry for TemporarySlots<'_> { + const PALLET: &'static str = "AssignedSlots"; + const STORAGE: &'static str = "TemporarySlots"; + type Value = + runtime_types::polkadot_runtime_common::assigned_slots::ParachainTemporarySlot< + ::subxt::sp_core::crypto::AccountId32, + ::core::primitive::u32, + >; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( + &self.0, + ::subxt::StorageHasher::Twox64Concat, + )]) + } + } + pub struct TemporarySlotCount; + impl ::subxt::StorageEntry for TemporarySlotCount { + const PALLET: &'static str = "AssignedSlots"; + const STORAGE: &'static str = "TemporarySlotCount"; + type Value = ::core::primitive::u32; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct ActiveTemporarySlotCount; + impl ::subxt::StorageEntry for ActiveTemporarySlotCount { + const PALLET: &'static str = "AssignedSlots"; + const STORAGE: &'static str = "ActiveTemporarySlotCount"; + type Value = ::core::primitive::u32; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct StorageApi<'a, T: ::subxt::Config> { + client: &'a ::subxt::Client, + } + impl<'a, T: ::subxt::Config> StorageApi<'a, T> { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { client } + } + #[doc = " Assigned permanent slots, with their start lease period, and duration."] + pub async fn permanent_slots( + &self, + _0: &runtime_types::polkadot_parachain::primitives::Id, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::core::option::Option<(::core::primitive::u32, ::core::primitive::u32)>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 128u8, 156u8, 45u8, 33u8, 173u8, 102u8, 61u8, 221u8, 125u8, 205u8, + 152u8, 190u8, 12u8, 209u8, 203u8, 24u8, 208u8, 50u8, 234u8, 124u8, + 172u8, 20u8, 20u8, 196u8, 232u8, 177u8, 117u8, 82u8, 116u8, 151u8, + 199u8, 204u8, + ] + { + let entry = PermanentSlots(_0); + self.client.storage().fetch(&entry, block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Assigned permanent slots, with their start lease period, and duration."] + pub async fn permanent_slots_iter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::subxt::KeyIter<'a, T, PermanentSlots<'a>>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 128u8, 156u8, 45u8, 33u8, 173u8, 102u8, 61u8, 221u8, 125u8, 205u8, + 152u8, 190u8, 12u8, 209u8, 203u8, 24u8, 208u8, 50u8, 234u8, 124u8, + 172u8, 20u8, 20u8, 196u8, 232u8, 177u8, 117u8, 82u8, 116u8, 151u8, + 199u8, 204u8, + ] + { + self.client.storage().iter(block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Number of assigned (and active) permanent slots."] + pub async fn permanent_slot_count( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> + { + if self + .client + .metadata() + .storage_hash::()? 
+ == [ + 186u8, 224u8, 144u8, 167u8, 64u8, 193u8, 68u8, 25u8, 146u8, 86u8, + 109u8, 81u8, 100u8, 197u8, 25u8, 4u8, 27u8, 131u8, 162u8, 7u8, 148u8, + 198u8, 162u8, 100u8, 197u8, 86u8, 37u8, 43u8, 240u8, 25u8, 18u8, 66u8, + ] + { + let entry = PermanentSlotCount; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Assigned temporary slots."] pub async fn temporary_slots (& self , _0 : & runtime_types :: polkadot_parachain :: primitives :: Id , block_hash : :: core :: option :: Option < T :: Hash > ,) -> :: core :: result :: Result < :: core :: option :: Option < runtime_types :: polkadot_runtime_common :: assigned_slots :: ParachainTemporarySlot < :: subxt :: sp_core :: crypto :: AccountId32 , :: core :: primitive :: u32 > > , :: subxt :: BasicError >{ + if self.client.metadata().storage_hash::()? + == [ + 163u8, 37u8, 72u8, 142u8, 172u8, 117u8, 146u8, 111u8, 10u8, 100u8, + 92u8, 223u8, 253u8, 250u8, 19u8, 187u8, 227u8, 222u8, 91u8, 73u8, + 156u8, 158u8, 63u8, 183u8, 69u8, 16u8, 225u8, 58u8, 85u8, 89u8, 15u8, + 15u8, + ] + { + let entry = TemporarySlots(_0); + self.client.storage().fetch(&entry, block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Assigned temporary slots."] + pub async fn temporary_slots_iter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::subxt::KeyIter<'a, T, TemporarySlots<'a>>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 163u8, 37u8, 72u8, 142u8, 172u8, 117u8, 146u8, 111u8, 10u8, 100u8, + 92u8, 223u8, 253u8, 250u8, 19u8, 187u8, 227u8, 222u8, 91u8, 73u8, + 156u8, 158u8, 63u8, 183u8, 69u8, 16u8, 225u8, 58u8, 85u8, 89u8, 15u8, + 15u8, + ] + { + self.client.storage().iter(block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Number of assigned temporary slots."] + pub async fn temporary_slot_count( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> + { + if self + .client + .metadata() + .storage_hash::()? + == [ + 19u8, 243u8, 53u8, 131u8, 195u8, 143u8, 31u8, 224u8, 182u8, 69u8, + 209u8, 123u8, 82u8, 155u8, 96u8, 242u8, 109u8, 6u8, 27u8, 193u8, 251u8, + 45u8, 204u8, 10u8, 43u8, 185u8, 152u8, 181u8, 35u8, 183u8, 235u8, + 204u8, + ] + { + let entry = TemporarySlotCount; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Number of active temporary slots in current slot lease period."] + pub async fn active_temporary_slot_count( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> + { + if self + .client + .metadata() + .storage_hash::()? 
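+ // Usage sketch (hand-written, illustrative): reading the slot bookkeeping above is
+ // plain storage access; the counters use `fetch_or_default`, while per-para entries
+ // come back as an `Option`:
+ //
+ //     async fn slot_status<T: ::subxt::Config>(
+ //         client: &::subxt::Client<T>,
+ //         id: runtime_types::polkadot_parachain::primitives::Id,
+ //     ) -> Result<(), ::subxt::BasicError> {
+ //         let storage = StorageApi::new(client);
+ //         let permanent = storage.permanent_slot_count(None).await?;
+ //         let has_temp = storage.temporary_slots(&id, None).await?.is_some();
+ //         println!("permanent slots: {}, para has temp slot: {}", permanent, has_temp);
+ //         Ok(())
+ //     }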
+ == [ + 72u8, 42u8, 13u8, 42u8, 195u8, 143u8, 174u8, 137u8, 110u8, 144u8, + 190u8, 117u8, 102u8, 91u8, 66u8, 131u8, 69u8, 139u8, 156u8, 149u8, + 99u8, 177u8, 118u8, 72u8, 168u8, 191u8, 198u8, 135u8, 72u8, 192u8, + 130u8, 139u8, + ] + { + let entry = ActiveTemporarySlotCount; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + pub mod constants { + use super::runtime_types; + pub struct ConstantsApi<'a, T: ::subxt::Config> { + client: &'a ::subxt::Client, + } + impl<'a, T: ::subxt::Config> ConstantsApi<'a, T> { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { client } + } + #[doc = " The number of lease periods a permanent parachain slot lasts."] + pub fn permanent_slot_lease_period_length( + &self, + ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> + { + if self + .client + .metadata() + .constant_hash("AssignedSlots", "PermanentSlotLeasePeriodLength")? + == [ + 197u8, 245u8, 45u8, 120u8, 116u8, 188u8, 189u8, 76u8, 192u8, 116u8, + 209u8, 236u8, 222u8, 167u8, 208u8, 214u8, 153u8, 142u8, 201u8, 25u8, + 34u8, 104u8, 166u8, 229u8, 62u8, 169u8, 76u8, 118u8, 72u8, 170u8, + 202u8, 37u8, + ] + { + let pallet = self.client.metadata().pallet("AssignedSlots")?; + let constant = pallet.constant("PermanentSlotLeasePeriodLength")?; + let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; + Ok(value) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The number of lease periods a temporary parachain slot lasts."] + pub fn temporary_slot_lease_period_length( + &self, + ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> + { + if self + .client + .metadata() + .constant_hash("AssignedSlots", "TemporarySlotLeasePeriodLength")? + == [ + 160u8, 81u8, 143u8, 128u8, 192u8, 146u8, 202u8, 116u8, 139u8, 129u8, + 88u8, 164u8, 184u8, 60u8, 5u8, 56u8, 73u8, 212u8, 151u8, 207u8, 103u8, + 234u8, 152u8, 57u8, 230u8, 97u8, 135u8, 234u8, 34u8, 207u8, 116u8, + 164u8, + ] + { + let pallet = self.client.metadata().pallet("AssignedSlots")?; + let constant = pallet.constant("TemporarySlotLeasePeriodLength")?; + let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; + Ok(value) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The max number of permanent slots that can be assigned."] + pub fn max_permanent_slots( + &self, + ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> + { + if self + .client + .metadata() + .constant_hash("AssignedSlots", "MaxPermanentSlots")? + == [ + 75u8, 219u8, 223u8, 108u8, 146u8, 170u8, 51u8, 167u8, 148u8, 224u8, + 43u8, 171u8, 119u8, 109u8, 29u8, 18u8, 235u8, 142u8, 46u8, 172u8, 33u8, + 164u8, 74u8, 200u8, 206u8, 184u8, 170u8, 212u8, 233u8, 202u8, 191u8, + 47u8, + ] + { + let pallet = self.client.metadata().pallet("AssignedSlots")?; + let constant = pallet.constant("MaxPermanentSlots")?; + let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; + Ok(value) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The max number of temporary slots that can be assigned."] + pub fn max_temporary_slots( + &self, + ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> + { + if self + .client + .metadata() + .constant_hash("AssignedSlots", "MaxTemporarySlots")? 
+ == [ + 165u8, 58u8, 243u8, 192u8, 228u8, 193u8, 249u8, 135u8, 28u8, 120u8, + 142u8, 150u8, 118u8, 250u8, 26u8, 107u8, 167u8, 219u8, 137u8, 231u8, + 8u8, 189u8, 114u8, 249u8, 86u8, 90u8, 224u8, 234u8, 229u8, 19u8, 65u8, + 211u8, + ] + { + let pallet = self.client.metadata().pallet("AssignedSlots")?; + let constant = pallet.constant("MaxTemporarySlots")?; + let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; + Ok(value) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The max number of temporary slots to be scheduled per lease periods."] + pub fn max_temporary_slot_per_lease_period( + &self, + ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> + { + if self + .client + .metadata() + .constant_hash("AssignedSlots", "MaxTemporarySlotPerLeasePeriod")? + == [ + 134u8, 16u8, 150u8, 86u8, 147u8, 116u8, 41u8, 63u8, 214u8, 209u8, 81u8, + 194u8, 90u8, 90u8, 12u8, 174u8, 120u8, 81u8, 50u8, 131u8, 35u8, 180u8, + 81u8, 105u8, 237u8, 186u8, 234u8, 114u8, 88u8, 106u8, 64u8, 254u8, + ] + { + let pallet = self.client.metadata().pallet("AssignedSlots")?; + let constant = pallet.constant("MaxTemporarySlotPerLeasePeriod")?; + let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; + Ok(value) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + } + pub mod sudo { + use super::root_mod; + use super::runtime_types; + pub mod calls { + use super::root_mod; + use super::runtime_types; + type DispatchError = runtime_types::sp_runtime::DispatchError; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct Sudo { + pub call: ::std::boxed::Box, + } + impl ::subxt::Call for Sudo { + const PALLET: &'static str = "Sudo"; + const FUNCTION: &'static str = "sudo"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct SudoUncheckedWeight { + pub call: ::std::boxed::Box, + pub weight: ::core::primitive::u64, + } + impl ::subxt::Call for SudoUncheckedWeight { + const PALLET: &'static str = "Sudo"; + const FUNCTION: &'static str = "sudo_unchecked_weight"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct SetKey { + pub new: + ::subxt::sp_runtime::MultiAddress<::subxt::sp_core::crypto::AccountId32, ()>, + } + impl ::subxt::Call for SetKey { + const PALLET: &'static str = "Sudo"; + const FUNCTION: &'static str = "set_key"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct SudoAs { + pub who: + ::subxt::sp_runtime::MultiAddress<::subxt::sp_core::crypto::AccountId32, ()>, + pub call: ::std::boxed::Box, + } + impl ::subxt::Call for SudoAs { + const PALLET: &'static str = "Sudo"; + const FUNCTION: &'static str = "sudo_as"; + } + pub struct TransactionApi<'a, T: ::subxt::Config, X> { + client: &'a ::subxt::Client, + marker: ::core::marker::PhantomData, + } + impl<'a, T, X> TransactionApi<'a, T, X> + where + T: ::subxt::Config, + X: ::subxt::extrinsic::ExtrinsicParams, + { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { + client, + marker: ::core::marker::PhantomData, + } + } + #[doc = "Authenticates the sudo key and dispatches a function call with `Root` origin."] + #[doc = ""] + #[doc = "The dispatch origin for this call must be _Signed_."] + #[doc = ""] + #[doc = "# "] + #[doc = "- O(1)."] + #[doc = "- Limited storage reads."] + #[doc = "- One DB write (event)."] + #[doc = "- Weight of derivative `call` execution + 10,000."] + 
#[doc = "# "] + pub fn sudo( + &self, + call: runtime_types::rococo_runtime::Call, + ) -> Result< + ::subxt::SubmittableExtrinsic<'a, T, X, Sudo, DispatchError, root_mod::Event>, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 144u8, 36u8, 5u8, 240u8, 96u8, 128u8, 55u8, 40u8, 57u8, 205u8, 184u8, + 6u8, 181u8, 43u8, 165u8, 1u8, 148u8, 140u8, 80u8, 207u8, 210u8, 224u8, + 232u8, 252u8, 193u8, 43u8, 36u8, 203u8, 108u8, 150u8, 184u8, 199u8, + ] + { + let call = Sudo { + call: ::std::boxed::Box::new(call), + }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Authenticates the sudo key and dispatches a function call with `Root` origin."] + #[doc = "This function does not check the weight of the call, and instead allows the"] + #[doc = "Sudo user to specify the weight of the call."] + #[doc = ""] + #[doc = "The dispatch origin for this call must be _Signed_."] + #[doc = ""] + #[doc = "# "] + #[doc = "- O(1)."] + #[doc = "- The weight of this call is defined by the caller."] + #[doc = "# "] + pub fn sudo_unchecked_weight( + &self, + call: runtime_types::rococo_runtime::Call, + weight: ::core::primitive::u64, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + SudoUncheckedWeight, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 55u8, 233u8, 183u8, 246u8, 146u8, 48u8, 56u8, 202u8, 43u8, 90u8, 189u8, + 171u8, 47u8, 55u8, 7u8, 163u8, 136u8, 155u8, 23u8, 78u8, 58u8, 24u8, + 189u8, 2u8, 204u8, 15u8, 115u8, 222u8, 76u8, 94u8, 244u8, 43u8, + ] + { + let call = SudoUncheckedWeight { + call: ::std::boxed::Box::new(call), + weight, + }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Authenticates the current sudo key and sets the given AccountId (`new`) as the new sudo"] + #[doc = "key."] + #[doc = ""] + #[doc = "The dispatch origin for this call must be _Signed_."] + #[doc = ""] + #[doc = "# "] + #[doc = "- O(1)."] + #[doc = "- Limited storage reads."] + #[doc = "- One DB change."] + #[doc = "# "] + pub fn set_key( + &self, + new: ::subxt::sp_runtime::MultiAddress< + ::subxt::sp_core::crypto::AccountId32, + (), + >, + ) -> Result< + ::subxt::SubmittableExtrinsic<'a, T, X, SetKey, DispatchError, root_mod::Event>, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? 
+ == [ + 77u8, 253u8, 211u8, 157u8, 74u8, 92u8, 1u8, 102u8, 178u8, 103u8, 126u8, + 56u8, 156u8, 105u8, 45u8, 44u8, 64u8, 154u8, 163u8, 102u8, 93u8, 93u8, + 212u8, 5u8, 148u8, 184u8, 22u8, 135u8, 110u8, 102u8, 44u8, 172u8, + ] + { + let call = SetKey { new }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Authenticates the sudo key and dispatches a function call with `Signed` origin from"] + #[doc = "a given account."] + #[doc = ""] + #[doc = "The dispatch origin for this call must be _Signed_."] + #[doc = ""] + #[doc = "# "] + #[doc = "- O(1)."] + #[doc = "- Limited storage reads."] + #[doc = "- One DB write (event)."] + #[doc = "- Weight of derivative `call` execution + 10,000."] + #[doc = "# "] + pub fn sudo_as( + &self, + who: ::subxt::sp_runtime::MultiAddress< + ::subxt::sp_core::crypto::AccountId32, + (), + >, + call: runtime_types::rococo_runtime::Call, + ) -> Result< + ::subxt::SubmittableExtrinsic<'a, T, X, SudoAs, DispatchError, root_mod::Event>, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 119u8, 211u8, 130u8, 52u8, 139u8, 44u8, 67u8, 44u8, 219u8, 122u8, + 101u8, 134u8, 13u8, 199u8, 98u8, 27u8, 145u8, 98u8, 213u8, 5u8, 225u8, + 94u8, 177u8, 56u8, 117u8, 242u8, 206u8, 119u8, 76u8, 207u8, 210u8, + 19u8, + ] + { + let call = SudoAs { + who, + call: ::std::boxed::Box::new(call), + }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + pub type Event = runtime_types::pallet_sudo::pallet::Event; + pub mod events { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "A sudo just took place. \\[result\\]"] + pub struct Sudid { + pub sudo_result: + ::core::result::Result<(), runtime_types::sp_runtime::DispatchError>, + } + impl ::subxt::Event for Sudid { + const PALLET: &'static str = "Sudo"; + const EVENT: &'static str = "Sudid"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "The \\[sudoer\\] just switched identity; the old key is supplied if one existed."] + pub struct KeyChanged { + pub old_sudoer: ::core::option::Option<::subxt::sp_core::crypto::AccountId32>, + } + impl ::subxt::Event for KeyChanged { + const PALLET: &'static str = "Sudo"; + const EVENT: &'static str = "KeyChanged"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "A sudo just took place. 
\\[result\\]"] + pub struct SudoAsDone { + pub sudo_result: + ::core::result::Result<(), runtime_types::sp_runtime::DispatchError>, + } + impl ::subxt::Event for SudoAsDone { + const PALLET: &'static str = "Sudo"; + const EVENT: &'static str = "SudoAsDone"; + } + } + pub mod storage { + use super::runtime_types; + pub struct Key; + impl ::subxt::StorageEntry for Key { + const PALLET: &'static str = "Sudo"; + const STORAGE: &'static str = "Key"; + type Value = ::subxt::sp_core::crypto::AccountId32; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct StorageApi<'a, T: ::subxt::Config> { + client: &'a ::subxt::Client, + } + impl<'a, T: ::subxt::Config> StorageApi<'a, T> { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { client } + } + #[doc = " The `AccountId` of the sudo key."] + pub async fn key( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::core::option::Option<::subxt::sp_core::crypto::AccountId32>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 222u8, 90u8, 158u8, 233u8, 184u8, 23u8, 141u8, 135u8, 81u8, 187u8, + 47u8, 100u8, 30u8, 81u8, 239u8, 197u8, 249u8, 253u8, 73u8, 207u8, + 161u8, 141u8, 174u8, 59u8, 74u8, 181u8, 10u8, 90u8, 22u8, 109u8, 62u8, + 27u8, + ] + { + let entry = Key; + self.client.storage().fetch(&entry, block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + } + pub mod mmr { + use super::root_mod; + use super::runtime_types; + pub mod storage { + use super::runtime_types; + pub struct RootHash; + impl ::subxt::StorageEntry for RootHash { + const PALLET: &'static str = "Mmr"; + const STORAGE: &'static str = "RootHash"; + type Value = ::subxt::sp_core::H256; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct NumberOfLeaves; + impl ::subxt::StorageEntry for NumberOfLeaves { + const PALLET: &'static str = "Mmr"; + const STORAGE: &'static str = "NumberOfLeaves"; + type Value = ::core::primitive::u64; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct Nodes<'a>(pub &'a ::core::primitive::u64); + impl ::subxt::StorageEntry for Nodes<'_> { + const PALLET: &'static str = "Mmr"; + const STORAGE: &'static str = "Nodes"; + type Value = ::std::vec::Vec<::core::primitive::u8>; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( + &self.0, + ::subxt::StorageHasher::Identity, + )]) + } + } + pub struct Peaks<'a>(pub &'a ::core::primitive::u64); + impl ::subxt::StorageEntry for Peaks<'_> { + const PALLET: &'static str = "Mmr"; + const STORAGE: &'static str = "Peaks"; + type Value = ::subxt::sp_core::H256; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( + &self.0, + ::subxt::StorageHasher::Identity, + )]) + } + } + pub struct StorageApi<'a, T: ::subxt::Config> { + client: &'a ::subxt::Client, + } + impl<'a, T: ::subxt::Config> StorageApi<'a, T> { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { client } + } + #[doc = " Latest MMR Root hash."] + pub async fn root_hash( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result<::subxt::sp_core::H256, ::subxt::BasicError> + { + if self.client.metadata().storage_hash::()? 
+ == [ + 235u8, 26u8, 148u8, 114u8, 90u8, 241u8, 74u8, 26u8, 120u8, 199u8, + 205u8, 157u8, 22u8, 104u8, 182u8, 167u8, 93u8, 254u8, 95u8, 143u8, + 67u8, 0u8, 183u8, 46u8, 118u8, 61u8, 55u8, 31u8, 76u8, 232u8, 23u8, + 153u8, + ] + { + let entry = RootHash; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Current size of the MMR (number of leaves)."] + pub async fn number_of_leaves( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result<::core::primitive::u64, ::subxt::BasicError> + { + if self.client.metadata().storage_hash::()? + == [ + 138u8, 124u8, 23u8, 186u8, 255u8, 231u8, 187u8, 122u8, 213u8, 160u8, + 29u8, 24u8, 88u8, 98u8, 171u8, 36u8, 195u8, 216u8, 27u8, 190u8, 192u8, + 152u8, 8u8, 13u8, 210u8, 232u8, 45u8, 184u8, 240u8, 255u8, 156u8, + 204u8, + ] + { + let entry = NumberOfLeaves; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " All known nodes & leaves in the MMR, just until offchain db is fork aware"] + pub async fn nodes( + &self, + _0: &::core::primitive::u64, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::core::option::Option<::std::vec::Vec<::core::primitive::u8>>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 113u8, 225u8, 71u8, 185u8, 124u8, 250u8, 5u8, 111u8, 46u8, 137u8, 40u8, + 37u8, 190u8, 232u8, 247u8, 194u8, 199u8, 28u8, 48u8, 224u8, 131u8, 6u8, + 213u8, 79u8, 238u8, 33u8, 199u8, 124u8, 238u8, 237u8, 247u8, 226u8, + ] + { + let entry = Nodes(_0); + self.client.storage().fetch(&entry, block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " All known nodes & leaves in the MMR, just until offchain db is fork aware"] + pub async fn nodes_iter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result<::subxt::KeyIter<'a, T, Nodes<'a>>, ::subxt::BasicError> + { + if self.client.metadata().storage_hash::()? + == [ + 113u8, 225u8, 71u8, 185u8, 124u8, 250u8, 5u8, 111u8, 46u8, 137u8, 40u8, + 37u8, 190u8, 232u8, 247u8, 194u8, 199u8, 28u8, 48u8, 224u8, 131u8, 6u8, + 213u8, 79u8, 238u8, 33u8, 199u8, 124u8, 238u8, 237u8, 247u8, 226u8, + ] + { + self.client.storage().iter(block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Hashes of the nodes in the MMR."] + #[doc = ""] + #[doc = " Note this collection only contains MMR peaks, the inner nodes (and leaves)"] + #[doc = " are pruned and only stored in the Offchain DB."] + pub async fn peaks( + &self, + _0: &::core::primitive::u64, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::core::option::Option<::subxt::sp_core::H256>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? 
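+ // Usage sketch (hand-written, illustrative): the MMR root read above is the payload
+ // that BEEFY commitments sign over, which is what the BEEFY light client in this
+ // patch series ultimately verifies. Reading it and the current leaf count:
+ //
+ //     async fn mmr_state<T: ::subxt::Config>(
+ //         client: &::subxt::Client<T>,
+ //     ) -> Result<(), ::subxt::BasicError> {
+ //         let storage = StorageApi::new(client);
+ //         let root = storage.root_hash(None).await?;
+ //         let leaves = storage.number_of_leaves(None).await?;
+ //         println!("mmr root {:?} over {} leaves", root, leaves);
+ //         Ok(())
+ //     }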
+ == [ + 12u8, 106u8, 223u8, 202u8, 71u8, 155u8, 228u8, 220u8, 197u8, 143u8, + 133u8, 165u8, 62u8, 172u8, 183u8, 222u8, 58u8, 225u8, 24u8, 57u8, + 144u8, 206u8, 66u8, 204u8, 16u8, 31u8, 139u8, 19u8, 175u8, 235u8, + 204u8, 135u8, + ] + { + let entry = Peaks(_0); + self.client.storage().fetch(&entry, block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Hashes of the nodes in the MMR."] + #[doc = ""] + #[doc = " Note this collection only contains MMR peaks, the inner nodes (and leaves)"] + #[doc = " are pruned and only stored in the Offchain DB."] + pub async fn peaks_iter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result<::subxt::KeyIter<'a, T, Peaks<'a>>, ::subxt::BasicError> + { + if self.client.metadata().storage_hash::()? + == [ + 12u8, 106u8, 223u8, 202u8, 71u8, 155u8, 228u8, 220u8, 197u8, 143u8, + 133u8, 165u8, 62u8, 172u8, 183u8, 222u8, 58u8, 225u8, 24u8, 57u8, + 144u8, 206u8, 66u8, 204u8, 16u8, 31u8, 139u8, 19u8, 175u8, 235u8, + 204u8, 135u8, + ] + { + self.client.storage().iter(block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + } + pub mod beefy { + use super::root_mod; + use super::runtime_types; + pub mod calls { + use super::root_mod; + use super::runtime_types; + type DispatchError = runtime_types::sp_runtime::DispatchError; + pub struct TransactionApi<'a, T: ::subxt::Config, X> { + client: &'a ::subxt::Client, + marker: ::core::marker::PhantomData, + } + impl<'a, T, X> TransactionApi<'a, T, X> + where + T: ::subxt::Config, + X: ::subxt::extrinsic::ExtrinsicParams, + { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { + client, + marker: ::core::marker::PhantomData, + } + } + } + } + pub mod storage { + use super::runtime_types; + pub struct Authorities; + impl ::subxt::StorageEntry for Authorities { + const PALLET: &'static str = "Beefy"; + const STORAGE: &'static str = "Authorities"; + type Value = ::std::vec::Vec; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct ValidatorSetId; + impl ::subxt::StorageEntry for ValidatorSetId { + const PALLET: &'static str = "Beefy"; + const STORAGE: &'static str = "ValidatorSetId"; + type Value = ::core::primitive::u64; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct NextAuthorities; + impl ::subxt::StorageEntry for NextAuthorities { + const PALLET: &'static str = "Beefy"; + const STORAGE: &'static str = "NextAuthorities"; + type Value = ::std::vec::Vec; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct StorageApi<'a, T: ::subxt::Config> { + client: &'a ::subxt::Client, + } + impl<'a, T: ::subxt::Config> StorageApi<'a, T> { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { client } + } + #[doc = " The current authorities set"] + pub async fn authorities( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::std::vec::Vec, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? 
+ == [ + 140u8, 118u8, 62u8, 97u8, 16u8, 215u8, 8u8, 84u8, 193u8, 102u8, 249u8, + 181u8, 228u8, 155u8, 194u8, 255u8, 209u8, 200u8, 186u8, 7u8, 246u8, + 149u8, 147u8, 224u8, 171u8, 218u8, 168u8, 130u8, 186u8, 119u8, 72u8, + 194u8, + ] + { + let entry = Authorities; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The current validator set id"] + pub async fn validator_set_id( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result<::core::primitive::u64, ::subxt::BasicError> + { + if self.client.metadata().storage_hash::()? + == [ + 132u8, 47u8, 139u8, 239u8, 214u8, 179u8, 24u8, 63u8, 55u8, 154u8, + 248u8, 206u8, 73u8, 7u8, 52u8, 135u8, 54u8, 111u8, 250u8, 106u8, 71u8, + 78u8, 44u8, 44u8, 235u8, 177u8, 36u8, 112u8, 17u8, 122u8, 252u8, 80u8, + ] + { + let entry = ValidatorSetId; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Authorities set scheduled to be used with the next session"] + pub async fn next_authorities( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::std::vec::Vec, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 120u8, 191u8, 31u8, 28u8, 34u8, 82u8, 116u8, 34u8, 81u8, 176u8, 225u8, + 117u8, 7u8, 58u8, 241u8, 174u8, 246u8, 230u8, 210u8, 6u8, 22u8, 191u8, + 150u8, 77u8, 102u8, 54u8, 25u8, 216u8, 64u8, 100u8, 247u8, 107u8, + ] + { + let entry = NextAuthorities; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + } + pub mod mmr_leaf { + use super::root_mod; + use super::runtime_types; + pub mod storage { + use super::runtime_types; + pub struct BeefyNextAuthorities; + impl ::subxt::StorageEntry for BeefyNextAuthorities { + const PALLET: &'static str = "MmrLeaf"; + const STORAGE: &'static str = "BeefyNextAuthorities"; + type Value = runtime_types::beefy_primitives::mmr::BeefyNextAuthoritySet< + ::subxt::sp_core::H256, + >; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct StorageApi<'a, T: ::subxt::Config> { + client: &'a ::subxt::Client, + } + impl<'a, T: ::subxt::Config> StorageApi<'a, T> { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { client } + } + #[doc = " Details of next BEEFY authority set."] + #[doc = ""] + #[doc = " This storage entry is used as cache for calls to `update_beefy_next_authority_set`."] + pub async fn beefy_next_authorities( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + runtime_types::beefy_primitives::mmr::BeefyNextAuthoritySet< + ::subxt::sp_core::H256, + >, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .storage_hash::()? 
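+ // Usage sketch (hand-written, illustrative; refers to the Beefy storage API above):
+ // the Beefy pallet exposes the active authority set and its id, which a BEEFY light
+ // client tracks in order to check commitment signatures:
+ //
+ //     async fn beefy_authorities<T: ::subxt::Config>(
+ //         client: &::subxt::Client<T>,
+ //     ) -> Result<(), ::subxt::BasicError> {
+ //         let storage = StorageApi::new(client);
+ //         let set_id = storage.validator_set_id(None).await?;
+ //         let authorities = storage.authorities(None).await?;
+ //         println!("beefy set {} has {} authorities", set_id, authorities.len());
+ //         Ok(())
+ //     }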
+ == [ + 219u8, 54u8, 44u8, 30u8, 213u8, 71u8, 67u8, 245u8, 172u8, 191u8, 183u8, + 146u8, 165u8, 202u8, 44u8, 121u8, 201u8, 252u8, 68u8, 93u8, 43u8, + 127u8, 189u8, 113u8, 196u8, 125u8, 184u8, 170u8, 9u8, 54u8, 69u8, + 104u8, + ] + { + let entry = BeefyNextAuthorities; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + } + pub mod validator_manager { + use super::root_mod; + use super::runtime_types; + pub mod calls { + use super::root_mod; + use super::runtime_types; + type DispatchError = runtime_types::sp_runtime::DispatchError; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct RegisterValidators { + pub validators: ::std::vec::Vec<::subxt::sp_core::crypto::AccountId32>, + } + impl ::subxt::Call for RegisterValidators { + const PALLET: &'static str = "ValidatorManager"; + const FUNCTION: &'static str = "register_validators"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct DeregisterValidators { + pub validators: ::std::vec::Vec<::subxt::sp_core::crypto::AccountId32>, + } + impl ::subxt::Call for DeregisterValidators { + const PALLET: &'static str = "ValidatorManager"; + const FUNCTION: &'static str = "deregister_validators"; + } + pub struct TransactionApi<'a, T: ::subxt::Config, X> { + client: &'a ::subxt::Client, + marker: ::core::marker::PhantomData, + } + impl<'a, T, X> TransactionApi<'a, T, X> + where + T: ::subxt::Config, + X: ::subxt::extrinsic::ExtrinsicParams, + { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { + client, + marker: ::core::marker::PhantomData, + } + } + #[doc = "Add new validators to the set."] + #[doc = ""] + #[doc = "The new validators will be active from current session + 2."] + pub fn register_validators( + &self, + validators: ::std::vec::Vec<::subxt::sp_core::crypto::AccountId32>, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + RegisterValidators, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 211u8, 183u8, 121u8, 233u8, 106u8, 25u8, 23u8, 189u8, 167u8, 88u8, + 21u8, 191u8, 153u8, 233u8, 186u8, 3u8, 237u8, 24u8, 145u8, 35u8, 85u8, + 217u8, 142u8, 173u8, 62u8, 123u8, 67u8, 246u8, 252u8, 38u8, 101u8, + 22u8, + ] + { + let call = RegisterValidators { validators }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Remove validators from the set."] + #[doc = ""] + #[doc = "The removed validators will be deactivated from current session + 2."] + pub fn deregister_validators( + &self, + validators: ::std::vec::Vec<::subxt::sp_core::crypto::AccountId32>, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + DeregisterValidators, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? 
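
// Editor's note: illustrative sketch, not part of the generated bindings in this patch.
// The storage APIs above expose the pieces a BEEFY consensus state needs from the
// relay chain: the current validator set id, the authority keys, and the next
// authority-set commitment cached by the MmrLeaf pallet. A minimal sketch of reading
// them with subxt follows; the module name `relay_runtime`, the node URL, and the
// `api.storage().beefy()` / `api.storage().mmr_leaf()` accessor layout are assumptions
// based on the usual subxt codegen output, not definitions from this patch.
use subxt::{extrinsic::PolkadotExtrinsicParams, ClientBuilder, DefaultConfig};

async fn read_beefy_authority_state() -> Result<(), Box<dyn std::error::Error>> {
    // Connect to a (hypothetical) relay-chain node and wrap the client in the
    // generated runtime API.
    let api = ClientBuilder::new()
        .set_url("ws://127.0.0.1:9944")
        .build()
        .await?
        .to_runtime_api::<relay_runtime::RuntimeApi<
            DefaultConfig,
            PolkadotExtrinsicParams<DefaultConfig>,
        >>();

    // `None` reads at the latest block; a light-client update would normally pin
    // these reads to a specific block hash instead.
    let set_id = api.storage().beefy().validator_set_id(None).await?;
    let authorities = api.storage().beefy().authorities(None).await?;
    let next_set = api.storage().mmr_leaf().beefy_next_authorities(None).await?;

    // Field names follow `beefy_primitives::mmr::BeefyNextAuthoritySet` (id, len, root).
    println!(
        "validator set {} with {} authorities, next authority set root {:?}",
        set_id,
        authorities.len(),
        next_set.root,
    );
    Ok(())
}
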
+ == [ + 150u8, 129u8, 133u8, 192u8, 213u8, 92u8, 94u8, 234u8, 253u8, 173u8, + 208u8, 236u8, 109u8, 105u8, 193u8, 122u8, 88u8, 234u8, 39u8, 152u8, + 245u8, 127u8, 195u8, 101u8, 189u8, 25u8, 24u8, 4u8, 179u8, 149u8, 73u8, + 216u8, + ] + { + let call = DeregisterValidators { validators }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + pub type Event = runtime_types::rococo_runtime::validator_manager::pallet::Event; + pub mod events { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "New validators were added to the set."] + pub struct ValidatorsRegistered( + pub ::std::vec::Vec<::subxt::sp_core::crypto::AccountId32>, + ); + impl ::subxt::Event for ValidatorsRegistered { + const PALLET: &'static str = "ValidatorManager"; + const EVENT: &'static str = "ValidatorsRegistered"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "Validators were removed from the set."] + pub struct ValidatorsDeregistered( + pub ::std::vec::Vec<::subxt::sp_core::crypto::AccountId32>, + ); + impl ::subxt::Event for ValidatorsDeregistered { + const PALLET: &'static str = "ValidatorManager"; + const EVENT: &'static str = "ValidatorsDeregistered"; + } + } + pub mod storage { + use super::runtime_types; + pub struct ValidatorsToRetire; + impl ::subxt::StorageEntry for ValidatorsToRetire { + const PALLET: &'static str = "ValidatorManager"; + const STORAGE: &'static str = "ValidatorsToRetire"; + type Value = ::std::vec::Vec<::subxt::sp_core::crypto::AccountId32>; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct ValidatorsToAdd; + impl ::subxt::StorageEntry for ValidatorsToAdd { + const PALLET: &'static str = "ValidatorManager"; + const STORAGE: &'static str = "ValidatorsToAdd"; + type Value = ::std::vec::Vec<::subxt::sp_core::crypto::AccountId32>; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct StorageApi<'a, T: ::subxt::Config> { + client: &'a ::subxt::Client, + } + impl<'a, T: ::subxt::Config> StorageApi<'a, T> { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { client } + } + #[doc = " Validators that should be retired, because their Parachain was deregistered."] + pub async fn validators_to_retire( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::std::vec::Vec<::subxt::sp_core::crypto::AccountId32>, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .storage_hash::()? + == [ + 132u8, 64u8, 206u8, 170u8, 240u8, 112u8, 249u8, 91u8, 54u8, 160u8, + 127u8, 52u8, 144u8, 203u8, 91u8, 42u8, 60u8, 139u8, 121u8, 51u8, 154u8, + 68u8, 5u8, 64u8, 32u8, 33u8, 235u8, 220u8, 161u8, 155u8, 105u8, 29u8, + ] + { + let entry = ValidatorsToRetire; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Validators that should be added."] + pub async fn validators_to_add( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::std::vec::Vec<::subxt::sp_core::crypto::AccountId32>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? 
+ == [ + 222u8, 254u8, 217u8, 103u8, 255u8, 143u8, 42u8, 9u8, 219u8, 218u8, 1u8, + 95u8, 225u8, 65u8, 100u8, 178u8, 255u8, 33u8, 196u8, 174u8, 29u8, 92u8, + 3u8, 66u8, 166u8, 37u8, 3u8, 156u8, 148u8, 169u8, 121u8, 208u8, + ] + { + let entry = ValidatorsToAdd; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + } + pub mod bridge_rococo_grandpa { + use super::root_mod; + use super::runtime_types; + pub mod calls { + use super::root_mod; + use super::runtime_types; + type DispatchError = runtime_types::sp_runtime::DispatchError; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct SubmitFinalityProof { + pub finality_target: ::std::boxed::Box< + runtime_types::sp_runtime::generic::header::Header< + ::core::primitive::u32, + runtime_types::sp_runtime::traits::BlakeTwo256, + >, + >, + pub justification: + runtime_types::bp_header_chain::justification::GrandpaJustification< + runtime_types::sp_runtime::generic::header::Header< + ::core::primitive::u32, + runtime_types::sp_runtime::traits::BlakeTwo256, + >, + >, + } + impl ::subxt::Call for SubmitFinalityProof { + const PALLET: &'static str = "BridgeRococoGrandpa"; + const FUNCTION: &'static str = "submit_finality_proof"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct Initialize { + pub init_data: runtime_types::bp_header_chain::InitializationData< + runtime_types::sp_runtime::generic::header::Header< + ::core::primitive::u32, + runtime_types::sp_runtime::traits::BlakeTwo256, + >, + >, + } + impl ::subxt::Call for Initialize { + const PALLET: &'static str = "BridgeRococoGrandpa"; + const FUNCTION: &'static str = "initialize"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct SetOwner { + pub new_owner: ::core::option::Option<::subxt::sp_core::crypto::AccountId32>, + } + impl ::subxt::Call for SetOwner { + const PALLET: &'static str = "BridgeRococoGrandpa"; + const FUNCTION: &'static str = "set_owner"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct SetOperational { + pub operational: ::core::primitive::bool, + } + impl ::subxt::Call for SetOperational { + const PALLET: &'static str = "BridgeRococoGrandpa"; + const FUNCTION: &'static str = "set_operational"; + } + pub struct TransactionApi<'a, T: ::subxt::Config, X> { + client: &'a ::subxt::Client, + marker: ::core::marker::PhantomData, + } + impl<'a, T, X> TransactionApi<'a, T, X> + where + T: ::subxt::Config, + X: ::subxt::extrinsic::ExtrinsicParams, + { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { + client, + marker: ::core::marker::PhantomData, + } + } + #[doc = "Verify a target header is finalized according to the given finality proof."] + #[doc = ""] + #[doc = "It will use the underlying storage pallet to fetch information about the current"] + #[doc = "authorities and best finalized header in order to verify that the header is finalized."] + #[doc = ""] + #[doc = "If successful in verification, it will write the target header to the underlying storage"] + #[doc = "pallet."] + pub fn submit_finality_proof( + &self, + finality_target: runtime_types::sp_runtime::generic::header::Header< + ::core::primitive::u32, + runtime_types::sp_runtime::traits::BlakeTwo256, + >, + justification : runtime_types :: bp_header_chain :: justification :: GrandpaJustification < runtime_types :: sp_runtime :: generic 
:: header :: Header < :: core :: primitive :: u32 , runtime_types :: sp_runtime :: traits :: BlakeTwo256 > >, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + SubmitFinalityProof, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 212u8, 195u8, 142u8, 103u8, 130u8, 159u8, 71u8, 78u8, 81u8, 123u8, + 94u8, 3u8, 65u8, 153u8, 235u8, 38u8, 255u8, 11u8, 222u8, 100u8, 33u8, + 143u8, 234u8, 92u8, 109u8, 127u8, 255u8, 229u8, 177u8, 12u8, 172u8, + 216u8, + ] + { + let call = SubmitFinalityProof { + finality_target: ::std::boxed::Box::new(finality_target), + justification, + }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Bootstrap the bridge pallet with an initial header and authority set from which to sync."] + #[doc = ""] + #[doc = "The initial configuration provided does not need to be the genesis header of the bridged"] + #[doc = "chain, it can be any arbitrary header. You can also provide the next scheduled set"] + #[doc = "change if it is already know."] + #[doc = ""] + #[doc = "This function is only allowed to be called from a trusted origin and writes to storage"] + #[doc = "with practically no checks in terms of the validity of the data. It is important that"] + #[doc = "you ensure that valid data is being passed in."] + pub fn initialize( + &self, + init_data: runtime_types::bp_header_chain::InitializationData< + runtime_types::sp_runtime::generic::header::Header< + ::core::primitive::u32, + runtime_types::sp_runtime::traits::BlakeTwo256, + >, + >, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + Initialize, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 105u8, 67u8, 3u8, 233u8, 154u8, 49u8, 90u8, 36u8, 94u8, 157u8, 174u8, + 62u8, 171u8, 150u8, 148u8, 83u8, 191u8, 184u8, 94u8, 43u8, 25u8, 46u8, + 108u8, 139u8, 69u8, 188u8, 186u8, 190u8, 242u8, 243u8, 234u8, 61u8, + ] + { + let call = Initialize { init_data }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Change `PalletOwner`."] + #[doc = ""] + #[doc = "May only be called either by root, or by `PalletOwner`."] + pub fn set_owner( + &self, + new_owner: ::core::option::Option<::subxt::sp_core::crypto::AccountId32>, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + SetOwner, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 168u8, 223u8, 175u8, 15u8, 5u8, 101u8, 85u8, 40u8, 177u8, 36u8, 145u8, + 67u8, 135u8, 179u8, 171u8, 30u8, 17u8, 130u8, 2u8, 99u8, 96u8, 141u8, + 109u8, 36u8, 54u8, 185u8, 38u8, 48u8, 191u8, 233u8, 104u8, 163u8, + ] + { + let call = SetOwner { new_owner }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Halt or resume all pallet operations."] + #[doc = ""] + #[doc = "May only be called either by root, or by `PalletOwner`."] + pub fn set_operational( + &self, + operational: ::core::primitive::bool, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + SetOperational, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? 
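
// Editor's note: illustrative sketch, not part of the generated bindings in this patch.
// Every generated call above follows the same shape: compare a pallet-metadata hash
// baked in at codegen time against the connected node's metadata, then build a
// `SubmittableExtrinsic`, or fail with `MetadataError::IncompatibleMetadata`. Below is
// a sketch of constructing (not submitting) the `set_operational` call; `relay_runtime`
// and the `api.tx().bridge_rococo_grandpa()` accessor are assumed names following the
// usual subxt codegen, and signing/submission is intentionally left out.
type RelayApi = relay_runtime::RuntimeApi<
    subxt::DefaultConfig,
    subxt::extrinsic::PolkadotExtrinsicParams<subxt::DefaultConfig>,
>;

fn pause_rococo_bridge(api: &RelayApi) -> Result<(), subxt::BasicError> {
    // This only performs the local metadata check and encodes the call; the
    // root / `PalletOwner` origin check happens on-chain at dispatch time.
    let _tx = api.tx().bridge_rococo_grandpa().set_operational(false)?;
    Ok(())
}
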
+ == [ + 0u8, 141u8, 239u8, 63u8, 232u8, 183u8, 89u8, 179u8, 33u8, 67u8, 107u8, + 73u8, 45u8, 231u8, 255u8, 182u8, 6u8, 245u8, 198u8, 20u8, 60u8, 69u8, + 110u8, 153u8, 105u8, 231u8, 38u8, 60u8, 160u8, 183u8, 33u8, 249u8, + ] + { + let call = SetOperational { operational }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + pub mod storage { + use super::runtime_types; + pub struct RequestCount; + impl ::subxt::StorageEntry for RequestCount { + const PALLET: &'static str = "BridgeRococoGrandpa"; + const STORAGE: &'static str = "RequestCount"; + type Value = ::core::primitive::u32; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct InitialHash; + impl ::subxt::StorageEntry for InitialHash { + const PALLET: &'static str = "BridgeRococoGrandpa"; + const STORAGE: &'static str = "InitialHash"; + type Value = ::subxt::sp_core::H256; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct BestFinalized; + impl ::subxt::StorageEntry for BestFinalized { + const PALLET: &'static str = "BridgeRococoGrandpa"; + const STORAGE: &'static str = "BestFinalized"; + type Value = ::subxt::sp_core::H256; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct ImportedHashes<'a>(pub &'a ::core::primitive::u32); + impl ::subxt::StorageEntry for ImportedHashes<'_> { + const PALLET: &'static str = "BridgeRococoGrandpa"; + const STORAGE: &'static str = "ImportedHashes"; + type Value = ::subxt::sp_core::H256; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( + &self.0, + ::subxt::StorageHasher::Identity, + )]) + } + } + pub struct ImportedHashesPointer; + impl ::subxt::StorageEntry for ImportedHashesPointer { + const PALLET: &'static str = "BridgeRococoGrandpa"; + const STORAGE: &'static str = "ImportedHashesPointer"; + type Value = ::core::primitive::u32; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct ImportedHeaders<'a>(pub &'a ::subxt::sp_core::H256); + impl ::subxt::StorageEntry for ImportedHeaders<'_> { + const PALLET: &'static str = "BridgeRococoGrandpa"; + const STORAGE: &'static str = "ImportedHeaders"; + type Value = runtime_types::sp_runtime::generic::header::Header< + ::core::primitive::u32, + runtime_types::sp_runtime::traits::BlakeTwo256, + >; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( + &self.0, + ::subxt::StorageHasher::Identity, + )]) + } + } + pub struct CurrentAuthoritySet; + impl ::subxt::StorageEntry for CurrentAuthoritySet { + const PALLET: &'static str = "BridgeRococoGrandpa"; + const STORAGE: &'static str = "CurrentAuthoritySet"; + type Value = runtime_types::bp_header_chain::AuthoritySet; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct PalletOwner; + impl ::subxt::StorageEntry for PalletOwner { + const PALLET: &'static str = "BridgeRococoGrandpa"; + const STORAGE: &'static str = "PalletOwner"; + type Value = ::subxt::sp_core::crypto::AccountId32; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct IsHalted; + impl ::subxt::StorageEntry for IsHalted { + const PALLET: &'static str = "BridgeRococoGrandpa"; + const STORAGE: &'static str = "IsHalted"; + type Value = 
::core::primitive::bool; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct StorageApi<'a, T: ::subxt::Config> { + client: &'a ::subxt::Client, + } + impl<'a, T: ::subxt::Config> StorageApi<'a, T> { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { client } + } + #[doc = " The current number of requests which have written to storage."] + #[doc = ""] + #[doc = " If the `RequestCount` hits `MaxRequests`, no more calls will be allowed to the pallet until"] + #[doc = " the request capacity is increased."] + #[doc = ""] + #[doc = " The `RequestCount` is decreased by one at the beginning of every block. This is to ensure"] + #[doc = " that the pallet can always make progress."] + pub async fn request_count( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> + { + if self.client.metadata().storage_hash::()? + == [ + 100u8, 156u8, 98u8, 176u8, 229u8, 85u8, 81u8, 159u8, 120u8, 156u8, + 33u8, 179u8, 224u8, 237u8, 52u8, 198u8, 81u8, 81u8, 10u8, 180u8, 53u8, + 141u8, 96u8, 4u8, 39u8, 217u8, 58u8, 9u8, 57u8, 79u8, 47u8, 201u8, + ] + { + let entry = RequestCount; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Hash of the header used to bootstrap the pallet."] + pub async fn initial_hash( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result<::subxt::sp_core::H256, ::subxt::BasicError> + { + if self.client.metadata().storage_hash::()? + == [ + 243u8, 158u8, 214u8, 159u8, 84u8, 82u8, 193u8, 34u8, 24u8, 64u8, 21u8, + 172u8, 142u8, 116u8, 224u8, 19u8, 62u8, 232u8, 99u8, 201u8, 32u8, + 211u8, 139u8, 125u8, 41u8, 255u8, 107u8, 84u8, 165u8, 75u8, 201u8, + 142u8, + ] + { + let entry = InitialHash; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Hash of the best finalized header."] + pub async fn best_finalized( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result<::subxt::sp_core::H256, ::subxt::BasicError> + { + if self.client.metadata().storage_hash::()? + == [ + 155u8, 222u8, 92u8, 199u8, 26u8, 156u8, 146u8, 226u8, 24u8, 161u8, + 125u8, 18u8, 61u8, 237u8, 128u8, 26u8, 50u8, 55u8, 7u8, 42u8, 101u8, + 213u8, 0u8, 105u8, 219u8, 194u8, 227u8, 177u8, 147u8, 54u8, 22u8, 86u8, + ] + { + let entry = BestFinalized; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " A ring buffer of imported hashes. Ordered by the insertion time."] + pub async fn imported_hashes( + &self, + _0: &::core::primitive::u32, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::core::option::Option<::subxt::sp_core::H256>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 65u8, 162u8, 135u8, 91u8, 230u8, 102u8, 41u8, 123u8, 69u8, 20u8, 101u8, + 109u8, 178u8, 193u8, 239u8, 232u8, 66u8, 17u8, 222u8, 11u8, 188u8, + 53u8, 202u8, 80u8, 146u8, 234u8, 206u8, 192u8, 99u8, 4u8, 234u8, 67u8, + ] + { + let entry = ImportedHashes(_0); + self.client.storage().fetch(&entry, block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " A ring buffer of imported hashes. 
Ordered by the insertion time."] + pub async fn imported_hashes_iter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::subxt::KeyIter<'a, T, ImportedHashes<'a>>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 65u8, 162u8, 135u8, 91u8, 230u8, 102u8, 41u8, 123u8, 69u8, 20u8, 101u8, + 109u8, 178u8, 193u8, 239u8, 232u8, 66u8, 17u8, 222u8, 11u8, 188u8, + 53u8, 202u8, 80u8, 146u8, 234u8, 206u8, 192u8, 99u8, 4u8, 234u8, 67u8, + ] + { + self.client.storage().iter(block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Current ring buffer position."] + pub async fn imported_hashes_pointer( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> + { + if self + .client + .metadata() + .storage_hash::()? + == [ + 159u8, 83u8, 35u8, 45u8, 27u8, 249u8, 155u8, 131u8, 181u8, 196u8, + 224u8, 26u8, 92u8, 132u8, 127u8, 237u8, 13u8, 142u8, 196u8, 147u8, + 221u8, 216u8, 11u8, 78u8, 190u8, 241u8, 201u8, 96u8, 74u8, 185u8, + 208u8, 42u8, + ] + { + let entry = ImportedHashesPointer; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Headers which have been imported into the pallet."] + pub async fn imported_headers( + &self, + _0: &::subxt::sp_core::H256, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::core::option::Option< + runtime_types::sp_runtime::generic::header::Header< + ::core::primitive::u32, + runtime_types::sp_runtime::traits::BlakeTwo256, + >, + >, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 169u8, 86u8, 51u8, 48u8, 82u8, 28u8, 180u8, 142u8, 152u8, 63u8, 234u8, + 84u8, 9u8, 136u8, 220u8, 18u8, 69u8, 4u8, 76u8, 54u8, 72u8, 139u8, + 234u8, 101u8, 238u8, 205u8, 95u8, 118u8, 216u8, 249u8, 147u8, 200u8, + ] + { + let entry = ImportedHeaders(_0); + self.client.storage().fetch(&entry, block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Headers which have been imported into the pallet."] + pub async fn imported_headers_iter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::subxt::KeyIter<'a, T, ImportedHeaders<'a>>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 169u8, 86u8, 51u8, 48u8, 82u8, 28u8, 180u8, 142u8, 152u8, 63u8, 234u8, + 84u8, 9u8, 136u8, 220u8, 18u8, 69u8, 4u8, 76u8, 54u8, 72u8, 139u8, + 234u8, 101u8, 238u8, 205u8, 95u8, 118u8, 216u8, 249u8, 147u8, 200u8, + ] + { + self.client.storage().iter(block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The current GRANDPA Authority set."] + pub async fn current_authority_set( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + runtime_types::bp_header_chain::AuthoritySet, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .storage_hash::()? 
+ == [ + 228u8, 156u8, 183u8, 185u8, 99u8, 81u8, 121u8, 110u8, 92u8, 105u8, + 111u8, 248u8, 123u8, 1u8, 94u8, 144u8, 248u8, 215u8, 100u8, 91u8, + 181u8, 1u8, 12u8, 203u8, 66u8, 9u8, 183u8, 173u8, 57u8, 239u8, 143u8, + 158u8, + ] + { + let entry = CurrentAuthoritySet; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Optional pallet owner."] + #[doc = ""] + #[doc = " Pallet owner has a right to halt all pallet operations and then resume it. If it is"] + #[doc = " `None`, then there are no direct ways to halt/resume pallet operations, but other"] + #[doc = " runtime methods may still be used to do that (i.e. democracy::referendum to update halt"] + #[doc = " flag directly or call the `halt_operations`)."] + pub async fn pallet_owner( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::core::option::Option<::subxt::sp_core::crypto::AccountId32>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 115u8, 57u8, 104u8, 22u8, 119u8, 16u8, 215u8, 71u8, 228u8, 104u8, + 111u8, 24u8, 53u8, 155u8, 26u8, 121u8, 143u8, 126u8, 72u8, 148u8, + 105u8, 132u8, 190u8, 40u8, 233u8, 219u8, 19u8, 143u8, 255u8, 20u8, + 220u8, 124u8, + ] + { + let entry = PalletOwner; + self.client.storage().fetch(&entry, block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " If true, all pallet transactions are failed immediately."] + pub async fn is_halted( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result<::core::primitive::bool, ::subxt::BasicError> + { + if self.client.metadata().storage_hash::()? + == [ + 107u8, 205u8, 253u8, 250u8, 98u8, 222u8, 141u8, 130u8, 74u8, 138u8, + 151u8, 77u8, 37u8, 226u8, 115u8, 116u8, 137u8, 247u8, 159u8, 72u8, + 230u8, 11u8, 85u8, 102u8, 122u8, 203u8, 235u8, 219u8, 54u8, 172u8, + 74u8, 22u8, + ] + { + let entry = IsHalted; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + pub mod constants { + use super::runtime_types; + pub struct ConstantsApi<'a, T: ::subxt::Config> { + client: &'a ::subxt::Client, + } + impl<'a, T: ::subxt::Config> ConstantsApi<'a, T> { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { client } + } + #[doc = " The upper bound on the number of requests allowed by the pallet."] + #[doc = ""] + #[doc = " A request refers to an action which writes a header to storage."] + #[doc = ""] + #[doc = " Once this bound is reached the pallet will not allow any dispatchables to be called"] + #[doc = " until the request count has decreased."] + pub fn max_requests( + &self, + ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> + { + if self + .client + .metadata() + .constant_hash("BridgeRococoGrandpa", "MaxRequests")? 
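
// Editor's note: illustrative sketch, not part of the generated bindings in this patch.
// Pallet constants are not storage reads: after the same metadata-hash guard, the value
// is SCALE-decoded straight out of the pallet metadata. A sketch of reading `MaxRequests`
// together with `HeadersToKeep` (defined just below), with `RelayApi` as in the previous
// sketch; the `api.constants().bridge_rococo_grandpa()` accessor is an assumed name based
// on the usual subxt codegen.
fn rococo_bridge_limits(api: &RelayApi) -> Result<(u32, u32), subxt::BasicError> {
    let max_requests = api.constants().bridge_rococo_grandpa().max_requests()?;
    let headers_to_keep = api.constants().bridge_rococo_grandpa().headers_to_keep()?;
    Ok((max_requests, headers_to_keep))
}
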
+ == [ + 214u8, 232u8, 188u8, 57u8, 231u8, 189u8, 134u8, 244u8, 85u8, 191u8, + 134u8, 74u8, 207u8, 115u8, 21u8, 124u8, 19u8, 227u8, 59u8, 8u8, 252u8, + 8u8, 0u8, 252u8, 40u8, 49u8, 74u8, 145u8, 172u8, 109u8, 136u8, 63u8, + ] + { + let pallet = self.client.metadata().pallet("BridgeRococoGrandpa")?; + let constant = pallet.constant("MaxRequests")?; + let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; + Ok(value) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Maximal number of finalized headers to keep in the storage."] + #[doc = ""] + #[doc = " The setting is there to prevent growing the on-chain state indefinitely. Note"] + #[doc = " the setting does not relate to block numbers - we will simply keep as much items"] + #[doc = " in the storage, so it doesn't guarantee any fixed timeframe for finality headers."] + pub fn headers_to_keep( + &self, + ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> + { + if self + .client + .metadata() + .constant_hash("BridgeRococoGrandpa", "HeadersToKeep")? + == [ + 60u8, 85u8, 123u8, 208u8, 97u8, 205u8, 153u8, 170u8, 74u8, 94u8, 206u8, + 148u8, 171u8, 182u8, 210u8, 175u8, 1u8, 44u8, 152u8, 246u8, 144u8, + 232u8, 127u8, 202u8, 253u8, 214u8, 47u8, 246u8, 63u8, 86u8, 184u8, + 94u8, + ] + { + let pallet = self.client.metadata().pallet("BridgeRococoGrandpa")?; + let constant = pallet.constant("HeadersToKeep")?; + let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; + Ok(value) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + } + pub mod bridge_wococo_grandpa { + use super::root_mod; + use super::runtime_types; + pub mod calls { + use super::root_mod; + use super::runtime_types; + type DispatchError = runtime_types::sp_runtime::DispatchError; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct SubmitFinalityProof { + pub finality_target: ::std::boxed::Box< + runtime_types::sp_runtime::generic::header::Header< + ::core::primitive::u32, + runtime_types::sp_runtime::traits::BlakeTwo256, + >, + >, + pub justification: + runtime_types::bp_header_chain::justification::GrandpaJustification< + runtime_types::sp_runtime::generic::header::Header< + ::core::primitive::u32, + runtime_types::sp_runtime::traits::BlakeTwo256, + >, + >, + } + impl ::subxt::Call for SubmitFinalityProof { + const PALLET: &'static str = "BridgeWococoGrandpa"; + const FUNCTION: &'static str = "submit_finality_proof"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct Initialize { + pub init_data: runtime_types::bp_header_chain::InitializationData< + runtime_types::sp_runtime::generic::header::Header< + ::core::primitive::u32, + runtime_types::sp_runtime::traits::BlakeTwo256, + >, + >, + } + impl ::subxt::Call for Initialize { + const PALLET: &'static str = "BridgeWococoGrandpa"; + const FUNCTION: &'static str = "initialize"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct SetOwner { + pub new_owner: ::core::option::Option<::subxt::sp_core::crypto::AccountId32>, + } + impl ::subxt::Call for SetOwner { + const PALLET: &'static str = "BridgeWococoGrandpa"; + const FUNCTION: &'static str = "set_owner"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct SetOperational { + pub operational: ::core::primitive::bool, + } + impl ::subxt::Call for SetOperational { + const PALLET: &'static str = 
"BridgeWococoGrandpa"; + const FUNCTION: &'static str = "set_operational"; + } + pub struct TransactionApi<'a, T: ::subxt::Config, X> { + client: &'a ::subxt::Client, + marker: ::core::marker::PhantomData, + } + impl<'a, T, X> TransactionApi<'a, T, X> + where + T: ::subxt::Config, + X: ::subxt::extrinsic::ExtrinsicParams, + { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { + client, + marker: ::core::marker::PhantomData, + } + } + #[doc = "Verify a target header is finalized according to the given finality proof."] + #[doc = ""] + #[doc = "It will use the underlying storage pallet to fetch information about the current"] + #[doc = "authorities and best finalized header in order to verify that the header is finalized."] + #[doc = ""] + #[doc = "If successful in verification, it will write the target header to the underlying storage"] + #[doc = "pallet."] + pub fn submit_finality_proof( + &self, + finality_target: runtime_types::sp_runtime::generic::header::Header< + ::core::primitive::u32, + runtime_types::sp_runtime::traits::BlakeTwo256, + >, + justification : runtime_types :: bp_header_chain :: justification :: GrandpaJustification < runtime_types :: sp_runtime :: generic :: header :: Header < :: core :: primitive :: u32 , runtime_types :: sp_runtime :: traits :: BlakeTwo256 > >, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + SubmitFinalityProof, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 212u8, 195u8, 142u8, 103u8, 130u8, 159u8, 71u8, 78u8, 81u8, 123u8, + 94u8, 3u8, 65u8, 153u8, 235u8, 38u8, 255u8, 11u8, 222u8, 100u8, 33u8, + 143u8, 234u8, 92u8, 109u8, 127u8, 255u8, 229u8, 177u8, 12u8, 172u8, + 216u8, + ] + { + let call = SubmitFinalityProof { + finality_target: ::std::boxed::Box::new(finality_target), + justification, + }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Bootstrap the bridge pallet with an initial header and authority set from which to sync."] + #[doc = ""] + #[doc = "The initial configuration provided does not need to be the genesis header of the bridged"] + #[doc = "chain, it can be any arbitrary header. You can also provide the next scheduled set"] + #[doc = "change if it is already know."] + #[doc = ""] + #[doc = "This function is only allowed to be called from a trusted origin and writes to storage"] + #[doc = "with practically no checks in terms of the validity of the data. It is important that"] + #[doc = "you ensure that valid data is being passed in."] + pub fn initialize( + &self, + init_data: runtime_types::bp_header_chain::InitializationData< + runtime_types::sp_runtime::generic::header::Header< + ::core::primitive::u32, + runtime_types::sp_runtime::traits::BlakeTwo256, + >, + >, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + Initialize, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? 
+ == [ + 105u8, 67u8, 3u8, 233u8, 154u8, 49u8, 90u8, 36u8, 94u8, 157u8, 174u8, + 62u8, 171u8, 150u8, 148u8, 83u8, 191u8, 184u8, 94u8, 43u8, 25u8, 46u8, + 108u8, 139u8, 69u8, 188u8, 186u8, 190u8, 242u8, 243u8, 234u8, 61u8, + ] + { + let call = Initialize { init_data }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Change `PalletOwner`."] + #[doc = ""] + #[doc = "May only be called either by root, or by `PalletOwner`."] + pub fn set_owner( + &self, + new_owner: ::core::option::Option<::subxt::sp_core::crypto::AccountId32>, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + SetOwner, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 168u8, 223u8, 175u8, 15u8, 5u8, 101u8, 85u8, 40u8, 177u8, 36u8, 145u8, + 67u8, 135u8, 179u8, 171u8, 30u8, 17u8, 130u8, 2u8, 99u8, 96u8, 141u8, + 109u8, 36u8, 54u8, 185u8, 38u8, 48u8, 191u8, 233u8, 104u8, 163u8, + ] + { + let call = SetOwner { new_owner }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Halt or resume all pallet operations."] + #[doc = ""] + #[doc = "May only be called either by root, or by `PalletOwner`."] + pub fn set_operational( + &self, + operational: ::core::primitive::bool, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + SetOperational, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 0u8, 141u8, 239u8, 63u8, 232u8, 183u8, 89u8, 179u8, 33u8, 67u8, 107u8, + 73u8, 45u8, 231u8, 255u8, 182u8, 6u8, 245u8, 198u8, 20u8, 60u8, 69u8, + 110u8, 153u8, 105u8, 231u8, 38u8, 60u8, 160u8, 183u8, 33u8, 249u8, + ] + { + let call = SetOperational { operational }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + pub mod storage { + use super::runtime_types; + pub struct RequestCount; + impl ::subxt::StorageEntry for RequestCount { + const PALLET: &'static str = "BridgeWococoGrandpa"; + const STORAGE: &'static str = "RequestCount"; + type Value = ::core::primitive::u32; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct InitialHash; + impl ::subxt::StorageEntry for InitialHash { + const PALLET: &'static str = "BridgeWococoGrandpa"; + const STORAGE: &'static str = "InitialHash"; + type Value = ::subxt::sp_core::H256; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct BestFinalized; + impl ::subxt::StorageEntry for BestFinalized { + const PALLET: &'static str = "BridgeWococoGrandpa"; + const STORAGE: &'static str = "BestFinalized"; + type Value = ::subxt::sp_core::H256; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct ImportedHashes<'a>(pub &'a ::core::primitive::u32); + impl ::subxt::StorageEntry for ImportedHashes<'_> { + const PALLET: &'static str = "BridgeWococoGrandpa"; + const STORAGE: &'static str = "ImportedHashes"; + type Value = ::subxt::sp_core::H256; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( + &self.0, + ::subxt::StorageHasher::Identity, + )]) + } + } + pub struct ImportedHashesPointer; + impl ::subxt::StorageEntry for ImportedHashesPointer { + const PALLET: 
&'static str = "BridgeWococoGrandpa"; + const STORAGE: &'static str = "ImportedHashesPointer"; + type Value = ::core::primitive::u32; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct ImportedHeaders<'a>(pub &'a ::subxt::sp_core::H256); + impl ::subxt::StorageEntry for ImportedHeaders<'_> { + const PALLET: &'static str = "BridgeWococoGrandpa"; + const STORAGE: &'static str = "ImportedHeaders"; + type Value = runtime_types::sp_runtime::generic::header::Header< + ::core::primitive::u32, + runtime_types::sp_runtime::traits::BlakeTwo256, + >; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( + &self.0, + ::subxt::StorageHasher::Identity, + )]) + } + } + pub struct CurrentAuthoritySet; + impl ::subxt::StorageEntry for CurrentAuthoritySet { + const PALLET: &'static str = "BridgeWococoGrandpa"; + const STORAGE: &'static str = "CurrentAuthoritySet"; + type Value = runtime_types::bp_header_chain::AuthoritySet; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct PalletOwner; + impl ::subxt::StorageEntry for PalletOwner { + const PALLET: &'static str = "BridgeWococoGrandpa"; + const STORAGE: &'static str = "PalletOwner"; + type Value = ::subxt::sp_core::crypto::AccountId32; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct IsHalted; + impl ::subxt::StorageEntry for IsHalted { + const PALLET: &'static str = "BridgeWococoGrandpa"; + const STORAGE: &'static str = "IsHalted"; + type Value = ::core::primitive::bool; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct StorageApi<'a, T: ::subxt::Config> { + client: &'a ::subxt::Client, + } + impl<'a, T: ::subxt::Config> StorageApi<'a, T> { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { client } + } + #[doc = " The current number of requests which have written to storage."] + #[doc = ""] + #[doc = " If the `RequestCount` hits `MaxRequests`, no more calls will be allowed to the pallet until"] + #[doc = " the request capacity is increased."] + #[doc = ""] + #[doc = " The `RequestCount` is decreased by one at the beginning of every block. This is to ensure"] + #[doc = " that the pallet can always make progress."] + pub async fn request_count( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> + { + if self.client.metadata().storage_hash::()? + == [ + 100u8, 156u8, 98u8, 176u8, 229u8, 85u8, 81u8, 159u8, 120u8, 156u8, + 33u8, 179u8, 224u8, 237u8, 52u8, 198u8, 81u8, 81u8, 10u8, 180u8, 53u8, + 141u8, 96u8, 4u8, 39u8, 217u8, 58u8, 9u8, 57u8, 79u8, 47u8, 201u8, + ] + { + let entry = RequestCount; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Hash of the header used to bootstrap the pallet."] + pub async fn initial_hash( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result<::subxt::sp_core::H256, ::subxt::BasicError> + { + if self.client.metadata().storage_hash::()? 
+ == [ + 243u8, 158u8, 214u8, 159u8, 84u8, 82u8, 193u8, 34u8, 24u8, 64u8, 21u8, + 172u8, 142u8, 116u8, 224u8, 19u8, 62u8, 232u8, 99u8, 201u8, 32u8, + 211u8, 139u8, 125u8, 41u8, 255u8, 107u8, 84u8, 165u8, 75u8, 201u8, + 142u8, + ] + { + let entry = InitialHash; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Hash of the best finalized header."] + pub async fn best_finalized( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result<::subxt::sp_core::H256, ::subxt::BasicError> + { + if self.client.metadata().storage_hash::()? + == [ + 155u8, 222u8, 92u8, 199u8, 26u8, 156u8, 146u8, 226u8, 24u8, 161u8, + 125u8, 18u8, 61u8, 237u8, 128u8, 26u8, 50u8, 55u8, 7u8, 42u8, 101u8, + 213u8, 0u8, 105u8, 219u8, 194u8, 227u8, 177u8, 147u8, 54u8, 22u8, 86u8, + ] + { + let entry = BestFinalized; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " A ring buffer of imported hashes. Ordered by the insertion time."] + pub async fn imported_hashes( + &self, + _0: &::core::primitive::u32, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::core::option::Option<::subxt::sp_core::H256>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 65u8, 162u8, 135u8, 91u8, 230u8, 102u8, 41u8, 123u8, 69u8, 20u8, 101u8, + 109u8, 178u8, 193u8, 239u8, 232u8, 66u8, 17u8, 222u8, 11u8, 188u8, + 53u8, 202u8, 80u8, 146u8, 234u8, 206u8, 192u8, 99u8, 4u8, 234u8, 67u8, + ] + { + let entry = ImportedHashes(_0); + self.client.storage().fetch(&entry, block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " A ring buffer of imported hashes. Ordered by the insertion time."] + pub async fn imported_hashes_iter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::subxt::KeyIter<'a, T, ImportedHashes<'a>>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 65u8, 162u8, 135u8, 91u8, 230u8, 102u8, 41u8, 123u8, 69u8, 20u8, 101u8, + 109u8, 178u8, 193u8, 239u8, 232u8, 66u8, 17u8, 222u8, 11u8, 188u8, + 53u8, 202u8, 80u8, 146u8, 234u8, 206u8, 192u8, 99u8, 4u8, 234u8, 67u8, + ] + { + self.client.storage().iter(block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Current ring buffer position."] + pub async fn imported_hashes_pointer( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> + { + if self + .client + .metadata() + .storage_hash::()? 
+ == [ + 159u8, 83u8, 35u8, 45u8, 27u8, 249u8, 155u8, 131u8, 181u8, 196u8, + 224u8, 26u8, 92u8, 132u8, 127u8, 237u8, 13u8, 142u8, 196u8, 147u8, + 221u8, 216u8, 11u8, 78u8, 190u8, 241u8, 201u8, 96u8, 74u8, 185u8, + 208u8, 42u8, + ] + { + let entry = ImportedHashesPointer; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Headers which have been imported into the pallet."] + pub async fn imported_headers( + &self, + _0: &::subxt::sp_core::H256, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::core::option::Option< + runtime_types::sp_runtime::generic::header::Header< + ::core::primitive::u32, + runtime_types::sp_runtime::traits::BlakeTwo256, + >, + >, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 169u8, 86u8, 51u8, 48u8, 82u8, 28u8, 180u8, 142u8, 152u8, 63u8, 234u8, + 84u8, 9u8, 136u8, 220u8, 18u8, 69u8, 4u8, 76u8, 54u8, 72u8, 139u8, + 234u8, 101u8, 238u8, 205u8, 95u8, 118u8, 216u8, 249u8, 147u8, 200u8, + ] + { + let entry = ImportedHeaders(_0); + self.client.storage().fetch(&entry, block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Headers which have been imported into the pallet."] + pub async fn imported_headers_iter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::subxt::KeyIter<'a, T, ImportedHeaders<'a>>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 169u8, 86u8, 51u8, 48u8, 82u8, 28u8, 180u8, 142u8, 152u8, 63u8, 234u8, + 84u8, 9u8, 136u8, 220u8, 18u8, 69u8, 4u8, 76u8, 54u8, 72u8, 139u8, + 234u8, 101u8, 238u8, 205u8, 95u8, 118u8, 216u8, 249u8, 147u8, 200u8, + ] + { + self.client.storage().iter(block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The current GRANDPA Authority set."] + pub async fn current_authority_set( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + runtime_types::bp_header_chain::AuthoritySet, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .storage_hash::()? + == [ + 228u8, 156u8, 183u8, 185u8, 99u8, 81u8, 121u8, 110u8, 92u8, 105u8, + 111u8, 248u8, 123u8, 1u8, 94u8, 144u8, 248u8, 215u8, 100u8, 91u8, + 181u8, 1u8, 12u8, 203u8, 66u8, 9u8, 183u8, 173u8, 57u8, 239u8, 143u8, + 158u8, + ] + { + let entry = CurrentAuthoritySet; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Optional pallet owner."] + #[doc = ""] + #[doc = " Pallet owner has a right to halt all pallet operations and then resume it. If it is"] + #[doc = " `None`, then there are no direct ways to halt/resume pallet operations, but other"] + #[doc = " runtime methods may still be used to do that (i.e. democracy::referendum to update halt"] + #[doc = " flag directly or call the `halt_operations`)."] + pub async fn pallet_owner( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::core::option::Option<::subxt::sp_core::crypto::AccountId32>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? 
+ == [ + 115u8, 57u8, 104u8, 22u8, 119u8, 16u8, 215u8, 71u8, 228u8, 104u8, + 111u8, 24u8, 53u8, 155u8, 26u8, 121u8, 143u8, 126u8, 72u8, 148u8, + 105u8, 132u8, 190u8, 40u8, 233u8, 219u8, 19u8, 143u8, 255u8, 20u8, + 220u8, 124u8, + ] + { + let entry = PalletOwner; + self.client.storage().fetch(&entry, block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " If true, all pallet transactions are failed immediately."] + pub async fn is_halted( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result<::core::primitive::bool, ::subxt::BasicError> + { + if self.client.metadata().storage_hash::()? + == [ + 107u8, 205u8, 253u8, 250u8, 98u8, 222u8, 141u8, 130u8, 74u8, 138u8, + 151u8, 77u8, 37u8, 226u8, 115u8, 116u8, 137u8, 247u8, 159u8, 72u8, + 230u8, 11u8, 85u8, 102u8, 122u8, 203u8, 235u8, 219u8, 54u8, 172u8, + 74u8, 22u8, + ] + { + let entry = IsHalted; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + pub mod constants { + use super::runtime_types; + pub struct ConstantsApi<'a, T: ::subxt::Config> { + client: &'a ::subxt::Client, + } + impl<'a, T: ::subxt::Config> ConstantsApi<'a, T> { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { client } + } + #[doc = " The upper bound on the number of requests allowed by the pallet."] + #[doc = ""] + #[doc = " A request refers to an action which writes a header to storage."] + #[doc = ""] + #[doc = " Once this bound is reached the pallet will not allow any dispatchables to be called"] + #[doc = " until the request count has decreased."] + pub fn max_requests( + &self, + ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> + { + if self + .client + .metadata() + .constant_hash("BridgeWococoGrandpa", "MaxRequests")? + == [ + 214u8, 232u8, 188u8, 57u8, 231u8, 189u8, 134u8, 244u8, 85u8, 191u8, + 134u8, 74u8, 207u8, 115u8, 21u8, 124u8, 19u8, 227u8, 59u8, 8u8, 252u8, + 8u8, 0u8, 252u8, 40u8, 49u8, 74u8, 145u8, 172u8, 109u8, 136u8, 63u8, + ] + { + let pallet = self.client.metadata().pallet("BridgeWococoGrandpa")?; + let constant = pallet.constant("MaxRequests")?; + let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; + Ok(value) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Maximal number of finalized headers to keep in the storage."] + #[doc = ""] + #[doc = " The setting is there to prevent growing the on-chain state indefinitely. Note"] + #[doc = " the setting does not relate to block numbers - we will simply keep as much items"] + #[doc = " in the storage, so it doesn't guarantee any fixed timeframe for finality headers."] + pub fn headers_to_keep( + &self, + ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> + { + if self + .client + .metadata() + .constant_hash("BridgeWococoGrandpa", "HeadersToKeep")? 
+ == [ + 60u8, 85u8, 123u8, 208u8, 97u8, 205u8, 153u8, 170u8, 74u8, 94u8, 206u8, + 148u8, 171u8, 182u8, 210u8, 175u8, 1u8, 44u8, 152u8, 246u8, 144u8, + 232u8, 127u8, 202u8, 253u8, 214u8, 47u8, 246u8, 63u8, 86u8, 184u8, + 94u8, + ] + { + let pallet = self.client.metadata().pallet("BridgeWococoGrandpa")?; + let constant = pallet.constant("HeadersToKeep")?; + let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; + Ok(value) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + } + pub mod bridge_rococo_messages { + use super::root_mod; + use super::runtime_types; + pub mod calls { + use super::root_mod; + use super::runtime_types; + type DispatchError = runtime_types::sp_runtime::DispatchError; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct SetOwner { + pub new_owner: ::core::option::Option<::subxt::sp_core::crypto::AccountId32>, + } + impl ::subxt::Call for SetOwner { + const PALLET: &'static str = "BridgeRococoMessages"; + const FUNCTION: &'static str = "set_owner"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct SetOperatingMode { + pub operating_mode: runtime_types::bp_messages::OperatingMode, + } + impl ::subxt::Call for SetOperatingMode { + const PALLET: &'static str = "BridgeRococoMessages"; + const FUNCTION: &'static str = "set_operating_mode"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct UpdatePalletParameter { + pub parameter: (), + } + impl ::subxt::Call for UpdatePalletParameter { + const PALLET: &'static str = "BridgeRococoMessages"; + const FUNCTION: &'static str = "update_pallet_parameter"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct SendMessage { + pub lane_id: [::core::primitive::u8; 4usize], + pub payload: runtime_types::bp_message_dispatch::MessagePayload< + ::subxt::sp_core::crypto::AccountId32, + runtime_types::sp_runtime::MultiSigner, + runtime_types::sp_runtime::MultiSignature, + ::std::vec::Vec<::core::primitive::u8>, + >, + pub delivery_and_dispatch_fee: ::core::primitive::u128, + } + impl ::subxt::Call for SendMessage { + const PALLET: &'static str = "BridgeRococoMessages"; + const FUNCTION: &'static str = "send_message"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct IncreaseMessageFee { + pub lane_id: [::core::primitive::u8; 4usize], + pub nonce: ::core::primitive::u64, + pub additional_fee: ::core::primitive::u128, + } + impl ::subxt::Call for IncreaseMessageFee { + const PALLET: &'static str = "BridgeRococoMessages"; + const FUNCTION: &'static str = "increase_message_fee"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct ReceiveMessagesProof { pub relayer_id_at_bridged_chain : :: subxt :: sp_core :: crypto :: AccountId32 , pub proof : runtime_types :: bridge_runtime_common :: messages :: target :: FromBridgedChainMessagesProof < :: subxt :: sp_core :: H256 > , pub messages_count : :: core :: primitive :: u32 , pub dispatch_weight : :: core :: primitive :: u64 , } + impl ::subxt::Call for ReceiveMessagesProof { + const PALLET: &'static str = "BridgeRococoMessages"; + const FUNCTION: &'static str = "receive_messages_proof"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct ReceiveMessagesDeliveryProof { pub proof : runtime_types :: bridge_runtime_common :: messages :: source :: 
FromBridgedChainMessagesDeliveryProof < :: subxt :: sp_core :: H256 > , pub relayers_state : runtime_types :: bp_messages :: UnrewardedRelayersState , } + impl ::subxt::Call for ReceiveMessagesDeliveryProof { + const PALLET: &'static str = "BridgeRococoMessages"; + const FUNCTION: &'static str = "receive_messages_delivery_proof"; + } + pub struct TransactionApi<'a, T: ::subxt::Config, X> { + client: &'a ::subxt::Client, + marker: ::core::marker::PhantomData, + } + impl<'a, T, X> TransactionApi<'a, T, X> + where + T: ::subxt::Config, + X: ::subxt::extrinsic::ExtrinsicParams, + { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { + client, + marker: ::core::marker::PhantomData, + } + } + #[doc = "Change `PalletOwner`."] + #[doc = ""] + #[doc = "May only be called either by root, or by `PalletOwner`."] + pub fn set_owner( + &self, + new_owner: ::core::option::Option<::subxt::sp_core::crypto::AccountId32>, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + SetOwner, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 168u8, 223u8, 175u8, 15u8, 5u8, 101u8, 85u8, 40u8, 177u8, 36u8, 145u8, + 67u8, 135u8, 179u8, 171u8, 30u8, 17u8, 130u8, 2u8, 99u8, 96u8, 141u8, + 109u8, 36u8, 54u8, 185u8, 38u8, 48u8, 191u8, 233u8, 104u8, 163u8, + ] + { + let call = SetOwner { new_owner }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Halt or resume all/some pallet operations."] + #[doc = ""] + #[doc = "May only be called either by root, or by `PalletOwner`."] + pub fn set_operating_mode( + &self, + operating_mode: runtime_types::bp_messages::OperatingMode, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + SetOperatingMode, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 51u8, 64u8, 160u8, 51u8, 9u8, 118u8, 71u8, 106u8, 25u8, 107u8, 67u8, + 86u8, 123u8, 215u8, 161u8, 150u8, 233u8, 199u8, 212u8, 78u8, 233u8, + 35u8, 120u8, 249u8, 145u8, 110u8, 105u8, 78u8, 67u8, 64u8, 189u8, + 199u8, + ] + { + let call = SetOperatingMode { operating_mode }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Update pallet parameter."] + #[doc = ""] + #[doc = "May only be called either by root, or by `PalletOwner`."] + #[doc = ""] + #[doc = "The weight is: single read for permissions check + 2 writes for parameter value and"] + #[doc = "event."] + pub fn update_pallet_parameter( + &self, + parameter: (), + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + UpdatePalletParameter, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .call_hash::()? 
+ == [ + 252u8, 109u8, 232u8, 190u8, 218u8, 178u8, 4u8, 197u8, 159u8, 44u8, + 100u8, 111u8, 106u8, 105u8, 69u8, 161u8, 170u8, 208u8, 241u8, 102u8, + 102u8, 157u8, 19u8, 93u8, 168u8, 66u8, 205u8, 174u8, 158u8, 21u8, + 201u8, 204u8, + ] + { + let call = UpdatePalletParameter { parameter }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Send message over lane."] + pub fn send_message( + &self, + lane_id: [::core::primitive::u8; 4usize], + payload: runtime_types::bp_message_dispatch::MessagePayload< + ::subxt::sp_core::crypto::AccountId32, + runtime_types::sp_runtime::MultiSigner, + runtime_types::sp_runtime::MultiSignature, + ::std::vec::Vec<::core::primitive::u8>, + >, + delivery_and_dispatch_fee: ::core::primitive::u128, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + SendMessage, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 159u8, 54u8, 99u8, 44u8, 222u8, 24u8, 28u8, 193u8, 253u8, 233u8, 170u8, + 10u8, 56u8, 217u8, 127u8, 71u8, 83u8, 188u8, 101u8, 15u8, 38u8, 2u8, + 193u8, 228u8, 195u8, 106u8, 68u8, 10u8, 216u8, 237u8, 99u8, 201u8, + ] + { + let call = SendMessage { + lane_id, + payload, + delivery_and_dispatch_fee, + }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Pay additional fee for the message."] + pub fn increase_message_fee( + &self, + lane_id: [::core::primitive::u8; 4usize], + nonce: ::core::primitive::u64, + additional_fee: ::core::primitive::u128, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + IncreaseMessageFee, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 98u8, 74u8, 240u8, 247u8, 27u8, 236u8, 48u8, 148u8, 45u8, 53u8, 212u8, + 214u8, 25u8, 170u8, 120u8, 109u8, 35u8, 111u8, 27u8, 167u8, 195u8, + 112u8, 76u8, 112u8, 108u8, 74u8, 219u8, 100u8, 226u8, 255u8, 106u8, + 47u8, + ] + { + let call = IncreaseMessageFee { + lane_id, + nonce, + additional_fee, + }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Receive messages proof from bridged chain."] + #[doc = ""] + #[doc = "The weight of the call assumes that the transaction always brings outbound lane"] + #[doc = "state update. Because of that, the submitter (relayer) has no benefit of not including"] + #[doc = "this data in the transaction, so reward confirmations lags should be minimal."] + pub fn receive_messages_proof( + &self, + relayer_id_at_bridged_chain: ::subxt::sp_core::crypto::AccountId32, + proof : runtime_types :: bridge_runtime_common :: messages :: target :: FromBridgedChainMessagesProof < :: subxt :: sp_core :: H256 >, + messages_count: ::core::primitive::u32, + dispatch_weight: ::core::primitive::u64, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + ReceiveMessagesProof, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? 
+ == [ + 73u8, 29u8, 15u8, 165u8, 110u8, 25u8, 158u8, 252u8, 156u8, 95u8, 235u8, + 130u8, 89u8, 18u8, 160u8, 103u8, 122u8, 6u8, 208u8, 159u8, 245u8, 36u8, + 219u8, 99u8, 72u8, 244u8, 213u8, 172u8, 199u8, 85u8, 109u8, 105u8, + ] + { + let call = ReceiveMessagesProof { + relayer_id_at_bridged_chain, + proof, + messages_count, + dispatch_weight, + }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Receive messages delivery proof from bridged chain."] + pub fn receive_messages_delivery_proof( + &self, + proof : runtime_types :: bridge_runtime_common :: messages :: source :: FromBridgedChainMessagesDeliveryProof < :: subxt :: sp_core :: H256 >, + relayers_state: runtime_types::bp_messages::UnrewardedRelayersState, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + ReceiveMessagesDeliveryProof, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .call_hash::()? + == [ + 53u8, 89u8, 207u8, 87u8, 143u8, 218u8, 160u8, 197u8, 228u8, 186u8, + 167u8, 99u8, 14u8, 227u8, 255u8, 92u8, 61u8, 116u8, 193u8, 22u8, 0u8, + 1u8, 140u8, 18u8, 6u8, 188u8, 97u8, 3u8, 194u8, 209u8, 152u8, 60u8, + ] + { + let call = ReceiveMessagesDeliveryProof { + proof, + relayers_state, + }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + pub type Event = runtime_types::pallet_bridge_messages::pallet::Event; + pub mod events { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "Pallet parameter has been updated."] + pub struct ParameterUpdated(pub ()); + impl ::subxt::Event for ParameterUpdated { + const PALLET: &'static str = "BridgeRococoMessages"; + const EVENT: &'static str = "ParameterUpdated"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "Message has been accepted and is waiting to be delivered."] + pub struct MessageAccepted( + pub [::core::primitive::u8; 4usize], + pub ::core::primitive::u64, + ); + impl ::subxt::Event for MessageAccepted { + const PALLET: &'static str = "BridgeRococoMessages"; + const EVENT: &'static str = "MessageAccepted"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "Messages in the inclusive range have been delivered to the bridged chain."] + pub struct MessagesDelivered( + pub [::core::primitive::u8; 4usize], + pub runtime_types::bp_messages::DeliveredMessages, + ); + impl ::subxt::Event for MessagesDelivered { + const PALLET: &'static str = "BridgeRococoMessages"; + const EVENT: &'static str = "MessagesDelivered"; + } + } + pub mod storage { + use super::runtime_types; + pub struct PalletOwner; + impl ::subxt::StorageEntry for PalletOwner { + const PALLET: &'static str = "BridgeRococoMessages"; + const STORAGE: &'static str = "PalletOwner"; + type Value = ::subxt::sp_core::crypto::AccountId32; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct PalletOperatingMode; + impl ::subxt::StorageEntry for PalletOperatingMode { + const PALLET: &'static str = "BridgeRococoMessages"; + const STORAGE: &'static str = "PalletOperatingMode"; + type Value = runtime_types::bp_messages::OperatingMode; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct InboundLanes<'a>(pub &'a [::core::primitive::u8; 
4usize]); + impl ::subxt::StorageEntry for InboundLanes<'_> { + const PALLET: &'static str = "BridgeRococoMessages"; + const STORAGE: &'static str = "InboundLanes"; + type Value = runtime_types::bp_messages::InboundLaneData< + ::subxt::sp_core::crypto::AccountId32, + >; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( + &self.0, + ::subxt::StorageHasher::Blake2_128Concat, + )]) + } + } + pub struct OutboundLanes<'a>(pub &'a [::core::primitive::u8; 4usize]); + impl ::subxt::StorageEntry for OutboundLanes<'_> { + const PALLET: &'static str = "BridgeRococoMessages"; + const STORAGE: &'static str = "OutboundLanes"; + type Value = runtime_types::bp_messages::OutboundLaneData; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( + &self.0, + ::subxt::StorageHasher::Blake2_128Concat, + )]) + } + } + pub struct OutboundMessages<'a>(pub &'a runtime_types::bp_messages::MessageKey); + impl ::subxt::StorageEntry for OutboundMessages<'_> { + const PALLET: &'static str = "BridgeRococoMessages"; + const STORAGE: &'static str = "OutboundMessages"; + type Value = runtime_types::bp_messages::MessageData<::core::primitive::u128>; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( + &self.0, + ::subxt::StorageHasher::Blake2_128Concat, + )]) + } + } + pub struct StorageApi<'a, T: ::subxt::Config> { + client: &'a ::subxt::Client, + } + impl<'a, T: ::subxt::Config> StorageApi<'a, T> { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { client } + } + #[doc = " Optional pallet owner."] + #[doc = ""] + #[doc = " Pallet owner has a right to halt all pallet operations and then resume it. If it is"] + #[doc = " `None`, then there are no direct ways to halt/resume pallet operations, but other"] + #[doc = " runtime methods may still be used to do that (i.e. democracy::referendum to update halt"] + #[doc = " flag directly or call the `halt_operations`)."] + pub async fn pallet_owner( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::core::option::Option<::subxt::sp_core::crypto::AccountId32>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 115u8, 57u8, 104u8, 22u8, 119u8, 16u8, 215u8, 71u8, 228u8, 104u8, + 111u8, 24u8, 53u8, 155u8, 26u8, 121u8, 143u8, 126u8, 72u8, 148u8, + 105u8, 132u8, 190u8, 40u8, 233u8, 219u8, 19u8, 143u8, 255u8, 20u8, + 220u8, 124u8, + ] + { + let entry = PalletOwner; + self.client.storage().fetch(&entry, block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The current operating mode of the pallet."] + #[doc = ""] + #[doc = " Depending on the mode either all, some, or no transactions will be allowed."] + pub async fn pallet_operating_mode( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + runtime_types::bp_messages::OperatingMode, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .storage_hash::()? 
+ == [ + 210u8, 120u8, 80u8, 199u8, 37u8, 129u8, 219u8, 178u8, 3u8, 129u8, + 160u8, 77u8, 255u8, 190u8, 33u8, 163u8, 1u8, 234u8, 96u8, 88u8, 157u8, + 45u8, 31u8, 136u8, 137u8, 30u8, 21u8, 47u8, 118u8, 28u8, 240u8, 131u8, + ] + { + let entry = PalletOperatingMode; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Map of lane id => inbound lane data."] + pub async fn inbound_lanes( + &self, + _0: &[::core::primitive::u8; 4usize], + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + runtime_types::bp_messages::InboundLaneData< + ::subxt::sp_core::crypto::AccountId32, + >, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 147u8, 219u8, 249u8, 150u8, 150u8, 102u8, 58u8, 115u8, 13u8, 113u8, + 123u8, 132u8, 192u8, 87u8, 188u8, 170u8, 17u8, 101u8, 23u8, 37u8, + 209u8, 188u8, 148u8, 44u8, 67u8, 5u8, 197u8, 202u8, 247u8, 177u8, 87u8, + 22u8, + ] + { + let entry = InboundLanes(_0); + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Map of lane id => inbound lane data."] + pub async fn inbound_lanes_iter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::subxt::KeyIter<'a, T, InboundLanes<'a>>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 147u8, 219u8, 249u8, 150u8, 150u8, 102u8, 58u8, 115u8, 13u8, 113u8, + 123u8, 132u8, 192u8, 87u8, 188u8, 170u8, 17u8, 101u8, 23u8, 37u8, + 209u8, 188u8, 148u8, 44u8, 67u8, 5u8, 197u8, 202u8, 247u8, 177u8, 87u8, + 22u8, + ] + { + self.client.storage().iter(block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Map of lane id => outbound lane data."] + pub async fn outbound_lanes( + &self, + _0: &[::core::primitive::u8; 4usize], + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + runtime_types::bp_messages::OutboundLaneData, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 167u8, 52u8, 228u8, 63u8, 97u8, 175u8, 67u8, 104u8, 3u8, 142u8, 1u8, + 95u8, 125u8, 145u8, 23u8, 141u8, 69u8, 159u8, 248u8, 138u8, 132u8, + 134u8, 226u8, 39u8, 80u8, 126u8, 65u8, 114u8, 181u8, 100u8, 194u8, + 217u8, + ] + { + let entry = OutboundLanes(_0); + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Map of lane id => outbound lane data."] + pub async fn outbound_lanes_iter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::subxt::KeyIter<'a, T, OutboundLanes<'a>>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? 
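// The lane maps above key their entries by hashing the 4-byte lane id with
// `Blake2_128Concat`, but callers do not need to rebuild raw keys: the
// generated `StorageApi` does that through `StorageEntry::key`. A sketch of
// reading one inbound lane at the best known block (the lane id is a
// placeholder):
async fn latest_inbound_lane<'a, T: ::subxt::Config>(
    storage: &StorageApi<'a, T>,
) -> Result<
    runtime_types::bp_messages::InboundLaneData<::subxt::sp_core::crypto::AccountId32>,
    ::subxt::BasicError,
> {
    let lane_id: [u8; 4] = *b"test"; // placeholder lane id
    // `None` reads at the latest block; pass `Some(hash)` to pin a block.
    storage.inbound_lanes(&lane_id, None).await
}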
+ == [ + 167u8, 52u8, 228u8, 63u8, 97u8, 175u8, 67u8, 104u8, 3u8, 142u8, 1u8, + 95u8, 125u8, 145u8, 23u8, 141u8, 69u8, 159u8, 248u8, 138u8, 132u8, + 134u8, 226u8, 39u8, 80u8, 126u8, 65u8, 114u8, 181u8, 100u8, 194u8, + 217u8, + ] + { + self.client.storage().iter(block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " All queued outbound messages."] + pub async fn outbound_messages( + &self, + _0: &runtime_types::bp_messages::MessageKey, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::core::option::Option< + runtime_types::bp_messages::MessageData<::core::primitive::u128>, + >, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 177u8, 51u8, 170u8, 171u8, 178u8, 211u8, 79u8, 214u8, 67u8, 138u8, + 133u8, 155u8, 41u8, 236u8, 49u8, 87u8, 77u8, 61u8, 87u8, 87u8, 5u8, + 52u8, 16u8, 64u8, 202u8, 215u8, 40u8, 221u8, 179u8, 109u8, 76u8, 110u8, + ] + { + let entry = OutboundMessages(_0); + self.client.storage().fetch(&entry, block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " All queued outbound messages."] + pub async fn outbound_messages_iter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::subxt::KeyIter<'a, T, OutboundMessages<'a>>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 177u8, 51u8, 170u8, 171u8, 178u8, 211u8, 79u8, 214u8, 67u8, 138u8, + 133u8, 155u8, 41u8, 236u8, 49u8, 87u8, 77u8, 61u8, 87u8, 87u8, 5u8, + 52u8, 16u8, 64u8, 202u8, 215u8, 40u8, 221u8, 179u8, 109u8, 76u8, 110u8, + ] + { + self.client.storage().iter(block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + pub mod constants { + use super::runtime_types; + pub struct ConstantsApi<'a, T: ::subxt::Config> { + client: &'a ::subxt::Client, + } + impl<'a, T: ::subxt::Config> ConstantsApi<'a, T> { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { client } + } + #[doc = " Gets the chain id value from the instance."] + pub fn bridged_chain_id( + &self, + ) -> ::core::result::Result<[::core::primitive::u8; 4usize], ::subxt::BasicError> + { + if self + .client + .metadata() + .constant_hash("BridgeRococoMessages", "BridgedChainId")? 
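// Constants use the same guard, only against `constant_hash`, and the value is
// plain SCALE bytes held in metadata. Skipping the hash check for brevity, a
// generic reader for any pallet constant might look like this (the generics on
// `Client` are an assumption about the subxt release in use):
fn read_constant<T, V>(
    client: &::subxt::Client<T>,
    pallet_name: &str,
    constant_name: &str,
) -> Result<V, ::subxt::BasicError>
where
    T: ::subxt::Config,
    V: ::subxt::codec::Decode,
{
    let pallet = client.metadata().pallet(pallet_name)?;
    let constant = pallet.constant(constant_name)?;
    // The stored bytes are SCALE-encoded; decode them into the expected type.
    let value: V = ::subxt::codec::Decode::decode(&mut &constant.value[..])?;
    Ok(value)
}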
+ == [ + 133u8, 139u8, 37u8, 221u8, 12u8, 53u8, 28u8, 244u8, 20u8, 208u8, 170u8, + 206u8, 199u8, 163u8, 64u8, 197u8, 53u8, 203u8, 37u8, 207u8, 163u8, 8u8, + 105u8, 94u8, 247u8, 117u8, 251u8, 97u8, 243u8, 237u8, 116u8, 130u8, + ] + { + let pallet = self.client.metadata().pallet("BridgeRococoMessages")?; + let constant = pallet.constant("BridgedChainId")?; + let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; + Ok(value) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + } + pub mod bridge_wococo_messages { + use super::root_mod; + use super::runtime_types; + pub mod calls { + use super::root_mod; + use super::runtime_types; + type DispatchError = runtime_types::sp_runtime::DispatchError; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct SetOwner { + pub new_owner: ::core::option::Option<::subxt::sp_core::crypto::AccountId32>, + } + impl ::subxt::Call for SetOwner { + const PALLET: &'static str = "BridgeWococoMessages"; + const FUNCTION: &'static str = "set_owner"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct SetOperatingMode { + pub operating_mode: runtime_types::bp_messages::OperatingMode, + } + impl ::subxt::Call for SetOperatingMode { + const PALLET: &'static str = "BridgeWococoMessages"; + const FUNCTION: &'static str = "set_operating_mode"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct UpdatePalletParameter { + pub parameter: (), + } + impl ::subxt::Call for UpdatePalletParameter { + const PALLET: &'static str = "BridgeWococoMessages"; + const FUNCTION: &'static str = "update_pallet_parameter"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct SendMessage { + pub lane_id: [::core::primitive::u8; 4usize], + pub payload: runtime_types::bp_message_dispatch::MessagePayload< + ::subxt::sp_core::crypto::AccountId32, + runtime_types::sp_runtime::MultiSigner, + runtime_types::sp_runtime::MultiSignature, + ::std::vec::Vec<::core::primitive::u8>, + >, + pub delivery_and_dispatch_fee: ::core::primitive::u128, + } + impl ::subxt::Call for SendMessage { + const PALLET: &'static str = "BridgeWococoMessages"; + const FUNCTION: &'static str = "send_message"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct IncreaseMessageFee { + pub lane_id: [::core::primitive::u8; 4usize], + pub nonce: ::core::primitive::u64, + pub additional_fee: ::core::primitive::u128, + } + impl ::subxt::Call for IncreaseMessageFee { + const PALLET: &'static str = "BridgeWococoMessages"; + const FUNCTION: &'static str = "increase_message_fee"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct ReceiveMessagesProof { pub relayer_id_at_bridged_chain : :: subxt :: sp_core :: crypto :: AccountId32 , pub proof : runtime_types :: bridge_runtime_common :: messages :: target :: FromBridgedChainMessagesProof < :: subxt :: sp_core :: H256 > , pub messages_count : :: core :: primitive :: u32 , pub dispatch_weight : :: core :: primitive :: u64 , } + impl ::subxt::Call for ReceiveMessagesProof { + const PALLET: &'static str = "BridgeWococoMessages"; + const FUNCTION: &'static str = "receive_messages_proof"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct ReceiveMessagesDeliveryProof { pub proof : runtime_types :: bridge_runtime_common :: messages :: source :: 
FromBridgedChainMessagesDeliveryProof < :: subxt :: sp_core :: H256 > , pub relayers_state : runtime_types :: bp_messages :: UnrewardedRelayersState , } + impl ::subxt::Call for ReceiveMessagesDeliveryProof { + const PALLET: &'static str = "BridgeWococoMessages"; + const FUNCTION: &'static str = "receive_messages_delivery_proof"; + } + pub struct TransactionApi<'a, T: ::subxt::Config, X> { + client: &'a ::subxt::Client, + marker: ::core::marker::PhantomData, + } + impl<'a, T, X> TransactionApi<'a, T, X> + where + T: ::subxt::Config, + X: ::subxt::extrinsic::ExtrinsicParams, + { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { + client, + marker: ::core::marker::PhantomData, + } + } + #[doc = "Change `PalletOwner`."] + #[doc = ""] + #[doc = "May only be called either by root, or by `PalletOwner`."] + pub fn set_owner( + &self, + new_owner: ::core::option::Option<::subxt::sp_core::crypto::AccountId32>, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + SetOwner, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 168u8, 223u8, 175u8, 15u8, 5u8, 101u8, 85u8, 40u8, 177u8, 36u8, 145u8, + 67u8, 135u8, 179u8, 171u8, 30u8, 17u8, 130u8, 2u8, 99u8, 96u8, 141u8, + 109u8, 36u8, 54u8, 185u8, 38u8, 48u8, 191u8, 233u8, 104u8, 163u8, + ] + { + let call = SetOwner { new_owner }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Halt or resume all/some pallet operations."] + #[doc = ""] + #[doc = "May only be called either by root, or by `PalletOwner`."] + pub fn set_operating_mode( + &self, + operating_mode: runtime_types::bp_messages::OperatingMode, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + SetOperatingMode, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 51u8, 64u8, 160u8, 51u8, 9u8, 118u8, 71u8, 106u8, 25u8, 107u8, 67u8, + 86u8, 123u8, 215u8, 161u8, 150u8, 233u8, 199u8, 212u8, 78u8, 233u8, + 35u8, 120u8, 249u8, 145u8, 110u8, 105u8, 78u8, 67u8, 64u8, 189u8, + 199u8, + ] + { + let call = SetOperatingMode { operating_mode }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Update pallet parameter."] + #[doc = ""] + #[doc = "May only be called either by root, or by `PalletOwner`."] + #[doc = ""] + #[doc = "The weight is: single read for permissions check + 2 writes for parameter value and"] + #[doc = "event."] + pub fn update_pallet_parameter( + &self, + parameter: (), + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + UpdatePalletParameter, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .call_hash::()? 
+ == [ + 252u8, 109u8, 232u8, 190u8, 218u8, 178u8, 4u8, 197u8, 159u8, 44u8, + 100u8, 111u8, 106u8, 105u8, 69u8, 161u8, 170u8, 208u8, 241u8, 102u8, + 102u8, 157u8, 19u8, 93u8, 168u8, 66u8, 205u8, 174u8, 158u8, 21u8, + 201u8, 204u8, + ] + { + let call = UpdatePalletParameter { parameter }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Send message over lane."] + pub fn send_message( + &self, + lane_id: [::core::primitive::u8; 4usize], + payload: runtime_types::bp_message_dispatch::MessagePayload< + ::subxt::sp_core::crypto::AccountId32, + runtime_types::sp_runtime::MultiSigner, + runtime_types::sp_runtime::MultiSignature, + ::std::vec::Vec<::core::primitive::u8>, + >, + delivery_and_dispatch_fee: ::core::primitive::u128, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + SendMessage, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 159u8, 54u8, 99u8, 44u8, 222u8, 24u8, 28u8, 193u8, 253u8, 233u8, 170u8, + 10u8, 56u8, 217u8, 127u8, 71u8, 83u8, 188u8, 101u8, 15u8, 38u8, 2u8, + 193u8, 228u8, 195u8, 106u8, 68u8, 10u8, 216u8, 237u8, 99u8, 201u8, + ] + { + let call = SendMessage { + lane_id, + payload, + delivery_and_dispatch_fee, + }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Pay additional fee for the message."] + pub fn increase_message_fee( + &self, + lane_id: [::core::primitive::u8; 4usize], + nonce: ::core::primitive::u64, + additional_fee: ::core::primitive::u128, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + IncreaseMessageFee, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 98u8, 74u8, 240u8, 247u8, 27u8, 236u8, 48u8, 148u8, 45u8, 53u8, 212u8, + 214u8, 25u8, 170u8, 120u8, 109u8, 35u8, 111u8, 27u8, 167u8, 195u8, + 112u8, 76u8, 112u8, 108u8, 74u8, 219u8, 100u8, 226u8, 255u8, 106u8, + 47u8, + ] + { + let call = IncreaseMessageFee { + lane_id, + nonce, + additional_fee, + }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Receive messages proof from bridged chain."] + #[doc = ""] + #[doc = "The weight of the call assumes that the transaction always brings outbound lane"] + #[doc = "state update. Because of that, the submitter (relayer) has no benefit of not including"] + #[doc = "this data in the transaction, so reward confirmations lags should be minimal."] + pub fn receive_messages_proof( + &self, + relayer_id_at_bridged_chain: ::subxt::sp_core::crypto::AccountId32, + proof : runtime_types :: bridge_runtime_common :: messages :: target :: FromBridgedChainMessagesProof < :: subxt :: sp_core :: H256 >, + messages_count: ::core::primitive::u32, + dispatch_weight: ::core::primitive::u64, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + ReceiveMessagesProof, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? 
+ == [ + 73u8, 29u8, 15u8, 165u8, 110u8, 25u8, 158u8, 252u8, 156u8, 95u8, 235u8, + 130u8, 89u8, 18u8, 160u8, 103u8, 122u8, 6u8, 208u8, 159u8, 245u8, 36u8, + 219u8, 99u8, 72u8, 244u8, 213u8, 172u8, 199u8, 85u8, 109u8, 105u8, + ] + { + let call = ReceiveMessagesProof { + relayer_id_at_bridged_chain, + proof, + messages_count, + dispatch_weight, + }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Receive messages delivery proof from bridged chain."] + pub fn receive_messages_delivery_proof( + &self, + proof : runtime_types :: bridge_runtime_common :: messages :: source :: FromBridgedChainMessagesDeliveryProof < :: subxt :: sp_core :: H256 >, + relayers_state: runtime_types::bp_messages::UnrewardedRelayersState, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + ReceiveMessagesDeliveryProof, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .call_hash::()? + == [ + 53u8, 89u8, 207u8, 87u8, 143u8, 218u8, 160u8, 197u8, 228u8, 186u8, + 167u8, 99u8, 14u8, 227u8, 255u8, 92u8, 61u8, 116u8, 193u8, 22u8, 0u8, + 1u8, 140u8, 18u8, 6u8, 188u8, 97u8, 3u8, 194u8, 209u8, 152u8, 60u8, + ] + { + let call = ReceiveMessagesDeliveryProof { + proof, + relayers_state, + }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + pub type Event = runtime_types::pallet_bridge_messages::pallet::Event; + pub mod events { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "Pallet parameter has been updated."] + pub struct ParameterUpdated(pub ()); + impl ::subxt::Event for ParameterUpdated { + const PALLET: &'static str = "BridgeWococoMessages"; + const EVENT: &'static str = "ParameterUpdated"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "Message has been accepted and is waiting to be delivered."] + pub struct MessageAccepted( + pub [::core::primitive::u8; 4usize], + pub ::core::primitive::u64, + ); + impl ::subxt::Event for MessageAccepted { + const PALLET: &'static str = "BridgeWococoMessages"; + const EVENT: &'static str = "MessageAccepted"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "Messages in the inclusive range have been delivered to the bridged chain."] + pub struct MessagesDelivered( + pub [::core::primitive::u8; 4usize], + pub runtime_types::bp_messages::DeliveredMessages, + ); + impl ::subxt::Event for MessagesDelivered { + const PALLET: &'static str = "BridgeWococoMessages"; + const EVENT: &'static str = "MessagesDelivered"; + } + } + pub mod storage { + use super::runtime_types; + pub struct PalletOwner; + impl ::subxt::StorageEntry for PalletOwner { + const PALLET: &'static str = "BridgeWococoMessages"; + const STORAGE: &'static str = "PalletOwner"; + type Value = ::subxt::sp_core::crypto::AccountId32; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct PalletOperatingMode; + impl ::subxt::StorageEntry for PalletOperatingMode { + const PALLET: &'static str = "BridgeWococoMessages"; + const STORAGE: &'static str = "PalletOperatingMode"; + type Value = runtime_types::bp_messages::OperatingMode; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct InboundLanes<'a>(pub &'a [::core::primitive::u8; 
4usize]); + impl ::subxt::StorageEntry for InboundLanes<'_> { + const PALLET: &'static str = "BridgeWococoMessages"; + const STORAGE: &'static str = "InboundLanes"; + type Value = runtime_types::bp_messages::InboundLaneData< + ::subxt::sp_core::crypto::AccountId32, + >; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( + &self.0, + ::subxt::StorageHasher::Blake2_128Concat, + )]) + } + } + pub struct OutboundLanes<'a>(pub &'a [::core::primitive::u8; 4usize]); + impl ::subxt::StorageEntry for OutboundLanes<'_> { + const PALLET: &'static str = "BridgeWococoMessages"; + const STORAGE: &'static str = "OutboundLanes"; + type Value = runtime_types::bp_messages::OutboundLaneData; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( + &self.0, + ::subxt::StorageHasher::Blake2_128Concat, + )]) + } + } + pub struct OutboundMessages<'a>(pub &'a runtime_types::bp_messages::MessageKey); + impl ::subxt::StorageEntry for OutboundMessages<'_> { + const PALLET: &'static str = "BridgeWococoMessages"; + const STORAGE: &'static str = "OutboundMessages"; + type Value = runtime_types::bp_messages::MessageData<::core::primitive::u128>; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( + &self.0, + ::subxt::StorageHasher::Blake2_128Concat, + )]) + } + } + pub struct StorageApi<'a, T: ::subxt::Config> { + client: &'a ::subxt::Client, + } + impl<'a, T: ::subxt::Config> StorageApi<'a, T> { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { client } + } + #[doc = " Optional pallet owner."] + #[doc = ""] + #[doc = " Pallet owner has a right to halt all pallet operations and then resume it. If it is"] + #[doc = " `None`, then there are no direct ways to halt/resume pallet operations, but other"] + #[doc = " runtime methods may still be used to do that (i.e. democracy::referendum to update halt"] + #[doc = " flag directly or call the `halt_operations`)."] + pub async fn pallet_owner( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::core::option::Option<::subxt::sp_core::crypto::AccountId32>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 115u8, 57u8, 104u8, 22u8, 119u8, 16u8, 215u8, 71u8, 228u8, 104u8, + 111u8, 24u8, 53u8, 155u8, 26u8, 121u8, 143u8, 126u8, 72u8, 148u8, + 105u8, 132u8, 190u8, 40u8, 233u8, 219u8, 19u8, 143u8, 255u8, 20u8, + 220u8, 124u8, + ] + { + let entry = PalletOwner; + self.client.storage().fetch(&entry, block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The current operating mode of the pallet."] + #[doc = ""] + #[doc = " Depending on the mode either all, some, or no transactions will be allowed."] + pub async fn pallet_operating_mode( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + runtime_types::bp_messages::OperatingMode, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .storage_hash::()? 
+ == [ + 210u8, 120u8, 80u8, 199u8, 37u8, 129u8, 219u8, 178u8, 3u8, 129u8, + 160u8, 77u8, 255u8, 190u8, 33u8, 163u8, 1u8, 234u8, 96u8, 88u8, 157u8, + 45u8, 31u8, 136u8, 137u8, 30u8, 21u8, 47u8, 118u8, 28u8, 240u8, 131u8, + ] + { + let entry = PalletOperatingMode; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Map of lane id => inbound lane data."] + pub async fn inbound_lanes( + &self, + _0: &[::core::primitive::u8; 4usize], + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + runtime_types::bp_messages::InboundLaneData< + ::subxt::sp_core::crypto::AccountId32, + >, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 147u8, 219u8, 249u8, 150u8, 150u8, 102u8, 58u8, 115u8, 13u8, 113u8, + 123u8, 132u8, 192u8, 87u8, 188u8, 170u8, 17u8, 101u8, 23u8, 37u8, + 209u8, 188u8, 148u8, 44u8, 67u8, 5u8, 197u8, 202u8, 247u8, 177u8, 87u8, + 22u8, + ] + { + let entry = InboundLanes(_0); + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Map of lane id => inbound lane data."] + pub async fn inbound_lanes_iter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::subxt::KeyIter<'a, T, InboundLanes<'a>>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 147u8, 219u8, 249u8, 150u8, 150u8, 102u8, 58u8, 115u8, 13u8, 113u8, + 123u8, 132u8, 192u8, 87u8, 188u8, 170u8, 17u8, 101u8, 23u8, 37u8, + 209u8, 188u8, 148u8, 44u8, 67u8, 5u8, 197u8, 202u8, 247u8, 177u8, 87u8, + 22u8, + ] + { + self.client.storage().iter(block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Map of lane id => outbound lane data."] + pub async fn outbound_lanes( + &self, + _0: &[::core::primitive::u8; 4usize], + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + runtime_types::bp_messages::OutboundLaneData, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 167u8, 52u8, 228u8, 63u8, 97u8, 175u8, 67u8, 104u8, 3u8, 142u8, 1u8, + 95u8, 125u8, 145u8, 23u8, 141u8, 69u8, 159u8, 248u8, 138u8, 132u8, + 134u8, 226u8, 39u8, 80u8, 126u8, 65u8, 114u8, 181u8, 100u8, 194u8, + 217u8, + ] + { + let entry = OutboundLanes(_0); + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Map of lane id => outbound lane data."] + pub async fn outbound_lanes_iter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::subxt::KeyIter<'a, T, OutboundLanes<'a>>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? 
+ == [ + 167u8, 52u8, 228u8, 63u8, 97u8, 175u8, 67u8, 104u8, 3u8, 142u8, 1u8, + 95u8, 125u8, 145u8, 23u8, 141u8, 69u8, 159u8, 248u8, 138u8, 132u8, + 134u8, 226u8, 39u8, 80u8, 126u8, 65u8, 114u8, 181u8, 100u8, 194u8, + 217u8, + ] + { + self.client.storage().iter(block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " All queued outbound messages."] + pub async fn outbound_messages( + &self, + _0: &runtime_types::bp_messages::MessageKey, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::core::option::Option< + runtime_types::bp_messages::MessageData<::core::primitive::u128>, + >, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 177u8, 51u8, 170u8, 171u8, 178u8, 211u8, 79u8, 214u8, 67u8, 138u8, + 133u8, 155u8, 41u8, 236u8, 49u8, 87u8, 77u8, 61u8, 87u8, 87u8, 5u8, + 52u8, 16u8, 64u8, 202u8, 215u8, 40u8, 221u8, 179u8, 109u8, 76u8, 110u8, + ] + { + let entry = OutboundMessages(_0); + self.client.storage().fetch(&entry, block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " All queued outbound messages."] + pub async fn outbound_messages_iter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::subxt::KeyIter<'a, T, OutboundMessages<'a>>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 177u8, 51u8, 170u8, 171u8, 178u8, 211u8, 79u8, 214u8, 67u8, 138u8, + 133u8, 155u8, 41u8, 236u8, 49u8, 87u8, 77u8, 61u8, 87u8, 87u8, 5u8, + 52u8, 16u8, 64u8, 202u8, 215u8, 40u8, 221u8, 179u8, 109u8, 76u8, 110u8, + ] + { + self.client.storage().iter(block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + pub mod constants { + use super::runtime_types; + pub struct ConstantsApi<'a, T: ::subxt::Config> { + client: &'a ::subxt::Client, + } + impl<'a, T: ::subxt::Config> ConstantsApi<'a, T> { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { client } + } + #[doc = " Gets the chain id value from the instance."] + pub fn bridged_chain_id( + &self, + ) -> ::core::result::Result<[::core::primitive::u8; 4usize], ::subxt::BasicError> + { + if self + .client + .metadata() + .constant_hash("BridgeWococoMessages", "BridgedChainId")? 
+ == [ + 154u8, 72u8, 97u8, 79u8, 84u8, 66u8, 85u8, 2u8, 236u8, 184u8, 229u8, + 154u8, 144u8, 244u8, 122u8, 19u8, 61u8, 170u8, 228u8, 92u8, 221u8, + 160u8, 137u8, 95u8, 132u8, 191u8, 172u8, 201u8, 177u8, 162u8, 6u8, + 223u8, + ] + { + let pallet = self.client.metadata().pallet("BridgeWococoMessages")?; + let constant = pallet.constant("BridgedChainId")?; + let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; + Ok(value) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + } + pub mod bridge_rococo_messages_dispatch { + use super::root_mod; + use super::runtime_types; + pub type Event = runtime_types::pallet_bridge_dispatch::pallet::Event; + pub mod events { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "Message has been rejected before reaching dispatch."] + pub struct MessageRejected( + pub [::core::primitive::u8; 4usize], + pub ([::core::primitive::u8; 4usize], ::core::primitive::u64), + ); + impl ::subxt::Event for MessageRejected { + const PALLET: &'static str = "BridgeRococoMessagesDispatch"; + const EVENT: &'static str = "MessageRejected"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "Message has been rejected by dispatcher because of spec version mismatch."] + #[doc = "Last two arguments are: expected and passed spec version."] + pub struct MessageVersionSpecMismatch( + pub [::core::primitive::u8; 4usize], + pub ([::core::primitive::u8; 4usize], ::core::primitive::u64), + pub ::core::primitive::u32, + pub ::core::primitive::u32, + ); + impl ::subxt::Event for MessageVersionSpecMismatch { + const PALLET: &'static str = "BridgeRococoMessagesDispatch"; + const EVENT: &'static str = "MessageVersionSpecMismatch"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "Message has been rejected by dispatcher because of weight mismatch."] + #[doc = "Last two arguments are: expected and passed call weight."] + pub struct MessageWeightMismatch( + pub [::core::primitive::u8; 4usize], + pub ([::core::primitive::u8; 4usize], ::core::primitive::u64), + pub ::core::primitive::u64, + pub ::core::primitive::u64, + ); + impl ::subxt::Event for MessageWeightMismatch { + const PALLET: &'static str = "BridgeRococoMessagesDispatch"; + const EVENT: &'static str = "MessageWeightMismatch"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "Message signature mismatch."] + pub struct MessageSignatureMismatch( + pub [::core::primitive::u8; 4usize], + pub ([::core::primitive::u8; 4usize], ::core::primitive::u64), + ); + impl ::subxt::Event for MessageSignatureMismatch { + const PALLET: &'static str = "BridgeRococoMessagesDispatch"; + const EVENT: &'static str = "MessageSignatureMismatch"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "We have failed to decode Call from the message."] + pub struct MessageCallDecodeFailed( + pub [::core::primitive::u8; 4usize], + pub ([::core::primitive::u8; 4usize], ::core::primitive::u64), + ); + impl ::subxt::Event for MessageCallDecodeFailed { + const PALLET: &'static str = "BridgeRococoMessagesDispatch"; + const EVENT: &'static str = "MessageCallDecodeFailed"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "The call from the message has been rejected by the call filter."] + pub struct MessageCallRejected( + pub [::core::primitive::u8; 4usize], + 
pub ([::core::primitive::u8; 4usize], ::core::primitive::u64), + ); + impl ::subxt::Event for MessageCallRejected { + const PALLET: &'static str = "BridgeRococoMessagesDispatch"; + const EVENT: &'static str = "MessageCallRejected"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "The origin account has failed to pay fee for dispatching the message."] + pub struct MessageDispatchPaymentFailed( + pub [::core::primitive::u8; 4usize], + pub ([::core::primitive::u8; 4usize], ::core::primitive::u64), + pub ::subxt::sp_core::crypto::AccountId32, + pub ::core::primitive::u64, + ); + impl ::subxt::Event for MessageDispatchPaymentFailed { + const PALLET: &'static str = "BridgeRococoMessagesDispatch"; + const EVENT: &'static str = "MessageDispatchPaymentFailed"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "Message has been dispatched with given result."] + pub struct MessageDispatched( + pub [::core::primitive::u8; 4usize], + pub ([::core::primitive::u8; 4usize], ::core::primitive::u64), + pub ::core::result::Result<(), runtime_types::sp_runtime::DispatchError>, + ); + impl ::subxt::Event for MessageDispatched { + const PALLET: &'static str = "BridgeRococoMessagesDispatch"; + const EVENT: &'static str = "MessageDispatched"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "Phantom member, never used. Needed to handle multiple pallet instances."] + pub struct _Dummy; + impl ::subxt::Event for _Dummy { + const PALLET: &'static str = "BridgeRococoMessagesDispatch"; + const EVENT: &'static str = "_Dummy"; + } + } + } + pub mod bridge_wococo_messages_dispatch { + use super::root_mod; + use super::runtime_types; + pub type Event = runtime_types::pallet_bridge_dispatch::pallet::Event; + pub mod events { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "Message has been rejected before reaching dispatch."] + pub struct MessageRejected( + pub [::core::primitive::u8; 4usize], + pub ([::core::primitive::u8; 4usize], ::core::primitive::u64), + ); + impl ::subxt::Event for MessageRejected { + const PALLET: &'static str = "BridgeWococoMessagesDispatch"; + const EVENT: &'static str = "MessageRejected"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "Message has been rejected by dispatcher because of spec version mismatch."] + #[doc = "Last two arguments are: expected and passed spec version."] + pub struct MessageVersionSpecMismatch( + pub [::core::primitive::u8; 4usize], + pub ([::core::primitive::u8; 4usize], ::core::primitive::u64), + pub ::core::primitive::u32, + pub ::core::primitive::u32, + ); + impl ::subxt::Event for MessageVersionSpecMismatch { + const PALLET: &'static str = "BridgeWococoMessagesDispatch"; + const EVENT: &'static str = "MessageVersionSpecMismatch"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "Message has been rejected by dispatcher because of weight mismatch."] + #[doc = "Last two arguments are: expected and passed call weight."] + pub struct MessageWeightMismatch( + pub [::core::primitive::u8; 4usize], + pub ([::core::primitive::u8; 4usize], ::core::primitive::u64), + pub ::core::primitive::u64, + pub ::core::primitive::u64, + ); + impl ::subxt::Event for MessageWeightMismatch { + const PALLET: &'static str = "BridgeWococoMessagesDispatch"; + const EVENT: &'static str = "MessageWeightMismatch"; + } + 
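// Every event struct here only pairs its `PALLET`/`EVENT` names with
// SCALE-decodable fields, so once decoded the tuples can be inspected
// directly. For instance, the `MessageDispatched` event in this module carries
// (bridged chain id, (lane id, nonce), dispatch result), and a relayer can
// reduce it to a simple success flag:
fn dispatch_succeeded(event: &MessageDispatched) -> bool {
    // Field 2 is the `Result` reported by the target chain's dispatcher.
    event.2.is_ok()
}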
#[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "Message signature mismatch."] + pub struct MessageSignatureMismatch( + pub [::core::primitive::u8; 4usize], + pub ([::core::primitive::u8; 4usize], ::core::primitive::u64), + ); + impl ::subxt::Event for MessageSignatureMismatch { + const PALLET: &'static str = "BridgeWococoMessagesDispatch"; + const EVENT: &'static str = "MessageSignatureMismatch"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "We have failed to decode Call from the message."] + pub struct MessageCallDecodeFailed( + pub [::core::primitive::u8; 4usize], + pub ([::core::primitive::u8; 4usize], ::core::primitive::u64), + ); + impl ::subxt::Event for MessageCallDecodeFailed { + const PALLET: &'static str = "BridgeWococoMessagesDispatch"; + const EVENT: &'static str = "MessageCallDecodeFailed"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "The call from the message has been rejected by the call filter."] + pub struct MessageCallRejected( + pub [::core::primitive::u8; 4usize], + pub ([::core::primitive::u8; 4usize], ::core::primitive::u64), + ); + impl ::subxt::Event for MessageCallRejected { + const PALLET: &'static str = "BridgeWococoMessagesDispatch"; + const EVENT: &'static str = "MessageCallRejected"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "The origin account has failed to pay fee for dispatching the message."] + pub struct MessageDispatchPaymentFailed( + pub [::core::primitive::u8; 4usize], + pub ([::core::primitive::u8; 4usize], ::core::primitive::u64), + pub ::subxt::sp_core::crypto::AccountId32, + pub ::core::primitive::u64, + ); + impl ::subxt::Event for MessageDispatchPaymentFailed { + const PALLET: &'static str = "BridgeWococoMessagesDispatch"; + const EVENT: &'static str = "MessageDispatchPaymentFailed"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "Message has been dispatched with given result."] + pub struct MessageDispatched( + pub [::core::primitive::u8; 4usize], + pub ([::core::primitive::u8; 4usize], ::core::primitive::u64), + pub ::core::result::Result<(), runtime_types::sp_runtime::DispatchError>, + ); + impl ::subxt::Event for MessageDispatched { + const PALLET: &'static str = "BridgeWococoMessagesDispatch"; + const EVENT: &'static str = "MessageDispatched"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "Phantom member, never used. 
Needed to handle multiple pallet instances."] + pub struct _Dummy; + impl ::subxt::Event for _Dummy { + const PALLET: &'static str = "BridgeWococoMessagesDispatch"; + const EVENT: &'static str = "_Dummy"; + } + } + } + pub mod collective { + use super::root_mod; + use super::runtime_types; + pub mod calls { + use super::root_mod; + use super::runtime_types; + type DispatchError = runtime_types::sp_runtime::DispatchError; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct SetMembers { + pub new_members: ::std::vec::Vec<::subxt::sp_core::crypto::AccountId32>, + pub prime: ::core::option::Option<::subxt::sp_core::crypto::AccountId32>, + pub old_count: ::core::primitive::u32, + } + impl ::subxt::Call for SetMembers { + const PALLET: &'static str = "Collective"; + const FUNCTION: &'static str = "set_members"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct Execute { + pub proposal: ::std::boxed::Box, + #[codec(compact)] + pub length_bound: ::core::primitive::u32, + } + impl ::subxt::Call for Execute { + const PALLET: &'static str = "Collective"; + const FUNCTION: &'static str = "execute"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct Propose { + #[codec(compact)] + pub threshold: ::core::primitive::u32, + pub proposal: ::std::boxed::Box, + #[codec(compact)] + pub length_bound: ::core::primitive::u32, + } + impl ::subxt::Call for Propose { + const PALLET: &'static str = "Collective"; + const FUNCTION: &'static str = "propose"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct Vote { + pub proposal: ::subxt::sp_core::H256, + #[codec(compact)] + pub index: ::core::primitive::u32, + pub approve: ::core::primitive::bool, + } + impl ::subxt::Call for Vote { + const PALLET: &'static str = "Collective"; + const FUNCTION: &'static str = "vote"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct Close { + pub proposal_hash: ::subxt::sp_core::H256, + #[codec(compact)] + pub index: ::core::primitive::u32, + #[codec(compact)] + pub proposal_weight_bound: ::core::primitive::u64, + #[codec(compact)] + pub length_bound: ::core::primitive::u32, + } + impl ::subxt::Call for Close { + const PALLET: &'static str = "Collective"; + const FUNCTION: &'static str = "close"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct DisapproveProposal { + pub proposal_hash: ::subxt::sp_core::H256, + } + impl ::subxt::Call for DisapproveProposal { + const PALLET: &'static str = "Collective"; + const FUNCTION: &'static str = "disapprove_proposal"; + } + pub struct TransactionApi<'a, T: ::subxt::Config, X> { + client: &'a ::subxt::Client, + marker: ::core::marker::PhantomData, + } + impl<'a, T, X> TransactionApi<'a, T, X> + where + T: ::subxt::Config, + X: ::subxt::extrinsic::ExtrinsicParams, + { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { + client, + marker: ::core::marker::PhantomData, + } + } + #[doc = "Set the collective's membership."] + #[doc = ""] + #[doc = "- `new_members`: The new member list. Be nice to the chain and provide it sorted."] + #[doc = "- `prime`: The prime member whose vote sets the default."] + #[doc = "- `old_count`: The upper bound for the previous number of members in storage. 
Used for"] + #[doc = " weight estimation."] + #[doc = ""] + #[doc = "Requires root origin."] + #[doc = ""] + #[doc = "NOTE: Does not enforce the expected `MaxMembers` limit on the amount of members, but"] + #[doc = " the weight estimations rely on it to estimate dispatchable weight."] + #[doc = ""] + #[doc = "# WARNING:"] + #[doc = ""] + #[doc = "The `pallet-collective` can also be managed by logic outside of the pallet through the"] + #[doc = "implementation of the trait [`ChangeMembers`]."] + #[doc = "Any call to `set_members` must be careful that the member set doesn't get out of sync"] + #[doc = "with other logic managing the member set."] + #[doc = ""] + #[doc = "# "] + #[doc = "## Weight"] + #[doc = "- `O(MP + N)` where:"] + #[doc = " - `M` old-members-count (code- and governance-bounded)"] + #[doc = " - `N` new-members-count (code- and governance-bounded)"] + #[doc = " - `P` proposals-count (code-bounded)"] + #[doc = "- DB:"] + #[doc = " - 1 storage mutation (codec `O(M)` read, `O(N)` write) for reading and writing the"] + #[doc = " members"] + #[doc = " - 1 storage read (codec `O(P)`) for reading the proposals"] + #[doc = " - `P` storage mutations (codec `O(M)`) for updating the votes for each proposal"] + #[doc = " - 1 storage write (codec `O(1)`) for deleting the old `prime` and setting the new one"] + #[doc = "# "] + pub fn set_members( + &self, + new_members: ::std::vec::Vec<::subxt::sp_core::crypto::AccountId32>, + prime: ::core::option::Option<::subxt::sp_core::crypto::AccountId32>, + old_count: ::core::primitive::u32, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + SetMembers, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 228u8, 186u8, 17u8, 12u8, 231u8, 231u8, 139u8, 15u8, 96u8, 200u8, 68u8, + 27u8, 61u8, 106u8, 245u8, 199u8, 120u8, 141u8, 95u8, 215u8, 36u8, 49u8, + 0u8, 163u8, 172u8, 252u8, 221u8, 9u8, 1u8, 222u8, 44u8, 214u8, + ] + { + let call = SetMembers { + new_members, + prime, + old_count, + }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Dispatch a proposal from a member using the `Member` origin."] + #[doc = ""] + #[doc = "Origin must be a member of the collective."] + #[doc = ""] + #[doc = "# "] + #[doc = "## Weight"] + #[doc = "- `O(M + P)` where `M` members-count (code-bounded) and `P` complexity of dispatching"] + #[doc = " `proposal`"] + #[doc = "- DB: 1 read (codec `O(M)`) + DB access of `proposal`"] + #[doc = "- 1 event"] + #[doc = "# "] + pub fn execute( + &self, + proposal: runtime_types::rococo_runtime::Call, + length_bound: ::core::primitive::u32, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + Execute, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? 
+ == [ + 33u8, 13u8, 40u8, 147u8, 30u8, 13u8, 55u8, 70u8, 225u8, 38u8, 185u8, + 112u8, 158u8, 76u8, 117u8, 198u8, 3u8, 89u8, 222u8, 72u8, 228u8, 107u8, + 216u8, 92u8, 33u8, 166u8, 225u8, 63u8, 204u8, 83u8, 73u8, 33u8, + ] + { + let call = Execute { + proposal: ::std::boxed::Box::new(proposal), + length_bound, + }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Add a new proposal to either be voted on or executed directly."] + #[doc = ""] + #[doc = "Requires the sender to be member."] + #[doc = ""] + #[doc = "`threshold` determines whether `proposal` is executed directly (`threshold < 2`)"] + #[doc = "or put up for voting."] + #[doc = ""] + #[doc = "# "] + #[doc = "## Weight"] + #[doc = "- `O(B + M + P1)` or `O(B + M + P2)` where:"] + #[doc = " - `B` is `proposal` size in bytes (length-fee-bounded)"] + #[doc = " - `M` is members-count (code- and governance-bounded)"] + #[doc = " - branching is influenced by `threshold` where:"] + #[doc = " - `P1` is proposal execution complexity (`threshold < 2`)"] + #[doc = " - `P2` is proposals-count (code-bounded) (`threshold >= 2`)"] + #[doc = "- DB:"] + #[doc = " - 1 storage read `is_member` (codec `O(M)`)"] + #[doc = " - 1 storage read `ProposalOf::contains_key` (codec `O(1)`)"] + #[doc = " - DB accesses influenced by `threshold`:"] + #[doc = " - EITHER storage accesses done by `proposal` (`threshold < 2`)"] + #[doc = " - OR proposal insertion (`threshold <= 2`)"] + #[doc = " - 1 storage mutation `Proposals` (codec `O(P2)`)"] + #[doc = " - 1 storage mutation `ProposalCount` (codec `O(1)`)"] + #[doc = " - 1 storage write `ProposalOf` (codec `O(B)`)"] + #[doc = " - 1 storage write `Voting` (codec `O(M)`)"] + #[doc = " - 1 event"] + #[doc = "# "] + pub fn propose( + &self, + threshold: ::core::primitive::u32, + proposal: runtime_types::rococo_runtime::Call, + length_bound: ::core::primitive::u32, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + Propose, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 244u8, 118u8, 5u8, 210u8, 146u8, 37u8, 198u8, 107u8, 132u8, 59u8, + 227u8, 142u8, 200u8, 130u8, 76u8, 147u8, 11u8, 196u8, 239u8, 145u8, + 198u8, 196u8, 209u8, 184u8, 113u8, 116u8, 8u8, 88u8, 190u8, 230u8, + 242u8, 130u8, + ] + { + let call = Propose { + threshold, + proposal: ::std::boxed::Box::new(proposal), + length_bound, + }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Add an aye or nay vote for the sender to the given proposal."] + #[doc = ""] + #[doc = "Requires the sender to be a member."] + #[doc = ""] + #[doc = "Transaction fees will be waived if the member is voting on any particular proposal"] + #[doc = "for the first time and the call is successful. 
Subsequent vote changes will charge a"] + #[doc = "fee."] + #[doc = "# "] + #[doc = "## Weight"] + #[doc = "- `O(M)` where `M` is members-count (code- and governance-bounded)"] + #[doc = "- DB:"] + #[doc = " - 1 storage read `Members` (codec `O(M)`)"] + #[doc = " - 1 storage mutation `Voting` (codec `O(M)`)"] + #[doc = "- 1 event"] + #[doc = "# "] + pub fn vote( + &self, + proposal: ::subxt::sp_core::H256, + index: ::core::primitive::u32, + approve: ::core::primitive::bool, + ) -> Result< + ::subxt::SubmittableExtrinsic<'a, T, X, Vote, DispatchError, root_mod::Event>, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 129u8, 129u8, 2u8, 51u8, 247u8, 109u8, 5u8, 198u8, 38u8, 192u8, 159u8, + 167u8, 176u8, 0u8, 181u8, 84u8, 92u8, 93u8, 179u8, 86u8, 108u8, 155u8, + 119u8, 3u8, 159u8, 12u8, 206u8, 121u8, 154u8, 226u8, 199u8, 146u8, + ] + { + let call = Vote { + proposal, + index, + approve, + }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Close a vote that is either approved, disapproved or whose voting period has ended."] + #[doc = ""] + #[doc = "May be called by any signed account in order to finish voting and close the proposal."] + #[doc = ""] + #[doc = "If called before the end of the voting period it will only close the vote if it is"] + #[doc = "has enough votes to be approved or disapproved."] + #[doc = ""] + #[doc = "If called after the end of the voting period abstentions are counted as rejections"] + #[doc = "unless there is a prime member set and the prime member cast an approval."] + #[doc = ""] + #[doc = "If the close operation completes successfully with disapproval, the transaction fee will"] + #[doc = "be waived. Otherwise execution of the approved operation will be charged to the caller."] + #[doc = ""] + #[doc = "+ `proposal_weight_bound`: The maximum amount of weight consumed by executing the closed"] + #[doc = "proposal."] + #[doc = "+ `length_bound`: The upper bound for the length of the proposal in storage. Checked via"] + #[doc = "`storage::read` so it is `size_of::() == 4` larger than the pure length."] + #[doc = ""] + #[doc = "# "] + #[doc = "## Weight"] + #[doc = "- `O(B + M + P1 + P2)` where:"] + #[doc = " - `B` is `proposal` size in bytes (length-fee-bounded)"] + #[doc = " - `M` is members-count (code- and governance-bounded)"] + #[doc = " - `P1` is the complexity of `proposal` preimage."] + #[doc = " - `P2` is proposal-count (code-bounded)"] + #[doc = "- DB:"] + #[doc = " - 2 storage reads (`Members`: codec `O(M)`, `Prime`: codec `O(1)`)"] + #[doc = " - 3 mutations (`Voting`: codec `O(M)`, `ProposalOf`: codec `O(B)`, `Proposals`: codec"] + #[doc = " `O(P2)`)"] + #[doc = " - any mutations done while executing `proposal` (`P1`)"] + #[doc = "- up to 3 events"] + #[doc = "# "] + pub fn close( + &self, + proposal_hash: ::subxt::sp_core::H256, + index: ::core::primitive::u32, + proposal_weight_bound: ::core::primitive::u64, + length_bound: ::core::primitive::u32, + ) -> Result< + ::subxt::SubmittableExtrinsic<'a, T, X, Close, DispatchError, root_mod::Event>, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? 
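// The doc comments above describe the whole lifecycle of a motion: `propose`
// opens it (or executes it directly when `threshold < 2`), members `vote` on
// the proposal hash, and anyone may `close` it once it is decided or its
// voting period has ended. A sketch of preparing that sequence with the
// generated API; the index and bounds are placeholders, and the proposal hash
// is assumed to be the Blake2-256 hash of the SCALE-encoded call, matching the
// runtime's hashing.
fn collective_motion<'a, T, X>(
    api: &TransactionApi<'a, T, X>,
    proposal: runtime_types::rococo_runtime::Call,
    threshold: u32,
) -> Result<(), ::subxt::BasicError>
where
    T: ::subxt::Config,
    X: ::subxt::extrinsic::ExtrinsicParams<T>,
{
    use ::subxt::codec::Encode;
    let encoded = proposal.encode();
    // Per the `close` docs, the length bound is a few bytes larger than the
    // encoded proposal because the length is read via `storage::read`.
    let length_bound = encoded.len() as u32 + 4;
    let proposal_hash =
        ::subxt::sp_core::H256(::subxt::sp_core::hashing::blake2_256(&encoded));
    // 1. Open the motion; the real proposal index is reported by `Proposed`.
    let _propose = api.propose(threshold, proposal, length_bound)?;
    // 2. Each member casts an aye/nay vote on (hash, index).
    let _vote = api.vote(proposal_hash, 0, true)?;
    // 3. Once decided or expired, close it with weight/length bounds
    //    (1_000_000_000 is a placeholder weight).
    let _close = api.close(proposal_hash, 0, 1_000_000_000, length_bound)?;
    // The three extrinsics still need to be signed and submitted in order.
    Ok(())
}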
+ == [ + 86u8, 13u8, 193u8, 66u8, 78u8, 210u8, 72u8, 79u8, 119u8, 244u8, 113u8, + 242u8, 84u8, 176u8, 73u8, 199u8, 151u8, 137u8, 180u8, 239u8, 27u8, + 114u8, 191u8, 180u8, 134u8, 165u8, 208u8, 80u8, 244u8, 166u8, 226u8, + 85u8, + ] + { + let call = Close { + proposal_hash, + index, + proposal_weight_bound, + length_bound, + }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Disapprove a proposal, close, and remove it from the system, regardless of its current"] + #[doc = "state."] + #[doc = ""] + #[doc = "Must be called by the Root origin."] + #[doc = ""] + #[doc = "Parameters:"] + #[doc = "* `proposal_hash`: The hash of the proposal that should be disapproved."] + #[doc = ""] + #[doc = "# "] + #[doc = "Complexity: O(P) where P is the number of max proposals"] + #[doc = "DB Weight:"] + #[doc = "* Reads: Proposals"] + #[doc = "* Writes: Voting, Proposals, ProposalOf"] + #[doc = "# "] + pub fn disapprove_proposal( + &self, + proposal_hash: ::subxt::sp_core::H256, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + DisapproveProposal, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 128u8, 85u8, 134u8, 138u8, 161u8, 42u8, 150u8, 65u8, 131u8, 61u8, + 184u8, 59u8, 167u8, 24u8, 200u8, 51u8, 223u8, 101u8, 4u8, 252u8, 159u8, + 239u8, 79u8, 195u8, 255u8, 40u8, 251u8, 239u8, 95u8, 121u8, 123u8, + 47u8, + ] + { + let call = DisapproveProposal { proposal_hash }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + pub type Event = runtime_types::pallet_collective::pallet::Event; + pub mod events { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "A motion (given hash) has been proposed (by given account) with a threshold (given"] + #[doc = "`MemberCount`)."] + pub struct Proposed { + pub account: ::subxt::sp_core::crypto::AccountId32, + pub proposal_index: ::core::primitive::u32, + pub proposal_hash: ::subxt::sp_core::H256, + pub threshold: ::core::primitive::u32, + } + impl ::subxt::Event for Proposed { + const PALLET: &'static str = "Collective"; + const EVENT: &'static str = "Proposed"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "A motion (given hash) has been voted on by given account, leaving"] + #[doc = "a tally (yes votes and no votes given respectively as `MemberCount`)."] + pub struct Voted { + pub account: ::subxt::sp_core::crypto::AccountId32, + pub proposal_hash: ::subxt::sp_core::H256, + pub voted: ::core::primitive::bool, + pub yes: ::core::primitive::u32, + pub no: ::core::primitive::u32, + } + impl ::subxt::Event for Voted { + const PALLET: &'static str = "Collective"; + const EVENT: &'static str = "Voted"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "A motion was approved by the required threshold."] + pub struct Approved { + pub proposal_hash: ::subxt::sp_core::H256, + } + impl ::subxt::Event for Approved { + const PALLET: &'static str = "Collective"; + const EVENT: &'static str = "Approved"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "A motion was not approved by the required threshold."] + pub struct Disapproved { + pub proposal_hash: ::subxt::sp_core::H256, + } + impl ::subxt::Event for Disapproved 
{ + const PALLET: &'static str = "Collective"; + const EVENT: &'static str = "Disapproved"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "A motion was executed; result will be `Ok` if it returned without error."] + pub struct Executed { + pub proposal_hash: ::subxt::sp_core::H256, + pub result: ::core::result::Result<(), runtime_types::sp_runtime::DispatchError>, + } + impl ::subxt::Event for Executed { + const PALLET: &'static str = "Collective"; + const EVENT: &'static str = "Executed"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "A single member did some action; result will be `Ok` if it returned without error."] + pub struct MemberExecuted { + pub proposal_hash: ::subxt::sp_core::H256, + pub result: ::core::result::Result<(), runtime_types::sp_runtime::DispatchError>, + } + impl ::subxt::Event for MemberExecuted { + const PALLET: &'static str = "Collective"; + const EVENT: &'static str = "MemberExecuted"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "A proposal was closed because its threshold was reached or after its duration was up."] + pub struct Closed { + pub proposal_hash: ::subxt::sp_core::H256, + pub yes: ::core::primitive::u32, + pub no: ::core::primitive::u32, + } + impl ::subxt::Event for Closed { + const PALLET: &'static str = "Collective"; + const EVENT: &'static str = "Closed"; + } + } + pub mod storage { + use super::runtime_types; + pub struct Proposals; + impl ::subxt::StorageEntry for Proposals { + const PALLET: &'static str = "Collective"; + const STORAGE: &'static str = "Proposals"; + type Value = runtime_types::frame_support::storage::bounded_vec::BoundedVec< + ::subxt::sp_core::H256, + >; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct ProposalOf<'a>(pub &'a ::subxt::sp_core::H256); + impl ::subxt::StorageEntry for ProposalOf<'_> { + const PALLET: &'static str = "Collective"; + const STORAGE: &'static str = "ProposalOf"; + type Value = runtime_types::rococo_runtime::Call; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( + &self.0, + ::subxt::StorageHasher::Identity, + )]) + } + } + pub struct Voting<'a>(pub &'a ::subxt::sp_core::H256); + impl ::subxt::StorageEntry for Voting<'_> { + const PALLET: &'static str = "Collective"; + const STORAGE: &'static str = "Voting"; + type Value = runtime_types::pallet_collective::Votes< + ::subxt::sp_core::crypto::AccountId32, + ::core::primitive::u32, + >; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( + &self.0, + ::subxt::StorageHasher::Identity, + )]) + } + } + pub struct ProposalCount; + impl ::subxt::StorageEntry for ProposalCount { + const PALLET: &'static str = "Collective"; + const STORAGE: &'static str = "ProposalCount"; + type Value = ::core::primitive::u32; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct Members; + impl ::subxt::StorageEntry for Members { + const PALLET: &'static str = "Collective"; + const STORAGE: &'static str = "Members"; + type Value = ::std::vec::Vec<::subxt::sp_core::crypto::AccountId32>; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct Prime; + impl ::subxt::StorageEntry for Prime { + const PALLET: &'static str = "Collective"; + const STORAGE: &'static str = "Prime"; + type 
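`Executed`, `MemberExecuted`, and, later in this file, `DispatchedAs` all carry the inner dispatch outcome as a `Result<(), DispatchError>`, so the event being emitted does not by itself mean the proposed call succeeded. A small matching sketch, with `DispatchError` reduced to a local stand-in for `runtime_types::sp_runtime::DispatchError`:

    // Stand-in for runtime_types::sp_runtime::DispatchError; illustration only.
    #[allow(dead_code)]
    #[derive(Debug)]
    enum DispatchError {
        BadOrigin,
        Other,
    }

    struct Executed {
        proposal_hash: [u8; 32],
        result: Result<(), DispatchError>,
    }

    fn report(ev: &Executed) {
        // A successfully included extrinsic can still report that the inner call failed.
        match &ev.result {
            Ok(()) => println!("proposal {:?}... executed successfully", &ev.proposal_hash[..4]),
            Err(e) => println!("proposal {:?}... executed but failed: {:?}", &ev.proposal_hash[..4], e),
        }
    }

    fn main() {
        report(&Executed {
            proposal_hash: [0u8; 32],
            result: Err(DispatchError::BadOrigin),
        });
    }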
Value = ::subxt::sp_core::crypto::AccountId32; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct StorageApi<'a, T: ::subxt::Config> { + client: &'a ::subxt::Client, + } + impl<'a, T: ::subxt::Config> StorageApi<'a, T> { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { client } + } + #[doc = " The hashes of the active proposals."] + pub async fn proposals( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + runtime_types::frame_support::storage::bounded_vec::BoundedVec< + ::subxt::sp_core::H256, + >, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 23u8, 209u8, 111u8, 241u8, 83u8, 72u8, 60u8, 51u8, 29u8, 14u8, 223u8, + 213u8, 120u8, 186u8, 243u8, 201u8, 205u8, 183u8, 127u8, 94u8, 190u8, + 180u8, 189u8, 131u8, 87u8, 197u8, 97u8, 231u8, 243u8, 232u8, 15u8, + 61u8, + ] + { + let entry = Proposals; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Actual proposal for a given hash, if it's current."] + pub async fn proposal_of( + &self, + _0: &::subxt::sp_core::H256, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::core::option::Option, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 223u8, 62u8, 117u8, 37u8, 17u8, 61u8, 152u8, 232u8, 38u8, 182u8, 199u8, + 60u8, 43u8, 134u8, 252u8, 41u8, 71u8, 148u8, 114u8, 61u8, 236u8, 0u8, + 230u8, 40u8, 242u8, 136u8, 123u8, 40u8, 184u8, 160u8, 128u8, 191u8, + ] + { + let entry = ProposalOf(_0); + self.client.storage().fetch(&entry, block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Actual proposal for a given hash, if it's current."] + pub async fn proposal_of_iter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::subxt::KeyIter<'a, T, ProposalOf<'a>>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 223u8, 62u8, 117u8, 37u8, 17u8, 61u8, 152u8, 232u8, 38u8, 182u8, 199u8, + 60u8, 43u8, 134u8, 252u8, 41u8, 71u8, 148u8, 114u8, 61u8, 236u8, 0u8, + 230u8, 40u8, 242u8, 136u8, 123u8, 40u8, 184u8, 160u8, 128u8, 191u8, + ] + { + self.client.storage().iter(block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Votes on a given proposal, if it is ongoing."] + pub async fn voting( + &self, + _0: &::subxt::sp_core::H256, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::core::option::Option< + runtime_types::pallet_collective::Votes< + ::subxt::sp_core::crypto::AccountId32, + ::core::primitive::u32, + >, + >, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 230u8, 117u8, 88u8, 61u8, 179u8, 199u8, 172u8, 121u8, 157u8, 115u8, + 63u8, 88u8, 182u8, 7u8, 191u8, 41u8, 187u8, 5u8, 31u8, 240u8, 202u8, + 14u8, 21u8, 175u8, 39u8, 72u8, 113u8, 220u8, 251u8, 127u8, 30u8, 93u8, + ] + { + let entry = Voting(_0); + self.client.storage().fetch(&entry, block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Votes on a given proposal, if it is ongoing."] + pub async fn voting_iter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result<::subxt::KeyIter<'a, T, Voting<'a>>, ::subxt::BasicError> + { + if self.client.metadata().storage_hash::()? 
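Two return shapes appear in this storage API: entries with a meaningful default (`proposals`, `proposal_count`, `members`) go through `fetch_or_default` and hand back the value type directly, while optional entries (`proposal_of`, `voting`, `prime`) go through `fetch` and come back as an `Option` that is `None` when nothing is stored under that key. A std-only sketch of the difference (the `HashMap` is just a stand-in for chain storage):

    use std::collections::HashMap;

    // Like `proposal_of`/`voting`: None if the key has no stored value.
    fn fetch(store: &HashMap<[u8; 32], u32>, key: [u8; 32]) -> Option<u32> {
        store.get(&key).copied()
    }

    // Like `proposals`/`proposal_count`: falls back to the type's Default.
    fn fetch_or_default(store: &HashMap<[u8; 32], u32>, key: [u8; 32]) -> u32 {
        store.get(&key).copied().unwrap_or_default()
    }

    fn main() {
        let store: HashMap<[u8; 32], u32> = HashMap::new();
        assert_eq!(fetch(&store, [0u8; 32]), None);
        assert_eq!(fetch_or_default(&store, [0u8; 32]), 0);
    }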
+ == [ + 230u8, 117u8, 88u8, 61u8, 179u8, 199u8, 172u8, 121u8, 157u8, 115u8, + 63u8, 88u8, 182u8, 7u8, 191u8, 41u8, 187u8, 5u8, 31u8, 240u8, 202u8, + 14u8, 21u8, 175u8, 39u8, 72u8, 113u8, 220u8, 251u8, 127u8, 30u8, 93u8, + ] + { + self.client.storage().iter(block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Proposals so far."] + pub async fn proposal_count( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> + { + if self.client.metadata().storage_hash::()? + == [ + 132u8, 145u8, 78u8, 218u8, 51u8, 189u8, 55u8, 172u8, 143u8, 33u8, + 140u8, 99u8, 124u8, 208u8, 57u8, 232u8, 154u8, 110u8, 32u8, 142u8, + 24u8, 149u8, 109u8, 105u8, 30u8, 83u8, 39u8, 177u8, 127u8, 160u8, 34u8, + 70u8, + ] + { + let entry = ProposalCount; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The current members of the collective. This is stored sorted (just by value)."] + pub async fn members( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::std::vec::Vec<::subxt::sp_core::crypto::AccountId32>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 136u8, 91u8, 140u8, 173u8, 238u8, 221u8, 4u8, 132u8, 238u8, 99u8, + 195u8, 142u8, 10u8, 35u8, 210u8, 227u8, 22u8, 72u8, 218u8, 222u8, + 227u8, 51u8, 55u8, 31u8, 252u8, 78u8, 195u8, 11u8, 195u8, 242u8, 171u8, + 75u8, + ] + { + let entry = Members; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The prime member that helps determine the default vote behavior in case of absentations."] + pub async fn prime( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::core::option::Option<::subxt::sp_core::crypto::AccountId32>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? 
+ == [ + 70u8, 101u8, 20u8, 160u8, 173u8, 87u8, 190u8, 85u8, 60u8, 249u8, 144u8, + 77u8, 175u8, 195u8, 51u8, 196u8, 234u8, 62u8, 243u8, 199u8, 126u8, + 12u8, 88u8, 252u8, 1u8, 210u8, 65u8, 210u8, 33u8, 19u8, 222u8, 11u8, + ] + { + let entry = Prime; + self.client.storage().fetch(&entry, block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + } + pub mod membership { + use super::root_mod; + use super::runtime_types; + pub mod calls { + use super::root_mod; + use super::runtime_types; + type DispatchError = runtime_types::sp_runtime::DispatchError; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct AddMember { + pub who: ::subxt::sp_core::crypto::AccountId32, + } + impl ::subxt::Call for AddMember { + const PALLET: &'static str = "Membership"; + const FUNCTION: &'static str = "add_member"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct RemoveMember { + pub who: ::subxt::sp_core::crypto::AccountId32, + } + impl ::subxt::Call for RemoveMember { + const PALLET: &'static str = "Membership"; + const FUNCTION: &'static str = "remove_member"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct SwapMember { + pub remove: ::subxt::sp_core::crypto::AccountId32, + pub add: ::subxt::sp_core::crypto::AccountId32, + } + impl ::subxt::Call for SwapMember { + const PALLET: &'static str = "Membership"; + const FUNCTION: &'static str = "swap_member"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct ResetMembers { + pub members: ::std::vec::Vec<::subxt::sp_core::crypto::AccountId32>, + } + impl ::subxt::Call for ResetMembers { + const PALLET: &'static str = "Membership"; + const FUNCTION: &'static str = "reset_members"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct ChangeKey { + pub new: ::subxt::sp_core::crypto::AccountId32, + } + impl ::subxt::Call for ChangeKey { + const PALLET: &'static str = "Membership"; + const FUNCTION: &'static str = "change_key"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct SetPrime { + pub who: ::subxt::sp_core::crypto::AccountId32, + } + impl ::subxt::Call for SetPrime { + const PALLET: &'static str = "Membership"; + const FUNCTION: &'static str = "set_prime"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct ClearPrime; + impl ::subxt::Call for ClearPrime { + const PALLET: &'static str = "Membership"; + const FUNCTION: &'static str = "clear_prime"; + } + pub struct TransactionApi<'a, T: ::subxt::Config, X> { + client: &'a ::subxt::Client, + marker: ::core::marker::PhantomData, + } + impl<'a, T, X> TransactionApi<'a, T, X> + where + T: ::subxt::Config, + X: ::subxt::extrinsic::ExtrinsicParams, + { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { + client, + marker: ::core::marker::PhantomData, + } + } + #[doc = "Add a member `who` to the set."] + #[doc = ""] + #[doc = "May only be called from `T::AddOrigin`."] + pub fn add_member( + &self, + who: ::subxt::sp_core::crypto::AccountId32, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + AddMember, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? 
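As with the Collective calls above, each Membership call struct only carries its SCALE-encodable fields; the pallet and function it targets come from the two associated constants on `::subxt::Call`. A local stand-in of that trait, purely to show how `AddMember` is wired up (the real trait is subxt's; `[u8; 32]` stands in for `AccountId32`):

    // Local stand-in for the subxt `Call` trait; illustration only.
    trait Call {
        const PALLET: &'static str;
        const FUNCTION: &'static str;
    }

    // `[u8; 32]` stands in for ::subxt::sp_core::crypto::AccountId32.
    struct AddMember {
        who: [u8; 32],
    }

    impl Call for AddMember {
        const PALLET: &'static str = "Membership";
        const FUNCTION: &'static str = "add_member";
    }

    fn main() {
        // The (pallet, function) pair is what gets looked up in the node's metadata;
        // the struct's fields are what end up SCALE-encoded into the extrinsic.
        let call = AddMember { who: [0u8; 32] };
        println!(
            "{}::{} targeting a {}-byte account id",
            <AddMember as Call>::PALLET,
            <AddMember as Call>::FUNCTION,
            call.who.len()
        );
    }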
+ == [ + 1u8, 149u8, 115u8, 222u8, 93u8, 9u8, 208u8, 58u8, 22u8, 148u8, 215u8, + 141u8, 204u8, 48u8, 107u8, 210u8, 202u8, 165u8, 43u8, 159u8, 45u8, + 161u8, 255u8, 127u8, 225u8, 100u8, 161u8, 195u8, 197u8, 206u8, 57u8, + 166u8, + ] + { + let call = AddMember { who }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Remove a member `who` from the set."] + #[doc = ""] + #[doc = "May only be called from `T::RemoveOrigin`."] + pub fn remove_member( + &self, + who: ::subxt::sp_core::crypto::AccountId32, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + RemoveMember, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 137u8, 249u8, 148u8, 139u8, 147u8, 47u8, 226u8, 228u8, 139u8, 219u8, + 109u8, 128u8, 254u8, 51u8, 227u8, 154u8, 105u8, 91u8, 229u8, 69u8, + 217u8, 241u8, 107u8, 229u8, 41u8, 202u8, 228u8, 227u8, 160u8, 162u8, + 45u8, 211u8, + ] + { + let call = RemoveMember { who }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Swap out one member `remove` for another `add`."] + #[doc = ""] + #[doc = "May only be called from `T::SwapOrigin`."] + #[doc = ""] + #[doc = "Prime membership is *not* passed from `remove` to `add`, if extant."] + pub fn swap_member( + &self, + remove: ::subxt::sp_core::crypto::AccountId32, + add: ::subxt::sp_core::crypto::AccountId32, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + SwapMember, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 159u8, 62u8, 254u8, 117u8, 56u8, 185u8, 99u8, 29u8, 146u8, 210u8, 40u8, + 77u8, 169u8, 224u8, 215u8, 34u8, 106u8, 95u8, 204u8, 109u8, 72u8, 67u8, + 11u8, 183u8, 33u8, 84u8, 133u8, 4u8, 5u8, 13u8, 188u8, 123u8, + ] + { + let call = SwapMember { remove, add }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Change the membership to a new set, disregarding the existing membership. Be nice and"] + #[doc = "pass `members` pre-sorted."] + #[doc = ""] + #[doc = "May only be called from `T::ResetOrigin`."] + pub fn reset_members( + &self, + members: ::std::vec::Vec<::subxt::sp_core::crypto::AccountId32>, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + ResetMembers, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? 
+ == [ + 246u8, 84u8, 91u8, 191u8, 61u8, 245u8, 171u8, 80u8, 18u8, 120u8, 61u8, + 86u8, 23u8, 115u8, 161u8, 203u8, 128u8, 34u8, 166u8, 128u8, 33u8, 28u8, + 229u8, 81u8, 103u8, 217u8, 173u8, 151u8, 31u8, 118u8, 151u8, 217u8, + ] + { + let call = ResetMembers { members }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Swap out the sending member for some other key `new`."] + #[doc = ""] + #[doc = "May only be called from `Signed` origin of a current member."] + #[doc = ""] + #[doc = "Prime membership is passed from the origin account to `new`, if extant."] + pub fn change_key( + &self, + new: ::subxt::sp_core::crypto::AccountId32, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + ChangeKey, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 198u8, 93u8, 41u8, 52u8, 241u8, 11u8, 225u8, 82u8, 30u8, 114u8, 111u8, + 204u8, 13u8, 31u8, 34u8, 82u8, 171u8, 58u8, 180u8, 65u8, 3u8, 246u8, + 33u8, 167u8, 200u8, 23u8, 150u8, 235u8, 130u8, 172u8, 202u8, 216u8, + ] + { + let call = ChangeKey { new }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Set the prime member. Must be a current member."] + #[doc = ""] + #[doc = "May only be called from `T::PrimeOrigin`."] + pub fn set_prime( + &self, + who: ::subxt::sp_core::crypto::AccountId32, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + SetPrime, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 185u8, 53u8, 61u8, 154u8, 234u8, 77u8, 195u8, 126u8, 19u8, 39u8, 78u8, + 205u8, 109u8, 210u8, 137u8, 245u8, 128u8, 110u8, 2u8, 201u8, 20u8, + 153u8, 146u8, 177u8, 4u8, 144u8, 229u8, 125u8, 91u8, 131u8, 199u8, + 15u8, + ] + { + let call = SetPrime { who }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Remove the prime member if it exists."] + #[doc = ""] + #[doc = "May only be called from `T::PrimeOrigin`."] + pub fn clear_prime( + &self, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + ClearPrime, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? 
+ == [ + 186u8, 182u8, 225u8, 90u8, 71u8, 124u8, 69u8, 100u8, 234u8, 25u8, 53u8, + 23u8, 182u8, 32u8, 176u8, 81u8, 54u8, 140u8, 235u8, 126u8, 247u8, 7u8, + 155u8, 62u8, 35u8, 135u8, 48u8, 61u8, 88u8, 160u8, 183u8, 72u8, + ] + { + let call = ClearPrime {}; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + pub type Event = runtime_types::pallet_membership::pallet::Event; + pub mod events { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "The given member was added; see the transaction for who."] + pub struct MemberAdded; + impl ::subxt::Event for MemberAdded { + const PALLET: &'static str = "Membership"; + const EVENT: &'static str = "MemberAdded"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "The given member was removed; see the transaction for who."] + pub struct MemberRemoved; + impl ::subxt::Event for MemberRemoved { + const PALLET: &'static str = "Membership"; + const EVENT: &'static str = "MemberRemoved"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "Two members were swapped; see the transaction for who."] + pub struct MembersSwapped; + impl ::subxt::Event for MembersSwapped { + const PALLET: &'static str = "Membership"; + const EVENT: &'static str = "MembersSwapped"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "The membership was reset; see the transaction for who the new set is."] + pub struct MembersReset; + impl ::subxt::Event for MembersReset { + const PALLET: &'static str = "Membership"; + const EVENT: &'static str = "MembersReset"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "One of the members' keys changed."] + pub struct KeyChanged; + impl ::subxt::Event for KeyChanged { + const PALLET: &'static str = "Membership"; + const EVENT: &'static str = "KeyChanged"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "Phantom member, never used."] + pub struct Dummy; + impl ::subxt::Event for Dummy { + const PALLET: &'static str = "Membership"; + const EVENT: &'static str = "Dummy"; + } + } + pub mod storage { + use super::runtime_types; + pub struct Members; + impl ::subxt::StorageEntry for Members { + const PALLET: &'static str = "Membership"; + const STORAGE: &'static str = "Members"; + type Value = ::std::vec::Vec<::subxt::sp_core::crypto::AccountId32>; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct Prime; + impl ::subxt::StorageEntry for Prime { + const PALLET: &'static str = "Membership"; + const STORAGE: &'static str = "Prime"; + type Value = ::subxt::sp_core::crypto::AccountId32; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct StorageApi<'a, T: ::subxt::Config> { + client: &'a ::subxt::Client, + } + impl<'a, T: ::subxt::Config> StorageApi<'a, T> { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { client } + } + #[doc = " The current membership, stored as an ordered Vec."] + pub async fn members( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::std::vec::Vec<::subxt::sp_core::crypto::AccountId32>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? 
+ == [ + 136u8, 91u8, 140u8, 173u8, 238u8, 221u8, 4u8, 132u8, 238u8, 99u8, + 195u8, 142u8, 10u8, 35u8, 210u8, 227u8, 22u8, 72u8, 218u8, 222u8, + 227u8, 51u8, 55u8, 31u8, 252u8, 78u8, 195u8, 11u8, 195u8, 242u8, 171u8, + 75u8, + ] + { + let entry = Members; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The current prime member, if one exists."] + pub async fn prime( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::core::option::Option<::subxt::sp_core::crypto::AccountId32>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 70u8, 101u8, 20u8, 160u8, 173u8, 87u8, 190u8, 85u8, 60u8, 249u8, 144u8, + 77u8, 175u8, 195u8, 51u8, 196u8, 234u8, 62u8, 243u8, 199u8, 126u8, + 12u8, 88u8, 252u8, 1u8, 210u8, 65u8, 210u8, 33u8, 19u8, 222u8, 11u8, + ] + { + let entry = Prime; + self.client.storage().fetch(&entry, block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + } + pub mod utility { + use super::root_mod; + use super::runtime_types; + pub mod calls { + use super::root_mod; + use super::runtime_types; + type DispatchError = runtime_types::sp_runtime::DispatchError; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct Batch { + pub calls: ::std::vec::Vec, + } + impl ::subxt::Call for Batch { + const PALLET: &'static str = "Utility"; + const FUNCTION: &'static str = "batch"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct AsDerivative { + pub index: ::core::primitive::u16, + pub call: ::std::boxed::Box, + } + impl ::subxt::Call for AsDerivative { + const PALLET: &'static str = "Utility"; + const FUNCTION: &'static str = "as_derivative"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct BatchAll { + pub calls: ::std::vec::Vec, + } + impl ::subxt::Call for BatchAll { + const PALLET: &'static str = "Utility"; + const FUNCTION: &'static str = "batch_all"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct DispatchAs { + pub as_origin: ::std::boxed::Box, + pub call: ::std::boxed::Box, + } + impl ::subxt::Call for DispatchAs { + const PALLET: &'static str = "Utility"; + const FUNCTION: &'static str = "dispatch_as"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct ForceBatch { + pub calls: ::std::vec::Vec, + } + impl ::subxt::Call for ForceBatch { + const PALLET: &'static str = "Utility"; + const FUNCTION: &'static str = "force_batch"; + } + pub struct TransactionApi<'a, T: ::subxt::Config, X> { + client: &'a ::subxt::Client, + marker: ::core::marker::PhantomData, + } + impl<'a, T, X> TransactionApi<'a, T, X> + where + T: ::subxt::Config, + X: ::subxt::extrinsic::ExtrinsicParams, + { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { + client, + marker: ::core::marker::PhantomData, + } + } + #[doc = "Send a batch of dispatch calls."] + #[doc = ""] + #[doc = "May be called from any origin."] + #[doc = ""] + #[doc = "- `calls`: The calls to be dispatched from the same origin. The number of call must not"] + #[doc = " exceed the constant: `batched_calls_limit` (available in constant metadata)."] + #[doc = ""] + #[doc = "If origin is root then call are dispatch without checking origin filter. 
(This includes"] + #[doc = "bypassing `frame_system::Config::BaseCallFilter`)."] + #[doc = ""] + #[doc = "# "] + #[doc = "- Complexity: O(C) where C is the number of calls to be batched."] + #[doc = "# "] + #[doc = ""] + #[doc = "This will return `Ok` in all circumstances. To determine the success of the batch, an"] + #[doc = "event is deposited. If a call failed and the batch was interrupted, then the"] + #[doc = "`BatchInterrupted` event is deposited, along with the number of successful calls made"] + #[doc = "and the error of the failed call. If all were successful, then the `BatchCompleted`"] + #[doc = "event is deposited."] + pub fn batch( + &self, + calls: ::std::vec::Vec, + ) -> Result< + ::subxt::SubmittableExtrinsic<'a, T, X, Batch, DispatchError, root_mod::Event>, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 2u8, 176u8, 87u8, 184u8, 5u8, 237u8, 30u8, 222u8, 1u8, 60u8, 39u8, + 236u8, 29u8, 56u8, 243u8, 90u8, 119u8, 108u8, 67u8, 9u8, 160u8, 182u8, + 92u8, 68u8, 168u8, 9u8, 39u8, 31u8, 247u8, 37u8, 77u8, 224u8, + ] + { + let call = Batch { calls }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Send a call through an indexed pseudonym of the sender."] + #[doc = ""] + #[doc = "Filter from origin are passed along. The call will be dispatched with an origin which"] + #[doc = "use the same filter as the origin of this call."] + #[doc = ""] + #[doc = "NOTE: If you need to ensure that any account-based filtering is not honored (i.e."] + #[doc = "because you expect `proxy` to have been used prior in the call stack and you do not want"] + #[doc = "the call restrictions to apply to any sub-accounts), then use `as_multi_threshold_1`"] + #[doc = "in the Multisig pallet instead."] + #[doc = ""] + #[doc = "NOTE: Prior to version *12, this was called `as_limited_sub`."] + #[doc = ""] + #[doc = "The dispatch origin for this call must be _Signed_."] + pub fn as_derivative( + &self, + index: ::core::primitive::u16, + call: runtime_types::rococo_runtime::Call, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + AsDerivative, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 173u8, 238u8, 47u8, 118u8, 29u8, 201u8, 126u8, 189u8, 172u8, 159u8, + 37u8, 155u8, 153u8, 191u8, 86u8, 144u8, 64u8, 156u8, 120u8, 244u8, + 10u8, 154u8, 26u8, 89u8, 119u8, 181u8, 205u8, 214u8, 149u8, 177u8, + 248u8, 55u8, + ] + { + let call = AsDerivative { + index, + call: ::std::boxed::Box::new(call), + }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Send a batch of dispatch calls and atomically execute them."] + #[doc = "The whole transaction will rollback and fail if any of the calls failed."] + #[doc = ""] + #[doc = "May be called from any origin."] + #[doc = ""] + #[doc = "- `calls`: The calls to be dispatched from the same origin. The number of call must not"] + #[doc = " exceed the constant: `batched_calls_limit` (available in constant metadata)."] + #[doc = ""] + #[doc = "If origin is root then call are dispatch without checking origin filter. 
(This includes"] + #[doc = "bypassing `frame_system::Config::BaseCallFilter`)."] + #[doc = ""] + #[doc = "# "] + #[doc = "- Complexity: O(C) where C is the number of calls to be batched."] + #[doc = "# "] + pub fn batch_all( + &self, + calls: ::std::vec::Vec, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + BatchAll, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 179u8, 43u8, 224u8, 93u8, 67u8, 8u8, 84u8, 182u8, 54u8, 175u8, 46u8, + 126u8, 254u8, 67u8, 18u8, 191u8, 78u8, 231u8, 167u8, 128u8, 211u8, + 99u8, 40u8, 84u8, 232u8, 221u8, 133u8, 128u8, 198u8, 248u8, 94u8, + 140u8, + ] + { + let call = BatchAll { calls }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Dispatches a function call with a provided origin."] + #[doc = ""] + #[doc = "The dispatch origin for this call must be _Root_."] + #[doc = ""] + #[doc = "# "] + #[doc = "- O(1)."] + #[doc = "- Limited storage reads."] + #[doc = "- One DB write (event)."] + #[doc = "- Weight of derivative `call` execution + T::WeightInfo::dispatch_as()."] + #[doc = "# "] + pub fn dispatch_as( + &self, + as_origin: runtime_types::rococo_runtime::OriginCaller, + call: runtime_types::rococo_runtime::Call, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + DispatchAs, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 170u8, 154u8, 110u8, 35u8, 217u8, 69u8, 175u8, 79u8, 131u8, 41u8, 15u8, + 111u8, 156u8, 146u8, 242u8, 249u8, 217u8, 57u8, 167u8, 97u8, 93u8, + 169u8, 50u8, 214u8, 72u8, 111u8, 226u8, 23u8, 177u8, 186u8, 13u8, 38u8, + ] + { + let call = DispatchAs { + as_origin: ::std::boxed::Box::new(as_origin), + call: ::std::boxed::Box::new(call), + }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Send a batch of dispatch calls."] + #[doc = "Unlike `batch`, it allows errors and won't interrupt."] + #[doc = ""] + #[doc = "May be called from any origin."] + #[doc = ""] + #[doc = "- `calls`: The calls to be dispatched from the same origin. The number of call must not"] + #[doc = " exceed the constant: `batched_calls_limit` (available in constant metadata)."] + #[doc = ""] + #[doc = "If origin is root then call are dispatch without checking origin filter. (This includes"] + #[doc = "bypassing `frame_system::Config::BaseCallFilter`)."] + #[doc = ""] + #[doc = "# "] + #[doc = "- Complexity: O(C) where C is the number of calls to be batched."] + #[doc = "# "] + pub fn force_batch( + &self, + calls: ::std::vec::Vec, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + ForceBatch, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? 
+ == [ + 236u8, 124u8, 176u8, 111u8, 53u8, 194u8, 175u8, 226u8, 3u8, 221u8, + 56u8, 196u8, 137u8, 21u8, 196u8, 127u8, 166u8, 15u8, 227u8, 72u8, 22u8, + 42u8, 212u8, 0u8, 14u8, 179u8, 65u8, 190u8, 134u8, 235u8, 93u8, 203u8, + ] + { + let call = ForceBatch { calls }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + pub type Event = runtime_types::pallet_utility::pallet::Event; + pub mod events { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "Batch of dispatches did not complete fully. Index of first failing dispatch given, as"] + #[doc = "well as the error."] + pub struct BatchInterrupted { + pub index: ::core::primitive::u32, + pub error: runtime_types::sp_runtime::DispatchError, + } + impl ::subxt::Event for BatchInterrupted { + const PALLET: &'static str = "Utility"; + const EVENT: &'static str = "BatchInterrupted"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "Batch of dispatches completed fully with no error."] + pub struct BatchCompleted; + impl ::subxt::Event for BatchCompleted { + const PALLET: &'static str = "Utility"; + const EVENT: &'static str = "BatchCompleted"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "Batch of dispatches completed but has errors."] + pub struct BatchCompletedWithErrors; + impl ::subxt::Event for BatchCompletedWithErrors { + const PALLET: &'static str = "Utility"; + const EVENT: &'static str = "BatchCompletedWithErrors"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "A single item within a Batch of dispatches has completed with no error."] + pub struct ItemCompleted; + impl ::subxt::Event for ItemCompleted { + const PALLET: &'static str = "Utility"; + const EVENT: &'static str = "ItemCompleted"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "A single item within a Batch of dispatches has completed with error."] + pub struct ItemFailed { + pub error: runtime_types::sp_runtime::DispatchError, + } + impl ::subxt::Event for ItemFailed { + const PALLET: &'static str = "Utility"; + const EVENT: &'static str = "ItemFailed"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "A call was dispatched."] + pub struct DispatchedAs { + pub result: ::core::result::Result<(), runtime_types::sp_runtime::DispatchError>, + } + impl ::subxt::Event for DispatchedAs { + const PALLET: &'static str = "Utility"; + const EVENT: &'static str = "DispatchedAs"; + } + } + pub mod constants { + use super::runtime_types; + pub struct ConstantsApi<'a, T: ::subxt::Config> { + client: &'a ::subxt::Client, + } + impl<'a, T: ::subxt::Config> ConstantsApi<'a, T> { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { client } + } + #[doc = " The limit on the number of batched calls."] + pub fn batched_calls_limit( + &self, + ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> + { + if self + .client + .metadata() + .constant_hash("Utility", "batched_calls_limit")? 
+ == [ + 230u8, 161u8, 6u8, 191u8, 162u8, 108u8, 149u8, 245u8, 68u8, 101u8, + 120u8, 129u8, 140u8, 51u8, 77u8, 97u8, 30u8, 155u8, 115u8, 70u8, 72u8, + 235u8, 251u8, 192u8, 5u8, 8u8, 188u8, 72u8, 132u8, 227u8, 44u8, 2u8, + ] + { + let pallet = self.client.metadata().pallet("Utility")?; + let constant = pallet.constant("batched_calls_limit")?; + let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; + Ok(value) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + } + pub mod proxy { + use super::root_mod; + use super::runtime_types; + pub mod calls { + use super::root_mod; + use super::runtime_types; + type DispatchError = runtime_types::sp_runtime::DispatchError; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct Proxy { + pub real: ::subxt::sp_core::crypto::AccountId32, + pub force_proxy_type: + ::core::option::Option, + pub call: ::std::boxed::Box, + } + impl ::subxt::Call for Proxy { + const PALLET: &'static str = "Proxy"; + const FUNCTION: &'static str = "proxy"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct AddProxy { + pub delegate: ::subxt::sp_core::crypto::AccountId32, + pub proxy_type: runtime_types::rococo_runtime::ProxyType, + pub delay: ::core::primitive::u32, + } + impl ::subxt::Call for AddProxy { + const PALLET: &'static str = "Proxy"; + const FUNCTION: &'static str = "add_proxy"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct RemoveProxy { + pub delegate: ::subxt::sp_core::crypto::AccountId32, + pub proxy_type: runtime_types::rococo_runtime::ProxyType, + pub delay: ::core::primitive::u32, + } + impl ::subxt::Call for RemoveProxy { + const PALLET: &'static str = "Proxy"; + const FUNCTION: &'static str = "remove_proxy"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct RemoveProxies; + impl ::subxt::Call for RemoveProxies { + const PALLET: &'static str = "Proxy"; + const FUNCTION: &'static str = "remove_proxies"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct Anonymous { + pub proxy_type: runtime_types::rococo_runtime::ProxyType, + pub delay: ::core::primitive::u32, + pub index: ::core::primitive::u16, + } + impl ::subxt::Call for Anonymous { + const PALLET: &'static str = "Proxy"; + const FUNCTION: &'static str = "anonymous"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct KillAnonymous { + pub spawner: ::subxt::sp_core::crypto::AccountId32, + pub proxy_type: runtime_types::rococo_runtime::ProxyType, + pub index: ::core::primitive::u16, + #[codec(compact)] + pub height: ::core::primitive::u32, + #[codec(compact)] + pub ext_index: ::core::primitive::u32, + } + impl ::subxt::Call for KillAnonymous { + const PALLET: &'static str = "Proxy"; + const FUNCTION: &'static str = "kill_anonymous"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct Announce { + pub real: ::subxt::sp_core::crypto::AccountId32, + pub call_hash: ::subxt::sp_core::H256, + } + impl ::subxt::Call for Announce { + const PALLET: &'static str = "Proxy"; + const FUNCTION: &'static str = "announce"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct RemoveAnnouncement { + pub real: ::subxt::sp_core::crypto::AccountId32, + pub call_hash: ::subxt::sp_core::H256, + } + impl ::subxt::Call for RemoveAnnouncement { + 
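The `batched_calls_limit` accessor above ends the way every constant accessor in this file does: once the hash check passes, the raw `constant.value` bytes from metadata are SCALE-decoded into the documented type. A standalone illustration of that final step using `parity-scale-codec` directly, which is the crate subxt re-exports as `::subxt::codec` (the byte string and value are invented for the example):

    // Decoding a SCALE-encoded u32 constant, as the generated accessor does with
    // `constant.value`. Requires the `parity-scale-codec` crate.
    use parity_scale_codec::Decode;

    fn main() {
        // 1024u32 SCALE-encoded as 4 little-endian bytes (example value only, not the
        // real `batched_calls_limit` of any runtime).
        let raw: Vec<u8> = vec![0x00, 0x04, 0x00, 0x00];
        let limit = u32::decode(&mut &raw[..]).expect("constant bytes decode as u32");
        assert_eq!(limit, 1024);
    }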
const PALLET: &'static str = "Proxy"; + const FUNCTION: &'static str = "remove_announcement"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct RejectAnnouncement { + pub delegate: ::subxt::sp_core::crypto::AccountId32, + pub call_hash: ::subxt::sp_core::H256, + } + impl ::subxt::Call for RejectAnnouncement { + const PALLET: &'static str = "Proxy"; + const FUNCTION: &'static str = "reject_announcement"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct ProxyAnnounced { + pub delegate: ::subxt::sp_core::crypto::AccountId32, + pub real: ::subxt::sp_core::crypto::AccountId32, + pub force_proxy_type: + ::core::option::Option, + pub call: ::std::boxed::Box, + } + impl ::subxt::Call for ProxyAnnounced { + const PALLET: &'static str = "Proxy"; + const FUNCTION: &'static str = "proxy_announced"; + } + pub struct TransactionApi<'a, T: ::subxt::Config, X> { + client: &'a ::subxt::Client, + marker: ::core::marker::PhantomData, + } + impl<'a, T, X> TransactionApi<'a, T, X> + where + T: ::subxt::Config, + X: ::subxt::extrinsic::ExtrinsicParams, + { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { + client, + marker: ::core::marker::PhantomData, + } + } + #[doc = "Dispatch the given `call` from an account that the sender is authorised for through"] + #[doc = "`add_proxy`."] + #[doc = ""] + #[doc = "Removes any corresponding announcement(s)."] + #[doc = ""] + #[doc = "The dispatch origin for this call must be _Signed_."] + #[doc = ""] + #[doc = "Parameters:"] + #[doc = "- `real`: The account that the proxy will make a call on behalf of."] + #[doc = "- `force_proxy_type`: Specify the exact proxy type to be used and checked for this call."] + #[doc = "- `call`: The call to be made by the `real` account."] + #[doc = ""] + #[doc = "# "] + #[doc = "Weight is a function of the number of proxies the user has (P)."] + #[doc = "# "] + pub fn proxy( + &self, + real: ::subxt::sp_core::crypto::AccountId32, + force_proxy_type: ::core::option::Option< + runtime_types::rococo_runtime::ProxyType, + >, + call: runtime_types::rococo_runtime::Call, + ) -> Result< + ::subxt::SubmittableExtrinsic<'a, T, X, Proxy, DispatchError, root_mod::Event>, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 69u8, 48u8, 246u8, 104u8, 222u8, 230u8, 12u8, 24u8, 206u8, 11u8, 36u8, + 34u8, 34u8, 181u8, 165u8, 141u8, 26u8, 66u8, 25u8, 203u8, 105u8, 119u8, + 130u8, 207u8, 155u8, 248u8, 122u8, 232u8, 48u8, 3u8, 151u8, 136u8, + ] + { + let call = Proxy { + real, + force_proxy_type, + call: ::std::boxed::Box::new(call), + }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Register a proxy account for the sender that is able to make calls on its behalf."] + #[doc = ""] + #[doc = "The dispatch origin for this call must be _Signed_."] + #[doc = ""] + #[doc = "Parameters:"] + #[doc = "- `proxy`: The account that the `caller` would like to make a proxy."] + #[doc = "- `proxy_type`: The permissions allowed for this proxy account."] + #[doc = "- `delay`: The announcement period required of the initial proxy. 
Will generally be"] + #[doc = "zero."] + #[doc = ""] + #[doc = "# "] + #[doc = "Weight is a function of the number of proxies the user has (P)."] + #[doc = "# "] + pub fn add_proxy( + &self, + delegate: ::subxt::sp_core::crypto::AccountId32, + proxy_type: runtime_types::rococo_runtime::ProxyType, + delay: ::core::primitive::u32, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + AddProxy, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 147u8, 160u8, 155u8, 162u8, 94u8, 114u8, 60u8, 178u8, 78u8, 235u8, 9u8, + 249u8, 180u8, 152u8, 73u8, 248u8, 238u8, 155u8, 114u8, 32u8, 247u8, + 146u8, 16u8, 94u8, 135u8, 118u8, 13u8, 77u8, 78u8, 69u8, 200u8, 251u8, + ] + { + let call = AddProxy { + delegate, + proxy_type, + delay, + }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Unregister a proxy account for the sender."] + #[doc = ""] + #[doc = "The dispatch origin for this call must be _Signed_."] + #[doc = ""] + #[doc = "Parameters:"] + #[doc = "- `proxy`: The account that the `caller` would like to remove as a proxy."] + #[doc = "- `proxy_type`: The permissions currently enabled for the removed proxy account."] + #[doc = ""] + #[doc = "# "] + #[doc = "Weight is a function of the number of proxies the user has (P)."] + #[doc = "# "] + pub fn remove_proxy( + &self, + delegate: ::subxt::sp_core::crypto::AccountId32, + proxy_type: runtime_types::rococo_runtime::ProxyType, + delay: ::core::primitive::u32, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + RemoveProxy, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 76u8, 45u8, 70u8, 255u8, 181u8, 17u8, 146u8, 110u8, 26u8, 44u8, 191u8, + 48u8, 244u8, 61u8, 163u8, 235u8, 202u8, 184u8, 160u8, 156u8, 130u8, + 47u8, 35u8, 206u8, 12u8, 103u8, 25u8, 27u8, 129u8, 119u8, 162u8, 157u8, + ] + { + let call = RemoveProxy { + delegate, + proxy_type, + delay, + }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Unregister all proxy accounts for the sender."] + #[doc = ""] + #[doc = "The dispatch origin for this call must be _Signed_."] + #[doc = ""] + #[doc = "WARNING: This may be called on accounts created by `anonymous`, however if done, then"] + #[doc = "the unreserved fees will be inaccessible. **All access to this account will be lost.**"] + #[doc = ""] + #[doc = "# "] + #[doc = "Weight is a function of the number of proxies the user has (P)."] + #[doc = "# "] + pub fn remove_proxies( + &self, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + RemoveProxies, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? 
+ == [ + 15u8, 237u8, 27u8, 166u8, 254u8, 218u8, 92u8, 5u8, 213u8, 239u8, 99u8, + 59u8, 1u8, 26u8, 73u8, 252u8, 81u8, 94u8, 214u8, 227u8, 169u8, 58u8, + 40u8, 253u8, 187u8, 225u8, 192u8, 26u8, 19u8, 23u8, 121u8, 129u8, + ] + { + let call = RemoveProxies {}; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Spawn a fresh new account that is guaranteed to be otherwise inaccessible, and"] + #[doc = "initialize it with a proxy of `proxy_type` for `origin` sender."] + #[doc = ""] + #[doc = "Requires a `Signed` origin."] + #[doc = ""] + #[doc = "- `proxy_type`: The type of the proxy that the sender will be registered as over the"] + #[doc = "new account. This will almost always be the most permissive `ProxyType` possible to"] + #[doc = "allow for maximum flexibility."] + #[doc = "- `index`: A disambiguation index, in case this is called multiple times in the same"] + #[doc = "transaction (e.g. with `utility::batch`). Unless you're using `batch` you probably just"] + #[doc = "want to use `0`."] + #[doc = "- `delay`: The announcement period required of the initial proxy. Will generally be"] + #[doc = "zero."] + #[doc = ""] + #[doc = "Fails with `Duplicate` if this has already been called in this transaction, from the"] + #[doc = "same sender, with the same parameters."] + #[doc = ""] + #[doc = "Fails if there are insufficient funds to pay for deposit."] + #[doc = ""] + #[doc = "# "] + #[doc = "Weight is a function of the number of proxies the user has (P)."] + #[doc = "# "] + #[doc = "TODO: Might be over counting 1 read"] + pub fn anonymous( + &self, + proxy_type: runtime_types::rococo_runtime::ProxyType, + delay: ::core::primitive::u32, + index: ::core::primitive::u16, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + Anonymous, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 196u8, 87u8, 202u8, 253u8, 227u8, 15u8, 4u8, 65u8, 86u8, 235u8, 205u8, + 19u8, 248u8, 108u8, 61u8, 206u8, 108u8, 178u8, 123u8, 154u8, 200u8, + 189u8, 124u8, 10u8, 251u8, 86u8, 5u8, 21u8, 172u8, 201u8, 4u8, 176u8, + ] + { + let call = Anonymous { + proxy_type, + delay, + index, + }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Removes a previously spawned anonymous proxy."] + #[doc = ""] + #[doc = "WARNING: **All access to this account will be lost.** Any funds held in it will be"] + #[doc = "inaccessible."] + #[doc = ""] + #[doc = "Requires a `Signed` origin, and the sender account must have been created by a call to"] + #[doc = "`anonymous` with corresponding parameters."] + #[doc = ""] + #[doc = "- `spawner`: The account that originally called `anonymous` to create this account."] + #[doc = "- `index`: The disambiguation index originally passed to `anonymous`. 
Probably `0`."] + #[doc = "- `proxy_type`: The proxy type originally passed to `anonymous`."] + #[doc = "- `height`: The height of the chain when the call to `anonymous` was processed."] + #[doc = "- `ext_index`: The extrinsic index in which the call to `anonymous` was processed."] + #[doc = ""] + #[doc = "Fails with `NoPermission` in case the caller is not a previously created anonymous"] + #[doc = "account whose `anonymous` call has corresponding parameters."] + #[doc = ""] + #[doc = "# "] + #[doc = "Weight is a function of the number of proxies the user has (P)."] + #[doc = "# "] + pub fn kill_anonymous( + &self, + spawner: ::subxt::sp_core::crypto::AccountId32, + proxy_type: runtime_types::rococo_runtime::ProxyType, + index: ::core::primitive::u16, + height: ::core::primitive::u32, + ext_index: ::core::primitive::u32, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + KillAnonymous, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 165u8, 240u8, 108u8, 74u8, 96u8, 200u8, 186u8, 64u8, 56u8, 175u8, + 123u8, 106u8, 122u8, 165u8, 200u8, 20u8, 93u8, 30u8, 93u8, 210u8, 89u8, + 108u8, 13u8, 180u8, 143u8, 218u8, 157u8, 135u8, 111u8, 9u8, 42u8, 18u8, + ] + { + let call = KillAnonymous { + spawner, + proxy_type, + index, + height, + ext_index, + }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Publish the hash of a proxy-call that will be made in the future."] + #[doc = ""] + #[doc = "This must be called some number of blocks before the corresponding `proxy` is attempted"] + #[doc = "if the delay associated with the proxy relationship is greater than zero."] + #[doc = ""] + #[doc = "No more than `MaxPending` announcements may be made at any one time."] + #[doc = ""] + #[doc = "This will take a deposit of `AnnouncementDepositFactor` as well as"] + #[doc = "`AnnouncementDepositBase` if there are no other pending announcements."] + #[doc = ""] + #[doc = "The dispatch origin for this call must be _Signed_ and a proxy of `real`."] + #[doc = ""] + #[doc = "Parameters:"] + #[doc = "- `real`: The account that the proxy will make a call on behalf of."] + #[doc = "- `call_hash`: The hash of the call to be made by the `real` account."] + #[doc = ""] + #[doc = "# "] + #[doc = "Weight is a function of:"] + #[doc = "- A: the number of announcements made."] + #[doc = "- P: the number of proxies the user has."] + #[doc = "# "] + pub fn announce( + &self, + real: ::subxt::sp_core::crypto::AccountId32, + call_hash: ::subxt::sp_core::H256, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + Announce, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? 
+ == [ + 202u8, 10u8, 107u8, 1u8, 212u8, 97u8, 111u8, 209u8, 102u8, 157u8, + 109u8, 231u8, 123u8, 131u8, 173u8, 69u8, 79u8, 143u8, 148u8, 23u8, + 123u8, 22u8, 83u8, 52u8, 86u8, 220u8, 200u8, 11u8, 60u8, 169u8, 71u8, + 183u8, + ] + { + let call = Announce { real, call_hash }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Remove a given announcement."] + #[doc = ""] + #[doc = "May be called by a proxy account to remove a call they previously announced and return"] + #[doc = "the deposit."] + #[doc = ""] + #[doc = "The dispatch origin for this call must be _Signed_."] + #[doc = ""] + #[doc = "Parameters:"] + #[doc = "- `real`: The account that the proxy will make a call on behalf of."] + #[doc = "- `call_hash`: The hash of the call to be made by the `real` account."] + #[doc = ""] + #[doc = "# "] + #[doc = "Weight is a function of:"] + #[doc = "- A: the number of announcements made."] + #[doc = "- P: the number of proxies the user has."] + #[doc = "# "] + pub fn remove_announcement( + &self, + real: ::subxt::sp_core::crypto::AccountId32, + call_hash: ::subxt::sp_core::H256, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + RemoveAnnouncement, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 8u8, 157u8, 190u8, 96u8, 209u8, 28u8, 73u8, 31u8, 19u8, 221u8, 252u8, + 200u8, 180u8, 69u8, 83u8, 239u8, 162u8, 135u8, 102u8, 157u8, 149u8, + 107u8, 192u8, 41u8, 196u8, 83u8, 133u8, 107u8, 82u8, 215u8, 50u8, 8u8, + ] + { + let call = RemoveAnnouncement { real, call_hash }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Remove the given announcement of a delegate."] + #[doc = ""] + #[doc = "May be called by a target (proxied) account to remove a call that one of their delegates"] + #[doc = "(`delegate`) has announced they want to execute. The deposit is returned."] + #[doc = ""] + #[doc = "The dispatch origin for this call must be _Signed_."] + #[doc = ""] + #[doc = "Parameters:"] + #[doc = "- `delegate`: The account that previously announced the call."] + #[doc = "- `call_hash`: The hash of the call to be made."] + #[doc = ""] + #[doc = "# "] + #[doc = "Weight is a function of:"] + #[doc = "- A: the number of announcements made."] + #[doc = "- P: the number of proxies the user has."] + #[doc = "# "] + pub fn reject_announcement( + &self, + delegate: ::subxt::sp_core::crypto::AccountId32, + call_hash: ::subxt::sp_core::H256, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + RejectAnnouncement, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? 
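Per the doc comments on `announce`, a proxy relationship created with a non-zero `delay` cannot be exercised immediately: the call hash must be announced first, `proxy_announced` only works once the delay has elapsed, and `remove_announcement`/`reject_announcement` let either side back out and reclaim the deposit. A rough, std-only model of that timing rule (block numbers and the `can_execute` helper are illustrative, not pallet code):

    // Rough model of the announcement delay described above: an announcement must be
    // at least `delay` blocks old before the proxied call may be executed.
    struct Announcement {
        announced_at: u32, // block height at which `announce` was included
    }

    fn can_execute(a: &Announcement, delay: u32, current_block: u32) -> bool {
        current_block >= a.announced_at.saturating_add(delay)
    }

    fn main() {
        let a = Announcement { announced_at: 100 };
        assert!(!can_execute(&a, 10, 105)); // still inside the delay window
        assert!(can_execute(&a, 10, 110));  // delay elapsed; proxy_announced may run
    }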
+ == [ + 218u8, 26u8, 11u8, 238u8, 82u8, 240u8, 191u8, 46u8, 107u8, 197u8, 58u8, + 160u8, 162u8, 152u8, 12u8, 188u8, 194u8, 185u8, 27u8, 75u8, 192u8, + 236u8, 32u8, 36u8, 131u8, 179u8, 99u8, 33u8, 14u8, 37u8, 163u8, 105u8, + ] + { + let call = RejectAnnouncement { + delegate, + call_hash, + }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Dispatch the given `call` from an account that the sender is authorized for through"] + #[doc = "`add_proxy`."] + #[doc = ""] + #[doc = "Removes any corresponding announcement(s)."] + #[doc = ""] + #[doc = "The dispatch origin for this call must be _Signed_."] + #[doc = ""] + #[doc = "Parameters:"] + #[doc = "- `real`: The account that the proxy will make a call on behalf of."] + #[doc = "- `force_proxy_type`: Specify the exact proxy type to be used and checked for this call."] + #[doc = "- `call`: The call to be made by the `real` account."] + #[doc = ""] + #[doc = "# "] + #[doc = "Weight is a function of:"] + #[doc = "- A: the number of announcements made."] + #[doc = "- P: the number of proxies the user has."] + #[doc = "# "] + pub fn proxy_announced( + &self, + delegate: ::subxt::sp_core::crypto::AccountId32, + real: ::subxt::sp_core::crypto::AccountId32, + force_proxy_type: ::core::option::Option< + runtime_types::rococo_runtime::ProxyType, + >, + call: runtime_types::rococo_runtime::Call, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + ProxyAnnounced, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 160u8, 150u8, 210u8, 154u8, 192u8, 19u8, 14u8, 155u8, 87u8, 97u8, + 248u8, 73u8, 94u8, 38u8, 68u8, 57u8, 216u8, 217u8, 104u8, 26u8, 96u8, + 108u8, 205u8, 25u8, 38u8, 148u8, 4u8, 185u8, 157u8, 183u8, 130u8, 18u8, + ] + { + let call = ProxyAnnounced { + delegate, + real, + force_proxy_type, + call: ::std::boxed::Box::new(call), + }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + pub type Event = runtime_types::pallet_proxy::pallet::Event; + pub mod events { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "A proxy was executed correctly, with the given."] + pub struct ProxyExecuted { + pub result: ::core::result::Result<(), runtime_types::sp_runtime::DispatchError>, + } + impl ::subxt::Event for ProxyExecuted { + const PALLET: &'static str = "Proxy"; + const EVENT: &'static str = "ProxyExecuted"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "Anonymous account has been created by new proxy with given"] + #[doc = "disambiguation index and proxy type."] + pub struct AnonymousCreated { + pub anonymous: ::subxt::sp_core::crypto::AccountId32, + pub who: ::subxt::sp_core::crypto::AccountId32, + pub proxy_type: runtime_types::rococo_runtime::ProxyType, + pub disambiguation_index: ::core::primitive::u16, + } + impl ::subxt::Event for AnonymousCreated { + const PALLET: &'static str = "Proxy"; + const EVENT: &'static str = "AnonymousCreated"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "An announcement was placed to make a call in the future."] + pub struct Announced { + pub real: ::subxt::sp_core::crypto::AccountId32, + pub proxy: ::subxt::sp_core::crypto::AccountId32, + pub call_hash: ::subxt::sp_core::H256, + } + impl 
::subxt::Event for Announced { + const PALLET: &'static str = "Proxy"; + const EVENT: &'static str = "Announced"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "A proxy was added."] + pub struct ProxyAdded { + pub delegator: ::subxt::sp_core::crypto::AccountId32, + pub delegatee: ::subxt::sp_core::crypto::AccountId32, + pub proxy_type: runtime_types::rococo_runtime::ProxyType, + pub delay: ::core::primitive::u32, + } + impl ::subxt::Event for ProxyAdded { + const PALLET: &'static str = "Proxy"; + const EVENT: &'static str = "ProxyAdded"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "A proxy was removed."] + pub struct ProxyRemoved { + pub delegator: ::subxt::sp_core::crypto::AccountId32, + pub delegatee: ::subxt::sp_core::crypto::AccountId32, + pub proxy_type: runtime_types::rococo_runtime::ProxyType, + pub delay: ::core::primitive::u32, + } + impl ::subxt::Event for ProxyRemoved { + const PALLET: &'static str = "Proxy"; + const EVENT: &'static str = "ProxyRemoved"; + } + } + pub mod storage { + use super::runtime_types; + pub struct Proxies<'a>(pub &'a ::subxt::sp_core::crypto::AccountId32); + impl ::subxt::StorageEntry for Proxies<'_> { + const PALLET: &'static str = "Proxy"; + const STORAGE: &'static str = "Proxies"; + type Value = ( + runtime_types::frame_support::storage::bounded_vec::BoundedVec< + runtime_types::pallet_proxy::ProxyDefinition< + ::subxt::sp_core::crypto::AccountId32, + runtime_types::rococo_runtime::ProxyType, + ::core::primitive::u32, + >, + >, + ::core::primitive::u128, + ); + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( + &self.0, + ::subxt::StorageHasher::Twox64Concat, + )]) + } + } + pub struct Announcements<'a>(pub &'a ::subxt::sp_core::crypto::AccountId32); + impl ::subxt::StorageEntry for Announcements<'_> { + const PALLET: &'static str = "Proxy"; + const STORAGE: &'static str = "Announcements"; + type Value = ( + runtime_types::frame_support::storage::bounded_vec::BoundedVec< + runtime_types::pallet_proxy::Announcement< + ::subxt::sp_core::crypto::AccountId32, + ::subxt::sp_core::H256, + ::core::primitive::u32, + >, + >, + ::core::primitive::u128, + ); + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( + &self.0, + ::subxt::StorageHasher::Twox64Concat, + )]) + } + } + pub struct StorageApi<'a, T: ::subxt::Config> { + client: &'a ::subxt::Client, + } + impl<'a, T: ::subxt::Config> StorageApi<'a, T> { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { client } + } + #[doc = " The set of account proxies. Maps the account which has delegated to the accounts"] + #[doc = " which are being delegated to, together with the amount held on deposit."] + pub async fn proxies( + &self, + _0: &::subxt::sp_core::crypto::AccountId32, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ( + runtime_types::frame_support::storage::bounded_vec::BoundedVec< + runtime_types::pallet_proxy::ProxyDefinition< + ::subxt::sp_core::crypto::AccountId32, + runtime_types::rococo_runtime::ProxyType, + ::core::primitive::u32, + >, + >, + ::core::primitive::u128, + ), + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? 
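Unlike the Collective maps earlier in this file, whose `H256` keys use the `Identity` hasher, `Proxies` and `Announcements` are keyed by account and use `Twox64Concat`, which (as generally documented for Substrate storage) prefixes an 8-byte twox64 hash to the raw key so that the key stays recoverable during iteration. The generated `key()` methods merely describe that choice; a local stand-in of the same shape (the real `StorageEntryKey`/`StorageMapKey`/`StorageHasher` types are subxt's):

    // Local stand-ins mirroring how the generated `key()` methods describe a storage key.
    #[allow(dead_code)]
    enum StorageHasher {
        Identity,     // key bytes used as-is (fine for already-uniform H256 keys)
        Twox64Concat, // twox64(key) prefix followed by the raw key bytes
    }

    #[allow(dead_code)]
    enum StorageEntryKey {
        Plain,
        Map(Vec<(Vec<u8>, StorageHasher)>),
    }

    fn proxies_key(account: [u8; 32]) -> StorageEntryKey {
        StorageEntryKey::Map(vec![(account.to_vec(), StorageHasher::Twox64Concat)])
    }

    fn main() {
        match proxies_key([0u8; 32]) {
            StorageEntryKey::Map(parts) => assert_eq!(parts.len(), 1),
            StorageEntryKey::Plain => unreachable!(),
        }
    }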
+ == [ + 6u8, 154u8, 57u8, 61u8, 169u8, 15u8, 54u8, 128u8, 62u8, 67u8, 181u8, + 251u8, 172u8, 194u8, 97u8, 9u8, 141u8, 230u8, 243u8, 33u8, 25u8, 29u8, + 46u8, 136u8, 208u8, 192u8, 147u8, 168u8, 57u8, 18u8, 155u8, 80u8, + ] + { + let entry = Proxies(_0); + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The set of account proxies. Maps the account which has delegated to the accounts"] + #[doc = " which are being delegated to, together with the amount held on deposit."] + pub async fn proxies_iter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result<::subxt::KeyIter<'a, T, Proxies<'a>>, ::subxt::BasicError> + { + if self.client.metadata().storage_hash::()? + == [ + 6u8, 154u8, 57u8, 61u8, 169u8, 15u8, 54u8, 128u8, 62u8, 67u8, 181u8, + 251u8, 172u8, 194u8, 97u8, 9u8, 141u8, 230u8, 243u8, 33u8, 25u8, 29u8, + 46u8, 136u8, 208u8, 192u8, 147u8, 168u8, 57u8, 18u8, 155u8, 80u8, + ] + { + self.client.storage().iter(block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The announcements made by the proxy (key)."] + pub async fn announcements( + &self, + _0: &::subxt::sp_core::crypto::AccountId32, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ( + runtime_types::frame_support::storage::bounded_vec::BoundedVec< + runtime_types::pallet_proxy::Announcement< + ::subxt::sp_core::crypto::AccountId32, + ::subxt::sp_core::H256, + ::core::primitive::u32, + >, + >, + ::core::primitive::u128, + ), + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 87u8, 74u8, 65u8, 120u8, 190u8, 234u8, 159u8, 168u8, 155u8, 253u8, + 183u8, 229u8, 28u8, 118u8, 20u8, 120u8, 27u8, 10u8, 203u8, 236u8, + 174u8, 40u8, 89u8, 216u8, 217u8, 81u8, 135u8, 67u8, 245u8, 226u8, 10u8, + 17u8, + ] + { + let entry = Announcements(_0); + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The announcements made by the proxy (key)."] + pub async fn announcements_iter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::subxt::KeyIter<'a, T, Announcements<'a>>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 87u8, 74u8, 65u8, 120u8, 190u8, 234u8, 159u8, 168u8, 155u8, 253u8, + 183u8, 229u8, 28u8, 118u8, 20u8, 120u8, 27u8, 10u8, 203u8, 236u8, + 174u8, 40u8, 89u8, 216u8, 217u8, 81u8, 135u8, 67u8, 245u8, 226u8, 10u8, + 17u8, + ] + { + self.client.storage().iter(block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + pub mod constants { + use super::runtime_types; + pub struct ConstantsApi<'a, T: ::subxt::Config> { + client: &'a ::subxt::Client, + } + impl<'a, T: ::subxt::Config> ConstantsApi<'a, T> { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { client } + } + #[doc = " The base amount of currency needed to reserve for creating a proxy."] + #[doc = ""] + #[doc = " This is held for an additional storage item whose value size is"] + #[doc = " `sizeof(Balance)` bytes and whose key size is `sizeof(AccountId)` bytes."] + pub fn proxy_deposit_base( + &self, + ) -> ::core::result::Result<::core::primitive::u128, ::subxt::BasicError> + { + if self + .client + .metadata() + .constant_hash("Proxy", "ProxyDepositBase")? 
+ == [ + 126u8, 107u8, 187u8, 250u8, 199u8, 131u8, 62u8, 248u8, 122u8, 95u8, + 138u8, 186u8, 61u8, 129u8, 237u8, 236u8, 225u8, 91u8, 255u8, 54u8, + 183u8, 34u8, 103u8, 35u8, 145u8, 9u8, 1u8, 71u8, 124u8, 220u8, 147u8, + 88u8, + ] + { + let pallet = self.client.metadata().pallet("Proxy")?; + let constant = pallet.constant("ProxyDepositBase")?; + let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; + Ok(value) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The amount of currency needed per proxy added."] + #[doc = ""] + #[doc = " This is held for adding 32 bytes plus an instance of `ProxyType` more into a"] + #[doc = " pre-existing storage value. Thus, when configuring `ProxyDepositFactor` one should take"] + #[doc = " into account `32 + proxy_type.encode().len()` bytes of data."] + pub fn proxy_deposit_factor( + &self, + ) -> ::core::result::Result<::core::primitive::u128, ::subxt::BasicError> + { + if self + .client + .metadata() + .constant_hash("Proxy", "ProxyDepositFactor")? + == [ + 241u8, 48u8, 216u8, 37u8, 136u8, 147u8, 59u8, 234u8, 27u8, 8u8, 138u8, + 46u8, 158u8, 190u8, 141u8, 172u8, 176u8, 158u8, 46u8, 109u8, 188u8, + 240u8, 122u8, 122u8, 83u8, 127u8, 29u8, 89u8, 173u8, 110u8, 7u8, 5u8, + ] + { + let pallet = self.client.metadata().pallet("Proxy")?; + let constant = pallet.constant("ProxyDepositFactor")?; + let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; + Ok(value) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The maximum amount of proxies allowed for a single account."] + pub fn max_proxies( + &self, + ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> + { + if self + .client + .metadata() + .constant_hash("Proxy", "MaxProxies")? + == [ + 249u8, 153u8, 224u8, 128u8, 161u8, 3u8, 39u8, 192u8, 120u8, 150u8, + 184u8, 92u8, 225u8, 222u8, 76u8, 172u8, 131u8, 87u8, 231u8, 128u8, 5u8, + 62u8, 116u8, 112u8, 103u8, 4u8, 39u8, 163u8, 71u8, 97u8, 221u8, 19u8, + ] + { + let pallet = self.client.metadata().pallet("Proxy")?; + let constant = pallet.constant("MaxProxies")?; + let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; + Ok(value) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The maximum amount of time-delayed announcements that are allowed to be pending."] + pub fn max_pending( + &self, + ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> + { + if self + .client + .metadata() + .constant_hash("Proxy", "MaxPending")? + == [ + 88u8, 148u8, 146u8, 152u8, 151u8, 208u8, 255u8, 193u8, 239u8, 105u8, + 197u8, 153u8, 151u8, 18u8, 86u8, 13u8, 242u8, 242u8, 59u8, 92u8, 107u8, + 203u8, 102u8, 69u8, 147u8, 147u8, 37u8, 83u8, 237u8, 9u8, 114u8, 196u8, + ] + { + let pallet = self.client.metadata().pallet("Proxy")?; + let constant = pallet.constant("MaxPending")?; + let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; + Ok(value) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The base amount of currency needed to reserve for creating an announcement."] + #[doc = ""] + #[doc = " This is held when a new storage item holding a `Balance` is created (typically 16"] + #[doc = " bytes)."] + pub fn announcement_deposit_base( + &self, + ) -> ::core::result::Result<::core::primitive::u128, ::subxt::BasicError> + { + if self + .client + .metadata() + .constant_hash("Proxy", "AnnouncementDepositBase")? 
+ == [ + 190u8, 15u8, 203u8, 82u8, 114u8, 33u8, 225u8, 62u8, 89u8, 39u8, 218u8, + 69u8, 217u8, 120u8, 4u8, 235u8, 209u8, 97u8, 119u8, 86u8, 157u8, 178u8, + 64u8, 170u8, 102u8, 187u8, 251u8, 125u8, 20u8, 181u8, 6u8, 102u8, + ] + { + let pallet = self.client.metadata().pallet("Proxy")?; + let constant = pallet.constant("AnnouncementDepositBase")?; + let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; + Ok(value) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The amount of currency needed per announcement made."] + #[doc = ""] + #[doc = " This is held for adding an `AccountId`, `Hash` and `BlockNumber` (typically 68 bytes)"] + #[doc = " into a pre-existing storage value."] + pub fn announcement_deposit_factor( + &self, + ) -> ::core::result::Result<::core::primitive::u128, ::subxt::BasicError> + { + if self + .client + .metadata() + .constant_hash("Proxy", "AnnouncementDepositFactor")? + == [ + 240u8, 165u8, 11u8, 46u8, 237u8, 248u8, 133u8, 48u8, 240u8, 235u8, + 26u8, 59u8, 42u8, 72u8, 18u8, 252u8, 167u8, 16u8, 15u8, 168u8, 197u8, + 45u8, 57u8, 49u8, 173u8, 31u8, 180u8, 27u8, 64u8, 94u8, 139u8, 251u8, + ] + { + let pallet = self.client.metadata().pallet("Proxy")?; + let constant = pallet.constant("AnnouncementDepositFactor")?; + let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; + Ok(value) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + } + pub mod multisig { + use super::root_mod; + use super::runtime_types; + pub mod calls { + use super::root_mod; + use super::runtime_types; + type DispatchError = runtime_types::sp_runtime::DispatchError; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct AsMultiThreshold1 { + pub other_signatories: ::std::vec::Vec<::subxt::sp_core::crypto::AccountId32>, + pub call: ::std::boxed::Box, + } + impl ::subxt::Call for AsMultiThreshold1 { + const PALLET: &'static str = "Multisig"; + const FUNCTION: &'static str = "as_multi_threshold_1"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct AsMulti { + pub threshold: ::core::primitive::u16, + pub other_signatories: ::std::vec::Vec<::subxt::sp_core::crypto::AccountId32>, + pub maybe_timepoint: ::core::option::Option< + runtime_types::pallet_multisig::Timepoint<::core::primitive::u32>, + >, + pub call: ::subxt::WrapperKeepOpaque, + pub store_call: ::core::primitive::bool, + pub max_weight: ::core::primitive::u64, + } + impl ::subxt::Call for AsMulti { + const PALLET: &'static str = "Multisig"; + const FUNCTION: &'static str = "as_multi"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct ApproveAsMulti { + pub threshold: ::core::primitive::u16, + pub other_signatories: ::std::vec::Vec<::subxt::sp_core::crypto::AccountId32>, + pub maybe_timepoint: ::core::option::Option< + runtime_types::pallet_multisig::Timepoint<::core::primitive::u32>, + >, + pub call_hash: [::core::primitive::u8; 32usize], + pub max_weight: ::core::primitive::u64, + } + impl ::subxt::Call for ApproveAsMulti { + const PALLET: &'static str = "Multisig"; + const FUNCTION: &'static str = "approve_as_multi"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct CancelAsMulti { + pub threshold: ::core::primitive::u16, + pub other_signatories: ::std::vec::Vec<::subxt::sp_core::crypto::AccountId32>, + pub timepoint: 
runtime_types::pallet_multisig::Timepoint<::core::primitive::u32>, + pub call_hash: [::core::primitive::u8; 32usize], + } + impl ::subxt::Call for CancelAsMulti { + const PALLET: &'static str = "Multisig"; + const FUNCTION: &'static str = "cancel_as_multi"; + } + pub struct TransactionApi<'a, T: ::subxt::Config, X> { + client: &'a ::subxt::Client, + marker: ::core::marker::PhantomData, + } + impl<'a, T, X> TransactionApi<'a, T, X> + where + T: ::subxt::Config, + X: ::subxt::extrinsic::ExtrinsicParams, + { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { + client, + marker: ::core::marker::PhantomData, + } + } + #[doc = "Immediately dispatch a multi-signature call using a single approval from the caller."] + #[doc = ""] + #[doc = "The dispatch origin for this call must be _Signed_."] + #[doc = ""] + #[doc = "- `other_signatories`: The accounts (other than the sender) who are part of the"] + #[doc = "multi-signature, but do not participate in the approval process."] + #[doc = "- `call`: The call to be executed."] + #[doc = ""] + #[doc = "Result is equivalent to the dispatched result."] + #[doc = ""] + #[doc = "# "] + #[doc = "O(Z + C) where Z is the length of the call and C its execution weight."] + #[doc = "-------------------------------"] + #[doc = "- DB Weight: None"] + #[doc = "- Plus Call Weight"] + #[doc = "# "] + pub fn as_multi_threshold_1( + &self, + other_signatories: ::std::vec::Vec<::subxt::sp_core::crypto::AccountId32>, + call: runtime_types::rococo_runtime::Call, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + AsMultiThreshold1, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 225u8, 196u8, 125u8, 147u8, 112u8, 168u8, 93u8, 153u8, 195u8, 61u8, + 54u8, 153u8, 75u8, 201u8, 168u8, 160u8, 21u8, 17u8, 159u8, 129u8, + 165u8, 19u8, 98u8, 32u8, 173u8, 187u8, 232u8, 181u8, 77u8, 173u8, + 139u8, 219u8, + ] + { + let call = AsMultiThreshold1 { + other_signatories, + call: ::std::boxed::Box::new(call), + }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Register approval for a dispatch to be made from a deterministic composite account if"] + #[doc = "approved by a total of `threshold - 1` of `other_signatories`."] + #[doc = ""] + #[doc = "If there are enough, then dispatch the call."] + #[doc = ""] + #[doc = "Payment: `DepositBase` will be reserved if this is the first approval, plus"] + #[doc = "`threshold` times `DepositFactor`. It is returned once this dispatch happens or"] + #[doc = "is cancelled."] + #[doc = ""] + #[doc = "The dispatch origin for this call must be _Signed_."] + #[doc = ""] + #[doc = "- `threshold`: The total number of approvals for this dispatch before it is executed."] + #[doc = "- `other_signatories`: The accounts (other than the sender) who can approve this"] + #[doc = "dispatch. May not be empty."] + #[doc = "- `maybe_timepoint`: If this is the first approval, then this must be `None`. 
If it is"] + #[doc = "not the first approval, then it must be `Some`, with the timepoint (block number and"] + #[doc = "transaction index) of the first approval transaction."] + #[doc = "- `call`: The call to be executed."] + #[doc = ""] + #[doc = "NOTE: Unless this is the final approval, you will generally want to use"] + #[doc = "`approve_as_multi` instead, since it only requires a hash of the call."] + #[doc = ""] + #[doc = "Result is equivalent to the dispatched result if `threshold` is exactly `1`. Otherwise"] + #[doc = "on success, result is `Ok` and the result from the interior call, if it was executed,"] + #[doc = "may be found in the deposited `MultisigExecuted` event."] + #[doc = ""] + #[doc = "# "] + #[doc = "- `O(S + Z + Call)`."] + #[doc = "- Up to one balance-reserve or unreserve operation."] + #[doc = "- One passthrough operation, one insert, both `O(S)` where `S` is the number of"] + #[doc = " signatories. `S` is capped by `MaxSignatories`, with weight being proportional."] + #[doc = "- One call encode & hash, both of complexity `O(Z)` where `Z` is tx-len."] + #[doc = "- One encode & hash, both of complexity `O(S)`."] + #[doc = "- Up to one binary search and insert (`O(logS + S)`)."] + #[doc = "- I/O: 1 read `O(S)`, up to 1 mutate `O(S)`. Up to one remove."] + #[doc = "- One event."] + #[doc = "- The weight of the `call`."] + #[doc = "- Storage: inserts one item, value size bounded by `MaxSignatories`, with a deposit"] + #[doc = " taken for its lifetime of `DepositBase + threshold * DepositFactor`."] + #[doc = "-------------------------------"] + #[doc = "- DB Weight:"] + #[doc = " - Reads: Multisig Storage, [Caller Account], Calls (if `store_call`)"] + #[doc = " - Writes: Multisig Storage, [Caller Account], Calls (if `store_call`)"] + #[doc = "- Plus Call Weight"] + #[doc = "# "] + pub fn as_multi( + &self, + threshold: ::core::primitive::u16, + other_signatories: ::std::vec::Vec<::subxt::sp_core::crypto::AccountId32>, + maybe_timepoint: ::core::option::Option< + runtime_types::pallet_multisig::Timepoint<::core::primitive::u32>, + >, + call: ::subxt::WrapperKeepOpaque, + store_call: ::core::primitive::bool, + max_weight: ::core::primitive::u64, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + AsMulti, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 50u8, 22u8, 66u8, 21u8, 62u8, 119u8, 198u8, 238u8, 36u8, 241u8, 8u8, + 249u8, 178u8, 13u8, 114u8, 221u8, 72u8, 137u8, 168u8, 42u8, 31u8, 25u8, + 197u8, 9u8, 45u8, 88u8, 248u8, 42u8, 136u8, 230u8, 64u8, 12u8, + ] + { + let call = AsMulti { + threshold, + other_signatories, + maybe_timepoint, + call, + store_call, + max_weight, + }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Register approval for a dispatch to be made from a deterministic composite account if"] + #[doc = "approved by a total of `threshold - 1` of `other_signatories`."] + #[doc = ""] + #[doc = "Payment: `DepositBase` will be reserved if this is the first approval, plus"] + #[doc = "`threshold` times `DepositFactor`. 
It is returned once this dispatch happens or"] + #[doc = "is cancelled."] + #[doc = ""] + #[doc = "The dispatch origin for this call must be _Signed_."] + #[doc = ""] + #[doc = "- `threshold`: The total number of approvals for this dispatch before it is executed."] + #[doc = "- `other_signatories`: The accounts (other than the sender) who can approve this"] + #[doc = "dispatch. May not be empty."] + #[doc = "- `maybe_timepoint`: If this is the first approval, then this must be `None`. If it is"] + #[doc = "not the first approval, then it must be `Some`, with the timepoint (block number and"] + #[doc = "transaction index) of the first approval transaction."] + #[doc = "- `call_hash`: The hash of the call to be executed."] + #[doc = ""] + #[doc = "NOTE: If this is the final approval, you will want to use `as_multi` instead."] + #[doc = ""] + #[doc = "# "] + #[doc = "- `O(S)`."] + #[doc = "- Up to one balance-reserve or unreserve operation."] + #[doc = "- One passthrough operation, one insert, both `O(S)` where `S` is the number of"] + #[doc = " signatories. `S` is capped by `MaxSignatories`, with weight being proportional."] + #[doc = "- One encode & hash, both of complexity `O(S)`."] + #[doc = "- Up to one binary search and insert (`O(logS + S)`)."] + #[doc = "- I/O: 1 read `O(S)`, up to 1 mutate `O(S)`. Up to one remove."] + #[doc = "- One event."] + #[doc = "- Storage: inserts one item, value size bounded by `MaxSignatories`, with a deposit"] + #[doc = " taken for its lifetime of `DepositBase + threshold * DepositFactor`."] + #[doc = "----------------------------------"] + #[doc = "- DB Weight:"] + #[doc = " - Read: Multisig Storage, [Caller Account]"] + #[doc = " - Write: Multisig Storage, [Caller Account]"] + #[doc = "# "] + pub fn approve_as_multi( + &self, + threshold: ::core::primitive::u16, + other_signatories: ::std::vec::Vec<::subxt::sp_core::crypto::AccountId32>, + maybe_timepoint: ::core::option::Option< + runtime_types::pallet_multisig::Timepoint<::core::primitive::u32>, + >, + call_hash: [::core::primitive::u8; 32usize], + max_weight: ::core::primitive::u64, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + ApproveAsMulti, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 114u8, 29u8, 118u8, 154u8, 91u8, 4u8, 127u8, 126u8, 190u8, 180u8, 57u8, + 112u8, 72u8, 8u8, 248u8, 126u8, 25u8, 190u8, 130u8, 86u8, 160u8, 164u8, + 76u8, 64u8, 25u8, 175u8, 132u8, 225u8, 147u8, 166u8, 12u8, 38u8, + ] + { + let call = ApproveAsMulti { + threshold, + other_signatories, + maybe_timepoint, + call_hash, + max_weight, + }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Cancel a pre-existing, on-going multisig transaction. Any deposit reserved previously"] + #[doc = "for this operation will be unreserved on success."] + #[doc = ""] + #[doc = "The dispatch origin for this call must be _Signed_."] + #[doc = ""] + #[doc = "- `threshold`: The total number of approvals for this dispatch before it is executed."] + #[doc = "- `other_signatories`: The accounts (other than the sender) who can approve this"] + #[doc = "dispatch. 
May not be empty."] + #[doc = "- `timepoint`: The timepoint (block number and transaction index) of the first approval"] + #[doc = "transaction for this dispatch."] + #[doc = "- `call_hash`: The hash of the call to be executed."] + #[doc = ""] + #[doc = "# "] + #[doc = "- `O(S)`."] + #[doc = "- Up to one balance-reserve or unreserve operation."] + #[doc = "- One passthrough operation, one insert, both `O(S)` where `S` is the number of"] + #[doc = " signatories. `S` is capped by `MaxSignatories`, with weight being proportional."] + #[doc = "- One encode & hash, both of complexity `O(S)`."] + #[doc = "- One event."] + #[doc = "- I/O: 1 read `O(S)`, one remove."] + #[doc = "- Storage: removes one item."] + #[doc = "----------------------------------"] + #[doc = "- DB Weight:"] + #[doc = " - Read: Multisig Storage, [Caller Account], Refund Account, Calls"] + #[doc = " - Write: Multisig Storage, [Caller Account], Refund Account, Calls"] + #[doc = "# "] + pub fn cancel_as_multi( + &self, + threshold: ::core::primitive::u16, + other_signatories: ::std::vec::Vec<::subxt::sp_core::crypto::AccountId32>, + timepoint: runtime_types::pallet_multisig::Timepoint<::core::primitive::u32>, + call_hash: [::core::primitive::u8; 32usize], + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + CancelAsMulti, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 195u8, 216u8, 37u8, 179u8, 9u8, 19u8, 238u8, 94u8, 156u8, 5u8, 120u8, + 78u8, 129u8, 99u8, 239u8, 142u8, 68u8, 12u8, 254u8, 46u8, 251u8, 8u8, + 193u8, 43u8, 37u8, 68u8, 249u8, 85u8, 163u8, 85u8, 193u8, 47u8, + ] + { + let call = CancelAsMulti { + threshold, + other_signatories, + timepoint, + call_hash, + }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + pub type Event = runtime_types::pallet_multisig::pallet::Event; + pub mod events { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "A new multisig operation has begun."] + pub struct NewMultisig { + pub approving: ::subxt::sp_core::crypto::AccountId32, + pub multisig: ::subxt::sp_core::crypto::AccountId32, + pub call_hash: [::core::primitive::u8; 32usize], + } + impl ::subxt::Event for NewMultisig { + const PALLET: &'static str = "Multisig"; + const EVENT: &'static str = "NewMultisig"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "A multisig operation has been approved by someone."] + pub struct MultisigApproval { + pub approving: ::subxt::sp_core::crypto::AccountId32, + pub timepoint: runtime_types::pallet_multisig::Timepoint<::core::primitive::u32>, + pub multisig: ::subxt::sp_core::crypto::AccountId32, + pub call_hash: [::core::primitive::u8; 32usize], + } + impl ::subxt::Event for MultisigApproval { + const PALLET: &'static str = "Multisig"; + const EVENT: &'static str = "MultisigApproval"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "A multisig operation has been executed."] + pub struct MultisigExecuted { + pub approving: ::subxt::sp_core::crypto::AccountId32, + pub timepoint: runtime_types::pallet_multisig::Timepoint<::core::primitive::u32>, + pub multisig: ::subxt::sp_core::crypto::AccountId32, + pub call_hash: [::core::primitive::u8; 32usize], + pub result: ::core::result::Result<(), runtime_types::sp_runtime::DispatchError>, + } + impl ::subxt::Event for 
MultisigExecuted { + const PALLET: &'static str = "Multisig"; + const EVENT: &'static str = "MultisigExecuted"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "A multisig operation has been cancelled."] + pub struct MultisigCancelled { + pub cancelling: ::subxt::sp_core::crypto::AccountId32, + pub timepoint: runtime_types::pallet_multisig::Timepoint<::core::primitive::u32>, + pub multisig: ::subxt::sp_core::crypto::AccountId32, + pub call_hash: [::core::primitive::u8; 32usize], + } + impl ::subxt::Event for MultisigCancelled { + const PALLET: &'static str = "Multisig"; + const EVENT: &'static str = "MultisigCancelled"; + } + } + pub mod storage { + use super::runtime_types; + pub struct Multisigs<'a>( + pub &'a ::subxt::sp_core::crypto::AccountId32, + pub &'a [::core::primitive::u8; 32usize], + ); + impl ::subxt::StorageEntry for Multisigs<'_> { + const PALLET: &'static str = "Multisig"; + const STORAGE: &'static str = "Multisigs"; + type Value = runtime_types::pallet_multisig::Multisig< + ::core::primitive::u32, + ::core::primitive::u128, + ::subxt::sp_core::crypto::AccountId32, + >; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Map(vec![ + ::subxt::StorageMapKey::new(&self.0, ::subxt::StorageHasher::Twox64Concat), + ::subxt::StorageMapKey::new( + &self.1, + ::subxt::StorageHasher::Blake2_128Concat, + ), + ]) + } + } + pub struct Calls<'a>(pub &'a [::core::primitive::u8; 32usize]); + impl ::subxt::StorageEntry for Calls<'_> { + const PALLET: &'static str = "Multisig"; + const STORAGE: &'static str = "Calls"; + type Value = ( + ::subxt::WrapperKeepOpaque, + ::subxt::sp_core::crypto::AccountId32, + ::core::primitive::u128, + ); + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( + &self.0, + ::subxt::StorageHasher::Identity, + )]) + } + } + pub struct StorageApi<'a, T: ::subxt::Config> { + client: &'a ::subxt::Client, + } + impl<'a, T: ::subxt::Config> StorageApi<'a, T> { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { client } + } + #[doc = " The set of open multisig operations."] + pub async fn multisigs( + &self, + _0: &::subxt::sp_core::crypto::AccountId32, + _1: &[::core::primitive::u8; 32usize], + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::core::option::Option< + runtime_types::pallet_multisig::Multisig< + ::core::primitive::u32, + ::core::primitive::u128, + ::subxt::sp_core::crypto::AccountId32, + >, + >, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 137u8, 130u8, 173u8, 65u8, 126u8, 244u8, 194u8, 167u8, 93u8, 174u8, + 104u8, 131u8, 115u8, 155u8, 93u8, 185u8, 54u8, 204u8, 155u8, 149u8, + 184u8, 24u8, 111u8, 40u8, 249u8, 215u8, 34u8, 251u8, 224u8, 110u8, + 202u8, 2u8, + ] + { + let entry = Multisigs(_0, _1); + self.client.storage().fetch(&entry, block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The set of open multisig operations."] + pub async fn multisigs_iter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::subxt::KeyIter<'a, T, Multisigs<'a>>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? 
+ == [ + 137u8, 130u8, 173u8, 65u8, 126u8, 244u8, 194u8, 167u8, 93u8, 174u8, + 104u8, 131u8, 115u8, 155u8, 93u8, 185u8, 54u8, 204u8, 155u8, 149u8, + 184u8, 24u8, 111u8, 40u8, 249u8, 215u8, 34u8, 251u8, 224u8, 110u8, + 202u8, 2u8, + ] + { + self.client.storage().iter(block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + pub async fn calls( + &self, + _0: &[::core::primitive::u8; 32usize], + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::core::option::Option<( + ::subxt::WrapperKeepOpaque, + ::subxt::sp_core::crypto::AccountId32, + ::core::primitive::u128, + )>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 45u8, 131u8, 59u8, 143u8, 103u8, 14u8, 145u8, 237u8, 41u8, 118u8, + 121u8, 206u8, 38u8, 247u8, 229u8, 187u8, 15u8, 228u8, 206u8, 250u8, + 198u8, 10u8, 157u8, 207u8, 83u8, 74u8, 34u8, 165u8, 205u8, 44u8, 129u8, + 179u8, + ] + { + let entry = Calls(_0); + self.client.storage().fetch(&entry, block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + pub async fn calls_iter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result<::subxt::KeyIter<'a, T, Calls<'a>>, ::subxt::BasicError> + { + if self.client.metadata().storage_hash::()? + == [ + 45u8, 131u8, 59u8, 143u8, 103u8, 14u8, 145u8, 237u8, 41u8, 118u8, + 121u8, 206u8, 38u8, 247u8, 229u8, 187u8, 15u8, 228u8, 206u8, 250u8, + 198u8, 10u8, 157u8, 207u8, 83u8, 74u8, 34u8, 165u8, 205u8, 44u8, 129u8, + 179u8, + ] + { + self.client.storage().iter(block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + pub mod constants { + use super::runtime_types; + pub struct ConstantsApi<'a, T: ::subxt::Config> { + client: &'a ::subxt::Client, + } + impl<'a, T: ::subxt::Config> ConstantsApi<'a, T> { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { client } + } + #[doc = " The base amount of currency needed to reserve for creating a multisig execution or to"] + #[doc = " store a dispatch call for later."] + #[doc = ""] + #[doc = " This is held for an additional storage item whose value size is"] + #[doc = " `4 + sizeof((BlockNumber, Balance, AccountId))` bytes and whose key size is"] + #[doc = " `32 + sizeof(AccountId)` bytes."] + pub fn deposit_base( + &self, + ) -> ::core::result::Result<::core::primitive::u128, ::subxt::BasicError> + { + if self + .client + .metadata() + .constant_hash("Multisig", "DepositBase")? + == [ + 71u8, 154u8, 198u8, 152u8, 162u8, 128u8, 229u8, 128u8, 60u8, 108u8, + 172u8, 247u8, 145u8, 8u8, 159u8, 25u8, 36u8, 141u8, 28u8, 67u8, 30u8, + 14u8, 194u8, 98u8, 125u8, 161u8, 148u8, 41u8, 67u8, 120u8, 78u8, 162u8, + ] + { + let pallet = self.client.metadata().pallet("Multisig")?; + let constant = pallet.constant("DepositBase")?; + let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; + Ok(value) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The amount of currency needed per unit threshold when creating a multisig execution."] + #[doc = ""] + #[doc = " This is held for adding 32 bytes more into a pre-existing storage value."] + pub fn deposit_factor( + &self, + ) -> ::core::result::Result<::core::primitive::u128, ::subxt::BasicError> + { + if self + .client + .metadata() + .constant_hash("Multisig", "DepositFactor")? 
+ == [ + 248u8, 238u8, 23u8, 116u8, 115u8, 32u8, 128u8, 25u8, 153u8, 128u8, + 14u8, 55u8, 124u8, 103u8, 61u8, 140u8, 106u8, 176u8, 226u8, 232u8, + 255u8, 246u8, 68u8, 23u8, 111u8, 168u8, 45u8, 130u8, 182u8, 15u8, 66u8, + 64u8, + ] + { + let pallet = self.client.metadata().pallet("Multisig")?; + let constant = pallet.constant("DepositFactor")?; + let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; + Ok(value) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The maximum amount of signatories allowed in the multisig."] + pub fn max_signatories( + &self, + ) -> ::core::result::Result<::core::primitive::u16, ::subxt::BasicError> + { + if self + .client + .metadata() + .constant_hash("Multisig", "MaxSignatories")? + == [ + 139u8, 36u8, 140u8, 198u8, 176u8, 106u8, 89u8, 194u8, 33u8, 23u8, 60u8, + 134u8, 143u8, 24u8, 176u8, 64u8, 47u8, 109u8, 159u8, 134u8, 240u8, + 231u8, 181u8, 146u8, 136u8, 249u8, 175u8, 67u8, 41u8, 152u8, 90u8, + 15u8, + ] + { + let pallet = self.client.metadata().pallet("Multisig")?; + let constant = pallet.constant("MaxSignatories")?; + let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; + Ok(value) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + } + pub mod xcm_pallet { + use super::root_mod; + use super::runtime_types; + pub mod calls { + use super::root_mod; + use super::runtime_types; + type DispatchError = runtime_types::sp_runtime::DispatchError; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct Send { + pub dest: ::std::boxed::Box, + pub message: ::std::boxed::Box, + } + impl ::subxt::Call for Send { + const PALLET: &'static str = "XcmPallet"; + const FUNCTION: &'static str = "send"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct TeleportAssets { + pub dest: ::std::boxed::Box, + pub beneficiary: ::std::boxed::Box, + pub assets: ::std::boxed::Box, + pub fee_asset_item: ::core::primitive::u32, + } + impl ::subxt::Call for TeleportAssets { + const PALLET: &'static str = "XcmPallet"; + const FUNCTION: &'static str = "teleport_assets"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct ReserveTransferAssets { + pub dest: ::std::boxed::Box, + pub beneficiary: ::std::boxed::Box, + pub assets: ::std::boxed::Box, + pub fee_asset_item: ::core::primitive::u32, + } + impl ::subxt::Call for ReserveTransferAssets { + const PALLET: &'static str = "XcmPallet"; + const FUNCTION: &'static str = "reserve_transfer_assets"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct Execute { + pub message: ::std::boxed::Box, + pub max_weight: ::core::primitive::u64, + } + impl ::subxt::Call for Execute { + const PALLET: &'static str = "XcmPallet"; + const FUNCTION: &'static str = "execute"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct ForceXcmVersion { + pub location: + ::std::boxed::Box, + pub xcm_version: ::core::primitive::u32, + } + impl ::subxt::Call for ForceXcmVersion { + const PALLET: &'static str = "XcmPallet"; + const FUNCTION: &'static str = "force_xcm_version"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct ForceDefaultXcmVersion { + pub maybe_xcm_version: ::core::option::Option<::core::primitive::u32>, + } + impl ::subxt::Call for ForceDefaultXcmVersion { + const PALLET: &'static str = "XcmPallet"; + 
const FUNCTION: &'static str = "force_default_xcm_version"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct ForceSubscribeVersionNotify { + pub location: ::std::boxed::Box, + } + impl ::subxt::Call for ForceSubscribeVersionNotify { + const PALLET: &'static str = "XcmPallet"; + const FUNCTION: &'static str = "force_subscribe_version_notify"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct ForceUnsubscribeVersionNotify { + pub location: ::std::boxed::Box, + } + impl ::subxt::Call for ForceUnsubscribeVersionNotify { + const PALLET: &'static str = "XcmPallet"; + const FUNCTION: &'static str = "force_unsubscribe_version_notify"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct LimitedReserveTransferAssets { + pub dest: ::std::boxed::Box, + pub beneficiary: ::std::boxed::Box, + pub assets: ::std::boxed::Box, + pub fee_asset_item: ::core::primitive::u32, + pub weight_limit: runtime_types::xcm::v2::WeightLimit, + } + impl ::subxt::Call for LimitedReserveTransferAssets { + const PALLET: &'static str = "XcmPallet"; + const FUNCTION: &'static str = "limited_reserve_transfer_assets"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct LimitedTeleportAssets { + pub dest: ::std::boxed::Box, + pub beneficiary: ::std::boxed::Box, + pub assets: ::std::boxed::Box, + pub fee_asset_item: ::core::primitive::u32, + pub weight_limit: runtime_types::xcm::v2::WeightLimit, + } + impl ::subxt::Call for LimitedTeleportAssets { + const PALLET: &'static str = "XcmPallet"; + const FUNCTION: &'static str = "limited_teleport_assets"; + } + pub struct TransactionApi<'a, T: ::subxt::Config, X> { + client: &'a ::subxt::Client, + marker: ::core::marker::PhantomData, + } + impl<'a, T, X> TransactionApi<'a, T, X> + where + T: ::subxt::Config, + X: ::subxt::extrinsic::ExtrinsicParams, + { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { + client, + marker: ::core::marker::PhantomData, + } + } + pub fn send( + &self, + dest: runtime_types::xcm::VersionedMultiLocation, + message: runtime_types::xcm::VersionedXcm, + ) -> Result< + ::subxt::SubmittableExtrinsic<'a, T, X, Send, DispatchError, root_mod::Event>, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 232u8, 188u8, 205u8, 27u8, 92u8, 141u8, 251u8, 24u8, 90u8, 155u8, 20u8, + 139u8, 7u8, 160u8, 39u8, 85u8, 205u8, 11u8, 111u8, 1u8, 250u8, 168u8, + 134u8, 61u8, 19u8, 216u8, 239u8, 127u8, 137u8, 136u8, 48u8, 19u8, + ] + { + let call = Send { + dest: ::std::boxed::Box::new(dest), + message: ::std::boxed::Box::new(message), + }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Teleport some assets from the local chain to some destination chain."] + #[doc = ""] + #[doc = "Fee payment on the destination side is made from the asset in the `assets` vector of"] + #[doc = "index `fee_asset_item`. The weight limit for fees is not provided and thus is unlimited,"] + #[doc = "with all fees taken as needed from the asset."] + #[doc = ""] + #[doc = "- `origin`: Must be capable of withdrawing the `assets` and executing XCM."] + #[doc = "- `dest`: Destination context for the assets. 
Will typically be `X2(Parent, Parachain(..))` to send"] + #[doc = " from parachain to parachain, or `X1(Parachain(..))` to send from relay to parachain."] + #[doc = "- `beneficiary`: A beneficiary location for the assets in the context of `dest`. Will generally be"] + #[doc = " an `AccountId32` value."] + #[doc = "- `assets`: The assets to be withdrawn. The first item should be the currency used to to pay the fee on the"] + #[doc = " `dest` side. May not be empty."] + #[doc = "- `fee_asset_item`: The index into `assets` of the item which should be used to pay"] + #[doc = " fees."] + pub fn teleport_assets( + &self, + dest: runtime_types::xcm::VersionedMultiLocation, + beneficiary: runtime_types::xcm::VersionedMultiLocation, + assets: runtime_types::xcm::VersionedMultiAssets, + fee_asset_item: ::core::primitive::u32, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + TeleportAssets, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 55u8, 192u8, 217u8, 186u8, 230u8, 234u8, 26u8, 194u8, 243u8, 199u8, + 16u8, 227u8, 225u8, 88u8, 130u8, 219u8, 228u8, 110u8, 20u8, 255u8, + 233u8, 147u8, 121u8, 173u8, 126u8, 248u8, 192u8, 243u8, 211u8, 91u8, + 115u8, 148u8, + ] + { + let call = TeleportAssets { + dest: ::std::boxed::Box::new(dest), + beneficiary: ::std::boxed::Box::new(beneficiary), + assets: ::std::boxed::Box::new(assets), + fee_asset_item, + }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Transfer some assets from the local chain to the sovereign account of a destination"] + #[doc = "chain and forward a notification XCM."] + #[doc = ""] + #[doc = "Fee payment on the destination side is made from the asset in the `assets` vector of"] + #[doc = "index `fee_asset_item`. The weight limit for fees is not provided and thus is unlimited,"] + #[doc = "with all fees taken as needed from the asset."] + #[doc = ""] + #[doc = "- `origin`: Must be capable of withdrawing the `assets` and executing XCM."] + #[doc = "- `dest`: Destination context for the assets. Will typically be `X2(Parent, Parachain(..))` to send"] + #[doc = " from parachain to parachain, or `X1(Parachain(..))` to send from relay to parachain."] + #[doc = "- `beneficiary`: A beneficiary location for the assets in the context of `dest`. Will generally be"] + #[doc = " an `AccountId32` value."] + #[doc = "- `assets`: The assets to be withdrawn. This should include the assets used to pay the fee on the"] + #[doc = " `dest` side."] + #[doc = "- `fee_asset_item`: The index into `assets` of the item which should be used to pay"] + #[doc = " fees."] + pub fn reserve_transfer_assets( + &self, + dest: runtime_types::xcm::VersionedMultiLocation, + beneficiary: runtime_types::xcm::VersionedMultiLocation, + assets: runtime_types::xcm::VersionedMultiAssets, + fee_asset_item: ::core::primitive::u32, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + ReserveTransferAssets, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .call_hash::()? 
+ == [ + 134u8, 229u8, 104u8, 209u8, 160u8, 7u8, 99u8, 175u8, 128u8, 110u8, + 189u8, 225u8, 141u8, 1u8, 10u8, 17u8, 247u8, 233u8, 146u8, 19u8, 31u8, + 145u8, 217u8, 144u8, 85u8, 223u8, 197u8, 249u8, 1u8, 222u8, 98u8, 13u8, + ] + { + let call = ReserveTransferAssets { + dest: ::std::boxed::Box::new(dest), + beneficiary: ::std::boxed::Box::new(beneficiary), + assets: ::std::boxed::Box::new(assets), + fee_asset_item, + }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Execute an XCM message from a local, signed, origin."] + #[doc = ""] + #[doc = "An event is deposited indicating whether `msg` could be executed completely or only"] + #[doc = "partially."] + #[doc = ""] + #[doc = "No more than `max_weight` will be used in its attempted execution. If this is less than the"] + #[doc = "maximum amount of weight that the message could take to be executed, then no execution"] + #[doc = "attempt will be made."] + #[doc = ""] + #[doc = "NOTE: A successful return to this does *not* imply that the `msg` was executed successfully"] + #[doc = "to completion; only that *some* of it was executed."] + pub fn execute( + &self, + message: runtime_types::xcm::VersionedXcm, + max_weight: ::core::primitive::u64, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + Execute, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? + == [ + 95u8, 48u8, 201u8, 232u8, 83u8, 23u8, 20u8, 126u8, 116u8, 116u8, 176u8, + 206u8, 145u8, 9u8, 155u8, 109u8, 141u8, 226u8, 253u8, 196u8, 37u8, + 230u8, 243u8, 68u8, 39u8, 133u8, 233u8, 108u8, 226u8, 87u8, 5u8, 247u8, + ] + { + let call = Execute { + message: ::std::boxed::Box::new(message), + max_weight, + }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Extoll that a particular destination can be communicated with through a particular"] + #[doc = "version of XCM."] + #[doc = ""] + #[doc = "- `origin`: Must be Root."] + #[doc = "- `location`: The destination that is being described."] + #[doc = "- `xcm_version`: The latest version of XCM that `location` supports."] + pub fn force_xcm_version( + &self, + location: runtime_types::xcm::v1::multilocation::MultiLocation, + xcm_version: ::core::primitive::u32, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + ForceXcmVersion, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self.client.metadata().call_hash::()? 
+ == [ + 32u8, 219u8, 213u8, 152u8, 203u8, 73u8, 121u8, 64u8, 78u8, 53u8, 110u8, + 23u8, 87u8, 93u8, 34u8, 166u8, 205u8, 189u8, 25u8, 160u8, 172u8, 178u8, + 125u8, 182u8, 37u8, 254u8, 220u8, 179u8, 70u8, 252u8, 63u8, 94u8, + ] + { + let call = ForceXcmVersion { + location: ::std::boxed::Box::new(location), + xcm_version, + }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Set a safe XCM version (the version that XCM should be encoded with if the most recent"] + #[doc = "version a destination can accept is unknown)."] + #[doc = ""] + #[doc = "- `origin`: Must be Root."] + #[doc = "- `maybe_xcm_version`: The default XCM encoding version, or `None` to disable."] + pub fn force_default_xcm_version( + &self, + maybe_xcm_version: ::core::option::Option<::core::primitive::u32>, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + ForceDefaultXcmVersion, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .call_hash::()? + == [ + 44u8, 161u8, 28u8, 189u8, 162u8, 221u8, 14u8, 31u8, 8u8, 211u8, 181u8, + 51u8, 197u8, 14u8, 87u8, 198u8, 3u8, 240u8, 90u8, 78u8, 141u8, 131u8, + 205u8, 250u8, 211u8, 150u8, 237u8, 160u8, 239u8, 226u8, 233u8, 29u8, + ] + { + let call = ForceDefaultXcmVersion { maybe_xcm_version }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Ask a location to notify us regarding their XCM version and any changes to it."] + #[doc = ""] + #[doc = "- `origin`: Must be Root."] + #[doc = "- `location`: The location to which we should subscribe for XCM version notifications."] + pub fn force_subscribe_version_notify( + &self, + location: runtime_types::xcm::VersionedMultiLocation, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + ForceSubscribeVersionNotify, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .call_hash::()? + == [ + 41u8, 248u8, 187u8, 195u8, 146u8, 143u8, 0u8, 246u8, 248u8, 38u8, + 128u8, 200u8, 143u8, 149u8, 127u8, 73u8, 3u8, 247u8, 106u8, 6u8, 56u8, + 50u8, 207u8, 234u8, 137u8, 201u8, 16u8, 21u8, 226u8, 148u8, 181u8, + 44u8, + ] + { + let call = ForceSubscribeVersionNotify { + location: ::std::boxed::Box::new(location), + }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Require that a particular destination should no longer notify us regarding any XCM"] + #[doc = "version changes."] + #[doc = ""] + #[doc = "- `origin`: Must be Root."] + #[doc = "- `location`: The location to which we are currently subscribed for XCM version"] + #[doc = " notifications which we no longer desire."] + pub fn force_unsubscribe_version_notify( + &self, + location: runtime_types::xcm::VersionedMultiLocation, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + ForceUnsubscribeVersionNotify, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .call_hash::()? 
+ == [ + 150u8, 202u8, 148u8, 13u8, 187u8, 169u8, 5u8, 60u8, 25u8, 144u8, 43u8, + 196u8, 35u8, 215u8, 184u8, 72u8, 143u8, 220u8, 176u8, 27u8, 100u8, + 245u8, 31u8, 243u8, 0u8, 83u8, 165u8, 7u8, 102u8, 172u8, 218u8, 133u8, + ] + { + let call = ForceUnsubscribeVersionNotify { + location: ::std::boxed::Box::new(location), + }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Transfer some assets from the local chain to the sovereign account of a destination"] + #[doc = "chain and forward a notification XCM."] + #[doc = ""] + #[doc = "Fee payment on the destination side is made from the asset in the `assets` vector of"] + #[doc = "index `fee_asset_item`, up to enough to pay for `weight_limit` of weight. If more weight"] + #[doc = "is needed than `weight_limit`, then the operation will fail and the assets send may be"] + #[doc = "at risk."] + #[doc = ""] + #[doc = "- `origin`: Must be capable of withdrawing the `assets` and executing XCM."] + #[doc = "- `dest`: Destination context for the assets. Will typically be `X2(Parent, Parachain(..))` to send"] + #[doc = " from parachain to parachain, or `X1(Parachain(..))` to send from relay to parachain."] + #[doc = "- `beneficiary`: A beneficiary location for the assets in the context of `dest`. Will generally be"] + #[doc = " an `AccountId32` value."] + #[doc = "- `assets`: The assets to be withdrawn. This should include the assets used to pay the fee on the"] + #[doc = " `dest` side."] + #[doc = "- `fee_asset_item`: The index into `assets` of the item which should be used to pay"] + #[doc = " fees."] + #[doc = "- `weight_limit`: The remote-side weight limit, if any, for the XCM fee purchase."] + pub fn limited_reserve_transfer_assets( + &self, + dest: runtime_types::xcm::VersionedMultiLocation, + beneficiary: runtime_types::xcm::VersionedMultiLocation, + assets: runtime_types::xcm::VersionedMultiAssets, + fee_asset_item: ::core::primitive::u32, + weight_limit: runtime_types::xcm::v2::WeightLimit, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + LimitedReserveTransferAssets, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .call_hash::()? + == [ + 242u8, 206u8, 126u8, 164u8, 44u8, 116u8, 181u8, 90u8, 121u8, 124u8, + 120u8, 240u8, 129u8, 217u8, 131u8, 100u8, 248u8, 149u8, 56u8, 154u8, + 35u8, 91u8, 210u8, 118u8, 207u8, 110u8, 42u8, 249u8, 160u8, 155u8, + 251u8, 68u8, + ] + { + let call = LimitedReserveTransferAssets { + dest: ::std::boxed::Box::new(dest), + beneficiary: ::std::boxed::Box::new(beneficiary), + assets: ::std::boxed::Box::new(assets), + fee_asset_item, + weight_limit, + }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = "Teleport some assets from the local chain to some destination chain."] + #[doc = ""] + #[doc = "Fee payment on the destination side is made from the asset in the `assets` vector of"] + #[doc = "index `fee_asset_item`, up to enough to pay for `weight_limit` of weight. If more weight"] + #[doc = "is needed than `weight_limit`, then the operation will fail and the assets send may be"] + #[doc = "at risk."] + #[doc = ""] + #[doc = "- `origin`: Must be capable of withdrawing the `assets` and executing XCM."] + #[doc = "- `dest`: Destination context for the assets. 
Will typically be `X2(Parent, Parachain(..))` to send"] + #[doc = " from parachain to parachain, or `X1(Parachain(..))` to send from relay to parachain."] + #[doc = "- `beneficiary`: A beneficiary location for the assets in the context of `dest`. Will generally be"] + #[doc = " an `AccountId32` value."] + #[doc = "- `assets`: The assets to be withdrawn. The first item should be the currency used to to pay the fee on the"] + #[doc = " `dest` side. May not be empty."] + #[doc = "- `fee_asset_item`: The index into `assets` of the item which should be used to pay"] + #[doc = " fees."] + #[doc = "- `weight_limit`: The remote-side weight limit, if any, for the XCM fee purchase."] + pub fn limited_teleport_assets( + &self, + dest: runtime_types::xcm::VersionedMultiLocation, + beneficiary: runtime_types::xcm::VersionedMultiLocation, + assets: runtime_types::xcm::VersionedMultiAssets, + fee_asset_item: ::core::primitive::u32, + weight_limit: runtime_types::xcm::v2::WeightLimit, + ) -> Result< + ::subxt::SubmittableExtrinsic< + 'a, + T, + X, + LimitedTeleportAssets, + DispatchError, + root_mod::Event, + >, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .call_hash::()? + == [ + 189u8, 233u8, 43u8, 16u8, 158u8, 114u8, 154u8, 233u8, 179u8, 144u8, + 81u8, 179u8, 169u8, 38u8, 4u8, 130u8, 95u8, 237u8, 172u8, 167u8, 2u8, + 169u8, 53u8, 252u8, 159u8, 42u8, 143u8, 216u8, 112u8, 155u8, 48u8, + 129u8, + ] + { + let call = LimitedTeleportAssets { + dest: ::std::boxed::Box::new(dest), + beneficiary: ::std::boxed::Box::new(beneficiary), + assets: ::std::boxed::Box::new(assets), + fee_asset_item, + weight_limit, + }; + Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + pub type Event = runtime_types::pallet_xcm::pallet::Event; + pub mod events { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "Execution of an XCM message was attempted."] + #[doc = ""] + #[doc = "\\[ outcome \\]"] + pub struct Attempted(pub runtime_types::xcm::v2::traits::Outcome); + impl ::subxt::Event for Attempted { + const PALLET: &'static str = "XcmPallet"; + const EVENT: &'static str = "Attempted"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "A XCM message was sent."] + #[doc = ""] + #[doc = "\\[ origin, destination, message \\]"] + pub struct Sent( + pub runtime_types::xcm::v1::multilocation::MultiLocation, + pub runtime_types::xcm::v1::multilocation::MultiLocation, + pub runtime_types::xcm::v2::Xcm, + ); + impl ::subxt::Event for Sent { + const PALLET: &'static str = "XcmPallet"; + const EVENT: &'static str = "Sent"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "Query response received which does not match a registered query. 
This may be because a"] + #[doc = "matching query was never registered, it may be because it is a duplicate response, or"] + #[doc = "because the query timed out."] + #[doc = ""] + #[doc = "\\[ origin location, id \\]"] + pub struct UnexpectedResponse( + pub runtime_types::xcm::v1::multilocation::MultiLocation, + pub ::core::primitive::u64, + ); + impl ::subxt::Event for UnexpectedResponse { + const PALLET: &'static str = "XcmPallet"; + const EVENT: &'static str = "UnexpectedResponse"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "Query response has been received and is ready for taking with `take_response`. There is"] + #[doc = "no registered notification call."] + #[doc = ""] + #[doc = "\\[ id, response \\]"] + pub struct ResponseReady( + pub ::core::primitive::u64, + pub runtime_types::xcm::v2::Response, + ); + impl ::subxt::Event for ResponseReady { + const PALLET: &'static str = "XcmPallet"; + const EVENT: &'static str = "ResponseReady"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "Query response has been received and query is removed. The registered notification has"] + #[doc = "been dispatched and executed successfully."] + #[doc = ""] + #[doc = "\\[ id, pallet index, call index \\]"] + pub struct Notified( + pub ::core::primitive::u64, + pub ::core::primitive::u8, + pub ::core::primitive::u8, + ); + impl ::subxt::Event for Notified { + const PALLET: &'static str = "XcmPallet"; + const EVENT: &'static str = "Notified"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "Query response has been received and query is removed. The registered notification could"] + #[doc = "not be dispatched because the dispatch weight is greater than the maximum weight"] + #[doc = "originally budgeted by this runtime for the query result."] + #[doc = ""] + #[doc = "\\[ id, pallet index, call index, actual weight, max budgeted weight \\]"] + pub struct NotifyOverweight( + pub ::core::primitive::u64, + pub ::core::primitive::u8, + pub ::core::primitive::u8, + pub ::core::primitive::u64, + pub ::core::primitive::u64, + ); + impl ::subxt::Event for NotifyOverweight { + const PALLET: &'static str = "XcmPallet"; + const EVENT: &'static str = "NotifyOverweight"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "Query response has been received and query is removed. There was a general error with"] + #[doc = "dispatching the notification call."] + #[doc = ""] + #[doc = "\\[ id, pallet index, call index \\]"] + pub struct NotifyDispatchError( + pub ::core::primitive::u64, + pub ::core::primitive::u8, + pub ::core::primitive::u8, + ); + impl ::subxt::Event for NotifyDispatchError { + const PALLET: &'static str = "XcmPallet"; + const EVENT: &'static str = "NotifyDispatchError"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "Query response has been received and query is removed. 
The dispatch was unable to be"] + #[doc = "decoded into a `Call`; this might be due to dispatch function having a signature which"] + #[doc = "is not `(origin, QueryId, Response)`."] + #[doc = ""] + #[doc = "\\[ id, pallet index, call index \\]"] + pub struct NotifyDecodeFailed( + pub ::core::primitive::u64, + pub ::core::primitive::u8, + pub ::core::primitive::u8, + ); + impl ::subxt::Event for NotifyDecodeFailed { + const PALLET: &'static str = "XcmPallet"; + const EVENT: &'static str = "NotifyDecodeFailed"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "Expected query response has been received but the origin location of the response does"] + #[doc = "not match that expected. The query remains registered for a later, valid, response to"] + #[doc = "be received and acted upon."] + #[doc = ""] + #[doc = "\\[ origin location, id, expected location \\]"] + pub struct InvalidResponder( + pub runtime_types::xcm::v1::multilocation::MultiLocation, + pub ::core::primitive::u64, + pub ::core::option::Option, + ); + impl ::subxt::Event for InvalidResponder { + const PALLET: &'static str = "XcmPallet"; + const EVENT: &'static str = "InvalidResponder"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "Expected query response has been received but the expected origin location placed in"] + #[doc = "storage by this runtime previously cannot be decoded. The query remains registered."] + #[doc = ""] + #[doc = "This is unexpected (since a location placed in storage in a previously executing"] + #[doc = "runtime should be readable prior to query timeout) and dangerous since the possibly"] + #[doc = "valid response will be dropped. Manual governance intervention is probably going to be"] + #[doc = "needed."] + #[doc = ""] + #[doc = "\\[ origin location, id \\]"] + pub struct InvalidResponderVersion( + pub runtime_types::xcm::v1::multilocation::MultiLocation, + pub ::core::primitive::u64, + ); + impl ::subxt::Event for InvalidResponderVersion { + const PALLET: &'static str = "XcmPallet"; + const EVENT: &'static str = "InvalidResponderVersion"; + } + #[derive( + :: subxt :: codec :: CompactAs, + :: subxt :: codec :: Decode, + :: subxt :: codec :: Encode, + Debug, + )] + #[doc = "Received query response has been read and removed."] + #[doc = ""] + #[doc = "\\[ id \\]"] + pub struct ResponseTaken(pub ::core::primitive::u64); + impl ::subxt::Event for ResponseTaken { + const PALLET: &'static str = "XcmPallet"; + const EVENT: &'static str = "ResponseTaken"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "Some assets have been placed in an asset trap."] + #[doc = ""] + #[doc = "\\[ hash, origin, assets \\]"] + pub struct AssetsTrapped( + pub ::subxt::sp_core::H256, + pub runtime_types::xcm::v1::multilocation::MultiLocation, + pub runtime_types::xcm::VersionedMultiAssets, + ); + impl ::subxt::Event for AssetsTrapped { + const PALLET: &'static str = "XcmPallet"; + const EVENT: &'static str = "AssetsTrapped"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "An XCM version change notification message has been attempted to be sent."] + #[doc = ""] + #[doc = "\\[ destination, result \\]"] + pub struct VersionChangeNotified( + pub runtime_types::xcm::v1::multilocation::MultiLocation, + pub ::core::primitive::u32, + ); + impl ::subxt::Event for VersionChangeNotified { + const PALLET: &'static str = "XcmPallet"; + const EVENT: &'static str = 
"VersionChangeNotified"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "The supported version of a location has been changed. This might be through an"] + #[doc = "automatic notification or a manual intervention."] + #[doc = ""] + #[doc = "\\[ location, XCM version \\]"] + pub struct SupportedVersionChanged( + pub runtime_types::xcm::v1::multilocation::MultiLocation, + pub ::core::primitive::u32, + ); + impl ::subxt::Event for SupportedVersionChanged { + const PALLET: &'static str = "XcmPallet"; + const EVENT: &'static str = "SupportedVersionChanged"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "A given location which had a version change subscription was dropped owing to an error"] + #[doc = "sending the notification to it."] + #[doc = ""] + #[doc = "\\[ location, query ID, error \\]"] + pub struct NotifyTargetSendFail( + pub runtime_types::xcm::v1::multilocation::MultiLocation, + pub ::core::primitive::u64, + pub runtime_types::xcm::v2::traits::Error, + ); + impl ::subxt::Event for NotifyTargetSendFail { + const PALLET: &'static str = "XcmPallet"; + const EVENT: &'static str = "NotifyTargetSendFail"; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + #[doc = "A given location which had a version change subscription was dropped owing to an error"] + #[doc = "migrating the location to our new XCM format."] + #[doc = ""] + #[doc = "\\[ location, query ID \\]"] + pub struct NotifyTargetMigrationFail( + pub runtime_types::xcm::VersionedMultiLocation, + pub ::core::primitive::u64, + ); + impl ::subxt::Event for NotifyTargetMigrationFail { + const PALLET: &'static str = "XcmPallet"; + const EVENT: &'static str = "NotifyTargetMigrationFail"; + } + } + pub mod storage { + use super::runtime_types; + pub struct QueryCounter; + impl ::subxt::StorageEntry for QueryCounter { + const PALLET: &'static str = "XcmPallet"; + const STORAGE: &'static str = "QueryCounter"; + type Value = ::core::primitive::u64; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct Queries<'a>(pub &'a ::core::primitive::u64); + impl ::subxt::StorageEntry for Queries<'_> { + const PALLET: &'static str = "XcmPallet"; + const STORAGE: &'static str = "Queries"; + type Value = runtime_types::pallet_xcm::pallet::QueryStatus<::core::primitive::u32>; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( + &self.0, + ::subxt::StorageHasher::Blake2_128Concat, + )]) + } + } + pub struct AssetTraps<'a>(pub &'a ::subxt::sp_core::H256); + impl ::subxt::StorageEntry for AssetTraps<'_> { + const PALLET: &'static str = "XcmPallet"; + const STORAGE: &'static str = "AssetTraps"; + type Value = ::core::primitive::u32; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( + &self.0, + ::subxt::StorageHasher::Identity, + )]) + } + } + pub struct SafeXcmVersion; + impl ::subxt::StorageEntry for SafeXcmVersion { + const PALLET: &'static str = "XcmPallet"; + const STORAGE: &'static str = "SafeXcmVersion"; + type Value = ::core::primitive::u32; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct SupportedVersion<'a>( + pub &'a ::core::primitive::u32, + pub &'a runtime_types::xcm::VersionedMultiLocation, + ); + impl ::subxt::StorageEntry for SupportedVersion<'_> { + const PALLET: &'static str = "XcmPallet"; + const 
STORAGE: &'static str = "SupportedVersion"; + type Value = ::core::primitive::u32; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Map(vec![ + ::subxt::StorageMapKey::new(&self.0, ::subxt::StorageHasher::Twox64Concat), + ::subxt::StorageMapKey::new( + &self.1, + ::subxt::StorageHasher::Blake2_128Concat, + ), + ]) + } + } + pub struct VersionNotifiers<'a>( + pub &'a ::core::primitive::u32, + pub &'a runtime_types::xcm::VersionedMultiLocation, + ); + impl ::subxt::StorageEntry for VersionNotifiers<'_> { + const PALLET: &'static str = "XcmPallet"; + const STORAGE: &'static str = "VersionNotifiers"; + type Value = ::core::primitive::u64; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Map(vec![ + ::subxt::StorageMapKey::new(&self.0, ::subxt::StorageHasher::Twox64Concat), + ::subxt::StorageMapKey::new( + &self.1, + ::subxt::StorageHasher::Blake2_128Concat, + ), + ]) + } + } + pub struct VersionNotifyTargets<'a>( + pub &'a ::core::primitive::u32, + pub &'a runtime_types::xcm::VersionedMultiLocation, + ); + impl ::subxt::StorageEntry for VersionNotifyTargets<'_> { + const PALLET: &'static str = "XcmPallet"; + const STORAGE: &'static str = "VersionNotifyTargets"; + type Value = ( + ::core::primitive::u64, + ::core::primitive::u64, + ::core::primitive::u32, + ); + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Map(vec![ + ::subxt::StorageMapKey::new(&self.0, ::subxt::StorageHasher::Twox64Concat), + ::subxt::StorageMapKey::new( + &self.1, + ::subxt::StorageHasher::Blake2_128Concat, + ), + ]) + } + } + pub struct VersionDiscoveryQueue; + impl ::subxt::StorageEntry for VersionDiscoveryQueue { + const PALLET: &'static str = "XcmPallet"; + const STORAGE: &'static str = "VersionDiscoveryQueue"; + type Value = runtime_types::frame_support::storage::bounded_vec::BoundedVec<( + runtime_types::xcm::VersionedMultiLocation, + ::core::primitive::u32, + )>; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct CurrentMigration; + impl ::subxt::StorageEntry for CurrentMigration { + const PALLET: &'static str = "XcmPallet"; + const STORAGE: &'static str = "CurrentMigration"; + type Value = runtime_types::pallet_xcm::pallet::VersionMigrationStage; + fn key(&self) -> ::subxt::StorageEntryKey { + ::subxt::StorageEntryKey::Plain + } + } + pub struct StorageApi<'a, T: ::subxt::Config> { + client: &'a ::subxt::Client, + } + impl<'a, T: ::subxt::Config> StorageApi<'a, T> { + pub fn new(client: &'a ::subxt::Client) -> Self { + Self { client } + } + #[doc = " The latest available query index."] + pub async fn query_counter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result<::core::primitive::u64, ::subxt::BasicError> + { + if self.client.metadata().storage_hash::()? 
+ == [ + 137u8, 58u8, 184u8, 88u8, 247u8, 22u8, 151u8, 64u8, 50u8, 77u8, 49u8, + 10u8, 234u8, 84u8, 213u8, 156u8, 26u8, 200u8, 214u8, 225u8, 125u8, + 231u8, 42u8, 93u8, 159u8, 168u8, 86u8, 201u8, 116u8, 153u8, 41u8, + 127u8, + ] + { + let entry = QueryCounter; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The ongoing queries."] + pub async fn queries( + &self, + _0: &::core::primitive::u64, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::core::option::Option< + runtime_types::pallet_xcm::pallet::QueryStatus<::core::primitive::u32>, + >, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 47u8, 241u8, 126u8, 71u8, 203u8, 121u8, 171u8, 226u8, 89u8, 17u8, 61u8, + 198u8, 123u8, 73u8, 20u8, 197u8, 6u8, 23u8, 34u8, 127u8, 89u8, 35u8, + 49u8, 101u8, 110u8, 15u8, 206u8, 203u8, 155u8, 93u8, 0u8, 97u8, + ] + { + let entry = Queries(_0); + self.client.storage().fetch(&entry, block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The ongoing queries."] + pub async fn queries_iter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result<::subxt::KeyIter<'a, T, Queries<'a>>, ::subxt::BasicError> + { + if self.client.metadata().storage_hash::()? + == [ + 47u8, 241u8, 126u8, 71u8, 203u8, 121u8, 171u8, 226u8, 89u8, 17u8, 61u8, + 198u8, 123u8, 73u8, 20u8, 197u8, 6u8, 23u8, 34u8, 127u8, 89u8, 35u8, + 49u8, 101u8, 110u8, 15u8, 206u8, 203u8, 155u8, 93u8, 0u8, 97u8, + ] + { + self.client.storage().iter(block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The existing asset traps."] + #[doc = ""] + #[doc = " Key is the blake2 256 hash of (origin, versioned `MultiAssets`) pair. Value is the number of"] + #[doc = " times this pair has been trapped (usually just 1 if it exists at all)."] + pub async fn asset_traps( + &self, + _0: &::subxt::sp_core::H256, + block_hash: ::core::option::Option, + ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> + { + if self.client.metadata().storage_hash::()? + == [ + 89u8, 0u8, 237u8, 90u8, 95u8, 21u8, 165u8, 163u8, 148u8, 203u8, 155u8, + 222u8, 180u8, 219u8, 220u8, 114u8, 179u8, 228u8, 1u8, 220u8, 169u8, + 43u8, 38u8, 12u8, 88u8, 159u8, 181u8, 206u8, 221u8, 197u8, 35u8, 150u8, + ] + { + let entry = AssetTraps(_0); + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The existing asset traps."] + #[doc = ""] + #[doc = " Key is the blake2 256 hash of (origin, versioned `MultiAssets`) pair. Value is the number of"] + #[doc = " times this pair has been trapped (usually just 1 if it exists at all)."] + pub async fn asset_traps_iter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::subxt::KeyIter<'a, T, AssetTraps<'a>>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? 
+ == [ + 89u8, 0u8, 237u8, 90u8, 95u8, 21u8, 165u8, 163u8, 148u8, 203u8, 155u8, + 222u8, 180u8, 219u8, 220u8, 114u8, 179u8, 228u8, 1u8, 220u8, 169u8, + 43u8, 38u8, 12u8, 88u8, 159u8, 181u8, 206u8, 221u8, 197u8, 35u8, 150u8, + ] + { + self.client.storage().iter(block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Default version to encode XCM when latest version of destination is unknown. If `None`,"] + #[doc = " then the destinations whose XCM version is unknown are considered unreachable."] + pub async fn safe_xcm_version( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::core::option::Option<::core::primitive::u32>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 1u8, 223u8, 218u8, 204u8, 222u8, 129u8, 137u8, 237u8, 197u8, 142u8, + 233u8, 66u8, 229u8, 153u8, 138u8, 222u8, 113u8, 164u8, 135u8, 213u8, + 233u8, 34u8, 24u8, 23u8, 215u8, 59u8, 40u8, 188u8, 45u8, 244u8, 205u8, + 199u8, + ] + { + let entry = SafeXcmVersion; + self.client.storage().fetch(&entry, block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The Latest versions that we know various locations support."] + pub async fn supported_version( + &self, + _0: &::core::primitive::u32, + _1: &runtime_types::xcm::VersionedMultiLocation, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::core::option::Option<::core::primitive::u32>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 231u8, 202u8, 129u8, 82u8, 121u8, 63u8, 67u8, 57u8, 191u8, 190u8, 25u8, + 27u8, 219u8, 42u8, 180u8, 142u8, 71u8, 119u8, 212u8, 211u8, 21u8, 11u8, + 8u8, 7u8, 9u8, 243u8, 11u8, 117u8, 66u8, 47u8, 246u8, 85u8, + ] + { + let entry = SupportedVersion(_0, _1); + self.client.storage().fetch(&entry, block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The Latest versions that we know various locations support."] + pub async fn supported_version_iter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::subxt::KeyIter<'a, T, SupportedVersion<'a>>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 231u8, 202u8, 129u8, 82u8, 121u8, 63u8, 67u8, 57u8, 191u8, 190u8, 25u8, + 27u8, 219u8, 42u8, 180u8, 142u8, 71u8, 119u8, 212u8, 211u8, 21u8, 11u8, + 8u8, 7u8, 9u8, 243u8, 11u8, 117u8, 66u8, 47u8, 246u8, 85u8, + ] + { + self.client.storage().iter(block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " All locations that we have requested version notifications from."] + pub async fn version_notifiers( + &self, + _0: &::core::primitive::u32, + _1: &runtime_types::xcm::VersionedMultiLocation, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::core::option::Option<::core::primitive::u64>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? 
+ == [ + 126u8, 49u8, 13u8, 135u8, 137u8, 68u8, 248u8, 211u8, 160u8, 160u8, + 93u8, 128u8, 157u8, 230u8, 62u8, 119u8, 191u8, 51u8, 147u8, 149u8, + 60u8, 227u8, 154u8, 97u8, 244u8, 249u8, 0u8, 220u8, 189u8, 92u8, 178u8, + 149u8, + ] + { + let entry = VersionNotifiers(_0, _1); + self.client.storage().fetch(&entry, block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " All locations that we have requested version notifications from."] + pub async fn version_notifiers_iter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::subxt::KeyIter<'a, T, VersionNotifiers<'a>>, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 126u8, 49u8, 13u8, 135u8, 137u8, 68u8, 248u8, 211u8, 160u8, 160u8, + 93u8, 128u8, 157u8, 230u8, 62u8, 119u8, 191u8, 51u8, 147u8, 149u8, + 60u8, 227u8, 154u8, 97u8, 244u8, 249u8, 0u8, 220u8, 189u8, 92u8, 178u8, + 149u8, + ] + { + self.client.storage().iter(block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The target locations that are subscribed to our version changes, as well as the most recent"] + #[doc = " of our versions we informed them of."] + pub async fn version_notify_targets( + &self, + _0: &::core::primitive::u32, + _1: &runtime_types::xcm::VersionedMultiLocation, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::core::option::Option<( + ::core::primitive::u64, + ::core::primitive::u64, + ::core::primitive::u32, + )>, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .storage_hash::()? + == [ + 251u8, 128u8, 243u8, 94u8, 162u8, 11u8, 206u8, 101u8, 33u8, 24u8, + 163u8, 157u8, 112u8, 50u8, 91u8, 155u8, 241u8, 73u8, 77u8, 185u8, + 231u8, 3u8, 220u8, 161u8, 36u8, 208u8, 116u8, 183u8, 80u8, 38u8, 56u8, + 104u8, + ] + { + let entry = VersionNotifyTargets(_0, _1); + self.client.storage().fetch(&entry, block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The target locations that are subscribed to our version changes, as well as the most recent"] + #[doc = " of our versions we informed them of."] + pub async fn version_notify_targets_iter( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::subxt::KeyIter<'a, T, VersionNotifyTargets<'a>>, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .storage_hash::()? + == [ + 251u8, 128u8, 243u8, 94u8, 162u8, 11u8, 206u8, 101u8, 33u8, 24u8, + 163u8, 157u8, 112u8, 50u8, 91u8, 155u8, 241u8, 73u8, 77u8, 185u8, + 231u8, 3u8, 220u8, 161u8, 36u8, 208u8, 116u8, 183u8, 80u8, 38u8, 56u8, + 104u8, + ] + { + self.client.storage().iter(block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " Destinations whose latest XCM version we would like to know. Duplicates not allowed, and"] + #[doc = " the `u32` counter is the number of times that a send to the destination has been attempted,"] + #[doc = " which is used as a prioritization."] + pub async fn version_discovery_queue( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + runtime_types::frame_support::storage::bounded_vec::BoundedVec<( + runtime_types::xcm::VersionedMultiLocation, + ::core::primitive::u32, + )>, + ::subxt::BasicError, + > { + if self + .client + .metadata() + .storage_hash::()? 
+ == [ + 45u8, 28u8, 29u8, 233u8, 239u8, 65u8, 24u8, 214u8, 153u8, 189u8, 132u8, + 235u8, 62u8, 197u8, 252u8, 56u8, 38u8, 97u8, 13u8, 16u8, 149u8, 25u8, + 252u8, 181u8, 206u8, 54u8, 250u8, 133u8, 133u8, 74u8, 186u8, 22u8, + ] + { + let entry = VersionDiscoveryQueue; + self.client + .storage() + .fetch_or_default(&entry, block_hash) + .await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + #[doc = " The current migration's stage, if any."] + pub async fn current_migration( + &self, + block_hash: ::core::option::Option, + ) -> ::core::result::Result< + ::core::option::Option< + runtime_types::pallet_xcm::pallet::VersionMigrationStage, + >, + ::subxt::BasicError, + > { + if self.client.metadata().storage_hash::()? + == [ + 228u8, 254u8, 240u8, 20u8, 92u8, 79u8, 40u8, 65u8, 176u8, 111u8, 243u8, + 168u8, 238u8, 147u8, 247u8, 170u8, 185u8, 107u8, 58u8, 54u8, 224u8, + 222u8, 141u8, 113u8, 95u8, 92u8, 17u8, 69u8, 162u8, 242u8, 245u8, 95u8, + ] + { + let entry = CurrentMigration; + self.client.storage().fetch(&entry, block_hash).await + } else { + Err(::subxt::MetadataError::IncompatibleMetadata.into()) + } + } + } + } + } + pub mod runtime_types { + use super::runtime_types; + pub mod beefy_primitives { + use super::runtime_types; + pub mod crypto { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct Public(pub runtime_types::sp_core::ecdsa::Public); + } + pub mod mmr { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct BeefyNextAuthoritySet<_0> { + pub id: ::core::primitive::u64, + pub len: ::core::primitive::u32, + pub root: _0, + } + } + } + pub mod bitvec { + use super::runtime_types; + pub mod order { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct Lsb0; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct Msb0; + } + } + pub mod bp_header_chain { + use super::runtime_types; + pub mod justification { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct GrandpaJustification<_0> { + pub round: ::core::primitive::u64, + pub commit: runtime_types::finality_grandpa::Commit< + ::subxt::sp_core::H256, + ::core::primitive::u32, + runtime_types::sp_finality_grandpa::app::Signature, + runtime_types::sp_finality_grandpa::app::Public, + >, + pub votes_ancestries: ::std::vec::Vec<_0>, + } + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct AuthoritySet { + pub authorities: ::std::vec::Vec<( + runtime_types::sp_finality_grandpa::app::Public, + ::core::primitive::u64, + )>, + pub set_id: ::core::primitive::u64, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct InitializationData<_0> { + pub header: ::std::boxed::Box<_0>, + pub authority_list: ::std::vec::Vec<( + runtime_types::sp_finality_grandpa::app::Public, + ::core::primitive::u64, + )>, + pub set_id: ::core::primitive::u64, + pub is_halted: ::core::primitive::bool, + } + } + pub mod bp_message_dispatch { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum CallOrigin<_0, _1, _2> { + #[codec(index = 0)] + SourceRoot, + #[codec(index = 1)] + TargetAccount(_0, _1, _2), + #[codec(index = 2)] + SourceAccount(_0), + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: 
Encode, Debug)] + pub struct MessagePayload<_0, _1, _2, _3> { + pub spec_version: ::core::primitive::u32, + pub weight: ::core::primitive::u64, + pub origin: runtime_types::bp_message_dispatch::CallOrigin<_0, _1, _2>, + pub dispatch_fee_payment: runtime_types::bp_runtime::messages::DispatchFeePayment, + pub call: _3, + } + } + pub mod bp_messages { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct DeliveredMessages { + pub begin: ::core::primitive::u64, + pub end: ::core::primitive::u64, + pub dispatch_results: ::subxt::bitvec::vec::BitVec< + ::core::primitive::u8, + ::subxt::bitvec::order::Msb0, + >, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct InboundLaneData<_0> { + pub relayers: ::std::vec::Vec>, + pub last_confirmed_nonce: ::core::primitive::u64, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct MessageData<_0> { + pub payload: ::std::vec::Vec<::core::primitive::u8>, + pub fee: _0, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct MessageKey { + pub lane_id: [::core::primitive::u8; 4usize], + pub nonce: ::core::primitive::u64, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum OperatingMode { + #[codec(index = 0)] + Normal, + #[codec(index = 1)] + RejectingOutboundMessages, + #[codec(index = 2)] + Halted, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct OutboundLaneData { + pub oldest_unpruned_nonce: ::core::primitive::u64, + pub latest_received_nonce: ::core::primitive::u64, + pub latest_generated_nonce: ::core::primitive::u64, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct UnrewardedRelayer<_0> { + pub relayer: _0, + pub messages: runtime_types::bp_messages::DeliveredMessages, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct UnrewardedRelayersState { + pub unrewarded_relayer_entries: ::core::primitive::u64, + pub messages_in_oldest_entry: ::core::primitive::u64, + pub total_messages: ::core::primitive::u64, + } + } + pub mod bp_runtime { + use super::runtime_types; + pub mod messages { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum DispatchFeePayment { + #[codec(index = 0)] + AtSourceChain, + #[codec(index = 1)] + AtTargetChain, + } + } + } + pub mod bridge_runtime_common { + use super::runtime_types; + pub mod messages { + use super::runtime_types; + pub mod source { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct FromBridgedChainMessagesDeliveryProof<_0> { + pub bridged_header_hash: _0, + pub storage_proof: ::std::vec::Vec<::std::vec::Vec<::core::primitive::u8>>, + pub lane: [::core::primitive::u8; 4usize], + } + } + pub mod target { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct FromBridgedChainMessagesProof<_0> { + pub bridged_header_hash: _0, + pub storage_proof: ::std::vec::Vec<::std::vec::Vec<::core::primitive::u8>>, + pub lane: [::core::primitive::u8; 4usize], + pub nonces_start: ::core::primitive::u64, + pub nonces_end: ::core::primitive::u64, + } + } + } + } + pub mod finality_grandpa { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + 
pub struct Commit<_0, _1, _2, _3> { + pub target_hash: _0, + pub target_number: _1, + pub precommits: ::std::vec::Vec< + runtime_types::finality_grandpa::SignedPrecommit<_0, _1, _2, _3>, + >, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct Equivocation<_0, _1, _2> { + pub round_number: ::core::primitive::u64, + pub identity: _0, + pub first: (_1, _2), + pub second: (_1, _2), + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct Precommit<_0, _1> { + pub target_hash: _0, + pub target_number: _1, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct Prevote<_0, _1> { + pub target_hash: _0, + pub target_number: _1, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct SignedPrecommit<_0, _1, _2, _3> { + pub precommit: runtime_types::finality_grandpa::Precommit<_0, _1>, + pub signature: _2, + pub id: _3, + } + } + pub mod frame_support { + use super::runtime_types; + pub mod dispatch { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum RawOrigin<_0> { + #[codec(index = 0)] + Root, + #[codec(index = 1)] + Signed(_0), + #[codec(index = 2)] + None, + } + } + pub mod storage { + use super::runtime_types; + pub mod bounded_vec { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct BoundedVec<_0>(pub ::std::vec::Vec<_0>); + } + pub mod weak_bounded_vec { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct WeakBoundedVec<_0>(pub ::std::vec::Vec<_0>); + } + } + pub mod traits { + use super::runtime_types; + pub mod misc { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct WrapperKeepOpaque<_0>( + #[codec(compact)] pub ::core::primitive::u32, + pub _0, + ); + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct WrapperOpaque<_0>( + #[codec(compact)] pub ::core::primitive::u32, + pub _0, + ); + } + pub mod tokens { + use super::runtime_types; + pub mod misc { + use super::runtime_types; + #[derive( + :: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug, + )] + pub enum BalanceStatus { + #[codec(index = 0)] + Free, + #[codec(index = 1)] + Reserved, + } + } + } + } + pub mod weights { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum DispatchClass { + #[codec(index = 0)] + Normal, + #[codec(index = 1)] + Operational, + #[codec(index = 2)] + Mandatory, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct DispatchInfo { + pub weight: ::core::primitive::u64, + pub class: runtime_types::frame_support::weights::DispatchClass, + pub pays_fee: runtime_types::frame_support::weights::Pays, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Pays { + #[codec(index = 0)] + Yes, + #[codec(index = 1)] + No, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct PerDispatchClass<_0> { + pub normal: _0, + pub operational: _0, + pub mandatory: _0, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct RuntimeDbWeight { + pub read: ::core::primitive::u64, + pub write: ::core::primitive::u64, + } + #[derive(:: subxt :: codec :: Decode, 
:: subxt :: codec :: Encode, Debug)] + pub struct WeightToFeeCoefficient<_0> { + pub coeff_integer: _0, + pub coeff_frac: runtime_types::sp_arithmetic::per_things::Perbill, + pub negative: ::core::primitive::bool, + pub degree: ::core::primitive::u8, + } + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct PalletId(pub [::core::primitive::u8; 8usize]); + } + pub mod frame_system { + use super::runtime_types; + pub mod extensions { + use super::runtime_types; + pub mod check_genesis { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct CheckGenesis; + } + pub mod check_mortality { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct CheckMortality(pub runtime_types::sp_runtime::generic::era::Era); + } + pub mod check_non_zero_sender { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct CheckNonZeroSender; + } + pub mod check_nonce { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct CheckNonce(#[codec(compact)] pub ::core::primitive::u32); + } + pub mod check_spec_version { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct CheckSpecVersion; + } + pub mod check_tx_version { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct CheckTxVersion; + } + pub mod check_weight { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct CheckWeight; + } + } + pub mod limits { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct BlockLength { + pub max: runtime_types::frame_support::weights::PerDispatchClass< + ::core::primitive::u32, + >, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct BlockWeights { + pub base_block: ::core::primitive::u64, + pub max_block: ::core::primitive::u64, + pub per_class: runtime_types::frame_support::weights::PerDispatchClass< + runtime_types::frame_system::limits::WeightsPerClass, + >, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct WeightsPerClass { + pub base_extrinsic: ::core::primitive::u64, + pub max_extrinsic: ::core::option::Option<::core::primitive::u64>, + pub max_total: ::core::option::Option<::core::primitive::u64>, + pub reserved: ::core::option::Option<::core::primitive::u64>, + } + } + pub mod pallet { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Call { + #[codec(index = 0)] + #[doc = "A dispatch that will fill the block weight up to the given ratio."] + fill_block { + ratio: runtime_types::sp_arithmetic::per_things::Perbill, + }, + #[codec(index = 1)] + #[doc = "Make some on-chain remark."] + #[doc = ""] + #[doc = "# "] + #[doc = "- `O(1)`"] + #[doc = "# "] + remark { + remark: ::std::vec::Vec<::core::primitive::u8>, + }, + #[codec(index = 2)] + #[doc = "Set the number of pages in the WebAssembly environment's heap."] + set_heap_pages { pages: ::core::primitive::u64 }, + #[codec(index = 3)] + #[doc = "Set the new runtime code."] + #[doc = ""] + #[doc = "# "] + #[doc = "- `O(C + S)` where `C` length of `code` and `S` complexity of `can_set_code`"] 
+ #[doc = "- 1 call to `can_set_code`: `O(S)` (calls `sp_io::misc::runtime_version` which is"] + #[doc = " expensive)."] + #[doc = "- 1 storage write (codec `O(C)`)."] + #[doc = "- 1 digest item."] + #[doc = "- 1 event."] + #[doc = "The weight of this function is dependent on the runtime, but generally this is very"] + #[doc = "expensive. We will treat this as a full block."] + #[doc = "# "] + set_code { + code: ::std::vec::Vec<::core::primitive::u8>, + }, + #[codec(index = 4)] + #[doc = "Set the new runtime code without doing any checks of the given `code`."] + #[doc = ""] + #[doc = "# "] + #[doc = "- `O(C)` where `C` length of `code`"] + #[doc = "- 1 storage write (codec `O(C)`)."] + #[doc = "- 1 digest item."] + #[doc = "- 1 event."] + #[doc = "The weight of this function is dependent on the runtime. We will treat this as a full"] + #[doc = "block. # "] + set_code_without_checks { + code: ::std::vec::Vec<::core::primitive::u8>, + }, + #[codec(index = 5)] + #[doc = "Set some items of storage."] + set_storage { + items: ::std::vec::Vec<( + ::std::vec::Vec<::core::primitive::u8>, + ::std::vec::Vec<::core::primitive::u8>, + )>, + }, + #[codec(index = 6)] + #[doc = "Kill some items from storage."] + kill_storage { + keys: ::std::vec::Vec<::std::vec::Vec<::core::primitive::u8>>, + }, + #[codec(index = 7)] + #[doc = "Kill all storage items with a key that starts with the given prefix."] + #[doc = ""] + #[doc = "**NOTE:** We rely on the Root origin to provide us the number of subkeys under"] + #[doc = "the prefix we are removing to accurately calculate the weight of this function."] + kill_prefix { + prefix: ::std::vec::Vec<::core::primitive::u8>, + subkeys: ::core::primitive::u32, + }, + #[codec(index = 8)] + #[doc = "Make some on-chain remark and emit event."] + remark_with_event { + remark: ::std::vec::Vec<::core::primitive::u8>, + }, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Error { + #[codec(index = 0)] + #[doc = "The name of specification does not match between the current runtime"] + #[doc = "and the new runtime."] + InvalidSpecName, + #[codec(index = 1)] + #[doc = "The specification version is not allowed to decrease between the current runtime"] + #[doc = "and the new runtime."] + SpecVersionNeedsToIncrease, + #[codec(index = 2)] + #[doc = "Failed to extract the runtime version from the new runtime."] + #[doc = ""] + #[doc = "Either calling `Core_version` or decoding `RuntimeVersion` failed."] + FailedToExtractRuntimeVersion, + #[codec(index = 3)] + #[doc = "Suicide called when the account has non-default composite data."] + NonDefaultComposite, + #[codec(index = 4)] + #[doc = "There is a non-zero reference count preventing the account from being purged."] + NonZeroRefCount, + #[codec(index = 5)] + #[doc = "The origin filter prevent the call to be dispatched."] + CallFiltered, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Event { + #[codec(index = 0)] + #[doc = "An extrinsic completed successfully."] + ExtrinsicSuccess { + dispatch_info: runtime_types::frame_support::weights::DispatchInfo, + }, + #[codec(index = 1)] + #[doc = "An extrinsic failed."] + ExtrinsicFailed { + dispatch_error: runtime_types::sp_runtime::DispatchError, + dispatch_info: runtime_types::frame_support::weights::DispatchInfo, + }, + #[codec(index = 2)] + #[doc = "`:code` was updated."] + CodeUpdated, + #[codec(index = 3)] + #[doc = "A new account was created."] + NewAccount { + account: 
::subxt::sp_core::crypto::AccountId32, + }, + #[codec(index = 4)] + #[doc = "An account was reaped."] + KilledAccount { + account: ::subxt::sp_core::crypto::AccountId32, + }, + #[codec(index = 5)] + #[doc = "On on-chain remark happened."] + Remarked { + sender: ::subxt::sp_core::crypto::AccountId32, + hash: ::subxt::sp_core::H256, + }, + } + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct AccountInfo<_0, _1> { + pub nonce: _0, + pub consumers: _0, + pub providers: _0, + pub sufficients: _0, + pub data: _1, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct EventRecord<_0, _1> { + pub phase: runtime_types::frame_system::Phase, + pub event: _0, + pub topics: ::std::vec::Vec<_1>, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct LastRuntimeUpgradeInfo { + #[codec(compact)] + pub spec_version: ::core::primitive::u32, + pub spec_name: ::std::string::String, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Phase { + #[codec(index = 0)] + ApplyExtrinsic(::core::primitive::u32), + #[codec(index = 1)] + Finalization, + #[codec(index = 2)] + Initialization, + } + } + pub mod pallet_authorship { + use super::runtime_types; + pub mod pallet { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Call { + #[codec(index = 0)] + #[doc = "Provide a set of uncles."] + set_uncles { + new_uncles: ::std::vec::Vec< + runtime_types::sp_runtime::generic::header::Header< + ::core::primitive::u32, + runtime_types::sp_runtime::traits::BlakeTwo256, + >, + >, + }, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Error { + #[codec(index = 0)] + #[doc = "The uncle parent not in the chain."] + InvalidUncleParent, + #[codec(index = 1)] + #[doc = "Uncles already set in the block."] + UnclesAlreadySet, + #[codec(index = 2)] + #[doc = "Too many uncles."] + TooManyUncles, + #[codec(index = 3)] + #[doc = "The uncle is genesis."] + GenesisUncle, + #[codec(index = 4)] + #[doc = "The uncle is too high in chain."] + TooHighUncle, + #[codec(index = 5)] + #[doc = "The uncle is already included."] + UncleAlreadyIncluded, + #[codec(index = 6)] + #[doc = "The uncle isn't recent enough to be included."] + OldUncle, + } + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum UncleEntryItem<_0, _1, _2> { + #[codec(index = 0)] + InclusionHeight(_0), + #[codec(index = 1)] + Uncle(_1, ::core::option::Option<_2>), + } + } + pub mod pallet_babe { + use super::runtime_types; + pub mod pallet { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Call { + #[codec(index = 0)] + #[doc = "Report authority equivocation/misbehavior. This method will verify"] + #[doc = "the equivocation proof and validate the given key ownership proof"] + #[doc = "against the extracted offender. 
If both are valid, the offence will"] + #[doc = "be reported."] + report_equivocation { + equivocation_proof: ::std::boxed::Box< + runtime_types::sp_consensus_slots::EquivocationProof< + runtime_types::sp_runtime::generic::header::Header< + ::core::primitive::u32, + runtime_types::sp_runtime::traits::BlakeTwo256, + >, + runtime_types::sp_consensus_babe::app::Public, + >, + >, + key_owner_proof: runtime_types::sp_session::MembershipProof, + }, + #[codec(index = 1)] + #[doc = "Report authority equivocation/misbehavior. This method will verify"] + #[doc = "the equivocation proof and validate the given key ownership proof"] + #[doc = "against the extracted offender. If both are valid, the offence will"] + #[doc = "be reported."] + #[doc = "This extrinsic must be called unsigned and it is expected that only"] + #[doc = "block authors will call it (validated in `ValidateUnsigned`), as such"] + #[doc = "if the block author is defined it will be defined as the equivocation"] + #[doc = "reporter."] + report_equivocation_unsigned { + equivocation_proof: ::std::boxed::Box< + runtime_types::sp_consensus_slots::EquivocationProof< + runtime_types::sp_runtime::generic::header::Header< + ::core::primitive::u32, + runtime_types::sp_runtime::traits::BlakeTwo256, + >, + runtime_types::sp_consensus_babe::app::Public, + >, + >, + key_owner_proof: runtime_types::sp_session::MembershipProof, + }, + #[codec(index = 2)] + #[doc = "Plan an epoch config change. The epoch config change is recorded and will be enacted on"] + #[doc = "the next call to `enact_epoch_change`. The config will be activated one epoch after."] + #[doc = "Multiple calls to this method will replace any existing planned config change that had"] + #[doc = "not been enacted yet."] + plan_config_change { + config: runtime_types::sp_consensus_babe::digests::NextConfigDescriptor, + }, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Error { + #[codec(index = 0)] + #[doc = "An equivocation proof provided as part of an equivocation report is invalid."] + InvalidEquivocationProof, + #[codec(index = 1)] + #[doc = "A key ownership proof provided as part of an equivocation report is invalid."] + InvalidKeyOwnershipProof, + #[codec(index = 2)] + #[doc = "A given equivocation report is valid but already previously reported."] + DuplicateOffenceReport, + #[codec(index = 3)] + #[doc = "Submitted configuration is invalid."] + InvalidConfiguration, + } + } + } + pub mod pallet_balances { + use super::runtime_types; + pub mod pallet { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Call { + #[codec(index = 0)] + #[doc = "Transfer some liquid free balance to another account."] + #[doc = ""] + #[doc = "`transfer` will set the `FreeBalance` of the sender and receiver."] + #[doc = "If the sender's account is below the existential deposit as a result"] + #[doc = "of the transfer, the account will be reaped."] + #[doc = ""] + #[doc = "The dispatch origin for this call must be `Signed` by the transactor."] + #[doc = ""] + #[doc = "# "] + #[doc = "- Dependent on arguments but not critical, given proper implementations for input config"] + #[doc = " types. 
See related functions below."] + #[doc = "- It contains a limited number of reads and writes internally and no complex"] + #[doc = " computation."] + #[doc = ""] + #[doc = "Related functions:"] + #[doc = ""] + #[doc = " - `ensure_can_withdraw` is always called internally but has a bounded complexity."] + #[doc = " - Transferring balances to accounts that did not exist before will cause"] + #[doc = " `T::OnNewAccount::on_new_account` to be called."] + #[doc = " - Removing enough funds from an account will trigger `T::DustRemoval::on_unbalanced`."] + #[doc = " - `transfer_keep_alive` works the same way as `transfer`, but has an additional check"] + #[doc = " that the transfer will not kill the origin account."] + #[doc = "---------------------------------"] + #[doc = "- Origin account is already in memory, so no DB operations for them."] + #[doc = "# "] + transfer { + dest: ::subxt::sp_runtime::MultiAddress< + ::subxt::sp_core::crypto::AccountId32, + (), + >, + #[codec(compact)] + value: ::core::primitive::u128, + }, + #[codec(index = 1)] + #[doc = "Set the balances of a given account."] + #[doc = ""] + #[doc = "This will alter `FreeBalance` and `ReservedBalance` in storage. it will"] + #[doc = "also alter the total issuance of the system (`TotalIssuance`) appropriately."] + #[doc = "If the new free or reserved balance is below the existential deposit,"] + #[doc = "it will reset the account nonce (`frame_system::AccountNonce`)."] + #[doc = ""] + #[doc = "The dispatch origin for this call is `root`."] + set_balance { + who: ::subxt::sp_runtime::MultiAddress< + ::subxt::sp_core::crypto::AccountId32, + (), + >, + #[codec(compact)] + new_free: ::core::primitive::u128, + #[codec(compact)] + new_reserved: ::core::primitive::u128, + }, + #[codec(index = 2)] + #[doc = "Exactly as `transfer`, except the origin must be root and the source account may be"] + #[doc = "specified."] + #[doc = "# "] + #[doc = "- Same as transfer, but additional read and write because the source account is not"] + #[doc = " assumed to be in the overlay."] + #[doc = "# "] + force_transfer { + source: ::subxt::sp_runtime::MultiAddress< + ::subxt::sp_core::crypto::AccountId32, + (), + >, + dest: ::subxt::sp_runtime::MultiAddress< + ::subxt::sp_core::crypto::AccountId32, + (), + >, + #[codec(compact)] + value: ::core::primitive::u128, + }, + #[codec(index = 3)] + #[doc = "Same as the [`transfer`] call, but with a check that the transfer will not kill the"] + #[doc = "origin account."] + #[doc = ""] + #[doc = "99% of the time you want [`transfer`] instead."] + #[doc = ""] + #[doc = "[`transfer`]: struct.Pallet.html#method.transfer"] + transfer_keep_alive { + dest: ::subxt::sp_runtime::MultiAddress< + ::subxt::sp_core::crypto::AccountId32, + (), + >, + #[codec(compact)] + value: ::core::primitive::u128, + }, + #[codec(index = 4)] + #[doc = "Transfer the entire transferable balance from the caller account."] + #[doc = ""] + #[doc = "NOTE: This function only attempts to transfer _transferable_ balances. This means that"] + #[doc = "any locked, reserved, or existential deposits (when `keep_alive` is `true`), will not be"] + #[doc = "transferred by this function. 
To ensure that this function results in a killed account,"] + #[doc = "you might need to prepare the account by removing any reference counters, storage"] + #[doc = "deposits, etc..."] + #[doc = ""] + #[doc = "The dispatch origin of this call must be Signed."] + #[doc = ""] + #[doc = "- `dest`: The recipient of the transfer."] + #[doc = "- `keep_alive`: A boolean to determine if the `transfer_all` operation should send all"] + #[doc = " of the funds the account has, causing the sender account to be killed (false), or"] + #[doc = " transfer everything except at least the existential deposit, which will guarantee to"] + #[doc = " keep the sender account alive (true). # "] + #[doc = "- O(1). Just like transfer, but reading the user's transferable balance first."] + #[doc = " #"] + transfer_all { + dest: ::subxt::sp_runtime::MultiAddress< + ::subxt::sp_core::crypto::AccountId32, + (), + >, + keep_alive: ::core::primitive::bool, + }, + #[codec(index = 5)] + #[doc = "Unreserve some balance from a user by force."] + #[doc = ""] + #[doc = "Can only be called by ROOT."] + force_unreserve { + who: ::subxt::sp_runtime::MultiAddress< + ::subxt::sp_core::crypto::AccountId32, + (), + >, + amount: ::core::primitive::u128, + }, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Error { + #[codec(index = 0)] + #[doc = "Vesting balance too high to send value"] + VestingBalance, + #[codec(index = 1)] + #[doc = "Account liquidity restrictions prevent withdrawal"] + LiquidityRestrictions, + #[codec(index = 2)] + #[doc = "Balance too low to send value"] + InsufficientBalance, + #[codec(index = 3)] + #[doc = "Value too low to create account due to existential deposit"] + ExistentialDeposit, + #[codec(index = 4)] + #[doc = "Transfer/payment would kill account"] + KeepAlive, + #[codec(index = 5)] + #[doc = "A vesting schedule already exists for this account"] + ExistingVestingSchedule, + #[codec(index = 6)] + #[doc = "Beneficiary account must pre-exist"] + DeadAccount, + #[codec(index = 7)] + #[doc = "Number of named reserves exceed MaxReserves"] + TooManyReserves, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Event { + #[codec(index = 0)] + #[doc = "An account was created with some free balance."] + Endowed { + account: ::subxt::sp_core::crypto::AccountId32, + free_balance: ::core::primitive::u128, + }, + #[codec(index = 1)] + #[doc = "An account was removed whose balance was non-zero but below ExistentialDeposit,"] + #[doc = "resulting in an outright loss."] + DustLost { + account: ::subxt::sp_core::crypto::AccountId32, + amount: ::core::primitive::u128, + }, + #[codec(index = 2)] + #[doc = "Transfer succeeded."] + Transfer { + from: ::subxt::sp_core::crypto::AccountId32, + to: ::subxt::sp_core::crypto::AccountId32, + amount: ::core::primitive::u128, + }, + #[codec(index = 3)] + #[doc = "A balance was set by root."] + BalanceSet { + who: ::subxt::sp_core::crypto::AccountId32, + free: ::core::primitive::u128, + reserved: ::core::primitive::u128, + }, + #[codec(index = 4)] + #[doc = "Some balance was reserved (moved from free to reserved)."] + Reserved { + who: ::subxt::sp_core::crypto::AccountId32, + amount: ::core::primitive::u128, + }, + #[codec(index = 5)] + #[doc = "Some balance was unreserved (moved from reserved to free)."] + Unreserved { + who: ::subxt::sp_core::crypto::AccountId32, + amount: ::core::primitive::u128, + }, + #[codec(index = 6)] + #[doc = "Some balance was moved from the reserve of the first account to the 
second account."] + #[doc = "Final argument indicates the destination balance type."] + ReserveRepatriated { + from: ::subxt::sp_core::crypto::AccountId32, + to: ::subxt::sp_core::crypto::AccountId32, + amount: ::core::primitive::u128, + destination_status: + runtime_types::frame_support::traits::tokens::misc::BalanceStatus, + }, + #[codec(index = 7)] + #[doc = "Some amount was deposited (e.g. for transaction fees)."] + Deposit { + who: ::subxt::sp_core::crypto::AccountId32, + amount: ::core::primitive::u128, + }, + #[codec(index = 8)] + #[doc = "Some amount was withdrawn from the account (e.g. for transaction fees)."] + Withdraw { + who: ::subxt::sp_core::crypto::AccountId32, + amount: ::core::primitive::u128, + }, + #[codec(index = 9)] + #[doc = "Some amount was removed from the account (e.g. for misbehavior)."] + Slashed { + who: ::subxt::sp_core::crypto::AccountId32, + amount: ::core::primitive::u128, + }, + } + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct AccountData<_0> { + pub free: _0, + pub reserved: _0, + pub misc_frozen: _0, + pub fee_frozen: _0, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct BalanceLock<_0> { + pub id: [::core::primitive::u8; 8usize], + pub amount: _0, + pub reasons: runtime_types::pallet_balances::Reasons, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Reasons { + #[codec(index = 0)] + Fee, + #[codec(index = 1)] + Misc, + #[codec(index = 2)] + All, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Releases { + #[codec(index = 0)] + V1_0_0, + #[codec(index = 1)] + V2_0_0, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct ReserveData<_0, _1> { + pub id: _0, + pub amount: _1, + } + } + pub mod pallet_beefy { + use super::runtime_types; + pub mod pallet { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Call {} + } + } + pub mod pallet_bridge_dispatch { + use super::runtime_types; + pub mod pallet { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Event { + #[codec(index = 0)] + #[doc = "Message has been rejected before reaching dispatch."] + MessageRejected( + [::core::primitive::u8; 4usize], + ([::core::primitive::u8; 4usize], ::core::primitive::u64), + ), + #[codec(index = 1)] + #[doc = "Message has been rejected by dispatcher because of spec version mismatch."] + #[doc = "Last two arguments are: expected and passed spec version."] + MessageVersionSpecMismatch( + [::core::primitive::u8; 4usize], + ([::core::primitive::u8; 4usize], ::core::primitive::u64), + ::core::primitive::u32, + ::core::primitive::u32, + ), + #[codec(index = 2)] + #[doc = "Message has been rejected by dispatcher because of weight mismatch."] + #[doc = "Last two arguments are: expected and passed call weight."] + MessageWeightMismatch( + [::core::primitive::u8; 4usize], + ([::core::primitive::u8; 4usize], ::core::primitive::u64), + ::core::primitive::u64, + ::core::primitive::u64, + ), + #[codec(index = 3)] + #[doc = "Message signature mismatch."] + MessageSignatureMismatch( + [::core::primitive::u8; 4usize], + ([::core::primitive::u8; 4usize], ::core::primitive::u64), + ), + #[codec(index = 4)] + #[doc = "We have failed to decode Call from the message."] + MessageCallDecodeFailed( + [::core::primitive::u8; 4usize], + 
([::core::primitive::u8; 4usize], ::core::primitive::u64), + ), + #[codec(index = 5)] + #[doc = "The call from the message has been rejected by the call filter."] + MessageCallRejected( + [::core::primitive::u8; 4usize], + ([::core::primitive::u8; 4usize], ::core::primitive::u64), + ), + #[codec(index = 6)] + #[doc = "The origin account has failed to pay fee for dispatching the message."] + MessageDispatchPaymentFailed( + [::core::primitive::u8; 4usize], + ([::core::primitive::u8; 4usize], ::core::primitive::u64), + ::subxt::sp_core::crypto::AccountId32, + ::core::primitive::u64, + ), + #[codec(index = 7)] + #[doc = "Message has been dispatched with given result."] + MessageDispatched( + [::core::primitive::u8; 4usize], + ([::core::primitive::u8; 4usize], ::core::primitive::u64), + ::core::result::Result<(), runtime_types::sp_runtime::DispatchError>, + ), + #[codec(index = 8)] + #[doc = "Phantom member, never used. Needed to handle multiple pallet instances."] + _Dummy, + } + } + } + pub mod pallet_bridge_grandpa { + use super::runtime_types; + pub mod pallet { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Call { + #[codec(index = 0)] + #[doc = "Verify a target header is finalized according to the given finality proof."] + #[doc = ""] + #[doc = "It will use the underlying storage pallet to fetch information about the current"] + #[doc = "authorities and best finalized header in order to verify that the header is finalized."] + #[doc = ""] + #[doc = "If successful in verification, it will write the target header to the underlying storage"] + #[doc = "pallet."] + submit_finality_proof { + finality_target: ::std::boxed::Box< + runtime_types::sp_runtime::generic::header::Header< + ::core::primitive::u32, + runtime_types::sp_runtime::traits::BlakeTwo256, + >, + >, + justification: + runtime_types::bp_header_chain::justification::GrandpaJustification< + runtime_types::sp_runtime::generic::header::Header< + ::core::primitive::u32, + runtime_types::sp_runtime::traits::BlakeTwo256, + >, + >, + }, + #[codec(index = 1)] + #[doc = "Bootstrap the bridge pallet with an initial header and authority set from which to sync."] + #[doc = ""] + #[doc = "The initial configuration provided does not need to be the genesis header of the bridged"] + #[doc = "chain, it can be any arbitrary header. You can also provide the next scheduled set"] + #[doc = "change if it is already know."] + #[doc = ""] + #[doc = "This function is only allowed to be called from a trusted origin and writes to storage"] + #[doc = "with practically no checks in terms of the validity of the data. 
It is important that"] + #[doc = "you ensure that valid data is being passed in."] + initialize { + init_data: runtime_types::bp_header_chain::InitializationData< + runtime_types::sp_runtime::generic::header::Header< + ::core::primitive::u32, + runtime_types::sp_runtime::traits::BlakeTwo256, + >, + >, + }, + #[codec(index = 2)] + #[doc = "Change `PalletOwner`."] + #[doc = ""] + #[doc = "May only be called either by root, or by `PalletOwner`."] + set_owner { + new_owner: ::core::option::Option<::subxt::sp_core::crypto::AccountId32>, + }, + #[codec(index = 3)] + #[doc = "Halt or resume all pallet operations."] + #[doc = ""] + #[doc = "May only be called either by root, or by `PalletOwner`."] + set_operational { + operational: ::core::primitive::bool, + }, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Error { + #[codec(index = 0)] + #[doc = "The given justification is invalid for the given header."] + InvalidJustification, + #[codec(index = 1)] + #[doc = "The authority set from the underlying header chain is invalid."] + InvalidAuthoritySet, + #[codec(index = 2)] + #[doc = "There are too many requests for the current window to handle."] + TooManyRequests, + #[codec(index = 3)] + #[doc = "The header being imported is older than the best finalized header known to the pallet."] + OldHeader, + #[codec(index = 4)] + #[doc = "The header is unknown to the pallet."] + UnknownHeader, + #[codec(index = 5)] + #[doc = "The scheduled authority set change found in the header is unsupported by the pallet."] + #[doc = ""] + #[doc = "This is the case for non-standard (e.g forced) authority set changes."] + UnsupportedScheduledChange, + #[codec(index = 6)] + #[doc = "The pallet is not yet initialized."] + NotInitialized, + #[codec(index = 7)] + #[doc = "The pallet has already been initialized."] + AlreadyInitialized, + #[codec(index = 8)] + #[doc = "All pallet operations are halted."] + Halted, + #[codec(index = 9)] + #[doc = "The storage proof doesn't contains storage root. 
So it is invalid for given header."] + StorageRootMismatch, + } + } + } + pub mod pallet_bridge_messages { + use super::runtime_types; + pub mod pallet { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Call { + # [codec (index = 0)] # [doc = "Change `PalletOwner`."] # [doc = ""] # [doc = "May only be called either by root, or by `PalletOwner`."] set_owner { new_owner : :: core :: option :: Option < :: subxt :: sp_core :: crypto :: AccountId32 > , } , # [codec (index = 1)] # [doc = "Halt or resume all/some pallet operations."] # [doc = ""] # [doc = "May only be called either by root, or by `PalletOwner`."] set_operating_mode { operating_mode : runtime_types :: bp_messages :: OperatingMode , } , # [codec (index = 2)] # [doc = "Update pallet parameter."] # [doc = ""] # [doc = "May only be called either by root, or by `PalletOwner`."] # [doc = ""] # [doc = "The weight is: single read for permissions check + 2 writes for parameter value and"] # [doc = "event."] update_pallet_parameter { parameter : () , } , # [codec (index = 3)] # [doc = "Send message over lane."] send_message { lane_id : [:: core :: primitive :: u8 ; 4usize] , payload : runtime_types :: bp_message_dispatch :: MessagePayload < :: subxt :: sp_core :: crypto :: AccountId32 , runtime_types :: sp_runtime :: MultiSigner , runtime_types :: sp_runtime :: MultiSignature , :: std :: vec :: Vec < :: core :: primitive :: u8 > > , delivery_and_dispatch_fee : :: core :: primitive :: u128 , } , # [codec (index = 4)] # [doc = "Pay additional fee for the message."] increase_message_fee { lane_id : [:: core :: primitive :: u8 ; 4usize] , nonce : :: core :: primitive :: u64 , additional_fee : :: core :: primitive :: u128 , } , # [codec (index = 5)] # [doc = "Receive messages proof from bridged chain."] # [doc = ""] # [doc = "The weight of the call assumes that the transaction always brings outbound lane"] # [doc = "state update. 
Because of that, the submitter (relayer) has no benefit of not including"] # [doc = "this data in the transaction, so reward confirmations lags should be minimal."] receive_messages_proof { relayer_id_at_bridged_chain : :: subxt :: sp_core :: crypto :: AccountId32 , proof : runtime_types :: bridge_runtime_common :: messages :: target :: FromBridgedChainMessagesProof < :: subxt :: sp_core :: H256 > , messages_count : :: core :: primitive :: u32 , dispatch_weight : :: core :: primitive :: u64 , } , # [codec (index = 6)] # [doc = "Receive messages delivery proof from bridged chain."] receive_messages_delivery_proof { proof : runtime_types :: bridge_runtime_common :: messages :: source :: FromBridgedChainMessagesDeliveryProof < :: subxt :: sp_core :: H256 > , relayers_state : runtime_types :: bp_messages :: UnrewardedRelayersState , } , } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Error { + #[codec(index = 0)] + #[doc = "All pallet operations are halted."] + Halted, + #[codec(index = 1)] + #[doc = "Message has been treated as invalid by chain verifier."] + MessageRejectedByChainVerifier, + #[codec(index = 2)] + #[doc = "Message has been treated as invalid by lane verifier."] + MessageRejectedByLaneVerifier, + #[codec(index = 3)] + #[doc = "Submitter has failed to pay fee for delivering and dispatching messages."] + FailedToWithdrawMessageFee, + #[codec(index = 4)] + #[doc = "The transaction brings too many messages."] + TooManyMessagesInTheProof, + #[codec(index = 5)] + #[doc = "Invalid messages has been submitted."] + InvalidMessagesProof, + #[codec(index = 6)] + #[doc = "Invalid messages delivery proof has been submitted."] + InvalidMessagesDeliveryProof, + #[codec(index = 7)] + #[doc = "The bridged chain has invalid `UnrewardedRelayers` in its storage (fatal for the lane)."] + InvalidUnrewardedRelayers, + #[codec(index = 8)] + #[doc = "The relayer has declared invalid unrewarded relayers state in the"] + #[doc = "`receive_messages_delivery_proof` call."] + InvalidUnrewardedRelayersState, + #[codec(index = 9)] + #[doc = "The message someone is trying to work with (i.e. increase fee) is already-delivered."] + MessageIsAlreadyDelivered, + #[codec(index = 10)] + #[doc = "The message someone is trying to work with (i.e. increase fee) is not yet sent."] + MessageIsNotYetSent, + #[codec(index = 11)] + #[doc = "The number of actually confirmed messages is going to be larger than the number of"] + #[doc = "messages in the proof. This may mean that this or bridged chain storage is corrupted."] + TryingToConfirmMoreMessagesThanExpected, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Event { + #[codec(index = 0)] + #[doc = "Pallet parameter has been updated."] + ParameterUpdated(()), + #[codec(index = 1)] + #[doc = "Message has been accepted and is waiting to be delivered."] + MessageAccepted([::core::primitive::u8; 4usize], ::core::primitive::u64), + #[codec(index = 2)] + #[doc = "Messages in the inclusive range have been delivered to the bridged chain."] + MessagesDelivered( + [::core::primitive::u8; 4usize], + runtime_types::bp_messages::DeliveredMessages, + ), + } + } + } + pub mod pallet_collective { + use super::runtime_types; + pub mod pallet { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Call { + #[codec(index = 0)] + #[doc = "Set the collective's membership."] + #[doc = ""] + #[doc = "- `new_members`: The new member list. 
Be nice to the chain and provide it sorted."] + #[doc = "- `prime`: The prime member whose vote sets the default."] + #[doc = "- `old_count`: The upper bound for the previous number of members in storage. Used for"] + #[doc = " weight estimation."] + #[doc = ""] + #[doc = "Requires root origin."] + #[doc = ""] + #[doc = "NOTE: Does not enforce the expected `MaxMembers` limit on the amount of members, but"] + #[doc = " the weight estimations rely on it to estimate dispatchable weight."] + #[doc = ""] + #[doc = "# WARNING:"] + #[doc = ""] + #[doc = "The `pallet-collective` can also be managed by logic outside of the pallet through the"] + #[doc = "implementation of the trait [`ChangeMembers`]."] + #[doc = "Any call to `set_members` must be careful that the member set doesn't get out of sync"] + #[doc = "with other logic managing the member set."] + #[doc = ""] + #[doc = "# "] + #[doc = "## Weight"] + #[doc = "- `O(MP + N)` where:"] + #[doc = " - `M` old-members-count (code- and governance-bounded)"] + #[doc = " - `N` new-members-count (code- and governance-bounded)"] + #[doc = " - `P` proposals-count (code-bounded)"] + #[doc = "- DB:"] + #[doc = " - 1 storage mutation (codec `O(M)` read, `O(N)` write) for reading and writing the"] + #[doc = " members"] + #[doc = " - 1 storage read (codec `O(P)`) for reading the proposals"] + #[doc = " - `P` storage mutations (codec `O(M)`) for updating the votes for each proposal"] + #[doc = " - 1 storage write (codec `O(1)`) for deleting the old `prime` and setting the new one"] + #[doc = "# "] + set_members { + new_members: ::std::vec::Vec<::subxt::sp_core::crypto::AccountId32>, + prime: ::core::option::Option<::subxt::sp_core::crypto::AccountId32>, + old_count: ::core::primitive::u32, + }, + #[codec(index = 1)] + #[doc = "Dispatch a proposal from a member using the `Member` origin."] + #[doc = ""] + #[doc = "Origin must be a member of the collective."] + #[doc = ""] + #[doc = "# "] + #[doc = "## Weight"] + #[doc = "- `O(M + P)` where `M` members-count (code-bounded) and `P` complexity of dispatching"] + #[doc = " `proposal`"] + #[doc = "- DB: 1 read (codec `O(M)`) + DB access of `proposal`"] + #[doc = "- 1 event"] + #[doc = "# "] + execute { + proposal: ::std::boxed::Box, + #[codec(compact)] + length_bound: ::core::primitive::u32, + }, + #[codec(index = 2)] + #[doc = "Add a new proposal to either be voted on or executed directly."] + #[doc = ""] + #[doc = "Requires the sender to be member."] + #[doc = ""] + #[doc = "`threshold` determines whether `proposal` is executed directly (`threshold < 2`)"] + #[doc = "or put up for voting."] + #[doc = ""] + #[doc = "# "] + #[doc = "## Weight"] + #[doc = "- `O(B + M + P1)` or `O(B + M + P2)` where:"] + #[doc = " - `B` is `proposal` size in bytes (length-fee-bounded)"] + #[doc = " - `M` is members-count (code- and governance-bounded)"] + #[doc = " - branching is influenced by `threshold` where:"] + #[doc = " - `P1` is proposal execution complexity (`threshold < 2`)"] + #[doc = " - `P2` is proposals-count (code-bounded) (`threshold >= 2`)"] + #[doc = "- DB:"] + #[doc = " - 1 storage read `is_member` (codec `O(M)`)"] + #[doc = " - 1 storage read `ProposalOf::contains_key` (codec `O(1)`)"] + #[doc = " - DB accesses influenced by `threshold`:"] + #[doc = " - EITHER storage accesses done by `proposal` (`threshold < 2`)"] + #[doc = " - OR proposal insertion (`threshold <= 2`)"] + #[doc = " - 1 storage mutation `Proposals` (codec `O(P2)`)"] + #[doc = " - 1 storage mutation `ProposalCount` (codec `O(1)`)"] + #[doc = " - 1 
storage write `ProposalOf` (codec `O(B)`)"] + #[doc = " - 1 storage write `Voting` (codec `O(M)`)"] + #[doc = " - 1 event"] + #[doc = "# "] + propose { + #[codec(compact)] + threshold: ::core::primitive::u32, + proposal: ::std::boxed::Box, + #[codec(compact)] + length_bound: ::core::primitive::u32, + }, + #[codec(index = 3)] + #[doc = "Add an aye or nay vote for the sender to the given proposal."] + #[doc = ""] + #[doc = "Requires the sender to be a member."] + #[doc = ""] + #[doc = "Transaction fees will be waived if the member is voting on any particular proposal"] + #[doc = "for the first time and the call is successful. Subsequent vote changes will charge a"] + #[doc = "fee."] + #[doc = "# "] + #[doc = "## Weight"] + #[doc = "- `O(M)` where `M` is members-count (code- and governance-bounded)"] + #[doc = "- DB:"] + #[doc = " - 1 storage read `Members` (codec `O(M)`)"] + #[doc = " - 1 storage mutation `Voting` (codec `O(M)`)"] + #[doc = "- 1 event"] + #[doc = "# "] + vote { + proposal: ::subxt::sp_core::H256, + #[codec(compact)] + index: ::core::primitive::u32, + approve: ::core::primitive::bool, + }, + #[codec(index = 4)] + #[doc = "Close a vote that is either approved, disapproved or whose voting period has ended."] + #[doc = ""] + #[doc = "May be called by any signed account in order to finish voting and close the proposal."] + #[doc = ""] + #[doc = "If called before the end of the voting period it will only close the vote if it is"] + #[doc = "has enough votes to be approved or disapproved."] + #[doc = ""] + #[doc = "If called after the end of the voting period abstentions are counted as rejections"] + #[doc = "unless there is a prime member set and the prime member cast an approval."] + #[doc = ""] + #[doc = "If the close operation completes successfully with disapproval, the transaction fee will"] + #[doc = "be waived. Otherwise execution of the approved operation will be charged to the caller."] + #[doc = ""] + #[doc = "+ `proposal_weight_bound`: The maximum amount of weight consumed by executing the closed"] + #[doc = "proposal."] + #[doc = "+ `length_bound`: The upper bound for the length of the proposal in storage. 
Checked via"] + #[doc = "`storage::read` so it is `size_of::() == 4` larger than the pure length."] + #[doc = ""] + #[doc = "# "] + #[doc = "## Weight"] + #[doc = "- `O(B + M + P1 + P2)` where:"] + #[doc = " - `B` is `proposal` size in bytes (length-fee-bounded)"] + #[doc = " - `M` is members-count (code- and governance-bounded)"] + #[doc = " - `P1` is the complexity of `proposal` preimage."] + #[doc = " - `P2` is proposal-count (code-bounded)"] + #[doc = "- DB:"] + #[doc = " - 2 storage reads (`Members`: codec `O(M)`, `Prime`: codec `O(1)`)"] + #[doc = " - 3 mutations (`Voting`: codec `O(M)`, `ProposalOf`: codec `O(B)`, `Proposals`: codec"] + #[doc = " `O(P2)`)"] + #[doc = " - any mutations done while executing `proposal` (`P1`)"] + #[doc = "- up to 3 events"] + #[doc = "# "] + close { + proposal_hash: ::subxt::sp_core::H256, + #[codec(compact)] + index: ::core::primitive::u32, + #[codec(compact)] + proposal_weight_bound: ::core::primitive::u64, + #[codec(compact)] + length_bound: ::core::primitive::u32, + }, + #[codec(index = 5)] + #[doc = "Disapprove a proposal, close, and remove it from the system, regardless of its current"] + #[doc = "state."] + #[doc = ""] + #[doc = "Must be called by the Root origin."] + #[doc = ""] + #[doc = "Parameters:"] + #[doc = "* `proposal_hash`: The hash of the proposal that should be disapproved."] + #[doc = ""] + #[doc = "# "] + #[doc = "Complexity: O(P) where P is the number of max proposals"] + #[doc = "DB Weight:"] + #[doc = "* Reads: Proposals"] + #[doc = "* Writes: Voting, Proposals, ProposalOf"] + #[doc = "# "] + disapprove_proposal { + proposal_hash: ::subxt::sp_core::H256, + }, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Error { + #[codec(index = 0)] + #[doc = "Account is not a member"] + NotMember, + #[codec(index = 1)] + #[doc = "Duplicate proposals not allowed"] + DuplicateProposal, + #[codec(index = 2)] + #[doc = "Proposal must exist"] + ProposalMissing, + #[codec(index = 3)] + #[doc = "Mismatched index"] + WrongIndex, + #[codec(index = 4)] + #[doc = "Duplicate vote ignored"] + DuplicateVote, + #[codec(index = 5)] + #[doc = "Members are already initialized!"] + AlreadyInitialized, + #[codec(index = 6)] + #[doc = "The close call was made too early, before the end of the voting."] + TooEarly, + #[codec(index = 7)] + #[doc = "There can only be a maximum of `MaxProposals` active proposals."] + TooManyProposals, + #[codec(index = 8)] + #[doc = "The given weight bound for the proposal was too low."] + WrongProposalWeight, + #[codec(index = 9)] + #[doc = "The given length bound for the proposal was too low."] + WrongProposalLength, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Event { + #[codec(index = 0)] + #[doc = "A motion (given hash) has been proposed (by given account) with a threshold (given"] + #[doc = "`MemberCount`)."] + Proposed { + account: ::subxt::sp_core::crypto::AccountId32, + proposal_index: ::core::primitive::u32, + proposal_hash: ::subxt::sp_core::H256, + threshold: ::core::primitive::u32, + }, + #[codec(index = 1)] + #[doc = "A motion (given hash) has been voted on by given account, leaving"] + #[doc = "a tally (yes votes and no votes given respectively as `MemberCount`)."] + Voted { + account: ::subxt::sp_core::crypto::AccountId32, + proposal_hash: ::subxt::sp_core::H256, + voted: ::core::primitive::bool, + yes: ::core::primitive::u32, + no: ::core::primitive::u32, + }, + #[codec(index = 2)] + #[doc = "A motion was approved by the 
required threshold."] + Approved { + proposal_hash: ::subxt::sp_core::H256, + }, + #[codec(index = 3)] + #[doc = "A motion was not approved by the required threshold."] + Disapproved { + proposal_hash: ::subxt::sp_core::H256, + }, + #[codec(index = 4)] + #[doc = "A motion was executed; result will be `Ok` if it returned without error."] + Executed { + proposal_hash: ::subxt::sp_core::H256, + result: + ::core::result::Result<(), runtime_types::sp_runtime::DispatchError>, + }, + #[codec(index = 5)] + #[doc = "A single member did some action; result will be `Ok` if it returned without error."] + MemberExecuted { + proposal_hash: ::subxt::sp_core::H256, + result: + ::core::result::Result<(), runtime_types::sp_runtime::DispatchError>, + }, + #[codec(index = 6)] + #[doc = "A proposal was closed because its threshold was reached or after its duration was up."] + Closed { + proposal_hash: ::subxt::sp_core::H256, + yes: ::core::primitive::u32, + no: ::core::primitive::u32, + }, + } + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum RawOrigin<_0> { + #[codec(index = 0)] + Members(::core::primitive::u32, ::core::primitive::u32), + #[codec(index = 1)] + Member(_0), + #[codec(index = 2)] + _Phantom, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct Votes<_0, _1> { + pub index: _1, + pub threshold: _1, + pub ayes: ::std::vec::Vec<_0>, + pub nays: ::std::vec::Vec<_0>, + pub end: _1, + } + } + pub mod pallet_grandpa { + use super::runtime_types; + pub mod pallet { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Call { + #[codec(index = 0)] + #[doc = "Report voter equivocation/misbehavior. This method will verify the"] + #[doc = "equivocation proof and validate the given key ownership proof"] + #[doc = "against the extracted offender. If both are valid, the offence"] + #[doc = "will be reported."] + report_equivocation { + equivocation_proof: ::std::boxed::Box< + runtime_types::sp_finality_grandpa::EquivocationProof< + ::subxt::sp_core::H256, + ::core::primitive::u32, + >, + >, + key_owner_proof: runtime_types::sp_session::MembershipProof, + }, + #[codec(index = 1)] + #[doc = "Report voter equivocation/misbehavior. This method will verify the"] + #[doc = "equivocation proof and validate the given key ownership proof"] + #[doc = "against the extracted offender. If both are valid, the offence"] + #[doc = "will be reported."] + #[doc = ""] + #[doc = "This extrinsic must be called unsigned and it is expected that only"] + #[doc = "block authors will call it (validated in `ValidateUnsigned`), as such"] + #[doc = "if the block author is defined it will be defined as the equivocation"] + #[doc = "reporter."] + report_equivocation_unsigned { + equivocation_proof: ::std::boxed::Box< + runtime_types::sp_finality_grandpa::EquivocationProof< + ::subxt::sp_core::H256, + ::core::primitive::u32, + >, + >, + key_owner_proof: runtime_types::sp_session::MembershipProof, + }, + #[codec(index = 2)] + #[doc = "Note that the current authority set of the GRANDPA finality gadget has"] + #[doc = "stalled. This will trigger a forced authority set change at the beginning"] + #[doc = "of the next session, to be enacted `delay` blocks after that. The delay"] + #[doc = "should be high enough to safely assume that the block signalling the"] + #[doc = "forced change will not be re-orged (e.g. 1000 blocks). 
The GRANDPA voters"] + #[doc = "will start the new authority set using the given finalized block as base."] + #[doc = "Only callable by root."] + note_stalled { + delay: ::core::primitive::u32, + best_finalized_block_number: ::core::primitive::u32, + }, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Error { + #[codec(index = 0)] + #[doc = "Attempt to signal GRANDPA pause when the authority set isn't live"] + #[doc = "(either paused or already pending pause)."] + PauseFailed, + #[codec(index = 1)] + #[doc = "Attempt to signal GRANDPA resume when the authority set isn't paused"] + #[doc = "(either live or already pending resume)."] + ResumeFailed, + #[codec(index = 2)] + #[doc = "Attempt to signal GRANDPA change with one already pending."] + ChangePending, + #[codec(index = 3)] + #[doc = "Cannot signal forced change so soon after last."] + TooSoon, + #[codec(index = 4)] + #[doc = "A key ownership proof provided as part of an equivocation report is invalid."] + InvalidKeyOwnershipProof, + #[codec(index = 5)] + #[doc = "An equivocation proof provided as part of an equivocation report is invalid."] + InvalidEquivocationProof, + #[codec(index = 6)] + #[doc = "A given equivocation report is valid but already previously reported."] + DuplicateOffenceReport, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Event { + #[codec(index = 0)] + #[doc = "New authority set has been applied."] + NewAuthorities { + authority_set: ::std::vec::Vec<( + runtime_types::sp_finality_grandpa::app::Public, + ::core::primitive::u64, + )>, + }, + #[codec(index = 1)] + #[doc = "Current authority set has been paused."] + Paused, + #[codec(index = 2)] + #[doc = "Current authority set has been resumed."] + Resumed, + } + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct StoredPendingChange<_0> { + pub scheduled_at: _0, + pub delay: _0, + pub next_authorities: + runtime_types::frame_support::storage::weak_bounded_vec::WeakBoundedVec<( + runtime_types::sp_finality_grandpa::app::Public, + ::core::primitive::u64, + )>, + pub forced: ::core::option::Option<_0>, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum StoredState<_0> { + #[codec(index = 0)] + Live, + #[codec(index = 1)] + PendingPause { scheduled_at: _0, delay: _0 }, + #[codec(index = 2)] + Paused, + #[codec(index = 3)] + PendingResume { scheduled_at: _0, delay: _0 }, + } + } + pub mod pallet_im_online { + use super::runtime_types; + pub mod pallet { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Call { + #[codec(index = 0)] + #[doc = "# "] + #[doc = "- Complexity: `O(K + E)` where K is length of `Keys` (heartbeat.validators_len) and E is"] + #[doc = " length of `heartbeat.network_state.external_address`"] + #[doc = " - `O(K)`: decoding of length `K`"] + #[doc = " - `O(E)`: decoding/encoding of length `E`"] + #[doc = "- DbReads: pallet_session `Validators`, pallet_session `CurrentIndex`, `Keys`,"] + #[doc = " `ReceivedHeartbeats`"] + #[doc = "- DbWrites: `ReceivedHeartbeats`"] + #[doc = "# "] + heartbeat { + heartbeat: + runtime_types::pallet_im_online::Heartbeat<::core::primitive::u32>, + signature: runtime_types::pallet_im_online::sr25519::app_sr25519::Signature, + }, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Error { + #[codec(index = 0)] + #[doc = "Non existent public key."] + 
InvalidKey, + #[codec(index = 1)] + #[doc = "Duplicated heartbeat."] + DuplicatedHeartbeat, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Event { + #[codec(index = 0)] + #[doc = "A new heartbeat was received from `AuthorityId`."] + HeartbeatReceived { + authority_id: runtime_types::pallet_im_online::sr25519::app_sr25519::Public, + }, + #[codec(index = 1)] + #[doc = "At the end of the session, no offence was committed."] + AllGood, + #[codec(index = 2)] + #[doc = "At the end of the session, at least one validator was found to be offline."] + SomeOffline { + offline: ::std::vec::Vec<(::subxt::sp_core::crypto::AccountId32, ())>, + }, + } + } + pub mod sr25519 { + use super::runtime_types; + pub mod app_sr25519 { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct Public(pub runtime_types::sp_core::sr25519::Public); + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct Signature(pub runtime_types::sp_core::sr25519::Signature); + } + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct BoundedOpaqueNetworkState { + pub peer_id: + runtime_types::frame_support::storage::weak_bounded_vec::WeakBoundedVec< + ::core::primitive::u8, + >, + pub external_addresses: + runtime_types::frame_support::storage::weak_bounded_vec::WeakBoundedVec< + runtime_types::frame_support::storage::weak_bounded_vec::WeakBoundedVec< + ::core::primitive::u8, + >, + >, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct Heartbeat<_0> { + pub block_number: _0, + pub network_state: runtime_types::sp_core::offchain::OpaqueNetworkState, + pub session_index: _0, + pub authority_index: _0, + pub validators_len: _0, + } + } + pub mod pallet_indices { + use super::runtime_types; + pub mod pallet { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Call { + #[codec(index = 0)] + #[doc = "Assign an previously unassigned index."] + #[doc = ""] + #[doc = "Payment: `Deposit` is reserved from the sender account."] + #[doc = ""] + #[doc = "The dispatch origin for this call must be _Signed_."] + #[doc = ""] + #[doc = "- `index`: the index to be claimed. This must not be in use."] + #[doc = ""] + #[doc = "Emits `IndexAssigned` if successful."] + #[doc = ""] + #[doc = "# "] + #[doc = "- `O(1)`."] + #[doc = "- One storage mutation (codec `O(1)`)."] + #[doc = "- One reserve operation."] + #[doc = "- One event."] + #[doc = "-------------------"] + #[doc = "- DB Weight: 1 Read/Write (Accounts)"] + #[doc = "# "] + claim { index: ::core::primitive::u32 }, + #[codec(index = 1)] + #[doc = "Assign an index already owned by the sender to another account. The balance reservation"] + #[doc = "is effectively transferred to the new account."] + #[doc = ""] + #[doc = "The dispatch origin for this call must be _Signed_."] + #[doc = ""] + #[doc = "- `index`: the index to be re-assigned. This must be owned by the sender."] + #[doc = "- `new`: the new owner of the index. 
This function is a no-op if it is equal to sender."] + #[doc = ""] + #[doc = "Emits `IndexAssigned` if successful."] + #[doc = ""] + #[doc = "# "] + #[doc = "- `O(1)`."] + #[doc = "- One storage mutation (codec `O(1)`)."] + #[doc = "- One transfer operation."] + #[doc = "- One event."] + #[doc = "-------------------"] + #[doc = "- DB Weight:"] + #[doc = " - Reads: Indices Accounts, System Account (recipient)"] + #[doc = " - Writes: Indices Accounts, System Account (recipient)"] + #[doc = "# "] + transfer { + new: ::subxt::sp_core::crypto::AccountId32, + index: ::core::primitive::u32, + }, + #[codec(index = 2)] + #[doc = "Free up an index owned by the sender."] + #[doc = ""] + #[doc = "Payment: Any previous deposit placed for the index is unreserved in the sender account."] + #[doc = ""] + #[doc = "The dispatch origin for this call must be _Signed_ and the sender must own the index."] + #[doc = ""] + #[doc = "- `index`: the index to be freed. This must be owned by the sender."] + #[doc = ""] + #[doc = "Emits `IndexFreed` if successful."] + #[doc = ""] + #[doc = "# "] + #[doc = "- `O(1)`."] + #[doc = "- One storage mutation (codec `O(1)`)."] + #[doc = "- One reserve operation."] + #[doc = "- One event."] + #[doc = "-------------------"] + #[doc = "- DB Weight: 1 Read/Write (Accounts)"] + #[doc = "# "] + free { index: ::core::primitive::u32 }, + #[codec(index = 3)] + #[doc = "Force an index to an account. This doesn't require a deposit. If the index is already"] + #[doc = "held, then any deposit is reimbursed to its current owner."] + #[doc = ""] + #[doc = "The dispatch origin for this call must be _Root_."] + #[doc = ""] + #[doc = "- `index`: the index to be (re-)assigned."] + #[doc = "- `new`: the new owner of the index. This function is a no-op if it is equal to sender."] + #[doc = "- `freeze`: if set to `true`, will freeze the index so it cannot be transferred."] + #[doc = ""] + #[doc = "Emits `IndexAssigned` if successful."] + #[doc = ""] + #[doc = "# "] + #[doc = "- `O(1)`."] + #[doc = "- One storage mutation (codec `O(1)`)."] + #[doc = "- Up to one reserve operation."] + #[doc = "- One event."] + #[doc = "-------------------"] + #[doc = "- DB Weight:"] + #[doc = " - Reads: Indices Accounts, System Account (original owner)"] + #[doc = " - Writes: Indices Accounts, System Account (original owner)"] + #[doc = "# "] + force_transfer { + new: ::subxt::sp_core::crypto::AccountId32, + index: ::core::primitive::u32, + freeze: ::core::primitive::bool, + }, + #[codec(index = 4)] + #[doc = "Freeze an index so it will always point to the sender account. 
This consumes the"] + #[doc = "deposit."] + #[doc = ""] + #[doc = "The dispatch origin for this call must be _Signed_ and the signing account must have a"] + #[doc = "non-frozen account `index`."] + #[doc = ""] + #[doc = "- `index`: the index to be frozen in place."] + #[doc = ""] + #[doc = "Emits `IndexFrozen` if successful."] + #[doc = ""] + #[doc = "# "] + #[doc = "- `O(1)`."] + #[doc = "- One storage mutation (codec `O(1)`)."] + #[doc = "- Up to one slash operation."] + #[doc = "- One event."] + #[doc = "-------------------"] + #[doc = "- DB Weight: 1 Read/Write (Accounts)"] + #[doc = "# "] + freeze { index: ::core::primitive::u32 }, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Error { + #[codec(index = 0)] + #[doc = "The index was not already assigned."] + NotAssigned, + #[codec(index = 1)] + #[doc = "The index is assigned to another account."] + NotOwner, + #[codec(index = 2)] + #[doc = "The index was not available."] + InUse, + #[codec(index = 3)] + #[doc = "The source and destination accounts are identical."] + NotTransfer, + #[codec(index = 4)] + #[doc = "The index is permanent and may not be freed/changed."] + Permanent, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Event { + #[codec(index = 0)] + #[doc = "A account index was assigned."] + IndexAssigned { + who: ::subxt::sp_core::crypto::AccountId32, + index: ::core::primitive::u32, + }, + #[codec(index = 1)] + #[doc = "A account index has been freed up (unassigned)."] + IndexFreed { index: ::core::primitive::u32 }, + #[codec(index = 2)] + #[doc = "A account index has been frozen to its current account ID."] + IndexFrozen { + index: ::core::primitive::u32, + who: ::subxt::sp_core::crypto::AccountId32, + }, + } + } + } + pub mod pallet_membership { + use super::runtime_types; + pub mod pallet { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Call { + #[codec(index = 0)] + #[doc = "Add a member `who` to the set."] + #[doc = ""] + #[doc = "May only be called from `T::AddOrigin`."] + add_member { + who: ::subxt::sp_core::crypto::AccountId32, + }, + #[codec(index = 1)] + #[doc = "Remove a member `who` from the set."] + #[doc = ""] + #[doc = "May only be called from `T::RemoveOrigin`."] + remove_member { + who: ::subxt::sp_core::crypto::AccountId32, + }, + #[codec(index = 2)] + #[doc = "Swap out one member `remove` for another `add`."] + #[doc = ""] + #[doc = "May only be called from `T::SwapOrigin`."] + #[doc = ""] + #[doc = "Prime membership is *not* passed from `remove` to `add`, if extant."] + swap_member { + remove: ::subxt::sp_core::crypto::AccountId32, + add: ::subxt::sp_core::crypto::AccountId32, + }, + #[codec(index = 3)] + #[doc = "Change the membership to a new set, disregarding the existing membership. Be nice and"] + #[doc = "pass `members` pre-sorted."] + #[doc = ""] + #[doc = "May only be called from `T::ResetOrigin`."] + reset_members { + members: ::std::vec::Vec<::subxt::sp_core::crypto::AccountId32>, + }, + #[codec(index = 4)] + #[doc = "Swap out the sending member for some other key `new`."] + #[doc = ""] + #[doc = "May only be called from `Signed` origin of a current member."] + #[doc = ""] + #[doc = "Prime membership is passed from the origin account to `new`, if extant."] + change_key { + new: ::subxt::sp_core::crypto::AccountId32, + }, + #[codec(index = 5)] + #[doc = "Set the prime member. 
Must be a current member."] + #[doc = ""] + #[doc = "May only be called from `T::PrimeOrigin`."] + set_prime { + who: ::subxt::sp_core::crypto::AccountId32, + }, + #[codec(index = 6)] + #[doc = "Remove the prime member if it exists."] + #[doc = ""] + #[doc = "May only be called from `T::PrimeOrigin`."] + clear_prime, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Error { + #[codec(index = 0)] + #[doc = "Already a member."] + AlreadyMember, + #[codec(index = 1)] + #[doc = "Not a member."] + NotMember, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Event { + #[codec(index = 0)] + #[doc = "The given member was added; see the transaction for who."] + MemberAdded, + #[codec(index = 1)] + #[doc = "The given member was removed; see the transaction for who."] + MemberRemoved, + #[codec(index = 2)] + #[doc = "Two members were swapped; see the transaction for who."] + MembersSwapped, + #[codec(index = 3)] + #[doc = "The membership was reset; see the transaction for who the new set is."] + MembersReset, + #[codec(index = 4)] + #[doc = "One of the members' keys changed."] + KeyChanged, + #[codec(index = 5)] + #[doc = "Phantom member, never used."] + Dummy, + } + } + } + pub mod pallet_multisig { + use super::runtime_types; + pub mod pallet { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Call { + #[codec(index = 0)] + #[doc = "Immediately dispatch a multi-signature call using a single approval from the caller."] + #[doc = ""] + #[doc = "The dispatch origin for this call must be _Signed_."] + #[doc = ""] + #[doc = "- `other_signatories`: The accounts (other than the sender) who are part of the"] + #[doc = "multi-signature, but do not participate in the approval process."] + #[doc = "- `call`: The call to be executed."] + #[doc = ""] + #[doc = "Result is equivalent to the dispatched result."] + #[doc = ""] + #[doc = "# "] + #[doc = "O(Z + C) where Z is the length of the call and C its execution weight."] + #[doc = "-------------------------------"] + #[doc = "- DB Weight: None"] + #[doc = "- Plus Call Weight"] + #[doc = "# "] + as_multi_threshold_1 { + other_signatories: ::std::vec::Vec<::subxt::sp_core::crypto::AccountId32>, + call: ::std::boxed::Box, + }, + #[codec(index = 1)] + #[doc = "Register approval for a dispatch to be made from a deterministic composite account if"] + #[doc = "approved by a total of `threshold - 1` of `other_signatories`."] + #[doc = ""] + #[doc = "If there are enough, then dispatch the call."] + #[doc = ""] + #[doc = "Payment: `DepositBase` will be reserved if this is the first approval, plus"] + #[doc = "`threshold` times `DepositFactor`. It is returned once this dispatch happens or"] + #[doc = "is cancelled."] + #[doc = ""] + #[doc = "The dispatch origin for this call must be _Signed_."] + #[doc = ""] + #[doc = "- `threshold`: The total number of approvals for this dispatch before it is executed."] + #[doc = "- `other_signatories`: The accounts (other than the sender) who can approve this"] + #[doc = "dispatch. May not be empty."] + #[doc = "- `maybe_timepoint`: If this is the first approval, then this must be `None`. 
If it is"] + #[doc = "not the first approval, then it must be `Some`, with the timepoint (block number and"] + #[doc = "transaction index) of the first approval transaction."] + #[doc = "- `call`: The call to be executed."] + #[doc = ""] + #[doc = "NOTE: Unless this is the final approval, you will generally want to use"] + #[doc = "`approve_as_multi` instead, since it only requires a hash of the call."] + #[doc = ""] + #[doc = "Result is equivalent to the dispatched result if `threshold` is exactly `1`. Otherwise"] + #[doc = "on success, result is `Ok` and the result from the interior call, if it was executed,"] + #[doc = "may be found in the deposited `MultisigExecuted` event."] + #[doc = ""] + #[doc = "# "] + #[doc = "- `O(S + Z + Call)`."] + #[doc = "- Up to one balance-reserve or unreserve operation."] + #[doc = "- One passthrough operation, one insert, both `O(S)` where `S` is the number of"] + #[doc = " signatories. `S` is capped by `MaxSignatories`, with weight being proportional."] + #[doc = "- One call encode & hash, both of complexity `O(Z)` where `Z` is tx-len."] + #[doc = "- One encode & hash, both of complexity `O(S)`."] + #[doc = "- Up to one binary search and insert (`O(logS + S)`)."] + #[doc = "- I/O: 1 read `O(S)`, up to 1 mutate `O(S)`. Up to one remove."] + #[doc = "- One event."] + #[doc = "- The weight of the `call`."] + #[doc = "- Storage: inserts one item, value size bounded by `MaxSignatories`, with a deposit"] + #[doc = " taken for its lifetime of `DepositBase + threshold * DepositFactor`."] + #[doc = "-------------------------------"] + #[doc = "- DB Weight:"] + #[doc = " - Reads: Multisig Storage, [Caller Account], Calls (if `store_call`)"] + #[doc = " - Writes: Multisig Storage, [Caller Account], Calls (if `store_call`)"] + #[doc = "- Plus Call Weight"] + #[doc = "# "] + as_multi { + threshold: ::core::primitive::u16, + other_signatories: ::std::vec::Vec<::subxt::sp_core::crypto::AccountId32>, + maybe_timepoint: ::core::option::Option< + runtime_types::pallet_multisig::Timepoint<::core::primitive::u32>, + >, + call: ::subxt::WrapperKeepOpaque, + store_call: ::core::primitive::bool, + max_weight: ::core::primitive::u64, + }, + #[codec(index = 2)] + #[doc = "Register approval for a dispatch to be made from a deterministic composite account if"] + #[doc = "approved by a total of `threshold - 1` of `other_signatories`."] + #[doc = ""] + #[doc = "Payment: `DepositBase` will be reserved if this is the first approval, plus"] + #[doc = "`threshold` times `DepositFactor`. It is returned once this dispatch happens or"] + #[doc = "is cancelled."] + #[doc = ""] + #[doc = "The dispatch origin for this call must be _Signed_."] + #[doc = ""] + #[doc = "- `threshold`: The total number of approvals for this dispatch before it is executed."] + #[doc = "- `other_signatories`: The accounts (other than the sender) who can approve this"] + #[doc = "dispatch. May not be empty."] + #[doc = "- `maybe_timepoint`: If this is the first approval, then this must be `None`. 
If it is"] + #[doc = "not the first approval, then it must be `Some`, with the timepoint (block number and"] + #[doc = "transaction index) of the first approval transaction."] + #[doc = "- `call_hash`: The hash of the call to be executed."] + #[doc = ""] + #[doc = "NOTE: If this is the final approval, you will want to use `as_multi` instead."] + #[doc = ""] + #[doc = "# "] + #[doc = "- `O(S)`."] + #[doc = "- Up to one balance-reserve or unreserve operation."] + #[doc = "- One passthrough operation, one insert, both `O(S)` where `S` is the number of"] + #[doc = " signatories. `S` is capped by `MaxSignatories`, with weight being proportional."] + #[doc = "- One encode & hash, both of complexity `O(S)`."] + #[doc = "- Up to one binary search and insert (`O(logS + S)`)."] + #[doc = "- I/O: 1 read `O(S)`, up to 1 mutate `O(S)`. Up to one remove."] + #[doc = "- One event."] + #[doc = "- Storage: inserts one item, value size bounded by `MaxSignatories`, with a deposit"] + #[doc = " taken for its lifetime of `DepositBase + threshold * DepositFactor`."] + #[doc = "----------------------------------"] + #[doc = "- DB Weight:"] + #[doc = " - Read: Multisig Storage, [Caller Account]"] + #[doc = " - Write: Multisig Storage, [Caller Account]"] + #[doc = "# "] + approve_as_multi { + threshold: ::core::primitive::u16, + other_signatories: ::std::vec::Vec<::subxt::sp_core::crypto::AccountId32>, + maybe_timepoint: ::core::option::Option< + runtime_types::pallet_multisig::Timepoint<::core::primitive::u32>, + >, + call_hash: [::core::primitive::u8; 32usize], + max_weight: ::core::primitive::u64, + }, + #[codec(index = 3)] + #[doc = "Cancel a pre-existing, on-going multisig transaction. Any deposit reserved previously"] + #[doc = "for this operation will be unreserved on success."] + #[doc = ""] + #[doc = "The dispatch origin for this call must be _Signed_."] + #[doc = ""] + #[doc = "- `threshold`: The total number of approvals for this dispatch before it is executed."] + #[doc = "- `other_signatories`: The accounts (other than the sender) who can approve this"] + #[doc = "dispatch. May not be empty."] + #[doc = "- `timepoint`: The timepoint (block number and transaction index) of the first approval"] + #[doc = "transaction for this dispatch."] + #[doc = "- `call_hash`: The hash of the call to be executed."] + #[doc = ""] + #[doc = "# "] + #[doc = "- `O(S)`."] + #[doc = "- Up to one balance-reserve or unreserve operation."] + #[doc = "- One passthrough operation, one insert, both `O(S)` where `S` is the number of"] + #[doc = " signatories. 
`S` is capped by `MaxSignatories`, with weight being proportional."] + #[doc = "- One encode & hash, both of complexity `O(S)`."] + #[doc = "- One event."] + #[doc = "- I/O: 1 read `O(S)`, one remove."] + #[doc = "- Storage: removes one item."] + #[doc = "----------------------------------"] + #[doc = "- DB Weight:"] + #[doc = " - Read: Multisig Storage, [Caller Account], Refund Account, Calls"] + #[doc = " - Write: Multisig Storage, [Caller Account], Refund Account, Calls"] + #[doc = "# "] + cancel_as_multi { + threshold: ::core::primitive::u16, + other_signatories: ::std::vec::Vec<::subxt::sp_core::crypto::AccountId32>, + timepoint: + runtime_types::pallet_multisig::Timepoint<::core::primitive::u32>, + call_hash: [::core::primitive::u8; 32usize], + }, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Error { + #[codec(index = 0)] + #[doc = "Threshold must be 2 or greater."] + MinimumThreshold, + #[codec(index = 1)] + #[doc = "Call is already approved by this signatory."] + AlreadyApproved, + #[codec(index = 2)] + #[doc = "Call doesn't need any (more) approvals."] + NoApprovalsNeeded, + #[codec(index = 3)] + #[doc = "There are too few signatories in the list."] + TooFewSignatories, + #[codec(index = 4)] + #[doc = "There are too many signatories in the list."] + TooManySignatories, + #[codec(index = 5)] + #[doc = "The signatories were provided out of order; they should be ordered."] + SignatoriesOutOfOrder, + #[codec(index = 6)] + #[doc = "The sender was contained in the other signatories; it shouldn't be."] + SenderInSignatories, + #[codec(index = 7)] + #[doc = "Multisig operation not found when attempting to cancel."] + NotFound, + #[codec(index = 8)] + #[doc = "Only the account that originally created the multisig is able to cancel it."] + NotOwner, + #[codec(index = 9)] + #[doc = "No timepoint was given, yet the multisig operation is already underway."] + NoTimepoint, + #[codec(index = 10)] + #[doc = "A different timepoint was given to the multisig operation that is underway."] + WrongTimepoint, + #[codec(index = 11)] + #[doc = "A timepoint was given, yet no multisig operation is underway."] + UnexpectedTimepoint, + #[codec(index = 12)] + #[doc = "The maximum weight information provided was too low."] + MaxWeightTooLow, + #[codec(index = 13)] + #[doc = "The data to be stored is already stored."] + AlreadyStored, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Event { + #[codec(index = 0)] + #[doc = "A new multisig operation has begun."] + NewMultisig { + approving: ::subxt::sp_core::crypto::AccountId32, + multisig: ::subxt::sp_core::crypto::AccountId32, + call_hash: [::core::primitive::u8; 32usize], + }, + #[codec(index = 1)] + #[doc = "A multisig operation has been approved by someone."] + MultisigApproval { + approving: ::subxt::sp_core::crypto::AccountId32, + timepoint: + runtime_types::pallet_multisig::Timepoint<::core::primitive::u32>, + multisig: ::subxt::sp_core::crypto::AccountId32, + call_hash: [::core::primitive::u8; 32usize], + }, + #[codec(index = 2)] + #[doc = "A multisig operation has been executed."] + MultisigExecuted { + approving: ::subxt::sp_core::crypto::AccountId32, + timepoint: + runtime_types::pallet_multisig::Timepoint<::core::primitive::u32>, + multisig: ::subxt::sp_core::crypto::AccountId32, + call_hash: [::core::primitive::u8; 32usize], + result: + ::core::result::Result<(), runtime_types::sp_runtime::DispatchError>, + }, + #[codec(index = 3)] + #[doc = "A multisig operation 
has been cancelled."] + MultisigCancelled { + cancelling: ::subxt::sp_core::crypto::AccountId32, + timepoint: + runtime_types::pallet_multisig::Timepoint<::core::primitive::u32>, + multisig: ::subxt::sp_core::crypto::AccountId32, + call_hash: [::core::primitive::u8; 32usize], + }, + } + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct Multisig<_0, _1, _2> { + pub when: runtime_types::pallet_multisig::Timepoint<_0>, + pub deposit: _1, + pub depositor: _2, + pub approvals: ::std::vec::Vec<_2>, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct Timepoint<_0> { + pub height: _0, + pub index: _0, + } + } + pub mod pallet_offences { + use super::runtime_types; + pub mod pallet { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Event { + #[codec(index = 0)] + #[doc = "There is an offence reported of the given `kind` happened at the `session_index` and"] + #[doc = "(kind-specific) time slot. This event is not deposited for duplicate slashes."] + #[doc = "\\[kind, timeslot\\]."] + Offence { + kind: [::core::primitive::u8; 16usize], + timeslot: ::std::vec::Vec<::core::primitive::u8>, + }, + } + } + } + pub mod pallet_proxy { + use super::runtime_types; + pub mod pallet { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Call { + #[codec(index = 0)] + #[doc = "Dispatch the given `call` from an account that the sender is authorised for through"] + #[doc = "`add_proxy`."] + #[doc = ""] + #[doc = "Removes any corresponding announcement(s)."] + #[doc = ""] + #[doc = "The dispatch origin for this call must be _Signed_."] + #[doc = ""] + #[doc = "Parameters:"] + #[doc = "- `real`: The account that the proxy will make a call on behalf of."] + #[doc = "- `force_proxy_type`: Specify the exact proxy type to be used and checked for this call."] + #[doc = "- `call`: The call to be made by the `real` account."] + #[doc = ""] + #[doc = "# "] + #[doc = "Weight is a function of the number of proxies the user has (P)."] + #[doc = "# "] + proxy { + real: ::subxt::sp_core::crypto::AccountId32, + force_proxy_type: + ::core::option::Option, + call: ::std::boxed::Box, + }, + #[codec(index = 1)] + #[doc = "Register a proxy account for the sender that is able to make calls on its behalf."] + #[doc = ""] + #[doc = "The dispatch origin for this call must be _Signed_."] + #[doc = ""] + #[doc = "Parameters:"] + #[doc = "- `proxy`: The account that the `caller` would like to make a proxy."] + #[doc = "- `proxy_type`: The permissions allowed for this proxy account."] + #[doc = "- `delay`: The announcement period required of the initial proxy. 
Will generally be"] + #[doc = "zero."] + #[doc = ""] + #[doc = "# "] + #[doc = "Weight is a function of the number of proxies the user has (P)."] + #[doc = "# "] + add_proxy { + delegate: ::subxt::sp_core::crypto::AccountId32, + proxy_type: runtime_types::rococo_runtime::ProxyType, + delay: ::core::primitive::u32, + }, + #[codec(index = 2)] + #[doc = "Unregister a proxy account for the sender."] + #[doc = ""] + #[doc = "The dispatch origin for this call must be _Signed_."] + #[doc = ""] + #[doc = "Parameters:"] + #[doc = "- `proxy`: The account that the `caller` would like to remove as a proxy."] + #[doc = "- `proxy_type`: The permissions currently enabled for the removed proxy account."] + #[doc = ""] + #[doc = "# "] + #[doc = "Weight is a function of the number of proxies the user has (P)."] + #[doc = "# "] + remove_proxy { + delegate: ::subxt::sp_core::crypto::AccountId32, + proxy_type: runtime_types::rococo_runtime::ProxyType, + delay: ::core::primitive::u32, + }, + #[codec(index = 3)] + #[doc = "Unregister all proxy accounts for the sender."] + #[doc = ""] + #[doc = "The dispatch origin for this call must be _Signed_."] + #[doc = ""] + #[doc = "WARNING: This may be called on accounts created by `anonymous`, however if done, then"] + #[doc = "the unreserved fees will be inaccessible. **All access to this account will be lost.**"] + #[doc = ""] + #[doc = "# "] + #[doc = "Weight is a function of the number of proxies the user has (P)."] + #[doc = "# "] + remove_proxies, + #[codec(index = 4)] + #[doc = "Spawn a fresh new account that is guaranteed to be otherwise inaccessible, and"] + #[doc = "initialize it with a proxy of `proxy_type` for `origin` sender."] + #[doc = ""] + #[doc = "Requires a `Signed` origin."] + #[doc = ""] + #[doc = "- `proxy_type`: The type of the proxy that the sender will be registered as over the"] + #[doc = "new account. This will almost always be the most permissive `ProxyType` possible to"] + #[doc = "allow for maximum flexibility."] + #[doc = "- `index`: A disambiguation index, in case this is called multiple times in the same"] + #[doc = "transaction (e.g. with `utility::batch`). Unless you're using `batch` you probably just"] + #[doc = "want to use `0`."] + #[doc = "- `delay`: The announcement period required of the initial proxy. Will generally be"] + #[doc = "zero."] + #[doc = ""] + #[doc = "Fails with `Duplicate` if this has already been called in this transaction, from the"] + #[doc = "same sender, with the same parameters."] + #[doc = ""] + #[doc = "Fails if there are insufficient funds to pay for deposit."] + #[doc = ""] + #[doc = "# "] + #[doc = "Weight is a function of the number of proxies the user has (P)."] + #[doc = "# "] + #[doc = "TODO: Might be over counting 1 read"] + anonymous { + proxy_type: runtime_types::rococo_runtime::ProxyType, + delay: ::core::primitive::u32, + index: ::core::primitive::u16, + }, + #[codec(index = 5)] + #[doc = "Removes a previously spawned anonymous proxy."] + #[doc = ""] + #[doc = "WARNING: **All access to this account will be lost.** Any funds held in it will be"] + #[doc = "inaccessible."] + #[doc = ""] + #[doc = "Requires a `Signed` origin, and the sender account must have been created by a call to"] + #[doc = "`anonymous` with corresponding parameters."] + #[doc = ""] + #[doc = "- `spawner`: The account that originally called `anonymous` to create this account."] + #[doc = "- `index`: The disambiguation index originally passed to `anonymous`. 
Probably `0`."] + #[doc = "- `proxy_type`: The proxy type originally passed to `anonymous`."] + #[doc = "- `height`: The height of the chain when the call to `anonymous` was processed."] + #[doc = "- `ext_index`: The extrinsic index in which the call to `anonymous` was processed."] + #[doc = ""] + #[doc = "Fails with `NoPermission` in case the caller is not a previously created anonymous"] + #[doc = "account whose `anonymous` call has corresponding parameters."] + #[doc = ""] + #[doc = "# "] + #[doc = "Weight is a function of the number of proxies the user has (P)."] + #[doc = "# "] + kill_anonymous { + spawner: ::subxt::sp_core::crypto::AccountId32, + proxy_type: runtime_types::rococo_runtime::ProxyType, + index: ::core::primitive::u16, + #[codec(compact)] + height: ::core::primitive::u32, + #[codec(compact)] + ext_index: ::core::primitive::u32, + }, + #[codec(index = 6)] + #[doc = "Publish the hash of a proxy-call that will be made in the future."] + #[doc = ""] + #[doc = "This must be called some number of blocks before the corresponding `proxy` is attempted"] + #[doc = "if the delay associated with the proxy relationship is greater than zero."] + #[doc = ""] + #[doc = "No more than `MaxPending` announcements may be made at any one time."] + #[doc = ""] + #[doc = "This will take a deposit of `AnnouncementDepositFactor` as well as"] + #[doc = "`AnnouncementDepositBase` if there are no other pending announcements."] + #[doc = ""] + #[doc = "The dispatch origin for this call must be _Signed_ and a proxy of `real`."] + #[doc = ""] + #[doc = "Parameters:"] + #[doc = "- `real`: The account that the proxy will make a call on behalf of."] + #[doc = "- `call_hash`: The hash of the call to be made by the `real` account."] + #[doc = ""] + #[doc = "# "] + #[doc = "Weight is a function of:"] + #[doc = "- A: the number of announcements made."] + #[doc = "- P: the number of proxies the user has."] + #[doc = "# "] + announce { + real: ::subxt::sp_core::crypto::AccountId32, + call_hash: ::subxt::sp_core::H256, + }, + #[codec(index = 7)] + #[doc = "Remove a given announcement."] + #[doc = ""] + #[doc = "May be called by a proxy account to remove a call they previously announced and return"] + #[doc = "the deposit."] + #[doc = ""] + #[doc = "The dispatch origin for this call must be _Signed_."] + #[doc = ""] + #[doc = "Parameters:"] + #[doc = "- `real`: The account that the proxy will make a call on behalf of."] + #[doc = "- `call_hash`: The hash of the call to be made by the `real` account."] + #[doc = ""] + #[doc = "# "] + #[doc = "Weight is a function of:"] + #[doc = "- A: the number of announcements made."] + #[doc = "- P: the number of proxies the user has."] + #[doc = "# "] + remove_announcement { + real: ::subxt::sp_core::crypto::AccountId32, + call_hash: ::subxt::sp_core::H256, + }, + #[codec(index = 8)] + #[doc = "Remove the given announcement of a delegate."] + #[doc = ""] + #[doc = "May be called by a target (proxied) account to remove a call that one of their delegates"] + #[doc = "(`delegate`) has announced they want to execute. 
The deposit is returned."] + #[doc = ""] + #[doc = "The dispatch origin for this call must be _Signed_."] + #[doc = ""] + #[doc = "Parameters:"] + #[doc = "- `delegate`: The account that previously announced the call."] + #[doc = "- `call_hash`: The hash of the call to be made."] + #[doc = ""] + #[doc = "# "] + #[doc = "Weight is a function of:"] + #[doc = "- A: the number of announcements made."] + #[doc = "- P: the number of proxies the user has."] + #[doc = "# "] + reject_announcement { + delegate: ::subxt::sp_core::crypto::AccountId32, + call_hash: ::subxt::sp_core::H256, + }, + #[codec(index = 9)] + #[doc = "Dispatch the given `call` from an account that the sender is authorized for through"] + #[doc = "`add_proxy`."] + #[doc = ""] + #[doc = "Removes any corresponding announcement(s)."] + #[doc = ""] + #[doc = "The dispatch origin for this call must be _Signed_."] + #[doc = ""] + #[doc = "Parameters:"] + #[doc = "- `real`: The account that the proxy will make a call on behalf of."] + #[doc = "- `force_proxy_type`: Specify the exact proxy type to be used and checked for this call."] + #[doc = "- `call`: The call to be made by the `real` account."] + #[doc = ""] + #[doc = "# "] + #[doc = "Weight is a function of:"] + #[doc = "- A: the number of announcements made."] + #[doc = "- P: the number of proxies the user has."] + #[doc = "# "] + proxy_announced { + delegate: ::subxt::sp_core::crypto::AccountId32, + real: ::subxt::sp_core::crypto::AccountId32, + force_proxy_type: + ::core::option::Option, + call: ::std::boxed::Box, + }, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Error { + #[codec(index = 0)] + #[doc = "There are too many proxies registered or too many announcements pending."] + TooMany, + #[codec(index = 1)] + #[doc = "Proxy registration not found."] + NotFound, + #[codec(index = 2)] + #[doc = "Sender is not a proxy of the account to be proxied."] + NotProxy, + #[codec(index = 3)] + #[doc = "A call which is incompatible with the proxy type's filter was attempted."] + Unproxyable, + #[codec(index = 4)] + #[doc = "Account is already a proxy."] + Duplicate, + #[codec(index = 5)] + #[doc = "Call may not be made by proxy because it may escalate its privileges."] + NoPermission, + #[codec(index = 6)] + #[doc = "Announcement, if made at all, was made too recently."] + Unannounced, + #[codec(index = 7)] + #[doc = "Cannot add self as proxy."] + NoSelfProxy, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Event { + #[codec(index = 0)] + #[doc = "A proxy was executed correctly, with the given."] + ProxyExecuted { + result: + ::core::result::Result<(), runtime_types::sp_runtime::DispatchError>, + }, + #[codec(index = 1)] + #[doc = "Anonymous account has been created by new proxy with given"] + #[doc = "disambiguation index and proxy type."] + AnonymousCreated { + anonymous: ::subxt::sp_core::crypto::AccountId32, + who: ::subxt::sp_core::crypto::AccountId32, + proxy_type: runtime_types::rococo_runtime::ProxyType, + disambiguation_index: ::core::primitive::u16, + }, + #[codec(index = 2)] + #[doc = "An announcement was placed to make a call in the future."] + Announced { + real: ::subxt::sp_core::crypto::AccountId32, + proxy: ::subxt::sp_core::crypto::AccountId32, + call_hash: ::subxt::sp_core::H256, + }, + #[codec(index = 3)] + #[doc = "A proxy was added."] + ProxyAdded { + delegator: ::subxt::sp_core::crypto::AccountId32, + delegatee: ::subxt::sp_core::crypto::AccountId32, + proxy_type: 
runtime_types::rococo_runtime::ProxyType, + delay: ::core::primitive::u32, + }, + #[codec(index = 4)] + #[doc = "A proxy was removed."] + ProxyRemoved { + delegator: ::subxt::sp_core::crypto::AccountId32, + delegatee: ::subxt::sp_core::crypto::AccountId32, + proxy_type: runtime_types::rococo_runtime::ProxyType, + delay: ::core::primitive::u32, + }, + } + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct Announcement<_0, _1, _2> { + pub real: _0, + pub call_hash: _1, + pub height: _2, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct ProxyDefinition<_0, _1, _2> { + pub delegate: _0, + pub proxy_type: _1, + pub delay: _2, + } + } + pub mod pallet_session { + use super::runtime_types; + pub mod pallet { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Call { + #[codec(index = 0)] + #[doc = "Sets the session key(s) of the function caller to `keys`."] + #[doc = "Allows an account to set its session key prior to becoming a validator."] + #[doc = "This doesn't take effect until the next session."] + #[doc = ""] + #[doc = "The dispatch origin of this function must be signed."] + #[doc = ""] + #[doc = "# "] + #[doc = "- Complexity: `O(1)`. Actual cost depends on the number of length of"] + #[doc = " `T::Keys::key_ids()` which is fixed."] + #[doc = "- DbReads: `origin account`, `T::ValidatorIdOf`, `NextKeys`"] + #[doc = "- DbWrites: `origin account`, `NextKeys`"] + #[doc = "- DbReads per key id: `KeyOwner`"] + #[doc = "- DbWrites per key id: `KeyOwner`"] + #[doc = "# "] + set_keys { + keys: runtime_types::rococo_runtime::SessionKeys, + proof: ::std::vec::Vec<::core::primitive::u8>, + }, + #[codec(index = 1)] + #[doc = "Removes any session key(s) of the function caller."] + #[doc = ""] + #[doc = "This doesn't take effect until the next session."] + #[doc = ""] + #[doc = "The dispatch origin of this function must be Signed and the account must be either be"] + #[doc = "convertible to a validator ID using the chain's typical addressing system (this usually"] + #[doc = "means being a controller account) or directly convertible into a validator ID (which"] + #[doc = "usually means being a stash account)."] + #[doc = ""] + #[doc = "# "] + #[doc = "- Complexity: `O(1)` in number of key types. Actual cost depends on the number of length"] + #[doc = " of `T::Keys::key_ids()` which is fixed."] + #[doc = "- DbReads: `T::ValidatorIdOf`, `NextKeys`, `origin account`"] + #[doc = "- DbWrites: `NextKeys`, `origin account`"] + #[doc = "- DbWrites per key id: `KeyOwner`"] + #[doc = "# "] + purge_keys, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Error { + #[codec(index = 0)] + #[doc = "Invalid ownership proof."] + InvalidProof, + #[codec(index = 1)] + #[doc = "No associated validator ID for account."] + NoAssociatedValidatorId, + #[codec(index = 2)] + #[doc = "Registered duplicate key."] + DuplicatedKey, + #[codec(index = 3)] + #[doc = "No keys are associated with this account."] + NoKeys, + #[codec(index = 4)] + #[doc = "Key setting account is not live, so it's impossible to associate keys."] + NoAccount, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Event { + #[codec(index = 0)] + #[doc = "New session has happened. 
Note that the argument is the session index, not the"] + #[doc = "block number as the type might suggest."] + NewSession { + session_index: ::core::primitive::u32, + }, + } + } + } + pub mod pallet_sudo { + use super::runtime_types; + pub mod pallet { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Call { + #[codec(index = 0)] + #[doc = "Authenticates the sudo key and dispatches a function call with `Root` origin."] + #[doc = ""] + #[doc = "The dispatch origin for this call must be _Signed_."] + #[doc = ""] + #[doc = "# "] + #[doc = "- O(1)."] + #[doc = "- Limited storage reads."] + #[doc = "- One DB write (event)."] + #[doc = "- Weight of derivative `call` execution + 10,000."] + #[doc = "# "] + sudo { + call: ::std::boxed::Box, + }, + #[codec(index = 1)] + #[doc = "Authenticates the sudo key and dispatches a function call with `Root` origin."] + #[doc = "This function does not check the weight of the call, and instead allows the"] + #[doc = "Sudo user to specify the weight of the call."] + #[doc = ""] + #[doc = "The dispatch origin for this call must be _Signed_."] + #[doc = ""] + #[doc = "# "] + #[doc = "- O(1)."] + #[doc = "- The weight of this call is defined by the caller."] + #[doc = "# "] + sudo_unchecked_weight { + call: ::std::boxed::Box, + weight: ::core::primitive::u64, + }, + #[codec(index = 2)] + #[doc = "Authenticates the current sudo key and sets the given AccountId (`new`) as the new sudo"] + #[doc = "key."] + #[doc = ""] + #[doc = "The dispatch origin for this call must be _Signed_."] + #[doc = ""] + #[doc = "# "] + #[doc = "- O(1)."] + #[doc = "- Limited storage reads."] + #[doc = "- One DB change."] + #[doc = "# "] + set_key { + new: ::subxt::sp_runtime::MultiAddress< + ::subxt::sp_core::crypto::AccountId32, + (), + >, + }, + #[codec(index = 3)] + #[doc = "Authenticates the sudo key and dispatches a function call with `Signed` origin from"] + #[doc = "a given account."] + #[doc = ""] + #[doc = "The dispatch origin for this call must be _Signed_."] + #[doc = ""] + #[doc = "# "] + #[doc = "- O(1)."] + #[doc = "- Limited storage reads."] + #[doc = "- One DB write (event)."] + #[doc = "- Weight of derivative `call` execution + 10,000."] + #[doc = "# "] + sudo_as { + who: ::subxt::sp_runtime::MultiAddress< + ::subxt::sp_core::crypto::AccountId32, + (), + >, + call: ::std::boxed::Box, + }, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Error { + #[codec(index = 0)] + #[doc = "Sender must be the Sudo account"] + RequireSudo, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Event { + #[codec(index = 0)] + #[doc = "A sudo just took place. \\[result\\]"] + Sudid { + sudo_result: + ::core::result::Result<(), runtime_types::sp_runtime::DispatchError>, + }, + #[codec(index = 1)] + #[doc = "The \\[sudoer\\] just switched identity; the old key is supplied if one existed."] + KeyChanged { + old_sudoer: ::core::option::Option<::subxt::sp_core::crypto::AccountId32>, + }, + #[codec(index = 2)] + #[doc = "A sudo just took place. 
\\[result\\]"] + SudoAsDone { + sudo_result: + ::core::result::Result<(), runtime_types::sp_runtime::DispatchError>, + }, + } + } + } + pub mod pallet_timestamp { + use super::runtime_types; + pub mod pallet { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Call { + #[codec(index = 0)] + #[doc = "Set the current time."] + #[doc = ""] + #[doc = "This call should be invoked exactly once per block. It will panic at the finalization"] + #[doc = "phase, if this call hasn't been invoked by that time."] + #[doc = ""] + #[doc = "The timestamp should be greater than the previous one by the amount specified by"] + #[doc = "`MinimumPeriod`."] + #[doc = ""] + #[doc = "The dispatch origin for this call must be `Inherent`."] + #[doc = ""] + #[doc = "# "] + #[doc = "- `O(1)` (Note that implementations of `OnTimestampSet` must also be `O(1)`)"] + #[doc = "- 1 storage read and 1 storage mutation (codec `O(1)`). (because of `DidUpdate::take` in"] + #[doc = " `on_finalize`)"] + #[doc = "- 1 event handler `on_timestamp_set`. Must be `O(1)`."] + #[doc = "# "] + set { + #[codec(compact)] + now: ::core::primitive::u64, + }, + } + } + } + pub mod pallet_transaction_payment { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct ChargeTransactionPayment(#[codec(compact)] pub ::core::primitive::u128); + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Releases { + #[codec(index = 0)] + V1Ancient, + #[codec(index = 1)] + V2, + } + } + pub mod pallet_utility { + use super::runtime_types; + pub mod pallet { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Call { + #[codec(index = 0)] + #[doc = "Send a batch of dispatch calls."] + #[doc = ""] + #[doc = "May be called from any origin."] + #[doc = ""] + #[doc = "- `calls`: The calls to be dispatched from the same origin. The number of call must not"] + #[doc = " exceed the constant: `batched_calls_limit` (available in constant metadata)."] + #[doc = ""] + #[doc = "If origin is root then call are dispatch without checking origin filter. (This includes"] + #[doc = "bypassing `frame_system::Config::BaseCallFilter`)."] + #[doc = ""] + #[doc = "# "] + #[doc = "- Complexity: O(C) where C is the number of calls to be batched."] + #[doc = "# "] + #[doc = ""] + #[doc = "This will return `Ok` in all circumstances. To determine the success of the batch, an"] + #[doc = "event is deposited. If a call failed and the batch was interrupted, then the"] + #[doc = "`BatchInterrupted` event is deposited, along with the number of successful calls made"] + #[doc = "and the error of the failed call. If all were successful, then the `BatchCompleted`"] + #[doc = "event is deposited."] + batch { + calls: ::std::vec::Vec, + }, + #[codec(index = 1)] + #[doc = "Send a call through an indexed pseudonym of the sender."] + #[doc = ""] + #[doc = "Filter from origin are passed along. 
The call will be dispatched with an origin which"] + #[doc = "use the same filter as the origin of this call."] + #[doc = ""] + #[doc = "NOTE: If you need to ensure that any account-based filtering is not honored (i.e."] + #[doc = "because you expect `proxy` to have been used prior in the call stack and you do not want"] + #[doc = "the call restrictions to apply to any sub-accounts), then use `as_multi_threshold_1`"] + #[doc = "in the Multisig pallet instead."] + #[doc = ""] + #[doc = "NOTE: Prior to version *12, this was called `as_limited_sub`."] + #[doc = ""] + #[doc = "The dispatch origin for this call must be _Signed_."] + as_derivative { + index: ::core::primitive::u16, + call: ::std::boxed::Box, + }, + #[codec(index = 2)] + #[doc = "Send a batch of dispatch calls and atomically execute them."] + #[doc = "The whole transaction will rollback and fail if any of the calls failed."] + #[doc = ""] + #[doc = "May be called from any origin."] + #[doc = ""] + #[doc = "- `calls`: The calls to be dispatched from the same origin. The number of call must not"] + #[doc = " exceed the constant: `batched_calls_limit` (available in constant metadata)."] + #[doc = ""] + #[doc = "If origin is root then call are dispatch without checking origin filter. (This includes"] + #[doc = "bypassing `frame_system::Config::BaseCallFilter`)."] + #[doc = ""] + #[doc = "# "] + #[doc = "- Complexity: O(C) where C is the number of calls to be batched."] + #[doc = "# "] + batch_all { + calls: ::std::vec::Vec, + }, + #[codec(index = 3)] + #[doc = "Dispatches a function call with a provided origin."] + #[doc = ""] + #[doc = "The dispatch origin for this call must be _Root_."] + #[doc = ""] + #[doc = "# "] + #[doc = "- O(1)."] + #[doc = "- Limited storage reads."] + #[doc = "- One DB write (event)."] + #[doc = "- Weight of derivative `call` execution + T::WeightInfo::dispatch_as()."] + #[doc = "# "] + dispatch_as { + as_origin: ::std::boxed::Box, + call: ::std::boxed::Box, + }, + #[codec(index = 4)] + #[doc = "Send a batch of dispatch calls."] + #[doc = "Unlike `batch`, it allows errors and won't interrupt."] + #[doc = ""] + #[doc = "May be called from any origin."] + #[doc = ""] + #[doc = "- `calls`: The calls to be dispatched from the same origin. The number of call must not"] + #[doc = " exceed the constant: `batched_calls_limit` (available in constant metadata)."] + #[doc = ""] + #[doc = "If origin is root then call are dispatch without checking origin filter. (This includes"] + #[doc = "bypassing `frame_system::Config::BaseCallFilter`)."] + #[doc = ""] + #[doc = "# "] + #[doc = "- Complexity: O(C) where C is the number of calls to be batched."] + #[doc = "# "] + force_batch { + calls: ::std::vec::Vec, + }, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Error { + #[codec(index = 0)] + #[doc = "Too many calls batched."] + TooManyCalls, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Event { + #[codec(index = 0)] + #[doc = "Batch of dispatches did not complete fully. 
Index of first failing dispatch given, as"] + #[doc = "well as the error."] + BatchInterrupted { + index: ::core::primitive::u32, + error: runtime_types::sp_runtime::DispatchError, + }, + #[codec(index = 1)] + #[doc = "Batch of dispatches completed fully with no error."] + BatchCompleted, + #[codec(index = 2)] + #[doc = "Batch of dispatches completed but has errors."] + BatchCompletedWithErrors, + #[codec(index = 3)] + #[doc = "A single item within a Batch of dispatches has completed with no error."] + ItemCompleted, + #[codec(index = 4)] + #[doc = "A single item within a Batch of dispatches has completed with error."] + ItemFailed { + error: runtime_types::sp_runtime::DispatchError, + }, + #[codec(index = 5)] + #[doc = "A call was dispatched."] + DispatchedAs { + result: + ::core::result::Result<(), runtime_types::sp_runtime::DispatchError>, + }, + } + } + } + pub mod pallet_xcm { + use super::runtime_types; + pub mod pallet { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Call { + #[codec(index = 0)] + send { + dest: ::std::boxed::Box, + message: ::std::boxed::Box, + }, + #[codec(index = 1)] + #[doc = "Teleport some assets from the local chain to some destination chain."] + #[doc = ""] + #[doc = "Fee payment on the destination side is made from the asset in the `assets` vector of"] + #[doc = "index `fee_asset_item`. The weight limit for fees is not provided and thus is unlimited,"] + #[doc = "with all fees taken as needed from the asset."] + #[doc = ""] + #[doc = "- `origin`: Must be capable of withdrawing the `assets` and executing XCM."] + #[doc = "- `dest`: Destination context for the assets. Will typically be `X2(Parent, Parachain(..))` to send"] + #[doc = " from parachain to parachain, or `X1(Parachain(..))` to send from relay to parachain."] + #[doc = "- `beneficiary`: A beneficiary location for the assets in the context of `dest`. Will generally be"] + #[doc = " an `AccountId32` value."] + #[doc = "- `assets`: The assets to be withdrawn. The first item should be the currency used to to pay the fee on the"] + #[doc = " `dest` side. May not be empty."] + #[doc = "- `fee_asset_item`: The index into `assets` of the item which should be used to pay"] + #[doc = " fees."] + teleport_assets { + dest: ::std::boxed::Box, + beneficiary: ::std::boxed::Box, + assets: ::std::boxed::Box, + fee_asset_item: ::core::primitive::u32, + }, + #[codec(index = 2)] + #[doc = "Transfer some assets from the local chain to the sovereign account of a destination"] + #[doc = "chain and forward a notification XCM."] + #[doc = ""] + #[doc = "Fee payment on the destination side is made from the asset in the `assets` vector of"] + #[doc = "index `fee_asset_item`. The weight limit for fees is not provided and thus is unlimited,"] + #[doc = "with all fees taken as needed from the asset."] + #[doc = ""] + #[doc = "- `origin`: Must be capable of withdrawing the `assets` and executing XCM."] + #[doc = "- `dest`: Destination context for the assets. Will typically be `X2(Parent, Parachain(..))` to send"] + #[doc = " from parachain to parachain, or `X1(Parachain(..))` to send from relay to parachain."] + #[doc = "- `beneficiary`: A beneficiary location for the assets in the context of `dest`. Will generally be"] + #[doc = " an `AccountId32` value."] + #[doc = "- `assets`: The assets to be withdrawn. 
This should include the assets used to pay the fee on the"] + #[doc = " `dest` side."] + #[doc = "- `fee_asset_item`: The index into `assets` of the item which should be used to pay"] + #[doc = " fees."] + reserve_transfer_assets { + dest: ::std::boxed::Box, + beneficiary: ::std::boxed::Box, + assets: ::std::boxed::Box, + fee_asset_item: ::core::primitive::u32, + }, + #[codec(index = 3)] + #[doc = "Execute an XCM message from a local, signed, origin."] + #[doc = ""] + #[doc = "An event is deposited indicating whether `msg` could be executed completely or only"] + #[doc = "partially."] + #[doc = ""] + #[doc = "No more than `max_weight` will be used in its attempted execution. If this is less than the"] + #[doc = "maximum amount of weight that the message could take to be executed, then no execution"] + #[doc = "attempt will be made."] + #[doc = ""] + #[doc = "NOTE: A successful return to this does *not* imply that the `msg` was executed successfully"] + #[doc = "to completion; only that *some* of it was executed."] + execute { + message: ::std::boxed::Box, + max_weight: ::core::primitive::u64, + }, + #[codec(index = 4)] + #[doc = "Extoll that a particular destination can be communicated with through a particular"] + #[doc = "version of XCM."] + #[doc = ""] + #[doc = "- `origin`: Must be Root."] + #[doc = "- `location`: The destination that is being described."] + #[doc = "- `xcm_version`: The latest version of XCM that `location` supports."] + force_xcm_version { + location: + ::std::boxed::Box, + xcm_version: ::core::primitive::u32, + }, + #[codec(index = 5)] + #[doc = "Set a safe XCM version (the version that XCM should be encoded with if the most recent"] + #[doc = "version a destination can accept is unknown)."] + #[doc = ""] + #[doc = "- `origin`: Must be Root."] + #[doc = "- `maybe_xcm_version`: The default XCM encoding version, or `None` to disable."] + force_default_xcm_version { + maybe_xcm_version: ::core::option::Option<::core::primitive::u32>, + }, + #[codec(index = 6)] + #[doc = "Ask a location to notify us regarding their XCM version and any changes to it."] + #[doc = ""] + #[doc = "- `origin`: Must be Root."] + #[doc = "- `location`: The location to which we should subscribe for XCM version notifications."] + force_subscribe_version_notify { + location: ::std::boxed::Box, + }, + #[codec(index = 7)] + #[doc = "Require that a particular destination should no longer notify us regarding any XCM"] + #[doc = "version changes."] + #[doc = ""] + #[doc = "- `origin`: Must be Root."] + #[doc = "- `location`: The location to which we are currently subscribed for XCM version"] + #[doc = " notifications which we no longer desire."] + force_unsubscribe_version_notify { + location: ::std::boxed::Box, + }, + #[codec(index = 8)] + #[doc = "Transfer some assets from the local chain to the sovereign account of a destination"] + #[doc = "chain and forward a notification XCM."] + #[doc = ""] + #[doc = "Fee payment on the destination side is made from the asset in the `assets` vector of"] + #[doc = "index `fee_asset_item`, up to enough to pay for `weight_limit` of weight. If more weight"] + #[doc = "is needed than `weight_limit`, then the operation will fail and the assets send may be"] + #[doc = "at risk."] + #[doc = ""] + #[doc = "- `origin`: Must be capable of withdrawing the `assets` and executing XCM."] + #[doc = "- `dest`: Destination context for the assets. 
Will typically be `X2(Parent, Parachain(..))` to send"] + #[doc = " from parachain to parachain, or `X1(Parachain(..))` to send from relay to parachain."] + #[doc = "- `beneficiary`: A beneficiary location for the assets in the context of `dest`. Will generally be"] + #[doc = " an `AccountId32` value."] + #[doc = "- `assets`: The assets to be withdrawn. This should include the assets used to pay the fee on the"] + #[doc = " `dest` side."] + #[doc = "- `fee_asset_item`: The index into `assets` of the item which should be used to pay"] + #[doc = " fees."] + #[doc = "- `weight_limit`: The remote-side weight limit, if any, for the XCM fee purchase."] + limited_reserve_transfer_assets { + dest: ::std::boxed::Box, + beneficiary: ::std::boxed::Box, + assets: ::std::boxed::Box, + fee_asset_item: ::core::primitive::u32, + weight_limit: runtime_types::xcm::v2::WeightLimit, + }, + #[codec(index = 9)] + #[doc = "Teleport some assets from the local chain to some destination chain."] + #[doc = ""] + #[doc = "Fee payment on the destination side is made from the asset in the `assets` vector of"] + #[doc = "index `fee_asset_item`, up to enough to pay for `weight_limit` of weight. If more weight"] + #[doc = "is needed than `weight_limit`, then the operation will fail and the assets send may be"] + #[doc = "at risk."] + #[doc = ""] + #[doc = "- `origin`: Must be capable of withdrawing the `assets` and executing XCM."] + #[doc = "- `dest`: Destination context for the assets. Will typically be `X2(Parent, Parachain(..))` to send"] + #[doc = " from parachain to parachain, or `X1(Parachain(..))` to send from relay to parachain."] + #[doc = "- `beneficiary`: A beneficiary location for the assets in the context of `dest`. Will generally be"] + #[doc = " an `AccountId32` value."] + #[doc = "- `assets`: The assets to be withdrawn. The first item should be the currency used to to pay the fee on the"] + #[doc = " `dest` side. May not be empty."] + #[doc = "- `fee_asset_item`: The index into `assets` of the item which should be used to pay"] + #[doc = " fees."] + #[doc = "- `weight_limit`: The remote-side weight limit, if any, for the XCM fee purchase."] + limited_teleport_assets { + dest: ::std::boxed::Box, + beneficiary: ::std::boxed::Box, + assets: ::std::boxed::Box, + fee_asset_item: ::core::primitive::u32, + weight_limit: runtime_types::xcm::v2::WeightLimit, + }, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Error { + #[codec(index = 0)] + #[doc = "The desired destination was unreachable, generally because there is a no way of routing"] + #[doc = "to it."] + Unreachable, + #[codec(index = 1)] + #[doc = "There was some other issue (i.e. not to do with routing) in sending the message. 
Perhaps"] + #[doc = "a lack of space for buffering the message."] + SendFailure, + #[codec(index = 2)] + #[doc = "The message execution fails the filter."] + Filtered, + #[codec(index = 3)] + #[doc = "The message's weight could not be determined."] + UnweighableMessage, + #[codec(index = 4)] + #[doc = "The destination `MultiLocation` provided cannot be inverted."] + DestinationNotInvertible, + #[codec(index = 5)] + #[doc = "The assets to be sent are empty."] + Empty, + #[codec(index = 6)] + #[doc = "Could not re-anchor the assets to declare the fees for the destination chain."] + CannotReanchor, + #[codec(index = 7)] + #[doc = "Too many assets have been attempted for transfer."] + TooManyAssets, + #[codec(index = 8)] + #[doc = "Origin is invalid for sending."] + InvalidOrigin, + #[codec(index = 9)] + #[doc = "The version of the `Versioned` value used is not able to be interpreted."] + BadVersion, + #[codec(index = 10)] + #[doc = "The given location could not be used (e.g. because it cannot be expressed in the"] + #[doc = "desired version of XCM)."] + BadLocation, + #[codec(index = 11)] + #[doc = "The referenced subscription could not be found."] + NoSubscription, + #[codec(index = 12)] + #[doc = "The location is invalid since it already has a subscription from us."] + AlreadySubscribed, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Event { + #[codec(index = 0)] + #[doc = "Execution of an XCM message was attempted."] + #[doc = ""] + #[doc = "\\[ outcome \\]"] + Attempted(runtime_types::xcm::v2::traits::Outcome), + #[codec(index = 1)] + #[doc = "A XCM message was sent."] + #[doc = ""] + #[doc = "\\[ origin, destination, message \\]"] + Sent( + runtime_types::xcm::v1::multilocation::MultiLocation, + runtime_types::xcm::v1::multilocation::MultiLocation, + runtime_types::xcm::v2::Xcm, + ), + #[codec(index = 2)] + #[doc = "Query response received which does not match a registered query. This may be because a"] + #[doc = "matching query was never registered, it may be because it is a duplicate response, or"] + #[doc = "because the query timed out."] + #[doc = ""] + #[doc = "\\[ origin location, id \\]"] + UnexpectedResponse( + runtime_types::xcm::v1::multilocation::MultiLocation, + ::core::primitive::u64, + ), + #[codec(index = 3)] + #[doc = "Query response has been received and is ready for taking with `take_response`. There is"] + #[doc = "no registered notification call."] + #[doc = ""] + #[doc = "\\[ id, response \\]"] + ResponseReady(::core::primitive::u64, runtime_types::xcm::v2::Response), + #[codec(index = 4)] + #[doc = "Query response has been received and query is removed. The registered notification has"] + #[doc = "been dispatched and executed successfully."] + #[doc = ""] + #[doc = "\\[ id, pallet index, call index \\]"] + Notified( + ::core::primitive::u64, + ::core::primitive::u8, + ::core::primitive::u8, + ), + #[codec(index = 5)] + #[doc = "Query response has been received and query is removed. The registered notification could"] + #[doc = "not be dispatched because the dispatch weight is greater than the maximum weight"] + #[doc = "originally budgeted by this runtime for the query result."] + #[doc = ""] + #[doc = "\\[ id, pallet index, call index, actual weight, max budgeted weight \\]"] + NotifyOverweight( + ::core::primitive::u64, + ::core::primitive::u8, + ::core::primitive::u8, + ::core::primitive::u64, + ::core::primitive::u64, + ), + #[codec(index = 6)] + #[doc = "Query response has been received and query is removed. 
There was a general error with"] + #[doc = "dispatching the notification call."] + #[doc = ""] + #[doc = "\\[ id, pallet index, call index \\]"] + NotifyDispatchError( + ::core::primitive::u64, + ::core::primitive::u8, + ::core::primitive::u8, + ), + #[codec(index = 7)] + #[doc = "Query response has been received and query is removed. The dispatch was unable to be"] + #[doc = "decoded into a `Call`; this might be due to dispatch function having a signature which"] + #[doc = "is not `(origin, QueryId, Response)`."] + #[doc = ""] + #[doc = "\\[ id, pallet index, call index \\]"] + NotifyDecodeFailed( + ::core::primitive::u64, + ::core::primitive::u8, + ::core::primitive::u8, + ), + #[codec(index = 8)] + #[doc = "Expected query response has been received but the origin location of the response does"] + #[doc = "not match that expected. The query remains registered for a later, valid, response to"] + #[doc = "be received and acted upon."] + #[doc = ""] + #[doc = "\\[ origin location, id, expected location \\]"] + InvalidResponder( + runtime_types::xcm::v1::multilocation::MultiLocation, + ::core::primitive::u64, + ::core::option::Option< + runtime_types::xcm::v1::multilocation::MultiLocation, + >, + ), + #[codec(index = 9)] + #[doc = "Expected query response has been received but the expected origin location placed in"] + #[doc = "storage by this runtime previously cannot be decoded. The query remains registered."] + #[doc = ""] + #[doc = "This is unexpected (since a location placed in storage in a previously executing"] + #[doc = "runtime should be readable prior to query timeout) and dangerous since the possibly"] + #[doc = "valid response will be dropped. Manual governance intervention is probably going to be"] + #[doc = "needed."] + #[doc = ""] + #[doc = "\\[ origin location, id \\]"] + InvalidResponderVersion( + runtime_types::xcm::v1::multilocation::MultiLocation, + ::core::primitive::u64, + ), + #[codec(index = 10)] + #[doc = "Received query response has been read and removed."] + #[doc = ""] + #[doc = "\\[ id \\]"] + ResponseTaken(::core::primitive::u64), + #[codec(index = 11)] + #[doc = "Some assets have been placed in an asset trap."] + #[doc = ""] + #[doc = "\\[ hash, origin, assets \\]"] + AssetsTrapped( + ::subxt::sp_core::H256, + runtime_types::xcm::v1::multilocation::MultiLocation, + runtime_types::xcm::VersionedMultiAssets, + ), + #[codec(index = 12)] + #[doc = "An XCM version change notification message has been attempted to be sent."] + #[doc = ""] + #[doc = "\\[ destination, result \\]"] + VersionChangeNotified( + runtime_types::xcm::v1::multilocation::MultiLocation, + ::core::primitive::u32, + ), + #[codec(index = 13)] + #[doc = "The supported version of a location has been changed. 
This might be through an"] + #[doc = "automatic notification or a manual intervention."] + #[doc = ""] + #[doc = "\\[ location, XCM version \\]"] + SupportedVersionChanged( + runtime_types::xcm::v1::multilocation::MultiLocation, + ::core::primitive::u32, + ), + #[codec(index = 14)] + #[doc = "A given location which had a version change subscription was dropped owing to an error"] + #[doc = "sending the notification to it."] + #[doc = ""] + #[doc = "\\[ location, query ID, error \\]"] + NotifyTargetSendFail( + runtime_types::xcm::v1::multilocation::MultiLocation, + ::core::primitive::u64, + runtime_types::xcm::v2::traits::Error, + ), + #[codec(index = 15)] + #[doc = "A given location which had a version change subscription was dropped owing to an error"] + #[doc = "migrating the location to our new XCM format."] + #[doc = ""] + #[doc = "\\[ location, query ID \\]"] + NotifyTargetMigrationFail( + runtime_types::xcm::VersionedMultiLocation, + ::core::primitive::u64, + ), + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Origin { + #[codec(index = 0)] + Xcm(runtime_types::xcm::v1::multilocation::MultiLocation), + #[codec(index = 1)] + Response(runtime_types::xcm::v1::multilocation::MultiLocation), + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum QueryStatus<_0> { + #[codec(index = 0)] + Pending { + responder: runtime_types::xcm::VersionedMultiLocation, + maybe_notify: + ::core::option::Option<(::core::primitive::u8, ::core::primitive::u8)>, + timeout: _0, + }, + #[codec(index = 1)] + VersionNotifier { + origin: runtime_types::xcm::VersionedMultiLocation, + is_active: ::core::primitive::bool, + }, + #[codec(index = 2)] + Ready { + response: runtime_types::xcm::VersionedResponse, + at: _0, + }, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum VersionMigrationStage { + #[codec(index = 0)] + MigrateSupportedVersion, + #[codec(index = 1)] + MigrateVersionNotifiers, + #[codec(index = 2)] + NotifyCurrentTargets( + ::core::option::Option<::std::vec::Vec<::core::primitive::u8>>, + ), + #[codec(index = 3)] + MigrateAndNotifyOldTargets, + } + } + } + pub mod polkadot_core_primitives { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct CandidateHash(pub ::subxt::sp_core::H256); + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct InboundDownwardMessage<_0> { + pub sent_at: _0, + pub msg: ::std::vec::Vec<::core::primitive::u8>, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct InboundHrmpMessage<_0> { + pub sent_at: _0, + pub data: ::std::vec::Vec<::core::primitive::u8>, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct OutboundHrmpMessage<_0> { + pub recipient: _0, + pub data: ::std::vec::Vec<::core::primitive::u8>, + } + } + pub mod polkadot_parachain { + use super::runtime_types; + pub mod primitives { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct HeadData(pub ::std::vec::Vec<::core::primitive::u8>); + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct HrmpChannelId { + pub sender: runtime_types::polkadot_parachain::primitives::Id, + pub recipient: runtime_types::polkadot_parachain::primitives::Id, + } + #[derive( + :: subxt :: codec :: CompactAs, + :: subxt :: codec :: Decode, + :: 
subxt :: codec :: Encode, + Debug, + )] + pub struct Id(pub ::core::primitive::u32); + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct ValidationCode(pub ::std::vec::Vec<::core::primitive::u8>); + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct ValidationCodeHash(pub ::subxt::sp_core::H256); + } + } + pub mod polkadot_primitives { + use super::runtime_types; + pub mod v2 { + use super::runtime_types; + pub mod assignment_app { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct Public(pub runtime_types::sp_core::sr25519::Public); + } + pub mod collator_app { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct Public(pub runtime_types::sp_core::sr25519::Public); + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct Signature(pub runtime_types::sp_core::sr25519::Signature); + } + pub mod signed { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct UncheckedSigned<_0, _1> { + pub payload: _0, + pub validator_index: runtime_types::polkadot_primitives::v2::ValidatorIndex, + pub signature: + runtime_types::polkadot_primitives::v2::validator_app::Signature, + #[codec(skip)] + pub __subxt_unused_type_params: ::core::marker::PhantomData<_1>, + } + } + pub mod validator_app { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct Public(pub runtime_types::sp_core::sr25519::Public); + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct Signature(pub runtime_types::sp_core::sr25519::Signature); + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct AvailabilityBitfield( + pub ::subxt::bitvec::vec::BitVec< + ::core::primitive::u8, + ::subxt::bitvec::order::Lsb0, + >, + ); + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct BackedCandidate<_0> { + pub candidate: + runtime_types::polkadot_primitives::v2::CommittedCandidateReceipt<_0>, + pub validity_votes: ::std::vec::Vec< + runtime_types::polkadot_primitives::v2::ValidityAttestation, + >, + pub validator_indices: ::subxt::bitvec::vec::BitVec< + ::core::primitive::u8, + ::subxt::bitvec::order::Lsb0, + >, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct CandidateCommitments<_0> { + pub upward_messages: ::std::vec::Vec<::std::vec::Vec<::core::primitive::u8>>, + pub horizontal_messages: ::std::vec::Vec< + runtime_types::polkadot_core_primitives::OutboundHrmpMessage< + runtime_types::polkadot_parachain::primitives::Id, + >, + >, + pub new_validation_code: ::core::option::Option< + runtime_types::polkadot_parachain::primitives::ValidationCode, + >, + pub head_data: runtime_types::polkadot_parachain::primitives::HeadData, + pub processed_downward_messages: _0, + pub hrmp_watermark: _0, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct CandidateDescriptor<_0> { + pub para_id: runtime_types::polkadot_parachain::primitives::Id, + pub relay_parent: _0, + pub collator: runtime_types::polkadot_primitives::v2::collator_app::Public, + pub persisted_validation_data_hash: _0, + pub pov_hash: _0, + pub erasure_root: _0, + pub signature: runtime_types::polkadot_primitives::v2::collator_app::Signature, + 
pub para_head: _0, + pub validation_code_hash: + runtime_types::polkadot_parachain::primitives::ValidationCodeHash, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct CandidateReceipt<_0> { + pub descriptor: runtime_types::polkadot_primitives::v2::CandidateDescriptor<_0>, + pub commitments_hash: _0, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct CommittedCandidateReceipt<_0> { + pub descriptor: runtime_types::polkadot_primitives::v2::CandidateDescriptor<_0>, + pub commitments: runtime_types::polkadot_primitives::v2::CandidateCommitments< + ::core::primitive::u32, + >, + } + #[derive( + :: subxt :: codec :: CompactAs, + :: subxt :: codec :: Decode, + :: subxt :: codec :: Encode, + Debug, + )] + pub struct CoreIndex(pub ::core::primitive::u32); + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum CoreOccupied { + #[codec(index = 0)] + Parathread(runtime_types::polkadot_primitives::v2::ParathreadEntry), + #[codec(index = 1)] + Parachain, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct DisputeState<_0> { + pub validators_for: ::subxt::bitvec::vec::BitVec< + ::core::primitive::u8, + ::subxt::bitvec::order::Lsb0, + >, + pub validators_against: ::subxt::bitvec::vec::BitVec< + ::core::primitive::u8, + ::subxt::bitvec::order::Lsb0, + >, + pub start: _0, + pub concluded_at: ::core::option::Option<_0>, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum DisputeStatement { + #[codec(index = 0)] + Valid(runtime_types::polkadot_primitives::v2::ValidDisputeStatementKind), + #[codec(index = 1)] + Invalid(runtime_types::polkadot_primitives::v2::InvalidDisputeStatementKind), + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct DisputeStatementSet { + pub candidate_hash: runtime_types::polkadot_core_primitives::CandidateHash, + pub session: ::core::primitive::u32, + pub statements: ::std::vec::Vec<( + runtime_types::polkadot_primitives::v2::DisputeStatement, + runtime_types::polkadot_primitives::v2::ValidatorIndex, + runtime_types::polkadot_primitives::v2::validator_app::Signature, + )>, + } + #[derive( + :: subxt :: codec :: CompactAs, + :: subxt :: codec :: Decode, + :: subxt :: codec :: Encode, + Debug, + )] + pub struct GroupIndex(pub ::core::primitive::u32); + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct InherentData<_0> { + pub bitfields: ::std::vec::Vec< + runtime_types::polkadot_primitives::v2::signed::UncheckedSigned< + runtime_types::polkadot_primitives::v2::AvailabilityBitfield, + runtime_types::polkadot_primitives::v2::AvailabilityBitfield, + >, + >, + pub backed_candidates: ::std::vec::Vec< + runtime_types::polkadot_primitives::v2::BackedCandidate< + ::subxt::sp_core::H256, + >, + >, + pub disputes: ::std::vec::Vec< + runtime_types::polkadot_primitives::v2::DisputeStatementSet, + >, + pub parent_header: _0, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum InvalidDisputeStatementKind { + #[codec(index = 0)] + Explicit, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct ParathreadClaim( + pub runtime_types::polkadot_parachain::primitives::Id, + pub runtime_types::polkadot_primitives::v2::collator_app::Public, + ); + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct ParathreadEntry { + 
pub claim: runtime_types::polkadot_primitives::v2::ParathreadClaim, + pub retries: ::core::primitive::u32, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct PvfCheckStatement { + pub accept: ::core::primitive::bool, + pub subject: runtime_types::polkadot_parachain::primitives::ValidationCodeHash, + pub session_index: ::core::primitive::u32, + pub validator_index: runtime_types::polkadot_primitives::v2::ValidatorIndex, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct ScrapedOnChainVotes<_0> { + pub session: ::core::primitive::u32, + pub backing_validators_per_candidate: ::std::vec::Vec<( + runtime_types::polkadot_primitives::v2::CandidateReceipt<_0>, + ::std::vec::Vec<( + runtime_types::polkadot_primitives::v2::ValidatorIndex, + runtime_types::polkadot_primitives::v2::ValidityAttestation, + )>, + )>, + pub disputes: ::std::vec::Vec< + runtime_types::polkadot_primitives::v2::DisputeStatementSet, + >, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct SessionInfo { + pub active_validator_indices: + ::std::vec::Vec, + pub random_seed: [::core::primitive::u8; 32usize], + pub dispute_period: ::core::primitive::u32, + pub validators: ::std::vec::Vec< + runtime_types::polkadot_primitives::v2::validator_app::Public, + >, + pub discovery_keys: + ::std::vec::Vec, + pub assignment_keys: ::std::vec::Vec< + runtime_types::polkadot_primitives::v2::assignment_app::Public, + >, + pub validator_groups: ::std::vec::Vec< + ::std::vec::Vec, + >, + pub n_cores: ::core::primitive::u32, + pub zeroth_delay_tranche_width: ::core::primitive::u32, + pub relay_vrf_modulo_samples: ::core::primitive::u32, + pub n_delay_tranches: ::core::primitive::u32, + pub no_show_slots: ::core::primitive::u32, + pub needed_approvals: ::core::primitive::u32, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum UpgradeGoAhead { + #[codec(index = 0)] + Abort, + #[codec(index = 1)] + GoAhead, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum UpgradeRestriction { + #[codec(index = 0)] + Present, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum ValidDisputeStatementKind { + #[codec(index = 0)] + Explicit, + #[codec(index = 1)] + BackingSeconded(::subxt::sp_core::H256), + #[codec(index = 2)] + BackingValid(::subxt::sp_core::H256), + #[codec(index = 3)] + ApprovalChecking, + } + #[derive( + :: subxt :: codec :: CompactAs, + :: subxt :: codec :: Decode, + :: subxt :: codec :: Encode, + Debug, + )] + pub struct ValidatorIndex(pub ::core::primitive::u32); + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum ValidityAttestation { + #[codec(index = 1)] + Implicit(runtime_types::polkadot_primitives::v2::validator_app::Signature), + #[codec(index = 2)] + Explicit(runtime_types::polkadot_primitives::v2::validator_app::Signature), + } + } + } + pub mod polkadot_runtime_common { + use super::runtime_types; + pub mod assigned_slots { + use super::runtime_types; + pub mod pallet { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Call { + # [codec (index = 0)] # [doc = "Assign a permanent parachain slot and immediately create a lease for it."] assign_perm_parachain_slot { id : runtime_types :: polkadot_parachain :: primitives :: Id , } , # [codec (index = 1)] # [doc = "Assign a temporary parachain slot. 
The function tries to create a lease for it"] # [doc = "immediately if `SlotLeasePeriodStart::Current` is specified, and if the number"] # [doc = "of currently active temporary slots is below `MaxTemporarySlotPerLeasePeriod`."] assign_temp_parachain_slot { id : runtime_types :: polkadot_parachain :: primitives :: Id , lease_period_start : runtime_types :: polkadot_runtime_common :: assigned_slots :: SlotLeasePeriodStart , } , # [codec (index = 2)] # [doc = "Unassign a permanent or temporary parachain slot"] unassign_parachain_slot { id : runtime_types :: polkadot_parachain :: primitives :: Id , } , } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Error { + #[codec(index = 0)] + #[doc = "The specified parachain or parathread is not registered."] + ParaDoesntExist, + #[codec(index = 1)] + #[doc = "Not a parathread."] + NotParathread, + #[codec(index = 2)] + #[doc = "Cannot upgrade parathread."] + CannotUpgrade, + #[codec(index = 3)] + #[doc = "Cannot downgrade parachain."] + CannotDowngrade, + #[codec(index = 4)] + #[doc = "Permanent or Temporary slot already assigned."] + SlotAlreadyAssigned, + #[codec(index = 5)] + #[doc = "Permanent or Temporary slot has not been assigned."] + SlotNotAssigned, + #[codec(index = 6)] + #[doc = "An ongoing lease already exists."] + OngoingLeaseExists, + #[codec(index = 7)] + MaxPermanentSlotsExceeded, + #[codec(index = 8)] + MaxTemporarySlotsExceeded, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Event { + #[codec(index = 0)] + #[doc = "A para was assigned a permanent parachain slot"] + PermanentSlotAssigned(runtime_types::polkadot_parachain::primitives::Id), + #[codec(index = 1)] + #[doc = "A para was assigned a temporary parachain slot"] + TemporarySlotAssigned(runtime_types::polkadot_parachain::primitives::Id), + } + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct ParachainTemporarySlot<_0, _1> { + pub manager: _0, + pub period_begin: _1, + pub period_count: _1, + pub last_lease: ::core::option::Option<_1>, + pub lease_count: _1, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum SlotLeasePeriodStart { + #[codec(index = 0)] + Current, + #[codec(index = 1)] + Next, + } + } + pub mod auctions { + use super::runtime_types; + pub mod pallet { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Call { + #[codec(index = 0)] + #[doc = "Create a new auction."] + #[doc = ""] + #[doc = "This can only happen when there isn't already an auction in progress and may only be"] + #[doc = "called by the root origin. Accepts the `duration` of this auction and the"] + #[doc = "`lease_period_index` of the initial lease period of the four that are to be auctioned."] + new_auction { + #[codec(compact)] + duration: ::core::primitive::u32, + #[codec(compact)] + lease_period_index: ::core::primitive::u32, + }, + #[codec(index = 1)] + #[doc = "Make a new bid from an account (including a parachain account) for deploying a new"] + #[doc = "parachain."] + #[doc = ""] + #[doc = "Multiple simultaneous bids from the same bidder are allowed only as long as all active"] + #[doc = "bids overlap each other (i.e. are mutually exclusive). 
Bids cannot be redacted."] + #[doc = ""] + #[doc = "- `sub` is the sub-bidder ID, allowing for multiple competing bids to be made by (and"] + #[doc = "funded by) the same account."] + #[doc = "- `auction_index` is the index of the auction to bid on. Should just be the present"] + #[doc = "value of `AuctionCounter`."] + #[doc = "- `first_slot` is the first lease period index of the range to bid on. This is the"] + #[doc = "absolute lease period index value, not an auction-specific offset."] + #[doc = "- `last_slot` is the last lease period index of the range to bid on. This is the"] + #[doc = "absolute lease period index value, not an auction-specific offset."] + #[doc = "- `amount` is the amount to bid to be held as deposit for the parachain should the"] + #[doc = "bid win. This amount is held throughout the range."] + bid { + #[codec(compact)] + para: runtime_types::polkadot_parachain::primitives::Id, + #[codec(compact)] + auction_index: ::core::primitive::u32, + #[codec(compact)] + first_slot: ::core::primitive::u32, + #[codec(compact)] + last_slot: ::core::primitive::u32, + #[codec(compact)] + amount: ::core::primitive::u128, + }, + #[codec(index = 2)] + #[doc = "Cancel an in-progress auction."] + #[doc = ""] + #[doc = "Can only be called by Root origin."] + cancel_auction, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Error { + #[codec(index = 0)] + #[doc = "This auction is already in progress."] + AuctionInProgress, + #[codec(index = 1)] + #[doc = "The lease period is in the past."] + LeasePeriodInPast, + #[codec(index = 2)] + #[doc = "Para is not registered"] + ParaNotRegistered, + #[codec(index = 3)] + #[doc = "Not a current auction."] + NotCurrentAuction, + #[codec(index = 4)] + #[doc = "Not an auction."] + NotAuction, + #[codec(index = 5)] + #[doc = "Auction has already ended."] + AuctionEnded, + #[codec(index = 6)] + #[doc = "The para is already leased out for part of this range."] + AlreadyLeasedOut, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Event { + #[codec(index = 0)] + #[doc = "An auction started. Provides its index and the block number where it will begin to"] + #[doc = "close and the first lease period of the quadruplet that is auctioned."] + #[doc = "`[auction_index, lease_period, ending]`"] + AuctionStarted( + ::core::primitive::u32, + ::core::primitive::u32, + ::core::primitive::u32, + ), + #[codec(index = 1)] + #[doc = "An auction ended. All funds become unreserved. `[auction_index]`"] + AuctionClosed(::core::primitive::u32), + #[codec(index = 2)] + #[doc = "Funds were reserved for a winning bid. First balance is the extra amount reserved."] + #[doc = "Second is the total. `[bidder, extra_reserved, total_amount]`"] + Reserved( + ::subxt::sp_core::crypto::AccountId32, + ::core::primitive::u128, + ::core::primitive::u128, + ), + #[codec(index = 3)] + #[doc = "Funds were unreserved since bidder is no longer active. `[bidder, amount]`"] + Unreserved( + ::subxt::sp_core::crypto::AccountId32, + ::core::primitive::u128, + ), + #[codec(index = 4)] + #[doc = "Someone attempted to lease the same slot twice for a parachain. 
The amount is held in reserve"] + #[doc = "but no parachain slot has been leased."] + #[doc = "`[parachain_id, leaser, amount]`"] + ReserveConfiscated( + runtime_types::polkadot_parachain::primitives::Id, + ::subxt::sp_core::crypto::AccountId32, + ::core::primitive::u128, + ), + #[codec(index = 5)] + #[doc = "A new bid has been accepted as the current winner."] + #[doc = "`[who, para_id, amount, first_slot, last_slot]`"] + BidAccepted( + ::subxt::sp_core::crypto::AccountId32, + runtime_types::polkadot_parachain::primitives::Id, + ::core::primitive::u128, + ::core::primitive::u32, + ::core::primitive::u32, + ), + #[codec(index = 6)] + #[doc = "The winning offset was chosen for an auction. This will map into the `Winning` storage map."] + #[doc = "`[auction_index, block_number]`"] + WinningOffset(::core::primitive::u32, ::core::primitive::u32), + } + } + } + pub mod crowdloan { + use super::runtime_types; + pub mod pallet { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Call { + #[codec(index = 0)] + #[doc = "Create a new crowdloaning campaign for a parachain slot with the given lease period range."] + #[doc = ""] + #[doc = "This applies a lock to your parachain configuration, ensuring that it cannot be changed"] + #[doc = "by the parachain manager."] + create { + #[codec(compact)] + index: runtime_types::polkadot_parachain::primitives::Id, + #[codec(compact)] + cap: ::core::primitive::u128, + #[codec(compact)] + first_period: ::core::primitive::u32, + #[codec(compact)] + last_period: ::core::primitive::u32, + #[codec(compact)] + end: ::core::primitive::u32, + verifier: + ::core::option::Option, + }, + #[codec(index = 1)] + #[doc = "Contribute to a crowd sale. This will transfer some balance over to fund a parachain"] + #[doc = "slot. It will be withdrawable when the crowdloan has ended and the funds are unused."] + contribute { + #[codec(compact)] + index: runtime_types::polkadot_parachain::primitives::Id, + #[codec(compact)] + value: ::core::primitive::u128, + signature: + ::core::option::Option, + }, + #[codec(index = 2)] + #[doc = "Withdraw full balance of a specific contributor."] + #[doc = ""] + #[doc = "Origin must be signed, but can come from anyone."] + #[doc = ""] + #[doc = "The fund must be either in, or ready for, retirement. For a fund to be *in* retirement, then the retirement"] + #[doc = "flag must be set. For a fund to be ready for retirement, then:"] + #[doc = "- it must not already be in retirement;"] + #[doc = "- the amount of raised funds must be bigger than the _free_ balance of the account;"] + #[doc = "- and either:"] + #[doc = " - the block number must be at least `end`; or"] + #[doc = " - the current lease period must be greater than the fund's `last_period`."] + #[doc = ""] + #[doc = "In this case, the fund's retirement flag is set and its `end` is reset to the current block"] + #[doc = "number."] + #[doc = ""] + #[doc = "- `who`: The account whose contribution should be withdrawn."] + #[doc = "- `index`: The parachain to whose crowdloan the contribution was made."] + withdraw { + who: ::subxt::sp_core::crypto::AccountId32, + #[codec(compact)] + index: runtime_types::polkadot_parachain::primitives::Id, + }, + #[codec(index = 3)] + #[doc = "Automatically refund contributors of an ended crowdloan."] + #[doc = "Due to weight restrictions, this function may need to be called multiple"] + #[doc = "times to fully refund all users. 
We will refund `RemoveKeysLimit` users at a time."] + #[doc = ""] + #[doc = "Origin must be signed, but can come from anyone."] + refund { + #[codec(compact)] + index: runtime_types::polkadot_parachain::primitives::Id, + }, + #[codec(index = 4)] + #[doc = "Remove a fund after the retirement period has ended and all funds have been returned."] + dissolve { + #[codec(compact)] + index: runtime_types::polkadot_parachain::primitives::Id, + }, + #[codec(index = 5)] + #[doc = "Edit the configuration for an in-progress crowdloan."] + #[doc = ""] + #[doc = "Can only be called by Root origin."] + edit { + #[codec(compact)] + index: runtime_types::polkadot_parachain::primitives::Id, + #[codec(compact)] + cap: ::core::primitive::u128, + #[codec(compact)] + first_period: ::core::primitive::u32, + #[codec(compact)] + last_period: ::core::primitive::u32, + #[codec(compact)] + end: ::core::primitive::u32, + verifier: + ::core::option::Option, + }, + #[codec(index = 6)] + #[doc = "Add an optional memo to an existing crowdloan contribution."] + #[doc = ""] + #[doc = "Origin must be Signed, and the user must have contributed to the crowdloan."] + add_memo { + index: runtime_types::polkadot_parachain::primitives::Id, + memo: ::std::vec::Vec<::core::primitive::u8>, + }, + #[codec(index = 7)] + #[doc = "Poke the fund into `NewRaise`"] + #[doc = ""] + #[doc = "Origin must be Signed, and the fund has non-zero raise."] + poke { + index: runtime_types::polkadot_parachain::primitives::Id, + }, + #[codec(index = 8)] + #[doc = "Contribute your entire balance to a crowd sale. This will transfer the entire balance of a user over to fund a parachain"] + #[doc = "slot. It will be withdrawable when the crowdloan has ended and the funds are unused."] + contribute_all { + #[codec(compact)] + index: runtime_types::polkadot_parachain::primitives::Id, + signature: + ::core::option::Option, + }, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Error { + #[codec(index = 0)] + #[doc = "The current lease period is more than the first lease period."] + FirstPeriodInPast, + #[codec(index = 1)] + #[doc = "The first lease period needs to at least be less than 3 `max_value`."] + FirstPeriodTooFarInFuture, + #[codec(index = 2)] + #[doc = "Last lease period must be greater than first lease period."] + LastPeriodBeforeFirstPeriod, + #[codec(index = 3)] + #[doc = "The last lease period cannot be more than 3 periods after the first period."] + LastPeriodTooFarInFuture, + #[codec(index = 4)] + #[doc = "The campaign ends before the current block number. 
The end must be in the future."] + CannotEndInPast, + #[codec(index = 5)] + #[doc = "The end date for this crowdloan is not sensible."] + EndTooFarInFuture, + #[codec(index = 6)] + #[doc = "There was an overflow."] + Overflow, + #[codec(index = 7)] + #[doc = "The contribution was below the minimum, `MinContribution`."] + ContributionTooSmall, + #[codec(index = 8)] + #[doc = "Invalid fund index."] + InvalidParaId, + #[codec(index = 9)] + #[doc = "Contributions exceed maximum amount."] + CapExceeded, + #[codec(index = 10)] + #[doc = "The contribution period has already ended."] + ContributionPeriodOver, + #[codec(index = 11)] + #[doc = "The origin of this call is invalid."] + InvalidOrigin, + #[codec(index = 12)] + #[doc = "This crowdloan does not correspond to a parachain."] + NotParachain, + #[codec(index = 13)] + #[doc = "This parachain lease is still active and retirement cannot yet begin."] + LeaseActive, + #[codec(index = 14)] + #[doc = "This parachain's bid or lease is still active and withdraw cannot yet begin."] + BidOrLeaseActive, + #[codec(index = 15)] + #[doc = "The crowdloan has not yet ended."] + FundNotEnded, + #[codec(index = 16)] + #[doc = "There are no contributions stored in this crowdloan."] + NoContributions, + #[codec(index = 17)] + #[doc = "The crowdloan is not ready to dissolve. Potentially still has a slot or in retirement period."] + NotReadyToDissolve, + #[codec(index = 18)] + #[doc = "Invalid signature."] + InvalidSignature, + #[codec(index = 19)] + #[doc = "The provided memo is too large."] + MemoTooLarge, + #[codec(index = 20)] + #[doc = "The fund is already in `NewRaise`"] + AlreadyInNewRaise, + #[codec(index = 21)] + #[doc = "No contributions allowed during the VRF delay"] + VrfDelayInProgress, + #[codec(index = 22)] + #[doc = "A lease period has not started yet, due to an offset in the starting block."] + NoLeasePeriod, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Event { + #[codec(index = 0)] + #[doc = "Create a new crowdloaning campaign. `[fund_index]`"] + Created(runtime_types::polkadot_parachain::primitives::Id), + #[codec(index = 1)] + #[doc = "Contributed to a crowd sale. `[who, fund_index, amount]`"] + Contributed( + ::subxt::sp_core::crypto::AccountId32, + runtime_types::polkadot_parachain::primitives::Id, + ::core::primitive::u128, + ), + #[codec(index = 2)] + #[doc = "Withdrew full balance of a contributor. `[who, fund_index, amount]`"] + Withdrew( + ::subxt::sp_core::crypto::AccountId32, + runtime_types::polkadot_parachain::primitives::Id, + ::core::primitive::u128, + ), + #[codec(index = 3)] + #[doc = "The loans in a fund have been partially dissolved, i.e. there are some left"] + #[doc = "over child keys that still need to be killed. `[fund_index]`"] + PartiallyRefunded(runtime_types::polkadot_parachain::primitives::Id), + #[codec(index = 4)] + #[doc = "All loans in a fund have been refunded. `[fund_index]`"] + AllRefunded(runtime_types::polkadot_parachain::primitives::Id), + #[codec(index = 5)] + #[doc = "Fund is dissolved. `[fund_index]`"] + Dissolved(runtime_types::polkadot_parachain::primitives::Id), + #[codec(index = 6)] + #[doc = "The result of trying to submit a new bid to the Slots pallet."] + HandleBidResult( + runtime_types::polkadot_parachain::primitives::Id, + ::core::result::Result<(), runtime_types::sp_runtime::DispatchError>, + ), + #[codec(index = 7)] + #[doc = "The configuration to a crowdloan has been edited. 
`[fund_index]`"] + Edited(runtime_types::polkadot_parachain::primitives::Id), + #[codec(index = 8)] + #[doc = "A memo has been updated. `[who, fund_index, memo]`"] + MemoUpdated( + ::subxt::sp_core::crypto::AccountId32, + runtime_types::polkadot_parachain::primitives::Id, + ::std::vec::Vec<::core::primitive::u8>, + ), + #[codec(index = 9)] + #[doc = "A parachain has been moved to `NewRaise`"] + AddedToNewRaise(runtime_types::polkadot_parachain::primitives::Id), + } + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct FundInfo<_0, _1, _2, _3> { + pub depositor: _0, + pub verifier: ::core::option::Option, + pub deposit: _1, + pub raised: _1, + pub end: _2, + pub cap: _1, + pub last_contribution: + runtime_types::polkadot_runtime_common::crowdloan::LastContribution<_2>, + pub first_period: _2, + pub last_period: _2, + pub fund_index: _2, + #[codec(skip)] + pub __subxt_unused_type_params: ::core::marker::PhantomData<_3>, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum LastContribution<_0> { + #[codec(index = 0)] + Never, + #[codec(index = 1)] + PreEnding(_0), + #[codec(index = 2)] + Ending(_0), + } + } + pub mod paras_registrar { + use super::runtime_types; + pub mod pallet { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Call { + #[codec(index = 0)] + #[doc = "Register head data and validation code for a reserved Para Id."] + #[doc = ""] + #[doc = "## Arguments"] + #[doc = "- `origin`: Must be called by a `Signed` origin."] + #[doc = "- `id`: The para ID. Must be owned/managed by the `origin` signing account."] + #[doc = "- `genesis_head`: The genesis head data of the parachain/thread."] + #[doc = "- `validation_code`: The initial validation code of the parachain/thread."] + #[doc = ""] + #[doc = "## Deposits/Fees"] + #[doc = "The origin signed account must reserve a corresponding deposit for the registration. Anything already"] + #[doc = "reserved previously for this para ID is accounted for."] + #[doc = ""] + #[doc = "## Events"] + #[doc = "The `Registered` event is emitted in case of success."] + register { + id: runtime_types::polkadot_parachain::primitives::Id, + genesis_head: runtime_types::polkadot_parachain::primitives::HeadData, + validation_code: + runtime_types::polkadot_parachain::primitives::ValidationCode, + }, + #[codec(index = 1)] + #[doc = "Force the registration of a Para Id on the relay chain."] + #[doc = ""] + #[doc = "This function must be called by a Root origin."] + #[doc = ""] + #[doc = "The deposit taken can be specified for this registration. Any `ParaId`"] + #[doc = "can be registered, including sub-1000 IDs which are System Parachains."] + force_register { + who: ::subxt::sp_core::crypto::AccountId32, + deposit: ::core::primitive::u128, + id: runtime_types::polkadot_parachain::primitives::Id, + genesis_head: runtime_types::polkadot_parachain::primitives::HeadData, + validation_code: + runtime_types::polkadot_parachain::primitives::ValidationCode, + }, + #[codec(index = 2)] + #[doc = "Deregister a Para Id, freeing all data and returning any deposit."] + #[doc = ""] + #[doc = "The caller must be Root, the `para` owner, or the `para` itself. 
The para must be a parathread."] + deregister { + id: runtime_types::polkadot_parachain::primitives::Id, + }, + #[codec(index = 3)] + #[doc = "Swap a parachain with another parachain or parathread."] + #[doc = ""] + #[doc = "The origin must be Root, the `para` owner, or the `para` itself."] + #[doc = ""] + #[doc = "The swap will happen only if there is already an opposite swap pending. If there is not,"] + #[doc = "the swap will be stored in the pending swaps map, ready for a later confirmatory swap."] + #[doc = ""] + #[doc = "The `ParaId`s remain mapped to the same head data and code so external code can rely on"] + #[doc = "`ParaId` to be a long-term identifier of a notional \"parachain\". However, their"] + #[doc = "scheduling info (i.e. whether they're a parathread or parachain), auction information"] + #[doc = "and the auction deposit are switched."] + swap { + id: runtime_types::polkadot_parachain::primitives::Id, + other: runtime_types::polkadot_parachain::primitives::Id, + }, + #[codec(index = 4)] + #[doc = "Remove a manager lock from a para. This will allow the manager of a"] + #[doc = "previously locked para to deregister or swap a para without using governance."] + #[doc = ""] + #[doc = "Can only be called by the Root origin."] + force_remove_lock { + para: runtime_types::polkadot_parachain::primitives::Id, + }, + #[codec(index = 5)] + #[doc = "Reserve a Para Id on the relay chain."] + #[doc = ""] + #[doc = "This function will reserve a new Para Id to be owned/managed by the origin account."] + #[doc = "The origin account is able to register head data and validation code using `register` to create"] + #[doc = "a parathread. Using the Slots pallet, a parathread can then be upgraded to get a parachain slot."] + #[doc = ""] + #[doc = "## Arguments"] + #[doc = "- `origin`: Must be called by a `Signed` origin. Becomes the manager/owner of the new para ID."] + #[doc = ""] + #[doc = "## Deposits/Fees"] + #[doc = "The origin must reserve a deposit of `ParaDeposit` for the registration."] + #[doc = ""] + #[doc = "## Events"] + #[doc = "The `Reserved` event is emitted in case of success, which provides the ID reserved for use."] + reserve, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Error { + #[codec(index = 0)] + #[doc = "The ID is not registered."] + NotRegistered, + #[codec(index = 1)] + #[doc = "The ID is already registered."] + AlreadyRegistered, + #[codec(index = 2)] + #[doc = "The caller is not the owner of this Id."] + NotOwner, + #[codec(index = 3)] + #[doc = "Invalid para code size."] + CodeTooLarge, + #[codec(index = 4)] + #[doc = "Invalid para head data size."] + HeadDataTooLarge, + #[codec(index = 5)] + #[doc = "Para is not a Parachain."] + NotParachain, + #[codec(index = 6)] + #[doc = "Para is not a Parathread."] + NotParathread, + #[codec(index = 7)] + #[doc = "Cannot deregister para"] + CannotDeregister, + #[codec(index = 8)] + #[doc = "Cannot schedule downgrade of parachain to parathread"] + CannotDowngrade, + #[codec(index = 9)] + #[doc = "Cannot schedule upgrade of parathread to parachain"] + CannotUpgrade, + #[codec(index = 10)] + #[doc = "Para is locked from manipulation by the manager. 
Must use parachain or relay chain governance."] + ParaLocked, + #[codec(index = 11)] + #[doc = "The ID given for registration has not been reserved."] + NotReserved, + #[codec(index = 12)] + #[doc = "Registering parachain with empty code is not allowed."] + EmptyCode, + #[codec(index = 13)] + #[doc = "Cannot perform a parachain slot / lifecycle swap. Check that the state of both paras are"] + #[doc = "correct for the swap to work."] + CannotSwap, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Event { + #[codec(index = 0)] + Registered( + runtime_types::polkadot_parachain::primitives::Id, + ::subxt::sp_core::crypto::AccountId32, + ), + #[codec(index = 1)] + Deregistered(runtime_types::polkadot_parachain::primitives::Id), + #[codec(index = 2)] + Reserved( + runtime_types::polkadot_parachain::primitives::Id, + ::subxt::sp_core::crypto::AccountId32, + ), + } + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct ParaInfo<_0, _1> { + pub manager: _0, + pub deposit: _1, + pub locked: ::core::primitive::bool, + } + } + pub mod paras_sudo_wrapper { + use super::runtime_types; + pub mod pallet { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Call { + #[codec(index = 0)] + #[doc = "Schedule a para to be initialized at the start of the next session."] + sudo_schedule_para_initialize { + id: runtime_types::polkadot_parachain::primitives::Id, + genesis: + runtime_types::polkadot_runtime_parachains::paras::ParaGenesisArgs, + }, + #[codec(index = 1)] + #[doc = "Schedule a para to be cleaned up at the start of the next session."] + sudo_schedule_para_cleanup { + id: runtime_types::polkadot_parachain::primitives::Id, + }, + #[codec(index = 2)] + #[doc = "Upgrade a parathread to a parachain"] + sudo_schedule_parathread_upgrade { + id: runtime_types::polkadot_parachain::primitives::Id, + }, + #[codec(index = 3)] + #[doc = "Downgrade a parachain to a parathread"] + sudo_schedule_parachain_downgrade { + id: runtime_types::polkadot_parachain::primitives::Id, + }, + #[codec(index = 4)] + #[doc = "Send a downward XCM to the given para."] + #[doc = ""] + #[doc = "The given parachain should exist and the payload should not exceed the preconfigured size"] + #[doc = "`config.max_downward_message_size`."] + sudo_queue_downward_xcm { + id: runtime_types::polkadot_parachain::primitives::Id, + xcm: ::std::boxed::Box, + }, + #[codec(index = 5)] + #[doc = "Forcefully establish a channel from the sender to the recipient."] + #[doc = ""] + #[doc = "This is equivalent to sending an `Hrmp::hrmp_init_open_channel` extrinsic followed by"] + #[doc = "`Hrmp::hrmp_accept_open_channel`."] + sudo_establish_hrmp_channel { + sender: runtime_types::polkadot_parachain::primitives::Id, + recipient: runtime_types::polkadot_parachain::primitives::Id, + max_capacity: ::core::primitive::u32, + max_message_size: ::core::primitive::u32, + }, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Error { + #[codec(index = 0)] + #[doc = "The specified parachain or parathread is not registered."] + ParaDoesntExist, + #[codec(index = 1)] + #[doc = "The specified parachain or parathread is already registered."] + ParaAlreadyExists, + #[codec(index = 2)] + #[doc = "A DMP message couldn't be sent because it exceeds the maximum size allowed for a downward"] + #[doc = "message."] + ExceedsMaxMessageSize, + #[codec(index = 3)] + #[doc = "Could not schedule para 
cleanup."] + CouldntCleanup, + #[codec(index = 4)] + #[doc = "Not a parathread."] + NotParathread, + #[codec(index = 5)] + #[doc = "Not a parachain."] + NotParachain, + #[codec(index = 6)] + #[doc = "Cannot upgrade parathread."] + CannotUpgrade, + #[codec(index = 7)] + #[doc = "Cannot downgrade parachain."] + CannotDowngrade, + } + } + } + pub mod slots { + use super::runtime_types; + pub mod pallet { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Call { + #[codec(index = 0)] + #[doc = "Just a connect into the `lease_out` call, in case Root wants to force some lease to happen"] + #[doc = "independently of any other on-chain mechanism to use it."] + #[doc = ""] + #[doc = "The dispatch origin for this call must match `T::ForceOrigin`."] + force_lease { + para: runtime_types::polkadot_parachain::primitives::Id, + leaser: ::subxt::sp_core::crypto::AccountId32, + amount: ::core::primitive::u128, + period_begin: ::core::primitive::u32, + period_count: ::core::primitive::u32, + }, + #[codec(index = 1)] + #[doc = "Clear all leases for a Para Id, refunding any deposits back to the original owners."] + #[doc = ""] + #[doc = "The dispatch origin for this call must match `T::ForceOrigin`."] + clear_all_leases { + para: runtime_types::polkadot_parachain::primitives::Id, + }, + #[codec(index = 2)] + #[doc = "Try to onboard a parachain that has a lease for the current lease period."] + #[doc = ""] + #[doc = "This function can be useful if there was some state issue with a para that should"] + #[doc = "have onboarded, but was unable to. As long as they have a lease period, we can"] + #[doc = "let them onboard from here."] + #[doc = ""] + #[doc = "Origin must be signed, but can be called by anyone."] + trigger_onboard { + para: runtime_types::polkadot_parachain::primitives::Id, + }, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Error { + #[codec(index = 0)] + #[doc = "The parachain ID is not onboarding."] + ParaNotOnboarding, + #[codec(index = 1)] + #[doc = "There was an error with the lease."] + LeaseError, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Event { + #[codec(index = 0)] + #[doc = "A new `[lease_period]` is beginning."] + NewLeasePeriod(::core::primitive::u32), + #[codec(index = 1)] + #[doc = "A para has won the right to a continuous set of lease periods as a parachain."] + #[doc = "First balance is any extra amount reserved on top of the para's existing deposit."] + #[doc = "Second balance is the total amount reserved."] + #[doc = "`[parachain_id, leaser, period_begin, period_count, extra_reserved, total_amount]`"] + Leased( + runtime_types::polkadot_parachain::primitives::Id, + ::subxt::sp_core::crypto::AccountId32, + ::core::primitive::u32, + ::core::primitive::u32, + ::core::primitive::u128, + ::core::primitive::u128, + ), + } + } + } + } + pub mod polkadot_runtime_parachains { + use super::runtime_types; + pub mod configuration { + use super::runtime_types; + pub mod pallet { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Call { + #[codec(index = 0)] + #[doc = "Set the validation upgrade cooldown."] + set_validation_upgrade_cooldown { new: ::core::primitive::u32 }, + #[codec(index = 1)] + #[doc = "Set the validation upgrade delay."] + set_validation_upgrade_delay { new: ::core::primitive::u32 }, + #[codec(index = 2)] + #[doc = "Set the acceptance period for an 
included candidate."] + set_code_retention_period { new: ::core::primitive::u32 }, + #[codec(index = 3)] + #[doc = "Set the max validation code size for incoming upgrades."] + set_max_code_size { new: ::core::primitive::u32 }, + #[codec(index = 4)] + #[doc = "Set the max POV block size for incoming upgrades."] + set_max_pov_size { new: ::core::primitive::u32 }, + #[codec(index = 5)] + #[doc = "Set the max head data size for paras."] + set_max_head_data_size { new: ::core::primitive::u32 }, + #[codec(index = 6)] + #[doc = "Set the number of parathread execution cores."] + set_parathread_cores { new: ::core::primitive::u32 }, + #[codec(index = 7)] + #[doc = "Set the number of retries for a particular parathread."] + set_parathread_retries { new: ::core::primitive::u32 }, + #[codec(index = 8)] + #[doc = "Set the parachain validator-group rotation frequency"] + set_group_rotation_frequency { new: ::core::primitive::u32 }, + #[codec(index = 9)] + #[doc = "Set the availability period for parachains."] + set_chain_availability_period { new: ::core::primitive::u32 }, + #[codec(index = 10)] + #[doc = "Set the availability period for parathreads."] + set_thread_availability_period { new: ::core::primitive::u32 }, + #[codec(index = 11)] + #[doc = "Set the scheduling lookahead, in expected number of blocks at peak throughput."] + set_scheduling_lookahead { new: ::core::primitive::u32 }, + #[codec(index = 12)] + #[doc = "Set the maximum number of validators to assign to any core."] + set_max_validators_per_core { + new: ::core::option::Option<::core::primitive::u32>, + }, + #[codec(index = 13)] + #[doc = "Set the maximum number of validators to use in parachain consensus."] + set_max_validators { + new: ::core::option::Option<::core::primitive::u32>, + }, + #[codec(index = 14)] + #[doc = "Set the dispute period, in number of sessions to keep for disputes."] + set_dispute_period { new: ::core::primitive::u32 }, + #[codec(index = 15)] + #[doc = "Set the dispute post conclusion acceptance period."] + set_dispute_post_conclusion_acceptance_period { + new: ::core::primitive::u32, + }, + #[codec(index = 16)] + #[doc = "Set the maximum number of dispute spam slots."] + set_dispute_max_spam_slots { new: ::core::primitive::u32 }, + #[codec(index = 17)] + #[doc = "Set the dispute conclusion by time out period."] + set_dispute_conclusion_by_time_out_period { new: ::core::primitive::u32 }, + #[codec(index = 18)] + #[doc = "Set the no show slots, in number of number of consensus slots."] + #[doc = "Must be at least 1."] + set_no_show_slots { new: ::core::primitive::u32 }, + #[codec(index = 19)] + #[doc = "Set the total number of delay tranches."] + set_n_delay_tranches { new: ::core::primitive::u32 }, + #[codec(index = 20)] + #[doc = "Set the zeroth delay tranche width."] + set_zeroth_delay_tranche_width { new: ::core::primitive::u32 }, + #[codec(index = 21)] + #[doc = "Set the number of validators needed to approve a block."] + set_needed_approvals { new: ::core::primitive::u32 }, + #[codec(index = 22)] + #[doc = "Set the number of samples to do of the `RelayVRFModulo` approval assignment criterion."] + set_relay_vrf_modulo_samples { new: ::core::primitive::u32 }, + #[codec(index = 23)] + #[doc = "Sets the maximum items that can present in a upward dispatch queue at once."] + set_max_upward_queue_count { new: ::core::primitive::u32 }, + #[codec(index = 24)] + #[doc = "Sets the maximum total size of items that can present in a upward dispatch queue at once."] + set_max_upward_queue_size { new: 
::core::primitive::u32 }, + #[codec(index = 25)] + #[doc = "Set the critical downward message size."] + set_max_downward_message_size { new: ::core::primitive::u32 }, + #[codec(index = 26)] + #[doc = "Sets the soft limit for the phase of dispatching dispatchable upward messages."] + set_ump_service_total_weight { new: ::core::primitive::u64 }, + #[codec(index = 27)] + #[doc = "Sets the maximum size of an upward message that can be sent by a candidate."] + set_max_upward_message_size { new: ::core::primitive::u32 }, + #[codec(index = 28)] + #[doc = "Sets the maximum number of messages that a candidate can contain."] + set_max_upward_message_num_per_candidate { new: ::core::primitive::u32 }, + #[codec(index = 29)] + #[doc = "Sets the number of sessions after which an HRMP open channel request expires."] + set_hrmp_open_request_ttl { new: ::core::primitive::u32 }, + #[codec(index = 30)] + #[doc = "Sets the amount of funds that the sender should provide for opening an HRMP channel."] + set_hrmp_sender_deposit { new: ::core::primitive::u128 }, + #[codec(index = 31)] + #[doc = "Sets the amount of funds that the recipient should provide for accepting opening an HRMP"] + #[doc = "channel."] + set_hrmp_recipient_deposit { new: ::core::primitive::u128 }, + #[codec(index = 32)] + #[doc = "Sets the maximum number of messages allowed in an HRMP channel at once."] + set_hrmp_channel_max_capacity { new: ::core::primitive::u32 }, + #[codec(index = 33)] + #[doc = "Sets the maximum total size of messages in bytes allowed in an HRMP channel at once."] + set_hrmp_channel_max_total_size { new: ::core::primitive::u32 }, + #[codec(index = 34)] + #[doc = "Sets the maximum number of inbound HRMP channels a parachain is allowed to accept."] + set_hrmp_max_parachain_inbound_channels { new: ::core::primitive::u32 }, + #[codec(index = 35)] + #[doc = "Sets the maximum number of inbound HRMP channels a parathread is allowed to accept."] + set_hrmp_max_parathread_inbound_channels { new: ::core::primitive::u32 }, + #[codec(index = 36)] + #[doc = "Sets the maximum size of a message that could ever be put into an HRMP channel."] + set_hrmp_channel_max_message_size { new: ::core::primitive::u32 }, + #[codec(index = 37)] + #[doc = "Sets the maximum number of outbound HRMP channels a parachain is allowed to open."] + set_hrmp_max_parachain_outbound_channels { new: ::core::primitive::u32 }, + #[codec(index = 38)] + #[doc = "Sets the maximum number of outbound HRMP channels a parathread is allowed to open."] + set_hrmp_max_parathread_outbound_channels { new: ::core::primitive::u32 }, + #[codec(index = 39)] + #[doc = "Sets the maximum number of outbound HRMP messages can be sent by a candidate."] + set_hrmp_max_message_num_per_candidate { new: ::core::primitive::u32 }, + #[codec(index = 40)] + #[doc = "Sets the maximum amount of weight any individual upward message may consume."] + set_ump_max_individual_weight { new: ::core::primitive::u64 }, + #[codec(index = 41)] + #[doc = "Enable or disable PVF pre-checking. 
Consult the field documentation prior executing."] + set_pvf_checking_enabled { new: ::core::primitive::bool }, + #[codec(index = 42)] + #[doc = "Set the number of session changes after which a PVF pre-checking voting is rejected."] + set_pvf_voting_ttl { new: ::core::primitive::u32 }, + #[codec(index = 43)] + #[doc = "Sets the minimum delay between announcing the upgrade block for a parachain until the"] + #[doc = "upgrade taking place."] + #[doc = ""] + #[doc = "See the field documentation for information and constraints for the new value."] + set_minimum_validation_upgrade_delay { new: ::core::primitive::u32 }, + #[codec(index = 44)] + #[doc = "Setting this to true will disable consistency checks for the configuration setters."] + #[doc = "Use with caution."] + set_bypass_consistency_check { new: ::core::primitive::bool }, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Error { + #[codec(index = 0)] + #[doc = "The new value for a configuration parameter is invalid."] + InvalidNewValue, + } + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct HostConfiguration<_0> { + pub max_code_size: _0, + pub max_head_data_size: _0, + pub max_upward_queue_count: _0, + pub max_upward_queue_size: _0, + pub max_upward_message_size: _0, + pub max_upward_message_num_per_candidate: _0, + pub hrmp_max_message_num_per_candidate: _0, + pub validation_upgrade_cooldown: _0, + pub validation_upgrade_delay: _0, + pub max_pov_size: _0, + pub max_downward_message_size: _0, + pub ump_service_total_weight: ::core::primitive::u64, + pub hrmp_max_parachain_outbound_channels: _0, + pub hrmp_max_parathread_outbound_channels: _0, + pub hrmp_sender_deposit: ::core::primitive::u128, + pub hrmp_recipient_deposit: ::core::primitive::u128, + pub hrmp_channel_max_capacity: _0, + pub hrmp_channel_max_total_size: _0, + pub hrmp_max_parachain_inbound_channels: _0, + pub hrmp_max_parathread_inbound_channels: _0, + pub hrmp_channel_max_message_size: _0, + pub code_retention_period: _0, + pub parathread_cores: _0, + pub parathread_retries: _0, + pub group_rotation_frequency: _0, + pub chain_availability_period: _0, + pub thread_availability_period: _0, + pub scheduling_lookahead: _0, + pub max_validators_per_core: ::core::option::Option<_0>, + pub max_validators: ::core::option::Option<_0>, + pub dispute_period: _0, + pub dispute_post_conclusion_acceptance_period: _0, + pub dispute_max_spam_slots: _0, + pub dispute_conclusion_by_time_out_period: _0, + pub no_show_slots: _0, + pub n_delay_tranches: _0, + pub zeroth_delay_tranche_width: _0, + pub needed_approvals: _0, + pub relay_vrf_modulo_samples: _0, + pub ump_max_individual_weight: ::core::primitive::u64, + pub pvf_checking_enabled: ::core::primitive::bool, + pub pvf_voting_ttl: _0, + pub minimum_validation_upgrade_delay: _0, + } + } + pub mod disputes { + use super::runtime_types; + pub mod pallet { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Call { + #[codec(index = 0)] + force_unfreeze, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Error { + #[codec(index = 0)] + #[doc = "Duplicate dispute statement sets provided."] + DuplicateDisputeStatementSets, + #[codec(index = 1)] + #[doc = "Ancient dispute statement provided."] + AncientDisputeStatement, + #[codec(index = 2)] + #[doc = "Validator index on statement is out of bounds for session."] + ValidatorIndexOutOfBounds, + 
#[codec(index = 3)] + #[doc = "Invalid signature on statement."] + InvalidSignature, + #[codec(index = 4)] + #[doc = "Validator vote submitted more than once to dispute."] + DuplicateStatement, + #[codec(index = 5)] + #[doc = "Too many spam slots used by some specific validator."] + PotentialSpam, + #[codec(index = 6)] + #[doc = "A dispute where there are only votes on one side."] + SingleSidedDispute, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Event { + #[codec(index = 0)] + #[doc = "A dispute has been initiated. \\[candidate hash, dispute location\\]"] + DisputeInitiated( + runtime_types::polkadot_core_primitives::CandidateHash, + runtime_types::polkadot_runtime_parachains::disputes::DisputeLocation, + ), + #[codec(index = 1)] + #[doc = "A dispute has concluded for or against a candidate."] + #[doc = "`\\[para id, candidate hash, dispute result\\]`"] + DisputeConcluded( + runtime_types::polkadot_core_primitives::CandidateHash, + runtime_types::polkadot_runtime_parachains::disputes::DisputeResult, + ), + #[codec(index = 2)] + #[doc = "A dispute has timed out due to insufficient participation."] + #[doc = "`\\[para id, candidate hash\\]`"] + DisputeTimedOut(runtime_types::polkadot_core_primitives::CandidateHash), + #[codec(index = 3)] + #[doc = "A dispute has concluded with supermajority against a candidate."] + #[doc = "Block authors should no longer build on top of this head and should"] + #[doc = "instead revert the block at the given height. This should be the"] + #[doc = "number of the child of the last known valid block in the chain."] + Revert(::core::primitive::u32), + } + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum DisputeLocation { + #[codec(index = 0)] + Local, + #[codec(index = 1)] + Remote, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum DisputeResult { + #[codec(index = 0)] + Valid, + #[codec(index = 1)] + Invalid, + } + } + pub mod dmp { + use super::runtime_types; + pub mod pallet { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Call {} + } + } + pub mod hrmp { + use super::runtime_types; + pub mod pallet { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Call { + #[codec(index = 0)] + #[doc = "Initiate opening a channel from a parachain to a given recipient with given channel"] + #[doc = "parameters."] + #[doc = ""] + #[doc = "- `proposed_max_capacity` - specifies how many messages can be in the channel at once."] + #[doc = "- `proposed_max_message_size` - specifies the maximum size of the messages."] + #[doc = ""] + #[doc = "These numbers are a subject to the relay-chain configuration limits."] + #[doc = ""] + #[doc = "The channel can be opened only after the recipient confirms it and only on a session"] + #[doc = "change."] + hrmp_init_open_channel { + recipient: runtime_types::polkadot_parachain::primitives::Id, + proposed_max_capacity: ::core::primitive::u32, + proposed_max_message_size: ::core::primitive::u32, + }, + #[codec(index = 1)] + #[doc = "Accept a pending open channel request from the given sender."] + #[doc = ""] + #[doc = "The channel will be opened only on the next session boundary."] + hrmp_accept_open_channel { + sender: runtime_types::polkadot_parachain::primitives::Id, + }, + #[codec(index = 2)] + #[doc = "Initiate unilateral closing of a channel. 
The origin must be either the sender or the"] + #[doc = "recipient in the channel being closed."] + #[doc = ""] + #[doc = "The closure can only happen on a session change."] + hrmp_close_channel { + channel_id: + runtime_types::polkadot_parachain::primitives::HrmpChannelId, + }, + #[codec(index = 3)] + #[doc = "This extrinsic triggers the cleanup of all the HRMP storage items that"] + #[doc = "a para may have. Normally this happens once per session, but this allows"] + #[doc = "you to trigger the cleanup immediately for a specific parachain."] + #[doc = ""] + #[doc = "Origin must be Root."] + #[doc = ""] + #[doc = "Number of inbound and outbound channels for `para` must be provided as witness data of weighing."] + force_clean_hrmp { + para: runtime_types::polkadot_parachain::primitives::Id, + inbound: ::core::primitive::u32, + outbound: ::core::primitive::u32, + }, + #[codec(index = 4)] + #[doc = "Force process HRMP open channel requests."] + #[doc = ""] + #[doc = "If there are pending HRMP open channel requests, you can use this"] + #[doc = "function process all of those requests immediately."] + #[doc = ""] + #[doc = "Total number of opening channels must be provided as witness data of weighing."] + force_process_hrmp_open { channels: ::core::primitive::u32 }, + #[codec(index = 5)] + #[doc = "Force process HRMP close channel requests."] + #[doc = ""] + #[doc = "If there are pending HRMP close channel requests, you can use this"] + #[doc = "function process all of those requests immediately."] + #[doc = ""] + #[doc = "Total number of closing channels must be provided as witness data of weighing."] + force_process_hrmp_close { channels: ::core::primitive::u32 }, + #[codec(index = 6)] + #[doc = "This cancels a pending open channel request. It can be canceled by either of the sender"] + #[doc = "or the recipient for that request. The origin must be either of those."] + #[doc = ""] + #[doc = "The cancellation happens immediately. It is not possible to cancel the request if it is"] + #[doc = "already accepted."] + #[doc = ""] + #[doc = "Total number of open requests (i.e. 
`HrmpOpenChannelRequestsList`) must be provided as"] + #[doc = "witness data."] + hrmp_cancel_open_request { + channel_id: + runtime_types::polkadot_parachain::primitives::HrmpChannelId, + open_requests: ::core::primitive::u32, + }, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Error { + #[codec(index = 0)] + #[doc = "The sender tried to open a channel to themselves."] + OpenHrmpChannelToSelf, + #[codec(index = 1)] + #[doc = "The recipient is not a valid para."] + OpenHrmpChannelInvalidRecipient, + #[codec(index = 2)] + #[doc = "The requested capacity is zero."] + OpenHrmpChannelZeroCapacity, + #[codec(index = 3)] + #[doc = "The requested capacity exceeds the global limit."] + OpenHrmpChannelCapacityExceedsLimit, + #[codec(index = 4)] + #[doc = "The requested maximum message size is 0."] + OpenHrmpChannelZeroMessageSize, + #[codec(index = 5)] + #[doc = "The open request requested the message size that exceeds the global limit."] + OpenHrmpChannelMessageSizeExceedsLimit, + #[codec(index = 6)] + #[doc = "The channel already exists"] + OpenHrmpChannelAlreadyExists, + #[codec(index = 7)] + #[doc = "There is already a request to open the same channel."] + OpenHrmpChannelAlreadyRequested, + #[codec(index = 8)] + #[doc = "The sender already has the maximum number of allowed outbound channels."] + OpenHrmpChannelLimitExceeded, + #[codec(index = 9)] + #[doc = "The channel from the sender to the origin doesn't exist."] + AcceptHrmpChannelDoesntExist, + #[codec(index = 10)] + #[doc = "The channel is already confirmed."] + AcceptHrmpChannelAlreadyConfirmed, + #[codec(index = 11)] + #[doc = "The recipient already has the maximum number of allowed inbound channels."] + AcceptHrmpChannelLimitExceeded, + #[codec(index = 12)] + #[doc = "The origin tries to close a channel where it is neither the sender nor the recipient."] + CloseHrmpChannelUnauthorized, + #[codec(index = 13)] + #[doc = "The channel to be closed doesn't exist."] + CloseHrmpChannelDoesntExist, + #[codec(index = 14)] + #[doc = "The channel close request is already requested."] + CloseHrmpChannelAlreadyUnderway, + #[codec(index = 15)] + #[doc = "Canceling is requested by neither the sender nor recipient of the open channel request."] + CancelHrmpOpenChannelUnauthorized, + #[codec(index = 16)] + #[doc = "The open request doesn't exist."] + OpenHrmpChannelDoesntExist, + #[codec(index = 17)] + #[doc = "Cannot cancel an HRMP open channel request because it is already confirmed."] + OpenHrmpChannelAlreadyConfirmed, + #[codec(index = 18)] + #[doc = "The provided witness data is wrong."] + WrongWitness, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Event { + #[codec(index = 0)] + #[doc = "Open HRMP channel requested."] + #[doc = "`[sender, recipient, proposed_max_capacity, proposed_max_message_size]`"] + OpenChannelRequested( + runtime_types::polkadot_parachain::primitives::Id, + runtime_types::polkadot_parachain::primitives::Id, + ::core::primitive::u32, + ::core::primitive::u32, + ), + #[codec(index = 1)] + #[doc = "An HRMP channel request sent by the receiver was canceled by either party."] + #[doc = "`[by_parachain, channel_id]`"] + OpenChannelCanceled( + runtime_types::polkadot_parachain::primitives::Id, + runtime_types::polkadot_parachain::primitives::HrmpChannelId, + ), + #[codec(index = 2)] + #[doc = "Open HRMP channel accepted. 
`[sender, recipient]`"] + OpenChannelAccepted( + runtime_types::polkadot_parachain::primitives::Id, + runtime_types::polkadot_parachain::primitives::Id, + ), + #[codec(index = 3)] + #[doc = "HRMP channel closed. `[by_parachain, channel_id]`"] + ChannelClosed( + runtime_types::polkadot_parachain::primitives::Id, + runtime_types::polkadot_parachain::primitives::HrmpChannelId, + ), + } + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct HrmpChannel { + pub max_capacity: ::core::primitive::u32, + pub max_total_size: ::core::primitive::u32, + pub max_message_size: ::core::primitive::u32, + pub msg_count: ::core::primitive::u32, + pub total_size: ::core::primitive::u32, + pub mqc_head: ::core::option::Option<::subxt::sp_core::H256>, + pub sender_deposit: ::core::primitive::u128, + pub recipient_deposit: ::core::primitive::u128, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct HrmpOpenChannelRequest { + pub confirmed: ::core::primitive::bool, + pub _age: ::core::primitive::u32, + pub sender_deposit: ::core::primitive::u128, + pub max_message_size: ::core::primitive::u32, + pub max_capacity: ::core::primitive::u32, + pub max_total_size: ::core::primitive::u32, + } + } + pub mod inclusion { + use super::runtime_types; + pub mod pallet { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Call {} + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Error { + #[codec(index = 0)] + #[doc = "Validator indices are out of order or contains duplicates."] + UnsortedOrDuplicateValidatorIndices, + #[codec(index = 1)] + #[doc = "Dispute statement sets are out of order or contain duplicates."] + UnsortedOrDuplicateDisputeStatementSet, + #[codec(index = 2)] + #[doc = "Backed candidates are out of order (core index) or contain duplicates."] + UnsortedOrDuplicateBackedCandidates, + #[codec(index = 3)] + #[doc = "A different relay parent was provided compared to the on-chain stored one."] + UnexpectedRelayParent, + #[codec(index = 4)] + #[doc = "Availability bitfield has unexpected size."] + WrongBitfieldSize, + #[codec(index = 5)] + #[doc = "Bitfield consists of zeros only."] + BitfieldAllZeros, + #[codec(index = 6)] + #[doc = "Multiple bitfields submitted by same validator or validators out of order by index."] + BitfieldDuplicateOrUnordered, + #[codec(index = 7)] + #[doc = "Validator index out of bounds."] + ValidatorIndexOutOfBounds, + #[codec(index = 8)] + #[doc = "Invalid signature"] + InvalidBitfieldSignature, + #[codec(index = 9)] + #[doc = "Candidate submitted but para not scheduled."] + UnscheduledCandidate, + #[codec(index = 10)] + #[doc = "Candidate scheduled despite pending candidate already existing for the para."] + CandidateScheduledBeforeParaFree, + #[codec(index = 11)] + #[doc = "Candidate included with the wrong collator."] + WrongCollator, + #[codec(index = 12)] + #[doc = "Scheduled cores out of order."] + ScheduledOutOfOrder, + #[codec(index = 13)] + #[doc = "Head data exceeds the configured maximum."] + HeadDataTooLarge, + #[codec(index = 14)] + #[doc = "Code upgrade prematurely."] + PrematureCodeUpgrade, + #[codec(index = 15)] + #[doc = "Output code is too large"] + NewCodeTooLarge, + #[codec(index = 16)] + #[doc = "Candidate not in parent context."] + CandidateNotInParentContext, + #[codec(index = 17)] + #[doc = "Invalid group index in core assignment."] + InvalidGroupIndex, + #[codec(index = 18)] + #[doc = 
"Insufficient (non-majority) backing."] + InsufficientBacking, + #[codec(index = 19)] + #[doc = "Invalid (bad signature, unknown validator, etc.) backing."] + InvalidBacking, + #[codec(index = 20)] + #[doc = "Collator did not sign PoV."] + NotCollatorSigned, + #[codec(index = 21)] + #[doc = "The validation data hash does not match expected."] + ValidationDataHashMismatch, + #[codec(index = 22)] + #[doc = "The downward message queue is not processed correctly."] + IncorrectDownwardMessageHandling, + #[codec(index = 23)] + #[doc = "At least one upward message sent does not pass the acceptance criteria."] + InvalidUpwardMessages, + #[codec(index = 24)] + #[doc = "The candidate didn't follow the rules of HRMP watermark advancement."] + HrmpWatermarkMishandling, + #[codec(index = 25)] + #[doc = "The HRMP messages sent by the candidate is not valid."] + InvalidOutboundHrmp, + #[codec(index = 26)] + #[doc = "The validation code hash of the candidate is not valid."] + InvalidValidationCodeHash, + #[codec(index = 27)] + #[doc = "The `para_head` hash in the candidate descriptor doesn't match the hash of the actual para head in the"] + #[doc = "commitments."] + ParaHeadMismatch, + #[codec(index = 28)] + #[doc = "A bitfield that references a freed core,"] + #[doc = "either intentionally or as part of a concluded"] + #[doc = "invalid dispute."] + BitfieldReferencesFreedCore, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Event { + #[codec(index = 0)] + #[doc = "A candidate was backed. `[candidate, head_data]`"] + CandidateBacked( + runtime_types::polkadot_primitives::v2::CandidateReceipt< + ::subxt::sp_core::H256, + >, + runtime_types::polkadot_parachain::primitives::HeadData, + runtime_types::polkadot_primitives::v2::CoreIndex, + runtime_types::polkadot_primitives::v2::GroupIndex, + ), + #[codec(index = 1)] + #[doc = "A candidate was included. `[candidate, head_data]`"] + CandidateIncluded( + runtime_types::polkadot_primitives::v2::CandidateReceipt< + ::subxt::sp_core::H256, + >, + runtime_types::polkadot_parachain::primitives::HeadData, + runtime_types::polkadot_primitives::v2::CoreIndex, + runtime_types::polkadot_primitives::v2::GroupIndex, + ), + #[codec(index = 2)] + #[doc = "A candidate timed out. 
`[candidate, head_data]`"] + CandidateTimedOut( + runtime_types::polkadot_primitives::v2::CandidateReceipt< + ::subxt::sp_core::H256, + >, + runtime_types::polkadot_parachain::primitives::HeadData, + runtime_types::polkadot_primitives::v2::CoreIndex, + ), + } + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct AvailabilityBitfieldRecord<_0> { + pub bitfield: runtime_types::polkadot_primitives::v2::AvailabilityBitfield, + pub submitted_at: _0, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct CandidatePendingAvailability<_0, _1> { + pub core: runtime_types::polkadot_primitives::v2::CoreIndex, + pub hash: runtime_types::polkadot_core_primitives::CandidateHash, + pub descriptor: runtime_types::polkadot_primitives::v2::CandidateDescriptor<_0>, + pub availability_votes: ::subxt::bitvec::vec::BitVec< + ::core::primitive::u8, + ::subxt::bitvec::order::Lsb0, + >, + pub backers: ::subxt::bitvec::vec::BitVec< + ::core::primitive::u8, + ::subxt::bitvec::order::Lsb0, + >, + pub relay_parent_number: _1, + pub backed_in_number: _1, + pub backing_group: runtime_types::polkadot_primitives::v2::GroupIndex, + } + } + pub mod initializer { + use super::runtime_types; + pub mod pallet { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Call { + #[codec(index = 0)] + #[doc = "Issue a signal to the consensus engine to forcibly act as though all parachain"] + #[doc = "blocks in all relay chain blocks up to and including the given number in the current"] + #[doc = "chain are valid and should be finalized."] + force_approve { up_to: ::core::primitive::u32 }, + } + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct BufferedSessionChange { + pub validators: ::std::vec::Vec< + runtime_types::polkadot_primitives::v2::validator_app::Public, + >, + pub queued: ::std::vec::Vec< + runtime_types::polkadot_primitives::v2::validator_app::Public, + >, + pub session_index: ::core::primitive::u32, + } + } + pub mod origin { + use super::runtime_types; + pub mod pallet { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Origin { + #[codec(index = 0)] + Parachain(runtime_types::polkadot_parachain::primitives::Id), + } + } + } + pub mod paras { + use super::runtime_types; + pub mod pallet { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Call { + #[codec(index = 0)] + #[doc = "Set the storage for the parachain validation code immediately."] + force_set_current_code { + para: runtime_types::polkadot_parachain::primitives::Id, + new_code: runtime_types::polkadot_parachain::primitives::ValidationCode, + }, + #[codec(index = 1)] + #[doc = "Set the storage for the current parachain head data immediately."] + force_set_current_head { + para: runtime_types::polkadot_parachain::primitives::Id, + new_head: runtime_types::polkadot_parachain::primitives::HeadData, + }, + #[codec(index = 2)] + #[doc = "Schedule an upgrade as if it was scheduled in the given relay parent block."] + force_schedule_code_upgrade { + para: runtime_types::polkadot_parachain::primitives::Id, + new_code: runtime_types::polkadot_parachain::primitives::ValidationCode, + relay_parent_number: ::core::primitive::u32, + }, + #[codec(index = 3)] + #[doc = "Note a new block head for para within the context of the current block."] + force_note_new_head { 
+ para: runtime_types::polkadot_parachain::primitives::Id, + new_head: runtime_types::polkadot_parachain::primitives::HeadData, + }, + #[codec(index = 4)] + #[doc = "Put a parachain directly into the next session's action queue."] + #[doc = "We can't queue it any sooner than this without going into the"] + #[doc = "initializer..."] + force_queue_action { + para: runtime_types::polkadot_parachain::primitives::Id, + }, + #[codec(index = 5)] + #[doc = "Adds the validation code to the storage."] + #[doc = ""] + #[doc = "The code will not be added if it is already present. Additionally, if PVF pre-checking"] + #[doc = "is running for that code, it will be instantly accepted."] + #[doc = ""] + #[doc = "Otherwise, the code will be added into the storage. Note that the code will be added"] + #[doc = "into storage with reference count 0. This is to account the fact that there are no users"] + #[doc = "for this code yet. The caller will have to make sure that this code eventually gets"] + #[doc = "used by some parachain or removed from the storage to avoid storage leaks. For the latter"] + #[doc = "prefer to use the `poke_unused_validation_code` dispatchable to raw storage manipulation."] + #[doc = ""] + #[doc = "This function is mainly meant to be used for upgrading parachains that do not follow"] + #[doc = "the go-ahead signal while the PVF pre-checking feature is enabled."] + add_trusted_validation_code { + validation_code: + runtime_types::polkadot_parachain::primitives::ValidationCode, + }, + #[codec(index = 6)] + #[doc = "Remove the validation code from the storage iff the reference count is 0."] + #[doc = ""] + #[doc = "This is better than removing the storage directly, because it will not remove the code"] + #[doc = "that was suddenly got used by some parachain while this dispatchable was pending"] + #[doc = "dispatching."] + poke_unused_validation_code { + validation_code_hash: + runtime_types::polkadot_parachain::primitives::ValidationCodeHash, + }, + #[codec(index = 7)] + #[doc = "Includes a statement for a PVF pre-checking vote. 
Potentially, finalizes the vote and"] + #[doc = "enacts the results if that was the last vote before achieving the supermajority."] + include_pvf_check_statement { + stmt: runtime_types::polkadot_primitives::v2::PvfCheckStatement, + signature: + runtime_types::polkadot_primitives::v2::validator_app::Signature, + }, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Error { + #[codec(index = 0)] + #[doc = "Para is not registered in our system."] + NotRegistered, + #[codec(index = 1)] + #[doc = "Para cannot be onboarded because it is already tracked by our system."] + CannotOnboard, + #[codec(index = 2)] + #[doc = "Para cannot be offboarded at this time."] + CannotOffboard, + #[codec(index = 3)] + #[doc = "Para cannot be upgraded to a parachain."] + CannotUpgrade, + #[codec(index = 4)] + #[doc = "Para cannot be downgraded to a parathread."] + CannotDowngrade, + #[codec(index = 5)] + #[doc = "The statement for PVF pre-checking is stale."] + PvfCheckStatementStale, + #[codec(index = 6)] + #[doc = "The statement for PVF pre-checking is for a future session."] + PvfCheckStatementFuture, + #[codec(index = 7)] + #[doc = "Claimed validator index is out of bounds."] + PvfCheckValidatorIndexOutOfBounds, + #[codec(index = 8)] + #[doc = "The signature for the PVF pre-checking is invalid."] + PvfCheckInvalidSignature, + #[codec(index = 9)] + #[doc = "The given validator already has cast a vote."] + PvfCheckDoubleVote, + #[codec(index = 10)] + #[doc = "The given PVF does not exist at the moment of process a vote."] + PvfCheckSubjectInvalid, + #[codec(index = 11)] + #[doc = "The PVF pre-checking statement cannot be included since the PVF pre-checking mechanism"] + #[doc = "is disabled."] + PvfCheckDisabled, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Event { + #[codec(index = 0)] + #[doc = "Current code has been updated for a Para. `para_id`"] + CurrentCodeUpdated(runtime_types::polkadot_parachain::primitives::Id), + #[codec(index = 1)] + #[doc = "Current head has been updated for a Para. `para_id`"] + CurrentHeadUpdated(runtime_types::polkadot_parachain::primitives::Id), + #[codec(index = 2)] + #[doc = "A code upgrade has been scheduled for a Para. `para_id`"] + CodeUpgradeScheduled(runtime_types::polkadot_parachain::primitives::Id), + #[codec(index = 3)] + #[doc = "A new head has been noted for a Para. `para_id`"] + NewHeadNoted(runtime_types::polkadot_parachain::primitives::Id), + #[codec(index = 4)] + #[doc = "A para has been queued to execute pending actions. `para_id`"] + ActionQueued( + runtime_types::polkadot_parachain::primitives::Id, + ::core::primitive::u32, + ), + #[codec(index = 5)] + #[doc = "The given para either initiated or subscribed to a PVF check for the given validation"] + #[doc = "code. 
`code_hash` `para_id`"] + PvfCheckStarted( + runtime_types::polkadot_parachain::primitives::ValidationCodeHash, + runtime_types::polkadot_parachain::primitives::Id, + ), + #[codec(index = 6)] + #[doc = "The given validation code was accepted by the PVF pre-checking vote."] + #[doc = "`code_hash` `para_id`"] + PvfCheckAccepted( + runtime_types::polkadot_parachain::primitives::ValidationCodeHash, + runtime_types::polkadot_parachain::primitives::Id, + ), + #[codec(index = 7)] + #[doc = "The given validation code was rejected by the PVF pre-checking vote."] + #[doc = "`code_hash` `para_id`"] + PvfCheckRejected( + runtime_types::polkadot_parachain::primitives::ValidationCodeHash, + runtime_types::polkadot_parachain::primitives::Id, + ), + } + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct ParaGenesisArgs { + pub genesis_head: runtime_types::polkadot_parachain::primitives::HeadData, + pub validation_code: + runtime_types::polkadot_parachain::primitives::ValidationCode, + pub parachain: ::core::primitive::bool, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum ParaLifecycle { + #[codec(index = 0)] + Onboarding, + #[codec(index = 1)] + Parathread, + #[codec(index = 2)] + Parachain, + #[codec(index = 3)] + UpgradingParathread, + #[codec(index = 4)] + DowngradingParachain, + #[codec(index = 5)] + OffboardingParathread, + #[codec(index = 6)] + OffboardingParachain, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct ParaPastCodeMeta<_0> { + pub upgrade_times: ::std::vec::Vec< + runtime_types::polkadot_runtime_parachains::paras::ReplacementTimes<_0>, + >, + pub last_pruned: ::core::option::Option<_0>, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct PvfCheckActiveVoteState<_0> { + pub votes_accept: ::subxt::bitvec::vec::BitVec< + ::core::primitive::u8, + ::subxt::bitvec::order::Lsb0, + >, + pub votes_reject: ::subxt::bitvec::vec::BitVec< + ::core::primitive::u8, + ::subxt::bitvec::order::Lsb0, + >, + pub age: _0, + pub created_at: _0, + pub causes: ::std::vec::Vec< + runtime_types::polkadot_runtime_parachains::paras::PvfCheckCause<_0>, + >, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum PvfCheckCause<_0> { + #[codec(index = 0)] + Onboarding(runtime_types::polkadot_parachain::primitives::Id), + #[codec(index = 1)] + Upgrade { + id: runtime_types::polkadot_parachain::primitives::Id, + relay_parent_number: _0, + }, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct ReplacementTimes<_0> { + pub expected_at: _0, + pub activated_at: _0, + } + } + pub mod paras_inherent { + use super::runtime_types; + pub mod pallet { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Call { + #[codec(index = 0)] + #[doc = "Enter the paras inherent. 
This will process bitfields and backed candidates."] + enter { + data: runtime_types::polkadot_primitives::v2::InherentData< + runtime_types::sp_runtime::generic::header::Header< + ::core::primitive::u32, + runtime_types::sp_runtime::traits::BlakeTwo256, + >, + >, + }, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Error { + #[codec(index = 0)] + #[doc = "Inclusion inherent called more than once per block."] + TooManyInclusionInherents, + #[codec(index = 1)] + #[doc = "The hash of the submitted parent header doesn't correspond to the saved block hash of"] + #[doc = "the parent."] + InvalidParentHeader, + #[codec(index = 2)] + #[doc = "Disputed candidate that was concluded invalid."] + CandidateConcludedInvalid, + #[codec(index = 3)] + #[doc = "The data given to the inherent will result in an overweight block."] + InherentOverweight, + #[codec(index = 4)] + #[doc = "The ordering of dispute statements was invalid."] + DisputeStatementsUnsortedOrDuplicates, + #[codec(index = 5)] + #[doc = "A dispute statement was invalid."] + DisputeInvalid, + } + } + } + pub mod scheduler { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum AssignmentKind { + #[codec(index = 0)] + Parachain, + #[codec(index = 1)] + Parathread( + runtime_types::polkadot_primitives::v2::collator_app::Public, + ::core::primitive::u32, + ), + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct CoreAssignment { + pub core: runtime_types::polkadot_primitives::v2::CoreIndex, + pub para_id: runtime_types::polkadot_parachain::primitives::Id, + pub kind: runtime_types::polkadot_runtime_parachains::scheduler::AssignmentKind, + pub group_idx: runtime_types::polkadot_primitives::v2::GroupIndex, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct ParathreadClaimQueue { + pub queue: ::std::vec::Vec< + runtime_types::polkadot_runtime_parachains::scheduler::QueuedParathread, + >, + pub next_core_offset: ::core::primitive::u32, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct QueuedParathread { + pub claim: runtime_types::polkadot_primitives::v2::ParathreadEntry, + pub core_offset: ::core::primitive::u32, + } + } + pub mod shared { + use super::runtime_types; + pub mod pallet { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Call {} + } + } + pub mod ump { + use super::runtime_types; + pub mod pallet { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Call { + #[codec(index = 0)] + #[doc = "Service a single overweight upward message."] + #[doc = ""] + #[doc = "- `origin`: Must pass `ExecuteOverweightOrigin`."] + #[doc = "- `index`: The index of the overweight message to service."] + #[doc = "- `weight_limit`: The amount of weight that message execution may take."] + #[doc = ""] + #[doc = "Errors:"] + #[doc = "- `UnknownMessageIndex`: Message of `index` is unknown."] + #[doc = "- `WeightOverLimit`: Message execution may use greater than `weight_limit`."] + #[doc = ""] + #[doc = "Events:"] + #[doc = "- `OverweightServiced`: On success."] + service_overweight { + index: ::core::primitive::u64, + weight_limit: ::core::primitive::u64, + }, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Error { + #[codec(index = 0)] + #[doc = "The message 
index given is unknown."] + UnknownMessageIndex, + #[codec(index = 1)] + #[doc = "The amount of weight given is possibly not enough for executing the message."] + WeightOverLimit, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Event { + #[codec(index = 0)] + #[doc = "Upward message is invalid XCM."] + #[doc = "\\[ id \\]"] + InvalidFormat([::core::primitive::u8; 32usize]), + #[codec(index = 1)] + #[doc = "Upward message is unsupported version of XCM."] + #[doc = "\\[ id \\]"] + UnsupportedVersion([::core::primitive::u8; 32usize]), + #[codec(index = 2)] + #[doc = "Upward message executed with the given outcome."] + #[doc = "\\[ id, outcome \\]"] + ExecutedUpward( + [::core::primitive::u8; 32usize], + runtime_types::xcm::v2::traits::Outcome, + ), + #[codec(index = 3)] + #[doc = "The weight limit for handling upward messages was reached."] + #[doc = "\\[ id, remaining, required \\]"] + WeightExhausted( + [::core::primitive::u8; 32usize], + ::core::primitive::u64, + ::core::primitive::u64, + ), + #[codec(index = 4)] + #[doc = "Some upward messages have been received and will be processed."] + #[doc = "\\[ para, count, size \\]"] + UpwardMessagesReceived( + runtime_types::polkadot_parachain::primitives::Id, + ::core::primitive::u32, + ::core::primitive::u32, + ), + #[codec(index = 5)] + #[doc = "The weight budget was exceeded for an individual upward message."] + #[doc = ""] + #[doc = "This message can be later dispatched manually using `service_overweight` dispatchable"] + #[doc = "using the assigned `overweight_index`."] + #[doc = ""] + #[doc = "\\[ para, id, overweight_index, required \\]"] + OverweightEnqueued( + runtime_types::polkadot_parachain::primitives::Id, + [::core::primitive::u8; 32usize], + ::core::primitive::u64, + ::core::primitive::u64, + ), + #[codec(index = 6)] + #[doc = "Upward message from the overweight queue was executed with the given actual weight"] + #[doc = "used."] + #[doc = ""] + #[doc = "\\[ overweight_index, used \\]"] + OverweightServiced(::core::primitive::u64, ::core::primitive::u64), + } + } + } + } + pub mod primitive_types { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct H256(pub [::core::primitive::u8; 32usize]); + } + pub mod rococo_runtime { + use super::runtime_types; + pub mod validator_manager { + use super::runtime_types; + pub mod pallet { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Call { + #[codec(index = 0)] + #[doc = "Add new validators to the set."] + #[doc = ""] + #[doc = "The new validators will be active from current session + 2."] + register_validators { + validators: ::std::vec::Vec<::subxt::sp_core::crypto::AccountId32>, + }, + #[codec(index = 1)] + #[doc = "Remove validators from the set."] + #[doc = ""] + #[doc = "The removed validators will be deactivated from current session + 2."] + deregister_validators { + validators: ::std::vec::Vec<::subxt::sp_core::crypto::AccountId32>, + }, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Event { + #[codec(index = 0)] + #[doc = "New validators were added to the set."] + ValidatorsRegistered( + ::std::vec::Vec<::subxt::sp_core::crypto::AccountId32>, + ), + #[codec(index = 1)] + #[doc = "Validators were removed from the set."] + ValidatorsDeregistered( + ::std::vec::Vec<::subxt::sp_core::crypto::AccountId32>, + ), + } + } + } + #[derive(:: subxt :: codec :: Decode, :: 
subxt :: codec :: Encode, Debug)] + pub enum Call { + #[codec(index = 0)] + System(runtime_types::frame_system::pallet::Call), + #[codec(index = 1)] + Babe(runtime_types::pallet_babe::pallet::Call), + #[codec(index = 2)] + Timestamp(runtime_types::pallet_timestamp::pallet::Call), + #[codec(index = 3)] + Indices(runtime_types::pallet_indices::pallet::Call), + #[codec(index = 4)] + Balances(runtime_types::pallet_balances::pallet::Call), + #[codec(index = 6)] + Authorship(runtime_types::pallet_authorship::pallet::Call), + #[codec(index = 9)] + Session(runtime_types::pallet_session::pallet::Call), + #[codec(index = 10)] + Grandpa(runtime_types::pallet_grandpa::pallet::Call), + #[codec(index = 11)] + ImOnline(runtime_types::pallet_im_online::pallet::Call), + #[codec(index = 14)] + Configuration( + runtime_types::polkadot_runtime_parachains::configuration::pallet::Call, + ), + #[codec(index = 15)] + ParasShared(runtime_types::polkadot_runtime_parachains::shared::pallet::Call), + #[codec(index = 16)] + ParaInclusion(runtime_types::polkadot_runtime_parachains::inclusion::pallet::Call), + #[codec(index = 17)] + ParaInherent( + runtime_types::polkadot_runtime_parachains::paras_inherent::pallet::Call, + ), + #[codec(index = 19)] + Paras(runtime_types::polkadot_runtime_parachains::paras::pallet::Call), + #[codec(index = 20)] + Initializer(runtime_types::polkadot_runtime_parachains::initializer::pallet::Call), + #[codec(index = 21)] + Dmp(runtime_types::polkadot_runtime_parachains::dmp::pallet::Call), + #[codec(index = 22)] + Ump(runtime_types::polkadot_runtime_parachains::ump::pallet::Call), + #[codec(index = 23)] + Hrmp(runtime_types::polkadot_runtime_parachains::hrmp::pallet::Call), + #[codec(index = 25)] + ParasDisputes(runtime_types::polkadot_runtime_parachains::disputes::pallet::Call), + #[codec(index = 26)] + Registrar(runtime_types::polkadot_runtime_common::paras_registrar::pallet::Call), + #[codec(index = 27)] + Auctions(runtime_types::polkadot_runtime_common::auctions::pallet::Call), + #[codec(index = 28)] + Crowdloan(runtime_types::polkadot_runtime_common::crowdloan::pallet::Call), + #[codec(index = 29)] + Slots(runtime_types::polkadot_runtime_common::slots::pallet::Call), + #[codec(index = 30)] + ParasSudoWrapper( + runtime_types::polkadot_runtime_common::paras_sudo_wrapper::pallet::Call, + ), + #[codec(index = 31)] + AssignedSlots(runtime_types::polkadot_runtime_common::assigned_slots::pallet::Call), + #[codec(index = 32)] + Sudo(runtime_types::pallet_sudo::pallet::Call), + #[codec(index = 34)] + Beefy(runtime_types::pallet_beefy::pallet::Call), + #[codec(index = 36)] + ValidatorManager(runtime_types::rococo_runtime::validator_manager::pallet::Call), + #[codec(index = 40)] + BridgeRococoGrandpa(runtime_types::pallet_bridge_grandpa::pallet::Call), + #[codec(index = 41)] + BridgeWococoGrandpa(runtime_types::pallet_bridge_grandpa::pallet::Call), + #[codec(index = 43)] + BridgeRococoMessages(runtime_types::pallet_bridge_messages::pallet::Call), + #[codec(index = 44)] + BridgeWococoMessages(runtime_types::pallet_bridge_messages::pallet::Call), + #[codec(index = 80)] + Collective(runtime_types::pallet_collective::pallet::Call), + #[codec(index = 81)] + Membership(runtime_types::pallet_membership::pallet::Call), + #[codec(index = 90)] + Utility(runtime_types::pallet_utility::pallet::Call), + #[codec(index = 91)] + Proxy(runtime_types::pallet_proxy::pallet::Call), + #[codec(index = 92)] + Multisig(runtime_types::pallet_multisig::pallet::Call), + #[codec(index = 99)] + 
XcmPallet(runtime_types::pallet_xcm::pallet::Call), + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Event { + #[codec(index = 0)] + System(runtime_types::frame_system::pallet::Event), + #[codec(index = 3)] + Indices(runtime_types::pallet_indices::pallet::Event), + #[codec(index = 4)] + Balances(runtime_types::pallet_balances::pallet::Event), + #[codec(index = 7)] + Offences(runtime_types::pallet_offences::pallet::Event), + #[codec(index = 9)] + Session(runtime_types::pallet_session::pallet::Event), + #[codec(index = 10)] + Grandpa(runtime_types::pallet_grandpa::pallet::Event), + #[codec(index = 11)] + ImOnline(runtime_types::pallet_im_online::pallet::Event), + #[codec(index = 16)] + ParaInclusion(runtime_types::polkadot_runtime_parachains::inclusion::pallet::Event), + #[codec(index = 19)] + Paras(runtime_types::polkadot_runtime_parachains::paras::pallet::Event), + #[codec(index = 22)] + Ump(runtime_types::polkadot_runtime_parachains::ump::pallet::Event), + #[codec(index = 23)] + Hrmp(runtime_types::polkadot_runtime_parachains::hrmp::pallet::Event), + #[codec(index = 25)] + ParasDisputes(runtime_types::polkadot_runtime_parachains::disputes::pallet::Event), + #[codec(index = 26)] + Registrar(runtime_types::polkadot_runtime_common::paras_registrar::pallet::Event), + #[codec(index = 27)] + Auctions(runtime_types::polkadot_runtime_common::auctions::pallet::Event), + #[codec(index = 28)] + Crowdloan(runtime_types::polkadot_runtime_common::crowdloan::pallet::Event), + #[codec(index = 29)] + Slots(runtime_types::polkadot_runtime_common::slots::pallet::Event), + #[codec(index = 31)] + AssignedSlots( + runtime_types::polkadot_runtime_common::assigned_slots::pallet::Event, + ), + #[codec(index = 32)] + Sudo(runtime_types::pallet_sudo::pallet::Event), + #[codec(index = 36)] + ValidatorManager(runtime_types::rococo_runtime::validator_manager::pallet::Event), + #[codec(index = 43)] + BridgeRococoMessages(runtime_types::pallet_bridge_messages::pallet::Event), + #[codec(index = 44)] + BridgeWococoMessages(runtime_types::pallet_bridge_messages::pallet::Event), + #[codec(index = 45)] + BridgeRococoMessagesDispatch(runtime_types::pallet_bridge_dispatch::pallet::Event), + #[codec(index = 46)] + BridgeWococoMessagesDispatch(runtime_types::pallet_bridge_dispatch::pallet::Event), + #[codec(index = 80)] + Collective(runtime_types::pallet_collective::pallet::Event), + #[codec(index = 81)] + Membership(runtime_types::pallet_membership::pallet::Event), + #[codec(index = 90)] + Utility(runtime_types::pallet_utility::pallet::Event), + #[codec(index = 91)] + Proxy(runtime_types::pallet_proxy::pallet::Event), + #[codec(index = 92)] + Multisig(runtime_types::pallet_multisig::pallet::Event), + #[codec(index = 99)] + XcmPallet(runtime_types::pallet_xcm::pallet::Event), + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum OriginCaller { + #[codec(index = 0)] + system( + runtime_types::frame_support::dispatch::RawOrigin< + ::subxt::sp_core::crypto::AccountId32, + >, + ), + #[codec(index = 13)] + ParachainsOrigin( + runtime_types::polkadot_runtime_parachains::origin::pallet::Origin, + ), + #[codec(index = 80)] + Collective( + runtime_types::pallet_collective::RawOrigin< + ::subxt::sp_core::crypto::AccountId32, + >, + ), + #[codec(index = 99)] + XcmPallet(runtime_types::pallet_xcm::pallet::Origin), + #[codec(index = 4)] + Void(runtime_types::sp_core::Void), + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub 
enum ProxyType { + #[codec(index = 0)] + Any, + #[codec(index = 1)] + CancelProxy, + #[codec(index = 2)] + Auction, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct Runtime; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct SessionKeys { + pub grandpa: runtime_types::sp_finality_grandpa::app::Public, + pub babe: runtime_types::sp_consensus_babe::app::Public, + pub im_online: runtime_types::pallet_im_online::sr25519::app_sr25519::Public, + pub para_validator: runtime_types::polkadot_primitives::v2::validator_app::Public, + pub para_assignment: runtime_types::polkadot_primitives::v2::assignment_app::Public, + pub authority_discovery: runtime_types::sp_authority_discovery::app::Public, + pub beefy: runtime_types::beefy_primitives::crypto::Public, + } + } + pub mod sp_arithmetic { + use super::runtime_types; + pub mod fixed_point { + use super::runtime_types; + #[derive( + :: subxt :: codec :: CompactAs, + :: subxt :: codec :: Decode, + :: subxt :: codec :: Encode, + Debug, + )] + pub struct FixedU128(pub ::core::primitive::u128); + } + pub mod per_things { + use super::runtime_types; + #[derive( + :: subxt :: codec :: CompactAs, + :: subxt :: codec :: Decode, + :: subxt :: codec :: Encode, + Debug, + )] + pub struct Perbill(pub ::core::primitive::u32); + } + } + pub mod sp_authority_discovery { + use super::runtime_types; + pub mod app { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct Public(pub runtime_types::sp_core::sr25519::Public); + } + } + pub mod sp_consensus_babe { + use super::runtime_types; + pub mod app { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct Public(pub runtime_types::sp_core::sr25519::Public); + } + pub mod digests { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum NextConfigDescriptor { + #[codec(index = 1)] + V1 { + c: (::core::primitive::u64, ::core::primitive::u64), + allowed_slots: runtime_types::sp_consensus_babe::AllowedSlots, + }, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum PreDigest { + #[codec(index = 1)] + Primary(runtime_types::sp_consensus_babe::digests::PrimaryPreDigest), + #[codec(index = 2)] + SecondaryPlain( + runtime_types::sp_consensus_babe::digests::SecondaryPlainPreDigest, + ), + #[codec(index = 3)] + SecondaryVRF(runtime_types::sp_consensus_babe::digests::SecondaryVRFPreDigest), + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct PrimaryPreDigest { + pub authority_index: ::core::primitive::u32, + pub slot: runtime_types::sp_consensus_slots::Slot, + pub vrf_output: [::core::primitive::u8; 32usize], + pub vrf_proof: [::core::primitive::u8; 64usize], + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct SecondaryPlainPreDigest { + pub authority_index: ::core::primitive::u32, + pub slot: runtime_types::sp_consensus_slots::Slot, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct SecondaryVRFPreDigest { + pub authority_index: ::core::primitive::u32, + pub slot: runtime_types::sp_consensus_slots::Slot, + pub vrf_output: [::core::primitive::u8; 32usize], + pub vrf_proof: [::core::primitive::u8; 64usize], + } + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum 
AllowedSlots { + #[codec(index = 0)] + PrimarySlots, + #[codec(index = 1)] + PrimaryAndSecondaryPlainSlots, + #[codec(index = 2)] + PrimaryAndSecondaryVRFSlots, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct BabeEpochConfiguration { + pub c: (::core::primitive::u64, ::core::primitive::u64), + pub allowed_slots: runtime_types::sp_consensus_babe::AllowedSlots, + } + } + pub mod sp_consensus_slots { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct EquivocationProof<_0, _1> { + pub offender: _1, + pub slot: runtime_types::sp_consensus_slots::Slot, + pub first_header: _0, + pub second_header: _0, + } + #[derive( + :: subxt :: codec :: CompactAs, + :: subxt :: codec :: Decode, + :: subxt :: codec :: Encode, + Debug, + )] + pub struct Slot(pub ::core::primitive::u64); + } + pub mod sp_core { + use super::runtime_types; + pub mod crypto { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct AccountId32(pub [::core::primitive::u8; 32usize]); + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct KeyTypeId(pub [::core::primitive::u8; 4usize]); + } + pub mod ecdsa { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct Public(pub [::core::primitive::u8; 33usize]); + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct Signature(pub [::core::primitive::u8; 65usize]); + } + pub mod ed25519 { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct Public(pub [::core::primitive::u8; 32usize]); + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct Signature(pub [::core::primitive::u8; 64usize]); + } + pub mod offchain { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct OpaqueMultiaddr(pub ::std::vec::Vec<::core::primitive::u8>); + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct OpaqueNetworkState { + pub peer_id: runtime_types::sp_core::OpaquePeerId, + pub external_addresses: + ::std::vec::Vec, + } + } + pub mod sr25519 { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct Public(pub [::core::primitive::u8; 32usize]); + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct Signature(pub [::core::primitive::u8; 64usize]); + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct OpaquePeerId(pub ::std::vec::Vec<::core::primitive::u8>); + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Void {} + } + pub mod sp_finality_grandpa { + use super::runtime_types; + pub mod app { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct Public(pub runtime_types::sp_core::ed25519::Public); + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct Signature(pub runtime_types::sp_core::ed25519::Signature); + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Equivocation<_0, _1> { + #[codec(index = 0)] + Prevote( + runtime_types::finality_grandpa::Equivocation< + 
runtime_types::sp_finality_grandpa::app::Public, + runtime_types::finality_grandpa::Prevote<_0, _1>, + runtime_types::sp_finality_grandpa::app::Signature, + >, + ), + #[codec(index = 1)] + Precommit( + runtime_types::finality_grandpa::Equivocation< + runtime_types::sp_finality_grandpa::app::Public, + runtime_types::finality_grandpa::Precommit<_0, _1>, + runtime_types::sp_finality_grandpa::app::Signature, + >, + ), + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct EquivocationProof<_0, _1> { + pub set_id: ::core::primitive::u64, + pub equivocation: runtime_types::sp_finality_grandpa::Equivocation<_0, _1>, + } + } + pub mod sp_runtime { + use super::runtime_types; + pub mod generic { + use super::runtime_types; + pub mod digest { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct Digest { + pub logs: + ::std::vec::Vec, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum DigestItem { + #[codec(index = 6)] + PreRuntime( + [::core::primitive::u8; 4usize], + ::std::vec::Vec<::core::primitive::u8>, + ), + #[codec(index = 4)] + Consensus( + [::core::primitive::u8; 4usize], + ::std::vec::Vec<::core::primitive::u8>, + ), + #[codec(index = 5)] + Seal( + [::core::primitive::u8; 4usize], + ::std::vec::Vec<::core::primitive::u8>, + ), + #[codec(index = 0)] + Other(::std::vec::Vec<::core::primitive::u8>), + #[codec(index = 8)] + RuntimeEnvironmentUpdated, + } + } + pub mod era { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Era { + #[codec(index = 0)] + Immortal, + #[codec(index = 1)] + Mortal1(::core::primitive::u8), + #[codec(index = 2)] + Mortal2(::core::primitive::u8), + #[codec(index = 3)] + Mortal3(::core::primitive::u8), + #[codec(index = 4)] + Mortal4(::core::primitive::u8), + #[codec(index = 5)] + Mortal5(::core::primitive::u8), + #[codec(index = 6)] + Mortal6(::core::primitive::u8), + #[codec(index = 7)] + Mortal7(::core::primitive::u8), + #[codec(index = 8)] + Mortal8(::core::primitive::u8), + #[codec(index = 9)] + Mortal9(::core::primitive::u8), + #[codec(index = 10)] + Mortal10(::core::primitive::u8), + #[codec(index = 11)] + Mortal11(::core::primitive::u8), + #[codec(index = 12)] + Mortal12(::core::primitive::u8), + #[codec(index = 13)] + Mortal13(::core::primitive::u8), + #[codec(index = 14)] + Mortal14(::core::primitive::u8), + #[codec(index = 15)] + Mortal15(::core::primitive::u8), + #[codec(index = 16)] + Mortal16(::core::primitive::u8), + #[codec(index = 17)] + Mortal17(::core::primitive::u8), + #[codec(index = 18)] + Mortal18(::core::primitive::u8), + #[codec(index = 19)] + Mortal19(::core::primitive::u8), + #[codec(index = 20)] + Mortal20(::core::primitive::u8), + #[codec(index = 21)] + Mortal21(::core::primitive::u8), + #[codec(index = 22)] + Mortal22(::core::primitive::u8), + #[codec(index = 23)] + Mortal23(::core::primitive::u8), + #[codec(index = 24)] + Mortal24(::core::primitive::u8), + #[codec(index = 25)] + Mortal25(::core::primitive::u8), + #[codec(index = 26)] + Mortal26(::core::primitive::u8), + #[codec(index = 27)] + Mortal27(::core::primitive::u8), + #[codec(index = 28)] + Mortal28(::core::primitive::u8), + #[codec(index = 29)] + Mortal29(::core::primitive::u8), + #[codec(index = 30)] + Mortal30(::core::primitive::u8), + #[codec(index = 31)] + Mortal31(::core::primitive::u8), + #[codec(index = 32)] + Mortal32(::core::primitive::u8), + #[codec(index = 33)] 
+ Mortal33(::core::primitive::u8), + #[codec(index = 34)] + Mortal34(::core::primitive::u8), + #[codec(index = 35)] + Mortal35(::core::primitive::u8), + #[codec(index = 36)] + Mortal36(::core::primitive::u8), + #[codec(index = 37)] + Mortal37(::core::primitive::u8), + #[codec(index = 38)] + Mortal38(::core::primitive::u8), + #[codec(index = 39)] + Mortal39(::core::primitive::u8), + #[codec(index = 40)] + Mortal40(::core::primitive::u8), + #[codec(index = 41)] + Mortal41(::core::primitive::u8), + #[codec(index = 42)] + Mortal42(::core::primitive::u8), + #[codec(index = 43)] + Mortal43(::core::primitive::u8), + #[codec(index = 44)] + Mortal44(::core::primitive::u8), + #[codec(index = 45)] + Mortal45(::core::primitive::u8), + #[codec(index = 46)] + Mortal46(::core::primitive::u8), + #[codec(index = 47)] + Mortal47(::core::primitive::u8), + #[codec(index = 48)] + Mortal48(::core::primitive::u8), + #[codec(index = 49)] + Mortal49(::core::primitive::u8), + #[codec(index = 50)] + Mortal50(::core::primitive::u8), + #[codec(index = 51)] + Mortal51(::core::primitive::u8), + #[codec(index = 52)] + Mortal52(::core::primitive::u8), + #[codec(index = 53)] + Mortal53(::core::primitive::u8), + #[codec(index = 54)] + Mortal54(::core::primitive::u8), + #[codec(index = 55)] + Mortal55(::core::primitive::u8), + #[codec(index = 56)] + Mortal56(::core::primitive::u8), + #[codec(index = 57)] + Mortal57(::core::primitive::u8), + #[codec(index = 58)] + Mortal58(::core::primitive::u8), + #[codec(index = 59)] + Mortal59(::core::primitive::u8), + #[codec(index = 60)] + Mortal60(::core::primitive::u8), + #[codec(index = 61)] + Mortal61(::core::primitive::u8), + #[codec(index = 62)] + Mortal62(::core::primitive::u8), + #[codec(index = 63)] + Mortal63(::core::primitive::u8), + #[codec(index = 64)] + Mortal64(::core::primitive::u8), + #[codec(index = 65)] + Mortal65(::core::primitive::u8), + #[codec(index = 66)] + Mortal66(::core::primitive::u8), + #[codec(index = 67)] + Mortal67(::core::primitive::u8), + #[codec(index = 68)] + Mortal68(::core::primitive::u8), + #[codec(index = 69)] + Mortal69(::core::primitive::u8), + #[codec(index = 70)] + Mortal70(::core::primitive::u8), + #[codec(index = 71)] + Mortal71(::core::primitive::u8), + #[codec(index = 72)] + Mortal72(::core::primitive::u8), + #[codec(index = 73)] + Mortal73(::core::primitive::u8), + #[codec(index = 74)] + Mortal74(::core::primitive::u8), + #[codec(index = 75)] + Mortal75(::core::primitive::u8), + #[codec(index = 76)] + Mortal76(::core::primitive::u8), + #[codec(index = 77)] + Mortal77(::core::primitive::u8), + #[codec(index = 78)] + Mortal78(::core::primitive::u8), + #[codec(index = 79)] + Mortal79(::core::primitive::u8), + #[codec(index = 80)] + Mortal80(::core::primitive::u8), + #[codec(index = 81)] + Mortal81(::core::primitive::u8), + #[codec(index = 82)] + Mortal82(::core::primitive::u8), + #[codec(index = 83)] + Mortal83(::core::primitive::u8), + #[codec(index = 84)] + Mortal84(::core::primitive::u8), + #[codec(index = 85)] + Mortal85(::core::primitive::u8), + #[codec(index = 86)] + Mortal86(::core::primitive::u8), + #[codec(index = 87)] + Mortal87(::core::primitive::u8), + #[codec(index = 88)] + Mortal88(::core::primitive::u8), + #[codec(index = 89)] + Mortal89(::core::primitive::u8), + #[codec(index = 90)] + Mortal90(::core::primitive::u8), + #[codec(index = 91)] + Mortal91(::core::primitive::u8), + #[codec(index = 92)] + Mortal92(::core::primitive::u8), + #[codec(index = 93)] + Mortal93(::core::primitive::u8), + #[codec(index = 94)] + 
Mortal94(::core::primitive::u8), + #[codec(index = 95)] + Mortal95(::core::primitive::u8), + #[codec(index = 96)] + Mortal96(::core::primitive::u8), + #[codec(index = 97)] + Mortal97(::core::primitive::u8), + #[codec(index = 98)] + Mortal98(::core::primitive::u8), + #[codec(index = 99)] + Mortal99(::core::primitive::u8), + #[codec(index = 100)] + Mortal100(::core::primitive::u8), + #[codec(index = 101)] + Mortal101(::core::primitive::u8), + #[codec(index = 102)] + Mortal102(::core::primitive::u8), + #[codec(index = 103)] + Mortal103(::core::primitive::u8), + #[codec(index = 104)] + Mortal104(::core::primitive::u8), + #[codec(index = 105)] + Mortal105(::core::primitive::u8), + #[codec(index = 106)] + Mortal106(::core::primitive::u8), + #[codec(index = 107)] + Mortal107(::core::primitive::u8), + #[codec(index = 108)] + Mortal108(::core::primitive::u8), + #[codec(index = 109)] + Mortal109(::core::primitive::u8), + #[codec(index = 110)] + Mortal110(::core::primitive::u8), + #[codec(index = 111)] + Mortal111(::core::primitive::u8), + #[codec(index = 112)] + Mortal112(::core::primitive::u8), + #[codec(index = 113)] + Mortal113(::core::primitive::u8), + #[codec(index = 114)] + Mortal114(::core::primitive::u8), + #[codec(index = 115)] + Mortal115(::core::primitive::u8), + #[codec(index = 116)] + Mortal116(::core::primitive::u8), + #[codec(index = 117)] + Mortal117(::core::primitive::u8), + #[codec(index = 118)] + Mortal118(::core::primitive::u8), + #[codec(index = 119)] + Mortal119(::core::primitive::u8), + #[codec(index = 120)] + Mortal120(::core::primitive::u8), + #[codec(index = 121)] + Mortal121(::core::primitive::u8), + #[codec(index = 122)] + Mortal122(::core::primitive::u8), + #[codec(index = 123)] + Mortal123(::core::primitive::u8), + #[codec(index = 124)] + Mortal124(::core::primitive::u8), + #[codec(index = 125)] + Mortal125(::core::primitive::u8), + #[codec(index = 126)] + Mortal126(::core::primitive::u8), + #[codec(index = 127)] + Mortal127(::core::primitive::u8), + #[codec(index = 128)] + Mortal128(::core::primitive::u8), + #[codec(index = 129)] + Mortal129(::core::primitive::u8), + #[codec(index = 130)] + Mortal130(::core::primitive::u8), + #[codec(index = 131)] + Mortal131(::core::primitive::u8), + #[codec(index = 132)] + Mortal132(::core::primitive::u8), + #[codec(index = 133)] + Mortal133(::core::primitive::u8), + #[codec(index = 134)] + Mortal134(::core::primitive::u8), + #[codec(index = 135)] + Mortal135(::core::primitive::u8), + #[codec(index = 136)] + Mortal136(::core::primitive::u8), + #[codec(index = 137)] + Mortal137(::core::primitive::u8), + #[codec(index = 138)] + Mortal138(::core::primitive::u8), + #[codec(index = 139)] + Mortal139(::core::primitive::u8), + #[codec(index = 140)] + Mortal140(::core::primitive::u8), + #[codec(index = 141)] + Mortal141(::core::primitive::u8), + #[codec(index = 142)] + Mortal142(::core::primitive::u8), + #[codec(index = 143)] + Mortal143(::core::primitive::u8), + #[codec(index = 144)] + Mortal144(::core::primitive::u8), + #[codec(index = 145)] + Mortal145(::core::primitive::u8), + #[codec(index = 146)] + Mortal146(::core::primitive::u8), + #[codec(index = 147)] + Mortal147(::core::primitive::u8), + #[codec(index = 148)] + Mortal148(::core::primitive::u8), + #[codec(index = 149)] + Mortal149(::core::primitive::u8), + #[codec(index = 150)] + Mortal150(::core::primitive::u8), + #[codec(index = 151)] + Mortal151(::core::primitive::u8), + #[codec(index = 152)] + Mortal152(::core::primitive::u8), + #[codec(index = 153)] + 
Mortal153(::core::primitive::u8), + #[codec(index = 154)] + Mortal154(::core::primitive::u8), + #[codec(index = 155)] + Mortal155(::core::primitive::u8), + #[codec(index = 156)] + Mortal156(::core::primitive::u8), + #[codec(index = 157)] + Mortal157(::core::primitive::u8), + #[codec(index = 158)] + Mortal158(::core::primitive::u8), + #[codec(index = 159)] + Mortal159(::core::primitive::u8), + #[codec(index = 160)] + Mortal160(::core::primitive::u8), + #[codec(index = 161)] + Mortal161(::core::primitive::u8), + #[codec(index = 162)] + Mortal162(::core::primitive::u8), + #[codec(index = 163)] + Mortal163(::core::primitive::u8), + #[codec(index = 164)] + Mortal164(::core::primitive::u8), + #[codec(index = 165)] + Mortal165(::core::primitive::u8), + #[codec(index = 166)] + Mortal166(::core::primitive::u8), + #[codec(index = 167)] + Mortal167(::core::primitive::u8), + #[codec(index = 168)] + Mortal168(::core::primitive::u8), + #[codec(index = 169)] + Mortal169(::core::primitive::u8), + #[codec(index = 170)] + Mortal170(::core::primitive::u8), + #[codec(index = 171)] + Mortal171(::core::primitive::u8), + #[codec(index = 172)] + Mortal172(::core::primitive::u8), + #[codec(index = 173)] + Mortal173(::core::primitive::u8), + #[codec(index = 174)] + Mortal174(::core::primitive::u8), + #[codec(index = 175)] + Mortal175(::core::primitive::u8), + #[codec(index = 176)] + Mortal176(::core::primitive::u8), + #[codec(index = 177)] + Mortal177(::core::primitive::u8), + #[codec(index = 178)] + Mortal178(::core::primitive::u8), + #[codec(index = 179)] + Mortal179(::core::primitive::u8), + #[codec(index = 180)] + Mortal180(::core::primitive::u8), + #[codec(index = 181)] + Mortal181(::core::primitive::u8), + #[codec(index = 182)] + Mortal182(::core::primitive::u8), + #[codec(index = 183)] + Mortal183(::core::primitive::u8), + #[codec(index = 184)] + Mortal184(::core::primitive::u8), + #[codec(index = 185)] + Mortal185(::core::primitive::u8), + #[codec(index = 186)] + Mortal186(::core::primitive::u8), + #[codec(index = 187)] + Mortal187(::core::primitive::u8), + #[codec(index = 188)] + Mortal188(::core::primitive::u8), + #[codec(index = 189)] + Mortal189(::core::primitive::u8), + #[codec(index = 190)] + Mortal190(::core::primitive::u8), + #[codec(index = 191)] + Mortal191(::core::primitive::u8), + #[codec(index = 192)] + Mortal192(::core::primitive::u8), + #[codec(index = 193)] + Mortal193(::core::primitive::u8), + #[codec(index = 194)] + Mortal194(::core::primitive::u8), + #[codec(index = 195)] + Mortal195(::core::primitive::u8), + #[codec(index = 196)] + Mortal196(::core::primitive::u8), + #[codec(index = 197)] + Mortal197(::core::primitive::u8), + #[codec(index = 198)] + Mortal198(::core::primitive::u8), + #[codec(index = 199)] + Mortal199(::core::primitive::u8), + #[codec(index = 200)] + Mortal200(::core::primitive::u8), + #[codec(index = 201)] + Mortal201(::core::primitive::u8), + #[codec(index = 202)] + Mortal202(::core::primitive::u8), + #[codec(index = 203)] + Mortal203(::core::primitive::u8), + #[codec(index = 204)] + Mortal204(::core::primitive::u8), + #[codec(index = 205)] + Mortal205(::core::primitive::u8), + #[codec(index = 206)] + Mortal206(::core::primitive::u8), + #[codec(index = 207)] + Mortal207(::core::primitive::u8), + #[codec(index = 208)] + Mortal208(::core::primitive::u8), + #[codec(index = 209)] + Mortal209(::core::primitive::u8), + #[codec(index = 210)] + Mortal210(::core::primitive::u8), + #[codec(index = 211)] + Mortal211(::core::primitive::u8), + #[codec(index = 212)] + 
Mortal212(::core::primitive::u8), + #[codec(index = 213)] + Mortal213(::core::primitive::u8), + #[codec(index = 214)] + Mortal214(::core::primitive::u8), + #[codec(index = 215)] + Mortal215(::core::primitive::u8), + #[codec(index = 216)] + Mortal216(::core::primitive::u8), + #[codec(index = 217)] + Mortal217(::core::primitive::u8), + #[codec(index = 218)] + Mortal218(::core::primitive::u8), + #[codec(index = 219)] + Mortal219(::core::primitive::u8), + #[codec(index = 220)] + Mortal220(::core::primitive::u8), + #[codec(index = 221)] + Mortal221(::core::primitive::u8), + #[codec(index = 222)] + Mortal222(::core::primitive::u8), + #[codec(index = 223)] + Mortal223(::core::primitive::u8), + #[codec(index = 224)] + Mortal224(::core::primitive::u8), + #[codec(index = 225)] + Mortal225(::core::primitive::u8), + #[codec(index = 226)] + Mortal226(::core::primitive::u8), + #[codec(index = 227)] + Mortal227(::core::primitive::u8), + #[codec(index = 228)] + Mortal228(::core::primitive::u8), + #[codec(index = 229)] + Mortal229(::core::primitive::u8), + #[codec(index = 230)] + Mortal230(::core::primitive::u8), + #[codec(index = 231)] + Mortal231(::core::primitive::u8), + #[codec(index = 232)] + Mortal232(::core::primitive::u8), + #[codec(index = 233)] + Mortal233(::core::primitive::u8), + #[codec(index = 234)] + Mortal234(::core::primitive::u8), + #[codec(index = 235)] + Mortal235(::core::primitive::u8), + #[codec(index = 236)] + Mortal236(::core::primitive::u8), + #[codec(index = 237)] + Mortal237(::core::primitive::u8), + #[codec(index = 238)] + Mortal238(::core::primitive::u8), + #[codec(index = 239)] + Mortal239(::core::primitive::u8), + #[codec(index = 240)] + Mortal240(::core::primitive::u8), + #[codec(index = 241)] + Mortal241(::core::primitive::u8), + #[codec(index = 242)] + Mortal242(::core::primitive::u8), + #[codec(index = 243)] + Mortal243(::core::primitive::u8), + #[codec(index = 244)] + Mortal244(::core::primitive::u8), + #[codec(index = 245)] + Mortal245(::core::primitive::u8), + #[codec(index = 246)] + Mortal246(::core::primitive::u8), + #[codec(index = 247)] + Mortal247(::core::primitive::u8), + #[codec(index = 248)] + Mortal248(::core::primitive::u8), + #[codec(index = 249)] + Mortal249(::core::primitive::u8), + #[codec(index = 250)] + Mortal250(::core::primitive::u8), + #[codec(index = 251)] + Mortal251(::core::primitive::u8), + #[codec(index = 252)] + Mortal252(::core::primitive::u8), + #[codec(index = 253)] + Mortal253(::core::primitive::u8), + #[codec(index = 254)] + Mortal254(::core::primitive::u8), + #[codec(index = 255)] + Mortal255(::core::primitive::u8), + } + } + pub mod header { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct Header<_0, _1> { + pub parent_hash: ::subxt::sp_core::H256, + #[codec(compact)] + pub number: _0, + pub state_root: ::subxt::sp_core::H256, + pub extrinsics_root: ::subxt::sp_core::H256, + pub digest: runtime_types::sp_runtime::generic::digest::Digest, + #[codec(skip)] + pub __subxt_unused_type_params: ::core::marker::PhantomData<_1>, + } + } + pub mod unchecked_extrinsic { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct UncheckedExtrinsic<_0, _1, _2, _3>( + pub ::std::vec::Vec<::core::primitive::u8>, + #[codec(skip)] pub ::core::marker::PhantomData<(_0, _2, _1, _3)>, + ); + } + } + pub mod multiaddress { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub 
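// Editorial note (illustrative sketch, not part of the generated bindings): the `Era` enum
// above exposes `Immortal` plus `Mortal1(u8)`..`Mortal255(u8)` because `sp_runtime`'s mortal
// era encodes to exactly two bytes; in this flattened metadata view the variant index carries
// the first byte and the `u8` payload the second. Under that assumption, the (period, phase)
// pair can be recovered roughly as follows (a sketch of the upstream decoding, hypothetical
// helper name):
//
//     fn decode_mortal_era(first: u8, second: u8) -> (u64, u64) {
//         let encoded = u16::from(first) | (u16::from(second) << 8);
//         let period = 2u64 << (encoded & 0b1111);          // low 4 bits: log2(period) - 1
//         let quantize_factor = (period >> 12).max(1);
//         let phase = u64::from(encoded >> 4) * quantize_factor; // high 12 bits: quantized phase
//         (period, phase)
//     }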
enum MultiAddress<_0, _1> { + #[codec(index = 0)] + Id(_0), + #[codec(index = 1)] + Index(#[codec(compact)] _1), + #[codec(index = 2)] + Raw(::std::vec::Vec<::core::primitive::u8>), + #[codec(index = 3)] + Address32([::core::primitive::u8; 32usize]), + #[codec(index = 4)] + Address20([::core::primitive::u8; 20usize]), + } + } + pub mod traits { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct BlakeTwo256; + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum ArithmeticError { + #[codec(index = 0)] + Underflow, + #[codec(index = 1)] + Overflow, + #[codec(index = 2)] + DivisionByZero, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum DispatchError { + #[codec(index = 0)] + Other, + #[codec(index = 1)] + CannotLookup, + #[codec(index = 2)] + BadOrigin, + #[codec(index = 3)] + Module(runtime_types::sp_runtime::ModuleError), + #[codec(index = 4)] + ConsumerRemaining, + #[codec(index = 5)] + NoProviders, + #[codec(index = 6)] + TooManyConsumers, + #[codec(index = 7)] + Token(runtime_types::sp_runtime::TokenError), + #[codec(index = 8)] + Arithmetic(runtime_types::sp_runtime::ArithmeticError), + #[codec(index = 9)] + Transactional(runtime_types::sp_runtime::TransactionalError), + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct ModuleError { + pub index: ::core::primitive::u8, + pub error: [::core::primitive::u8; 4usize], + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum MultiSignature { + #[codec(index = 0)] + Ed25519(runtime_types::sp_core::ed25519::Signature), + #[codec(index = 1)] + Sr25519(runtime_types::sp_core::sr25519::Signature), + #[codec(index = 2)] + Ecdsa(runtime_types::sp_core::ecdsa::Signature), + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum MultiSigner { + #[codec(index = 0)] + Ed25519(runtime_types::sp_core::ed25519::Public), + #[codec(index = 1)] + Sr25519(runtime_types::sp_core::sr25519::Public), + #[codec(index = 2)] + Ecdsa(runtime_types::sp_core::ecdsa::Public), + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum TokenError { + #[codec(index = 0)] + NoFunds, + #[codec(index = 1)] + WouldDie, + #[codec(index = 2)] + BelowMinimum, + #[codec(index = 3)] + CannotCreate, + #[codec(index = 4)] + UnknownAsset, + #[codec(index = 5)] + Frozen, + #[codec(index = 6)] + Unsupported, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum TransactionalError { + #[codec(index = 0)] + LimitReached, + #[codec(index = 1)] + NoLayer, + } + } + pub mod sp_session { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct MembershipProof { + pub session: ::core::primitive::u32, + pub trie_nodes: ::std::vec::Vec<::std::vec::Vec<::core::primitive::u8>>, + pub validator_count: ::core::primitive::u32, + } + } + pub mod sp_staking { + use super::runtime_types; + pub mod offence { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct OffenceDetails<_0, _1> { + pub offender: _1, + pub reporters: ::std::vec::Vec<_0>, + } + } + } + pub mod sp_version { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct RuntimeVersion { + pub spec_name: ::std::string::String, + pub 
impl_name: ::std::string::String, + pub authoring_version: ::core::primitive::u32, + pub spec_version: ::core::primitive::u32, + pub impl_version: ::core::primitive::u32, + pub apis: + ::std::vec::Vec<([::core::primitive::u8; 8usize], ::core::primitive::u32)>, + pub transaction_version: ::core::primitive::u32, + pub state_version: ::core::primitive::u8, + } + } + pub mod xcm { + use super::runtime_types; + pub mod double_encoded { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct DoubleEncoded { + pub encoded: ::std::vec::Vec<::core::primitive::u8>, + } + } + pub mod v0 { + use super::runtime_types; + pub mod junction { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum BodyId { + #[codec(index = 0)] + Unit, + #[codec(index = 1)] + Named(::std::vec::Vec<::core::primitive::u8>), + #[codec(index = 2)] + Index(#[codec(compact)] ::core::primitive::u32), + #[codec(index = 3)] + Executive, + #[codec(index = 4)] + Technical, + #[codec(index = 5)] + Legislative, + #[codec(index = 6)] + Judicial, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum BodyPart { + #[codec(index = 0)] + Voice, + #[codec(index = 1)] + Members { + #[codec(compact)] + count: ::core::primitive::u32, + }, + #[codec(index = 2)] + Fraction { + #[codec(compact)] + nom: ::core::primitive::u32, + #[codec(compact)] + denom: ::core::primitive::u32, + }, + #[codec(index = 3)] + AtLeastProportion { + #[codec(compact)] + nom: ::core::primitive::u32, + #[codec(compact)] + denom: ::core::primitive::u32, + }, + #[codec(index = 4)] + MoreThanProportion { + #[codec(compact)] + nom: ::core::primitive::u32, + #[codec(compact)] + denom: ::core::primitive::u32, + }, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Junction { + #[codec(index = 0)] + Parent, + #[codec(index = 1)] + Parachain(#[codec(compact)] ::core::primitive::u32), + #[codec(index = 2)] + AccountId32 { + network: runtime_types::xcm::v0::junction::NetworkId, + id: [::core::primitive::u8; 32usize], + }, + #[codec(index = 3)] + AccountIndex64 { + network: runtime_types::xcm::v0::junction::NetworkId, + #[codec(compact)] + index: ::core::primitive::u64, + }, + #[codec(index = 4)] + AccountKey20 { + network: runtime_types::xcm::v0::junction::NetworkId, + key: [::core::primitive::u8; 20usize], + }, + #[codec(index = 5)] + PalletInstance(::core::primitive::u8), + #[codec(index = 6)] + GeneralIndex(#[codec(compact)] ::core::primitive::u128), + #[codec(index = 7)] + GeneralKey(::std::vec::Vec<::core::primitive::u8>), + #[codec(index = 8)] + OnlyChild, + #[codec(index = 9)] + Plurality { + id: runtime_types::xcm::v0::junction::BodyId, + part: runtime_types::xcm::v0::junction::BodyPart, + }, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum NetworkId { + #[codec(index = 0)] + Any, + #[codec(index = 1)] + Named(::std::vec::Vec<::core::primitive::u8>), + #[codec(index = 2)] + Polkadot, + #[codec(index = 3)] + Kusama, + } + } + pub mod multi_asset { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum MultiAsset { + #[codec(index = 0)] + None, + #[codec(index = 1)] + All, + #[codec(index = 2)] + AllFungible, + #[codec(index = 3)] + AllNonFungible, + #[codec(index = 4)] + AllAbstractFungible { + id: ::std::vec::Vec<::core::primitive::u8>, + }, + #[codec(index = 5)] + 
AllAbstractNonFungible { + class: ::std::vec::Vec<::core::primitive::u8>, + }, + #[codec(index = 6)] + AllConcreteFungible { + id: runtime_types::xcm::v0::multi_location::MultiLocation, + }, + #[codec(index = 7)] + AllConcreteNonFungible { + class: runtime_types::xcm::v0::multi_location::MultiLocation, + }, + #[codec(index = 8)] + AbstractFungible { + id: ::std::vec::Vec<::core::primitive::u8>, + #[codec(compact)] + amount: ::core::primitive::u128, + }, + #[codec(index = 9)] + AbstractNonFungible { + class: ::std::vec::Vec<::core::primitive::u8>, + instance: runtime_types::xcm::v1::multiasset::AssetInstance, + }, + #[codec(index = 10)] + ConcreteFungible { + id: runtime_types::xcm::v0::multi_location::MultiLocation, + #[codec(compact)] + amount: ::core::primitive::u128, + }, + #[codec(index = 11)] + ConcreteNonFungible { + class: runtime_types::xcm::v0::multi_location::MultiLocation, + instance: runtime_types::xcm::v1::multiasset::AssetInstance, + }, + } + } + pub mod multi_location { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum MultiLocation { + #[codec(index = 0)] + Null, + #[codec(index = 1)] + X1(runtime_types::xcm::v0::junction::Junction), + #[codec(index = 2)] + X2( + runtime_types::xcm::v0::junction::Junction, + runtime_types::xcm::v0::junction::Junction, + ), + #[codec(index = 3)] + X3( + runtime_types::xcm::v0::junction::Junction, + runtime_types::xcm::v0::junction::Junction, + runtime_types::xcm::v0::junction::Junction, + ), + #[codec(index = 4)] + X4( + runtime_types::xcm::v0::junction::Junction, + runtime_types::xcm::v0::junction::Junction, + runtime_types::xcm::v0::junction::Junction, + runtime_types::xcm::v0::junction::Junction, + ), + #[codec(index = 5)] + X5( + runtime_types::xcm::v0::junction::Junction, + runtime_types::xcm::v0::junction::Junction, + runtime_types::xcm::v0::junction::Junction, + runtime_types::xcm::v0::junction::Junction, + runtime_types::xcm::v0::junction::Junction, + ), + #[codec(index = 6)] + X6( + runtime_types::xcm::v0::junction::Junction, + runtime_types::xcm::v0::junction::Junction, + runtime_types::xcm::v0::junction::Junction, + runtime_types::xcm::v0::junction::Junction, + runtime_types::xcm::v0::junction::Junction, + runtime_types::xcm::v0::junction::Junction, + ), + #[codec(index = 7)] + X7( + runtime_types::xcm::v0::junction::Junction, + runtime_types::xcm::v0::junction::Junction, + runtime_types::xcm::v0::junction::Junction, + runtime_types::xcm::v0::junction::Junction, + runtime_types::xcm::v0::junction::Junction, + runtime_types::xcm::v0::junction::Junction, + runtime_types::xcm::v0::junction::Junction, + ), + #[codec(index = 8)] + X8( + runtime_types::xcm::v0::junction::Junction, + runtime_types::xcm::v0::junction::Junction, + runtime_types::xcm::v0::junction::Junction, + runtime_types::xcm::v0::junction::Junction, + runtime_types::xcm::v0::junction::Junction, + runtime_types::xcm::v0::junction::Junction, + runtime_types::xcm::v0::junction::Junction, + runtime_types::xcm::v0::junction::Junction, + ), + } + } + pub mod order { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Order { + #[codec(index = 0)] + Null, + #[codec(index = 1)] + DepositAsset { + assets: + ::std::vec::Vec, + dest: runtime_types::xcm::v0::multi_location::MultiLocation, + }, + #[codec(index = 2)] + DepositReserveAsset { + assets: + ::std::vec::Vec, + dest: runtime_types::xcm::v0::multi_location::MultiLocation, + effects: 
::std::vec::Vec, + }, + #[codec(index = 3)] + ExchangeAsset { + give: ::std::vec::Vec, + receive: + ::std::vec::Vec, + }, + #[codec(index = 4)] + InitiateReserveWithdraw { + assets: + ::std::vec::Vec, + reserve: runtime_types::xcm::v0::multi_location::MultiLocation, + effects: ::std::vec::Vec, + }, + #[codec(index = 5)] + InitiateTeleport { + assets: + ::std::vec::Vec, + dest: runtime_types::xcm::v0::multi_location::MultiLocation, + effects: ::std::vec::Vec, + }, + #[codec(index = 6)] + QueryHolding { + #[codec(compact)] + query_id: ::core::primitive::u64, + dest: runtime_types::xcm::v0::multi_location::MultiLocation, + assets: + ::std::vec::Vec, + }, + #[codec(index = 7)] + BuyExecution { + fees: runtime_types::xcm::v0::multi_asset::MultiAsset, + weight: ::core::primitive::u64, + debt: ::core::primitive::u64, + halt_on_error: ::core::primitive::bool, + xcm: ::std::vec::Vec, + }, + } + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum OriginKind { + #[codec(index = 0)] + Native, + #[codec(index = 1)] + SovereignAccount, + #[codec(index = 2)] + Superuser, + #[codec(index = 3)] + Xcm, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Response { + #[codec(index = 0)] + Assets(::std::vec::Vec), + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Xcm { + #[codec(index = 0)] + WithdrawAsset { + assets: ::std::vec::Vec, + effects: ::std::vec::Vec, + }, + #[codec(index = 1)] + ReserveAssetDeposit { + assets: ::std::vec::Vec, + effects: ::std::vec::Vec, + }, + #[codec(index = 2)] + TeleportAsset { + assets: ::std::vec::Vec, + effects: ::std::vec::Vec, + }, + #[codec(index = 3)] + QueryResponse { + #[codec(compact)] + query_id: ::core::primitive::u64, + response: runtime_types::xcm::v0::Response, + }, + #[codec(index = 4)] + TransferAsset { + assets: ::std::vec::Vec, + dest: runtime_types::xcm::v0::multi_location::MultiLocation, + }, + #[codec(index = 5)] + TransferReserveAsset { + assets: ::std::vec::Vec, + dest: runtime_types::xcm::v0::multi_location::MultiLocation, + effects: ::std::vec::Vec, + }, + #[codec(index = 6)] + Transact { + origin_type: runtime_types::xcm::v0::OriginKind, + require_weight_at_most: ::core::primitive::u64, + call: runtime_types::xcm::double_encoded::DoubleEncoded, + }, + #[codec(index = 7)] + HrmpNewChannelOpenRequest { + #[codec(compact)] + sender: ::core::primitive::u32, + #[codec(compact)] + max_message_size: ::core::primitive::u32, + #[codec(compact)] + max_capacity: ::core::primitive::u32, + }, + #[codec(index = 8)] + HrmpChannelAccepted { + #[codec(compact)] + recipient: ::core::primitive::u32, + }, + #[codec(index = 9)] + HrmpChannelClosing { + #[codec(compact)] + initiator: ::core::primitive::u32, + #[codec(compact)] + sender: ::core::primitive::u32, + #[codec(compact)] + recipient: ::core::primitive::u32, + }, + #[codec(index = 10)] + RelayedFrom { + who: runtime_types::xcm::v0::multi_location::MultiLocation, + message: ::std::boxed::Box, + }, + } + } + pub mod v1 { + use super::runtime_types; + pub mod junction { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Junction { + #[codec(index = 0)] + Parachain(#[codec(compact)] ::core::primitive::u32), + #[codec(index = 1)] + AccountId32 { + network: runtime_types::xcm::v0::junction::NetworkId, + id: [::core::primitive::u8; 32usize], + }, + #[codec(index = 2)] + AccountIndex64 { + network: 
runtime_types::xcm::v0::junction::NetworkId, + #[codec(compact)] + index: ::core::primitive::u64, + }, + #[codec(index = 3)] + AccountKey20 { + network: runtime_types::xcm::v0::junction::NetworkId, + key: [::core::primitive::u8; 20usize], + }, + #[codec(index = 4)] + PalletInstance(::core::primitive::u8), + #[codec(index = 5)] + GeneralIndex(#[codec(compact)] ::core::primitive::u128), + #[codec(index = 6)] + GeneralKey(::std::vec::Vec<::core::primitive::u8>), + #[codec(index = 7)] + OnlyChild, + #[codec(index = 8)] + Plurality { + id: runtime_types::xcm::v0::junction::BodyId, + part: runtime_types::xcm::v0::junction::BodyPart, + }, + } + } + pub mod multiasset { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum AssetId { + #[codec(index = 0)] + Concrete(runtime_types::xcm::v1::multilocation::MultiLocation), + #[codec(index = 1)] + Abstract(::std::vec::Vec<::core::primitive::u8>), + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum AssetInstance { + #[codec(index = 0)] + Undefined, + #[codec(index = 1)] + Index(#[codec(compact)] ::core::primitive::u128), + #[codec(index = 2)] + Array4([::core::primitive::u8; 4usize]), + #[codec(index = 3)] + Array8([::core::primitive::u8; 8usize]), + #[codec(index = 4)] + Array16([::core::primitive::u8; 16usize]), + #[codec(index = 5)] + Array32([::core::primitive::u8; 32usize]), + #[codec(index = 6)] + Blob(::std::vec::Vec<::core::primitive::u8>), + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Fungibility { + #[codec(index = 0)] + Fungible(#[codec(compact)] ::core::primitive::u128), + #[codec(index = 1)] + NonFungible(runtime_types::xcm::v1::multiasset::AssetInstance), + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct MultiAsset { + pub id: runtime_types::xcm::v1::multiasset::AssetId, + pub fun: runtime_types::xcm::v1::multiasset::Fungibility, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum MultiAssetFilter { + #[codec(index = 0)] + Definite(runtime_types::xcm::v1::multiasset::MultiAssets), + #[codec(index = 1)] + Wild(runtime_types::xcm::v1::multiasset::WildMultiAsset), + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct MultiAssets( + pub ::std::vec::Vec, + ); + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum WildFungibility { + #[codec(index = 0)] + Fungible, + #[codec(index = 1)] + NonFungible, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum WildMultiAsset { + #[codec(index = 0)] + All, + #[codec(index = 1)] + AllOf { + id: runtime_types::xcm::v1::multiasset::AssetId, + fun: runtime_types::xcm::v1::multiasset::WildFungibility, + }, + } + } + pub mod multilocation { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Junctions { + #[codec(index = 0)] + Here, + #[codec(index = 1)] + X1(runtime_types::xcm::v1::junction::Junction), + #[codec(index = 2)] + X2( + runtime_types::xcm::v1::junction::Junction, + runtime_types::xcm::v1::junction::Junction, + ), + #[codec(index = 3)] + X3( + runtime_types::xcm::v1::junction::Junction, + runtime_types::xcm::v1::junction::Junction, + runtime_types::xcm::v1::junction::Junction, + ), + #[codec(index = 4)] + X4( + runtime_types::xcm::v1::junction::Junction, + 
runtime_types::xcm::v1::junction::Junction, + runtime_types::xcm::v1::junction::Junction, + runtime_types::xcm::v1::junction::Junction, + ), + #[codec(index = 5)] + X5( + runtime_types::xcm::v1::junction::Junction, + runtime_types::xcm::v1::junction::Junction, + runtime_types::xcm::v1::junction::Junction, + runtime_types::xcm::v1::junction::Junction, + runtime_types::xcm::v1::junction::Junction, + ), + #[codec(index = 6)] + X6( + runtime_types::xcm::v1::junction::Junction, + runtime_types::xcm::v1::junction::Junction, + runtime_types::xcm::v1::junction::Junction, + runtime_types::xcm::v1::junction::Junction, + runtime_types::xcm::v1::junction::Junction, + runtime_types::xcm::v1::junction::Junction, + ), + #[codec(index = 7)] + X7( + runtime_types::xcm::v1::junction::Junction, + runtime_types::xcm::v1::junction::Junction, + runtime_types::xcm::v1::junction::Junction, + runtime_types::xcm::v1::junction::Junction, + runtime_types::xcm::v1::junction::Junction, + runtime_types::xcm::v1::junction::Junction, + runtime_types::xcm::v1::junction::Junction, + ), + #[codec(index = 8)] + X8( + runtime_types::xcm::v1::junction::Junction, + runtime_types::xcm::v1::junction::Junction, + runtime_types::xcm::v1::junction::Junction, + runtime_types::xcm::v1::junction::Junction, + runtime_types::xcm::v1::junction::Junction, + runtime_types::xcm::v1::junction::Junction, + runtime_types::xcm::v1::junction::Junction, + runtime_types::xcm::v1::junction::Junction, + ), + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct MultiLocation { + pub parents: ::core::primitive::u8, + pub interior: runtime_types::xcm::v1::multilocation::Junctions, + } + } + pub mod order { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Order { + #[codec(index = 0)] + Noop, + #[codec(index = 1)] + DepositAsset { + assets: runtime_types::xcm::v1::multiasset::MultiAssetFilter, + max_assets: ::core::primitive::u32, + beneficiary: runtime_types::xcm::v1::multilocation::MultiLocation, + }, + #[codec(index = 2)] + DepositReserveAsset { + assets: runtime_types::xcm::v1::multiasset::MultiAssetFilter, + max_assets: ::core::primitive::u32, + dest: runtime_types::xcm::v1::multilocation::MultiLocation, + effects: ::std::vec::Vec, + }, + #[codec(index = 3)] + ExchangeAsset { + give: runtime_types::xcm::v1::multiasset::MultiAssetFilter, + receive: runtime_types::xcm::v1::multiasset::MultiAssets, + }, + #[codec(index = 4)] + InitiateReserveWithdraw { + assets: runtime_types::xcm::v1::multiasset::MultiAssetFilter, + reserve: runtime_types::xcm::v1::multilocation::MultiLocation, + effects: ::std::vec::Vec, + }, + #[codec(index = 5)] + InitiateTeleport { + assets: runtime_types::xcm::v1::multiasset::MultiAssetFilter, + dest: runtime_types::xcm::v1::multilocation::MultiLocation, + effects: ::std::vec::Vec, + }, + #[codec(index = 6)] + QueryHolding { + #[codec(compact)] + query_id: ::core::primitive::u64, + dest: runtime_types::xcm::v1::multilocation::MultiLocation, + assets: runtime_types::xcm::v1::multiasset::MultiAssetFilter, + }, + #[codec(index = 7)] + BuyExecution { + fees: runtime_types::xcm::v1::multiasset::MultiAsset, + weight: ::core::primitive::u64, + debt: ::core::primitive::u64, + halt_on_error: ::core::primitive::bool, + instructions: ::std::vec::Vec, + }, + } + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Response { + #[codec(index = 0)] + 
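// Editorial note (illustrative sketch, not part of the generated bindings): the v1 location
// types above compose by nesting `Junctions` inside `MultiLocation`. A sibling parachain
// (one hop up to the relay, then down into para 2000 -- the para id is only an example) would
// be built roughly as:
//
//     use runtime_types::xcm::v1::junction::Junction;
//     use runtime_types::xcm::v1::multilocation::{Junctions, MultiLocation};
//
//     let sibling_para = MultiLocation {
//         parents: 1,
//         interior: Junctions::X1(Junction::Parachain(2000)),
//     };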
Assets(runtime_types::xcm::v1::multiasset::MultiAssets), + #[codec(index = 1)] + Version(::core::primitive::u32), + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Xcm { + #[codec(index = 0)] + WithdrawAsset { + assets: runtime_types::xcm::v1::multiasset::MultiAssets, + effects: ::std::vec::Vec, + }, + #[codec(index = 1)] + ReserveAssetDeposited { + assets: runtime_types::xcm::v1::multiasset::MultiAssets, + effects: ::std::vec::Vec, + }, + #[codec(index = 2)] + ReceiveTeleportedAsset { + assets: runtime_types::xcm::v1::multiasset::MultiAssets, + effects: ::std::vec::Vec, + }, + #[codec(index = 3)] + QueryResponse { + #[codec(compact)] + query_id: ::core::primitive::u64, + response: runtime_types::xcm::v1::Response, + }, + #[codec(index = 4)] + TransferAsset { + assets: runtime_types::xcm::v1::multiasset::MultiAssets, + beneficiary: runtime_types::xcm::v1::multilocation::MultiLocation, + }, + #[codec(index = 5)] + TransferReserveAsset { + assets: runtime_types::xcm::v1::multiasset::MultiAssets, + dest: runtime_types::xcm::v1::multilocation::MultiLocation, + effects: ::std::vec::Vec, + }, + #[codec(index = 6)] + Transact { + origin_type: runtime_types::xcm::v0::OriginKind, + require_weight_at_most: ::core::primitive::u64, + call: runtime_types::xcm::double_encoded::DoubleEncoded, + }, + #[codec(index = 7)] + HrmpNewChannelOpenRequest { + #[codec(compact)] + sender: ::core::primitive::u32, + #[codec(compact)] + max_message_size: ::core::primitive::u32, + #[codec(compact)] + max_capacity: ::core::primitive::u32, + }, + #[codec(index = 8)] + HrmpChannelAccepted { + #[codec(compact)] + recipient: ::core::primitive::u32, + }, + #[codec(index = 9)] + HrmpChannelClosing { + #[codec(compact)] + initiator: ::core::primitive::u32, + #[codec(compact)] + sender: ::core::primitive::u32, + #[codec(compact)] + recipient: ::core::primitive::u32, + }, + #[codec(index = 10)] + RelayedFrom { + who: runtime_types::xcm::v1::multilocation::Junctions, + message: ::std::boxed::Box, + }, + #[codec(index = 11)] + SubscribeVersion { + #[codec(compact)] + query_id: ::core::primitive::u64, + #[codec(compact)] + max_response_weight: ::core::primitive::u64, + }, + #[codec(index = 12)] + UnsubscribeVersion, + } + } + pub mod v2 { + use super::runtime_types; + pub mod traits { + use super::runtime_types; + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Error { + #[codec(index = 0)] + Overflow, + #[codec(index = 1)] + Unimplemented, + #[codec(index = 2)] + UntrustedReserveLocation, + #[codec(index = 3)] + UntrustedTeleportLocation, + #[codec(index = 4)] + MultiLocationFull, + #[codec(index = 5)] + MultiLocationNotInvertible, + #[codec(index = 6)] + BadOrigin, + #[codec(index = 7)] + InvalidLocation, + #[codec(index = 8)] + AssetNotFound, + #[codec(index = 9)] + FailedToTransactAsset, + #[codec(index = 10)] + NotWithdrawable, + #[codec(index = 11)] + LocationCannotHold, + #[codec(index = 12)] + ExceedsMaxMessageSize, + #[codec(index = 13)] + DestinationUnsupported, + #[codec(index = 14)] + Transport, + #[codec(index = 15)] + Unroutable, + #[codec(index = 16)] + UnknownClaim, + #[codec(index = 17)] + FailedToDecode, + #[codec(index = 18)] + MaxWeightInvalid, + #[codec(index = 19)] + NotHoldingFees, + #[codec(index = 20)] + TooExpensive, + #[codec(index = 21)] + Trap(::core::primitive::u64), + #[codec(index = 22)] + UnhandledXcmVersion, + #[codec(index = 23)] + WeightLimitReached(::core::primitive::u64), + #[codec(index = 24)] + Barrier, + 
#[codec(index = 25)] + WeightNotComputable, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Outcome { + #[codec(index = 0)] + Complete(::core::primitive::u64), + #[codec(index = 1)] + Incomplete( + ::core::primitive::u64, + runtime_types::xcm::v2::traits::Error, + ), + #[codec(index = 2)] + Error(runtime_types::xcm::v2::traits::Error), + } + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Instruction { + #[codec(index = 0)] + WithdrawAsset(runtime_types::xcm::v1::multiasset::MultiAssets), + #[codec(index = 1)] + ReserveAssetDeposited(runtime_types::xcm::v1::multiasset::MultiAssets), + #[codec(index = 2)] + ReceiveTeleportedAsset(runtime_types::xcm::v1::multiasset::MultiAssets), + #[codec(index = 3)] + QueryResponse { + #[codec(compact)] + query_id: ::core::primitive::u64, + response: runtime_types::xcm::v2::Response, + #[codec(compact)] + max_weight: ::core::primitive::u64, + }, + #[codec(index = 4)] + TransferAsset { + assets: runtime_types::xcm::v1::multiasset::MultiAssets, + beneficiary: runtime_types::xcm::v1::multilocation::MultiLocation, + }, + #[codec(index = 5)] + TransferReserveAsset { + assets: runtime_types::xcm::v1::multiasset::MultiAssets, + dest: runtime_types::xcm::v1::multilocation::MultiLocation, + xcm: runtime_types::xcm::v2::Xcm, + }, + #[codec(index = 6)] + Transact { + origin_type: runtime_types::xcm::v0::OriginKind, + #[codec(compact)] + require_weight_at_most: ::core::primitive::u64, + call: runtime_types::xcm::double_encoded::DoubleEncoded, + }, + #[codec(index = 7)] + HrmpNewChannelOpenRequest { + #[codec(compact)] + sender: ::core::primitive::u32, + #[codec(compact)] + max_message_size: ::core::primitive::u32, + #[codec(compact)] + max_capacity: ::core::primitive::u32, + }, + #[codec(index = 8)] + HrmpChannelAccepted { + #[codec(compact)] + recipient: ::core::primitive::u32, + }, + #[codec(index = 9)] + HrmpChannelClosing { + #[codec(compact)] + initiator: ::core::primitive::u32, + #[codec(compact)] + sender: ::core::primitive::u32, + #[codec(compact)] + recipient: ::core::primitive::u32, + }, + #[codec(index = 10)] + ClearOrigin, + #[codec(index = 11)] + DescendOrigin(runtime_types::xcm::v1::multilocation::Junctions), + #[codec(index = 12)] + ReportError { + #[codec(compact)] + query_id: ::core::primitive::u64, + dest: runtime_types::xcm::v1::multilocation::MultiLocation, + #[codec(compact)] + max_response_weight: ::core::primitive::u64, + }, + #[codec(index = 13)] + DepositAsset { + assets: runtime_types::xcm::v1::multiasset::MultiAssetFilter, + #[codec(compact)] + max_assets: ::core::primitive::u32, + beneficiary: runtime_types::xcm::v1::multilocation::MultiLocation, + }, + #[codec(index = 14)] + DepositReserveAsset { + assets: runtime_types::xcm::v1::multiasset::MultiAssetFilter, + #[codec(compact)] + max_assets: ::core::primitive::u32, + dest: runtime_types::xcm::v1::multilocation::MultiLocation, + xcm: runtime_types::xcm::v2::Xcm, + }, + #[codec(index = 15)] + ExchangeAsset { + give: runtime_types::xcm::v1::multiasset::MultiAssetFilter, + receive: runtime_types::xcm::v1::multiasset::MultiAssets, + }, + #[codec(index = 16)] + InitiateReserveWithdraw { + assets: runtime_types::xcm::v1::multiasset::MultiAssetFilter, + reserve: runtime_types::xcm::v1::multilocation::MultiLocation, + xcm: runtime_types::xcm::v2::Xcm, + }, + #[codec(index = 17)] + InitiateTeleport { + assets: runtime_types::xcm::v1::multiasset::MultiAssetFilter, + dest: 
runtime_types::xcm::v1::multilocation::MultiLocation, + xcm: runtime_types::xcm::v2::Xcm, + }, + #[codec(index = 18)] + QueryHolding { + #[codec(compact)] + query_id: ::core::primitive::u64, + dest: runtime_types::xcm::v1::multilocation::MultiLocation, + assets: runtime_types::xcm::v1::multiasset::MultiAssetFilter, + #[codec(compact)] + max_response_weight: ::core::primitive::u64, + }, + #[codec(index = 19)] + BuyExecution { + fees: runtime_types::xcm::v1::multiasset::MultiAsset, + weight_limit: runtime_types::xcm::v2::WeightLimit, + }, + #[codec(index = 20)] + RefundSurplus, + #[codec(index = 21)] + SetErrorHandler(runtime_types::xcm::v2::Xcm), + #[codec(index = 22)] + SetAppendix(runtime_types::xcm::v2::Xcm), + #[codec(index = 23)] + ClearError, + #[codec(index = 24)] + ClaimAsset { + assets: runtime_types::xcm::v1::multiasset::MultiAssets, + ticket: runtime_types::xcm::v1::multilocation::MultiLocation, + }, + #[codec(index = 25)] + Trap(#[codec(compact)] ::core::primitive::u64), + #[codec(index = 26)] + SubscribeVersion { + #[codec(compact)] + query_id: ::core::primitive::u64, + #[codec(compact)] + max_response_weight: ::core::primitive::u64, + }, + #[codec(index = 27)] + UnsubscribeVersion, + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum Response { + #[codec(index = 0)] + Null, + #[codec(index = 1)] + Assets(runtime_types::xcm::v1::multiasset::MultiAssets), + #[codec(index = 2)] + ExecutionResult( + ::core::option::Option<( + ::core::primitive::u32, + runtime_types::xcm::v2::traits::Error, + )>, + ), + #[codec(index = 3)] + Version(::core::primitive::u32), + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum WeightLimit { + #[codec(index = 0)] + Unlimited, + #[codec(index = 1)] + Limited(#[codec(compact)] ::core::primitive::u64), + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub struct Xcm(pub ::std::vec::Vec); + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum VersionedMultiAssets { + #[codec(index = 0)] + V0(::std::vec::Vec), + #[codec(index = 1)] + V1(runtime_types::xcm::v1::multiasset::MultiAssets), + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum VersionedMultiLocation { + #[codec(index = 0)] + V0(runtime_types::xcm::v0::multi_location::MultiLocation), + #[codec(index = 1)] + V1(runtime_types::xcm::v1::multilocation::MultiLocation), + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum VersionedResponse { + #[codec(index = 0)] + V0(runtime_types::xcm::v0::Response), + #[codec(index = 1)] + V1(runtime_types::xcm::v1::Response), + #[codec(index = 2)] + V2(runtime_types::xcm::v2::Response), + } + #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] + pub enum VersionedXcm { + #[codec(index = 0)] + V0(runtime_types::xcm::v0::Xcm), + #[codec(index = 1)] + V1(runtime_types::xcm::v1::Xcm), + #[codec(index = 2)] + V2(runtime_types::xcm::v2::Xcm), + } + } + } + #[doc = r" The default error type returned when there is a runtime issue."] + pub type DispatchError = runtime_types::sp_runtime::DispatchError; + impl ::subxt::HasModuleError for runtime_types::sp_runtime::DispatchError { + fn module_error_data(&self) -> Option<::subxt::ModuleErrorData> { + if let Self::Module(module_error) = self { + Some(::subxt::ModuleErrorData { + pallet_index: module_error.index, + error: module_error.error, + }) + } else { + None + } + } + } + pub 
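// Editorial note (illustrative sketch, not part of the generated bindings): a v2 XCM program
// is just `Xcm(Vec<Instruction>)` built from the instruction set above, with assets expressed
// through the v1 `multiasset` types. A minimal withdraw / buy-execution / deposit sequence
// might look roughly like this; `beneficiary` is a caller-supplied `MultiLocation` and the fee
// amount is arbitrary:
//
//     use runtime_types::xcm::v1::multiasset::{
//         AssetId, Fungibility, MultiAsset, MultiAssetFilter, MultiAssets, WildMultiAsset,
//     };
//     use runtime_types::xcm::v1::multilocation::{Junctions, MultiLocation};
//     use runtime_types::xcm::v2::{Instruction, WeightLimit, Xcm};
//
//     let fee = |amount: u128| MultiAsset {
//         id: AssetId::Concrete(MultiLocation { parents: 0, interior: Junctions::Here }),
//         fun: Fungibility::Fungible(amount),
//     };
//     let program = Xcm(vec![
//         Instruction::WithdrawAsset(MultiAssets(vec![fee(1_000_000_000)])),
//         Instruction::BuyExecution { fees: fee(1_000_000_000), weight_limit: WeightLimit::Unlimited },
//         Instruction::DepositAsset {
//             assets: MultiAssetFilter::Wild(WildMultiAsset::All),
//             max_assets: 1,
//             beneficiary,
//         },
//     ]);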
struct RuntimeApi { + pub client: ::subxt::Client, + marker: ::core::marker::PhantomData, + } + impl ::core::convert::From<::subxt::Client> for RuntimeApi + where + T: ::subxt::Config, + X: ::subxt::extrinsic::ExtrinsicParams, + { + fn from(client: ::subxt::Client) -> Self { + Self { + client, + marker: ::core::marker::PhantomData, + } + } + } + impl<'a, T, X> RuntimeApi + where + T: ::subxt::Config, + X: ::subxt::extrinsic::ExtrinsicParams, + { + pub fn validate_metadata(&'a self) -> Result<(), ::subxt::MetadataError> { + if self.client.metadata().metadata_hash(&PALLETS) + != [ + 171u8, 151u8, 238u8, 248u8, 146u8, 176u8, 17u8, 187u8, 196u8, 188u8, 233u8, + 111u8, 45u8, 124u8, 52u8, 50u8, 33u8, 206u8, 239u8, 173u8, 23u8, 8u8, 56u8, + 68u8, 21u8, 49u8, 188u8, 236u8, 27u8, 193u8, 246u8, 223u8, + ] + { + Err(::subxt::MetadataError::IncompatibleMetadata) + } else { + Ok(()) + } + } + pub fn constants(&'a self) -> ConstantsApi<'a, T> { + ConstantsApi { + client: &self.client, + } + } + pub fn storage(&'a self) -> StorageApi<'a, T> { + StorageApi { + client: &self.client, + } + } + pub fn tx(&'a self) -> TransactionApi<'a, T, X> { + TransactionApi { + client: &self.client, + marker: ::core::marker::PhantomData, + } + } + pub fn events(&'a self) -> EventsApi<'a, T> { + EventsApi { + client: &self.client, + } + } + } + pub struct EventsApi<'a, T: ::subxt::Config> { + client: &'a ::subxt::Client, + } + impl<'a, T: ::subxt::Config> EventsApi<'a, T> { + pub async fn at( + &self, + block_hash: T::Hash, + ) -> Result<::subxt::events::Events<'a, T, Event>, ::subxt::BasicError> { + ::subxt::events::at::(self.client, block_hash).await + } + pub async fn subscribe( + &self, + ) -> Result< + ::subxt::events::EventSubscription<'a, ::subxt::events::EventSub, T, Event>, + ::subxt::BasicError, + > { + ::subxt::events::subscribe::(self.client).await + } + pub async fn subscribe_finalized( + &self, + ) -> Result< + ::subxt::events::EventSubscription< + 'a, + ::subxt::events::FinalizedEventSub<'a, T::Header>, + T, + Event, + >, + ::subxt::BasicError, + > { + ::subxt::events::subscribe_finalized::(self.client).await + } + } + pub struct ConstantsApi<'a, T: ::subxt::Config> { + client: &'a ::subxt::Client, + } + impl<'a, T: ::subxt::Config> ConstantsApi<'a, T> { + pub fn system(&self) -> system::constants::ConstantsApi<'a, T> { + system::constants::ConstantsApi::new(self.client) + } + pub fn babe(&self) -> babe::constants::ConstantsApi<'a, T> { + babe::constants::ConstantsApi::new(self.client) + } + pub fn timestamp(&self) -> timestamp::constants::ConstantsApi<'a, T> { + timestamp::constants::ConstantsApi::new(self.client) + } + pub fn indices(&self) -> indices::constants::ConstantsApi<'a, T> { + indices::constants::ConstantsApi::new(self.client) + } + pub fn balances(&self) -> balances::constants::ConstantsApi<'a, T> { + balances::constants::ConstantsApi::new(self.client) + } + pub fn transaction_payment(&self) -> transaction_payment::constants::ConstantsApi<'a, T> { + transaction_payment::constants::ConstantsApi::new(self.client) + } + pub fn authorship(&self) -> authorship::constants::ConstantsApi<'a, T> { + authorship::constants::ConstantsApi::new(self.client) + } + pub fn grandpa(&self) -> grandpa::constants::ConstantsApi<'a, T> { + grandpa::constants::ConstantsApi::new(self.client) + } + pub fn im_online(&self) -> im_online::constants::ConstantsApi<'a, T> { + im_online::constants::ConstantsApi::new(self.client) + } + pub fn paras(&self) -> paras::constants::ConstantsApi<'a, T> { + 
paras::constants::ConstantsApi::new(self.client) + } + pub fn registrar(&self) -> registrar::constants::ConstantsApi<'a, T> { + registrar::constants::ConstantsApi::new(self.client) + } + pub fn auctions(&self) -> auctions::constants::ConstantsApi<'a, T> { + auctions::constants::ConstantsApi::new(self.client) + } + pub fn crowdloan(&self) -> crowdloan::constants::ConstantsApi<'a, T> { + crowdloan::constants::ConstantsApi::new(self.client) + } + pub fn slots(&self) -> slots::constants::ConstantsApi<'a, T> { + slots::constants::ConstantsApi::new(self.client) + } + pub fn assigned_slots(&self) -> assigned_slots::constants::ConstantsApi<'a, T> { + assigned_slots::constants::ConstantsApi::new(self.client) + } + pub fn bridge_rococo_grandpa( + &self, + ) -> bridge_rococo_grandpa::constants::ConstantsApi<'a, T> { + bridge_rococo_grandpa::constants::ConstantsApi::new(self.client) + } + pub fn bridge_wococo_grandpa( + &self, + ) -> bridge_wococo_grandpa::constants::ConstantsApi<'a, T> { + bridge_wococo_grandpa::constants::ConstantsApi::new(self.client) + } + pub fn bridge_rococo_messages( + &self, + ) -> bridge_rococo_messages::constants::ConstantsApi<'a, T> { + bridge_rococo_messages::constants::ConstantsApi::new(self.client) + } + pub fn bridge_wococo_messages( + &self, + ) -> bridge_wococo_messages::constants::ConstantsApi<'a, T> { + bridge_wococo_messages::constants::ConstantsApi::new(self.client) + } + pub fn utility(&self) -> utility::constants::ConstantsApi<'a, T> { + utility::constants::ConstantsApi::new(self.client) + } + pub fn proxy(&self) -> proxy::constants::ConstantsApi<'a, T> { + proxy::constants::ConstantsApi::new(self.client) + } + pub fn multisig(&self) -> multisig::constants::ConstantsApi<'a, T> { + multisig::constants::ConstantsApi::new(self.client) + } + } + pub struct StorageApi<'a, T: ::subxt::Config> { + client: &'a ::subxt::Client, + } + impl<'a, T> StorageApi<'a, T> + where + T: ::subxt::Config, + { + pub fn system(&self) -> system::storage::StorageApi<'a, T> { + system::storage::StorageApi::new(self.client) + } + pub fn babe(&self) -> babe::storage::StorageApi<'a, T> { + babe::storage::StorageApi::new(self.client) + } + pub fn timestamp(&self) -> timestamp::storage::StorageApi<'a, T> { + timestamp::storage::StorageApi::new(self.client) + } + pub fn indices(&self) -> indices::storage::StorageApi<'a, T> { + indices::storage::StorageApi::new(self.client) + } + pub fn balances(&self) -> balances::storage::StorageApi<'a, T> { + balances::storage::StorageApi::new(self.client) + } + pub fn transaction_payment(&self) -> transaction_payment::storage::StorageApi<'a, T> { + transaction_payment::storage::StorageApi::new(self.client) + } + pub fn authorship(&self) -> authorship::storage::StorageApi<'a, T> { + authorship::storage::StorageApi::new(self.client) + } + pub fn offences(&self) -> offences::storage::StorageApi<'a, T> { + offences::storage::StorageApi::new(self.client) + } + pub fn historical(&self) -> historical::storage::StorageApi<'a, T> { + historical::storage::StorageApi::new(self.client) + } + pub fn session(&self) -> session::storage::StorageApi<'a, T> { + session::storage::StorageApi::new(self.client) + } + pub fn grandpa(&self) -> grandpa::storage::StorageApi<'a, T> { + grandpa::storage::StorageApi::new(self.client) + } + pub fn im_online(&self) -> im_online::storage::StorageApi<'a, T> { + im_online::storage::StorageApi::new(self.client) + } + pub fn authority_discovery(&self) -> authority_discovery::storage::StorageApi<'a, T> { + 
authority_discovery::storage::StorageApi::new(self.client) + } + pub fn configuration(&self) -> configuration::storage::StorageApi<'a, T> { + configuration::storage::StorageApi::new(self.client) + } + pub fn paras_shared(&self) -> paras_shared::storage::StorageApi<'a, T> { + paras_shared::storage::StorageApi::new(self.client) + } + pub fn para_inclusion(&self) -> para_inclusion::storage::StorageApi<'a, T> { + para_inclusion::storage::StorageApi::new(self.client) + } + pub fn para_inherent(&self) -> para_inherent::storage::StorageApi<'a, T> { + para_inherent::storage::StorageApi::new(self.client) + } + pub fn para_scheduler(&self) -> para_scheduler::storage::StorageApi<'a, T> { + para_scheduler::storage::StorageApi::new(self.client) + } + pub fn paras(&self) -> paras::storage::StorageApi<'a, T> { + paras::storage::StorageApi::new(self.client) + } + pub fn initializer(&self) -> initializer::storage::StorageApi<'a, T> { + initializer::storage::StorageApi::new(self.client) + } + pub fn dmp(&self) -> dmp::storage::StorageApi<'a, T> { + dmp::storage::StorageApi::new(self.client) + } + pub fn ump(&self) -> ump::storage::StorageApi<'a, T> { + ump::storage::StorageApi::new(self.client) + } + pub fn hrmp(&self) -> hrmp::storage::StorageApi<'a, T> { + hrmp::storage::StorageApi::new(self.client) + } + pub fn para_session_info(&self) -> para_session_info::storage::StorageApi<'a, T> { + para_session_info::storage::StorageApi::new(self.client) + } + pub fn paras_disputes(&self) -> paras_disputes::storage::StorageApi<'a, T> { + paras_disputes::storage::StorageApi::new(self.client) + } + pub fn registrar(&self) -> registrar::storage::StorageApi<'a, T> { + registrar::storage::StorageApi::new(self.client) + } + pub fn auctions(&self) -> auctions::storage::StorageApi<'a, T> { + auctions::storage::StorageApi::new(self.client) + } + pub fn crowdloan(&self) -> crowdloan::storage::StorageApi<'a, T> { + crowdloan::storage::StorageApi::new(self.client) + } + pub fn slots(&self) -> slots::storage::StorageApi<'a, T> { + slots::storage::StorageApi::new(self.client) + } + pub fn assigned_slots(&self) -> assigned_slots::storage::StorageApi<'a, T> { + assigned_slots::storage::StorageApi::new(self.client) + } + pub fn sudo(&self) -> sudo::storage::StorageApi<'a, T> { + sudo::storage::StorageApi::new(self.client) + } + pub fn mmr(&self) -> mmr::storage::StorageApi<'a, T> { + mmr::storage::StorageApi::new(self.client) + } + pub fn beefy(&self) -> beefy::storage::StorageApi<'a, T> { + beefy::storage::StorageApi::new(self.client) + } + pub fn mmr_leaf(&self) -> mmr_leaf::storage::StorageApi<'a, T> { + mmr_leaf::storage::StorageApi::new(self.client) + } + pub fn validator_manager(&self) -> validator_manager::storage::StorageApi<'a, T> { + validator_manager::storage::StorageApi::new(self.client) + } + pub fn bridge_rococo_grandpa(&self) -> bridge_rococo_grandpa::storage::StorageApi<'a, T> { + bridge_rococo_grandpa::storage::StorageApi::new(self.client) + } + pub fn bridge_wococo_grandpa(&self) -> bridge_wococo_grandpa::storage::StorageApi<'a, T> { + bridge_wococo_grandpa::storage::StorageApi::new(self.client) + } + pub fn bridge_rococo_messages(&self) -> bridge_rococo_messages::storage::StorageApi<'a, T> { + bridge_rococo_messages::storage::StorageApi::new(self.client) + } + pub fn bridge_wococo_messages(&self) -> bridge_wococo_messages::storage::StorageApi<'a, T> { + bridge_wococo_messages::storage::StorageApi::new(self.client) + } + pub fn collective(&self) -> collective::storage::StorageApi<'a, T> { + 
collective::storage::StorageApi::new(self.client) + } + pub fn membership(&self) -> membership::storage::StorageApi<'a, T> { + membership::storage::StorageApi::new(self.client) + } + pub fn proxy(&self) -> proxy::storage::StorageApi<'a, T> { + proxy::storage::StorageApi::new(self.client) + } + pub fn multisig(&self) -> multisig::storage::StorageApi<'a, T> { + multisig::storage::StorageApi::new(self.client) + } + pub fn xcm_pallet(&self) -> xcm_pallet::storage::StorageApi<'a, T> { + xcm_pallet::storage::StorageApi::new(self.client) + } + } + pub struct TransactionApi<'a, T: ::subxt::Config, X> { + client: &'a ::subxt::Client, + marker: ::core::marker::PhantomData, + } + impl<'a, T, X> TransactionApi<'a, T, X> + where + T: ::subxt::Config, + X: ::subxt::extrinsic::ExtrinsicParams, + { + pub fn system(&self) -> system::calls::TransactionApi<'a, T, X> { + system::calls::TransactionApi::new(self.client) + } + pub fn babe(&self) -> babe::calls::TransactionApi<'a, T, X> { + babe::calls::TransactionApi::new(self.client) + } + pub fn timestamp(&self) -> timestamp::calls::TransactionApi<'a, T, X> { + timestamp::calls::TransactionApi::new(self.client) + } + pub fn indices(&self) -> indices::calls::TransactionApi<'a, T, X> { + indices::calls::TransactionApi::new(self.client) + } + pub fn balances(&self) -> balances::calls::TransactionApi<'a, T, X> { + balances::calls::TransactionApi::new(self.client) + } + pub fn authorship(&self) -> authorship::calls::TransactionApi<'a, T, X> { + authorship::calls::TransactionApi::new(self.client) + } + pub fn session(&self) -> session::calls::TransactionApi<'a, T, X> { + session::calls::TransactionApi::new(self.client) + } + pub fn grandpa(&self) -> grandpa::calls::TransactionApi<'a, T, X> { + grandpa::calls::TransactionApi::new(self.client) + } + pub fn im_online(&self) -> im_online::calls::TransactionApi<'a, T, X> { + im_online::calls::TransactionApi::new(self.client) + } + pub fn configuration(&self) -> configuration::calls::TransactionApi<'a, T, X> { + configuration::calls::TransactionApi::new(self.client) + } + pub fn paras_shared(&self) -> paras_shared::calls::TransactionApi<'a, T, X> { + paras_shared::calls::TransactionApi::new(self.client) + } + pub fn para_inclusion(&self) -> para_inclusion::calls::TransactionApi<'a, T, X> { + para_inclusion::calls::TransactionApi::new(self.client) + } + pub fn para_inherent(&self) -> para_inherent::calls::TransactionApi<'a, T, X> { + para_inherent::calls::TransactionApi::new(self.client) + } + pub fn paras(&self) -> paras::calls::TransactionApi<'a, T, X> { + paras::calls::TransactionApi::new(self.client) + } + pub fn initializer(&self) -> initializer::calls::TransactionApi<'a, T, X> { + initializer::calls::TransactionApi::new(self.client) + } + pub fn dmp(&self) -> dmp::calls::TransactionApi<'a, T, X> { + dmp::calls::TransactionApi::new(self.client) + } + pub fn ump(&self) -> ump::calls::TransactionApi<'a, T, X> { + ump::calls::TransactionApi::new(self.client) + } + pub fn hrmp(&self) -> hrmp::calls::TransactionApi<'a, T, X> { + hrmp::calls::TransactionApi::new(self.client) + } + pub fn paras_disputes(&self) -> paras_disputes::calls::TransactionApi<'a, T, X> { + paras_disputes::calls::TransactionApi::new(self.client) + } + pub fn registrar(&self) -> registrar::calls::TransactionApi<'a, T, X> { + registrar::calls::TransactionApi::new(self.client) + } + pub fn auctions(&self) -> auctions::calls::TransactionApi<'a, T, X> { + auctions::calls::TransactionApi::new(self.client) + } + pub fn crowdloan(&self) -> 
crowdloan::calls::TransactionApi<'a, T, X> { + crowdloan::calls::TransactionApi::new(self.client) + } + pub fn slots(&self) -> slots::calls::TransactionApi<'a, T, X> { + slots::calls::TransactionApi::new(self.client) + } + pub fn paras_sudo_wrapper(&self) -> paras_sudo_wrapper::calls::TransactionApi<'a, T, X> { + paras_sudo_wrapper::calls::TransactionApi::new(self.client) + } + pub fn assigned_slots(&self) -> assigned_slots::calls::TransactionApi<'a, T, X> { + assigned_slots::calls::TransactionApi::new(self.client) + } + pub fn sudo(&self) -> sudo::calls::TransactionApi<'a, T, X> { + sudo::calls::TransactionApi::new(self.client) + } + pub fn beefy(&self) -> beefy::calls::TransactionApi<'a, T, X> { + beefy::calls::TransactionApi::new(self.client) + } + pub fn validator_manager(&self) -> validator_manager::calls::TransactionApi<'a, T, X> { + validator_manager::calls::TransactionApi::new(self.client) + } + pub fn bridge_rococo_grandpa( + &self, + ) -> bridge_rococo_grandpa::calls::TransactionApi<'a, T, X> { + bridge_rococo_grandpa::calls::TransactionApi::new(self.client) + } + pub fn bridge_wococo_grandpa( + &self, + ) -> bridge_wococo_grandpa::calls::TransactionApi<'a, T, X> { + bridge_wococo_grandpa::calls::TransactionApi::new(self.client) + } + pub fn bridge_rococo_messages( + &self, + ) -> bridge_rococo_messages::calls::TransactionApi<'a, T, X> { + bridge_rococo_messages::calls::TransactionApi::new(self.client) + } + pub fn bridge_wococo_messages( + &self, + ) -> bridge_wococo_messages::calls::TransactionApi<'a, T, X> { + bridge_wococo_messages::calls::TransactionApi::new(self.client) + } + pub fn collective(&self) -> collective::calls::TransactionApi<'a, T, X> { + collective::calls::TransactionApi::new(self.client) + } + pub fn membership(&self) -> membership::calls::TransactionApi<'a, T, X> { + membership::calls::TransactionApi::new(self.client) + } + pub fn utility(&self) -> utility::calls::TransactionApi<'a, T, X> { + utility::calls::TransactionApi::new(self.client) + } + pub fn proxy(&self) -> proxy::calls::TransactionApi<'a, T, X> { + proxy::calls::TransactionApi::new(self.client) + } + pub fn multisig(&self) -> multisig::calls::TransactionApi<'a, T, X> { + multisig::calls::TransactionApi::new(self.client) + } + pub fn xcm_pallet(&self) -> xcm_pallet::calls::TransactionApi<'a, T, X> { + xcm_pallet::calls::TransactionApi::new(self.client) + } + } +} diff --git a/modules/src/core/ics02_client/handler/update_client.rs b/modules/src/core/ics02_client/handler/update_client.rs index 5bbdf7ad38..bd7b3eb368 100644 --- a/modules/src/core/ics02_client/handler/update_client.rs +++ b/modules/src/core/ics02_client/handler/update_client.rs @@ -128,14 +128,18 @@ mod tests { use core::str::FromStr; use test_log::test; + use crate::clients::ics11_beefy::client_state::ClientState as BeefyClientState; + use crate::clients::ics11_beefy::header::BeefyHeader; + use crate::clients::ics11_beefy::polkadot_runtime as runtime; use crate::core::ics02_client::client_consensus::AnyConsensusState; use crate::core::ics02_client::client_state::{AnyClientState, ClientState}; use crate::core::ics02_client::client_type::ClientType; - use crate::core::ics02_client::context::ClientReader; + use crate::core::ics02_client::context::{ClientKeeper, ClientReader}; use crate::core::ics02_client::error::{Error, ErrorDetail}; use crate::core::ics02_client::handler::dispatch; use crate::core::ics02_client::handler::ClientResult::Update; use crate::core::ics02_client::header::AnyHeader; + use 
crate::core::ics02_client::msgs::create_client::MsgCreateAnyClient; use crate::core::ics02_client::msgs::update_client::MsgUpdateAnyClient; use crate::core::ics02_client::msgs::ClientMsg; use crate::core::ics24_host::identifier::{ChainId, ClientId}; @@ -149,6 +153,9 @@ mod tests { use crate::test_utils::{get_dummy_account_id, Crypto}; use crate::timestamp::Timestamp; use crate::Height; + use beefy_primitives::mmr::BeefyNextAuthoritySet; + use hex_literal::hex; + use sp_core::H256; #[test] fn test_update_client_ok() { @@ -562,4 +569,419 @@ mod tests { }, } } + + use crate::clients::ics11_beefy::header::ParachainHeader as BeefyParachainHeader; + use beefy_client::primitives::{ + MmrLeaf, MmrUpdateProof, ParachainHeader, PartialMmrLeaf, SignatureWithAuthorityIndex, + SignedCommitment, + }; + use beefy_client::{MerkleHasher, NodesUtils}; + use codec::{Decode, Encode}; + use sp_core::keccak_256; + use sp_runtime::traits::Convert; + use std::collections::BTreeMap; + use subxt::rpc::ClientT; + use subxt::rpc::{rpc_params, JsonValue, Subscription, SubscriptionClientT}; + + const PARA_ID: u32 = 2000; + + /// Construct the mmr update for beefy light client + async fn get_mmr_update( + client: &subxt::Client, + signed_commitment: beefy_primitives::SignedCommitment< + u32, + beefy_primitives::crypto::Signature, + >, + ) -> MmrUpdateProof { + let api = + client.clone().to_runtime_api::, + >>(); + let subxt_block_number: subxt::BlockNumber = + signed_commitment.commitment.block_number.into(); + let block_hash = client + .rpc() + .block_hash(Some(subxt_block_number)) + .await + .unwrap(); + + let current_authorities = api.storage().beefy().authorities(block_hash).await.unwrap(); + + // Current LeafIndex + let block_number = signed_commitment.commitment.block_number; + let leaf_index = (block_number - 1) as u64; + let leaf_proof: pallet_mmr_rpc::LeafProof = client + .rpc() + .client + .request("mmr_generateProof", rpc_params!(leaf_index, block_hash)) + .await + .unwrap(); + + let opaque_leaf: Vec = codec::Decode::decode(&mut &*leaf_proof.leaf.0).unwrap(); + let latest_leaf: MmrLeaf = + codec::Decode::decode(&mut &*opaque_leaf).unwrap(); + let mmr_proof: pallet_mmr_primitives::Proof = + codec::Decode::decode(&mut &*leaf_proof.proof.0).unwrap(); + + let authority_address_hashes = current_authorities + .into_iter() + .map(|x| { + let id: beefy_primitives::crypto::AuthorityId = + codec::Decode::decode(&mut &*x.encode()).unwrap(); + keccak_256(&beefy_mmr::BeefyEcdsaToEthereum::convert(id)) + }) + .collect::>(); + + let signatures = signed_commitment + .signatures + .into_iter() + .enumerate() + .map(|(index, x)| { + if let Some(sig) = x { + let mut temp = [0u8; 65]; + if sig.len() == 65 { + temp.copy_from_slice(&*sig.encode()); + Some(SignatureWithAuthorityIndex { + index: index as u32, + signature: temp, + }) + } else { + None + } + } else { + None + } + }) + .filter_map(|x| x) + .collect::>(); + + let signature_indices = signatures + .iter() + .map(|x| x.index as usize) + .collect::>(); + + let tree = + rs_merkle::MerkleTree::>::from_leaves(&authority_address_hashes); + + let authority_proof = tree.proof(&signature_indices); + + MmrUpdateProof { + signed_commitment: SignedCommitment { + commitment: signed_commitment.commitment.clone(), + signatures, + }, + latest_mmr_leaf: latest_leaf.clone(), + mmr_proof, + authority_proof: authority_proof.proof_hashes().to_vec(), + } + } + + #[tokio::test] + async fn test_continuous_update_of_beefy_client() { + let client_id = ClientId::new(ClientType::Beefy, 0).unwrap(); 
+ + let chain_start_height = Height::new(1, 11); + + let mut ctx = MockContext::new( + ChainId::new("mockgaiaA".to_string(), 1), + HostType::Mock, + 5, + chain_start_height, + ); + + let signer = get_dummy_account_id(); + + let beefy_client_state = BeefyClientState { + chain_id: Default::default(), + frozen_height: None, + latest_beefy_height: 0, + mmr_root_hash: Default::default(), + authority: BeefyNextAuthoritySet { + id: 0, + len: 5, + root: H256::from(hex!( + "baa93c7834125ee3120bac6e3342bd3f28611110ad21ab6075367abdffefeb09" + )), + }, + next_authority_set: BeefyNextAuthoritySet { + id: 1, + len: 5, + root: H256::from(hex!( + "baa93c7834125ee3120bac6e3342bd3f28611110ad21ab6075367abdffefeb09" + )), + }, + beefy_activation_block: 0, + }; + + let create_client = MsgCreateAnyClient { + client_state: AnyClientState::Beefy(beefy_client_state), + consensus_state: None, + signer: signer.clone(), + }; + + // Create the client + let res = dispatch::<_, Crypto>(&ctx, ClientMsg::CreateClient(create_client)).unwrap(); + ctx.store_client_result(res.result).unwrap(); + + let url = std::env::var("NODE_ENDPOINT").unwrap_or("ws://127.0.0.1:9944".to_string()); + let client = subxt::ClientBuilder::new() + .set_url(url) + .build::() + .await + .unwrap(); + let api = + client.clone().to_runtime_api::, + >>(); + let mut subscription: Subscription = client + .rpc() + .client + .subscribe( + "beefy_subscribeJustifications", + rpc_params![], + "beefy_unsubscribeJustifications", + ) + .await + .unwrap(); + let mut count = 0; + let client_state = ctx.client_state(&client_id).unwrap(); + // Before watching for commitments, we need to check that out initial validator set id is correct + let next_val_set = api + .storage() + .mmr_leaf() + .beefy_next_authorities(None) + .await + .unwrap(); + match client_state { + AnyClientState::Tendermint(_) => {} + AnyClientState::Beefy(mut client_state) => { + if next_val_set.id != client_state.next_authority_set.id { + // Update the Id + // Note that the authorities are not changing, only the id is changing in this development scenario + client_state.next_authority_set.id = next_val_set.id; + client_state.authority.id = next_val_set.id - 1; + ctx.store_client_state(client_id.clone(), AnyClientState::Beefy(client_state)) + .unwrap(); + } + } + AnyClientState::Mock(_) => {} + } + let mut latest_beefy_height = 0; + + while let Some(Ok(commitment)) = subscription.next().await { + if count == 10 { + break; + } + let recv_commitment: sp_core::Bytes = + serde_json::from_value(JsonValue::String(commitment)).unwrap(); + let signed_commitment: beefy_primitives::SignedCommitment< + u32, + beefy_primitives::crypto::Signature, + > = codec::Decode::decode(&mut &*recv_commitment).unwrap(); + + std::println!( + "Received signed commitmment for: {:?}", + signed_commitment.commitment.block_number + ); + + let block_number = signed_commitment.commitment.block_number; + let subxt_block_number: subxt::BlockNumber = block_number.into(); + let block_hash = client + .rpc() + .block_hash(Some(subxt_block_number)) + .await + .unwrap(); + + let para_ids = api.storage().paras().parachains(block_hash).await.unwrap(); + let storage_prefix = frame_support::storage::storage_prefix(b"Paras", b"Heads"); + let mut para_header_keys = Vec::new(); + + for para_id in para_ids { + let encoded_para_id = para_id.encode(); + + let mut full_key = storage_prefix.clone().to_vec(); + full_key.extend_from_slice(sp_core::hashing::twox_64(&encoded_para_id).as_slice()); + full_key.extend_from_slice(&encoded_para_id); + 
para_header_keys.push(subxt::sp_core::storage::StorageKey(full_key)); + } + + let previous_finalized_block_number: subxt::BlockNumber = + (latest_beefy_height + 1).into(); + let previous_finalized_hash = client + .rpc() + .block_hash(Some(previous_finalized_block_number)) + .await + .unwrap() + .unwrap(); + + let change_set = client + .storage() + .query_storage(para_header_keys, previous_finalized_hash, block_hash) + .await + .unwrap(); + let mut finalized_blocks = BTreeMap::new(); + let mut leaf_indices = vec![]; + for changes in change_set { + let header = client + .rpc() + .header(Some(changes.block)) + .await + .unwrap() + .unwrap(); + + let mut heads = BTreeMap::new(); + + for (key, value) in changes.changes { + if let Some(storage_data) = value { + let key = key.0.to_vec(); + let para_id = u32::decode(&mut &key[40..]).unwrap(); + let head_data: runtime::api::runtime_types::polkadot_parachain::primitives::HeadData = Decode::decode(&mut &*storage_data.0).unwrap(); + heads.insert(para_id, head_data.0); + } + } + + if !heads.contains_key(&PARA_ID) { + continue; + } + finalized_blocks.insert(header.number as u64, heads); + leaf_indices.push(header.number - 1); + } + + let batch_proof: pallet_mmr_rpc::LeafBatchProof = client + .rpc() + .client + .request( + "mmr_generateBatchProof", + rpc_params!(leaf_indices.clone(), block_hash), + ) + .await + .unwrap(); + + let leaves: Vec> = Decode::decode(&mut &*batch_proof.leaves.to_vec()).unwrap(); + + let mut parachain_headers = vec![]; + for leaf_bytes in leaves { + let leaf: MmrLeaf = + Decode::decode(&mut &*leaf_bytes).unwrap(); + let leaf_block_number = (leaf.parent_number_and_hash.0 + 1) as u64; + let para_headers = finalized_blocks.get(&leaf_block_number).unwrap(); + + let mut index = None; + let mut parachain_leaves = vec![]; + // Values are already sorted by key which is the para_id + for (idx, (key, header)) in para_headers.iter().enumerate() { + let pair = (*key, header.clone()); + let leaf_hash = keccak_256(pair.encode().as_slice()); + parachain_leaves.push(leaf_hash); + if key == &PARA_ID { + index = Some(idx); + } + } + + let tree = + rs_merkle::MerkleTree::>::from_leaves(¶chain_leaves); + + let proof = if let Some(index) = index { + tree.proof(&[index]) + .proof_hashes() + .into_iter() + .map(|item| item.clone()) + .collect::>() + } else { + vec![] + }; + + let header = ParachainHeader { + parachain_header: para_headers.get(&PARA_ID).unwrap().clone(), + partial_mmr_leaf: PartialMmrLeaf { + version: leaf.version, + parent_number_and_hash: leaf.parent_number_and_hash, + beefy_next_authority_set: leaf.beefy_next_authority_set.clone(), + }, + para_id: PARA_ID, + parachain_heads_proof: proof, + heads_leaf_index: index.unwrap() as u32, + heads_total_count: parachain_leaves.len() as u32, + extrinsic_proof: vec![], + }; + + parachain_headers.push(header); + } + + let batch_proof: pallet_mmr_primitives::BatchProof = + codec::Decode::decode(&mut batch_proof.proof.0.as_slice()).unwrap(); + + let mmr_update = get_mmr_update(&client, signed_commitment.clone()).await; + + let mmr_size = NodesUtils::new(batch_proof.leaf_count).size(); + + let header = BeefyHeader { + parachain_headers: parachain_headers + .into_iter() + .map(|header| BeefyParachainHeader { + parachain_header: Decode::decode(&mut &*header.parachain_header.as_slice()) + .unwrap(), + partial_mmr_leaf: header.partial_mmr_leaf, + para_id: header.para_id, + parachain_heads_proof: header.parachain_heads_proof, + heads_leaf_index: header.heads_leaf_index, + heads_total_count: 
header.heads_total_count, + extrinsic_proof: header.extrinsic_proof, + }) + .collect(), + mmr_proofs: batch_proof + .items + .into_iter() + .map(|item| item.encode()) + .collect(), + mmr_size, + mmr_update_proof: Some(mmr_update), + }; + + let msg = MsgUpdateAnyClient { + client_id: client_id.clone(), + header: AnyHeader::Beefy(header), + signer: signer.clone(), + }; + + let res = dispatch::<_, Crypto>(&ctx, ClientMsg::UpdateClient(msg.clone())); + + match res { + Ok(HandlerOutput { + result, + mut events, + log, + }) => { + assert_eq!(events.len(), 1); + let event = events.pop().unwrap(); + assert!( + matches!(event, IbcEvent::UpdateClient(ref e) if e.client_id() == &msg.client_id) + ); + assert_eq!(event.height(), ctx.host_height()); + assert!(log.is_empty()); + + match result { + Update(upd_res) => { + assert_eq!(upd_res.client_id, client_id); + assert!(!upd_res.client_state.is_frozen()); + assert_eq!( + upd_res.client_state, + ctx.latest_client_states(&client_id).clone() + ); + assert_eq!( + upd_res.client_state.latest_height(), + Height::new(0, signed_commitment.commitment.block_number as u64), + ) + } + _ => panic!("update handler result has incorrect type"), + } + } + Err(e) => panic!("Unexpected error {:?}", e), + } + latest_beefy_height = signed_commitment.commitment.block_number; + count += 1; + } + } } diff --git a/modules/src/lib.rs b/modules/src/lib.rs index 758f180223..f38dcb315c 100644 --- a/modules/src/lib.rs +++ b/modules/src/lib.rs @@ -2,7 +2,7 @@ // https://github.com/informalsystems/ibc-rs/issues/987 // #![cfg_attr(not(test), deny(clippy::unwrap_used))] -#![no_std] +#![cfg_attr(not(feature = "std"), no_std)] #![allow(clippy::large_enum_variant)] #![deny( warnings, @@ -10,8 +10,8 @@ trivial_numeric_casts, unused_import_braces, unused_qualifications, - rust_2018_idioms )] +#![cfg_attr(not(test), deny(rust_2018_idioms))] #![forbid(unsafe_code)] //! This library implements the InterBlockchain Communication (IBC) protocol in Rust. 
IBC is From d86fc429de04256ae84857730427d8206eec98bc Mon Sep 17 00:00:00 2001 From: David Salami Date: Tue, 17 May 2022 16:05:23 +0100 Subject: [PATCH 16/96] patch test --- modules/src/clients/ics11_beefy/client_def.rs | 4 +- .../clients/ics11_beefy/consensus_state.rs | 39 +++++++-- modules/src/clients/ics11_beefy/header.rs | 10 +-- .../ics02_client/handler/update_client.rs | 79 +++++++++++++++---- modules/src/lib.rs | 2 +- 5 files changed, 103 insertions(+), 31 deletions(-) diff --git a/modules/src/clients/ics11_beefy/client_def.rs b/modules/src/clients/ics11_beefy/client_def.rs index e4679eb05d..75b35d7279 100644 --- a/modules/src/clients/ics11_beefy/client_def.rs +++ b/modules/src/clients/ics11_beefy/client_def.rs @@ -86,8 +86,8 @@ impl ClientDef for BeefyClient { .clone() .into_iter() .map(|header| { - let leaf_index = - client_state.to_leaf_index(header.partial_mmr_leaf.parent_number_and_hash.0 + 1); + let leaf_index = client_state + .to_leaf_index(header.partial_mmr_leaf.parent_number_and_hash.0 + 1); leaf_indices.push(leaf_index as u64); ParachainHeader { parachain_header: header.parachain_header.encode(), diff --git a/modules/src/clients/ics11_beefy/consensus_state.rs b/modules/src/clients/ics11_beefy/consensus_state.rs index f6f3c4b3d6..db270ddf6f 100644 --- a/modules/src/clients/ics11_beefy/consensus_state.rs +++ b/modules/src/clients/ics11_beefy/consensus_state.rs @@ -2,7 +2,6 @@ use crate::prelude::*; use core::convert::Infallible; use serde::Serialize; -use sp_runtime::SaturatedConversion; use tendermint::time::Time; use tendermint_proto::google::protobuf as tpb; use tendermint_proto::Protobuf; @@ -10,11 +9,10 @@ use tendermint_proto::Protobuf; use ibc_proto::ibc::lightclients::beefy::v1::ConsensusState as RawConsensusState; use crate::clients::ics11_beefy::error::Error; -use crate::clients::ics11_beefy::header::{decode_timestamp_extrinsic, ParachainHeader}; +use crate::clients::ics11_beefy::header::ParachainHeader; use crate::core::ics02_client::client_consensus::AnyConsensusState; use crate::core::ics02_client::client_type::ClientType; use crate::core::ics23_commitment::commitment::CommitmentRoot; -use crate::timestamp::Timestamp; // This is a constant that comes from pallet-ibc pub const IBC_CONSENSUS_ID: [u8; 4] = *b"/IBC"; @@ -132,7 +130,11 @@ impl From for RawConsensusState { impl TryFrom for ConsensusState { type Error = Error; + #[cfg(not(test))] fn try_from(header: ParachainHeader) -> Result { + use crate::clients::ics11_beefy::header::decode_timestamp_extrinsic; + use crate::timestamp::Timestamp; + use sp_runtime::SaturatedConversion; let root = { header .parachain_header @@ -142,7 +144,9 @@ impl TryFrom for ConsensusState { .filter_map(|digest| digest.as_consensus()) .find(|(id, _value)| id == &IBC_CONSENSUS_ID) .map(|(.., root)| root.to_vec()) - .ok_or(Error::invalid_header("cannot find ibc commitment root".to_string()))? + .ok_or(Error::invalid_header( + "cannot find ibc commitment root".to_string(), + ))? 
}; let timestamp = decode_timestamp_extrinsic(&header).unwrap_or_default(); @@ -150,7 +154,32 @@ impl TryFrom for ConsensusState { let timestamp = Timestamp::from_nanoseconds(duration.as_nanos().saturated_into::()) .unwrap_or_default() .into_tm_time() - .ok_or(Error::invalid_header("cannot decode timestamp extrinsic".to_string()))?; + .ok_or(Error::invalid_header( + "cannot decode timestamp extrinsic".to_string(), + ))?; + + Ok(Self { + root: root.into(), + timestamp, + }) + } + + #[cfg(test)] + fn try_from(header: ParachainHeader) -> Result { + let root = { + header + .parachain_header + .digest + .logs + .iter() + .filter_map(|digest| digest.as_consensus()) + .find(|(id, _value)| id == &IBC_CONSENSUS_ID) + .map(|(.., root)| root.to_vec()) + .unwrap_or_default() + }; + + // Todo: this is a placeholder for now until decoding extrinsic from extrinsic proof is figured out + let timestamp = Time::now(); Ok(Self { root: root.into(), diff --git a/modules/src/clients/ics11_beefy/header.rs b/modules/src/clients/ics11_beefy/header.rs index a37b5c35ea..4431779215 100644 --- a/modules/src/clients/ics11_beefy/header.rs +++ b/modules/src/clients/ics11_beefy/header.rs @@ -438,22 +438,18 @@ pub fn decode_timestamp_extrinsic(header: &ParachainHeader) -> Result(); - let trie = - sp_trie::TrieDB::>::new(&db, &extrinsic_root).unwrap(); + let trie = sp_trie::TrieDB::>::new(&db, &extrinsic_root) + .map_err(|_| Error::timestamp_extrinsic())?; // Timestamp extrinsic should be the first inherent and hence the first extrinsic // https://github.com/paritytech/substrate/blob/d602397a0bbb24b5d627795b797259a44a5e29e9/primitives/trie/src/lib.rs#L99-L101 let key = codec::Encode::encode(&Compact(0u32)); let ext_bytes = trie .get(&key) - .map_err(|_| Error::timestamp_extrinsic())? + .map_err(|e| Error::timestamp_extrinsic())? .ok_or_else(Error::timestamp_extrinsic)?; - // Decoding from the [2..] because the timestamp inmherent has two extra bytes before the call that represents the // call length and the extrinsic version. 
let (_, _, timestamp): (u8, u8, Compact) = codec::Decode::decode(&mut &ext_bytes[2..]).map_err(|_| Error::timestamp_extrinsic())?; Ok(timestamp.into()) } - -#[cfg(test)] -pub mod test_util {} diff --git a/modules/src/core/ics02_client/handler/update_client.rs b/modules/src/core/ics02_client/handler/update_client.rs index bd7b3eb368..3ea0c28b3c 100644 --- a/modules/src/core/ics02_client/handler/update_client.rs +++ b/modules/src/core/ics02_client/handler/update_client.rs @@ -126,6 +126,7 @@ pub fn process( #[cfg(test)] mod tests { use core::str::FromStr; + use sp_trie::{generate_trie_proof, TrieDBMut, TrieMut}; use test_log::test; use crate::clients::ics11_beefy::client_state::ClientState as BeefyClientState; @@ -156,6 +157,18 @@ mod tests { use beefy_primitives::mmr::BeefyNextAuthoritySet; use hex_literal::hex; use sp_core::H256; + use crate::clients::ics11_beefy::header::ParachainHeader as BeefyParachainHeader; + use beefy_client::primitives::{ + MmrLeaf, MmrUpdateProof, ParachainHeader, PartialMmrLeaf, SignatureWithAuthorityIndex, + SignedCommitment, + }; + use beefy_client::{MerkleHasher, NodesUtils}; + use codec::{Decode, Encode}; + use sp_core::keccak_256; + use sp_runtime::traits::{BlakeTwo256, Convert}; + use std::collections::BTreeMap; + use subxt::rpc::ClientT; + use subxt::rpc::{rpc_params, JsonValue, Subscription, SubscriptionClientT}; #[test] fn test_update_client_ok() { @@ -570,19 +583,6 @@ mod tests { } } - use crate::clients::ics11_beefy::header::ParachainHeader as BeefyParachainHeader; - use beefy_client::primitives::{ - MmrLeaf, MmrUpdateProof, ParachainHeader, PartialMmrLeaf, SignatureWithAuthorityIndex, - SignedCommitment, - }; - use beefy_client::{MerkleHasher, NodesUtils}; - use codec::{Decode, Encode}; - use sp_core::keccak_256; - use sp_runtime::traits::Convert; - use std::collections::BTreeMap; - use subxt::rpc::ClientT; - use subxt::rpc::{rpc_params, JsonValue, Subscription, SubscriptionClientT}; - const PARA_ID: u32 = 2000; /// Construct the mmr update for beefy light client @@ -730,6 +730,13 @@ mod tests { .build::() .await .unwrap(); + + let para_url = std::env::var("NODE_ENDPOINT").unwrap_or("ws://127.0.0.1:9988".to_string()); + let para_client = subxt::ClientBuilder::new() + .set_url(para_url) + .build::() + .await + .unwrap(); let api = client.clone().to_runtime_api:: = codec::Decode::decode(&mut &*recv_commitment).unwrap(); - std::println!( + println!( "Received signed commitmment for: {:?}", signed_commitment.commitment.block_number ); @@ -893,6 +900,45 @@ mod tests { vec![] }; + let block_number = leaf.parent_number_and_hash.0 + 1; + let subxt_block_number: subxt::BlockNumber = block_number.into(); + let block_hash = para_client + .rpc() + .block_hash(Some(subxt_block_number)) + .await + .unwrap(); + + let block = para_client.rpc().block(block_hash).await.unwrap().unwrap(); + let extrinsics = block + .block + .extrinsics + .into_iter() + .map(|e| e.encode()) + .collect::>(); + + let mut db = sp_trie::MemoryDB::::default(); + + let root = { + let mut root = Default::default(); + let mut trie = + >>::new(&mut db, &mut root); + + for (i, ext) in extrinsics.clone().into_iter().enumerate() { + let key = codec::Compact::(i as u32).encode(); + trie.insert(&key, &ext).unwrap(); + } + *trie.root() + }; + + let key = codec::Compact::(0u32).encode(); + let extrinsic_proof = + generate_trie_proof::, _, _, _>( + &db, + root, + vec![&key], + ) + .unwrap(); + let header = ParachainHeader { parachain_header: para_headers.get(&PARA_ID).unwrap().clone(), partial_mmr_leaf: 
PartialMmrLeaf { @@ -904,7 +950,7 @@ mod tests { parachain_heads_proof: proof, heads_leaf_index: index.unwrap() as u32, heads_total_count: parachain_leaves.len() as u32, - extrinsic_proof: vec![], + extrinsic_proof, }; parachain_headers.push(header); @@ -961,7 +1007,7 @@ mod tests { ); assert_eq!(event.height(), ctx.host_height()); assert!(log.is_empty()); - + ctx.store_client_result(result.clone()).unwrap(); match result { Update(upd_res) => { assert_eq!(upd_res.client_id, client_id); @@ -980,6 +1026,7 @@ mod tests { } Err(e) => panic!("Unexpected error {:?}", e), } + println!("Updated client successfully"); latest_beefy_height = signed_commitment.commitment.block_number; count += 1; } diff --git a/modules/src/lib.rs b/modules/src/lib.rs index f38dcb315c..506959eff6 100644 --- a/modules/src/lib.rs +++ b/modules/src/lib.rs @@ -9,7 +9,7 @@ trivial_casts, trivial_numeric_casts, unused_import_braces, - unused_qualifications, + unused_qualifications )] #![cfg_attr(not(test), deny(rust_2018_idioms))] #![forbid(unsafe_code)] From 92ef2cd3acd4000b0f512366769f45347d9342f9 Mon Sep 17 00:00:00 2001 From: David Salami Date: Wed, 18 May 2022 16:07:09 +0100 Subject: [PATCH 17/96] decode timestamp from extrinsic correctly --- Cargo.lock | 4 + .../relay_application_logic/send_transfer.rs | 5 +- .../clients/ics07_tendermint/client_def.rs | 90 +++++++++++-------- .../ics07_tendermint/consensus_state.rs | 29 +++--- modules/src/clients/ics11_beefy/client_def.rs | 49 +++++----- .../clients/ics11_beefy/consensus_state.rs | 50 +++++++---- modules/src/clients/ics11_beefy/header.rs | 33 +++---- .../src/core/ics02_client/client_consensus.rs | 42 +++++---- modules/src/core/ics02_client/client_def.rs | 77 ++++++++-------- modules/src/core/ics02_client/context.rs | 29 +++--- modules/src/core/ics02_client/handler.rs | 19 ++-- .../ics02_client/handler/create_client.rs | 14 +-- .../ics02_client/handler/update_client.rs | 19 ++-- .../ics02_client/handler/upgrade_client.rs | 13 +-- modules/src/core/ics02_client/msgs.rs | 6 +- .../core/ics02_client/msgs/create_client.rs | 18 ++-- .../core/ics02_client/msgs/upgrade_client.rs | 22 ++--- modules/src/core/ics03_connection/handler.rs | 5 +- .../ics03_connection/handler/conn_open_ack.rs | 7 +- .../handler/conn_open_confirm.rs | 5 +- .../handler/conn_open_init.rs | 5 +- .../ics03_connection/handler/conn_open_try.rs | 7 +- .../core/ics03_connection/handler/verify.rs | 21 ++--- modules/src/core/ics04_channel/handler.rs | 9 +- .../ics04_channel/handler/acknowledgement.rs | 5 +- .../handler/chan_close_confirm.rs | 5 +- .../ics04_channel/handler/chan_close_init.rs | 5 +- .../ics04_channel/handler/chan_open_ack.rs | 5 +- .../handler/chan_open_confirm.rs | 5 +- .../ics04_channel/handler/chan_open_init.rs | 5 +- .../ics04_channel/handler/chan_open_try.rs | 5 +- .../core/ics04_channel/handler/recv_packet.rs | 5 +- .../core/ics04_channel/handler/send_packet.rs | 5 +- .../src/core/ics04_channel/handler/timeout.rs | 5 +- .../ics04_channel/handler/timeout_on_close.rs | 5 +- .../src/core/ics04_channel/handler/verify.rs | 23 ++--- .../handler/write_acknowledgement.rs | 5 +- modules/src/core/ics26_routing/handler.rs | 37 ++++++-- modules/src/core/ics26_routing/msgs.rs | 6 +- modules/src/mock/client_def.rs | 38 ++++---- modules/src/mock/client_state.rs | 31 ++++--- modules/src/mock/context.rs | 40 +++++---- modules/src/mock/header.rs | 3 +- modules/src/mock/host.rs | 7 +- modules/src/relayer/ics18_relayer/utils.rs | 14 ++- modules/src/test_utils.rs | 2 +- 
proto/src/prost/ibc.lightclients.beefy.v1.rs | 4 +- 47 files changed, 488 insertions(+), 355 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index fb9c11724a..8a2f52428a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -251,7 +251,11 @@ dependencies = [ [[package]] name = "beefy-generic-client" version = "0.1.0" +<<<<<<< HEAD source = "git+https://github.com/ComposableFi/beefy-client?branch=master#c8ef22e646f9eb33d66ac83e6fdad808b76aa4ed" +======= +source = "git+https://github.com/ComposableFi/beefy-client?branch=david/refactor-traits#c60a46d3335303b5980eb93280f84ec0e6d99466" +>>>>>>> 5c48fa8c (decode timestamp from extrinsic correctly) dependencies = [ "beefy-primitives", "ckb-merkle-mountain-range", diff --git a/modules/src/applications/ics20_fungible_token_transfer/relay_application_logic/send_transfer.rs b/modules/src/applications/ics20_fungible_token_transfer/relay_application_logic/send_transfer.rs index 761a4c9f00..c29f47064e 100644 --- a/modules/src/applications/ics20_fungible_token_transfer/relay_application_logic/send_transfer.rs +++ b/modules/src/applications/ics20_fungible_token_transfer/relay_application_logic/send_transfer.rs @@ -1,5 +1,6 @@ use crate::applications::ics20_fungible_token_transfer::error::Error; use crate::applications::ics20_fungible_token_transfer::msgs::transfer::MsgTransfer; +use crate::clients::crypto_ops::crypto::CryptoOps; use crate::core::ics04_channel::handler::send_packet::send_packet; use crate::core::ics04_channel::packet::Packet; use crate::core::ics04_channel::packet::PacketResult; @@ -7,12 +8,12 @@ use crate::core::ics26_routing::context::LightClientContext; use crate::handler::HandlerOutput; use crate::prelude::*; -pub(crate) fn send_transfer( +pub(crate) fn send_transfer( ctx: &Ctx, msg: MsgTransfer, ) -> Result, Error> where - Ctx: LightClientContext, + Ctx: LightClientContext, { let source_channel_end = ctx .channel_end(&(msg.source_port.clone(), msg.source_channel)) diff --git a/modules/src/clients/ics07_tendermint/client_def.rs b/modules/src/clients/ics07_tendermint/client_def.rs index 9d7997f354..6937db2ca3 100644 --- a/modules/src/clients/ics07_tendermint/client_def.rs +++ b/modules/src/clients/ics07_tendermint/client_def.rs @@ -1,5 +1,7 @@ use core::convert::TryInto; +use core::fmt::Debug; +use crate::clients::crypto_ops::crypto::CryptoOps; use ibc_proto::ibc::core::commitment::v1::MerkleProof as RawMerkleProof; use prost::Message; use tendermint_light_client_verifier::types::{TrustedBlockState, UntrustedBlockState}; @@ -35,19 +37,32 @@ use crate::downcast; use crate::prelude::*; use crate::Height; -#[derive(Clone, Debug, Default, PartialEq, Eq)] -pub struct TendermintClient { +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct TendermintClient { verifier: ProdVerifier, + _phantom: core::marker::PhantomData, } -impl ClientDef for TendermintClient { +impl Default for TendermintClient { + fn default() -> Self { + Self { + verifier: Default::default(), + _phantom: Default::default(), + } + } +} + +impl ClientDef + for TendermintClient +{ type Header = Header; type ClientState = ClientState; - type ConsensusState = ConsensusState; + type ConsensusState = ConsensusState; + type Crypto = Crypto; fn verify_header( &self, - ctx: &dyn LightClientContext, + ctx: &dyn LightClientContext, client_id: ClientId, client_state: Self::ClientState, header: Self::Header, @@ -62,11 +77,11 @@ impl ClientDef for TendermintClient { } // Check if a consensus state is already installed; if so skip - let header_consensus_state = ConsensusState::from(header.clone()); 
+ let header_consensus_state = ConsensusState::::from(header.clone()); let _ = match ctx.maybe_consensus_state(&client_id, header.height())? { Some(cs) => { - let cs = downcast_consensus_state(cs)?; + let cs = downcast_consensus_state::(cs)?; // If this consensus state matches, skip verification // (optimization) if cs == header_consensus_state { @@ -79,8 +94,9 @@ impl ClientDef for TendermintClient { None => None, }; - let trusted_consensus_state = - downcast_consensus_state(ctx.consensus_state(&client_id, header.trusted_height)?)?; + let trusted_consensus_state = downcast_consensus_state::( + ctx.consensus_state(&client_id, header.trusted_height)?, + )?; let trusted_state = TrustedBlockState { header_time: trusted_consensus_state.timestamp, @@ -136,11 +152,11 @@ impl ClientDef for TendermintClient { fn update_state( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn LightClientContext, _client_id: ClientId, client_state: Self::ClientState, header: Self::Header, - ) -> Result<(Self::ClientState, ConsensusUpdateResult), Ics02Error> { + ) -> Result<(Self::ClientState, ConsensusUpdateResult), Ics02Error> { let header_consensus_state = ConsensusState::from(header.clone()); Ok(( client_state.with_header(header), @@ -160,19 +176,19 @@ impl ClientDef for TendermintClient { fn check_for_misbehaviour( &self, - ctx: &dyn LightClientContext, + ctx: &dyn LightClientContext, client_id: ClientId, client_state: Self::ClientState, header: Self::Header, ) -> Result { // Check if a consensus state is already installed; if so it should // match the untrusted header. - let header_consensus_state = ConsensusState::from(header.clone()); + let header_consensus_state = ConsensusState::::from(header.clone()); let existing_consensus_state = match ctx.maybe_consensus_state(&client_id, header.height())? { Some(cs) => { - let cs = downcast_consensus_state(cs)?; + let cs = downcast_consensus_state::(cs)?; // If this consensus state matches, skip verification // (optimization) if cs == header_consensus_state { @@ -198,8 +214,8 @@ impl ClientDef for TendermintClient { // (cs-new, cs-next, cs-latest) if header.height() < client_state.latest_height() { let maybe_next_cs = ctx - .next_consensus_state(&client_id, header.height(), None)? - .map(downcast_consensus_state) + .next_consensus_state(&client_id, header.height())? + .map(downcast_consensus_state::) .transpose()?; if let Some(next_cs) = maybe_next_cs { @@ -218,8 +234,8 @@ impl ClientDef for TendermintClient { // (cs-trusted, cs-prev, cs-new) if header.trusted_height < header.height() { let maybe_prev_cs = ctx - .prev_consensus_state(&client_id, header.height(), None)? - .map(downcast_consensus_state) + .prev_consensus_state(&client_id, header.height())? 
+ .map(downcast_consensus_state::) .transpose()?; if let Some(prev_cs) = maybe_prev_cs { @@ -241,7 +257,7 @@ impl ClientDef for TendermintClient { fn verify_client_consensus_state( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn LightClientContext, client_state: &Self::ClientState, height: Height, prefix: &CommitmentPrefix, @@ -249,7 +265,7 @@ impl ClientDef for TendermintClient { root: &CommitmentRoot, client_id: &ClientId, consensus_height: Height, - expected_consensus_state: &AnyConsensusState, + expected_consensus_state: &AnyConsensusState, ) -> Result<(), Ics02Error> { client_state.verify_height(height)?; @@ -266,7 +282,7 @@ impl ClientDef for TendermintClient { fn verify_connection_state( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn LightClientContext, _client_id: &ClientId, client_state: &Self::ClientState, height: Height, @@ -287,7 +303,7 @@ impl ClientDef for TendermintClient { fn verify_channel_state( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn LightClientContext, _client_id: &ClientId, client_state: &Self::ClientState, height: Height, @@ -309,7 +325,7 @@ impl ClientDef for TendermintClient { fn verify_client_full_state( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn LightClientContext, client_state: &Self::ClientState, height: Height, prefix: &CommitmentPrefix, @@ -329,7 +345,7 @@ impl ClientDef for TendermintClient { fn verify_packet_data( &self, - ctx: &dyn LightClientContext, + ctx: &dyn LightClientContext, _client_id: &ClientId, client_state: &Self::ClientState, height: Height, @@ -342,7 +358,7 @@ impl ClientDef for TendermintClient { commitment: PacketCommitment, ) -> Result<(), Ics02Error> { client_state.verify_height(height)?; - verify_delay_passed(ctx, height, connection_end)?; + verify_delay_passed::(ctx, height, connection_end)?; let commitment_path = CommitmentsPath { port_id: port_id.clone(), @@ -362,7 +378,7 @@ impl ClientDef for TendermintClient { fn verify_packet_acknowledgement( &self, - ctx: &dyn LightClientContext, + ctx: &dyn LightClientContext, _client_id: &ClientId, client_state: &Self::ClientState, height: Height, @@ -376,7 +392,7 @@ impl ClientDef for TendermintClient { ) -> Result<(), Ics02Error> { // client state height = consensus state height client_state.verify_height(height)?; - verify_delay_passed(ctx, height, connection_end)?; + verify_delay_passed::(ctx, height, connection_end)?; let ack_path = AcksPath { port_id: port_id.clone(), @@ -395,7 +411,7 @@ impl ClientDef for TendermintClient { fn verify_next_sequence_recv( &self, - ctx: &dyn LightClientContext, + ctx: &dyn LightClientContext, _client_id: &ClientId, client_state: &Self::ClientState, height: Height, @@ -407,7 +423,7 @@ impl ClientDef for TendermintClient { sequence: Sequence, ) -> Result<(), Ics02Error> { client_state.verify_height(height)?; - verify_delay_passed(ctx, height, connection_end)?; + verify_delay_passed::(ctx, height, connection_end)?; let mut seq_bytes = Vec::new(); u64::from(sequence) @@ -427,7 +443,7 @@ impl ClientDef for TendermintClient { fn verify_packet_receipt_absence( &self, - ctx: &dyn LightClientContext, + ctx: &dyn LightClientContext, _client_id: &ClientId, client_state: &Self::ClientState, height: Height, @@ -439,7 +455,7 @@ impl ClientDef for TendermintClient { sequence: Sequence, ) -> Result<(), Ics02Error> { client_state.verify_height(height)?; - verify_delay_passed(ctx, height, connection_end)?; + verify_delay_passed::(ctx, height, connection_end)?; let receipt_path = ReceiptsPath { port_id: port_id.clone(), @@ -461,7 +477,7 @@ impl 
ClientDef for TendermintClient { _consensus_state: &Self::ConsensusState, _proof_upgrade_client: Vec, _proof_upgrade_consensus_state: Vec, - ) -> Result<(Self::ClientState, ConsensusUpdateResult), Ics02Error> { + ) -> Result<(Self::ClientState, ConsensusUpdateResult), Ics02Error> { todo!() } } @@ -507,8 +523,8 @@ fn verify_non_membership( .map_err(|e| Ics02Error::tendermint(Error::ics23_error(e))) } -fn verify_delay_passed( - ctx: &dyn LightClientContext, +fn verify_delay_passed( + ctx: &dyn LightClientContext, height: Height, connection_end: &ConnectionEnd, ) -> Result<(), Ics02Error> { @@ -537,9 +553,11 @@ fn verify_delay_passed( .map_err(|e| e.into()) } -fn downcast_consensus_state(cs: AnyConsensusState) -> Result { +fn downcast_consensus_state( + cs: AnyConsensusState, +) -> Result, Ics02Error> { downcast!( - cs => AnyConsensusState::Tendermint + cs => AnyConsensusState::::Tendermint ) .ok_or_else(|| Ics02Error::client_args_type_mismatch(ClientType::Tendermint)) } diff --git a/modules/src/clients/ics07_tendermint/consensus_state.rs b/modules/src/clients/ics07_tendermint/consensus_state.rs index f710a6bcce..21b1feb900 100644 --- a/modules/src/clients/ics07_tendermint/consensus_state.rs +++ b/modules/src/clients/ics07_tendermint/consensus_state.rs @@ -1,12 +1,14 @@ use crate::prelude::*; use core::convert::Infallible; +use core::fmt::Debug; use serde::Serialize; use tendermint::{hash::Algorithm, time::Time, Hash}; use tendermint_proto::google::protobuf as tpb; use tendermint_proto::Protobuf; +use crate::clients::crypto_ops::crypto::CryptoOps; use ibc_proto::ibc::lightclients::tendermint::v1::ConsensusState as RawConsensusState; use crate::clients::ics07_tendermint::error::Error; @@ -16,24 +18,29 @@ use crate::core::ics02_client::client_type::ClientType; use crate::core::ics23_commitment::commitment::CommitmentRoot; #[derive(Clone, Debug, PartialEq, Eq, Serialize)] -pub struct ConsensusState { +pub struct ConsensusState { pub timestamp: Time, pub root: CommitmentRoot, pub next_validators_hash: Hash, + _phantom: core::marker::PhantomData, } -impl ConsensusState { +impl ConsensusState { pub fn new(root: CommitmentRoot, timestamp: Time, next_validators_hash: Hash) -> Self { Self { timestamp, root, next_validators_hash, + _phantom: Default::default(), } } } -impl crate::core::ics02_client::client_consensus::ConsensusState for ConsensusState { +impl + crate::core::ics02_client::client_consensus::ConsensusState for ConsensusState +{ type Error = Infallible; + type Crypto = Crypto; fn client_type(&self) -> ClientType { ClientType::Tendermint @@ -43,14 +50,14 @@ impl crate::core::ics02_client::client_consensus::ConsensusState for ConsensusSt &self.root } - fn wrap_any(self) -> AnyConsensusState { + fn wrap_any(self) -> AnyConsensusState { AnyConsensusState::Tendermint(self) } } -impl Protobuf for ConsensusState {} +impl Protobuf for ConsensusState {} -impl TryFrom for ConsensusState { +impl TryFrom for ConsensusState { type Error = Error; fn try_from(raw: RawConsensusState) -> Result { @@ -75,12 +82,13 @@ impl TryFrom for ConsensusState { timestamp, next_validators_hash: Hash::from_bytes(Algorithm::Sha256, &raw.next_validators_hash) .map_err(|e| Error::invalid_raw_consensus_state(e.to_string()))?, + _phantom: Default::default(), }) } } -impl From for RawConsensusState { - fn from(value: ConsensusState) -> Self { +impl From> for RawConsensusState { + fn from(value: ConsensusState) -> Self { // FIXME: shunts like this are necessary due to // 
https://github.com/informalsystems/tendermint-rs/issues/1053 let tpb::Timestamp { seconds, nanos } = value.timestamp.into(); @@ -96,17 +104,18 @@ impl From for RawConsensusState { } } -impl From for ConsensusState { +impl From for ConsensusState { fn from(header: tendermint::block::Header) -> Self { Self { root: CommitmentRoot::from_bytes(header.app_hash.as_ref()), timestamp: header.time, next_validators_hash: header.next_validators_hash, + _phantom: Default::default(), } } } -impl From
for ConsensusState { +impl From
for ConsensusState { fn from(header: Header) -> Self { Self::from(header.signed_header.header) } diff --git a/modules/src/clients/ics11_beefy/client_def.rs b/modules/src/clients/ics11_beefy/client_def.rs index 75b35d7279..95926f067e 100644 --- a/modules/src/clients/ics11_beefy/client_def.rs +++ b/modules/src/clients/ics11_beefy/client_def.rs @@ -2,7 +2,7 @@ use beefy_client::primitives::{ParachainHeader, ParachainsUpdateProof}; use beefy_client::traits::ClientState as LightClientState; use beefy_client::BeefyLightClient; use codec::Encode; -use core::convert::TryInto; +use core::fmt::Debug; use pallet_mmr_primitives::BatchProof; use sp_core::H256; use tendermint_proto::Protobuf; @@ -41,7 +41,7 @@ use crate::core::ics24_host::path::{ use crate::downcast; #[derive(Clone, Debug, PartialEq, Eq)] -pub struct BeefyClient(PhantomData); +pub struct BeefyClient(PhantomData); impl Default for BeefyClient { fn default() -> Self { @@ -49,14 +49,15 @@ impl Default for BeefyClient { } } -impl ClientDef for BeefyClient { +impl ClientDef for BeefyClient { type Header = BeefyHeader; type ClientState = ClientState; - type ConsensusState = ConsensusState; + type ConsensusState = ConsensusState; + type Crypto = Crypto; fn verify_header( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn LightClientContext, _client_id: ClientId, client_state: Self::ClientState, header: Self::Header, @@ -96,7 +97,7 @@ impl ClientDef for BeefyClient { parachain_heads_proof: header.parachain_heads_proof, heads_leaf_index: header.heads_leaf_index, heads_total_count: header.heads_total_count, - extrinsic_proof: header.extrinsic_proof, + extrinsic_proof: header.extrinsic_proof.encode(), } }) .collect::>(); @@ -125,11 +126,11 @@ impl ClientDef for BeefyClient { fn update_state( &self, - ctx: &dyn LightClientContext, + ctx: &dyn LightClientContext, client_id: ClientId, client_state: Self::ClientState, header: Self::Header, - ) -> Result<(Self::ClientState, ConsensusUpdateResult), Error> { + ) -> Result<(Self::ClientState, ConsensusUpdateResult), Error> { let mut parachain_cs_states = vec![]; // Extract the new client state from the verified header let client_state = client_state @@ -143,7 +144,7 @@ impl ClientDef for BeefyClient { } parachain_cs_states.push(( height, - AnyConsensusState::Beefy(ConsensusState::try_from(header)?), + AnyConsensusState::Beefy(ConsensusState::::try_from(header)?), )) } @@ -173,7 +174,7 @@ impl ClientDef for BeefyClient { fn check_for_misbehaviour( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn LightClientContext, _client_id: ClientId, _client_state: Self::ClientState, _header: Self::Header, @@ -183,7 +184,7 @@ impl ClientDef for BeefyClient { fn verify_client_consensus_state( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn LightClientContext, _client_state: &Self::ClientState, _height: Height, prefix: &CommitmentPrefix, @@ -191,7 +192,7 @@ impl ClientDef for BeefyClient { root: &CommitmentRoot, client_id: &ClientId, consensus_height: Height, - expected_consensus_state: &AnyConsensusState, + expected_consensus_state: &AnyConsensusState, ) -> Result<(), Error> { let path = ClientConsensusStatePath { client_id: client_id.clone(), @@ -205,7 +206,7 @@ impl ClientDef for BeefyClient { // Consensus state will be verified in the verification functions before these are called fn verify_connection_state( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn LightClientContext, _client_id: &ClientId, _client_state: &Self::ClientState, _height: Height, @@ -222,7 +223,7 @@ impl ClientDef for BeefyClient { 
fn verify_channel_state( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn LightClientContext, _client_id: &ClientId, _client_state: &Self::ClientState, _height: Height, @@ -240,7 +241,7 @@ impl ClientDef for BeefyClient { fn verify_client_full_state( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn LightClientContext, _client_state: &Self::ClientState, _height: Height, prefix: &CommitmentPrefix, @@ -256,7 +257,7 @@ impl ClientDef for BeefyClient { fn verify_packet_data( &self, - ctx: &dyn LightClientContext, + ctx: &dyn LightClientContext, _client_id: &ClientId, _client_state: &Self::ClientState, height: Height, @@ -287,7 +288,7 @@ impl ClientDef for BeefyClient { fn verify_packet_acknowledgement( &self, - ctx: &dyn LightClientContext, + ctx: &dyn LightClientContext, _client_id: &ClientId, _client_state: &Self::ClientState, height: Height, @@ -317,7 +318,7 @@ impl ClientDef for BeefyClient { fn verify_next_sequence_recv( &self, - ctx: &dyn LightClientContext, + ctx: &dyn LightClientContext, _client_id: &ClientId, _client_state: &Self::ClientState, height: Height, @@ -344,7 +345,7 @@ impl ClientDef for BeefyClient { fn verify_packet_receipt_absence( &self, - ctx: &dyn LightClientContext, + ctx: &dyn LightClientContext, _client_id: &ClientId, _client_state: &Self::ClientState, height: Height, @@ -376,7 +377,7 @@ impl ClientDef for BeefyClient { _consensus_state: &Self::ConsensusState, _proof_upgrade_client: Vec, _proof_upgrade_consensus_state: Vec, - ) -> Result<(Self::ClientState, ConsensusUpdateResult), Error> { + ) -> Result<(Self::ClientState, ConsensusUpdateResult), Error> { todo!() } } @@ -422,8 +423,8 @@ fn verify_non_membership>( Crypto::verify_non_membership_trie_proof(&root, &trie_proof, &key) } -fn verify_delay_passed( - ctx: &dyn LightClientContext, +fn verify_delay_passed( + ctx: &dyn LightClientContext, height: Height, connection_end: &ConnectionEnd, ) -> Result<(), Error> { @@ -458,7 +459,9 @@ fn verify_delay_passed( .map_err(|e| e.into()) } -pub fn downcast_consensus_state(cs: AnyConsensusState) -> Result { +pub fn downcast_consensus_state( + cs: AnyConsensusState, +) -> Result, Error> { downcast!( cs => AnyConsensusState::Beefy ) diff --git a/modules/src/clients/ics11_beefy/consensus_state.rs b/modules/src/clients/ics11_beefy/consensus_state.rs index db270ddf6f..a0773662e3 100644 --- a/modules/src/clients/ics11_beefy/consensus_state.rs +++ b/modules/src/clients/ics11_beefy/consensus_state.rs @@ -1,11 +1,13 @@ use crate::prelude::*; use core::convert::Infallible; +use core::fmt::Debug; use serde::Serialize; use tendermint::time::Time; use tendermint_proto::google::protobuf as tpb; use tendermint_proto::Protobuf; +use crate::clients::crypto_ops::crypto::CryptoOps; use ibc_proto::ibc::lightclients::beefy::v1::ConsensusState as RawConsensusState; use crate::clients::ics11_beefy::error::Error; @@ -17,22 +19,27 @@ use crate::core::ics23_commitment::commitment::CommitmentRoot; // This is a constant that comes from pallet-ibc pub const IBC_CONSENSUS_ID: [u8; 4] = *b"/IBC"; #[derive(Clone, Debug, PartialEq, Eq, Serialize)] -pub struct ConsensusState { +pub struct ConsensusState { pub timestamp: Time, pub root: CommitmentRoot, + _phantom: core::marker::PhantomData, } -impl ConsensusState { - pub fn new(root: Vec, timestamp: Time, parachain_header: ParachainHeader) -> Self { +impl ConsensusState { + pub fn new(root: Vec, timestamp: Time) -> Self { Self { timestamp, root: root.into(), + _phantom: Default::default(), } } } -impl 
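The hunks above thread a crypto provider type through `ConsensusState` by adding a `PhantomData` field, so the type becomes generic without carrying any extra runtime data. A minimal, self-contained sketch of that pattern (the `HostCrypto` trait and the `u64` timestamp are simplifications, not the crate's actual types):

```rust
use core::marker::PhantomData;

/// Stand-in for the host-provided crypto/trie operations the patch abstracts over.
pub trait HostCrypto: Clone + core::fmt::Debug + Send + Sync {}

#[derive(Clone, Debug, PartialEq, Eq)]
pub struct ConsensusState<C: HostCrypto> {
    pub timestamp: u64,
    pub root: Vec<u8>,
    // Zero-sized marker: ties the type to `C` without storing anything.
    _phantom: PhantomData<C>,
}

impl<C: HostCrypto> ConsensusState<C> {
    pub fn new(root: Vec<u8>, timestamp: u64) -> Self {
        Self {
            timestamp,
            root,
            _phantom: PhantomData,
        }
    }
}
```

Because the marker is zero-sized, the struct's layout is unchanged; only trait resolution and the type signature pick up the parameter.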
crate::core::ics02_client::client_consensus::ConsensusState for ConsensusState { +impl + crate::core::ics02_client::client_consensus::ConsensusState for ConsensusState +{ type Error = Infallible; + type Crypto = Crypto; fn client_type(&self) -> ClientType { ClientType::Beefy @@ -42,14 +49,14 @@ impl crate::core::ics02_client::client_consensus::ConsensusState for ConsensusSt &self.root } - fn wrap_any(self) -> AnyConsensusState { + fn wrap_any(self) -> AnyConsensusState { AnyConsensusState::Beefy(self) } } -impl Protobuf for ConsensusState {} +impl Protobuf for ConsensusState {} -impl TryFrom for ConsensusState { +impl TryFrom for ConsensusState { type Error = Error; fn try_from(raw: RawConsensusState) -> Result { @@ -112,12 +119,13 @@ impl TryFrom for ConsensusState { Ok(Self { root: raw.root.into(), timestamp, + _phantom: Default::default(), }) } } -impl From for RawConsensusState { - fn from(value: ConsensusState) -> Self { +impl From> for RawConsensusState { + fn from(value: ConsensusState) -> Self { let tpb::Timestamp { seconds, nanos } = value.timestamp.into(); let timestamp = ibc_proto::google::protobuf::Timestamp { seconds, nanos }; @@ -128,7 +136,7 @@ impl From for RawConsensusState { } } -impl TryFrom for ConsensusState { +impl TryFrom for ConsensusState { type Error = Error; #[cfg(not(test))] fn try_from(header: ParachainHeader) -> Result { @@ -149,7 +157,7 @@ impl TryFrom for ConsensusState { ))? }; - let timestamp = decode_timestamp_extrinsic(&header).unwrap_or_default(); + let timestamp = decode_timestamp_extrinsic::(&header).unwrap_or_default(); let duration = core::time::Duration::from_millis(timestamp); let timestamp = Timestamp::from_nanoseconds(duration.as_nanos().saturated_into::()) .unwrap_or_default() @@ -161,11 +169,15 @@ impl TryFrom for ConsensusState { Ok(Self { root: root.into(), timestamp, + _phantom: Default::default(), }) } #[cfg(test)] fn try_from(header: ParachainHeader) -> Result { + use crate::clients::ics11_beefy::header::decode_timestamp_extrinsic; + use crate::timestamp::Timestamp; + use sp_runtime::SaturatedConversion; let root = { header .parachain_header @@ -178,12 +190,19 @@ impl TryFrom for ConsensusState { .unwrap_or_default() }; - // Todo: this is a placeholder for now until decoding extrinsic from extrinsic proof is figured out - let timestamp = Time::now(); + let timestamp = decode_timestamp_extrinsic::(&header).unwrap_or_default(); + let duration = core::time::Duration::from_millis(timestamp); + let timestamp = Timestamp::from_nanoseconds(duration.as_nanos().saturated_into::()) + .unwrap_or_default() + .into_tm_time() + .ok_or(Error::invalid_header( + "cannot decode timestamp extrinsic".to_string(), + ))?; Ok(Self { root: root.into(), timestamp, + _phantom: Default::default(), }) } } @@ -192,12 +211,11 @@ impl TryFrom for ConsensusState { pub mod test_util { use super::*; - pub fn get_dummy_beefy_consensus_state() -> AnyConsensusState { + pub fn get_dummy_beefy_consensus_state() -> AnyConsensusState { AnyConsensusState::Beefy(ConsensusState { timestamp: Time::now(), root: vec![0; 32].into(), + _phantom: Default::default(), }) } } -#[cfg(test)] -mod tests {} diff --git a/modules/src/clients/ics11_beefy/header.rs b/modules/src/clients/ics11_beefy/header.rs index 4431779215..bd57cbe1a0 100644 --- a/modules/src/clients/ics11_beefy/header.rs +++ b/modules/src/clients/ics11_beefy/header.rs @@ -1,6 +1,7 @@ use prost::Message; use tendermint_proto::Protobuf; +use crate::clients::crypto_ops::crypto::CryptoOps; use 
crate::clients::ics11_beefy::error::Error; use crate::core::ics02_client::client_type::ClientType; use crate::core::ics02_client::header::AnyHeader; @@ -26,7 +27,6 @@ use pallet_mmr_primitives::Proof; use sp_core::H256; use sp_runtime::generic::Header as SubstrateHeader; use sp_runtime::traits::{BlakeTwo256, SaturatedConversion}; -use sp_trie::{StorageProof, Trie}; /// Beefy consensus header #[derive(Clone, PartialEq, Eq, Debug)] @@ -47,6 +47,9 @@ impl crate::core::ics02_client::header::Header for BeefyHeader { } } +#[derive(Clone, PartialEq, Eq, Debug, codec::Encode, codec::Decode)] +pub struct ExtrinsicProof(pub Vec, pub Vec>); + #[derive(Clone, PartialEq, Eq, Debug, codec::Encode, codec::Decode)] pub struct ParachainHeader { pub parachain_header: SubstrateHeader, @@ -62,7 +65,7 @@ pub struct ParachainHeader { pub heads_total_count: u32, /// Trie merkle proof of inclusion of the set timestamp extrinsic in header.extrinsic_root /// this already encodes the actual extrinsic - pub extrinsic_proof: Vec>, + pub extrinsic_proof: ExtrinsicProof, } pub fn split_leaf_version(version: u8) -> (u8, u8) { @@ -126,7 +129,8 @@ impl TryFrom for BeefyHeader { .collect::, Error>>()?, heads_leaf_index: raw_para_header.heads_leaf_index, heads_total_count: raw_para_header.heads_total_count, - extrinsic_proof: raw_para_header.extrinsic_proof, + extrinsic_proof: Decode::decode(&mut &*raw_para_header.extrinsic_proof) + .map_err(|_| Error::invalid_raw_header())?, }) }) .collect::, Error>>()?; @@ -344,7 +348,7 @@ impl From for RawBeefyHeader { .collect(), heads_leaf_index: para_header.heads_leaf_index, heads_total_count: para_header.heads_total_count, - extrinsic_proof: para_header.extrinsic_proof, + extrinsic_proof: para_header.extrinsic_proof.encode(), }, ) .collect(), @@ -434,22 +438,19 @@ pub fn decode_header(buf: B) -> Result { } /// Attempt to extract the timestamp extrinsic from the parachain header -pub fn decode_timestamp_extrinsic(header: &ParachainHeader) -> Result { - let proof = header.extrinsic_proof.clone(); +pub fn decode_timestamp_extrinsic( + header: &ParachainHeader, +) -> Result { + let proof = &*header.extrinsic_proof.1; + let ext = &*header.extrinsic_proof.0; let extrinsic_root = header.parachain_header.extrinsics_root; - let db = StorageProof::new(proof).into_memory_db::(); - let trie = sp_trie::TrieDB::>::new(&db, &extrinsic_root) - .map_err(|_| Error::timestamp_extrinsic())?; - // Timestamp extrinsic should be the first inherent and hence the first extrinsic - // https://github.com/paritytech/substrate/blob/d602397a0bbb24b5d627795b797259a44a5e29e9/primitives/trie/src/lib.rs#L99-L101 + let key = codec::Encode::encode(&Compact(0u32)); - let ext_bytes = trie - .get(&key) - .map_err(|e| Error::timestamp_extrinsic())? - .ok_or_else(Error::timestamp_extrinsic)?; + Crypto::verify_membership_trie_proof(&extrinsic_root, proof, &*key, ext) + .map_err(|_| Error::timestamp_extrinsic())?; // Decoding from the [2..] because the timestamp inmherent has two extra bytes before the call that represents the // call length and the extrinsic version. 
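In the header hunks, the bare `Vec<Vec<u8>>` extrinsic proof becomes an `ExtrinsicProof` tuple struct holding the extrinsic bytes together with the trie proof nodes, and it crosses the protobuf boundary as SCALE bytes (`encode()` on the way in, `Decode::decode` on the way out). A small round-trip sketch of that encoding choice, using `parity-scale-codec` with its `derive` feature (the struct shape mirrors the patch, but the values are made up):

```rust
use parity_scale_codec::{Decode, Encode};

/// Stand-in for the patch's ExtrinsicProof: (opaque extrinsic bytes, trie proof nodes).
#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode)]
pub struct ExtrinsicProof(pub Vec<u8>, pub Vec<Vec<u8>>);

fn main() {
    let proof = ExtrinsicProof(vec![0x28, 0x04, 0x03], vec![vec![0xde, 0xad], vec![0xbe, 0xef]]);

    // Into the raw/proto type: a single opaque bytes field.
    let raw_field: Vec<u8> = proof.encode();

    // Back out when converting the raw header into the domain type.
    let decoded = ExtrinsicProof::decode(&mut &raw_field[..]).expect("valid SCALE bytes");
    assert_eq!(decoded, proof);
}
```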
let (_, _, timestamp): (u8, u8, Compact) = - codec::Decode::decode(&mut &ext_bytes[2..]).map_err(|_| Error::timestamp_extrinsic())?; + codec::Decode::decode(&mut &ext[2..]).map_err(|_| Error::timestamp_extrinsic())?; Ok(timestamp.into()) } diff --git a/modules/src/core/ics02_client/client_consensus.rs b/modules/src/core/ics02_client/client_consensus.rs index 013e7147dd..516d5a3a59 100644 --- a/modules/src/core/ics02_client/client_consensus.rs +++ b/modules/src/core/ics02_client/client_consensus.rs @@ -1,8 +1,10 @@ use crate::prelude::*; use core::convert::Infallible; +use core::fmt::Debug; use core::marker::{Send, Sync}; +use crate::clients::crypto_ops::crypto::CryptoOps; use ibc_proto::google::protobuf::Any; use ibc_proto::ibc::core::client::v1::ConsensusStateWithHeight; use serde::Serialize; @@ -28,8 +30,9 @@ pub const BEEFY_CONSENSUS_STATE_TYPE_URL: &str = "/ibc.lightclients.beefy.v1.Con pub const MOCK_CONSENSUS_STATE_TYPE_URL: &str = "/ibc.mock.ConsensusState"; -pub trait ConsensusState: Clone + core::fmt::Debug + Send + Sync { +pub trait ConsensusState: Clone + Send + Sync { type Error; + type Crypto: CryptoOps; /// Type of client associated with this consensus state (eg. Tendermint) fn client_type(&self) -> ClientType; @@ -38,19 +41,19 @@ pub trait ConsensusState: Clone + core::fmt::Debug + Send + Sync { fn root(&self) -> &CommitmentRoot; /// Wrap into an `AnyConsensusState` - fn wrap_any(self) -> AnyConsensusState; + fn wrap_any(self) -> AnyConsensusState; } #[derive(Clone, Debug, PartialEq, Eq, Serialize)] #[serde(tag = "type")] -pub enum AnyConsensusState { - Tendermint(consensus_state::ConsensusState), - Beefy(beefy_consensus_state::ConsensusState), +pub enum AnyConsensusState { + Tendermint(consensus_state::ConsensusState), + Beefy(beefy_consensus_state::ConsensusState), #[cfg(any(test, feature = "mocks"))] - Mock(MockConsensusState), + Mock(MockConsensusState), } -impl AnyConsensusState { +impl AnyConsensusState { pub fn timestamp(&self) -> Timestamp { match self { Self::Tendermint(cs_state) => cs_state.timestamp.into(), @@ -70,9 +73,9 @@ impl AnyConsensusState { } } -impl Protobuf for AnyConsensusState {} +impl Protobuf for AnyConsensusState {} -impl TryFrom for AnyConsensusState { +impl TryFrom for AnyConsensusState { type Error = Error; fn try_from(value: Any) -> Result { @@ -100,8 +103,8 @@ impl TryFrom for AnyConsensusState { } } -impl From for Any { - fn from(value: AnyConsensusState) -> Self { +impl From> for Any { + fn from(value: AnyConsensusState) -> Self { match value { AnyConsensusState::Tendermint(value) => Any { type_url: TENDERMINT_CONSENSUS_STATE_TYPE_URL.to_string(), @@ -128,14 +131,14 @@ impl From for Any { } #[derive(Clone, Debug, PartialEq, Eq, Serialize)] -pub struct AnyConsensusStateWithHeight { +pub struct AnyConsensusStateWithHeight { pub height: Height, - pub consensus_state: AnyConsensusState, + pub consensus_state: AnyConsensusState, } -impl Protobuf for AnyConsensusStateWithHeight {} +impl Protobuf for AnyConsensusStateWithHeight {} -impl TryFrom for AnyConsensusStateWithHeight { +impl TryFrom for AnyConsensusStateWithHeight { type Error = Error; fn try_from(value: ConsensusStateWithHeight) -> Result { @@ -152,8 +155,8 @@ impl TryFrom for AnyConsensusStateWithHeight { } } -impl From for ConsensusStateWithHeight { - fn from(value: AnyConsensusStateWithHeight) -> Self { +impl From> for ConsensusStateWithHeight { + fn from(value: AnyConsensusStateWithHeight) -> Self { ConsensusStateWithHeight { height: Some(value.height.into()), consensus_state: 
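The reworked `decode_timestamp_extrinsic` in the header hunks above first checks the extrinsic against the header's extrinsics root via the crypto provider's trie-proof verifier, then SCALE-decodes the timestamp call straight from the extrinsic bytes. The decoding step in isolation, under the layout the patch assumes (two leading bytes for the encoded length and the extrinsic version, then pallet index, call index, and a compact-encoded millisecond timestamp); the helper name is illustrative:

```rust
use parity_scale_codec::{Compact, Decode, Encode};

/// Extract the millisecond timestamp from an unsigned timestamp-set extrinsic,
/// assuming the byte layout described above.
fn decode_timestamp_millis(ext: &[u8]) -> Option<u64> {
    // Skip the length byte and the extrinsic-version byte, then decode
    // (pallet index, call index, Compact<u64> timestamp).
    let (_pallet, _call, millis): (u8, u8, Compact<u64>) =
        Decode::decode(&mut &ext[2..]).ok()?;
    Some(millis.0)
}

fn main() {
    // Build a toy extrinsic with the same shape for a round trip.
    let call = (3u8, 0u8, Compact(1_648_000_000_000u64)).encode();
    let mut ext = vec![call.len() as u8 + 1, 4u8]; // toy length + version bytes
    ext.extend(call);

    assert_eq!(decode_timestamp_millis(&ext), Some(1_648_000_000_000));
}
```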
Some(value.consensus_state.into()), @@ -161,7 +164,8 @@ impl From for ConsensusStateWithHeight { } } -impl ConsensusState for AnyConsensusState { +impl ConsensusState for AnyConsensusState { + type Crypto = Crypto; type Error = Infallible; fn client_type(&self) -> ClientType { @@ -177,7 +181,7 @@ impl ConsensusState for AnyConsensusState { } } - fn wrap_any(self) -> AnyConsensusState { + fn wrap_any(self) -> AnyConsensusState { self } } diff --git a/modules/src/core/ics02_client/client_def.rs b/modules/src/core/ics02_client/client_def.rs index 64cc9bd8ae..c9a64ec04a 100644 --- a/modules/src/core/ics02_client/client_def.rs +++ b/modules/src/core/ics02_client/client_def.rs @@ -18,24 +18,26 @@ use crate::core::ics26_routing::context::LightClientContext; use crate::downcast; use crate::prelude::*; use crate::Height; +use core::fmt::Debug; #[cfg(any(test, feature = "mocks"))] use crate::mock::client_def::MockClient; #[derive(PartialEq, Eq, Clone, Debug)] -pub enum ConsensusUpdateResult { - Single(AnyConsensusState), - Batch(Vec<(Height, AnyConsensusState)>), +pub enum ConsensusUpdateResult { + Single(AnyConsensusState), + Batch(Vec<(Height, AnyConsensusState)>), } pub trait ClientDef: Clone { type Header: Header; type ClientState: ClientState; type ConsensusState: ConsensusState; + type Crypto: CryptoOps + Debug + Send + Sync; fn verify_header( &self, - ctx: &dyn LightClientContext, + ctx: &dyn LightClientContext, client_id: ClientId, client_state: Self::ClientState, header: Self::Header, @@ -43,11 +45,11 @@ pub trait ClientDef: Clone { fn update_state( &self, - ctx: &dyn LightClientContext, + ctx: &dyn LightClientContext, client_id: ClientId, client_state: Self::ClientState, header: Self::Header, - ) -> Result<(Self::ClientState, ConsensusUpdateResult), Error>; + ) -> Result<(Self::ClientState, ConsensusUpdateResult), Error>; fn update_state_on_misbehaviour( &self, @@ -57,7 +59,7 @@ pub trait ClientDef: Clone { fn check_for_misbehaviour( &self, - ctx: &dyn LightClientContext, + ctx: &dyn LightClientContext, client_id: ClientId, client_state: Self::ClientState, header: Self::Header, @@ -70,7 +72,7 @@ pub trait ClientDef: Clone { consensus_state: &Self::ConsensusState, proof_upgrade_client: Vec, proof_upgrade_consensus_state: Vec, - ) -> Result<(Self::ClientState, ConsensusUpdateResult), Error>; + ) -> Result<(Self::ClientState, ConsensusUpdateResult), Error>; /// Verification functions as specified in: /// @@ -82,7 +84,7 @@ pub trait ClientDef: Clone { #[allow(clippy::too_many_arguments)] fn verify_client_consensus_state( &self, - ctx: &dyn LightClientContext, + ctx: &dyn LightClientContext, client_state: &Self::ClientState, height: Height, prefix: &CommitmentPrefix, @@ -90,14 +92,14 @@ pub trait ClientDef: Clone { root: &CommitmentRoot, client_id: &ClientId, consensus_height: Height, - expected_consensus_state: &AnyConsensusState, + expected_consensus_state: &AnyConsensusState, ) -> Result<(), Error>; /// Verify a `proof` that a connection state matches that of the input `connection_end`. 
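The core change in `client_def.rs` is that `ClientDef` now names a `Crypto` associated type and every method takes the context as a trait object parameterized by it, while `ConsensusUpdateResult` can return either one consensus state or a batch keyed by height (the BEEFY case). Stripped down to that shape, with simplified placeholder types:

```rust
use core::fmt::Debug;

pub trait HostCrypto: Clone + Debug + Send + Sync {}

pub trait Context<C: HostCrypto> {
    fn host_height(&self) -> u64;
}

#[derive(Clone, Debug, PartialEq, Eq)]
pub enum ConsensusUpdateResult<S> {
    /// One new consensus state (e.g. a single header update).
    Single(S),
    /// Several consensus states at once, keyed by height (e.g. parachain headers).
    Batch(Vec<(u64, S)>),
}

pub trait ClientDef: Clone {
    type Crypto: HostCrypto;
    type Header;
    type ClientState;
    type ConsensusState;

    fn update_state(
        &self,
        ctx: &dyn Context<Self::Crypto>,
        client_state: Self::ClientState,
        header: Self::Header,
    ) -> Result<(Self::ClientState, ConsensusUpdateResult<Self::ConsensusState>), String>;
}
```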
#[allow(clippy::too_many_arguments)] fn verify_connection_state( &self, - ctx: &dyn LightClientContext, + ctx: &dyn LightClientContext, client_id: &ClientId, client_state: &Self::ClientState, height: Height, @@ -112,7 +114,7 @@ pub trait ClientDef: Clone { #[allow(clippy::too_many_arguments)] fn verify_channel_state( &self, - ctx: &dyn LightClientContext, + ctx: &dyn LightClientContext, client_id: &ClientId, client_state: &Self::ClientState, height: Height, @@ -128,7 +130,7 @@ pub trait ClientDef: Clone { #[allow(clippy::too_many_arguments)] fn verify_client_full_state( &self, - ctx: &dyn LightClientContext, + ctx: &dyn LightClientContext, client_state: &Self::ClientState, height: Height, prefix: &CommitmentPrefix, @@ -142,7 +144,7 @@ pub trait ClientDef: Clone { #[allow(clippy::too_many_arguments)] fn verify_packet_data( &self, - ctx: &dyn LightClientContext, + ctx: &dyn LightClientContext, client_id: &ClientId, client_state: &Self::ClientState, height: Height, @@ -159,7 +161,7 @@ pub trait ClientDef: Clone { #[allow(clippy::too_many_arguments)] fn verify_packet_acknowledgement( &self, - ctx: &dyn LightClientContext, + ctx: &dyn LightClientContext, client_id: &ClientId, client_state: &Self::ClientState, height: Height, @@ -176,7 +178,7 @@ pub trait ClientDef: Clone { #[allow(clippy::too_many_arguments)] fn verify_next_sequence_recv( &self, - ctx: &dyn LightClientContext, + ctx: &dyn LightClientContext, client_id: &ClientId, client_state: &Self::ClientState, height: Height, @@ -192,7 +194,7 @@ pub trait ClientDef: Clone { #[allow(clippy::too_many_arguments)] fn verify_packet_receipt_absence( &self, - ctx: &dyn LightClientContext, + ctx: &dyn LightClientContext, client_id: &ClientId, client_state: &Self::ClientState, height: Height, @@ -207,33 +209,34 @@ pub trait ClientDef: Clone { #[derive(Clone, Debug, PartialEq, Eq)] pub enum AnyClient { - Tendermint(TendermintClient), + Tendermint(TendermintClient), Beefy(BeefyClient), #[cfg(any(test, feature = "mocks"))] - Mock(MockClient), + Mock(MockClient), } impl AnyClient { pub fn from_client_type(client_type: ClientType) -> Self { match client_type { - ClientType::Tendermint => Self::Tendermint(TendermintClient::default()), + ClientType::Tendermint => Self::Tendermint(TendermintClient::::default()), ClientType::Beefy => Self::Beefy(BeefyClient::::default()), #[cfg(any(test, feature = "mocks"))] - ClientType::Mock => Self::Mock(MockClient), + ClientType::Mock => Self::Mock(MockClient::::default()), } } } // ⚠️ Beware of the awful boilerplate below ⚠️ -impl ClientDef for AnyClient { +impl ClientDef for AnyClient { type Header = AnyHeader; type ClientState = AnyClientState; - type ConsensusState = AnyConsensusState; + type ConsensusState = AnyConsensusState; + type Crypto = Crypto; /// Validate an incoming header fn verify_header( &self, - ctx: &dyn LightClientContext, + ctx: &dyn LightClientContext, client_id: ClientId, client_state: Self::ClientState, header: Self::Header, @@ -275,11 +278,11 @@ impl ClientDef for AnyClient { /// Validates an incoming `header` against the latest consensus state of this client. 
fn update_state( &self, - ctx: &dyn LightClientContext, + ctx: &dyn LightClientContext, client_id: ClientId, client_state: AnyClientState, header: AnyHeader, - ) -> Result<(AnyClientState, ConsensusUpdateResult), Error> { + ) -> Result<(AnyClientState, ConsensusUpdateResult), Error> { match self { Self::Tendermint(client) => { let (client_state, header) = downcast!( @@ -365,7 +368,7 @@ impl ClientDef for AnyClient { /// Checks for misbehaviour in an incoming header fn check_for_misbehaviour( &self, - ctx: &dyn LightClientContext, + ctx: &dyn LightClientContext, client_id: ClientId, client_state: Self::ClientState, header: Self::Header, @@ -407,7 +410,7 @@ impl ClientDef for AnyClient { consensus_state: &Self::ConsensusState, proof_upgrade_client: Vec, proof_upgrade_consensus_state: Vec, - ) -> Result<(Self::ClientState, ConsensusUpdateResult), Error> { + ) -> Result<(Self::ClientState, ConsensusUpdateResult), Error> { match self { Self::Tendermint(client) => { let (client_state, consensus_state) = downcast!( @@ -465,7 +468,7 @@ impl ClientDef for AnyClient { fn verify_client_consensus_state( &self, - ctx: &dyn LightClientContext, + ctx: &dyn LightClientContext, client_state: &Self::ClientState, height: Height, prefix: &CommitmentPrefix, @@ -473,7 +476,7 @@ impl ClientDef for AnyClient { root: &CommitmentRoot, client_id: &ClientId, consensus_height: Height, - expected_consensus_state: &AnyConsensusState, + expected_consensus_state: &AnyConsensusState, ) -> Result<(), Error> { match self { Self::Tendermint(client) => { @@ -537,7 +540,7 @@ impl ClientDef for AnyClient { fn verify_connection_state( &self, - ctx: &dyn LightClientContext, + ctx: &dyn LightClientContext, client_id: &ClientId, client_state: &AnyClientState, height: Height, @@ -602,7 +605,7 @@ impl ClientDef for AnyClient { fn verify_channel_state( &self, - ctx: &dyn LightClientContext, + ctx: &dyn LightClientContext, client_id: &ClientId, client_state: &AnyClientState, height: Height, @@ -672,7 +675,7 @@ impl ClientDef for AnyClient { } fn verify_client_full_state( &self, - ctx: &dyn LightClientContext, + ctx: &dyn LightClientContext, client_state: &Self::ClientState, height: Height, prefix: &CommitmentPrefix, @@ -739,7 +742,7 @@ impl ClientDef for AnyClient { fn verify_packet_data( &self, - ctx: &dyn LightClientContext, + ctx: &dyn LightClientContext, client_id: &ClientId, client_state: &Self::ClientState, height: Height, @@ -820,7 +823,7 @@ impl ClientDef for AnyClient { fn verify_packet_acknowledgement( &self, - ctx: &dyn LightClientContext, + ctx: &dyn LightClientContext, client_id: &ClientId, client_state: &Self::ClientState, height: Height, @@ -899,7 +902,7 @@ impl ClientDef for AnyClient { } fn verify_next_sequence_recv( &self, - ctx: &dyn LightClientContext, + ctx: &dyn LightClientContext, client_id: &ClientId, client_state: &Self::ClientState, height: Height, @@ -976,7 +979,7 @@ impl ClientDef for AnyClient { fn verify_packet_receipt_absence( &self, - ctx: &dyn LightClientContext, + ctx: &dyn LightClientContext, client_id: &ClientId, client_state: &Self::ClientState, height: Height, diff --git a/modules/src/core/ics02_client/context.rs b/modules/src/core/ics02_client/context.rs index 2d9075187d..abbc83209f 100644 --- a/modules/src/core/ics02_client/context.rs +++ b/modules/src/core/ics02_client/context.rs @@ -2,6 +2,7 @@ //! that any host chain must implement to be able to process any `ClientMsg`. See //! "ADR 003: IBC protocol implementation" for more details. 
+use crate::clients::crypto_ops::crypto::CryptoOps; use crate::core::ics02_client::client_consensus::AnyConsensusState; use crate::core::ics02_client::client_def::ConsensusUpdateResult; use crate::core::ics02_client::client_state::AnyClientState; @@ -11,10 +12,10 @@ use crate::core::ics02_client::handler::ClientResult::{self, Create, Update, Upg use crate::core::ics24_host::identifier::ClientId; use crate::timestamp::Timestamp; use crate::Height; -use alloc::boxed::Box; /// Defines the read-only part of ICS2 (client functions) context. pub trait ClientReader { + type Crypto: CryptoOps; fn client_type(&self, client_id: &ClientId) -> Result; fn client_state(&self, client_id: &ClientId) -> Result; @@ -26,7 +27,7 @@ pub trait ClientReader { &self, client_id: &ClientId, height: Height, - ) -> Result; + ) -> Result, Error>; /// Similar to `consensus_state`, attempt to retrieve the consensus state, /// but return `None` if no state exists at the given height. @@ -34,7 +35,7 @@ pub trait ClientReader { &self, client_id: &ClientId, height: Height, - ) -> Result, Error> { + ) -> Result>, Error> { match self.consensus_state(client_id, height) { Ok(cs) => Ok(Some(cs)), Err(e) => match e.detail() { @@ -49,16 +50,14 @@ pub trait ClientReader { &self, client_id: &ClientId, height: Height, - filter_fn: Option bool>>, - ) -> Result, Error>; + ) -> Result>, Error>; /// Search for the highest consensus state lower than `height`. fn prev_consensus_state( &self, client_id: &ClientId, height: Height, - filter_fn: Option bool>>, - ) -> Result, Error>; + ) -> Result>, Error>; /// Returns the current height of the local chain. fn host_height(&self) -> Height; @@ -72,10 +71,13 @@ pub trait ClientReader { } /// Returns the `ConsensusState` of the host (local) chain at a specific height. - fn host_consensus_state(&self, height: Height) -> Result; + fn host_consensus_state( + &self, + height: Height, + ) -> Result, Error>; /// Returns the pending `ConsensusState` of the host (local) chain. - fn pending_host_consensus_state(&self) -> Result; + fn pending_host_consensus_state(&self) -> Result, Error>; /// Returns a natural number, counting how many clients have been created thus far. /// The value of this counter should increase only via method `ClientKeeper::increase_client_counter`. @@ -84,7 +86,12 @@ pub trait ClientReader { /// Defines the write-only part of ICS2 (client functions) context. pub trait ClientKeeper { - fn store_client_result(&mut self, handler_res: ClientResult) -> Result<(), Error> { + type Crypto: CryptoOps; + + fn store_client_result( + &mut self, + handler_res: ClientResult, + ) -> Result<(), Error> { match handler_res { Create(res) => { let client_id = res.client_id.clone(); @@ -197,7 +204,7 @@ pub trait ClientKeeper { &mut self, client_id: ClientId, height: Height, - consensus_state: AnyConsensusState, + consensus_state: AnyConsensusState, ) -> Result<(), Error>; /// Called upon client creation. 
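In `context.rs`, `ClientReader` gains an associated `Crypto` type and `maybe_consensus_state` remains a provided method that reuses `consensus_state` but maps a "not found" error to `None`. The provided-method pattern in isolation (error variants and types are placeholders):

```rust
#[derive(Debug, PartialEq)]
pub enum Error {
    ConsensusStateNotFound { height: u64 },
    Other(String),
}

pub trait ClientReader {
    type ConsensusState;

    fn consensus_state(&self, height: u64) -> Result<Self::ConsensusState, Error>;

    /// Provided method: look the state up, but treat "not found" as `None`
    /// instead of bubbling it up as an error.
    fn maybe_consensus_state(
        &self,
        height: u64,
    ) -> Result<Option<Self::ConsensusState>, Error> {
        match self.consensus_state(height) {
            Ok(cs) => Ok(Some(cs)),
            Err(Error::ConsensusStateNotFound { .. }) => Ok(None),
            Err(e) => Err(e),
        }
    }
}
```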
diff --git a/modules/src/core/ics02_client/handler.rs b/modules/src/core/ics02_client/handler.rs index 4eaf41113a..ef649183c2 100644 --- a/modules/src/core/ics02_client/handler.rs +++ b/modules/src/core/ics02_client/handler.rs @@ -4,29 +4,30 @@ use crate::core::ics02_client::error::Error; use crate::core::ics02_client::msgs::ClientMsg; use crate::core::ics26_routing::context::LightClientContext; use crate::handler::HandlerOutput; +use core::fmt::Debug; pub mod create_client; pub mod update_client; pub mod upgrade_client; #[derive(Clone, Debug, PartialEq, Eq)] -pub enum ClientResult { - Create(create_client::Result), - Update(update_client::Result), - Upgrade(upgrade_client::Result), +pub enum ClientResult { + Create(create_client::Result), + Update(update_client::Result), + Upgrade(upgrade_client::Result), } /// General entry point for processing any message related to ICS2 (client functions) protocols. pub fn dispatch( ctx: &Ctx, - msg: ClientMsg, -) -> Result, Error> + msg: ClientMsg, +) -> Result>, Error> where - Ctx: LightClientContext, - Crypto: CryptoOps, + Ctx: LightClientContext, + Crypto: CryptoOps + Debug + Send + Sync + PartialEq + Eq, { match msg { - ClientMsg::CreateClient(msg) => create_client::process(ctx, msg), + ClientMsg::CreateClient(msg) => create_client::process::(ctx, msg), ClientMsg::UpdateClient(msg) => update_client::process::(ctx, msg), ClientMsg::UpgradeClient(msg) => upgrade_client::process::(ctx, msg), _ => { diff --git a/modules/src/core/ics02_client/handler/create_client.rs b/modules/src/core/ics02_client/handler/create_client.rs index bd8abaacce..b8eb6198fb 100644 --- a/modules/src/core/ics02_client/handler/create_client.rs +++ b/modules/src/core/ics02_client/handler/create_client.rs @@ -1,7 +1,9 @@ //! Protocol logic specific to processing ICS2 messages of type `MsgCreateAnyClient`. +use crate::clients::crypto_ops::crypto::CryptoOps; use crate::core::ics26_routing::context::LightClientContext; use crate::prelude::*; +use core::fmt::Debug; use crate::core::ics02_client::client_consensus::AnyConsensusState; use crate::core::ics02_client::client_state::AnyClientState; @@ -19,19 +21,19 @@ use crate::timestamp::Timestamp; /// The result following the successful processing of a `MsgCreateAnyClient` message. Preferably /// this data type should be used with a qualified name `create_client::Result` to avoid ambiguity. #[derive(Clone, Debug, PartialEq, Eq)] -pub struct Result { +pub struct Result { pub client_id: ClientId, pub client_type: ClientType, pub client_state: AnyClientState, - pub consensus_state: Option, + pub consensus_state: Option>, pub processed_time: Timestamp, pub processed_height: Height, } -pub fn process( - ctx: &dyn LightClientContext, - msg: MsgCreateAnyClient, -) -> HandlerResult { +pub fn process( + ctx: &dyn LightClientContext, + msg: MsgCreateAnyClient, +) -> HandlerResult, Error> { let mut output = HandlerOutput::builder(); // Construct this client's identifier diff --git a/modules/src/core/ics02_client/handler/update_client.rs b/modules/src/core/ics02_client/handler/update_client.rs index 3ea0c28b3c..4a3b9659de 100644 --- a/modules/src/core/ics02_client/handler/update_client.rs +++ b/modules/src/core/ics02_client/handler/update_client.rs @@ -1,4 +1,5 @@ //! Protocol logic specific to processing ICS2 messages of type `MsgUpdateAnyClient`. 
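Because the handlers are now generic over the crypto type and that type only shows up behind the context trait object, the dispatcher has to pin it with a turbofish, e.g. `create_client::process::<Crypto>(ctx, msg)`. A minimal illustration of why the explicit annotation is needed (all names here are illustrative):

```rust
use core::marker::PhantomData;

pub trait HostCrypto {}
pub struct Sha2Crypto;
impl HostCrypto for Sha2Crypto {}

pub trait Context<C: HostCrypto> {}

pub struct Output<C: HostCrypto>(PhantomData<C>);

// `C` appears only behind the trait object and in the return type, so the
// caller generally has to name it: `process::<Sha2Crypto>(&host)`.
pub fn process<C: HostCrypto>(_ctx: &dyn Context<C>) -> Output<C> {
    Output(PhantomData)
}

struct Host;
impl Context<Sha2Crypto> for Host {}

fn main() {
    let host = Host;
    let _out = process::<Sha2Crypto>(&host);
}
```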
+use core::fmt::Debug; use tracing::debug; use crate::clients::crypto_ops::crypto::CryptoOps; @@ -20,18 +21,18 @@ use crate::timestamp::Timestamp; /// The result following the successful processing of a `MsgUpdateAnyClient` message. Preferably /// this data type should be used with a qualified name `update_client::Result` to avoid ambiguity. #[derive(Clone, Debug, PartialEq, Eq)] -pub struct Result { +pub struct Result { pub client_id: ClientId, pub client_state: AnyClientState, - pub consensus_state: Option, + pub consensus_state: Option>, pub processed_time: Timestamp, pub processed_height: Height, } -pub fn process( - ctx: &dyn LightClientContext, +pub fn process( + ctx: &dyn LightClientContext, msg: MsgUpdateAnyClient, -) -> HandlerResult { +) -> HandlerResult, Error> { let mut output = HandlerOutput::builder(); let MsgUpdateAnyClient { @@ -131,6 +132,7 @@ mod tests { use crate::clients::ics11_beefy::client_state::ClientState as BeefyClientState; use crate::clients::ics11_beefy::header::BeefyHeader; + use crate::clients::ics11_beefy::header::ParachainHeader as BeefyParachainHeader; use crate::clients::ics11_beefy::polkadot_runtime as runtime; use crate::core::ics02_client::client_consensus::AnyConsensusState; use crate::core::ics02_client::client_state::{AnyClientState, ClientState}; @@ -154,17 +156,16 @@ mod tests { use crate::test_utils::{get_dummy_account_id, Crypto}; use crate::timestamp::Timestamp; use crate::Height; - use beefy_primitives::mmr::BeefyNextAuthoritySet; - use hex_literal::hex; - use sp_core::H256; - use crate::clients::ics11_beefy::header::ParachainHeader as BeefyParachainHeader; use beefy_client::primitives::{ MmrLeaf, MmrUpdateProof, ParachainHeader, PartialMmrLeaf, SignatureWithAuthorityIndex, SignedCommitment, }; use beefy_client::{MerkleHasher, NodesUtils}; + use beefy_primitives::mmr::BeefyNextAuthoritySet; use codec::{Decode, Encode}; + use hex_literal::hex; use sp_core::keccak_256; + use sp_core::H256; use sp_runtime::traits::{BlakeTwo256, Convert}; use std::collections::BTreeMap; use subxt::rpc::ClientT; diff --git a/modules/src/core/ics02_client/handler/upgrade_client.rs b/modules/src/core/ics02_client/handler/upgrade_client.rs index 61a8a91e99..fe71798063 100644 --- a/modules/src/core/ics02_client/handler/upgrade_client.rs +++ b/modules/src/core/ics02_client/handler/upgrade_client.rs @@ -12,20 +12,21 @@ use crate::core::ics26_routing::context::LightClientContext; use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; +use core::fmt::Debug; /// The result following the successful processing of a `MsgUpgradeAnyClient` message. /// This data type should be used with a qualified name `upgrade_client::Result` to avoid ambiguity. #[derive(Clone, Debug, PartialEq, Eq)] -pub struct Result { +pub struct Result { pub client_id: ClientId, pub client_state: AnyClientState, - pub consensus_state: Option, + pub consensus_state: Option>, } -pub fn process( - ctx: &dyn LightClientContext, - msg: MsgUpgradeAnyClient, -) -> HandlerResult { +pub fn process( + ctx: &dyn LightClientContext, + msg: MsgUpgradeAnyClient, +) -> HandlerResult, Error> { let mut output = HandlerOutput::builder(); let MsgUpgradeAnyClient { client_id, .. 
} = msg; diff --git a/modules/src/core/ics02_client/msgs.rs b/modules/src/core/ics02_client/msgs.rs index 3131be9beb..d6db69bc89 100644 --- a/modules/src/core/ics02_client/msgs.rs +++ b/modules/src/core/ics02_client/msgs.rs @@ -16,9 +16,9 @@ pub mod upgrade_client; #[allow(clippy::large_enum_variant)] #[derive(Clone, Debug)] -pub enum ClientMsg { - CreateClient(MsgCreateAnyClient), +pub enum ClientMsg { + CreateClient(MsgCreateAnyClient), UpdateClient(MsgUpdateAnyClient), Misbehaviour(MsgSubmitAnyMisbehaviour), - UpgradeClient(MsgUpgradeAnyClient), + UpgradeClient(MsgUpgradeAnyClient), } diff --git a/modules/src/core/ics02_client/msgs/create_client.rs b/modules/src/core/ics02_client/msgs/create_client.rs index effd6bec72..a36157971a 100644 --- a/modules/src/core/ics02_client/msgs/create_client.rs +++ b/modules/src/core/ics02_client/msgs/create_client.rs @@ -16,16 +16,16 @@ pub const TYPE_URL: &str = "/ibc.core.client.v1.MsgCreateClient"; /// A type of message that triggers the creation of a new on-chain (IBC) client. #[derive(Clone, Debug, PartialEq, Eq)] -pub struct MsgCreateAnyClient { +pub struct MsgCreateAnyClient { pub client_state: AnyClientState, - pub consensus_state: Option, + pub consensus_state: Option>, pub signer: Signer, } -impl MsgCreateAnyClient { +impl MsgCreateAnyClient { pub fn new( client_state: AnyClientState, - consensus_state: Option, + consensus_state: Option>, signer: Signer, ) -> Result { match consensus_state.as_ref() { @@ -48,7 +48,7 @@ impl MsgCreateAnyClient { } } -impl Msg for MsgCreateAnyClient { +impl Msg for MsgCreateAnyClient { type ValidationError = crate::core::ics24_host::error::ValidationError; type Raw = RawMsgCreateClient; @@ -61,9 +61,9 @@ impl Msg for MsgCreateAnyClient { } } -impl Protobuf for MsgCreateAnyClient {} +impl Protobuf for MsgCreateAnyClient {} -impl TryFrom for MsgCreateAnyClient { +impl TryFrom for MsgCreateAnyClient { type Error = Error; fn try_from(raw: RawMsgCreateClient) -> Result { @@ -83,8 +83,8 @@ impl TryFrom for MsgCreateAnyClient { } } -impl From for RawMsgCreateClient { - fn from(ics_msg: MsgCreateAnyClient) -> Self { +impl From> for RawMsgCreateClient { + fn from(ics_msg: MsgCreateAnyClient) -> Self { RawMsgCreateClient { client_state: Some(ics_msg.client_state.into()), consensus_state: ics_msg.consensus_state.map(|cs| cs.into()), diff --git a/modules/src/core/ics02_client/msgs/upgrade_client.rs b/modules/src/core/ics02_client/msgs/upgrade_client.rs index db7907dfed..a1948e97b1 100644 --- a/modules/src/core/ics02_client/msgs/upgrade_client.rs +++ b/modules/src/core/ics02_client/msgs/upgrade_client.rs @@ -18,19 +18,19 @@ pub(crate) const TYPE_URL: &str = "/ibc.core.client.v1.MsgUpgradeClient"; /// A type of message that triggers the upgrade of an on-chain (IBC) client. 
#[derive(Clone, Debug, PartialEq)] -pub struct MsgUpgradeAnyClient { +pub struct MsgUpgradeAnyClient { pub client_id: ClientId, pub client_state: AnyClientState, - pub consensus_state: AnyConsensusState, + pub consensus_state: AnyConsensusState, pub proof_upgrade_client: Vec, pub proof_upgrade_consensus_state: Vec, pub signer: Signer, } -impl MsgUpgradeAnyClient { +impl MsgUpgradeAnyClient { pub fn new( client_id: ClientId, client_state: AnyClientState, - consensus_state: AnyConsensusState, + consensus_state: AnyConsensusState, proof_upgrade_client: Vec, proof_upgrade_consensus_state: Vec, signer: Signer, @@ -46,7 +46,7 @@ impl MsgUpgradeAnyClient { } } -impl Msg for MsgUpgradeAnyClient { +impl Msg for MsgUpgradeAnyClient { type ValidationError = crate::core::ics24_host::error::ValidationError; type Raw = RawMsgUpgradeClient; @@ -59,10 +59,10 @@ impl Msg for MsgUpgradeAnyClient { } } -impl Protobuf for MsgUpgradeAnyClient {} +impl Protobuf for MsgUpgradeAnyClient {} -impl From for RawMsgUpgradeClient { - fn from(dm_msg: MsgUpgradeAnyClient) -> RawMsgUpgradeClient { +impl From> for RawMsgUpgradeClient { + fn from(dm_msg: MsgUpgradeAnyClient) -> RawMsgUpgradeClient { RawMsgUpgradeClient { client_id: dm_msg.client_id.to_string(), client_state: Some(dm_msg.client_state.into()), @@ -74,7 +74,7 @@ impl From for RawMsgUpgradeClient { } } -impl TryFrom for MsgUpgradeAnyClient { +impl TryFrom for MsgUpgradeAnyClient { type Error = Error; fn try_from(proto_msg: RawMsgUpgradeClient) -> Result { @@ -113,13 +113,13 @@ pub mod test_util { client_state::{MockClientState, MockConsensusState}, header::MockHeader, }, - test_utils::{get_dummy_bech32_account, get_dummy_proof}, + test_utils::{get_dummy_bech32_account, get_dummy_proof, Crypto}, }; use super::MsgUpgradeAnyClient; /// Extends the implementation with additional helper methods. - impl MsgUpgradeAnyClient { + impl MsgUpgradeAnyClient { /// Setter for `client_id`. Amenable to chaining, since it consumes the input message. 
pub fn with_client_id(self, client_id: ClientId) -> Self { MsgUpgradeAnyClient { client_id, ..self } diff --git a/modules/src/core/ics03_connection/handler.rs b/modules/src/core/ics03_connection/handler.rs index 7e2323a7b1..fa5897c1c6 100644 --- a/modules/src/core/ics03_connection/handler.rs +++ b/modules/src/core/ics03_connection/handler.rs @@ -6,6 +6,7 @@ use crate::core::ics03_connection::msgs::ConnectionMsg; use crate::core::ics24_host::identifier::ConnectionId; use crate::core::ics26_routing::context::LightClientContext; use crate::handler::HandlerOutput; +use core::fmt::Debug; pub mod conn_open_ack; pub mod conn_open_confirm; @@ -46,8 +47,8 @@ pub fn dispatch( msg: ConnectionMsg, ) -> Result, Error> where - Ctx: LightClientContext, - Crypto: CryptoOps, + Ctx: LightClientContext, + Crypto: CryptoOps + Debug + Send + Sync + PartialEq + Eq, { match msg { ConnectionMsg::ConnectionOpenInit(msg) => conn_open_init::process(ctx, msg), diff --git a/modules/src/core/ics03_connection/handler/conn_open_ack.rs b/modules/src/core/ics03_connection/handler/conn_open_ack.rs index 4c9fe1e47a..1a0d62e4d9 100644 --- a/modules/src/core/ics03_connection/handler/conn_open_ack.rs +++ b/modules/src/core/ics03_connection/handler/conn_open_ack.rs @@ -13,15 +13,16 @@ use crate::core::ics26_routing::context::LightClientContext; use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; +use core::fmt::Debug; -pub(crate) fn process( - ctx: &dyn LightClientContext, +pub(crate) fn process( + ctx: &dyn LightClientContext, msg: MsgConnectionOpenAck, ) -> HandlerResult { let mut output = HandlerOutput::builder(); // Check the client's (consensus state) proof height. - check_client_consensus_height(ctx, msg.consensus_height())?; + check_client_consensus_height::(ctx, msg.consensus_height())?; // Validate the connection end. let mut conn_end = ctx.connection_end(&msg.connection_id)?; diff --git a/modules/src/core/ics03_connection/handler/conn_open_confirm.rs b/modules/src/core/ics03_connection/handler/conn_open_confirm.rs index 693caa2b7d..2b70cdba70 100644 --- a/modules/src/core/ics03_connection/handler/conn_open_confirm.rs +++ b/modules/src/core/ics03_connection/handler/conn_open_confirm.rs @@ -11,9 +11,10 @@ use crate::core::ics26_routing::context::LightClientContext; use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; +use core::fmt::Debug; -pub(crate) fn process( - ctx: &dyn LightClientContext, +pub(crate) fn process( + ctx: &dyn LightClientContext, msg: MsgConnectionOpenConfirm, ) -> HandlerResult { let mut output = HandlerOutput::builder(); diff --git a/modules/src/core/ics03_connection/handler/conn_open_init.rs b/modules/src/core/ics03_connection/handler/conn_open_init.rs index 89b8efbb68..db623b946e 100644 --- a/modules/src/core/ics03_connection/handler/conn_open_init.rs +++ b/modules/src/core/ics03_connection/handler/conn_open_init.rs @@ -1,5 +1,6 @@ //! Protocol logic specific to ICS3 messages of type `MsgConnectionOpenInit`. 
+use crate::clients::crypto_ops::crypto::CryptoOps; use crate::core::ics03_connection::connection::{ConnectionEnd, State}; use crate::core::ics03_connection::error::Error; use crate::core::ics03_connection::events::Attributes; @@ -11,8 +12,8 @@ use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; -pub(crate) fn process( - ctx: &dyn LightClientContext, +pub(crate) fn process( + ctx: &dyn LightClientContext, msg: MsgConnectionOpenInit, ) -> HandlerResult { let mut output = HandlerOutput::builder(); diff --git a/modules/src/core/ics03_connection/handler/conn_open_try.rs b/modules/src/core/ics03_connection/handler/conn_open_try.rs index 75165da65c..079bb584a9 100644 --- a/modules/src/core/ics03_connection/handler/conn_open_try.rs +++ b/modules/src/core/ics03_connection/handler/conn_open_try.rs @@ -14,15 +14,16 @@ use crate::core::ics26_routing::context::LightClientContext; use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; +use core::fmt::Debug; -pub(crate) fn process( - ctx: &dyn LightClientContext, +pub(crate) fn process( + ctx: &dyn LightClientContext, msg: MsgConnectionOpenTry, ) -> HandlerResult { let mut output = HandlerOutput::builder(); // Check that consensus height (for client proof) in message is not too advanced nor too old. - check_client_consensus_height(ctx, msg.consensus_height())?; + check_client_consensus_height::(ctx, msg.consensus_height())?; // Unwrap the old connection end (if any) and its identifier. let (mut new_connection_end, conn_id) = match &msg.previous_connection_id { diff --git a/modules/src/core/ics03_connection/handler/verify.rs b/modules/src/core/ics03_connection/handler/verify.rs index 072688476f..3a3b5e587f 100644 --- a/modules/src/core/ics03_connection/handler/verify.rs +++ b/modules/src/core/ics03_connection/handler/verify.rs @@ -9,10 +9,11 @@ use crate::core::ics23_commitment::commitment::CommitmentProofBytes; use crate::core::ics26_routing::context::LightClientContext; use crate::proofs::{ConsensusProof, Proofs}; use crate::Height; +use core::fmt::Debug; /// Entry point for verifying all proofs bundled in any ICS3 message. -pub fn verify_proofs( - ctx: &dyn LightClientContext, +pub fn verify_proofs( + ctx: &dyn LightClientContext, client_state: Option, height: Height, connection_end: &ConnectionEnd, @@ -59,8 +60,8 @@ pub fn verify_proofs( /// Verifies the authenticity and semantic correctness of a commitment `proof`. The commitment /// claims to prove that an object of type connection exists on the source chain (i.e., the chain /// which created this proof). This object must match the state of `expected_conn`. -pub fn verify_connection_proof( - ctx: &dyn LightClientContext, +pub fn verify_connection_proof( + ctx: &dyn LightClientContext, height: Height, connection_end: &ConnectionEnd, expected_conn: &ConnectionEnd, @@ -114,8 +115,8 @@ pub fn verify_connection_proof( /// complete verification: that the client state the counterparty stores is valid (i.e., not frozen, /// at the same revision as the current chain, with matching chain identifiers, etc) and that the /// `proof` is correct. 
-pub fn verify_client_proof( - ctx: &dyn LightClientContext, +pub fn verify_client_proof( + ctx: &dyn LightClientContext, height: Height, connection_end: &ConnectionEnd, expected_client_state: AnyClientState, @@ -153,8 +154,8 @@ pub fn verify_client_proof( }) } -pub fn verify_consensus_proof( - ctx: &dyn LightClientContext, +pub fn verify_consensus_proof( + ctx: &dyn LightClientContext, height: Height, connection_end: &ConnectionEnd, proof: &ConsensusProof, @@ -196,8 +197,8 @@ pub fn verify_consensus_proof( /// Checks that `claimed_height` is within normal bounds, i.e., fresh enough so that the chain has /// not pruned it yet, but not newer than the current (actual) height of the local chain. -pub fn check_client_consensus_height( - ctx: &dyn LightClientContext, +pub fn check_client_consensus_height( + ctx: &dyn LightClientContext, claimed_height: Height, ) -> Result<(), Error> { if claimed_height > ctx.host_height() { diff --git a/modules/src/core/ics04_channel/handler.rs b/modules/src/core/ics04_channel/handler.rs index 752fae0091..fc052ab6bf 100644 --- a/modules/src/core/ics04_channel/handler.rs +++ b/modules/src/core/ics04_channel/handler.rs @@ -10,6 +10,7 @@ use crate::core::ics26_routing::context::{ Ics26Context, LightClientContext, ModuleId, ModuleOutput, OnRecvPacketAck, Router, }; use crate::handler::{HandlerOutput, HandlerOutputBuilder}; +use core::fmt::Debug; pub mod acknowledgement; pub mod chan_close_confirm; @@ -63,8 +64,8 @@ pub fn channel_dispatch( msg: &ChannelMsg, ) -> Result<(HandlerOutputBuilder<()>, ChannelResult), Error> where - Ctx: LightClientContext, - Crypto: CryptoOps, + Ctx: LightClientContext, + Crypto: CryptoOps + Debug + Send + Sync + PartialEq + Eq, { let output = match msg { ChannelMsg::ChannelOpenInit(msg) => chan_open_init::process(ctx, msg), @@ -172,8 +173,8 @@ pub fn packet_dispatch( msg: &PacketMsg, ) -> Result<(HandlerOutputBuilder<()>, PacketResult), Error> where - Ctx: LightClientContext, - Crypto: CryptoOps, + Ctx: LightClientContext, + Crypto: CryptoOps + Debug + Send + Sync + PartialEq + Eq, { let output = match msg { PacketMsg::RecvPacket(msg) => recv_packet::process::(ctx, msg), diff --git a/modules/src/core/ics04_channel/handler/acknowledgement.rs b/modules/src/core/ics04_channel/handler/acknowledgement.rs index 58f0ead9a7..501475526b 100644 --- a/modules/src/core/ics04_channel/handler/acknowledgement.rs +++ b/modules/src/core/ics04_channel/handler/acknowledgement.rs @@ -12,6 +12,7 @@ use crate::core::ics26_routing::context::LightClientContext; use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; +use core::fmt::Debug; #[derive(Clone, Debug)] pub struct AckPacketResult { @@ -21,8 +22,8 @@ pub struct AckPacketResult { pub seq_number: Option, } -pub fn process( - ctx: &dyn LightClientContext, +pub fn process( + ctx: &dyn LightClientContext, msg: &MsgAcknowledgement, ) -> HandlerResult { let mut output = HandlerOutput::builder(); diff --git a/modules/src/core/ics04_channel/handler/chan_close_confirm.rs b/modules/src/core/ics04_channel/handler/chan_close_confirm.rs index 8aa92bb568..574ebe2fdd 100644 --- a/modules/src/core/ics04_channel/handler/chan_close_confirm.rs +++ b/modules/src/core/ics04_channel/handler/chan_close_confirm.rs @@ -12,9 +12,10 @@ use crate::core::ics26_routing::context::LightClientContext; use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; +use core::fmt::Debug; -pub(crate) fn process( - ctx: &dyn LightClientContext, 
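`check_client_consensus_height` keeps its logic and only picks up the crypto parameter: it rejects a proof whose claimed consensus height is ahead of the local chain (ibc-rs also rejects heights the host has already pruned, which is elided here). The freshness check in isolation, with simplified types:

```rust
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
pub struct Height(pub u64);

#[derive(Debug, PartialEq)]
pub enum Error {
    InvalidConsensusHeight { claimed: Height, host: Height },
}

fn check_client_consensus_height(host_height: Height, claimed: Height) -> Result<(), Error> {
    // A proof cannot reference a consensus state from the host's future.
    if claimed > host_height {
        return Err(Error::InvalidConsensusHeight {
            claimed,
            host: host_height,
        });
    }
    Ok(())
}

fn main() {
    assert!(check_client_consensus_height(Height(100), Height(90)).is_ok());
    assert!(check_client_consensus_height(Height(100), Height(101)).is_err());
}
```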
+pub(crate) fn process( + ctx: &dyn LightClientContext, msg: &MsgChannelCloseConfirm, ) -> HandlerResult { let mut output = HandlerOutput::builder(); diff --git a/modules/src/core/ics04_channel/handler/chan_close_init.rs b/modules/src/core/ics04_channel/handler/chan_close_init.rs index 634d661c51..e5f62ad737 100644 --- a/modules/src/core/ics04_channel/handler/chan_close_init.rs +++ b/modules/src/core/ics04_channel/handler/chan_close_init.rs @@ -1,4 +1,5 @@ //! Protocol logic specific to ICS4 messages of type `MsgChannelCloseInit`. +use crate::clients::crypto_ops::crypto::CryptoOps; use crate::core::ics03_connection::connection::State as ConnectionState; use crate::core::ics04_channel::channel::State; use crate::core::ics04_channel::error::Error; @@ -9,8 +10,8 @@ use crate::core::ics26_routing::context::LightClientContext; use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; -pub(crate) fn process( - ctx: &dyn LightClientContext, +pub(crate) fn process( + ctx: &dyn LightClientContext, msg: &MsgChannelCloseInit, ) -> HandlerResult { let mut output = HandlerOutput::builder(); diff --git a/modules/src/core/ics04_channel/handler/chan_open_ack.rs b/modules/src/core/ics04_channel/handler/chan_open_ack.rs index 20109cafe0..ca0a0689da 100644 --- a/modules/src/core/ics04_channel/handler/chan_open_ack.rs +++ b/modules/src/core/ics04_channel/handler/chan_open_ack.rs @@ -11,9 +11,10 @@ use crate::core::ics26_routing::context::LightClientContext; use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; +use core::fmt::Debug; -pub(crate) fn process( - ctx: &dyn LightClientContext, +pub(crate) fn process( + ctx: &dyn LightClientContext, msg: &MsgChannelOpenAck, ) -> HandlerResult { let mut output = HandlerOutput::builder(); diff --git a/modules/src/core/ics04_channel/handler/chan_open_confirm.rs b/modules/src/core/ics04_channel/handler/chan_open_confirm.rs index fc955886ae..b0a864370a 100644 --- a/modules/src/core/ics04_channel/handler/chan_open_confirm.rs +++ b/modules/src/core/ics04_channel/handler/chan_open_confirm.rs @@ -11,9 +11,10 @@ use crate::core::ics26_routing::context::LightClientContext; use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; +use core::fmt::Debug; -pub(crate) fn process( - ctx: &dyn LightClientContext, +pub(crate) fn process( + ctx: &dyn LightClientContext, msg: &MsgChannelOpenConfirm, ) -> HandlerResult { let mut output = HandlerOutput::builder(); diff --git a/modules/src/core/ics04_channel/handler/chan_open_init.rs b/modules/src/core/ics04_channel/handler/chan_open_init.rs index 4a1e9ecc7a..c3685efdee 100644 --- a/modules/src/core/ics04_channel/handler/chan_open_init.rs +++ b/modules/src/core/ics04_channel/handler/chan_open_init.rs @@ -1,5 +1,6 @@ //! Protocol logic specific to ICS4 messages of type `MsgChannelOpenInit`. 
+use crate::clients::crypto_ops::crypto::CryptoOps; use crate::core::ics04_channel::channel::{ChannelEnd, State}; use crate::core::ics04_channel::error::Error; use crate::core::ics04_channel::events::Attributes; @@ -11,8 +12,8 @@ use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; -pub(crate) fn process( - ctx: &dyn LightClientContext, +pub(crate) fn process( + ctx: &dyn LightClientContext, msg: &MsgChannelOpenInit, ) -> HandlerResult { let mut output = HandlerOutput::builder(); diff --git a/modules/src/core/ics04_channel/handler/chan_open_try.rs b/modules/src/core/ics04_channel/handler/chan_open_try.rs index 6e38c4d100..c7ac35e4c0 100644 --- a/modules/src/core/ics04_channel/handler/chan_open_try.rs +++ b/modules/src/core/ics04_channel/handler/chan_open_try.rs @@ -13,9 +13,10 @@ use crate::core::ics26_routing::context::LightClientContext; use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; +use core::fmt::Debug; -pub(crate) fn process( - ctx: &dyn LightClientContext, +pub(crate) fn process( + ctx: &dyn LightClientContext, msg: &MsgChannelOpenTry, ) -> HandlerResult { let mut output = HandlerOutput::builder(); diff --git a/modules/src/core/ics04_channel/handler/recv_packet.rs b/modules/src/core/ics04_channel/handler/recv_packet.rs index b316807e4e..e42979ac46 100644 --- a/modules/src/core/ics04_channel/handler/recv_packet.rs +++ b/modules/src/core/ics04_channel/handler/recv_packet.rs @@ -12,6 +12,7 @@ use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::timestamp::Expiry; use crate::Height; +use core::fmt::Debug; #[derive(Clone, Debug)] pub struct RecvPacketSuccess { @@ -28,8 +29,8 @@ pub enum RecvPacketResult { NoOp, } -pub fn process( - ctx: &dyn LightClientContext, +pub fn process( + ctx: &dyn LightClientContext, msg: &MsgRecvPacket, ) -> HandlerResult { let mut output = HandlerOutput::builder(); diff --git a/modules/src/core/ics04_channel/handler/send_packet.rs b/modules/src/core/ics04_channel/handler/send_packet.rs index 6023f79ee0..c2e5b767dd 100644 --- a/modules/src/core/ics04_channel/handler/send_packet.rs +++ b/modules/src/core/ics04_channel/handler/send_packet.rs @@ -1,3 +1,4 @@ +use crate::clients::crypto_ops::crypto::CryptoOps; use crate::core::ics02_client::client_state::ClientState; use crate::core::ics04_channel::channel::Counterparty; use crate::core::ics04_channel::channel::State; @@ -21,8 +22,8 @@ pub struct SendPacketResult { pub commitment: PacketCommitment, } -pub fn send_packet( - ctx: &dyn LightClientContext, +pub fn send_packet( + ctx: &dyn LightClientContext, packet: Packet, ) -> HandlerResult { let mut output = HandlerOutput::builder(); diff --git a/modules/src/core/ics04_channel/handler/timeout.rs b/modules/src/core/ics04_channel/handler/timeout.rs index 0c45e2709e..88befedeaf 100644 --- a/modules/src/core/ics04_channel/handler/timeout.rs +++ b/modules/src/core/ics04_channel/handler/timeout.rs @@ -14,6 +14,7 @@ use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; use crate::timestamp::Expiry; +use core::fmt::Debug; #[derive(Clone, Debug)] pub struct TimeoutPacketResult { @@ -23,8 +24,8 @@ pub struct TimeoutPacketResult { pub channel: Option, } -pub fn process( - ctx: &dyn LightClientContext, +pub fn process( + ctx: &dyn LightClientContext, msg: &MsgTimeout, ) -> HandlerResult { let mut output = HandlerOutput::builder(); diff --git 
a/modules/src/core/ics04_channel/handler/timeout_on_close.rs b/modules/src/core/ics04_channel/handler/timeout_on_close.rs index 0e944e7d19..07e2014ef1 100644 --- a/modules/src/core/ics04_channel/handler/timeout_on_close.rs +++ b/modules/src/core/ics04_channel/handler/timeout_on_close.rs @@ -13,9 +13,10 @@ use crate::core::ics26_routing::context::LightClientContext; use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; +use core::fmt::Debug; -pub fn process( - ctx: &dyn LightClientContext, +pub fn process( + ctx: &dyn LightClientContext, msg: &MsgTimeoutOnClose, ) -> HandlerResult { let mut output = HandlerOutput::builder(); diff --git a/modules/src/core/ics04_channel/handler/verify.rs b/modules/src/core/ics04_channel/handler/verify.rs index b5e9d29106..fe0e854138 100644 --- a/modules/src/core/ics04_channel/handler/verify.rs +++ b/modules/src/core/ics04_channel/handler/verify.rs @@ -11,10 +11,11 @@ use crate::core::ics26_routing::context::LightClientContext; use crate::prelude::*; use crate::proofs::Proofs; use crate::Height; +use core::fmt::Debug; /// Entry point for verifying all proofs bundled in any ICS4 message for channel protocols. -pub fn verify_channel_proofs( - ctx: &dyn LightClientContext, +pub fn verify_channel_proofs( + ctx: &dyn LightClientContext, height: Height, channel_end: &ChannelEnd, connection_end: &ConnectionEnd, @@ -56,8 +57,8 @@ pub fn verify_channel_proofs( } /// Entry point for verifying all proofs bundled in a ICS4 packet recv. message. -pub fn verify_packet_recv_proofs( - ctx: &dyn LightClientContext, +pub fn verify_packet_recv_proofs( + ctx: &dyn LightClientContext, height: Height, packet: &Packet, connection_end: &ConnectionEnd, @@ -104,8 +105,10 @@ pub fn verify_packet_recv_proofs( } /// Entry point for verifying all proofs bundled in an ICS4 packet ack message. -pub fn verify_packet_acknowledgement_proofs( - ctx: &dyn LightClientContext, +pub fn verify_packet_acknowledgement_proofs< + Crypto: CryptoOps + Debug + Send + Sync + PartialEq + Eq, +>( + ctx: &dyn LightClientContext, height: Height, packet: &Packet, acknowledgement: Acknowledgement, @@ -149,8 +152,8 @@ pub fn verify_packet_acknowledgement_proofs( } /// Entry point for verifying all timeout proofs. 
-pub fn verify_next_sequence_recv( - ctx: &dyn LightClientContext, +pub fn verify_next_sequence_recv( + ctx: &dyn LightClientContext, height: Height, connection_end: &ConnectionEnd, packet: Packet, @@ -190,8 +193,8 @@ pub fn verify_next_sequence_recv( Ok(()) } -pub fn verify_packet_receipt_absence( - ctx: &dyn LightClientContext, +pub fn verify_packet_receipt_absence( + ctx: &dyn LightClientContext, height: Height, connection_end: &ConnectionEnd, packet: Packet, diff --git a/modules/src/core/ics04_channel/handler/write_acknowledgement.rs b/modules/src/core/ics04_channel/handler/write_acknowledgement.rs index e86f1f5821..f60d6a126b 100644 --- a/modules/src/core/ics04_channel/handler/write_acknowledgement.rs +++ b/modules/src/core/ics04_channel/handler/write_acknowledgement.rs @@ -1,3 +1,4 @@ +use crate::clients::crypto_ops::crypto::CryptoOps; use crate::core::ics04_channel::channel::State; use crate::core::ics04_channel::commitment::AcknowledgementCommitment; use crate::core::ics04_channel::error::Error; @@ -19,8 +20,8 @@ pub struct WriteAckPacketResult { pub ack_commitment: AcknowledgementCommitment, } -pub fn process( - ctx: &dyn LightClientContext, +pub fn process( + ctx: &dyn LightClientContext, packet: Packet, ack: Vec, ) -> HandlerResult { diff --git a/modules/src/core/ics26_routing/handler.rs b/modules/src/core/ics26_routing/handler.rs index 5928d2e397..79aca9fa48 100644 --- a/modules/src/core/ics26_routing/handler.rs +++ b/modules/src/core/ics26_routing/handler.rs @@ -1,9 +1,11 @@ use crate::clients::crypto_ops::crypto::CryptoOps; use crate::prelude::*; +use core::fmt::Debug; use ibc_proto::google::protobuf::Any; use crate::applications::ics20_fungible_token_transfer::relay_application_logic::send_transfer::send_transfer as ics20_msg_dispatcher; +use crate::core::ics02_client::context::{ClientKeeper, ClientReader}; use crate::core::ics02_client::handler::dispatch as ics2_msg_dispatcher; use crate::core::ics03_connection::handler::dispatch as ics3_msg_dispatcher; use crate::core::ics04_channel::handler::{ @@ -37,11 +39,11 @@ pub fn deliver( message: Any, ) -> Result<(Vec, Vec), Error> where - Ctx: Ics26Context, - Crypto: CryptoOps, + Ctx: Ics26Context + ClientReader + ClientKeeper, + Crypto: CryptoOps + Debug + Send + Sync + PartialEq + Eq, { // Decode the proto message into a domain message, creating an ICS26 envelope. - let envelope = decode(message)?; + let envelope = decode::(message)?; // Process the envelope, and accumulate any events that were generated. let output = dispatch::<_, Crypto>(ctx, envelope)?; @@ -50,7 +52,7 @@ where } /// Attempts to convert a message into a [Ics26Envelope] message -pub fn decode(message: Any) -> Result { +pub fn decode(message: Any) -> Result, Error> { message.try_into() } @@ -59,10 +61,13 @@ pub fn decode(message: Any) -> Result { /// and events produced after processing the input `msg`. /// If this method returns an error, the runtime is expected to rollback all state modifications to /// the `Ctx` caused by all messages from the transaction that this `msg` is a part of. 
-pub fn dispatch(ctx: &mut Ctx, msg: Ics26Envelope) -> Result, Error> +pub fn dispatch( + ctx: &mut Ctx, + msg: Ics26Envelope, +) -> Result, Error> where - Ctx: Ics26Context, - Crypto: CryptoOps, + Ctx: Ics26Context + ClientReader + ClientKeeper, + Crypto: CryptoOps + Debug + Send + Sync + PartialEq + Eq, { let output = match msg { Ics2Msg(msg) => { @@ -111,6 +116,20 @@ where handler_builder.with_result(()) } + Ics20Msg(msg) => { + let handler_output = ics20_msg_dispatcher::<_, Crypto>(ctx, msg) + .map_err(Error::ics20_fungible_token_transfer)?; + + // Apply any results to the host chain store. + ctx.store_packet_result(handler_output.result) + .map_err(Error::ics04_channel)?; + + HandlerOutput::builder() + .with_log(handler_output.log) + .with_events(handler_output.events) + .with_result(()) + } + Ics4PacketMsg(msg) => { let module_id = get_module_for_packet_msg(ctx, &msg).map_err(Error::ics04_channel)?; let (mut handler_builder, packet_result) = @@ -218,7 +237,7 @@ mod tests { // Test parameters struct Test { name: String, - msg: TestMsg, + msg: Ics26Envelope, want_pass: bool, } let default_signer = get_dummy_account_id(); @@ -248,7 +267,7 @@ mod tests { let create_client_msg = MsgCreateAnyClient::new( AnyClientState::from(MockClientState::new(MockHeader::new(start_client_height))), - Some(AnyConsensusState::Mock(MockConsensusState::new( + Some(AnyConsensusState::Mock(MockConsensusState::::new( MockHeader::new(start_client_height), ))), default_signer.clone(), diff --git a/modules/src/core/ics26_routing/msgs.rs b/modules/src/core/ics26_routing/msgs.rs index 3f2306e6e9..3f3b638e00 100644 --- a/modules/src/core/ics26_routing/msgs.rs +++ b/modules/src/core/ics26_routing/msgs.rs @@ -15,14 +15,14 @@ use tendermint_proto::Protobuf; /// Enumeration of all messages that the local ICS26 module is capable of routing. 
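// Illustrative sketch (not part of the patch) of the decode-then-dispatch
// flow in `deliver` above: a protobuf `Any` is first converted into a typed
// envelope, then routed to the matching module handler. The real functions
// are additionally generic over the context and the Crypto provider and
// route many more message types.
pub struct Any {
    pub type_url: String,
    pub value: Vec<u8>,
}

pub enum Envelope {
    Client(Vec<u8>),
    Packet(Vec<u8>),
}

pub fn decode(msg: Any) -> Result<Envelope, String> {
    match msg.type_url.as_str() {
        "/ibc.core.client.v1.MsgUpdateClient" => Ok(Envelope::Client(msg.value)),
        "/ibc.core.channel.v1.MsgRecvPacket" => Ok(Envelope::Packet(msg.value)),
        other => Err(format!("unknown type url: {other}")),
    }
}

pub fn dispatch(env: Envelope) -> &'static str {
    // Each arm would hand the message to its module handler and fold the
    // handler's log and events into the overall output.
    match env {
        Envelope::Client(_) => "routed to the ICS02 client handler",
        Envelope::Packet(_) => "routed to the ICS04 packet handler",
    }
}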
#[derive(Clone, Debug)] -pub enum Ics26Envelope { - Ics2Msg(ClientMsg), +pub enum Ics26Envelope { + Ics2Msg(ClientMsg), Ics3Msg(ConnectionMsg), Ics4ChannelMsg(ChannelMsg), Ics4PacketMsg(PacketMsg), } -impl TryFrom for Ics26Envelope { +impl TryFrom for Ics26Envelope { type Error = Error; fn try_from(any_msg: Any) -> Result { diff --git a/modules/src/mock/client_def.rs b/modules/src/mock/client_def.rs index 8b57e6bc4d..1404eb408c 100644 --- a/modules/src/mock/client_def.rs +++ b/modules/src/mock/client_def.rs @@ -1,3 +1,4 @@ +use crate::clients::crypto_ops::crypto::CryptoOps; use crate::core::ics02_client::client_consensus::AnyConsensusState; use crate::core::ics02_client::client_def::{ClientDef, ConsensusUpdateResult}; use crate::core::ics02_client::client_state::AnyClientState; @@ -19,21 +20,22 @@ use crate::mock::header::MockHeader; use crate::prelude::*; use crate::Height; -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct MockClient; +#[derive(Clone, Debug, Default, PartialEq, Eq)] +pub struct MockClient(core::marker::PhantomData); -impl ClientDef for MockClient { +impl ClientDef for MockClient { type Header = MockHeader; type ClientState = MockClientState; - type ConsensusState = MockConsensusState; + type ConsensusState = MockConsensusState; + type Crypto = Crypto; fn update_state( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn LightClientContext, _client_id: ClientId, client_state: Self::ClientState, header: Self::Header, - ) -> Result<(Self::ClientState, ConsensusUpdateResult), Error> { + ) -> Result<(Self::ClientState, ConsensusUpdateResult), Error> { if client_state.latest_height() >= header.height() { return Err(Error::low_header_height( header.height(), @@ -49,7 +51,7 @@ impl ClientDef for MockClient { fn verify_client_consensus_state( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn LightClientContext, _client_state: &Self::ClientState, _height: Height, prefix: &CommitmentPrefix, @@ -57,7 +59,7 @@ impl ClientDef for MockClient { _root: &CommitmentRoot, client_id: &ClientId, consensus_height: Height, - _expected_consensus_state: &AnyConsensusState, + _expected_consensus_state: &AnyConsensusState, ) -> Result<(), Error> { let client_prefixed_path = Path::ClientConsensusState(ClientConsensusStatePath { client_id: client_id.clone(), @@ -73,7 +75,7 @@ impl ClientDef for MockClient { fn verify_connection_state( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn LightClientContext, _client_id: &ClientId, _client_state: &Self::ClientState, _height: Height, @@ -88,7 +90,7 @@ impl ClientDef for MockClient { fn verify_channel_state( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn LightClientContext, _client_id: &ClientId, _client_state: &Self::ClientState, _height: Height, @@ -104,7 +106,7 @@ impl ClientDef for MockClient { fn verify_client_full_state( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn LightClientContext, _client_state: &Self::ClientState, _height: Height, _prefix: &CommitmentPrefix, @@ -118,7 +120,7 @@ impl ClientDef for MockClient { fn verify_packet_data( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn LightClientContext, _client_id: &ClientId, _client_state: &Self::ClientState, _height: Height, @@ -135,7 +137,7 @@ impl ClientDef for MockClient { fn verify_packet_acknowledgement( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn LightClientContext, _client_id: &ClientId, _client_state: &Self::ClientState, _height: Height, @@ -152,7 +154,7 @@ impl ClientDef for MockClient { fn verify_next_sequence_recv( &self, - _ctx: &dyn LightClientContext, 
+ _ctx: &dyn LightClientContext, _client_id: &ClientId, _client_state: &Self::ClientState, _height: Height, @@ -168,7 +170,7 @@ impl ClientDef for MockClient { fn verify_packet_receipt_absence( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn LightClientContext, _client_id: &ClientId, _client_state: &Self::ClientState, _height: Height, @@ -188,7 +190,7 @@ impl ClientDef for MockClient { consensus_state: &Self::ConsensusState, _proof_upgrade_client: Vec, _proof_upgrade_consensus_state: Vec, - ) -> Result<(Self::ClientState, ConsensusUpdateResult), Error> { + ) -> Result<(Self::ClientState, ConsensusUpdateResult), Error> { Ok(( *client_state, ConsensusUpdateResult::Single(AnyConsensusState::Mock(consensus_state.clone())), @@ -197,7 +199,7 @@ impl ClientDef for MockClient { fn verify_header( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn LightClientContext, _client_id: ClientId, _client_state: Self::ClientState, _header: Self::Header, @@ -215,7 +217,7 @@ impl ClientDef for MockClient { fn check_for_misbehaviour( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn LightClientContext, _client_id: ClientId, _client_state: Self::ClientState, _header: Self::Header, diff --git a/modules/src/mock/client_state.rs b/modules/src/mock/client_state.rs index aa4d7863ca..f2000608b4 100644 --- a/modules/src/mock/client_state.rs +++ b/modules/src/mock/client_state.rs @@ -1,8 +1,10 @@ use crate::prelude::*; +use crate::test_utils::Crypto; use alloc::collections::btree_map::BTreeMap as HashMap; use core::convert::Infallible; +use core::fmt::Debug; use core::time::Duration; use serde::{Deserialize, Serialize}; @@ -32,7 +34,7 @@ pub struct MockClientRecord { pub client_state: Option, /// Mapping of heights to consensus states for this client. - pub consensus_states: HashMap, + pub consensus_states: HashMap>, } /// A mock of a client state. 
For an example of a real structure that this mocks, you can see @@ -119,23 +121,25 @@ impl ClientState for MockClientState { } } -impl From for MockClientState { - fn from(cs: MockConsensusState) -> Self { +impl From> for MockClientState { + fn from(cs: MockConsensusState) -> Self { Self::new(cs.header) } } #[derive(Clone, Debug, PartialEq, Eq, Serialize)] -pub struct MockConsensusState { +pub struct MockConsensusState { pub header: MockHeader, pub root: CommitmentRoot, + _phantom: core::marker::PhantomData, } -impl MockConsensusState { +impl MockConsensusState { pub fn new(header: MockHeader) -> Self { MockConsensusState { header, root: CommitmentRoot::from(vec![0]), + _phantom: Default::default(), } } @@ -144,9 +148,9 @@ impl MockConsensusState { } } -impl Protobuf for MockConsensusState {} +impl Protobuf for MockConsensusState {} -impl TryFrom for MockConsensusState { +impl TryFrom for MockConsensusState { type Error = Error; fn try_from(raw: RawMockConsensusState) -> Result { @@ -155,12 +159,13 @@ impl TryFrom for MockConsensusState { Ok(Self { header: MockHeader::try_from(raw_header)?, root: CommitmentRoot::from(vec![0]), + _phantom: Default::default(), }) } } -impl From for RawMockConsensusState { - fn from(value: MockConsensusState) -> Self { +impl From> for RawMockConsensusState { + fn from(value: MockConsensusState) -> Self { RawMockConsensusState { header: Some(ibc_proto::ibc::mock::Header { height: Some(value.header.height().into()), @@ -170,13 +175,13 @@ impl From for RawMockConsensusState { } } -impl From for AnyConsensusState { - fn from(mcs: MockConsensusState) -> Self { +impl From> for AnyConsensusState { + fn from(mcs: MockConsensusState) -> Self { Self::Mock(mcs) } } -impl ConsensusState for MockConsensusState { +impl ConsensusState for MockConsensusState { type Error = Infallible; fn client_type(&self) -> ClientType { @@ -187,7 +192,7 @@ impl ConsensusState for MockConsensusState { &self.root } - fn wrap_any(self) -> AnyConsensusState { + fn wrap_any(self) -> AnyConsensusState { AnyConsensusState::Mock(self) } } diff --git a/modules/src/mock/context.rs b/modules/src/mock/context.rs index 4c95d88b0b..8fe80d6ad6 100644 --- a/modules/src/mock/context.rs +++ b/modules/src/mock/context.rs @@ -457,7 +457,7 @@ impl MockContext { /// A datagram passes from the relayer to the IBC module (on host chain). /// Alternative method to `Ics18Context::send` that does not exercise any serialization. /// Used in testing the Ics18 algorithms, hence this may return a Ics18Error. - pub fn deliver(&mut self, msg: Ics26Envelope) -> Result<(), Ics18Error> { + pub fn deliver(&mut self, msg: Ics26Envelope) -> Result<(), Ics18Error> { dispatch::<_, Crypto>(self, msg).map_err(Ics18Error::transaction_failed)?; // Create a new block. 
self.advance_host_chain_height(); @@ -509,8 +509,11 @@ impl MockContext { .insert(port_id, module_id); } - pub fn consensus_states(&self, client_id: &ClientId) -> Vec { - self.ibc_store.lock().unwrap().clients[client_id] + pub fn consensus_states( + &self, + client_id: &ClientId, + ) -> Vec> { + self.clients[client_id] .consensus_states .iter() .map(|(k, v)| AnyConsensusStateWithHeight { @@ -532,8 +535,8 @@ impl MockContext { &self, client_id: &ClientId, height: &Height, - ) -> AnyConsensusState { - self.ibc_store.lock().unwrap().clients[client_id] + ) -> &AnyConsensusState { + self.clients[client_id] .consensus_states .get(height) .unwrap() @@ -1008,6 +1011,7 @@ impl ConnectionKeeper for MockContext { } impl ClientReader for MockContext { + type Crypto = Crypto; fn client_type(&self, client_id: &ClientId) -> Result { match self.ibc_store.lock().unwrap().clients.get(client_id) { Some(client_record) => Ok(client_record.client_type), @@ -1029,8 +1033,8 @@ impl ClientReader for MockContext { &self, client_id: &ClientId, height: Height, - ) -> Result { - match self.ibc_store.lock().unwrap().clients.get(client_id) { + ) -> Result, Ics02Error> { + match self.clients.get(client_id) { Some(client_record) => match client_record.consensus_states.get(&height) { Some(consensus_state) => Ok(consensus_state.clone()), None => Err(Ics02Error::consensus_state_not_found( @@ -1050,10 +1054,8 @@ impl ClientReader for MockContext { &self, client_id: &ClientId, height: Height, - _filter_fn: Option bool>>, - ) -> Result, Ics02Error> { - let ibc_store = self.ibc_store.lock().unwrap(); - let client_record = ibc_store + ) -> Result>, Ics02Error> { + let client_record = self .clients .get(client_id) .ok_or_else(|| Ics02Error::client_not_found(client_id.clone()))?; @@ -1079,10 +1081,8 @@ impl ClientReader for MockContext { &self, client_id: &ClientId, height: Height, - _filter_fn: Option bool>>, - ) -> Result, Ics02Error> { - let ibc_store = self.ibc_store.lock().unwrap(); - let client_record = ibc_store + ) -> Result>, Ics02Error> { + let client_record = self .clients .get(client_id) .ok_or_else(|| Ics02Error::client_not_found(client_id.clone()))?; @@ -1116,14 +1116,17 @@ impl ClientReader for MockContext { .unwrap() } - fn host_consensus_state(&self, height: Height) -> Result { + fn host_consensus_state( + &self, + height: Height, + ) -> Result, Ics02Error> { match self.host_block(height) { Some(block_ref) => Ok(block_ref.clone().into()), None => Err(Ics02Error::missing_local_consensus_state(height)), } } - fn pending_host_consensus_state(&self) -> Result { + fn pending_host_consensus_state(&self) -> Result, Ics02Error> { Err(Ics02Error::missing_local_consensus_state(Height::zero())) } @@ -1133,6 +1136,7 @@ impl ClientReader for MockContext { } impl ClientKeeper for MockContext { + type Crypto = Crypto; fn store_client_type( &mut self, client_id: ClientId, @@ -1175,7 +1179,7 @@ impl ClientKeeper for MockContext { &mut self, client_id: ClientId, height: Height, - consensus_state: AnyConsensusState, + consensus_state: AnyConsensusState, ) -> Result<(), Ics02Error> { let mut ibc_store = self.ibc_store.lock().unwrap(); let client_record = ibc_store diff --git a/modules/src/mock/header.rs b/modules/src/mock/header.rs index cc3c38b092..f140d9852c 100644 --- a/modules/src/mock/header.rs +++ b/modules/src/mock/header.rs @@ -9,6 +9,7 @@ use crate::core::ics02_client::error::Error; use crate::core::ics02_client::header::AnyHeader; use crate::core::ics02_client::header::Header; use 
crate::mock::client_state::MockConsensusState; +use crate::test_utils::Crypto; use crate::timestamp::Timestamp; use crate::Height; @@ -79,7 +80,7 @@ impl Header for MockHeader { } } -impl From for AnyConsensusState { +impl From for AnyConsensusState { fn from(h: MockHeader) -> Self { AnyConsensusState::Mock(MockConsensusState::new(h)) } diff --git a/modules/src/mock/host.rs b/modules/src/mock/host.rs index d94b90f893..a4e64f38d9 100644 --- a/modules/src/mock/host.rs +++ b/modules/src/mock/host.rs @@ -10,6 +10,7 @@ use crate::core::ics02_client::header::AnyHeader; use crate::core::ics24_host::identifier::ChainId; use crate::mock::header::MockHeader; use crate::prelude::*; +use crate::test_utils::Crypto; use crate::timestamp::Timestamp; use crate::Height; @@ -83,14 +84,14 @@ impl HostBlock { } } -impl From for AnyConsensusState { +impl From for AnyConsensusState { fn from(light_block: TmLightBlock) -> Self { - let cs = TMConsensusState::from(light_block.signed_header.header); + let cs = TMConsensusState::::from(light_block.signed_header.header); AnyConsensusState::Tendermint(cs) } } -impl From for AnyConsensusState { +impl From for AnyConsensusState { fn from(any_block: HostBlock) -> Self { match any_block { HostBlock::Mock(mock_header) => mock_header.into(), diff --git a/modules/src/relayer/ics18_relayer/utils.rs b/modules/src/relayer/ics18_relayer/utils.rs index 060cef55b5..8cae83dbdd 100644 --- a/modules/src/relayer/ics18_relayer/utils.rs +++ b/modules/src/relayer/ics18_relayer/utils.rs @@ -1,3 +1,4 @@ +use crate::clients::crypto_ops::crypto::CryptoOps; use crate::core::ics02_client::header::AnyHeader; use crate::core::ics02_client::msgs::update_client::MsgUpdateAnyClient; use crate::core::ics02_client::msgs::ClientMsg; @@ -7,13 +8,14 @@ use crate::relayer::ics18_relayer::error::Error; /// Builds a `ClientMsg::UpdateClient` for a client with id `client_id` running on the `dest` /// context, assuming that the latest header on the source context is `src_header`. 
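// Illustrative sketch (not part of the patch) of the flow described in the
// doc comment above, with simplified stand-in types rather than the crate's
// Ics18Context / AnyHeader: read the client's latest height on the
// destination and only build an update message when the source header is
// ahead of it.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub struct Height(pub u64);

pub struct Header {
    pub height: Height,
}

pub struct MsgUpdateClient {
    pub client_id: String,
    pub header: Header,
}

pub fn build_client_update(
    dest_latest_client_height: Height,
    client_id: &str,
    src_header: Header,
) -> Option<MsgUpdateClient> {
    if src_header.height <= dest_latest_client_height {
        // Destination already knows this height; nothing to relay.
        return None;
    }
    Some(MsgUpdateClient {
        client_id: client_id.to_string(),
        header: src_header,
    })
}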
-pub fn build_client_update_datagram( +pub fn build_client_update_datagram( dest: &Ctx, client_id: &ClientId, src_header: AnyHeader, -) -> Result +) -> Result, Error> where Ctx: Ics18Context, + Crypto: CryptoOps, { // Check if client for ibc0 on ibc1 has been updated to latest height: // - query client state on destination chain @@ -58,6 +60,7 @@ mod tests { use crate::prelude::*; use crate::relayer::ics18_relayer::context::Ics18Context; use crate::relayer::ics18_relayer::utils::build_client_update_datagram; + use crate::test_utils::Crypto; use crate::Height; use test_log::test; use tracing::debug; @@ -171,8 +174,11 @@ mod tests { ClientType::Tendermint ); - let client_msg_a_res = - build_client_update_datagram(&ctx_a, &client_on_a_for_b, b_latest_header); + let client_msg_a_res = build_client_update_datagram::<_, Crypto>( + &ctx_a, + &client_on_a_for_b, + b_latest_header, + ); assert!( client_msg_a_res.is_ok(), diff --git a/modules/src/test_utils.rs b/modules/src/test_utils.rs index 219249018f..8958f02b08 100644 --- a/modules/src/test_utils.rs +++ b/modules/src/test_utils.rs @@ -85,7 +85,7 @@ impl Module for DummyTransferModule { } } -#[derive(Clone)] +#[derive(Clone, Debug)] pub struct Crypto; impl HostFunctions for Crypto { diff --git a/proto/src/prost/ibc.lightclients.beefy.v1.rs b/proto/src/prost/ibc.lightclients.beefy.v1.rs index defe33d827..2cc15c0ffa 100644 --- a/proto/src/prost/ibc.lightclients.beefy.v1.rs +++ b/proto/src/prost/ibc.lightclients.beefy.v1.rs @@ -146,8 +146,8 @@ pub struct ParachainHeader { pub heads_total_count: u32, /// trie merkle proof of inclusion in header.extrinsic_root /// this already encodes the actual extrinsic - #[prost(bytes = "vec", repeated, tag = "7")] - pub extrinsic_proof: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec>, + #[prost(bytes = "vec", tag = "7")] + pub extrinsic_proof: ::prost::alloc::vec::Vec, } /// Partial data for MmrLeaf #[derive(Clone, PartialEq, ::prost::Message)] From 3a473cdb3ea9a896fe0bc6436390b3e1a56a35ad Mon Sep 17 00:00:00 2001 From: David Salami Date: Wed, 18 May 2022 16:36:06 +0100 Subject: [PATCH 18/96] fix tests --- modules/src/clients/ics11_beefy/consensus_state.rs | 3 ++- .../src/core/ics02_client/handler/create_client.rs | 12 +++++++----- .../src/core/ics02_client/handler/update_client.rs | 8 +++++--- .../src/core/ics02_client/handler/upgrade_client.rs | 6 ++++-- modules/src/core/ics02_client/msgs/create_client.rs | 6 ++++-- modules/src/core/ics02_client/msgs/upgrade_client.rs | 6 ++++-- modules/src/mock/client_def.rs | 11 +++++++++-- modules/src/mock/client_state.rs | 12 ++++++++---- modules/src/test_utils.rs | 2 +- 9 files changed, 44 insertions(+), 22 deletions(-) diff --git a/modules/src/clients/ics11_beefy/consensus_state.rs b/modules/src/clients/ics11_beefy/consensus_state.rs index a0773662e3..35ec56253d 100644 --- a/modules/src/clients/ics11_beefy/consensus_state.rs +++ b/modules/src/clients/ics11_beefy/consensus_state.rs @@ -210,8 +210,9 @@ impl TryFrom for ConsensusState { #[cfg(any(test, feature = "mocks"))] pub mod test_util { use super::*; + use crate::test_utils::Crypto; - pub fn get_dummy_beefy_consensus_state() -> AnyConsensusState { + pub fn get_dummy_beefy_consensus_state() -> AnyConsensusState { AnyConsensusState::Beefy(ConsensusState { timestamp: Time::now(), root: vec![0; 32].into(), diff --git a/modules/src/core/ics02_client/handler/create_client.rs b/modules/src/core/ics02_client/handler/create_client.rs index b8eb6198fb..62041ffdd0 100644 --- 
a/modules/src/core/ics02_client/handler/create_client.rs +++ b/modules/src/core/ics02_client/handler/create_client.rs @@ -103,7 +103,7 @@ mod tests { let msg = MsgCreateAnyClient::new( MockClientState::new(MockHeader::new(height)).into(), - Some(MockConsensusState::new(MockHeader::new(height)).into()), + Some(MockConsensusState::::new(MockHeader::new(height)).into()), signer, ) .unwrap(); @@ -147,7 +147,7 @@ mod tests { let ctx = MockContext::default().with_client(&existing_client_id, height); - let create_client_msgs: Vec = vec![ + let create_client_msgs: Vec> = vec![ MsgCreateAnyClient::new( MockClientState::new(MockHeader::new(Height { revision_height: 42, @@ -155,7 +155,7 @@ mod tests { })) .into(), Some( - MockConsensusState::new(MockHeader::new(Height { + MockConsensusState::::new(MockHeader::new(Height { revision_height: 42, ..height })) @@ -171,7 +171,7 @@ mod tests { })) .into(), Some( - MockConsensusState::new(MockHeader::new(Height { + MockConsensusState::::new(MockHeader::new(Height { revision_height: 42, ..height })) @@ -264,7 +264,9 @@ mod tests { let msg = MsgCreateAnyClient::new( tm_client_state, - Some(AnyConsensusState::Tendermint(tm_header.try_into().unwrap())), + Some(AnyConsensusState::::Tendermint( + tm_header.try_into().unwrap(), + )), signer, ) .unwrap(); diff --git a/modules/src/core/ics02_client/handler/update_client.rs b/modules/src/core/ics02_client/handler/update_client.rs index 4a3b9659de..7983b890a2 100644 --- a/modules/src/core/ics02_client/handler/update_client.rs +++ b/modules/src/core/ics02_client/handler/update_client.rs @@ -131,8 +131,8 @@ mod tests { use test_log::test; use crate::clients::ics11_beefy::client_state::ClientState as BeefyClientState; - use crate::clients::ics11_beefy::header::BeefyHeader; use crate::clients::ics11_beefy::header::ParachainHeader as BeefyParachainHeader; + use crate::clients::ics11_beefy::header::{BeefyHeader, ExtrinsicProof}; use crate::clients::ics11_beefy::polkadot_runtime as runtime; use crate::core::ics02_client::client_consensus::AnyConsensusState; use crate::core::ics02_client::client_state::{AnyClientState, ClientState}; @@ -916,6 +916,7 @@ mod tests { .into_iter() .map(|e| e.encode()) .collect::>(); + let timestamp_ext = extrinsics[0].clone(); let mut db = sp_trie::MemoryDB::::default(); @@ -939,7 +940,7 @@ mod tests { vec![&key], ) .unwrap(); - + let extrinsic_proof = ExtrinsicProof(timestamp_ext, extrinsic_proof).encode(); let header = ParachainHeader { parachain_header: para_headers.get(&PARA_ID).unwrap().clone(), partial_mmr_leaf: PartialMmrLeaf { @@ -975,7 +976,8 @@ mod tests { parachain_heads_proof: header.parachain_heads_proof, heads_leaf_index: header.heads_leaf_index, heads_total_count: header.heads_total_count, - extrinsic_proof: header.extrinsic_proof, + extrinsic_proof: ExtrinsicProof::decode(&mut &*header.extrinsic_proof) + .unwrap(), }) .collect(), mmr_proofs: batch_proof diff --git a/modules/src/core/ics02_client/handler/upgrade_client.rs b/modules/src/core/ics02_client/handler/upgrade_client.rs index fe71798063..a99ffcfee8 100644 --- a/modules/src/core/ics02_client/handler/upgrade_client.rs +++ b/modules/src/core/ics02_client/handler/upgrade_client.rs @@ -152,7 +152,8 @@ mod tests { let msg = MsgUpgradeAnyClient { client_id: ClientId::from_str("nonexistingclient").unwrap(), client_state: MockClientState::new(MockHeader::new(Height::new(1, 26))).into(), - consensus_state: MockConsensusState::new(MockHeader::new(Height::new(1, 26))).into(), + consensus_state: 
MockConsensusState::::new(MockHeader::new(Height::new(1, 26))) + .into(), proof_upgrade_client: Default::default(), proof_upgrade_consensus_state: Default::default(), signer, @@ -180,7 +181,8 @@ mod tests { let msg = MsgUpgradeAnyClient { client_id, client_state: MockClientState::new(MockHeader::new(Height::new(0, 26))).into(), - consensus_state: MockConsensusState::new(MockHeader::new(Height::new(0, 26))).into(), + consensus_state: MockConsensusState::::new(MockHeader::new(Height::new(0, 26))) + .into(), proof_upgrade_client: Default::default(), proof_upgrade_consensus_state: Default::default(), signer, diff --git a/modules/src/core/ics02_client/msgs/create_client.rs b/modules/src/core/ics02_client/msgs/create_client.rs index a36157971a..2a1e91ee6a 100644 --- a/modules/src/core/ics02_client/msgs/create_client.rs +++ b/modules/src/core/ics02_client/msgs/create_client.rs @@ -104,7 +104,7 @@ mod tests { use crate::clients::ics07_tendermint::header::test_util::get_dummy_tendermint_header; use crate::core::ics02_client::client_consensus::AnyConsensusState; use crate::core::ics02_client::msgs::MsgCreateAnyClient; - use crate::test_utils::get_dummy_account_id; + use crate::test_utils::{get_dummy_account_id, Crypto}; #[test] fn msg_create_client_serialization() { @@ -115,7 +115,9 @@ mod tests { let msg = MsgCreateAnyClient::new( tm_client_state, - Some(AnyConsensusState::Tendermint(tm_header.try_into().unwrap())), + Some(AnyConsensusState::::Tendermint( + tm_header.try_into().unwrap(), + )), signer, ) .unwrap(); diff --git a/modules/src/core/ics02_client/msgs/upgrade_client.rs b/modules/src/core/ics02_client/msgs/upgrade_client.rs index a1948e97b1..5142093b5c 100644 --- a/modules/src/core/ics02_client/msgs/upgrade_client.rs +++ b/modules/src/core/ics02_client/msgs/upgrade_client.rs @@ -134,7 +134,8 @@ pub mod test_util { AnyClientState::Mock(MockClientState::new(MockHeader::new(height))).into(), ), consensus_state: Some( - AnyConsensusState::Mock(MockConsensusState::new(MockHeader::new(height))).into(), + AnyConsensusState::Mock(MockConsensusState::::new(MockHeader::new(height))) + .into(), ), proof_upgrade_client: get_dummy_proof(), proof_upgrade_consensus_state: get_dummy_proof(), @@ -149,6 +150,7 @@ mod tests { use alloc::vec::Vec; use ibc_proto::ibc::core::client::v1::MsgUpgradeClient as RawMsgUpgradeClient; + use crate::test_utils::Crypto; use crate::{ core::{ ics02_client::{ @@ -174,7 +176,7 @@ mod tests { let client_state = AnyClientState::Mock(MockClientState::new(MockHeader::new(height))); let consensus_state = - AnyConsensusState::Mock(MockConsensusState::new(MockHeader::new(height))); + AnyConsensusState::Mock(MockConsensusState::::new(MockHeader::new(height))); let proof = get_dummy_merkle_proof(); let mut proof_buf = Vec::new(); diff --git a/modules/src/mock/client_def.rs b/modules/src/mock/client_def.rs index 1404eb408c..b095f14730 100644 --- a/modules/src/mock/client_def.rs +++ b/modules/src/mock/client_def.rs @@ -19,11 +19,18 @@ use crate::mock::client_state::{MockClientState, MockConsensusState}; use crate::mock::header::MockHeader; use crate::prelude::*; use crate::Height; +use core::fmt::Debug; -#[derive(Clone, Debug, Default, PartialEq, Eq)] +#[derive(Clone, Debug, PartialEq, Eq)] pub struct MockClient(core::marker::PhantomData); -impl ClientDef for MockClient { +impl Default for MockClient { + fn default() -> Self { + Self(Default::default()) + } +} + +impl ClientDef for MockClient { type Header = MockHeader; type ClientState = MockClientState; type ConsensusState = 
MockConsensusState; diff --git a/modules/src/mock/client_state.rs b/modules/src/mock/client_state.rs index f2000608b4..639cc8a1f0 100644 --- a/modules/src/mock/client_state.rs +++ b/modules/src/mock/client_state.rs @@ -10,6 +10,7 @@ use core::time::Duration; use serde::{Deserialize, Serialize}; use tendermint_proto::Protobuf; +use crate::clients::crypto_ops::crypto::CryptoOps; use ibc_proto::ibc::mock::ClientState as RawMockClientState; use ibc_proto::ibc::mock::ConsensusState as RawMockConsensusState; @@ -148,9 +149,9 @@ impl MockConsensusState { } } -impl Protobuf for MockConsensusState {} +impl Protobuf for MockConsensusState {} -impl TryFrom for MockConsensusState { +impl TryFrom for MockConsensusState { type Error = Error; fn try_from(raw: RawMockConsensusState) -> Result { @@ -164,7 +165,7 @@ impl TryFrom for MockConsensusState { } } -impl From> for RawMockConsensusState { +impl From> for RawMockConsensusState { fn from(value: MockConsensusState) -> Self { RawMockConsensusState { header: Some(ibc_proto::ibc::mock::Header { @@ -181,8 +182,11 @@ impl From> for AnyConsensusState { } } -impl ConsensusState for MockConsensusState { +impl ConsensusState + for MockConsensusState +{ type Error = Infallible; + type Crypto = Crypto; fn client_type(&self) -> ClientType { ClientType::Mock diff --git a/modules/src/test_utils.rs b/modules/src/test_utils.rs index 8958f02b08..9e925c6851 100644 --- a/modules/src/test_utils.rs +++ b/modules/src/test_utils.rs @@ -85,7 +85,7 @@ impl Module for DummyTransferModule { } } -#[derive(Clone, Debug)] +#[derive(Clone, Debug, Default, PartialEq, Eq)] pub struct Crypto; impl HostFunctions for Crypto { From fb0504bb9a34d5c767bd5f9e37c9dd9869be36f4 Mon Sep 17 00:00:00 2001 From: David Salami Date: Thu, 19 May 2022 12:58:58 +0100 Subject: [PATCH 19/96] fixed bugs in beefy client update --- modules/src/clients/ics11_beefy/client_def.rs | 14 ++++- .../clients/ics11_beefy/consensus_state.rs | 16 ++--- modules/src/clients/ics11_beefy/error.rs | 3 +- modules/src/clients/ics11_beefy/header.rs | 11 ++-- .../ics02_client/handler/update_client.rs | 61 +++++++++++-------- 5 files changed, 65 insertions(+), 40 deletions(-) diff --git a/modules/src/clients/ics11_beefy/client_def.rs b/modules/src/clients/ics11_beefy/client_def.rs index 95926f067e..d22efcd54f 100644 --- a/modules/src/clients/ics11_beefy/client_def.rs +++ b/modules/src/clients/ics11_beefy/client_def.rs @@ -1,7 +1,7 @@ use beefy_client::primitives::{ParachainHeader, ParachainsUpdateProof}; use beefy_client::traits::ClientState as LightClientState; use beefy_client::BeefyLightClient; -use codec::Encode; +use codec::{Decode, Encode}; use core::fmt::Debug; use pallet_mmr_primitives::BatchProof; use sp_core::H256; @@ -113,8 +113,12 @@ impl ClientDef for BeefyClient items: header .mmr_proofs .into_iter() - .map(|item| H256::from_slice(&item)) - .collect(), + .map(|item| { + H256::decode(&mut &*item).map_err(|e| { + Error::beefy(BeefyError::invalid_mmr_update(format!("{:?}", e))) + }) + }) + .collect::, _>>()?, }, }; @@ -137,6 +141,10 @@ impl ClientDef for BeefyClient .from_header(header.clone()) .map_err(Error::beefy)?; for header in header.parachain_headers { + // Skip genesis block of parachains since it has no timestamp or ibc root + if header.parachain_header.number == 0 { + continue; + } let height = Height::new(header.para_id as u64, header.parachain_header.number as u64); // Skip duplicate consensus states if ctx.consensus_state(&client_id, height).is_ok() { diff --git 
a/modules/src/clients/ics11_beefy/consensus_state.rs b/modules/src/clients/ics11_beefy/consensus_state.rs index 35ec56253d..5c9ee444c8 100644 --- a/modules/src/clients/ics11_beefy/consensus_state.rs +++ b/modules/src/clients/ics11_beefy/consensus_state.rs @@ -152,19 +152,19 @@ impl TryFrom for ConsensusState { .filter_map(|digest| digest.as_consensus()) .find(|(id, _value)| id == &IBC_CONSENSUS_ID) .map(|(.., root)| root.to_vec()) - .ok_or(Error::invalid_header( - "cannot find ibc commitment root".to_string(), - ))? + .ok_or_else(|| { + Error::invalid_header("cannot find ibc commitment root".to_string()) + })? }; - let timestamp = decode_timestamp_extrinsic::(&header).unwrap_or_default(); + let timestamp = decode_timestamp_extrinsic::(&header)?; let duration = core::time::Duration::from_millis(timestamp); let timestamp = Timestamp::from_nanoseconds(duration.as_nanos().saturated_into::()) .unwrap_or_default() .into_tm_time() - .ok_or(Error::invalid_header( - "cannot decode timestamp extrinsic".to_string(), - ))?; + .ok_or_else(|| { + Error::invalid_header("cannot decode timestamp extrinsic".to_string()) + })?; Ok(Self { root: root.into(), @@ -190,7 +190,7 @@ impl TryFrom for ConsensusState { .unwrap_or_default() }; - let timestamp = decode_timestamp_extrinsic::(&header).unwrap_or_default(); + let timestamp = decode_timestamp_extrinsic::(&header)?; let duration = core::time::Duration::from_millis(timestamp); let timestamp = Timestamp::from_nanoseconds(duration.as_nanos().saturated_into::()) .unwrap_or_default() diff --git a/modules/src/clients/ics11_beefy/error.rs b/modules/src/clients/ics11_beefy/error.rs index 7007efa8ac..c18dd84235 100644 --- a/modules/src/clients/ics11_beefy/error.rs +++ b/modules/src/clients/ics11_beefy/error.rs @@ -24,7 +24,8 @@ define_error! 
{ InvalidCommitmentRoot |_| { "invalid commitment root" }, TimestampExtrinsic - |_| { "error decoding timestamp extrinsic" }, + { reason: String } + |e| { format_args!("error decoding timestamp extrinsic {}", e.reason) }, InvalidHeader { reason: String } |e| { format_args!("invalid header, failed basic validation: {}", e.reason) }, diff --git a/modules/src/clients/ics11_beefy/header.rs b/modules/src/clients/ics11_beefy/header.rs index bd57cbe1a0..31ca4b6dbc 100644 --- a/modules/src/clients/ics11_beefy/header.rs +++ b/modules/src/clients/ics11_beefy/header.rs @@ -5,6 +5,7 @@ use crate::clients::crypto_ops::crypto::CryptoOps; use crate::clients::ics11_beefy::error::Error; use crate::core::ics02_client::client_type::ClientType; use crate::core::ics02_client::header::AnyHeader; +use alloc::format; use alloc::string::ToString; use alloc::vec; use alloc::vec::Vec; @@ -47,7 +48,7 @@ impl crate::core::ics02_client::header::Header for BeefyHeader { } } -#[derive(Clone, PartialEq, Eq, Debug, codec::Encode, codec::Decode)] +#[derive(Clone, PartialEq, Eq, Debug, Default, codec::Encode, codec::Decode)] pub struct ExtrinsicProof(pub Vec, pub Vec>); #[derive(Clone, PartialEq, Eq, Debug, codec::Encode, codec::Decode)] @@ -445,12 +446,14 @@ pub fn decode_timestamp_extrinsic( let ext = &*header.extrinsic_proof.0; let extrinsic_root = header.parachain_header.extrinsics_root; + // Timestamp extrinsic should be the first inherent and hence the first extrinsic + // https://github.com/paritytech/substrate/blob/d602397a0bbb24b5d627795b797259a44a5e29e9/primitives/trie/src/lib.rs#L99-L101 let key = codec::Encode::encode(&Compact(0u32)); Crypto::verify_membership_trie_proof(&extrinsic_root, proof, &*key, ext) - .map_err(|_| Error::timestamp_extrinsic())?; + .map_err(|e| Error::timestamp_extrinsic(format!("Proof Verification failed {:?}", e)))?; // Decoding from the [2..] because the timestamp inmherent has two extra bytes before the call that represents the // call length and the extrinsic version. 
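// Worked example (not part of the patch) of the byte layout relied on in the
// comment above, assuming a bare extrinsic encoded as
//   compact(len) ++ version ++ (pallet_index, call_index) ++ compact(moment)
// and using parity-scale-codec as `codec`. The indices and moment are made up
// for illustration; only the two skipped leading bytes matter.
use codec::{Compact, Decode, Encode};

fn main() {
    let call = (3u8, 0u8, Compact(1_650_000_000_000u64)).encode();

    let mut ext = Compact(call.len() as u32 + 1).encode(); // compact length prefix
    ext.push(4u8); // extrinsic version byte
    ext.extend_from_slice(&call);

    // Skip the length and version bytes, then decode the call; this is the
    // same `&mut &ext[2..]` cursor that decode_timestamp_extrinsic uses.
    let (_pallet, _call, moment): (u8, u8, Compact<u64>) =
        Decode::decode(&mut &ext[2..]).unwrap();
    assert_eq!(moment.0, 1_650_000_000_000);
}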
- let (_, _, timestamp): (u8, u8, Compact) = - codec::Decode::decode(&mut &ext[2..]).map_err(|_| Error::timestamp_extrinsic())?; + let (_, _, timestamp): (u8, u8, Compact) = codec::Decode::decode(&mut &ext[2..]) + .map_err(|_| Error::timestamp_extrinsic("Failed to decode extrinsic".to_string()))?; Ok(timestamp.into()) } diff --git a/modules/src/core/ics02_client/handler/update_client.rs b/modules/src/core/ics02_client/handler/update_client.rs index 7983b890a2..a4ba2a2d15 100644 --- a/modules/src/core/ics02_client/handler/update_client.rs +++ b/modules/src/core/ics02_client/handler/update_client.rs @@ -131,7 +131,9 @@ mod tests { use test_log::test; use crate::clients::ics11_beefy::client_state::ClientState as BeefyClientState; - use crate::clients::ics11_beefy::header::ParachainHeader as BeefyParachainHeader; + use crate::clients::ics11_beefy::header::{ + decode_parachain_header, ParachainHeader as BeefyParachainHeader, + }; use crate::clients::ics11_beefy::header::{BeefyHeader, ExtrinsicProof}; use crate::clients::ics11_beefy::polkadot_runtime as runtime; use crate::core::ics02_client::client_consensus::AnyConsensusState; @@ -843,7 +845,8 @@ mod tests { for (key, value) in changes.changes { if let Some(storage_data) = value { - let key = key.0.to_vec(); + let key = key.0; + // Storage prefix and storage key hash take up the first 40 bytes let para_id = u32::decode(&mut &key[40..]).unwrap(); let head_data: runtime::api::runtime_types::polkadot_parachain::primitives::HeadData = Decode::decode(&mut &*storage_data.0).unwrap(); heads.insert(para_id, head_data.0); @@ -901,7 +904,10 @@ mod tests { vec![] }; - let block_number = leaf.parent_number_and_hash.0 + 1; + let para_head = para_headers.get(&PARA_ID).unwrap().clone(); + let decoded_para_head = decode_parachain_header(para_head.clone()).unwrap(); + + let block_number = decoded_para_head.number; let subxt_block_number: subxt::BlockNumber = block_number.into(); let block_hash = para_client .rpc() @@ -916,33 +922,40 @@ mod tests { .into_iter() .map(|e| e.encode()) .collect::>(); - let timestamp_ext = extrinsics[0].clone(); + let extrinsic_proof = { + if extrinsics.is_empty() { + ExtrinsicProof::default().encode() + } else { + let timestamp_ext = extrinsics[0].clone(); - let mut db = sp_trie::MemoryDB::::default(); + let mut db = sp_trie::MemoryDB::::default(); - let root = { - let mut root = Default::default(); - let mut trie = - >>::new(&mut db, &mut root); + let root = { + let mut root = Default::default(); + let mut trie = >>::new( + &mut db, &mut root, + ); - for (i, ext) in extrinsics.clone().into_iter().enumerate() { - let key = codec::Compact::(i as u32).encode(); - trie.insert(&key, &ext).unwrap(); + for (i, ext) in extrinsics.into_iter().enumerate() { + let key = codec::Compact(i as u32).encode(); + trie.insert(&key, &ext).unwrap(); + } + *trie.root() + }; + + let key = codec::Compact::(0u32).encode(); + let extrinsic_proof = generate_trie_proof::< + sp_trie::LayoutV0, + _, + _, + _, + >(&db, root, vec![&key]) + .unwrap(); + ExtrinsicProof(timestamp_ext, extrinsic_proof).encode() } - *trie.root() }; - - let key = codec::Compact::(0u32).encode(); - let extrinsic_proof = - generate_trie_proof::, _, _, _>( - &db, - root, - vec![&key], - ) - .unwrap(); - let extrinsic_proof = ExtrinsicProof(timestamp_ext, extrinsic_proof).encode(); let header = ParachainHeader { - parachain_header: para_headers.get(&PARA_ID).unwrap().clone(), + parachain_header: para_head, partial_mmr_leaf: PartialMmrLeaf { version: leaf.version, parent_number_and_hash: 
leaf.parent_number_and_hash, From aa1cc2414c5e7c2dae9c7a4e7a69b154e38c4a35 Mon Sep 17 00:00:00 2001 From: David Salami Date: Thu, 19 May 2022 13:35:47 +0100 Subject: [PATCH 20/96] minor fix --- modules/tests/runner/mod.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/modules/tests/runner/mod.rs b/modules/tests/runner/mod.rs index 96a4e0ce11..e4786210c4 100644 --- a/modules/tests/runner/mod.rs +++ b/modules/tests/runner/mod.rs @@ -1,6 +1,7 @@ pub mod step; use alloc::collections::btree_map::BTreeMap as HashMap; +use ibc::test_utils::Crypto; use core::convert::TryInto; use core::fmt::Debug; @@ -158,8 +159,8 @@ impl IbcTestRunner { AnyClientState::Mock(MockClientState::new(Self::mock_header(height))) } - pub fn consensus_state(height: Height) -> AnyConsensusState { - AnyConsensusState::Mock(MockConsensusState::new(Self::mock_header(height))) + pub fn consensus_state(height: Height) -> AnyConsensusState { + AnyConsensusState::Mock(MockConsensusState::::new(Self::mock_header(height))) } fn signer() -> Signer { From c36caa4dd90f029b681b1840b3668f070ab02129 Mon Sep 17 00:00:00 2001 From: David Salami Date: Thu, 19 May 2022 19:52:56 +0100 Subject: [PATCH 21/96] fixed flaky beefy client test issues --- Cargo.lock | 11 +- modules/Cargo.toml | 6 +- modules/src/clients/ics11_beefy/client_def.rs | 3 +- modules/src/clients/ics11_beefy/header.rs | 17 +- modules/src/clients/ics11_beefy/mod.rs | 3 - .../clients/ics11_beefy/polkadot_runtime.rs | 32110 ---------------- .../ics02_client/handler/update_client.rs | 398 +- proto/src/IBC_GO_COMMIT | 2 +- proto/src/lib.rs | 5 - proto/src/prost/ibc.core.channel.v1.rs | 20 + proto/src/prost/ibc.lightclients.beefy.v1.rs | 102 +- .../prost/ibc.lightclients.localhost.v1.rs | 11 - 12 files changed, 143 insertions(+), 32545 deletions(-) delete mode 100644 modules/src/clients/ics11_beefy/polkadot_runtime.rs delete mode 100644 proto/src/prost/ibc.lightclients.localhost.v1.rs diff --git a/Cargo.lock b/Cargo.lock index 8a2f52428a..b1fab2b4e3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -252,10 +252,14 @@ dependencies = [ name = "beefy-generic-client" version = "0.1.0" <<<<<<< HEAD +<<<<<<< HEAD source = "git+https://github.com/ComposableFi/beefy-client?branch=master#c8ef22e646f9eb33d66ac83e6fdad808b76aa4ed" ======= source = "git+https://github.com/ComposableFi/beefy-client?branch=david/refactor-traits#c60a46d3335303b5980eb93280f84ec0e6d99466" >>>>>>> 5c48fa8c (decode timestamp from extrinsic correctly) +======= +source = "git+https://github.com/ComposableFi/beefy-client?branch=david/refactor-traits#2228e4277c19c5cf643d3e613a6c1cc41cb0862c" +>>>>>>> b77494c4 (fixed flaky beefy client test issues) dependencies = [ "beefy-primitives", "ckb-merkle-mountain-range", @@ -1621,18 +1625,17 @@ dependencies = [ "derive_more", "env_logger", "flex-error", - "frame-support", - "hex-literal", "ibc-proto", "ics23", "modelator", "num-traits", - "pallet-beefy-mmr", - "pallet-mmr-rpc", "parity-scale-codec", "prost", "prost-types", +<<<<<<< HEAD "ripemd", +======= +>>>>>>> b77494c4 (fixed flaky beefy client test issues) "safe-regex", "serde", "serde_derive", diff --git a/modules/Cargo.toml b/modules/Cargo.toml index 50b176207d..516c20b9f0 100644 --- a/modules/Cargo.toml +++ b/modules/Cargo.toml @@ -94,12 +94,8 @@ tendermint-testgen = { version = "=0.23.7" } # Needed for generating (synthetic) sp-io = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.22"} subxt = "0.21.0" tokio = { version = "1.17.0", features = ["full"] } -hex-literal = "0.3.4" 
serde_json = "1.0.74" -pallet-mmr-rpc = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.22" } -frame-support = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.22" } -rs_merkle = { version = "1.2.0" } -beefy-mmr = { package = "pallet-beefy-mmr", git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.22" } +beefy-client = { package = "beefy-generic-client", git = "https://github.com/ComposableFi/beefy-client", branch = "david/refactor-traits", features = ["mocks"]} [[test]] name = "mbt" diff --git a/modules/src/clients/ics11_beefy/client_def.rs b/modules/src/clients/ics11_beefy/client_def.rs index d22efcd54f..17fa000990 100644 --- a/modules/src/clients/ics11_beefy/client_def.rs +++ b/modules/src/clients/ics11_beefy/client_def.rs @@ -97,7 +97,8 @@ impl ClientDef for BeefyClient parachain_heads_proof: header.parachain_heads_proof, heads_leaf_index: header.heads_leaf_index, heads_total_count: header.heads_total_count, - extrinsic_proof: header.extrinsic_proof.encode(), + extrinsic_proof: header.extrinsic_proof, + timestamp_extrinsic: header.timestamp_extrinsic, } }) .collect::>(); diff --git a/modules/src/clients/ics11_beefy/header.rs b/modules/src/clients/ics11_beefy/header.rs index 31ca4b6dbc..cf745a4ee7 100644 --- a/modules/src/clients/ics11_beefy/header.rs +++ b/modules/src/clients/ics11_beefy/header.rs @@ -48,9 +48,6 @@ impl crate::core::ics02_client::header::Header for BeefyHeader { } } -#[derive(Clone, PartialEq, Eq, Debug, Default, codec::Encode, codec::Decode)] -pub struct ExtrinsicProof(pub Vec, pub Vec>); - #[derive(Clone, PartialEq, Eq, Debug, codec::Encode, codec::Decode)] pub struct ParachainHeader { pub parachain_header: SubstrateHeader, @@ -65,8 +62,9 @@ pub struct ParachainHeader { /// Total number of parachain heads pub heads_total_count: u32, /// Trie merkle proof of inclusion of the set timestamp extrinsic in header.extrinsic_root + pub extrinsic_proof: Vec>, /// this already encodes the actual extrinsic - pub extrinsic_proof: ExtrinsicProof, + pub timestamp_extrinsic: Vec, } pub fn split_leaf_version(version: u8) -> (u8, u8) { @@ -130,8 +128,8 @@ impl TryFrom for BeefyHeader { .collect::, Error>>()?, heads_leaf_index: raw_para_header.heads_leaf_index, heads_total_count: raw_para_header.heads_total_count, - extrinsic_proof: Decode::decode(&mut &*raw_para_header.extrinsic_proof) - .map_err(|_| Error::invalid_raw_header())?, + extrinsic_proof: raw_para_header.extrinsic_proof, + timestamp_extrinsic: raw_para_header.timestamp_extrinsic, }) }) .collect::, Error>>()?; @@ -349,7 +347,8 @@ impl From for RawBeefyHeader { .collect(), heads_leaf_index: para_header.heads_leaf_index, heads_total_count: para_header.heads_total_count, - extrinsic_proof: para_header.extrinsic_proof.encode(), + extrinsic_proof: para_header.extrinsic_proof, + timestamp_extrinsic: para_header.timestamp_extrinsic, }, ) .collect(), @@ -442,8 +441,8 @@ pub fn decode_header(buf: B) -> Result { pub fn decode_timestamp_extrinsic( header: &ParachainHeader, ) -> Result { - let proof = &*header.extrinsic_proof.1; - let ext = &*header.extrinsic_proof.0; + let proof = &*header.extrinsic_proof; + let ext = &*header.timestamp_extrinsic; let extrinsic_root = header.parachain_header.extrinsics_root; // Timestamp extrinsic should be the first inherent and hence the first extrinsic diff --git a/modules/src/clients/ics11_beefy/mod.rs b/modules/src/clients/ics11_beefy/mod.rs index 430166e55d..2b16db780e 100644 --- a/modules/src/clients/ics11_beefy/mod.rs +++ 
b/modules/src/clients/ics11_beefy/mod.rs @@ -7,6 +7,3 @@ pub mod consensus_state; pub mod error; pub mod header; pub mod misbehaviour; - -#[cfg(test)] -pub mod polkadot_runtime; diff --git a/modules/src/clients/ics11_beefy/polkadot_runtime.rs b/modules/src/clients/ics11_beefy/polkadot_runtime.rs deleted file mode 100644 index 13e74d0809..0000000000 --- a/modules/src/clients/ics11_beefy/polkadot_runtime.rs +++ /dev/null @@ -1,32110 +0,0 @@ -#[allow(dead_code, unused_imports, non_camel_case_types, unused_qualifications)] -pub mod api { - use super::api as root_mod; - pub static PALLETS: [&str; 49usize] = [ - "System", - "Babe", - "Timestamp", - "Indices", - "Balances", - "TransactionPayment", - "Authorship", - "Offences", - "Historical", - "Session", - "Grandpa", - "ImOnline", - "AuthorityDiscovery", - "ParachainsOrigin", - "Configuration", - "ParasShared", - "ParaInclusion", - "ParaInherent", - "ParaScheduler", - "Paras", - "Initializer", - "Dmp", - "Ump", - "Hrmp", - "ParaSessionInfo", - "ParasDisputes", - "Registrar", - "Auctions", - "Crowdloan", - "Slots", - "ParasSudoWrapper", - "AssignedSlots", - "Sudo", - "Mmr", - "Beefy", - "MmrLeaf", - "ValidatorManager", - "BridgeRococoGrandpa", - "BridgeWococoGrandpa", - "BridgeRococoMessages", - "BridgeWococoMessages", - "BridgeRococoMessagesDispatch", - "BridgeWococoMessagesDispatch", - "Collective", - "Membership", - "Utility", - "Proxy", - "Multisig", - "XcmPallet", - ]; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Event { - #[codec(index = 0)] - System(system::Event), - #[codec(index = 3)] - Indices(indices::Event), - #[codec(index = 4)] - Balances(balances::Event), - #[codec(index = 7)] - Offences(offences::Event), - #[codec(index = 9)] - Session(session::Event), - #[codec(index = 10)] - Grandpa(grandpa::Event), - #[codec(index = 11)] - ImOnline(im_online::Event), - #[codec(index = 16)] - ParaInclusion(para_inclusion::Event), - #[codec(index = 19)] - Paras(paras::Event), - #[codec(index = 22)] - Ump(ump::Event), - #[codec(index = 23)] - Hrmp(hrmp::Event), - #[codec(index = 25)] - ParasDisputes(paras_disputes::Event), - #[codec(index = 26)] - Registrar(registrar::Event), - #[codec(index = 27)] - Auctions(auctions::Event), - #[codec(index = 28)] - Crowdloan(crowdloan::Event), - #[codec(index = 29)] - Slots(slots::Event), - #[codec(index = 31)] - AssignedSlots(assigned_slots::Event), - #[codec(index = 32)] - Sudo(sudo::Event), - #[codec(index = 36)] - ValidatorManager(validator_manager::Event), - #[codec(index = 43)] - BridgeRococoMessages(bridge_rococo_messages::Event), - #[codec(index = 44)] - BridgeWococoMessages(bridge_wococo_messages::Event), - #[codec(index = 45)] - BridgeRococoMessagesDispatch(bridge_rococo_messages_dispatch::Event), - #[codec(index = 46)] - BridgeWococoMessagesDispatch(bridge_wococo_messages_dispatch::Event), - #[codec(index = 80)] - Collective(collective::Event), - #[codec(index = 81)] - Membership(membership::Event), - #[codec(index = 90)] - Utility(utility::Event), - #[codec(index = 91)] - Proxy(proxy::Event), - #[codec(index = 92)] - Multisig(multisig::Event), - #[codec(index = 99)] - XcmPallet(xcm_pallet::Event), - } - pub mod system { - use super::root_mod; - use super::runtime_types; - pub mod calls { - use super::root_mod; - use super::runtime_types; - type DispatchError = runtime_types::sp_runtime::DispatchError; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct FillBlock { - pub ratio: 
runtime_types::sp_arithmetic::per_things::Perbill, - } - impl ::subxt::Call for FillBlock { - const PALLET: &'static str = "System"; - const FUNCTION: &'static str = "fill_block"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct Remark { - pub remark: ::std::vec::Vec<::core::primitive::u8>, - } - impl ::subxt::Call for Remark { - const PALLET: &'static str = "System"; - const FUNCTION: &'static str = "remark"; - } - #[derive( - :: subxt :: codec :: CompactAs, - :: subxt :: codec :: Decode, - :: subxt :: codec :: Encode, - Debug, - )] - pub struct SetHeapPages { - pub pages: ::core::primitive::u64, - } - impl ::subxt::Call for SetHeapPages { - const PALLET: &'static str = "System"; - const FUNCTION: &'static str = "set_heap_pages"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct SetCode { - pub code: ::std::vec::Vec<::core::primitive::u8>, - } - impl ::subxt::Call for SetCode { - const PALLET: &'static str = "System"; - const FUNCTION: &'static str = "set_code"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct SetCodeWithoutChecks { - pub code: ::std::vec::Vec<::core::primitive::u8>, - } - impl ::subxt::Call for SetCodeWithoutChecks { - const PALLET: &'static str = "System"; - const FUNCTION: &'static str = "set_code_without_checks"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct SetStorage { - pub items: ::std::vec::Vec<( - ::std::vec::Vec<::core::primitive::u8>, - ::std::vec::Vec<::core::primitive::u8>, - )>, - } - impl ::subxt::Call for SetStorage { - const PALLET: &'static str = "System"; - const FUNCTION: &'static str = "set_storage"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct KillStorage { - pub keys: ::std::vec::Vec<::std::vec::Vec<::core::primitive::u8>>, - } - impl ::subxt::Call for KillStorage { - const PALLET: &'static str = "System"; - const FUNCTION: &'static str = "kill_storage"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct KillPrefix { - pub prefix: ::std::vec::Vec<::core::primitive::u8>, - pub subkeys: ::core::primitive::u32, - } - impl ::subxt::Call for KillPrefix { - const PALLET: &'static str = "System"; - const FUNCTION: &'static str = "kill_prefix"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct RemarkWithEvent { - pub remark: ::std::vec::Vec<::core::primitive::u8>, - } - impl ::subxt::Call for RemarkWithEvent { - const PALLET: &'static str = "System"; - const FUNCTION: &'static str = "remark_with_event"; - } - pub struct TransactionApi<'a, T: ::subxt::Config, X> { - client: &'a ::subxt::Client, - marker: ::core::marker::PhantomData, - } - impl<'a, T, X> TransactionApi<'a, T, X> - where - T: ::subxt::Config, - X: ::subxt::extrinsic::ExtrinsicParams, - { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { - client, - marker: ::core::marker::PhantomData, - } - } - #[doc = "A dispatch that will fill the block weight up to the given ratio."] - pub fn fill_block( - &self, - ratio: runtime_types::sp_arithmetic::per_things::Perbill, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - FillBlock, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? 
[flattened `-` diff lines of the subxt-generated runtime bindings; recoverable content: the remaining System `TransactionApi` call wrappers — `fill_block`, `remark`, `set_heap_pages`, `set_code`, `set_code_without_checks`, `set_storage`, `kill_storage`, `kill_prefix`, `remark_with_event` — each comparing the call hash from `client.metadata()` against a hard-coded 32-byte constant before building the call into a `SubmittableExtrinsic`, and returning `MetadataError::IncompatibleMetadata` otherwise; the `Event` type alias and `events` module (`ExtrinsicSuccess`, `ExtrinsicFailed`, `CodeUpdated`, `NewAccount`, `KilledAccount`, `Remarked`, each implementing `::subxt::Event` with `PALLET = "System"`); and the opening of the System `storage` module with the `Account` map entry.]
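All of the call wrappers summarized above share one guard: compare the call hash recorded in the node's metadata with a hash hard-coded at codegen time, and only build the extrinsic if they match, otherwise bail out with `IncompatibleMetadata`. The sketch below restates that guard with stand-in types — the `Metadata`, `CallHash` and error types here are mine for illustration, not subxt's real ones:

    use std::collections::HashMap;

    /// Stand-in for the hash subxt derives from a pallet call's metadata.
    type CallHash = [u8; 32];

    #[derive(Debug)]
    enum MetadataError {
        IncompatibleMetadata,
        CallNotFound,
    }

    /// Stand-in for what the generated code reaches via `client.metadata()`.
    struct Metadata {
        call_hashes: HashMap<&'static str, CallHash>,
    }

    impl Metadata {
        /// Very rough analogue of the `call_hash` lookup in the wrappers above.
        fn call_hash(&self, pallet_call: &'static str) -> Result<CallHash, MetadataError> {
            self.call_hashes
                .get(pallet_call)
                .copied()
                .ok_or(MetadataError::CallNotFound)
        }
    }

    /// The guard every generated wrapper (`remark`, `set_code`, ...) performs:
    /// construct the call only if the on-node hash matches the compile-time one.
    fn guarded_call<C>(
        metadata: &Metadata,
        name: &'static str,
        expected: CallHash,
        build: impl FnOnce() -> C,
    ) -> Result<C, MetadataError> {
        if metadata.call_hash(name)? == expected {
            Ok(build())
        } else {
            Err(MetadataError::IncompatibleMetadata)
        }
    }

    fn main() {
        let expected: CallHash = [7u8; 32];
        let metadata = Metadata {
            call_hashes: HashMap::from([("System.remark", expected)]),
        };
        // Builds the call because the hashes line up.
        let call = guarded_call(&metadata, "System.remark", expected, || {
            ("System", "remark", b"hello".to_vec())
        });
        println!("{:?}", call.map(|(pallet, function, _)| (pallet, function)));
    }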
[flattened hunk continues — System storage entry definitions: `Account` (Blake2_128Concat over `AccountId32`, value `AccountInfo<u32, AccountData<u128>>`), `ExtrinsicCount`, `BlockWeight`, `AllExtrinsicsLen`, `BlockHash` and `ExtrinsicData` (Twox64Concat over `u32`), `Number`, `ParentHash`, `Digest`, `Events`, `EventCount`, `EventTopics` (Blake2_128Concat over `H256`), `LastRuntimeUpgrade`, `UpgradedToU32RefCount`, `UpgradedToTripleRefCount`, `ExecutionPhase`; then the start of the System `StorageApi` with the `account`, `account_iter`, `extrinsic_count`, `block_weight`, `all_extrinsics_len` and `block_hash` accessors, each validating the storage hash from metadata before fetching.]
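Each map entry above names a hasher (`Blake2_128Concat` for `Account` and `EventTopics`, `Twox64Concat` for `BlockHash` and `ExtrinsicData`), and the client expands that into the usual FRAME key layout: `twox128(pallet) ++ twox128(item) ++ hasher(scale(key))`, with the `Concat` hashers appending the SCALE-encoded key after its hash. Below is a sketch of the key for `System::BlockHash(5)` under that assumption; the `sp-core` and `parity-scale-codec` crates are my choice for illustration, not something this hunk itself depends on:

    use parity_scale_codec::Encode;
    use sp_core::hashing::{twox_128, twox_64};

    /// Builds the storage key for a Twox64Concat map entry such as `System::BlockHash`.
    fn twox64concat_map_key(pallet: &str, item: &str, key: &impl Encode) -> Vec<u8> {
        let encoded_key = key.encode();
        let mut out = Vec::new();
        out.extend_from_slice(&twox_128(pallet.as_bytes())); // pallet prefix
        out.extend_from_slice(&twox_128(item.as_bytes()));   // storage item prefix
        out.extend_from_slice(&twox_64(&encoded_key));        // Twox64 of the SCALE-encoded key...
        out.extend_from_slice(&encoded_key);                  // ...with the key itself concatenated
        out
    }

    fn main() {
        // Key under which block number 5's hash would live in `System::BlockHash`.
        let key = twox64concat_map_key("System", "BlockHash", &5u32);
        println!("{:02x?}", key);
    }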
[flattened hunk continues — remaining System `StorageApi` accessors: `block_hash_iter`, `extrinsic_data`/`extrinsic_data_iter`, `number`, `parent_hash`, `digest`, `events` (with the doc note that the item is unbounded and should never be read on chain), `event_count`, `event_topics`/`event_topics_iter` (deterministic per-topic locations so light clients can track events of interest), `last_runtime_upgrade`, `upgraded_to_u32_ref_count` and the start of `upgraded_to_triple_ref_count`.]
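Two read paths recur in these accessors: `fetch`, which surfaces a missing value as `None` (`extrinsic_count`, `last_runtime_upgrade`, ...), and `fetch_or_default`, which falls back to the value type's default (`number`, `event_count`, ...). A minimal stand-in for that distinction, not subxt's actual storage client:

    /// Rough model of the two read paths used by the generated accessors.
    fn fetch<T>(raw: Option<Vec<u8>>, decode: impl Fn(&[u8]) -> T) -> Option<T> {
        // A key that is absent in state simply yields `None`.
        raw.map(|bytes| decode(&bytes[..]))
    }

    fn fetch_or_default<T: Default>(raw: Option<Vec<u8>>, decode: impl Fn(&[u8]) -> T) -> T {
        // Same lookup, but an absent key collapses to `T::default()`.
        fetch(raw, decode).unwrap_or_default()
    }

    fn main() {
        // Pretend the node returned no value for this storage key.
        let missing: Option<Vec<u8>> = None;
        let as_option: Option<u32> = fetch(missing.clone(), |_| 0);
        let as_default: u32 = fetch_or_default(missing, |_| 0);
        println!("fetch = {:?}, fetch_or_default = {}", as_option, as_default);
    }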
[flattened hunk continues — the `upgraded_to_triple_ref_count` and `execution_phase` accessors, then the System `ConstantsApi`: `block_weights`, `block_length`, `block_hash_count`, `db_weight`, `version` and `ss58_prefix` (the chain's designated SS58 prefix), each checked against a constant hash and SCALE-decoded from the raw metadata value.]
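Every constant accessor ends the same way: look the constant up in the pallet metadata and SCALE-decode its raw bytes with `Decode::decode(&mut &constant.value[..])`. A small round-trip with `parity-scale-codec` showing that decode step for a `u16` constant shaped like `SS58Prefix` — the value 42 is only an example here, not something read from a chain:

    use parity_scale_codec::{Decode, Encode};

    fn main() {
        // Stand-in for `constant.value`: the SCALE-encoded bytes stored in metadata.
        let raw: Vec<u8> = 42u16.encode();

        // The same decode call the generated accessor performs on those bytes.
        let prefix = u16::decode(&mut &raw[..]).expect("valid SCALE-encoded u16");
        assert_eq!(prefix, 42);
        println!("SS58 prefix constant decodes to {prefix}");
    }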
[flattened hunk continues — the end of `ss58_prefix` and of the `system` module, then the `babe` module: call types `ReportEquivocation` and `ReportEquivocationUnsigned` (each boxing an `EquivocationProof` and carrying a `MembershipProof`) plus `PlanConfigChange`; their `TransactionApi` wrappers `report_equivocation`, `report_equivocation_unsigned` (to be submitted unsigned by block authors) and `plan_config_change`; and the first batch of Babe storage entries: `EpochIndex`, `Authorities`, `GenesisSlot`, `CurrentSlot`, `Randomness`, `PendingEpochConfigChange`, `NextRandomness`, `NextAuthorities`, `SegmentIndex`, `UnderConstruction`.]
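The Babe call structs above carry only their payloads; the routing information lives in the `::subxt::Call` impl's `PALLET` and `FUNCTION` constants. A stripped-down stand-in for that trait (not the real subxt trait, and with the `NextConfigDescriptor` payload replaced by a placeholder) shows how a generic submitter can recover the target from the type alone:

    /// Stand-in for `::subxt::Call`: a call type knows which pallet and
    /// dispatchable it targets via associated constants.
    trait Call {
        const PALLET: &'static str;
        const FUNCTION: &'static str;
    }

    /// Mirrors the shape of the generated `PlanConfigChange` struct.
    #[allow(dead_code)]
    struct PlanConfigChange {
        config: u32, // placeholder for the real `NextConfigDescriptor`
    }

    impl Call for PlanConfigChange {
        const PALLET: &'static str = "Babe";
        const FUNCTION: &'static str = "plan_config_change";
    }

    /// A generic helper can name the target without inspecting the payload.
    fn describe<C: Call>(_call: &C) -> String {
        format!("{}::{}", C::PALLET, C::FUNCTION)
    }

    fn main() {
        let call = PlanConfigChange { config: 0 };
        println!("submitting {}", describe(&call));
    }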
[flattened hunk continues — remaining Babe storage entries (`UnderConstruction` as a `BoundedVec<[u8; 32]>` under Twox64Concat, `Initialized`, `AuthorVrfRandomness`, `EpochStart`, `Lateness`, `EpochConfig`, `NextEpochConfig`) and the Babe `StorageApi` accessors: `epoch_index`, `authorities`, `genesis_slot`, `current_slot`, `randomness` (with its security note that it must not be used for gambling, only where an adversary-free public value suffices), `pending_epoch_config_change`, `next_randomness`, `next_authorities`, `segment_index`, `under_construction`/`under_construction_iter`, `initialized`, `author_vrf_randomness` and the start of `epoch_start`.]
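The `genesis_slot`, `current_slot` and `epoch_index` accessors above are tied together by slot arithmetic: assuming the standard BABE scheme, the epoch index is the number of whole `EpochDuration`-slot windows elapsed since the genesis slot. A back-of-the-envelope sketch with made-up numbers:

    /// Assuming the standard BABE relationship: epochs are fixed windows of
    /// `epoch_duration` slots, counted from `genesis_slot`.
    fn epoch_index(current_slot: u64, genesis_slot: u64, epoch_duration: u64) -> u64 {
        (current_slot - genesis_slot) / epoch_duration
    }

    fn main() {
        // Made-up numbers: 600-slot epochs, 1_500 slots since genesis.
        let genesis_slot = 265_084_563;
        let current_slot = genesis_slot + 1_500;
        let epoch_duration = 600;
        assert_eq!(epoch_index(current_slot, genesis_slot, epoch_duration), 2);
        println!("epoch {}", epoch_index(current_slot, genesis_slot, epoch_duration));
    }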
[flattened hunk continues — the `epoch_start` accessor (block numbers at which the previous and current epoch started; slots may be skipped, so these need not line up with slot numbers), `lateness`, `epoch_config`, `next_epoch_config`, then the Babe `ConstantsApi`: `epoch_duration` (slots per epoch, not changeable after genesis), `expected_block_time` (milliseconds) and the start of `max_authorities`.]
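`EpochDuration` counts slots and `ExpectedBlockTime` is in milliseconds, so the wall-clock length of an epoch is simply their product. A quick sanity check with placeholder values, not constants read from a chain:

    fn main() {
        // Placeholder values; on a live chain these would come from the Babe
        // constants `EpochDuration` (slots) and `ExpectedBlockTime` (ms).
        let epoch_duration_slots: u64 = 600;
        let expected_block_time_ms: u64 = 6_000;

        let epoch_ms = epoch_duration_slots * expected_block_time_ms;
        println!("one epoch ~ {} minutes", epoch_ms / 1000 / 60);
    }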
- == [ - 248u8, 195u8, 131u8, 166u8, 10u8, 50u8, 71u8, 223u8, 41u8, 49u8, 43u8, - 99u8, 251u8, 113u8, 75u8, 193u8, 159u8, 15u8, 77u8, 217u8, 147u8, - 205u8, 165u8, 50u8, 6u8, 166u8, 77u8, 189u8, 102u8, 22u8, 201u8, 19u8, - ] - { - let pallet = self.client.metadata().pallet("Babe")?; - let constant = pallet.constant("MaxAuthorities")?; - let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; - Ok(value) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - } - pub mod timestamp { - use super::root_mod; - use super::runtime_types; - pub mod calls { - use super::root_mod; - use super::runtime_types; - type DispatchError = runtime_types::sp_runtime::DispatchError; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct Set { - #[codec(compact)] - pub now: ::core::primitive::u64, - } - impl ::subxt::Call for Set { - const PALLET: &'static str = "Timestamp"; - const FUNCTION: &'static str = "set"; - } - pub struct TransactionApi<'a, T: ::subxt::Config, X> { - client: &'a ::subxt::Client, - marker: ::core::marker::PhantomData, - } - impl<'a, T, X> TransactionApi<'a, T, X> - where - T: ::subxt::Config, - X: ::subxt::extrinsic::ExtrinsicParams, - { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { - client, - marker: ::core::marker::PhantomData, - } - } - #[doc = "Set the current time."] - #[doc = ""] - #[doc = "This call should be invoked exactly once per block. It will panic at the finalization"] - #[doc = "phase, if this call hasn't been invoked by that time."] - #[doc = ""] - #[doc = "The timestamp should be greater than the previous one by the amount specified by"] - #[doc = "`MinimumPeriod`."] - #[doc = ""] - #[doc = "The dispatch origin for this call must be `Inherent`."] - #[doc = ""] - #[doc = "# "] - #[doc = "- `O(1)` (Note that implementations of `OnTimestampSet` must also be `O(1)`)"] - #[doc = "- 1 storage read and 1 storage mutation (codec `O(1)`). (because of `DidUpdate::take` in"] - #[doc = " `on_finalize`)"] - #[doc = "- 1 event handler `on_timestamp_set`. Must be `O(1)`."] - #[doc = "# "] - pub fn set( - &self, - now: ::core::primitive::u64, - ) -> Result< - ::subxt::SubmittableExtrinsic<'a, T, X, Set, DispatchError, root_mod::Event>, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? 
- == [ - 191u8, 73u8, 102u8, 150u8, 65u8, 157u8, 172u8, 194u8, 7u8, 72u8, 1u8, - 35u8, 54u8, 99u8, 245u8, 139u8, 40u8, 136u8, 245u8, 53u8, 167u8, 100u8, - 143u8, 244u8, 160u8, 5u8, 18u8, 130u8, 77u8, 160u8, 227u8, 51u8, - ] - { - let call = Set { now }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - pub mod storage { - use super::runtime_types; - pub struct Now; - impl ::subxt::StorageEntry for Now { - const PALLET: &'static str = "Timestamp"; - const STORAGE: &'static str = "Now"; - type Value = ::core::primitive::u64; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct DidUpdate; - impl ::subxt::StorageEntry for DidUpdate { - const PALLET: &'static str = "Timestamp"; - const STORAGE: &'static str = "DidUpdate"; - type Value = ::core::primitive::bool; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct StorageApi<'a, T: ::subxt::Config> { - client: &'a ::subxt::Client, - } - impl<'a, T: ::subxt::Config> StorageApi<'a, T> { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { client } - } - #[doc = " Current time for the current block."] - pub async fn now( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result<::core::primitive::u64, ::subxt::BasicError> - { - if self.client.metadata().storage_hash::()? - == [ - 148u8, 53u8, 50u8, 54u8, 13u8, 161u8, 57u8, 150u8, 16u8, 83u8, 144u8, - 221u8, 59u8, 75u8, 158u8, 130u8, 39u8, 123u8, 106u8, 134u8, 202u8, - 185u8, 83u8, 85u8, 60u8, 41u8, 120u8, 96u8, 210u8, 34u8, 2u8, 250u8, - ] - { - let entry = Now; - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Did the timestamp get updated in this block?"] - pub async fn did_update( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result<::core::primitive::bool, ::subxt::BasicError> - { - if self.client.metadata().storage_hash::()? - == [ - 70u8, 13u8, 92u8, 186u8, 80u8, 151u8, 167u8, 90u8, 158u8, 232u8, 175u8, - 13u8, 103u8, 135u8, 2u8, 78u8, 16u8, 6u8, 39u8, 158u8, 167u8, 85u8, - 27u8, 47u8, 122u8, 73u8, 127u8, 26u8, 35u8, 168u8, 72u8, 204u8, - ] - { - let entry = DidUpdate; - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - pub mod constants { - use super::runtime_types; - pub struct ConstantsApi<'a, T: ::subxt::Config> { - client: &'a ::subxt::Client, - } - impl<'a, T: ::subxt::Config> ConstantsApi<'a, T> { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { client } - } - #[doc = " The minimum period between blocks. Beware that this is different to the *expected*"] - #[doc = " period that the block production apparatus provides. Your chosen consensus system will"] - #[doc = " generally work with this to determine a sensible block time. e.g. For Aura, it will be"] - #[doc = " double this period on default settings."] - pub fn minimum_period( - &self, - ) -> ::core::result::Result<::core::primitive::u64, ::subxt::BasicError> - { - if self - .client - .metadata() - .constant_hash("Timestamp", "MinimumPeriod")? 
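A small sketch of reading the `Timestamp::Now` storage entry through the generated `StorageApi`; `relay_runtime` is again a hypothetical module name for the codegen output.

async fn latest_timestamp<T: ::subxt::Config>(
    client: &::subxt::Client<T>,
) -> Result<u64, ::subxt::BasicError> {
    let storage = relay_runtime::timestamp::storage::StorageApi::new(client);
    // `None` reads the entry at the node's current best block; pass
    // `Some(block_hash)` to query `Timestamp::Now` at a specific block.
    storage.now(None).await
}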
- == [ - 141u8, 242u8, 40u8, 24u8, 83u8, 43u8, 33u8, 194u8, 156u8, 149u8, 219u8, - 61u8, 10u8, 123u8, 120u8, 247u8, 228u8, 22u8, 25u8, 24u8, 214u8, 188u8, - 54u8, 135u8, 240u8, 162u8, 41u8, 216u8, 3u8, 58u8, 238u8, 39u8, - ] - { - let pallet = self.client.metadata().pallet("Timestamp")?; - let constant = pallet.constant("MinimumPeriod")?; - let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; - Ok(value) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - } - pub mod indices { - use super::root_mod; - use super::runtime_types; - pub mod calls { - use super::root_mod; - use super::runtime_types; - type DispatchError = runtime_types::sp_runtime::DispatchError; - #[derive( - :: subxt :: codec :: CompactAs, - :: subxt :: codec :: Decode, - :: subxt :: codec :: Encode, - Debug, - )] - pub struct Claim { - pub index: ::core::primitive::u32, - } - impl ::subxt::Call for Claim { - const PALLET: &'static str = "Indices"; - const FUNCTION: &'static str = "claim"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct Transfer { - pub new: ::subxt::sp_core::crypto::AccountId32, - pub index: ::core::primitive::u32, - } - impl ::subxt::Call for Transfer { - const PALLET: &'static str = "Indices"; - const FUNCTION: &'static str = "transfer"; - } - #[derive( - :: subxt :: codec :: CompactAs, - :: subxt :: codec :: Decode, - :: subxt :: codec :: Encode, - Debug, - )] - pub struct Free { - pub index: ::core::primitive::u32, - } - impl ::subxt::Call for Free { - const PALLET: &'static str = "Indices"; - const FUNCTION: &'static str = "free"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct ForceTransfer { - pub new: ::subxt::sp_core::crypto::AccountId32, - pub index: ::core::primitive::u32, - pub freeze: ::core::primitive::bool, - } - impl ::subxt::Call for ForceTransfer { - const PALLET: &'static str = "Indices"; - const FUNCTION: &'static str = "force_transfer"; - } - #[derive( - :: subxt :: codec :: CompactAs, - :: subxt :: codec :: Decode, - :: subxt :: codec :: Encode, - Debug, - )] - pub struct Freeze { - pub index: ::core::primitive::u32, - } - impl ::subxt::Call for Freeze { - const PALLET: &'static str = "Indices"; - const FUNCTION: &'static str = "freeze"; - } - pub struct TransactionApi<'a, T: ::subxt::Config, X> { - client: &'a ::subxt::Client, - marker: ::core::marker::PhantomData, - } - impl<'a, T, X> TransactionApi<'a, T, X> - where - T: ::subxt::Config, - X: ::subxt::extrinsic::ExtrinsicParams, - { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { - client, - marker: ::core::marker::PhantomData, - } - } - #[doc = "Assign an previously unassigned index."] - #[doc = ""] - #[doc = "Payment: `Deposit` is reserved from the sender account."] - #[doc = ""] - #[doc = "The dispatch origin for this call must be _Signed_."] - #[doc = ""] - #[doc = "- `index`: the index to be claimed. 
This must not be in use."] - #[doc = ""] - #[doc = "Emits `IndexAssigned` if successful."] - #[doc = ""] - #[doc = "# "] - #[doc = "- `O(1)`."] - #[doc = "- One storage mutation (codec `O(1)`)."] - #[doc = "- One reserve operation."] - #[doc = "- One event."] - #[doc = "-------------------"] - #[doc = "- DB Weight: 1 Read/Write (Accounts)"] - #[doc = "# "] - pub fn claim( - &self, - index: ::core::primitive::u32, - ) -> Result< - ::subxt::SubmittableExtrinsic<'a, T, X, Claim, DispatchError, root_mod::Event>, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 27u8, 4u8, 108u8, 55u8, 23u8, 109u8, 175u8, 25u8, 201u8, 230u8, 228u8, - 51u8, 164u8, 15u8, 79u8, 10u8, 219u8, 182u8, 242u8, 102u8, 164u8, - 148u8, 39u8, 91u8, 106u8, 197u8, 29u8, 190u8, 178u8, 221u8, 16u8, 87u8, - ] - { - let call = Claim { index }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Assign an index already owned by the sender to another account. The balance reservation"] - #[doc = "is effectively transferred to the new account."] - #[doc = ""] - #[doc = "The dispatch origin for this call must be _Signed_."] - #[doc = ""] - #[doc = "- `index`: the index to be re-assigned. This must be owned by the sender."] - #[doc = "- `new`: the new owner of the index. This function is a no-op if it is equal to sender."] - #[doc = ""] - #[doc = "Emits `IndexAssigned` if successful."] - #[doc = ""] - #[doc = "# "] - #[doc = "- `O(1)`."] - #[doc = "- One storage mutation (codec `O(1)`)."] - #[doc = "- One transfer operation."] - #[doc = "- One event."] - #[doc = "-------------------"] - #[doc = "- DB Weight:"] - #[doc = " - Reads: Indices Accounts, System Account (recipient)"] - #[doc = " - Writes: Indices Accounts, System Account (recipient)"] - #[doc = "# "] - pub fn transfer( - &self, - new: ::subxt::sp_core::crypto::AccountId32, - index: ::core::primitive::u32, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - Transfer, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 124u8, 83u8, 33u8, 230u8, 23u8, 70u8, 83u8, 59u8, 76u8, 100u8, 219u8, - 100u8, 165u8, 163u8, 102u8, 193u8, 11u8, 22u8, 30u8, 125u8, 114u8, - 28u8, 61u8, 156u8, 38u8, 170u8, 129u8, 74u8, 187u8, 28u8, 33u8, 65u8, - ] - { - let call = Transfer { new, index }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Free up an index owned by the sender."] - #[doc = ""] - #[doc = "Payment: Any previous deposit placed for the index is unreserved in the sender account."] - #[doc = ""] - #[doc = "The dispatch origin for this call must be _Signed_ and the sender must own the index."] - #[doc = ""] - #[doc = "- `index`: the index to be freed. This must be owned by the sender."] - #[doc = ""] - #[doc = "Emits `IndexFreed` if successful."] - #[doc = ""] - #[doc = "# "] - #[doc = "- `O(1)`."] - #[doc = "- One storage mutation (codec `O(1)`)."] - #[doc = "- One reserve operation."] - #[doc = "- One event."] - #[doc = "-------------------"] - #[doc = "- DB Weight: 1 Read/Write (Accounts)"] - #[doc = "# "] - pub fn free( - &self, - index: ::core::primitive::u32, - ) -> Result< - ::subxt::SubmittableExtrinsic<'a, T, X, Free, DispatchError, root_mod::Event>, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? 
- == [ - 153u8, 143u8, 162u8, 33u8, 229u8, 3u8, 159u8, 153u8, 111u8, 100u8, - 160u8, 250u8, 227u8, 24u8, 157u8, 226u8, 173u8, 39u8, 25u8, 200u8, - 137u8, 147u8, 232u8, 213u8, 182u8, 49u8, 142u8, 250u8, 139u8, 155u8, - 84u8, 214u8, - ] - { - let call = Free { index }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Force an index to an account. This doesn't require a deposit. If the index is already"] - #[doc = "held, then any deposit is reimbursed to its current owner."] - #[doc = ""] - #[doc = "The dispatch origin for this call must be _Root_."] - #[doc = ""] - #[doc = "- `index`: the index to be (re-)assigned."] - #[doc = "- `new`: the new owner of the index. This function is a no-op if it is equal to sender."] - #[doc = "- `freeze`: if set to `true`, will freeze the index so it cannot be transferred."] - #[doc = ""] - #[doc = "Emits `IndexAssigned` if successful."] - #[doc = ""] - #[doc = "# "] - #[doc = "- `O(1)`."] - #[doc = "- One storage mutation (codec `O(1)`)."] - #[doc = "- Up to one reserve operation."] - #[doc = "- One event."] - #[doc = "-------------------"] - #[doc = "- DB Weight:"] - #[doc = " - Reads: Indices Accounts, System Account (original owner)"] - #[doc = " - Writes: Indices Accounts, System Account (original owner)"] - #[doc = "# "] - pub fn force_transfer( - &self, - new: ::subxt::sp_core::crypto::AccountId32, - index: ::core::primitive::u32, - freeze: ::core::primitive::bool, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - ForceTransfer, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 181u8, 143u8, 90u8, 135u8, 132u8, 11u8, 145u8, 85u8, 4u8, 211u8, 56u8, - 110u8, 213u8, 153u8, 224u8, 106u8, 198u8, 250u8, 130u8, 253u8, 72u8, - 58u8, 133u8, 150u8, 102u8, 119u8, 177u8, 175u8, 77u8, 106u8, 253u8, - 99u8, - ] - { - let call = ForceTransfer { new, index, freeze }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Freeze an index so it will always point to the sender account. This consumes the"] - #[doc = "deposit."] - #[doc = ""] - #[doc = "The dispatch origin for this call must be _Signed_ and the signing account must have a"] - #[doc = "non-frozen account `index`."] - #[doc = ""] - #[doc = "- `index`: the index to be frozen in place."] - #[doc = ""] - #[doc = "Emits `IndexFrozen` if successful."] - #[doc = ""] - #[doc = "# "] - #[doc = "- `O(1)`."] - #[doc = "- One storage mutation (codec `O(1)`)."] - #[doc = "- Up to one slash operation."] - #[doc = "- One event."] - #[doc = "-------------------"] - #[doc = "- DB Weight: 1 Read/Write (Accounts)"] - #[doc = "# "] - pub fn freeze( - &self, - index: ::core::primitive::u32, - ) -> Result< - ::subxt::SubmittableExtrinsic<'a, T, X, Freeze, DispatchError, root_mod::Event>, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? 
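A sketch of building an `Indices::claim` extrinsic with the generated `TransactionApi`. The `relay_runtime` path is hypothetical, and the `ExtrinsicParams<T>` bound is assumed to match the generated impl for the subxt version these bindings target.

fn prepare_claim<'a, T, X>(
    calls: &relay_runtime::indices::calls::TransactionApi<'a, T, X>,
    index: u32,
) -> Result<(), ::subxt::BasicError>
where
    T: ::subxt::Config,
    // Assumed to mirror the bound on the generated `TransactionApi` impl.
    X: ::subxt::extrinsic::ExtrinsicParams<T>,
{
    // Building the call only verifies the call hash against the node's
    // metadata; signing and submitting the resulting extrinsic goes through
    // subxt's signer machinery and is elided here.
    let _claim_extrinsic = calls.claim(index)?;
    Ok(())
}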
- == [ - 204u8, 127u8, 214u8, 137u8, 138u8, 28u8, 171u8, 169u8, 184u8, 164u8, - 235u8, 114u8, 132u8, 176u8, 14u8, 207u8, 72u8, 39u8, 179u8, 231u8, - 137u8, 243u8, 242u8, 57u8, 89u8, 57u8, 213u8, 210u8, 87u8, 12u8, 253u8, - 159u8, - ] - { - let call = Freeze { index }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - pub type Event = runtime_types::pallet_indices::pallet::Event; - pub mod events { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "A account index was assigned."] - pub struct IndexAssigned { - pub who: ::subxt::sp_core::crypto::AccountId32, - pub index: ::core::primitive::u32, - } - impl ::subxt::Event for IndexAssigned { - const PALLET: &'static str = "Indices"; - const EVENT: &'static str = "IndexAssigned"; - } - #[derive( - :: subxt :: codec :: CompactAs, - :: subxt :: codec :: Decode, - :: subxt :: codec :: Encode, - Debug, - )] - #[doc = "A account index has been freed up (unassigned)."] - pub struct IndexFreed { - pub index: ::core::primitive::u32, - } - impl ::subxt::Event for IndexFreed { - const PALLET: &'static str = "Indices"; - const EVENT: &'static str = "IndexFreed"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "A account index has been frozen to its current account ID."] - pub struct IndexFrozen { - pub index: ::core::primitive::u32, - pub who: ::subxt::sp_core::crypto::AccountId32, - } - impl ::subxt::Event for IndexFrozen { - const PALLET: &'static str = "Indices"; - const EVENT: &'static str = "IndexFrozen"; - } - } - pub mod storage { - use super::runtime_types; - pub struct Accounts<'a>(pub &'a ::core::primitive::u32); - impl ::subxt::StorageEntry for Accounts<'_> { - const PALLET: &'static str = "Indices"; - const STORAGE: &'static str = "Accounts"; - type Value = ( - ::subxt::sp_core::crypto::AccountId32, - ::core::primitive::u128, - ::core::primitive::bool, - ); - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( - &self.0, - ::subxt::StorageHasher::Blake2_128Concat, - )]) - } - } - pub struct StorageApi<'a, T: ::subxt::Config> { - client: &'a ::subxt::Client, - } - impl<'a, T: ::subxt::Config> StorageApi<'a, T> { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { client } - } - #[doc = " The lookup from index to account."] - pub async fn accounts( - &self, - _0: &::core::primitive::u32, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::core::option::Option<( - ::subxt::sp_core::crypto::AccountId32, - ::core::primitive::u128, - ::core::primitive::bool, - )>, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 105u8, 208u8, 81u8, 30u8, 157u8, 108u8, 22u8, 122u8, 152u8, 220u8, - 40u8, 97u8, 255u8, 166u8, 222u8, 11u8, 81u8, 245u8, 143u8, 79u8, 57u8, - 19u8, 174u8, 164u8, 220u8, 59u8, 77u8, 117u8, 39u8, 72u8, 251u8, 234u8, - ] - { - let entry = Accounts(_0); - self.client.storage().fetch(&entry, block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The lookup from index to account."] - pub async fn accounts_iter( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::subxt::KeyIter<'a, T, Accounts<'a>>, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? 
- == [ - 105u8, 208u8, 81u8, 30u8, 157u8, 108u8, 22u8, 122u8, 152u8, 220u8, - 40u8, 97u8, 255u8, 166u8, 222u8, 11u8, 81u8, 245u8, 143u8, 79u8, 57u8, - 19u8, 174u8, 164u8, 220u8, 59u8, 77u8, 117u8, 39u8, 72u8, 251u8, 234u8, - ] - { - self.client.storage().iter(block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - pub mod constants { - use super::runtime_types; - pub struct ConstantsApi<'a, T: ::subxt::Config> { - client: &'a ::subxt::Client, - } - impl<'a, T: ::subxt::Config> ConstantsApi<'a, T> { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { client } - } - #[doc = " The deposit needed for reserving an index."] - pub fn deposit( - &self, - ) -> ::core::result::Result<::core::primitive::u128, ::subxt::BasicError> - { - if self.client.metadata().constant_hash("Indices", "Deposit")? - == [ - 217u8, 97u8, 70u8, 109u8, 180u8, 214u8, 183u8, 67u8, 253u8, 148u8, - 245u8, 108u8, 187u8, 95u8, 0u8, 15u8, 167u8, 149u8, 163u8, 194u8, - 206u8, 220u8, 164u8, 101u8, 1u8, 99u8, 206u8, 165u8, 63u8, 141u8, - 109u8, 1u8, - ] - { - let pallet = self.client.metadata().pallet("Indices")?; - let constant = pallet.constant("Deposit")?; - let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; - Ok(value) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - } - pub mod balances { - use super::root_mod; - use super::runtime_types; - pub mod calls { - use super::root_mod; - use super::runtime_types; - type DispatchError = runtime_types::sp_runtime::DispatchError; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct Transfer { - pub dest: - ::subxt::sp_runtime::MultiAddress<::subxt::sp_core::crypto::AccountId32, ()>, - #[codec(compact)] - pub value: ::core::primitive::u128, - } - impl ::subxt::Call for Transfer { - const PALLET: &'static str = "Balances"; - const FUNCTION: &'static str = "transfer"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct SetBalance { - pub who: - ::subxt::sp_runtime::MultiAddress<::subxt::sp_core::crypto::AccountId32, ()>, - #[codec(compact)] - pub new_free: ::core::primitive::u128, - #[codec(compact)] - pub new_reserved: ::core::primitive::u128, - } - impl ::subxt::Call for SetBalance { - const PALLET: &'static str = "Balances"; - const FUNCTION: &'static str = "set_balance"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct ForceTransfer { - pub source: - ::subxt::sp_runtime::MultiAddress<::subxt::sp_core::crypto::AccountId32, ()>, - pub dest: - ::subxt::sp_runtime::MultiAddress<::subxt::sp_core::crypto::AccountId32, ()>, - #[codec(compact)] - pub value: ::core::primitive::u128, - } - impl ::subxt::Call for ForceTransfer { - const PALLET: &'static str = "Balances"; - const FUNCTION: &'static str = "force_transfer"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct TransferKeepAlive { - pub dest: - ::subxt::sp_runtime::MultiAddress<::subxt::sp_core::crypto::AccountId32, ()>, - #[codec(compact)] - pub value: ::core::primitive::u128, - } - impl ::subxt::Call for TransferKeepAlive { - const PALLET: &'static str = "Balances"; - const FUNCTION: &'static str = "transfer_keep_alive"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct TransferAll { - pub dest: - ::subxt::sp_runtime::MultiAddress<::subxt::sp_core::crypto::AccountId32, ()>, - pub keep_alive: 
::core::primitive::bool, - } - impl ::subxt::Call for TransferAll { - const PALLET: &'static str = "Balances"; - const FUNCTION: &'static str = "transfer_all"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct ForceUnreserve { - pub who: - ::subxt::sp_runtime::MultiAddress<::subxt::sp_core::crypto::AccountId32, ()>, - pub amount: ::core::primitive::u128, - } - impl ::subxt::Call for ForceUnreserve { - const PALLET: &'static str = "Balances"; - const FUNCTION: &'static str = "force_unreserve"; - } - pub struct TransactionApi<'a, T: ::subxt::Config, X> { - client: &'a ::subxt::Client, - marker: ::core::marker::PhantomData, - } - impl<'a, T, X> TransactionApi<'a, T, X> - where - T: ::subxt::Config, - X: ::subxt::extrinsic::ExtrinsicParams, - { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { - client, - marker: ::core::marker::PhantomData, - } - } - #[doc = "Transfer some liquid free balance to another account."] - #[doc = ""] - #[doc = "`transfer` will set the `FreeBalance` of the sender and receiver."] - #[doc = "If the sender's account is below the existential deposit as a result"] - #[doc = "of the transfer, the account will be reaped."] - #[doc = ""] - #[doc = "The dispatch origin for this call must be `Signed` by the transactor."] - #[doc = ""] - #[doc = "# "] - #[doc = "- Dependent on arguments but not critical, given proper implementations for input config"] - #[doc = " types. See related functions below."] - #[doc = "- It contains a limited number of reads and writes internally and no complex"] - #[doc = " computation."] - #[doc = ""] - #[doc = "Related functions:"] - #[doc = ""] - #[doc = " - `ensure_can_withdraw` is always called internally but has a bounded complexity."] - #[doc = " - Transferring balances to accounts that did not exist before will cause"] - #[doc = " `T::OnNewAccount::on_new_account` to be called."] - #[doc = " - Removing enough funds from an account will trigger `T::DustRemoval::on_unbalanced`."] - #[doc = " - `transfer_keep_alive` works the same way as `transfer`, but has an additional check"] - #[doc = " that the transfer will not kill the origin account."] - #[doc = "---------------------------------"] - #[doc = "- Origin account is already in memory, so no DB operations for them."] - #[doc = "# "] - pub fn transfer( - &self, - dest: ::subxt::sp_runtime::MultiAddress< - ::subxt::sp_core::crypto::AccountId32, - (), - >, - value: ::core::primitive::u128, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - Transfer, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 250u8, 8u8, 164u8, 186u8, 80u8, 220u8, 134u8, 247u8, 142u8, 121u8, - 34u8, 22u8, 169u8, 39u8, 6u8, 93u8, 72u8, 47u8, 44u8, 107u8, 9u8, 98u8, - 203u8, 190u8, 136u8, 55u8, 251u8, 78u8, 216u8, 150u8, 98u8, 118u8, - ] - { - let call = Transfer { dest, value }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Set the balances of a given account."] - #[doc = ""] - #[doc = "This will alter `FreeBalance` and `ReservedBalance` in storage. 
it will"] - #[doc = "also alter the total issuance of the system (`TotalIssuance`) appropriately."] - #[doc = "If the new free or reserved balance is below the existential deposit,"] - #[doc = "it will reset the account nonce (`frame_system::AccountNonce`)."] - #[doc = ""] - #[doc = "The dispatch origin for this call is `root`."] - pub fn set_balance( - &self, - who: ::subxt::sp_runtime::MultiAddress< - ::subxt::sp_core::crypto::AccountId32, - (), - >, - new_free: ::core::primitive::u128, - new_reserved: ::core::primitive::u128, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - SetBalance, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 232u8, 6u8, 27u8, 131u8, 163u8, 72u8, 148u8, 197u8, 14u8, 239u8, 94u8, - 1u8, 32u8, 94u8, 17u8, 14u8, 123u8, 82u8, 39u8, 233u8, 77u8, 20u8, - 40u8, 139u8, 222u8, 137u8, 103u8, 18u8, 126u8, 63u8, 200u8, 149u8, - ] - { - let call = SetBalance { - who, - new_free, - new_reserved, - }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Exactly as `transfer`, except the origin must be root and the source account may be"] - #[doc = "specified."] - #[doc = "# "] - #[doc = "- Same as transfer, but additional read and write because the source account is not"] - #[doc = " assumed to be in the overlay."] - #[doc = "# "] - pub fn force_transfer( - &self, - source: ::subxt::sp_runtime::MultiAddress< - ::subxt::sp_core::crypto::AccountId32, - (), - >, - dest: ::subxt::sp_runtime::MultiAddress< - ::subxt::sp_core::crypto::AccountId32, - (), - >, - value: ::core::primitive::u128, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - ForceTransfer, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 120u8, 66u8, 111u8, 84u8, 176u8, 241u8, 214u8, 118u8, 219u8, 75u8, - 127u8, 222u8, 45u8, 33u8, 204u8, 147u8, 126u8, 214u8, 101u8, 190u8, - 37u8, 37u8, 159u8, 166u8, 61u8, 143u8, 22u8, 32u8, 15u8, 83u8, 221u8, - 230u8, - ] - { - let call = ForceTransfer { - source, - dest, - value, - }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Same as the [`transfer`] call, but with a check that the transfer will not kill the"] - #[doc = "origin account."] - #[doc = ""] - #[doc = "99% of the time you want [`transfer`] instead."] - #[doc = ""] - #[doc = "[`transfer`]: struct.Pallet.html#method.transfer"] - pub fn transfer_keep_alive( - &self, - dest: ::subxt::sp_runtime::MultiAddress< - ::subxt::sp_core::crypto::AccountId32, - (), - >, - value: ::core::primitive::u128, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - TransferKeepAlive, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? 
- == [ - 111u8, 233u8, 125u8, 71u8, 223u8, 141u8, 112u8, 94u8, 157u8, 11u8, - 88u8, 7u8, 239u8, 145u8, 247u8, 183u8, 245u8, 87u8, 157u8, 35u8, 49u8, - 91u8, 54u8, 103u8, 101u8, 76u8, 110u8, 94u8, 81u8, 170u8, 153u8, 209u8, - ] - { - let call = TransferKeepAlive { dest, value }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Transfer the entire transferable balance from the caller account."] - #[doc = ""] - #[doc = "NOTE: This function only attempts to transfer _transferable_ balances. This means that"] - #[doc = "any locked, reserved, or existential deposits (when `keep_alive` is `true`), will not be"] - #[doc = "transferred by this function. To ensure that this function results in a killed account,"] - #[doc = "you might need to prepare the account by removing any reference counters, storage"] - #[doc = "deposits, etc..."] - #[doc = ""] - #[doc = "The dispatch origin of this call must be Signed."] - #[doc = ""] - #[doc = "- `dest`: The recipient of the transfer."] - #[doc = "- `keep_alive`: A boolean to determine if the `transfer_all` operation should send all"] - #[doc = " of the funds the account has, causing the sender account to be killed (false), or"] - #[doc = " transfer everything except at least the existential deposit, which will guarantee to"] - #[doc = " keep the sender account alive (true). # "] - #[doc = "- O(1). Just like transfer, but reading the user's transferable balance first."] - #[doc = " #"] - pub fn transfer_all( - &self, - dest: ::subxt::sp_runtime::MultiAddress< - ::subxt::sp_core::crypto::AccountId32, - (), - >, - keep_alive: ::core::primitive::bool, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - TransferAll, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 240u8, 165u8, 185u8, 144u8, 24u8, 149u8, 15u8, 46u8, 60u8, 147u8, 19u8, - 187u8, 96u8, 24u8, 150u8, 53u8, 151u8, 232u8, 200u8, 164u8, 176u8, - 167u8, 8u8, 23u8, 63u8, 135u8, 68u8, 110u8, 5u8, 21u8, 35u8, 78u8, - ] - { - let call = TransferAll { dest, keep_alive }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Unreserve some balance from a user by force."] - #[doc = ""] - #[doc = "Can only be called by ROOT."] - pub fn force_unreserve( - &self, - who: ::subxt::sp_runtime::MultiAddress< - ::subxt::sp_core::crypto::AccountId32, - (), - >, - amount: ::core::primitive::u128, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - ForceUnreserve, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? 
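A sketch of preparing a `Balances::transfer_all` call as documented above, with the destination wrapped in `MultiAddress::Id`. `relay_runtime` is a hypothetical module name, and the `ExtrinsicParams<T>` bound is assumed to match the generated impl.

fn prepare_transfer_all<'a, T, X>(
    calls: &relay_runtime::balances::calls::TransactionApi<'a, T, X>,
    dest: ::subxt::sp_core::crypto::AccountId32,
) -> Result<(), ::subxt::BasicError>
where
    T: ::subxt::Config,
    X: ::subxt::extrinsic::ExtrinsicParams<T>,
{
    // Per the doc comment above: `keep_alive = true` transfers everything
    // except the existential deposit so the sender stays alive; `false`
    // drains the account and lets it be reaped.
    let dest = ::subxt::sp_runtime::MultiAddress::Id(dest);
    let _extrinsic = calls.transfer_all(dest, true)?;
    Ok(())
}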
- == [ - 106u8, 42u8, 48u8, 136u8, 41u8, 155u8, 214u8, 112u8, 99u8, 122u8, - 202u8, 250u8, 95u8, 60u8, 182u8, 13u8, 25u8, 149u8, 212u8, 212u8, - 247u8, 191u8, 130u8, 95u8, 84u8, 252u8, 252u8, 197u8, 244u8, 149u8, - 103u8, 67u8, - ] - { - let call = ForceUnreserve { who, amount }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - pub type Event = runtime_types::pallet_balances::pallet::Event; - pub mod events { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "An account was created with some free balance."] - pub struct Endowed { - pub account: ::subxt::sp_core::crypto::AccountId32, - pub free_balance: ::core::primitive::u128, - } - impl ::subxt::Event for Endowed { - const PALLET: &'static str = "Balances"; - const EVENT: &'static str = "Endowed"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "An account was removed whose balance was non-zero but below ExistentialDeposit,"] - #[doc = "resulting in an outright loss."] - pub struct DustLost { - pub account: ::subxt::sp_core::crypto::AccountId32, - pub amount: ::core::primitive::u128, - } - impl ::subxt::Event for DustLost { - const PALLET: &'static str = "Balances"; - const EVENT: &'static str = "DustLost"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "Transfer succeeded."] - pub struct Transfer { - pub from: ::subxt::sp_core::crypto::AccountId32, - pub to: ::subxt::sp_core::crypto::AccountId32, - pub amount: ::core::primitive::u128, - } - impl ::subxt::Event for Transfer { - const PALLET: &'static str = "Balances"; - const EVENT: &'static str = "Transfer"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "A balance was set by root."] - pub struct BalanceSet { - pub who: ::subxt::sp_core::crypto::AccountId32, - pub free: ::core::primitive::u128, - pub reserved: ::core::primitive::u128, - } - impl ::subxt::Event for BalanceSet { - const PALLET: &'static str = "Balances"; - const EVENT: &'static str = "BalanceSet"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "Some balance was reserved (moved from free to reserved)."] - pub struct Reserved { - pub who: ::subxt::sp_core::crypto::AccountId32, - pub amount: ::core::primitive::u128, - } - impl ::subxt::Event for Reserved { - const PALLET: &'static str = "Balances"; - const EVENT: &'static str = "Reserved"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "Some balance was unreserved (moved from reserved to free)."] - pub struct Unreserved { - pub who: ::subxt::sp_core::crypto::AccountId32, - pub amount: ::core::primitive::u128, - } - impl ::subxt::Event for Unreserved { - const PALLET: &'static str = "Balances"; - const EVENT: &'static str = "Unreserved"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "Some balance was moved from the reserve of the first account to the second account."] - #[doc = "Final argument indicates the destination balance type."] - pub struct ReserveRepatriated { - pub from: ::subxt::sp_core::crypto::AccountId32, - pub to: ::subxt::sp_core::crypto::AccountId32, - pub amount: ::core::primitive::u128, - pub destination_status: - runtime_types::frame_support::traits::tokens::misc::BalanceStatus, - } - impl ::subxt::Event for ReserveRepatriated { - const 
PALLET: &'static str = "Balances"; - const EVENT: &'static str = "ReserveRepatriated"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "Some amount was deposited (e.g. for transaction fees)."] - pub struct Deposit { - pub who: ::subxt::sp_core::crypto::AccountId32, - pub amount: ::core::primitive::u128, - } - impl ::subxt::Event for Deposit { - const PALLET: &'static str = "Balances"; - const EVENT: &'static str = "Deposit"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "Some amount was withdrawn from the account (e.g. for transaction fees)."] - pub struct Withdraw { - pub who: ::subxt::sp_core::crypto::AccountId32, - pub amount: ::core::primitive::u128, - } - impl ::subxt::Event for Withdraw { - const PALLET: &'static str = "Balances"; - const EVENT: &'static str = "Withdraw"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "Some amount was removed from the account (e.g. for misbehavior)."] - pub struct Slashed { - pub who: ::subxt::sp_core::crypto::AccountId32, - pub amount: ::core::primitive::u128, - } - impl ::subxt::Event for Slashed { - const PALLET: &'static str = "Balances"; - const EVENT: &'static str = "Slashed"; - } - } - pub mod storage { - use super::runtime_types; - pub struct TotalIssuance; - impl ::subxt::StorageEntry for TotalIssuance { - const PALLET: &'static str = "Balances"; - const STORAGE: &'static str = "TotalIssuance"; - type Value = ::core::primitive::u128; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct Account<'a>(pub &'a ::subxt::sp_core::crypto::AccountId32); - impl ::subxt::StorageEntry for Account<'_> { - const PALLET: &'static str = "Balances"; - const STORAGE: &'static str = "Account"; - type Value = runtime_types::pallet_balances::AccountData<::core::primitive::u128>; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( - &self.0, - ::subxt::StorageHasher::Blake2_128Concat, - )]) - } - } - pub struct Locks<'a>(pub &'a ::subxt::sp_core::crypto::AccountId32); - impl ::subxt::StorageEntry for Locks<'_> { - const PALLET: &'static str = "Balances"; - const STORAGE: &'static str = "Locks"; - type Value = - runtime_types::frame_support::storage::weak_bounded_vec::WeakBoundedVec< - runtime_types::pallet_balances::BalanceLock<::core::primitive::u128>, - >; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( - &self.0, - ::subxt::StorageHasher::Blake2_128Concat, - )]) - } - } - pub struct Reserves<'a>(pub &'a ::subxt::sp_core::crypto::AccountId32); - impl ::subxt::StorageEntry for Reserves<'_> { - const PALLET: &'static str = "Balances"; - const STORAGE: &'static str = "Reserves"; - type Value = runtime_types::frame_support::storage::bounded_vec::BoundedVec< - runtime_types::pallet_balances::ReserveData< - [::core::primitive::u8; 8usize], - ::core::primitive::u128, - >, - >; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( - &self.0, - ::subxt::StorageHasher::Blake2_128Concat, - )]) - } - } - pub struct StorageVersion; - impl ::subxt::StorageEntry for StorageVersion { - const PALLET: &'static str = "Balances"; - const STORAGE: &'static str = "StorageVersion"; - type Value = runtime_types::pallet_balances::Releases; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - 
pub struct StorageApi<'a, T: ::subxt::Config> { - client: &'a ::subxt::Client, - } - impl<'a, T: ::subxt::Config> StorageApi<'a, T> { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { client } - } - #[doc = " The total units issued in the system."] - pub async fn total_issuance( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result<::core::primitive::u128, ::subxt::BasicError> - { - if self.client.metadata().storage_hash::()? - == [ - 1u8, 206u8, 252u8, 237u8, 6u8, 30u8, 20u8, 232u8, 164u8, 115u8, 51u8, - 156u8, 156u8, 206u8, 241u8, 187u8, 44u8, 84u8, 25u8, 164u8, 235u8, - 20u8, 86u8, 242u8, 124u8, 23u8, 28u8, 140u8, 26u8, 73u8, 231u8, 51u8, - ] - { - let entry = TotalIssuance; - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The Balances pallet example of storing the balance of an account."] - #[doc = ""] - #[doc = " # Example"] - #[doc = ""] - #[doc = " ```nocompile"] - #[doc = " impl pallet_balances::Config for Runtime {"] - #[doc = " type AccountStore = StorageMapShim, frame_system::Provider, AccountId, Self::AccountData>"] - #[doc = " }"] - #[doc = " ```"] - #[doc = ""] - #[doc = " You can also store the balance of an account in the `System` pallet."] - #[doc = ""] - #[doc = " # Example"] - #[doc = ""] - #[doc = " ```nocompile"] - #[doc = " impl pallet_balances::Config for Runtime {"] - #[doc = " type AccountStore = System"] - #[doc = " }"] - #[doc = " ```"] - #[doc = ""] - #[doc = " But this comes with tradeoffs, storing account balances in the system pallet stores"] - #[doc = " `frame_system` data alongside the account data contrary to storing account balances in the"] - #[doc = " `Balances` pallet, which uses a `StorageMap` to store balances data only."] - #[doc = " NOTE: This is only used in the case that this pallet is used to store balances."] - pub async fn account( - &self, - _0: &::subxt::sp_core::crypto::AccountId32, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - runtime_types::pallet_balances::AccountData<::core::primitive::u128>, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? 
- == [ - 129u8, 169u8, 171u8, 206u8, 229u8, 178u8, 69u8, 118u8, 199u8, 64u8, - 254u8, 67u8, 16u8, 154u8, 160u8, 197u8, 177u8, 161u8, 148u8, 199u8, - 78u8, 219u8, 187u8, 83u8, 99u8, 110u8, 207u8, 252u8, 243u8, 39u8, 46u8, - 106u8, - ] - { - let entry = Account(_0); - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The Balances pallet example of storing the balance of an account."] - #[doc = ""] - #[doc = " # Example"] - #[doc = ""] - #[doc = " ```nocompile"] - #[doc = " impl pallet_balances::Config for Runtime {"] - #[doc = " type AccountStore = StorageMapShim, frame_system::Provider, AccountId, Self::AccountData>"] - #[doc = " }"] - #[doc = " ```"] - #[doc = ""] - #[doc = " You can also store the balance of an account in the `System` pallet."] - #[doc = ""] - #[doc = " # Example"] - #[doc = ""] - #[doc = " ```nocompile"] - #[doc = " impl pallet_balances::Config for Runtime {"] - #[doc = " type AccountStore = System"] - #[doc = " }"] - #[doc = " ```"] - #[doc = ""] - #[doc = " But this comes with tradeoffs, storing account balances in the system pallet stores"] - #[doc = " `frame_system` data alongside the account data contrary to storing account balances in the"] - #[doc = " `Balances` pallet, which uses a `StorageMap` to store balances data only."] - #[doc = " NOTE: This is only used in the case that this pallet is used to store balances."] - pub async fn account_iter( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result<::subxt::KeyIter<'a, T, Account<'a>>, ::subxt::BasicError> - { - if self.client.metadata().storage_hash::()? - == [ - 129u8, 169u8, 171u8, 206u8, 229u8, 178u8, 69u8, 118u8, 199u8, 64u8, - 254u8, 67u8, 16u8, 154u8, 160u8, 197u8, 177u8, 161u8, 148u8, 199u8, - 78u8, 219u8, 187u8, 83u8, 99u8, 110u8, 207u8, 252u8, 243u8, 39u8, 46u8, - 106u8, - ] - { - self.client.storage().iter(block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Any liquidity locks on some account balances."] - #[doc = " NOTE: Should only be accessed when setting, changing and freeing a lock."] - pub async fn locks( - &self, - _0: &::subxt::sp_core::crypto::AccountId32, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - runtime_types::frame_support::storage::weak_bounded_vec::WeakBoundedVec< - runtime_types::pallet_balances::BalanceLock<::core::primitive::u128>, - >, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 31u8, 76u8, 213u8, 60u8, 86u8, 11u8, 155u8, 151u8, 33u8, 212u8, 74u8, - 89u8, 174u8, 74u8, 195u8, 107u8, 29u8, 163u8, 178u8, 34u8, 209u8, 8u8, - 201u8, 237u8, 77u8, 99u8, 205u8, 212u8, 236u8, 132u8, 2u8, 252u8, - ] - { - let entry = Locks(_0); - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Any liquidity locks on some account balances."] - #[doc = " NOTE: Should only be accessed when setting, changing and freeing a lock."] - pub async fn locks_iter( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result<::subxt::KeyIter<'a, T, Locks<'a>>, ::subxt::BasicError> - { - if self.client.metadata().storage_hash::()? 
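A sketch of reading an account's free balance through the generated `Balances` storage accessor; `relay_runtime` is hypothetical, and the `free` field mirrors `pallet_balances::AccountData`.

async fn free_balance<T: ::subxt::Config>(
    client: &::subxt::Client<T>,
    who: &::subxt::sp_core::crypto::AccountId32,
) -> Result<u128, ::subxt::BasicError> {
    let storage = relay_runtime::balances::storage::StorageApi::new(client);
    // `account` uses `fetch_or_default`, so an address with no stored entry
    // simply decodes as the default `AccountData`, i.e. a zero balance.
    let data = storage.account(who, None).await?;
    Ok(data.free)
}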
- == [ - 31u8, 76u8, 213u8, 60u8, 86u8, 11u8, 155u8, 151u8, 33u8, 212u8, 74u8, - 89u8, 174u8, 74u8, 195u8, 107u8, 29u8, 163u8, 178u8, 34u8, 209u8, 8u8, - 201u8, 237u8, 77u8, 99u8, 205u8, 212u8, 236u8, 132u8, 2u8, 252u8, - ] - { - self.client.storage().iter(block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Named reserves on some account balances."] - pub async fn reserves( - &self, - _0: &::subxt::sp_core::crypto::AccountId32, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - runtime_types::frame_support::storage::bounded_vec::BoundedVec< - runtime_types::pallet_balances::ReserveData< - [::core::primitive::u8; 8usize], - ::core::primitive::u128, - >, - >, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 103u8, 6u8, 69u8, 151u8, 81u8, 40u8, 146u8, 113u8, 56u8, 239u8, 104u8, - 31u8, 168u8, 242u8, 141u8, 121u8, 213u8, 213u8, 114u8, 63u8, 62u8, - 47u8, 91u8, 119u8, 57u8, 91u8, 95u8, 81u8, 19u8, 208u8, 59u8, 146u8, - ] - { - let entry = Reserves(_0); - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Named reserves on some account balances."] - pub async fn reserves_iter( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::subxt::KeyIter<'a, T, Reserves<'a>>, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 103u8, 6u8, 69u8, 151u8, 81u8, 40u8, 146u8, 113u8, 56u8, 239u8, 104u8, - 31u8, 168u8, 242u8, 141u8, 121u8, 213u8, 213u8, 114u8, 63u8, 62u8, - 47u8, 91u8, 119u8, 57u8, 91u8, 95u8, 81u8, 19u8, 208u8, 59u8, 146u8, - ] - { - self.client.storage().iter(block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Storage version of the pallet."] - #[doc = ""] - #[doc = " This is set to v2.0.0 for new networks."] - pub async fn storage_version( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - runtime_types::pallet_balances::Releases, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 135u8, 96u8, 28u8, 234u8, 124u8, 212u8, 56u8, 140u8, 40u8, 101u8, - 235u8, 128u8, 136u8, 221u8, 182u8, 81u8, 17u8, 9u8, 184u8, 228u8, - 174u8, 165u8, 200u8, 162u8, 214u8, 178u8, 227u8, 72u8, 34u8, 5u8, - 173u8, 96u8, - ] - { - let entry = StorageVersion; - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - pub mod constants { - use super::runtime_types; - pub struct ConstantsApi<'a, T: ::subxt::Config> { - client: &'a ::subxt::Client, - } - impl<'a, T: ::subxt::Config> ConstantsApi<'a, T> { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { client } - } - #[doc = " The minimum amount required to keep an account open."] - pub fn existential_deposit( - &self, - ) -> ::core::result::Result<::core::primitive::u128, ::subxt::BasicError> - { - if self - .client - .metadata() - .constant_hash("Balances", "ExistentialDeposit")? 
- == [ - 100u8, 197u8, 144u8, 241u8, 166u8, 142u8, 204u8, 246u8, 114u8, 229u8, - 145u8, 5u8, 133u8, 180u8, 23u8, 117u8, 117u8, 204u8, 228u8, 32u8, 70u8, - 243u8, 110u8, 36u8, 218u8, 106u8, 47u8, 136u8, 193u8, 46u8, 121u8, - 242u8, - ] - { - let pallet = self.client.metadata().pallet("Balances")?; - let constant = pallet.constant("ExistentialDeposit")?; - let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; - Ok(value) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The maximum number of locks that should exist on an account."] - #[doc = " Not strictly enforced, but used for weight estimation."] - pub fn max_locks( - &self, - ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> - { - if self - .client - .metadata() - .constant_hash("Balances", "MaxLocks")? - == [ - 250u8, 58u8, 19u8, 15u8, 35u8, 113u8, 227u8, 89u8, 39u8, 75u8, 21u8, - 108u8, 202u8, 32u8, 163u8, 167u8, 207u8, 233u8, 69u8, 151u8, 53u8, - 164u8, 230u8, 16u8, 14u8, 22u8, 172u8, 46u8, 36u8, 216u8, 29u8, 1u8, - ] - { - let pallet = self.client.metadata().pallet("Balances")?; - let constant = pallet.constant("MaxLocks")?; - let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; - Ok(value) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The maximum number of named reserves that can exist on an account."] - pub fn max_reserves( - &self, - ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> - { - if self - .client - .metadata() - .constant_hash("Balances", "MaxReserves")? - == [ - 24u8, 30u8, 77u8, 89u8, 216u8, 114u8, 140u8, 11u8, 127u8, 252u8, 130u8, - 203u8, 4u8, 55u8, 62u8, 240u8, 65u8, 182u8, 187u8, 189u8, 140u8, 6u8, - 177u8, 216u8, 159u8, 108u8, 18u8, 73u8, 95u8, 67u8, 62u8, 50u8, - ] - { - let pallet = self.client.metadata().pallet("Balances")?; - let constant = pallet.constant("MaxReserves")?; - let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; - Ok(value) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - } - pub mod transaction_payment { - use super::root_mod; - use super::runtime_types; - pub mod storage { - use super::runtime_types; - pub struct NextFeeMultiplier; - impl ::subxt::StorageEntry for NextFeeMultiplier { - const PALLET: &'static str = "TransactionPayment"; - const STORAGE: &'static str = "NextFeeMultiplier"; - type Value = runtime_types::sp_arithmetic::fixed_point::FixedU128; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct StorageVersion; - impl ::subxt::StorageEntry for StorageVersion { - const PALLET: &'static str = "TransactionPayment"; - const STORAGE: &'static str = "StorageVersion"; - type Value = runtime_types::pallet_transaction_payment::Releases; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct StorageApi<'a, T: ::subxt::Config> { - client: &'a ::subxt::Client, - } - impl<'a, T: ::subxt::Config> StorageApi<'a, T> { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { client } - } - pub async fn next_fee_multiplier( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - runtime_types::sp_arithmetic::fixed_point::FixedU128, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? 
- == [ - 88u8, 50u8, 72u8, 23u8, 241u8, 137u8, 135u8, 135u8, 33u8, 57u8, 241u8, - 247u8, 212u8, 19u8, 116u8, 144u8, 60u8, 2u8, 6u8, 191u8, 190u8, 96u8, - 133u8, 199u8, 29u8, 132u8, 49u8, 121u8, 73u8, 116u8, 104u8, 141u8, - ] - { - let entry = NextFeeMultiplier; - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - pub async fn storage_version( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - runtime_types::pallet_transaction_payment::Releases, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 219u8, 243u8, 82u8, 176u8, 65u8, 5u8, 132u8, 114u8, 8u8, 82u8, 176u8, - 200u8, 97u8, 150u8, 177u8, 164u8, 166u8, 11u8, 34u8, 12u8, 12u8, 198u8, - 58u8, 191u8, 186u8, 221u8, 221u8, 119u8, 181u8, 253u8, 154u8, 228u8, - ] - { - let entry = StorageVersion; - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - pub mod constants { - use super::runtime_types; - pub struct ConstantsApi<'a, T: ::subxt::Config> { - client: &'a ::subxt::Client, - } - impl<'a, T: ::subxt::Config> ConstantsApi<'a, T> { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { client } - } - #[doc = " A fee mulitplier for `Operational` extrinsics to compute \"virtual tip\" to boost their"] - #[doc = " `priority`"] - #[doc = ""] - #[doc = " This value is multipled by the `final_fee` to obtain a \"virtual tip\" that is later"] - #[doc = " added to a tip component in regular `priority` calculations."] - #[doc = " It means that a `Normal` transaction can front-run a similarly-sized `Operational`"] - #[doc = " extrinsic (with no tip), by including a tip value greater than the virtual tip."] - #[doc = ""] - #[doc = " ```rust,ignore"] - #[doc = " // For `Normal`"] - #[doc = " let priority = priority_calc(tip);"] - #[doc = ""] - #[doc = " // For `Operational`"] - #[doc = " let virtual_tip = (inclusion_fee + tip) * OperationalFeeMultiplier;"] - #[doc = " let priority = priority_calc(tip + virtual_tip);"] - #[doc = " ```"] - #[doc = ""] - #[doc = " Note that since we use `final_fee` the multiplier applies also to the regular `tip`"] - #[doc = " sent with the transaction. So, not only does the transaction get a priority bump based"] - #[doc = " on the `inclusion_fee`, but we also amplify the impact of tips applied to `Operational`"] - #[doc = " transactions."] - pub fn operational_fee_multiplier( - &self, - ) -> ::core::result::Result<::core::primitive::u8, ::subxt::BasicError> - { - if self - .client - .metadata() - .constant_hash("TransactionPayment", "OperationalFeeMultiplier")? 
- == [ - 161u8, 232u8, 150u8, 43u8, 106u8, 83u8, 56u8, 248u8, 54u8, 123u8, - 244u8, 73u8, 5u8, 49u8, 245u8, 150u8, 70u8, 92u8, 158u8, 207u8, 127u8, - 115u8, 211u8, 21u8, 24u8, 136u8, 89u8, 44u8, 151u8, 211u8, 235u8, - 196u8, - ] - { - let pallet = self.client.metadata().pallet("TransactionPayment")?; - let constant = pallet.constant("OperationalFeeMultiplier")?; - let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; - Ok(value) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The polynomial that is applied in order to derive fee from weight."] - pub fn weight_to_fee( - &self, - ) -> ::core::result::Result< - ::std::vec::Vec< - runtime_types::frame_support::weights::WeightToFeeCoefficient< - ::core::primitive::u128, - >, - >, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .constant_hash("TransactionPayment", "WeightToFee")? - == [ - 45u8, 79u8, 182u8, 151u8, 56u8, 94u8, 151u8, 17u8, 186u8, 52u8, 33u8, - 209u8, 168u8, 84u8, 55u8, 203u8, 54u8, 162u8, 132u8, 64u8, 111u8, - 141u8, 19u8, 218u8, 142u8, 4u8, 246u8, 166u8, 126u8, 79u8, 11u8, 132u8, - ] - { - let pallet = self.client.metadata().pallet("TransactionPayment")?; - let constant = pallet.constant("WeightToFee")?; - let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; - Ok(value) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The polynomial that is applied in order to derive fee from length."] - pub fn length_to_fee( - &self, - ) -> ::core::result::Result< - ::std::vec::Vec< - runtime_types::frame_support::weights::WeightToFeeCoefficient< - ::core::primitive::u128, - >, - >, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .constant_hash("TransactionPayment", "LengthToFee")? 
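A sketch of the "virtual tip" arithmetic described in the `OperationalFeeMultiplier` doc comment above, with the multiplier read through the generated constants accessor (`relay_runtime` is a hypothetical module name).

fn operational_priority_tip<T: ::subxt::Config>(
    client: &::subxt::Client<T>,
    inclusion_fee: u128,
    tip: u128,
) -> Result<u128, ::subxt::BasicError> {
    let constants =
        relay_runtime::transaction_payment::constants::ConstantsApi::new(client);
    // Mirrors the doc comment: `Operational` extrinsics get a virtual tip of
    // `(inclusion_fee + tip) * OperationalFeeMultiplier` added on top of the
    // regular priority calculation.
    let multiplier = u128::from(constants.operational_fee_multiplier()?);
    Ok(inclusion_fee.saturating_add(tip).saturating_mul(multiplier))
}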
- == [ - 247u8, 235u8, 15u8, 82u8, 189u8, 42u8, 103u8, 179u8, 146u8, 133u8, - 145u8, 191u8, 59u8, 45u8, 132u8, 195u8, 181u8, 238u8, 176u8, 137u8, - 82u8, 126u8, 92u8, 175u8, 9u8, 189u8, 137u8, 94u8, 165u8, 150u8, 25u8, - 81u8, - ] - { - let pallet = self.client.metadata().pallet("TransactionPayment")?; - let constant = pallet.constant("LengthToFee")?; - let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; - Ok(value) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - } - pub mod authorship { - use super::root_mod; - use super::runtime_types; - pub mod calls { - use super::root_mod; - use super::runtime_types; - type DispatchError = runtime_types::sp_runtime::DispatchError; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct SetUncles { - pub new_uncles: ::std::vec::Vec< - runtime_types::sp_runtime::generic::header::Header< - ::core::primitive::u32, - runtime_types::sp_runtime::traits::BlakeTwo256, - >, - >, - } - impl ::subxt::Call for SetUncles { - const PALLET: &'static str = "Authorship"; - const FUNCTION: &'static str = "set_uncles"; - } - pub struct TransactionApi<'a, T: ::subxt::Config, X> { - client: &'a ::subxt::Client, - marker: ::core::marker::PhantomData, - } - impl<'a, T, X> TransactionApi<'a, T, X> - where - T: ::subxt::Config, - X: ::subxt::extrinsic::ExtrinsicParams, - { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { - client, - marker: ::core::marker::PhantomData, - } - } - #[doc = "Provide a set of uncles."] - pub fn set_uncles( - &self, - new_uncles: ::std::vec::Vec< - runtime_types::sp_runtime::generic::header::Header< - ::core::primitive::u32, - runtime_types::sp_runtime::traits::BlakeTwo256, - >, - >, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - SetUncles, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? 
- == [ - 77u8, 73u8, 220u8, 106u8, 126u8, 48u8, 20u8, 254u8, 87u8, 185u8, 110u8, - 253u8, 250u8, 10u8, 89u8, 77u8, 72u8, 90u8, 244u8, 27u8, 125u8, 43u8, - 58u8, 217u8, 112u8, 98u8, 233u8, 35u8, 194u8, 214u8, 183u8, 36u8, - ] - { - let call = SetUncles { new_uncles }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - pub mod storage { - use super::runtime_types; - pub struct Uncles; - impl ::subxt::StorageEntry for Uncles { - const PALLET: &'static str = "Authorship"; - const STORAGE: &'static str = "Uncles"; - type Value = ::std::vec::Vec< - runtime_types::pallet_authorship::UncleEntryItem< - ::core::primitive::u32, - ::subxt::sp_core::H256, - ::subxt::sp_core::crypto::AccountId32, - >, - >; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct Author; - impl ::subxt::StorageEntry for Author { - const PALLET: &'static str = "Authorship"; - const STORAGE: &'static str = "Author"; - type Value = ::subxt::sp_core::crypto::AccountId32; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct DidSetUncles; - impl ::subxt::StorageEntry for DidSetUncles { - const PALLET: &'static str = "Authorship"; - const STORAGE: &'static str = "DidSetUncles"; - type Value = ::core::primitive::bool; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct StorageApi<'a, T: ::subxt::Config> { - client: &'a ::subxt::Client, - } - impl<'a, T: ::subxt::Config> StorageApi<'a, T> { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { client } - } - #[doc = " Uncles"] - pub async fn uncles( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::std::vec::Vec< - runtime_types::pallet_authorship::UncleEntryItem< - ::core::primitive::u32, - ::subxt::sp_core::H256, - ::subxt::sp_core::crypto::AccountId32, - >, - >, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 234u8, 33u8, 171u8, 180u8, 154u8, 148u8, 191u8, 3u8, 191u8, 250u8, - 235u8, 39u8, 70u8, 41u8, 146u8, 155u8, 118u8, 154u8, 122u8, 27u8, - 126u8, 251u8, 2u8, 157u8, 187u8, 222u8, 120u8, 240u8, 21u8, 45u8, - 222u8, 13u8, - ] - { - let entry = Uncles; - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Author of current block."] - pub async fn author( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::core::option::Option<::subxt::sp_core::crypto::AccountId32>, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 191u8, 57u8, 3u8, 242u8, 220u8, 123u8, 103u8, 215u8, 149u8, 120u8, - 20u8, 139u8, 146u8, 234u8, 180u8, 105u8, 129u8, 128u8, 114u8, 147u8, - 114u8, 236u8, 23u8, 21u8, 15u8, 250u8, 180u8, 19u8, 177u8, 145u8, 77u8, - 228u8, - ] - { - let entry = Author; - self.client.storage().fetch(&entry, block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Whether uncles were already set in this block."] - pub async fn did_set_uncles( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result<::core::primitive::bool, ::subxt::BasicError> - { - if self.client.metadata().storage_hash::()? 
- == [ - 64u8, 3u8, 208u8, 187u8, 50u8, 45u8, 37u8, 88u8, 163u8, 226u8, 37u8, - 126u8, 232u8, 107u8, 156u8, 187u8, 29u8, 15u8, 53u8, 46u8, 28u8, 73u8, - 83u8, 123u8, 14u8, 244u8, 243u8, 43u8, 245u8, 143u8, 15u8, 115u8, - ] - { - let entry = DidSetUncles; - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - pub mod constants { - use super::runtime_types; - pub struct ConstantsApi<'a, T: ::subxt::Config> { - client: &'a ::subxt::Client, - } - impl<'a, T: ::subxt::Config> ConstantsApi<'a, T> { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { client } - } - #[doc = " The number of blocks back we should accept uncles."] - #[doc = " This means that we will deal with uncle-parents that are"] - #[doc = " `UncleGenerations + 1` before `now`."] - pub fn uncle_generations( - &self, - ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> - { - if self - .client - .metadata() - .constant_hash("Authorship", "UncleGenerations")? - == [ - 0u8, 72u8, 57u8, 175u8, 222u8, 143u8, 191u8, 33u8, 163u8, 157u8, 202u8, - 83u8, 186u8, 103u8, 162u8, 103u8, 227u8, 158u8, 239u8, 212u8, 205u8, - 193u8, 226u8, 138u8, 5u8, 220u8, 221u8, 42u8, 7u8, 146u8, 173u8, 205u8, - ] - { - let pallet = self.client.metadata().pallet("Authorship")?; - let constant = pallet.constant("UncleGenerations")?; - let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; - Ok(value) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - } - pub mod offences { - use super::root_mod; - use super::runtime_types; - pub type Event = runtime_types::pallet_offences::pallet::Event; - pub mod events { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "There is an offence reported of the given `kind` happened at the `session_index` and"] - #[doc = "(kind-specific) time slot. 
This event is not deposited for duplicate slashes."] - #[doc = "\\[kind, timeslot\\]."] - pub struct Offence { - pub kind: [::core::primitive::u8; 16usize], - pub timeslot: ::std::vec::Vec<::core::primitive::u8>, - } - impl ::subxt::Event for Offence { - const PALLET: &'static str = "Offences"; - const EVENT: &'static str = "Offence"; - } - } - pub mod storage { - use super::runtime_types; - pub struct Reports<'a>(pub &'a ::subxt::sp_core::H256); - impl ::subxt::StorageEntry for Reports<'_> { - const PALLET: &'static str = "Offences"; - const STORAGE: &'static str = "Reports"; - type Value = runtime_types::sp_staking::offence::OffenceDetails< - ::subxt::sp_core::crypto::AccountId32, - (::subxt::sp_core::crypto::AccountId32, ()), - >; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( - &self.0, - ::subxt::StorageHasher::Twox64Concat, - )]) - } - } - pub struct ConcurrentReportsIndex<'a>( - pub &'a [::core::primitive::u8; 16usize], - pub &'a [::core::primitive::u8], - ); - impl ::subxt::StorageEntry for ConcurrentReportsIndex<'_> { - const PALLET: &'static str = "Offences"; - const STORAGE: &'static str = "ConcurrentReportsIndex"; - type Value = ::std::vec::Vec<::subxt::sp_core::H256>; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Map(vec![ - ::subxt::StorageMapKey::new(&self.0, ::subxt::StorageHasher::Twox64Concat), - ::subxt::StorageMapKey::new(&self.1, ::subxt::StorageHasher::Twox64Concat), - ]) - } - } - pub struct ReportsByKindIndex<'a>(pub &'a [::core::primitive::u8; 16usize]); - impl ::subxt::StorageEntry for ReportsByKindIndex<'_> { - const PALLET: &'static str = "Offences"; - const STORAGE: &'static str = "ReportsByKindIndex"; - type Value = ::std::vec::Vec<::core::primitive::u8>; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( - &self.0, - ::subxt::StorageHasher::Twox64Concat, - )]) - } - } - pub struct StorageApi<'a, T: ::subxt::Config> { - client: &'a ::subxt::Client, - } - impl<'a, T: ::subxt::Config> StorageApi<'a, T> { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { client } - } - #[doc = " The primary structure that holds all offence records keyed by report identifiers."] - pub async fn reports( - &self, - _0: &::subxt::sp_core::H256, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::core::option::Option< - runtime_types::sp_staking::offence::OffenceDetails< - ::subxt::sp_core::crypto::AccountId32, - (::subxt::sp_core::crypto::AccountId32, ()), - >, - >, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 232u8, 29u8, 235u8, 182u8, 72u8, 179u8, 168u8, 231u8, 177u8, 122u8, - 225u8, 193u8, 172u8, 163u8, 228u8, 219u8, 59u8, 210u8, 1u8, 11u8, - 181u8, 218u8, 26u8, 187u8, 176u8, 101u8, 212u8, 178u8, 70u8, 229u8, - 85u8, 205u8, - ] - { - let entry = Reports(_0); - self.client.storage().fetch(&entry, block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The primary structure that holds all offence records keyed by report identifiers."] - pub async fn reports_iter( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result<::subxt::KeyIter<'a, T, Reports<'a>>, ::subxt::BasicError> - { - if self.client.metadata().storage_hash::()? 
- == [ - 232u8, 29u8, 235u8, 182u8, 72u8, 179u8, 168u8, 231u8, 177u8, 122u8, - 225u8, 193u8, 172u8, 163u8, 228u8, 219u8, 59u8, 210u8, 1u8, 11u8, - 181u8, 218u8, 26u8, 187u8, 176u8, 101u8, 212u8, 178u8, 70u8, 229u8, - 85u8, 205u8, - ] - { - self.client.storage().iter(block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " A vector of reports of the same kind that happened at the same time slot."] - pub async fn concurrent_reports_index( - &self, - _0: &[::core::primitive::u8; 16usize], - _1: &[::core::primitive::u8], - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::std::vec::Vec<::subxt::sp_core::H256>, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .storage_hash::()? - == [ - 110u8, 182u8, 108u8, 15u8, 22u8, 226u8, 241u8, 98u8, 191u8, 37u8, - 135u8, 119u8, 88u8, 238u8, 202u8, 216u8, 221u8, 165u8, 144u8, 236u8, - 113u8, 49u8, 55u8, 18u8, 238u8, 238u8, 128u8, 210u8, 161u8, 134u8, - 130u8, 195u8, - ] - { - let entry = ConcurrentReportsIndex(_0, _1); - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " A vector of reports of the same kind that happened at the same time slot."] - pub async fn concurrent_reports_index_iter( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::subxt::KeyIter<'a, T, ConcurrentReportsIndex<'a>>, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .storage_hash::()? - == [ - 110u8, 182u8, 108u8, 15u8, 22u8, 226u8, 241u8, 98u8, 191u8, 37u8, - 135u8, 119u8, 88u8, 238u8, 202u8, 216u8, 221u8, 165u8, 144u8, 236u8, - 113u8, 49u8, 55u8, 18u8, 238u8, 238u8, 128u8, 210u8, 161u8, 134u8, - 130u8, 195u8, - ] - { - self.client.storage().iter(block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Enumerates all reports of a kind along with the time they happened."] - #[doc = ""] - #[doc = " All reports are sorted by the time of offence."] - #[doc = ""] - #[doc = " Note that the actual type of this mapping is `Vec`, this is because values of"] - #[doc = " different types are not supported at the moment so we are doing the manual serialization."] - pub async fn reports_by_kind_index( - &self, - _0: &[::core::primitive::u8; 16usize], - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::std::vec::Vec<::core::primitive::u8>, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .storage_hash::()? 
- == [ - 162u8, 66u8, 131u8, 48u8, 250u8, 237u8, 179u8, 214u8, 36u8, 137u8, - 226u8, 136u8, 120u8, 61u8, 215u8, 43u8, 164u8, 50u8, 91u8, 164u8, 20u8, - 96u8, 189u8, 100u8, 242u8, 106u8, 21u8, 136u8, 98u8, 215u8, 180u8, - 145u8, - ] - { - let entry = ReportsByKindIndex(_0); - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Enumerates all reports of a kind along with the time they happened."] - #[doc = ""] - #[doc = " All reports are sorted by the time of offence."] - #[doc = ""] - #[doc = " Note that the actual type of this mapping is `Vec`, this is because values of"] - #[doc = " different types are not supported at the moment so we are doing the manual serialization."] - pub async fn reports_by_kind_index_iter( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::subxt::KeyIter<'a, T, ReportsByKindIndex<'a>>, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .storage_hash::()? - == [ - 162u8, 66u8, 131u8, 48u8, 250u8, 237u8, 179u8, 214u8, 36u8, 137u8, - 226u8, 136u8, 120u8, 61u8, 215u8, 43u8, 164u8, 50u8, 91u8, 164u8, 20u8, - 96u8, 189u8, 100u8, 242u8, 106u8, 21u8, 136u8, 98u8, 215u8, 180u8, - 145u8, - ] - { - self.client.storage().iter(block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - } - pub mod historical { - use super::root_mod; - use super::runtime_types; - pub mod storage { - use super::runtime_types; - pub struct HistoricalSessions<'a>(pub &'a ::core::primitive::u32); - impl ::subxt::StorageEntry for HistoricalSessions<'_> { - const PALLET: &'static str = "Historical"; - const STORAGE: &'static str = "HistoricalSessions"; - type Value = (::subxt::sp_core::H256, ::core::primitive::u32); - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( - &self.0, - ::subxt::StorageHasher::Twox64Concat, - )]) - } - } - pub struct StoredRange; - impl ::subxt::StorageEntry for StoredRange { - const PALLET: &'static str = "Historical"; - const STORAGE: &'static str = "StoredRange"; - type Value = (::core::primitive::u32, ::core::primitive::u32); - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct StorageApi<'a, T: ::subxt::Config> { - client: &'a ::subxt::Client, - } - impl<'a, T: ::subxt::Config> StorageApi<'a, T> { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { client } - } - #[doc = " Mapping from historical session indices to session-data root hash and validator count."] - pub async fn historical_sessions( - &self, - _0: &::core::primitive::u32, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::core::option::Option<(::subxt::sp_core::H256, ::core::primitive::u32)>, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .storage_hash::()? 
- == [ - 221u8, 75u8, 4u8, 83u8, 130u8, 251u8, 43u8, 26u8, 173u8, 40u8, 222u8, - 39u8, 228u8, 129u8, 201u8, 246u8, 81u8, 147u8, 64u8, 150u8, 147u8, - 165u8, 5u8, 44u8, 153u8, 125u8, 128u8, 222u8, 79u8, 16u8, 252u8, 93u8, - ] - { - let entry = HistoricalSessions(_0); - self.client.storage().fetch(&entry, block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Mapping from historical session indices to session-data root hash and validator count."] - pub async fn historical_sessions_iter( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::subxt::KeyIter<'a, T, HistoricalSessions<'a>>, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .storage_hash::()? - == [ - 221u8, 75u8, 4u8, 83u8, 130u8, 251u8, 43u8, 26u8, 173u8, 40u8, 222u8, - 39u8, 228u8, 129u8, 201u8, 246u8, 81u8, 147u8, 64u8, 150u8, 147u8, - 165u8, 5u8, 44u8, 153u8, 125u8, 128u8, 222u8, 79u8, 16u8, 252u8, 93u8, - ] - { - self.client.storage().iter(block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The range of historical sessions we store. [first, last)"] - pub async fn stored_range( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::core::option::Option<(::core::primitive::u32, ::core::primitive::u32)>, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 89u8, 239u8, 197u8, 93u8, 135u8, 62u8, 142u8, 237u8, 64u8, 200u8, - 164u8, 4u8, 130u8, 233u8, 16u8, 238u8, 166u8, 206u8, 71u8, 42u8, 171u8, - 84u8, 8u8, 245u8, 183u8, 216u8, 212u8, 16u8, 190u8, 3u8, 167u8, 189u8, - ] - { - let entry = StoredRange; - self.client.storage().fetch(&entry, block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - } - pub mod session { - use super::root_mod; - use super::runtime_types; - pub mod calls { - use super::root_mod; - use super::runtime_types; - type DispatchError = runtime_types::sp_runtime::DispatchError; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct SetKeys { - pub keys: runtime_types::rococo_runtime::SessionKeys, - pub proof: ::std::vec::Vec<::core::primitive::u8>, - } - impl ::subxt::Call for SetKeys { - const PALLET: &'static str = "Session"; - const FUNCTION: &'static str = "set_keys"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct PurgeKeys; - impl ::subxt::Call for PurgeKeys { - const PALLET: &'static str = "Session"; - const FUNCTION: &'static str = "purge_keys"; - } - pub struct TransactionApi<'a, T: ::subxt::Config, X> { - client: &'a ::subxt::Client, - marker: ::core::marker::PhantomData, - } - impl<'a, T, X> TransactionApi<'a, T, X> - where - T: ::subxt::Config, - X: ::subxt::extrinsic::ExtrinsicParams, - { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { - client, - marker: ::core::marker::PhantomData, - } - } - #[doc = "Sets the session key(s) of the function caller to `keys`."] - #[doc = "Allows an account to set its session key prior to becoming a validator."] - #[doc = "This doesn't take effect until the next session."] - #[doc = ""] - #[doc = "The dispatch origin of this function must be signed."] - #[doc = ""] - #[doc = "# "] - #[doc = "- Complexity: `O(1)`. 
Actual cost depends on the number of length of"] - #[doc = " `T::Keys::key_ids()` which is fixed."] - #[doc = "- DbReads: `origin account`, `T::ValidatorIdOf`, `NextKeys`"] - #[doc = "- DbWrites: `origin account`, `NextKeys`"] - #[doc = "- DbReads per key id: `KeyOwner`"] - #[doc = "- DbWrites per key id: `KeyOwner`"] - #[doc = "# "] - pub fn set_keys( - &self, - keys: runtime_types::rococo_runtime::SessionKeys, - proof: ::std::vec::Vec<::core::primitive::u8>, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - SetKeys, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 11u8, 191u8, 79u8, 20u8, 252u8, 226u8, 99u8, 96u8, 217u8, 230u8, 232u8, - 139u8, 135u8, 196u8, 199u8, 148u8, 127u8, 60u8, 118u8, 169u8, 252u8, - 78u8, 184u8, 44u8, 240u8, 62u8, 134u8, 192u8, 96u8, 31u8, 207u8, 125u8, - ] - { - let call = SetKeys { keys, proof }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Removes any session key(s) of the function caller."] - #[doc = ""] - #[doc = "This doesn't take effect until the next session."] - #[doc = ""] - #[doc = "The dispatch origin of this function must be Signed and the account must be either be"] - #[doc = "convertible to a validator ID using the chain's typical addressing system (this usually"] - #[doc = "means being a controller account) or directly convertible into a validator ID (which"] - #[doc = "usually means being a stash account)."] - #[doc = ""] - #[doc = "# "] - #[doc = "- Complexity: `O(1)` in number of key types. Actual cost depends on the number of length"] - #[doc = " of `T::Keys::key_ids()` which is fixed."] - #[doc = "- DbReads: `T::ValidatorIdOf`, `NextKeys`, `origin account`"] - #[doc = "- DbWrites: `NextKeys`, `origin account`"] - #[doc = "- DbWrites per key id: `KeyOwner`"] - #[doc = "# "] - pub fn purge_keys( - &self, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - PurgeKeys, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 200u8, 255u8, 4u8, 213u8, 188u8, 92u8, 99u8, 116u8, 163u8, 152u8, 29u8, - 35u8, 133u8, 119u8, 246u8, 44u8, 91u8, 31u8, 145u8, 23u8, 213u8, 64u8, - 71u8, 242u8, 207u8, 239u8, 231u8, 37u8, 61u8, 63u8, 190u8, 35u8, - ] - { - let call = PurgeKeys {}; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - pub type Event = runtime_types::pallet_session::pallet::Event; - pub mod events { - use super::runtime_types; - #[derive( - :: subxt :: codec :: CompactAs, - :: subxt :: codec :: Decode, - :: subxt :: codec :: Encode, - Debug, - )] - #[doc = "New session has happened. 
Note that the argument is the session index, not the"] - #[doc = "block number as the type might suggest."] - pub struct NewSession { - pub session_index: ::core::primitive::u32, - } - impl ::subxt::Event for NewSession { - const PALLET: &'static str = "Session"; - const EVENT: &'static str = "NewSession"; - } - } - pub mod storage { - use super::runtime_types; - pub struct Validators; - impl ::subxt::StorageEntry for Validators { - const PALLET: &'static str = "Session"; - const STORAGE: &'static str = "Validators"; - type Value = ::std::vec::Vec<::subxt::sp_core::crypto::AccountId32>; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct CurrentIndex; - impl ::subxt::StorageEntry for CurrentIndex { - const PALLET: &'static str = "Session"; - const STORAGE: &'static str = "CurrentIndex"; - type Value = ::core::primitive::u32; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct QueuedChanged; - impl ::subxt::StorageEntry for QueuedChanged { - const PALLET: &'static str = "Session"; - const STORAGE: &'static str = "QueuedChanged"; - type Value = ::core::primitive::bool; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct QueuedKeys; - impl ::subxt::StorageEntry for QueuedKeys { - const PALLET: &'static str = "Session"; - const STORAGE: &'static str = "QueuedKeys"; - type Value = ::std::vec::Vec<( - ::subxt::sp_core::crypto::AccountId32, - runtime_types::rococo_runtime::SessionKeys, - )>; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct DisabledValidators; - impl ::subxt::StorageEntry for DisabledValidators { - const PALLET: &'static str = "Session"; - const STORAGE: &'static str = "DisabledValidators"; - type Value = ::std::vec::Vec<::core::primitive::u32>; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct NextKeys<'a>(pub &'a ::subxt::sp_core::crypto::AccountId32); - impl ::subxt::StorageEntry for NextKeys<'_> { - const PALLET: &'static str = "Session"; - const STORAGE: &'static str = "NextKeys"; - type Value = runtime_types::rococo_runtime::SessionKeys; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( - &self.0, - ::subxt::StorageHasher::Twox64Concat, - )]) - } - } - pub struct KeyOwner<'a>( - pub &'a runtime_types::sp_core::crypto::KeyTypeId, - pub &'a [::core::primitive::u8], - ); - impl ::subxt::StorageEntry for KeyOwner<'_> { - const PALLET: &'static str = "Session"; - const STORAGE: &'static str = "KeyOwner"; - type Value = ::subxt::sp_core::crypto::AccountId32; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( - &(&self.0, &self.1), - ::subxt::StorageHasher::Twox64Concat, - )]) - } - } - pub struct StorageApi<'a, T: ::subxt::Config> { - client: &'a ::subxt::Client, - } - impl<'a, T: ::subxt::Config> StorageApi<'a, T> { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { client } - } - #[doc = " The current set of validators."] - pub async fn validators( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::std::vec::Vec<::subxt::sp_core::crypto::AccountId32>, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? 
- == [ - 186u8, 248u8, 234u8, 74u8, 245u8, 141u8, 90u8, 152u8, 226u8, 220u8, - 255u8, 104u8, 174u8, 1u8, 37u8, 152u8, 23u8, 208u8, 25u8, 49u8, 33u8, - 253u8, 254u8, 251u8, 141u8, 16u8, 18u8, 175u8, 196u8, 188u8, 163u8, - 209u8, - ] - { - let entry = Validators; - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Current index of the session."] - pub async fn current_index( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> - { - if self.client.metadata().storage_hash::()? - == [ - 148u8, 179u8, 159u8, 15u8, 197u8, 95u8, 214u8, 30u8, 209u8, 251u8, - 183u8, 231u8, 91u8, 25u8, 181u8, 191u8, 143u8, 252u8, 227u8, 80u8, - 159u8, 66u8, 194u8, 67u8, 113u8, 74u8, 111u8, 91u8, 218u8, 187u8, - 130u8, 40u8, - ] - { - let entry = CurrentIndex; - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " True if the underlying economic identities or weighting behind the validators"] - #[doc = " has changed in the queued validator set."] - pub async fn queued_changed( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result<::core::primitive::bool, ::subxt::BasicError> - { - if self.client.metadata().storage_hash::()? - == [ - 105u8, 140u8, 235u8, 218u8, 96u8, 100u8, 252u8, 10u8, 58u8, 221u8, - 244u8, 251u8, 67u8, 91u8, 80u8, 202u8, 152u8, 42u8, 50u8, 113u8, 200u8, - 247u8, 59u8, 213u8, 77u8, 195u8, 1u8, 150u8, 220u8, 18u8, 245u8, 46u8, - ] - { - let entry = QueuedChanged; - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The queued keys for the next session. When the next session begins, these keys"] - #[doc = " will be used to determine the validator's session keys."] - pub async fn queued_keys( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::std::vec::Vec<( - ::subxt::sp_core::crypto::AccountId32, - runtime_types::rococo_runtime::SessionKeys, - )>, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 72u8, 58u8, 252u8, 252u8, 133u8, 178u8, 214u8, 96u8, 221u8, 140u8, - 221u8, 249u8, 229u8, 136u8, 231u8, 167u8, 96u8, 223u8, 182u8, 175u8, - 235u8, 246u8, 80u8, 99u8, 246u8, 37u8, 141u8, 88u8, 213u8, 189u8, - 165u8, 181u8, - ] - { - let entry = QueuedKeys; - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Indices of disabled validators."] - #[doc = ""] - #[doc = " The vec is always kept sorted so that we can find whether a given validator is"] - #[doc = " disabled using binary search. It gets cleared when `on_session_ending` returns"] - #[doc = " a new set of identities."] - pub async fn disabled_validators( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::std::vec::Vec<::core::primitive::u32>, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .storage_hash::()? 
- == [ - 135u8, 22u8, 22u8, 97u8, 82u8, 217u8, 144u8, 141u8, 121u8, 240u8, - 189u8, 16u8, 176u8, 88u8, 177u8, 31u8, 20u8, 242u8, 73u8, 104u8, 11u8, - 110u8, 214u8, 34u8, 52u8, 217u8, 106u8, 33u8, 174u8, 174u8, 198u8, - 84u8, - ] - { - let entry = DisabledValidators; - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The next session keys for a validator."] - pub async fn next_keys( - &self, - _0: &::subxt::sp_core::crypto::AccountId32, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::core::option::Option, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 186u8, 156u8, 115u8, 10u8, 142u8, 35u8, 26u8, 253u8, 76u8, 189u8, - 201u8, 63u8, 248u8, 4u8, 63u8, 54u8, 149u8, 171u8, 41u8, 24u8, 230u8, - 63u8, 152u8, 177u8, 134u8, 22u8, 84u8, 199u8, 198u8, 154u8, 137u8, - 38u8, - ] - { - let entry = NextKeys(_0); - self.client.storage().fetch(&entry, block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The next session keys for a validator."] - pub async fn next_keys_iter( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::subxt::KeyIter<'a, T, NextKeys<'a>>, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 186u8, 156u8, 115u8, 10u8, 142u8, 35u8, 26u8, 253u8, 76u8, 189u8, - 201u8, 63u8, 248u8, 4u8, 63u8, 54u8, 149u8, 171u8, 41u8, 24u8, 230u8, - 63u8, 152u8, 177u8, 134u8, 22u8, 84u8, 199u8, 198u8, 154u8, 137u8, - 38u8, - ] - { - self.client.storage().iter(block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The owner of a key. The key is the `KeyTypeId` + the encoded key."] - pub async fn key_owner( - &self, - _0: &runtime_types::sp_core::crypto::KeyTypeId, - _1: &[::core::primitive::u8], - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::core::option::Option<::subxt::sp_core::crypto::AccountId32>, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 49u8, 245u8, 212u8, 141u8, 211u8, 208u8, 109u8, 102u8, 249u8, 161u8, - 41u8, 93u8, 220u8, 230u8, 14u8, 59u8, 251u8, 176u8, 33u8, 127u8, 93u8, - 149u8, 205u8, 229u8, 113u8, 129u8, 162u8, 177u8, 155u8, 216u8, 151u8, - 57u8, - ] - { - let entry = KeyOwner(_0, _1); - self.client.storage().fetch(&entry, block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The owner of a key. The key is the `KeyTypeId` + the encoded key."] - pub async fn key_owner_iter( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::subxt::KeyIter<'a, T, KeyOwner<'a>>, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? 
- == [ - 49u8, 245u8, 212u8, 141u8, 211u8, 208u8, 109u8, 102u8, 249u8, 161u8, - 41u8, 93u8, 220u8, 230u8, 14u8, 59u8, 251u8, 176u8, 33u8, 127u8, 93u8, - 149u8, 205u8, 229u8, 113u8, 129u8, 162u8, 177u8, 155u8, 216u8, 151u8, - 57u8, - ] - { - self.client.storage().iter(block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - } - pub mod grandpa { - use super::root_mod; - use super::runtime_types; - pub mod calls { - use super::root_mod; - use super::runtime_types; - type DispatchError = runtime_types::sp_runtime::DispatchError; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct ReportEquivocation { - pub equivocation_proof: ::std::boxed::Box< - runtime_types::sp_finality_grandpa::EquivocationProof< - ::subxt::sp_core::H256, - ::core::primitive::u32, - >, - >, - pub key_owner_proof: runtime_types::sp_session::MembershipProof, - } - impl ::subxt::Call for ReportEquivocation { - const PALLET: &'static str = "Grandpa"; - const FUNCTION: &'static str = "report_equivocation"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct ReportEquivocationUnsigned { - pub equivocation_proof: ::std::boxed::Box< - runtime_types::sp_finality_grandpa::EquivocationProof< - ::subxt::sp_core::H256, - ::core::primitive::u32, - >, - >, - pub key_owner_proof: runtime_types::sp_session::MembershipProof, - } - impl ::subxt::Call for ReportEquivocationUnsigned { - const PALLET: &'static str = "Grandpa"; - const FUNCTION: &'static str = "report_equivocation_unsigned"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct NoteStalled { - pub delay: ::core::primitive::u32, - pub best_finalized_block_number: ::core::primitive::u32, - } - impl ::subxt::Call for NoteStalled { - const PALLET: &'static str = "Grandpa"; - const FUNCTION: &'static str = "note_stalled"; - } - pub struct TransactionApi<'a, T: ::subxt::Config, X> { - client: &'a ::subxt::Client, - marker: ::core::marker::PhantomData, - } - impl<'a, T, X> TransactionApi<'a, T, X> - where - T: ::subxt::Config, - X: ::subxt::extrinsic::ExtrinsicParams, - { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { - client, - marker: ::core::marker::PhantomData, - } - } - #[doc = "Report voter equivocation/misbehavior. This method will verify the"] - #[doc = "equivocation proof and validate the given key ownership proof"] - #[doc = "against the extracted offender. If both are valid, the offence"] - #[doc = "will be reported."] - pub fn report_equivocation( - &self, - equivocation_proof: runtime_types::sp_finality_grandpa::EquivocationProof< - ::subxt::sp_core::H256, - ::core::primitive::u32, - >, - key_owner_proof: runtime_types::sp_session::MembershipProof, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - ReportEquivocation, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 64u8, 99u8, 188u8, 190u8, 206u8, 120u8, 44u8, 136u8, 56u8, 142u8, - 221u8, 12u8, 124u8, 245u8, 168u8, 204u8, 39u8, 141u8, 189u8, 189u8, - 218u8, 162u8, 202u8, 220u8, 101u8, 136u8, 66u8, 195u8, 136u8, 4u8, - 66u8, 152u8, - ] - { - let call = ReportEquivocation { - equivocation_proof: ::std::boxed::Box::new(equivocation_proof), - key_owner_proof, - }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Report voter equivocation/misbehavior. 
This method will verify the"] - #[doc = "equivocation proof and validate the given key ownership proof"] - #[doc = "against the extracted offender. If both are valid, the offence"] - #[doc = "will be reported."] - #[doc = ""] - #[doc = "This extrinsic must be called unsigned and it is expected that only"] - #[doc = "block authors will call it (validated in `ValidateUnsigned`), as such"] - #[doc = "if the block author is defined it will be defined as the equivocation"] - #[doc = "reporter."] - pub fn report_equivocation_unsigned( - &self, - equivocation_proof: runtime_types::sp_finality_grandpa::EquivocationProof< - ::subxt::sp_core::H256, - ::core::primitive::u32, - >, - key_owner_proof: runtime_types::sp_session::MembershipProof, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - ReportEquivocationUnsigned, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .call_hash::()? - == [ - 235u8, 164u8, 157u8, 180u8, 230u8, 16u8, 125u8, 74u8, 171u8, 208u8, - 158u8, 179u8, 175u8, 87u8, 111u8, 32u8, 33u8, 72u8, 74u8, 113u8, 113u8, - 113u8, 65u8, 234u8, 184u8, 224u8, 206u8, 126u8, 254u8, 49u8, 6u8, 44u8, - ] - { - let call = ReportEquivocationUnsigned { - equivocation_proof: ::std::boxed::Box::new(equivocation_proof), - key_owner_proof, - }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Note that the current authority set of the GRANDPA finality gadget has"] - #[doc = "stalled. This will trigger a forced authority set change at the beginning"] - #[doc = "of the next session, to be enacted `delay` blocks after that. The delay"] - #[doc = "should be high enough to safely assume that the block signalling the"] - #[doc = "forced change will not be re-orged (e.g. 1000 blocks). The GRANDPA voters"] - #[doc = "will start the new authority set using the given finalized block as base."] - #[doc = "Only callable by root."] - pub fn note_stalled( - &self, - delay: ::core::primitive::u32, - best_finalized_block_number: ::core::primitive::u32, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - NoteStalled, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? 
- == [ - 227u8, 98u8, 249u8, 158u8, 96u8, 124u8, 72u8, 188u8, 27u8, 215u8, 73u8, - 62u8, 103u8, 79u8, 38u8, 48u8, 212u8, 88u8, 233u8, 187u8, 11u8, 95u8, - 39u8, 247u8, 55u8, 184u8, 228u8, 102u8, 13u8, 251u8, 52u8, 206u8, - ] - { - let call = NoteStalled { - delay, - best_finalized_block_number, - }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - pub type Event = runtime_types::pallet_grandpa::pallet::Event; - pub mod events { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "New authority set has been applied."] - pub struct NewAuthorities { - pub authority_set: ::std::vec::Vec<( - runtime_types::sp_finality_grandpa::app::Public, - ::core::primitive::u64, - )>, - } - impl ::subxt::Event for NewAuthorities { - const PALLET: &'static str = "Grandpa"; - const EVENT: &'static str = "NewAuthorities"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "Current authority set has been paused."] - pub struct Paused; - impl ::subxt::Event for Paused { - const PALLET: &'static str = "Grandpa"; - const EVENT: &'static str = "Paused"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "Current authority set has been resumed."] - pub struct Resumed; - impl ::subxt::Event for Resumed { - const PALLET: &'static str = "Grandpa"; - const EVENT: &'static str = "Resumed"; - } - } - pub mod storage { - use super::runtime_types; - pub struct State; - impl ::subxt::StorageEntry for State { - const PALLET: &'static str = "Grandpa"; - const STORAGE: &'static str = "State"; - type Value = runtime_types::pallet_grandpa::StoredState<::core::primitive::u32>; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct PendingChange; - impl ::subxt::StorageEntry for PendingChange { - const PALLET: &'static str = "Grandpa"; - const STORAGE: &'static str = "PendingChange"; - type Value = - runtime_types::pallet_grandpa::StoredPendingChange<::core::primitive::u32>; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct NextForced; - impl ::subxt::StorageEntry for NextForced { - const PALLET: &'static str = "Grandpa"; - const STORAGE: &'static str = "NextForced"; - type Value = ::core::primitive::u32; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct Stalled; - impl ::subxt::StorageEntry for Stalled { - const PALLET: &'static str = "Grandpa"; - const STORAGE: &'static str = "Stalled"; - type Value = (::core::primitive::u32, ::core::primitive::u32); - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct CurrentSetId; - impl ::subxt::StorageEntry for CurrentSetId { - const PALLET: &'static str = "Grandpa"; - const STORAGE: &'static str = "CurrentSetId"; - type Value = ::core::primitive::u64; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct SetIdSession<'a>(pub &'a ::core::primitive::u64); - impl ::subxt::StorageEntry for SetIdSession<'_> { - const PALLET: &'static str = "Grandpa"; - const STORAGE: &'static str = "SetIdSession"; - type Value = ::core::primitive::u32; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( - &self.0, - ::subxt::StorageHasher::Twox64Concat, - )]) - } - } - pub struct 
StorageApi<'a, T: ::subxt::Config> { - client: &'a ::subxt::Client, - } - impl<'a, T: ::subxt::Config> StorageApi<'a, T> { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { client } - } - #[doc = " State of the current authority set."] - pub async fn state( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - runtime_types::pallet_grandpa::StoredState<::core::primitive::u32>, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 159u8, 75u8, 78u8, 23u8, 98u8, 89u8, 239u8, 230u8, 192u8, 67u8, 139u8, - 222u8, 151u8, 237u8, 216u8, 20u8, 235u8, 247u8, 180u8, 24u8, 64u8, - 160u8, 58u8, 15u8, 205u8, 191u8, 120u8, 68u8, 32u8, 5u8, 161u8, 106u8, - ] - { - let entry = State; - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Pending change: (signaled at, scheduled change)."] - pub async fn pending_change( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::core::option::Option< - runtime_types::pallet_grandpa::StoredPendingChange<::core::primitive::u32>, - >, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 83u8, 71u8, 180u8, 126u8, 51u8, 111u8, 238u8, 160u8, 5u8, 100u8, 152u8, - 23u8, 138u8, 228u8, 46u8, 67u8, 145u8, 183u8, 100u8, 97u8, 153u8, - 140u8, 244u8, 179u8, 157u8, 150u8, 75u8, 236u8, 73u8, 209u8, 106u8, - 147u8, - ] - { - let entry = PendingChange; - self.client.storage().fetch(&entry, block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " next block number where we can force a change."] - pub async fn next_forced( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::core::option::Option<::core::primitive::u32>, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 99u8, 43u8, 245u8, 201u8, 60u8, 9u8, 122u8, 99u8, 188u8, 29u8, 67u8, - 6u8, 193u8, 133u8, 179u8, 67u8, 202u8, 208u8, 62u8, 179u8, 19u8, 169u8, - 196u8, 119u8, 107u8, 75u8, 100u8, 3u8, 121u8, 18u8, 80u8, 156u8, - ] - { - let entry = NextForced; - self.client.storage().fetch(&entry, block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " `true` if we are currently stalled."] - pub async fn stalled( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::core::option::Option<(::core::primitive::u32, ::core::primitive::u32)>, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 219u8, 8u8, 37u8, 78u8, 150u8, 55u8, 0u8, 57u8, 201u8, 170u8, 186u8, - 189u8, 56u8, 161u8, 44u8, 15u8, 53u8, 178u8, 224u8, 208u8, 231u8, - 109u8, 14u8, 209u8, 57u8, 205u8, 237u8, 153u8, 231u8, 156u8, 24u8, - 185u8, - ] - { - let entry = Stalled; - self.client.storage().fetch(&entry, block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The number of changes (both in terms of keys and underlying economic responsibilities)"] - #[doc = " in the \"set\" of Grandpa validators from genesis."] - pub async fn current_set_id( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result<::core::primitive::u64, ::subxt::BasicError> - { - if self.client.metadata().storage_hash::()? 
- == [ - 129u8, 7u8, 62u8, 101u8, 199u8, 60u8, 56u8, 33u8, 54u8, 158u8, 20u8, - 178u8, 244u8, 145u8, 189u8, 197u8, 157u8, 163u8, 116u8, 36u8, 105u8, - 52u8, 149u8, 244u8, 108u8, 94u8, 109u8, 111u8, 244u8, 137u8, 7u8, - 108u8, - ] - { - let entry = CurrentSetId; - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " A mapping from grandpa set ID to the index of the *most recent* session for which its"] - #[doc = " members were responsible."] - #[doc = ""] - #[doc = " TWOX-NOTE: `SetId` is not under user control."] - pub async fn set_id_session( - &self, - _0: &::core::primitive::u64, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::core::option::Option<::core::primitive::u32>, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 91u8, 175u8, 145u8, 127u8, 242u8, 81u8, 13u8, 231u8, 110u8, 11u8, - 166u8, 169u8, 103u8, 146u8, 123u8, 133u8, 157u8, 15u8, 33u8, 234u8, - 108u8, 13u8, 88u8, 115u8, 254u8, 9u8, 145u8, 199u8, 102u8, 47u8, 53u8, - 134u8, - ] - { - let entry = SetIdSession(_0); - self.client.storage().fetch(&entry, block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " A mapping from grandpa set ID to the index of the *most recent* session for which its"] - #[doc = " members were responsible."] - #[doc = ""] - #[doc = " TWOX-NOTE: `SetId` is not under user control."] - pub async fn set_id_session_iter( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::subxt::KeyIter<'a, T, SetIdSession<'a>>, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 91u8, 175u8, 145u8, 127u8, 242u8, 81u8, 13u8, 231u8, 110u8, 11u8, - 166u8, 169u8, 103u8, 146u8, 123u8, 133u8, 157u8, 15u8, 33u8, 234u8, - 108u8, 13u8, 88u8, 115u8, 254u8, 9u8, 145u8, 199u8, 102u8, 47u8, 53u8, - 134u8, - ] - { - self.client.storage().iter(block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - pub mod constants { - use super::runtime_types; - pub struct ConstantsApi<'a, T: ::subxt::Config> { - client: &'a ::subxt::Client, - } - impl<'a, T: ::subxt::Config> ConstantsApi<'a, T> { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { client } - } - #[doc = " Max Authorities in use"] - pub fn max_authorities( - &self, - ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> - { - if self - .client - .metadata() - .constant_hash("Grandpa", "MaxAuthorities")? 
- == [ - 248u8, 195u8, 131u8, 166u8, 10u8, 50u8, 71u8, 223u8, 41u8, 49u8, 43u8, - 99u8, 251u8, 113u8, 75u8, 193u8, 159u8, 15u8, 77u8, 217u8, 147u8, - 205u8, 165u8, 50u8, 6u8, 166u8, 77u8, 189u8, 102u8, 22u8, 201u8, 19u8, - ] - { - let pallet = self.client.metadata().pallet("Grandpa")?; - let constant = pallet.constant("MaxAuthorities")?; - let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; - Ok(value) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - } - pub mod im_online { - use super::root_mod; - use super::runtime_types; - pub mod calls { - use super::root_mod; - use super::runtime_types; - type DispatchError = runtime_types::sp_runtime::DispatchError; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct Heartbeat { - pub heartbeat: runtime_types::pallet_im_online::Heartbeat<::core::primitive::u32>, - pub signature: runtime_types::pallet_im_online::sr25519::app_sr25519::Signature, - } - impl ::subxt::Call for Heartbeat { - const PALLET: &'static str = "ImOnline"; - const FUNCTION: &'static str = "heartbeat"; - } - pub struct TransactionApi<'a, T: ::subxt::Config, X> { - client: &'a ::subxt::Client, - marker: ::core::marker::PhantomData, - } - impl<'a, T, X> TransactionApi<'a, T, X> - where - T: ::subxt::Config, - X: ::subxt::extrinsic::ExtrinsicParams, - { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { - client, - marker: ::core::marker::PhantomData, - } - } - #[doc = "# "] - #[doc = "- Complexity: `O(K + E)` where K is length of `Keys` (heartbeat.validators_len) and E is"] - #[doc = " length of `heartbeat.network_state.external_address`"] - #[doc = " - `O(K)`: decoding of length `K`"] - #[doc = " - `O(E)`: decoding/encoding of length `E`"] - #[doc = "- DbReads: pallet_session `Validators`, pallet_session `CurrentIndex`, `Keys`,"] - #[doc = " `ReceivedHeartbeats`"] - #[doc = "- DbWrites: `ReceivedHeartbeats`"] - #[doc = "# "] - pub fn heartbeat( - &self, - heartbeat: runtime_types::pallet_im_online::Heartbeat<::core::primitive::u32>, - signature: runtime_types::pallet_im_online::sr25519::app_sr25519::Signature, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - Heartbeat, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? 
- == [ - 92u8, 180u8, 156u8, 19u8, 58u8, 158u8, 191u8, 159u8, 72u8, 119u8, - 227u8, 229u8, 8u8, 139u8, 185u8, 101u8, 114u8, 161u8, 206u8, 77u8, - 110u8, 41u8, 12u8, 154u8, 147u8, 12u8, 227u8, 46u8, 61u8, 114u8, 166u8, - 2u8, - ] - { - let call = Heartbeat { - heartbeat, - signature, - }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - pub type Event = runtime_types::pallet_im_online::pallet::Event; - pub mod events { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "A new heartbeat was received from `AuthorityId`."] - pub struct HeartbeatReceived { - pub authority_id: runtime_types::pallet_im_online::sr25519::app_sr25519::Public, - } - impl ::subxt::Event for HeartbeatReceived { - const PALLET: &'static str = "ImOnline"; - const EVENT: &'static str = "HeartbeatReceived"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "At the end of the session, no offence was committed."] - pub struct AllGood; - impl ::subxt::Event for AllGood { - const PALLET: &'static str = "ImOnline"; - const EVENT: &'static str = "AllGood"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "At the end of the session, at least one validator was found to be offline."] - pub struct SomeOffline { - pub offline: ::std::vec::Vec<(::subxt::sp_core::crypto::AccountId32, ())>, - } - impl ::subxt::Event for SomeOffline { - const PALLET: &'static str = "ImOnline"; - const EVENT: &'static str = "SomeOffline"; - } - } - pub mod storage { - use super::runtime_types; - pub struct HeartbeatAfter; - impl ::subxt::StorageEntry for HeartbeatAfter { - const PALLET: &'static str = "ImOnline"; - const STORAGE: &'static str = "HeartbeatAfter"; - type Value = ::core::primitive::u32; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct Keys; - impl ::subxt::StorageEntry for Keys { - const PALLET: &'static str = "ImOnline"; - const STORAGE: &'static str = "Keys"; - type Value = - runtime_types::frame_support::storage::weak_bounded_vec::WeakBoundedVec< - runtime_types::pallet_im_online::sr25519::app_sr25519::Public, - >; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct ReceivedHeartbeats<'a>( - pub &'a ::core::primitive::u32, - pub &'a ::core::primitive::u32, - ); - impl ::subxt::StorageEntry for ReceivedHeartbeats<'_> { - const PALLET: &'static str = "ImOnline"; - const STORAGE: &'static str = "ReceivedHeartbeats"; - type Value = runtime_types::frame_support::traits::misc::WrapperOpaque< - runtime_types::pallet_im_online::BoundedOpaqueNetworkState, - >; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Map(vec![ - ::subxt::StorageMapKey::new(&self.0, ::subxt::StorageHasher::Twox64Concat), - ::subxt::StorageMapKey::new(&self.1, ::subxt::StorageHasher::Twox64Concat), - ]) - } - } - pub struct AuthoredBlocks<'a>( - pub &'a ::core::primitive::u32, - pub &'a ::subxt::sp_core::crypto::AccountId32, - ); - impl ::subxt::StorageEntry for AuthoredBlocks<'_> { - const PALLET: &'static str = "ImOnline"; - const STORAGE: &'static str = "AuthoredBlocks"; - type Value = ::core::primitive::u32; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Map(vec![ - ::subxt::StorageMapKey::new(&self.0, ::subxt::StorageHasher::Twox64Concat), - ::subxt::StorageMapKey::new(&self.1, 
::subxt::StorageHasher::Twox64Concat), - ]) - } - } - pub struct StorageApi<'a, T: ::subxt::Config> { - client: &'a ::subxt::Client, - } - impl<'a, T: ::subxt::Config> StorageApi<'a, T> { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { client } - } - #[doc = " The block number after which it's ok to send heartbeats in the current"] - #[doc = " session."] - #[doc = ""] - #[doc = " At the beginning of each session we set this to a value that should fall"] - #[doc = " roughly in the middle of the session duration. The idea is to first wait for"] - #[doc = " the validators to produce a block in the current session, so that the"] - #[doc = " heartbeat later on will not be necessary."] - #[doc = ""] - #[doc = " This value will only be used as a fallback if we fail to get a proper session"] - #[doc = " progress estimate from `NextSessionRotation`, as those estimates should be"] - #[doc = " more accurate then the value we calculate for `HeartbeatAfter`."] - pub async fn heartbeat_after( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> - { - if self.client.metadata().storage_hash::()? - == [ - 108u8, 100u8, 85u8, 198u8, 226u8, 122u8, 94u8, 225u8, 97u8, 154u8, - 135u8, 95u8, 106u8, 28u8, 185u8, 78u8, 192u8, 196u8, 35u8, 191u8, 12u8, - 19u8, 163u8, 46u8, 232u8, 235u8, 193u8, 81u8, 126u8, 204u8, 25u8, - 228u8, - ] - { - let entry = HeartbeatAfter; - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The current set of keys that may issue a heartbeat."] - pub async fn keys( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - runtime_types::frame_support::storage::weak_bounded_vec::WeakBoundedVec< - runtime_types::pallet_im_online::sr25519::app_sr25519::Public, - >, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 88u8, 127u8, 143u8, 47u8, 75u8, 103u8, 131u8, 82u8, 76u8, 17u8, 255u8, - 108u8, 83u8, 251u8, 44u8, 225u8, 190u8, 66u8, 26u8, 168u8, 61u8, 87u8, - 92u8, 209u8, 147u8, 10u8, 204u8, 48u8, 214u8, 28u8, 60u8, 222u8, - ] - { - let entry = Keys; - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " For each session index, we keep a mapping of `SessionIndex` and `AuthIndex` to"] - #[doc = " `WrapperOpaque`."] - pub async fn received_heartbeats( - &self, - _0: &::core::primitive::u32, - _1: &::core::primitive::u32, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::core::option::Option< - runtime_types::frame_support::traits::misc::WrapperOpaque< - runtime_types::pallet_im_online::BoundedOpaqueNetworkState, - >, - >, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .storage_hash::()? 
- == [ - 29u8, 40u8, 67u8, 222u8, 59u8, 104u8, 24u8, 193u8, 249u8, 200u8, 152u8, - 225u8, 72u8, 243u8, 140u8, 114u8, 121u8, 216u8, 54u8, 145u8, 205u8, - 82u8, 133u8, 128u8, 109u8, 54u8, 153u8, 118u8, 66u8, 147u8, 251u8, - 148u8, - ] - { - let entry = ReceivedHeartbeats(_0, _1); - self.client.storage().fetch(&entry, block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " For each session index, we keep a mapping of `SessionIndex` and `AuthIndex` to"] - #[doc = " `WrapperOpaque`."] - pub async fn received_heartbeats_iter( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::subxt::KeyIter<'a, T, ReceivedHeartbeats<'a>>, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .storage_hash::()? - == [ - 29u8, 40u8, 67u8, 222u8, 59u8, 104u8, 24u8, 193u8, 249u8, 200u8, 152u8, - 225u8, 72u8, 243u8, 140u8, 114u8, 121u8, 216u8, 54u8, 145u8, 205u8, - 82u8, 133u8, 128u8, 109u8, 54u8, 153u8, 118u8, 66u8, 147u8, 251u8, - 148u8, - ] - { - self.client.storage().iter(block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " For each session index, we keep a mapping of `ValidatorId` to the"] - #[doc = " number of blocks authored by the given authority."] - pub async fn authored_blocks( - &self, - _0: &::core::primitive::u32, - _1: &::subxt::sp_core::crypto::AccountId32, - block_hash: ::core::option::Option, - ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> - { - if self.client.metadata().storage_hash::()? - == [ - 94u8, 193u8, 107u8, 126u8, 3u8, 13u8, 28u8, 151u8, 197u8, 226u8, 224u8, - 48u8, 138u8, 113u8, 31u8, 57u8, 111u8, 184u8, 218u8, 215u8, 185u8, - 83u8, 209u8, 139u8, 114u8, 241u8, 68u8, 110u8, 157u8, 208u8, 16u8, - 22u8, - ] - { - let entry = AuthoredBlocks(_0, _1); - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " For each session index, we keep a mapping of `ValidatorId` to the"] - #[doc = " number of blocks authored by the given authority."] - pub async fn authored_blocks_iter( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::subxt::KeyIter<'a, T, AuthoredBlocks<'a>>, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 94u8, 193u8, 107u8, 126u8, 3u8, 13u8, 28u8, 151u8, 197u8, 226u8, 224u8, - 48u8, 138u8, 113u8, 31u8, 57u8, 111u8, 184u8, 218u8, 215u8, 185u8, - 83u8, 209u8, 139u8, 114u8, 241u8, 68u8, 110u8, 157u8, 208u8, 16u8, - 22u8, - ] - { - self.client.storage().iter(block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - pub mod constants { - use super::runtime_types; - pub struct ConstantsApi<'a, T: ::subxt::Config> { - client: &'a ::subxt::Client, - } - impl<'a, T: ::subxt::Config> ConstantsApi<'a, T> { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { client } - } - #[doc = " A configuration for base priority of unsigned transactions."] - #[doc = ""] - #[doc = " This is exposed so that it can be tuned for particular runtime, when"] - #[doc = " multiple pallets send unsigned transactions."] - pub fn unsigned_priority( - &self, - ) -> ::core::result::Result<::core::primitive::u64, ::subxt::BasicError> - { - if self - .client - .metadata() - .constant_hash("ImOnline", "UnsignedPriority")? 
- == [ - 78u8, 226u8, 84u8, 70u8, 162u8, 23u8, 167u8, 100u8, 156u8, 228u8, - 119u8, 16u8, 28u8, 202u8, 21u8, 71u8, 72u8, 244u8, 3u8, 255u8, 243u8, - 55u8, 109u8, 238u8, 26u8, 180u8, 207u8, 175u8, 221u8, 27u8, 213u8, - 217u8, - ] - { - let pallet = self.client.metadata().pallet("ImOnline")?; - let constant = pallet.constant("UnsignedPriority")?; - let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; - Ok(value) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - } - pub mod authority_discovery { - use super::root_mod; - use super::runtime_types; - pub mod storage { - use super::runtime_types; - pub struct Keys; - impl ::subxt::StorageEntry for Keys { - const PALLET: &'static str = "AuthorityDiscovery"; - const STORAGE: &'static str = "Keys"; - type Value = - runtime_types::frame_support::storage::weak_bounded_vec::WeakBoundedVec< - runtime_types::sp_authority_discovery::app::Public, - >; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct NextKeys; - impl ::subxt::StorageEntry for NextKeys { - const PALLET: &'static str = "AuthorityDiscovery"; - const STORAGE: &'static str = "NextKeys"; - type Value = - runtime_types::frame_support::storage::weak_bounded_vec::WeakBoundedVec< - runtime_types::sp_authority_discovery::app::Public, - >; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct StorageApi<'a, T: ::subxt::Config> { - client: &'a ::subxt::Client, - } - impl<'a, T: ::subxt::Config> StorageApi<'a, T> { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { client } - } - #[doc = " Keys of the current authority set."] - pub async fn keys( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - runtime_types::frame_support::storage::weak_bounded_vec::WeakBoundedVec< - runtime_types::sp_authority_discovery::app::Public, - >, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 88u8, 127u8, 143u8, 47u8, 75u8, 103u8, 131u8, 82u8, 76u8, 17u8, 255u8, - 108u8, 83u8, 251u8, 44u8, 225u8, 190u8, 66u8, 26u8, 168u8, 61u8, 87u8, - 92u8, 209u8, 147u8, 10u8, 204u8, 48u8, 214u8, 28u8, 60u8, 222u8, - ] - { - let entry = Keys; - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Keys of the next authority set."] - pub async fn next_keys( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - runtime_types::frame_support::storage::weak_bounded_vec::WeakBoundedVec< - runtime_types::sp_authority_discovery::app::Public, - >, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? 
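// Editor's note (illustrative sketch, not part of the generated bindings): pallet
// constants are resolved the same way -- `constant_hash("ImOnline", "UnsignedPriority")`
// guards against stale metadata, after which the raw SCALE bytes held in the
// metadata are decoded into the declared Rust type. Assuming the same hypothetical
// `client` as above:
//
//     let unsigned_priority: u64 = ConstantsApi::new(&client).unsigned_priority()?;
//
// Unlike the storage getters this is not async: the value comes from the metadata
// the client already holds, so no RPC round trip is needed.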
- == [ - 139u8, 231u8, 99u8, 138u8, 194u8, 192u8, 123u8, 163u8, 239u8, 10u8, - 211u8, 7u8, 154u8, 1u8, 182u8, 43u8, 203u8, 128u8, 55u8, 150u8, 108u8, - 94u8, 163u8, 49u8, 230u8, 18u8, 208u8, 240u8, 83u8, 226u8, 125u8, 36u8, - ] - { - let entry = NextKeys; - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - } - pub mod parachains_origin { - use super::root_mod; - use super::runtime_types; - } - pub mod configuration { - use super::root_mod; - use super::runtime_types; - pub mod calls { - use super::root_mod; - use super::runtime_types; - type DispatchError = runtime_types::sp_runtime::DispatchError; - #[derive( - :: subxt :: codec :: CompactAs, - :: subxt :: codec :: Decode, - :: subxt :: codec :: Encode, - Debug, - )] - pub struct SetValidationUpgradeCooldown { - pub new: ::core::primitive::u32, - } - impl ::subxt::Call for SetValidationUpgradeCooldown { - const PALLET: &'static str = "Configuration"; - const FUNCTION: &'static str = "set_validation_upgrade_cooldown"; - } - #[derive( - :: subxt :: codec :: CompactAs, - :: subxt :: codec :: Decode, - :: subxt :: codec :: Encode, - Debug, - )] - pub struct SetValidationUpgradeDelay { - pub new: ::core::primitive::u32, - } - impl ::subxt::Call for SetValidationUpgradeDelay { - const PALLET: &'static str = "Configuration"; - const FUNCTION: &'static str = "set_validation_upgrade_delay"; - } - #[derive( - :: subxt :: codec :: CompactAs, - :: subxt :: codec :: Decode, - :: subxt :: codec :: Encode, - Debug, - )] - pub struct SetCodeRetentionPeriod { - pub new: ::core::primitive::u32, - } - impl ::subxt::Call for SetCodeRetentionPeriod { - const PALLET: &'static str = "Configuration"; - const FUNCTION: &'static str = "set_code_retention_period"; - } - #[derive( - :: subxt :: codec :: CompactAs, - :: subxt :: codec :: Decode, - :: subxt :: codec :: Encode, - Debug, - )] - pub struct SetMaxCodeSize { - pub new: ::core::primitive::u32, - } - impl ::subxt::Call for SetMaxCodeSize { - const PALLET: &'static str = "Configuration"; - const FUNCTION: &'static str = "set_max_code_size"; - } - #[derive( - :: subxt :: codec :: CompactAs, - :: subxt :: codec :: Decode, - :: subxt :: codec :: Encode, - Debug, - )] - pub struct SetMaxPovSize { - pub new: ::core::primitive::u32, - } - impl ::subxt::Call for SetMaxPovSize { - const PALLET: &'static str = "Configuration"; - const FUNCTION: &'static str = "set_max_pov_size"; - } - #[derive( - :: subxt :: codec :: CompactAs, - :: subxt :: codec :: Decode, - :: subxt :: codec :: Encode, - Debug, - )] - pub struct SetMaxHeadDataSize { - pub new: ::core::primitive::u32, - } - impl ::subxt::Call for SetMaxHeadDataSize { - const PALLET: &'static str = "Configuration"; - const FUNCTION: &'static str = "set_max_head_data_size"; - } - #[derive( - :: subxt :: codec :: CompactAs, - :: subxt :: codec :: Decode, - :: subxt :: codec :: Encode, - Debug, - )] - pub struct SetParathreadCores { - pub new: ::core::primitive::u32, - } - impl ::subxt::Call for SetParathreadCores { - const PALLET: &'static str = "Configuration"; - const FUNCTION: &'static str = "set_parathread_cores"; - } - #[derive( - :: subxt :: codec :: CompactAs, - :: subxt :: codec :: Decode, - :: subxt :: codec :: Encode, - Debug, - )] - pub struct SetParathreadRetries { - pub new: ::core::primitive::u32, - } - impl ::subxt::Call for SetParathreadRetries { - const PALLET: &'static str = "Configuration"; - const FUNCTION: &'static str = 
"set_parathread_retries"; - } - #[derive( - :: subxt :: codec :: CompactAs, - :: subxt :: codec :: Decode, - :: subxt :: codec :: Encode, - Debug, - )] - pub struct SetGroupRotationFrequency { - pub new: ::core::primitive::u32, - } - impl ::subxt::Call for SetGroupRotationFrequency { - const PALLET: &'static str = "Configuration"; - const FUNCTION: &'static str = "set_group_rotation_frequency"; - } - #[derive( - :: subxt :: codec :: CompactAs, - :: subxt :: codec :: Decode, - :: subxt :: codec :: Encode, - Debug, - )] - pub struct SetChainAvailabilityPeriod { - pub new: ::core::primitive::u32, - } - impl ::subxt::Call for SetChainAvailabilityPeriod { - const PALLET: &'static str = "Configuration"; - const FUNCTION: &'static str = "set_chain_availability_period"; - } - #[derive( - :: subxt :: codec :: CompactAs, - :: subxt :: codec :: Decode, - :: subxt :: codec :: Encode, - Debug, - )] - pub struct SetThreadAvailabilityPeriod { - pub new: ::core::primitive::u32, - } - impl ::subxt::Call for SetThreadAvailabilityPeriod { - const PALLET: &'static str = "Configuration"; - const FUNCTION: &'static str = "set_thread_availability_period"; - } - #[derive( - :: subxt :: codec :: CompactAs, - :: subxt :: codec :: Decode, - :: subxt :: codec :: Encode, - Debug, - )] - pub struct SetSchedulingLookahead { - pub new: ::core::primitive::u32, - } - impl ::subxt::Call for SetSchedulingLookahead { - const PALLET: &'static str = "Configuration"; - const FUNCTION: &'static str = "set_scheduling_lookahead"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct SetMaxValidatorsPerCore { - pub new: ::core::option::Option<::core::primitive::u32>, - } - impl ::subxt::Call for SetMaxValidatorsPerCore { - const PALLET: &'static str = "Configuration"; - const FUNCTION: &'static str = "set_max_validators_per_core"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct SetMaxValidators { - pub new: ::core::option::Option<::core::primitive::u32>, - } - impl ::subxt::Call for SetMaxValidators { - const PALLET: &'static str = "Configuration"; - const FUNCTION: &'static str = "set_max_validators"; - } - #[derive( - :: subxt :: codec :: CompactAs, - :: subxt :: codec :: Decode, - :: subxt :: codec :: Encode, - Debug, - )] - pub struct SetDisputePeriod { - pub new: ::core::primitive::u32, - } - impl ::subxt::Call for SetDisputePeriod { - const PALLET: &'static str = "Configuration"; - const FUNCTION: &'static str = "set_dispute_period"; - } - #[derive( - :: subxt :: codec :: CompactAs, - :: subxt :: codec :: Decode, - :: subxt :: codec :: Encode, - Debug, - )] - pub struct SetDisputePostConclusionAcceptancePeriod { - pub new: ::core::primitive::u32, - } - impl ::subxt::Call for SetDisputePostConclusionAcceptancePeriod { - const PALLET: &'static str = "Configuration"; - const FUNCTION: &'static str = "set_dispute_post_conclusion_acceptance_period"; - } - #[derive( - :: subxt :: codec :: CompactAs, - :: subxt :: codec :: Decode, - :: subxt :: codec :: Encode, - Debug, - )] - pub struct SetDisputeMaxSpamSlots { - pub new: ::core::primitive::u32, - } - impl ::subxt::Call for SetDisputeMaxSpamSlots { - const PALLET: &'static str = "Configuration"; - const FUNCTION: &'static str = "set_dispute_max_spam_slots"; - } - #[derive( - :: subxt :: codec :: CompactAs, - :: subxt :: codec :: Decode, - :: subxt :: codec :: Encode, - Debug, - )] - pub struct SetDisputeConclusionByTimeOutPeriod { - pub new: ::core::primitive::u32, - } - impl ::subxt::Call for 
SetDisputeConclusionByTimeOutPeriod { - const PALLET: &'static str = "Configuration"; - const FUNCTION: &'static str = "set_dispute_conclusion_by_time_out_period"; - } - #[derive( - :: subxt :: codec :: CompactAs, - :: subxt :: codec :: Decode, - :: subxt :: codec :: Encode, - Debug, - )] - pub struct SetNoShowSlots { - pub new: ::core::primitive::u32, - } - impl ::subxt::Call for SetNoShowSlots { - const PALLET: &'static str = "Configuration"; - const FUNCTION: &'static str = "set_no_show_slots"; - } - #[derive( - :: subxt :: codec :: CompactAs, - :: subxt :: codec :: Decode, - :: subxt :: codec :: Encode, - Debug, - )] - pub struct SetNDelayTranches { - pub new: ::core::primitive::u32, - } - impl ::subxt::Call for SetNDelayTranches { - const PALLET: &'static str = "Configuration"; - const FUNCTION: &'static str = "set_n_delay_tranches"; - } - #[derive( - :: subxt :: codec :: CompactAs, - :: subxt :: codec :: Decode, - :: subxt :: codec :: Encode, - Debug, - )] - pub struct SetZerothDelayTrancheWidth { - pub new: ::core::primitive::u32, - } - impl ::subxt::Call for SetZerothDelayTrancheWidth { - const PALLET: &'static str = "Configuration"; - const FUNCTION: &'static str = "set_zeroth_delay_tranche_width"; - } - #[derive( - :: subxt :: codec :: CompactAs, - :: subxt :: codec :: Decode, - :: subxt :: codec :: Encode, - Debug, - )] - pub struct SetNeededApprovals { - pub new: ::core::primitive::u32, - } - impl ::subxt::Call for SetNeededApprovals { - const PALLET: &'static str = "Configuration"; - const FUNCTION: &'static str = "set_needed_approvals"; - } - #[derive( - :: subxt :: codec :: CompactAs, - :: subxt :: codec :: Decode, - :: subxt :: codec :: Encode, - Debug, - )] - pub struct SetRelayVrfModuloSamples { - pub new: ::core::primitive::u32, - } - impl ::subxt::Call for SetRelayVrfModuloSamples { - const PALLET: &'static str = "Configuration"; - const FUNCTION: &'static str = "set_relay_vrf_modulo_samples"; - } - #[derive( - :: subxt :: codec :: CompactAs, - :: subxt :: codec :: Decode, - :: subxt :: codec :: Encode, - Debug, - )] - pub struct SetMaxUpwardQueueCount { - pub new: ::core::primitive::u32, - } - impl ::subxt::Call for SetMaxUpwardQueueCount { - const PALLET: &'static str = "Configuration"; - const FUNCTION: &'static str = "set_max_upward_queue_count"; - } - #[derive( - :: subxt :: codec :: CompactAs, - :: subxt :: codec :: Decode, - :: subxt :: codec :: Encode, - Debug, - )] - pub struct SetMaxUpwardQueueSize { - pub new: ::core::primitive::u32, - } - impl ::subxt::Call for SetMaxUpwardQueueSize { - const PALLET: &'static str = "Configuration"; - const FUNCTION: &'static str = "set_max_upward_queue_size"; - } - #[derive( - :: subxt :: codec :: CompactAs, - :: subxt :: codec :: Decode, - :: subxt :: codec :: Encode, - Debug, - )] - pub struct SetMaxDownwardMessageSize { - pub new: ::core::primitive::u32, - } - impl ::subxt::Call for SetMaxDownwardMessageSize { - const PALLET: &'static str = "Configuration"; - const FUNCTION: &'static str = "set_max_downward_message_size"; - } - #[derive( - :: subxt :: codec :: CompactAs, - :: subxt :: codec :: Decode, - :: subxt :: codec :: Encode, - Debug, - )] - pub struct SetUmpServiceTotalWeight { - pub new: ::core::primitive::u64, - } - impl ::subxt::Call for SetUmpServiceTotalWeight { - const PALLET: &'static str = "Configuration"; - const FUNCTION: &'static str = "set_ump_service_total_weight"; - } - #[derive( - :: subxt :: codec :: CompactAs, - :: subxt :: codec :: Decode, - :: subxt :: codec :: Encode, - Debug, - )] - pub struct 
SetMaxUpwardMessageSize { - pub new: ::core::primitive::u32, - } - impl ::subxt::Call for SetMaxUpwardMessageSize { - const PALLET: &'static str = "Configuration"; - const FUNCTION: &'static str = "set_max_upward_message_size"; - } - #[derive( - :: subxt :: codec :: CompactAs, - :: subxt :: codec :: Decode, - :: subxt :: codec :: Encode, - Debug, - )] - pub struct SetMaxUpwardMessageNumPerCandidate { - pub new: ::core::primitive::u32, - } - impl ::subxt::Call for SetMaxUpwardMessageNumPerCandidate { - const PALLET: &'static str = "Configuration"; - const FUNCTION: &'static str = "set_max_upward_message_num_per_candidate"; - } - #[derive( - :: subxt :: codec :: CompactAs, - :: subxt :: codec :: Decode, - :: subxt :: codec :: Encode, - Debug, - )] - pub struct SetHrmpOpenRequestTtl { - pub new: ::core::primitive::u32, - } - impl ::subxt::Call for SetHrmpOpenRequestTtl { - const PALLET: &'static str = "Configuration"; - const FUNCTION: &'static str = "set_hrmp_open_request_ttl"; - } - #[derive( - :: subxt :: codec :: CompactAs, - :: subxt :: codec :: Decode, - :: subxt :: codec :: Encode, - Debug, - )] - pub struct SetHrmpSenderDeposit { - pub new: ::core::primitive::u128, - } - impl ::subxt::Call for SetHrmpSenderDeposit { - const PALLET: &'static str = "Configuration"; - const FUNCTION: &'static str = "set_hrmp_sender_deposit"; - } - #[derive( - :: subxt :: codec :: CompactAs, - :: subxt :: codec :: Decode, - :: subxt :: codec :: Encode, - Debug, - )] - pub struct SetHrmpRecipientDeposit { - pub new: ::core::primitive::u128, - } - impl ::subxt::Call for SetHrmpRecipientDeposit { - const PALLET: &'static str = "Configuration"; - const FUNCTION: &'static str = "set_hrmp_recipient_deposit"; - } - #[derive( - :: subxt :: codec :: CompactAs, - :: subxt :: codec :: Decode, - :: subxt :: codec :: Encode, - Debug, - )] - pub struct SetHrmpChannelMaxCapacity { - pub new: ::core::primitive::u32, - } - impl ::subxt::Call for SetHrmpChannelMaxCapacity { - const PALLET: &'static str = "Configuration"; - const FUNCTION: &'static str = "set_hrmp_channel_max_capacity"; - } - #[derive( - :: subxt :: codec :: CompactAs, - :: subxt :: codec :: Decode, - :: subxt :: codec :: Encode, - Debug, - )] - pub struct SetHrmpChannelMaxTotalSize { - pub new: ::core::primitive::u32, - } - impl ::subxt::Call for SetHrmpChannelMaxTotalSize { - const PALLET: &'static str = "Configuration"; - const FUNCTION: &'static str = "set_hrmp_channel_max_total_size"; - } - #[derive( - :: subxt :: codec :: CompactAs, - :: subxt :: codec :: Decode, - :: subxt :: codec :: Encode, - Debug, - )] - pub struct SetHrmpMaxParachainInboundChannels { - pub new: ::core::primitive::u32, - } - impl ::subxt::Call for SetHrmpMaxParachainInboundChannels { - const PALLET: &'static str = "Configuration"; - const FUNCTION: &'static str = "set_hrmp_max_parachain_inbound_channels"; - } - #[derive( - :: subxt :: codec :: CompactAs, - :: subxt :: codec :: Decode, - :: subxt :: codec :: Encode, - Debug, - )] - pub struct SetHrmpMaxParathreadInboundChannels { - pub new: ::core::primitive::u32, - } - impl ::subxt::Call for SetHrmpMaxParathreadInboundChannels { - const PALLET: &'static str = "Configuration"; - const FUNCTION: &'static str = "set_hrmp_max_parathread_inbound_channels"; - } - #[derive( - :: subxt :: codec :: CompactAs, - :: subxt :: codec :: Decode, - :: subxt :: codec :: Encode, - Debug, - )] - pub struct SetHrmpChannelMaxMessageSize { - pub new: ::core::primitive::u32, - } - impl ::subxt::Call for SetHrmpChannelMaxMessageSize { - const PALLET: 
&'static str = "Configuration"; - const FUNCTION: &'static str = "set_hrmp_channel_max_message_size"; - } - #[derive( - :: subxt :: codec :: CompactAs, - :: subxt :: codec :: Decode, - :: subxt :: codec :: Encode, - Debug, - )] - pub struct SetHrmpMaxParachainOutboundChannels { - pub new: ::core::primitive::u32, - } - impl ::subxt::Call for SetHrmpMaxParachainOutboundChannels { - const PALLET: &'static str = "Configuration"; - const FUNCTION: &'static str = "set_hrmp_max_parachain_outbound_channels"; - } - #[derive( - :: subxt :: codec :: CompactAs, - :: subxt :: codec :: Decode, - :: subxt :: codec :: Encode, - Debug, - )] - pub struct SetHrmpMaxParathreadOutboundChannels { - pub new: ::core::primitive::u32, - } - impl ::subxt::Call for SetHrmpMaxParathreadOutboundChannels { - const PALLET: &'static str = "Configuration"; - const FUNCTION: &'static str = "set_hrmp_max_parathread_outbound_channels"; - } - #[derive( - :: subxt :: codec :: CompactAs, - :: subxt :: codec :: Decode, - :: subxt :: codec :: Encode, - Debug, - )] - pub struct SetHrmpMaxMessageNumPerCandidate { - pub new: ::core::primitive::u32, - } - impl ::subxt::Call for SetHrmpMaxMessageNumPerCandidate { - const PALLET: &'static str = "Configuration"; - const FUNCTION: &'static str = "set_hrmp_max_message_num_per_candidate"; - } - #[derive( - :: subxt :: codec :: CompactAs, - :: subxt :: codec :: Decode, - :: subxt :: codec :: Encode, - Debug, - )] - pub struct SetUmpMaxIndividualWeight { - pub new: ::core::primitive::u64, - } - impl ::subxt::Call for SetUmpMaxIndividualWeight { - const PALLET: &'static str = "Configuration"; - const FUNCTION: &'static str = "set_ump_max_individual_weight"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct SetPvfCheckingEnabled { - pub new: ::core::primitive::bool, - } - impl ::subxt::Call for SetPvfCheckingEnabled { - const PALLET: &'static str = "Configuration"; - const FUNCTION: &'static str = "set_pvf_checking_enabled"; - } - #[derive( - :: subxt :: codec :: CompactAs, - :: subxt :: codec :: Decode, - :: subxt :: codec :: Encode, - Debug, - )] - pub struct SetPvfVotingTtl { - pub new: ::core::primitive::u32, - } - impl ::subxt::Call for SetPvfVotingTtl { - const PALLET: &'static str = "Configuration"; - const FUNCTION: &'static str = "set_pvf_voting_ttl"; - } - #[derive( - :: subxt :: codec :: CompactAs, - :: subxt :: codec :: Decode, - :: subxt :: codec :: Encode, - Debug, - )] - pub struct SetMinimumValidationUpgradeDelay { - pub new: ::core::primitive::u32, - } - impl ::subxt::Call for SetMinimumValidationUpgradeDelay { - const PALLET: &'static str = "Configuration"; - const FUNCTION: &'static str = "set_minimum_validation_upgrade_delay"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct SetBypassConsistencyCheck { - pub new: ::core::primitive::bool, - } - impl ::subxt::Call for SetBypassConsistencyCheck { - const PALLET: &'static str = "Configuration"; - const FUNCTION: &'static str = "set_bypass_consistency_check"; - } - pub struct TransactionApi<'a, T: ::subxt::Config, X> { - client: &'a ::subxt::Client, - marker: ::core::marker::PhantomData, - } - impl<'a, T, X> TransactionApi<'a, T, X> - where - T: ::subxt::Config, - X: ::subxt::extrinsic::ExtrinsicParams, - { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { - client, - marker: ::core::marker::PhantomData, - } - } - #[doc = "Set the validation upgrade cooldown."] - pub fn set_validation_upgrade_cooldown( - &self, - new: 
::core::primitive::u32, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - SetValidationUpgradeCooldown, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .call_hash::()? - == [ - 153u8, 60u8, 171u8, 164u8, 241u8, 214u8, 235u8, 141u8, 4u8, 32u8, - 129u8, 253u8, 128u8, 148u8, 185u8, 51u8, 65u8, 34u8, 68u8, 72u8, 202u8, - 159u8, 74u8, 243u8, 35u8, 138u8, 208u8, 26u8, 182u8, 189u8, 41u8, 11u8, - ] - { - let call = SetValidationUpgradeCooldown { new }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Set the validation upgrade delay."] - pub fn set_validation_upgrade_delay( - &self, - new: ::core::primitive::u32, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - SetValidationUpgradeDelay, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .call_hash::()? - == [ - 136u8, 220u8, 63u8, 166u8, 202u8, 19u8, 241u8, 32u8, 100u8, 14u8, - 101u8, 244u8, 241u8, 141u8, 144u8, 213u8, 185u8, 88u8, 193u8, 2u8, - 55u8, 154u8, 24u8, 77u8, 66u8, 167u8, 69u8, 245u8, 224u8, 63u8, 196u8, - 200u8, - ] - { - let call = SetValidationUpgradeDelay { new }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Set the acceptance period for an included candidate."] - pub fn set_code_retention_period( - &self, - new: ::core::primitive::u32, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - SetCodeRetentionPeriod, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .call_hash::()? - == [ - 94u8, 104u8, 13u8, 127u8, 95u8, 137u8, 66u8, 224u8, 22u8, 53u8, 14u8, - 161u8, 67u8, 85u8, 78u8, 161u8, 92u8, 81u8, 190u8, 213u8, 113u8, 235u8, - 64u8, 19u8, 112u8, 164u8, 71u8, 88u8, 183u8, 234u8, 237u8, 9u8, - ] - { - let call = SetCodeRetentionPeriod { new }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Set the max validation code size for incoming upgrades."] - pub fn set_max_code_size( - &self, - new: ::core::primitive::u32, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - SetMaxCodeSize, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 74u8, 39u8, 190u8, 155u8, 121u8, 60u8, 233u8, 95u8, 177u8, 57u8, 116u8, - 107u8, 200u8, 44u8, 2u8, 215u8, 209u8, 50u8, 37u8, 112u8, 136u8, 107u8, - 202u8, 142u8, 114u8, 25u8, 43u8, 134u8, 250u8, 15u8, 81u8, 13u8, - ] - { - let call = SetMaxCodeSize { new }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Set the max POV block size for incoming upgrades."] - pub fn set_max_pov_size( - &self, - new: ::core::primitive::u32, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - SetMaxPovSize, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? 
- == [ - 77u8, 199u8, 18u8, 53u8, 223u8, 107u8, 57u8, 141u8, 8u8, 138u8, 180u8, - 175u8, 73u8, 88u8, 205u8, 185u8, 56u8, 106u8, 43u8, 87u8, 109u8, 9u8, - 103u8, 103u8, 50u8, 158u8, 11u8, 77u8, 162u8, 38u8, 57u8, 27u8, - ] - { - let call = SetMaxPovSize { new }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Set the max head data size for paras."] - pub fn set_max_head_data_size( - &self, - new: ::core::primitive::u32, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - SetMaxHeadDataSize, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 30u8, 132u8, 5u8, 207u8, 126u8, 145u8, 187u8, 129u8, 36u8, 235u8, - 179u8, 61u8, 243u8, 87u8, 178u8, 107u8, 8u8, 21u8, 43u8, 39u8, 119u8, - 138u8, 146u8, 146u8, 109u8, 189u8, 56u8, 160u8, 14u8, 78u8, 230u8, - 149u8, - ] - { - let call = SetMaxHeadDataSize { new }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Set the number of parathread execution cores."] - pub fn set_parathread_cores( - &self, - new: ::core::primitive::u32, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - SetParathreadCores, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 5u8, 198u8, 156u8, 226u8, 125u8, 16u8, 2u8, 64u8, 28u8, 189u8, 213u8, - 85u8, 6u8, 112u8, 173u8, 183u8, 174u8, 207u8, 129u8, 110u8, 201u8, - 161u8, 163u8, 191u8, 20u8, 14u8, 65u8, 106u8, 234u8, 203u8, 39u8, 75u8, - ] - { - let call = SetParathreadCores { new }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Set the number of retries for a particular parathread."] - pub fn set_parathread_retries( - &self, - new: ::core::primitive::u32, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - SetParathreadRetries, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 146u8, 134u8, 204u8, 109u8, 167u8, 35u8, 255u8, 245u8, 98u8, 24u8, - 213u8, 33u8, 144u8, 194u8, 196u8, 196u8, 66u8, 220u8, 168u8, 156u8, - 171u8, 179u8, 154u8, 30u8, 221u8, 45u8, 65u8, 192u8, 194u8, 130u8, - 87u8, 100u8, - ] - { - let call = SetParathreadRetries { new }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Set the parachain validator-group rotation frequency"] - pub fn set_group_rotation_frequency( - &self, - new: ::core::primitive::u32, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - SetGroupRotationFrequency, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .call_hash::()? 
- == [ - 102u8, 192u8, 226u8, 120u8, 69u8, 117u8, 239u8, 156u8, 111u8, 239u8, - 197u8, 191u8, 221u8, 18u8, 140u8, 214u8, 154u8, 212u8, 151u8, 35u8, - 176u8, 2u8, 162u8, 131u8, 115u8, 102u8, 177u8, 106u8, 35u8, 214u8, - 151u8, 227u8, - ] - { - let call = SetGroupRotationFrequency { new }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Set the availability period for parachains."] - pub fn set_chain_availability_period( - &self, - new: ::core::primitive::u32, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - SetChainAvailabilityPeriod, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .call_hash::()? - == [ - 3u8, 83u8, 31u8, 241u8, 73u8, 137u8, 18u8, 95u8, 119u8, 143u8, 28u8, - 110u8, 151u8, 229u8, 172u8, 208u8, 50u8, 25u8, 89u8, 222u8, 128u8, - 125u8, 112u8, 25u8, 204u8, 141u8, 175u8, 69u8, 57u8, 161u8, 189u8, - 167u8, - ] - { - let call = SetChainAvailabilityPeriod { new }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Set the availability period for parathreads."] - pub fn set_thread_availability_period( - &self, - new: ::core::primitive::u32, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - SetThreadAvailabilityPeriod, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .call_hash::()? - == [ - 242u8, 204u8, 158u8, 5u8, 123u8, 163u8, 6u8, 209u8, 44u8, 73u8, 112u8, - 249u8, 96u8, 160u8, 188u8, 151u8, 107u8, 21u8, 9u8, 100u8, 104u8, - 184u8, 97u8, 77u8, 122u8, 254u8, 88u8, 94u8, 22u8, 15u8, 57u8, 44u8, - ] - { - let call = SetThreadAvailabilityPeriod { new }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Set the scheduling lookahead, in expected number of blocks at peak throughput."] - pub fn set_scheduling_lookahead( - &self, - new: ::core::primitive::u32, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - SetSchedulingLookahead, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .call_hash::()? - == [ - 146u8, 149u8, 10u8, 57u8, 122u8, 116u8, 61u8, 181u8, 97u8, 240u8, 87u8, - 37u8, 227u8, 233u8, 123u8, 26u8, 243u8, 58u8, 54u8, 93u8, 111u8, 204u8, - 108u8, 18u8, 167u8, 20u8, 255u8, 173u8, 46u8, 212u8, 246u8, 201u8, - ] - { - let call = SetSchedulingLookahead { new }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Set the maximum number of validators to assign to any core."] - pub fn set_max_validators_per_core( - &self, - new: ::core::option::Option<::core::primitive::u32>, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - SetMaxValidatorsPerCore, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .call_hash::()? 
- == [ - 27u8, 160u8, 153u8, 252u8, 121u8, 42u8, 94u8, 131u8, 199u8, 216u8, - 15u8, 65u8, 94u8, 69u8, 127u8, 130u8, 179u8, 236u8, 49u8, 32u8, 239u8, - 37u8, 58u8, 0u8, 50u8, 5u8, 255u8, 30u8, 203u8, 230u8, 135u8, 202u8, - ] - { - let call = SetMaxValidatorsPerCore { new }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Set the maximum number of validators to use in parachain consensus."] - pub fn set_max_validators( - &self, - new: ::core::option::Option<::core::primitive::u32>, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - SetMaxValidators, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 192u8, 156u8, 115u8, 10u8, 225u8, 94u8, 190u8, 180u8, 242u8, 131u8, - 202u8, 13u8, 82u8, 27u8, 8u8, 144u8, 70u8, 92u8, 136u8, 206u8, 205u8, - 3u8, 242u8, 130u8, 77u8, 114u8, 242u8, 111u8, 99u8, 24u8, 238u8, 55u8, - ] - { - let call = SetMaxValidators { new }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Set the dispute period, in number of sessions to keep for disputes."] - pub fn set_dispute_period( - &self, - new: ::core::primitive::u32, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - SetDisputePeriod, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 232u8, 96u8, 104u8, 249u8, 183u8, 148u8, 126u8, 80u8, 64u8, 39u8, 2u8, - 208u8, 183u8, 189u8, 139u8, 201u8, 61u8, 63u8, 42u8, 155u8, 215u8, - 32u8, 212u8, 158u8, 90u8, 80u8, 159u8, 23u8, 249u8, 204u8, 218u8, - 217u8, - ] - { - let call = SetDisputePeriod { new }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Set the dispute post conclusion acceptance period."] - pub fn set_dispute_post_conclusion_acceptance_period( - &self, - new: ::core::primitive::u32, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - SetDisputePostConclusionAcceptancePeriod, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .call_hash::()? - == [ - 45u8, 140u8, 213u8, 62u8, 212u8, 31u8, 126u8, 94u8, 102u8, 176u8, - 203u8, 240u8, 28u8, 25u8, 116u8, 77u8, 187u8, 147u8, 32u8, 20u8, 25u8, - 124u8, 164u8, 162u8, 246u8, 223u8, 146u8, 28u8, 35u8, 4u8, 174u8, 47u8, - ] - { - let call = SetDisputePostConclusionAcceptancePeriod { new }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Set the maximum number of dispute spam slots."] - pub fn set_dispute_max_spam_slots( - &self, - new: ::core::primitive::u32, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - SetDisputeMaxSpamSlots, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .call_hash::()? 
- == [ - 180u8, 195u8, 6u8, 141u8, 89u8, 252u8, 245u8, 202u8, 36u8, 123u8, - 105u8, 35u8, 161u8, 60u8, 233u8, 213u8, 191u8, 65u8, 68u8, 4u8, 19u8, - 201u8, 226u8, 103u8, 124u8, 181u8, 201u8, 91u8, 84u8, 170u8, 48u8, - 154u8, - ] - { - let call = SetDisputeMaxSpamSlots { new }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Set the dispute conclusion by time out period."] - pub fn set_dispute_conclusion_by_time_out_period( - &self, - new: ::core::primitive::u32, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - SetDisputeConclusionByTimeOutPeriod, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .call_hash::()? - == [ - 50u8, 221u8, 129u8, 199u8, 147u8, 98u8, 11u8, 104u8, 133u8, 161u8, - 53u8, 163u8, 100u8, 155u8, 228u8, 167u8, 146u8, 87u8, 186u8, 228u8, - 147u8, 44u8, 142u8, 160u8, 119u8, 146u8, 10u8, 155u8, 5u8, 35u8, 8u8, - 165u8, - ] - { - let call = SetDisputeConclusionByTimeOutPeriod { new }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Set the no show slots, in number of number of consensus slots."] - #[doc = "Must be at least 1."] - pub fn set_no_show_slots( - &self, - new: ::core::primitive::u32, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - SetNoShowSlots, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 235u8, 5u8, 35u8, 159u8, 200u8, 58u8, 171u8, 179u8, 78u8, 70u8, 161u8, - 47u8, 237u8, 245u8, 77u8, 81u8, 1u8, 138u8, 145u8, 137u8, 45u8, 126u8, - 255u8, 227u8, 130u8, 217u8, 36u8, 251u8, 72u8, 235u8, 16u8, 231u8, - ] - { - let call = SetNoShowSlots { new }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Set the total number of delay tranches."] - pub fn set_n_delay_tranches( - &self, - new: ::core::primitive::u32, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - SetNDelayTranches, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 109u8, 208u8, 13u8, 18u8, 178u8, 117u8, 101u8, 169u8, 162u8, 255u8, - 28u8, 88u8, 199u8, 89u8, 83u8, 59u8, 46u8, 105u8, 186u8, 4u8, 7u8, - 171u8, 78u8, 122u8, 197u8, 110u8, 63u8, 164u8, 140u8, 59u8, 179u8, - 236u8, - ] - { - let call = SetNDelayTranches { new }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Set the zeroth delay tranche width."] - pub fn set_zeroth_delay_tranche_width( - &self, - new: ::core::primitive::u32, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - SetZerothDelayTrancheWidth, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .call_hash::()? 
- == [ - 162u8, 20u8, 162u8, 90u8, 59u8, 194u8, 147u8, 255u8, 198u8, 203u8, - 50u8, 13u8, 134u8, 142u8, 6u8, 156u8, 205u8, 128u8, 222u8, 225u8, - 150u8, 68u8, 198u8, 212u8, 198u8, 238u8, 3u8, 209u8, 224u8, 19u8, - 118u8, 147u8, - ] - { - let call = SetZerothDelayTrancheWidth { new }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Set the number of validators needed to approve a block."] - pub fn set_needed_approvals( - &self, - new: ::core::primitive::u32, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - SetNeededApprovals, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 83u8, 164u8, 204u8, 168u8, 93u8, 165u8, 118u8, 111u8, 149u8, 129u8, - 126u8, 250u8, 95u8, 148u8, 193u8, 173u8, 239u8, 1u8, 14u8, 102u8, 77u8, - 150u8, 149u8, 55u8, 82u8, 179u8, 2u8, 117u8, 19u8, 34u8, 223u8, 173u8, - ] - { - let call = SetNeededApprovals { new }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Set the number of samples to do of the `RelayVRFModulo` approval assignment criterion."] - pub fn set_relay_vrf_modulo_samples( - &self, - new: ::core::primitive::u32, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - SetRelayVrfModuloSamples, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .call_hash::()? - == [ - 22u8, 11u8, 132u8, 96u8, 58u8, 253u8, 183u8, 31u8, 137u8, 231u8, 187u8, - 145u8, 119u8, 164u8, 55u8, 142u8, 37u8, 151u8, 227u8, 112u8, 113u8, - 18u8, 200u8, 247u8, 238u8, 10u8, 223u8, 74u8, 4u8, 132u8, 115u8, 119u8, - ] - { - let call = SetRelayVrfModuloSamples { new }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Sets the maximum items that can present in a upward dispatch queue at once."] - pub fn set_max_upward_queue_count( - &self, - new: ::core::primitive::u32, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - SetMaxUpwardQueueCount, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .call_hash::()? - == [ - 16u8, 31u8, 245u8, 94u8, 243u8, 122u8, 55u8, 155u8, 161u8, 239u8, 5u8, - 59u8, 186u8, 207u8, 136u8, 253u8, 255u8, 176u8, 135u8, 242u8, 199u8, - 96u8, 226u8, 150u8, 15u8, 160u8, 60u8, 101u8, 66u8, 143u8, 93u8, 104u8, - ] - { - let call = SetMaxUpwardQueueCount { new }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Sets the maximum total size of items that can present in a upward dispatch queue at once."] - pub fn set_max_upward_queue_size( - &self, - new: ::core::primitive::u32, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - SetMaxUpwardQueueSize, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .call_hash::()? 
- == [ - 203u8, 170u8, 21u8, 149u8, 170u8, 246u8, 91u8, 54u8, 197u8, 91u8, 41u8, - 114u8, 210u8, 239u8, 73u8, 236u8, 68u8, 194u8, 157u8, 116u8, 229u8, - 1u8, 34u8, 135u8, 144u8, 191u8, 56u8, 77u8, 13u8, 92u8, 221u8, 4u8, - ] - { - let call = SetMaxUpwardQueueSize { new }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Set the critical downward message size."] - pub fn set_max_downward_message_size( - &self, - new: ::core::primitive::u32, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - SetMaxDownwardMessageSize, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .call_hash::()? - == [ - 55u8, 181u8, 6u8, 126u8, 31u8, 154u8, 42u8, 194u8, 64u8, 23u8, 34u8, - 255u8, 151u8, 186u8, 52u8, 32u8, 168u8, 233u8, 44u8, 35u8, 152u8, 78u8, - 230u8, 242u8, 169u8, 85u8, 103u8, 133u8, 177u8, 239u8, 175u8, 119u8, - ] - { - let call = SetMaxDownwardMessageSize { new }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Sets the soft limit for the phase of dispatching dispatchable upward messages."] - pub fn set_ump_service_total_weight( - &self, - new: ::core::primitive::u64, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - SetUmpServiceTotalWeight, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .call_hash::()? - == [ - 14u8, 179u8, 217u8, 169u8, 84u8, 45u8, 193u8, 3u8, 7u8, 196u8, 56u8, - 209u8, 50u8, 148u8, 32u8, 205u8, 99u8, 202u8, 72u8, 246u8, 151u8, - 230u8, 145u8, 98u8, 188u8, 1u8, 136u8, 241u8, 217u8, 37u8, 6u8, 101u8, - ] - { - let call = SetUmpServiceTotalWeight { new }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Sets the maximum size of an upward message that can be sent by a candidate."] - pub fn set_max_upward_message_size( - &self, - new: ::core::primitive::u32, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - SetMaxUpwardMessageSize, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .call_hash::()? - == [ - 134u8, 232u8, 5u8, 70u8, 81u8, 177u8, 81u8, 235u8, 93u8, 145u8, 193u8, - 42u8, 150u8, 61u8, 236u8, 20u8, 38u8, 176u8, 124u8, 170u8, 248u8, - 149u8, 57u8, 88u8, 17u8, 46u8, 202u8, 74u8, 35u8, 82u8, 190u8, 223u8, - ] - { - let call = SetMaxUpwardMessageSize { new }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Sets the maximum number of messages that a candidate can contain."] - pub fn set_max_upward_message_num_per_candidate( - &self, - new: ::core::primitive::u32, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - SetMaxUpwardMessageNumPerCandidate, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .call_hash::()? 
- == [ - 14u8, 79u8, 128u8, 66u8, 119u8, 24u8, 26u8, 116u8, 249u8, 254u8, 86u8, - 228u8, 248u8, 75u8, 111u8, 90u8, 101u8, 96u8, 124u8, 25u8, 245u8, - 115u8, 119u8, 14u8, 213u8, 180u8, 224u8, 224u8, 188u8, 172u8, 152u8, - 16u8, - ] - { - let call = SetMaxUpwardMessageNumPerCandidate { new }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Sets the number of sessions after which an HRMP open channel request expires."] - pub fn set_hrmp_open_request_ttl( - &self, - new: ::core::primitive::u32, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - SetHrmpOpenRequestTtl, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .call_hash::()? - == [ - 168u8, 254u8, 189u8, 22u8, 61u8, 90u8, 131u8, 1u8, 103u8, 208u8, 179u8, - 85u8, 80u8, 215u8, 9u8, 3u8, 34u8, 73u8, 130u8, 19u8, 166u8, 77u8, - 131u8, 148u8, 183u8, 86u8, 186u8, 148u8, 109u8, 173u8, 74u8, 94u8, - ] - { - let call = SetHrmpOpenRequestTtl { new }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Sets the amount of funds that the sender should provide for opening an HRMP channel."] - pub fn set_hrmp_sender_deposit( - &self, - new: ::core::primitive::u128, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - SetHrmpSenderDeposit, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 250u8, 23u8, 196u8, 206u8, 34u8, 86u8, 28u8, 14u8, 110u8, 189u8, 38u8, - 39u8, 2u8, 16u8, 212u8, 32u8, 65u8, 249u8, 120u8, 163u8, 89u8, 232u8, - 3u8, 49u8, 155u8, 174u8, 96u8, 21u8, 240u8, 185u8, 140u8, 243u8, - ] - { - let call = SetHrmpSenderDeposit { new }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Sets the amount of funds that the recipient should provide for accepting opening an HRMP"] - #[doc = "channel."] - pub fn set_hrmp_recipient_deposit( - &self, - new: ::core::primitive::u128, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - SetHrmpRecipientDeposit, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .call_hash::()? - == [ - 104u8, 35u8, 129u8, 31u8, 111u8, 57u8, 190u8, 42u8, 159u8, 220u8, 86u8, - 136u8, 200u8, 4u8, 62u8, 241u8, 141u8, 90u8, 200u8, 132u8, 141u8, - 154u8, 117u8, 206u8, 79u8, 160u8, 124u8, 186u8, 231u8, 250u8, 86u8, - 87u8, - ] - { - let call = SetHrmpRecipientDeposit { new }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Sets the maximum number of messages allowed in an HRMP channel at once."] - pub fn set_hrmp_channel_max_capacity( - &self, - new: ::core::primitive::u32, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - SetHrmpChannelMaxCapacity, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .call_hash::()? 
- == [ - 211u8, 49u8, 82u8, 59u8, 16u8, 97u8, 253u8, 64u8, 185u8, 216u8, 235u8, - 10u8, 84u8, 194u8, 231u8, 115u8, 153u8, 20u8, 31u8, 86u8, 47u8, 226u8, - 245u8, 214u8, 134u8, 194u8, 13u8, 254u8, 230u8, 66u8, 54u8, 240u8, - ] - { - let call = SetHrmpChannelMaxCapacity { new }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Sets the maximum total size of messages in bytes allowed in an HRMP channel at once."] - pub fn set_hrmp_channel_max_total_size( - &self, - new: ::core::primitive::u32, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - SetHrmpChannelMaxTotalSize, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .call_hash::()? - == [ - 254u8, 196u8, 171u8, 29u8, 208u8, 179u8, 204u8, 58u8, 64u8, 41u8, 52u8, - 73u8, 153u8, 245u8, 29u8, 132u8, 129u8, 29u8, 94u8, 241u8, 136u8, 20u8, - 12u8, 20u8, 255u8, 244u8, 252u8, 98u8, 136u8, 222u8, 7u8, 19u8, - ] - { - let call = SetHrmpChannelMaxTotalSize { new }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Sets the maximum number of inbound HRMP channels a parachain is allowed to accept."] - pub fn set_hrmp_max_parachain_inbound_channels( - &self, - new: ::core::primitive::u32, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - SetHrmpMaxParachainInboundChannels, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .call_hash::()? - == [ - 219u8, 88u8, 3u8, 249u8, 16u8, 182u8, 182u8, 233u8, 152u8, 24u8, 29u8, - 96u8, 227u8, 50u8, 156u8, 98u8, 71u8, 196u8, 158u8, 103u8, 114u8, 55u8, - 65u8, 199u8, 211u8, 225u8, 235u8, 172u8, 218u8, 123u8, 158u8, 57u8, - ] - { - let call = SetHrmpMaxParachainInboundChannels { new }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Sets the maximum number of inbound HRMP channels a parathread is allowed to accept."] - pub fn set_hrmp_max_parathread_inbound_channels( - &self, - new: ::core::primitive::u32, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - SetHrmpMaxParathreadInboundChannels, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .call_hash::()? - == [ - 153u8, 169u8, 153u8, 141u8, 45u8, 21u8, 26u8, 33u8, 207u8, 234u8, - 186u8, 154u8, 12u8, 148u8, 2u8, 226u8, 55u8, 125u8, 58u8, 127u8, 154u8, - 176u8, 3u8, 47u8, 164u8, 63u8, 25u8, 42u8, 66u8, 131u8, 143u8, 254u8, - ] - { - let call = SetHrmpMaxParathreadInboundChannels { new }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Sets the maximum size of a message that could ever be put into an HRMP channel."] - pub fn set_hrmp_channel_max_message_size( - &self, - new: ::core::primitive::u32, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - SetHrmpChannelMaxMessageSize, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .call_hash::()? 
- == [ - 237u8, 103u8, 126u8, 197u8, 164u8, 247u8, 67u8, 144u8, 30u8, 192u8, - 161u8, 243u8, 254u8, 26u8, 254u8, 33u8, 59u8, 216u8, 159u8, 105u8, - 166u8, 138u8, 38u8, 124u8, 248u8, 81u8, 11u8, 223u8, 120u8, 75u8, - 176u8, 177u8, - ] - { - let call = SetHrmpChannelMaxMessageSize { new }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Sets the maximum number of outbound HRMP channels a parachain is allowed to open."] - pub fn set_hrmp_max_parachain_outbound_channels( - &self, - new: ::core::primitive::u32, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - SetHrmpMaxParachainOutboundChannels, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .call_hash::()? - == [ - 173u8, 184u8, 49u8, 66u8, 158u8, 142u8, 95u8, 225u8, 90u8, 171u8, 4u8, - 20u8, 210u8, 180u8, 54u8, 236u8, 60u8, 5u8, 76u8, 173u8, 226u8, 203u8, - 7u8, 156u8, 54u8, 9u8, 198u8, 171u8, 250u8, 1u8, 120u8, 240u8, - ] - { - let call = SetHrmpMaxParachainOutboundChannels { new }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Sets the maximum number of outbound HRMP channels a parathread is allowed to open."] - pub fn set_hrmp_max_parathread_outbound_channels( - &self, - new: ::core::primitive::u32, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - SetHrmpMaxParathreadOutboundChannels, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .call_hash::()? - == [ - 166u8, 73u8, 121u8, 53u8, 27u8, 77u8, 150u8, 115u8, 29u8, 202u8, 34u8, - 4u8, 35u8, 161u8, 113u8, 15u8, 66u8, 60u8, 214u8, 129u8, 157u8, 143u8, - 227u8, 134u8, 213u8, 9u8, 231u8, 224u8, 187u8, 36u8, 16u8, 68u8, - ] - { - let call = SetHrmpMaxParathreadOutboundChannels { new }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Sets the maximum number of outbound HRMP messages can be sent by a candidate."] - pub fn set_hrmp_max_message_num_per_candidate( - &self, - new: ::core::primitive::u32, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - SetHrmpMaxMessageNumPerCandidate, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .call_hash::()? - == [ - 235u8, 47u8, 114u8, 29u8, 87u8, 198u8, 62u8, 200u8, 235u8, 184u8, - 204u8, 35u8, 251u8, 210u8, 88u8, 150u8, 22u8, 61u8, 242u8, 196u8, - 240u8, 76u8, 45u8, 54u8, 155u8, 111u8, 244u8, 31u8, 158u8, 48u8, 68u8, - 233u8, - ] - { - let call = SetHrmpMaxMessageNumPerCandidate { new }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Sets the maximum amount of weight any individual upward message may consume."] - pub fn set_ump_max_individual_weight( - &self, - new: ::core::primitive::u64, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - SetUmpMaxIndividualWeight, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .call_hash::()? 
- == [ - 61u8, 174u8, 42u8, 53u8, 120u8, 56u8, 252u8, 117u8, 173u8, 223u8, - 100u8, 141u8, 209u8, 29u8, 173u8, 240u8, 180u8, 113u8, 27u8, 24u8, 4u8, - 157u8, 107u8, 247u8, 235u8, 121u8, 152u8, 6u8, 176u8, 254u8, 18u8, - 70u8, - ] - { - let call = SetUmpMaxIndividualWeight { new }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Enable or disable PVF pre-checking. Consult the field documentation prior executing."] - pub fn set_pvf_checking_enabled( - &self, - new: ::core::primitive::bool, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - SetPvfCheckingEnabled, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .call_hash::()? - == [ - 224u8, 199u8, 197u8, 208u8, 178u8, 211u8, 14u8, 102u8, 174u8, 205u8, - 207u8, 181u8, 75u8, 125u8, 209u8, 69u8, 85u8, 1u8, 98u8, 251u8, 17u8, - 42u8, 73u8, 9u8, 252u8, 184u8, 81u8, 202u8, 132u8, 236u8, 97u8, 121u8, - ] - { - let call = SetPvfCheckingEnabled { new }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Set the number of session changes after which a PVF pre-checking voting is rejected."] - pub fn set_pvf_voting_ttl( - &self, - new: ::core::primitive::u32, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - SetPvfVotingTtl, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 179u8, 71u8, 42u8, 140u8, 187u8, 43u8, 138u8, 16u8, 104u8, 41u8, 30u8, - 220u8, 131u8, 179u8, 200u8, 184u8, 105u8, 58u8, 131u8, 225u8, 169u8, - 253u8, 46u8, 186u8, 102u8, 52u8, 147u8, 244u8, 22u8, 255u8, 41u8, 6u8, - ] - { - let call = SetPvfVotingTtl { new }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Sets the minimum delay between announcing the upgrade block for a parachain until the"] - #[doc = "upgrade taking place."] - #[doc = ""] - #[doc = "See the field documentation for information and constraints for the new value."] - pub fn set_minimum_validation_upgrade_delay( - &self, - new: ::core::primitive::u32, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - SetMinimumValidationUpgradeDelay, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .call_hash::()? - == [ - 225u8, 178u8, 41u8, 194u8, 154u8, 222u8, 247u8, 129u8, 35u8, 102u8, - 248u8, 144u8, 21u8, 74u8, 42u8, 239u8, 135u8, 205u8, 173u8, 190u8, - 112u8, 30u8, 240u8, 106u8, 10u8, 217u8, 208u8, 11u8, 79u8, 47u8, 198u8, - 37u8, - ] - { - let call = SetMinimumValidationUpgradeDelay { new }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Setting this to true will disable consistency checks for the configuration setters."] - #[doc = "Use with caution."] - pub fn set_bypass_consistency_check( - &self, - new: ::core::primitive::bool, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - SetBypassConsistencyCheck, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .call_hash::()? 
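// Editor's note (illustrative sketch, not part of the generated bindings): every
// call builder above validates `call_hash::<C>()` against the node's metadata and
// only then wraps the encoded call in a `SubmittableExtrinsic`; signing and
// submission are left to the caller. With hypothetical `Cfg: ::subxt::Config` and
// `Params: ::subxt::extrinsic::ExtrinsicParams<Cfg>` types (not defined in this
// patch), building one of these calls would look roughly like:
//
//     let tx_api = TransactionApi::<Cfg, Params>::new(&client);
//     let extrinsic = tx_api.set_validation_upgrade_cooldown(10)?;
//     // `extrinsic` can then be signed and submitted; note these Configuration
//     // calls only dispatch from a root/governance origin on-chain.
//
// A hash mismatch surfaces as `MetadataError::IncompatibleMetadata` before
// anything is sent to the node.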
- == [ - 5u8, 54u8, 178u8, 218u8, 46u8, 61u8, 99u8, 23u8, 227u8, 202u8, 201u8, - 164u8, 121u8, 226u8, 65u8, 253u8, 29u8, 164u8, 170u8, 130u8, 32u8, - 85u8, 222u8, 10u8, 232u8, 252u8, 73u8, 23u8, 69u8, 30u8, 1u8, 87u8, - ] - { - let call = SetBypassConsistencyCheck { new }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - pub mod storage { - use super::runtime_types; - pub struct ActiveConfig; - impl ::subxt::StorageEntry for ActiveConfig { - const PALLET: &'static str = "Configuration"; - const STORAGE: &'static str = "ActiveConfig"; - type Value = - runtime_types::polkadot_runtime_parachains::configuration::HostConfiguration< - ::core::primitive::u32, - >; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct PendingConfigs; - impl ::subxt::StorageEntry for PendingConfigs { - const PALLET: &'static str = "Configuration"; - const STORAGE: &'static str = "PendingConfigs"; - type Value = ::std::vec::Vec<( - ::core::primitive::u32, - runtime_types::polkadot_runtime_parachains::configuration::HostConfiguration< - ::core::primitive::u32, - >, - )>; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct BypassConsistencyCheck; - impl ::subxt::StorageEntry for BypassConsistencyCheck { - const PALLET: &'static str = "Configuration"; - const STORAGE: &'static str = "BypassConsistencyCheck"; - type Value = ::core::primitive::bool; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct StorageApi<'a, T: ::subxt::Config> { - client: &'a ::subxt::Client, - } - impl<'a, T: ::subxt::Config> StorageApi<'a, T> { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { client } - } - #[doc = " The active configuration for the current session."] - pub async fn active_config( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - runtime_types::polkadot_runtime_parachains::configuration::HostConfiguration< - ::core::primitive::u32, - >, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 6u8, 31u8, 218u8, 51u8, 202u8, 166u8, 183u8, 192u8, 151u8, 184u8, - 103u8, 73u8, 239u8, 78u8, 183u8, 38u8, 192u8, 201u8, 27u8, 128u8, 59u8, - 48u8, 197u8, 23u8, 43u8, 39u8, 158u8, 35u8, 194u8, 23u8, 151u8, 145u8, - ] - { - let entry = ActiveConfig; - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Pending configuration changes."] - #[doc = ""] - #[doc = " This is a list of configuration changes, each with a session index at which it should"] - #[doc = " be applied."] - #[doc = ""] - #[doc = " The list is sorted ascending by session index. Also, this list can only contain at most"] - #[doc = " 2 items: for the next session and for the `scheduled_session`."] pub async fn pending_configs (& self , block_hash : :: core :: option :: Option < T :: Hash > ,) -> :: core :: result :: Result < :: std :: vec :: Vec < (:: core :: primitive :: u32 , runtime_types :: polkadot_runtime_parachains :: configuration :: HostConfiguration < :: core :: primitive :: u32 > ,) > , :: subxt :: BasicError >{ - if self.client.metadata().storage_hash::()? 
- == [ - 198u8, 168u8, 227u8, 228u8, 110u8, 98u8, 34u8, 21u8, 159u8, 114u8, - 202u8, 135u8, 39u8, 190u8, 40u8, 214u8, 170u8, 126u8, 203u8, 10u8, - 44u8, 114u8, 254u8, 208u8, 133u8, 129u8, 8u8, 112u8, 168u8, 135u8, - 196u8, 43u8, - ] - { - let entry = PendingConfigs; - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " If this is set, then the configuration setters will bypass the consistency checks. This"] - #[doc = " is meant to be used only as the last resort."] - pub async fn bypass_consistency_check( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result<::core::primitive::bool, ::subxt::BasicError> - { - if self - .client - .metadata() - .storage_hash::()? - == [ - 42u8, 191u8, 122u8, 163u8, 112u8, 2u8, 148u8, 59u8, 79u8, 219u8, 184u8, - 172u8, 246u8, 136u8, 185u8, 251u8, 189u8, 226u8, 83u8, 129u8, 162u8, - 109u8, 148u8, 75u8, 120u8, 216u8, 44u8, 28u8, 221u8, 78u8, 177u8, 94u8, - ] - { - let entry = BypassConsistencyCheck; - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - } - pub mod paras_shared { - use super::root_mod; - use super::runtime_types; - pub mod calls { - use super::root_mod; - use super::runtime_types; - type DispatchError = runtime_types::sp_runtime::DispatchError; - pub struct TransactionApi<'a, T: ::subxt::Config, X> { - client: &'a ::subxt::Client, - marker: ::core::marker::PhantomData, - } - impl<'a, T, X> TransactionApi<'a, T, X> - where - T: ::subxt::Config, - X: ::subxt::extrinsic::ExtrinsicParams, - { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { - client, - marker: ::core::marker::PhantomData, - } - } - } - } - pub mod storage { - use super::runtime_types; - pub struct CurrentSessionIndex; - impl ::subxt::StorageEntry for CurrentSessionIndex { - const PALLET: &'static str = "ParasShared"; - const STORAGE: &'static str = "CurrentSessionIndex"; - type Value = ::core::primitive::u32; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct ActiveValidatorIndices; - impl ::subxt::StorageEntry for ActiveValidatorIndices { - const PALLET: &'static str = "ParasShared"; - const STORAGE: &'static str = "ActiveValidatorIndices"; - type Value = - ::std::vec::Vec; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct ActiveValidatorKeys; - impl ::subxt::StorageEntry for ActiveValidatorKeys { - const PALLET: &'static str = "ParasShared"; - const STORAGE: &'static str = "ActiveValidatorKeys"; - type Value = - ::std::vec::Vec; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct StorageApi<'a, T: ::subxt::Config> { - client: &'a ::subxt::Client, - } - impl<'a, T: ::subxt::Config> StorageApi<'a, T> { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { client } - } - #[doc = " The current session index."] - pub async fn current_session_index( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> - { - if self - .client - .metadata() - .storage_hash::()? 
- == [ - 83u8, 15u8, 20u8, 55u8, 103u8, 65u8, 76u8, 202u8, 69u8, 14u8, 221u8, - 93u8, 38u8, 163u8, 167u8, 83u8, 18u8, 245u8, 33u8, 175u8, 7u8, 97u8, - 67u8, 186u8, 96u8, 57u8, 147u8, 120u8, 107u8, 91u8, 147u8, 64u8, - ] - { - let entry = CurrentSessionIndex; - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " All the validators actively participating in parachain consensus."] - #[doc = " Indices are into the broader validator set."] - pub async fn active_validator_indices( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::std::vec::Vec, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .storage_hash::()? - == [ - 128u8, 98u8, 186u8, 22u8, 178u8, 51u8, 151u8, 235u8, 201u8, 2u8, 245u8, - 177u8, 4u8, 125u8, 1u8, 245u8, 56u8, 102u8, 166u8, 129u8, 211u8, 189u8, - 137u8, 149u8, 234u8, 252u8, 97u8, 139u8, 151u8, 16u8, 129u8, 24u8, - ] - { - let entry = ActiveValidatorIndices; - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The parachain attestation keys of the validators actively participating in parachain consensus."] - #[doc = " This should be the same length as `ActiveValidatorIndices`."] - pub async fn active_validator_keys( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::std::vec::Vec, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .storage_hash::()? - == [ - 231u8, 46u8, 32u8, 152u8, 56u8, 71u8, 153u8, 56u8, 241u8, 29u8, 64u8, - 70u8, 19u8, 31u8, 220u8, 139u8, 58u8, 212u8, 221u8, 140u8, 87u8, 140u8, - 218u8, 50u8, 204u8, 221u8, 214u8, 168u8, 135u8, 118u8, 94u8, 21u8, - ] - { - let entry = ActiveValidatorKeys; - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - } - pub mod para_inclusion { - use super::root_mod; - use super::runtime_types; - pub mod calls { - use super::root_mod; - use super::runtime_types; - type DispatchError = runtime_types::sp_runtime::DispatchError; - pub struct TransactionApi<'a, T: ::subxt::Config, X> { - client: &'a ::subxt::Client, - marker: ::core::marker::PhantomData, - } - impl<'a, T, X> TransactionApi<'a, T, X> - where - T: ::subxt::Config, - X: ::subxt::extrinsic::ExtrinsicParams, - { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { - client, - marker: ::core::marker::PhantomData, - } - } - } - } - pub type Event = runtime_types::polkadot_runtime_parachains::inclusion::pallet::Event; - pub mod events { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "A candidate was backed. `[candidate, head_data]`"] - pub struct CandidateBacked( - pub runtime_types::polkadot_primitives::v2::CandidateReceipt<::subxt::sp_core::H256>, - pub runtime_types::polkadot_parachain::primitives::HeadData, - pub runtime_types::polkadot_primitives::v2::CoreIndex, - pub runtime_types::polkadot_primitives::v2::GroupIndex, - ); - impl ::subxt::Event for CandidateBacked { - const PALLET: &'static str = "ParaInclusion"; - const EVENT: &'static str = "CandidateBacked"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "A candidate was included. 
`[candidate, head_data]`"] - pub struct CandidateIncluded( - pub runtime_types::polkadot_primitives::v2::CandidateReceipt<::subxt::sp_core::H256>, - pub runtime_types::polkadot_parachain::primitives::HeadData, - pub runtime_types::polkadot_primitives::v2::CoreIndex, - pub runtime_types::polkadot_primitives::v2::GroupIndex, - ); - impl ::subxt::Event for CandidateIncluded { - const PALLET: &'static str = "ParaInclusion"; - const EVENT: &'static str = "CandidateIncluded"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "A candidate timed out. `[candidate, head_data]`"] - pub struct CandidateTimedOut( - pub runtime_types::polkadot_primitives::v2::CandidateReceipt<::subxt::sp_core::H256>, - pub runtime_types::polkadot_parachain::primitives::HeadData, - pub runtime_types::polkadot_primitives::v2::CoreIndex, - ); - impl ::subxt::Event for CandidateTimedOut { - const PALLET: &'static str = "ParaInclusion"; - const EVENT: &'static str = "CandidateTimedOut"; - } - } - pub mod storage { - use super::runtime_types; - pub struct AvailabilityBitfields<'a>( - pub &'a runtime_types::polkadot_primitives::v2::ValidatorIndex, - ); - impl ::subxt::StorageEntry for AvailabilityBitfields<'_> { - const PALLET: &'static str = "ParaInclusion"; - const STORAGE: &'static str = "AvailabilityBitfields"; - type Value = runtime_types :: polkadot_runtime_parachains :: inclusion :: AvailabilityBitfieldRecord < :: core :: primitive :: u32 > ; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( - &self.0, - ::subxt::StorageHasher::Twox64Concat, - )]) - } - } - pub struct PendingAvailability<'a>( - pub &'a runtime_types::polkadot_parachain::primitives::Id, - ); - impl ::subxt::StorageEntry for PendingAvailability<'_> { - const PALLET: &'static str = "ParaInclusion"; - const STORAGE: &'static str = "PendingAvailability"; - type Value = runtime_types :: polkadot_runtime_parachains :: inclusion :: CandidatePendingAvailability < :: subxt :: sp_core :: H256 , :: core :: primitive :: u32 > ; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( - &self.0, - ::subxt::StorageHasher::Twox64Concat, - )]) - } - } - pub struct PendingAvailabilityCommitments<'a>( - pub &'a runtime_types::polkadot_parachain::primitives::Id, - ); - impl ::subxt::StorageEntry for PendingAvailabilityCommitments<'_> { - const PALLET: &'static str = "ParaInclusion"; - const STORAGE: &'static str = "PendingAvailabilityCommitments"; - type Value = runtime_types::polkadot_primitives::v2::CandidateCommitments< - ::core::primitive::u32, - >; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( - &self.0, - ::subxt::StorageHasher::Twox64Concat, - )]) - } - } - pub struct StorageApi<'a, T: ::subxt::Config> { - client: &'a ::subxt::Client, - } - impl<'a, T: ::subxt::Config> StorageApi<'a, T> { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { client } - } - #[doc = " The latest bitfield for each validator, referred to by their index in the validator set."] pub async fn availability_bitfields (& self , _0 : & runtime_types :: polkadot_primitives :: v2 :: ValidatorIndex , block_hash : :: core :: option :: Option < T :: Hash > ,) -> :: core :: result :: Result < :: core :: option :: Option < runtime_types :: polkadot_runtime_parachains :: inclusion :: AvailabilityBitfieldRecord < :: core :: primitive :: u32 > > , :: subxt :: BasicError >{ - if 
self - .client - .metadata() - .storage_hash::()? - == [ - 223u8, 74u8, 17u8, 152u8, 136u8, 20u8, 241u8, 47u8, 169u8, 34u8, 128u8, - 78u8, 121u8, 47u8, 165u8, 35u8, 222u8, 15u8, 236u8, 90u8, 215u8, 160u8, - 10u8, 18u8, 152u8, 69u8, 38u8, 97u8, 122u8, 247u8, 241u8, 255u8, - ] - { - let entry = AvailabilityBitfields(_0); - self.client.storage().fetch(&entry, block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The latest bitfield for each validator, referred to by their index in the validator set."] - pub async fn availability_bitfields_iter( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::subxt::KeyIter<'a, T, AvailabilityBitfields<'a>>, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .storage_hash::()? - == [ - 223u8, 74u8, 17u8, 152u8, 136u8, 20u8, 241u8, 47u8, 169u8, 34u8, 128u8, - 78u8, 121u8, 47u8, 165u8, 35u8, 222u8, 15u8, 236u8, 90u8, 215u8, 160u8, - 10u8, 18u8, 152u8, 69u8, 38u8, 97u8, 122u8, 247u8, 241u8, 255u8, - ] - { - self.client.storage().iter(block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Candidates pending availability by `ParaId`."] pub async fn pending_availability (& self , _0 : & runtime_types :: polkadot_parachain :: primitives :: Id , block_hash : :: core :: option :: Option < T :: Hash > ,) -> :: core :: result :: Result < :: core :: option :: Option < runtime_types :: polkadot_runtime_parachains :: inclusion :: CandidatePendingAvailability < :: subxt :: sp_core :: H256 , :: core :: primitive :: u32 > > , :: subxt :: BasicError >{ - if self - .client - .metadata() - .storage_hash::()? - == [ - 87u8, 140u8, 64u8, 234u8, 110u8, 229u8, 7u8, 83u8, 100u8, 45u8, 125u8, - 76u8, 72u8, 179u8, 132u8, 190u8, 38u8, 22u8, 112u8, 85u8, 241u8, 82u8, - 208u8, 133u8, 187u8, 130u8, 6u8, 121u8, 100u8, 43u8, 106u8, 121u8, - ] - { - let entry = PendingAvailability(_0); - self.client.storage().fetch(&entry, block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Candidates pending availability by `ParaId`."] - pub async fn pending_availability_iter( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::subxt::KeyIter<'a, T, PendingAvailability<'a>>, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .storage_hash::()? - == [ - 87u8, 140u8, 64u8, 234u8, 110u8, 229u8, 7u8, 83u8, 100u8, 45u8, 125u8, - 76u8, 72u8, 179u8, 132u8, 190u8, 38u8, 22u8, 112u8, 85u8, 241u8, 82u8, - 208u8, 133u8, 187u8, 130u8, 6u8, 121u8, 100u8, 43u8, 106u8, 121u8, - ] - { - self.client.storage().iter(block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The commitments of candidates pending availability, by `ParaId`."] - pub async fn pending_availability_commitments( - &self, - _0: &runtime_types::polkadot_parachain::primitives::Id, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::core::option::Option< - runtime_types::polkadot_primitives::v2::CandidateCommitments< - ::core::primitive::u32, - >, - >, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .storage_hash::()? 
- == [ - 164u8, 245u8, 130u8, 208u8, 141u8, 88u8, 99u8, 247u8, 90u8, 215u8, - 40u8, 99u8, 239u8, 7u8, 231u8, 13u8, 233u8, 204u8, 223u8, 137u8, 158u8, - 250u8, 24u8, 107u8, 152u8, 240u8, 195u8, 28u8, 170u8, 219u8, 174u8, - 213u8, - ] - { - let entry = PendingAvailabilityCommitments(_0); - self.client.storage().fetch(&entry, block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The commitments of candidates pending availability, by `ParaId`."] - pub async fn pending_availability_commitments_iter( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::subxt::KeyIter<'a, T, PendingAvailabilityCommitments<'a>>, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .storage_hash::()? - == [ - 164u8, 245u8, 130u8, 208u8, 141u8, 88u8, 99u8, 247u8, 90u8, 215u8, - 40u8, 99u8, 239u8, 7u8, 231u8, 13u8, 233u8, 204u8, 223u8, 137u8, 158u8, - 250u8, 24u8, 107u8, 152u8, 240u8, 195u8, 28u8, 170u8, 219u8, 174u8, - 213u8, - ] - { - self.client.storage().iter(block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - } - pub mod para_inherent { - use super::root_mod; - use super::runtime_types; - pub mod calls { - use super::root_mod; - use super::runtime_types; - type DispatchError = runtime_types::sp_runtime::DispatchError; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct Enter { - pub data: runtime_types::polkadot_primitives::v2::InherentData< - runtime_types::sp_runtime::generic::header::Header< - ::core::primitive::u32, - runtime_types::sp_runtime::traits::BlakeTwo256, - >, - >, - } - impl ::subxt::Call for Enter { - const PALLET: &'static str = "ParaInherent"; - const FUNCTION: &'static str = "enter"; - } - pub struct TransactionApi<'a, T: ::subxt::Config, X> { - client: &'a ::subxt::Client, - marker: ::core::marker::PhantomData, - } - impl<'a, T, X> TransactionApi<'a, T, X> - where - T: ::subxt::Config, - X: ::subxt::extrinsic::ExtrinsicParams, - { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { - client, - marker: ::core::marker::PhantomData, - } - } - #[doc = "Enter the paras inherent. This will process bitfields and backed candidates."] - pub fn enter( - &self, - data: runtime_types::polkadot_primitives::v2::InherentData< - runtime_types::sp_runtime::generic::header::Header< - ::core::primitive::u32, - runtime_types::sp_runtime::traits::BlakeTwo256, - >, - >, - ) -> Result< - ::subxt::SubmittableExtrinsic<'a, T, X, Enter, DispatchError, root_mod::Event>, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? 
- == [ - 15u8, 12u8, 250u8, 143u8, 7u8, 146u8, 79u8, 126u8, 205u8, 10u8, 135u8, - 108u8, 193u8, 195u8, 225u8, 117u8, 58u8, 45u8, 16u8, 229u8, 6u8, 122u8, - 92u8, 75u8, 174u8, 150u8, 109u8, 176u8, 54u8, 199u8, 236u8, 38u8, - ] - { - let call = Enter { data }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - pub mod storage { - use super::runtime_types; - pub struct Included; - impl ::subxt::StorageEntry for Included { - const PALLET: &'static str = "ParaInherent"; - const STORAGE: &'static str = "Included"; - type Value = (); - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct OnChainVotes; - impl ::subxt::StorageEntry for OnChainVotes { - const PALLET: &'static str = "ParaInherent"; - const STORAGE: &'static str = "OnChainVotes"; - type Value = runtime_types::polkadot_primitives::v2::ScrapedOnChainVotes< - ::subxt::sp_core::H256, - >; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct StorageApi<'a, T: ::subxt::Config> { - client: &'a ::subxt::Client, - } - impl<'a, T: ::subxt::Config> StorageApi<'a, T> { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { client } - } - #[doc = " Whether the paras inherent was included within this block."] - #[doc = ""] - #[doc = " The `Option<()>` is effectively a `bool`, but it never hits storage in the `None` variant"] - #[doc = " due to the guarantees of FRAME's storage APIs."] - #[doc = ""] - #[doc = " If this is `None` at the end of the block, we panic and render the block invalid."] - pub async fn included( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result<::core::option::Option<()>, ::subxt::BasicError> - { - if self.client.metadata().storage_hash::()? - == [ - 208u8, 213u8, 76u8, 64u8, 90u8, 141u8, 144u8, 52u8, 220u8, 35u8, 143u8, - 171u8, 45u8, 59u8, 9u8, 218u8, 29u8, 186u8, 139u8, 203u8, 205u8, 12u8, - 10u8, 2u8, 27u8, 167u8, 182u8, 244u8, 167u8, 220u8, 44u8, 16u8, - ] - { - let entry = Included; - self.client.storage().fetch(&entry, block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Scraped on chain data for extracting resolved disputes as well as backing votes."] - pub async fn on_chain_votes( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::core::option::Option< - runtime_types::polkadot_primitives::v2::ScrapedOnChainVotes< - ::subxt::sp_core::H256, - >, - >, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? 
- == [ - 245u8, 37u8, 52u8, 78u8, 128u8, 131u8, 93u8, 38u8, 210u8, 78u8, 218u8, - 171u8, 131u8, 175u8, 215u8, 91u8, 122u8, 134u8, 127u8, 79u8, 7u8, - 165u8, 122u8, 184u8, 122u8, 168u8, 218u8, 207u8, 15u8, 23u8, 162u8, - 4u8, - ] - { - let entry = OnChainVotes; - self.client.storage().fetch(&entry, block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - } - pub mod para_scheduler { - use super::root_mod; - use super::runtime_types; - pub mod storage { - use super::runtime_types; - pub struct ValidatorGroups; - impl ::subxt::StorageEntry for ValidatorGroups { - const PALLET: &'static str = "ParaScheduler"; - const STORAGE: &'static str = "ValidatorGroups"; - type Value = ::std::vec::Vec< - ::std::vec::Vec, - >; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct ParathreadQueue; - impl ::subxt::StorageEntry for ParathreadQueue { - const PALLET: &'static str = "ParaScheduler"; - const STORAGE: &'static str = "ParathreadQueue"; - type Value = - runtime_types::polkadot_runtime_parachains::scheduler::ParathreadClaimQueue; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct AvailabilityCores; - impl ::subxt::StorageEntry for AvailabilityCores { - const PALLET: &'static str = "ParaScheduler"; - const STORAGE: &'static str = "AvailabilityCores"; - type Value = ::std::vec::Vec< - ::core::option::Option, - >; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct ParathreadClaimIndex; - impl ::subxt::StorageEntry for ParathreadClaimIndex { - const PALLET: &'static str = "ParaScheduler"; - const STORAGE: &'static str = "ParathreadClaimIndex"; - type Value = ::std::vec::Vec; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct SessionStartBlock; - impl ::subxt::StorageEntry for SessionStartBlock { - const PALLET: &'static str = "ParaScheduler"; - const STORAGE: &'static str = "SessionStartBlock"; - type Value = ::core::primitive::u32; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct Scheduled; - impl ::subxt::StorageEntry for Scheduled { - const PALLET: &'static str = "ParaScheduler"; - const STORAGE: &'static str = "Scheduled"; - type Value = ::std::vec::Vec< - runtime_types::polkadot_runtime_parachains::scheduler::CoreAssignment, - >; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct StorageApi<'a, T: ::subxt::Config> { - client: &'a ::subxt::Client, - } - impl<'a, T: ::subxt::Config> StorageApi<'a, T> { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { client } - } - #[doc = " All the validator groups. One for each core. Indices are into `ActiveValidators` - not the"] - #[doc = " broader set of Polkadot validators, but instead just the subset used for parachains during"] - #[doc = " this session."] - #[doc = ""] - #[doc = " Bound: The number of cores is the sum of the numbers of parachains and parathread multiplexers."] - #[doc = " Reasonably, 100-1000. The dominant factor is the number of validators: safe upper bound at 10k."] - pub async fn validator_groups( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::std::vec::Vec< - ::std::vec::Vec, - >, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? 
- == [ - 84u8, 195u8, 53u8, 111u8, 186u8, 61u8, 3u8, 36u8, 10u8, 9u8, 66u8, - 119u8, 116u8, 213u8, 86u8, 153u8, 18u8, 149u8, 83u8, 92u8, 232u8, - 212u8, 175u8, 52u8, 74u8, 135u8, 137u8, 34u8, 123u8, 232u8, 131u8, - 22u8, - ] - { - let entry = ValidatorGroups; - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " A queue of upcoming claims and which core they should be mapped onto."] - #[doc = ""] - #[doc = " The number of queued claims is bounded at the `scheduling_lookahead`"] - #[doc = " multiplied by the number of parathread multiplexer cores. Reasonably, 10 * 50 = 500."] - pub async fn parathread_queue( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - runtime_types::polkadot_runtime_parachains::scheduler::ParathreadClaimQueue, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 72u8, 99u8, 70u8, 216u8, 91u8, 118u8, 160u8, 100u8, 20u8, 192u8, 78u8, - 214u8, 165u8, 200u8, 223u8, 166u8, 50u8, 214u8, 41u8, 241u8, 84u8, - 68u8, 21u8, 86u8, 130u8, 13u8, 124u8, 128u8, 104u8, 194u8, 23u8, 223u8, - ] - { - let entry = ParathreadQueue; - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " One entry for each availability core. Entries are `None` if the core is not currently occupied. Can be"] - #[doc = " temporarily `Some` if scheduled but not occupied."] - #[doc = " The i'th parachain belongs to the i'th core, with the remaining cores all being"] - #[doc = " parathread-multiplexers."] - #[doc = ""] - #[doc = " Bounded by the maximum of either of these two values:"] - #[doc = " * The number of parachains and parathread multiplexers"] - #[doc = " * The number of validators divided by `configuration.max_validators_per_core`."] - pub async fn availability_cores( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::std::vec::Vec< - ::core::option::Option< - runtime_types::polkadot_primitives::v2::CoreOccupied, - >, - >, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 190u8, 79u8, 55u8, 188u8, 40u8, 219u8, 187u8, 11u8, 142u8, 67u8, 86u8, - 242u8, 107u8, 26u8, 63u8, 138u8, 169u8, 24u8, 36u8, 112u8, 61u8, 206u8, - 32u8, 168u8, 167u8, 236u8, 133u8, 90u8, 16u8, 130u8, 121u8, 113u8, - ] - { - let entry = AvailabilityCores; - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " An index used to ensure that only one claim on a parathread exists in the queue or is"] - #[doc = " currently being handled by an occupied core."] - #[doc = ""] - #[doc = " Bounded by the number of parathread cores and scheduling lookahead. Reasonably, 10 * 50 = 500."] - pub async fn parathread_claim_index( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::std::vec::Vec, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .storage_hash::()? 
- == [ - 187u8, 105u8, 221u8, 0u8, 103u8, 9u8, 52u8, 127u8, 47u8, 155u8, 147u8, - 84u8, 249u8, 213u8, 140u8, 75u8, 99u8, 238u8, 220u8, 242u8, 220u8, - 99u8, 204u8, 178u8, 153u8, 170u8, 72u8, 34u8, 83u8, 238u8, 211u8, - 150u8, - ] - { - let entry = ParathreadClaimIndex; - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The block number where the session start occurred. Used to track how many group rotations have occurred."] - #[doc = ""] - #[doc = " Note that in the context of parachains modules the session change is signaled during"] - #[doc = " the block and enacted at the end of the block (at the finalization stage, to be exact)."] - #[doc = " Thus for all intents and purposes the effect of the session change is observed at the"] - #[doc = " block following the session change, block number of which we save in this storage value."] - pub async fn session_start_block( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> - { - if self.client.metadata().storage_hash::()? - == [ - 122u8, 37u8, 150u8, 1u8, 185u8, 201u8, 168u8, 67u8, 55u8, 17u8, 101u8, - 18u8, 133u8, 212u8, 6u8, 73u8, 191u8, 204u8, 229u8, 22u8, 185u8, 120u8, - 24u8, 245u8, 121u8, 215u8, 124u8, 210u8, 49u8, 28u8, 26u8, 80u8, - ] - { - let entry = SessionStartBlock; - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Currently scheduled cores - free but up to be occupied."] - #[doc = ""] - #[doc = " Bounded by the number of cores: one for each parachain and parathread multiplexer."] - #[doc = ""] - #[doc = " The value contained here will not be valid after the end of a block. Runtime APIs should be used to determine scheduled cores/"] - #[doc = " for the upcoming block."] - pub async fn scheduled( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::std::vec::Vec< - runtime_types::polkadot_runtime_parachains::scheduler::CoreAssignment, - >, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? 
- == [ - 167u8, 69u8, 107u8, 76u8, 233u8, 106u8, 232u8, 95u8, 167u8, 73u8, 93u8, - 110u8, 43u8, 94u8, 27u8, 207u8, 152u8, 184u8, 43u8, 245u8, 200u8, - 141u8, 65u8, 32u8, 201u8, 80u8, 73u8, 155u8, 26u8, 82u8, 121u8, 194u8, - ] - { - let entry = Scheduled; - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - } - pub mod paras { - use super::root_mod; - use super::runtime_types; - pub mod calls { - use super::root_mod; - use super::runtime_types; - type DispatchError = runtime_types::sp_runtime::DispatchError; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct ForceSetCurrentCode { - pub para: runtime_types::polkadot_parachain::primitives::Id, - pub new_code: runtime_types::polkadot_parachain::primitives::ValidationCode, - } - impl ::subxt::Call for ForceSetCurrentCode { - const PALLET: &'static str = "Paras"; - const FUNCTION: &'static str = "force_set_current_code"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct ForceSetCurrentHead { - pub para: runtime_types::polkadot_parachain::primitives::Id, - pub new_head: runtime_types::polkadot_parachain::primitives::HeadData, - } - impl ::subxt::Call for ForceSetCurrentHead { - const PALLET: &'static str = "Paras"; - const FUNCTION: &'static str = "force_set_current_head"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct ForceScheduleCodeUpgrade { - pub para: runtime_types::polkadot_parachain::primitives::Id, - pub new_code: runtime_types::polkadot_parachain::primitives::ValidationCode, - pub relay_parent_number: ::core::primitive::u32, - } - impl ::subxt::Call for ForceScheduleCodeUpgrade { - const PALLET: &'static str = "Paras"; - const FUNCTION: &'static str = "force_schedule_code_upgrade"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct ForceNoteNewHead { - pub para: runtime_types::polkadot_parachain::primitives::Id, - pub new_head: runtime_types::polkadot_parachain::primitives::HeadData, - } - impl ::subxt::Call for ForceNoteNewHead { - const PALLET: &'static str = "Paras"; - const FUNCTION: &'static str = "force_note_new_head"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct ForceQueueAction { - pub para: runtime_types::polkadot_parachain::primitives::Id, - } - impl ::subxt::Call for ForceQueueAction { - const PALLET: &'static str = "Paras"; - const FUNCTION: &'static str = "force_queue_action"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct AddTrustedValidationCode { - pub validation_code: runtime_types::polkadot_parachain::primitives::ValidationCode, - } - impl ::subxt::Call for AddTrustedValidationCode { - const PALLET: &'static str = "Paras"; - const FUNCTION: &'static str = "add_trusted_validation_code"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct PokeUnusedValidationCode { - pub validation_code_hash: - runtime_types::polkadot_parachain::primitives::ValidationCodeHash, - } - impl ::subxt::Call for PokeUnusedValidationCode { - const PALLET: &'static str = "Paras"; - const FUNCTION: &'static str = "poke_unused_validation_code"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct IncludePvfCheckStatement { - pub stmt: runtime_types::polkadot_primitives::v2::PvfCheckStatement, - 
pub signature: runtime_types::polkadot_primitives::v2::validator_app::Signature, - } - impl ::subxt::Call for IncludePvfCheckStatement { - const PALLET: &'static str = "Paras"; - const FUNCTION: &'static str = "include_pvf_check_statement"; - } - pub struct TransactionApi<'a, T: ::subxt::Config, X> { - client: &'a ::subxt::Client, - marker: ::core::marker::PhantomData, - } - impl<'a, T, X> TransactionApi<'a, T, X> - where - T: ::subxt::Config, - X: ::subxt::extrinsic::ExtrinsicParams, - { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { - client, - marker: ::core::marker::PhantomData, - } - } - #[doc = "Set the storage for the parachain validation code immediately."] - pub fn force_set_current_code( - &self, - para: runtime_types::polkadot_parachain::primitives::Id, - new_code: runtime_types::polkadot_parachain::primitives::ValidationCode, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - ForceSetCurrentCode, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 100u8, 36u8, 105u8, 246u8, 77u8, 252u8, 162u8, 139u8, 60u8, 37u8, 12u8, - 148u8, 206u8, 160u8, 134u8, 105u8, 50u8, 52u8, 156u8, 252u8, 217u8, - 174u8, 211u8, 208u8, 88u8, 81u8, 236u8, 66u8, 27u8, 59u8, 126u8, 5u8, - ] - { - let call = ForceSetCurrentCode { para, new_code }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Set the storage for the current parachain head data immediately."] - pub fn force_set_current_head( - &self, - para: runtime_types::polkadot_parachain::primitives::Id, - new_head: runtime_types::polkadot_parachain::primitives::HeadData, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - ForceSetCurrentHead, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 119u8, 46u8, 120u8, 202u8, 138u8, 190u8, 179u8, 78u8, 155u8, 167u8, - 220u8, 233u8, 170u8, 248u8, 202u8, 92u8, 73u8, 246u8, 224u8, 56u8, - 208u8, 124u8, 215u8, 19u8, 235u8, 246u8, 89u8, 189u8, 19u8, 205u8, - 22u8, 70u8, - ] - { - let call = ForceSetCurrentHead { para, new_head }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Schedule an upgrade as if it was scheduled in the given relay parent block."] - pub fn force_schedule_code_upgrade( - &self, - para: runtime_types::polkadot_parachain::primitives::Id, - new_code: runtime_types::polkadot_parachain::primitives::ValidationCode, - relay_parent_number: ::core::primitive::u32, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - ForceScheduleCodeUpgrade, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .call_hash::()? 
- == [ - 254u8, 60u8, 105u8, 37u8, 116u8, 190u8, 30u8, 255u8, 210u8, 24u8, - 120u8, 99u8, 174u8, 215u8, 233u8, 83u8, 57u8, 200u8, 24u8, 49u8, 220u8, - 12u8, 103u8, 30u8, 165u8, 10u8, 125u8, 255u8, 88u8, 134u8, 199u8, 3u8, - ] - { - let call = ForceScheduleCodeUpgrade { - para, - new_code, - relay_parent_number, - }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Note a new block head for para within the context of the current block."] - pub fn force_note_new_head( - &self, - para: runtime_types::polkadot_parachain::primitives::Id, - new_head: runtime_types::polkadot_parachain::primitives::HeadData, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - ForceNoteNewHead, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 203u8, 31u8, 68u8, 125u8, 105u8, 218u8, 177u8, 205u8, 248u8, 131u8, - 25u8, 170u8, 140u8, 56u8, 183u8, 106u8, 2u8, 118u8, 79u8, 22u8, 228u8, - 91u8, 33u8, 66u8, 245u8, 144u8, 147u8, 142u8, 14u8, 171u8, 125u8, - 233u8, - ] - { - let call = ForceNoteNewHead { para, new_head }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Put a parachain directly into the next session's action queue."] - #[doc = "We can't queue it any sooner than this without going into the"] - #[doc = "initializer..."] - pub fn force_queue_action( - &self, - para: runtime_types::polkadot_parachain::primitives::Id, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - ForceQueueAction, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 141u8, 235u8, 245u8, 93u8, 24u8, 155u8, 106u8, 136u8, 190u8, 236u8, - 216u8, 131u8, 245u8, 5u8, 186u8, 131u8, 159u8, 240u8, 95u8, 139u8, - 231u8, 12u8, 255u8, 74u8, 194u8, 13u8, 112u8, 78u8, 110u8, 95u8, 26u8, - 133u8, - ] - { - let call = ForceQueueAction { para }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Adds the validation code to the storage."] - #[doc = ""] - #[doc = "The code will not be added if it is already present. Additionally, if PVF pre-checking"] - #[doc = "is running for that code, it will be instantly accepted."] - #[doc = ""] - #[doc = "Otherwise, the code will be added into the storage. Note that the code will be added"] - #[doc = "into storage with reference count 0. This is to account the fact that there are no users"] - #[doc = "for this code yet. The caller will have to make sure that this code eventually gets"] - #[doc = "used by some parachain or removed from the storage to avoid storage leaks. For the latter"] - #[doc = "prefer to use the `poke_unused_validation_code` dispatchable to raw storage manipulation."] - #[doc = ""] - #[doc = "This function is mainly meant to be used for upgrading parachains that do not follow"] - #[doc = "the go-ahead signal while the PVF pre-checking feature is enabled."] - pub fn add_trusted_validation_code( - &self, - validation_code: runtime_types::polkadot_parachain::primitives::ValidationCode, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - AddTrustedValidationCode, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .call_hash::()? 
- == [ - 110u8, 255u8, 249u8, 176u8, 109u8, 54u8, 87u8, 19u8, 7u8, 62u8, 220u8, - 143u8, 196u8, 99u8, 66u8, 49u8, 18u8, 225u8, 14u8, 42u8, 243u8, 228u8, - 232u8, 207u8, 246u8, 34u8, 179u8, 127u8, 246u8, 239u8, 30u8, 214u8, - ] - { - let call = AddTrustedValidationCode { validation_code }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Remove the validation code from the storage iff the reference count is 0."] - #[doc = ""] - #[doc = "This is better than removing the storage directly, because it will not remove the code"] - #[doc = "that was suddenly got used by some parachain while this dispatchable was pending"] - #[doc = "dispatching."] - pub fn poke_unused_validation_code( - &self, - validation_code_hash : runtime_types :: polkadot_parachain :: primitives :: ValidationCodeHash, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - PokeUnusedValidationCode, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .call_hash::()? - == [ - 128u8, 49u8, 50u8, 239u8, 119u8, 116u8, 110u8, 52u8, 85u8, 66u8, 127u8, - 118u8, 206u8, 191u8, 206u8, 84u8, 255u8, 88u8, 179u8, 43u8, 163u8, - 185u8, 237u8, 191u8, 34u8, 135u8, 44u8, 231u8, 199u8, 5u8, 183u8, 5u8, - ] - { - let call = PokeUnusedValidationCode { - validation_code_hash, - }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Includes a statement for a PVF pre-checking vote. Potentially, finalizes the vote and"] - #[doc = "enacts the results if that was the last vote before achieving the supermajority."] - pub fn include_pvf_check_statement( - &self, - stmt: runtime_types::polkadot_primitives::v2::PvfCheckStatement, - signature: runtime_types::polkadot_primitives::v2::validator_app::Signature, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - IncludePvfCheckStatement, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .call_hash::()? - == [ - 138u8, 112u8, 12u8, 226u8, 95u8, 253u8, 48u8, 219u8, 9u8, 35u8, 99u8, - 122u8, 35u8, 194u8, 79u8, 103u8, 52u8, 242u8, 39u8, 110u8, 166u8, - 212u8, 80u8, 105u8, 3u8, 242u8, 59u8, 13u8, 161u8, 32u8, 224u8, 25u8, - ] - { - let call = IncludePvfCheckStatement { stmt, signature }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - pub type Event = runtime_types::polkadot_runtime_parachains::paras::pallet::Event; - pub mod events { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "Current code has been updated for a Para. `para_id`"] - pub struct CurrentCodeUpdated(pub runtime_types::polkadot_parachain::primitives::Id); - impl ::subxt::Event for CurrentCodeUpdated { - const PALLET: &'static str = "Paras"; - const EVENT: &'static str = "CurrentCodeUpdated"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "Current head has been updated for a Para. 
`para_id`"] - pub struct CurrentHeadUpdated(pub runtime_types::polkadot_parachain::primitives::Id); - impl ::subxt::Event for CurrentHeadUpdated { - const PALLET: &'static str = "Paras"; - const EVENT: &'static str = "CurrentHeadUpdated"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "A code upgrade has been scheduled for a Para. `para_id`"] - pub struct CodeUpgradeScheduled(pub runtime_types::polkadot_parachain::primitives::Id); - impl ::subxt::Event for CodeUpgradeScheduled { - const PALLET: &'static str = "Paras"; - const EVENT: &'static str = "CodeUpgradeScheduled"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "A new head has been noted for a Para. `para_id`"] - pub struct NewHeadNoted(pub runtime_types::polkadot_parachain::primitives::Id); - impl ::subxt::Event for NewHeadNoted { - const PALLET: &'static str = "Paras"; - const EVENT: &'static str = "NewHeadNoted"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "A para has been queued to execute pending actions. `para_id`"] - pub struct ActionQueued( - pub runtime_types::polkadot_parachain::primitives::Id, - pub ::core::primitive::u32, - ); - impl ::subxt::Event for ActionQueued { - const PALLET: &'static str = "Paras"; - const EVENT: &'static str = "ActionQueued"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "The given para either initiated or subscribed to a PVF check for the given validation"] - #[doc = "code. `code_hash` `para_id`"] - pub struct PvfCheckStarted( - pub runtime_types::polkadot_parachain::primitives::ValidationCodeHash, - pub runtime_types::polkadot_parachain::primitives::Id, - ); - impl ::subxt::Event for PvfCheckStarted { - const PALLET: &'static str = "Paras"; - const EVENT: &'static str = "PvfCheckStarted"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "The given validation code was accepted by the PVF pre-checking vote."] - #[doc = "`code_hash` `para_id`"] - pub struct PvfCheckAccepted( - pub runtime_types::polkadot_parachain::primitives::ValidationCodeHash, - pub runtime_types::polkadot_parachain::primitives::Id, - ); - impl ::subxt::Event for PvfCheckAccepted { - const PALLET: &'static str = "Paras"; - const EVENT: &'static str = "PvfCheckAccepted"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "The given validation code was rejected by the PVF pre-checking vote."] - #[doc = "`code_hash` `para_id`"] - pub struct PvfCheckRejected( - pub runtime_types::polkadot_parachain::primitives::ValidationCodeHash, - pub runtime_types::polkadot_parachain::primitives::Id, - ); - impl ::subxt::Event for PvfCheckRejected { - const PALLET: &'static str = "Paras"; - const EVENT: &'static str = "PvfCheckRejected"; - } - } - pub mod storage { - use super::runtime_types; - pub struct PvfActiveVoteMap<'a>( - pub &'a runtime_types::polkadot_parachain::primitives::ValidationCodeHash, - ); - impl ::subxt::StorageEntry for PvfActiveVoteMap<'_> { - const PALLET: &'static str = "Paras"; - const STORAGE: &'static str = "PvfActiveVoteMap"; - type Value = - runtime_types::polkadot_runtime_parachains::paras::PvfCheckActiveVoteState< - ::core::primitive::u32, - >; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( - &self.0, - ::subxt::StorageHasher::Twox64Concat, - )]) - } - } - pub struct PvfActiveVoteList; - impl 
::subxt::StorageEntry for PvfActiveVoteList { - const PALLET: &'static str = "Paras"; - const STORAGE: &'static str = "PvfActiveVoteList"; - type Value = ::std::vec::Vec< - runtime_types::polkadot_parachain::primitives::ValidationCodeHash, - >; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct Parachains; - impl ::subxt::StorageEntry for Parachains { - const PALLET: &'static str = "Paras"; - const STORAGE: &'static str = "Parachains"; - type Value = ::std::vec::Vec; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct ParaLifecycles<'a>( - pub &'a runtime_types::polkadot_parachain::primitives::Id, - ); - impl ::subxt::StorageEntry for ParaLifecycles<'_> { - const PALLET: &'static str = "Paras"; - const STORAGE: &'static str = "ParaLifecycles"; - type Value = runtime_types::polkadot_runtime_parachains::paras::ParaLifecycle; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( - &self.0, - ::subxt::StorageHasher::Twox64Concat, - )]) - } - } - pub struct Heads<'a>(pub &'a runtime_types::polkadot_parachain::primitives::Id); - impl ::subxt::StorageEntry for Heads<'_> { - const PALLET: &'static str = "Paras"; - const STORAGE: &'static str = "Heads"; - type Value = runtime_types::polkadot_parachain::primitives::HeadData; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( - &self.0, - ::subxt::StorageHasher::Twox64Concat, - )]) - } - } - pub struct CurrentCodeHash<'a>( - pub &'a runtime_types::polkadot_parachain::primitives::Id, - ); - impl ::subxt::StorageEntry for CurrentCodeHash<'_> { - const PALLET: &'static str = "Paras"; - const STORAGE: &'static str = "CurrentCodeHash"; - type Value = runtime_types::polkadot_parachain::primitives::ValidationCodeHash; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( - &self.0, - ::subxt::StorageHasher::Twox64Concat, - )]) - } - } - pub struct PastCodeHash<'a>( - pub &'a runtime_types::polkadot_parachain::primitives::Id, - pub &'a ::core::primitive::u32, - ); - impl ::subxt::StorageEntry for PastCodeHash<'_> { - const PALLET: &'static str = "Paras"; - const STORAGE: &'static str = "PastCodeHash"; - type Value = runtime_types::polkadot_parachain::primitives::ValidationCodeHash; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( - &(&self.0, &self.1), - ::subxt::StorageHasher::Twox64Concat, - )]) - } - } - pub struct PastCodeMeta<'a>(pub &'a runtime_types::polkadot_parachain::primitives::Id); - impl ::subxt::StorageEntry for PastCodeMeta<'_> { - const PALLET: &'static str = "Paras"; - const STORAGE: &'static str = "PastCodeMeta"; - type Value = runtime_types::polkadot_runtime_parachains::paras::ParaPastCodeMeta< - ::core::primitive::u32, - >; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( - &self.0, - ::subxt::StorageHasher::Twox64Concat, - )]) - } - } - pub struct PastCodePruning; - impl ::subxt::StorageEntry for PastCodePruning { - const PALLET: &'static str = "Paras"; - const STORAGE: &'static str = "PastCodePruning"; - type Value = ::std::vec::Vec<( - runtime_types::polkadot_parachain::primitives::Id, - ::core::primitive::u32, - )>; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct FutureCodeUpgrades<'a>( - 
pub &'a runtime_types::polkadot_parachain::primitives::Id, - ); - impl ::subxt::StorageEntry for FutureCodeUpgrades<'_> { - const PALLET: &'static str = "Paras"; - const STORAGE: &'static str = "FutureCodeUpgrades"; - type Value = ::core::primitive::u32; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( - &self.0, - ::subxt::StorageHasher::Twox64Concat, - )]) - } - } - pub struct FutureCodeHash<'a>( - pub &'a runtime_types::polkadot_parachain::primitives::Id, - ); - impl ::subxt::StorageEntry for FutureCodeHash<'_> { - const PALLET: &'static str = "Paras"; - const STORAGE: &'static str = "FutureCodeHash"; - type Value = runtime_types::polkadot_parachain::primitives::ValidationCodeHash; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( - &self.0, - ::subxt::StorageHasher::Twox64Concat, - )]) - } - } - pub struct UpgradeGoAheadSignal<'a>( - pub &'a runtime_types::polkadot_parachain::primitives::Id, - ); - impl ::subxt::StorageEntry for UpgradeGoAheadSignal<'_> { - const PALLET: &'static str = "Paras"; - const STORAGE: &'static str = "UpgradeGoAheadSignal"; - type Value = runtime_types::polkadot_primitives::v2::UpgradeGoAhead; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( - &self.0, - ::subxt::StorageHasher::Twox64Concat, - )]) - } - } - pub struct UpgradeRestrictionSignal<'a>( - pub &'a runtime_types::polkadot_parachain::primitives::Id, - ); - impl ::subxt::StorageEntry for UpgradeRestrictionSignal<'_> { - const PALLET: &'static str = "Paras"; - const STORAGE: &'static str = "UpgradeRestrictionSignal"; - type Value = runtime_types::polkadot_primitives::v2::UpgradeRestriction; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( - &self.0, - ::subxt::StorageHasher::Twox64Concat, - )]) - } - } - pub struct UpgradeCooldowns; - impl ::subxt::StorageEntry for UpgradeCooldowns { - const PALLET: &'static str = "Paras"; - const STORAGE: &'static str = "UpgradeCooldowns"; - type Value = ::std::vec::Vec<( - runtime_types::polkadot_parachain::primitives::Id, - ::core::primitive::u32, - )>; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct UpcomingUpgrades; - impl ::subxt::StorageEntry for UpcomingUpgrades { - const PALLET: &'static str = "Paras"; - const STORAGE: &'static str = "UpcomingUpgrades"; - type Value = ::std::vec::Vec<( - runtime_types::polkadot_parachain::primitives::Id, - ::core::primitive::u32, - )>; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct ActionsQueue<'a>(pub &'a ::core::primitive::u32); - impl ::subxt::StorageEntry for ActionsQueue<'_> { - const PALLET: &'static str = "Paras"; - const STORAGE: &'static str = "ActionsQueue"; - type Value = ::std::vec::Vec; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( - &self.0, - ::subxt::StorageHasher::Twox64Concat, - )]) - } - } - pub struct UpcomingParasGenesis<'a>( - pub &'a runtime_types::polkadot_parachain::primitives::Id, - ); - impl ::subxt::StorageEntry for UpcomingParasGenesis<'_> { - const PALLET: &'static str = "Paras"; - const STORAGE: &'static str = "UpcomingParasGenesis"; - type Value = runtime_types::polkadot_runtime_parachains::paras::ParaGenesisArgs; - fn key(&self) -> ::subxt::StorageEntryKey { - 
::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( - &self.0, - ::subxt::StorageHasher::Twox64Concat, - )]) - } - } - pub struct CodeByHashRefs<'a>( - pub &'a runtime_types::polkadot_parachain::primitives::ValidationCodeHash, - ); - impl ::subxt::StorageEntry for CodeByHashRefs<'_> { - const PALLET: &'static str = "Paras"; - const STORAGE: &'static str = "CodeByHashRefs"; - type Value = ::core::primitive::u32; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( - &self.0, - ::subxt::StorageHasher::Identity, - )]) - } - } - pub struct CodeByHash<'a>( - pub &'a runtime_types::polkadot_parachain::primitives::ValidationCodeHash, - ); - impl ::subxt::StorageEntry for CodeByHash<'_> { - const PALLET: &'static str = "Paras"; - const STORAGE: &'static str = "CodeByHash"; - type Value = runtime_types::polkadot_parachain::primitives::ValidationCode; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( - &self.0, - ::subxt::StorageHasher::Identity, - )]) - } - } - pub struct StorageApi<'a, T: ::subxt::Config> { - client: &'a ::subxt::Client, - } - impl<'a, T: ::subxt::Config> StorageApi<'a, T> { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { client } - } - #[doc = " All currently active PVF pre-checking votes."] - #[doc = ""] - #[doc = " Invariant:"] - #[doc = " - There are no PVF pre-checking votes that exists in list but not in the set and vice versa."] - pub async fn pvf_active_vote_map( - &self, - _0: &runtime_types::polkadot_parachain::primitives::ValidationCodeHash, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::core::option::Option< - runtime_types::polkadot_runtime_parachains::paras::PvfCheckActiveVoteState< - ::core::primitive::u32, - >, - >, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 245u8, 158u8, 140u8, 115u8, 132u8, 96u8, 139u8, 158u8, 209u8, 44u8, - 247u8, 149u8, 226u8, 150u8, 181u8, 35u8, 22u8, 89u8, 106u8, 100u8, - 88u8, 72u8, 43u8, 31u8, 91u8, 210u8, 130u8, 38u8, 171u8, 192u8, 173u8, - 15u8, - ] - { - let entry = PvfActiveVoteMap(_0); - self.client.storage().fetch(&entry, block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " All currently active PVF pre-checking votes."] - #[doc = ""] - #[doc = " Invariant:"] - #[doc = " - There are no PVF pre-checking votes that exists in list but not in the set and vice versa."] - pub async fn pvf_active_vote_map_iter( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::subxt::KeyIter<'a, T, PvfActiveVoteMap<'a>>, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 245u8, 158u8, 140u8, 115u8, 132u8, 96u8, 139u8, 158u8, 209u8, 44u8, - 247u8, 149u8, 226u8, 150u8, 181u8, 35u8, 22u8, 89u8, 106u8, 100u8, - 88u8, 72u8, 43u8, 31u8, 91u8, 210u8, 130u8, 38u8, 171u8, 192u8, 173u8, - 15u8, - ] - { - self.client.storage().iter(block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The list of all currently active PVF votes. Auxiliary to `PvfActiveVoteMap`."] - pub async fn pvf_active_vote_list( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::std::vec::Vec< - runtime_types::polkadot_parachain::primitives::ValidationCodeHash, - >, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? 
- == [ - 154u8, 101u8, 96u8, 29u8, 134u8, 220u8, 111u8, 135u8, 207u8, 255u8, - 121u8, 52u8, 188u8, 108u8, 101u8, 7u8, 138u8, 255u8, 13u8, 58u8, 211u8, - 131u8, 66u8, 126u8, 53u8, 207u8, 119u8, 13u8, 39u8, 177u8, 89u8, 33u8, - ] - { - let entry = PvfActiveVoteList; - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " All parachains. Ordered ascending by `ParaId`. Parathreads are not included."] - #[doc = ""] - #[doc = " Consider using the [`ParachainsCache`] type of modifying."] - pub async fn parachains( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::std::vec::Vec, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 174u8, 146u8, 170u8, 102u8, 125u8, 176u8, 74u8, 177u8, 28u8, 54u8, - 13u8, 73u8, 188u8, 248u8, 78u8, 144u8, 88u8, 183u8, 224u8, 69u8, 224u8, - 31u8, 30u8, 115u8, 191u8, 166u8, 252u8, 218u8, 114u8, 241u8, 110u8, - 39u8, - ] - { - let entry = Parachains; - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The current lifecycle of a all known Para IDs."] - pub async fn para_lifecycles( - &self, - _0: &runtime_types::polkadot_parachain::primitives::Id, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::core::option::Option< - runtime_types::polkadot_runtime_parachains::paras::ParaLifecycle, - >, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 38u8, 31u8, 0u8, 253u8, 63u8, 27u8, 13u8, 12u8, 247u8, 34u8, 21u8, - 166u8, 166u8, 236u8, 178u8, 217u8, 230u8, 117u8, 215u8, 8u8, 149u8, - 37u8, 231u8, 160u8, 226u8, 89u8, 12u8, 162u8, 197u8, 237u8, 235u8, - 127u8, - ] - { - let entry = ParaLifecycles(_0); - self.client.storage().fetch(&entry, block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The current lifecycle of a all known Para IDs."] - pub async fn para_lifecycles_iter( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::subxt::KeyIter<'a, T, ParaLifecycles<'a>>, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 38u8, 31u8, 0u8, 253u8, 63u8, 27u8, 13u8, 12u8, 247u8, 34u8, 21u8, - 166u8, 166u8, 236u8, 178u8, 217u8, 230u8, 117u8, 215u8, 8u8, 149u8, - 37u8, 231u8, 160u8, 226u8, 89u8, 12u8, 162u8, 197u8, 237u8, 235u8, - 127u8, - ] - { - self.client.storage().iter(block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The head-data of every registered para."] - pub async fn heads( - &self, - _0: &runtime_types::polkadot_parachain::primitives::Id, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::core::option::Option, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? 
- == [ - 242u8, 145u8, 237u8, 33u8, 204u8, 183u8, 18u8, 135u8, 182u8, 47u8, - 220u8, 187u8, 118u8, 79u8, 163u8, 122u8, 227u8, 215u8, 43u8, 70u8, - 24u8, 33u8, 74u8, 113u8, 67u8, 25u8, 47u8, 210u8, 136u8, 236u8, 83u8, - 148u8, - ] - { - let entry = Heads(_0); - self.client.storage().fetch(&entry, block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The head-data of every registered para."] - pub async fn heads_iter( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result<::subxt::KeyIter<'a, T, Heads<'a>>, ::subxt::BasicError> - { - if self.client.metadata().storage_hash::()? - == [ - 242u8, 145u8, 237u8, 33u8, 204u8, 183u8, 18u8, 135u8, 182u8, 47u8, - 220u8, 187u8, 118u8, 79u8, 163u8, 122u8, 227u8, 215u8, 43u8, 70u8, - 24u8, 33u8, 74u8, 113u8, 67u8, 25u8, 47u8, 210u8, 136u8, 236u8, 83u8, - 148u8, - ] - { - self.client.storage().iter(block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The validation code hash of every live para."] - #[doc = ""] - #[doc = " Corresponding code can be retrieved with [`CodeByHash`]."] - pub async fn current_code_hash( - &self, - _0: &runtime_types::polkadot_parachain::primitives::Id, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::core::option::Option< - runtime_types::polkadot_parachain::primitives::ValidationCodeHash, - >, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 22u8, 155u8, 81u8, 176u8, 112u8, 20u8, 205u8, 107u8, 87u8, 40u8, 219u8, - 0u8, 112u8, 111u8, 97u8, 196u8, 161u8, 111u8, 207u8, 247u8, 91u8, 47u8, - 163u8, 209u8, 188u8, 144u8, 37u8, 102u8, 240u8, 21u8, 33u8, 77u8, - ] - { - let entry = CurrentCodeHash(_0); - self.client.storage().fetch(&entry, block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The validation code hash of every live para."] - #[doc = ""] - #[doc = " Corresponding code can be retrieved with [`CodeByHash`]."] - pub async fn current_code_hash_iter( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::subxt::KeyIter<'a, T, CurrentCodeHash<'a>>, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 22u8, 155u8, 81u8, 176u8, 112u8, 20u8, 205u8, 107u8, 87u8, 40u8, 219u8, - 0u8, 112u8, 111u8, 97u8, 196u8, 161u8, 111u8, 207u8, 247u8, 91u8, 47u8, - 163u8, 209u8, 188u8, 144u8, 37u8, 102u8, 240u8, 21u8, 33u8, 77u8, - ] - { - self.client.storage().iter(block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Actual past code hash, indicated by the para id as well as the block number at which it"] - #[doc = " became outdated."] - #[doc = ""] - #[doc = " Corresponding code can be retrieved with [`CodeByHash`]."] - pub async fn past_code_hash( - &self, - _0: &runtime_types::polkadot_parachain::primitives::Id, - _1: &::core::primitive::u32, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::core::option::Option< - runtime_types::polkadot_parachain::primitives::ValidationCodeHash, - >, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? 
- == [ - 26u8, 56u8, 165u8, 239u8, 180u8, 241u8, 183u8, 26u8, 106u8, 71u8, - 213u8, 114u8, 124u8, 190u8, 69u8, 128u8, 159u8, 119u8, 145u8, 47u8, - 93u8, 64u8, 74u8, 76u8, 220u8, 121u8, 144u8, 162u8, 163u8, 149u8, - 132u8, 6u8, - ] - { - let entry = PastCodeHash(_0, _1); - self.client.storage().fetch(&entry, block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Actual past code hash, indicated by the para id as well as the block number at which it"] - #[doc = " became outdated."] - #[doc = ""] - #[doc = " Corresponding code can be retrieved with [`CodeByHash`]."] - pub async fn past_code_hash_iter( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::subxt::KeyIter<'a, T, PastCodeHash<'a>>, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 26u8, 56u8, 165u8, 239u8, 180u8, 241u8, 183u8, 26u8, 106u8, 71u8, - 213u8, 114u8, 124u8, 190u8, 69u8, 128u8, 159u8, 119u8, 145u8, 47u8, - 93u8, 64u8, 74u8, 76u8, 220u8, 121u8, 144u8, 162u8, 163u8, 149u8, - 132u8, 6u8, - ] - { - self.client.storage().iter(block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Past code of parachains. The parachains themselves may not be registered anymore,"] - #[doc = " but we also keep their code on-chain for the same amount of time as outdated code"] - #[doc = " to keep it available for secondary checkers."] - pub async fn past_code_meta( - &self, - _0: &runtime_types::polkadot_parachain::primitives::Id, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - runtime_types::polkadot_runtime_parachains::paras::ParaPastCodeMeta< - ::core::primitive::u32, - >, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 121u8, 14u8, 91u8, 135u8, 231u8, 67u8, 189u8, 66u8, 108u8, 27u8, 241u8, - 117u8, 101u8, 34u8, 24u8, 16u8, 52u8, 198u8, 205u8, 155u8, 138u8, 9u8, - 140u8, 207u8, 27u8, 172u8, 212u8, 217u8, 47u8, 134u8, 122u8, 162u8, - ] - { - let entry = PastCodeMeta(_0); - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Past code of parachains. The parachains themselves may not be registered anymore,"] - #[doc = " but we also keep their code on-chain for the same amount of time as outdated code"] - #[doc = " to keep it available for secondary checkers."] - pub async fn past_code_meta_iter( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::subxt::KeyIter<'a, T, PastCodeMeta<'a>>, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? 
- == [ - 121u8, 14u8, 91u8, 135u8, 231u8, 67u8, 189u8, 66u8, 108u8, 27u8, 241u8, - 117u8, 101u8, 34u8, 24u8, 16u8, 52u8, 198u8, 205u8, 155u8, 138u8, 9u8, - 140u8, 207u8, 27u8, 172u8, 212u8, 217u8, 47u8, 134u8, 122u8, 162u8, - ] - { - self.client.storage().iter(block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Which paras have past code that needs pruning and the relay-chain block at which the code was replaced."] - #[doc = " Note that this is the actual height of the included block, not the expected height at which the"] - #[doc = " code upgrade would be applied, although they may be equal."] - #[doc = " This is to ensure the entire acceptance period is covered, not an offset acceptance period starting"] - #[doc = " from the time at which the parachain perceives a code upgrade as having occurred."] - #[doc = " Multiple entries for a single para are permitted. Ordered ascending by block number."] - pub async fn past_code_pruning( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::std::vec::Vec<( - runtime_types::polkadot_parachain::primitives::Id, - ::core::primitive::u32, - )>, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 142u8, 32u8, 134u8, 51u8, 34u8, 214u8, 75u8, 69u8, 77u8, 178u8, 103u8, - 117u8, 180u8, 105u8, 249u8, 178u8, 143u8, 25u8, 212u8, 207u8, 28u8, - 28u8, 175u8, 193u8, 43u8, 58u8, 51u8, 149u8, 155u8, 204u8, 37u8, 153u8, - ] - { - let entry = PastCodePruning; - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The block number at which the planned code change is expected for a para."] - #[doc = " The change will be applied after the first parablock for this ID included which executes"] - #[doc = " in the context of a relay chain block with a number >= `expected_at`."] - pub async fn future_code_upgrades( - &self, - _0: &runtime_types::polkadot_parachain::primitives::Id, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::core::option::Option<::core::primitive::u32>, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .storage_hash::()? - == [ - 211u8, 254u8, 201u8, 63u8, 89u8, 112u8, 57u8, 82u8, 255u8, 163u8, 49u8, - 246u8, 197u8, 154u8, 55u8, 10u8, 65u8, 188u8, 172u8, 110u8, 194u8, - 155u8, 37u8, 44u8, 250u8, 154u8, 4u8, 184u8, 225u8, 79u8, 248u8, 80u8, - ] - { - let entry = FutureCodeUpgrades(_0); - self.client.storage().fetch(&entry, block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The block number at which the planned code change is expected for a para."] - #[doc = " The change will be applied after the first parablock for this ID included which executes"] - #[doc = " in the context of a relay chain block with a number >= `expected_at`."] - pub async fn future_code_upgrades_iter( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::subxt::KeyIter<'a, T, FutureCodeUpgrades<'a>>, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .storage_hash::()? 
- == [ - 211u8, 254u8, 201u8, 63u8, 89u8, 112u8, 57u8, 82u8, 255u8, 163u8, 49u8, - 246u8, 197u8, 154u8, 55u8, 10u8, 65u8, 188u8, 172u8, 110u8, 194u8, - 155u8, 37u8, 44u8, 250u8, 154u8, 4u8, 184u8, 225u8, 79u8, 248u8, 80u8, - ] - { - self.client.storage().iter(block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The actual future code hash of a para."] - #[doc = ""] - #[doc = " Corresponding code can be retrieved with [`CodeByHash`]."] - pub async fn future_code_hash( - &self, - _0: &runtime_types::polkadot_parachain::primitives::Id, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::core::option::Option< - runtime_types::polkadot_parachain::primitives::ValidationCodeHash, - >, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 89u8, 18u8, 35u8, 84u8, 61u8, 191u8, 189u8, 140u8, 223u8, 6u8, 38u8, - 238u8, 22u8, 72u8, 221u8, 168u8, 239u8, 113u8, 33u8, 254u8, 41u8, 96u8, - 102u8, 173u8, 131u8, 111u8, 11u8, 112u8, 65u8, 71u8, 189u8, 121u8, - ] - { - let entry = FutureCodeHash(_0); - self.client.storage().fetch(&entry, block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The actual future code hash of a para."] - #[doc = ""] - #[doc = " Corresponding code can be retrieved with [`CodeByHash`]."] - pub async fn future_code_hash_iter( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::subxt::KeyIter<'a, T, FutureCodeHash<'a>>, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 89u8, 18u8, 35u8, 84u8, 61u8, 191u8, 189u8, 140u8, 223u8, 6u8, 38u8, - 238u8, 22u8, 72u8, 221u8, 168u8, 239u8, 113u8, 33u8, 254u8, 41u8, 96u8, - 102u8, 173u8, 131u8, 111u8, 11u8, 112u8, 65u8, 71u8, 189u8, 121u8, - ] - { - self.client.storage().iter(block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " This is used by the relay-chain to communicate to a parachain a go-ahead with in the upgrade procedure."] - #[doc = ""] - #[doc = " This value is absent when there are no upgrades scheduled or during the time the relay chain"] - #[doc = " performs the checks. It is set at the first relay-chain block when the corresponding parachain"] - #[doc = " can switch its upgrade function. As soon as the parachain's block is included, the value"] - #[doc = " gets reset to `None`."] - #[doc = ""] - #[doc = " NOTE that this field is used by parachains via merkle storage proofs, therefore changing"] - #[doc = " the format will require migration of parachains."] - pub async fn upgrade_go_ahead_signal( - &self, - _0: &runtime_types::polkadot_parachain::primitives::Id, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::core::option::Option, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .storage_hash::()? 
- == [ - 100u8, 87u8, 135u8, 185u8, 95u8, 13u8, 74u8, 134u8, 19u8, 97u8, 80u8, - 104u8, 177u8, 30u8, 82u8, 145u8, 171u8, 250u8, 99u8, 214u8, 26u8, - 243u8, 118u8, 118u8, 19u8, 188u8, 187u8, 142u8, 138u8, 68u8, 54u8, - 114u8, - ] - { - let entry = UpgradeGoAheadSignal(_0); - self.client.storage().fetch(&entry, block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " This is used by the relay-chain to communicate to a parachain a go-ahead with in the upgrade procedure."] - #[doc = ""] - #[doc = " This value is absent when there are no upgrades scheduled or during the time the relay chain"] - #[doc = " performs the checks. It is set at the first relay-chain block when the corresponding parachain"] - #[doc = " can switch its upgrade function. As soon as the parachain's block is included, the value"] - #[doc = " gets reset to `None`."] - #[doc = ""] - #[doc = " NOTE that this field is used by parachains via merkle storage proofs, therefore changing"] - #[doc = " the format will require migration of parachains."] - pub async fn upgrade_go_ahead_signal_iter( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::subxt::KeyIter<'a, T, UpgradeGoAheadSignal<'a>>, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .storage_hash::()? - == [ - 100u8, 87u8, 135u8, 185u8, 95u8, 13u8, 74u8, 134u8, 19u8, 97u8, 80u8, - 104u8, 177u8, 30u8, 82u8, 145u8, 171u8, 250u8, 99u8, 214u8, 26u8, - 243u8, 118u8, 118u8, 19u8, 188u8, 187u8, 142u8, 138u8, 68u8, 54u8, - 114u8, - ] - { - self.client.storage().iter(block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " This is used by the relay-chain to communicate that there are restrictions for performing"] - #[doc = " an upgrade for this parachain."] - #[doc = ""] - #[doc = " This may be a because the parachain waits for the upgrade cooldown to expire. Another"] - #[doc = " potential use case is when we want to perform some maintenance (such as storage migration)"] - #[doc = " we could restrict upgrades to make the process simpler."] - #[doc = ""] - #[doc = " NOTE that this field is used by parachains via merkle storage proofs, therefore changing"] - #[doc = " the format will require migration of parachains."] - pub async fn upgrade_restriction_signal( - &self, - _0: &runtime_types::polkadot_parachain::primitives::Id, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::core::option::Option< - runtime_types::polkadot_primitives::v2::UpgradeRestriction, - >, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .storage_hash::()? - == [ - 173u8, 198u8, 89u8, 108u8, 43u8, 93u8, 143u8, 224u8, 141u8, 248u8, - 238u8, 221u8, 237u8, 220u8, 140u8, 24u8, 7u8, 14u8, 136u8, 251u8, - 159u8, 190u8, 70u8, 98u8, 100u8, 118u8, 24u8, 212u8, 82u8, 96u8, 120u8, - 206u8, - ] - { - let entry = UpgradeRestrictionSignal(_0); - self.client.storage().fetch(&entry, block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " This is used by the relay-chain to communicate that there are restrictions for performing"] - #[doc = " an upgrade for this parachain."] - #[doc = ""] - #[doc = " This may be a because the parachain waits for the upgrade cooldown to expire. 
Another"] - #[doc = " potential use case is when we want to perform some maintenance (such as storage migration)"] - #[doc = " we could restrict upgrades to make the process simpler."] - #[doc = ""] - #[doc = " NOTE that this field is used by parachains via merkle storage proofs, therefore changing"] - #[doc = " the format will require migration of parachains."] - pub async fn upgrade_restriction_signal_iter( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::subxt::KeyIter<'a, T, UpgradeRestrictionSignal<'a>>, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .storage_hash::()? - == [ - 173u8, 198u8, 89u8, 108u8, 43u8, 93u8, 143u8, 224u8, 141u8, 248u8, - 238u8, 221u8, 237u8, 220u8, 140u8, 24u8, 7u8, 14u8, 136u8, 251u8, - 159u8, 190u8, 70u8, 98u8, 100u8, 118u8, 24u8, 212u8, 82u8, 96u8, 120u8, - 206u8, - ] - { - self.client.storage().iter(block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The list of parachains that are awaiting for their upgrade restriction to cooldown."] - #[doc = ""] - #[doc = " Ordered ascending by block number."] - pub async fn upgrade_cooldowns( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::std::vec::Vec<( - runtime_types::polkadot_parachain::primitives::Id, - ::core::primitive::u32, - )>, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 120u8, 214u8, 165u8, 35u8, 125u8, 56u8, 152u8, 76u8, 124u8, 159u8, - 160u8, 93u8, 16u8, 30u8, 208u8, 199u8, 162u8, 74u8, 124u8, 141u8, - 137u8, 237u8, 229u8, 61u8, 62u8, 71u8, 54u8, 92u8, 243u8, 208u8, 114u8, - 19u8, - ] - { - let entry = UpgradeCooldowns; - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The list of upcoming code upgrades. Each item is a pair of which para performs a code"] - #[doc = " upgrade and at which relay-chain block it is expected at."] - #[doc = ""] - #[doc = " Ordered ascending by block number."] - pub async fn upcoming_upgrades( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::std::vec::Vec<( - runtime_types::polkadot_parachain::primitives::Id, - ::core::primitive::u32, - )>, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 16u8, 74u8, 254u8, 39u8, 241u8, 98u8, 106u8, 203u8, 189u8, 157u8, 66u8, - 99u8, 164u8, 176u8, 20u8, 206u8, 15u8, 212u8, 229u8, 9u8, 117u8, 214u8, - 250u8, 8u8, 51u8, 80u8, 35u8, 236u8, 120u8, 4u8, 246u8, 62u8, - ] - { - let entry = UpcomingUpgrades; - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The actions to perform during the start of a specific session index."] - pub async fn actions_queue( - &self, - _0: &::core::primitive::u32, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::std::vec::Vec, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? 
- == [ - 103u8, 197u8, 76u8, 84u8, 133u8, 3u8, 67u8, 57u8, 107u8, 31u8, 87u8, - 33u8, 196u8, 130u8, 119u8, 93u8, 171u8, 173u8, 76u8, 242u8, 22u8, 15u8, - 133u8, 193u8, 122u8, 0u8, 112u8, 121u8, 233u8, 29u8, 17u8, 185u8, - ] - { - let entry = ActionsQueue(_0); - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The actions to perform during the start of a specific session index."] - pub async fn actions_queue_iter( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::subxt::KeyIter<'a, T, ActionsQueue<'a>>, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 103u8, 197u8, 76u8, 84u8, 133u8, 3u8, 67u8, 57u8, 107u8, 31u8, 87u8, - 33u8, 196u8, 130u8, 119u8, 93u8, 171u8, 173u8, 76u8, 242u8, 22u8, 15u8, - 133u8, 193u8, 122u8, 0u8, 112u8, 121u8, 233u8, 29u8, 17u8, 185u8, - ] - { - self.client.storage().iter(block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Upcoming paras instantiation arguments."] - #[doc = ""] - #[doc = " NOTE that after PVF pre-checking is enabled the para genesis arg will have it's code set"] - #[doc = " to empty. Instead, the code will be saved into the storage right away via `CodeByHash`."] - pub async fn upcoming_paras_genesis( - &self, - _0: &runtime_types::polkadot_parachain::primitives::Id, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::core::option::Option< - runtime_types::polkadot_runtime_parachains::paras::ParaGenesisArgs, - >, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .storage_hash::()? - == [ - 98u8, 249u8, 92u8, 177u8, 21u8, 84u8, 199u8, 194u8, 150u8, 213u8, - 143u8, 107u8, 99u8, 194u8, 141u8, 225u8, 55u8, 94u8, 44u8, 147u8, - 209u8, 144u8, 118u8, 66u8, 139u8, 170u8, 68u8, 62u8, 45u8, 137u8, 91u8, - 8u8, - ] - { - let entry = UpcomingParasGenesis(_0); - self.client.storage().fetch(&entry, block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Upcoming paras instantiation arguments."] - #[doc = ""] - #[doc = " NOTE that after PVF pre-checking is enabled the para genesis arg will have it's code set"] - #[doc = " to empty. Instead, the code will be saved into the storage right away via `CodeByHash`."] - pub async fn upcoming_paras_genesis_iter( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::subxt::KeyIter<'a, T, UpcomingParasGenesis<'a>>, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .storage_hash::()? - == [ - 98u8, 249u8, 92u8, 177u8, 21u8, 84u8, 199u8, 194u8, 150u8, 213u8, - 143u8, 107u8, 99u8, 194u8, 141u8, 225u8, 55u8, 94u8, 44u8, 147u8, - 209u8, 144u8, 118u8, 66u8, 139u8, 170u8, 68u8, 62u8, 45u8, 137u8, 91u8, - 8u8, - ] - { - self.client.storage().iter(block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The number of reference on the validation code in [`CodeByHash`] storage."] - pub async fn code_by_hash_refs( - &self, - _0: &runtime_types::polkadot_parachain::primitives::ValidationCodeHash, - block_hash: ::core::option::Option, - ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> - { - if self.client.metadata().storage_hash::()? 
- == [ - 70u8, 116u8, 27u8, 141u8, 242u8, 54u8, 32u8, 253u8, 176u8, 224u8, - 241u8, 171u8, 22u8, 45u8, 189u8, 95u8, 137u8, 24u8, 211u8, 181u8, - 123u8, 141u8, 200u8, 49u8, 214u8, 177u8, 176u8, 219u8, 178u8, 101u8, - 69u8, 204u8, - ] - { - let entry = CodeByHashRefs(_0); - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The number of reference on the validation code in [`CodeByHash`] storage."] - pub async fn code_by_hash_refs_iter( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::subxt::KeyIter<'a, T, CodeByHashRefs<'a>>, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 70u8, 116u8, 27u8, 141u8, 242u8, 54u8, 32u8, 253u8, 176u8, 224u8, - 241u8, 171u8, 22u8, 45u8, 189u8, 95u8, 137u8, 24u8, 211u8, 181u8, - 123u8, 141u8, 200u8, 49u8, 214u8, 177u8, 176u8, 219u8, 178u8, 101u8, - 69u8, 204u8, - ] - { - self.client.storage().iter(block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Validation code stored by its hash."] - #[doc = ""] - #[doc = " This storage is consistent with [`FutureCodeHash`], [`CurrentCodeHash`] and"] - #[doc = " [`PastCodeHash`]."] - pub async fn code_by_hash( - &self, - _0: &runtime_types::polkadot_parachain::primitives::ValidationCodeHash, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::core::option::Option< - runtime_types::polkadot_parachain::primitives::ValidationCode, - >, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 173u8, 226u8, 170u8, 98u8, 93u8, 151u8, 151u8, 250u8, 215u8, 64u8, - 137u8, 97u8, 15u8, 200u8, 188u8, 113u8, 192u8, 195u8, 179u8, 229u8, - 141u8, 239u8, 97u8, 95u8, 100u8, 47u8, 202u8, 135u8, 110u8, 225u8, - 243u8, 153u8, - ] - { - let entry = CodeByHash(_0); - self.client.storage().fetch(&entry, block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Validation code stored by its hash."] - #[doc = ""] - #[doc = " This storage is consistent with [`FutureCodeHash`], [`CurrentCodeHash`] and"] - #[doc = " [`PastCodeHash`]."] - pub async fn code_by_hash_iter( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::subxt::KeyIter<'a, T, CodeByHash<'a>>, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 173u8, 226u8, 170u8, 98u8, 93u8, 151u8, 151u8, 250u8, 215u8, 64u8, - 137u8, 97u8, 15u8, 200u8, 188u8, 113u8, 192u8, 195u8, 179u8, 229u8, - 141u8, 239u8, 97u8, 95u8, 100u8, 47u8, 202u8, 135u8, 110u8, 225u8, - 243u8, 153u8, - ] - { - self.client.storage().iter(block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - pub mod constants { - use super::runtime_types; - pub struct ConstantsApi<'a, T: ::subxt::Config> { - client: &'a ::subxt::Client, - } - impl<'a, T: ::subxt::Config> ConstantsApi<'a, T> { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { client } - } - pub fn unsigned_priority( - &self, - ) -> ::core::result::Result<::core::primitive::u64, ::subxt::BasicError> - { - if self - .client - .metadata() - .constant_hash("Paras", "UnsignedPriority")? 
- == [ - 78u8, 226u8, 84u8, 70u8, 162u8, 23u8, 167u8, 100u8, 156u8, 228u8, - 119u8, 16u8, 28u8, 202u8, 21u8, 71u8, 72u8, 244u8, 3u8, 255u8, 243u8, - 55u8, 109u8, 238u8, 26u8, 180u8, 207u8, 175u8, 221u8, 27u8, 213u8, - 217u8, - ] - { - let pallet = self.client.metadata().pallet("Paras")?; - let constant = pallet.constant("UnsignedPriority")?; - let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; - Ok(value) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - } - pub mod initializer { - use super::root_mod; - use super::runtime_types; - pub mod calls { - use super::root_mod; - use super::runtime_types; - type DispatchError = runtime_types::sp_runtime::DispatchError; - #[derive( - :: subxt :: codec :: CompactAs, - :: subxt :: codec :: Decode, - :: subxt :: codec :: Encode, - Debug, - )] - pub struct ForceApprove { - pub up_to: ::core::primitive::u32, - } - impl ::subxt::Call for ForceApprove { - const PALLET: &'static str = "Initializer"; - const FUNCTION: &'static str = "force_approve"; - } - pub struct TransactionApi<'a, T: ::subxt::Config, X> { - client: &'a ::subxt::Client, - marker: ::core::marker::PhantomData, - } - impl<'a, T, X> TransactionApi<'a, T, X> - where - T: ::subxt::Config, - X: ::subxt::extrinsic::ExtrinsicParams, - { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { - client, - marker: ::core::marker::PhantomData, - } - } - #[doc = "Issue a signal to the consensus engine to forcibly act as though all parachain"] - #[doc = "blocks in all relay chain blocks up to and including the given number in the current"] - #[doc = "chain are valid and should be finalized."] - pub fn force_approve( - &self, - up_to: ::core::primitive::u32, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - ForceApprove, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? 
- == [ - 61u8, 29u8, 75u8, 222u8, 82u8, 250u8, 124u8, 164u8, 70u8, 114u8, 150u8, - 28u8, 103u8, 53u8, 185u8, 147u8, 168u8, 239u8, 207u8, 197u8, 23u8, - 158u8, 16u8, 255u8, 139u8, 18u8, 214u8, 174u8, 53u8, 191u8, 49u8, 73u8, - ] - { - let call = ForceApprove { up_to }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - pub mod storage { - use super::runtime_types; - pub struct HasInitialized; - impl ::subxt::StorageEntry for HasInitialized { - const PALLET: &'static str = "Initializer"; - const STORAGE: &'static str = "HasInitialized"; - type Value = (); - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct BufferedSessionChanges; - impl ::subxt::StorageEntry for BufferedSessionChanges { - const PALLET: &'static str = "Initializer"; - const STORAGE: &'static str = "BufferedSessionChanges"; - type Value = ::std::vec::Vec< - runtime_types::polkadot_runtime_parachains::initializer::BufferedSessionChange, - >; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct StorageApi<'a, T: ::subxt::Config> { - client: &'a ::subxt::Client, - } - impl<'a, T: ::subxt::Config> StorageApi<'a, T> { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { client } - } - #[doc = " Whether the parachains modules have been initialized within this block."] - #[doc = ""] - #[doc = " Semantically a `bool`, but this guarantees it should never hit the trie,"] - #[doc = " as this is cleared in `on_finalize` and Frame optimizes `None` values to be empty values."] - #[doc = ""] - #[doc = " As a `bool`, `set(false)` and `remove()` both lead to the next `get()` being false, but one of"] - #[doc = " them writes to the trie and one does not. This confusion makes `Option<()>` more suitable for"] - #[doc = " the semantics of this variable."] - pub async fn has_initialized( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result<::core::option::Option<()>, ::subxt::BasicError> - { - if self.client.metadata().storage_hash::()? - == [ - 251u8, 135u8, 247u8, 61u8, 139u8, 102u8, 12u8, 122u8, 227u8, 123u8, - 11u8, 232u8, 120u8, 80u8, 81u8, 48u8, 216u8, 115u8, 159u8, 131u8, - 133u8, 105u8, 200u8, 122u8, 114u8, 6u8, 109u8, 4u8, 164u8, 204u8, - 214u8, 111u8, - ] - { - let entry = HasInitialized; - self.client.storage().fetch(&entry, block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Buffered session changes along with the block number at which they should be applied."] - #[doc = ""] - #[doc = " Typically this will be empty or one element long. Apart from that this item never hits"] - #[doc = " the storage."] - #[doc = ""] - #[doc = " However this is a `Vec` regardless to handle various edge cases that may occur at runtime"] - #[doc = " upgrade boundaries or if governance intervenes."] pub async fn buffered_session_changes (& self , block_hash : :: core :: option :: Option < T :: Hash > ,) -> :: core :: result :: Result < :: std :: vec :: Vec < runtime_types :: polkadot_runtime_parachains :: initializer :: BufferedSessionChange > , :: subxt :: BasicError >{ - if self - .client - .metadata() - .storage_hash::()? 
- == [ - 78u8, 99u8, 243u8, 162u8, 81u8, 154u8, 54u8, 67u8, 201u8, 223u8, 231u8, - 45u8, 78u8, 146u8, 170u8, 176u8, 55u8, 109u8, 35u8, 214u8, 246u8, - 112u8, 26u8, 150u8, 216u8, 140u8, 67u8, 125u8, 112u8, 43u8, 40u8, - 156u8, - ] - { - let entry = BufferedSessionChanges; - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - } - pub mod dmp { - use super::root_mod; - use super::runtime_types; - pub mod calls { - use super::root_mod; - use super::runtime_types; - type DispatchError = runtime_types::sp_runtime::DispatchError; - pub struct TransactionApi<'a, T: ::subxt::Config, X> { - client: &'a ::subxt::Client, - marker: ::core::marker::PhantomData, - } - impl<'a, T, X> TransactionApi<'a, T, X> - where - T: ::subxt::Config, - X: ::subxt::extrinsic::ExtrinsicParams, - { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { - client, - marker: ::core::marker::PhantomData, - } - } - } - } - pub mod storage { - use super::runtime_types; - pub struct DownwardMessageQueues<'a>( - pub &'a runtime_types::polkadot_parachain::primitives::Id, - ); - impl ::subxt::StorageEntry for DownwardMessageQueues<'_> { - const PALLET: &'static str = "Dmp"; - const STORAGE: &'static str = "DownwardMessageQueues"; - type Value = ::std::vec::Vec< - runtime_types::polkadot_core_primitives::InboundDownwardMessage< - ::core::primitive::u32, - >, - >; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( - &self.0, - ::subxt::StorageHasher::Twox64Concat, - )]) - } - } - pub struct DownwardMessageQueueHeads<'a>( - pub &'a runtime_types::polkadot_parachain::primitives::Id, - ); - impl ::subxt::StorageEntry for DownwardMessageQueueHeads<'_> { - const PALLET: &'static str = "Dmp"; - const STORAGE: &'static str = "DownwardMessageQueueHeads"; - type Value = ::subxt::sp_core::H256; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( - &self.0, - ::subxt::StorageHasher::Twox64Concat, - )]) - } - } - pub struct StorageApi<'a, T: ::subxt::Config> { - client: &'a ::subxt::Client, - } - impl<'a, T: ::subxt::Config> StorageApi<'a, T> { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { client } - } - #[doc = " The downward messages addressed for a certain para."] - pub async fn downward_message_queues( - &self, - _0: &runtime_types::polkadot_parachain::primitives::Id, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::std::vec::Vec< - runtime_types::polkadot_core_primitives::InboundDownwardMessage< - ::core::primitive::u32, - >, - >, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .storage_hash::()? - == [ - 104u8, 117u8, 177u8, 125u8, 208u8, 212u8, 216u8, 171u8, 212u8, 235u8, - 43u8, 255u8, 146u8, 230u8, 243u8, 27u8, 133u8, 109u8, 129u8, 162u8, - 247u8, 23u8, 195u8, 9u8, 219u8, 235u8, 119u8, 220u8, 179u8, 198u8, - 130u8, 4u8, - ] - { - let entry = DownwardMessageQueues(_0); - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The downward messages addressed for a certain para."] - pub async fn downward_message_queues_iter( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::subxt::KeyIter<'a, T, DownwardMessageQueues<'a>>, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .storage_hash::()? 
- == [ - 104u8, 117u8, 177u8, 125u8, 208u8, 212u8, 216u8, 171u8, 212u8, 235u8, - 43u8, 255u8, 146u8, 230u8, 243u8, 27u8, 133u8, 109u8, 129u8, 162u8, - 247u8, 23u8, 195u8, 9u8, 219u8, 235u8, 119u8, 220u8, 179u8, 198u8, - 130u8, 4u8, - ] - { - self.client.storage().iter(block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " A mapping that stores the downward message queue MQC head for each para."] - #[doc = ""] - #[doc = " Each link in this chain has a form:"] - #[doc = " `(prev_head, B, H(M))`, where"] - #[doc = " - `prev_head`: is the previous head hash or zero if none."] - #[doc = " - `B`: is the relay-chain block number in which a message was appended."] - #[doc = " - `H(M)`: is the hash of the message being appended."] - pub async fn downward_message_queue_heads( - &self, - _0: &runtime_types::polkadot_parachain::primitives::Id, - block_hash: ::core::option::Option, - ) -> ::core::result::Result<::subxt::sp_core::H256, ::subxt::BasicError> - { - if self - .client - .metadata() - .storage_hash::()? - == [ - 47u8, 135u8, 173u8, 197u8, 128u8, 135u8, 195u8, 174u8, 186u8, 62u8, - 136u8, 160u8, 75u8, 221u8, 166u8, 13u8, 82u8, 131u8, 124u8, 197u8, - 88u8, 86u8, 205u8, 48u8, 170u8, 230u8, 71u8, 238u8, 146u8, 189u8, 3u8, - 154u8, - ] - { - let entry = DownwardMessageQueueHeads(_0); - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " A mapping that stores the downward message queue MQC head for each para."] - #[doc = ""] - #[doc = " Each link in this chain has a form:"] - #[doc = " `(prev_head, B, H(M))`, where"] - #[doc = " - `prev_head`: is the previous head hash or zero if none."] - #[doc = " - `B`: is the relay-chain block number in which a message was appended."] - #[doc = " - `H(M)`: is the hash of the message being appended."] - pub async fn downward_message_queue_heads_iter( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::subxt::KeyIter<'a, T, DownwardMessageQueueHeads<'a>>, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .storage_hash::()? 
- == [ - 47u8, 135u8, 173u8, 197u8, 128u8, 135u8, 195u8, 174u8, 186u8, 62u8, - 136u8, 160u8, 75u8, 221u8, 166u8, 13u8, 82u8, 131u8, 124u8, 197u8, - 88u8, 86u8, 205u8, 48u8, 170u8, 230u8, 71u8, 238u8, 146u8, 189u8, 3u8, - 154u8, - ] - { - self.client.storage().iter(block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - } - pub mod ump { - use super::root_mod; - use super::runtime_types; - pub mod calls { - use super::root_mod; - use super::runtime_types; - type DispatchError = runtime_types::sp_runtime::DispatchError; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct ServiceOverweight { - pub index: ::core::primitive::u64, - pub weight_limit: ::core::primitive::u64, - } - impl ::subxt::Call for ServiceOverweight { - const PALLET: &'static str = "Ump"; - const FUNCTION: &'static str = "service_overweight"; - } - pub struct TransactionApi<'a, T: ::subxt::Config, X> { - client: &'a ::subxt::Client, - marker: ::core::marker::PhantomData, - } - impl<'a, T, X> TransactionApi<'a, T, X> - where - T: ::subxt::Config, - X: ::subxt::extrinsic::ExtrinsicParams, - { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { - client, - marker: ::core::marker::PhantomData, - } - } - #[doc = "Service a single overweight upward message."] - #[doc = ""] - #[doc = "- `origin`: Must pass `ExecuteOverweightOrigin`."] - #[doc = "- `index`: The index of the overweight message to service."] - #[doc = "- `weight_limit`: The amount of weight that message execution may take."] - #[doc = ""] - #[doc = "Errors:"] - #[doc = "- `UnknownMessageIndex`: Message of `index` is unknown."] - #[doc = "- `WeightOverLimit`: Message execution may use greater than `weight_limit`."] - #[doc = ""] - #[doc = "Events:"] - #[doc = "- `OverweightServiced`: On success."] - pub fn service_overweight( - &self, - index: ::core::primitive::u64, - weight_limit: ::core::primitive::u64, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - ServiceOverweight, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? 
- == [ - 229u8, 167u8, 106u8, 63u8, 141u8, 80u8, 8u8, 201u8, 156u8, 34u8, 47u8, - 104u8, 116u8, 57u8, 35u8, 216u8, 132u8, 3u8, 201u8, 169u8, 38u8, 107u8, - 149u8, 120u8, 42u8, 130u8, 100u8, 133u8, 214u8, 48u8, 99u8, 146u8, - ] - { - let call = ServiceOverweight { - index, - weight_limit, - }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - pub type Event = runtime_types::polkadot_runtime_parachains::ump::pallet::Event; - pub mod events { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "Upward message is invalid XCM."] - #[doc = "\\[ id \\]"] - pub struct InvalidFormat(pub [::core::primitive::u8; 32usize]); - impl ::subxt::Event for InvalidFormat { - const PALLET: &'static str = "Ump"; - const EVENT: &'static str = "InvalidFormat"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "Upward message is unsupported version of XCM."] - #[doc = "\\[ id \\]"] - pub struct UnsupportedVersion(pub [::core::primitive::u8; 32usize]); - impl ::subxt::Event for UnsupportedVersion { - const PALLET: &'static str = "Ump"; - const EVENT: &'static str = "UnsupportedVersion"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "Upward message executed with the given outcome."] - #[doc = "\\[ id, outcome \\]"] - pub struct ExecutedUpward( - pub [::core::primitive::u8; 32usize], - pub runtime_types::xcm::v2::traits::Outcome, - ); - impl ::subxt::Event for ExecutedUpward { - const PALLET: &'static str = "Ump"; - const EVENT: &'static str = "ExecutedUpward"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "The weight limit for handling upward messages was reached."] - #[doc = "\\[ id, remaining, required \\]"] - pub struct WeightExhausted( - pub [::core::primitive::u8; 32usize], - pub ::core::primitive::u64, - pub ::core::primitive::u64, - ); - impl ::subxt::Event for WeightExhausted { - const PALLET: &'static str = "Ump"; - const EVENT: &'static str = "WeightExhausted"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "Some upward messages have been received and will be processed."] - #[doc = "\\[ para, count, size \\]"] - pub struct UpwardMessagesReceived( - pub runtime_types::polkadot_parachain::primitives::Id, - pub ::core::primitive::u32, - pub ::core::primitive::u32, - ); - impl ::subxt::Event for UpwardMessagesReceived { - const PALLET: &'static str = "Ump"; - const EVENT: &'static str = "UpwardMessagesReceived"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "The weight budget was exceeded for an individual upward message."] - #[doc = ""] - #[doc = "This message can be later dispatched manually using `service_overweight` dispatchable"] - #[doc = "using the assigned `overweight_index`."] - #[doc = ""] - #[doc = "\\[ para, id, overweight_index, required \\]"] - pub struct OverweightEnqueued( - pub runtime_types::polkadot_parachain::primitives::Id, - pub [::core::primitive::u8; 32usize], - pub ::core::primitive::u64, - pub ::core::primitive::u64, - ); - impl ::subxt::Event for OverweightEnqueued { - const PALLET: &'static str = "Ump"; - const EVENT: &'static str = "OverweightEnqueued"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "Upward message from the overweight queue was executed with the 
given actual weight"] - #[doc = "used."] - #[doc = ""] - #[doc = "\\[ overweight_index, used \\]"] - pub struct OverweightServiced(pub ::core::primitive::u64, pub ::core::primitive::u64); - impl ::subxt::Event for OverweightServiced { - const PALLET: &'static str = "Ump"; - const EVENT: &'static str = "OverweightServiced"; - } - } - pub mod storage { - use super::runtime_types; - pub struct RelayDispatchQueues<'a>( - pub &'a runtime_types::polkadot_parachain::primitives::Id, - ); - impl ::subxt::StorageEntry for RelayDispatchQueues<'_> { - const PALLET: &'static str = "Ump"; - const STORAGE: &'static str = "RelayDispatchQueues"; - type Value = ::std::vec::Vec<::std::vec::Vec<::core::primitive::u8>>; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( - &self.0, - ::subxt::StorageHasher::Twox64Concat, - )]) - } - } - pub struct RelayDispatchQueueSize<'a>( - pub &'a runtime_types::polkadot_parachain::primitives::Id, - ); - impl ::subxt::StorageEntry for RelayDispatchQueueSize<'_> { - const PALLET: &'static str = "Ump"; - const STORAGE: &'static str = "RelayDispatchQueueSize"; - type Value = (::core::primitive::u32, ::core::primitive::u32); - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( - &self.0, - ::subxt::StorageHasher::Twox64Concat, - )]) - } - } - pub struct NeedsDispatch; - impl ::subxt::StorageEntry for NeedsDispatch { - const PALLET: &'static str = "Ump"; - const STORAGE: &'static str = "NeedsDispatch"; - type Value = ::std::vec::Vec; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct NextDispatchRoundStartWith; - impl ::subxt::StorageEntry for NextDispatchRoundStartWith { - const PALLET: &'static str = "Ump"; - const STORAGE: &'static str = "NextDispatchRoundStartWith"; - type Value = runtime_types::polkadot_parachain::primitives::Id; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct Overweight<'a>(pub &'a ::core::primitive::u64); - impl ::subxt::StorageEntry for Overweight<'_> { - const PALLET: &'static str = "Ump"; - const STORAGE: &'static str = "Overweight"; - type Value = ( - runtime_types::polkadot_parachain::primitives::Id, - ::std::vec::Vec<::core::primitive::u8>, - ); - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( - &self.0, - ::subxt::StorageHasher::Twox64Concat, - )]) - } - } - pub struct OverweightCount; - impl ::subxt::StorageEntry for OverweightCount { - const PALLET: &'static str = "Ump"; - const STORAGE: &'static str = "OverweightCount"; - type Value = ::core::primitive::u64; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct StorageApi<'a, T: ::subxt::Config> { - client: &'a ::subxt::Client, - } - impl<'a, T: ::subxt::Config> StorageApi<'a, T> { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { client } - } - #[doc = " The messages waiting to be handled by the relay-chain originating from a certain parachain."] - #[doc = ""] - #[doc = " Note that some upward messages might have been already processed by the inclusion logic. 
E.g."] - #[doc = " channel management messages."] - #[doc = ""] - #[doc = " The messages are processed in FIFO order."] - pub async fn relay_dispatch_queues( - &self, - _0: &runtime_types::polkadot_parachain::primitives::Id, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::std::vec::Vec<::std::vec::Vec<::core::primitive::u8>>, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .storage_hash::()? - == [ - 22u8, 48u8, 215u8, 37u8, 42u8, 115u8, 27u8, 8u8, 249u8, 65u8, 47u8, - 61u8, 96u8, 1u8, 196u8, 143u8, 53u8, 7u8, 241u8, 126u8, 4u8, 242u8, - 42u8, 171u8, 66u8, 162u8, 203u8, 200u8, 239u8, 50u8, 87u8, 72u8, - ] - { - let entry = RelayDispatchQueues(_0); - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The messages waiting to be handled by the relay-chain originating from a certain parachain."] - #[doc = ""] - #[doc = " Note that some upward messages might have been already processed by the inclusion logic. E.g."] - #[doc = " channel management messages."] - #[doc = ""] - #[doc = " The messages are processed in FIFO order."] - pub async fn relay_dispatch_queues_iter( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::subxt::KeyIter<'a, T, RelayDispatchQueues<'a>>, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .storage_hash::()? - == [ - 22u8, 48u8, 215u8, 37u8, 42u8, 115u8, 27u8, 8u8, 249u8, 65u8, 47u8, - 61u8, 96u8, 1u8, 196u8, 143u8, 53u8, 7u8, 241u8, 126u8, 4u8, 242u8, - 42u8, 171u8, 66u8, 162u8, 203u8, 200u8, 239u8, 50u8, 87u8, 72u8, - ] - { - self.client.storage().iter(block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Size of the dispatch queues. Caches sizes of the queues in `RelayDispatchQueue`."] - #[doc = ""] - #[doc = " First item in the tuple is the count of messages and second"] - #[doc = " is the total length (in bytes) of the message payloads."] - #[doc = ""] - #[doc = " Note that this is an auxiliary mapping: it's possible to tell the byte size and the number of"] - #[doc = " messages only looking at `RelayDispatchQueues`. This mapping is separate to avoid the cost of"] - #[doc = " loading the whole message queue if only the total size and count are required."] - #[doc = ""] - #[doc = " Invariant:"] - #[doc = " - The set of keys should exactly match the set of keys of `RelayDispatchQueues`."] - pub async fn relay_dispatch_queue_size( - &self, - _0: &runtime_types::polkadot_parachain::primitives::Id, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - (::core::primitive::u32, ::core::primitive::u32), - ::subxt::BasicError, - > { - if self - .client - .metadata() - .storage_hash::()? - == [ - 8u8, 0u8, 54u8, 33u8, 185u8, 112u8, 21u8, 174u8, 15u8, 147u8, 134u8, - 184u8, 108u8, 144u8, 55u8, 138u8, 24u8, 66u8, 255u8, 197u8, 131u8, - 229u8, 35u8, 107u8, 251u8, 226u8, 78u8, 218u8, 41u8, 251u8, 155u8, - 79u8, - ] - { - let entry = RelayDispatchQueueSize(_0); - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Size of the dispatch queues. 
Caches sizes of the queues in `RelayDispatchQueue`."] - #[doc = ""] - #[doc = " First item in the tuple is the count of messages and second"] - #[doc = " is the total length (in bytes) of the message payloads."] - #[doc = ""] - #[doc = " Note that this is an auxiliary mapping: it's possible to tell the byte size and the number of"] - #[doc = " messages only looking at `RelayDispatchQueues`. This mapping is separate to avoid the cost of"] - #[doc = " loading the whole message queue if only the total size and count are required."] - #[doc = ""] - #[doc = " Invariant:"] - #[doc = " - The set of keys should exactly match the set of keys of `RelayDispatchQueues`."] - pub async fn relay_dispatch_queue_size_iter( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::subxt::KeyIter<'a, T, RelayDispatchQueueSize<'a>>, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .storage_hash::()? - == [ - 8u8, 0u8, 54u8, 33u8, 185u8, 112u8, 21u8, 174u8, 15u8, 147u8, 134u8, - 184u8, 108u8, 144u8, 55u8, 138u8, 24u8, 66u8, 255u8, 197u8, 131u8, - 229u8, 35u8, 107u8, 251u8, 226u8, 78u8, 218u8, 41u8, 251u8, 155u8, - 79u8, - ] - { - self.client.storage().iter(block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The ordered list of `ParaId`s that have a `RelayDispatchQueue` entry."] - #[doc = ""] - #[doc = " Invariant:"] - #[doc = " - The set of items from this vector should be exactly the set of the keys in"] - #[doc = " `RelayDispatchQueues` and `RelayDispatchQueueSize`."] - pub async fn needs_dispatch( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::std::vec::Vec, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 75u8, 38u8, 232u8, 83u8, 71u8, 101u8, 248u8, 170u8, 5u8, 32u8, 209u8, - 97u8, 190u8, 31u8, 241u8, 1u8, 98u8, 87u8, 64u8, 208u8, 26u8, 100u8, - 93u8, 79u8, 61u8, 114u8, 11u8, 172u8, 112u8, 164u8, 171u8, 237u8, - ] - { - let entry = NeedsDispatch; - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " This is the para that gets will get dispatched first during the next upward dispatchable queue"] - #[doc = " execution round."] - #[doc = ""] - #[doc = " Invariant:"] - #[doc = " - If `Some(para)`, then `para` must be present in `NeedsDispatch`."] - pub async fn next_dispatch_round_start_with( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::core::option::Option, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .storage_hash::()? 
- == [ - 102u8, 165u8, 118u8, 140u8, 84u8, 122u8, 91u8, 169u8, 232u8, 125u8, - 52u8, 228u8, 15u8, 228u8, 91u8, 79u8, 218u8, 62u8, 93u8, 42u8, 204u8, - 6u8, 34u8, 185u8, 218u8, 150u8, 7u8, 250u8, 79u8, 142u8, 211u8, 0u8, - ] - { - let entry = NextDispatchRoundStartWith; - self.client.storage().fetch(&entry, block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The messages that exceeded max individual message weight budget."] - #[doc = ""] - #[doc = " These messages stay there until manually dispatched."] - pub async fn overweight( - &self, - _0: &::core::primitive::u64, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::core::option::Option<( - runtime_types::polkadot_parachain::primitives::Id, - ::std::vec::Vec<::core::primitive::u8>, - )>, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 223u8, 155u8, 1u8, 100u8, 77u8, 13u8, 92u8, 235u8, 64u8, 30u8, 199u8, - 178u8, 149u8, 66u8, 155u8, 201u8, 84u8, 26u8, 81u8, 183u8, 0u8, 113u8, - 182u8, 37u8, 69u8, 66u8, 240u8, 151u8, 254u8, 249u8, 134u8, 51u8, - ] - { - let entry = Overweight(_0); - self.client.storage().fetch(&entry, block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The messages that exceeded max individual message weight budget."] - #[doc = ""] - #[doc = " These messages stay there until manually dispatched."] - pub async fn overweight_iter( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::subxt::KeyIter<'a, T, Overweight<'a>>, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 223u8, 155u8, 1u8, 100u8, 77u8, 13u8, 92u8, 235u8, 64u8, 30u8, 199u8, - 178u8, 149u8, 66u8, 155u8, 201u8, 84u8, 26u8, 81u8, 183u8, 0u8, 113u8, - 182u8, 37u8, 69u8, 66u8, 240u8, 151u8, 254u8, 249u8, 134u8, 51u8, - ] - { - self.client.storage().iter(block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The number of overweight messages ever recorded in `Overweight` (and thus the lowest free"] - #[doc = " index)."] - pub async fn overweight_count( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result<::core::primitive::u64, ::subxt::BasicError> - { - if self.client.metadata().storage_hash::()? 
- == [ - 102u8, 180u8, 196u8, 148u8, 115u8, 62u8, 46u8, 238u8, 97u8, 116u8, - 117u8, 42u8, 14u8, 5u8, 72u8, 237u8, 230u8, 46u8, 150u8, 126u8, 89u8, - 64u8, 233u8, 166u8, 180u8, 137u8, 52u8, 233u8, 252u8, 255u8, 36u8, - 20u8, - ] - { - let entry = OverweightCount; - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - } - pub mod hrmp { - use super::root_mod; - use super::runtime_types; - pub mod calls { - use super::root_mod; - use super::runtime_types; - type DispatchError = runtime_types::sp_runtime::DispatchError; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct HrmpInitOpenChannel { - pub recipient: runtime_types::polkadot_parachain::primitives::Id, - pub proposed_max_capacity: ::core::primitive::u32, - pub proposed_max_message_size: ::core::primitive::u32, - } - impl ::subxt::Call for HrmpInitOpenChannel { - const PALLET: &'static str = "Hrmp"; - const FUNCTION: &'static str = "hrmp_init_open_channel"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct HrmpAcceptOpenChannel { - pub sender: runtime_types::polkadot_parachain::primitives::Id, - } - impl ::subxt::Call for HrmpAcceptOpenChannel { - const PALLET: &'static str = "Hrmp"; - const FUNCTION: &'static str = "hrmp_accept_open_channel"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct HrmpCloseChannel { - pub channel_id: runtime_types::polkadot_parachain::primitives::HrmpChannelId, - } - impl ::subxt::Call for HrmpCloseChannel { - const PALLET: &'static str = "Hrmp"; - const FUNCTION: &'static str = "hrmp_close_channel"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct ForceCleanHrmp { - pub para: runtime_types::polkadot_parachain::primitives::Id, - pub inbound: ::core::primitive::u32, - pub outbound: ::core::primitive::u32, - } - impl ::subxt::Call for ForceCleanHrmp { - const PALLET: &'static str = "Hrmp"; - const FUNCTION: &'static str = "force_clean_hrmp"; - } - #[derive( - :: subxt :: codec :: CompactAs, - :: subxt :: codec :: Decode, - :: subxt :: codec :: Encode, - Debug, - )] - pub struct ForceProcessHrmpOpen { - pub channels: ::core::primitive::u32, - } - impl ::subxt::Call for ForceProcessHrmpOpen { - const PALLET: &'static str = "Hrmp"; - const FUNCTION: &'static str = "force_process_hrmp_open"; - } - #[derive( - :: subxt :: codec :: CompactAs, - :: subxt :: codec :: Decode, - :: subxt :: codec :: Encode, - Debug, - )] - pub struct ForceProcessHrmpClose { - pub channels: ::core::primitive::u32, - } - impl ::subxt::Call for ForceProcessHrmpClose { - const PALLET: &'static str = "Hrmp"; - const FUNCTION: &'static str = "force_process_hrmp_close"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct HrmpCancelOpenRequest { - pub channel_id: runtime_types::polkadot_parachain::primitives::HrmpChannelId, - pub open_requests: ::core::primitive::u32, - } - impl ::subxt::Call for HrmpCancelOpenRequest { - const PALLET: &'static str = "Hrmp"; - const FUNCTION: &'static str = "hrmp_cancel_open_request"; - } - pub struct TransactionApi<'a, T: ::subxt::Config, X> { - client: &'a ::subxt::Client, - marker: ::core::marker::PhantomData, - } - impl<'a, T, X> TransactionApi<'a, T, X> - where - T: ::subxt::Config, - X: ::subxt::extrinsic::ExtrinsicParams, - { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { - 
client, - marker: ::core::marker::PhantomData, - } - } - #[doc = "Initiate opening a channel from a parachain to a given recipient with given channel"] - #[doc = "parameters."] - #[doc = ""] - #[doc = "- `proposed_max_capacity` - specifies how many messages can be in the channel at once."] - #[doc = "- `proposed_max_message_size` - specifies the maximum size of the messages."] - #[doc = ""] - #[doc = "These numbers are a subject to the relay-chain configuration limits."] - #[doc = ""] - #[doc = "The channel can be opened only after the recipient confirms it and only on a session"] - #[doc = "change."] - pub fn hrmp_init_open_channel( - &self, - recipient: runtime_types::polkadot_parachain::primitives::Id, - proposed_max_capacity: ::core::primitive::u32, - proposed_max_message_size: ::core::primitive::u32, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - HrmpInitOpenChannel, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 244u8, 142u8, 161u8, 144u8, 109u8, 104u8, 164u8, 198u8, 201u8, 79u8, - 178u8, 136u8, 107u8, 104u8, 83u8, 11u8, 167u8, 164u8, 223u8, 147u8, - 135u8, 35u8, 133u8, 176u8, 236u8, 112u8, 107u8, 131u8, 184u8, 105u8, - 174u8, 12u8, - ] - { - let call = HrmpInitOpenChannel { - recipient, - proposed_max_capacity, - proposed_max_message_size, - }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Accept a pending open channel request from the given sender."] - #[doc = ""] - #[doc = "The channel will be opened only on the next session boundary."] - pub fn hrmp_accept_open_channel( - &self, - sender: runtime_types::polkadot_parachain::primitives::Id, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - HrmpAcceptOpenChannel, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .call_hash::()? - == [ - 95u8, 196u8, 155u8, 220u8, 235u8, 120u8, 67u8, 247u8, 245u8, 20u8, - 162u8, 41u8, 4u8, 204u8, 125u8, 16u8, 224u8, 72u8, 198u8, 237u8, 84u8, - 46u8, 201u8, 17u8, 172u8, 55u8, 115u8, 51u8, 16u8, 140u8, 4u8, 253u8, - ] - { - let call = HrmpAcceptOpenChannel { sender }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Initiate unilateral closing of a channel. The origin must be either the sender or the"] - #[doc = "recipient in the channel being closed."] - #[doc = ""] - #[doc = "The closure can only happen on a session change."] - pub fn hrmp_close_channel( - &self, - channel_id: runtime_types::polkadot_parachain::primitives::HrmpChannelId, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - HrmpCloseChannel, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 199u8, 9u8, 55u8, 184u8, 196u8, 45u8, 46u8, 251u8, 48u8, 23u8, 132u8, - 74u8, 188u8, 121u8, 41u8, 18u8, 71u8, 65u8, 129u8, 14u8, 38u8, 48u8, - 253u8, 119u8, 171u8, 202u8, 9u8, 65u8, 250u8, 98u8, 185u8, 220u8, - ] - { - let call = HrmpCloseChannel { channel_id }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "This extrinsic triggers the cleanup of all the HRMP storage items that"] - #[doc = "a para may have. 
Normally this happens once per session, but this allows"] - #[doc = "you to trigger the cleanup immediately for a specific parachain."] - #[doc = ""] - #[doc = "Origin must be Root."] - #[doc = ""] - #[doc = "Number of inbound and outbound channels for `para` must be provided as witness data of weighing."] - pub fn force_clean_hrmp( - &self, - para: runtime_types::polkadot_parachain::primitives::Id, - inbound: ::core::primitive::u32, - outbound: ::core::primitive::u32, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - ForceCleanHrmp, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 182u8, 231u8, 99u8, 129u8, 130u8, 109u8, 97u8, 108u8, 37u8, 107u8, - 203u8, 70u8, 133u8, 106u8, 226u8, 77u8, 110u8, 189u8, 227u8, 26u8, - 129u8, 189u8, 234u8, 215u8, 112u8, 22u8, 127u8, 185u8, 152u8, 157u8, - 14u8, 66u8, - ] - { - let call = ForceCleanHrmp { - para, - inbound, - outbound, - }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Force process HRMP open channel requests."] - #[doc = ""] - #[doc = "If there are pending HRMP open channel requests, you can use this"] - #[doc = "function process all of those requests immediately."] - #[doc = ""] - #[doc = "Total number of opening channels must be provided as witness data of weighing."] - pub fn force_process_hrmp_open( - &self, - channels: ::core::primitive::u32, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - ForceProcessHrmpOpen, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 162u8, 53u8, 194u8, 175u8, 117u8, 32u8, 217u8, 177u8, 9u8, 255u8, 88u8, - 40u8, 8u8, 174u8, 8u8, 11u8, 26u8, 82u8, 213u8, 40u8, 20u8, 89u8, - 227u8, 209u8, 95u8, 162u8, 221u8, 97u8, 230u8, 98u8, 110u8, 85u8, - ] - { - let call = ForceProcessHrmpOpen { channels }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Force process HRMP close channel requests."] - #[doc = ""] - #[doc = "If there are pending HRMP close channel requests, you can use this"] - #[doc = "function process all of those requests immediately."] - #[doc = ""] - #[doc = "Total number of closing channels must be provided as witness data of weighing."] - pub fn force_process_hrmp_close( - &self, - channels: ::core::primitive::u32, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - ForceProcessHrmpClose, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .call_hash::()? - == [ - 128u8, 141u8, 191u8, 255u8, 204u8, 137u8, 27u8, 170u8, 180u8, 166u8, - 93u8, 144u8, 70u8, 56u8, 132u8, 100u8, 5u8, 114u8, 252u8, 163u8, 164u8, - 246u8, 234u8, 152u8, 193u8, 79u8, 89u8, 137u8, 46u8, 171u8, 32u8, - 119u8, - ] - { - let call = ForceProcessHrmpClose { channels }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "This cancels a pending open channel request. It can be canceled by either of the sender"] - #[doc = "or the recipient for that request. The origin must be either of those."] - #[doc = ""] - #[doc = "The cancellation happens immediately. It is not possible to cancel the request if it is"] - #[doc = "already accepted."] - #[doc = ""] - #[doc = "Total number of open requests (i.e. 
`HrmpOpenChannelRequestsList`) must be provided as"] - #[doc = "witness data."] - pub fn hrmp_cancel_open_request( - &self, - channel_id: runtime_types::polkadot_parachain::primitives::HrmpChannelId, - open_requests: ::core::primitive::u32, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - HrmpCancelOpenRequest, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .call_hash::()? - == [ - 8u8, 83u8, 32u8, 187u8, 220u8, 1u8, 212u8, 226u8, 72u8, 61u8, 110u8, - 211u8, 238u8, 119u8, 95u8, 48u8, 150u8, 51u8, 177u8, 182u8, 209u8, - 174u8, 245u8, 25u8, 194u8, 199u8, 212u8, 131u8, 77u8, 72u8, 9u8, 120u8, - ] - { - let call = HrmpCancelOpenRequest { - channel_id, - open_requests, - }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - pub type Event = runtime_types::polkadot_runtime_parachains::hrmp::pallet::Event; - pub mod events { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "Open HRMP channel requested."] - #[doc = "`[sender, recipient, proposed_max_capacity, proposed_max_message_size]`"] - pub struct OpenChannelRequested( - pub runtime_types::polkadot_parachain::primitives::Id, - pub runtime_types::polkadot_parachain::primitives::Id, - pub ::core::primitive::u32, - pub ::core::primitive::u32, - ); - impl ::subxt::Event for OpenChannelRequested { - const PALLET: &'static str = "Hrmp"; - const EVENT: &'static str = "OpenChannelRequested"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "An HRMP channel request sent by the receiver was canceled by either party."] - #[doc = "`[by_parachain, channel_id]`"] - pub struct OpenChannelCanceled( - pub runtime_types::polkadot_parachain::primitives::Id, - pub runtime_types::polkadot_parachain::primitives::HrmpChannelId, - ); - impl ::subxt::Event for OpenChannelCanceled { - const PALLET: &'static str = "Hrmp"; - const EVENT: &'static str = "OpenChannelCanceled"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "Open HRMP channel accepted. `[sender, recipient]`"] - pub struct OpenChannelAccepted( - pub runtime_types::polkadot_parachain::primitives::Id, - pub runtime_types::polkadot_parachain::primitives::Id, - ); - impl ::subxt::Event for OpenChannelAccepted { - const PALLET: &'static str = "Hrmp"; - const EVENT: &'static str = "OpenChannelAccepted"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "HRMP channel closed. 
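// Illustrative sketch, not part of the generated bindings above; `build_init_open_channel`
// and its argument values are hypothetical. It shows how the Hrmp `TransactionApi` is meant
// to be used: the generated method compares a hard-coded call hash against the connected
// node's metadata and returns `MetadataError::IncompatibleMetadata` if the Hrmp pallet has
// changed, otherwise it hands back a `SubmittableExtrinsic` for the caller to sign and submit.
fn build_init_open_channel<'a, T, X>(
    tx: &calls::TransactionApi<'a, T, X>,
    recipient: runtime_types::polkadot_parachain::primitives::Id,
) -> ::core::result::Result<
    ::subxt::SubmittableExtrinsic<
        'a,
        T,
        X,
        calls::HrmpInitOpenChannel,
        runtime_types::sp_runtime::DispatchError,
        root_mod::Event,
    >,
    ::subxt::BasicError,
>
where
    T: ::subxt::Config,
    // Bound assumed to mirror the generated `TransactionApi` impl above.
    X: ::subxt::extrinsic::ExtrinsicParams<T>,
{
    // Capacity and message size are placeholders; real values must respect the
    // relay-chain configuration limits called out in the docs above.
    tx.hrmp_init_open_channel(recipient, 8, 1_024)
}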
`[by_parachain, channel_id]`"] - pub struct ChannelClosed( - pub runtime_types::polkadot_parachain::primitives::Id, - pub runtime_types::polkadot_parachain::primitives::HrmpChannelId, - ); - impl ::subxt::Event for ChannelClosed { - const PALLET: &'static str = "Hrmp"; - const EVENT: &'static str = "ChannelClosed"; - } - } - pub mod storage { - use super::runtime_types; - pub struct HrmpOpenChannelRequests<'a>( - pub &'a runtime_types::polkadot_parachain::primitives::HrmpChannelId, - ); - impl ::subxt::StorageEntry for HrmpOpenChannelRequests<'_> { - const PALLET: &'static str = "Hrmp"; - const STORAGE: &'static str = "HrmpOpenChannelRequests"; - type Value = - runtime_types::polkadot_runtime_parachains::hrmp::HrmpOpenChannelRequest; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( - &self.0, - ::subxt::StorageHasher::Twox64Concat, - )]) - } - } - pub struct HrmpOpenChannelRequestsList; - impl ::subxt::StorageEntry for HrmpOpenChannelRequestsList { - const PALLET: &'static str = "Hrmp"; - const STORAGE: &'static str = "HrmpOpenChannelRequestsList"; - type Value = - ::std::vec::Vec; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct HrmpOpenChannelRequestCount<'a>( - pub &'a runtime_types::polkadot_parachain::primitives::Id, - ); - impl ::subxt::StorageEntry for HrmpOpenChannelRequestCount<'_> { - const PALLET: &'static str = "Hrmp"; - const STORAGE: &'static str = "HrmpOpenChannelRequestCount"; - type Value = ::core::primitive::u32; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( - &self.0, - ::subxt::StorageHasher::Twox64Concat, - )]) - } - } - pub struct HrmpAcceptedChannelRequestCount<'a>( - pub &'a runtime_types::polkadot_parachain::primitives::Id, - ); - impl ::subxt::StorageEntry for HrmpAcceptedChannelRequestCount<'_> { - const PALLET: &'static str = "Hrmp"; - const STORAGE: &'static str = "HrmpAcceptedChannelRequestCount"; - type Value = ::core::primitive::u32; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( - &self.0, - ::subxt::StorageHasher::Twox64Concat, - )]) - } - } - pub struct HrmpCloseChannelRequests<'a>( - pub &'a runtime_types::polkadot_parachain::primitives::HrmpChannelId, - ); - impl ::subxt::StorageEntry for HrmpCloseChannelRequests<'_> { - const PALLET: &'static str = "Hrmp"; - const STORAGE: &'static str = "HrmpCloseChannelRequests"; - type Value = (); - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( - &self.0, - ::subxt::StorageHasher::Twox64Concat, - )]) - } - } - pub struct HrmpCloseChannelRequestsList; - impl ::subxt::StorageEntry for HrmpCloseChannelRequestsList { - const PALLET: &'static str = "Hrmp"; - const STORAGE: &'static str = "HrmpCloseChannelRequestsList"; - type Value = - ::std::vec::Vec; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct HrmpWatermarks<'a>( - pub &'a runtime_types::polkadot_parachain::primitives::Id, - ); - impl ::subxt::StorageEntry for HrmpWatermarks<'_> { - const PALLET: &'static str = "Hrmp"; - const STORAGE: &'static str = "HrmpWatermarks"; - type Value = ::core::primitive::u32; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( - &self.0, - ::subxt::StorageHasher::Twox64Concat, - )]) - } - } - pub struct 
HrmpChannels<'a>( - pub &'a runtime_types::polkadot_parachain::primitives::HrmpChannelId, - ); - impl ::subxt::StorageEntry for HrmpChannels<'_> { - const PALLET: &'static str = "Hrmp"; - const STORAGE: &'static str = "HrmpChannels"; - type Value = runtime_types::polkadot_runtime_parachains::hrmp::HrmpChannel; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( - &self.0, - ::subxt::StorageHasher::Twox64Concat, - )]) - } - } - pub struct HrmpIngressChannelsIndex<'a>( - pub &'a runtime_types::polkadot_parachain::primitives::Id, - ); - impl ::subxt::StorageEntry for HrmpIngressChannelsIndex<'_> { - const PALLET: &'static str = "Hrmp"; - const STORAGE: &'static str = "HrmpIngressChannelsIndex"; - type Value = ::std::vec::Vec; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( - &self.0, - ::subxt::StorageHasher::Twox64Concat, - )]) - } - } - pub struct HrmpEgressChannelsIndex<'a>( - pub &'a runtime_types::polkadot_parachain::primitives::Id, - ); - impl ::subxt::StorageEntry for HrmpEgressChannelsIndex<'_> { - const PALLET: &'static str = "Hrmp"; - const STORAGE: &'static str = "HrmpEgressChannelsIndex"; - type Value = ::std::vec::Vec; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( - &self.0, - ::subxt::StorageHasher::Twox64Concat, - )]) - } - } - pub struct HrmpChannelContents<'a>( - pub &'a runtime_types::polkadot_parachain::primitives::HrmpChannelId, - ); - impl ::subxt::StorageEntry for HrmpChannelContents<'_> { - const PALLET: &'static str = "Hrmp"; - const STORAGE: &'static str = "HrmpChannelContents"; - type Value = ::std::vec::Vec< - runtime_types::polkadot_core_primitives::InboundHrmpMessage< - ::core::primitive::u32, - >, - >; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( - &self.0, - ::subxt::StorageHasher::Twox64Concat, - )]) - } - } - pub struct HrmpChannelDigests<'a>( - pub &'a runtime_types::polkadot_parachain::primitives::Id, - ); - impl ::subxt::StorageEntry for HrmpChannelDigests<'_> { - const PALLET: &'static str = "Hrmp"; - const STORAGE: &'static str = "HrmpChannelDigests"; - type Value = ::std::vec::Vec<( - ::core::primitive::u32, - ::std::vec::Vec, - )>; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( - &self.0, - ::subxt::StorageHasher::Twox64Concat, - )]) - } - } - pub struct StorageApi<'a, T: ::subxt::Config> { - client: &'a ::subxt::Client, - } - impl<'a, T: ::subxt::Config> StorageApi<'a, T> { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { client } - } - #[doc = " The set of pending HRMP open channel requests."] - #[doc = ""] - #[doc = " The set is accompanied by a list for iteration."] - #[doc = ""] - #[doc = " Invariant:"] - #[doc = " - There are no channels that exists in list but not in the set and vice versa."] - pub async fn hrmp_open_channel_requests( - &self, - _0: &runtime_types::polkadot_parachain::primitives::HrmpChannelId, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::core::option::Option< - runtime_types::polkadot_runtime_parachains::hrmp::HrmpOpenChannelRequest, - >, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .storage_hash::()? 
- == [ - 58u8, 216u8, 106u8, 4u8, 117u8, 77u8, 168u8, 230u8, 50u8, 6u8, 175u8, - 26u8, 110u8, 45u8, 143u8, 207u8, 174u8, 77u8, 5u8, 245u8, 172u8, 114u8, - 20u8, 229u8, 153u8, 137u8, 220u8, 189u8, 155u8, 5u8, 116u8, 236u8, - ] - { - let entry = HrmpOpenChannelRequests(_0); - self.client.storage().fetch(&entry, block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The set of pending HRMP open channel requests."] - #[doc = ""] - #[doc = " The set is accompanied by a list for iteration."] - #[doc = ""] - #[doc = " Invariant:"] - #[doc = " - There are no channels that exists in list but not in the set and vice versa."] - pub async fn hrmp_open_channel_requests_iter( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::subxt::KeyIter<'a, T, HrmpOpenChannelRequests<'a>>, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .storage_hash::()? - == [ - 58u8, 216u8, 106u8, 4u8, 117u8, 77u8, 168u8, 230u8, 50u8, 6u8, 175u8, - 26u8, 110u8, 45u8, 143u8, 207u8, 174u8, 77u8, 5u8, 245u8, 172u8, 114u8, - 20u8, 229u8, 153u8, 137u8, 220u8, 189u8, 155u8, 5u8, 116u8, 236u8, - ] - { - self.client.storage().iter(block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - pub async fn hrmp_open_channel_requests_list( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::std::vec::Vec, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .storage_hash::()? - == [ - 176u8, 22u8, 136u8, 206u8, 243u8, 208u8, 67u8, 150u8, 187u8, 163u8, - 141u8, 37u8, 235u8, 84u8, 176u8, 63u8, 55u8, 38u8, 215u8, 185u8, 206u8, - 127u8, 37u8, 108u8, 245u8, 237u8, 154u8, 151u8, 111u8, 33u8, 39u8, - 102u8, - ] - { - let entry = HrmpOpenChannelRequestsList; - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " This mapping tracks how many open channel requests are initiated by a given sender para."] - #[doc = " Invariant: `HrmpOpenChannelRequests` should contain the same number of items that has"] - #[doc = " `(X, _)` as the number of `HrmpOpenChannelRequestCount` for `X`."] - pub async fn hrmp_open_channel_request_count( - &self, - _0: &runtime_types::polkadot_parachain::primitives::Id, - block_hash: ::core::option::Option, - ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> - { - if self - .client - .metadata() - .storage_hash::()? - == [ - 103u8, 47u8, 152u8, 1u8, 119u8, 244u8, 62u8, 249u8, 141u8, 194u8, - 157u8, 149u8, 58u8, 208u8, 113u8, 77u8, 4u8, 248u8, 114u8, 94u8, 153u8, - 20u8, 179u8, 4u8, 43u8, 32u8, 248u8, 118u8, 115u8, 206u8, 228u8, 28u8, - ] - { - let entry = HrmpOpenChannelRequestCount(_0); - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " This mapping tracks how many open channel requests are initiated by a given sender para."] - #[doc = " Invariant: `HrmpOpenChannelRequests` should contain the same number of items that has"] - #[doc = " `(X, _)` as the number of `HrmpOpenChannelRequestCount` for `X`."] - pub async fn hrmp_open_channel_request_count_iter( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::subxt::KeyIter<'a, T, HrmpOpenChannelRequestCount<'a>>, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .storage_hash::()? 
- == [ - 103u8, 47u8, 152u8, 1u8, 119u8, 244u8, 62u8, 249u8, 141u8, 194u8, - 157u8, 149u8, 58u8, 208u8, 113u8, 77u8, 4u8, 248u8, 114u8, 94u8, 153u8, - 20u8, 179u8, 4u8, 43u8, 32u8, 248u8, 118u8, 115u8, 206u8, 228u8, 28u8, - ] - { - self.client.storage().iter(block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " This mapping tracks how many open channel requests were accepted by a given recipient para."] - #[doc = " Invariant: `HrmpOpenChannelRequests` should contain the same number of items `(_, X)` with"] - #[doc = " `confirmed` set to true, as the number of `HrmpAcceptedChannelRequestCount` for `X`."] - pub async fn hrmp_accepted_channel_request_count( - &self, - _0: &runtime_types::polkadot_parachain::primitives::Id, - block_hash: ::core::option::Option, - ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> - { - if self - .client - .metadata() - .storage_hash::()? - == [ - 166u8, 207u8, 97u8, 222u8, 30u8, 204u8, 203u8, 122u8, 72u8, 66u8, - 247u8, 169u8, 128u8, 122u8, 145u8, 124u8, 214u8, 183u8, 251u8, 85u8, - 93u8, 37u8, 143u8, 71u8, 45u8, 61u8, 168u8, 211u8, 222u8, 58u8, 91u8, - 202u8, - ] - { - let entry = HrmpAcceptedChannelRequestCount(_0); - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " This mapping tracks how many open channel requests were accepted by a given recipient para."] - #[doc = " Invariant: `HrmpOpenChannelRequests` should contain the same number of items `(_, X)` with"] - #[doc = " `confirmed` set to true, as the number of `HrmpAcceptedChannelRequestCount` for `X`."] - pub async fn hrmp_accepted_channel_request_count_iter( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::subxt::KeyIter<'a, T, HrmpAcceptedChannelRequestCount<'a>>, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .storage_hash::()? - == [ - 166u8, 207u8, 97u8, 222u8, 30u8, 204u8, 203u8, 122u8, 72u8, 66u8, - 247u8, 169u8, 128u8, 122u8, 145u8, 124u8, 214u8, 183u8, 251u8, 85u8, - 93u8, 37u8, 143u8, 71u8, 45u8, 61u8, 168u8, 211u8, 222u8, 58u8, 91u8, - 202u8, - ] - { - self.client.storage().iter(block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " A set of pending HRMP close channel requests that are going to be closed during the session"] - #[doc = " change. Used for checking if a given channel is registered for closure."] - #[doc = ""] - #[doc = " The set is accompanied by a list for iteration."] - #[doc = ""] - #[doc = " Invariant:"] - #[doc = " - There are no channels that exists in list but not in the set and vice versa."] - pub async fn hrmp_close_channel_requests( - &self, - _0: &runtime_types::polkadot_parachain::primitives::HrmpChannelId, - block_hash: ::core::option::Option, - ) -> ::core::result::Result<::core::option::Option<()>, ::subxt::BasicError> - { - if self - .client - .metadata() - .storage_hash::()? 
- == [ - 118u8, 8u8, 142u8, 158u8, 184u8, 200u8, 38u8, 112u8, 217u8, 69u8, - 161u8, 255u8, 116u8, 143u8, 94u8, 185u8, 95u8, 247u8, 227u8, 101u8, - 107u8, 55u8, 172u8, 164u8, 58u8, 182u8, 193u8, 140u8, 142u8, 118u8, - 223u8, 240u8, - ] - { - let entry = HrmpCloseChannelRequests(_0); - self.client.storage().fetch(&entry, block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " A set of pending HRMP close channel requests that are going to be closed during the session"] - #[doc = " change. Used for checking if a given channel is registered for closure."] - #[doc = ""] - #[doc = " The set is accompanied by a list for iteration."] - #[doc = ""] - #[doc = " Invariant:"] - #[doc = " - There are no channels that exists in list but not in the set and vice versa."] - pub async fn hrmp_close_channel_requests_iter( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::subxt::KeyIter<'a, T, HrmpCloseChannelRequests<'a>>, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .storage_hash::()? - == [ - 118u8, 8u8, 142u8, 158u8, 184u8, 200u8, 38u8, 112u8, 217u8, 69u8, - 161u8, 255u8, 116u8, 143u8, 94u8, 185u8, 95u8, 247u8, 227u8, 101u8, - 107u8, 55u8, 172u8, 164u8, 58u8, 182u8, 193u8, 140u8, 142u8, 118u8, - 223u8, 240u8, - ] - { - self.client.storage().iter(block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - pub async fn hrmp_close_channel_requests_list( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::std::vec::Vec, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .storage_hash::()? - == [ - 203u8, 46u8, 200u8, 63u8, 120u8, 238u8, 88u8, 170u8, 239u8, 27u8, 99u8, - 104u8, 254u8, 194u8, 152u8, 221u8, 126u8, 188u8, 2u8, 153u8, 79u8, - 183u8, 236u8, 145u8, 120u8, 151u8, 235u8, 56u8, 130u8, 240u8, 74u8, - 211u8, - ] - { - let entry = HrmpCloseChannelRequestsList; - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The HRMP watermark associated with each para."] - #[doc = " Invariant:"] - #[doc = " - each para `P` used here as a key should satisfy `Paras::is_valid_para(P)` within a session."] - pub async fn hrmp_watermarks( - &self, - _0: &runtime_types::polkadot_parachain::primitives::Id, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::core::option::Option<::core::primitive::u32>, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 28u8, 187u8, 5u8, 0u8, 130u8, 11u8, 241u8, 171u8, 141u8, 109u8, 236u8, - 151u8, 194u8, 124u8, 172u8, 180u8, 36u8, 144u8, 134u8, 53u8, 162u8, - 247u8, 138u8, 209u8, 99u8, 194u8, 213u8, 100u8, 254u8, 15u8, 51u8, - 94u8, - ] - { - let entry = HrmpWatermarks(_0); - self.client.storage().fetch(&entry, block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The HRMP watermark associated with each para."] - #[doc = " Invariant:"] - #[doc = " - each para `P` used here as a key should satisfy `Paras::is_valid_para(P)` within a session."] - pub async fn hrmp_watermarks_iter( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::subxt::KeyIter<'a, T, HrmpWatermarks<'a>>, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? 
- == [ - 28u8, 187u8, 5u8, 0u8, 130u8, 11u8, 241u8, 171u8, 141u8, 109u8, 236u8, - 151u8, 194u8, 124u8, 172u8, 180u8, 36u8, 144u8, 134u8, 53u8, 162u8, - 247u8, 138u8, 209u8, 99u8, 194u8, 213u8, 100u8, 254u8, 15u8, 51u8, - 94u8, - ] - { - self.client.storage().iter(block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " HRMP channel data associated with each para."] - #[doc = " Invariant:"] - #[doc = " - each participant in the channel should satisfy `Paras::is_valid_para(P)` within a session."] - pub async fn hrmp_channels( - &self, - _0: &runtime_types::polkadot_parachain::primitives::HrmpChannelId, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::core::option::Option< - runtime_types::polkadot_runtime_parachains::hrmp::HrmpChannel, - >, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 146u8, 253u8, 102u8, 91u8, 69u8, 206u8, 61u8, 201u8, 63u8, 22u8, 119u8, - 249u8, 119u8, 232u8, 154u8, 132u8, 123u8, 244u8, 12u8, 61u8, 95u8, - 138u8, 104u8, 112u8, 157u8, 31u8, 39u8, 126u8, 184u8, 15u8, 33u8, - 171u8, - ] - { - let entry = HrmpChannels(_0); - self.client.storage().fetch(&entry, block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " HRMP channel data associated with each para."] - #[doc = " Invariant:"] - #[doc = " - each participant in the channel should satisfy `Paras::is_valid_para(P)` within a session."] - pub async fn hrmp_channels_iter( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::subxt::KeyIter<'a, T, HrmpChannels<'a>>, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 146u8, 253u8, 102u8, 91u8, 69u8, 206u8, 61u8, 201u8, 63u8, 22u8, 119u8, - 249u8, 119u8, 232u8, 154u8, 132u8, 123u8, 244u8, 12u8, 61u8, 95u8, - 138u8, 104u8, 112u8, 157u8, 31u8, 39u8, 126u8, 184u8, 15u8, 33u8, - 171u8, - ] - { - self.client.storage().iter(block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Ingress/egress indexes allow to find all the senders and receivers given the opposite side."] - #[doc = " I.e."] - #[doc = ""] - #[doc = " (a) ingress index allows to find all the senders for a given recipient."] - #[doc = " (b) egress index allows to find all the recipients for a given sender."] - #[doc = ""] - #[doc = " Invariants:"] - #[doc = " - for each ingress index entry for `P` each item `I` in the index should present in"] - #[doc = " `HrmpChannels` as `(I, P)`."] - #[doc = " - for each egress index entry for `P` each item `E` in the index should present in"] - #[doc = " `HrmpChannels` as `(P, E)`."] - #[doc = " - there should be no other dangling channels in `HrmpChannels`."] - #[doc = " - the vectors are sorted."] - pub async fn hrmp_ingress_channels_index( - &self, - _0: &runtime_types::polkadot_parachain::primitives::Id, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::std::vec::Vec, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .storage_hash::()? 
- == [ - 193u8, 185u8, 164u8, 194u8, 89u8, 218u8, 214u8, 184u8, 100u8, 238u8, - 232u8, 90u8, 243u8, 230u8, 93u8, 191u8, 197u8, 182u8, 215u8, 254u8, - 192u8, 11u8, 171u8, 211u8, 150u8, 210u8, 75u8, 216u8, 149u8, 60u8, - 49u8, 166u8, - ] - { - let entry = HrmpIngressChannelsIndex(_0); - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Ingress/egress indexes allow to find all the senders and receivers given the opposite side."] - #[doc = " I.e."] - #[doc = ""] - #[doc = " (a) ingress index allows to find all the senders for a given recipient."] - #[doc = " (b) egress index allows to find all the recipients for a given sender."] - #[doc = ""] - #[doc = " Invariants:"] - #[doc = " - for each ingress index entry for `P` each item `I` in the index should present in"] - #[doc = " `HrmpChannels` as `(I, P)`."] - #[doc = " - for each egress index entry for `P` each item `E` in the index should present in"] - #[doc = " `HrmpChannels` as `(P, E)`."] - #[doc = " - there should be no other dangling channels in `HrmpChannels`."] - #[doc = " - the vectors are sorted."] - pub async fn hrmp_ingress_channels_index_iter( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::subxt::KeyIter<'a, T, HrmpIngressChannelsIndex<'a>>, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .storage_hash::()? - == [ - 193u8, 185u8, 164u8, 194u8, 89u8, 218u8, 214u8, 184u8, 100u8, 238u8, - 232u8, 90u8, 243u8, 230u8, 93u8, 191u8, 197u8, 182u8, 215u8, 254u8, - 192u8, 11u8, 171u8, 211u8, 150u8, 210u8, 75u8, 216u8, 149u8, 60u8, - 49u8, 166u8, - ] - { - self.client.storage().iter(block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - pub async fn hrmp_egress_channels_index( - &self, - _0: &runtime_types::polkadot_parachain::primitives::Id, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::std::vec::Vec, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .storage_hash::()? - == [ - 242u8, 138u8, 89u8, 201u8, 60u8, 216u8, 73u8, 66u8, 167u8, 82u8, 225u8, - 42u8, 61u8, 50u8, 54u8, 187u8, 212u8, 8u8, 255u8, 183u8, 85u8, 180u8, - 176u8, 0u8, 226u8, 173u8, 45u8, 155u8, 172u8, 28u8, 229u8, 157u8, - ] - { - let entry = HrmpEgressChannelsIndex(_0); - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - pub async fn hrmp_egress_channels_index_iter( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::subxt::KeyIter<'a, T, HrmpEgressChannelsIndex<'a>>, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .storage_hash::()? 
- == [ - 242u8, 138u8, 89u8, 201u8, 60u8, 216u8, 73u8, 66u8, 167u8, 82u8, 225u8, - 42u8, 61u8, 50u8, 54u8, 187u8, 212u8, 8u8, 255u8, 183u8, 85u8, 180u8, - 176u8, 0u8, 226u8, 173u8, 45u8, 155u8, 172u8, 28u8, 229u8, 157u8, - ] - { - self.client.storage().iter(block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Storage for the messages for each channel."] - #[doc = " Invariant: cannot be non-empty if the corresponding channel in `HrmpChannels` is `None`."] - pub async fn hrmp_channel_contents( - &self, - _0: &runtime_types::polkadot_parachain::primitives::HrmpChannelId, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::std::vec::Vec< - runtime_types::polkadot_core_primitives::InboundHrmpMessage< - ::core::primitive::u32, - >, - >, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .storage_hash::()? - == [ - 71u8, 246u8, 41u8, 12u8, 125u8, 10u8, 60u8, 209u8, 14u8, 254u8, 125u8, - 217u8, 251u8, 172u8, 243u8, 73u8, 33u8, 230u8, 242u8, 16u8, 207u8, - 165u8, 33u8, 136u8, 78u8, 83u8, 206u8, 134u8, 65u8, 115u8, 166u8, - 192u8, - ] - { - let entry = HrmpChannelContents(_0); - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Storage for the messages for each channel."] - #[doc = " Invariant: cannot be non-empty if the corresponding channel in `HrmpChannels` is `None`."] - pub async fn hrmp_channel_contents_iter( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::subxt::KeyIter<'a, T, HrmpChannelContents<'a>>, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .storage_hash::()? - == [ - 71u8, 246u8, 41u8, 12u8, 125u8, 10u8, 60u8, 209u8, 14u8, 254u8, 125u8, - 217u8, 251u8, 172u8, 243u8, 73u8, 33u8, 230u8, 242u8, 16u8, 207u8, - 165u8, 33u8, 136u8, 78u8, 83u8, 206u8, 134u8, 65u8, 115u8, 166u8, - 192u8, - ] - { - self.client.storage().iter(block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Maintains a mapping that can be used to answer the question: What paras sent a message at"] - #[doc = " the given block number for a given receiver. Invariants:"] - #[doc = " - The inner `Vec` is never empty."] - #[doc = " - The inner `Vec` cannot store two same `ParaId`."] - #[doc = " - The outer vector is sorted ascending by block number and cannot store two items with the"] - #[doc = " same block number."] - pub async fn hrmp_channel_digests( - &self, - _0: &runtime_types::polkadot_parachain::primitives::Id, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::std::vec::Vec<( - ::core::primitive::u32, - ::std::vec::Vec, - )>, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .storage_hash::()? - == [ - 54u8, 106u8, 76u8, 21u8, 18u8, 49u8, 1u8, 34u8, 247u8, 101u8, 150u8, - 142u8, 214u8, 137u8, 193u8, 100u8, 208u8, 162u8, 55u8, 229u8, 203u8, - 36u8, 154u8, 138u8, 48u8, 204u8, 114u8, 243u8, 54u8, 185u8, 27u8, - 173u8, - ] - { - let entry = HrmpChannelDigests(_0); - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Maintains a mapping that can be used to answer the question: What paras sent a message at"] - #[doc = " the given block number for a given receiver. 
Invariants:"] - #[doc = " - The inner `Vec` is never empty."] - #[doc = " - The inner `Vec` cannot store two same `ParaId`."] - #[doc = " - The outer vector is sorted ascending by block number and cannot store two items with the"] - #[doc = " same block number."] - pub async fn hrmp_channel_digests_iter( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::subxt::KeyIter<'a, T, HrmpChannelDigests<'a>>, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .storage_hash::()? - == [ - 54u8, 106u8, 76u8, 21u8, 18u8, 49u8, 1u8, 34u8, 247u8, 101u8, 150u8, - 142u8, 214u8, 137u8, 193u8, 100u8, 208u8, 162u8, 55u8, 229u8, 203u8, - 36u8, 154u8, 138u8, 48u8, 204u8, 114u8, 243u8, 54u8, 185u8, 27u8, - 173u8, - ] - { - self.client.storage().iter(block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - } - pub mod para_session_info { - use super::root_mod; - use super::runtime_types; - pub mod storage { - use super::runtime_types; - pub struct AssignmentKeysUnsafe; - impl ::subxt::StorageEntry for AssignmentKeysUnsafe { - const PALLET: &'static str = "ParaSessionInfo"; - const STORAGE: &'static str = "AssignmentKeysUnsafe"; - type Value = - ::std::vec::Vec; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct EarliestStoredSession; - impl ::subxt::StorageEntry for EarliestStoredSession { - const PALLET: &'static str = "ParaSessionInfo"; - const STORAGE: &'static str = "EarliestStoredSession"; - type Value = ::core::primitive::u32; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct Sessions<'a>(pub &'a ::core::primitive::u32); - impl ::subxt::StorageEntry for Sessions<'_> { - const PALLET: &'static str = "ParaSessionInfo"; - const STORAGE: &'static str = "Sessions"; - type Value = runtime_types::polkadot_primitives::v2::SessionInfo; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( - &self.0, - ::subxt::StorageHasher::Identity, - )]) - } - } - pub struct StorageApi<'a, T: ::subxt::Config> { - client: &'a ::subxt::Client, - } - impl<'a, T: ::subxt::Config> StorageApi<'a, T> { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { client } - } - #[doc = " Assignment keys for the current session."] - #[doc = " Note that this API is private due to it being prone to 'off-by-one' at session boundaries."] - #[doc = " When in doubt, use `Sessions` API instead."] - pub async fn assignment_keys_unsafe( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::std::vec::Vec, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .storage_hash::()? - == [ - 150u8, 56u8, 43u8, 74u8, 246u8, 13u8, 148u8, 80u8, 105u8, 17u8, 36u8, - 246u8, 229u8, 105u8, 156u8, 206u8, 206u8, 77u8, 240u8, 24u8, 127u8, - 200u8, 14u8, 144u8, 246u8, 88u8, 173u8, 111u8, 176u8, 208u8, 31u8, - 248u8, - ] - { - let entry = AssignmentKeysUnsafe; - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The earliest session for which previous session info is stored."] - pub async fn earliest_stored_session( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> - { - if self - .client - .metadata() - .storage_hash::()? 
- == [ - 25u8, 143u8, 246u8, 184u8, 35u8, 166u8, 140u8, 147u8, 171u8, 5u8, - 164u8, 159u8, 228u8, 21u8, 248u8, 236u8, 48u8, 210u8, 133u8, 140u8, - 171u8, 3u8, 85u8, 250u8, 160u8, 102u8, 95u8, 46u8, 33u8, 81u8, 102u8, - 241u8, - ] - { - let entry = EarliestStoredSession; - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Session information in a rolling window."] - #[doc = " Should have an entry in range `EarliestStoredSession..=CurrentSessionIndex`."] - #[doc = " Does not have any entries before the session index in the first session change notification."] - pub async fn sessions( - &self, - _0: &::core::primitive::u32, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::core::option::Option, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 163u8, 206u8, 134u8, 169u8, 87u8, 219u8, 254u8, 50u8, 16u8, 32u8, - 247u8, 205u8, 100u8, 140u8, 177u8, 89u8, 128u8, 178u8, 126u8, 175u8, - 198u8, 39u8, 251u8, 145u8, 92u8, 90u8, 10u8, 27u8, 62u8, 95u8, 128u8, - 168u8, - ] - { - let entry = Sessions(_0); - self.client.storage().fetch(&entry, block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Session information in a rolling window."] - #[doc = " Should have an entry in range `EarliestStoredSession..=CurrentSessionIndex`."] - #[doc = " Does not have any entries before the session index in the first session change notification."] - pub async fn sessions_iter( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::subxt::KeyIter<'a, T, Sessions<'a>>, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 163u8, 206u8, 134u8, 169u8, 87u8, 219u8, 254u8, 50u8, 16u8, 32u8, - 247u8, 205u8, 100u8, 140u8, 177u8, 89u8, 128u8, 178u8, 126u8, 175u8, - 198u8, 39u8, 251u8, 145u8, 92u8, 90u8, 10u8, 27u8, 62u8, 95u8, 128u8, - 168u8, - ] - { - self.client.storage().iter(block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - } - pub mod paras_disputes { - use super::root_mod; - use super::runtime_types; - pub mod calls { - use super::root_mod; - use super::runtime_types; - type DispatchError = runtime_types::sp_runtime::DispatchError; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct ForceUnfreeze; - impl ::subxt::Call for ForceUnfreeze { - const PALLET: &'static str = "ParasDisputes"; - const FUNCTION: &'static str = "force_unfreeze"; - } - pub struct TransactionApi<'a, T: ::subxt::Config, X> { - client: &'a ::subxt::Client, - marker: ::core::marker::PhantomData, - } - impl<'a, T, X> TransactionApi<'a, T, X> - where - T: ::subxt::Config, - X: ::subxt::extrinsic::ExtrinsicParams, - { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { - client, - marker: ::core::marker::PhantomData, - } - } - pub fn force_unfreeze( - &self, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - ForceUnfreeze, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? 
- == [ - 212u8, 211u8, 58u8, 159u8, 23u8, 220u8, 64u8, 175u8, 65u8, 50u8, 192u8, - 122u8, 113u8, 189u8, 74u8, 191u8, 48u8, 93u8, 251u8, 50u8, 237u8, - 240u8, 91u8, 139u8, 193u8, 114u8, 131u8, 125u8, 124u8, 236u8, 191u8, - 190u8, - ] - { - let call = ForceUnfreeze {}; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - pub type Event = runtime_types::polkadot_runtime_parachains::disputes::pallet::Event; - pub mod events { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "A dispute has been initiated. \\[candidate hash, dispute location\\]"] - pub struct DisputeInitiated( - pub runtime_types::polkadot_core_primitives::CandidateHash, - pub runtime_types::polkadot_runtime_parachains::disputes::DisputeLocation, - ); - impl ::subxt::Event for DisputeInitiated { - const PALLET: &'static str = "ParasDisputes"; - const EVENT: &'static str = "DisputeInitiated"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "A dispute has concluded for or against a candidate."] - #[doc = "`\\[para id, candidate hash, dispute result\\]`"] - pub struct DisputeConcluded( - pub runtime_types::polkadot_core_primitives::CandidateHash, - pub runtime_types::polkadot_runtime_parachains::disputes::DisputeResult, - ); - impl ::subxt::Event for DisputeConcluded { - const PALLET: &'static str = "ParasDisputes"; - const EVENT: &'static str = "DisputeConcluded"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "A dispute has timed out due to insufficient participation."] - #[doc = "`\\[para id, candidate hash\\]`"] - pub struct DisputeTimedOut(pub runtime_types::polkadot_core_primitives::CandidateHash); - impl ::subxt::Event for DisputeTimedOut { - const PALLET: &'static str = "ParasDisputes"; - const EVENT: &'static str = "DisputeTimedOut"; - } - #[derive( - :: subxt :: codec :: CompactAs, - :: subxt :: codec :: Decode, - :: subxt :: codec :: Encode, - Debug, - )] - #[doc = "A dispute has concluded with supermajority against a candidate."] - #[doc = "Block authors should no longer build on top of this head and should"] - #[doc = "instead revert the block at the given height. 
This should be the"] - #[doc = "number of the child of the last known valid block in the chain."] - pub struct Revert(pub ::core::primitive::u32); - impl ::subxt::Event for Revert { - const PALLET: &'static str = "ParasDisputes"; - const EVENT: &'static str = "Revert"; - } - } - pub mod storage { - use super::runtime_types; - pub struct LastPrunedSession; - impl ::subxt::StorageEntry for LastPrunedSession { - const PALLET: &'static str = "ParasDisputes"; - const STORAGE: &'static str = "LastPrunedSession"; - type Value = ::core::primitive::u32; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct Disputes<'a>( - pub &'a ::core::primitive::u32, - pub &'a runtime_types::polkadot_core_primitives::CandidateHash, - ); - impl ::subxt::StorageEntry for Disputes<'_> { - const PALLET: &'static str = "ParasDisputes"; - const STORAGE: &'static str = "Disputes"; - type Value = - runtime_types::polkadot_primitives::v2::DisputeState<::core::primitive::u32>; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Map(vec![ - ::subxt::StorageMapKey::new(&self.0, ::subxt::StorageHasher::Twox64Concat), - ::subxt::StorageMapKey::new( - &self.1, - ::subxt::StorageHasher::Blake2_128Concat, - ), - ]) - } - } - pub struct Included<'a>( - pub &'a ::core::primitive::u32, - pub &'a runtime_types::polkadot_core_primitives::CandidateHash, - ); - impl ::subxt::StorageEntry for Included<'_> { - const PALLET: &'static str = "ParasDisputes"; - const STORAGE: &'static str = "Included"; - type Value = ::core::primitive::u32; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Map(vec![ - ::subxt::StorageMapKey::new(&self.0, ::subxt::StorageHasher::Twox64Concat), - ::subxt::StorageMapKey::new( - &self.1, - ::subxt::StorageHasher::Blake2_128Concat, - ), - ]) - } - } - pub struct SpamSlots<'a>(pub &'a ::core::primitive::u32); - impl ::subxt::StorageEntry for SpamSlots<'_> { - const PALLET: &'static str = "ParasDisputes"; - const STORAGE: &'static str = "SpamSlots"; - type Value = ::std::vec::Vec<::core::primitive::u32>; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( - &self.0, - ::subxt::StorageHasher::Twox64Concat, - )]) - } - } - pub struct Frozen; - impl ::subxt::StorageEntry for Frozen { - const PALLET: &'static str = "ParasDisputes"; - const STORAGE: &'static str = "Frozen"; - type Value = ::core::option::Option<::core::primitive::u32>; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct StorageApi<'a, T: ::subxt::Config> { - client: &'a ::subxt::Client, - } - impl<'a, T: ::subxt::Config> StorageApi<'a, T> { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { client } - } - #[doc = " The last pruned session, if any. All data stored by this module"] - #[doc = " references sessions."] - pub async fn last_pruned_session( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::core::option::Option<::core::primitive::u32>, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? 
- == [ - 125u8, 138u8, 99u8, 242u8, 9u8, 246u8, 215u8, 246u8, 141u8, 6u8, 129u8, - 87u8, 27u8, 58u8, 53u8, 121u8, 61u8, 119u8, 35u8, 104u8, 33u8, 43u8, - 179u8, 82u8, 244u8, 121u8, 174u8, 135u8, 87u8, 119u8, 236u8, 105u8, - ] - { - let entry = LastPrunedSession; - self.client.storage().fetch(&entry, block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " All ongoing or concluded disputes for the last several sessions."] - pub async fn disputes( - &self, - _0: &::core::primitive::u32, - _1: &runtime_types::polkadot_core_primitives::CandidateHash, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::core::option::Option< - runtime_types::polkadot_primitives::v2::DisputeState< - ::core::primitive::u32, - >, - >, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 157u8, 84u8, 172u8, 11u8, 64u8, 109u8, 34u8, 117u8, 91u8, 57u8, 117u8, - 163u8, 65u8, 172u8, 97u8, 39u8, 27u8, 10u8, 125u8, 194u8, 12u8, 252u8, - 180u8, 223u8, 118u8, 150u8, 160u8, 143u8, 217u8, 178u8, 28u8, 93u8, - ] - { - let entry = Disputes(_0, _1); - self.client.storage().fetch(&entry, block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " All ongoing or concluded disputes for the last several sessions."] - pub async fn disputes_iter( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::subxt::KeyIter<'a, T, Disputes<'a>>, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 157u8, 84u8, 172u8, 11u8, 64u8, 109u8, 34u8, 117u8, 91u8, 57u8, 117u8, - 163u8, 65u8, 172u8, 97u8, 39u8, 27u8, 10u8, 125u8, 194u8, 12u8, 252u8, - 180u8, 223u8, 118u8, 150u8, 160u8, 143u8, 217u8, 178u8, 28u8, 93u8, - ] - { - self.client.storage().iter(block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " All included blocks on the chain, as well as the block number in this chain that"] - #[doc = " should be reverted back to if the candidate is disputed and determined to be invalid."] - pub async fn included( - &self, - _0: &::core::primitive::u32, - _1: &runtime_types::polkadot_core_primitives::CandidateHash, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::core::option::Option<::core::primitive::u32>, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 152u8, 13u8, 87u8, 4u8, 129u8, 181u8, 136u8, 38u8, 235u8, 70u8, 0u8, - 166u8, 190u8, 30u8, 247u8, 188u8, 192u8, 114u8, 13u8, 125u8, 254u8, - 120u8, 57u8, 91u8, 28u8, 160u8, 194u8, 242u8, 116u8, 146u8, 217u8, - 91u8, - ] - { - let entry = Included(_0, _1); - self.client.storage().fetch(&entry, block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " All included blocks on the chain, as well as the block number in this chain that"] - #[doc = " should be reverted back to if the candidate is disputed and determined to be invalid."] - pub async fn included_iter( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::subxt::KeyIter<'a, T, Included<'a>>, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? 
- == [ - 152u8, 13u8, 87u8, 4u8, 129u8, 181u8, 136u8, 38u8, 235u8, 70u8, 0u8, - 166u8, 190u8, 30u8, 247u8, 188u8, 192u8, 114u8, 13u8, 125u8, 254u8, - 120u8, 57u8, 91u8, 28u8, 160u8, 194u8, 242u8, 116u8, 146u8, 217u8, - 91u8, - ] - { - self.client.storage().iter(block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Maps session indices to a vector indicating the number of potentially-spam disputes"] - #[doc = " each validator is participating in. Potentially-spam disputes are remote disputes which have"] - #[doc = " fewer than `byzantine_threshold + 1` validators."] - #[doc = ""] - #[doc = " The i'th entry of the vector corresponds to the i'th validator in the session."] - pub async fn spam_slots( - &self, - _0: &::core::primitive::u32, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::core::option::Option<::std::vec::Vec<::core::primitive::u32>>, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 172u8, 23u8, 120u8, 188u8, 71u8, 248u8, 252u8, 41u8, 132u8, 221u8, - 98u8, 215u8, 33u8, 242u8, 168u8, 196u8, 90u8, 123u8, 190u8, 27u8, - 147u8, 6u8, 196u8, 175u8, 198u8, 216u8, 50u8, 74u8, 138u8, 122u8, - 251u8, 238u8, - ] - { - let entry = SpamSlots(_0); - self.client.storage().fetch(&entry, block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Maps session indices to a vector indicating the number of potentially-spam disputes"] - #[doc = " each validator is participating in. Potentially-spam disputes are remote disputes which have"] - #[doc = " fewer than `byzantine_threshold + 1` validators."] - #[doc = ""] - #[doc = " The i'th entry of the vector corresponds to the i'th validator in the session."] - pub async fn spam_slots_iter( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::subxt::KeyIter<'a, T, SpamSlots<'a>>, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 172u8, 23u8, 120u8, 188u8, 71u8, 248u8, 252u8, 41u8, 132u8, 221u8, - 98u8, 215u8, 33u8, 242u8, 168u8, 196u8, 90u8, 123u8, 190u8, 27u8, - 147u8, 6u8, 196u8, 175u8, 198u8, 216u8, 50u8, 74u8, 138u8, 122u8, - 251u8, 238u8, - ] - { - self.client.storage().iter(block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Whether the chain is frozen. Starts as `None`. When this is `Some`,"] - #[doc = " the chain will not accept any new parachain blocks for backing or inclusion,"] - #[doc = " and its value indicates the last valid block number in the chain."] - #[doc = " It can only be set back to `None` by governance intervention."] - pub async fn frozen( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::core::option::Option<::core::primitive::u32>, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? 
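// Illustrative sketch, not part of the generated bindings; `fetch_dispute_state` is a
// hypothetical helper reading the dispute state for one (session, candidate) pair through
// the ParasDisputes `StorageApi` above. A `None` result simply means no dispute was
// recorded for that pair in the stored sessions.
async fn fetch_dispute_state<'a, T: ::subxt::Config>(
    // The ParasDisputes `StorageApi`, constructed elsewhere via `StorageApi::new(client)`.
    disputes_storage: &storage::StorageApi<'a, T>,
    session: ::core::primitive::u32,
    candidate: &runtime_types::polkadot_core_primitives::CandidateHash,
) -> ::core::result::Result<
    ::core::option::Option<
        runtime_types::polkadot_primitives::v2::DisputeState<::core::primitive::u32>,
    >,
    ::subxt::BasicError,
> {
    disputes_storage.disputes(&session, candidate, None).await
}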
- == [ - 133u8, 100u8, 86u8, 220u8, 180u8, 189u8, 65u8, 131u8, 64u8, 56u8, - 219u8, 47u8, 130u8, 167u8, 210u8, 125u8, 49u8, 7u8, 153u8, 254u8, 20u8, - 53u8, 218u8, 177u8, 122u8, 148u8, 16u8, 198u8, 251u8, 50u8, 194u8, - 128u8, - ] - { - let entry = Frozen; - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - } - pub mod registrar { - use super::root_mod; - use super::runtime_types; - pub mod calls { - use super::root_mod; - use super::runtime_types; - type DispatchError = runtime_types::sp_runtime::DispatchError; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct Register { - pub id: runtime_types::polkadot_parachain::primitives::Id, - pub genesis_head: runtime_types::polkadot_parachain::primitives::HeadData, - pub validation_code: runtime_types::polkadot_parachain::primitives::ValidationCode, - } - impl ::subxt::Call for Register { - const PALLET: &'static str = "Registrar"; - const FUNCTION: &'static str = "register"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct ForceRegister { - pub who: ::subxt::sp_core::crypto::AccountId32, - pub deposit: ::core::primitive::u128, - pub id: runtime_types::polkadot_parachain::primitives::Id, - pub genesis_head: runtime_types::polkadot_parachain::primitives::HeadData, - pub validation_code: runtime_types::polkadot_parachain::primitives::ValidationCode, - } - impl ::subxt::Call for ForceRegister { - const PALLET: &'static str = "Registrar"; - const FUNCTION: &'static str = "force_register"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct Deregister { - pub id: runtime_types::polkadot_parachain::primitives::Id, - } - impl ::subxt::Call for Deregister { - const PALLET: &'static str = "Registrar"; - const FUNCTION: &'static str = "deregister"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct Swap { - pub id: runtime_types::polkadot_parachain::primitives::Id, - pub other: runtime_types::polkadot_parachain::primitives::Id, - } - impl ::subxt::Call for Swap { - const PALLET: &'static str = "Registrar"; - const FUNCTION: &'static str = "swap"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct ForceRemoveLock { - pub para: runtime_types::polkadot_parachain::primitives::Id, - } - impl ::subxt::Call for ForceRemoveLock { - const PALLET: &'static str = "Registrar"; - const FUNCTION: &'static str = "force_remove_lock"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct Reserve; - impl ::subxt::Call for Reserve { - const PALLET: &'static str = "Registrar"; - const FUNCTION: &'static str = "reserve"; - } - pub struct TransactionApi<'a, T: ::subxt::Config, X> { - client: &'a ::subxt::Client, - marker: ::core::marker::PhantomData, - } - impl<'a, T, X> TransactionApi<'a, T, X> - where - T: ::subxt::Config, - X: ::subxt::extrinsic::ExtrinsicParams, - { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { - client, - marker: ::core::marker::PhantomData, - } - } - #[doc = "Register head data and validation code for a reserved Para Id."] - #[doc = ""] - #[doc = "## Arguments"] - #[doc = "- `origin`: Must be called by a `Signed` origin."] - #[doc = "- `id`: The para ID. 
Must be owned/managed by the `origin` signing account."] - #[doc = "- `genesis_head`: The genesis head data of the parachain/thread."] - #[doc = "- `validation_code`: The initial validation code of the parachain/thread."] - #[doc = ""] - #[doc = "## Deposits/Fees"] - #[doc = "The origin signed account must reserve a corresponding deposit for the registration. Anything already"] - #[doc = "reserved previously for this para ID is accounted for."] - #[doc = ""] - #[doc = "## Events"] - #[doc = "The `Registered` event is emitted in case of success."] - pub fn register( - &self, - id: runtime_types::polkadot_parachain::primitives::Id, - genesis_head: runtime_types::polkadot_parachain::primitives::HeadData, - validation_code: runtime_types::polkadot_parachain::primitives::ValidationCode, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - Register, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 180u8, 21u8, 142u8, 73u8, 21u8, 31u8, 64u8, 210u8, 196u8, 4u8, 142u8, - 153u8, 172u8, 207u8, 95u8, 209u8, 177u8, 75u8, 202u8, 85u8, 95u8, - 208u8, 123u8, 237u8, 190u8, 148u8, 5u8, 64u8, 65u8, 191u8, 221u8, - 203u8, - ] - { - let call = Register { - id, - genesis_head, - validation_code, - }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Force the registration of a Para Id on the relay chain."] - #[doc = ""] - #[doc = "This function must be called by a Root origin."] - #[doc = ""] - #[doc = "The deposit taken can be specified for this registration. Any `ParaId`"] - #[doc = "can be registered, including sub-1000 IDs which are System Parachains."] - pub fn force_register( - &self, - who: ::subxt::sp_core::crypto::AccountId32, - deposit: ::core::primitive::u128, - id: runtime_types::polkadot_parachain::primitives::Id, - genesis_head: runtime_types::polkadot_parachain::primitives::HeadData, - validation_code: runtime_types::polkadot_parachain::primitives::ValidationCode, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - ForceRegister, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 191u8, 198u8, 172u8, 68u8, 118u8, 126u8, 110u8, 47u8, 193u8, 147u8, - 61u8, 27u8, 122u8, 107u8, 49u8, 222u8, 87u8, 199u8, 184u8, 247u8, - 153u8, 137u8, 205u8, 153u8, 6u8, 15u8, 246u8, 8u8, 36u8, 76u8, 54u8, - 63u8, - ] - { - let call = ForceRegister { - who, - deposit, - id, - genesis_head, - validation_code, - }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Deregister a Para Id, freeing all data and returning any deposit."] - #[doc = ""] - #[doc = "The caller must be Root, the `para` owner, or the `para` itself. The para must be a parathread."] - pub fn deregister( - &self, - id: runtime_types::polkadot_parachain::primitives::Id, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - Deregister, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? 
- == [ - 147u8, 4u8, 172u8, 215u8, 67u8, 142u8, 93u8, 245u8, 108u8, 83u8, 5u8, - 250u8, 87u8, 138u8, 231u8, 10u8, 159u8, 216u8, 85u8, 233u8, 244u8, - 200u8, 37u8, 33u8, 160u8, 143u8, 119u8, 11u8, 70u8, 177u8, 8u8, 123u8, - ] - { - let call = Deregister { id }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Swap a parachain with another parachain or parathread."] - #[doc = ""] - #[doc = "The origin must be Root, the `para` owner, or the `para` itself."] - #[doc = ""] - #[doc = "The swap will happen only if there is already an opposite swap pending. If there is not,"] - #[doc = "the swap will be stored in the pending swaps map, ready for a later confirmatory swap."] - #[doc = ""] - #[doc = "The `ParaId`s remain mapped to the same head data and code so external code can rely on"] - #[doc = "`ParaId` to be a long-term identifier of a notional \"parachain\". However, their"] - #[doc = "scheduling info (i.e. whether they're a parathread or parachain), auction information"] - #[doc = "and the auction deposit are switched."] - pub fn swap( - &self, - id: runtime_types::polkadot_parachain::primitives::Id, - other: runtime_types::polkadot_parachain::primitives::Id, - ) -> Result< - ::subxt::SubmittableExtrinsic<'a, T, X, Swap, DispatchError, root_mod::Event>, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 145u8, 163u8, 246u8, 239u8, 241u8, 209u8, 58u8, 241u8, 63u8, 134u8, - 102u8, 55u8, 217u8, 125u8, 176u8, 91u8, 27u8, 32u8, 220u8, 236u8, 18u8, - 20u8, 7u8, 187u8, 100u8, 116u8, 161u8, 133u8, 127u8, 187u8, 86u8, - 109u8, - ] - { - let call = Swap { id, other }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Remove a manager lock from a para. This will allow the manager of a"] - #[doc = "previously locked para to deregister or swap a para without using governance."] - #[doc = ""] - #[doc = "Can only be called by the Root origin."] - pub fn force_remove_lock( - &self, - para: runtime_types::polkadot_parachain::primitives::Id, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - ForceRemoveLock, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 205u8, 174u8, 132u8, 188u8, 1u8, 59u8, 82u8, 135u8, 123u8, 55u8, 144u8, - 39u8, 205u8, 171u8, 13u8, 252u8, 65u8, 56u8, 98u8, 216u8, 23u8, 175u8, - 16u8, 200u8, 198u8, 252u8, 133u8, 238u8, 81u8, 142u8, 254u8, 124u8, - ] - { - let call = ForceRemoveLock { para }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Reserve a Para Id on the relay chain."] - #[doc = ""] - #[doc = "This function will reserve a new Para Id to be owned/managed by the origin account."] - #[doc = "The origin account is able to register head data and validation code using `register` to create"] - #[doc = "a parathread. Using the Slots pallet, a parathread can then be upgraded to get a parachain slot."] - #[doc = ""] - #[doc = "## Arguments"] - #[doc = "- `origin`: Must be called by a `Signed` origin. 
Becomes the manager/owner of the new para ID."] - #[doc = ""] - #[doc = "## Deposits/Fees"] - #[doc = "The origin must reserve a deposit of `ParaDeposit` for the registration."] - #[doc = ""] - #[doc = "## Events"] - #[doc = "The `Reserved` event is emitted in case of success, which provides the ID reserved for use."] - pub fn reserve( - &self, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - Reserve, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 22u8, 210u8, 13u8, 54u8, 253u8, 13u8, 89u8, 174u8, 232u8, 119u8, 148u8, - 206u8, 130u8, 133u8, 199u8, 127u8, 201u8, 205u8, 8u8, 213u8, 108u8, - 93u8, 135u8, 88u8, 238u8, 171u8, 31u8, 193u8, 23u8, 113u8, 106u8, - 135u8, - ] - { - let call = Reserve {}; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - pub type Event = runtime_types::polkadot_runtime_common::paras_registrar::pallet::Event; - pub mod events { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct Registered( - pub runtime_types::polkadot_parachain::primitives::Id, - pub ::subxt::sp_core::crypto::AccountId32, - ); - impl ::subxt::Event for Registered { - const PALLET: &'static str = "Registrar"; - const EVENT: &'static str = "Registered"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct Deregistered(pub runtime_types::polkadot_parachain::primitives::Id); - impl ::subxt::Event for Deregistered { - const PALLET: &'static str = "Registrar"; - const EVENT: &'static str = "Deregistered"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct Reserved( - pub runtime_types::polkadot_parachain::primitives::Id, - pub ::subxt::sp_core::crypto::AccountId32, - ); - impl ::subxt::Event for Reserved { - const PALLET: &'static str = "Registrar"; - const EVENT: &'static str = "Reserved"; - } - } - pub mod storage { - use super::runtime_types; - pub struct PendingSwap<'a>(pub &'a runtime_types::polkadot_parachain::primitives::Id); - impl ::subxt::StorageEntry for PendingSwap<'_> { - const PALLET: &'static str = "Registrar"; - const STORAGE: &'static str = "PendingSwap"; - type Value = runtime_types::polkadot_parachain::primitives::Id; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( - &self.0, - ::subxt::StorageHasher::Twox64Concat, - )]) - } - } - pub struct Paras<'a>(pub &'a runtime_types::polkadot_parachain::primitives::Id); - impl ::subxt::StorageEntry for Paras<'_> { - const PALLET: &'static str = "Registrar"; - const STORAGE: &'static str = "Paras"; - type Value = runtime_types::polkadot_runtime_common::paras_registrar::ParaInfo< - ::subxt::sp_core::crypto::AccountId32, - ::core::primitive::u128, - >; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( - &self.0, - ::subxt::StorageHasher::Twox64Concat, - )]) - } - } - pub struct NextFreeParaId; - impl ::subxt::StorageEntry for NextFreeParaId { - const PALLET: &'static str = "Registrar"; - const STORAGE: &'static str = "NextFreeParaId"; - type Value = runtime_types::polkadot_parachain::primitives::Id; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct StorageApi<'a, T: ::subxt::Config> { - client: &'a ::subxt::Client, - } - impl<'a, T: 
::subxt::Config> StorageApi<'a, T> { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { client } - } - #[doc = " Pending swap operations."] - pub async fn pending_swap( - &self, - _0: &runtime_types::polkadot_parachain::primitives::Id, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::core::option::Option, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 130u8, 4u8, 116u8, 91u8, 196u8, 41u8, 66u8, 48u8, 17u8, 2u8, 255u8, - 189u8, 132u8, 10u8, 129u8, 102u8, 117u8, 56u8, 114u8, 231u8, 78u8, - 112u8, 11u8, 76u8, 152u8, 41u8, 70u8, 232u8, 212u8, 71u8, 193u8, 107u8, - ] - { - let entry = PendingSwap(_0); - self.client.storage().fetch(&entry, block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Pending swap operations."] - pub async fn pending_swap_iter( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::subxt::KeyIter<'a, T, PendingSwap<'a>>, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 130u8, 4u8, 116u8, 91u8, 196u8, 41u8, 66u8, 48u8, 17u8, 2u8, 255u8, - 189u8, 132u8, 10u8, 129u8, 102u8, 117u8, 56u8, 114u8, 231u8, 78u8, - 112u8, 11u8, 76u8, 152u8, 41u8, 70u8, 232u8, 212u8, 71u8, 193u8, 107u8, - ] - { - self.client.storage().iter(block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Amount held on deposit for each para and the original depositor."] - #[doc = ""] - #[doc = " The given account ID is responsible for registering the code and initial head data, but may only do"] - #[doc = " so if it isn't yet registered. (After that, it's up to governance to do so.)"] - pub async fn paras( - &self, - _0: &runtime_types::polkadot_parachain::primitives::Id, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::core::option::Option< - runtime_types::polkadot_runtime_common::paras_registrar::ParaInfo< - ::subxt::sp_core::crypto::AccountId32, - ::core::primitive::u128, - >, - >, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 180u8, 146u8, 122u8, 242u8, 222u8, 203u8, 19u8, 110u8, 22u8, 53u8, - 147u8, 127u8, 165u8, 158u8, 113u8, 196u8, 105u8, 209u8, 45u8, 250u8, - 163u8, 78u8, 120u8, 129u8, 180u8, 128u8, 63u8, 195u8, 71u8, 176u8, - 247u8, 206u8, - ] - { - let entry = Paras(_0); - self.client.storage().fetch(&entry, block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Amount held on deposit for each para and the original depositor."] - #[doc = ""] - #[doc = " The given account ID is responsible for registering the code and initial head data, but may only do"] - #[doc = " so if it isn't yet registered. (After that, it's up to governance to do so.)"] - pub async fn paras_iter( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result<::subxt::KeyIter<'a, T, Paras<'a>>, ::subxt::BasicError> - { - if self.client.metadata().storage_hash::()? 
- == [ - 180u8, 146u8, 122u8, 242u8, 222u8, 203u8, 19u8, 110u8, 22u8, 53u8, - 147u8, 127u8, 165u8, 158u8, 113u8, 196u8, 105u8, 209u8, 45u8, 250u8, - 163u8, 78u8, 120u8, 129u8, 180u8, 128u8, 63u8, 195u8, 71u8, 176u8, - 247u8, 206u8, - ] - { - self.client.storage().iter(block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The next free `ParaId`."] - pub async fn next_free_para_id( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - runtime_types::polkadot_parachain::primitives::Id, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 112u8, 52u8, 84u8, 181u8, 132u8, 61u8, 46u8, 69u8, 165u8, 85u8, 253u8, - 243u8, 228u8, 151u8, 15u8, 239u8, 172u8, 28u8, 102u8, 38u8, 155u8, - 90u8, 55u8, 162u8, 254u8, 139u8, 59u8, 186u8, 152u8, 239u8, 53u8, - 216u8, - ] - { - let entry = NextFreeParaId; - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - pub mod constants { - use super::runtime_types; - pub struct ConstantsApi<'a, T: ::subxt::Config> { - client: &'a ::subxt::Client, - } - impl<'a, T: ::subxt::Config> ConstantsApi<'a, T> { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { client } - } - #[doc = " The deposit to be paid to run a parathread."] - #[doc = " This should include the cost for storing the genesis head and validation code."] - pub fn para_deposit( - &self, - ) -> ::core::result::Result<::core::primitive::u128, ::subxt::BasicError> - { - if self - .client - .metadata() - .constant_hash("Registrar", "ParaDeposit")? - == [ - 177u8, 138u8, 242u8, 166u8, 12u8, 97u8, 93u8, 2u8, 123u8, 45u8, 85u8, - 25u8, 46u8, 14u8, 221u8, 50u8, 157u8, 45u8, 243u8, 106u8, 171u8, 191u8, - 36u8, 192u8, 126u8, 91u8, 2u8, 240u8, 187u8, 201u8, 39u8, 110u8, - ] - { - let pallet = self.client.metadata().pallet("Registrar")?; - let constant = pallet.constant("ParaDeposit")?; - let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; - Ok(value) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The deposit to be paid per byte stored on chain."] - pub fn data_deposit_per_byte( - &self, - ) -> ::core::result::Result<::core::primitive::u128, ::subxt::BasicError> - { - if self - .client - .metadata() - .constant_hash("Registrar", "DataDepositPerByte")? 
- == [ - 75u8, 45u8, 63u8, 192u8, 73u8, 118u8, 130u8, 12u8, 38u8, 42u8, 196u8, - 189u8, 156u8, 218u8, 152u8, 165u8, 124u8, 253u8, 108u8, 113u8, 3u8, - 149u8, 83u8, 27u8, 234u8, 163u8, 225u8, 231u8, 179u8, 212u8, 26u8, - 156u8, - ] - { - let pallet = self.client.metadata().pallet("Registrar")?; - let constant = pallet.constant("DataDepositPerByte")?; - let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; - Ok(value) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - } - pub mod auctions { - use super::root_mod; - use super::runtime_types; - pub mod calls { - use super::root_mod; - use super::runtime_types; - type DispatchError = runtime_types::sp_runtime::DispatchError; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct NewAuction { - #[codec(compact)] - pub duration: ::core::primitive::u32, - #[codec(compact)] - pub lease_period_index: ::core::primitive::u32, - } - impl ::subxt::Call for NewAuction { - const PALLET: &'static str = "Auctions"; - const FUNCTION: &'static str = "new_auction"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct Bid { - #[codec(compact)] - pub para: runtime_types::polkadot_parachain::primitives::Id, - #[codec(compact)] - pub auction_index: ::core::primitive::u32, - #[codec(compact)] - pub first_slot: ::core::primitive::u32, - #[codec(compact)] - pub last_slot: ::core::primitive::u32, - #[codec(compact)] - pub amount: ::core::primitive::u128, - } - impl ::subxt::Call for Bid { - const PALLET: &'static str = "Auctions"; - const FUNCTION: &'static str = "bid"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct CancelAuction; - impl ::subxt::Call for CancelAuction { - const PALLET: &'static str = "Auctions"; - const FUNCTION: &'static str = "cancel_auction"; - } - pub struct TransactionApi<'a, T: ::subxt::Config, X> { - client: &'a ::subxt::Client, - marker: ::core::marker::PhantomData, - } - impl<'a, T, X> TransactionApi<'a, T, X> - where - T: ::subxt::Config, - X: ::subxt::extrinsic::ExtrinsicParams, - { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { - client, - marker: ::core::marker::PhantomData, - } - } - #[doc = "Create a new auction."] - #[doc = ""] - #[doc = "This can only happen when there isn't already an auction in progress and may only be"] - #[doc = "called by the root origin. Accepts the `duration` of this auction and the"] - #[doc = "`lease_period_index` of the initial lease period of the four that are to be auctioned."] - pub fn new_auction( - &self, - duration: ::core::primitive::u32, - lease_period_index: ::core::primitive::u32, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - NewAuction, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? 
- == [ - 12u8, 43u8, 152u8, 0u8, 229u8, 15u8, 32u8, 205u8, 208u8, 71u8, 57u8, - 169u8, 201u8, 177u8, 52u8, 10u8, 93u8, 183u8, 5u8, 156u8, 231u8, 188u8, - 77u8, 238u8, 119u8, 238u8, 87u8, 251u8, 121u8, 199u8, 18u8, 129u8, - ] - { - let call = NewAuction { - duration, - lease_period_index, - }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Make a new bid from an account (including a parachain account) for deploying a new"] - #[doc = "parachain."] - #[doc = ""] - #[doc = "Multiple simultaneous bids from the same bidder are allowed only as long as all active"] - #[doc = "bids overlap each other (i.e. are mutually exclusive). Bids cannot be redacted."] - #[doc = ""] - #[doc = "- `sub` is the sub-bidder ID, allowing for multiple competing bids to be made by (and"] - #[doc = "funded by) the same account."] - #[doc = "- `auction_index` is the index of the auction to bid on. Should just be the present"] - #[doc = "value of `AuctionCounter`."] - #[doc = "- `first_slot` is the first lease period index of the range to bid on. This is the"] - #[doc = "absolute lease period index value, not an auction-specific offset."] - #[doc = "- `last_slot` is the last lease period index of the range to bid on. This is the"] - #[doc = "absolute lease period index value, not an auction-specific offset."] - #[doc = "- `amount` is the amount to bid to be held as deposit for the parachain should the"] - #[doc = "bid win. This amount is held throughout the range."] - pub fn bid( - &self, - para: runtime_types::polkadot_parachain::primitives::Id, - auction_index: ::core::primitive::u32, - first_slot: ::core::primitive::u32, - last_slot: ::core::primitive::u32, - amount: ::core::primitive::u128, - ) -> Result< - ::subxt::SubmittableExtrinsic<'a, T, X, Bid, DispatchError, root_mod::Event>, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 206u8, 22u8, 15u8, 251u8, 222u8, 193u8, 192u8, 125u8, 160u8, 131u8, - 209u8, 129u8, 105u8, 46u8, 77u8, 204u8, 107u8, 112u8, 13u8, 188u8, - 193u8, 73u8, 225u8, 232u8, 179u8, 205u8, 39u8, 69u8, 242u8, 79u8, 36u8, - 121u8, - ] - { - let call = Bid { - para, - auction_index, - first_slot, - last_slot, - amount, - }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Cancel an in-progress auction."] - #[doc = ""] - #[doc = "Can only be called by Root origin."] - pub fn cancel_auction( - &self, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - CancelAuction, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 182u8, 223u8, 178u8, 136u8, 1u8, 115u8, 229u8, 78u8, 166u8, 128u8, - 28u8, 106u8, 6u8, 248u8, 46u8, 55u8, 110u8, 120u8, 213u8, 11u8, 90u8, - 217u8, 42u8, 120u8, 47u8, 83u8, 126u8, 216u8, 236u8, 251u8, 255u8, - 50u8, - ] - { - let call = CancelAuction {}; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - pub type Event = runtime_types::polkadot_runtime_common::auctions::pallet::Event; - pub mod events { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "An auction started. 
Provides its index and the block number where it will begin to"] - #[doc = "close and the first lease period of the quadruplet that is auctioned."] - #[doc = "`[auction_index, lease_period, ending]`"] - pub struct AuctionStarted( - pub ::core::primitive::u32, - pub ::core::primitive::u32, - pub ::core::primitive::u32, - ); - impl ::subxt::Event for AuctionStarted { - const PALLET: &'static str = "Auctions"; - const EVENT: &'static str = "AuctionStarted"; - } - #[derive( - :: subxt :: codec :: CompactAs, - :: subxt :: codec :: Decode, - :: subxt :: codec :: Encode, - Debug, - )] - #[doc = "An auction ended. All funds become unreserved. `[auction_index]`"] - pub struct AuctionClosed(pub ::core::primitive::u32); - impl ::subxt::Event for AuctionClosed { - const PALLET: &'static str = "Auctions"; - const EVENT: &'static str = "AuctionClosed"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "Funds were reserved for a winning bid. First balance is the extra amount reserved."] - #[doc = "Second is the total. `[bidder, extra_reserved, total_amount]`"] - pub struct Reserved( - pub ::subxt::sp_core::crypto::AccountId32, - pub ::core::primitive::u128, - pub ::core::primitive::u128, - ); - impl ::subxt::Event for Reserved { - const PALLET: &'static str = "Auctions"; - const EVENT: &'static str = "Reserved"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "Funds were unreserved since bidder is no longer active. `[bidder, amount]`"] - pub struct Unreserved( - pub ::subxt::sp_core::crypto::AccountId32, - pub ::core::primitive::u128, - ); - impl ::subxt::Event for Unreserved { - const PALLET: &'static str = "Auctions"; - const EVENT: &'static str = "Unreserved"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "Someone attempted to lease the same slot twice for a parachain. The amount is held in reserve"] - #[doc = "but no parachain slot has been leased."] - #[doc = "`[parachain_id, leaser, amount]`"] - pub struct ReserveConfiscated( - pub runtime_types::polkadot_parachain::primitives::Id, - pub ::subxt::sp_core::crypto::AccountId32, - pub ::core::primitive::u128, - ); - impl ::subxt::Event for ReserveConfiscated { - const PALLET: &'static str = "Auctions"; - const EVENT: &'static str = "ReserveConfiscated"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "A new bid has been accepted as the current winner."] - #[doc = "`[who, para_id, amount, first_slot, last_slot]`"] - pub struct BidAccepted( - pub ::subxt::sp_core::crypto::AccountId32, - pub runtime_types::polkadot_parachain::primitives::Id, - pub ::core::primitive::u128, - pub ::core::primitive::u32, - pub ::core::primitive::u32, - ); - impl ::subxt::Event for BidAccepted { - const PALLET: &'static str = "Auctions"; - const EVENT: &'static str = "BidAccepted"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "The winning offset was chosen for an auction. 
This will map into the `Winning` storage map."] - #[doc = "`[auction_index, block_number]`"] - pub struct WinningOffset(pub ::core::primitive::u32, pub ::core::primitive::u32); - impl ::subxt::Event for WinningOffset { - const PALLET: &'static str = "Auctions"; - const EVENT: &'static str = "WinningOffset"; - } - } - pub mod storage { - use super::runtime_types; - pub struct AuctionCounter; - impl ::subxt::StorageEntry for AuctionCounter { - const PALLET: &'static str = "Auctions"; - const STORAGE: &'static str = "AuctionCounter"; - type Value = ::core::primitive::u32; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct AuctionInfo; - impl ::subxt::StorageEntry for AuctionInfo { - const PALLET: &'static str = "Auctions"; - const STORAGE: &'static str = "AuctionInfo"; - type Value = (::core::primitive::u32, ::core::primitive::u32); - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct ReservedAmounts<'a>( - pub &'a ::subxt::sp_core::crypto::AccountId32, - pub &'a runtime_types::polkadot_parachain::primitives::Id, - ); - impl ::subxt::StorageEntry for ReservedAmounts<'_> { - const PALLET: &'static str = "Auctions"; - const STORAGE: &'static str = "ReservedAmounts"; - type Value = ::core::primitive::u128; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( - &(&self.0, &self.1), - ::subxt::StorageHasher::Twox64Concat, - )]) - } - } - pub struct Winning<'a>(pub &'a ::core::primitive::u32); - impl ::subxt::StorageEntry for Winning<'_> { - const PALLET: &'static str = "Auctions"; - const STORAGE: &'static str = "Winning"; - type Value = [::core::option::Option<( - ::subxt::sp_core::crypto::AccountId32, - runtime_types::polkadot_parachain::primitives::Id, - ::core::primitive::u128, - )>; 36usize]; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( - &self.0, - ::subxt::StorageHasher::Twox64Concat, - )]) - } - } - pub struct StorageApi<'a, T: ::subxt::Config> { - client: &'a ::subxt::Client, - } - impl<'a, T: ::subxt::Config> StorageApi<'a, T> { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { client } - } - #[doc = " Number of auctions started so far."] - pub async fn auction_counter( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> - { - if self.client.metadata().storage_hash::()? - == [ - 67u8, 247u8, 96u8, 152u8, 0u8, 224u8, 230u8, 98u8, 194u8, 107u8, 3u8, - 203u8, 51u8, 201u8, 149u8, 22u8, 184u8, 80u8, 251u8, 239u8, 253u8, - 19u8, 58u8, 192u8, 65u8, 96u8, 189u8, 54u8, 175u8, 130u8, 143u8, 181u8, - ] - { - let entry = AuctionCounter; - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Information relating to the current auction, if there is one."] - #[doc = ""] - #[doc = " The first item in the tuple is the lease period index that the first of the four"] - #[doc = " contiguous lease periods on auction is for. The second is the block number when the"] - #[doc = " auction will \"begin to end\", i.e. 
the first block of the Ending Period of the auction."] - pub async fn auction_info( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::core::option::Option<(::core::primitive::u32, ::core::primitive::u32)>, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 73u8, 216u8, 173u8, 230u8, 132u8, 78u8, 83u8, 62u8, 200u8, 69u8, 17u8, - 73u8, 57u8, 107u8, 160u8, 90u8, 147u8, 84u8, 29u8, 110u8, 144u8, 215u8, - 169u8, 110u8, 217u8, 77u8, 109u8, 204u8, 1u8, 164u8, 95u8, 83u8, - ] - { - let entry = AuctionInfo; - self.client.storage().fetch(&entry, block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Amounts currently reserved in the accounts of the bidders currently winning"] - #[doc = " (sub-)ranges."] - pub async fn reserved_amounts( - &self, - _0: &::subxt::sp_core::crypto::AccountId32, - _1: &runtime_types::polkadot_parachain::primitives::Id, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::core::option::Option<::core::primitive::u128>, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 195u8, 56u8, 142u8, 154u8, 193u8, 115u8, 13u8, 64u8, 101u8, 179u8, - 69u8, 175u8, 185u8, 12u8, 31u8, 65u8, 147u8, 211u8, 74u8, 40u8, 190u8, - 254u8, 190u8, 176u8, 117u8, 159u8, 234u8, 214u8, 157u8, 83u8, 56u8, - 192u8, - ] - { - let entry = ReservedAmounts(_0, _1); - self.client.storage().fetch(&entry, block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Amounts currently reserved in the accounts of the bidders currently winning"] - #[doc = " (sub-)ranges."] - pub async fn reserved_amounts_iter( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::subxt::KeyIter<'a, T, ReservedAmounts<'a>>, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 195u8, 56u8, 142u8, 154u8, 193u8, 115u8, 13u8, 64u8, 101u8, 179u8, - 69u8, 175u8, 185u8, 12u8, 31u8, 65u8, 147u8, 211u8, 74u8, 40u8, 190u8, - 254u8, 190u8, 176u8, 117u8, 159u8, 234u8, 214u8, 157u8, 83u8, 56u8, - 192u8, - ] - { - self.client.storage().iter(block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The winning bids for each of the 10 ranges at each sample in the final Ending Period of"] - #[doc = " the current auction. The map's key is the 0-based index into the Sample Size. The"] - #[doc = " first sample of the ending period is 0; the last is `Sample Size - 1`."] - pub async fn winning( - &self, - _0: &::core::primitive::u32, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::core::option::Option< - [::core::option::Option<( - ::subxt::sp_core::crypto::AccountId32, - runtime_types::polkadot_parachain::primitives::Id, - ::core::primitive::u128, - )>; 36usize], - >, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 152u8, 246u8, 158u8, 193u8, 21u8, 56u8, 204u8, 29u8, 146u8, 90u8, - 133u8, 246u8, 75u8, 111u8, 157u8, 150u8, 175u8, 33u8, 127u8, 215u8, - 158u8, 55u8, 231u8, 78u8, 143u8, 128u8, 92u8, 70u8, 61u8, 23u8, 43u8, - 68u8, - ] - { - let entry = Winning(_0); - self.client.storage().fetch(&entry, block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The winning bids for each of the 10 ranges at each sample in the final Ending Period of"] - #[doc = " the current auction. 
The map's key is the 0-based index into the Sample Size. The"] - #[doc = " first sample of the ending period is 0; the last is `Sample Size - 1`."] - pub async fn winning_iter( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result<::subxt::KeyIter<'a, T, Winning<'a>>, ::subxt::BasicError> - { - if self.client.metadata().storage_hash::()? - == [ - 152u8, 246u8, 158u8, 193u8, 21u8, 56u8, 204u8, 29u8, 146u8, 90u8, - 133u8, 246u8, 75u8, 111u8, 157u8, 150u8, 175u8, 33u8, 127u8, 215u8, - 158u8, 55u8, 231u8, 78u8, 143u8, 128u8, 92u8, 70u8, 61u8, 23u8, 43u8, - 68u8, - ] - { - self.client.storage().iter(block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - pub mod constants { - use super::runtime_types; - pub struct ConstantsApi<'a, T: ::subxt::Config> { - client: &'a ::subxt::Client, - } - impl<'a, T: ::subxt::Config> ConstantsApi<'a, T> { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { client } - } - #[doc = " The number of blocks over which an auction may be retroactively ended."] - pub fn ending_period( - &self, - ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> - { - if self - .client - .metadata() - .constant_hash("Auctions", "EndingPeriod")? - == [ - 41u8, 212u8, 17u8, 243u8, 76u8, 205u8, 95u8, 195u8, 181u8, 1u8, 59u8, - 31u8, 204u8, 20u8, 83u8, 117u8, 69u8, 25u8, 74u8, 59u8, 18u8, 11u8, - 110u8, 123u8, 62u8, 254u8, 188u8, 62u8, 89u8, 80u8, 213u8, 97u8, - ] - { - let pallet = self.client.metadata().pallet("Auctions")?; - let constant = pallet.constant("EndingPeriod")?; - let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; - Ok(value) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The length of each sample to take during the ending period."] - #[doc = ""] - #[doc = " `EndingPeriod` / `SampleLength` = Total # of Samples"] - pub fn sample_length( - &self, - ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> - { - if self - .client - .metadata() - .constant_hash("Auctions", "SampleLength")? - == [ - 120u8, 204u8, 79u8, 231u8, 92u8, 177u8, 250u8, 183u8, 207u8, 218u8, - 171u8, 81u8, 94u8, 92u8, 233u8, 87u8, 74u8, 212u8, 178u8, 104u8, 137u8, - 187u8, 31u8, 163u8, 157u8, 136u8, 111u8, 129u8, 149u8, 85u8, 122u8, - 181u8, - ] - { - let pallet = self.client.metadata().pallet("Auctions")?; - let constant = pallet.constant("SampleLength")?; - let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; - Ok(value) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - pub fn slot_range_count( - &self, - ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> - { - if self - .client - .metadata() - .constant_hash("Auctions", "SlotRangeCount")? - == [ - 32u8, 147u8, 38u8, 54u8, 172u8, 189u8, 240u8, 136u8, 216u8, 182u8, - 191u8, 129u8, 122u8, 1u8, 129u8, 244u8, 180u8, 210u8, 219u8, 142u8, - 224u8, 151u8, 237u8, 192u8, 103u8, 206u8, 101u8, 131u8, 78u8, 181u8, - 163u8, 44u8, - ] - { - let pallet = self.client.metadata().pallet("Auctions")?; - let constant = pallet.constant("SlotRangeCount")?; - let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; - Ok(value) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - pub fn lease_periods_per_slot( - &self, - ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> - { - if self - .client - .metadata() - .constant_hash("Auctions", "LeasePeriodsPerSlot")? 
- == [ - 174u8, 18u8, 150u8, 44u8, 219u8, 36u8, 218u8, 28u8, 34u8, 132u8, 235u8, - 161u8, 23u8, 173u8, 80u8, 175u8, 93u8, 163u8, 6u8, 226u8, 11u8, 212u8, - 186u8, 119u8, 185u8, 85u8, 111u8, 216u8, 214u8, 111u8, 148u8, 28u8, - ] - { - let pallet = self.client.metadata().pallet("Auctions")?; - let constant = pallet.constant("LeasePeriodsPerSlot")?; - let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; - Ok(value) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - } - pub mod crowdloan { - use super::root_mod; - use super::runtime_types; - pub mod calls { - use super::root_mod; - use super::runtime_types; - type DispatchError = runtime_types::sp_runtime::DispatchError; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct Create { - #[codec(compact)] - pub index: runtime_types::polkadot_parachain::primitives::Id, - #[codec(compact)] - pub cap: ::core::primitive::u128, - #[codec(compact)] - pub first_period: ::core::primitive::u32, - #[codec(compact)] - pub last_period: ::core::primitive::u32, - #[codec(compact)] - pub end: ::core::primitive::u32, - pub verifier: ::core::option::Option, - } - impl ::subxt::Call for Create { - const PALLET: &'static str = "Crowdloan"; - const FUNCTION: &'static str = "create"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct Contribute { - #[codec(compact)] - pub index: runtime_types::polkadot_parachain::primitives::Id, - #[codec(compact)] - pub value: ::core::primitive::u128, - pub signature: ::core::option::Option, - } - impl ::subxt::Call for Contribute { - const PALLET: &'static str = "Crowdloan"; - const FUNCTION: &'static str = "contribute"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct Withdraw { - pub who: ::subxt::sp_core::crypto::AccountId32, - #[codec(compact)] - pub index: runtime_types::polkadot_parachain::primitives::Id, - } - impl ::subxt::Call for Withdraw { - const PALLET: &'static str = "Crowdloan"; - const FUNCTION: &'static str = "withdraw"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct Refund { - #[codec(compact)] - pub index: runtime_types::polkadot_parachain::primitives::Id, - } - impl ::subxt::Call for Refund { - const PALLET: &'static str = "Crowdloan"; - const FUNCTION: &'static str = "refund"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct Dissolve { - #[codec(compact)] - pub index: runtime_types::polkadot_parachain::primitives::Id, - } - impl ::subxt::Call for Dissolve { - const PALLET: &'static str = "Crowdloan"; - const FUNCTION: &'static str = "dissolve"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct Edit { - #[codec(compact)] - pub index: runtime_types::polkadot_parachain::primitives::Id, - #[codec(compact)] - pub cap: ::core::primitive::u128, - #[codec(compact)] - pub first_period: ::core::primitive::u32, - #[codec(compact)] - pub last_period: ::core::primitive::u32, - #[codec(compact)] - pub end: ::core::primitive::u32, - pub verifier: ::core::option::Option, - } - impl ::subxt::Call for Edit { - const PALLET: &'static str = "Crowdloan"; - const FUNCTION: &'static str = "edit"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct AddMemo { - pub index: runtime_types::polkadot_parachain::primitives::Id, - pub memo: ::std::vec::Vec<::core::primitive::u8>, - } - impl 
::subxt::Call for AddMemo { - const PALLET: &'static str = "Crowdloan"; - const FUNCTION: &'static str = "add_memo"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct Poke { - pub index: runtime_types::polkadot_parachain::primitives::Id, - } - impl ::subxt::Call for Poke { - const PALLET: &'static str = "Crowdloan"; - const FUNCTION: &'static str = "poke"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct ContributeAll { - #[codec(compact)] - pub index: runtime_types::polkadot_parachain::primitives::Id, - pub signature: ::core::option::Option, - } - impl ::subxt::Call for ContributeAll { - const PALLET: &'static str = "Crowdloan"; - const FUNCTION: &'static str = "contribute_all"; - } - pub struct TransactionApi<'a, T: ::subxt::Config, X> { - client: &'a ::subxt::Client, - marker: ::core::marker::PhantomData, - } - impl<'a, T, X> TransactionApi<'a, T, X> - where - T: ::subxt::Config, - X: ::subxt::extrinsic::ExtrinsicParams, - { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { - client, - marker: ::core::marker::PhantomData, - } - } - #[doc = "Create a new crowdloaning campaign for a parachain slot with the given lease period range."] - #[doc = ""] - #[doc = "This applies a lock to your parachain configuration, ensuring that it cannot be changed"] - #[doc = "by the parachain manager."] - pub fn create( - &self, - index: runtime_types::polkadot_parachain::primitives::Id, - cap: ::core::primitive::u128, - first_period: ::core::primitive::u32, - last_period: ::core::primitive::u32, - end: ::core::primitive::u32, - verifier: ::core::option::Option, - ) -> Result< - ::subxt::SubmittableExtrinsic<'a, T, X, Create, DispatchError, root_mod::Event>, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 94u8, 115u8, 154u8, 239u8, 215u8, 180u8, 175u8, 240u8, 137u8, 240u8, - 74u8, 159u8, 67u8, 54u8, 69u8, 199u8, 161u8, 155u8, 243u8, 222u8, - 205u8, 163u8, 142u8, 251u8, 156u8, 94u8, 65u8, 153u8, 39u8, 226u8, - 79u8, 195u8, - ] - { - let call = Create { - index, - cap, - first_period, - last_period, - end, - verifier, - }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Contribute to a crowd sale. This will transfer some balance over to fund a parachain"] - #[doc = "slot. It will be withdrawable when the crowdloan has ended and the funds are unused."] - pub fn contribute( - &self, - index: runtime_types::polkadot_parachain::primitives::Id, - value: ::core::primitive::u128, - signature: ::core::option::Option, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - Contribute, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 95u8, 255u8, 35u8, 30u8, 44u8, 150u8, 10u8, 166u8, 0u8, 204u8, 106u8, - 59u8, 150u8, 254u8, 216u8, 128u8, 232u8, 129u8, 30u8, 101u8, 196u8, - 198u8, 180u8, 156u8, 122u8, 252u8, 139u8, 28u8, 164u8, 115u8, 153u8, - 109u8, - ] - { - let call = Contribute { - index, - value, - signature, - }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Withdraw full balance of a specific contributor."] - #[doc = ""] - #[doc = "Origin must be signed, but can come from anyone."] - #[doc = ""] - #[doc = "The fund must be either in, or ready for, retirement. 
For a fund to be *in* retirement, then the retirement"] - #[doc = "flag must be set. For a fund to be ready for retirement, then:"] - #[doc = "- it must not already be in retirement;"] - #[doc = "- the amount of raised funds must be bigger than the _free_ balance of the account;"] - #[doc = "- and either:"] - #[doc = " - the block number must be at least `end`; or"] - #[doc = " - the current lease period must be greater than the fund's `last_period`."] - #[doc = ""] - #[doc = "In this case, the fund's retirement flag is set and its `end` is reset to the current block"] - #[doc = "number."] - #[doc = ""] - #[doc = "- `who`: The account whose contribution should be withdrawn."] - #[doc = "- `index`: The parachain to whose crowdloan the contribution was made."] - pub fn withdraw( - &self, - who: ::subxt::sp_core::crypto::AccountId32, - index: runtime_types::polkadot_parachain::primitives::Id, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - Withdraw, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 67u8, 65u8, 89u8, 108u8, 193u8, 99u8, 74u8, 32u8, 163u8, 13u8, 81u8, - 131u8, 64u8, 107u8, 72u8, 23u8, 35u8, 177u8, 130u8, 171u8, 70u8, 232u8, - 246u8, 254u8, 67u8, 219u8, 84u8, 96u8, 165u8, 20u8, 183u8, 209u8, - ] - { - let call = Withdraw { who, index }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Automatically refund contributors of an ended crowdloan."] - #[doc = "Due to weight restrictions, this function may need to be called multiple"] - #[doc = "times to fully refund all users. We will refund `RemoveKeysLimit` users at a time."] - #[doc = ""] - #[doc = "Origin must be signed, but can come from anyone."] - pub fn refund( - &self, - index: runtime_types::polkadot_parachain::primitives::Id, - ) -> Result< - ::subxt::SubmittableExtrinsic<'a, T, X, Refund, DispatchError, root_mod::Event>, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 202u8, 206u8, 79u8, 226u8, 114u8, 228u8, 110u8, 18u8, 178u8, 173u8, - 23u8, 83u8, 64u8, 11u8, 201u8, 19u8, 57u8, 75u8, 181u8, 241u8, 231u8, - 189u8, 211u8, 48u8, 82u8, 64u8, 220u8, 22u8, 247u8, 7u8, 68u8, 211u8, - ] - { - let call = Refund { index }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Remove a fund after the retirement period has ended and all funds have been returned."] - pub fn dissolve( - &self, - index: runtime_types::polkadot_parachain::primitives::Id, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - Dissolve, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? 
- == [ - 210u8, 3u8, 221u8, 185u8, 64u8, 178u8, 56u8, 132u8, 72u8, 127u8, 105u8, - 31u8, 167u8, 107u8, 127u8, 224u8, 174u8, 221u8, 111u8, 105u8, 47u8, - 247u8, 10u8, 5u8, 37u8, 180u8, 61u8, 180u8, 3u8, 164u8, 196u8, 194u8, - ] - { - let call = Dissolve { index }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Edit the configuration for an in-progress crowdloan."] - #[doc = ""] - #[doc = "Can only be called by Root origin."] - pub fn edit( - &self, - index: runtime_types::polkadot_parachain::primitives::Id, - cap: ::core::primitive::u128, - first_period: ::core::primitive::u32, - last_period: ::core::primitive::u32, - end: ::core::primitive::u32, - verifier: ::core::option::Option, - ) -> Result< - ::subxt::SubmittableExtrinsic<'a, T, X, Edit, DispatchError, root_mod::Event>, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 34u8, 43u8, 47u8, 39u8, 106u8, 245u8, 49u8, 40u8, 191u8, 195u8, 202u8, - 113u8, 137u8, 98u8, 143u8, 172u8, 191u8, 55u8, 240u8, 75u8, 234u8, - 180u8, 90u8, 206u8, 93u8, 214u8, 115u8, 215u8, 140u8, 144u8, 105u8, - 89u8, - ] - { - let call = Edit { - index, - cap, - first_period, - last_period, - end, - verifier, - }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Add an optional memo to an existing crowdloan contribution."] - #[doc = ""] - #[doc = "Origin must be Signed, and the user must have contributed to the crowdloan."] - pub fn add_memo( - &self, - index: runtime_types::polkadot_parachain::primitives::Id, - memo: ::std::vec::Vec<::core::primitive::u8>, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - AddMemo, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 97u8, 218u8, 115u8, 187u8, 167u8, 70u8, 229u8, 231u8, 148u8, 77u8, - 169u8, 139u8, 16u8, 15u8, 116u8, 128u8, 32u8, 59u8, 154u8, 146u8, 12u8, - 65u8, 36u8, 36u8, 69u8, 19u8, 74u8, 79u8, 66u8, 25u8, 215u8, 57u8, - ] - { - let call = AddMemo { index, memo }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Poke the fund into `NewRaise`"] - #[doc = ""] - #[doc = "Origin must be Signed, and the fund has non-zero raise."] - pub fn poke( - &self, - index: runtime_types::polkadot_parachain::primitives::Id, - ) -> Result< - ::subxt::SubmittableExtrinsic<'a, T, X, Poke, DispatchError, root_mod::Event>, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 99u8, 158u8, 48u8, 3u8, 228u8, 210u8, 249u8, 42u8, 44u8, 49u8, 24u8, - 212u8, 69u8, 69u8, 189u8, 194u8, 124u8, 251u8, 25u8, 123u8, 234u8, 3u8, - 184u8, 227u8, 1u8, 195u8, 219u8, 118u8, 235u8, 237u8, 11u8, 159u8, - ] - { - let call = Poke { index }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Contribute your entire balance to a crowd sale. This will transfer the entire balance of a user over to fund a parachain"] - #[doc = "slot. 
It will be withdrawable when the crowdloan has ended and the funds are unused."] - pub fn contribute_all( - &self, - index: runtime_types::polkadot_parachain::primitives::Id, - signature: ::core::option::Option, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - ContributeAll, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 64u8, 224u8, 233u8, 196u8, 182u8, 109u8, 69u8, 220u8, 46u8, 60u8, - 189u8, 125u8, 17u8, 28u8, 207u8, 63u8, 129u8, 56u8, 32u8, 239u8, 182u8, - 214u8, 237u8, 95u8, 228u8, 171u8, 209u8, 233u8, 205u8, 212u8, 147u8, - 176u8, - ] - { - let call = ContributeAll { index, signature }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - pub type Event = runtime_types::polkadot_runtime_common::crowdloan::pallet::Event; - pub mod events { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "Create a new crowdloaning campaign. `[fund_index]`"] - pub struct Created(pub runtime_types::polkadot_parachain::primitives::Id); - impl ::subxt::Event for Created { - const PALLET: &'static str = "Crowdloan"; - const EVENT: &'static str = "Created"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "Contributed to a crowd sale. `[who, fund_index, amount]`"] - pub struct Contributed( - pub ::subxt::sp_core::crypto::AccountId32, - pub runtime_types::polkadot_parachain::primitives::Id, - pub ::core::primitive::u128, - ); - impl ::subxt::Event for Contributed { - const PALLET: &'static str = "Crowdloan"; - const EVENT: &'static str = "Contributed"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "Withdrew full balance of a contributor. `[who, fund_index, amount]`"] - pub struct Withdrew( - pub ::subxt::sp_core::crypto::AccountId32, - pub runtime_types::polkadot_parachain::primitives::Id, - pub ::core::primitive::u128, - ); - impl ::subxt::Event for Withdrew { - const PALLET: &'static str = "Crowdloan"; - const EVENT: &'static str = "Withdrew"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "The loans in a fund have been partially dissolved, i.e. there are some left"] - #[doc = "over child keys that still need to be killed. `[fund_index]`"] - pub struct PartiallyRefunded(pub runtime_types::polkadot_parachain::primitives::Id); - impl ::subxt::Event for PartiallyRefunded { - const PALLET: &'static str = "Crowdloan"; - const EVENT: &'static str = "PartiallyRefunded"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "All loans in a fund have been refunded. `[fund_index]`"] - pub struct AllRefunded(pub runtime_types::polkadot_parachain::primitives::Id); - impl ::subxt::Event for AllRefunded { - const PALLET: &'static str = "Crowdloan"; - const EVENT: &'static str = "AllRefunded"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "Fund is dissolved. 
`[fund_index]`"] - pub struct Dissolved(pub runtime_types::polkadot_parachain::primitives::Id); - impl ::subxt::Event for Dissolved { - const PALLET: &'static str = "Crowdloan"; - const EVENT: &'static str = "Dissolved"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "The result of trying to submit a new bid to the Slots pallet."] - pub struct HandleBidResult( - pub runtime_types::polkadot_parachain::primitives::Id, - pub ::core::result::Result<(), runtime_types::sp_runtime::DispatchError>, - ); - impl ::subxt::Event for HandleBidResult { - const PALLET: &'static str = "Crowdloan"; - const EVENT: &'static str = "HandleBidResult"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "The configuration to a crowdloan has been edited. `[fund_index]`"] - pub struct Edited(pub runtime_types::polkadot_parachain::primitives::Id); - impl ::subxt::Event for Edited { - const PALLET: &'static str = "Crowdloan"; - const EVENT: &'static str = "Edited"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "A memo has been updated. `[who, fund_index, memo]`"] - pub struct MemoUpdated( - pub ::subxt::sp_core::crypto::AccountId32, - pub runtime_types::polkadot_parachain::primitives::Id, - pub ::std::vec::Vec<::core::primitive::u8>, - ); - impl ::subxt::Event for MemoUpdated { - const PALLET: &'static str = "Crowdloan"; - const EVENT: &'static str = "MemoUpdated"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "A parachain has been moved to `NewRaise`"] - pub struct AddedToNewRaise(pub runtime_types::polkadot_parachain::primitives::Id); - impl ::subxt::Event for AddedToNewRaise { - const PALLET: &'static str = "Crowdloan"; - const EVENT: &'static str = "AddedToNewRaise"; - } - } - pub mod storage { - use super::runtime_types; - pub struct Funds<'a>(pub &'a runtime_types::polkadot_parachain::primitives::Id); - impl ::subxt::StorageEntry for Funds<'_> { - const PALLET: &'static str = "Crowdloan"; - const STORAGE: &'static str = "Funds"; - type Value = runtime_types::polkadot_runtime_common::crowdloan::FundInfo< - ::subxt::sp_core::crypto::AccountId32, - ::core::primitive::u128, - ::core::primitive::u32, - ::core::primitive::u32, - >; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( - &self.0, - ::subxt::StorageHasher::Twox64Concat, - )]) - } - } - pub struct NewRaise; - impl ::subxt::StorageEntry for NewRaise { - const PALLET: &'static str = "Crowdloan"; - const STORAGE: &'static str = "NewRaise"; - type Value = ::std::vec::Vec; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct EndingsCount; - impl ::subxt::StorageEntry for EndingsCount { - const PALLET: &'static str = "Crowdloan"; - const STORAGE: &'static str = "EndingsCount"; - type Value = ::core::primitive::u32; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct NextFundIndex; - impl ::subxt::StorageEntry for NextFundIndex { - const PALLET: &'static str = "Crowdloan"; - const STORAGE: &'static str = "NextFundIndex"; - type Value = ::core::primitive::u32; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct StorageApi<'a, T: ::subxt::Config> { - client: &'a ::subxt::Client, - } - impl<'a, T: ::subxt::Config> StorageApi<'a, T> { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self 
{ client } - } - #[doc = " Info on all of the funds."] - pub async fn funds( - &self, - _0: &runtime_types::polkadot_parachain::primitives::Id, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::core::option::Option< - runtime_types::polkadot_runtime_common::crowdloan::FundInfo< - ::subxt::sp_core::crypto::AccountId32, - ::core::primitive::u128, - ::core::primitive::u32, - ::core::primitive::u32, - >, - >, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 13u8, 211u8, 240u8, 138u8, 231u8, 78u8, 123u8, 252u8, 210u8, 27u8, - 202u8, 82u8, 157u8, 118u8, 209u8, 218u8, 160u8, 183u8, 225u8, 77u8, - 230u8, 131u8, 180u8, 238u8, 83u8, 202u8, 29u8, 106u8, 114u8, 223u8, - 250u8, 3u8, - ] - { - let entry = Funds(_0); - self.client.storage().fetch(&entry, block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Info on all of the funds."] - pub async fn funds_iter( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result<::subxt::KeyIter<'a, T, Funds<'a>>, ::subxt::BasicError> - { - if self.client.metadata().storage_hash::()? - == [ - 13u8, 211u8, 240u8, 138u8, 231u8, 78u8, 123u8, 252u8, 210u8, 27u8, - 202u8, 82u8, 157u8, 118u8, 209u8, 218u8, 160u8, 183u8, 225u8, 77u8, - 230u8, 131u8, 180u8, 238u8, 83u8, 202u8, 29u8, 106u8, 114u8, 223u8, - 250u8, 3u8, - ] - { - self.client.storage().iter(block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The funds that have had additional contributions during the last block. This is used"] - #[doc = " in order to determine which funds should submit new or updated bids."] - pub async fn new_raise( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::std::vec::Vec, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 243u8, 204u8, 121u8, 230u8, 151u8, 223u8, 248u8, 199u8, 68u8, 209u8, - 226u8, 159u8, 217u8, 105u8, 39u8, 127u8, 162u8, 133u8, 56u8, 1u8, 70u8, - 7u8, 176u8, 56u8, 81u8, 49u8, 155u8, 143u8, 100u8, 153u8, 59u8, 86u8, - ] - { - let entry = NewRaise; - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The number of auctions that have entered into their ending period so far."] - pub async fn endings_count( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> - { - if self.client.metadata().storage_hash::()? - == [ - 12u8, 159u8, 166u8, 75u8, 192u8, 33u8, 21u8, 244u8, 149u8, 200u8, 49u8, - 54u8, 191u8, 174u8, 202u8, 86u8, 76u8, 115u8, 189u8, 35u8, 192u8, - 175u8, 156u8, 188u8, 41u8, 23u8, 92u8, 36u8, 141u8, 235u8, 248u8, - 143u8, - ] - { - let entry = EndingsCount; - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Tracker for the next available fund index"] - pub async fn next_fund_index( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> - { - if self.client.metadata().storage_hash::()? 
- == [ - 1u8, 215u8, 164u8, 194u8, 231u8, 34u8, 207u8, 19u8, 149u8, 187u8, 3u8, - 176u8, 194u8, 240u8, 180u8, 169u8, 214u8, 194u8, 202u8, 240u8, 209u8, - 6u8, 244u8, 46u8, 54u8, 142u8, 61u8, 220u8, 240u8, 96u8, 10u8, 168u8, - ] - { - let entry = NextFundIndex; - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - pub mod constants { - use super::runtime_types; - pub struct ConstantsApi<'a, T: ::subxt::Config> { - client: &'a ::subxt::Client, - } - impl<'a, T: ::subxt::Config> ConstantsApi<'a, T> { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { client } - } - #[doc = " `PalletId` for the crowdloan pallet. An appropriate value could be `PalletId(*b\"py/cfund\")`"] - pub fn pallet_id( - &self, - ) -> ::core::result::Result< - runtime_types::frame_support::PalletId, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .constant_hash("Crowdloan", "PalletId")? - == [ - 190u8, 62u8, 112u8, 88u8, 48u8, 222u8, 234u8, 76u8, 230u8, 81u8, 205u8, - 113u8, 202u8, 11u8, 184u8, 229u8, 189u8, 124u8, 132u8, 255u8, 46u8, - 202u8, 80u8, 86u8, 182u8, 212u8, 149u8, 200u8, 57u8, 215u8, 195u8, - 132u8, - ] - { - let pallet = self.client.metadata().pallet("Crowdloan")?; - let constant = pallet.constant("PalletId")?; - let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; - Ok(value) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The minimum amount that may be contributed into a crowdloan. Should almost certainly be at"] - #[doc = " least `ExistentialDeposit`."] - pub fn min_contribution( - &self, - ) -> ::core::result::Result<::core::primitive::u128, ::subxt::BasicError> - { - if self - .client - .metadata() - .constant_hash("Crowdloan", "MinContribution")? - == [ - 202u8, 28u8, 7u8, 249u8, 127u8, 100u8, 197u8, 70u8, 224u8, 205u8, 34u8, - 128u8, 198u8, 242u8, 54u8, 124u8, 230u8, 52u8, 142u8, 219u8, 30u8, - 229u8, 65u8, 136u8, 5u8, 244u8, 26u8, 9u8, 162u8, 58u8, 172u8, 141u8, - ] - { - let pallet = self.client.metadata().pallet("Crowdloan")?; - let constant = pallet.constant("MinContribution")?; - let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; - Ok(value) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Max number of storage keys to remove per extrinsic call."] - pub fn remove_keys_limit( - &self, - ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> - { - if self - .client - .metadata() - .constant_hash("Crowdloan", "RemoveKeysLimit")? 
- == [ - 199u8, 136u8, 0u8, 136u8, 48u8, 93u8, 45u8, 100u8, 156u8, 106u8, 111u8, - 137u8, 126u8, 251u8, 185u8, 76u8, 37u8, 112u8, 241u8, 98u8, 237u8, 6u8, - 157u8, 204u8, 211u8, 246u8, 183u8, 101u8, 3u8, 214u8, 44u8, 135u8, - ] - { - let pallet = self.client.metadata().pallet("Crowdloan")?; - let constant = pallet.constant("RemoveKeysLimit")?; - let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; - Ok(value) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - } - pub mod slots { - use super::root_mod; - use super::runtime_types; - pub mod calls { - use super::root_mod; - use super::runtime_types; - type DispatchError = runtime_types::sp_runtime::DispatchError; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct ForceLease { - pub para: runtime_types::polkadot_parachain::primitives::Id, - pub leaser: ::subxt::sp_core::crypto::AccountId32, - pub amount: ::core::primitive::u128, - pub period_begin: ::core::primitive::u32, - pub period_count: ::core::primitive::u32, - } - impl ::subxt::Call for ForceLease { - const PALLET: &'static str = "Slots"; - const FUNCTION: &'static str = "force_lease"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct ClearAllLeases { - pub para: runtime_types::polkadot_parachain::primitives::Id, - } - impl ::subxt::Call for ClearAllLeases { - const PALLET: &'static str = "Slots"; - const FUNCTION: &'static str = "clear_all_leases"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct TriggerOnboard { - pub para: runtime_types::polkadot_parachain::primitives::Id, - } - impl ::subxt::Call for TriggerOnboard { - const PALLET: &'static str = "Slots"; - const FUNCTION: &'static str = "trigger_onboard"; - } - pub struct TransactionApi<'a, T: ::subxt::Config, X> { - client: &'a ::subxt::Client, - marker: ::core::marker::PhantomData, - } - impl<'a, T, X> TransactionApi<'a, T, X> - where - T: ::subxt::Config, - X: ::subxt::extrinsic::ExtrinsicParams, - { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { - client, - marker: ::core::marker::PhantomData, - } - } - #[doc = "Just a connect into the `lease_out` call, in case Root wants to force some lease to happen"] - #[doc = "independently of any other on-chain mechanism to use it."] - #[doc = ""] - #[doc = "The dispatch origin for this call must match `T::ForceOrigin`."] - pub fn force_lease( - &self, - para: runtime_types::polkadot_parachain::primitives::Id, - leaser: ::subxt::sp_core::crypto::AccountId32, - amount: ::core::primitive::u128, - period_begin: ::core::primitive::u32, - period_count: ::core::primitive::u32, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - ForceLease, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? 
- == [ - 110u8, 205u8, 106u8, 226u8, 3u8, 177u8, 198u8, 116u8, 52u8, 161u8, - 90u8, 240u8, 43u8, 160u8, 144u8, 63u8, 97u8, 231u8, 232u8, 176u8, 92u8, - 253u8, 16u8, 243u8, 187u8, 94u8, 20u8, 114u8, 23u8, 46u8, 231u8, 249u8, - ] - { - let call = ForceLease { - para, - leaser, - amount, - period_begin, - period_count, - }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Clear all leases for a Para Id, refunding any deposits back to the original owners."] - #[doc = ""] - #[doc = "The dispatch origin for this call must match `T::ForceOrigin`."] - pub fn clear_all_leases( - &self, - para: runtime_types::polkadot_parachain::primitives::Id, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - ClearAllLeases, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 101u8, 225u8, 10u8, 139u8, 34u8, 12u8, 48u8, 76u8, 97u8, 178u8, 5u8, - 110u8, 19u8, 3u8, 237u8, 183u8, 54u8, 113u8, 7u8, 138u8, 180u8, 201u8, - 245u8, 151u8, 61u8, 40u8, 69u8, 31u8, 28u8, 172u8, 253u8, 227u8, - ] - { - let call = ClearAllLeases { para }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Try to onboard a parachain that has a lease for the current lease period."] - #[doc = ""] - #[doc = "This function can be useful if there was some state issue with a para that should"] - #[doc = "have onboarded, but was unable to. As long as they have a lease period, we can"] - #[doc = "let them onboard from here."] - #[doc = ""] - #[doc = "Origin must be signed, but can be called by anyone."] - pub fn trigger_onboard( - &self, - para: runtime_types::polkadot_parachain::primitives::Id, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - TriggerOnboard, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? 
- == [ - 85u8, 246u8, 247u8, 252u8, 46u8, 143u8, 200u8, 102u8, 105u8, 51u8, - 148u8, 164u8, 27u8, 25u8, 139u8, 167u8, 150u8, 129u8, 131u8, 187u8, - 153u8, 6u8, 169u8, 153u8, 192u8, 116u8, 130u8, 12u8, 22u8, 199u8, 52u8, - 8u8, - ] - { - let call = TriggerOnboard { para }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - pub type Event = runtime_types::polkadot_runtime_common::slots::pallet::Event; - pub mod events { - use super::runtime_types; - #[derive( - :: subxt :: codec :: CompactAs, - :: subxt :: codec :: Decode, - :: subxt :: codec :: Encode, - Debug, - )] - #[doc = "A new `[lease_period]` is beginning."] - pub struct NewLeasePeriod(pub ::core::primitive::u32); - impl ::subxt::Event for NewLeasePeriod { - const PALLET: &'static str = "Slots"; - const EVENT: &'static str = "NewLeasePeriod"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "A para has won the right to a continuous set of lease periods as a parachain."] - #[doc = "First balance is any extra amount reserved on top of the para's existing deposit."] - #[doc = "Second balance is the total amount reserved."] - #[doc = "`[parachain_id, leaser, period_begin, period_count, extra_reserved, total_amount]`"] - pub struct Leased( - pub runtime_types::polkadot_parachain::primitives::Id, - pub ::subxt::sp_core::crypto::AccountId32, - pub ::core::primitive::u32, - pub ::core::primitive::u32, - pub ::core::primitive::u128, - pub ::core::primitive::u128, - ); - impl ::subxt::Event for Leased { - const PALLET: &'static str = "Slots"; - const EVENT: &'static str = "Leased"; - } - } - pub mod storage { - use super::runtime_types; - pub struct Leases<'a>(pub &'a runtime_types::polkadot_parachain::primitives::Id); - impl ::subxt::StorageEntry for Leases<'_> { - const PALLET: &'static str = "Slots"; - const STORAGE: &'static str = "Leases"; - type Value = ::std::vec::Vec< - ::core::option::Option<( - ::subxt::sp_core::crypto::AccountId32, - ::core::primitive::u128, - )>, - >; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( - &self.0, - ::subxt::StorageHasher::Twox64Concat, - )]) - } - } - pub struct StorageApi<'a, T: ::subxt::Config> { - client: &'a ::subxt::Client, - } - impl<'a, T: ::subxt::Config> StorageApi<'a, T> { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { client } - } - #[doc = " Amounts held on deposit for each (possibly future) leased parachain."] - #[doc = ""] - #[doc = " The actual amount locked on its behalf by any account at any time is the maximum of the second values"] - #[doc = " of the items in this list whose first value is the account."] - #[doc = ""] - #[doc = " The first item in the list is the amount locked for the current Lease Period. 
Following"] - #[doc = " items are for the subsequent lease periods."] - #[doc = ""] - #[doc = " The default value (an empty list) implies that the parachain no longer exists (or never"] - #[doc = " existed) as far as this pallet is concerned."] - #[doc = ""] - #[doc = " If a parachain doesn't exist *yet* but is scheduled to exist in the future, then it"] - #[doc = " will be left-padded with one or more `None`s to denote the fact that nothing is held on"] - #[doc = " deposit for the non-existent chain currently, but is held at some point in the future."] - #[doc = ""] - #[doc = " It is illegal for a `None` value to trail in the list."] - pub async fn leases( - &self, - _0: &runtime_types::polkadot_parachain::primitives::Id, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::std::vec::Vec< - ::core::option::Option<( - ::subxt::sp_core::crypto::AccountId32, - ::core::primitive::u128, - )>, - >, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 83u8, 145u8, 119u8, 74u8, 166u8, 90u8, 141u8, 47u8, 125u8, 250u8, - 173u8, 63u8, 193u8, 78u8, 96u8, 119u8, 111u8, 126u8, 83u8, 83u8, 80u8, - 32u8, 43u8, 173u8, 123u8, 126u8, 132u8, 166u8, 252u8, 39u8, 18u8, 39u8, - ] - { - let entry = Leases(_0); - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Amounts held on deposit for each (possibly future) leased parachain."] - #[doc = ""] - #[doc = " The actual amount locked on its behalf by any account at any time is the maximum of the second values"] - #[doc = " of the items in this list whose first value is the account."] - #[doc = ""] - #[doc = " The first item in the list is the amount locked for the current Lease Period. Following"] - #[doc = " items are for the subsequent lease periods."] - #[doc = ""] - #[doc = " The default value (an empty list) implies that the parachain no longer exists (or never"] - #[doc = " existed) as far as this pallet is concerned."] - #[doc = ""] - #[doc = " If a parachain doesn't exist *yet* but is scheduled to exist in the future, then it"] - #[doc = " will be left-padded with one or more `None`s to denote the fact that nothing is held on"] - #[doc = " deposit for the non-existent chain currently, but is held at some point in the future."] - #[doc = ""] - #[doc = " It is illegal for a `None` value to trail in the list."] - pub async fn leases_iter( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result<::subxt::KeyIter<'a, T, Leases<'a>>, ::subxt::BasicError> - { - if self.client.metadata().storage_hash::()? - == [ - 83u8, 145u8, 119u8, 74u8, 166u8, 90u8, 141u8, 47u8, 125u8, 250u8, - 173u8, 63u8, 193u8, 78u8, 96u8, 119u8, 111u8, 126u8, 83u8, 83u8, 80u8, - 32u8, 43u8, 173u8, 123u8, 126u8, 132u8, 166u8, 252u8, 39u8, 18u8, 39u8, - ] - { - self.client.storage().iter(block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - pub mod constants { - use super::runtime_types; - pub struct ConstantsApi<'a, T: ::subxt::Config> { - client: &'a ::subxt::Client, - } - impl<'a, T: ::subxt::Config> ConstantsApi<'a, T> { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { client } - } - #[doc = " The number of blocks over which a single period lasts."] - pub fn lease_period( - &self, - ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> - { - if self - .client - .metadata() - .constant_hash("Slots", "LeasePeriod")? 
- == [ - 199u8, 146u8, 34u8, 83u8, 56u8, 115u8, 56u8, 28u8, 80u8, 78u8, 80u8, - 106u8, 53u8, 187u8, 228u8, 50u8, 192u8, 147u8, 102u8, 175u8, 145u8, - 103u8, 186u8, 172u8, 235u8, 174u8, 247u8, 121u8, 47u8, 193u8, 44u8, - 60u8, - ] - { - let pallet = self.client.metadata().pallet("Slots")?; - let constant = pallet.constant("LeasePeriod")?; - let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; - Ok(value) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The number of blocks to offset each lease period by."] - pub fn lease_offset( - &self, - ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> - { - if self - .client - .metadata() - .constant_hash("Slots", "LeaseOffset")? - == [ - 236u8, 50u8, 129u8, 231u8, 6u8, 181u8, 238u8, 115u8, 32u8, 62u8, 217u8, - 32u8, 198u8, 36u8, 84u8, 223u8, 239u8, 223u8, 53u8, 13u8, 21u8, 33u8, - 230u8, 17u8, 103u8, 37u8, 154u8, 230u8, 240u8, 143u8, 9u8, 179u8, - ] - { - let pallet = self.client.metadata().pallet("Slots")?; - let constant = pallet.constant("LeaseOffset")?; - let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; - Ok(value) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - } - pub mod paras_sudo_wrapper { - use super::root_mod; - use super::runtime_types; - pub mod calls { - use super::root_mod; - use super::runtime_types; - type DispatchError = runtime_types::sp_runtime::DispatchError; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct SudoScheduleParaInitialize { - pub id: runtime_types::polkadot_parachain::primitives::Id, - pub genesis: runtime_types::polkadot_runtime_parachains::paras::ParaGenesisArgs, - } - impl ::subxt::Call for SudoScheduleParaInitialize { - const PALLET: &'static str = "ParasSudoWrapper"; - const FUNCTION: &'static str = "sudo_schedule_para_initialize"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct SudoScheduleParaCleanup { - pub id: runtime_types::polkadot_parachain::primitives::Id, - } - impl ::subxt::Call for SudoScheduleParaCleanup { - const PALLET: &'static str = "ParasSudoWrapper"; - const FUNCTION: &'static str = "sudo_schedule_para_cleanup"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct SudoScheduleParathreadUpgrade { - pub id: runtime_types::polkadot_parachain::primitives::Id, - } - impl ::subxt::Call for SudoScheduleParathreadUpgrade { - const PALLET: &'static str = "ParasSudoWrapper"; - const FUNCTION: &'static str = "sudo_schedule_parathread_upgrade"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct SudoScheduleParachainDowngrade { - pub id: runtime_types::polkadot_parachain::primitives::Id, - } - impl ::subxt::Call for SudoScheduleParachainDowngrade { - const PALLET: &'static str = "ParasSudoWrapper"; - const FUNCTION: &'static str = "sudo_schedule_parachain_downgrade"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct SudoQueueDownwardXcm { - pub id: runtime_types::polkadot_parachain::primitives::Id, - pub xcm: ::std::boxed::Box, - } - impl ::subxt::Call for SudoQueueDownwardXcm { - const PALLET: &'static str = "ParasSudoWrapper"; - const FUNCTION: &'static str = "sudo_queue_downward_xcm"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct SudoEstablishHrmpChannel { - pub sender: 
runtime_types::polkadot_parachain::primitives::Id, - pub recipient: runtime_types::polkadot_parachain::primitives::Id, - pub max_capacity: ::core::primitive::u32, - pub max_message_size: ::core::primitive::u32, - } - impl ::subxt::Call for SudoEstablishHrmpChannel { - const PALLET: &'static str = "ParasSudoWrapper"; - const FUNCTION: &'static str = "sudo_establish_hrmp_channel"; - } - pub struct TransactionApi<'a, T: ::subxt::Config, X> { - client: &'a ::subxt::Client, - marker: ::core::marker::PhantomData, - } - impl<'a, T, X> TransactionApi<'a, T, X> - where - T: ::subxt::Config, - X: ::subxt::extrinsic::ExtrinsicParams, - { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { - client, - marker: ::core::marker::PhantomData, - } - } - #[doc = "Schedule a para to be initialized at the start of the next session."] - pub fn sudo_schedule_para_initialize( - &self, - id: runtime_types::polkadot_parachain::primitives::Id, - genesis: runtime_types::polkadot_runtime_parachains::paras::ParaGenesisArgs, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - SudoScheduleParaInitialize, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .call_hash::()? - == [ - 86u8, 164u8, 77u8, 56u8, 96u8, 214u8, 248u8, 7u8, 89u8, 247u8, 201u8, - 39u8, 212u8, 110u8, 252u8, 13u8, 2u8, 157u8, 243u8, 243u8, 22u8, 0u8, - 105u8, 138u8, 14u8, 232u8, 50u8, 121u8, 110u8, 222u8, 86u8, 47u8, - ] - { - let call = SudoScheduleParaInitialize { id, genesis }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Schedule a para to be cleaned up at the start of the next session."] - pub fn sudo_schedule_para_cleanup( - &self, - id: runtime_types::polkadot_parachain::primitives::Id, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - SudoScheduleParaCleanup, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .call_hash::()? - == [ - 108u8, 244u8, 138u8, 161u8, 12u8, 207u8, 245u8, 145u8, 139u8, 18u8, - 37u8, 156u8, 86u8, 114u8, 183u8, 19u8, 172u8, 209u8, 127u8, 255u8, - 217u8, 189u8, 24u8, 79u8, 93u8, 121u8, 9u8, 163u8, 84u8, 20u8, 212u8, - 222u8, - ] - { - let call = SudoScheduleParaCleanup { id }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Upgrade a parathread to a parachain"] - pub fn sudo_schedule_parathread_upgrade( - &self, - id: runtime_types::polkadot_parachain::primitives::Id, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - SudoScheduleParathreadUpgrade, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .call_hash::()? 
- == [ - 169u8, 58u8, 222u8, 27u8, 223u8, 115u8, 47u8, 226u8, 148u8, 82u8, 2u8, - 86u8, 135u8, 202u8, 102u8, 191u8, 40u8, 221u8, 170u8, 13u8, 225u8, - 131u8, 121u8, 27u8, 165u8, 179u8, 175u8, 34u8, 209u8, 115u8, 93u8, - 85u8, - ] - { - let call = SudoScheduleParathreadUpgrade { id }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Downgrade a parachain to a parathread"] - pub fn sudo_schedule_parachain_downgrade( - &self, - id: runtime_types::polkadot_parachain::primitives::Id, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - SudoScheduleParachainDowngrade, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .call_hash::()? - == [ - 183u8, 209u8, 11u8, 52u8, 110u8, 163u8, 61u8, 191u8, 87u8, 84u8, 179u8, - 101u8, 251u8, 145u8, 158u8, 249u8, 48u8, 229u8, 84u8, 247u8, 21u8, 4u8, - 181u8, 104u8, 224u8, 128u8, 126u8, 249u8, 146u8, 158u8, 233u8, 128u8, - ] - { - let call = SudoScheduleParachainDowngrade { id }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Send a downward XCM to the given para."] - #[doc = ""] - #[doc = "The given parachain should exist and the payload should not exceed the preconfigured size"] - #[doc = "`config.max_downward_message_size`."] - pub fn sudo_queue_downward_xcm( - &self, - id: runtime_types::polkadot_parachain::primitives::Id, - xcm: runtime_types::xcm::VersionedXcm, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - SudoQueueDownwardXcm, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 81u8, 30u8, 40u8, 17u8, 248u8, 225u8, 213u8, 76u8, 11u8, 167u8, 196u8, - 12u8, 113u8, 152u8, 98u8, 196u8, 204u8, 166u8, 103u8, 199u8, 146u8, - 98u8, 73u8, 188u8, 128u8, 100u8, 77u8, 203u8, 103u8, 139u8, 105u8, - 50u8, - ] - { - let call = SudoQueueDownwardXcm { - id, - xcm: ::std::boxed::Box::new(xcm), - }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Forcefully establish a channel from the sender to the recipient."] - #[doc = ""] - #[doc = "This is equivalent to sending an `Hrmp::hrmp_init_open_channel` extrinsic followed by"] - #[doc = "`Hrmp::hrmp_accept_open_channel`."] - pub fn sudo_establish_hrmp_channel( - &self, - sender: runtime_types::polkadot_parachain::primitives::Id, - recipient: runtime_types::polkadot_parachain::primitives::Id, - max_capacity: ::core::primitive::u32, - max_message_size: ::core::primitive::u32, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - SudoEstablishHrmpChannel, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .call_hash::()? 
- == [ - 37u8, 236u8, 235u8, 162u8, 207u8, 3u8, 97u8, 139u8, 72u8, 211u8, 203u8, - 78u8, 188u8, 159u8, 108u8, 13u8, 149u8, 224u8, 51u8, 96u8, 14u8, 60u8, - 124u8, 249u8, 48u8, 30u8, 6u8, 211u8, 205u8, 230u8, 252u8, 77u8, - ] - { - let call = SudoEstablishHrmpChannel { - sender, - recipient, - max_capacity, - max_message_size, - }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - } - pub mod assigned_slots { - use super::root_mod; - use super::runtime_types; - pub mod calls { - use super::root_mod; - use super::runtime_types; - type DispatchError = runtime_types::sp_runtime::DispatchError; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct AssignPermParachainSlot { - pub id: runtime_types::polkadot_parachain::primitives::Id, - } - impl ::subxt::Call for AssignPermParachainSlot { - const PALLET: &'static str = "AssignedSlots"; - const FUNCTION: &'static str = "assign_perm_parachain_slot"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct AssignTempParachainSlot { - pub id: runtime_types::polkadot_parachain::primitives::Id, - pub lease_period_start: - runtime_types::polkadot_runtime_common::assigned_slots::SlotLeasePeriodStart, - } - impl ::subxt::Call for AssignTempParachainSlot { - const PALLET: &'static str = "AssignedSlots"; - const FUNCTION: &'static str = "assign_temp_parachain_slot"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct UnassignParachainSlot { - pub id: runtime_types::polkadot_parachain::primitives::Id, - } - impl ::subxt::Call for UnassignParachainSlot { - const PALLET: &'static str = "AssignedSlots"; - const FUNCTION: &'static str = "unassign_parachain_slot"; - } - pub struct TransactionApi<'a, T: ::subxt::Config, X> { - client: &'a ::subxt::Client, - marker: ::core::marker::PhantomData, - } - impl<'a, T, X> TransactionApi<'a, T, X> - where - T: ::subxt::Config, - X: ::subxt::extrinsic::ExtrinsicParams, - { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { - client, - marker: ::core::marker::PhantomData, - } - } - #[doc = "Assign a permanent parachain slot and immediately create a lease for it."] - pub fn assign_perm_parachain_slot( - &self, - id: runtime_types::polkadot_parachain::primitives::Id, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - AssignPermParachainSlot, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .call_hash::()? - == [ - 182u8, 103u8, 59u8, 125u8, 140u8, 208u8, 37u8, 240u8, 39u8, 40u8, 34u8, - 213u8, 245u8, 19u8, 51u8, 202u8, 153u8, 174u8, 151u8, 229u8, 26u8, - 252u8, 91u8, 36u8, 67u8, 87u8, 249u8, 89u8, 149u8, 178u8, 87u8, 212u8, - ] - { - let call = AssignPermParachainSlot { id }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Assign a temporary parachain slot. 
The function tries to create a lease for it"] - #[doc = "immediately if `SlotLeasePeriodStart::Current` is specified, and if the number"] - #[doc = "of currently active temporary slots is below `MaxTemporarySlotPerLeasePeriod`."] - pub fn assign_temp_parachain_slot( - &self, - id: runtime_types::polkadot_parachain::primitives::Id, - lease_period_start : runtime_types :: polkadot_runtime_common :: assigned_slots :: SlotLeasePeriodStart, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - AssignTempParachainSlot, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .call_hash::()? - == [ - 166u8, 193u8, 161u8, 214u8, 110u8, 114u8, 94u8, 122u8, 247u8, 90u8, - 4u8, 153u8, 252u8, 215u8, 19u8, 80u8, 91u8, 82u8, 153u8, 101u8, 174u8, - 205u8, 41u8, 117u8, 144u8, 243u8, 206u8, 146u8, 170u8, 124u8, 53u8, - 109u8, - ] - { - let call = AssignTempParachainSlot { - id, - lease_period_start, - }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Unassign a permanent or temporary parachain slot"] - pub fn unassign_parachain_slot( - &self, - id: runtime_types::polkadot_parachain::primitives::Id, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - UnassignParachainSlot, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .call_hash::()? - == [ - 70u8, 155u8, 13u8, 223u8, 35u8, 4u8, 112u8, 133u8, 100u8, 136u8, 68u8, - 253u8, 52u8, 210u8, 70u8, 60u8, 13u8, 73u8, 39u8, 5u8, 163u8, 39u8, - 143u8, 187u8, 46u8, 54u8, 107u8, 160u8, 48u8, 227u8, 107u8, 106u8, - ] - { - let call = UnassignParachainSlot { id }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - pub type Event = runtime_types::polkadot_runtime_common::assigned_slots::pallet::Event; - pub mod events { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "A para was assigned a permanent parachain slot"] - pub struct PermanentSlotAssigned(pub runtime_types::polkadot_parachain::primitives::Id); - impl ::subxt::Event for PermanentSlotAssigned { - const PALLET: &'static str = "AssignedSlots"; - const EVENT: &'static str = "PermanentSlotAssigned"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "A para was assigned a temporary parachain slot"] - pub struct TemporarySlotAssigned(pub runtime_types::polkadot_parachain::primitives::Id); - impl ::subxt::Event for TemporarySlotAssigned { - const PALLET: &'static str = "AssignedSlots"; - const EVENT: &'static str = "TemporarySlotAssigned"; - } - } - pub mod storage { - use super::runtime_types; - pub struct PermanentSlots<'a>( - pub &'a runtime_types::polkadot_parachain::primitives::Id, - ); - impl ::subxt::StorageEntry for PermanentSlots<'_> { - const PALLET: &'static str = "AssignedSlots"; - const STORAGE: &'static str = "PermanentSlots"; - type Value = (::core::primitive::u32, ::core::primitive::u32); - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( - &self.0, - ::subxt::StorageHasher::Twox64Concat, - )]) - } - } - pub struct PermanentSlotCount; - impl ::subxt::StorageEntry for PermanentSlotCount { - const PALLET: &'static str = "AssignedSlots"; - const STORAGE: &'static str = "PermanentSlotCount"; - type Value = 
::core::primitive::u32; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct TemporarySlots<'a>( - pub &'a runtime_types::polkadot_parachain::primitives::Id, - ); - impl ::subxt::StorageEntry for TemporarySlots<'_> { - const PALLET: &'static str = "AssignedSlots"; - const STORAGE: &'static str = "TemporarySlots"; - type Value = - runtime_types::polkadot_runtime_common::assigned_slots::ParachainTemporarySlot< - ::subxt::sp_core::crypto::AccountId32, - ::core::primitive::u32, - >; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( - &self.0, - ::subxt::StorageHasher::Twox64Concat, - )]) - } - } - pub struct TemporarySlotCount; - impl ::subxt::StorageEntry for TemporarySlotCount { - const PALLET: &'static str = "AssignedSlots"; - const STORAGE: &'static str = "TemporarySlotCount"; - type Value = ::core::primitive::u32; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct ActiveTemporarySlotCount; - impl ::subxt::StorageEntry for ActiveTemporarySlotCount { - const PALLET: &'static str = "AssignedSlots"; - const STORAGE: &'static str = "ActiveTemporarySlotCount"; - type Value = ::core::primitive::u32; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct StorageApi<'a, T: ::subxt::Config> { - client: &'a ::subxt::Client, - } - impl<'a, T: ::subxt::Config> StorageApi<'a, T> { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { client } - } - #[doc = " Assigned permanent slots, with their start lease period, and duration."] - pub async fn permanent_slots( - &self, - _0: &runtime_types::polkadot_parachain::primitives::Id, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::core::option::Option<(::core::primitive::u32, ::core::primitive::u32)>, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 128u8, 156u8, 45u8, 33u8, 173u8, 102u8, 61u8, 221u8, 125u8, 205u8, - 152u8, 190u8, 12u8, 209u8, 203u8, 24u8, 208u8, 50u8, 234u8, 124u8, - 172u8, 20u8, 20u8, 196u8, 232u8, 177u8, 117u8, 82u8, 116u8, 151u8, - 199u8, 204u8, - ] - { - let entry = PermanentSlots(_0); - self.client.storage().fetch(&entry, block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Assigned permanent slots, with their start lease period, and duration."] - pub async fn permanent_slots_iter( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::subxt::KeyIter<'a, T, PermanentSlots<'a>>, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 128u8, 156u8, 45u8, 33u8, 173u8, 102u8, 61u8, 221u8, 125u8, 205u8, - 152u8, 190u8, 12u8, 209u8, 203u8, 24u8, 208u8, 50u8, 234u8, 124u8, - 172u8, 20u8, 20u8, 196u8, 232u8, 177u8, 117u8, 82u8, 116u8, 151u8, - 199u8, 204u8, - ] - { - self.client.storage().iter(block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Number of assigned (and active) permanent slots."] - pub async fn permanent_slot_count( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> - { - if self - .client - .metadata() - .storage_hash::()? 
- == [ - 186u8, 224u8, 144u8, 167u8, 64u8, 193u8, 68u8, 25u8, 146u8, 86u8, - 109u8, 81u8, 100u8, 197u8, 25u8, 4u8, 27u8, 131u8, 162u8, 7u8, 148u8, - 198u8, 162u8, 100u8, 197u8, 86u8, 37u8, 43u8, 240u8, 25u8, 18u8, 66u8, - ] - { - let entry = PermanentSlotCount; - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Assigned temporary slots."] pub async fn temporary_slots (& self , _0 : & runtime_types :: polkadot_parachain :: primitives :: Id , block_hash : :: core :: option :: Option < T :: Hash > ,) -> :: core :: result :: Result < :: core :: option :: Option < runtime_types :: polkadot_runtime_common :: assigned_slots :: ParachainTemporarySlot < :: subxt :: sp_core :: crypto :: AccountId32 , :: core :: primitive :: u32 > > , :: subxt :: BasicError >{ - if self.client.metadata().storage_hash::()? - == [ - 163u8, 37u8, 72u8, 142u8, 172u8, 117u8, 146u8, 111u8, 10u8, 100u8, - 92u8, 223u8, 253u8, 250u8, 19u8, 187u8, 227u8, 222u8, 91u8, 73u8, - 156u8, 158u8, 63u8, 183u8, 69u8, 16u8, 225u8, 58u8, 85u8, 89u8, 15u8, - 15u8, - ] - { - let entry = TemporarySlots(_0); - self.client.storage().fetch(&entry, block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Assigned temporary slots."] - pub async fn temporary_slots_iter( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::subxt::KeyIter<'a, T, TemporarySlots<'a>>, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 163u8, 37u8, 72u8, 142u8, 172u8, 117u8, 146u8, 111u8, 10u8, 100u8, - 92u8, 223u8, 253u8, 250u8, 19u8, 187u8, 227u8, 222u8, 91u8, 73u8, - 156u8, 158u8, 63u8, 183u8, 69u8, 16u8, 225u8, 58u8, 85u8, 89u8, 15u8, - 15u8, - ] - { - self.client.storage().iter(block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Number of assigned temporary slots."] - pub async fn temporary_slot_count( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> - { - if self - .client - .metadata() - .storage_hash::()? - == [ - 19u8, 243u8, 53u8, 131u8, 195u8, 143u8, 31u8, 224u8, 182u8, 69u8, - 209u8, 123u8, 82u8, 155u8, 96u8, 242u8, 109u8, 6u8, 27u8, 193u8, 251u8, - 45u8, 204u8, 10u8, 43u8, 185u8, 152u8, 181u8, 35u8, 183u8, 235u8, - 204u8, - ] - { - let entry = TemporarySlotCount; - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Number of active temporary slots in current slot lease period."] - pub async fn active_temporary_slot_count( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> - { - if self - .client - .metadata() - .storage_hash::()? 
- == [ - 72u8, 42u8, 13u8, 42u8, 195u8, 143u8, 174u8, 137u8, 110u8, 144u8, - 190u8, 117u8, 102u8, 91u8, 66u8, 131u8, 69u8, 139u8, 156u8, 149u8, - 99u8, 177u8, 118u8, 72u8, 168u8, 191u8, 198u8, 135u8, 72u8, 192u8, - 130u8, 139u8, - ] - { - let entry = ActiveTemporarySlotCount; - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - pub mod constants { - use super::runtime_types; - pub struct ConstantsApi<'a, T: ::subxt::Config> { - client: &'a ::subxt::Client, - } - impl<'a, T: ::subxt::Config> ConstantsApi<'a, T> { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { client } - } - #[doc = " The number of lease periods a permanent parachain slot lasts."] - pub fn permanent_slot_lease_period_length( - &self, - ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> - { - if self - .client - .metadata() - .constant_hash("AssignedSlots", "PermanentSlotLeasePeriodLength")? - == [ - 197u8, 245u8, 45u8, 120u8, 116u8, 188u8, 189u8, 76u8, 192u8, 116u8, - 209u8, 236u8, 222u8, 167u8, 208u8, 214u8, 153u8, 142u8, 201u8, 25u8, - 34u8, 104u8, 166u8, 229u8, 62u8, 169u8, 76u8, 118u8, 72u8, 170u8, - 202u8, 37u8, - ] - { - let pallet = self.client.metadata().pallet("AssignedSlots")?; - let constant = pallet.constant("PermanentSlotLeasePeriodLength")?; - let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; - Ok(value) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The number of lease periods a temporary parachain slot lasts."] - pub fn temporary_slot_lease_period_length( - &self, - ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> - { - if self - .client - .metadata() - .constant_hash("AssignedSlots", "TemporarySlotLeasePeriodLength")? - == [ - 160u8, 81u8, 143u8, 128u8, 192u8, 146u8, 202u8, 116u8, 139u8, 129u8, - 88u8, 164u8, 184u8, 60u8, 5u8, 56u8, 73u8, 212u8, 151u8, 207u8, 103u8, - 234u8, 152u8, 57u8, 230u8, 97u8, 135u8, 234u8, 34u8, 207u8, 116u8, - 164u8, - ] - { - let pallet = self.client.metadata().pallet("AssignedSlots")?; - let constant = pallet.constant("TemporarySlotLeasePeriodLength")?; - let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; - Ok(value) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The max number of permanent slots that can be assigned."] - pub fn max_permanent_slots( - &self, - ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> - { - if self - .client - .metadata() - .constant_hash("AssignedSlots", "MaxPermanentSlots")? - == [ - 75u8, 219u8, 223u8, 108u8, 146u8, 170u8, 51u8, 167u8, 148u8, 224u8, - 43u8, 171u8, 119u8, 109u8, 29u8, 18u8, 235u8, 142u8, 46u8, 172u8, 33u8, - 164u8, 74u8, 200u8, 206u8, 184u8, 170u8, 212u8, 233u8, 202u8, 191u8, - 47u8, - ] - { - let pallet = self.client.metadata().pallet("AssignedSlots")?; - let constant = pallet.constant("MaxPermanentSlots")?; - let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; - Ok(value) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The max number of temporary slots that can be assigned."] - pub fn max_temporary_slots( - &self, - ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> - { - if self - .client - .metadata() - .constant_hash("AssignedSlots", "MaxTemporarySlots")? 
- == [ - 165u8, 58u8, 243u8, 192u8, 228u8, 193u8, 249u8, 135u8, 28u8, 120u8, - 142u8, 150u8, 118u8, 250u8, 26u8, 107u8, 167u8, 219u8, 137u8, 231u8, - 8u8, 189u8, 114u8, 249u8, 86u8, 90u8, 224u8, 234u8, 229u8, 19u8, 65u8, - 211u8, - ] - { - let pallet = self.client.metadata().pallet("AssignedSlots")?; - let constant = pallet.constant("MaxTemporarySlots")?; - let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; - Ok(value) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The max number of temporary slots to be scheduled per lease periods."] - pub fn max_temporary_slot_per_lease_period( - &self, - ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> - { - if self - .client - .metadata() - .constant_hash("AssignedSlots", "MaxTemporarySlotPerLeasePeriod")? - == [ - 134u8, 16u8, 150u8, 86u8, 147u8, 116u8, 41u8, 63u8, 214u8, 209u8, 81u8, - 194u8, 90u8, 90u8, 12u8, 174u8, 120u8, 81u8, 50u8, 131u8, 35u8, 180u8, - 81u8, 105u8, 237u8, 186u8, 234u8, 114u8, 88u8, 106u8, 64u8, 254u8, - ] - { - let pallet = self.client.metadata().pallet("AssignedSlots")?; - let constant = pallet.constant("MaxTemporarySlotPerLeasePeriod")?; - let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; - Ok(value) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - } - pub mod sudo { - use super::root_mod; - use super::runtime_types; - pub mod calls { - use super::root_mod; - use super::runtime_types; - type DispatchError = runtime_types::sp_runtime::DispatchError; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct Sudo { - pub call: ::std::boxed::Box, - } - impl ::subxt::Call for Sudo { - const PALLET: &'static str = "Sudo"; - const FUNCTION: &'static str = "sudo"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct SudoUncheckedWeight { - pub call: ::std::boxed::Box, - pub weight: ::core::primitive::u64, - } - impl ::subxt::Call for SudoUncheckedWeight { - const PALLET: &'static str = "Sudo"; - const FUNCTION: &'static str = "sudo_unchecked_weight"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct SetKey { - pub new: - ::subxt::sp_runtime::MultiAddress<::subxt::sp_core::crypto::AccountId32, ()>, - } - impl ::subxt::Call for SetKey { - const PALLET: &'static str = "Sudo"; - const FUNCTION: &'static str = "set_key"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct SudoAs { - pub who: - ::subxt::sp_runtime::MultiAddress<::subxt::sp_core::crypto::AccountId32, ()>, - pub call: ::std::boxed::Box, - } - impl ::subxt::Call for SudoAs { - const PALLET: &'static str = "Sudo"; - const FUNCTION: &'static str = "sudo_as"; - } - pub struct TransactionApi<'a, T: ::subxt::Config, X> { - client: &'a ::subxt::Client, - marker: ::core::marker::PhantomData, - } - impl<'a, T, X> TransactionApi<'a, T, X> - where - T: ::subxt::Config, - X: ::subxt::extrinsic::ExtrinsicParams, - { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { - client, - marker: ::core::marker::PhantomData, - } - } - #[doc = "Authenticates the sudo key and dispatches a function call with `Root` origin."] - #[doc = ""] - #[doc = "The dispatch origin for this call must be _Signed_."] - #[doc = ""] - #[doc = "# "] - #[doc = "- O(1)."] - #[doc = "- Limited storage reads."] - #[doc = "- One DB write (event)."] - #[doc = "- Weight of derivative `call` execution + 10,000."] - 
#[doc = "# "] - pub fn sudo( - &self, - call: runtime_types::rococo_runtime::Call, - ) -> Result< - ::subxt::SubmittableExtrinsic<'a, T, X, Sudo, DispatchError, root_mod::Event>, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 144u8, 36u8, 5u8, 240u8, 96u8, 128u8, 55u8, 40u8, 57u8, 205u8, 184u8, - 6u8, 181u8, 43u8, 165u8, 1u8, 148u8, 140u8, 80u8, 207u8, 210u8, 224u8, - 232u8, 252u8, 193u8, 43u8, 36u8, 203u8, 108u8, 150u8, 184u8, 199u8, - ] - { - let call = Sudo { - call: ::std::boxed::Box::new(call), - }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Authenticates the sudo key and dispatches a function call with `Root` origin."] - #[doc = "This function does not check the weight of the call, and instead allows the"] - #[doc = "Sudo user to specify the weight of the call."] - #[doc = ""] - #[doc = "The dispatch origin for this call must be _Signed_."] - #[doc = ""] - #[doc = "# "] - #[doc = "- O(1)."] - #[doc = "- The weight of this call is defined by the caller."] - #[doc = "# "] - pub fn sudo_unchecked_weight( - &self, - call: runtime_types::rococo_runtime::Call, - weight: ::core::primitive::u64, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - SudoUncheckedWeight, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 55u8, 233u8, 183u8, 246u8, 146u8, 48u8, 56u8, 202u8, 43u8, 90u8, 189u8, - 171u8, 47u8, 55u8, 7u8, 163u8, 136u8, 155u8, 23u8, 78u8, 58u8, 24u8, - 189u8, 2u8, 204u8, 15u8, 115u8, 222u8, 76u8, 94u8, 244u8, 43u8, - ] - { - let call = SudoUncheckedWeight { - call: ::std::boxed::Box::new(call), - weight, - }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Authenticates the current sudo key and sets the given AccountId (`new`) as the new sudo"] - #[doc = "key."] - #[doc = ""] - #[doc = "The dispatch origin for this call must be _Signed_."] - #[doc = ""] - #[doc = "# "] - #[doc = "- O(1)."] - #[doc = "- Limited storage reads."] - #[doc = "- One DB change."] - #[doc = "# "] - pub fn set_key( - &self, - new: ::subxt::sp_runtime::MultiAddress< - ::subxt::sp_core::crypto::AccountId32, - (), - >, - ) -> Result< - ::subxt::SubmittableExtrinsic<'a, T, X, SetKey, DispatchError, root_mod::Event>, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? 
- == [ - 77u8, 253u8, 211u8, 157u8, 74u8, 92u8, 1u8, 102u8, 178u8, 103u8, 126u8, - 56u8, 156u8, 105u8, 45u8, 44u8, 64u8, 154u8, 163u8, 102u8, 93u8, 93u8, - 212u8, 5u8, 148u8, 184u8, 22u8, 135u8, 110u8, 102u8, 44u8, 172u8, - ] - { - let call = SetKey { new }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Authenticates the sudo key and dispatches a function call with `Signed` origin from"] - #[doc = "a given account."] - #[doc = ""] - #[doc = "The dispatch origin for this call must be _Signed_."] - #[doc = ""] - #[doc = "# "] - #[doc = "- O(1)."] - #[doc = "- Limited storage reads."] - #[doc = "- One DB write (event)."] - #[doc = "- Weight of derivative `call` execution + 10,000."] - #[doc = "# "] - pub fn sudo_as( - &self, - who: ::subxt::sp_runtime::MultiAddress< - ::subxt::sp_core::crypto::AccountId32, - (), - >, - call: runtime_types::rococo_runtime::Call, - ) -> Result< - ::subxt::SubmittableExtrinsic<'a, T, X, SudoAs, DispatchError, root_mod::Event>, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 119u8, 211u8, 130u8, 52u8, 139u8, 44u8, 67u8, 44u8, 219u8, 122u8, - 101u8, 134u8, 13u8, 199u8, 98u8, 27u8, 145u8, 98u8, 213u8, 5u8, 225u8, - 94u8, 177u8, 56u8, 117u8, 242u8, 206u8, 119u8, 76u8, 207u8, 210u8, - 19u8, - ] - { - let call = SudoAs { - who, - call: ::std::boxed::Box::new(call), - }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - pub type Event = runtime_types::pallet_sudo::pallet::Event; - pub mod events { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "A sudo just took place. \\[result\\]"] - pub struct Sudid { - pub sudo_result: - ::core::result::Result<(), runtime_types::sp_runtime::DispatchError>, - } - impl ::subxt::Event for Sudid { - const PALLET: &'static str = "Sudo"; - const EVENT: &'static str = "Sudid"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "The \\[sudoer\\] just switched identity; the old key is supplied if one existed."] - pub struct KeyChanged { - pub old_sudoer: ::core::option::Option<::subxt::sp_core::crypto::AccountId32>, - } - impl ::subxt::Event for KeyChanged { - const PALLET: &'static str = "Sudo"; - const EVENT: &'static str = "KeyChanged"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "A sudo just took place. 
\\[result\\]"] - pub struct SudoAsDone { - pub sudo_result: - ::core::result::Result<(), runtime_types::sp_runtime::DispatchError>, - } - impl ::subxt::Event for SudoAsDone { - const PALLET: &'static str = "Sudo"; - const EVENT: &'static str = "SudoAsDone"; - } - } - pub mod storage { - use super::runtime_types; - pub struct Key; - impl ::subxt::StorageEntry for Key { - const PALLET: &'static str = "Sudo"; - const STORAGE: &'static str = "Key"; - type Value = ::subxt::sp_core::crypto::AccountId32; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct StorageApi<'a, T: ::subxt::Config> { - client: &'a ::subxt::Client, - } - impl<'a, T: ::subxt::Config> StorageApi<'a, T> { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { client } - } - #[doc = " The `AccountId` of the sudo key."] - pub async fn key( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::core::option::Option<::subxt::sp_core::crypto::AccountId32>, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 222u8, 90u8, 158u8, 233u8, 184u8, 23u8, 141u8, 135u8, 81u8, 187u8, - 47u8, 100u8, 30u8, 81u8, 239u8, 197u8, 249u8, 253u8, 73u8, 207u8, - 161u8, 141u8, 174u8, 59u8, 74u8, 181u8, 10u8, 90u8, 22u8, 109u8, 62u8, - 27u8, - ] - { - let entry = Key; - self.client.storage().fetch(&entry, block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - } - pub mod mmr { - use super::root_mod; - use super::runtime_types; - pub mod storage { - use super::runtime_types; - pub struct RootHash; - impl ::subxt::StorageEntry for RootHash { - const PALLET: &'static str = "Mmr"; - const STORAGE: &'static str = "RootHash"; - type Value = ::subxt::sp_core::H256; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct NumberOfLeaves; - impl ::subxt::StorageEntry for NumberOfLeaves { - const PALLET: &'static str = "Mmr"; - const STORAGE: &'static str = "NumberOfLeaves"; - type Value = ::core::primitive::u64; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct Nodes<'a>(pub &'a ::core::primitive::u64); - impl ::subxt::StorageEntry for Nodes<'_> { - const PALLET: &'static str = "Mmr"; - const STORAGE: &'static str = "Nodes"; - type Value = ::std::vec::Vec<::core::primitive::u8>; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( - &self.0, - ::subxt::StorageHasher::Identity, - )]) - } - } - pub struct Peaks<'a>(pub &'a ::core::primitive::u64); - impl ::subxt::StorageEntry for Peaks<'_> { - const PALLET: &'static str = "Mmr"; - const STORAGE: &'static str = "Peaks"; - type Value = ::subxt::sp_core::H256; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( - &self.0, - ::subxt::StorageHasher::Identity, - )]) - } - } - pub struct StorageApi<'a, T: ::subxt::Config> { - client: &'a ::subxt::Client, - } - impl<'a, T: ::subxt::Config> StorageApi<'a, T> { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { client } - } - #[doc = " Latest MMR Root hash."] - pub async fn root_hash( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result<::subxt::sp_core::H256, ::subxt::BasicError> - { - if self.client.metadata().storage_hash::()? 
- == [ - 235u8, 26u8, 148u8, 114u8, 90u8, 241u8, 74u8, 26u8, 120u8, 199u8, - 205u8, 157u8, 22u8, 104u8, 182u8, 167u8, 93u8, 254u8, 95u8, 143u8, - 67u8, 0u8, 183u8, 46u8, 118u8, 61u8, 55u8, 31u8, 76u8, 232u8, 23u8, - 153u8, - ] - { - let entry = RootHash; - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Current size of the MMR (number of leaves)."] - pub async fn number_of_leaves( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result<::core::primitive::u64, ::subxt::BasicError> - { - if self.client.metadata().storage_hash::()? - == [ - 138u8, 124u8, 23u8, 186u8, 255u8, 231u8, 187u8, 122u8, 213u8, 160u8, - 29u8, 24u8, 88u8, 98u8, 171u8, 36u8, 195u8, 216u8, 27u8, 190u8, 192u8, - 152u8, 8u8, 13u8, 210u8, 232u8, 45u8, 184u8, 240u8, 255u8, 156u8, - 204u8, - ] - { - let entry = NumberOfLeaves; - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " All known nodes & leaves in the MMR, just until offchain db is fork aware"] - pub async fn nodes( - &self, - _0: &::core::primitive::u64, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::core::option::Option<::std::vec::Vec<::core::primitive::u8>>, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 113u8, 225u8, 71u8, 185u8, 124u8, 250u8, 5u8, 111u8, 46u8, 137u8, 40u8, - 37u8, 190u8, 232u8, 247u8, 194u8, 199u8, 28u8, 48u8, 224u8, 131u8, 6u8, - 213u8, 79u8, 238u8, 33u8, 199u8, 124u8, 238u8, 237u8, 247u8, 226u8, - ] - { - let entry = Nodes(_0); - self.client.storage().fetch(&entry, block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " All known nodes & leaves in the MMR, just until offchain db is fork aware"] - pub async fn nodes_iter( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result<::subxt::KeyIter<'a, T, Nodes<'a>>, ::subxt::BasicError> - { - if self.client.metadata().storage_hash::()? - == [ - 113u8, 225u8, 71u8, 185u8, 124u8, 250u8, 5u8, 111u8, 46u8, 137u8, 40u8, - 37u8, 190u8, 232u8, 247u8, 194u8, 199u8, 28u8, 48u8, 224u8, 131u8, 6u8, - 213u8, 79u8, 238u8, 33u8, 199u8, 124u8, 238u8, 237u8, 247u8, 226u8, - ] - { - self.client.storage().iter(block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Hashes of the nodes in the MMR."] - #[doc = ""] - #[doc = " Note this collection only contains MMR peaks, the inner nodes (and leaves)"] - #[doc = " are pruned and only stored in the Offchain DB."] - pub async fn peaks( - &self, - _0: &::core::primitive::u64, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::core::option::Option<::subxt::sp_core::H256>, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? 
- == [ - 12u8, 106u8, 223u8, 202u8, 71u8, 155u8, 228u8, 220u8, 197u8, 143u8, - 133u8, 165u8, 62u8, 172u8, 183u8, 222u8, 58u8, 225u8, 24u8, 57u8, - 144u8, 206u8, 66u8, 204u8, 16u8, 31u8, 139u8, 19u8, 175u8, 235u8, - 204u8, 135u8, - ] - { - let entry = Peaks(_0); - self.client.storage().fetch(&entry, block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Hashes of the nodes in the MMR."] - #[doc = ""] - #[doc = " Note this collection only contains MMR peaks, the inner nodes (and leaves)"] - #[doc = " are pruned and only stored in the Offchain DB."] - pub async fn peaks_iter( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result<::subxt::KeyIter<'a, T, Peaks<'a>>, ::subxt::BasicError> - { - if self.client.metadata().storage_hash::()? - == [ - 12u8, 106u8, 223u8, 202u8, 71u8, 155u8, 228u8, 220u8, 197u8, 143u8, - 133u8, 165u8, 62u8, 172u8, 183u8, 222u8, 58u8, 225u8, 24u8, 57u8, - 144u8, 206u8, 66u8, 204u8, 16u8, 31u8, 139u8, 19u8, 175u8, 235u8, - 204u8, 135u8, - ] - { - self.client.storage().iter(block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - } - pub mod beefy { - use super::root_mod; - use super::runtime_types; - pub mod calls { - use super::root_mod; - use super::runtime_types; - type DispatchError = runtime_types::sp_runtime::DispatchError; - pub struct TransactionApi<'a, T: ::subxt::Config, X> { - client: &'a ::subxt::Client, - marker: ::core::marker::PhantomData, - } - impl<'a, T, X> TransactionApi<'a, T, X> - where - T: ::subxt::Config, - X: ::subxt::extrinsic::ExtrinsicParams, - { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { - client, - marker: ::core::marker::PhantomData, - } - } - } - } - pub mod storage { - use super::runtime_types; - pub struct Authorities; - impl ::subxt::StorageEntry for Authorities { - const PALLET: &'static str = "Beefy"; - const STORAGE: &'static str = "Authorities"; - type Value = ::std::vec::Vec; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct ValidatorSetId; - impl ::subxt::StorageEntry for ValidatorSetId { - const PALLET: &'static str = "Beefy"; - const STORAGE: &'static str = "ValidatorSetId"; - type Value = ::core::primitive::u64; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct NextAuthorities; - impl ::subxt::StorageEntry for NextAuthorities { - const PALLET: &'static str = "Beefy"; - const STORAGE: &'static str = "NextAuthorities"; - type Value = ::std::vec::Vec; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct StorageApi<'a, T: ::subxt::Config> { - client: &'a ::subxt::Client, - } - impl<'a, T: ::subxt::Config> StorageApi<'a, T> { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { client } - } - #[doc = " The current authorities set"] - pub async fn authorities( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::std::vec::Vec, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? 
- == [ - 140u8, 118u8, 62u8, 97u8, 16u8, 215u8, 8u8, 84u8, 193u8, 102u8, 249u8, - 181u8, 228u8, 155u8, 194u8, 255u8, 209u8, 200u8, 186u8, 7u8, 246u8, - 149u8, 147u8, 224u8, 171u8, 218u8, 168u8, 130u8, 186u8, 119u8, 72u8, - 194u8, - ] - { - let entry = Authorities; - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The current validator set id"] - pub async fn validator_set_id( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result<::core::primitive::u64, ::subxt::BasicError> - { - if self.client.metadata().storage_hash::()? - == [ - 132u8, 47u8, 139u8, 239u8, 214u8, 179u8, 24u8, 63u8, 55u8, 154u8, - 248u8, 206u8, 73u8, 7u8, 52u8, 135u8, 54u8, 111u8, 250u8, 106u8, 71u8, - 78u8, 44u8, 44u8, 235u8, 177u8, 36u8, 112u8, 17u8, 122u8, 252u8, 80u8, - ] - { - let entry = ValidatorSetId; - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Authorities set scheduled to be used with the next session"] - pub async fn next_authorities( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::std::vec::Vec, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 120u8, 191u8, 31u8, 28u8, 34u8, 82u8, 116u8, 34u8, 81u8, 176u8, 225u8, - 117u8, 7u8, 58u8, 241u8, 174u8, 246u8, 230u8, 210u8, 6u8, 22u8, 191u8, - 150u8, 77u8, 102u8, 54u8, 25u8, 216u8, 64u8, 100u8, 247u8, 107u8, - ] - { - let entry = NextAuthorities; - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - } - pub mod mmr_leaf { - use super::root_mod; - use super::runtime_types; - pub mod storage { - use super::runtime_types; - pub struct BeefyNextAuthorities; - impl ::subxt::StorageEntry for BeefyNextAuthorities { - const PALLET: &'static str = "MmrLeaf"; - const STORAGE: &'static str = "BeefyNextAuthorities"; - type Value = runtime_types::beefy_primitives::mmr::BeefyNextAuthoritySet< - ::subxt::sp_core::H256, - >; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct StorageApi<'a, T: ::subxt::Config> { - client: &'a ::subxt::Client, - } - impl<'a, T: ::subxt::Config> StorageApi<'a, T> { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { client } - } - #[doc = " Details of next BEEFY authority set."] - #[doc = ""] - #[doc = " This storage entry is used as cache for calls to `update_beefy_next_authority_set`."] - pub async fn beefy_next_authorities( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - runtime_types::beefy_primitives::mmr::BeefyNextAuthoritySet< - ::subxt::sp_core::H256, - >, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .storage_hash::()? 
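The Beefy pallet storage defined above is what a BEEFY light client needs in order to follow authority set rotations: the current authority keys, the running validator set id, and the set scheduled for the next session. Reading them through the generated API, under the same placeholder `runtime` module and `DefaultConfig` assumptions as the earlier sketch:

use subxt::{BasicError, DefaultConfig};

async fn beefy_authority_state(client: &subxt::Client<DefaultConfig>) -> Result<(), BasicError> {
    let beefy = runtime::beefy::storage::StorageApi::new(client);
    let set_id = beefy.validator_set_id(None).await?;
    let current = beefy.authorities(None).await?;
    let next = beefy.next_authorities(None).await?;
    println!(
        "validator set {}: {} current authorities, {} scheduled next",
        set_id,
        current.len(),
        next.len()
    );
    Ok(())
}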
- == [ - 219u8, 54u8, 44u8, 30u8, 213u8, 71u8, 67u8, 245u8, 172u8, 191u8, 183u8, - 146u8, 165u8, 202u8, 44u8, 121u8, 201u8, 252u8, 68u8, 93u8, 43u8, - 127u8, 189u8, 113u8, 196u8, 125u8, 184u8, 170u8, 9u8, 54u8, 69u8, - 104u8, - ] - { - let entry = BeefyNextAuthorities; - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - } - pub mod validator_manager { - use super::root_mod; - use super::runtime_types; - pub mod calls { - use super::root_mod; - use super::runtime_types; - type DispatchError = runtime_types::sp_runtime::DispatchError; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct RegisterValidators { - pub validators: ::std::vec::Vec<::subxt::sp_core::crypto::AccountId32>, - } - impl ::subxt::Call for RegisterValidators { - const PALLET: &'static str = "ValidatorManager"; - const FUNCTION: &'static str = "register_validators"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct DeregisterValidators { - pub validators: ::std::vec::Vec<::subxt::sp_core::crypto::AccountId32>, - } - impl ::subxt::Call for DeregisterValidators { - const PALLET: &'static str = "ValidatorManager"; - const FUNCTION: &'static str = "deregister_validators"; - } - pub struct TransactionApi<'a, T: ::subxt::Config, X> { - client: &'a ::subxt::Client, - marker: ::core::marker::PhantomData, - } - impl<'a, T, X> TransactionApi<'a, T, X> - where - T: ::subxt::Config, - X: ::subxt::extrinsic::ExtrinsicParams, - { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { - client, - marker: ::core::marker::PhantomData, - } - } - #[doc = "Add new validators to the set."] - #[doc = ""] - #[doc = "The new validators will be active from current session + 2."] - pub fn register_validators( - &self, - validators: ::std::vec::Vec<::subxt::sp_core::crypto::AccountId32>, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - RegisterValidators, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 211u8, 183u8, 121u8, 233u8, 106u8, 25u8, 23u8, 189u8, 167u8, 88u8, - 21u8, 191u8, 153u8, 233u8, 186u8, 3u8, 237u8, 24u8, 145u8, 35u8, 85u8, - 217u8, 142u8, 173u8, 62u8, 123u8, 67u8, 246u8, 252u8, 38u8, 101u8, - 22u8, - ] - { - let call = RegisterValidators { validators }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Remove validators from the set."] - #[doc = ""] - #[doc = "The removed validators will be deactivated from current session + 2."] - pub fn deregister_validators( - &self, - validators: ::std::vec::Vec<::subxt::sp_core::crypto::AccountId32>, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - DeregisterValidators, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? 
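The `MmrLeaf` getter defined just before this ValidatorManager section returns a `BeefyNextAuthoritySet { id, len, root }`, i.e. the merkle root committing to the next authority set's public keys — the piece that feeds a BEEFY consensus-state update. A sketch under the same placeholder assumptions (field names taken from `beefy_primitives::mmr::BeefyNextAuthoritySet`):

use subxt::{BasicError, DefaultConfig};

async fn next_authority_commitment(client: &subxt::Client<DefaultConfig>) -> Result<(), BasicError> {
    let mmr_leaf = runtime::mmr_leaf::storage::StorageApi::new(client);
    let next = mmr_leaf.beefy_next_authorities(None).await?;
    // `root` is the merkle root over the next set's compressed public keys.
    println!("next set id {} with {} keys, keyset root {:?}", next.id, next.len, next.root);
    Ok(())
}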
- == [ - 150u8, 129u8, 133u8, 192u8, 213u8, 92u8, 94u8, 234u8, 253u8, 173u8, - 208u8, 236u8, 109u8, 105u8, 193u8, 122u8, 88u8, 234u8, 39u8, 152u8, - 245u8, 127u8, 195u8, 101u8, 189u8, 25u8, 24u8, 4u8, 179u8, 149u8, 73u8, - 216u8, - ] - { - let call = DeregisterValidators { validators }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - pub type Event = runtime_types::rococo_runtime::validator_manager::pallet::Event; - pub mod events { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "New validators were added to the set."] - pub struct ValidatorsRegistered( - pub ::std::vec::Vec<::subxt::sp_core::crypto::AccountId32>, - ); - impl ::subxt::Event for ValidatorsRegistered { - const PALLET: &'static str = "ValidatorManager"; - const EVENT: &'static str = "ValidatorsRegistered"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "Validators were removed from the set."] - pub struct ValidatorsDeregistered( - pub ::std::vec::Vec<::subxt::sp_core::crypto::AccountId32>, - ); - impl ::subxt::Event for ValidatorsDeregistered { - const PALLET: &'static str = "ValidatorManager"; - const EVENT: &'static str = "ValidatorsDeregistered"; - } - } - pub mod storage { - use super::runtime_types; - pub struct ValidatorsToRetire; - impl ::subxt::StorageEntry for ValidatorsToRetire { - const PALLET: &'static str = "ValidatorManager"; - const STORAGE: &'static str = "ValidatorsToRetire"; - type Value = ::std::vec::Vec<::subxt::sp_core::crypto::AccountId32>; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct ValidatorsToAdd; - impl ::subxt::StorageEntry for ValidatorsToAdd { - const PALLET: &'static str = "ValidatorManager"; - const STORAGE: &'static str = "ValidatorsToAdd"; - type Value = ::std::vec::Vec<::subxt::sp_core::crypto::AccountId32>; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct StorageApi<'a, T: ::subxt::Config> { - client: &'a ::subxt::Client, - } - impl<'a, T: ::subxt::Config> StorageApi<'a, T> { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { client } - } - #[doc = " Validators that should be retired, because their Parachain was deregistered."] - pub async fn validators_to_retire( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::std::vec::Vec<::subxt::sp_core::crypto::AccountId32>, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .storage_hash::()? - == [ - 132u8, 64u8, 206u8, 170u8, 240u8, 112u8, 249u8, 91u8, 54u8, 160u8, - 127u8, 52u8, 144u8, 203u8, 91u8, 42u8, 60u8, 139u8, 121u8, 51u8, 154u8, - 68u8, 5u8, 64u8, 32u8, 33u8, 235u8, 220u8, 161u8, 155u8, 105u8, 29u8, - ] - { - let entry = ValidatorsToRetire; - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Validators that should be added."] - pub async fn validators_to_add( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::std::vec::Vec<::subxt::sp_core::crypto::AccountId32>, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? 
- == [ - 222u8, 254u8, 217u8, 103u8, 255u8, 143u8, 42u8, 9u8, 219u8, 218u8, 1u8, - 95u8, 225u8, 65u8, 100u8, 178u8, 255u8, 33u8, 196u8, 174u8, 29u8, 92u8, - 3u8, 66u8, 166u8, 37u8, 3u8, 156u8, 148u8, 169u8, 121u8, 208u8, - ] - { - let entry = ValidatorsToAdd; - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - } - pub mod bridge_rococo_grandpa { - use super::root_mod; - use super::runtime_types; - pub mod calls { - use super::root_mod; - use super::runtime_types; - type DispatchError = runtime_types::sp_runtime::DispatchError; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct SubmitFinalityProof { - pub finality_target: ::std::boxed::Box< - runtime_types::sp_runtime::generic::header::Header< - ::core::primitive::u32, - runtime_types::sp_runtime::traits::BlakeTwo256, - >, - >, - pub justification: - runtime_types::bp_header_chain::justification::GrandpaJustification< - runtime_types::sp_runtime::generic::header::Header< - ::core::primitive::u32, - runtime_types::sp_runtime::traits::BlakeTwo256, - >, - >, - } - impl ::subxt::Call for SubmitFinalityProof { - const PALLET: &'static str = "BridgeRococoGrandpa"; - const FUNCTION: &'static str = "submit_finality_proof"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct Initialize { - pub init_data: runtime_types::bp_header_chain::InitializationData< - runtime_types::sp_runtime::generic::header::Header< - ::core::primitive::u32, - runtime_types::sp_runtime::traits::BlakeTwo256, - >, - >, - } - impl ::subxt::Call for Initialize { - const PALLET: &'static str = "BridgeRococoGrandpa"; - const FUNCTION: &'static str = "initialize"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct SetOwner { - pub new_owner: ::core::option::Option<::subxt::sp_core::crypto::AccountId32>, - } - impl ::subxt::Call for SetOwner { - const PALLET: &'static str = "BridgeRococoGrandpa"; - const FUNCTION: &'static str = "set_owner"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct SetOperational { - pub operational: ::core::primitive::bool, - } - impl ::subxt::Call for SetOperational { - const PALLET: &'static str = "BridgeRococoGrandpa"; - const FUNCTION: &'static str = "set_operational"; - } - pub struct TransactionApi<'a, T: ::subxt::Config, X> { - client: &'a ::subxt::Client, - marker: ::core::marker::PhantomData, - } - impl<'a, T, X> TransactionApi<'a, T, X> - where - T: ::subxt::Config, - X: ::subxt::extrinsic::ExtrinsicParams, - { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { - client, - marker: ::core::marker::PhantomData, - } - } - #[doc = "Verify a target header is finalized according to the given finality proof."] - #[doc = ""] - #[doc = "It will use the underlying storage pallet to fetch information about the current"] - #[doc = "authorities and best finalized header in order to verify that the header is finalized."] - #[doc = ""] - #[doc = "If successful in verification, it will write the target header to the underlying storage"] - #[doc = "pallet."] - pub fn submit_finality_proof( - &self, - finality_target: runtime_types::sp_runtime::generic::header::Header< - ::core::primitive::u32, - runtime_types::sp_runtime::traits::BlakeTwo256, - >, - justification : runtime_types :: bp_header_chain :: justification :: GrandpaJustification < runtime_types :: sp_runtime :: generic 
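ValidatorManager keeps two plain queues — accounts waiting to be added and accounts waiting to be retired, each taking effect at current session + 2 as the call docs state — and inspecting them is just two more storage reads against the generated API (same placeholder `runtime` module as before):

use subxt::{BasicError, DefaultConfig};

async fn pending_validator_changes(client: &subxt::Client<DefaultConfig>) -> Result<(), BasicError> {
    let vm = runtime::validator_manager::storage::StorageApi::new(client);
    let to_add = vm.validators_to_add(None).await?;
    let to_retire = vm.validators_to_retire(None).await?;
    println!("{} queued for addition, {} queued for retirement", to_add.len(), to_retire.len());
    Ok(())
}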
:: header :: Header < :: core :: primitive :: u32 , runtime_types :: sp_runtime :: traits :: BlakeTwo256 > >, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - SubmitFinalityProof, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 212u8, 195u8, 142u8, 103u8, 130u8, 159u8, 71u8, 78u8, 81u8, 123u8, - 94u8, 3u8, 65u8, 153u8, 235u8, 38u8, 255u8, 11u8, 222u8, 100u8, 33u8, - 143u8, 234u8, 92u8, 109u8, 127u8, 255u8, 229u8, 177u8, 12u8, 172u8, - 216u8, - ] - { - let call = SubmitFinalityProof { - finality_target: ::std::boxed::Box::new(finality_target), - justification, - }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Bootstrap the bridge pallet with an initial header and authority set from which to sync."] - #[doc = ""] - #[doc = "The initial configuration provided does not need to be the genesis header of the bridged"] - #[doc = "chain, it can be any arbitrary header. You can also provide the next scheduled set"] - #[doc = "change if it is already know."] - #[doc = ""] - #[doc = "This function is only allowed to be called from a trusted origin and writes to storage"] - #[doc = "with practically no checks in terms of the validity of the data. It is important that"] - #[doc = "you ensure that valid data is being passed in."] - pub fn initialize( - &self, - init_data: runtime_types::bp_header_chain::InitializationData< - runtime_types::sp_runtime::generic::header::Header< - ::core::primitive::u32, - runtime_types::sp_runtime::traits::BlakeTwo256, - >, - >, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - Initialize, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 105u8, 67u8, 3u8, 233u8, 154u8, 49u8, 90u8, 36u8, 94u8, 157u8, 174u8, - 62u8, 171u8, 150u8, 148u8, 83u8, 191u8, 184u8, 94u8, 43u8, 25u8, 46u8, - 108u8, 139u8, 69u8, 188u8, 186u8, 190u8, 242u8, 243u8, 234u8, 61u8, - ] - { - let call = Initialize { init_data }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Change `PalletOwner`."] - #[doc = ""] - #[doc = "May only be called either by root, or by `PalletOwner`."] - pub fn set_owner( - &self, - new_owner: ::core::option::Option<::subxt::sp_core::crypto::AccountId32>, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - SetOwner, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 168u8, 223u8, 175u8, 15u8, 5u8, 101u8, 85u8, 40u8, 177u8, 36u8, 145u8, - 67u8, 135u8, 179u8, 171u8, 30u8, 17u8, 130u8, 2u8, 99u8, 96u8, 141u8, - 109u8, 36u8, 54u8, 185u8, 38u8, 48u8, 191u8, 233u8, 104u8, 163u8, - ] - { - let call = SetOwner { new_owner }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Halt or resume all pallet operations."] - #[doc = ""] - #[doc = "May only be called either by root, or by `PalletOwner`."] - pub fn set_operational( - &self, - operational: ::core::primitive::bool, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - SetOperational, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? 
- == [ - 0u8, 141u8, 239u8, 63u8, 232u8, 183u8, 89u8, 179u8, 33u8, 67u8, 107u8, - 73u8, 45u8, 231u8, 255u8, 182u8, 6u8, 245u8, 198u8, 20u8, 60u8, 69u8, - 110u8, 153u8, 105u8, 231u8, 38u8, 60u8, 160u8, 183u8, 33u8, 249u8, - ] - { - let call = SetOperational { operational }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - pub mod storage { - use super::runtime_types; - pub struct RequestCount; - impl ::subxt::StorageEntry for RequestCount { - const PALLET: &'static str = "BridgeRococoGrandpa"; - const STORAGE: &'static str = "RequestCount"; - type Value = ::core::primitive::u32; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct InitialHash; - impl ::subxt::StorageEntry for InitialHash { - const PALLET: &'static str = "BridgeRococoGrandpa"; - const STORAGE: &'static str = "InitialHash"; - type Value = ::subxt::sp_core::H256; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct BestFinalized; - impl ::subxt::StorageEntry for BestFinalized { - const PALLET: &'static str = "BridgeRococoGrandpa"; - const STORAGE: &'static str = "BestFinalized"; - type Value = ::subxt::sp_core::H256; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct ImportedHashes<'a>(pub &'a ::core::primitive::u32); - impl ::subxt::StorageEntry for ImportedHashes<'_> { - const PALLET: &'static str = "BridgeRococoGrandpa"; - const STORAGE: &'static str = "ImportedHashes"; - type Value = ::subxt::sp_core::H256; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( - &self.0, - ::subxt::StorageHasher::Identity, - )]) - } - } - pub struct ImportedHashesPointer; - impl ::subxt::StorageEntry for ImportedHashesPointer { - const PALLET: &'static str = "BridgeRococoGrandpa"; - const STORAGE: &'static str = "ImportedHashesPointer"; - type Value = ::core::primitive::u32; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct ImportedHeaders<'a>(pub &'a ::subxt::sp_core::H256); - impl ::subxt::StorageEntry for ImportedHeaders<'_> { - const PALLET: &'static str = "BridgeRococoGrandpa"; - const STORAGE: &'static str = "ImportedHeaders"; - type Value = runtime_types::sp_runtime::generic::header::Header< - ::core::primitive::u32, - runtime_types::sp_runtime::traits::BlakeTwo256, - >; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( - &self.0, - ::subxt::StorageHasher::Identity, - )]) - } - } - pub struct CurrentAuthoritySet; - impl ::subxt::StorageEntry for CurrentAuthoritySet { - const PALLET: &'static str = "BridgeRococoGrandpa"; - const STORAGE: &'static str = "CurrentAuthoritySet"; - type Value = runtime_types::bp_header_chain::AuthoritySet; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct PalletOwner; - impl ::subxt::StorageEntry for PalletOwner { - const PALLET: &'static str = "BridgeRococoGrandpa"; - const STORAGE: &'static str = "PalletOwner"; - type Value = ::subxt::sp_core::crypto::AccountId32; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct IsHalted; - impl ::subxt::StorageEntry for IsHalted { - const PALLET: &'static str = "BridgeRococoGrandpa"; - const STORAGE: &'static str = "IsHalted"; - type Value = 
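The call builders in this TransactionApi mirror the storage getters: each checks the call's metadata hash and, on success, hands back a `SubmittableExtrinsic` that still has to be signed and submitted. A sketch of halting the Rococo GRANDPA bridge via `set_operational(false)`, assuming subxt's `PairSigner`, `PolkadotExtrinsicParams` and the dev `Alice` key, and the signing/submission method names of the subxt release this codegen targets — all assumptions on top of this patch, and only root or the pallet owner may actually dispatch this:

use sp_keyring::AccountKeyring;
use subxt::{BasicError, DefaultConfig, PairSigner, PolkadotExtrinsicParams};

type Params = PolkadotExtrinsicParams<DefaultConfig>;

async fn halt_rococo_bridge(client: &subxt::Client<DefaultConfig>) -> Result<(), BasicError> {
    // Dev-chain owner key; a real deployment would use the configured PalletOwner.
    let signer = PairSigner::new(AccountKeyring::Alice.pair());
    let calls =
        runtime::bridge_rococo_grandpa::calls::TransactionApi::<DefaultConfig, Params>::new(client);
    // The builder itself can fail with `IncompatibleMetadata`; submission is a
    // separate, signed step.
    let _tx_hash = calls
        .set_operational(false)?
        .sign_and_submit_default(&signer)
        .await?;
    Ok(())
}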
::core::primitive::bool; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct StorageApi<'a, T: ::subxt::Config> { - client: &'a ::subxt::Client, - } - impl<'a, T: ::subxt::Config> StorageApi<'a, T> { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { client } - } - #[doc = " The current number of requests which have written to storage."] - #[doc = ""] - #[doc = " If the `RequestCount` hits `MaxRequests`, no more calls will be allowed to the pallet until"] - #[doc = " the request capacity is increased."] - #[doc = ""] - #[doc = " The `RequestCount` is decreased by one at the beginning of every block. This is to ensure"] - #[doc = " that the pallet can always make progress."] - pub async fn request_count( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> - { - if self.client.metadata().storage_hash::()? - == [ - 100u8, 156u8, 98u8, 176u8, 229u8, 85u8, 81u8, 159u8, 120u8, 156u8, - 33u8, 179u8, 224u8, 237u8, 52u8, 198u8, 81u8, 81u8, 10u8, 180u8, 53u8, - 141u8, 96u8, 4u8, 39u8, 217u8, 58u8, 9u8, 57u8, 79u8, 47u8, 201u8, - ] - { - let entry = RequestCount; - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Hash of the header used to bootstrap the pallet."] - pub async fn initial_hash( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result<::subxt::sp_core::H256, ::subxt::BasicError> - { - if self.client.metadata().storage_hash::()? - == [ - 243u8, 158u8, 214u8, 159u8, 84u8, 82u8, 193u8, 34u8, 24u8, 64u8, 21u8, - 172u8, 142u8, 116u8, 224u8, 19u8, 62u8, 232u8, 99u8, 201u8, 32u8, - 211u8, 139u8, 125u8, 41u8, 255u8, 107u8, 84u8, 165u8, 75u8, 201u8, - 142u8, - ] - { - let entry = InitialHash; - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Hash of the best finalized header."] - pub async fn best_finalized( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result<::subxt::sp_core::H256, ::subxt::BasicError> - { - if self.client.metadata().storage_hash::()? - == [ - 155u8, 222u8, 92u8, 199u8, 26u8, 156u8, 146u8, 226u8, 24u8, 161u8, - 125u8, 18u8, 61u8, 237u8, 128u8, 26u8, 50u8, 55u8, 7u8, 42u8, 101u8, - 213u8, 0u8, 105u8, 219u8, 194u8, 227u8, 177u8, 147u8, 54u8, 22u8, 86u8, - ] - { - let entry = BestFinalized; - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " A ring buffer of imported hashes. Ordered by the insertion time."] - pub async fn imported_hashes( - &self, - _0: &::core::primitive::u32, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::core::option::Option<::subxt::sp_core::H256>, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 65u8, 162u8, 135u8, 91u8, 230u8, 102u8, 41u8, 123u8, 69u8, 20u8, 101u8, - 109u8, 178u8, 193u8, 239u8, 232u8, 66u8, 17u8, 222u8, 11u8, 188u8, - 53u8, 202u8, 80u8, 146u8, 234u8, 206u8, 192u8, 99u8, 4u8, 234u8, 67u8, - ] - { - let entry = ImportedHashes(_0); - self.client.storage().fetch(&entry, block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " A ring buffer of imported hashes. 
Ordered by the insertion time."] - pub async fn imported_hashes_iter( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::subxt::KeyIter<'a, T, ImportedHashes<'a>>, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 65u8, 162u8, 135u8, 91u8, 230u8, 102u8, 41u8, 123u8, 69u8, 20u8, 101u8, - 109u8, 178u8, 193u8, 239u8, 232u8, 66u8, 17u8, 222u8, 11u8, 188u8, - 53u8, 202u8, 80u8, 146u8, 234u8, 206u8, 192u8, 99u8, 4u8, 234u8, 67u8, - ] - { - self.client.storage().iter(block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Current ring buffer position."] - pub async fn imported_hashes_pointer( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> - { - if self - .client - .metadata() - .storage_hash::()? - == [ - 159u8, 83u8, 35u8, 45u8, 27u8, 249u8, 155u8, 131u8, 181u8, 196u8, - 224u8, 26u8, 92u8, 132u8, 127u8, 237u8, 13u8, 142u8, 196u8, 147u8, - 221u8, 216u8, 11u8, 78u8, 190u8, 241u8, 201u8, 96u8, 74u8, 185u8, - 208u8, 42u8, - ] - { - let entry = ImportedHashesPointer; - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Headers which have been imported into the pallet."] - pub async fn imported_headers( - &self, - _0: &::subxt::sp_core::H256, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::core::option::Option< - runtime_types::sp_runtime::generic::header::Header< - ::core::primitive::u32, - runtime_types::sp_runtime::traits::BlakeTwo256, - >, - >, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 169u8, 86u8, 51u8, 48u8, 82u8, 28u8, 180u8, 142u8, 152u8, 63u8, 234u8, - 84u8, 9u8, 136u8, 220u8, 18u8, 69u8, 4u8, 76u8, 54u8, 72u8, 139u8, - 234u8, 101u8, 238u8, 205u8, 95u8, 118u8, 216u8, 249u8, 147u8, 200u8, - ] - { - let entry = ImportedHeaders(_0); - self.client.storage().fetch(&entry, block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Headers which have been imported into the pallet."] - pub async fn imported_headers_iter( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::subxt::KeyIter<'a, T, ImportedHeaders<'a>>, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 169u8, 86u8, 51u8, 48u8, 82u8, 28u8, 180u8, 142u8, 152u8, 63u8, 234u8, - 84u8, 9u8, 136u8, 220u8, 18u8, 69u8, 4u8, 76u8, 54u8, 72u8, 139u8, - 234u8, 101u8, 238u8, 205u8, 95u8, 118u8, 216u8, 249u8, 147u8, 200u8, - ] - { - self.client.storage().iter(block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The current GRANDPA Authority set."] - pub async fn current_authority_set( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - runtime_types::bp_header_chain::AuthoritySet, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .storage_hash::()? 
- == [ - 228u8, 156u8, 183u8, 185u8, 99u8, 81u8, 121u8, 110u8, 92u8, 105u8, - 111u8, 248u8, 123u8, 1u8, 94u8, 144u8, 248u8, 215u8, 100u8, 91u8, - 181u8, 1u8, 12u8, 203u8, 66u8, 9u8, 183u8, 173u8, 57u8, 239u8, 143u8, - 158u8, - ] - { - let entry = CurrentAuthoritySet; - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Optional pallet owner."] - #[doc = ""] - #[doc = " Pallet owner has a right to halt all pallet operations and then resume it. If it is"] - #[doc = " `None`, then there are no direct ways to halt/resume pallet operations, but other"] - #[doc = " runtime methods may still be used to do that (i.e. democracy::referendum to update halt"] - #[doc = " flag directly or call the `halt_operations`)."] - pub async fn pallet_owner( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::core::option::Option<::subxt::sp_core::crypto::AccountId32>, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 115u8, 57u8, 104u8, 22u8, 119u8, 16u8, 215u8, 71u8, 228u8, 104u8, - 111u8, 24u8, 53u8, 155u8, 26u8, 121u8, 143u8, 126u8, 72u8, 148u8, - 105u8, 132u8, 190u8, 40u8, 233u8, 219u8, 19u8, 143u8, 255u8, 20u8, - 220u8, 124u8, - ] - { - let entry = PalletOwner; - self.client.storage().fetch(&entry, block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " If true, all pallet transactions are failed immediately."] - pub async fn is_halted( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result<::core::primitive::bool, ::subxt::BasicError> - { - if self.client.metadata().storage_hash::()? - == [ - 107u8, 205u8, 253u8, 250u8, 98u8, 222u8, 141u8, 130u8, 74u8, 138u8, - 151u8, 77u8, 37u8, 226u8, 115u8, 116u8, 137u8, 247u8, 159u8, 72u8, - 230u8, 11u8, 85u8, 102u8, 122u8, 203u8, 235u8, 219u8, 54u8, 172u8, - 74u8, 22u8, - ] - { - let entry = IsHalted; - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - pub mod constants { - use super::runtime_types; - pub struct ConstantsApi<'a, T: ::subxt::Config> { - client: &'a ::subxt::Client, - } - impl<'a, T: ::subxt::Config> ConstantsApi<'a, T> { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { client } - } - #[doc = " The upper bound on the number of requests allowed by the pallet."] - #[doc = ""] - #[doc = " A request refers to an action which writes a header to storage."] - #[doc = ""] - #[doc = " Once this bound is reached the pallet will not allow any dispatchables to be called"] - #[doc = " until the request count has decreased."] - pub fn max_requests( - &self, - ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> - { - if self - .client - .metadata() - .constant_hash("BridgeRococoGrandpa", "MaxRequests")? 
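On the read side, the three pieces of bridge state a relayer typically polls are the best finalized Rococo header hash, the GRANDPA authority set it was verified against, and whether the pallet is currently halted. With the same placeholder module path (and field names per `bp_header_chain::AuthoritySet`):

use subxt::{BasicError, DefaultConfig};

async fn rococo_bridge_state(client: &subxt::Client<DefaultConfig>) -> Result<(), BasicError> {
    let grandpa = runtime::bridge_rococo_grandpa::storage::StorageApi::new(client);
    let best = grandpa.best_finalized(None).await?;
    let set = grandpa.current_authority_set(None).await?;
    let halted = grandpa.is_halted(None).await?;
    println!(
        "best finalized {:?}, GRANDPA set {} ({} authorities), halted: {}",
        best,
        set.set_id,
        set.authorities.len(),
        halted
    );
    Ok(())
}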
- == [ - 214u8, 232u8, 188u8, 57u8, 231u8, 189u8, 134u8, 244u8, 85u8, 191u8, - 134u8, 74u8, 207u8, 115u8, 21u8, 124u8, 19u8, 227u8, 59u8, 8u8, 252u8, - 8u8, 0u8, 252u8, 40u8, 49u8, 74u8, 145u8, 172u8, 109u8, 136u8, 63u8, - ] - { - let pallet = self.client.metadata().pallet("BridgeRococoGrandpa")?; - let constant = pallet.constant("MaxRequests")?; - let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; - Ok(value) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Maximal number of finalized headers to keep in the storage."] - #[doc = ""] - #[doc = " The setting is there to prevent growing the on-chain state indefinitely. Note"] - #[doc = " the setting does not relate to block numbers - we will simply keep as much items"] - #[doc = " in the storage, so it doesn't guarantee any fixed timeframe for finality headers."] - pub fn headers_to_keep( - &self, - ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> - { - if self - .client - .metadata() - .constant_hash("BridgeRococoGrandpa", "HeadersToKeep")? - == [ - 60u8, 85u8, 123u8, 208u8, 97u8, 205u8, 153u8, 170u8, 74u8, 94u8, 206u8, - 148u8, 171u8, 182u8, 210u8, 175u8, 1u8, 44u8, 152u8, 246u8, 144u8, - 232u8, 127u8, 202u8, 253u8, 214u8, 47u8, 246u8, 63u8, 86u8, 184u8, - 94u8, - ] - { - let pallet = self.client.metadata().pallet("BridgeRococoGrandpa")?; - let constant = pallet.constant("HeadersToKeep")?; - let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; - Ok(value) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - } - pub mod bridge_wococo_grandpa { - use super::root_mod; - use super::runtime_types; - pub mod calls { - use super::root_mod; - use super::runtime_types; - type DispatchError = runtime_types::sp_runtime::DispatchError; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct SubmitFinalityProof { - pub finality_target: ::std::boxed::Box< - runtime_types::sp_runtime::generic::header::Header< - ::core::primitive::u32, - runtime_types::sp_runtime::traits::BlakeTwo256, - >, - >, - pub justification: - runtime_types::bp_header_chain::justification::GrandpaJustification< - runtime_types::sp_runtime::generic::header::Header< - ::core::primitive::u32, - runtime_types::sp_runtime::traits::BlakeTwo256, - >, - >, - } - impl ::subxt::Call for SubmitFinalityProof { - const PALLET: &'static str = "BridgeWococoGrandpa"; - const FUNCTION: &'static str = "submit_finality_proof"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct Initialize { - pub init_data: runtime_types::bp_header_chain::InitializationData< - runtime_types::sp_runtime::generic::header::Header< - ::core::primitive::u32, - runtime_types::sp_runtime::traits::BlakeTwo256, - >, - >, - } - impl ::subxt::Call for Initialize { - const PALLET: &'static str = "BridgeWococoGrandpa"; - const FUNCTION: &'static str = "initialize"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct SetOwner { - pub new_owner: ::core::option::Option<::subxt::sp_core::crypto::AccountId32>, - } - impl ::subxt::Call for SetOwner { - const PALLET: &'static str = "BridgeWococoGrandpa"; - const FUNCTION: &'static str = "set_owner"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct SetOperational { - pub operational: ::core::primitive::bool, - } - impl ::subxt::Call for SetOperational { - const PALLET: &'static str = 
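Unlike the storage getters, the two pallet constants are read straight out of the locally held metadata and SCALE-decoded on the spot, so the generated accessors are synchronous. For example:

use subxt::{BasicError, DefaultConfig};

fn rococo_bridge_limits(client: &subxt::Client<DefaultConfig>) -> Result<(u32, u32), BasicError> {
    let constants = runtime::bridge_rococo_grandpa::constants::ConstantsApi::new(client);
    // (MaxRequests, HeadersToKeep), each decoded after the usual hash check.
    Ok((constants.max_requests()?, constants.headers_to_keep()?))
}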
"BridgeWococoGrandpa"; - const FUNCTION: &'static str = "set_operational"; - } - pub struct TransactionApi<'a, T: ::subxt::Config, X> { - client: &'a ::subxt::Client, - marker: ::core::marker::PhantomData, - } - impl<'a, T, X> TransactionApi<'a, T, X> - where - T: ::subxt::Config, - X: ::subxt::extrinsic::ExtrinsicParams, - { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { - client, - marker: ::core::marker::PhantomData, - } - } - #[doc = "Verify a target header is finalized according to the given finality proof."] - #[doc = ""] - #[doc = "It will use the underlying storage pallet to fetch information about the current"] - #[doc = "authorities and best finalized header in order to verify that the header is finalized."] - #[doc = ""] - #[doc = "If successful in verification, it will write the target header to the underlying storage"] - #[doc = "pallet."] - pub fn submit_finality_proof( - &self, - finality_target: runtime_types::sp_runtime::generic::header::Header< - ::core::primitive::u32, - runtime_types::sp_runtime::traits::BlakeTwo256, - >, - justification : runtime_types :: bp_header_chain :: justification :: GrandpaJustification < runtime_types :: sp_runtime :: generic :: header :: Header < :: core :: primitive :: u32 , runtime_types :: sp_runtime :: traits :: BlakeTwo256 > >, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - SubmitFinalityProof, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 212u8, 195u8, 142u8, 103u8, 130u8, 159u8, 71u8, 78u8, 81u8, 123u8, - 94u8, 3u8, 65u8, 153u8, 235u8, 38u8, 255u8, 11u8, 222u8, 100u8, 33u8, - 143u8, 234u8, 92u8, 109u8, 127u8, 255u8, 229u8, 177u8, 12u8, 172u8, - 216u8, - ] - { - let call = SubmitFinalityProof { - finality_target: ::std::boxed::Box::new(finality_target), - justification, - }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Bootstrap the bridge pallet with an initial header and authority set from which to sync."] - #[doc = ""] - #[doc = "The initial configuration provided does not need to be the genesis header of the bridged"] - #[doc = "chain, it can be any arbitrary header. You can also provide the next scheduled set"] - #[doc = "change if it is already know."] - #[doc = ""] - #[doc = "This function is only allowed to be called from a trusted origin and writes to storage"] - #[doc = "with practically no checks in terms of the validity of the data. It is important that"] - #[doc = "you ensure that valid data is being passed in."] - pub fn initialize( - &self, - init_data: runtime_types::bp_header_chain::InitializationData< - runtime_types::sp_runtime::generic::header::Header< - ::core::primitive::u32, - runtime_types::sp_runtime::traits::BlakeTwo256, - >, - >, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - Initialize, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? 
- == [ - 105u8, 67u8, 3u8, 233u8, 154u8, 49u8, 90u8, 36u8, 94u8, 157u8, 174u8, - 62u8, 171u8, 150u8, 148u8, 83u8, 191u8, 184u8, 94u8, 43u8, 25u8, 46u8, - 108u8, 139u8, 69u8, 188u8, 186u8, 190u8, 242u8, 243u8, 234u8, 61u8, - ] - { - let call = Initialize { init_data }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Change `PalletOwner`."] - #[doc = ""] - #[doc = "May only be called either by root, or by `PalletOwner`."] - pub fn set_owner( - &self, - new_owner: ::core::option::Option<::subxt::sp_core::crypto::AccountId32>, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - SetOwner, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 168u8, 223u8, 175u8, 15u8, 5u8, 101u8, 85u8, 40u8, 177u8, 36u8, 145u8, - 67u8, 135u8, 179u8, 171u8, 30u8, 17u8, 130u8, 2u8, 99u8, 96u8, 141u8, - 109u8, 36u8, 54u8, 185u8, 38u8, 48u8, 191u8, 233u8, 104u8, 163u8, - ] - { - let call = SetOwner { new_owner }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Halt or resume all pallet operations."] - #[doc = ""] - #[doc = "May only be called either by root, or by `PalletOwner`."] - pub fn set_operational( - &self, - operational: ::core::primitive::bool, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - SetOperational, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 0u8, 141u8, 239u8, 63u8, 232u8, 183u8, 89u8, 179u8, 33u8, 67u8, 107u8, - 73u8, 45u8, 231u8, 255u8, 182u8, 6u8, 245u8, 198u8, 20u8, 60u8, 69u8, - 110u8, 153u8, 105u8, 231u8, 38u8, 60u8, 160u8, 183u8, 33u8, 249u8, - ] - { - let call = SetOperational { operational }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - pub mod storage { - use super::runtime_types; - pub struct RequestCount; - impl ::subxt::StorageEntry for RequestCount { - const PALLET: &'static str = "BridgeWococoGrandpa"; - const STORAGE: &'static str = "RequestCount"; - type Value = ::core::primitive::u32; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct InitialHash; - impl ::subxt::StorageEntry for InitialHash { - const PALLET: &'static str = "BridgeWococoGrandpa"; - const STORAGE: &'static str = "InitialHash"; - type Value = ::subxt::sp_core::H256; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct BestFinalized; - impl ::subxt::StorageEntry for BestFinalized { - const PALLET: &'static str = "BridgeWococoGrandpa"; - const STORAGE: &'static str = "BestFinalized"; - type Value = ::subxt::sp_core::H256; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct ImportedHashes<'a>(pub &'a ::core::primitive::u32); - impl ::subxt::StorageEntry for ImportedHashes<'_> { - const PALLET: &'static str = "BridgeWococoGrandpa"; - const STORAGE: &'static str = "ImportedHashes"; - type Value = ::subxt::sp_core::H256; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( - &self.0, - ::subxt::StorageHasher::Identity, - )]) - } - } - pub struct ImportedHashesPointer; - impl ::subxt::StorageEntry for ImportedHashesPointer { - const PALLET: 
&'static str = "BridgeWococoGrandpa"; - const STORAGE: &'static str = "ImportedHashesPointer"; - type Value = ::core::primitive::u32; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct ImportedHeaders<'a>(pub &'a ::subxt::sp_core::H256); - impl ::subxt::StorageEntry for ImportedHeaders<'_> { - const PALLET: &'static str = "BridgeWococoGrandpa"; - const STORAGE: &'static str = "ImportedHeaders"; - type Value = runtime_types::sp_runtime::generic::header::Header< - ::core::primitive::u32, - runtime_types::sp_runtime::traits::BlakeTwo256, - >; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( - &self.0, - ::subxt::StorageHasher::Identity, - )]) - } - } - pub struct CurrentAuthoritySet; - impl ::subxt::StorageEntry for CurrentAuthoritySet { - const PALLET: &'static str = "BridgeWococoGrandpa"; - const STORAGE: &'static str = "CurrentAuthoritySet"; - type Value = runtime_types::bp_header_chain::AuthoritySet; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct PalletOwner; - impl ::subxt::StorageEntry for PalletOwner { - const PALLET: &'static str = "BridgeWococoGrandpa"; - const STORAGE: &'static str = "PalletOwner"; - type Value = ::subxt::sp_core::crypto::AccountId32; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct IsHalted; - impl ::subxt::StorageEntry for IsHalted { - const PALLET: &'static str = "BridgeWococoGrandpa"; - const STORAGE: &'static str = "IsHalted"; - type Value = ::core::primitive::bool; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct StorageApi<'a, T: ::subxt::Config> { - client: &'a ::subxt::Client, - } - impl<'a, T: ::subxt::Config> StorageApi<'a, T> { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { client } - } - #[doc = " The current number of requests which have written to storage."] - #[doc = ""] - #[doc = " If the `RequestCount` hits `MaxRequests`, no more calls will be allowed to the pallet until"] - #[doc = " the request capacity is increased."] - #[doc = ""] - #[doc = " The `RequestCount` is decreased by one at the beginning of every block. This is to ensure"] - #[doc = " that the pallet can always make progress."] - pub async fn request_count( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> - { - if self.client.metadata().storage_hash::()? - == [ - 100u8, 156u8, 98u8, 176u8, 229u8, 85u8, 81u8, 159u8, 120u8, 156u8, - 33u8, 179u8, 224u8, 237u8, 52u8, 198u8, 81u8, 81u8, 10u8, 180u8, 53u8, - 141u8, 96u8, 4u8, 39u8, 217u8, 58u8, 9u8, 57u8, 79u8, 47u8, 201u8, - ] - { - let entry = RequestCount; - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Hash of the header used to bootstrap the pallet."] - pub async fn initial_hash( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result<::subxt::sp_core::H256, ::subxt::BasicError> - { - if self.client.metadata().storage_hash::()? 
- == [ - 243u8, 158u8, 214u8, 159u8, 84u8, 82u8, 193u8, 34u8, 24u8, 64u8, 21u8, - 172u8, 142u8, 116u8, 224u8, 19u8, 62u8, 232u8, 99u8, 201u8, 32u8, - 211u8, 139u8, 125u8, 41u8, 255u8, 107u8, 84u8, 165u8, 75u8, 201u8, - 142u8, - ] - { - let entry = InitialHash; - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Hash of the best finalized header."] - pub async fn best_finalized( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result<::subxt::sp_core::H256, ::subxt::BasicError> - { - if self.client.metadata().storage_hash::()? - == [ - 155u8, 222u8, 92u8, 199u8, 26u8, 156u8, 146u8, 226u8, 24u8, 161u8, - 125u8, 18u8, 61u8, 237u8, 128u8, 26u8, 50u8, 55u8, 7u8, 42u8, 101u8, - 213u8, 0u8, 105u8, 219u8, 194u8, 227u8, 177u8, 147u8, 54u8, 22u8, 86u8, - ] - { - let entry = BestFinalized; - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " A ring buffer of imported hashes. Ordered by the insertion time."] - pub async fn imported_hashes( - &self, - _0: &::core::primitive::u32, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::core::option::Option<::subxt::sp_core::H256>, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 65u8, 162u8, 135u8, 91u8, 230u8, 102u8, 41u8, 123u8, 69u8, 20u8, 101u8, - 109u8, 178u8, 193u8, 239u8, 232u8, 66u8, 17u8, 222u8, 11u8, 188u8, - 53u8, 202u8, 80u8, 146u8, 234u8, 206u8, 192u8, 99u8, 4u8, 234u8, 67u8, - ] - { - let entry = ImportedHashes(_0); - self.client.storage().fetch(&entry, block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " A ring buffer of imported hashes. Ordered by the insertion time."] - pub async fn imported_hashes_iter( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::subxt::KeyIter<'a, T, ImportedHashes<'a>>, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 65u8, 162u8, 135u8, 91u8, 230u8, 102u8, 41u8, 123u8, 69u8, 20u8, 101u8, - 109u8, 178u8, 193u8, 239u8, 232u8, 66u8, 17u8, 222u8, 11u8, 188u8, - 53u8, 202u8, 80u8, 146u8, 234u8, 206u8, 192u8, 99u8, 4u8, 234u8, 67u8, - ] - { - self.client.storage().iter(block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Current ring buffer position."] - pub async fn imported_hashes_pointer( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> - { - if self - .client - .metadata() - .storage_hash::()? 
- == [ - 159u8, 83u8, 35u8, 45u8, 27u8, 249u8, 155u8, 131u8, 181u8, 196u8, - 224u8, 26u8, 92u8, 132u8, 127u8, 237u8, 13u8, 142u8, 196u8, 147u8, - 221u8, 216u8, 11u8, 78u8, 190u8, 241u8, 201u8, 96u8, 74u8, 185u8, - 208u8, 42u8, - ] - { - let entry = ImportedHashesPointer; - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Headers which have been imported into the pallet."] - pub async fn imported_headers( - &self, - _0: &::subxt::sp_core::H256, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::core::option::Option< - runtime_types::sp_runtime::generic::header::Header< - ::core::primitive::u32, - runtime_types::sp_runtime::traits::BlakeTwo256, - >, - >, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 169u8, 86u8, 51u8, 48u8, 82u8, 28u8, 180u8, 142u8, 152u8, 63u8, 234u8, - 84u8, 9u8, 136u8, 220u8, 18u8, 69u8, 4u8, 76u8, 54u8, 72u8, 139u8, - 234u8, 101u8, 238u8, 205u8, 95u8, 118u8, 216u8, 249u8, 147u8, 200u8, - ] - { - let entry = ImportedHeaders(_0); - self.client.storage().fetch(&entry, block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Headers which have been imported into the pallet."] - pub async fn imported_headers_iter( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::subxt::KeyIter<'a, T, ImportedHeaders<'a>>, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 169u8, 86u8, 51u8, 48u8, 82u8, 28u8, 180u8, 142u8, 152u8, 63u8, 234u8, - 84u8, 9u8, 136u8, 220u8, 18u8, 69u8, 4u8, 76u8, 54u8, 72u8, 139u8, - 234u8, 101u8, 238u8, 205u8, 95u8, 118u8, 216u8, 249u8, 147u8, 200u8, - ] - { - self.client.storage().iter(block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The current GRANDPA Authority set."] - pub async fn current_authority_set( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - runtime_types::bp_header_chain::AuthoritySet, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .storage_hash::()? - == [ - 228u8, 156u8, 183u8, 185u8, 99u8, 81u8, 121u8, 110u8, 92u8, 105u8, - 111u8, 248u8, 123u8, 1u8, 94u8, 144u8, 248u8, 215u8, 100u8, 91u8, - 181u8, 1u8, 12u8, 203u8, 66u8, 9u8, 183u8, 173u8, 57u8, 239u8, 143u8, - 158u8, - ] - { - let entry = CurrentAuthoritySet; - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Optional pallet owner."] - #[doc = ""] - #[doc = " Pallet owner has a right to halt all pallet operations and then resume it. If it is"] - #[doc = " `None`, then there are no direct ways to halt/resume pallet operations, but other"] - #[doc = " runtime methods may still be used to do that (i.e. democracy::referendum to update halt"] - #[doc = " flag directly or call the `halt_operations`)."] - pub async fn pallet_owner( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::core::option::Option<::subxt::sp_core::crypto::AccountId32>, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? 
- == [ - 115u8, 57u8, 104u8, 22u8, 119u8, 16u8, 215u8, 71u8, 228u8, 104u8, - 111u8, 24u8, 53u8, 155u8, 26u8, 121u8, 143u8, 126u8, 72u8, 148u8, - 105u8, 132u8, 190u8, 40u8, 233u8, 219u8, 19u8, 143u8, 255u8, 20u8, - 220u8, 124u8, - ] - { - let entry = PalletOwner; - self.client.storage().fetch(&entry, block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " If true, all pallet transactions are failed immediately."] - pub async fn is_halted( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result<::core::primitive::bool, ::subxt::BasicError> - { - if self.client.metadata().storage_hash::()? - == [ - 107u8, 205u8, 253u8, 250u8, 98u8, 222u8, 141u8, 130u8, 74u8, 138u8, - 151u8, 77u8, 37u8, 226u8, 115u8, 116u8, 137u8, 247u8, 159u8, 72u8, - 230u8, 11u8, 85u8, 102u8, 122u8, 203u8, 235u8, 219u8, 54u8, 172u8, - 74u8, 22u8, - ] - { - let entry = IsHalted; - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - pub mod constants { - use super::runtime_types; - pub struct ConstantsApi<'a, T: ::subxt::Config> { - client: &'a ::subxt::Client, - } - impl<'a, T: ::subxt::Config> ConstantsApi<'a, T> { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { client } - } - #[doc = " The upper bound on the number of requests allowed by the pallet."] - #[doc = ""] - #[doc = " A request refers to an action which writes a header to storage."] - #[doc = ""] - #[doc = " Once this bound is reached the pallet will not allow any dispatchables to be called"] - #[doc = " until the request count has decreased."] - pub fn max_requests( - &self, - ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> - { - if self - .client - .metadata() - .constant_hash("BridgeWococoGrandpa", "MaxRequests")? - == [ - 214u8, 232u8, 188u8, 57u8, 231u8, 189u8, 134u8, 244u8, 85u8, 191u8, - 134u8, 74u8, 207u8, 115u8, 21u8, 124u8, 19u8, 227u8, 59u8, 8u8, 252u8, - 8u8, 0u8, 252u8, 40u8, 49u8, 74u8, 145u8, 172u8, 109u8, 136u8, 63u8, - ] - { - let pallet = self.client.metadata().pallet("BridgeWococoGrandpa")?; - let constant = pallet.constant("MaxRequests")?; - let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; - Ok(value) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Maximal number of finalized headers to keep in the storage."] - #[doc = ""] - #[doc = " The setting is there to prevent growing the on-chain state indefinitely. Note"] - #[doc = " the setting does not relate to block numbers - we will simply keep as much items"] - #[doc = " in the storage, so it doesn't guarantee any fixed timeframe for finality headers."] - pub fn headers_to_keep( - &self, - ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> - { - if self - .client - .metadata() - .constant_hash("BridgeWococoGrandpa", "HeadersToKeep")? 
- == [ - 60u8, 85u8, 123u8, 208u8, 97u8, 205u8, 153u8, 170u8, 74u8, 94u8, 206u8, - 148u8, 171u8, 182u8, 210u8, 175u8, 1u8, 44u8, 152u8, 246u8, 144u8, - 232u8, 127u8, 202u8, 253u8, 214u8, 47u8, 246u8, 63u8, 86u8, 184u8, - 94u8, - ] - { - let pallet = self.client.metadata().pallet("BridgeWococoGrandpa")?; - let constant = pallet.constant("HeadersToKeep")?; - let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; - Ok(value) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - } - pub mod bridge_rococo_messages { - use super::root_mod; - use super::runtime_types; - pub mod calls { - use super::root_mod; - use super::runtime_types; - type DispatchError = runtime_types::sp_runtime::DispatchError; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct SetOwner { - pub new_owner: ::core::option::Option<::subxt::sp_core::crypto::AccountId32>, - } - impl ::subxt::Call for SetOwner { - const PALLET: &'static str = "BridgeRococoMessages"; - const FUNCTION: &'static str = "set_owner"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct SetOperatingMode { - pub operating_mode: runtime_types::bp_messages::OperatingMode, - } - impl ::subxt::Call for SetOperatingMode { - const PALLET: &'static str = "BridgeRococoMessages"; - const FUNCTION: &'static str = "set_operating_mode"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct UpdatePalletParameter { - pub parameter: (), - } - impl ::subxt::Call for UpdatePalletParameter { - const PALLET: &'static str = "BridgeRococoMessages"; - const FUNCTION: &'static str = "update_pallet_parameter"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct SendMessage { - pub lane_id: [::core::primitive::u8; 4usize], - pub payload: runtime_types::bp_message_dispatch::MessagePayload< - ::subxt::sp_core::crypto::AccountId32, - runtime_types::sp_runtime::MultiSigner, - runtime_types::sp_runtime::MultiSignature, - ::std::vec::Vec<::core::primitive::u8>, - >, - pub delivery_and_dispatch_fee: ::core::primitive::u128, - } - impl ::subxt::Call for SendMessage { - const PALLET: &'static str = "BridgeRococoMessages"; - const FUNCTION: &'static str = "send_message"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct IncreaseMessageFee { - pub lane_id: [::core::primitive::u8; 4usize], - pub nonce: ::core::primitive::u64, - pub additional_fee: ::core::primitive::u128, - } - impl ::subxt::Call for IncreaseMessageFee { - const PALLET: &'static str = "BridgeRococoMessages"; - const FUNCTION: &'static str = "increase_message_fee"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct ReceiveMessagesProof { pub relayer_id_at_bridged_chain : :: subxt :: sp_core :: crypto :: AccountId32 , pub proof : runtime_types :: bridge_runtime_common :: messages :: target :: FromBridgedChainMessagesProof < :: subxt :: sp_core :: H256 > , pub messages_count : :: core :: primitive :: u32 , pub dispatch_weight : :: core :: primitive :: u64 , } - impl ::subxt::Call for ReceiveMessagesProof { - const PALLET: &'static str = "BridgeRococoMessages"; - const FUNCTION: &'static str = "receive_messages_proof"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct ReceiveMessagesDeliveryProof { pub proof : runtime_types :: bridge_runtime_common :: messages :: source :: 
FromBridgedChainMessagesDeliveryProof < :: subxt :: sp_core :: H256 > , pub relayers_state : runtime_types :: bp_messages :: UnrewardedRelayersState , } - impl ::subxt::Call for ReceiveMessagesDeliveryProof { - const PALLET: &'static str = "BridgeRococoMessages"; - const FUNCTION: &'static str = "receive_messages_delivery_proof"; - } - pub struct TransactionApi<'a, T: ::subxt::Config, X> { - client: &'a ::subxt::Client, - marker: ::core::marker::PhantomData, - } - impl<'a, T, X> TransactionApi<'a, T, X> - where - T: ::subxt::Config, - X: ::subxt::extrinsic::ExtrinsicParams, - { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { - client, - marker: ::core::marker::PhantomData, - } - } - #[doc = "Change `PalletOwner`."] - #[doc = ""] - #[doc = "May only be called either by root, or by `PalletOwner`."] - pub fn set_owner( - &self, - new_owner: ::core::option::Option<::subxt::sp_core::crypto::AccountId32>, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - SetOwner, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 168u8, 223u8, 175u8, 15u8, 5u8, 101u8, 85u8, 40u8, 177u8, 36u8, 145u8, - 67u8, 135u8, 179u8, 171u8, 30u8, 17u8, 130u8, 2u8, 99u8, 96u8, 141u8, - 109u8, 36u8, 54u8, 185u8, 38u8, 48u8, 191u8, 233u8, 104u8, 163u8, - ] - { - let call = SetOwner { new_owner }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Halt or resume all/some pallet operations."] - #[doc = ""] - #[doc = "May only be called either by root, or by `PalletOwner`."] - pub fn set_operating_mode( - &self, - operating_mode: runtime_types::bp_messages::OperatingMode, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - SetOperatingMode, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 51u8, 64u8, 160u8, 51u8, 9u8, 118u8, 71u8, 106u8, 25u8, 107u8, 67u8, - 86u8, 123u8, 215u8, 161u8, 150u8, 233u8, 199u8, 212u8, 78u8, 233u8, - 35u8, 120u8, 249u8, 145u8, 110u8, 105u8, 78u8, 67u8, 64u8, 189u8, - 199u8, - ] - { - let call = SetOperatingMode { operating_mode }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Update pallet parameter."] - #[doc = ""] - #[doc = "May only be called either by root, or by `PalletOwner`."] - #[doc = ""] - #[doc = "The weight is: single read for permissions check + 2 writes for parameter value and"] - #[doc = "event."] - pub fn update_pallet_parameter( - &self, - parameter: (), - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - UpdatePalletParameter, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .call_hash::()? 
- == [ - 252u8, 109u8, 232u8, 190u8, 218u8, 178u8, 4u8, 197u8, 159u8, 44u8, - 100u8, 111u8, 106u8, 105u8, 69u8, 161u8, 170u8, 208u8, 241u8, 102u8, - 102u8, 157u8, 19u8, 93u8, 168u8, 66u8, 205u8, 174u8, 158u8, 21u8, - 201u8, 204u8, - ] - { - let call = UpdatePalletParameter { parameter }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Send message over lane."] - pub fn send_message( - &self, - lane_id: [::core::primitive::u8; 4usize], - payload: runtime_types::bp_message_dispatch::MessagePayload< - ::subxt::sp_core::crypto::AccountId32, - runtime_types::sp_runtime::MultiSigner, - runtime_types::sp_runtime::MultiSignature, - ::std::vec::Vec<::core::primitive::u8>, - >, - delivery_and_dispatch_fee: ::core::primitive::u128, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - SendMessage, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 159u8, 54u8, 99u8, 44u8, 222u8, 24u8, 28u8, 193u8, 253u8, 233u8, 170u8, - 10u8, 56u8, 217u8, 127u8, 71u8, 83u8, 188u8, 101u8, 15u8, 38u8, 2u8, - 193u8, 228u8, 195u8, 106u8, 68u8, 10u8, 216u8, 237u8, 99u8, 201u8, - ] - { - let call = SendMessage { - lane_id, - payload, - delivery_and_dispatch_fee, - }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Pay additional fee for the message."] - pub fn increase_message_fee( - &self, - lane_id: [::core::primitive::u8; 4usize], - nonce: ::core::primitive::u64, - additional_fee: ::core::primitive::u128, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - IncreaseMessageFee, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 98u8, 74u8, 240u8, 247u8, 27u8, 236u8, 48u8, 148u8, 45u8, 53u8, 212u8, - 214u8, 25u8, 170u8, 120u8, 109u8, 35u8, 111u8, 27u8, 167u8, 195u8, - 112u8, 76u8, 112u8, 108u8, 74u8, 219u8, 100u8, 226u8, 255u8, 106u8, - 47u8, - ] - { - let call = IncreaseMessageFee { - lane_id, - nonce, - additional_fee, - }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Receive messages proof from bridged chain."] - #[doc = ""] - #[doc = "The weight of the call assumes that the transaction always brings outbound lane"] - #[doc = "state update. Because of that, the submitter (relayer) has no benefit of not including"] - #[doc = "this data in the transaction, so reward confirmations lags should be minimal."] - pub fn receive_messages_proof( - &self, - relayer_id_at_bridged_chain: ::subxt::sp_core::crypto::AccountId32, - proof : runtime_types :: bridge_runtime_common :: messages :: target :: FromBridgedChainMessagesProof < :: subxt :: sp_core :: H256 >, - messages_count: ::core::primitive::u32, - dispatch_weight: ::core::primitive::u64, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - ReceiveMessagesProof, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? 
- == [ - 73u8, 29u8, 15u8, 165u8, 110u8, 25u8, 158u8, 252u8, 156u8, 95u8, 235u8, - 130u8, 89u8, 18u8, 160u8, 103u8, 122u8, 6u8, 208u8, 159u8, 245u8, 36u8, - 219u8, 99u8, 72u8, 244u8, 213u8, 172u8, 199u8, 85u8, 109u8, 105u8, - ] - { - let call = ReceiveMessagesProof { - relayer_id_at_bridged_chain, - proof, - messages_count, - dispatch_weight, - }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Receive messages delivery proof from bridged chain."] - pub fn receive_messages_delivery_proof( - &self, - proof : runtime_types :: bridge_runtime_common :: messages :: source :: FromBridgedChainMessagesDeliveryProof < :: subxt :: sp_core :: H256 >, - relayers_state: runtime_types::bp_messages::UnrewardedRelayersState, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - ReceiveMessagesDeliveryProof, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .call_hash::()? - == [ - 53u8, 89u8, 207u8, 87u8, 143u8, 218u8, 160u8, 197u8, 228u8, 186u8, - 167u8, 99u8, 14u8, 227u8, 255u8, 92u8, 61u8, 116u8, 193u8, 22u8, 0u8, - 1u8, 140u8, 18u8, 6u8, 188u8, 97u8, 3u8, 194u8, 209u8, 152u8, 60u8, - ] - { - let call = ReceiveMessagesDeliveryProof { - proof, - relayers_state, - }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - pub type Event = runtime_types::pallet_bridge_messages::pallet::Event; - pub mod events { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "Pallet parameter has been updated."] - pub struct ParameterUpdated(pub ()); - impl ::subxt::Event for ParameterUpdated { - const PALLET: &'static str = "BridgeRococoMessages"; - const EVENT: &'static str = "ParameterUpdated"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "Message has been accepted and is waiting to be delivered."] - pub struct MessageAccepted( - pub [::core::primitive::u8; 4usize], - pub ::core::primitive::u64, - ); - impl ::subxt::Event for MessageAccepted { - const PALLET: &'static str = "BridgeRococoMessages"; - const EVENT: &'static str = "MessageAccepted"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "Messages in the inclusive range have been delivered to the bridged chain."] - pub struct MessagesDelivered( - pub [::core::primitive::u8; 4usize], - pub runtime_types::bp_messages::DeliveredMessages, - ); - impl ::subxt::Event for MessagesDelivered { - const PALLET: &'static str = "BridgeRococoMessages"; - const EVENT: &'static str = "MessagesDelivered"; - } - } - pub mod storage { - use super::runtime_types; - pub struct PalletOwner; - impl ::subxt::StorageEntry for PalletOwner { - const PALLET: &'static str = "BridgeRococoMessages"; - const STORAGE: &'static str = "PalletOwner"; - type Value = ::subxt::sp_core::crypto::AccountId32; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct PalletOperatingMode; - impl ::subxt::StorageEntry for PalletOperatingMode { - const PALLET: &'static str = "BridgeRococoMessages"; - const STORAGE: &'static str = "PalletOperatingMode"; - type Value = runtime_types::bp_messages::OperatingMode; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct InboundLanes<'a>(pub &'a [::core::primitive::u8; 
4usize]); - impl ::subxt::StorageEntry for InboundLanes<'_> { - const PALLET: &'static str = "BridgeRococoMessages"; - const STORAGE: &'static str = "InboundLanes"; - type Value = runtime_types::bp_messages::InboundLaneData< - ::subxt::sp_core::crypto::AccountId32, - >; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( - &self.0, - ::subxt::StorageHasher::Blake2_128Concat, - )]) - } - } - pub struct OutboundLanes<'a>(pub &'a [::core::primitive::u8; 4usize]); - impl ::subxt::StorageEntry for OutboundLanes<'_> { - const PALLET: &'static str = "BridgeRococoMessages"; - const STORAGE: &'static str = "OutboundLanes"; - type Value = runtime_types::bp_messages::OutboundLaneData; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( - &self.0, - ::subxt::StorageHasher::Blake2_128Concat, - )]) - } - } - pub struct OutboundMessages<'a>(pub &'a runtime_types::bp_messages::MessageKey); - impl ::subxt::StorageEntry for OutboundMessages<'_> { - const PALLET: &'static str = "BridgeRococoMessages"; - const STORAGE: &'static str = "OutboundMessages"; - type Value = runtime_types::bp_messages::MessageData<::core::primitive::u128>; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( - &self.0, - ::subxt::StorageHasher::Blake2_128Concat, - )]) - } - } - pub struct StorageApi<'a, T: ::subxt::Config> { - client: &'a ::subxt::Client, - } - impl<'a, T: ::subxt::Config> StorageApi<'a, T> { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { client } - } - #[doc = " Optional pallet owner."] - #[doc = ""] - #[doc = " Pallet owner has a right to halt all pallet operations and then resume it. If it is"] - #[doc = " `None`, then there are no direct ways to halt/resume pallet operations, but other"] - #[doc = " runtime methods may still be used to do that (i.e. democracy::referendum to update halt"] - #[doc = " flag directly or call the `halt_operations`)."] - pub async fn pallet_owner( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::core::option::Option<::subxt::sp_core::crypto::AccountId32>, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 115u8, 57u8, 104u8, 22u8, 119u8, 16u8, 215u8, 71u8, 228u8, 104u8, - 111u8, 24u8, 53u8, 155u8, 26u8, 121u8, 143u8, 126u8, 72u8, 148u8, - 105u8, 132u8, 190u8, 40u8, 233u8, 219u8, 19u8, 143u8, 255u8, 20u8, - 220u8, 124u8, - ] - { - let entry = PalletOwner; - self.client.storage().fetch(&entry, block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The current operating mode of the pallet."] - #[doc = ""] - #[doc = " Depending on the mode either all, some, or no transactions will be allowed."] - pub async fn pallet_operating_mode( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - runtime_types::bp_messages::OperatingMode, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .storage_hash::()? 
- == [ - 210u8, 120u8, 80u8, 199u8, 37u8, 129u8, 219u8, 178u8, 3u8, 129u8, - 160u8, 77u8, 255u8, 190u8, 33u8, 163u8, 1u8, 234u8, 96u8, 88u8, 157u8, - 45u8, 31u8, 136u8, 137u8, 30u8, 21u8, 47u8, 118u8, 28u8, 240u8, 131u8, - ] - { - let entry = PalletOperatingMode; - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Map of lane id => inbound lane data."] - pub async fn inbound_lanes( - &self, - _0: &[::core::primitive::u8; 4usize], - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - runtime_types::bp_messages::InboundLaneData< - ::subxt::sp_core::crypto::AccountId32, - >, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 147u8, 219u8, 249u8, 150u8, 150u8, 102u8, 58u8, 115u8, 13u8, 113u8, - 123u8, 132u8, 192u8, 87u8, 188u8, 170u8, 17u8, 101u8, 23u8, 37u8, - 209u8, 188u8, 148u8, 44u8, 67u8, 5u8, 197u8, 202u8, 247u8, 177u8, 87u8, - 22u8, - ] - { - let entry = InboundLanes(_0); - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Map of lane id => inbound lane data."] - pub async fn inbound_lanes_iter( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::subxt::KeyIter<'a, T, InboundLanes<'a>>, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 147u8, 219u8, 249u8, 150u8, 150u8, 102u8, 58u8, 115u8, 13u8, 113u8, - 123u8, 132u8, 192u8, 87u8, 188u8, 170u8, 17u8, 101u8, 23u8, 37u8, - 209u8, 188u8, 148u8, 44u8, 67u8, 5u8, 197u8, 202u8, 247u8, 177u8, 87u8, - 22u8, - ] - { - self.client.storage().iter(block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Map of lane id => outbound lane data."] - pub async fn outbound_lanes( - &self, - _0: &[::core::primitive::u8; 4usize], - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - runtime_types::bp_messages::OutboundLaneData, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 167u8, 52u8, 228u8, 63u8, 97u8, 175u8, 67u8, 104u8, 3u8, 142u8, 1u8, - 95u8, 125u8, 145u8, 23u8, 141u8, 69u8, 159u8, 248u8, 138u8, 132u8, - 134u8, 226u8, 39u8, 80u8, 126u8, 65u8, 114u8, 181u8, 100u8, 194u8, - 217u8, - ] - { - let entry = OutboundLanes(_0); - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Map of lane id => outbound lane data."] - pub async fn outbound_lanes_iter( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::subxt::KeyIter<'a, T, OutboundLanes<'a>>, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? 
- == [ - 167u8, 52u8, 228u8, 63u8, 97u8, 175u8, 67u8, 104u8, 3u8, 142u8, 1u8, - 95u8, 125u8, 145u8, 23u8, 141u8, 69u8, 159u8, 248u8, 138u8, 132u8, - 134u8, 226u8, 39u8, 80u8, 126u8, 65u8, 114u8, 181u8, 100u8, 194u8, - 217u8, - ] - { - self.client.storage().iter(block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " All queued outbound messages."] - pub async fn outbound_messages( - &self, - _0: &runtime_types::bp_messages::MessageKey, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::core::option::Option< - runtime_types::bp_messages::MessageData<::core::primitive::u128>, - >, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 177u8, 51u8, 170u8, 171u8, 178u8, 211u8, 79u8, 214u8, 67u8, 138u8, - 133u8, 155u8, 41u8, 236u8, 49u8, 87u8, 77u8, 61u8, 87u8, 87u8, 5u8, - 52u8, 16u8, 64u8, 202u8, 215u8, 40u8, 221u8, 179u8, 109u8, 76u8, 110u8, - ] - { - let entry = OutboundMessages(_0); - self.client.storage().fetch(&entry, block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " All queued outbound messages."] - pub async fn outbound_messages_iter( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::subxt::KeyIter<'a, T, OutboundMessages<'a>>, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 177u8, 51u8, 170u8, 171u8, 178u8, 211u8, 79u8, 214u8, 67u8, 138u8, - 133u8, 155u8, 41u8, 236u8, 49u8, 87u8, 77u8, 61u8, 87u8, 87u8, 5u8, - 52u8, 16u8, 64u8, 202u8, 215u8, 40u8, 221u8, 179u8, 109u8, 76u8, 110u8, - ] - { - self.client.storage().iter(block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - pub mod constants { - use super::runtime_types; - pub struct ConstantsApi<'a, T: ::subxt::Config> { - client: &'a ::subxt::Client, - } - impl<'a, T: ::subxt::Config> ConstantsApi<'a, T> { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { client } - } - #[doc = " Gets the chain id value from the instance."] - pub fn bridged_chain_id( - &self, - ) -> ::core::result::Result<[::core::primitive::u8; 4usize], ::subxt::BasicError> - { - if self - .client - .metadata() - .constant_hash("BridgeRococoMessages", "BridgedChainId")? 
- == [ - 133u8, 139u8, 37u8, 221u8, 12u8, 53u8, 28u8, 244u8, 20u8, 208u8, 170u8, - 206u8, 199u8, 163u8, 64u8, 197u8, 53u8, 203u8, 37u8, 207u8, 163u8, 8u8, - 105u8, 94u8, 247u8, 117u8, 251u8, 97u8, 243u8, 237u8, 116u8, 130u8, - ] - { - let pallet = self.client.metadata().pallet("BridgeRococoMessages")?; - let constant = pallet.constant("BridgedChainId")?; - let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; - Ok(value) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - } - pub mod bridge_wococo_messages { - use super::root_mod; - use super::runtime_types; - pub mod calls { - use super::root_mod; - use super::runtime_types; - type DispatchError = runtime_types::sp_runtime::DispatchError; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct SetOwner { - pub new_owner: ::core::option::Option<::subxt::sp_core::crypto::AccountId32>, - } - impl ::subxt::Call for SetOwner { - const PALLET: &'static str = "BridgeWococoMessages"; - const FUNCTION: &'static str = "set_owner"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct SetOperatingMode { - pub operating_mode: runtime_types::bp_messages::OperatingMode, - } - impl ::subxt::Call for SetOperatingMode { - const PALLET: &'static str = "BridgeWococoMessages"; - const FUNCTION: &'static str = "set_operating_mode"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct UpdatePalletParameter { - pub parameter: (), - } - impl ::subxt::Call for UpdatePalletParameter { - const PALLET: &'static str = "BridgeWococoMessages"; - const FUNCTION: &'static str = "update_pallet_parameter"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct SendMessage { - pub lane_id: [::core::primitive::u8; 4usize], - pub payload: runtime_types::bp_message_dispatch::MessagePayload< - ::subxt::sp_core::crypto::AccountId32, - runtime_types::sp_runtime::MultiSigner, - runtime_types::sp_runtime::MultiSignature, - ::std::vec::Vec<::core::primitive::u8>, - >, - pub delivery_and_dispatch_fee: ::core::primitive::u128, - } - impl ::subxt::Call for SendMessage { - const PALLET: &'static str = "BridgeWococoMessages"; - const FUNCTION: &'static str = "send_message"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct IncreaseMessageFee { - pub lane_id: [::core::primitive::u8; 4usize], - pub nonce: ::core::primitive::u64, - pub additional_fee: ::core::primitive::u128, - } - impl ::subxt::Call for IncreaseMessageFee { - const PALLET: &'static str = "BridgeWococoMessages"; - const FUNCTION: &'static str = "increase_message_fee"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct ReceiveMessagesProof { pub relayer_id_at_bridged_chain : :: subxt :: sp_core :: crypto :: AccountId32 , pub proof : runtime_types :: bridge_runtime_common :: messages :: target :: FromBridgedChainMessagesProof < :: subxt :: sp_core :: H256 > , pub messages_count : :: core :: primitive :: u32 , pub dispatch_weight : :: core :: primitive :: u64 , } - impl ::subxt::Call for ReceiveMessagesProof { - const PALLET: &'static str = "BridgeWococoMessages"; - const FUNCTION: &'static str = "receive_messages_proof"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct ReceiveMessagesDeliveryProof { pub proof : runtime_types :: bridge_runtime_common :: messages :: source :: 
FromBridgedChainMessagesDeliveryProof < :: subxt :: sp_core :: H256 > , pub relayers_state : runtime_types :: bp_messages :: UnrewardedRelayersState , } - impl ::subxt::Call for ReceiveMessagesDeliveryProof { - const PALLET: &'static str = "BridgeWococoMessages"; - const FUNCTION: &'static str = "receive_messages_delivery_proof"; - } - pub struct TransactionApi<'a, T: ::subxt::Config, X> { - client: &'a ::subxt::Client, - marker: ::core::marker::PhantomData, - } - impl<'a, T, X> TransactionApi<'a, T, X> - where - T: ::subxt::Config, - X: ::subxt::extrinsic::ExtrinsicParams, - { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { - client, - marker: ::core::marker::PhantomData, - } - } - #[doc = "Change `PalletOwner`."] - #[doc = ""] - #[doc = "May only be called either by root, or by `PalletOwner`."] - pub fn set_owner( - &self, - new_owner: ::core::option::Option<::subxt::sp_core::crypto::AccountId32>, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - SetOwner, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 168u8, 223u8, 175u8, 15u8, 5u8, 101u8, 85u8, 40u8, 177u8, 36u8, 145u8, - 67u8, 135u8, 179u8, 171u8, 30u8, 17u8, 130u8, 2u8, 99u8, 96u8, 141u8, - 109u8, 36u8, 54u8, 185u8, 38u8, 48u8, 191u8, 233u8, 104u8, 163u8, - ] - { - let call = SetOwner { new_owner }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Halt or resume all/some pallet operations."] - #[doc = ""] - #[doc = "May only be called either by root, or by `PalletOwner`."] - pub fn set_operating_mode( - &self, - operating_mode: runtime_types::bp_messages::OperatingMode, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - SetOperatingMode, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 51u8, 64u8, 160u8, 51u8, 9u8, 118u8, 71u8, 106u8, 25u8, 107u8, 67u8, - 86u8, 123u8, 215u8, 161u8, 150u8, 233u8, 199u8, 212u8, 78u8, 233u8, - 35u8, 120u8, 249u8, 145u8, 110u8, 105u8, 78u8, 67u8, 64u8, 189u8, - 199u8, - ] - { - let call = SetOperatingMode { operating_mode }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Update pallet parameter."] - #[doc = ""] - #[doc = "May only be called either by root, or by `PalletOwner`."] - #[doc = ""] - #[doc = "The weight is: single read for permissions check + 2 writes for parameter value and"] - #[doc = "event."] - pub fn update_pallet_parameter( - &self, - parameter: (), - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - UpdatePalletParameter, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .call_hash::()? 
- == [ - 252u8, 109u8, 232u8, 190u8, 218u8, 178u8, 4u8, 197u8, 159u8, 44u8, - 100u8, 111u8, 106u8, 105u8, 69u8, 161u8, 170u8, 208u8, 241u8, 102u8, - 102u8, 157u8, 19u8, 93u8, 168u8, 66u8, 205u8, 174u8, 158u8, 21u8, - 201u8, 204u8, - ] - { - let call = UpdatePalletParameter { parameter }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Send message over lane."] - pub fn send_message( - &self, - lane_id: [::core::primitive::u8; 4usize], - payload: runtime_types::bp_message_dispatch::MessagePayload< - ::subxt::sp_core::crypto::AccountId32, - runtime_types::sp_runtime::MultiSigner, - runtime_types::sp_runtime::MultiSignature, - ::std::vec::Vec<::core::primitive::u8>, - >, - delivery_and_dispatch_fee: ::core::primitive::u128, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - SendMessage, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 159u8, 54u8, 99u8, 44u8, 222u8, 24u8, 28u8, 193u8, 253u8, 233u8, 170u8, - 10u8, 56u8, 217u8, 127u8, 71u8, 83u8, 188u8, 101u8, 15u8, 38u8, 2u8, - 193u8, 228u8, 195u8, 106u8, 68u8, 10u8, 216u8, 237u8, 99u8, 201u8, - ] - { - let call = SendMessage { - lane_id, - payload, - delivery_and_dispatch_fee, - }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Pay additional fee for the message."] - pub fn increase_message_fee( - &self, - lane_id: [::core::primitive::u8; 4usize], - nonce: ::core::primitive::u64, - additional_fee: ::core::primitive::u128, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - IncreaseMessageFee, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 98u8, 74u8, 240u8, 247u8, 27u8, 236u8, 48u8, 148u8, 45u8, 53u8, 212u8, - 214u8, 25u8, 170u8, 120u8, 109u8, 35u8, 111u8, 27u8, 167u8, 195u8, - 112u8, 76u8, 112u8, 108u8, 74u8, 219u8, 100u8, 226u8, 255u8, 106u8, - 47u8, - ] - { - let call = IncreaseMessageFee { - lane_id, - nonce, - additional_fee, - }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Receive messages proof from bridged chain."] - #[doc = ""] - #[doc = "The weight of the call assumes that the transaction always brings outbound lane"] - #[doc = "state update. Because of that, the submitter (relayer) has no benefit of not including"] - #[doc = "this data in the transaction, so reward confirmations lags should be minimal."] - pub fn receive_messages_proof( - &self, - relayer_id_at_bridged_chain: ::subxt::sp_core::crypto::AccountId32, - proof : runtime_types :: bridge_runtime_common :: messages :: target :: FromBridgedChainMessagesProof < :: subxt :: sp_core :: H256 >, - messages_count: ::core::primitive::u32, - dispatch_weight: ::core::primitive::u64, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - ReceiveMessagesProof, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? 
- == [ - 73u8, 29u8, 15u8, 165u8, 110u8, 25u8, 158u8, 252u8, 156u8, 95u8, 235u8, - 130u8, 89u8, 18u8, 160u8, 103u8, 122u8, 6u8, 208u8, 159u8, 245u8, 36u8, - 219u8, 99u8, 72u8, 244u8, 213u8, 172u8, 199u8, 85u8, 109u8, 105u8, - ] - { - let call = ReceiveMessagesProof { - relayer_id_at_bridged_chain, - proof, - messages_count, - dispatch_weight, - }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Receive messages delivery proof from bridged chain."] - pub fn receive_messages_delivery_proof( - &self, - proof : runtime_types :: bridge_runtime_common :: messages :: source :: FromBridgedChainMessagesDeliveryProof < :: subxt :: sp_core :: H256 >, - relayers_state: runtime_types::bp_messages::UnrewardedRelayersState, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - ReceiveMessagesDeliveryProof, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .call_hash::()? - == [ - 53u8, 89u8, 207u8, 87u8, 143u8, 218u8, 160u8, 197u8, 228u8, 186u8, - 167u8, 99u8, 14u8, 227u8, 255u8, 92u8, 61u8, 116u8, 193u8, 22u8, 0u8, - 1u8, 140u8, 18u8, 6u8, 188u8, 97u8, 3u8, 194u8, 209u8, 152u8, 60u8, - ] - { - let call = ReceiveMessagesDeliveryProof { - proof, - relayers_state, - }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - pub type Event = runtime_types::pallet_bridge_messages::pallet::Event; - pub mod events { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "Pallet parameter has been updated."] - pub struct ParameterUpdated(pub ()); - impl ::subxt::Event for ParameterUpdated { - const PALLET: &'static str = "BridgeWococoMessages"; - const EVENT: &'static str = "ParameterUpdated"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "Message has been accepted and is waiting to be delivered."] - pub struct MessageAccepted( - pub [::core::primitive::u8; 4usize], - pub ::core::primitive::u64, - ); - impl ::subxt::Event for MessageAccepted { - const PALLET: &'static str = "BridgeWococoMessages"; - const EVENT: &'static str = "MessageAccepted"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "Messages in the inclusive range have been delivered to the bridged chain."] - pub struct MessagesDelivered( - pub [::core::primitive::u8; 4usize], - pub runtime_types::bp_messages::DeliveredMessages, - ); - impl ::subxt::Event for MessagesDelivered { - const PALLET: &'static str = "BridgeWococoMessages"; - const EVENT: &'static str = "MessagesDelivered"; - } - } - pub mod storage { - use super::runtime_types; - pub struct PalletOwner; - impl ::subxt::StorageEntry for PalletOwner { - const PALLET: &'static str = "BridgeWococoMessages"; - const STORAGE: &'static str = "PalletOwner"; - type Value = ::subxt::sp_core::crypto::AccountId32; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct PalletOperatingMode; - impl ::subxt::StorageEntry for PalletOperatingMode { - const PALLET: &'static str = "BridgeWococoMessages"; - const STORAGE: &'static str = "PalletOperatingMode"; - type Value = runtime_types::bp_messages::OperatingMode; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct InboundLanes<'a>(pub &'a [::core::primitive::u8; 
4usize]); - impl ::subxt::StorageEntry for InboundLanes<'_> { - const PALLET: &'static str = "BridgeWococoMessages"; - const STORAGE: &'static str = "InboundLanes"; - type Value = runtime_types::bp_messages::InboundLaneData< - ::subxt::sp_core::crypto::AccountId32, - >; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( - &self.0, - ::subxt::StorageHasher::Blake2_128Concat, - )]) - } - } - pub struct OutboundLanes<'a>(pub &'a [::core::primitive::u8; 4usize]); - impl ::subxt::StorageEntry for OutboundLanes<'_> { - const PALLET: &'static str = "BridgeWococoMessages"; - const STORAGE: &'static str = "OutboundLanes"; - type Value = runtime_types::bp_messages::OutboundLaneData; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( - &self.0, - ::subxt::StorageHasher::Blake2_128Concat, - )]) - } - } - pub struct OutboundMessages<'a>(pub &'a runtime_types::bp_messages::MessageKey); - impl ::subxt::StorageEntry for OutboundMessages<'_> { - const PALLET: &'static str = "BridgeWococoMessages"; - const STORAGE: &'static str = "OutboundMessages"; - type Value = runtime_types::bp_messages::MessageData<::core::primitive::u128>; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( - &self.0, - ::subxt::StorageHasher::Blake2_128Concat, - )]) - } - } - pub struct StorageApi<'a, T: ::subxt::Config> { - client: &'a ::subxt::Client, - } - impl<'a, T: ::subxt::Config> StorageApi<'a, T> { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { client } - } - #[doc = " Optional pallet owner."] - #[doc = ""] - #[doc = " Pallet owner has a right to halt all pallet operations and then resume it. If it is"] - #[doc = " `None`, then there are no direct ways to halt/resume pallet operations, but other"] - #[doc = " runtime methods may still be used to do that (i.e. democracy::referendum to update halt"] - #[doc = " flag directly or call the `halt_operations`)."] - pub async fn pallet_owner( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::core::option::Option<::subxt::sp_core::crypto::AccountId32>, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 115u8, 57u8, 104u8, 22u8, 119u8, 16u8, 215u8, 71u8, 228u8, 104u8, - 111u8, 24u8, 53u8, 155u8, 26u8, 121u8, 143u8, 126u8, 72u8, 148u8, - 105u8, 132u8, 190u8, 40u8, 233u8, 219u8, 19u8, 143u8, 255u8, 20u8, - 220u8, 124u8, - ] - { - let entry = PalletOwner; - self.client.storage().fetch(&entry, block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The current operating mode of the pallet."] - #[doc = ""] - #[doc = " Depending on the mode either all, some, or no transactions will be allowed."] - pub async fn pallet_operating_mode( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - runtime_types::bp_messages::OperatingMode, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .storage_hash::()? 
- == [ - 210u8, 120u8, 80u8, 199u8, 37u8, 129u8, 219u8, 178u8, 3u8, 129u8, - 160u8, 77u8, 255u8, 190u8, 33u8, 163u8, 1u8, 234u8, 96u8, 88u8, 157u8, - 45u8, 31u8, 136u8, 137u8, 30u8, 21u8, 47u8, 118u8, 28u8, 240u8, 131u8, - ] - { - let entry = PalletOperatingMode; - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Map of lane id => inbound lane data."] - pub async fn inbound_lanes( - &self, - _0: &[::core::primitive::u8; 4usize], - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - runtime_types::bp_messages::InboundLaneData< - ::subxt::sp_core::crypto::AccountId32, - >, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 147u8, 219u8, 249u8, 150u8, 150u8, 102u8, 58u8, 115u8, 13u8, 113u8, - 123u8, 132u8, 192u8, 87u8, 188u8, 170u8, 17u8, 101u8, 23u8, 37u8, - 209u8, 188u8, 148u8, 44u8, 67u8, 5u8, 197u8, 202u8, 247u8, 177u8, 87u8, - 22u8, - ] - { - let entry = InboundLanes(_0); - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Map of lane id => inbound lane data."] - pub async fn inbound_lanes_iter( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::subxt::KeyIter<'a, T, InboundLanes<'a>>, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 147u8, 219u8, 249u8, 150u8, 150u8, 102u8, 58u8, 115u8, 13u8, 113u8, - 123u8, 132u8, 192u8, 87u8, 188u8, 170u8, 17u8, 101u8, 23u8, 37u8, - 209u8, 188u8, 148u8, 44u8, 67u8, 5u8, 197u8, 202u8, 247u8, 177u8, 87u8, - 22u8, - ] - { - self.client.storage().iter(block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Map of lane id => outbound lane data."] - pub async fn outbound_lanes( - &self, - _0: &[::core::primitive::u8; 4usize], - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - runtime_types::bp_messages::OutboundLaneData, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 167u8, 52u8, 228u8, 63u8, 97u8, 175u8, 67u8, 104u8, 3u8, 142u8, 1u8, - 95u8, 125u8, 145u8, 23u8, 141u8, 69u8, 159u8, 248u8, 138u8, 132u8, - 134u8, 226u8, 39u8, 80u8, 126u8, 65u8, 114u8, 181u8, 100u8, 194u8, - 217u8, - ] - { - let entry = OutboundLanes(_0); - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Map of lane id => outbound lane data."] - pub async fn outbound_lanes_iter( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::subxt::KeyIter<'a, T, OutboundLanes<'a>>, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? 
- == [ - 167u8, 52u8, 228u8, 63u8, 97u8, 175u8, 67u8, 104u8, 3u8, 142u8, 1u8, - 95u8, 125u8, 145u8, 23u8, 141u8, 69u8, 159u8, 248u8, 138u8, 132u8, - 134u8, 226u8, 39u8, 80u8, 126u8, 65u8, 114u8, 181u8, 100u8, 194u8, - 217u8, - ] - { - self.client.storage().iter(block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " All queued outbound messages."] - pub async fn outbound_messages( - &self, - _0: &runtime_types::bp_messages::MessageKey, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::core::option::Option< - runtime_types::bp_messages::MessageData<::core::primitive::u128>, - >, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 177u8, 51u8, 170u8, 171u8, 178u8, 211u8, 79u8, 214u8, 67u8, 138u8, - 133u8, 155u8, 41u8, 236u8, 49u8, 87u8, 77u8, 61u8, 87u8, 87u8, 5u8, - 52u8, 16u8, 64u8, 202u8, 215u8, 40u8, 221u8, 179u8, 109u8, 76u8, 110u8, - ] - { - let entry = OutboundMessages(_0); - self.client.storage().fetch(&entry, block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " All queued outbound messages."] - pub async fn outbound_messages_iter( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::subxt::KeyIter<'a, T, OutboundMessages<'a>>, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 177u8, 51u8, 170u8, 171u8, 178u8, 211u8, 79u8, 214u8, 67u8, 138u8, - 133u8, 155u8, 41u8, 236u8, 49u8, 87u8, 77u8, 61u8, 87u8, 87u8, 5u8, - 52u8, 16u8, 64u8, 202u8, 215u8, 40u8, 221u8, 179u8, 109u8, 76u8, 110u8, - ] - { - self.client.storage().iter(block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - pub mod constants { - use super::runtime_types; - pub struct ConstantsApi<'a, T: ::subxt::Config> { - client: &'a ::subxt::Client, - } - impl<'a, T: ::subxt::Config> ConstantsApi<'a, T> { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { client } - } - #[doc = " Gets the chain id value from the instance."] - pub fn bridged_chain_id( - &self, - ) -> ::core::result::Result<[::core::primitive::u8; 4usize], ::subxt::BasicError> - { - if self - .client - .metadata() - .constant_hash("BridgeWococoMessages", "BridgedChainId")? 
- == [ - 154u8, 72u8, 97u8, 79u8, 84u8, 66u8, 85u8, 2u8, 236u8, 184u8, 229u8, - 154u8, 144u8, 244u8, 122u8, 19u8, 61u8, 170u8, 228u8, 92u8, 221u8, - 160u8, 137u8, 95u8, 132u8, 191u8, 172u8, 201u8, 177u8, 162u8, 6u8, - 223u8, - ] - { - let pallet = self.client.metadata().pallet("BridgeWococoMessages")?; - let constant = pallet.constant("BridgedChainId")?; - let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; - Ok(value) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - } - pub mod bridge_rococo_messages_dispatch { - use super::root_mod; - use super::runtime_types; - pub type Event = runtime_types::pallet_bridge_dispatch::pallet::Event; - pub mod events { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "Message has been rejected before reaching dispatch."] - pub struct MessageRejected( - pub [::core::primitive::u8; 4usize], - pub ([::core::primitive::u8; 4usize], ::core::primitive::u64), - ); - impl ::subxt::Event for MessageRejected { - const PALLET: &'static str = "BridgeRococoMessagesDispatch"; - const EVENT: &'static str = "MessageRejected"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "Message has been rejected by dispatcher because of spec version mismatch."] - #[doc = "Last two arguments are: expected and passed spec version."] - pub struct MessageVersionSpecMismatch( - pub [::core::primitive::u8; 4usize], - pub ([::core::primitive::u8; 4usize], ::core::primitive::u64), - pub ::core::primitive::u32, - pub ::core::primitive::u32, - ); - impl ::subxt::Event for MessageVersionSpecMismatch { - const PALLET: &'static str = "BridgeRococoMessagesDispatch"; - const EVENT: &'static str = "MessageVersionSpecMismatch"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "Message has been rejected by dispatcher because of weight mismatch."] - #[doc = "Last two arguments are: expected and passed call weight."] - pub struct MessageWeightMismatch( - pub [::core::primitive::u8; 4usize], - pub ([::core::primitive::u8; 4usize], ::core::primitive::u64), - pub ::core::primitive::u64, - pub ::core::primitive::u64, - ); - impl ::subxt::Event for MessageWeightMismatch { - const PALLET: &'static str = "BridgeRococoMessagesDispatch"; - const EVENT: &'static str = "MessageWeightMismatch"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "Message signature mismatch."] - pub struct MessageSignatureMismatch( - pub [::core::primitive::u8; 4usize], - pub ([::core::primitive::u8; 4usize], ::core::primitive::u64), - ); - impl ::subxt::Event for MessageSignatureMismatch { - const PALLET: &'static str = "BridgeRococoMessagesDispatch"; - const EVENT: &'static str = "MessageSignatureMismatch"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "We have failed to decode Call from the message."] - pub struct MessageCallDecodeFailed( - pub [::core::primitive::u8; 4usize], - pub ([::core::primitive::u8; 4usize], ::core::primitive::u64), - ); - impl ::subxt::Event for MessageCallDecodeFailed { - const PALLET: &'static str = "BridgeRococoMessagesDispatch"; - const EVENT: &'static str = "MessageCallDecodeFailed"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "The call from the message has been rejected by the call filter."] - pub struct MessageCallRejected( - pub [::core::primitive::u8; 4usize], - 
pub ([::core::primitive::u8; 4usize], ::core::primitive::u64), - ); - impl ::subxt::Event for MessageCallRejected { - const PALLET: &'static str = "BridgeRococoMessagesDispatch"; - const EVENT: &'static str = "MessageCallRejected"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "The origin account has failed to pay fee for dispatching the message."] - pub struct MessageDispatchPaymentFailed( - pub [::core::primitive::u8; 4usize], - pub ([::core::primitive::u8; 4usize], ::core::primitive::u64), - pub ::subxt::sp_core::crypto::AccountId32, - pub ::core::primitive::u64, - ); - impl ::subxt::Event for MessageDispatchPaymentFailed { - const PALLET: &'static str = "BridgeRococoMessagesDispatch"; - const EVENT: &'static str = "MessageDispatchPaymentFailed"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "Message has been dispatched with given result."] - pub struct MessageDispatched( - pub [::core::primitive::u8; 4usize], - pub ([::core::primitive::u8; 4usize], ::core::primitive::u64), - pub ::core::result::Result<(), runtime_types::sp_runtime::DispatchError>, - ); - impl ::subxt::Event for MessageDispatched { - const PALLET: &'static str = "BridgeRococoMessagesDispatch"; - const EVENT: &'static str = "MessageDispatched"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "Phantom member, never used. Needed to handle multiple pallet instances."] - pub struct _Dummy; - impl ::subxt::Event for _Dummy { - const PALLET: &'static str = "BridgeRococoMessagesDispatch"; - const EVENT: &'static str = "_Dummy"; - } - } - } - pub mod bridge_wococo_messages_dispatch { - use super::root_mod; - use super::runtime_types; - pub type Event = runtime_types::pallet_bridge_dispatch::pallet::Event; - pub mod events { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "Message has been rejected before reaching dispatch."] - pub struct MessageRejected( - pub [::core::primitive::u8; 4usize], - pub ([::core::primitive::u8; 4usize], ::core::primitive::u64), - ); - impl ::subxt::Event for MessageRejected { - const PALLET: &'static str = "BridgeWococoMessagesDispatch"; - const EVENT: &'static str = "MessageRejected"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "Message has been rejected by dispatcher because of spec version mismatch."] - #[doc = "Last two arguments are: expected and passed spec version."] - pub struct MessageVersionSpecMismatch( - pub [::core::primitive::u8; 4usize], - pub ([::core::primitive::u8; 4usize], ::core::primitive::u64), - pub ::core::primitive::u32, - pub ::core::primitive::u32, - ); - impl ::subxt::Event for MessageVersionSpecMismatch { - const PALLET: &'static str = "BridgeWococoMessagesDispatch"; - const EVENT: &'static str = "MessageVersionSpecMismatch"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "Message has been rejected by dispatcher because of weight mismatch."] - #[doc = "Last two arguments are: expected and passed call weight."] - pub struct MessageWeightMismatch( - pub [::core::primitive::u8; 4usize], - pub ([::core::primitive::u8; 4usize], ::core::primitive::u64), - pub ::core::primitive::u64, - pub ::core::primitive::u64, - ); - impl ::subxt::Event for MessageWeightMismatch { - const PALLET: &'static str = "BridgeWococoMessagesDispatch"; - const EVENT: &'static str = "MessageWeightMismatch"; - } - 
#[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "Message signature mismatch."] - pub struct MessageSignatureMismatch( - pub [::core::primitive::u8; 4usize], - pub ([::core::primitive::u8; 4usize], ::core::primitive::u64), - ); - impl ::subxt::Event for MessageSignatureMismatch { - const PALLET: &'static str = "BridgeWococoMessagesDispatch"; - const EVENT: &'static str = "MessageSignatureMismatch"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "We have failed to decode Call from the message."] - pub struct MessageCallDecodeFailed( - pub [::core::primitive::u8; 4usize], - pub ([::core::primitive::u8; 4usize], ::core::primitive::u64), - ); - impl ::subxt::Event for MessageCallDecodeFailed { - const PALLET: &'static str = "BridgeWococoMessagesDispatch"; - const EVENT: &'static str = "MessageCallDecodeFailed"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "The call from the message has been rejected by the call filter."] - pub struct MessageCallRejected( - pub [::core::primitive::u8; 4usize], - pub ([::core::primitive::u8; 4usize], ::core::primitive::u64), - ); - impl ::subxt::Event for MessageCallRejected { - const PALLET: &'static str = "BridgeWococoMessagesDispatch"; - const EVENT: &'static str = "MessageCallRejected"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "The origin account has failed to pay fee for dispatching the message."] - pub struct MessageDispatchPaymentFailed( - pub [::core::primitive::u8; 4usize], - pub ([::core::primitive::u8; 4usize], ::core::primitive::u64), - pub ::subxt::sp_core::crypto::AccountId32, - pub ::core::primitive::u64, - ); - impl ::subxt::Event for MessageDispatchPaymentFailed { - const PALLET: &'static str = "BridgeWococoMessagesDispatch"; - const EVENT: &'static str = "MessageDispatchPaymentFailed"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "Message has been dispatched with given result."] - pub struct MessageDispatched( - pub [::core::primitive::u8; 4usize], - pub ([::core::primitive::u8; 4usize], ::core::primitive::u64), - pub ::core::result::Result<(), runtime_types::sp_runtime::DispatchError>, - ); - impl ::subxt::Event for MessageDispatched { - const PALLET: &'static str = "BridgeWococoMessagesDispatch"; - const EVENT: &'static str = "MessageDispatched"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "Phantom member, never used. 
Needed to handle multiple pallet instances."] - pub struct _Dummy; - impl ::subxt::Event for _Dummy { - const PALLET: &'static str = "BridgeWococoMessagesDispatch"; - const EVENT: &'static str = "_Dummy"; - } - } - } - pub mod collective { - use super::root_mod; - use super::runtime_types; - pub mod calls { - use super::root_mod; - use super::runtime_types; - type DispatchError = runtime_types::sp_runtime::DispatchError; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct SetMembers { - pub new_members: ::std::vec::Vec<::subxt::sp_core::crypto::AccountId32>, - pub prime: ::core::option::Option<::subxt::sp_core::crypto::AccountId32>, - pub old_count: ::core::primitive::u32, - } - impl ::subxt::Call for SetMembers { - const PALLET: &'static str = "Collective"; - const FUNCTION: &'static str = "set_members"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct Execute { - pub proposal: ::std::boxed::Box, - #[codec(compact)] - pub length_bound: ::core::primitive::u32, - } - impl ::subxt::Call for Execute { - const PALLET: &'static str = "Collective"; - const FUNCTION: &'static str = "execute"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct Propose { - #[codec(compact)] - pub threshold: ::core::primitive::u32, - pub proposal: ::std::boxed::Box, - #[codec(compact)] - pub length_bound: ::core::primitive::u32, - } - impl ::subxt::Call for Propose { - const PALLET: &'static str = "Collective"; - const FUNCTION: &'static str = "propose"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct Vote { - pub proposal: ::subxt::sp_core::H256, - #[codec(compact)] - pub index: ::core::primitive::u32, - pub approve: ::core::primitive::bool, - } - impl ::subxt::Call for Vote { - const PALLET: &'static str = "Collective"; - const FUNCTION: &'static str = "vote"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct Close { - pub proposal_hash: ::subxt::sp_core::H256, - #[codec(compact)] - pub index: ::core::primitive::u32, - #[codec(compact)] - pub proposal_weight_bound: ::core::primitive::u64, - #[codec(compact)] - pub length_bound: ::core::primitive::u32, - } - impl ::subxt::Call for Close { - const PALLET: &'static str = "Collective"; - const FUNCTION: &'static str = "close"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct DisapproveProposal { - pub proposal_hash: ::subxt::sp_core::H256, - } - impl ::subxt::Call for DisapproveProposal { - const PALLET: &'static str = "Collective"; - const FUNCTION: &'static str = "disapprove_proposal"; - } - pub struct TransactionApi<'a, T: ::subxt::Config, X> { - client: &'a ::subxt::Client, - marker: ::core::marker::PhantomData, - } - impl<'a, T, X> TransactionApi<'a, T, X> - where - T: ::subxt::Config, - X: ::subxt::extrinsic::ExtrinsicParams, - { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { - client, - marker: ::core::marker::PhantomData, - } - } - #[doc = "Set the collective's membership."] - #[doc = ""] - #[doc = "- `new_members`: The new member list. Be nice to the chain and provide it sorted."] - #[doc = "- `prime`: The prime member whose vote sets the default."] - #[doc = "- `old_count`: The upper bound for the previous number of members in storage. 
Used for"] - #[doc = " weight estimation."] - #[doc = ""] - #[doc = "Requires root origin."] - #[doc = ""] - #[doc = "NOTE: Does not enforce the expected `MaxMembers` limit on the amount of members, but"] - #[doc = " the weight estimations rely on it to estimate dispatchable weight."] - #[doc = ""] - #[doc = "# WARNING:"] - #[doc = ""] - #[doc = "The `pallet-collective` can also be managed by logic outside of the pallet through the"] - #[doc = "implementation of the trait [`ChangeMembers`]."] - #[doc = "Any call to `set_members` must be careful that the member set doesn't get out of sync"] - #[doc = "with other logic managing the member set."] - #[doc = ""] - #[doc = "# "] - #[doc = "## Weight"] - #[doc = "- `O(MP + N)` where:"] - #[doc = " - `M` old-members-count (code- and governance-bounded)"] - #[doc = " - `N` new-members-count (code- and governance-bounded)"] - #[doc = " - `P` proposals-count (code-bounded)"] - #[doc = "- DB:"] - #[doc = " - 1 storage mutation (codec `O(M)` read, `O(N)` write) for reading and writing the"] - #[doc = " members"] - #[doc = " - 1 storage read (codec `O(P)`) for reading the proposals"] - #[doc = " - `P` storage mutations (codec `O(M)`) for updating the votes for each proposal"] - #[doc = " - 1 storage write (codec `O(1)`) for deleting the old `prime` and setting the new one"] - #[doc = "# "] - pub fn set_members( - &self, - new_members: ::std::vec::Vec<::subxt::sp_core::crypto::AccountId32>, - prime: ::core::option::Option<::subxt::sp_core::crypto::AccountId32>, - old_count: ::core::primitive::u32, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - SetMembers, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 228u8, 186u8, 17u8, 12u8, 231u8, 231u8, 139u8, 15u8, 96u8, 200u8, 68u8, - 27u8, 61u8, 106u8, 245u8, 199u8, 120u8, 141u8, 95u8, 215u8, 36u8, 49u8, - 0u8, 163u8, 172u8, 252u8, 221u8, 9u8, 1u8, 222u8, 44u8, 214u8, - ] - { - let call = SetMembers { - new_members, - prime, - old_count, - }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Dispatch a proposal from a member using the `Member` origin."] - #[doc = ""] - #[doc = "Origin must be a member of the collective."] - #[doc = ""] - #[doc = "# "] - #[doc = "## Weight"] - #[doc = "- `O(M + P)` where `M` members-count (code-bounded) and `P` complexity of dispatching"] - #[doc = " `proposal`"] - #[doc = "- DB: 1 read (codec `O(M)`) + DB access of `proposal`"] - #[doc = "- 1 event"] - #[doc = "# "] - pub fn execute( - &self, - proposal: runtime_types::rococo_runtime::Call, - length_bound: ::core::primitive::u32, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - Execute, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? 
- == [ - 33u8, 13u8, 40u8, 147u8, 30u8, 13u8, 55u8, 70u8, 225u8, 38u8, 185u8, - 112u8, 158u8, 76u8, 117u8, 198u8, 3u8, 89u8, 222u8, 72u8, 228u8, 107u8, - 216u8, 92u8, 33u8, 166u8, 225u8, 63u8, 204u8, 83u8, 73u8, 33u8, - ] - { - let call = Execute { - proposal: ::std::boxed::Box::new(proposal), - length_bound, - }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Add a new proposal to either be voted on or executed directly."] - #[doc = ""] - #[doc = "Requires the sender to be member."] - #[doc = ""] - #[doc = "`threshold` determines whether `proposal` is executed directly (`threshold < 2`)"] - #[doc = "or put up for voting."] - #[doc = ""] - #[doc = "# "] - #[doc = "## Weight"] - #[doc = "- `O(B + M + P1)` or `O(B + M + P2)` where:"] - #[doc = " - `B` is `proposal` size in bytes (length-fee-bounded)"] - #[doc = " - `M` is members-count (code- and governance-bounded)"] - #[doc = " - branching is influenced by `threshold` where:"] - #[doc = " - `P1` is proposal execution complexity (`threshold < 2`)"] - #[doc = " - `P2` is proposals-count (code-bounded) (`threshold >= 2`)"] - #[doc = "- DB:"] - #[doc = " - 1 storage read `is_member` (codec `O(M)`)"] - #[doc = " - 1 storage read `ProposalOf::contains_key` (codec `O(1)`)"] - #[doc = " - DB accesses influenced by `threshold`:"] - #[doc = " - EITHER storage accesses done by `proposal` (`threshold < 2`)"] - #[doc = " - OR proposal insertion (`threshold <= 2`)"] - #[doc = " - 1 storage mutation `Proposals` (codec `O(P2)`)"] - #[doc = " - 1 storage mutation `ProposalCount` (codec `O(1)`)"] - #[doc = " - 1 storage write `ProposalOf` (codec `O(B)`)"] - #[doc = " - 1 storage write `Voting` (codec `O(M)`)"] - #[doc = " - 1 event"] - #[doc = "# "] - pub fn propose( - &self, - threshold: ::core::primitive::u32, - proposal: runtime_types::rococo_runtime::Call, - length_bound: ::core::primitive::u32, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - Propose, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 244u8, 118u8, 5u8, 210u8, 146u8, 37u8, 198u8, 107u8, 132u8, 59u8, - 227u8, 142u8, 200u8, 130u8, 76u8, 147u8, 11u8, 196u8, 239u8, 145u8, - 198u8, 196u8, 209u8, 184u8, 113u8, 116u8, 8u8, 88u8, 190u8, 230u8, - 242u8, 130u8, - ] - { - let call = Propose { - threshold, - proposal: ::std::boxed::Box::new(proposal), - length_bound, - }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Add an aye or nay vote for the sender to the given proposal."] - #[doc = ""] - #[doc = "Requires the sender to be a member."] - #[doc = ""] - #[doc = "Transaction fees will be waived if the member is voting on any particular proposal"] - #[doc = "for the first time and the call is successful. 
Subsequent vote changes will charge a"] - #[doc = "fee."] - #[doc = "# "] - #[doc = "## Weight"] - #[doc = "- `O(M)` where `M` is members-count (code- and governance-bounded)"] - #[doc = "- DB:"] - #[doc = " - 1 storage read `Members` (codec `O(M)`)"] - #[doc = " - 1 storage mutation `Voting` (codec `O(M)`)"] - #[doc = "- 1 event"] - #[doc = "# "] - pub fn vote( - &self, - proposal: ::subxt::sp_core::H256, - index: ::core::primitive::u32, - approve: ::core::primitive::bool, - ) -> Result< - ::subxt::SubmittableExtrinsic<'a, T, X, Vote, DispatchError, root_mod::Event>, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 129u8, 129u8, 2u8, 51u8, 247u8, 109u8, 5u8, 198u8, 38u8, 192u8, 159u8, - 167u8, 176u8, 0u8, 181u8, 84u8, 92u8, 93u8, 179u8, 86u8, 108u8, 155u8, - 119u8, 3u8, 159u8, 12u8, 206u8, 121u8, 154u8, 226u8, 199u8, 146u8, - ] - { - let call = Vote { - proposal, - index, - approve, - }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Close a vote that is either approved, disapproved or whose voting period has ended."] - #[doc = ""] - #[doc = "May be called by any signed account in order to finish voting and close the proposal."] - #[doc = ""] - #[doc = "If called before the end of the voting period it will only close the vote if it is"] - #[doc = "has enough votes to be approved or disapproved."] - #[doc = ""] - #[doc = "If called after the end of the voting period abstentions are counted as rejections"] - #[doc = "unless there is a prime member set and the prime member cast an approval."] - #[doc = ""] - #[doc = "If the close operation completes successfully with disapproval, the transaction fee will"] - #[doc = "be waived. Otherwise execution of the approved operation will be charged to the caller."] - #[doc = ""] - #[doc = "+ `proposal_weight_bound`: The maximum amount of weight consumed by executing the closed"] - #[doc = "proposal."] - #[doc = "+ `length_bound`: The upper bound for the length of the proposal in storage. Checked via"] - #[doc = "`storage::read` so it is `size_of::() == 4` larger than the pure length."] - #[doc = ""] - #[doc = "# "] - #[doc = "## Weight"] - #[doc = "- `O(B + M + P1 + P2)` where:"] - #[doc = " - `B` is `proposal` size in bytes (length-fee-bounded)"] - #[doc = " - `M` is members-count (code- and governance-bounded)"] - #[doc = " - `P1` is the complexity of `proposal` preimage."] - #[doc = " - `P2` is proposal-count (code-bounded)"] - #[doc = "- DB:"] - #[doc = " - 2 storage reads (`Members`: codec `O(M)`, `Prime`: codec `O(1)`)"] - #[doc = " - 3 mutations (`Voting`: codec `O(M)`, `ProposalOf`: codec `O(B)`, `Proposals`: codec"] - #[doc = " `O(P2)`)"] - #[doc = " - any mutations done while executing `proposal` (`P1`)"] - #[doc = "- up to 3 events"] - #[doc = "# "] - pub fn close( - &self, - proposal_hash: ::subxt::sp_core::H256, - index: ::core::primitive::u32, - proposal_weight_bound: ::core::primitive::u64, - length_bound: ::core::primitive::u32, - ) -> Result< - ::subxt::SubmittableExtrinsic<'a, T, X, Close, DispatchError, root_mod::Event>, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? 
- == [ - 86u8, 13u8, 193u8, 66u8, 78u8, 210u8, 72u8, 79u8, 119u8, 244u8, 113u8, - 242u8, 84u8, 176u8, 73u8, 199u8, 151u8, 137u8, 180u8, 239u8, 27u8, - 114u8, 191u8, 180u8, 134u8, 165u8, 208u8, 80u8, 244u8, 166u8, 226u8, - 85u8, - ] - { - let call = Close { - proposal_hash, - index, - proposal_weight_bound, - length_bound, - }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Disapprove a proposal, close, and remove it from the system, regardless of its current"] - #[doc = "state."] - #[doc = ""] - #[doc = "Must be called by the Root origin."] - #[doc = ""] - #[doc = "Parameters:"] - #[doc = "* `proposal_hash`: The hash of the proposal that should be disapproved."] - #[doc = ""] - #[doc = "# "] - #[doc = "Complexity: O(P) where P is the number of max proposals"] - #[doc = "DB Weight:"] - #[doc = "* Reads: Proposals"] - #[doc = "* Writes: Voting, Proposals, ProposalOf"] - #[doc = "# "] - pub fn disapprove_proposal( - &self, - proposal_hash: ::subxt::sp_core::H256, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - DisapproveProposal, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 128u8, 85u8, 134u8, 138u8, 161u8, 42u8, 150u8, 65u8, 131u8, 61u8, - 184u8, 59u8, 167u8, 24u8, 200u8, 51u8, 223u8, 101u8, 4u8, 252u8, 159u8, - 239u8, 79u8, 195u8, 255u8, 40u8, 251u8, 239u8, 95u8, 121u8, 123u8, - 47u8, - ] - { - let call = DisapproveProposal { proposal_hash }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - pub type Event = runtime_types::pallet_collective::pallet::Event; - pub mod events { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "A motion (given hash) has been proposed (by given account) with a threshold (given"] - #[doc = "`MemberCount`)."] - pub struct Proposed { - pub account: ::subxt::sp_core::crypto::AccountId32, - pub proposal_index: ::core::primitive::u32, - pub proposal_hash: ::subxt::sp_core::H256, - pub threshold: ::core::primitive::u32, - } - impl ::subxt::Event for Proposed { - const PALLET: &'static str = "Collective"; - const EVENT: &'static str = "Proposed"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "A motion (given hash) has been voted on by given account, leaving"] - #[doc = "a tally (yes votes and no votes given respectively as `MemberCount`)."] - pub struct Voted { - pub account: ::subxt::sp_core::crypto::AccountId32, - pub proposal_hash: ::subxt::sp_core::H256, - pub voted: ::core::primitive::bool, - pub yes: ::core::primitive::u32, - pub no: ::core::primitive::u32, - } - impl ::subxt::Event for Voted { - const PALLET: &'static str = "Collective"; - const EVENT: &'static str = "Voted"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "A motion was approved by the required threshold."] - pub struct Approved { - pub proposal_hash: ::subxt::sp_core::H256, - } - impl ::subxt::Event for Approved { - const PALLET: &'static str = "Collective"; - const EVENT: &'static str = "Approved"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "A motion was not approved by the required threshold."] - pub struct Disapproved { - pub proposal_hash: ::subxt::sp_core::H256, - } - impl ::subxt::Event for Disapproved 
{ - const PALLET: &'static str = "Collective"; - const EVENT: &'static str = "Disapproved"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "A motion was executed; result will be `Ok` if it returned without error."] - pub struct Executed { - pub proposal_hash: ::subxt::sp_core::H256, - pub result: ::core::result::Result<(), runtime_types::sp_runtime::DispatchError>, - } - impl ::subxt::Event for Executed { - const PALLET: &'static str = "Collective"; - const EVENT: &'static str = "Executed"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "A single member did some action; result will be `Ok` if it returned without error."] - pub struct MemberExecuted { - pub proposal_hash: ::subxt::sp_core::H256, - pub result: ::core::result::Result<(), runtime_types::sp_runtime::DispatchError>, - } - impl ::subxt::Event for MemberExecuted { - const PALLET: &'static str = "Collective"; - const EVENT: &'static str = "MemberExecuted"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "A proposal was closed because its threshold was reached or after its duration was up."] - pub struct Closed { - pub proposal_hash: ::subxt::sp_core::H256, - pub yes: ::core::primitive::u32, - pub no: ::core::primitive::u32, - } - impl ::subxt::Event for Closed { - const PALLET: &'static str = "Collective"; - const EVENT: &'static str = "Closed"; - } - } - pub mod storage { - use super::runtime_types; - pub struct Proposals; - impl ::subxt::StorageEntry for Proposals { - const PALLET: &'static str = "Collective"; - const STORAGE: &'static str = "Proposals"; - type Value = runtime_types::frame_support::storage::bounded_vec::BoundedVec< - ::subxt::sp_core::H256, - >; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct ProposalOf<'a>(pub &'a ::subxt::sp_core::H256); - impl ::subxt::StorageEntry for ProposalOf<'_> { - const PALLET: &'static str = "Collective"; - const STORAGE: &'static str = "ProposalOf"; - type Value = runtime_types::rococo_runtime::Call; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( - &self.0, - ::subxt::StorageHasher::Identity, - )]) - } - } - pub struct Voting<'a>(pub &'a ::subxt::sp_core::H256); - impl ::subxt::StorageEntry for Voting<'_> { - const PALLET: &'static str = "Collective"; - const STORAGE: &'static str = "Voting"; - type Value = runtime_types::pallet_collective::Votes< - ::subxt::sp_core::crypto::AccountId32, - ::core::primitive::u32, - >; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( - &self.0, - ::subxt::StorageHasher::Identity, - )]) - } - } - pub struct ProposalCount; - impl ::subxt::StorageEntry for ProposalCount { - const PALLET: &'static str = "Collective"; - const STORAGE: &'static str = "ProposalCount"; - type Value = ::core::primitive::u32; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct Members; - impl ::subxt::StorageEntry for Members { - const PALLET: &'static str = "Collective"; - const STORAGE: &'static str = "Members"; - type Value = ::std::vec::Vec<::subxt::sp_core::crypto::AccountId32>; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct Prime; - impl ::subxt::StorageEntry for Prime { - const PALLET: &'static str = "Collective"; - const STORAGE: &'static str = "Prime"; - type 
Value = ::subxt::sp_core::crypto::AccountId32; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct StorageApi<'a, T: ::subxt::Config> { - client: &'a ::subxt::Client, - } - impl<'a, T: ::subxt::Config> StorageApi<'a, T> { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { client } - } - #[doc = " The hashes of the active proposals."] - pub async fn proposals( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - runtime_types::frame_support::storage::bounded_vec::BoundedVec< - ::subxt::sp_core::H256, - >, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 23u8, 209u8, 111u8, 241u8, 83u8, 72u8, 60u8, 51u8, 29u8, 14u8, 223u8, - 213u8, 120u8, 186u8, 243u8, 201u8, 205u8, 183u8, 127u8, 94u8, 190u8, - 180u8, 189u8, 131u8, 87u8, 197u8, 97u8, 231u8, 243u8, 232u8, 15u8, - 61u8, - ] - { - let entry = Proposals; - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Actual proposal for a given hash, if it's current."] - pub async fn proposal_of( - &self, - _0: &::subxt::sp_core::H256, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::core::option::Option, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 223u8, 62u8, 117u8, 37u8, 17u8, 61u8, 152u8, 232u8, 38u8, 182u8, 199u8, - 60u8, 43u8, 134u8, 252u8, 41u8, 71u8, 148u8, 114u8, 61u8, 236u8, 0u8, - 230u8, 40u8, 242u8, 136u8, 123u8, 40u8, 184u8, 160u8, 128u8, 191u8, - ] - { - let entry = ProposalOf(_0); - self.client.storage().fetch(&entry, block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Actual proposal for a given hash, if it's current."] - pub async fn proposal_of_iter( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::subxt::KeyIter<'a, T, ProposalOf<'a>>, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 223u8, 62u8, 117u8, 37u8, 17u8, 61u8, 152u8, 232u8, 38u8, 182u8, 199u8, - 60u8, 43u8, 134u8, 252u8, 41u8, 71u8, 148u8, 114u8, 61u8, 236u8, 0u8, - 230u8, 40u8, 242u8, 136u8, 123u8, 40u8, 184u8, 160u8, 128u8, 191u8, - ] - { - self.client.storage().iter(block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Votes on a given proposal, if it is ongoing."] - pub async fn voting( - &self, - _0: &::subxt::sp_core::H256, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::core::option::Option< - runtime_types::pallet_collective::Votes< - ::subxt::sp_core::crypto::AccountId32, - ::core::primitive::u32, - >, - >, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 230u8, 117u8, 88u8, 61u8, 179u8, 199u8, 172u8, 121u8, 157u8, 115u8, - 63u8, 88u8, 182u8, 7u8, 191u8, 41u8, 187u8, 5u8, 31u8, 240u8, 202u8, - 14u8, 21u8, 175u8, 39u8, 72u8, 113u8, 220u8, 251u8, 127u8, 30u8, 93u8, - ] - { - let entry = Voting(_0); - self.client.storage().fetch(&entry, block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Votes on a given proposal, if it is ongoing."] - pub async fn voting_iter( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result<::subxt::KeyIter<'a, T, Voting<'a>>, ::subxt::BasicError> - { - if self.client.metadata().storage_hash::()? 
- == [ - 230u8, 117u8, 88u8, 61u8, 179u8, 199u8, 172u8, 121u8, 157u8, 115u8, - 63u8, 88u8, 182u8, 7u8, 191u8, 41u8, 187u8, 5u8, 31u8, 240u8, 202u8, - 14u8, 21u8, 175u8, 39u8, 72u8, 113u8, 220u8, 251u8, 127u8, 30u8, 93u8, - ] - { - self.client.storage().iter(block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Proposals so far."] - pub async fn proposal_count( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> - { - if self.client.metadata().storage_hash::()? - == [ - 132u8, 145u8, 78u8, 218u8, 51u8, 189u8, 55u8, 172u8, 143u8, 33u8, - 140u8, 99u8, 124u8, 208u8, 57u8, 232u8, 154u8, 110u8, 32u8, 142u8, - 24u8, 149u8, 109u8, 105u8, 30u8, 83u8, 39u8, 177u8, 127u8, 160u8, 34u8, - 70u8, - ] - { - let entry = ProposalCount; - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The current members of the collective. This is stored sorted (just by value)."] - pub async fn members( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::std::vec::Vec<::subxt::sp_core::crypto::AccountId32>, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 136u8, 91u8, 140u8, 173u8, 238u8, 221u8, 4u8, 132u8, 238u8, 99u8, - 195u8, 142u8, 10u8, 35u8, 210u8, 227u8, 22u8, 72u8, 218u8, 222u8, - 227u8, 51u8, 55u8, 31u8, 252u8, 78u8, 195u8, 11u8, 195u8, 242u8, 171u8, - 75u8, - ] - { - let entry = Members; - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The prime member that helps determine the default vote behavior in case of absentations."] - pub async fn prime( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::core::option::Option<::subxt::sp_core::crypto::AccountId32>, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? 
- == [ - 70u8, 101u8, 20u8, 160u8, 173u8, 87u8, 190u8, 85u8, 60u8, 249u8, 144u8, - 77u8, 175u8, 195u8, 51u8, 196u8, 234u8, 62u8, 243u8, 199u8, 126u8, - 12u8, 88u8, 252u8, 1u8, 210u8, 65u8, 210u8, 33u8, 19u8, 222u8, 11u8, - ] - { - let entry = Prime; - self.client.storage().fetch(&entry, block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - } - pub mod membership { - use super::root_mod; - use super::runtime_types; - pub mod calls { - use super::root_mod; - use super::runtime_types; - type DispatchError = runtime_types::sp_runtime::DispatchError; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct AddMember { - pub who: ::subxt::sp_core::crypto::AccountId32, - } - impl ::subxt::Call for AddMember { - const PALLET: &'static str = "Membership"; - const FUNCTION: &'static str = "add_member"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct RemoveMember { - pub who: ::subxt::sp_core::crypto::AccountId32, - } - impl ::subxt::Call for RemoveMember { - const PALLET: &'static str = "Membership"; - const FUNCTION: &'static str = "remove_member"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct SwapMember { - pub remove: ::subxt::sp_core::crypto::AccountId32, - pub add: ::subxt::sp_core::crypto::AccountId32, - } - impl ::subxt::Call for SwapMember { - const PALLET: &'static str = "Membership"; - const FUNCTION: &'static str = "swap_member"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct ResetMembers { - pub members: ::std::vec::Vec<::subxt::sp_core::crypto::AccountId32>, - } - impl ::subxt::Call for ResetMembers { - const PALLET: &'static str = "Membership"; - const FUNCTION: &'static str = "reset_members"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct ChangeKey { - pub new: ::subxt::sp_core::crypto::AccountId32, - } - impl ::subxt::Call for ChangeKey { - const PALLET: &'static str = "Membership"; - const FUNCTION: &'static str = "change_key"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct SetPrime { - pub who: ::subxt::sp_core::crypto::AccountId32, - } - impl ::subxt::Call for SetPrime { - const PALLET: &'static str = "Membership"; - const FUNCTION: &'static str = "set_prime"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct ClearPrime; - impl ::subxt::Call for ClearPrime { - const PALLET: &'static str = "Membership"; - const FUNCTION: &'static str = "clear_prime"; - } - pub struct TransactionApi<'a, T: ::subxt::Config, X> { - client: &'a ::subxt::Client, - marker: ::core::marker::PhantomData, - } - impl<'a, T, X> TransactionApi<'a, T, X> - where - T: ::subxt::Config, - X: ::subxt::extrinsic::ExtrinsicParams, - { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { - client, - marker: ::core::marker::PhantomData, - } - } - #[doc = "Add a member `who` to the set."] - #[doc = ""] - #[doc = "May only be called from `T::AddOrigin`."] - pub fn add_member( - &self, - who: ::subxt::sp_core::crypto::AccountId32, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - AddMember, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? 
- == [ - 1u8, 149u8, 115u8, 222u8, 93u8, 9u8, 208u8, 58u8, 22u8, 148u8, 215u8, - 141u8, 204u8, 48u8, 107u8, 210u8, 202u8, 165u8, 43u8, 159u8, 45u8, - 161u8, 255u8, 127u8, 225u8, 100u8, 161u8, 195u8, 197u8, 206u8, 57u8, - 166u8, - ] - { - let call = AddMember { who }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Remove a member `who` from the set."] - #[doc = ""] - #[doc = "May only be called from `T::RemoveOrigin`."] - pub fn remove_member( - &self, - who: ::subxt::sp_core::crypto::AccountId32, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - RemoveMember, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 137u8, 249u8, 148u8, 139u8, 147u8, 47u8, 226u8, 228u8, 139u8, 219u8, - 109u8, 128u8, 254u8, 51u8, 227u8, 154u8, 105u8, 91u8, 229u8, 69u8, - 217u8, 241u8, 107u8, 229u8, 41u8, 202u8, 228u8, 227u8, 160u8, 162u8, - 45u8, 211u8, - ] - { - let call = RemoveMember { who }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Swap out one member `remove` for another `add`."] - #[doc = ""] - #[doc = "May only be called from `T::SwapOrigin`."] - #[doc = ""] - #[doc = "Prime membership is *not* passed from `remove` to `add`, if extant."] - pub fn swap_member( - &self, - remove: ::subxt::sp_core::crypto::AccountId32, - add: ::subxt::sp_core::crypto::AccountId32, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - SwapMember, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 159u8, 62u8, 254u8, 117u8, 56u8, 185u8, 99u8, 29u8, 146u8, 210u8, 40u8, - 77u8, 169u8, 224u8, 215u8, 34u8, 106u8, 95u8, 204u8, 109u8, 72u8, 67u8, - 11u8, 183u8, 33u8, 84u8, 133u8, 4u8, 5u8, 13u8, 188u8, 123u8, - ] - { - let call = SwapMember { remove, add }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Change the membership to a new set, disregarding the existing membership. Be nice and"] - #[doc = "pass `members` pre-sorted."] - #[doc = ""] - #[doc = "May only be called from `T::ResetOrigin`."] - pub fn reset_members( - &self, - members: ::std::vec::Vec<::subxt::sp_core::crypto::AccountId32>, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - ResetMembers, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? 
- == [ - 246u8, 84u8, 91u8, 191u8, 61u8, 245u8, 171u8, 80u8, 18u8, 120u8, 61u8, - 86u8, 23u8, 115u8, 161u8, 203u8, 128u8, 34u8, 166u8, 128u8, 33u8, 28u8, - 229u8, 81u8, 103u8, 217u8, 173u8, 151u8, 31u8, 118u8, 151u8, 217u8, - ] - { - let call = ResetMembers { members }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Swap out the sending member for some other key `new`."] - #[doc = ""] - #[doc = "May only be called from `Signed` origin of a current member."] - #[doc = ""] - #[doc = "Prime membership is passed from the origin account to `new`, if extant."] - pub fn change_key( - &self, - new: ::subxt::sp_core::crypto::AccountId32, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - ChangeKey, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 198u8, 93u8, 41u8, 52u8, 241u8, 11u8, 225u8, 82u8, 30u8, 114u8, 111u8, - 204u8, 13u8, 31u8, 34u8, 82u8, 171u8, 58u8, 180u8, 65u8, 3u8, 246u8, - 33u8, 167u8, 200u8, 23u8, 150u8, 235u8, 130u8, 172u8, 202u8, 216u8, - ] - { - let call = ChangeKey { new }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Set the prime member. Must be a current member."] - #[doc = ""] - #[doc = "May only be called from `T::PrimeOrigin`."] - pub fn set_prime( - &self, - who: ::subxt::sp_core::crypto::AccountId32, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - SetPrime, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 185u8, 53u8, 61u8, 154u8, 234u8, 77u8, 195u8, 126u8, 19u8, 39u8, 78u8, - 205u8, 109u8, 210u8, 137u8, 245u8, 128u8, 110u8, 2u8, 201u8, 20u8, - 153u8, 146u8, 177u8, 4u8, 144u8, 229u8, 125u8, 91u8, 131u8, 199u8, - 15u8, - ] - { - let call = SetPrime { who }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Remove the prime member if it exists."] - #[doc = ""] - #[doc = "May only be called from `T::PrimeOrigin`."] - pub fn clear_prime( - &self, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - ClearPrime, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? 
- == [ - 186u8, 182u8, 225u8, 90u8, 71u8, 124u8, 69u8, 100u8, 234u8, 25u8, 53u8, - 23u8, 182u8, 32u8, 176u8, 81u8, 54u8, 140u8, 235u8, 126u8, 247u8, 7u8, - 155u8, 62u8, 35u8, 135u8, 48u8, 61u8, 88u8, 160u8, 183u8, 72u8, - ] - { - let call = ClearPrime {}; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - pub type Event = runtime_types::pallet_membership::pallet::Event; - pub mod events { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "The given member was added; see the transaction for who."] - pub struct MemberAdded; - impl ::subxt::Event for MemberAdded { - const PALLET: &'static str = "Membership"; - const EVENT: &'static str = "MemberAdded"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "The given member was removed; see the transaction for who."] - pub struct MemberRemoved; - impl ::subxt::Event for MemberRemoved { - const PALLET: &'static str = "Membership"; - const EVENT: &'static str = "MemberRemoved"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "Two members were swapped; see the transaction for who."] - pub struct MembersSwapped; - impl ::subxt::Event for MembersSwapped { - const PALLET: &'static str = "Membership"; - const EVENT: &'static str = "MembersSwapped"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "The membership was reset; see the transaction for who the new set is."] - pub struct MembersReset; - impl ::subxt::Event for MembersReset { - const PALLET: &'static str = "Membership"; - const EVENT: &'static str = "MembersReset"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "One of the members' keys changed."] - pub struct KeyChanged; - impl ::subxt::Event for KeyChanged { - const PALLET: &'static str = "Membership"; - const EVENT: &'static str = "KeyChanged"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "Phantom member, never used."] - pub struct Dummy; - impl ::subxt::Event for Dummy { - const PALLET: &'static str = "Membership"; - const EVENT: &'static str = "Dummy"; - } - } - pub mod storage { - use super::runtime_types; - pub struct Members; - impl ::subxt::StorageEntry for Members { - const PALLET: &'static str = "Membership"; - const STORAGE: &'static str = "Members"; - type Value = ::std::vec::Vec<::subxt::sp_core::crypto::AccountId32>; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct Prime; - impl ::subxt::StorageEntry for Prime { - const PALLET: &'static str = "Membership"; - const STORAGE: &'static str = "Prime"; - type Value = ::subxt::sp_core::crypto::AccountId32; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct StorageApi<'a, T: ::subxt::Config> { - client: &'a ::subxt::Client, - } - impl<'a, T: ::subxt::Config> StorageApi<'a, T> { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { client } - } - #[doc = " The current membership, stored as an ordered Vec."] - pub async fn members( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::std::vec::Vec<::subxt::sp_core::crypto::AccountId32>, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? 
- == [ - 136u8, 91u8, 140u8, 173u8, 238u8, 221u8, 4u8, 132u8, 238u8, 99u8, - 195u8, 142u8, 10u8, 35u8, 210u8, 227u8, 22u8, 72u8, 218u8, 222u8, - 227u8, 51u8, 55u8, 31u8, 252u8, 78u8, 195u8, 11u8, 195u8, 242u8, 171u8, - 75u8, - ] - { - let entry = Members; - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The current prime member, if one exists."] - pub async fn prime( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::core::option::Option<::subxt::sp_core::crypto::AccountId32>, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 70u8, 101u8, 20u8, 160u8, 173u8, 87u8, 190u8, 85u8, 60u8, 249u8, 144u8, - 77u8, 175u8, 195u8, 51u8, 196u8, 234u8, 62u8, 243u8, 199u8, 126u8, - 12u8, 88u8, 252u8, 1u8, 210u8, 65u8, 210u8, 33u8, 19u8, 222u8, 11u8, - ] - { - let entry = Prime; - self.client.storage().fetch(&entry, block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - } - pub mod utility { - use super::root_mod; - use super::runtime_types; - pub mod calls { - use super::root_mod; - use super::runtime_types; - type DispatchError = runtime_types::sp_runtime::DispatchError; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct Batch { - pub calls: ::std::vec::Vec, - } - impl ::subxt::Call for Batch { - const PALLET: &'static str = "Utility"; - const FUNCTION: &'static str = "batch"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct AsDerivative { - pub index: ::core::primitive::u16, - pub call: ::std::boxed::Box, - } - impl ::subxt::Call for AsDerivative { - const PALLET: &'static str = "Utility"; - const FUNCTION: &'static str = "as_derivative"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct BatchAll { - pub calls: ::std::vec::Vec, - } - impl ::subxt::Call for BatchAll { - const PALLET: &'static str = "Utility"; - const FUNCTION: &'static str = "batch_all"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct DispatchAs { - pub as_origin: ::std::boxed::Box, - pub call: ::std::boxed::Box, - } - impl ::subxt::Call for DispatchAs { - const PALLET: &'static str = "Utility"; - const FUNCTION: &'static str = "dispatch_as"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct ForceBatch { - pub calls: ::std::vec::Vec, - } - impl ::subxt::Call for ForceBatch { - const PALLET: &'static str = "Utility"; - const FUNCTION: &'static str = "force_batch"; - } - pub struct TransactionApi<'a, T: ::subxt::Config, X> { - client: &'a ::subxt::Client, - marker: ::core::marker::PhantomData, - } - impl<'a, T, X> TransactionApi<'a, T, X> - where - T: ::subxt::Config, - X: ::subxt::extrinsic::ExtrinsicParams, - { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { - client, - marker: ::core::marker::PhantomData, - } - } - #[doc = "Send a batch of dispatch calls."] - #[doc = ""] - #[doc = "May be called from any origin."] - #[doc = ""] - #[doc = "- `calls`: The calls to be dispatched from the same origin. The number of call must not"] - #[doc = " exceed the constant: `batched_calls_limit` (available in constant metadata)."] - #[doc = ""] - #[doc = "If origin is root then call are dispatch without checking origin filter. 
(This includes"] - #[doc = "bypassing `frame_system::Config::BaseCallFilter`)."] - #[doc = ""] - #[doc = "# "] - #[doc = "- Complexity: O(C) where C is the number of calls to be batched."] - #[doc = "# "] - #[doc = ""] - #[doc = "This will return `Ok` in all circumstances. To determine the success of the batch, an"] - #[doc = "event is deposited. If a call failed and the batch was interrupted, then the"] - #[doc = "`BatchInterrupted` event is deposited, along with the number of successful calls made"] - #[doc = "and the error of the failed call. If all were successful, then the `BatchCompleted`"] - #[doc = "event is deposited."] - pub fn batch( - &self, - calls: ::std::vec::Vec, - ) -> Result< - ::subxt::SubmittableExtrinsic<'a, T, X, Batch, DispatchError, root_mod::Event>, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 2u8, 176u8, 87u8, 184u8, 5u8, 237u8, 30u8, 222u8, 1u8, 60u8, 39u8, - 236u8, 29u8, 56u8, 243u8, 90u8, 119u8, 108u8, 67u8, 9u8, 160u8, 182u8, - 92u8, 68u8, 168u8, 9u8, 39u8, 31u8, 247u8, 37u8, 77u8, 224u8, - ] - { - let call = Batch { calls }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Send a call through an indexed pseudonym of the sender."] - #[doc = ""] - #[doc = "Filter from origin are passed along. The call will be dispatched with an origin which"] - #[doc = "use the same filter as the origin of this call."] - #[doc = ""] - #[doc = "NOTE: If you need to ensure that any account-based filtering is not honored (i.e."] - #[doc = "because you expect `proxy` to have been used prior in the call stack and you do not want"] - #[doc = "the call restrictions to apply to any sub-accounts), then use `as_multi_threshold_1`"] - #[doc = "in the Multisig pallet instead."] - #[doc = ""] - #[doc = "NOTE: Prior to version *12, this was called `as_limited_sub`."] - #[doc = ""] - #[doc = "The dispatch origin for this call must be _Signed_."] - pub fn as_derivative( - &self, - index: ::core::primitive::u16, - call: runtime_types::rococo_runtime::Call, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - AsDerivative, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 173u8, 238u8, 47u8, 118u8, 29u8, 201u8, 126u8, 189u8, 172u8, 159u8, - 37u8, 155u8, 153u8, 191u8, 86u8, 144u8, 64u8, 156u8, 120u8, 244u8, - 10u8, 154u8, 26u8, 89u8, 119u8, 181u8, 205u8, 214u8, 149u8, 177u8, - 248u8, 55u8, - ] - { - let call = AsDerivative { - index, - call: ::std::boxed::Box::new(call), - }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Send a batch of dispatch calls and atomically execute them."] - #[doc = "The whole transaction will rollback and fail if any of the calls failed."] - #[doc = ""] - #[doc = "May be called from any origin."] - #[doc = ""] - #[doc = "- `calls`: The calls to be dispatched from the same origin. The number of call must not"] - #[doc = " exceed the constant: `batched_calls_limit` (available in constant metadata)."] - #[doc = ""] - #[doc = "If origin is root then call are dispatch without checking origin filter. 
(This includes"] - #[doc = "bypassing `frame_system::Config::BaseCallFilter`)."] - #[doc = ""] - #[doc = "# "] - #[doc = "- Complexity: O(C) where C is the number of calls to be batched."] - #[doc = "# "] - pub fn batch_all( - &self, - calls: ::std::vec::Vec, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - BatchAll, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 179u8, 43u8, 224u8, 93u8, 67u8, 8u8, 84u8, 182u8, 54u8, 175u8, 46u8, - 126u8, 254u8, 67u8, 18u8, 191u8, 78u8, 231u8, 167u8, 128u8, 211u8, - 99u8, 40u8, 84u8, 232u8, 221u8, 133u8, 128u8, 198u8, 248u8, 94u8, - 140u8, - ] - { - let call = BatchAll { calls }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Dispatches a function call with a provided origin."] - #[doc = ""] - #[doc = "The dispatch origin for this call must be _Root_."] - #[doc = ""] - #[doc = "# "] - #[doc = "- O(1)."] - #[doc = "- Limited storage reads."] - #[doc = "- One DB write (event)."] - #[doc = "- Weight of derivative `call` execution + T::WeightInfo::dispatch_as()."] - #[doc = "# "] - pub fn dispatch_as( - &self, - as_origin: runtime_types::rococo_runtime::OriginCaller, - call: runtime_types::rococo_runtime::Call, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - DispatchAs, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 170u8, 154u8, 110u8, 35u8, 217u8, 69u8, 175u8, 79u8, 131u8, 41u8, 15u8, - 111u8, 156u8, 146u8, 242u8, 249u8, 217u8, 57u8, 167u8, 97u8, 93u8, - 169u8, 50u8, 214u8, 72u8, 111u8, 226u8, 23u8, 177u8, 186u8, 13u8, 38u8, - ] - { - let call = DispatchAs { - as_origin: ::std::boxed::Box::new(as_origin), - call: ::std::boxed::Box::new(call), - }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Send a batch of dispatch calls."] - #[doc = "Unlike `batch`, it allows errors and won't interrupt."] - #[doc = ""] - #[doc = "May be called from any origin."] - #[doc = ""] - #[doc = "- `calls`: The calls to be dispatched from the same origin. The number of call must not"] - #[doc = " exceed the constant: `batched_calls_limit` (available in constant metadata)."] - #[doc = ""] - #[doc = "If origin is root then call are dispatch without checking origin filter. (This includes"] - #[doc = "bypassing `frame_system::Config::BaseCallFilter`)."] - #[doc = ""] - #[doc = "# "] - #[doc = "- Complexity: O(C) where C is the number of calls to be batched."] - #[doc = "# "] - pub fn force_batch( - &self, - calls: ::std::vec::Vec, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - ForceBatch, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? 
- == [ - 236u8, 124u8, 176u8, 111u8, 53u8, 194u8, 175u8, 226u8, 3u8, 221u8, - 56u8, 196u8, 137u8, 21u8, 196u8, 127u8, 166u8, 15u8, 227u8, 72u8, 22u8, - 42u8, 212u8, 0u8, 14u8, 179u8, 65u8, 190u8, 134u8, 235u8, 93u8, 203u8, - ] - { - let call = ForceBatch { calls }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - pub type Event = runtime_types::pallet_utility::pallet::Event; - pub mod events { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "Batch of dispatches did not complete fully. Index of first failing dispatch given, as"] - #[doc = "well as the error."] - pub struct BatchInterrupted { - pub index: ::core::primitive::u32, - pub error: runtime_types::sp_runtime::DispatchError, - } - impl ::subxt::Event for BatchInterrupted { - const PALLET: &'static str = "Utility"; - const EVENT: &'static str = "BatchInterrupted"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "Batch of dispatches completed fully with no error."] - pub struct BatchCompleted; - impl ::subxt::Event for BatchCompleted { - const PALLET: &'static str = "Utility"; - const EVENT: &'static str = "BatchCompleted"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "Batch of dispatches completed but has errors."] - pub struct BatchCompletedWithErrors; - impl ::subxt::Event for BatchCompletedWithErrors { - const PALLET: &'static str = "Utility"; - const EVENT: &'static str = "BatchCompletedWithErrors"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "A single item within a Batch of dispatches has completed with no error."] - pub struct ItemCompleted; - impl ::subxt::Event for ItemCompleted { - const PALLET: &'static str = "Utility"; - const EVENT: &'static str = "ItemCompleted"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "A single item within a Batch of dispatches has completed with error."] - pub struct ItemFailed { - pub error: runtime_types::sp_runtime::DispatchError, - } - impl ::subxt::Event for ItemFailed { - const PALLET: &'static str = "Utility"; - const EVENT: &'static str = "ItemFailed"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "A call was dispatched."] - pub struct DispatchedAs { - pub result: ::core::result::Result<(), runtime_types::sp_runtime::DispatchError>, - } - impl ::subxt::Event for DispatchedAs { - const PALLET: &'static str = "Utility"; - const EVENT: &'static str = "DispatchedAs"; - } - } - pub mod constants { - use super::runtime_types; - pub struct ConstantsApi<'a, T: ::subxt::Config> { - client: &'a ::subxt::Client, - } - impl<'a, T: ::subxt::Config> ConstantsApi<'a, T> { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { client } - } - #[doc = " The limit on the number of batched calls."] - pub fn batched_calls_limit( - &self, - ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> - { - if self - .client - .metadata() - .constant_hash("Utility", "batched_calls_limit")? 
- == [ - 230u8, 161u8, 6u8, 191u8, 162u8, 108u8, 149u8, 245u8, 68u8, 101u8, - 120u8, 129u8, 140u8, 51u8, 77u8, 97u8, 30u8, 155u8, 115u8, 70u8, 72u8, - 235u8, 251u8, 192u8, 5u8, 8u8, 188u8, 72u8, 132u8, 227u8, 44u8, 2u8, - ] - { - let pallet = self.client.metadata().pallet("Utility")?; - let constant = pallet.constant("batched_calls_limit")?; - let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; - Ok(value) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - } - pub mod proxy { - use super::root_mod; - use super::runtime_types; - pub mod calls { - use super::root_mod; - use super::runtime_types; - type DispatchError = runtime_types::sp_runtime::DispatchError; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct Proxy { - pub real: ::subxt::sp_core::crypto::AccountId32, - pub force_proxy_type: - ::core::option::Option, - pub call: ::std::boxed::Box, - } - impl ::subxt::Call for Proxy { - const PALLET: &'static str = "Proxy"; - const FUNCTION: &'static str = "proxy"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct AddProxy { - pub delegate: ::subxt::sp_core::crypto::AccountId32, - pub proxy_type: runtime_types::rococo_runtime::ProxyType, - pub delay: ::core::primitive::u32, - } - impl ::subxt::Call for AddProxy { - const PALLET: &'static str = "Proxy"; - const FUNCTION: &'static str = "add_proxy"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct RemoveProxy { - pub delegate: ::subxt::sp_core::crypto::AccountId32, - pub proxy_type: runtime_types::rococo_runtime::ProxyType, - pub delay: ::core::primitive::u32, - } - impl ::subxt::Call for RemoveProxy { - const PALLET: &'static str = "Proxy"; - const FUNCTION: &'static str = "remove_proxy"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct RemoveProxies; - impl ::subxt::Call for RemoveProxies { - const PALLET: &'static str = "Proxy"; - const FUNCTION: &'static str = "remove_proxies"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct Anonymous { - pub proxy_type: runtime_types::rococo_runtime::ProxyType, - pub delay: ::core::primitive::u32, - pub index: ::core::primitive::u16, - } - impl ::subxt::Call for Anonymous { - const PALLET: &'static str = "Proxy"; - const FUNCTION: &'static str = "anonymous"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct KillAnonymous { - pub spawner: ::subxt::sp_core::crypto::AccountId32, - pub proxy_type: runtime_types::rococo_runtime::ProxyType, - pub index: ::core::primitive::u16, - #[codec(compact)] - pub height: ::core::primitive::u32, - #[codec(compact)] - pub ext_index: ::core::primitive::u32, - } - impl ::subxt::Call for KillAnonymous { - const PALLET: &'static str = "Proxy"; - const FUNCTION: &'static str = "kill_anonymous"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct Announce { - pub real: ::subxt::sp_core::crypto::AccountId32, - pub call_hash: ::subxt::sp_core::H256, - } - impl ::subxt::Call for Announce { - const PALLET: &'static str = "Proxy"; - const FUNCTION: &'static str = "announce"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct RemoveAnnouncement { - pub real: ::subxt::sp_core::crypto::AccountId32, - pub call_hash: ::subxt::sp_core::H256, - } - impl ::subxt::Call for RemoveAnnouncement { - 
const PALLET: &'static str = "Proxy"; - const FUNCTION: &'static str = "remove_announcement"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct RejectAnnouncement { - pub delegate: ::subxt::sp_core::crypto::AccountId32, - pub call_hash: ::subxt::sp_core::H256, - } - impl ::subxt::Call for RejectAnnouncement { - const PALLET: &'static str = "Proxy"; - const FUNCTION: &'static str = "reject_announcement"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct ProxyAnnounced { - pub delegate: ::subxt::sp_core::crypto::AccountId32, - pub real: ::subxt::sp_core::crypto::AccountId32, - pub force_proxy_type: - ::core::option::Option, - pub call: ::std::boxed::Box, - } - impl ::subxt::Call for ProxyAnnounced { - const PALLET: &'static str = "Proxy"; - const FUNCTION: &'static str = "proxy_announced"; - } - pub struct TransactionApi<'a, T: ::subxt::Config, X> { - client: &'a ::subxt::Client, - marker: ::core::marker::PhantomData, - } - impl<'a, T, X> TransactionApi<'a, T, X> - where - T: ::subxt::Config, - X: ::subxt::extrinsic::ExtrinsicParams, - { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { - client, - marker: ::core::marker::PhantomData, - } - } - #[doc = "Dispatch the given `call` from an account that the sender is authorised for through"] - #[doc = "`add_proxy`."] - #[doc = ""] - #[doc = "Removes any corresponding announcement(s)."] - #[doc = ""] - #[doc = "The dispatch origin for this call must be _Signed_."] - #[doc = ""] - #[doc = "Parameters:"] - #[doc = "- `real`: The account that the proxy will make a call on behalf of."] - #[doc = "- `force_proxy_type`: Specify the exact proxy type to be used and checked for this call."] - #[doc = "- `call`: The call to be made by the `real` account."] - #[doc = ""] - #[doc = "# "] - #[doc = "Weight is a function of the number of proxies the user has (P)."] - #[doc = "# "] - pub fn proxy( - &self, - real: ::subxt::sp_core::crypto::AccountId32, - force_proxy_type: ::core::option::Option< - runtime_types::rococo_runtime::ProxyType, - >, - call: runtime_types::rococo_runtime::Call, - ) -> Result< - ::subxt::SubmittableExtrinsic<'a, T, X, Proxy, DispatchError, root_mod::Event>, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 69u8, 48u8, 246u8, 104u8, 222u8, 230u8, 12u8, 24u8, 206u8, 11u8, 36u8, - 34u8, 34u8, 181u8, 165u8, 141u8, 26u8, 66u8, 25u8, 203u8, 105u8, 119u8, - 130u8, 207u8, 155u8, 248u8, 122u8, 232u8, 48u8, 3u8, 151u8, 136u8, - ] - { - let call = Proxy { - real, - force_proxy_type, - call: ::std::boxed::Box::new(call), - }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Register a proxy account for the sender that is able to make calls on its behalf."] - #[doc = ""] - #[doc = "The dispatch origin for this call must be _Signed_."] - #[doc = ""] - #[doc = "Parameters:"] - #[doc = "- `proxy`: The account that the `caller` would like to make a proxy."] - #[doc = "- `proxy_type`: The permissions allowed for this proxy account."] - #[doc = "- `delay`: The announcement period required of the initial proxy. 
Will generally be"] - #[doc = "zero."] - #[doc = ""] - #[doc = "# "] - #[doc = "Weight is a function of the number of proxies the user has (P)."] - #[doc = "# "] - pub fn add_proxy( - &self, - delegate: ::subxt::sp_core::crypto::AccountId32, - proxy_type: runtime_types::rococo_runtime::ProxyType, - delay: ::core::primitive::u32, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - AddProxy, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 147u8, 160u8, 155u8, 162u8, 94u8, 114u8, 60u8, 178u8, 78u8, 235u8, 9u8, - 249u8, 180u8, 152u8, 73u8, 248u8, 238u8, 155u8, 114u8, 32u8, 247u8, - 146u8, 16u8, 94u8, 135u8, 118u8, 13u8, 77u8, 78u8, 69u8, 200u8, 251u8, - ] - { - let call = AddProxy { - delegate, - proxy_type, - delay, - }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Unregister a proxy account for the sender."] - #[doc = ""] - #[doc = "The dispatch origin for this call must be _Signed_."] - #[doc = ""] - #[doc = "Parameters:"] - #[doc = "- `proxy`: The account that the `caller` would like to remove as a proxy."] - #[doc = "- `proxy_type`: The permissions currently enabled for the removed proxy account."] - #[doc = ""] - #[doc = "# "] - #[doc = "Weight is a function of the number of proxies the user has (P)."] - #[doc = "# "] - pub fn remove_proxy( - &self, - delegate: ::subxt::sp_core::crypto::AccountId32, - proxy_type: runtime_types::rococo_runtime::ProxyType, - delay: ::core::primitive::u32, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - RemoveProxy, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 76u8, 45u8, 70u8, 255u8, 181u8, 17u8, 146u8, 110u8, 26u8, 44u8, 191u8, - 48u8, 244u8, 61u8, 163u8, 235u8, 202u8, 184u8, 160u8, 156u8, 130u8, - 47u8, 35u8, 206u8, 12u8, 103u8, 25u8, 27u8, 129u8, 119u8, 162u8, 157u8, - ] - { - let call = RemoveProxy { - delegate, - proxy_type, - delay, - }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Unregister all proxy accounts for the sender."] - #[doc = ""] - #[doc = "The dispatch origin for this call must be _Signed_."] - #[doc = ""] - #[doc = "WARNING: This may be called on accounts created by `anonymous`, however if done, then"] - #[doc = "the unreserved fees will be inaccessible. **All access to this account will be lost.**"] - #[doc = ""] - #[doc = "# "] - #[doc = "Weight is a function of the number of proxies the user has (P)."] - #[doc = "# "] - pub fn remove_proxies( - &self, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - RemoveProxies, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? 
- == [ - 15u8, 237u8, 27u8, 166u8, 254u8, 218u8, 92u8, 5u8, 213u8, 239u8, 99u8, - 59u8, 1u8, 26u8, 73u8, 252u8, 81u8, 94u8, 214u8, 227u8, 169u8, 58u8, - 40u8, 253u8, 187u8, 225u8, 192u8, 26u8, 19u8, 23u8, 121u8, 129u8, - ] - { - let call = RemoveProxies {}; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Spawn a fresh new account that is guaranteed to be otherwise inaccessible, and"] - #[doc = "initialize it with a proxy of `proxy_type` for `origin` sender."] - #[doc = ""] - #[doc = "Requires a `Signed` origin."] - #[doc = ""] - #[doc = "- `proxy_type`: The type of the proxy that the sender will be registered as over the"] - #[doc = "new account. This will almost always be the most permissive `ProxyType` possible to"] - #[doc = "allow for maximum flexibility."] - #[doc = "- `index`: A disambiguation index, in case this is called multiple times in the same"] - #[doc = "transaction (e.g. with `utility::batch`). Unless you're using `batch` you probably just"] - #[doc = "want to use `0`."] - #[doc = "- `delay`: The announcement period required of the initial proxy. Will generally be"] - #[doc = "zero."] - #[doc = ""] - #[doc = "Fails with `Duplicate` if this has already been called in this transaction, from the"] - #[doc = "same sender, with the same parameters."] - #[doc = ""] - #[doc = "Fails if there are insufficient funds to pay for deposit."] - #[doc = ""] - #[doc = "# "] - #[doc = "Weight is a function of the number of proxies the user has (P)."] - #[doc = "# "] - #[doc = "TODO: Might be over counting 1 read"] - pub fn anonymous( - &self, - proxy_type: runtime_types::rococo_runtime::ProxyType, - delay: ::core::primitive::u32, - index: ::core::primitive::u16, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - Anonymous, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 196u8, 87u8, 202u8, 253u8, 227u8, 15u8, 4u8, 65u8, 86u8, 235u8, 205u8, - 19u8, 248u8, 108u8, 61u8, 206u8, 108u8, 178u8, 123u8, 154u8, 200u8, - 189u8, 124u8, 10u8, 251u8, 86u8, 5u8, 21u8, 172u8, 201u8, 4u8, 176u8, - ] - { - let call = Anonymous { - proxy_type, - delay, - index, - }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Removes a previously spawned anonymous proxy."] - #[doc = ""] - #[doc = "WARNING: **All access to this account will be lost.** Any funds held in it will be"] - #[doc = "inaccessible."] - #[doc = ""] - #[doc = "Requires a `Signed` origin, and the sender account must have been created by a call to"] - #[doc = "`anonymous` with corresponding parameters."] - #[doc = ""] - #[doc = "- `spawner`: The account that originally called `anonymous` to create this account."] - #[doc = "- `index`: The disambiguation index originally passed to `anonymous`. 
Probably `0`."] - #[doc = "- `proxy_type`: The proxy type originally passed to `anonymous`."] - #[doc = "- `height`: The height of the chain when the call to `anonymous` was processed."] - #[doc = "- `ext_index`: The extrinsic index in which the call to `anonymous` was processed."] - #[doc = ""] - #[doc = "Fails with `NoPermission` in case the caller is not a previously created anonymous"] - #[doc = "account whose `anonymous` call has corresponding parameters."] - #[doc = ""] - #[doc = "# "] - #[doc = "Weight is a function of the number of proxies the user has (P)."] - #[doc = "# "] - pub fn kill_anonymous( - &self, - spawner: ::subxt::sp_core::crypto::AccountId32, - proxy_type: runtime_types::rococo_runtime::ProxyType, - index: ::core::primitive::u16, - height: ::core::primitive::u32, - ext_index: ::core::primitive::u32, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - KillAnonymous, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 165u8, 240u8, 108u8, 74u8, 96u8, 200u8, 186u8, 64u8, 56u8, 175u8, - 123u8, 106u8, 122u8, 165u8, 200u8, 20u8, 93u8, 30u8, 93u8, 210u8, 89u8, - 108u8, 13u8, 180u8, 143u8, 218u8, 157u8, 135u8, 111u8, 9u8, 42u8, 18u8, - ] - { - let call = KillAnonymous { - spawner, - proxy_type, - index, - height, - ext_index, - }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Publish the hash of a proxy-call that will be made in the future."] - #[doc = ""] - #[doc = "This must be called some number of blocks before the corresponding `proxy` is attempted"] - #[doc = "if the delay associated with the proxy relationship is greater than zero."] - #[doc = ""] - #[doc = "No more than `MaxPending` announcements may be made at any one time."] - #[doc = ""] - #[doc = "This will take a deposit of `AnnouncementDepositFactor` as well as"] - #[doc = "`AnnouncementDepositBase` if there are no other pending announcements."] - #[doc = ""] - #[doc = "The dispatch origin for this call must be _Signed_ and a proxy of `real`."] - #[doc = ""] - #[doc = "Parameters:"] - #[doc = "- `real`: The account that the proxy will make a call on behalf of."] - #[doc = "- `call_hash`: The hash of the call to be made by the `real` account."] - #[doc = ""] - #[doc = "# "] - #[doc = "Weight is a function of:"] - #[doc = "- A: the number of announcements made."] - #[doc = "- P: the number of proxies the user has."] - #[doc = "# "] - pub fn announce( - &self, - real: ::subxt::sp_core::crypto::AccountId32, - call_hash: ::subxt::sp_core::H256, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - Announce, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? 
- == [ - 202u8, 10u8, 107u8, 1u8, 212u8, 97u8, 111u8, 209u8, 102u8, 157u8, - 109u8, 231u8, 123u8, 131u8, 173u8, 69u8, 79u8, 143u8, 148u8, 23u8, - 123u8, 22u8, 83u8, 52u8, 86u8, 220u8, 200u8, 11u8, 60u8, 169u8, 71u8, - 183u8, - ] - { - let call = Announce { real, call_hash }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Remove a given announcement."] - #[doc = ""] - #[doc = "May be called by a proxy account to remove a call they previously announced and return"] - #[doc = "the deposit."] - #[doc = ""] - #[doc = "The dispatch origin for this call must be _Signed_."] - #[doc = ""] - #[doc = "Parameters:"] - #[doc = "- `real`: The account that the proxy will make a call on behalf of."] - #[doc = "- `call_hash`: The hash of the call to be made by the `real` account."] - #[doc = ""] - #[doc = "# "] - #[doc = "Weight is a function of:"] - #[doc = "- A: the number of announcements made."] - #[doc = "- P: the number of proxies the user has."] - #[doc = "# "] - pub fn remove_announcement( - &self, - real: ::subxt::sp_core::crypto::AccountId32, - call_hash: ::subxt::sp_core::H256, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - RemoveAnnouncement, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 8u8, 157u8, 190u8, 96u8, 209u8, 28u8, 73u8, 31u8, 19u8, 221u8, 252u8, - 200u8, 180u8, 69u8, 83u8, 239u8, 162u8, 135u8, 102u8, 157u8, 149u8, - 107u8, 192u8, 41u8, 196u8, 83u8, 133u8, 107u8, 82u8, 215u8, 50u8, 8u8, - ] - { - let call = RemoveAnnouncement { real, call_hash }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Remove the given announcement of a delegate."] - #[doc = ""] - #[doc = "May be called by a target (proxied) account to remove a call that one of their delegates"] - #[doc = "(`delegate`) has announced they want to execute. The deposit is returned."] - #[doc = ""] - #[doc = "The dispatch origin for this call must be _Signed_."] - #[doc = ""] - #[doc = "Parameters:"] - #[doc = "- `delegate`: The account that previously announced the call."] - #[doc = "- `call_hash`: The hash of the call to be made."] - #[doc = ""] - #[doc = "# "] - #[doc = "Weight is a function of:"] - #[doc = "- A: the number of announcements made."] - #[doc = "- P: the number of proxies the user has."] - #[doc = "# "] - pub fn reject_announcement( - &self, - delegate: ::subxt::sp_core::crypto::AccountId32, - call_hash: ::subxt::sp_core::H256, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - RejectAnnouncement, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? 
- == [ - 218u8, 26u8, 11u8, 238u8, 82u8, 240u8, 191u8, 46u8, 107u8, 197u8, 58u8, - 160u8, 162u8, 152u8, 12u8, 188u8, 194u8, 185u8, 27u8, 75u8, 192u8, - 236u8, 32u8, 36u8, 131u8, 179u8, 99u8, 33u8, 14u8, 37u8, 163u8, 105u8, - ] - { - let call = RejectAnnouncement { - delegate, - call_hash, - }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Dispatch the given `call` from an account that the sender is authorized for through"] - #[doc = "`add_proxy`."] - #[doc = ""] - #[doc = "Removes any corresponding announcement(s)."] - #[doc = ""] - #[doc = "The dispatch origin for this call must be _Signed_."] - #[doc = ""] - #[doc = "Parameters:"] - #[doc = "- `real`: The account that the proxy will make a call on behalf of."] - #[doc = "- `force_proxy_type`: Specify the exact proxy type to be used and checked for this call."] - #[doc = "- `call`: The call to be made by the `real` account."] - #[doc = ""] - #[doc = "# "] - #[doc = "Weight is a function of:"] - #[doc = "- A: the number of announcements made."] - #[doc = "- P: the number of proxies the user has."] - #[doc = "# "] - pub fn proxy_announced( - &self, - delegate: ::subxt::sp_core::crypto::AccountId32, - real: ::subxt::sp_core::crypto::AccountId32, - force_proxy_type: ::core::option::Option< - runtime_types::rococo_runtime::ProxyType, - >, - call: runtime_types::rococo_runtime::Call, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - ProxyAnnounced, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 160u8, 150u8, 210u8, 154u8, 192u8, 19u8, 14u8, 155u8, 87u8, 97u8, - 248u8, 73u8, 94u8, 38u8, 68u8, 57u8, 216u8, 217u8, 104u8, 26u8, 96u8, - 108u8, 205u8, 25u8, 38u8, 148u8, 4u8, 185u8, 157u8, 183u8, 130u8, 18u8, - ] - { - let call = ProxyAnnounced { - delegate, - real, - force_proxy_type, - call: ::std::boxed::Box::new(call), - }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - pub type Event = runtime_types::pallet_proxy::pallet::Event; - pub mod events { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "A proxy was executed correctly, with the given."] - pub struct ProxyExecuted { - pub result: ::core::result::Result<(), runtime_types::sp_runtime::DispatchError>, - } - impl ::subxt::Event for ProxyExecuted { - const PALLET: &'static str = "Proxy"; - const EVENT: &'static str = "ProxyExecuted"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "Anonymous account has been created by new proxy with given"] - #[doc = "disambiguation index and proxy type."] - pub struct AnonymousCreated { - pub anonymous: ::subxt::sp_core::crypto::AccountId32, - pub who: ::subxt::sp_core::crypto::AccountId32, - pub proxy_type: runtime_types::rococo_runtime::ProxyType, - pub disambiguation_index: ::core::primitive::u16, - } - impl ::subxt::Event for AnonymousCreated { - const PALLET: &'static str = "Proxy"; - const EVENT: &'static str = "AnonymousCreated"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "An announcement was placed to make a call in the future."] - pub struct Announced { - pub real: ::subxt::sp_core::crypto::AccountId32, - pub proxy: ::subxt::sp_core::crypto::AccountId32, - pub call_hash: ::subxt::sp_core::H256, - } - impl 
::subxt::Event for Announced { - const PALLET: &'static str = "Proxy"; - const EVENT: &'static str = "Announced"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "A proxy was added."] - pub struct ProxyAdded { - pub delegator: ::subxt::sp_core::crypto::AccountId32, - pub delegatee: ::subxt::sp_core::crypto::AccountId32, - pub proxy_type: runtime_types::rococo_runtime::ProxyType, - pub delay: ::core::primitive::u32, - } - impl ::subxt::Event for ProxyAdded { - const PALLET: &'static str = "Proxy"; - const EVENT: &'static str = "ProxyAdded"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "A proxy was removed."] - pub struct ProxyRemoved { - pub delegator: ::subxt::sp_core::crypto::AccountId32, - pub delegatee: ::subxt::sp_core::crypto::AccountId32, - pub proxy_type: runtime_types::rococo_runtime::ProxyType, - pub delay: ::core::primitive::u32, - } - impl ::subxt::Event for ProxyRemoved { - const PALLET: &'static str = "Proxy"; - const EVENT: &'static str = "ProxyRemoved"; - } - } - pub mod storage { - use super::runtime_types; - pub struct Proxies<'a>(pub &'a ::subxt::sp_core::crypto::AccountId32); - impl ::subxt::StorageEntry for Proxies<'_> { - const PALLET: &'static str = "Proxy"; - const STORAGE: &'static str = "Proxies"; - type Value = ( - runtime_types::frame_support::storage::bounded_vec::BoundedVec< - runtime_types::pallet_proxy::ProxyDefinition< - ::subxt::sp_core::crypto::AccountId32, - runtime_types::rococo_runtime::ProxyType, - ::core::primitive::u32, - >, - >, - ::core::primitive::u128, - ); - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( - &self.0, - ::subxt::StorageHasher::Twox64Concat, - )]) - } - } - pub struct Announcements<'a>(pub &'a ::subxt::sp_core::crypto::AccountId32); - impl ::subxt::StorageEntry for Announcements<'_> { - const PALLET: &'static str = "Proxy"; - const STORAGE: &'static str = "Announcements"; - type Value = ( - runtime_types::frame_support::storage::bounded_vec::BoundedVec< - runtime_types::pallet_proxy::Announcement< - ::subxt::sp_core::crypto::AccountId32, - ::subxt::sp_core::H256, - ::core::primitive::u32, - >, - >, - ::core::primitive::u128, - ); - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( - &self.0, - ::subxt::StorageHasher::Twox64Concat, - )]) - } - } - pub struct StorageApi<'a, T: ::subxt::Config> { - client: &'a ::subxt::Client, - } - impl<'a, T: ::subxt::Config> StorageApi<'a, T> { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { client } - } - #[doc = " The set of account proxies. Maps the account which has delegated to the accounts"] - #[doc = " which are being delegated to, together with the amount held on deposit."] - pub async fn proxies( - &self, - _0: &::subxt::sp_core::crypto::AccountId32, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ( - runtime_types::frame_support::storage::bounded_vec::BoundedVec< - runtime_types::pallet_proxy::ProxyDefinition< - ::subxt::sp_core::crypto::AccountId32, - runtime_types::rococo_runtime::ProxyType, - ::core::primitive::u32, - >, - >, - ::core::primitive::u128, - ), - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? 
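
// Illustrative sketch only (not part of the removed bindings): assumes the generated
// `proxy::storage::StorageApi` above is in scope, already built from a connected
// `::subxt::Client`; `storage` and `who` are placeholder names, and the generated
// `BoundedVec` newtype is assumed to expose its inner `Vec` as field `0`.
async fn show_proxies<T: ::subxt::Config>(
    storage: &StorageApi<'_, T>,
    who: &::subxt::sp_core::crypto::AccountId32,
) -> Result<(), ::subxt::BasicError> {
    // `Proxies` maps a delegator to its proxy definitions plus the reserved deposit;
    // the getter uses fetch-or-default, so an account with no proxies yields empty data.
    let (delegates, deposit) = storage.proxies(who, None).await?;
    println!("{} proxy definitions, {} reserved on deposit", delegates.0.len(), deposit);
    Ok(())
}
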
- == [ - 6u8, 154u8, 57u8, 61u8, 169u8, 15u8, 54u8, 128u8, 62u8, 67u8, 181u8, - 251u8, 172u8, 194u8, 97u8, 9u8, 141u8, 230u8, 243u8, 33u8, 25u8, 29u8, - 46u8, 136u8, 208u8, 192u8, 147u8, 168u8, 57u8, 18u8, 155u8, 80u8, - ] - { - let entry = Proxies(_0); - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The set of account proxies. Maps the account which has delegated to the accounts"] - #[doc = " which are being delegated to, together with the amount held on deposit."] - pub async fn proxies_iter( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result<::subxt::KeyIter<'a, T, Proxies<'a>>, ::subxt::BasicError> - { - if self.client.metadata().storage_hash::()? - == [ - 6u8, 154u8, 57u8, 61u8, 169u8, 15u8, 54u8, 128u8, 62u8, 67u8, 181u8, - 251u8, 172u8, 194u8, 97u8, 9u8, 141u8, 230u8, 243u8, 33u8, 25u8, 29u8, - 46u8, 136u8, 208u8, 192u8, 147u8, 168u8, 57u8, 18u8, 155u8, 80u8, - ] - { - self.client.storage().iter(block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The announcements made by the proxy (key)."] - pub async fn announcements( - &self, - _0: &::subxt::sp_core::crypto::AccountId32, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ( - runtime_types::frame_support::storage::bounded_vec::BoundedVec< - runtime_types::pallet_proxy::Announcement< - ::subxt::sp_core::crypto::AccountId32, - ::subxt::sp_core::H256, - ::core::primitive::u32, - >, - >, - ::core::primitive::u128, - ), - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 87u8, 74u8, 65u8, 120u8, 190u8, 234u8, 159u8, 168u8, 155u8, 253u8, - 183u8, 229u8, 28u8, 118u8, 20u8, 120u8, 27u8, 10u8, 203u8, 236u8, - 174u8, 40u8, 89u8, 216u8, 217u8, 81u8, 135u8, 67u8, 245u8, 226u8, 10u8, - 17u8, - ] - { - let entry = Announcements(_0); - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The announcements made by the proxy (key)."] - pub async fn announcements_iter( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::subxt::KeyIter<'a, T, Announcements<'a>>, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 87u8, 74u8, 65u8, 120u8, 190u8, 234u8, 159u8, 168u8, 155u8, 253u8, - 183u8, 229u8, 28u8, 118u8, 20u8, 120u8, 27u8, 10u8, 203u8, 236u8, - 174u8, 40u8, 89u8, 216u8, 217u8, 81u8, 135u8, 67u8, 245u8, 226u8, 10u8, - 17u8, - ] - { - self.client.storage().iter(block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - pub mod constants { - use super::runtime_types; - pub struct ConstantsApi<'a, T: ::subxt::Config> { - client: &'a ::subxt::Client, - } - impl<'a, T: ::subxt::Config> ConstantsApi<'a, T> { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { client } - } - #[doc = " The base amount of currency needed to reserve for creating a proxy."] - #[doc = ""] - #[doc = " This is held for an additional storage item whose value size is"] - #[doc = " `sizeof(Balance)` bytes and whose key size is `sizeof(AccountId)` bytes."] - pub fn proxy_deposit_base( - &self, - ) -> ::core::result::Result<::core::primitive::u128, ::subxt::BasicError> - { - if self - .client - .metadata() - .constant_hash("Proxy", "ProxyDepositBase")? 
- == [ - 126u8, 107u8, 187u8, 250u8, 199u8, 131u8, 62u8, 248u8, 122u8, 95u8, - 138u8, 186u8, 61u8, 129u8, 237u8, 236u8, 225u8, 91u8, 255u8, 54u8, - 183u8, 34u8, 103u8, 35u8, 145u8, 9u8, 1u8, 71u8, 124u8, 220u8, 147u8, - 88u8, - ] - { - let pallet = self.client.metadata().pallet("Proxy")?; - let constant = pallet.constant("ProxyDepositBase")?; - let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; - Ok(value) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The amount of currency needed per proxy added."] - #[doc = ""] - #[doc = " This is held for adding 32 bytes plus an instance of `ProxyType` more into a"] - #[doc = " pre-existing storage value. Thus, when configuring `ProxyDepositFactor` one should take"] - #[doc = " into account `32 + proxy_type.encode().len()` bytes of data."] - pub fn proxy_deposit_factor( - &self, - ) -> ::core::result::Result<::core::primitive::u128, ::subxt::BasicError> - { - if self - .client - .metadata() - .constant_hash("Proxy", "ProxyDepositFactor")? - == [ - 241u8, 48u8, 216u8, 37u8, 136u8, 147u8, 59u8, 234u8, 27u8, 8u8, 138u8, - 46u8, 158u8, 190u8, 141u8, 172u8, 176u8, 158u8, 46u8, 109u8, 188u8, - 240u8, 122u8, 122u8, 83u8, 127u8, 29u8, 89u8, 173u8, 110u8, 7u8, 5u8, - ] - { - let pallet = self.client.metadata().pallet("Proxy")?; - let constant = pallet.constant("ProxyDepositFactor")?; - let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; - Ok(value) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The maximum amount of proxies allowed for a single account."] - pub fn max_proxies( - &self, - ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> - { - if self - .client - .metadata() - .constant_hash("Proxy", "MaxProxies")? - == [ - 249u8, 153u8, 224u8, 128u8, 161u8, 3u8, 39u8, 192u8, 120u8, 150u8, - 184u8, 92u8, 225u8, 222u8, 76u8, 172u8, 131u8, 87u8, 231u8, 128u8, 5u8, - 62u8, 116u8, 112u8, 103u8, 4u8, 39u8, 163u8, 71u8, 97u8, 221u8, 19u8, - ] - { - let pallet = self.client.metadata().pallet("Proxy")?; - let constant = pallet.constant("MaxProxies")?; - let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; - Ok(value) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The maximum amount of time-delayed announcements that are allowed to be pending."] - pub fn max_pending( - &self, - ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> - { - if self - .client - .metadata() - .constant_hash("Proxy", "MaxPending")? - == [ - 88u8, 148u8, 146u8, 152u8, 151u8, 208u8, 255u8, 193u8, 239u8, 105u8, - 197u8, 153u8, 151u8, 18u8, 86u8, 13u8, 242u8, 242u8, 59u8, 92u8, 107u8, - 203u8, 102u8, 69u8, 147u8, 147u8, 37u8, 83u8, 237u8, 9u8, 114u8, 196u8, - ] - { - let pallet = self.client.metadata().pallet("Proxy")?; - let constant = pallet.constant("MaxPending")?; - let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; - Ok(value) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The base amount of currency needed to reserve for creating an announcement."] - #[doc = ""] - #[doc = " This is held when a new storage item holding a `Balance` is created (typically 16"] - #[doc = " bytes)."] - pub fn announcement_deposit_base( - &self, - ) -> ::core::result::Result<::core::primitive::u128, ::subxt::BasicError> - { - if self - .client - .metadata() - .constant_hash("Proxy", "AnnouncementDepositBase")? 
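
// Hedged sketch: reads the Proxy pallet constants through the generated `ConstantsApi`
// above. `constants` is a placeholder for an instance built from a connected client;
// the deposit formula (base plus factor per proxy) mirrors the doc comments above,
// not code in this patch.
fn show_proxy_deposits<T: ::subxt::Config>(
    constants: &ConstantsApi<'_, T>,
) -> Result<(), ::subxt::BasicError> {
    // Each accessor checks the constant hash against the node metadata before decoding,
    // so a runtime mismatch surfaces as `IncompatibleMetadata` instead of garbage values.
    let base = constants.proxy_deposit_base()?;
    let factor = constants.proxy_deposit_factor()?;
    let max = constants.max_proxies()?;
    println!("worst-case proxy deposit: {}", base + u128::from(max) * factor);
    Ok(())
}
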
- == [ - 190u8, 15u8, 203u8, 82u8, 114u8, 33u8, 225u8, 62u8, 89u8, 39u8, 218u8, - 69u8, 217u8, 120u8, 4u8, 235u8, 209u8, 97u8, 119u8, 86u8, 157u8, 178u8, - 64u8, 170u8, 102u8, 187u8, 251u8, 125u8, 20u8, 181u8, 6u8, 102u8, - ] - { - let pallet = self.client.metadata().pallet("Proxy")?; - let constant = pallet.constant("AnnouncementDepositBase")?; - let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; - Ok(value) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The amount of currency needed per announcement made."] - #[doc = ""] - #[doc = " This is held for adding an `AccountId`, `Hash` and `BlockNumber` (typically 68 bytes)"] - #[doc = " into a pre-existing storage value."] - pub fn announcement_deposit_factor( - &self, - ) -> ::core::result::Result<::core::primitive::u128, ::subxt::BasicError> - { - if self - .client - .metadata() - .constant_hash("Proxy", "AnnouncementDepositFactor")? - == [ - 240u8, 165u8, 11u8, 46u8, 237u8, 248u8, 133u8, 48u8, 240u8, 235u8, - 26u8, 59u8, 42u8, 72u8, 18u8, 252u8, 167u8, 16u8, 15u8, 168u8, 197u8, - 45u8, 57u8, 49u8, 173u8, 31u8, 180u8, 27u8, 64u8, 94u8, 139u8, 251u8, - ] - { - let pallet = self.client.metadata().pallet("Proxy")?; - let constant = pallet.constant("AnnouncementDepositFactor")?; - let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; - Ok(value) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - } - pub mod multisig { - use super::root_mod; - use super::runtime_types; - pub mod calls { - use super::root_mod; - use super::runtime_types; - type DispatchError = runtime_types::sp_runtime::DispatchError; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct AsMultiThreshold1 { - pub other_signatories: ::std::vec::Vec<::subxt::sp_core::crypto::AccountId32>, - pub call: ::std::boxed::Box, - } - impl ::subxt::Call for AsMultiThreshold1 { - const PALLET: &'static str = "Multisig"; - const FUNCTION: &'static str = "as_multi_threshold_1"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct AsMulti { - pub threshold: ::core::primitive::u16, - pub other_signatories: ::std::vec::Vec<::subxt::sp_core::crypto::AccountId32>, - pub maybe_timepoint: ::core::option::Option< - runtime_types::pallet_multisig::Timepoint<::core::primitive::u32>, - >, - pub call: ::subxt::WrapperKeepOpaque, - pub store_call: ::core::primitive::bool, - pub max_weight: ::core::primitive::u64, - } - impl ::subxt::Call for AsMulti { - const PALLET: &'static str = "Multisig"; - const FUNCTION: &'static str = "as_multi"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct ApproveAsMulti { - pub threshold: ::core::primitive::u16, - pub other_signatories: ::std::vec::Vec<::subxt::sp_core::crypto::AccountId32>, - pub maybe_timepoint: ::core::option::Option< - runtime_types::pallet_multisig::Timepoint<::core::primitive::u32>, - >, - pub call_hash: [::core::primitive::u8; 32usize], - pub max_weight: ::core::primitive::u64, - } - impl ::subxt::Call for ApproveAsMulti { - const PALLET: &'static str = "Multisig"; - const FUNCTION: &'static str = "approve_as_multi"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct CancelAsMulti { - pub threshold: ::core::primitive::u16, - pub other_signatories: ::std::vec::Vec<::subxt::sp_core::crypto::AccountId32>, - pub timepoint: 
runtime_types::pallet_multisig::Timepoint<::core::primitive::u32>, - pub call_hash: [::core::primitive::u8; 32usize], - } - impl ::subxt::Call for CancelAsMulti { - const PALLET: &'static str = "Multisig"; - const FUNCTION: &'static str = "cancel_as_multi"; - } - pub struct TransactionApi<'a, T: ::subxt::Config, X> { - client: &'a ::subxt::Client, - marker: ::core::marker::PhantomData, - } - impl<'a, T, X> TransactionApi<'a, T, X> - where - T: ::subxt::Config, - X: ::subxt::extrinsic::ExtrinsicParams, - { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { - client, - marker: ::core::marker::PhantomData, - } - } - #[doc = "Immediately dispatch a multi-signature call using a single approval from the caller."] - #[doc = ""] - #[doc = "The dispatch origin for this call must be _Signed_."] - #[doc = ""] - #[doc = "- `other_signatories`: The accounts (other than the sender) who are part of the"] - #[doc = "multi-signature, but do not participate in the approval process."] - #[doc = "- `call`: The call to be executed."] - #[doc = ""] - #[doc = "Result is equivalent to the dispatched result."] - #[doc = ""] - #[doc = "# "] - #[doc = "O(Z + C) where Z is the length of the call and C its execution weight."] - #[doc = "-------------------------------"] - #[doc = "- DB Weight: None"] - #[doc = "- Plus Call Weight"] - #[doc = "# "] - pub fn as_multi_threshold_1( - &self, - other_signatories: ::std::vec::Vec<::subxt::sp_core::crypto::AccountId32>, - call: runtime_types::rococo_runtime::Call, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - AsMultiThreshold1, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 225u8, 196u8, 125u8, 147u8, 112u8, 168u8, 93u8, 153u8, 195u8, 61u8, - 54u8, 153u8, 75u8, 201u8, 168u8, 160u8, 21u8, 17u8, 159u8, 129u8, - 165u8, 19u8, 98u8, 32u8, 173u8, 187u8, 232u8, 181u8, 77u8, 173u8, - 139u8, 219u8, - ] - { - let call = AsMultiThreshold1 { - other_signatories, - call: ::std::boxed::Box::new(call), - }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Register approval for a dispatch to be made from a deterministic composite account if"] - #[doc = "approved by a total of `threshold - 1` of `other_signatories`."] - #[doc = ""] - #[doc = "If there are enough, then dispatch the call."] - #[doc = ""] - #[doc = "Payment: `DepositBase` will be reserved if this is the first approval, plus"] - #[doc = "`threshold` times `DepositFactor`. It is returned once this dispatch happens or"] - #[doc = "is cancelled."] - #[doc = ""] - #[doc = "The dispatch origin for this call must be _Signed_."] - #[doc = ""] - #[doc = "- `threshold`: The total number of approvals for this dispatch before it is executed."] - #[doc = "- `other_signatories`: The accounts (other than the sender) who can approve this"] - #[doc = "dispatch. May not be empty."] - #[doc = "- `maybe_timepoint`: If this is the first approval, then this must be `None`. 
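
// Sketch only: builds the `AsMultiThreshold1` call value declared above by hand, the
// same shape `TransactionApi::as_multi_threshold_1` produces before wrapping it in a
// `SubmittableExtrinsic` (the API method additionally checks the call hash against the
// node metadata). `inner` is a placeholder for an already-constructed runtime call.
fn threshold_1_multisig(
    other_signatories: ::std::vec::Vec<::subxt::sp_core::crypto::AccountId32>,
    inner: runtime_types::rococo_runtime::Call,
) -> AsMultiThreshold1 {
    AsMultiThreshold1 {
        other_signatories,
        // The inner call is boxed, matching the generated struct definition.
        call: ::std::boxed::Box::new(inner),
    }
}
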
If it is"] - #[doc = "not the first approval, then it must be `Some`, with the timepoint (block number and"] - #[doc = "transaction index) of the first approval transaction."] - #[doc = "- `call`: The call to be executed."] - #[doc = ""] - #[doc = "NOTE: Unless this is the final approval, you will generally want to use"] - #[doc = "`approve_as_multi` instead, since it only requires a hash of the call."] - #[doc = ""] - #[doc = "Result is equivalent to the dispatched result if `threshold` is exactly `1`. Otherwise"] - #[doc = "on success, result is `Ok` and the result from the interior call, if it was executed,"] - #[doc = "may be found in the deposited `MultisigExecuted` event."] - #[doc = ""] - #[doc = "# "] - #[doc = "- `O(S + Z + Call)`."] - #[doc = "- Up to one balance-reserve or unreserve operation."] - #[doc = "- One passthrough operation, one insert, both `O(S)` where `S` is the number of"] - #[doc = " signatories. `S` is capped by `MaxSignatories`, with weight being proportional."] - #[doc = "- One call encode & hash, both of complexity `O(Z)` where `Z` is tx-len."] - #[doc = "- One encode & hash, both of complexity `O(S)`."] - #[doc = "- Up to one binary search and insert (`O(logS + S)`)."] - #[doc = "- I/O: 1 read `O(S)`, up to 1 mutate `O(S)`. Up to one remove."] - #[doc = "- One event."] - #[doc = "- The weight of the `call`."] - #[doc = "- Storage: inserts one item, value size bounded by `MaxSignatories`, with a deposit"] - #[doc = " taken for its lifetime of `DepositBase + threshold * DepositFactor`."] - #[doc = "-------------------------------"] - #[doc = "- DB Weight:"] - #[doc = " - Reads: Multisig Storage, [Caller Account], Calls (if `store_call`)"] - #[doc = " - Writes: Multisig Storage, [Caller Account], Calls (if `store_call`)"] - #[doc = "- Plus Call Weight"] - #[doc = "# "] - pub fn as_multi( - &self, - threshold: ::core::primitive::u16, - other_signatories: ::std::vec::Vec<::subxt::sp_core::crypto::AccountId32>, - maybe_timepoint: ::core::option::Option< - runtime_types::pallet_multisig::Timepoint<::core::primitive::u32>, - >, - call: ::subxt::WrapperKeepOpaque, - store_call: ::core::primitive::bool, - max_weight: ::core::primitive::u64, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - AsMulti, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 50u8, 22u8, 66u8, 21u8, 62u8, 119u8, 198u8, 238u8, 36u8, 241u8, 8u8, - 249u8, 178u8, 13u8, 114u8, 221u8, 72u8, 137u8, 168u8, 42u8, 31u8, 25u8, - 197u8, 9u8, 45u8, 88u8, 248u8, 42u8, 136u8, 230u8, 64u8, 12u8, - ] - { - let call = AsMulti { - threshold, - other_signatories, - maybe_timepoint, - call, - store_call, - max_weight, - }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Register approval for a dispatch to be made from a deterministic composite account if"] - #[doc = "approved by a total of `threshold - 1` of `other_signatories`."] - #[doc = ""] - #[doc = "Payment: `DepositBase` will be reserved if this is the first approval, plus"] - #[doc = "`threshold` times `DepositFactor`. 
It is returned once this dispatch happens or"] - #[doc = "is cancelled."] - #[doc = ""] - #[doc = "The dispatch origin for this call must be _Signed_."] - #[doc = ""] - #[doc = "- `threshold`: The total number of approvals for this dispatch before it is executed."] - #[doc = "- `other_signatories`: The accounts (other than the sender) who can approve this"] - #[doc = "dispatch. May not be empty."] - #[doc = "- `maybe_timepoint`: If this is the first approval, then this must be `None`. If it is"] - #[doc = "not the first approval, then it must be `Some`, with the timepoint (block number and"] - #[doc = "transaction index) of the first approval transaction."] - #[doc = "- `call_hash`: The hash of the call to be executed."] - #[doc = ""] - #[doc = "NOTE: If this is the final approval, you will want to use `as_multi` instead."] - #[doc = ""] - #[doc = "# "] - #[doc = "- `O(S)`."] - #[doc = "- Up to one balance-reserve or unreserve operation."] - #[doc = "- One passthrough operation, one insert, both `O(S)` where `S` is the number of"] - #[doc = " signatories. `S` is capped by `MaxSignatories`, with weight being proportional."] - #[doc = "- One encode & hash, both of complexity `O(S)`."] - #[doc = "- Up to one binary search and insert (`O(logS + S)`)."] - #[doc = "- I/O: 1 read `O(S)`, up to 1 mutate `O(S)`. Up to one remove."] - #[doc = "- One event."] - #[doc = "- Storage: inserts one item, value size bounded by `MaxSignatories`, with a deposit"] - #[doc = " taken for its lifetime of `DepositBase + threshold * DepositFactor`."] - #[doc = "----------------------------------"] - #[doc = "- DB Weight:"] - #[doc = " - Read: Multisig Storage, [Caller Account]"] - #[doc = " - Write: Multisig Storage, [Caller Account]"] - #[doc = "# "] - pub fn approve_as_multi( - &self, - threshold: ::core::primitive::u16, - other_signatories: ::std::vec::Vec<::subxt::sp_core::crypto::AccountId32>, - maybe_timepoint: ::core::option::Option< - runtime_types::pallet_multisig::Timepoint<::core::primitive::u32>, - >, - call_hash: [::core::primitive::u8; 32usize], - max_weight: ::core::primitive::u64, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - ApproveAsMulti, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 114u8, 29u8, 118u8, 154u8, 91u8, 4u8, 127u8, 126u8, 190u8, 180u8, 57u8, - 112u8, 72u8, 8u8, 248u8, 126u8, 25u8, 190u8, 130u8, 86u8, 160u8, 164u8, - 76u8, 64u8, 25u8, 175u8, 132u8, 225u8, 147u8, 166u8, 12u8, 38u8, - ] - { - let call = ApproveAsMulti { - threshold, - other_signatories, - maybe_timepoint, - call_hash, - max_weight, - }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Cancel a pre-existing, on-going multisig transaction. Any deposit reserved previously"] - #[doc = "for this operation will be unreserved on success."] - #[doc = ""] - #[doc = "The dispatch origin for this call must be _Signed_."] - #[doc = ""] - #[doc = "- `threshold`: The total number of approvals for this dispatch before it is executed."] - #[doc = "- `other_signatories`: The accounts (other than the sender) who can approve this"] - #[doc = "dispatch. 
May not be empty."] - #[doc = "- `timepoint`: The timepoint (block number and transaction index) of the first approval"] - #[doc = "transaction for this dispatch."] - #[doc = "- `call_hash`: The hash of the call to be executed."] - #[doc = ""] - #[doc = "# "] - #[doc = "- `O(S)`."] - #[doc = "- Up to one balance-reserve or unreserve operation."] - #[doc = "- One passthrough operation, one insert, both `O(S)` where `S` is the number of"] - #[doc = " signatories. `S` is capped by `MaxSignatories`, with weight being proportional."] - #[doc = "- One encode & hash, both of complexity `O(S)`."] - #[doc = "- One event."] - #[doc = "- I/O: 1 read `O(S)`, one remove."] - #[doc = "- Storage: removes one item."] - #[doc = "----------------------------------"] - #[doc = "- DB Weight:"] - #[doc = " - Read: Multisig Storage, [Caller Account], Refund Account, Calls"] - #[doc = " - Write: Multisig Storage, [Caller Account], Refund Account, Calls"] - #[doc = "# "] - pub fn cancel_as_multi( - &self, - threshold: ::core::primitive::u16, - other_signatories: ::std::vec::Vec<::subxt::sp_core::crypto::AccountId32>, - timepoint: runtime_types::pallet_multisig::Timepoint<::core::primitive::u32>, - call_hash: [::core::primitive::u8; 32usize], - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - CancelAsMulti, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 195u8, 216u8, 37u8, 179u8, 9u8, 19u8, 238u8, 94u8, 156u8, 5u8, 120u8, - 78u8, 129u8, 99u8, 239u8, 142u8, 68u8, 12u8, 254u8, 46u8, 251u8, 8u8, - 193u8, 43u8, 37u8, 68u8, 249u8, 85u8, 163u8, 85u8, 193u8, 47u8, - ] - { - let call = CancelAsMulti { - threshold, - other_signatories, - timepoint, - call_hash, - }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - pub type Event = runtime_types::pallet_multisig::pallet::Event; - pub mod events { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "A new multisig operation has begun."] - pub struct NewMultisig { - pub approving: ::subxt::sp_core::crypto::AccountId32, - pub multisig: ::subxt::sp_core::crypto::AccountId32, - pub call_hash: [::core::primitive::u8; 32usize], - } - impl ::subxt::Event for NewMultisig { - const PALLET: &'static str = "Multisig"; - const EVENT: &'static str = "NewMultisig"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "A multisig operation has been approved by someone."] - pub struct MultisigApproval { - pub approving: ::subxt::sp_core::crypto::AccountId32, - pub timepoint: runtime_types::pallet_multisig::Timepoint<::core::primitive::u32>, - pub multisig: ::subxt::sp_core::crypto::AccountId32, - pub call_hash: [::core::primitive::u8; 32usize], - } - impl ::subxt::Event for MultisigApproval { - const PALLET: &'static str = "Multisig"; - const EVENT: &'static str = "MultisigApproval"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "A multisig operation has been executed."] - pub struct MultisigExecuted { - pub approving: ::subxt::sp_core::crypto::AccountId32, - pub timepoint: runtime_types::pallet_multisig::Timepoint<::core::primitive::u32>, - pub multisig: ::subxt::sp_core::crypto::AccountId32, - pub call_hash: [::core::primitive::u8; 32usize], - pub result: ::core::result::Result<(), runtime_types::sp_runtime::DispatchError>, - } - impl ::subxt::Event for 
MultisigExecuted { - const PALLET: &'static str = "Multisig"; - const EVENT: &'static str = "MultisigExecuted"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "A multisig operation has been cancelled."] - pub struct MultisigCancelled { - pub cancelling: ::subxt::sp_core::crypto::AccountId32, - pub timepoint: runtime_types::pallet_multisig::Timepoint<::core::primitive::u32>, - pub multisig: ::subxt::sp_core::crypto::AccountId32, - pub call_hash: [::core::primitive::u8; 32usize], - } - impl ::subxt::Event for MultisigCancelled { - const PALLET: &'static str = "Multisig"; - const EVENT: &'static str = "MultisigCancelled"; - } - } - pub mod storage { - use super::runtime_types; - pub struct Multisigs<'a>( - pub &'a ::subxt::sp_core::crypto::AccountId32, - pub &'a [::core::primitive::u8; 32usize], - ); - impl ::subxt::StorageEntry for Multisigs<'_> { - const PALLET: &'static str = "Multisig"; - const STORAGE: &'static str = "Multisigs"; - type Value = runtime_types::pallet_multisig::Multisig< - ::core::primitive::u32, - ::core::primitive::u128, - ::subxt::sp_core::crypto::AccountId32, - >; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Map(vec![ - ::subxt::StorageMapKey::new(&self.0, ::subxt::StorageHasher::Twox64Concat), - ::subxt::StorageMapKey::new( - &self.1, - ::subxt::StorageHasher::Blake2_128Concat, - ), - ]) - } - } - pub struct Calls<'a>(pub &'a [::core::primitive::u8; 32usize]); - impl ::subxt::StorageEntry for Calls<'_> { - const PALLET: &'static str = "Multisig"; - const STORAGE: &'static str = "Calls"; - type Value = ( - ::subxt::WrapperKeepOpaque, - ::subxt::sp_core::crypto::AccountId32, - ::core::primitive::u128, - ); - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( - &self.0, - ::subxt::StorageHasher::Identity, - )]) - } - } - pub struct StorageApi<'a, T: ::subxt::Config> { - client: &'a ::subxt::Client, - } - impl<'a, T: ::subxt::Config> StorageApi<'a, T> { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { client } - } - #[doc = " The set of open multisig operations."] - pub async fn multisigs( - &self, - _0: &::subxt::sp_core::crypto::AccountId32, - _1: &[::core::primitive::u8; 32usize], - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::core::option::Option< - runtime_types::pallet_multisig::Multisig< - ::core::primitive::u32, - ::core::primitive::u128, - ::subxt::sp_core::crypto::AccountId32, - >, - >, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 137u8, 130u8, 173u8, 65u8, 126u8, 244u8, 194u8, 167u8, 93u8, 174u8, - 104u8, 131u8, 115u8, 155u8, 93u8, 185u8, 54u8, 204u8, 155u8, 149u8, - 184u8, 24u8, 111u8, 40u8, 249u8, 215u8, 34u8, 251u8, 224u8, 110u8, - 202u8, 2u8, - ] - { - let entry = Multisigs(_0, _1); - self.client.storage().fetch(&entry, block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The set of open multisig operations."] - pub async fn multisigs_iter( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::subxt::KeyIter<'a, T, Multisigs<'a>>, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? 
- == [ - 137u8, 130u8, 173u8, 65u8, 126u8, 244u8, 194u8, 167u8, 93u8, 174u8, - 104u8, 131u8, 115u8, 155u8, 93u8, 185u8, 54u8, 204u8, 155u8, 149u8, - 184u8, 24u8, 111u8, 40u8, 249u8, 215u8, 34u8, 251u8, 224u8, 110u8, - 202u8, 2u8, - ] - { - self.client.storage().iter(block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - pub async fn calls( - &self, - _0: &[::core::primitive::u8; 32usize], - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::core::option::Option<( - ::subxt::WrapperKeepOpaque, - ::subxt::sp_core::crypto::AccountId32, - ::core::primitive::u128, - )>, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 45u8, 131u8, 59u8, 143u8, 103u8, 14u8, 145u8, 237u8, 41u8, 118u8, - 121u8, 206u8, 38u8, 247u8, 229u8, 187u8, 15u8, 228u8, 206u8, 250u8, - 198u8, 10u8, 157u8, 207u8, 83u8, 74u8, 34u8, 165u8, 205u8, 44u8, 129u8, - 179u8, - ] - { - let entry = Calls(_0); - self.client.storage().fetch(&entry, block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - pub async fn calls_iter( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result<::subxt::KeyIter<'a, T, Calls<'a>>, ::subxt::BasicError> - { - if self.client.metadata().storage_hash::()? - == [ - 45u8, 131u8, 59u8, 143u8, 103u8, 14u8, 145u8, 237u8, 41u8, 118u8, - 121u8, 206u8, 38u8, 247u8, 229u8, 187u8, 15u8, 228u8, 206u8, 250u8, - 198u8, 10u8, 157u8, 207u8, 83u8, 74u8, 34u8, 165u8, 205u8, 44u8, 129u8, - 179u8, - ] - { - self.client.storage().iter(block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - pub mod constants { - use super::runtime_types; - pub struct ConstantsApi<'a, T: ::subxt::Config> { - client: &'a ::subxt::Client, - } - impl<'a, T: ::subxt::Config> ConstantsApi<'a, T> { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { client } - } - #[doc = " The base amount of currency needed to reserve for creating a multisig execution or to"] - #[doc = " store a dispatch call for later."] - #[doc = ""] - #[doc = " This is held for an additional storage item whose value size is"] - #[doc = " `4 + sizeof((BlockNumber, Balance, AccountId))` bytes and whose key size is"] - #[doc = " `32 + sizeof(AccountId)` bytes."] - pub fn deposit_base( - &self, - ) -> ::core::result::Result<::core::primitive::u128, ::subxt::BasicError> - { - if self - .client - .metadata() - .constant_hash("Multisig", "DepositBase")? - == [ - 71u8, 154u8, 198u8, 152u8, 162u8, 128u8, 229u8, 128u8, 60u8, 108u8, - 172u8, 247u8, 145u8, 8u8, 159u8, 25u8, 36u8, 141u8, 28u8, 67u8, 30u8, - 14u8, 194u8, 98u8, 125u8, 161u8, 148u8, 41u8, 67u8, 120u8, 78u8, 162u8, - ] - { - let pallet = self.client.metadata().pallet("Multisig")?; - let constant = pallet.constant("DepositBase")?; - let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; - Ok(value) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The amount of currency needed per unit threshold when creating a multisig execution."] - #[doc = ""] - #[doc = " This is held for adding 32 bytes more into a pre-existing storage value."] - pub fn deposit_factor( - &self, - ) -> ::core::result::Result<::core::primitive::u128, ::subxt::BasicError> - { - if self - .client - .metadata() - .constant_hash("Multisig", "DepositFactor")? 
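
// Sketch (assumptions: the generated `multisig::storage::StorageApi` above is in scope
// as `storage`; `owner` and `call_hash` are placeholders for the multisig account and
// the 32-byte hash of the pending call).
async fn multisig_is_open<T: ::subxt::Config>(
    storage: &StorageApi<'_, T>,
    owner: &::subxt::sp_core::crypto::AccountId32,
    call_hash: &[::core::primitive::u8; 32usize],
) -> Result<bool, ::subxt::BasicError> {
    // `Multisigs` is a double map keyed by (account, call hash); an absent entry comes
    // back as `None` rather than an error, so `is_some` answers "is this still open?".
    Ok(storage.multisigs(owner, call_hash, None).await?.is_some())
}
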
- == [ - 248u8, 238u8, 23u8, 116u8, 115u8, 32u8, 128u8, 25u8, 153u8, 128u8, - 14u8, 55u8, 124u8, 103u8, 61u8, 140u8, 106u8, 176u8, 226u8, 232u8, - 255u8, 246u8, 68u8, 23u8, 111u8, 168u8, 45u8, 130u8, 182u8, 15u8, 66u8, - 64u8, - ] - { - let pallet = self.client.metadata().pallet("Multisig")?; - let constant = pallet.constant("DepositFactor")?; - let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; - Ok(value) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The maximum amount of signatories allowed in the multisig."] - pub fn max_signatories( - &self, - ) -> ::core::result::Result<::core::primitive::u16, ::subxt::BasicError> - { - if self - .client - .metadata() - .constant_hash("Multisig", "MaxSignatories")? - == [ - 139u8, 36u8, 140u8, 198u8, 176u8, 106u8, 89u8, 194u8, 33u8, 23u8, 60u8, - 134u8, 143u8, 24u8, 176u8, 64u8, 47u8, 109u8, 159u8, 134u8, 240u8, - 231u8, 181u8, 146u8, 136u8, 249u8, 175u8, 67u8, 41u8, 152u8, 90u8, - 15u8, - ] - { - let pallet = self.client.metadata().pallet("Multisig")?; - let constant = pallet.constant("MaxSignatories")?; - let value = ::subxt::codec::Decode::decode(&mut &constant.value[..])?; - Ok(value) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - } - pub mod xcm_pallet { - use super::root_mod; - use super::runtime_types; - pub mod calls { - use super::root_mod; - use super::runtime_types; - type DispatchError = runtime_types::sp_runtime::DispatchError; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct Send { - pub dest: ::std::boxed::Box, - pub message: ::std::boxed::Box, - } - impl ::subxt::Call for Send { - const PALLET: &'static str = "XcmPallet"; - const FUNCTION: &'static str = "send"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct TeleportAssets { - pub dest: ::std::boxed::Box, - pub beneficiary: ::std::boxed::Box, - pub assets: ::std::boxed::Box, - pub fee_asset_item: ::core::primitive::u32, - } - impl ::subxt::Call for TeleportAssets { - const PALLET: &'static str = "XcmPallet"; - const FUNCTION: &'static str = "teleport_assets"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct ReserveTransferAssets { - pub dest: ::std::boxed::Box, - pub beneficiary: ::std::boxed::Box, - pub assets: ::std::boxed::Box, - pub fee_asset_item: ::core::primitive::u32, - } - impl ::subxt::Call for ReserveTransferAssets { - const PALLET: &'static str = "XcmPallet"; - const FUNCTION: &'static str = "reserve_transfer_assets"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct Execute { - pub message: ::std::boxed::Box, - pub max_weight: ::core::primitive::u64, - } - impl ::subxt::Call for Execute { - const PALLET: &'static str = "XcmPallet"; - const FUNCTION: &'static str = "execute"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct ForceXcmVersion { - pub location: - ::std::boxed::Box, - pub xcm_version: ::core::primitive::u32, - } - impl ::subxt::Call for ForceXcmVersion { - const PALLET: &'static str = "XcmPallet"; - const FUNCTION: &'static str = "force_xcm_version"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct ForceDefaultXcmVersion { - pub maybe_xcm_version: ::core::option::Option<::core::primitive::u32>, - } - impl ::subxt::Call for ForceDefaultXcmVersion { - const PALLET: &'static str = "XcmPallet"; - 
const FUNCTION: &'static str = "force_default_xcm_version"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct ForceSubscribeVersionNotify { - pub location: ::std::boxed::Box, - } - impl ::subxt::Call for ForceSubscribeVersionNotify { - const PALLET: &'static str = "XcmPallet"; - const FUNCTION: &'static str = "force_subscribe_version_notify"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct ForceUnsubscribeVersionNotify { - pub location: ::std::boxed::Box, - } - impl ::subxt::Call for ForceUnsubscribeVersionNotify { - const PALLET: &'static str = "XcmPallet"; - const FUNCTION: &'static str = "force_unsubscribe_version_notify"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct LimitedReserveTransferAssets { - pub dest: ::std::boxed::Box, - pub beneficiary: ::std::boxed::Box, - pub assets: ::std::boxed::Box, - pub fee_asset_item: ::core::primitive::u32, - pub weight_limit: runtime_types::xcm::v2::WeightLimit, - } - impl ::subxt::Call for LimitedReserveTransferAssets { - const PALLET: &'static str = "XcmPallet"; - const FUNCTION: &'static str = "limited_reserve_transfer_assets"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct LimitedTeleportAssets { - pub dest: ::std::boxed::Box, - pub beneficiary: ::std::boxed::Box, - pub assets: ::std::boxed::Box, - pub fee_asset_item: ::core::primitive::u32, - pub weight_limit: runtime_types::xcm::v2::WeightLimit, - } - impl ::subxt::Call for LimitedTeleportAssets { - const PALLET: &'static str = "XcmPallet"; - const FUNCTION: &'static str = "limited_teleport_assets"; - } - pub struct TransactionApi<'a, T: ::subxt::Config, X> { - client: &'a ::subxt::Client, - marker: ::core::marker::PhantomData, - } - impl<'a, T, X> TransactionApi<'a, T, X> - where - T: ::subxt::Config, - X: ::subxt::extrinsic::ExtrinsicParams, - { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { - client, - marker: ::core::marker::PhantomData, - } - } - pub fn send( - &self, - dest: runtime_types::xcm::VersionedMultiLocation, - message: runtime_types::xcm::VersionedXcm, - ) -> Result< - ::subxt::SubmittableExtrinsic<'a, T, X, Send, DispatchError, root_mod::Event>, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 232u8, 188u8, 205u8, 27u8, 92u8, 141u8, 251u8, 24u8, 90u8, 155u8, 20u8, - 139u8, 7u8, 160u8, 39u8, 85u8, 205u8, 11u8, 111u8, 1u8, 250u8, 168u8, - 134u8, 61u8, 19u8, 216u8, 239u8, 127u8, 137u8, 136u8, 48u8, 19u8, - ] - { - let call = Send { - dest: ::std::boxed::Box::new(dest), - message: ::std::boxed::Box::new(message), - }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Teleport some assets from the local chain to some destination chain."] - #[doc = ""] - #[doc = "Fee payment on the destination side is made from the asset in the `assets` vector of"] - #[doc = "index `fee_asset_item`. The weight limit for fees is not provided and thus is unlimited,"] - #[doc = "with all fees taken as needed from the asset."] - #[doc = ""] - #[doc = "- `origin`: Must be capable of withdrawing the `assets` and executing XCM."] - #[doc = "- `dest`: Destination context for the assets. 
Will typically be `X2(Parent, Parachain(..))` to send"] - #[doc = " from parachain to parachain, or `X1(Parachain(..))` to send from relay to parachain."] - #[doc = "- `beneficiary`: A beneficiary location for the assets in the context of `dest`. Will generally be"] - #[doc = " an `AccountId32` value."] - #[doc = "- `assets`: The assets to be withdrawn. The first item should be the currency used to to pay the fee on the"] - #[doc = " `dest` side. May not be empty."] - #[doc = "- `fee_asset_item`: The index into `assets` of the item which should be used to pay"] - #[doc = " fees."] - pub fn teleport_assets( - &self, - dest: runtime_types::xcm::VersionedMultiLocation, - beneficiary: runtime_types::xcm::VersionedMultiLocation, - assets: runtime_types::xcm::VersionedMultiAssets, - fee_asset_item: ::core::primitive::u32, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - TeleportAssets, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 55u8, 192u8, 217u8, 186u8, 230u8, 234u8, 26u8, 194u8, 243u8, 199u8, - 16u8, 227u8, 225u8, 88u8, 130u8, 219u8, 228u8, 110u8, 20u8, 255u8, - 233u8, 147u8, 121u8, 173u8, 126u8, 248u8, 192u8, 243u8, 211u8, 91u8, - 115u8, 148u8, - ] - { - let call = TeleportAssets { - dest: ::std::boxed::Box::new(dest), - beneficiary: ::std::boxed::Box::new(beneficiary), - assets: ::std::boxed::Box::new(assets), - fee_asset_item, - }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Transfer some assets from the local chain to the sovereign account of a destination"] - #[doc = "chain and forward a notification XCM."] - #[doc = ""] - #[doc = "Fee payment on the destination side is made from the asset in the `assets` vector of"] - #[doc = "index `fee_asset_item`. The weight limit for fees is not provided and thus is unlimited,"] - #[doc = "with all fees taken as needed from the asset."] - #[doc = ""] - #[doc = "- `origin`: Must be capable of withdrawing the `assets` and executing XCM."] - #[doc = "- `dest`: Destination context for the assets. Will typically be `X2(Parent, Parachain(..))` to send"] - #[doc = " from parachain to parachain, or `X1(Parachain(..))` to send from relay to parachain."] - #[doc = "- `beneficiary`: A beneficiary location for the assets in the context of `dest`. Will generally be"] - #[doc = " an `AccountId32` value."] - #[doc = "- `assets`: The assets to be withdrawn. This should include the assets used to pay the fee on the"] - #[doc = " `dest` side."] - #[doc = "- `fee_asset_item`: The index into `assets` of the item which should be used to pay"] - #[doc = " fees."] - pub fn reserve_transfer_assets( - &self, - dest: runtime_types::xcm::VersionedMultiLocation, - beneficiary: runtime_types::xcm::VersionedMultiLocation, - assets: runtime_types::xcm::VersionedMultiAssets, - fee_asset_item: ::core::primitive::u32, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - ReserveTransferAssets, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .call_hash::()? 
- == [ - 134u8, 229u8, 104u8, 209u8, 160u8, 7u8, 99u8, 175u8, 128u8, 110u8, - 189u8, 225u8, 141u8, 1u8, 10u8, 17u8, 247u8, 233u8, 146u8, 19u8, 31u8, - 145u8, 217u8, 144u8, 85u8, 223u8, 197u8, 249u8, 1u8, 222u8, 98u8, 13u8, - ] - { - let call = ReserveTransferAssets { - dest: ::std::boxed::Box::new(dest), - beneficiary: ::std::boxed::Box::new(beneficiary), - assets: ::std::boxed::Box::new(assets), - fee_asset_item, - }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Execute an XCM message from a local, signed, origin."] - #[doc = ""] - #[doc = "An event is deposited indicating whether `msg` could be executed completely or only"] - #[doc = "partially."] - #[doc = ""] - #[doc = "No more than `max_weight` will be used in its attempted execution. If this is less than the"] - #[doc = "maximum amount of weight that the message could take to be executed, then no execution"] - #[doc = "attempt will be made."] - #[doc = ""] - #[doc = "NOTE: A successful return to this does *not* imply that the `msg` was executed successfully"] - #[doc = "to completion; only that *some* of it was executed."] - pub fn execute( - &self, - message: runtime_types::xcm::VersionedXcm, - max_weight: ::core::primitive::u64, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - Execute, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? - == [ - 95u8, 48u8, 201u8, 232u8, 83u8, 23u8, 20u8, 126u8, 116u8, 116u8, 176u8, - 206u8, 145u8, 9u8, 155u8, 109u8, 141u8, 226u8, 253u8, 196u8, 37u8, - 230u8, 243u8, 68u8, 39u8, 133u8, 233u8, 108u8, 226u8, 87u8, 5u8, 247u8, - ] - { - let call = Execute { - message: ::std::boxed::Box::new(message), - max_weight, - }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Extoll that a particular destination can be communicated with through a particular"] - #[doc = "version of XCM."] - #[doc = ""] - #[doc = "- `origin`: Must be Root."] - #[doc = "- `location`: The destination that is being described."] - #[doc = "- `xcm_version`: The latest version of XCM that `location` supports."] - pub fn force_xcm_version( - &self, - location: runtime_types::xcm::v1::multilocation::MultiLocation, - xcm_version: ::core::primitive::u32, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - ForceXcmVersion, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self.client.metadata().call_hash::()? 
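
// Sketch only: assembles the `ReserveTransferAssets` call value declared above. The
// versioned XCM arguments are assumed to be constructed elsewhere (their shape is
// runtime-specific and elided here); in practice one would go through
// `TransactionApi::reserve_transfer_assets`, which performs the same boxing plus the
// metadata call-hash check before returning a `SubmittableExtrinsic`.
fn reserve_transfer_call(
    dest: runtime_types::xcm::VersionedMultiLocation,
    beneficiary: runtime_types::xcm::VersionedMultiLocation,
    assets: runtime_types::xcm::VersionedMultiAssets,
    fee_asset_item: ::core::primitive::u32,
) -> ReserveTransferAssets {
    ReserveTransferAssets {
        dest: ::std::boxed::Box::new(dest),
        beneficiary: ::std::boxed::Box::new(beneficiary),
        assets: ::std::boxed::Box::new(assets),
        fee_asset_item,
    }
}
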
- == [ - 32u8, 219u8, 213u8, 152u8, 203u8, 73u8, 121u8, 64u8, 78u8, 53u8, 110u8, - 23u8, 87u8, 93u8, 34u8, 166u8, 205u8, 189u8, 25u8, 160u8, 172u8, 178u8, - 125u8, 182u8, 37u8, 254u8, 220u8, 179u8, 70u8, 252u8, 63u8, 94u8, - ] - { - let call = ForceXcmVersion { - location: ::std::boxed::Box::new(location), - xcm_version, - }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Set a safe XCM version (the version that XCM should be encoded with if the most recent"] - #[doc = "version a destination can accept is unknown)."] - #[doc = ""] - #[doc = "- `origin`: Must be Root."] - #[doc = "- `maybe_xcm_version`: The default XCM encoding version, or `None` to disable."] - pub fn force_default_xcm_version( - &self, - maybe_xcm_version: ::core::option::Option<::core::primitive::u32>, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - ForceDefaultXcmVersion, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .call_hash::()? - == [ - 44u8, 161u8, 28u8, 189u8, 162u8, 221u8, 14u8, 31u8, 8u8, 211u8, 181u8, - 51u8, 197u8, 14u8, 87u8, 198u8, 3u8, 240u8, 90u8, 78u8, 141u8, 131u8, - 205u8, 250u8, 211u8, 150u8, 237u8, 160u8, 239u8, 226u8, 233u8, 29u8, - ] - { - let call = ForceDefaultXcmVersion { maybe_xcm_version }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Ask a location to notify us regarding their XCM version and any changes to it."] - #[doc = ""] - #[doc = "- `origin`: Must be Root."] - #[doc = "- `location`: The location to which we should subscribe for XCM version notifications."] - pub fn force_subscribe_version_notify( - &self, - location: runtime_types::xcm::VersionedMultiLocation, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - ForceSubscribeVersionNotify, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .call_hash::()? - == [ - 41u8, 248u8, 187u8, 195u8, 146u8, 143u8, 0u8, 246u8, 248u8, 38u8, - 128u8, 200u8, 143u8, 149u8, 127u8, 73u8, 3u8, 247u8, 106u8, 6u8, 56u8, - 50u8, 207u8, 234u8, 137u8, 201u8, 16u8, 21u8, 226u8, 148u8, 181u8, - 44u8, - ] - { - let call = ForceSubscribeVersionNotify { - location: ::std::boxed::Box::new(location), - }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Require that a particular destination should no longer notify us regarding any XCM"] - #[doc = "version changes."] - #[doc = ""] - #[doc = "- `origin`: Must be Root."] - #[doc = "- `location`: The location to which we are currently subscribed for XCM version"] - #[doc = " notifications which we no longer desire."] - pub fn force_unsubscribe_version_notify( - &self, - location: runtime_types::xcm::VersionedMultiLocation, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - ForceUnsubscribeVersionNotify, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .call_hash::()? 
- == [ - 150u8, 202u8, 148u8, 13u8, 187u8, 169u8, 5u8, 60u8, 25u8, 144u8, 43u8, - 196u8, 35u8, 215u8, 184u8, 72u8, 143u8, 220u8, 176u8, 27u8, 100u8, - 245u8, 31u8, 243u8, 0u8, 83u8, 165u8, 7u8, 102u8, 172u8, 218u8, 133u8, - ] - { - let call = ForceUnsubscribeVersionNotify { - location: ::std::boxed::Box::new(location), - }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Transfer some assets from the local chain to the sovereign account of a destination"] - #[doc = "chain and forward a notification XCM."] - #[doc = ""] - #[doc = "Fee payment on the destination side is made from the asset in the `assets` vector of"] - #[doc = "index `fee_asset_item`, up to enough to pay for `weight_limit` of weight. If more weight"] - #[doc = "is needed than `weight_limit`, then the operation will fail and the assets send may be"] - #[doc = "at risk."] - #[doc = ""] - #[doc = "- `origin`: Must be capable of withdrawing the `assets` and executing XCM."] - #[doc = "- `dest`: Destination context for the assets. Will typically be `X2(Parent, Parachain(..))` to send"] - #[doc = " from parachain to parachain, or `X1(Parachain(..))` to send from relay to parachain."] - #[doc = "- `beneficiary`: A beneficiary location for the assets in the context of `dest`. Will generally be"] - #[doc = " an `AccountId32` value."] - #[doc = "- `assets`: The assets to be withdrawn. This should include the assets used to pay the fee on the"] - #[doc = " `dest` side."] - #[doc = "- `fee_asset_item`: The index into `assets` of the item which should be used to pay"] - #[doc = " fees."] - #[doc = "- `weight_limit`: The remote-side weight limit, if any, for the XCM fee purchase."] - pub fn limited_reserve_transfer_assets( - &self, - dest: runtime_types::xcm::VersionedMultiLocation, - beneficiary: runtime_types::xcm::VersionedMultiLocation, - assets: runtime_types::xcm::VersionedMultiAssets, - fee_asset_item: ::core::primitive::u32, - weight_limit: runtime_types::xcm::v2::WeightLimit, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - LimitedReserveTransferAssets, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .call_hash::()? - == [ - 242u8, 206u8, 126u8, 164u8, 44u8, 116u8, 181u8, 90u8, 121u8, 124u8, - 120u8, 240u8, 129u8, 217u8, 131u8, 100u8, 248u8, 149u8, 56u8, 154u8, - 35u8, 91u8, 210u8, 118u8, 207u8, 110u8, 42u8, 249u8, 160u8, 155u8, - 251u8, 68u8, - ] - { - let call = LimitedReserveTransferAssets { - dest: ::std::boxed::Box::new(dest), - beneficiary: ::std::boxed::Box::new(beneficiary), - assets: ::std::boxed::Box::new(assets), - fee_asset_item, - weight_limit, - }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = "Teleport some assets from the local chain to some destination chain."] - #[doc = ""] - #[doc = "Fee payment on the destination side is made from the asset in the `assets` vector of"] - #[doc = "index `fee_asset_item`, up to enough to pay for `weight_limit` of weight. If more weight"] - #[doc = "is needed than `weight_limit`, then the operation will fail and the assets send may be"] - #[doc = "at risk."] - #[doc = ""] - #[doc = "- `origin`: Must be capable of withdrawing the `assets` and executing XCM."] - #[doc = "- `dest`: Destination context for the assets. 
Will typically be `X2(Parent, Parachain(..))` to send"] - #[doc = " from parachain to parachain, or `X1(Parachain(..))` to send from relay to parachain."] - #[doc = "- `beneficiary`: A beneficiary location for the assets in the context of `dest`. Will generally be"] - #[doc = " an `AccountId32` value."] - #[doc = "- `assets`: The assets to be withdrawn. The first item should be the currency used to to pay the fee on the"] - #[doc = " `dest` side. May not be empty."] - #[doc = "- `fee_asset_item`: The index into `assets` of the item which should be used to pay"] - #[doc = " fees."] - #[doc = "- `weight_limit`: The remote-side weight limit, if any, for the XCM fee purchase."] - pub fn limited_teleport_assets( - &self, - dest: runtime_types::xcm::VersionedMultiLocation, - beneficiary: runtime_types::xcm::VersionedMultiLocation, - assets: runtime_types::xcm::VersionedMultiAssets, - fee_asset_item: ::core::primitive::u32, - weight_limit: runtime_types::xcm::v2::WeightLimit, - ) -> Result< - ::subxt::SubmittableExtrinsic< - 'a, - T, - X, - LimitedTeleportAssets, - DispatchError, - root_mod::Event, - >, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .call_hash::()? - == [ - 189u8, 233u8, 43u8, 16u8, 158u8, 114u8, 154u8, 233u8, 179u8, 144u8, - 81u8, 179u8, 169u8, 38u8, 4u8, 130u8, 95u8, 237u8, 172u8, 167u8, 2u8, - 169u8, 53u8, 252u8, 159u8, 42u8, 143u8, 216u8, 112u8, 155u8, 48u8, - 129u8, - ] - { - let call = LimitedTeleportAssets { - dest: ::std::boxed::Box::new(dest), - beneficiary: ::std::boxed::Box::new(beneficiary), - assets: ::std::boxed::Box::new(assets), - fee_asset_item, - weight_limit, - }; - Ok(::subxt::SubmittableExtrinsic::new(self.client, call)) - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - pub type Event = runtime_types::pallet_xcm::pallet::Event; - pub mod events { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "Execution of an XCM message was attempted."] - #[doc = ""] - #[doc = "\\[ outcome \\]"] - pub struct Attempted(pub runtime_types::xcm::v2::traits::Outcome); - impl ::subxt::Event for Attempted { - const PALLET: &'static str = "XcmPallet"; - const EVENT: &'static str = "Attempted"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "A XCM message was sent."] - #[doc = ""] - #[doc = "\\[ origin, destination, message \\]"] - pub struct Sent( - pub runtime_types::xcm::v1::multilocation::MultiLocation, - pub runtime_types::xcm::v1::multilocation::MultiLocation, - pub runtime_types::xcm::v2::Xcm, - ); - impl ::subxt::Event for Sent { - const PALLET: &'static str = "XcmPallet"; - const EVENT: &'static str = "Sent"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "Query response received which does not match a registered query. 
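
// Sketch: every generated event struct above implements `::subxt::Event`, which is how
// subxt locates and decodes them from a block's events. A hypothetical filter for XCM
// execution results only needs the associated constants:
fn is_xcm_attempted(pallet: &str, variant: &str) -> bool {
    pallet == <Attempted as ::subxt::Event>::PALLET
        && variant == <Attempted as ::subxt::Event>::EVENT
}
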
This may be because a"] - #[doc = "matching query was never registered, it may be because it is a duplicate response, or"] - #[doc = "because the query timed out."] - #[doc = ""] - #[doc = "\\[ origin location, id \\]"] - pub struct UnexpectedResponse( - pub runtime_types::xcm::v1::multilocation::MultiLocation, - pub ::core::primitive::u64, - ); - impl ::subxt::Event for UnexpectedResponse { - const PALLET: &'static str = "XcmPallet"; - const EVENT: &'static str = "UnexpectedResponse"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "Query response has been received and is ready for taking with `take_response`. There is"] - #[doc = "no registered notification call."] - #[doc = ""] - #[doc = "\\[ id, response \\]"] - pub struct ResponseReady( - pub ::core::primitive::u64, - pub runtime_types::xcm::v2::Response, - ); - impl ::subxt::Event for ResponseReady { - const PALLET: &'static str = "XcmPallet"; - const EVENT: &'static str = "ResponseReady"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "Query response has been received and query is removed. The registered notification has"] - #[doc = "been dispatched and executed successfully."] - #[doc = ""] - #[doc = "\\[ id, pallet index, call index \\]"] - pub struct Notified( - pub ::core::primitive::u64, - pub ::core::primitive::u8, - pub ::core::primitive::u8, - ); - impl ::subxt::Event for Notified { - const PALLET: &'static str = "XcmPallet"; - const EVENT: &'static str = "Notified"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "Query response has been received and query is removed. The registered notification could"] - #[doc = "not be dispatched because the dispatch weight is greater than the maximum weight"] - #[doc = "originally budgeted by this runtime for the query result."] - #[doc = ""] - #[doc = "\\[ id, pallet index, call index, actual weight, max budgeted weight \\]"] - pub struct NotifyOverweight( - pub ::core::primitive::u64, - pub ::core::primitive::u8, - pub ::core::primitive::u8, - pub ::core::primitive::u64, - pub ::core::primitive::u64, - ); - impl ::subxt::Event for NotifyOverweight { - const PALLET: &'static str = "XcmPallet"; - const EVENT: &'static str = "NotifyOverweight"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "Query response has been received and query is removed. There was a general error with"] - #[doc = "dispatching the notification call."] - #[doc = ""] - #[doc = "\\[ id, pallet index, call index \\]"] - pub struct NotifyDispatchError( - pub ::core::primitive::u64, - pub ::core::primitive::u8, - pub ::core::primitive::u8, - ); - impl ::subxt::Event for NotifyDispatchError { - const PALLET: &'static str = "XcmPallet"; - const EVENT: &'static str = "NotifyDispatchError"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "Query response has been received and query is removed. 
The dispatch was unable to be"] - #[doc = "decoded into a `Call`; this might be due to dispatch function having a signature which"] - #[doc = "is not `(origin, QueryId, Response)`."] - #[doc = ""] - #[doc = "\\[ id, pallet index, call index \\]"] - pub struct NotifyDecodeFailed( - pub ::core::primitive::u64, - pub ::core::primitive::u8, - pub ::core::primitive::u8, - ); - impl ::subxt::Event for NotifyDecodeFailed { - const PALLET: &'static str = "XcmPallet"; - const EVENT: &'static str = "NotifyDecodeFailed"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "Expected query response has been received but the origin location of the response does"] - #[doc = "not match that expected. The query remains registered for a later, valid, response to"] - #[doc = "be received and acted upon."] - #[doc = ""] - #[doc = "\\[ origin location, id, expected location \\]"] - pub struct InvalidResponder( - pub runtime_types::xcm::v1::multilocation::MultiLocation, - pub ::core::primitive::u64, - pub ::core::option::Option, - ); - impl ::subxt::Event for InvalidResponder { - const PALLET: &'static str = "XcmPallet"; - const EVENT: &'static str = "InvalidResponder"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "Expected query response has been received but the expected origin location placed in"] - #[doc = "storage by this runtime previously cannot be decoded. The query remains registered."] - #[doc = ""] - #[doc = "This is unexpected (since a location placed in storage in a previously executing"] - #[doc = "runtime should be readable prior to query timeout) and dangerous since the possibly"] - #[doc = "valid response will be dropped. Manual governance intervention is probably going to be"] - #[doc = "needed."] - #[doc = ""] - #[doc = "\\[ origin location, id \\]"] - pub struct InvalidResponderVersion( - pub runtime_types::xcm::v1::multilocation::MultiLocation, - pub ::core::primitive::u64, - ); - impl ::subxt::Event for InvalidResponderVersion { - const PALLET: &'static str = "XcmPallet"; - const EVENT: &'static str = "InvalidResponderVersion"; - } - #[derive( - :: subxt :: codec :: CompactAs, - :: subxt :: codec :: Decode, - :: subxt :: codec :: Encode, - Debug, - )] - #[doc = "Received query response has been read and removed."] - #[doc = ""] - #[doc = "\\[ id \\]"] - pub struct ResponseTaken(pub ::core::primitive::u64); - impl ::subxt::Event for ResponseTaken { - const PALLET: &'static str = "XcmPallet"; - const EVENT: &'static str = "ResponseTaken"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "Some assets have been placed in an asset trap."] - #[doc = ""] - #[doc = "\\[ hash, origin, assets \\]"] - pub struct AssetsTrapped( - pub ::subxt::sp_core::H256, - pub runtime_types::xcm::v1::multilocation::MultiLocation, - pub runtime_types::xcm::VersionedMultiAssets, - ); - impl ::subxt::Event for AssetsTrapped { - const PALLET: &'static str = "XcmPallet"; - const EVENT: &'static str = "AssetsTrapped"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "An XCM version change notification message has been attempted to be sent."] - #[doc = ""] - #[doc = "\\[ destination, result \\]"] - pub struct VersionChangeNotified( - pub runtime_types::xcm::v1::multilocation::MultiLocation, - pub ::core::primitive::u32, - ); - impl ::subxt::Event for VersionChangeNotified { - const PALLET: &'static str = "XcmPallet"; - const EVENT: &'static str = 
"VersionChangeNotified"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "The supported version of a location has been changed. This might be through an"] - #[doc = "automatic notification or a manual intervention."] - #[doc = ""] - #[doc = "\\[ location, XCM version \\]"] - pub struct SupportedVersionChanged( - pub runtime_types::xcm::v1::multilocation::MultiLocation, - pub ::core::primitive::u32, - ); - impl ::subxt::Event for SupportedVersionChanged { - const PALLET: &'static str = "XcmPallet"; - const EVENT: &'static str = "SupportedVersionChanged"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "A given location which had a version change subscription was dropped owing to an error"] - #[doc = "sending the notification to it."] - #[doc = ""] - #[doc = "\\[ location, query ID, error \\]"] - pub struct NotifyTargetSendFail( - pub runtime_types::xcm::v1::multilocation::MultiLocation, - pub ::core::primitive::u64, - pub runtime_types::xcm::v2::traits::Error, - ); - impl ::subxt::Event for NotifyTargetSendFail { - const PALLET: &'static str = "XcmPallet"; - const EVENT: &'static str = "NotifyTargetSendFail"; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - #[doc = "A given location which had a version change subscription was dropped owing to an error"] - #[doc = "migrating the location to our new XCM format."] - #[doc = ""] - #[doc = "\\[ location, query ID \\]"] - pub struct NotifyTargetMigrationFail( - pub runtime_types::xcm::VersionedMultiLocation, - pub ::core::primitive::u64, - ); - impl ::subxt::Event for NotifyTargetMigrationFail { - const PALLET: &'static str = "XcmPallet"; - const EVENT: &'static str = "NotifyTargetMigrationFail"; - } - } - pub mod storage { - use super::runtime_types; - pub struct QueryCounter; - impl ::subxt::StorageEntry for QueryCounter { - const PALLET: &'static str = "XcmPallet"; - const STORAGE: &'static str = "QueryCounter"; - type Value = ::core::primitive::u64; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct Queries<'a>(pub &'a ::core::primitive::u64); - impl ::subxt::StorageEntry for Queries<'_> { - const PALLET: &'static str = "XcmPallet"; - const STORAGE: &'static str = "Queries"; - type Value = runtime_types::pallet_xcm::pallet::QueryStatus<::core::primitive::u32>; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( - &self.0, - ::subxt::StorageHasher::Blake2_128Concat, - )]) - } - } - pub struct AssetTraps<'a>(pub &'a ::subxt::sp_core::H256); - impl ::subxt::StorageEntry for AssetTraps<'_> { - const PALLET: &'static str = "XcmPallet"; - const STORAGE: &'static str = "AssetTraps"; - type Value = ::core::primitive::u32; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Map(vec![::subxt::StorageMapKey::new( - &self.0, - ::subxt::StorageHasher::Identity, - )]) - } - } - pub struct SafeXcmVersion; - impl ::subxt::StorageEntry for SafeXcmVersion { - const PALLET: &'static str = "XcmPallet"; - const STORAGE: &'static str = "SafeXcmVersion"; - type Value = ::core::primitive::u32; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct SupportedVersion<'a>( - pub &'a ::core::primitive::u32, - pub &'a runtime_types::xcm::VersionedMultiLocation, - ); - impl ::subxt::StorageEntry for SupportedVersion<'_> { - const PALLET: &'static str = "XcmPallet"; - const 
STORAGE: &'static str = "SupportedVersion"; - type Value = ::core::primitive::u32; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Map(vec![ - ::subxt::StorageMapKey::new(&self.0, ::subxt::StorageHasher::Twox64Concat), - ::subxt::StorageMapKey::new( - &self.1, - ::subxt::StorageHasher::Blake2_128Concat, - ), - ]) - } - } - pub struct VersionNotifiers<'a>( - pub &'a ::core::primitive::u32, - pub &'a runtime_types::xcm::VersionedMultiLocation, - ); - impl ::subxt::StorageEntry for VersionNotifiers<'_> { - const PALLET: &'static str = "XcmPallet"; - const STORAGE: &'static str = "VersionNotifiers"; - type Value = ::core::primitive::u64; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Map(vec![ - ::subxt::StorageMapKey::new(&self.0, ::subxt::StorageHasher::Twox64Concat), - ::subxt::StorageMapKey::new( - &self.1, - ::subxt::StorageHasher::Blake2_128Concat, - ), - ]) - } - } - pub struct VersionNotifyTargets<'a>( - pub &'a ::core::primitive::u32, - pub &'a runtime_types::xcm::VersionedMultiLocation, - ); - impl ::subxt::StorageEntry for VersionNotifyTargets<'_> { - const PALLET: &'static str = "XcmPallet"; - const STORAGE: &'static str = "VersionNotifyTargets"; - type Value = ( - ::core::primitive::u64, - ::core::primitive::u64, - ::core::primitive::u32, - ); - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Map(vec![ - ::subxt::StorageMapKey::new(&self.0, ::subxt::StorageHasher::Twox64Concat), - ::subxt::StorageMapKey::new( - &self.1, - ::subxt::StorageHasher::Blake2_128Concat, - ), - ]) - } - } - pub struct VersionDiscoveryQueue; - impl ::subxt::StorageEntry for VersionDiscoveryQueue { - const PALLET: &'static str = "XcmPallet"; - const STORAGE: &'static str = "VersionDiscoveryQueue"; - type Value = runtime_types::frame_support::storage::bounded_vec::BoundedVec<( - runtime_types::xcm::VersionedMultiLocation, - ::core::primitive::u32, - )>; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct CurrentMigration; - impl ::subxt::StorageEntry for CurrentMigration { - const PALLET: &'static str = "XcmPallet"; - const STORAGE: &'static str = "CurrentMigration"; - type Value = runtime_types::pallet_xcm::pallet::VersionMigrationStage; - fn key(&self) -> ::subxt::StorageEntryKey { - ::subxt::StorageEntryKey::Plain - } - } - pub struct StorageApi<'a, T: ::subxt::Config> { - client: &'a ::subxt::Client, - } - impl<'a, T: ::subxt::Config> StorageApi<'a, T> { - pub fn new(client: &'a ::subxt::Client) -> Self { - Self { client } - } - #[doc = " The latest available query index."] - pub async fn query_counter( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result<::core::primitive::u64, ::subxt::BasicError> - { - if self.client.metadata().storage_hash::()? 
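[Editor's note] The storage entries above declare their key hashers (`Twox64Concat` for the XCM version, `Blake2_128Concat` for the versioned location), and the final storage key is the concatenation of a pallet/item prefix and each hashed key. The sketch below shows only that layout; `twox_64` and `blake2_128` are placeholders for the real hash functions, which live in external crates.

// Rough sketch of how a double-map storage key is assembled.
fn twox_64(_data: &[u8]) -> [u8; 8] {
    [0u8; 8] // placeholder, not the real xxHash64
}

fn blake2_128(_data: &[u8]) -> [u8; 16] {
    [0u8; 16] // placeholder, not the real Blake2b-128
}

// Twox64Concat: 8-byte hash followed by the original encoding.
fn twox_64_concat(encoded: &[u8]) -> Vec<u8> {
    let mut out = twox_64(encoded).to_vec();
    out.extend_from_slice(encoded);
    out
}

// Blake2_128Concat: 16-byte hash followed by the original encoding.
fn blake2_128_concat(encoded: &[u8]) -> Vec<u8> {
    let mut out = blake2_128(encoded).to_vec();
    out.extend_from_slice(encoded);
    out
}

// Final key: prefix ++ hasher1(key1) ++ hasher2(key2), where the prefix is
// derived from the pallet and item names ("XcmPallet", "SupportedVersion").
fn supported_version_key(prefix: &[u8], xcm_version: &[u8], location: &[u8]) -> Vec<u8> {
    let mut key = prefix.to_vec();
    key.extend_from_slice(&twox_64_concat(xcm_version));
    key.extend_from_slice(&blake2_128_concat(location));
    key
}

fn main() {
    let key = supported_version_key(b"<prefix>", &2u32.to_le_bytes(), b"<scale-encoded location>");
    assert!(key.len() > 8 + 16);
}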
- == [ - 137u8, 58u8, 184u8, 88u8, 247u8, 22u8, 151u8, 64u8, 50u8, 77u8, 49u8, - 10u8, 234u8, 84u8, 213u8, 156u8, 26u8, 200u8, 214u8, 225u8, 125u8, - 231u8, 42u8, 93u8, 159u8, 168u8, 86u8, 201u8, 116u8, 153u8, 41u8, - 127u8, - ] - { - let entry = QueryCounter; - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The ongoing queries."] - pub async fn queries( - &self, - _0: &::core::primitive::u64, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::core::option::Option< - runtime_types::pallet_xcm::pallet::QueryStatus<::core::primitive::u32>, - >, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 47u8, 241u8, 126u8, 71u8, 203u8, 121u8, 171u8, 226u8, 89u8, 17u8, 61u8, - 198u8, 123u8, 73u8, 20u8, 197u8, 6u8, 23u8, 34u8, 127u8, 89u8, 35u8, - 49u8, 101u8, 110u8, 15u8, 206u8, 203u8, 155u8, 93u8, 0u8, 97u8, - ] - { - let entry = Queries(_0); - self.client.storage().fetch(&entry, block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The ongoing queries."] - pub async fn queries_iter( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result<::subxt::KeyIter<'a, T, Queries<'a>>, ::subxt::BasicError> - { - if self.client.metadata().storage_hash::()? - == [ - 47u8, 241u8, 126u8, 71u8, 203u8, 121u8, 171u8, 226u8, 89u8, 17u8, 61u8, - 198u8, 123u8, 73u8, 20u8, 197u8, 6u8, 23u8, 34u8, 127u8, 89u8, 35u8, - 49u8, 101u8, 110u8, 15u8, 206u8, 203u8, 155u8, 93u8, 0u8, 97u8, - ] - { - self.client.storage().iter(block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The existing asset traps."] - #[doc = ""] - #[doc = " Key is the blake2 256 hash of (origin, versioned `MultiAssets`) pair. Value is the number of"] - #[doc = " times this pair has been trapped (usually just 1 if it exists at all)."] - pub async fn asset_traps( - &self, - _0: &::subxt::sp_core::H256, - block_hash: ::core::option::Option, - ) -> ::core::result::Result<::core::primitive::u32, ::subxt::BasicError> - { - if self.client.metadata().storage_hash::()? - == [ - 89u8, 0u8, 237u8, 90u8, 95u8, 21u8, 165u8, 163u8, 148u8, 203u8, 155u8, - 222u8, 180u8, 219u8, 220u8, 114u8, 179u8, 228u8, 1u8, 220u8, 169u8, - 43u8, 38u8, 12u8, 88u8, 159u8, 181u8, 206u8, 221u8, 197u8, 35u8, 150u8, - ] - { - let entry = AssetTraps(_0); - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The existing asset traps."] - #[doc = ""] - #[doc = " Key is the blake2 256 hash of (origin, versioned `MultiAssets`) pair. Value is the number of"] - #[doc = " times this pair has been trapped (usually just 1 if it exists at all)."] - pub async fn asset_traps_iter( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::subxt::KeyIter<'a, T, AssetTraps<'a>>, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? 
- == [ - 89u8, 0u8, 237u8, 90u8, 95u8, 21u8, 165u8, 163u8, 148u8, 203u8, 155u8, - 222u8, 180u8, 219u8, 220u8, 114u8, 179u8, 228u8, 1u8, 220u8, 169u8, - 43u8, 38u8, 12u8, 88u8, 159u8, 181u8, 206u8, 221u8, 197u8, 35u8, 150u8, - ] - { - self.client.storage().iter(block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Default version to encode XCM when latest version of destination is unknown. If `None`,"] - #[doc = " then the destinations whose XCM version is unknown are considered unreachable."] - pub async fn safe_xcm_version( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::core::option::Option<::core::primitive::u32>, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 1u8, 223u8, 218u8, 204u8, 222u8, 129u8, 137u8, 237u8, 197u8, 142u8, - 233u8, 66u8, 229u8, 153u8, 138u8, 222u8, 113u8, 164u8, 135u8, 213u8, - 233u8, 34u8, 24u8, 23u8, 215u8, 59u8, 40u8, 188u8, 45u8, 244u8, 205u8, - 199u8, - ] - { - let entry = SafeXcmVersion; - self.client.storage().fetch(&entry, block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The Latest versions that we know various locations support."] - pub async fn supported_version( - &self, - _0: &::core::primitive::u32, - _1: &runtime_types::xcm::VersionedMultiLocation, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::core::option::Option<::core::primitive::u32>, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 231u8, 202u8, 129u8, 82u8, 121u8, 63u8, 67u8, 57u8, 191u8, 190u8, 25u8, - 27u8, 219u8, 42u8, 180u8, 142u8, 71u8, 119u8, 212u8, 211u8, 21u8, 11u8, - 8u8, 7u8, 9u8, 243u8, 11u8, 117u8, 66u8, 47u8, 246u8, 85u8, - ] - { - let entry = SupportedVersion(_0, _1); - self.client.storage().fetch(&entry, block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The Latest versions that we know various locations support."] - pub async fn supported_version_iter( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::subxt::KeyIter<'a, T, SupportedVersion<'a>>, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 231u8, 202u8, 129u8, 82u8, 121u8, 63u8, 67u8, 57u8, 191u8, 190u8, 25u8, - 27u8, 219u8, 42u8, 180u8, 142u8, 71u8, 119u8, 212u8, 211u8, 21u8, 11u8, - 8u8, 7u8, 9u8, 243u8, 11u8, 117u8, 66u8, 47u8, 246u8, 85u8, - ] - { - self.client.storage().iter(block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " All locations that we have requested version notifications from."] - pub async fn version_notifiers( - &self, - _0: &::core::primitive::u32, - _1: &runtime_types::xcm::VersionedMultiLocation, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::core::option::Option<::core::primitive::u64>, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? 
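[Editor's note] The generated accessors come in two flavours: `fetch`, which surfaces a missing key as `None` (as `queries` does), and `fetch_or_default`, which falls back to the value type's default (as `query_counter` does for its u64 counter). A hypothetical, in-memory illustration of that difference, with SCALE decoding stubbed out:

use std::collections::HashMap;

struct Storage {
    entries: HashMap<Vec<u8>, Vec<u8>>, // raw key -> encoded value (stubbed)
}

impl Storage {
    // Mirrors `fetch`: absent keys are reported as `None`.
    fn fetch(&self, key: &[u8]) -> Option<&Vec<u8>> {
        self.entries.get(key)
    }

    // Mirrors `fetch_or_default`: absent keys decode to the default value.
    fn fetch_or_default_u64(&self, key: &[u8]) -> u64 {
        self.fetch(key)
            .and_then(|raw| raw.as_slice().try_into().ok().map(u64::from_le_bytes))
            .unwrap_or_default()
    }
}

fn main() {
    let storage = Storage { entries: HashMap::new() };
    assert_eq!(storage.fetch(b"XcmPallet/Queries/0"), None);
    assert_eq!(storage.fetch_or_default_u64(b"XcmPallet/QueryCounter"), 0);
}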
- == [ - 126u8, 49u8, 13u8, 135u8, 137u8, 68u8, 248u8, 211u8, 160u8, 160u8, - 93u8, 128u8, 157u8, 230u8, 62u8, 119u8, 191u8, 51u8, 147u8, 149u8, - 60u8, 227u8, 154u8, 97u8, 244u8, 249u8, 0u8, 220u8, 189u8, 92u8, 178u8, - 149u8, - ] - { - let entry = VersionNotifiers(_0, _1); - self.client.storage().fetch(&entry, block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " All locations that we have requested version notifications from."] - pub async fn version_notifiers_iter( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::subxt::KeyIter<'a, T, VersionNotifiers<'a>>, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 126u8, 49u8, 13u8, 135u8, 137u8, 68u8, 248u8, 211u8, 160u8, 160u8, - 93u8, 128u8, 157u8, 230u8, 62u8, 119u8, 191u8, 51u8, 147u8, 149u8, - 60u8, 227u8, 154u8, 97u8, 244u8, 249u8, 0u8, 220u8, 189u8, 92u8, 178u8, - 149u8, - ] - { - self.client.storage().iter(block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The target locations that are subscribed to our version changes, as well as the most recent"] - #[doc = " of our versions we informed them of."] - pub async fn version_notify_targets( - &self, - _0: &::core::primitive::u32, - _1: &runtime_types::xcm::VersionedMultiLocation, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::core::option::Option<( - ::core::primitive::u64, - ::core::primitive::u64, - ::core::primitive::u32, - )>, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .storage_hash::()? - == [ - 251u8, 128u8, 243u8, 94u8, 162u8, 11u8, 206u8, 101u8, 33u8, 24u8, - 163u8, 157u8, 112u8, 50u8, 91u8, 155u8, 241u8, 73u8, 77u8, 185u8, - 231u8, 3u8, 220u8, 161u8, 36u8, 208u8, 116u8, 183u8, 80u8, 38u8, 56u8, - 104u8, - ] - { - let entry = VersionNotifyTargets(_0, _1); - self.client.storage().fetch(&entry, block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The target locations that are subscribed to our version changes, as well as the most recent"] - #[doc = " of our versions we informed them of."] - pub async fn version_notify_targets_iter( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::subxt::KeyIter<'a, T, VersionNotifyTargets<'a>>, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .storage_hash::()? - == [ - 251u8, 128u8, 243u8, 94u8, 162u8, 11u8, 206u8, 101u8, 33u8, 24u8, - 163u8, 157u8, 112u8, 50u8, 91u8, 155u8, 241u8, 73u8, 77u8, 185u8, - 231u8, 3u8, 220u8, 161u8, 36u8, 208u8, 116u8, 183u8, 80u8, 38u8, 56u8, - 104u8, - ] - { - self.client.storage().iter(block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " Destinations whose latest XCM version we would like to know. Duplicates not allowed, and"] - #[doc = " the `u32` counter is the number of times that a send to the destination has been attempted,"] - #[doc = " which is used as a prioritization."] - pub async fn version_discovery_queue( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - runtime_types::frame_support::storage::bounded_vec::BoundedVec<( - runtime_types::xcm::VersionedMultiLocation, - ::core::primitive::u32, - )>, - ::subxt::BasicError, - > { - if self - .client - .metadata() - .storage_hash::()? 
- == [ - 45u8, 28u8, 29u8, 233u8, 239u8, 65u8, 24u8, 214u8, 153u8, 189u8, 132u8, - 235u8, 62u8, 197u8, 252u8, 56u8, 38u8, 97u8, 13u8, 16u8, 149u8, 25u8, - 252u8, 181u8, 206u8, 54u8, 250u8, 133u8, 133u8, 74u8, 186u8, 22u8, - ] - { - let entry = VersionDiscoveryQueue; - self.client - .storage() - .fetch_or_default(&entry, block_hash) - .await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - #[doc = " The current migration's stage, if any."] - pub async fn current_migration( - &self, - block_hash: ::core::option::Option, - ) -> ::core::result::Result< - ::core::option::Option< - runtime_types::pallet_xcm::pallet::VersionMigrationStage, - >, - ::subxt::BasicError, - > { - if self.client.metadata().storage_hash::()? - == [ - 228u8, 254u8, 240u8, 20u8, 92u8, 79u8, 40u8, 65u8, 176u8, 111u8, 243u8, - 168u8, 238u8, 147u8, 247u8, 170u8, 185u8, 107u8, 58u8, 54u8, 224u8, - 222u8, 141u8, 113u8, 95u8, 92u8, 17u8, 69u8, 162u8, 242u8, 245u8, 95u8, - ] - { - let entry = CurrentMigration; - self.client.storage().fetch(&entry, block_hash).await - } else { - Err(::subxt::MetadataError::IncompatibleMetadata.into()) - } - } - } - } - } - pub mod runtime_types { - use super::runtime_types; - pub mod beefy_primitives { - use super::runtime_types; - pub mod crypto { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct Public(pub runtime_types::sp_core::ecdsa::Public); - } - pub mod mmr { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct BeefyNextAuthoritySet<_0> { - pub id: ::core::primitive::u64, - pub len: ::core::primitive::u32, - pub root: _0, - } - } - } - pub mod bitvec { - use super::runtime_types; - pub mod order { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct Lsb0; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct Msb0; - } - } - pub mod bp_header_chain { - use super::runtime_types; - pub mod justification { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct GrandpaJustification<_0> { - pub round: ::core::primitive::u64, - pub commit: runtime_types::finality_grandpa::Commit< - ::subxt::sp_core::H256, - ::core::primitive::u32, - runtime_types::sp_finality_grandpa::app::Signature, - runtime_types::sp_finality_grandpa::app::Public, - >, - pub votes_ancestries: ::std::vec::Vec<_0>, - } - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct AuthoritySet { - pub authorities: ::std::vec::Vec<( - runtime_types::sp_finality_grandpa::app::Public, - ::core::primitive::u64, - )>, - pub set_id: ::core::primitive::u64, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct InitializationData<_0> { - pub header: ::std::boxed::Box<_0>, - pub authority_list: ::std::vec::Vec<( - runtime_types::sp_finality_grandpa::app::Public, - ::core::primitive::u64, - )>, - pub set_id: ::core::primitive::u64, - pub is_halted: ::core::primitive::bool, - } - } - pub mod bp_message_dispatch { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum CallOrigin<_0, _1, _2> { - #[codec(index = 0)] - SourceRoot, - #[codec(index = 1)] - TargetAccount(_0, _1, _2), - #[codec(index = 2)] - SourceAccount(_0), - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: 
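[Editor's note] `BeefyNextAuthoritySet` above carries the id, length and merkle root of the authority set that will sign after the next handoff, which is the data a BEEFY light client tracks. The sketch below is one plausible consistency check on such a handoff, shown purely to illustrate the data shape; it is not the crate's actual validation logic, and all names are hypothetical.

#[derive(Clone, Debug, PartialEq)]
struct BeefyAuthoritySet {
    id: u64,
    len: u32,
    root: [u8; 32], // merkle root of the authority public keys
}

struct TrackedAuthorities {
    current: BeefyAuthoritySet,
    next: BeefyAuthoritySet,
}

impl TrackedAuthorities {
    // Accept a handoff only if the new "next" set advances the id by one;
    // the previously tracked "next" set becomes the current one.
    fn rotate(&mut self, new_next: BeefyAuthoritySet) -> Result<(), &'static str> {
        if new_next.id != self.next.id + 1 {
            return Err("next authority set id must increase by exactly one");
        }
        self.current = self.next.clone();
        self.next = new_next;
        Ok(())
    }
}

fn main() {
    let mut tracked = TrackedAuthorities {
        current: BeefyAuthoritySet { id: 0, len: 5, root: [0; 32] },
        next: BeefyAuthoritySet { id: 1, len: 5, root: [1; 32] },
    };
    assert!(tracked.rotate(BeefyAuthoritySet { id: 2, len: 5, root: [2; 32] }).is_ok());
    assert_eq!(tracked.current.id, 1);
}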
Encode, Debug)] - pub struct MessagePayload<_0, _1, _2, _3> { - pub spec_version: ::core::primitive::u32, - pub weight: ::core::primitive::u64, - pub origin: runtime_types::bp_message_dispatch::CallOrigin<_0, _1, _2>, - pub dispatch_fee_payment: runtime_types::bp_runtime::messages::DispatchFeePayment, - pub call: _3, - } - } - pub mod bp_messages { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct DeliveredMessages { - pub begin: ::core::primitive::u64, - pub end: ::core::primitive::u64, - pub dispatch_results: ::subxt::bitvec::vec::BitVec< - ::core::primitive::u8, - ::subxt::bitvec::order::Msb0, - >, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct InboundLaneData<_0> { - pub relayers: ::std::vec::Vec>, - pub last_confirmed_nonce: ::core::primitive::u64, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct MessageData<_0> { - pub payload: ::std::vec::Vec<::core::primitive::u8>, - pub fee: _0, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct MessageKey { - pub lane_id: [::core::primitive::u8; 4usize], - pub nonce: ::core::primitive::u64, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum OperatingMode { - #[codec(index = 0)] - Normal, - #[codec(index = 1)] - RejectingOutboundMessages, - #[codec(index = 2)] - Halted, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct OutboundLaneData { - pub oldest_unpruned_nonce: ::core::primitive::u64, - pub latest_received_nonce: ::core::primitive::u64, - pub latest_generated_nonce: ::core::primitive::u64, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct UnrewardedRelayer<_0> { - pub relayer: _0, - pub messages: runtime_types::bp_messages::DeliveredMessages, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct UnrewardedRelayersState { - pub unrewarded_relayer_entries: ::core::primitive::u64, - pub messages_in_oldest_entry: ::core::primitive::u64, - pub total_messages: ::core::primitive::u64, - } - } - pub mod bp_runtime { - use super::runtime_types; - pub mod messages { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum DispatchFeePayment { - #[codec(index = 0)] - AtSourceChain, - #[codec(index = 1)] - AtTargetChain, - } - } - } - pub mod bridge_runtime_common { - use super::runtime_types; - pub mod messages { - use super::runtime_types; - pub mod source { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct FromBridgedChainMessagesDeliveryProof<_0> { - pub bridged_header_hash: _0, - pub storage_proof: ::std::vec::Vec<::std::vec::Vec<::core::primitive::u8>>, - pub lane: [::core::primitive::u8; 4usize], - } - } - pub mod target { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct FromBridgedChainMessagesProof<_0> { - pub bridged_header_hash: _0, - pub storage_proof: ::std::vec::Vec<::std::vec::Vec<::core::primitive::u8>>, - pub lane: [::core::primitive::u8; 4usize], - pub nonces_start: ::core::primitive::u64, - pub nonces_end: ::core::primitive::u64, - } - } - } - } - pub mod finality_grandpa { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - 
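[Editor's note] The bridge message types above identify a message by a 4-byte lane id plus a monotonically increasing nonce, and `DeliveredMessages` records an inclusive begin..end nonce range. A small, hypothetical helper showing how such a range would be consulted:

#[derive(Debug, PartialEq, Eq, Hash)]
struct MessageKey {
    lane_id: [u8; 4],
    nonce: u64,
}

struct DeliveredMessages {
    begin: u64,
    end: u64,
}

impl DeliveredMessages {
    // True when the given nonce falls inside the delivered range.
    fn contains(&self, nonce: u64) -> bool {
        (self.begin..=self.end).contains(&nonce)
    }
}

fn main() {
    let delivered = DeliveredMessages { begin: 10, end: 15 };
    let key = MessageKey { lane_id: *b"roco", nonce: 12 }; // illustrative lane id
    assert_eq!(key.lane_id, *b"roco");
    assert!(delivered.contains(key.nonce));
    assert!(!delivered.contains(42));
}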
pub struct Commit<_0, _1, _2, _3> { - pub target_hash: _0, - pub target_number: _1, - pub precommits: ::std::vec::Vec< - runtime_types::finality_grandpa::SignedPrecommit<_0, _1, _2, _3>, - >, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct Equivocation<_0, _1, _2> { - pub round_number: ::core::primitive::u64, - pub identity: _0, - pub first: (_1, _2), - pub second: (_1, _2), - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct Precommit<_0, _1> { - pub target_hash: _0, - pub target_number: _1, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct Prevote<_0, _1> { - pub target_hash: _0, - pub target_number: _1, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct SignedPrecommit<_0, _1, _2, _3> { - pub precommit: runtime_types::finality_grandpa::Precommit<_0, _1>, - pub signature: _2, - pub id: _3, - } - } - pub mod frame_support { - use super::runtime_types; - pub mod dispatch { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum RawOrigin<_0> { - #[codec(index = 0)] - Root, - #[codec(index = 1)] - Signed(_0), - #[codec(index = 2)] - None, - } - } - pub mod storage { - use super::runtime_types; - pub mod bounded_vec { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct BoundedVec<_0>(pub ::std::vec::Vec<_0>); - } - pub mod weak_bounded_vec { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct WeakBoundedVec<_0>(pub ::std::vec::Vec<_0>); - } - } - pub mod traits { - use super::runtime_types; - pub mod misc { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct WrapperKeepOpaque<_0>( - #[codec(compact)] pub ::core::primitive::u32, - pub _0, - ); - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct WrapperOpaque<_0>( - #[codec(compact)] pub ::core::primitive::u32, - pub _0, - ); - } - pub mod tokens { - use super::runtime_types; - pub mod misc { - use super::runtime_types; - #[derive( - :: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug, - )] - pub enum BalanceStatus { - #[codec(index = 0)] - Free, - #[codec(index = 1)] - Reserved, - } - } - } - } - pub mod weights { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum DispatchClass { - #[codec(index = 0)] - Normal, - #[codec(index = 1)] - Operational, - #[codec(index = 2)] - Mandatory, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct DispatchInfo { - pub weight: ::core::primitive::u64, - pub class: runtime_types::frame_support::weights::DispatchClass, - pub pays_fee: runtime_types::frame_support::weights::Pays, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Pays { - #[codec(index = 0)] - Yes, - #[codec(index = 1)] - No, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct PerDispatchClass<_0> { - pub normal: _0, - pub operational: _0, - pub mandatory: _0, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct RuntimeDbWeight { - pub read: ::core::primitive::u64, - pub write: ::core::primitive::u64, - } - #[derive(:: subxt :: codec :: Decode, 
:: subxt :: codec :: Encode, Debug)] - pub struct WeightToFeeCoefficient<_0> { - pub coeff_integer: _0, - pub coeff_frac: runtime_types::sp_arithmetic::per_things::Perbill, - pub negative: ::core::primitive::bool, - pub degree: ::core::primitive::u8, - } - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct PalletId(pub [::core::primitive::u8; 8usize]); - } - pub mod frame_system { - use super::runtime_types; - pub mod extensions { - use super::runtime_types; - pub mod check_genesis { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct CheckGenesis; - } - pub mod check_mortality { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct CheckMortality(pub runtime_types::sp_runtime::generic::era::Era); - } - pub mod check_non_zero_sender { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct CheckNonZeroSender; - } - pub mod check_nonce { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct CheckNonce(#[codec(compact)] pub ::core::primitive::u32); - } - pub mod check_spec_version { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct CheckSpecVersion; - } - pub mod check_tx_version { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct CheckTxVersion; - } - pub mod check_weight { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct CheckWeight; - } - } - pub mod limits { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct BlockLength { - pub max: runtime_types::frame_support::weights::PerDispatchClass< - ::core::primitive::u32, - >, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct BlockWeights { - pub base_block: ::core::primitive::u64, - pub max_block: ::core::primitive::u64, - pub per_class: runtime_types::frame_support::weights::PerDispatchClass< - runtime_types::frame_system::limits::WeightsPerClass, - >, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct WeightsPerClass { - pub base_extrinsic: ::core::primitive::u64, - pub max_extrinsic: ::core::option::Option<::core::primitive::u64>, - pub max_total: ::core::option::Option<::core::primitive::u64>, - pub reserved: ::core::option::Option<::core::primitive::u64>, - } - } - pub mod pallet { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Call { - #[codec(index = 0)] - #[doc = "A dispatch that will fill the block weight up to the given ratio."] - fill_block { - ratio: runtime_types::sp_arithmetic::per_things::Perbill, - }, - #[codec(index = 1)] - #[doc = "Make some on-chain remark."] - #[doc = ""] - #[doc = "# "] - #[doc = "- `O(1)`"] - #[doc = "# "] - remark { - remark: ::std::vec::Vec<::core::primitive::u8>, - }, - #[codec(index = 2)] - #[doc = "Set the number of pages in the WebAssembly environment's heap."] - set_heap_pages { pages: ::core::primitive::u64 }, - #[codec(index = 3)] - #[doc = "Set the new runtime code."] - #[doc = ""] - #[doc = "# "] - #[doc = "- `O(C + S)` where `C` length of `code` and `S` complexity of `can_set_code`"] 
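[Editor's note] The `#[codec(index = N)]` attributes above pin each call/event variant to a fixed one-byte discriminant in the SCALE encoding. A hand-rolled sketch of that idea with a trimmed-down call enum (real code derives Encode/Decode from parity-scale-codec instead of matching bytes manually, and Vec fields carry a compact length prefix that is omitted here):

#[derive(Debug, PartialEq)]
enum SystemCall {
    FillBlock,       // index 0 (fields trimmed for brevity)
    Remark(Vec<u8>), // index 1
    SetHeapPages(u64), // index 2
}

fn decode_call(bytes: &[u8]) -> Option<SystemCall> {
    let (&index, rest) = bytes.split_first()?;
    match index {
        0 => Some(SystemCall::FillBlock),
        1 => Some(SystemCall::Remark(rest.to_vec())), // length prefix omitted
        2 => Some(SystemCall::SetHeapPages(u64::from_le_bytes(rest.try_into().ok()?))),
        _ => None,
    }
}

fn main() {
    let mut encoded = vec![2u8];
    encoded.extend_from_slice(&5u64.to_le_bytes());
    assert_eq!(decode_call(&encoded), Some(SystemCall::SetHeapPages(5)));
}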
- #[doc = "- 1 call to `can_set_code`: `O(S)` (calls `sp_io::misc::runtime_version` which is"] - #[doc = " expensive)."] - #[doc = "- 1 storage write (codec `O(C)`)."] - #[doc = "- 1 digest item."] - #[doc = "- 1 event."] - #[doc = "The weight of this function is dependent on the runtime, but generally this is very"] - #[doc = "expensive. We will treat this as a full block."] - #[doc = "# "] - set_code { - code: ::std::vec::Vec<::core::primitive::u8>, - }, - #[codec(index = 4)] - #[doc = "Set the new runtime code without doing any checks of the given `code`."] - #[doc = ""] - #[doc = "# "] - #[doc = "- `O(C)` where `C` length of `code`"] - #[doc = "- 1 storage write (codec `O(C)`)."] - #[doc = "- 1 digest item."] - #[doc = "- 1 event."] - #[doc = "The weight of this function is dependent on the runtime. We will treat this as a full"] - #[doc = "block. # "] - set_code_without_checks { - code: ::std::vec::Vec<::core::primitive::u8>, - }, - #[codec(index = 5)] - #[doc = "Set some items of storage."] - set_storage { - items: ::std::vec::Vec<( - ::std::vec::Vec<::core::primitive::u8>, - ::std::vec::Vec<::core::primitive::u8>, - )>, - }, - #[codec(index = 6)] - #[doc = "Kill some items from storage."] - kill_storage { - keys: ::std::vec::Vec<::std::vec::Vec<::core::primitive::u8>>, - }, - #[codec(index = 7)] - #[doc = "Kill all storage items with a key that starts with the given prefix."] - #[doc = ""] - #[doc = "**NOTE:** We rely on the Root origin to provide us the number of subkeys under"] - #[doc = "the prefix we are removing to accurately calculate the weight of this function."] - kill_prefix { - prefix: ::std::vec::Vec<::core::primitive::u8>, - subkeys: ::core::primitive::u32, - }, - #[codec(index = 8)] - #[doc = "Make some on-chain remark and emit event."] - remark_with_event { - remark: ::std::vec::Vec<::core::primitive::u8>, - }, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Error { - #[codec(index = 0)] - #[doc = "The name of specification does not match between the current runtime"] - #[doc = "and the new runtime."] - InvalidSpecName, - #[codec(index = 1)] - #[doc = "The specification version is not allowed to decrease between the current runtime"] - #[doc = "and the new runtime."] - SpecVersionNeedsToIncrease, - #[codec(index = 2)] - #[doc = "Failed to extract the runtime version from the new runtime."] - #[doc = ""] - #[doc = "Either calling `Core_version` or decoding `RuntimeVersion` failed."] - FailedToExtractRuntimeVersion, - #[codec(index = 3)] - #[doc = "Suicide called when the account has non-default composite data."] - NonDefaultComposite, - #[codec(index = 4)] - #[doc = "There is a non-zero reference count preventing the account from being purged."] - NonZeroRefCount, - #[codec(index = 5)] - #[doc = "The origin filter prevent the call to be dispatched."] - CallFiltered, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Event { - #[codec(index = 0)] - #[doc = "An extrinsic completed successfully."] - ExtrinsicSuccess { - dispatch_info: runtime_types::frame_support::weights::DispatchInfo, - }, - #[codec(index = 1)] - #[doc = "An extrinsic failed."] - ExtrinsicFailed { - dispatch_error: runtime_types::sp_runtime::DispatchError, - dispatch_info: runtime_types::frame_support::weights::DispatchInfo, - }, - #[codec(index = 2)] - #[doc = "`:code` was updated."] - CodeUpdated, - #[codec(index = 3)] - #[doc = "A new account was created."] - NewAccount { - account: 
::subxt::sp_core::crypto::AccountId32, - }, - #[codec(index = 4)] - #[doc = "An account was reaped."] - KilledAccount { - account: ::subxt::sp_core::crypto::AccountId32, - }, - #[codec(index = 5)] - #[doc = "On on-chain remark happened."] - Remarked { - sender: ::subxt::sp_core::crypto::AccountId32, - hash: ::subxt::sp_core::H256, - }, - } - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct AccountInfo<_0, _1> { - pub nonce: _0, - pub consumers: _0, - pub providers: _0, - pub sufficients: _0, - pub data: _1, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct EventRecord<_0, _1> { - pub phase: runtime_types::frame_system::Phase, - pub event: _0, - pub topics: ::std::vec::Vec<_1>, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct LastRuntimeUpgradeInfo { - #[codec(compact)] - pub spec_version: ::core::primitive::u32, - pub spec_name: ::std::string::String, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Phase { - #[codec(index = 0)] - ApplyExtrinsic(::core::primitive::u32), - #[codec(index = 1)] - Finalization, - #[codec(index = 2)] - Initialization, - } - } - pub mod pallet_authorship { - use super::runtime_types; - pub mod pallet { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Call { - #[codec(index = 0)] - #[doc = "Provide a set of uncles."] - set_uncles { - new_uncles: ::std::vec::Vec< - runtime_types::sp_runtime::generic::header::Header< - ::core::primitive::u32, - runtime_types::sp_runtime::traits::BlakeTwo256, - >, - >, - }, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Error { - #[codec(index = 0)] - #[doc = "The uncle parent not in the chain."] - InvalidUncleParent, - #[codec(index = 1)] - #[doc = "Uncles already set in the block."] - UnclesAlreadySet, - #[codec(index = 2)] - #[doc = "Too many uncles."] - TooManyUncles, - #[codec(index = 3)] - #[doc = "The uncle is genesis."] - GenesisUncle, - #[codec(index = 4)] - #[doc = "The uncle is too high in chain."] - TooHighUncle, - #[codec(index = 5)] - #[doc = "The uncle is already included."] - UncleAlreadyIncluded, - #[codec(index = 6)] - #[doc = "The uncle isn't recent enough to be included."] - OldUncle, - } - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum UncleEntryItem<_0, _1, _2> { - #[codec(index = 0)] - InclusionHeight(_0), - #[codec(index = 1)] - Uncle(_1, ::core::option::Option<_2>), - } - } - pub mod pallet_babe { - use super::runtime_types; - pub mod pallet { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Call { - #[codec(index = 0)] - #[doc = "Report authority equivocation/misbehavior. This method will verify"] - #[doc = "the equivocation proof and validate the given key ownership proof"] - #[doc = "against the extracted offender. 
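[Editor's note] `AccountInfo` above keeps, alongside the nonce, several reference counters (`consumers`, `providers`, `sufficients`), and the `NonZeroRefCount` error in the frame_system error enum reflects the rule that an account holding live references cannot be purged. A hypothetical illustration of that gate:

#[allow(dead_code)]
#[derive(Default)]
struct AccountInfo {
    nonce: u32,
    consumers: u32,
    providers: u32,
    sufficients: u32,
}

fn can_reap(info: &AccountInfo) -> Result<(), &'static str> {
    if info.consumers != 0 {
        // Something on-chain (locks, proxies, ...) still depends on this account.
        return Err("non-zero reference count prevents purging the account");
    }
    Ok(())
}

fn main() {
    let mut info = AccountInfo::default();
    assert!(can_reap(&info).is_ok());
    info.consumers = 1;
    assert!(can_reap(&info).is_err());
}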
If both are valid, the offence will"] - #[doc = "be reported."] - report_equivocation { - equivocation_proof: ::std::boxed::Box< - runtime_types::sp_consensus_slots::EquivocationProof< - runtime_types::sp_runtime::generic::header::Header< - ::core::primitive::u32, - runtime_types::sp_runtime::traits::BlakeTwo256, - >, - runtime_types::sp_consensus_babe::app::Public, - >, - >, - key_owner_proof: runtime_types::sp_session::MembershipProof, - }, - #[codec(index = 1)] - #[doc = "Report authority equivocation/misbehavior. This method will verify"] - #[doc = "the equivocation proof and validate the given key ownership proof"] - #[doc = "against the extracted offender. If both are valid, the offence will"] - #[doc = "be reported."] - #[doc = "This extrinsic must be called unsigned and it is expected that only"] - #[doc = "block authors will call it (validated in `ValidateUnsigned`), as such"] - #[doc = "if the block author is defined it will be defined as the equivocation"] - #[doc = "reporter."] - report_equivocation_unsigned { - equivocation_proof: ::std::boxed::Box< - runtime_types::sp_consensus_slots::EquivocationProof< - runtime_types::sp_runtime::generic::header::Header< - ::core::primitive::u32, - runtime_types::sp_runtime::traits::BlakeTwo256, - >, - runtime_types::sp_consensus_babe::app::Public, - >, - >, - key_owner_proof: runtime_types::sp_session::MembershipProof, - }, - #[codec(index = 2)] - #[doc = "Plan an epoch config change. The epoch config change is recorded and will be enacted on"] - #[doc = "the next call to `enact_epoch_change`. The config will be activated one epoch after."] - #[doc = "Multiple calls to this method will replace any existing planned config change that had"] - #[doc = "not been enacted yet."] - plan_config_change { - config: runtime_types::sp_consensus_babe::digests::NextConfigDescriptor, - }, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Error { - #[codec(index = 0)] - #[doc = "An equivocation proof provided as part of an equivocation report is invalid."] - InvalidEquivocationProof, - #[codec(index = 1)] - #[doc = "A key ownership proof provided as part of an equivocation report is invalid."] - InvalidKeyOwnershipProof, - #[codec(index = 2)] - #[doc = "A given equivocation report is valid but already previously reported."] - DuplicateOffenceReport, - #[codec(index = 3)] - #[doc = "Submitted configuration is invalid."] - InvalidConfiguration, - } - } - } - pub mod pallet_balances { - use super::runtime_types; - pub mod pallet { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Call { - #[codec(index = 0)] - #[doc = "Transfer some liquid free balance to another account."] - #[doc = ""] - #[doc = "`transfer` will set the `FreeBalance` of the sender and receiver."] - #[doc = "If the sender's account is below the existential deposit as a result"] - #[doc = "of the transfer, the account will be reaped."] - #[doc = ""] - #[doc = "The dispatch origin for this call must be `Signed` by the transactor."] - #[doc = ""] - #[doc = "# "] - #[doc = "- Dependent on arguments but not critical, given proper implementations for input config"] - #[doc = " types. 
See related functions below."] - #[doc = "- It contains a limited number of reads and writes internally and no complex"] - #[doc = " computation."] - #[doc = ""] - #[doc = "Related functions:"] - #[doc = ""] - #[doc = " - `ensure_can_withdraw` is always called internally but has a bounded complexity."] - #[doc = " - Transferring balances to accounts that did not exist before will cause"] - #[doc = " `T::OnNewAccount::on_new_account` to be called."] - #[doc = " - Removing enough funds from an account will trigger `T::DustRemoval::on_unbalanced`."] - #[doc = " - `transfer_keep_alive` works the same way as `transfer`, but has an additional check"] - #[doc = " that the transfer will not kill the origin account."] - #[doc = "---------------------------------"] - #[doc = "- Origin account is already in memory, so no DB operations for them."] - #[doc = "# "] - transfer { - dest: ::subxt::sp_runtime::MultiAddress< - ::subxt::sp_core::crypto::AccountId32, - (), - >, - #[codec(compact)] - value: ::core::primitive::u128, - }, - #[codec(index = 1)] - #[doc = "Set the balances of a given account."] - #[doc = ""] - #[doc = "This will alter `FreeBalance` and `ReservedBalance` in storage. it will"] - #[doc = "also alter the total issuance of the system (`TotalIssuance`) appropriately."] - #[doc = "If the new free or reserved balance is below the existential deposit,"] - #[doc = "it will reset the account nonce (`frame_system::AccountNonce`)."] - #[doc = ""] - #[doc = "The dispatch origin for this call is `root`."] - set_balance { - who: ::subxt::sp_runtime::MultiAddress< - ::subxt::sp_core::crypto::AccountId32, - (), - >, - #[codec(compact)] - new_free: ::core::primitive::u128, - #[codec(compact)] - new_reserved: ::core::primitive::u128, - }, - #[codec(index = 2)] - #[doc = "Exactly as `transfer`, except the origin must be root and the source account may be"] - #[doc = "specified."] - #[doc = "# "] - #[doc = "- Same as transfer, but additional read and write because the source account is not"] - #[doc = " assumed to be in the overlay."] - #[doc = "# "] - force_transfer { - source: ::subxt::sp_runtime::MultiAddress< - ::subxt::sp_core::crypto::AccountId32, - (), - >, - dest: ::subxt::sp_runtime::MultiAddress< - ::subxt::sp_core::crypto::AccountId32, - (), - >, - #[codec(compact)] - value: ::core::primitive::u128, - }, - #[codec(index = 3)] - #[doc = "Same as the [`transfer`] call, but with a check that the transfer will not kill the"] - #[doc = "origin account."] - #[doc = ""] - #[doc = "99% of the time you want [`transfer`] instead."] - #[doc = ""] - #[doc = "[`transfer`]: struct.Pallet.html#method.transfer"] - transfer_keep_alive { - dest: ::subxt::sp_runtime::MultiAddress< - ::subxt::sp_core::crypto::AccountId32, - (), - >, - #[codec(compact)] - value: ::core::primitive::u128, - }, - #[codec(index = 4)] - #[doc = "Transfer the entire transferable balance from the caller account."] - #[doc = ""] - #[doc = "NOTE: This function only attempts to transfer _transferable_ balances. This means that"] - #[doc = "any locked, reserved, or existential deposits (when `keep_alive` is `true`), will not be"] - #[doc = "transferred by this function. 
To ensure that this function results in a killed account,"] - #[doc = "you might need to prepare the account by removing any reference counters, storage"] - #[doc = "deposits, etc..."] - #[doc = ""] - #[doc = "The dispatch origin of this call must be Signed."] - #[doc = ""] - #[doc = "- `dest`: The recipient of the transfer."] - #[doc = "- `keep_alive`: A boolean to determine if the `transfer_all` operation should send all"] - #[doc = " of the funds the account has, causing the sender account to be killed (false), or"] - #[doc = " transfer everything except at least the existential deposit, which will guarantee to"] - #[doc = " keep the sender account alive (true). # "] - #[doc = "- O(1). Just like transfer, but reading the user's transferable balance first."] - #[doc = " #"] - transfer_all { - dest: ::subxt::sp_runtime::MultiAddress< - ::subxt::sp_core::crypto::AccountId32, - (), - >, - keep_alive: ::core::primitive::bool, - }, - #[codec(index = 5)] - #[doc = "Unreserve some balance from a user by force."] - #[doc = ""] - #[doc = "Can only be called by ROOT."] - force_unreserve { - who: ::subxt::sp_runtime::MultiAddress< - ::subxt::sp_core::crypto::AccountId32, - (), - >, - amount: ::core::primitive::u128, - }, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Error { - #[codec(index = 0)] - #[doc = "Vesting balance too high to send value"] - VestingBalance, - #[codec(index = 1)] - #[doc = "Account liquidity restrictions prevent withdrawal"] - LiquidityRestrictions, - #[codec(index = 2)] - #[doc = "Balance too low to send value"] - InsufficientBalance, - #[codec(index = 3)] - #[doc = "Value too low to create account due to existential deposit"] - ExistentialDeposit, - #[codec(index = 4)] - #[doc = "Transfer/payment would kill account"] - KeepAlive, - #[codec(index = 5)] - #[doc = "A vesting schedule already exists for this account"] - ExistingVestingSchedule, - #[codec(index = 6)] - #[doc = "Beneficiary account must pre-exist"] - DeadAccount, - #[codec(index = 7)] - #[doc = "Number of named reserves exceed MaxReserves"] - TooManyReserves, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Event { - #[codec(index = 0)] - #[doc = "An account was created with some free balance."] - Endowed { - account: ::subxt::sp_core::crypto::AccountId32, - free_balance: ::core::primitive::u128, - }, - #[codec(index = 1)] - #[doc = "An account was removed whose balance was non-zero but below ExistentialDeposit,"] - #[doc = "resulting in an outright loss."] - DustLost { - account: ::subxt::sp_core::crypto::AccountId32, - amount: ::core::primitive::u128, - }, - #[codec(index = 2)] - #[doc = "Transfer succeeded."] - Transfer { - from: ::subxt::sp_core::crypto::AccountId32, - to: ::subxt::sp_core::crypto::AccountId32, - amount: ::core::primitive::u128, - }, - #[codec(index = 3)] - #[doc = "A balance was set by root."] - BalanceSet { - who: ::subxt::sp_core::crypto::AccountId32, - free: ::core::primitive::u128, - reserved: ::core::primitive::u128, - }, - #[codec(index = 4)] - #[doc = "Some balance was reserved (moved from free to reserved)."] - Reserved { - who: ::subxt::sp_core::crypto::AccountId32, - amount: ::core::primitive::u128, - }, - #[codec(index = 5)] - #[doc = "Some balance was unreserved (moved from reserved to free)."] - Unreserved { - who: ::subxt::sp_core::crypto::AccountId32, - amount: ::core::primitive::u128, - }, - #[codec(index = 6)] - #[doc = "Some balance was moved from the reserve of the first account to the 
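[Editor's note] The transfer calls documented above all revolve around the existential deposit: a plain `transfer` that leaves the sender below it reaps the account, while `transfer_keep_alive` fails with `KeepAlive` instead. The following is a simplified, hypothetical model of that decision, not the pallet's actual accounting:

const EXISTENTIAL_DEPOSIT: u128 = 1_000; // illustrative value

#[derive(Debug, PartialEq)]
enum TransferError {
    InsufficientBalance,
    KeepAlive,
}

// Returns the sender's new free balance, or an error mirroring the
// pallet_balances error variants listed above.
fn check_transfer(free: u128, value: u128, keep_alive: bool) -> Result<u128, TransferError> {
    let remaining = free.checked_sub(value).ok_or(TransferError::InsufficientBalance)?;
    if remaining < EXISTENTIAL_DEPOSIT {
        if keep_alive {
            // `transfer_keep_alive` refuses to kill the origin account.
            return Err(TransferError::KeepAlive);
        }
        // A plain `transfer` proceeds and the remaining dust is removed,
        // reaping the sender; modelled here as a zeroed balance.
        return Ok(0);
    }
    Ok(remaining)
}

fn main() {
    assert_eq!(check_transfer(5_000, 1_000, true), Ok(4_000));
    assert_eq!(check_transfer(1_500, 1_000, true), Err(TransferError::KeepAlive));
    assert_eq!(check_transfer(1_500, 1_000, false), Ok(0));
    assert_eq!(check_transfer(500, 1_000, false), Err(TransferError::InsufficientBalance));
}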
second account."] - #[doc = "Final argument indicates the destination balance type."] - ReserveRepatriated { - from: ::subxt::sp_core::crypto::AccountId32, - to: ::subxt::sp_core::crypto::AccountId32, - amount: ::core::primitive::u128, - destination_status: - runtime_types::frame_support::traits::tokens::misc::BalanceStatus, - }, - #[codec(index = 7)] - #[doc = "Some amount was deposited (e.g. for transaction fees)."] - Deposit { - who: ::subxt::sp_core::crypto::AccountId32, - amount: ::core::primitive::u128, - }, - #[codec(index = 8)] - #[doc = "Some amount was withdrawn from the account (e.g. for transaction fees)."] - Withdraw { - who: ::subxt::sp_core::crypto::AccountId32, - amount: ::core::primitive::u128, - }, - #[codec(index = 9)] - #[doc = "Some amount was removed from the account (e.g. for misbehavior)."] - Slashed { - who: ::subxt::sp_core::crypto::AccountId32, - amount: ::core::primitive::u128, - }, - } - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct AccountData<_0> { - pub free: _0, - pub reserved: _0, - pub misc_frozen: _0, - pub fee_frozen: _0, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct BalanceLock<_0> { - pub id: [::core::primitive::u8; 8usize], - pub amount: _0, - pub reasons: runtime_types::pallet_balances::Reasons, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Reasons { - #[codec(index = 0)] - Fee, - #[codec(index = 1)] - Misc, - #[codec(index = 2)] - All, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Releases { - #[codec(index = 0)] - V1_0_0, - #[codec(index = 1)] - V2_0_0, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct ReserveData<_0, _1> { - pub id: _0, - pub amount: _1, - } - } - pub mod pallet_beefy { - use super::runtime_types; - pub mod pallet { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Call {} - } - } - pub mod pallet_bridge_dispatch { - use super::runtime_types; - pub mod pallet { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Event { - #[codec(index = 0)] - #[doc = "Message has been rejected before reaching dispatch."] - MessageRejected( - [::core::primitive::u8; 4usize], - ([::core::primitive::u8; 4usize], ::core::primitive::u64), - ), - #[codec(index = 1)] - #[doc = "Message has been rejected by dispatcher because of spec version mismatch."] - #[doc = "Last two arguments are: expected and passed spec version."] - MessageVersionSpecMismatch( - [::core::primitive::u8; 4usize], - ([::core::primitive::u8; 4usize], ::core::primitive::u64), - ::core::primitive::u32, - ::core::primitive::u32, - ), - #[codec(index = 2)] - #[doc = "Message has been rejected by dispatcher because of weight mismatch."] - #[doc = "Last two arguments are: expected and passed call weight."] - MessageWeightMismatch( - [::core::primitive::u8; 4usize], - ([::core::primitive::u8; 4usize], ::core::primitive::u64), - ::core::primitive::u64, - ::core::primitive::u64, - ), - #[codec(index = 3)] - #[doc = "Message signature mismatch."] - MessageSignatureMismatch( - [::core::primitive::u8; 4usize], - ([::core::primitive::u8; 4usize], ::core::primitive::u64), - ), - #[codec(index = 4)] - #[doc = "We have failed to decode Call from the message."] - MessageCallDecodeFailed( - [::core::primitive::u8; 4usize], - 
([::core::primitive::u8; 4usize], ::core::primitive::u64), - ), - #[codec(index = 5)] - #[doc = "The call from the message has been rejected by the call filter."] - MessageCallRejected( - [::core::primitive::u8; 4usize], - ([::core::primitive::u8; 4usize], ::core::primitive::u64), - ), - #[codec(index = 6)] - #[doc = "The origin account has failed to pay fee for dispatching the message."] - MessageDispatchPaymentFailed( - [::core::primitive::u8; 4usize], - ([::core::primitive::u8; 4usize], ::core::primitive::u64), - ::subxt::sp_core::crypto::AccountId32, - ::core::primitive::u64, - ), - #[codec(index = 7)] - #[doc = "Message has been dispatched with given result."] - MessageDispatched( - [::core::primitive::u8; 4usize], - ([::core::primitive::u8; 4usize], ::core::primitive::u64), - ::core::result::Result<(), runtime_types::sp_runtime::DispatchError>, - ), - #[codec(index = 8)] - #[doc = "Phantom member, never used. Needed to handle multiple pallet instances."] - _Dummy, - } - } - } - pub mod pallet_bridge_grandpa { - use super::runtime_types; - pub mod pallet { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Call { - #[codec(index = 0)] - #[doc = "Verify a target header is finalized according to the given finality proof."] - #[doc = ""] - #[doc = "It will use the underlying storage pallet to fetch information about the current"] - #[doc = "authorities and best finalized header in order to verify that the header is finalized."] - #[doc = ""] - #[doc = "If successful in verification, it will write the target header to the underlying storage"] - #[doc = "pallet."] - submit_finality_proof { - finality_target: ::std::boxed::Box< - runtime_types::sp_runtime::generic::header::Header< - ::core::primitive::u32, - runtime_types::sp_runtime::traits::BlakeTwo256, - >, - >, - justification: - runtime_types::bp_header_chain::justification::GrandpaJustification< - runtime_types::sp_runtime::generic::header::Header< - ::core::primitive::u32, - runtime_types::sp_runtime::traits::BlakeTwo256, - >, - >, - }, - #[codec(index = 1)] - #[doc = "Bootstrap the bridge pallet with an initial header and authority set from which to sync."] - #[doc = ""] - #[doc = "The initial configuration provided does not need to be the genesis header of the bridged"] - #[doc = "chain, it can be any arbitrary header. You can also provide the next scheduled set"] - #[doc = "change if it is already know."] - #[doc = ""] - #[doc = "This function is only allowed to be called from a trusted origin and writes to storage"] - #[doc = "with practically no checks in terms of the validity of the data. 
It is important that"] - #[doc = "you ensure that valid data is being passed in."] - initialize { - init_data: runtime_types::bp_header_chain::InitializationData< - runtime_types::sp_runtime::generic::header::Header< - ::core::primitive::u32, - runtime_types::sp_runtime::traits::BlakeTwo256, - >, - >, - }, - #[codec(index = 2)] - #[doc = "Change `PalletOwner`."] - #[doc = ""] - #[doc = "May only be called either by root, or by `PalletOwner`."] - set_owner { - new_owner: ::core::option::Option<::subxt::sp_core::crypto::AccountId32>, - }, - #[codec(index = 3)] - #[doc = "Halt or resume all pallet operations."] - #[doc = ""] - #[doc = "May only be called either by root, or by `PalletOwner`."] - set_operational { - operational: ::core::primitive::bool, - }, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Error { - #[codec(index = 0)] - #[doc = "The given justification is invalid for the given header."] - InvalidJustification, - #[codec(index = 1)] - #[doc = "The authority set from the underlying header chain is invalid."] - InvalidAuthoritySet, - #[codec(index = 2)] - #[doc = "There are too many requests for the current window to handle."] - TooManyRequests, - #[codec(index = 3)] - #[doc = "The header being imported is older than the best finalized header known to the pallet."] - OldHeader, - #[codec(index = 4)] - #[doc = "The header is unknown to the pallet."] - UnknownHeader, - #[codec(index = 5)] - #[doc = "The scheduled authority set change found in the header is unsupported by the pallet."] - #[doc = ""] - #[doc = "This is the case for non-standard (e.g forced) authority set changes."] - UnsupportedScheduledChange, - #[codec(index = 6)] - #[doc = "The pallet is not yet initialized."] - NotInitialized, - #[codec(index = 7)] - #[doc = "The pallet has already been initialized."] - AlreadyInitialized, - #[codec(index = 8)] - #[doc = "All pallet operations are halted."] - Halted, - #[codec(index = 9)] - #[doc = "The storage proof doesn't contains storage root. 
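[Editor's note] `submit_finality_proof` above only makes progress when the target header is ahead of the best finalized header the pallet already knows, otherwise it is rejected with `OldHeader`; an invalid GRANDPA justification yields `InvalidJustification`. A minimal sketch of those pre-checks, with the justification verification itself stubbed out and all types hypothetical:

struct Header {
    number: u32, // other header fields trimmed
}

#[derive(Debug, PartialEq)]
enum BridgeError {
    OldHeader,
    InvalidJustification,
}

struct BridgePallet {
    best_finalized: Header,
}

impl BridgePallet {
    fn submit_finality_proof(
        &mut self,
        finality_target: Header,
        justification_ok: bool, // stand-in for real GRANDPA justification checking
    ) -> Result<(), BridgeError> {
        if finality_target.number <= self.best_finalized.number {
            return Err(BridgeError::OldHeader);
        }
        if !justification_ok {
            return Err(BridgeError::InvalidJustification);
        }
        self.best_finalized = finality_target;
        Ok(())
    }
}

fn main() {
    let mut pallet = BridgePallet { best_finalized: Header { number: 100 } };
    assert_eq!(
        pallet.submit_finality_proof(Header { number: 90 }, true),
        Err(BridgeError::OldHeader)
    );
    assert!(pallet.submit_finality_proof(Header { number: 101 }, true).is_ok());
}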
So it is invalid for given header."] - StorageRootMismatch, - } - } - } - pub mod pallet_bridge_messages { - use super::runtime_types; - pub mod pallet { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Call { - # [codec (index = 0)] # [doc = "Change `PalletOwner`."] # [doc = ""] # [doc = "May only be called either by root, or by `PalletOwner`."] set_owner { new_owner : :: core :: option :: Option < :: subxt :: sp_core :: crypto :: AccountId32 > , } , # [codec (index = 1)] # [doc = "Halt or resume all/some pallet operations."] # [doc = ""] # [doc = "May only be called either by root, or by `PalletOwner`."] set_operating_mode { operating_mode : runtime_types :: bp_messages :: OperatingMode , } , # [codec (index = 2)] # [doc = "Update pallet parameter."] # [doc = ""] # [doc = "May only be called either by root, or by `PalletOwner`."] # [doc = ""] # [doc = "The weight is: single read for permissions check + 2 writes for parameter value and"] # [doc = "event."] update_pallet_parameter { parameter : () , } , # [codec (index = 3)] # [doc = "Send message over lane."] send_message { lane_id : [:: core :: primitive :: u8 ; 4usize] , payload : runtime_types :: bp_message_dispatch :: MessagePayload < :: subxt :: sp_core :: crypto :: AccountId32 , runtime_types :: sp_runtime :: MultiSigner , runtime_types :: sp_runtime :: MultiSignature , :: std :: vec :: Vec < :: core :: primitive :: u8 > > , delivery_and_dispatch_fee : :: core :: primitive :: u128 , } , # [codec (index = 4)] # [doc = "Pay additional fee for the message."] increase_message_fee { lane_id : [:: core :: primitive :: u8 ; 4usize] , nonce : :: core :: primitive :: u64 , additional_fee : :: core :: primitive :: u128 , } , # [codec (index = 5)] # [doc = "Receive messages proof from bridged chain."] # [doc = ""] # [doc = "The weight of the call assumes that the transaction always brings outbound lane"] # [doc = "state update. 
Because of that, the submitter (relayer) has no benefit of not including"] # [doc = "this data in the transaction, so reward confirmations lags should be minimal."] receive_messages_proof { relayer_id_at_bridged_chain : :: subxt :: sp_core :: crypto :: AccountId32 , proof : runtime_types :: bridge_runtime_common :: messages :: target :: FromBridgedChainMessagesProof < :: subxt :: sp_core :: H256 > , messages_count : :: core :: primitive :: u32 , dispatch_weight : :: core :: primitive :: u64 , } , # [codec (index = 6)] # [doc = "Receive messages delivery proof from bridged chain."] receive_messages_delivery_proof { proof : runtime_types :: bridge_runtime_common :: messages :: source :: FromBridgedChainMessagesDeliveryProof < :: subxt :: sp_core :: H256 > , relayers_state : runtime_types :: bp_messages :: UnrewardedRelayersState , } , } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Error { - #[codec(index = 0)] - #[doc = "All pallet operations are halted."] - Halted, - #[codec(index = 1)] - #[doc = "Message has been treated as invalid by chain verifier."] - MessageRejectedByChainVerifier, - #[codec(index = 2)] - #[doc = "Message has been treated as invalid by lane verifier."] - MessageRejectedByLaneVerifier, - #[codec(index = 3)] - #[doc = "Submitter has failed to pay fee for delivering and dispatching messages."] - FailedToWithdrawMessageFee, - #[codec(index = 4)] - #[doc = "The transaction brings too many messages."] - TooManyMessagesInTheProof, - #[codec(index = 5)] - #[doc = "Invalid messages has been submitted."] - InvalidMessagesProof, - #[codec(index = 6)] - #[doc = "Invalid messages delivery proof has been submitted."] - InvalidMessagesDeliveryProof, - #[codec(index = 7)] - #[doc = "The bridged chain has invalid `UnrewardedRelayers` in its storage (fatal for the lane)."] - InvalidUnrewardedRelayers, - #[codec(index = 8)] - #[doc = "The relayer has declared invalid unrewarded relayers state in the"] - #[doc = "`receive_messages_delivery_proof` call."] - InvalidUnrewardedRelayersState, - #[codec(index = 9)] - #[doc = "The message someone is trying to work with (i.e. increase fee) is already-delivered."] - MessageIsAlreadyDelivered, - #[codec(index = 10)] - #[doc = "The message someone is trying to work with (i.e. increase fee) is not yet sent."] - MessageIsNotYetSent, - #[codec(index = 11)] - #[doc = "The number of actually confirmed messages is going to be larger than the number of"] - #[doc = "messages in the proof. This may mean that this or bridged chain storage is corrupted."] - TryingToConfirmMoreMessagesThanExpected, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Event { - #[codec(index = 0)] - #[doc = "Pallet parameter has been updated."] - ParameterUpdated(()), - #[codec(index = 1)] - #[doc = "Message has been accepted and is waiting to be delivered."] - MessageAccepted([::core::primitive::u8; 4usize], ::core::primitive::u64), - #[codec(index = 2)] - #[doc = "Messages in the inclusive range have been delivered to the bridged chain."] - MessagesDelivered( - [::core::primitive::u8; 4usize], - runtime_types::bp_messages::DeliveredMessages, - ), - } - } - } - pub mod pallet_collective { - use super::runtime_types; - pub mod pallet { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Call { - #[codec(index = 0)] - #[doc = "Set the collective's membership."] - #[doc = ""] - #[doc = "- `new_members`: The new member list. 
Be nice to the chain and provide it sorted."] - #[doc = "- `prime`: The prime member whose vote sets the default."] - #[doc = "- `old_count`: The upper bound for the previous number of members in storage. Used for"] - #[doc = " weight estimation."] - #[doc = ""] - #[doc = "Requires root origin."] - #[doc = ""] - #[doc = "NOTE: Does not enforce the expected `MaxMembers` limit on the amount of members, but"] - #[doc = " the weight estimations rely on it to estimate dispatchable weight."] - #[doc = ""] - #[doc = "# WARNING:"] - #[doc = ""] - #[doc = "The `pallet-collective` can also be managed by logic outside of the pallet through the"] - #[doc = "implementation of the trait [`ChangeMembers`]."] - #[doc = "Any call to `set_members` must be careful that the member set doesn't get out of sync"] - #[doc = "with other logic managing the member set."] - #[doc = ""] - #[doc = "# "] - #[doc = "## Weight"] - #[doc = "- `O(MP + N)` where:"] - #[doc = " - `M` old-members-count (code- and governance-bounded)"] - #[doc = " - `N` new-members-count (code- and governance-bounded)"] - #[doc = " - `P` proposals-count (code-bounded)"] - #[doc = "- DB:"] - #[doc = " - 1 storage mutation (codec `O(M)` read, `O(N)` write) for reading and writing the"] - #[doc = " members"] - #[doc = " - 1 storage read (codec `O(P)`) for reading the proposals"] - #[doc = " - `P` storage mutations (codec `O(M)`) for updating the votes for each proposal"] - #[doc = " - 1 storage write (codec `O(1)`) for deleting the old `prime` and setting the new one"] - #[doc = "# "] - set_members { - new_members: ::std::vec::Vec<::subxt::sp_core::crypto::AccountId32>, - prime: ::core::option::Option<::subxt::sp_core::crypto::AccountId32>, - old_count: ::core::primitive::u32, - }, - #[codec(index = 1)] - #[doc = "Dispatch a proposal from a member using the `Member` origin."] - #[doc = ""] - #[doc = "Origin must be a member of the collective."] - #[doc = ""] - #[doc = "# "] - #[doc = "## Weight"] - #[doc = "- `O(M + P)` where `M` members-count (code-bounded) and `P` complexity of dispatching"] - #[doc = " `proposal`"] - #[doc = "- DB: 1 read (codec `O(M)`) + DB access of `proposal`"] - #[doc = "- 1 event"] - #[doc = "# "] - execute { - proposal: ::std::boxed::Box, - #[codec(compact)] - length_bound: ::core::primitive::u32, - }, - #[codec(index = 2)] - #[doc = "Add a new proposal to either be voted on or executed directly."] - #[doc = ""] - #[doc = "Requires the sender to be member."] - #[doc = ""] - #[doc = "`threshold` determines whether `proposal` is executed directly (`threshold < 2`)"] - #[doc = "or put up for voting."] - #[doc = ""] - #[doc = "# "] - #[doc = "## Weight"] - #[doc = "- `O(B + M + P1)` or `O(B + M + P2)` where:"] - #[doc = " - `B` is `proposal` size in bytes (length-fee-bounded)"] - #[doc = " - `M` is members-count (code- and governance-bounded)"] - #[doc = " - branching is influenced by `threshold` where:"] - #[doc = " - `P1` is proposal execution complexity (`threshold < 2`)"] - #[doc = " - `P2` is proposals-count (code-bounded) (`threshold >= 2`)"] - #[doc = "- DB:"] - #[doc = " - 1 storage read `is_member` (codec `O(M)`)"] - #[doc = " - 1 storage read `ProposalOf::contains_key` (codec `O(1)`)"] - #[doc = " - DB accesses influenced by `threshold`:"] - #[doc = " - EITHER storage accesses done by `proposal` (`threshold < 2`)"] - #[doc = " - OR proposal insertion (`threshold <= 2`)"] - #[doc = " - 1 storage mutation `Proposals` (codec `O(P2)`)"] - #[doc = " - 1 storage mutation `ProposalCount` (codec `O(1)`)"] - #[doc = " - 1 
storage write `ProposalOf` (codec `O(B)`)"] - #[doc = " - 1 storage write `Voting` (codec `O(M)`)"] - #[doc = " - 1 event"] - #[doc = "# "] - propose { - #[codec(compact)] - threshold: ::core::primitive::u32, - proposal: ::std::boxed::Box, - #[codec(compact)] - length_bound: ::core::primitive::u32, - }, - #[codec(index = 3)] - #[doc = "Add an aye or nay vote for the sender to the given proposal."] - #[doc = ""] - #[doc = "Requires the sender to be a member."] - #[doc = ""] - #[doc = "Transaction fees will be waived if the member is voting on any particular proposal"] - #[doc = "for the first time and the call is successful. Subsequent vote changes will charge a"] - #[doc = "fee."] - #[doc = "# "] - #[doc = "## Weight"] - #[doc = "- `O(M)` where `M` is members-count (code- and governance-bounded)"] - #[doc = "- DB:"] - #[doc = " - 1 storage read `Members` (codec `O(M)`)"] - #[doc = " - 1 storage mutation `Voting` (codec `O(M)`)"] - #[doc = "- 1 event"] - #[doc = "# "] - vote { - proposal: ::subxt::sp_core::H256, - #[codec(compact)] - index: ::core::primitive::u32, - approve: ::core::primitive::bool, - }, - #[codec(index = 4)] - #[doc = "Close a vote that is either approved, disapproved or whose voting period has ended."] - #[doc = ""] - #[doc = "May be called by any signed account in order to finish voting and close the proposal."] - #[doc = ""] - #[doc = "If called before the end of the voting period it will only close the vote if it is"] - #[doc = "has enough votes to be approved or disapproved."] - #[doc = ""] - #[doc = "If called after the end of the voting period abstentions are counted as rejections"] - #[doc = "unless there is a prime member set and the prime member cast an approval."] - #[doc = ""] - #[doc = "If the close operation completes successfully with disapproval, the transaction fee will"] - #[doc = "be waived. Otherwise execution of the approved operation will be charged to the caller."] - #[doc = ""] - #[doc = "+ `proposal_weight_bound`: The maximum amount of weight consumed by executing the closed"] - #[doc = "proposal."] - #[doc = "+ `length_bound`: The upper bound for the length of the proposal in storage. 
Checked via"] - #[doc = "`storage::read` so it is `size_of::() == 4` larger than the pure length."] - #[doc = ""] - #[doc = "# "] - #[doc = "## Weight"] - #[doc = "- `O(B + M + P1 + P2)` where:"] - #[doc = " - `B` is `proposal` size in bytes (length-fee-bounded)"] - #[doc = " - `M` is members-count (code- and governance-bounded)"] - #[doc = " - `P1` is the complexity of `proposal` preimage."] - #[doc = " - `P2` is proposal-count (code-bounded)"] - #[doc = "- DB:"] - #[doc = " - 2 storage reads (`Members`: codec `O(M)`, `Prime`: codec `O(1)`)"] - #[doc = " - 3 mutations (`Voting`: codec `O(M)`, `ProposalOf`: codec `O(B)`, `Proposals`: codec"] - #[doc = " `O(P2)`)"] - #[doc = " - any mutations done while executing `proposal` (`P1`)"] - #[doc = "- up to 3 events"] - #[doc = "# "] - close { - proposal_hash: ::subxt::sp_core::H256, - #[codec(compact)] - index: ::core::primitive::u32, - #[codec(compact)] - proposal_weight_bound: ::core::primitive::u64, - #[codec(compact)] - length_bound: ::core::primitive::u32, - }, - #[codec(index = 5)] - #[doc = "Disapprove a proposal, close, and remove it from the system, regardless of its current"] - #[doc = "state."] - #[doc = ""] - #[doc = "Must be called by the Root origin."] - #[doc = ""] - #[doc = "Parameters:"] - #[doc = "* `proposal_hash`: The hash of the proposal that should be disapproved."] - #[doc = ""] - #[doc = "# "] - #[doc = "Complexity: O(P) where P is the number of max proposals"] - #[doc = "DB Weight:"] - #[doc = "* Reads: Proposals"] - #[doc = "* Writes: Voting, Proposals, ProposalOf"] - #[doc = "# "] - disapprove_proposal { - proposal_hash: ::subxt::sp_core::H256, - }, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Error { - #[codec(index = 0)] - #[doc = "Account is not a member"] - NotMember, - #[codec(index = 1)] - #[doc = "Duplicate proposals not allowed"] - DuplicateProposal, - #[codec(index = 2)] - #[doc = "Proposal must exist"] - ProposalMissing, - #[codec(index = 3)] - #[doc = "Mismatched index"] - WrongIndex, - #[codec(index = 4)] - #[doc = "Duplicate vote ignored"] - DuplicateVote, - #[codec(index = 5)] - #[doc = "Members are already initialized!"] - AlreadyInitialized, - #[codec(index = 6)] - #[doc = "The close call was made too early, before the end of the voting."] - TooEarly, - #[codec(index = 7)] - #[doc = "There can only be a maximum of `MaxProposals` active proposals."] - TooManyProposals, - #[codec(index = 8)] - #[doc = "The given weight bound for the proposal was too low."] - WrongProposalWeight, - #[codec(index = 9)] - #[doc = "The given length bound for the proposal was too low."] - WrongProposalLength, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Event { - #[codec(index = 0)] - #[doc = "A motion (given hash) has been proposed (by given account) with a threshold (given"] - #[doc = "`MemberCount`)."] - Proposed { - account: ::subxt::sp_core::crypto::AccountId32, - proposal_index: ::core::primitive::u32, - proposal_hash: ::subxt::sp_core::H256, - threshold: ::core::primitive::u32, - }, - #[codec(index = 1)] - #[doc = "A motion (given hash) has been voted on by given account, leaving"] - #[doc = "a tally (yes votes and no votes given respectively as `MemberCount`)."] - Voted { - account: ::subxt::sp_core::crypto::AccountId32, - proposal_hash: ::subxt::sp_core::H256, - voted: ::core::primitive::bool, - yes: ::core::primitive::u32, - no: ::core::primitive::u32, - }, - #[codec(index = 2)] - #[doc = "A motion was approved by the 
required threshold."] - Approved { - proposal_hash: ::subxt::sp_core::H256, - }, - #[codec(index = 3)] - #[doc = "A motion was not approved by the required threshold."] - Disapproved { - proposal_hash: ::subxt::sp_core::H256, - }, - #[codec(index = 4)] - #[doc = "A motion was executed; result will be `Ok` if it returned without error."] - Executed { - proposal_hash: ::subxt::sp_core::H256, - result: - ::core::result::Result<(), runtime_types::sp_runtime::DispatchError>, - }, - #[codec(index = 5)] - #[doc = "A single member did some action; result will be `Ok` if it returned without error."] - MemberExecuted { - proposal_hash: ::subxt::sp_core::H256, - result: - ::core::result::Result<(), runtime_types::sp_runtime::DispatchError>, - }, - #[codec(index = 6)] - #[doc = "A proposal was closed because its threshold was reached or after its duration was up."] - Closed { - proposal_hash: ::subxt::sp_core::H256, - yes: ::core::primitive::u32, - no: ::core::primitive::u32, - }, - } - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum RawOrigin<_0> { - #[codec(index = 0)] - Members(::core::primitive::u32, ::core::primitive::u32), - #[codec(index = 1)] - Member(_0), - #[codec(index = 2)] - _Phantom, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct Votes<_0, _1> { - pub index: _1, - pub threshold: _1, - pub ayes: ::std::vec::Vec<_0>, - pub nays: ::std::vec::Vec<_0>, - pub end: _1, - } - } - pub mod pallet_grandpa { - use super::runtime_types; - pub mod pallet { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Call { - #[codec(index = 0)] - #[doc = "Report voter equivocation/misbehavior. This method will verify the"] - #[doc = "equivocation proof and validate the given key ownership proof"] - #[doc = "against the extracted offender. If both are valid, the offence"] - #[doc = "will be reported."] - report_equivocation { - equivocation_proof: ::std::boxed::Box< - runtime_types::sp_finality_grandpa::EquivocationProof< - ::subxt::sp_core::H256, - ::core::primitive::u32, - >, - >, - key_owner_proof: runtime_types::sp_session::MembershipProof, - }, - #[codec(index = 1)] - #[doc = "Report voter equivocation/misbehavior. This method will verify the"] - #[doc = "equivocation proof and validate the given key ownership proof"] - #[doc = "against the extracted offender. If both are valid, the offence"] - #[doc = "will be reported."] - #[doc = ""] - #[doc = "This extrinsic must be called unsigned and it is expected that only"] - #[doc = "block authors will call it (validated in `ValidateUnsigned`), as such"] - #[doc = "if the block author is defined it will be defined as the equivocation"] - #[doc = "reporter."] - report_equivocation_unsigned { - equivocation_proof: ::std::boxed::Box< - runtime_types::sp_finality_grandpa::EquivocationProof< - ::subxt::sp_core::H256, - ::core::primitive::u32, - >, - >, - key_owner_proof: runtime_types::sp_session::MembershipProof, - }, - #[codec(index = 2)] - #[doc = "Note that the current authority set of the GRANDPA finality gadget has"] - #[doc = "stalled. This will trigger a forced authority set change at the beginning"] - #[doc = "of the next session, to be enacted `delay` blocks after that. The delay"] - #[doc = "should be high enough to safely assume that the block signalling the"] - #[doc = "forced change will not be re-orged (e.g. 1000 blocks). 
The GRANDPA voters"] - #[doc = "will start the new authority set using the given finalized block as base."] - #[doc = "Only callable by root."] - note_stalled { - delay: ::core::primitive::u32, - best_finalized_block_number: ::core::primitive::u32, - }, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Error { - #[codec(index = 0)] - #[doc = "Attempt to signal GRANDPA pause when the authority set isn't live"] - #[doc = "(either paused or already pending pause)."] - PauseFailed, - #[codec(index = 1)] - #[doc = "Attempt to signal GRANDPA resume when the authority set isn't paused"] - #[doc = "(either live or already pending resume)."] - ResumeFailed, - #[codec(index = 2)] - #[doc = "Attempt to signal GRANDPA change with one already pending."] - ChangePending, - #[codec(index = 3)] - #[doc = "Cannot signal forced change so soon after last."] - TooSoon, - #[codec(index = 4)] - #[doc = "A key ownership proof provided as part of an equivocation report is invalid."] - InvalidKeyOwnershipProof, - #[codec(index = 5)] - #[doc = "An equivocation proof provided as part of an equivocation report is invalid."] - InvalidEquivocationProof, - #[codec(index = 6)] - #[doc = "A given equivocation report is valid but already previously reported."] - DuplicateOffenceReport, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Event { - #[codec(index = 0)] - #[doc = "New authority set has been applied."] - NewAuthorities { - authority_set: ::std::vec::Vec<( - runtime_types::sp_finality_grandpa::app::Public, - ::core::primitive::u64, - )>, - }, - #[codec(index = 1)] - #[doc = "Current authority set has been paused."] - Paused, - #[codec(index = 2)] - #[doc = "Current authority set has been resumed."] - Resumed, - } - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct StoredPendingChange<_0> { - pub scheduled_at: _0, - pub delay: _0, - pub next_authorities: - runtime_types::frame_support::storage::weak_bounded_vec::WeakBoundedVec<( - runtime_types::sp_finality_grandpa::app::Public, - ::core::primitive::u64, - )>, - pub forced: ::core::option::Option<_0>, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum StoredState<_0> { - #[codec(index = 0)] - Live, - #[codec(index = 1)] - PendingPause { scheduled_at: _0, delay: _0 }, - #[codec(index = 2)] - Paused, - #[codec(index = 3)] - PendingResume { scheduled_at: _0, delay: _0 }, - } - } - pub mod pallet_im_online { - use super::runtime_types; - pub mod pallet { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Call { - #[codec(index = 0)] - #[doc = "# "] - #[doc = "- Complexity: `O(K + E)` where K is length of `Keys` (heartbeat.validators_len) and E is"] - #[doc = " length of `heartbeat.network_state.external_address`"] - #[doc = " - `O(K)`: decoding of length `K`"] - #[doc = " - `O(E)`: decoding/encoding of length `E`"] - #[doc = "- DbReads: pallet_session `Validators`, pallet_session `CurrentIndex`, `Keys`,"] - #[doc = " `ReceivedHeartbeats`"] - #[doc = "- DbWrites: `ReceivedHeartbeats`"] - #[doc = "# "] - heartbeat { - heartbeat: - runtime_types::pallet_im_online::Heartbeat<::core::primitive::u32>, - signature: runtime_types::pallet_im_online::sr25519::app_sr25519::Signature, - }, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Error { - #[codec(index = 0)] - #[doc = "Non existent public key."] - 
InvalidKey, - #[codec(index = 1)] - #[doc = "Duplicated heartbeat."] - DuplicatedHeartbeat, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Event { - #[codec(index = 0)] - #[doc = "A new heartbeat was received from `AuthorityId`."] - HeartbeatReceived { - authority_id: runtime_types::pallet_im_online::sr25519::app_sr25519::Public, - }, - #[codec(index = 1)] - #[doc = "At the end of the session, no offence was committed."] - AllGood, - #[codec(index = 2)] - #[doc = "At the end of the session, at least one validator was found to be offline."] - SomeOffline { - offline: ::std::vec::Vec<(::subxt::sp_core::crypto::AccountId32, ())>, - }, - } - } - pub mod sr25519 { - use super::runtime_types; - pub mod app_sr25519 { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct Public(pub runtime_types::sp_core::sr25519::Public); - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct Signature(pub runtime_types::sp_core::sr25519::Signature); - } - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct BoundedOpaqueNetworkState { - pub peer_id: - runtime_types::frame_support::storage::weak_bounded_vec::WeakBoundedVec< - ::core::primitive::u8, - >, - pub external_addresses: - runtime_types::frame_support::storage::weak_bounded_vec::WeakBoundedVec< - runtime_types::frame_support::storage::weak_bounded_vec::WeakBoundedVec< - ::core::primitive::u8, - >, - >, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct Heartbeat<_0> { - pub block_number: _0, - pub network_state: runtime_types::sp_core::offchain::OpaqueNetworkState, - pub session_index: _0, - pub authority_index: _0, - pub validators_len: _0, - } - } - pub mod pallet_indices { - use super::runtime_types; - pub mod pallet { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Call { - #[codec(index = 0)] - #[doc = "Assign an previously unassigned index."] - #[doc = ""] - #[doc = "Payment: `Deposit` is reserved from the sender account."] - #[doc = ""] - #[doc = "The dispatch origin for this call must be _Signed_."] - #[doc = ""] - #[doc = "- `index`: the index to be claimed. This must not be in use."] - #[doc = ""] - #[doc = "Emits `IndexAssigned` if successful."] - #[doc = ""] - #[doc = "# "] - #[doc = "- `O(1)`."] - #[doc = "- One storage mutation (codec `O(1)`)."] - #[doc = "- One reserve operation."] - #[doc = "- One event."] - #[doc = "-------------------"] - #[doc = "- DB Weight: 1 Read/Write (Accounts)"] - #[doc = "# "] - claim { index: ::core::primitive::u32 }, - #[codec(index = 1)] - #[doc = "Assign an index already owned by the sender to another account. The balance reservation"] - #[doc = "is effectively transferred to the new account."] - #[doc = ""] - #[doc = "The dispatch origin for this call must be _Signed_."] - #[doc = ""] - #[doc = "- `index`: the index to be re-assigned. This must be owned by the sender."] - #[doc = "- `new`: the new owner of the index. 
This function is a no-op if it is equal to sender."] - #[doc = ""] - #[doc = "Emits `IndexAssigned` if successful."] - #[doc = ""] - #[doc = "# "] - #[doc = "- `O(1)`."] - #[doc = "- One storage mutation (codec `O(1)`)."] - #[doc = "- One transfer operation."] - #[doc = "- One event."] - #[doc = "-------------------"] - #[doc = "- DB Weight:"] - #[doc = " - Reads: Indices Accounts, System Account (recipient)"] - #[doc = " - Writes: Indices Accounts, System Account (recipient)"] - #[doc = "# "] - transfer { - new: ::subxt::sp_core::crypto::AccountId32, - index: ::core::primitive::u32, - }, - #[codec(index = 2)] - #[doc = "Free up an index owned by the sender."] - #[doc = ""] - #[doc = "Payment: Any previous deposit placed for the index is unreserved in the sender account."] - #[doc = ""] - #[doc = "The dispatch origin for this call must be _Signed_ and the sender must own the index."] - #[doc = ""] - #[doc = "- `index`: the index to be freed. This must be owned by the sender."] - #[doc = ""] - #[doc = "Emits `IndexFreed` if successful."] - #[doc = ""] - #[doc = "# "] - #[doc = "- `O(1)`."] - #[doc = "- One storage mutation (codec `O(1)`)."] - #[doc = "- One reserve operation."] - #[doc = "- One event."] - #[doc = "-------------------"] - #[doc = "- DB Weight: 1 Read/Write (Accounts)"] - #[doc = "# "] - free { index: ::core::primitive::u32 }, - #[codec(index = 3)] - #[doc = "Force an index to an account. This doesn't require a deposit. If the index is already"] - #[doc = "held, then any deposit is reimbursed to its current owner."] - #[doc = ""] - #[doc = "The dispatch origin for this call must be _Root_."] - #[doc = ""] - #[doc = "- `index`: the index to be (re-)assigned."] - #[doc = "- `new`: the new owner of the index. This function is a no-op if it is equal to sender."] - #[doc = "- `freeze`: if set to `true`, will freeze the index so it cannot be transferred."] - #[doc = ""] - #[doc = "Emits `IndexAssigned` if successful."] - #[doc = ""] - #[doc = "# "] - #[doc = "- `O(1)`."] - #[doc = "- One storage mutation (codec `O(1)`)."] - #[doc = "- Up to one reserve operation."] - #[doc = "- One event."] - #[doc = "-------------------"] - #[doc = "- DB Weight:"] - #[doc = " - Reads: Indices Accounts, System Account (original owner)"] - #[doc = " - Writes: Indices Accounts, System Account (original owner)"] - #[doc = "# "] - force_transfer { - new: ::subxt::sp_core::crypto::AccountId32, - index: ::core::primitive::u32, - freeze: ::core::primitive::bool, - }, - #[codec(index = 4)] - #[doc = "Freeze an index so it will always point to the sender account. 
This consumes the"] - #[doc = "deposit."] - #[doc = ""] - #[doc = "The dispatch origin for this call must be _Signed_ and the signing account must have a"] - #[doc = "non-frozen account `index`."] - #[doc = ""] - #[doc = "- `index`: the index to be frozen in place."] - #[doc = ""] - #[doc = "Emits `IndexFrozen` if successful."] - #[doc = ""] - #[doc = "# "] - #[doc = "- `O(1)`."] - #[doc = "- One storage mutation (codec `O(1)`)."] - #[doc = "- Up to one slash operation."] - #[doc = "- One event."] - #[doc = "-------------------"] - #[doc = "- DB Weight: 1 Read/Write (Accounts)"] - #[doc = "# "] - freeze { index: ::core::primitive::u32 }, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Error { - #[codec(index = 0)] - #[doc = "The index was not already assigned."] - NotAssigned, - #[codec(index = 1)] - #[doc = "The index is assigned to another account."] - NotOwner, - #[codec(index = 2)] - #[doc = "The index was not available."] - InUse, - #[codec(index = 3)] - #[doc = "The source and destination accounts are identical."] - NotTransfer, - #[codec(index = 4)] - #[doc = "The index is permanent and may not be freed/changed."] - Permanent, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Event { - #[codec(index = 0)] - #[doc = "A account index was assigned."] - IndexAssigned { - who: ::subxt::sp_core::crypto::AccountId32, - index: ::core::primitive::u32, - }, - #[codec(index = 1)] - #[doc = "A account index has been freed up (unassigned)."] - IndexFreed { index: ::core::primitive::u32 }, - #[codec(index = 2)] - #[doc = "A account index has been frozen to its current account ID."] - IndexFrozen { - index: ::core::primitive::u32, - who: ::subxt::sp_core::crypto::AccountId32, - }, - } - } - } - pub mod pallet_membership { - use super::runtime_types; - pub mod pallet { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Call { - #[codec(index = 0)] - #[doc = "Add a member `who` to the set."] - #[doc = ""] - #[doc = "May only be called from `T::AddOrigin`."] - add_member { - who: ::subxt::sp_core::crypto::AccountId32, - }, - #[codec(index = 1)] - #[doc = "Remove a member `who` from the set."] - #[doc = ""] - #[doc = "May only be called from `T::RemoveOrigin`."] - remove_member { - who: ::subxt::sp_core::crypto::AccountId32, - }, - #[codec(index = 2)] - #[doc = "Swap out one member `remove` for another `add`."] - #[doc = ""] - #[doc = "May only be called from `T::SwapOrigin`."] - #[doc = ""] - #[doc = "Prime membership is *not* passed from `remove` to `add`, if extant."] - swap_member { - remove: ::subxt::sp_core::crypto::AccountId32, - add: ::subxt::sp_core::crypto::AccountId32, - }, - #[codec(index = 3)] - #[doc = "Change the membership to a new set, disregarding the existing membership. Be nice and"] - #[doc = "pass `members` pre-sorted."] - #[doc = ""] - #[doc = "May only be called from `T::ResetOrigin`."] - reset_members { - members: ::std::vec::Vec<::subxt::sp_core::crypto::AccountId32>, - }, - #[codec(index = 4)] - #[doc = "Swap out the sending member for some other key `new`."] - #[doc = ""] - #[doc = "May only be called from `Signed` origin of a current member."] - #[doc = ""] - #[doc = "Prime membership is passed from the origin account to `new`, if extant."] - change_key { - new: ::subxt::sp_core::crypto::AccountId32, - }, - #[codec(index = 5)] - #[doc = "Set the prime member. 
Must be a current member."] - #[doc = ""] - #[doc = "May only be called from `T::PrimeOrigin`."] - set_prime { - who: ::subxt::sp_core::crypto::AccountId32, - }, - #[codec(index = 6)] - #[doc = "Remove the prime member if it exists."] - #[doc = ""] - #[doc = "May only be called from `T::PrimeOrigin`."] - clear_prime, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Error { - #[codec(index = 0)] - #[doc = "Already a member."] - AlreadyMember, - #[codec(index = 1)] - #[doc = "Not a member."] - NotMember, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Event { - #[codec(index = 0)] - #[doc = "The given member was added; see the transaction for who."] - MemberAdded, - #[codec(index = 1)] - #[doc = "The given member was removed; see the transaction for who."] - MemberRemoved, - #[codec(index = 2)] - #[doc = "Two members were swapped; see the transaction for who."] - MembersSwapped, - #[codec(index = 3)] - #[doc = "The membership was reset; see the transaction for who the new set is."] - MembersReset, - #[codec(index = 4)] - #[doc = "One of the members' keys changed."] - KeyChanged, - #[codec(index = 5)] - #[doc = "Phantom member, never used."] - Dummy, - } - } - } - pub mod pallet_multisig { - use super::runtime_types; - pub mod pallet { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Call { - #[codec(index = 0)] - #[doc = "Immediately dispatch a multi-signature call using a single approval from the caller."] - #[doc = ""] - #[doc = "The dispatch origin for this call must be _Signed_."] - #[doc = ""] - #[doc = "- `other_signatories`: The accounts (other than the sender) who are part of the"] - #[doc = "multi-signature, but do not participate in the approval process."] - #[doc = "- `call`: The call to be executed."] - #[doc = ""] - #[doc = "Result is equivalent to the dispatched result."] - #[doc = ""] - #[doc = "# "] - #[doc = "O(Z + C) where Z is the length of the call and C its execution weight."] - #[doc = "-------------------------------"] - #[doc = "- DB Weight: None"] - #[doc = "- Plus Call Weight"] - #[doc = "# "] - as_multi_threshold_1 { - other_signatories: ::std::vec::Vec<::subxt::sp_core::crypto::AccountId32>, - call: ::std::boxed::Box, - }, - #[codec(index = 1)] - #[doc = "Register approval for a dispatch to be made from a deterministic composite account if"] - #[doc = "approved by a total of `threshold - 1` of `other_signatories`."] - #[doc = ""] - #[doc = "If there are enough, then dispatch the call."] - #[doc = ""] - #[doc = "Payment: `DepositBase` will be reserved if this is the first approval, plus"] - #[doc = "`threshold` times `DepositFactor`. It is returned once this dispatch happens or"] - #[doc = "is cancelled."] - #[doc = ""] - #[doc = "The dispatch origin for this call must be _Signed_."] - #[doc = ""] - #[doc = "- `threshold`: The total number of approvals for this dispatch before it is executed."] - #[doc = "- `other_signatories`: The accounts (other than the sender) who can approve this"] - #[doc = "dispatch. May not be empty."] - #[doc = "- `maybe_timepoint`: If this is the first approval, then this must be `None`. 
If it is"] - #[doc = "not the first approval, then it must be `Some`, with the timepoint (block number and"] - #[doc = "transaction index) of the first approval transaction."] - #[doc = "- `call`: The call to be executed."] - #[doc = ""] - #[doc = "NOTE: Unless this is the final approval, you will generally want to use"] - #[doc = "`approve_as_multi` instead, since it only requires a hash of the call."] - #[doc = ""] - #[doc = "Result is equivalent to the dispatched result if `threshold` is exactly `1`. Otherwise"] - #[doc = "on success, result is `Ok` and the result from the interior call, if it was executed,"] - #[doc = "may be found in the deposited `MultisigExecuted` event."] - #[doc = ""] - #[doc = "# "] - #[doc = "- `O(S + Z + Call)`."] - #[doc = "- Up to one balance-reserve or unreserve operation."] - #[doc = "- One passthrough operation, one insert, both `O(S)` where `S` is the number of"] - #[doc = " signatories. `S` is capped by `MaxSignatories`, with weight being proportional."] - #[doc = "- One call encode & hash, both of complexity `O(Z)` where `Z` is tx-len."] - #[doc = "- One encode & hash, both of complexity `O(S)`."] - #[doc = "- Up to one binary search and insert (`O(logS + S)`)."] - #[doc = "- I/O: 1 read `O(S)`, up to 1 mutate `O(S)`. Up to one remove."] - #[doc = "- One event."] - #[doc = "- The weight of the `call`."] - #[doc = "- Storage: inserts one item, value size bounded by `MaxSignatories`, with a deposit"] - #[doc = " taken for its lifetime of `DepositBase + threshold * DepositFactor`."] - #[doc = "-------------------------------"] - #[doc = "- DB Weight:"] - #[doc = " - Reads: Multisig Storage, [Caller Account], Calls (if `store_call`)"] - #[doc = " - Writes: Multisig Storage, [Caller Account], Calls (if `store_call`)"] - #[doc = "- Plus Call Weight"] - #[doc = "# "] - as_multi { - threshold: ::core::primitive::u16, - other_signatories: ::std::vec::Vec<::subxt::sp_core::crypto::AccountId32>, - maybe_timepoint: ::core::option::Option< - runtime_types::pallet_multisig::Timepoint<::core::primitive::u32>, - >, - call: ::subxt::WrapperKeepOpaque, - store_call: ::core::primitive::bool, - max_weight: ::core::primitive::u64, - }, - #[codec(index = 2)] - #[doc = "Register approval for a dispatch to be made from a deterministic composite account if"] - #[doc = "approved by a total of `threshold - 1` of `other_signatories`."] - #[doc = ""] - #[doc = "Payment: `DepositBase` will be reserved if this is the first approval, plus"] - #[doc = "`threshold` times `DepositFactor`. It is returned once this dispatch happens or"] - #[doc = "is cancelled."] - #[doc = ""] - #[doc = "The dispatch origin for this call must be _Signed_."] - #[doc = ""] - #[doc = "- `threshold`: The total number of approvals for this dispatch before it is executed."] - #[doc = "- `other_signatories`: The accounts (other than the sender) who can approve this"] - #[doc = "dispatch. May not be empty."] - #[doc = "- `maybe_timepoint`: If this is the first approval, then this must be `None`. 
If it is"] - #[doc = "not the first approval, then it must be `Some`, with the timepoint (block number and"] - #[doc = "transaction index) of the first approval transaction."] - #[doc = "- `call_hash`: The hash of the call to be executed."] - #[doc = ""] - #[doc = "NOTE: If this is the final approval, you will want to use `as_multi` instead."] - #[doc = ""] - #[doc = "# "] - #[doc = "- `O(S)`."] - #[doc = "- Up to one balance-reserve or unreserve operation."] - #[doc = "- One passthrough operation, one insert, both `O(S)` where `S` is the number of"] - #[doc = " signatories. `S` is capped by `MaxSignatories`, with weight being proportional."] - #[doc = "- One encode & hash, both of complexity `O(S)`."] - #[doc = "- Up to one binary search and insert (`O(logS + S)`)."] - #[doc = "- I/O: 1 read `O(S)`, up to 1 mutate `O(S)`. Up to one remove."] - #[doc = "- One event."] - #[doc = "- Storage: inserts one item, value size bounded by `MaxSignatories`, with a deposit"] - #[doc = " taken for its lifetime of `DepositBase + threshold * DepositFactor`."] - #[doc = "----------------------------------"] - #[doc = "- DB Weight:"] - #[doc = " - Read: Multisig Storage, [Caller Account]"] - #[doc = " - Write: Multisig Storage, [Caller Account]"] - #[doc = "# "] - approve_as_multi { - threshold: ::core::primitive::u16, - other_signatories: ::std::vec::Vec<::subxt::sp_core::crypto::AccountId32>, - maybe_timepoint: ::core::option::Option< - runtime_types::pallet_multisig::Timepoint<::core::primitive::u32>, - >, - call_hash: [::core::primitive::u8; 32usize], - max_weight: ::core::primitive::u64, - }, - #[codec(index = 3)] - #[doc = "Cancel a pre-existing, on-going multisig transaction. Any deposit reserved previously"] - #[doc = "for this operation will be unreserved on success."] - #[doc = ""] - #[doc = "The dispatch origin for this call must be _Signed_."] - #[doc = ""] - #[doc = "- `threshold`: The total number of approvals for this dispatch before it is executed."] - #[doc = "- `other_signatories`: The accounts (other than the sender) who can approve this"] - #[doc = "dispatch. May not be empty."] - #[doc = "- `timepoint`: The timepoint (block number and transaction index) of the first approval"] - #[doc = "transaction for this dispatch."] - #[doc = "- `call_hash`: The hash of the call to be executed."] - #[doc = ""] - #[doc = "# "] - #[doc = "- `O(S)`."] - #[doc = "- Up to one balance-reserve or unreserve operation."] - #[doc = "- One passthrough operation, one insert, both `O(S)` where `S` is the number of"] - #[doc = " signatories. 
`S` is capped by `MaxSignatories`, with weight being proportional."] - #[doc = "- One encode & hash, both of complexity `O(S)`."] - #[doc = "- One event."] - #[doc = "- I/O: 1 read `O(S)`, one remove."] - #[doc = "- Storage: removes one item."] - #[doc = "----------------------------------"] - #[doc = "- DB Weight:"] - #[doc = " - Read: Multisig Storage, [Caller Account], Refund Account, Calls"] - #[doc = " - Write: Multisig Storage, [Caller Account], Refund Account, Calls"] - #[doc = "# "] - cancel_as_multi { - threshold: ::core::primitive::u16, - other_signatories: ::std::vec::Vec<::subxt::sp_core::crypto::AccountId32>, - timepoint: - runtime_types::pallet_multisig::Timepoint<::core::primitive::u32>, - call_hash: [::core::primitive::u8; 32usize], - }, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Error { - #[codec(index = 0)] - #[doc = "Threshold must be 2 or greater."] - MinimumThreshold, - #[codec(index = 1)] - #[doc = "Call is already approved by this signatory."] - AlreadyApproved, - #[codec(index = 2)] - #[doc = "Call doesn't need any (more) approvals."] - NoApprovalsNeeded, - #[codec(index = 3)] - #[doc = "There are too few signatories in the list."] - TooFewSignatories, - #[codec(index = 4)] - #[doc = "There are too many signatories in the list."] - TooManySignatories, - #[codec(index = 5)] - #[doc = "The signatories were provided out of order; they should be ordered."] - SignatoriesOutOfOrder, - #[codec(index = 6)] - #[doc = "The sender was contained in the other signatories; it shouldn't be."] - SenderInSignatories, - #[codec(index = 7)] - #[doc = "Multisig operation not found when attempting to cancel."] - NotFound, - #[codec(index = 8)] - #[doc = "Only the account that originally created the multisig is able to cancel it."] - NotOwner, - #[codec(index = 9)] - #[doc = "No timepoint was given, yet the multisig operation is already underway."] - NoTimepoint, - #[codec(index = 10)] - #[doc = "A different timepoint was given to the multisig operation that is underway."] - WrongTimepoint, - #[codec(index = 11)] - #[doc = "A timepoint was given, yet no multisig operation is underway."] - UnexpectedTimepoint, - #[codec(index = 12)] - #[doc = "The maximum weight information provided was too low."] - MaxWeightTooLow, - #[codec(index = 13)] - #[doc = "The data to be stored is already stored."] - AlreadyStored, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Event { - #[codec(index = 0)] - #[doc = "A new multisig operation has begun."] - NewMultisig { - approving: ::subxt::sp_core::crypto::AccountId32, - multisig: ::subxt::sp_core::crypto::AccountId32, - call_hash: [::core::primitive::u8; 32usize], - }, - #[codec(index = 1)] - #[doc = "A multisig operation has been approved by someone."] - MultisigApproval { - approving: ::subxt::sp_core::crypto::AccountId32, - timepoint: - runtime_types::pallet_multisig::Timepoint<::core::primitive::u32>, - multisig: ::subxt::sp_core::crypto::AccountId32, - call_hash: [::core::primitive::u8; 32usize], - }, - #[codec(index = 2)] - #[doc = "A multisig operation has been executed."] - MultisigExecuted { - approving: ::subxt::sp_core::crypto::AccountId32, - timepoint: - runtime_types::pallet_multisig::Timepoint<::core::primitive::u32>, - multisig: ::subxt::sp_core::crypto::AccountId32, - call_hash: [::core::primitive::u8; 32usize], - result: - ::core::result::Result<(), runtime_types::sp_runtime::DispatchError>, - }, - #[codec(index = 3)] - #[doc = "A multisig operation 
has been cancelled."] - MultisigCancelled { - cancelling: ::subxt::sp_core::crypto::AccountId32, - timepoint: - runtime_types::pallet_multisig::Timepoint<::core::primitive::u32>, - multisig: ::subxt::sp_core::crypto::AccountId32, - call_hash: [::core::primitive::u8; 32usize], - }, - } - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct Multisig<_0, _1, _2> { - pub when: runtime_types::pallet_multisig::Timepoint<_0>, - pub deposit: _1, - pub depositor: _2, - pub approvals: ::std::vec::Vec<_2>, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct Timepoint<_0> { - pub height: _0, - pub index: _0, - } - } - pub mod pallet_offences { - use super::runtime_types; - pub mod pallet { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Event { - #[codec(index = 0)] - #[doc = "There is an offence reported of the given `kind` happened at the `session_index` and"] - #[doc = "(kind-specific) time slot. This event is not deposited for duplicate slashes."] - #[doc = "\\[kind, timeslot\\]."] - Offence { - kind: [::core::primitive::u8; 16usize], - timeslot: ::std::vec::Vec<::core::primitive::u8>, - }, - } - } - } - pub mod pallet_proxy { - use super::runtime_types; - pub mod pallet { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Call { - #[codec(index = 0)] - #[doc = "Dispatch the given `call` from an account that the sender is authorised for through"] - #[doc = "`add_proxy`."] - #[doc = ""] - #[doc = "Removes any corresponding announcement(s)."] - #[doc = ""] - #[doc = "The dispatch origin for this call must be _Signed_."] - #[doc = ""] - #[doc = "Parameters:"] - #[doc = "- `real`: The account that the proxy will make a call on behalf of."] - #[doc = "- `force_proxy_type`: Specify the exact proxy type to be used and checked for this call."] - #[doc = "- `call`: The call to be made by the `real` account."] - #[doc = ""] - #[doc = "# "] - #[doc = "Weight is a function of the number of proxies the user has (P)."] - #[doc = "# "] - proxy { - real: ::subxt::sp_core::crypto::AccountId32, - force_proxy_type: - ::core::option::Option, - call: ::std::boxed::Box, - }, - #[codec(index = 1)] - #[doc = "Register a proxy account for the sender that is able to make calls on its behalf."] - #[doc = ""] - #[doc = "The dispatch origin for this call must be _Signed_."] - #[doc = ""] - #[doc = "Parameters:"] - #[doc = "- `proxy`: The account that the `caller` would like to make a proxy."] - #[doc = "- `proxy_type`: The permissions allowed for this proxy account."] - #[doc = "- `delay`: The announcement period required of the initial proxy. 
Will generally be"] - #[doc = "zero."] - #[doc = ""] - #[doc = "# "] - #[doc = "Weight is a function of the number of proxies the user has (P)."] - #[doc = "# "] - add_proxy { - delegate: ::subxt::sp_core::crypto::AccountId32, - proxy_type: runtime_types::rococo_runtime::ProxyType, - delay: ::core::primitive::u32, - }, - #[codec(index = 2)] - #[doc = "Unregister a proxy account for the sender."] - #[doc = ""] - #[doc = "The dispatch origin for this call must be _Signed_."] - #[doc = ""] - #[doc = "Parameters:"] - #[doc = "- `proxy`: The account that the `caller` would like to remove as a proxy."] - #[doc = "- `proxy_type`: The permissions currently enabled for the removed proxy account."] - #[doc = ""] - #[doc = "# "] - #[doc = "Weight is a function of the number of proxies the user has (P)."] - #[doc = "# "] - remove_proxy { - delegate: ::subxt::sp_core::crypto::AccountId32, - proxy_type: runtime_types::rococo_runtime::ProxyType, - delay: ::core::primitive::u32, - }, - #[codec(index = 3)] - #[doc = "Unregister all proxy accounts for the sender."] - #[doc = ""] - #[doc = "The dispatch origin for this call must be _Signed_."] - #[doc = ""] - #[doc = "WARNING: This may be called on accounts created by `anonymous`, however if done, then"] - #[doc = "the unreserved fees will be inaccessible. **All access to this account will be lost.**"] - #[doc = ""] - #[doc = "# "] - #[doc = "Weight is a function of the number of proxies the user has (P)."] - #[doc = "# "] - remove_proxies, - #[codec(index = 4)] - #[doc = "Spawn a fresh new account that is guaranteed to be otherwise inaccessible, and"] - #[doc = "initialize it with a proxy of `proxy_type` for `origin` sender."] - #[doc = ""] - #[doc = "Requires a `Signed` origin."] - #[doc = ""] - #[doc = "- `proxy_type`: The type of the proxy that the sender will be registered as over the"] - #[doc = "new account. This will almost always be the most permissive `ProxyType` possible to"] - #[doc = "allow for maximum flexibility."] - #[doc = "- `index`: A disambiguation index, in case this is called multiple times in the same"] - #[doc = "transaction (e.g. with `utility::batch`). Unless you're using `batch` you probably just"] - #[doc = "want to use `0`."] - #[doc = "- `delay`: The announcement period required of the initial proxy. Will generally be"] - #[doc = "zero."] - #[doc = ""] - #[doc = "Fails with `Duplicate` if this has already been called in this transaction, from the"] - #[doc = "same sender, with the same parameters."] - #[doc = ""] - #[doc = "Fails if there are insufficient funds to pay for deposit."] - #[doc = ""] - #[doc = "# "] - #[doc = "Weight is a function of the number of proxies the user has (P)."] - #[doc = "# "] - #[doc = "TODO: Might be over counting 1 read"] - anonymous { - proxy_type: runtime_types::rococo_runtime::ProxyType, - delay: ::core::primitive::u32, - index: ::core::primitive::u16, - }, - #[codec(index = 5)] - #[doc = "Removes a previously spawned anonymous proxy."] - #[doc = ""] - #[doc = "WARNING: **All access to this account will be lost.** Any funds held in it will be"] - #[doc = "inaccessible."] - #[doc = ""] - #[doc = "Requires a `Signed` origin, and the sender account must have been created by a call to"] - #[doc = "`anonymous` with corresponding parameters."] - #[doc = ""] - #[doc = "- `spawner`: The account that originally called `anonymous` to create this account."] - #[doc = "- `index`: The disambiguation index originally passed to `anonymous`. 
Probably `0`."] - #[doc = "- `proxy_type`: The proxy type originally passed to `anonymous`."] - #[doc = "- `height`: The height of the chain when the call to `anonymous` was processed."] - #[doc = "- `ext_index`: The extrinsic index in which the call to `anonymous` was processed."] - #[doc = ""] - #[doc = "Fails with `NoPermission` in case the caller is not a previously created anonymous"] - #[doc = "account whose `anonymous` call has corresponding parameters."] - #[doc = ""] - #[doc = "# "] - #[doc = "Weight is a function of the number of proxies the user has (P)."] - #[doc = "# "] - kill_anonymous { - spawner: ::subxt::sp_core::crypto::AccountId32, - proxy_type: runtime_types::rococo_runtime::ProxyType, - index: ::core::primitive::u16, - #[codec(compact)] - height: ::core::primitive::u32, - #[codec(compact)] - ext_index: ::core::primitive::u32, - }, - #[codec(index = 6)] - #[doc = "Publish the hash of a proxy-call that will be made in the future."] - #[doc = ""] - #[doc = "This must be called some number of blocks before the corresponding `proxy` is attempted"] - #[doc = "if the delay associated with the proxy relationship is greater than zero."] - #[doc = ""] - #[doc = "No more than `MaxPending` announcements may be made at any one time."] - #[doc = ""] - #[doc = "This will take a deposit of `AnnouncementDepositFactor` as well as"] - #[doc = "`AnnouncementDepositBase` if there are no other pending announcements."] - #[doc = ""] - #[doc = "The dispatch origin for this call must be _Signed_ and a proxy of `real`."] - #[doc = ""] - #[doc = "Parameters:"] - #[doc = "- `real`: The account that the proxy will make a call on behalf of."] - #[doc = "- `call_hash`: The hash of the call to be made by the `real` account."] - #[doc = ""] - #[doc = "# "] - #[doc = "Weight is a function of:"] - #[doc = "- A: the number of announcements made."] - #[doc = "- P: the number of proxies the user has."] - #[doc = "# "] - announce { - real: ::subxt::sp_core::crypto::AccountId32, - call_hash: ::subxt::sp_core::H256, - }, - #[codec(index = 7)] - #[doc = "Remove a given announcement."] - #[doc = ""] - #[doc = "May be called by a proxy account to remove a call they previously announced and return"] - #[doc = "the deposit."] - #[doc = ""] - #[doc = "The dispatch origin for this call must be _Signed_."] - #[doc = ""] - #[doc = "Parameters:"] - #[doc = "- `real`: The account that the proxy will make a call on behalf of."] - #[doc = "- `call_hash`: The hash of the call to be made by the `real` account."] - #[doc = ""] - #[doc = "# "] - #[doc = "Weight is a function of:"] - #[doc = "- A: the number of announcements made."] - #[doc = "- P: the number of proxies the user has."] - #[doc = "# "] - remove_announcement { - real: ::subxt::sp_core::crypto::AccountId32, - call_hash: ::subxt::sp_core::H256, - }, - #[codec(index = 8)] - #[doc = "Remove the given announcement of a delegate."] - #[doc = ""] - #[doc = "May be called by a target (proxied) account to remove a call that one of their delegates"] - #[doc = "(`delegate`) has announced they want to execute. 
The deposit is returned."] - #[doc = ""] - #[doc = "The dispatch origin for this call must be _Signed_."] - #[doc = ""] - #[doc = "Parameters:"] - #[doc = "- `delegate`: The account that previously announced the call."] - #[doc = "- `call_hash`: The hash of the call to be made."] - #[doc = ""] - #[doc = "# "] - #[doc = "Weight is a function of:"] - #[doc = "- A: the number of announcements made."] - #[doc = "- P: the number of proxies the user has."] - #[doc = "# "] - reject_announcement { - delegate: ::subxt::sp_core::crypto::AccountId32, - call_hash: ::subxt::sp_core::H256, - }, - #[codec(index = 9)] - #[doc = "Dispatch the given `call` from an account that the sender is authorized for through"] - #[doc = "`add_proxy`."] - #[doc = ""] - #[doc = "Removes any corresponding announcement(s)."] - #[doc = ""] - #[doc = "The dispatch origin for this call must be _Signed_."] - #[doc = ""] - #[doc = "Parameters:"] - #[doc = "- `real`: The account that the proxy will make a call on behalf of."] - #[doc = "- `force_proxy_type`: Specify the exact proxy type to be used and checked for this call."] - #[doc = "- `call`: The call to be made by the `real` account."] - #[doc = ""] - #[doc = "# "] - #[doc = "Weight is a function of:"] - #[doc = "- A: the number of announcements made."] - #[doc = "- P: the number of proxies the user has."] - #[doc = "# "] - proxy_announced { - delegate: ::subxt::sp_core::crypto::AccountId32, - real: ::subxt::sp_core::crypto::AccountId32, - force_proxy_type: - ::core::option::Option, - call: ::std::boxed::Box, - }, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Error { - #[codec(index = 0)] - #[doc = "There are too many proxies registered or too many announcements pending."] - TooMany, - #[codec(index = 1)] - #[doc = "Proxy registration not found."] - NotFound, - #[codec(index = 2)] - #[doc = "Sender is not a proxy of the account to be proxied."] - NotProxy, - #[codec(index = 3)] - #[doc = "A call which is incompatible with the proxy type's filter was attempted."] - Unproxyable, - #[codec(index = 4)] - #[doc = "Account is already a proxy."] - Duplicate, - #[codec(index = 5)] - #[doc = "Call may not be made by proxy because it may escalate its privileges."] - NoPermission, - #[codec(index = 6)] - #[doc = "Announcement, if made at all, was made too recently."] - Unannounced, - #[codec(index = 7)] - #[doc = "Cannot add self as proxy."] - NoSelfProxy, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Event { - #[codec(index = 0)] - #[doc = "A proxy was executed correctly, with the given."] - ProxyExecuted { - result: - ::core::result::Result<(), runtime_types::sp_runtime::DispatchError>, - }, - #[codec(index = 1)] - #[doc = "Anonymous account has been created by new proxy with given"] - #[doc = "disambiguation index and proxy type."] - AnonymousCreated { - anonymous: ::subxt::sp_core::crypto::AccountId32, - who: ::subxt::sp_core::crypto::AccountId32, - proxy_type: runtime_types::rococo_runtime::ProxyType, - disambiguation_index: ::core::primitive::u16, - }, - #[codec(index = 2)] - #[doc = "An announcement was placed to make a call in the future."] - Announced { - real: ::subxt::sp_core::crypto::AccountId32, - proxy: ::subxt::sp_core::crypto::AccountId32, - call_hash: ::subxt::sp_core::H256, - }, - #[codec(index = 3)] - #[doc = "A proxy was added."] - ProxyAdded { - delegator: ::subxt::sp_core::crypto::AccountId32, - delegatee: ::subxt::sp_core::crypto::AccountId32, - proxy_type: 
runtime_types::rococo_runtime::ProxyType, - delay: ::core::primitive::u32, - }, - #[codec(index = 4)] - #[doc = "A proxy was removed."] - ProxyRemoved { - delegator: ::subxt::sp_core::crypto::AccountId32, - delegatee: ::subxt::sp_core::crypto::AccountId32, - proxy_type: runtime_types::rococo_runtime::ProxyType, - delay: ::core::primitive::u32, - }, - } - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct Announcement<_0, _1, _2> { - pub real: _0, - pub call_hash: _1, - pub height: _2, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct ProxyDefinition<_0, _1, _2> { - pub delegate: _0, - pub proxy_type: _1, - pub delay: _2, - } - } - pub mod pallet_session { - use super::runtime_types; - pub mod pallet { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Call { - #[codec(index = 0)] - #[doc = "Sets the session key(s) of the function caller to `keys`."] - #[doc = "Allows an account to set its session key prior to becoming a validator."] - #[doc = "This doesn't take effect until the next session."] - #[doc = ""] - #[doc = "The dispatch origin of this function must be signed."] - #[doc = ""] - #[doc = "# "] - #[doc = "- Complexity: `O(1)`. Actual cost depends on the number of length of"] - #[doc = " `T::Keys::key_ids()` which is fixed."] - #[doc = "- DbReads: `origin account`, `T::ValidatorIdOf`, `NextKeys`"] - #[doc = "- DbWrites: `origin account`, `NextKeys`"] - #[doc = "- DbReads per key id: `KeyOwner`"] - #[doc = "- DbWrites per key id: `KeyOwner`"] - #[doc = "# "] - set_keys { - keys: runtime_types::rococo_runtime::SessionKeys, - proof: ::std::vec::Vec<::core::primitive::u8>, - }, - #[codec(index = 1)] - #[doc = "Removes any session key(s) of the function caller."] - #[doc = ""] - #[doc = "This doesn't take effect until the next session."] - #[doc = ""] - #[doc = "The dispatch origin of this function must be Signed and the account must be either be"] - #[doc = "convertible to a validator ID using the chain's typical addressing system (this usually"] - #[doc = "means being a controller account) or directly convertible into a validator ID (which"] - #[doc = "usually means being a stash account)."] - #[doc = ""] - #[doc = "# "] - #[doc = "- Complexity: `O(1)` in number of key types. Actual cost depends on the number of length"] - #[doc = " of `T::Keys::key_ids()` which is fixed."] - #[doc = "- DbReads: `T::ValidatorIdOf`, `NextKeys`, `origin account`"] - #[doc = "- DbWrites: `NextKeys`, `origin account`"] - #[doc = "- DbWrites per key id: `KeyOwner`"] - #[doc = "# "] - purge_keys, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Error { - #[codec(index = 0)] - #[doc = "Invalid ownership proof."] - InvalidProof, - #[codec(index = 1)] - #[doc = "No associated validator ID for account."] - NoAssociatedValidatorId, - #[codec(index = 2)] - #[doc = "Registered duplicate key."] - DuplicatedKey, - #[codec(index = 3)] - #[doc = "No keys are associated with this account."] - NoKeys, - #[codec(index = 4)] - #[doc = "Key setting account is not live, so it's impossible to associate keys."] - NoAccount, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Event { - #[codec(index = 0)] - #[doc = "New session has happened. 
Note that the argument is the session index, not the"] - #[doc = "block number as the type might suggest."] - NewSession { - session_index: ::core::primitive::u32, - }, - } - } - } - pub mod pallet_sudo { - use super::runtime_types; - pub mod pallet { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Call { - #[codec(index = 0)] - #[doc = "Authenticates the sudo key and dispatches a function call with `Root` origin."] - #[doc = ""] - #[doc = "The dispatch origin for this call must be _Signed_."] - #[doc = ""] - #[doc = "# "] - #[doc = "- O(1)."] - #[doc = "- Limited storage reads."] - #[doc = "- One DB write (event)."] - #[doc = "- Weight of derivative `call` execution + 10,000."] - #[doc = "# "] - sudo { - call: ::std::boxed::Box, - }, - #[codec(index = 1)] - #[doc = "Authenticates the sudo key and dispatches a function call with `Root` origin."] - #[doc = "This function does not check the weight of the call, and instead allows the"] - #[doc = "Sudo user to specify the weight of the call."] - #[doc = ""] - #[doc = "The dispatch origin for this call must be _Signed_."] - #[doc = ""] - #[doc = "# "] - #[doc = "- O(1)."] - #[doc = "- The weight of this call is defined by the caller."] - #[doc = "# "] - sudo_unchecked_weight { - call: ::std::boxed::Box, - weight: ::core::primitive::u64, - }, - #[codec(index = 2)] - #[doc = "Authenticates the current sudo key and sets the given AccountId (`new`) as the new sudo"] - #[doc = "key."] - #[doc = ""] - #[doc = "The dispatch origin for this call must be _Signed_."] - #[doc = ""] - #[doc = "# "] - #[doc = "- O(1)."] - #[doc = "- Limited storage reads."] - #[doc = "- One DB change."] - #[doc = "# "] - set_key { - new: ::subxt::sp_runtime::MultiAddress< - ::subxt::sp_core::crypto::AccountId32, - (), - >, - }, - #[codec(index = 3)] - #[doc = "Authenticates the sudo key and dispatches a function call with `Signed` origin from"] - #[doc = "a given account."] - #[doc = ""] - #[doc = "The dispatch origin for this call must be _Signed_."] - #[doc = ""] - #[doc = "# "] - #[doc = "- O(1)."] - #[doc = "- Limited storage reads."] - #[doc = "- One DB write (event)."] - #[doc = "- Weight of derivative `call` execution + 10,000."] - #[doc = "# "] - sudo_as { - who: ::subxt::sp_runtime::MultiAddress< - ::subxt::sp_core::crypto::AccountId32, - (), - >, - call: ::std::boxed::Box, - }, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Error { - #[codec(index = 0)] - #[doc = "Sender must be the Sudo account"] - RequireSudo, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Event { - #[codec(index = 0)] - #[doc = "A sudo just took place. \\[result\\]"] - Sudid { - sudo_result: - ::core::result::Result<(), runtime_types::sp_runtime::DispatchError>, - }, - #[codec(index = 1)] - #[doc = "The \\[sudoer\\] just switched identity; the old key is supplied if one existed."] - KeyChanged { - old_sudoer: ::core::option::Option<::subxt::sp_core::crypto::AccountId32>, - }, - #[codec(index = 2)] - #[doc = "A sudo just took place. 
\\[result\\]"] - SudoAsDone { - sudo_result: - ::core::result::Result<(), runtime_types::sp_runtime::DispatchError>, - }, - } - } - } - pub mod pallet_timestamp { - use super::runtime_types; - pub mod pallet { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Call { - #[codec(index = 0)] - #[doc = "Set the current time."] - #[doc = ""] - #[doc = "This call should be invoked exactly once per block. It will panic at the finalization"] - #[doc = "phase, if this call hasn't been invoked by that time."] - #[doc = ""] - #[doc = "The timestamp should be greater than the previous one by the amount specified by"] - #[doc = "`MinimumPeriod`."] - #[doc = ""] - #[doc = "The dispatch origin for this call must be `Inherent`."] - #[doc = ""] - #[doc = "# "] - #[doc = "- `O(1)` (Note that implementations of `OnTimestampSet` must also be `O(1)`)"] - #[doc = "- 1 storage read and 1 storage mutation (codec `O(1)`). (because of `DidUpdate::take` in"] - #[doc = " `on_finalize`)"] - #[doc = "- 1 event handler `on_timestamp_set`. Must be `O(1)`."] - #[doc = "# "] - set { - #[codec(compact)] - now: ::core::primitive::u64, - }, - } - } - } - pub mod pallet_transaction_payment { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct ChargeTransactionPayment(#[codec(compact)] pub ::core::primitive::u128); - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Releases { - #[codec(index = 0)] - V1Ancient, - #[codec(index = 1)] - V2, - } - } - pub mod pallet_utility { - use super::runtime_types; - pub mod pallet { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Call { - #[codec(index = 0)] - #[doc = "Send a batch of dispatch calls."] - #[doc = ""] - #[doc = "May be called from any origin."] - #[doc = ""] - #[doc = "- `calls`: The calls to be dispatched from the same origin. The number of call must not"] - #[doc = " exceed the constant: `batched_calls_limit` (available in constant metadata)."] - #[doc = ""] - #[doc = "If origin is root then call are dispatch without checking origin filter. (This includes"] - #[doc = "bypassing `frame_system::Config::BaseCallFilter`)."] - #[doc = ""] - #[doc = "# "] - #[doc = "- Complexity: O(C) where C is the number of calls to be batched."] - #[doc = "# "] - #[doc = ""] - #[doc = "This will return `Ok` in all circumstances. To determine the success of the batch, an"] - #[doc = "event is deposited. If a call failed and the batch was interrupted, then the"] - #[doc = "`BatchInterrupted` event is deposited, along with the number of successful calls made"] - #[doc = "and the error of the failed call. If all were successful, then the `BatchCompleted`"] - #[doc = "event is deposited."] - batch { - calls: ::std::vec::Vec, - }, - #[codec(index = 1)] - #[doc = "Send a call through an indexed pseudonym of the sender."] - #[doc = ""] - #[doc = "Filter from origin are passed along. 
The call will be dispatched with an origin which"] - #[doc = "use the same filter as the origin of this call."] - #[doc = ""] - #[doc = "NOTE: If you need to ensure that any account-based filtering is not honored (i.e."] - #[doc = "because you expect `proxy` to have been used prior in the call stack and you do not want"] - #[doc = "the call restrictions to apply to any sub-accounts), then use `as_multi_threshold_1`"] - #[doc = "in the Multisig pallet instead."] - #[doc = ""] - #[doc = "NOTE: Prior to version *12, this was called `as_limited_sub`."] - #[doc = ""] - #[doc = "The dispatch origin for this call must be _Signed_."] - as_derivative { - index: ::core::primitive::u16, - call: ::std::boxed::Box, - }, - #[codec(index = 2)] - #[doc = "Send a batch of dispatch calls and atomically execute them."] - #[doc = "The whole transaction will rollback and fail if any of the calls failed."] - #[doc = ""] - #[doc = "May be called from any origin."] - #[doc = ""] - #[doc = "- `calls`: The calls to be dispatched from the same origin. The number of call must not"] - #[doc = " exceed the constant: `batched_calls_limit` (available in constant metadata)."] - #[doc = ""] - #[doc = "If origin is root then call are dispatch without checking origin filter. (This includes"] - #[doc = "bypassing `frame_system::Config::BaseCallFilter`)."] - #[doc = ""] - #[doc = "# "] - #[doc = "- Complexity: O(C) where C is the number of calls to be batched."] - #[doc = "# "] - batch_all { - calls: ::std::vec::Vec, - }, - #[codec(index = 3)] - #[doc = "Dispatches a function call with a provided origin."] - #[doc = ""] - #[doc = "The dispatch origin for this call must be _Root_."] - #[doc = ""] - #[doc = "# "] - #[doc = "- O(1)."] - #[doc = "- Limited storage reads."] - #[doc = "- One DB write (event)."] - #[doc = "- Weight of derivative `call` execution + T::WeightInfo::dispatch_as()."] - #[doc = "# "] - dispatch_as { - as_origin: ::std::boxed::Box, - call: ::std::boxed::Box, - }, - #[codec(index = 4)] - #[doc = "Send a batch of dispatch calls."] - #[doc = "Unlike `batch`, it allows errors and won't interrupt."] - #[doc = ""] - #[doc = "May be called from any origin."] - #[doc = ""] - #[doc = "- `calls`: The calls to be dispatched from the same origin. The number of call must not"] - #[doc = " exceed the constant: `batched_calls_limit` (available in constant metadata)."] - #[doc = ""] - #[doc = "If origin is root then call are dispatch without checking origin filter. (This includes"] - #[doc = "bypassing `frame_system::Config::BaseCallFilter`)."] - #[doc = ""] - #[doc = "# "] - #[doc = "- Complexity: O(C) where C is the number of calls to be batched."] - #[doc = "# "] - force_batch { - calls: ::std::vec::Vec, - }, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Error { - #[codec(index = 0)] - #[doc = "Too many calls batched."] - TooManyCalls, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Event { - #[codec(index = 0)] - #[doc = "Batch of dispatches did not complete fully. 
Index of first failing dispatch given, as"] - #[doc = "well as the error."] - BatchInterrupted { - index: ::core::primitive::u32, - error: runtime_types::sp_runtime::DispatchError, - }, - #[codec(index = 1)] - #[doc = "Batch of dispatches completed fully with no error."] - BatchCompleted, - #[codec(index = 2)] - #[doc = "Batch of dispatches completed but has errors."] - BatchCompletedWithErrors, - #[codec(index = 3)] - #[doc = "A single item within a Batch of dispatches has completed with no error."] - ItemCompleted, - #[codec(index = 4)] - #[doc = "A single item within a Batch of dispatches has completed with error."] - ItemFailed { - error: runtime_types::sp_runtime::DispatchError, - }, - #[codec(index = 5)] - #[doc = "A call was dispatched."] - DispatchedAs { - result: - ::core::result::Result<(), runtime_types::sp_runtime::DispatchError>, - }, - } - } - } - pub mod pallet_xcm { - use super::runtime_types; - pub mod pallet { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Call { - #[codec(index = 0)] - send { - dest: ::std::boxed::Box, - message: ::std::boxed::Box, - }, - #[codec(index = 1)] - #[doc = "Teleport some assets from the local chain to some destination chain."] - #[doc = ""] - #[doc = "Fee payment on the destination side is made from the asset in the `assets` vector of"] - #[doc = "index `fee_asset_item`. The weight limit for fees is not provided and thus is unlimited,"] - #[doc = "with all fees taken as needed from the asset."] - #[doc = ""] - #[doc = "- `origin`: Must be capable of withdrawing the `assets` and executing XCM."] - #[doc = "- `dest`: Destination context for the assets. Will typically be `X2(Parent, Parachain(..))` to send"] - #[doc = " from parachain to parachain, or `X1(Parachain(..))` to send from relay to parachain."] - #[doc = "- `beneficiary`: A beneficiary location for the assets in the context of `dest`. Will generally be"] - #[doc = " an `AccountId32` value."] - #[doc = "- `assets`: The assets to be withdrawn. The first item should be the currency used to to pay the fee on the"] - #[doc = " `dest` side. May not be empty."] - #[doc = "- `fee_asset_item`: The index into `assets` of the item which should be used to pay"] - #[doc = " fees."] - teleport_assets { - dest: ::std::boxed::Box, - beneficiary: ::std::boxed::Box, - assets: ::std::boxed::Box, - fee_asset_item: ::core::primitive::u32, - }, - #[codec(index = 2)] - #[doc = "Transfer some assets from the local chain to the sovereign account of a destination"] - #[doc = "chain and forward a notification XCM."] - #[doc = ""] - #[doc = "Fee payment on the destination side is made from the asset in the `assets` vector of"] - #[doc = "index `fee_asset_item`. The weight limit for fees is not provided and thus is unlimited,"] - #[doc = "with all fees taken as needed from the asset."] - #[doc = ""] - #[doc = "- `origin`: Must be capable of withdrawing the `assets` and executing XCM."] - #[doc = "- `dest`: Destination context for the assets. Will typically be `X2(Parent, Parachain(..))` to send"] - #[doc = " from parachain to parachain, or `X1(Parachain(..))` to send from relay to parachain."] - #[doc = "- `beneficiary`: A beneficiary location for the assets in the context of `dest`. Will generally be"] - #[doc = " an `AccountId32` value."] - #[doc = "- `assets`: The assets to be withdrawn. 
This should include the assets used to pay the fee on the"] - #[doc = " `dest` side."] - #[doc = "- `fee_asset_item`: The index into `assets` of the item which should be used to pay"] - #[doc = " fees."] - reserve_transfer_assets { - dest: ::std::boxed::Box, - beneficiary: ::std::boxed::Box, - assets: ::std::boxed::Box, - fee_asset_item: ::core::primitive::u32, - }, - #[codec(index = 3)] - #[doc = "Execute an XCM message from a local, signed, origin."] - #[doc = ""] - #[doc = "An event is deposited indicating whether `msg` could be executed completely or only"] - #[doc = "partially."] - #[doc = ""] - #[doc = "No more than `max_weight` will be used in its attempted execution. If this is less than the"] - #[doc = "maximum amount of weight that the message could take to be executed, then no execution"] - #[doc = "attempt will be made."] - #[doc = ""] - #[doc = "NOTE: A successful return to this does *not* imply that the `msg` was executed successfully"] - #[doc = "to completion; only that *some* of it was executed."] - execute { - message: ::std::boxed::Box, - max_weight: ::core::primitive::u64, - }, - #[codec(index = 4)] - #[doc = "Extoll that a particular destination can be communicated with through a particular"] - #[doc = "version of XCM."] - #[doc = ""] - #[doc = "- `origin`: Must be Root."] - #[doc = "- `location`: The destination that is being described."] - #[doc = "- `xcm_version`: The latest version of XCM that `location` supports."] - force_xcm_version { - location: - ::std::boxed::Box, - xcm_version: ::core::primitive::u32, - }, - #[codec(index = 5)] - #[doc = "Set a safe XCM version (the version that XCM should be encoded with if the most recent"] - #[doc = "version a destination can accept is unknown)."] - #[doc = ""] - #[doc = "- `origin`: Must be Root."] - #[doc = "- `maybe_xcm_version`: The default XCM encoding version, or `None` to disable."] - force_default_xcm_version { - maybe_xcm_version: ::core::option::Option<::core::primitive::u32>, - }, - #[codec(index = 6)] - #[doc = "Ask a location to notify us regarding their XCM version and any changes to it."] - #[doc = ""] - #[doc = "- `origin`: Must be Root."] - #[doc = "- `location`: The location to which we should subscribe for XCM version notifications."] - force_subscribe_version_notify { - location: ::std::boxed::Box, - }, - #[codec(index = 7)] - #[doc = "Require that a particular destination should no longer notify us regarding any XCM"] - #[doc = "version changes."] - #[doc = ""] - #[doc = "- `origin`: Must be Root."] - #[doc = "- `location`: The location to which we are currently subscribed for XCM version"] - #[doc = " notifications which we no longer desire."] - force_unsubscribe_version_notify { - location: ::std::boxed::Box, - }, - #[codec(index = 8)] - #[doc = "Transfer some assets from the local chain to the sovereign account of a destination"] - #[doc = "chain and forward a notification XCM."] - #[doc = ""] - #[doc = "Fee payment on the destination side is made from the asset in the `assets` vector of"] - #[doc = "index `fee_asset_item`, up to enough to pay for `weight_limit` of weight. If more weight"] - #[doc = "is needed than `weight_limit`, then the operation will fail and the assets send may be"] - #[doc = "at risk."] - #[doc = ""] - #[doc = "- `origin`: Must be capable of withdrawing the `assets` and executing XCM."] - #[doc = "- `dest`: Destination context for the assets. 
Will typically be `X2(Parent, Parachain(..))` to send"] - #[doc = " from parachain to parachain, or `X1(Parachain(..))` to send from relay to parachain."] - #[doc = "- `beneficiary`: A beneficiary location for the assets in the context of `dest`. Will generally be"] - #[doc = " an `AccountId32` value."] - #[doc = "- `assets`: The assets to be withdrawn. This should include the assets used to pay the fee on the"] - #[doc = " `dest` side."] - #[doc = "- `fee_asset_item`: The index into `assets` of the item which should be used to pay"] - #[doc = " fees."] - #[doc = "- `weight_limit`: The remote-side weight limit, if any, for the XCM fee purchase."] - limited_reserve_transfer_assets { - dest: ::std::boxed::Box, - beneficiary: ::std::boxed::Box, - assets: ::std::boxed::Box, - fee_asset_item: ::core::primitive::u32, - weight_limit: runtime_types::xcm::v2::WeightLimit, - }, - #[codec(index = 9)] - #[doc = "Teleport some assets from the local chain to some destination chain."] - #[doc = ""] - #[doc = "Fee payment on the destination side is made from the asset in the `assets` vector of"] - #[doc = "index `fee_asset_item`, up to enough to pay for `weight_limit` of weight. If more weight"] - #[doc = "is needed than `weight_limit`, then the operation will fail and the assets send may be"] - #[doc = "at risk."] - #[doc = ""] - #[doc = "- `origin`: Must be capable of withdrawing the `assets` and executing XCM."] - #[doc = "- `dest`: Destination context for the assets. Will typically be `X2(Parent, Parachain(..))` to send"] - #[doc = " from parachain to parachain, or `X1(Parachain(..))` to send from relay to parachain."] - #[doc = "- `beneficiary`: A beneficiary location for the assets in the context of `dest`. Will generally be"] - #[doc = " an `AccountId32` value."] - #[doc = "- `assets`: The assets to be withdrawn. The first item should be the currency used to to pay the fee on the"] - #[doc = " `dest` side. May not be empty."] - #[doc = "- `fee_asset_item`: The index into `assets` of the item which should be used to pay"] - #[doc = " fees."] - #[doc = "- `weight_limit`: The remote-side weight limit, if any, for the XCM fee purchase."] - limited_teleport_assets { - dest: ::std::boxed::Box, - beneficiary: ::std::boxed::Box, - assets: ::std::boxed::Box, - fee_asset_item: ::core::primitive::u32, - weight_limit: runtime_types::xcm::v2::WeightLimit, - }, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Error { - #[codec(index = 0)] - #[doc = "The desired destination was unreachable, generally because there is a no way of routing"] - #[doc = "to it."] - Unreachable, - #[codec(index = 1)] - #[doc = "There was some other issue (i.e. not to do with routing) in sending the message. 
Perhaps"] - #[doc = "a lack of space for buffering the message."] - SendFailure, - #[codec(index = 2)] - #[doc = "The message execution fails the filter."] - Filtered, - #[codec(index = 3)] - #[doc = "The message's weight could not be determined."] - UnweighableMessage, - #[codec(index = 4)] - #[doc = "The destination `MultiLocation` provided cannot be inverted."] - DestinationNotInvertible, - #[codec(index = 5)] - #[doc = "The assets to be sent are empty."] - Empty, - #[codec(index = 6)] - #[doc = "Could not re-anchor the assets to declare the fees for the destination chain."] - CannotReanchor, - #[codec(index = 7)] - #[doc = "Too many assets have been attempted for transfer."] - TooManyAssets, - #[codec(index = 8)] - #[doc = "Origin is invalid for sending."] - InvalidOrigin, - #[codec(index = 9)] - #[doc = "The version of the `Versioned` value used is not able to be interpreted."] - BadVersion, - #[codec(index = 10)] - #[doc = "The given location could not be used (e.g. because it cannot be expressed in the"] - #[doc = "desired version of XCM)."] - BadLocation, - #[codec(index = 11)] - #[doc = "The referenced subscription could not be found."] - NoSubscription, - #[codec(index = 12)] - #[doc = "The location is invalid since it already has a subscription from us."] - AlreadySubscribed, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Event { - #[codec(index = 0)] - #[doc = "Execution of an XCM message was attempted."] - #[doc = ""] - #[doc = "\\[ outcome \\]"] - Attempted(runtime_types::xcm::v2::traits::Outcome), - #[codec(index = 1)] - #[doc = "A XCM message was sent."] - #[doc = ""] - #[doc = "\\[ origin, destination, message \\]"] - Sent( - runtime_types::xcm::v1::multilocation::MultiLocation, - runtime_types::xcm::v1::multilocation::MultiLocation, - runtime_types::xcm::v2::Xcm, - ), - #[codec(index = 2)] - #[doc = "Query response received which does not match a registered query. This may be because a"] - #[doc = "matching query was never registered, it may be because it is a duplicate response, or"] - #[doc = "because the query timed out."] - #[doc = ""] - #[doc = "\\[ origin location, id \\]"] - UnexpectedResponse( - runtime_types::xcm::v1::multilocation::MultiLocation, - ::core::primitive::u64, - ), - #[codec(index = 3)] - #[doc = "Query response has been received and is ready for taking with `take_response`. There is"] - #[doc = "no registered notification call."] - #[doc = ""] - #[doc = "\\[ id, response \\]"] - ResponseReady(::core::primitive::u64, runtime_types::xcm::v2::Response), - #[codec(index = 4)] - #[doc = "Query response has been received and query is removed. The registered notification has"] - #[doc = "been dispatched and executed successfully."] - #[doc = ""] - #[doc = "\\[ id, pallet index, call index \\]"] - Notified( - ::core::primitive::u64, - ::core::primitive::u8, - ::core::primitive::u8, - ), - #[codec(index = 5)] - #[doc = "Query response has been received and query is removed. The registered notification could"] - #[doc = "not be dispatched because the dispatch weight is greater than the maximum weight"] - #[doc = "originally budgeted by this runtime for the query result."] - #[doc = ""] - #[doc = "\\[ id, pallet index, call index, actual weight, max budgeted weight \\]"] - NotifyOverweight( - ::core::primitive::u64, - ::core::primitive::u8, - ::core::primitive::u8, - ::core::primitive::u64, - ::core::primitive::u64, - ), - #[codec(index = 6)] - #[doc = "Query response has been received and query is removed. 
There was a general error with"] - #[doc = "dispatching the notification call."] - #[doc = ""] - #[doc = "\\[ id, pallet index, call index \\]"] - NotifyDispatchError( - ::core::primitive::u64, - ::core::primitive::u8, - ::core::primitive::u8, - ), - #[codec(index = 7)] - #[doc = "Query response has been received and query is removed. The dispatch was unable to be"] - #[doc = "decoded into a `Call`; this might be due to dispatch function having a signature which"] - #[doc = "is not `(origin, QueryId, Response)`."] - #[doc = ""] - #[doc = "\\[ id, pallet index, call index \\]"] - NotifyDecodeFailed( - ::core::primitive::u64, - ::core::primitive::u8, - ::core::primitive::u8, - ), - #[codec(index = 8)] - #[doc = "Expected query response has been received but the origin location of the response does"] - #[doc = "not match that expected. The query remains registered for a later, valid, response to"] - #[doc = "be received and acted upon."] - #[doc = ""] - #[doc = "\\[ origin location, id, expected location \\]"] - InvalidResponder( - runtime_types::xcm::v1::multilocation::MultiLocation, - ::core::primitive::u64, - ::core::option::Option< - runtime_types::xcm::v1::multilocation::MultiLocation, - >, - ), - #[codec(index = 9)] - #[doc = "Expected query response has been received but the expected origin location placed in"] - #[doc = "storage by this runtime previously cannot be decoded. The query remains registered."] - #[doc = ""] - #[doc = "This is unexpected (since a location placed in storage in a previously executing"] - #[doc = "runtime should be readable prior to query timeout) and dangerous since the possibly"] - #[doc = "valid response will be dropped. Manual governance intervention is probably going to be"] - #[doc = "needed."] - #[doc = ""] - #[doc = "\\[ origin location, id \\]"] - InvalidResponderVersion( - runtime_types::xcm::v1::multilocation::MultiLocation, - ::core::primitive::u64, - ), - #[codec(index = 10)] - #[doc = "Received query response has been read and removed."] - #[doc = ""] - #[doc = "\\[ id \\]"] - ResponseTaken(::core::primitive::u64), - #[codec(index = 11)] - #[doc = "Some assets have been placed in an asset trap."] - #[doc = ""] - #[doc = "\\[ hash, origin, assets \\]"] - AssetsTrapped( - ::subxt::sp_core::H256, - runtime_types::xcm::v1::multilocation::MultiLocation, - runtime_types::xcm::VersionedMultiAssets, - ), - #[codec(index = 12)] - #[doc = "An XCM version change notification message has been attempted to be sent."] - #[doc = ""] - #[doc = "\\[ destination, result \\]"] - VersionChangeNotified( - runtime_types::xcm::v1::multilocation::MultiLocation, - ::core::primitive::u32, - ), - #[codec(index = 13)] - #[doc = "The supported version of a location has been changed. 
This might be through an"] - #[doc = "automatic notification or a manual intervention."] - #[doc = ""] - #[doc = "\\[ location, XCM version \\]"] - SupportedVersionChanged( - runtime_types::xcm::v1::multilocation::MultiLocation, - ::core::primitive::u32, - ), - #[codec(index = 14)] - #[doc = "A given location which had a version change subscription was dropped owing to an error"] - #[doc = "sending the notification to it."] - #[doc = ""] - #[doc = "\\[ location, query ID, error \\]"] - NotifyTargetSendFail( - runtime_types::xcm::v1::multilocation::MultiLocation, - ::core::primitive::u64, - runtime_types::xcm::v2::traits::Error, - ), - #[codec(index = 15)] - #[doc = "A given location which had a version change subscription was dropped owing to an error"] - #[doc = "migrating the location to our new XCM format."] - #[doc = ""] - #[doc = "\\[ location, query ID \\]"] - NotifyTargetMigrationFail( - runtime_types::xcm::VersionedMultiLocation, - ::core::primitive::u64, - ), - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Origin { - #[codec(index = 0)] - Xcm(runtime_types::xcm::v1::multilocation::MultiLocation), - #[codec(index = 1)] - Response(runtime_types::xcm::v1::multilocation::MultiLocation), - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum QueryStatus<_0> { - #[codec(index = 0)] - Pending { - responder: runtime_types::xcm::VersionedMultiLocation, - maybe_notify: - ::core::option::Option<(::core::primitive::u8, ::core::primitive::u8)>, - timeout: _0, - }, - #[codec(index = 1)] - VersionNotifier { - origin: runtime_types::xcm::VersionedMultiLocation, - is_active: ::core::primitive::bool, - }, - #[codec(index = 2)] - Ready { - response: runtime_types::xcm::VersionedResponse, - at: _0, - }, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum VersionMigrationStage { - #[codec(index = 0)] - MigrateSupportedVersion, - #[codec(index = 1)] - MigrateVersionNotifiers, - #[codec(index = 2)] - NotifyCurrentTargets( - ::core::option::Option<::std::vec::Vec<::core::primitive::u8>>, - ), - #[codec(index = 3)] - MigrateAndNotifyOldTargets, - } - } - } - pub mod polkadot_core_primitives { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct CandidateHash(pub ::subxt::sp_core::H256); - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct InboundDownwardMessage<_0> { - pub sent_at: _0, - pub msg: ::std::vec::Vec<::core::primitive::u8>, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct InboundHrmpMessage<_0> { - pub sent_at: _0, - pub data: ::std::vec::Vec<::core::primitive::u8>, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct OutboundHrmpMessage<_0> { - pub recipient: _0, - pub data: ::std::vec::Vec<::core::primitive::u8>, - } - } - pub mod polkadot_parachain { - use super::runtime_types; - pub mod primitives { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct HeadData(pub ::std::vec::Vec<::core::primitive::u8>); - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct HrmpChannelId { - pub sender: runtime_types::polkadot_parachain::primitives::Id, - pub recipient: runtime_types::polkadot_parachain::primitives::Id, - } - #[derive( - :: subxt :: codec :: CompactAs, - :: subxt :: codec :: Decode, - :: 
subxt :: codec :: Encode, - Debug, - )] - pub struct Id(pub ::core::primitive::u32); - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct ValidationCode(pub ::std::vec::Vec<::core::primitive::u8>); - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct ValidationCodeHash(pub ::subxt::sp_core::H256); - } - } - pub mod polkadot_primitives { - use super::runtime_types; - pub mod v2 { - use super::runtime_types; - pub mod assignment_app { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct Public(pub runtime_types::sp_core::sr25519::Public); - } - pub mod collator_app { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct Public(pub runtime_types::sp_core::sr25519::Public); - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct Signature(pub runtime_types::sp_core::sr25519::Signature); - } - pub mod signed { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct UncheckedSigned<_0, _1> { - pub payload: _0, - pub validator_index: runtime_types::polkadot_primitives::v2::ValidatorIndex, - pub signature: - runtime_types::polkadot_primitives::v2::validator_app::Signature, - #[codec(skip)] - pub __subxt_unused_type_params: ::core::marker::PhantomData<_1>, - } - } - pub mod validator_app { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct Public(pub runtime_types::sp_core::sr25519::Public); - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct Signature(pub runtime_types::sp_core::sr25519::Signature); - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct AvailabilityBitfield( - pub ::subxt::bitvec::vec::BitVec< - ::core::primitive::u8, - ::subxt::bitvec::order::Lsb0, - >, - ); - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct BackedCandidate<_0> { - pub candidate: - runtime_types::polkadot_primitives::v2::CommittedCandidateReceipt<_0>, - pub validity_votes: ::std::vec::Vec< - runtime_types::polkadot_primitives::v2::ValidityAttestation, - >, - pub validator_indices: ::subxt::bitvec::vec::BitVec< - ::core::primitive::u8, - ::subxt::bitvec::order::Lsb0, - >, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct CandidateCommitments<_0> { - pub upward_messages: ::std::vec::Vec<::std::vec::Vec<::core::primitive::u8>>, - pub horizontal_messages: ::std::vec::Vec< - runtime_types::polkadot_core_primitives::OutboundHrmpMessage< - runtime_types::polkadot_parachain::primitives::Id, - >, - >, - pub new_validation_code: ::core::option::Option< - runtime_types::polkadot_parachain::primitives::ValidationCode, - >, - pub head_data: runtime_types::polkadot_parachain::primitives::HeadData, - pub processed_downward_messages: _0, - pub hrmp_watermark: _0, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct CandidateDescriptor<_0> { - pub para_id: runtime_types::polkadot_parachain::primitives::Id, - pub relay_parent: _0, - pub collator: runtime_types::polkadot_primitives::v2::collator_app::Public, - pub persisted_validation_data_hash: _0, - pub pov_hash: _0, - pub erasure_root: _0, - pub signature: runtime_types::polkadot_primitives::v2::collator_app::Signature, - 
pub para_head: _0, - pub validation_code_hash: - runtime_types::polkadot_parachain::primitives::ValidationCodeHash, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct CandidateReceipt<_0> { - pub descriptor: runtime_types::polkadot_primitives::v2::CandidateDescriptor<_0>, - pub commitments_hash: _0, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct CommittedCandidateReceipt<_0> { - pub descriptor: runtime_types::polkadot_primitives::v2::CandidateDescriptor<_0>, - pub commitments: runtime_types::polkadot_primitives::v2::CandidateCommitments< - ::core::primitive::u32, - >, - } - #[derive( - :: subxt :: codec :: CompactAs, - :: subxt :: codec :: Decode, - :: subxt :: codec :: Encode, - Debug, - )] - pub struct CoreIndex(pub ::core::primitive::u32); - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum CoreOccupied { - #[codec(index = 0)] - Parathread(runtime_types::polkadot_primitives::v2::ParathreadEntry), - #[codec(index = 1)] - Parachain, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct DisputeState<_0> { - pub validators_for: ::subxt::bitvec::vec::BitVec< - ::core::primitive::u8, - ::subxt::bitvec::order::Lsb0, - >, - pub validators_against: ::subxt::bitvec::vec::BitVec< - ::core::primitive::u8, - ::subxt::bitvec::order::Lsb0, - >, - pub start: _0, - pub concluded_at: ::core::option::Option<_0>, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum DisputeStatement { - #[codec(index = 0)] - Valid(runtime_types::polkadot_primitives::v2::ValidDisputeStatementKind), - #[codec(index = 1)] - Invalid(runtime_types::polkadot_primitives::v2::InvalidDisputeStatementKind), - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct DisputeStatementSet { - pub candidate_hash: runtime_types::polkadot_core_primitives::CandidateHash, - pub session: ::core::primitive::u32, - pub statements: ::std::vec::Vec<( - runtime_types::polkadot_primitives::v2::DisputeStatement, - runtime_types::polkadot_primitives::v2::ValidatorIndex, - runtime_types::polkadot_primitives::v2::validator_app::Signature, - )>, - } - #[derive( - :: subxt :: codec :: CompactAs, - :: subxt :: codec :: Decode, - :: subxt :: codec :: Encode, - Debug, - )] - pub struct GroupIndex(pub ::core::primitive::u32); - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct InherentData<_0> { - pub bitfields: ::std::vec::Vec< - runtime_types::polkadot_primitives::v2::signed::UncheckedSigned< - runtime_types::polkadot_primitives::v2::AvailabilityBitfield, - runtime_types::polkadot_primitives::v2::AvailabilityBitfield, - >, - >, - pub backed_candidates: ::std::vec::Vec< - runtime_types::polkadot_primitives::v2::BackedCandidate< - ::subxt::sp_core::H256, - >, - >, - pub disputes: ::std::vec::Vec< - runtime_types::polkadot_primitives::v2::DisputeStatementSet, - >, - pub parent_header: _0, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum InvalidDisputeStatementKind { - #[codec(index = 0)] - Explicit, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct ParathreadClaim( - pub runtime_types::polkadot_parachain::primitives::Id, - pub runtime_types::polkadot_primitives::v2::collator_app::Public, - ); - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct ParathreadEntry { - 
pub claim: runtime_types::polkadot_primitives::v2::ParathreadClaim, - pub retries: ::core::primitive::u32, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct PvfCheckStatement { - pub accept: ::core::primitive::bool, - pub subject: runtime_types::polkadot_parachain::primitives::ValidationCodeHash, - pub session_index: ::core::primitive::u32, - pub validator_index: runtime_types::polkadot_primitives::v2::ValidatorIndex, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct ScrapedOnChainVotes<_0> { - pub session: ::core::primitive::u32, - pub backing_validators_per_candidate: ::std::vec::Vec<( - runtime_types::polkadot_primitives::v2::CandidateReceipt<_0>, - ::std::vec::Vec<( - runtime_types::polkadot_primitives::v2::ValidatorIndex, - runtime_types::polkadot_primitives::v2::ValidityAttestation, - )>, - )>, - pub disputes: ::std::vec::Vec< - runtime_types::polkadot_primitives::v2::DisputeStatementSet, - >, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct SessionInfo { - pub active_validator_indices: - ::std::vec::Vec, - pub random_seed: [::core::primitive::u8; 32usize], - pub dispute_period: ::core::primitive::u32, - pub validators: ::std::vec::Vec< - runtime_types::polkadot_primitives::v2::validator_app::Public, - >, - pub discovery_keys: - ::std::vec::Vec, - pub assignment_keys: ::std::vec::Vec< - runtime_types::polkadot_primitives::v2::assignment_app::Public, - >, - pub validator_groups: ::std::vec::Vec< - ::std::vec::Vec, - >, - pub n_cores: ::core::primitive::u32, - pub zeroth_delay_tranche_width: ::core::primitive::u32, - pub relay_vrf_modulo_samples: ::core::primitive::u32, - pub n_delay_tranches: ::core::primitive::u32, - pub no_show_slots: ::core::primitive::u32, - pub needed_approvals: ::core::primitive::u32, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum UpgradeGoAhead { - #[codec(index = 0)] - Abort, - #[codec(index = 1)] - GoAhead, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum UpgradeRestriction { - #[codec(index = 0)] - Present, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum ValidDisputeStatementKind { - #[codec(index = 0)] - Explicit, - #[codec(index = 1)] - BackingSeconded(::subxt::sp_core::H256), - #[codec(index = 2)] - BackingValid(::subxt::sp_core::H256), - #[codec(index = 3)] - ApprovalChecking, - } - #[derive( - :: subxt :: codec :: CompactAs, - :: subxt :: codec :: Decode, - :: subxt :: codec :: Encode, - Debug, - )] - pub struct ValidatorIndex(pub ::core::primitive::u32); - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum ValidityAttestation { - #[codec(index = 1)] - Implicit(runtime_types::polkadot_primitives::v2::validator_app::Signature), - #[codec(index = 2)] - Explicit(runtime_types::polkadot_primitives::v2::validator_app::Signature), - } - } - } - pub mod polkadot_runtime_common { - use super::runtime_types; - pub mod assigned_slots { - use super::runtime_types; - pub mod pallet { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Call { - # [codec (index = 0)] # [doc = "Assign a permanent parachain slot and immediately create a lease for it."] assign_perm_parachain_slot { id : runtime_types :: polkadot_parachain :: primitives :: Id , } , # [codec (index = 1)] # [doc = "Assign a temporary parachain slot. 
The function tries to create a lease for it"] # [doc = "immediately if `SlotLeasePeriodStart::Current` is specified, and if the number"] # [doc = "of currently active temporary slots is below `MaxTemporarySlotPerLeasePeriod`."] assign_temp_parachain_slot { id : runtime_types :: polkadot_parachain :: primitives :: Id , lease_period_start : runtime_types :: polkadot_runtime_common :: assigned_slots :: SlotLeasePeriodStart , } , # [codec (index = 2)] # [doc = "Unassign a permanent or temporary parachain slot"] unassign_parachain_slot { id : runtime_types :: polkadot_parachain :: primitives :: Id , } , } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Error { - #[codec(index = 0)] - #[doc = "The specified parachain or parathread is not registered."] - ParaDoesntExist, - #[codec(index = 1)] - #[doc = "Not a parathread."] - NotParathread, - #[codec(index = 2)] - #[doc = "Cannot upgrade parathread."] - CannotUpgrade, - #[codec(index = 3)] - #[doc = "Cannot downgrade parachain."] - CannotDowngrade, - #[codec(index = 4)] - #[doc = "Permanent or Temporary slot already assigned."] - SlotAlreadyAssigned, - #[codec(index = 5)] - #[doc = "Permanent or Temporary slot has not been assigned."] - SlotNotAssigned, - #[codec(index = 6)] - #[doc = "An ongoing lease already exists."] - OngoingLeaseExists, - #[codec(index = 7)] - MaxPermanentSlotsExceeded, - #[codec(index = 8)] - MaxTemporarySlotsExceeded, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Event { - #[codec(index = 0)] - #[doc = "A para was assigned a permanent parachain slot"] - PermanentSlotAssigned(runtime_types::polkadot_parachain::primitives::Id), - #[codec(index = 1)] - #[doc = "A para was assigned a temporary parachain slot"] - TemporarySlotAssigned(runtime_types::polkadot_parachain::primitives::Id), - } - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct ParachainTemporarySlot<_0, _1> { - pub manager: _0, - pub period_begin: _1, - pub period_count: _1, - pub last_lease: ::core::option::Option<_1>, - pub lease_count: _1, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum SlotLeasePeriodStart { - #[codec(index = 0)] - Current, - #[codec(index = 1)] - Next, - } - } - pub mod auctions { - use super::runtime_types; - pub mod pallet { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Call { - #[codec(index = 0)] - #[doc = "Create a new auction."] - #[doc = ""] - #[doc = "This can only happen when there isn't already an auction in progress and may only be"] - #[doc = "called by the root origin. Accepts the `duration` of this auction and the"] - #[doc = "`lease_period_index` of the initial lease period of the four that are to be auctioned."] - new_auction { - #[codec(compact)] - duration: ::core::primitive::u32, - #[codec(compact)] - lease_period_index: ::core::primitive::u32, - }, - #[codec(index = 1)] - #[doc = "Make a new bid from an account (including a parachain account) for deploying a new"] - #[doc = "parachain."] - #[doc = ""] - #[doc = "Multiple simultaneous bids from the same bidder are allowed only as long as all active"] - #[doc = "bids overlap each other (i.e. are mutually exclusive). 
Bids cannot be redacted."] - #[doc = ""] - #[doc = "- `sub` is the sub-bidder ID, allowing for multiple competing bids to be made by (and"] - #[doc = "funded by) the same account."] - #[doc = "- `auction_index` is the index of the auction to bid on. Should just be the present"] - #[doc = "value of `AuctionCounter`."] - #[doc = "- `first_slot` is the first lease period index of the range to bid on. This is the"] - #[doc = "absolute lease period index value, not an auction-specific offset."] - #[doc = "- `last_slot` is the last lease period index of the range to bid on. This is the"] - #[doc = "absolute lease period index value, not an auction-specific offset."] - #[doc = "- `amount` is the amount to bid to be held as deposit for the parachain should the"] - #[doc = "bid win. This amount is held throughout the range."] - bid { - #[codec(compact)] - para: runtime_types::polkadot_parachain::primitives::Id, - #[codec(compact)] - auction_index: ::core::primitive::u32, - #[codec(compact)] - first_slot: ::core::primitive::u32, - #[codec(compact)] - last_slot: ::core::primitive::u32, - #[codec(compact)] - amount: ::core::primitive::u128, - }, - #[codec(index = 2)] - #[doc = "Cancel an in-progress auction."] - #[doc = ""] - #[doc = "Can only be called by Root origin."] - cancel_auction, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Error { - #[codec(index = 0)] - #[doc = "This auction is already in progress."] - AuctionInProgress, - #[codec(index = 1)] - #[doc = "The lease period is in the past."] - LeasePeriodInPast, - #[codec(index = 2)] - #[doc = "Para is not registered"] - ParaNotRegistered, - #[codec(index = 3)] - #[doc = "Not a current auction."] - NotCurrentAuction, - #[codec(index = 4)] - #[doc = "Not an auction."] - NotAuction, - #[codec(index = 5)] - #[doc = "Auction has already ended."] - AuctionEnded, - #[codec(index = 6)] - #[doc = "The para is already leased out for part of this range."] - AlreadyLeasedOut, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Event { - #[codec(index = 0)] - #[doc = "An auction started. Provides its index and the block number where it will begin to"] - #[doc = "close and the first lease period of the quadruplet that is auctioned."] - #[doc = "`[auction_index, lease_period, ending]`"] - AuctionStarted( - ::core::primitive::u32, - ::core::primitive::u32, - ::core::primitive::u32, - ), - #[codec(index = 1)] - #[doc = "An auction ended. All funds become unreserved. `[auction_index]`"] - AuctionClosed(::core::primitive::u32), - #[codec(index = 2)] - #[doc = "Funds were reserved for a winning bid. First balance is the extra amount reserved."] - #[doc = "Second is the total. `[bidder, extra_reserved, total_amount]`"] - Reserved( - ::subxt::sp_core::crypto::AccountId32, - ::core::primitive::u128, - ::core::primitive::u128, - ), - #[codec(index = 3)] - #[doc = "Funds were unreserved since bidder is no longer active. `[bidder, amount]`"] - Unreserved( - ::subxt::sp_core::crypto::AccountId32, - ::core::primitive::u128, - ), - #[codec(index = 4)] - #[doc = "Someone attempted to lease the same slot twice for a parachain. 
The amount is held in reserve"] - #[doc = "but no parachain slot has been leased."] - #[doc = "`[parachain_id, leaser, amount]`"] - ReserveConfiscated( - runtime_types::polkadot_parachain::primitives::Id, - ::subxt::sp_core::crypto::AccountId32, - ::core::primitive::u128, - ), - #[codec(index = 5)] - #[doc = "A new bid has been accepted as the current winner."] - #[doc = "`[who, para_id, amount, first_slot, last_slot]`"] - BidAccepted( - ::subxt::sp_core::crypto::AccountId32, - runtime_types::polkadot_parachain::primitives::Id, - ::core::primitive::u128, - ::core::primitive::u32, - ::core::primitive::u32, - ), - #[codec(index = 6)] - #[doc = "The winning offset was chosen for an auction. This will map into the `Winning` storage map."] - #[doc = "`[auction_index, block_number]`"] - WinningOffset(::core::primitive::u32, ::core::primitive::u32), - } - } - } - pub mod crowdloan { - use super::runtime_types; - pub mod pallet { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Call { - #[codec(index = 0)] - #[doc = "Create a new crowdloaning campaign for a parachain slot with the given lease period range."] - #[doc = ""] - #[doc = "This applies a lock to your parachain configuration, ensuring that it cannot be changed"] - #[doc = "by the parachain manager."] - create { - #[codec(compact)] - index: runtime_types::polkadot_parachain::primitives::Id, - #[codec(compact)] - cap: ::core::primitive::u128, - #[codec(compact)] - first_period: ::core::primitive::u32, - #[codec(compact)] - last_period: ::core::primitive::u32, - #[codec(compact)] - end: ::core::primitive::u32, - verifier: - ::core::option::Option, - }, - #[codec(index = 1)] - #[doc = "Contribute to a crowd sale. This will transfer some balance over to fund a parachain"] - #[doc = "slot. It will be withdrawable when the crowdloan has ended and the funds are unused."] - contribute { - #[codec(compact)] - index: runtime_types::polkadot_parachain::primitives::Id, - #[codec(compact)] - value: ::core::primitive::u128, - signature: - ::core::option::Option, - }, - #[codec(index = 2)] - #[doc = "Withdraw full balance of a specific contributor."] - #[doc = ""] - #[doc = "Origin must be signed, but can come from anyone."] - #[doc = ""] - #[doc = "The fund must be either in, or ready for, retirement. For a fund to be *in* retirement, then the retirement"] - #[doc = "flag must be set. For a fund to be ready for retirement, then:"] - #[doc = "- it must not already be in retirement;"] - #[doc = "- the amount of raised funds must be bigger than the _free_ balance of the account;"] - #[doc = "- and either:"] - #[doc = " - the block number must be at least `end`; or"] - #[doc = " - the current lease period must be greater than the fund's `last_period`."] - #[doc = ""] - #[doc = "In this case, the fund's retirement flag is set and its `end` is reset to the current block"] - #[doc = "number."] - #[doc = ""] - #[doc = "- `who`: The account whose contribution should be withdrawn."] - #[doc = "- `index`: The parachain to whose crowdloan the contribution was made."] - withdraw { - who: ::subxt::sp_core::crypto::AccountId32, - #[codec(compact)] - index: runtime_types::polkadot_parachain::primitives::Id, - }, - #[codec(index = 3)] - #[doc = "Automatically refund contributors of an ended crowdloan."] - #[doc = "Due to weight restrictions, this function may need to be called multiple"] - #[doc = "times to fully refund all users. 
We will refund `RemoveKeysLimit` users at a time."] - #[doc = ""] - #[doc = "Origin must be signed, but can come from anyone."] - refund { - #[codec(compact)] - index: runtime_types::polkadot_parachain::primitives::Id, - }, - #[codec(index = 4)] - #[doc = "Remove a fund after the retirement period has ended and all funds have been returned."] - dissolve { - #[codec(compact)] - index: runtime_types::polkadot_parachain::primitives::Id, - }, - #[codec(index = 5)] - #[doc = "Edit the configuration for an in-progress crowdloan."] - #[doc = ""] - #[doc = "Can only be called by Root origin."] - edit { - #[codec(compact)] - index: runtime_types::polkadot_parachain::primitives::Id, - #[codec(compact)] - cap: ::core::primitive::u128, - #[codec(compact)] - first_period: ::core::primitive::u32, - #[codec(compact)] - last_period: ::core::primitive::u32, - #[codec(compact)] - end: ::core::primitive::u32, - verifier: - ::core::option::Option, - }, - #[codec(index = 6)] - #[doc = "Add an optional memo to an existing crowdloan contribution."] - #[doc = ""] - #[doc = "Origin must be Signed, and the user must have contributed to the crowdloan."] - add_memo { - index: runtime_types::polkadot_parachain::primitives::Id, - memo: ::std::vec::Vec<::core::primitive::u8>, - }, - #[codec(index = 7)] - #[doc = "Poke the fund into `NewRaise`"] - #[doc = ""] - #[doc = "Origin must be Signed, and the fund has non-zero raise."] - poke { - index: runtime_types::polkadot_parachain::primitives::Id, - }, - #[codec(index = 8)] - #[doc = "Contribute your entire balance to a crowd sale. This will transfer the entire balance of a user over to fund a parachain"] - #[doc = "slot. It will be withdrawable when the crowdloan has ended and the funds are unused."] - contribute_all { - #[codec(compact)] - index: runtime_types::polkadot_parachain::primitives::Id, - signature: - ::core::option::Option, - }, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Error { - #[codec(index = 0)] - #[doc = "The current lease period is more than the first lease period."] - FirstPeriodInPast, - #[codec(index = 1)] - #[doc = "The first lease period needs to at least be less than 3 `max_value`."] - FirstPeriodTooFarInFuture, - #[codec(index = 2)] - #[doc = "Last lease period must be greater than first lease period."] - LastPeriodBeforeFirstPeriod, - #[codec(index = 3)] - #[doc = "The last lease period cannot be more than 3 periods after the first period."] - LastPeriodTooFarInFuture, - #[codec(index = 4)] - #[doc = "The campaign ends before the current block number. 
The end must be in the future."] - CannotEndInPast, - #[codec(index = 5)] - #[doc = "The end date for this crowdloan is not sensible."] - EndTooFarInFuture, - #[codec(index = 6)] - #[doc = "There was an overflow."] - Overflow, - #[codec(index = 7)] - #[doc = "The contribution was below the minimum, `MinContribution`."] - ContributionTooSmall, - #[codec(index = 8)] - #[doc = "Invalid fund index."] - InvalidParaId, - #[codec(index = 9)] - #[doc = "Contributions exceed maximum amount."] - CapExceeded, - #[codec(index = 10)] - #[doc = "The contribution period has already ended."] - ContributionPeriodOver, - #[codec(index = 11)] - #[doc = "The origin of this call is invalid."] - InvalidOrigin, - #[codec(index = 12)] - #[doc = "This crowdloan does not correspond to a parachain."] - NotParachain, - #[codec(index = 13)] - #[doc = "This parachain lease is still active and retirement cannot yet begin."] - LeaseActive, - #[codec(index = 14)] - #[doc = "This parachain's bid or lease is still active and withdraw cannot yet begin."] - BidOrLeaseActive, - #[codec(index = 15)] - #[doc = "The crowdloan has not yet ended."] - FundNotEnded, - #[codec(index = 16)] - #[doc = "There are no contributions stored in this crowdloan."] - NoContributions, - #[codec(index = 17)] - #[doc = "The crowdloan is not ready to dissolve. Potentially still has a slot or in retirement period."] - NotReadyToDissolve, - #[codec(index = 18)] - #[doc = "Invalid signature."] - InvalidSignature, - #[codec(index = 19)] - #[doc = "The provided memo is too large."] - MemoTooLarge, - #[codec(index = 20)] - #[doc = "The fund is already in `NewRaise`"] - AlreadyInNewRaise, - #[codec(index = 21)] - #[doc = "No contributions allowed during the VRF delay"] - VrfDelayInProgress, - #[codec(index = 22)] - #[doc = "A lease period has not started yet, due to an offset in the starting block."] - NoLeasePeriod, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Event { - #[codec(index = 0)] - #[doc = "Create a new crowdloaning campaign. `[fund_index]`"] - Created(runtime_types::polkadot_parachain::primitives::Id), - #[codec(index = 1)] - #[doc = "Contributed to a crowd sale. `[who, fund_index, amount]`"] - Contributed( - ::subxt::sp_core::crypto::AccountId32, - runtime_types::polkadot_parachain::primitives::Id, - ::core::primitive::u128, - ), - #[codec(index = 2)] - #[doc = "Withdrew full balance of a contributor. `[who, fund_index, amount]`"] - Withdrew( - ::subxt::sp_core::crypto::AccountId32, - runtime_types::polkadot_parachain::primitives::Id, - ::core::primitive::u128, - ), - #[codec(index = 3)] - #[doc = "The loans in a fund have been partially dissolved, i.e. there are some left"] - #[doc = "over child keys that still need to be killed. `[fund_index]`"] - PartiallyRefunded(runtime_types::polkadot_parachain::primitives::Id), - #[codec(index = 4)] - #[doc = "All loans in a fund have been refunded. `[fund_index]`"] - AllRefunded(runtime_types::polkadot_parachain::primitives::Id), - #[codec(index = 5)] - #[doc = "Fund is dissolved. `[fund_index]`"] - Dissolved(runtime_types::polkadot_parachain::primitives::Id), - #[codec(index = 6)] - #[doc = "The result of trying to submit a new bid to the Slots pallet."] - HandleBidResult( - runtime_types::polkadot_parachain::primitives::Id, - ::core::result::Result<(), runtime_types::sp_runtime::DispatchError>, - ), - #[codec(index = 7)] - #[doc = "The configuration to a crowdloan has been edited. 
`[fund_index]`"] - Edited(runtime_types::polkadot_parachain::primitives::Id), - #[codec(index = 8)] - #[doc = "A memo has been updated. `[who, fund_index, memo]`"] - MemoUpdated( - ::subxt::sp_core::crypto::AccountId32, - runtime_types::polkadot_parachain::primitives::Id, - ::std::vec::Vec<::core::primitive::u8>, - ), - #[codec(index = 9)] - #[doc = "A parachain has been moved to `NewRaise`"] - AddedToNewRaise(runtime_types::polkadot_parachain::primitives::Id), - } - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct FundInfo<_0, _1, _2, _3> { - pub depositor: _0, - pub verifier: ::core::option::Option, - pub deposit: _1, - pub raised: _1, - pub end: _2, - pub cap: _1, - pub last_contribution: - runtime_types::polkadot_runtime_common::crowdloan::LastContribution<_2>, - pub first_period: _2, - pub last_period: _2, - pub fund_index: _2, - #[codec(skip)] - pub __subxt_unused_type_params: ::core::marker::PhantomData<_3>, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum LastContribution<_0> { - #[codec(index = 0)] - Never, - #[codec(index = 1)] - PreEnding(_0), - #[codec(index = 2)] - Ending(_0), - } - } - pub mod paras_registrar { - use super::runtime_types; - pub mod pallet { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Call { - #[codec(index = 0)] - #[doc = "Register head data and validation code for a reserved Para Id."] - #[doc = ""] - #[doc = "## Arguments"] - #[doc = "- `origin`: Must be called by a `Signed` origin."] - #[doc = "- `id`: The para ID. Must be owned/managed by the `origin` signing account."] - #[doc = "- `genesis_head`: The genesis head data of the parachain/thread."] - #[doc = "- `validation_code`: The initial validation code of the parachain/thread."] - #[doc = ""] - #[doc = "## Deposits/Fees"] - #[doc = "The origin signed account must reserve a corresponding deposit for the registration. Anything already"] - #[doc = "reserved previously for this para ID is accounted for."] - #[doc = ""] - #[doc = "## Events"] - #[doc = "The `Registered` event is emitted in case of success."] - register { - id: runtime_types::polkadot_parachain::primitives::Id, - genesis_head: runtime_types::polkadot_parachain::primitives::HeadData, - validation_code: - runtime_types::polkadot_parachain::primitives::ValidationCode, - }, - #[codec(index = 1)] - #[doc = "Force the registration of a Para Id on the relay chain."] - #[doc = ""] - #[doc = "This function must be called by a Root origin."] - #[doc = ""] - #[doc = "The deposit taken can be specified for this registration. Any `ParaId`"] - #[doc = "can be registered, including sub-1000 IDs which are System Parachains."] - force_register { - who: ::subxt::sp_core::crypto::AccountId32, - deposit: ::core::primitive::u128, - id: runtime_types::polkadot_parachain::primitives::Id, - genesis_head: runtime_types::polkadot_parachain::primitives::HeadData, - validation_code: - runtime_types::polkadot_parachain::primitives::ValidationCode, - }, - #[codec(index = 2)] - #[doc = "Deregister a Para Id, freeing all data and returning any deposit."] - #[doc = ""] - #[doc = "The caller must be Root, the `para` owner, or the `para` itself. 
The para must be a parathread."] - deregister { - id: runtime_types::polkadot_parachain::primitives::Id, - }, - #[codec(index = 3)] - #[doc = "Swap a parachain with another parachain or parathread."] - #[doc = ""] - #[doc = "The origin must be Root, the `para` owner, or the `para` itself."] - #[doc = ""] - #[doc = "The swap will happen only if there is already an opposite swap pending. If there is not,"] - #[doc = "the swap will be stored in the pending swaps map, ready for a later confirmatory swap."] - #[doc = ""] - #[doc = "The `ParaId`s remain mapped to the same head data and code so external code can rely on"] - #[doc = "`ParaId` to be a long-term identifier of a notional \"parachain\". However, their"] - #[doc = "scheduling info (i.e. whether they're a parathread or parachain), auction information"] - #[doc = "and the auction deposit are switched."] - swap { - id: runtime_types::polkadot_parachain::primitives::Id, - other: runtime_types::polkadot_parachain::primitives::Id, - }, - #[codec(index = 4)] - #[doc = "Remove a manager lock from a para. This will allow the manager of a"] - #[doc = "previously locked para to deregister or swap a para without using governance."] - #[doc = ""] - #[doc = "Can only be called by the Root origin."] - force_remove_lock { - para: runtime_types::polkadot_parachain::primitives::Id, - }, - #[codec(index = 5)] - #[doc = "Reserve a Para Id on the relay chain."] - #[doc = ""] - #[doc = "This function will reserve a new Para Id to be owned/managed by the origin account."] - #[doc = "The origin account is able to register head data and validation code using `register` to create"] - #[doc = "a parathread. Using the Slots pallet, a parathread can then be upgraded to get a parachain slot."] - #[doc = ""] - #[doc = "## Arguments"] - #[doc = "- `origin`: Must be called by a `Signed` origin. Becomes the manager/owner of the new para ID."] - #[doc = ""] - #[doc = "## Deposits/Fees"] - #[doc = "The origin must reserve a deposit of `ParaDeposit` for the registration."] - #[doc = ""] - #[doc = "## Events"] - #[doc = "The `Reserved` event is emitted in case of success, which provides the ID reserved for use."] - reserve, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Error { - #[codec(index = 0)] - #[doc = "The ID is not registered."] - NotRegistered, - #[codec(index = 1)] - #[doc = "The ID is already registered."] - AlreadyRegistered, - #[codec(index = 2)] - #[doc = "The caller is not the owner of this Id."] - NotOwner, - #[codec(index = 3)] - #[doc = "Invalid para code size."] - CodeTooLarge, - #[codec(index = 4)] - #[doc = "Invalid para head data size."] - HeadDataTooLarge, - #[codec(index = 5)] - #[doc = "Para is not a Parachain."] - NotParachain, - #[codec(index = 6)] - #[doc = "Para is not a Parathread."] - NotParathread, - #[codec(index = 7)] - #[doc = "Cannot deregister para"] - CannotDeregister, - #[codec(index = 8)] - #[doc = "Cannot schedule downgrade of parachain to parathread"] - CannotDowngrade, - #[codec(index = 9)] - #[doc = "Cannot schedule upgrade of parathread to parachain"] - CannotUpgrade, - #[codec(index = 10)] - #[doc = "Para is locked from manipulation by the manager. 
Must use parachain or relay chain governance."] - ParaLocked, - #[codec(index = 11)] - #[doc = "The ID given for registration has not been reserved."] - NotReserved, - #[codec(index = 12)] - #[doc = "Registering parachain with empty code is not allowed."] - EmptyCode, - #[codec(index = 13)] - #[doc = "Cannot perform a parachain slot / lifecycle swap. Check that the state of both paras are"] - #[doc = "correct for the swap to work."] - CannotSwap, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Event { - #[codec(index = 0)] - Registered( - runtime_types::polkadot_parachain::primitives::Id, - ::subxt::sp_core::crypto::AccountId32, - ), - #[codec(index = 1)] - Deregistered(runtime_types::polkadot_parachain::primitives::Id), - #[codec(index = 2)] - Reserved( - runtime_types::polkadot_parachain::primitives::Id, - ::subxt::sp_core::crypto::AccountId32, - ), - } - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct ParaInfo<_0, _1> { - pub manager: _0, - pub deposit: _1, - pub locked: ::core::primitive::bool, - } - } - pub mod paras_sudo_wrapper { - use super::runtime_types; - pub mod pallet { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Call { - #[codec(index = 0)] - #[doc = "Schedule a para to be initialized at the start of the next session."] - sudo_schedule_para_initialize { - id: runtime_types::polkadot_parachain::primitives::Id, - genesis: - runtime_types::polkadot_runtime_parachains::paras::ParaGenesisArgs, - }, - #[codec(index = 1)] - #[doc = "Schedule a para to be cleaned up at the start of the next session."] - sudo_schedule_para_cleanup { - id: runtime_types::polkadot_parachain::primitives::Id, - }, - #[codec(index = 2)] - #[doc = "Upgrade a parathread to a parachain"] - sudo_schedule_parathread_upgrade { - id: runtime_types::polkadot_parachain::primitives::Id, - }, - #[codec(index = 3)] - #[doc = "Downgrade a parachain to a parathread"] - sudo_schedule_parachain_downgrade { - id: runtime_types::polkadot_parachain::primitives::Id, - }, - #[codec(index = 4)] - #[doc = "Send a downward XCM to the given para."] - #[doc = ""] - #[doc = "The given parachain should exist and the payload should not exceed the preconfigured size"] - #[doc = "`config.max_downward_message_size`."] - sudo_queue_downward_xcm { - id: runtime_types::polkadot_parachain::primitives::Id, - xcm: ::std::boxed::Box, - }, - #[codec(index = 5)] - #[doc = "Forcefully establish a channel from the sender to the recipient."] - #[doc = ""] - #[doc = "This is equivalent to sending an `Hrmp::hrmp_init_open_channel` extrinsic followed by"] - #[doc = "`Hrmp::hrmp_accept_open_channel`."] - sudo_establish_hrmp_channel { - sender: runtime_types::polkadot_parachain::primitives::Id, - recipient: runtime_types::polkadot_parachain::primitives::Id, - max_capacity: ::core::primitive::u32, - max_message_size: ::core::primitive::u32, - }, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Error { - #[codec(index = 0)] - #[doc = "The specified parachain or parathread is not registered."] - ParaDoesntExist, - #[codec(index = 1)] - #[doc = "The specified parachain or parathread is already registered."] - ParaAlreadyExists, - #[codec(index = 2)] - #[doc = "A DMP message couldn't be sent because it exceeds the maximum size allowed for a downward"] - #[doc = "message."] - ExceedsMaxMessageSize, - #[codec(index = 3)] - #[doc = "Could not schedule para 
cleanup."] - CouldntCleanup, - #[codec(index = 4)] - #[doc = "Not a parathread."] - NotParathread, - #[codec(index = 5)] - #[doc = "Not a parachain."] - NotParachain, - #[codec(index = 6)] - #[doc = "Cannot upgrade parathread."] - CannotUpgrade, - #[codec(index = 7)] - #[doc = "Cannot downgrade parachain."] - CannotDowngrade, - } - } - } - pub mod slots { - use super::runtime_types; - pub mod pallet { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Call { - #[codec(index = 0)] - #[doc = "Just a connect into the `lease_out` call, in case Root wants to force some lease to happen"] - #[doc = "independently of any other on-chain mechanism to use it."] - #[doc = ""] - #[doc = "The dispatch origin for this call must match `T::ForceOrigin`."] - force_lease { - para: runtime_types::polkadot_parachain::primitives::Id, - leaser: ::subxt::sp_core::crypto::AccountId32, - amount: ::core::primitive::u128, - period_begin: ::core::primitive::u32, - period_count: ::core::primitive::u32, - }, - #[codec(index = 1)] - #[doc = "Clear all leases for a Para Id, refunding any deposits back to the original owners."] - #[doc = ""] - #[doc = "The dispatch origin for this call must match `T::ForceOrigin`."] - clear_all_leases { - para: runtime_types::polkadot_parachain::primitives::Id, - }, - #[codec(index = 2)] - #[doc = "Try to onboard a parachain that has a lease for the current lease period."] - #[doc = ""] - #[doc = "This function can be useful if there was some state issue with a para that should"] - #[doc = "have onboarded, but was unable to. As long as they have a lease period, we can"] - #[doc = "let them onboard from here."] - #[doc = ""] - #[doc = "Origin must be signed, but can be called by anyone."] - trigger_onboard { - para: runtime_types::polkadot_parachain::primitives::Id, - }, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Error { - #[codec(index = 0)] - #[doc = "The parachain ID is not onboarding."] - ParaNotOnboarding, - #[codec(index = 1)] - #[doc = "There was an error with the lease."] - LeaseError, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Event { - #[codec(index = 0)] - #[doc = "A new `[lease_period]` is beginning."] - NewLeasePeriod(::core::primitive::u32), - #[codec(index = 1)] - #[doc = "A para has won the right to a continuous set of lease periods as a parachain."] - #[doc = "First balance is any extra amount reserved on top of the para's existing deposit."] - #[doc = "Second balance is the total amount reserved."] - #[doc = "`[parachain_id, leaser, period_begin, period_count, extra_reserved, total_amount]`"] - Leased( - runtime_types::polkadot_parachain::primitives::Id, - ::subxt::sp_core::crypto::AccountId32, - ::core::primitive::u32, - ::core::primitive::u32, - ::core::primitive::u128, - ::core::primitive::u128, - ), - } - } - } - } - pub mod polkadot_runtime_parachains { - use super::runtime_types; - pub mod configuration { - use super::runtime_types; - pub mod pallet { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Call { - #[codec(index = 0)] - #[doc = "Set the validation upgrade cooldown."] - set_validation_upgrade_cooldown { new: ::core::primitive::u32 }, - #[codec(index = 1)] - #[doc = "Set the validation upgrade delay."] - set_validation_upgrade_delay { new: ::core::primitive::u32 }, - #[codec(index = 2)] - #[doc = "Set the acceptance period for an 
included candidate."] - set_code_retention_period { new: ::core::primitive::u32 }, - #[codec(index = 3)] - #[doc = "Set the max validation code size for incoming upgrades."] - set_max_code_size { new: ::core::primitive::u32 }, - #[codec(index = 4)] - #[doc = "Set the max POV block size for incoming upgrades."] - set_max_pov_size { new: ::core::primitive::u32 }, - #[codec(index = 5)] - #[doc = "Set the max head data size for paras."] - set_max_head_data_size { new: ::core::primitive::u32 }, - #[codec(index = 6)] - #[doc = "Set the number of parathread execution cores."] - set_parathread_cores { new: ::core::primitive::u32 }, - #[codec(index = 7)] - #[doc = "Set the number of retries for a particular parathread."] - set_parathread_retries { new: ::core::primitive::u32 }, - #[codec(index = 8)] - #[doc = "Set the parachain validator-group rotation frequency"] - set_group_rotation_frequency { new: ::core::primitive::u32 }, - #[codec(index = 9)] - #[doc = "Set the availability period for parachains."] - set_chain_availability_period { new: ::core::primitive::u32 }, - #[codec(index = 10)] - #[doc = "Set the availability period for parathreads."] - set_thread_availability_period { new: ::core::primitive::u32 }, - #[codec(index = 11)] - #[doc = "Set the scheduling lookahead, in expected number of blocks at peak throughput."] - set_scheduling_lookahead { new: ::core::primitive::u32 }, - #[codec(index = 12)] - #[doc = "Set the maximum number of validators to assign to any core."] - set_max_validators_per_core { - new: ::core::option::Option<::core::primitive::u32>, - }, - #[codec(index = 13)] - #[doc = "Set the maximum number of validators to use in parachain consensus."] - set_max_validators { - new: ::core::option::Option<::core::primitive::u32>, - }, - #[codec(index = 14)] - #[doc = "Set the dispute period, in number of sessions to keep for disputes."] - set_dispute_period { new: ::core::primitive::u32 }, - #[codec(index = 15)] - #[doc = "Set the dispute post conclusion acceptance period."] - set_dispute_post_conclusion_acceptance_period { - new: ::core::primitive::u32, - }, - #[codec(index = 16)] - #[doc = "Set the maximum number of dispute spam slots."] - set_dispute_max_spam_slots { new: ::core::primitive::u32 }, - #[codec(index = 17)] - #[doc = "Set the dispute conclusion by time out period."] - set_dispute_conclusion_by_time_out_period { new: ::core::primitive::u32 }, - #[codec(index = 18)] - #[doc = "Set the no show slots, in number of number of consensus slots."] - #[doc = "Must be at least 1."] - set_no_show_slots { new: ::core::primitive::u32 }, - #[codec(index = 19)] - #[doc = "Set the total number of delay tranches."] - set_n_delay_tranches { new: ::core::primitive::u32 }, - #[codec(index = 20)] - #[doc = "Set the zeroth delay tranche width."] - set_zeroth_delay_tranche_width { new: ::core::primitive::u32 }, - #[codec(index = 21)] - #[doc = "Set the number of validators needed to approve a block."] - set_needed_approvals { new: ::core::primitive::u32 }, - #[codec(index = 22)] - #[doc = "Set the number of samples to do of the `RelayVRFModulo` approval assignment criterion."] - set_relay_vrf_modulo_samples { new: ::core::primitive::u32 }, - #[codec(index = 23)] - #[doc = "Sets the maximum items that can present in a upward dispatch queue at once."] - set_max_upward_queue_count { new: ::core::primitive::u32 }, - #[codec(index = 24)] - #[doc = "Sets the maximum total size of items that can present in a upward dispatch queue at once."] - set_max_upward_queue_size { new: 
::core::primitive::u32 }, - #[codec(index = 25)] - #[doc = "Set the critical downward message size."] - set_max_downward_message_size { new: ::core::primitive::u32 }, - #[codec(index = 26)] - #[doc = "Sets the soft limit for the phase of dispatching dispatchable upward messages."] - set_ump_service_total_weight { new: ::core::primitive::u64 }, - #[codec(index = 27)] - #[doc = "Sets the maximum size of an upward message that can be sent by a candidate."] - set_max_upward_message_size { new: ::core::primitive::u32 }, - #[codec(index = 28)] - #[doc = "Sets the maximum number of messages that a candidate can contain."] - set_max_upward_message_num_per_candidate { new: ::core::primitive::u32 }, - #[codec(index = 29)] - #[doc = "Sets the number of sessions after which an HRMP open channel request expires."] - set_hrmp_open_request_ttl { new: ::core::primitive::u32 }, - #[codec(index = 30)] - #[doc = "Sets the amount of funds that the sender should provide for opening an HRMP channel."] - set_hrmp_sender_deposit { new: ::core::primitive::u128 }, - #[codec(index = 31)] - #[doc = "Sets the amount of funds that the recipient should provide for accepting opening an HRMP"] - #[doc = "channel."] - set_hrmp_recipient_deposit { new: ::core::primitive::u128 }, - #[codec(index = 32)] - #[doc = "Sets the maximum number of messages allowed in an HRMP channel at once."] - set_hrmp_channel_max_capacity { new: ::core::primitive::u32 }, - #[codec(index = 33)] - #[doc = "Sets the maximum total size of messages in bytes allowed in an HRMP channel at once."] - set_hrmp_channel_max_total_size { new: ::core::primitive::u32 }, - #[codec(index = 34)] - #[doc = "Sets the maximum number of inbound HRMP channels a parachain is allowed to accept."] - set_hrmp_max_parachain_inbound_channels { new: ::core::primitive::u32 }, - #[codec(index = 35)] - #[doc = "Sets the maximum number of inbound HRMP channels a parathread is allowed to accept."] - set_hrmp_max_parathread_inbound_channels { new: ::core::primitive::u32 }, - #[codec(index = 36)] - #[doc = "Sets the maximum size of a message that could ever be put into an HRMP channel."] - set_hrmp_channel_max_message_size { new: ::core::primitive::u32 }, - #[codec(index = 37)] - #[doc = "Sets the maximum number of outbound HRMP channels a parachain is allowed to open."] - set_hrmp_max_parachain_outbound_channels { new: ::core::primitive::u32 }, - #[codec(index = 38)] - #[doc = "Sets the maximum number of outbound HRMP channels a parathread is allowed to open."] - set_hrmp_max_parathread_outbound_channels { new: ::core::primitive::u32 }, - #[codec(index = 39)] - #[doc = "Sets the maximum number of outbound HRMP messages can be sent by a candidate."] - set_hrmp_max_message_num_per_candidate { new: ::core::primitive::u32 }, - #[codec(index = 40)] - #[doc = "Sets the maximum amount of weight any individual upward message may consume."] - set_ump_max_individual_weight { new: ::core::primitive::u64 }, - #[codec(index = 41)] - #[doc = "Enable or disable PVF pre-checking. 
Consult the field documentation prior executing."] - set_pvf_checking_enabled { new: ::core::primitive::bool }, - #[codec(index = 42)] - #[doc = "Set the number of session changes after which a PVF pre-checking voting is rejected."] - set_pvf_voting_ttl { new: ::core::primitive::u32 }, - #[codec(index = 43)] - #[doc = "Sets the minimum delay between announcing the upgrade block for a parachain until the"] - #[doc = "upgrade taking place."] - #[doc = ""] - #[doc = "See the field documentation for information and constraints for the new value."] - set_minimum_validation_upgrade_delay { new: ::core::primitive::u32 }, - #[codec(index = 44)] - #[doc = "Setting this to true will disable consistency checks for the configuration setters."] - #[doc = "Use with caution."] - set_bypass_consistency_check { new: ::core::primitive::bool }, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Error { - #[codec(index = 0)] - #[doc = "The new value for a configuration parameter is invalid."] - InvalidNewValue, - } - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct HostConfiguration<_0> { - pub max_code_size: _0, - pub max_head_data_size: _0, - pub max_upward_queue_count: _0, - pub max_upward_queue_size: _0, - pub max_upward_message_size: _0, - pub max_upward_message_num_per_candidate: _0, - pub hrmp_max_message_num_per_candidate: _0, - pub validation_upgrade_cooldown: _0, - pub validation_upgrade_delay: _0, - pub max_pov_size: _0, - pub max_downward_message_size: _0, - pub ump_service_total_weight: ::core::primitive::u64, - pub hrmp_max_parachain_outbound_channels: _0, - pub hrmp_max_parathread_outbound_channels: _0, - pub hrmp_sender_deposit: ::core::primitive::u128, - pub hrmp_recipient_deposit: ::core::primitive::u128, - pub hrmp_channel_max_capacity: _0, - pub hrmp_channel_max_total_size: _0, - pub hrmp_max_parachain_inbound_channels: _0, - pub hrmp_max_parathread_inbound_channels: _0, - pub hrmp_channel_max_message_size: _0, - pub code_retention_period: _0, - pub parathread_cores: _0, - pub parathread_retries: _0, - pub group_rotation_frequency: _0, - pub chain_availability_period: _0, - pub thread_availability_period: _0, - pub scheduling_lookahead: _0, - pub max_validators_per_core: ::core::option::Option<_0>, - pub max_validators: ::core::option::Option<_0>, - pub dispute_period: _0, - pub dispute_post_conclusion_acceptance_period: _0, - pub dispute_max_spam_slots: _0, - pub dispute_conclusion_by_time_out_period: _0, - pub no_show_slots: _0, - pub n_delay_tranches: _0, - pub zeroth_delay_tranche_width: _0, - pub needed_approvals: _0, - pub relay_vrf_modulo_samples: _0, - pub ump_max_individual_weight: ::core::primitive::u64, - pub pvf_checking_enabled: ::core::primitive::bool, - pub pvf_voting_ttl: _0, - pub minimum_validation_upgrade_delay: _0, - } - } - pub mod disputes { - use super::runtime_types; - pub mod pallet { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Call { - #[codec(index = 0)] - force_unfreeze, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Error { - #[codec(index = 0)] - #[doc = "Duplicate dispute statement sets provided."] - DuplicateDisputeStatementSets, - #[codec(index = 1)] - #[doc = "Ancient dispute statement provided."] - AncientDisputeStatement, - #[codec(index = 2)] - #[doc = "Validator index on statement is out of bounds for session."] - ValidatorIndexOutOfBounds, - 
#[codec(index = 3)] - #[doc = "Invalid signature on statement."] - InvalidSignature, - #[codec(index = 4)] - #[doc = "Validator vote submitted more than once to dispute."] - DuplicateStatement, - #[codec(index = 5)] - #[doc = "Too many spam slots used by some specific validator."] - PotentialSpam, - #[codec(index = 6)] - #[doc = "A dispute where there are only votes on one side."] - SingleSidedDispute, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Event { - #[codec(index = 0)] - #[doc = "A dispute has been initiated. \\[candidate hash, dispute location\\]"] - DisputeInitiated( - runtime_types::polkadot_core_primitives::CandidateHash, - runtime_types::polkadot_runtime_parachains::disputes::DisputeLocation, - ), - #[codec(index = 1)] - #[doc = "A dispute has concluded for or against a candidate."] - #[doc = "`\\[para id, candidate hash, dispute result\\]`"] - DisputeConcluded( - runtime_types::polkadot_core_primitives::CandidateHash, - runtime_types::polkadot_runtime_parachains::disputes::DisputeResult, - ), - #[codec(index = 2)] - #[doc = "A dispute has timed out due to insufficient participation."] - #[doc = "`\\[para id, candidate hash\\]`"] - DisputeTimedOut(runtime_types::polkadot_core_primitives::CandidateHash), - #[codec(index = 3)] - #[doc = "A dispute has concluded with supermajority against a candidate."] - #[doc = "Block authors should no longer build on top of this head and should"] - #[doc = "instead revert the block at the given height. This should be the"] - #[doc = "number of the child of the last known valid block in the chain."] - Revert(::core::primitive::u32), - } - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum DisputeLocation { - #[codec(index = 0)] - Local, - #[codec(index = 1)] - Remote, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum DisputeResult { - #[codec(index = 0)] - Valid, - #[codec(index = 1)] - Invalid, - } - } - pub mod dmp { - use super::runtime_types; - pub mod pallet { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Call {} - } - } - pub mod hrmp { - use super::runtime_types; - pub mod pallet { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Call { - #[codec(index = 0)] - #[doc = "Initiate opening a channel from a parachain to a given recipient with given channel"] - #[doc = "parameters."] - #[doc = ""] - #[doc = "- `proposed_max_capacity` - specifies how many messages can be in the channel at once."] - #[doc = "- `proposed_max_message_size` - specifies the maximum size of the messages."] - #[doc = ""] - #[doc = "These numbers are a subject to the relay-chain configuration limits."] - #[doc = ""] - #[doc = "The channel can be opened only after the recipient confirms it and only on a session"] - #[doc = "change."] - hrmp_init_open_channel { - recipient: runtime_types::polkadot_parachain::primitives::Id, - proposed_max_capacity: ::core::primitive::u32, - proposed_max_message_size: ::core::primitive::u32, - }, - #[codec(index = 1)] - #[doc = "Accept a pending open channel request from the given sender."] - #[doc = ""] - #[doc = "The channel will be opened only on the next session boundary."] - hrmp_accept_open_channel { - sender: runtime_types::polkadot_parachain::primitives::Id, - }, - #[codec(index = 2)] - #[doc = "Initiate unilateral closing of a channel. 
The origin must be either the sender or the"] - #[doc = "recipient in the channel being closed."] - #[doc = ""] - #[doc = "The closure can only happen on a session change."] - hrmp_close_channel { - channel_id: - runtime_types::polkadot_parachain::primitives::HrmpChannelId, - }, - #[codec(index = 3)] - #[doc = "This extrinsic triggers the cleanup of all the HRMP storage items that"] - #[doc = "a para may have. Normally this happens once per session, but this allows"] - #[doc = "you to trigger the cleanup immediately for a specific parachain."] - #[doc = ""] - #[doc = "Origin must be Root."] - #[doc = ""] - #[doc = "Number of inbound and outbound channels for `para` must be provided as witness data of weighing."] - force_clean_hrmp { - para: runtime_types::polkadot_parachain::primitives::Id, - inbound: ::core::primitive::u32, - outbound: ::core::primitive::u32, - }, - #[codec(index = 4)] - #[doc = "Force process HRMP open channel requests."] - #[doc = ""] - #[doc = "If there are pending HRMP open channel requests, you can use this"] - #[doc = "function process all of those requests immediately."] - #[doc = ""] - #[doc = "Total number of opening channels must be provided as witness data of weighing."] - force_process_hrmp_open { channels: ::core::primitive::u32 }, - #[codec(index = 5)] - #[doc = "Force process HRMP close channel requests."] - #[doc = ""] - #[doc = "If there are pending HRMP close channel requests, you can use this"] - #[doc = "function process all of those requests immediately."] - #[doc = ""] - #[doc = "Total number of closing channels must be provided as witness data of weighing."] - force_process_hrmp_close { channels: ::core::primitive::u32 }, - #[codec(index = 6)] - #[doc = "This cancels a pending open channel request. It can be canceled by either of the sender"] - #[doc = "or the recipient for that request. The origin must be either of those."] - #[doc = ""] - #[doc = "The cancellation happens immediately. It is not possible to cancel the request if it is"] - #[doc = "already accepted."] - #[doc = ""] - #[doc = "Total number of open requests (i.e. 
`HrmpOpenChannelRequestsList`) must be provided as"] - #[doc = "witness data."] - hrmp_cancel_open_request { - channel_id: - runtime_types::polkadot_parachain::primitives::HrmpChannelId, - open_requests: ::core::primitive::u32, - }, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Error { - #[codec(index = 0)] - #[doc = "The sender tried to open a channel to themselves."] - OpenHrmpChannelToSelf, - #[codec(index = 1)] - #[doc = "The recipient is not a valid para."] - OpenHrmpChannelInvalidRecipient, - #[codec(index = 2)] - #[doc = "The requested capacity is zero."] - OpenHrmpChannelZeroCapacity, - #[codec(index = 3)] - #[doc = "The requested capacity exceeds the global limit."] - OpenHrmpChannelCapacityExceedsLimit, - #[codec(index = 4)] - #[doc = "The requested maximum message size is 0."] - OpenHrmpChannelZeroMessageSize, - #[codec(index = 5)] - #[doc = "The open request requested the message size that exceeds the global limit."] - OpenHrmpChannelMessageSizeExceedsLimit, - #[codec(index = 6)] - #[doc = "The channel already exists"] - OpenHrmpChannelAlreadyExists, - #[codec(index = 7)] - #[doc = "There is already a request to open the same channel."] - OpenHrmpChannelAlreadyRequested, - #[codec(index = 8)] - #[doc = "The sender already has the maximum number of allowed outbound channels."] - OpenHrmpChannelLimitExceeded, - #[codec(index = 9)] - #[doc = "The channel from the sender to the origin doesn't exist."] - AcceptHrmpChannelDoesntExist, - #[codec(index = 10)] - #[doc = "The channel is already confirmed."] - AcceptHrmpChannelAlreadyConfirmed, - #[codec(index = 11)] - #[doc = "The recipient already has the maximum number of allowed inbound channels."] - AcceptHrmpChannelLimitExceeded, - #[codec(index = 12)] - #[doc = "The origin tries to close a channel where it is neither the sender nor the recipient."] - CloseHrmpChannelUnauthorized, - #[codec(index = 13)] - #[doc = "The channel to be closed doesn't exist."] - CloseHrmpChannelDoesntExist, - #[codec(index = 14)] - #[doc = "The channel close request is already requested."] - CloseHrmpChannelAlreadyUnderway, - #[codec(index = 15)] - #[doc = "Canceling is requested by neither the sender nor recipient of the open channel request."] - CancelHrmpOpenChannelUnauthorized, - #[codec(index = 16)] - #[doc = "The open request doesn't exist."] - OpenHrmpChannelDoesntExist, - #[codec(index = 17)] - #[doc = "Cannot cancel an HRMP open channel request because it is already confirmed."] - OpenHrmpChannelAlreadyConfirmed, - #[codec(index = 18)] - #[doc = "The provided witness data is wrong."] - WrongWitness, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Event { - #[codec(index = 0)] - #[doc = "Open HRMP channel requested."] - #[doc = "`[sender, recipient, proposed_max_capacity, proposed_max_message_size]`"] - OpenChannelRequested( - runtime_types::polkadot_parachain::primitives::Id, - runtime_types::polkadot_parachain::primitives::Id, - ::core::primitive::u32, - ::core::primitive::u32, - ), - #[codec(index = 1)] - #[doc = "An HRMP channel request sent by the receiver was canceled by either party."] - #[doc = "`[by_parachain, channel_id]`"] - OpenChannelCanceled( - runtime_types::polkadot_parachain::primitives::Id, - runtime_types::polkadot_parachain::primitives::HrmpChannelId, - ), - #[codec(index = 2)] - #[doc = "Open HRMP channel accepted. 
`[sender, recipient]`"] - OpenChannelAccepted( - runtime_types::polkadot_parachain::primitives::Id, - runtime_types::polkadot_parachain::primitives::Id, - ), - #[codec(index = 3)] - #[doc = "HRMP channel closed. `[by_parachain, channel_id]`"] - ChannelClosed( - runtime_types::polkadot_parachain::primitives::Id, - runtime_types::polkadot_parachain::primitives::HrmpChannelId, - ), - } - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct HrmpChannel { - pub max_capacity: ::core::primitive::u32, - pub max_total_size: ::core::primitive::u32, - pub max_message_size: ::core::primitive::u32, - pub msg_count: ::core::primitive::u32, - pub total_size: ::core::primitive::u32, - pub mqc_head: ::core::option::Option<::subxt::sp_core::H256>, - pub sender_deposit: ::core::primitive::u128, - pub recipient_deposit: ::core::primitive::u128, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct HrmpOpenChannelRequest { - pub confirmed: ::core::primitive::bool, - pub _age: ::core::primitive::u32, - pub sender_deposit: ::core::primitive::u128, - pub max_message_size: ::core::primitive::u32, - pub max_capacity: ::core::primitive::u32, - pub max_total_size: ::core::primitive::u32, - } - } - pub mod inclusion { - use super::runtime_types; - pub mod pallet { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Call {} - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Error { - #[codec(index = 0)] - #[doc = "Validator indices are out of order or contains duplicates."] - UnsortedOrDuplicateValidatorIndices, - #[codec(index = 1)] - #[doc = "Dispute statement sets are out of order or contain duplicates."] - UnsortedOrDuplicateDisputeStatementSet, - #[codec(index = 2)] - #[doc = "Backed candidates are out of order (core index) or contain duplicates."] - UnsortedOrDuplicateBackedCandidates, - #[codec(index = 3)] - #[doc = "A different relay parent was provided compared to the on-chain stored one."] - UnexpectedRelayParent, - #[codec(index = 4)] - #[doc = "Availability bitfield has unexpected size."] - WrongBitfieldSize, - #[codec(index = 5)] - #[doc = "Bitfield consists of zeros only."] - BitfieldAllZeros, - #[codec(index = 6)] - #[doc = "Multiple bitfields submitted by same validator or validators out of order by index."] - BitfieldDuplicateOrUnordered, - #[codec(index = 7)] - #[doc = "Validator index out of bounds."] - ValidatorIndexOutOfBounds, - #[codec(index = 8)] - #[doc = "Invalid signature"] - InvalidBitfieldSignature, - #[codec(index = 9)] - #[doc = "Candidate submitted but para not scheduled."] - UnscheduledCandidate, - #[codec(index = 10)] - #[doc = "Candidate scheduled despite pending candidate already existing for the para."] - CandidateScheduledBeforeParaFree, - #[codec(index = 11)] - #[doc = "Candidate included with the wrong collator."] - WrongCollator, - #[codec(index = 12)] - #[doc = "Scheduled cores out of order."] - ScheduledOutOfOrder, - #[codec(index = 13)] - #[doc = "Head data exceeds the configured maximum."] - HeadDataTooLarge, - #[codec(index = 14)] - #[doc = "Code upgrade prematurely."] - PrematureCodeUpgrade, - #[codec(index = 15)] - #[doc = "Output code is too large"] - NewCodeTooLarge, - #[codec(index = 16)] - #[doc = "Candidate not in parent context."] - CandidateNotInParentContext, - #[codec(index = 17)] - #[doc = "Invalid group index in core assignment."] - InvalidGroupIndex, - #[codec(index = 18)] - #[doc = 
"Insufficient (non-majority) backing."] - InsufficientBacking, - #[codec(index = 19)] - #[doc = "Invalid (bad signature, unknown validator, etc.) backing."] - InvalidBacking, - #[codec(index = 20)] - #[doc = "Collator did not sign PoV."] - NotCollatorSigned, - #[codec(index = 21)] - #[doc = "The validation data hash does not match expected."] - ValidationDataHashMismatch, - #[codec(index = 22)] - #[doc = "The downward message queue is not processed correctly."] - IncorrectDownwardMessageHandling, - #[codec(index = 23)] - #[doc = "At least one upward message sent does not pass the acceptance criteria."] - InvalidUpwardMessages, - #[codec(index = 24)] - #[doc = "The candidate didn't follow the rules of HRMP watermark advancement."] - HrmpWatermarkMishandling, - #[codec(index = 25)] - #[doc = "The HRMP messages sent by the candidate is not valid."] - InvalidOutboundHrmp, - #[codec(index = 26)] - #[doc = "The validation code hash of the candidate is not valid."] - InvalidValidationCodeHash, - #[codec(index = 27)] - #[doc = "The `para_head` hash in the candidate descriptor doesn't match the hash of the actual para head in the"] - #[doc = "commitments."] - ParaHeadMismatch, - #[codec(index = 28)] - #[doc = "A bitfield that references a freed core,"] - #[doc = "either intentionally or as part of a concluded"] - #[doc = "invalid dispute."] - BitfieldReferencesFreedCore, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Event { - #[codec(index = 0)] - #[doc = "A candidate was backed. `[candidate, head_data]`"] - CandidateBacked( - runtime_types::polkadot_primitives::v2::CandidateReceipt< - ::subxt::sp_core::H256, - >, - runtime_types::polkadot_parachain::primitives::HeadData, - runtime_types::polkadot_primitives::v2::CoreIndex, - runtime_types::polkadot_primitives::v2::GroupIndex, - ), - #[codec(index = 1)] - #[doc = "A candidate was included. `[candidate, head_data]`"] - CandidateIncluded( - runtime_types::polkadot_primitives::v2::CandidateReceipt< - ::subxt::sp_core::H256, - >, - runtime_types::polkadot_parachain::primitives::HeadData, - runtime_types::polkadot_primitives::v2::CoreIndex, - runtime_types::polkadot_primitives::v2::GroupIndex, - ), - #[codec(index = 2)] - #[doc = "A candidate timed out. 
`[candidate, head_data]`"] - CandidateTimedOut( - runtime_types::polkadot_primitives::v2::CandidateReceipt< - ::subxt::sp_core::H256, - >, - runtime_types::polkadot_parachain::primitives::HeadData, - runtime_types::polkadot_primitives::v2::CoreIndex, - ), - } - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct AvailabilityBitfieldRecord<_0> { - pub bitfield: runtime_types::polkadot_primitives::v2::AvailabilityBitfield, - pub submitted_at: _0, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct CandidatePendingAvailability<_0, _1> { - pub core: runtime_types::polkadot_primitives::v2::CoreIndex, - pub hash: runtime_types::polkadot_core_primitives::CandidateHash, - pub descriptor: runtime_types::polkadot_primitives::v2::CandidateDescriptor<_0>, - pub availability_votes: ::subxt::bitvec::vec::BitVec< - ::core::primitive::u8, - ::subxt::bitvec::order::Lsb0, - >, - pub backers: ::subxt::bitvec::vec::BitVec< - ::core::primitive::u8, - ::subxt::bitvec::order::Lsb0, - >, - pub relay_parent_number: _1, - pub backed_in_number: _1, - pub backing_group: runtime_types::polkadot_primitives::v2::GroupIndex, - } - } - pub mod initializer { - use super::runtime_types; - pub mod pallet { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Call { - #[codec(index = 0)] - #[doc = "Issue a signal to the consensus engine to forcibly act as though all parachain"] - #[doc = "blocks in all relay chain blocks up to and including the given number in the current"] - #[doc = "chain are valid and should be finalized."] - force_approve { up_to: ::core::primitive::u32 }, - } - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct BufferedSessionChange { - pub validators: ::std::vec::Vec< - runtime_types::polkadot_primitives::v2::validator_app::Public, - >, - pub queued: ::std::vec::Vec< - runtime_types::polkadot_primitives::v2::validator_app::Public, - >, - pub session_index: ::core::primitive::u32, - } - } - pub mod origin { - use super::runtime_types; - pub mod pallet { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Origin { - #[codec(index = 0)] - Parachain(runtime_types::polkadot_parachain::primitives::Id), - } - } - } - pub mod paras { - use super::runtime_types; - pub mod pallet { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Call { - #[codec(index = 0)] - #[doc = "Set the storage for the parachain validation code immediately."] - force_set_current_code { - para: runtime_types::polkadot_parachain::primitives::Id, - new_code: runtime_types::polkadot_parachain::primitives::ValidationCode, - }, - #[codec(index = 1)] - #[doc = "Set the storage for the current parachain head data immediately."] - force_set_current_head { - para: runtime_types::polkadot_parachain::primitives::Id, - new_head: runtime_types::polkadot_parachain::primitives::HeadData, - }, - #[codec(index = 2)] - #[doc = "Schedule an upgrade as if it was scheduled in the given relay parent block."] - force_schedule_code_upgrade { - para: runtime_types::polkadot_parachain::primitives::Id, - new_code: runtime_types::polkadot_parachain::primitives::ValidationCode, - relay_parent_number: ::core::primitive::u32, - }, - #[codec(index = 3)] - #[doc = "Note a new block head for para within the context of the current block."] - force_note_new_head { 
- para: runtime_types::polkadot_parachain::primitives::Id, - new_head: runtime_types::polkadot_parachain::primitives::HeadData, - }, - #[codec(index = 4)] - #[doc = "Put a parachain directly into the next session's action queue."] - #[doc = "We can't queue it any sooner than this without going into the"] - #[doc = "initializer..."] - force_queue_action { - para: runtime_types::polkadot_parachain::primitives::Id, - }, - #[codec(index = 5)] - #[doc = "Adds the validation code to the storage."] - #[doc = ""] - #[doc = "The code will not be added if it is already present. Additionally, if PVF pre-checking"] - #[doc = "is running for that code, it will be instantly accepted."] - #[doc = ""] - #[doc = "Otherwise, the code will be added into the storage. Note that the code will be added"] - #[doc = "into storage with reference count 0. This is to account the fact that there are no users"] - #[doc = "for this code yet. The caller will have to make sure that this code eventually gets"] - #[doc = "used by some parachain or removed from the storage to avoid storage leaks. For the latter"] - #[doc = "prefer to use the `poke_unused_validation_code` dispatchable to raw storage manipulation."] - #[doc = ""] - #[doc = "This function is mainly meant to be used for upgrading parachains that do not follow"] - #[doc = "the go-ahead signal while the PVF pre-checking feature is enabled."] - add_trusted_validation_code { - validation_code: - runtime_types::polkadot_parachain::primitives::ValidationCode, - }, - #[codec(index = 6)] - #[doc = "Remove the validation code from the storage iff the reference count is 0."] - #[doc = ""] - #[doc = "This is better than removing the storage directly, because it will not remove the code"] - #[doc = "that was suddenly got used by some parachain while this dispatchable was pending"] - #[doc = "dispatching."] - poke_unused_validation_code { - validation_code_hash: - runtime_types::polkadot_parachain::primitives::ValidationCodeHash, - }, - #[codec(index = 7)] - #[doc = "Includes a statement for a PVF pre-checking vote. 
Potentially, finalizes the vote and"] - #[doc = "enacts the results if that was the last vote before achieving the supermajority."] - include_pvf_check_statement { - stmt: runtime_types::polkadot_primitives::v2::PvfCheckStatement, - signature: - runtime_types::polkadot_primitives::v2::validator_app::Signature, - }, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Error { - #[codec(index = 0)] - #[doc = "Para is not registered in our system."] - NotRegistered, - #[codec(index = 1)] - #[doc = "Para cannot be onboarded because it is already tracked by our system."] - CannotOnboard, - #[codec(index = 2)] - #[doc = "Para cannot be offboarded at this time."] - CannotOffboard, - #[codec(index = 3)] - #[doc = "Para cannot be upgraded to a parachain."] - CannotUpgrade, - #[codec(index = 4)] - #[doc = "Para cannot be downgraded to a parathread."] - CannotDowngrade, - #[codec(index = 5)] - #[doc = "The statement for PVF pre-checking is stale."] - PvfCheckStatementStale, - #[codec(index = 6)] - #[doc = "The statement for PVF pre-checking is for a future session."] - PvfCheckStatementFuture, - #[codec(index = 7)] - #[doc = "Claimed validator index is out of bounds."] - PvfCheckValidatorIndexOutOfBounds, - #[codec(index = 8)] - #[doc = "The signature for the PVF pre-checking is invalid."] - PvfCheckInvalidSignature, - #[codec(index = 9)] - #[doc = "The given validator already has cast a vote."] - PvfCheckDoubleVote, - #[codec(index = 10)] - #[doc = "The given PVF does not exist at the moment of process a vote."] - PvfCheckSubjectInvalid, - #[codec(index = 11)] - #[doc = "The PVF pre-checking statement cannot be included since the PVF pre-checking mechanism"] - #[doc = "is disabled."] - PvfCheckDisabled, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Event { - #[codec(index = 0)] - #[doc = "Current code has been updated for a Para. `para_id`"] - CurrentCodeUpdated(runtime_types::polkadot_parachain::primitives::Id), - #[codec(index = 1)] - #[doc = "Current head has been updated for a Para. `para_id`"] - CurrentHeadUpdated(runtime_types::polkadot_parachain::primitives::Id), - #[codec(index = 2)] - #[doc = "A code upgrade has been scheduled for a Para. `para_id`"] - CodeUpgradeScheduled(runtime_types::polkadot_parachain::primitives::Id), - #[codec(index = 3)] - #[doc = "A new head has been noted for a Para. `para_id`"] - NewHeadNoted(runtime_types::polkadot_parachain::primitives::Id), - #[codec(index = 4)] - #[doc = "A para has been queued to execute pending actions. `para_id`"] - ActionQueued( - runtime_types::polkadot_parachain::primitives::Id, - ::core::primitive::u32, - ), - #[codec(index = 5)] - #[doc = "The given para either initiated or subscribed to a PVF check for the given validation"] - #[doc = "code. 
`code_hash` `para_id`"] - PvfCheckStarted( - runtime_types::polkadot_parachain::primitives::ValidationCodeHash, - runtime_types::polkadot_parachain::primitives::Id, - ), - #[codec(index = 6)] - #[doc = "The given validation code was accepted by the PVF pre-checking vote."] - #[doc = "`code_hash` `para_id`"] - PvfCheckAccepted( - runtime_types::polkadot_parachain::primitives::ValidationCodeHash, - runtime_types::polkadot_parachain::primitives::Id, - ), - #[codec(index = 7)] - #[doc = "The given validation code was rejected by the PVF pre-checking vote."] - #[doc = "`code_hash` `para_id`"] - PvfCheckRejected( - runtime_types::polkadot_parachain::primitives::ValidationCodeHash, - runtime_types::polkadot_parachain::primitives::Id, - ), - } - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct ParaGenesisArgs { - pub genesis_head: runtime_types::polkadot_parachain::primitives::HeadData, - pub validation_code: - runtime_types::polkadot_parachain::primitives::ValidationCode, - pub parachain: ::core::primitive::bool, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum ParaLifecycle { - #[codec(index = 0)] - Onboarding, - #[codec(index = 1)] - Parathread, - #[codec(index = 2)] - Parachain, - #[codec(index = 3)] - UpgradingParathread, - #[codec(index = 4)] - DowngradingParachain, - #[codec(index = 5)] - OffboardingParathread, - #[codec(index = 6)] - OffboardingParachain, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct ParaPastCodeMeta<_0> { - pub upgrade_times: ::std::vec::Vec< - runtime_types::polkadot_runtime_parachains::paras::ReplacementTimes<_0>, - >, - pub last_pruned: ::core::option::Option<_0>, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct PvfCheckActiveVoteState<_0> { - pub votes_accept: ::subxt::bitvec::vec::BitVec< - ::core::primitive::u8, - ::subxt::bitvec::order::Lsb0, - >, - pub votes_reject: ::subxt::bitvec::vec::BitVec< - ::core::primitive::u8, - ::subxt::bitvec::order::Lsb0, - >, - pub age: _0, - pub created_at: _0, - pub causes: ::std::vec::Vec< - runtime_types::polkadot_runtime_parachains::paras::PvfCheckCause<_0>, - >, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum PvfCheckCause<_0> { - #[codec(index = 0)] - Onboarding(runtime_types::polkadot_parachain::primitives::Id), - #[codec(index = 1)] - Upgrade { - id: runtime_types::polkadot_parachain::primitives::Id, - relay_parent_number: _0, - }, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct ReplacementTimes<_0> { - pub expected_at: _0, - pub activated_at: _0, - } - } - pub mod paras_inherent { - use super::runtime_types; - pub mod pallet { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Call { - #[codec(index = 0)] - #[doc = "Enter the paras inherent. 
This will process bitfields and backed candidates."] - enter { - data: runtime_types::polkadot_primitives::v2::InherentData< - runtime_types::sp_runtime::generic::header::Header< - ::core::primitive::u32, - runtime_types::sp_runtime::traits::BlakeTwo256, - >, - >, - }, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Error { - #[codec(index = 0)] - #[doc = "Inclusion inherent called more than once per block."] - TooManyInclusionInherents, - #[codec(index = 1)] - #[doc = "The hash of the submitted parent header doesn't correspond to the saved block hash of"] - #[doc = "the parent."] - InvalidParentHeader, - #[codec(index = 2)] - #[doc = "Disputed candidate that was concluded invalid."] - CandidateConcludedInvalid, - #[codec(index = 3)] - #[doc = "The data given to the inherent will result in an overweight block."] - InherentOverweight, - #[codec(index = 4)] - #[doc = "The ordering of dispute statements was invalid."] - DisputeStatementsUnsortedOrDuplicates, - #[codec(index = 5)] - #[doc = "A dispute statement was invalid."] - DisputeInvalid, - } - } - } - pub mod scheduler { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum AssignmentKind { - #[codec(index = 0)] - Parachain, - #[codec(index = 1)] - Parathread( - runtime_types::polkadot_primitives::v2::collator_app::Public, - ::core::primitive::u32, - ), - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct CoreAssignment { - pub core: runtime_types::polkadot_primitives::v2::CoreIndex, - pub para_id: runtime_types::polkadot_parachain::primitives::Id, - pub kind: runtime_types::polkadot_runtime_parachains::scheduler::AssignmentKind, - pub group_idx: runtime_types::polkadot_primitives::v2::GroupIndex, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct ParathreadClaimQueue { - pub queue: ::std::vec::Vec< - runtime_types::polkadot_runtime_parachains::scheduler::QueuedParathread, - >, - pub next_core_offset: ::core::primitive::u32, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct QueuedParathread { - pub claim: runtime_types::polkadot_primitives::v2::ParathreadEntry, - pub core_offset: ::core::primitive::u32, - } - } - pub mod shared { - use super::runtime_types; - pub mod pallet { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Call {} - } - } - pub mod ump { - use super::runtime_types; - pub mod pallet { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Call { - #[codec(index = 0)] - #[doc = "Service a single overweight upward message."] - #[doc = ""] - #[doc = "- `origin`: Must pass `ExecuteOverweightOrigin`."] - #[doc = "- `index`: The index of the overweight message to service."] - #[doc = "- `weight_limit`: The amount of weight that message execution may take."] - #[doc = ""] - #[doc = "Errors:"] - #[doc = "- `UnknownMessageIndex`: Message of `index` is unknown."] - #[doc = "- `WeightOverLimit`: Message execution may use greater than `weight_limit`."] - #[doc = ""] - #[doc = "Events:"] - #[doc = "- `OverweightServiced`: On success."] - service_overweight { - index: ::core::primitive::u64, - weight_limit: ::core::primitive::u64, - }, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Error { - #[codec(index = 0)] - #[doc = "The message 
index given is unknown."] - UnknownMessageIndex, - #[codec(index = 1)] - #[doc = "The amount of weight given is possibly not enough for executing the message."] - WeightOverLimit, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Event { - #[codec(index = 0)] - #[doc = "Upward message is invalid XCM."] - #[doc = "\\[ id \\]"] - InvalidFormat([::core::primitive::u8; 32usize]), - #[codec(index = 1)] - #[doc = "Upward message is unsupported version of XCM."] - #[doc = "\\[ id \\]"] - UnsupportedVersion([::core::primitive::u8; 32usize]), - #[codec(index = 2)] - #[doc = "Upward message executed with the given outcome."] - #[doc = "\\[ id, outcome \\]"] - ExecutedUpward( - [::core::primitive::u8; 32usize], - runtime_types::xcm::v2::traits::Outcome, - ), - #[codec(index = 3)] - #[doc = "The weight limit for handling upward messages was reached."] - #[doc = "\\[ id, remaining, required \\]"] - WeightExhausted( - [::core::primitive::u8; 32usize], - ::core::primitive::u64, - ::core::primitive::u64, - ), - #[codec(index = 4)] - #[doc = "Some upward messages have been received and will be processed."] - #[doc = "\\[ para, count, size \\]"] - UpwardMessagesReceived( - runtime_types::polkadot_parachain::primitives::Id, - ::core::primitive::u32, - ::core::primitive::u32, - ), - #[codec(index = 5)] - #[doc = "The weight budget was exceeded for an individual upward message."] - #[doc = ""] - #[doc = "This message can be later dispatched manually using `service_overweight` dispatchable"] - #[doc = "using the assigned `overweight_index`."] - #[doc = ""] - #[doc = "\\[ para, id, overweight_index, required \\]"] - OverweightEnqueued( - runtime_types::polkadot_parachain::primitives::Id, - [::core::primitive::u8; 32usize], - ::core::primitive::u64, - ::core::primitive::u64, - ), - #[codec(index = 6)] - #[doc = "Upward message from the overweight queue was executed with the given actual weight"] - #[doc = "used."] - #[doc = ""] - #[doc = "\\[ overweight_index, used \\]"] - OverweightServiced(::core::primitive::u64, ::core::primitive::u64), - } - } - } - } - pub mod primitive_types { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct H256(pub [::core::primitive::u8; 32usize]); - } - pub mod rococo_runtime { - use super::runtime_types; - pub mod validator_manager { - use super::runtime_types; - pub mod pallet { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Call { - #[codec(index = 0)] - #[doc = "Add new validators to the set."] - #[doc = ""] - #[doc = "The new validators will be active from current session + 2."] - register_validators { - validators: ::std::vec::Vec<::subxt::sp_core::crypto::AccountId32>, - }, - #[codec(index = 1)] - #[doc = "Remove validators from the set."] - #[doc = ""] - #[doc = "The removed validators will be deactivated from current session + 2."] - deregister_validators { - validators: ::std::vec::Vec<::subxt::sp_core::crypto::AccountId32>, - }, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Event { - #[codec(index = 0)] - #[doc = "New validators were added to the set."] - ValidatorsRegistered( - ::std::vec::Vec<::subxt::sp_core::crypto::AccountId32>, - ), - #[codec(index = 1)] - #[doc = "Validators were removed from the set."] - ValidatorsDeregistered( - ::std::vec::Vec<::subxt::sp_core::crypto::AccountId32>, - ), - } - } - } - #[derive(:: subxt :: codec :: Decode, :: 
subxt :: codec :: Encode, Debug)] - pub enum Call { - #[codec(index = 0)] - System(runtime_types::frame_system::pallet::Call), - #[codec(index = 1)] - Babe(runtime_types::pallet_babe::pallet::Call), - #[codec(index = 2)] - Timestamp(runtime_types::pallet_timestamp::pallet::Call), - #[codec(index = 3)] - Indices(runtime_types::pallet_indices::pallet::Call), - #[codec(index = 4)] - Balances(runtime_types::pallet_balances::pallet::Call), - #[codec(index = 6)] - Authorship(runtime_types::pallet_authorship::pallet::Call), - #[codec(index = 9)] - Session(runtime_types::pallet_session::pallet::Call), - #[codec(index = 10)] - Grandpa(runtime_types::pallet_grandpa::pallet::Call), - #[codec(index = 11)] - ImOnline(runtime_types::pallet_im_online::pallet::Call), - #[codec(index = 14)] - Configuration( - runtime_types::polkadot_runtime_parachains::configuration::pallet::Call, - ), - #[codec(index = 15)] - ParasShared(runtime_types::polkadot_runtime_parachains::shared::pallet::Call), - #[codec(index = 16)] - ParaInclusion(runtime_types::polkadot_runtime_parachains::inclusion::pallet::Call), - #[codec(index = 17)] - ParaInherent( - runtime_types::polkadot_runtime_parachains::paras_inherent::pallet::Call, - ), - #[codec(index = 19)] - Paras(runtime_types::polkadot_runtime_parachains::paras::pallet::Call), - #[codec(index = 20)] - Initializer(runtime_types::polkadot_runtime_parachains::initializer::pallet::Call), - #[codec(index = 21)] - Dmp(runtime_types::polkadot_runtime_parachains::dmp::pallet::Call), - #[codec(index = 22)] - Ump(runtime_types::polkadot_runtime_parachains::ump::pallet::Call), - #[codec(index = 23)] - Hrmp(runtime_types::polkadot_runtime_parachains::hrmp::pallet::Call), - #[codec(index = 25)] - ParasDisputes(runtime_types::polkadot_runtime_parachains::disputes::pallet::Call), - #[codec(index = 26)] - Registrar(runtime_types::polkadot_runtime_common::paras_registrar::pallet::Call), - #[codec(index = 27)] - Auctions(runtime_types::polkadot_runtime_common::auctions::pallet::Call), - #[codec(index = 28)] - Crowdloan(runtime_types::polkadot_runtime_common::crowdloan::pallet::Call), - #[codec(index = 29)] - Slots(runtime_types::polkadot_runtime_common::slots::pallet::Call), - #[codec(index = 30)] - ParasSudoWrapper( - runtime_types::polkadot_runtime_common::paras_sudo_wrapper::pallet::Call, - ), - #[codec(index = 31)] - AssignedSlots(runtime_types::polkadot_runtime_common::assigned_slots::pallet::Call), - #[codec(index = 32)] - Sudo(runtime_types::pallet_sudo::pallet::Call), - #[codec(index = 34)] - Beefy(runtime_types::pallet_beefy::pallet::Call), - #[codec(index = 36)] - ValidatorManager(runtime_types::rococo_runtime::validator_manager::pallet::Call), - #[codec(index = 40)] - BridgeRococoGrandpa(runtime_types::pallet_bridge_grandpa::pallet::Call), - #[codec(index = 41)] - BridgeWococoGrandpa(runtime_types::pallet_bridge_grandpa::pallet::Call), - #[codec(index = 43)] - BridgeRococoMessages(runtime_types::pallet_bridge_messages::pallet::Call), - #[codec(index = 44)] - BridgeWococoMessages(runtime_types::pallet_bridge_messages::pallet::Call), - #[codec(index = 80)] - Collective(runtime_types::pallet_collective::pallet::Call), - #[codec(index = 81)] - Membership(runtime_types::pallet_membership::pallet::Call), - #[codec(index = 90)] - Utility(runtime_types::pallet_utility::pallet::Call), - #[codec(index = 91)] - Proxy(runtime_types::pallet_proxy::pallet::Call), - #[codec(index = 92)] - Multisig(runtime_types::pallet_multisig::pallet::Call), - #[codec(index = 99)] - 
XcmPallet(runtime_types::pallet_xcm::pallet::Call), - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Event { - #[codec(index = 0)] - System(runtime_types::frame_system::pallet::Event), - #[codec(index = 3)] - Indices(runtime_types::pallet_indices::pallet::Event), - #[codec(index = 4)] - Balances(runtime_types::pallet_balances::pallet::Event), - #[codec(index = 7)] - Offences(runtime_types::pallet_offences::pallet::Event), - #[codec(index = 9)] - Session(runtime_types::pallet_session::pallet::Event), - #[codec(index = 10)] - Grandpa(runtime_types::pallet_grandpa::pallet::Event), - #[codec(index = 11)] - ImOnline(runtime_types::pallet_im_online::pallet::Event), - #[codec(index = 16)] - ParaInclusion(runtime_types::polkadot_runtime_parachains::inclusion::pallet::Event), - #[codec(index = 19)] - Paras(runtime_types::polkadot_runtime_parachains::paras::pallet::Event), - #[codec(index = 22)] - Ump(runtime_types::polkadot_runtime_parachains::ump::pallet::Event), - #[codec(index = 23)] - Hrmp(runtime_types::polkadot_runtime_parachains::hrmp::pallet::Event), - #[codec(index = 25)] - ParasDisputes(runtime_types::polkadot_runtime_parachains::disputes::pallet::Event), - #[codec(index = 26)] - Registrar(runtime_types::polkadot_runtime_common::paras_registrar::pallet::Event), - #[codec(index = 27)] - Auctions(runtime_types::polkadot_runtime_common::auctions::pallet::Event), - #[codec(index = 28)] - Crowdloan(runtime_types::polkadot_runtime_common::crowdloan::pallet::Event), - #[codec(index = 29)] - Slots(runtime_types::polkadot_runtime_common::slots::pallet::Event), - #[codec(index = 31)] - AssignedSlots( - runtime_types::polkadot_runtime_common::assigned_slots::pallet::Event, - ), - #[codec(index = 32)] - Sudo(runtime_types::pallet_sudo::pallet::Event), - #[codec(index = 36)] - ValidatorManager(runtime_types::rococo_runtime::validator_manager::pallet::Event), - #[codec(index = 43)] - BridgeRococoMessages(runtime_types::pallet_bridge_messages::pallet::Event), - #[codec(index = 44)] - BridgeWococoMessages(runtime_types::pallet_bridge_messages::pallet::Event), - #[codec(index = 45)] - BridgeRococoMessagesDispatch(runtime_types::pallet_bridge_dispatch::pallet::Event), - #[codec(index = 46)] - BridgeWococoMessagesDispatch(runtime_types::pallet_bridge_dispatch::pallet::Event), - #[codec(index = 80)] - Collective(runtime_types::pallet_collective::pallet::Event), - #[codec(index = 81)] - Membership(runtime_types::pallet_membership::pallet::Event), - #[codec(index = 90)] - Utility(runtime_types::pallet_utility::pallet::Event), - #[codec(index = 91)] - Proxy(runtime_types::pallet_proxy::pallet::Event), - #[codec(index = 92)] - Multisig(runtime_types::pallet_multisig::pallet::Event), - #[codec(index = 99)] - XcmPallet(runtime_types::pallet_xcm::pallet::Event), - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum OriginCaller { - #[codec(index = 0)] - system( - runtime_types::frame_support::dispatch::RawOrigin< - ::subxt::sp_core::crypto::AccountId32, - >, - ), - #[codec(index = 13)] - ParachainsOrigin( - runtime_types::polkadot_runtime_parachains::origin::pallet::Origin, - ), - #[codec(index = 80)] - Collective( - runtime_types::pallet_collective::RawOrigin< - ::subxt::sp_core::crypto::AccountId32, - >, - ), - #[codec(index = 99)] - XcmPallet(runtime_types::pallet_xcm::pallet::Origin), - #[codec(index = 4)] - Void(runtime_types::sp_core::Void), - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub 
enum ProxyType { - #[codec(index = 0)] - Any, - #[codec(index = 1)] - CancelProxy, - #[codec(index = 2)] - Auction, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct Runtime; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct SessionKeys { - pub grandpa: runtime_types::sp_finality_grandpa::app::Public, - pub babe: runtime_types::sp_consensus_babe::app::Public, - pub im_online: runtime_types::pallet_im_online::sr25519::app_sr25519::Public, - pub para_validator: runtime_types::polkadot_primitives::v2::validator_app::Public, - pub para_assignment: runtime_types::polkadot_primitives::v2::assignment_app::Public, - pub authority_discovery: runtime_types::sp_authority_discovery::app::Public, - pub beefy: runtime_types::beefy_primitives::crypto::Public, - } - } - pub mod sp_arithmetic { - use super::runtime_types; - pub mod fixed_point { - use super::runtime_types; - #[derive( - :: subxt :: codec :: CompactAs, - :: subxt :: codec :: Decode, - :: subxt :: codec :: Encode, - Debug, - )] - pub struct FixedU128(pub ::core::primitive::u128); - } - pub mod per_things { - use super::runtime_types; - #[derive( - :: subxt :: codec :: CompactAs, - :: subxt :: codec :: Decode, - :: subxt :: codec :: Encode, - Debug, - )] - pub struct Perbill(pub ::core::primitive::u32); - } - } - pub mod sp_authority_discovery { - use super::runtime_types; - pub mod app { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct Public(pub runtime_types::sp_core::sr25519::Public); - } - } - pub mod sp_consensus_babe { - use super::runtime_types; - pub mod app { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct Public(pub runtime_types::sp_core::sr25519::Public); - } - pub mod digests { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum NextConfigDescriptor { - #[codec(index = 1)] - V1 { - c: (::core::primitive::u64, ::core::primitive::u64), - allowed_slots: runtime_types::sp_consensus_babe::AllowedSlots, - }, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum PreDigest { - #[codec(index = 1)] - Primary(runtime_types::sp_consensus_babe::digests::PrimaryPreDigest), - #[codec(index = 2)] - SecondaryPlain( - runtime_types::sp_consensus_babe::digests::SecondaryPlainPreDigest, - ), - #[codec(index = 3)] - SecondaryVRF(runtime_types::sp_consensus_babe::digests::SecondaryVRFPreDigest), - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct PrimaryPreDigest { - pub authority_index: ::core::primitive::u32, - pub slot: runtime_types::sp_consensus_slots::Slot, - pub vrf_output: [::core::primitive::u8; 32usize], - pub vrf_proof: [::core::primitive::u8; 64usize], - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct SecondaryPlainPreDigest { - pub authority_index: ::core::primitive::u32, - pub slot: runtime_types::sp_consensus_slots::Slot, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct SecondaryVRFPreDigest { - pub authority_index: ::core::primitive::u32, - pub slot: runtime_types::sp_consensus_slots::Slot, - pub vrf_output: [::core::primitive::u8; 32usize], - pub vrf_proof: [::core::primitive::u8; 64usize], - } - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum 
AllowedSlots { - #[codec(index = 0)] - PrimarySlots, - #[codec(index = 1)] - PrimaryAndSecondaryPlainSlots, - #[codec(index = 2)] - PrimaryAndSecondaryVRFSlots, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct BabeEpochConfiguration { - pub c: (::core::primitive::u64, ::core::primitive::u64), - pub allowed_slots: runtime_types::sp_consensus_babe::AllowedSlots, - } - } - pub mod sp_consensus_slots { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct EquivocationProof<_0, _1> { - pub offender: _1, - pub slot: runtime_types::sp_consensus_slots::Slot, - pub first_header: _0, - pub second_header: _0, - } - #[derive( - :: subxt :: codec :: CompactAs, - :: subxt :: codec :: Decode, - :: subxt :: codec :: Encode, - Debug, - )] - pub struct Slot(pub ::core::primitive::u64); - } - pub mod sp_core { - use super::runtime_types; - pub mod crypto { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct AccountId32(pub [::core::primitive::u8; 32usize]); - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct KeyTypeId(pub [::core::primitive::u8; 4usize]); - } - pub mod ecdsa { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct Public(pub [::core::primitive::u8; 33usize]); - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct Signature(pub [::core::primitive::u8; 65usize]); - } - pub mod ed25519 { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct Public(pub [::core::primitive::u8; 32usize]); - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct Signature(pub [::core::primitive::u8; 64usize]); - } - pub mod offchain { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct OpaqueMultiaddr(pub ::std::vec::Vec<::core::primitive::u8>); - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct OpaqueNetworkState { - pub peer_id: runtime_types::sp_core::OpaquePeerId, - pub external_addresses: - ::std::vec::Vec, - } - } - pub mod sr25519 { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct Public(pub [::core::primitive::u8; 32usize]); - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct Signature(pub [::core::primitive::u8; 64usize]); - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct OpaquePeerId(pub ::std::vec::Vec<::core::primitive::u8>); - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Void {} - } - pub mod sp_finality_grandpa { - use super::runtime_types; - pub mod app { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct Public(pub runtime_types::sp_core::ed25519::Public); - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct Signature(pub runtime_types::sp_core::ed25519::Signature); - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Equivocation<_0, _1> { - #[codec(index = 0)] - Prevote( - runtime_types::finality_grandpa::Equivocation< - 
runtime_types::sp_finality_grandpa::app::Public, - runtime_types::finality_grandpa::Prevote<_0, _1>, - runtime_types::sp_finality_grandpa::app::Signature, - >, - ), - #[codec(index = 1)] - Precommit( - runtime_types::finality_grandpa::Equivocation< - runtime_types::sp_finality_grandpa::app::Public, - runtime_types::finality_grandpa::Precommit<_0, _1>, - runtime_types::sp_finality_grandpa::app::Signature, - >, - ), - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct EquivocationProof<_0, _1> { - pub set_id: ::core::primitive::u64, - pub equivocation: runtime_types::sp_finality_grandpa::Equivocation<_0, _1>, - } - } - pub mod sp_runtime { - use super::runtime_types; - pub mod generic { - use super::runtime_types; - pub mod digest { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct Digest { - pub logs: - ::std::vec::Vec, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum DigestItem { - #[codec(index = 6)] - PreRuntime( - [::core::primitive::u8; 4usize], - ::std::vec::Vec<::core::primitive::u8>, - ), - #[codec(index = 4)] - Consensus( - [::core::primitive::u8; 4usize], - ::std::vec::Vec<::core::primitive::u8>, - ), - #[codec(index = 5)] - Seal( - [::core::primitive::u8; 4usize], - ::std::vec::Vec<::core::primitive::u8>, - ), - #[codec(index = 0)] - Other(::std::vec::Vec<::core::primitive::u8>), - #[codec(index = 8)] - RuntimeEnvironmentUpdated, - } - } - pub mod era { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Era { - #[codec(index = 0)] - Immortal, - #[codec(index = 1)] - Mortal1(::core::primitive::u8), - #[codec(index = 2)] - Mortal2(::core::primitive::u8), - #[codec(index = 3)] - Mortal3(::core::primitive::u8), - #[codec(index = 4)] - Mortal4(::core::primitive::u8), - #[codec(index = 5)] - Mortal5(::core::primitive::u8), - #[codec(index = 6)] - Mortal6(::core::primitive::u8), - #[codec(index = 7)] - Mortal7(::core::primitive::u8), - #[codec(index = 8)] - Mortal8(::core::primitive::u8), - #[codec(index = 9)] - Mortal9(::core::primitive::u8), - #[codec(index = 10)] - Mortal10(::core::primitive::u8), - #[codec(index = 11)] - Mortal11(::core::primitive::u8), - #[codec(index = 12)] - Mortal12(::core::primitive::u8), - #[codec(index = 13)] - Mortal13(::core::primitive::u8), - #[codec(index = 14)] - Mortal14(::core::primitive::u8), - #[codec(index = 15)] - Mortal15(::core::primitive::u8), - #[codec(index = 16)] - Mortal16(::core::primitive::u8), - #[codec(index = 17)] - Mortal17(::core::primitive::u8), - #[codec(index = 18)] - Mortal18(::core::primitive::u8), - #[codec(index = 19)] - Mortal19(::core::primitive::u8), - #[codec(index = 20)] - Mortal20(::core::primitive::u8), - #[codec(index = 21)] - Mortal21(::core::primitive::u8), - #[codec(index = 22)] - Mortal22(::core::primitive::u8), - #[codec(index = 23)] - Mortal23(::core::primitive::u8), - #[codec(index = 24)] - Mortal24(::core::primitive::u8), - #[codec(index = 25)] - Mortal25(::core::primitive::u8), - #[codec(index = 26)] - Mortal26(::core::primitive::u8), - #[codec(index = 27)] - Mortal27(::core::primitive::u8), - #[codec(index = 28)] - Mortal28(::core::primitive::u8), - #[codec(index = 29)] - Mortal29(::core::primitive::u8), - #[codec(index = 30)] - Mortal30(::core::primitive::u8), - #[codec(index = 31)] - Mortal31(::core::primitive::u8), - #[codec(index = 32)] - Mortal32(::core::primitive::u8), - #[codec(index = 33)] 
- Mortal33(::core::primitive::u8), - #[codec(index = 34)] - Mortal34(::core::primitive::u8), - #[codec(index = 35)] - Mortal35(::core::primitive::u8), - #[codec(index = 36)] - Mortal36(::core::primitive::u8), - #[codec(index = 37)] - Mortal37(::core::primitive::u8), - #[codec(index = 38)] - Mortal38(::core::primitive::u8), - #[codec(index = 39)] - Mortal39(::core::primitive::u8), - #[codec(index = 40)] - Mortal40(::core::primitive::u8), - #[codec(index = 41)] - Mortal41(::core::primitive::u8), - #[codec(index = 42)] - Mortal42(::core::primitive::u8), - #[codec(index = 43)] - Mortal43(::core::primitive::u8), - #[codec(index = 44)] - Mortal44(::core::primitive::u8), - #[codec(index = 45)] - Mortal45(::core::primitive::u8), - #[codec(index = 46)] - Mortal46(::core::primitive::u8), - #[codec(index = 47)] - Mortal47(::core::primitive::u8), - #[codec(index = 48)] - Mortal48(::core::primitive::u8), - #[codec(index = 49)] - Mortal49(::core::primitive::u8), - #[codec(index = 50)] - Mortal50(::core::primitive::u8), - #[codec(index = 51)] - Mortal51(::core::primitive::u8), - #[codec(index = 52)] - Mortal52(::core::primitive::u8), - #[codec(index = 53)] - Mortal53(::core::primitive::u8), - #[codec(index = 54)] - Mortal54(::core::primitive::u8), - #[codec(index = 55)] - Mortal55(::core::primitive::u8), - #[codec(index = 56)] - Mortal56(::core::primitive::u8), - #[codec(index = 57)] - Mortal57(::core::primitive::u8), - #[codec(index = 58)] - Mortal58(::core::primitive::u8), - #[codec(index = 59)] - Mortal59(::core::primitive::u8), - #[codec(index = 60)] - Mortal60(::core::primitive::u8), - #[codec(index = 61)] - Mortal61(::core::primitive::u8), - #[codec(index = 62)] - Mortal62(::core::primitive::u8), - #[codec(index = 63)] - Mortal63(::core::primitive::u8), - #[codec(index = 64)] - Mortal64(::core::primitive::u8), - #[codec(index = 65)] - Mortal65(::core::primitive::u8), - #[codec(index = 66)] - Mortal66(::core::primitive::u8), - #[codec(index = 67)] - Mortal67(::core::primitive::u8), - #[codec(index = 68)] - Mortal68(::core::primitive::u8), - #[codec(index = 69)] - Mortal69(::core::primitive::u8), - #[codec(index = 70)] - Mortal70(::core::primitive::u8), - #[codec(index = 71)] - Mortal71(::core::primitive::u8), - #[codec(index = 72)] - Mortal72(::core::primitive::u8), - #[codec(index = 73)] - Mortal73(::core::primitive::u8), - #[codec(index = 74)] - Mortal74(::core::primitive::u8), - #[codec(index = 75)] - Mortal75(::core::primitive::u8), - #[codec(index = 76)] - Mortal76(::core::primitive::u8), - #[codec(index = 77)] - Mortal77(::core::primitive::u8), - #[codec(index = 78)] - Mortal78(::core::primitive::u8), - #[codec(index = 79)] - Mortal79(::core::primitive::u8), - #[codec(index = 80)] - Mortal80(::core::primitive::u8), - #[codec(index = 81)] - Mortal81(::core::primitive::u8), - #[codec(index = 82)] - Mortal82(::core::primitive::u8), - #[codec(index = 83)] - Mortal83(::core::primitive::u8), - #[codec(index = 84)] - Mortal84(::core::primitive::u8), - #[codec(index = 85)] - Mortal85(::core::primitive::u8), - #[codec(index = 86)] - Mortal86(::core::primitive::u8), - #[codec(index = 87)] - Mortal87(::core::primitive::u8), - #[codec(index = 88)] - Mortal88(::core::primitive::u8), - #[codec(index = 89)] - Mortal89(::core::primitive::u8), - #[codec(index = 90)] - Mortal90(::core::primitive::u8), - #[codec(index = 91)] - Mortal91(::core::primitive::u8), - #[codec(index = 92)] - Mortal92(::core::primitive::u8), - #[codec(index = 93)] - Mortal93(::core::primitive::u8), - #[codec(index = 94)] - 
Mortal94(::core::primitive::u8), - #[codec(index = 95)] - Mortal95(::core::primitive::u8), - #[codec(index = 96)] - Mortal96(::core::primitive::u8), - #[codec(index = 97)] - Mortal97(::core::primitive::u8), - #[codec(index = 98)] - Mortal98(::core::primitive::u8), - #[codec(index = 99)] - Mortal99(::core::primitive::u8), - #[codec(index = 100)] - Mortal100(::core::primitive::u8), - #[codec(index = 101)] - Mortal101(::core::primitive::u8), - #[codec(index = 102)] - Mortal102(::core::primitive::u8), - #[codec(index = 103)] - Mortal103(::core::primitive::u8), - #[codec(index = 104)] - Mortal104(::core::primitive::u8), - #[codec(index = 105)] - Mortal105(::core::primitive::u8), - #[codec(index = 106)] - Mortal106(::core::primitive::u8), - #[codec(index = 107)] - Mortal107(::core::primitive::u8), - #[codec(index = 108)] - Mortal108(::core::primitive::u8), - #[codec(index = 109)] - Mortal109(::core::primitive::u8), - #[codec(index = 110)] - Mortal110(::core::primitive::u8), - #[codec(index = 111)] - Mortal111(::core::primitive::u8), - #[codec(index = 112)] - Mortal112(::core::primitive::u8), - #[codec(index = 113)] - Mortal113(::core::primitive::u8), - #[codec(index = 114)] - Mortal114(::core::primitive::u8), - #[codec(index = 115)] - Mortal115(::core::primitive::u8), - #[codec(index = 116)] - Mortal116(::core::primitive::u8), - #[codec(index = 117)] - Mortal117(::core::primitive::u8), - #[codec(index = 118)] - Mortal118(::core::primitive::u8), - #[codec(index = 119)] - Mortal119(::core::primitive::u8), - #[codec(index = 120)] - Mortal120(::core::primitive::u8), - #[codec(index = 121)] - Mortal121(::core::primitive::u8), - #[codec(index = 122)] - Mortal122(::core::primitive::u8), - #[codec(index = 123)] - Mortal123(::core::primitive::u8), - #[codec(index = 124)] - Mortal124(::core::primitive::u8), - #[codec(index = 125)] - Mortal125(::core::primitive::u8), - #[codec(index = 126)] - Mortal126(::core::primitive::u8), - #[codec(index = 127)] - Mortal127(::core::primitive::u8), - #[codec(index = 128)] - Mortal128(::core::primitive::u8), - #[codec(index = 129)] - Mortal129(::core::primitive::u8), - #[codec(index = 130)] - Mortal130(::core::primitive::u8), - #[codec(index = 131)] - Mortal131(::core::primitive::u8), - #[codec(index = 132)] - Mortal132(::core::primitive::u8), - #[codec(index = 133)] - Mortal133(::core::primitive::u8), - #[codec(index = 134)] - Mortal134(::core::primitive::u8), - #[codec(index = 135)] - Mortal135(::core::primitive::u8), - #[codec(index = 136)] - Mortal136(::core::primitive::u8), - #[codec(index = 137)] - Mortal137(::core::primitive::u8), - #[codec(index = 138)] - Mortal138(::core::primitive::u8), - #[codec(index = 139)] - Mortal139(::core::primitive::u8), - #[codec(index = 140)] - Mortal140(::core::primitive::u8), - #[codec(index = 141)] - Mortal141(::core::primitive::u8), - #[codec(index = 142)] - Mortal142(::core::primitive::u8), - #[codec(index = 143)] - Mortal143(::core::primitive::u8), - #[codec(index = 144)] - Mortal144(::core::primitive::u8), - #[codec(index = 145)] - Mortal145(::core::primitive::u8), - #[codec(index = 146)] - Mortal146(::core::primitive::u8), - #[codec(index = 147)] - Mortal147(::core::primitive::u8), - #[codec(index = 148)] - Mortal148(::core::primitive::u8), - #[codec(index = 149)] - Mortal149(::core::primitive::u8), - #[codec(index = 150)] - Mortal150(::core::primitive::u8), - #[codec(index = 151)] - Mortal151(::core::primitive::u8), - #[codec(index = 152)] - Mortal152(::core::primitive::u8), - #[codec(index = 153)] - 
Mortal153(::core::primitive::u8), - #[codec(index = 154)] - Mortal154(::core::primitive::u8), - #[codec(index = 155)] - Mortal155(::core::primitive::u8), - #[codec(index = 156)] - Mortal156(::core::primitive::u8), - #[codec(index = 157)] - Mortal157(::core::primitive::u8), - #[codec(index = 158)] - Mortal158(::core::primitive::u8), - #[codec(index = 159)] - Mortal159(::core::primitive::u8), - #[codec(index = 160)] - Mortal160(::core::primitive::u8), - #[codec(index = 161)] - Mortal161(::core::primitive::u8), - #[codec(index = 162)] - Mortal162(::core::primitive::u8), - #[codec(index = 163)] - Mortal163(::core::primitive::u8), - #[codec(index = 164)] - Mortal164(::core::primitive::u8), - #[codec(index = 165)] - Mortal165(::core::primitive::u8), - #[codec(index = 166)] - Mortal166(::core::primitive::u8), - #[codec(index = 167)] - Mortal167(::core::primitive::u8), - #[codec(index = 168)] - Mortal168(::core::primitive::u8), - #[codec(index = 169)] - Mortal169(::core::primitive::u8), - #[codec(index = 170)] - Mortal170(::core::primitive::u8), - #[codec(index = 171)] - Mortal171(::core::primitive::u8), - #[codec(index = 172)] - Mortal172(::core::primitive::u8), - #[codec(index = 173)] - Mortal173(::core::primitive::u8), - #[codec(index = 174)] - Mortal174(::core::primitive::u8), - #[codec(index = 175)] - Mortal175(::core::primitive::u8), - #[codec(index = 176)] - Mortal176(::core::primitive::u8), - #[codec(index = 177)] - Mortal177(::core::primitive::u8), - #[codec(index = 178)] - Mortal178(::core::primitive::u8), - #[codec(index = 179)] - Mortal179(::core::primitive::u8), - #[codec(index = 180)] - Mortal180(::core::primitive::u8), - #[codec(index = 181)] - Mortal181(::core::primitive::u8), - #[codec(index = 182)] - Mortal182(::core::primitive::u8), - #[codec(index = 183)] - Mortal183(::core::primitive::u8), - #[codec(index = 184)] - Mortal184(::core::primitive::u8), - #[codec(index = 185)] - Mortal185(::core::primitive::u8), - #[codec(index = 186)] - Mortal186(::core::primitive::u8), - #[codec(index = 187)] - Mortal187(::core::primitive::u8), - #[codec(index = 188)] - Mortal188(::core::primitive::u8), - #[codec(index = 189)] - Mortal189(::core::primitive::u8), - #[codec(index = 190)] - Mortal190(::core::primitive::u8), - #[codec(index = 191)] - Mortal191(::core::primitive::u8), - #[codec(index = 192)] - Mortal192(::core::primitive::u8), - #[codec(index = 193)] - Mortal193(::core::primitive::u8), - #[codec(index = 194)] - Mortal194(::core::primitive::u8), - #[codec(index = 195)] - Mortal195(::core::primitive::u8), - #[codec(index = 196)] - Mortal196(::core::primitive::u8), - #[codec(index = 197)] - Mortal197(::core::primitive::u8), - #[codec(index = 198)] - Mortal198(::core::primitive::u8), - #[codec(index = 199)] - Mortal199(::core::primitive::u8), - #[codec(index = 200)] - Mortal200(::core::primitive::u8), - #[codec(index = 201)] - Mortal201(::core::primitive::u8), - #[codec(index = 202)] - Mortal202(::core::primitive::u8), - #[codec(index = 203)] - Mortal203(::core::primitive::u8), - #[codec(index = 204)] - Mortal204(::core::primitive::u8), - #[codec(index = 205)] - Mortal205(::core::primitive::u8), - #[codec(index = 206)] - Mortal206(::core::primitive::u8), - #[codec(index = 207)] - Mortal207(::core::primitive::u8), - #[codec(index = 208)] - Mortal208(::core::primitive::u8), - #[codec(index = 209)] - Mortal209(::core::primitive::u8), - #[codec(index = 210)] - Mortal210(::core::primitive::u8), - #[codec(index = 211)] - Mortal211(::core::primitive::u8), - #[codec(index = 212)] - 
Mortal212(::core::primitive::u8), - #[codec(index = 213)] - Mortal213(::core::primitive::u8), - #[codec(index = 214)] - Mortal214(::core::primitive::u8), - #[codec(index = 215)] - Mortal215(::core::primitive::u8), - #[codec(index = 216)] - Mortal216(::core::primitive::u8), - #[codec(index = 217)] - Mortal217(::core::primitive::u8), - #[codec(index = 218)] - Mortal218(::core::primitive::u8), - #[codec(index = 219)] - Mortal219(::core::primitive::u8), - #[codec(index = 220)] - Mortal220(::core::primitive::u8), - #[codec(index = 221)] - Mortal221(::core::primitive::u8), - #[codec(index = 222)] - Mortal222(::core::primitive::u8), - #[codec(index = 223)] - Mortal223(::core::primitive::u8), - #[codec(index = 224)] - Mortal224(::core::primitive::u8), - #[codec(index = 225)] - Mortal225(::core::primitive::u8), - #[codec(index = 226)] - Mortal226(::core::primitive::u8), - #[codec(index = 227)] - Mortal227(::core::primitive::u8), - #[codec(index = 228)] - Mortal228(::core::primitive::u8), - #[codec(index = 229)] - Mortal229(::core::primitive::u8), - #[codec(index = 230)] - Mortal230(::core::primitive::u8), - #[codec(index = 231)] - Mortal231(::core::primitive::u8), - #[codec(index = 232)] - Mortal232(::core::primitive::u8), - #[codec(index = 233)] - Mortal233(::core::primitive::u8), - #[codec(index = 234)] - Mortal234(::core::primitive::u8), - #[codec(index = 235)] - Mortal235(::core::primitive::u8), - #[codec(index = 236)] - Mortal236(::core::primitive::u8), - #[codec(index = 237)] - Mortal237(::core::primitive::u8), - #[codec(index = 238)] - Mortal238(::core::primitive::u8), - #[codec(index = 239)] - Mortal239(::core::primitive::u8), - #[codec(index = 240)] - Mortal240(::core::primitive::u8), - #[codec(index = 241)] - Mortal241(::core::primitive::u8), - #[codec(index = 242)] - Mortal242(::core::primitive::u8), - #[codec(index = 243)] - Mortal243(::core::primitive::u8), - #[codec(index = 244)] - Mortal244(::core::primitive::u8), - #[codec(index = 245)] - Mortal245(::core::primitive::u8), - #[codec(index = 246)] - Mortal246(::core::primitive::u8), - #[codec(index = 247)] - Mortal247(::core::primitive::u8), - #[codec(index = 248)] - Mortal248(::core::primitive::u8), - #[codec(index = 249)] - Mortal249(::core::primitive::u8), - #[codec(index = 250)] - Mortal250(::core::primitive::u8), - #[codec(index = 251)] - Mortal251(::core::primitive::u8), - #[codec(index = 252)] - Mortal252(::core::primitive::u8), - #[codec(index = 253)] - Mortal253(::core::primitive::u8), - #[codec(index = 254)] - Mortal254(::core::primitive::u8), - #[codec(index = 255)] - Mortal255(::core::primitive::u8), - } - } - pub mod header { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct Header<_0, _1> { - pub parent_hash: ::subxt::sp_core::H256, - #[codec(compact)] - pub number: _0, - pub state_root: ::subxt::sp_core::H256, - pub extrinsics_root: ::subxt::sp_core::H256, - pub digest: runtime_types::sp_runtime::generic::digest::Digest, - #[codec(skip)] - pub __subxt_unused_type_params: ::core::marker::PhantomData<_1>, - } - } - pub mod unchecked_extrinsic { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct UncheckedExtrinsic<_0, _1, _2, _3>( - pub ::std::vec::Vec<::core::primitive::u8>, - #[codec(skip)] pub ::core::marker::PhantomData<(_0, _2, _1, _3)>, - ); - } - } - pub mod multiaddress { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub 
enum MultiAddress<_0, _1> { - #[codec(index = 0)] - Id(_0), - #[codec(index = 1)] - Index(#[codec(compact)] _1), - #[codec(index = 2)] - Raw(::std::vec::Vec<::core::primitive::u8>), - #[codec(index = 3)] - Address32([::core::primitive::u8; 32usize]), - #[codec(index = 4)] - Address20([::core::primitive::u8; 20usize]), - } - } - pub mod traits { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct BlakeTwo256; - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum ArithmeticError { - #[codec(index = 0)] - Underflow, - #[codec(index = 1)] - Overflow, - #[codec(index = 2)] - DivisionByZero, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum DispatchError { - #[codec(index = 0)] - Other, - #[codec(index = 1)] - CannotLookup, - #[codec(index = 2)] - BadOrigin, - #[codec(index = 3)] - Module(runtime_types::sp_runtime::ModuleError), - #[codec(index = 4)] - ConsumerRemaining, - #[codec(index = 5)] - NoProviders, - #[codec(index = 6)] - TooManyConsumers, - #[codec(index = 7)] - Token(runtime_types::sp_runtime::TokenError), - #[codec(index = 8)] - Arithmetic(runtime_types::sp_runtime::ArithmeticError), - #[codec(index = 9)] - Transactional(runtime_types::sp_runtime::TransactionalError), - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct ModuleError { - pub index: ::core::primitive::u8, - pub error: [::core::primitive::u8; 4usize], - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum MultiSignature { - #[codec(index = 0)] - Ed25519(runtime_types::sp_core::ed25519::Signature), - #[codec(index = 1)] - Sr25519(runtime_types::sp_core::sr25519::Signature), - #[codec(index = 2)] - Ecdsa(runtime_types::sp_core::ecdsa::Signature), - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum MultiSigner { - #[codec(index = 0)] - Ed25519(runtime_types::sp_core::ed25519::Public), - #[codec(index = 1)] - Sr25519(runtime_types::sp_core::sr25519::Public), - #[codec(index = 2)] - Ecdsa(runtime_types::sp_core::ecdsa::Public), - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum TokenError { - #[codec(index = 0)] - NoFunds, - #[codec(index = 1)] - WouldDie, - #[codec(index = 2)] - BelowMinimum, - #[codec(index = 3)] - CannotCreate, - #[codec(index = 4)] - UnknownAsset, - #[codec(index = 5)] - Frozen, - #[codec(index = 6)] - Unsupported, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum TransactionalError { - #[codec(index = 0)] - LimitReached, - #[codec(index = 1)] - NoLayer, - } - } - pub mod sp_session { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct MembershipProof { - pub session: ::core::primitive::u32, - pub trie_nodes: ::std::vec::Vec<::std::vec::Vec<::core::primitive::u8>>, - pub validator_count: ::core::primitive::u32, - } - } - pub mod sp_staking { - use super::runtime_types; - pub mod offence { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct OffenceDetails<_0, _1> { - pub offender: _1, - pub reporters: ::std::vec::Vec<_0>, - } - } - } - pub mod sp_version { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct RuntimeVersion { - pub spec_name: ::std::string::String, - pub 
impl_name: ::std::string::String, - pub authoring_version: ::core::primitive::u32, - pub spec_version: ::core::primitive::u32, - pub impl_version: ::core::primitive::u32, - pub apis: - ::std::vec::Vec<([::core::primitive::u8; 8usize], ::core::primitive::u32)>, - pub transaction_version: ::core::primitive::u32, - pub state_version: ::core::primitive::u8, - } - } - pub mod xcm { - use super::runtime_types; - pub mod double_encoded { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct DoubleEncoded { - pub encoded: ::std::vec::Vec<::core::primitive::u8>, - } - } - pub mod v0 { - use super::runtime_types; - pub mod junction { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum BodyId { - #[codec(index = 0)] - Unit, - #[codec(index = 1)] - Named(::std::vec::Vec<::core::primitive::u8>), - #[codec(index = 2)] - Index(#[codec(compact)] ::core::primitive::u32), - #[codec(index = 3)] - Executive, - #[codec(index = 4)] - Technical, - #[codec(index = 5)] - Legislative, - #[codec(index = 6)] - Judicial, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum BodyPart { - #[codec(index = 0)] - Voice, - #[codec(index = 1)] - Members { - #[codec(compact)] - count: ::core::primitive::u32, - }, - #[codec(index = 2)] - Fraction { - #[codec(compact)] - nom: ::core::primitive::u32, - #[codec(compact)] - denom: ::core::primitive::u32, - }, - #[codec(index = 3)] - AtLeastProportion { - #[codec(compact)] - nom: ::core::primitive::u32, - #[codec(compact)] - denom: ::core::primitive::u32, - }, - #[codec(index = 4)] - MoreThanProportion { - #[codec(compact)] - nom: ::core::primitive::u32, - #[codec(compact)] - denom: ::core::primitive::u32, - }, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Junction { - #[codec(index = 0)] - Parent, - #[codec(index = 1)] - Parachain(#[codec(compact)] ::core::primitive::u32), - #[codec(index = 2)] - AccountId32 { - network: runtime_types::xcm::v0::junction::NetworkId, - id: [::core::primitive::u8; 32usize], - }, - #[codec(index = 3)] - AccountIndex64 { - network: runtime_types::xcm::v0::junction::NetworkId, - #[codec(compact)] - index: ::core::primitive::u64, - }, - #[codec(index = 4)] - AccountKey20 { - network: runtime_types::xcm::v0::junction::NetworkId, - key: [::core::primitive::u8; 20usize], - }, - #[codec(index = 5)] - PalletInstance(::core::primitive::u8), - #[codec(index = 6)] - GeneralIndex(#[codec(compact)] ::core::primitive::u128), - #[codec(index = 7)] - GeneralKey(::std::vec::Vec<::core::primitive::u8>), - #[codec(index = 8)] - OnlyChild, - #[codec(index = 9)] - Plurality { - id: runtime_types::xcm::v0::junction::BodyId, - part: runtime_types::xcm::v0::junction::BodyPart, - }, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum NetworkId { - #[codec(index = 0)] - Any, - #[codec(index = 1)] - Named(::std::vec::Vec<::core::primitive::u8>), - #[codec(index = 2)] - Polkadot, - #[codec(index = 3)] - Kusama, - } - } - pub mod multi_asset { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum MultiAsset { - #[codec(index = 0)] - None, - #[codec(index = 1)] - All, - #[codec(index = 2)] - AllFungible, - #[codec(index = 3)] - AllNonFungible, - #[codec(index = 4)] - AllAbstractFungible { - id: ::std::vec::Vec<::core::primitive::u8>, - }, - #[codec(index = 5)] - 
AllAbstractNonFungible { - class: ::std::vec::Vec<::core::primitive::u8>, - }, - #[codec(index = 6)] - AllConcreteFungible { - id: runtime_types::xcm::v0::multi_location::MultiLocation, - }, - #[codec(index = 7)] - AllConcreteNonFungible { - class: runtime_types::xcm::v0::multi_location::MultiLocation, - }, - #[codec(index = 8)] - AbstractFungible { - id: ::std::vec::Vec<::core::primitive::u8>, - #[codec(compact)] - amount: ::core::primitive::u128, - }, - #[codec(index = 9)] - AbstractNonFungible { - class: ::std::vec::Vec<::core::primitive::u8>, - instance: runtime_types::xcm::v1::multiasset::AssetInstance, - }, - #[codec(index = 10)] - ConcreteFungible { - id: runtime_types::xcm::v0::multi_location::MultiLocation, - #[codec(compact)] - amount: ::core::primitive::u128, - }, - #[codec(index = 11)] - ConcreteNonFungible { - class: runtime_types::xcm::v0::multi_location::MultiLocation, - instance: runtime_types::xcm::v1::multiasset::AssetInstance, - }, - } - } - pub mod multi_location { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum MultiLocation { - #[codec(index = 0)] - Null, - #[codec(index = 1)] - X1(runtime_types::xcm::v0::junction::Junction), - #[codec(index = 2)] - X2( - runtime_types::xcm::v0::junction::Junction, - runtime_types::xcm::v0::junction::Junction, - ), - #[codec(index = 3)] - X3( - runtime_types::xcm::v0::junction::Junction, - runtime_types::xcm::v0::junction::Junction, - runtime_types::xcm::v0::junction::Junction, - ), - #[codec(index = 4)] - X4( - runtime_types::xcm::v0::junction::Junction, - runtime_types::xcm::v0::junction::Junction, - runtime_types::xcm::v0::junction::Junction, - runtime_types::xcm::v0::junction::Junction, - ), - #[codec(index = 5)] - X5( - runtime_types::xcm::v0::junction::Junction, - runtime_types::xcm::v0::junction::Junction, - runtime_types::xcm::v0::junction::Junction, - runtime_types::xcm::v0::junction::Junction, - runtime_types::xcm::v0::junction::Junction, - ), - #[codec(index = 6)] - X6( - runtime_types::xcm::v0::junction::Junction, - runtime_types::xcm::v0::junction::Junction, - runtime_types::xcm::v0::junction::Junction, - runtime_types::xcm::v0::junction::Junction, - runtime_types::xcm::v0::junction::Junction, - runtime_types::xcm::v0::junction::Junction, - ), - #[codec(index = 7)] - X7( - runtime_types::xcm::v0::junction::Junction, - runtime_types::xcm::v0::junction::Junction, - runtime_types::xcm::v0::junction::Junction, - runtime_types::xcm::v0::junction::Junction, - runtime_types::xcm::v0::junction::Junction, - runtime_types::xcm::v0::junction::Junction, - runtime_types::xcm::v0::junction::Junction, - ), - #[codec(index = 8)] - X8( - runtime_types::xcm::v0::junction::Junction, - runtime_types::xcm::v0::junction::Junction, - runtime_types::xcm::v0::junction::Junction, - runtime_types::xcm::v0::junction::Junction, - runtime_types::xcm::v0::junction::Junction, - runtime_types::xcm::v0::junction::Junction, - runtime_types::xcm::v0::junction::Junction, - runtime_types::xcm::v0::junction::Junction, - ), - } - } - pub mod order { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Order { - #[codec(index = 0)] - Null, - #[codec(index = 1)] - DepositAsset { - assets: - ::std::vec::Vec, - dest: runtime_types::xcm::v0::multi_location::MultiLocation, - }, - #[codec(index = 2)] - DepositReserveAsset { - assets: - ::std::vec::Vec, - dest: runtime_types::xcm::v0::multi_location::MultiLocation, - effects: 
::std::vec::Vec, - }, - #[codec(index = 3)] - ExchangeAsset { - give: ::std::vec::Vec, - receive: - ::std::vec::Vec, - }, - #[codec(index = 4)] - InitiateReserveWithdraw { - assets: - ::std::vec::Vec, - reserve: runtime_types::xcm::v0::multi_location::MultiLocation, - effects: ::std::vec::Vec, - }, - #[codec(index = 5)] - InitiateTeleport { - assets: - ::std::vec::Vec, - dest: runtime_types::xcm::v0::multi_location::MultiLocation, - effects: ::std::vec::Vec, - }, - #[codec(index = 6)] - QueryHolding { - #[codec(compact)] - query_id: ::core::primitive::u64, - dest: runtime_types::xcm::v0::multi_location::MultiLocation, - assets: - ::std::vec::Vec, - }, - #[codec(index = 7)] - BuyExecution { - fees: runtime_types::xcm::v0::multi_asset::MultiAsset, - weight: ::core::primitive::u64, - debt: ::core::primitive::u64, - halt_on_error: ::core::primitive::bool, - xcm: ::std::vec::Vec, - }, - } - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum OriginKind { - #[codec(index = 0)] - Native, - #[codec(index = 1)] - SovereignAccount, - #[codec(index = 2)] - Superuser, - #[codec(index = 3)] - Xcm, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Response { - #[codec(index = 0)] - Assets(::std::vec::Vec), - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Xcm { - #[codec(index = 0)] - WithdrawAsset { - assets: ::std::vec::Vec, - effects: ::std::vec::Vec, - }, - #[codec(index = 1)] - ReserveAssetDeposit { - assets: ::std::vec::Vec, - effects: ::std::vec::Vec, - }, - #[codec(index = 2)] - TeleportAsset { - assets: ::std::vec::Vec, - effects: ::std::vec::Vec, - }, - #[codec(index = 3)] - QueryResponse { - #[codec(compact)] - query_id: ::core::primitive::u64, - response: runtime_types::xcm::v0::Response, - }, - #[codec(index = 4)] - TransferAsset { - assets: ::std::vec::Vec, - dest: runtime_types::xcm::v0::multi_location::MultiLocation, - }, - #[codec(index = 5)] - TransferReserveAsset { - assets: ::std::vec::Vec, - dest: runtime_types::xcm::v0::multi_location::MultiLocation, - effects: ::std::vec::Vec, - }, - #[codec(index = 6)] - Transact { - origin_type: runtime_types::xcm::v0::OriginKind, - require_weight_at_most: ::core::primitive::u64, - call: runtime_types::xcm::double_encoded::DoubleEncoded, - }, - #[codec(index = 7)] - HrmpNewChannelOpenRequest { - #[codec(compact)] - sender: ::core::primitive::u32, - #[codec(compact)] - max_message_size: ::core::primitive::u32, - #[codec(compact)] - max_capacity: ::core::primitive::u32, - }, - #[codec(index = 8)] - HrmpChannelAccepted { - #[codec(compact)] - recipient: ::core::primitive::u32, - }, - #[codec(index = 9)] - HrmpChannelClosing { - #[codec(compact)] - initiator: ::core::primitive::u32, - #[codec(compact)] - sender: ::core::primitive::u32, - #[codec(compact)] - recipient: ::core::primitive::u32, - }, - #[codec(index = 10)] - RelayedFrom { - who: runtime_types::xcm::v0::multi_location::MultiLocation, - message: ::std::boxed::Box, - }, - } - } - pub mod v1 { - use super::runtime_types; - pub mod junction { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Junction { - #[codec(index = 0)] - Parachain(#[codec(compact)] ::core::primitive::u32), - #[codec(index = 1)] - AccountId32 { - network: runtime_types::xcm::v0::junction::NetworkId, - id: [::core::primitive::u8; 32usize], - }, - #[codec(index = 2)] - AccountIndex64 { - network: 
runtime_types::xcm::v0::junction::NetworkId, - #[codec(compact)] - index: ::core::primitive::u64, - }, - #[codec(index = 3)] - AccountKey20 { - network: runtime_types::xcm::v0::junction::NetworkId, - key: [::core::primitive::u8; 20usize], - }, - #[codec(index = 4)] - PalletInstance(::core::primitive::u8), - #[codec(index = 5)] - GeneralIndex(#[codec(compact)] ::core::primitive::u128), - #[codec(index = 6)] - GeneralKey(::std::vec::Vec<::core::primitive::u8>), - #[codec(index = 7)] - OnlyChild, - #[codec(index = 8)] - Plurality { - id: runtime_types::xcm::v0::junction::BodyId, - part: runtime_types::xcm::v0::junction::BodyPart, - }, - } - } - pub mod multiasset { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum AssetId { - #[codec(index = 0)] - Concrete(runtime_types::xcm::v1::multilocation::MultiLocation), - #[codec(index = 1)] - Abstract(::std::vec::Vec<::core::primitive::u8>), - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum AssetInstance { - #[codec(index = 0)] - Undefined, - #[codec(index = 1)] - Index(#[codec(compact)] ::core::primitive::u128), - #[codec(index = 2)] - Array4([::core::primitive::u8; 4usize]), - #[codec(index = 3)] - Array8([::core::primitive::u8; 8usize]), - #[codec(index = 4)] - Array16([::core::primitive::u8; 16usize]), - #[codec(index = 5)] - Array32([::core::primitive::u8; 32usize]), - #[codec(index = 6)] - Blob(::std::vec::Vec<::core::primitive::u8>), - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Fungibility { - #[codec(index = 0)] - Fungible(#[codec(compact)] ::core::primitive::u128), - #[codec(index = 1)] - NonFungible(runtime_types::xcm::v1::multiasset::AssetInstance), - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct MultiAsset { - pub id: runtime_types::xcm::v1::multiasset::AssetId, - pub fun: runtime_types::xcm::v1::multiasset::Fungibility, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum MultiAssetFilter { - #[codec(index = 0)] - Definite(runtime_types::xcm::v1::multiasset::MultiAssets), - #[codec(index = 1)] - Wild(runtime_types::xcm::v1::multiasset::WildMultiAsset), - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct MultiAssets( - pub ::std::vec::Vec, - ); - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum WildFungibility { - #[codec(index = 0)] - Fungible, - #[codec(index = 1)] - NonFungible, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum WildMultiAsset { - #[codec(index = 0)] - All, - #[codec(index = 1)] - AllOf { - id: runtime_types::xcm::v1::multiasset::AssetId, - fun: runtime_types::xcm::v1::multiasset::WildFungibility, - }, - } - } - pub mod multilocation { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Junctions { - #[codec(index = 0)] - Here, - #[codec(index = 1)] - X1(runtime_types::xcm::v1::junction::Junction), - #[codec(index = 2)] - X2( - runtime_types::xcm::v1::junction::Junction, - runtime_types::xcm::v1::junction::Junction, - ), - #[codec(index = 3)] - X3( - runtime_types::xcm::v1::junction::Junction, - runtime_types::xcm::v1::junction::Junction, - runtime_types::xcm::v1::junction::Junction, - ), - #[codec(index = 4)] - X4( - runtime_types::xcm::v1::junction::Junction, - 
runtime_types::xcm::v1::junction::Junction, - runtime_types::xcm::v1::junction::Junction, - runtime_types::xcm::v1::junction::Junction, - ), - #[codec(index = 5)] - X5( - runtime_types::xcm::v1::junction::Junction, - runtime_types::xcm::v1::junction::Junction, - runtime_types::xcm::v1::junction::Junction, - runtime_types::xcm::v1::junction::Junction, - runtime_types::xcm::v1::junction::Junction, - ), - #[codec(index = 6)] - X6( - runtime_types::xcm::v1::junction::Junction, - runtime_types::xcm::v1::junction::Junction, - runtime_types::xcm::v1::junction::Junction, - runtime_types::xcm::v1::junction::Junction, - runtime_types::xcm::v1::junction::Junction, - runtime_types::xcm::v1::junction::Junction, - ), - #[codec(index = 7)] - X7( - runtime_types::xcm::v1::junction::Junction, - runtime_types::xcm::v1::junction::Junction, - runtime_types::xcm::v1::junction::Junction, - runtime_types::xcm::v1::junction::Junction, - runtime_types::xcm::v1::junction::Junction, - runtime_types::xcm::v1::junction::Junction, - runtime_types::xcm::v1::junction::Junction, - ), - #[codec(index = 8)] - X8( - runtime_types::xcm::v1::junction::Junction, - runtime_types::xcm::v1::junction::Junction, - runtime_types::xcm::v1::junction::Junction, - runtime_types::xcm::v1::junction::Junction, - runtime_types::xcm::v1::junction::Junction, - runtime_types::xcm::v1::junction::Junction, - runtime_types::xcm::v1::junction::Junction, - runtime_types::xcm::v1::junction::Junction, - ), - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct MultiLocation { - pub parents: ::core::primitive::u8, - pub interior: runtime_types::xcm::v1::multilocation::Junctions, - } - } - pub mod order { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Order { - #[codec(index = 0)] - Noop, - #[codec(index = 1)] - DepositAsset { - assets: runtime_types::xcm::v1::multiasset::MultiAssetFilter, - max_assets: ::core::primitive::u32, - beneficiary: runtime_types::xcm::v1::multilocation::MultiLocation, - }, - #[codec(index = 2)] - DepositReserveAsset { - assets: runtime_types::xcm::v1::multiasset::MultiAssetFilter, - max_assets: ::core::primitive::u32, - dest: runtime_types::xcm::v1::multilocation::MultiLocation, - effects: ::std::vec::Vec, - }, - #[codec(index = 3)] - ExchangeAsset { - give: runtime_types::xcm::v1::multiasset::MultiAssetFilter, - receive: runtime_types::xcm::v1::multiasset::MultiAssets, - }, - #[codec(index = 4)] - InitiateReserveWithdraw { - assets: runtime_types::xcm::v1::multiasset::MultiAssetFilter, - reserve: runtime_types::xcm::v1::multilocation::MultiLocation, - effects: ::std::vec::Vec, - }, - #[codec(index = 5)] - InitiateTeleport { - assets: runtime_types::xcm::v1::multiasset::MultiAssetFilter, - dest: runtime_types::xcm::v1::multilocation::MultiLocation, - effects: ::std::vec::Vec, - }, - #[codec(index = 6)] - QueryHolding { - #[codec(compact)] - query_id: ::core::primitive::u64, - dest: runtime_types::xcm::v1::multilocation::MultiLocation, - assets: runtime_types::xcm::v1::multiasset::MultiAssetFilter, - }, - #[codec(index = 7)] - BuyExecution { - fees: runtime_types::xcm::v1::multiasset::MultiAsset, - weight: ::core::primitive::u64, - debt: ::core::primitive::u64, - halt_on_error: ::core::primitive::bool, - instructions: ::std::vec::Vec, - }, - } - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Response { - #[codec(index = 0)] - 
Assets(runtime_types::xcm::v1::multiasset::MultiAssets), - #[codec(index = 1)] - Version(::core::primitive::u32), - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Xcm { - #[codec(index = 0)] - WithdrawAsset { - assets: runtime_types::xcm::v1::multiasset::MultiAssets, - effects: ::std::vec::Vec, - }, - #[codec(index = 1)] - ReserveAssetDeposited { - assets: runtime_types::xcm::v1::multiasset::MultiAssets, - effects: ::std::vec::Vec, - }, - #[codec(index = 2)] - ReceiveTeleportedAsset { - assets: runtime_types::xcm::v1::multiasset::MultiAssets, - effects: ::std::vec::Vec, - }, - #[codec(index = 3)] - QueryResponse { - #[codec(compact)] - query_id: ::core::primitive::u64, - response: runtime_types::xcm::v1::Response, - }, - #[codec(index = 4)] - TransferAsset { - assets: runtime_types::xcm::v1::multiasset::MultiAssets, - beneficiary: runtime_types::xcm::v1::multilocation::MultiLocation, - }, - #[codec(index = 5)] - TransferReserveAsset { - assets: runtime_types::xcm::v1::multiasset::MultiAssets, - dest: runtime_types::xcm::v1::multilocation::MultiLocation, - effects: ::std::vec::Vec, - }, - #[codec(index = 6)] - Transact { - origin_type: runtime_types::xcm::v0::OriginKind, - require_weight_at_most: ::core::primitive::u64, - call: runtime_types::xcm::double_encoded::DoubleEncoded, - }, - #[codec(index = 7)] - HrmpNewChannelOpenRequest { - #[codec(compact)] - sender: ::core::primitive::u32, - #[codec(compact)] - max_message_size: ::core::primitive::u32, - #[codec(compact)] - max_capacity: ::core::primitive::u32, - }, - #[codec(index = 8)] - HrmpChannelAccepted { - #[codec(compact)] - recipient: ::core::primitive::u32, - }, - #[codec(index = 9)] - HrmpChannelClosing { - #[codec(compact)] - initiator: ::core::primitive::u32, - #[codec(compact)] - sender: ::core::primitive::u32, - #[codec(compact)] - recipient: ::core::primitive::u32, - }, - #[codec(index = 10)] - RelayedFrom { - who: runtime_types::xcm::v1::multilocation::Junctions, - message: ::std::boxed::Box, - }, - #[codec(index = 11)] - SubscribeVersion { - #[codec(compact)] - query_id: ::core::primitive::u64, - #[codec(compact)] - max_response_weight: ::core::primitive::u64, - }, - #[codec(index = 12)] - UnsubscribeVersion, - } - } - pub mod v2 { - use super::runtime_types; - pub mod traits { - use super::runtime_types; - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Error { - #[codec(index = 0)] - Overflow, - #[codec(index = 1)] - Unimplemented, - #[codec(index = 2)] - UntrustedReserveLocation, - #[codec(index = 3)] - UntrustedTeleportLocation, - #[codec(index = 4)] - MultiLocationFull, - #[codec(index = 5)] - MultiLocationNotInvertible, - #[codec(index = 6)] - BadOrigin, - #[codec(index = 7)] - InvalidLocation, - #[codec(index = 8)] - AssetNotFound, - #[codec(index = 9)] - FailedToTransactAsset, - #[codec(index = 10)] - NotWithdrawable, - #[codec(index = 11)] - LocationCannotHold, - #[codec(index = 12)] - ExceedsMaxMessageSize, - #[codec(index = 13)] - DestinationUnsupported, - #[codec(index = 14)] - Transport, - #[codec(index = 15)] - Unroutable, - #[codec(index = 16)] - UnknownClaim, - #[codec(index = 17)] - FailedToDecode, - #[codec(index = 18)] - MaxWeightInvalid, - #[codec(index = 19)] - NotHoldingFees, - #[codec(index = 20)] - TooExpensive, - #[codec(index = 21)] - Trap(::core::primitive::u64), - #[codec(index = 22)] - UnhandledXcmVersion, - #[codec(index = 23)] - WeightLimitReached(::core::primitive::u64), - #[codec(index = 24)] - Barrier, - 
#[codec(index = 25)] - WeightNotComputable, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Outcome { - #[codec(index = 0)] - Complete(::core::primitive::u64), - #[codec(index = 1)] - Incomplete( - ::core::primitive::u64, - runtime_types::xcm::v2::traits::Error, - ), - #[codec(index = 2)] - Error(runtime_types::xcm::v2::traits::Error), - } - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Instruction { - #[codec(index = 0)] - WithdrawAsset(runtime_types::xcm::v1::multiasset::MultiAssets), - #[codec(index = 1)] - ReserveAssetDeposited(runtime_types::xcm::v1::multiasset::MultiAssets), - #[codec(index = 2)] - ReceiveTeleportedAsset(runtime_types::xcm::v1::multiasset::MultiAssets), - #[codec(index = 3)] - QueryResponse { - #[codec(compact)] - query_id: ::core::primitive::u64, - response: runtime_types::xcm::v2::Response, - #[codec(compact)] - max_weight: ::core::primitive::u64, - }, - #[codec(index = 4)] - TransferAsset { - assets: runtime_types::xcm::v1::multiasset::MultiAssets, - beneficiary: runtime_types::xcm::v1::multilocation::MultiLocation, - }, - #[codec(index = 5)] - TransferReserveAsset { - assets: runtime_types::xcm::v1::multiasset::MultiAssets, - dest: runtime_types::xcm::v1::multilocation::MultiLocation, - xcm: runtime_types::xcm::v2::Xcm, - }, - #[codec(index = 6)] - Transact { - origin_type: runtime_types::xcm::v0::OriginKind, - #[codec(compact)] - require_weight_at_most: ::core::primitive::u64, - call: runtime_types::xcm::double_encoded::DoubleEncoded, - }, - #[codec(index = 7)] - HrmpNewChannelOpenRequest { - #[codec(compact)] - sender: ::core::primitive::u32, - #[codec(compact)] - max_message_size: ::core::primitive::u32, - #[codec(compact)] - max_capacity: ::core::primitive::u32, - }, - #[codec(index = 8)] - HrmpChannelAccepted { - #[codec(compact)] - recipient: ::core::primitive::u32, - }, - #[codec(index = 9)] - HrmpChannelClosing { - #[codec(compact)] - initiator: ::core::primitive::u32, - #[codec(compact)] - sender: ::core::primitive::u32, - #[codec(compact)] - recipient: ::core::primitive::u32, - }, - #[codec(index = 10)] - ClearOrigin, - #[codec(index = 11)] - DescendOrigin(runtime_types::xcm::v1::multilocation::Junctions), - #[codec(index = 12)] - ReportError { - #[codec(compact)] - query_id: ::core::primitive::u64, - dest: runtime_types::xcm::v1::multilocation::MultiLocation, - #[codec(compact)] - max_response_weight: ::core::primitive::u64, - }, - #[codec(index = 13)] - DepositAsset { - assets: runtime_types::xcm::v1::multiasset::MultiAssetFilter, - #[codec(compact)] - max_assets: ::core::primitive::u32, - beneficiary: runtime_types::xcm::v1::multilocation::MultiLocation, - }, - #[codec(index = 14)] - DepositReserveAsset { - assets: runtime_types::xcm::v1::multiasset::MultiAssetFilter, - #[codec(compact)] - max_assets: ::core::primitive::u32, - dest: runtime_types::xcm::v1::multilocation::MultiLocation, - xcm: runtime_types::xcm::v2::Xcm, - }, - #[codec(index = 15)] - ExchangeAsset { - give: runtime_types::xcm::v1::multiasset::MultiAssetFilter, - receive: runtime_types::xcm::v1::multiasset::MultiAssets, - }, - #[codec(index = 16)] - InitiateReserveWithdraw { - assets: runtime_types::xcm::v1::multiasset::MultiAssetFilter, - reserve: runtime_types::xcm::v1::multilocation::MultiLocation, - xcm: runtime_types::xcm::v2::Xcm, - }, - #[codec(index = 17)] - InitiateTeleport { - assets: runtime_types::xcm::v1::multiasset::MultiAssetFilter, - dest: 
runtime_types::xcm::v1::multilocation::MultiLocation, - xcm: runtime_types::xcm::v2::Xcm, - }, - #[codec(index = 18)] - QueryHolding { - #[codec(compact)] - query_id: ::core::primitive::u64, - dest: runtime_types::xcm::v1::multilocation::MultiLocation, - assets: runtime_types::xcm::v1::multiasset::MultiAssetFilter, - #[codec(compact)] - max_response_weight: ::core::primitive::u64, - }, - #[codec(index = 19)] - BuyExecution { - fees: runtime_types::xcm::v1::multiasset::MultiAsset, - weight_limit: runtime_types::xcm::v2::WeightLimit, - }, - #[codec(index = 20)] - RefundSurplus, - #[codec(index = 21)] - SetErrorHandler(runtime_types::xcm::v2::Xcm), - #[codec(index = 22)] - SetAppendix(runtime_types::xcm::v2::Xcm), - #[codec(index = 23)] - ClearError, - #[codec(index = 24)] - ClaimAsset { - assets: runtime_types::xcm::v1::multiasset::MultiAssets, - ticket: runtime_types::xcm::v1::multilocation::MultiLocation, - }, - #[codec(index = 25)] - Trap(#[codec(compact)] ::core::primitive::u64), - #[codec(index = 26)] - SubscribeVersion { - #[codec(compact)] - query_id: ::core::primitive::u64, - #[codec(compact)] - max_response_weight: ::core::primitive::u64, - }, - #[codec(index = 27)] - UnsubscribeVersion, - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum Response { - #[codec(index = 0)] - Null, - #[codec(index = 1)] - Assets(runtime_types::xcm::v1::multiasset::MultiAssets), - #[codec(index = 2)] - ExecutionResult( - ::core::option::Option<( - ::core::primitive::u32, - runtime_types::xcm::v2::traits::Error, - )>, - ), - #[codec(index = 3)] - Version(::core::primitive::u32), - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum WeightLimit { - #[codec(index = 0)] - Unlimited, - #[codec(index = 1)] - Limited(#[codec(compact)] ::core::primitive::u64), - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub struct Xcm(pub ::std::vec::Vec); - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum VersionedMultiAssets { - #[codec(index = 0)] - V0(::std::vec::Vec), - #[codec(index = 1)] - V1(runtime_types::xcm::v1::multiasset::MultiAssets), - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum VersionedMultiLocation { - #[codec(index = 0)] - V0(runtime_types::xcm::v0::multi_location::MultiLocation), - #[codec(index = 1)] - V1(runtime_types::xcm::v1::multilocation::MultiLocation), - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum VersionedResponse { - #[codec(index = 0)] - V0(runtime_types::xcm::v0::Response), - #[codec(index = 1)] - V1(runtime_types::xcm::v1::Response), - #[codec(index = 2)] - V2(runtime_types::xcm::v2::Response), - } - #[derive(:: subxt :: codec :: Decode, :: subxt :: codec :: Encode, Debug)] - pub enum VersionedXcm { - #[codec(index = 0)] - V0(runtime_types::xcm::v0::Xcm), - #[codec(index = 1)] - V1(runtime_types::xcm::v1::Xcm), - #[codec(index = 2)] - V2(runtime_types::xcm::v2::Xcm), - } - } - } - #[doc = r" The default error type returned when there is a runtime issue."] - pub type DispatchError = runtime_types::sp_runtime::DispatchError; - impl ::subxt::HasModuleError for runtime_types::sp_runtime::DispatchError { - fn module_error_data(&self) -> Option<::subxt::ModuleErrorData> { - if let Self::Module(module_error) = self { - Some(::subxt::ModuleErrorData { - pallet_index: module_error.index, - error: module_error.error, - }) - } else { - None - } - } - } - pub 
struct RuntimeApi { - pub client: ::subxt::Client, - marker: ::core::marker::PhantomData, - } - impl ::core::convert::From<::subxt::Client> for RuntimeApi - where - T: ::subxt::Config, - X: ::subxt::extrinsic::ExtrinsicParams, - { - fn from(client: ::subxt::Client) -> Self { - Self { - client, - marker: ::core::marker::PhantomData, - } - } - } - impl<'a, T, X> RuntimeApi - where - T: ::subxt::Config, - X: ::subxt::extrinsic::ExtrinsicParams, - { - pub fn validate_metadata(&'a self) -> Result<(), ::subxt::MetadataError> { - if self.client.metadata().metadata_hash(&PALLETS) - != [ - 171u8, 151u8, 238u8, 248u8, 146u8, 176u8, 17u8, 187u8, 196u8, 188u8, 233u8, - 111u8, 45u8, 124u8, 52u8, 50u8, 33u8, 206u8, 239u8, 173u8, 23u8, 8u8, 56u8, - 68u8, 21u8, 49u8, 188u8, 236u8, 27u8, 193u8, 246u8, 223u8, - ] - { - Err(::subxt::MetadataError::IncompatibleMetadata) - } else { - Ok(()) - } - } - pub fn constants(&'a self) -> ConstantsApi<'a, T> { - ConstantsApi { - client: &self.client, - } - } - pub fn storage(&'a self) -> StorageApi<'a, T> { - StorageApi { - client: &self.client, - } - } - pub fn tx(&'a self) -> TransactionApi<'a, T, X> { - TransactionApi { - client: &self.client, - marker: ::core::marker::PhantomData, - } - } - pub fn events(&'a self) -> EventsApi<'a, T> { - EventsApi { - client: &self.client, - } - } - } - pub struct EventsApi<'a, T: ::subxt::Config> { - client: &'a ::subxt::Client, - } - impl<'a, T: ::subxt::Config> EventsApi<'a, T> { - pub async fn at( - &self, - block_hash: T::Hash, - ) -> Result<::subxt::events::Events<'a, T, Event>, ::subxt::BasicError> { - ::subxt::events::at::(self.client, block_hash).await - } - pub async fn subscribe( - &self, - ) -> Result< - ::subxt::events::EventSubscription<'a, ::subxt::events::EventSub, T, Event>, - ::subxt::BasicError, - > { - ::subxt::events::subscribe::(self.client).await - } - pub async fn subscribe_finalized( - &self, - ) -> Result< - ::subxt::events::EventSubscription< - 'a, - ::subxt::events::FinalizedEventSub<'a, T::Header>, - T, - Event, - >, - ::subxt::BasicError, - > { - ::subxt::events::subscribe_finalized::(self.client).await - } - } - pub struct ConstantsApi<'a, T: ::subxt::Config> { - client: &'a ::subxt::Client, - } - impl<'a, T: ::subxt::Config> ConstantsApi<'a, T> { - pub fn system(&self) -> system::constants::ConstantsApi<'a, T> { - system::constants::ConstantsApi::new(self.client) - } - pub fn babe(&self) -> babe::constants::ConstantsApi<'a, T> { - babe::constants::ConstantsApi::new(self.client) - } - pub fn timestamp(&self) -> timestamp::constants::ConstantsApi<'a, T> { - timestamp::constants::ConstantsApi::new(self.client) - } - pub fn indices(&self) -> indices::constants::ConstantsApi<'a, T> { - indices::constants::ConstantsApi::new(self.client) - } - pub fn balances(&self) -> balances::constants::ConstantsApi<'a, T> { - balances::constants::ConstantsApi::new(self.client) - } - pub fn transaction_payment(&self) -> transaction_payment::constants::ConstantsApi<'a, T> { - transaction_payment::constants::ConstantsApi::new(self.client) - } - pub fn authorship(&self) -> authorship::constants::ConstantsApi<'a, T> { - authorship::constants::ConstantsApi::new(self.client) - } - pub fn grandpa(&self) -> grandpa::constants::ConstantsApi<'a, T> { - grandpa::constants::ConstantsApi::new(self.client) - } - pub fn im_online(&self) -> im_online::constants::ConstantsApi<'a, T> { - im_online::constants::ConstantsApi::new(self.client) - } - pub fn paras(&self) -> paras::constants::ConstantsApi<'a, T> { - 
paras::constants::ConstantsApi::new(self.client) - } - pub fn registrar(&self) -> registrar::constants::ConstantsApi<'a, T> { - registrar::constants::ConstantsApi::new(self.client) - } - pub fn auctions(&self) -> auctions::constants::ConstantsApi<'a, T> { - auctions::constants::ConstantsApi::new(self.client) - } - pub fn crowdloan(&self) -> crowdloan::constants::ConstantsApi<'a, T> { - crowdloan::constants::ConstantsApi::new(self.client) - } - pub fn slots(&self) -> slots::constants::ConstantsApi<'a, T> { - slots::constants::ConstantsApi::new(self.client) - } - pub fn assigned_slots(&self) -> assigned_slots::constants::ConstantsApi<'a, T> { - assigned_slots::constants::ConstantsApi::new(self.client) - } - pub fn bridge_rococo_grandpa( - &self, - ) -> bridge_rococo_grandpa::constants::ConstantsApi<'a, T> { - bridge_rococo_grandpa::constants::ConstantsApi::new(self.client) - } - pub fn bridge_wococo_grandpa( - &self, - ) -> bridge_wococo_grandpa::constants::ConstantsApi<'a, T> { - bridge_wococo_grandpa::constants::ConstantsApi::new(self.client) - } - pub fn bridge_rococo_messages( - &self, - ) -> bridge_rococo_messages::constants::ConstantsApi<'a, T> { - bridge_rococo_messages::constants::ConstantsApi::new(self.client) - } - pub fn bridge_wococo_messages( - &self, - ) -> bridge_wococo_messages::constants::ConstantsApi<'a, T> { - bridge_wococo_messages::constants::ConstantsApi::new(self.client) - } - pub fn utility(&self) -> utility::constants::ConstantsApi<'a, T> { - utility::constants::ConstantsApi::new(self.client) - } - pub fn proxy(&self) -> proxy::constants::ConstantsApi<'a, T> { - proxy::constants::ConstantsApi::new(self.client) - } - pub fn multisig(&self) -> multisig::constants::ConstantsApi<'a, T> { - multisig::constants::ConstantsApi::new(self.client) - } - } - pub struct StorageApi<'a, T: ::subxt::Config> { - client: &'a ::subxt::Client, - } - impl<'a, T> StorageApi<'a, T> - where - T: ::subxt::Config, - { - pub fn system(&self) -> system::storage::StorageApi<'a, T> { - system::storage::StorageApi::new(self.client) - } - pub fn babe(&self) -> babe::storage::StorageApi<'a, T> { - babe::storage::StorageApi::new(self.client) - } - pub fn timestamp(&self) -> timestamp::storage::StorageApi<'a, T> { - timestamp::storage::StorageApi::new(self.client) - } - pub fn indices(&self) -> indices::storage::StorageApi<'a, T> { - indices::storage::StorageApi::new(self.client) - } - pub fn balances(&self) -> balances::storage::StorageApi<'a, T> { - balances::storage::StorageApi::new(self.client) - } - pub fn transaction_payment(&self) -> transaction_payment::storage::StorageApi<'a, T> { - transaction_payment::storage::StorageApi::new(self.client) - } - pub fn authorship(&self) -> authorship::storage::StorageApi<'a, T> { - authorship::storage::StorageApi::new(self.client) - } - pub fn offences(&self) -> offences::storage::StorageApi<'a, T> { - offences::storage::StorageApi::new(self.client) - } - pub fn historical(&self) -> historical::storage::StorageApi<'a, T> { - historical::storage::StorageApi::new(self.client) - } - pub fn session(&self) -> session::storage::StorageApi<'a, T> { - session::storage::StorageApi::new(self.client) - } - pub fn grandpa(&self) -> grandpa::storage::StorageApi<'a, T> { - grandpa::storage::StorageApi::new(self.client) - } - pub fn im_online(&self) -> im_online::storage::StorageApi<'a, T> { - im_online::storage::StorageApi::new(self.client) - } - pub fn authority_discovery(&self) -> authority_discovery::storage::StorageApi<'a, T> { - 
authority_discovery::storage::StorageApi::new(self.client) - } - pub fn configuration(&self) -> configuration::storage::StorageApi<'a, T> { - configuration::storage::StorageApi::new(self.client) - } - pub fn paras_shared(&self) -> paras_shared::storage::StorageApi<'a, T> { - paras_shared::storage::StorageApi::new(self.client) - } - pub fn para_inclusion(&self) -> para_inclusion::storage::StorageApi<'a, T> { - para_inclusion::storage::StorageApi::new(self.client) - } - pub fn para_inherent(&self) -> para_inherent::storage::StorageApi<'a, T> { - para_inherent::storage::StorageApi::new(self.client) - } - pub fn para_scheduler(&self) -> para_scheduler::storage::StorageApi<'a, T> { - para_scheduler::storage::StorageApi::new(self.client) - } - pub fn paras(&self) -> paras::storage::StorageApi<'a, T> { - paras::storage::StorageApi::new(self.client) - } - pub fn initializer(&self) -> initializer::storage::StorageApi<'a, T> { - initializer::storage::StorageApi::new(self.client) - } - pub fn dmp(&self) -> dmp::storage::StorageApi<'a, T> { - dmp::storage::StorageApi::new(self.client) - } - pub fn ump(&self) -> ump::storage::StorageApi<'a, T> { - ump::storage::StorageApi::new(self.client) - } - pub fn hrmp(&self) -> hrmp::storage::StorageApi<'a, T> { - hrmp::storage::StorageApi::new(self.client) - } - pub fn para_session_info(&self) -> para_session_info::storage::StorageApi<'a, T> { - para_session_info::storage::StorageApi::new(self.client) - } - pub fn paras_disputes(&self) -> paras_disputes::storage::StorageApi<'a, T> { - paras_disputes::storage::StorageApi::new(self.client) - } - pub fn registrar(&self) -> registrar::storage::StorageApi<'a, T> { - registrar::storage::StorageApi::new(self.client) - } - pub fn auctions(&self) -> auctions::storage::StorageApi<'a, T> { - auctions::storage::StorageApi::new(self.client) - } - pub fn crowdloan(&self) -> crowdloan::storage::StorageApi<'a, T> { - crowdloan::storage::StorageApi::new(self.client) - } - pub fn slots(&self) -> slots::storage::StorageApi<'a, T> { - slots::storage::StorageApi::new(self.client) - } - pub fn assigned_slots(&self) -> assigned_slots::storage::StorageApi<'a, T> { - assigned_slots::storage::StorageApi::new(self.client) - } - pub fn sudo(&self) -> sudo::storage::StorageApi<'a, T> { - sudo::storage::StorageApi::new(self.client) - } - pub fn mmr(&self) -> mmr::storage::StorageApi<'a, T> { - mmr::storage::StorageApi::new(self.client) - } - pub fn beefy(&self) -> beefy::storage::StorageApi<'a, T> { - beefy::storage::StorageApi::new(self.client) - } - pub fn mmr_leaf(&self) -> mmr_leaf::storage::StorageApi<'a, T> { - mmr_leaf::storage::StorageApi::new(self.client) - } - pub fn validator_manager(&self) -> validator_manager::storage::StorageApi<'a, T> { - validator_manager::storage::StorageApi::new(self.client) - } - pub fn bridge_rococo_grandpa(&self) -> bridge_rococo_grandpa::storage::StorageApi<'a, T> { - bridge_rococo_grandpa::storage::StorageApi::new(self.client) - } - pub fn bridge_wococo_grandpa(&self) -> bridge_wococo_grandpa::storage::StorageApi<'a, T> { - bridge_wococo_grandpa::storage::StorageApi::new(self.client) - } - pub fn bridge_rococo_messages(&self) -> bridge_rococo_messages::storage::StorageApi<'a, T> { - bridge_rococo_messages::storage::StorageApi::new(self.client) - } - pub fn bridge_wococo_messages(&self) -> bridge_wococo_messages::storage::StorageApi<'a, T> { - bridge_wococo_messages::storage::StorageApi::new(self.client) - } - pub fn collective(&self) -> collective::storage::StorageApi<'a, T> { - 
collective::storage::StorageApi::new(self.client) - } - pub fn membership(&self) -> membership::storage::StorageApi<'a, T> { - membership::storage::StorageApi::new(self.client) - } - pub fn proxy(&self) -> proxy::storage::StorageApi<'a, T> { - proxy::storage::StorageApi::new(self.client) - } - pub fn multisig(&self) -> multisig::storage::StorageApi<'a, T> { - multisig::storage::StorageApi::new(self.client) - } - pub fn xcm_pallet(&self) -> xcm_pallet::storage::StorageApi<'a, T> { - xcm_pallet::storage::StorageApi::new(self.client) - } - } - pub struct TransactionApi<'a, T: ::subxt::Config, X> { - client: &'a ::subxt::Client, - marker: ::core::marker::PhantomData, - } - impl<'a, T, X> TransactionApi<'a, T, X> - where - T: ::subxt::Config, - X: ::subxt::extrinsic::ExtrinsicParams, - { - pub fn system(&self) -> system::calls::TransactionApi<'a, T, X> { - system::calls::TransactionApi::new(self.client) - } - pub fn babe(&self) -> babe::calls::TransactionApi<'a, T, X> { - babe::calls::TransactionApi::new(self.client) - } - pub fn timestamp(&self) -> timestamp::calls::TransactionApi<'a, T, X> { - timestamp::calls::TransactionApi::new(self.client) - } - pub fn indices(&self) -> indices::calls::TransactionApi<'a, T, X> { - indices::calls::TransactionApi::new(self.client) - } - pub fn balances(&self) -> balances::calls::TransactionApi<'a, T, X> { - balances::calls::TransactionApi::new(self.client) - } - pub fn authorship(&self) -> authorship::calls::TransactionApi<'a, T, X> { - authorship::calls::TransactionApi::new(self.client) - } - pub fn session(&self) -> session::calls::TransactionApi<'a, T, X> { - session::calls::TransactionApi::new(self.client) - } - pub fn grandpa(&self) -> grandpa::calls::TransactionApi<'a, T, X> { - grandpa::calls::TransactionApi::new(self.client) - } - pub fn im_online(&self) -> im_online::calls::TransactionApi<'a, T, X> { - im_online::calls::TransactionApi::new(self.client) - } - pub fn configuration(&self) -> configuration::calls::TransactionApi<'a, T, X> { - configuration::calls::TransactionApi::new(self.client) - } - pub fn paras_shared(&self) -> paras_shared::calls::TransactionApi<'a, T, X> { - paras_shared::calls::TransactionApi::new(self.client) - } - pub fn para_inclusion(&self) -> para_inclusion::calls::TransactionApi<'a, T, X> { - para_inclusion::calls::TransactionApi::new(self.client) - } - pub fn para_inherent(&self) -> para_inherent::calls::TransactionApi<'a, T, X> { - para_inherent::calls::TransactionApi::new(self.client) - } - pub fn paras(&self) -> paras::calls::TransactionApi<'a, T, X> { - paras::calls::TransactionApi::new(self.client) - } - pub fn initializer(&self) -> initializer::calls::TransactionApi<'a, T, X> { - initializer::calls::TransactionApi::new(self.client) - } - pub fn dmp(&self) -> dmp::calls::TransactionApi<'a, T, X> { - dmp::calls::TransactionApi::new(self.client) - } - pub fn ump(&self) -> ump::calls::TransactionApi<'a, T, X> { - ump::calls::TransactionApi::new(self.client) - } - pub fn hrmp(&self) -> hrmp::calls::TransactionApi<'a, T, X> { - hrmp::calls::TransactionApi::new(self.client) - } - pub fn paras_disputes(&self) -> paras_disputes::calls::TransactionApi<'a, T, X> { - paras_disputes::calls::TransactionApi::new(self.client) - } - pub fn registrar(&self) -> registrar::calls::TransactionApi<'a, T, X> { - registrar::calls::TransactionApi::new(self.client) - } - pub fn auctions(&self) -> auctions::calls::TransactionApi<'a, T, X> { - auctions::calls::TransactionApi::new(self.client) - } - pub fn crowdloan(&self) -> 
crowdloan::calls::TransactionApi<'a, T, X> { - crowdloan::calls::TransactionApi::new(self.client) - } - pub fn slots(&self) -> slots::calls::TransactionApi<'a, T, X> { - slots::calls::TransactionApi::new(self.client) - } - pub fn paras_sudo_wrapper(&self) -> paras_sudo_wrapper::calls::TransactionApi<'a, T, X> { - paras_sudo_wrapper::calls::TransactionApi::new(self.client) - } - pub fn assigned_slots(&self) -> assigned_slots::calls::TransactionApi<'a, T, X> { - assigned_slots::calls::TransactionApi::new(self.client) - } - pub fn sudo(&self) -> sudo::calls::TransactionApi<'a, T, X> { - sudo::calls::TransactionApi::new(self.client) - } - pub fn beefy(&self) -> beefy::calls::TransactionApi<'a, T, X> { - beefy::calls::TransactionApi::new(self.client) - } - pub fn validator_manager(&self) -> validator_manager::calls::TransactionApi<'a, T, X> { - validator_manager::calls::TransactionApi::new(self.client) - } - pub fn bridge_rococo_grandpa( - &self, - ) -> bridge_rococo_grandpa::calls::TransactionApi<'a, T, X> { - bridge_rococo_grandpa::calls::TransactionApi::new(self.client) - } - pub fn bridge_wococo_grandpa( - &self, - ) -> bridge_wococo_grandpa::calls::TransactionApi<'a, T, X> { - bridge_wococo_grandpa::calls::TransactionApi::new(self.client) - } - pub fn bridge_rococo_messages( - &self, - ) -> bridge_rococo_messages::calls::TransactionApi<'a, T, X> { - bridge_rococo_messages::calls::TransactionApi::new(self.client) - } - pub fn bridge_wococo_messages( - &self, - ) -> bridge_wococo_messages::calls::TransactionApi<'a, T, X> { - bridge_wococo_messages::calls::TransactionApi::new(self.client) - } - pub fn collective(&self) -> collective::calls::TransactionApi<'a, T, X> { - collective::calls::TransactionApi::new(self.client) - } - pub fn membership(&self) -> membership::calls::TransactionApi<'a, T, X> { - membership::calls::TransactionApi::new(self.client) - } - pub fn utility(&self) -> utility::calls::TransactionApi<'a, T, X> { - utility::calls::TransactionApi::new(self.client) - } - pub fn proxy(&self) -> proxy::calls::TransactionApi<'a, T, X> { - proxy::calls::TransactionApi::new(self.client) - } - pub fn multisig(&self) -> multisig::calls::TransactionApi<'a, T, X> { - multisig::calls::TransactionApi::new(self.client) - } - pub fn xcm_pallet(&self) -> xcm_pallet::calls::TransactionApi<'a, T, X> { - xcm_pallet::calls::TransactionApi::new(self.client) - } - } -} diff --git a/modules/src/core/ics02_client/handler/update_client.rs b/modules/src/core/ics02_client/handler/update_client.rs index a4ba2a2d15..df823870c1 100644 --- a/modules/src/core/ics02_client/handler/update_client.rs +++ b/modules/src/core/ics02_client/handler/update_client.rs @@ -127,15 +127,11 @@ pub fn process( #[cfg(test)] mod tests { use core::str::FromStr; - use sp_trie::{generate_trie_proof, TrieDBMut, TrieMut}; use test_log::test; use crate::clients::ics11_beefy::client_state::ClientState as BeefyClientState; - use crate::clients::ics11_beefy::header::{ - decode_parachain_header, ParachainHeader as BeefyParachainHeader, - }; - use crate::clients::ics11_beefy::header::{BeefyHeader, ExtrinsicProof}; - use crate::clients::ics11_beefy::polkadot_runtime as runtime; + use crate::clients::ics11_beefy::header::BeefyHeader; + use crate::clients::ics11_beefy::header::ParachainHeader as BeefyParachainHeader; use crate::core::ics02_client::client_consensus::AnyConsensusState; use crate::core::ics02_client::client_state::{AnyClientState, ClientState}; use crate::core::ics02_client::client_type::ClientType; @@ -158,19 +154,12 @@ mod tests 
{ use crate::test_utils::{get_dummy_account_id, Crypto}; use crate::timestamp::Timestamp; use crate::Height; - use beefy_client::primitives::{ - MmrLeaf, MmrUpdateProof, ParachainHeader, PartialMmrLeaf, SignatureWithAuthorityIndex, - SignedCommitment, + use beefy_client::NodesUtils; + use beefy_client::{ + runtime, + test_utils::{get_initial_client_state, get_mmr_update, get_parachain_headers}, }; - use beefy_client::{MerkleHasher, NodesUtils}; - use beefy_primitives::mmr::BeefyNextAuthoritySet; use codec::{Decode, Encode}; - use hex_literal::hex; - use sp_core::keccak_256; - use sp_core::H256; - use sp_runtime::traits::{BlakeTwo256, Convert}; - use std::collections::BTreeMap; - use subxt::rpc::ClientT; use subxt::rpc::{rpc_params, JsonValue, Subscription, SubscriptionClientT}; #[test] @@ -586,100 +575,6 @@ mod tests { } } - const PARA_ID: u32 = 2000; - - /// Construct the mmr update for beefy light client - async fn get_mmr_update( - client: &subxt::Client, - signed_commitment: beefy_primitives::SignedCommitment< - u32, - beefy_primitives::crypto::Signature, - >, - ) -> MmrUpdateProof { - let api = - client.clone().to_runtime_api::, - >>(); - let subxt_block_number: subxt::BlockNumber = - signed_commitment.commitment.block_number.into(); - let block_hash = client - .rpc() - .block_hash(Some(subxt_block_number)) - .await - .unwrap(); - - let current_authorities = api.storage().beefy().authorities(block_hash).await.unwrap(); - - // Current LeafIndex - let block_number = signed_commitment.commitment.block_number; - let leaf_index = (block_number - 1) as u64; - let leaf_proof: pallet_mmr_rpc::LeafProof = client - .rpc() - .client - .request("mmr_generateProof", rpc_params!(leaf_index, block_hash)) - .await - .unwrap(); - - let opaque_leaf: Vec = codec::Decode::decode(&mut &*leaf_proof.leaf.0).unwrap(); - let latest_leaf: MmrLeaf = - codec::Decode::decode(&mut &*opaque_leaf).unwrap(); - let mmr_proof: pallet_mmr_primitives::Proof = - codec::Decode::decode(&mut &*leaf_proof.proof.0).unwrap(); - - let authority_address_hashes = current_authorities - .into_iter() - .map(|x| { - let id: beefy_primitives::crypto::AuthorityId = - codec::Decode::decode(&mut &*x.encode()).unwrap(); - keccak_256(&beefy_mmr::BeefyEcdsaToEthereum::convert(id)) - }) - .collect::>(); - - let signatures = signed_commitment - .signatures - .into_iter() - .enumerate() - .map(|(index, x)| { - if let Some(sig) = x { - let mut temp = [0u8; 65]; - if sig.len() == 65 { - temp.copy_from_slice(&*sig.encode()); - Some(SignatureWithAuthorityIndex { - index: index as u32, - signature: temp, - }) - } else { - None - } - } else { - None - } - }) - .filter_map(|x| x) - .collect::>(); - - let signature_indices = signatures - .iter() - .map(|x| x.index as usize) - .collect::>(); - - let tree = - rs_merkle::MerkleTree::>::from_leaves(&authority_address_hashes); - - let authority_proof = tree.proof(&signature_indices); - - MmrUpdateProof { - signed_commitment: SignedCommitment { - commitment: signed_commitment.commitment.clone(), - signatures, - }, - latest_mmr_leaf: latest_leaf.clone(), - mmr_proof, - authority_proof: authority_proof.proof_hashes().to_vec(), - } - } - #[tokio::test] async fn test_continuous_update_of_beefy_client() { let client_id = ClientId::new(ClientType::Beefy, 0).unwrap(); @@ -695,38 +590,6 @@ mod tests { let signer = get_dummy_account_id(); - let beefy_client_state = BeefyClientState { - chain_id: Default::default(), - frozen_height: None, - latest_beefy_height: 0, - mmr_root_hash: Default::default(), - authority: 
BeefyNextAuthoritySet { - id: 0, - len: 5, - root: H256::from(hex!( - "baa93c7834125ee3120bac6e3342bd3f28611110ad21ab6075367abdffefeb09" - )), - }, - next_authority_set: BeefyNextAuthoritySet { - id: 1, - len: 5, - root: H256::from(hex!( - "baa93c7834125ee3120bac6e3342bd3f28611110ad21ab6075367abdffefeb09" - )), - }, - beefy_activation_block: 0, - }; - - let create_client = MsgCreateAnyClient { - client_state: AnyClientState::Beefy(beefy_client_state), - consensus_state: None, - signer: signer.clone(), - }; - - // Create the client - let res = dispatch::<_, Crypto>(&ctx, ClientMsg::CreateClient(create_client)).unwrap(); - ctx.store_client_result(res.result).unwrap(); - let url = std::env::var("NODE_ENDPOINT").unwrap_or("ws://127.0.0.1:9944".to_string()); let client = subxt::ClientBuilder::new() .set_url(url) @@ -745,6 +608,27 @@ mod tests { subxt::DefaultConfig, subxt::PolkadotExtrinsicParams<_>, >>(); + let mut count = 0; + let client_state = get_initial_client_state(Some(&api)).await; + let beefy_client_state = BeefyClientState { + chain_id: Default::default(), + frozen_height: None, + latest_beefy_height: 0, + mmr_root_hash: Default::default(), + authority: client_state.current_authorities, + next_authority_set: client_state.next_authorities, + beefy_activation_block: 0, + }; + + let create_client = MsgCreateAnyClient { + client_state: AnyClientState::Beefy(beefy_client_state), + consensus_state: None, + signer: signer.clone(), + }; + + // Create the client + let res = dispatch::<_, Crypto>(&ctx, ClientMsg::CreateClient(create_client)).unwrap(); + ctx.store_client_result(res.result).unwrap(); let mut subscription: Subscription = client .rpc() .client @@ -755,30 +639,6 @@ mod tests { ) .await .unwrap(); - let mut count = 0; - let client_state = ctx.client_state(&client_id).unwrap(); - // Before watching for commitments, we need to check that out initial validator set id is correct - let next_val_set = api - .storage() - .mmr_leaf() - .beefy_next_authorities(None) - .await - .unwrap(); - match client_state { - AnyClientState::Tendermint(_) => {} - AnyClientState::Beefy(mut client_state) => { - if next_val_set.id != client_state.next_authority_set.id { - // Update the Id - // Note that the authorities are not changing, only the id is changing in this development scenario - client_state.next_authority_set.id = next_val_set.id; - client_state.authority.id = next_val_set.id - 1; - ctx.store_client_state(client_id.clone(), AnyClientState::Beefy(client_state)) - .unwrap(); - } - } - AnyClientState::Mock(_) => {} - } - let mut latest_beefy_height = 0; while let Some(Ok(commitment)) = subscription.next().await { if count == 10 { @@ -790,6 +650,22 @@ mod tests { u32, beefy_primitives::crypto::Signature, > = codec::Decode::decode(&mut &*recv_commitment).unwrap(); + let client_state: BeefyClientState = match ctx.client_state(&client_id).unwrap() { + AnyClientState::Beefy(client_state) => client_state, + _ => panic!("unexpected client state"), + }; + match signed_commitment.commitment.validator_set_id { + id if id < client_state.authority.id => { + // If validator set id of signed commitment is less than current validator set id we have + // Then commitment is outdated and we skip it. 
+ println!( + "Skipping outdated commitment \n Received signed commitmment with validator_set_id: {:?}\n Current authority set id: {:?}\n Next authority set id: {:?}\n", + signed_commitment.commitment.validator_set_id, client_state.authority.id, client_state.next_authority_set.id + ); + continue; + } + _ => {} + } println!( "Received signed commitmment for: {:?}", @@ -797,182 +673,13 @@ mod tests { ); let block_number = signed_commitment.commitment.block_number; - let subxt_block_number: subxt::BlockNumber = block_number.into(); - let block_hash = client - .rpc() - .block_hash(Some(subxt_block_number)) - .await - .unwrap(); - - let para_ids = api.storage().paras().parachains(block_hash).await.unwrap(); - let storage_prefix = frame_support::storage::storage_prefix(b"Paras", b"Heads"); - let mut para_header_keys = Vec::new(); - - for para_id in para_ids { - let encoded_para_id = para_id.encode(); - - let mut full_key = storage_prefix.clone().to_vec(); - full_key.extend_from_slice(sp_core::hashing::twox_64(&encoded_para_id).as_slice()); - full_key.extend_from_slice(&encoded_para_id); - para_header_keys.push(subxt::sp_core::storage::StorageKey(full_key)); - } - - let previous_finalized_block_number: subxt::BlockNumber = - (latest_beefy_height + 1).into(); - let previous_finalized_hash = client - .rpc() - .block_hash(Some(previous_finalized_block_number)) - .await - .unwrap() - .unwrap(); - - let change_set = client - .storage() - .query_storage(para_header_keys, previous_finalized_hash, block_hash) - .await - .unwrap(); - let mut finalized_blocks = BTreeMap::new(); - let mut leaf_indices = vec![]; - for changes in change_set { - let header = client - .rpc() - .header(Some(changes.block)) - .await - .unwrap() - .unwrap(); - - let mut heads = BTreeMap::new(); - - for (key, value) in changes.changes { - if let Some(storage_data) = value { - let key = key.0; - // Storage prefix and storage key hash take up the first 40 bytes - let para_id = u32::decode(&mut &key[40..]).unwrap(); - let head_data: runtime::api::runtime_types::polkadot_parachain::primitives::HeadData = Decode::decode(&mut &*storage_data.0).unwrap(); - heads.insert(para_id, head_data.0); - } - } - - if !heads.contains_key(&PARA_ID) { - continue; - } - finalized_blocks.insert(header.number as u64, heads); - leaf_indices.push(header.number - 1); - } - - let batch_proof: pallet_mmr_rpc::LeafBatchProof = client - .rpc() - .client - .request( - "mmr_generateBatchProof", - rpc_params!(leaf_indices.clone(), block_hash), - ) - .await - .unwrap(); - - let leaves: Vec> = Decode::decode(&mut &*batch_proof.leaves.to_vec()).unwrap(); - - let mut parachain_headers = vec![]; - for leaf_bytes in leaves { - let leaf: MmrLeaf = - Decode::decode(&mut &*leaf_bytes).unwrap(); - let leaf_block_number = (leaf.parent_number_and_hash.0 + 1) as u64; - let para_headers = finalized_blocks.get(&leaf_block_number).unwrap(); - - let mut index = None; - let mut parachain_leaves = vec![]; - // Values are already sorted by key which is the para_id - for (idx, (key, header)) in para_headers.iter().enumerate() { - let pair = (*key, header.clone()); - let leaf_hash = keccak_256(pair.encode().as_slice()); - parachain_leaves.push(leaf_hash); - if key == &PARA_ID { - index = Some(idx); - } - } - - let tree = - rs_merkle::MerkleTree::>::from_leaves(¶chain_leaves); - - let proof = if let Some(index) = index { - tree.proof(&[index]) - .proof_hashes() - .into_iter() - .map(|item| item.clone()) - .collect::>() - } else { - vec![] - }; - - let para_head = 
para_headers.get(&PARA_ID).unwrap().clone(); - let decoded_para_head = decode_parachain_header(para_head.clone()).unwrap(); - - let block_number = decoded_para_head.number; - let subxt_block_number: subxt::BlockNumber = block_number.into(); - let block_hash = para_client - .rpc() - .block_hash(Some(subxt_block_number)) - .await - .unwrap(); - - let block = para_client.rpc().block(block_hash).await.unwrap().unwrap(); - let extrinsics = block - .block - .extrinsics - .into_iter() - .map(|e| e.encode()) - .collect::>(); - let extrinsic_proof = { - if extrinsics.is_empty() { - ExtrinsicProof::default().encode() - } else { - let timestamp_ext = extrinsics[0].clone(); - - let mut db = sp_trie::MemoryDB::::default(); - - let root = { - let mut root = Default::default(); - let mut trie = >>::new( - &mut db, &mut root, - ); - - for (i, ext) in extrinsics.into_iter().enumerate() { - let key = codec::Compact(i as u32).encode(); - trie.insert(&key, &ext).unwrap(); - } - *trie.root() - }; - - let key = codec::Compact::(0u32).encode(); - let extrinsic_proof = generate_trie_proof::< - sp_trie::LayoutV0, - _, - _, - _, - >(&db, root, vec![&key]) - .unwrap(); - ExtrinsicProof(timestamp_ext, extrinsic_proof).encode() - } - }; - let header = ParachainHeader { - parachain_header: para_head, - partial_mmr_leaf: PartialMmrLeaf { - version: leaf.version, - parent_number_and_hash: leaf.parent_number_and_hash, - beefy_next_authority_set: leaf.beefy_next_authority_set.clone(), - }, - para_id: PARA_ID, - parachain_heads_proof: proof, - heads_leaf_index: index.unwrap() as u32, - heads_total_count: parachain_leaves.len() as u32, - extrinsic_proof, - }; - - parachain_headers.push(header); - } - - let batch_proof: pallet_mmr_primitives::BatchProof = - codec::Decode::decode(&mut batch_proof.proof.0.as_slice()).unwrap(); + let (parachain_headers, batch_proof) = get_parachain_headers( + &client, + ¶_client, + block_number, + client_state.latest_beefy_height, + ) + .await; let mmr_update = get_mmr_update(&client, signed_commitment.clone()).await; @@ -989,8 +696,8 @@ mod tests { parachain_heads_proof: header.parachain_heads_proof, heads_leaf_index: header.heads_leaf_index, heads_total_count: header.heads_total_count, - extrinsic_proof: ExtrinsicProof::decode(&mut &*header.extrinsic_proof) - .unwrap(), + extrinsic_proof: header.extrinsic_proof, + timestamp_extrinsic: header.timestamp_extrinsic, }) .collect(), mmr_proofs: batch_proof @@ -1043,7 +750,6 @@ mod tests { Err(e) => panic!("Unexpected error {:?}", e), } println!("Updated client successfully"); - latest_beefy_height = signed_commitment.commitment.block_number; count += 1; } } diff --git a/proto/src/IBC_GO_COMMIT b/proto/src/IBC_GO_COMMIT index 96598685c4..1748fb8fa1 100644 --- a/proto/src/IBC_GO_COMMIT +++ b/proto/src/IBC_GO_COMMIT @@ -1 +1 @@ -2923e0a7b627a0437ce6eea805120f1c31e3b525 +c5d058f389c690d6846c36cb90ceed09c51adc9f diff --git a/proto/src/lib.rs b/proto/src/lib.rs index aa059a219e..51bbab7419 100644 --- a/proto/src/lib.rs +++ b/proto/src/lib.rs @@ -181,11 +181,6 @@ pub mod ibc { } } pub mod lightclients { - pub mod localhost { - pub mod v1 { - include_proto!("ibc.lightclients.localhost.v1.rs"); - } - } pub mod solomachine { pub mod v1 { include_proto!("ibc.lightclients.solomachine.v1.rs"); diff --git a/proto/src/prost/ibc.core.channel.v1.rs b/proto/src/prost/ibc.core.channel.v1.rs index b1f56ded38..0d47ed7846 100644 --- a/proto/src/prost/ibc.core.channel.v1.rs +++ b/proto/src/prost/ibc.core.channel.v1.rs @@ -356,6 +356,8 @@ pub struct MsgRecvPacket { 
#[derive(::serde::Serialize, ::serde::Deserialize)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct MsgRecvPacketResponse { + #[prost(enumeration="ResponseResultType", tag="1")] + pub result: i32, } /// MsgTimeout receives timed-out packet #[derive(::serde::Serialize, ::serde::Deserialize)] @@ -376,6 +378,8 @@ pub struct MsgTimeout { #[derive(::serde::Serialize, ::serde::Deserialize)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct MsgTimeoutResponse { + #[prost(enumeration="ResponseResultType", tag="1")] + pub result: i32, } /// MsgTimeoutOnClose timed-out packet upon counterparty channel closure. #[derive(::serde::Serialize, ::serde::Deserialize)] @@ -398,6 +402,8 @@ pub struct MsgTimeoutOnClose { #[derive(::serde::Serialize, ::serde::Deserialize)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct MsgTimeoutOnCloseResponse { + #[prost(enumeration="ResponseResultType", tag="1")] + pub result: i32, } /// MsgAcknowledgement receives incoming IBC acknowledgement #[derive(::serde::Serialize, ::serde::Deserialize)] @@ -418,6 +424,20 @@ pub struct MsgAcknowledgement { #[derive(::serde::Serialize, ::serde::Deserialize)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct MsgAcknowledgementResponse { + #[prost(enumeration="ResponseResultType", tag="1")] + pub result: i32, +} +/// ResponseResultType defines the possible outcomes of the execution of a message +#[derive(::serde::Serialize, ::serde::Deserialize)] +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum ResponseResultType { + /// Default zero value enumeration + Unspecified = 0, + /// The message did not call the IBC application callbacks (because, for example, the packet had already been relayed) + Noop = 1, + /// The message was executed successfully + Success = 2, } /// Generated client implementations. #[cfg(feature = "client")] diff --git a/proto/src/prost/ibc.lightclients.beefy.v1.rs b/proto/src/prost/ibc.lightclients.beefy.v1.rs index 2cc15c0ffa..f1cb23c8ba 100644 --- a/proto/src/prost/ibc.lightclients.beefy.v1.rs +++ b/proto/src/prost/ibc.lightclients.beefy.v1.rs @@ -3,85 +3,85 @@ #[derive(Clone, PartialEq, ::prost::Message)] pub struct ClientState { /// Latest mmr root hash - #[prost(bytes = "vec", tag = "1")] + #[prost(bytes="vec", tag="1")] pub mmr_root_hash: ::prost::alloc::vec::Vec, /// block number for the latest mmr_root_hash - #[prost(uint32, tag = "2")] + #[prost(uint32, tag="2")] pub latest_beefy_height: u32, /// Block height when the client was frozen due to a misbehaviour - #[prost(uint64, tag = "3")] + #[prost(uint64, tag="3")] pub frozen_height: u64, /// block number that the beefy protocol was activated on the relay chain. - /// This shoould be the first block in the merkle-mountain-range tree. - #[prost(uint32, tag = "4")] + /// This should be the first block in the merkle-mountain-range tree. 
+ #[prost(uint32, tag="4")] pub beefy_activation_block: u32, /// authorities for the current round - #[prost(message, optional, tag = "5")] + #[prost(message, optional, tag="5")] pub authority: ::core::option::Option, /// authorities for the next round - #[prost(message, optional, tag = "6")] + #[prost(message, optional, tag="6")] pub next_authority_set: ::core::option::Option, } /// Actual payload items #[derive(Clone, PartialEq, ::prost::Message)] pub struct PayloadItem { /// 2-byte payload id - #[prost(bytes = "vec", tag = "1")] + #[prost(bytes="vec", tag="1")] pub payload_id: ::prost::alloc::vec::Vec, /// arbitrary length payload data., eg mmr_root_hash - #[prost(bytes = "vec", tag = "2")] + #[prost(bytes="vec", tag="2")] pub payload_data: ::prost::alloc::vec::Vec, } /// Commitment message signed by beefy validators #[derive(Clone, PartialEq, ::prost::Message)] pub struct Commitment { /// array of payload items signed by Beefy validators - #[prost(message, repeated, tag = "1")] + #[prost(message, repeated, tag="1")] pub payload: ::prost::alloc::vec::Vec, /// block number for this commitment - #[prost(uint32, tag = "2")] + #[prost(uint32, tag="2")] pub block_numer: u32, /// validator set that signed this commitment - #[prost(uint64, tag = "3")] + #[prost(uint64, tag="3")] pub validator_set_id: u64, } /// Signature belonging to a single validator #[derive(Clone, PartialEq, ::prost::Message)] pub struct CommitmentSignature { /// actual signature bytes - #[prost(bytes = "vec", tag = "1")] + #[prost(bytes="vec", tag="1")] pub signature: ::prost::alloc::vec::Vec, /// authority leaf index in the merkle tree. - #[prost(uint32, tag = "2")] + #[prost(uint32, tag="2")] pub authority_index: u32, } /// signed commitment data #[derive(Clone, PartialEq, ::prost::Message)] pub struct SignedCommitment { /// commitment data being signed - #[prost(message, optional, tag = "1")] + #[prost(message, optional, tag="1")] pub commitment: ::core::option::Option, /// gotten from rpc subscription - #[prost(message, repeated, tag = "2")] + #[prost(message, repeated, tag="2")] pub signatures: ::prost::alloc::vec::Vec, } /// data needed to update the client #[derive(Clone, PartialEq, ::prost::Message)] pub struct MmrUpdateProof { /// the new mmr leaf SCALE encoded. - #[prost(message, optional, tag = "1")] + #[prost(message, optional, tag="1")] pub mmr_leaf: ::core::option::Option, /// leaf index for the mmr_leaf - #[prost(uint64, tag = "2")] + #[prost(uint64, tag="2")] pub mmr_leaf_index: u64, /// proof that this mmr_leaf index is valid. - #[prost(bytes = "vec", repeated, tag = "3")] + #[prost(bytes="vec", repeated, tag="3")] pub mmr_proof: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec>, /// signed commitment data - #[prost(message, optional, tag = "4")] + #[prost(message, optional, tag="4")] pub signed_commitment: ::core::option::Option, /// generated using full authority list from runtime - #[prost(bytes = "vec", repeated, tag = "5")] + #[prost(bytes="vec", repeated, tag="5")] pub authorities_proof: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec>, } /// ConsensusState defines the consensus state from Tendermint. @@ -89,10 +89,10 @@ pub struct MmrUpdateProof { pub struct ConsensusState { /// timestamp that corresponds to the block height in which the ConsensusState /// was stored. 
- #[prost(message, optional, tag = "1")] + #[prost(message, optional, tag="1")] pub timestamp: ::core::option::Option, /// packet commitment root - #[prost(bytes = "vec", tag = "2")] + #[prost(bytes="vec", tag="2")] pub root: ::prost::alloc::vec::Vec<u8>, /// proof of inclusion for this parachain header in the Mmr. #[prost(message, optional, tag = "4")] @@ -102,97 +102,99 @@ pub struct ConsensusState { /// that implements Misbehaviour interface expected by ICS-02 #[derive(Clone, PartialEq, ::prost::Message)] pub struct Misbehaviour { - #[prost(message, optional, tag = "2")] + #[prost(message, optional, tag="2")] pub header_1: ::core::option::Option<Header>, - #[prost(message, optional, tag = "3")] + #[prost(message, optional, tag="3")] pub header_2: ::core::option::Option<Header>
, } /// Header contains the neccessary data to proove finality about IBC commitments #[derive(Clone, PartialEq, ::prost::Message)] pub struct Header { /// parachain headers needed for proofs and ConsensusState - #[prost(message, repeated, tag = "1")] + #[prost(message, repeated, tag="1")] pub parachain_headers: ::prost::alloc::vec::Vec, /// mmr proofs for the headers gotten from rpc "mmr_generateProofs" - #[prost(bytes = "vec", repeated, tag = "2")] + #[prost(bytes="vec", repeated, tag="2")] pub mmr_proofs: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec>, /// size of the mmr for the given proof - #[prost(uint64, tag = "3")] + #[prost(uint64, tag="3")] pub mmr_size: u64, /// optional payload to update the mmr root hash. - #[prost(message, optional, tag = "4")] + #[prost(message, optional, tag="4")] pub mmr_update_proof: ::core::option::Option, } /// data needed to prove parachain header inclusion in mmr. #[derive(Clone, PartialEq, ::prost::Message)] pub struct ParachainHeader { /// scale-encoded parachain header bytes - #[prost(bytes = "vec", tag = "1")] + #[prost(bytes="vec", tag="1")] pub parachain_header: ::prost::alloc::vec::Vec, /// reconstructed MmrLeaf, see beefy-go spec - #[prost(message, optional, tag = "2")] + #[prost(message, optional, tag="2")] pub mmr_leaf_partial: ::core::option::Option, /// para_id of the header. - #[prost(uint32, tag = "3")] + #[prost(uint32, tag="3")] pub para_id: u32, /// proofs for our header in the parachain heads root - #[prost(bytes = "vec", repeated, tag = "4")] + #[prost(bytes="vec", repeated, tag="4")] pub parachain_heads_proof: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec>, /// leaf index for parachain heads proof - #[prost(uint32, tag = "5")] + #[prost(uint32, tag="5")] pub heads_leaf_index: u32, /// total number of para heads in parachain_heads_root - #[prost(uint32, tag = "6")] + #[prost(uint32, tag="6")] pub heads_total_count: u32, /// trie merkle proof of inclusion in header.extrinsic_root - /// this already encodes the actual extrinsic - #[prost(bytes = "vec", tag = "7")] - pub extrinsic_proof: ::prost::alloc::vec::Vec, + #[prost(bytes="vec", repeated, tag="7")] + pub extrinsic_proof: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec>, + /// the actual timestamp extrinsic + #[prost(bytes="vec", tag="8")] + pub timestamp_extrinsic: ::prost::alloc::vec::Vec, } /// Partial data for MmrLeaf #[derive(Clone, PartialEq, ::prost::Message)] pub struct BeefyMmrLeafPartial { /// leaf version - #[prost(uint32, tag = "1")] + #[prost(uint32, tag="1")] pub version: u32, /// parent block for this leaf - #[prost(uint32, tag = "2")] + #[prost(uint32, tag="2")] pub parent_number: u32, /// parent hash for this leaf - #[prost(bytes = "vec", tag = "3")] + #[prost(bytes="vec", tag="3")] pub parent_hash: ::prost::alloc::vec::Vec, /// next authority set. - #[prost(message, optional, tag = "4")] + #[prost(message, optional, tag="4")] pub beefy_next_authority_set: ::core::option::Option, } /// Beefy Authority Info #[derive(Clone, PartialEq, ::prost::Message)] pub struct BeefyAuthoritySet { /// Id of the authority set, it should be strictly increasing - #[prost(uint64, tag = "1")] + #[prost(uint64, tag="1")] pub id: u64, /// size of the authority set - #[prost(uint32, tag = "2")] + #[prost(uint32, tag="2")] pub len: u32, /// merkle root of the sorted authority public keys. 
- #[prost(bytes = "vec", tag = "3")] + #[prost(bytes="vec", tag="3")] pub authority_root: ::prost::alloc::vec::Vec, } #[derive(Clone, PartialEq, ::prost::Message)] pub struct BeefyMmrLeaf { /// leaf version - #[prost(uint32, tag = "1")] + #[prost(uint32, tag="1")] pub version: u32, /// parent block for this leaf - #[prost(uint32, tag = "2")] + #[prost(uint32, tag="2")] pub parent_number: u32, /// parent hash for this leaf - #[prost(bytes = "vec", tag = "3")] + #[prost(bytes="vec", tag="3")] pub parent_hash: ::prost::alloc::vec::Vec, /// beefy next authority set. - #[prost(message, optional, tag = "4")] + #[prost(message, optional, tag="4")] pub beefy_next_authority_set: ::core::option::Option, /// merkle root hash of parachain heads included in the leaf. - #[prost(bytes = "vec", tag = "5")] + #[prost(bytes="vec", tag="5")] pub parachain_heads: ::prost::alloc::vec::Vec, } diff --git a/proto/src/prost/ibc.lightclients.localhost.v1.rs b/proto/src/prost/ibc.lightclients.localhost.v1.rs deleted file mode 100644 index a822ae70ca..0000000000 --- a/proto/src/prost/ibc.lightclients.localhost.v1.rs +++ /dev/null @@ -1,11 +0,0 @@ -/// ClientState defines a loopback (localhost) client. It requires (read-only) -/// access to keys outside the client prefix. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ClientState { - /// self chain ID - #[prost(string, tag="1")] - pub chain_id: ::prost::alloc::string::String, - /// self latest block height - #[prost(message, optional, tag="2")] - pub height: ::core::option::Option, -} From fd5c124739e92ff1f492704fb2302678069c3705 Mon Sep 17 00:00:00 2001 From: David Salami Date: Fri, 20 May 2022 13:51:45 +0100 Subject: [PATCH 22/96] increaese test length --- modules/src/core/ics02_client/handler/update_client.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/src/core/ics02_client/handler/update_client.rs b/modules/src/core/ics02_client/handler/update_client.rs index df823870c1..84dde23512 100644 --- a/modules/src/core/ics02_client/handler/update_client.rs +++ b/modules/src/core/ics02_client/handler/update_client.rs @@ -641,7 +641,7 @@ mod tests { .unwrap(); while let Some(Ok(commitment)) = subscription.next().await { - if count == 10 { + if count == 100 { break; } let recv_commitment: sp_core::Bytes = From 87bf18f8063a6a5beb0c40c831980833c6ce66a2 Mon Sep 17 00:00:00 2001 From: Blas Rodriguez Irizar Date: Thu, 19 May 2022 14:51:08 -0300 Subject: [PATCH 23/96] setup Near variant in --- modules/src/clients/mod.rs | 1 + modules/src/clients/near/client_def.rs | 0 modules/src/clients/near/client_state.rs | 0 modules/src/clients/near/error.rs | 0 modules/src/clients/near/mod.rs | 1 + modules/src/core/ics02_client/client_def.rs | 53 +++++++++++++++++-- modules/src/core/ics02_client/client_state.rs | 19 +++++++ modules/src/core/ics02_client/client_type.rs | 3 ++ modules/src/core/ics24_host/identifier.rs | 1 + 9 files changed, 75 insertions(+), 3 deletions(-) create mode 100644 modules/src/clients/near/client_def.rs create mode 100644 modules/src/clients/near/client_state.rs create mode 100644 modules/src/clients/near/error.rs create mode 100644 modules/src/clients/near/mod.rs diff --git a/modules/src/clients/mod.rs b/modules/src/clients/mod.rs index a4fe74ad6e..135b6533eb 100644 --- a/modules/src/clients/mod.rs +++ b/modules/src/clients/mod.rs @@ -3,3 +3,4 @@ pub mod crypto_ops; pub mod ics07_tendermint; pub mod ics11_beefy; +pub mod near; diff --git a/modules/src/clients/near/client_def.rs b/modules/src/clients/near/client_def.rs new 
file mode 100644 index 0000000000..e69de29bb2 diff --git a/modules/src/clients/near/client_state.rs b/modules/src/clients/near/client_state.rs new file mode 100644 index 0000000000..e69de29bb2 diff --git a/modules/src/clients/near/error.rs b/modules/src/clients/near/error.rs new file mode 100644 index 0000000000..e69de29bb2 diff --git a/modules/src/clients/near/mod.rs b/modules/src/clients/near/mod.rs new file mode 100644 index 0000000000..8b13789179 --- /dev/null +++ b/modules/src/clients/near/mod.rs @@ -0,0 +1 @@ + diff --git a/modules/src/core/ics02_client/client_def.rs b/modules/src/core/ics02_client/client_def.rs index c9a64ec04a..2ac6b24357 100644 --- a/modules/src/core/ics02_client/client_def.rs +++ b/modules/src/core/ics02_client/client_def.rs @@ -211,6 +211,7 @@ pub trait ClientDef: Clone { pub enum AnyClient { Tendermint(TendermintClient), Beefy(BeefyClient), + Near(BeefyClient), #[cfg(any(test, feature = "mocks"))] Mock(MockClient), } @@ -220,6 +221,7 @@ impl AnyClient { match client_type { ClientType::Tendermint => Self::Tendermint(TendermintClient::::default()), ClientType::Beefy => Self::Beefy(BeefyClient::::default()), + ClientType::Near => Self::Near(BeefyClient::::default()), #[cfg(any(test, feature = "mocks"))] ClientType::Mock => Self::Mock(MockClient::::default()), } @@ -262,6 +264,17 @@ impl ClientDef for Any client.verify_header(ctx, client_id, client_state, header) } + Self::Near(_) => { + // let (client_state, header) = downcast!( + // client_state => AnyClientState::Beefy, + // header => AnyHeader::Beefy, + // ) + // .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Beefy))?; + + // client.verify_header(ctx, client_id, client_state, header) + todo!() + } + #[cfg(any(test, feature = "mocks"))] Self::Mock(client) => { let (client_state, header) = downcast!( @@ -309,6 +322,9 @@ impl ClientDef for Any Ok((AnyClientState::Beefy(new_state), new_consensus)) } + Self::Near(_) => { + todo!() + } #[cfg(any(test, feature = "mocks"))] Self::Mock(client) => { @@ -351,6 +367,9 @@ impl ClientDef for Any let client_state = client.update_state_on_misbehaviour(client_state, header)?; Ok(Self::ClientState::Beefy(client_state)) } + AnyClient::Near(_) => { + todo!() + } #[cfg(any(test, feature = "mocks"))] AnyClient::Mock(client) => { let (client_state, header) = downcast!( @@ -391,6 +410,9 @@ impl ClientDef for Any client.check_for_misbehaviour(ctx, client_id, client_state, header) } + AnyClient::Near(_) => { + todo!() + } #[cfg(any(test, feature = "mocks"))] AnyClient::Mock(client) => { let (client_state, header) = downcast!( @@ -446,6 +468,10 @@ impl ClientDef for Any Ok((AnyClientState::Beefy(new_state), new_consensus)) } + Self::Near(_) => { + todo!() + } + #[cfg(any(test, feature = "mocks"))] Self::Mock(client) => { let (client_state, consensus_state) = downcast!( @@ -516,6 +542,9 @@ impl ClientDef for Any expected_consensus_state, ) } + Self::Near(_) => { + todo!() + } #[cfg(any(test, feature = "mocks"))] Self::Mock(client) => { let client_state = downcast!( @@ -583,6 +612,9 @@ impl ClientDef for Any expected_connection_end, ) } + Self::Near(_) => { + todo!() + } #[cfg(any(test, feature = "mocks"))] Self::Mock(client) => { let client_state = downcast!(client_state => AnyClientState::Mock) @@ -652,6 +684,9 @@ impl ClientDef for Any expected_channel_end, ) } + Self::Near(_) => { + todo!() + } #[cfg(any(test, feature = "mocks"))] Self::Mock(client) => { @@ -719,6 +754,9 @@ impl ClientDef for Any client_state_on_counterparty, ) } + Self::Near(_) => { + todo!() + } 
#[cfg(any(test, feature = "mocks"))] Self::Mock(client) => { let client_state = downcast!( @@ -796,7 +834,9 @@ impl ClientDef for Any commitment, ) } - + Self::Near(_) => { + todo!() + } #[cfg(any(test, feature = "mocks"))] Self::Mock(client) => { let client_state = downcast!( @@ -877,6 +917,9 @@ impl ClientDef for Any ack_commitment, ) } + Self::Near(_) => { + todo!() + } #[cfg(any(test, feature = "mocks"))] Self::Mock(client) => { let client_state = downcast!( @@ -953,7 +996,9 @@ impl ClientDef for Any sequence, ) } - + Self::Near(_) => { + todo!() + } #[cfg(any(test, feature = "mocks"))] Self::Mock(client) => { let client_state = downcast!( @@ -1030,7 +1075,9 @@ impl ClientDef for Any sequence, ) } - + Self::Near(_) => { + todo!() + } #[cfg(any(test, feature = "mocks"))] Self::Mock(client) => { let client_state = downcast!( diff --git a/modules/src/core/ics02_client/client_state.rs b/modules/src/core/ics02_client/client_state.rs index 735608e662..3164ace075 100644 --- a/modules/src/core/ics02_client/client_state.rs +++ b/modules/src/core/ics02_client/client_state.rs @@ -102,6 +102,8 @@ pub enum AnyClientState { Tendermint(client_state::ClientState), #[serde(skip)] Beefy(beefy_client_state::ClientState), + #[serde(skip)] + Near(beefy_client_state::ClientState), #[cfg(any(test, feature = "mocks"))] Mock(MockClientState), } @@ -111,6 +113,7 @@ impl AnyClientState { match self { Self::Tendermint(tm_state) => tm_state.latest_height(), Self::Beefy(bf_state) => bf_state.latest_height(), + Self::Near(_) => todo!(), #[cfg(any(test, feature = "mocks"))] Self::Mock(mock_state) => mock_state.latest_height(), } @@ -120,6 +123,7 @@ impl AnyClientState { match self { Self::Tendermint(tm_state) => tm_state.frozen_height(), Self::Beefy(bf_state) => bf_state.frozen_height(), + Self::Near(_) => todo!(), #[cfg(any(test, feature = "mocks"))] Self::Mock(mock_state) => mock_state.frozen_height(), } @@ -129,6 +133,7 @@ impl AnyClientState { match self { AnyClientState::Tendermint(state) => Some(state.trust_level), AnyClientState::Beefy(_) => None, + Self::Near(_) => todo!(), #[cfg(any(test, feature = "mocks"))] AnyClientState::Mock(_) => None, } @@ -138,6 +143,7 @@ impl AnyClientState { match self { AnyClientState::Tendermint(state) => state.max_clock_drift, AnyClientState::Beefy(_) => Duration::new(0, 0), + Self::Near(_) => todo!(), #[cfg(any(test, feature = "mocks"))] AnyClientState::Mock(_) => Duration::new(0, 0), } @@ -147,6 +153,7 @@ impl AnyClientState { match self { Self::Tendermint(state) => state.client_type(), Self::Beefy(state) => state.client_type(), + Self::Near(_) => todo!(), #[cfg(any(test, feature = "mocks"))] Self::Mock(state) => state.client_type(), } @@ -156,6 +163,7 @@ impl AnyClientState { match self { AnyClientState::Tendermint(tm_state) => tm_state.refresh_time(), AnyClientState::Beefy(_) => None, + Self::Near(_) => None, #[cfg(any(test, feature = "mocks"))] AnyClientState::Mock(mock_state) => mock_state.refresh_time(), } @@ -165,6 +173,7 @@ impl AnyClientState { match self { AnyClientState::Tendermint(tm_state) => tm_state.expired(elapsed_since_latest), AnyClientState::Beefy(_) => false, + Self::Near(_) => false, #[cfg(any(test, feature = "mocks"))] AnyClientState::Mock(mock_state) => mock_state.expired(elapsed_since_latest), } @@ -215,6 +224,12 @@ impl From for Any { .encode_vec() .expect("encoding to `Any` from `AnyClientState::Tendermint`"), }, + AnyClientState::Near(_) => Any { + type_url: BEEFY_CLIENT_STATE_TYPE_URL.to_string(), + value: value + .encode_vec() + .expect("encoding to 
`Any` from `AnyClientState::Near`"), + }, #[cfg(any(test, feature = "mocks"))] AnyClientState::Mock(value) => Any { type_url: MOCK_CLIENT_STATE_TYPE_URL.to_string(), @@ -233,6 +248,7 @@ impl ClientState for AnyClientState { match self { AnyClientState::Tendermint(tm_state) => tm_state.chain_id(), AnyClientState::Beefy(bf_state) => bf_state.chain_id(), + AnyClientState::Near(_) => todo!(), #[cfg(any(test, feature = "mocks"))] AnyClientState::Mock(mock_state) => mock_state.chain_id(), } @@ -263,6 +279,9 @@ impl ClientState for AnyClientState { AnyClientState::Beefy(bf_state) => bf_state .upgrade(upgrade_height, upgrade_options.into_beefy(), chain_id) .wrap_any(), + AnyClientState::Near(near_state) => near_state + .upgrade(upgrade_height, upgrade_options.into_beefy(), chain_id) + .wrap_any(), #[cfg(any(test, feature = "mocks"))] AnyClientState::Mock(mock_state) => { mock_state.upgrade(upgrade_height, (), chain_id).wrap_any() diff --git a/modules/src/core/ics02_client/client_type.rs b/modules/src/core/ics02_client/client_type.rs index 1fb05edf14..e7ca17eef3 100644 --- a/modules/src/core/ics02_client/client_type.rs +++ b/modules/src/core/ics02_client/client_type.rs @@ -9,6 +9,7 @@ use super::error::Error; pub enum ClientType { Tendermint = 1, Beefy = 2, + Near = 3, #[cfg(any(test, feature = "mocks"))] Mock = 9999, } @@ -16,6 +17,7 @@ pub enum ClientType { impl ClientType { const TENDERMINT_STR: &'static str = "07-tendermint"; const BEEFY_STR: &'static str = "11-beefy"; + const NEAR_STR: &'static str = "11-beefy"; #[cfg_attr(not(test), allow(dead_code))] const MOCK_STR: &'static str = "9999-mock"; @@ -25,6 +27,7 @@ impl ClientType { match self { Self::Tendermint => Self::TENDERMINT_STR, Self::Beefy => Self::BEEFY_STR, + Self::Near => Self::NEAR_STR, #[cfg(any(test, feature = "mocks"))] Self::Mock => Self::MOCK_STR, } diff --git a/modules/src/core/ics24_host/identifier.rs b/modules/src/core/ics24_host/identifier.rs index efe9a8f613..db69345c61 100644 --- a/modules/src/core/ics24_host/identifier.rs +++ b/modules/src/core/ics24_host/identifier.rs @@ -172,6 +172,7 @@ impl ClientId { match client_type { ClientType::Tendermint => ClientType::Tendermint.as_str(), ClientType::Beefy => ClientType::Beefy.as_str(), + ClientType::Near => ClientType::Near.as_str(), #[cfg(any(test, feature = "mocks"))] ClientType::Mock => ClientType::Mock.as_str(), } From 3cc36f8733e4be9e6047ecc9b102de95b2510613 Mon Sep 17 00:00:00 2001 From: Blas Rodriguez Irizar Date: Mon, 23 May 2022 09:59:24 +0200 Subject: [PATCH 24/96] near: define client_def (w/ todos!()) --- modules/src/clients/ics12_near/client_def.rs | 201 ++++++++++++++++++ .../{near => ics12_near}/client_state.rs | 0 .../src/clients/{near => ics12_near}/error.rs | 0 modules/src/clients/ics12_near/mod.rs | 3 + modules/src/clients/mod.rs | 2 +- modules/src/clients/near/client_def.rs | 0 modules/src/clients/near/mod.rs | 1 - 7 files changed, 205 insertions(+), 2 deletions(-) create mode 100644 modules/src/clients/ics12_near/client_def.rs rename modules/src/clients/{near => ics12_near}/client_state.rs (100%) rename modules/src/clients/{near => ics12_near}/error.rs (100%) create mode 100644 modules/src/clients/ics12_near/mod.rs delete mode 100644 modules/src/clients/near/client_def.rs delete mode 100644 modules/src/clients/near/mod.rs diff --git a/modules/src/clients/ics12_near/client_def.rs b/modules/src/clients/ics12_near/client_def.rs new file mode 100644 index 0000000000..75e858c822 --- /dev/null +++ b/modules/src/clients/ics12_near/client_def.rs @@ -0,0 +1,201 @@ 
+use crate::core::ics02_client::client_def::ClientDef; + +pub struct NearClient {} + +impl ClientDef for NearClient { + type Header; + + type ClientState; + + type ConsensusState; + + type Crypto; + + fn verify_header( + &self, + ctx: &dyn crate::core::ics26_routing::context::LightClientContext, + client_id: crate::core::ics24_host::identifier::ClientId, + client_state: Self::ClientState, + header: Self::Header, + ) -> Result<(), Error> { + todo!() + } + + fn update_state( + &self, + ctx: &dyn crate::core::ics26_routing::context::LightClientContext, + client_id: crate::core::ics24_host::identifier::ClientId, + client_state: Self::ClientState, + header: Self::Header, + ) -> Result< + ( + Self::ClientState, + crate::core::ics02_client::client_def::ConsensusUpdateResult, + ), + Error, + > { + todo!() + } + + fn update_state_on_misbehaviour( + &self, + client_state: Self::ClientState, + header: Self::Header, + ) -> Result { + todo!() + } + + fn check_for_misbehaviour( + &self, + ctx: &dyn crate::core::ics26_routing::context::LightClientContext, + client_id: crate::core::ics24_host::identifier::ClientId, + client_state: Self::ClientState, + header: Self::Header, + ) -> Result { + todo!() + } + + fn verify_upgrade_and_update_state( + &self, + client_state: &Self::ClientState, + consensus_state: &Self::ConsensusState, + proof_upgrade_client: Vec, + proof_upgrade_consensus_state: Vec, + ) -> Result< + ( + Self::ClientState, + crate::core::ics02_client::client_def::ConsensusUpdateResult, + ), + Error, + > { + todo!() + } + + fn verify_client_consensus_state( + &self, + ctx: &dyn crate::core::ics26_routing::context::LightClientContext, + client_state: &Self::ClientState, + height: crate::Height, + prefix: &crate::core::ics23_commitment::commitment::CommitmentPrefix, + proof: &crate::core::ics23_commitment::commitment::CommitmentProofBytes, + root: &crate::core::ics23_commitment::commitment::CommitmentRoot, + client_id: &crate::core::ics24_host::identifier::ClientId, + consensus_height: crate::Height, + expected_consensus_state: &crate::core::ics02_client::client_consensus::AnyConsensusState< + Self::Crypto, + >, + ) -> Result<(), Error> { + todo!() + } + + fn verify_connection_state( + &self, + ctx: &dyn crate::core::ics26_routing::context::LightClientContext, + client_id: &crate::core::ics24_host::identifier::ClientId, + client_state: &Self::ClientState, + height: crate::Height, + prefix: &crate::core::ics23_commitment::commitment::CommitmentPrefix, + proof: &crate::core::ics23_commitment::commitment::CommitmentProofBytes, + root: &crate::core::ics23_commitment::commitment::CommitmentRoot, + connection_id: &crate::core::ics24_host::identifier::ConnectionId, + expected_connection_end: &crate::core::ics03_connection::connection::ConnectionEnd, + ) -> Result<(), Error> { + todo!() + } + + fn verify_channel_state( + &self, + ctx: &dyn crate::core::ics26_routing::context::LightClientContext, + client_id: &crate::core::ics24_host::identifier::ClientId, + client_state: &Self::ClientState, + height: crate::Height, + prefix: &crate::core::ics23_commitment::commitment::CommitmentPrefix, + proof: &crate::core::ics23_commitment::commitment::CommitmentProofBytes, + root: &crate::core::ics23_commitment::commitment::CommitmentRoot, + port_id: &crate::core::ics24_host::identifier::PortId, + channel_id: &crate::core::ics24_host::identifier::ChannelId, + expected_channel_end: &crate::core::ics04_channel::channel::ChannelEnd, + ) -> Result<(), Error> { + todo!() + } + + fn verify_client_full_state( + &self, + ctx: 
&dyn crate::core::ics26_routing::context::LightClientContext, + client_state: &Self::ClientState, + height: crate::Height, + prefix: &crate::core::ics23_commitment::commitment::CommitmentPrefix, + proof: &crate::core::ics23_commitment::commitment::CommitmentProofBytes, + root: &crate::core::ics23_commitment::commitment::CommitmentRoot, + client_id: &crate::core::ics24_host::identifier::ClientId, + expected_client_state: &crate::core::ics02_client::client_state::AnyClientState, + ) -> Result<(), Error> { + todo!() + } + + fn verify_packet_data( + &self, + ctx: &dyn crate::core::ics26_routing::context::LightClientContext, + client_id: &crate::core::ics24_host::identifier::ClientId, + client_state: &Self::ClientState, + height: crate::Height, + connection_end: &crate::core::ics03_connection::connection::ConnectionEnd, + proof: &crate::core::ics23_commitment::commitment::CommitmentProofBytes, + root: &crate::core::ics23_commitment::commitment::CommitmentRoot, + port_id: &crate::core::ics24_host::identifier::PortId, + channel_id: &crate::core::ics24_host::identifier::ChannelId, + sequence: crate::core::ics04_channel::packet::Sequence, + commitment: crate::core::ics04_channel::commitment::PacketCommitment, + ) -> Result<(), Error> { + todo!() + } + + fn verify_packet_acknowledgement( + &self, + ctx: &dyn crate::core::ics26_routing::context::LightClientContext, + client_id: &crate::core::ics24_host::identifier::ClientId, + client_state: &Self::ClientState, + height: crate::Height, + connection_end: &crate::core::ics03_connection::connection::ConnectionEnd, + proof: &crate::core::ics23_commitment::commitment::CommitmentProofBytes, + root: &crate::core::ics23_commitment::commitment::CommitmentRoot, + port_id: &crate::core::ics24_host::identifier::PortId, + channel_id: &crate::core::ics24_host::identifier::ChannelId, + sequence: crate::core::ics04_channel::packet::Sequence, + ack: crate::core::ics04_channel::commitment::AcknowledgementCommitment, + ) -> Result<(), Error> { + todo!() + } + + fn verify_next_sequence_recv( + &self, + ctx: &dyn crate::core::ics26_routing::context::LightClientContext, + client_id: &crate::core::ics24_host::identifier::ClientId, + client_state: &Self::ClientState, + height: crate::Height, + connection_end: &crate::core::ics03_connection::connection::ConnectionEnd, + proof: &crate::core::ics23_commitment::commitment::CommitmentProofBytes, + root: &crate::core::ics23_commitment::commitment::CommitmentRoot, + port_id: &crate::core::ics24_host::identifier::PortId, + channel_id: &crate::core::ics24_host::identifier::ChannelId, + sequence: crate::core::ics04_channel::packet::Sequence, + ) -> Result<(), Error> { + todo!() + } + + fn verify_packet_receipt_absence( + &self, + ctx: &dyn crate::core::ics26_routing::context::LightClientContext, + client_id: &crate::core::ics24_host::identifier::ClientId, + client_state: &Self::ClientState, + height: crate::Height, + connection_end: &crate::core::ics03_connection::connection::ConnectionEnd, + proof: &crate::core::ics23_commitment::commitment::CommitmentProofBytes, + root: &crate::core::ics23_commitment::commitment::CommitmentRoot, + port_id: &crate::core::ics24_host::identifier::PortId, + channel_id: &crate::core::ics24_host::identifier::ChannelId, + sequence: crate::core::ics04_channel::packet::Sequence, + ) -> Result<(), Error> { + todo!() + } +} diff --git a/modules/src/clients/near/client_state.rs b/modules/src/clients/ics12_near/client_state.rs similarity index 100% rename from modules/src/clients/near/client_state.rs rename to 
modules/src/clients/ics12_near/client_state.rs diff --git a/modules/src/clients/near/error.rs b/modules/src/clients/ics12_near/error.rs similarity index 100% rename from modules/src/clients/near/error.rs rename to modules/src/clients/ics12_near/error.rs diff --git a/modules/src/clients/ics12_near/mod.rs b/modules/src/clients/ics12_near/mod.rs new file mode 100644 index 0000000000..3ce69ea9b2 --- /dev/null +++ b/modules/src/clients/ics12_near/mod.rs @@ -0,0 +1,3 @@ +pub mod client_def; +pub mod client_state; +pub mod error; diff --git a/modules/src/clients/mod.rs b/modules/src/clients/mod.rs index 135b6533eb..16cb5a606e 100644 --- a/modules/src/clients/mod.rs +++ b/modules/src/clients/mod.rs @@ -3,4 +3,4 @@ pub mod crypto_ops; pub mod ics07_tendermint; pub mod ics11_beefy; -pub mod near; +pub mod ics12_near; diff --git a/modules/src/clients/near/client_def.rs b/modules/src/clients/near/client_def.rs deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/modules/src/clients/near/mod.rs b/modules/src/clients/near/mod.rs deleted file mode 100644 index 8b13789179..0000000000 --- a/modules/src/clients/near/mod.rs +++ /dev/null @@ -1 +0,0 @@ - From 5c51886c4068aca602554a65764cccf08521e05f Mon Sep 17 00:00:00 2001 From: Blas Rodriguez Irizar Date: Mon, 23 May 2022 10:04:43 +0200 Subject: [PATCH 25/96] define/mock header --- modules/src/clients/ics12_near/client_def.rs | 5 ++++- modules/src/clients/ics12_near/header.rs | 14 ++++++++++++++ modules/src/clients/ics12_near/mod.rs | 7 ++++--- 3 files changed, 22 insertions(+), 4 deletions(-) create mode 100644 modules/src/clients/ics12_near/header.rs diff --git a/modules/src/clients/ics12_near/client_def.rs b/modules/src/clients/ics12_near/client_def.rs index 75e858c822..1a96360334 100644 --- a/modules/src/clients/ics12_near/client_def.rs +++ b/modules/src/clients/ics12_near/client_def.rs @@ -1,9 +1,12 @@ use crate::core::ics02_client::client_def::ClientDef; +use crate::clients::ics12_near::header::NearHeader; + +#[derive(Debug, Clone)] pub struct NearClient {} impl ClientDef for NearClient { - type Header; + type Header = NearHeader; type ClientState; diff --git a/modules/src/clients/ics12_near/header.rs b/modules/src/clients/ics12_near/header.rs new file mode 100644 index 0000000000..b4673fb0bb --- /dev/null +++ b/modules/src/clients/ics12_near/header.rs @@ -0,0 +1,14 @@ +use crate::core::ics02_client::header::Header; + +#[derive(Debug, Clone)] +pub struct NearHeader {} + +impl Header for NearHeader { + fn client_type(&self) -> crate::core::ics02_client::client_type::ClientType { + todo!() + } + + fn wrap_any(self) -> crate::core::ics02_client::header::AnyHeader { + todo!() + } +} diff --git a/modules/src/clients/ics12_near/mod.rs b/modules/src/clients/ics12_near/mod.rs index 3ce69ea9b2..82a2e8f112 100644 --- a/modules/src/clients/ics12_near/mod.rs +++ b/modules/src/clients/ics12_near/mod.rs @@ -1,3 +1,4 @@ -pub mod client_def; -pub mod client_state; -pub mod error; +mod client_def; +mod client_state; +mod error; +mod header; From c94226928b289b82aa29a6b4f49d7ee4579e4ec3 Mon Sep 17 00:00:00 2001 From: Blas Rodriguez Irizar Date: Mon, 23 May 2022 16:10:00 +0200 Subject: [PATCH 26/96] define error and crypto ops --- modules/src/clients/ics12_near/client_def.rs | 11 +++-- .../src/clients/ics12_near/client_state.rs | 43 +++++++++++++++++++ .../src/clients/ics12_near/consensus_state.rs | 25 +++++++++++ modules/src/clients/ics12_near/crypto_ops.rs | 22 ++++++++++ modules/src/clients/ics12_near/error.rs | 6 +++ 
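The commit-24 skeleton above declares the `ClientDef` associated types without binding them (`type Header;` and friends), which is only legal in a trait definition, not in an `impl`; the next commits pin each one to a concrete NEAR type. A standalone sketch of that pattern, with a minimal trait and types rather than the crate's actual `ClientDef`:

```rust
// Standalone sketch: associated types may be left unbound in a trait
// definition, but every impl has to assign them, which is what the
// follow-up commits do for NearClient.
trait ClientDef {
    type Header;
    type ClientState;
    fn verify_header(&self, _client_state: &Self::ClientState, _header: &Self::Header) {}
}

struct NearHeader;
struct NearClientState;
struct NearClient;

impl ClientDef for NearClient {
    type Header = NearHeader;           // bound in PATCH 25
    type ClientState = NearClientState; // bound in PATCH 27
}

fn main() {
    NearClient.verify_header(&NearClientState, &NearHeader);
}
```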
modules/src/clients/ics12_near/mod.rs | 2 + 6 files changed, 106 insertions(+), 3 deletions(-) create mode 100644 modules/src/clients/ics12_near/consensus_state.rs create mode 100644 modules/src/clients/ics12_near/crypto_ops.rs diff --git a/modules/src/clients/ics12_near/client_def.rs b/modules/src/clients/ics12_near/client_def.rs index 1a96360334..8e9a795764 100644 --- a/modules/src/clients/ics12_near/client_def.rs +++ b/modules/src/clients/ics12_near/client_def.rs @@ -2,17 +2,22 @@ use crate::core::ics02_client::client_def::ClientDef; use crate::clients::ics12_near::header::NearHeader; +use super::client_state::NearClientState; +use super::consensus_state::NearConsensusState; +use super::crypto_ops::NearCryptoOps; +use super::error::Error; + #[derive(Debug, Clone)] pub struct NearClient {} impl ClientDef for NearClient { type Header = NearHeader; - type ClientState; + type ClientState = NearClientState; - type ConsensusState; + type ConsensusState = NearConsensusState; - type Crypto; + type Crypto = NearCryptoOps; fn verify_header( &self, diff --git a/modules/src/clients/ics12_near/client_state.rs b/modules/src/clients/ics12_near/client_state.rs index e69de29bb2..0559d1641f 100644 --- a/modules/src/clients/ics12_near/client_state.rs +++ b/modules/src/clients/ics12_near/client_state.rs @@ -0,0 +1,43 @@ +use crate::core::ics02_client::client_state::ClientState; + +#[derive(Debug, Clone)] +pub struct NearClientState {} + +struct NearUpgradeOptions {} + +impl ClientState for NearClientState { + fn is_frozen(&self) -> bool { + self.frozen_height().is_some() + } + + type UpgradeOptions = NearUpgradeOptions; + + fn chain_id(&self) -> crate::core::ics24_host::identifier::ChainId { + todo!() + } + + fn client_type(&self) -> crate::core::ics02_client::client_type::ClientType { + todo!() + } + + fn latest_height(&self) -> crate::Height { + todo!() + } + + fn frozen_height(&self) -> Option { + todo!() + } + + fn upgrade( + self, + upgrade_height: crate::Height, + upgrade_options: Self::UpgradeOptions, + chain_id: crate::core::ics24_host::identifier::ChainId, + ) -> Self { + todo!() + } + + fn wrap_any(self) -> crate::core::ics02_client::client_state::AnyClientState { + todo!() + } +} diff --git a/modules/src/clients/ics12_near/consensus_state.rs b/modules/src/clients/ics12_near/consensus_state.rs new file mode 100644 index 0000000000..5474b1b47a --- /dev/null +++ b/modules/src/clients/ics12_near/consensus_state.rs @@ -0,0 +1,25 @@ +use crate::core::ics02_client::client_consensus::ConsensusState; +use crate::error::Error; + +use super::crypto_ops::NearCryptoOps; +pub struct NearConsensusState {} + +impl ConsensusState for NearConsensusState { + type Error = Error; + + type Crypto = NearCryptoOps; + + fn client_type(&self) -> crate::core::ics02_client::client_type::ClientType { + todo!() + } + + fn root(&self) -> &crate::core::ics23_commitment::commitment::CommitmentRoot { + todo!() + } + + fn wrap_any( + self, + ) -> crate::core::ics02_client::client_consensus::AnyConsensusState { + todo!() + } +} diff --git a/modules/src/clients/ics12_near/crypto_ops.rs b/modules/src/clients/ics12_near/crypto_ops.rs new file mode 100644 index 0000000000..cbdc01a341 --- /dev/null +++ b/modules/src/clients/ics12_near/crypto_ops.rs @@ -0,0 +1,22 @@ +use crate::clients::crypto_ops::crypto::CryptoOps; + +pub struct NearCryptoOps; + +impl CryptoOps for NearCryptoOps { + fn verify_membership_trie_proof( + root: &sp_core::H256, + proof: &[Vec], + key: &[u8], + value: &[u8], + ) -> Result<(), Error> { + todo!() + } + + fn 
verify_non_membership_trie_proof( + root: &sp_core::H256, + proof: &[Vec], + key: &[u8], + ) -> Result<(), Error> { + todo!() + } +} diff --git a/modules/src/clients/ics12_near/error.rs b/modules/src/clients/ics12_near/error.rs index e69de29bb2..b13ef54f5c 100644 --- a/modules/src/clients/ics12_near/error.rs +++ b/modules/src/clients/ics12_near/error.rs @@ -0,0 +1,6 @@ +use flex_error::define_error; + +define_error! { + #[derive(Debug, PartialEq, Eq)] + Error {} +} diff --git a/modules/src/clients/ics12_near/mod.rs b/modules/src/clients/ics12_near/mod.rs index 82a2e8f112..254036918e 100644 --- a/modules/src/clients/ics12_near/mod.rs +++ b/modules/src/clients/ics12_near/mod.rs @@ -1,4 +1,6 @@ mod client_def; mod client_state; +mod consensus_state; +mod crypto_ops; mod error; mod header; From ace17d450ffc12e8c49f66ec6ca3318771a4b107 Mon Sep 17 00:00:00 2001 From: Blas Rodriguez Irizar Date: Mon, 23 May 2022 20:35:25 +0200 Subject: [PATCH 27/96] some updates --- Cargo.lock | 45 ++++ modules/Cargo.toml | 4 +- modules/src/clients/ics12_near/client_def.rs | 3 +- .../src/clients/ics12_near/consensus_state.rs | 22 +- modules/src/clients/ics12_near/crypto_ops.rs | 7 +- modules/src/clients/ics12_near/header.rs | 19 +- modules/src/clients/ics12_near/mod.rs | 5 +- modules/src/clients/ics12_near/types.rs | 230 ++++++++++++++++++ modules/src/core/ics02_client/header.rs | 14 +- 9 files changed, 326 insertions(+), 23 deletions(-) create mode 100644 modules/src/clients/ics12_near/types.rs diff --git a/Cargo.lock b/Cargo.lock index b1fab2b4e3..7876341a32 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -431,6 +431,51 @@ dependencies = [ "memchr", ] +[[package]] +name = "borsh" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "15bf3650200d8bffa99015595e10f1fbd17de07abbc25bb067da79e769939bfa" +dependencies = [ + "borsh-derive", + "hashbrown 0.11.2", +] + +[[package]] +name = "borsh-derive" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6441c552f230375d18e3cc377677914d2ca2b0d36e52129fe15450a2dce46775" +dependencies = [ + "borsh-derive-internal", + "borsh-schema-derive-internal", + "proc-macro-crate 0.1.5", + "proc-macro2", + "syn", +] + +[[package]] +name = "borsh-derive-internal" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5449c28a7b352f2d1e592a8a28bf139bc71afb0764a14f3c02500935d8c44065" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "borsh-schema-derive-internal" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cdbd5696d8bfa21d53d9fe39a714a18538bad11492a42d066dbbc395fb1951c0" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "bstr" version = "0.2.17" diff --git a/modules/Cargo.toml b/modules/Cargo.toml index 516c20b9f0..5505ea0c26 100644 --- a/modules/Cargo.toml +++ b/modules/Cargo.toml @@ -39,7 +39,8 @@ mocks = ["tendermint-testgen", "clock", "std", "sp-io", "sp-io/std"] [dependencies] # Proto definitions for all IBC-related interfaces, e.g., connections or channels. 
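The `borsh` dependency added to `Cargo.lock` above and to `modules/Cargo.toml` below is what the NEAR types in this series use for (de)serialization, since NEAR encodes its light-client structures with Borsh. A minimal standalone round-trip, assuming the borsh 0.9 API pinned here:

```rust
use borsh::{BorshDeserialize, BorshSerialize};

// Mirrors the fixed-size hash newtype used by the NEAR types in this series.
#[derive(Debug, PartialEq, Eq, BorshSerialize, BorshDeserialize)]
struct CryptoHash([u8; 32]);

fn main() -> std::io::Result<()> {
    let hash = CryptoHash([7u8; 32]);

    // Borsh writes fixed-size arrays as raw bytes with no length prefix,
    // so a CryptoHash always serializes to exactly 32 bytes.
    let bytes = hash.try_to_vec()?;
    assert_eq!(bytes.len(), 32);

    let decoded = CryptoHash::try_from_slice(&bytes)?;
    assert_eq!(decoded, hash);
    Ok(())
}
```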
-ibc-proto = { version = "0.18.0", path = "../proto", default-features = false } +borsh = { version = "0.9.3", default-features = false } +ibc-proto = { version = "0.17.1", path = "../proto", default-features = false } ics23 = { version = "=0.8.0-alpha", default-features = false } time = { version = "0.3", default-features = false } serde_derive = { version = "1.0.104", default-features = false } @@ -62,6 +63,7 @@ sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot- pallet-mmr-primitives = { package = "sp-mmr-primitives", git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.22", default-features = false } beefy-primitives = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.22", default-features = false } codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false } +sp-std = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.22", default-features = false } sp-trie = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.22", default-features = false } sp-io = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.22", optional = true } diff --git a/modules/src/clients/ics12_near/client_def.rs b/modules/src/clients/ics12_near/client_def.rs index 8e9a795764..2c9aef2050 100644 --- a/modules/src/clients/ics12_near/client_def.rs +++ b/modules/src/clients/ics12_near/client_def.rs @@ -1,11 +1,10 @@ use crate::core::ics02_client::client_def::ClientDef; -use crate::clients::ics12_near::header::NearHeader; - use super::client_state::NearClientState; use super::consensus_state::NearConsensusState; use super::crypto_ops::NearCryptoOps; use super::error::Error; +use super::header::NearHeader; #[derive(Debug, Clone)] pub struct NearClient {} diff --git a/modules/src/clients/ics12_near/consensus_state.rs b/modules/src/clients/ics12_near/consensus_state.rs index 5474b1b47a..c9d4ff3d20 100644 --- a/modules/src/clients/ics12_near/consensus_state.rs +++ b/modules/src/clients/ics12_near/consensus_state.rs @@ -1,25 +1,29 @@ -use crate::core::ics02_client::client_consensus::ConsensusState; +use crate::core::ics02_client::client_consensus::{AnyConsensusState, ConsensusState}; +use crate::core::ics02_client::client_type::ClientType; +use crate::core::ics23_commitment::commitment::CommitmentRoot; use crate::error::Error; use super::crypto_ops::NearCryptoOps; -pub struct NearConsensusState {} + +#[derive(Debug, Clone)] +pub struct NearConsensusState { + commitment_root: CommitmentRoot, +} impl ConsensusState for NearConsensusState { type Error = Error; type Crypto = NearCryptoOps; - fn client_type(&self) -> crate::core::ics02_client::client_type::ClientType { - todo!() + fn client_type(&self) -> ClientType { + ClientType::Near } - fn root(&self) -> &crate::core::ics23_commitment::commitment::CommitmentRoot { - todo!() + fn root(&self) -> &CommitmentRoot { + &self.commitment_root } - fn wrap_any( - self, - ) -> crate::core::ics02_client::client_consensus::AnyConsensusState { + fn wrap_any(self) -> AnyConsensusState { todo!() } } diff --git a/modules/src/clients/ics12_near/crypto_ops.rs b/modules/src/clients/ics12_near/crypto_ops.rs index cbdc01a341..b00c6092f4 100644 --- a/modules/src/clients/ics12_near/crypto_ops.rs +++ b/modules/src/clients/ics12_near/crypto_ops.rs @@ -1,6 +1,9 @@ -use crate::clients::crypto_ops::crypto::CryptoOps; +use crate::clients::{crypto_ops::crypto::CryptoOps, ics12_near::error::Error}; -pub struct NearCryptoOps; +#[derive(Debug, 
Clone)] +pub struct NearCryptoOps { + // _p: PhantomData crate::core::ics02_client::client_type::ClientType { - todo!() + fn client_type(&self) -> ClientType { + ClientType::Near } - fn wrap_any(self) -> crate::core::ics02_client::header::AnyHeader { - todo!() + fn wrap_any(self) -> AnyHeader { + AnyHeader::Near(self.inner.clone()) } } diff --git a/modules/src/clients/ics12_near/mod.rs b/modules/src/clients/ics12_near/mod.rs index 254036918e..ebfc70b798 100644 --- a/modules/src/clients/ics12_near/mod.rs +++ b/modules/src/clients/ics12_near/mod.rs @@ -2,5 +2,6 @@ mod client_def; mod client_state; mod consensus_state; mod crypto_ops; -mod error; -mod header; +pub mod error; +pub mod header; +pub mod types; diff --git a/modules/src/clients/ics12_near/types.rs b/modules/src/clients/ics12_near/types.rs new file mode 100644 index 0000000000..aaadbb14b3 --- /dev/null +++ b/modules/src/clients/ics12_near/types.rs @@ -0,0 +1,230 @@ +use borsh::maybestd::{io::Write, string::String}; +use sp_std::vec::Vec; + +use borsh::{BorshDeserialize, BorshSerialize}; +use sp_core::ed25519::{Public as Ed25519Public, Signature as Ed25519Signature}; + +#[derive(Debug)] +pub struct ConversionError(String); +#[derive(Debug, Clone)] +pub struct PublicKey(pub [u8; 32]); + +#[derive(Debug, Clone)] +pub enum Signature { + Ed25519(Ed25519Signature), +} + +#[derive( + Debug, Ord, PartialOrd, PartialEq, Eq, Hash, Clone, Copy, BorshSerialize, BorshDeserialize, +)] +pub struct CryptoHash(pub [u8; 32]); + +impl Signature { + const LEN: usize = 64; + + pub fn from_raw(raw: &[u8]) -> Self { + Self::Ed25519(Ed25519Signature::from_raw(raw.try_into().unwrap())) + } + + pub fn as_bytes(&self) -> &[u8] { + match self { + Self::Ed25519(inner) => &inner.0, + } + } +} + +impl PublicKey { + const LEN: usize = 32; + + pub fn from_raw(raw: &[u8]) -> Self { + Self(raw.try_into().unwrap()) + } +} + +impl TryFrom<&[u8]> for CryptoHash { + type Error = ConversionError; + fn try_from(v: &[u8]) -> Result { + if v.len() != 32 { + return Err(ConversionError("wrong size".into())); + } + let inner: [u8; 32] = v.try_into().unwrap(); + Ok(CryptoHash(inner)) + } +} + +impl AsRef<[u8]> for CryptoHash { + fn as_ref(&self) -> &[u8] { + self.0.as_ref() + } +} + +impl From<&PublicKey> for Ed25519Public { + fn from(pubkey: &PublicKey) -> Ed25519Public { + Ed25519Public(pubkey.0) + } +} + +impl TryFrom<&[u8]> for PublicKey { + type Error = ConversionError; + fn try_from(v: &[u8]) -> Result { + if v.len() != 32 { + return Err(ConversionError("wrong size".into())); + } + let inner: [u8; 32] = v.try_into().unwrap(); + Ok(PublicKey(inner)) + } +} + +pub type BlockHeight = u64; +pub type AccountId = String; +pub type Balance = u128; +pub type Gas = u64; + +pub type MerkleHash = CryptoHash; + +#[derive(Debug, Clone, BorshDeserialize)] +pub struct MerklePath(pub Vec); + +#[derive(Debug, Clone)] +pub struct LightClientBlockLiteView { + pub prev_block_hash: CryptoHash, + pub inner_rest_hash: CryptoHash, + pub inner_lite: BlockHeaderInnerLiteView, +} + +#[derive(Debug, Clone, BorshSerialize, BorshDeserialize)] +pub struct LightClientBlockView { + pub prev_block_hash: CryptoHash, + pub next_block_inner_hash: CryptoHash, + pub inner_lite: BlockHeaderInnerLiteView, + pub inner_rest_hash: CryptoHash, + pub next_bps: Option>, + pub approvals_after_next: Vec>, +} + +#[derive(Debug, Clone, BorshSerialize, BorshDeserialize)] +pub struct BlockHeaderInnerLiteView { + pub height: BlockHeight, + pub epoch_id: CryptoHash, + pub next_epoch_id: CryptoHash, + pub prev_state_root: 
CryptoHash, + pub outcome_root: CryptoHash, + pub timestamp: u64, + pub timestamp_nanosec: u64, + pub next_bp_hash: CryptoHash, + pub block_merkle_root: CryptoHash, +} + +/// For some reason, when calculating the hash of the current block +/// `timestamp_nanosec` is ignored +#[derive(Debug, Clone, BorshSerialize, BorshDeserialize)] +pub struct BlockHeaderInnerLiteViewFinal { + pub height: BlockHeight, + pub epoch_id: CryptoHash, + pub next_epoch_id: CryptoHash, + pub prev_state_root: CryptoHash, + pub outcome_root: CryptoHash, + pub timestamp: u64, + pub next_bp_hash: CryptoHash, + pub block_merkle_root: CryptoHash, +} + +#[derive(Debug, BorshDeserialize, BorshSerialize)] +pub enum ApprovalInner { + Endorsement(CryptoHash), + Skip(BlockHeight), +} + +#[derive(Debug, Clone, BorshSerialize, BorshDeserialize)] +pub enum ValidatorStakeView { + V1(ValidatorStakeViewV1), +} +#[derive(Debug, Clone, BorshSerialize, BorshDeserialize)] +pub struct ValidatorStakeViewV1 { + pub account_id: AccountId, + pub public_key: PublicKey, + pub stake: Balance, +} + +#[derive(Debug, Clone, BorshDeserialize)] +pub struct ExecutionOutcomeView { + /// Logs from this transaction or receipt. + pub logs: Vec, + /// Receipt IDs generated by this transaction or receipt. + pub receipt_ids: Vec, + /// The amount of the gas burnt by the given transaction or receipt. + pub gas_burnt: Gas, + /// The amount of tokens burnt corresponding to the burnt gas amount. + /// This value doesn't always equal to the `gas_burnt` multiplied by the gas price, because + /// the prepaid gas price might be lower than the actual gas price and it creates a deficit. + pub tokens_burnt: u128, + /// The id of the account on which the execution happens. For transaction this is signer_id, + /// for receipt this is receiver_id. + pub executor_id: AccountId, + /// Execution status. Contains the result in case of successful execution. 
+ pub status: Vec, // NOTE(blas): no need to deserialize this one (in order to avoid having to define too many unnecessary structs) +} + +#[derive(Debug, BorshDeserialize)] +pub struct OutcomeProof { + pub proof: Vec, + pub block_hash: CryptoHash, + pub id: CryptoHash, + pub outcome: ExecutionOutcomeView, +} + +#[cfg_attr(feature = "deepsize_feature", derive(deepsize::DeepSizeOf))] +#[derive(Debug, Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize)] +pub enum Direction { + Left, + Right, +} + +impl ValidatorStakeView { + pub fn into_validator_stake(self) -> ValidatorStakeViewV1 { + match self { + Self::V1(inner) => inner, + } + } +} +#[cfg_attr(feature = "deepsize_feature", derive(deepsize::DeepSizeOf))] +#[derive(Debug, Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize)] +pub struct MerklePathItem { + pub hash: MerkleHash, + pub direction: Direction, +} + +impl BorshDeserialize for Signature { + fn deserialize(buf: &mut &[u8]) -> Result { + let _key_type: [u8; 1] = BorshDeserialize::deserialize(buf)?; + let array: [u8; Self::LEN] = BorshDeserialize::deserialize(buf)?; + Ok(Signature::Ed25519(Ed25519Signature::from_raw(array))) + } +} + +impl BorshSerialize for Signature { + fn serialize(&self, writer: &mut W) -> Result<(), borsh::maybestd::io::Error> { + match self { + Signature::Ed25519(signature) => { + BorshSerialize::serialize(&0u8, writer)?; + writer.write_all(&signature.0)?; + } + } + Ok(()) + } +} + +impl BorshSerialize for PublicKey { + fn serialize(&self, writer: &mut W) -> Result<(), borsh::maybestd::io::Error> { + BorshSerialize::serialize(&0u8, writer)?; + writer.write_all(&self.0)?; + Ok(()) + } +} + +impl BorshDeserialize for PublicKey { + fn deserialize(buf: &mut &[u8]) -> Result { + let _key_type: [u8; 1] = BorshDeserialize::deserialize(buf)?; + Ok(Self(BorshDeserialize::deserialize(buf)?)) + } +} diff --git a/modules/src/core/ics02_client/header.rs b/modules/src/core/ics02_client/header.rs index 346d80dfcd..31f1546a0f 100644 --- a/modules/src/core/ics02_client/header.rs +++ b/modules/src/core/ics02_client/header.rs @@ -7,6 +7,7 @@ use tendermint_proto::Protobuf; use crate::clients::ics07_tendermint::header::{decode_header, Header as TendermintHeader}; use crate::clients::ics11_beefy::header::{decode_header as decode_beefy_header, BeefyHeader}; +use crate::clients::ics12_near::types::LightClientBlockView; use crate::core::ics02_client::client_type::ClientType; use crate::core::ics02_client::error::Error; #[cfg(any(test, feature = "mocks"))] @@ -17,6 +18,7 @@ use crate::Height; pub const TENDERMINT_HEADER_TYPE_URL: &str = "/ibc.lightclients.tendermint.v1.Header"; pub const BEEFY_HEADER_TYPE_URL: &str = "/ibc.lightclients.beefy.v1.Header"; +pub const NEAR_HEADER_TYPE_URL: &str = "/ibc.lightclients.near.v1.Header"; pub const MOCK_HEADER_TYPE_URL: &str = "/ibc.mock.Header"; /// Abstract of consensus state update information @@ -34,6 +36,8 @@ pub enum AnyHeader { Tendermint(TendermintHeader), #[serde(skip)] Beefy(BeefyHeader), + #[serde(skip)] + Near(LightClientBlockView), #[cfg(any(test, feature = "mocks"))] Mock(MockHeader), } @@ -43,6 +47,7 @@ impl AnyHeader { match self { Self::Tendermint(header) => header.height(), Self::Beefy(_header) => Default::default(), + Self::Near(_header) => Default::default(), #[cfg(any(test, feature = "mocks"))] Self::Mock(header) => header.height(), } @@ -52,6 +57,7 @@ impl AnyHeader { match self { Self::Tendermint(header) => header.timestamp(), Self::Beefy(_header) => Default::default(), + Self::Near(_header) => Default::default(), 
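The hand-rolled `BorshSerialize`/`BorshDeserialize` impls for `Signature` and `PublicKey` above prepend a one-byte key-type tag (`0u8`, i.e. Ed25519) before the raw bytes, matching NEAR's encoding. A standalone sketch of the resulting wire layout, with the type re-declared locally rather than imported from the patch: one tag byte plus 32 key bytes for a public key (and 1 + 64 for a signature).

```rust
use borsh::{maybestd::io::Write, BorshDeserialize, BorshSerialize};

struct PublicKey([u8; 32]);

impl BorshSerialize for PublicKey {
    fn serialize<W: Write>(&self, writer: &mut W) -> Result<(), borsh::maybestd::io::Error> {
        // key-type tag: 0 = Ed25519, followed by the raw 32-byte key
        BorshSerialize::serialize(&0u8, writer)?;
        writer.write_all(&self.0)
    }
}

impl BorshDeserialize for PublicKey {
    fn deserialize(buf: &mut &[u8]) -> Result<Self, borsh::maybestd::io::Error> {
        let _key_type: [u8; 1] = BorshDeserialize::deserialize(buf)?;
        Ok(Self(BorshDeserialize::deserialize(buf)?))
    }
}

fn main() -> Result<(), borsh::maybestd::io::Error> {
    let pk = PublicKey([9u8; 32]);
    let bytes = pk.try_to_vec()?;
    assert_eq!(bytes.len(), 33); // 1 tag byte + 32 key bytes
    assert_eq!(bytes[0], 0);     // Ed25519 tag

    let decoded = PublicKey::try_from_slice(&bytes)?;
    assert_eq!(decoded.0, pk.0);
    Ok(())
}
```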
#[cfg(any(test, feature = "mocks"))] Self::Mock(header) => header.timestamp(), } @@ -63,6 +69,7 @@ impl Header for AnyHeader { match self { Self::Tendermint(header) => header.client_type(), Self::Beefy(header) => header.client_type(), + Self::Near(header) => header.client_type(), #[cfg(any(test, feature = "mocks"))] Self::Mock(header) => header.client_type(), } @@ -130,7 +137,12 @@ impl From for Any { .encode_vec() .expect("encoding to `Any` from `AnyHeader::Beefy`"), }, - + AnyHeader::Near(header) => Any { + type_url: NEAR_HEADER_TYPE_URL.to_string(), + value: header + .encode_vec() + .encode_vec("encodign to `Any` from AnyHeader::Near"), + }, #[cfg(any(test, feature = "mocks"))] AnyHeader::Mock(header) => Any { type_url: MOCK_HEADER_TYPE_URL.to_string(), From 972ec0156e3da2ca0d0a83e5a040317c3221b66b Mon Sep 17 00:00:00 2001 From: Blas Rodriguez Irizar Date: Mon, 23 May 2022 21:30:24 +0200 Subject: [PATCH 28/96] minor updates --- .../src/clients/ics12_near/client_state.rs | 30 ++++++++++++------- modules/src/clients/ics12_near/types.rs | 11 +++++++ 2 files changed, 31 insertions(+), 10 deletions(-) diff --git a/modules/src/clients/ics12_near/client_state.rs b/modules/src/clients/ics12_near/client_state.rs index 0559d1641f..68c8be1d82 100644 --- a/modules/src/clients/ics12_near/client_state.rs +++ b/modules/src/clients/ics12_near/client_state.rs @@ -1,7 +1,15 @@ -use crate::core::ics02_client::client_state::ClientState; +use crate::core::{ + ics02_client::{client_state::ClientState, client_type::ClientType}, + ics24_host::identifier::ChainId, +}; + +use super::types::LightClientBlockView; #[derive(Debug, Clone)] -pub struct NearClientState {} +pub struct NearClientState { + chain_id: ChainId, + head: LightClientBlockView, +} struct NearUpgradeOptions {} @@ -12,29 +20,31 @@ impl ClientState for NearClientState { type UpgradeOptions = NearUpgradeOptions; - fn chain_id(&self) -> crate::core::ics24_host::identifier::ChainId { - todo!() + fn chain_id(&self) -> ChainId { + self.chain_id.clone() } - fn client_type(&self) -> crate::core::ics02_client::client_type::ClientType { - todo!() + fn client_type(&self) -> ClientType { + ClientType::Near } fn latest_height(&self) -> crate::Height { - todo!() + self.head.get_height() } fn frozen_height(&self) -> Option { - todo!() + // TODO: validate this + Some(self.head.get_height()) } fn upgrade( self, upgrade_height: crate::Height, upgrade_options: Self::UpgradeOptions, - chain_id: crate::core::ics24_host::identifier::ChainId, + chain_id: ChainId, ) -> Self { - todo!() + // TODO: validate this -- not sure how to process the given parameters in this case + self } fn wrap_any(self) -> crate::core::ics02_client::client_state::AnyClientState { diff --git a/modules/src/clients/ics12_near/types.rs b/modules/src/clients/ics12_near/types.rs index aaadbb14b3..b75e5a2604 100644 --- a/modules/src/clients/ics12_near/types.rs +++ b/modules/src/clients/ics12_near/types.rs @@ -4,6 +4,8 @@ use sp_std::vec::Vec; use borsh::{BorshDeserialize, BorshSerialize}; use sp_core::ed25519::{Public as Ed25519Public, Signature as Ed25519Signature}; +use crate::Height; + #[derive(Debug)] pub struct ConversionError(String); #[derive(Debug, Clone)] @@ -228,3 +230,12 @@ impl BorshDeserialize for PublicKey { Ok(Self(BorshDeserialize::deserialize(buf)?)) } } + +impl LightClientBlockView { + fn get_height(&self) -> Height { + Height { + revision_number: 0, + revision_height: self.inner_lite.height, + } + } +} From 159f430ddf81a9ddd54f780aa33661e8c3f6056c Mon Sep 17 00:00:00 2001 From: Blas 
Rodriguez Irizar Date: Tue, 24 May 2022 22:43:10 +0200 Subject: [PATCH 29/96] comments from Seun --- modules/src/clients/ics12_near/client_def.rs | 28 +++++++++++++++++++- modules/src/clients/ics12_near/types.rs | 6 +++++ 2 files changed, 33 insertions(+), 1 deletion(-) diff --git a/modules/src/clients/ics12_near/client_def.rs b/modules/src/clients/ics12_near/client_def.rs index 2c9aef2050..b046b9e21d 100644 --- a/modules/src/clients/ics12_near/client_def.rs +++ b/modules/src/clients/ics12_near/client_def.rs @@ -10,14 +10,31 @@ use super::header::NearHeader; pub struct NearClient {} impl ClientDef for NearClient { + /// The data that we need to update the [`ClientState`] to a new block height type Header = NearHeader; + /// The data that we need to know, to validate incoming headers and update the state + /// of our [`ClientState`]. Ususally this will store: + /// - The current epoch + /// - The current validator set + /// + /// ```rust,no_run + /// pub struct NearLightClientState { + /// head: LightClientBlockView, + /// current_validators: Vec, + /// next_validators: Vec, + /// } + /// ``` type ClientState = NearClientState; + /// This is usually just two things, that should be derived from the header: + /// - The ibc commitment root hash as described by ics23 (possibly from tx outcome/ state proof) + /// - The timestamp of the header. type ConsensusState = NearConsensusState; type Crypto = NearCryptoOps; + // rehydrate client from its own storage, then call this function fn verify_header( &self, ctx: &dyn crate::core::ics26_routing::context::LightClientContext, @@ -25,6 +42,7 @@ impl ClientDef for NearClient { client_state: Self::ClientState, header: Self::Header, ) -> Result<(), Error> { + // your light client, shouldn't do storage anymore, it should just do verification here. todo!() } @@ -41,7 +59,15 @@ impl ClientDef for NearClient { ), Error, > { - todo!() + // 1. create new client state from this header, return that. + // 2. as well as all the neccessary consensus states. + // + // + // []--[]--[]--[]--[]--[]--[]--[]--[]--[] + // 11 12 13 14 15 16 17 18 19 20 <- block merkle root + // ^ ^ + // | <-------consensus states-----> | + // current state new state } fn update_state_on_misbehaviour( diff --git a/modules/src/clients/ics12_near/types.rs b/modules/src/clients/ics12_near/types.rs index b75e5a2604..f0053e6661 100644 --- a/modules/src/clients/ics12_near/types.rs +++ b/modules/src/clients/ics12_near/types.rs @@ -114,6 +114,7 @@ pub struct BlockHeaderInnerLiteView { pub timestamp: u64, pub timestamp_nanosec: u64, pub next_bp_hash: CryptoHash, + // lets assume that this is the merkle root of all blocks in this epoch, so far. pub block_merkle_root: CryptoHash, } @@ -169,10 +170,15 @@ pub struct ExecutionOutcomeView { #[derive(Debug, BorshDeserialize)] pub struct OutcomeProof { + /// this is the block merkle proof. pub proof: Vec, + /// this is the hash of the block. pub block_hash: CryptoHash, + /// transaction hash pub id: CryptoHash, pub outcome: ExecutionOutcomeView, + // TODO: where are the proofs for the block that this tx belongs + // in the block_merkle_root of our light client. 
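The `TODO` above asks where the proof lives that ties a transaction outcome to the light client's `block_merkle_root`: it is the `proof: Vec<MerklePathItem>` carried by `OutcomeProof` (plus the block-level path), where each item holds a sibling hash and the side it sits on. A standalone sketch of folding such a path from a leaf up to an expected root, assuming SHA-256 over the concatenated pair (the usual NEAR convention) and using the `sha2` crate as a stand-in hasher:

```rust
use sha2::{Digest, Sha256};

#[derive(Clone, Copy)]
enum Direction {
    Left,
    Right,
}

struct MerklePathItem {
    hash: [u8; 32],
    direction: Direction, // side on which the *sibling* hash sits
}

fn combine(left: &[u8; 32], right: &[u8; 32]) -> [u8; 32] {
    let mut hasher = Sha256::new();
    hasher.update(left);
    hasher.update(right);
    hasher.finalize().into()
}

/// Fold the Merkle path from the leaf hash up to the root and compare.
fn verify_path(leaf: [u8; 32], path: &[MerklePathItem], expected_root: &[u8; 32]) -> bool {
    let computed = path.iter().fold(leaf, |acc, item| match item.direction {
        Direction::Left => combine(&item.hash, &acc),
        Direction::Right => combine(&acc, &item.hash),
    });
    &computed == expected_root
}

fn main() {
    // Two-leaf tree: root = H(leaf_a || leaf_b); prove leaf_a with sibling leaf_b on the right.
    let leaf_a = [1u8; 32];
    let leaf_b = [2u8; 32];
    let root = combine(&leaf_a, &leaf_b);
    let path = [MerklePathItem { hash: leaf_b, direction: Direction::Right }];
    assert!(verify_path(leaf_a, &path, &root));
}
```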
} #[cfg_attr(feature = "deepsize_feature", derive(deepsize::DeepSizeOf))] From fd085412966395cc7f46b55f5973302684ee7109 Mon Sep 17 00:00:00 2001 From: Blas Rodriguez Irizar Date: Tue, 24 May 2022 23:22:56 +0200 Subject: [PATCH 30/96] update verify header --- modules/src/clients/ics12_near/client_def.rs | 124 +++++++++++++++++- .../src/clients/ics12_near/client_state.rs | 25 +++- modules/src/clients/ics12_near/error.rs | 27 +++- modules/src/clients/ics12_near/header.rs | 6 + 4 files changed, 179 insertions(+), 3 deletions(-) diff --git a/modules/src/clients/ics12_near/client_def.rs b/modules/src/clients/ics12_near/client_def.rs index b046b9e21d..e082626dd7 100644 --- a/modules/src/clients/ics12_near/client_def.rs +++ b/modules/src/clients/ics12_near/client_def.rs @@ -5,6 +5,7 @@ use super::consensus_state::NearConsensusState; use super::crypto_ops::NearCryptoOps; use super::error::Error; use super::header::NearHeader; +use super::types::{ApprovalInner, CryptoHash, LightClientBlockView, ValidatorStakeView}; #[derive(Debug, Clone)] pub struct NearClient {} @@ -43,7 +44,7 @@ impl ClientDef for NearClient { header: Self::Header, ) -> Result<(), Error> { // your light client, shouldn't do storage anymore, it should just do verification here. - todo!() + validate_light_block(&header, &client_state) } fn update_state( @@ -68,6 +69,8 @@ impl ClientDef for NearClient { // ^ ^ // | <-------consensus states-----> | // current state new state + + todo!() } fn update_state_on_misbehaviour( @@ -232,3 +235,122 @@ impl ClientDef for NearClient { todo!() } } + +pub fn validate_light_block( + header: &NearHeader, + client_state: NearClientState, +) -> Result<(), Error> { + //The light client updates its head with the information from LightClientBlockView iff: + + // 1. The height of the block is higher than the height of the current head; + // 2. The epoch of the block is equal to the epoch_id or next_epoch_id known for the current head; + // 3. If the epoch of the block is equal to the next_epoch_id of the head, then next_bps is not None; + // 4. approvals_after_next contain valid signatures on approval_message from the block producers of the corresponding + // epoch + // 5. The signatures present in approvals_after_next correspond to more than 2/3 of the total stake (see next section). + // 6. If next_bps is not none, sha256(borsh(next_bps)) corresponds to the next_bp_hash in inner_lite. + + // QUESTION: do we also want to pass the block hash received from the RPC? 
+ // it's not on the spec, but it's an extra validation + + let new_block_view = header.get_light_client_block_view(); + let current_block_view = client_state.get_head().get_light_client_block_view(); + let (_current_block_hash, _next_block_hash, approval_message) = + reconstruct_light_client_block_view_fields::(new_block_view)?; + + // (1) + if new_block_view.inner_lite.height <= current_block_view.inner_lite.height { + return Err(Error::HeightTooOld); + } + + // (2) + if ![ + current_block_view.inner_lite.epoch_id, + current_block_view.inner_lite.next_epoch_id, + ] + .contains(&new_block_view.inner_lite.epoch_id) + { + return Err(Error::InvalidEpoch); + } + + // (3) + if new_block_view.inner_lite.epoch_id == current_block_view.inner_lite.next_epoch_id + && new_block_view.next_bps.is_none() + { + return Err(Error::UnavailableBlockProducers); + } + + // (4) and (5) + let mut total_stake = 0; + let mut approved_stake = 0; + + let epoch_block_producers = client_state + .get_validators_by_epoch(&new_block_view.inner_lite.epoch_id) + .ok_or(Error::InvalidEpoch)?; + + for (maybe_signature, block_producer) in new_block_view + .approvals_after_next + .iter() + .zip(epoch_block_producers.iter()) + { + let bp_stake_view = block_producer.clone().into_validator_stake(); + let bp_stake = bp_stake_view.stake; + total_stake += bp_stake; + + if maybe_signature.is_none() { + continue; + } + + approved_stake += bp_stake; + + let validator_public_key = bp_stake_view.public_key.clone(); + if !maybe_signature + .as_ref() + .unwrap() + .verify(&approval_message, validator_public_key.clone()) + { + return Err(Error::InvalidSignature); + } + } + + let threshold = total_stake * 2 / 3; + if approved_stake <= threshold { + return Err(Error::InsufficientStakedAmount); + } + + // # (6) + if new_block_view.next_bps.is_some() { + let new_block_view_next_bps_serialized = + new_block_view.next_bps.as_deref().unwrap().try_to_vec()?; + if D::digest(new_block_view_next_bps_serialized).as_slice() + != new_block_view.inner_lite.next_bp_hash.as_ref() + { + return Err(Error::SerializationError); + } + } + Ok(()) +} + +pub fn reconstruct_light_client_block_view_fields( + block_view: &LightClientBlockView, +) -> Result<(CryptoHash, CryptoHash, Vec), Error> { + let current_block_hash = block_view.current_block_hash::(); + let next_block_hash = + next_block_hash::(block_view.next_block_inner_hash, current_block_hash); + let approval_message = [ + ApprovalInner::Endorsement(next_block_hash).try_to_vec()?, + (block_view.inner_lite.height + 2).to_le().try_to_vec()?, + ] + .concat(); + Ok((current_block_hash, next_block_hash, approval_message)) +} + +pub(crate) fn next_block_hash( + next_block_inner_hash: CryptoHash, + current_block_hash: CryptoHash, +) -> CryptoHash { + D::digest([next_block_inner_hash.as_ref(), current_block_hash.as_ref()].concat()) + .as_slice() + .try_into() + .expect("Could not hash the next block") +} diff --git a/modules/src/clients/ics12_near/client_state.rs b/modules/src/clients/ics12_near/client_state.rs index 68c8be1d82..14b1caf67a 100644 --- a/modules/src/clients/ics12_near/client_state.rs +++ b/modules/src/clients/ics12_near/client_state.rs @@ -3,16 +3,39 @@ use crate::core::{ ics24_host::identifier::ChainId, }; -use super::types::LightClientBlockView; +use super::types::{CryptoHash, LightClientBlockView, ValidatorStakeView}; #[derive(Debug, Clone)] pub struct NearClientState { chain_id: ChainId, head: LightClientBlockView, + current_epoch: CryptoHash, + next_epoch: CryptoHash, + current_validators: Vec, + 
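`reconstruct_light_client_block_view_fields` above assembles the `approval_message` that block producers sign: the Borsh encoding of `ApprovalInner::Endorsement(next_block_hash)` (one variant-tag byte plus the 32-byte hash) followed by the target height `height + 2` as 8 little-endian bytes. A standalone sketch of that 41-byte layout; `to_le_bytes()` stands in for the Borsh `u64` encoding used in the diff.

```rust
use borsh::BorshSerialize;

// Local mirror of the ApprovalInner enum above; the Borsh variant tag is the
// u8 index of the variant (0 = Endorsement, 1 = Skip).
#[derive(BorshSerialize)]
enum ApprovalInner {
    Endorsement([u8; 32]),
    #[allow(dead_code)]
    Skip(u64),
}

fn approval_message(next_block_hash: [u8; 32], height: u64) -> std::io::Result<Vec<u8>> {
    Ok([
        ApprovalInner::Endorsement(next_block_hash).try_to_vec()?, // 1 + 32 bytes
        (height + 2).to_le_bytes().to_vec(),                       // 8 bytes, little endian
    ]
    .concat())
}

fn main() -> std::io::Result<()> {
    let msg = approval_message([0xAB; 32], 100)?;
    assert_eq!(msg.len(), 1 + 32 + 8);
    assert_eq!(msg[0], 0); // Endorsement variant tag
    assert_eq!(&msg[33..], (100u64 + 2).to_le_bytes().as_slice());
    Ok(())
}
```

The `next_block_hash` helper shown in the diff is simply the digest of `next_block_inner_hash ++ current_block_hash`; the `current_block_hash` helper is not part of this diff, but per the NEAR light-client spec it nests SHA-256 over the Borsh-encoded inner-lite view, `inner_rest_hash`, and `prev_block_hash`.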
next_validators: Vec, } struct NearUpgradeOptions {} +impl NearClientState { + pub fn get_validators_by_epoch( + &self, + epoch_id: &CryptoHash, + ) -> Option<&Vec> { + if epoch_id == self.current_epoch { + Some(&self.current_validators) + } else if epoch_id == self.next_epoch { + Some(&self.next_validators) + } else { + None + } + } + + pub fn get_head(&self) -> &LightClientBlockView { + &self.head + } +} + impl ClientState for NearClientState { fn is_frozen(&self) -> bool { self.frozen_height().is_some() diff --git a/modules/src/clients/ics12_near/error.rs b/modules/src/clients/ics12_near/error.rs index b13ef54f5c..44ac7e3a36 100644 --- a/modules/src/clients/ics12_near/error.rs +++ b/modules/src/clients/ics12_near/error.rs @@ -1,6 +1,31 @@ +use super::types::CryptoHash; use flex_error::define_error; define_error! { #[derive(Debug, PartialEq, Eq)] - Error {} + Error { + InvalidEpoch + { epoch_id: CryptoHash } + | _ | { "invalid epoch id" }, + HeightTooOld + | e | { format_args!( + "height too old") + }, + InvalidSignature + | e | { format_args!( + "invalid signature") + }, + InsufficientStakedAmount + | e | { format_args!( + "insufficient staked amount") + }, + SerializationError + | e | { format_args!( + "serialization error") + }, + UnavailableBlockProducers + | e | { format_args!( + "unavailable block producers") + }, + } } diff --git a/modules/src/clients/ics12_near/header.rs b/modules/src/clients/ics12_near/header.rs index 76cf05b0d5..148002bdb4 100644 --- a/modules/src/clients/ics12_near/header.rs +++ b/modules/src/clients/ics12_near/header.rs @@ -10,6 +10,12 @@ pub struct NearHeader { inner: LightClientBlockView, } +impl NearHeader { + pub fn get_light_client_block_view(&self) -> &LightClientBlockView { + &self.inner + } +} + impl Header for NearHeader { fn client_type(&self) -> ClientType { ClientType::Near From 6dec6b67baab83fb7c6802412c8c7272f02593b9 Mon Sep 17 00:00:00 2001 From: Seun Lanlege Date: Wed, 25 May 2022 09:16:40 +0100 Subject: [PATCH 31/96] rename to ics13_near --- modules/src/clients/{ics12_near => ics13_near}/client_def.rs | 0 modules/src/clients/{ics12_near => ics13_near}/client_state.rs | 0 .../src/clients/{ics12_near => ics13_near}/consensus_state.rs | 0 modules/src/clients/{ics12_near => ics13_near}/crypto_ops.rs | 2 +- modules/src/clients/{ics12_near => ics13_near}/error.rs | 0 modules/src/clients/{ics12_near => ics13_near}/header.rs | 0 modules/src/clients/{ics12_near => ics13_near}/mod.rs | 0 modules/src/clients/{ics12_near => ics13_near}/types.rs | 0 modules/src/clients/mod.rs | 2 +- modules/src/core/ics02_client/header.rs | 2 +- 10 files changed, 3 insertions(+), 3 deletions(-) rename modules/src/clients/{ics12_near => ics13_near}/client_def.rs (100%) rename modules/src/clients/{ics12_near => ics13_near}/client_state.rs (100%) rename modules/src/clients/{ics12_near => ics13_near}/consensus_state.rs (100%) rename modules/src/clients/{ics12_near => ics13_near}/crypto_ops.rs (88%) rename modules/src/clients/{ics12_near => ics13_near}/error.rs (100%) rename modules/src/clients/{ics12_near => ics13_near}/header.rs (100%) rename modules/src/clients/{ics12_near => ics13_near}/mod.rs (100%) rename modules/src/clients/{ics12_near => ics13_near}/types.rs (100%) diff --git a/modules/src/clients/ics12_near/client_def.rs b/modules/src/clients/ics13_near/client_def.rs similarity index 100% rename from modules/src/clients/ics12_near/client_def.rs rename to modules/src/clients/ics13_near/client_def.rs diff --git a/modules/src/clients/ics12_near/client_state.rs 
b/modules/src/clients/ics13_near/client_state.rs similarity index 100% rename from modules/src/clients/ics12_near/client_state.rs rename to modules/src/clients/ics13_near/client_state.rs diff --git a/modules/src/clients/ics12_near/consensus_state.rs b/modules/src/clients/ics13_near/consensus_state.rs similarity index 100% rename from modules/src/clients/ics12_near/consensus_state.rs rename to modules/src/clients/ics13_near/consensus_state.rs diff --git a/modules/src/clients/ics12_near/crypto_ops.rs b/modules/src/clients/ics13_near/crypto_ops.rs similarity index 88% rename from modules/src/clients/ics12_near/crypto_ops.rs rename to modules/src/clients/ics13_near/crypto_ops.rs index b00c6092f4..9f5df41aba 100644 --- a/modules/src/clients/ics12_near/crypto_ops.rs +++ b/modules/src/clients/ics13_near/crypto_ops.rs @@ -1,4 +1,4 @@ -use crate::clients::{crypto_ops::crypto::CryptoOps, ics12_near::error::Error}; +use crate::clients::{crypto_ops::crypto::CryptoOps, ics13_near::error::Error}; #[derive(Debug, Clone)] pub struct NearCryptoOps { diff --git a/modules/src/clients/ics12_near/error.rs b/modules/src/clients/ics13_near/error.rs similarity index 100% rename from modules/src/clients/ics12_near/error.rs rename to modules/src/clients/ics13_near/error.rs diff --git a/modules/src/clients/ics12_near/header.rs b/modules/src/clients/ics13_near/header.rs similarity index 100% rename from modules/src/clients/ics12_near/header.rs rename to modules/src/clients/ics13_near/header.rs diff --git a/modules/src/clients/ics12_near/mod.rs b/modules/src/clients/ics13_near/mod.rs similarity index 100% rename from modules/src/clients/ics12_near/mod.rs rename to modules/src/clients/ics13_near/mod.rs diff --git a/modules/src/clients/ics12_near/types.rs b/modules/src/clients/ics13_near/types.rs similarity index 100% rename from modules/src/clients/ics12_near/types.rs rename to modules/src/clients/ics13_near/types.rs diff --git a/modules/src/clients/mod.rs b/modules/src/clients/mod.rs index 16cb5a606e..3bfcaab585 100644 --- a/modules/src/clients/mod.rs +++ b/modules/src/clients/mod.rs @@ -3,4 +3,4 @@ pub mod crypto_ops; pub mod ics07_tendermint; pub mod ics11_beefy; -pub mod ics12_near; +pub mod ics13_near; diff --git a/modules/src/core/ics02_client/header.rs b/modules/src/core/ics02_client/header.rs index 31f1546a0f..a5f3e8da54 100644 --- a/modules/src/core/ics02_client/header.rs +++ b/modules/src/core/ics02_client/header.rs @@ -7,7 +7,7 @@ use tendermint_proto::Protobuf; use crate::clients::ics07_tendermint::header::{decode_header, Header as TendermintHeader}; use crate::clients::ics11_beefy::header::{decode_header as decode_beefy_header, BeefyHeader}; -use crate::clients::ics12_near::types::LightClientBlockView; +use crate::clients::ics13_near::types::LightClientBlockView; use crate::core::ics02_client::client_type::ClientType; use crate::core::ics02_client::error::Error; #[cfg(any(test, feature = "mocks"))] From 9626cac3c8cdce9c8c7bc276d6ca323f0406c382 Mon Sep 17 00:00:00 2001 From: David Salami Date: Tue, 24 May 2022 15:05:27 +0100 Subject: [PATCH 32/96] use crypto ops only in client def --- .../relay_application_logic/send_transfer.rs | 2 +- .../clients/ics07_tendermint/client_def.rs | 85 ++++----- .../ics07_tendermint/consensus_state.rs | 28 +-- modules/src/clients/ics11_beefy/client_def.rs | 45 +++-- .../clients/ics11_beefy/consensus_state.rs | 171 ++++++------------ .../src/core/ics02_client/client_consensus.rs | 41 ++--- modules/src/core/ics02_client/client_def.rs | 80 ++++---- 
modules/src/core/ics02_client/context.rs | 26 +-- modules/src/core/ics02_client/handler.rs | 18 +- .../ics02_client/handler/create_client.rs | 25 ++- .../ics02_client/handler/update_client.rs | 10 +- .../ics02_client/handler/upgrade_client.rs | 18 +- modules/src/core/ics02_client/msgs.rs | 6 +- .../core/ics02_client/msgs/create_client.rs | 24 ++- .../core/ics02_client/msgs/upgrade_client.rs | 28 ++- modules/src/core/ics03_connection/handler.rs | 4 +- .../ics03_connection/handler/conn_open_ack.rs | 7 +- .../handler/conn_open_confirm.rs | 5 +- .../handler/conn_open_init.rs | 5 +- .../ics03_connection/handler/conn_open_try.rs | 7 +- .../core/ics03_connection/handler/verify.rs | 21 +-- modules/src/core/ics04_channel/handler.rs | 8 +- .../ics04_channel/handler/acknowledgement.rs | 4 +- .../handler/chan_close_confirm.rs | 5 +- .../ics04_channel/handler/chan_close_init.rs | 6 +- .../ics04_channel/handler/chan_open_ack.rs | 5 +- .../handler/chan_open_confirm.rs | 5 +- .../ics04_channel/handler/chan_open_init.rs | 5 +- .../ics04_channel/handler/chan_open_try.rs | 5 +- .../core/ics04_channel/handler/recv_packet.rs | 4 +- .../core/ics04_channel/handler/send_packet.rs | 5 +- .../src/core/ics04_channel/handler/timeout.rs | 4 +- .../ics04_channel/handler/timeout_on_close.rs | 5 +- .../src/core/ics04_channel/handler/verify.rs | 23 +-- .../handler/write_acknowledgement.rs | 5 +- modules/src/core/ics26_routing/handler.rs | 23 +-- modules/src/core/ics26_routing/msgs.rs | 6 +- modules/src/mock/client_def.rs | 44 ++--- modules/src/mock/client_state.rs | 34 ++-- modules/src/mock/context.rs | 26 +-- modules/src/mock/header.rs | 3 +- modules/src/mock/host.rs | 7 +- modules/src/relayer/ics18_relayer/utils.rs | 14 +- modules/tests/runner/mod.rs | 4 +- 44 files changed, 371 insertions(+), 535 deletions(-) diff --git a/modules/src/applications/ics20_fungible_token_transfer/relay_application_logic/send_transfer.rs b/modules/src/applications/ics20_fungible_token_transfer/relay_application_logic/send_transfer.rs index c29f47064e..2f7115c975 100644 --- a/modules/src/applications/ics20_fungible_token_transfer/relay_application_logic/send_transfer.rs +++ b/modules/src/applications/ics20_fungible_token_transfer/relay_application_logic/send_transfer.rs @@ -13,7 +13,7 @@ pub(crate) fn send_transfer( msg: MsgTransfer, ) -> Result, Error> where - Ctx: LightClientContext, + Ctx: LightClientContext, { let source_channel_end = ctx .channel_end(&(msg.source_port.clone(), msg.source_channel)) diff --git a/modules/src/clients/ics07_tendermint/client_def.rs b/modules/src/clients/ics07_tendermint/client_def.rs index 6937db2ca3..142b56f512 100644 --- a/modules/src/clients/ics07_tendermint/client_def.rs +++ b/modules/src/clients/ics07_tendermint/client_def.rs @@ -1,7 +1,6 @@ use core::convert::TryInto; use core::fmt::Debug; -use crate::clients::crypto_ops::crypto::CryptoOps; use ibc_proto::ibc::core::commitment::v1::MerkleProof as RawMerkleProof; use prost::Message; use tendermint_light_client_verifier::types::{TrustedBlockState, UntrustedBlockState}; @@ -37,32 +36,19 @@ use crate::downcast; use crate::prelude::*; use crate::Height; -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct TendermintClient { +#[derive(Clone, Debug, Default, PartialEq, Eq)] +pub struct TendermintClient { verifier: ProdVerifier, - _phantom: core::marker::PhantomData, } -impl Default for TendermintClient { - fn default() -> Self { - Self { - verifier: Default::default(), - _phantom: Default::default(), - } - } -} - -impl ClientDef - for TendermintClient -{ +impl 
ClientDef for TendermintClient { type Header = Header; type ClientState = ClientState; - type ConsensusState = ConsensusState; - type Crypto = Crypto; + type ConsensusState = ConsensusState; fn verify_header( &self, - ctx: &dyn LightClientContext, + ctx: &dyn LightClientContext, client_id: ClientId, client_state: Self::ClientState, header: Self::Header, @@ -77,11 +63,11 @@ impl ClientDef } // Check if a consensus state is already installed; if so skip - let header_consensus_state = ConsensusState::::from(header.clone()); + let header_consensus_state = ConsensusState::from(header.clone()); let _ = match ctx.maybe_consensus_state(&client_id, header.height())? { Some(cs) => { - let cs = downcast_consensus_state::(cs)?; + let cs = downcast_consensus_state(cs)?; // If this consensus state matches, skip verification // (optimization) if cs == header_consensus_state { @@ -94,9 +80,8 @@ impl ClientDef None => None, }; - let trusted_consensus_state = downcast_consensus_state::( - ctx.consensus_state(&client_id, header.trusted_height)?, - )?; + let trusted_consensus_state = + downcast_consensus_state(ctx.consensus_state(&client_id, header.trusted_height)?)?; let trusted_state = TrustedBlockState { header_time: trusted_consensus_state.timestamp, @@ -152,11 +137,11 @@ impl ClientDef fn update_state( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn LightClientContext, _client_id: ClientId, client_state: Self::ClientState, header: Self::Header, - ) -> Result<(Self::ClientState, ConsensusUpdateResult), Ics02Error> { + ) -> Result<(Self::ClientState, ConsensusUpdateResult), Ics02Error> { let header_consensus_state = ConsensusState::from(header.clone()); Ok(( client_state.with_header(header), @@ -176,19 +161,19 @@ impl ClientDef fn check_for_misbehaviour( &self, - ctx: &dyn LightClientContext, + ctx: &dyn LightClientContext, client_id: ClientId, client_state: Self::ClientState, header: Self::Header, ) -> Result { // Check if a consensus state is already installed; if so it should // match the untrusted header. - let header_consensus_state = ConsensusState::::from(header.clone()); + let header_consensus_state = ConsensusState::from(header.clone()); let existing_consensus_state = match ctx.maybe_consensus_state(&client_id, header.height())? { Some(cs) => { - let cs = downcast_consensus_state::(cs)?; + let cs = downcast_consensus_state(cs)?; // If this consensus state matches, skip verification // (optimization) if cs == header_consensus_state { @@ -215,7 +200,7 @@ impl ClientDef if header.height() < client_state.latest_height() { let maybe_next_cs = ctx .next_consensus_state(&client_id, header.height())? - .map(downcast_consensus_state::) + .map(downcast_consensus_state) .transpose()?; if let Some(next_cs) = maybe_next_cs { @@ -235,7 +220,7 @@ impl ClientDef if header.trusted_height < header.height() { let maybe_prev_cs = ctx .prev_consensus_state(&client_id, header.height())? 
- .map(downcast_consensus_state::) + .map(downcast_consensus_state) .transpose()?; if let Some(prev_cs) = maybe_prev_cs { @@ -257,7 +242,7 @@ impl ClientDef fn verify_client_consensus_state( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn LightClientContext, client_state: &Self::ClientState, height: Height, prefix: &CommitmentPrefix, @@ -265,7 +250,7 @@ impl ClientDef root: &CommitmentRoot, client_id: &ClientId, consensus_height: Height, - expected_consensus_state: &AnyConsensusState, + expected_consensus_state: &AnyConsensusState, ) -> Result<(), Ics02Error> { client_state.verify_height(height)?; @@ -282,7 +267,7 @@ impl ClientDef fn verify_connection_state( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn LightClientContext, _client_id: &ClientId, client_state: &Self::ClientState, height: Height, @@ -303,7 +288,7 @@ impl ClientDef fn verify_channel_state( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn LightClientContext, _client_id: &ClientId, client_state: &Self::ClientState, height: Height, @@ -325,7 +310,7 @@ impl ClientDef fn verify_client_full_state( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn LightClientContext, client_state: &Self::ClientState, height: Height, prefix: &CommitmentPrefix, @@ -345,7 +330,7 @@ impl ClientDef fn verify_packet_data( &self, - ctx: &dyn LightClientContext, + ctx: &dyn LightClientContext, _client_id: &ClientId, client_state: &Self::ClientState, height: Height, @@ -358,7 +343,7 @@ impl ClientDef commitment: PacketCommitment, ) -> Result<(), Ics02Error> { client_state.verify_height(height)?; - verify_delay_passed::(ctx, height, connection_end)?; + verify_delay_passed(ctx, height, connection_end)?; let commitment_path = CommitmentsPath { port_id: port_id.clone(), @@ -378,7 +363,7 @@ impl ClientDef fn verify_packet_acknowledgement( &self, - ctx: &dyn LightClientContext, + ctx: &dyn LightClientContext, _client_id: &ClientId, client_state: &Self::ClientState, height: Height, @@ -392,7 +377,7 @@ impl ClientDef ) -> Result<(), Ics02Error> { // client state height = consensus state height client_state.verify_height(height)?; - verify_delay_passed::(ctx, height, connection_end)?; + verify_delay_passed(ctx, height, connection_end)?; let ack_path = AcksPath { port_id: port_id.clone(), @@ -411,7 +396,7 @@ impl ClientDef fn verify_next_sequence_recv( &self, - ctx: &dyn LightClientContext, + ctx: &dyn LightClientContext, _client_id: &ClientId, client_state: &Self::ClientState, height: Height, @@ -423,7 +408,7 @@ impl ClientDef sequence: Sequence, ) -> Result<(), Ics02Error> { client_state.verify_height(height)?; - verify_delay_passed::(ctx, height, connection_end)?; + verify_delay_passed(ctx, height, connection_end)?; let mut seq_bytes = Vec::new(); u64::from(sequence) @@ -443,7 +428,7 @@ impl ClientDef fn verify_packet_receipt_absence( &self, - ctx: &dyn LightClientContext, + ctx: &dyn LightClientContext, _client_id: &ClientId, client_state: &Self::ClientState, height: Height, @@ -455,7 +440,7 @@ impl ClientDef sequence: Sequence, ) -> Result<(), Ics02Error> { client_state.verify_height(height)?; - verify_delay_passed::(ctx, height, connection_end)?; + verify_delay_passed(ctx, height, connection_end)?; let receipt_path = ReceiptsPath { port_id: port_id.clone(), @@ -477,7 +462,7 @@ impl ClientDef _consensus_state: &Self::ConsensusState, _proof_upgrade_client: Vec, _proof_upgrade_consensus_state: Vec, - ) -> Result<(Self::ClientState, ConsensusUpdateResult), Ics02Error> { + ) -> Result<(Self::ClientState, ConsensusUpdateResult), Ics02Error> { 
todo!() } } @@ -523,8 +508,8 @@ fn verify_non_membership( .map_err(|e| Ics02Error::tendermint(Error::ics23_error(e))) } -fn verify_delay_passed( - ctx: &dyn LightClientContext, +fn verify_delay_passed( + ctx: &dyn LightClientContext, height: Height, connection_end: &ConnectionEnd, ) -> Result<(), Ics02Error> { @@ -553,11 +538,9 @@ fn verify_delay_passed( .map_err(|e| e.into()) } -fn downcast_consensus_state( - cs: AnyConsensusState, -) -> Result, Ics02Error> { +fn downcast_consensus_state(cs: AnyConsensusState) -> Result { downcast!( - cs => AnyConsensusState::::Tendermint + cs => AnyConsensusState::Tendermint ) .ok_or_else(|| Ics02Error::client_args_type_mismatch(ClientType::Tendermint)) } diff --git a/modules/src/clients/ics07_tendermint/consensus_state.rs b/modules/src/clients/ics07_tendermint/consensus_state.rs index 21b1feb900..43b0ebaf49 100644 --- a/modules/src/clients/ics07_tendermint/consensus_state.rs +++ b/modules/src/clients/ics07_tendermint/consensus_state.rs @@ -8,7 +8,6 @@ use tendermint::{hash::Algorithm, time::Time, Hash}; use tendermint_proto::google::protobuf as tpb; use tendermint_proto::Protobuf; -use crate::clients::crypto_ops::crypto::CryptoOps; use ibc_proto::ibc::lightclients::tendermint::v1::ConsensusState as RawConsensusState; use crate::clients::ics07_tendermint::error::Error; @@ -18,29 +17,24 @@ use crate::core::ics02_client::client_type::ClientType; use crate::core::ics23_commitment::commitment::CommitmentRoot; #[derive(Clone, Debug, PartialEq, Eq, Serialize)] -pub struct ConsensusState { +pub struct ConsensusState { pub timestamp: Time, pub root: CommitmentRoot, pub next_validators_hash: Hash, - _phantom: core::marker::PhantomData, } -impl ConsensusState { +impl ConsensusState { pub fn new(root: CommitmentRoot, timestamp: Time, next_validators_hash: Hash) -> Self { Self { timestamp, root, next_validators_hash, - _phantom: Default::default(), } } } -impl - crate::core::ics02_client::client_consensus::ConsensusState for ConsensusState -{ +impl crate::core::ics02_client::client_consensus::ConsensusState for ConsensusState { type Error = Infallible; - type Crypto = Crypto; fn client_type(&self) -> ClientType { ClientType::Tendermint @@ -50,14 +44,14 @@ impl &self.root } - fn wrap_any(self) -> AnyConsensusState { + fn wrap_any(self) -> AnyConsensusState { AnyConsensusState::Tendermint(self) } } -impl Protobuf for ConsensusState {} +impl Protobuf for ConsensusState {} -impl TryFrom for ConsensusState { +impl TryFrom for ConsensusState { type Error = Error; fn try_from(raw: RawConsensusState) -> Result { @@ -82,13 +76,12 @@ impl TryFrom for ConsensusState { timestamp, next_validators_hash: Hash::from_bytes(Algorithm::Sha256, &raw.next_validators_hash) .map_err(|e| Error::invalid_raw_consensus_state(e.to_string()))?, - _phantom: Default::default(), }) } } -impl From> for RawConsensusState { - fn from(value: ConsensusState) -> Self { +impl From for RawConsensusState { + fn from(value: ConsensusState) -> Self { // FIXME: shunts like this are necessary due to // https://github.com/informalsystems/tendermint-rs/issues/1053 let tpb::Timestamp { seconds, nanos } = value.timestamp.into(); @@ -104,18 +97,17 @@ impl From> for RawConsensusState { } } -impl From for ConsensusState { +impl From for ConsensusState { fn from(header: tendermint::block::Header) -> Self { Self { root: CommitmentRoot::from_bytes(header.app_hash.as_ref()), timestamp: header.time, next_validators_hash: header.next_validators_hash, - _phantom: Default::default(), } } } -impl From
for ConsensusState { +impl From
for ConsensusState { fn from(header: Header) -> Self { Self::from(header.signed_header.header) } diff --git a/modules/src/clients/ics11_beefy/client_def.rs b/modules/src/clients/ics11_beefy/client_def.rs index 17fa000990..4aada31360 100644 --- a/modules/src/clients/ics11_beefy/client_def.rs +++ b/modules/src/clients/ics11_beefy/client_def.rs @@ -43,21 +43,20 @@ use crate::downcast; #[derive(Clone, Debug, PartialEq, Eq)] pub struct BeefyClient(PhantomData); -impl Default for BeefyClient { +impl Default for BeefyClient { fn default() -> Self { Self(PhantomData::default()) } } -impl ClientDef for BeefyClient { +impl ClientDef for BeefyClient { type Header = BeefyHeader; type ClientState = ClientState; - type ConsensusState = ConsensusState; - type Crypto = Crypto; + type ConsensusState = ConsensusState; fn verify_header( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn LightClientContext, _client_id: ClientId, client_state: Self::ClientState, header: Self::Header, @@ -131,11 +130,11 @@ impl ClientDef for BeefyClient fn update_state( &self, - ctx: &dyn LightClientContext, + ctx: &dyn LightClientContext, client_id: ClientId, client_state: Self::ClientState, header: Self::Header, - ) -> Result<(Self::ClientState, ConsensusUpdateResult), Error> { + ) -> Result<(Self::ClientState, ConsensusUpdateResult), Error> { let mut parachain_cs_states = vec![]; // Extract the new client state from the verified header let client_state = client_state @@ -153,7 +152,7 @@ impl ClientDef for BeefyClient } parachain_cs_states.push(( height, - AnyConsensusState::Beefy(ConsensusState::::try_from(header)?), + AnyConsensusState::Beefy(ConsensusState::from_header::(header)?), )) } @@ -183,7 +182,7 @@ impl ClientDef for BeefyClient fn check_for_misbehaviour( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn LightClientContext, _client_id: ClientId, _client_state: Self::ClientState, _header: Self::Header, @@ -193,7 +192,7 @@ impl ClientDef for BeefyClient fn verify_client_consensus_state( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn LightClientContext, _client_state: &Self::ClientState, _height: Height, prefix: &CommitmentPrefix, @@ -201,7 +200,7 @@ impl ClientDef for BeefyClient root: &CommitmentRoot, client_id: &ClientId, consensus_height: Height, - expected_consensus_state: &AnyConsensusState, + expected_consensus_state: &AnyConsensusState, ) -> Result<(), Error> { let path = ClientConsensusStatePath { client_id: client_id.clone(), @@ -215,7 +214,7 @@ impl ClientDef for BeefyClient // Consensus state will be verified in the verification functions before these are called fn verify_connection_state( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn LightClientContext, _client_id: &ClientId, _client_state: &Self::ClientState, _height: Height, @@ -232,7 +231,7 @@ impl ClientDef for BeefyClient fn verify_channel_state( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn LightClientContext, _client_id: &ClientId, _client_state: &Self::ClientState, _height: Height, @@ -250,7 +249,7 @@ impl ClientDef for BeefyClient fn verify_client_full_state( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn LightClientContext, _client_state: &Self::ClientState, _height: Height, prefix: &CommitmentPrefix, @@ -266,7 +265,7 @@ impl ClientDef for BeefyClient fn verify_packet_data( &self, - ctx: &dyn LightClientContext, + ctx: &dyn LightClientContext, _client_id: &ClientId, _client_state: &Self::ClientState, height: Height, @@ -297,7 +296,7 @@ impl ClientDef for BeefyClient fn verify_packet_acknowledgement( &self, - ctx: 
&dyn LightClientContext, + ctx: &dyn LightClientContext, _client_id: &ClientId, _client_state: &Self::ClientState, height: Height, @@ -327,7 +326,7 @@ impl ClientDef for BeefyClient fn verify_next_sequence_recv( &self, - ctx: &dyn LightClientContext, + ctx: &dyn LightClientContext, _client_id: &ClientId, _client_state: &Self::ClientState, height: Height, @@ -354,7 +353,7 @@ impl ClientDef for BeefyClient fn verify_packet_receipt_absence( &self, - ctx: &dyn LightClientContext, + ctx: &dyn LightClientContext, _client_id: &ClientId, _client_state: &Self::ClientState, height: Height, @@ -386,7 +385,7 @@ impl ClientDef for BeefyClient _consensus_state: &Self::ConsensusState, _proof_upgrade_client: Vec, _proof_upgrade_consensus_state: Vec, - ) -> Result<(Self::ClientState, ConsensusUpdateResult), Error> { + ) -> Result<(Self::ClientState, ConsensusUpdateResult), Error> { todo!() } } @@ -432,8 +431,8 @@ fn verify_non_membership>( Crypto::verify_non_membership_trie_proof(&root, &trie_proof, &key) } -fn verify_delay_passed( - ctx: &dyn LightClientContext, +fn verify_delay_passed( + ctx: &dyn LightClientContext, height: Height, connection_end: &ConnectionEnd, ) -> Result<(), Error> { @@ -468,9 +467,7 @@ fn verify_delay_passed( .map_err(|e| e.into()) } -pub fn downcast_consensus_state( - cs: AnyConsensusState, -) -> Result, Error> { +pub fn downcast_consensus_state(cs: AnyConsensusState) -> Result { downcast!( cs => AnyConsensusState::Beefy ) diff --git a/modules/src/clients/ics11_beefy/consensus_state.rs b/modules/src/clients/ics11_beefy/consensus_state.rs index 5c9ee444c8..7d7c2e209f 100644 --- a/modules/src/clients/ics11_beefy/consensus_state.rs +++ b/modules/src/clients/ics11_beefy/consensus_state.rs @@ -19,127 +19,21 @@ use crate::core::ics23_commitment::commitment::CommitmentRoot; // This is a constant that comes from pallet-ibc pub const IBC_CONSENSUS_ID: [u8; 4] = *b"/IBC"; #[derive(Clone, Debug, PartialEq, Eq, Serialize)] -pub struct ConsensusState { +pub struct ConsensusState { pub timestamp: Time, pub root: CommitmentRoot, - _phantom: core::marker::PhantomData, } -impl ConsensusState { +impl ConsensusState { pub fn new(root: Vec, timestamp: Time) -> Self { Self { timestamp, root: root.into(), - _phantom: Default::default(), } } -} - -impl - crate::core::ics02_client::client_consensus::ConsensusState for ConsensusState -{ - type Error = Infallible; - type Crypto = Crypto; - - fn client_type(&self) -> ClientType { - ClientType::Beefy - } - - fn root(&self) -> &CommitmentRoot { - &self.root - } - - fn wrap_any(self) -> AnyConsensusState { - AnyConsensusState::Beefy(self) - } -} - -impl Protobuf for ConsensusState {} - -impl TryFrom for ConsensusState { - type Error = Error; - - fn try_from(raw: RawConsensusState) -> Result { - let ibc_proto::google::protobuf::Timestamp { seconds, nanos } = raw - .timestamp - .ok_or_else(|| Error::invalid_raw_consensus_state("missing timestamp".into()))?; - let proto_timestamp = tpb::Timestamp { seconds, nanos }; - let timestamp = proto_timestamp - .try_into() - .map_err(|e| Error::invalid_raw_consensus_state(format!("invalid timestamp: {}", e)))?; - - let parachain_header = raw - .parachain_header - .ok_or_else(|| Error::invalid_raw_consensus_state("missing parachain header".into()))?; - - let parachain_header = ParachainHeader { - parachain_header: decode_parachain_header(parachain_header.parachain_header).map_err( - |_| Error::invalid_raw_consensus_state("invalid parachain header".into()), - )?, - partial_mmr_leaf: { - let partial_leaf = 
parachain_header.mmr_leaf_partial.ok_or_else( - Error::invalid_raw_consensus_state("missing mmr leaf".into()), - )?; - PartialMmrLeaf { - version: { - let (major, minor) = - split_leaf_version(partial_leaf.version.saturated_into::()); - MmrLeafVersion::new(major, minor) - }, - parent_number_and_hash: ( - partial_leaf.parent_number, - H256::from_slice(&partial_leaf.parent_hash), - ), - beefy_next_authority_set: { - let next_set = partial_leaf.beefy_next_authority_set.ok_or_else( - Error::invalid_raw_consensus_state("missing next authority set".into()), - )?; - BeefyNextAuthoritySet { - id: next_set.id, - len: next_set.len, - root: H256::from_slice(&next_set.authority_root), - } - }, - } - }, - para_id: parachain_header.para_id, - parachain_heads_proof: parachain_header - .parachain_heads_proof - .into_iter() - .map(|item| { - let mut dest = [0u8; 32]; - dest.copy_from_slice(&*item); - dest - }) - .collect(), - heads_leaf_index: parachain_header.heads_leaf_index, - heads_total_count: parachain_header.heads_total_count, - extrinsic_proof: parachain_header.extrinsic_proof, - }; - Ok(Self { - root: raw.root.into(), - timestamp, - _phantom: Default::default(), - }) - } -} - -impl From> for RawConsensusState { - fn from(value: ConsensusState) -> Self { - let tpb::Timestamp { seconds, nanos } = value.timestamp.into(); - let timestamp = ibc_proto::google::protobuf::Timestamp { seconds, nanos }; - - RawConsensusState { - timestamp: Some(timestamp), - root: value.root.into_vec(), - } - } -} -impl TryFrom for ConsensusState { - type Error = Error; #[cfg(not(test))] - fn try_from(header: ParachainHeader) -> Result { + pub fn from_header(header: ParachainHeader) -> Result { use crate::clients::ics11_beefy::header::decode_timestamp_extrinsic; use crate::timestamp::Timestamp; use sp_runtime::SaturatedConversion; @@ -169,12 +63,13 @@ impl TryFrom for ConsensusState { Ok(Self { root: root.into(), timestamp, - _phantom: Default::default(), }) } #[cfg(test)] - fn try_from(header: ParachainHeader) -> Result { + /// Leaving this here because there's no ibc commitment root in the runtime header that will be used in + /// testing + pub fn from_header(header: ParachainHeader) -> Result { use crate::clients::ics11_beefy::header::decode_timestamp_extrinsic; use crate::timestamp::Timestamp; use sp_runtime::SaturatedConversion; @@ -202,21 +97,67 @@ impl TryFrom for ConsensusState { Ok(Self { root: root.into(), timestamp, - _phantom: Default::default(), }) } } +impl crate::core::ics02_client::client_consensus::ConsensusState for ConsensusState { + type Error = Infallible; + + fn client_type(&self) -> ClientType { + ClientType::Beefy + } + + fn root(&self) -> &CommitmentRoot { + &self.root + } + + fn wrap_any(self) -> AnyConsensusState { + AnyConsensusState::Beefy(self) + } +} + +impl Protobuf for ConsensusState {} + +impl TryFrom for ConsensusState { + type Error = Error; + + fn try_from(raw: RawConsensusState) -> Result { + let ibc_proto::google::protobuf::Timestamp { seconds, nanos } = raw + .timestamp + .ok_or_else(|| Error::invalid_raw_consensus_state("missing timestamp".into()))?; + let proto_timestamp = tpb::Timestamp { seconds, nanos }; + let timestamp = proto_timestamp + .try_into() + .map_err(|e| Error::invalid_raw_consensus_state(format!("invalid timestamp: {}", e)))?; + + Ok(Self { + root: raw.root.into(), + timestamp, + }) + } +} + +impl From for RawConsensusState { + fn from(value: ConsensusState) -> Self { + let tpb::Timestamp { seconds, nanos } = value.timestamp.into(); + let timestamp = 
ibc_proto::google::protobuf::Timestamp { seconds, nanos }; + + RawConsensusState { + timestamp: Some(timestamp), + root: value.root.into_vec(), + } + } +} + #[cfg(any(test, feature = "mocks"))] pub mod test_util { use super::*; - use crate::test_utils::Crypto; - pub fn get_dummy_beefy_consensus_state() -> AnyConsensusState { + pub fn get_dummy_beefy_consensus_state() -> AnyConsensusState { AnyConsensusState::Beefy(ConsensusState { timestamp: Time::now(), root: vec![0; 32].into(), - _phantom: Default::default(), }) } } diff --git a/modules/src/core/ics02_client/client_consensus.rs b/modules/src/core/ics02_client/client_consensus.rs index 516d5a3a59..dabbf15013 100644 --- a/modules/src/core/ics02_client/client_consensus.rs +++ b/modules/src/core/ics02_client/client_consensus.rs @@ -4,7 +4,6 @@ use core::convert::Infallible; use core::fmt::Debug; use core::marker::{Send, Sync}; -use crate::clients::crypto_ops::crypto::CryptoOps; use ibc_proto::google::protobuf::Any; use ibc_proto::ibc::core::client::v1::ConsensusStateWithHeight; use serde::Serialize; @@ -30,9 +29,8 @@ pub const BEEFY_CONSENSUS_STATE_TYPE_URL: &str = "/ibc.lightclients.beefy.v1.Con pub const MOCK_CONSENSUS_STATE_TYPE_URL: &str = "/ibc.mock.ConsensusState"; -pub trait ConsensusState: Clone + Send + Sync { +pub trait ConsensusState: Clone + Debug + Send + Sync { type Error; - type Crypto: CryptoOps; /// Type of client associated with this consensus state (eg. Tendermint) fn client_type(&self) -> ClientType; @@ -41,19 +39,19 @@ pub trait ConsensusState: Clone + Send + Sync { fn root(&self) -> &CommitmentRoot; /// Wrap into an `AnyConsensusState` - fn wrap_any(self) -> AnyConsensusState; + fn wrap_any(self) -> AnyConsensusState; } #[derive(Clone, Debug, PartialEq, Eq, Serialize)] #[serde(tag = "type")] -pub enum AnyConsensusState { - Tendermint(consensus_state::ConsensusState), - Beefy(beefy_consensus_state::ConsensusState), +pub enum AnyConsensusState { + Tendermint(consensus_state::ConsensusState), + Beefy(beefy_consensus_state::ConsensusState), #[cfg(any(test, feature = "mocks"))] - Mock(MockConsensusState), + Mock(MockConsensusState), } -impl AnyConsensusState { +impl AnyConsensusState { pub fn timestamp(&self) -> Timestamp { match self { Self::Tendermint(cs_state) => cs_state.timestamp.into(), @@ -73,9 +71,9 @@ impl AnyConsensusState { } } -impl Protobuf for AnyConsensusState {} +impl Protobuf for AnyConsensusState {} -impl TryFrom for AnyConsensusState { +impl TryFrom for AnyConsensusState { type Error = Error; fn try_from(value: Any) -> Result { @@ -103,8 +101,8 @@ impl TryFrom for AnyConsensusState { } } -impl From> for Any { - fn from(value: AnyConsensusState) -> Self { +impl From for Any { + fn from(value: AnyConsensusState) -> Self { match value { AnyConsensusState::Tendermint(value) => Any { type_url: TENDERMINT_CONSENSUS_STATE_TYPE_URL.to_string(), @@ -131,14 +129,14 @@ impl From> for Any { } #[derive(Clone, Debug, PartialEq, Eq, Serialize)] -pub struct AnyConsensusStateWithHeight { +pub struct AnyConsensusStateWithHeight { pub height: Height, - pub consensus_state: AnyConsensusState, + pub consensus_state: AnyConsensusState, } -impl Protobuf for AnyConsensusStateWithHeight {} +impl Protobuf for AnyConsensusStateWithHeight {} -impl TryFrom for AnyConsensusStateWithHeight { +impl TryFrom for AnyConsensusStateWithHeight { type Error = Error; fn try_from(value: ConsensusStateWithHeight) -> Result { @@ -155,8 +153,8 @@ impl TryFrom for AnyConsensusStateWithH } } -impl From> for ConsensusStateWithHeight { - fn from(value: 
AnyConsensusStateWithHeight) -> Self { +impl From for ConsensusStateWithHeight { + fn from(value: AnyConsensusStateWithHeight) -> Self { ConsensusStateWithHeight { height: Some(value.height.into()), consensus_state: Some(value.consensus_state.into()), @@ -164,8 +162,7 @@ impl From> for ConsensusState } } -impl ConsensusState for AnyConsensusState { - type Crypto = Crypto; +impl ConsensusState for AnyConsensusState { type Error = Infallible; fn client_type(&self) -> ClientType { @@ -181,7 +178,7 @@ impl ConsensusState for AnyConsensusSta } } - fn wrap_any(self) -> AnyConsensusState { + fn wrap_any(self) -> AnyConsensusState { self } } diff --git a/modules/src/core/ics02_client/client_def.rs b/modules/src/core/ics02_client/client_def.rs index 2ac6b24357..a891e7393e 100644 --- a/modules/src/core/ics02_client/client_def.rs +++ b/modules/src/core/ics02_client/client_def.rs @@ -24,20 +24,19 @@ use core::fmt::Debug; use crate::mock::client_def::MockClient; #[derive(PartialEq, Eq, Clone, Debug)] -pub enum ConsensusUpdateResult { - Single(AnyConsensusState), - Batch(Vec<(Height, AnyConsensusState)>), +pub enum ConsensusUpdateResult { + Single(AnyConsensusState), + Batch(Vec<(Height, AnyConsensusState)>), } pub trait ClientDef: Clone { type Header: Header; type ClientState: ClientState; type ConsensusState: ConsensusState; - type Crypto: CryptoOps + Debug + Send + Sync; fn verify_header( &self, - ctx: &dyn LightClientContext, + ctx: &dyn LightClientContext, client_id: ClientId, client_state: Self::ClientState, header: Self::Header, @@ -45,11 +44,11 @@ pub trait ClientDef: Clone { fn update_state( &self, - ctx: &dyn LightClientContext, + ctx: &dyn LightClientContext, client_id: ClientId, client_state: Self::ClientState, header: Self::Header, - ) -> Result<(Self::ClientState, ConsensusUpdateResult), Error>; + ) -> Result<(Self::ClientState, ConsensusUpdateResult), Error>; fn update_state_on_misbehaviour( &self, @@ -59,7 +58,7 @@ pub trait ClientDef: Clone { fn check_for_misbehaviour( &self, - ctx: &dyn LightClientContext, + ctx: &dyn LightClientContext, client_id: ClientId, client_state: Self::ClientState, header: Self::Header, @@ -72,7 +71,7 @@ pub trait ClientDef: Clone { consensus_state: &Self::ConsensusState, proof_upgrade_client: Vec, proof_upgrade_consensus_state: Vec, - ) -> Result<(Self::ClientState, ConsensusUpdateResult), Error>; + ) -> Result<(Self::ClientState, ConsensusUpdateResult), Error>; /// Verification functions as specified in: /// @@ -84,7 +83,7 @@ pub trait ClientDef: Clone { #[allow(clippy::too_many_arguments)] fn verify_client_consensus_state( &self, - ctx: &dyn LightClientContext, + ctx: &dyn LightClientContext, client_state: &Self::ClientState, height: Height, prefix: &CommitmentPrefix, @@ -92,14 +91,14 @@ pub trait ClientDef: Clone { root: &CommitmentRoot, client_id: &ClientId, consensus_height: Height, - expected_consensus_state: &AnyConsensusState, + expected_consensus_state: &AnyConsensusState, ) -> Result<(), Error>; /// Verify a `proof` that a connection state matches that of the input `connection_end`. 
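// A minimal, self-contained sketch of the Single/Batch shape that `ConsensusUpdateResult`
// above introduces: a Tendermint-style update yields one consensus state at the update
// height, while a BEEFY update can finalise several parachain heights from a single header.
// `Height`, `ConsensusState` and the store below are simplified stand-ins, not the crate's
// real types.
use std::collections::BTreeMap;

type Height = (u64, u64); // (revision_number, revision_height)

#[derive(Clone, Debug)]
struct ConsensusState {
    root: Vec<u8>,
}

enum ConsensusUpdateResult {
    Single(ConsensusState),
    Batch(Vec<(Height, ConsensusState)>),
}

// A keeper would persist every produced consensus state, keyed by height.
fn store_update(
    store: &mut BTreeMap<Height, ConsensusState>,
    update_height: Height,
    result: ConsensusUpdateResult,
) {
    match result {
        ConsensusUpdateResult::Single(cs) => {
            store.insert(update_height, cs);
        }
        ConsensusUpdateResult::Batch(batch) => {
            for (height, cs) in batch {
                store.insert(height, cs);
            }
        }
    }
}

fn main() {
    let mut store = BTreeMap::new();
    // A BEEFY-style update finalising two parachain heights at once.
    store_update(
        &mut store,
        (0, 11),
        ConsensusUpdateResult::Batch(vec![
            ((0, 10), ConsensusState { root: vec![1; 32] }),
            ((0, 11), ConsensusState { root: vec![2; 32] }),
        ]),
    );
    assert_eq!(store.len(), 2);
}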
#[allow(clippy::too_many_arguments)] fn verify_connection_state( &self, - ctx: &dyn LightClientContext, + ctx: &dyn LightClientContext, client_id: &ClientId, client_state: &Self::ClientState, height: Height, @@ -114,7 +113,7 @@ pub trait ClientDef: Clone { #[allow(clippy::too_many_arguments)] fn verify_channel_state( &self, - ctx: &dyn LightClientContext, + ctx: &dyn LightClientContext, client_id: &ClientId, client_state: &Self::ClientState, height: Height, @@ -130,7 +129,7 @@ pub trait ClientDef: Clone { #[allow(clippy::too_many_arguments)] fn verify_client_full_state( &self, - ctx: &dyn LightClientContext, + ctx: &dyn LightClientContext, client_state: &Self::ClientState, height: Height, prefix: &CommitmentPrefix, @@ -144,7 +143,7 @@ pub trait ClientDef: Clone { #[allow(clippy::too_many_arguments)] fn verify_packet_data( &self, - ctx: &dyn LightClientContext, + ctx: &dyn LightClientContext, client_id: &ClientId, client_state: &Self::ClientState, height: Height, @@ -161,7 +160,7 @@ pub trait ClientDef: Clone { #[allow(clippy::too_many_arguments)] fn verify_packet_acknowledgement( &self, - ctx: &dyn LightClientContext, + ctx: &dyn LightClientContext, client_id: &ClientId, client_state: &Self::ClientState, height: Height, @@ -178,7 +177,7 @@ pub trait ClientDef: Clone { #[allow(clippy::too_many_arguments)] fn verify_next_sequence_recv( &self, - ctx: &dyn LightClientContext, + ctx: &dyn LightClientContext, client_id: &ClientId, client_state: &Self::ClientState, height: Height, @@ -194,7 +193,7 @@ pub trait ClientDef: Clone { #[allow(clippy::too_many_arguments)] fn verify_packet_receipt_absence( &self, - ctx: &dyn LightClientContext, + ctx: &dyn LightClientContext, client_id: &ClientId, client_state: &Self::ClientState, height: Height, @@ -208,37 +207,36 @@ pub trait ClientDef: Clone { } #[derive(Clone, Debug, PartialEq, Eq)] -pub enum AnyClient { - Tendermint(TendermintClient), +pub enum AnyClient { + Tendermint(TendermintClient), Beefy(BeefyClient), Near(BeefyClient), #[cfg(any(test, feature = "mocks"))] - Mock(MockClient), + Mock(MockClient), } -impl AnyClient { +impl AnyClient { pub fn from_client_type(client_type: ClientType) -> Self { match client_type { - ClientType::Tendermint => Self::Tendermint(TendermintClient::::default()), + ClientType::Tendermint => Self::Tendermint(TendermintClient::default()), ClientType::Beefy => Self::Beefy(BeefyClient::::default()), ClientType::Near => Self::Near(BeefyClient::::default()), #[cfg(any(test, feature = "mocks"))] - ClientType::Mock => Self::Mock(MockClient::::default()), + ClientType::Mock => Self::Mock(MockClient::default()), } } } // ⚠️ Beware of the awful boilerplate below ⚠️ -impl ClientDef for AnyClient { +impl ClientDef for AnyClient { type Header = AnyHeader; type ClientState = AnyClientState; - type ConsensusState = AnyConsensusState; - type Crypto = Crypto; + type ConsensusState = AnyConsensusState; /// Validate an incoming header fn verify_header( &self, - ctx: &dyn LightClientContext, + ctx: &dyn LightClientContext, client_id: ClientId, client_state: Self::ClientState, header: Self::Header, @@ -291,11 +289,11 @@ impl ClientDef for Any /// Validates an incoming `header` against the latest consensus state of this client. 
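// A compact, self-contained sketch of the dispatch boilerplate used by `AnyClient` above:
// each variant only accepts the matching `AnyClientState` variant and reports a type
// mismatch otherwise. The client and state types here are illustrative stand-ins, and the
// explicit `match` stands in for what the `downcast!` macro expands to in the real code.
#[derive(Clone, Debug)]
struct TmClient;
#[derive(Clone, Debug)]
struct BeefyLikeClient;
#[derive(Clone, Debug)]
struct TmClientState;
#[derive(Clone, Debug)]
struct BeefyClientState;

#[derive(Clone, Debug)]
enum AnyClientState {
    Tendermint(TmClientState),
    Beefy(BeefyClientState),
}

#[derive(Debug)]
enum ClientError {
    ClientArgsTypeMismatch(&'static str),
}

#[derive(Clone, Debug)]
enum AnyClient {
    Tendermint(TmClient),
    Beefy(BeefyLikeClient),
}

impl AnyClient {
    // Accept only the wrapper variant that matches this client, otherwise raise
    // a client-type mismatch error, as the verification methods above do.
    fn check_client_state(&self, state: &AnyClientState) -> Result<(), ClientError> {
        match (self, state) {
            (AnyClient::Tendermint(_), AnyClientState::Tendermint(_)) => Ok(()),
            (AnyClient::Beefy(_), AnyClientState::Beefy(_)) => Ok(()),
            (AnyClient::Tendermint(_), _) => Err(ClientError::ClientArgsTypeMismatch("07-tendermint")),
            (AnyClient::Beefy(_), _) => Err(ClientError::ClientArgsTypeMismatch("11-beefy")),
        }
    }
}

fn main() {
    let client = AnyClient::Beefy(BeefyLikeClient);
    assert!(client.check_client_state(&AnyClientState::Beefy(BeefyClientState)).is_ok());
    assert!(client.check_client_state(&AnyClientState::Tendermint(TmClientState)).is_err());
}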
fn update_state( &self, - ctx: &dyn LightClientContext, + ctx: &dyn LightClientContext, client_id: ClientId, client_state: AnyClientState, header: AnyHeader, - ) -> Result<(AnyClientState, ConsensusUpdateResult), Error> { + ) -> Result<(AnyClientState, ConsensusUpdateResult), Error> { match self { Self::Tendermint(client) => { let (client_state, header) = downcast!( @@ -387,7 +385,7 @@ impl ClientDef for Any /// Checks for misbehaviour in an incoming header fn check_for_misbehaviour( &self, - ctx: &dyn LightClientContext, + ctx: &dyn LightClientContext, client_id: ClientId, client_state: Self::ClientState, header: Self::Header, @@ -432,7 +430,7 @@ impl ClientDef for Any consensus_state: &Self::ConsensusState, proof_upgrade_client: Vec, proof_upgrade_consensus_state: Vec, - ) -> Result<(Self::ClientState, ConsensusUpdateResult), Error> { + ) -> Result<(Self::ClientState, ConsensusUpdateResult), Error> { match self { Self::Tendermint(client) => { let (client_state, consensus_state) = downcast!( @@ -494,7 +492,7 @@ impl ClientDef for Any fn verify_client_consensus_state( &self, - ctx: &dyn LightClientContext, + ctx: &dyn LightClientContext, client_state: &Self::ClientState, height: Height, prefix: &CommitmentPrefix, @@ -502,7 +500,7 @@ impl ClientDef for Any root: &CommitmentRoot, client_id: &ClientId, consensus_height: Height, - expected_consensus_state: &AnyConsensusState, + expected_consensus_state: &AnyConsensusState, ) -> Result<(), Error> { match self { Self::Tendermint(client) => { @@ -569,7 +567,7 @@ impl ClientDef for Any fn verify_connection_state( &self, - ctx: &dyn LightClientContext, + ctx: &dyn LightClientContext, client_id: &ClientId, client_state: &AnyClientState, height: Height, @@ -637,7 +635,7 @@ impl ClientDef for Any fn verify_channel_state( &self, - ctx: &dyn LightClientContext, + ctx: &dyn LightClientContext, client_id: &ClientId, client_state: &AnyClientState, height: Height, @@ -710,7 +708,7 @@ impl ClientDef for Any } fn verify_client_full_state( &self, - ctx: &dyn LightClientContext, + ctx: &dyn LightClientContext, client_state: &Self::ClientState, height: Height, prefix: &CommitmentPrefix, @@ -780,7 +778,7 @@ impl ClientDef for Any fn verify_packet_data( &self, - ctx: &dyn LightClientContext, + ctx: &dyn LightClientContext, client_id: &ClientId, client_state: &Self::ClientState, height: Height, @@ -863,7 +861,7 @@ impl ClientDef for Any fn verify_packet_acknowledgement( &self, - ctx: &dyn LightClientContext, + ctx: &dyn LightClientContext, client_id: &ClientId, client_state: &Self::ClientState, height: Height, @@ -945,7 +943,7 @@ impl ClientDef for Any } fn verify_next_sequence_recv( &self, - ctx: &dyn LightClientContext, + ctx: &dyn LightClientContext, client_id: &ClientId, client_state: &Self::ClientState, height: Height, @@ -1024,7 +1022,7 @@ impl ClientDef for Any fn verify_packet_receipt_absence( &self, - ctx: &dyn LightClientContext, + ctx: &dyn LightClientContext, client_id: &ClientId, client_state: &Self::ClientState, height: Height, diff --git a/modules/src/core/ics02_client/context.rs b/modules/src/core/ics02_client/context.rs index abbc83209f..c02c0423d6 100644 --- a/modules/src/core/ics02_client/context.rs +++ b/modules/src/core/ics02_client/context.rs @@ -2,7 +2,6 @@ //! that any host chain must implement to be able to process any `ClientMsg`. See //! "ADR 003: IBC protocol implementation" for more details. 
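// A self-contained sketch of the kind of read-only host context described above, using the
// object-safe, non-generic method shapes this patch moves to (a plain `AnyConsensusState`
// return type rather than a generic one). The trait, error and storage types are simplified
// assumptions for illustration, not the crate's `ClientReader`.
use std::collections::BTreeMap;

type ClientId = String;
type Height = u64;

#[derive(Clone, Debug)]
struct AnyConsensusState {
    root: Vec<u8>,
}

#[derive(Debug)]
enum Error {
    ConsensusStateNotFound(ClientId, Height),
}

// Read-only view of client storage, in the spirit of `ClientReader`.
trait ClientReader {
    fn consensus_state(&self, client_id: &ClientId, height: Height)
        -> Result<AnyConsensusState, Error>;

    // Default method: treat "not found" as `Ok(None)` rather than as an error,
    // mirroring the `maybe_consensus_state` helper above.
    fn maybe_consensus_state(
        &self,
        client_id: &ClientId,
        height: Height,
    ) -> Result<Option<AnyConsensusState>, Error> {
        match self.consensus_state(client_id, height) {
            Ok(cs) => Ok(Some(cs)),
            Err(Error::ConsensusStateNotFound(..)) => Ok(None),
        }
    }
}

// A toy in-memory host backing the trait.
struct MockHost {
    states: BTreeMap<(ClientId, Height), AnyConsensusState>,
}

impl ClientReader for MockHost {
    fn consensus_state(&self, client_id: &ClientId, height: Height)
        -> Result<AnyConsensusState, Error> {
        self.states
            .get(&(client_id.clone(), height))
            .cloned()
            .ok_or_else(|| Error::ConsensusStateNotFound(client_id.clone(), height))
    }
}

fn main() {
    let host = MockHost { states: BTreeMap::new() };
    // Missing state surfaces as `None`, not as an error.
    let missing = host.maybe_consensus_state(&"07-tendermint-0".to_string(), 5).unwrap();
    assert!(missing.is_none());
}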
-use crate::clients::crypto_ops::crypto::CryptoOps; use crate::core::ics02_client::client_consensus::AnyConsensusState; use crate::core::ics02_client::client_def::ConsensusUpdateResult; use crate::core::ics02_client::client_state::AnyClientState; @@ -15,7 +14,6 @@ use crate::Height; /// Defines the read-only part of ICS2 (client functions) context. pub trait ClientReader { - type Crypto: CryptoOps; fn client_type(&self, client_id: &ClientId) -> Result; fn client_state(&self, client_id: &ClientId) -> Result; @@ -27,7 +25,7 @@ pub trait ClientReader { &self, client_id: &ClientId, height: Height, - ) -> Result, Error>; + ) -> Result; /// Similar to `consensus_state`, attempt to retrieve the consensus state, /// but return `None` if no state exists at the given height. @@ -35,7 +33,7 @@ pub trait ClientReader { &self, client_id: &ClientId, height: Height, - ) -> Result>, Error> { + ) -> Result, Error> { match self.consensus_state(client_id, height) { Ok(cs) => Ok(Some(cs)), Err(e) => match e.detail() { @@ -50,14 +48,14 @@ pub trait ClientReader { &self, client_id: &ClientId, height: Height, - ) -> Result>, Error>; + ) -> Result, Error>; /// Search for the highest consensus state lower than `height`. fn prev_consensus_state( &self, client_id: &ClientId, height: Height, - ) -> Result>, Error>; + ) -> Result, Error>; /// Returns the current height of the local chain. fn host_height(&self) -> Height; @@ -71,13 +69,10 @@ pub trait ClientReader { } /// Returns the `ConsensusState` of the host (local) chain at a specific height. - fn host_consensus_state( - &self, - height: Height, - ) -> Result, Error>; + fn host_consensus_state(&self, height: Height) -> Result; /// Returns the pending `ConsensusState` of the host (local) chain. - fn pending_host_consensus_state(&self) -> Result, Error>; + fn pending_host_consensus_state(&self) -> Result; /// Returns a natural number, counting how many clients have been created thus far. /// The value of this counter should increase only via method `ClientKeeper::increase_client_counter`. @@ -86,12 +81,7 @@ pub trait ClientReader { /// Defines the write-only part of ICS2 (client functions) context. pub trait ClientKeeper { - type Crypto: CryptoOps; - - fn store_client_result( - &mut self, - handler_res: ClientResult, - ) -> Result<(), Error> { + fn store_client_result(&mut self, handler_res: ClientResult) -> Result<(), Error> { match handler_res { Create(res) => { let client_id = res.client_id.clone(); @@ -204,7 +194,7 @@ pub trait ClientKeeper { &mut self, client_id: ClientId, height: Height, - consensus_state: AnyConsensusState, + consensus_state: AnyConsensusState, ) -> Result<(), Error>; /// Called upon client creation. diff --git a/modules/src/core/ics02_client/handler.rs b/modules/src/core/ics02_client/handler.rs index ef649183c2..e85c48695f 100644 --- a/modules/src/core/ics02_client/handler.rs +++ b/modules/src/core/ics02_client/handler.rs @@ -11,23 +11,23 @@ pub mod update_client; pub mod upgrade_client; #[derive(Clone, Debug, PartialEq, Eq)] -pub enum ClientResult { - Create(create_client::Result), - Update(update_client::Result), - Upgrade(upgrade_client::Result), +pub enum ClientResult { + Create(create_client::Result), + Update(update_client::Result), + Upgrade(upgrade_client::Result), } /// General entry point for processing any message related to ICS2 (client functions) protocols. 
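// A compact sketch of the routing shape used by the general entry point above: match on a
// message enum and delegate each variant to its handler. The message, handler and result
// types below are illustrative stand-ins rather than the crate's `ClientMsg`/`HandlerResult`.
struct MsgCreateClient {
    client_type: &'static str,
}
struct MsgUpdateClient {
    client_id: String,
}

enum ClientMsg {
    CreateClient(MsgCreateClient),
    UpdateClient(MsgUpdateClient),
}

#[derive(Debug)]
enum ClientResult {
    Create(String), // newly assigned client id
    Update(String), // updated client id
}

type Error = String;

fn create_client(msg: MsgCreateClient) -> Result<ClientResult, Error> {
    // A real handler would also validate and store client/consensus state; here we mint an id.
    Ok(ClientResult::Create(format!("{}-0", msg.client_type)))
}

fn update_client(msg: MsgUpdateClient) -> Result<ClientResult, Error> {
    Ok(ClientResult::Update(msg.client_id))
}

// General entry point: route each `ClientMsg` variant to its handler.
fn dispatch(msg: ClientMsg) -> Result<ClientResult, Error> {
    match msg {
        ClientMsg::CreateClient(msg) => create_client(msg),
        ClientMsg::UpdateClient(msg) => update_client(msg),
    }
}

fn main() {
    let res = dispatch(ClientMsg::CreateClient(MsgCreateClient { client_type: "11-beefy" }));
    println!("{:?}", res);
}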
pub fn dispatch( ctx: &Ctx, - msg: ClientMsg, -) -> Result>, Error> + msg: ClientMsg, +) -> Result, Error> where - Ctx: LightClientContext, - Crypto: CryptoOps + Debug + Send + Sync + PartialEq + Eq, + Ctx: LightClientContext, + Crypto: CryptoOps, { match msg { - ClientMsg::CreateClient(msg) => create_client::process::(ctx, msg), + ClientMsg::CreateClient(msg) => create_client::process(ctx, msg), ClientMsg::UpdateClient(msg) => update_client::process::(ctx, msg), ClientMsg::UpgradeClient(msg) => upgrade_client::process::(ctx, msg), _ => { diff --git a/modules/src/core/ics02_client/handler/create_client.rs b/modules/src/core/ics02_client/handler/create_client.rs index 62041ffdd0..169864be05 100644 --- a/modules/src/core/ics02_client/handler/create_client.rs +++ b/modules/src/core/ics02_client/handler/create_client.rs @@ -1,6 +1,5 @@ //! Protocol logic specific to processing ICS2 messages of type `MsgCreateAnyClient`. -use crate::clients::crypto_ops::crypto::CryptoOps; use crate::core::ics26_routing::context::LightClientContext; use crate::prelude::*; use core::fmt::Debug; @@ -21,19 +20,19 @@ use crate::timestamp::Timestamp; /// The result following the successful processing of a `MsgCreateAnyClient` message. Preferably /// this data type should be used with a qualified name `create_client::Result` to avoid ambiguity. #[derive(Clone, Debug, PartialEq, Eq)] -pub struct Result { +pub struct Result { pub client_id: ClientId, pub client_type: ClientType, pub client_state: AnyClientState, - pub consensus_state: Option>, + pub consensus_state: Option, pub processed_time: Timestamp, pub processed_height: Height, } -pub fn process( - ctx: &dyn LightClientContext, - msg: MsgCreateAnyClient, -) -> HandlerResult, Error> { +pub fn process( + ctx: &dyn LightClientContext, + msg: MsgCreateAnyClient, +) -> HandlerResult { let mut output = HandlerOutput::builder(); // Construct this client's identifier @@ -103,7 +102,7 @@ mod tests { let msg = MsgCreateAnyClient::new( MockClientState::new(MockHeader::new(height)).into(), - Some(MockConsensusState::::new(MockHeader::new(height)).into()), + Some(MockConsensusState::new(MockHeader::new(height)).into()), signer, ) .unwrap(); @@ -147,7 +146,7 @@ mod tests { let ctx = MockContext::default().with_client(&existing_client_id, height); - let create_client_msgs: Vec> = vec![ + let create_client_msgs: Vec = vec![ MsgCreateAnyClient::new( MockClientState::new(MockHeader::new(Height { revision_height: 42, @@ -155,7 +154,7 @@ mod tests { })) .into(), Some( - MockConsensusState::::new(MockHeader::new(Height { + MockConsensusState::new(MockHeader::new(Height { revision_height: 42, ..height })) @@ -171,7 +170,7 @@ mod tests { })) .into(), Some( - MockConsensusState::::new(MockHeader::new(Height { + MockConsensusState::new(MockHeader::new(Height { revision_height: 42, ..height })) @@ -264,9 +263,7 @@ mod tests { let msg = MsgCreateAnyClient::new( tm_client_state, - Some(AnyConsensusState::::Tendermint( - tm_header.try_into().unwrap(), - )), + Some(AnyConsensusState::Tendermint(tm_header.try_into().unwrap())), signer, ) .unwrap(); diff --git a/modules/src/core/ics02_client/handler/update_client.rs b/modules/src/core/ics02_client/handler/update_client.rs index 84dde23512..6748454e55 100644 --- a/modules/src/core/ics02_client/handler/update_client.rs +++ b/modules/src/core/ics02_client/handler/update_client.rs @@ -21,18 +21,18 @@ use crate::timestamp::Timestamp; /// The result following the successful processing of a `MsgUpdateAnyClient` message. 
Preferably /// this data type should be used with a qualified name `update_client::Result` to avoid ambiguity. #[derive(Clone, Debug, PartialEq, Eq)] -pub struct Result { +pub struct Result { pub client_id: ClientId, pub client_state: AnyClientState, - pub consensus_state: Option>, + pub consensus_state: Option, pub processed_time: Timestamp, pub processed_height: Height, } -pub fn process( - ctx: &dyn LightClientContext, +pub fn process( + ctx: &dyn LightClientContext, msg: MsgUpdateAnyClient, -) -> HandlerResult, Error> { +) -> HandlerResult { let mut output = HandlerOutput::builder(); let MsgUpdateAnyClient { diff --git a/modules/src/core/ics02_client/handler/upgrade_client.rs b/modules/src/core/ics02_client/handler/upgrade_client.rs index a99ffcfee8..503b65f1b7 100644 --- a/modules/src/core/ics02_client/handler/upgrade_client.rs +++ b/modules/src/core/ics02_client/handler/upgrade_client.rs @@ -17,16 +17,16 @@ use core::fmt::Debug; /// The result following the successful processing of a `MsgUpgradeAnyClient` message. /// This data type should be used with a qualified name `upgrade_client::Result` to avoid ambiguity. #[derive(Clone, Debug, PartialEq, Eq)] -pub struct Result { +pub struct Result { pub client_id: ClientId, pub client_state: AnyClientState, - pub consensus_state: Option>, + pub consensus_state: Option, } -pub fn process( - ctx: &dyn LightClientContext, - msg: MsgUpgradeAnyClient, -) -> HandlerResult, Error> { +pub fn process( + ctx: &dyn LightClientContext, + msg: MsgUpgradeAnyClient, +) -> HandlerResult { let mut output = HandlerOutput::builder(); let MsgUpgradeAnyClient { client_id, .. } = msg; @@ -152,8 +152,7 @@ mod tests { let msg = MsgUpgradeAnyClient { client_id: ClientId::from_str("nonexistingclient").unwrap(), client_state: MockClientState::new(MockHeader::new(Height::new(1, 26))).into(), - consensus_state: MockConsensusState::::new(MockHeader::new(Height::new(1, 26))) - .into(), + consensus_state: MockConsensusState::new(MockHeader::new(Height::new(1, 26))).into(), proof_upgrade_client: Default::default(), proof_upgrade_consensus_state: Default::default(), signer, @@ -181,8 +180,7 @@ mod tests { let msg = MsgUpgradeAnyClient { client_id, client_state: MockClientState::new(MockHeader::new(Height::new(0, 26))).into(), - consensus_state: MockConsensusState::::new(MockHeader::new(Height::new(0, 26))) - .into(), + consensus_state: MockConsensusState::new(MockHeader::new(Height::new(0, 26))).into(), proof_upgrade_client: Default::default(), proof_upgrade_consensus_state: Default::default(), signer, diff --git a/modules/src/core/ics02_client/msgs.rs b/modules/src/core/ics02_client/msgs.rs index d6db69bc89..3131be9beb 100644 --- a/modules/src/core/ics02_client/msgs.rs +++ b/modules/src/core/ics02_client/msgs.rs @@ -16,9 +16,9 @@ pub mod upgrade_client; #[allow(clippy::large_enum_variant)] #[derive(Clone, Debug)] -pub enum ClientMsg { - CreateClient(MsgCreateAnyClient), +pub enum ClientMsg { + CreateClient(MsgCreateAnyClient), UpdateClient(MsgUpdateAnyClient), Misbehaviour(MsgSubmitAnyMisbehaviour), - UpgradeClient(MsgUpgradeAnyClient), + UpgradeClient(MsgUpgradeAnyClient), } diff --git a/modules/src/core/ics02_client/msgs/create_client.rs b/modules/src/core/ics02_client/msgs/create_client.rs index 2a1e91ee6a..effd6bec72 100644 --- a/modules/src/core/ics02_client/msgs/create_client.rs +++ b/modules/src/core/ics02_client/msgs/create_client.rs @@ -16,16 +16,16 @@ pub const TYPE_URL: &str = "/ibc.core.client.v1.MsgCreateClient"; /// A type of message that triggers the 
creation of a new on-chain (IBC) client. #[derive(Clone, Debug, PartialEq, Eq)] -pub struct MsgCreateAnyClient { +pub struct MsgCreateAnyClient { pub client_state: AnyClientState, - pub consensus_state: Option>, + pub consensus_state: Option, pub signer: Signer, } -impl MsgCreateAnyClient { +impl MsgCreateAnyClient { pub fn new( client_state: AnyClientState, - consensus_state: Option>, + consensus_state: Option, signer: Signer, ) -> Result { match consensus_state.as_ref() { @@ -48,7 +48,7 @@ impl MsgCreateAnyClient { } } -impl Msg for MsgCreateAnyClient { +impl Msg for MsgCreateAnyClient { type ValidationError = crate::core::ics24_host::error::ValidationError; type Raw = RawMsgCreateClient; @@ -61,9 +61,9 @@ impl Msg for MsgCreateAnyClient { } } -impl Protobuf for MsgCreateAnyClient {} +impl Protobuf for MsgCreateAnyClient {} -impl TryFrom for MsgCreateAnyClient { +impl TryFrom for MsgCreateAnyClient { type Error = Error; fn try_from(raw: RawMsgCreateClient) -> Result { @@ -83,8 +83,8 @@ impl TryFrom for MsgCreateAnyClient { } } -impl From> for RawMsgCreateClient { - fn from(ics_msg: MsgCreateAnyClient) -> Self { +impl From for RawMsgCreateClient { + fn from(ics_msg: MsgCreateAnyClient) -> Self { RawMsgCreateClient { client_state: Some(ics_msg.client_state.into()), consensus_state: ics_msg.consensus_state.map(|cs| cs.into()), @@ -104,7 +104,7 @@ mod tests { use crate::clients::ics07_tendermint::header::test_util::get_dummy_tendermint_header; use crate::core::ics02_client::client_consensus::AnyConsensusState; use crate::core::ics02_client::msgs::MsgCreateAnyClient; - use crate::test_utils::{get_dummy_account_id, Crypto}; + use crate::test_utils::get_dummy_account_id; #[test] fn msg_create_client_serialization() { @@ -115,9 +115,7 @@ mod tests { let msg = MsgCreateAnyClient::new( tm_client_state, - Some(AnyConsensusState::::Tendermint( - tm_header.try_into().unwrap(), - )), + Some(AnyConsensusState::Tendermint(tm_header.try_into().unwrap())), signer, ) .unwrap(); diff --git a/modules/src/core/ics02_client/msgs/upgrade_client.rs b/modules/src/core/ics02_client/msgs/upgrade_client.rs index 5142093b5c..db7907dfed 100644 --- a/modules/src/core/ics02_client/msgs/upgrade_client.rs +++ b/modules/src/core/ics02_client/msgs/upgrade_client.rs @@ -18,19 +18,19 @@ pub(crate) const TYPE_URL: &str = "/ibc.core.client.v1.MsgUpgradeClient"; /// A type of message that triggers the upgrade of an on-chain (IBC) client. 
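// A self-contained sketch of the proto-to-domain conversion pattern the client messages above
// follow: `TryFrom<Raw>` validates and rejects missing fields, while `From<Domain>` is
// infallible. The `Raw*` struct stands in for the prost-generated protobuf type and, like the
// error variant, is an assumption for illustration only.
use core::convert::TryFrom;

// Stand-in for a generated message: required data arrives as optional fields or raw bytes.
#[derive(Clone, Default)]
struct RawMsgCreateClient {
    client_state: Option<Vec<u8>>,
    consensus_state: Option<Vec<u8>>,
    signer: String,
}

// Domain type: required fields are no longer optional.
#[derive(Clone, Debug)]
struct MsgCreateAnyClient {
    client_state: Vec<u8>,
    consensus_state: Option<Vec<u8>>,
    signer: String,
}

#[derive(Debug)]
enum Error {
    MissingRawClientState,
}

impl TryFrom<RawMsgCreateClient> for MsgCreateAnyClient {
    type Error = Error;

    // Decoding can fail, so validation lives here.
    fn try_from(raw: RawMsgCreateClient) -> Result<Self, Error> {
        Ok(MsgCreateAnyClient {
            client_state: raw.client_state.ok_or(Error::MissingRawClientState)?,
            consensus_state: raw.consensus_state,
            signer: raw.signer,
        })
    }
}

impl From<MsgCreateAnyClient> for RawMsgCreateClient {
    // Encoding back to the raw form never fails.
    fn from(msg: MsgCreateAnyClient) -> Self {
        RawMsgCreateClient {
            client_state: Some(msg.client_state),
            consensus_state: msg.consensus_state,
            signer: msg.signer,
        }
    }
}

fn main() {
    let raw = RawMsgCreateClient { client_state: Some(vec![1, 2, 3]), ..Default::default() };
    let domain = MsgCreateAnyClient::try_from(raw).expect("client state present");
    let _roundtrip: RawMsgCreateClient = domain.into();
}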
#[derive(Clone, Debug, PartialEq)] -pub struct MsgUpgradeAnyClient { +pub struct MsgUpgradeAnyClient { pub client_id: ClientId, pub client_state: AnyClientState, - pub consensus_state: AnyConsensusState, + pub consensus_state: AnyConsensusState, pub proof_upgrade_client: Vec, pub proof_upgrade_consensus_state: Vec, pub signer: Signer, } -impl MsgUpgradeAnyClient { +impl MsgUpgradeAnyClient { pub fn new( client_id: ClientId, client_state: AnyClientState, - consensus_state: AnyConsensusState, + consensus_state: AnyConsensusState, proof_upgrade_client: Vec, proof_upgrade_consensus_state: Vec, signer: Signer, @@ -46,7 +46,7 @@ impl MsgUpgradeAnyClient { } } -impl Msg for MsgUpgradeAnyClient { +impl Msg for MsgUpgradeAnyClient { type ValidationError = crate::core::ics24_host::error::ValidationError; type Raw = RawMsgUpgradeClient; @@ -59,10 +59,10 @@ impl Msg for MsgUpgradeAnyClient { } } -impl Protobuf for MsgUpgradeAnyClient {} +impl Protobuf for MsgUpgradeAnyClient {} -impl From> for RawMsgUpgradeClient { - fn from(dm_msg: MsgUpgradeAnyClient) -> RawMsgUpgradeClient { +impl From for RawMsgUpgradeClient { + fn from(dm_msg: MsgUpgradeAnyClient) -> RawMsgUpgradeClient { RawMsgUpgradeClient { client_id: dm_msg.client_id.to_string(), client_state: Some(dm_msg.client_state.into()), @@ -74,7 +74,7 @@ impl From> for RawMsgUpgradeClient { } } -impl TryFrom for MsgUpgradeAnyClient { +impl TryFrom for MsgUpgradeAnyClient { type Error = Error; fn try_from(proto_msg: RawMsgUpgradeClient) -> Result { @@ -113,13 +113,13 @@ pub mod test_util { client_state::{MockClientState, MockConsensusState}, header::MockHeader, }, - test_utils::{get_dummy_bech32_account, get_dummy_proof, Crypto}, + test_utils::{get_dummy_bech32_account, get_dummy_proof}, }; use super::MsgUpgradeAnyClient; /// Extends the implementation with additional helper methods. - impl MsgUpgradeAnyClient { + impl MsgUpgradeAnyClient { /// Setter for `client_id`. Amenable to chaining, since it consumes the input message. 
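// A tiny sketch of the consuming-setter style documented for `with_client_id` above: the
// setter takes `self` by value and rebuilds the struct with update syntax, so calls chain
// naturally. The struct and fields here are illustrative, not the crate's message type.
#[derive(Clone, Debug, Default)]
struct UpgradeMsg {
    client_id: String,
    proof_upgrade_client: Vec<u8>,
}

impl UpgradeMsg {
    // Consumes the message and returns it with `client_id` replaced,
    // leaving every other field untouched (struct update syntax).
    fn with_client_id(self, client_id: String) -> Self {
        Self { client_id, ..self }
    }

    fn with_proof(self, proof_upgrade_client: Vec<u8>) -> Self {
        Self { proof_upgrade_client, ..self }
    }
}

fn main() {
    // Each setter returns the updated value, so they chain.
    let msg = UpgradeMsg::default()
        .with_client_id("11-beefy-0".to_string())
        .with_proof(vec![0u8; 32]);
    assert_eq!(msg.client_id, "11-beefy-0");
}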
pub fn with_client_id(self, client_id: ClientId) -> Self { MsgUpgradeAnyClient { client_id, ..self } @@ -134,8 +134,7 @@ pub mod test_util { AnyClientState::Mock(MockClientState::new(MockHeader::new(height))).into(), ), consensus_state: Some( - AnyConsensusState::Mock(MockConsensusState::::new(MockHeader::new(height))) - .into(), + AnyConsensusState::Mock(MockConsensusState::new(MockHeader::new(height))).into(), ), proof_upgrade_client: get_dummy_proof(), proof_upgrade_consensus_state: get_dummy_proof(), @@ -150,7 +149,6 @@ mod tests { use alloc::vec::Vec; use ibc_proto::ibc::core::client::v1::MsgUpgradeClient as RawMsgUpgradeClient; - use crate::test_utils::Crypto; use crate::{ core::{ ics02_client::{ @@ -176,7 +174,7 @@ mod tests { let client_state = AnyClientState::Mock(MockClientState::new(MockHeader::new(height))); let consensus_state = - AnyConsensusState::Mock(MockConsensusState::::new(MockHeader::new(height))); + AnyConsensusState::Mock(MockConsensusState::new(MockHeader::new(height))); let proof = get_dummy_merkle_proof(); let mut proof_buf = Vec::new(); diff --git a/modules/src/core/ics03_connection/handler.rs b/modules/src/core/ics03_connection/handler.rs index fa5897c1c6..3fc70b647e 100644 --- a/modules/src/core/ics03_connection/handler.rs +++ b/modules/src/core/ics03_connection/handler.rs @@ -47,8 +47,8 @@ pub fn dispatch( msg: ConnectionMsg, ) -> Result, Error> where - Ctx: LightClientContext, - Crypto: CryptoOps + Debug + Send + Sync + PartialEq + Eq, + Ctx: LightClientContext, + Crypto: CryptoOps, { match msg { ConnectionMsg::ConnectionOpenInit(msg) => conn_open_init::process(ctx, msg), diff --git a/modules/src/core/ics03_connection/handler/conn_open_ack.rs b/modules/src/core/ics03_connection/handler/conn_open_ack.rs index 1a0d62e4d9..4c9fe1e47a 100644 --- a/modules/src/core/ics03_connection/handler/conn_open_ack.rs +++ b/modules/src/core/ics03_connection/handler/conn_open_ack.rs @@ -13,16 +13,15 @@ use crate::core::ics26_routing::context::LightClientContext; use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; -use core::fmt::Debug; -pub(crate) fn process( - ctx: &dyn LightClientContext, +pub(crate) fn process( + ctx: &dyn LightClientContext, msg: MsgConnectionOpenAck, ) -> HandlerResult { let mut output = HandlerOutput::builder(); // Check the client's (consensus state) proof height. - check_client_consensus_height::(ctx, msg.consensus_height())?; + check_client_consensus_height(ctx, msg.consensus_height())?; // Validate the connection end. 
let mut conn_end = ctx.connection_end(&msg.connection_id)?; diff --git a/modules/src/core/ics03_connection/handler/conn_open_confirm.rs b/modules/src/core/ics03_connection/handler/conn_open_confirm.rs index 2b70cdba70..693caa2b7d 100644 --- a/modules/src/core/ics03_connection/handler/conn_open_confirm.rs +++ b/modules/src/core/ics03_connection/handler/conn_open_confirm.rs @@ -11,10 +11,9 @@ use crate::core::ics26_routing::context::LightClientContext; use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; -use core::fmt::Debug; -pub(crate) fn process( - ctx: &dyn LightClientContext, +pub(crate) fn process( + ctx: &dyn LightClientContext, msg: MsgConnectionOpenConfirm, ) -> HandlerResult { let mut output = HandlerOutput::builder(); diff --git a/modules/src/core/ics03_connection/handler/conn_open_init.rs b/modules/src/core/ics03_connection/handler/conn_open_init.rs index db623b946e..89b8efbb68 100644 --- a/modules/src/core/ics03_connection/handler/conn_open_init.rs +++ b/modules/src/core/ics03_connection/handler/conn_open_init.rs @@ -1,6 +1,5 @@ //! Protocol logic specific to ICS3 messages of type `MsgConnectionOpenInit`. -use crate::clients::crypto_ops::crypto::CryptoOps; use crate::core::ics03_connection::connection::{ConnectionEnd, State}; use crate::core::ics03_connection::error::Error; use crate::core::ics03_connection::events::Attributes; @@ -12,8 +11,8 @@ use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; -pub(crate) fn process( - ctx: &dyn LightClientContext, +pub(crate) fn process( + ctx: &dyn LightClientContext, msg: MsgConnectionOpenInit, ) -> HandlerResult { let mut output = HandlerOutput::builder(); diff --git a/modules/src/core/ics03_connection/handler/conn_open_try.rs b/modules/src/core/ics03_connection/handler/conn_open_try.rs index 079bb584a9..75165da65c 100644 --- a/modules/src/core/ics03_connection/handler/conn_open_try.rs +++ b/modules/src/core/ics03_connection/handler/conn_open_try.rs @@ -14,16 +14,15 @@ use crate::core::ics26_routing::context::LightClientContext; use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; -use core::fmt::Debug; -pub(crate) fn process( - ctx: &dyn LightClientContext, +pub(crate) fn process( + ctx: &dyn LightClientContext, msg: MsgConnectionOpenTry, ) -> HandlerResult { let mut output = HandlerOutput::builder(); // Check that consensus height (for client proof) in message is not too advanced nor too old. - check_client_consensus_height::(ctx, msg.consensus_height())?; + check_client_consensus_height(ctx, msg.consensus_height())?; // Unwrap the old connection end (if any) and its identifier. let (mut new_connection_end, conn_id) = match &msg.previous_connection_id { diff --git a/modules/src/core/ics03_connection/handler/verify.rs b/modules/src/core/ics03_connection/handler/verify.rs index 3a3b5e587f..072688476f 100644 --- a/modules/src/core/ics03_connection/handler/verify.rs +++ b/modules/src/core/ics03_connection/handler/verify.rs @@ -9,11 +9,10 @@ use crate::core::ics23_commitment::commitment::CommitmentProofBytes; use crate::core::ics26_routing::context::LightClientContext; use crate::proofs::{ConsensusProof, Proofs}; use crate::Height; -use core::fmt::Debug; /// Entry point for verifying all proofs bundled in any ICS3 message. 
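// A simplified sketch of the bound check the connection handlers above apply to a claimed
// consensus height: it must not exceed the host's current height, and should not refer to a
// state the host no longer stores. The error names and the explicit lower bound are
// assumptions; the hunks above only show the upper-bound comparison.
type Height = u64;

#[derive(Debug, PartialEq)]
enum Error {
    InvalidConsensusHeight { claimed: Height, host: Height },
    StaleConsensusHeight { claimed: Height, oldest: Height },
}

// Reject heights the host cannot possibly hold proofs for.
fn check_client_consensus_height(
    host_height: Height,
    host_oldest_height: Height,
    claimed_height: Height,
) -> Result<(), Error> {
    if claimed_height > host_height {
        // The message claims a consensus state from the future.
        return Err(Error::InvalidConsensusHeight { claimed: claimed_height, host: host_height });
    }
    if claimed_height < host_oldest_height {
        // The claimed state has already been pruned on the host.
        return Err(Error::StaleConsensusHeight { claimed: claimed_height, oldest: host_oldest_height });
    }
    Ok(())
}

fn main() {
    assert!(check_client_consensus_height(100, 10, 50).is_ok());
    assert!(check_client_consensus_height(100, 10, 150).is_err());
    assert!(check_client_consensus_height(100, 10, 5).is_err());
}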
-pub fn verify_proofs( - ctx: &dyn LightClientContext, +pub fn verify_proofs( + ctx: &dyn LightClientContext, client_state: Option, height: Height, connection_end: &ConnectionEnd, @@ -60,8 +59,8 @@ pub fn verify_proofs( /// Verifies the authenticity and semantic correctness of a commitment `proof`. The commitment /// claims to prove that an object of type connection exists on the source chain (i.e., the chain /// which created this proof). This object must match the state of `expected_conn`. -pub fn verify_connection_proof( - ctx: &dyn LightClientContext, +pub fn verify_connection_proof( + ctx: &dyn LightClientContext, height: Height, connection_end: &ConnectionEnd, expected_conn: &ConnectionEnd, @@ -115,8 +114,8 @@ pub fn verify_connection_proof( - ctx: &dyn LightClientContext, +pub fn verify_client_proof( + ctx: &dyn LightClientContext, height: Height, connection_end: &ConnectionEnd, expected_client_state: AnyClientState, @@ -154,8 +153,8 @@ pub fn verify_client_proof( - ctx: &dyn LightClientContext, +pub fn verify_consensus_proof( + ctx: &dyn LightClientContext, height: Height, connection_end: &ConnectionEnd, proof: &ConsensusProof, @@ -197,8 +196,8 @@ pub fn verify_consensus_proof( - ctx: &dyn LightClientContext, +pub fn check_client_consensus_height( + ctx: &dyn LightClientContext, claimed_height: Height, ) -> Result<(), Error> { if claimed_height > ctx.host_height() { diff --git a/modules/src/core/ics04_channel/handler.rs b/modules/src/core/ics04_channel/handler.rs index fc052ab6bf..3b026f07e7 100644 --- a/modules/src/core/ics04_channel/handler.rs +++ b/modules/src/core/ics04_channel/handler.rs @@ -64,8 +64,8 @@ pub fn channel_dispatch( msg: &ChannelMsg, ) -> Result<(HandlerOutputBuilder<()>, ChannelResult), Error> where - Ctx: LightClientContext, - Crypto: CryptoOps + Debug + Send + Sync + PartialEq + Eq, + Ctx: LightClientContext, + Crypto: CryptoOps, { let output = match msg { ChannelMsg::ChannelOpenInit(msg) => chan_open_init::process(ctx, msg), @@ -173,8 +173,8 @@ pub fn packet_dispatch( msg: &PacketMsg, ) -> Result<(HandlerOutputBuilder<()>, PacketResult), Error> where - Ctx: LightClientContext, - Crypto: CryptoOps + Debug + Send + Sync + PartialEq + Eq, + Ctx: LightClientContext, + Crypto: CryptoOps, { let output = match msg { PacketMsg::RecvPacket(msg) => recv_packet::process::(ctx, msg), diff --git a/modules/src/core/ics04_channel/handler/acknowledgement.rs b/modules/src/core/ics04_channel/handler/acknowledgement.rs index 501475526b..efa39603eb 100644 --- a/modules/src/core/ics04_channel/handler/acknowledgement.rs +++ b/modules/src/core/ics04_channel/handler/acknowledgement.rs @@ -22,8 +22,8 @@ pub struct AckPacketResult { pub seq_number: Option, } -pub fn process( - ctx: &dyn LightClientContext, +pub fn process( + ctx: &dyn LightClientContext, msg: &MsgAcknowledgement, ) -> HandlerResult { let mut output = HandlerOutput::builder(); diff --git a/modules/src/core/ics04_channel/handler/chan_close_confirm.rs b/modules/src/core/ics04_channel/handler/chan_close_confirm.rs index 574ebe2fdd..8aa92bb568 100644 --- a/modules/src/core/ics04_channel/handler/chan_close_confirm.rs +++ b/modules/src/core/ics04_channel/handler/chan_close_confirm.rs @@ -12,10 +12,9 @@ use crate::core::ics26_routing::context::LightClientContext; use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; -use core::fmt::Debug; -pub(crate) fn process( - ctx: &dyn LightClientContext, +pub(crate) fn process( + ctx: &dyn LightClientContext, msg: 
&MsgChannelCloseConfirm, ) -> HandlerResult { let mut output = HandlerOutput::builder(); diff --git a/modules/src/core/ics04_channel/handler/chan_close_init.rs b/modules/src/core/ics04_channel/handler/chan_close_init.rs index e5f62ad737..1cfd416536 100644 --- a/modules/src/core/ics04_channel/handler/chan_close_init.rs +++ b/modules/src/core/ics04_channel/handler/chan_close_init.rs @@ -1,5 +1,5 @@ //! Protocol logic specific to ICS4 messages of type `MsgChannelCloseInit`. -use crate::clients::crypto_ops::crypto::CryptoOps; + use crate::core::ics03_connection::connection::State as ConnectionState; use crate::core::ics04_channel::channel::State; use crate::core::ics04_channel::error::Error; @@ -10,8 +10,8 @@ use crate::core::ics26_routing::context::LightClientContext; use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; -pub(crate) fn process( - ctx: &dyn LightClientContext, +pub(crate) fn process( + ctx: &dyn LightClientContext, msg: &MsgChannelCloseInit, ) -> HandlerResult { let mut output = HandlerOutput::builder(); diff --git a/modules/src/core/ics04_channel/handler/chan_open_ack.rs b/modules/src/core/ics04_channel/handler/chan_open_ack.rs index ca0a0689da..20109cafe0 100644 --- a/modules/src/core/ics04_channel/handler/chan_open_ack.rs +++ b/modules/src/core/ics04_channel/handler/chan_open_ack.rs @@ -11,10 +11,9 @@ use crate::core::ics26_routing::context::LightClientContext; use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; -use core::fmt::Debug; -pub(crate) fn process( - ctx: &dyn LightClientContext, +pub(crate) fn process( + ctx: &dyn LightClientContext, msg: &MsgChannelOpenAck, ) -> HandlerResult { let mut output = HandlerOutput::builder(); diff --git a/modules/src/core/ics04_channel/handler/chan_open_confirm.rs b/modules/src/core/ics04_channel/handler/chan_open_confirm.rs index b0a864370a..fc955886ae 100644 --- a/modules/src/core/ics04_channel/handler/chan_open_confirm.rs +++ b/modules/src/core/ics04_channel/handler/chan_open_confirm.rs @@ -11,10 +11,9 @@ use crate::core::ics26_routing::context::LightClientContext; use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; -use core::fmt::Debug; -pub(crate) fn process( - ctx: &dyn LightClientContext, +pub(crate) fn process( + ctx: &dyn LightClientContext, msg: &MsgChannelOpenConfirm, ) -> HandlerResult { let mut output = HandlerOutput::builder(); diff --git a/modules/src/core/ics04_channel/handler/chan_open_init.rs b/modules/src/core/ics04_channel/handler/chan_open_init.rs index c3685efdee..4a1e9ecc7a 100644 --- a/modules/src/core/ics04_channel/handler/chan_open_init.rs +++ b/modules/src/core/ics04_channel/handler/chan_open_init.rs @@ -1,6 +1,5 @@ //! Protocol logic specific to ICS4 messages of type `MsgChannelOpenInit`. 
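// A minimal stand-in for the builder pattern every handler above opens with
// (`let mut output = HandlerOutput::builder();`): accumulate log lines and events while
// processing, then attach the result at the end. The builder, event and output types below
// are simplified assumptions, not the crate's real `HandlerOutput`.
#[derive(Debug)]
enum IbcEvent {
    OpenInitChannel { channel_id: String },
}

#[derive(Debug)]
struct HandlerOutput<T> {
    result: T,
    log: Vec<String>,
    events: Vec<IbcEvent>,
}

#[derive(Default)]
struct HandlerOutputBuilder {
    log: Vec<String>,
    events: Vec<IbcEvent>,
}

impl HandlerOutputBuilder {
    fn log(&mut self, msg: impl Into<String>) {
        self.log.push(msg.into());
    }
    fn emit(&mut self, event: IbcEvent) {
        self.events.push(event);
    }
    // Consume the builder once the handler has produced its result.
    fn with_result<T>(self, result: T) -> HandlerOutput<T> {
        HandlerOutput { result, log: self.log, events: self.events }
    }
}

fn main() {
    // The shape of a typical handler body in the files above.
    let mut output = HandlerOutputBuilder::default();
    output.log("success: channel open init");
    output.emit(IbcEvent::OpenInitChannel { channel_id: "channel-0".to_string() });
    let out = output.with_result("channel end stored");
    println!("{:?} with {} event(s)", out.result, out.events.len());
}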
-use crate::clients::crypto_ops::crypto::CryptoOps; use crate::core::ics04_channel::channel::{ChannelEnd, State}; use crate::core::ics04_channel::error::Error; use crate::core::ics04_channel::events::Attributes; @@ -12,8 +11,8 @@ use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; -pub(crate) fn process( - ctx: &dyn LightClientContext, +pub(crate) fn process( + ctx: &dyn LightClientContext, msg: &MsgChannelOpenInit, ) -> HandlerResult { let mut output = HandlerOutput::builder(); diff --git a/modules/src/core/ics04_channel/handler/chan_open_try.rs b/modules/src/core/ics04_channel/handler/chan_open_try.rs index c7ac35e4c0..6e38c4d100 100644 --- a/modules/src/core/ics04_channel/handler/chan_open_try.rs +++ b/modules/src/core/ics04_channel/handler/chan_open_try.rs @@ -13,10 +13,9 @@ use crate::core::ics26_routing::context::LightClientContext; use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; -use core::fmt::Debug; -pub(crate) fn process( - ctx: &dyn LightClientContext, +pub(crate) fn process( + ctx: &dyn LightClientContext, msg: &MsgChannelOpenTry, ) -> HandlerResult { let mut output = HandlerOutput::builder(); diff --git a/modules/src/core/ics04_channel/handler/recv_packet.rs b/modules/src/core/ics04_channel/handler/recv_packet.rs index e42979ac46..cc650e2144 100644 --- a/modules/src/core/ics04_channel/handler/recv_packet.rs +++ b/modules/src/core/ics04_channel/handler/recv_packet.rs @@ -29,8 +29,8 @@ pub enum RecvPacketResult { NoOp, } -pub fn process( - ctx: &dyn LightClientContext, +pub fn process( + ctx: &dyn LightClientContext, msg: &MsgRecvPacket, ) -> HandlerResult { let mut output = HandlerOutput::builder(); diff --git a/modules/src/core/ics04_channel/handler/send_packet.rs b/modules/src/core/ics04_channel/handler/send_packet.rs index c2e5b767dd..6023f79ee0 100644 --- a/modules/src/core/ics04_channel/handler/send_packet.rs +++ b/modules/src/core/ics04_channel/handler/send_packet.rs @@ -1,4 +1,3 @@ -use crate::clients::crypto_ops::crypto::CryptoOps; use crate::core::ics02_client::client_state::ClientState; use crate::core::ics04_channel::channel::Counterparty; use crate::core::ics04_channel::channel::State; @@ -22,8 +21,8 @@ pub struct SendPacketResult { pub commitment: PacketCommitment, } -pub fn send_packet( - ctx: &dyn LightClientContext, +pub fn send_packet( + ctx: &dyn LightClientContext, packet: Packet, ) -> HandlerResult { let mut output = HandlerOutput::builder(); diff --git a/modules/src/core/ics04_channel/handler/timeout.rs b/modules/src/core/ics04_channel/handler/timeout.rs index 88befedeaf..9ec67eea10 100644 --- a/modules/src/core/ics04_channel/handler/timeout.rs +++ b/modules/src/core/ics04_channel/handler/timeout.rs @@ -24,8 +24,8 @@ pub struct TimeoutPacketResult { pub channel: Option, } -pub fn process( - ctx: &dyn LightClientContext, +pub fn process( + ctx: &dyn LightClientContext, msg: &MsgTimeout, ) -> HandlerResult { let mut output = HandlerOutput::builder(); diff --git a/modules/src/core/ics04_channel/handler/timeout_on_close.rs b/modules/src/core/ics04_channel/handler/timeout_on_close.rs index 07e2014ef1..0e944e7d19 100644 --- a/modules/src/core/ics04_channel/handler/timeout_on_close.rs +++ b/modules/src/core/ics04_channel/handler/timeout_on_close.rs @@ -13,10 +13,9 @@ use crate::core::ics26_routing::context::LightClientContext; use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; -use core::fmt::Debug; -pub 
fn process( - ctx: &dyn LightClientContext, +pub fn process( + ctx: &dyn LightClientContext, msg: &MsgTimeoutOnClose, ) -> HandlerResult { let mut output = HandlerOutput::builder(); diff --git a/modules/src/core/ics04_channel/handler/verify.rs b/modules/src/core/ics04_channel/handler/verify.rs index fe0e854138..b5e9d29106 100644 --- a/modules/src/core/ics04_channel/handler/verify.rs +++ b/modules/src/core/ics04_channel/handler/verify.rs @@ -11,11 +11,10 @@ use crate::core::ics26_routing::context::LightClientContext; use crate::prelude::*; use crate::proofs::Proofs; use crate::Height; -use core::fmt::Debug; /// Entry point for verifying all proofs bundled in any ICS4 message for channel protocols. -pub fn verify_channel_proofs( - ctx: &dyn LightClientContext, +pub fn verify_channel_proofs( + ctx: &dyn LightClientContext, height: Height, channel_end: &ChannelEnd, connection_end: &ConnectionEnd, @@ -57,8 +56,8 @@ pub fn verify_channel_proofs( - ctx: &dyn LightClientContext, +pub fn verify_packet_recv_proofs( + ctx: &dyn LightClientContext, height: Height, packet: &Packet, connection_end: &ConnectionEnd, @@ -105,10 +104,8 @@ pub fn verify_packet_recv_proofs( - ctx: &dyn LightClientContext, +pub fn verify_packet_acknowledgement_proofs( + ctx: &dyn LightClientContext, height: Height, packet: &Packet, acknowledgement: Acknowledgement, @@ -152,8 +149,8 @@ pub fn verify_packet_acknowledgement_proofs< } /// Entry point for verifying all timeout proofs. -pub fn verify_next_sequence_recv( - ctx: &dyn LightClientContext, +pub fn verify_next_sequence_recv( + ctx: &dyn LightClientContext, height: Height, connection_end: &ConnectionEnd, packet: Packet, @@ -193,8 +190,8 @@ pub fn verify_next_sequence_recv( - ctx: &dyn LightClientContext, +pub fn verify_packet_receipt_absence( + ctx: &dyn LightClientContext, height: Height, connection_end: &ConnectionEnd, packet: Packet, diff --git a/modules/src/core/ics04_channel/handler/write_acknowledgement.rs b/modules/src/core/ics04_channel/handler/write_acknowledgement.rs index f60d6a126b..e86f1f5821 100644 --- a/modules/src/core/ics04_channel/handler/write_acknowledgement.rs +++ b/modules/src/core/ics04_channel/handler/write_acknowledgement.rs @@ -1,4 +1,3 @@ -use crate::clients::crypto_ops::crypto::CryptoOps; use crate::core::ics04_channel::channel::State; use crate::core::ics04_channel::commitment::AcknowledgementCommitment; use crate::core::ics04_channel::error::Error; @@ -20,8 +19,8 @@ pub struct WriteAckPacketResult { pub ack_commitment: AcknowledgementCommitment, } -pub fn process( - ctx: &dyn LightClientContext, +pub fn process( + ctx: &dyn LightClientContext, packet: Packet, ack: Vec, ) -> HandlerResult { diff --git a/modules/src/core/ics26_routing/handler.rs b/modules/src/core/ics26_routing/handler.rs index 79aca9fa48..9e56d2225c 100644 --- a/modules/src/core/ics26_routing/handler.rs +++ b/modules/src/core/ics26_routing/handler.rs @@ -1,11 +1,9 @@ use crate::clients::crypto_ops::crypto::CryptoOps; use crate::prelude::*; -use core::fmt::Debug; use ibc_proto::google::protobuf::Any; use crate::applications::ics20_fungible_token_transfer::relay_application_logic::send_transfer::send_transfer as ics20_msg_dispatcher; -use crate::core::ics02_client::context::{ClientKeeper, ClientReader}; use crate::core::ics02_client::handler::dispatch as ics2_msg_dispatcher; use crate::core::ics03_connection::handler::dispatch as ics3_msg_dispatcher; use crate::core::ics04_channel::handler::{ @@ -39,11 +37,11 @@ pub fn deliver( message: Any, ) -> Result<(Vec, Vec), Error> where - 
Ctx: Ics26Context + ClientReader + ClientKeeper, - Crypto: CryptoOps + Debug + Send + Sync + PartialEq + Eq, + Ctx: Ics26Context, + Crypto: CryptoOps, { // Decode the proto message into a domain message, creating an ICS26 envelope. - let envelope = decode::(message)?; + let envelope = decode(message)?; // Process the envelope, and accumulate any events that were generated. let output = dispatch::<_, Crypto>(ctx, envelope)?; @@ -52,7 +50,7 @@ where } /// Attempts to convert a message into a [Ics26Envelope] message -pub fn decode(message: Any) -> Result, Error> { +pub fn decode(message: Any) -> Result { message.try_into() } @@ -61,13 +59,10 @@ pub fn decode(message: Any) -> Result, Erro /// and events produced after processing the input `msg`. /// If this method returns an error, the runtime is expected to rollback all state modifications to /// the `Ctx` caused by all messages from the transaction that this `msg` is a part of. -pub fn dispatch( - ctx: &mut Ctx, - msg: Ics26Envelope, -) -> Result, Error> +pub fn dispatch(ctx: &mut Ctx, msg: Ics26Envelope) -> Result, Error> where - Ctx: Ics26Context + ClientReader + ClientKeeper, - Crypto: CryptoOps + Debug + Send + Sync + PartialEq + Eq, + Ctx: Ics26Context, + Crypto: CryptoOps, { let output = match msg { Ics2Msg(msg) => { @@ -237,7 +232,7 @@ mod tests { // Test parameters struct Test { name: String, - msg: Ics26Envelope, + msg: Ics26Envelope, want_pass: bool, } let default_signer = get_dummy_account_id(); @@ -267,7 +262,7 @@ mod tests { let create_client_msg = MsgCreateAnyClient::new( AnyClientState::from(MockClientState::new(MockHeader::new(start_client_height))), - Some(AnyConsensusState::Mock(MockConsensusState::::new( + Some(AnyConsensusState::Mock(MockConsensusState::new( MockHeader::new(start_client_height), ))), default_signer.clone(), diff --git a/modules/src/core/ics26_routing/msgs.rs b/modules/src/core/ics26_routing/msgs.rs index 3f3b638e00..3f2306e6e9 100644 --- a/modules/src/core/ics26_routing/msgs.rs +++ b/modules/src/core/ics26_routing/msgs.rs @@ -15,14 +15,14 @@ use tendermint_proto::Protobuf; /// Enumeration of all messages that the local ICS26 module is capable of routing. 
#[derive(Clone, Debug)] -pub enum Ics26Envelope { - Ics2Msg(ClientMsg), +pub enum Ics26Envelope { + Ics2Msg(ClientMsg), Ics3Msg(ConnectionMsg), Ics4ChannelMsg(ChannelMsg), Ics4PacketMsg(PacketMsg), } -impl TryFrom for Ics26Envelope { +impl TryFrom for Ics26Envelope { type Error = Error; fn try_from(any_msg: Any) -> Result { diff --git a/modules/src/mock/client_def.rs b/modules/src/mock/client_def.rs index b095f14730..155666847e 100644 --- a/modules/src/mock/client_def.rs +++ b/modules/src/mock/client_def.rs @@ -1,4 +1,3 @@ -use crate::clients::crypto_ops::crypto::CryptoOps; use crate::core::ics02_client::client_consensus::AnyConsensusState; use crate::core::ics02_client::client_def::{ClientDef, ConsensusUpdateResult}; use crate::core::ics02_client::client_state::AnyClientState; @@ -21,28 +20,21 @@ use crate::prelude::*; use crate::Height; use core::fmt::Debug; -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct MockClient(core::marker::PhantomData); +#[derive(Clone, Debug, Default, PartialEq, Eq)] +pub struct MockClient; -impl Default for MockClient { - fn default() -> Self { - Self(Default::default()) - } -} - -impl ClientDef for MockClient { +impl ClientDef for MockClient { type Header = MockHeader; type ClientState = MockClientState; - type ConsensusState = MockConsensusState; - type Crypto = Crypto; + type ConsensusState = MockConsensusState; fn update_state( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn LightClientContext, _client_id: ClientId, client_state: Self::ClientState, header: Self::Header, - ) -> Result<(Self::ClientState, ConsensusUpdateResult), Error> { + ) -> Result<(Self::ClientState, ConsensusUpdateResult), Error> { if client_state.latest_height() >= header.height() { return Err(Error::low_header_height( header.height(), @@ -58,7 +50,7 @@ impl ClientDef for Moc fn verify_client_consensus_state( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn LightClientContext, _client_state: &Self::ClientState, _height: Height, prefix: &CommitmentPrefix, @@ -66,7 +58,7 @@ impl ClientDef for Moc _root: &CommitmentRoot, client_id: &ClientId, consensus_height: Height, - _expected_consensus_state: &AnyConsensusState, + _expected_consensus_state: &AnyConsensusState, ) -> Result<(), Error> { let client_prefixed_path = Path::ClientConsensusState(ClientConsensusStatePath { client_id: client_id.clone(), @@ -82,7 +74,7 @@ impl ClientDef for Moc fn verify_connection_state( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn LightClientContext, _client_id: &ClientId, _client_state: &Self::ClientState, _height: Height, @@ -97,7 +89,7 @@ impl ClientDef for Moc fn verify_channel_state( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn LightClientContext, _client_id: &ClientId, _client_state: &Self::ClientState, _height: Height, @@ -113,7 +105,7 @@ impl ClientDef for Moc fn verify_client_full_state( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn LightClientContext, _client_state: &Self::ClientState, _height: Height, _prefix: &CommitmentPrefix, @@ -127,7 +119,7 @@ impl ClientDef for Moc fn verify_packet_data( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn LightClientContext, _client_id: &ClientId, _client_state: &Self::ClientState, _height: Height, @@ -144,7 +136,7 @@ impl ClientDef for Moc fn verify_packet_acknowledgement( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn LightClientContext, _client_id: &ClientId, _client_state: &Self::ClientState, _height: Height, @@ -161,7 +153,7 @@ impl ClientDef for Moc fn verify_next_sequence_recv( &self, - _ctx: &dyn 
LightClientContext, + _ctx: &dyn LightClientContext, _client_id: &ClientId, _client_state: &Self::ClientState, _height: Height, @@ -177,7 +169,7 @@ impl ClientDef for Moc fn verify_packet_receipt_absence( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn LightClientContext, _client_id: &ClientId, _client_state: &Self::ClientState, _height: Height, @@ -197,7 +189,7 @@ impl ClientDef for Moc consensus_state: &Self::ConsensusState, _proof_upgrade_client: Vec, _proof_upgrade_consensus_state: Vec, - ) -> Result<(Self::ClientState, ConsensusUpdateResult), Error> { + ) -> Result<(Self::ClientState, ConsensusUpdateResult), Error> { Ok(( *client_state, ConsensusUpdateResult::Single(AnyConsensusState::Mock(consensus_state.clone())), @@ -206,7 +198,7 @@ impl ClientDef for Moc fn verify_header( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn LightClientContext, _client_id: ClientId, _client_state: Self::ClientState, _header: Self::Header, @@ -224,7 +216,7 @@ impl ClientDef for Moc fn check_for_misbehaviour( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn LightClientContext, _client_id: ClientId, _client_state: Self::ClientState, _header: Self::Header, diff --git a/modules/src/mock/client_state.rs b/modules/src/mock/client_state.rs index 639cc8a1f0..2601d09511 100644 --- a/modules/src/mock/client_state.rs +++ b/modules/src/mock/client_state.rs @@ -1,5 +1,4 @@ use crate::prelude::*; -use crate::test_utils::Crypto; use alloc::collections::btree_map::BTreeMap as HashMap; @@ -10,7 +9,6 @@ use core::time::Duration; use serde::{Deserialize, Serialize}; use tendermint_proto::Protobuf; -use crate::clients::crypto_ops::crypto::CryptoOps; use ibc_proto::ibc::mock::ClientState as RawMockClientState; use ibc_proto::ibc::mock::ConsensusState as RawMockConsensusState; @@ -35,7 +33,7 @@ pub struct MockClientRecord { pub client_state: Option, /// Mapping of heights to consensus states for this client. - pub consensus_states: HashMap>, + pub consensus_states: HashMap, } /// A mock of a client state. 
For an example of a real structure that this mocks, you can see @@ -122,25 +120,23 @@ impl ClientState for MockClientState { } } -impl From> for MockClientState { - fn from(cs: MockConsensusState) -> Self { +impl From for MockClientState { + fn from(cs: MockConsensusState) -> Self { Self::new(cs.header) } } #[derive(Clone, Debug, PartialEq, Eq, Serialize)] -pub struct MockConsensusState { +pub struct MockConsensusState { pub header: MockHeader, pub root: CommitmentRoot, - _phantom: core::marker::PhantomData, } -impl MockConsensusState { +impl MockConsensusState { pub fn new(header: MockHeader) -> Self { MockConsensusState { header, root: CommitmentRoot::from(vec![0]), - _phantom: Default::default(), } } @@ -149,9 +145,9 @@ impl MockConsensusState { } } -impl Protobuf for MockConsensusState {} +impl Protobuf for MockConsensusState {} -impl TryFrom for MockConsensusState { +impl TryFrom for MockConsensusState { type Error = Error; fn try_from(raw: RawMockConsensusState) -> Result { @@ -160,13 +156,12 @@ impl TryFrom for MockConsensusState From> for RawMockConsensusState { - fn from(value: MockConsensusState) -> Self { +impl From for RawMockConsensusState { + fn from(value: MockConsensusState) -> Self { RawMockConsensusState { header: Some(ibc_proto::ibc::mock::Header { height: Some(value.header.height().into()), @@ -176,17 +171,14 @@ impl From> for RawMockConsensusState { } } -impl From> for AnyConsensusState { - fn from(mcs: MockConsensusState) -> Self { +impl From for AnyConsensusState { + fn from(mcs: MockConsensusState) -> Self { Self::Mock(mcs) } } -impl ConsensusState - for MockConsensusState -{ +impl ConsensusState for MockConsensusState { type Error = Infallible; - type Crypto = Crypto; fn client_type(&self) -> ClientType { ClientType::Mock @@ -196,7 +188,7 @@ impl ConsensusState &self.root } - fn wrap_any(self) -> AnyConsensusState { + fn wrap_any(self) -> AnyConsensusState { AnyConsensusState::Mock(self) } } diff --git a/modules/src/mock/context.rs b/modules/src/mock/context.rs index 8fe80d6ad6..3f2e9bc463 100644 --- a/modules/src/mock/context.rs +++ b/modules/src/mock/context.rs @@ -457,7 +457,7 @@ impl MockContext { /// A datagram passes from the relayer to the IBC module (on host chain). /// Alternative method to `Ics18Context::send` that does not exercise any serialization. /// Used in testing the Ics18 algorithms, hence this may return a Ics18Error. - pub fn deliver(&mut self, msg: Ics26Envelope) -> Result<(), Ics18Error> { + pub fn deliver(&mut self, msg: Ics26Envelope) -> Result<(), Ics18Error> { dispatch::<_, Crypto>(self, msg).map_err(Ics18Error::transaction_failed)?; // Create a new block. 
self.advance_host_chain_height(); @@ -509,10 +509,7 @@ impl MockContext { .insert(port_id, module_id); } - pub fn consensus_states( - &self, - client_id: &ClientId, - ) -> Vec> { + pub fn consensus_states(&self, client_id: &ClientId) -> Vec { self.clients[client_id] .consensus_states .iter() @@ -535,7 +532,7 @@ impl MockContext { &self, client_id: &ClientId, height: &Height, - ) -> &AnyConsensusState { + ) -> &AnyConsensusState { self.clients[client_id] .consensus_states .get(height) @@ -1011,7 +1008,6 @@ impl ConnectionKeeper for MockContext { } impl ClientReader for MockContext { - type Crypto = Crypto; fn client_type(&self, client_id: &ClientId) -> Result { match self.ibc_store.lock().unwrap().clients.get(client_id) { Some(client_record) => Ok(client_record.client_type), @@ -1033,7 +1029,7 @@ impl ClientReader for MockContext { &self, client_id: &ClientId, height: Height, - ) -> Result, Ics02Error> { + ) -> Result { match self.clients.get(client_id) { Some(client_record) => match client_record.consensus_states.get(&height) { Some(consensus_state) => Ok(consensus_state.clone()), @@ -1054,7 +1050,7 @@ impl ClientReader for MockContext { &self, client_id: &ClientId, height: Height, - ) -> Result>, Ics02Error> { + ) -> Result, Ics02Error> { let client_record = self .clients .get(client_id) @@ -1081,7 +1077,7 @@ impl ClientReader for MockContext { &self, client_id: &ClientId, height: Height, - ) -> Result>, Ics02Error> { + ) -> Result, Ics02Error> { let client_record = self .clients .get(client_id) @@ -1116,17 +1112,14 @@ impl ClientReader for MockContext { .unwrap() } - fn host_consensus_state( - &self, - height: Height, - ) -> Result, Ics02Error> { + fn host_consensus_state(&self, height: Height) -> Result { match self.host_block(height) { Some(block_ref) => Ok(block_ref.clone().into()), None => Err(Ics02Error::missing_local_consensus_state(height)), } } - fn pending_host_consensus_state(&self) -> Result, Ics02Error> { + fn pending_host_consensus_state(&self) -> Result { Err(Ics02Error::missing_local_consensus_state(Height::zero())) } @@ -1136,7 +1129,6 @@ impl ClientReader for MockContext { } impl ClientKeeper for MockContext { - type Crypto = Crypto; fn store_client_type( &mut self, client_id: ClientId, @@ -1179,7 +1171,7 @@ impl ClientKeeper for MockContext { &mut self, client_id: ClientId, height: Height, - consensus_state: AnyConsensusState, + consensus_state: AnyConsensusState, ) -> Result<(), Ics02Error> { let mut ibc_store = self.ibc_store.lock().unwrap(); let client_record = ibc_store diff --git a/modules/src/mock/header.rs b/modules/src/mock/header.rs index f140d9852c..cc3c38b092 100644 --- a/modules/src/mock/header.rs +++ b/modules/src/mock/header.rs @@ -9,7 +9,6 @@ use crate::core::ics02_client::error::Error; use crate::core::ics02_client::header::AnyHeader; use crate::core::ics02_client::header::Header; use crate::mock::client_state::MockConsensusState; -use crate::test_utils::Crypto; use crate::timestamp::Timestamp; use crate::Height; @@ -80,7 +79,7 @@ impl Header for MockHeader { } } -impl From for AnyConsensusState { +impl From for AnyConsensusState { fn from(h: MockHeader) -> Self { AnyConsensusState::Mock(MockConsensusState::new(h)) } diff --git a/modules/src/mock/host.rs b/modules/src/mock/host.rs index a4e64f38d9..d94b90f893 100644 --- a/modules/src/mock/host.rs +++ b/modules/src/mock/host.rs @@ -10,7 +10,6 @@ use crate::core::ics02_client::header::AnyHeader; use crate::core::ics24_host::identifier::ChainId; use crate::mock::header::MockHeader; use crate::prelude::*; 
-use crate::test_utils::Crypto; use crate::timestamp::Timestamp; use crate::Height; @@ -84,14 +83,14 @@ impl HostBlock { } } -impl From for AnyConsensusState { +impl From for AnyConsensusState { fn from(light_block: TmLightBlock) -> Self { - let cs = TMConsensusState::::from(light_block.signed_header.header); + let cs = TMConsensusState::from(light_block.signed_header.header); AnyConsensusState::Tendermint(cs) } } -impl From for AnyConsensusState { +impl From for AnyConsensusState { fn from(any_block: HostBlock) -> Self { match any_block { HostBlock::Mock(mock_header) => mock_header.into(), diff --git a/modules/src/relayer/ics18_relayer/utils.rs b/modules/src/relayer/ics18_relayer/utils.rs index 8cae83dbdd..060cef55b5 100644 --- a/modules/src/relayer/ics18_relayer/utils.rs +++ b/modules/src/relayer/ics18_relayer/utils.rs @@ -1,4 +1,3 @@ -use crate::clients::crypto_ops::crypto::CryptoOps; use crate::core::ics02_client::header::AnyHeader; use crate::core::ics02_client::msgs::update_client::MsgUpdateAnyClient; use crate::core::ics02_client::msgs::ClientMsg; @@ -8,14 +7,13 @@ use crate::relayer::ics18_relayer::error::Error; /// Builds a `ClientMsg::UpdateClient` for a client with id `client_id` running on the `dest` /// context, assuming that the latest header on the source context is `src_header`. -pub fn build_client_update_datagram( +pub fn build_client_update_datagram( dest: &Ctx, client_id: &ClientId, src_header: AnyHeader, -) -> Result, Error> +) -> Result where Ctx: Ics18Context, - Crypto: CryptoOps, { // Check if client for ibc0 on ibc1 has been updated to latest height: // - query client state on destination chain @@ -60,7 +58,6 @@ mod tests { use crate::prelude::*; use crate::relayer::ics18_relayer::context::Ics18Context; use crate::relayer::ics18_relayer::utils::build_client_update_datagram; - use crate::test_utils::Crypto; use crate::Height; use test_log::test; use tracing::debug; @@ -174,11 +171,8 @@ mod tests { ClientType::Tendermint ); - let client_msg_a_res = build_client_update_datagram::<_, Crypto>( - &ctx_a, - &client_on_a_for_b, - b_latest_header, - ); + let client_msg_a_res = + build_client_update_datagram(&ctx_a, &client_on_a_for_b, b_latest_header); assert!( client_msg_a_res.is_ok(), diff --git a/modules/tests/runner/mod.rs b/modules/tests/runner/mod.rs index e4786210c4..8cf9d2f3cd 100644 --- a/modules/tests/runner/mod.rs +++ b/modules/tests/runner/mod.rs @@ -159,8 +159,8 @@ impl IbcTestRunner { AnyClientState::Mock(MockClientState::new(Self::mock_header(height))) } - pub fn consensus_state(height: Height) -> AnyConsensusState { - AnyConsensusState::Mock(MockConsensusState::::new(Self::mock_header(height))) + pub fn consensus_state(height: Height) -> AnyConsensusState { + AnyConsensusState::Mock(MockConsensusState::new(Self::mock_header(height))) } fn signer() -> Signer { From f50be04f2edda0dbd5afb7376c15ada2e8beaf4d Mon Sep 17 00:00:00 2001 From: Seun Lanlege Date: Tue, 24 May 2022 13:31:08 +0100 Subject: [PATCH 33/96] fix some lints --- modules/src/clients/ics11_beefy/client_def.rs | 20 +++++++++---------- modules/src/clients/ics11_beefy/mod.rs | 3 +-- 2 files changed, 11 insertions(+), 12 deletions(-) diff --git a/modules/src/clients/ics11_beefy/client_def.rs b/modules/src/clients/ics11_beefy/client_def.rs index 4aada31360..03cb81e2e4 100644 --- a/modules/src/clients/ics11_beefy/client_def.rs +++ b/modules/src/clients/ics11_beefy/client_def.rs @@ -190,6 +190,16 @@ impl ClientDef for BeefyClient { Ok(false) } + fn verify_upgrade_and_update_state( + &self, + 
_client_state: &Self::ClientState, + _consensus_state: &Self::ConsensusState, + _proof_upgrade_client: Vec, + _proof_upgrade_consensus_state: Vec, + ) -> Result<(Self::ClientState, ConsensusUpdateResult), Error> { + todo!() + } + fn verify_client_consensus_state( &self, _ctx: &dyn LightClientContext, @@ -378,16 +388,6 @@ impl ClientDef for BeefyClient { receipt_path, ) } - - fn verify_upgrade_and_update_state( - &self, - _client_state: &Self::ClientState, - _consensus_state: &Self::ConsensusState, - _proof_upgrade_client: Vec, - _proof_upgrade_consensus_state: Vec, - ) -> Result<(Self::ClientState, ConsensusUpdateResult), Error> { - todo!() - } } fn verify_membership>( diff --git a/modules/src/clients/ics11_beefy/mod.rs index 2b16db780e..ae63c9876d 100644 --- a/modules/src/clients/ics11_beefy/mod.rs +++ b/modules/src/clients/ics11_beefy/mod.rs @@ -1,5 +1,4 @@ -//! ICS 07: Tendermint Client implements a client verification algorithm for blockchains which use -//! the Beefy consensus algorithm. +//! ICS 11: Beefy Client implements a client verification algorithm for parachains in the DotSama ecosystem. pub mod client_def; pub mod client_state; From 47495ccdee07842522f8d6fbbd0c7f67a6cfa879 Mon Sep 17 00:00:00 2001 From: David Salami Date: Tue, 24 May 2022 15:28:38 +0100 Subject: [PATCH 34/96] better error description --- modules/src/clients/ics11_beefy/header.rs | 127 +++++++--------------- 1 file changed, 42 insertions(+), 85 deletions(-) diff --git a/modules/src/clients/ics11_beefy/header.rs index cf745a4ee7..391d6893dc 100644 --- a/modules/src/clients/ics11_beefy/header.rs +++ b/modules/src/clients/ics11_beefy/header.rs @@ -135,14 +135,17 @@ impl TryFrom for BeefyHeader { .collect::, Error>>()?; let mmr_update_proof = if let Some(mmr_update) = raw_header.mmr_update_proof { + let commitment = mmr_update + .signed_commitment + .as_ref() + .ok_or_else(|| { + Error::invalid_mmr_update("Signed commitment is missing".to_string()) + })? + .commitment + .as_ref() + .ok_or_else(|| Error::invalid_mmr_update("Commitment is missing".to_string()))?; let payload = { - mmr_update - .signed_commitment - .as_ref() - .ok_or_else(|| Error::invalid_mmr_update("".to_string()))? - .commitment - .as_ref() - .ok_or_else(|| Error::invalid_mmr_update("".to_string()))? + commitment .payload .iter() .filter_map(|item| { @@ -158,30 +161,20 @@ impl TryFrom for BeefyHeader { .ok_or_else(|| Error::invalid_mmr_update("".to_string()))? .clone() }; - let block_number = mmr_update - .signed_commitment - .as_ref() - .ok_or_else(|| Error::invalid_mmr_update("".to_string()))? - .commitment - .as_ref() - .ok_or_else(|| Error::invalid_mmr_update("".to_string()))? - .block_numer; - let validator_set_id = mmr_update - .signed_commitment - .as_ref() - .ok_or_else(|| Error::invalid_mmr_update("".to_string()))? - .commitment - .as_ref() - .ok_or_else(|| Error::invalid_mmr_update("".to_string()))? - .validator_set_id; + let block_number = commitment.block_numer; + let validator_set_id = commitment.validator_set_id; let signatures = mmr_update .signed_commitment .ok_or_else(|| { + Error::invalid_mmr_update("Signed Commitment is missing".to_string()) + })?
.signatures .into_iter() .map(|commitment_sig| { if commitment_sig.signature.len() != 65 { - return Err(Error::invalid_mmr_update("".to_string())); + return Err(Error::invalid_mmr_update( + "Invalid signature length".to_string(), + )); } Ok(SignatureWithAuthorityIndex { signature: { @@ -193,6 +186,16 @@ impl TryFrom for BeefyHeader { }) }) .collect::, Error>>()?; + + let mmr_leaf = mmr_update + .mmr_leaf + .as_ref() + .ok_or_else(|| Error::invalid_mmr_update("Mmr Leaf is missing".to_string()))?; + let beefy_next_authority_set = + mmr_leaf.beefy_next_authority_set.as_ref().ok_or_else(|| { + Error::invalid_mmr_update("Beefy Next Authority set is missing".to_string()) + })?; + Some(MmrUpdateProof { signed_commitment: SignedCommitment { commitment: Commitment { @@ -204,72 +207,24 @@ impl TryFrom for BeefyHeader { }, latest_mmr_leaf: MmrLeaf { version: { - let (major, minor) = split_leaf_version( - mmr_update - .mmr_leaf - .as_ref() - .ok_or_else(|| Error::invalid_mmr_update("".to_string()))? - .version - .saturated_into::(), - ); + let (major, minor) = + split_leaf_version(mmr_leaf.version.saturated_into::()); MmrLeafVersion::new(major, minor) }, parent_number_and_hash: { - let parent_number = mmr_update - .mmr_leaf - .as_ref() - .ok_or_else(|| Error::invalid_mmr_update("".to_string()))? - .parent_number; - let parent_hash = H256::decode( - &mut mmr_update - .mmr_leaf - .as_ref() - .ok_or_else(|| Error::invalid_mmr_update("".to_string()))? - .parent_hash - .as_slice(), - ) - .map_err(|e| Error::invalid_mmr_update(e.to_string()))?; + let parent_number = mmr_leaf.parent_number; + let parent_hash = H256::decode(&mut mmr_leaf.parent_hash.as_slice()) + .map_err(|e| Error::invalid_mmr_update(e.to_string()))?; (parent_number, parent_hash) }, beefy_next_authority_set: BeefyNextAuthoritySet { - id: mmr_update - .mmr_leaf - .as_ref() - .ok_or_else(|| Error::invalid_mmr_update("".to_string()))? - .beefy_next_authority_set - .as_ref() - .ok_or_else(|| Error::invalid_mmr_update("".to_string()))? - .id, - len: mmr_update - .mmr_leaf - .as_ref() - .ok_or_else(|| Error::invalid_mmr_update("".to_string()))? - .beefy_next_authority_set - .as_ref() - .ok_or_else(|| Error::invalid_mmr_update("".to_string()))? - .len, - root: H256::decode( - &mut mmr_update - .mmr_leaf - .as_ref() - .ok_or_else(|| Error::invalid_mmr_update("".to_string()))? - .beefy_next_authority_set - .as_ref() - .ok_or_else(|| Error::invalid_mmr_update("".to_string()))? - .authority_root - .as_slice(), - ) - .map_err(|e| Error::invalid_mmr_update(e.to_string()))?, + id: beefy_next_authority_set.id, + len: beefy_next_authority_set.len, + root: H256::decode(&mut beefy_next_authority_set.authority_root.as_slice()) + .map_err(|e| Error::invalid_mmr_update(e.to_string()))?, }, - leaf_extra: H256::decode( - &mut mmr_update - .mmr_leaf - .as_ref() - .ok_or_else(|| Error::invalid_mmr_update("".to_string()))? 
- .parachain_heads - .as_slice(), - ) - .map_err(|e| Error::invalid_mmr_update(e.to_string()))?, + leaf_extra: H256::decode(&mut mmr_leaf.parachain_heads.as_slice()) + .map_err(|e| Error::invalid_mmr_update(e.to_string()))?, }, mmr_proof: Proof { leaf_index: mmr_update.mmr_leaf_index, @@ -288,7 +243,9 @@ impl TryFrom for BeefyHeader { .into_iter() .map(|item| { if item.len() != 32 { - return Err(Error::invalid_mmr_update("".to_string())); + return Err(Error::invalid_mmr_update( + "Invalid authorities proof".to_string(), + )); } let mut dest = [0u8; 32]; dest.copy_from_slice(&item); From f97beb86313392af447a9e2ac1ad202bf9b428a6 Mon Sep 17 00:00:00 2001 From: Seun Lanlege Date: Wed, 25 May 2022 09:53:26 +0100 Subject: [PATCH 35/96] naming refactors --- .../relay_application_logic/send_transfer.rs | 4 +- modules/src/clients/crypto_ops/mod.rs | 1 - .../crypto.rs => host_functions.rs} | 4 +- modules/src/clients/ics11_beefy/client_def.rs | 36 +-- .../clients/ics11_beefy/consensus_state.rs | 10 +- modules/src/clients/ics11_beefy/header.rs | 6 +- modules/src/clients/ics13_near/client_def.rs | 220 +++++++++--------- .../src/clients/ics13_near/consensus_state.rs | 4 +- modules/src/clients/ics13_near/crypto_ops.rs | 4 +- modules/src/clients/mod.rs | 2 +- modules/src/core/ics02_client/client_def.rs | 16 +- modules/src/core/ics02_client/handler.rs | 10 +- .../ics02_client/handler/update_client.rs | 6 +- .../ics02_client/handler/upgrade_client.rs | 6 +- modules/src/core/ics03_connection/handler.rs | 12 +- .../ics03_connection/handler/conn_open_ack.rs | 6 +- .../handler/conn_open_confirm.rs | 6 +- .../ics03_connection/handler/conn_open_try.rs | 6 +- .../core/ics03_connection/handler/verify.rs | 22 +- modules/src/core/ics04_channel/handler.rs | 26 +-- .../ics04_channel/handler/acknowledgement.rs | 6 +- .../handler/chan_close_confirm.rs | 6 +- .../ics04_channel/handler/chan_open_ack.rs | 8 +- .../handler/chan_open_confirm.rs | 6 +- .../ics04_channel/handler/chan_open_try.rs | 6 +- .../core/ics04_channel/handler/recv_packet.rs | 6 +- .../src/core/ics04_channel/handler/timeout.rs | 8 +- .../ics04_channel/handler/timeout_on_close.rs | 10 +- .../src/core/ics04_channel/handler/verify.rs | 22 +- modules/src/core/ics26_routing/handler.rs | 24 +- modules/src/test_utils.rs | 4 +- 31 files changed, 252 insertions(+), 261 deletions(-) delete mode 100644 modules/src/clients/crypto_ops/mod.rs rename modules/src/clients/{crypto_ops/crypto.rs => host_functions.rs} (89%) diff --git a/modules/src/applications/ics20_fungible_token_transfer/relay_application_logic/send_transfer.rs b/modules/src/applications/ics20_fungible_token_transfer/relay_application_logic/send_transfer.rs index 2f7115c975..aae23629a2 100644 --- a/modules/src/applications/ics20_fungible_token_transfer/relay_application_logic/send_transfer.rs +++ b/modules/src/applications/ics20_fungible_token_transfer/relay_application_logic/send_transfer.rs @@ -1,6 +1,6 @@ use crate::applications::ics20_fungible_token_transfer::error::Error; use crate::applications::ics20_fungible_token_transfer::msgs::transfer::MsgTransfer; -use crate::clients::crypto_ops::crypto::CryptoOps; +use crate::clients::host_functions::HostFunctionsProvider; use crate::core::ics04_channel::handler::send_packet::send_packet; use crate::core::ics04_channel::packet::Packet; use crate::core::ics04_channel::packet::PacketResult; @@ -8,7 +8,7 @@ use crate::core::ics26_routing::context::LightClientContext; use crate::handler::HandlerOutput; use crate::prelude::*; -pub(crate) fn send_transfer( 
+pub(crate) fn send_transfer( ctx: &Ctx, msg: MsgTransfer, ) -> Result, Error> diff --git a/modules/src/clients/crypto_ops/mod.rs b/modules/src/clients/crypto_ops/mod.rs deleted file mode 100644 index 274f0edcd3..0000000000 --- a/modules/src/clients/crypto_ops/mod.rs +++ /dev/null @@ -1 +0,0 @@ -pub mod crypto; diff --git a/modules/src/clients/crypto_ops/crypto.rs b/modules/src/clients/host_functions.rs similarity index 89% rename from modules/src/clients/crypto_ops/crypto.rs rename to modules/src/clients/host_functions.rs index 8e178f72a0..e619beca2c 100644 --- a/modules/src/clients/crypto_ops/crypto.rs +++ b/modules/src/clients/host_functions.rs @@ -1,11 +1,10 @@ use crate::core::ics02_client::error::Error; use crate::prelude::*; -use beefy_client::traits::HostFunctions; use sp_core::H256; /// This trait captures all the functions that the host chain should provide for /// crypto operations. -pub trait CryptoOps: HostFunctions + Clone { +pub trait HostFunctionsProvider: beefy_client::traits::HostFunctions + Clone { /// This function should verify membership in a trie proof using parity's sp-trie package /// with a BlakeTwo256 Hasher fn verify_membership_trie_proof( @@ -14,6 +13,7 @@ pub trait CryptoOps: HostFunctions + Clone { key: &[u8], value: &[u8], ) -> Result<(), Error>; + /// This function should verify non membership in a trie proof using parity's sp-trie package /// with a BlakeTwo256 Hasher fn verify_non_membership_trie_proof( diff --git a/modules/src/clients/ics11_beefy/client_def.rs b/modules/src/clients/ics11_beefy/client_def.rs index 03cb81e2e4..4913db79d8 100644 --- a/modules/src/clients/ics11_beefy/client_def.rs +++ b/modules/src/clients/ics11_beefy/client_def.rs @@ -7,7 +7,7 @@ use pallet_mmr_primitives::BatchProof; use sp_core::H256; use tendermint_proto::Protobuf; -use crate::clients::crypto_ops::crypto::CryptoOps; +use crate::clients::host_functions::HostFunctionsProvider; use crate::clients::ics11_beefy::client_state::ClientState; use crate::clients::ics11_beefy::consensus_state::ConsensusState; use crate::clients::ics11_beefy::error::Error as BeefyError; @@ -41,15 +41,15 @@ use crate::core::ics24_host::path::{ use crate::downcast; #[derive(Clone, Debug, PartialEq, Eq)] -pub struct BeefyClient(PhantomData); +pub struct BeefyClient(PhantomData); -impl Default for BeefyClient { +impl Default for BeefyClient { fn default() -> Self { Self(PhantomData::default()) } } -impl ClientDef for BeefyClient { +impl ClientDef for BeefyClient { type Header = BeefyHeader; type ClientState = ClientState; type ConsensusState = ConsensusState; @@ -68,7 +68,7 @@ impl ClientDef for BeefyClient { next_authorities: client_state.next_authority_set.clone(), beefy_activation_block: client_state.beefy_activation_block, }; - let mut light_client = BeefyLightClient::::new(); + let mut light_client = BeefyLightClient::::new(); // If mmr update exists verify it and return the new light client state // or else return existing light client state let light_client_state = if let Some(mmr_update) = header.mmr_update_proof { @@ -152,7 +152,7 @@ impl ClientDef for BeefyClient { } parachain_cs_states.push(( height, - AnyConsensusState::Beefy(ConsensusState::from_header::(header)?), + AnyConsensusState::Beefy(ConsensusState::from_header::(header)?), )) } @@ -218,7 +218,7 @@ impl ClientDef for BeefyClient { height: consensus_height.revision_height, }; let value = expected_consensus_state.encode_vec().unwrap(); - verify_membership::(prefix, proof, root, path, value) + verify_membership::(prefix, proof, root, 
path, value) } // Consensus state will be verified in the verification functions before these are called @@ -236,7 +236,7 @@ impl ClientDef for BeefyClient { ) -> Result<(), Error> { let path = ConnectionsPath(connection_id.clone()); let value = expected_connection_end.encode_vec().unwrap(); - verify_membership::(prefix, proof, root, path, value) + verify_membership::(prefix, proof, root, path, value) } fn verify_channel_state( @@ -254,7 +254,7 @@ impl ClientDef for BeefyClient { ) -> Result<(), Error> { let path = ChannelEndsPath(port_id.clone(), *channel_id); let value = expected_channel_end.encode_vec().unwrap(); - verify_membership::(prefix, proof, root, path, value) + verify_membership::(prefix, proof, root, path, value) } fn verify_client_full_state( @@ -270,7 +270,7 @@ impl ClientDef for BeefyClient { ) -> Result<(), Error> { let path = ClientStatePath(client_id.clone()); let value = expected_client_state.encode_vec().unwrap(); - verify_membership::(prefix, proof, root, path, value) + verify_membership::(prefix, proof, root, path, value) } fn verify_packet_data( @@ -295,7 +295,7 @@ impl ClientDef for BeefyClient { sequence, }; - verify_membership::( + verify_membership::( connection_end.counterparty().prefix(), proof, root, @@ -325,7 +325,7 @@ impl ClientDef for BeefyClient { channel_id: *channel_id, sequence, }; - verify_membership::( + verify_membership::( connection_end.counterparty().prefix(), proof, root, @@ -352,7 +352,7 @@ impl ClientDef for BeefyClient { let seq_bytes = codec::Encode::encode(&u64::from(sequence)); let seq_path = SeqRecvsPath(port_id.clone(), *channel_id); - verify_membership::( + verify_membership::( connection_end.counterparty().prefix(), proof, root, @@ -381,7 +381,7 @@ impl ClientDef for BeefyClient { channel_id: *channel_id, sequence, }; - verify_non_membership::( + verify_non_membership::( connection_end.counterparty().prefix(), proof, root, @@ -390,7 +390,7 @@ impl ClientDef for BeefyClient { } } -fn verify_membership>( +fn verify_membership>( prefix: &CommitmentPrefix, proof: &CommitmentProofBytes, root: &CommitmentRoot, @@ -408,10 +408,10 @@ fn verify_membership>( let trie_proof: Vec> = codec::Decode::decode(&mut &*trie_proof) .map_err(|e| Error::beefy(BeefyError::scale_decode(e)))?; let root = H256::from_slice(root.as_bytes()); - Crypto::verify_membership_trie_proof(&root, &trie_proof, &key, &value) + HostFunctions::verify_membership_trie_proof(&root, &trie_proof, &key, &value) } -fn verify_non_membership>( +fn verify_non_membership>( prefix: &CommitmentPrefix, proof: &CommitmentProofBytes, root: &CommitmentRoot, @@ -428,7 +428,7 @@ fn verify_non_membership>( let trie_proof: Vec> = codec::Decode::decode(&mut &*trie_proof) .map_err(|e| Error::beefy(BeefyError::scale_decode(e)))?; let root = H256::from_slice(root.as_bytes()); - Crypto::verify_non_membership_trie_proof(&root, &trie_proof, &key) + HostFunctions::verify_non_membership_trie_proof(&root, &trie_proof, &key) } fn verify_delay_passed( diff --git a/modules/src/clients/ics11_beefy/consensus_state.rs b/modules/src/clients/ics11_beefy/consensus_state.rs index 7d7c2e209f..32796dd6b2 100644 --- a/modules/src/clients/ics11_beefy/consensus_state.rs +++ b/modules/src/clients/ics11_beefy/consensus_state.rs @@ -7,7 +7,7 @@ use tendermint::time::Time; use tendermint_proto::google::protobuf as tpb; use tendermint_proto::Protobuf; -use crate::clients::crypto_ops::crypto::CryptoOps; +use crate::clients::host_functions::HostFunctionsProvider; use ibc_proto::ibc::lightclients::beefy::v1::ConsensusState as 
RawConsensusState; use crate::clients::ics11_beefy::error::Error; @@ -33,7 +33,7 @@ impl ConsensusState { } #[cfg(not(test))] - pub fn from_header(header: ParachainHeader) -> Result { + pub fn from_header(header: ParachainHeader) -> Result { use crate::clients::ics11_beefy::header::decode_timestamp_extrinsic; use crate::timestamp::Timestamp; use sp_runtime::SaturatedConversion; @@ -51,7 +51,7 @@ impl ConsensusState { })? }; - let timestamp = decode_timestamp_extrinsic::(&header)?; + let timestamp = decode_timestamp_extrinsic::(&header)?; let duration = core::time::Duration::from_millis(timestamp); let timestamp = Timestamp::from_nanoseconds(duration.as_nanos().saturated_into::()) .unwrap_or_default() @@ -69,7 +69,7 @@ impl ConsensusState { #[cfg(test)] /// Leaving this here because there's no ibc commitment root in the runtime header that will be used in /// testing - pub fn from_header(header: ParachainHeader) -> Result { + pub fn from_header(header: ParachainHeader) -> Result { use crate::clients::ics11_beefy::header::decode_timestamp_extrinsic; use crate::timestamp::Timestamp; use sp_runtime::SaturatedConversion; @@ -85,7 +85,7 @@ impl ConsensusState { .unwrap_or_default() }; - let timestamp = decode_timestamp_extrinsic::(&header)?; + let timestamp = decode_timestamp_extrinsic::(&header)?; let duration = core::time::Duration::from_millis(timestamp); let timestamp = Timestamp::from_nanoseconds(duration.as_nanos().saturated_into::()) .unwrap_or_default() diff --git a/modules/src/clients/ics11_beefy/header.rs index 391d6893dc..3279a740c1 100644 --- a/modules/src/clients/ics11_beefy/header.rs +++ b/modules/src/clients/ics11_beefy/header.rs @@ -1,7 +1,7 @@ use prost::Message; use tendermint_proto::Protobuf; -use crate::clients::crypto_ops::crypto::CryptoOps; +use crate::clients::host_functions::HostFunctionsProvider; use crate::clients::ics11_beefy::error::Error; use crate::core::ics02_client::client_type::ClientType; use crate::core::ics02_client::header::AnyHeader; @@ -395,7 +395,7 @@ pub fn decode_header(buf: B) -> Result { } /// Attempt to extract the timestamp extrinsic from the parachain header -pub fn decode_timestamp_extrinsic( +pub fn decode_timestamp_extrinsic( header: &ParachainHeader, ) -> Result { let proof = &*header.extrinsic_proof; @@ -405,7 +405,7 @@ pub fn decode_timestamp_extrinsic( // Timestamp extrinsic should be the first inherent and hence the first extrinsic // https://github.com/paritytech/substrate/blob/d602397a0bbb24b5d627795b797259a44a5e29e9/primitives/trie/src/lib.rs#L99-L101 let key = codec::Encode::encode(&Compact(0u32)); - Crypto::verify_membership_trie_proof(&extrinsic_root, proof, &*key, ext) + HostFunctions::verify_membership_trie_proof(&extrinsic_root, proof, &*key, ext) .map_err(|e| Error::timestamp_extrinsic(format!("Proof Verification failed {:?}", e)))?; // Decoding from the [2..] because the timestamp inherent has two extra bytes before the call that represent the // call length and the extrinsic version.
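The HostFunctionsProvider trie methods used above (for state proofs and for the timestamp extrinsic) are left for the host chain to implement. As a rough, illustrative sketch only, and not part of this patch series, a Substrate-based host could back them with sp-trie's verify_trie_proof over a BlakeTwo256 trie layout; the free-standing function shape and the HostError type below are placeholder assumptions rather than the trait as defined in host_functions.rs:

// Illustrative host-side sketch, assuming the host links sp-core, sp-runtime and sp-trie.
// `HostError` is a placeholder error type, not something defined in these patches.
use sp_core::H256;
use sp_runtime::traits::BlakeTwo256;
use sp_trie::{verify_trie_proof, LayoutV0};

#[derive(Debug)]
pub struct HostError(pub String);

// Prove that `key => value` is present in the trie committed to by `root`.
pub fn verify_membership_trie_proof(
    root: &H256,
    proof: &[Vec<u8>],
    key: &[u8],
    value: &[u8],
) -> Result<(), HostError> {
    let items = vec![(key, Some(value))];
    verify_trie_proof::<LayoutV0<BlakeTwo256>, _, _, _>(root, proof, &items)
        .map_err(|e| HostError(format!("membership proof failed: {:?}", e)))
}

// Prove that `key` is absent from the trie committed to by `root`.
pub fn verify_non_membership_trie_proof(
    root: &H256,
    proof: &[Vec<u8>],
    key: &[u8],
) -> Result<(), HostError> {
    let items: Vec<(&[u8], Option<&[u8]>)> = vec![(key, None)];
    verify_trie_proof::<LayoutV0<BlakeTwo256>, _, _, _>(root, proof, &items)
        .map_err(|e| HostError(format!("non-membership proof failed: {:?}", e)))
}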
diff --git a/modules/src/clients/ics13_near/client_def.rs index e082626dd7..59c467e786 100644 --- a/modules/src/clients/ics13_near/client_def.rs +++ b/modules/src/clients/ics13_near/client_def.rs @@ -1,4 +1,12 @@ -use crate::core::ics02_client::client_def::ClientDef; +use crate::clients::host_functions::HostFunctionsProvider; +use crate::core::ics02_client::client_consensus::AnyConsensusState; +use crate::core::ics02_client::client_def::{ClientDef, ConsensusUpdateResult}; +use crate::core::ics23_commitment::commitment::{ + CommitmentPrefix, CommitmentProofBytes, CommitmentRoot, +}; +use crate::core::ics24_host::identifier::ClientId; +use crate::core::ics26_routing::context::LightClientContext; +use std::marker::PhantomData; use super::client_state::NearClientState; use super::consensus_state::NearConsensusState; @@ -8,9 +16,9 @@ use super::header::NearHeader; use super::types::{ApprovalInner, CryptoHash, LightClientBlockView, ValidatorStakeView}; #[derive(Debug, Clone)] -pub struct NearClient {} +pub struct NearClient(PhantomData); -impl ClientDef for NearClient { +impl ClientDef for NearClient { /// The data that we need to update the [`ClientState`] to a new block height type Header = NearHeader; @@ -33,33 +41,25 @@ impl ClientDef for NearClient { /// - The timestamp of the header. type ConsensusState = NearConsensusState; - type Crypto = NearCryptoOps; - // rehydrate client from its own storage, then call this function fn verify_header( &self, - ctx: &dyn crate::core::ics26_routing::context::LightClientContext, - client_id: crate::core::ics24_host::identifier::ClientId, + _ctx: &dyn LightClientContext, + _client_id: ClientId, client_state: Self::ClientState, header: Self::Header, ) -> Result<(), Error> { // your light client shouldn't do storage anymore, it should just do verification here. - validate_light_block(&header, &client_state) + validate_light_block(&header, client_state) } fn update_state( &self, ctx: &dyn LightClientContext, client_id: ClientId, client_state: Self::ClientState, header: Self::Header, - ) -> Result< - ( - Self::ClientState, - crate::core::ics02_client::client_def::ConsensusUpdateResult, - ), - Error, - > { + ) -> Result<(Self::ClientState, ConsensusUpdateResult), Error> { // 1. create new client state from this header, return that. // 2. as well as all the necessary consensus states.
// @@ -83,159 +83,153 @@ impl ClientDef for NearClient { fn check_for_misbehaviour( &self, - ctx: &dyn crate::core::ics26_routing::context::LightClientContext, - client_id: crate::core::ics24_host::identifier::ClientId, - client_state: Self::ClientState, - header: Self::Header, + _ctx: &dyn LightClientContext, + _client_id: ClientId, + _client_state: Self::ClientState, + _header: Self::Header, ) -> Result { - todo!() + Ok(false) } fn verify_upgrade_and_update_state( &self, - client_state: &Self::ClientState, - consensus_state: &Self::ConsensusState, - proof_upgrade_client: Vec, - proof_upgrade_consensus_state: Vec, - ) -> Result< - ( - Self::ClientState, - crate::core::ics02_client::client_def::ConsensusUpdateResult, - ), - Error, - > { + _client_state: &Self::ClientState, + _consensus_state: &Self::ConsensusState, + _proof_upgrade_client: Vec, + _proof_upgrade_consensus_state: Vec, + ) -> Result<(Self::ClientState, ConsensusUpdateResult), Error> { todo!() } fn verify_client_consensus_state( &self, - ctx: &dyn crate::core::ics26_routing::context::LightClientContext, - client_state: &Self::ClientState, - height: crate::Height, - prefix: &crate::core::ics23_commitment::commitment::CommitmentPrefix, - proof: &crate::core::ics23_commitment::commitment::CommitmentProofBytes, - root: &crate::core::ics23_commitment::commitment::CommitmentRoot, - client_id: &crate::core::ics24_host::identifier::ClientId, - consensus_height: crate::Height, - expected_consensus_state: &crate::core::ics02_client::client_consensus::AnyConsensusState< - Self::Crypto, - >, + _ctx: &dyn LightClientContext, + _client_state: &Self::ClientState, + _height: Height, + _prefix: &CommitmentPrefix, + _proof: &CommitmentProofBytes, + _root: &CommitmentRoot, + _client_id: &ClientId, + _consensus_height: Height, + _expected_consensus_state: &AnyConsensusState, ) -> Result<(), Error> { todo!() } + // Consensus state will be verified in the verification functions before these are called fn verify_connection_state( &self, - ctx: &dyn crate::core::ics26_routing::context::LightClientContext, - client_id: &crate::core::ics24_host::identifier::ClientId, - client_state: &Self::ClientState, - height: crate::Height, - prefix: &crate::core::ics23_commitment::commitment::CommitmentPrefix, - proof: &crate::core::ics23_commitment::commitment::CommitmentProofBytes, - root: &crate::core::ics23_commitment::commitment::CommitmentRoot, - connection_id: &crate::core::ics24_host::identifier::ConnectionId, - expected_connection_end: &crate::core::ics03_connection::connection::ConnectionEnd, + _ctx: &dyn LightClientContext, + _client_id: &ClientId, + _client_state: &Self::ClientState, + _height: Height, + _prefix: &CommitmentPrefix, + _proof: &CommitmentProofBytes, + _root: &CommitmentRoot, + _connection_id: &ConnectionId, + _expected_connection_end: &ConnectionEnd, ) -> Result<(), Error> { todo!() } fn verify_channel_state( &self, - ctx: &dyn crate::core::ics26_routing::context::LightClientContext, - client_id: &crate::core::ics24_host::identifier::ClientId, - client_state: &Self::ClientState, - height: crate::Height, - prefix: &crate::core::ics23_commitment::commitment::CommitmentPrefix, - proof: &crate::core::ics23_commitment::commitment::CommitmentProofBytes, - root: &crate::core::ics23_commitment::commitment::CommitmentRoot, - port_id: &crate::core::ics24_host::identifier::PortId, - channel_id: &crate::core::ics24_host::identifier::ChannelId, - expected_channel_end: &crate::core::ics04_channel::channel::ChannelEnd, + _ctx: &dyn LightClientContext, + 
_client_id: &ClientId, + _client_state: &Self::ClientState, + _height: Height, + _prefix: &CommitmentPrefix, + _proof: &CommitmentProofBytes, + _root: &CommitmentRoot, + _port_id: &PortId, + _channel_id: &ChannelId, + _expected_channel_end: &ChannelEnd, ) -> Result<(), Error> { todo!() } fn verify_client_full_state( &self, - ctx: &dyn crate::core::ics26_routing::context::LightClientContext, - client_state: &Self::ClientState, - height: crate::Height, - prefix: &crate::core::ics23_commitment::commitment::CommitmentPrefix, - proof: &crate::core::ics23_commitment::commitment::CommitmentProofBytes, - root: &crate::core::ics23_commitment::commitment::CommitmentRoot, - client_id: &crate::core::ics24_host::identifier::ClientId, - expected_client_state: &crate::core::ics02_client::client_state::AnyClientState, + _ctx: &dyn LightClientContext, + _client_state: &Self::ClientState, + _height: Height, + _prefix: &CommitmentPrefix, + _proof: &CommitmentProofBytes, + _root: &CommitmentRoot, + _client_id: &ClientId, + _expected_client_state: &AnyClientState, ) -> Result<(), Error> { todo!() } fn verify_packet_data( &self, - ctx: &dyn crate::core::ics26_routing::context::LightClientContext, - client_id: &crate::core::ics24_host::identifier::ClientId, - client_state: &Self::ClientState, - height: crate::Height, - connection_end: &crate::core::ics03_connection::connection::ConnectionEnd, - proof: &crate::core::ics23_commitment::commitment::CommitmentProofBytes, - root: &crate::core::ics23_commitment::commitment::CommitmentRoot, - port_id: &crate::core::ics24_host::identifier::PortId, - channel_id: &crate::core::ics24_host::identifier::ChannelId, - sequence: crate::core::ics04_channel::packet::Sequence, - commitment: crate::core::ics04_channel::commitment::PacketCommitment, + ctx: &dyn LightClientContext, + _client_id: &ClientId, + _client_state: &Self::ClientState, + _height: Height, + _connection_end: &ConnectionEnd, + _proof: &CommitmentProofBytes, + _root: &CommitmentRoot, + _port_id: &PortId, + _channel_id: &ChannelId, + _sequence: Sequence, + _commitment: PacketCommitment, ) -> Result<(), Error> { todo!() } fn verify_packet_acknowledgement( &self, - ctx: &dyn crate::core::ics26_routing::context::LightClientContext, - client_id: &crate::core::ics24_host::identifier::ClientId, - client_state: &Self::ClientState, - height: crate::Height, - connection_end: &crate::core::ics03_connection::connection::ConnectionEnd, - proof: &crate::core::ics23_commitment::commitment::CommitmentProofBytes, - root: &crate::core::ics23_commitment::commitment::CommitmentRoot, - port_id: &crate::core::ics24_host::identifier::PortId, - channel_id: &crate::core::ics24_host::identifier::ChannelId, - sequence: crate::core::ics04_channel::packet::Sequence, - ack: crate::core::ics04_channel::commitment::AcknowledgementCommitment, + ctx: &dyn LightClientContext, + _client_id: &ClientId, + _client_state: &Self::ClientState, + _height: Height, + _connection_end: &ConnectionEnd, + _proof: &CommitmentProofBytes, + _root: &CommitmentRoot, + _port_id: &PortId, + _channel_id: &ChannelId, + _sequence: Sequence, + _ack: AcknowledgementCommitment, ) -> Result<(), Error> { todo!() } fn verify_next_sequence_recv( &self, - ctx: &dyn crate::core::ics26_routing::context::LightClientContext, - client_id: &crate::core::ics24_host::identifier::ClientId, - client_state: &Self::ClientState, - height: crate::Height, - connection_end: &crate::core::ics03_connection::connection::ConnectionEnd, - proof: 
&crate::core::ics23_commitment::commitment::CommitmentProofBytes, - root: &crate::core::ics23_commitment::commitment::CommitmentRoot, - port_id: &crate::core::ics24_host::identifier::PortId, - channel_id: &crate::core::ics24_host::identifier::ChannelId, - sequence: crate::core::ics04_channel::packet::Sequence, + _ctx: &dyn LightClientContext, + _client_id: &ClientId, + _client_state: &Self::ClientState, + _height: Height, + _connection_end: &ConnectionEnd, + _proof: &CommitmentProofBytes, + _root: &CommitmentRoot, + _port_id: &PortId, + _channel_id: &ChannelId, + _sequence: Sequence, ) -> Result<(), Error> { todo!() } fn verify_packet_receipt_absence( &self, - ctx: &dyn crate::core::ics26_routing::context::LightClientContext, - client_id: &crate::core::ics24_host::identifier::ClientId, - client_state: &Self::ClientState, - height: crate::Height, - connection_end: &crate::core::ics03_connection::connection::ConnectionEnd, - proof: &crate::core::ics23_commitment::commitment::CommitmentProofBytes, - root: &crate::core::ics23_commitment::commitment::CommitmentRoot, - port_id: &crate::core::ics24_host::identifier::PortId, - channel_id: &crate::core::ics24_host::identifier::ChannelId, - sequence: crate::core::ics04_channel::packet::Sequence, + _ctx: &dyn LightClientContext, + _client_id: &ClientId, + _client_state: &Self::ClientState, + _height: Height, + _connection_end: &ConnectionEnd, + _proof: &CommitmentProofBytes, + _root: &CommitmentRoot, + _port_id: &PortId, + _channel_id: &ChannelId, + _sequence: Sequence, ) -> Result<(), Error> { todo!() } } +// TODO: refactor to use [`HostFunctions`] pub fn validate_light_block( header: &NearHeader, client_state: NearClientState, diff --git a/modules/src/clients/ics13_near/consensus_state.rs b/modules/src/clients/ics13_near/consensus_state.rs index c9d4ff3d20..22fcb47fd0 100644 --- a/modules/src/clients/ics13_near/consensus_state.rs +++ b/modules/src/clients/ics13_near/consensus_state.rs @@ -3,7 +3,6 @@ use crate::core::ics02_client::client_type::ClientType; use crate::core::ics23_commitment::commitment::CommitmentRoot; use crate::error::Error; -use super::crypto_ops::NearCryptoOps; #[derive(Debug, Clone)] pub struct NearConsensusState { @@ -13,7 +12,6 @@ pub struct NearConsensusState { impl ConsensusState for NearConsensusState { type Error = Error; - type Crypto = NearCryptoOps; fn client_type(&self) -> ClientType { ClientType::Near @@ -23,7 +21,7 @@ impl ConsensusState for NearConsensusState { &self.commitment_root } - fn wrap_any(self) -> AnyConsensusState { + fn wrap_any(self) -> AnyConsensusState { todo!() } } diff --git a/modules/src/clients/ics13_near/crypto_ops.rs b/modules/src/clients/ics13_near/crypto_ops.rs index 9f5df41aba..4acd74cb0e 100644 --- a/modules/src/clients/ics13_near/crypto_ops.rs +++ b/modules/src/clients/ics13_near/crypto_ops.rs @@ -1,11 +1,11 @@ -use crate::clients::{crypto_ops::crypto::CryptoOps, ics13_near::error::Error}; +use crate::clients::{host_functions::HostFunctionsProvider, ics13_near::error::Error}; #[derive(Debug, Clone)] pub struct NearCryptoOps { // _p: PhantomData], diff --git a/modules/src/clients/mod.rs b/modules/src/clients/mod.rs index 3bfcaab585..32a192b301 100644 --- a/modules/src/clients/mod.rs +++ b/modules/src/clients/mod.rs @@ -1,6 +1,6 @@ //! Implementations of client verification algorithms for specific types of chains. 
-pub mod crypto_ops; +pub mod host_functions; pub mod ics07_tendermint; pub mod ics11_beefy; pub mod ics13_near; diff --git a/modules/src/core/ics02_client/client_def.rs b/modules/src/core/ics02_client/client_def.rs index a891e7393e..ffe6c067ec 100644 --- a/modules/src/core/ics02_client/client_def.rs +++ b/modules/src/core/ics02_client/client_def.rs @@ -1,4 +1,4 @@ -use crate::clients::crypto_ops::crypto::CryptoOps; +use crate::clients::host_functions::HostFunctionsProvider; use crate::clients::ics07_tendermint::client_def::TendermintClient; use crate::clients::ics11_beefy::client_def::BeefyClient; use crate::core::ics02_client::client_consensus::{AnyConsensusState, ConsensusState}; @@ -207,20 +207,20 @@ pub trait ClientDef: Clone { } #[derive(Clone, Debug, PartialEq, Eq)] -pub enum AnyClient { +pub enum AnyClient { Tendermint(TendermintClient), - Beefy(BeefyClient), - Near(BeefyClient), + Beefy(BeefyClient), + Near(BeefyClient), #[cfg(any(test, feature = "mocks"))] Mock(MockClient), } -impl AnyClient { +impl AnyClient { pub fn from_client_type(client_type: ClientType) -> Self { match client_type { ClientType::Tendermint => Self::Tendermint(TendermintClient::default()), - ClientType::Beefy => Self::Beefy(BeefyClient::::default()), - ClientType::Near => Self::Near(BeefyClient::::default()), + ClientType::Beefy => Self::Beefy(BeefyClient::::default()), + ClientType::Near => Self::Near(BeefyClient::::default()), #[cfg(any(test, feature = "mocks"))] ClientType::Mock => Self::Mock(MockClient::default()), } @@ -228,7 +228,7 @@ impl AnyClient { } // ⚠️ Beware of the awful boilerplate below ⚠️ -impl ClientDef for AnyClient { +impl ClientDef for AnyClient { type Header = AnyHeader; type ClientState = AnyClientState; type ConsensusState = AnyConsensusState; diff --git a/modules/src/core/ics02_client/handler.rs b/modules/src/core/ics02_client/handler.rs index e85c48695f..34fad217db 100644 --- a/modules/src/core/ics02_client/handler.rs +++ b/modules/src/core/ics02_client/handler.rs @@ -1,5 +1,5 @@ //! This module implements the processing logic for ICS2 (client abstractions and functions) msgs. -use crate::clients::crypto_ops::crypto::CryptoOps; +use crate::clients::host_functions::HostFunctionsProvider; use crate::core::ics02_client::error::Error; use crate::core::ics02_client::msgs::ClientMsg; use crate::core::ics26_routing::context::LightClientContext; @@ -18,18 +18,18 @@ pub enum ClientResult { } /// General entry point for processing any message related to ICS2 (client functions) protocols. 
-pub fn dispatch( +pub fn dispatch( ctx: &Ctx, msg: ClientMsg, ) -> Result, Error> where Ctx: LightClientContext, - Crypto: CryptoOps, + HostFunctions: HostFunctionsProvider, { match msg { ClientMsg::CreateClient(msg) => create_client::process(ctx, msg), - ClientMsg::UpdateClient(msg) => update_client::process::(ctx, msg), - ClientMsg::UpgradeClient(msg) => upgrade_client::process::(ctx, msg), + ClientMsg::UpdateClient(msg) => update_client::process::(ctx, msg), + ClientMsg::UpgradeClient(msg) => upgrade_client::process::(ctx, msg), _ => { unimplemented!() } diff --git a/modules/src/core/ics02_client/handler/update_client.rs b/modules/src/core/ics02_client/handler/update_client.rs index 6748454e55..b3f3a57c48 100644 --- a/modules/src/core/ics02_client/handler/update_client.rs +++ b/modules/src/core/ics02_client/handler/update_client.rs @@ -2,7 +2,7 @@ use core::fmt::Debug; use tracing::debug; -use crate::clients::crypto_ops::crypto::CryptoOps; +use crate::clients::host_functions::HostFunctionsProvider; use crate::core::ics02_client::client_def::{AnyClient, ClientDef, ConsensusUpdateResult}; use crate::core::ics02_client::client_state::{AnyClientState, ClientState}; use crate::core::ics02_client::client_type::ClientType; @@ -29,7 +29,7 @@ pub struct Result { pub processed_height: Height, } -pub fn process( +pub fn process( ctx: &dyn LightClientContext, msg: MsgUpdateAnyClient, ) -> HandlerResult { @@ -44,7 +44,7 @@ pub fn process( // Read client type from the host chain store. The client should already exist. let client_type = ctx.client_type(&client_id)?; - let client_def = AnyClient::::from_client_type(client_type); + let client_def = AnyClient::::from_client_type(client_type); // Read client state from the host chain store. let client_state = ctx.client_state(&client_id)?; diff --git a/modules/src/core/ics02_client/handler/upgrade_client.rs b/modules/src/core/ics02_client/handler/upgrade_client.rs index 503b65f1b7..3cc7722c43 100644 --- a/modules/src/core/ics02_client/handler/upgrade_client.rs +++ b/modules/src/core/ics02_client/handler/upgrade_client.rs @@ -1,6 +1,6 @@ //! Protocol logic specific to processing ICS2 messages of type `MsgUpgradeAnyClient`. //! -use crate::clients::crypto_ops::crypto::CryptoOps; +use crate::clients::host_functions::HostFunctionsProvider; use crate::core::ics02_client::client_def::{AnyClient, ClientDef, ConsensusUpdateResult}; use crate::core::ics02_client::client_state::{AnyClientState, ClientState}; use crate::core::ics02_client::error::Error; @@ -23,7 +23,7 @@ pub struct Result { pub consensus_state: Option, } -pub fn process( +pub fn process( ctx: &dyn LightClientContext, msg: MsgUpgradeAnyClient, ) -> HandlerResult { @@ -48,7 +48,7 @@ pub fn process( let client_type = ctx.client_type(&client_id)?; - let client_def = AnyClient::::from_client_type(client_type); + let client_def = AnyClient::::from_client_type(client_type); let (new_client_state, new_consensus_state) = client_def.verify_upgrade_and_update_state( &upgrade_client_state, diff --git a/modules/src/core/ics03_connection/handler.rs b/modules/src/core/ics03_connection/handler.rs index 3fc70b647e..5f54080c1d 100644 --- a/modules/src/core/ics03_connection/handler.rs +++ b/modules/src/core/ics03_connection/handler.rs @@ -1,5 +1,5 @@ //! This module implements the processing logic for ICS3 (connection open handshake) messages. 
-use crate::clients::crypto_ops::crypto::CryptoOps; +use crate::clients::host_functions::HostFunctionsProvider; use crate::core::ics03_connection::connection::ConnectionEnd; use crate::core::ics03_connection::error::Error; use crate::core::ics03_connection::msgs::ConnectionMsg; @@ -42,18 +42,18 @@ pub struct ConnectionResult { /// General entry point for processing any type of message related to the ICS3 connection open /// handshake protocol. -pub fn dispatch( +pub fn dispatch( ctx: &Ctx, msg: ConnectionMsg, ) -> Result, Error> where Ctx: LightClientContext, - Crypto: CryptoOps, + HostFunctions: HostFunctionsProvider, { match msg { ConnectionMsg::ConnectionOpenInit(msg) => conn_open_init::process(ctx, msg), - ConnectionMsg::ConnectionOpenTry(msg) => conn_open_try::process::(ctx, *msg), - ConnectionMsg::ConnectionOpenAck(msg) => conn_open_ack::process::(ctx, *msg), - ConnectionMsg::ConnectionOpenConfirm(msg) => conn_open_confirm::process::(ctx, msg), + ConnectionMsg::ConnectionOpenTry(msg) => conn_open_try::process::(ctx, *msg), + ConnectionMsg::ConnectionOpenAck(msg) => conn_open_ack::process::(ctx, *msg), + ConnectionMsg::ConnectionOpenConfirm(msg) => conn_open_confirm::process::(ctx, msg), } } diff --git a/modules/src/core/ics03_connection/handler/conn_open_ack.rs b/modules/src/core/ics03_connection/handler/conn_open_ack.rs index 4c9fe1e47a..1fd4fca6bd 100644 --- a/modules/src/core/ics03_connection/handler/conn_open_ack.rs +++ b/modules/src/core/ics03_connection/handler/conn_open_ack.rs @@ -1,6 +1,6 @@ //! Protocol logic specific to processing ICS3 messages of type `MsgConnectionOpenAck`. -use crate::clients::crypto_ops::crypto::CryptoOps; +use crate::clients::host_functions::HostFunctionsProvider; use crate::core::ics03_connection::connection::{ConnectionEnd, Counterparty, State}; use crate::core::ics03_connection::error::Error; use crate::core::ics03_connection::events::Attributes; @@ -14,7 +14,7 @@ use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; -pub(crate) fn process( +pub(crate) fn process( ctx: &dyn LightClientContext, msg: MsgConnectionOpenAck, ) -> HandlerResult { @@ -66,7 +66,7 @@ pub(crate) fn process( }; // 2. Pass the details to the verification function. - verify_proofs::( + verify_proofs::( ctx, msg.client_state.clone(), msg.proofs.height(), diff --git a/modules/src/core/ics03_connection/handler/conn_open_confirm.rs b/modules/src/core/ics03_connection/handler/conn_open_confirm.rs index 693caa2b7d..aa196d60a5 100644 --- a/modules/src/core/ics03_connection/handler/conn_open_confirm.rs +++ b/modules/src/core/ics03_connection/handler/conn_open_confirm.rs @@ -1,6 +1,6 @@ //! Protocol logic specific to processing ICS3 messages of type `MsgConnectionOpenConfirm`. -use crate::clients::crypto_ops::crypto::CryptoOps; +use crate::clients::host_functions::HostFunctionsProvider; use crate::core::ics03_connection::connection::{ConnectionEnd, Counterparty, State}; use crate::core::ics03_connection::error::Error; use crate::core::ics03_connection::events::Attributes; @@ -12,7 +12,7 @@ use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; -pub(crate) fn process( +pub(crate) fn process( ctx: &dyn LightClientContext, msg: MsgConnectionOpenConfirm, ) -> HandlerResult { @@ -41,7 +41,7 @@ pub(crate) fn process( ); // 2. Pass the details to the verification function. 
- verify_proofs::( + verify_proofs::( ctx, None, msg.proofs.height(), diff --git a/modules/src/core/ics03_connection/handler/conn_open_try.rs b/modules/src/core/ics03_connection/handler/conn_open_try.rs index 75165da65c..dfc0c9301a 100644 --- a/modules/src/core/ics03_connection/handler/conn_open_try.rs +++ b/modules/src/core/ics03_connection/handler/conn_open_try.rs @@ -1,6 +1,6 @@ //! Protocol logic specific to processing ICS3 messages of type `MsgConnectionOpenTry`. -use crate::clients::crypto_ops::crypto::CryptoOps; +use crate::clients::host_functions::HostFunctionsProvider; use crate::core::ics03_connection::connection::{ConnectionEnd, Counterparty, State}; use crate::core::ics03_connection::error::Error; use crate::core::ics03_connection::events::Attributes; @@ -15,7 +15,7 @@ use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; -pub(crate) fn process( +pub(crate) fn process( ctx: &dyn LightClientContext, msg: MsgConnectionOpenTry, ) -> HandlerResult { @@ -79,7 +79,7 @@ pub(crate) fn process( ); // 2. Pass the details to the verification function. - verify_proofs::( + verify_proofs::( ctx, msg.client_state.clone(), msg.proofs.height(), diff --git a/modules/src/core/ics03_connection/handler/verify.rs b/modules/src/core/ics03_connection/handler/verify.rs index 072688476f..2acc9544e3 100644 --- a/modules/src/core/ics03_connection/handler/verify.rs +++ b/modules/src/core/ics03_connection/handler/verify.rs @@ -1,5 +1,5 @@ //! ICS3 verification functions, common across all four handlers of ICS3. -use crate::clients::crypto_ops::crypto::CryptoOps; +use crate::clients::host_functions::HostFunctionsProvider; use crate::core::ics02_client::client_consensus::ConsensusState; use crate::core::ics02_client::client_state::{AnyClientState, ClientState}; use crate::core::ics02_client::{client_def::AnyClient, client_def::ClientDef}; @@ -11,7 +11,7 @@ use crate::proofs::{ConsensusProof, Proofs}; use crate::Height; /// Entry point for verifying all proofs bundled in any ICS3 message. -pub fn verify_proofs( +pub fn verify_proofs( ctx: &dyn LightClientContext, client_state: Option, height: Height, @@ -19,7 +19,7 @@ pub fn verify_proofs( expected_conn: &ConnectionEnd, proofs: &Proofs, ) -> Result<(), Error> { - verify_connection_proof::( + verify_connection_proof::( ctx, height, connection_end, @@ -30,7 +30,7 @@ pub fn verify_proofs( // If the message includes a client state, then verify the proof for that state. if let Some(expected_client_state) = client_state { - verify_client_proof::( + verify_client_proof::( ctx, height, connection_end, @@ -45,7 +45,7 @@ pub fn verify_proofs( // If a consensus proof is attached to the message, then verify it. if let Some(proof) = proofs.consensus_proof() { - Ok(verify_consensus_proof::( + Ok(verify_consensus_proof::( ctx, height, connection_end, @@ -59,7 +59,7 @@ pub fn verify_proofs( /// Verifies the authenticity and semantic correctness of a commitment `proof`. The commitment /// claims to prove that an object of type connection exists on the source chain (i.e., the chain /// which created this proof). This object must match the state of `expected_conn`. 
-pub fn verify_connection_proof( +pub fn verify_connection_proof( ctx: &dyn LightClientContext, height: Height, connection_end: &ConnectionEnd, @@ -89,7 +89,7 @@ pub fn verify_connection_proof( .connection_id() .ok_or_else(Error::invalid_counterparty)?; - let client_def = AnyClient::::from_client_type(client_state.client_type()); + let client_def = AnyClient::::from_client_type(client_state.client_type()); // Verify the proof for the connection state against the expected connection end. client_def @@ -114,7 +114,7 @@ pub fn verify_connection_proof( /// complete verification: that the client state the counterparty stores is valid (i.e., not frozen, /// at the same revision as the current chain, with matching chain identifiers, etc) and that the /// `proof` is correct. -pub fn verify_client_proof( +pub fn verify_client_proof( ctx: &dyn LightClientContext, height: Height, connection_end: &ConnectionEnd, @@ -135,7 +135,7 @@ pub fn verify_client_proof( .consensus_state(connection_end.client_id(), proof_height) .map_err(|e| Error::consensus_state_verification_failure(proof_height, e))?; - let client_def = AnyClient::::from_client_type(client_state.client_type()); + let client_def = AnyClient::::from_client_type(client_state.client_type()); client_def .verify_client_full_state( @@ -153,7 +153,7 @@ pub fn verify_client_proof( }) } -pub fn verify_consensus_proof( +pub fn verify_consensus_proof( ctx: &dyn LightClientContext, height: Height, connection_end: &ConnectionEnd, @@ -177,7 +177,7 @@ pub fn verify_consensus_proof( .consensus_state(connection_end.client_id(), height) .map_err(|e| Error::consensus_state_verification_failure(height, e))?; - let client = AnyClient::::from_client_type(client_state.client_type()); + let client = AnyClient::::from_client_type(client_state.client_type()); client .verify_client_consensus_state( diff --git a/modules/src/core/ics04_channel/handler.rs b/modules/src/core/ics04_channel/handler.rs index 3b026f07e7..ed6dcb0224 100644 --- a/modules/src/core/ics04_channel/handler.rs +++ b/modules/src/core/ics04_channel/handler.rs @@ -1,6 +1,6 @@ //! This module implements the processing logic for ICS4 (channel) messages. -use crate::clients::crypto_ops::crypto::CryptoOps; +use crate::clients::host_functions::HostFunctionsProvider; use crate::core::ics04_channel::channel::ChannelEnd; use crate::core::ics04_channel::error::Error; use crate::core::ics04_channel::msgs::ChannelMsg; @@ -59,21 +59,21 @@ where /// General entry point for processing any type of message related to the ICS4 channel open and /// channel close handshake protocols. 
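Note (rough sketch, not part of these patches): `channel_dispatch`, in the hunk that follows, now returns the raw `(HandlerOutputBuilder<()>, ChannelResult)` pair and takes the same `HostFunctions` parameter, mirroring how the updated `chan_open_ack` tests further down drive it. A minimal caller-side sketch, with the persistence step left as a comment because it depends on the host's context implementation:

```rust
use crate::clients::host_functions::HostFunctionsProvider;
use crate::core::ics04_channel::handler::channel_dispatch;
use crate::core::ics04_channel::msgs::ChannelMsg;
use crate::core::ics26_routing::context::LightClientContext;

/// Hypothetical caller: runs one channel handshake message through the
/// generic dispatcher and decides what to do with the outcome.
fn try_channel_msg<Ctx, HostFunctions>(ctx: &Ctx, msg: &ChannelMsg)
where
    Ctx: LightClientContext,
    HostFunctions: HostFunctionsProvider,
{
    match channel_dispatch::<_, HostFunctions>(ctx, msg) {
        // The builder carries events/log output; the `ChannelResult` is what
        // the host must persist (channel end and identifiers) afterwards.
        Ok((_output_builder, _channel_result)) => { /* persist result, emit events */ }
        // The dispatcher itself writes no state, so rejecting is side-effect free.
        Err(_e) => { /* reject the message */ }
    }
}
```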
-pub fn channel_dispatch( +pub fn channel_dispatch( ctx: &Ctx, msg: &ChannelMsg, ) -> Result<(HandlerOutputBuilder<()>, ChannelResult), Error> where Ctx: LightClientContext, - Crypto: CryptoOps, + HostFunctions: HostFunctionsProvider, { let output = match msg { ChannelMsg::ChannelOpenInit(msg) => chan_open_init::process(ctx, msg), - ChannelMsg::ChannelOpenTry(msg) => chan_open_try::process::(ctx, msg), - ChannelMsg::ChannelOpenAck(msg) => chan_open_ack::process::(ctx, msg), - ChannelMsg::ChannelOpenConfirm(msg) => chan_open_confirm::process::(ctx, msg), + ChannelMsg::ChannelOpenTry(msg) => chan_open_try::process::(ctx, msg), + ChannelMsg::ChannelOpenAck(msg) => chan_open_ack::process::(ctx, msg), + ChannelMsg::ChannelOpenConfirm(msg) => chan_open_confirm::process::(ctx, msg), ChannelMsg::ChannelCloseInit(msg) => chan_close_init::process(ctx, msg), - ChannelMsg::ChannelCloseConfirm(msg) => chan_close_confirm::process::(ctx, msg), + ChannelMsg::ChannelCloseConfirm(msg) => chan_close_confirm::process::(ctx, msg), }?; let HandlerOutput { result, @@ -168,19 +168,19 @@ where } /// Dispatcher for processing any type of message related to the ICS4 packet protocols. -pub fn packet_dispatch( +pub fn packet_dispatch( ctx: &Ctx, msg: &PacketMsg, ) -> Result<(HandlerOutputBuilder<()>, PacketResult), Error> where Ctx: LightClientContext, - Crypto: CryptoOps, + HostFunctions: HostFunctionsProvider, { let output = match msg { - PacketMsg::RecvPacket(msg) => recv_packet::process::(ctx, msg), - PacketMsg::AckPacket(msg) => acknowledgement::process::(ctx, msg), - PacketMsg::ToPacket(msg) => timeout::process::(ctx, msg), - PacketMsg::ToClosePacket(msg) => timeout_on_close::process::(ctx, msg), + PacketMsg::RecvPacket(msg) => recv_packet::process::(ctx, msg), + PacketMsg::AckPacket(msg) => acknowledgement::process::(ctx, msg), + PacketMsg::ToPacket(msg) => timeout::process::(ctx, msg), + PacketMsg::ToClosePacket(msg) => timeout_on_close::process::(ctx, msg), }?; let HandlerOutput { result, diff --git a/modules/src/core/ics04_channel/handler/acknowledgement.rs b/modules/src/core/ics04_channel/handler/acknowledgement.rs index efa39603eb..d316e2a703 100644 --- a/modules/src/core/ics04_channel/handler/acknowledgement.rs +++ b/modules/src/core/ics04_channel/handler/acknowledgement.rs @@ -1,4 +1,4 @@ -use crate::clients::crypto_ops::crypto::CryptoOps; +use crate::clients::host_functions::HostFunctionsProvider; use crate::core::ics03_connection::connection::State as ConnectionState; use crate::core::ics04_channel::channel::State; use crate::core::ics04_channel::channel::{Counterparty, Order}; @@ -22,7 +22,7 @@ pub struct AckPacketResult { pub seq_number: Option, } -pub fn process( +pub fn process( ctx: &dyn LightClientContext, msg: &MsgAcknowledgement, ) -> HandlerResult { @@ -77,7 +77,7 @@ pub fn process( } // Verify the acknowledgement proof - verify_packet_acknowledgement_proofs::( + verify_packet_acknowledgement_proofs::( ctx, msg.proofs.height(), packet, diff --git a/modules/src/core/ics04_channel/handler/chan_close_confirm.rs b/modules/src/core/ics04_channel/handler/chan_close_confirm.rs index 8aa92bb568..308d1e90bb 100644 --- a/modules/src/core/ics04_channel/handler/chan_close_confirm.rs +++ b/modules/src/core/ics04_channel/handler/chan_close_confirm.rs @@ -1,6 +1,6 @@ //! Protocol logic specific to ICS4 messages of type `MsgChannelCloseConfirm`. 
-use crate::clients::crypto_ops::crypto::CryptoOps; +use crate::clients::host_functions::HostFunctionsProvider; use crate::core::ics03_connection::connection::State as ConnectionState; use crate::core::ics04_channel::channel::{ChannelEnd, Counterparty, State}; use crate::core::ics04_channel::error::Error; @@ -13,7 +13,7 @@ use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; -pub(crate) fn process( +pub(crate) fn process( ctx: &dyn LightClientContext, msg: &MsgChannelCloseConfirm, ) -> HandlerResult { @@ -65,7 +65,7 @@ pub(crate) fn process( channel_end.version().clone(), ); - verify_channel_proofs::( + verify_channel_proofs::( ctx, msg.proofs.height(), &channel_end, diff --git a/modules/src/core/ics04_channel/handler/chan_open_ack.rs b/modules/src/core/ics04_channel/handler/chan_open_ack.rs index 20109cafe0..a318d883f6 100644 --- a/modules/src/core/ics04_channel/handler/chan_open_ack.rs +++ b/modules/src/core/ics04_channel/handler/chan_open_ack.rs @@ -1,5 +1,5 @@ //! Protocol logic specific to ICS4 messages of type `MsgChannelOpenAck`. -use crate::clients::crypto_ops::crypto::CryptoOps; +use crate::clients::host_functions::HostFunctionsProvider; use crate::core::ics03_connection::connection::State as ConnectionState; use crate::core::ics04_channel::channel::{ChannelEnd, Counterparty, State}; use crate::core::ics04_channel::error::Error; @@ -12,7 +12,7 @@ use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; -pub(crate) fn process( +pub(crate) fn process( ctx: &dyn LightClientContext, msg: &MsgChannelOpenAck, ) -> HandlerResult { @@ -72,7 +72,7 @@ pub(crate) fn process( channel_end.set_counterparty_channel_id(msg.counterparty_channel_id); //2. Verify proofs - verify_channel_proofs::( + verify_channel_proofs::( ctx, msg.proofs.height(), &channel_end, @@ -284,7 +284,7 @@ mod tests { .collect(); for test in tests { - let res = channel_dispatch::<_, Crypto>(&test.ctx, &test.msg); + let res = channel_dispatch::<_, HostFunctions>(&test.ctx, &test.msg); // Additionally check the events and the output objects in the result. match res { Ok((proto_output, res)) => { diff --git a/modules/src/core/ics04_channel/handler/chan_open_confirm.rs b/modules/src/core/ics04_channel/handler/chan_open_confirm.rs index fc955886ae..7704959d67 100644 --- a/modules/src/core/ics04_channel/handler/chan_open_confirm.rs +++ b/modules/src/core/ics04_channel/handler/chan_open_confirm.rs @@ -1,5 +1,5 @@ //! Protocol logic specific to ICS4 messages of type `MsgChannelOpenConfirm`. -use crate::clients::crypto_ops::crypto::CryptoOps; +use crate::clients::host_functions::HostFunctionsProvider; use crate::core::ics03_connection::connection::State as ConnectionState; use crate::core::ics04_channel::channel::{ChannelEnd, Counterparty, State}; use crate::core::ics04_channel::error::Error; @@ -12,7 +12,7 @@ use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; -pub(crate) fn process( +pub(crate) fn process( ctx: &dyn LightClientContext, msg: &MsgChannelOpenConfirm, ) -> HandlerResult { @@ -67,7 +67,7 @@ pub(crate) fn process( channel_end.version().clone(), ); //2. 
Verify proofs - verify_channel_proofs::( + verify_channel_proofs::( ctx, msg.proofs.height(), &channel_end, diff --git a/modules/src/core/ics04_channel/handler/chan_open_try.rs b/modules/src/core/ics04_channel/handler/chan_open_try.rs index 6e38c4d100..810cc4c3bc 100644 --- a/modules/src/core/ics04_channel/handler/chan_open_try.rs +++ b/modules/src/core/ics04_channel/handler/chan_open_try.rs @@ -1,6 +1,6 @@ //! Protocol logic specific to ICS4 messages of type `MsgChannelOpenTry`. -use crate::clients::crypto_ops::crypto::CryptoOps; +use crate::clients::host_functions::HostFunctionsProvider; use crate::core::ics03_connection::connection::State as ConnectionState; use crate::core::ics04_channel::channel::{ChannelEnd, Counterparty, State}; use crate::core::ics04_channel::error::Error; @@ -14,7 +14,7 @@ use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; -pub(crate) fn process( +pub(crate) fn process( ctx: &dyn LightClientContext, msg: &MsgChannelOpenTry, ) -> HandlerResult { @@ -111,7 +111,7 @@ pub(crate) fn process( ); // 2. Actual proofs are verified now. - verify_channel_proofs::( + verify_channel_proofs::( ctx, msg.proofs.height(), &new_channel_end, diff --git a/modules/src/core/ics04_channel/handler/recv_packet.rs b/modules/src/core/ics04_channel/handler/recv_packet.rs index cc650e2144..c9d178edfd 100644 --- a/modules/src/core/ics04_channel/handler/recv_packet.rs +++ b/modules/src/core/ics04_channel/handler/recv_packet.rs @@ -1,4 +1,4 @@ -use crate::clients::crypto_ops::crypto::CryptoOps; +use crate::clients::host_functions::HostFunctionsProvider; use crate::core::ics03_connection::connection::State as ConnectionState; use crate::core::ics04_channel::channel::{Counterparty, Order, State}; use crate::core::ics04_channel::error::Error; @@ -29,7 +29,7 @@ pub enum RecvPacketResult { NoOp, } -pub fn process( +pub fn process( ctx: &dyn LightClientContext, msg: &MsgRecvPacket, ) -> HandlerResult { @@ -79,7 +79,7 @@ pub fn process( return Err(Error::low_packet_timestamp()); } - verify_packet_recv_proofs::( + verify_packet_recv_proofs::( ctx, msg.proofs.height(), packet, diff --git a/modules/src/core/ics04_channel/handler/timeout.rs b/modules/src/core/ics04_channel/handler/timeout.rs index 9ec67eea10..cf7c08b015 100644 --- a/modules/src/core/ics04_channel/handler/timeout.rs +++ b/modules/src/core/ics04_channel/handler/timeout.rs @@ -1,4 +1,4 @@ -use crate::clients::crypto_ops::crypto::CryptoOps; +use crate::clients::host_functions::HostFunctionsProvider; use crate::core::ics04_channel::channel::State; use crate::core::ics04_channel::channel::{ChannelEnd, Counterparty, Order}; use crate::core::ics04_channel::error::Error; @@ -24,7 +24,7 @@ pub struct TimeoutPacketResult { pub channel: Option, } -pub fn process( +pub fn process( ctx: &dyn LightClientContext, msg: &MsgTimeout, ) -> HandlerResult { @@ -105,7 +105,7 @@ pub fn process( msg.next_sequence_recv, )); } - verify_next_sequence_recv::( + verify_next_sequence_recv::( ctx, msg.proofs.height(), &connection_end, @@ -122,7 +122,7 @@ pub fn process( channel: Some(source_channel_end), }) } else { - verify_packet_receipt_absence::( + verify_packet_receipt_absence::( ctx, msg.proofs.height(), &connection_end, diff --git a/modules/src/core/ics04_channel/handler/timeout_on_close.rs b/modules/src/core/ics04_channel/handler/timeout_on_close.rs index 0e944e7d19..2be21ade6c 100644 --- a/modules/src/core/ics04_channel/handler/timeout_on_close.rs +++ 
b/modules/src/core/ics04_channel/handler/timeout_on_close.rs @@ -1,4 +1,4 @@ -use crate::clients::crypto_ops::crypto::CryptoOps; +use crate::clients::host_functions::HostFunctionsProvider; use crate::core::ics04_channel::channel::State; use crate::core::ics04_channel::channel::{ChannelEnd, Counterparty, Order}; use crate::core::ics04_channel::events::TimeoutOnClosePacket; @@ -14,7 +14,7 @@ use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; -pub fn process( +pub fn process( ctx: &dyn LightClientContext, msg: &MsgTimeoutOnClose, ) -> HandlerResult { @@ -75,7 +75,7 @@ pub fn process( source_channel_end.version().clone(), ); - verify_channel_proofs::( + verify_channel_proofs::( ctx, msg.proofs.height(), &source_channel_end, @@ -91,7 +91,7 @@ pub fn process( msg.next_sequence_recv, )); } - verify_next_sequence_recv::( + verify_next_sequence_recv::( ctx, msg.proofs.height(), &connection_end, @@ -107,7 +107,7 @@ pub fn process( channel: Some(source_channel_end), }) } else { - verify_packet_receipt_absence::( + verify_packet_receipt_absence::( ctx, msg.proofs.height(), &connection_end, diff --git a/modules/src/core/ics04_channel/handler/verify.rs b/modules/src/core/ics04_channel/handler/verify.rs index b5e9d29106..086cf591c8 100644 --- a/modules/src/core/ics04_channel/handler/verify.rs +++ b/modules/src/core/ics04_channel/handler/verify.rs @@ -1,4 +1,4 @@ -use crate::clients::crypto_ops::crypto::CryptoOps; +use crate::clients::host_functions::HostFunctionsProvider; use crate::core::ics02_client::client_consensus::ConsensusState; use crate::core::ics02_client::client_state::ClientState; use crate::core::ics02_client::{client_def::AnyClient, client_def::ClientDef}; @@ -13,7 +13,7 @@ use crate::proofs::Proofs; use crate::Height; /// Entry point for verifying all proofs bundled in any ICS4 message for channel protocols. -pub fn verify_channel_proofs( +pub fn verify_channel_proofs( ctx: &dyn LightClientContext, height: Height, channel_end: &ChannelEnd, @@ -35,7 +35,7 @@ pub fn verify_channel_proofs( .consensus_state(&client_id, proofs.height()) .map_err(|_| Error::error_invalid_consensus_state())?; - let client_def = AnyClient::::from_client_type(client_state.client_type()); + let client_def = AnyClient::::from_client_type(client_state.client_type()); // Verify the proof for the channel state against the expected channel end. // A counterparty channel id of None in not possible, and is checked by validate_basic in msg. @@ -56,7 +56,7 @@ pub fn verify_channel_proofs( } /// Entry point for verifying all proofs bundled in a ICS4 packet recv. message. -pub fn verify_packet_recv_proofs( +pub fn verify_packet_recv_proofs( ctx: &dyn LightClientContext, height: Height, packet: &Packet, @@ -75,7 +75,7 @@ pub fn verify_packet_recv_proofs( .consensus_state(client_id, proofs.height()) .map_err(|_| Error::error_invalid_consensus_state())?; - let client_def = AnyClient::::from_client_type(client_state.client_type()); + let client_def = AnyClient::::from_client_type(client_state.client_type()); let commitment = ctx.packet_commitment( packet.data.clone(), @@ -104,7 +104,7 @@ pub fn verify_packet_recv_proofs( } /// Entry point for verifying all proofs bundled in an ICS4 packet ack message. 
-pub fn verify_packet_acknowledgement_proofs( +pub fn verify_packet_acknowledgement_proofs( ctx: &dyn LightClientContext, height: Height, packet: &Packet, @@ -126,7 +126,7 @@ pub fn verify_packet_acknowledgement_proofs( let ack_commitment = ctx.ack_commitment(acknowledgement); - let client_def = AnyClient::::from_client_type(client_state.client_type()); + let client_def = AnyClient::::from_client_type(client_state.client_type()); // Verify the proof for the packet against the chain store. client_def @@ -149,7 +149,7 @@ pub fn verify_packet_acknowledgement_proofs( } /// Entry point for verifying all timeout proofs. -pub fn verify_next_sequence_recv( +pub fn verify_next_sequence_recv( ctx: &dyn LightClientContext, height: Height, connection_end: &ConnectionEnd, @@ -169,7 +169,7 @@ pub fn verify_next_sequence_recv( .consensus_state(client_id, proofs.height()) .map_err(|_| Error::error_invalid_consensus_state())?; - let client_def = AnyClient::::from_client_type(client_state.client_type()); + let client_def = AnyClient::::from_client_type(client_state.client_type()); // Verify the proof for the packet against the chain store. client_def @@ -190,7 +190,7 @@ pub fn verify_next_sequence_recv( Ok(()) } -pub fn verify_packet_receipt_absence( +pub fn verify_packet_receipt_absence( ctx: &dyn LightClientContext, height: Height, connection_end: &ConnectionEnd, @@ -209,7 +209,7 @@ pub fn verify_packet_receipt_absence( .consensus_state(client_id, proofs.height()) .map_err(|_| Error::error_invalid_consensus_state())?; - let client_def = AnyClient::::from_client_type(client_state.client_type()); + let client_def = AnyClient::::from_client_type(client_state.client_type()); // Verify the proof for the packet against the chain store. client_def diff --git a/modules/src/core/ics26_routing/handler.rs b/modules/src/core/ics26_routing/handler.rs index 9e56d2225c..c81ce32d81 100644 --- a/modules/src/core/ics26_routing/handler.rs +++ b/modules/src/core/ics26_routing/handler.rs @@ -1,4 +1,4 @@ -use crate::clients::crypto_ops::crypto::CryptoOps; +use crate::clients::host_functions::HostFunctionsProvider; use crate::prelude::*; use ibc_proto::google::protobuf::Any; @@ -32,19 +32,19 @@ pub struct MsgReceipt { /// Mimics the DeliverTx ABCI interface, but for a single message and at a slightly lower level. /// No need for authentication info or signature checks here. /// Returns a vector of all events that got generated as a byproduct of processing `message`. -pub fn deliver( +pub fn deliver( ctx: &mut Ctx, message: Any, ) -> Result<(Vec, Vec), Error> where Ctx: Ics26Context, - Crypto: CryptoOps, + HostFunctions: HostFunctionsProvider, { // Decode the proto message into a domain message, creating an ICS26 envelope. let envelope = decode(message)?; // Process the envelope, and accumulate any events that were generated. - let output = dispatch::<_, Crypto>(ctx, envelope)?; + let output = dispatch::<_, HostFunctions>(ctx, envelope)?; Ok(MsgReceipt { events, log }) } @@ -59,15 +59,15 @@ pub fn decode(message: Any) -> Result { /// and events produced after processing the input `msg`. /// If this method returns an error, the runtime is expected to rollback all state modifications to /// the `Ctx` caused by all messages from the transaction that this `msg` is a part of. 
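Note (illustrative sketch, not part of these patches): the rollback contract stated in the comment above is the caller's responsibility, not the dispatcher's. Under the assumption that the host backs its `Ics26Context` with some snapshot or overlay it can discard (that mechanism is hypothetical here, the crate does not provide it), a per-transaction loop over `deliver` could look like this:

```rust
use ibc_proto::google::protobuf::Any;

use crate::clients::host_functions::HostFunctionsProvider;
use crate::core::ics26_routing::context::Ics26Context;
use crate::core::ics26_routing::error::Error;
use crate::core::ics26_routing::handler::deliver;

/// Hypothetical transaction driver: processes each message in order and
/// aborts on the first failure.
fn run_transaction<Ctx, HostFunctions>(ctx: &mut Ctx, msgs: Vec<Any>) -> Result<(), Error>
where
    Ctx: Ics26Context,
    HostFunctions: HostFunctionsProvider,
{
    for msg in msgs {
        // On the first failing message the host must throw away every write
        // that earlier messages of this transaction made to `ctx`, e.g. by
        // running the loop against an overlay and committing only on success.
        deliver::<_, HostFunctions>(ctx, msg)?;
    }
    Ok(())
}
```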
-pub fn dispatch(ctx: &mut Ctx, msg: Ics26Envelope) -> Result, Error> +pub fn dispatch(ctx: &mut Ctx, msg: Ics26Envelope) -> Result, Error> where Ctx: Ics26Context, - Crypto: CryptoOps, + HostFunctions: HostFunctionsProvider, { let output = match msg { Ics2Msg(msg) => { let handler_output = - ics2_msg_dispatcher::<_, Crypto>(ctx, msg).map_err(Error::ics02_client)?; + ics2_msg_dispatcher::<_, HostFunctions>(ctx, msg).map_err(Error::ics02_client)?; // Apply the result to the context (host chain store). ctx.store_client_result(handler_output.result) @@ -81,7 +81,7 @@ where Ics3Msg(msg) => { let handler_output = - ics3_msg_dispatcher::<_, Crypto>(ctx, msg).map_err(Error::ics03_connection)?; + ics3_msg_dispatcher::<_, HostFunctions>(ctx, msg).map_err(Error::ics03_connection)?; // Apply any results to the host chain store. ctx.store_connection_result(handler_output.result) @@ -96,7 +96,7 @@ where Ics4ChannelMsg(msg) => { let module_id = ics4_validate(ctx, &msg).map_err(Error::ics04_channel)?; let (mut handler_builder, channel_result) = - ics4_msg_dispatcher::<_, Crypto>(ctx, &msg).map_err(Error::ics04_channel)?; + ics4_msg_dispatcher::<_, HostFunctions>(ctx, &msg).map_err(Error::ics04_channel)?; let mut module_output = ModuleOutputBuilder::new(); let cb_result = @@ -112,7 +112,7 @@ where } Ics20Msg(msg) => { - let handler_output = ics20_msg_dispatcher::<_, Crypto>(ctx, msg) + let handler_output = ics20_msg_dispatcher::<_, HostFunctions>(ctx, msg) .map_err(Error::ics20_fungible_token_transfer)?; // Apply any results to the host chain store. @@ -128,7 +128,7 @@ where Ics4PacketMsg(msg) => { let module_id = get_module_for_packet_msg(ctx, &msg).map_err(Error::ics04_channel)?; let (mut handler_builder, packet_result) = - ics4_packet_msg_dispatcher::<_, Crypto>(ctx, &msg).map_err(Error::ics04_channel)?; + ics4_packet_msg_dispatcher::<_, HostFunctions>(ctx, &msg).map_err(Error::ics04_channel)?; if matches!(packet_result, PacketResult::Recv(RecvPacketResult::NoOp)) { return Ok(handler_builder.with_result(())); @@ -342,7 +342,7 @@ mod tests { let msg_recv_packet = MsgRecvPacket::try_from(get_dummy_raw_msg_recv_packet(35)).unwrap(); // First, create a client.. 
- let res = dispatch::<_, Crypto>( + let res = dispatch::<_, HostFunctions>( &mut ctx, Ics26Envelope::Ics2Msg(ClientMsg::CreateClient(create_client_msg.clone())), ); diff --git a/modules/src/test_utils.rs b/modules/src/test_utils.rs index 9e925c6851..2f09a78c22 100644 --- a/modules/src/test_utils.rs +++ b/modules/src/test_utils.rs @@ -1,7 +1,7 @@ use std::sync::{Arc, Mutex}; use std::time::Duration; -use crate::clients::crypto_ops::crypto::CryptoOps; +use crate::clients::host_functions::HostFunctionsProvider; use crate::prelude::*; use beefy_client::traits::HostFunctions; use sp_core::keccak_256; @@ -103,7 +103,7 @@ impl HostFunctions for Crypto { } } -impl CryptoOps for Crypto { +impl HostFunctions for Crypto { fn verify_membership_trie_proof( root: &sp_core::H256, proof: &[Vec], From 5ceb384d95877f6c7ede014033d53522238d266f Mon Sep 17 00:00:00 2001 From: Seun Lanlege Date: Wed, 25 May 2022 11:57:21 +0100 Subject: [PATCH 36/96] HostFunctionsProvider super trait --- modules/src/clients/host_functions.rs | 31 ++++++++++++++++++- modules/src/clients/ics11_beefy/client_def.rs | 7 +++-- modules/src/clients/ics13_near/crypto_ops.rs | 8 +++++ modules/src/test_utils.rs | 4 +-- 4 files changed, 44 insertions(+), 6 deletions(-) diff --git a/modules/src/clients/host_functions.rs b/modules/src/clients/host_functions.rs index e619beca2c..eaf299e914 100644 --- a/modules/src/clients/host_functions.rs +++ b/modules/src/clients/host_functions.rs @@ -1,10 +1,20 @@ +use std::marker::PhantomData; use crate::core::ics02_client::error::Error; use crate::prelude::*; use sp_core::H256; /// This trait captures all the functions that the host chain should provide for /// crypto operations. -pub trait HostFunctionsProvider: beefy_client::traits::HostFunctions + Clone { +pub trait HostFunctionsProvider: Clone { + /// Keccak 256 hash function + fn keccak_256(input: &[u8]) -> [u8; 32]; + + /// Compressed Ecdsa public key recovery from a signature + fn secp256k1_ecdsa_recover_compressed( + signature: &[u8; 65], + value: &[u8; 32], + ) -> Option>; + /// This function should verify membership in a trie proof using parity's sp-trie package /// with a BlakeTwo256 Hasher fn verify_membership_trie_proof( @@ -22,3 +32,22 @@ pub trait HostFunctionsProvider: beefy_client::traits::HostFunctions + Clone { key: &[u8], ) -> Result<(), Error>; } + +/// This is a work around that allows us to have one super trait [`HostFunctionsProvider`] +/// that encapsulates all the needed host functions by different subsytems, and then +/// implement the needed traits through this wrapper. 
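Note (self-contained sketch, not part of these patches): the wrapper described in the doc comment above is a small but easy-to-miss pattern. A zero-sized struct, generic over the one super trait, implements each third-party trait by delegating to its type parameter; the local wrapper type is what makes the impl legal under Rust's orphan rule. The trait names below are placeholders standing in for `HostFunctionsProvider` and, for example, `beefy_client::traits::HostFunctions`; the real definition follows in the next hunk.

```rust
use core::marker::PhantomData;

/// Stand-in for the crate's own super trait (e.g. `HostFunctionsProvider`).
pub trait SuperTrait {
    fn keccak_256(input: &[u8]) -> [u8; 32];
}

/// Stand-in for a trait owned by a downstream dependency
/// (e.g. `beefy_client::traits::HostFunctions`).
pub trait ForeignTrait {
    fn keccak_256(input: &[u8]) -> [u8; 32];
}

/// Zero-sized adapter: carries `T` only at the type level.
pub struct Manager<T>(PhantomData<T>);

/// Allowed by the orphan rule because `Manager<T>` is a local type, even when
/// `ForeignTrait` lives in another crate; a blanket impl directly on `T`
/// would not be.
impl<T: SuperTrait> ForeignTrait for Manager<T> {
    fn keccak_256(input: &[u8]) -> [u8; 32] {
        T::keccak_256(input)
    }
}
```

In the patch this is what lets a single user-supplied `HostFunctionsProvider` satisfy the BEEFY light client's `HostFunctions` trait without the user writing that impl by hand.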
+pub struct HostFunctionsManager(PhantomData); + +// implementation for beefy host functions +impl beefy_client::traits::HostFunctions for HostFunctionsManager + where + T: HostFunctionsProvider +{ + fn keccak_256(input: &[u8]) -> [u8; 32] { + T::keccak_256(input) + } + + fn secp256k1_ecdsa_recover_compressed(signature: &[u8; 65], value: &[u8; 32]) -> Option> { + T::secp256k1_ecdsa_recover_compressed(signature, value) + } +} diff --git a/modules/src/clients/ics11_beefy/client_def.rs b/modules/src/clients/ics11_beefy/client_def.rs index 4913db79d8..65217f19e1 100644 --- a/modules/src/clients/ics11_beefy/client_def.rs +++ b/modules/src/clients/ics11_beefy/client_def.rs @@ -7,7 +7,7 @@ use pallet_mmr_primitives::BatchProof; use sp_core::H256; use tendermint_proto::Protobuf; -use crate::clients::host_functions::HostFunctionsProvider; +use crate::clients::host_functions::{HostFunctionsManager, HostFunctionsProvider}; use crate::clients::ics11_beefy::client_state::ClientState; use crate::clients::ics11_beefy::consensus_state::ConsensusState; use crate::clients::ics11_beefy::error::Error as BeefyError; @@ -61,6 +61,9 @@ impl ClientDef for BeefyClient Result<(), Error> { + // type alias for managing host functions impl. + type BeefyHostFunctions = HostFunctionsManager; + let light_client_state = LightClientState { latest_beefy_height: client_state.latest_beefy_height, mmr_root_hash: client_state.mmr_root_hash, @@ -68,7 +71,7 @@ impl ClientDef for BeefyClient::new(); + let mut light_client = BeefyLightClient::::new(); // If mmr update exists verify it and return the new light client state // or else return existing light client state let light_client_state = if let Some(mmr_update) = header.mmr_update_proof { diff --git a/modules/src/clients/ics13_near/crypto_ops.rs b/modules/src/clients/ics13_near/crypto_ops.rs index 4acd74cb0e..85dc8622bf 100644 --- a/modules/src/clients/ics13_near/crypto_ops.rs +++ b/modules/src/clients/ics13_near/crypto_ops.rs @@ -6,6 +6,14 @@ pub struct NearCryptoOps { } impl HostFunctionsProvider for NearCryptoOps { + fn keccak_256(input: &[u8]) -> [u8; 32] { + todo!() + } + + fn secp256k1_ecdsa_recover_compressed(signature: &[u8; 65], value: &[u8; 32]) -> Option> { + todo!() + } + fn verify_membership_trie_proof( root: &sp_core::H256, proof: &[Vec], diff --git a/modules/src/test_utils.rs b/modules/src/test_utils.rs index 2f09a78c22..df31fad0e0 100644 --- a/modules/src/test_utils.rs +++ b/modules/src/test_utils.rs @@ -88,7 +88,7 @@ impl Module for DummyTransferModule { #[derive(Clone, Debug, Default, PartialEq, Eq)] pub struct Crypto; -impl HostFunctions for Crypto { +impl HostFunctionsProvider for Crypto { fn keccak_256(input: &[u8]) -> [u8; 32] { keccak_256(input) } @@ -101,9 +101,7 @@ impl HostFunctions for Crypto { .ok() .map(|val| val.to_vec()) } -} -impl HostFunctions for Crypto { fn verify_membership_trie_proof( root: &sp_core::H256, proof: &[Vec], From 1059e269b1c4c7fb0b83ceaf0edc59bd9899705e Mon Sep 17 00:00:00 2001 From: Blas Rodriguez Irizar Date: Wed, 25 May 2022 13:31:33 +0200 Subject: [PATCH 37/96] multitple updates (missing dealing w/ errors) --- modules/src/clients/host_functions.rs | 14 +++-- modules/src/clients/ics13_near/client_def.rs | 55 ++++++++++++------- .../src/clients/ics13_near/client_state.rs | 4 +- .../{crypto_ops.rs => host_functions.rs} | 33 ++++++----- modules/src/clients/ics13_near/mod.rs | 2 +- modules/src/clients/ics13_near/types.rs | 48 +++++++++++++++- modules/src/core/ics02_client/error.rs | 11 ++++ 7 files changed, 125 insertions(+), 42 
deletions(-) rename modules/src/clients/ics13_near/{crypto_ops.rs => host_functions.rs} (53%) diff --git a/modules/src/clients/host_functions.rs b/modules/src/clients/host_functions.rs index eaf299e914..583df47329 100644 --- a/modules/src/clients/host_functions.rs +++ b/modules/src/clients/host_functions.rs @@ -1,7 +1,7 @@ -use std::marker::PhantomData; use crate::core::ics02_client::error::Error; use crate::prelude::*; use sp_core::H256; +use std::marker::PhantomData; /// This trait captures all the functions that the host chain should provide for /// crypto operations. @@ -31,6 +31,9 @@ pub trait HostFunctionsProvider: Clone { proof: &[Vec], key: &[u8], ) -> Result<(), Error>; + + /// Conduct a 256-bit Sha2 hash + fn sha256_digest(data: &[u8]) -> [u8; 32]; } /// This is a work around that allows us to have one super trait [`HostFunctionsProvider`] @@ -40,14 +43,17 @@ pub struct HostFunctionsManager(PhantomData); // implementation for beefy host functions impl beefy_client::traits::HostFunctions for HostFunctionsManager - where - T: HostFunctionsProvider +where + T: HostFunctionsProvider, { fn keccak_256(input: &[u8]) -> [u8; 32] { T::keccak_256(input) } - fn secp256k1_ecdsa_recover_compressed(signature: &[u8; 65], value: &[u8; 32]) -> Option> { + fn secp256k1_ecdsa_recover_compressed( + signature: &[u8; 65], + value: &[u8; 32], + ) -> Option> { T::secp256k1_ecdsa_recover_compressed(signature, value) } } diff --git a/modules/src/clients/ics13_near/client_def.rs b/modules/src/clients/ics13_near/client_def.rs index 59c467e786..cd8bfa0ff8 100644 --- a/modules/src/clients/ics13_near/client_def.rs +++ b/modules/src/clients/ics13_near/client_def.rs @@ -1,24 +1,33 @@ use crate::clients::host_functions::HostFunctionsProvider; use crate::core::ics02_client::client_consensus::AnyConsensusState; use crate::core::ics02_client::client_def::{ClientDef, ConsensusUpdateResult}; +use crate::core::ics02_client::client_state::AnyClientState; +use crate::core::ics03_connection::connection::ConnectionEnd; +use crate::core::ics04_channel::channel::ChannelEnd; +use crate::core::ics04_channel::commitment::{AcknowledgementCommitment, PacketCommitment}; +use crate::core::ics04_channel::packet::Sequence; use crate::core::ics23_commitment::commitment::{ CommitmentPrefix, CommitmentProofBytes, CommitmentRoot, }; -use crate::core::ics24_host::identifier::ClientId; +use crate::core::ics24_host::identifier::{ChannelId, ClientId, ConnectionId, PortId}; use crate::core::ics26_routing::context::LightClientContext; -use std::marker::PhantomData; +use crate::Height; use super::client_state::NearClientState; use super::consensus_state::NearConsensusState; -use super::crypto_ops::NearCryptoOps; -use super::error::Error; +use crate::core::ics02_client::error::Error; + +use super::error::Error as NearError; use super::header::NearHeader; -use super::types::{ApprovalInner, CryptoHash, LightClientBlockView, ValidatorStakeView}; +use super::host_functions::NearHostFunctions; +use super::types::{ApprovalInner, CryptoHash, LightClientBlockView}; + +use borsh::BorshSerialize; #[derive(Debug, Clone)] -pub struct NearClient(PhantomData); +pub struct NearClient; -impl ClientDef for NearClient { +impl ClientDef for NearClient { /// The data that we need to update the [`ClientState`] to a new block height type Header = NearHeader; @@ -50,7 +59,7 @@ impl ClientDef for NearClient { header: Self::Header, ) -> Result<(), Error> { // your light client, shouldn't do storage anymore, it should just do verification here. 
- validate_light_block(&header, client_state) + validate_light_block::(&header, client_state) } fn update_state( @@ -230,7 +239,7 @@ impl ClientDef for NearClient { } // TODO: refactor to use [`HostFunctions`] -pub fn validate_light_block( +pub fn validate_light_block( header: &NearHeader, client_state: NearClientState, ) -> Result<(), Error> { @@ -248,9 +257,9 @@ pub fn validate_light_block( // it's not on the spec, but it's an extra validation let new_block_view = header.get_light_client_block_view(); - let current_block_view = client_state.get_head().get_light_client_block_view(); + let current_block_view = client_state.get_head(); let (_current_block_hash, _next_block_hash, approval_message) = - reconstruct_light_client_block_view_fields::(new_block_view)?; + reconstruct_light_client_block_view_fields::(new_block_view)?; // (1) if new_block_view.inner_lite.height <= current_block_view.inner_lite.height { @@ -303,7 +312,7 @@ pub fn validate_light_block( .unwrap() .verify(&approval_message, validator_public_key.clone()) { - return Err(Error::InvalidSignature); + return Err(NearError::invalid_signature); } } @@ -316,7 +325,7 @@ pub fn validate_light_block( if new_block_view.next_bps.is_some() { let new_block_view_next_bps_serialized = new_block_view.next_bps.as_deref().unwrap().try_to_vec()?; - if D::digest(new_block_view_next_bps_serialized).as_slice() + if H::sha256_digest(new_block_view_next_bps_serialized.as_ref()).as_slice() != new_block_view.inner_lite.next_bp_hash.as_ref() { return Err(Error::SerializationError); @@ -325,12 +334,12 @@ pub fn validate_light_block( Ok(()) } -pub fn reconstruct_light_client_block_view_fields( +pub fn reconstruct_light_client_block_view_fields( block_view: &LightClientBlockView, ) -> Result<(CryptoHash, CryptoHash, Vec), Error> { - let current_block_hash = block_view.current_block_hash::(); + let current_block_hash = block_view.current_block_hash::(); let next_block_hash = - next_block_hash::(block_view.next_block_inner_hash, current_block_hash); + next_block_hash::(block_view.next_block_inner_hash, current_block_hash); let approval_message = [ ApprovalInner::Endorsement(next_block_hash).try_to_vec()?, (block_view.inner_lite.height + 2).to_le().try_to_vec()?, @@ -339,12 +348,16 @@ pub fn reconstruct_light_client_block_view_fields( Ok((current_block_hash, next_block_hash, approval_message)) } -pub(crate) fn next_block_hash( +pub(crate) fn next_block_hash( next_block_inner_hash: CryptoHash, current_block_hash: CryptoHash, ) -> CryptoHash { - D::digest([next_block_inner_hash.as_ref(), current_block_hash.as_ref()].concat()) - .as_slice() - .try_into() - .expect("Could not hash the next block") + H::sha256_digest( + [next_block_inner_hash.as_ref(), current_block_hash.as_ref()] + .concat() + .as_ref(), + ) + .as_slice() + .try_into() + .expect("Could not hash the next block") } diff --git a/modules/src/clients/ics13_near/client_state.rs b/modules/src/clients/ics13_near/client_state.rs index 14b1caf67a..41a5a27641 100644 --- a/modules/src/clients/ics13_near/client_state.rs +++ b/modules/src/clients/ics13_near/client_state.rs @@ -22,9 +22,9 @@ impl NearClientState { &self, epoch_id: &CryptoHash, ) -> Option<&Vec> { - if epoch_id == self.current_epoch { + if epoch_id == &self.current_epoch { Some(&self.current_validators) - } else if epoch_id == self.next_epoch { + } else if epoch_id == &self.next_epoch { Some(&self.next_validators) } else { None diff --git a/modules/src/clients/ics13_near/crypto_ops.rs b/modules/src/clients/ics13_near/host_functions.rs 
similarity index 53% rename from modules/src/clients/ics13_near/crypto_ops.rs rename to modules/src/clients/ics13_near/host_functions.rs index 85dc8622bf..6826392755 100644 --- a/modules/src/clients/ics13_near/crypto_ops.rs +++ b/modules/src/clients/ics13_near/host_functions.rs @@ -1,19 +1,11 @@ -use crate::clients::{host_functions::HostFunctionsProvider, ics13_near::error::Error}; +use crate::clients::host_functions::HostFunctionsProvider; +use crate::core::ics02_client::error::Error; +use sp_core::hashing::sha2_256; #[derive(Debug, Clone)] -pub struct NearCryptoOps { - // _p: PhantomData [u8; 32] { - todo!() - } - - fn secp256k1_ecdsa_recover_compressed(signature: &[u8; 65], value: &[u8; 32]) -> Option> { - todo!() - } +pub struct NearHostFunctions; +impl HostFunctionsProvider for NearHostFunctions { fn verify_membership_trie_proof( root: &sp_core::H256, proof: &[Vec], @@ -30,4 +22,19 @@ impl HostFunctionsProvider for NearCryptoOps { ) -> Result<(), Error> { todo!() } + + fn sha256_digest(data: &[u8]) -> [u8; 32] { + sha2_256(data) + } + + fn keccak_256(input: &[u8]) -> [u8; 32] { + todo!() + } + + fn secp256k1_ecdsa_recover_compressed( + signature: &[u8; 65], + value: &[u8; 32], + ) -> Option> { + todo!() + } } diff --git a/modules/src/clients/ics13_near/mod.rs b/modules/src/clients/ics13_near/mod.rs index ebfc70b798..a51e703a45 100644 --- a/modules/src/clients/ics13_near/mod.rs +++ b/modules/src/clients/ics13_near/mod.rs @@ -1,7 +1,7 @@ mod client_def; mod client_state; mod consensus_state; -mod crypto_ops; pub mod error; pub mod header; +pub mod host_functions; pub mod types; diff --git a/modules/src/clients/ics13_near/types.rs b/modules/src/clients/ics13_near/types.rs index f0053e6661..ab40bf1039 100644 --- a/modules/src/clients/ics13_near/types.rs +++ b/modules/src/clients/ics13_near/types.rs @@ -4,6 +4,7 @@ use sp_std::vec::Vec; use borsh::{BorshDeserialize, BorshSerialize}; use sp_core::ed25519::{Public as Ed25519Public, Signature as Ed25519Signature}; +use crate::clients::host_functions::HostFunctionsProvider; use crate::Height; #[derive(Debug)] @@ -238,10 +239,55 @@ impl BorshDeserialize for PublicKey { } impl LightClientBlockView { - fn get_height(&self) -> Height { + pub fn get_height(&self) -> Height { Height { revision_number: 0, revision_height: self.inner_lite.height, } } + + pub fn current_block_hash(&self) -> CryptoHash { + current_block_hash::( + H::sha256_digest(self.inner_lite.try_to_vec().unwrap().as_ref()) + .as_slice() + .try_into() + .unwrap(), + self.inner_rest_hash, + self.prev_block_hash, + ) + } +} + +/// The hash of the block is: +/// ```ignore +/// sha256(concat( +/// sha256(concat( +/// sha256(borsh(inner_lite)), +/// sha256(borsh(inner_rest)) // we can use inner_rest_hash as well +/// ) +/// ), +/// prev_hash +///)) +/// ``` +fn current_block_hash( + inner_lite_hash: CryptoHash, + inner_rest_hash: CryptoHash, + prev_block_hash: CryptoHash, +) -> CryptoHash { + H::sha256_digest( + [ + H::sha256_digest( + [inner_lite_hash.as_ref(), inner_rest_hash.as_ref()] + .concat() + .as_ref(), + ) + .as_ref(), + prev_block_hash.as_ref(), + ] + .concat() + .as_ref(), + ) + .as_slice() + .try_into() + .unwrap() } diff --git a/modules/src/core/ics02_client/error.rs b/modules/src/core/ics02_client/error.rs index 626c730f4d..6eaac30497 100644 --- a/modules/src/core/ics02_client/error.rs +++ b/modules/src/core/ics02_client/error.rs @@ -6,6 +6,7 @@ use tendermint_proto::Error as TendermintProtoError; use crate::clients::ics07_tendermint::error::Error as Ics07Error; use 
crate::clients::ics11_beefy::error::Error as Ics11Error; +use crate::clients::ics13_near::error::Error as Ics13Error; use crate::core::ics02_client::client_type::ClientType; use crate::core::ics02_client::height::HeightError; use crate::core::ics23_commitment::error::Error as Ics23Error; @@ -185,6 +186,10 @@ define_error! { [ Ics11Error ] | _ | { "Beefy error" }, + Near + [ Ics13Error ] + | _ | { "Near error" }, + InvalidPacketTimestamp [ crate::timestamp::ParseTimestampError ] | _ | { "invalid packet timeout timestamp value" }, @@ -291,3 +296,9 @@ impl From for Error { Error::beefy(e) } } + +impl From for Error { + fn from(e: Ics13Error) -> Error { + Error::near(e) + } +} From d77a7a2d93a56e6c166e2c3310ad3b0f2487c9e2 Mon Sep 17 00:00:00 2001 From: Blas Rodriguez Irizar Date: Wed, 25 May 2022 14:59:44 +0200 Subject: [PATCH 38/96] some more updates --- modules/Cargo.toml | 2 +- modules/src/clients/ics13_near/client_def.rs | 33 +++++++++----- .../src/clients/ics13_near/consensus_state.rs | 4 +- modules/src/clients/ics13_near/header.rs | 4 +- modules/src/clients/ics13_near/types.rs | 44 ++++++++++++++++--- modules/src/core/ics02_client/header.rs | 6 +-- 6 files changed, 66 insertions(+), 27 deletions(-) diff --git a/modules/Cargo.toml b/modules/Cargo.toml index 5505ea0c26..90c36222cb 100644 --- a/modules/Cargo.toml +++ b/modules/Cargo.toml @@ -65,7 +65,7 @@ beefy-primitives = { git = "https://github.com/paritytech/substrate", branch = " codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false } sp-std = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.22", default-features = false } sp-trie = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.22", default-features = false } -sp-io = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.22", optional = true } +sp-io = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.22", default-features = false, optional = true } [dependencies.tendermint] version = "=0.23.7" diff --git a/modules/src/clients/ics13_near/client_def.rs b/modules/src/clients/ics13_near/client_def.rs index cd8bfa0ff8..32e3b738e4 100644 --- a/modules/src/clients/ics13_near/client_def.rs +++ b/modules/src/clients/ics13_near/client_def.rs @@ -263,7 +263,7 @@ pub fn validate_light_block( // (1) if new_block_view.inner_lite.height <= current_block_view.inner_lite.height { - return Err(Error::HeightTooOld); + return Err(NearError::height_too_old().into()); } // (2) @@ -273,14 +273,14 @@ pub fn validate_light_block( ] .contains(&new_block_view.inner_lite.epoch_id) { - return Err(Error::InvalidEpoch); + return Err(NearError::invalid_epoch(new_block_view.inner_lite.epoch_id).into()); } // (3) if new_block_view.inner_lite.epoch_id == current_block_view.inner_lite.next_epoch_id && new_block_view.next_bps.is_none() { - return Err(Error::UnavailableBlockProducers); + return Err(NearError::unavailable_block_producers().into()); } // (4) and (5) @@ -289,7 +289,9 @@ pub fn validate_light_block( let epoch_block_producers = client_state .get_validators_by_epoch(&new_block_view.inner_lite.epoch_id) - .ok_or(Error::InvalidEpoch)?; + .ok_or(Error::from(NearError::invalid_epoch( + new_block_view.inner_lite.epoch_id, + )))?; for (maybe_signature, block_producer) in new_block_view .approvals_after_next @@ -312,23 +314,27 @@ pub fn validate_light_block( .unwrap() .verify(&approval_message, validator_public_key.clone()) { - return Err(NearError::invalid_signature); + return 
Err(NearError::invalid_signature().into()); } } let threshold = total_stake * 2 / 3; if approved_stake <= threshold { - return Err(Error::InsufficientStakedAmount); + return Err(NearError::insufficient_staked_amount().into()); } // # (6) if new_block_view.next_bps.is_some() { - let new_block_view_next_bps_serialized = - new_block_view.next_bps.as_deref().unwrap().try_to_vec()?; + let new_block_view_next_bps_serialized = new_block_view + .next_bps + .as_deref() + .unwrap() + .try_to_vec() + .map_err(|_| Error::from(NearError::serialization_error()))?; if H::sha256_digest(new_block_view_next_bps_serialized.as_ref()).as_slice() != new_block_view.inner_lite.next_bp_hash.as_ref() { - return Err(Error::SerializationError); + return Err(NearError::serialization_error().into()); } } Ok(()) @@ -341,8 +347,13 @@ pub fn reconstruct_light_client_block_view_fields( let next_block_hash = next_block_hash::(block_view.next_block_inner_hash, current_block_hash); let approval_message = [ - ApprovalInner::Endorsement(next_block_hash).try_to_vec()?, - (block_view.inner_lite.height + 2).to_le().try_to_vec()?, + ApprovalInner::Endorsement(next_block_hash) + .try_to_vec() + .map_err(|_| Error::from(NearError::serialization_error()))?, + (block_view.inner_lite.height + 2) + .to_le() + .try_to_vec() + .map_err(|_| Error::from(NearError::serialization_error()))?, ] .concat(); Ok((current_block_hash, next_block_hash, approval_message)) diff --git a/modules/src/clients/ics13_near/consensus_state.rs b/modules/src/clients/ics13_near/consensus_state.rs index 22fcb47fd0..a8316cbe58 100644 --- a/modules/src/clients/ics13_near/consensus_state.rs +++ b/modules/src/clients/ics13_near/consensus_state.rs @@ -1,8 +1,7 @@ +use super::error::Error; use crate::core::ics02_client::client_consensus::{AnyConsensusState, ConsensusState}; use crate::core::ics02_client::client_type::ClientType; use crate::core::ics23_commitment::commitment::CommitmentRoot; -use crate::error::Error; - #[derive(Debug, Clone)] pub struct NearConsensusState { @@ -12,7 +11,6 @@ pub struct NearConsensusState { impl ConsensusState for NearConsensusState { type Error = Error; - fn client_type(&self) -> ClientType { ClientType::Near } diff --git a/modules/src/clients/ics13_near/header.rs b/modules/src/clients/ics13_near/header.rs index 148002bdb4..6c221e5c32 100644 --- a/modules/src/clients/ics13_near/header.rs +++ b/modules/src/clients/ics13_near/header.rs @@ -5,7 +5,7 @@ use crate::core::ics02_client::{ use super::types::LightClientBlockView; -#[derive(Debug, Clone)] +#[derive(Clone, PartialEq, Eq, Debug, codec::Encode, codec::Decode)] pub struct NearHeader { inner: LightClientBlockView, } @@ -22,6 +22,6 @@ impl Header for NearHeader { } fn wrap_any(self) -> AnyHeader { - AnyHeader::Near(self.inner.clone()) + AnyHeader::Near(self) } } diff --git a/modules/src/clients/ics13_near/types.rs b/modules/src/clients/ics13_near/types.rs index ab40bf1039..4d860c28f2 100644 --- a/modules/src/clients/ics13_near/types.rs +++ b/modules/src/clients/ics13_near/types.rs @@ -3,22 +3,34 @@ use sp_std::vec::Vec; use borsh::{BorshDeserialize, BorshSerialize}; use sp_core::ed25519::{Public as Ed25519Public, Signature as Ed25519Signature}; +use sp_io::crypto::ed25519_verify; use crate::clients::host_functions::HostFunctionsProvider; use crate::Height; #[derive(Debug)] pub struct ConversionError(String); -#[derive(Debug, Clone)] +#[derive(Debug, Clone, PartialEq, Eq, codec::Encode, codec::Decode)] pub struct PublicKey(pub [u8; 32]); -#[derive(Debug, Clone)] +#[derive(Debug, Clone, 
PartialEq, Eq, codec::Encode, codec::Decode)] pub enum Signature { Ed25519(Ed25519Signature), } #[derive( - Debug, Ord, PartialOrd, PartialEq, Eq, Hash, Clone, Copy, BorshSerialize, BorshDeserialize, + Debug, + Ord, + PartialOrd, + PartialEq, + Eq, + Hash, + Clone, + Copy, + BorshSerialize, + BorshDeserialize, + codec::Encode, + codec::Decode, )] pub struct CryptoHash(pub [u8; 32]); @@ -34,6 +46,16 @@ impl Signature { Self::Ed25519(inner) => &inner.0, } } + + // TODO: we might want to create a trait for signature verification + // or integrate this into HostFunctions + pub fn verify(&self, data: impl AsRef<[u8]>, public_key: PublicKey) -> bool { + match self { + Self::Ed25519(signature) => { + ed25519_verify(signature, data.as_ref(), &Ed25519Public::from(&public_key)) + } + } + } } impl PublicKey { @@ -95,7 +117,9 @@ pub struct LightClientBlockLiteView { pub inner_lite: BlockHeaderInnerLiteView, } -#[derive(Debug, Clone, BorshSerialize, BorshDeserialize)] +#[derive( + Debug, Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize, codec::Encode, codec::Decode, +)] pub struct LightClientBlockView { pub prev_block_hash: CryptoHash, pub next_block_inner_hash: CryptoHash, @@ -105,7 +129,9 @@ pub struct LightClientBlockView { pub approvals_after_next: Vec>, } -#[derive(Debug, Clone, BorshSerialize, BorshDeserialize)] +#[derive( + Debug, Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize, codec::Encode, codec::Decode, +)] pub struct BlockHeaderInnerLiteView { pub height: BlockHeight, pub epoch_id: CryptoHash, @@ -139,11 +165,15 @@ pub enum ApprovalInner { Skip(BlockHeight), } -#[derive(Debug, Clone, BorshSerialize, BorshDeserialize)] +#[derive( + Debug, Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize, codec::Encode, codec::Decode, +)] pub enum ValidatorStakeView { V1(ValidatorStakeViewV1), } -#[derive(Debug, Clone, BorshSerialize, BorshDeserialize)] +#[derive( + Debug, Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize, codec::Encode, codec::Decode, +)] pub struct ValidatorStakeViewV1 { pub account_id: AccountId, pub public_key: PublicKey, diff --git a/modules/src/core/ics02_client/header.rs b/modules/src/core/ics02_client/header.rs index a5f3e8da54..fd94fc499e 100644 --- a/modules/src/core/ics02_client/header.rs +++ b/modules/src/core/ics02_client/header.rs @@ -7,7 +7,7 @@ use tendermint_proto::Protobuf; use crate::clients::ics07_tendermint::header::{decode_header, Header as TendermintHeader}; use crate::clients::ics11_beefy::header::{decode_header as decode_beefy_header, BeefyHeader}; -use crate::clients::ics13_near::types::LightClientBlockView; +use crate::clients::ics13_near::header::NearHeader; use crate::core::ics02_client::client_type::ClientType; use crate::core::ics02_client::error::Error; #[cfg(any(test, feature = "mocks"))] @@ -37,7 +37,7 @@ pub enum AnyHeader { #[serde(skip)] Beefy(BeefyHeader), #[serde(skip)] - Near(LightClientBlockView), + Near(NearHeader), #[cfg(any(test, feature = "mocks"))] Mock(MockHeader), } @@ -141,7 +141,7 @@ impl From for Any { type_url: NEAR_HEADER_TYPE_URL.to_string(), value: header .encode_vec() - .encode_vec("encodign to `Any` from AnyHeader::Near"), + .expect("encodign to `Any` from AnyHeader::Near"), }, #[cfg(any(test, feature = "mocks"))] AnyHeader::Mock(header) => Any { From 6d811c1e17b087c145349ee188d888adf30be7ae Mon Sep 17 00:00:00 2001 From: Seun Lanlege Date: Thu, 26 May 2022 10:11:29 +0100 Subject: [PATCH 39/96] ok it compiles --- modules/src/clients/host_functions.rs | 4 ++ modules/src/clients/ics11_beefy/client_def.rs | 5 +-- 
.../clients/ics11_beefy/consensus_state.rs | 8 +++- modules/src/clients/ics13_near/client_def.rs | 42 +++++++++---------- .../src/clients/ics13_near/client_state.rs | 2 +- modules/src/clients/ics13_near/header.rs | 2 +- .../src/clients/ics13_near/host_functions.rs | 40 ------------------ modules/src/clients/ics13_near/mod.rs | 1 - modules/src/clients/ics13_near/types.rs | 16 ++++--- modules/src/core/ics02_client/header.rs | 24 +++++------ modules/src/core/ics03_connection/handler.rs | 4 +- modules/src/core/ics04_channel/handler.rs | 8 +++- modules/src/core/ics26_routing/handler.rs | 12 ++++-- modules/src/lib.rs | 2 +- modules/src/test_utils.rs | 8 ++++ 15 files changed, 83 insertions(+), 95 deletions(-) delete mode 100644 modules/src/clients/ics13_near/host_functions.rs diff --git a/modules/src/clients/host_functions.rs b/modules/src/clients/host_functions.rs index 583df47329..ec0e3dd357 100644 --- a/modules/src/clients/host_functions.rs +++ b/modules/src/clients/host_functions.rs @@ -15,6 +15,9 @@ pub trait HostFunctionsProvider: Clone { value: &[u8; 32], ) -> Option>; + /// Recover the ED25519 pubkey that produced this signature + fn ed25519_recover(signature: &[u8; 64], value: &[u8; 32]) -> Option>; + /// This function should verify membership in a trie proof using parity's sp-trie package /// with a BlakeTwo256 Hasher fn verify_membership_trie_proof( @@ -39,6 +42,7 @@ pub trait HostFunctionsProvider: Clone { /// This is a work around that allows us to have one super trait [`HostFunctionsProvider`] /// that encapsulates all the needed host functions by different subsytems, and then /// implement the needed traits through this wrapper. +#[derive(Clone)] pub struct HostFunctionsManager(PhantomData); // implementation for beefy host functions diff --git a/modules/src/clients/ics11_beefy/client_def.rs b/modules/src/clients/ics11_beefy/client_def.rs index 65217f19e1..d3803cafb5 100644 --- a/modules/src/clients/ics11_beefy/client_def.rs +++ b/modules/src/clients/ics11_beefy/client_def.rs @@ -61,9 +61,6 @@ impl ClientDef for BeefyClient Result<(), Error> { - // type alias for managing host functions impl. 
- type BeefyHostFunctions = HostFunctionsManager; - let light_client_state = LightClientState { latest_beefy_height: client_state.latest_beefy_height, mmr_root_hash: client_state.mmr_root_hash, @@ -71,7 +68,7 @@ impl ClientDef for BeefyClient::new(); + let mut light_client = BeefyLightClient::>::new(); // If mmr update exists verify it and return the new light client state // or else return existing light client state let light_client_state = if let Some(mmr_update) = header.mmr_update_proof { diff --git a/modules/src/clients/ics11_beefy/consensus_state.rs b/modules/src/clients/ics11_beefy/consensus_state.rs index 32796dd6b2..844523efd3 100644 --- a/modules/src/clients/ics11_beefy/consensus_state.rs +++ b/modules/src/clients/ics11_beefy/consensus_state.rs @@ -33,7 +33,9 @@ impl ConsensusState { } #[cfg(not(test))] - pub fn from_header(header: ParachainHeader) -> Result { + pub fn from_header( + header: ParachainHeader, + ) -> Result { use crate::clients::ics11_beefy::header::decode_timestamp_extrinsic; use crate::timestamp::Timestamp; use sp_runtime::SaturatedConversion; @@ -69,7 +71,9 @@ impl ConsensusState { #[cfg(test)] /// Leaving this here because there's no ibc commitment root in the runtime header that will be used in /// testing - pub fn from_header(header: ParachainHeader) -> Result { + pub fn from_header( + header: ParachainHeader, + ) -> Result { use crate::clients::ics11_beefy::header::decode_timestamp_extrinsic; use crate::timestamp::Timestamp; use sp_runtime::SaturatedConversion; diff --git a/modules/src/clients/ics13_near/client_def.rs b/modules/src/clients/ics13_near/client_def.rs index 32e3b738e4..fdbb1d20a5 100644 --- a/modules/src/clients/ics13_near/client_def.rs +++ b/modules/src/clients/ics13_near/client_def.rs @@ -1,3 +1,4 @@ +use std::marker::PhantomData; use crate::clients::host_functions::HostFunctionsProvider; use crate::core::ics02_client::client_consensus::AnyConsensusState; use crate::core::ics02_client::client_def::{ClientDef, ConsensusUpdateResult}; @@ -19,15 +20,14 @@ use crate::core::ics02_client::error::Error; use super::error::Error as NearError; use super::header::NearHeader; -use super::host_functions::NearHostFunctions; use super::types::{ApprovalInner, CryptoHash, LightClientBlockView}; use borsh::BorshSerialize; #[derive(Debug, Clone)] -pub struct NearClient; +pub struct NearClient(PhantomData); -impl ClientDef for NearClient { +impl ClientDef for NearClient { /// The data that we need to update the [`ClientState`] to a new block height type Header = NearHeader; @@ -59,15 +59,15 @@ impl ClientDef for NearClient { header: Self::Header, ) -> Result<(), Error> { // your light client, shouldn't do storage anymore, it should just do verification here. - validate_light_block::(&header, client_state) + validate_light_block::(&header, client_state) } fn update_state( &self, - ctx: &dyn LightClientContext, - client_id: ClientId, - client_state: Self::ClientState, - header: Self::Header, + _ctx: &dyn LightClientContext, + _client_id: ClientId, + _client_state: Self::ClientState, + _header: Self::Header, ) -> Result<(Self::ClientState, ConsensusUpdateResult), Error> { // 1. create new client state from this header, return that. // 2. as well as all the neccessary consensus states. 
@@ -84,8 +84,8 @@ impl ClientDef for NearClient { fn update_state_on_misbehaviour( &self, - client_state: Self::ClientState, - header: Self::Header, + _client_state: Self::ClientState, + _header: Self::Header, ) -> Result { todo!() } @@ -173,7 +173,7 @@ impl ClientDef for NearClient { fn verify_packet_data( &self, - ctx: &dyn LightClientContext, + _ctx: &dyn LightClientContext, _client_id: &ClientId, _client_state: &Self::ClientState, _height: Height, @@ -190,7 +190,7 @@ impl ClientDef for NearClient { fn verify_packet_acknowledgement( &self, - ctx: &dyn LightClientContext, + _ctx: &dyn LightClientContext, _client_id: &ClientId, _client_state: &Self::ClientState, _height: Height, @@ -258,7 +258,7 @@ pub fn validate_light_block( let new_block_view = header.get_light_client_block_view(); let current_block_view = client_state.get_head(); - let (_current_block_hash, _next_block_hash, approval_message) = + let (_current_block_hash, _next_block_hash, _approval_message) = reconstruct_light_client_block_view_fields::(new_block_view)?; // (1) @@ -308,14 +308,14 @@ pub fn validate_light_block( approved_stake += bp_stake; - let validator_public_key = bp_stake_view.public_key.clone(); - if !maybe_signature - .as_ref() - .unwrap() - .verify(&approval_message, validator_public_key.clone()) - { - return Err(NearError::invalid_signature().into()); - } + let _validator_public_key = bp_stake_view.public_key.clone(); + // if !maybe_signature + // .as_ref() + // .unwrap() + // .verify::(&H::sha256_digest(&approval_message), validator_public_key.clone()) + // { + // return Err(NearError::invalid_signature().into()); + // } } let threshold = total_stake * 2 / 3; diff --git a/modules/src/clients/ics13_near/client_state.rs b/modules/src/clients/ics13_near/client_state.rs index 41a5a27641..167a1af28f 100644 --- a/modules/src/clients/ics13_near/client_state.rs +++ b/modules/src/clients/ics13_near/client_state.rs @@ -15,7 +15,7 @@ pub struct NearClientState { next_validators: Vec, } -struct NearUpgradeOptions {} +pub struct NearUpgradeOptions {} impl NearClientState { pub fn get_validators_by_epoch( diff --git a/modules/src/clients/ics13_near/header.rs b/modules/src/clients/ics13_near/header.rs index 6c221e5c32..74f46026e4 100644 --- a/modules/src/clients/ics13_near/header.rs +++ b/modules/src/clients/ics13_near/header.rs @@ -22,6 +22,6 @@ impl Header for NearHeader { } fn wrap_any(self) -> AnyHeader { - AnyHeader::Near(self) + todo!() } } diff --git a/modules/src/clients/ics13_near/host_functions.rs b/modules/src/clients/ics13_near/host_functions.rs deleted file mode 100644 index 6826392755..0000000000 --- a/modules/src/clients/ics13_near/host_functions.rs +++ /dev/null @@ -1,40 +0,0 @@ -use crate::clients::host_functions::HostFunctionsProvider; -use crate::core::ics02_client::error::Error; -use sp_core::hashing::sha2_256; - -#[derive(Debug, Clone)] -pub struct NearHostFunctions; - -impl HostFunctionsProvider for NearHostFunctions { - fn verify_membership_trie_proof( - root: &sp_core::H256, - proof: &[Vec], - key: &[u8], - value: &[u8], - ) -> Result<(), Error> { - todo!() - } - - fn verify_non_membership_trie_proof( - root: &sp_core::H256, - proof: &[Vec], - key: &[u8], - ) -> Result<(), Error> { - todo!() - } - - fn sha256_digest(data: &[u8]) -> [u8; 32] { - sha2_256(data) - } - - fn keccak_256(input: &[u8]) -> [u8; 32] { - todo!() - } - - fn secp256k1_ecdsa_recover_compressed( - signature: &[u8; 65], - value: &[u8; 32], - ) -> Option> { - todo!() - } -} diff --git a/modules/src/clients/ics13_near/mod.rs 
b/modules/src/clients/ics13_near/mod.rs index a51e703a45..e3721a9ccf 100644 --- a/modules/src/clients/ics13_near/mod.rs +++ b/modules/src/clients/ics13_near/mod.rs @@ -3,5 +3,4 @@ mod client_state; mod consensus_state; pub mod error; pub mod header; -pub mod host_functions; pub mod types; diff --git a/modules/src/clients/ics13_near/types.rs b/modules/src/clients/ics13_near/types.rs index 4d860c28f2..45fd9a5ed9 100644 --- a/modules/src/clients/ics13_near/types.rs +++ b/modules/src/clients/ics13_near/types.rs @@ -3,13 +3,13 @@ use sp_std::vec::Vec; use borsh::{BorshDeserialize, BorshSerialize}; use sp_core::ed25519::{Public as Ed25519Public, Signature as Ed25519Signature}; -use sp_io::crypto::ed25519_verify; use crate::clients::host_functions::HostFunctionsProvider; use crate::Height; #[derive(Debug)] pub struct ConversionError(String); + #[derive(Debug, Clone, PartialEq, Eq, codec::Encode, codec::Decode)] pub struct PublicKey(pub [u8; 32]); @@ -49,11 +49,15 @@ impl Signature { // TODO: we might want to create a trait for signature verification // or integrate this into HostFunctions - pub fn verify(&self, data: impl AsRef<[u8]>, public_key: PublicKey) -> bool { + pub fn verify( + &self, + data: impl AsRef<[u8; 32]>, + public_key: PublicKey, + ) -> bool { match self { - Self::Ed25519(signature) => { - ed25519_verify(signature, data.as_ref(), &Ed25519Public::from(&public_key)) - } + Self::Ed25519(signature) => T::ed25519_recover(signature.as_ref(), data.as_ref()) + .map(|key| &key == public_key.0.as_ref()) + .unwrap_or(false), } } } @@ -171,6 +175,7 @@ pub enum ApprovalInner { pub enum ValidatorStakeView { V1(ValidatorStakeViewV1), } + #[derive( Debug, Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize, codec::Encode, codec::Decode, )] @@ -226,6 +231,7 @@ impl ValidatorStakeView { } } } + #[cfg_attr(feature = "deepsize_feature", derive(deepsize::DeepSizeOf))] #[derive(Debug, Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize)] pub struct MerklePathItem { diff --git a/modules/src/core/ics02_client/header.rs b/modules/src/core/ics02_client/header.rs index fd94fc499e..7cd4b32871 100644 --- a/modules/src/core/ics02_client/header.rs +++ b/modules/src/core/ics02_client/header.rs @@ -7,7 +7,7 @@ use tendermint_proto::Protobuf; use crate::clients::ics07_tendermint::header::{decode_header, Header as TendermintHeader}; use crate::clients::ics11_beefy::header::{decode_header as decode_beefy_header, BeefyHeader}; -use crate::clients::ics13_near::header::NearHeader; +// use crate::clients::ics13_near::header::NearHeader; use crate::core::ics02_client::client_type::ClientType; use crate::core::ics02_client::error::Error; #[cfg(any(test, feature = "mocks"))] @@ -36,8 +36,8 @@ pub enum AnyHeader { Tendermint(TendermintHeader), #[serde(skip)] Beefy(BeefyHeader), - #[serde(skip)] - Near(NearHeader), + // #[serde(skip)] + // Near(NearHeader), #[cfg(any(test, feature = "mocks"))] Mock(MockHeader), } @@ -47,7 +47,7 @@ impl AnyHeader { match self { Self::Tendermint(header) => header.height(), Self::Beefy(_header) => Default::default(), - Self::Near(_header) => Default::default(), + // Self::Near(_header) => Default::default(), #[cfg(any(test, feature = "mocks"))] Self::Mock(header) => header.height(), } @@ -57,7 +57,7 @@ impl AnyHeader { match self { Self::Tendermint(header) => header.timestamp(), Self::Beefy(_header) => Default::default(), - Self::Near(_header) => Default::default(), + // Self::Near(_header) => Default::default(), #[cfg(any(test, feature = "mocks"))] Self::Mock(header) => 
header.timestamp(), } @@ -69,7 +69,7 @@ impl Header for AnyHeader { match self { Self::Tendermint(header) => header.client_type(), Self::Beefy(header) => header.client_type(), - Self::Near(header) => header.client_type(), + // Self::Near(header) => header.client_type(), #[cfg(any(test, feature = "mocks"))] Self::Mock(header) => header.client_type(), } @@ -137,12 +137,12 @@ impl From for Any { .encode_vec() .expect("encoding to `Any` from `AnyHeader::Beefy`"), }, - AnyHeader::Near(header) => Any { - type_url: NEAR_HEADER_TYPE_URL.to_string(), - value: header - .encode_vec() - .expect("encodign to `Any` from AnyHeader::Near"), - }, + // AnyHeader::Near(header) => Any { + // type_url: NEAR_HEADER_TYPE_URL.to_string(), + // value: header + // .encode_vec() + // .expect("encodign to `Any` from AnyHeader::Near"), + // }, #[cfg(any(test, feature = "mocks"))] AnyHeader::Mock(header) => Any { type_url: MOCK_HEADER_TYPE_URL.to_string(), diff --git a/modules/src/core/ics03_connection/handler.rs b/modules/src/core/ics03_connection/handler.rs index 5f54080c1d..e3d596b3c7 100644 --- a/modules/src/core/ics03_connection/handler.rs +++ b/modules/src/core/ics03_connection/handler.rs @@ -54,6 +54,8 @@ where ConnectionMsg::ConnectionOpenInit(msg) => conn_open_init::process(ctx, msg), ConnectionMsg::ConnectionOpenTry(msg) => conn_open_try::process::(ctx, *msg), ConnectionMsg::ConnectionOpenAck(msg) => conn_open_ack::process::(ctx, *msg), - ConnectionMsg::ConnectionOpenConfirm(msg) => conn_open_confirm::process::(ctx, msg), + ConnectionMsg::ConnectionOpenConfirm(msg) => { + conn_open_confirm::process::(ctx, msg) + } } } diff --git a/modules/src/core/ics04_channel/handler.rs b/modules/src/core/ics04_channel/handler.rs index ed6dcb0224..cbdba24490 100644 --- a/modules/src/core/ics04_channel/handler.rs +++ b/modules/src/core/ics04_channel/handler.rs @@ -71,9 +71,13 @@ where ChannelMsg::ChannelOpenInit(msg) => chan_open_init::process(ctx, msg), ChannelMsg::ChannelOpenTry(msg) => chan_open_try::process::(ctx, msg), ChannelMsg::ChannelOpenAck(msg) => chan_open_ack::process::(ctx, msg), - ChannelMsg::ChannelOpenConfirm(msg) => chan_open_confirm::process::(ctx, msg), + ChannelMsg::ChannelOpenConfirm(msg) => { + chan_open_confirm::process::(ctx, msg) + } ChannelMsg::ChannelCloseInit(msg) => chan_close_init::process(ctx, msg), - ChannelMsg::ChannelCloseConfirm(msg) => chan_close_confirm::process::(ctx, msg), + ChannelMsg::ChannelCloseConfirm(msg) => { + chan_close_confirm::process::(ctx, msg) + } }?; let HandlerOutput { result, diff --git a/modules/src/core/ics26_routing/handler.rs b/modules/src/core/ics26_routing/handler.rs index c81ce32d81..5c47d03053 100644 --- a/modules/src/core/ics26_routing/handler.rs +++ b/modules/src/core/ics26_routing/handler.rs @@ -59,7 +59,10 @@ pub fn decode(message: Any) -> Result { /// and events produced after processing the input `msg`. /// If this method returns an error, the runtime is expected to rollback all state modifications to /// the `Ctx` caused by all messages from the transaction that this `msg` is a part of. 
-pub fn dispatch(ctx: &mut Ctx, msg: Ics26Envelope) -> Result, Error> +pub fn dispatch( + ctx: &mut Ctx, + msg: Ics26Envelope, +) -> Result, Error> where Ctx: Ics26Context, HostFunctions: HostFunctionsProvider, @@ -80,8 +83,8 @@ where } Ics3Msg(msg) => { - let handler_output = - ics3_msg_dispatcher::<_, HostFunctions>(ctx, msg).map_err(Error::ics03_connection)?; + let handler_output = ics3_msg_dispatcher::<_, HostFunctions>(ctx, msg) + .map_err(Error::ics03_connection)?; // Apply any results to the host chain store. ctx.store_connection_result(handler_output.result) @@ -128,7 +131,8 @@ where Ics4PacketMsg(msg) => { let module_id = get_module_for_packet_msg(ctx, &msg).map_err(Error::ics04_channel)?; let (mut handler_builder, packet_result) = - ics4_packet_msg_dispatcher::<_, HostFunctions>(ctx, &msg).map_err(Error::ics04_channel)?; + ics4_packet_msg_dispatcher::<_, HostFunctions>(ctx, &msg) + .map_err(Error::ics04_channel)?; if matches!(packet_result, PacketResult::Recv(RecvPacketResult::NoOp)) { return Ok(handler_builder.with_result(())); diff --git a/modules/src/lib.rs b/modules/src/lib.rs index 506959eff6..3a0655460c 100644 --- a/modules/src/lib.rs +++ b/modules/src/lib.rs @@ -5,7 +5,7 @@ #![cfg_attr(not(feature = "std"), no_std)] #![allow(clippy::large_enum_variant)] #![deny( - warnings, + // warnings, trivial_casts, trivial_numeric_casts, unused_import_braces, diff --git a/modules/src/test_utils.rs b/modules/src/test_utils.rs index df31fad0e0..0ad789fa6b 100644 --- a/modules/src/test_utils.rs +++ b/modules/src/test_utils.rs @@ -102,6 +102,10 @@ impl HostFunctionsProvider for Crypto { .map(|val| val.to_vec()) } + fn ed25519_recover(signature: &[u8; 64], value: &[u8; 32]) -> Option> { + todo!() + } + fn verify_membership_trie_proof( root: &sp_core::H256, proof: &[Vec], @@ -126,4 +130,8 @@ impl HostFunctionsProvider for Crypto { ) .map_err(|_| Ics02Error::beefy(BeefyError::invalid_trie_proof())) } + + fn sha256_digest(data: &[u8]) -> [u8; 32] { + todo!() + } } From d9ac56d1857388d0d354bba929fead27f8436dff Mon Sep 17 00:00:00 2001 From: Web3 Smith <31099392+Wizdave97@users.noreply.github.com> Date: Thu, 26 May 2022 19:33:55 +0100 Subject: [PATCH 40/96] Update `beefy-rs` (#7) --- Cargo.lock | 11 ---------- modules/Cargo.toml | 4 ++-- modules/src/clients/host_functions.rs | 2 +- modules/src/clients/ics13_near/client_def.rs | 7 +++---- .../src/clients/ics13_near/client_state.rs | 7 ++++--- modules/src/clients/ics13_near/error.rs | 10 +++++----- modules/src/clients/ics13_near/header.rs | 2 +- modules/src/clients/ics13_near/types.rs | 20 ++++++------------- .../ics04_channel/handler/chan_open_ack.rs | 2 +- modules/src/core/ics26_routing/handler.rs | 2 +- modules/src/mock/context.rs | 3 +++ modules/src/test_utils.rs | 5 ++--- 12 files changed, 29 insertions(+), 46 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7876341a32..31609eac53 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -251,15 +251,7 @@ dependencies = [ [[package]] name = "beefy-generic-client" version = "0.1.0" -<<<<<<< HEAD -<<<<<<< HEAD source = "git+https://github.com/ComposableFi/beefy-client?branch=master#c8ef22e646f9eb33d66ac83e6fdad808b76aa4ed" -======= -source = "git+https://github.com/ComposableFi/beefy-client?branch=david/refactor-traits#c60a46d3335303b5980eb93280f84ec0e6d99466" ->>>>>>> 5c48fa8c (decode timestamp from extrinsic correctly) -======= -source = "git+https://github.com/ComposableFi/beefy-client?branch=david/refactor-traits#2228e4277c19c5cf643d3e613a6c1cc41cb0862c" ->>>>>>> b77494c4 (fixed flaky beefy 
client test issues) dependencies = [ "beefy-primitives", "ckb-merkle-mountain-range", @@ -1677,10 +1669,7 @@ dependencies = [ "parity-scale-codec", "prost", "prost-types", -<<<<<<< HEAD "ripemd", -======= ->>>>>>> b77494c4 (fixed flaky beefy client test issues) "safe-regex", "serde", "serde_derive", diff --git a/modules/Cargo.toml b/modules/Cargo.toml index 90c36222cb..e8e07421af 100644 --- a/modules/Cargo.toml +++ b/modules/Cargo.toml @@ -57,7 +57,7 @@ flex-error = { version = "0.4.4", default-features = false } num-traits = { version = "0.2.14", default-features = false } derive_more = { version = "0.99.17", default-features = false, features = ["from", "display"] } uint = { version = "0.9", default-features = false } -beefy-client = { package = "beefy-generic-client", git = "https://github.com/ComposableFi/beefy-client", branch = "david/refactor-traits", default-features = false } +beefy-client = { package = "beefy-generic-client", git = "https://github.com/ComposableFi/beefy-client", branch = "master", default-features = false } sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.22", default-features = false } sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.22", default-features = false } pallet-mmr-primitives = { package = "sp-mmr-primitives", git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.22", default-features = false } @@ -97,7 +97,7 @@ sp-io = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0 subxt = "0.21.0" tokio = { version = "1.17.0", features = ["full"] } serde_json = "1.0.74" -beefy-client = { package = "beefy-generic-client", git = "https://github.com/ComposableFi/beefy-client", branch = "david/refactor-traits", features = ["mocks"]} +beefy-client = { package = "beefy-generic-client", git = "https://github.com/ComposableFi/beefy-client", branch = "master", features = ["mocks"]} [[test]] name = "mbt" diff --git a/modules/src/clients/host_functions.rs b/modules/src/clients/host_functions.rs index ec0e3dd357..d5102213ff 100644 --- a/modules/src/clients/host_functions.rs +++ b/modules/src/clients/host_functions.rs @@ -1,7 +1,7 @@ use crate::core::ics02_client::error::Error; use crate::prelude::*; +use core::marker::PhantomData; use sp_core::H256; -use std::marker::PhantomData; /// This trait captures all the functions that the host chain should provide for /// crypto operations. 
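// A rough sketch of how a Substrate host can back the crypto half of
// `HostFunctionsProvider` with `sp_io`, assuming the polkadot-v0.9.22 branch pinned in
// modules/Cargo.toml. These are free-standing helpers for illustration (the trie-proof
// methods are omitted), not the crate's actual trait impl.
use sp_core::ed25519;

fn sha256_digest(data: &[u8]) -> [u8; 32] {
    sp_io::hashing::sha2_256(data)
}

fn keccak_256(input: &[u8]) -> [u8; 32] {
    sp_io::hashing::keccak_256(input)
}

fn secp256k1_ecdsa_recover_compressed(signature: &[u8; 65], value: &[u8; 32]) -> Option<Vec<u8>> {
    sp_io::crypto::secp256k1_ecdsa_recover_compressed(signature, value)
        .ok()
        .map(|pubkey| pubkey.to_vec())
}

// ed25519 has no public-key recovery; verification against a known key is the primitive
// a host can actually provide, matching the verification-style signature this series
// settles on.
fn ed25519_verify(signature: &[u8; 64], value: &[u8; 32], pubkey: &[u8]) -> bool {
    if pubkey.len() != 32 {
        return false;
    }
    let mut raw = [0u8; 32];
    raw.copy_from_slice(pubkey);
    sp_io::crypto::ed25519_verify(
        &ed25519::Signature::from_raw(*signature),
        &value[..],
        &ed25519::Public::from_raw(raw),
    )
}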
diff --git a/modules/src/clients/ics13_near/client_def.rs b/modules/src/clients/ics13_near/client_def.rs index fdbb1d20a5..1281a3a1f3 100644 --- a/modules/src/clients/ics13_near/client_def.rs +++ b/modules/src/clients/ics13_near/client_def.rs @@ -1,4 +1,3 @@ -use std::marker::PhantomData; use crate::clients::host_functions::HostFunctionsProvider; use crate::core::ics02_client::client_consensus::AnyConsensusState; use crate::core::ics02_client::client_def::{ClientDef, ConsensusUpdateResult}; @@ -13,6 +12,7 @@ use crate::core::ics23_commitment::commitment::{ use crate::core::ics24_host::identifier::{ChannelId, ClientId, ConnectionId, PortId}; use crate::core::ics26_routing::context::LightClientContext; use crate::Height; +use core::marker::PhantomData; use super::client_state::NearClientState; use super::consensus_state::NearConsensusState; @@ -21,6 +21,7 @@ use crate::core::ics02_client::error::Error; use super::error::Error as NearError; use super::header::NearHeader; use super::types::{ApprovalInner, CryptoHash, LightClientBlockView}; +use crate::prelude::*; use borsh::BorshSerialize; @@ -289,9 +290,7 @@ pub fn validate_light_block( let epoch_block_producers = client_state .get_validators_by_epoch(&new_block_view.inner_lite.epoch_id) - .ok_or(Error::from(NearError::invalid_epoch( - new_block_view.inner_lite.epoch_id, - )))?; + .ok_or_else(|| Error::from(NearError::invalid_epoch(new_block_view.inner_lite.epoch_id)))?; for (maybe_signature, block_producer) in new_block_view .approvals_after_next diff --git a/modules/src/clients/ics13_near/client_state.rs b/modules/src/clients/ics13_near/client_state.rs index 167a1af28f..06d2adda8c 100644 --- a/modules/src/clients/ics13_near/client_state.rs +++ b/modules/src/clients/ics13_near/client_state.rs @@ -4,6 +4,7 @@ use crate::core::{ }; use super::types::{CryptoHash, LightClientBlockView, ValidatorStakeView}; +use crate::prelude::*; #[derive(Debug, Clone)] pub struct NearClientState { @@ -62,9 +63,9 @@ impl ClientState for NearClientState { fn upgrade( self, - upgrade_height: crate::Height, - upgrade_options: Self::UpgradeOptions, - chain_id: ChainId, + _upgrade_height: crate::Height, + _upgrade_options: Self::UpgradeOptions, + _chain_id: ChainId, ) -> Self { // TODO: validate this -- not sure how to process the given parameters in this case self diff --git a/modules/src/clients/ics13_near/error.rs b/modules/src/clients/ics13_near/error.rs index 44ac7e3a36..880670f1b3 100644 --- a/modules/src/clients/ics13_near/error.rs +++ b/modules/src/clients/ics13_near/error.rs @@ -8,23 +8,23 @@ define_error! 
{ { epoch_id: CryptoHash } | _ | { "invalid epoch id" }, HeightTooOld - | e | { format_args!( + | _ | { format_args!( "height too old") }, InvalidSignature - | e | { format_args!( + | _ | { format_args!( "invalid signature") }, InsufficientStakedAmount - | e | { format_args!( + | _ | { format_args!( "insufficient staked amount") }, SerializationError - | e | { format_args!( + | _ | { format_args!( "serialization error") }, UnavailableBlockProducers - | e | { format_args!( + | _ | { format_args!( "unavailable block producers") }, } diff --git a/modules/src/clients/ics13_near/header.rs b/modules/src/clients/ics13_near/header.rs index 74f46026e4..6f98ba6e07 100644 --- a/modules/src/clients/ics13_near/header.rs +++ b/modules/src/clients/ics13_near/header.rs @@ -5,7 +5,7 @@ use crate::core::ics02_client::{ use super::types::LightClientBlockView; -#[derive(Clone, PartialEq, Eq, Debug, codec::Encode, codec::Decode)] +#[derive(Clone, PartialEq, Eq, Debug)] pub struct NearHeader { inner: LightClientBlockView, } diff --git a/modules/src/clients/ics13_near/types.rs b/modules/src/clients/ics13_near/types.rs index 45fd9a5ed9..05675e8325 100644 --- a/modules/src/clients/ics13_near/types.rs +++ b/modules/src/clients/ics13_near/types.rs @@ -56,14 +56,14 @@ impl Signature { ) -> bool { match self { Self::Ed25519(signature) => T::ed25519_recover(signature.as_ref(), data.as_ref()) - .map(|key| &key == public_key.0.as_ref()) + .map(|key| key == public_key.0.as_ref()) .unwrap_or(false), } } } impl PublicKey { - const LEN: usize = 32; + const _LEN: usize = 32; pub fn from_raw(raw: &[u8]) -> Self { Self(raw.try_into().unwrap()) @@ -121,9 +121,7 @@ pub struct LightClientBlockLiteView { pub inner_lite: BlockHeaderInnerLiteView, } -#[derive( - Debug, Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize, codec::Encode, codec::Decode, -)] +#[derive(Debug, Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize)] pub struct LightClientBlockView { pub prev_block_hash: CryptoHash, pub next_block_inner_hash: CryptoHash, @@ -133,9 +131,7 @@ pub struct LightClientBlockView { pub approvals_after_next: Vec>, } -#[derive( - Debug, Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize, codec::Encode, codec::Decode, -)] +#[derive(Debug, Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize)] pub struct BlockHeaderInnerLiteView { pub height: BlockHeight, pub epoch_id: CryptoHash, @@ -169,16 +165,12 @@ pub enum ApprovalInner { Skip(BlockHeight), } -#[derive( - Debug, Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize, codec::Encode, codec::Decode, -)] +#[derive(Debug, Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize)] pub enum ValidatorStakeView { V1(ValidatorStakeViewV1), } -#[derive( - Debug, Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize, codec::Encode, codec::Decode, -)] +#[derive(Debug, Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize)] pub struct ValidatorStakeViewV1 { pub account_id: AccountId, pub public_key: PublicKey, diff --git a/modules/src/core/ics04_channel/handler/chan_open_ack.rs b/modules/src/core/ics04_channel/handler/chan_open_ack.rs index a318d883f6..d98316303a 100644 --- a/modules/src/core/ics04_channel/handler/chan_open_ack.rs +++ b/modules/src/core/ics04_channel/handler/chan_open_ack.rs @@ -284,7 +284,7 @@ mod tests { .collect(); for test in tests { - let res = channel_dispatch::<_, HostFunctions>(&test.ctx, &test.msg); + let res = channel_dispatch::<_, Crypto>(&test.ctx, &test.msg); // Additionally check the events and the output objects in the result. 
match res { Ok((proto_output, res)) => { diff --git a/modules/src/core/ics26_routing/handler.rs b/modules/src/core/ics26_routing/handler.rs index 5c47d03053..b0cd942218 100644 --- a/modules/src/core/ics26_routing/handler.rs +++ b/modules/src/core/ics26_routing/handler.rs @@ -346,7 +346,7 @@ mod tests { let msg_recv_packet = MsgRecvPacket::try_from(get_dummy_raw_msg_recv_packet(35)).unwrap(); // First, create a client.. - let res = dispatch::<_, HostFunctions>( + let res = dispatch::<_, Crypto>( &mut ctx, Ics26Envelope::Ics2Msg(ClientMsg::CreateClient(create_client_msg.clone())), ); diff --git a/modules/src/mock/context.rs b/modules/src/mock/context.rs index 3f2e9bc463..7473e81cff 100644 --- a/modules/src/mock/context.rs +++ b/modules/src/mock/context.rs @@ -221,6 +221,7 @@ impl MockContext { // Return the tuple. (Some(client_state), consensus_state) } + ClientType::Near => todo!(), }; let consensus_states = vec![(cs_height, consensus_state)].into_iter().collect(); @@ -278,6 +279,7 @@ impl MockContext { // Return the tuple. (Some(client_state), consensus_state) } + ClientType::Near => todo!(), }; let prev_consensus_state = match client_type { @@ -293,6 +295,7 @@ impl MockContext { ); AnyConsensusState::from(light_block) } + ClientType::Near => todo!(), }; let consensus_states = vec![ diff --git a/modules/src/test_utils.rs b/modules/src/test_utils.rs index 0ad789fa6b..ce50764d8e 100644 --- a/modules/src/test_utils.rs +++ b/modules/src/test_utils.rs @@ -3,7 +3,6 @@ use std::time::Duration; use crate::clients::host_functions::HostFunctionsProvider; use crate::prelude::*; -use beefy_client::traits::HostFunctions; use sp_core::keccak_256; use sp_trie::LayoutV0; use tendermint::{block, consensus, evidence, public_key::Algorithm}; @@ -102,7 +101,7 @@ impl HostFunctionsProvider for Crypto { .map(|val| val.to_vec()) } - fn ed25519_recover(signature: &[u8; 64], value: &[u8; 32]) -> Option> { + fn ed25519_recover(_signature: &[u8; 64], _value: &[u8; 32]) -> Option> { todo!() } @@ -132,6 +131,6 @@ impl HostFunctionsProvider for Crypto { } fn sha256_digest(data: &[u8]) -> [u8; 32] { - todo!() + sp_io::hashing::sha2_256(data) } } From 535849f674b9f6926c9706f3f8c9690a383f2c5e Mon Sep 17 00:00:00 2001 From: Web3 Smith <31099392+Wizdave97@users.noreply.github.com> Date: Tue, 31 May 2022 09:58:58 +0100 Subject: [PATCH 41/96] Clean up client trait definition and more descriptive errors. 
(#10) * allow a description in implementation specific errors * fmt --- .../clients/ics07_tendermint/client_def.rs | 5 ++- modules/src/clients/ics11_beefy/client_def.rs | 7 +++- modules/src/clients/ics11_beefy/error.rs | 38 +++++++------------ modules/src/core/ics02_client/context.rs | 10 +---- modules/src/core/ics02_client/error.rs | 3 +- modules/src/core/ics03_connection/error.rs | 4 +- modules/src/core/ics04_channel/error.rs | 11 +----- .../core/ics04_channel/handler/recv_packet.rs | 3 +- .../core/ics04_channel/handler/send_packet.rs | 2 +- modules/src/core/ics05_port/error.rs | 4 +- modules/src/mock/context.rs | 4 -- 11 files changed, 37 insertions(+), 54 deletions(-) diff --git a/modules/src/clients/ics07_tendermint/client_def.rs b/modules/src/clients/ics07_tendermint/client_def.rs index 142b56f512..8311d361af 100644 --- a/modules/src/clients/ics07_tendermint/client_def.rs +++ b/modules/src/clients/ics07_tendermint/client_def.rs @@ -463,7 +463,10 @@ impl ClientDef for TendermintClient { _proof_upgrade_client: Vec, _proof_upgrade_consensus_state: Vec, ) -> Result<(Self::ClientState, ConsensusUpdateResult), Ics02Error> { - todo!() + // TODO: + Err(Ics02Error::implementation_specific( + "Not implemented".to_string(), + )) } } diff --git a/modules/src/clients/ics11_beefy/client_def.rs b/modules/src/clients/ics11_beefy/client_def.rs index d3803cafb5..8f1d948a1e 100644 --- a/modules/src/clients/ics11_beefy/client_def.rs +++ b/modules/src/clients/ics11_beefy/client_def.rs @@ -196,8 +196,11 @@ impl ClientDef for BeefyClient, _proof_upgrade_consensus_state: Vec, - ) -> Result<(Self::ClientState, ConsensusUpdateResult), Error> { - todo!() + ) -> Result<(Self::ClientState, ConsensusUpdateResult), Error> { + // TODO: + Err(Error::beefy(BeefyError::implementation_specific( + "Not implemented".to_string(), + ))) } fn verify_client_consensus_state( diff --git a/modules/src/clients/ics11_beefy/error.rs b/modules/src/clients/ics11_beefy/error.rs index c18dd84235..004960ae7b 100644 --- a/modules/src/clients/ics11_beefy/error.rs +++ b/modules/src/clients/ics11_beefy/error.rs @@ -16,22 +16,29 @@ define_error! { Error { InvalidAddress |_| { "invalid address" }, + InvalidTrieProof |_| { "invalid trie proof" }, + InvalidMmrUpdate { reason: String } |e| { format_args!("invalid mmr update {}", e.reason) }, + InvalidCommitmentRoot |_| { "invalid commitment root" }, + TimestampExtrinsic { reason: String } |e| { format_args!("error decoding timestamp extrinsic {}", e.reason) }, + InvalidHeader { reason: String } |e| { format_args!("invalid header, failed basic validation: {}", e.reason) }, + ImplementationSpecific { reason: String } |e| { format_args!("Implementation specific error: {}", e.reason) }, + Validation { reason: String } |e| { format_args!("invalid header, failed basic validation: {}", e.reason) }, @@ -135,15 +142,6 @@ define_error! { format_args!("header height = {0} is invalid", e.height) }, - InvalidTrustedHeaderHeight - { - trusted_header_height: Height, - height_header: Height - } - | e | { - format_args!("header height is {0} and is lower than the trusted header height, which is {1} ", e.height_header, e.trusted_header_height) - }, - LowUpdateHeight { low: Height, @@ -153,21 +151,6 @@ define_error! 
{ format_args!("header height is {0} but it must be greater than the current client height which is {1}", e.low, e.high) }, - MismatchedRevisions - { - current_revision: u64, - update_revision: u64, - } - | e | { - format_args!("the header's current/trusted revision number ({0}) and the update's revision number ({1}) should be the same", e.current_revision, e.update_revision) - }, - - VerificationError - { reason: BeefyClientError } - | e | { - format_args!("verification failed: {:?}", e.reason) - }, - ProcessedTimeNotFound { client_id: ClientId, @@ -190,6 +173,13 @@ define_error! { e.client_id, e.height) }, + + VerificationError + { reason: BeefyClientError } + | e | { + format_args!("verification failed: {:?}", e.reason) + }, + Ics23Error [ Ics23Error ] | _ | { "ics23 commitment error" }, diff --git a/modules/src/core/ics02_client/context.rs b/modules/src/core/ics02_client/context.rs index c02c0423d6..c6aa7831df 100644 --- a/modules/src/core/ics02_client/context.rs +++ b/modules/src/core/ics02_client/context.rs @@ -61,19 +61,11 @@ pub trait ClientReader { fn host_height(&self) -> Height; /// Returns the current timestamp of the local chain. - fn host_timestamp(&self) -> Timestamp { - let pending_consensus_state = self - .pending_host_consensus_state() - .expect("host must have pending consensus state"); - pending_consensus_state.timestamp() - } + fn host_timestamp(&self) -> Timestamp; /// Returns the `ConsensusState` of the host (local) chain at a specific height. fn host_consensus_state(&self, height: Height) -> Result; - /// Returns the pending `ConsensusState` of the host (local) chain. - fn pending_host_consensus_state(&self) -> Result; - /// Returns a natural number, counting how many clients have been created thus far. /// The value of this counter should increase only via method `ClientKeeper::increase_client_counter`. fn client_counter(&self) -> Result; diff --git a/modules/src/core/ics02_client/error.rs b/modules/src/core/ics02_client/error.rs index 6eaac30497..a1e58fd2f6 100644 --- a/modules/src/core/ics02_client/error.rs +++ b/modules/src/core/ics02_client/error.rs @@ -51,7 +51,8 @@ define_error! { }, ImplementationSpecific - | _ | { "implementation specific error" }, + { reason: String } + | e | { format_args!("implementation specific error: {}", e.reason) }, HeaderVerificationFailure { reason: String } diff --git a/modules/src/core/ics03_connection/error.rs b/modules/src/core/ics03_connection/error.rs index 619f14b2e1..4c90f06c41 100644 --- a/modules/src/core/ics03_connection/error.rs +++ b/modules/src/core/ics03_connection/error.rs @@ -2,6 +2,7 @@ use crate::core::ics02_client::error as client_error; use crate::core::ics03_connection::version::Version; use crate::core::ics24_host::error::ValidationError; use crate::core::ics24_host::identifier::{ClientId, ConnectionId}; +use crate::prelude::*; use crate::proofs::ProofError; use crate::signer::SignerError; use crate::Height; @@ -156,6 +157,7 @@ define_error! { }, ImplementationSpecific - | _ | { "implementation specific error" }, + { reason: String } + | e | { format_args!("implementation specific error: {}", e.reason) }, } } diff --git a/modules/src/core/ics04_channel/error.rs b/modules/src/core/ics04_channel/error.rs index 2f5451e23d..61f77a28e3 100644 --- a/modules/src/core/ics04_channel/error.rs +++ b/modules/src/core/ics04_channel/error.rs @@ -342,15 +342,8 @@ define_error! 
{ | _ | { "route not found" }, ImplementationSpecific - | _ | { "implementation specific error" }, - - AppModule - { description: String } - | e | { - format_args!( - "application module error: {0}", - e.description) - }, + { reason: String } + | e | { format_args!("implementation specific error: {}", e.reason) }, } } diff --git a/modules/src/core/ics04_channel/handler/recv_packet.rs b/modules/src/core/ics04_channel/handler/recv_packet.rs index c9d178edfd..8d04445504 100644 --- a/modules/src/core/ics04_channel/handler/recv_packet.rs +++ b/modules/src/core/ics04_channel/handler/recv_packet.rs @@ -10,6 +10,7 @@ use crate::core::ics24_host::identifier::{ChannelId, PortId}; use crate::core::ics26_routing::context::LightClientContext; use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; +use crate::prelude::*; use crate::timestamp::Expiry; use crate::Height; use core::fmt::Debug; @@ -136,7 +137,7 @@ pub fn process( receipt: Some(Receipt::Ok), })) } - Err(_) => return Err(Error::implementation_specific()), + Err(e) => return Err(Error::implementation_specific(e.to_string())), } }; diff --git a/modules/src/core/ics04_channel/handler/send_packet.rs b/modules/src/core/ics04_channel/handler/send_packet.rs index 6023f79ee0..bf0b34436d 100644 --- a/modules/src/core/ics04_channel/handler/send_packet.rs +++ b/modules/src/core/ics04_channel/handler/send_packet.rs @@ -54,7 +54,7 @@ pub fn send_packet( let client_state = ctx .client_state(&client_id) - .map_err(|_| Error::implementation_specific())?; + .map_err(|e| Error::implementation_specific(e.to_string()))?; // prevent accidental sends with clients that cannot be updated if client_state.is_frozen() { diff --git a/modules/src/core/ics05_port/error.rs b/modules/src/core/ics05_port/error.rs index e06ffc9081..6713f9b44d 100644 --- a/modules/src/core/ics05_port/error.rs +++ b/modules/src/core/ics05_port/error.rs @@ -1,4 +1,5 @@ use crate::core::ics24_host::identifier::PortId; +use crate::prelude::*; use flex_error::define_error; define_error! { @@ -17,6 +18,7 @@ define_error! { | e | { format_args!("could not retrieve module from port '{0}'", e.port_id) }, ImplementationSpecific - | _ | { "implementation specific error" }, + { reason: String } + | e | { format_args!("implementation specific error: {}", e.reason) }, } } diff --git a/modules/src/mock/context.rs b/modules/src/mock/context.rs index 7473e81cff..eec73176b5 100644 --- a/modules/src/mock/context.rs +++ b/modules/src/mock/context.rs @@ -1122,10 +1122,6 @@ impl ClientReader for MockContext { } } - fn pending_host_consensus_state(&self) -> Result { - Err(Ics02Error::missing_local_consensus_state(Height::zero())) - } - fn client_counter(&self) -> Result { Ok(self.ibc_store.lock().unwrap().client_ids_counter) } From c576870a1bd6f8cd3835784d1e0740f71609a36c Mon Sep 17 00:00:00 2001 From: Web3 Smith <31099392+Wizdave97@users.noreply.github.com> Date: Tue, 31 May 2022 14:28:18 +0100 Subject: [PATCH 42/96] remove minimum rust version (#11) --- modules/Cargo.toml | 1 - proto/Cargo.toml | 1 - 2 files changed, 2 deletions(-) diff --git a/modules/Cargo.toml b/modules/Cargo.toml index e8e07421af..024c306d94 100644 --- a/modules/Cargo.toml +++ b/modules/Cargo.toml @@ -7,7 +7,6 @@ readme = "README.md" keywords = ["blockchain", "consensus", "cosmos", "ibc", "tendermint"] repository = "https://github.com/informalsystems/ibc-rs" authors = ["Informal Systems "] -rust-version = "1.60" description = """ Implementation of the Inter-Blockchain Communication Protocol (IBC). 
This crate comprises the main data structures and on-chain logic. diff --git a/proto/Cargo.toml b/proto/Cargo.toml index 7e2fcbc1c9..3c0dcb08b7 100644 --- a/proto/Cargo.toml +++ b/proto/Cargo.toml @@ -9,7 +9,6 @@ readme = "README.md" categories = ["cryptography::cryptocurrencies", "encoding", "no-std"] keywords = ["blockchain", "cosmos", "tendermint", "ibc", "proto"] exclude = ["definitions"] -rust-version = "1.60" description = """ ibc-proto provides Cosmos SDK & IBC Protocol Buffers definitions """ From 5e22505d58130c94823d3e9fe5a5641b2b26b7c6 Mon Sep 17 00:00:00 2001 From: Blas Rodriguez Irizar Date: Wed, 1 Jun 2022 14:02:37 +0200 Subject: [PATCH 43/96] near: verify signatures through HostFunctionProvider (#8) * near: verify signatures through HostFunctionProvider * rename signature recovery to verification * rm verify method on signature Use host functions directly instead --- modules/src/clients/host_functions.rs | 2 +- modules/src/clients/ics13_near/client_def.rs | 23 +++++++++++--------- modules/src/clients/ics13_near/types.rs | 22 +++++-------------- modules/src/test_utils.rs | 4 ++-- 4 files changed, 22 insertions(+), 29 deletions(-) diff --git a/modules/src/clients/host_functions.rs b/modules/src/clients/host_functions.rs index d5102213ff..a0a6f352eb 100644 --- a/modules/src/clients/host_functions.rs +++ b/modules/src/clients/host_functions.rs @@ -16,7 +16,7 @@ pub trait HostFunctionsProvider: Clone { ) -> Option>; /// Recover the ED25519 pubkey that produced this signature - fn ed25519_recover(signature: &[u8; 64], value: &[u8; 32]) -> Option>; + fn ed25519_verify(signature: &[u8; 64], value: &[u8; 32], pubkey: &[u8]) -> bool; /// This function should verify membership in a trie proof using parity's sp-trie package /// with a BlakeTwo256 Hasher diff --git a/modules/src/clients/ics13_near/client_def.rs b/modules/src/clients/ics13_near/client_def.rs index 1281a3a1f3..b8523d6450 100644 --- a/modules/src/clients/ics13_near/client_def.rs +++ b/modules/src/clients/ics13_near/client_def.rs @@ -239,7 +239,8 @@ impl ClientDef for NearClient { } } -// TODO: refactor to use [`HostFunctions`] +/// validates a light block that's contained on the `NearHeader` based on the current +/// state of the light client. 
pub fn validate_light_block( header: &NearHeader, client_state: NearClientState, @@ -259,7 +260,7 @@ pub fn validate_light_block( let new_block_view = header.get_light_client_block_view(); let current_block_view = client_state.get_head(); - let (_current_block_hash, _next_block_hash, _approval_message) = + let (_current_block_hash, _next_block_hash, approval_message) = reconstruct_light_client_block_view_fields::(new_block_view)?; // (1) @@ -307,14 +308,16 @@ pub fn validate_light_block( approved_stake += bp_stake; - let _validator_public_key = bp_stake_view.public_key.clone(); - // if !maybe_signature - // .as_ref() - // .unwrap() - // .verify::(&H::sha256_digest(&approval_message), validator_public_key.clone()) - // { - // return Err(NearError::invalid_signature().into()); - // } + let validator_public_key = &bp_stake_view.public_key; + let data = H::sha256_digest(&approval_message); + let signature = maybe_signature.as_ref().unwrap(); + if H::ed25519_verify( + signature.get_inner(), + &data, + validator_public_key.get_inner(), + ) { + return Err(NearError::invalid_signature().into()); + } } let threshold = total_stake * 2 / 3; diff --git a/modules/src/clients/ics13_near/types.rs b/modules/src/clients/ics13_near/types.rs index 05675e8325..8493710b71 100644 --- a/modules/src/clients/ics13_near/types.rs +++ b/modules/src/clients/ics13_near/types.rs @@ -41,33 +41,23 @@ impl Signature { Self::Ed25519(Ed25519Signature::from_raw(raw.try_into().unwrap())) } - pub fn as_bytes(&self) -> &[u8] { + pub fn get_inner(&self) -> &[u8; Self::LEN] { match self { Self::Ed25519(inner) => &inner.0, } } - - // TODO: we might want to create a trait for signature verification - // or integrate this into HostFunctions - pub fn verify( - &self, - data: impl AsRef<[u8; 32]>, - public_key: PublicKey, - ) -> bool { - match self { - Self::Ed25519(signature) => T::ed25519_recover(signature.as_ref(), data.as_ref()) - .map(|key| key == public_key.0.as_ref()) - .unwrap_or(false), - } - } } impl PublicKey { - const _LEN: usize = 32; + const LEN: usize = 32; pub fn from_raw(raw: &[u8]) -> Self { Self(raw.try_into().unwrap()) } + + pub fn get_inner(&self) -> &[u8; Self::LEN] { + &self.0 + } } impl TryFrom<&[u8]> for CryptoHash { diff --git a/modules/src/test_utils.rs b/modules/src/test_utils.rs index ce50764d8e..04fb47b3ac 100644 --- a/modules/src/test_utils.rs +++ b/modules/src/test_utils.rs @@ -101,8 +101,8 @@ impl HostFunctionsProvider for Crypto { .map(|val| val.to_vec()) } - fn ed25519_recover(_signature: &[u8; 64], _value: &[u8; 32]) -> Option> { - todo!() + fn ed25519_verify(_signature: &[u8; 64], _value: &[u8; 32], public_key: &[u8; 32]) -> bool { + true } fn verify_membership_trie_proof( From 61fb44af6830ee85ad5f91b15e66dd11f10a491d Mon Sep 17 00:00:00 2001 From: Web3 Smith <31099392+Wizdave97@users.noreply.github.com> Date: Thu, 2 Jun 2022 12:24:39 +0100 Subject: [PATCH 44/96] Update with new changes including ICS20 (#12) * Release v0.15.0 (#2234) * Bump crates to v0.15.0 and ibc-proto to v0.18.0 * Include ibc-test-framework in release * Slightly improve main Cargo doc for ibc-test-framework * Fix components names * Update changelog * Update lockfile Co-authored-by: Soares Chen * Add type ascription to fix build when `telemetry` feature is disabled (#2235) * Bump once_cell from 1.11.0 to 1.12.0 (#2237) * Add `ibc-test-framework` to the main README (#2236) * Add `ibc-test-framework` to the main README * Cleanup * Fix `execute_schedule` method leaking operational data (#2118) * Pull in upstream changes * Adding 
integration test for execute_schedule * Adding integration test for execute_schedule * Finish stubbing out execute_schedule test * Call `chains.shutdown` method * Correctly shut down chain * Shut down node b as well * Debugging execute_schedule test * Add Debug derivations * Update integration test so that its flow correctly tests `execute_schedule` * Attempt to perform a second IBC transfer * Remove info's * Increase sleep timeout duration * Incorportate new test framework features * Remove unnecessary `sleep` call * Correctly use new test framework features * Get assertions passing for now * Send two transactions, one in each direction * Add doc comment for test * Improve panic messages * Refactor test so that it is actually testing the desired behavior * Attempt at fixing `execute_schedule` leaky logic * Flesh out doc comments some more * Remove a duplicate function * Make use of OperationalDataTarget enum * Remove redundant enum * Remove some Debug derives * Remove one more debug derive * Add `try_fetch_scheduled_operational_data` back in * Give `do_execute_schedule` a more descriptive name * Improve `execute_schedule_for_target_chain` method's documentation * Add a bunch of clarifying comments * More clarification of comments * Flesh out `OperationalData` docs * Add changelog entry * Incorporate PR feedback Co-authored-by: Adi Seredinschi * Add `keys balance` command to query the balance for a key (#2232) * Added subcommand 'balance' for command 'keys' to output the account balance associated with a given key * Added changelog entry for new command feature * Updated Hermes guide to include the new keys balance command * Improved error log for the CLI command and added doc comment * fixed fmt by running cargo fmt * Refactored query_balance to take key_name parameter. Added JSON output to keys balance command. * Fixed typo in comment in CosmosSdkChain query_balance method * Updated keys balance command to take the key_name as an optional flag Co-authored-by: Luca Joss Co-authored-by: Romain Ruetschi * Model based testing on integration tests (#2072) * ics20 token transfer spec * fixes for tla functions * full spec * update tla specs * mbt driver code * prepare for itf adoption * itf deserializer * description in comments * fix type * hack for apalache v0.20.2 (informalsystems/apalache#1304) * updated tla spec * example itf.json * code refactor * prepare for mbt * fix and update * updated example * updated trace types * working mbt driver * fix mbt test * added mbt readme * BinaryChainTest over BinaryChannelTest * wait for established channel * added packet state assertions * improved mbt integration * few fixes * Refactor bootstrap chain and foreign client code * Introduce BootstrapChannel/ConnectionOptions * Increase timeout for assert_eventual_wallet_amount for CI * Upgrade Rust to 1.60.0 * Fix fmt in Rust 1.60.0 * Update Rust MSRV to 1.60 * Try running CI integration test single-threaded * Fix variable mixup * Add MBT to CI * Disable failing parse_itf test * Disable test_self_connected_ibc_transfer * Use gaia6 to run MBT test * Debug log on CI * save itf trace on failure * initialize nested maps * updated mbt code * Use new Nix URL * better fix to empty nested maps * Remove stale comments * Fix merge conflicts Co-authored-by: Soares Chen Co-authored-by: Adi Seredinschi * Bump uuid from 1.0.0 to 1.1.0 (#2247) Bumps [uuid](https://github.com/uuid-rs/uuid) from 1.0.0 to 1.1.0. 
- [Release notes](https://github.com/uuid-rs/uuid/releases) - [Commits](https://github.com/uuid-rs/uuid/compare/1.0.0...1.1.0) --- updated-dependencies: - dependency-name: uuid dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Add `query channel client` command to query a channel's client state (#2248) * Added new CLI command 'query channel client' * Updated Hermes guide to include new query channel client command and updated changelog * Added missing subcommand in Hermes guide, section 7.9.3 Co-authored-by: Luca Joss * Fix wrong `create channel` command in "Create a new path" page in the guide (#2245) Fix wrong `create channel` command in guide Co-authored-by: Adi Seredinschi * Disable MBT tests on CI due to flakiness (#2253) * Complete ICS20 Implementation (#1989) * Add ics26 Module trait * newtype for Acknowledgement * Define Router interface * Improve Router trait * Update mock router impl * Test for router API * Disallow duplicate module_ids in MockRouter * Add RouterBuilder for seal-style API * Fix failing test * Fix CI after merge * Chainable RouterBuilder::add_route() * Fix test * Add Router::has_route() trait method * Add comments * Separate mutating/non-mutating trait methods for shadow-paging style API * Make most Module trait methods optional * Default impl for MockModule::on_chan_open_try() * Acknowledgement trait * Extend router test * Simplify OnRechPacketResult using FnOnce() * Cleanup * Use Cow for CapabilityName::new() * Use Cow for CapabilityName::new() * Use newtype ModuleId instead of trait assoc type * Module callbacks' args as refs * Fix mock impl * WIP channel callbacks * Fix ModuleId ctor validation * TypedCapability * Use typed capabilities * Add Router::route_mut() * Implement pre-dispatch channel message validation * Avoid cloning message during channel msg dispatch * Add ChannelReader::lookup_module_by_channel() * Complete ics4 dispatch with callbacks * Fix compile errors from rebase * Fix CI test * Improve mock impl for capabilities * cargo fmt * Set channel version returned from on_chan_open_try() * Implement packet handler verification * Add missing check already received packets on ordered channels * Avoid cloning message during packet msg dispatch * Implement RecvPacket NoOp * cargo fmt and remove unused errors * Make on_recv_packet() Ack result optional * Don't return Result from on_recv_packet() and pass GenericAcknowledgement to on_acknowledgement_packet() * Implement packet callbacks * Allow callbacks to write logs and emit events * Fix test * Add .changelog entry * Add a comment for state rollback expectation from dispatch() * ics26_routing::handler::deliver() takes single message as input * ics26_routing::handler::deliver() returns logs as well * Remove ctx_ro * Callbacks return ModuleOutput * Return HandlerOutputBuilder from channel and packet dispatch fn * Revert "Callbacks return ModuleOutput" This reverts commit 1d430c9f86168816b55cd9185c6c632c4190118d. 
* Address review feedback for comments * Extract ChannelMsg::lookup_module() * Add ICS20 Denom type * Add ICS20 TracePrefix & TracePath type * Define Coin and Decimal types * Impl conversions from/to RawDenomTrace * Make better use of derive-more * Impl conversions for Coin type * Add HashedDenom and polish DenomTrace impl * Define PacketData domain type and conversions * Use Coin domain type in MsgTransfer * Fix usage of Coin type in relayer code * Always panic on decimal arith overflow * Polish generic Signer impl * Implement ICS20 signer type * Coin type with generic Denom param * Use IbcCoin in MsgTransfer and PrefixedCoin in PacketData * Minor refactoring * Fix test_util * Impl AsRef<[u8]> for GenericAcknowledgement * Ics20Context is no longer a supertrait of Ics26Context * Add ICS20 Ack type * Add ICS20 event enum placeholder * Define all ICS20 expected keepers and context * Update send_transfer() for recent context changes * Give mut ref to set_channel_escrow_address() * Define ICS20 callback functions * Fix test build * Move denom derive functions to integration tests * Add version to chan_open_try callback * Rename ChannelId::counter() to sequence() * Add AppModule error variant to ChannelError * Implement validation helpers for callbacks * Define Ics20 callback errors * Implement channel handshake callbacks * Fix clippy errors * Impl Deserialize for PacketData * Manually implement AsRef for Signer * Fix ICS20 Ack success from impl * Add more ICS20 errors * Improve ack type ctor * Add ctor for TracePrefix * Provide denom trace methods to remove prefix and check for empty trace path * Implement conversion from PrefixedCoin to IbcCoin * Add Ics20Reader trait methods to check send/receive enabled * Provided trait method get_channel_escrow_address() * Add BankReader trait for is_blocked_account() * Add FromStr bound for Ics20Context::AccountId * Use IbcCoins in Bank traits * Impl on_recv_packet() for cases where receiver chain is source * Fallible OnRecvPacket::write_fn() * Complete on_recv_packet impl * Fix test build * Fix clippy errors * Implement remaining packet callbacks for ICS20 * Make set_denom_trace() fallible * Don't derive AsRef * Complete send_transfer impl * Manual deserialize impl for Acknowledgement * Add ctor for Acknowledgement * Handle packet-data deserialize error separately * Use U256 for Denom and move bigint.rs to modules/src * Rename Denom to Amount * Cleanup * Fix trait definitions * Use source enum * Fix AccountReader trait * Validate port_id * More refactoring * Rename Signer to Address * Use Address instead of Signer where applicable * Fix send_transfer packet creation * Fix clippy warnings * Define ICS20 events * Extract relay code into separate files * Fix clippy warnings * Make HandlerOutput/Builder generic over events * Define ModuleEvent * Add AppModule variant to IbcEvent * Impl Display for Acknowledgement * Derive serde for ModuleId * Add ModuleId to ModuleEvent * Impl conversion from tuple for ModuleEventAttribute * Impl conversions from ICS20 events to ModuleEvent * Add event for transfer * Remove bech32 validation from Address * Change MsgTransfer receiver type to Address * Extract MockContext IbcStore * Improve conversion from Signer to AccountId * Add deliver method to Module trait * Implement conversions for MsgTransfer to/from Protobuf Any * Store packet result directly in send_transfer() * Make all ICS20 mods public * Make all IbsStore fields public to enable access for app modules * Implement Ics20Context for DummyTransferModule * Fix 
failing test * Manual Clone impl for MockContext * Fix failing test * Revert "Fix failing test" This reverts commit 40aec618a22df1c8a2592e63028cd1262957f5a1. * Fix MockContext Clone impl * Update trait definitions after merge with master * Replace ModuleOutput with ModuleOutputBuilder in callbacks * Add inout param ModuleOutputBuilder to Ics20 callback handlers * Emit ICS20 receive packet events * Module::deliver() must be able to emit IbcEvents * Allow HandlerOutputBuilder to merge HandlerOutput * Emit transfer event * Add AckStatusEvent * Emit ICS20 ack events * Emit ICS20 timeout events * Remove ABCI error code * Remove #[allow(unused)] * Add log for send transfer * MsgReceipt abstraction * Handle missing IbcEvent to AbciEvent conversions for RecvPacket and TimeoutOnClose events * Implement ModuleEvent to AbciEvent conversion * Make send_transfer() public * Allow empty TracePaths * Fix TracePath multiple prefix parse bug * Fix TracePath empty str parse bug * Improve DenomTrace FromStr * Add denom validation test * Add denom trace and serde tests * TracePath tests * Allow Denom with '/' * Fix HashedDenom FromStr impl for empty hash * Minor refactoring * Add IbcCoin tests * Add .changelog entry * Fix clippy errors * Disallow empty Signer and replace Address with it * Use ToString as trait bound for ICS20 AccountId * Change AccountId trait bound from FromStr to TryFrom * Remove Signer::new() * Delete OCap related TODO * Remove the PortKeeper * Remove Module::deliver() * Fix clippy warnings * Rename transfer module * Rename mod relay * Fix tests failing due to empty signer * Rename PORT_ID const to PORT_ID_STR * Fix escrow addr gen * Test cosmos escrow addr gen * Rename receiver account * Remove `has_denom_trace()` * Remove `BankReader::is_blocked_account()` * Remove `AccountId::ToString` bound * Remove `AccountReader` * Mint/burn into user accounts directly * Use `chunks_exact()` instead of `windows().step_by()` * Rename `DenomTrace::has_prefix()` to `trace_starts_with()` * Move trace related methods into `TracePath` * Fix add/remove prefix * Add test for add/remove trace prefix * Set denom trace only if not already set * Use truncate instead of drain in cosmos_adr028_escrow_address() * Rename DenomTrace as PrefixedDenom * Modify all `BankKeeper` methods to use `PrefixedCoin` instead of `IbcCoin` * Rename `Denom` as `BaseDenom` * Impl Into and checked_add/sub() for Amount * Add more TracePath tests * Test TracePath is_empty() * Remove IbcCoin * Remove HashedDenom and HashedCoin * Fix test compilation * Add comment for send_transfer() * Functions for determining source chain * Minor refactoring * cargo fmt * Derive serde for PacketCommitment and AcknowledgementCommitment * docstring and comment Co-authored-by: Philippe Laferriere * Merge commands `keys add` and `keys restore` into a single command. (#2251) * Merged commands 'keys add' and 'keys restore' into a single command, 'keys add'. Improved restoring a key with mnemonic by taking a file containing the mnemonic as input instead of taking the mnemonic as command line input * Updated Hermes guide with new merged 'keys add' command. And updated changelog with improvement * Fixed e2e setup script to take correct flag when adding keys * Fixed small errors in Hermes guide and improved comments for changed 'keys add' command. * Improved Hermes guide section 7.2 * Updated the Hermes guide section 7.2 to match actual features * Bump moka from 0.8.4 to 0.8.5 (#2246) Bumps [moka](https://github.com/moka-rs/moka) from 0.8.4 to 0.8.5. 
- [Release notes](https://github.com/moka-rs/moka/releases) - [Changelog](https://github.com/moka-rs/moka/blob/master/CHANGELOG.md) - [Commits](https://github.com/moka-rs/moka/compare/v0.8.4...v0.8.5) --- updated-dependencies: - dependency-name: moka dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Adi Seredinschi * fmt * rename LightClientContext to ReaderContext * use host hash function in escrow address * minor fix * minor fix Co-authored-by: Romain Ruetschi Co-authored-by: Soares Chen Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Sean Chen Co-authored-by: Adi Seredinschi Co-authored-by: Luca Joss <43531661+ljoss17@users.noreply.github.com> Co-authored-by: Luca Joss Co-authored-by: Ranadeep Biswas Co-authored-by: Luca Joss Co-authored-by: Shoaib Ahmed Co-authored-by: Philippe Laferriere --- .../ibc/2104-fix-commitment-computation.md | 4 +- .../ibc/2114-fix-ack-verification.md | 4 +- .../bug-fixes/ibc/2178-conn-ack-bug-fix.md | 4 +- modules/Cargo.toml | 7 +- .../relay_application_logic/send_transfer.rs | 52 --- modules/src/applications/transfer/context.rs | 36 +- .../clients/ics07_tendermint/client_def.rs | 26 +- modules/src/clients/ics11_beefy/client_def.rs | 26 +- modules/src/clients/ics13_near/client_def.rs | 24 +- modules/src/core/ics02_client/client_def.rs | 46 +-- modules/src/core/ics02_client/handler.rs | 4 +- .../ics02_client/handler/create_client.rs | 4 +- .../ics02_client/handler/update_client.rs | 4 +- .../ics02_client/handler/upgrade_client.rs | 4 +- .../core/ics02_client/msgs/create_client.rs | 2 +- .../core/ics02_client/msgs/upgrade_client.rs | 2 +- modules/src/core/ics03_connection/handler.rs | 4 +- .../ics03_connection/handler/conn_open_ack.rs | 4 +- .../handler/conn_open_confirm.rs | 4 +- .../handler/conn_open_init.rs | 4 +- .../ics03_connection/handler/conn_open_try.rs | 4 +- .../core/ics03_connection/handler/verify.rs | 12 +- modules/src/core/ics04_channel/context.rs | 13 +- modules/src/core/ics04_channel/error.rs | 8 + modules/src/core/ics04_channel/handler.rs | 6 +- .../ics04_channel/handler/acknowledgement.rs | 4 +- .../handler/chan_close_confirm.rs | 4 +- .../ics04_channel/handler/chan_close_init.rs | 4 +- .../ics04_channel/handler/chan_open_ack.rs | 4 +- .../handler/chan_open_confirm.rs | 4 +- .../ics04_channel/handler/chan_open_init.rs | 4 +- .../ics04_channel/handler/chan_open_try.rs | 4 +- .../core/ics04_channel/handler/recv_packet.rs | 4 +- .../core/ics04_channel/handler/send_packet.rs | 9 +- .../src/core/ics04_channel/handler/timeout.rs | 4 +- .../ics04_channel/handler/timeout_on_close.rs | 4 +- .../src/core/ics04_channel/handler/verify.rs | 12 +- .../handler/write_acknowledgement.rs | 4 +- modules/src/core/ics26_routing/context.rs | 5 +- modules/src/core/ics26_routing/handler.rs | 50 ++- modules/src/mock/client_def.rs | 24 +- modules/src/mock/context.rs | 28 +- modules/src/test_utils.rs | 364 +++++++++++++++++- proto-compiler/Cargo.lock | 2 +- 44 files changed, 592 insertions(+), 254 deletions(-) delete mode 100644 modules/src/applications/ics20_fungible_token_transfer/relay_application_logic/send_transfer.rs diff --git a/.changelog/v0.15.0/bug-fixes/ibc/2104-fix-commitment-computation.md b/.changelog/v0.15.0/bug-fixes/ibc/2104-fix-commitment-computation.md index 570c1b3338..da0b858a75 100644 --- 
a/.changelog/v0.15.0/bug-fixes/ibc/2104-fix-commitment-computation.md +++ b/.changelog/v0.15.0/bug-fixes/ibc/2104-fix-commitment-computation.md @@ -1,2 +1,2 @@ -- Fix packet commitment calculation to match IBC-Go - ([#2104](https://github.com/informalsystems/ibc-rs/issues/2104)) +- Fix packet commitment calculation to match ibc-go ([#2104](https://github.com/informalsystems/ibc- + rs/issues/2104)) diff --git a/.changelog/v0.15.0/bug-fixes/ibc/2114-fix-ack-verification.md b/.changelog/v0.15.0/bug-fixes/ibc/2114-fix-ack-verification.md index 0987d40b6f..cbe6399a16 100644 --- a/.changelog/v0.15.0/bug-fixes/ibc/2114-fix-ack-verification.md +++ b/.changelog/v0.15.0/bug-fixes/ibc/2114-fix-ack-verification.md @@ -1,2 +1,2 @@ -- Fix incorrect acknowledgement verification - ([#2114](https://github.com/informalsystems/ibc-rs/issues/2114)) +- Fix incorrect acknowledgement verification ([#2114](https://github.com/informalsystems/ibc- + rs/issues/2114)) diff --git a/.changelog/v0.15.0/bug-fixes/ibc/2178-conn-ack-bug-fix.md b/.changelog/v0.15.0/bug-fixes/ibc/2178-conn-ack-bug-fix.md index af72298e4b..054fb34968 100644 --- a/.changelog/v0.15.0/bug-fixes/ibc/2178-conn-ack-bug-fix.md +++ b/.changelog/v0.15.0/bug-fixes/ibc/2178-conn-ack-bug-fix.md @@ -1,2 +1,2 @@ -- Fix connection identifier mix-up in connection acknowledgement processing - ([#2178](https://github.com/informalsystems/ibc-rs/issues/2178)) +- fix connection id mix-up in connection acknowledgement processing + ([#2178](https://github.com/informalsystems/ibc-rs/issues/2178)) \ No newline at end of file diff --git a/modules/Cargo.toml b/modules/Cargo.toml index 024c306d94..ab11162874 100644 --- a/modules/Cargo.toml +++ b/modules/Cargo.toml @@ -21,6 +21,7 @@ std = [ "flex-error/std", "flex-error/eyre_tracer", "ibc-proto/std", + "ics23/std", "clock", "beefy-client/std", "sp-runtime/std", @@ -39,7 +40,7 @@ mocks = ["tendermint-testgen", "clock", "std", "sp-io", "sp-io/std"] [dependencies] # Proto definitions for all IBC-related interfaces, e.g., connections or channels. 
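The additions to the `std` feature list here (`ics23/std`, plus `sp-io`/`sp-io/std` under `mocks`) exist so the modules crate keeps building without the standard library. For context, the crate-level pattern such feature forwarding supports looks roughly like the following; this is an illustrative sketch of the general `no_std` idiom, not code taken from this patch:

```rust
// Crate root of a library gated on a `std` cargo feature (illustrative).
#![cfg_attr(not(feature = "std"), no_std)]

#[cfg(not(feature = "std"))]
extern crate alloc;
#[cfg(not(feature = "std"))]
use alloc::vec::Vec;

/// Builds identically with `--features std` and with `--no-default-features`,
/// provided every dependency's own `std` feature is forwarded as in the hunk above.
pub fn concat(a: &[u8], b: &[u8]) -> Vec<u8> {
    let mut out = Vec::with_capacity(a.len() + b.len());
    out.extend_from_slice(a);
    out.extend_from_slice(b);
    out
}
```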
borsh = { version = "0.9.3", default-features = false } -ibc-proto = { version = "0.17.1", path = "../proto", default-features = false } +ibc-proto = { version = "0.18.0", path = "../proto", default-features = false } ics23 = { version = "=0.8.0-alpha", default-features = false } time = { version = "0.3", default-features = false } serde_derive = { version = "1.0.104", default-features = false } @@ -53,8 +54,8 @@ safe-regex = { version = "0.2.5", default-features = false } subtle-encoding = { version = "0.5", default-features = false } sha2 = { version = "0.10.2", default-features = false } flex-error = { version = "0.4.4", default-features = false } -num-traits = { version = "0.2.14", default-features = false } -derive_more = { version = "0.99.17", default-features = false, features = ["from", "display"] } +num-traits = { version = "0.2.15", default-features = false } +derive_more = { version = "0.99.17", default-features = false, features = ["from", "into", "display"] } uint = { version = "0.9", default-features = false } beefy-client = { package = "beefy-generic-client", git = "https://github.com/ComposableFi/beefy-client", branch = "master", default-features = false } sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.22", default-features = false } diff --git a/modules/src/applications/ics20_fungible_token_transfer/relay_application_logic/send_transfer.rs b/modules/src/applications/ics20_fungible_token_transfer/relay_application_logic/send_transfer.rs deleted file mode 100644 index aae23629a2..0000000000 --- a/modules/src/applications/ics20_fungible_token_transfer/relay_application_logic/send_transfer.rs +++ /dev/null @@ -1,52 +0,0 @@ -use crate::applications::ics20_fungible_token_transfer::error::Error; -use crate::applications::ics20_fungible_token_transfer::msgs::transfer::MsgTransfer; -use crate::clients::host_functions::HostFunctionsProvider; -use crate::core::ics04_channel::handler::send_packet::send_packet; -use crate::core::ics04_channel::packet::Packet; -use crate::core::ics04_channel::packet::PacketResult; -use crate::core::ics26_routing::context::LightClientContext; -use crate::handler::HandlerOutput; -use crate::prelude::*; - -pub(crate) fn send_transfer( - ctx: &Ctx, - msg: MsgTransfer, -) -> Result, Error> -where - Ctx: LightClientContext, -{ - let source_channel_end = ctx - .channel_end(&(msg.source_port.clone(), msg.source_channel)) - .map_err(Error::ics04_channel)?; - - let destination_port = source_channel_end.counterparty().port_id().clone(); - let destination_channel = source_channel_end - .counterparty() - .channel_id() - .ok_or_else(|| { - Error::destination_channel_not_found(msg.source_port.clone(), msg.source_channel) - })?; - - // get the next sequence - let sequence = ctx - .get_next_sequence_send(&(msg.source_port.clone(), msg.source_channel)) - .map_err(Error::ics04_channel)?; - - //TODO: Application LOGIC. - - let packet = Packet { - sequence, - source_port: msg.source_port, - source_channel: msg.source_channel, - destination_port, - destination_channel: *destination_channel, - data: vec![0], - timeout_height: msg.timeout_height, - timeout_timestamp: msg.timeout_timestamp, - }; - - let handler_output = send_packet(ctx, packet).map_err(Error::ics04_channel)?; - - //TODO: add event/atributes and writes to the store issued by the application logic for packet sending. 
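The handler deleted above (and removed from the ICS26 dispatcher later in this patch) reduces to a small relay flow, which the public `send_transfer()` in the new transfer module now covers. A condensed, self-contained restatement of that flow; every type below is a simplified stand-in for the real `ChannelEnd`, `Packet`, and context traits:

```rust
// Toy model of the relay flow performed by the deleted `send_transfer` handler.
#[derive(Clone)]
struct ChannelEnd {
    counterparty_port: String,
    counterparty_channel: Option<String>,
}

struct Packet {
    sequence: u64,
    source_port: String,
    source_channel: String,
    destination_port: String,
    destination_channel: String,
    data: Vec<u8>,
}

trait TransferCtx {
    fn channel_end(&self, port: &str, channel: &str) -> Option<ChannelEnd>;
    fn next_sequence_send(&self, port: &str, channel: &str) -> Option<u64>;
}

fn build_transfer_packet(
    ctx: &impl TransferCtx,
    source_port: &str,
    source_channel: &str,
    data: Vec<u8>,
) -> Result<Packet, String> {
    // 1. Look up the source channel end.
    let chan = ctx
        .channel_end(source_port, source_channel)
        .ok_or("channel not found")?;
    // 2. The destination is the channel's counterparty.
    let destination_channel = chan
        .counterparty_channel
        .clone()
        .ok_or("counterparty channel not set")?;
    // 3. Reserve the next send sequence and assemble the packet; the core
    //    ICS04 `send_packet()` handler then commits and emits events for it.
    let sequence = ctx
        .next_sequence_send(source_port, source_channel)
        .ok_or("next send sequence unavailable")?;
    Ok(Packet {
        sequence,
        source_port: source_port.to_string(),
        source_channel: source_channel.to_string(),
        destination_port: chan.counterparty_port,
        destination_channel,
        data,
    })
}
```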
- Ok(handler_output) -} diff --git a/modules/src/applications/transfer/context.rs b/modules/src/applications/transfer/context.rs index 13220ca87b..ab9c1afdb8 100644 --- a/modules/src/applications/transfer/context.rs +++ b/modules/src/applications/transfer/context.rs @@ -1,4 +1,3 @@ -use sha2::{Digest, Sha256}; use subtle_encoding::hex; use super::error::Error as Ics20Error; @@ -16,7 +15,7 @@ use crate::core::ics04_channel::packet::Packet; use crate::core::ics04_channel::Version; use crate::core::ics05_port::context::PortReader; use crate::core::ics24_host::identifier::{ChannelId, ConnectionId, PortId}; -use crate::core::ics26_routing::context::{ModuleOutputBuilder, OnRecvPacketAck}; +use crate::core::ics26_routing::context::{ModuleOutputBuilder, OnRecvPacketAck, ReaderContext}; use crate::prelude::*; use crate::signer::Signer; @@ -26,7 +25,10 @@ pub trait Ics20Keeper: type AccountId; } -pub trait Ics20Reader: ChannelReader + PortReader { +pub trait Ics20Reader: ChannelReader + PortReader +where + Self: Sized, +{ type AccountId: TryFrom; /// get_port returns the portID for the transfer module. @@ -38,7 +40,8 @@ pub trait Ics20Reader: ChannelReader + PortReader { port_id: &PortId, channel_id: ChannelId, ) -> Result<::AccountId, Ics20Error> { - let hash = cosmos_adr028_escrow_address(port_id, channel_id); + let hash = cosmos_adr028_escrow_address(self, port_id, channel_id); + String::from_utf8(hex::encode_upper(hash)) .expect("hex encoded bytes are not valid UTF8") .parse::() @@ -61,15 +64,17 @@ pub trait Ics20Reader: ChannelReader + PortReader { } // https://github.com/cosmos/cosmos-sdk/blob/master/docs/architecture/adr-028-public-key-addresses.md -fn cosmos_adr028_escrow_address(port_id: &PortId, channel_id: ChannelId) -> Vec { +fn cosmos_adr028_escrow_address( + ctx: &dyn ChannelReader, + port_id: &PortId, + channel_id: ChannelId, +) -> Vec { let contents = format!("{}/{}", port_id, channel_id); + let mut data = VERSION.as_bytes().to_vec(); + data.extend_from_slice(&[0]); + data.extend_from_slice(contents.as_bytes()); - let mut hasher = Sha256::new(); - hasher.update(VERSION.as_bytes()); - hasher.update([0]); - hasher.update(contents.as_bytes()); - - let mut hash = hasher.finalize().to_vec(); + let mut hash = ctx.hash(data); hash.truncate(20); hash } @@ -105,6 +110,7 @@ pub trait BankKeeper { pub trait Ics20Context: Ics20Keeper::AccountId> + Ics20Reader::AccountId> + + ReaderContext { type AccountId: TryFrom; } @@ -296,6 +302,9 @@ pub fn on_timeout_packet( #[cfg(test)] pub(crate) mod test { + use std::sync::Mutex; + + use std::sync::Arc; use subtle_encoding::bech32; use crate::applications::transfer::context::cosmos_adr028_escrow_address; @@ -305,6 +314,7 @@ pub(crate) mod test { use crate::applications::transfer::PrefixedCoin; use crate::core::ics04_channel::error::Error; use crate::handler::HandlerOutputBuilder; + use crate::mock::context::MockIbcStore; use crate::prelude::*; use crate::test_utils::DummyTransferModule; @@ -322,7 +332,9 @@ pub(crate) mod test { let port_id = port_id.parse().unwrap(); let channel_id = channel_id.parse().unwrap(); let gen_address = { - let addr = cosmos_adr028_escrow_address(&port_id, channel_id); + let ibc_store = MockIbcStore::default(); + let ctx = DummyTransferModule::new(Arc::new(Mutex::new(ibc_store))); + let addr = cosmos_adr028_escrow_address(&ctx, &port_id, channel_id); bech32::encode("cosmos", addr) }; assert_eq!(gen_address, address.to_owned()) diff --git a/modules/src/clients/ics07_tendermint/client_def.rs 
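The `cosmos_adr028_escrow_address` change in the hunk above drops the hard-wired `Sha256` and routes hashing through the context's `hash()`, so the host chain supplies the hash function. On a Cosmos-style host whose `hash()` is SHA-256, the derivation reduces to the following sketch (using the `sha2` crate directly; `version` here is the ICS20 version string, `"ics20-1"`):

```rust
use sha2::{Digest, Sha256};

/// ADR-028 style escrow address: the first 20 bytes of
/// SHA-256(version || 0x00 || "{port_id}/{channel_id}").
fn escrow_address(version: &str, port_id: &str, channel_id: &str) -> Vec<u8> {
    let contents = format!("{}/{}", port_id, channel_id);

    let mut data = version.as_bytes().to_vec();
    data.push(0);
    data.extend_from_slice(contents.as_bytes());

    let mut hash = Sha256::digest(&data).to_vec();
    hash.truncate(20);
    hash
}

// e.g. escrow_address("ics20-1", "transfer", "channel-0"), then bech32- or
// hex-encode the 20 bytes as in the test above.
```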
b/modules/src/clients/ics07_tendermint/client_def.rs index 8311d361af..8f3250d57f 100644 --- a/modules/src/clients/ics07_tendermint/client_def.rs +++ b/modules/src/clients/ics07_tendermint/client_def.rs @@ -31,7 +31,7 @@ use crate::core::ics24_host::path::{ ConnectionsPath, ReceiptsPath, SeqRecvsPath, }; use crate::core::ics24_host::Path; -use crate::core::ics26_routing::context::LightClientContext; +use crate::core::ics26_routing::context::ReaderContext; use crate::downcast; use crate::prelude::*; use crate::Height; @@ -48,7 +48,7 @@ impl ClientDef for TendermintClient { fn verify_header( &self, - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, client_id: ClientId, client_state: Self::ClientState, header: Self::Header, @@ -137,7 +137,7 @@ impl ClientDef for TendermintClient { fn update_state( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn ReaderContext, _client_id: ClientId, client_state: Self::ClientState, header: Self::Header, @@ -161,7 +161,7 @@ impl ClientDef for TendermintClient { fn check_for_misbehaviour( &self, - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, client_id: ClientId, client_state: Self::ClientState, header: Self::Header, @@ -242,7 +242,7 @@ impl ClientDef for TendermintClient { fn verify_client_consensus_state( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn ReaderContext, client_state: &Self::ClientState, height: Height, prefix: &CommitmentPrefix, @@ -267,7 +267,7 @@ impl ClientDef for TendermintClient { fn verify_connection_state( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn ReaderContext, _client_id: &ClientId, client_state: &Self::ClientState, height: Height, @@ -288,7 +288,7 @@ impl ClientDef for TendermintClient { fn verify_channel_state( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn ReaderContext, _client_id: &ClientId, client_state: &Self::ClientState, height: Height, @@ -310,7 +310,7 @@ impl ClientDef for TendermintClient { fn verify_client_full_state( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn ReaderContext, client_state: &Self::ClientState, height: Height, prefix: &CommitmentPrefix, @@ -330,7 +330,7 @@ impl ClientDef for TendermintClient { fn verify_packet_data( &self, - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, _client_id: &ClientId, client_state: &Self::ClientState, height: Height, @@ -363,7 +363,7 @@ impl ClientDef for TendermintClient { fn verify_packet_acknowledgement( &self, - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, _client_id: &ClientId, client_state: &Self::ClientState, height: Height, @@ -396,7 +396,7 @@ impl ClientDef for TendermintClient { fn verify_next_sequence_recv( &self, - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, _client_id: &ClientId, client_state: &Self::ClientState, height: Height, @@ -428,7 +428,7 @@ impl ClientDef for TendermintClient { fn verify_packet_receipt_absence( &self, - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, _client_id: &ClientId, client_state: &Self::ClientState, height: Height, @@ -512,7 +512,7 @@ fn verify_non_membership( } fn verify_delay_passed( - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, height: Height, connection_end: &ConnectionEnd, ) -> Result<(), Ics02Error> { diff --git a/modules/src/clients/ics11_beefy/client_def.rs b/modules/src/clients/ics11_beefy/client_def.rs index 8f1d948a1e..d734b1cf3b 100644 --- a/modules/src/clients/ics11_beefy/client_def.rs +++ b/modules/src/clients/ics11_beefy/client_def.rs @@ -29,7 +29,7 @@ use crate::core::ics23_commitment::commitment::{ use 
crate::core::ics24_host::identifier::ConnectionId; use crate::core::ics24_host::identifier::{ChannelId, ClientId, PortId}; use crate::core::ics24_host::Path; -use crate::core::ics26_routing::context::LightClientContext; +use crate::core::ics26_routing::context::ReaderContext; use crate::prelude::*; use crate::Height; use core::marker::PhantomData; @@ -56,7 +56,7 @@ impl ClientDef for BeefyClient ClientDef for BeefyClient ClientDef for BeefyClient ClientDef for BeefyClient ClientDef for BeefyClient ClientDef for BeefyClient ClientDef for BeefyClient ClientDef for BeefyClient ClientDef for BeefyClient ClientDef for BeefyClient ClientDef for BeefyClient>( } fn verify_delay_passed( - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, height: Height, connection_end: &ConnectionEnd, ) -> Result<(), Error> { diff --git a/modules/src/clients/ics13_near/client_def.rs b/modules/src/clients/ics13_near/client_def.rs index b8523d6450..a59f8e93bf 100644 --- a/modules/src/clients/ics13_near/client_def.rs +++ b/modules/src/clients/ics13_near/client_def.rs @@ -10,7 +10,7 @@ use crate::core::ics23_commitment::commitment::{ CommitmentPrefix, CommitmentProofBytes, CommitmentRoot, }; use crate::core::ics24_host::identifier::{ChannelId, ClientId, ConnectionId, PortId}; -use crate::core::ics26_routing::context::LightClientContext; +use crate::core::ics26_routing::context::ReaderContext; use crate::Height; use core::marker::PhantomData; @@ -54,7 +54,7 @@ impl ClientDef for NearClient { // rehydrate client from its own storage, then call this function fn verify_header( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn ReaderContext, _client_id: ClientId, client_state: Self::ClientState, header: Self::Header, @@ -65,7 +65,7 @@ impl ClientDef for NearClient { fn update_state( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn ReaderContext, _client_id: ClientId, _client_state: Self::ClientState, _header: Self::Header, @@ -93,7 +93,7 @@ impl ClientDef for NearClient { fn check_for_misbehaviour( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn ReaderContext, _client_id: ClientId, _client_state: Self::ClientState, _header: Self::Header, @@ -113,7 +113,7 @@ impl ClientDef for NearClient { fn verify_client_consensus_state( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn ReaderContext, _client_state: &Self::ClientState, _height: Height, _prefix: &CommitmentPrefix, @@ -129,7 +129,7 @@ impl ClientDef for NearClient { // Consensus state will be verified in the verification functions before these are called fn verify_connection_state( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn ReaderContext, _client_id: &ClientId, _client_state: &Self::ClientState, _height: Height, @@ -144,7 +144,7 @@ impl ClientDef for NearClient { fn verify_channel_state( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn ReaderContext, _client_id: &ClientId, _client_state: &Self::ClientState, _height: Height, @@ -160,7 +160,7 @@ impl ClientDef for NearClient { fn verify_client_full_state( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn ReaderContext, _client_state: &Self::ClientState, _height: Height, _prefix: &CommitmentPrefix, @@ -174,7 +174,7 @@ impl ClientDef for NearClient { fn verify_packet_data( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn ReaderContext, _client_id: &ClientId, _client_state: &Self::ClientState, _height: Height, @@ -191,7 +191,7 @@ impl ClientDef for NearClient { fn verify_packet_acknowledgement( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn ReaderContext, _client_id: &ClientId, 
_client_state: &Self::ClientState, _height: Height, @@ -208,7 +208,7 @@ impl ClientDef for NearClient { fn verify_next_sequence_recv( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn ReaderContext, _client_id: &ClientId, _client_state: &Self::ClientState, _height: Height, @@ -224,7 +224,7 @@ impl ClientDef for NearClient { fn verify_packet_receipt_absence( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn ReaderContext, _client_id: &ClientId, _client_state: &Self::ClientState, _height: Height, diff --git a/modules/src/core/ics02_client/client_def.rs b/modules/src/core/ics02_client/client_def.rs index ffe6c067ec..08470e7a29 100644 --- a/modules/src/core/ics02_client/client_def.rs +++ b/modules/src/core/ics02_client/client_def.rs @@ -14,7 +14,7 @@ use crate::core::ics23_commitment::commitment::{ CommitmentPrefix, CommitmentProofBytes, CommitmentRoot, }; use crate::core::ics24_host::identifier::{ChannelId, ClientId, ConnectionId, PortId}; -use crate::core::ics26_routing::context::LightClientContext; +use crate::core::ics26_routing::context::ReaderContext; use crate::downcast; use crate::prelude::*; use crate::Height; @@ -36,7 +36,7 @@ pub trait ClientDef: Clone { fn verify_header( &self, - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, client_id: ClientId, client_state: Self::ClientState, header: Self::Header, @@ -44,7 +44,7 @@ pub trait ClientDef: Clone { fn update_state( &self, - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, client_id: ClientId, client_state: Self::ClientState, header: Self::Header, @@ -58,7 +58,7 @@ pub trait ClientDef: Clone { fn check_for_misbehaviour( &self, - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, client_id: ClientId, client_state: Self::ClientState, header: Self::Header, @@ -83,7 +83,7 @@ pub trait ClientDef: Clone { #[allow(clippy::too_many_arguments)] fn verify_client_consensus_state( &self, - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, client_state: &Self::ClientState, height: Height, prefix: &CommitmentPrefix, @@ -98,7 +98,7 @@ pub trait ClientDef: Clone { #[allow(clippy::too_many_arguments)] fn verify_connection_state( &self, - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, client_id: &ClientId, client_state: &Self::ClientState, height: Height, @@ -113,7 +113,7 @@ pub trait ClientDef: Clone { #[allow(clippy::too_many_arguments)] fn verify_channel_state( &self, - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, client_id: &ClientId, client_state: &Self::ClientState, height: Height, @@ -129,7 +129,7 @@ pub trait ClientDef: Clone { #[allow(clippy::too_many_arguments)] fn verify_client_full_state( &self, - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, client_state: &Self::ClientState, height: Height, prefix: &CommitmentPrefix, @@ -143,7 +143,7 @@ pub trait ClientDef: Clone { #[allow(clippy::too_many_arguments)] fn verify_packet_data( &self, - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, client_id: &ClientId, client_state: &Self::ClientState, height: Height, @@ -160,7 +160,7 @@ pub trait ClientDef: Clone { #[allow(clippy::too_many_arguments)] fn verify_packet_acknowledgement( &self, - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, client_id: &ClientId, client_state: &Self::ClientState, height: Height, @@ -177,7 +177,7 @@ pub trait ClientDef: Clone { #[allow(clippy::too_many_arguments)] fn verify_next_sequence_recv( &self, - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, client_id: &ClientId, client_state: &Self::ClientState, height: Height, @@ -193,7 
+193,7 @@ pub trait ClientDef: Clone { #[allow(clippy::too_many_arguments)] fn verify_packet_receipt_absence( &self, - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, client_id: &ClientId, client_state: &Self::ClientState, height: Height, @@ -236,7 +236,7 @@ impl ClientDef for AnyClient ClientDef for AnyClient ClientDef for AnyClient ClientDef for AnyClient ClientDef for AnyClient ClientDef for AnyClient ClientDef for AnyClient ClientDef for AnyClient ClientDef for AnyClient ClientDef for AnyClient ClientDef for AnyClient( msg: ClientMsg, ) -> Result, Error> where - Ctx: LightClientContext, + Ctx: ReaderContext, HostFunctions: HostFunctionsProvider, { match msg { diff --git a/modules/src/core/ics02_client/handler/create_client.rs b/modules/src/core/ics02_client/handler/create_client.rs index 169864be05..41cb2bf136 100644 --- a/modules/src/core/ics02_client/handler/create_client.rs +++ b/modules/src/core/ics02_client/handler/create_client.rs @@ -1,6 +1,6 @@ //! Protocol logic specific to processing ICS2 messages of type `MsgCreateAnyClient`. -use crate::core::ics26_routing::context::LightClientContext; +use crate::core::ics26_routing::context::ReaderContext; use crate::prelude::*; use core::fmt::Debug; @@ -30,7 +30,7 @@ pub struct Result { } pub fn process( - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, msg: MsgCreateAnyClient, ) -> HandlerResult { let mut output = HandlerOutput::builder(); diff --git a/modules/src/core/ics02_client/handler/update_client.rs b/modules/src/core/ics02_client/handler/update_client.rs index b3f3a57c48..472ebc9054 100644 --- a/modules/src/core/ics02_client/handler/update_client.rs +++ b/modules/src/core/ics02_client/handler/update_client.rs @@ -12,7 +12,7 @@ use crate::core::ics02_client::handler::ClientResult; use crate::core::ics02_client::height::Height; use crate::core::ics02_client::msgs::update_client::MsgUpdateAnyClient; use crate::core::ics24_host::identifier::ClientId; -use crate::core::ics26_routing::context::LightClientContext; +use crate::core::ics26_routing::context::ReaderContext; use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; @@ -30,7 +30,7 @@ pub struct Result { } pub fn process( - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, msg: MsgUpdateAnyClient, ) -> HandlerResult { let mut output = HandlerOutput::builder(); diff --git a/modules/src/core/ics02_client/handler/upgrade_client.rs b/modules/src/core/ics02_client/handler/upgrade_client.rs index 3cc7722c43..0ef3738593 100644 --- a/modules/src/core/ics02_client/handler/upgrade_client.rs +++ b/modules/src/core/ics02_client/handler/upgrade_client.rs @@ -8,7 +8,7 @@ use crate::core::ics02_client::events::Attributes; use crate::core::ics02_client::handler::ClientResult; use crate::core::ics02_client::msgs::upgrade_client::MsgUpgradeAnyClient; use crate::core::ics24_host::identifier::ClientId; -use crate::core::ics26_routing::context::LightClientContext; +use crate::core::ics26_routing::context::ReaderContext; use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; @@ -24,7 +24,7 @@ pub struct Result { } pub fn process( - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, msg: MsgUpgradeAnyClient, ) -> HandlerResult { let mut output = HandlerOutput::builder(); diff --git a/modules/src/core/ics02_client/msgs/create_client.rs b/modules/src/core/ics02_client/msgs/create_client.rs index effd6bec72..c3018f4e47 100644 --- 
a/modules/src/core/ics02_client/msgs/create_client.rs +++ b/modules/src/core/ics02_client/msgs/create_client.rs @@ -78,7 +78,7 @@ impl TryFrom for MsgCreateAnyClient { MsgCreateAnyClient::new( AnyClientState::try_from(raw_client_state)?, consensus_state, - raw.signer.into(), + raw.signer.parse().map_err(Error::signer)?, ) } } diff --git a/modules/src/core/ics02_client/msgs/upgrade_client.rs b/modules/src/core/ics02_client/msgs/upgrade_client.rs index db7907dfed..fe5b94347c 100644 --- a/modules/src/core/ics02_client/msgs/upgrade_client.rs +++ b/modules/src/core/ics02_client/msgs/upgrade_client.rs @@ -93,7 +93,7 @@ impl TryFrom for MsgUpgradeAnyClient { consensus_state: AnyConsensusState::try_from(raw_consensus_state)?, proof_upgrade_client: proto_msg.proof_upgrade_client, proof_upgrade_consensus_state: proto_msg.proof_upgrade_consensus_state, - signer: proto_msg.signer.into(), + signer: Signer::from_str(proto_msg.signer.as_str()).map_err(Error::signer)?, }) } } diff --git a/modules/src/core/ics03_connection/handler.rs b/modules/src/core/ics03_connection/handler.rs index e3d596b3c7..154c1a3e6f 100644 --- a/modules/src/core/ics03_connection/handler.rs +++ b/modules/src/core/ics03_connection/handler.rs @@ -4,7 +4,7 @@ use crate::core::ics03_connection::connection::ConnectionEnd; use crate::core::ics03_connection::error::Error; use crate::core::ics03_connection::msgs::ConnectionMsg; use crate::core::ics24_host::identifier::ConnectionId; -use crate::core::ics26_routing::context::LightClientContext; +use crate::core::ics26_routing::context::ReaderContext; use crate::handler::HandlerOutput; use core::fmt::Debug; @@ -47,7 +47,7 @@ pub fn dispatch( msg: ConnectionMsg, ) -> Result, Error> where - Ctx: LightClientContext, + Ctx: ReaderContext, HostFunctions: HostFunctionsProvider, { match msg { diff --git a/modules/src/core/ics03_connection/handler/conn_open_ack.rs b/modules/src/core/ics03_connection/handler/conn_open_ack.rs index 1fd4fca6bd..7f1dd4a9da 100644 --- a/modules/src/core/ics03_connection/handler/conn_open_ack.rs +++ b/modules/src/core/ics03_connection/handler/conn_open_ack.rs @@ -9,13 +9,13 @@ use crate::core::ics03_connection::handler::verify::{ }; use crate::core::ics03_connection::handler::{ConnectionIdState, ConnectionResult}; use crate::core::ics03_connection::msgs::conn_open_ack::MsgConnectionOpenAck; -use crate::core::ics26_routing::context::LightClientContext; +use crate::core::ics26_routing::context::ReaderContext; use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; pub(crate) fn process( - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, msg: MsgConnectionOpenAck, ) -> HandlerResult { let mut output = HandlerOutput::builder(); diff --git a/modules/src/core/ics03_connection/handler/conn_open_confirm.rs b/modules/src/core/ics03_connection/handler/conn_open_confirm.rs index aa196d60a5..d331820a37 100644 --- a/modules/src/core/ics03_connection/handler/conn_open_confirm.rs +++ b/modules/src/core/ics03_connection/handler/conn_open_confirm.rs @@ -7,13 +7,13 @@ use crate::core::ics03_connection::events::Attributes; use crate::core::ics03_connection::handler::verify::verify_proofs; use crate::core::ics03_connection::handler::{ConnectionIdState, ConnectionResult}; use crate::core::ics03_connection::msgs::conn_open_confirm::MsgConnectionOpenConfirm; -use crate::core::ics26_routing::context::LightClientContext; +use crate::core::ics26_routing::context::ReaderContext; use crate::events::IbcEvent; use crate::handler::{HandlerOutput, 
HandlerResult}; use crate::prelude::*; pub(crate) fn process( - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, msg: MsgConnectionOpenConfirm, ) -> HandlerResult { let mut output = HandlerOutput::builder(); diff --git a/modules/src/core/ics03_connection/handler/conn_open_init.rs b/modules/src/core/ics03_connection/handler/conn_open_init.rs index 89b8efbb68..f2c2f386d7 100644 --- a/modules/src/core/ics03_connection/handler/conn_open_init.rs +++ b/modules/src/core/ics03_connection/handler/conn_open_init.rs @@ -6,13 +6,13 @@ use crate::core::ics03_connection::events::Attributes; use crate::core::ics03_connection::handler::{ConnectionIdState, ConnectionResult}; use crate::core::ics03_connection::msgs::conn_open_init::MsgConnectionOpenInit; use crate::core::ics24_host::identifier::ConnectionId; -use crate::core::ics26_routing::context::LightClientContext; +use crate::core::ics26_routing::context::ReaderContext; use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; pub(crate) fn process( - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, msg: MsgConnectionOpenInit, ) -> HandlerResult { let mut output = HandlerOutput::builder(); diff --git a/modules/src/core/ics03_connection/handler/conn_open_try.rs b/modules/src/core/ics03_connection/handler/conn_open_try.rs index dfc0c9301a..e5f556c780 100644 --- a/modules/src/core/ics03_connection/handler/conn_open_try.rs +++ b/modules/src/core/ics03_connection/handler/conn_open_try.rs @@ -10,13 +10,13 @@ use crate::core::ics03_connection::handler::verify::{ use crate::core::ics03_connection::handler::{ConnectionIdState, ConnectionResult}; use crate::core::ics03_connection::msgs::conn_open_try::MsgConnectionOpenTry; use crate::core::ics24_host::identifier::ConnectionId; -use crate::core::ics26_routing::context::LightClientContext; +use crate::core::ics26_routing::context::ReaderContext; use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; pub(crate) fn process( - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, msg: MsgConnectionOpenTry, ) -> HandlerResult { let mut output = HandlerOutput::builder(); diff --git a/modules/src/core/ics03_connection/handler/verify.rs b/modules/src/core/ics03_connection/handler/verify.rs index 2acc9544e3..66f9404f70 100644 --- a/modules/src/core/ics03_connection/handler/verify.rs +++ b/modules/src/core/ics03_connection/handler/verify.rs @@ -6,13 +6,13 @@ use crate::core::ics02_client::{client_def::AnyClient, client_def::ClientDef}; use crate::core::ics03_connection::connection::ConnectionEnd; use crate::core::ics03_connection::error::Error; use crate::core::ics23_commitment::commitment::CommitmentProofBytes; -use crate::core::ics26_routing::context::LightClientContext; +use crate::core::ics26_routing::context::ReaderContext; use crate::proofs::{ConsensusProof, Proofs}; use crate::Height; /// Entry point for verifying all proofs bundled in any ICS3 message. pub fn verify_proofs( - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, client_state: Option, height: Height, connection_end: &ConnectionEnd, @@ -60,7 +60,7 @@ pub fn verify_proofs( /// claims to prove that an object of type connection exists on the source chain (i.e., the chain /// which created this proof). This object must match the state of `expected_conn`. 
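Most hunks in this commit are the mechanical `LightClientContext` → `ReaderContext` rename. The trait itself (see the `ics26_routing/context.rs` hunk further down) remains a pure composition of the existing reader traits, so handlers and proof helpers keep taking it as a trait object. Restated for reference, with a small caller modeled on `check_client_consensus_height` above; import paths match the ones used elsewhere in this patch:

```rust
use crate::core::ics02_client::context::ClientReader;
use crate::core::ics03_connection::context::ConnectionReader;
use crate::core::ics04_channel::context::ChannelReader;
use crate::Height;

/// Renamed from `LightClientContext`: still just a supertrait bundle over the readers.
pub trait ReaderContext: ClientReader + ConnectionReader + ChannelReader {}

/// Verification helpers only need read access, so they accept `&dyn ReaderContext`.
fn height_is_fresh(ctx: &dyn ReaderContext, claimed_height: Height) -> bool {
    // Same freshness condition enforced by `check_client_consensus_height`.
    claimed_height <= ctx.host_height()
}
```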
pub fn verify_connection_proof( - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, height: Height, connection_end: &ConnectionEnd, expected_conn: &ConnectionEnd, @@ -115,7 +115,7 @@ pub fn verify_connection_proof( /// at the same revision as the current chain, with matching chain identifiers, etc) and that the /// `proof` is correct. pub fn verify_client_proof( - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, height: Height, connection_end: &ConnectionEnd, expected_client_state: AnyClientState, @@ -154,7 +154,7 @@ pub fn verify_client_proof( } pub fn verify_consensus_proof( - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, height: Height, connection_end: &ConnectionEnd, proof: &ConsensusProof, @@ -197,7 +197,7 @@ pub fn verify_consensus_proof( /// Checks that `claimed_height` is within normal bounds, i.e., fresh enough so that the chain has /// not pruned it yet, but not newer than the current (actual) height of the local chain. pub fn check_client_consensus_height( - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, claimed_height: Height, ) -> Result<(), Error> { if claimed_height > ctx.host_height() { diff --git a/modules/src/core/ics04_channel/context.rs b/modules/src/core/ics04_channel/context.rs index 13f2df1ffc..0cd1aff12f 100644 --- a/modules/src/core/ics04_channel/context.rs +++ b/modules/src/core/ics04_channel/context.rs @@ -15,7 +15,7 @@ use crate::prelude::*; use crate::timestamp::Timestamp; use crate::Height; -use super::packet::{PacketResult, Sequence}; +use super::packet::{Packet, PacketResult, Sequence}; /// A context supplying all the necessary read-only dependencies for processing any `ChannelMsg`. pub trait ChannelReader { @@ -71,7 +71,7 @@ pub trait ChannelReader { self.hash(ack.into_bytes()).into() } - /// A hashing function for packet commitments + /// A Sha2_256 hashing function fn hash(&self, value: Vec) -> Vec; /// Returns the time when the client state for the given [`ClientId`] was updated with a header for the given [`Height`] @@ -137,6 +137,8 @@ pub trait ChannelKeeper { (res.port_id.clone(), res.channel_id, res.seq), res.commitment, )?; + + self.store_packet((res.port_id.clone(), res.channel_id, res.seq), res.packet)?; } PacketResult::Recv(res) => { let res = match res { @@ -199,6 +201,13 @@ pub trait ChannelKeeper { commitment: PacketCommitment, ) -> Result<(), Error>; + /// Allow implementers to optionally store packet in storage + fn store_packet( + &mut self, + key: (PortId, ChannelId, Sequence), + packet: Packet, + ) -> Result<(), Error>; + fn delete_packet_commitment(&mut self, key: (PortId, ChannelId, Sequence)) -> Result<(), Error>; diff --git a/modules/src/core/ics04_channel/error.rs b/modules/src/core/ics04_channel/error.rs index 61f77a28e3..b4473966e3 100644 --- a/modules/src/core/ics04_channel/error.rs +++ b/modules/src/core/ics04_channel/error.rs @@ -344,6 +344,14 @@ define_error! 
{ ImplementationSpecific { reason: String } | e | { format_args!("implementation specific error: {}", e.reason) }, + + AppModule + { description: String } + | e | { + format_args!( + "application module error: {0}", + e.description) + }, } } diff --git a/modules/src/core/ics04_channel/handler.rs b/modules/src/core/ics04_channel/handler.rs index cbdba24490..bb0a789a23 100644 --- a/modules/src/core/ics04_channel/handler.rs +++ b/modules/src/core/ics04_channel/handler.rs @@ -7,7 +7,7 @@ use crate::core::ics04_channel::msgs::ChannelMsg; use crate::core::ics04_channel::{msgs::PacketMsg, packet::PacketResult}; use crate::core::ics24_host::identifier::{ChannelId, PortId}; use crate::core::ics26_routing::context::{ - Ics26Context, LightClientContext, ModuleId, ModuleOutput, OnRecvPacketAck, Router, + Ics26Context, ModuleId, ModuleOutputBuilder, OnRecvPacketAck, ReaderContext, Router, }; use crate::handler::{HandlerOutput, HandlerOutputBuilder}; use core::fmt::Debug; @@ -64,7 +64,7 @@ pub fn channel_dispatch( msg: &ChannelMsg, ) -> Result<(HandlerOutputBuilder<()>, ChannelResult), Error> where - Ctx: LightClientContext, + Ctx: ReaderContext, HostFunctions: HostFunctionsProvider, { let output = match msg { @@ -177,7 +177,7 @@ pub fn packet_dispatch( msg: &PacketMsg, ) -> Result<(HandlerOutputBuilder<()>, PacketResult), Error> where - Ctx: LightClientContext, + Ctx: ReaderContext, HostFunctions: HostFunctionsProvider, { let output = match msg { diff --git a/modules/src/core/ics04_channel/handler/acknowledgement.rs b/modules/src/core/ics04_channel/handler/acknowledgement.rs index d316e2a703..ccdc305a55 100644 --- a/modules/src/core/ics04_channel/handler/acknowledgement.rs +++ b/modules/src/core/ics04_channel/handler/acknowledgement.rs @@ -8,7 +8,7 @@ use crate::core::ics04_channel::handler::verify::verify_packet_acknowledgement_p use crate::core::ics04_channel::msgs::acknowledgement::MsgAcknowledgement; use crate::core::ics04_channel::packet::{PacketResult, Sequence}; use crate::core::ics24_host::identifier::{ChannelId, PortId}; -use crate::core::ics26_routing::context::LightClientContext; +use crate::core::ics26_routing::context::ReaderContext; use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; @@ -23,7 +23,7 @@ pub struct AckPacketResult { } pub fn process( - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, msg: &MsgAcknowledgement, ) -> HandlerResult { let mut output = HandlerOutput::builder(); diff --git a/modules/src/core/ics04_channel/handler/chan_close_confirm.rs b/modules/src/core/ics04_channel/handler/chan_close_confirm.rs index 308d1e90bb..54bdcbef43 100644 --- a/modules/src/core/ics04_channel/handler/chan_close_confirm.rs +++ b/modules/src/core/ics04_channel/handler/chan_close_confirm.rs @@ -8,13 +8,13 @@ use crate::core::ics04_channel::events::Attributes; use crate::core::ics04_channel::handler::verify::verify_channel_proofs; use crate::core::ics04_channel::handler::{ChannelIdState, ChannelResult}; use crate::core::ics04_channel::msgs::chan_close_confirm::MsgChannelCloseConfirm; -use crate::core::ics26_routing::context::LightClientContext; +use crate::core::ics26_routing::context::ReaderContext; use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; pub(crate) fn process( - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, msg: &MsgChannelCloseConfirm, ) -> HandlerResult { let mut output = HandlerOutput::builder(); diff --git 
a/modules/src/core/ics04_channel/handler/chan_close_init.rs b/modules/src/core/ics04_channel/handler/chan_close_init.rs index 1cfd416536..8abfd0e339 100644 --- a/modules/src/core/ics04_channel/handler/chan_close_init.rs +++ b/modules/src/core/ics04_channel/handler/chan_close_init.rs @@ -6,12 +6,12 @@ use crate::core::ics04_channel::error::Error; use crate::core::ics04_channel::events::Attributes; use crate::core::ics04_channel::handler::{ChannelIdState, ChannelResult}; use crate::core::ics04_channel::msgs::chan_close_init::MsgChannelCloseInit; -use crate::core::ics26_routing::context::LightClientContext; +use crate::core::ics26_routing::context::ReaderContext; use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; pub(crate) fn process( - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, msg: &MsgChannelCloseInit, ) -> HandlerResult { let mut output = HandlerOutput::builder(); diff --git a/modules/src/core/ics04_channel/handler/chan_open_ack.rs b/modules/src/core/ics04_channel/handler/chan_open_ack.rs index d98316303a..7138a55a66 100644 --- a/modules/src/core/ics04_channel/handler/chan_open_ack.rs +++ b/modules/src/core/ics04_channel/handler/chan_open_ack.rs @@ -7,13 +7,13 @@ use crate::core::ics04_channel::events::Attributes; use crate::core::ics04_channel::handler::verify::verify_channel_proofs; use crate::core::ics04_channel::handler::{ChannelIdState, ChannelResult}; use crate::core::ics04_channel::msgs::chan_open_ack::MsgChannelOpenAck; -use crate::core::ics26_routing::context::LightClientContext; +use crate::core::ics26_routing::context::ReaderContext; use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; pub(crate) fn process( - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, msg: &MsgChannelOpenAck, ) -> HandlerResult { let mut output = HandlerOutput::builder(); diff --git a/modules/src/core/ics04_channel/handler/chan_open_confirm.rs b/modules/src/core/ics04_channel/handler/chan_open_confirm.rs index 7704959d67..f605500d50 100644 --- a/modules/src/core/ics04_channel/handler/chan_open_confirm.rs +++ b/modules/src/core/ics04_channel/handler/chan_open_confirm.rs @@ -7,13 +7,13 @@ use crate::core::ics04_channel::events::Attributes; use crate::core::ics04_channel::handler::verify::verify_channel_proofs; use crate::core::ics04_channel::handler::{ChannelIdState, ChannelResult}; use crate::core::ics04_channel::msgs::chan_open_confirm::MsgChannelOpenConfirm; -use crate::core::ics26_routing::context::LightClientContext; +use crate::core::ics26_routing::context::ReaderContext; use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; pub(crate) fn process( - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, msg: &MsgChannelOpenConfirm, ) -> HandlerResult { let mut output = HandlerOutput::builder(); diff --git a/modules/src/core/ics04_channel/handler/chan_open_init.rs b/modules/src/core/ics04_channel/handler/chan_open_init.rs index 4a1e9ecc7a..4c7af9e743 100644 --- a/modules/src/core/ics04_channel/handler/chan_open_init.rs +++ b/modules/src/core/ics04_channel/handler/chan_open_init.rs @@ -6,13 +6,13 @@ use crate::core::ics04_channel::events::Attributes; use crate::core::ics04_channel::handler::{ChannelIdState, ChannelResult}; use crate::core::ics04_channel::msgs::chan_open_init::MsgChannelOpenInit; use crate::core::ics24_host::identifier::ChannelId; -use crate::core::ics26_routing::context::LightClientContext; +use 
crate::core::ics26_routing::context::ReaderContext; use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; pub(crate) fn process( - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, msg: &MsgChannelOpenInit, ) -> HandlerResult { let mut output = HandlerOutput::builder(); diff --git a/modules/src/core/ics04_channel/handler/chan_open_try.rs b/modules/src/core/ics04_channel/handler/chan_open_try.rs index 810cc4c3bc..2564486364 100644 --- a/modules/src/core/ics04_channel/handler/chan_open_try.rs +++ b/modules/src/core/ics04_channel/handler/chan_open_try.rs @@ -9,13 +9,13 @@ use crate::core::ics04_channel::handler::verify::verify_channel_proofs; use crate::core::ics04_channel::handler::{ChannelIdState, ChannelResult}; use crate::core::ics04_channel::msgs::chan_open_try::MsgChannelOpenTry; use crate::core::ics24_host::identifier::ChannelId; -use crate::core::ics26_routing::context::LightClientContext; +use crate::core::ics26_routing::context::ReaderContext; use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; pub(crate) fn process( - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, msg: &MsgChannelOpenTry, ) -> HandlerResult { let mut output = HandlerOutput::builder(); diff --git a/modules/src/core/ics04_channel/handler/recv_packet.rs b/modules/src/core/ics04_channel/handler/recv_packet.rs index 8d04445504..8ac7ae0c71 100644 --- a/modules/src/core/ics04_channel/handler/recv_packet.rs +++ b/modules/src/core/ics04_channel/handler/recv_packet.rs @@ -7,7 +7,7 @@ use crate::core::ics04_channel::handler::verify::verify_packet_recv_proofs; use crate::core::ics04_channel::msgs::recv_packet::MsgRecvPacket; use crate::core::ics04_channel::packet::{PacketResult, Receipt, Sequence}; use crate::core::ics24_host::identifier::{ChannelId, PortId}; -use crate::core::ics26_routing::context::LightClientContext; +use crate::core::ics26_routing::context::ReaderContext; use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; @@ -31,7 +31,7 @@ pub enum RecvPacketResult { } pub fn process( - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, msg: &MsgRecvPacket, ) -> HandlerResult { let mut output = HandlerOutput::builder(); diff --git a/modules/src/core/ics04_channel/handler/send_packet.rs b/modules/src/core/ics04_channel/handler/send_packet.rs index bf0b34436d..ca484371f8 100644 --- a/modules/src/core/ics04_channel/handler/send_packet.rs +++ b/modules/src/core/ics04_channel/handler/send_packet.rs @@ -6,7 +6,7 @@ use crate::core::ics04_channel::events::SendPacket; use crate::core::ics04_channel::packet::{PacketResult, Sequence}; use crate::core::ics04_channel::{error::Error, packet::Packet}; use crate::core::ics24_host::identifier::{ChannelId, PortId}; -use crate::core::ics26_routing::context::LightClientContext; +use crate::core::ics26_routing::context::ReaderContext; use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; @@ -19,12 +19,10 @@ pub struct SendPacketResult { pub seq: Sequence, pub seq_number: Sequence, pub commitment: PacketCommitment, + pub packet: Packet, } -pub fn send_packet( - ctx: &dyn LightClientContext, - packet: Packet, -) -> HandlerResult { +pub fn send_packet(ctx: &dyn ReaderContext, packet: Packet) -> HandlerResult { let mut output = HandlerOutput::builder(); let source_channel_end = @@ -96,6 +94,7 @@ pub fn send_packet( channel_id: packet.source_channel, seq: packet.sequence, 
seq_number: next_seq_send.increment(), + packet: packet.clone(), commitment: ctx.packet_commitment( packet.data.clone(), packet.timeout_height, diff --git a/modules/src/core/ics04_channel/handler/timeout.rs b/modules/src/core/ics04_channel/handler/timeout.rs index cf7c08b015..5f2e7b4480 100644 --- a/modules/src/core/ics04_channel/handler/timeout.rs +++ b/modules/src/core/ics04_channel/handler/timeout.rs @@ -9,7 +9,7 @@ use crate::core::ics04_channel::handler::verify::{ use crate::core::ics04_channel::msgs::timeout::MsgTimeout; use crate::core::ics04_channel::packet::{PacketResult, Sequence}; use crate::core::ics24_host::identifier::{ChannelId, PortId}; -use crate::core::ics26_routing::context::LightClientContext; +use crate::core::ics26_routing::context::ReaderContext; use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; @@ -25,7 +25,7 @@ pub struct TimeoutPacketResult { } pub fn process( - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, msg: &MsgTimeout, ) -> HandlerResult { let mut output = HandlerOutput::builder(); diff --git a/modules/src/core/ics04_channel/handler/timeout_on_close.rs b/modules/src/core/ics04_channel/handler/timeout_on_close.rs index 2be21ade6c..3b194a395f 100644 --- a/modules/src/core/ics04_channel/handler/timeout_on_close.rs +++ b/modules/src/core/ics04_channel/handler/timeout_on_close.rs @@ -9,13 +9,13 @@ use crate::core::ics04_channel::handler::verify::{ use crate::core::ics04_channel::msgs::timeout_on_close::MsgTimeoutOnClose; use crate::core::ics04_channel::packet::PacketResult; use crate::core::ics04_channel::{error::Error, handler::timeout::TimeoutPacketResult}; -use crate::core::ics26_routing::context::LightClientContext; +use crate::core::ics26_routing::context::ReaderContext; use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; pub fn process( - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, msg: &MsgTimeoutOnClose, ) -> HandlerResult { let mut output = HandlerOutput::builder(); diff --git a/modules/src/core/ics04_channel/handler/verify.rs b/modules/src/core/ics04_channel/handler/verify.rs index 086cf591c8..7e2f2e9ea9 100644 --- a/modules/src/core/ics04_channel/handler/verify.rs +++ b/modules/src/core/ics04_channel/handler/verify.rs @@ -7,14 +7,14 @@ use crate::core::ics04_channel::channel::ChannelEnd; use crate::core::ics04_channel::error::Error; use crate::core::ics04_channel::msgs::acknowledgement::Acknowledgement; use crate::core::ics04_channel::packet::{Packet, Sequence}; -use crate::core::ics26_routing::context::LightClientContext; +use crate::core::ics26_routing::context::ReaderContext; use crate::prelude::*; use crate::proofs::Proofs; use crate::Height; /// Entry point for verifying all proofs bundled in any ICS4 message for channel protocols. pub fn verify_channel_proofs( - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, height: Height, channel_end: &ChannelEnd, connection_end: &ConnectionEnd, @@ -57,7 +57,7 @@ pub fn verify_channel_proofs( /// Entry point for verifying all proofs bundled in a ICS4 packet recv. message. pub fn verify_packet_recv_proofs( - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, height: Height, packet: &Packet, connection_end: &ConnectionEnd, @@ -105,7 +105,7 @@ pub fn verify_packet_recv_proofs( /// Entry point for verifying all proofs bundled in an ICS4 packet ack message. 
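Two related changes thread through the packet path here: `SendPacketResult` now carries the full `Packet`, and `ChannelKeeper` (see the `ics04_channel/context.rs` hunk above) gains a `store_packet()` hook that `store_packet_result()` calls for the Send case after writing the commitment. A minimal sketch of what a host-side implementation amounts to; `SendResult`, `HostStore`, and the byte-vector payloads are simplified stand-ins for the real `SendPacketResult`, store, `PacketCommitment`, and `Packet` types:

```rust
use std::collections::BTreeMap;

type Key = (String, String, u64); // stand-in for (PortId, ChannelId, Sequence)

struct SendResult {
    key: Key,
    commitment: Vec<u8>,
    packet: Vec<u8>, // the new field carried through from `send_packet()`
}

#[derive(Default)]
struct HostStore {
    packet_commitments: BTreeMap<Key, Vec<u8>>,
    sent_packets: BTreeMap<Key, Vec<u8>>,
}

impl HostStore {
    /// Mirrors the Send branch of `store_packet_result()`: write the commitment as
    /// before, then hand the full packet to the new `store_packet()` hook. A host
    /// that does not need full packets can make that hook a no-op, as the
    /// MockContext implementation below does.
    fn apply_send(&mut self, res: SendResult) {
        self.packet_commitments.insert(res.key.clone(), res.commitment);
        self.sent_packets.insert(res.key, res.packet);
    }
}
```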
pub fn verify_packet_acknowledgement_proofs( - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, height: Height, packet: &Packet, acknowledgement: Acknowledgement, @@ -150,7 +150,7 @@ pub fn verify_packet_acknowledgement_proofs( - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, height: Height, connection_end: &ConnectionEnd, packet: Packet, @@ -191,7 +191,7 @@ pub fn verify_next_sequence_recv( } pub fn verify_packet_receipt_absence( - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, height: Height, connection_end: &ConnectionEnd, packet: Packet, diff --git a/modules/src/core/ics04_channel/handler/write_acknowledgement.rs b/modules/src/core/ics04_channel/handler/write_acknowledgement.rs index e86f1f5821..299bd68f9d 100644 --- a/modules/src/core/ics04_channel/handler/write_acknowledgement.rs +++ b/modules/src/core/ics04_channel/handler/write_acknowledgement.rs @@ -4,7 +4,7 @@ use crate::core::ics04_channel::error::Error; use crate::core::ics04_channel::events::WriteAcknowledgement; use crate::core::ics04_channel::packet::{Packet, PacketResult, Sequence}; use crate::core::ics24_host::identifier::{ChannelId, PortId}; -use crate::core::ics26_routing::context::LightClientContext; +use crate::core::ics26_routing::context::ReaderContext; use crate::prelude::*; use crate::{ events::IbcEvent, @@ -20,7 +20,7 @@ pub struct WriteAckPacketResult { } pub fn process( - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, packet: Packet, ack: Vec, ) -> HandlerResult { diff --git a/modules/src/core/ics26_routing/context.rs b/modules/src/core/ics26_routing/context.rs index aac61fb214..0582bd9673 100644 --- a/modules/src/core/ics26_routing/context.rs +++ b/modules/src/core/ics26_routing/context.rs @@ -22,7 +22,7 @@ use crate::handler::HandlerOutputBuilder; use crate::signer::Signer; /// This trait captures all the functional dependencies of needed in light client implementations -pub trait LightClientContext: ClientReader + ConnectionReader + ChannelReader {} +pub trait ReaderContext: ClientReader + ConnectionReader + ChannelReader {} /// This trait captures all the functional dependencies (i.e., context) which the ICS26 module /// requires to be able to dispatch and process IBC messages. In other words, this is the @@ -35,8 +35,7 @@ pub trait Ics26Context: + ChannelKeeper + ChannelReader + PortReader - + Ics20Context - + LightClientContext + + ReaderContext { type Router: Router; diff --git a/modules/src/core/ics26_routing/handler.rs b/modules/src/core/ics26_routing/handler.rs index b0cd942218..c2f0765b88 100644 --- a/modules/src/core/ics26_routing/handler.rs +++ b/modules/src/core/ics26_routing/handler.rs @@ -3,7 +3,6 @@ use crate::prelude::*; use ibc_proto::google::protobuf::Any; -use crate::applications::ics20_fungible_token_transfer::relay_application_logic::send_transfer::send_transfer as ics20_msg_dispatcher; use crate::core::ics02_client::handler::dispatch as ics2_msg_dispatcher; use crate::core::ics03_connection::handler::dispatch as ics3_msg_dispatcher; use crate::core::ics04_channel::handler::{ @@ -32,10 +31,7 @@ pub struct MsgReceipt { /// Mimics the DeliverTx ABCI interface, but for a single message and at a slightly lower level. /// No need for authentication info or signature checks here. /// Returns a vector of all events that got generated as a byproduct of processing `message`. 
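With the signature change just below, `deliver()` hands back a `MsgReceipt { events, log }` instead of a bare tuple; the `Ics18Context` hunk near the end of this patch destructures it the same way. A small calling sketch built from the imports used elsewhere in this patch (`Crypto` is the test `HostFunctionsProvider`; the error plumbing is simplified):

```rust
use ibc_proto::google::protobuf::Any;

use crate::core::ics26_routing::context::Ics26Context;
use crate::core::ics26_routing::error::Error;
use crate::core::ics26_routing::handler::{deliver, MsgReceipt};
use crate::events::IbcEvent;
use crate::test_utils::Crypto;

/// Run a batch of messages and collect the emitted events.
fn deliver_all<Ctx: Ics26Context>(ctx: &mut Ctx, msgs: Vec<Any>) -> Result<Vec<IbcEvent>, Error> {
    let mut all_events = Vec::new();
    for msg in msgs {
        // `deliver()` now returns a MsgReceipt rather than an (events, log) tuple.
        let MsgReceipt { mut events, .. } = deliver::<_, Crypto>(ctx, msg)?;
        all_events.append(&mut events);
    }
    Ok(all_events)
}
```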
-pub fn deliver( - ctx: &mut Ctx, - message: Any, -) -> Result<(Vec, Vec), Error> +pub fn deliver(ctx: &mut Ctx, message: Any) -> Result where Ctx: Ics26Context, HostFunctions: HostFunctionsProvider, @@ -44,7 +40,7 @@ where let envelope = decode(message)?; // Process the envelope, and accumulate any events that were generated. - let output = dispatch::<_, HostFunctions>(ctx, envelope)?; + let HandlerOutput { log, events, .. } = dispatch::<_, HostFunctions>(ctx, envelope)?; Ok(MsgReceipt { events, log }) } @@ -114,20 +110,6 @@ where handler_builder.with_result(()) } - Ics20Msg(msg) => { - let handler_output = ics20_msg_dispatcher::<_, HostFunctions>(ctx, msg) - .map_err(Error::ics20_fungible_token_transfer)?; - - // Apply any results to the host chain store. - ctx.store_packet_result(handler_output.result) - .map_err(Error::ics04_channel)?; - - HandlerOutput::builder() - .with_log(handler_output.log) - .with_events(handler_output.events) - .with_result(()) - } - Ics4PacketMsg(msg) => { let module_id = get_module_for_packet_msg(ctx, &msg).map_err(Error::ics04_channel)?; let (mut handler_builder, packet_result) = @@ -160,12 +142,10 @@ mod tests { use test_log::test; - use crate::applications::ics20_fungible_token_transfer::msgs::transfer::test_util::get_dummy_msg_transfer; + use crate::applications::transfer::context::test::deliver as ics20_deliver; + use crate::applications::transfer::PrefixedCoin; use crate::core::ics02_client::client_consensus::AnyConsensusState; use crate::core::ics02_client::client_state::AnyClientState; - use crate::events::IbcEvent; - use crate::test_utils::Crypto; - use crate::core::ics02_client::msgs::{ create_client::MsgCreateAnyClient, update_client::MsgUpdateAnyClient, upgrade_client::MsgUpgradeAnyClient, ClientMsg, @@ -188,8 +168,8 @@ mod tests { timeout_on_close::{test_util::get_dummy_raw_msg_timeout_on_close, MsgTimeoutOnClose}, ChannelMsg, PacketMsg, }; - use crate::core::ics23_commitment::commitment::test_util::get_dummy_merkle_proof; use crate::events::IbcEvent; + use crate::test_utils::Crypto; use crate::{ applications::transfer::msgs::transfer::test_util::get_dummy_msg_transfer, applications::transfer::msgs::transfer::MsgTransfer, @@ -236,7 +216,7 @@ mod tests { // Test parameters struct Test { name: String, - msg: Ics26Envelope, + msg: TestMsg, want_pass: bool, } let default_signer = get_dummy_account_id(); @@ -561,7 +541,23 @@ mod tests { .collect(); for test in tests { - let res = dispatch::<_, Crypto>(&mut ctx, test.msg.clone()); + let res = match test.msg.clone() { + TestMsg::Ics26(msg) => dispatch::<_, Crypto>(&mut ctx, msg).map(|_| ()), + TestMsg::Ics20(msg) => { + let transfer_module = + ctx.router_mut().get_route_mut(&transfer_module_id).unwrap(); + ics20_deliver( + transfer_module + .as_any_mut() + .downcast_mut::() + .unwrap(), + &mut HandlerOutputBuilder::new(), + msg, + ) + .map(|_| ()) + .map_err(Error::ics04_channel) + } + }; assert_eq!( test.want_pass, diff --git a/modules/src/mock/client_def.rs b/modules/src/mock/client_def.rs index 155666847e..728d5be17a 100644 --- a/modules/src/mock/client_def.rs +++ b/modules/src/mock/client_def.rs @@ -13,7 +13,7 @@ use crate::core::ics23_commitment::merkle::apply_prefix; use crate::core::ics24_host::identifier::{ChannelId, ClientId, ConnectionId, PortId}; use crate::core::ics24_host::path::ClientConsensusStatePath; use crate::core::ics24_host::Path; -use crate::core::ics26_routing::context::LightClientContext; +use crate::core::ics26_routing::context::ReaderContext; use 
crate::mock::client_state::{MockClientState, MockConsensusState}; use crate::mock::header::MockHeader; use crate::prelude::*; @@ -30,7 +30,7 @@ impl ClientDef for MockClient { fn update_state( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn ReaderContext, _client_id: ClientId, client_state: Self::ClientState, header: Self::Header, @@ -50,7 +50,7 @@ impl ClientDef for MockClient { fn verify_client_consensus_state( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn ReaderContext, _client_state: &Self::ClientState, _height: Height, prefix: &CommitmentPrefix, @@ -74,7 +74,7 @@ impl ClientDef for MockClient { fn verify_connection_state( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn ReaderContext, _client_id: &ClientId, _client_state: &Self::ClientState, _height: Height, @@ -89,7 +89,7 @@ impl ClientDef for MockClient { fn verify_channel_state( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn ReaderContext, _client_id: &ClientId, _client_state: &Self::ClientState, _height: Height, @@ -105,7 +105,7 @@ impl ClientDef for MockClient { fn verify_client_full_state( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn ReaderContext, _client_state: &Self::ClientState, _height: Height, _prefix: &CommitmentPrefix, @@ -119,7 +119,7 @@ impl ClientDef for MockClient { fn verify_packet_data( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn ReaderContext, _client_id: &ClientId, _client_state: &Self::ClientState, _height: Height, @@ -136,7 +136,7 @@ impl ClientDef for MockClient { fn verify_packet_acknowledgement( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn ReaderContext, _client_id: &ClientId, _client_state: &Self::ClientState, _height: Height, @@ -153,7 +153,7 @@ impl ClientDef for MockClient { fn verify_next_sequence_recv( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn ReaderContext, _client_id: &ClientId, _client_state: &Self::ClientState, _height: Height, @@ -169,7 +169,7 @@ impl ClientDef for MockClient { fn verify_packet_receipt_absence( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn ReaderContext, _client_id: &ClientId, _client_state: &Self::ClientState, _height: Height, @@ -198,7 +198,7 @@ impl ClientDef for MockClient { fn verify_header( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn ReaderContext, _client_id: ClientId, _client_state: Self::ClientState, _header: Self::Header, @@ -216,7 +216,7 @@ impl ClientDef for MockClient { fn check_for_misbehaviour( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn ReaderContext, _client_id: ClientId, _client_state: Self::ClientState, _header: Self::Header, diff --git a/modules/src/mock/context.rs b/modules/src/mock/context.rs index eec73176b5..777aaa7c7b 100644 --- a/modules/src/mock/context.rs +++ b/modules/src/mock/context.rs @@ -37,7 +37,7 @@ use crate::core::ics05_port::error::Error; use crate::core::ics23_commitment::commitment::CommitmentPrefix; use crate::core::ics24_host::identifier::{ChainId, ChannelId, ClientId, ConnectionId, PortId}; use crate::core::ics26_routing::context::{ - Ics26Context, LightClientContext, Module, ModuleId, Router, RouterBuilder, + Ics26Context, Module, ModuleId, ReaderContext, Router, RouterBuilder, }; use crate::core::ics26_routing::handler::{deliver, dispatch, MsgReceipt}; use crate::core::ics26_routing::msgs::Ics26Envelope; @@ -513,7 +513,7 @@ impl MockContext { } pub fn consensus_states(&self, client_id: &ClientId) -> Vec { - self.clients[client_id] + self.ibc_store.lock().unwrap().clients[client_id] .consensus_states .iter() .map(|(k, v)| AnyConsensusStateWithHeight { @@ 
-535,8 +535,8 @@ impl MockContext { &self, client_id: &ClientId, height: &Height, - ) -> &AnyConsensusState { - self.clients[client_id] + ) -> AnyConsensusState { + self.ibc_store.lock().unwrap().clients[client_id] .consensus_states .get(height) .unwrap() @@ -642,7 +642,7 @@ impl Router for MockRouter { } } -impl LightClientContext for MockContext {} +impl ReaderContext for MockContext {} impl Ics26Context for MockContext { type Router = MockRouter; @@ -954,6 +954,14 @@ impl ChannelKeeper for MockContext { .insert(key, receipt); Ok(()) } + + fn store_packet( + &mut self, + _key: (PortId, ChannelId, Sequence), + _packet: crate::core::ics04_channel::packet::Packet, + ) -> Result<(), Ics04Error> { + Ok(()) + } } impl ConnectionReader for MockContext { @@ -1033,7 +1041,7 @@ impl ClientReader for MockContext { client_id: &ClientId, height: Height, ) -> Result { - match self.clients.get(client_id) { + match self.ibc_store.lock().unwrap().clients.get(client_id) { Some(client_record) => match client_record.consensus_states.get(&height) { Some(consensus_state) => Ok(consensus_state.clone()), None => Err(Ics02Error::consensus_state_not_found( @@ -1054,7 +1062,8 @@ impl ClientReader for MockContext { client_id: &ClientId, height: Height, ) -> Result, Ics02Error> { - let client_record = self + let ibc_store = self.ibc_store.lock().unwrap(); + let client_record = ibc_store .clients .get(client_id) .ok_or_else(|| Ics02Error::client_not_found(client_id.clone()))?; @@ -1081,7 +1090,8 @@ impl ClientReader for MockContext { client_id: &ClientId, height: Height, ) -> Result, Ics02Error> { - let client_record = self + let ibc_store = self.ibc_store.lock().unwrap(); + let client_record = ibc_store .clients .get(client_id) .ok_or_else(|| Ics02Error::client_not_found(client_id.clone()))?; @@ -1242,7 +1252,7 @@ impl Ics18Context for MockContext { // Forward call to Ics26 delivery method. let mut all_events = vec![]; for msg in msgs { - let (mut events, _) = + let MsgReceipt { mut events, .. 
} = deliver::<_, Crypto>(self, msg).map_err(Ics18Error::transaction_failed)?; all_events.append(&mut events); } diff --git a/modules/src/test_utils.rs b/modules/src/test_utils.rs index 04fb47b3ac..23bc3df164 100644 --- a/modules/src/test_utils.rs +++ b/modules/src/test_utils.rs @@ -2,6 +2,8 @@ use std::sync::{Arc, Mutex}; use std::time::Duration; use crate::clients::host_functions::HostFunctionsProvider; +use crate::core::ics02_client::context::ClientReader; +use crate::core::ics03_connection::context::ConnectionReader; use crate::prelude::*; use sp_core::keccak_256; use sp_trie::LayoutV0; @@ -9,16 +11,24 @@ use tendermint::{block, consensus, evidence, public_key::Algorithm}; use crate::clients::ics11_beefy::error::Error as BeefyError; use crate::core::ics02_client::error::Error as Ics02Error; -use crate::core::ics04_channel::channel::{Counterparty, Order}; + +use crate::applications::transfer::context::{BankKeeper, Ics20Context, Ics20Keeper, Ics20Reader}; +use crate::applications::transfer::{error::Error as Ics20Error, PrefixedCoin}; +use crate::core::ics02_client::client_consensus::AnyConsensusState; +use crate::core::ics02_client::client_state::AnyClientState; +use crate::core::ics03_connection::connection::ConnectionEnd; +use crate::core::ics03_connection::error::Error as Ics03Error; +use crate::core::ics04_channel::channel::{ChannelEnd, Counterparty, Order}; +use crate::core::ics04_channel::commitment::{AcknowledgementCommitment, PacketCommitment}; +use crate::core::ics04_channel::context::{ChannelKeeper, ChannelReader}; use crate::core::ics04_channel::error::Error; use crate::core::ics04_channel::packet::{Receipt, Sequence}; use crate::core::ics04_channel::Version; use crate::core::ics05_port::context::PortReader; use crate::core::ics05_port::error::Error as PortError; use crate::core::ics24_host::identifier::{ChannelId, ClientId, ConnectionId, PortId}; -use crate::core::ics26_routing::context::{Module, ModuleId, ModuleOutputBuilder}; +use crate::core::ics26_routing::context::{Module, ModuleId, ModuleOutputBuilder, ReaderContext}; use crate::mock::context::MockIbcStore; -use crate::prelude::*; use crate::signer::Signer; use crate::timestamp::Timestamp; use crate::Height; @@ -101,7 +111,7 @@ impl HostFunctionsProvider for Crypto { .map(|val| val.to_vec()) } - fn ed25519_verify(_signature: &[u8; 64], _value: &[u8; 32], public_key: &[u8; 32]) -> bool { + fn ed25519_verify(_signature: &[u8; 64], _value: &[u8; 32], _public_key: &[u8]) -> bool { true } @@ -134,3 +144,349 @@ impl HostFunctionsProvider for Crypto { sp_io::hashing::sha2_256(data) } } + +impl Ics20Keeper for DummyTransferModule { + type AccountId = Signer; +} + +impl ChannelKeeper for DummyTransferModule { + fn store_packet_commitment( + &mut self, + key: (PortId, ChannelId, Sequence), + commitment: PacketCommitment, + ) -> Result<(), Error> { + self.ibc_store + .lock() + .unwrap() + .packet_commitment + .insert(key, commitment); + Ok(()) + } + + fn delete_packet_commitment( + &mut self, + _key: (PortId, ChannelId, Sequence), + ) -> Result<(), Error> { + unimplemented!() + } + + fn store_packet_receipt( + &mut self, + _key: (PortId, ChannelId, Sequence), + _receipt: Receipt, + ) -> Result<(), Error> { + unimplemented!() + } + + fn store_packet_acknowledgement( + &mut self, + _key: (PortId, ChannelId, Sequence), + _ack: AcknowledgementCommitment, + ) -> Result<(), Error> { + unimplemented!() + } + + fn delete_packet_acknowledgement( + &mut self, + _key: (PortId, ChannelId, Sequence), + ) -> Result<(), Error> { + 
unimplemented!() + } + + fn store_connection_channels( + &mut self, + _conn_id: ConnectionId, + _port_channel_id: &(PortId, ChannelId), + ) -> Result<(), Error> { + unimplemented!() + } + + fn store_channel( + &mut self, + _port_channel_id: (PortId, ChannelId), + _channel_end: &ChannelEnd, + ) -> Result<(), Error> { + unimplemented!() + } + + fn store_next_sequence_send( + &mut self, + port_channel_id: (PortId, ChannelId), + seq: Sequence, + ) -> Result<(), Error> { + self.ibc_store + .lock() + .unwrap() + .next_sequence_send + .insert(port_channel_id, seq); + Ok(()) + } + + fn store_next_sequence_recv( + &mut self, + _port_channel_id: (PortId, ChannelId), + _seq: Sequence, + ) -> Result<(), Error> { + unimplemented!() + } + + fn store_next_sequence_ack( + &mut self, + _port_channel_id: (PortId, ChannelId), + _seq: Sequence, + ) -> Result<(), Error> { + unimplemented!() + } + + fn increase_channel_counter(&mut self) { + unimplemented!() + } + + fn store_packet( + &mut self, + _key: (PortId, ChannelId, Sequence), + _packet: crate::core::ics04_channel::packet::Packet, + ) -> Result<(), Error> { + Ok(()) + } +} + +impl PortReader for DummyTransferModule { + fn lookup_module_by_port(&self, _port_id: &PortId) -> Result { + unimplemented!() + } +} + +impl BankKeeper for DummyTransferModule { + type AccountId = Signer; + + fn send_coins( + &mut self, + _from: &Self::AccountId, + _to: &Self::AccountId, + _amt: &PrefixedCoin, + ) -> Result<(), Ics20Error> { + Ok(()) + } + + fn mint_coins( + &mut self, + _account: &Self::AccountId, + _amt: &PrefixedCoin, + ) -> Result<(), Ics20Error> { + Ok(()) + } + + fn burn_coins( + &mut self, + _account: &Self::AccountId, + _amt: &PrefixedCoin, + ) -> Result<(), Ics20Error> { + Ok(()) + } +} + +impl Ics20Reader for DummyTransferModule { + type AccountId = Signer; + + fn get_port(&self) -> Result { + Ok(PortId::transfer()) + } + + fn is_send_enabled(&self) -> bool { + true + } + + fn is_receive_enabled(&self) -> bool { + true + } +} + +impl ConnectionReader for DummyTransferModule { + fn connection_end(&self, cid: &ConnectionId) -> Result { + match self.ibc_store.lock().unwrap().connections.get(cid) { + Some(connection_end) => Ok(connection_end.clone()), + None => Err(Ics03Error::connection_not_found(cid.clone())), + } + } + + fn host_oldest_height(&self) -> Height { + todo!() + } + + fn commitment_prefix(&self) -> crate::core::ics23_commitment::commitment::CommitmentPrefix { + todo!() + } + + fn connection_counter(&self) -> Result { + todo!() + } +} + +impl ClientReader for DummyTransferModule { + fn client_state(&self, client_id: &ClientId) -> Result { + match self.ibc_store.lock().unwrap().clients.get(client_id) { + Some(client_record) => client_record + .client_state + .clone() + .ok_or_else(|| Ics02Error::client_not_found(client_id.clone())), + None => Err(Ics02Error::client_not_found(client_id.clone())), + } + } + + fn host_height(&self) -> Height { + Height::zero() + } + + fn host_consensus_state(&self, _height: Height) -> Result { + unimplemented!() + } + + fn consensus_state( + &self, + client_id: &ClientId, + height: Height, + ) -> Result { + match self.ibc_store.lock().unwrap().clients.get(client_id) { + Some(client_record) => match client_record.consensus_states.get(&height) { + Some(consensus_state) => Ok(consensus_state.clone()), + None => Err(Ics02Error::consensus_state_not_found( + client_id.clone(), + height, + )), + }, + None => Err(Ics02Error::consensus_state_not_found( + client_id.clone(), + height, + )), + } + } + + fn client_type( + &self, + 
_client_id: &ClientId, + ) -> Result { + todo!() + } + + fn next_consensus_state( + &self, + _client_id: &ClientId, + _height: Height, + ) -> Result, Ics02Error> { + todo!() + } + + fn prev_consensus_state( + &self, + _client_id: &ClientId, + _height: Height, + ) -> Result, Ics02Error> { + todo!() + } + + fn host_timestamp(&self) -> Timestamp { + todo!() + } + + fn client_counter(&self) -> Result { + todo!() + } +} + +impl ChannelReader for DummyTransferModule { + fn channel_end(&self, pcid: &(PortId, ChannelId)) -> Result { + match self.ibc_store.lock().unwrap().channels.get(pcid) { + Some(channel_end) => Ok(channel_end.clone()), + None => Err(Error::channel_not_found(pcid.0.clone(), pcid.1)), + } + } + + fn connection_channels(&self, _cid: &ConnectionId) -> Result, Error> { + unimplemented!() + } + + fn get_next_sequence_send( + &self, + port_channel_id: &(PortId, ChannelId), + ) -> Result { + match self + .ibc_store + .lock() + .unwrap() + .next_sequence_send + .get(port_channel_id) + { + Some(sequence) => Ok(*sequence), + None => Err(Error::missing_next_send_seq(port_channel_id.clone())), + } + } + + fn get_next_sequence_recv( + &self, + _port_channel_id: &(PortId, ChannelId), + ) -> Result { + unimplemented!() + } + + fn get_next_sequence_ack( + &self, + _port_channel_id: &(PortId, ChannelId), + ) -> Result { + unimplemented!() + } + + fn get_packet_commitment( + &self, + _key: &(PortId, ChannelId, Sequence), + ) -> Result { + unimplemented!() + } + + fn get_packet_receipt(&self, _key: &(PortId, ChannelId, Sequence)) -> Result { + unimplemented!() + } + + fn get_packet_acknowledgement( + &self, + _key: &(PortId, ChannelId, Sequence), + ) -> Result { + unimplemented!() + } + + fn hash(&self, value: Vec) -> Vec { + use sha2::Digest; + + sha2::Sha256::digest(value).to_vec() + } + + fn client_update_time( + &self, + _client_id: &ClientId, + _height: Height, + ) -> Result { + unimplemented!() + } + + fn client_update_height( + &self, + _client_id: &ClientId, + _height: Height, + ) -> Result { + unimplemented!() + } + + fn channel_counter(&self) -> Result { + unimplemented!() + } + + fn max_expected_time_per_block(&self) -> Duration { + unimplemented!() + } +} + +impl Ics20Context for DummyTransferModule { + type AccountId = Signer; +} + +impl ReaderContext for DummyTransferModule {} diff --git a/proto-compiler/Cargo.lock b/proto-compiler/Cargo.lock index 759acf3513..3b69bae13d 100644 --- a/proto-compiler/Cargo.lock +++ b/proto-compiler/Cargo.lock @@ -350,7 +350,7 @@ checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" [[package]] name = "hyper" -version = "0.14.19" +version = "0.15.04" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42dc3c131584288d375f2d07f822b0cb012d8c6fb899a5b9fdb3cb7eb9b6004f" dependencies = [ From 060b3bd9a621cfad4192e39fd3108e8ebfbf78f9 Mon Sep 17 00:00:00 2001 From: Web3 Philosopher Date: Thu, 2 Jun 2022 13:36:50 +0100 Subject: [PATCH 45/96] Introduces tendermint host functions (#13) * Introduces tendermint host functions * ok it compiles now --- Cargo.lock | 63 ++++++++++++++++--- modules/Cargo.toml | 9 ++- modules/src/clients/host_functions.rs | 27 ++++++-- .../clients/ics07_tendermint/client_def.rs | 12 ++-- modules/src/core/ics02_client/client_def.rs | 10 +-- modules/src/test_utils.rs | 2 +- proto/Cargo.toml | 3 +- 7 files changed, 99 insertions(+), 27 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 31609eac53..313aad9576 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1684,9 +1684,9 @@ dependencies = [ 
"sp-trie 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", "subtle-encoding", "subxt", - "tendermint", + "tendermint 0.23.7 (git+https://github.com/composableFi/tendermint-rs?branch=seun-0.23.7)", "tendermint-light-client-verifier", - "tendermint-proto", + "tendermint-proto 0.23.7 (git+https://github.com/composableFi/tendermint-rs?branch=seun-0.23.7)", "tendermint-rpc", "tendermint-testgen", "test-log", @@ -1707,7 +1707,7 @@ dependencies = [ "prost-types", "schemars", "serde", - "tendermint-proto", + "tendermint-proto 0.23.7 (git+https://github.com/composableFi/tendermint-rs?branch=seun-0.23.7)", "tonic", ] @@ -4630,7 +4630,35 @@ dependencies = [ "signature", "subtle", "subtle-encoding", - "tendermint-proto", + "tendermint-proto 0.23.7 (registry+https://github.com/rust-lang/crates.io-index)", + "time", + "zeroize", +] + +[[package]] +name = "tendermint" +version = "0.23.7" +source = "git+https://github.com/composableFi/tendermint-rs?branch=seun-0.23.7#cb79c164e84c3d9176b374f2b227393fe000e691" +dependencies = [ + "async-trait", + "bytes", + "ed25519", + "ed25519-dalek", + "flex-error", + "futures", + "num-traits", + "once_cell", + "prost", + "prost-types", + "serde", + "serde_bytes", + "serde_json", + "serde_repr", + "sha2 0.9.9", + "signature", + "subtle", + "subtle-encoding", + "tendermint-proto 0.23.7 (git+https://github.com/composableFi/tendermint-rs?branch=seun-0.23.7)", "time", "zeroize", ] @@ -4643,7 +4671,7 @@ dependencies = [ "flex-error", "serde", "serde_json", - "tendermint", + "tendermint 0.23.7 (registry+https://github.com/rust-lang/crates.io-index)", "toml", "url", ] @@ -4656,7 +4684,24 @@ dependencies = [ "derive_more", "flex-error", "serde", - "tendermint", + "tendermint 0.23.7 (git+https://github.com/composableFi/tendermint-rs?branch=seun-0.23.7)", + "time", +] + +[[package]] +name = "tendermint-proto" +version = "0.23.7" +source = "git+https://github.com/composableFi/tendermint-rs?branch=seun-0.23.7#cb79c164e84c3d9176b374f2b227393fe000e691" +dependencies = [ + "bytes", + "flex-error", + "num-derive", + "num-traits", + "prost", + "prost-types", + "serde", + "serde_bytes", + "subtle-encoding", "time", ] @@ -4698,9 +4743,9 @@ dependencies = [ "serde_bytes", "serde_json", "subtle-encoding", - "tendermint", + "tendermint 0.23.7 (registry+https://github.com/rust-lang/crates.io-index)", "tendermint-config", - "tendermint-proto", + "tendermint-proto 0.23.7 (registry+https://github.com/rust-lang/crates.io-index)", "thiserror", "time", "tokio", @@ -4721,7 +4766,7 @@ dependencies = [ "serde_json", "simple-error", "tempfile", - "tendermint", + "tendermint 0.23.7 (registry+https://github.com/rust-lang/crates.io-index)", "time", ] diff --git a/modules/Cargo.toml b/modules/Cargo.toml index ab11162874..c893b242b4 100644 --- a/modules/Cargo.toml +++ b/modules/Cargo.toml @@ -68,15 +68,18 @@ sp-trie = { git = "https://github.com/paritytech/substrate", branch = "polkadot- sp-io = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.22", default-features = false, optional = true } [dependencies.tendermint] -version = "=0.23.7" +git = "https://github.com/composableFi/tendermint-rs" +branch = "seun-0.23.7" default-features = false [dependencies.tendermint-proto] -version = "=0.23.7" +git = "https://github.com/composableFi/tendermint-rs" +branch = "seun-0.23.7" default-features = false [dependencies.tendermint-light-client-verifier] -version = "=0.23.7" +git = "https://github.com/composableFi/tendermint-rs" +branch = "seun-0.23.7" default-features 
= false [dependencies.tendermint-testgen] diff --git a/modules/src/clients/host_functions.rs b/modules/src/clients/host_functions.rs index a0a6f352eb..a869bd2125 100644 --- a/modules/src/clients/host_functions.rs +++ b/modules/src/clients/host_functions.rs @@ -5,7 +5,7 @@ use sp_core::H256; /// This trait captures all the functions that the host chain should provide for /// crypto operations. -pub trait HostFunctionsProvider: Clone { +pub trait HostFunctionsProvider: Clone + Send + Sync + Default { /// Keccak 256 hash function fn keccak_256(input: &[u8]) -> [u8; 32]; @@ -15,8 +15,8 @@ pub trait HostFunctionsProvider: Clone { value: &[u8; 32], ) -> Option>; - /// Recover the ED25519 pubkey that produced this signature - fn ed25519_verify(signature: &[u8; 64], value: &[u8; 32], pubkey: &[u8]) -> bool; + /// Recover the ED25519 pubkey that produced this signature, given a arbitrarily sized message + fn ed25519_verify(signature: &[u8; 64], msg: &[u8], pubkey: &[u8]) -> bool; /// This function should verify membership in a trie proof using parity's sp-trie package /// with a BlakeTwo256 Hasher @@ -42,7 +42,7 @@ pub trait HostFunctionsProvider: Clone { /// This is a work around that allows us to have one super trait [`HostFunctionsProvider`] /// that encapsulates all the needed host functions by different subsytems, and then /// implement the needed traits through this wrapper. -#[derive(Clone)] +#[derive(Clone, Debug, Default)] pub struct HostFunctionsManager(PhantomData); // implementation for beefy host functions @@ -61,3 +61,22 @@ where T::secp256k1_ecdsa_recover_compressed(signature, value) } } + +impl tendermint_light_client_verifier::host_functions::HostFunctionsProvider for HostFunctionsManager + where + T: HostFunctionsProvider, +{ + fn sha2_256(preimage: &[u8]) -> [u8; 32] { + T::sha256_digest(preimage) + } + + fn ed25519_verify(sig: &[u8], msg: &[u8], pub_key: &[u8]) -> bool { + let mut signature = [0u8; 64]; + signature.copy_from_slice(sig); + T::ed25519_verify(&signature, msg, pub_key) + } + + fn secp256k1_verify(_sig: &[u8], _message: &[u8], _public: &[u8]) -> bool { + unimplemented!() + } +} diff --git a/modules/src/clients/ics07_tendermint/client_def.rs b/modules/src/clients/ics07_tendermint/client_def.rs index 8f3250d57f..dbc6432af2 100644 --- a/modules/src/clients/ics07_tendermint/client_def.rs +++ b/modules/src/clients/ics07_tendermint/client_def.rs @@ -6,6 +6,7 @@ use prost::Message; use tendermint_light_client_verifier::types::{TrustedBlockState, UntrustedBlockState}; use tendermint_light_client_verifier::{ProdVerifier, Verdict, Verifier}; use tendermint_proto::Protobuf; +use crate::clients::host_functions::{HostFunctionsManager, HostFunctionsProvider}; use crate::clients::ics07_tendermint::client_state::ClientState; use crate::clients::ics07_tendermint::consensus_state::ConsensusState; @@ -36,12 +37,15 @@ use crate::downcast; use crate::prelude::*; use crate::Height; -#[derive(Clone, Debug, Default, PartialEq, Eq)] -pub struct TendermintClient { - verifier: ProdVerifier, +#[derive(Clone, Debug, Default)] +pub struct TendermintClient { + verifier: ProdVerifier>, } -impl ClientDef for TendermintClient { +impl ClientDef for TendermintClient + where + H: HostFunctionsProvider +{ type Header = Header; type ClientState = ClientState; type ConsensusState = ConsensusState; diff --git a/modules/src/core/ics02_client/client_def.rs b/modules/src/core/ics02_client/client_def.rs index 08470e7a29..a87776eb67 100644 --- a/modules/src/core/ics02_client/client_def.rs +++ 
b/modules/src/core/ics02_client/client_def.rs @@ -206,19 +206,19 @@ pub trait ClientDef: Clone { ) -> Result<(), Error>; } -#[derive(Clone, Debug, PartialEq, Eq)] -pub enum AnyClient { - Tendermint(TendermintClient), +#[derive(Clone, Debug)] +pub enum AnyClient { + Tendermint(TendermintClient), Beefy(BeefyClient), Near(BeefyClient), #[cfg(any(test, feature = "mocks"))] Mock(MockClient), } -impl AnyClient { +impl AnyClient { pub fn from_client_type(client_type: ClientType) -> Self { match client_type { - ClientType::Tendermint => Self::Tendermint(TendermintClient::default()), + ClientType::Tendermint => Self::Tendermint(TendermintClient::::default()), ClientType::Beefy => Self::Beefy(BeefyClient::::default()), ClientType::Near => Self::Near(BeefyClient::::default()), #[cfg(any(test, feature = "mocks"))] diff --git a/modules/src/test_utils.rs b/modules/src/test_utils.rs index 23bc3df164..e8a28ef566 100644 --- a/modules/src/test_utils.rs +++ b/modules/src/test_utils.rs @@ -111,7 +111,7 @@ impl HostFunctionsProvider for Crypto { .map(|val| val.to_vec()) } - fn ed25519_verify(_signature: &[u8; 64], _value: &[u8; 32], _public_key: &[u8]) -> bool { + fn ed25519_verify(_signature: &[u8; 64], _msg: &[u8], _pubkey: &[u8]) -> bool { true } diff --git a/proto/Cargo.toml b/proto/Cargo.toml index 3c0dcb08b7..aea7b466e4 100644 --- a/proto/Cargo.toml +++ b/proto/Cargo.toml @@ -31,7 +31,8 @@ schemars = { version = "0.8", optional = true } base64 = { version = "0.13", default-features = false, features = ["alloc"] } [dependencies.tendermint-proto] -version = "=0.23.7" +git = "https://github.com/composableFi/tendermint-rs" +branch = "seun-0.23.7" default-features = false [features] From 3b9b85ae76586a13a62e23d645713c07d48cb55d Mon Sep 17 00:00:00 2001 From: Web3 Philosopher Date: Thu, 2 Jun 2022 16:20:17 +0100 Subject: [PATCH 46/96] adds support for ics23 `HostFunctions` (#14) adds support for ics23 HostFunctions --- modules/Cargo.toml | 2 +- modules/src/clients/host_functions.rs | 42 +++++++++++++++++++ .../clients/ics07_tendermint/client_def.rs | 38 ++++++++++------- .../src/core/ics23_commitment/commitment.rs | 4 +- modules/src/core/ics23_commitment/merkle.rs | 34 ++++++++------- 5 files changed, 87 insertions(+), 33 deletions(-) diff --git a/modules/Cargo.toml b/modules/Cargo.toml index c893b242b4..486c5695a0 100644 --- a/modules/Cargo.toml +++ b/modules/Cargo.toml @@ -41,7 +41,7 @@ mocks = ["tendermint-testgen", "clock", "std", "sp-io", "sp-io/std"] # Proto definitions for all IBC-related interfaces, e.g., connections or channels. 
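Editor's aside (a hedged sketch, not part of the patch series): patches 45 and 46 make the ICS-07 light client generic over a single `HostFunctionsProvider`, with `HostFunctionsManager<T>` adapting that one super-trait to the separate host-function traits expected by the forked tendermint verifier and by the ics23 fork. Assuming the signatures shown in these diffs and the `Crypto` test provider defined later in modules/src/test_utils.rs, wiring a concrete provider into the client could look roughly like this, for example inside a test in the modules crate:

// Hedged sketch only; module paths are taken from the diffs above, and `Crypto`
// is the mock HostFunctionsProvider from modules/src/test_utils.rs.
use crate::clients::ics07_tendermint::client_def::TendermintClient;
use crate::core::ics02_client::client_def::AnyClient;
use crate::core::ics02_client::client_type::ClientType;
use crate::test_utils::Crypto;

fn wire_up_generic_tendermint_client() {
    // The verifier inside TendermintClient is a ProdVerifier built over
    // HostFunctionsManager<Crypto>, so hashing and signature checks are
    // routed through the host-provided implementations.
    let _tm = TendermintClient::<Crypto>::default();

    // AnyClient carries the same type parameter when constructed from a
    // client type, as in the from_client_type match shown above.
    let _any = AnyClient::<Crypto>::from_client_type(ClientType::Tendermint);
}

The same parameter then flows into the proof paths: `verify_membership` and `verify_non_membership` take the provider as a generic argument and convert the raw proof into the generic `MerkleProof` type from ics23_commitment::merkle, as the hunks below show.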
borsh = { version = "0.9.3", default-features = false } ibc-proto = { version = "0.18.0", path = "../proto", default-features = false } -ics23 = { version = "=0.8.0-alpha", default-features = false } +ics23 = { git = "https://github.com/composablefi/ics23", branch = "seun-v0.8.0-alpha", default-features = false } time = { version = "0.3", default-features = false } serde_derive = { version = "1.0.104", default-features = false } serde = { version = "1.0", default-features = false } diff --git a/modules/src/clients/host_functions.rs b/modules/src/clients/host_functions.rs index a869bd2125..a50c1b9031 100644 --- a/modules/src/clients/host_functions.rs +++ b/modules/src/clients/host_functions.rs @@ -37,6 +37,21 @@ pub trait HostFunctionsProvider: Clone + Send + Sync + Default { /// Conduct a 256-bit Sha2 hash fn sha256_digest(data: &[u8]) -> [u8; 32]; + + /// The SHA-256 hash algorithm + fn sha2_256(message: &[u8]) -> [u8; 32]; + + /// The SHA-512 hash algorithm + fn sha2_512(message: &[u8]) -> [u8; 64]; + + /// The SHA-512 hash algorithm with its output truncated to 256 bits. + fn sha2_512_truncated(message: &[u8]) -> [u8; 32]; + + /// SHA-3-512 hash function. + fn sha3_512(message: &[u8]) -> [u8; 64]; + + /// Ripemd160 hash function. + fn ripemd160(message: &[u8]) -> [u8; 20]; } /// This is a work around that allows us to have one super trait [`HostFunctionsProvider`] @@ -62,6 +77,7 @@ where } } +// implementation for tendermint functions impl tendermint_light_client_verifier::host_functions::HostFunctionsProvider for HostFunctionsManager where T: HostFunctionsProvider, @@ -80,3 +96,29 @@ impl tendermint_light_client_verifier::host_functions::HostFunctionsProvider unimplemented!() } } + +// implementation for ics23 +impl ics23::HostFunctionsProvider for HostFunctionsManager + where + H: HostFunctionsProvider, +{ + fn sha2_256(message: &[u8]) -> [u8; 32] { + H::sha2_256(message) + } + + fn sha2_512(message: &[u8]) -> [u8; 64] { + H::sha2_512(message) + } + + fn sha2_512_truncated(message: &[u8]) -> [u8; 32] { + H::sha2_512_truncated(message) + } + + fn sha3_512(message: &[u8]) -> [u8; 64] { + H::sha3_512(message) + } + + fn ripemd160(message: &[u8]) -> [u8; 20] { + H::ripemd160(message) + } +} diff --git a/modules/src/clients/ics07_tendermint/client_def.rs b/modules/src/clients/ics07_tendermint/client_def.rs index dbc6432af2..5585952403 100644 --- a/modules/src/clients/ics07_tendermint/client_def.rs +++ b/modules/src/clients/ics07_tendermint/client_def.rs @@ -266,7 +266,7 @@ impl ClientDef for TendermintClient let value = expected_consensus_state .encode_vec() .map_err(Ics02Error::invalid_any_consensus_state)?; - verify_membership(client_state, prefix, proof, root, path, value) + verify_membership::(client_state, prefix, proof, root, path, value) } fn verify_connection_state( @@ -287,7 +287,7 @@ impl ClientDef for TendermintClient let value = expected_connection_end .encode_vec() .map_err(Ics02Error::invalid_connection_end)?; - verify_membership(client_state, prefix, proof, root, path, value) + verify_membership::(client_state, prefix, proof, root, path, value) } fn verify_channel_state( @@ -309,7 +309,7 @@ impl ClientDef for TendermintClient let value = expected_channel_end .encode_vec() .map_err(Ics02Error::invalid_channel_end)?; - verify_membership(client_state, prefix, proof, root, path, value) + verify_membership::(client_state, prefix, proof, root, path, value) } fn verify_client_full_state( @@ -329,7 +329,7 @@ impl ClientDef for TendermintClient let value = expected_client_state 
.encode_vec() .map_err(Ics02Error::invalid_any_client_state)?; - verify_membership(client_state, prefix, proof, root, path, value) + verify_membership::(client_state, prefix, proof, root, path, value) } fn verify_packet_data( @@ -355,7 +355,7 @@ impl ClientDef for TendermintClient sequence, }; - verify_membership( + verify_membership::( client_state, connection_end.counterparty().prefix(), proof, @@ -388,7 +388,7 @@ impl ClientDef for TendermintClient channel_id: *channel_id, sequence, }; - verify_membership( + verify_membership::( client_state, connection_end.counterparty().prefix(), proof, @@ -420,7 +420,7 @@ impl ClientDef for TendermintClient .expect("buffer size too small"); let seq_path = SeqRecvsPath(port_id.clone(), *channel_id); - verify_membership( + verify_membership::( client_state, connection_end.counterparty().prefix(), proof, @@ -451,7 +451,7 @@ impl ClientDef for TendermintClient channel_id: *channel_id, sequence, }; - verify_non_membership( + verify_non_membership::( client_state, connection_end.counterparty().prefix(), proof, @@ -474,16 +474,19 @@ impl ClientDef for TendermintClient } } -fn verify_membership( +fn verify_membership( client_state: &ClientState, prefix: &CommitmentPrefix, proof: &CommitmentProofBytes, root: &CommitmentRoot, - path: impl Into, + path: P, value: Vec, -) -> Result<(), Ics02Error> { +) -> Result<(), Ics02Error> + where + H: HostFunctionsProvider, P: Into, +{ let merkle_path = apply_prefix(prefix, vec![path.into().to_string()]); - let merkle_proof: MerkleProof = RawMerkleProof::try_from(proof.clone()) + let merkle_proof: MerkleProof = RawMerkleProof::try_from(proof.clone()) .map_err(Ics02Error::invalid_commitment_proof)? .into(); @@ -498,15 +501,18 @@ fn verify_membership( .map_err(|e| Ics02Error::tendermint(Error::ics23_error(e))) } -fn verify_non_membership( +fn verify_non_membership( client_state: &ClientState, prefix: &CommitmentPrefix, proof: &CommitmentProofBytes, root: &CommitmentRoot, - path: impl Into, -) -> Result<(), Ics02Error> { + path: P, +) -> Result<(), Ics02Error> + where + H: HostFunctionsProvider, P: Into, +{ let merkle_path = apply_prefix(prefix, vec![path.into().to_string()]); - let merkle_proof: MerkleProof = RawMerkleProof::try_from(proof.clone()) + let merkle_proof: MerkleProof = RawMerkleProof::try_from(proof.clone()) .map_err(Ics02Error::invalid_commitment_proof)? 
.into(); diff --git a/modules/src/core/ics23_commitment/commitment.rs b/modules/src/core/ics23_commitment/commitment.rs index fe77cb854e..8f25479f4e 100644 --- a/modules/src/core/ics23_commitment/commitment.rs +++ b/modules/src/core/ics23_commitment/commitment.rs @@ -90,10 +90,10 @@ impl TryFrom for CommitmentProofBytes { } } -impl TryFrom for CommitmentProofBytes { +impl TryFrom> for CommitmentProofBytes { type Error = ProofError; - fn try_from(value: MerkleProof) -> Result { + fn try_from(value: MerkleProof) -> Result { Self::try_from(RawMerkleProof::from(value)) } } diff --git a/modules/src/core/ics23_commitment/merkle.rs b/modules/src/core/ics23_commitment/merkle.rs index fcad180f62..0cc01b1e83 100644 --- a/modules/src/core/ics23_commitment/merkle.rs +++ b/modules/src/core/ics23_commitment/merkle.rs @@ -1,3 +1,4 @@ +use sp_std::marker::PhantomData; use crate::prelude::*; use tendermint::merkle::proof::Proof as TendermintProof; @@ -9,6 +10,7 @@ use ics23::{ calculate_existence_root, verify_membership, verify_non_membership, CommitmentProof, NonExistenceProof, }; +use crate::clients::host_functions::{HostFunctionsManager, HostFunctionsProvider}; use crate::core::ics23_commitment::commitment::{CommitmentPrefix, CommitmentRoot}; use crate::core::ics23_commitment::error::Error; @@ -29,14 +31,15 @@ impl From for MerkleRoot { } #[derive(Clone, Debug, PartialEq)] -pub struct MerkleProof { +pub struct MerkleProof { pub proofs: Vec, + _phantom: PhantomData, } /// Convert to ics23::CommitmentProof /// The encoding and decoding shouldn't fail since ics23::CommitmentProof and ibc_proto::ics23::CommitmentProof should be the same /// Ref. -impl From for MerkleProof { +impl From for MerkleProof { fn from(proof: RawMerkleProof) -> Self { let proofs: Vec = proof .proofs @@ -47,12 +50,12 @@ impl From for MerkleProof { prost::Message::decode(&*encoded).unwrap() }) .collect(); - Self { proofs } + Self { proofs, _phantom: PhantomData } } } -impl From for RawMerkleProof { - fn from(proof: MerkleProof) -> Self { +impl From> for RawMerkleProof { + fn from(proof: MerkleProof) -> Self { Self { proofs: proof .proofs @@ -67,7 +70,10 @@ impl From for RawMerkleProof { } } -impl MerkleProof { +impl MerkleProof + where + H: HostFunctionsProvider, +{ pub fn verify_membership( &self, specs: &ProofSpecs, @@ -107,9 +113,9 @@ impl MerkleProof { { match &proof.proof { Some(Proof::Exist(existence_proof)) => { - subroot = calculate_existence_root(existence_proof) + subroot = calculate_existence_root::>(existence_proof) .map_err(|_| Error::invalid_merkle_proof())?; - if !verify_membership(proof, spec, &subroot, key.as_bytes(), &value) { + if !verify_membership::>(proof, spec, &subroot, key.as_bytes(), &value) { return Err(Error::verification_failure()); } value = subroot.clone(); @@ -157,8 +163,8 @@ impl MerkleProof { .ok_or_else(Error::invalid_merkle_proof)?; match &proof.proof { Some(Proof::Nonexist(non_existence_proof)) => { - let subroot = calculate_non_existence_root(non_existence_proof)?; - if !verify_non_membership(proof, spec, &subroot, key.as_bytes()) { + let subroot = calculate_non_existence_root::(non_existence_proof)?; + if !verify_non_membership::>(proof, spec, &subroot, key.as_bytes()) { return Err(Error::verification_failure()); } // verify membership proofs starting from index 1 with value = subroot @@ -170,11 +176,11 @@ impl MerkleProof { } // TODO move to ics23 -fn calculate_non_existence_root(proof: &NonExistenceProof) -> Result, Error> { +fn calculate_non_existence_root(proof: &NonExistenceProof) -> Result, 
Error> { if let Some(left) = &proof.left { - calculate_existence_root(left).map_err(|_| Error::invalid_merkle_proof()) + calculate_existence_root::>(left).map_err(|_| Error::invalid_merkle_proof()) } else if let Some(right) = &proof.right { - calculate_existence_root(right).map_err(|_| Error::invalid_merkle_proof()) + calculate_existence_root::>(right).map_err(|_| Error::invalid_merkle_proof()) } else { Err(Error::invalid_merkle_proof()) } @@ -232,7 +238,7 @@ fn calculate_non_existence_root(proof: &NonExistenceProof) -> Result, Er // } // } -pub fn convert_tm_to_ics_merkle_proof(tm_proof: &TendermintProof) -> Result { +pub fn convert_tm_to_ics_merkle_proof(tm_proof: &TendermintProof) -> Result, Error> { let mut proofs = Vec::new(); for op in &tm_proof.ops { From 832eda809637d2b42db023196bedb50dcf358f48 Mon Sep 17 00:00:00 2001 From: Web3 Smith <31099392+Wizdave97@users.noreply.github.com> Date: Mon, 6 Jun 2022 12:58:23 +0100 Subject: [PATCH 47/96] Expose inner value in `Amount` struct (#15) expose amount and fix tests --- Cargo.lock | 63 +++---------------- modules/Cargo.toml | 9 ++- modules/src/applications/transfer/denom.rs | 4 ++ modules/src/clients/host_functions.rs | 11 ++-- .../clients/ics07_tendermint/client_def.rs | 16 ++--- modules/src/core/ics02_client/client_def.rs | 4 +- modules/src/core/ics23_commitment/merkle.rs | 42 +++++++++---- modules/src/test_utils.rs | 44 +++++++++++++ 8 files changed, 112 insertions(+), 81 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 313aad9576..31609eac53 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1684,9 +1684,9 @@ dependencies = [ "sp-trie 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", "subtle-encoding", "subxt", - "tendermint 0.23.7 (git+https://github.com/composableFi/tendermint-rs?branch=seun-0.23.7)", + "tendermint", "tendermint-light-client-verifier", - "tendermint-proto 0.23.7 (git+https://github.com/composableFi/tendermint-rs?branch=seun-0.23.7)", + "tendermint-proto", "tendermint-rpc", "tendermint-testgen", "test-log", @@ -1707,7 +1707,7 @@ dependencies = [ "prost-types", "schemars", "serde", - "tendermint-proto 0.23.7 (git+https://github.com/composableFi/tendermint-rs?branch=seun-0.23.7)", + "tendermint-proto", "tonic", ] @@ -4630,35 +4630,7 @@ dependencies = [ "signature", "subtle", "subtle-encoding", - "tendermint-proto 0.23.7 (registry+https://github.com/rust-lang/crates.io-index)", - "time", - "zeroize", -] - -[[package]] -name = "tendermint" -version = "0.23.7" -source = "git+https://github.com/composableFi/tendermint-rs?branch=seun-0.23.7#cb79c164e84c3d9176b374f2b227393fe000e691" -dependencies = [ - "async-trait", - "bytes", - "ed25519", - "ed25519-dalek", - "flex-error", - "futures", - "num-traits", - "once_cell", - "prost", - "prost-types", - "serde", - "serde_bytes", - "serde_json", - "serde_repr", - "sha2 0.9.9", - "signature", - "subtle", - "subtle-encoding", - "tendermint-proto 0.23.7 (git+https://github.com/composableFi/tendermint-rs?branch=seun-0.23.7)", + "tendermint-proto", "time", "zeroize", ] @@ -4671,7 +4643,7 @@ dependencies = [ "flex-error", "serde", "serde_json", - "tendermint 0.23.7 (registry+https://github.com/rust-lang/crates.io-index)", + "tendermint", "toml", "url", ] @@ -4684,24 +4656,7 @@ dependencies = [ "derive_more", "flex-error", "serde", - "tendermint 0.23.7 (git+https://github.com/composableFi/tendermint-rs?branch=seun-0.23.7)", - "time", -] - -[[package]] -name = "tendermint-proto" -version = "0.23.7" -source = 
"git+https://github.com/composableFi/tendermint-rs?branch=seun-0.23.7#cb79c164e84c3d9176b374f2b227393fe000e691" -dependencies = [ - "bytes", - "flex-error", - "num-derive", - "num-traits", - "prost", - "prost-types", - "serde", - "serde_bytes", - "subtle-encoding", + "tendermint", "time", ] @@ -4743,9 +4698,9 @@ dependencies = [ "serde_bytes", "serde_json", "subtle-encoding", - "tendermint 0.23.7 (registry+https://github.com/rust-lang/crates.io-index)", + "tendermint", "tendermint-config", - "tendermint-proto 0.23.7 (registry+https://github.com/rust-lang/crates.io-index)", + "tendermint-proto", "thiserror", "time", "tokio", @@ -4766,7 +4721,7 @@ dependencies = [ "serde_json", "simple-error", "tempfile", - "tendermint 0.23.7 (registry+https://github.com/rust-lang/crates.io-index)", + "tendermint", "time", ] diff --git a/modules/Cargo.toml b/modules/Cargo.toml index 486c5695a0..e0b16cd331 100644 --- a/modules/Cargo.toml +++ b/modules/Cargo.toml @@ -83,7 +83,8 @@ branch = "seun-0.23.7" default-features = false [dependencies.tendermint-testgen] -version = "=0.23.7" +git = "https://github.com/composableFi/tendermint-rs" +branch = "seun-0.23.7" optional = true default-features = false @@ -93,14 +94,16 @@ tracing-subscriber = { version = "0.3.11", features = ["fmt", "env-filter", "jso test-log = { version = "0.2.10", features = ["trace"] } modelator = "0.4.2" sha2 = { version = "0.10.2" } -tendermint-rpc = { version = "=0.23.7", features = ["http-client", "websocket-client"] } -tendermint-testgen = { version = "=0.23.7" } # Needed for generating (synthetic) light blocks. +tendermint-rpc = { git = "https://github.com/composableFi/tendermint-rs", branch = "seun-0.23.7", features = ["http-client", "websocket-client"] } +tendermint-testgen = { git = "https://github.com/composableFi/tendermint-rs", branch = "seun-0.23.7" } # Needed for generating (synthetic) light blocks. 
# Beefy Light Client testing dependencies sp-io = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.22"} subxt = "0.21.0" tokio = { version = "1.17.0", features = ["full"] } serde_json = "1.0.74" beefy-client = { package = "beefy-generic-client", git = "https://github.com/ComposableFi/beefy-client", branch = "master", features = ["mocks"]} +sha3 = { version = "0.10.1" } +ripemd = { version = "0.1.1" } [[test]] name = "mbt" diff --git a/modules/src/applications/transfer/denom.rs b/modules/src/applications/transfer/denom.rs index 998f731a40..d5df947e29 100644 --- a/modules/src/applications/transfer/denom.rs +++ b/modules/src/applications/transfer/denom.rs @@ -289,6 +289,10 @@ impl Amount { pub fn checked_sub(self, rhs: Self) -> Option { self.0.checked_sub(rhs.0).map(Self) } + + pub fn as_u256(&self) -> U256 { + self.0 + } } impl FromStr for Amount { diff --git a/modules/src/clients/host_functions.rs b/modules/src/clients/host_functions.rs index a50c1b9031..1c54a3326e 100644 --- a/modules/src/clients/host_functions.rs +++ b/modules/src/clients/host_functions.rs @@ -78,9 +78,10 @@ where } // implementation for tendermint functions -impl tendermint_light_client_verifier::host_functions::HostFunctionsProvider for HostFunctionsManager - where - T: HostFunctionsProvider, +impl tendermint_light_client_verifier::host_functions::HostFunctionsProvider + for HostFunctionsManager +where + T: HostFunctionsProvider, { fn sha2_256(preimage: &[u8]) -> [u8; 32] { T::sha256_digest(preimage) @@ -99,8 +100,8 @@ impl tendermint_light_client_verifier::host_functions::HostFunctionsProvider // implementation for ics23 impl ics23::HostFunctionsProvider for HostFunctionsManager - where - H: HostFunctionsProvider, +where + H: HostFunctionsProvider, { fn sha2_256(message: &[u8]) -> [u8; 32] { H::sha2_256(message) diff --git a/modules/src/clients/ics07_tendermint/client_def.rs b/modules/src/clients/ics07_tendermint/client_def.rs index 5585952403..5b953f0e1b 100644 --- a/modules/src/clients/ics07_tendermint/client_def.rs +++ b/modules/src/clients/ics07_tendermint/client_def.rs @@ -1,12 +1,12 @@ use core::convert::TryInto; use core::fmt::Debug; +use crate::clients::host_functions::{HostFunctionsManager, HostFunctionsProvider}; use ibc_proto::ibc::core::commitment::v1::MerkleProof as RawMerkleProof; use prost::Message; use tendermint_light_client_verifier::types::{TrustedBlockState, UntrustedBlockState}; use tendermint_light_client_verifier::{ProdVerifier, Verdict, Verifier}; use tendermint_proto::Protobuf; -use crate::clients::host_functions::{HostFunctionsManager, HostFunctionsProvider}; use crate::clients::ics07_tendermint::client_state::ClientState; use crate::clients::ics07_tendermint::consensus_state::ConsensusState; @@ -43,8 +43,8 @@ pub struct TendermintClient { } impl ClientDef for TendermintClient - where - H: HostFunctionsProvider +where + H: HostFunctionsProvider, { type Header = Header; type ClientState = ClientState; @@ -482,8 +482,9 @@ fn verify_membership( path: P, value: Vec, ) -> Result<(), Ics02Error> - where - H: HostFunctionsProvider, P: Into, +where + H: HostFunctionsProvider, + P: Into, { let merkle_path = apply_prefix(prefix, vec![path.into().to_string()]); let merkle_proof: MerkleProof = RawMerkleProof::try_from(proof.clone()) @@ -508,8 +509,9 @@ fn verify_non_membership( root: &CommitmentRoot, path: P, ) -> Result<(), Ics02Error> - where - H: HostFunctionsProvider, P: Into, +where + H: HostFunctionsProvider, + P: Into, { let merkle_path = apply_prefix(prefix, 
vec![path.into().to_string()]); let merkle_proof: MerkleProof = RawMerkleProof::try_from(proof.clone()) diff --git a/modules/src/core/ics02_client/client_def.rs b/modules/src/core/ics02_client/client_def.rs index a87776eb67..d4ae84d88c 100644 --- a/modules/src/core/ics02_client/client_def.rs +++ b/modules/src/core/ics02_client/client_def.rs @@ -218,7 +218,9 @@ pub enum AnyClient { impl AnyClient { pub fn from_client_type(client_type: ClientType) -> Self { match client_type { - ClientType::Tendermint => Self::Tendermint(TendermintClient::::default()), + ClientType::Tendermint => { + Self::Tendermint(TendermintClient::::default()) + } ClientType::Beefy => Self::Beefy(BeefyClient::::default()), ClientType::Near => Self::Near(BeefyClient::::default()), #[cfg(any(test, feature = "mocks"))] diff --git a/modules/src/core/ics23_commitment/merkle.rs b/modules/src/core/ics23_commitment/merkle.rs index 0cc01b1e83..e208ec5af7 100644 --- a/modules/src/core/ics23_commitment/merkle.rs +++ b/modules/src/core/ics23_commitment/merkle.rs @@ -1,7 +1,8 @@ -use sp_std::marker::PhantomData; use crate::prelude::*; +use sp_std::marker::PhantomData; use tendermint::merkle::proof::Proof as TendermintProof; +use crate::clients::host_functions::{HostFunctionsManager, HostFunctionsProvider}; use ibc_proto::ibc::core::commitment::v1::MerklePath; use ibc_proto::ibc::core::commitment::v1::MerkleProof as RawMerkleProof; use ibc_proto::ibc::core::commitment::v1::MerkleRoot; @@ -10,7 +11,6 @@ use ics23::{ calculate_existence_root, verify_membership, verify_non_membership, CommitmentProof, NonExistenceProof, }; -use crate::clients::host_functions::{HostFunctionsManager, HostFunctionsProvider}; use crate::core::ics23_commitment::commitment::{CommitmentPrefix, CommitmentRoot}; use crate::core::ics23_commitment::error::Error; @@ -50,7 +50,10 @@ impl From for MerkleProof { prost::Message::decode(&*encoded).unwrap() }) .collect(); - Self { proofs, _phantom: PhantomData } + Self { + proofs, + _phantom: PhantomData, + } } } @@ -71,8 +74,8 @@ impl From> for RawMerkleProof { } impl MerkleProof - where - H: HostFunctionsProvider, +where + H: HostFunctionsProvider, { pub fn verify_membership( &self, @@ -115,7 +118,13 @@ impl MerkleProof Some(Proof::Exist(existence_proof)) => { subroot = calculate_existence_root::>(existence_proof) .map_err(|_| Error::invalid_merkle_proof())?; - if !verify_membership::>(proof, spec, &subroot, key.as_bytes(), &value) { + if !verify_membership::>( + proof, + spec, + &subroot, + key.as_bytes(), + &value, + ) { return Err(Error::verification_failure()); } value = subroot.clone(); @@ -164,7 +173,12 @@ impl MerkleProof match &proof.proof { Some(Proof::Nonexist(non_existence_proof)) => { let subroot = calculate_non_existence_root::(non_existence_proof)?; - if !verify_non_membership::>(proof, spec, &subroot, key.as_bytes()) { + if !verify_non_membership::>( + proof, + spec, + &subroot, + key.as_bytes(), + ) { return Err(Error::verification_failure()); } // verify membership proofs starting from index 1 with value = subroot @@ -176,11 +190,15 @@ impl MerkleProof } // TODO move to ics23 -fn calculate_non_existence_root(proof: &NonExistenceProof) -> Result, Error> { +fn calculate_non_existence_root( + proof: &NonExistenceProof, +) -> Result, Error> { if let Some(left) = &proof.left { - calculate_existence_root::>(left).map_err(|_| Error::invalid_merkle_proof()) + calculate_existence_root::>(left) + .map_err(|_| Error::invalid_merkle_proof()) } else if let Some(right) = &proof.right { - 
calculate_existence_root::>(right).map_err(|_| Error::invalid_merkle_proof()) + calculate_existence_root::>(right) + .map_err(|_| Error::invalid_merkle_proof()) } else { Err(Error::invalid_merkle_proof()) } @@ -238,7 +256,9 @@ fn calculate_non_existence_root(proof: &NonExistencePr // } // } -pub fn convert_tm_to_ics_merkle_proof(tm_proof: &TendermintProof) -> Result, Error> { +pub fn convert_tm_to_ics_merkle_proof( + tm_proof: &TendermintProof, +) -> Result, Error> { let mut proofs = Vec::new(); for op in &tm_proof.ops { diff --git a/modules/src/test_utils.rs b/modules/src/test_utils.rs index e8a28ef566..6f8b61a1c4 100644 --- a/modules/src/test_utils.rs +++ b/modules/src/test_utils.rs @@ -143,6 +143,50 @@ impl HostFunctionsProvider for Crypto { fn sha256_digest(data: &[u8]) -> [u8; 32] { sp_io::hashing::sha2_256(data) } + + fn sha2_256(message: &[u8]) -> [u8; 32] { + sp_io::hashing::sha2_256(message) + } + + fn sha2_512(message: &[u8]) -> [u8; 64] { + use sha2::Digest; + let mut hasher = sha2::Sha512::new(); + hasher.update(message); + let hash = hasher.finalize(); + let mut res = [0u8; 64]; + res.copy_from_slice(&hash); + res + } + + fn sha2_512_truncated(message: &[u8]) -> [u8; 32] { + use sha2::Digest; + let mut hasher = sha2::Sha512::new(); + hasher.update(message); + let hash = hasher.finalize(); + let mut res = [0u8; 32]; + res.copy_from_slice(&hash[..32]); + res + } + + fn sha3_512(message: &[u8]) -> [u8; 64] { + use sha3::Digest; + let mut hasher = sha3::Sha3_512::new(); + hasher.update(message); + let hash = hasher.finalize(); + let mut res = [0u8; 64]; + res.copy_from_slice(&hash); + res + } + + fn ripemd160(message: &[u8]) -> [u8; 20] { + use ripemd::Digest; + let mut hasher = ripemd::Ripemd160::new(); + hasher.update(message); + let hash = hasher.finalize(); + let mut res = [0u8; 20]; + res.copy_from_slice(&hash); + res + } } impl Ics20Keeper for DummyTransferModule { From fa95c263392747d86702241f3fe6299f134540a3 Mon Sep 17 00:00:00 2001 From: Web3 Smith <31099392+Wizdave97@users.noreply.github.com> Date: Mon, 6 Jun 2022 14:42:40 +0100 Subject: [PATCH 48/96] Fix build when `mocks` is enabled (#16) fix build in when mocks is enabled --- modules/Cargo.toml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/modules/Cargo.toml b/modules/Cargo.toml index e0b16cd331..f09d992cc0 100644 --- a/modules/Cargo.toml +++ b/modules/Cargo.toml @@ -35,7 +35,7 @@ clock = ["tendermint/clock", "time/std"] # This feature grants access to development-time mocking libraries, such as `MockContext` or `MockHeader`. # Depends on the `testgen` suite for generating Tendermint light blocks. -mocks = ["tendermint-testgen", "clock", "std", "sp-io", "sp-io/std"] +mocks = ["tendermint-testgen", "clock", "std", "sp-io", "sp-io/std", "sha3", "ripemd"] [dependencies] # Proto definitions for all IBC-related interfaces, e.g., connections or channels. 
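Editor's aside (a hedged sketch, not part of the patch series): the hash helpers that `Crypto` gained in the previous patch are thin wrappers over the `sha2`, `sha3` and `ripemd` crates, and by construction `sha2_512_truncated` returns the first 32 bytes of the full SHA-512 digest. A small unit test along these lines could pin that relationship down in modules/src/test_utils.rs; the test module name here is made up:

#[cfg(test)]
mod host_function_hash_tests {
    use super::Crypto;
    use crate::clients::host_functions::HostFunctionsProvider;

    #[test]
    fn sha2_512_truncated_is_a_prefix_of_sha2_512() {
        let msg = b"ibc host functions";
        let full = Crypto::sha2_512(msg);
        let truncated = Crypto::sha2_512_truncated(msg);
        // The truncated variant should always agree with the leading 32 bytes
        // of the untruncated digest for the same input.
        assert_eq!(&full[..32], &truncated[..]);
    }
}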
@@ -66,6 +66,8 @@ codec = { package = "parity-scale-codec", version = "3.0.0", default-features = sp-std = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.22", default-features = false } sp-trie = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.22", default-features = false } sp-io = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.22", default-features = false, optional = true } +sha3 = { version = "0.10.1", optional = true } +ripemd = { version = "0.1.1", optional = true } [dependencies.tendermint] git = "https://github.com/composableFi/tendermint-rs" From 450cf67b902e586c8aa1e69a5d1e87fc05a284cf Mon Sep 17 00:00:00 2001 From: Web3 Smith <31099392+Wizdave97@users.noreply.github.com> Date: Tue, 7 Jun 2022 14:24:28 +0100 Subject: [PATCH 49/96] Create getters for some struct fields (#17) --- modules/src/applications/transfer/denom.rs | 14 ++++++++++++++ modules/src/mock/client_state.rs | 2 +- 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/modules/src/applications/transfer/denom.rs b/modules/src/applications/transfer/denom.rs index d5df947e29..74a9ad783d 100644 --- a/modules/src/applications/transfer/denom.rs +++ b/modules/src/applications/transfer/denom.rs @@ -23,6 +23,12 @@ pub type BaseCoin = Coin; #[serde(transparent)] pub struct BaseDenom(String); +impl BaseDenom { + pub fn as_str(&self) -> &str { + &self.0 + } +} + impl FromStr for BaseDenom { type Err = Error; @@ -161,6 +167,14 @@ impl PrefixedDenom { pub fn add_trace_prefix(&mut self, prefix: TracePrefix) { self.trace_path.add_prefix(prefix) } + + pub fn trace_path(&self) -> &TracePath { + &self.trace_path + } + + pub fn base_denom(&self) -> &BaseDenom { + &self.base_denom + } } /// Returns true if the denomination originally came from the sender chain and diff --git a/modules/src/mock/client_state.rs b/modules/src/mock/client_state.rs index 2601d09511..720d307866 100644 --- a/modules/src/mock/client_state.rs +++ b/modules/src/mock/client_state.rs @@ -96,7 +96,7 @@ impl ClientState for MockClientState { type UpgradeOptions = (); fn chain_id(&self) -> ChainId { - todo!() + ChainId::default() } fn client_type(&self) -> ClientType { From 23fa4e1a2043c05d797dd2eaa64ebb99d6eb79b7 Mon Sep 17 00:00:00 2001 From: David Salami Date: Thu, 9 Jun 2022 17:06:00 +0100 Subject: [PATCH 50/96] rebase --- Cargo.lock | 56 +------------------ .../src/clients/ics11_beefy/client_state.rs | 21 ++----- proto/src/prost/ibc.lightclients.beefy.v1.rs | 6 +- 3 files changed, 8 insertions(+), 75 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 31609eac53..c79c1573a4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -423,60 +423,6 @@ dependencies = [ "memchr", ] -[[package]] -name = "borsh" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15bf3650200d8bffa99015595e10f1fbd17de07abbc25bb067da79e769939bfa" -dependencies = [ - "borsh-derive", - "hashbrown 0.11.2", -] - -[[package]] -name = "borsh-derive" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6441c552f230375d18e3cc377677914d2ca2b0d36e52129fe15450a2dce46775" -dependencies = [ - "borsh-derive-internal", - "borsh-schema-derive-internal", - "proc-macro-crate 0.1.5", - "proc-macro2", - "syn", -] - -[[package]] -name = "borsh-derive-internal" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5449c28a7b352f2d1e592a8a28bf139bc71afb0764a14f3c02500935d8c44065" -dependencies = [ - 
"proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "borsh-schema-derive-internal" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cdbd5696d8bfa21d53d9fe39a714a18538bad11492a42d066dbbc395fb1951c0" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "bstr" -version = "0.2.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba3569f383e8f1598449f1a423e72e99569137b47740b1da11ef19af3d5c3223" -dependencies = [ - "memchr", -] - [[package]] name = "bumpalo" version = "3.9.1" @@ -3713,6 +3659,8 @@ name = "sp-arithmetic" version = "5.0.0" source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" dependencies = [ + "integer-sqrt", + "num-traits", "parity-scale-codec", "scale-info", "serde", diff --git a/modules/src/clients/ics11_beefy/client_state.rs b/modules/src/clients/ics11_beefy/client_state.rs index 7a42ea8f40..43750579b4 100644 --- a/modules/src/clients/ics11_beefy/client_state.rs +++ b/modules/src/clients/ics11_beefy/client_state.rs @@ -20,10 +20,9 @@ use crate::core::ics24_host::identifier::ChainId; use crate::timestamp::Timestamp; use crate::Height; -pub const REVISION_NUMBER: u64 = 0; #[derive(Clone, Debug, PartialEq, Eq)] pub struct ClientState { - /// The chain id which encapsulates the para id + /// The chain id pub chain_id: ChainId, /// Latest mmr root hash pub mmr_root_hash: H256, @@ -38,10 +37,6 @@ pub struct ClientState { pub authority: BeefyNextAuthoritySet, /// authorities for the next round pub next_authority_set: BeefyNextAuthoritySet, - /// Latest parachain height - pub latest_para_height: Option, - /// Parachain id - pub para_id: u32, } impl Protobuf for ClientState {} @@ -87,10 +82,6 @@ impl ClientState { }) } - pub fn latest_height(&self) -> Height { - self.latest_para_height.unwrap_or_default() - } - pub fn to_leaf_index(&self, block_number: u32) -> u32 { if self.beefy_activation_block == 0 { return block_number.saturating_sub(1); @@ -177,7 +168,7 @@ impl ClientState { /// Verify that the client is at a sufficient height and unfrozen at the given height pub fn verify_height(&self, height: Height) -> Result<(), Error> { - if (self.latest_height() as u64) < height.revision_height { + if (self.latest_beefy_height as u64) < height.revision_height { return Err(Error::insufficient_height( Height::new(0, self.latest_beefy_height.into()), height, @@ -208,7 +199,7 @@ impl crate::core::ics02_client::client_state::ClientState for ClientState { } fn latest_height(&self) -> Height { - self.latest_height() + Height::new(0, self.latest_beefy_height.into()) } fn frozen_height(&self) -> Option { @@ -219,16 +210,12 @@ impl crate::core::ics02_client::client_state::ClientState for ClientState { mut self, upgrade_height: Height, _upgrade_options: UpgradeOptions, - chain_id: ChainId, + _chain_id: ChainId, ) -> Self { self.frozen_height = None; // Upgrade the client state self.latest_beefy_height = upgrade_height.revision_height.saturated_into::(); - self.latest_para_height = None; - - self.para_id = chain_id.version().saturated_into::(); - self } diff --git a/proto/src/prost/ibc.lightclients.beefy.v1.rs b/proto/src/prost/ibc.lightclients.beefy.v1.rs index f1cb23c8ba..8fae30ad66 100644 --- a/proto/src/prost/ibc.lightclients.beefy.v1.rs +++ b/proto/src/prost/ibc.lightclients.beefy.v1.rs @@ -1,4 +1,4 @@ -/// ClientState from Tendermint tracks the current validator set, latest height, +/// ClientState from Beefy tracks 
the current validator set, latest height, /// and a possible frozen height. #[derive(Clone, PartialEq, ::prost::Message)] pub struct ClientState { @@ -94,9 +94,6 @@ pub struct ConsensusState { /// packet commitment root #[prost(bytes="vec", tag="2")] pub root: ::prost::alloc::vec::Vec, - /// proof of inclusion for this parachain header in the Mmr. - #[prost(message, optional, tag = "4")] - pub parachain_header: ::core::option::Option, } /// Misbehaviour is a wrapper over two conflicting Headers /// that implements Misbehaviour interface expected by ICS-02 @@ -180,6 +177,7 @@ pub struct BeefyAuthoritySet { #[prost(bytes="vec", tag="3")] pub authority_root: ::prost::alloc::vec::Vec, } +/// BeefyMmrLeaf leaf data #[derive(Clone, PartialEq, ::prost::Message)] pub struct BeefyMmrLeaf { /// leaf version From 4895a474b587a38683d538b6e4649e683337d111 Mon Sep 17 00:00:00 2001 From: David Salami Date: Thu, 9 Jun 2022 17:19:31 +0100 Subject: [PATCH 51/96] align with upstream --- .cargo/config.toml | 3 --- .../bug-fixes/ibc/2104-fix-commitment-computation.md | 4 ++-- .../bug-fixes/ibc/2114-fix-ack-verification.md | 4 ++-- .../v0.15.0/bug-fixes/ibc/2178-conn-ack-bug-fix.md | 4 ++-- Cargo.toml | 12 ++++++------ 5 files changed, 12 insertions(+), 15 deletions(-) delete mode 100644 .cargo/config.toml diff --git a/.cargo/config.toml b/.cargo/config.toml deleted file mode 100644 index 78014a91b1..0000000000 --- a/.cargo/config.toml +++ /dev/null @@ -1,3 +0,0 @@ -paths = [ - "/Users/davidsalami/Documents/open-source/beefy-generic-client" -] \ No newline at end of file diff --git a/.changelog/v0.15.0/bug-fixes/ibc/2104-fix-commitment-computation.md b/.changelog/v0.15.0/bug-fixes/ibc/2104-fix-commitment-computation.md index da0b858a75..570c1b3338 100644 --- a/.changelog/v0.15.0/bug-fixes/ibc/2104-fix-commitment-computation.md +++ b/.changelog/v0.15.0/bug-fixes/ibc/2104-fix-commitment-computation.md @@ -1,2 +1,2 @@ -- Fix packet commitment calculation to match ibc-go ([#2104](https://github.com/informalsystems/ibc- - rs/issues/2104)) +- Fix packet commitment calculation to match IBC-Go + ([#2104](https://github.com/informalsystems/ibc-rs/issues/2104)) diff --git a/.changelog/v0.15.0/bug-fixes/ibc/2114-fix-ack-verification.md b/.changelog/v0.15.0/bug-fixes/ibc/2114-fix-ack-verification.md index cbe6399a16..0987d40b6f 100644 --- a/.changelog/v0.15.0/bug-fixes/ibc/2114-fix-ack-verification.md +++ b/.changelog/v0.15.0/bug-fixes/ibc/2114-fix-ack-verification.md @@ -1,2 +1,2 @@ -- Fix incorrect acknowledgement verification ([#2114](https://github.com/informalsystems/ibc- - rs/issues/2114)) +- Fix incorrect acknowledgement verification + ([#2114](https://github.com/informalsystems/ibc-rs/issues/2114)) diff --git a/.changelog/v0.15.0/bug-fixes/ibc/2178-conn-ack-bug-fix.md b/.changelog/v0.15.0/bug-fixes/ibc/2178-conn-ack-bug-fix.md index 054fb34968..af72298e4b 100644 --- a/.changelog/v0.15.0/bug-fixes/ibc/2178-conn-ack-bug-fix.md +++ b/.changelog/v0.15.0/bug-fixes/ibc/2178-conn-ack-bug-fix.md @@ -1,2 +1,2 @@ -- fix connection id mix-up in connection acknowledgement processing - ([#2178](https://github.com/informalsystems/ibc-rs/issues/2178)) \ No newline at end of file +- Fix connection identifier mix-up in connection acknowledgement processing + ([#2178](https://github.com/informalsystems/ibc-rs/issues/2178)) diff --git a/Cargo.toml b/Cargo.toml index ec9bd9cbf0..eaf4e09dfc 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -4,13 +4,13 @@ resolver = "2" members = [ "modules", - # "relayer", - # "relayer-cli", - # 
"relayer-rest", - # "telemetry", + "relayer", + "relayer-cli", + "relayer-rest", + "telemetry", "proto", - # "tools/integration-test", - # "tools/test-framework", + "tools/integration-test", + "tools/test-framework", ] exclude = [ From c7c73ab423445f501ceae3d1d5d74815b502744a Mon Sep 17 00:00:00 2001 From: David Salami Date: Wed, 15 Jun 2022 15:02:26 +0100 Subject: [PATCH 52/96] update tendermint dependencies --- Cargo.lock | 1199 ++++++++++++++++++++++++++++- relayer-cli/Cargo.toml | 15 +- relayer/Cargo.toml | 14 +- tools/integration-test/Cargo.toml | 8 +- tools/test-framework/Cargo.toml | 4 +- 5 files changed, 1194 insertions(+), 46 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c79c1573a4..8ef9042c1f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -12,6 +12,45 @@ dependencies = [ "regex", ] +[[package]] +name = "abscissa_core" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6750843603bf31a83accd3c8177f9dbf53a7d64275688fc7371e0a4d9f8628b5" +dependencies = [ + "abscissa_derive", + "arc-swap", + "backtrace", + "canonical-path", + "clap", + "color-eyre", + "fs-err", + "once_cell", + "regex", + "secrecy", + "semver", + "serde", + "termcolor", + "toml", + "tracing", + "tracing-log", + "tracing-subscriber 0.3.11", + "wait-timeout", +] + +[[package]] +name = "abscissa_derive" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a3473aa652e90865a06b723102aaa4a54a7d9f2092dbf4582497a61d0537d3f" +dependencies = [ + "ident_case", + "proc-macro2", + "quote", + "syn", + "synstructure", +] + [[package]] name = "addr2line" version = "0.17.0" @@ -27,6 +66,12 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" +[[package]] +name = "adler32" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aae1277d39aeec15cb388266ecc24b11c80469deae6067e17a1a7aa9e5c1f234" + [[package]] name = "ahash" version = "0.7.6" @@ -47,6 +92,21 @@ dependencies = [ "memchr", ] +[[package]] +name = "alloc-no-stdlib" +version = "2.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "35ef4730490ad1c4eae5c4325b2a95f521d023e5c885853ff7aca0a6a1631db3" + +[[package]] +name = "alloc-stdlib" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "697ed7edc0f1711de49ce108c541623a0af97c6c60b2f6e2b65229847ac843c2" +dependencies = [ + "alloc-no-stdlib", +] + [[package]] name = "ansi_term" version = "0.12.1" @@ -71,6 +131,12 @@ dependencies = [ "num-traits", ] +[[package]] +name = "arc-swap" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c5d78ce20460b82d3fa150275ed9d55e21064fc7951177baacf86a145c4a4b1f" + [[package]] name = "arrayref" version = "0.3.6" @@ -98,6 +164,12 @@ version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6" +[[package]] +name = "ascii" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbf56136a5198c7b01a49e3afcbef6cf84597273d298f54432926024107b0109" + [[package]] name = "async-stream" version = "0.3.3" @@ -121,9 +193,9 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.53" +version = "0.1.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"ed6aa3524a2dfcf9fe180c51eae2b58738348d819517ceadf95789c51fff7600" +checksum = "96cf8829f67d2eab0b2dfa42c5d0ef737e0724e4a82b01b3e292456202b19716" dependencies = [ "proc-macro2", "quote", @@ -239,6 +311,24 @@ version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" +[[package]] +name = "base64ct" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dea908e7347a8c64e378c17e30ef880ad73e3b4498346b055c2c00ea342f3179" + +[[package]] +name = "bech32" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf9ff0bbfd639f15c74af777d81383cf53efb7c93613f6cab67c6c11e05bbf8b" + +[[package]] +name = "bech32" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c5738be7561b0eeb501ef1d5c5db3f24e01ceb55fededd9b00039aada34966ad" + [[package]] name = "beef" version = "0.5.1" @@ -293,6 +383,27 @@ dependencies = [ "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", ] +[[package]] +name = "bitcoin" +version = "0.28.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05bba324e6baf655b882df672453dbbc527bc938cadd27750ae510aaccc3a66a" +dependencies = [ + "bech32 0.8.1", + "bitcoin_hashes", + "secp256k1 0.22.1", + "serde", +] + +[[package]] +name = "bitcoin_hashes" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "006cc91e1a1d99819bc5b8214be3555c1f0611b169f527a1fdc54ed1f2b745b0" +dependencies = [ + "serde", +] + [[package]] name = "bitflags" version = "1.3.2" @@ -414,6 +525,27 @@ dependencies = [ "syn", ] +[[package]] +name = "brotli" +version = "3.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1a0b1dbcc8ae29329621f8d4f0d835787c1c38bb1401979b49d13b0b305ff68" +dependencies = [ + "alloc-no-stdlib", + "alloc-stdlib", + "brotli-decompressor", +] + +[[package]] +name = "brotli-decompressor" +version = "2.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59ad2d4653bf5ca36ae797b1f4bb4dbddb60ce49ca4aed8a2ce4829f60425b80" +dependencies = [ + "alloc-no-stdlib", + "alloc-stdlib", +] + [[package]] name = "bstr" version = "0.2.17" @@ -423,6 +555,16 @@ dependencies = [ "memchr", ] +[[package]] +name = "buf_redux" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b953a6887648bb07a535631f2bc00fbdb2a2216f135552cb3f534ed136b9c07f" +dependencies = [ + "memchr", + "safemem", +] + [[package]] name = "bumpalo" version = "3.9.1" @@ -441,6 +583,12 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" +[[package]] +name = "bytecount" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c676a478f63e9fa2dd5368a42f28bba0d6c560b775f38583c8bbaa7fcd67c9c" + [[package]] name = "byteorder" version = "1.4.3" @@ -453,6 +601,43 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c4872d67bab6358e59559027aa3b9157c53d9358c51423c17554809a8858e0f8" +[[package]] +name = "camino" +version = "1.0.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "869119e97797867fd90f5e22af7d0bd274bd4635ebb9eb68c04f3f513ae6c412" +dependencies = [ + "serde", +] + +[[package]] +name = 
"canonical-path" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6e9e01327e6c86e92ec72b1c798d4a94810f147209bbe3ffab6a86954937a6f" + +[[package]] +name = "cargo-platform" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cbdb825da8a5df079a43676dbe042702f1707b1109f713a01420fbb4cc71fa27" +dependencies = [ + "serde", +] + +[[package]] +name = "cargo_metadata" +version = "0.14.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4acbb09d9ee8e23699b9634375c72795d095bf268439da88562cf9b501f181fa" +dependencies = [ + "camino", + "cargo-platform", + "semver", + "serde", + "serde_json", +] + [[package]] name = "cc" version = "1.0.73" @@ -546,6 +731,48 @@ dependencies = [ "os_str_bytes", ] +[[package]] +name = "color-eyre" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ebf286c900a6d5867aeff75cfee3192857bb7f24b547d4f0df2ed6baa812c90" +dependencies = [ + "backtrace", + "color-spantrace", + "eyre", + "indenter", + "once_cell", + "owo-colors", + "tracing-error", +] + +[[package]] +name = "color-spantrace" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ba75b3d9449ecdccb27ecbc479fdc0b87fa2dd43d2f8298f9bf0e59aacc8dce" +dependencies = [ + "once_cell", + "owo-colors", + "tracing-core", + "tracing-error", +] + +[[package]] +name = "console" +version = "0.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a28b32d32ca44b70c3e4acd7db1babf555fa026e385fb95f18028f88848b3c31" +dependencies = [ + "encode_unicode", + "libc", + "once_cell", + "regex", + "terminal_size", + "unicode-width", + "winapi", +] + [[package]] name = "const-oid" version = "0.7.1" @@ -558,6 +785,17 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" +[[package]] +name = "contracts" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1d1429e3bd78171c65aa010eabcdf8f863ba3254728dbfb0ad4b1545beac15c" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "core-foundation" version = "0.9.3" @@ -592,6 +830,16 @@ dependencies = [ "cfg-if 1.0.0", ] +[[package]] +name = "crossbeam-channel" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b153fe7cbef478c567df0f972e02e6d736db11affe43dfc9c56a9374d1adfb87" +dependencies = [ + "crossbeam-utils 0.7.2", + "maybe-uninit", +] + [[package]] name = "crossbeam-channel" version = "0.5.4" @@ -599,7 +847,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5aaa7bd5fb665c6864b5f963dd9097905c54125909c7aa94c9e18507cdbe6c53" dependencies = [ "cfg-if 1.0.0", - "crossbeam-utils", + "crossbeam-utils 0.8.8", ] [[package]] @@ -609,8 +857,23 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6455c0ca19f0d2fbf751b908d5c55c1f5cbc65e03c4225427254b46890bdde1e" dependencies = [ "cfg-if 1.0.0", - "crossbeam-epoch", - "crossbeam-utils", + "crossbeam-epoch 0.9.8", + "crossbeam-utils 0.8.8", +] + +[[package]] +name = "crossbeam-epoch" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "058ed274caafc1f60c4997b5fc07bf7dc7cca454af7c6e81edffe5f33f70dace" +dependencies = [ + "autocfg", + "cfg-if 0.1.10", + "crossbeam-utils 0.7.2", + "lazy_static", + 
"maybe-uninit", + "memoffset 0.5.6", + "scopeguard", ] [[package]] @@ -621,12 +884,23 @@ checksum = "1145cf131a2c6ba0615079ab6a638f7e1973ac9c2634fcbeaaad6114246efe8c" dependencies = [ "autocfg", "cfg-if 1.0.0", - "crossbeam-utils", + "crossbeam-utils 0.8.8", "lazy_static", - "memoffset", + "memoffset 0.6.5", "scopeguard", ] +[[package]] +name = "crossbeam-utils" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3c7c73a2d1e9fc0886a08b93e98eb643461230d5f1925e4036204d5f2e261a8" +dependencies = [ + "autocfg", + "cfg-if 0.1.10", + "lazy_static", +] + [[package]] name = "crossbeam-utils" version = "0.8.8" @@ -755,6 +1029,26 @@ dependencies = [ "syn", ] +[[package]] +name = "dashmap" +version = "4.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e77a43b28d0668df09411cb0bc9a8c2adc40f9a048afe863e05fd43251e8e39c" +dependencies = [ + "cfg-if 1.0.0", + "num_cpus", +] + +[[package]] +name = "deflate" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f95bf05dffba6e6cce8dfbb30def788154949ccd9aed761b472119c21e01c70" +dependencies = [ + "adler32", + "gzip-header", +] + [[package]] name = "der" version = "0.5.1" @@ -786,6 +1080,17 @@ dependencies = [ "syn", ] +[[package]] +name = "dialoguer" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d8c8ae48e400addc32a8710c8d62d55cb84249a7d58ac4cd959daecfbaddc545" +dependencies = [ + "console", + "tempfile", + "zeroize", +] + [[package]] name = "digest" version = "0.8.1" @@ -824,6 +1129,16 @@ dependencies = [ "dirs-sys", ] +[[package]] +name = "dirs-next" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b98cf8ebf19c3d1b223e151f99a4f9f0690dca41414773390fc824184ac833e1" +dependencies = [ + "cfg-if 1.0.0", + "dirs-sys-next", +] + [[package]] name = "dirs-sys" version = "0.3.7" @@ -835,6 +1150,17 @@ dependencies = [ "winapi", ] +[[package]] +name = "dirs-sys-next" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ebda144c4fe02d1f7ea1a7d9641b6fc6b580adcfa024ae48797ecdeb6825b4d" +dependencies = [ + "libc", + "redox_users", + "winapi", +] + [[package]] name = "downcast-rs" version = "1.2.0" @@ -927,6 +1253,12 @@ dependencies = [ "zeroize", ] +[[package]] +name = "encode_unicode" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f" + [[package]] name = "env_logger" version = "0.9.0" @@ -946,6 +1278,15 @@ version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "68b91989ae21441195d7d9b9993a2f9295c7e1a8c96255d8b729accddc124797" +[[package]] +name = "error-chain" +version = "0.12.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d2f06b9cac1506ece98fe3231e3cc9c4410ec3d5b1f24ae1c8946f0742cdefc" +dependencies = [ + "version_check", +] + [[package]] name = "eyre" version = "0.6.8" @@ -981,6 +1322,18 @@ dependencies = [ "subtle", ] +[[package]] +name = "filetime" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0408e2626025178a6a7f7ffc05a25bc47103229f19c113755de7bf63816290c" +dependencies = [ + "cfg-if 1.0.0", + "libc", + "redox_syscall", + "winapi", +] + [[package]] name = "fixed-hash" version = "0.7.0" @@ -1011,6 +1364,7 @@ version = "0.4.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "c606d892c9de11507fa0dcffc116434f94e105d0bbdc4e405b61519464c49d7b" dependencies = [ + "anyhow", "eyre", "paste", ] @@ -1146,6 +1500,12 @@ dependencies = [ "sp-version", ] +[[package]] +name = "fs-err" +version = "2.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5bd79fa345a495d3ae89fb7165fec01c0e72f41821d642dda363a1e97975652e" + [[package]] name = "funty" version = "2.0.0" @@ -1299,6 +1659,12 @@ version = "0.26.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "78cc372d058dcf6d5ecd98510e7fbc9e5aec4d21de70f65fea8fecebcd881bd4" +[[package]] +name = "glob" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" + [[package]] name = "globset" version = "0.4.8" @@ -1343,6 +1709,15 @@ dependencies = [ "syn", ] +[[package]] +name = "gzip-header" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0131feb3d3bb2a5a238d8a4d09f6353b7ebfdc52e77bccbf4ea6eaa751dde639" +dependencies = [ + "crc32fast", +] + [[package]] name = "h2" version = "0.3.13" @@ -1363,7 +1738,13 @@ dependencies = [ ] [[package]] -name = "hash-db" +name = "half" +version = "1.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eabb4a44450da02c90444cf74558da904edde8fb4e9035a9a6a4e15445af0bd7" + +[[package]] +name = "hash-db" version = "0.15.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d23bd4e7b5eda0d0f3a307e8b381fdc8ba9000f26fbe912250c0a4cc3956364a" @@ -1395,6 +1776,15 @@ dependencies = [ "ahash", ] +[[package]] +name = "hdpath" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dafb09e5d85df264339ad786a147d9de1da13687a3697c52244297e5e7c32d9c" +dependencies = [ + "byteorder", +] + [[package]] name = "headers" version = "0.3.7" @@ -1524,6 +1914,16 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" +[[package]] +name = "humantime-serde" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57a3db5ea5923d99402c94e9feb261dc5ee9b4efa158b0315f788cf549cc200c" +dependencies = [ + "humantime", + "serde", +] + [[package]] name = "hyper" version = "0.14.18" @@ -1632,7 +2032,7 @@ dependencies = [ "subxt", "tendermint", "tendermint-light-client-verifier", - "tendermint-proto", + "tendermint-proto 0.23.7 (git+https://github.com/composableFi/tendermint-rs?branch=seun-0.23.7)", "tendermint-rpc", "tendermint-testgen", "test-log", @@ -1643,6 +2043,23 @@ dependencies = [ "uint", ] +[[package]] +name = "ibc-integration-test" +version = "0.15.0" +dependencies = [ + "ibc", + "ibc-proto", + "ibc-relayer", + "ibc-relayer-cli", + "ibc-test-framework", + "serde", + "serde_json", + "tempfile", + "tendermint", + "tendermint-rpc", + "time", +] + [[package]] name = "ibc-proto" version = "0.18.0" @@ -1653,8 +2070,172 @@ dependencies = [ "prost-types", "schemars", "serde", - "tendermint-proto", + "tendermint-proto 0.23.7 (git+https://github.com/composableFi/tendermint-rs?branch=seun-0.23.7)", + "tonic", +] + +[[package]] +name = "ibc-relayer" +version = "0.15.0" +dependencies = [ + "anyhow", + "async-stream", + "bech32 0.9.0", + "bitcoin", + "bytes", + "crossbeam-channel 0.5.4", + "dirs-next", + "env_logger", + 
"flex-error", + "futures", + "hdpath", + "hex", + "http", + "humantime", + "humantime-serde", + "ibc", + "ibc-proto", + "ibc-telemetry", + "itertools", + "k256", + "moka", + "nanoid", + "num-bigint 0.4.3", + "num-rational 0.4.0", + "prost", + "prost-types", + "regex", + "retry", + "ripemd160", + "semver", + "serde", + "serde_derive", + "serde_json", + "serial_test", + "sha2 0.10.2", + "signature", + "subtle-encoding", + "tendermint", + "tendermint-light-client", + "tendermint-light-client-verifier", + "tendermint-proto 0.23.7 (registry+https://github.com/rust-lang/crates.io-index)", + "tendermint-rpc", + "tendermint-testgen", + "test-log", + "thiserror", + "tiny-bip39", + "tiny-keccak", + "tokio", + "toml", "tonic", + "tracing", + "tracing-subscriber 0.3.11", + "uuid 1.1.2", +] + +[[package]] +name = "ibc-relayer-cli" +version = "0.15.0" +dependencies = [ + "abscissa_core", + "atty", + "clap", + "clap_complete", + "color-eyre", + "console", + "crossbeam-channel 0.5.4", + "dialoguer", + "dirs-next", + "eyre", + "flex-error", + "futures", + "hex", + "humantime", + "ibc", + "ibc-proto", + "ibc-relayer", + "ibc-relayer-rest", + "ibc-telemetry", + "itertools", + "once_cell", + "oneline-eyre", + "regex", + "serde", + "serde_derive", + "serde_json", + "signal-hook", + "subtle-encoding", + "tendermint", + "tendermint-light-client", + "tendermint-light-client-verifier", + "tendermint-proto 0.23.7 (git+https://github.com/composableFi/tendermint-rs?branch=seun-0.23.7)", + "tendermint-rpc", + "tokio", + "toml", + "tracing", + "tracing-subscriber 0.3.11", +] + +[[package]] +name = "ibc-relayer-rest" +version = "0.15.0" +dependencies = [ + "crossbeam-channel 0.5.4", + "ibc", + "ibc-relayer", + "rouille", + "serde", + "serde_json", + "toml", + "tracing", + "ureq", +] + +[[package]] +name = "ibc-telemetry" +version = "0.15.0" +dependencies = [ + "crossbeam-channel 0.5.4", + "ibc", + "moka", + "once_cell", + "opentelemetry", + "opentelemetry-prometheus", + "prometheus", + "rouille", + "uuid 1.1.2", +] + +[[package]] +name = "ibc-test-framework" +version = "0.15.0" +dependencies = [ + "async-trait", + "color-eyre", + "crossbeam-channel 0.5.4", + "env_logger", + "eyre", + "flex-error", + "hex", + "http", + "ibc", + "ibc-proto", + "ibc-relayer", + "ibc-relayer-cli", + "itertools", + "rand 0.8.5", + "semver", + "serde", + "serde_json", + "serde_yaml", + "sha2 0.10.2", + "subtle-encoding", + "tendermint", + "tendermint-rpc", + "tokio", + "toml", + "tracing", + "tracing-subscriber 0.3.11", ] [[package]] @@ -1967,6 +2548,7 @@ dependencies = [ "ecdsa", "elliptic-curve", "sec1", + "sha2 0.9.9", ] [[package]] @@ -2051,6 +2633,12 @@ dependencies = [ "libsecp256k1-core", ] +[[package]] +name = "linked-hash-map" +version = "0.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7fb9b38af92608140b86b693604b9ffcc5824240a484d1ecd4795bacb2fe88f3" + [[package]] name = "linregress" version = "0.4.4" @@ -2089,6 +2677,15 @@ dependencies = [ "hashbrown 0.11.2", ] +[[package]] +name = "mach" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b823e83b2affd8f40a9ee8c29dbc56404c1e34cd2710921f2801e2cf29527afa" +dependencies = [ + "libc", +] + [[package]] name = "matchers" version = "0.0.1" @@ -2128,12 +2725,27 @@ dependencies = [ "rawpointer", ] +[[package]] +name = "maybe-uninit" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" + [[package]] 
name = "memchr" version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" +[[package]] +name = "memoffset" +version = "0.5.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "043175f069eda7b85febe4a74abbaeff828d9f8b448515d3151a14a3542811aa" +dependencies = [ + "autocfg", +] + [[package]] name = "memoffset" version = "0.6.5" @@ -2184,6 +2796,16 @@ version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" +[[package]] +name = "mime_guess" +version = "2.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4192263c238a5f0d0c6bfd21f336a313a4ce1c450542449ca191bb657b4642ef" +dependencies = [ + "mime", + "unicase", +] + [[package]] name = "minimal-lexical" version = "0.2.1" @@ -2236,6 +2858,46 @@ dependencies = [ "ureq", ] +[[package]] +name = "moka" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df72b50274c0988d9f4a6e808e06d9d926f265db6f8bbda1576bcaa658e72763" +dependencies = [ + "crossbeam-channel 0.5.4", + "crossbeam-epoch 0.8.2", + "crossbeam-utils 0.8.8", + "num_cpus", + "once_cell", + "parking_lot", + "quanta", + "scheduled-thread-pool", + "skeptic", + "smallvec", + "tagptr", + "thiserror", + "triomphe", + "uuid 0.8.2", +] + +[[package]] +name = "multipart" +version = "0.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00dec633863867f29cb39df64a397cdf4a6354708ddd7759f70c7fb51c5f9182" +dependencies = [ + "buf_redux", + "httparse", + "log", + "mime", + "mime_guess", + "quick-error", + "rand 0.8.5", + "safemem", + "tempfile", + "twoway", +] + [[package]] name = "nalgebra" version = "0.27.1" @@ -2265,6 +2927,15 @@ dependencies = [ "syn", ] +[[package]] +name = "nanoid" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ffa00dec017b5b1a8b7cf5e2c008bfda1aa7e0697ac1508b491fdf2622fb4d8" +dependencies = [ + "rand 0.8.5", +] + [[package]] name = "nodrop" version = "0.1.14" @@ -2292,6 +2963,18 @@ dependencies = [ "num-traits", ] +[[package]] +name = "num-bigint" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f93ab6289c7b344a8a9f60f88d80aa20032336fe78da341afc91c8a2341fc75f" +dependencies = [ + "autocfg", + "num-integer", + "num-traits", + "serde", +] + [[package]] name = "num-complex" version = "0.4.1" @@ -2339,7 +3022,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c000134b5dbf44adc5cb772486d335293351644b801551abe8f75c84cfa4aef" dependencies = [ "autocfg", - "num-bigint", + "num-bigint 0.2.6", "num-integer", "num-traits", ] @@ -2351,8 +3034,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d41702bd167c2df5520b384281bc111a4b5efcf7fbc4c9c222c815b07e0a6a6a" dependencies = [ "autocfg", + "num-bigint 0.4.3", "num-integer", "num-traits", + "serde", ] [[package]] @@ -2399,6 +3084,15 @@ version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7709cef83f0c1f58f666e746a08b21e0085f7440fa6a29cc194d68aac97a4225" +[[package]] +name = "oneline-eyre" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "862f17a1e689c0ce8ca158ea48e776c5101c5d14fdfbed3e01c15f89604c3097" +dependencies = [ + "eyre", +] + [[package]] name = 
"opaque-debug" version = "0.2.3" @@ -2417,12 +3111,50 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" +[[package]] +name = "opentelemetry" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6105e89802af13fdf48c49d7646d3b533a70e536d818aae7e78ba0433d01acb8" +dependencies = [ + "async-trait", + "crossbeam-channel 0.5.4", + "dashmap", + "fnv", + "futures-channel", + "futures-executor", + "futures-util", + "js-sys", + "lazy_static", + "percent-encoding", + "pin-project", + "rand 0.8.5", + "thiserror", +] + +[[package]] +name = "opentelemetry-prometheus" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9328977e479cebe12ce0d3fcecdaea4721d234895a9440c5b5dfd113f0594ac6" +dependencies = [ + "opentelemetry", + "prometheus", + "protobuf", +] + [[package]] name = "os_str_bytes" version = "6.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "029d8d0b2f198229de29dca79676f2738ff952edf3fde542eb8bf94d8c21b435" +[[package]] +name = "owo-colors" +version = "3.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "decf7381921fea4dcb2549c5667eda59b3ec297ab7e2b5fc33eac69d2e7da87b" + [[package]] name = "pallet-beefy" version = "4.0.0-dev" @@ -2703,6 +3435,17 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" +[[package]] +name = "pkcs8" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7cabda3fb821068a9a4fab19a683eac3af12edf0f34b94a8be53c4972b8149d0" +dependencies = [ + "der", + "spki", + "zeroize", +] + [[package]] name = "ppv-lite86" version = "0.2.16" @@ -2774,6 +3517,21 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "prometheus" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cface98dfa6d645ea4c789839f176e4b072265d085bfcc48eaa8d137f58d3c39" +dependencies = [ + "cfg-if 1.0.0", + "fnv", + "lazy_static", + "memchr", + "parking_lot", + "protobuf", + "thiserror", +] + [[package]] name = "prost" version = "0.10.3" @@ -2798,14 +3556,53 @@ dependencies = [ ] [[package]] -name = "prost-types" -version = "0.10.1" +name = "prost-types" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d0a014229361011dc8e69c8a1ec6c2e8d0f2af7c91e3ea3f5b2170298461e68" +dependencies = [ + "bytes", + "prost", +] + +[[package]] +name = "protobuf" +version = "2.27.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf7e6d18738ecd0902d30d1ad232c9125985a3422929b16c65517b38adc14f96" + +[[package]] +name = "pulldown-cmark" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34f197a544b0c9ab3ae46c359a7ec9cbbb5c7bf97054266fecb7ead794a181d6" +dependencies = [ + "bitflags", + "memchr", + "unicase", +] + +[[package]] +name = "quanta" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bafd74c340a0a7e79415981ede3460df16b530fd071541901a57416eea950b17" +dependencies = [ + "crossbeam-utils 0.8.8", + "libc", + "mach", + "once_cell", + "raw-cpuid", + "wasi 0.10.2+wasi-snapshot-preview1", + "web-sys", + "winapi", +] + +[[package]] +name = "quick-error" +version = "1.2.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d0a014229361011dc8e69c8a1ec6c2e8d0f2af7c91e3ea3f5b2170298461e68" -dependencies = [ - "bytes", - "prost", -] +checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] name = "quote" @@ -2913,6 +3710,15 @@ dependencies = [ "rand_core 0.5.1", ] +[[package]] +name = "raw-cpuid" +version = "10.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "738bc47119e3eeccc7e94c4a506901aea5e7b4944ecd0829cbebf4af04ceda12" +dependencies = [ + "bitflags", +] + [[package]] name = "rawpointer" version = "0.2.1" @@ -2937,9 +3743,9 @@ version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "258bcdb5ac6dad48491bb2992db6b7cf74878b0384908af124823d118c99683f" dependencies = [ - "crossbeam-channel", + "crossbeam-channel 0.5.4", "crossbeam-deque", - "crossbeam-utils", + "crossbeam-utils 0.8.8", "num_cpus", ] @@ -3018,6 +3824,12 @@ dependencies = [ "winapi", ] +[[package]] +name = "retry" +version = "1.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac95c60a949a63fd2822f4964939662d8f2c16c4fa0624fd954bc6e703b9a3f6" + [[package]] name = "rfc6979" version = "0.1.0" @@ -3053,6 +3865,42 @@ dependencies = [ "digest 0.10.3", ] +[[package]] +name = "ripemd160" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2eca4ecc81b7f313189bf73ce724400a07da2a6dac19588b03c8bd76a2dcc251" +dependencies = [ + "block-buffer 0.9.0", + "digest 0.9.0", + "opaque-debug 0.3.0", +] + +[[package]] +name = "rouille" +version = "3.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "18b2380c42510ef4a28b5f228a174c801e0dec590103e215e60812e2e2f34d05" +dependencies = [ + "base64", + "brotli", + "chrono", + "deflate", + "filetime", + "multipart", + "num_cpus", + "percent-encoding", + "rand 0.8.5", + "serde", + "serde_derive", + "serde_json", + "sha1", + "threadpool", + "time", + "tiny_http", + "url", +] + [[package]] name = "rs_merkle" version = "1.2.0" @@ -3139,6 +3987,12 @@ dependencies = [ "base64", ] +[[package]] +name = "rustversion" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2cc38e8fa666e2de3c4aba7edeb5ffc5246c1c2ed0e3d17e560aeeba736b23f" + [[package]] name = "ryu" version = "1.0.10" @@ -3192,6 +4046,12 @@ dependencies = [ "safe-regex-compiler", ] +[[package]] +name = "safemem" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef703b7cb59335eae2eb93ceb664c0eb7ea6bf567079d843e09420219668e072" + [[package]] name = "same-file" version = "1.0.6" @@ -3237,6 +4097,15 @@ dependencies = [ "windows-sys", ] +[[package]] +name = "scheduled-thread-pool" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "977a7519bff143a44f842fd07e80ad1329295bd71686457f18e496736f4bf9bf" +dependencies = [ + "parking_lot", +] + [[package]] name = "schemars" version = "0.8.10" @@ -3313,6 +4182,7 @@ checksum = "08da66b8b0965a5555b6bd6639e68ccba85e1e2506f5fbb089e93f8a04e1a2d1" dependencies = [ "der", "generic-array 0.14.5", + "pkcs8", "subtle", "zeroize", ] @@ -3323,7 +4193,17 @@ version = "0.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c42e6f1735c5f00f51e43e28d6634141f2bcad10931b2609ddd74a86d751260" dependencies = [ - "secp256k1-sys", + "secp256k1-sys 0.4.2", +] + +[[package]] +name = "secp256k1" +version = "0.22.1" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26947345339603ae8395f68e2f3d85a6b0a8ddfe6315818e80b8504415099db0" +dependencies = [ + "secp256k1-sys 0.5.2", + "serde", ] [[package]] @@ -3335,12 +4215,22 @@ dependencies = [ "cc", ] +[[package]] +name = "secp256k1-sys" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "152e20a0fd0519390fc43ab404663af8a0b794273d2a91d60ad4a39f13ffe110" +dependencies = [ + "cc", +] + [[package]] name = "secrecy" version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9bd1c54ea06cfd2f6b63219704de0b9b4f72dcc2b8fdef820be6cd799780e91e" dependencies = [ + "serde", "zeroize", ] @@ -3367,6 +4257,15 @@ dependencies = [ "libc", ] +[[package]] +name = "semver" +version = "1.0.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a41d061efea015927ac527063765e73601444cdc344ba855bc7bd44578b25e1c" +dependencies = [ + "serde", +] + [[package]] name = "serde" version = "1.0.137" @@ -3385,6 +4284,16 @@ dependencies = [ "serde", ] +[[package]] +name = "serde_cbor" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2bef2ebfde456fb76bbcf9f59315333decc4fda0b2b44b420243c11e0f5ec1f5" +dependencies = [ + "half", + "serde", +] + [[package]] name = "serde_derive" version = "1.0.137" @@ -3429,6 +4338,43 @@ dependencies = [ "syn", ] +[[package]] +name = "serde_yaml" +version = "0.8.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "707d15895415db6628332b737c838b88c598522e4dc70647e59b72312924aebc" +dependencies = [ + "indexmap", + "ryu", + "serde", + "yaml-rust", +] + +[[package]] +name = "serial_test" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d19dbfb999a147cedbfe82f042eb9555f5b0fa4ef95ee4570b74349103d9c9f4" +dependencies = [ + "lazy_static", + "log", + "parking_lot", + "serial_test_derive", +] + +[[package]] +name = "serial_test_derive" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb9e2050b2be1d681f8f1c1a528bcfe4e00afa2d8995f713974f5333288659f2" +dependencies = [ + "proc-macro-error", + "proc-macro2", + "quote", + "rustversion", + "syn", +] + [[package]] name = "sha-1" version = "0.9.8" @@ -3453,6 +4399,21 @@ dependencies = [ "digest 0.10.3", ] +[[package]] +name = "sha1" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1da05c97445caa12d05e848c4a4fcbbea29e748ac28f7e80e9b010392063770" +dependencies = [ + "sha1_smol", +] + +[[package]] +name = "sha1_smol" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae1a47186c03a32177042e55dbc5fd5aee900b8e0069a8d70fba96a9375cd012" + [[package]] name = "sha2" version = "0.8.2" @@ -3508,6 +4469,16 @@ dependencies = [ "lazy_static", ] +[[package]] +name = "signal-hook" +version = "0.3.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a253b5e89e2698464fc26b545c9edceb338e18a89effeeecfea192c3025be29d" +dependencies = [ + "libc", + "signal-hook-registry", +] + [[package]] name = "signal-hook-registry" version = "1.4.0" @@ -3545,6 +4516,21 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cc47a29ce97772ca5c927f75bac34866b16d64e07f330c3248e2d7226623901b" +[[package]] +name = "skeptic" +version = "0.13.7" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "16d23b015676c90a0f01c197bfdc786c20342c73a0afdda9025adb0bc42940a8" +dependencies = [ + "bytecount", + "cargo_metadata", + "error-chain", + "glob", + "pulldown-cmark", + "tempfile", + "walkdir", +] + [[package]] name = "slab" version = "0.4.6" @@ -3736,7 +4722,7 @@ dependencies = [ "regex", "scale-info", "schnorrkel", - "secp256k1", + "secp256k1 0.21.3", "secrecy", "serde", "sp-core-hashing 4.0.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -3782,7 +4768,7 @@ dependencies = [ "regex", "scale-info", "schnorrkel", - "secp256k1", + "secp256k1 0.21.3", "secrecy", "serde", "sp-core-hashing 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", @@ -3917,7 +4903,7 @@ dependencies = [ "log", "parity-scale-codec", "parking_lot", - "secp256k1", + "secp256k1 0.21.3", "sp-core 6.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "sp-externalities 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)", "sp-keystore 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -3942,7 +4928,7 @@ dependencies = [ "log", "parity-scale-codec", "parking_lot", - "secp256k1", + "secp256k1 0.21.3", "sp-core 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", "sp-externalities 0.12.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", "sp-keystore 0.12.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", @@ -4371,6 +5357,16 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" +[[package]] +name = "spki" +version = "0.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44d01ac02a6ccf3e07db148d2be087da624fea0221a16152ed01f0496a6b0a27" +dependencies = [ + "base64ct", + "der", +] + [[package]] name = "ss58-registry" version = "1.18.0" @@ -4508,9 +5504,9 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.95" +version = "1.0.96" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbaf6116ab8924f39d52792136fb74fd60a80194cf1b1c6ffa6453eef1c3f942" +checksum = "0748dd251e24453cb8717f0354206b91557e4ec8703673a4b30208f2abaf1ebf" dependencies = [ "proc-macro2", "quote", @@ -4535,6 +5531,12 @@ dependencies = [ "unicode-xid", ] +[[package]] +name = "tagptr" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b2093cf4c8eb1e67749a6762251bc9cd836b6fc171623bd0a9d324d37af2417" + [[package]] name = "tap" version = "1.0.1" @@ -4566,10 +5568,12 @@ dependencies = [ "ed25519-dalek", "flex-error", "futures", + "k256", "num-traits", "once_cell", "prost", "prost-types", + "ripemd160", "serde", "serde_bytes", "serde_json", @@ -4578,7 +5582,7 @@ dependencies = [ "signature", "subtle", "subtle-encoding", - "tendermint-proto", + "tendermint-proto 0.23.7 (git+https://github.com/composableFi/tendermint-rs?branch=seun-0.23.7)", "time", "zeroize", ] @@ -4596,6 +5600,27 @@ dependencies = [ "url", ] +[[package]] +name = "tendermint-light-client" +version = "0.23.7" +source = "git+https://github.com/composableFi/tendermint-rs?branch=seun-0.23.7#cb79c164e84c3d9176b374f2b227393fe000e691" +dependencies = [ + "contracts", + "crossbeam-channel 0.4.4", + "derive_more", + "flex-error", + "futures", + "serde", + "serde_cbor", + "serde_derive", + "static_assertions", + "tendermint", + "tendermint-light-client-verifier", + "tendermint-rpc", + 
"time", + "tokio", +] + [[package]] name = "tendermint-light-client-verifier" version = "0.23.7" @@ -4608,6 +5633,24 @@ dependencies = [ "time", ] +[[package]] +name = "tendermint-proto" +version = "0.23.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b71f925d74903f4abbdc4af0110635a307b3cb05b175fdff4a7247c14a4d0874" +dependencies = [ + "bytes", + "flex-error", + "num-derive", + "num-traits", + "prost", + "prost-types", + "serde", + "serde_bytes", + "subtle-encoding", + "time", +] + [[package]] name = "tendermint-proto" version = "0.23.7" @@ -4648,13 +5691,13 @@ dependencies = [ "subtle-encoding", "tendermint", "tendermint-config", - "tendermint-proto", + "tendermint-proto 0.23.7 (git+https://github.com/composableFi/tendermint-rs?branch=seun-0.23.7)", "thiserror", "time", "tokio", "tracing", "url", - "uuid", + "uuid 0.8.2", "walkdir", ] @@ -4682,6 +5725,16 @@ dependencies = [ "winapi-util", ] +[[package]] +name = "terminal_size" +version = "0.1.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "633c1a546cee861a1a6d0dc69ebeca693bf4296661ba7852b9d21d159e0506df" +dependencies = [ + "libc", + "winapi", +] + [[package]] name = "test-log" version = "0.2.10" @@ -4728,6 +5781,15 @@ dependencies = [ "once_cell", ] +[[package]] +name = "threadpool" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d050e60b33d41c19108b32cea32164033a9013fe3b46cbd4457559bfbf77afaa" +dependencies = [ + "num_cpus", +] + [[package]] name = "time" version = "0.3.9" @@ -4773,6 +5835,19 @@ dependencies = [ "crunchy", ] +[[package]] +name = "tiny_http" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ce51b50006056f590c9b7c3808c3bd70f0d1101666629713866c227d6e58d39" +dependencies = [ + "ascii", + "chrono", + "chunked_transfer", + "log", + "url", +] + [[package]] name = "tinyvec" version = "1.6.0" @@ -4908,7 +5983,10 @@ dependencies = [ "pin-project", "prost", "prost-derive", + "rustls-native-certs 0.6.2", + "rustls-pemfile", "tokio", + "tokio-rustls 0.23.4", "tokio-stream", "tokio-util", "tower", @@ -5003,6 +6081,16 @@ dependencies = [ "valuable", ] +[[package]] +name = "tracing-error" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d686ec1c0f384b1277f097b2f279a2ecc11afe8c133c1aabf036a27cb4cd206e" +dependencies = [ + "tracing", + "tracing-subscriber 0.3.11", +] + [[package]] name = "tracing-futures" version = "0.2.5" @@ -5099,6 +6187,12 @@ dependencies = [ "hash-db", ] +[[package]] +name = "triomphe" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eda0abf5a9b5ad4a5ac1393956ae03fb57033749d3983e2cac9afbfd5ae04ec2" + [[package]] name = "try-lock" version = "0.2.3" @@ -5130,6 +6224,15 @@ dependencies = [ "utf-8", ] +[[package]] +name = "twoway" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59b11b2b5241ba34be09c3cc85a36e56e48f9888862e19cedf23336d35316ed1" +dependencies = [ + "memchr", +] + [[package]] name = "twox-hash" version = "1.6.3" @@ -5190,6 +6293,12 @@ dependencies = [ "tinyvec", ] +[[package]] +name = "unicode-width" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ed742d4ea2bd1176e236172c8429aaf54486e7ac098db29ffe6529e0ce50973" + [[package]] name = "unicode-xid" version = "0.2.3" @@ -5242,6 +6351,18 @@ name = "uuid" version = "0.8.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" +dependencies = [ + "getrandom 0.2.6", +] + +[[package]] +name = "uuid" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd6469f4314d5f1ffec476e05f17cc9a78bc7a27a6a857842170bdf8d6f98d2f" +dependencies = [ + "getrandom 0.2.6", +] [[package]] name = "valuable" @@ -5255,6 +6376,15 @@ version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" +[[package]] +name = "wait-timeout" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f200f5b12eb75f8c1ed65abd4b2db8a6e1b138a20de009dacee265a2498f3f6" +dependencies = [ + "libc", +] + [[package]] name = "walkdir" version = "2.3.2" @@ -5503,6 +6633,15 @@ dependencies = [ "tap", ] +[[package]] +name = "yaml-rust" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56c1936c4cc7a1c9ab21a1ebb602eb942ba868cbd44a99cb7cdc5892335e1c85" +dependencies = [ + "linked-hash-map", +] + [[package]] name = "zeroize" version = "1.5.5" diff --git a/relayer-cli/Cargo.toml b/relayer-cli/Cargo.toml index 0e61126001..573470307b 100644 --- a/relayer-cli/Cargo.toml +++ b/relayer-cli/Cargo.toml @@ -58,22 +58,27 @@ dialoguer = "0.10.1" console = "0.15.0" [dependencies.tendermint-proto] -version = "=0.23.7" +git = "https://github.com/composableFi/tendermint-rs" +branch = "seun-0.23.7" [dependencies.tendermint] -version = "=0.23.7" +git = "https://github.com/composableFi/tendermint-rs" +branch = "seun-0.23.7" features = ["secp256k1"] [dependencies.tendermint-rpc] -version = "=0.23.7" +git = "https://github.com/composableFi/tendermint-rs" +branch = "seun-0.23.7" features = ["http-client", "websocket-client"] [dependencies.tendermint-light-client] -version = "=0.23.7" +git = "https://github.com/composableFi/tendermint-rs" +branch = "seun-0.23.7" features = ["unstable"] [dependencies.tendermint-light-client-verifier] -version = "=0.23.7" +git = "https://github.com/composableFi/tendermint-rs" +branch = "seun-0.23.7" [dependencies.abscissa_core] version = "=0.6.0" diff --git a/relayer/Cargo.toml b/relayer/Cargo.toml index 27ad02be9b..c8b0f24569 100644 --- a/relayer/Cargo.toml +++ b/relayer/Cargo.toml @@ -73,20 +73,24 @@ version = "0.4.0" features = ["num-bigint", "serde"] [dependencies.tendermint] -version = "=0.23.7" +git = "https://github.com/composableFi/tendermint-rs" +branch = "seun-0.23.7" features = ["secp256k1"] [dependencies.tendermint-rpc] -version = "=0.23.7" +git = "https://github.com/composableFi/tendermint-rs" +branch = "seun-0.23.7" features = ["http-client", "websocket-client"] [dependencies.tendermint-light-client] -version = "=0.23.7" +git = "https://github.com/composableFi/tendermint-rs" +branch = "seun-0.23.7" default-features = false features = ["rpc-client", "secp256k1", "unstable"] [dependencies.tendermint-light-client-verifier] -version = "=0.23.7" +git = "https://github.com/composableFi/tendermint-rs" +branch = "seun-0.23.7" default-features = false [dependencies.tendermint-proto] @@ -100,4 +104,4 @@ tracing-subscriber = { version = "0.3.11", features = ["fmt", "env-filter", "jso test-log = { version = "0.2.10", features = ["trace"] } # Needed for generating (synthetic) light blocks. 
-tendermint-testgen = { version = "=0.23.7" } +tendermint-testgen = { git = "https://github.com/composableFi/tendermint-rs", branch = "seun-0.23.7" } diff --git a/tools/integration-test/Cargo.toml b/tools/integration-test/Cargo.toml index bf00cb6a80..1c6de23908 100644 --- a/tools/integration-test/Cargo.toml +++ b/tools/integration-test/Cargo.toml @@ -19,11 +19,11 @@ ibc-relayer = { path = "../../relayer" } ibc-relayer-cli = { path = "../../relayer-cli" } ibc-proto = { path = "../../proto" } ibc-test-framework = { path = "../test-framework" } -tendermint = { version = "=0.23.7" } -tendermint-rpc = { version = "=0.23.7", features = ["http-client", "websocket-client"] } +tendermint = { git = "https://github.com/composableFi/tendermint-rs", branch = "seun-0.23.7" } +tendermint-rpc = { git = "https://github.com/composableFi/tendermint-rs", branch = "seun-0.23.7", features = ["http-client", "websocket-client"] } serde_json = "1" -modelator = { git = "https://github.com/informalsystems/modelator", optional = true } +# modelator = { git = "https://github.com/informalsystems/modelator", optional = true } time = "0.3" serde = "1.0.136" @@ -34,7 +34,7 @@ manual = [] ordered = [] ica = [] experimental = [] -mbt = ["modelator"] +# mbt = ["modelator"] [[bin]] name = "test_setup_with_binary_channel" diff --git a/tools/test-framework/Cargo.toml b/tools/test-framework/Cargo.toml index ca1d0ada4e..2d1a0419ef 100644 --- a/tools/test-framework/Cargo.toml +++ b/tools/test-framework/Cargo.toml @@ -18,8 +18,8 @@ ibc = { version = "=0.15.0", path = "../../modules" } ibc-relayer = { version = "=0.15.0", path = "../../relayer" } ibc-relayer-cli = { version = "=0.15.0", path = "../../relayer-cli" } ibc-proto = { version = "=0.18.0", path = "../../proto" } -tendermint = { version = "=0.23.7" } -tendermint-rpc = { version = "=0.23.7", features = ["http-client", "websocket-client"] } +tendermint = { git = "https://github.com/composableFi/tendermint-rs", branch = "seun-0.23.7" } +tendermint-rpc = { git = "https://github.com/composableFi/tendermint-rs", branch = "seun-0.23.7", features = ["http-client", "websocket-client"] } async-trait = "0.1.56" http = "0.2.6" From 379b508d173a7239b6f4698d552833bcbfb204a4 Mon Sep 17 00:00:00 2001 From: Blas Rodriguez Irizar Date: Fri, 17 Jun 2022 17:47:25 +0200 Subject: [PATCH 53/96] attempt to feature gate substrate libs for beefy (#20) * attempt to feature gate substrate libs for beefy Brute force approach for feature gating everything beefy related. There's likely a better approach, which might involve hiding some types and functions so that the exposure to sp* can be hidden and gated more easily. 
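
For reference, the gating pattern applied across modules/src/clients and the
ics02_client enums boils down to the minimal, self-contained sketch below.
It uses placeholder types only, and it assumes the Cargo feature is declared
with exactly the same spelling as the cfg flag (underscores included), since
cargo passes feature names through to cfg literally and does not map a
hyphenated "ics11-beefy" feature to feature = "ics11_beefy":

    // Sketch only: placeholder types stand in for the real client states.
    // Everything that would pull in sp-* lives behind the feature (or test),
    // so a default build never compiles it.

    #[cfg(any(test, feature = "ics11_beefy"))]
    mod ics11_beefy {
        // In the real crate this is where sp-core / sp-std types appear.
        #[derive(Clone, Debug)]
        pub struct ClientState;
    }

    #[derive(Clone, Debug)]
    pub struct TendermintClientState;

    #[derive(Clone, Debug)]
    pub enum AnyClientState {
        Tendermint(TendermintClientState),
        #[cfg(any(test, feature = "ics11_beefy"))]
        Beefy(ics11_beefy::ClientState),
    }

    impl AnyClientState {
        pub fn is_beefy(&self) -> bool {
            match self {
                AnyClientState::Tendermint(_) => false,
                #[cfg(any(test, feature = "ics11_beefy"))]
                AnyClientState::Beefy(_) => true,
            }
        }
    }

The sketch builds both with and without the feature: the cfg'd match arm
disappears together with the cfg'd variant, so the match stays exhaustive in
either configuration.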
--- modules/Cargo.toml | 10 +- modules/src/clients/host_functions.rs | 5 +- modules/src/clients/ics11_beefy/client_def.rs | 4 +- modules/src/clients/ics11_beefy/header.rs | 2 +- modules/src/clients/mod.rs | 2 + .../src/core/ics02_client/client_consensus.rs | 8 +- modules/src/core/ics02_client/client_def.rs | 32 ++- modules/src/core/ics02_client/client_state.rs | 28 ++ modules/src/core/ics02_client/client_type.rs | 7 + modules/src/core/ics02_client/error.rs | 268 ++++++++++++++++++ .../ics02_client/handler/update_client.rs | 3 +- modules/src/core/ics02_client/header.rs | 7 + modules/src/core/ics23_commitment/merkle.rs | 3 + modules/src/core/ics24_host/identifier.rs | 2 + 14 files changed, 368 insertions(+), 13 deletions(-) diff --git a/modules/Cargo.toml b/modules/Cargo.toml index f09d992cc0..66638f1e88 100644 --- a/modules/Cargo.toml +++ b/modules/Cargo.toml @@ -36,6 +36,7 @@ clock = ["tendermint/clock", "time/std"] # This feature grants access to development-time mocking libraries, such as `MockContext` or `MockHeader`. # Depends on the `testgen` suite for generating Tendermint light blocks. mocks = ["tendermint-testgen", "clock", "std", "sp-io", "sp-io/std", "sha3", "ripemd"] +ics11-beefy = ["sp-io", "sp-core", "sp-std"] [dependencies] # Proto definitions for all IBC-related interfaces, e.g., connections or channels. @@ -58,17 +59,18 @@ num-traits = { version = "0.2.15", default-features = false } derive_more = { version = "0.99.17", default-features = false, features = ["from", "into", "display"] } uint = { version = "0.9", default-features = false } beefy-client = { package = "beefy-generic-client", git = "https://github.com/ComposableFi/beefy-client", branch = "master", default-features = false } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.22", default-features = false } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.22", default-features = false } pallet-mmr-primitives = { package = "sp-mmr-primitives", git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.22", default-features = false } beefy-primitives = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.22", default-features = false } codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false } -sp-std = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.22", default-features = false } -sp-trie = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.22", default-features = false } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.22", default-features = false, optional = true } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.22", default-features = false, optional = true} +sp-std = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.22", default-features = false, optional = true } +sp-trie = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.22", default-features = false, optional = true } sp-io = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.22", default-features = false, optional = true } sha3 = { version = "0.10.1", optional = true } ripemd = { version = "0.1.1", optional = true } + [dependencies.tendermint] git = "https://github.com/composableFi/tendermint-rs" branch = "seun-0.23.7" diff --git a/modules/src/clients/host_functions.rs b/modules/src/clients/host_functions.rs index 
1c54a3326e..755d7b1261 100644 --- a/modules/src/clients/host_functions.rs +++ b/modules/src/clients/host_functions.rs @@ -1,7 +1,6 @@ use crate::core::ics02_client::error::Error; use crate::prelude::*; use core::marker::PhantomData; -use sp_core::H256; /// This trait captures all the functions that the host chain should provide for /// crypto operations. @@ -21,7 +20,7 @@ pub trait HostFunctionsProvider: Clone + Send + Sync + Default { /// This function should verify membership in a trie proof using parity's sp-trie package /// with a BlakeTwo256 Hasher fn verify_membership_trie_proof( - root: &H256, + root: &[u8; 32], proof: &[Vec], key: &[u8], value: &[u8], @@ -30,7 +29,7 @@ pub trait HostFunctionsProvider: Clone + Send + Sync + Default { /// This function should verify non membership in a trie proof using parity's sp-trie package /// with a BlakeTwo256 Hasher fn verify_non_membership_trie_proof( - root: &H256, + root: &[u8; 32], proof: &[Vec], key: &[u8], ) -> Result<(), Error>; diff --git a/modules/src/clients/ics11_beefy/client_def.rs b/modules/src/clients/ics11_beefy/client_def.rs index d734b1cf3b..4bfe7ae56b 100644 --- a/modules/src/clients/ics11_beefy/client_def.rs +++ b/modules/src/clients/ics11_beefy/client_def.rs @@ -411,7 +411,7 @@ fn verify_membership>( let trie_proof: Vec> = codec::Decode::decode(&mut &*trie_proof) .map_err(|e| Error::beefy(BeefyError::scale_decode(e)))?; let root = H256::from_slice(root.as_bytes()); - HostFunctions::verify_membership_trie_proof(&root, &trie_proof, &key, &value) + HostFunctions::verify_membership_trie_proof(root.as_fixed_bytes(), &trie_proof, &key, &value) } fn verify_non_membership>( @@ -431,7 +431,7 @@ fn verify_non_membership>( let trie_proof: Vec> = codec::Decode::decode(&mut &*trie_proof) .map_err(|e| Error::beefy(BeefyError::scale_decode(e)))?; let root = H256::from_slice(root.as_bytes()); - HostFunctions::verify_non_membership_trie_proof(&root, &trie_proof, &key) + HostFunctions::verify_non_membership_trie_proof(root.as_fixed_bytes(), &trie_proof, &key) } fn verify_delay_passed( diff --git a/modules/src/clients/ics11_beefy/header.rs b/modules/src/clients/ics11_beefy/header.rs index 3279a740c1..b20408a2a4 100644 --- a/modules/src/clients/ics11_beefy/header.rs +++ b/modules/src/clients/ics11_beefy/header.rs @@ -405,7 +405,7 @@ pub fn decode_timestamp_extrinsic( // Timestamp extrinsic should be the first inherent and hence the first extrinsic // https://github.com/paritytech/substrate/blob/d602397a0bbb24b5d627795b797259a44a5e29e9/primitives/trie/src/lib.rs#L99-L101 let key = codec::Encode::encode(&Compact(0u32)); - HostFunctions::verify_membership_trie_proof(&extrinsic_root, proof, &*key, ext) + HostFunctions::verify_membership_trie_proof(extrinsic_root.as_fixed_bytes(), proof, &*key, ext) .map_err(|e| Error::timestamp_extrinsic(format!("Proof Verification failed {:?}", e)))?; // Decoding from the [2..] because the timestamp inmherent has two extra bytes before the call that represents the // call length and the extrinsic version. 
diff --git a/modules/src/clients/mod.rs b/modules/src/clients/mod.rs index 32a192b301..1b716ed30f 100644 --- a/modules/src/clients/mod.rs +++ b/modules/src/clients/mod.rs @@ -2,5 +2,7 @@ pub mod host_functions; pub mod ics07_tendermint; +#[cfg(any(test, feature = "ics11_beefy"))] pub mod ics11_beefy; +#[cfg(any(test, feature = "ics11_beefy"))] pub mod ics13_near; diff --git a/modules/src/core/ics02_client/client_consensus.rs b/modules/src/core/ics02_client/client_consensus.rs index dabbf15013..40729d00a3 100644 --- a/modules/src/core/ics02_client/client_consensus.rs +++ b/modules/src/core/ics02_client/client_consensus.rs @@ -10,6 +10,7 @@ use serde::Serialize; use tendermint_proto::Protobuf; use crate::clients::ics07_tendermint::consensus_state; +#[cfg(any(test, feature = "ics11_beefy"))] use crate::clients::ics11_beefy::consensus_state as beefy_consensus_state; use crate::core::ics02_client::client_type::ClientType; use crate::core::ics02_client::error::Error; @@ -46,6 +47,7 @@ pub trait ConsensusState: Clone + Debug + Send + Sync { #[serde(tag = "type")] pub enum AnyConsensusState { Tendermint(consensus_state::ConsensusState), + #[cfg(any(test, feature = "ics11_beefy"))] Beefy(beefy_consensus_state::ConsensusState), #[cfg(any(test, feature = "mocks"))] Mock(MockConsensusState), @@ -55,6 +57,7 @@ impl AnyConsensusState { pub fn timestamp(&self) -> Timestamp { match self { Self::Tendermint(cs_state) => cs_state.timestamp.into(), + #[cfg(any(test, feature = "ics11_beefy"))] Self::Beefy(cs_state) => cs_state.timestamp.into(), #[cfg(any(test, feature = "mocks"))] Self::Mock(mock_state) => mock_state.timestamp(), @@ -64,6 +67,7 @@ impl AnyConsensusState { pub fn client_type(&self) -> ClientType { match self { AnyConsensusState::Tendermint(_cs) => ClientType::Tendermint, + #[cfg(any(test, feature = "ics11_beefy"))] AnyConsensusState::Beefy(_) => ClientType::Beefy, #[cfg(any(test, feature = "mocks"))] AnyConsensusState::Mock(_cs) => ClientType::Mock, @@ -85,6 +89,7 @@ impl TryFrom for AnyConsensusState { .map_err(Error::decode_raw_client_state)?, )), + #[cfg(any(test, feature = "ics11_beefy"))] BEEFY_CONSENSUS_STATE_TYPE_URL => Ok(AnyConsensusState::Beefy( beefy_consensus_state::ConsensusState::decode_vec(&value.value) .map_err(Error::decode_raw_client_state)?, @@ -110,7 +115,7 @@ impl From for Any { .encode_vec() .expect("encoding to `Any` from `AnyConsensusState::Tendermint`"), }, - + #[cfg(any(test, feature = "ics11_beefy"))] AnyConsensusState::Beefy(value) => Any { type_url: BEEFY_CONSENSUS_STATE_TYPE_URL.to_string(), value: value @@ -172,6 +177,7 @@ impl ConsensusState for AnyConsensusState { fn root(&self) -> &CommitmentRoot { match self { Self::Tendermint(cs_state) => cs_state.root(), + #[cfg(any(test, feature = "ics11_beefy"))] Self::Beefy(cs_state) => cs_state.root(), #[cfg(any(test, feature = "mocks"))] Self::Mock(mock_state) => mock_state.root(), diff --git a/modules/src/core/ics02_client/client_def.rs b/modules/src/core/ics02_client/client_def.rs index d4ae84d88c..d2bc7ed59a 100644 --- a/modules/src/core/ics02_client/client_def.rs +++ b/modules/src/core/ics02_client/client_def.rs @@ -1,5 +1,6 @@ use crate::clients::host_functions::HostFunctionsProvider; use crate::clients::ics07_tendermint::client_def::TendermintClient; +#[cfg(any(test, feature = "ics11_beefy"))] use crate::clients::ics11_beefy::client_def::BeefyClient; use crate::core::ics02_client::client_consensus::{AnyConsensusState, ConsensusState}; use crate::core::ics02_client::client_state::{AnyClientState, ClientState}; @@ 
-209,7 +210,9 @@ pub trait ClientDef: Clone { #[derive(Clone, Debug)] pub enum AnyClient { Tendermint(TendermintClient), + #[cfg(any(test, feature = "ics11_beefy"))] Beefy(BeefyClient), + #[cfg(any(test, feature = "ics11_beefy"))] Near(BeefyClient), #[cfg(any(test, feature = "mocks"))] Mock(MockClient), @@ -221,7 +224,9 @@ impl AnyClient { ClientType::Tendermint => { Self::Tendermint(TendermintClient::::default()) } + #[cfg(any(test, feature = "ics11_beefy"))] ClientType::Beefy => Self::Beefy(BeefyClient::::default()), + #[cfg(any(test, feature = "ics11_beefy"))] ClientType::Near => Self::Near(BeefyClient::::default()), #[cfg(any(test, feature = "mocks"))] ClientType::Mock => Self::Mock(MockClient::default()), @@ -254,6 +259,7 @@ impl ClientDef for AnyClient { let (client_state, header) = downcast!( client_state => AnyClientState::Beefy, @@ -264,6 +270,7 @@ impl ClientDef for AnyClient { // let (client_state, header) = downcast!( // client_state => AnyClientState::Beefy, @@ -309,7 +316,7 @@ impl ClientDef for AnyClient { let (client_state, header) = downcast!( client_state => AnyClientState::Beefy, @@ -322,6 +329,7 @@ impl ClientDef for AnyClient { todo!() } @@ -357,6 +365,7 @@ impl ClientDef for AnyClient { let (client_state, header) = downcast!( client_state => AnyClientState::Beefy, @@ -367,6 +376,7 @@ impl ClientDef for AnyClient { todo!() } @@ -401,6 +411,7 @@ impl ClientDef for AnyClient { let (client_state, header) = downcast!( client_state => AnyClientState::Beefy, @@ -410,6 +421,7 @@ impl ClientDef for AnyClient { todo!() } @@ -451,6 +463,7 @@ impl ClientDef for AnyClient { let (client_state, consensus_state) = downcast!( client_state => AnyClientState::Beefy, @@ -468,6 +481,7 @@ impl ClientDef for AnyClient { todo!() } @@ -524,6 +538,7 @@ impl ClientDef for AnyClient { let client_state = downcast!( client_state => AnyClientState::Beefy @@ -542,6 +557,7 @@ impl ClientDef for AnyClient { todo!() } @@ -596,6 +612,7 @@ impl ClientDef for AnyClient { let client_state = downcast!(client_state => AnyClientState::Beefy) .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Beefy))?; @@ -612,6 +629,7 @@ impl ClientDef for AnyClient { todo!() } @@ -667,6 +685,7 @@ impl ClientDef for AnyClient { let client_state = downcast!(client_state => AnyClientState::Beefy) .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Beefy))?; @@ -684,6 +703,7 @@ impl ClientDef for AnyClient { todo!() } @@ -737,6 +757,7 @@ impl ClientDef for AnyClient { let client_state = downcast!( client_state => AnyClientState::Beefy @@ -754,6 +775,7 @@ impl ClientDef for AnyClient { todo!() } @@ -814,6 +836,7 @@ impl ClientDef for AnyClient { let client_state = downcast!( client_state => AnyClientState::Beefy @@ -834,6 +857,7 @@ impl ClientDef for AnyClient { todo!() } @@ -897,6 +921,7 @@ impl ClientDef for AnyClient { let client_state = downcast!( client_state => AnyClientState::Beefy @@ -917,6 +942,7 @@ impl ClientDef for AnyClient { todo!() } @@ -977,6 +1003,7 @@ impl ClientDef for AnyClient { let client_state = downcast!( client_state => AnyClientState::Beefy @@ -996,6 +1023,7 @@ impl ClientDef for AnyClient { todo!() } @@ -1056,6 +1084,7 @@ impl ClientDef for AnyClient { let client_state = downcast!( client_state => AnyClientState::Beefy @@ -1075,6 +1104,7 @@ impl ClientDef for AnyClient { todo!() } diff --git a/modules/src/core/ics02_client/client_state.rs b/modules/src/core/ics02_client/client_state.rs index 3164ace075..fd696cdb12 100644 --- a/modules/src/core/ics02_client/client_state.rs +++ 
b/modules/src/core/ics02_client/client_state.rs @@ -8,6 +8,7 @@ use tendermint_proto::Protobuf; use ibc_proto::ibc::core::client::v1::IdentifiedClientState; use crate::clients::ics07_tendermint::client_state; +#[cfg(any(test, feature = "ics11_beefy"))] use crate::clients::ics11_beefy::client_state as beefy_client_state; use crate::core::ics02_client::client_type::ClientType; use crate::core::ics02_client::error::Error; @@ -63,6 +64,7 @@ pub trait ClientState: Clone + core::fmt::Debug + Send + Sync { #[serde(tag = "type")] pub enum AnyUpgradeOptions { Tendermint(client_state::UpgradeOptions), + #[cfg(any(test, feature = "ics11_beefy"))] Beefy(beefy_client_state::UpgradeOptions), #[cfg(any(test, feature = "mocks"))] Mock(()), @@ -72,6 +74,7 @@ impl AnyUpgradeOptions { fn into_tendermint(self) -> client_state::UpgradeOptions { match self { Self::Tendermint(options) => options, + #[cfg(any(test, feature = "ics11_beefy"))] Self::Beefy(_) => { panic!("cannot downcast AnyUpgradeOptions::Beefy to Tendermint::UpgradeOptions") } @@ -82,11 +85,13 @@ impl AnyUpgradeOptions { } } + #[cfg(any(test, feature = "ics11_beefy"))] fn into_beefy(self) -> beefy_client_state::UpgradeOptions { match self { Self::Tendermint(_) => { panic!("cannot downcast AnyUpgradeOptions::Tendermint to Beefy::UpgradeOptions") } + #[cfg(any(test, feature = "ics11_beefy"))] Self::Beefy(options) => options, #[cfg(any(test, feature = "mocks"))] Self::Mock(_) => { @@ -100,8 +105,10 @@ impl AnyUpgradeOptions { #[serde(tag = "type")] pub enum AnyClientState { Tendermint(client_state::ClientState), + #[cfg(any(test, feature = "ics11_beefy"))] #[serde(skip)] Beefy(beefy_client_state::ClientState), + #[cfg(any(test, feature = "ics11_beefy"))] #[serde(skip)] Near(beefy_client_state::ClientState), #[cfg(any(test, feature = "mocks"))] @@ -112,7 +119,9 @@ impl AnyClientState { pub fn latest_height(&self) -> Height { match self { Self::Tendermint(tm_state) => tm_state.latest_height(), + #[cfg(any(test, feature = "ics11_beefy"))] Self::Beefy(bf_state) => bf_state.latest_height(), + #[cfg(any(test, feature = "ics11_beefy"))] Self::Near(_) => todo!(), #[cfg(any(test, feature = "mocks"))] Self::Mock(mock_state) => mock_state.latest_height(), @@ -122,7 +131,9 @@ impl AnyClientState { pub fn frozen_height(&self) -> Option { match self { Self::Tendermint(tm_state) => tm_state.frozen_height(), + #[cfg(any(test, feature = "ics11_beefy"))] Self::Beefy(bf_state) => bf_state.frozen_height(), + #[cfg(any(test, feature = "ics11_beefy"))] Self::Near(_) => todo!(), #[cfg(any(test, feature = "mocks"))] Self::Mock(mock_state) => mock_state.frozen_height(), @@ -132,7 +143,9 @@ impl AnyClientState { pub fn trust_threshold(&self) -> Option { match self { AnyClientState::Tendermint(state) => Some(state.trust_level), + #[cfg(any(test, feature = "ics11_beefy"))] AnyClientState::Beefy(_) => None, + #[cfg(any(test, feature = "ics11_beefy"))] Self::Near(_) => todo!(), #[cfg(any(test, feature = "mocks"))] AnyClientState::Mock(_) => None, @@ -142,7 +155,9 @@ impl AnyClientState { pub fn max_clock_drift(&self) -> Duration { match self { AnyClientState::Tendermint(state) => state.max_clock_drift, + #[cfg(any(test, feature = "ics11_beefy"))] AnyClientState::Beefy(_) => Duration::new(0, 0), + #[cfg(any(test, feature = "ics11_beefy"))] Self::Near(_) => todo!(), #[cfg(any(test, feature = "mocks"))] AnyClientState::Mock(_) => Duration::new(0, 0), @@ -152,7 +167,9 @@ impl AnyClientState { pub fn client_type(&self) -> ClientType { match self { Self::Tendermint(state) => 
state.client_type(), + #[cfg(any(test, feature = "ics11_beefy"))] Self::Beefy(state) => state.client_type(), + #[cfg(any(test, feature = "ics11_beefy"))] Self::Near(_) => todo!(), #[cfg(any(test, feature = "mocks"))] Self::Mock(state) => state.client_type(), @@ -162,7 +179,9 @@ impl AnyClientState { pub fn refresh_period(&self) -> Option { match self { AnyClientState::Tendermint(tm_state) => tm_state.refresh_time(), + #[cfg(any(test, feature = "ics11_beefy"))] AnyClientState::Beefy(_) => None, + #[cfg(any(test, feature = "ics11_beefy"))] Self::Near(_) => None, #[cfg(any(test, feature = "mocks"))] AnyClientState::Mock(mock_state) => mock_state.refresh_time(), @@ -172,7 +191,9 @@ impl AnyClientState { pub fn expired(&self, elapsed_since_latest: Duration) -> bool { match self { AnyClientState::Tendermint(tm_state) => tm_state.expired(elapsed_since_latest), + #[cfg(any(test, feature = "ics11_beefy"))] AnyClientState::Beefy(_) => false, + #[cfg(any(test, feature = "ics11_beefy"))] Self::Near(_) => false, #[cfg(any(test, feature = "mocks"))] AnyClientState::Mock(mock_state) => mock_state.expired(elapsed_since_latest), @@ -194,6 +215,7 @@ impl TryFrom for AnyClientState { .map_err(Error::decode_raw_client_state)?, )), + #[cfg(any(test, feature = "ics11_beefy"))] BEEFY_CLIENT_STATE_TYPE_URL => Ok(AnyClientState::Beefy( beefy_client_state::ClientState::decode_vec(&raw.value) .map_err(Error::decode_raw_client_state)?, @@ -218,12 +240,14 @@ impl From for Any { .encode_vec() .expect("encoding to `Any` from `AnyClientState::Tendermint`"), }, + #[cfg(any(test, feature = "ics11_beefy"))] AnyClientState::Beefy(value) => Any { type_url: BEEFY_CLIENT_STATE_TYPE_URL.to_string(), value: value .encode_vec() .expect("encoding to `Any` from `AnyClientState::Tendermint`"), }, + #[cfg(any(test, feature = "ics11_beefy"))] AnyClientState::Near(_) => Any { type_url: BEEFY_CLIENT_STATE_TYPE_URL.to_string(), value: value @@ -247,7 +271,9 @@ impl ClientState for AnyClientState { fn chain_id(&self) -> ChainId { match self { AnyClientState::Tendermint(tm_state) => tm_state.chain_id(), + #[cfg(any(test, feature = "ics11_beefy"))] AnyClientState::Beefy(bf_state) => bf_state.chain_id(), + #[cfg(any(test, feature = "ics11_beefy"))] AnyClientState::Near(_) => todo!(), #[cfg(any(test, feature = "mocks"))] AnyClientState::Mock(mock_state) => mock_state.chain_id(), @@ -276,9 +302,11 @@ impl ClientState for AnyClientState { AnyClientState::Tendermint(tm_state) => tm_state .upgrade(upgrade_height, upgrade_options.into_tendermint(), chain_id) .wrap_any(), + #[cfg(any(test, feature = "ics11_beefy"))] AnyClientState::Beefy(bf_state) => bf_state .upgrade(upgrade_height, upgrade_options.into_beefy(), chain_id) .wrap_any(), + #[cfg(any(test, feature = "ics11_beefy"))] AnyClientState::Near(near_state) => near_state .upgrade(upgrade_height, upgrade_options.into_beefy(), chain_id) .wrap_any(), diff --git a/modules/src/core/ics02_client/client_type.rs b/modules/src/core/ics02_client/client_type.rs index e7ca17eef3..f2c84ac0e7 100644 --- a/modules/src/core/ics02_client/client_type.rs +++ b/modules/src/core/ics02_client/client_type.rs @@ -8,7 +8,9 @@ use super::error::Error; #[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] pub enum ClientType { Tendermint = 1, + #[cfg(any(test, feature = "ics11_beefy"))] Beefy = 2, + #[cfg(any(test, feature = "ics11_beefy"))] Near = 3, #[cfg(any(test, feature = "mocks"))] Mock = 9999, @@ -16,7 +18,9 @@ pub enum ClientType { impl ClientType { const TENDERMINT_STR: 
&'static str = "07-tendermint"; + #[cfg(any(test, feature = "ics11_beefy"))] const BEEFY_STR: &'static str = "11-beefy"; + #[cfg(any(test, feature = "ics11_beefy"))] const NEAR_STR: &'static str = "11-beefy"; #[cfg_attr(not(test), allow(dead_code))] @@ -26,7 +30,9 @@ impl ClientType { pub fn as_str(&self) -> &'static str { match self { Self::Tendermint => Self::TENDERMINT_STR, + #[cfg(any(test, feature = "ics11_beefy"))] Self::Beefy => Self::BEEFY_STR, + #[cfg(any(test, feature = "ics11_beefy"))] Self::Near => Self::NEAR_STR, #[cfg(any(test, feature = "mocks"))] Self::Mock => Self::MOCK_STR, @@ -46,6 +52,7 @@ impl core::str::FromStr for ClientType { fn from_str(s: &str) -> Result { match s { Self::TENDERMINT_STR => Ok(Self::Tendermint), + #[cfg(any(test, feature = "ics11_beefy"))] Self::BEEFY_STR => Ok(Self::Beefy), #[cfg(any(test, feature = "mocks"))] Self::MOCK_STR => Ok(Self::Mock), diff --git a/modules/src/core/ics02_client/error.rs b/modules/src/core/ics02_client/error.rs index a1e58fd2f6..f7a08e623c 100644 --- a/modules/src/core/ics02_client/error.rs +++ b/modules/src/core/ics02_client/error.rs @@ -5,7 +5,9 @@ use tendermint::Error as TendermintError; use tendermint_proto::Error as TendermintProtoError; use crate::clients::ics07_tendermint::error::Error as Ics07Error; +#[cfg(any(test, feature = "ics11_beefy"))] use crate::clients::ics11_beefy::error::Error as Ics11Error; +#[cfg(any(test, feature = "ics11_beefy"))] use crate::clients::ics13_near::error::Error as Ics13Error; use crate::core::ics02_client::client_type::ClientType; use crate::core::ics02_client::height::HeightError; @@ -16,6 +18,270 @@ use crate::signer::SignerError; use crate::timestamp::Timestamp; use crate::Height; +#[cfg(not(any(test, feature = "ics11_beefy")))] +define_error! 
{ + #[derive(Debug, PartialEq, Eq)] + Error { + UnknownClientType + { client_type: String } + | e | { format_args!("unknown client type: {0}", e.client_type) }, + + ClientIdentifierConstructor + { client_type: ClientType, counter: u64 } + [ ValidationError ] + | e | { + format_args!("Client identifier constructor failed for type {0} with counter {1}", + e.client_type, e.counter) + }, + + ClientAlreadyExists + { client_id: ClientId } + | e | { format_args!("client already exists: {0}", e.client_id) }, + + ClientNotFound + { client_id: ClientId } + | e | { format_args!("client not found: {0}", e.client_id) }, + + ClientFrozen + { client_id: ClientId } + | e | { format_args!("client is frozen: {0}", e.client_id) }, + + ConsensusStateNotFound + { client_id: ClientId, height: Height } + | e | { + format_args!("consensus state not found at: {0} at height {1}", + e.client_id, e.height) + }, + + ImplementationSpecific + { reason: String } + | e | { format_args!("implementation specific error: {}", e.reason) }, + + HeaderVerificationFailure + { reason: String } + | e | { format_args!("header verification failed with reason: {}", e.reason) }, + + InvalidTrustThreshold + { numerator: u64, denominator: u64 } + | e | { format_args!("failed to build trust threshold from fraction: {}/{}", e.numerator, e.denominator) }, + + FailedTrustThresholdConversion + { numerator: u64, denominator: u64 } + [ TendermintError ] + | e | { format_args!("failed to build Tendermint domain type trust threshold from fraction: {}/{}", e.numerator, e.denominator) }, + + UnknownClientStateType + { client_state_type: String } + | e | { format_args!("unknown client state type: {0}", e.client_state_type) }, + + EmptyClientStateResponse + | _ | { "the client state was not found" }, + + EmptyPrefix + | _ | { "empty prefix" }, + + UnknownConsensusStateType + { consensus_state_type: String } + | e | { + format_args!("unknown client consensus state type: {0}", + e.consensus_state_type) + }, + + EmptyConsensusStateResponse + | _ | { "the client consensus state was not found" }, + + UnknownHeaderType + { header_type: String } + | e | { + format_args!("unknown header type: {0}", + e.header_type) + }, + + UnknownMisbehaviourType + { misbehavior_type: String } + | e | { + format_args!("unknown misbehaviour type: {0}", + e.misbehavior_type) + }, + + InvalidRawClientId + { client_id: String } + [ ValidationError ] + | e | { + format_args!("invalid raw client identifier {0}", + e.client_id) + }, + + DecodeRawClientState + [ TraceError ] + | _ | { "error decoding raw client state" }, + + MissingRawClientState + | _ | { "missing raw client state" }, + + InvalidRawConsensusState + [ TraceError ] + | _ | { "invalid raw client consensus state" }, + + MissingRawConsensusState + | _ | { "missing raw client consensus state" }, + + InvalidMsgUpdateClientId + [ ValidationError ] + | _ | { "invalid client id in the update client message" }, + + Decode + [ TraceError ] + | _ | { "decode error" }, + + MissingHeight + | _ | { "invalid raw client consensus state: the height field is missing" }, + + InvalidClientIdentifier + [ ValidationError ] + | _ | { "invalid client identifier" }, + + InvalidRawHeader + [ TraceError ] + | _ | { "invalid raw header" }, + + MissingRawHeader + | _ | { "missing raw header" }, + + DecodeRawMisbehaviour + [ TraceError ] + | _ | { "invalid raw misbehaviour" }, + + InvalidRawMisbehaviour + [ ValidationError ] + | _ | { "invalid raw misbehaviour" }, + + MissingRawMisbehaviour + | _ | { "missing raw misbehaviour" }, + + 
InvalidStringAsHeight + { value: String } + [ HeightError ] + | e | { format_args!("String {0} cannnot be converted to height", e.value) }, + + InvalidHeightResult + | _ | { "height cannot end up zero or negative" }, + + InvalidAddress + | _ | { "invalid address" }, + + InvalidUpgradeClientProof + [ Ics23Error ] + | _ | { "invalid proof for the upgraded client state" }, + + InvalidUpgradeConsensusStateProof + [ Ics23Error ] + | _ | { "invalid proof for the upgraded consensus state" }, + + InvalidCommitmentProof + [ Ics23Error ] + | _ | { "invalid commitment proof bytes" }, + + Tendermint + [ Ics07Error ] + | _ | { "tendermint error" }, + + InvalidPacketTimestamp + [ crate::timestamp::ParseTimestampError ] + | _ | { "invalid packet timeout timestamp value" }, + + ClientArgsTypeMismatch + { client_type: ClientType } + | e | { + format_args!("mismatch between client and arguments types, expected: {0:?}", + e.client_type) + }, + + InsufficientVotingPower + { reason: String } + | e | { + format_args!("Insufficient overlap {}", e.reason) + }, + + RawClientAndConsensusStateTypesMismatch + { + state_type: ClientType, + consensus_type: ClientType, + } + | e | { + format_args!("mismatch in raw client consensus state {} with expected state {}", + e.state_type, e.consensus_type) + }, + + LowHeaderHeight + { + header_height: Height, + latest_height: Height + } + | e | { + format!("received header height ({:?}) is lower than (or equal to) client latest height ({:?})", + e.header_height, e.latest_height) + }, + + LowUpgradeHeight + { + upgraded_height: Height, + client_height: Height, + } + | e | { + format_args!("upgraded client height {} must be at greater than current client height {}", + e.upgraded_height, e.client_height) + }, + + InvalidConsensusStateTimestamp + { + time1: Timestamp, + time2: Timestamp, + } + | e | { + format_args!("timestamp is invalid or missing, timestamp={0}, now={1}", e.time1, e.time2) + }, + + HeaderNotWithinTrustPeriod + { + latest_time:Timestamp, + update_time: Timestamp, + } + | e | { + format_args!("header not withing trusting period: expires_at={0} now={1}", e.latest_time, e.update_time) + }, + + TendermintHandlerError + [ Ics07Error ] + | _ | { format_args!("Tendermint-specific handler error") }, + + MissingLocalConsensusState + { height: Height } + | e | { format_args!("the local consensus state could not be retrieved for height {}", e.height) }, + + InvalidConnectionEnd + [ TraceError] + | _ | { "invalid connection end" }, + + InvalidChannelEnd + [ TraceError] + | _ | { "invalid channel end" }, + + InvalidAnyClientState + [ TraceError] + | _ | { "invalid any client state" }, + + InvalidAnyConsensusState + [ TraceError ] + | _ | { "invalid any client consensus state" }, + + Signer + [ SignerError ] + | _ | { "failed to parse signer" }, + } +} + +#[cfg(any(test, feature = "ics11_beefy"))] define_error! { #[derive(Debug, PartialEq, Eq)] Error { @@ -292,12 +558,14 @@ impl From for Error { } } +#[cfg(any(test, feature = "ics11_beefy"))] impl From for Error { fn from(e: Ics11Error) -> Error { Error::beefy(e) } } +#[cfg(any(test, feature = "ics11_beefy"))] impl From for Error { fn from(e: Ics13Error) -> Error { Error::near(e) diff --git a/modules/src/core/ics02_client/handler/update_client.rs b/modules/src/core/ics02_client/handler/update_client.rs index 472ebc9054..55fc8351f9 100644 --- a/modules/src/core/ics02_client/handler/update_client.rs +++ b/modules/src/core/ics02_client/handler/update_client.rs @@ -1,10 +1,10 @@ //! 
Protocol logic specific to processing ICS2 messages of type `MsgUpdateAnyClient`. use core::fmt::Debug; -use tracing::debug; use crate::clients::host_functions::HostFunctionsProvider; use crate::core::ics02_client::client_def::{AnyClient, ClientDef, ConsensusUpdateResult}; use crate::core::ics02_client::client_state::{AnyClientState, ClientState}; +#[cfg(any(test, feature = "ics11_beefy"))] use crate::core::ics02_client::client_type::ClientType; use crate::core::ics02_client::error::Error; use crate::core::ics02_client::events::Attributes; @@ -53,6 +53,7 @@ pub fn process( return Err(Error::client_frozen(client_id)); } + #[cfg(any(test, feature = "ics11_beefy"))] if client_type != ClientType::Beefy { // Read consensus state from the host chain store. let latest_consensus_state = ctx diff --git a/modules/src/core/ics02_client/header.rs b/modules/src/core/ics02_client/header.rs index 7cd4b32871..1baf6d828a 100644 --- a/modules/src/core/ics02_client/header.rs +++ b/modules/src/core/ics02_client/header.rs @@ -6,6 +6,7 @@ use subtle_encoding::hex; use tendermint_proto::Protobuf; use crate::clients::ics07_tendermint::header::{decode_header, Header as TendermintHeader}; +#[cfg(any(test, feature = "ics11_beefy"))] use crate::clients::ics11_beefy::header::{decode_header as decode_beefy_header, BeefyHeader}; // use crate::clients::ics13_near::header::NearHeader; use crate::core::ics02_client::client_type::ClientType; @@ -35,6 +36,7 @@ pub trait Header: Clone + core::fmt::Debug + Send + Sync { pub enum AnyHeader { Tendermint(TendermintHeader), #[serde(skip)] + #[cfg(any(test, feature = "ics11_beefy"))] Beefy(BeefyHeader), // #[serde(skip)] // Near(NearHeader), @@ -46,6 +48,7 @@ impl AnyHeader { pub fn height(&self) -> Height { match self { Self::Tendermint(header) => header.height(), + #[cfg(any(test, feature = "ics11_beefy"))] Self::Beefy(_header) => Default::default(), // Self::Near(_header) => Default::default(), #[cfg(any(test, feature = "mocks"))] @@ -56,6 +59,7 @@ impl AnyHeader { pub fn timestamp(&self) -> Timestamp { match self { Self::Tendermint(header) => header.timestamp(), + #[cfg(any(test, feature = "ics11_beefy"))] Self::Beefy(_header) => Default::default(), // Self::Near(_header) => Default::default(), #[cfg(any(test, feature = "mocks"))] @@ -68,6 +72,7 @@ impl Header for AnyHeader { fn client_type(&self) -> ClientType { match self { Self::Tendermint(header) => header.client_type(), + #[cfg(any(test, feature = "ics11_beefy"))] Self::Beefy(header) => header.client_type(), // Self::Near(header) => header.client_type(), #[cfg(any(test, feature = "mocks"))] @@ -106,6 +111,7 @@ impl TryFrom for AnyHeader { Ok(AnyHeader::Tendermint(val)) } + #[cfg(any(test, feature = "ics11_beefy"))] BEEFY_HEADER_TYPE_URL => { let val = decode_beefy_header(&*raw.value).map_err(Error::beefy)?; Ok(AnyHeader::Beefy(val)) @@ -131,6 +137,7 @@ impl From for Any { .expect("encoding to `Any` from `AnyHeader::Tendermint`"), }, + #[cfg(any(test, feature = "ics11_beefy"))] AnyHeader::Beefy(header) => Any { type_url: BEEFY_HEADER_TYPE_URL.to_string(), value: header diff --git a/modules/src/core/ics23_commitment/merkle.rs b/modules/src/core/ics23_commitment/merkle.rs index e208ec5af7..5d9a5c16bb 100644 --- a/modules/src/core/ics23_commitment/merkle.rs +++ b/modules/src/core/ics23_commitment/merkle.rs @@ -1,5 +1,8 @@ use crate::prelude::*; +#[cfg(any(test, feature = "ics11_beefy"))] use sp_std::marker::PhantomData; +#[cfg(not(any(test, feature = "ics11_beefy")))] +use std::marker::PhantomData; use 
tendermint::merkle::proof::Proof as TendermintProof; use crate::clients::host_functions::{HostFunctionsManager, HostFunctionsProvider}; diff --git a/modules/src/core/ics24_host/identifier.rs b/modules/src/core/ics24_host/identifier.rs index db69345c61..50066f81ac 100644 --- a/modules/src/core/ics24_host/identifier.rs +++ b/modules/src/core/ics24_host/identifier.rs @@ -171,7 +171,9 @@ impl ClientId { pub fn prefix(client_type: ClientType) -> &'static str { match client_type { ClientType::Tendermint => ClientType::Tendermint.as_str(), + #[cfg(any(test, feature = "ics11_beefy"))] ClientType::Beefy => ClientType::Beefy.as_str(), + #[cfg(any(test, feature = "ics11_beefy"))] ClientType::Near => ClientType::Near.as_str(), #[cfg(any(test, feature = "mocks"))] ClientType::Mock => ClientType::Mock.as_str(), From b54ced454435de0c1d7893bbc80d1b750a55eb1e Mon Sep 17 00:00:00 2001 From: Web3 Smith <31099392+Wizdave97@users.noreply.github.com> Date: Mon, 20 Jun 2022 12:48:08 +0100 Subject: [PATCH 54/96] Update `tendermint-rs`. (#22) * Add codespace information in unknown SDK error (#2268) * ICS20 API improvements (#2280) * Remove `Debug` and `'static` requirements on Module trait * Manually implement Debug for `MockRouter` * Remove `Ics20Reader` supertrait `PortReader` * Use primitive_types::U256 instead of uint::construct_uint!() * Impl serde for Amount * Add .changelog entries * Fix relayer Dockerfile for M1 Mac (#2286) Otherwise library not found error will be encountered Ref: https://github.com/nodejs/help/issues/3239 * Bump uuid from 1.1.1 to 1.1.2 (#2289) Bumps [uuid](https://github.com/uuid-rs/uuid) from 1.1.1 to 1.1.2. - [Release notes](https://github.com/uuid-rs/uuid/releases) - [Commits](https://github.com/uuid-rs/uuid/compare/1.1.1...1.1.2) --- updated-dependencies: - dependency-name: uuid dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Bump hdpath from 0.6.0 to 0.6.1 (#2292) Bumps [hdpath](https://github.com/emeraldpay/hdpath-rs) from 0.6.0 to 0.6.1. - [Release notes](https://github.com/emeraldpay/hdpath-rs/releases) - [Commits](https://github.com/emeraldpay/hdpath-rs/commits) --- updated-dependencies: - dependency-name: hdpath dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Bump clap from 3.1.18 to 3.2.1 (#2291) * Bump semver from 1.0.9 to 1.0.10 (#2295) * Bump http from 0.2.7 to 0.2.8 (#2296) * Bump tracing from 0.1.34 to 0.1.35 (#2290) * Hs/2210 - Fixed the variable TM to point to GAIAD_BINARY (#2297) * Fixed variable TM * change log entry and lib-gm version updated to v0.1.3 Co-authored-by: Harveen Singh * Bump clap_complete from 3.1.4 to 3.2.1 (#2288) * Bump clap_complete from 3.1.4 to 3.2.1 Bumps [clap_complete](https://github.com/clap-rs/clap) from 3.1.4 to 3.2.1. - [Release notes](https://github.com/clap-rs/clap/releases) - [Changelog](https://github.com/clap-rs/clap/blob/master/CHANGELOG.md) - [Commits](https://github.com/clap-rs/clap/compare/clap_complete-v3.1.4...clap_complete-v3.2.1) --- updated-dependencies: - dependency-name: clap_complete dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] * Bump clap dependency to 3.2.4 Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Mikhail Zabaluev * Fix recv packet handler incorrectly using dest port/chan to get receipt/next_seq_recv (#2294) * Use packet destination port/channel in recv_packet handler where appropriate * Add .changelog entry * Address review feedback * Use enum for recv-packet results * Improve RecvPacketResult * Ignore acc seq mismath when expected < got (#2298) * Ignore acc seq mismath when expected < got * Address review comments * Changelog entry * Add support for fetching & parsing the Tendermint version of a chain (#2302) * Fix for #2301 * changelog * changelog broken link * KV pairs in log h/t Mikhail * update dependencies to master branch * update tendermint dependency Co-authored-by: Soares Chen Co-authored-by: Shoaib Ahmed Co-authored-by: PikachuEXE Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: harveenSingh Co-authored-by: Harveen Singh Co-authored-by: Mikhail Zabaluev Co-authored-by: Anca Zamfir Co-authored-by: Adi Seredinschi --- .../ibc/2293-fix-recv-packet-dest-portchan.md | 3 + .../2301-tendermint-version-support.md | 2 + .../ibc/2279-u256-serde-derive.md | 1 + .../ibc/2280-ics20-api-improvements.md | 2 + .../relayer/2249-ignore-nonce-mismatch.md | 2 + Cargo.lock | 419 ++++++++++-------- ci/relayer.Dockerfile | 2 +- modules/Cargo.toml | 27 +- modules/src/applications/transfer/context.rs | 3 +- modules/src/applications/transfer/denom.rs | 4 +- .../applications/transfer/msgs/transfer.rs | 4 +- modules/src/bigint.rs | 9 +- .../clients/ics07_tendermint/client_def.rs | 16 +- modules/src/clients/ics11_beefy/client_def.rs | 8 +- modules/src/clients/ics13_near/types.rs | 2 +- .../src/core/ics02_client/client_consensus.rs | 12 +- modules/src/core/ics02_client/client_state.rs | 16 +- modules/src/core/ics02_client/events.rs | 43 +- .../ics02_client/handler/update_client.rs | 2 +- modules/src/core/ics02_client/header.rs | 14 +- modules/src/core/ics02_client/misbehaviour.rs | 8 +- modules/src/core/ics03_connection/events.rs | 41 +- modules/src/core/ics04_channel/context.rs | 36 +- modules/src/core/ics04_channel/events.rs | 96 ++-- .../core/ics04_channel/handler/recv_packet.rs | 56 +-- modules/src/core/ics23_commitment/merkle.rs | 5 +- modules/src/core/ics26_routing/context.rs | 2 +- modules/src/events.rs | 7 +- modules/src/lib.rs | 1 + modules/src/mock/context.rs | 10 +- modules/src/mock/header.rs | 2 +- modules/src/query.rs | 2 +- modules/src/test_utils.rs | 12 +- proto/Cargo.toml | 2 +- .../prost/cosmos.base.tendermint.v1beta1.rs | 2 +- relayer-cli/Cargo.toml | 16 +- relayer-cli/src/lib.rs | 1 + relayer/Cargo.toml | 21 +- relayer/src/chain/cosmos/estimate.rs | 5 +- relayer/src/chain/cosmos/retry.rs | 23 +- relayer/src/chain/cosmos/version.rs | 72 ++- relayer/src/error.rs | 126 +++++- relayer/src/sdk_error.rs | 15 +- scripts/gm/CHANGELOG.md | 8 + scripts/gm/bin/lib-gm | 4 +- telemetry/Cargo.toml | 2 +- tools/integration-test/Cargo.toml | 8 +- tools/test-framework/Cargo.toml | 10 +- 48 files changed, 705 insertions(+), 479 deletions(-) create mode 100644 .changelog/unreleased/bug-fixes/ibc/2293-fix-recv-packet-dest-portchan.md create mode 100644 .changelog/unreleased/features/relayer/2301-tendermint-version-support.md create mode 100644 .changelog/unreleased/improvements/ibc/2279-u256-serde-derive.md create mode 100644 
.changelog/unreleased/improvements/ibc/2280-ics20-api-improvements.md create mode 100644 .changelog/unreleased/improvements/relayer/2249-ignore-nonce-mismatch.md diff --git a/.changelog/unreleased/bug-fixes/ibc/2293-fix-recv-packet-dest-portchan.md b/.changelog/unreleased/bug-fixes/ibc/2293-fix-recv-packet-dest-portchan.md new file mode 100644 index 0000000000..fc812b924b --- /dev/null +++ b/.changelog/unreleased/bug-fixes/ibc/2293-fix-recv-packet-dest-portchan.md @@ -0,0 +1,3 @@ +- Fix `recv_packet` handler incorrectly querying `packet_receipt` and `next_sequence_recv` using + packet's `source_{port, channel}`. + ([#2293](https://github.com/informalsystems/ibc-rs/issues/2293)) diff --git a/.changelog/unreleased/features/relayer/2301-tendermint-version-support.md b/.changelog/unreleased/features/relayer/2301-tendermint-version-support.md new file mode 100644 index 0000000000..40e5abb490 --- /dev/null +++ b/.changelog/unreleased/features/relayer/2301-tendermint-version-support.md @@ -0,0 +1,2 @@ +- Add support for fetching & parsing the Tendermint version of a network that + Hermes is connected to. ([#2301](https://github.com/informalsystems/ibc-rs/issues/2301)) diff --git a/.changelog/unreleased/improvements/ibc/2279-u256-serde-derive.md b/.changelog/unreleased/improvements/ibc/2279-u256-serde-derive.md new file mode 100644 index 0000000000..8b95125a33 --- /dev/null +++ b/.changelog/unreleased/improvements/ibc/2279-u256-serde-derive.md @@ -0,0 +1 @@ +- Derive `serde::{Serialize, Deserialize}` for `U256`. ([#2279](https://github.com/informalsystems/ibc-rs/issues/2279)) diff --git a/.changelog/unreleased/improvements/ibc/2280-ics20-api-improvements.md b/.changelog/unreleased/improvements/ibc/2280-ics20-api-improvements.md new file mode 100644 index 0000000000..0a7c3ef6f2 --- /dev/null +++ b/.changelog/unreleased/improvements/ibc/2280-ics20-api-improvements.md @@ -0,0 +1,2 @@ +- Remove unnecessary supertraits requirements from ICS20 traits. 
+([#2280](https://github.com/informalsystems/ibc-rs/pull/2280)) diff --git a/.changelog/unreleased/improvements/relayer/2249-ignore-nonce-mismatch.md b/.changelog/unreleased/improvements/relayer/2249-ignore-nonce-mismatch.md new file mode 100644 index 0000000000..57937fa2ca --- /dev/null +++ b/.changelog/unreleased/improvements/relayer/2249-ignore-nonce-mismatch.md @@ -0,0 +1,2 @@ +- Reduce relaying delay when some account mismatch errors occur during Tx + simulation ([#2249](https://github.com/informalsystems/ibc-rs/issues/2249)) \ No newline at end of file diff --git a/Cargo.lock b/Cargo.lock index 8ef9042c1f..7fcf210627 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -78,7 +78,7 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47" dependencies = [ - "getrandom 0.2.6", + "getrandom 0.2.7", "once_cell", "version_check", ] @@ -170,6 +170,17 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbf56136a5198c7b01a49e3afcbef6cf84597273d298f54432926024107b0109" +[[package]] +name = "async-recursion" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d7d78656ba01f1b93024b7c3a0467f1608e4be67d725749fdcd7d2c7678fd7a2" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "async-stream" version = "0.3.3" @@ -237,9 +248,9 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "axum" -version = "0.5.6" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab2504b827a8bef941ba3dd64bdffe9cf56ca182908a147edd6189c95fbcae7d" +checksum = "dc47084705629d09d15060d70a8dbfce479c842303d05929ce29c74c995916ae" dependencies = [ "async-trait", "axum-core", @@ -266,9 +277,9 @@ dependencies = [ [[package]] name = "axum-core" -version = "0.2.4" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da31c0ed7b4690e2c78fe4b880d21cd7db04a346ebc658b4270251b695437f17" +checksum = "c2efed1c501becea07ce48118786ebcf229531d0d3b28edf224a720020d9e106" dependencies = [ "async-trait", "bytes", @@ -331,9 +342,9 @@ checksum = "c5738be7561b0eeb501ef1d5c5db3f24e01ceb55fededd9b00039aada34966ad" [[package]] name = "beef" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bed554bd50246729a1ec158d08aa3235d1b69d94ad120ebe187e28894787e736" +checksum = "3a8241f3ebb85c056b509d4327ad0358fbbba6ffb340bf388f26350aeda225b1" dependencies = [ "serde", ] @@ -447,7 +458,7 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c0940dc441f31689269e10ac70eb1002a3a1d3ad1390e030043662eb7fe4688b" dependencies = [ - "block-padding", + "block-padding 0.1.5", "byte-tools", "byteorder", "generic-array 0.12.4", @@ -459,6 +470,7 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" dependencies = [ + "block-padding 0.2.1", "generic-array 0.14.5", ] @@ -480,6 +492,12 @@ dependencies = [ "byte-tools", ] +[[package]] +name = "block-padding" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d696c370c750c948ada61c69a0ee2cbbb9c50b1019ddb86d9317157a99c2cae" + [[package]] name = "borsh" version = "0.9.3" @@ -567,9 +585,9 @@ dependencies = [ [[package]] name = 
"bumpalo" -version = "3.9.1" +version = "3.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4a45a46ab1f2412e53d3a0ade76ffad2025804294569aae387231a0cd6e0899" +checksum = "37ccbd214614c6783386c1af30caf03192f17891059cecc394b4fb119e363de3" [[package]] name = "byte-slice-cast" @@ -600,6 +618,9 @@ name = "bytes" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c4872d67bab6358e59559027aa3b9157c53d9358c51423c17554809a8858e0f8" +dependencies = [ + "serde", +] [[package]] name = "camino" @@ -685,16 +706,16 @@ dependencies = [ [[package]] name = "clap" -version = "3.1.18" +version = "3.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2dbdf4bdacb33466e854ce889eee8dfd5729abf7ccd7664d0a2d60cd384440b" +checksum = "d53da17d37dba964b9b3ecb5c5a1f193a2762c700e6829201e645b9381c99dc7" dependencies = [ "atty", "bitflags", "clap_derive", "clap_lex", "indexmap", - "lazy_static", + "once_cell", "strsim", "termcolor", "textwrap", @@ -702,18 +723,18 @@ dependencies = [ [[package]] name = "clap_complete" -version = "3.1.4" +version = "3.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da92e6facd8d73c22745a5d3cbb59bdf8e46e3235c923e516527d8e81eec14a4" +checksum = "0f6ebaab5f25e4f0312dfa07cb30a755204b96e6531457c2cfdecfdf5f2adf40" dependencies = [ "clap", ] [[package]] name = "clap_derive" -version = "3.1.18" +version = "3.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25320346e922cffe59c0bbc5410c8d8784509efb321488971081313cb1e1a33c" +checksum = "c11d40217d16aee8508cc8e5fde8b4ff24639758608e5374e731b53f85749fb9" dependencies = [ "heck", "proc-macro-error", @@ -724,9 +745,9 @@ dependencies = [ [[package]] name = "clap_lex" -version = "0.2.0" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a37c35f1112dad5e6e0b1adaff798507497a18fceeb30cceb3bae7d1427b9213" +checksum = "5538cd660450ebeb4234cfecf8f2284b844ffc4c50531e66d584ad5b91293613" dependencies = [ "os_str_bytes", ] @@ -832,22 +853,12 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b153fe7cbef478c567df0f972e02e6d736db11affe43dfc9c56a9374d1adfb87" -dependencies = [ - "crossbeam-utils 0.7.2", - "maybe-uninit", -] - -[[package]] -name = "crossbeam-channel" -version = "0.5.4" +version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aaa7bd5fb665c6864b5f963dd9097905c54125909c7aa94c9e18507cdbe6c53" +checksum = "4c02a4d71819009c192cf4872265391563fd6a84c81ff2c0f2a7026ca4c1d85c" dependencies = [ "cfg-if 1.0.0", - "crossbeam-utils 0.8.8", + "crossbeam-utils 0.8.9", ] [[package]] @@ -857,8 +868,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6455c0ca19f0d2fbf751b908d5c55c1f5cbc65e03c4225427254b46890bdde1e" dependencies = [ "cfg-if 1.0.0", - "crossbeam-epoch 0.9.8", - "crossbeam-utils 0.8.8", + "crossbeam-epoch 0.9.9", + "crossbeam-utils 0.8.9", ] [[package]] @@ -878,15 +889,15 @@ dependencies = [ [[package]] name = "crossbeam-epoch" -version = "0.9.8" +version = "0.9.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1145cf131a2c6ba0615079ab6a638f7e1973ac9c2634fcbeaaad6114246efe8c" +checksum = "07db9d94cbd326813772c968ccd25999e5f8ae22f4f8d1b11effa37ef6ce281d" dependencies = [ "autocfg", "cfg-if 1.0.0", - "crossbeam-utils 0.8.8", - 
"lazy_static", + "crossbeam-utils 0.8.9", "memoffset 0.6.5", + "once_cell", "scopeguard", ] @@ -903,12 +914,12 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.8" +version = "0.8.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bf124c720b7686e3c2663cf54062ab0f68a88af2fb6a030e87e30bf721fcb38" +checksum = "8ff1f980957787286a554052d03c7aee98d99cc32e09f6d45f0a814133c87978" dependencies = [ "cfg-if 1.0.0", - "lazy_static", + "once_cell", ] [[package]] @@ -994,6 +1005,19 @@ dependencies = [ "zeroize", ] +[[package]] +name = "curve25519-dalek-ng" +version = "4.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c359b7249347e46fb28804470d071c921156ad62b3eef5d34e2ba867533dec8" +dependencies = [ + "byteorder", + "digest 0.9.0", + "rand_core 0.6.3", + "subtle-ng", + "zeroize", +] + [[package]] name = "darling" version = "0.14.1" @@ -1215,6 +1239,19 @@ dependencies = [ "signature", ] +[[package]] +name = "ed25519-consensus" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "758e2a0cd8a6cdf483e1d369e7d081647e00b88d8953e34d8f2cbba05ae28368" +dependencies = [ + "curve25519-dalek-ng", + "hex", + "rand_core 0.6.3", + "sha2 0.9.9", + "zeroize", +] + [[package]] name = "ed25519-dalek" version = "1.0.1" @@ -1348,13 +1385,11 @@ dependencies = [ [[package]] name = "flate2" -version = "1.0.23" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b39522e96686d38f4bc984b9198e3a0613264abaebaff2c5c918bfa6b6da09af" +checksum = "f82b0f4c27ad9f8bfd1f3208d882da2b09c301bc1c828fd3a00d0216d2fbbff6" dependencies = [ - "cfg-if 1.0.0", "crc32fast", - "libc", "miniz_oxide", ] @@ -1369,6 +1404,18 @@ dependencies = [ "paste", ] +[[package]] +name = "flume" +version = "0.10.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ceeb589a3157cac0ab8cc585feb749bd2cea5cb55a6ee802ad72d9fd38303da" +dependencies = [ + "futures-core", + "futures-sink", + "pin-project", + "spin 0.9.3", +] + [[package]] name = "fnv" version = "1.0.7" @@ -1642,14 +1689,14 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.6" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9be70c98951c83b8d2f8f60d7065fa6d5146873094452a1008da8c2f1e4205ad" +checksum = "4eb1a864a501629691edf6c15a593b7a51eebaa1e8468e9ddc623de7c9b58ec6" dependencies = [ "cfg-if 1.0.0", "js-sys", "libc", - "wasi 0.10.2+wasi-snapshot-preview1", + "wasi 0.11.0+wasi-snapshot-preview1", "wasm-bindgen", ] @@ -1667,9 +1714,9 @@ checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" [[package]] name = "globset" -version = "0.4.8" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10463d9ff00a2a068db14231982f5132edebad0d7660cd956a1c30292dbcbfbd" +checksum = "0a1e17342619edbc21a964c2afbeb6c820c6a2560032872f397bb97ea127bd0a" dependencies = [ "aho-corasick", "bstr", @@ -1737,12 +1784,6 @@ dependencies = [ "tracing", ] -[[package]] -name = "half" -version = "1.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eabb4a44450da02c90444cf74558da904edde8fb4e9035a9a6a4e15445af0bd7" - [[package]] name = "hash-db" version = "0.15.2" @@ -1870,9 +1911,9 @@ dependencies = [ [[package]] name = "http" -version = "0.2.7" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"ff8670570af52249509a86f5e3e18a08c60b177071826898fde8997cf5f6bfbb" +checksum = "75f43d41e26995c17e71ee126451dd3941010b0514a81a9d11f3b341debc2399" dependencies = [ "bytes", "fnv", @@ -1926,9 +1967,9 @@ dependencies = [ [[package]] name = "hyper" -version = "0.14.18" +version = "0.14.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b26ae0a80afebe130861d90abf98e3814a4f28a4c6ffeb5ab8ebb2be311e0ef2" +checksum = "42dc3c131584288d375f2d07f822b0cb012d8c6fb899a5b9fdb3cb7eb9b6004f" dependencies = [ "bytes", "futures-channel", @@ -2013,6 +2054,7 @@ dependencies = [ "modelator", "num-traits", "parity-scale-codec", + "primitive-types", "prost", "prost-types", "ripemd", @@ -2021,7 +2063,7 @@ dependencies = [ "serde_derive", "serde_json", "sha2 0.10.2", - "sha3", + "sha3 0.10.1", "sp-core 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", "sp-io 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", "sp-mmr-primitives", @@ -2032,7 +2074,7 @@ dependencies = [ "subxt", "tendermint", "tendermint-light-client-verifier", - "tendermint-proto 0.23.7 (git+https://github.com/composableFi/tendermint-rs?branch=seun-0.23.7)", + "tendermint-proto", "tendermint-rpc", "tendermint-testgen", "test-log", @@ -2052,6 +2094,7 @@ dependencies = [ "ibc-relayer", "ibc-relayer-cli", "ibc-test-framework", + "modelator", "serde", "serde_json", "tempfile", @@ -2070,7 +2113,7 @@ dependencies = [ "prost-types", "schemars", "serde", - "tendermint-proto 0.23.7 (git+https://github.com/composableFi/tendermint-rs?branch=seun-0.23.7)", + "tendermint-proto", "tonic", ] @@ -2083,7 +2126,7 @@ dependencies = [ "bech32 0.9.0", "bitcoin", "bytes", - "crossbeam-channel 0.5.4", + "crossbeam-channel", "dirs-next", "env_logger", "flex-error", @@ -2118,7 +2161,7 @@ dependencies = [ "tendermint", "tendermint-light-client", "tendermint-light-client-verifier", - "tendermint-proto 0.23.7 (registry+https://github.com/rust-lang/crates.io-index)", + "tendermint-proto", "tendermint-rpc", "tendermint-testgen", "test-log", @@ -2143,7 +2186,7 @@ dependencies = [ "clap_complete", "color-eyre", "console", - "crossbeam-channel 0.5.4", + "crossbeam-channel", "dialoguer", "dirs-next", "eyre", @@ -2168,7 +2211,7 @@ dependencies = [ "tendermint", "tendermint-light-client", "tendermint-light-client-verifier", - "tendermint-proto 0.23.7 (git+https://github.com/composableFi/tendermint-rs?branch=seun-0.23.7)", + "tendermint-proto", "tendermint-rpc", "tokio", "toml", @@ -2180,7 +2223,7 @@ dependencies = [ name = "ibc-relayer-rest" version = "0.15.0" dependencies = [ - "crossbeam-channel 0.5.4", + "crossbeam-channel", "ibc", "ibc-relayer", "rouille", @@ -2195,7 +2238,7 @@ dependencies = [ name = "ibc-telemetry" version = "0.15.0" dependencies = [ - "crossbeam-channel 0.5.4", + "crossbeam-channel", "ibc", "moka", "once_cell", @@ -2212,7 +2255,7 @@ version = "0.15.0" dependencies = [ "async-trait", "color-eyre", - "crossbeam-channel 0.5.4", + "crossbeam-channel", "env_logger", "eyre", "flex-error", @@ -2241,12 +2284,16 @@ dependencies = [ [[package]] name = "ics23" version = "0.8.0-alpha" -source = "git+https://github.com/composablefi/ics23?branch=seun-v0.8.0-alpha#c79df0901ddfdfb539dae1e451e7beadc01694f4" +source = "git+https://github.com/composablefi/ics23?rev=86e970a4f22973946a0faf472b56951691d40874#86e970a4f22973946a0faf472b56951691d40874" dependencies = [ "anyhow", "bytes", "hex", "prost", + "ripemd160", + "sha2 0.9.9", + "sha3 0.9.1", + "sp-core 6.0.0 
(registry+https://github.com/rust-lang/crates.io-index)", "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", ] @@ -2304,9 +2351,9 @@ checksum = "ce23b50ad8242c51a442f3ff322d56b02f08852c77e4c0b4d3fd684abc89c683" [[package]] name = "indexmap" -version = "1.8.1" +version = "1.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f647032dfaa1f8b6dc29bd3edb7bbef4861b8b8007ebb118d6db284fd59f6ee" +checksum = "e6012d540c5baa3589337a98ce73408de9b5a25ec9fc2c6fd6be8f0d39e0ca5a" dependencies = [ "autocfg", "hashbrown 0.11.2", @@ -2362,9 +2409,9 @@ checksum = "112c678d4050afce233f4f2852bb2eb519230b3cf12f33585275537d7e41578d" [[package]] name = "js-sys" -version = "0.3.57" +version = "0.3.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "671a26f820db17c2a2750743f1dd03bafd15b98c9f30c7c2628c024c05d73397" +checksum = "c3fac17f7123a73ca62df411b1bf727ccc805daa070338fda671c86dac1bdc27" dependencies = [ "wasm-bindgen", ] @@ -2553,9 +2600,9 @@ dependencies = [ [[package]] name = "keccak" -version = "0.1.0" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67c21572b4949434e4fc1e1978b99c5f77064153c59d998bf13ecd96fb5ecba7" +checksum = "f9b7d56ba4a8344d6be9729995e6b06f928af29998cdf79fe390cbf6b1fee838" [[package]] name = "kvdb" @@ -2670,9 +2717,9 @@ dependencies = [ [[package]] name = "lru" -version = "0.7.5" +version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32613e41de4c47ab04970c348ca7ae7382cf116625755af070b008a15516a889" +checksum = "c84e6fe5655adc6ce00787cf7dcaf8dc4f998a0565d23eafc207a8b08ca3349a" dependencies = [ "hashbrown 0.11.2", ] @@ -2814,9 +2861,9 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.5.1" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2b29bd4bc3f33391105ebee3589c19197c4271e3e5a9ec9bfe8127eeff8f082" +checksum = "6f5c75688da582b8ffc1f1799e9db273f32133c49e048f614d22ec3256773ccc" dependencies = [ "adler", ] @@ -2864,9 +2911,9 @@ version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "df72b50274c0988d9f4a6e808e06d9d926f265db6f8bbda1576bcaa658e72763" dependencies = [ - "crossbeam-channel 0.5.4", + "crossbeam-channel", "crossbeam-epoch 0.8.2", - "crossbeam-utils 0.8.8", + "crossbeam-utils 0.8.9", "num_cpus", "once_cell", "parking_lot", @@ -3118,7 +3165,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6105e89802af13fdf48c49d7646d3b533a70e536d818aae7e78ba0433d01acb8" dependencies = [ "async-trait", - "crossbeam-channel 0.5.4", + "crossbeam-channel", "dashmap", "fnv", "futures-channel", @@ -3145,9 +3192,9 @@ dependencies = [ [[package]] name = "os_str_bytes" -version = "6.0.1" +version = "6.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "029d8d0b2f198229de29dca79676f2738ff952edf3fde542eb8bf94d8c21b435" +checksum = "21326818e99cfe6ce1e524c2a805c189a99b5ae555a35d19f9a284b427d86afa" [[package]] name = "owo-colors" @@ -3267,9 +3314,9 @@ dependencies = [ [[package]] name = "parity-scale-codec" -version = "3.1.2" +version = "3.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8b44461635bbb1a0300f100a841e571e7d919c81c73075ef5d152ffdb521066" +checksum = "9182e4a71cae089267ab03e67c99368db7cd877baf50f931e5d6d4b71e195ac0" dependencies = [ "arrayvec 0.7.2", 
"bitvec", @@ -3281,9 +3328,9 @@ dependencies = [ [[package]] name = "parity-scale-codec-derive" -version = "3.1.2" +version = "3.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c45ed1f39709f5a89338fab50e59816b2e8815f5bb58276e7ddf9afd495f73f8" +checksum = "9299338969a3d2f491d65f140b00ddec470858402f888af98e8642fb5e8965cd" dependencies = [ "proc-macro-crate 1.1.3", "proc-macro2", @@ -3325,9 +3372,9 @@ checksum = "be5e13c266502aadf83426d87d81a0f5d1ef45b8027f5a471c360abfe4bfae92" [[package]] name = "parking_lot" -version = "0.12.0" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87f5ec2493a61ac0506c0f4199f99070cbe83857b0337006a30f3e6719b8ef58" +checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" dependencies = [ "lock_api", "parking_lot_core", @@ -3534,9 +3581,9 @@ dependencies = [ [[package]] name = "prost" -version = "0.10.3" +version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc03e116981ff7d8da8e5c220e374587b98d294af7ba7dd7fda761158f00086f" +checksum = "71adf41db68aa0daaefc69bb30bcd68ded9b9abaad5d1fbb6304c4fb390e083e" dependencies = [ "bytes", "prost-derive", @@ -3588,7 +3635,7 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bafd74c340a0a7e79415981ede3460df16b530fd071541901a57416eea950b17" dependencies = [ - "crossbeam-utils 0.8.8", + "crossbeam-utils 0.8.9", "libc", "mach", "once_cell", @@ -3679,7 +3726,7 @@ version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7" dependencies = [ - "getrandom 0.2.6", + "getrandom 0.2.7", ] [[package]] @@ -3743,9 +3790,9 @@ version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "258bcdb5ac6dad48491bb2992db6b7cf74878b0384908af124823d118c99683f" dependencies = [ - "crossbeam-channel 0.5.4", + "crossbeam-channel", "crossbeam-deque", - "crossbeam-utils 0.8.8", + "crossbeam-utils 0.8.9", "num_cpus", ] @@ -3764,7 +3811,7 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" dependencies = [ - "getrandom 0.2.6", + "getrandom 0.2.7", "redox_syscall", "thiserror", ] @@ -3850,7 +3897,7 @@ dependencies = [ "cc", "libc", "once_cell", - "spin", + "spin 0.5.2", "untrusted", "web-sys", "winapi", @@ -4284,16 +4331,6 @@ dependencies = [ "serde", ] -[[package]] -name = "serde_cbor" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bef2ebfde456fb76bbcf9f59315333decc4fda0b2b44b420243c11e0f5ec1f5" -dependencies = [ - "half", - "serde", -] - [[package]] name = "serde_derive" version = "1.0.137" @@ -4450,6 +4487,18 @@ dependencies = [ "digest 0.10.3", ] +[[package]] +name = "sha3" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f81199417d4e5de3f04b1e871023acea7389672c4135918f05aa9cbf2f2fa809" +dependencies = [ + "block-buffer 0.9.0", + "digest 0.9.0", + "keccak", + "opaque-debug 0.3.0", +] + [[package]] name = "sha3" version = "0.10.1" @@ -4808,7 +4857,7 @@ dependencies = [ "byteorder", "digest 0.10.3", "sha2 0.10.2", - "sha3", + "sha3 0.10.1", "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", "twox-hash", ] @@ -5357,6 +5406,15 @@ version = "0.5.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" +[[package]] +name = "spin" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c530c2b0d0bf8b69304b39fe2001993e267461948b890cd037d8ad4293fa1a0d" +dependencies = [ + "lock_api", +] + [[package]] name = "spki" version = "0.5.4" @@ -5369,9 +5427,9 @@ dependencies = [ [[package]] name = "ss58-registry" -version = "1.18.0" +version = "1.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ceb8b72a924ccfe7882d0e26144c114503760a4d1248bb5cd06c8ab2d55404cc" +checksum = "5d804c8d48aeab838be31570866fce1130d275b563d49af08b4927a0bd561e7c" dependencies = [ "Inflector", "num-format", @@ -5435,6 +5493,12 @@ dependencies = [ "zeroize", ] +[[package]] +name = "subtle-ng" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "734676eb262c623cec13c3155096e08d1f8f29adce39ba17948b18dad1e54142" + [[package]] name = "subxt" version = "0.21.0" @@ -5559,13 +5623,13 @@ dependencies = [ [[package]] name = "tendermint" -version = "0.23.7" -source = "git+https://github.com/composableFi/tendermint-rs?branch=seun-0.23.7#cb79c164e84c3d9176b374f2b227393fe000e691" +version = "0.24.0-pre.2" +source = "git+https://github.com/composableFi/tendermint-rs?rev=5a74e0f8da4d3dab83cc04b5f1363b018cf3d9e8#5a74e0f8da4d3dab83cc04b5f1363b018cf3d9e8" dependencies = [ "async-trait", "bytes", "ed25519", - "ed25519-dalek", + "ed25519-consensus", "flex-error", "futures", "k256", @@ -5573,24 +5637,24 @@ dependencies = [ "once_cell", "prost", "prost-types", - "ripemd160", + "ripemd", "serde", "serde_bytes", "serde_json", "serde_repr", - "sha2 0.9.9", + "sha2 0.10.2", "signature", "subtle", "subtle-encoding", - "tendermint-proto 0.23.7 (git+https://github.com/composableFi/tendermint-rs?branch=seun-0.23.7)", + "tendermint-proto", "time", "zeroize", ] [[package]] name = "tendermint-config" -version = "0.23.7" -source = "git+https://github.com/composableFi/tendermint-rs?branch=seun-0.23.7#cb79c164e84c3d9176b374f2b227393fe000e691" +version = "0.24.0-pre.2" +source = "git+https://github.com/composableFi/tendermint-rs?rev=5a74e0f8da4d3dab83cc04b5f1363b018cf3d9e8#5a74e0f8da4d3dab83cc04b5f1363b018cf3d9e8" dependencies = [ "flex-error", "serde", @@ -5602,16 +5666,18 @@ dependencies = [ [[package]] name = "tendermint-light-client" -version = "0.23.7" -source = "git+https://github.com/composableFi/tendermint-rs?branch=seun-0.23.7#cb79c164e84c3d9176b374f2b227393fe000e691" +version = "0.24.0-pre.2" +source = "git+https://github.com/composableFi/tendermint-rs?rev=5a74e0f8da4d3dab83cc04b5f1363b018cf3d9e8#5a74e0f8da4d3dab83cc04b5f1363b018cf3d9e8" dependencies = [ + "async-recursion", + "async-trait", "contracts", - "crossbeam-channel 0.4.4", "derive_more", "flex-error", + "flume", "futures", + "futures-timer", "serde", - "serde_cbor", "serde_derive", "static_assertions", "tendermint", @@ -5623,8 +5689,8 @@ dependencies = [ [[package]] name = "tendermint-light-client-verifier" -version = "0.23.7" -source = "git+https://github.com/composableFi/tendermint-rs?branch=seun-0.23.7#cb79c164e84c3d9176b374f2b227393fe000e691" +version = "0.24.0-pre.2" +source = "git+https://github.com/composableFi/tendermint-rs?rev=5a74e0f8da4d3dab83cc04b5f1363b018cf3d9e8#5a74e0f8da4d3dab83cc04b5f1363b018cf3d9e8" dependencies = [ "derive_more", "flex-error", @@ -5635,26 +5701,8 @@ dependencies = [ [[package]] name = "tendermint-proto" 
-version = "0.23.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b71f925d74903f4abbdc4af0110635a307b3cb05b175fdff4a7247c14a4d0874" -dependencies = [ - "bytes", - "flex-error", - "num-derive", - "num-traits", - "prost", - "prost-types", - "serde", - "serde_bytes", - "subtle-encoding", - "time", -] - -[[package]] -name = "tendermint-proto" -version = "0.23.7" -source = "git+https://github.com/composableFi/tendermint-rs?branch=seun-0.23.7#cb79c164e84c3d9176b374f2b227393fe000e691" +version = "0.24.0-pre.2" +source = "git+https://github.com/composableFi/tendermint-rs?rev=5a74e0f8da4d3dab83cc04b5f1363b018cf3d9e8#5a74e0f8da4d3dab83cc04b5f1363b018cf3d9e8" dependencies = [ "bytes", "flex-error", @@ -5670,15 +5718,15 @@ dependencies = [ [[package]] name = "tendermint-rpc" -version = "0.23.7" -source = "git+https://github.com/composableFi/tendermint-rs?branch=seun-0.23.7#cb79c164e84c3d9176b374f2b227393fe000e691" +version = "0.24.0-pre.2" +source = "git+https://github.com/composableFi/tendermint-rs?rev=5a74e0f8da4d3dab83cc04b5f1363b018cf3d9e8#5a74e0f8da4d3dab83cc04b5f1363b018cf3d9e8" dependencies = [ "async-trait", "async-tungstenite", "bytes", "flex-error", "futures", - "getrandom 0.2.6", + "getrandom 0.2.7", "http", "hyper", "hyper-proxy", @@ -5688,10 +5736,11 @@ dependencies = [ "serde", "serde_bytes", "serde_json", + "subtle", "subtle-encoding", "tendermint", "tendermint-config", - "tendermint-proto 0.23.7 (git+https://github.com/composableFi/tendermint-rs?branch=seun-0.23.7)", + "tendermint-proto", "thiserror", "time", "tokio", @@ -5703,10 +5752,10 @@ dependencies = [ [[package]] name = "tendermint-testgen" -version = "0.23.7" -source = "git+https://github.com/composableFi/tendermint-rs?branch=seun-0.23.7#cb79c164e84c3d9176b374f2b227393fe000e691" +version = "0.24.0-pre.2" +source = "git+https://github.com/composableFi/tendermint-rs?rev=5a74e0f8da4d3dab83cc04b5f1363b018cf3d9e8#5a74e0f8da4d3dab83cc04b5f1363b018cf3d9e8" dependencies = [ - "ed25519-dalek", + "ed25519-consensus", "gumdrop", "serde", "serde_json", @@ -5865,9 +5914,9 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "1.18.2" +version = "1.19.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4903bf0427cf68dddd5aa6a93220756f8be0c34fcfa9f5e6191e103e15a31395" +checksum = "c51a52ed6686dd62c320f9b89299e9dfb46f730c7a48e635c19f21d116cb1439" dependencies = [ "bytes", "libc", @@ -5895,9 +5944,9 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "1.7.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b557f72f448c511a979e2564e55d74e6c4432fc96ff4f6241bc6bded342643b7" +checksum = "9724f9a975fb987ef7a3cd9be0350edcbe130698af5b8f7a631e23d42d052484" dependencies = [ "proc-macro2", "quote", @@ -5928,9 +5977,9 @@ dependencies = [ [[package]] name = "tokio-stream" -version = "0.1.8" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50145484efff8818b5ccd256697f36863f587da82cf8b409c53adf1e840798e3" +checksum = "df54d54117d6fdc4e4fea40fe1e4e566b3505700e148a6827e59b34b0d2600d9" dependencies = [ "futures-core", "pin-project-lite", @@ -5939,9 +5988,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f988a1a1adc2fb21f9c12aa96441da33a1728193ae0b95d2be22dbd17fcb4e5c" +checksum = 
"cc463cd8deddc3770d20f9852143d50bf6094e640b485cb2e189a2099085ff45" dependencies = [ "bytes", "futures-core", @@ -6018,9 +6067,9 @@ dependencies = [ [[package]] name = "tower-http" -version = "0.3.3" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d342c6d58709c0a6d48d48dabbb62d4ef955cf5f0f3bbfd845838e7ae88dbae" +checksum = "3c530c8675c1dbf98facee631536fa116b5fb6382d7dd6dc1b118d970eafe3ba" dependencies = [ "bitflags", "bytes", @@ -6049,9 +6098,9 @@ checksum = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6" [[package]] name = "tracing" -version = "0.1.34" +version = "0.1.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d0ecdcb44a79f0fe9844f0c4f33a342cbcbb5117de8001e6ba0dc2351327d09" +checksum = "a400e31aa60b9d44a52a8ee0343b5b18566b03a8321e0d321f695cf56e940160" dependencies = [ "cfg-if 1.0.0", "log", @@ -6073,11 +6122,11 @@ dependencies = [ [[package]] name = "tracing-core" -version = "0.1.26" +version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f54c8ca710e81886d498c2fd3331b56c93aa248d49de2222ad2742247c60072f" +checksum = "7709595b8878a4965ce5e87ebf880a7d39c9afc6837721b21a5a816a8117d921" dependencies = [ - "lazy_static", + "once_cell", "valuable", ] @@ -6280,9 +6329,9 @@ checksum = "099b7128301d285f79ddd55b9a83d5e6b9e97c92e0ea0daebee7263e932de992" [[package]] name = "unicode-ident" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d22af068fba1eb5edcb4aea19d382b2a3deb4c8f9d475c589b6ada9e0fd493ee" +checksum = "5bd2fe26506023ed7b5e1e315add59d6f584c621d037f9368fea9cfb988f368c" [[package]] name = "unicode-normalization" @@ -6352,7 +6401,7 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" dependencies = [ - "getrandom 0.2.6", + "getrandom 0.2.7", ] [[package]] @@ -6361,7 +6410,7 @@ version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dd6469f4314d5f1ffec476e05f17cc9a78bc7a27a6a857842170bdf8d6f98d2f" dependencies = [ - "getrandom 0.2.6", + "getrandom 0.2.7", ] [[package]] @@ -6426,9 +6475,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.80" +version = "0.2.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "27370197c907c55e3f1a9fbe26f44e937fe6451368324e009cba39e139dc08ad" +checksum = "7c53b543413a17a202f4be280a7e5c62a1c69345f5de525ee64f8cfdbc954994" dependencies = [ "cfg-if 1.0.0", "wasm-bindgen-macro", @@ -6436,9 +6485,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.80" +version = "0.2.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53e04185bfa3a779273da532f5025e33398409573f348985af9a1cbf3774d3f4" +checksum = "5491a68ab4500fa6b4d726bd67408630c3dbe9c4fe7bda16d5c82a1fd8c7340a" dependencies = [ "bumpalo", "lazy_static", @@ -6451,9 +6500,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.80" +version = "0.2.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17cae7ff784d7e83a2fe7611cfe766ecf034111b49deb850a3dc7699c08251f5" +checksum = "c441e177922bc58f1e12c022624b6216378e5febc2f0533e41ba443d505b80aa" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -6461,9 +6510,9 @@ dependencies = [ [[package]] 
name = "wasm-bindgen-macro-support" -version = "0.2.80" +version = "0.2.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99ec0dc7a4756fffc231aab1b9f2f578d23cd391390ab27f952ae0c9b3ece20b" +checksum = "7d94ac45fcf608c1f45ef53e748d35660f168490c10b23704c7779ab8f5c3048" dependencies = [ "proc-macro2", "quote", @@ -6474,9 +6523,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.80" +version = "0.2.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d554b7f530dee5964d9a9468d95c1f8b8acae4f282807e7d27d4b03099a46744" +checksum = "6a89911bd99e5f3659ec4acf9c4d93b0a90fe4a2a11f15328472058edc5261be" [[package]] name = "wasmi" @@ -6504,9 +6553,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.57" +version = "0.3.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b17e741662c70c8bd24ac5c5b18de314a2c26c32bf8346ee1e6f53de919c283" +checksum = "2fed94beee57daf8dd7d51f2b15dc2bcde92d7a72304cdf662a4371008b71b90" dependencies = [ "js-sys", "wasm-bindgen", diff --git a/ci/relayer.Dockerfile b/ci/relayer.Dockerfile index 191b8cc166..265446581b 100644 --- a/ci/relayer.Dockerfile +++ b/ci/relayer.Dockerfile @@ -1,7 +1,7 @@ ##################################################### #### Relayer image #### ##################################################### -FROM ubuntu:21.04 +FROM --platform=linux/amd64 ubuntu:21.04 LABEL maintainer="hello@informal.systems" ARG RELEASE diff --git a/modules/Cargo.toml b/modules/Cargo.toml index 66638f1e88..47a937d069 100644 --- a/modules/Cargo.toml +++ b/modules/Cargo.toml @@ -29,25 +29,26 @@ std = [ "codec/std", "pallet-mmr-primitives/std", "beefy-primitives/std", - "sp-trie/std" + "sp-trie/std", + "tendermint-rpc" ] clock = ["tendermint/clock", "time/std"] # This feature grants access to development-time mocking libraries, such as `MockContext` or `MockHeader`. # Depends on the `testgen` suite for generating Tendermint light blocks. -mocks = ["tendermint-testgen", "clock", "std", "sp-io", "sp-io/std", "sha3", "ripemd"] +mocks = ["tendermint-testgen", "clock", "std", "sp-io/std", "sha3", "ripemd", "ics11-beefy"] ics11-beefy = ["sp-io", "sp-core", "sp-std"] [dependencies] # Proto definitions for all IBC-related interfaces, e.g., connections or channels. 
borsh = { version = "0.9.3", default-features = false } ibc-proto = { version = "0.18.0", path = "../proto", default-features = false } -ics23 = { git = "https://github.com/composablefi/ics23", branch = "seun-v0.8.0-alpha", default-features = false } +ics23 = { git = "https://github.com/composablefi/ics23", rev = "86e970a4f22973946a0faf472b56951691d40874", default-features = false } time = { version = "0.3", default-features = false } serde_derive = { version = "1.0.104", default-features = false } serde = { version = "1.0", default-features = false } serde_json = { version = "1", default-features = false } -tracing = { version = "0.1.34", default-features = false } +tracing = { version = "0.1.35", default-features = false } prost = { version = "0.10", default-features = false } prost-types = { version = "0.10", default-features = false } bytes = { version = "1.1.0", default-features = false } @@ -69,26 +70,32 @@ sp-trie = { git = "https://github.com/paritytech/substrate", branch = "polkadot- sp-io = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.22", default-features = false, optional = true } sha3 = { version = "0.10.1", optional = true } ripemd = { version = "0.1.1", optional = true } +primitive-types = { version = "0.11.1", default-features = false, features = ["serde_no_std"] } [dependencies.tendermint] git = "https://github.com/composableFi/tendermint-rs" -branch = "seun-0.23.7" +rev = "5a74e0f8da4d3dab83cc04b5f1363b018cf3d9e8" default-features = false [dependencies.tendermint-proto] git = "https://github.com/composableFi/tendermint-rs" -branch = "seun-0.23.7" +rev = "5a74e0f8da4d3dab83cc04b5f1363b018cf3d9e8" default-features = false [dependencies.tendermint-light-client-verifier] git = "https://github.com/composableFi/tendermint-rs" -branch = "seun-0.23.7" +rev = "5a74e0f8da4d3dab83cc04b5f1363b018cf3d9e8" default-features = false +[dependencies.tendermint-rpc] +git = "https://github.com/composableFi/tendermint-rs" +rev = "5a74e0f8da4d3dab83cc04b5f1363b018cf3d9e8" +optional = true + [dependencies.tendermint-testgen] git = "https://github.com/composableFi/tendermint-rs" -branch = "seun-0.23.7" +rev = "5a74e0f8da4d3dab83cc04b5f1363b018cf3d9e8" optional = true default-features = false @@ -98,8 +105,8 @@ tracing-subscriber = { version = "0.3.11", features = ["fmt", "env-filter", "jso test-log = { version = "0.2.10", features = ["trace"] } modelator = "0.4.2" sha2 = { version = "0.10.2" } -tendermint-rpc = { git = "https://github.com/composableFi/tendermint-rs", branch = "seun-0.23.7", features = ["http-client", "websocket-client"] } -tendermint-testgen = { git = "https://github.com/composableFi/tendermint-rs", branch = "seun-0.23.7" } # Needed for generating (synthetic) light blocks. +tendermint-rpc = { git = "https://github.com/composableFi/tendermint-rs", rev = "5a74e0f8da4d3dab83cc04b5f1363b018cf3d9e8", features = ["http-client", "websocket-client"] } +tendermint-testgen = { git = "https://github.com/composableFi/tendermint-rs", rev = "5a74e0f8da4d3dab83cc04b5f1363b018cf3d9e8" } # Needed for generating (synthetic) light blocks. 
# Beefy Light Client testing dependencies sp-io = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.22"} subxt = "0.21.0" diff --git a/modules/src/applications/transfer/context.rs b/modules/src/applications/transfer/context.rs index ab9c1afdb8..36e7ad7a91 100644 --- a/modules/src/applications/transfer/context.rs +++ b/modules/src/applications/transfer/context.rs @@ -13,7 +13,6 @@ use crate::core::ics04_channel::context::{ChannelKeeper, ChannelReader}; use crate::core::ics04_channel::msgs::acknowledgement::Acknowledgement as GenericAcknowledgement; use crate::core::ics04_channel::packet::Packet; use crate::core::ics04_channel::Version; -use crate::core::ics05_port::context::PortReader; use crate::core::ics24_host::identifier::{ChannelId, ConnectionId, PortId}; use crate::core::ics26_routing::context::{ModuleOutputBuilder, OnRecvPacketAck, ReaderContext}; use crate::prelude::*; @@ -25,7 +24,7 @@ pub trait Ics20Keeper: type AccountId; } -pub trait Ics20Reader: ChannelReader + PortReader +pub trait Ics20Reader: ChannelReader where Self: Sized, { diff --git a/modules/src/applications/transfer/denom.rs b/modules/src/applications/transfer/denom.rs index 74a9ad783d..c55b1fb506 100644 --- a/modules/src/applications/transfer/denom.rs +++ b/modules/src/applications/transfer/denom.rs @@ -292,7 +292,9 @@ impl fmt::Display for PrefixedDenom { } /// A type for representing token transfer amounts. -#[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Display, From, Into)] +#[derive( + Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Serialize, Deserialize, Display, From, Into, +)] pub struct Amount(U256); impl Amount { diff --git a/modules/src/applications/transfer/msgs/transfer.rs b/modules/src/applications/transfer/msgs/transfer.rs index ee9c8afd1f..3ad22b6b39 100644 --- a/modules/src/applications/transfer/msgs/transfer.rs +++ b/modules/src/applications/transfer/msgs/transfer.rs @@ -119,9 +119,7 @@ impl From for Any { fn from(msg: MsgTransfer) -> Self { Self { type_url: TYPE_URL.to_string(), - value: msg - .encode_vec() - .expect("encoding to `Any` from `MsgTranfer`"), + value: msg.encode_vec(), } } } diff --git a/modules/src/bigint.rs b/modules/src/bigint.rs index f681f75308..4b33e02332 100644 --- a/modules/src/bigint.rs +++ b/modules/src/bigint.rs @@ -1,8 +1 @@ -#![allow(clippy::assign_op_pattern)] -#![allow(clippy::ptr_offset_with_cast)] - -use uint::construct_uint; - -construct_uint! 
{ - pub struct U256(4); -} +pub use primitive_types::U256; diff --git a/modules/src/clients/ics07_tendermint/client_def.rs b/modules/src/clients/ics07_tendermint/client_def.rs index 5b953f0e1b..7dc1af23d9 100644 --- a/modules/src/clients/ics07_tendermint/client_def.rs +++ b/modules/src/clients/ics07_tendermint/client_def.rs @@ -263,9 +263,7 @@ where epoch: consensus_height.revision_number, height: consensus_height.revision_height, }; - let value = expected_consensus_state - .encode_vec() - .map_err(Ics02Error::invalid_any_consensus_state)?; + let value = expected_consensus_state.encode_vec(); verify_membership::(client_state, prefix, proof, root, path, value) } @@ -284,9 +282,7 @@ where client_state.verify_height(height)?; let path = ConnectionsPath(connection_id.clone()); - let value = expected_connection_end - .encode_vec() - .map_err(Ics02Error::invalid_connection_end)?; + let value = expected_connection_end.encode_vec(); verify_membership::(client_state, prefix, proof, root, path, value) } @@ -306,9 +302,7 @@ where client_state.verify_height(height)?; let path = ChannelEndsPath(port_id.clone(), *channel_id); - let value = expected_channel_end - .encode_vec() - .map_err(Ics02Error::invalid_channel_end)?; + let value = expected_channel_end.encode_vec(); verify_membership::(client_state, prefix, proof, root, path, value) } @@ -326,9 +320,7 @@ where client_state.verify_height(height)?; let path = ClientStatePath(client_id.clone()); - let value = expected_client_state - .encode_vec() - .map_err(Ics02Error::invalid_any_client_state)?; + let value = expected_client_state.encode_vec(); verify_membership::(client_state, prefix, proof, root, path, value) } diff --git a/modules/src/clients/ics11_beefy/client_def.rs b/modules/src/clients/ics11_beefy/client_def.rs index 4bfe7ae56b..af2fd7215b 100644 --- a/modules/src/clients/ics11_beefy/client_def.rs +++ b/modules/src/clients/ics11_beefy/client_def.rs @@ -220,7 +220,7 @@ impl ClientDef for BeefyClient(prefix, proof, root, path, value) } @@ -238,7 +238,7 @@ impl ClientDef for BeefyClient Result<(), Error> { let path = ConnectionsPath(connection_id.clone()); - let value = expected_connection_end.encode_vec().unwrap(); + let value = expected_connection_end.encode_vec(); verify_membership::(prefix, proof, root, path, value) } @@ -256,7 +256,7 @@ impl ClientDef for BeefyClient Result<(), Error> { let path = ChannelEndsPath(port_id.clone(), *channel_id); - let value = expected_channel_end.encode_vec().unwrap(); + let value = expected_channel_end.encode_vec(); verify_membership::(prefix, proof, root, path, value) } @@ -272,7 +272,7 @@ impl ClientDef for BeefyClient Result<(), Error> { let path = ClientStatePath(client_id.clone()); - let value = expected_client_state.encode_vec().unwrap(); + let value = expected_client_state.encode_vec(); verify_membership::(prefix, proof, root, path, value) } diff --git a/modules/src/clients/ics13_near/types.rs b/modules/src/clients/ics13_near/types.rs index 8493710b71..2f96a943f4 100644 --- a/modules/src/clients/ics13_near/types.rs +++ b/modules/src/clients/ics13_near/types.rs @@ -1,5 +1,5 @@ +use alloc::vec::Vec; use borsh::maybestd::{io::Write, string::String}; -use sp_std::vec::Vec; use borsh::{BorshDeserialize, BorshSerialize}; use sp_core::ed25519::{Public as Ed25519Public, Signature as Ed25519Signature}; diff --git a/modules/src/core/ics02_client/client_consensus.rs b/modules/src/core/ics02_client/client_consensus.rs index 40729d00a3..b0be254e61 100644 --- a/modules/src/core/ics02_client/client_consensus.rs +++ 
b/modules/src/core/ics02_client/client_consensus.rs @@ -111,23 +111,17 @@ impl From for Any { match value { AnyConsensusState::Tendermint(value) => Any { type_url: TENDERMINT_CONSENSUS_STATE_TYPE_URL.to_string(), - value: value - .encode_vec() - .expect("encoding to `Any` from `AnyConsensusState::Tendermint`"), + value: value.encode_vec(), }, #[cfg(any(test, feature = "ics11_beefy"))] AnyConsensusState::Beefy(value) => Any { type_url: BEEFY_CONSENSUS_STATE_TYPE_URL.to_string(), - value: value - .encode_vec() - .expect("encoding to `Any` from `AnyConsensusState::Beefy`"), + value: value.encode_vec(), }, #[cfg(any(test, feature = "mocks"))] AnyConsensusState::Mock(value) => Any { type_url: MOCK_CONSENSUS_STATE_TYPE_URL.to_string(), - value: value - .encode_vec() - .expect("encoding to `Any` from `AnyConsensusState::Mock`"), + value: value.encode_vec(), }, } } diff --git a/modules/src/core/ics02_client/client_state.rs b/modules/src/core/ics02_client/client_state.rs index fd696cdb12..af360ece07 100644 --- a/modules/src/core/ics02_client/client_state.rs +++ b/modules/src/core/ics02_client/client_state.rs @@ -236,30 +236,22 @@ impl From for Any { match value { AnyClientState::Tendermint(value) => Any { type_url: TENDERMINT_CLIENT_STATE_TYPE_URL.to_string(), - value: value - .encode_vec() - .expect("encoding to `Any` from `AnyClientState::Tendermint`"), + value: value.encode_vec(), }, #[cfg(any(test, feature = "ics11_beefy"))] AnyClientState::Beefy(value) => Any { type_url: BEEFY_CLIENT_STATE_TYPE_URL.to_string(), - value: value - .encode_vec() - .expect("encoding to `Any` from `AnyClientState::Tendermint`"), + value: value.encode_vec(), }, #[cfg(any(test, feature = "ics11_beefy"))] AnyClientState::Near(_) => Any { type_url: BEEFY_CLIENT_STATE_TYPE_URL.to_string(), - value: value - .encode_vec() - .expect("encoding to `Any` from `AnyClientState::Near`"), + value: value.encode_vec(), }, #[cfg(any(test, feature = "mocks"))] AnyClientState::Mock(value) => Any { type_url: MOCK_CLIENT_STATE_TYPE_URL.to_string(), - value: value - .encode_vec() - .expect("encoding to `Any` from `AnyClientState::Mock`"), + value: value.encode_vec(), }, } } diff --git a/modules/src/core/ics02_client/events.rs b/modules/src/core/ics02_client/events.rs index f858a7549f..6fec32e25a 100644 --- a/modules/src/core/ics02_client/events.rs +++ b/modules/src/core/ics02_client/events.rs @@ -1,8 +1,8 @@ //! Types for the IBC events emitted from Tendermint Websocket by the client module. 
use serde_derive::{Deserialize, Serialize}; -use tendermint::abci::tag::Tag; use tendermint::abci::Event as AbciEvent; +use tendermint::abci::EventAttribute; use crate::core::ics02_client::client_type::ClientType; use crate::core::ics02_client::error::Error; @@ -28,7 +28,7 @@ const CONSENSUS_HEIGHT_ATTRIBUTE_KEY: &str = "consensus_height"; const HEADER_ATTRIBUTE_KEY: &str = "header"; pub fn try_from_tx(event: &AbciEvent) -> Option { - match event.type_str.parse() { + match event.kind.parse() { Ok(IbcEventType::CreateClient) => extract_attributes_from_tx(event) .map(CreateClient) .map(IbcEvent::CreateClient) @@ -57,7 +57,7 @@ fn extract_attributes_from_tx(event: &AbciEvent) -> Result { for tag in &event.attributes { let key = tag.key.as_ref(); - let value = tag.value.as_ref(); + let value = tag.value.as_str(); match key { HEIGHT_ATTRIBUTE_KEY => { attr.height = value @@ -86,8 +86,8 @@ fn extract_attributes_from_tx(event: &AbciEvent) -> Result { pub fn extract_header_from_tx(event: &AbciEvent) -> Result { for tag in &event.attributes { - let key = tag.key.as_ref(); - let value = tag.value.as_ref(); + let key = tag.key.as_str(); + let value = tag.value.as_str(); if key == HEADER_ATTRIBUTE_KEY { return AnyHeader::decode_from_string(value); } @@ -147,23 +147,27 @@ impl Default for Attributes { /// is infallible, even if it is not represented in the error type. /// Once tendermint-rs improves the API of the `Key` and `Value` types, /// we will be able to remove the `.parse().unwrap()` calls. -impl From for Vec { +impl From for Vec { fn from(a: Attributes) -> Self { - let height = Tag { + let height = EventAttribute { key: HEIGHT_ATTRIBUTE_KEY.parse().unwrap(), value: a.height.to_string().parse().unwrap(), + index: false, }; - let client_id = Tag { + let client_id = EventAttribute { key: CLIENT_ID_ATTRIBUTE_KEY.parse().unwrap(), value: a.client_id.to_string().parse().unwrap(), + index: false, }; - let client_type = Tag { + let client_type = EventAttribute { key: CLIENT_TYPE_ATTRIBUTE_KEY.parse().unwrap(), value: a.client_type.as_str().parse().unwrap(), + index: false, }; - let consensus_height = Tag { + let consensus_height = EventAttribute { key: CONSENSUS_HEIGHT_ATTRIBUTE_KEY.parse().unwrap(), value: a.height.to_string().parse().unwrap(), + index: false, }; vec![height, client_id, client_type, consensus_height] } @@ -209,9 +213,9 @@ impl From for IbcEvent { impl From for AbciEvent { fn from(v: CreateClient) -> Self { - let attributes = Vec::::from(v.0); + let attributes = Vec::::from(v.0); AbciEvent { - type_str: IbcEventType::CreateClient.as_str().to_string(), + kind: IbcEventType::CreateClient.as_str().to_string(), attributes, } } @@ -268,16 +272,17 @@ impl From for IbcEvent { impl From for AbciEvent { fn from(v: UpdateClient) -> Self { - let mut attributes = Vec::::from(v.common); + let mut attributes = Vec::::from(v.common); if let Some(h) = v.header { - let header = Tag { + let header = EventAttribute { key: HEADER_ATTRIBUTE_KEY.parse().unwrap(), value: h.encode_to_string().parse().unwrap(), + index: false, }; attributes.push(header); } AbciEvent { - type_str: IbcEventType::UpdateClient.as_str().to_string(), + kind: IbcEventType::UpdateClient.as_str().to_string(), attributes, } } @@ -326,9 +331,9 @@ impl From for IbcEvent { impl From for AbciEvent { fn from(v: ClientMisbehaviour) -> Self { - let attributes = Vec::::from(v.0); + let attributes = Vec::::from(v.0); AbciEvent { - type_str: IbcEventType::ClientMisbehaviour.as_str().to_string(), + kind: 
IbcEventType::ClientMisbehaviour.as_str().to_string(), attributes, } } @@ -358,9 +363,9 @@ impl From for UpgradeClient { impl From for AbciEvent { fn from(v: UpgradeClient) -> Self { - let attributes = Vec::::from(v.0); + let attributes = Vec::::from(v.0); AbciEvent { - type_str: IbcEventType::UpgradeClient.as_str().to_string(), + kind: IbcEventType::UpgradeClient.as_str().to_string(), attributes, } } diff --git a/modules/src/core/ics02_client/handler/update_client.rs b/modules/src/core/ics02_client/handler/update_client.rs index 55fc8351f9..8133596849 100644 --- a/modules/src/core/ics02_client/handler/update_client.rs +++ b/modules/src/core/ics02_client/handler/update_client.rs @@ -62,7 +62,7 @@ pub fn process( Error::consensus_state_not_found(client_id.clone(), client_state.latest_height()) })?; - debug!("latest consensus state: {:?}", latest_consensus_state); + tracing::debug!("latest consensus state: {:?}", latest_consensus_state); let now = ctx.host_timestamp(); let duration = now diff --git a/modules/src/core/ics02_client/header.rs b/modules/src/core/ics02_client/header.rs index 1baf6d828a..88cbddf7da 100644 --- a/modules/src/core/ics02_client/header.rs +++ b/modules/src/core/ics02_client/header.rs @@ -87,7 +87,7 @@ impl Header for AnyHeader { impl AnyHeader { pub fn encode_to_string(&self) -> String { - let buf = Protobuf::encode_vec(self).expect("encoding shouldn't fail"); + let buf = Protobuf::encode_vec(self); let encoded = hex::encode(buf); String::from_utf8(encoded).expect("hex-encoded string should always be valid UTF-8") } @@ -132,17 +132,13 @@ impl From for Any { match value { AnyHeader::Tendermint(header) => Any { type_url: TENDERMINT_HEADER_TYPE_URL.to_string(), - value: header - .encode_vec() - .expect("encoding to `Any` from `AnyHeader::Tendermint`"), + value: header.encode_vec(), }, #[cfg(any(test, feature = "ics11_beefy"))] AnyHeader::Beefy(header) => Any { type_url: BEEFY_HEADER_TYPE_URL.to_string(), - value: header - .encode_vec() - .expect("encoding to `Any` from `AnyHeader::Beefy`"), + value: header.encode_vec(), }, // AnyHeader::Near(header) => Any { // type_url: NEAR_HEADER_TYPE_URL.to_string(), @@ -153,9 +149,7 @@ impl From for Any { #[cfg(any(test, feature = "mocks"))] AnyHeader::Mock(header) => Any { type_url: MOCK_HEADER_TYPE_URL.to_string(), - value: header - .encode_vec() - .expect("encoding to `Any` from `AnyHeader::Mock`"), + value: header.encode_vec(), }, } } diff --git a/modules/src/core/ics02_client/misbehaviour.rs b/modules/src/core/ics02_client/misbehaviour.rs index 51ba8f1144..98a8b55b10 100644 --- a/modules/src/core/ics02_client/misbehaviour.rs +++ b/modules/src/core/ics02_client/misbehaviour.rs @@ -87,17 +87,13 @@ impl From for Any { match value { AnyMisbehaviour::Tendermint(misbehaviour) => Any { type_url: TENDERMINT_MISBEHAVIOR_TYPE_URL.to_string(), - value: misbehaviour - .encode_vec() - .expect("encoding to `Any` from `AnyMisbehavior::Tendermint`"), + value: misbehaviour.encode_vec(), }, #[cfg(any(test, feature = "mocks"))] AnyMisbehaviour::Mock(misbehaviour) => Any { type_url: MOCK_MISBEHAVIOUR_TYPE_URL.to_string(), - value: misbehaviour - .encode_vec() - .expect("encoding to `Any` from `AnyMisbehavior::Mock`"), + value: misbehaviour.encode_vec(), }, } } diff --git a/modules/src/core/ics03_connection/events.rs b/modules/src/core/ics03_connection/events.rs index 702747c147..2a9e9abd99 100644 --- a/modules/src/core/ics03_connection/events.rs +++ b/modules/src/core/ics03_connection/events.rs @@ -1,8 +1,8 @@ //! 
Types for the IBC events emitted from Tendermint Websocket by the connection module. use serde_derive::{Deserialize, Serialize}; -use tendermint::abci::tag::Tag; use tendermint::abci::Event as AbciEvent; +use tendermint::abci::EventAttribute; use crate::core::ics02_client::error::Error as Ics02Error; use crate::core::ics02_client::height::Height; @@ -19,7 +19,7 @@ const COUNTERPARTY_CONN_ID_ATTRIBUTE_KEY: &str = "counterparty_connection_id"; const COUNTERPARTY_CLIENT_ID_ATTRIBUTE_KEY: &str = "counterparty_client_id"; pub fn try_from_tx(event: &tendermint::abci::Event) -> Option { - match event.type_str.parse() { + match event.kind.parse() { Ok(IbcEventType::OpenInitConnection) => extract_attributes_from_tx(event) .map(OpenInit::from) .map(IbcEvent::OpenInitConnection) @@ -44,8 +44,8 @@ fn extract_attributes_from_tx(event: &tendermint::abci::Event) -> Result { attr.height = value.parse().map_err(|e| { @@ -88,36 +88,41 @@ pub struct Attributes { /// is infallible, even if it is not represented in the error type. /// Once tendermint-rs improves the API of the `Key` and `Value` types, /// we will be able to remove the `.parse().unwrap()` calls. -impl From for Vec { +impl From for Vec { fn from(a: Attributes) -> Self { let mut attributes = vec![]; - let height = Tag { + let height = EventAttribute { key: HEIGHT_ATTRIBUTE_KEY.parse().unwrap(), value: a.height.to_string().parse().unwrap(), + index: false, }; attributes.push(height); if let Some(conn_id) = a.connection_id { - let conn_id = Tag { + let conn_id = EventAttribute { key: CONN_ID_ATTRIBUTE_KEY.parse().unwrap(), value: conn_id.to_string().parse().unwrap(), + index: false, }; attributes.push(conn_id); } - let client_id = Tag { + let client_id = EventAttribute { key: CLIENT_ID_ATTRIBUTE_KEY.parse().unwrap(), value: a.client_id.to_string().parse().unwrap(), + index: false, }; attributes.push(client_id); if let Some(conn_id) = a.counterparty_connection_id { - let conn_id = Tag { + let conn_id = EventAttribute { key: COUNTERPARTY_CONN_ID_ATTRIBUTE_KEY.parse().unwrap(), value: conn_id.to_string().parse().unwrap(), + index: false, }; attributes.push(conn_id); } - let counterparty_client_id = Tag { + let counterparty_client_id = EventAttribute { key: COUNTERPARTY_CLIENT_ID_ATTRIBUTE_KEY.parse().unwrap(), value: a.counterparty_client_id.to_string().parse().unwrap(), + index: false, }; attributes.push(counterparty_client_id); attributes @@ -156,9 +161,9 @@ impl From for IbcEvent { impl From for AbciEvent { fn from(v: OpenInit) -> Self { - let attributes = Vec::::from(v.0); + let attributes = Vec::::from(v.0); AbciEvent { - type_str: IbcEventType::OpenInitConnection.as_str().to_string(), + kind: IbcEventType::OpenInitConnection.as_str().to_string(), attributes, } } @@ -196,9 +201,9 @@ impl From for IbcEvent { impl From for AbciEvent { fn from(v: OpenTry) -> Self { - let attributes = Vec::::from(v.0); + let attributes = Vec::::from(v.0); AbciEvent { - type_str: IbcEventType::OpenTryConnection.as_str().to_string(), + kind: IbcEventType::OpenTryConnection.as_str().to_string(), attributes, } } @@ -236,9 +241,9 @@ impl From for IbcEvent { impl From for AbciEvent { fn from(v: OpenAck) -> Self { - let attributes = Vec::::from(v.0); + let attributes = Vec::::from(v.0); AbciEvent { - type_str: IbcEventType::OpenAckConnection.as_str().to_string(), + kind: IbcEventType::OpenAckConnection.as_str().to_string(), attributes, } } @@ -276,9 +281,9 @@ impl From for IbcEvent { impl From for AbciEvent { fn from(v: OpenConfirm) -> Self { - let attributes = 
Vec::::from(v.0); + let attributes = Vec::::from(v.0); AbciEvent { - type_str: IbcEventType::OpenConfirmConnection.as_str().to_string(), + kind: IbcEventType::OpenConfirmConnection.as_str().to_string(), attributes, } } diff --git a/modules/src/core/ics04_channel/context.rs b/modules/src/core/ics04_channel/context.rs index 0cd1aff12f..fd0764fd81 100644 --- a/modules/src/core/ics04_channel/context.rs +++ b/modules/src/core/ics04_channel/context.rs @@ -140,28 +140,20 @@ pub trait ChannelKeeper { self.store_packet((res.port_id.clone(), res.channel_id, res.seq), res.packet)?; } - PacketResult::Recv(res) => { - let res = match res { - RecvPacketResult::Success(res) => res, - RecvPacketResult::NoOp => unreachable!(), - }; - match res.receipt { - None => { - // Ordered channel - self.store_next_sequence_recv( - (res.port_id.clone(), res.channel_id), - res.seq_number, - )? - } - Some(r) => { - // Unordered channel - self.store_packet_receipt( - (res.port_id.clone(), res.channel_id, res.seq), - r, - )? - } - } - } + PacketResult::Recv(res) => match res { + RecvPacketResult::Ordered { + port_id, + channel_id, + next_seq_recv, + } => self.store_next_sequence_recv((port_id, channel_id), next_seq_recv)?, + RecvPacketResult::Unordered { + port_id, + channel_id, + sequence, + receipt, + } => self.store_packet_receipt((port_id, channel_id, sequence), receipt)?, + RecvPacketResult::NoOp => unreachable!(), + }, PacketResult::WriteAck(res) => { self.store_packet_acknowledgement( (res.port_id.clone(), res.channel_id, res.seq), diff --git a/modules/src/core/ics04_channel/events.rs b/modules/src/core/ics04_channel/events.rs index 35b4865ef6..c8e0acccf7 100644 --- a/modules/src/core/ics04_channel/events.rs +++ b/modules/src/core/ics04_channel/events.rs @@ -1,8 +1,8 @@ //! Types for the IBC events emitted from Tendermint Websocket by the channels module. use serde_derive::{Deserialize, Serialize}; -use tendermint::abci::tag::Tag; use tendermint::abci::Event as AbciEvent; +use tendermint::abci::EventAttribute; use crate::core::ics02_client::height::Height; use crate::core::ics04_channel::error::Error; @@ -34,7 +34,7 @@ const PKT_TIMEOUT_TIMESTAMP_ATTRIBUTE_KEY: &str = "packet_timeout_timestamp"; const PKT_ACK_ATTRIBUTE_KEY: &str = "packet_ack"; pub fn try_from_tx(event: &tendermint::abci::Event) -> Option { - match event.type_str.parse() { + match event.kind.parse() { Ok(IbcEventType::OpenInitChannel) => extract_attributes_from_tx(event) .map(OpenInit::try_from) .map(|res| res.ok().map(IbcEvent::OpenInitChannel)) @@ -118,8 +118,8 @@ fn extract_attributes_from_tx(event: &tendermint::abci::Event) -> Result attr.port_id = value.parse().map_err(Error::identifier)?, CHANNEL_ID_ATTRIBUTE_KEY => { @@ -147,8 +147,8 @@ fn extract_packet_and_write_ack_from_tx( let mut packet = Packet::default(); let mut write_ack: Vec = Vec::new(); for tag in &event.attributes { - let key = tag.key.as_ref(); - let value = tag.value.as_ref(); + let key = tag.key.as_str(); + let value = tag.value.as_str(); match key { PKT_SRC_PORT_ATTRIBUTE_KEY => { packet.source_port = value.parse().map_err(Error::identifier)?; @@ -240,40 +240,46 @@ impl Attributes { /// is infallible, even if it is not represented in the error type. /// Once tendermint-rs improves the API of the `Key` and `Value` types, /// we will be able to remove the `.parse().unwrap()` calls. 
-impl From for Vec { +impl From for Vec { fn from(a: Attributes) -> Self { let mut attributes = vec![]; - let height = Tag { + let height = EventAttribute { key: HEIGHT_ATTRIBUTE_KEY.parse().unwrap(), value: a.height.to_string().parse().unwrap(), + index: false, }; attributes.push(height); - let port_id = Tag { + let port_id = EventAttribute { key: PORT_ID_ATTRIBUTE_KEY.parse().unwrap(), value: a.port_id.to_string().parse().unwrap(), + index: false, }; attributes.push(port_id); if let Some(channel_id) = a.channel_id { - let channel_id = Tag { + let channel_id = EventAttribute { key: CHANNEL_ID_ATTRIBUTE_KEY.parse().unwrap(), value: channel_id.to_string().parse().unwrap(), + index: false, }; attributes.push(channel_id); } - let connection_id = Tag { + let connection_id = EventAttribute { key: CONNECTION_ID_ATTRIBUTE_KEY.parse().unwrap(), value: a.connection_id.to_string().parse().unwrap(), + index: false, }; attributes.push(connection_id); - let counterparty_port_id = Tag { + let counterparty_port_id = EventAttribute { key: COUNTERPARTY_PORT_ID_ATTRIBUTE_KEY.parse().unwrap(), value: a.counterparty_port_id.to_string().parse().unwrap(), + index: false, }; attributes.push(counterparty_port_id); if let Some(channel_id) = a.counterparty_channel_id { - let channel_id = Tag { + let channel_id = EventAttribute { key: COUNTERPARTY_CHANNEL_ID_ATTRIBUTE_KEY.parse().unwrap(), value: channel_id.to_string().parse().unwrap(), + index: false, }; attributes.push(channel_id); } @@ -289,41 +295,47 @@ impl From for Vec { /// is infallible, even if it is not represented in the error type. /// Once tendermint-rs improves the API of the `Key` and `Value` types, /// we will be able to remove the `.parse().unwrap()` calls. -impl TryFrom for Vec { +impl TryFrom for Vec { type Error = Error; fn try_from(p: Packet) -> Result { let mut attributes = vec![]; - let src_port = Tag { + let src_port = EventAttribute { key: PKT_SRC_PORT_ATTRIBUTE_KEY.parse().unwrap(), value: p.source_port.to_string().parse().unwrap(), + index: false, }; attributes.push(src_port); - let src_channel = Tag { + let src_channel = EventAttribute { key: PKT_SRC_CHANNEL_ATTRIBUTE_KEY.parse().unwrap(), value: p.source_channel.to_string().parse().unwrap(), + index: false, }; attributes.push(src_channel); - let dst_port = Tag { + let dst_port = EventAttribute { key: PKT_DST_PORT_ATTRIBUTE_KEY.parse().unwrap(), value: p.destination_port.to_string().parse().unwrap(), + index: false, }; attributes.push(dst_port); - let dst_channel = Tag { + let dst_channel = EventAttribute { key: PKT_DST_CHANNEL_ATTRIBUTE_KEY.parse().unwrap(), value: p.destination_channel.to_string().parse().unwrap(), + index: false, }; attributes.push(dst_channel); - let sequence = Tag { + let sequence = EventAttribute { key: PKT_SEQ_ATTRIBUTE_KEY.parse().unwrap(), value: p.sequence.to_string().parse().unwrap(), + index: false, }; attributes.push(sequence); - let timeout_height = Tag { + let timeout_height = EventAttribute { key: PKT_TIMEOUT_HEIGHT_ATTRIBUTE_KEY.parse().unwrap(), value: p.timeout_height.to_string().parse().unwrap(), + index: false, }; attributes.push(timeout_height); - let timeout_timestamp = Tag { + let timeout_timestamp = EventAttribute { key: PKT_TIMEOUT_TIMESTAMP_ATTRIBUTE_KEY.parse().unwrap(), value: p .timeout_timestamp @@ -331,18 +343,21 @@ impl TryFrom for Vec { .to_string() .parse() .unwrap(), + index: false, }; attributes.push(timeout_timestamp); let val = String::from_utf8(p.data).expect("hex-encoded string should always be valid UTF-8"); - let packet_data = 
Tag { + let packet_data = EventAttribute { key: PKT_DATA_ATTRIBUTE_KEY.parse().unwrap(), value: val.parse().unwrap(), + index: false, }; attributes.push(packet_data); - let ack = Tag { + let ack = EventAttribute { key: PKT_ACK_ATTRIBUTE_KEY.parse().unwrap(), value: "".parse().unwrap(), + index: false, }; attributes.push(ack); Ok(attributes) @@ -719,10 +734,10 @@ macro_rules! impl_from_ibc_to_abci_event { ($($event:ty),+) => { $(impl From<$event> for AbciEvent { fn from(v: $event) -> Self { - let attributes = Vec::::from(Attributes::from(v)); - let type_str = <$event>::event_type().as_str().to_string(); + let attributes = Vec::::from(Attributes::from(v)); + let kind = <$event>::event_type().as_str().to_string(); AbciEvent { - type_str, + kind, attributes, } } @@ -797,9 +812,9 @@ impl TryFrom for AbciEvent { type Error = Error; fn try_from(v: SendPacket) -> Result { - let attributes = Vec::::try_from(v.packet)?; + let attributes = Vec::::try_from(v.packet)?; Ok(AbciEvent { - type_str: IbcEventType::SendPacket.as_str().to_string(), + kind: IbcEventType::SendPacket.as_str().to_string(), attributes, }) } @@ -854,9 +869,9 @@ impl TryFrom for AbciEvent { type Error = Error; fn try_from(v: ReceivePacket) -> Result { - let attributes = Vec::::try_from(v.packet)?; + let attributes = Vec::::try_from(v.packet)?; Ok(AbciEvent { - type_str: IbcEventType::ReceivePacket.as_str().to_string(), + kind: IbcEventType::ReceivePacket.as_str().to_string(), attributes, }) } @@ -907,17 +922,18 @@ impl TryFrom for AbciEvent { type Error = Error; fn try_from(v: WriteAcknowledgement) -> Result { - let mut attributes = Vec::::try_from(v.packet)?; + let mut attributes = Vec::::try_from(v.packet)?; let val = String::from_utf8(v.ack).expect("hex-encoded string should always be valid UTF-8"); - // No actual conversion from string to `Tag::Key` or `Tag::Value` - let ack = Tag { + // No actual conversion from string to `EventAttribute::Key` or `EventAttribute::Value` + let ack = EventAttribute { key: PKT_ACK_ATTRIBUTE_KEY.parse().unwrap(), value: val.parse().unwrap(), + index: false, }; attributes.push(ack); Ok(AbciEvent { - type_str: IbcEventType::WriteAck.as_str().to_string(), + kind: IbcEventType::WriteAck.as_str().to_string(), attributes, }) } @@ -974,9 +990,9 @@ impl TryFrom for AbciEvent { type Error = Error; fn try_from(v: AcknowledgePacket) -> Result { - let attributes = Vec::::try_from(v.packet)?; + let attributes = Vec::::try_from(v.packet)?; Ok(AbciEvent { - type_str: IbcEventType::AckPacket.as_str().to_string(), + kind: IbcEventType::AckPacket.as_str().to_string(), attributes, }) } @@ -1031,9 +1047,9 @@ impl TryFrom for AbciEvent { type Error = Error; fn try_from(v: TimeoutPacket) -> Result { - let attributes = Vec::::try_from(v.packet)?; + let attributes = Vec::::try_from(v.packet)?; Ok(AbciEvent { - type_str: IbcEventType::Timeout.as_str().to_string(), + kind: IbcEventType::Timeout.as_str().to_string(), attributes, }) } @@ -1082,9 +1098,9 @@ impl TryFrom for AbciEvent { type Error = Error; fn try_from(v: TimeoutOnClosePacket) -> Result { - let attributes = Vec::::try_from(v.packet)?; + let attributes = Vec::::try_from(v.packet)?; Ok(AbciEvent { - type_str: IbcEventType::TimeoutOnClose.as_str().to_string(), + kind: IbcEventType::TimeoutOnClose.as_str().to_string(), attributes, }) } diff --git a/modules/src/core/ics04_channel/handler/recv_packet.rs b/modules/src/core/ics04_channel/handler/recv_packet.rs index 8ac7ae0c71..11b2b5edde 100644 --- a/modules/src/core/ics04_channel/handler/recv_packet.rs +++ 
b/modules/src/core/ics04_channel/handler/recv_packet.rs @@ -15,19 +15,20 @@ use crate::timestamp::Expiry; use crate::Height; use core::fmt::Debug; -#[derive(Clone, Debug)] -pub struct RecvPacketSuccess { - pub port_id: PortId, - pub channel_id: ChannelId, - pub seq: Sequence, - pub seq_number: Sequence, - pub receipt: Option, -} - #[derive(Clone, Debug)] pub enum RecvPacketResult { - Success(RecvPacketSuccess), NoOp, + Unordered { + port_id: PortId, + channel_id: ChannelId, + sequence: Sequence, + receipt: Receipt, + }, + Ordered { + port_id: PortId, + channel_id: ChannelId, + next_seq_recv: Sequence, + }, } pub fn process( @@ -89,8 +90,10 @@ pub fn process( )?; let result = if dest_channel_end.order_matches(&Order::Ordered) { - let next_seq_recv = - ctx.get_next_sequence_recv(&(packet.source_port.clone(), packet.source_channel))?; + let next_seq_recv = ctx.get_next_sequence_recv(&( + packet.destination_port.clone(), + packet.destination_channel, + ))?; if packet.sequence < next_seq_recv { output.emit(IbcEvent::ReceivePacket(ReceivePacket { @@ -105,17 +108,15 @@ pub fn process( )); } - PacketResult::Recv(RecvPacketResult::Success(RecvPacketSuccess { - port_id: packet.source_port.clone(), - channel_id: packet.source_channel, - seq: packet.sequence, - seq_number: next_seq_recv.increment(), - receipt: None, - })) + PacketResult::Recv(RecvPacketResult::Ordered { + port_id: packet.destination_port.clone(), + channel_id: packet.destination_channel, + next_seq_recv: next_seq_recv.increment(), + }) } else { let packet_rec = ctx.get_packet_receipt(&( - packet.source_port.clone(), - packet.source_channel, + packet.destination_port.clone(), + packet.destination_channel, packet.sequence, )); @@ -129,13 +130,12 @@ pub fn process( } Err(e) if e.detail() == Error::packet_receipt_not_found(packet.sequence).detail() => { // store a receipt that does not contain any data - PacketResult::Recv(RecvPacketResult::Success(RecvPacketSuccess { - port_id: packet.source_port.clone(), - channel_id: packet.source_channel, - seq: packet.sequence, - seq_number: 1.into(), - receipt: Some(Receipt::Ok), - })) + PacketResult::Recv(RecvPacketResult::Unordered { + port_id: packet.destination_port.clone(), + channel_id: packet.destination_channel, + sequence: packet.sequence, + receipt: Receipt::Ok, + }) } Err(e) => return Err(Error::implementation_specific(e.to_string())), } diff --git a/modules/src/core/ics23_commitment/merkle.rs b/modules/src/core/ics23_commitment/merkle.rs index 5d9a5c16bb..e5083cfa73 100644 --- a/modules/src/core/ics23_commitment/merkle.rs +++ b/modules/src/core/ics23_commitment/merkle.rs @@ -1,8 +1,5 @@ use crate::prelude::*; -#[cfg(any(test, feature = "ics11_beefy"))] -use sp_std::marker::PhantomData; -#[cfg(not(any(test, feature = "ics11_beefy")))] -use std::marker::PhantomData; +use core::marker::PhantomData; use tendermint::merkle::proof::Proof as TendermintProof; use crate::clients::host_functions::{HostFunctionsManager, HostFunctionsProvider}; diff --git a/modules/src/core/ics26_routing/context.rs b/modules/src/core/ics26_routing/context.rs index 0582bd9673..66555ce008 100644 --- a/modules/src/core/ics26_routing/context.rs +++ b/modules/src/core/ics26_routing/context.rs @@ -99,7 +99,7 @@ impl OnRecvPacketAck { pub type ModuleOutputBuilder = HandlerOutputBuilder<(), ModuleEvent>; -pub trait Module: Debug + Send + Sync + AsAnyMut + 'static { +pub trait Module: Send + Sync + AsAnyMut { #[allow(clippy::too_many_arguments)] fn on_chan_open_init( &mut self, diff --git a/modules/src/events.rs 
b/modules/src/events.rs index 58e6d54e2b..721b9cc41b 100644 --- a/modules/src/events.rs +++ b/modules/src/events.rs @@ -6,8 +6,8 @@ use core::str::FromStr; use flex_error::{define_error, TraceError}; use prost::alloc::fmt::Formatter; use serde_derive::{Deserialize, Serialize}; -use tendermint::abci::tag::Tag; use tendermint::abci::Event as AbciEvent; +use tendermint::abci::EventAttribute; use crate::core::ics02_client::error as client_error; use crate::core::ics02_client::events as ClientEvents; @@ -502,7 +502,7 @@ impl TryFrom for AbciEvent { let attributes = event.attributes.into_iter().map(Into::into).collect(); Ok(AbciEvent { - type_str: event.kind, + kind: event.kind, attributes, }) } @@ -529,7 +529,7 @@ impl From<(K, V)> for ModuleEventAttribute { } } -impl From for Tag { +impl From for EventAttribute { fn from(attr: ModuleEventAttribute) -> Self { Self { key: attr @@ -540,6 +540,7 @@ impl From for Tag { .key .parse() .expect("Value::from_str() impl is infallible"), + index: false, } } } diff --git a/modules/src/lib.rs b/modules/src/lib.rs index 3a0655460c..ae1121e581 100644 --- a/modules/src/lib.rs +++ b/modules/src/lib.rs @@ -60,6 +60,7 @@ pub mod handler; pub mod keys; pub mod macros; pub mod proofs; +#[cfg(feature = "std")] pub mod query; pub mod relayer; pub mod signer; diff --git a/modules/src/mock/context.rs b/modules/src/mock/context.rs index 777aaa7c7b..a2da78b11a 100644 --- a/modules/src/mock/context.rs +++ b/modules/src/mock/context.rs @@ -5,7 +5,7 @@ use alloc::collections::btree_map::BTreeMap; use alloc::sync::Arc; use core::borrow::Borrow; use core::cmp::min; -use core::fmt::Debug; +use core::fmt::{Debug, Formatter}; use core::ops::{Add, Sub}; use core::time::Duration; use std::sync::Mutex; @@ -629,9 +629,15 @@ impl RouterBuilder for MockRouterBuilder { } } -#[derive(Clone, Debug, Default)] +#[derive(Clone, Default)] pub struct MockRouter(BTreeMap>); +impl Debug for MockRouter { + fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result { + write!(f, "{:?}", self.0.keys().collect::>()) + } +} + impl Router for MockRouter { fn get_route_mut(&mut self, module_id: &impl Borrow) -> Option<&mut dyn Module> { self.0.get_mut(module_id.borrow()).and_then(Arc::get_mut) diff --git a/modules/src/mock/header.rs b/modules/src/mock/header.rs index cc3c38b092..4a0df64298 100644 --- a/modules/src/mock/header.rs +++ b/modules/src/mock/header.rs @@ -92,7 +92,7 @@ mod tests { #[test] fn encode_any() { let header = MockHeader::new(Height::new(1, 10)).with_timestamp(Timestamp::none()); - let bytes = header.wrap_any().encode_vec().unwrap(); + let bytes = header.wrap_any().encode_vec(); assert_eq!( &bytes, diff --git a/modules/src/query.rs b/modules/src/query.rs index 9b7f035c8d..afccaba8b6 100644 --- a/modules/src/query.rs +++ b/modules/src/query.rs @@ -1,4 +1,4 @@ -use tendermint::abci::transaction::Hash; +use tendermint_rpc::abci::transaction::Hash; use crate::core::ics02_client::client_consensus::QueryClientEventRequest; use crate::core::ics04_channel::channel::QueryPacketEventDataRequest; diff --git a/modules/src/test_utils.rs b/modules/src/test_utils.rs index 6f8b61a1c4..41577be86c 100644 --- a/modules/src/test_utils.rs +++ b/modules/src/test_utils.rs @@ -116,26 +116,30 @@ impl HostFunctionsProvider for Crypto { } fn verify_membership_trie_proof( - root: &sp_core::H256, + root: &[u8; 32], proof: &[Vec], key: &[u8], value: &[u8], ) -> Result<(), Ics02Error> { let item = vec![(key, Some(value))]; sp_trie::verify_trie_proof::, _, _, _>( - root, proof, &item, + 
&sp_core::H256::from_slice(root), + proof, + &item, ) .map_err(|_| Ics02Error::beefy(BeefyError::invalid_trie_proof())) } fn verify_non_membership_trie_proof( - root: &sp_core::H256, + root: &[u8; 32], proof: &[Vec], key: &[u8], ) -> Result<(), Ics02Error> { let item: Vec<(&[u8], Option<&[u8]>)> = vec![(key, None)]; sp_trie::verify_trie_proof::, _, _, _>( - root, proof, &item, + &sp_core::H256::from_slice(root), + proof, + &item, ) .map_err(|_| Ics02Error::beefy(BeefyError::invalid_trie_proof())) } diff --git a/proto/Cargo.toml b/proto/Cargo.toml index aea7b466e4..11be83f9ee 100644 --- a/proto/Cargo.toml +++ b/proto/Cargo.toml @@ -32,7 +32,7 @@ base64 = { version = "0.13", default-features = false, features = ["alloc"] [dependencies.tendermint-proto] git = "https://github.com/composableFi/tendermint-rs" -branch = "seun-0.23.7" +rev = "5a74e0f8da4d3dab83cc04b5f1363b018cf3d9e8" default-features = false [features] diff --git a/proto/src/prost/cosmos.base.tendermint.v1beta1.rs b/proto/src/prost/cosmos.base.tendermint.v1beta1.rs index 9aff7b3da6..714957f61f 100644 --- a/proto/src/prost/cosmos.base.tendermint.v1beta1.rs +++ b/proto/src/prost/cosmos.base.tendermint.v1beta1.rs @@ -92,7 +92,7 @@ pub struct GetNodeInfoRequest { #[derive(Clone, PartialEq, ::prost::Message)] pub struct GetNodeInfoResponse { #[prost(message, optional, tag="1")] - pub default_node_info: ::core::option::Option<::tendermint_proto::p2p::DefaultNodeInfo>, + pub default_node_info: ::core::option::Option<::tendermint_proto::p2p::NodeInfo>, #[prost(message, optional, tag="2")] pub application_version: ::core::option::Option, } diff --git a/relayer-cli/Cargo.toml b/relayer-cli/Cargo.toml index 573470307b..85cc974ddb 100644 --- a/relayer-cli/Cargo.toml +++ b/relayer-cli/Cargo.toml @@ -32,12 +32,12 @@ ibc-proto = { version = "0.18.0", path = "../proto" } ibc-telemetry = { version = "0.15.0", path = "../telemetry", optional = true } ibc-relayer-rest = { version = "0.15.0", path = "../relayer-rest", optional = true } -clap = { version = "3.1", features = ["cargo"] } -clap_complete = "3.1" +clap = { version = "3.2", features = ["cargo"] } +clap_complete = "3.2" humantime = "2.1" serde = { version = "1.0", features = ["serde_derive"] } tokio = { version = "1.0", features = ["full"] } -tracing = "0.1.34" +tracing = "0.1.35" tracing-subscriber = { version = "0.3.11", features = ["fmt", "env-filter", "json"]} eyre = "0.6.8" color-eyre = "0.6" @@ -59,26 +59,26 @@ console = "0.15.0" [dependencies.tendermint-proto] git = "https://github.com/composableFi/tendermint-rs" -branch = "seun-0.23.7" +rev = "5a74e0f8da4d3dab83cc04b5f1363b018cf3d9e8" [dependencies.tendermint] git = "https://github.com/composableFi/tendermint-rs" -branch = "seun-0.23.7" +rev = "5a74e0f8da4d3dab83cc04b5f1363b018cf3d9e8" features = ["secp256k1"] [dependencies.tendermint-rpc] git = "https://github.com/composableFi/tendermint-rs" -branch = "seun-0.23.7" +rev = "5a74e0f8da4d3dab83cc04b5f1363b018cf3d9e8" features = ["http-client", "websocket-client"] [dependencies.tendermint-light-client] git = "https://github.com/composableFi/tendermint-rs" -branch = "seun-0.23.7" +rev = "5a74e0f8da4d3dab83cc04b5f1363b018cf3d9e8" features = ["unstable"] [dependencies.tendermint-light-client-verifier] git = "https://github.com/composableFi/tendermint-rs" -branch = "seun-0.23.7" +rev = "5a74e0f8da4d3dab83cc04b5f1363b018cf3d9e8" [dependencies.abscissa_core] version = "=0.6.0" diff --git a/relayer-cli/src/lib.rs b/relayer-cli/src/lib.rs index 9260d703bd..cab10648d9 100644 --- 
a/relayer-cli/src/lib.rs +++ b/relayer-cli/src/lib.rs @@ -19,6 +19,7 @@ unused_lifetimes, unused_qualifications )] +#![allow(deprecated)] extern crate alloc; diff --git a/relayer/Cargo.toml b/relayer/Cargo.toml index c8b0f24569..1c816c876a 100644 --- a/relayer/Cargo.toml +++ b/relayer/Cargo.toml @@ -31,7 +31,7 @@ serde = "1.0" serde_derive = "1.0" thiserror = "1.0.30" toml = "0.5" -tracing = "0.1.34" +tracing = "0.1.35" tokio = { version = "1.0", features = ["rt-multi-thread", "time", "sync"] } serde_json = { version = "1" } bytes = "1.1.0" @@ -44,7 +44,7 @@ k256 = { version = "0.10.4", features = ["ecdsa-core", "ecdsa", "sha256"]} hex = "0.4" bitcoin = { version = "=0.28", features = ["use-serde"] } tiny-bip39 = "0.8.0" -hdpath = { version = "0.6.0" } +hdpath = { version = "0.6.1" } sha2 = "0.10.2" tiny-keccak = { version = "2.0.2", features = ["keccak"], default-features = false } ripemd160 = "0.9.1" @@ -53,7 +53,7 @@ itertools = "0.10.3" dirs-next = "2.0.0" retry = { version = "1.3.1", default-features = false } async-stream = "0.3.3" -http = "0.2.6" +http = "0.2.8" flex-error = { version = "0.4.4", default-features = false } signature = "1.4.0" anyhow = "1.0" @@ -62,7 +62,7 @@ humantime = "2.1.0" nanoid = "0.4.0" regex = "1.5.5" moka = "0.8.5" -uuid = { version = "1.1.1", features = ["v4"] } +uuid = { version = "1.1.2", features = ["v4"] } [dependencies.num-bigint] version = "0.4" @@ -74,27 +74,28 @@ features = ["num-bigint", "serde"] [dependencies.tendermint] git = "https://github.com/composableFi/tendermint-rs" -branch = "seun-0.23.7" +rev = "5a74e0f8da4d3dab83cc04b5f1363b018cf3d9e8" features = ["secp256k1"] [dependencies.tendermint-rpc] git = "https://github.com/composableFi/tendermint-rs" -branch = "seun-0.23.7" +rev = "5a74e0f8da4d3dab83cc04b5f1363b018cf3d9e8" features = ["http-client", "websocket-client"] [dependencies.tendermint-light-client] git = "https://github.com/composableFi/tendermint-rs" -branch = "seun-0.23.7" +rev = "5a74e0f8da4d3dab83cc04b5f1363b018cf3d9e8" default-features = false features = ["rpc-client", "secp256k1", "unstable"] [dependencies.tendermint-light-client-verifier] git = "https://github.com/composableFi/tendermint-rs" -branch = "seun-0.23.7" +rev = "5a74e0f8da4d3dab83cc04b5f1363b018cf3d9e8" default-features = false [dependencies.tendermint-proto] -version = "=0.23.7" +git = "https://github.com/composableFi/tendermint-rs" +rev = "5a74e0f8da4d3dab83cc04b5f1363b018cf3d9e8" [dev-dependencies] ibc = { version = "0.15.0", path = "../modules", features = ["mocks"] } @@ -104,4 +105,4 @@ tracing-subscriber = { version = "0.3.11", features = ["fmt", "env-filter", "jso test-log = { version = "0.2.10", features = ["trace"] } # Needed for generating (synthetic) light blocks. 
-tendermint-testgen = { git = "https://github.com/composableFi/tendermint-rs", branch = "seun-0.23.7" } +tendermint-testgen = { git = "https://github.com/composableFi/tendermint-rs", rev = "5a74e0f8da4d3dab83cc04b5f1363b018cf3d9e8"} diff --git a/relayer/src/chain/cosmos/estimate.rs b/relayer/src/chain/cosmos/estimate.rs index 5b6379cc14..3b463b28e9 100644 --- a/relayer/src/chain/cosmos/estimate.rs +++ b/relayer/src/chain/cosmos/estimate.rs @@ -149,7 +149,10 @@ fn can_recover_from_simulation_failure(e: &Error) -> bool { use crate::error::ErrorDetail::*; match e.detail() { - GrpcStatus(detail) => detail.is_client_state_height_too_low(), + GrpcStatus(detail) => { + detail.is_client_state_height_too_low() + || detail.is_account_sequence_mismatch_that_can_be_ignored() + } _ => false, } } diff --git a/relayer/src/chain/cosmos/retry.rs b/relayer/src/chain/cosmos/retry.rs index 61c10362db..9a83b61a60 100644 --- a/relayer/src/chain/cosmos/retry.rs +++ b/relayer/src/chain/cosmos/retry.rs @@ -94,12 +94,9 @@ fn do_send_tx_with_account_sequence_retry<'a>( match tx_result { // Gas estimation failed with acct. s.n. mismatch at estimate gas step. - // This indicates that the full node did not yet push the previous tx out of its - // mempool. Possible explanations: fees too low, network congested, or full node - // congested. Whichever the case, it is more expedient in production to drop the tx - // and refresh the s.n., to allow proceeding to the other transactions. A separate - // retry at the worker-level will handle retrying. - Err(e) if mismatching_account_sequence_number(&e) => { + // It indicates that the account sequence cached by hermes is stale. + // This can happen when the same account is used by another agent. + Err(e) if mismatch_account_sequence_number_error_requires_refresh(&e) => { warn!("failed at estimate_gas step mismatching account sequence: dropping the tx & refreshing account sequence number"); refresh_account(&config.grpc_address, &key_entry.account, account).await?; // Note: propagating error here can lead to bug & dropped packets: @@ -143,18 +140,19 @@ fn do_send_tx_with_account_sequence_retry<'a>( // Catch-all arm for the Ok variant. // This is the case when gas estimation succeeded. Ok(response) => { - // Complete success. match response.code { + // Gas estimation succeeded and broadcasting was successful. Code::Ok => { debug!("broadcast_tx_sync: {:?}", response); account.sequence.increment_mut(); Ok(response) } + // Gas estimation succeeded, but broadcasting failed with unrecoverable error. Code::Err(code) => { - // Avoid increasing the account s.n. if CheckTx failed - // Log the error + // Do not increase the account s.n. if CheckTx failed. + // Log the error. error!( "broadcast_tx_sync: {:?}: diagnostic: {:?}", response, @@ -174,12 +172,13 @@ fn do_send_tx_with_account_sequence_retry<'a>( /// Determine whether the given error yielded by `tx_simulate` /// indicates hat the current sequence number cached in Hermes -/// may be out-of-sync with the full node's version of the s.n. -fn mismatching_account_sequence_number(e: &Error) -> bool { +/// is smaller than the full node's version of the s.n. and therefore +/// account needs to be refreshed. 
+fn mismatch_account_sequence_number_error_requires_refresh(e: &Error) -> bool { use crate::error::ErrorDetail::*; match e.detail() { - GrpcStatus(detail) => detail.is_account_sequence_mismatch(), + GrpcStatus(detail) => detail.is_account_sequence_mismatch_that_requires_refresh(), _ => false, } } diff --git a/relayer/src/chain/cosmos/version.rs b/relayer/src/chain/cosmos/version.rs index 5c605bf989..e115226e4f 100644 --- a/relayer/src/chain/cosmos/version.rs +++ b/relayer/src/chain/cosmos/version.rs @@ -1,5 +1,5 @@ //! Utilities for extracting and parsing versioning information -//! of Cosmos-SDK chains. The extracted version specification +//! of Cosmos-SDK networks. The extracted version specification //! is captured in a domain-type semver format in [`Specs`]. use flex_error::define_error; @@ -7,9 +7,9 @@ use tracing::trace; use ibc_proto::cosmos::base::tendermint::v1beta1::VersionInfo; -/// Specifies the SDK & IBC-go modules path, as it is expected +/// Specifies the SDK, IBC-go, and Tendermint modules path, as expected /// to appear in the application version information of a -/// Cosmos chain. +/// Cosmos-SDK network. /// /// The module identification is captured in a [`Module`] /// with the following structure as an example: @@ -22,17 +22,19 @@ use ibc_proto::cosmos::base::tendermint::v1beta1::VersionInfo; /// ``` const SDK_MODULE_NAME: &str = "cosmos/cosmos-sdk"; const IBC_GO_MODULE_NAME: &str = "cosmos/ibc-go"; +const TENDERMINT_MODULE_NAME: &str = "tendermint/tendermint"; /// Captures the version(s) specification of different -/// modules of a chain. +/// modules of a network. /// -/// Assumes that the chain runs on Cosmos SDK. +/// Assumes that the network runs on Cosmos SDK. /// Stores both the SDK version as well as /// the IBC-go module version (if existing). #[derive(Debug)] pub struct Specs { pub sdk_version: semver::Version, pub ibc_go_version: Option, + pub tendermint_version: semver::Version, } define_error! { @@ -42,7 +44,14 @@ define_error! { address: String, app: AppInfo, } - |e| { format!("node at {} running chain {} not caught up", e.address, e.app) }, + |e| { format!("failed to find the SDK module dependency ('{}') for application {}", e.address, e.app) }, + + TendermintModuleNotFound + { + address: String, + app: AppInfo, + } + |e| { format!("failed to find the Tendermint dependency ('{}') for application {}", e.address, e.app) }, VersionParsingFailed { @@ -51,7 +60,7 @@ define_error! 
{ cause: String, app: AppInfo, } - |e| { format!("failed parsing the SDK module ('{}') version number '{}' into a semver for application {}; cause: {}", + |e| { format!("failed parsing the module path ('{}') version number '{}' into a semver for application {}; cause: {}", e.module_path, e.raw_version, e.app, e.cause) }, } } @@ -63,19 +72,22 @@ impl TryFrom for Specs { // Get the Cosmos SDK version let sdk_version = parse_sdk_version(&raw_version)?; let ibc_go_version = parse_ibc_go_version(&raw_version)?; + let tendermint_version = parse_tendermint_version(&raw_version)?; trace!( - "parsed version specification for {} {}@{} -> SDK={}; Ibc-Go status={:?}", - raw_version.app_name, - raw_version.version, - raw_version.git_commit, - sdk_version, - ibc_go_version + application = %raw_version.app_name, + version = %raw_version.version, + git_commit = %raw_version.git_commit, + sdk_version = %sdk_version, + ibc_go_status = ?ibc_go_version, + tendermint_version = %tendermint_version, + "parsed version specification" ); Ok(Self { sdk_version, ibc_go_version, + tendermint_version, }) } } @@ -117,7 +129,8 @@ fn parse_ibc_go_version(version_info: &VersionInfo) -> Result Ok(None), Some(ibc_module) => { // The raw version number has a leading 'v', trim it out; @@ -142,6 +155,37 @@ fn parse_ibc_go_version(version_info: &VersionInfo) -> Result Result { + let module = version_info + .build_deps + .iter() + .find(|&m| m.path.contains(TENDERMINT_MODULE_NAME)) + .ok_or_else(|| { + Error::tendermint_module_not_found( + TENDERMINT_MODULE_NAME.to_string(), + AppInfo::from(version_info), + ) + })?; + + // The raw version number has a leading 'v', trim it out; + let plain_version = module.version.trim_start_matches('v'); + + // Parse the module version + let mut version = semver::Version::parse(plain_version).map_err(|e| { + Error::version_parsing_failed( + module.path.clone(), + module.version.clone(), + e.to_string(), + AppInfo::from(version_info), + ) + })?; + + // Remove the pre-release version to ensure we don't give special treatment to pre-releases. + version.pre = semver::Prerelease::EMPTY; + + Ok(version) +} + /// Helper struct to capture all the reported information of an /// IBC application, e.g., `gaiad`. #[derive(Clone, Debug)] diff --git a/relayer/src/error.rs b/relayer/src/error.rs index 704da24d5f..d8eed2d709 100644 --- a/relayer/src/error.rs +++ b/relayer/src/error.rs @@ -6,6 +6,7 @@ use flex_error::{define_error, DisplayOnly, TraceClone, TraceError}; use http::uri::InvalidUri; use humantime::format_duration; use prost::{DecodeError, EncodeError}; +use regex::Regex; use tendermint::Error as TendermintError; use tendermint_light_client::components::io::IoError as LightClientIoError; use tendermint_light_client::errors::{ @@ -534,27 +535,134 @@ impl GrpcStatusSubdetail { msg.contains("verification failed") && msg.contains("client state height < proof height") } - /// Check whether this gRPC error matches - /// - message: "account sequence mismatch, expected 166791, got 166793: incorrect account sequence: invalid request" + /// Check whether this gRPC error message starts with "account sequence mismatch". /// /// # Note: /// This predicate is tested and validated against errors /// that appear at the `estimate_gas` step. The error /// predicate to be used at the `broadcast_tx_sync` step /// is different & relies on parsing the Response error code. - pub fn is_account_sequence_mismatch(&self) -> bool { - // The code changed in SDK 0.44 from `InvalidArgument` to `Unknown`. 
- // Workaround: Ignore code. We'll match only on the status message. - // if self.status.code() != tonic::Code::InvalidArgument { - // return false; - // } - + /// + /// It is currently expected that, in the case of a match, the error message is of form: + /// "account sequence mismatch, expected E, got G: incorrect account sequence", + /// where E > G. + /// The case where E < G is considered recoverable and should have been previously handled + /// (see `is_account_sequence_mismatch_that_can_be_ignored` for which the error is ignored and + /// simulation uses default gas). + /// However, if in future cosmos-sdk releases the gRPC error message changes such that + /// it still starts with "account sequence mismatch" but the rest doesn't match the remainder of + /// the pattern (", expected E, got G: incorrect account sequence"), or + /// there are hermes code changes such that the E < G case is not previously caught anymore, + /// then this predicate will catch all "account sequence mismatch" errors + pub fn is_account_sequence_mismatch_that_requires_refresh(&self) -> bool { self.status .message() .trim_start() .starts_with("account sequence mismatch") } + + /// Check whether this gRPC error matches: + /// "account sequence mismatch, expected E, got G", + /// where E < G. + /// It is currently expected that, in the case of a match, the error message is of form: + /// "account sequence mismatch, expected E, got G: incorrect account sequence" + /// + /// # Note: + /// This predicate is tested and validated against errors + /// that appear during the `estimate_gas` step. + /// If it evaluates to true then the error is ignored and the transaction that caused this + /// simulation error is still sent to mempool with `broadcast_tx_sync` allowing for potential + /// recovery after mempool's `recheckTxs` step. + /// More details in + pub fn is_account_sequence_mismatch_that_can_be_ignored(&self) -> bool { + match parse_sequences_in_mismatch_error_message(self.status.message()) { + None => false, + Some((expected, got)) => expected < got, + } + } +} + +/// Assumes that the cosmos-sdk account sequence mismatch error message, that may be seen +/// during simulating or broadcasting a transaction, includes the following pattern: +/// "account sequence mismatch, expected E, got G". +/// If a match is found it extracts and returns (E, G). +fn parse_sequences_in_mismatch_error_message(message: &str) -> Option<(u64, u64)> { + let re = + Regex::new(r#"account sequence mismatch, expected (?P\d+), got (?P\d+)"#) + .unwrap(); + match re.captures(message) { + None => None, + Some(captures) => match (captures["expected"].parse(), captures["got"].parse()) { + (Ok(e), Ok(g)) => Some((e, g)), + _ => None, + }, + } } pub const QUERY_PROOF_EXPECT_MSG: &str = "Internal error. 
Requested proof with query but no proof was returned."; + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_parse_sequences_in_mismatch_error_message() { + struct Test<'a> { + name: &'a str, + message: &'a str, + result: Option<(u64, u64)>, + } + let tests: Vec> = vec![ + Test { + name: "good mismatch error, expected < got", + message: + "account sequence mismatch, expected 100, got 200: incorrect account sequence", + result: Some((100, 200)), + }, + Test { + name: "good mismatch error, expected > got", + message: + "account sequence mismatch, expected 200, got 100: incorrect account sequence", + result: Some((200, 100)), + }, + Test { + name: "good changed mismatch error, expected < got", + message: "account sequence mismatch, expected 100, got 200: this part has changed", + result: Some((100, 200)), + }, + Test { + name: "good changed mismatch error, expected > got", + message: + "account sequence mismatch, expected 200, got 100 --> this part has changed", + result: Some((200, 100)), + }, + Test { + name: "bad mismatch error, bad expected", + message: + "account sequence mismatch, expected 2a5, got 100: incorrect account sequence", + result: None, + }, + Test { + name: "bad mismatch error, bad got", + message: + "account sequence mismatch, expected 25, got -29: incorrect account sequence", + result: None, + }, + Test { + name: "not a mismatch error", + message: "some other error message", + result: None, + }, + ]; + + for test in tests { + assert_eq!( + test.result, + parse_sequences_in_mismatch_error_message(test.message), + "{}", + test.name + ) + } + } +} diff --git a/relayer/src/sdk_error.rs b/relayer/src/sdk_error.rs index b4c7a2acee..499b8e897f 100644 --- a/relayer/src/sdk_error.rs +++ b/relayer/src/sdk_error.rs @@ -13,8 +13,17 @@ define_error! { |_| { "Expected error code, instead got Ok" }, UnknownSdk + { + codespace: String, + code: u32, + } + | e | { + format_args!("unknown SDK error with code space: {}, code: {}", e.codespace, e.code) + }, + + UnknownTxSync { code: u32 } - |e| { format!("unknown SDK error: {}", e.code) }, + | e | { format_args!("unknown TX sync response error: {}", e.code) }, OutOfGas { code: u32 } @@ -169,7 +178,7 @@ pub fn sdk_error_from_tx_result(result: &TxResult) -> SdkError { SdkError::client(client_error_from_code(code)) } else { // TODO: Implement mapping for other codespaces in ibc-go - SdkError::unknown_sdk(code) + SdkError::unknown_sdk(codespace, code) } } } @@ -186,6 +195,6 @@ pub fn sdk_error_from_tx_sync_error_code(code: u32) -> SdkError { // on the Hermes side. We'll inform the user to check for misconfig. 11 => SdkError::out_of_gas(code), 13 => SdkError::insufficient_fee(code), - _ => SdkError::unknown_sdk(code), + _ => SdkError::unknown_tx_sync(code), } } diff --git a/scripts/gm/CHANGELOG.md b/scripts/gm/CHANGELOG.md index 251a954565..d67eafe5be 100644 --- a/scripts/gm/CHANGELOG.md +++ b/scripts/gm/CHANGELOG.md @@ -1,5 +1,13 @@ # Gaiad Manager Change Log +## v0.1.3 + +### BUGFIXES + +- Fixed variable TM to reference the GAIAD_BINARY ([#2210]). 
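Taken together, the estimate.rs, retry.rs and error.rs changes above split the "account sequence mismatch, expected E, got G" gRPC error into two cases: E < G means the full node has simply not processed Hermes' earlier transactions yet, so the simulation failure can be ignored (default gas, still broadcast); E > G means the sequence cached by Hermes is stale and the account must be refreshed. The following is a standalone sketch of that decision logic only, not code from the patch: `SequenceMismatchAction` and `classify` are illustrative names, it uses plain capture groups instead of the patch's named captures, and it assumes the `regex` crate that the patch itself imports in relayer/src/error.rs.

use regex::Regex;

#[derive(Debug, PartialEq)]
enum SequenceMismatchAction {
    /// expected < got: node lags behind Hermes; ignore and still broadcast.
    Ignore,
    /// expected > got (or unparsable numbers): cached sequence is stale; refresh it.
    RefreshAccount,
    /// Not an account sequence mismatch at all.
    Unrelated,
}

fn classify(message: &str) -> SequenceMismatchAction {
    if !message.trim_start().starts_with("account sequence mismatch") {
        return SequenceMismatchAction::Unrelated;
    }
    let re = Regex::new(r"account sequence mismatch, expected (\d+), got (\d+)").unwrap();
    match re.captures(message) {
        Some(c) => match (c[1].parse::<u64>(), c[2].parse::<u64>()) {
            // The node expects a lower sequence than Hermes sent: it has not yet
            // processed the earlier txs, so the simulation failure is recoverable.
            (Ok(expected), Ok(got)) if expected < got => SequenceMismatchAction::Ignore,
            // The node expects a higher sequence: Hermes' cached value is stale.
            _ => SequenceMismatchAction::RefreshAccount,
        },
        // Message shape changed across SDK versions: take the conservative refresh path.
        None => SequenceMismatchAction::RefreshAccount,
    }
}

fn main() {
    assert_eq!(
        classify("account sequence mismatch, expected 100, got 200: incorrect account sequence"),
        SequenceMismatchAction::Ignore
    );
    assert_eq!(
        classify("account sequence mismatch, expected 200, got 100: incorrect account sequence"),
        SequenceMismatchAction::RefreshAccount
    );
    assert_eq!(classify("some other error"), SequenceMismatchAction::Unrelated);
}

This mirrors the patch's ordering: the "can be ignored" check runs first at the estimate_gas step, and the broad "starts with account sequence mismatch" predicate acts as the catch-all refresh path in the retry logic.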
+ +[#2210]: https://github.com/informalsystems/ibc-rs/issues/2210 + ## v0.1.2 ### BUGFIXES diff --git a/scripts/gm/bin/lib-gm b/scripts/gm/bin/lib-gm index d1d637b77c..cb4e8877d4 100644 --- a/scripts/gm/bin/lib-gm +++ b/scripts/gm/bin/lib-gm @@ -5,7 +5,7 @@ if [ "${DEBUG:-}" = "2" ]; then fi version() { - VERSION="v0.1.2" + VERSION="v0.1.3" if is_json_output; then echo '{"status": "success", "message": "'"${VERSION}"'"}' else @@ -1072,7 +1072,7 @@ reset() { return 0 fi # `unsafe-reset-all` was moved to `gaiad tendermint` sub-command - TM="$(gaiad | grep -q 'unsafe' || echo "tendermint")" + TM="$($GAIAD_BINARY | grep -q 'unsafe' || echo "tendermint")" if is_json_output; then "$GAIAD_BINARY" "$TM" unsafe-reset-all --home "$HOME_DIR" 1> /dev/null 2> /dev/null else diff --git a/telemetry/Cargo.toml b/telemetry/Cargo.toml index ad1e1e5a20..0a7599f2f4 100644 --- a/telemetry/Cargo.toml +++ b/telemetry/Cargo.toml @@ -23,4 +23,4 @@ prometheus = "0.13.0" rouille = "3.5.0" moka = "0.8.5" -uuid = { version = "1.1.1", features = ["v4"] } +uuid = { version = "1.1.2", features = ["v4"] } diff --git a/tools/integration-test/Cargo.toml b/tools/integration-test/Cargo.toml index 1c6de23908..bf6a5a4f25 100644 --- a/tools/integration-test/Cargo.toml +++ b/tools/integration-test/Cargo.toml @@ -19,11 +19,11 @@ ibc-relayer = { path = "../../relayer" } ibc-relayer-cli = { path = "../../relayer-cli" } ibc-proto = { path = "../../proto" } ibc-test-framework = { path = "../test-framework" } -tendermint = { git = "https://github.com/composableFi/tendermint-rs", branch = "seun-0.23.7" } -tendermint-rpc = { git = "https://github.com/composableFi/tendermint-rs", branch = "seun-0.23.7", features = ["http-client", "websocket-client"] } +tendermint = { git = "https://github.com/composableFi/tendermint-rs", rev = "5a74e0f8da4d3dab83cc04b5f1363b018cf3d9e8" } +tendermint-rpc = { git = "https://github.com/composableFi/tendermint-rs", rev = "5a74e0f8da4d3dab83cc04b5f1363b018cf3d9e8", features = ["http-client", "websocket-client"] } serde_json = "1" -# modelator = { git = "https://github.com/informalsystems/modelator", optional = true } +modelator = { version = "0.4.2", optional = true } time = "0.3" serde = "1.0.136" @@ -34,7 +34,7 @@ manual = [] ordered = [] ica = [] experimental = [] -# mbt = ["modelator"] +mbt = ["modelator"] [[bin]] name = "test_setup_with_binary_channel" diff --git a/tools/test-framework/Cargo.toml b/tools/test-framework/Cargo.toml index 2d1a0419ef..a112edb8d0 100644 --- a/tools/test-framework/Cargo.toml +++ b/tools/test-framework/Cargo.toml @@ -18,13 +18,13 @@ ibc = { version = "=0.15.0", path = "../../modules" } ibc-relayer = { version = "=0.15.0", path = "../../relayer" } ibc-relayer-cli = { version = "=0.15.0", path = "../../relayer-cli" } ibc-proto = { version = "=0.18.0", path = "../../proto" } -tendermint = { git = "https://github.com/composableFi/tendermint-rs", branch = "seun-0.23.7" } -tendermint-rpc = { git = "https://github.com/composableFi/tendermint-rs", branch = "seun-0.23.7", features = ["http-client", "websocket-client"] } +tendermint = { git = "https://github.com/composableFi/tendermint-rs", rev = "5a74e0f8da4d3dab83cc04b5f1363b018cf3d9e8" } +tendermint-rpc = { git = "https://github.com/composableFi/tendermint-rs", rev = "5a74e0f8da4d3dab83cc04b5f1363b018cf3d9e8", features = ["http-client", "websocket-client"] } async-trait = "0.1.56" -http = "0.2.6" +http = "0.2.8" tokio = { version = "1.0", features = ["full"] } -tracing = "0.1.34" +tracing = "0.1.35" tracing-subscriber = "0.3.11" eyre = 
"0.6.8" color-eyre = "0.6" @@ -39,5 +39,5 @@ toml = "0.5" subtle-encoding = "0.5.1" sha2 = "0.10.2" crossbeam-channel = "0.5.4" -semver = "1.0.7" +semver = "1.0.10" flex-error = "0.4.4" From c3bef7fe9c4e64cba05213341d6cfc341cb436c4 Mon Sep 17 00:00:00 2001 From: Web3 Smith <31099392+Wizdave97@users.noreply.github.com> Date: Mon, 20 Jun 2022 13:07:58 +0100 Subject: [PATCH 55/96] fix feature flag (#23) --- modules/Cargo.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/Cargo.toml b/modules/Cargo.toml index 47a937d069..2f09f0e325 100644 --- a/modules/Cargo.toml +++ b/modules/Cargo.toml @@ -36,8 +36,8 @@ clock = ["tendermint/clock", "time/std"] # This feature grants access to development-time mocking libraries, such as `MockContext` or `MockHeader`. # Depends on the `testgen` suite for generating Tendermint light blocks. -mocks = ["tendermint-testgen", "clock", "std", "sp-io/std", "sha3", "ripemd", "ics11-beefy"] -ics11-beefy = ["sp-io", "sp-core", "sp-std"] +mocks = ["tendermint-testgen", "clock", "std", "sp-io/std", "sha3", "ripemd", "ics11_beefy"] +ics11_beefy = ["sp-io", "sp-core", "sp-std"] [dependencies] # Proto definitions for all IBC-related interfaces, e.g., connections or channels. From b3ab319018b1bb8917bca3aad977455aa2c9b179 Mon Sep 17 00:00:00 2001 From: Web3 Smith <31099392+Wizdave97@users.noreply.github.com> Date: Mon, 20 Jun 2022 13:47:03 +0100 Subject: [PATCH 56/96] Ics11 beefy feature (#24) * fix beefy client feature gating * fix feature gating --- modules/Cargo.toml | 14 +++++++++----- modules/src/clients/host_functions.rs | 1 + .../src/core/ics02_client/handler/update_client.rs | 1 + 3 files changed, 11 insertions(+), 5 deletions(-) diff --git a/modules/Cargo.toml b/modules/Cargo.toml index 2f09f0e325..d316e29f7b 100644 --- a/modules/Cargo.toml +++ b/modules/Cargo.toml @@ -30,14 +30,16 @@ std = [ "pallet-mmr-primitives/std", "beefy-primitives/std", "sp-trie/std", + "sp-io/std", + "sp-std/std", "tendermint-rpc" ] clock = ["tendermint/clock", "time/std"] # This feature grants access to development-time mocking libraries, such as `MockContext` or `MockHeader`. # Depends on the `testgen` suite for generating Tendermint light blocks. -mocks = ["tendermint-testgen", "clock", "std", "sp-io/std", "sha3", "ripemd", "ics11_beefy"] -ics11_beefy = ["sp-io", "sp-core", "sp-std"] +mocks = ["tendermint-testgen", "clock", "std", "sha3", "ripemd", "ics11_beefy"] +ics11_beefy = ["sp-io", "sp-core", "sp-std", "beefy-primitives", "sp-runtime", "sp-trie", "pallet-mmr-primitives", "beefy-client"] [dependencies] # Proto definitions for all IBC-related interfaces, e.g., connections or channels. 
@@ -59,9 +61,10 @@ flex-error = { version = "0.4.4", default-features = false } num-traits = { version = "0.2.15", default-features = false } derive_more = { version = "0.99.17", default-features = false, features = ["from", "into", "display"] } uint = { version = "0.9", default-features = false } -beefy-client = { package = "beefy-generic-client", git = "https://github.com/ComposableFi/beefy-client", branch = "master", default-features = false } -pallet-mmr-primitives = { package = "sp-mmr-primitives", git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.22", default-features = false } -beefy-primitives = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.22", default-features = false } + +beefy-client = { package = "beefy-generic-client", git = "https://github.com/ComposableFi/beefy-client", branch = "master", default-features = false, optional = true } +pallet-mmr-primitives = { package = "sp-mmr-primitives", git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.22", default-features = false, optional = true } +beefy-primitives = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.22", default-features = false, optional = true } codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false } sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.22", default-features = false, optional = true } sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.22", default-features = false, optional = true} @@ -70,6 +73,7 @@ sp-trie = { git = "https://github.com/paritytech/substrate", branch = "polkadot- sp-io = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.22", default-features = false, optional = true } sha3 = { version = "0.10.1", optional = true } ripemd = { version = "0.1.1", optional = true } + primitive-types = { version = "0.11.1", default-features = false, features = ["serde_no_std"] } diff --git a/modules/src/clients/host_functions.rs b/modules/src/clients/host_functions.rs index 755d7b1261..151077c98a 100644 --- a/modules/src/clients/host_functions.rs +++ b/modules/src/clients/host_functions.rs @@ -60,6 +60,7 @@ pub trait HostFunctionsProvider: Clone + Send + Sync + Default { pub struct HostFunctionsManager(PhantomData); // implementation for beefy host functions +#[cfg(any(test, feature = "mocks", feature = "ics11_beefy"))] impl beefy_client::traits::HostFunctions for HostFunctionsManager where T: HostFunctionsProvider, diff --git a/modules/src/core/ics02_client/handler/update_client.rs b/modules/src/core/ics02_client/handler/update_client.rs index 8133596849..94f98dacef 100644 --- a/modules/src/core/ics02_client/handler/update_client.rs +++ b/modules/src/core/ics02_client/handler/update_client.rs @@ -576,6 +576,7 @@ mod tests { } } + #[cfg(feature = "ics11_beefy")] #[tokio::test] async fn test_continuous_update_of_beefy_client() { let client_id = ClientId::new(ClientType::Beefy, 0).unwrap(); From d7bef572268f3508292f9d3a6256d347ad49296d Mon Sep 17 00:00:00 2001 From: Web3 Smith <31099392+Wizdave97@users.noreply.github.com> Date: Tue, 5 Jul 2022 18:16:31 +0100 Subject: [PATCH 57/96] Bump to polkadot v0.9.24 (#26) --- Cargo.lock | 546 +++++++++++++++++++++++---------------------- modules/Cargo.toml | 16 +- 2 files changed, 282 insertions(+), 280 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7fcf210627..f8aab879e1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -34,7 +34,7 @@ 
dependencies = [ "toml", "tracing", "tracing-log", - "tracing-subscriber 0.3.11", + "tracing-subscriber 0.3.14", "wait-timeout", ] @@ -118,9 +118,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.57" +version = "1.0.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08f9b8508dccb7687a1d6c4ce66b2b0ecef467c94667de27d8d7fe1f8d2a9cdc" +checksum = "bb07d2053ccdbe10e2af2995a2f116c1330396493dc1269f6a91d0ae82e19704" [[package]] name = "approx" @@ -248,9 +248,9 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "axum" -version = "0.5.7" +version = "0.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc47084705629d09d15060d70a8dbfce479c842303d05929ce29c74c995916ae" +checksum = "c2cc6e8e8c993cb61a005fab8c1e5093a29199b7253b05a6883999312935c1ff" dependencies = [ "async-trait", "axum-core", @@ -277,9 +277,9 @@ dependencies = [ [[package]] name = "axum-core" -version = "0.2.5" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2efed1c501becea07ce48118786ebcf229531d0d3b28edf224a720020d9e106" +checksum = "cf4d047478b986f14a13edad31a009e2e05cb241f9805d0d75e4cba4e129ad4d" dependencies = [ "async-trait", "bytes", @@ -324,9 +324,9 @@ checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" [[package]] name = "base64ct" -version = "1.5.0" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dea908e7347a8c64e378c17e30ef880ad73e3b4498346b055c2c00ea342f3179" +checksum = "3bdca834647821e0b13d9539a8634eb62d3501b6b6c2cec1722786ee6671b851" [[package]] name = "bech32" @@ -352,7 +352,7 @@ dependencies = [ [[package]] name = "beefy-generic-client" version = "0.1.0" -source = "git+https://github.com/ComposableFi/beefy-client?branch=master#c8ef22e646f9eb33d66ac83e6fdad808b76aa4ed" +source = "git+https://github.com/ComposableFi/beefy-client?branch=master#156e6b68760464967cec0684ce689ad5244ea560" dependencies = [ "beefy-primitives", "ckb-merkle-mountain-range", @@ -365,33 +365,33 @@ dependencies = [ "parity-scale-codec", "rs_merkle", "serde_json", - "sp-core 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", - "sp-core-hashing 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", - "sp-io 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-core 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-core-hashing 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-io 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", "sp-mmr-primitives", - "sp-runtime 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", - "sp-trie 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-runtime 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-trie 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", "subxt", ] [[package]] name = "beefy-merkle-tree" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" +source = 
"git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" [[package]] name = "beefy-primitives" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" dependencies = [ "parity-scale-codec", "scale-info", "sp-api", - "sp-application-crypto 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", - "sp-core 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", - "sp-runtime 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-application-crypto 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-core 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-runtime 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", ] [[package]] @@ -706,9 +706,9 @@ dependencies = [ [[package]] name = "clap" -version = "3.2.5" +version = "3.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d53da17d37dba964b9b3ecb5c5a1f193a2762c700e6829201e645b9381c99dc7" +checksum = "190814073e85d238f31ff738fcb0bf6910cedeb73376c87cd69291028966fd83" dependencies = [ "atty", "bitflags", @@ -723,18 +723,18 @@ dependencies = [ [[package]] name = "clap_complete" -version = "3.2.1" +version = "3.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f6ebaab5f25e4f0312dfa07cb30a755204b96e6531457c2cfdecfdf5f2adf40" +checksum = "ead064480dfc4880a10764488415a97fdd36a4cf1bb022d372f02e8faf8386e1" dependencies = [ "clap", ] [[package]] name = "clap_derive" -version = "3.2.5" +version = "3.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c11d40217d16aee8508cc8e5fde8b4ff24639758608e5374e731b53f85749fb9" +checksum = "759bf187376e1afa7b85b959e6a664a3e7a95203415dba952ad19139e798f902" dependencies = [ "heck", "proc-macro-error", @@ -745,9 +745,9 @@ dependencies = [ [[package]] name = "clap_lex" -version = "0.2.2" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5538cd660450ebeb4234cfecf8f2284b844ffc4c50531e66d584ad5b91293613" +checksum = "2850f2f5a82cbf437dd5af4d49848fbdfc27c157c3d010345776f952765261c5" dependencies = [ "os_str_bytes", ] @@ -858,7 +858,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4c02a4d71819009c192cf4872265391563fd6a84c81ff2c0f2a7026ca4c1d85c" dependencies = [ "cfg-if 1.0.0", - "crossbeam-utils 0.8.9", + "crossbeam-utils 0.8.10", ] [[package]] @@ -869,7 +869,7 @@ checksum = "6455c0ca19f0d2fbf751b908d5c55c1f5cbc65e03c4225427254b46890bdde1e" dependencies = [ "cfg-if 1.0.0", "crossbeam-epoch 0.9.9", - "crossbeam-utils 0.8.9", + "crossbeam-utils 0.8.10", ] [[package]] @@ -895,7 +895,7 @@ checksum = "07db9d94cbd326813772c968ccd25999e5f8ae22f4f8d1b11effa37ef6ce281d" dependencies = [ "autocfg", "cfg-if 1.0.0", - "crossbeam-utils 0.8.9", + "crossbeam-utils 0.8.10", "memoffset 0.6.5", "once_cell", "scopeguard", @@ -914,9 +914,9 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.9" +version = "0.8.10" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ff1f980957787286a554052d03c7aee98d99cc32e09f6d45f0a814133c87978" +checksum = "7d82ee10ce34d7bc12c2122495e7593a9c41347ecdd64185af4ecf72cb1a7f83" dependencies = [ "cfg-if 1.0.0", "once_cell", @@ -942,9 +942,9 @@ dependencies = [ [[package]] name = "crypto-common" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57952ca27b5e3606ff4dd79b0020231aaf9d6aa76dc05fd30137538c50bd3ce8" +checksum = "5999502d32b9c48d492abe66392408144895020ec4709e549e840799f3bb74c0" dependencies = [ "generic-array 0.14.5", "typenum", @@ -1214,9 +1214,9 @@ dependencies = [ [[package]] name = "dyn-clone" -version = "1.0.5" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21e50f3adc76d6a43f5ed73b698a87d0760ca74617f60f7c3b879003536fdd28" +checksum = "140206b78fb2bc3edbcfc9b5ccbd0b30699cfe8d348b8b31b330e47df5291a5a" [[package]] name = "ecdsa" @@ -1268,9 +1268,9 @@ dependencies = [ [[package]] name = "either" -version = "1.6.1" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" +checksum = "3f107b87b6afc2a64fd13cac55fe06d6c8859f12d4b14cbcdd2c67d0976781be" [[package]] name = "elliptic-curve" @@ -1361,14 +1361,14 @@ dependencies = [ [[package]] name = "filetime" -version = "0.2.16" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0408e2626025178a6a7f7ffc05a25bc47103229f19c113755de7bf63816290c" +checksum = "e94a7bbaa59354bc20dd75b67f23e2797b4490e9d6928203fb105c79e448c86c" dependencies = [ "cfg-if 1.0.0", "libc", "redox_syscall", - "winapi", + "windows-sys", ] [[package]] @@ -1435,7 +1435,7 @@ dependencies = [ [[package]] name = "frame-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" dependencies = [ "frame-support", "frame-system", @@ -1446,12 +1446,12 @@ dependencies = [ "scale-info", "serde", "sp-api", - "sp-application-crypto 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", - "sp-io 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", - "sp-runtime 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", - "sp-runtime-interface 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", - "sp-storage 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-application-crypto 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-io 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-runtime 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-runtime-interface 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-storage 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", ] [[package]] @@ -1469,7 +1469,7 @@ dependencies = [ [[package]] name = "frame-support" version = "4.0.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" dependencies = [ "bitflags", "frame-metadata", @@ -1483,23 +1483,23 @@ dependencies = [ "scale-info", "serde", "smallvec", - "sp-arithmetic 5.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", - "sp-core 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-arithmetic 5.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-core 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", "sp-core-hashing-proc-macro", "sp-inherents", - "sp-io 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", - "sp-runtime 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-io 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-runtime 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", "sp-staking", - "sp-state-machine 0.12.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", - "sp-tracing 5.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-state-machine 0.12.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-tracing 5.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", "tt-call", ] [[package]] name = "frame-support-procedural" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" dependencies = [ "Inflector", "frame-support-procedural-tools", @@ -1511,7 +1511,7 @@ dependencies = [ [[package]] name = "frame-support-procedural-tools" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" dependencies = [ "frame-support-procedural-tools-derive", "proc-macro-crate 1.1.3", @@ -1523,7 +1523,7 @@ dependencies = [ [[package]] name = "frame-support-procedural-tools-derive" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" dependencies = [ "proc-macro2", "quote", @@ -1533,17 +1533,17 @@ dependencies = [ [[package]] name = "frame-system" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" dependencies = [ "frame-support", "log", "parity-scale-codec", "scale-info", "serde", - "sp-core 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", - "sp-io 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", - "sp-runtime 6.0.0 
(git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-core 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-io 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-runtime 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", "sp-version", ] @@ -2064,12 +2064,12 @@ dependencies = [ "serde_json", "sha2 0.10.2", "sha3 0.10.1", - "sp-core 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", - "sp-io 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-core 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-io 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", "sp-mmr-primitives", - "sp-runtime 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", - "sp-trie 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-runtime 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-trie 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", "subtle-encoding", "subxt", "tendermint", @@ -2081,7 +2081,7 @@ dependencies = [ "time", "tokio", "tracing", - "tracing-subscriber 0.3.11", + "tracing-subscriber 0.3.14", "uint", ] @@ -2144,7 +2144,7 @@ dependencies = [ "moka", "nanoid", "num-bigint 0.4.3", - "num-rational 0.4.0", + "num-rational 0.4.1", "prost", "prost-types", "regex", @@ -2172,7 +2172,7 @@ dependencies = [ "toml", "tonic", "tracing", - "tracing-subscriber 0.3.11", + "tracing-subscriber 0.3.14", "uuid 1.1.2", ] @@ -2216,7 +2216,7 @@ dependencies = [ "tokio", "toml", "tracing", - "tracing-subscriber 0.3.11", + "tracing-subscriber 0.3.14", ] [[package]] @@ -2278,7 +2278,7 @@ dependencies = [ "tokio", "toml", "tracing", - "tracing-subscriber 0.3.11", + "tracing-subscriber 0.3.14", ] [[package]] @@ -2351,12 +2351,12 @@ checksum = "ce23b50ad8242c51a442f3ff322d56b02f08852c77e4c0b4d3fd684abc89c683" [[package]] name = "indexmap" -version = "1.8.2" +version = "1.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6012d540c5baa3589337a98ce73408de9b5a25ec9fc2c6fd6be8f0d39e0ca5a" +checksum = "10a35a97730320ffe8e2d410b5d3b69279b98d2c14bdb8b70ea89ecf7888d41e" dependencies = [ "autocfg", - "hashbrown 0.11.2", + "hashbrown 0.12.1", ] [[package]] @@ -2682,9 +2682,9 @@ dependencies = [ [[package]] name = "linked-hash-map" -version = "0.5.4" +version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fb9b38af92608140b86b693604b9ffcc5824240a484d1ecd4795bacb2fe88f3" +checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" [[package]] name = "linregress" @@ -2870,9 +2870,9 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "713d550d9b44d89174e066b7a6217ae06234c10cb47819a88290d2b353c31799" +checksum = "57ee1c23c7c63b0c9250c339ffdc69255f110b298b901b9f6c82547b7b87caaf" dependencies = [ "libc", "log", @@ -2901,19 +2901,19 @@ dependencies = [ 
"tempfile", "thiserror", "tracing", - "tracing-subscriber 0.3.11", + "tracing-subscriber 0.3.14", "ureq", ] [[package]] name = "moka" -version = "0.8.5" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df72b50274c0988d9f4a6e808e06d9d926f265db6f8bbda1576bcaa658e72763" +checksum = "975fa04238144061e7f8df9746b2e9cd93ef85881da5548d842a7c6a4b614415" dependencies = [ "crossbeam-channel", "crossbeam-epoch 0.8.2", - "crossbeam-utils 0.8.9", + "crossbeam-utils 0.8.10", "num_cpus", "once_cell", "parking_lot", @@ -2924,7 +2924,7 @@ dependencies = [ "tagptr", "thiserror", "triomphe", - "uuid 0.8.2", + "uuid 1.1.2", ] [[package]] @@ -2955,7 +2955,7 @@ dependencies = [ "matrixmultiply", "nalgebra-macros", "num-complex", - "num-rational 0.4.0", + "num-rational 0.4.1", "num-traits", "rand 0.8.5", "rand_distr", @@ -3024,9 +3024,9 @@ dependencies = [ [[package]] name = "num-complex" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97fbc387afefefd5e9e39493299f3069e14a140dd34dc19b4c1c1a8fddb6a790" +checksum = "7ae39348c8bc5fbd7f40c727a9925f03517afd2ab27d46702108b6a7e5414c19" dependencies = [ "num-traits", ] @@ -3076,9 +3076,9 @@ dependencies = [ [[package]] name = "num-rational" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d41702bd167c2df5520b384281bc111a4b5efcf7fbc4c9c222c815b07e0a6a6a" +checksum = "0638a1c9d0a3c0914158145bc76cff373a75a627e6ecbfb71cbe6f453a5a19b0" dependencies = [ "autocfg", "num-bigint 0.4.3", @@ -3127,9 +3127,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.12.0" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7709cef83f0c1f58f666e746a08b21e0085f7440fa6a29cc194d68aac97a4225" +checksum = "18a6dbe30758c9f83eb00cbea4ac95966305f5a7772f3f42ebfc7fc7eddbd8e1" [[package]] name = "oneline-eyre" @@ -3205,7 +3205,7 @@ checksum = "decf7381921fea4dcb2549c5667eda59b3ec297ab7e2b5fc33eac69d2e7da87b" [[package]] name = "pallet-beefy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" dependencies = [ "beefy-primitives", "frame-support", @@ -3214,14 +3214,14 @@ dependencies = [ "parity-scale-codec", "scale-info", "serde", - "sp-runtime 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-runtime 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", ] [[package]] name = "pallet-beefy-mmr" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" dependencies = [ "beefy-merkle-tree", "beefy-primitives", @@ -3235,16 +3235,16 @@ dependencies = [ "parity-scale-codec", "scale-info", "serde", - "sp-core 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", - "sp-io 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", - "sp-runtime 6.0.0 
(git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-core 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-io 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-runtime 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", ] [[package]] name = "pallet-mmr" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" dependencies = [ "ckb-merkle-mountain-range", "frame-benchmarking", @@ -3252,32 +3252,32 @@ dependencies = [ "frame-system", "parity-scale-codec", "scale-info", - "sp-core 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", - "sp-io 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-core 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-io 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", "sp-mmr-primitives", - "sp-runtime 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-runtime 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", ] [[package]] name = "pallet-mmr-rpc" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" dependencies = [ "jsonrpsee 0.13.1", "parity-scale-codec", "serde", "sp-api", "sp-blockchain", - "sp-core 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-core 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", "sp-mmr-primitives", - "sp-runtime 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-runtime 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", ] [[package]] name = "pallet-session" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" dependencies = [ "frame-support", "frame-system", @@ -3286,19 +3286,19 @@ dependencies = [ "pallet-timestamp", "parity-scale-codec", "scale-info", - "sp-core 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", - "sp-io 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", - "sp-runtime 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-core 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-io 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-runtime 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", "sp-session", "sp-staking", - "sp-std 4.0.0 
(git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", - "sp-trie 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-trie 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", ] [[package]] name = "pallet-timestamp" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" dependencies = [ "frame-benchmarking", "frame-support", @@ -3307,8 +3307,8 @@ dependencies = [ "parity-scale-codec", "scale-info", "sp-inherents", - "sp-runtime 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-runtime 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", "sp-timestamp", ] @@ -3452,18 +3452,18 @@ checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" [[package]] name = "pin-project" -version = "1.0.10" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58ad3879ad3baf4e44784bc6a718a8698867bb991f8ce24d1bcbe2cfb4c3a75e" +checksum = "78203e83c48cffbe01e4a2d35d566ca4de445d79a85372fc64e378bfc812a260" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.0.10" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "744b6f092ba29c3650faf274db506afd39944f48420f6c86b17cfe0ee1cb36bb" +checksum = "710faf75e1b33345361201d36d04e98ac1ed8909151a017ed384700836104c74" dependencies = [ "proc-macro2", "quote", @@ -3557,9 +3557,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.39" +version = "1.0.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c54b25569025b7fc9651de43004ae593a75ad88543b17178aa5e1b9c4f15f56f" +checksum = "dd96a1e8ed2596c337f8eae5f24924ec83f5ad5ab21ea8e455d3566c69fbcaf7" dependencies = [ "unicode-ident", ] @@ -3635,7 +3635,7 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bafd74c340a0a7e79415981ede3460df16b530fd071541901a57416eea950b17" dependencies = [ - "crossbeam-utils 0.8.9", + "crossbeam-utils 0.8.10", "libc", "mach", "once_cell", @@ -3653,9 +3653,9 @@ checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] name = "quote" -version = "1.0.18" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1feb54ed693b93a84e14094943b84b7c4eae204c512b7ccb95ab0c66d278ad1" +checksum = "3bcdf212e9776fbcb2d23ab029360416bb1706b1aea2d1a5ba002727cbcab804" dependencies = [ "proc-macro2", ] @@ -3792,7 +3792,7 @@ checksum = "258bcdb5ac6dad48491bb2992db6b7cf74878b0384908af124823d118c99683f" dependencies = [ "crossbeam-channel", "crossbeam-deque", - "crossbeam-utils 0.8.9", + "crossbeam-utils 0.8.10", "num_cpus", ] @@ -4036,9 +4036,9 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.6" +version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2cc38e8fa666e2de3c4aba7edeb5ffc5246c1c2ed0e3d17e560aeeba736b23f" +checksum = 
"a0a5f7c728f5d284929a1cccb5bc19884422bfe6ef4d6c409da2c41838983fcf" [[package]] name = "ryu" @@ -4306,18 +4306,18 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.10" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a41d061efea015927ac527063765e73601444cdc344ba855bc7bd44578b25e1c" +checksum = "a2333e6df6d6598f2b1974829f853c2b4c5f4a6e503c10af918081aa6f8564e1" dependencies = [ "serde", ] [[package]] name = "serde" -version = "1.0.137" +version = "1.0.138" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61ea8d54c77f8315140a05f4c7237403bf38b72704d031543aa1d16abbf517d1" +checksum = "1578c6245786b9d168c5447eeacfb96856573ca56c9d68fdcf394be134882a47" dependencies = [ "serde_derive", ] @@ -4333,9 +4333,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.137" +version = "1.0.138" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f26faba0c3959972377d3b2d306ee9f71faee9714294e41bb777f83f88578be" +checksum = "023e9b1467aef8a10fb88f25611870ada9800ef7e22afce356bb0d2387b6f27c" dependencies = [ "proc-macro2", "quote", @@ -4355,9 +4355,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.81" +version = "1.0.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b7ce2b32a1aed03c558dc61a5cd328f15aff2dbc17daad8fb8af04d2100e15c" +checksum = "82c2c1fdcd807d1098552c5b9a36e425e42e9fbd7c6a37a8425f390f781f7fa7" dependencies = [ "itoa 1.0.2", "ryu", @@ -4588,9 +4588,9 @@ checksum = "eb703cfe953bccee95685111adeedb76fabe4e97549a58d16f03ea7b9367bb32" [[package]] name = "smallvec" -version = "1.8.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2dd574626839106c320a323308629dcb1acfc96e32a8cba364ddc61ac23ee83" +checksum = "2fd0db749597d91ff862fd1d55ea87f7855a744a8425a64695b6fca237d1dad1" [[package]] name = "socket2" @@ -4620,16 +4620,16 @@ dependencies = [ [[package]] name = "sp-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" dependencies = [ "hash-db", "log", "parity-scale-codec", "sp-api-proc-macro", - "sp-core 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", - "sp-runtime 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", - "sp-state-machine 0.12.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-core 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-runtime 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-state-machine 0.12.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", "sp-version", "thiserror", ] @@ -4637,7 +4637,7 @@ dependencies = [ [[package]] name = "sp-api-proc-macro" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" dependencies = [ "blake2", "proc-macro-crate 1.1.3", @@ -4663,14 +4663,14 @@ 
dependencies = [ [[package]] name = "sp-application-crypto" version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" dependencies = [ "parity-scale-codec", "scale-info", "serde", - "sp-core 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", - "sp-io 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-core 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-io 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", ] [[package]] @@ -4692,22 +4692,22 @@ dependencies = [ [[package]] name = "sp-arithmetic" version = "5.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" dependencies = [ "integer-sqrt", "num-traits", "parity-scale-codec", "scale-info", "serde", - "sp-debug-derive 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-debug-derive 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", "static_assertions", ] [[package]] name = "sp-blockchain" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" dependencies = [ "futures", "log", @@ -4717,26 +4717,26 @@ dependencies = [ "sp-api", "sp-consensus", "sp-database", - "sp-runtime 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", - "sp-state-machine 0.12.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-runtime 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-state-machine 0.12.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", "thiserror", ] [[package]] name = "sp-consensus" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" dependencies = [ "async-trait", "futures", "futures-timer", "log", "parity-scale-codec", - "sp-core 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-core 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", "sp-inherents", - "sp-runtime 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", - "sp-state-machine 0.12.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-runtime 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-state-machine 0.12.0 
(git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", "sp-version", "thiserror", ] @@ -4791,7 +4791,7 @@ dependencies = [ [[package]] name = "sp-core" version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" dependencies = [ "base58", "bitflags", @@ -4820,12 +4820,12 @@ dependencies = [ "secp256k1 0.21.3", "secrecy", "serde", - "sp-core-hashing 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", - "sp-debug-derive 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", - "sp-externalities 0.12.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", - "sp-runtime-interface 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", - "sp-storage 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-core-hashing 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-debug-derive 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-externalities 0.12.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-runtime-interface 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-storage 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", "ss58-registry", "substrate-bip39", "thiserror", @@ -4851,32 +4851,32 @@ dependencies = [ [[package]] name = "sp-core-hashing" version = "4.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" dependencies = [ "blake2", "byteorder", "digest 0.10.3", "sha2 0.10.2", "sha3 0.10.1", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", "twox-hash", ] [[package]] name = "sp-core-hashing-proc-macro" version = "5.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" dependencies = [ "proc-macro2", "quote", - "sp-core-hashing 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-core-hashing 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", "syn", ] [[package]] name = "sp-database" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" dependencies = [ "kvdb", "parking_lot", @@ -4896,7 +4896,7 @@ dependencies = [ [[package]] name = "sp-debug-derive" version = "4.0.0" -source = 
"git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" dependencies = [ "proc-macro2", "quote", @@ -4918,25 +4918,25 @@ dependencies = [ [[package]] name = "sp-externalities" version = "0.12.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" dependencies = [ "environmental", "parity-scale-codec", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", - "sp-storage 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-storage 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", ] [[package]] name = "sp-inherents" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" dependencies = [ "async-trait", "impl-trait-for-tuples", "parity-scale-codec", - "sp-core 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", - "sp-runtime 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-core 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-runtime 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", "thiserror", ] @@ -4969,7 +4969,7 @@ dependencies = [ [[package]] name = "sp-io" version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" dependencies = [ "futures", "hash-db", @@ -4978,15 +4978,15 @@ dependencies = [ "parity-scale-codec", "parking_lot", "secp256k1 0.21.3", - "sp-core 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", - "sp-externalities 0.12.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", - "sp-keystore 0.12.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", - "sp-runtime-interface 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", - "sp-state-machine 0.12.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", - "sp-tracing 5.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", - "sp-trie 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", - "sp-wasm-interface 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-core 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-externalities 0.12.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-keystore 0.12.0 
(git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-runtime-interface 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-state-machine 0.12.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-tracing 5.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-trie 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-wasm-interface 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", "tracing", "tracing-core", ] @@ -5011,7 +5011,7 @@ dependencies = [ [[package]] name = "sp-keystore" version = "0.12.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" dependencies = [ "async-trait", "futures", @@ -5019,24 +5019,24 @@ dependencies = [ "parity-scale-codec", "parking_lot", "schnorrkel", - "sp-core 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", - "sp-externalities 0.12.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-core 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-externalities 0.12.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", "thiserror", ] [[package]] name = "sp-mmr-primitives" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" dependencies = [ "log", "parity-scale-codec", "serde", "sp-api", - "sp-core 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", - "sp-debug-derive 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", - "sp-runtime 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-core 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-debug-derive 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-runtime 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", ] [[package]] @@ -5053,7 +5053,7 @@ dependencies = [ [[package]] name = "sp-panic-handler" version = "4.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" dependencies = [ "backtrace", "lazy_static", @@ -5086,7 +5086,7 @@ dependencies = [ [[package]] name = "sp-runtime" version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" dependencies = [ "either", "hash256-std-hasher", @@ -5098,11 +5098,11 @@ dependencies = [ "rand 0.7.3", "scale-info", "serde", - "sp-application-crypto 6.0.0 
(git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", - "sp-arithmetic 5.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", - "sp-core 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", - "sp-io 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-application-crypto 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-arithmetic 5.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-core 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-io 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", ] [[package]] @@ -5126,17 +5126,17 @@ dependencies = [ [[package]] name = "sp-runtime-interface" version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" dependencies = [ "impl-trait-for-tuples", "parity-scale-codec", "primitive-types", - "sp-externalities 0.12.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", - "sp-runtime-interface-proc-macro 5.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", - "sp-storage 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", - "sp-tracing 5.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", - "sp-wasm-interface 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-externalities 0.12.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-runtime-interface-proc-macro 5.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-storage 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-tracing 5.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-wasm-interface 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", "static_assertions", ] @@ -5156,7 +5156,7 @@ dependencies = [ [[package]] name = "sp-runtime-interface-proc-macro" version = "5.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" dependencies = [ "Inflector", "proc-macro-crate 1.1.3", @@ -5168,26 +5168,26 @@ dependencies = [ [[package]] name = "sp-session" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" dependencies = [ "parity-scale-codec", "scale-info", "sp-api", - "sp-core 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", - "sp-runtime 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-core 6.0.0 
(git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-runtime 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", "sp-staking", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", ] [[package]] name = "sp-staking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" dependencies = [ "parity-scale-codec", "scale-info", - "sp-runtime 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-runtime 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", ] [[package]] @@ -5217,7 +5217,7 @@ dependencies = [ [[package]] name = "sp-state-machine" version = "0.12.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" dependencies = [ "hash-db", "log", @@ -5226,11 +5226,11 @@ dependencies = [ "parking_lot", "rand 0.7.3", "smallvec", - "sp-core 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", - "sp-externalities 0.12.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", - "sp-panic-handler 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", - "sp-trie 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-core 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-externalities 0.12.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-panic-handler 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-trie 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", "thiserror", "tracing", "trie-root", @@ -5247,6 +5247,11 @@ name = "sp-std" version = "4.0.0" source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" +[[package]] +name = "sp-std" +version = "4.0.0" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" + [[package]] name = "sp-storage" version = "6.0.0" @@ -5264,20 +5269,20 @@ dependencies = [ [[package]] name = "sp-storage" version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" dependencies = [ "impl-serde", "parity-scale-codec", "ref-cast", "serde", - "sp-debug-derive 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-debug-derive 4.0.0 
(git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", ] [[package]] name = "sp-timestamp" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" dependencies = [ "async-trait", "futures-timer", @@ -5285,8 +5290,8 @@ dependencies = [ "parity-scale-codec", "sp-api", "sp-inherents", - "sp-runtime 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-runtime 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", "thiserror", ] @@ -5306,10 +5311,10 @@ dependencies = [ [[package]] name = "sp-tracing" version = "5.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" dependencies = [ "parity-scale-codec", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", "tracing", "tracing-core", "tracing-subscriber 0.2.25", @@ -5334,14 +5339,14 @@ dependencies = [ [[package]] name = "sp-trie" version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" dependencies = [ "hash-db", "memory-db", "parity-scale-codec", "scale-info", - "sp-core 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-core 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", "thiserror", "trie-db", "trie-root", @@ -5350,7 +5355,7 @@ dependencies = [ [[package]] name = "sp-version" version = "5.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" dependencies = [ "impl-serde", "parity-scale-codec", @@ -5358,8 +5363,8 @@ dependencies = [ "scale-info", "serde", "sp-core-hashing-proc-macro", - "sp-runtime 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-runtime 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", "sp-version-proc-macro", "thiserror", ] @@ -5367,7 +5372,7 @@ dependencies = [ [[package]] name = "sp-version-proc-macro" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" +source = 
"git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" dependencies = [ "parity-scale-codec", "proc-macro2", @@ -5391,12 +5396,12 @@ dependencies = [ [[package]] name = "sp-wasm-interface" version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" dependencies = [ "impl-trait-for-tuples", "log", "parity-scale-codec", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", "wasmi", ] @@ -5427,9 +5432,9 @@ dependencies = [ [[package]] name = "ss58-registry" -version = "1.22.0" +version = "1.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d804c8d48aeab838be31570866fce1130d275b563d49af08b4927a0bd561e7c" +checksum = "77ef98aedad3dc52e10995e7ed15f1279e11d4da35795f5dac7305742d0feb66" dependencies = [ "Inflector", "num-format", @@ -5568,9 +5573,9 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.96" +version = "1.0.98" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0748dd251e24453cb8717f0354206b91557e4ec8703673a4b30208f2abaf1ebf" +checksum = "c50aef8a904de4c23c788f104b7dddc7d6f79c647c7c8ce4cc8f73eb0ca773dd" dependencies = [ "proc-macro2", "quote", @@ -5841,9 +5846,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.9" +version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2702e08a7a860f005826c6815dcac101b19b5eb330c27fe4a5928fec1d20ddd" +checksum = "72c91f41dcb2f096c05f0873d667dceec1087ce5bcf984ec8ffb19acddbb3217" dependencies = [ "libc", "num_threads", @@ -6047,9 +6052,9 @@ dependencies = [ [[package]] name = "tower" -version = "0.4.12" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a89fd63ad6adf737582df5db40d286574513c69a11dac5214dc3b5603d6713e" +checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" dependencies = [ "futures-core", "futures-util", @@ -6092,9 +6097,9 @@ checksum = "343bc9466d3fe6b0f960ef45960509f84480bf4fd96f92901afe7ff3df9d3a62" [[package]] name = "tower-service" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6" +checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" [[package]] name = "tracing" @@ -6111,9 +6116,9 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.21" +version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc6b8ad3567499f98a1db7a752b07a7c8c7c7c34c332ec00effb2b0027974b7c" +checksum = "11c75893af559bc8e10716548bdef5cb2b983f8e637db9d0e15126b61b484ee2" dependencies = [ "proc-macro2", "quote", @@ -6122,9 +6127,9 @@ dependencies = [ [[package]] name = "tracing-core" -version = "0.1.27" +version = "0.1.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7709595b8878a4965ce5e87ebf880a7d39c9afc6837721b21a5a816a8117d921" +checksum = "7b7358be39f2f274f322d2aaed611acc57f382e8eb1e5b48cb9ae30933495ce7" dependencies = [ "once_cell", "valuable", @@ -6137,7 +6142,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"d686ec1c0f384b1277f097b2f279a2ecc11afe8c133c1aabf036a27cb4cd206e" dependencies = [ "tracing", - "tracing-subscriber 0.3.11", + "tracing-subscriber 0.3.14", ] [[package]] @@ -6195,13 +6200,13 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.11" +version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bc28f93baff38037f64e6f43d34cfa1605f27a49c34e8a04c5e78b0babf2596" +checksum = "3a713421342a5a666b7577783721d3117f1b69a393df803ee17bb73b1e122a59" dependencies = [ "ansi_term", - "lazy_static", "matchers 0.1.0", + "once_cell", "regex", "serde", "serde_json", @@ -6335,9 +6340,9 @@ checksum = "5bd2fe26506023ed7b5e1e315add59d6f584c621d037f9368fea9cfb988f368c" [[package]] name = "unicode-normalization" -version = "0.1.19" +version = "0.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d54590932941a9e9266f0832deed84ebe1bf2e4c9e4a3554d393d18f5e854bf9" +checksum = "854cbdc4f7bc6ae19c820d44abdc3277ac3e1b2b93db20a636825d9322fb60e6" dependencies = [ "tinyvec", ] @@ -6400,9 +6405,6 @@ name = "uuid" version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" -dependencies = [ - "getrandom 0.2.7", -] [[package]] name = "uuid" @@ -6693,9 +6695,9 @@ dependencies = [ [[package]] name = "zeroize" -version = "1.5.5" +version = "1.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94693807d016b2f2d2e14420eb3bfcca689311ff775dcf113d74ea624b7cdf07" +checksum = "20b578acffd8516a6c3f2a1bdefc1ec37e547bb4e0fb8b6b01a4cafc886b4442" dependencies = [ "zeroize_derive", ] diff --git a/modules/Cargo.toml b/modules/Cargo.toml index d316e29f7b..4057cb2eda 100644 --- a/modules/Cargo.toml +++ b/modules/Cargo.toml @@ -63,14 +63,14 @@ derive_more = { version = "0.99.17", default-features = false, features = ["from uint = { version = "0.9", default-features = false } beefy-client = { package = "beefy-generic-client", git = "https://github.com/ComposableFi/beefy-client", branch = "master", default-features = false, optional = true } -pallet-mmr-primitives = { package = "sp-mmr-primitives", git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.22", default-features = false, optional = true } -beefy-primitives = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.22", default-features = false, optional = true } +pallet-mmr-primitives = { package = "sp-mmr-primitives", git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.24", default-features = false, optional = true } +beefy-primitives = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.24", default-features = false, optional = true } codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.22", default-features = false, optional = true } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.22", default-features = false, optional = true} -sp-std = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.22", default-features = false, optional = true } -sp-trie = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.22", default-features = false, optional = true } -sp-io = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.22", default-features = false, 
optional = true } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.24", default-features = false, optional = true } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.24", default-features = false, optional = true} +sp-std = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.24", default-features = false, optional = true } +sp-trie = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.24", default-features = false, optional = true } +sp-io = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.24", default-features = false, optional = true } sha3 = { version = "0.10.1", optional = true } ripemd = { version = "0.1.1", optional = true } @@ -112,7 +112,7 @@ sha2 = { version = "0.10.2" } tendermint-rpc = { git = "https://github.com/composableFi/tendermint-rs", rev = "5a74e0f8da4d3dab83cc04b5f1363b018cf3d9e8", features = ["http-client", "websocket-client"] } tendermint-testgen = { git = "https://github.com/composableFi/tendermint-rs", rev = "5a74e0f8da4d3dab83cc04b5f1363b018cf3d9e8" } # Needed for generating (synthetic) light blocks. # Beefy Light Client testing dependencies -sp-io = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.22"} +sp-io = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.24"} subxt = "0.21.0" tokio = { version = "1.17.0", features = ["full"] } serde_json = "1.0.74" From 2d652ef6f379451d24b744ce9c5447fd39423f5b Mon Sep 17 00:00:00 2001 From: Web3 Smith <31099392+Wizdave97@users.noreply.github.com> Date: Tue, 5 Jul 2022 20:27:00 +0100 Subject: [PATCH 58/96] Ics23 Update (#27) update ics23 --- Cargo.lock | 10 ++-------- modules/Cargo.toml | 6 +++--- 2 files changed, 5 insertions(+), 11 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f8aab879e1..0f10762c7e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -352,7 +352,7 @@ dependencies = [ [[package]] name = "beefy-generic-client" version = "0.1.0" -source = "git+https://github.com/ComposableFi/beefy-client?branch=master#156e6b68760464967cec0684ce689ad5244ea560" +source = "git+https://github.com/ComposableFi/beefy-rs?branch=master#156e6b68760464967cec0684ce689ad5244ea560" dependencies = [ "beefy-primitives", "ckb-merkle-mountain-range", @@ -2284,7 +2284,7 @@ dependencies = [ [[package]] name = "ics23" version = "0.8.0-alpha" -source = "git+https://github.com/composablefi/ics23?rev=86e970a4f22973946a0faf472b56951691d40874#86e970a4f22973946a0faf472b56951691d40874" +source = "git+https://github.com/composablefi/ics23?rev=b500a5c6068eb53c83c4c6c13bd9d8c25e0bf927#b500a5c6068eb53c83c4c6c13bd9d8c25e0bf927" dependencies = [ "anyhow", "bytes", @@ -2294,7 +2294,6 @@ dependencies = [ "sha2 0.9.9", "sha3 0.9.1", "sp-core 6.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22)", ] [[package]] @@ -5242,11 +5241,6 @@ version = "4.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "14804d6069ee7a388240b665f17908d98386ffb0b5d39f89a4099fc7a2a4c03f" -[[package]] -name = "sp-std" -version = "4.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.22#616d33ea23bab86cafffaf116fc607b6790fb4eb" - [[package]] name = "sp-std" version = "4.0.0" diff --git a/modules/Cargo.toml b/modules/Cargo.toml index 4057cb2eda..d5d6491c50 100644 --- a/modules/Cargo.toml +++ b/modules/Cargo.toml @@ -45,7 +45,7 @@ ics11_beefy 
= ["sp-io", "sp-core", "sp-std", "beefy-primitives", "sp-runtime", " # Proto definitions for all IBC-related interfaces, e.g., connections or channels. borsh = { version = "0.9.3", default-features = false } ibc-proto = { version = "0.18.0", path = "../proto", default-features = false } -ics23 = { git = "https://github.com/composablefi/ics23", rev = "86e970a4f22973946a0faf472b56951691d40874", default-features = false } +ics23 = { git = "https://github.com/composablefi/ics23", rev = "b500a5c6068eb53c83c4c6c13bd9d8c25e0bf927", default-features = false } time = { version = "0.3", default-features = false } serde_derive = { version = "1.0.104", default-features = false } serde = { version = "1.0", default-features = false } @@ -62,7 +62,7 @@ num-traits = { version = "0.2.15", default-features = false } derive_more = { version = "0.99.17", default-features = false, features = ["from", "into", "display"] } uint = { version = "0.9", default-features = false } -beefy-client = { package = "beefy-generic-client", git = "https://github.com/ComposableFi/beefy-client", branch = "master", default-features = false, optional = true } +beefy-client = { package = "beefy-generic-client", git = "https://github.com/ComposableFi/beefy-rs", branch = "master", default-features = false, optional = true } pallet-mmr-primitives = { package = "sp-mmr-primitives", git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.24", default-features = false, optional = true } beefy-primitives = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.24", default-features = false, optional = true } codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false } @@ -116,7 +116,7 @@ sp-io = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0 subxt = "0.21.0" tokio = { version = "1.17.0", features = ["full"] } serde_json = "1.0.74" -beefy-client = { package = "beefy-generic-client", git = "https://github.com/ComposableFi/beefy-client", branch = "master", features = ["mocks"]} +beefy-client = { package = "beefy-generic-client", git = "https://github.com/ComposableFi/beefy-rs", branch = "master", features = ["mocks"]} sha3 = { version = "0.10.1" } ripemd = { version = "0.1.1" } From ec349b3043a6e9ab84d2208d9901314cdfdddd69 Mon Sep 17 00:00:00 2001 From: Web3 Smith <31099392+Wizdave97@users.noreply.github.com> Date: Thu, 7 Jul 2022 18:17:06 +0100 Subject: [PATCH 59/96] Revert tracing version (#28) Co-authored-by: Andrey Orlov --- modules/Cargo.toml | 2 +- relayer-cli/Cargo.toml | 2 +- relayer/Cargo.toml | 2 +- tools/test-framework/Cargo.toml | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/modules/Cargo.toml b/modules/Cargo.toml index d5d6491c50..b8097968dd 100644 --- a/modules/Cargo.toml +++ b/modules/Cargo.toml @@ -50,7 +50,7 @@ time = { version = "0.3", default-features = false } serde_derive = { version = "1.0.104", default-features = false } serde = { version = "1.0", default-features = false } serde_json = { version = "1", default-features = false } -tracing = { version = "0.1.35", default-features = false } +tracing = { version = "0.1.34", default-features = false } prost = { version = "0.10", default-features = false } prost-types = { version = "0.10", default-features = false } bytes = { version = "1.1.0", default-features = false } diff --git a/relayer-cli/Cargo.toml b/relayer-cli/Cargo.toml index 85cc974ddb..e0c89b7eec 100644 --- a/relayer-cli/Cargo.toml +++ b/relayer-cli/Cargo.toml @@ -37,7 +37,7 @@ clap_complete = "3.2" 
humantime = "2.1" serde = { version = "1.0", features = ["serde_derive"] } tokio = { version = "1.0", features = ["full"] } -tracing = "0.1.35" +tracing = "0.1.34" tracing-subscriber = { version = "0.3.11", features = ["fmt", "env-filter", "json"]} eyre = "0.6.8" color-eyre = "0.6" diff --git a/relayer/Cargo.toml b/relayer/Cargo.toml index 1c816c876a..dc934b7f8a 100644 --- a/relayer/Cargo.toml +++ b/relayer/Cargo.toml @@ -31,7 +31,7 @@ serde = "1.0" serde_derive = "1.0" thiserror = "1.0.30" toml = "0.5" -tracing = "0.1.35" +tracing = "0.1.34" tokio = { version = "1.0", features = ["rt-multi-thread", "time", "sync"] } serde_json = { version = "1" } bytes = "1.1.0" diff --git a/tools/test-framework/Cargo.toml b/tools/test-framework/Cargo.toml index a112edb8d0..46d256f484 100644 --- a/tools/test-framework/Cargo.toml +++ b/tools/test-framework/Cargo.toml @@ -24,7 +24,7 @@ tendermint-rpc = { git = "https://github.com/composableFi/tendermint-rs", rev = async-trait = "0.1.56" http = "0.2.8" tokio = { version = "1.0", features = ["full"] } -tracing = "0.1.35" +tracing = "0.1.34" tracing-subscriber = "0.3.11" eyre = "0.6.8" color-eyre = "0.6" From 09ac302cedc33f3e1c100541bfa0a33ee2751679 Mon Sep 17 00:00:00 2001 From: Web3 Smith <31099392+Wizdave97@users.noreply.github.com> Date: Wed, 20 Jul 2022 14:13:01 +0100 Subject: [PATCH 60/96] Update trie key path derivation (#30) fix trie key path --- modules/src/clients/ics11_beefy/client_def.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/modules/src/clients/ics11_beefy/client_def.rs b/modules/src/clients/ics11_beefy/client_def.rs index af2fd7215b..19af267084 100644 --- a/modules/src/clients/ics11_beefy/client_def.rs +++ b/modules/src/clients/ics11_beefy/client_def.rs @@ -405,8 +405,8 @@ fn verify_membership>( } let path: Path = path.into(); let path = path.to_string(); - let path = vec![prefix.as_bytes(), path.as_bytes()]; - let key = codec::Encode::encode(&path); + let mut key = prefix.as_bytes().to_vec(); + key.extend(path.as_bytes()); let trie_proof: Vec = proof.clone().into(); let trie_proof: Vec> = codec::Decode::decode(&mut &*trie_proof) .map_err(|e| Error::beefy(BeefyError::scale_decode(e)))?; @@ -425,8 +425,8 @@ fn verify_non_membership>( } let path: Path = path.into(); let path = path.to_string(); - let path = vec![prefix.as_bytes(), path.as_bytes()]; - let key = codec::Encode::encode(&path); + let mut key = prefix.as_bytes().to_vec(); + key.extend(path.as_bytes()); let trie_proof: Vec = proof.clone().into(); let trie_proof: Vec> = codec::Decode::decode(&mut &*trie_proof) .map_err(|e| Error::beefy(BeefyError::scale_decode(e)))?; From 963151c8b9a1ff4b5e8fb0e2be029f14da64d580 Mon Sep 17 00:00:00 2001 From: Web3 Smith <31099392+Wizdave97@users.noreply.github.com> Date: Wed, 27 Jul 2022 13:38:19 +0100 Subject: [PATCH 61/96] Update events deposited with correct values (#34) fix deposited events --- .../core/ics02_client/handler/create_client.rs | 3 ++- .../core/ics02_client/handler/update_client.rs | 7 +++---- .../core/ics02_client/handler/upgrade_client.rs | 13 +++++++------ .../ics03_connection/handler/conn_open_ack.rs | 13 ++++++++----- .../ics03_connection/handler/conn_open_confirm.rs | 13 ++++++++----- .../ics03_connection/handler/conn_open_init.rs | 15 +++++++++------ .../ics03_connection/handler/conn_open_try.rs | 15 +++++++++------ .../ics04_channel/handler/chan_close_confirm.rs | 14 +++++++++----- .../core/ics04_channel/handler/chan_close_init.rs | 14 +++++++++----- .../core/ics04_channel/handler/chan_open_ack.rs 
| 14 +++++++++----- .../ics04_channel/handler/chan_open_confirm.rs | 14 +++++++++----- .../core/ics04_channel/handler/chan_open_init.rs | 14 +++++++++----- .../core/ics04_channel/handler/chan_open_try.rs | 14 +++++++++----- 13 files changed, 100 insertions(+), 63 deletions(-) diff --git a/modules/src/core/ics02_client/handler/create_client.rs b/modules/src/core/ics02_client/handler/create_client.rs index 41cb2bf136..0f3725a391 100644 --- a/modules/src/core/ics02_client/handler/create_client.rs +++ b/modules/src/core/ics02_client/handler/create_client.rs @@ -58,7 +58,8 @@ pub fn process( let event_attributes = Attributes { client_id, height: ctx.host_height(), - ..Default::default() + client_type: msg.client_state.client_type(), + consensus_height: msg.client_state.latest_height(), }; output.emit(IbcEvent::CreateClient(event_attributes.into())); diff --git a/modules/src/core/ics02_client/handler/update_client.rs b/modules/src/core/ics02_client/handler/update_client.rs index 94f98dacef..090a404aa3 100644 --- a/modules/src/core/ics02_client/handler/update_client.rs +++ b/modules/src/core/ics02_client/handler/update_client.rs @@ -4,7 +4,6 @@ use core::fmt::Debug; use crate::clients::host_functions::HostFunctionsProvider; use crate::core::ics02_client::client_def::{AnyClient, ClientDef, ConsensusUpdateResult}; use crate::core::ics02_client::client_state::{AnyClientState, ClientState}; -#[cfg(any(test, feature = "ics11_beefy"))] use crate::core::ics02_client::client_type::ClientType; use crate::core::ics02_client::error::Error; use crate::core::ics02_client::events::Attributes; @@ -53,8 +52,7 @@ pub fn process( return Err(Error::client_frozen(client_id)); } - #[cfg(any(test, feature = "ics11_beefy"))] - if client_type != ClientType::Beefy { + if client_type == ClientType::Tendermint { // Read consensus state from the host chain store. 
let latest_consensus_state = ctx .consensus_state(&client_id, client_state.latest_height()) @@ -90,7 +88,8 @@ pub fn process( let event_attributes = Attributes { client_id: client_id.clone(), height: ctx.host_height(), - ..Default::default() + client_type, + consensus_height: client_state.latest_height(), }; if found_misbehaviour { diff --git a/modules/src/core/ics02_client/handler/upgrade_client.rs b/modules/src/core/ics02_client/handler/upgrade_client.rs index 0ef3738593..5f3ea9785d 100644 --- a/modules/src/core/ics02_client/handler/upgrade_client.rs +++ b/modules/src/core/ics02_client/handler/upgrade_client.rs @@ -59,17 +59,18 @@ pub fn process( // Not implemented yet: https://github.com/informalsystems/ibc-rs/issues/722 // todo!() + let event_attributes = Attributes { + client_id: client_id.clone(), + height: ctx.host_height(), + client_type, + consensus_height: new_client_state.latest_height(), + }; let result = ClientResult::Upgrade(Result { - client_id: client_id.clone(), + client_id, client_state: new_client_state, consensus_state: Some(new_consensus_state), }); - let event_attributes = Attributes { - client_id, - height: ctx.host_height(), - ..Default::default() - }; output.emit(IbcEvent::UpgradeClient(event_attributes.into())); Ok(output.with_result(result)) diff --git a/modules/src/core/ics03_connection/handler/conn_open_ack.rs b/modules/src/core/ics03_connection/handler/conn_open_ack.rs index 7f1dd4a9da..dbfb34f598 100644 --- a/modules/src/core/ics03_connection/handler/conn_open_ack.rs +++ b/modules/src/core/ics03_connection/handler/conn_open_ack.rs @@ -77,17 +77,20 @@ pub(crate) fn process( output.log("success: connection verification passed"); + let event_attributes = Attributes { + connection_id: Some(msg.connection_id.clone()), + height: ctx.host_height(), + client_id: conn_end.client_id().clone(), + counterparty_connection_id: conn_end.counterparty().connection_id.clone(), + counterparty_client_id: conn_end.counterparty().client_id().clone(), + }; + let result = ConnectionResult { connection_id: msg.connection_id, connection_id_state: ConnectionIdState::Reused, connection_end: conn_end, }; - let event_attributes = Attributes { - connection_id: Some(result.connection_id.clone()), - height: ctx.host_height(), - ..Default::default() - }; output.emit(IbcEvent::OpenAckConnection(event_attributes.into())); Ok(output.with_result(result)) diff --git a/modules/src/core/ics03_connection/handler/conn_open_confirm.rs b/modules/src/core/ics03_connection/handler/conn_open_confirm.rs index d331820a37..307c4ae5fd 100644 --- a/modules/src/core/ics03_connection/handler/conn_open_confirm.rs +++ b/modules/src/core/ics03_connection/handler/conn_open_confirm.rs @@ -55,17 +55,20 @@ pub(crate) fn process( // Transition our own end of the connection to state OPEN. 
conn_end.set_state(State::Open); + let event_attributes = Attributes { + connection_id: Some(msg.connection_id.clone()), + height: ctx.host_height(), + client_id: conn_end.client_id().clone(), + counterparty_connection_id: conn_end.counterparty().connection_id.clone(), + counterparty_client_id: conn_end.counterparty().client_id().clone(), + }; + let result = ConnectionResult { connection_id: msg.connection_id, connection_id_state: ConnectionIdState::Reused, connection_end: conn_end, }; - let event_attributes = Attributes { - connection_id: Some(result.connection_id.clone()), - height: ctx.host_height(), - ..Default::default() - }; output.emit(IbcEvent::OpenConfirmConnection(event_attributes.into())); Ok(output.with_result(result)) diff --git a/modules/src/core/ics03_connection/handler/conn_open_init.rs b/modules/src/core/ics03_connection/handler/conn_open_init.rs index f2c2f386d7..da647a6ad0 100644 --- a/modules/src/core/ics03_connection/handler/conn_open_init.rs +++ b/modules/src/core/ics03_connection/handler/conn_open_init.rs @@ -49,17 +49,20 @@ pub(crate) fn process( conn_id )); + let event_attributes = Attributes { + connection_id: Some(conn_id.clone()), + height: ctx.host_height(), + client_id: new_connection_end.client_id().clone(), + counterparty_connection_id: new_connection_end.counterparty().connection_id.clone(), + counterparty_client_id: new_connection_end.counterparty().client_id().clone(), + }; + let result = ConnectionResult { - connection_id: conn_id.clone(), + connection_id: conn_id, connection_id_state: ConnectionIdState::Generated, connection_end: new_connection_end, }; - let event_attributes = Attributes { - connection_id: Some(conn_id), - height: ctx.host_height(), - ..Default::default() - }; output.emit(IbcEvent::OpenInitConnection(event_attributes.into())); Ok(output.with_result(result)) diff --git a/modules/src/core/ics03_connection/handler/conn_open_try.rs b/modules/src/core/ics03_connection/handler/conn_open_try.rs index e5f556c780..7ac8de4675 100644 --- a/modules/src/core/ics03_connection/handler/conn_open_try.rs +++ b/modules/src/core/ics03_connection/handler/conn_open_try.rs @@ -101,8 +101,16 @@ pub(crate) fn process( output.log("success: connection verification passed"); + let event_attributes = Attributes { + connection_id: Some(conn_id.clone()), + height: ctx.host_height(), + client_id: new_connection_end.client_id().clone(), + counterparty_connection_id: new_connection_end.counterparty().connection_id.clone(), + counterparty_client_id: new_connection_end.counterparty().client_id().clone(), + }; + let result = ConnectionResult { - connection_id: conn_id.clone(), + connection_id: conn_id, connection_id_state: if matches!(msg.previous_connection_id, None) { ConnectionIdState::Generated } else { @@ -111,11 +119,6 @@ pub(crate) fn process( connection_end: new_connection_end, }; - let event_attributes = Attributes { - connection_id: Some(conn_id), - height: ctx.host_height(), - ..Default::default() - }; output.emit(IbcEvent::OpenTryConnection(event_attributes.into())); Ok(output.with_result(result)) diff --git a/modules/src/core/ics04_channel/handler/chan_close_confirm.rs b/modules/src/core/ics04_channel/handler/chan_close_confirm.rs index 54bdcbef43..4c54788afe 100644 --- a/modules/src/core/ics04_channel/handler/chan_close_confirm.rs +++ b/modules/src/core/ics04_channel/handler/chan_close_confirm.rs @@ -79,6 +79,15 @@ pub(crate) fn process( // Transition the channel end to the new state & pick a version. 
channel_end.set_state(State::Closed); + let event_attributes = Attributes { + channel_id: Some(msg.channel_id), + height: ctx.host_height(), + port_id: msg.port_id.clone(), + connection_id: channel_end.connection_hops[0].clone(), + counterparty_port_id: channel_end.counterparty().port_id.clone(), + counterparty_channel_id: channel_end.counterparty().channel_id.clone(), + }; + let result = ChannelResult { port_id: msg.port_id.clone(), channel_id: msg.channel_id, @@ -86,11 +95,6 @@ pub(crate) fn process( channel_end, }; - let event_attributes = Attributes { - channel_id: Some(msg.channel_id), - height: ctx.host_height(), - ..Default::default() - }; output.emit(IbcEvent::CloseConfirmChannel( event_attributes .try_into() diff --git a/modules/src/core/ics04_channel/handler/chan_close_init.rs b/modules/src/core/ics04_channel/handler/chan_close_init.rs index 8abfd0e339..0cd4b3c0df 100644 --- a/modules/src/core/ics04_channel/handler/chan_close_init.rs +++ b/modules/src/core/ics04_channel/handler/chan_close_init.rs @@ -50,6 +50,15 @@ pub(crate) fn process( // Transition the channel end to the new state & pick a version. channel_end.set_state(State::Closed); + let event_attributes = Attributes { + channel_id: Some(msg.channel_id), + height: ctx.host_height(), + port_id: msg.port_id.clone(), + connection_id: channel_end.connection_hops[0].clone(), + counterparty_port_id: channel_end.counterparty().port_id.clone(), + counterparty_channel_id: channel_end.counterparty().channel_id.clone(), + }; + let result = ChannelResult { port_id: msg.port_id.clone(), channel_id: msg.channel_id, @@ -57,11 +66,6 @@ pub(crate) fn process( channel_end, }; - let event_attributes = Attributes { - channel_id: Some(msg.channel_id), - height: ctx.host_height(), - ..Default::default() - }; output.emit(IbcEvent::CloseInitChannel( event_attributes .try_into() diff --git a/modules/src/core/ics04_channel/handler/chan_open_ack.rs b/modules/src/core/ics04_channel/handler/chan_open_ack.rs index 7138a55a66..34f50d9f40 100644 --- a/modules/src/core/ics04_channel/handler/chan_open_ack.rs +++ b/modules/src/core/ics04_channel/handler/chan_open_ack.rs @@ -87,6 +87,15 @@ pub(crate) fn process( channel_end.set_state(State::Open); channel_end.set_version(msg.counterparty_version.clone()); + let event_attributes = Attributes { + channel_id: Some(msg.channel_id), + height: ctx.host_height(), + port_id: msg.port_id.clone(), + connection_id: channel_end.connection_hops[0].clone(), + counterparty_port_id: channel_end.counterparty().port_id.clone(), + counterparty_channel_id: channel_end.counterparty().channel_id.clone(), + }; + let result = ChannelResult { port_id: msg.port_id.clone(), channel_id: msg.channel_id, @@ -94,11 +103,6 @@ pub(crate) fn process( channel_end, }; - let event_attributes = Attributes { - channel_id: Some(msg.channel_id), - height: ctx.host_height(), - ..Default::default() - }; output.emit(IbcEvent::OpenAckChannel( event_attributes .try_into() diff --git a/modules/src/core/ics04_channel/handler/chan_open_confirm.rs b/modules/src/core/ics04_channel/handler/chan_open_confirm.rs index f605500d50..e2c9c58873 100644 --- a/modules/src/core/ics04_channel/handler/chan_open_confirm.rs +++ b/modules/src/core/ics04_channel/handler/chan_open_confirm.rs @@ -82,6 +82,15 @@ pub(crate) fn process( // Transition the channel end to the new state. 
channel_end.set_state(State::Open); + let event_attributes = Attributes { + channel_id: Some(msg.channel_id), + height: ctx.host_height(), + port_id: msg.port_id.clone(), + connection_id: channel_end.connection_hops[0].clone(), + counterparty_port_id: channel_end.counterparty().port_id.clone(), + counterparty_channel_id: channel_end.counterparty().channel_id.clone(), + }; + let result = ChannelResult { port_id: msg.port_id.clone(), channel_id: msg.channel_id, @@ -89,11 +98,6 @@ pub(crate) fn process( channel_end, }; - let event_attributes = Attributes { - channel_id: Some(msg.channel_id), - height: ctx.host_height(), - ..Default::default() - }; output.emit(IbcEvent::OpenConfirmChannel( event_attributes .try_into() diff --git a/modules/src/core/ics04_channel/handler/chan_open_init.rs b/modules/src/core/ics04_channel/handler/chan_open_init.rs index 4c7af9e743..43e3b01b41 100644 --- a/modules/src/core/ics04_channel/handler/chan_open_init.rs +++ b/modules/src/core/ics04_channel/handler/chan_open_init.rs @@ -58,6 +58,15 @@ pub(crate) fn process( output.log("success: no channel found"); + let event_attributes = Attributes { + channel_id: Some(chan_id), + height: ctx.host_height(), + port_id: msg.port_id.clone(), + connection_id: new_channel_end.connection_hops[0].clone(), + counterparty_port_id: new_channel_end.counterparty().port_id.clone(), + counterparty_channel_id: new_channel_end.counterparty().channel_id.clone(), + }; + let result = ChannelResult { port_id: msg.port_id.clone(), channel_id: chan_id, @@ -65,11 +74,6 @@ pub(crate) fn process( channel_id_state: ChannelIdState::Generated, }; - let event_attributes = Attributes { - channel_id: Some(chan_id), - height: ctx.host_height(), - ..Default::default() - }; output.emit(IbcEvent::OpenInitChannel( event_attributes .try_into() diff --git a/modules/src/core/ics04_channel/handler/chan_open_try.rs b/modules/src/core/ics04_channel/handler/chan_open_try.rs index 2564486364..6900b91c4e 100644 --- a/modules/src/core/ics04_channel/handler/chan_open_try.rs +++ b/modules/src/core/ics04_channel/handler/chan_open_try.rs @@ -125,6 +125,15 @@ pub(crate) fn process( // Transition the channel end to the new state & pick a version. 
new_channel_end.set_state(State::TryOpen); + let event_attributes = Attributes { + channel_id: Some(channel_id), + height: ctx.host_height(), + port_id: msg.port_id.clone(), + connection_id: new_channel_end.connection_hops[0].clone(), + counterparty_port_id: new_channel_end.counterparty().port_id.clone(), + counterparty_channel_id: new_channel_end.counterparty().channel_id.clone(), + }; + let result = ChannelResult { port_id: msg.port_id.clone(), channel_id_state: if matches!(msg.previous_channel_id, None) { @@ -136,11 +145,6 @@ pub(crate) fn process( channel_end: new_channel_end, }; - let event_attributes = Attributes { - channel_id: Some(channel_id), - height: ctx.host_height(), - ..Default::default() - }; output.emit(IbcEvent::OpenTryChannel( event_attributes .try_into() From 80f13fdcdee978217e0ccabfe6d83e790c425ee7 Mon Sep 17 00:00:00 2001 From: Web3 Smith <31099392+Wizdave97@users.noreply.github.com> Date: Thu, 28 Jul 2022 16:05:24 +0100 Subject: [PATCH 62/96] fix frozen height conversion (#35) --- modules/src/clients/ics11_beefy/client_state.rs | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/modules/src/clients/ics11_beefy/client_state.rs b/modules/src/clients/ics11_beefy/client_state.rs index 43750579b4..cd458a276e 100644 --- a/modules/src/clients/ics11_beefy/client_state.rs +++ b/modules/src/clients/ics11_beefy/client_state.rs @@ -228,7 +228,14 @@ impl TryFrom for ClientState { type Error = Error; fn try_from(raw: RawClientState) -> Result { - let frozen_height = Some(Height::new(0, raw.frozen_height)); + let frozen_height = { + let height = raw.frozen_height.into(); + if height == Height::zero() { + None + } else { + Some(height) + } + }; let authority_set = raw .authority From 790283269964b908da05cb9c3932a6a480563d62 Mon Sep 17 00:00:00 2001 From: Web3 Smith <31099392+Wizdave97@users.noreply.github.com> Date: Thu, 28 Jul 2022 16:19:35 +0100 Subject: [PATCH 63/96] David/fix beefy client state conversion (#36) * fix frozen height conversion * minor fix --- modules/src/clients/ics11_beefy/client_state.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/src/clients/ics11_beefy/client_state.rs b/modules/src/clients/ics11_beefy/client_state.rs index cd458a276e..c3cc35f398 100644 --- a/modules/src/clients/ics11_beefy/client_state.rs +++ b/modules/src/clients/ics11_beefy/client_state.rs @@ -229,7 +229,7 @@ impl TryFrom for ClientState { fn try_from(raw: RawClientState) -> Result { let frozen_height = { - let height = raw.frozen_height.into(); + let height = Height::new(0, raw.frozen_height.into()); if height == Height::zero() { None } else { From 6caccc984bf350cc2a6fdd2560d2264ff9fc5ff3 Mon Sep 17 00:00:00 2001 From: Web3 Smith <31099392+Wizdave97@users.noreply.github.com> Date: Mon, 1 Aug 2022 13:08:57 +0100 Subject: [PATCH 64/96] update beefy client with latest changes (#38) --- Cargo.lock | 298 ++++++++++++++---- modules/Cargo.toml | 8 +- modules/src/clients/host_functions.rs | 2 +- modules/src/clients/ics11_beefy/client_def.rs | 4 +- modules/src/clients/ics11_beefy/error.rs | 3 +- modules/src/clients/ics11_beefy/header.rs | 2 +- .../ics02_client/handler/update_client.rs | 43 +-- modules/tests/runner/mod.rs | 1 - relayer-cli/Cargo.toml | 2 +- 9 files changed, 278 insertions(+), 85 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0f10762c7e..1f7213e67f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -170,6 +170,15 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"bbf56136a5198c7b01a49e3afcbef6cf84597273d298f54432926024107b0109" +[[package]] +name = "async-lock" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e97a171d191782fba31bb902b14ad94e24a68145032b7eedf871ab0bc0d077b6" +dependencies = [ + "event-listener", +] + [[package]] name = "async-recursion" version = "0.3.2" @@ -352,27 +361,30 @@ dependencies = [ [[package]] name = "beefy-generic-client" version = "0.1.0" -source = "git+https://github.com/ComposableFi/beefy-rs?branch=master#156e6b68760464967cec0684ce689ad5244ea560" +source = "git+https://github.com/ComposableFi/beefy-rs?branch=master#14eb83a953cbc55e15aaf0de35b3608a50050413" dependencies = [ "beefy-primitives", "ckb-merkle-mountain-range", + "color-eyre", "derive_more", + "frame-metadata", "frame-support", - "hex-literal", + "hex", + "jsonrpsee 0.15.1", "pallet-beefy-mmr", "pallet-mmr", - "pallet-mmr-rpc", "parity-scale-codec", + "primitives", "rs_merkle", - "serde_json", - "sp-core 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-core 6.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "sp-core-hashing 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", "sp-io 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", "sp-mmr-primitives", - "sp-runtime 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-runtime 6.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "sp-trie 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "subxt", + "subxt-codegen", + "syn", + "tokio", ] [[package]] @@ -394,6 +406,39 @@ dependencies = [ "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", ] +[[package]] +name = "beefy-queries" +version = "0.1.0" +source = "git+https://github.com/ComposableFi/beefy-rs?branch=master#14eb83a953cbc55e15aaf0de35b3608a50050413" +dependencies = [ + "beefy-primitives", + "color-eyre", + "derive_more", + "frame-metadata", + "frame-support", + "hex", + "hex-literal", + "jsonrpsee 0.15.1", + "pallet-beefy-mmr", + "pallet-mmr", + "pallet-mmr-rpc", + "parity-scale-codec", + "primitives", + "rs_merkle", + "serde_json", + "sp-core 6.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-core-hashing 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-io 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-mmr-primitives", + "sp-runtime 6.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-trie 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "subxt", + "subxt-codegen", + "syn", + "tokio", +] + [[package]] name = "bitcoin" version = "0.28.1" @@ -754,9 +799,9 @@ dependencies = [ [[package]] name = "color-eyre" -version = "0.6.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ebf286c900a6d5867aeff75cfee3192857bb7f24b547d4f0df2ed6baa812c90" +checksum = "5a667583cca8c4f8436db8de46ea8233c42a7d9ae424a82d338f2e4675229204" dependencies = [ "backtrace", "color-spantrace", @@ -817,6 +862,12 @@ dependencies = [ "syn", ] +[[package]] +name = "convert_case" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" + [[package]] name = "core-foundation" version = "0.9.3" @@ -1099,8 +1150,10 @@ version = "0.99.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" dependencies = [ + "convert_case", "proc-macro2", "quote", + "rustc_version", "syn", ] @@ -1324,6 +1377,12 @@ dependencies = [ "version_check", ] +[[package]] +name = "event-listener" +version = "2.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" + [[package]] name = "eyre" version = "0.6.8" @@ -2000,7 +2059,7 @@ dependencies = [ "headers", "http", "hyper", - "hyper-rustls", + "hyper-rustls 0.22.1", "rustls-native-certs 0.5.0", "tokio", "tokio-rustls 0.22.0", @@ -2026,6 +2085,22 @@ dependencies = [ "webpki-roots 0.21.1", ] +[[package]] +name = "hyper-rustls" +version = "0.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d87c48c02e0dc5e3b849a2041db3029fd066650f8f717c07bf8ed78ccb895cac" +dependencies = [ + "http", + "hyper", + "log", + "rustls 0.20.6", + "rustls-native-certs 0.6.2", + "tokio", + "tokio-rustls 0.23.4", + "webpki-roots 0.22.3", +] + [[package]] name = "hyper-timeout" version = "0.4.1" @@ -2044,6 +2119,7 @@ version = "0.15.0" dependencies = [ "beefy-generic-client", "beefy-primitives", + "beefy-queries", "borsh", "bytes", "derive_more", @@ -2055,6 +2131,7 @@ dependencies = [ "num-traits", "parity-scale-codec", "primitive-types", + "primitives", "prost", "prost-types", "ripemd", @@ -2415,16 +2492,6 @@ dependencies = [ "wasm-bindgen", ] -[[package]] -name = "jsonrpsee" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91dc760c341fa81173f9a434931aaf32baad5552b0230cc6c93e8fb7eaad4c19" -dependencies = [ - "jsonrpsee-client-transport", - "jsonrpsee-core 0.10.1", -] - [[package]] name = "jsonrpsee" version = "0.13.1" @@ -2439,16 +2506,59 @@ dependencies = [ "tracing", ] +[[package]] +name = "jsonrpsee" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11e017217fcd18da0a25296d3693153dd19c8a6aadab330b3595285d075385d1" +dependencies = [ + "jsonrpsee-client-transport 0.14.0", + "jsonrpsee-core 0.14.0", +] + +[[package]] +name = "jsonrpsee" +version = "0.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8bd0d559d5e679b1ab2f869b486a11182923863b1b3ee8b421763cdd707b783a" +dependencies = [ + "jsonrpsee-client-transport 0.15.1", + "jsonrpsee-core 0.15.1", + "jsonrpsee-http-client", + "jsonrpsee-types 0.15.1", +] + [[package]] name = "jsonrpsee-client-transport" -version = "0.10.1" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "765f7a36d5087f74e3b3b47805c2188fef8eb54afcb587b078d9f8ebfe9c7220" +checksum = "ce395539a14d3ad4ec1256fde105abd36a2da25d578a291cabe98f45adfdb111" dependencies = [ - "futures", + "futures-util", "http", - "jsonrpsee-core 0.10.1", - "jsonrpsee-types 0.10.1", + "jsonrpsee-core 0.14.0", + "jsonrpsee-types 0.14.0", + "pin-project", + "rustls-native-certs 0.6.2", + "soketto", + "thiserror", + "tokio", + "tokio-rustls 0.23.4", + "tokio-util", + "tracing", + "webpki-roots 0.22.3", +] + +[[package]] +name = "jsonrpsee-client-transport" +version = "0.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"8752740ecd374bcbf8b69f3e80b0327942df76f793f8d4e60d3355650c31fb74" +dependencies = [ + "futures-util", + "http", + "jsonrpsee-core 0.15.1", + "jsonrpsee-types 0.15.1", "pin-project", "rustls-native-certs 0.6.2", "soketto", @@ -2462,9 +2572,9 @@ dependencies = [ [[package]] name = "jsonrpsee-core" -version = "0.10.1" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82ef77ecd20c2254d54f5da8c0738eacca61e6b6511268a8f2753e3148c6c706" +checksum = "6e27462b21279edf9a6a91f46ffbe125e9cdc58b901d2e08bf59b31a47d7d0ab" dependencies = [ "anyhow", "arrayvec 0.7.2", @@ -2473,7 +2583,9 @@ dependencies = [ "futures-channel", "futures-util", "hyper", - "jsonrpsee-types 0.10.1", + "jsonrpsee-types 0.13.1", + "parking_lot", + "rand 0.8.5", "rustc-hash", "serde", "serde_json", @@ -2485,27 +2597,68 @@ dependencies = [ [[package]] name = "jsonrpsee-core" -version = "0.13.1" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e27462b21279edf9a6a91f46ffbe125e9cdc58b901d2e08bf59b31a47d7d0ab" +checksum = "16efcd4477de857d4a2195a45769b2fe9ebb54f3ef5a4221d3b014a4fe33ec0b" dependencies = [ "anyhow", - "arrayvec 0.7.2", + "async-lock", "async-trait", "beef", "futures-channel", + "futures-timer", + "futures-util", + "jsonrpsee-types 0.14.0", + "rustc-hash", + "serde", + "serde_json", + "thiserror", + "tokio", + "tracing", +] + +[[package]] +name = "jsonrpsee-core" +version = "0.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3dc3e9cf2ba50b7b1d7d76a667619f82846caa39e8e8daa8a4962d74acaddca" +dependencies = [ + "anyhow", + "async-lock", + "async-trait", + "beef", + "futures-channel", + "futures-timer", "futures-util", "hyper", - "jsonrpsee-types 0.13.1", - "parking_lot", - "rand 0.8.5", + "jsonrpsee-types 0.15.1", "rustc-hash", "serde", "serde_json", - "soketto", "thiserror", "tokio", "tracing", + "tracing-futures", +] + +[[package]] +name = "jsonrpsee-http-client" +version = "0.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52f7c0e2333ab2115c302eeb4f137c8a4af5ab609762df68bbda8f06496677c9" +dependencies = [ + "async-trait", + "hyper", + "hyper-rustls 0.23.0", + "jsonrpsee-core 0.15.1", + "jsonrpsee-types 0.15.1", + "rustc-hash", + "serde", + "serde_json", + "thiserror", + "tokio", + "tracing", + "tracing-futures", ] [[package]] @@ -2541,9 +2694,9 @@ dependencies = [ [[package]] name = "jsonrpsee-types" -version = "0.10.1" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38b6aa52f322cbf20c762407629b8300f39bcc0cf0619840d9252a2f65fd2dd9" +checksum = "8fd11763134104122ddeb0f97e4bbe393058017dfb077db63fbf44b4dd0dd86e" dependencies = [ "anyhow", "beef", @@ -2555,9 +2708,23 @@ dependencies = [ [[package]] name = "jsonrpsee-types" -version = "0.13.1" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fd11763134104122ddeb0f97e4bbe393058017dfb077db63fbf44b4dd0dd86e" +checksum = "3bcf76cd316f5d3ad48138085af1f45e2c58c98e02f0779783dbb034d43f7c86" +dependencies = [ + "anyhow", + "beef", + "serde", + "serde_json", + "thiserror", + "tracing", +] + +[[package]] +name = "jsonrpsee-types" +version = "0.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e290bba767401b646812f608c099b922d8142603c9e73a50fb192d3ac86f4a0d" dependencies = [ "anyhow", "beef", @@ -2716,11 +2883,11 @@ dependencies = [ [[package]] name = "lru" -version = "0.7.7" +version = 
"0.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c84e6fe5655adc6ce00787cf7dcaf8dc4f998a0565d23eafc207a8b08ca3349a" +checksum = "e999beba7b6e8345721bd280141ed958096a2e4abdf74f67ff4ce49b4b54e47a" dependencies = [ - "hashbrown 0.11.2", + "hashbrown 0.12.1", ] [[package]] @@ -3511,6 +3678,20 @@ dependencies = [ "uint", ] +[[package]] +name = "primitives" +version = "0.1.0" +source = "git+https://github.com/ComposableFi/beefy-rs?branch=master#14eb83a953cbc55e15aaf0de35b3608a50050413" +dependencies = [ + "beefy-primitives", + "ckb-merkle-mountain-range", + "parity-scale-codec", + "rs_merkle", + "sp-core 6.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-mmr-primitives", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", +] + [[package]] name = "proc-macro-crate" version = "0.1.5" @@ -3975,6 +4156,15 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6" +[[package]] +name = "rustc_version" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" +dependencies = [ + "semver", +] + [[package]] name = "rustls" version = "0.19.1" @@ -5500,17 +5690,16 @@ checksum = "734676eb262c623cec13c3155096e08d1f8f29adce39ba17948b18dad1e54142" [[package]] name = "subxt" -version = "0.21.0" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ab902b8d1bf5ecdc16c78e1f7fdef77665d5ce77004b2be1f6ac4b4f679d987" +checksum = "e731c0245979a80f9090a89b43635e23f367f13a225695f286f307978db36f11" dependencies = [ "bitvec", "derivative", "frame-metadata", "futures", "hex", - "jsonrpsee 0.10.1", - "log", + "jsonrpsee 0.14.0", "parity-scale-codec", "parking_lot", "scale-info", @@ -5521,13 +5710,14 @@ dependencies = [ "subxt-macro", "subxt-metadata", "thiserror", + "tracing", ] [[package]] name = "subxt-codegen" -version = "0.21.0" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ac3c4e3565338616f009bc40419f45fa7d9472a5717fa8cce129777c709d1a1" +checksum = "4c9462b52d539cde2e0dbbd1c89d28079459ed790f42218c5bfc9d61c9575e32" dependencies = [ "darling", "frame-metadata", @@ -5543,9 +5733,9 @@ dependencies = [ [[package]] name = "subxt-macro" -version = "0.21.0" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "078edfe8f06cb00848e2e64e923fe809f345042c3d7ec13edcd7f0e617656a9b" +checksum = "38521809516f4c244b6f38ed13fc67ef6ada29a846fa26123a4206ff743f3461" dependencies = [ "darling", "proc-macro-error", @@ -5555,9 +5745,9 @@ dependencies = [ [[package]] name = "subxt-metadata" -version = "0.21.0" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2819a10a1a13bd9645419f59ac9d7cc8deb51052566b9d0c2157354ea44513d5" +checksum = "37b01bac35f2524ce590fa1438fb6c81a63df1b4c94f686be391afd8d02615b3" dependencies = [ "frame-metadata", "parity-scale-codec", @@ -5729,7 +5919,7 @@ dependencies = [ "http", "hyper", "hyper-proxy", - "hyper-rustls", + "hyper-rustls 0.22.1", "peg", "pin-project", "serde", diff --git a/modules/Cargo.toml b/modules/Cargo.toml index b8097968dd..936c1af35a 100644 --- a/modules/Cargo.toml +++ b/modules/Cargo.toml @@ -39,7 +39,7 @@ clock = ["tendermint/clock", "time/std"] # This feature grants access to development-time mocking libraries, 
such as `MockContext` or `MockHeader`. # Depends on the `testgen` suite for generating Tendermint light blocks. mocks = ["tendermint-testgen", "clock", "std", "sha3", "ripemd", "ics11_beefy"] -ics11_beefy = ["sp-io", "sp-core", "sp-std", "beefy-primitives", "sp-runtime", "sp-trie", "pallet-mmr-primitives", "beefy-client"] +ics11_beefy = ["sp-io", "sp-core", "sp-std", "beefy-primitives", "sp-runtime", "sp-trie", "pallet-mmr-primitives", "beefy-client", "beefy-client-primitives"] [dependencies] # Proto definitions for all IBC-related interfaces, e.g., connections or channels. @@ -63,6 +63,7 @@ derive_more = { version = "0.99.17", default-features = false, features = ["from uint = { version = "0.9", default-features = false } beefy-client = { package = "beefy-generic-client", git = "https://github.com/ComposableFi/beefy-rs", branch = "master", default-features = false, optional = true } +beefy-client-primitives = { package = "primitives", git = "https://github.com/ComposableFi/beefy-rs", branch = "master", default-features = false, optional = true } pallet-mmr-primitives = { package = "sp-mmr-primitives", git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.24", default-features = false, optional = true } beefy-primitives = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.24", default-features = false, optional = true } codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false } @@ -113,10 +114,11 @@ tendermint-rpc = { git = "https://github.com/composableFi/tendermint-rs", rev = tendermint-testgen = { git = "https://github.com/composableFi/tendermint-rs", rev = "5a74e0f8da4d3dab83cc04b5f1363b018cf3d9e8" } # Needed for generating (synthetic) light blocks. # Beefy Light Client testing dependencies sp-io = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.24"} -subxt = "0.21.0" +subxt = "0.22.0" tokio = { version = "1.17.0", features = ["full"] } serde_json = "1.0.74" -beefy-client = { package = "beefy-generic-client", git = "https://github.com/ComposableFi/beefy-rs", branch = "master", features = ["mocks"]} +beefy-client = { package = "beefy-generic-client", git = "https://github.com/ComposableFi/beefy-rs", branch = "master" } +beefy-queries = { git = "https://github.com/ComposableFi/beefy-rs", branch = "master" } sha3 = { version = "0.10.1" } ripemd = { version = "0.1.1" } diff --git a/modules/src/clients/host_functions.rs b/modules/src/clients/host_functions.rs index 151077c98a..2ebdeef626 100644 --- a/modules/src/clients/host_functions.rs +++ b/modules/src/clients/host_functions.rs @@ -61,7 +61,7 @@ pub struct HostFunctionsManager(PhantomData); // implementation for beefy host functions #[cfg(any(test, feature = "mocks", feature = "ics11_beefy"))] -impl beefy_client::traits::HostFunctions for HostFunctionsManager +impl beefy_client_primitives::HostFunctions for HostFunctionsManager where T: HostFunctionsProvider, { diff --git a/modules/src/clients/ics11_beefy/client_def.rs b/modules/src/clients/ics11_beefy/client_def.rs index 19af267084..5e2a4f03da 100644 --- a/modules/src/clients/ics11_beefy/client_def.rs +++ b/modules/src/clients/ics11_beefy/client_def.rs @@ -1,6 +1,6 @@ -use beefy_client::primitives::{ParachainHeader, ParachainsUpdateProof}; -use beefy_client::traits::ClientState as LightClientState; use beefy_client::BeefyLightClient; +use beefy_client_primitives::ClientState as LightClientState; +use beefy_client_primitives::{ParachainHeader, ParachainsUpdateProof}; use codec::{Decode, 
Encode}; use core::fmt::Debug; use pallet_mmr_primitives::BatchProof; diff --git a/modules/src/clients/ics11_beefy/error.rs b/modules/src/clients/ics11_beefy/error.rs index 004960ae7b..6d6c3181e2 100644 --- a/modules/src/clients/ics11_beefy/error.rs +++ b/modules/src/clients/ics11_beefy/error.rs @@ -6,7 +6,6 @@ use crate::core::ics23_commitment::error::Error as Ics23Error; use crate::core::ics24_host::error::ValidationError; use crate::core::ics24_host::identifier::ClientId; use crate::timestamp::{Timestamp, TimestampOverflowError}; -use beefy_client::error::BeefyClientError; use codec::Error as ScaleCodecError; use crate::Height; @@ -175,7 +174,7 @@ define_error! { VerificationError - { reason: BeefyClientError } + { reason: String } | e | { format_args!("verification failed: {:?}", e.reason) }, diff --git a/modules/src/clients/ics11_beefy/header.rs b/modules/src/clients/ics11_beefy/header.rs index b20408a2a4..0d311c53ed 100644 --- a/modules/src/clients/ics11_beefy/header.rs +++ b/modules/src/clients/ics11_beefy/header.rs @@ -9,7 +9,7 @@ use alloc::format; use alloc::string::ToString; use alloc::vec; use alloc::vec::Vec; -use beefy_client::primitives::{ +use beefy_client_primitives::{ BeefyNextAuthoritySet, Hash, MmrUpdateProof, PartialMmrLeaf, SignatureWithAuthorityIndex, SignedCommitment, }; diff --git a/modules/src/core/ics02_client/handler/update_client.rs b/modules/src/core/ics02_client/handler/update_client.rs index 090a404aa3..610f88a796 100644 --- a/modules/src/core/ics02_client/handler/update_client.rs +++ b/modules/src/core/ics02_client/handler/update_client.rs @@ -154,11 +154,8 @@ mod tests { use crate::test_utils::{get_dummy_account_id, Crypto}; use crate::timestamp::Timestamp; use crate::Height; - use beefy_client::NodesUtils; - use beefy_client::{ - runtime, - test_utils::{get_initial_client_state, get_mmr_update, get_parachain_headers}, - }; + use beefy_client_primitives::NodesUtils; + use beefy_queries::ClientWrapper; use codec::{Decode, Encode}; use subxt::rpc::{rpc_params, JsonValue, Subscription, SubscriptionClientT}; @@ -604,13 +601,16 @@ mod tests { .build::() .await .unwrap(); - let api = - client.clone().to_runtime_api::, - >>(); + let client_wrapper = ClientWrapper { + relay_client: client.clone(), + para_client: para_client.clone(), + beefy_activation_block: 0, + para_id: 2000, + }; + let mut count = 0; - let client_state = get_initial_client_state(Some(&api)).await; + let client_state = + ClientWrapper::::get_initial_client_state(Some(&client)).await; let beefy_client_state = BeefyClientState { chain_id: Default::default(), frozen_height: None, @@ -674,15 +674,18 @@ mod tests { ); let block_number = signed_commitment.commitment.block_number; - let (parachain_headers, batch_proof) = get_parachain_headers( - &client, - ¶_client, - block_number, - client_state.latest_beefy_height, - ) - .await; - - let mmr_update = get_mmr_update(&client, signed_commitment.clone()).await; + let (parachain_headers, batch_proof) = client_wrapper + .fetch_finalized_parachain_headers_at( + block_number, + client_state.latest_beefy_height, + ) + .await + .unwrap(); + + let mmr_update = client_wrapper + .fetch_mmr_update_proof_for(signed_commitment.clone()) + .await + .unwrap(); let mmr_size = NodesUtils::new(batch_proof.leaf_count).size(); diff --git a/modules/tests/runner/mod.rs b/modules/tests/runner/mod.rs index 8cf9d2f3cd..96a4e0ce11 100644 --- a/modules/tests/runner/mod.rs +++ b/modules/tests/runner/mod.rs @@ -1,7 +1,6 @@ pub mod step; use alloc::collections::btree_map::BTreeMap as 
HashMap; -use ibc::test_utils::Crypto; use core::convert::TryInto; use core::fmt::Debug; diff --git a/relayer-cli/Cargo.toml b/relayer-cli/Cargo.toml index e0c89b7eec..9ad50602e4 100644 --- a/relayer-cli/Cargo.toml +++ b/relayer-cli/Cargo.toml @@ -40,7 +40,7 @@ tokio = { version = "1.0", features = ["full"] } tracing = "0.1.34" tracing-subscriber = { version = "0.3.11", features = ["fmt", "env-filter", "json"]} eyre = "0.6.8" -color-eyre = "0.6" +color-eyre = "0.6.2" oneline-eyre = "0.1" futures = "0.3.21" toml = "0.5.9" From b97fcb0a5f6da3c1685f9eb96dd938e6e4cb9ae0 Mon Sep 17 00:00:00 2001 From: Web3 Smith <31099392+Wizdave97@users.noreply.github.com> Date: Wed, 3 Aug 2022 09:01:57 +0100 Subject: [PATCH 65/96] update beefy-client (#39) --- Cargo.lock | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1f7213e67f..dcb17244f6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -361,7 +361,7 @@ dependencies = [ [[package]] name = "beefy-generic-client" version = "0.1.0" -source = "git+https://github.com/ComposableFi/beefy-rs?branch=master#14eb83a953cbc55e15aaf0de35b3608a50050413" +source = "git+https://github.com/ComposableFi/beefy-rs?branch=master#f7a4724053f8dc22c2f75ad47d3fdd345f4230ad" dependencies = [ "beefy-primitives", "ckb-merkle-mountain-range", @@ -378,7 +378,6 @@ dependencies = [ "rs_merkle", "sp-core 6.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "sp-core-hashing 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "sp-io 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", "sp-mmr-primitives", "sp-runtime 6.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", @@ -409,7 +408,7 @@ dependencies = [ [[package]] name = "beefy-queries" version = "0.1.0" -source = "git+https://github.com/ComposableFi/beefy-rs?branch=master#14eb83a953cbc55e15aaf0de35b3608a50050413" +source = "git+https://github.com/ComposableFi/beefy-rs?branch=master#f7a4724053f8dc22c2f75ad47d3fdd345f4230ad" dependencies = [ "beefy-primitives", "color-eyre", @@ -3681,7 +3680,7 @@ dependencies = [ [[package]] name = "primitives" version = "0.1.0" -source = "git+https://github.com/ComposableFi/beefy-rs?branch=master#14eb83a953cbc55e15aaf0de35b3608a50050413" +source = "git+https://github.com/ComposableFi/beefy-rs?branch=master#f7a4724053f8dc22c2f75ad47d3fdd345f4230ad" dependencies = [ "beefy-primitives", "ckb-merkle-mountain-range", From f88119c01284245d1fb5535bfe3ae8cd1d5fdc96 Mon Sep 17 00:00:00 2001 From: Web3 Smith <31099392+Wizdave97@users.noreply.github.com> Date: Wed, 3 Aug 2022 09:10:43 +0100 Subject: [PATCH 66/96] update beefy-client (#40) * update beefy-client * patch inc-rs --- Cargo.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index dcb17244f6..63f84997df 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -361,7 +361,7 @@ dependencies = [ [[package]] name = "beefy-generic-client" version = "0.1.0" -source = "git+https://github.com/ComposableFi/beefy-rs?branch=master#f7a4724053f8dc22c2f75ad47d3fdd345f4230ad" +source = "git+https://github.com/ComposableFi/beefy-rs?branch=master#2593be1c5bd28beed37be52a9b3b0b6c9cb386ea" dependencies = [ "beefy-primitives", "ckb-merkle-mountain-range", @@ -408,7 +408,7 @@ dependencies = [ [[package]] name = "beefy-queries" version = "0.1.0" -source = 
"git+https://github.com/ComposableFi/beefy-rs?branch=master#f7a4724053f8dc22c2f75ad47d3fdd345f4230ad" +source = "git+https://github.com/ComposableFi/beefy-rs?branch=master#2593be1c5bd28beed37be52a9b3b0b6c9cb386ea" dependencies = [ "beefy-primitives", "color-eyre", @@ -3680,7 +3680,7 @@ dependencies = [ [[package]] name = "primitives" version = "0.1.0" -source = "git+https://github.com/ComposableFi/beefy-rs?branch=master#f7a4724053f8dc22c2f75ad47d3fdd345f4230ad" +source = "git+https://github.com/ComposableFi/beefy-rs?branch=master#2593be1c5bd28beed37be52a9b3b0b6c9cb386ea" dependencies = [ "beefy-primitives", "ckb-merkle-mountain-range", From 40ab161e20a3c87c0a50fa152885cf292b8068e4 Mon Sep 17 00:00:00 2001 From: Web3 Smith <31099392+Wizdave97@users.noreply.github.com> Date: Thu, 4 Aug 2022 12:52:48 +0100 Subject: [PATCH 67/96] Make consensus verification optional in connection handlers (#41) * make consensus verification optional in connection handlers * update beefy client --- Cargo.lock | 13 ++-- modules/src/clients/host_functions.rs | 25 ++++++- modules/src/clients/ics11_beefy/client_def.rs | 2 +- .../clients/ics11_beefy/consensus_state.rs | 73 ++++--------------- modules/src/clients/ics11_beefy/header.rs | 12 +-- .../ics03_connection/handler/conn_open_ack.rs | 6 +- .../ics03_connection/handler/conn_open_try.rs | 6 +- .../ics03_connection/msgs/conn_open_ack.rs | 27 ++++--- .../ics03_connection/msgs/conn_open_try.rs | 28 +++---- modules/src/test_utils.rs | 20 +++++ 10 files changed, 101 insertions(+), 111 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 63f84997df..4f2cec0d8c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -361,7 +361,7 @@ dependencies = [ [[package]] name = "beefy-generic-client" version = "0.1.0" -source = "git+https://github.com/ComposableFi/beefy-rs?branch=master#2593be1c5bd28beed37be52a9b3b0b6c9cb386ea" +source = "git+https://github.com/ComposableFi/beefy-rs?branch=master#6b99ac5459caaa1baf94cafa208fffe8e9dd0b54" dependencies = [ "beefy-primitives", "ckb-merkle-mountain-range", @@ -376,10 +376,10 @@ dependencies = [ "parity-scale-codec", "primitives", "rs_merkle", - "sp-core 6.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-core 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", "sp-core-hashing 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", "sp-mmr-primitives", - "sp-runtime 6.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-runtime 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", "subxt-codegen", "syn", @@ -408,7 +408,7 @@ dependencies = [ [[package]] name = "beefy-queries" version = "0.1.0" -source = "git+https://github.com/ComposableFi/beefy-rs?branch=master#2593be1c5bd28beed37be52a9b3b0b6c9cb386ea" +source = "git+https://github.com/ComposableFi/beefy-rs?branch=master#6b99ac5459caaa1baf94cafa208fffe8e9dd0b54" dependencies = [ "beefy-primitives", "color-eyre", @@ -427,7 +427,7 @@ dependencies = [ "serde_json", "sp-core 6.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "sp-core-hashing 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "sp-io 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-io 6.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "sp-mmr-primitives", "sp-runtime 6.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "sp-std 4.0.0 
(git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", @@ -3680,10 +3680,11 @@ dependencies = [ [[package]] name = "primitives" version = "0.1.0" -source = "git+https://github.com/ComposableFi/beefy-rs?branch=master#2593be1c5bd28beed37be52a9b3b0b6c9cb386ea" +source = "git+https://github.com/ComposableFi/beefy-rs?branch=master#6b99ac5459caaa1baf94cafa208fffe8e9dd0b54" dependencies = [ "beefy-primitives", "ckb-merkle-mountain-range", + "derive_more", "parity-scale-codec", "rs_merkle", "sp-core 6.0.0 (registry+https://github.com/rust-lang/crates.io-index)", diff --git a/modules/src/clients/host_functions.rs b/modules/src/clients/host_functions.rs index 2ebdeef626..cccb2d02d6 100644 --- a/modules/src/clients/host_functions.rs +++ b/modules/src/clients/host_functions.rs @@ -17,8 +17,7 @@ pub trait HostFunctionsProvider: Clone + Send + Sync + Default { /// Recover the ED25519 pubkey that produced this signature, given a arbitrarily sized message fn ed25519_verify(signature: &[u8; 64], msg: &[u8], pubkey: &[u8]) -> bool; - /// This function should verify membership in a trie proof using parity's sp-trie package - /// with a BlakeTwo256 Hasher + /// This function should verify membership in a trie proof using sp_state_machine's read_child_proof_check fn verify_membership_trie_proof( root: &[u8; 32], proof: &[Vec], @@ -26,14 +25,22 @@ pub trait HostFunctionsProvider: Clone + Send + Sync + Default { value: &[u8], ) -> Result<(), Error>; - /// This function should verify non membership in a trie proof using parity's sp-trie package - /// with a BlakeTwo256 Hasher + /// This function should verify non membership in a trie proof using sp_state_machine's read_child_proof_check fn verify_non_membership_trie_proof( root: &[u8; 32], proof: &[Vec], key: &[u8], ) -> Result<(), Error>; + /// This function should verify membership in a trie proof using parity's sp-trie package + /// with a BlakeTwo256 Hasher + fn verify_timestamp_extrinsic( + root: &[u8; 32], + proof: &[Vec], + key: &[u8], + value: &[u8], + ) -> Result<(), Error>; + /// Conduct a 256-bit Sha2 hash fn sha256_digest(data: &[u8]) -> [u8; 32]; @@ -75,6 +82,16 @@ where ) -> Option> { T::secp256k1_ecdsa_recover_compressed(signature, value) } + + fn verify_timestamp_extrinsic( + root: sp_core::H256, + proof: &[Vec], + key: &[u8], + value: &[u8], + ) -> Result<(), beefy_client_primitives::error::BeefyClientError> { + T::verify_timestamp_extrinsic(root.as_fixed_bytes(), proof, key, value) + .map_err(|_| From::from("Timestamp verification failed".to_string())) + } } // implementation for tendermint functions diff --git a/modules/src/clients/ics11_beefy/client_def.rs b/modules/src/clients/ics11_beefy/client_def.rs index 5e2a4f03da..cc61f63ec2 100644 --- a/modules/src/clients/ics11_beefy/client_def.rs +++ b/modules/src/clients/ics11_beefy/client_def.rs @@ -152,7 +152,7 @@ impl ClientDef for BeefyClient(header)?), + AnyConsensusState::Beefy(ConsensusState::from_header(header)?), )) } diff --git a/modules/src/clients/ics11_beefy/consensus_state.rs b/modules/src/clients/ics11_beefy/consensus_state.rs index 844523efd3..debaeb1130 100644 --- a/modules/src/clients/ics11_beefy/consensus_state.rs +++ b/modules/src/clients/ics11_beefy/consensus_state.rs @@ -7,7 +7,6 @@ use tendermint::time::Time; use tendermint_proto::google::protobuf as tpb; use tendermint_proto::Protobuf; -use crate::clients::host_functions::HostFunctionsProvider; use ibc_proto::ibc::lightclients::beefy::v1::ConsensusState as RawConsensusState; use 
crate::clients::ics11_beefy::error::Error; @@ -16,8 +15,6 @@ use crate::core::ics02_client::client_consensus::AnyConsensusState; use crate::core::ics02_client::client_type::ClientType; use crate::core::ics23_commitment::commitment::CommitmentRoot; -// This is a constant that comes from pallet-ibc -pub const IBC_CONSENSUS_ID: [u8; 4] = *b"/IBC"; #[derive(Clone, Debug, PartialEq, Eq, Serialize)] pub struct ConsensusState { pub timestamp: Time, @@ -32,34 +29,26 @@ impl ConsensusState { } } - #[cfg(not(test))] - pub fn from_header( - header: ParachainHeader, - ) -> Result { + pub fn from_header(header: ParachainHeader) -> Result { use crate::clients::ics11_beefy::header::decode_timestamp_extrinsic; use crate::timestamp::Timestamp; use sp_runtime::SaturatedConversion; - let root = { - header - .parachain_header - .digest - .logs - .iter() - .filter_map(|digest| digest.as_consensus()) - .find(|(id, _value)| id == &IBC_CONSENSUS_ID) - .map(|(.., root)| root.to_vec()) - .ok_or_else(|| { - Error::invalid_header("cannot find ibc commitment root".to_string()) - })? - }; - - let timestamp = decode_timestamp_extrinsic::(&header)?; + let root = header.parachain_header.state_root.0.to_vec(); + + let timestamp = decode_timestamp_extrinsic(&header)?; let duration = core::time::Duration::from_millis(timestamp); let timestamp = Timestamp::from_nanoseconds(duration.as_nanos().saturated_into::()) - .unwrap_or_default() + .map_err(|e| { + Error::invalid_header(format!( + "Failed to decode timestamp extrinsic, got {}", + e.to_string() + )) + })? .into_tm_time() .ok_or_else(|| { - Error::invalid_header("cannot decode timestamp extrinsic".to_string()) + Error::invalid_header( + "Error decoding Timestamp, timestamp cannot be zero".to_string(), + ) })?; Ok(Self { @@ -67,42 +56,6 @@ impl ConsensusState { timestamp, }) } - - #[cfg(test)] - /// Leaving this here because there's no ibc commitment root in the runtime header that will be used in - /// testing - pub fn from_header( - header: ParachainHeader, - ) -> Result { - use crate::clients::ics11_beefy::header::decode_timestamp_extrinsic; - use crate::timestamp::Timestamp; - use sp_runtime::SaturatedConversion; - let root = { - header - .parachain_header - .digest - .logs - .iter() - .filter_map(|digest| digest.as_consensus()) - .find(|(id, _value)| id == &IBC_CONSENSUS_ID) - .map(|(.., root)| root.to_vec()) - .unwrap_or_default() - }; - - let timestamp = decode_timestamp_extrinsic::(&header)?; - let duration = core::time::Duration::from_millis(timestamp); - let timestamp = Timestamp::from_nanoseconds(duration.as_nanos().saturated_into::()) - .unwrap_or_default() - .into_tm_time() - .ok_or(Error::invalid_header( - "cannot decode timestamp extrinsic".to_string(), - ))?; - - Ok(Self { - root: root.into(), - timestamp, - }) - } } impl crate::core::ics02_client::client_consensus::ConsensusState for ConsensusState { diff --git a/modules/src/clients/ics11_beefy/header.rs b/modules/src/clients/ics11_beefy/header.rs index 0d311c53ed..fee50b50e3 100644 --- a/modules/src/clients/ics11_beefy/header.rs +++ b/modules/src/clients/ics11_beefy/header.rs @@ -1,11 +1,9 @@ use prost::Message; use tendermint_proto::Protobuf; -use crate::clients::host_functions::HostFunctionsProvider; use crate::clients::ics11_beefy::error::Error; use crate::core::ics02_client::client_type::ClientType; use crate::core::ics02_client::header::AnyHeader; -use alloc::format; use alloc::string::ToString; use alloc::vec; use alloc::vec::Vec; @@ -395,18 +393,10 @@ pub fn decode_header(buf: B) -> Result { } /// 
Attempt to extract the timestamp extrinsic from the parachain header
-pub fn decode_timestamp_extrinsic(
-    header: &ParachainHeader,
-) -> Result {
-    let proof = &*header.extrinsic_proof;
+pub fn decode_timestamp_extrinsic(header: &ParachainHeader) -> Result {
     let ext = &*header.timestamp_extrinsic;
 
-    let extrinsic_root = header.parachain_header.extrinsics_root;
-
     // Timestamp extrinsic should be the first inherent and hence the first extrinsic
     // https://github.com/paritytech/substrate/blob/d602397a0bbb24b5d627795b797259a44a5e29e9/primitives/trie/src/lib.rs#L99-L101
-    let key = codec::Encode::encode(&Compact(0u32));
-    HostFunctions::verify_membership_trie_proof(extrinsic_root.as_fixed_bytes(), proof, &*key, ext)
-        .map_err(|e| Error::timestamp_extrinsic(format!("Proof Verification failed {:?}", e)))?;
     // Decoding from the [2..] because the timestamp inherent has two extra bytes before the call that represent the
     // call length and the extrinsic version.
     let (_, _, timestamp): (u8, u8, Compact) = codec::Decode::decode(&mut &ext[2..])
diff --git a/modules/src/core/ics03_connection/handler/conn_open_ack.rs b/modules/src/core/ics03_connection/handler/conn_open_ack.rs
index dbfb34f598..a157ca212f 100644
--- a/modules/src/core/ics03_connection/handler/conn_open_ack.rs
+++ b/modules/src/core/ics03_connection/handler/conn_open_ack.rs
@@ -20,8 +20,10 @@ pub(crate) fn process(
 ) -> HandlerResult {
     let mut output = HandlerOutput::builder();
 
-    // Check the client's (consensus state) proof height.
-    check_client_consensus_height(ctx, msg.consensus_height())?;
+    // Check the client's (consensus state) proof height if a consensus proof is provided
+    if msg.proofs.consensus_proof().is_some() {
+        check_client_consensus_height(ctx, msg.consensus_height())?;
+    }
 
     // Validate the connection end.
     let mut conn_end = ctx.connection_end(&msg.connection_id)?;
diff --git a/modules/src/core/ics03_connection/handler/conn_open_try.rs b/modules/src/core/ics03_connection/handler/conn_open_try.rs
index 7ac8de4675..cd138a2e05 100644
--- a/modules/src/core/ics03_connection/handler/conn_open_try.rs
+++ b/modules/src/core/ics03_connection/handler/conn_open_try.rs
@@ -21,8 +21,10 @@ pub(crate) fn process(
 ) -> HandlerResult {
     let mut output = HandlerOutput::builder();
 
-    // Check that consensus height (for client proof) in message is not too advanced nor too old.
-    check_client_consensus_height(ctx, msg.consensus_height())?;
+    // Check that the consensus height (for the client proof), if provided, is not too advanced nor too old.
+    if msg.proofs.consensus_proof().is_some() {
+        check_client_consensus_height(ctx, msg.consensus_height())?;
+    }
 
     // Unwrap the old connection end (if any) and its identifier.
     let (mut new_connection_end, conn_id) = match &msg.previous_connection_id {
diff --git a/modules/src/core/ics03_connection/msgs/conn_open_ack.rs b/modules/src/core/ics03_connection/msgs/conn_open_ack.rs
index 60a42712ff..f95f7c3317 100644
--- a/modules/src/core/ics03_connection/msgs/conn_open_ack.rs
+++ b/modules/src/core/ics03_connection/msgs/conn_open_ack.rs
@@ -56,17 +56,20 @@ impl TryFrom for MsgConnectionOpenAck {
     type Error = Error;
 
     fn try_from(msg: RawMsgConnectionOpenAck) -> Result {
-        let consensus_height = msg
-            .consensus_height
-            .ok_or_else(Error::missing_consensus_height)?
- .into(); - let consensus_proof_obj = ConsensusProof::new( - msg.proof_consensus - .try_into() - .map_err(Error::invalid_proof)?, - consensus_height, - ) - .map_err(Error::invalid_proof)?; + let consensus_proof_obj = { + let proof_bytes: Option = msg.proof_consensus.try_into().ok(); + let consensus_height = msg + .consensus_height + .map(|height| Height::new(height.revision_number, height.revision_height)); + if proof_bytes.is_some() && consensus_height.is_some() { + Some( + ConsensusProof::new(proof_bytes.unwrap(), consensus_height.unwrap()) + .map_err(Error::invalid_proof)?, + ) + } else { + None + } + }; let proof_height = msg .proof_height @@ -94,7 +97,7 @@ impl TryFrom for MsgConnectionOpenAck { proofs: Proofs::new( msg.proof_try.try_into().map_err(Error::invalid_proof)?, Some(client_proof), - Option::from(consensus_proof_obj), + consensus_proof_obj, None, proof_height, ) diff --git a/modules/src/core/ics03_connection/msgs/conn_open_try.rs b/modules/src/core/ics03_connection/msgs/conn_open_try.rs index 4709733489..f3593b3810 100644 --- a/modules/src/core/ics03_connection/msgs/conn_open_try.rs +++ b/modules/src/core/ics03_connection/msgs/conn_open_try.rs @@ -73,18 +73,20 @@ impl TryFrom for MsgConnectionOpenTry { .transpose() .map_err(Error::invalid_identifier)?; - let consensus_height = msg - .consensus_height - .ok_or_else(Error::missing_consensus_height)? - .into(); - - let consensus_proof_obj = ConsensusProof::new( - msg.proof_consensus - .try_into() - .map_err(Error::invalid_proof)?, - consensus_height, - ) - .map_err(Error::invalid_proof)?; + let consensus_proof_obj = { + let proof_bytes: Option = msg.proof_consensus.try_into().ok(); + let consensus_height = msg + .consensus_height + .map(|height| Height::new(height.revision_number, height.revision_height)); + if proof_bytes.is_some() && consensus_height.is_some() { + Some( + ConsensusProof::new(proof_bytes.unwrap(), consensus_height.unwrap()) + .map_err(Error::invalid_proof)?, + ) + } else { + None + } + }; let proof_height = msg .proof_height @@ -120,7 +122,7 @@ impl TryFrom for MsgConnectionOpenTry { proofs: Proofs::new( msg.proof_init.try_into().map_err(Error::invalid_proof)?, Some(client_proof), - Some(consensus_proof_obj), + consensus_proof_obj, None, proof_height, ) diff --git a/modules/src/test_utils.rs b/modules/src/test_utils.rs index 41577be86c..3281a0045c 100644 --- a/modules/src/test_utils.rs +++ b/modules/src/test_utils.rs @@ -191,6 +191,26 @@ impl HostFunctionsProvider for Crypto { res.copy_from_slice(&hash); res } + + fn verify_timestamp_extrinsic( + root: &[u8; 32], + proof: &[Vec], + key: &[u8], + value: &[u8], + ) -> Result<(), Ics02Error> { + let root = sp_core::H256::from_slice(root); + sp_io::trie::blake2_256_verify_proof( + root, + proof, + key, + value, + sp_core::storage::StateVersion::V0, + ) + .then(|| ()) + .ok_or_else(|| { + Ics02Error::implementation_specific("timestamp verification failed".to_string()) + }) + } } impl Ics20Keeper for DummyTransferModule { From d8dd882ef4338d6fea2ee4cafb37c25e89f45828 Mon Sep 17 00:00:00 2001 From: Seun Lanlege Date: Thu, 4 Aug 2022 17:20:08 +0100 Subject: [PATCH 68/96] bump beefy-rs --- Cargo.lock | 424 +++++++++++++++++++++++---------------------- modules/Cargo.toml | 11 +- 2 files changed, 220 insertions(+), 215 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4f2cec0d8c..280550ca71 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -34,7 +34,7 @@ dependencies = [ "toml", "tracing", "tracing-log", - "tracing-subscriber 0.3.14", + "tracing-subscriber 0.3.15", 
"wait-timeout", ] @@ -118,9 +118,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.58" +version = "1.0.59" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb07d2053ccdbe10e2af2995a2f116c1330396493dc1269f6a91d0ae82e19704" +checksum = "c91f1f46651137be86f3a2b9a8359f9ab421d04d941c62b5982e1ca21113adf9" [[package]] name = "approx" @@ -133,9 +133,9 @@ dependencies = [ [[package]] name = "arc-swap" -version = "1.5.0" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5d78ce20460b82d3fa150275ed9d55e21064fc7951177baacf86a145c4a4b1f" +checksum = "983cd8b9d4b02a6dc6ffa557262eb5858a27a0038ffffe21a0f133eaa819a164" [[package]] name = "arrayref" @@ -213,9 +213,9 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.56" +version = "0.1.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96cf8829f67d2eab0b2dfa42c5d0ef737e0724e4a82b01b3e292456202b19716" +checksum = "76464446b8bc32758d7e88ee1a804d9914cd9b1cb264c029899680b0be29826f" dependencies = [ "proc-macro2", "quote", @@ -257,9 +257,9 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "axum" -version = "0.5.11" +version = "0.5.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2cc6e8e8c993cb61a005fab8c1e5093a29199b7253b05a6883999312935c1ff" +checksum = "6b9496f0c1d1afb7a2af4338bbe1d969cddfead41d87a9fb3aaa6d0bbc7af648" dependencies = [ "async-trait", "axum-core", @@ -269,7 +269,7 @@ dependencies = [ "http", "http-body", "hyper", - "itoa 1.0.2", + "itoa 1.0.3", "matchit", "memchr", "mime", @@ -286,9 +286,9 @@ dependencies = [ [[package]] name = "axum-core" -version = "0.2.6" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf4d047478b986f14a13edad31a009e2e05cb241f9805d0d75e4cba4e129ad4d" +checksum = "e4f44a0e6200e9d11a1cdc989e4b358f6e3d354fbf48478f345a17f4e43f8635" dependencies = [ "async-trait", "bytes", @@ -300,9 +300,9 @@ dependencies = [ [[package]] name = "backtrace" -version = "0.3.65" +version = "0.3.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11a17d453482a265fd5f8479f2a3f405566e6ca627837aaddb85af8b1ab8ef61" +checksum = "cab84319d616cfb654d03394f38ab7e6f0919e181b1b57e1fd15e7fb4077d9a7" dependencies = [ "addr2line", "cc", @@ -361,7 +361,7 @@ dependencies = [ [[package]] name = "beefy-generic-client" version = "0.1.0" -source = "git+https://github.com/ComposableFi/beefy-rs?branch=master#6b99ac5459caaa1baf94cafa208fffe8e9dd0b54" +source = "git+https://github.com/ComposableFi/beefy-rs?rev=6b99ac5459caaa1baf94cafa208fffe8e9dd0b54#6b99ac5459caaa1baf94cafa208fffe8e9dd0b54" dependencies = [ "beefy-primitives", "ckb-merkle-mountain-range", @@ -408,7 +408,7 @@ dependencies = [ [[package]] name = "beefy-queries" version = "0.1.0" -source = "git+https://github.com/ComposableFi/beefy-rs?branch=master#6b99ac5459caaa1baf94cafa208fffe8e9dd0b54" +source = "git+https://github.com/ComposableFi/beefy-rs?rev=6b99ac5459caaa1baf94cafa208fffe8e9dd0b54#6b99ac5459caaa1baf94cafa208fffe8e9dd0b54" dependencies = [ "beefy-primitives", "color-eyre", @@ -467,9 +467,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitvec" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1489fcb93a5bb47da0462ca93ad252ad6af2145cce58d10d46a83931ba9f016b" +checksum = 
"1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c" dependencies = [ "funty", "radium", @@ -515,7 +515,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" dependencies = [ "block-padding 0.2.1", - "generic-array 0.14.5", + "generic-array 0.14.6", ] [[package]] @@ -524,7 +524,7 @@ version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0bf7fe51849ea569fd452f37822f606a5cabb684dc918707a0193fd4664ff324" dependencies = [ - "generic-array 0.14.5", + "generic-array 0.14.6", ] [[package]] @@ -659,9 +659,9 @@ checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" [[package]] name = "bytes" -version = "1.1.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4872d67bab6358e59559027aa3b9157c53d9358c51423c17554809a8858e0f8" +checksum = "ec8a7b6a70fde80372154c65702f00a0f56f3e1c36abbc6c440484be248856db" dependencies = [ "serde", ] @@ -723,13 +723,14 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "chrono" -version = "0.4.19" +version = "0.4.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "670ad68c9088c2a963aaa298cb369688cf3f9465ce5e2d4ca10e6e0098a1ce73" +checksum = "6127248204b9aba09a362f6c930ef6a78f2c1b2215f8a7b398c06e1083f17af0" dependencies = [ - "libc", + "js-sys", "num-integer", "num-traits", + "wasm-bindgen", "winapi", ] @@ -750,9 +751,9 @@ dependencies = [ [[package]] name = "clap" -version = "3.2.8" +version = "3.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "190814073e85d238f31ff738fcb0bf6910cedeb73376c87cd69291028966fd83" +checksum = "a3dbbb6653e7c55cc8595ad3e1f7be8f32aba4eb7ff7f0fd1163d4f3d137c0a9" dependencies = [ "atty", "bitflags", @@ -776,9 +777,9 @@ dependencies = [ [[package]] name = "clap_derive" -version = "3.2.7" +version = "3.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "759bf187376e1afa7b85b959e6a664a3e7a95203415dba952ad19139e798f902" +checksum = "9ba52acd3b0a5c33aeada5cdaa3267cdc7c594a98731d4268cdc1532f4264cb4" dependencies = [ "heck", "proc-macro-error", @@ -825,14 +826,13 @@ dependencies = [ [[package]] name = "console" -version = "0.15.0" +version = "0.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a28b32d32ca44b70c3e4acd7db1babf555fa026e385fb95f18028f88848b3c31" +checksum = "89eab4d20ce20cea182308bca13088fecea9c05f6776cf287205d41a0ed3c847" dependencies = [ "encode_unicode", "libc", "once_cell", - "regex", "terminal_size", "unicode-width", "winapi", @@ -903,23 +903,23 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.5" +version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c02a4d71819009c192cf4872265391563fd6a84c81ff2c0f2a7026ca4c1d85c" +checksum = "c2dd04ddaf88237dc3b8d8f9a3c1004b506b54b3313403944054d23c0870c521" dependencies = [ "cfg-if 1.0.0", - "crossbeam-utils 0.8.10", + "crossbeam-utils 0.8.11", ] [[package]] name = "crossbeam-deque" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6455c0ca19f0d2fbf751b908d5c55c1f5cbc65e03c4225427254b46890bdde1e" +checksum = "715e8152b692bba2d374b53d4875445368fdf21a94751410af607a5ac677d1fc" dependencies = [ "cfg-if 1.0.0", - "crossbeam-epoch 0.9.9", - "crossbeam-utils 0.8.10", + 
"crossbeam-epoch 0.9.10", + "crossbeam-utils 0.8.11", ] [[package]] @@ -939,13 +939,13 @@ dependencies = [ [[package]] name = "crossbeam-epoch" -version = "0.9.9" +version = "0.9.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07db9d94cbd326813772c968ccd25999e5f8ae22f4f8d1b11effa37ef6ce281d" +checksum = "045ebe27666471bb549370b4b0b3e51b07f56325befa4284db65fc89c02511b1" dependencies = [ "autocfg", "cfg-if 1.0.0", - "crossbeam-utils 0.8.10", + "crossbeam-utils 0.8.11", "memoffset 0.6.5", "once_cell", "scopeguard", @@ -964,9 +964,9 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.10" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d82ee10ce34d7bc12c2122495e7593a9c41347ecdd64185af4ecf72cb1a7f83" +checksum = "51887d4adc7b564537b15adcfb307936f8075dfcd5f00dde9a9f1d29383682bc" dependencies = [ "cfg-if 1.0.0", "once_cell", @@ -984,7 +984,7 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "03c6a1d5fa1de37e071642dfa44ec552ca5b299adb128fab16138e24b548fd21" dependencies = [ - "generic-array 0.14.5", + "generic-array 0.14.6", "rand_core 0.6.3", "subtle", "zeroize", @@ -992,11 +992,11 @@ dependencies = [ [[package]] name = "crypto-common" -version = "0.1.4" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5999502d32b9c48d492abe66392408144895020ec4709e549e840799f3bb74c0" +checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ - "generic-array 0.14.5", + "generic-array 0.14.6", "typenum", ] @@ -1006,7 +1006,7 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b584a330336237c1eecd3e94266efb216c56ed91225d634cb2991c5f3fd1aeab" dependencies = [ - "generic-array 0.14.5", + "generic-array 0.14.6", "subtle", ] @@ -1016,7 +1016,7 @@ version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b1d1a86f49236c215f271d40892d5fc950490551400b02ef360692c29815c714" dependencies = [ - "generic-array 0.14.5", + "generic-array 0.14.6", "subtle", ] @@ -1158,9 +1158,9 @@ dependencies = [ [[package]] name = "dialoguer" -version = "0.10.1" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8c8ae48e400addc32a8710c8d62d55cb84249a7d58ac4cd959daecfbaddc545" +checksum = "a92e7e37ecef6857fdc0c0c5d42fd5b0938e46590c2183cc92dd310a6d078eb1" dependencies = [ "console", "tempfile", @@ -1182,7 +1182,7 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" dependencies = [ - "generic-array 0.14.5", + "generic-array 0.14.6", ] [[package]] @@ -1266,9 +1266,9 @@ dependencies = [ [[package]] name = "dyn-clone" -version = "1.0.6" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "140206b78fb2bc3edbcfc9b5ccbd0b30699cfe8d348b8b31b330e47df5291a5a" +checksum = "4f94fa09c2aeea5b8839e414b7b841bf429fd25b9c522116ac97ee87856d88b2" [[package]] name = "ecdsa" @@ -1334,7 +1334,7 @@ dependencies = [ "crypto-bigint", "der", "ff", - "generic-array 0.14.5", + "generic-array 0.14.6", "group", "rand_core 0.6.3", "sec1", @@ -1400,9 +1400,9 @@ checksum = "e88a8acf291dafb59c2d96e8f59828f3838bb1a70398823ade51a84de6a6deed" [[package]] name = "fastrand" -version = "1.7.0" +version = "1.8.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3fcf0cee53519c866c09b5de1f6c56ff9d647101f81c1964fa632e148896cdf" +checksum = "a7a407cfaa3385c4ae6b23e84623d48c2798d06e3e6a1878f7f59f17b3f86499" dependencies = [ "instant", ] @@ -1464,14 +1464,14 @@ dependencies = [ [[package]] name = "flume" -version = "0.10.13" +version = "0.10.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ceeb589a3157cac0ab8cc585feb749bd2cea5cb55a6ee802ad72d9fd38303da" +checksum = "1657b4441c3403d9f7b3409e47575237dac27b1b5726df654a6ecbf92f0f7577" dependencies = [ "futures-core", "futures-sink", "pin-project", - "spin 0.9.3", + "spin 0.9.4", ] [[package]] @@ -1572,7 +1572,7 @@ version = "4.0.0-dev" source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" dependencies = [ "frame-support-procedural-tools-derive", - "proc-macro-crate 1.1.3", + "proc-macro-crate 1.2.0", "proc-macro2", "quote", "syn", @@ -1724,9 +1724,9 @@ dependencies = [ [[package]] name = "generic-array" -version = "0.14.5" +version = "0.14.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd48d33ec7f05fbfa152300fdad764757cbded343c1aa1cff2fbaf4134851803" +checksum = "bff49e947297f3312447abdca79f45f4738097cc82b06e72054d2223f601f1b9" dependencies = [ "typenum", "version_check", @@ -1760,9 +1760,9 @@ dependencies = [ [[package]] name = "gimli" -version = "0.26.1" +version = "0.26.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78cc372d058dcf6d5ecd98510e7fbc9e5aec4d21de70f65fea8fecebcd881bd4" +checksum = "22030e2c5a68ec659fde1e949a745124b48e6fa8b045b7ed5bd1fe4ccc5c4e5d" [[package]] name = "glob" @@ -1868,9 +1868,9 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.12.1" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db0d4cf898abf0081f964436dc980e96670a0f36863e4b83aaacdb65c9d7ccc3" +checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" dependencies = [ "ahash", ] @@ -1963,7 +1963,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "17ea0a1394df5b6574da6e0c1ade9e78868c9fb0a4e5ef4428e32da4676b85b1" dependencies = [ "digest 0.9.0", - "generic-array 0.14.5", + "generic-array 0.14.6", "hmac 0.8.1", ] @@ -1975,7 +1975,7 @@ checksum = "75f43d41e26995c17e71ee126451dd3941010b0514a81a9d11f3b341debc2399" dependencies = [ "bytes", "fnv", - "itoa 1.0.2", + "itoa 1.0.3", ] [[package]] @@ -2025,9 +2025,9 @@ dependencies = [ [[package]] name = "hyper" -version = "0.14.19" +version = "0.14.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42dc3c131584288d375f2d07f822b0cb012d8c6fb899a5b9fdb3cb7eb9b6004f" +checksum = "02c929dc5c39e335a03c405292728118860721b10190d98c2a0f0efd5baafbac" dependencies = [ "bytes", "futures-channel", @@ -2038,7 +2038,7 @@ dependencies = [ "http-body", "httparse", "httpdate", - "itoa 1.0.2", + "itoa 1.0.3", "pin-project-lite", "socket2", "tokio", @@ -2097,7 +2097,7 @@ dependencies = [ "rustls-native-certs 0.6.2", "tokio", "tokio-rustls 0.23.4", - "webpki-roots 0.22.3", + "webpki-roots 0.22.4", ] [[package]] @@ -2139,7 +2139,7 @@ dependencies = [ "serde_derive", "serde_json", "sha2 0.10.2", - "sha3 0.10.1", + "sha3 0.10.2", "sp-core 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", "sp-io 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", "sp-mmr-primitives", @@ -2157,7 
+2157,7 @@ dependencies = [ "time", "tokio", "tracing", - "tracing-subscriber 0.3.14", + "tracing-subscriber 0.3.15", "uint", ] @@ -2248,7 +2248,7 @@ dependencies = [ "toml", "tonic", "tracing", - "tracing-subscriber 0.3.14", + "tracing-subscriber 0.3.15", "uuid 1.1.2", ] @@ -2292,7 +2292,7 @@ dependencies = [ "tokio", "toml", "tracing", - "tracing-subscriber 0.3.14", + "tracing-subscriber 0.3.15", ] [[package]] @@ -2354,7 +2354,7 @@ dependencies = [ "tokio", "toml", "tracing", - "tracing-subscriber 0.3.14", + "tracing-subscriber 0.3.15", ] [[package]] @@ -2431,7 +2431,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "10a35a97730320ffe8e2d410b5d3b69279b98d2c14bdb8b70ea89ecf7888d41e" dependencies = [ "autocfg", - "hashbrown 0.12.1", + "hashbrown 0.12.3", ] [[package]] @@ -2478,15 +2478,15 @@ checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4" [[package]] name = "itoa" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "112c678d4050afce233f4f2852bb2eb519230b3cf12f33585275537d7e41578d" +checksum = "6c8af84674fe1f223a982c933a0ee1086ac4d4052aa0fb8060c12c6ad838e754" [[package]] name = "js-sys" -version = "0.3.58" +version = "0.3.59" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3fac17f7123a73ca62df411b1bf727ccc805daa070338fda671c86dac1bdc27" +checksum = "258451ab10b34f8af53416d1fdab72c22e805f0c92a1136d59470ec0b11138b2" dependencies = [ "wasm-bindgen", ] @@ -2545,7 +2545,7 @@ dependencies = [ "tokio-rustls 0.23.4", "tokio-util", "tracing", - "webpki-roots 0.22.3", + "webpki-roots 0.22.4", ] [[package]] @@ -2566,7 +2566,7 @@ dependencies = [ "tokio-rustls 0.23.4", "tokio-util", "tracing", - "webpki-roots 0.22.3", + "webpki-roots 0.22.4", ] [[package]] @@ -2685,7 +2685,7 @@ version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b8d7f449cab3b747f12c3efc27f5cad537f3b597c6a3838b0fac628f4bf730a" dependencies = [ - "proc-macro-crate 1.1.3", + "proc-macro-crate 1.2.0", "proc-macro2", "quote", "syn", @@ -2787,21 +2787,21 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.126" +version = "0.2.127" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "349d5a591cd28b49e1d1037471617a32ddcda5731b99419008085f72d5a53836" +checksum = "505e71a4706fa491e9b1b55f51b95d4037d0821ee40131190475f692b35b009b" [[package]] name = "libm" -version = "0.2.2" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33a33a362ce288760ec6a508b94caaec573ae7d3bbbd91b87aa0bad4456839db" +checksum = "da83a57f3f5ba3680950aa3cbc806fc297bc0b289d42e8942ed528ace71b8145" [[package]] name = "libsecp256k1" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0452aac8bab02242429380e9b2f94ea20cea2b37e2c1777a1358799bbe97f37" +checksum = "95b09eff1b35ed3b33b877ced3a691fc7a481919c7e29c53c906226fcf55e2a1" dependencies = [ "arrayref", "base64", @@ -2886,7 +2886,7 @@ version = "0.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e999beba7b6e8345721bd280141ed958096a2e4abdf74f67ff4ce49b4b54e47a" dependencies = [ - "hashbrown 0.12.1", + "hashbrown 0.12.3", ] [[package]] @@ -2974,7 +2974,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6566c70c1016f525ced45d7b7f97730a2bafb037c788211d0c186ef5b2189f0a" 
dependencies = [ "hash-db", - "hashbrown 0.12.1", + "hashbrown 0.12.3", "parity-util-mem", ] @@ -3066,7 +3066,7 @@ dependencies = [ "tempfile", "thiserror", "tracing", - "tracing-subscriber 0.3.14", + "tracing-subscriber 0.3.15", "ureq", ] @@ -3078,7 +3078,7 @@ checksum = "975fa04238144061e7f8df9746b2e9cd93ef85881da5548d842a7c6a4b614415" dependencies = [ "crossbeam-channel", "crossbeam-epoch 0.8.2", - "crossbeam-utils 0.8.10", + "crossbeam-utils 0.8.11", "num_cpus", "once_cell", "parking_lot", @@ -3283,9 +3283,9 @@ dependencies = [ [[package]] name = "object" -version = "0.28.4" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e42c982f2d955fac81dd7e1d0e1426a7d702acd9c98d19ab01083a6a0328c424" +checksum = "21158b2c33aa6d4561f1c0a6ea283ca92bc54802a93b263e910746d679a7eb53" dependencies = [ "memchr", ] @@ -3357,9 +3357,9 @@ dependencies = [ [[package]] name = "os_str_bytes" -version = "6.1.0" +version = "6.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21326818e99cfe6ce1e524c2a805c189a99b5ae555a35d19f9a284b427d86afa" +checksum = "648001efe5d5c0102d8cea768e348da85d90af8ba91f0bea908f157951493cd4" [[package]] name = "owo-colors" @@ -3497,7 +3497,7 @@ version = "3.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9299338969a3d2f491d65f140b00ddec470858402f888af98e8642fb5e8965cd" dependencies = [ - "proc-macro-crate 1.1.3", + "proc-macro-crate 1.2.0", "proc-macro2", "quote", "syn", @@ -3510,7 +3510,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c32561d248d352148124f036cac253a644685a21dc9fea383eb4907d7bd35a8f" dependencies = [ "cfg-if 1.0.0", - "hashbrown 0.12.1", + "hashbrown 0.12.3", "impl-trait-for-tuples", "parity-util-mem-derive", "parking_lot", @@ -3560,9 +3560,9 @@ dependencies = [ [[package]] name = "paste" -version = "1.0.7" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c520e05135d6e763148b6426a837e239041653ba7becd2e538c076c738025fc" +checksum = "9423e2b32f7a043629287a536f21951e8c6a82482d0acb1eeebfc90bc2225b22" [[package]] name = "pbkdf2" @@ -3680,7 +3680,7 @@ dependencies = [ [[package]] name = "primitives" version = "0.1.0" -source = "git+https://github.com/ComposableFi/beefy-rs?branch=master#6b99ac5459caaa1baf94cafa208fffe8e9dd0b54" +source = "git+https://github.com/ComposableFi/beefy-rs?rev=6b99ac5459caaa1baf94cafa208fffe8e9dd0b54#6b99ac5459caaa1baf94cafa208fffe8e9dd0b54" dependencies = [ "beefy-primitives", "ckb-merkle-mountain-range", @@ -3703,10 +3703,11 @@ dependencies = [ [[package]] name = "proc-macro-crate" -version = "1.1.3" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e17d47ce914bf4de440332250b0edd23ce48c005f59fab39d3335866b114f11a" +checksum = "26d50bfb8c23f23915855a00d98b5a35ef2e0b871bb52937bacadb798fbb66c8" dependencies = [ + "once_cell", "thiserror", "toml", ] @@ -3737,9 +3738,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.40" +version = "1.0.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd96a1e8ed2596c337f8eae5f24924ec83f5ad5ab21ea8e455d3566c69fbcaf7" +checksum = "0a2ca2c61bc9f3d74d2886294ab7b9853abd9c1ad903a3ac7815c58989bb7bab" dependencies = [ "unicode-ident", ] @@ -3800,9 +3801,9 @@ checksum = "cf7e6d18738ecd0902d30d1ad232c9125985a3422929b16c65517b38adc14f96" [[package]] name = "pulldown-cmark" -version = "0.9.1" +version = "0.9.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "34f197a544b0c9ab3ae46c359a7ec9cbbb5c7bf97054266fecb7ead794a181d6" +checksum = "2d9cc634bc78768157b5cbfe988ffcd1dcba95cd2b2f03a88316c08c6d00ed63" dependencies = [ "bitflags", "memchr", @@ -3811,11 +3812,11 @@ dependencies = [ [[package]] name = "quanta" -version = "0.10.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bafd74c340a0a7e79415981ede3460df16b530fd071541901a57416eea950b17" +checksum = "b7e31331286705f455e56cca62e0e717158474ff02b7936c1fa596d983f4ae27" dependencies = [ - "crossbeam-utils 0.8.10", + "crossbeam-utils 0.8.11", "libc", "mach", "once_cell", @@ -3833,9 +3834,9 @@ checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] name = "quote" -version = "1.0.20" +version = "1.0.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3bcdf212e9776fbcb2d23ab029360416bb1706b1aea2d1a5ba002727cbcab804" +checksum = "bbe448f377a7d6961e30f5955f9b8d106c3f5e449d493ee1b125c1d43c2b5179" dependencies = [ "proc-macro2", ] @@ -3939,9 +3940,9 @@ dependencies = [ [[package]] name = "raw-cpuid" -version = "10.3.0" +version = "10.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "738bc47119e3eeccc7e94c4a506901aea5e7b4944ecd0829cbebf4af04ceda12" +checksum = "2c49596760fce12ca21550ac21dc5a9617b2ea4b6e0aa7d8dab8ff2824fc2bba" dependencies = [ "bitflags", ] @@ -3972,15 +3973,15 @@ checksum = "258bcdb5ac6dad48491bb2992db6b7cf74878b0384908af124823d118c99683f" dependencies = [ "crossbeam-channel", "crossbeam-deque", - "crossbeam-utils 0.8.10", + "crossbeam-utils 0.8.11", "num_cpus", ] [[package]] name = "redox_syscall" -version = "0.2.13" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62f25bc4c7e55e0b0b7a1d43fb893f4fa1361d0abe38b9ce4f323c2adfe6ef42" +checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" dependencies = [ "bitflags", ] @@ -3998,18 +3999,18 @@ dependencies = [ [[package]] name = "ref-cast" -version = "1.0.7" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "685d58625b6c2b83e4cc88a27c4bf65adb7b6b16dbdc413e515c9405b47432ab" +checksum = "ed13bcd201494ab44900a96490291651d200730904221832b9547d24a87d332b" dependencies = [ "ref-cast-impl", ] [[package]] name = "ref-cast-impl" -version = "1.0.7" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a043824e29c94169374ac5183ac0ed43f5724dc4556b19568007486bd840fa1f" +checksum = "5234cd6063258a5e32903b53b1b6ac043a0541c8adc1f610f67b0326c7a578fa" dependencies = [ "proc-macro2", "quote", @@ -4018,9 +4019,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.5.6" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d83f127d94bdbcda4c8cc2e50f6f84f4b611f69c902699ca385a39c3a75f9ff1" +checksum = "4c4eb3267174b8c6c2f654116623910a0fef09c4753f8dd83db29c48a0df988b" dependencies = [ "aho-corasick", "memchr", @@ -4038,9 +4039,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.6.26" +version = "0.6.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49b3de9ec5dc0a3417da371aab17d729997c15010e7fd24ff707773a33bddb64" +checksum = "a3f87b73ce11b1619a3c6332f45341e0047173771e8b8b73f87bfeefb7b56244" [[package]] name = "remove_dir_all" @@ -4216,24 +4217,24 @@ dependencies = [ [[package]] name = 
"rustls-pemfile" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7522c9de787ff061458fe9a829dc790a3f5b22dc571694fc5883f448b94d9a9" +checksum = "0864aeff53f8c05aa08d86e5ef839d3dfcf07aeba2db32f12db0ef716e87bd55" dependencies = [ "base64", ] [[package]] name = "rustversion" -version = "1.0.7" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0a5f7c728f5d284929a1cccb5bc19884422bfe6ef4d6c409da2c41838983fcf" +checksum = "97477e48b4cf8603ad5f7aaf897467cf42ab4218a38ef76fb14c2d6773a6d6a8" [[package]] name = "ryu" -version = "1.0.10" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3f6f92acf49d1b98f7a81226834412ada05458b7364277387724a237f062695" +checksum = "4501abdff3ae82a1c1b477a17252eb69cee9e66eb915c1abaa4f44d873df9f09" [[package]] name = "safe-proc-macro2" @@ -4317,7 +4318,7 @@ version = "2.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "50e334bb10a245e28e5fd755cabcafd96cfcd167c99ae63a46924ca8d8703a3c" dependencies = [ - "proc-macro-crate 1.1.3", + "proc-macro-crate 1.2.0", "proc-macro2", "quote", "syn", @@ -4417,7 +4418,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "08da66b8b0965a5555b6bd6639e68ccba85e1e2506f5fbb089e93f8a04e1a2d1" dependencies = [ "der", - "generic-array 0.14.5", + "generic-array 0.14.6", "pkcs8", "subtle", "zeroize", @@ -4495,36 +4496,36 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.12" +version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2333e6df6d6598f2b1974829f853c2b4c5f4a6e503c10af918081aa6f8564e1" +checksum = "93f6841e709003d68bb2deee8c343572bf446003ec20a583e76f7b15cebf3711" dependencies = [ "serde", ] [[package]] name = "serde" -version = "1.0.138" +version = "1.0.142" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1578c6245786b9d168c5447eeacfb96856573ca56c9d68fdcf394be134882a47" +checksum = "e590c437916fb6b221e1d00df6e3294f3fccd70ca7e92541c475d6ed6ef5fee2" dependencies = [ "serde_derive", ] [[package]] name = "serde_bytes" -version = "0.11.6" +version = "0.11.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "212e73464ebcde48d723aa02eb270ba62eff38a9b732df31f33f1b4e145f3a54" +checksum = "cfc50e8183eeeb6178dcb167ae34a8051d63535023ae38b5d8d12beae193d37b" dependencies = [ "serde", ] [[package]] name = "serde_derive" -version = "1.0.138" +version = "1.0.142" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "023e9b1467aef8a10fb88f25611870ada9800ef7e22afce356bb0d2387b6f27c" +checksum = "34b5b8d809babe02f538c2cfec6f2c1ed10804c0e5a6a041a049a4f5588ccc2e" dependencies = [ "proc-macro2", "quote", @@ -4544,20 +4545,20 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.82" +version = "1.0.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82c2c1fdcd807d1098552c5b9a36e425e42e9fbd7c6a37a8425f390f781f7fa7" +checksum = "38dd04e3c8279e75b31ef29dbdceebfe5ad89f4d0937213c53f7d49d01b3d5a7" dependencies = [ - "itoa 1.0.2", + "itoa 1.0.3", "ryu", "serde", ] [[package]] name = "serde_repr" -version = "0.1.8" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2ad84e47328a31223de7fed7a4f5087f2d6ddfe586cf3ca25b7a165bc0a5aed" +checksum = "1fe39d9fbb0ebf5eb2c7cb7e2a47e4f462fad1379f1166b8ae49ad9eae89a7ca" dependencies = [ 
"proc-macro2", "quote", @@ -4566,9 +4567,9 @@ dependencies = [ [[package]] name = "serde_yaml" -version = "0.8.24" +version = "0.8.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "707d15895415db6628332b737c838b88c598522e4dc70647e59b72312924aebc" +checksum = "578a7433b776b56a35785ed5ce9a7e777ac0598aac5a6dd1b4b18a307c7fc71b" dependencies = [ "indexmap", "ryu", @@ -4690,9 +4691,9 @@ dependencies = [ [[package]] name = "sha3" -version = "0.10.1" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "881bf8156c87b6301fc5ca6b27f11eeb2761224c7081e69b409d5a1951a70c86" +checksum = "0a31480366ec990f395a61b7c08122d99bd40544fdb5abcfc1b06bb29994312c" dependencies = [ "digest 0.10.3", "keccak", @@ -4771,9 +4772,12 @@ dependencies = [ [[package]] name = "slab" -version = "0.4.6" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb703cfe953bccee95685111adeedb76fabe4e97549a58d16f03ea7b9367bb32" +checksum = "4614a76b2a8be0058caa9dbbaf66d988527d86d003c11a94fbd335d7661edcef" +dependencies = [ + "autocfg", +] [[package]] name = "smallvec" @@ -4829,7 +4833,7 @@ version = "4.0.0-dev" source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" dependencies = [ "blake2", - "proc-macro-crate 1.1.3", + "proc-macro-crate 1.2.0", "proc-macro2", "quote", "syn", @@ -5046,7 +5050,7 @@ dependencies = [ "byteorder", "digest 0.10.3", "sha2 0.10.2", - "sha3 0.10.1", + "sha3 0.10.2", "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", "twox-hash", ] @@ -5336,7 +5340,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "22ecb916b9664ed9f90abef0ff5a3e61454c1efea5861b2997e03f39b59b955f" dependencies = [ "Inflector", - "proc-macro-crate 1.1.3", + "proc-macro-crate 1.2.0", "proc-macro2", "quote", "syn", @@ -5348,7 +5352,7 @@ version = "5.0.0" source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" dependencies = [ "Inflector", - "proc-macro-crate 1.1.3", + "proc-macro-crate 1.2.0", "proc-macro2", "quote", "syn", @@ -5597,9 +5601,9 @@ checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" [[package]] name = "spin" -version = "0.9.3" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c530c2b0d0bf8b69304b39fe2001993e267461948b890cd037d8ad4293fa1a0d" +checksum = "7f6002a767bff9e83f8eeecf883ecb8011875a21ae8da43bffb817a57e78cc09" dependencies = [ "lock_api", ] @@ -5616,9 +5620,9 @@ dependencies = [ [[package]] name = "ss58-registry" -version = "1.23.0" +version = "1.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77ef98aedad3dc52e10995e7ed15f1279e11d4da35795f5dac7305742d0feb66" +checksum = "a039906277e0d8db996cd9d1ef19278c10209d994ecfc1025ced16342873a17c" dependencies = [ "Inflector", "num-format", @@ -5757,9 +5761,9 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.98" +version = "1.0.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c50aef8a904de4c23c788f104b7dddc7d6f79c647c7c8ce4cc8f73eb0ca773dd" +checksum = "58dbef6ec655055e20b86b15a8cc6d439cca19b667537ac6a1369572d151ab13" dependencies = [ "proc-macro2", "quote", @@ -5975,9 +5979,9 @@ dependencies = [ [[package]] name = "test-log" -version = "0.2.10" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "4235dbf7ea878b3ef12dea20a59c134b405a66aafc4fc2c7b9935916e289e735" +checksum = "38f0c854faeb68a048f0f2dc410c5ddae3bf83854ef0e4977d58306a5edef50e" dependencies = [ "proc-macro2", "quote", @@ -5992,18 +5996,18 @@ checksum = "b1141d4d61095b28419e22cb0bbf02755f5e54e0526f97f1e3d1d160e60885fb" [[package]] name = "thiserror" -version = "1.0.31" +version = "1.0.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd829fe32373d27f76265620b5309d0340cb8550f523c1dda251d6298069069a" +checksum = "f5f6586b7f764adc0231f4c79be7b920e766bb2f3e51b3661cdb263828f19994" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.31" +version = "1.0.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0396bc89e626244658bef819e22d0cc459e795a5ebe878e6ec336d1674a8d79a" +checksum = "12bafc5b54507e0149cdf1b145a5d80ab80a90bcd9275df43d4fff68460f6c21" dependencies = [ "proc-macro2", "quote", @@ -6030,10 +6034,11 @@ dependencies = [ [[package]] name = "time" -version = "0.3.11" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72c91f41dcb2f096c05f0873d667dceec1087ce5bcf984ec8ffb19acddbb3217" +checksum = "74b7cc93fc23ba97fde84f7eea56c55d1ba183f495c6715defdfc7b9cb8c870f" dependencies = [ + "js-sys", "libc", "num_threads", "time-macros", @@ -6103,10 +6108,11 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "1.19.2" +version = "1.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c51a52ed6686dd62c320f9b89299e9dfb46f730c7a48e635c19f21d116cb1439" +checksum = "7a8325f63a7d4774dd041e363b2409ed1c5cbbd0f867795e661df066b2b0a581" dependencies = [ + "autocfg", "bytes", "libc", "memchr", @@ -6287,9 +6293,9 @@ checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" [[package]] name = "tracing" -version = "0.1.35" +version = "0.1.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a400e31aa60b9d44a52a8ee0343b5b18566b03a8321e0d321f695cf56e940160" +checksum = "2fce9567bd60a67d08a16488756721ba392f24f29006402881e43b19aac64307" dependencies = [ "cfg-if 1.0.0", "log", @@ -6311,9 +6317,9 @@ dependencies = [ [[package]] name = "tracing-core" -version = "0.1.28" +version = "0.1.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b7358be39f2f274f322d2aaed611acc57f382e8eb1e5b48cb9ae30933495ce7" +checksum = "5aeea4303076558a00714b823f9ad67d58a3bbda1df83d8827d21193156e22f7" dependencies = [ "once_cell", "valuable", @@ -6326,7 +6332,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d686ec1c0f384b1277f097b2f279a2ecc11afe8c133c1aabf036a27cb4cd206e" dependencies = [ "tracing", - "tracing-subscriber 0.3.14", + "tracing-subscriber 0.3.15", ] [[package]] @@ -6384,9 +6390,9 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.14" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a713421342a5a666b7577783721d3117f1b69a393df803ee17bb73b1e122a59" +checksum = "60db860322da191b40952ad9affe65ea23e7dd6a5c442c2c42865810c6ab8e6b" dependencies = [ "ansi_term", "matchers 0.1.0", @@ -6410,7 +6416,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d32d034c0d3db64b43c31de38e945f15b40cd4ca6d2dcfc26d4798ce8de4ab83" dependencies = [ "hash-db", - "hashbrown 0.12.1", + "hashbrown 0.12.3", "log", "rustc-hex", 
"smallvec", @@ -6427,9 +6433,9 @@ dependencies = [ [[package]] name = "triomphe" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eda0abf5a9b5ad4a5ac1393956ae03fb57033749d3983e2cac9afbfd5ae04ec2" +checksum = "7fe1b3800b35f9b936c28dc59dbda91b195371269396784d931fe2a5a2be3d2f" [[package]] name = "try-lock" @@ -6518,9 +6524,9 @@ checksum = "099b7128301d285f79ddd55b9a83d5e6b9e97c92e0ea0daebee7263e932de992" [[package]] name = "unicode-ident" -version = "1.0.1" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5bd2fe26506023ed7b5e1e315add59d6f584c621d037f9368fea9cfb988f368c" +checksum = "c4f5b37a154999a8f3f98cc23a628d850e154479cd94decf3414696e12e31aaf" [[package]] name = "unicode-normalization" @@ -6551,9 +6557,9 @@ checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" [[package]] name = "ureq" -version = "2.4.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9399fa2f927a3d327187cbd201480cee55bee6ac5d3c77dd27f0c6814cff16d5" +checksum = "b97acb4c28a254fd7a4aeec976c46a7fa404eac4d7c134b30c75144846d7cb8f" dependencies = [ "base64", "chunked_transfer", @@ -6563,7 +6569,7 @@ dependencies = [ "rustls 0.20.6", "url", "webpki 0.22.0", - "webpki-roots 0.22.3", + "webpki-roots 0.22.4", ] [[package]] @@ -6661,9 +6667,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.81" +version = "0.2.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c53b543413a17a202f4be280a7e5c62a1c69345f5de525ee64f8cfdbc954994" +checksum = "fc7652e3f6c4706c8d9cd54832c4a4ccb9b5336e2c3bd154d5cccfbf1c1f5f7d" dependencies = [ "cfg-if 1.0.0", "wasm-bindgen-macro", @@ -6671,13 +6677,13 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.81" +version = "0.2.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5491a68ab4500fa6b4d726bd67408630c3dbe9c4fe7bda16d5c82a1fd8c7340a" +checksum = "662cd44805586bd52971b9586b1df85cdbbd9112e4ef4d8f41559c334dc6ac3f" dependencies = [ "bumpalo", - "lazy_static", "log", + "once_cell", "proc-macro2", "quote", "syn", @@ -6686,9 +6692,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.81" +version = "0.2.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c441e177922bc58f1e12c022624b6216378e5febc2f0533e41ba443d505b80aa" +checksum = "b260f13d3012071dfb1512849c033b1925038373aea48ced3012c09df952c602" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -6696,9 +6702,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.81" +version = "0.2.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d94ac45fcf608c1f45ef53e748d35660f168490c10b23704c7779ab8f5c3048" +checksum = "5be8e654bdd9b79216c2929ab90721aa82faf65c48cdf08bdc4e7f51357b80da" dependencies = [ "proc-macro2", "quote", @@ -6709,9 +6715,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.81" +version = "0.2.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a89911bd99e5f3659ec4acf9c4d93b0a90fe4a2a11f15328472058edc5261be" +checksum = "6598dd0bd3c7d51095ff6531a5b23e02acdc81804e30d8f07afb77b7215a140a" [[package]] name = "wasmi" @@ -6739,9 +6745,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.58" +version = "0.3.59" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fed94beee57daf8dd7d51f2b15dc2bcde92d7a72304cdf662a4371008b71b90" +checksum = "ed055ab27f941423197eb86b2035720b1a3ce40504df082cac2ecc6ed73335a1" dependencies = [ "js-sys", "wasm-bindgen", @@ -6778,9 +6784,9 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "0.22.3" +version = "0.22.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44d8de8415c823c8abd270ad483c6feeac771fad964890779f9a8cb24fbbc1bf" +checksum = "f1c760f0d366a6c24a02ed7816e23e691f5d92291f94d15e836006fd11b04daf" dependencies = [ "webpki 0.22.0", ] @@ -6879,9 +6885,9 @@ dependencies = [ [[package]] name = "zeroize" -version = "1.5.6" +version = "1.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20b578acffd8516a6c3f2a1bdefc1ec37e547bb4e0fb8b6b01a4cafc886b4442" +checksum = "c394b5bd0c6f669e7275d9c20aa90ae064cb22e75a1cad54e1b34088034b149f" dependencies = [ "zeroize_derive", ] diff --git a/modules/Cargo.toml b/modules/Cargo.toml index 936c1af35a..0e13f124a5 100644 --- a/modules/Cargo.toml +++ b/modules/Cargo.toml @@ -28,7 +28,7 @@ std = [ "sp-core/std", "codec/std", "pallet-mmr-primitives/std", - "beefy-primitives/std", + "beefy-client-primitives/std", "sp-trie/std", "sp-io/std", "sp-std/std", @@ -61,9 +61,8 @@ flex-error = { version = "0.4.4", default-features = false } num-traits = { version = "0.2.15", default-features = false } derive_more = { version = "0.99.17", default-features = false, features = ["from", "into", "display"] } uint = { version = "0.9", default-features = false } - -beefy-client = { package = "beefy-generic-client", git = "https://github.com/ComposableFi/beefy-rs", branch = "master", default-features = false, optional = true } -beefy-client-primitives = { package = "primitives", git = "https://github.com/ComposableFi/beefy-rs", branch = "master", default-features = false, optional = true } +beefy-client = { package = "beefy-generic-client", git = "https://github.com/ComposableFi/beefy-rs", rev = "6b99ac5459caaa1baf94cafa208fffe8e9dd0b54", default-features = false, optional = true } +beefy-client-primitives = { package = "primitives", git = "https://github.com/ComposableFi/beefy-rs", rev = "6b99ac5459caaa1baf94cafa208fffe8e9dd0b54", default-features = false, optional = true } pallet-mmr-primitives = { package = "sp-mmr-primitives", git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.24", default-features = false, optional = true } beefy-primitives = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.24", default-features = false, optional = true } codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false } @@ -117,8 +116,8 @@ sp-io = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0 subxt = "0.22.0" tokio = { version = "1.17.0", features = ["full"] } serde_json = "1.0.74" -beefy-client = { package = "beefy-generic-client", git = "https://github.com/ComposableFi/beefy-rs", branch = "master" } -beefy-queries = { git = "https://github.com/ComposableFi/beefy-rs", branch = "master" } +beefy-client = { package = "beefy-generic-client", git = "https://github.com/ComposableFi/beefy-rs", rev = "6b99ac5459caaa1baf94cafa208fffe8e9dd0b54" } +beefy-queries = { git = "https://github.com/ComposableFi/beefy-rs", rev = "6b99ac5459caaa1baf94cafa208fffe8e9dd0b54" } sha3 = { version = "0.10.1" } ripemd = { version = "0.1.1" } From b62b5ddb88f255a67a8dc20ee9eef6d218c4fc00 Mon Sep 17 
00:00:00 2001 From: Seun Lanlege Date: Fri, 5 Aug 2022 13:26:27 +0100 Subject: [PATCH 69/96] don't check timestamp of outgoing packets --- .../core/ics04_channel/handler/send_packet.rs | 34 +++++++++---------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/modules/src/core/ics04_channel/handler/send_packet.rs b/modules/src/core/ics04_channel/handler/send_packet.rs index ca484371f8..e20fee5fda 100644 --- a/modules/src/core/ics04_channel/handler/send_packet.rs +++ b/modules/src/core/ics04_channel/handler/send_packet.rs @@ -59,23 +59,23 @@ pub fn send_packet(ctx: &dyn ReaderContext, packet: Packet) -> HandlerResult Date: Mon, 8 Aug 2022 18:51:05 +0100 Subject: [PATCH 70/96] Fix packet data deserialization (#42) --- modules/Cargo.toml | 8 ++++++++ modules/src/core/ics04_channel/packet.rs | 16 +++++++++++++++- modules/src/serializers.rs | 17 ++++++++++++++++- 3 files changed, 39 insertions(+), 2 deletions(-) diff --git a/modules/Cargo.toml b/modules/Cargo.toml index 0e13f124a5..21ac94fab5 100644 --- a/modules/Cargo.toml +++ b/modules/Cargo.toml @@ -120,6 +120,14 @@ beefy-client = { package = "beefy-generic-client", git = "https://github.com/Co beefy-queries = { git = "https://github.com/ComposableFi/beefy-rs", rev = "6b99ac5459caaa1baf94cafa208fffe8e9dd0b54" } sha3 = { version = "0.10.1" } ripemd = { version = "0.1.1" } +beefy-client-primitives = { package = "primitives", git = "https://github.com/ComposableFi/beefy-rs", rev = "6b99ac5459caaa1baf94cafa208fffe8e9dd0b54" } +beefy-primitives = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.24" } +pallet-mmr-primitives = { package = "sp-mmr-primitives", git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.24" } +codec = { package = "parity-scale-codec", version = "3.0.0"} +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.24" } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.24" } +sp-std = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.24" } +sp-trie = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.24" } [[test]] name = "mbt" diff --git a/modules/src/core/ics04_channel/packet.rs b/modules/src/core/ics04_channel/packet.rs index 3d6a49886b..b8745e8f8e 100644 --- a/modules/src/core/ics04_channel/packet.rs +++ b/modules/src/core/ics04_channel/packet.rs @@ -104,7 +104,10 @@ pub struct Packet { pub source_channel: ChannelId, pub destination_port: PortId, pub destination_channel: ChannelId, - #[serde(serialize_with = "crate::serializers::ser_hex_upper")] + #[serde( + serialize_with = "crate::serializers::ser_hex_upper", + deserialize_with = "crate::serializers::deser_hex_upper" + )] pub data: Vec<u8>, pub timeout_height: Height, pub timeout_timestamp: Timestamp, @@ -466,4 +469,15 @@ mod tests { assert_eq!(raw, raw_back); assert_eq!(msg, msg_back); } + + #[test] + fn serialize_and_deserialize_packet() { + let packet = Packet { + data: vec![5; 32], + ..Default::default() + }; + let json = serde_json::to_string(&packet).unwrap(); + let deserialized_packet: Packet = serde_json::from_str(&json).unwrap(); + assert_eq!(packet, deserialized_packet); + } } diff --git a/modules/src/serializers.rs b/modules/src/serializers.rs index bd6f346336..20b1a65d3b 100644 --- a/modules/src/serializers.rs +++ b/modules/src/serializers.rs @@ -1,4 +1,8 @@ -use serde::ser::{Serialize, Serializer}; +use crate::prelude::*; +use serde::{ + ser::{Serialize, Serializer}, + Deserialize, Deserializer, +}; use subtle_encoding::{Encoding, Hex}; pub fn ser_hex_upper<S, T>(data: T, serializer: S) -> Result<S::Ok, S::Error> @@ -10,6 +14,17 @@ where hex.serialize(serializer) } +pub fn deser_hex_upper<'de, T, D>(deserializer: D) -> Result<T, D::Error> +where + D: Deserializer<'de>, + T: AsRef<[u8]>, + T: From<Vec<u8>>, +{ + let hex = String::deserialize(deserializer)?; + let bytes = Hex::upper_case().decode(hex.as_bytes()).unwrap(); + Ok(bytes.into()) +} + pub mod serde_string { use alloc::string::String; use core::fmt::Display;
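Note on [PATCH 70/96]: deser_hex_upper above is the inverse of ser_hex_upper; the serializer writes a byte field as an upper-case hex string, and the deserializer decodes that string back into bytes and converts them with .into(). A minimal standalone sketch of that round-trip follows (not part of the patch series; it assumes subtle_encoding's Encoding::encode_to_string is the call used on the serialization side):

use subtle_encoding::{Encoding, Hex};

fn main() {
    // Serialization side: a Vec<u8> field such as Packet::data becomes "0505...".
    let data: Vec<u8> = vec![5; 32];
    let hex = Hex::upper_case().encode_to_string(&data).unwrap();
    // Deserialization side: decode back to Vec<u8>; `.into()` then covers any
    // target type T: From<Vec<u8>>, which is why that bound is sufficient.
    let decoded: Vec<u8> = Hex::upper_case().decode(hex.as_bytes()).unwrap();
    assert_eq!(data, decoded);
}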
From 1f1f06b0dab92802f64f97b77380c800899a2be7 Mon Sep 17 00:00:00 2001 From: Web3 Smith <31099392+Wizdave97@users.noreply.github.com> Date: Tue, 9 Aug 2022 19:08:35 +0100 Subject: [PATCH 71/96] Fix Acknowledgement deserialization (#44) --- modules/src/core/ics04_channel/events.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/modules/src/core/ics04_channel/events.rs b/modules/src/core/ics04_channel/events.rs index c8e0acccf7..c932787ccb 100644 --- a/modules/src/core/ics04_channel/events.rs +++ b/modules/src/core/ics04_channel/events.rs @@ -887,7 +887,10 @@ impl core::fmt::Display for ReceivePacket { pub struct WriteAcknowledgement { pub height: Height, pub packet: Packet, - #[serde(serialize_with = "crate::serializers::ser_hex_upper")] + #[serde( + serialize_with = "crate::serializers::ser_hex_upper", + deserialize_with = "crate::serializers::deser_hex_upper" + )] pub ack: Vec<u8>, } From c6a7cf0f3970d899e3a195ebde8986026cf75544 Mon Sep 17 00:00:00 2001 From: Web3 Philosopher Date: Thu, 11 Aug 2022 13:46:32 +0100 Subject: [PATCH 72/96] snip snip (#47) * we don't need all this * bring back proto-compiler * bump Cargo.lock * begone ci --- .github/CODEOWNERS | 15 - .github/ISSUE_TEMPLATE/bug-report.md | 38 - .github/ISSUE_TEMPLATE/feature-request.md | 41 - .github/ISSUE_TEMPLATE/release-template.md | 22 - .github/ISSUE_TEMPLATE/rust-update.md | 28 - .github/PULL_REQUEST_TEMPLATE.md | 27 - .github/actions-rs/grcov.yml | 6 - .github/dependabot.yml | 42 - .github/markdown-link-check.json | 11 - .github/workflows/audit.yaml | 19 - .github/workflows/cargo-doc.yaml | 27 - .../workflows/e2e-gaia-current-release.yaml | 60 - .../workflows/e2e-gaia-legacy-release.yaml | 59 - .github/workflows/guide.yml | 59 - .github/workflows/integration.yaml | 214 -- .github/workflows/markdown-link-check.yml | 19 - .github/workflows/no-std.yaml | 47 - .github/workflows/release.yml | 80 - .github/workflows/rust.yml | 139 - .github/workflows/scripts.yaml | 19 - .github/workflows/specs.yml | 30 - Cargo.lock | 1363 ++------- Cargo.toml | 20 +- ci/.gitignore | 3 - ci/README.md | 230 -- ci/bootstrap_gaia.sh | 94 - ci/build-chains.sh | 44 - ci/build-ibc-chains.sh | 80 - ci/build-simd.sh | 24 - ci/chain_a.Dockerfile | 14 - ci/chain_b.Dockerfile | 14 - ci/chains/gaia/v3.0.0/ibc-0/config/app.toml | 152 - .../gaia/v3.0.0/ibc-0/config/config.toml | 393 --- .../gaia/v3.0.0/ibc-0/config/genesis.json | 285 -- ...710169cd596180da34bc6dde5d28fd663e48c.json | 1 - .../gaia/v3.0.0/ibc-0/config/node_key.json | 1 - .../ibc-0/config/priv_validator_key.json | 11 - ci/chains/gaia/v3.0.0/ibc-0/key_seed.json | 1 - ...86e250f44deba09213d4fed3bdc32a8f39.address | 1 - ...4aae575a3f183d3db23c2463a5128adb09.address | 1 - .../gaia/v3.0.0/ibc-0/keyring-test/user.info | 1 - .../v3.0.0/ibc-0/keyring-test/validator.info | 1 - .../gaia/v3.0.0/ibc-0/validator_seed.json | 1 - ci/chains/gaia/v3.0.0/ibc-1/config/app.toml | 152 - .../gaia/v3.0.0/ibc-1/config/config.toml | 393 --- .../gaia/v3.0.0/ibc-1/config/genesis.json | 285 --
...eddb601e66c008c241ccfc80f7cf3358cfa7f.json | 1 - .../gaia/v3.0.0/ibc-1/config/node_key.json | 1 - .../ibc-1/config/priv_validator_key.json | 11 - ci/chains/gaia/v3.0.0/ibc-1/key_seed.json | 1 - ...ee21668b3fa47d372ea12148bb39d5392d.address | 1 - ...4982972cd1da252c4dfe6d6fa5747d9163.address | 1 - .../gaia/v3.0.0/ibc-1/keyring-test/user.info | 1 - .../v3.0.0/ibc-1/keyring-test/validator.info | 1 - .../gaia/v3.0.0/ibc-1/validator_seed.json | 1 - ci/chains/gaia/v4.0.0/ibc-0/config/app.toml | 152 - .../gaia/v4.0.0/ibc-0/config/config.toml | 393 --- .../gaia/v4.0.0/ibc-0/config/genesis.json | 294 -- ...4bdc90fb01a1167636bb40362967569fa150f.json | 1 - .../gaia/v4.0.0/ibc-0/config/node_key.json | 1 - .../ibc-0/config/priv_validator_key.json | 11 - ci/chains/gaia/v4.0.0/ibc-0/key_seed.json | 1 - ...a596b84a4c080ea50852e813c47ce3bb71.address | 1 - ...3dea50a4530fcf574e45c9db41b5d21352.address | 1 - .../gaia/v4.0.0/ibc-0/keyring-test/user.info | 1 - .../v4.0.0/ibc-0/keyring-test/validator.info | 1 - .../gaia/v4.0.0/ibc-0/validator_seed.json | 1 - ci/chains/gaia/v4.0.0/ibc-1/config/app.toml | 152 - .../gaia/v4.0.0/ibc-1/config/config.toml | 393 --- .../gaia/v4.0.0/ibc-1/config/genesis.json | 294 -- ...08e59e6071a9ddfb7f66d08723b1bef442f71.json | 1 - .../gaia/v4.0.0/ibc-1/config/node_key.json | 1 - .../ibc-1/config/priv_validator_key.json | 11 - ci/chains/gaia/v4.0.0/ibc-1/key_seed.json | 1 - ...088bdfb2c0751757ad9ca7ed02e619ac76.address | 1 - ...1d0c67b11a7e38da39133259d6fcd4f969.address | 1 - .../gaia/v4.0.0/ibc-1/keyring-test/user.info | 1 - .../v4.0.0/ibc-1/keyring-test/validator.info | 1 - .../gaia/v4.0.0/ibc-1/validator_seed.json | 1 - .../gaia/v4.1.0/ibc-0/config/addrbook.json | 4 - ci/chains/gaia/v4.1.0/ibc-0/config/app.toml | 152 - .../gaia/v4.1.0/ibc-0/config/config.toml | 393 --- .../gaia/v4.1.0/ibc-0/config/genesis.json | 294 -- ...c9bede9a7d77955b8770b47267069f9d910b6.json | 1 - .../gaia/v4.1.0/ibc-0/config/node_key.json | 1 - .../ibc-0/config/priv_validator_key.json | 11 - ci/chains/gaia/v4.1.0/ibc-0/key_seed.json | 1 - ...f7f3ed82ac613f8458685c38f800ca221e.address | 1 - ...59b63c38c20d493d9ef1a6e35889d3576f.address | 1 - .../gaia/v4.1.0/ibc-0/keyring-test/user.info | 1 - .../v4.1.0/ibc-0/keyring-test/validator.info | 1 - .../gaia/v4.1.0/ibc-0/validator_seed.json | 1 - ci/chains/gaia/v4.1.0/ibc-1/config/app.toml | 152 - .../gaia/v4.1.0/ibc-1/config/config.toml | 393 --- .../gaia/v4.1.0/ibc-1/config/genesis.json | 294 -- ...974b6f467580c92b7a0fe752797eca18915d0.json | 1 - .../gaia/v4.1.0/ibc-1/config/node_key.json | 1 - .../ibc-1/config/priv_validator_key.json | 11 - ci/chains/gaia/v4.1.0/ibc-1/key_seed.json | 1 - ...3a32a22175d13d794ae1340b116ecf0c9c.address | 1 - ...07c8ddd9893a68be98c361ca81ffa7ded9.address | 1 - .../gaia/v4.1.0/ibc-1/keyring-test/user.info | 1 - .../v4.1.0/ibc-1/keyring-test/validator.info | 1 - .../gaia/v4.1.0/ibc-1/validator_seed.json | 1 - .../gaia/v4.2.0/ibc-0/config/addrbook.json | 4 - ci/chains/gaia/v4.2.0/ibc-0/config/app.toml | 152 - .../gaia/v4.2.0/ibc-0/config/config.toml | 393 --- .../gaia/v4.2.0/ibc-0/config/genesis.json | 314 -- ...c6c414bfb15dab6e170c7b1781de818736374.json | 1 - .../gaia/v4.2.0/ibc-0/config/node_key.json | 1 - .../ibc-0/config/priv_validator_key.json | 11 - ...9ffa1e036981c14124485bdd543654b343.address | 1 - ...fd1c8fa0119e95466d9fa27320bfcc0c1e.address | 1 - ...d9ee89a00b0907ecf985c4febfc1672d3d.address | 1 - .../gaia/v4.2.0/ibc-0/keyring-test/user.info | 1 - .../gaia/v4.2.0/ibc-0/keyring-test/user2.info | 1 - 
.../v4.2.0/ibc-0/keyring-test/validator.info | 1 - ci/chains/gaia/v4.2.0/ibc-0/user2_seed.json | 1 - ci/chains/gaia/v4.2.0/ibc-0/user_seed.json | 1 - .../gaia/v4.2.0/ibc-0/validator_seed.json | 1 - ci/chains/gaia/v4.2.0/ibc-1/config/app.toml | 152 - .../gaia/v4.2.0/ibc-1/config/config.toml | 393 --- .../gaia/v4.2.0/ibc-1/config/genesis.json | 314 -- ...d5b43951124b2e6e2d744328041a907f3968c.json | 1 - .../gaia/v4.2.0/ibc-1/config/node_key.json | 1 - .../ibc-1/config/priv_validator_key.json | 11 - ...52f253ce8ee2ced8317a8b6e5a0aaae76a.address | 1 - ...56fb667f52d759b741f7b29a99c7f0f185.address | 1 - ...f11aceb1a5cdc3aab2e4cfc9bcb8f7838c.address | 1 - .../gaia/v4.2.0/ibc-1/keyring-test/user.info | 1 - .../gaia/v4.2.0/ibc-1/keyring-test/user2.info | 1 - .../v4.2.0/ibc-1/keyring-test/validator.info | 1 - ci/chains/gaia/v4.2.0/ibc-1/user2_seed.json | 1 - ci/chains/gaia/v4.2.0/ibc-1/user_seed.json | 1 - .../gaia/v4.2.0/ibc-1/validator_seed.json | 1 - .../gaia/v5.0.5/ibc-0/config/addrbook.json | 4 - ci/chains/gaia/v5.0.5/ibc-0/config/app.toml | 152 - .../gaia/v5.0.5/ibc-0/config/client.toml | 17 - .../gaia/v5.0.5/ibc-0/config/config.toml | 400 --- .../gaia/v5.0.5/ibc-0/config/genesis.json | 342 --- ...7e76f6743997bde26b522ef43e886b1baa95d.json | 1 - .../gaia/v5.0.5/ibc-0/config/node_key.json | 1 - .../ibc-0/config/priv_validator_key.json | 11 - ...a276b243a6f235f16430bb45828989382d.address | 1 - ...907bd0f30dab598252aea96a3ae97b97ee.address | 1 - ...dad97dd40751c1d8fcd577d3eccc83847d.address | 1 - .../gaia/v5.0.5/ibc-0/keyring-test/user.info | 1 - .../gaia/v5.0.5/ibc-0/keyring-test/user2.info | 1 - .../v5.0.5/ibc-0/keyring-test/validator.info | 1 - ci/chains/gaia/v5.0.5/ibc-0/user2_seed.json | 1 - ci/chains/gaia/v5.0.5/ibc-0/user_seed.json | 1 - .../gaia/v5.0.5/ibc-0/validator_seed.json | 1 - ci/chains/gaia/v5.0.5/ibc-1/config/app.toml | 152 - .../gaia/v5.0.5/ibc-1/config/client.toml | 17 - .../gaia/v5.0.5/ibc-1/config/config.toml | 400 --- .../gaia/v5.0.5/ibc-1/config/genesis.json | 342 --- ...b1ad566fa381f6e6b4afd639559e0505fd9d0.json | 1 - .../gaia/v5.0.5/ibc-1/config/node_key.json | 1 - .../ibc-1/config/priv_validator_key.json | 11 - ...c429320d582a78e3c9997dedb9fe307082.address | 1 - ...a6bfd105eb477b2433c8459a2aea93c34f.address | 1 - ...57446ff6086960a12ad95a6d31787df226.address | 1 - .../gaia/v5.0.5/ibc-1/keyring-test/user.info | 1 - .../gaia/v5.0.5/ibc-1/keyring-test/user2.info | 1 - .../v5.0.5/ibc-1/keyring-test/validator.info | 1 - ci/chains/gaia/v5.0.5/ibc-1/user2_seed.json | 1 - ci/chains/gaia/v5.0.5/ibc-1/user_seed.json | 1 - .../gaia/v5.0.5/ibc-1/validator_seed.json | 1 - .../gaia/v5.0.8/ibc-0/config/addrbook.json | 4 - ci/chains/gaia/v5.0.8/ibc-0/config/app.toml | 152 - .../gaia/v5.0.8/ibc-0/config/client.toml | 17 - .../gaia/v5.0.8/ibc-0/config/config.toml | 401 --- .../gaia/v5.0.8/ibc-0/config/genesis.json | 342 --- ...4f6ada6dc23fa6927484cca6e00183dc3fe0b.json | 1 - .../gaia/v5.0.8/ibc-0/config/node_key.json | 1 - .../ibc-0/config/priv_validator_key.json | 11 - ...46bb8efbb409b474b8ebdda158b91f6070.address | 1 - ...a69b7a15ee095fb3da8055a6445188607e.address | 1 - ...a6e4891e5981366306f27b036bfb0d9b1b.address | 1 - .../gaia/v5.0.8/ibc-0/keyring-test/user.info | 1 - .../gaia/v5.0.8/ibc-0/keyring-test/user2.info | 1 - .../v5.0.8/ibc-0/keyring-test/validator.info | 1 - ci/chains/gaia/v5.0.8/ibc-0/user2_seed.json | 1 - ci/chains/gaia/v5.0.8/ibc-0/user_seed.json | 1 - .../gaia/v5.0.8/ibc-0/validator_seed.json | 1 - ci/chains/gaia/v5.0.8/ibc-1/config/app.toml | 152 - 
.../gaia/v5.0.8/ibc-1/config/client.toml | 17 - .../gaia/v5.0.8/ibc-1/config/config.toml | 401 --- .../gaia/v5.0.8/ibc-1/config/genesis.json | 342 --- ...17c1100350fcb281cc6749f1f79956222be5a.json | 1 - .../gaia/v5.0.8/ibc-1/config/node_key.json | 1 - .../ibc-1/config/priv_validator_key.json | 11 - ...1425eebf800a64ed9b19e280e1e981685c.address | 1 - ...6d779d88bdb47a8f0b5522b135812c3a10.address | 1 - ...463f3fe3dfdb6d98037a0c19e2b6d44ca0.address | 1 - .../gaia/v5.0.8/ibc-1/keyring-test/user.info | 1 - .../gaia/v5.0.8/ibc-1/keyring-test/user2.info | 1 - .../v5.0.8/ibc-1/keyring-test/validator.info | 1 - ci/chains/gaia/v5.0.8/ibc-1/user2_seed.json | 1 - ci/chains/gaia/v5.0.8/ibc-1/user_seed.json | 1 - .../gaia/v5.0.8/ibc-1/validator_seed.json | 1 - .../gaia/v6.0.0/ibc-0/config/addrbook.json | 4 - ci/chains/gaia/v6.0.0/ibc-0/config/app.toml | 192 -- .../gaia/v6.0.0/ibc-0/config/client.toml | 17 - .../gaia/v6.0.0/ibc-0/config/config.toml | 401 --- .../gaia/v6.0.0/ibc-0/config/genesis.json | 356 --- ...25df2c406207c64cbc02b1f543a53a46094e3.json | 1 - .../gaia/v6.0.0/ibc-0/config/node_key.json | 1 - .../ibc-0/config/priv_validator_key.json | 11 - ...2294206b404476f840eb8468a09bf47962.address | 1 - ...5fc00ac70c0285e741272d7978f9edcd21.address | 1 - ...cd58edadaadadea911cbba3de68785fc86.address | 1 - .../gaia/v6.0.0/ibc-0/keyring-test/user.info | 1 - .../gaia/v6.0.0/ibc-0/keyring-test/user2.info | 1 - .../v6.0.0/ibc-0/keyring-test/validator.info | 1 - ci/chains/gaia/v6.0.0/ibc-0/user2_seed.json | 1 - ci/chains/gaia/v6.0.0/ibc-0/user_seed.json | 1 - .../gaia/v6.0.0/ibc-0/validator_seed.json | 1 - ci/chains/gaia/v6.0.0/ibc-1/config/app.toml | 192 -- .../gaia/v6.0.0/ibc-1/config/client.toml | 17 - .../gaia/v6.0.0/ibc-1/config/config.toml | 401 --- .../gaia/v6.0.0/ibc-1/config/genesis.json | 356 --- ...010f06487a9d315952e3e57c6dadac4ac425b.json | 1 - .../gaia/v6.0.0/ibc-1/config/node_key.json | 1 - .../ibc-1/config/priv_validator_key.json | 11 - ...61acfae082089bc69ce6bfcf98a59ab78c.address | 1 - ...c6039f018e7fc78971b5af151cb0b52338.address | 1 - ...30d6196bf49e931d458ab59b95140dee6f.address | 1 - .../gaia/v6.0.0/ibc-1/keyring-test/user.info | 1 - .../gaia/v6.0.0/ibc-1/keyring-test/user2.info | 1 - .../v6.0.0/ibc-1/keyring-test/validator.info | 1 - ci/chains/gaia/v6.0.0/ibc-1/user2_seed.json | 1 - ci/chains/gaia/v6.0.0/ibc-1/user_seed.json | 1 - .../gaia/v6.0.0/ibc-1/validator_seed.json | 1 - .../gaia/v7.0.1/ibc-0/config/addrbook.json | 4 - ci/chains/gaia/v7.0.1/ibc-0/config/app.toml | 196 -- .../gaia/v7.0.1/ibc-0/config/client.toml | 17 - .../gaia/v7.0.1/ibc-0/config/config.toml | 428 --- .../gaia/v7.0.1/ibc-0/config/genesis.json | 375 --- ...4ad7d61f006035641dbcd204dcbdb25904c2b.json | 1 - .../gaia/v7.0.1/ibc-0/config/node_key.json | 1 - .../ibc-0/config/priv_validator_key.json | 11 - ...e6445bc37c895cdfe8da8038501e667c70.address | 1 - ...d74ec124eae9d0485340227095f6756908.address | 1 - ...759163a216b80128e371ccb0ae91ddeb45.address | 1 - .../gaia/v7.0.1/ibc-0/keyring-test/user.info | 1 - .../gaia/v7.0.1/ibc-0/keyring-test/user2.info | 1 - .../v7.0.1/ibc-0/keyring-test/validator.info | 1 - ci/chains/gaia/v7.0.1/ibc-0/user2_seed.json | 1 - ci/chains/gaia/v7.0.1/ibc-0/user_seed.json | 1 - .../gaia/v7.0.1/ibc-0/validator_seed.json | 1 - ci/chains/gaia/v7.0.1/ibc-1/config/app.toml | 196 -- .../gaia/v7.0.1/ibc-1/config/client.toml | 17 - .../gaia/v7.0.1/ibc-1/config/config.toml | 428 --- .../gaia/v7.0.1/ibc-1/config/genesis.json | 375 --- ...90e64fa1c26c79a5c03ae6432973f04e293cd.json | 1 - 
.../gaia/v7.0.1/ibc-1/config/node_key.json | 1 - .../ibc-1/config/priv_validator_key.json | 11 - ...40c5d1e0821d2e98931f332093d436e87b.address | 1 - ...1575324aafaca704a424074e01f2da5109.address | 1 - ...dbd3b361a2038cfb43067e349a805c6c0f.address | 1 - .../gaia/v7.0.1/ibc-1/keyring-test/user.info | 1 - .../gaia/v7.0.1/ibc-1/keyring-test/user2.info | 1 - .../v7.0.1/ibc-1/keyring-test/validator.info | 1 - ci/chains/gaia/v7.0.1/ibc-1/user2_seed.json | 1 - ci/chains/gaia/v7.0.1/ibc-1/user_seed.json | 1 - .../gaia/v7.0.1/ibc-1/validator_seed.json | 1 - ci/docker-compose-gaia-current.yml | 61 - ci/docker-compose-gaia-legacy.yml | 61 - ci/e2e.sh | 47 - ci/gaia.Dockerfile | 54 - ci/hermes.Dockerfile | 28 - ci/no-std-check/.gitignore | 1 - ci/no-std-check/Cargo.lock | 2540 ----------------- ci/no-std-check/Cargo.toml | 37 - ci/no-std-check/Makefile | 34 - ci/no-std-check/README.md | 191 -- ci/no-std-check/src/lib.rs | 52 - ci/relayer.Dockerfile | 40 - ci/run-gaiad.sh | 3 - ci/simd.Dockerfile | 14 - ci/simple_config.toml | 60 - docs/spec/README.md | 7 - docs/spec/connection-handshake/CHANGELOG.md | 0 docs/spec/connection-handshake/L1_2.md | 628 ---- .../L2-tla/Environment.tla | 406 --- .../L2-tla/ICS3Module.tla | 580 ---- .../connection-handshake/L2-tla/ICS3Types.tla | 364 --- .../connection-handshake/L2-tla/ICS3Utils.tla | 40 - .../connection-handshake/L2-tla/README.md | 64 - docs/spec/connection-handshake/README.md | 11 - docs/spec/relayer/Definitions.md | 292 -- docs/spec/relayer/Packets.md | 196 -- docs/spec/relayer/Relayer.md | 200 -- docs/spec/tla/client/Chain.tla | 137 - docs/spec/tla/client/ICS02ClientHandlers.tla | 97 - docs/spec/tla/client/ICS02Definitions.tla | 120 - .../client/ICS02SingleChainEnvironment.tla | 107 - .../tla/client/ICS02TwoChainsEnvironment.tla | 167 -- docs/spec/tla/client/MC_SingleChain.tla | 17 - docs/spec/tla/client/MC_TwoChains.tla | 22 - docs/spec/tla/client/README.md | 80 - .../spec/tla/fungible-token-transfer/Bank.tla | 115 - .../tla/fungible-token-transfer/Chain.tla | 203 -- .../IBCTokenTransfer.cfg | 16 - .../IBCTokenTransfer.tla | 440 --- .../IBCTokenTransferDefinitions.tla | 426 --- .../ICS04PacketHandlers.tla | 359 --- .../ICS20FungibleTokenTransferHandlers.tla | 200 -- .../MC_IBCTokenTransfer.tla | 31 - .../tla/fungible-token-transfer/README.md | 218 -- docs/spec/tla/ibc-core/Chain.tla | 290 -- docs/spec/tla/ibc-core/IBCCore.cfg | 24 - docs/spec/tla/ibc-core/IBCCore.tla | 610 ---- docs/spec/tla/ibc-core/IBCCoreDefinitions.tla | 703 ----- .../spec/tla/ibc-core/ICS02ClientHandlers.tla | 75 - .../tla/ibc-core/ICS03ConnectionHandlers.tla | 167 -- .../tla/ibc-core/ICS04ChannelHandlers.tla | 275 -- .../spec/tla/ibc-core/ICS04PacketHandlers.tla | 368 --- docs/spec/tla/ibc-core/ICS18Relayer.tla | 448 --- docs/spec/tla/ibc-core/MC_IBCCore.tla | 53 - docs/spec/tla/ibc-core/README.md | 221 -- docs/spec/tla/packet-delay/Chain.tla | 159 -- docs/spec/tla/packet-delay/IBCPacketDelay.cfg | 12 - docs/spec/tla/packet-delay/IBCPacketDelay.tla | 332 --- .../IBCPacketDelayDefinitions.tla | 419 --- .../tla/packet-delay/ICS04PacketHandlers.tla | 387 --- .../tla/packet-delay/MC_IBCPacketDelay.tla | 30 - docs/spec/tla/packet-delay/README.md | 121 - e2e/e2e/__init__.py | 0 e2e/e2e/channel.py | 638 ----- e2e/e2e/client.py | 132 - e2e/e2e/cmd.py | 106 - e2e/e2e/common.py | 44 - e2e/e2e/connection.py | 346 --- e2e/e2e/packet.py | 328 --- e2e/e2e/relayer.py | 11 - e2e/pyrightconfig.json | 2 - e2e/run.py | 277 -- guide/.gitignore | 1 - guide/README.md | 75 - guide/book.toml | 19 - 
guide/mermaid-init.js | 16 - guide/mermaid.min.js | 32 - guide/src/CNAME | 1 - guide/src/SUMMARY.md | 60 - guide/src/commands/config.md | 42 - guide/src/commands/global.md | 113 - guide/src/commands/index.md | 41 - guide/src/commands/keys/index.md | 344 --- guide/src/commands/listen/index.md | 162 -- guide/src/commands/misbehaviour/index.md | 127 - guide/src/commands/path-setup/channels.md | 486 ---- guide/src/commands/path-setup/clients.md | 152 - guide/src/commands/path-setup/connections.md | 215 -- guide/src/commands/path-setup/index.md | 43 - guide/src/commands/queries/channel.md | 413 --- guide/src/commands/queries/client.md | 306 -- guide/src/commands/queries/connection.md | 155 - guide/src/commands/queries/index.md | 35 - guide/src/commands/queries/packet.md | 290 -- guide/src/commands/queries/tx.md | 60 - guide/src/commands/raw/channel-close.md | 134 - guide/src/commands/raw/channel-open.md | 293 -- guide/src/commands/raw/client.md | 98 - guide/src/commands/raw/connection.md | 277 -- guide/src/commands/raw/index.md | 85 - guide/src/commands/raw/packet.md | 272 -- guide/src/commands/relaying/clear.md | 442 --- guide/src/commands/relaying/handshakes.md | 59 - guide/src/commands/relaying/index.md | 20 - guide/src/commands/relaying/packets.md | 64 - guide/src/commands/upgrade/index.md | 21 - guide/src/commands/upgrade/test.md | 232 -- guide/src/config.md | 183 -- guide/src/example-config.md | 7 - guide/src/features.md | 62 - guide/src/features/matrix.md | 65 - guide/src/getting_started.md | 13 - guide/src/glossary.md | 14 - guide/src/help.md | 586 ---- guide/src/images/grafana.png | Bin 350364 -> 0 bytes guide/src/index.md | 53 - guide/src/installation.md | 223 -- guide/src/pre_requisites.md | 43 - guide/src/relayer.md | 25 - guide/src/rest-api.md | 177 -- guide/src/telemetry.md | 190 -- guide/src/tutorials/index.md | 9 - guide/src/tutorials/local-chains/gaia.md | 42 - .../src/tutorials/local-chains/identifiers.md | 130 - guide/src/tutorials/local-chains/index.md | 7 - .../src/tutorials/local-chains/raw/channel.md | 46 - .../src/tutorials/local-chains/raw/client.md | 117 - .../tutorials/local-chains/raw/connection.md | 50 - guide/src/tutorials/local-chains/raw/index.md | 12 - .../src/tutorials/local-chains/raw/packet.md | 92 - .../relay-paths/create-new-path.md | 65 - .../local-chains/relay-paths/index.md | 12 - .../relay-paths/multiple-paths.md | 328 --- guide/src/tutorials/local-chains/start.md | 222 -- guide/theme/css/chrome.css | 495 ---- guide/theme/css/variables.css | 257 -- relayer-cli/.gitignore | 5 - relayer-cli/Cargo.toml | 90 - relayer-cli/README.md | 44 - relayer-cli/build.rs | 81 - relayer-cli/src/application.rs | 190 -- relayer-cli/src/bin/hermes/main.rs | 36 - relayer-cli/src/cli_utils.rs | 91 - relayer-cli/src/commands.rs | 169 -- relayer-cli/src/commands/clear.rs | 91 - relayer-cli/src/commands/completions.rs | 20 - relayer-cli/src/commands/config.rs | 13 - relayer-cli/src/commands/config/validate.rs | 49 - relayer-cli/src/commands/create.rs | 25 - relayer-cli/src/commands/create/channel.rs | 238 -- relayer-cli/src/commands/create/connection.rs | 166 -- relayer-cli/src/commands/health.rs | 41 - relayer-cli/src/commands/keys.rs | 24 - relayer-cli/src/commands/keys/add.rs | 181 -- relayer-cli/src/commands/keys/balance.rs | 66 - relayer-cli/src/commands/keys/delete.rs | 108 - relayer-cli/src/commands/keys/list.rs | 70 - relayer-cli/src/commands/listen.rs | 161 -- relayer-cli/src/commands/misbehaviour.rs | 132 - relayer-cli/src/commands/query.rs | 89 - 
relayer-cli/src/commands/query/channel.rs | 61 - .../src/commands/query/channel_client.rs | 44 - .../src/commands/query/channel_ends.rs | 220 -- relayer-cli/src/commands/query/channels.rs | 270 -- relayer-cli/src/commands/query/client.rs | 252 -- relayer-cli/src/commands/query/clients.rs | 103 - relayer-cli/src/commands/query/connection.rs | 108 - relayer-cli/src/commands/query/connections.rs | 44 - relayer-cli/src/commands/query/packet.rs | 34 - relayer-cli/src/commands/query/packet/ack.rs | 68 - relayer-cli/src/commands/query/packet/acks.rs | 63 - .../src/commands/query/packet/commitment.rs | 78 - .../src/commands/query/packet/commitments.rs | 58 - .../src/commands/query/packet/pending.rs | 95 - .../commands/query/packet/unreceived_acks.rs | 63 - .../query/packet/unreceived_packets.rs | 63 - relayer-cli/src/commands/query/tx.rs | 13 - relayer-cli/src/commands/query/tx/events.rs | 52 - relayer-cli/src/commands/start.rs | 190 -- relayer-cli/src/commands/tx.rs | 98 - relayer-cli/src/commands/tx/channel.rs | 471 --- relayer-cli/src/commands/tx/client.rs | 394 --- relayer-cli/src/commands/tx/connection.rs | 240 -- relayer-cli/src/commands/tx/packet.rs | 98 - relayer-cli/src/commands/tx/transfer.rs | 255 -- relayer-cli/src/commands/tx/upgrade.rs | 133 - relayer-cli/src/commands/update.rs | 12 - relayer-cli/src/commands/upgrade.rs | 15 - relayer-cli/src/commands/version.rs | 21 - relayer-cli/src/components.rs | 110 - relayer-cli/src/conclude.rs | 268 -- relayer-cli/src/config.rs | 170 -- relayer-cli/src/entry.rs | 72 - relayer-cli/src/error.rs | 104 - relayer-cli/src/lib.rs | 39 - relayer-cli/src/prelude.rs | 9 - relayer-cli/tests/acceptance.rs | 59 - relayer-cli/tests/fixtures/two_chains.toml | 55 - relayer-rest/Cargo.toml | 28 - relayer-rest/README.md | 42 - relayer-rest/src/config.rs | 24 - relayer-rest/src/handle.rs | 87 - relayer-rest/src/lib.rs | 9 - relayer-rest/src/server.rs | 96 - relayer-rest/tests/mock.rs | 139 - relayer/Cargo.toml | 108 - relayer/README.md | 40 - relayer/src/account.rs | 12 - relayer/src/cache.rs | 183 -- relayer/src/chain.rs | 76 - relayer/src/chain/client.rs | 33 - relayer/src/chain/cosmos.rs | 1749 ------------ relayer/src/chain/cosmos/batch.rs | 151 - relayer/src/chain/cosmos/client.rs | 58 - relayer/src/chain/cosmos/compatibility.rs | 87 - relayer/src/chain/cosmos/encode.rs | 179 -- relayer/src/chain/cosmos/estimate.rs | 158 - relayer/src/chain/cosmos/gas.rs | 74 - relayer/src/chain/cosmos/query.rs | 157 - relayer/src/chain/cosmos/query/account.rs | 83 - relayer/src/chain/cosmos/query/balance.rs | 37 - relayer/src/chain/cosmos/query/status.rs | 40 - relayer/src/chain/cosmos/query/tx.rs | 250 -- relayer/src/chain/cosmos/retry.rs | 184 -- relayer/src/chain/cosmos/simulate.rs | 33 - relayer/src/chain/cosmos/tx.rs | 53 - relayer/src/chain/cosmos/types/account.rs | 70 - relayer/src/chain/cosmos/types/config.rs | 44 - relayer/src/chain/cosmos/types/gas.rs | 80 - relayer/src/chain/cosmos/types/mod.rs | 4 - relayer/src/chain/cosmos/types/tx.rs | 18 - relayer/src/chain/cosmos/version.rs | 212 -- relayer/src/chain/cosmos/wait.rs | 114 - relayer/src/chain/counterparty.rs | 614 ---- relayer/src/chain/endpoint.rs | 578 ---- relayer/src/chain/handle.rs | 632 ---- relayer/src/chain/handle/base.rs | 484 ---- relayer/src/chain/handle/cache.rs | 484 ---- relayer/src/chain/handle/counting.rs | 468 --- relayer/src/chain/mock.rs | 484 ---- relayer/src/chain/requests.rs | 335 --- relayer/src/chain/runtime.rs | 897 ------ relayer/src/chain/tracking.rs | 97 - relayer/src/channel.rs | 
1439 ---------- relayer/src/channel/error.rs | 209 -- relayer/src/channel/version.rs | 20 - relayer/src/config.rs | 431 --- relayer/src/config/error.rs | 17 - relayer/src/config/filter.rs | 501 ---- relayer/src/config/proof_specs.rs | 35 - relayer/src/config/types.rs | 322 --- relayer/src/connection.rs | 1332 --------- relayer/src/connection/error.rs | 199 -- relayer/src/error.rs | 668 ----- relayer/src/event.rs | 3 - relayer/src/event/bus.rs | 120 - relayer/src/event/monitor.rs | 487 ---- relayer/src/event/monitor/error.rs | 62 - relayer/src/event/rpc.rs | 255 -- relayer/src/foreign_client.rs | 1865 ------------ relayer/src/keyring.rs | 499 ---- relayer/src/keyring/errors.rs | 116 - relayer/src/keyring/pub_key.rs | 96 - relayer/src/lib.rs | 47 - relayer/src/light_client.rs | 54 - relayer/src/light_client/mock.rs | 86 - relayer/src/light_client/tendermint.rs | 296 -- relayer/src/link.rs | 162 -- relayer/src/link/cli.rs | 235 -- relayer/src/link/error.rs | 164 -- relayer/src/link/operational_data.rs | 401 --- relayer/src/link/packet_events.rs | 135 - relayer/src/link/pending.rs | 290 -- relayer/src/link/relay_path.rs | 1717 ----------- relayer/src/link/relay_sender.rs | 97 - relayer/src/link/relay_summary.rs | 38 - relayer/src/link/tx_hashes.rs | 28 - relayer/src/macros.rs | 69 - relayer/src/object.rs | 485 ---- relayer/src/path.rs | 32 - relayer/src/registry.rs | 122 - relayer/src/rest.rs | 94 - relayer/src/rest/error.rs | 54 - relayer/src/rest/request.rs | 39 - relayer/src/sdk_error.rs | 200 -- relayer/src/spawn.rs | 69 - relayer/src/supervisor.rs | 732 ----- relayer/src/supervisor/client_state_filter.rs | 410 --- relayer/src/supervisor/cmd.rs | 8 - relayer/src/supervisor/dump_state.rs | 77 - relayer/src/supervisor/error.rs | 79 - relayer/src/supervisor/scan.rs | 816 ------ relayer/src/supervisor/spawn.rs | 325 --- relayer/src/telemetry.rs | 47 - relayer/src/transfer.rs | 203 -- relayer/src/upgrade_chain.rs | 176 -- relayer/src/util.rs | 10 - relayer/src/util/block_on.rs | 12 - relayer/src/util/diff.rs | 98 - relayer/src/util/iter.rs | 20 - relayer/src/util/lock.rs | 36 - relayer/src/util/queue.rs | 71 - relayer/src/util/retry.rs | 179 -- relayer/src/util/serde.rs | 0 relayer/src/util/stream.rs | 177 -- relayer/src/util/task.rs | 196 -- relayer/src/worker.rs | 165 -- relayer/src/worker/channel.rs | 82 - relayer/src/worker/client.rs | 135 - relayer/src/worker/cmd.rs | 36 - relayer/src/worker/connection.rs | 85 - relayer/src/worker/error.rs | 35 - relayer/src/worker/handle.rs | 160 -- relayer/src/worker/map.rs | 241 -- relayer/src/worker/packet.rs | 268 -- relayer/src/worker/retry_strategy.rs | 40 - relayer/src/worker/wallet.rs | 42 - .../config/fixtures/relayer_conf_example.toml | 61 - scripts/dev-env | 41 - scripts/gm/CHANGELOG.md | 133 - scripts/gm/README.md | 326 --- scripts/gm/bin/gm | 272 -- scripts/gm/bin/lib-gm | 1115 -------- scripts/gm/bin/shell-support | 113 - scripts/gm/gm.toml | 92 - scripts/init-hermes | 76 - scripts/one-chain | 159 -- scripts/release.sh | 78 - scripts/setup-chains | 92 - scripts/sync-protobuf.sh | 121 - telemetry/Cargo.toml | 26 - telemetry/README.md | 42 - telemetry/src/lib.rs | 44 - telemetry/src/server.rs | 47 - telemetry/src/state.rs | 393 --- tools/integration-test/Cargo.toml | 44 - tools/integration-test/spec/.gitignore | 3 - tools/integration-test/spec/MC_Transfer.cfg | 2 - tools/integration-test/spec/MC_Transfer.tla | 49 - tools/integration-test/spec/README.md | 23 - tools/integration-test/spec/Transfer.tla | 398 --- .../spec/Transfer_typedefs.tla | 
45 - .../src/bin/test_setup_with_binary_channel.rs | 82 - .../bin/test_setup_with_ternary_channel.rs | 84 - tools/integration-test/src/lib.rs | 7 - tools/integration-test/src/mbt/README.md | 7 - tools/integration-test/src/mbt/handlers.rs | 296 -- tools/integration-test/src/mbt/itf.rs | 132 - tools/integration-test/src/mbt/mod.rs | 8 - tools/integration-test/src/mbt/state.rs | 85 - tools/integration-test/src/mbt/transfer.rs | 365 --- tools/integration-test/src/mbt/utils.rs | 136 - .../src/tests/clear_packet.rs | 181 -- .../src/tests/client_expiration.rs | 482 ---- .../src/tests/client_settings.rs | 121 - .../src/tests/connection_delay.rs | 106 - tools/integration-test/src/tests/example.rs | 136 - .../src/tests/execute_schedule.rs | 92 - tools/integration-test/src/tests/ica.rs | 266 -- .../integration-test/src/tests/manual/mod.rs | 14 - .../src/tests/manual/simulation.rs | 98 - tools/integration-test/src/tests/memo.rs | 90 - tools/integration-test/src/tests/mod.rs | 30 - .../src/tests/ordered_channel.rs | 109 - tools/integration-test/src/tests/python.rs | 71 - .../src/tests/query_packet.rs | 144 - .../integration-test/src/tests/supervisor.rs | 149 - .../src/tests/ternary_transfer.rs | 208 -- tools/integration-test/src/tests/transfer.rs | 155 - tools/test-framework/.gitignore | 1 - tools/test-framework/Cargo.toml | 43 - tools/test-framework/README.md | 27 - .../src/bootstrap/binary/chain.rs | 301 -- .../src/bootstrap/binary/channel.rs | 231 -- .../src/bootstrap/binary/connection.rs | 152 - .../src/bootstrap/binary/mod.rs | 7 - tools/test-framework/src/bootstrap/init.rs | 84 - tools/test-framework/src/bootstrap/mod.rs | 17 - .../src/bootstrap/nary/chain.rs | 119 - .../src/bootstrap/nary/channel.rs | 149 - .../src/bootstrap/nary/connection.rs | 75 - .../test-framework/src/bootstrap/nary/mod.rs | 13 - tools/test-framework/src/bootstrap/single.rs | 155 - tools/test-framework/src/chain/builder.rs | 114 - tools/test-framework/src/chain/config.rs | 103 - tools/test-framework/src/chain/driver.rs | 514 ---- .../src/chain/driver/interchain.rs | 140 - .../src/chain/driver/query_txs.rs | 58 - .../src/chain/driver/transfer.rs | 37 - tools/test-framework/src/chain/exec.rs | 51 - tools/test-framework/src/chain/mod.rs | 22 - tools/test-framework/src/chain/tagged.rs | 239 -- tools/test-framework/src/chain/version.rs | 29 - tools/test-framework/src/error.rs | 129 - tools/test-framework/src/framework/base.rs | 108 - .../src/framework/binary/chain.rs | 310 -- .../src/framework/binary/channel.rs | 309 -- .../src/framework/binary/connection.rs | 258 -- .../src/framework/binary/mod.rs | 8 - .../src/framework/binary/node.rs | 181 -- tools/test-framework/src/framework/mod.rs | 39 - .../src/framework/nary/chain.rs | 247 -- .../src/framework/nary/channel.rs | 257 -- .../src/framework/nary/connection.rs | 189 -- .../test-framework/src/framework/nary/mod.rs | 8 - .../test-framework/src/framework/nary/node.rs | 86 - .../test-framework/src/framework/overrides.rs | 233 -- .../src/framework/supervisor.rs | 58 - tools/test-framework/src/ibc/denom.rs | 129 - tools/test-framework/src/ibc/mod.rs | 6 - tools/test-framework/src/lib.rs | 17 - tools/test-framework/src/prelude.rs | 69 - tools/test-framework/src/relayer/chain.rs | 404 --- tools/test-framework/src/relayer/channel.rs | 151 - .../test-framework/src/relayer/connection.rs | 161 -- tools/test-framework/src/relayer/driver.rs | 89 - .../src/relayer/foreign_client.rs | 47 - tools/test-framework/src/relayer/mod.rs | 28 - tools/test-framework/src/relayer/refresh.rs | 19 - 
tools/test-framework/src/relayer/transfer.rs | 92 - tools/test-framework/src/relayer/tx.rs | 129 - .../test-framework/src/types/binary/chains.rs | 175 -- .../src/types/binary/channel.rs | 98 - .../test-framework/src/types/binary/client.rs | 51 - .../src/types/binary/connection.rs | 95 - .../src/types/binary/foreign_client.rs | 62 - tools/test-framework/src/types/binary/mod.rs | 9 - tools/test-framework/src/types/config.rs | 61 - tools/test-framework/src/types/env.rs | 118 - tools/test-framework/src/types/id.rs | 76 - tools/test-framework/src/types/mod.rs | 19 - .../test-framework/src/types/nary/aliases.rs | 34 - tools/test-framework/src/types/nary/chains.rs | 255 -- .../test-framework/src/types/nary/channel.rs | 158 - .../src/types/nary/connection.rs | 135 - .../src/types/nary/foreign_client.rs | 88 - tools/test-framework/src/types/nary/mod.rs | 66 - tools/test-framework/src/types/process.rs | 52 - tools/test-framework/src/types/single/mod.rs | 5 - tools/test-framework/src/types/single/node.rs | 178 -- tools/test-framework/src/types/tagged/dual.rs | 470 --- tools/test-framework/src/types/tagged/mod.rs | 130 - tools/test-framework/src/types/tagged/mono.rs | 410 --- tools/test-framework/src/types/wallet.rs | 210 -- tools/test-framework/src/util/array.rs | 87 - tools/test-framework/src/util/assert.rs | 36 - tools/test-framework/src/util/file.rs | 32 - tools/test-framework/src/util/mod.rs | 10 - tools/test-framework/src/util/random.rs | 48 - tools/test-framework/src/util/retry.rs | 38 - tools/test-framework/src/util/suspend.rs | 49 - 718 files changed, 291 insertions(+), 93517 deletions(-) delete mode 100644 .github/CODEOWNERS delete mode 100644 .github/ISSUE_TEMPLATE/bug-report.md delete mode 100644 .github/ISSUE_TEMPLATE/feature-request.md delete mode 100644 .github/ISSUE_TEMPLATE/release-template.md delete mode 100644 .github/ISSUE_TEMPLATE/rust-update.md delete mode 100644 .github/PULL_REQUEST_TEMPLATE.md delete mode 100644 .github/actions-rs/grcov.yml delete mode 100644 .github/dependabot.yml delete mode 100644 .github/markdown-link-check.json delete mode 100644 .github/workflows/audit.yaml delete mode 100644 .github/workflows/cargo-doc.yaml delete mode 100644 .github/workflows/e2e-gaia-current-release.yaml delete mode 100644 .github/workflows/e2e-gaia-legacy-release.yaml delete mode 100644 .github/workflows/guide.yml delete mode 100644 .github/workflows/integration.yaml delete mode 100644 .github/workflows/markdown-link-check.yml delete mode 100644 .github/workflows/no-std.yaml delete mode 100644 .github/workflows/release.yml delete mode 100644 .github/workflows/rust.yml delete mode 100644 .github/workflows/scripts.yaml delete mode 100644 .github/workflows/specs.yml delete mode 100644 ci/.gitignore delete mode 100644 ci/README.md delete mode 100755 ci/bootstrap_gaia.sh delete mode 100755 ci/build-chains.sh delete mode 100755 ci/build-ibc-chains.sh delete mode 100755 ci/build-simd.sh delete mode 100644 ci/chain_a.Dockerfile delete mode 100644 ci/chain_b.Dockerfile delete mode 100644 ci/chains/gaia/v3.0.0/ibc-0/config/app.toml delete mode 100644 ci/chains/gaia/v3.0.0/ibc-0/config/config.toml delete mode 100644 ci/chains/gaia/v3.0.0/ibc-0/config/genesis.json delete mode 100644 ci/chains/gaia/v3.0.0/ibc-0/config/gentx/gentx-d2c710169cd596180da34bc6dde5d28fd663e48c.json delete mode 100644 ci/chains/gaia/v3.0.0/ibc-0/config/node_key.json delete mode 100644 ci/chains/gaia/v3.0.0/ibc-0/config/priv_validator_key.json delete mode 100644 ci/chains/gaia/v3.0.0/ibc-0/key_seed.json delete mode 100644 
ci/chains/gaia/v3.0.0/ibc-0/keyring-test/41cf0c86e250f44deba09213d4fed3bdc32a8f39.address delete mode 100644 ci/chains/gaia/v3.0.0/ibc-0/keyring-test/eae7f54aae575a3f183d3db23c2463a5128adb09.address delete mode 100644 ci/chains/gaia/v3.0.0/ibc-0/keyring-test/user.info delete mode 100644 ci/chains/gaia/v3.0.0/ibc-0/keyring-test/validator.info delete mode 100644 ci/chains/gaia/v3.0.0/ibc-0/validator_seed.json delete mode 100644 ci/chains/gaia/v3.0.0/ibc-1/config/app.toml delete mode 100644 ci/chains/gaia/v3.0.0/ibc-1/config/config.toml delete mode 100644 ci/chains/gaia/v3.0.0/ibc-1/config/genesis.json delete mode 100644 ci/chains/gaia/v3.0.0/ibc-1/config/gentx/gentx-c15eddb601e66c008c241ccfc80f7cf3358cfa7f.json delete mode 100644 ci/chains/gaia/v3.0.0/ibc-1/config/node_key.json delete mode 100644 ci/chains/gaia/v3.0.0/ibc-1/config/priv_validator_key.json delete mode 100644 ci/chains/gaia/v3.0.0/ibc-1/key_seed.json delete mode 100644 ci/chains/gaia/v3.0.0/ibc-1/keyring-test/21e1daee21668b3fa47d372ea12148bb39d5392d.address delete mode 100644 ci/chains/gaia/v3.0.0/ibc-1/keyring-test/2508cc4982972cd1da252c4dfe6d6fa5747d9163.address delete mode 100644 ci/chains/gaia/v3.0.0/ibc-1/keyring-test/user.info delete mode 100644 ci/chains/gaia/v3.0.0/ibc-1/keyring-test/validator.info delete mode 100644 ci/chains/gaia/v3.0.0/ibc-1/validator_seed.json delete mode 100644 ci/chains/gaia/v4.0.0/ibc-0/config/app.toml delete mode 100644 ci/chains/gaia/v4.0.0/ibc-0/config/config.toml delete mode 100644 ci/chains/gaia/v4.0.0/ibc-0/config/genesis.json delete mode 100644 ci/chains/gaia/v4.0.0/ibc-0/config/gentx/gentx-f3e4bdc90fb01a1167636bb40362967569fa150f.json delete mode 100644 ci/chains/gaia/v4.0.0/ibc-0/config/node_key.json delete mode 100644 ci/chains/gaia/v4.0.0/ibc-0/config/priv_validator_key.json delete mode 100644 ci/chains/gaia/v4.0.0/ibc-0/key_seed.json delete mode 100644 ci/chains/gaia/v4.0.0/ibc-0/keyring-test/a268eea596b84a4c080ea50852e813c47ce3bb71.address delete mode 100644 ci/chains/gaia/v4.0.0/ibc-0/keyring-test/cc9f853dea50a4530fcf574e45c9db41b5d21352.address delete mode 100644 ci/chains/gaia/v4.0.0/ibc-0/keyring-test/user.info delete mode 100644 ci/chains/gaia/v4.0.0/ibc-0/keyring-test/validator.info delete mode 100644 ci/chains/gaia/v4.0.0/ibc-0/validator_seed.json delete mode 100644 ci/chains/gaia/v4.0.0/ibc-1/config/app.toml delete mode 100644 ci/chains/gaia/v4.0.0/ibc-1/config/config.toml delete mode 100644 ci/chains/gaia/v4.0.0/ibc-1/config/genesis.json delete mode 100644 ci/chains/gaia/v4.0.0/ibc-1/config/gentx/gentx-b6f08e59e6071a9ddfb7f66d08723b1bef442f71.json delete mode 100644 ci/chains/gaia/v4.0.0/ibc-1/config/node_key.json delete mode 100644 ci/chains/gaia/v4.0.0/ibc-1/config/priv_validator_key.json delete mode 100644 ci/chains/gaia/v4.0.0/ibc-1/key_seed.json delete mode 100644 ci/chains/gaia/v4.0.0/ibc-1/keyring-test/638d8d088bdfb2c0751757ad9ca7ed02e619ac76.address delete mode 100644 ci/chains/gaia/v4.0.0/ibc-1/keyring-test/fd3b601d0c67b11a7e38da39133259d6fcd4f969.address delete mode 100644 ci/chains/gaia/v4.0.0/ibc-1/keyring-test/user.info delete mode 100644 ci/chains/gaia/v4.0.0/ibc-1/keyring-test/validator.info delete mode 100644 ci/chains/gaia/v4.0.0/ibc-1/validator_seed.json delete mode 100644 ci/chains/gaia/v4.1.0/ibc-0/config/addrbook.json delete mode 100644 ci/chains/gaia/v4.1.0/ibc-0/config/app.toml delete mode 100644 ci/chains/gaia/v4.1.0/ibc-0/config/config.toml delete mode 100644 ci/chains/gaia/v4.1.0/ibc-0/config/genesis.json delete mode 100644 
ci/chains/gaia/v4.1.0/ibc-0/config/gentx/gentx-45cc9bede9a7d77955b8770b47267069f9d910b6.json delete mode 100644 ci/chains/gaia/v4.1.0/ibc-0/config/node_key.json delete mode 100644 ci/chains/gaia/v4.1.0/ibc-0/config/priv_validator_key.json delete mode 100644 ci/chains/gaia/v4.1.0/ibc-0/key_seed.json delete mode 100644 ci/chains/gaia/v4.1.0/ibc-0/keyring-test/2a2c73f7f3ed82ac613f8458685c38f800ca221e.address delete mode 100644 ci/chains/gaia/v4.1.0/ibc-0/keyring-test/77f61459b63c38c20d493d9ef1a6e35889d3576f.address delete mode 100644 ci/chains/gaia/v4.1.0/ibc-0/keyring-test/user.info delete mode 100644 ci/chains/gaia/v4.1.0/ibc-0/keyring-test/validator.info delete mode 100644 ci/chains/gaia/v4.1.0/ibc-0/validator_seed.json delete mode 100644 ci/chains/gaia/v4.1.0/ibc-1/config/app.toml delete mode 100644 ci/chains/gaia/v4.1.0/ibc-1/config/config.toml delete mode 100644 ci/chains/gaia/v4.1.0/ibc-1/config/genesis.json delete mode 100644 ci/chains/gaia/v4.1.0/ibc-1/config/gentx/gentx-b56974b6f467580c92b7a0fe752797eca18915d0.json delete mode 100644 ci/chains/gaia/v4.1.0/ibc-1/config/node_key.json delete mode 100644 ci/chains/gaia/v4.1.0/ibc-1/config/priv_validator_key.json delete mode 100644 ci/chains/gaia/v4.1.0/ibc-1/key_seed.json delete mode 100644 ci/chains/gaia/v4.1.0/ibc-1/keyring-test/ad4e2f3a32a22175d13d794ae1340b116ecf0c9c.address delete mode 100644 ci/chains/gaia/v4.1.0/ibc-1/keyring-test/ff881d07c8ddd9893a68be98c361ca81ffa7ded9.address delete mode 100644 ci/chains/gaia/v4.1.0/ibc-1/keyring-test/user.info delete mode 100644 ci/chains/gaia/v4.1.0/ibc-1/keyring-test/validator.info delete mode 100644 ci/chains/gaia/v4.1.0/ibc-1/validator_seed.json delete mode 100644 ci/chains/gaia/v4.2.0/ibc-0/config/addrbook.json delete mode 100644 ci/chains/gaia/v4.2.0/ibc-0/config/app.toml delete mode 100644 ci/chains/gaia/v4.2.0/ibc-0/config/config.toml delete mode 100644 ci/chains/gaia/v4.2.0/ibc-0/config/genesis.json delete mode 100644 ci/chains/gaia/v4.2.0/ibc-0/config/gentx/gentx-240c6c414bfb15dab6e170c7b1781de818736374.json delete mode 100644 ci/chains/gaia/v4.2.0/ibc-0/config/node_key.json delete mode 100644 ci/chains/gaia/v4.2.0/ibc-0/config/priv_validator_key.json delete mode 100644 ci/chains/gaia/v4.2.0/ibc-0/keyring-test/0a112e9ffa1e036981c14124485bdd543654b343.address delete mode 100644 ci/chains/gaia/v4.2.0/ibc-0/keyring-test/8c1145fd1c8fa0119e95466d9fa27320bfcc0c1e.address delete mode 100644 ci/chains/gaia/v4.2.0/ibc-0/keyring-test/f5580fd9ee89a00b0907ecf985c4febfc1672d3d.address delete mode 100644 ci/chains/gaia/v4.2.0/ibc-0/keyring-test/user.info delete mode 100644 ci/chains/gaia/v4.2.0/ibc-0/keyring-test/user2.info delete mode 100644 ci/chains/gaia/v4.2.0/ibc-0/keyring-test/validator.info delete mode 100644 ci/chains/gaia/v4.2.0/ibc-0/user2_seed.json delete mode 100644 ci/chains/gaia/v4.2.0/ibc-0/user_seed.json delete mode 100644 ci/chains/gaia/v4.2.0/ibc-0/validator_seed.json delete mode 100644 ci/chains/gaia/v4.2.0/ibc-1/config/app.toml delete mode 100644 ci/chains/gaia/v4.2.0/ibc-1/config/config.toml delete mode 100644 ci/chains/gaia/v4.2.0/ibc-1/config/genesis.json delete mode 100644 ci/chains/gaia/v4.2.0/ibc-1/config/gentx/gentx-69dd5b43951124b2e6e2d744328041a907f3968c.json delete mode 100644 ci/chains/gaia/v4.2.0/ibc-1/config/node_key.json delete mode 100644 ci/chains/gaia/v4.2.0/ibc-1/config/priv_validator_key.json delete mode 100644 ci/chains/gaia/v4.2.0/ibc-1/keyring-test/4b137452f253ce8ee2ced8317a8b6e5a0aaae76a.address delete mode 100644 
ci/chains/gaia/v4.2.0/ibc-1/keyring-test/779c0556fb667f52d759b741f7b29a99c7f0f185.address delete mode 100644 ci/chains/gaia/v4.2.0/ibc-1/keyring-test/a8319df11aceb1a5cdc3aab2e4cfc9bcb8f7838c.address delete mode 100644 ci/chains/gaia/v4.2.0/ibc-1/keyring-test/user.info delete mode 100644 ci/chains/gaia/v4.2.0/ibc-1/keyring-test/user2.info delete mode 100644 ci/chains/gaia/v4.2.0/ibc-1/keyring-test/validator.info delete mode 100644 ci/chains/gaia/v4.2.0/ibc-1/user2_seed.json delete mode 100644 ci/chains/gaia/v4.2.0/ibc-1/user_seed.json delete mode 100644 ci/chains/gaia/v4.2.0/ibc-1/validator_seed.json delete mode 100644 ci/chains/gaia/v5.0.5/ibc-0/config/addrbook.json delete mode 100644 ci/chains/gaia/v5.0.5/ibc-0/config/app.toml delete mode 100644 ci/chains/gaia/v5.0.5/ibc-0/config/client.toml delete mode 100644 ci/chains/gaia/v5.0.5/ibc-0/config/config.toml delete mode 100644 ci/chains/gaia/v5.0.5/ibc-0/config/genesis.json delete mode 100644 ci/chains/gaia/v5.0.5/ibc-0/config/gentx/gentx-0a67e76f6743997bde26b522ef43e886b1baa95d.json delete mode 100644 ci/chains/gaia/v5.0.5/ibc-0/config/node_key.json delete mode 100644 ci/chains/gaia/v5.0.5/ibc-0/config/priv_validator_key.json delete mode 100644 ci/chains/gaia/v5.0.5/ibc-0/keyring-test/3dccd2a276b243a6f235f16430bb45828989382d.address delete mode 100644 ci/chains/gaia/v5.0.5/ibc-0/keyring-test/4da888907bd0f30dab598252aea96a3ae97b97ee.address delete mode 100644 ci/chains/gaia/v5.0.5/ibc-0/keyring-test/d96ea1dad97dd40751c1d8fcd577d3eccc83847d.address delete mode 100644 ci/chains/gaia/v5.0.5/ibc-0/keyring-test/user.info delete mode 100644 ci/chains/gaia/v5.0.5/ibc-0/keyring-test/user2.info delete mode 100644 ci/chains/gaia/v5.0.5/ibc-0/keyring-test/validator.info delete mode 100644 ci/chains/gaia/v5.0.5/ibc-0/user2_seed.json delete mode 100644 ci/chains/gaia/v5.0.5/ibc-0/user_seed.json delete mode 100644 ci/chains/gaia/v5.0.5/ibc-0/validator_seed.json delete mode 100644 ci/chains/gaia/v5.0.5/ibc-1/config/app.toml delete mode 100644 ci/chains/gaia/v5.0.5/ibc-1/config/client.toml delete mode 100644 ci/chains/gaia/v5.0.5/ibc-1/config/config.toml delete mode 100644 ci/chains/gaia/v5.0.5/ibc-1/config/genesis.json delete mode 100644 ci/chains/gaia/v5.0.5/ibc-1/config/gentx/gentx-1feb1ad566fa381f6e6b4afd639559e0505fd9d0.json delete mode 100644 ci/chains/gaia/v5.0.5/ibc-1/config/node_key.json delete mode 100644 ci/chains/gaia/v5.0.5/ibc-1/config/priv_validator_key.json delete mode 100644 ci/chains/gaia/v5.0.5/ibc-1/keyring-test/340bb9c429320d582a78e3c9997dedb9fe307082.address delete mode 100644 ci/chains/gaia/v5.0.5/ibc-1/keyring-test/408f79a6bfd105eb477b2433c8459a2aea93c34f.address delete mode 100644 ci/chains/gaia/v5.0.5/ibc-1/keyring-test/faebec57446ff6086960a12ad95a6d31787df226.address delete mode 100644 ci/chains/gaia/v5.0.5/ibc-1/keyring-test/user.info delete mode 100644 ci/chains/gaia/v5.0.5/ibc-1/keyring-test/user2.info delete mode 100644 ci/chains/gaia/v5.0.5/ibc-1/keyring-test/validator.info delete mode 100644 ci/chains/gaia/v5.0.5/ibc-1/user2_seed.json delete mode 100644 ci/chains/gaia/v5.0.5/ibc-1/user_seed.json delete mode 100644 ci/chains/gaia/v5.0.5/ibc-1/validator_seed.json delete mode 100644 ci/chains/gaia/v5.0.8/ibc-0/config/addrbook.json delete mode 100644 ci/chains/gaia/v5.0.8/ibc-0/config/app.toml delete mode 100644 ci/chains/gaia/v5.0.8/ibc-0/config/client.toml delete mode 100644 ci/chains/gaia/v5.0.8/ibc-0/config/config.toml delete mode 100644 ci/chains/gaia/v5.0.8/ibc-0/config/genesis.json delete mode 100644 
ci/chains/gaia/v5.0.8/ibc-0/config/gentx/gentx-e644f6ada6dc23fa6927484cca6e00183dc3fe0b.json delete mode 100644 ci/chains/gaia/v5.0.8/ibc-0/config/node_key.json delete mode 100644 ci/chains/gaia/v5.0.8/ibc-0/config/priv_validator_key.json delete mode 100644 ci/chains/gaia/v5.0.8/ibc-0/keyring-test/5a057346bb8efbb409b474b8ebdda158b91f6070.address delete mode 100644 ci/chains/gaia/v5.0.8/ibc-0/keyring-test/64fab9a69b7a15ee095fb3da8055a6445188607e.address delete mode 100644 ci/chains/gaia/v5.0.8/ibc-0/keyring-test/c99c7ba6e4891e5981366306f27b036bfb0d9b1b.address delete mode 100644 ci/chains/gaia/v5.0.8/ibc-0/keyring-test/user.info delete mode 100644 ci/chains/gaia/v5.0.8/ibc-0/keyring-test/user2.info delete mode 100644 ci/chains/gaia/v5.0.8/ibc-0/keyring-test/validator.info delete mode 100644 ci/chains/gaia/v5.0.8/ibc-0/user2_seed.json delete mode 100644 ci/chains/gaia/v5.0.8/ibc-0/user_seed.json delete mode 100644 ci/chains/gaia/v5.0.8/ibc-0/validator_seed.json delete mode 100644 ci/chains/gaia/v5.0.8/ibc-1/config/app.toml delete mode 100644 ci/chains/gaia/v5.0.8/ibc-1/config/client.toml delete mode 100644 ci/chains/gaia/v5.0.8/ibc-1/config/config.toml delete mode 100644 ci/chains/gaia/v5.0.8/ibc-1/config/genesis.json delete mode 100644 ci/chains/gaia/v5.0.8/ibc-1/config/gentx/gentx-b2617c1100350fcb281cc6749f1f79956222be5a.json delete mode 100644 ci/chains/gaia/v5.0.8/ibc-1/config/node_key.json delete mode 100644 ci/chains/gaia/v5.0.8/ibc-1/config/priv_validator_key.json delete mode 100644 ci/chains/gaia/v5.0.8/ibc-1/keyring-test/3744541425eebf800a64ed9b19e280e1e981685c.address delete mode 100644 ci/chains/gaia/v5.0.8/ibc-1/keyring-test/9680756d779d88bdb47a8f0b5522b135812c3a10.address delete mode 100644 ci/chains/gaia/v5.0.8/ibc-1/keyring-test/c109ea463f3fe3dfdb6d98037a0c19e2b6d44ca0.address delete mode 100644 ci/chains/gaia/v5.0.8/ibc-1/keyring-test/user.info delete mode 100644 ci/chains/gaia/v5.0.8/ibc-1/keyring-test/user2.info delete mode 100644 ci/chains/gaia/v5.0.8/ibc-1/keyring-test/validator.info delete mode 100644 ci/chains/gaia/v5.0.8/ibc-1/user2_seed.json delete mode 100644 ci/chains/gaia/v5.0.8/ibc-1/user_seed.json delete mode 100644 ci/chains/gaia/v5.0.8/ibc-1/validator_seed.json delete mode 100644 ci/chains/gaia/v6.0.0/ibc-0/config/addrbook.json delete mode 100644 ci/chains/gaia/v6.0.0/ibc-0/config/app.toml delete mode 100644 ci/chains/gaia/v6.0.0/ibc-0/config/client.toml delete mode 100644 ci/chains/gaia/v6.0.0/ibc-0/config/config.toml delete mode 100644 ci/chains/gaia/v6.0.0/ibc-0/config/genesis.json delete mode 100644 ci/chains/gaia/v6.0.0/ibc-0/config/gentx/gentx-f8825df2c406207c64cbc02b1f543a53a46094e3.json delete mode 100644 ci/chains/gaia/v6.0.0/ibc-0/config/node_key.json delete mode 100644 ci/chains/gaia/v6.0.0/ibc-0/config/priv_validator_key.json delete mode 100644 ci/chains/gaia/v6.0.0/ibc-0/keyring-test/1b49482294206b404476f840eb8468a09bf47962.address delete mode 100644 ci/chains/gaia/v6.0.0/ibc-0/keyring-test/6f5e095fc00ac70c0285e741272d7978f9edcd21.address delete mode 100644 ci/chains/gaia/v6.0.0/ibc-0/keyring-test/9bd0eccd58edadaadadea911cbba3de68785fc86.address delete mode 100644 ci/chains/gaia/v6.0.0/ibc-0/keyring-test/user.info delete mode 100644 ci/chains/gaia/v6.0.0/ibc-0/keyring-test/user2.info delete mode 100644 ci/chains/gaia/v6.0.0/ibc-0/keyring-test/validator.info delete mode 100644 ci/chains/gaia/v6.0.0/ibc-0/user2_seed.json delete mode 100644 ci/chains/gaia/v6.0.0/ibc-0/user_seed.json delete mode 100644 ci/chains/gaia/v6.0.0/ibc-0/validator_seed.json 
delete mode 100644 ci/chains/gaia/v6.0.0/ibc-1/config/app.toml delete mode 100644 ci/chains/gaia/v6.0.0/ibc-1/config/client.toml delete mode 100644 ci/chains/gaia/v6.0.0/ibc-1/config/config.toml delete mode 100644 ci/chains/gaia/v6.0.0/ibc-1/config/genesis.json delete mode 100644 ci/chains/gaia/v6.0.0/ibc-1/config/gentx/gentx-bb5010f06487a9d315952e3e57c6dadac4ac425b.json delete mode 100644 ci/chains/gaia/v6.0.0/ibc-1/config/node_key.json delete mode 100644 ci/chains/gaia/v6.0.0/ibc-1/config/priv_validator_key.json delete mode 100644 ci/chains/gaia/v6.0.0/ibc-1/keyring-test/118acf61acfae082089bc69ce6bfcf98a59ab78c.address delete mode 100644 ci/chains/gaia/v6.0.0/ibc-1/keyring-test/25725cc6039f018e7fc78971b5af151cb0b52338.address delete mode 100644 ci/chains/gaia/v6.0.0/ibc-1/keyring-test/897e3a30d6196bf49e931d458ab59b95140dee6f.address delete mode 100644 ci/chains/gaia/v6.0.0/ibc-1/keyring-test/user.info delete mode 100644 ci/chains/gaia/v6.0.0/ibc-1/keyring-test/user2.info delete mode 100644 ci/chains/gaia/v6.0.0/ibc-1/keyring-test/validator.info delete mode 100644 ci/chains/gaia/v6.0.0/ibc-1/user2_seed.json delete mode 100644 ci/chains/gaia/v6.0.0/ibc-1/user_seed.json delete mode 100644 ci/chains/gaia/v6.0.0/ibc-1/validator_seed.json delete mode 100644 ci/chains/gaia/v7.0.1/ibc-0/config/addrbook.json delete mode 100644 ci/chains/gaia/v7.0.1/ibc-0/config/app.toml delete mode 100644 ci/chains/gaia/v7.0.1/ibc-0/config/client.toml delete mode 100644 ci/chains/gaia/v7.0.1/ibc-0/config/config.toml delete mode 100644 ci/chains/gaia/v7.0.1/ibc-0/config/genesis.json delete mode 100644 ci/chains/gaia/v7.0.1/ibc-0/config/gentx/gentx-3494ad7d61f006035641dbcd204dcbdb25904c2b.json delete mode 100644 ci/chains/gaia/v7.0.1/ibc-0/config/node_key.json delete mode 100644 ci/chains/gaia/v7.0.1/ibc-0/config/priv_validator_key.json delete mode 100644 ci/chains/gaia/v7.0.1/ibc-0/keyring-test/14a8ece6445bc37c895cdfe8da8038501e667c70.address delete mode 100644 ci/chains/gaia/v7.0.1/ibc-0/keyring-test/806fffd74ec124eae9d0485340227095f6756908.address delete mode 100644 ci/chains/gaia/v7.0.1/ibc-0/keyring-test/d4e446759163a216b80128e371ccb0ae91ddeb45.address delete mode 100644 ci/chains/gaia/v7.0.1/ibc-0/keyring-test/user.info delete mode 100644 ci/chains/gaia/v7.0.1/ibc-0/keyring-test/user2.info delete mode 100644 ci/chains/gaia/v7.0.1/ibc-0/keyring-test/validator.info delete mode 100644 ci/chains/gaia/v7.0.1/ibc-0/user2_seed.json delete mode 100644 ci/chains/gaia/v7.0.1/ibc-0/user_seed.json delete mode 100644 ci/chains/gaia/v7.0.1/ibc-0/validator_seed.json delete mode 100644 ci/chains/gaia/v7.0.1/ibc-1/config/app.toml delete mode 100644 ci/chains/gaia/v7.0.1/ibc-1/config/client.toml delete mode 100644 ci/chains/gaia/v7.0.1/ibc-1/config/config.toml delete mode 100644 ci/chains/gaia/v7.0.1/ibc-1/config/genesis.json delete mode 100644 ci/chains/gaia/v7.0.1/ibc-1/config/gentx/gentx-56790e64fa1c26c79a5c03ae6432973f04e293cd.json delete mode 100644 ci/chains/gaia/v7.0.1/ibc-1/config/node_key.json delete mode 100644 ci/chains/gaia/v7.0.1/ibc-1/config/priv_validator_key.json delete mode 100644 ci/chains/gaia/v7.0.1/ibc-1/keyring-test/5221e440c5d1e0821d2e98931f332093d436e87b.address delete mode 100644 ci/chains/gaia/v7.0.1/ibc-1/keyring-test/d23ae61575324aafaca704a424074e01f2da5109.address delete mode 100644 ci/chains/gaia/v7.0.1/ibc-1/keyring-test/dc4dabdbd3b361a2038cfb43067e349a805c6c0f.address delete mode 100644 ci/chains/gaia/v7.0.1/ibc-1/keyring-test/user.info delete mode 100644 
ci/chains/gaia/v7.0.1/ibc-1/keyring-test/user2.info delete mode 100644 ci/chains/gaia/v7.0.1/ibc-1/keyring-test/validator.info delete mode 100644 ci/chains/gaia/v7.0.1/ibc-1/user2_seed.json delete mode 100644 ci/chains/gaia/v7.0.1/ibc-1/user_seed.json delete mode 100644 ci/chains/gaia/v7.0.1/ibc-1/validator_seed.json delete mode 100644 ci/docker-compose-gaia-current.yml delete mode 100644 ci/docker-compose-gaia-legacy.yml delete mode 100755 ci/e2e.sh delete mode 100644 ci/gaia.Dockerfile delete mode 100644 ci/hermes.Dockerfile delete mode 100644 ci/no-std-check/.gitignore delete mode 100644 ci/no-std-check/Cargo.lock delete mode 100644 ci/no-std-check/Cargo.toml delete mode 100644 ci/no-std-check/Makefile delete mode 100644 ci/no-std-check/README.md delete mode 100644 ci/no-std-check/src/lib.rs delete mode 100644 ci/relayer.Dockerfile delete mode 100755 ci/run-gaiad.sh delete mode 100644 ci/simd.Dockerfile delete mode 100644 ci/simple_config.toml delete mode 100644 docs/spec/README.md delete mode 100644 docs/spec/connection-handshake/CHANGELOG.md delete mode 100644 docs/spec/connection-handshake/L1_2.md delete mode 100644 docs/spec/connection-handshake/L2-tla/Environment.tla delete mode 100644 docs/spec/connection-handshake/L2-tla/ICS3Module.tla delete mode 100644 docs/spec/connection-handshake/L2-tla/ICS3Types.tla delete mode 100644 docs/spec/connection-handshake/L2-tla/ICS3Utils.tla delete mode 100644 docs/spec/connection-handshake/L2-tla/README.md delete mode 100644 docs/spec/connection-handshake/README.md delete mode 100644 docs/spec/relayer/Definitions.md delete mode 100644 docs/spec/relayer/Packets.md delete mode 100644 docs/spec/relayer/Relayer.md delete mode 100644 docs/spec/tla/client/Chain.tla delete mode 100644 docs/spec/tla/client/ICS02ClientHandlers.tla delete mode 100644 docs/spec/tla/client/ICS02Definitions.tla delete mode 100644 docs/spec/tla/client/ICS02SingleChainEnvironment.tla delete mode 100644 docs/spec/tla/client/ICS02TwoChainsEnvironment.tla delete mode 100644 docs/spec/tla/client/MC_SingleChain.tla delete mode 100644 docs/spec/tla/client/MC_TwoChains.tla delete mode 100644 docs/spec/tla/client/README.md delete mode 100644 docs/spec/tla/fungible-token-transfer/Bank.tla delete mode 100644 docs/spec/tla/fungible-token-transfer/Chain.tla delete mode 100644 docs/spec/tla/fungible-token-transfer/IBCTokenTransfer.cfg delete mode 100644 docs/spec/tla/fungible-token-transfer/IBCTokenTransfer.tla delete mode 100644 docs/spec/tla/fungible-token-transfer/IBCTokenTransferDefinitions.tla delete mode 100644 docs/spec/tla/fungible-token-transfer/ICS04PacketHandlers.tla delete mode 100644 docs/spec/tla/fungible-token-transfer/ICS20FungibleTokenTransferHandlers.tla delete mode 100644 docs/spec/tla/fungible-token-transfer/MC_IBCTokenTransfer.tla delete mode 100644 docs/spec/tla/fungible-token-transfer/README.md delete mode 100644 docs/spec/tla/ibc-core/Chain.tla delete mode 100644 docs/spec/tla/ibc-core/IBCCore.cfg delete mode 100644 docs/spec/tla/ibc-core/IBCCore.tla delete mode 100644 docs/spec/tla/ibc-core/IBCCoreDefinitions.tla delete mode 100644 docs/spec/tla/ibc-core/ICS02ClientHandlers.tla delete mode 100644 docs/spec/tla/ibc-core/ICS03ConnectionHandlers.tla delete mode 100644 docs/spec/tla/ibc-core/ICS04ChannelHandlers.tla delete mode 100644 docs/spec/tla/ibc-core/ICS04PacketHandlers.tla delete mode 100644 docs/spec/tla/ibc-core/ICS18Relayer.tla delete mode 100644 docs/spec/tla/ibc-core/MC_IBCCore.tla delete mode 100644 docs/spec/tla/ibc-core/README.md delete mode 100644 
docs/spec/tla/packet-delay/Chain.tla delete mode 100644 docs/spec/tla/packet-delay/IBCPacketDelay.cfg delete mode 100644 docs/spec/tla/packet-delay/IBCPacketDelay.tla delete mode 100644 docs/spec/tla/packet-delay/IBCPacketDelayDefinitions.tla delete mode 100644 docs/spec/tla/packet-delay/ICS04PacketHandlers.tla delete mode 100644 docs/spec/tla/packet-delay/MC_IBCPacketDelay.tla delete mode 100644 docs/spec/tla/packet-delay/README.md delete mode 100644 e2e/e2e/__init__.py delete mode 100644 e2e/e2e/channel.py delete mode 100644 e2e/e2e/client.py delete mode 100644 e2e/e2e/cmd.py delete mode 100644 e2e/e2e/common.py delete mode 100644 e2e/e2e/connection.py delete mode 100644 e2e/e2e/packet.py delete mode 100644 e2e/e2e/relayer.py delete mode 100644 e2e/pyrightconfig.json delete mode 100755 e2e/run.py delete mode 100644 guide/.gitignore delete mode 100644 guide/README.md delete mode 100644 guide/book.toml delete mode 100644 guide/mermaid-init.js delete mode 100644 guide/mermaid.min.js delete mode 100644 guide/src/CNAME delete mode 100644 guide/src/SUMMARY.md delete mode 100644 guide/src/commands/config.md delete mode 100644 guide/src/commands/global.md delete mode 100644 guide/src/commands/index.md delete mode 100644 guide/src/commands/keys/index.md delete mode 100644 guide/src/commands/listen/index.md delete mode 100644 guide/src/commands/misbehaviour/index.md delete mode 100644 guide/src/commands/path-setup/channels.md delete mode 100644 guide/src/commands/path-setup/clients.md delete mode 100644 guide/src/commands/path-setup/connections.md delete mode 100644 guide/src/commands/path-setup/index.md delete mode 100644 guide/src/commands/queries/channel.md delete mode 100644 guide/src/commands/queries/client.md delete mode 100644 guide/src/commands/queries/connection.md delete mode 100644 guide/src/commands/queries/index.md delete mode 100644 guide/src/commands/queries/packet.md delete mode 100644 guide/src/commands/queries/tx.md delete mode 100644 guide/src/commands/raw/channel-close.md delete mode 100644 guide/src/commands/raw/channel-open.md delete mode 100644 guide/src/commands/raw/client.md delete mode 100644 guide/src/commands/raw/connection.md delete mode 100644 guide/src/commands/raw/index.md delete mode 100644 guide/src/commands/raw/packet.md delete mode 100644 guide/src/commands/relaying/clear.md delete mode 100644 guide/src/commands/relaying/handshakes.md delete mode 100644 guide/src/commands/relaying/index.md delete mode 100644 guide/src/commands/relaying/packets.md delete mode 100644 guide/src/commands/upgrade/index.md delete mode 100644 guide/src/commands/upgrade/test.md delete mode 100644 guide/src/config.md delete mode 100644 guide/src/example-config.md delete mode 100644 guide/src/features.md delete mode 100644 guide/src/features/matrix.md delete mode 100644 guide/src/getting_started.md delete mode 100644 guide/src/glossary.md delete mode 100644 guide/src/help.md delete mode 100644 guide/src/images/grafana.png delete mode 100644 guide/src/index.md delete mode 100644 guide/src/installation.md delete mode 100644 guide/src/pre_requisites.md delete mode 100644 guide/src/relayer.md delete mode 100644 guide/src/rest-api.md delete mode 100644 guide/src/telemetry.md delete mode 100644 guide/src/tutorials/index.md delete mode 100644 guide/src/tutorials/local-chains/gaia.md delete mode 100644 guide/src/tutorials/local-chains/identifiers.md delete mode 100644 guide/src/tutorials/local-chains/index.md delete mode 100644 guide/src/tutorials/local-chains/raw/channel.md delete mode 100644 
guide/src/tutorials/local-chains/raw/client.md delete mode 100644 guide/src/tutorials/local-chains/raw/connection.md delete mode 100644 guide/src/tutorials/local-chains/raw/index.md delete mode 100644 guide/src/tutorials/local-chains/raw/packet.md delete mode 100644 guide/src/tutorials/local-chains/relay-paths/create-new-path.md delete mode 100644 guide/src/tutorials/local-chains/relay-paths/index.md delete mode 100644 guide/src/tutorials/local-chains/relay-paths/multiple-paths.md delete mode 100644 guide/src/tutorials/local-chains/start.md delete mode 100644 guide/theme/css/chrome.css delete mode 100644 guide/theme/css/variables.css delete mode 100644 relayer-cli/.gitignore delete mode 100644 relayer-cli/Cargo.toml delete mode 100644 relayer-cli/README.md delete mode 100644 relayer-cli/build.rs delete mode 100644 relayer-cli/src/application.rs delete mode 100644 relayer-cli/src/bin/hermes/main.rs delete mode 100644 relayer-cli/src/cli_utils.rs delete mode 100644 relayer-cli/src/commands.rs delete mode 100644 relayer-cli/src/commands/clear.rs delete mode 100644 relayer-cli/src/commands/completions.rs delete mode 100644 relayer-cli/src/commands/config.rs delete mode 100644 relayer-cli/src/commands/config/validate.rs delete mode 100644 relayer-cli/src/commands/create.rs delete mode 100644 relayer-cli/src/commands/create/channel.rs delete mode 100644 relayer-cli/src/commands/create/connection.rs delete mode 100644 relayer-cli/src/commands/health.rs delete mode 100644 relayer-cli/src/commands/keys.rs delete mode 100644 relayer-cli/src/commands/keys/add.rs delete mode 100644 relayer-cli/src/commands/keys/balance.rs delete mode 100644 relayer-cli/src/commands/keys/delete.rs delete mode 100644 relayer-cli/src/commands/keys/list.rs delete mode 100644 relayer-cli/src/commands/listen.rs delete mode 100644 relayer-cli/src/commands/misbehaviour.rs delete mode 100644 relayer-cli/src/commands/query.rs delete mode 100644 relayer-cli/src/commands/query/channel.rs delete mode 100644 relayer-cli/src/commands/query/channel_client.rs delete mode 100644 relayer-cli/src/commands/query/channel_ends.rs delete mode 100644 relayer-cli/src/commands/query/channels.rs delete mode 100644 relayer-cli/src/commands/query/client.rs delete mode 100644 relayer-cli/src/commands/query/clients.rs delete mode 100644 relayer-cli/src/commands/query/connection.rs delete mode 100644 relayer-cli/src/commands/query/connections.rs delete mode 100644 relayer-cli/src/commands/query/packet.rs delete mode 100644 relayer-cli/src/commands/query/packet/ack.rs delete mode 100644 relayer-cli/src/commands/query/packet/acks.rs delete mode 100644 relayer-cli/src/commands/query/packet/commitment.rs delete mode 100644 relayer-cli/src/commands/query/packet/commitments.rs delete mode 100644 relayer-cli/src/commands/query/packet/pending.rs delete mode 100644 relayer-cli/src/commands/query/packet/unreceived_acks.rs delete mode 100644 relayer-cli/src/commands/query/packet/unreceived_packets.rs delete mode 100644 relayer-cli/src/commands/query/tx.rs delete mode 100644 relayer-cli/src/commands/query/tx/events.rs delete mode 100644 relayer-cli/src/commands/start.rs delete mode 100644 relayer-cli/src/commands/tx.rs delete mode 100644 relayer-cli/src/commands/tx/channel.rs delete mode 100644 relayer-cli/src/commands/tx/client.rs delete mode 100644 relayer-cli/src/commands/tx/connection.rs delete mode 100644 relayer-cli/src/commands/tx/packet.rs delete mode 100644 relayer-cli/src/commands/tx/transfer.rs delete mode 100644 relayer-cli/src/commands/tx/upgrade.rs 
delete mode 100644 relayer-cli/src/commands/update.rs delete mode 100644 relayer-cli/src/commands/upgrade.rs delete mode 100644 relayer-cli/src/commands/version.rs delete mode 100644 relayer-cli/src/components.rs delete mode 100644 relayer-cli/src/conclude.rs delete mode 100644 relayer-cli/src/config.rs delete mode 100644 relayer-cli/src/entry.rs delete mode 100644 relayer-cli/src/error.rs delete mode 100644 relayer-cli/src/lib.rs delete mode 100644 relayer-cli/src/prelude.rs delete mode 100644 relayer-cli/tests/acceptance.rs delete mode 100644 relayer-cli/tests/fixtures/two_chains.toml delete mode 100644 relayer-rest/Cargo.toml delete mode 100644 relayer-rest/README.md delete mode 100644 relayer-rest/src/config.rs delete mode 100644 relayer-rest/src/handle.rs delete mode 100644 relayer-rest/src/lib.rs delete mode 100644 relayer-rest/src/server.rs delete mode 100644 relayer-rest/tests/mock.rs delete mode 100644 relayer/Cargo.toml delete mode 100644 relayer/README.md delete mode 100644 relayer/src/account.rs delete mode 100644 relayer/src/cache.rs delete mode 100644 relayer/src/chain.rs delete mode 100644 relayer/src/chain/client.rs delete mode 100644 relayer/src/chain/cosmos.rs delete mode 100644 relayer/src/chain/cosmos/batch.rs delete mode 100644 relayer/src/chain/cosmos/client.rs delete mode 100644 relayer/src/chain/cosmos/compatibility.rs delete mode 100644 relayer/src/chain/cosmos/encode.rs delete mode 100644 relayer/src/chain/cosmos/estimate.rs delete mode 100644 relayer/src/chain/cosmos/gas.rs delete mode 100644 relayer/src/chain/cosmos/query.rs delete mode 100644 relayer/src/chain/cosmos/query/account.rs delete mode 100644 relayer/src/chain/cosmos/query/balance.rs delete mode 100644 relayer/src/chain/cosmos/query/status.rs delete mode 100644 relayer/src/chain/cosmos/query/tx.rs delete mode 100644 relayer/src/chain/cosmos/retry.rs delete mode 100644 relayer/src/chain/cosmos/simulate.rs delete mode 100644 relayer/src/chain/cosmos/tx.rs delete mode 100644 relayer/src/chain/cosmos/types/account.rs delete mode 100644 relayer/src/chain/cosmos/types/config.rs delete mode 100644 relayer/src/chain/cosmos/types/gas.rs delete mode 100644 relayer/src/chain/cosmos/types/mod.rs delete mode 100644 relayer/src/chain/cosmos/types/tx.rs delete mode 100644 relayer/src/chain/cosmos/version.rs delete mode 100644 relayer/src/chain/cosmos/wait.rs delete mode 100644 relayer/src/chain/counterparty.rs delete mode 100644 relayer/src/chain/endpoint.rs delete mode 100644 relayer/src/chain/handle.rs delete mode 100644 relayer/src/chain/handle/base.rs delete mode 100644 relayer/src/chain/handle/cache.rs delete mode 100644 relayer/src/chain/handle/counting.rs delete mode 100644 relayer/src/chain/mock.rs delete mode 100644 relayer/src/chain/requests.rs delete mode 100644 relayer/src/chain/runtime.rs delete mode 100644 relayer/src/chain/tracking.rs delete mode 100644 relayer/src/channel.rs delete mode 100644 relayer/src/channel/error.rs delete mode 100644 relayer/src/channel/version.rs delete mode 100644 relayer/src/config.rs delete mode 100644 relayer/src/config/error.rs delete mode 100644 relayer/src/config/filter.rs delete mode 100644 relayer/src/config/proof_specs.rs delete mode 100644 relayer/src/config/types.rs delete mode 100644 relayer/src/connection.rs delete mode 100644 relayer/src/connection/error.rs delete mode 100644 relayer/src/error.rs delete mode 100644 relayer/src/event.rs delete mode 100644 relayer/src/event/bus.rs delete mode 100644 relayer/src/event/monitor.rs delete mode 100644 
relayer/src/event/monitor/error.rs delete mode 100644 relayer/src/event/rpc.rs delete mode 100644 relayer/src/foreign_client.rs delete mode 100644 relayer/src/keyring.rs delete mode 100644 relayer/src/keyring/errors.rs delete mode 100644 relayer/src/keyring/pub_key.rs delete mode 100644 relayer/src/lib.rs delete mode 100644 relayer/src/light_client.rs delete mode 100644 relayer/src/light_client/mock.rs delete mode 100644 relayer/src/light_client/tendermint.rs delete mode 100644 relayer/src/link.rs delete mode 100644 relayer/src/link/cli.rs delete mode 100644 relayer/src/link/error.rs delete mode 100644 relayer/src/link/operational_data.rs delete mode 100644 relayer/src/link/packet_events.rs delete mode 100644 relayer/src/link/pending.rs delete mode 100644 relayer/src/link/relay_path.rs delete mode 100644 relayer/src/link/relay_sender.rs delete mode 100644 relayer/src/link/relay_summary.rs delete mode 100644 relayer/src/link/tx_hashes.rs delete mode 100644 relayer/src/macros.rs delete mode 100644 relayer/src/object.rs delete mode 100644 relayer/src/path.rs delete mode 100644 relayer/src/registry.rs delete mode 100644 relayer/src/rest.rs delete mode 100644 relayer/src/rest/error.rs delete mode 100644 relayer/src/rest/request.rs delete mode 100644 relayer/src/sdk_error.rs delete mode 100644 relayer/src/spawn.rs delete mode 100644 relayer/src/supervisor.rs delete mode 100644 relayer/src/supervisor/client_state_filter.rs delete mode 100644 relayer/src/supervisor/cmd.rs delete mode 100644 relayer/src/supervisor/dump_state.rs delete mode 100644 relayer/src/supervisor/error.rs delete mode 100644 relayer/src/supervisor/scan.rs delete mode 100644 relayer/src/supervisor/spawn.rs delete mode 100644 relayer/src/telemetry.rs delete mode 100644 relayer/src/transfer.rs delete mode 100644 relayer/src/upgrade_chain.rs delete mode 100644 relayer/src/util.rs delete mode 100644 relayer/src/util/block_on.rs delete mode 100644 relayer/src/util/diff.rs delete mode 100644 relayer/src/util/iter.rs delete mode 100644 relayer/src/util/lock.rs delete mode 100644 relayer/src/util/queue.rs delete mode 100644 relayer/src/util/retry.rs delete mode 100644 relayer/src/util/serde.rs delete mode 100644 relayer/src/util/stream.rs delete mode 100644 relayer/src/util/task.rs delete mode 100644 relayer/src/worker.rs delete mode 100644 relayer/src/worker/channel.rs delete mode 100644 relayer/src/worker/client.rs delete mode 100644 relayer/src/worker/cmd.rs delete mode 100644 relayer/src/worker/connection.rs delete mode 100644 relayer/src/worker/error.rs delete mode 100644 relayer/src/worker/handle.rs delete mode 100644 relayer/src/worker/map.rs delete mode 100644 relayer/src/worker/packet.rs delete mode 100644 relayer/src/worker/retry_strategy.rs delete mode 100644 relayer/src/worker/wallet.rs delete mode 100644 relayer/tests/config/fixtures/relayer_conf_example.toml delete mode 100755 scripts/dev-env delete mode 100644 scripts/gm/CHANGELOG.md delete mode 100644 scripts/gm/README.md delete mode 100755 scripts/gm/bin/gm delete mode 100644 scripts/gm/bin/lib-gm delete mode 100644 scripts/gm/bin/shell-support delete mode 100644 scripts/gm/gm.toml delete mode 100755 scripts/init-hermes delete mode 100755 scripts/one-chain delete mode 100755 scripts/release.sh delete mode 100755 scripts/setup-chains delete mode 100755 scripts/sync-protobuf.sh delete mode 100644 telemetry/Cargo.toml delete mode 100644 telemetry/README.md delete mode 100644 telemetry/src/lib.rs delete mode 100644 telemetry/src/server.rs delete mode 100644 
telemetry/src/state.rs delete mode 100644 tools/integration-test/Cargo.toml delete mode 100644 tools/integration-test/spec/.gitignore delete mode 100644 tools/integration-test/spec/MC_Transfer.cfg delete mode 100644 tools/integration-test/spec/MC_Transfer.tla delete mode 100644 tools/integration-test/spec/README.md delete mode 100644 tools/integration-test/spec/Transfer.tla delete mode 100644 tools/integration-test/spec/Transfer_typedefs.tla delete mode 100644 tools/integration-test/src/bin/test_setup_with_binary_channel.rs delete mode 100644 tools/integration-test/src/bin/test_setup_with_ternary_channel.rs delete mode 100644 tools/integration-test/src/lib.rs delete mode 100644 tools/integration-test/src/mbt/README.md delete mode 100644 tools/integration-test/src/mbt/handlers.rs delete mode 100644 tools/integration-test/src/mbt/itf.rs delete mode 100644 tools/integration-test/src/mbt/mod.rs delete mode 100644 tools/integration-test/src/mbt/state.rs delete mode 100644 tools/integration-test/src/mbt/transfer.rs delete mode 100644 tools/integration-test/src/mbt/utils.rs delete mode 100644 tools/integration-test/src/tests/clear_packet.rs delete mode 100644 tools/integration-test/src/tests/client_expiration.rs delete mode 100644 tools/integration-test/src/tests/client_settings.rs delete mode 100644 tools/integration-test/src/tests/connection_delay.rs delete mode 100644 tools/integration-test/src/tests/example.rs delete mode 100644 tools/integration-test/src/tests/execute_schedule.rs delete mode 100644 tools/integration-test/src/tests/ica.rs delete mode 100644 tools/integration-test/src/tests/manual/mod.rs delete mode 100644 tools/integration-test/src/tests/manual/simulation.rs delete mode 100644 tools/integration-test/src/tests/memo.rs delete mode 100644 tools/integration-test/src/tests/mod.rs delete mode 100644 tools/integration-test/src/tests/ordered_channel.rs delete mode 100644 tools/integration-test/src/tests/python.rs delete mode 100644 tools/integration-test/src/tests/query_packet.rs delete mode 100644 tools/integration-test/src/tests/supervisor.rs delete mode 100644 tools/integration-test/src/tests/ternary_transfer.rs delete mode 100644 tools/integration-test/src/tests/transfer.rs delete mode 100644 tools/test-framework/.gitignore delete mode 100644 tools/test-framework/Cargo.toml delete mode 100644 tools/test-framework/README.md delete mode 100644 tools/test-framework/src/bootstrap/binary/chain.rs delete mode 100644 tools/test-framework/src/bootstrap/binary/channel.rs delete mode 100644 tools/test-framework/src/bootstrap/binary/connection.rs delete mode 100644 tools/test-framework/src/bootstrap/binary/mod.rs delete mode 100644 tools/test-framework/src/bootstrap/init.rs delete mode 100644 tools/test-framework/src/bootstrap/mod.rs delete mode 100644 tools/test-framework/src/bootstrap/nary/chain.rs delete mode 100644 tools/test-framework/src/bootstrap/nary/channel.rs delete mode 100644 tools/test-framework/src/bootstrap/nary/connection.rs delete mode 100644 tools/test-framework/src/bootstrap/nary/mod.rs delete mode 100644 tools/test-framework/src/bootstrap/single.rs delete mode 100644 tools/test-framework/src/chain/builder.rs delete mode 100644 tools/test-framework/src/chain/config.rs delete mode 100644 tools/test-framework/src/chain/driver.rs delete mode 100644 tools/test-framework/src/chain/driver/interchain.rs delete mode 100644 tools/test-framework/src/chain/driver/query_txs.rs delete mode 100644 tools/test-framework/src/chain/driver/transfer.rs delete mode 100644 
tools/test-framework/src/chain/exec.rs delete mode 100644 tools/test-framework/src/chain/mod.rs delete mode 100644 tools/test-framework/src/chain/tagged.rs delete mode 100644 tools/test-framework/src/chain/version.rs delete mode 100644 tools/test-framework/src/error.rs delete mode 100644 tools/test-framework/src/framework/base.rs delete mode 100644 tools/test-framework/src/framework/binary/chain.rs delete mode 100644 tools/test-framework/src/framework/binary/channel.rs delete mode 100644 tools/test-framework/src/framework/binary/connection.rs delete mode 100644 tools/test-framework/src/framework/binary/mod.rs delete mode 100644 tools/test-framework/src/framework/binary/node.rs delete mode 100644 tools/test-framework/src/framework/mod.rs delete mode 100644 tools/test-framework/src/framework/nary/chain.rs delete mode 100644 tools/test-framework/src/framework/nary/channel.rs delete mode 100644 tools/test-framework/src/framework/nary/connection.rs delete mode 100644 tools/test-framework/src/framework/nary/mod.rs delete mode 100644 tools/test-framework/src/framework/nary/node.rs delete mode 100644 tools/test-framework/src/framework/overrides.rs delete mode 100644 tools/test-framework/src/framework/supervisor.rs delete mode 100644 tools/test-framework/src/ibc/denom.rs delete mode 100644 tools/test-framework/src/ibc/mod.rs delete mode 100644 tools/test-framework/src/lib.rs delete mode 100644 tools/test-framework/src/prelude.rs delete mode 100644 tools/test-framework/src/relayer/chain.rs delete mode 100644 tools/test-framework/src/relayer/channel.rs delete mode 100644 tools/test-framework/src/relayer/connection.rs delete mode 100644 tools/test-framework/src/relayer/driver.rs delete mode 100644 tools/test-framework/src/relayer/foreign_client.rs delete mode 100644 tools/test-framework/src/relayer/mod.rs delete mode 100644 tools/test-framework/src/relayer/refresh.rs delete mode 100644 tools/test-framework/src/relayer/transfer.rs delete mode 100644 tools/test-framework/src/relayer/tx.rs delete mode 100644 tools/test-framework/src/types/binary/chains.rs delete mode 100644 tools/test-framework/src/types/binary/channel.rs delete mode 100644 tools/test-framework/src/types/binary/client.rs delete mode 100644 tools/test-framework/src/types/binary/connection.rs delete mode 100644 tools/test-framework/src/types/binary/foreign_client.rs delete mode 100644 tools/test-framework/src/types/binary/mod.rs delete mode 100644 tools/test-framework/src/types/config.rs delete mode 100644 tools/test-framework/src/types/env.rs delete mode 100644 tools/test-framework/src/types/id.rs delete mode 100644 tools/test-framework/src/types/mod.rs delete mode 100644 tools/test-framework/src/types/nary/aliases.rs delete mode 100644 tools/test-framework/src/types/nary/chains.rs delete mode 100644 tools/test-framework/src/types/nary/channel.rs delete mode 100644 tools/test-framework/src/types/nary/connection.rs delete mode 100644 tools/test-framework/src/types/nary/foreign_client.rs delete mode 100644 tools/test-framework/src/types/nary/mod.rs delete mode 100644 tools/test-framework/src/types/process.rs delete mode 100644 tools/test-framework/src/types/single/mod.rs delete mode 100644 tools/test-framework/src/types/single/node.rs delete mode 100644 tools/test-framework/src/types/tagged/dual.rs delete mode 100644 tools/test-framework/src/types/tagged/mod.rs delete mode 100644 tools/test-framework/src/types/tagged/mono.rs delete mode 100644 tools/test-framework/src/types/wallet.rs delete mode 100644 tools/test-framework/src/util/array.rs 
delete mode 100644 tools/test-framework/src/util/assert.rs delete mode 100644 tools/test-framework/src/util/file.rs delete mode 100644 tools/test-framework/src/util/mod.rs delete mode 100644 tools/test-framework/src/util/random.rs delete mode 100644 tools/test-framework/src/util/retry.rs delete mode 100644 tools/test-framework/src/util/suspend.rs diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS deleted file mode 100644 index 55403d0f2b..0000000000 --- a/.github/CODEOWNERS +++ /dev/null @@ -1,15 +0,0 @@ -# CODEOWNERS: https://help.github.com/articles/about-codeowners/ - -modules/ @hu55a1n1 @plafer @ancazamfir - -tools/ @soareschen - -docs/ @seanchen1991 @adizere - -guide/ @seanchen1991 - -relayer-cli/ @mzabaluev @romac @ancazamfir - -ci/ @mzabaluev @soareschen - -relayer/ @adizere @romac @seanchen1991 @ancazamfir \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/bug-report.md b/.github/ISSUE_TEMPLATE/bug-report.md deleted file mode 100644 index fae32a106a..0000000000 --- a/.github/ISSUE_TEMPLATE/bug-report.md +++ /dev/null @@ -1,38 +0,0 @@ ---- -name: Bug Report -about: Create a report to help us squash bugs! - ---- - - - -## Summary of Bug - - - -## Version - - - -## Steps to Reproduce - - - - -## Acceptance Criteria - - - -____ - -## For Admin Use - -- [ ] Not duplicate issue -- [ ] Appropriate labels applied -- [ ] Appropriate milestone (priority) applied -- [ ] Appropriate contributors tagged -- [ ] Contributor assigned/self-assigned diff --git a/.github/ISSUE_TEMPLATE/feature-request.md b/.github/ISSUE_TEMPLATE/feature-request.md deleted file mode 100644 index b2a60b2bb2..0000000000 --- a/.github/ISSUE_TEMPLATE/feature-request.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -name: Feature Request -about: Create a proposal to request a feature - ---- - - - -## Summary - - - -## Problem Definition - - - -## Proposal - - - -## Acceptance Criteria - - - -____ - -#### For Admin Use - -- [ ] Not duplicate issue -- [ ] Appropriate labels applied -- [ ] Appropriate milestone (priority) applied -- [ ] Appropriate contributors tagged -- [ ] Contributor assigned/self-assigned diff --git a/.github/ISSUE_TEMPLATE/release-template.md b/.github/ISSUE_TEMPLATE/release-template.md deleted file mode 100644 index e1f3734d07..0000000000 --- a/.github/ISSUE_TEMPLATE/release-template.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -name: New Release Request -about: Create a proposal to track the release of a new version of IBC-RS ---- - - - - -# Release IBC-RS v.X.Y.Z - -⚡ - -- [ ] Create a new release in the changelog, using [`unclog`](https://github.com/informalsystems/unclog). - - If doing a release candidate (`rc`) version, then skip the `unclog release` step. -- [ ] Bump all crate versions to the new version. -- [ ] Reassign unfinished issues of previous milestone to the next milestone. -- [ ] Update Cargo.lock file (if re-publishing `ibc-relayer-cli`) diff --git a/.github/ISSUE_TEMPLATE/rust-update.md b/.github/ISSUE_TEMPLATE/rust-update.md deleted file mode 100644 index 2763d111ec..0000000000 --- a/.github/ISSUE_TEMPLATE/rust-update.md +++ /dev/null @@ -1,28 +0,0 @@ ---- -name: Rust version update -about: A checklist to perform to update to a new version of Rust. ---- - -# Update to Rust release 1.x.y - -🦀 - -- [ ] Update the version in `rust-toolchain.toml`. -- [ ] Run `cargo clippy --all-features --fix`, review and commit the automatic - fixes, and fix all reported lints. Try to resolve the root causes for - the lints rather than silencing them with `#[allow(...)]`. 
- -## Update the MSRV (optional) - -Additional steps to perform in order to make the new minimal supported -Rust version: - -- [ ] Update the `rust-version` fields in all `Cargo.toml` files. -- [ ] Update the `msrv` field in `clippy.toml` and fix all lints reported by - `cargo clippy --all-features`. -- [ ] Update the MSRV shields in README files: - - `README.md` - - `relayer-rest/README.md` -- [ ] Update the MSRV in the guide: `guide/src/pre_requisites.md` -- [ ] Add a `.changelog` entry to the `breaking-changes` section, - announcing the new MSRV. diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md deleted file mode 100644 index e20e5078bd..0000000000 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ /dev/null @@ -1,27 +0,0 @@ - - -Closes: #XXX - -## Description - - - - -______ - -### PR author checklist: -- [ ] Added changelog entry, using [`unclog`](https://github.com/informalsystems/unclog). -- [ ] Added tests: integration (for Hermes) or unit/mock tests (for modules). -- [ ] Linked to GitHub issue. -- [ ] Updated code comments and documentation (e.g., `docs/`). - -### Reviewer checklist: - -- [ ] Reviewed `Files changed` in the GitHub PR explorer. -- [ ] Manually tested (in case integration/unit/mock tests are absent). \ No newline at end of file diff --git a/.github/actions-rs/grcov.yml b/.github/actions-rs/grcov.yml deleted file mode 100644 index d70ee56d03..0000000000 --- a/.github/actions-rs/grcov.yml +++ /dev/null @@ -1,6 +0,0 @@ -branch: true -ignore-not-existing: true -llvm: true -output-type: lcov -output-file: ./lcov.info -prefix-dir: /home/user/build/ diff --git a/.github/dependabot.yml b/.github/dependabot.yml deleted file mode 100644 index 981972a431..0000000000 --- a/.github/dependabot.yml +++ /dev/null @@ -1,42 +0,0 @@ -# Automatically open PRs to update outdated deps -# See https://docs.github.com/en/github/administering-a-repository/enabling-and-disabling-version-updates - -version: 2 -updates: - # Enable version updates for npm - - package-ecosystem: "cargo" - # Look for Cargo `.toml` and `.lock` files in the `root` directory - directory: "/" - # Check the cargo registry for updates every day (weekdays) - schedule: - interval: "weekly" - - - package-ecosystem: "cargo" - directory: "modules" - schedule: - interval: "weekly" - - - package-ecosystem: "cargo" - directory: "proto" - schedule: - interval: "weekly" - -## Disable dependabot for `proto-compiler`. -## Rationale: we maintain the dependencies for proto-compiler -## manually, so that we update the proto-compiler binary -## (and the Cargo.lock file) only when we regenerate -## Rust types from the .proto files. 
-# - package-ecosystem: "cargo" -# directory: "proto-compiler" -# schedule: -# interval: "weekly" - - - package-ecosystem: "cargo" - directory: "relayer" - schedule: - interval: "weekly" - - - package-ecosystem: "cargo" - directory: "relayer-cli" - schedule: - interval: "weekly" diff --git a/.github/markdown-link-check.json b/.github/markdown-link-check.json deleted file mode 100644 index 840f1615ef..0000000000 --- a/.github/markdown-link-check.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "ignorePatterns": [ - { - "pattern": "^https://crates.io" - }, - { - "pattern": "^https?://localhost" - } - ], - "aliveStatusCodes": [429, 200] -} diff --git a/.github/workflows/audit.yaml b/.github/workflows/audit.yaml deleted file mode 100644 index 4ca65ae9f7..0000000000 --- a/.github/workflows/audit.yaml +++ /dev/null @@ -1,19 +0,0 @@ -name: Security Audit -on: - schedule: - - cron: '0 0 * * *' - -jobs: - security_audit: - name: Security Audit - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - name: Cache cargo bin - uses: actions/cache@v1 - with: - path: ~/.cargo/bin - key: ${{ runner.os }}-cargo-audit-v0.11.2 - - uses: actions-rs/audit-check@v1.2.0 - with: - token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/cargo-doc.yaml b/.github/workflows/cargo-doc.yaml deleted file mode 100644 index 1d2a83f83b..0000000000 --- a/.github/workflows/cargo-doc.yaml +++ /dev/null @@ -1,27 +0,0 @@ -name: Publish Cargo Doc - -on: - push: - branches: - - master - pull_request: {} - -jobs: - deploy: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - - uses: actions-rs/toolchain@v1 - with: - toolchain: stable - - - run: cargo doc - - - name: Deploy - if: github.ref == 'refs/heads/master' - uses: peaceiris/actions-gh-pages@v3 - with: - deploy_key: ${{ secrets.IBC_RS_DOC_PRIVATE_KEY }} - external_repository: informalsystems/ibc-rs-doc - publish_dir: ./target/doc diff --git a/.github/workflows/e2e-gaia-current-release.yaml b/.github/workflows/e2e-gaia-current-release.yaml deleted file mode 100644 index 398e1c30c9..0000000000 --- a/.github/workflows/e2e-gaia-current-release.yaml +++ /dev/null @@ -1,60 +0,0 @@ -name: End to End testing (Gaia - v7.0.1) -on: - pull_request: - paths: - - .github/workflows/e2e.yaml - - Cargo.toml - - Cargo.lock - - ci/** - - e2e/** - - proto/** - - modules/** - - relayer/** - - relayer-cli/** - - relayer-rest/** - - telemetry/** - push: - branches: master - paths: - - .github/workflows/e2e.yaml - - Cargo.toml - - Cargo.lock - - ci/** - - e2e/** - - proto/** - - modules/** - - relayer/** - - relayer-cli/** - - relayer-rest/** - - telemetry/** - -env: - CARGO_INCREMENTAL: 0 - CARGO_PROFILE_DEV_DEBUG: 1 - CARGO_PROFILE_RELEASE_DEBUG: 1 - RUST_BACKTRACE: short - CARGO_NET_RETRY: 10 - RUSTUP_MAX_RETRIES: 10 - -jobs: - test-end-to-end-current-gaia: - runs-on: ubuntu-20.04 - steps: - - uses: actions/checkout@v2 - - uses: actions-rs/toolchain@v1 - with: - toolchain: stable - override: true - - uses: Swatinem/rust-cache@v1 - - uses: actions-rs/cargo@v1 - with: - command: build - args: --workspace - - run: cp ./target/debug/hermes . 
- - name: Build relayer image - run: docker-compose -f ci/docker-compose-gaia-current.yml build relayer - - name: Start chains and relayer - run: docker-compose -f ci/docker-compose-gaia-current.yml up -d ibc-0 ibc-1 relayer - - name: Run relayer workflow - continue-on-error: false - run: docker exec relayer /bin/sh -c /relayer/e2e.sh diff --git a/.github/workflows/e2e-gaia-legacy-release.yaml b/.github/workflows/e2e-gaia-legacy-release.yaml deleted file mode 100644 index f128d65e57..0000000000 --- a/.github/workflows/e2e-gaia-legacy-release.yaml +++ /dev/null @@ -1,59 +0,0 @@ -name: End to End testing (Gaia - v6.0.0) -on: - pull_request: - paths: - - .github/workflows/e2e.yaml - - Cargo.toml - - Cargo.lock - - ci/** - - e2e/** - - proto/** - - modules/** - - relayer/** - - relayer-cli/** - - relayer-rest/** - - telemetry/** - push: - branches: master - paths: - - .github/workflows/e2e.yaml - - Cargo.toml - - Cargo.lock - - ci/** - - e2e/** - - proto/** - - modules/** - - relayer/** - - relayer-cli/** - - relayer-rest/** - - telemetry/** - -env: - CARGO_INCREMENTAL: 0 - CARGO_PROFILE_DEV_DEBUG: 1 - CARGO_PROFILE_RELEASE_DEBUG: 1 - RUST_BACKTRACE: short - CARGO_NET_RETRY: 10 - RUSTUP_MAX_RETRIES: 10 - -jobs: - test-end-to-end-current-gaia: - runs-on: ubuntu-20.04 - steps: - - uses: actions/checkout@v2 - - uses: actions-rs/toolchain@v1 - with: - toolchain: stable - override: true - - uses: Swatinem/rust-cache@v1 - - uses: actions-rs/cargo@v1 - with: - command: build - args: --workspace - - run: cp ./target/debug/hermes . - - name: Build relayer image - run: docker-compose -f ci/docker-compose-gaia-legacy.yml build relayer - - name: Start chains and relayer - run: docker-compose -f ci/docker-compose-gaia-legacy.yml up -d ibc-0 ibc-1 relayer - - name: Run relayer workflow - run: docker exec relayer /bin/sh -c /relayer/e2e.sh diff --git a/.github/workflows/guide.yml b/.github/workflows/guide.yml deleted file mode 100644 index 3ee6996edc..0000000000 --- a/.github/workflows/guide.yml +++ /dev/null @@ -1,59 +0,0 @@ -name: Hermes Guide - -on: - push: - branches: - - master - tags: - - v[0-9]+.* - pull_request: - paths: - - guide/** - -jobs: - guide: - runs-on: ubuntu-20.04 - steps: - - uses: actions/checkout@v2 - - - name: cache .cargo directory - uses: actions/cache@v1 - with: - path: ~/.cargo - key: cargo-dir - restore-keys: | - cargo-dir - - - name: Install mdbook - uses: actions-rs/install@v0.1 - with: - crate: mdbook - version: latest - use-tool-cache: true - - - name: Install mdbook-mermaid - uses: actions-rs/install@v0.1 - with: - crate: mdbook-mermaid - version: latest - use-tool-cache: true - - - name: Install mdbook-toc - uses: actions-rs/install@v0.1 - with: - crate: mdbook-toc - version: latest - use-tool-cache: true - - - name: Build guide - run: | - cd guide - mdbook build - - # Only deploy guide when releasing a new version of Hermes - - name: Deploy to GitHub Pages - uses: peaceiris/actions-gh-pages@v3 - if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags') - with: - github_token: ${{ secrets.GITHUB_TOKEN }} - publish_dir: ./guide/book diff --git a/.github/workflows/integration.yaml b/.github/workflows/integration.yaml deleted file mode 100644 index c0b92677b9..0000000000 --- a/.github/workflows/integration.yaml +++ /dev/null @@ -1,214 +0,0 @@ -name: Rust -on: - pull_request: - paths: - - .github/workflows/integration.yaml - - Cargo.toml - - Cargo.lock - - ci/** - - e2e/** - - proto/** - - modules/** - - relayer/** - - relayer-cli/** - - relayer-rest/** - - 
telemetry/** - - tools/** - push: - branches: master - paths: - - .github/workflows/integration.yaml - - Cargo.toml - - Cargo.lock - - ci/** - - e2e/** - - proto/** - - modules/** - - relayer/** - - relayer-cli/** - - relayer-rest/** - - telemetry/** - - tools/** - -env: - CARGO_INCREMENTAL: 0 - CARGO_PROFILE_DEV_DEBUG: 1 - CARGO_PROFILE_RELEASE_DEBUG: 1 - RUST_BACKTRACE: short - CARGO_NET_RETRY: 10 - RUSTUP_MAX_RETRIES: 10 - -jobs: - integration-test: - runs-on: ubuntu-latest - strategy: - matrix: - gaiad: - - gaia5 - - gaia6 - - gaia7 - steps: - - uses: actions/checkout@v2 - - uses: cachix/install-nix-action@v15 - with: - install_url: https://nixos-nix-install-tests.cachix.org/serve/vij683ly7sl95nnhb67bdjjfabclr85m/install - install_options: '--tarball-url-prefix https://nixos-nix-install-tests.cachix.org/serve' - extra_nix_config: | - experimental-features = nix-command flakes - - uses: cachix/cachix-action@v10 - with: - name: cosmos - - uses: actions-rs/toolchain@v1 - with: - toolchain: stable - override: true - - uses: Swatinem/rust-cache@v1 - - uses: actions-rs/cargo@v1 - with: - command: test - args: -p ibc-integration-test --no-fail-fast --no-run - - env: - RUST_LOG: info - RUST_BACKTRACE: 1 - NO_COLOR_LOG: 1 - run: | - nix shell .#python .#${{ matrix.gaiad }} -c cargo \ - test -p ibc-integration-test --no-fail-fast -- \ - --nocapture --test-threads=2 - - ibc-go-integration-test: - runs-on: ubuntu-latest - strategy: - matrix: - simapp: - - ibc-go-v2-simapp - - ibc-go-v3-simapp - steps: - - uses: actions/checkout@v2 - - uses: cachix/install-nix-action@v15 - with: - install_url: https://nixos-nix-install-tests.cachix.org/serve/vij683ly7sl95nnhb67bdjjfabclr85m/install - install_options: '--tarball-url-prefix https://nixos-nix-install-tests.cachix.org/serve' - extra_nix_config: | - experimental-features = nix-command flakes - - uses: cachix/cachix-action@v10 - with: - name: cosmos - - uses: actions-rs/toolchain@v1 - with: - toolchain: stable - override: true - - uses: Swatinem/rust-cache@v1 - - uses: actions-rs/cargo@v1 - with: - command: test - args: -p ibc-integration-test --no-fail-fast --no-run - - env: - RUST_LOG: info - RUST_BACKTRACE: 1 - CHAIN_COMMAND_PATH: simd - run: | - nix shell .#python .#${{ matrix.simapp }} -c cargo \ - test -p ibc-integration-test --no-fail-fast -- --nocapture --test-threads=2 - - ordered-channel-test: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - uses: cachix/install-nix-action@v15 - with: - install_url: https://nixos-nix-install-tests.cachix.org/serve/vij683ly7sl95nnhb67bdjjfabclr85m/install - install_options: '--tarball-url-prefix https://nixos-nix-install-tests.cachix.org/serve' - extra_nix_config: | - experimental-features = nix-command flakes - - uses: cachix/cachix-action@v10 - with: - name: cosmos - - uses: actions-rs/toolchain@v1 - with: - toolchain: stable - override: true - - uses: Swatinem/rust-cache@v1 - - uses: actions-rs/cargo@v1 - with: - command: test - args: -p ibc-integration-test --no-fail-fast --no-run - - env: - RUST_LOG: info - RUST_BACKTRACE: 1 - NO_COLOR_LOG: 1 - run: | - nix shell .#python .#gaia6-ordered -c cargo \ - test -p ibc-integration-test --features ordered --no-fail-fast -- \ - --nocapture --test-threads=1 test_ordered_channel - - ica-filter-test: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - uses: cachix/install-nix-action@v15 - with: - install_url: https://nixos-nix-install-tests.cachix.org/serve/vij683ly7sl95nnhb67bdjjfabclr85m/install - install_options: 
'--tarball-url-prefix https://nixos-nix-install-tests.cachix.org/serve' - extra_nix_config: | - experimental-features = nix-command flakes - - uses: cachix/cachix-action@v10 - with: - name: cosmos - - uses: actions-rs/toolchain@v1 - with: - toolchain: stable - override: true - - uses: Swatinem/rust-cache@v1 - - uses: actions-rs/cargo@v1 - with: - command: test - args: -p ibc-integration-test --no-fail-fast --no-run - - env: - RUST_LOG: info - RUST_BACKTRACE: 1 - NO_COLOR_LOG: 1 - CHAIN_COMMAND_PATH: icad - run: | - nix shell .#python .#ica -c cargo \ - test -p ibc-integration-test --features ica --no-fail-fast -- \ - --nocapture --test-threads=1 test_ica_filter - - model-based-test: - runs-on: ubuntu-latest - strategy: - matrix: - gaiad: - - gaia6 - steps: - - uses: actions/checkout@v2 - - uses: cachix/install-nix-action@v15 - with: - install_url: https://nixos-nix-install-tests.cachix.org/serve/vij683ly7sl95nnhb67bdjjfabclr85m/install - install_options: '--tarball-url-prefix https://nixos-nix-install-tests.cachix.org/serve' - extra_nix_config: | - experimental-features = nix-command flakes - - uses: cachix/cachix-action@v10 - with: - name: cosmos - - uses: actions-rs/toolchain@v1 - with: - toolchain: stable - override: true - - uses: Swatinem/rust-cache@v1 - - uses: actions-rs/cargo@v1 - with: - command: test - args: -p ibc-integration-test --features mbt --no-fail-fast --no-run - # Disable running MBT tests until flakiness is addressed - # - env: - # RUST_LOG: debug - # RUST_BACKTRACE: 1 - # NO_COLOR_LOG: 1 - # run: | - # nix shell \ - # .#${{ matrix.gaiad }} \ - # .#apalache \ - # -c cargo \ - # test -p ibc-integration-test --features mbt --no-fail-fast -- \ - # --nocapture --test-threads=1 mbt diff --git a/.github/workflows/markdown-link-check.yml b/.github/workflows/markdown-link-check.yml deleted file mode 100644 index c699a85514..0000000000 --- a/.github/workflows/markdown-link-check.yml +++ /dev/null @@ -1,19 +0,0 @@ -name: Check Markdown links - -on: - schedule: - - cron: '0 0 * * *' - - workflow_dispatch: - -jobs: - md-link-check: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - name: markdown-link-check - uses: gaurav-nelson/github-action-markdown-link-check@1.0.13 - with: - config-file: '.github/markdown-link-check.json' - use-quiet-mode: 'yes' - use-verbose-mode: 'yes' diff --git a/.github/workflows/no-std.yaml b/.github/workflows/no-std.yaml deleted file mode 100644 index d9020d1de1..0000000000 --- a/.github/workflows/no-std.yaml +++ /dev/null @@ -1,47 +0,0 @@ -name: no_std check -on: - pull_request: - paths: - - .github/workflows/no-std.yml - - Cargo.toml - - Cargo.lock - - ci/** - - proto/** - - modules/** - push: - branches: master - paths: - - .github/workflows/no-std.yml - - Cargo.toml - - Cargo.lock - - ci/** - - proto/** - - modules/** - -jobs: - check-no-std-panic-conflict: - name: Check no_std panic conflict - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - uses: actions-rs/toolchain@v1 - with: - toolchain: stable - override: true - - run: | - cd ci/no-std-check - make check-panic-conflict - - check-substrate: - name: Check no_std substrate support - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - uses: actions-rs/toolchain@v1 - with: - toolchain: nightly - target: wasm32-unknown-unknown - override: true - - run: | - cd ci/no-std-check - make check-substrate diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml deleted file mode 100644 index fb64ad8395..0000000000 --- 
a/.github/workflows/release.yml +++ /dev/null @@ -1,80 +0,0 @@ -# Uploads Hermes binary. -# Ref: https://github.com/marketplace/actions/build-and-upload-rust-binary-to-github-releases - -name: Release - -on: - push: - tags: - - v[0-9]+.* - -jobs: - create-release: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - uses: taiki-e/create-gh-release-action@v1 - env: - # (required) - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - - upload-assets: - strategy: - matrix: - config: - - { os: ubuntu-latest, target: x86_64-unknown-linux-gnu } - - { os: ubuntu-latest, target: aarch64-unknown-linux-gnu } - - { os: macos-latest, target: x86_64-apple-darwin } - runs-on: ${{ matrix.config.os }} - steps: - - uses: actions/checkout@v2 - - uses: taiki-e/upload-rust-binary-action@v1 - with: - # (required) - bin: hermes - # (optional) Target triple - target: ${{ matrix.config.target }} - # (optional) On which platform to distribute the `.tar.gz` file. - # [default value: unix] - # [possible values: all, unix, windows, none] - tar: unix - # (optional) On which platform to distribute the `.zip` file. - # [default value: windows] - # [possible values: all, unix, windows, none] - zip: unix - # (optional) Archive name (non-extension portion of filename) to be uploaded. - # [default value: $bin-$target] - # [possible values: the following variables and any string] - # variables: - # - $bin - Binary name (non-extension portion of filename). - # - $target - Target triple. - # - $tag - Tag of this release. - archive: $bin-$tag-$target - env: - # (required) - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - # (optional) - CARGO_PROFILE_RELEASE_LTO: true - docker-release: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - name: Set up Docker Buildx - id: buildx - uses: docker/setup-buildx-action@v1 - - name: Login to Docker Hub - uses: docker/login-action@v1 - with: - username: ${{ secrets.DOCKER_HUB_USERNAME }} - password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }} - - name: Get release version - run: echo "TAG=${GITHUB_REF#refs/tags/v}" >> $GITHUB_ENV - - name: Build and push - id: docker_build - uses: docker/build-push-action@v2 - with: - context: ./ci/ - file: ./ci/hermes.Dockerfile - push: true - build-args: TAG=v${{env.TAG}} - tags: informalsystems/hermes:${{env.TAG}} diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml deleted file mode 100644 index aa4d85f717..0000000000 --- a/.github/workflows/rust.yml +++ /dev/null @@ -1,139 +0,0 @@ -name: Rust -on: - pull_request: - paths: - - .github/workflows/rust.yml - - Cargo.toml - - Cargo.lock - - ci/** - - e2e/** - - proto/** - - modules/** - - relayer/** - - relayer-cli/** - - relayer-rest/** - - telemetry/** - - tools/** - push: - branches: master - paths: - - .github/workflows/rust.yml - - Cargo.toml - - Cargo.lock - - ci/** - - e2e/** - - proto/** - - modules/** - - relayer/** - - relayer-cli/** - - relayer-rest/** - - telemetry/** - - tools/** - -env: - CARGO_INCREMENTAL: 0 - CARGO_PROFILE_DEV_DEBUG: 1 - CARGO_PROFILE_RELEASE_DEBUG: 1 - RUST_BACKTRACE: short - CARGO_NET_RETRY: 10 - RUSTUP_MAX_RETRIES: 10 - -jobs: - cleanup-runs: - runs-on: ubuntu-latest - steps: - - uses: rokroskar/workflow-run-cleanup-action@master - env: - GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}" - if: "!startsWith(github.ref, 'refs/tags/') && github.ref != 'refs/heads/master'" - - fmt: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - uses: actions-rs/toolchain@v1 - with: - toolchain: stable - override: true - - uses: actions-rs/cargo@v1 
- with: - command: fmt - args: --all -- --check - - clippy_all_features: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - uses: actions-rs/toolchain@v1 - with: - toolchain: stable - components: clippy - override: true - - uses: Swatinem/rust-cache@v1 - - uses: actions-rs/clippy-check@v1 - with: - token: ${{ secrets.GITHUB_TOKEN }} - args: --all-features --all-targets - - clippy_no_default_features: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - uses: actions-rs/toolchain@v1 - with: - toolchain: stable - components: clippy - override: true - - uses: Swatinem/rust-cache@v1 - - uses: actions-rs/clippy-check@v1 - with: - token: ${{ secrets.GITHUB_TOKEN }} - args: --no-default-features --all-targets - - test-stable: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - uses: actions-rs/toolchain@v1 - with: - toolchain: stable - override: true - - uses: Swatinem/rust-cache@v1 - - uses: actions-rs/cargo@v1 - with: - command: test - args: --all-features --no-fail-fast --no-run - - uses: actions-rs/cargo@v1 - with: - command: test - args: --all-features --no-fail-fast --workspace --exclude ibc-integration-test -- --nocapture - - # test-coverage: - # runs-on: ubuntu-latest - # steps: - # - uses: actions/checkout@v2 - # with: - # fetch-depth: 0 - # - uses: actions-rs/toolchain@v1 - # with: - # toolchain: stable - # override: true - # - uses: actions-rs/cargo@v1 - # with: - # command: clean - # - name: Run cargo-tarpaulin - # uses: actions-rs/tarpaulin@v0.1 - # with: - # version: '0.16.0' - # args: '-- --test-threads 1' - # timeout: 600 - # - name: Upload coverage to Codecov - # uses: codecov/codecov-action@v1 - # with: - # token: ${{ secrets.CODECOV_TOKEN }} - # fail_ci_if_error: true - # - name: Archive code coverage results - # uses: actions/upload-artifact@v2 - # with: - # name: code-coverage-report - # path: cobertura.xml diff --git a/.github/workflows/scripts.yaml b/.github/workflows/scripts.yaml deleted file mode 100644 index bf5e48778d..0000000000 --- a/.github/workflows/scripts.yaml +++ /dev/null @@ -1,19 +0,0 @@ -name: ShellCheck Scripts -on: - pull_request: - paths: - - scripts/** - push: - branches: master - paths: - - scripts/** - -jobs: - shellcheck: - runs-on: ubuntu-latest - steps: - - name: Run ShellCheck - uses: ludeeus/action-shellcheck@master - with: - scandir: './scripts' - additional_files: 'scripts/dev-env scripts/one-chain scripts/init-hermes' diff --git a/.github/workflows/specs.yml b/.github/workflows/specs.yml deleted file mode 100644 index acaa60ca77..0000000000 --- a/.github/workflows/specs.yml +++ /dev/null @@ -1,30 +0,0 @@ -name: TLA+ Specs -on: - pull_request: - paths: - - docs/spec/tla/** - push: - branches: master - paths: - - docs/specs/tla/** - -jobs: - typecheck-specs: - runs-on: ubuntu-latest - container: apalache/mc:0.15.2 - env: - working-directory: docs/spec/tla - steps: - - uses: actions/checkout@v2 - - name: IBC Core - run: apalache-mc typecheck IBCCore.tla | grep -F 'Type checker [OK]' - working-directory: ${{env.working-directory}}/ibc-core - - name: Fungible Token Transfer - run: apalache-mc typecheck IBCTokenTransfer.tla | grep -F 'Type checker [OK]' - working-directory: ${{env.working-directory}}/fungible-token-transfer - - name: ICS 02 Client / Single Chain - run: apalache-mc typecheck ICS02SingleChainEnvironment.tla | grep -F 'Type checker [OK]' - working-directory: ${{env.working-directory}}/client - - name: ICS 02 Client / Two Chains - run: apalache-mc typecheck ICS02TwoChainsEnvironment.tla | 
grep -F 'Type checker [OK]' - working-directory: ${{env.working-directory}}/client diff --git a/Cargo.lock b/Cargo.lock index 280550ca71..e5d5a59b31 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -12,45 +12,6 @@ dependencies = [ "regex", ] -[[package]] -name = "abscissa_core" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6750843603bf31a83accd3c8177f9dbf53a7d64275688fc7371e0a4d9f8628b5" -dependencies = [ - "abscissa_derive", - "arc-swap", - "backtrace", - "canonical-path", - "clap", - "color-eyre", - "fs-err", - "once_cell", - "regex", - "secrecy", - "semver", - "serde", - "termcolor", - "toml", - "tracing", - "tracing-log", - "tracing-subscriber 0.3.15", - "wait-timeout", -] - -[[package]] -name = "abscissa_derive" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a3473aa652e90865a06b723102aaa4a54a7d9f2092dbf4582497a61d0537d3f" -dependencies = [ - "ident_case", - "proc-macro2", - "quote", - "syn", - "synstructure", -] - [[package]] name = "addr2line" version = "0.17.0" @@ -66,12 +27,6 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" -[[package]] -name = "adler32" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aae1277d39aeec15cb388266ecc24b11c80469deae6067e17a1a7aa9e5c1f234" - [[package]] name = "ahash" version = "0.7.6" @@ -92,21 +47,6 @@ dependencies = [ "memchr", ] -[[package]] -name = "alloc-no-stdlib" -version = "2.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35ef4730490ad1c4eae5c4325b2a95f521d023e5c885853ff7aca0a6a1631db3" - -[[package]] -name = "alloc-stdlib" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "697ed7edc0f1711de49ce108c541623a0af97c6c60b2f6e2b65229847ac843c2" -dependencies = [ - "alloc-no-stdlib", -] - [[package]] name = "ansi_term" version = "0.12.1" @@ -132,10 +72,33 @@ dependencies = [ ] [[package]] -name = "arc-swap" -version = "1.5.1" +name = "argh" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "983cd8b9d4b02a6dc6ffa557262eb5858a27a0038ffffe21a0f133eaa819a164" +checksum = "a7e7e4aa7e40747e023c0761dafcb42333a9517575bbf1241747f68dd3177a62" +dependencies = [ + "argh_derive", + "argh_shared", +] + +[[package]] +name = "argh_derive" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69f2bd7ff6ed6414f4e5521bd509bae46454bbd513801767ced3f21a751ab4bc" +dependencies = [ + "argh_shared", + "heck 0.3.3", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "argh_shared" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47253b98986dafc7a3e1cf3259194f1f47ac61abb57a57f46ec09e48d004ecda" [[package]] name = "arrayref" @@ -164,12 +127,6 @@ version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6" -[[package]] -name = "ascii" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbf56136a5198c7b01a49e3afcbef6cf84597273d298f54432926024107b0109" - [[package]] name = "async-lock" version = "2.5.0" @@ -179,17 +136,6 @@ dependencies = [ "event-listener", ] -[[package]] -name = "async-recursion" -version = "0.3.2" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7d78656ba01f1b93024b7c3a0467f1608e4be67d725749fdcd7d2c7678fd7a2" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "async-stream" version = "0.3.3" @@ -331,24 +277,6 @@ version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" -[[package]] -name = "base64ct" -version = "1.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3bdca834647821e0b13d9539a8634eb62d3501b6b6c2cec1722786ee6671b851" - -[[package]] -name = "bech32" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf9ff0bbfd639f15c74af777d81383cf53efb7c93613f6cab67c6c11e05bbf8b" - -[[package]] -name = "bech32" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5738be7561b0eeb501ef1d5c5db3f24e01ceb55fededd9b00039aada34966ad" - [[package]] name = "beef" version = "0.5.2" @@ -438,27 +366,6 @@ dependencies = [ "tokio", ] -[[package]] -name = "bitcoin" -version = "0.28.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05bba324e6baf655b882df672453dbbc527bc938cadd27750ae510aaccc3a66a" -dependencies = [ - "bech32 0.8.1", - "bitcoin_hashes", - "secp256k1 0.22.1", - "serde", -] - -[[package]] -name = "bitcoin_hashes" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "006cc91e1a1d99819bc5b8214be3555c1f0611b169f527a1fdc54ed1f2b745b0" -dependencies = [ - "serde", -] - [[package]] name = "bitflags" version = "1.3.2" @@ -587,27 +494,6 @@ dependencies = [ "syn", ] -[[package]] -name = "brotli" -version = "3.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1a0b1dbcc8ae29329621f8d4f0d835787c1c38bb1401979b49d13b0b305ff68" -dependencies = [ - "alloc-no-stdlib", - "alloc-stdlib", - "brotli-decompressor", -] - -[[package]] -name = "brotli-decompressor" -version = "2.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59ad2d4653bf5ca36ae797b1f4bb4dbddb60ce49ca4aed8a2ce4829f60425b80" -dependencies = [ - "alloc-no-stdlib", - "alloc-stdlib", -] - [[package]] name = "bstr" version = "0.2.17" @@ -617,16 +503,6 @@ dependencies = [ "memchr", ] -[[package]] -name = "buf_redux" -version = "0.8.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b953a6887648bb07a535631f2bc00fbdb2a2216f135552cb3f534ed136b9c07f" -dependencies = [ - "memchr", - "safemem", -] - [[package]] name = "bumpalo" version = "3.10.0" @@ -645,12 +521,6 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" -[[package]] -name = "bytecount" -version = "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c676a478f63e9fa2dd5368a42f28bba0d6c560b775f38583c8bbaa7fcd67c9c" - [[package]] name = "byteorder" version = "1.4.3" @@ -666,48 +536,14 @@ dependencies = [ "serde", ] -[[package]] -name = "camino" -version = "1.0.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "869119e97797867fd90f5e22af7d0bd274bd4635ebb9eb68c04f3f513ae6c412" -dependencies = [ - "serde", -] - -[[package]] -name = "canonical-path" -version = "2.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"e6e9e01327e6c86e92ec72b1c798d4a94810f147209bbe3ffab6a86954937a6f" - -[[package]] -name = "cargo-platform" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cbdb825da8a5df079a43676dbe042702f1707b1109f713a01420fbb4cc71fa27" -dependencies = [ - "serde", -] - -[[package]] -name = "cargo_metadata" -version = "0.14.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4acbb09d9ee8e23699b9634375c72795d095bf268439da88562cf9b501f181fa" -dependencies = [ - "camino", - "cargo-platform", - "semver", - "serde", - "serde_json", -] - [[package]] name = "cc" version = "1.0.73" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2fff2a6927b3bb87f9595d67196a70493f627687a71d87a0d692242c33f58c11" +dependencies = [ + "jobserver", +] [[package]] name = "cfg-if" @@ -781,7 +617,7 @@ version = "3.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ba52acd3b0a5c33aeada5cdaa3267cdc7c594a98731d4268cdc1532f4264cb4" dependencies = [ - "heck", + "heck 0.4.0", "proc-macro-error", "proc-macro2", "quote", @@ -797,6 +633,15 @@ dependencies = [ "os_str_bytes", ] +[[package]] +name = "cmake" +version = "0.1.48" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8ad8cef104ac57b68b89df3208164d228503abbdce70f6880ffa3d970e7443a" +dependencies = [ + "cc", +] + [[package]] name = "color-eyre" version = "0.6.2" @@ -824,20 +669,6 @@ dependencies = [ "tracing-error", ] -[[package]] -name = "console" -version = "0.15.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89eab4d20ce20cea182308bca13088fecea9c05f6776cf287205d41a0ed3c847" -dependencies = [ - "encode_unicode", - "libc", - "once_cell", - "terminal_size", - "unicode-width", - "winapi", -] - [[package]] name = "const-oid" version = "0.7.1" @@ -850,17 +681,6 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" -[[package]] -name = "contracts" -version = "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1d1429e3bd78171c65aa010eabcdf8f863ba3254728dbfb0ad4b1545beac15c" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "convert_case" version = "0.4.0" @@ -908,7 +728,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c2dd04ddaf88237dc3b8d8f9a3c1004b506b54b3313403944054d23c0870c521" dependencies = [ "cfg-if 1.0.0", - "crossbeam-utils 0.8.11", + "crossbeam-utils", ] [[package]] @@ -918,23 +738,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "715e8152b692bba2d374b53d4875445368fdf21a94751410af607a5ac677d1fc" dependencies = [ "cfg-if 1.0.0", - "crossbeam-epoch 0.9.10", - "crossbeam-utils 0.8.11", -] - -[[package]] -name = "crossbeam-epoch" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "058ed274caafc1f60c4997b5fc07bf7dc7cca454af7c6e81edffe5f33f70dace" -dependencies = [ - "autocfg", - "cfg-if 0.1.10", - "crossbeam-utils 0.7.2", - "lazy_static", - "maybe-uninit", - "memoffset 0.5.6", - "scopeguard", + "crossbeam-epoch", + "crossbeam-utils", ] [[package]] @@ -945,23 +750,12 @@ checksum = "045ebe27666471bb549370b4b0b3e51b07f56325befa4284db65fc89c02511b1" dependencies = [ "autocfg", "cfg-if 1.0.0", - "crossbeam-utils 0.8.11", - "memoffset 0.6.5", + "crossbeam-utils", + "memoffset", "once_cell", "scopeguard", ] 
-[[package]] -name = "crossbeam-utils" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3c7c73a2d1e9fc0886a08b93e98eb643461230d5f1925e4036204d5f2e261a8" -dependencies = [ - "autocfg", - "cfg-if 0.1.10", - "lazy_static", -] - [[package]] name = "crossbeam-utils" version = "0.8.11" @@ -1103,26 +897,6 @@ dependencies = [ "syn", ] -[[package]] -name = "dashmap" -version = "4.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e77a43b28d0668df09411cb0bc9a8c2adc40f9a048afe863e05fd43251e8e39c" -dependencies = [ - "cfg-if 1.0.0", - "num_cpus", -] - -[[package]] -name = "deflate" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f95bf05dffba6e6cce8dfbb30def788154949ccd9aed761b472119c21e01c70" -dependencies = [ - "adler32", - "gzip-header", -] - [[package]] name = "der" version = "0.5.1" @@ -1156,17 +930,6 @@ dependencies = [ "syn", ] -[[package]] -name = "dialoguer" -version = "0.10.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a92e7e37ecef6857fdc0c0c5d42fd5b0938e46590c2183cc92dd310a6d078eb1" -dependencies = [ - "console", - "tempfile", - "zeroize", -] - [[package]] name = "digest" version = "0.8.1" @@ -1205,16 +968,6 @@ dependencies = [ "dirs-sys", ] -[[package]] -name = "dirs-next" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b98cf8ebf19c3d1b223e151f99a4f9f0690dca41414773390fc824184ac833e1" -dependencies = [ - "cfg-if 1.0.0", - "dirs-sys-next", -] - [[package]] name = "dirs-sys" version = "0.3.7" @@ -1226,17 +979,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "dirs-sys-next" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ebda144c4fe02d1f7ea1a7d9641b6fc6b580adcfa024ae48797ecdeb6825b4d" -dependencies = [ - "libc", - "redox_users", - "winapi", -] - [[package]] name = "downcast-rs" version = "1.2.0" @@ -1342,12 +1084,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "encode_unicode" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f" - [[package]] name = "env_logger" version = "0.9.0" @@ -1367,15 +1103,6 @@ version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "68b91989ae21441195d7d9b9993a2f9295c7e1a8c96255d8b729accddc124797" -[[package]] -name = "error-chain" -version = "0.12.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d2f06b9cac1506ece98fe3231e3cc9c4410ec3d5b1f24ae1c8946f0742cdefc" -dependencies = [ - "version_check", -] - [[package]] name = "event-listener" version = "2.5.3" @@ -1417,18 +1144,6 @@ dependencies = [ "subtle", ] -[[package]] -name = "filetime" -version = "0.2.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e94a7bbaa59354bc20dd75b67f23e2797b4490e9d6928203fb105c79e448c86c" -dependencies = [ - "cfg-if 1.0.0", - "libc", - "redox_syscall", - "windows-sys", -] - [[package]] name = "fixed-hash" version = "0.7.0" @@ -1441,6 +1156,12 @@ dependencies = [ "static_assertions", ] +[[package]] +name = "fixedbitset" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" + [[package]] name = "flate2" version = "1.0.24" @@ -1457,23 +1178,10 @@ version = "0.4.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "c606d892c9de11507fa0dcffc116434f94e105d0bbdc4e405b61519464c49d7b" dependencies = [ - "anyhow", "eyre", "paste", ] -[[package]] -name = "flume" -version = "0.10.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1657b4441c3403d9f7b3409e47575237dac27b1b5726df654a6ecbf92f0f7577" -dependencies = [ - "futures-core", - "futures-sink", - "pin-project", - "spin 0.9.4", -] - [[package]] name = "fnv" version = "1.0.7" @@ -1606,10 +1314,10 @@ dependencies = [ ] [[package]] -name = "fs-err" -version = "2.7.0" +name = "fuchsia-cprng" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5bd79fa345a495d3ae89fb7165fec01c0e72f41821d642dda363a1e97975652e" +checksum = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba" [[package]] name = "funty" @@ -1765,10 +1473,19 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "22030e2c5a68ec659fde1e949a745124b48e6fa8b045b7ed5bd1fe4ccc5c4e5d" [[package]] -name = "glob" -version = "0.3.0" +name = "git2" +version = "0.13.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" +checksum = "f29229cc1b24c0e6062f6e742aa3e256492a5323365e5ed3413599f8a5eff7d6" +dependencies = [ + "bitflags", + "libc", + "libgit2-sys", + "log", + "openssl-probe", + "openssl-sys", + "url", +] [[package]] name = "globset" @@ -1814,15 +1531,6 @@ dependencies = [ "syn", ] -[[package]] -name = "gzip-header" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0131feb3d3bb2a5a238d8a4d09f6353b7ebfdc52e77bccbf4ea6eaa751dde639" -dependencies = [ - "crc32fast", -] - [[package]] name = "h2" version = "0.3.13" @@ -1875,15 +1583,6 @@ dependencies = [ "ahash", ] -[[package]] -name = "hdpath" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dafb09e5d85df264339ad786a147d9de1da13687a3697c52244297e5e7c32d9c" -dependencies = [ - "byteorder", -] - [[package]] name = "headers" version = "0.3.7" @@ -1909,6 +1608,15 @@ dependencies = [ "http", ] +[[package]] +name = "heck" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d621efb26863f0e9924c6ac577e8275e5e6b77455db64ffa6c65c904e9e132c" +dependencies = [ + "unicode-segmentation", +] + [[package]] name = "heck" version = "0.4.0" @@ -2013,16 +1721,6 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" -[[package]] -name = "humantime-serde" -version = "1.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57a3db5ea5923d99402c94e9feb261dc5ee9b4efa158b0315f788cf549cc200c" -dependencies = [ - "humantime", - "serde", -] - [[package]] name = "hyper" version = "0.14.20" @@ -2152,209 +1850,40 @@ dependencies = [ "tendermint-light-client-verifier", "tendermint-proto", "tendermint-rpc", - "tendermint-testgen", - "test-log", - "time", - "tokio", - "tracing", - "tracing-subscriber 0.3.15", - "uint", -] - -[[package]] -name = "ibc-integration-test" -version = "0.15.0" -dependencies = [ - "ibc", - "ibc-proto", - "ibc-relayer", - "ibc-relayer-cli", - "ibc-test-framework", - "modelator", - "serde", - "serde_json", - "tempfile", - "tendermint", - "tendermint-rpc", - "time", -] - -[[package]] -name = "ibc-proto" -version = "0.18.0" 
-dependencies = [ - "base64", - "bytes", - "prost", - "prost-types", - "schemars", - "serde", - "tendermint-proto", - "tonic", -] - -[[package]] -name = "ibc-relayer" -version = "0.15.0" -dependencies = [ - "anyhow", - "async-stream", - "bech32 0.9.0", - "bitcoin", - "bytes", - "crossbeam-channel", - "dirs-next", - "env_logger", - "flex-error", - "futures", - "hdpath", - "hex", - "http", - "humantime", - "humantime-serde", - "ibc", - "ibc-proto", - "ibc-telemetry", - "itertools", - "k256", - "moka", - "nanoid", - "num-bigint 0.4.3", - "num-rational 0.4.1", - "prost", - "prost-types", - "regex", - "retry", - "ripemd160", - "semver", - "serde", - "serde_derive", - "serde_json", - "serial_test", - "sha2 0.10.2", - "signature", - "subtle-encoding", - "tendermint", - "tendermint-light-client", - "tendermint-light-client-verifier", - "tendermint-proto", - "tendermint-rpc", - "tendermint-testgen", - "test-log", - "thiserror", - "tiny-bip39", - "tiny-keccak", - "tokio", - "toml", - "tonic", - "tracing", - "tracing-subscriber 0.3.15", - "uuid 1.1.2", -] - -[[package]] -name = "ibc-relayer-cli" -version = "0.15.0" -dependencies = [ - "abscissa_core", - "atty", - "clap", - "clap_complete", - "color-eyre", - "console", - "crossbeam-channel", - "dialoguer", - "dirs-next", - "eyre", - "flex-error", - "futures", - "hex", - "humantime", - "ibc", - "ibc-proto", - "ibc-relayer", - "ibc-relayer-rest", - "ibc-telemetry", - "itertools", - "once_cell", - "oneline-eyre", - "regex", - "serde", - "serde_derive", - "serde_json", - "signal-hook", - "subtle-encoding", - "tendermint", - "tendermint-light-client", - "tendermint-light-client-verifier", - "tendermint-proto", - "tendermint-rpc", + "tendermint-testgen", + "test-log", + "time", "tokio", - "toml", "tracing", "tracing-subscriber 0.3.15", + "uint", ] [[package]] -name = "ibc-relayer-rest" -version = "0.15.0" +name = "ibc-proto" +version = "0.18.0" dependencies = [ - "crossbeam-channel", - "ibc", - "ibc-relayer", - "rouille", + "base64", + "bytes", + "prost", + "prost-types", + "schemars", "serde", - "serde_json", - "toml", - "tracing", - "ureq", -] - -[[package]] -name = "ibc-telemetry" -version = "0.15.0" -dependencies = [ - "crossbeam-channel", - "ibc", - "moka", - "once_cell", - "opentelemetry", - "opentelemetry-prometheus", - "prometheus", - "rouille", - "uuid 1.1.2", + "tendermint-proto", + "tonic", ] [[package]] -name = "ibc-test-framework" -version = "0.15.0" +name = "ibc-proto-compiler" +version = "0.2.0" dependencies = [ - "async-trait", - "color-eyre", - "crossbeam-channel", - "env_logger", - "eyre", - "flex-error", - "hex", - "http", - "ibc", - "ibc-proto", - "ibc-relayer", - "ibc-relayer-cli", - "itertools", - "rand 0.8.5", - "semver", - "serde", - "serde_json", - "serde_yaml", - "sha2 0.10.2", - "subtle-encoding", - "tendermint", - "tendermint-rpc", - "tokio", - "toml", - "tracing", - "tracing-subscriber 0.3.15", + "argh", + "git2", + "prost-build", + "tempdir", + "tonic", + "tonic-build", + "walkdir", ] [[package]] @@ -2482,6 +2011,15 @@ version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c8af84674fe1f223a982c933a0ee1086ac4d4052aa0fb8060c12c6ad838e754" +[[package]] +name = "jobserver" +version = "0.1.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af25a77299a7f711a01975c35a6a424eb6862092cc2d6c72c4ed6cbc56dfc1fa" +dependencies = [ + "libc", +] + [[package]] name = "js-sys" version = "0.3.59" @@ -2760,7 +2298,6 @@ dependencies = [ "ecdsa", "elliptic-curve", "sec1", - 
"sha2 0.9.9", ] [[package]] @@ -2791,6 +2328,20 @@ version = "0.2.127" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "505e71a4706fa491e9b1b55f51b95d4037d0821ee40131190475f692b35b009b" +[[package]] +name = "libgit2-sys" +version = "0.12.26+1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19e1c899248e606fbfe68dcb31d8b0176ebab833b103824af31bddf4b7457494" +dependencies = [ + "cc", + "libc", + "libssh2-sys", + "libz-sys", + "openssl-sys", + "pkg-config", +] + [[package]] name = "libm" version = "0.2.3" @@ -2846,10 +2397,30 @@ dependencies = [ ] [[package]] -name = "linked-hash-map" -version = "0.5.6" +name = "libssh2-sys" +version = "0.2.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b094a36eb4b8b8c8a7b4b8ae43b2944502be3e59cd87687595cf6b0a71b3f4ca" +dependencies = [ + "cc", + "libc", + "libz-sys", + "openssl-sys", + "pkg-config", + "vcpkg", +] + +[[package]] +name = "libz-sys" +version = "1.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" +checksum = "9702761c3935f8cc2f101793272e202c72b99da8f4224a19ddcf1279a6450bbf" +dependencies = [ + "cc", + "libc", + "pkg-config", + "vcpkg", +] [[package]] name = "linregress" @@ -2889,15 +2460,6 @@ dependencies = [ "hashbrown 0.12.3", ] -[[package]] -name = "mach" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b823e83b2affd8f40a9ee8c29dbc56404c1e34cd2710921f2801e2cf29527afa" -dependencies = [ - "libc", -] - [[package]] name = "matchers" version = "0.0.1" @@ -2937,27 +2499,12 @@ dependencies = [ "rawpointer", ] -[[package]] -name = "maybe-uninit" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" - [[package]] name = "memchr" version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" -[[package]] -name = "memoffset" -version = "0.5.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "043175f069eda7b85febe4a74abbaeff828d9f8b448515d3151a14a3542811aa" -dependencies = [ - "autocfg", -] - [[package]] name = "memoffset" version = "0.6.5" @@ -3008,16 +2555,6 @@ version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" -[[package]] -name = "mime_guess" -version = "2.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4192263c238a5f0d0c6bfd21f336a313a4ce1c450542449ca191bb657b4642ef" -dependencies = [ - "mime", - "unicase", -] - [[package]] name = "minimal-lexical" version = "0.2.1" @@ -3071,44 +2608,10 @@ dependencies = [ ] [[package]] -name = "moka" -version = "0.8.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "975fa04238144061e7f8df9746b2e9cd93ef85881da5548d842a7c6a4b614415" -dependencies = [ - "crossbeam-channel", - "crossbeam-epoch 0.8.2", - "crossbeam-utils 0.8.11", - "num_cpus", - "once_cell", - "parking_lot", - "quanta", - "scheduled-thread-pool", - "skeptic", - "smallvec", - "tagptr", - "thiserror", - "triomphe", - "uuid 1.1.2", -] - -[[package]] -name = "multipart" -version = "0.18.0" +name = "multimap" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"00dec633863867f29cb39df64a397cdf4a6354708ddd7759f70c7fb51c5f9182" -dependencies = [ - "buf_redux", - "httparse", - "log", - "mime", - "mime_guess", - "quick-error", - "rand 0.8.5", - "safemem", - "tempfile", - "twoway", -] +checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a" [[package]] name = "nalgebra" @@ -3139,15 +2642,6 @@ dependencies = [ "syn", ] -[[package]] -name = "nanoid" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ffa00dec017b5b1a8b7cf5e2c008bfda1aa7e0697ac1508b491fdf2622fb4d8" -dependencies = [ - "rand 0.8.5", -] - [[package]] name = "nodrop" version = "0.1.14" @@ -3175,18 +2669,6 @@ dependencies = [ "num-traits", ] -[[package]] -name = "num-bigint" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f93ab6289c7b344a8a9f60f88d80aa20032336fe78da341afc91c8a2341fc75f" -dependencies = [ - "autocfg", - "num-integer", - "num-traits", - "serde", -] - [[package]] name = "num-complex" version = "0.4.2" @@ -3234,7 +2716,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c000134b5dbf44adc5cb772486d335293351644b801551abe8f75c84cfa4aef" dependencies = [ "autocfg", - "num-bigint 0.2.6", + "num-bigint", "num-integer", "num-traits", ] @@ -3246,10 +2728,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0638a1c9d0a3c0914158145bc76cff373a75a627e6ecbfb71cbe6f453a5a19b0" dependencies = [ "autocfg", - "num-bigint 0.4.3", "num-integer", "num-traits", - "serde", ] [[package]] @@ -3296,15 +2776,6 @@ version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "18a6dbe30758c9f83eb00cbea4ac95966305f5a7772f3f42ebfc7fc7eddbd8e1" -[[package]] -name = "oneline-eyre" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "862f17a1e689c0ce8ca158ea48e776c5101c5d14fdfbed3e01c15f89604c3097" -dependencies = [ - "eyre", -] - [[package]] name = "opaque-debug" version = "0.2.3" @@ -3324,35 +2795,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] -name = "opentelemetry" -version = "0.17.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6105e89802af13fdf48c49d7646d3b533a70e536d818aae7e78ba0433d01acb8" -dependencies = [ - "async-trait", - "crossbeam-channel", - "dashmap", - "fnv", - "futures-channel", - "futures-executor", - "futures-util", - "js-sys", - "lazy_static", - "percent-encoding", - "pin-project", - "rand 0.8.5", - "thiserror", -] - -[[package]] -name = "opentelemetry-prometheus" -version = "0.10.0" +name = "openssl-sys" +version = "0.9.75" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9328977e479cebe12ce0d3fcecdaea4721d234895a9440c5b5dfd113f0594ac6" +checksum = "e5f9bd0c2710541a3cda73d6f9ac4f1b240de4ae261065d309dbe73d9dceb42f" dependencies = [ - "opentelemetry", - "prometheus", - "protobuf", + "autocfg", + "cc", + "libc", + "pkg-config", + "vcpkg", ] [[package]] @@ -3615,6 +3067,16 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" +[[package]] +name = "petgraph" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6d5014253a1331579ce62aa67443b4a658c5e7dd03d4bc6d302b94474888143" +dependencies = [ + "fixedbitset", + 
"indexmap", +] + [[package]] name = "pin-project" version = "1.0.11" @@ -3648,15 +3110,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] -name = "pkcs8" -version = "0.8.0" +name = "pkg-config" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cabda3fb821068a9a4fab19a683eac3af12edf0f34b94a8be53c4972b8149d0" -dependencies = [ - "der", - "spki", - "zeroize", -] +checksum = "1df8c4ec4b0627e53bdf214615ad287367e482558cf84b109250b37464dc03ae" [[package]] name = "ppv-lite86" @@ -3664,6 +3121,16 @@ version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eb9f9e6e233e5c4a35559a617bf40a4ec447db2e84c20b55a6f83167b7e57872" +[[package]] +name = "prettyplease" +version = "0.1.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "697ae720ee02011f439e0701db107ffe2916d83f718342d65d7f8bf7b8a5fee9" +dependencies = [ + "proc-macro2", + "syn", +] + [[package]] name = "primitive-types" version = "0.11.1" @@ -3746,28 +3213,35 @@ dependencies = [ ] [[package]] -name = "prometheus" -version = "0.13.1" +name = "prost" +version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cface98dfa6d645ea4c789839f176e4b072265d085bfcc48eaa8d137f58d3c39" +checksum = "71adf41db68aa0daaefc69bb30bcd68ded9b9abaad5d1fbb6304c4fb390e083e" dependencies = [ - "cfg-if 1.0.0", - "fnv", - "lazy_static", - "memchr", - "parking_lot", - "protobuf", - "thiserror", + "bytes", + "prost-derive", ] [[package]] -name = "prost" +name = "prost-build" version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71adf41db68aa0daaefc69bb30bcd68ded9b9abaad5d1fbb6304c4fb390e083e" +checksum = "8ae5a4388762d5815a9fc0dea33c56b021cdc8dde0c55e0c9ca57197254b0cab" dependencies = [ "bytes", - "prost-derive", + "cfg-if 1.0.0", + "cmake", + "heck 0.4.0", + "itertools", + "lazy_static", + "log", + "multimap", + "petgraph", + "prost", + "prost-types", + "regex", + "tempfile", + "which", ] [[package]] @@ -3793,45 +3267,6 @@ dependencies = [ "prost", ] -[[package]] -name = "protobuf" -version = "2.27.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf7e6d18738ecd0902d30d1ad232c9125985a3422929b16c65517b38adc14f96" - -[[package]] -name = "pulldown-cmark" -version = "0.9.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d9cc634bc78768157b5cbfe988ffcd1dcba95cd2b2f03a88316c08c6d00ed63" -dependencies = [ - "bitflags", - "memchr", - "unicase", -] - -[[package]] -name = "quanta" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7e31331286705f455e56cca62e0e717158474ff02b7936c1fa596d983f4ae27" -dependencies = [ - "crossbeam-utils 0.8.11", - "libc", - "mach", - "once_cell", - "raw-cpuid", - "wasi 0.10.2+wasi-snapshot-preview1", - "web-sys", - "winapi", -] - -[[package]] -name = "quick-error" -version = "1.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" - [[package]] name = "quote" version = "1.0.21" @@ -3847,6 +3282,19 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" +[[package]] +name = "rand" +version = "0.4.6" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "552840b97013b1a26992c11eac34bdd778e464601a4c2054b5f0bff7c6761293" +dependencies = [ + "fuchsia-cprng", + "libc", + "rand_core 0.3.1", + "rdrand", + "winapi", +] + [[package]] name = "rand" version = "0.7.3" @@ -3883,15 +3331,30 @@ dependencies = [ ] [[package]] -name = "rand_chacha" +name = "rand_chacha" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +dependencies = [ + "ppv-lite86", + "rand_core 0.6.3", +] + +[[package]] +name = "rand_core" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +checksum = "7a6fdeb83b075e8266dcc8762c22776f6877a63111121f5f8c7411e5be7eed4b" dependencies = [ - "ppv-lite86", - "rand_core 0.6.3", + "rand_core 0.4.2", ] +[[package]] +name = "rand_core" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c33a3c44ca05fa6f1807d8e6743f3824e8509beca625669633be0acbdf509dc" + [[package]] name = "rand_core" version = "0.5.1" @@ -3938,15 +3401,6 @@ dependencies = [ "rand_core 0.5.1", ] -[[package]] -name = "raw-cpuid" -version = "10.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c49596760fce12ca21550ac21dc5a9617b2ea4b6e0aa7d8dab8ff2824fc2bba" -dependencies = [ - "bitflags", -] - [[package]] name = "rawpointer" version = "0.2.1" @@ -3973,10 +3427,19 @@ checksum = "258bcdb5ac6dad48491bb2992db6b7cf74878b0384908af124823d118c99683f" dependencies = [ "crossbeam-channel", "crossbeam-deque", - "crossbeam-utils 0.8.11", + "crossbeam-utils", "num_cpus", ] +[[package]] +name = "rdrand" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2" +dependencies = [ + "rand_core 0.3.1", +] + [[package]] name = "redox_syscall" version = "0.2.16" @@ -4052,12 +3515,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "retry" -version = "1.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac95c60a949a63fd2822f4964939662d8f2c16c4fa0624fd954bc6e703b9a3f6" - [[package]] name = "rfc6979" version = "0.1.0" @@ -4078,7 +3535,7 @@ dependencies = [ "cc", "libc", "once_cell", - "spin 0.5.2", + "spin", "untrusted", "web-sys", "winapi", @@ -4104,31 +3561,6 @@ dependencies = [ "opaque-debug 0.3.0", ] -[[package]] -name = "rouille" -version = "3.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18b2380c42510ef4a28b5f228a174c801e0dec590103e215e60812e2e2f34d05" -dependencies = [ - "base64", - "brotli", - "chrono", - "deflate", - "filetime", - "multipart", - "num_cpus", - "percent-encoding", - "rand 0.8.5", - "serde", - "serde_derive", - "serde_json", - "sha1", - "threadpool", - "time", - "tiny_http", - "url", -] - [[package]] name = "rs_merkle" version = "1.2.0" @@ -4224,12 +3656,6 @@ dependencies = [ "base64", ] -[[package]] -name = "rustversion" -version = "1.0.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97477e48b4cf8603ad5f7aaf897467cf42ab4218a38ef76fb14c2d6773a6d6a8" - [[package]] name = "ryu" version = "1.0.11" @@ -4283,12 +3709,6 @@ dependencies = [ "safe-regex-compiler", ] -[[package]] -name = "safemem" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"ef703b7cb59335eae2eb93ceb664c0eb7ea6bf567079d843e09420219668e072" - [[package]] name = "same-file" version = "1.0.6" @@ -4334,15 +3754,6 @@ dependencies = [ "windows-sys", ] -[[package]] -name = "scheduled-thread-pool" -version = "0.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "977a7519bff143a44f842fd07e80ad1329295bd71686457f18e496736f4bf9bf" -dependencies = [ - "parking_lot", -] - [[package]] name = "schemars" version = "0.8.10" @@ -4419,7 +3830,6 @@ checksum = "08da66b8b0965a5555b6bd6639e68ccba85e1e2506f5fbb089e93f8a04e1a2d1" dependencies = [ "der", "generic-array 0.14.6", - "pkcs8", "subtle", "zeroize", ] @@ -4430,17 +3840,7 @@ version = "0.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c42e6f1735c5f00f51e43e28d6634141f2bcad10931b2609ddd74a86d751260" dependencies = [ - "secp256k1-sys 0.4.2", -] - -[[package]] -name = "secp256k1" -version = "0.22.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26947345339603ae8395f68e2f3d85a6b0a8ddfe6315818e80b8504415099db0" -dependencies = [ - "secp256k1-sys 0.5.2", - "serde", + "secp256k1-sys", ] [[package]] @@ -4452,22 +3852,12 @@ dependencies = [ "cc", ] -[[package]] -name = "secp256k1-sys" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "152e20a0fd0519390fc43ab404663af8a0b794273d2a91d60ad4a39f13ffe110" -dependencies = [ - "cc", -] - [[package]] name = "secrecy" version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9bd1c54ea06cfd2f6b63219704de0b9b4f72dcc2b8fdef820be6cd799780e91e" dependencies = [ - "serde", "zeroize", ] @@ -4499,9 +3889,6 @@ name = "semver" version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "93f6841e709003d68bb2deee8c343572bf446003ec20a583e76f7b15cebf3711" -dependencies = [ - "serde", -] [[package]] name = "serde" @@ -4565,43 +3952,6 @@ dependencies = [ "syn", ] -[[package]] -name = "serde_yaml" -version = "0.8.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "578a7433b776b56a35785ed5ce9a7e777ac0598aac5a6dd1b4b18a307c7fc71b" -dependencies = [ - "indexmap", - "ryu", - "serde", - "yaml-rust", -] - -[[package]] -name = "serial_test" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d19dbfb999a147cedbfe82f042eb9555f5b0fa4ef95ee4570b74349103d9c9f4" -dependencies = [ - "lazy_static", - "log", - "parking_lot", - "serial_test_derive", -] - -[[package]] -name = "serial_test_derive" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb9e2050b2be1d681f8f1c1a528bcfe4e00afa2d8995f713974f5333288659f2" -dependencies = [ - "proc-macro-error", - "proc-macro2", - "quote", - "rustversion", - "syn", -] - [[package]] name = "sha-1" version = "0.9.8" @@ -4626,21 +3976,6 @@ dependencies = [ "digest 0.10.3", ] -[[package]] -name = "sha1" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1da05c97445caa12d05e848c4a4fcbbea29e748ac28f7e80e9b010392063770" -dependencies = [ - "sha1_smol", -] - -[[package]] -name = "sha1_smol" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae1a47186c03a32177042e55dbc5fd5aee900b8e0069a8d70fba96a9375cd012" - [[package]] name = "sha2" version = "0.8.2" @@ -4708,16 +4043,6 @@ dependencies = [ "lazy_static", ] -[[package]] -name = "signal-hook" -version = "0.3.14" -source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a253b5e89e2698464fc26b545c9edceb338e18a89effeeecfea192c3025be29d" -dependencies = [ - "libc", - "signal-hook-registry", -] - [[package]] name = "signal-hook-registry" version = "1.4.0" @@ -4755,21 +4080,6 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cc47a29ce97772ca5c927f75bac34866b16d64e07f330c3248e2d7226623901b" -[[package]] -name = "skeptic" -version = "0.13.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16d23b015676c90a0f01c197bfdc786c20342c73a0afdda9025adb0bc42940a8" -dependencies = [ - "bytecount", - "cargo_metadata", - "error-chain", - "glob", - "pulldown-cmark", - "tempfile", - "walkdir", -] - [[package]] name = "slab" version = "0.4.7" @@ -4964,7 +4274,7 @@ dependencies = [ "regex", "scale-info", "schnorrkel", - "secp256k1 0.21.3", + "secp256k1", "secrecy", "serde", "sp-core-hashing 4.0.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -5010,7 +4320,7 @@ dependencies = [ "regex", "scale-info", "schnorrkel", - "secp256k1 0.21.3", + "secp256k1", "secrecy", "serde", "sp-core-hashing 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", @@ -5145,7 +4455,7 @@ dependencies = [ "log", "parity-scale-codec", "parking_lot", - "secp256k1 0.21.3", + "secp256k1", "sp-core 6.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "sp-externalities 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)", "sp-keystore 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -5170,7 +4480,7 @@ dependencies = [ "log", "parity-scale-codec", "parking_lot", - "secp256k1 0.21.3", + "secp256k1", "sp-core 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", "sp-externalities 0.12.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", "sp-keystore 0.12.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", @@ -5599,25 +4909,6 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" -[[package]] -name = "spin" -version = "0.9.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f6002a767bff9e83f8eeecf883ecb8011875a21ae8da43bffb817a57e78cc09" -dependencies = [ - "lock_api", -] - -[[package]] -name = "spki" -version = "0.5.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44d01ac02a6ccf3e07db148d2be087da624fea0221a16152ed01f0496a6b0a27" -dependencies = [ - "base64ct", - "der", -] - [[package]] name = "ss58-registry" version = "1.25.0" @@ -5725,7 +5016,7 @@ checksum = "4c9462b52d539cde2e0dbbd1c89d28079459ed790f42218c5bfc9d61c9575e32" dependencies = [ "darling", "frame-metadata", - "heck", + "heck 0.4.0", "parity-scale-codec", "proc-macro-error", "proc-macro2", @@ -5788,18 +5079,22 @@ dependencies = [ "unicode-xid", ] -[[package]] -name = "tagptr" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b2093cf4c8eb1e67749a6762251bc9cd836b6fc171623bd0a9d324d37af2417" - [[package]] name = "tap" version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" +[[package]] +name = "tempdir" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"15f2b5fb00ccdf689e0149d1b1b3c03fead81c2b37735d812fa8bddbbf41b6d8" +dependencies = [ + "rand 0.4.6", + "remove_dir_all", +] + [[package]] name = "tempfile" version = "3.3.0" @@ -5825,12 +5120,10 @@ dependencies = [ "ed25519-consensus", "flex-error", "futures", - "k256", "num-traits", "once_cell", "prost", "prost-types", - "ripemd", "serde", "serde_bytes", "serde_json", @@ -5857,29 +5150,6 @@ dependencies = [ "url", ] -[[package]] -name = "tendermint-light-client" -version = "0.24.0-pre.2" -source = "git+https://github.com/composableFi/tendermint-rs?rev=5a74e0f8da4d3dab83cc04b5f1363b018cf3d9e8#5a74e0f8da4d3dab83cc04b5f1363b018cf3d9e8" -dependencies = [ - "async-recursion", - "async-trait", - "contracts", - "derive_more", - "flex-error", - "flume", - "futures", - "futures-timer", - "serde", - "serde_derive", - "static_assertions", - "tendermint", - "tendermint-light-client-verifier", - "tendermint-rpc", - "time", - "tokio", -] - [[package]] name = "tendermint-light-client-verifier" version = "0.24.0-pre.2" @@ -5939,7 +5209,7 @@ dependencies = [ "tokio", "tracing", "url", - "uuid 0.8.2", + "uuid", "walkdir", ] @@ -5967,16 +5237,6 @@ dependencies = [ "winapi-util", ] -[[package]] -name = "terminal_size" -version = "0.1.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "633c1a546cee861a1a6d0dc69ebeca693bf4296661ba7852b9d21d159e0506df" -dependencies = [ - "libc", - "winapi", -] - [[package]] name = "test-log" version = "0.2.11" @@ -6023,15 +5283,6 @@ dependencies = [ "once_cell", ] -[[package]] -name = "threadpool" -version = "1.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d050e60b33d41c19108b32cea32164033a9013fe3b46cbd4457559bfbf77afaa" -dependencies = [ - "num_cpus", -] - [[package]] name = "time" version = "0.3.12" @@ -6078,19 +5329,6 @@ dependencies = [ "crunchy", ] -[[package]] -name = "tiny_http" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ce51b50006056f590c9b7c3808c3bd70f0d1101666629713866c227d6e58d39" -dependencies = [ - "ascii", - "chrono", - "chunked_transfer", - "log", - "url", -] - [[package]] name = "tinyvec" version = "1.6.0" @@ -6227,10 +5465,7 @@ dependencies = [ "pin-project", "prost", "prost-derive", - "rustls-native-certs 0.6.2", - "rustls-pemfile", "tokio", - "tokio-rustls 0.23.4", "tokio-stream", "tokio-util", "tower", @@ -6240,6 +5475,19 @@ dependencies = [ "tracing-futures", ] +[[package]] +name = "tonic-build" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9263bf4c9bfaae7317c1c2faf7f18491d2fe476f70c414b73bf5d445b00ffa1" +dependencies = [ + "prettyplease", + "proc-macro2", + "prost-build", + "quote", + "syn", +] + [[package]] name = "tower" version = "0.4.13" @@ -6431,12 +5679,6 @@ dependencies = [ "hash-db", ] -[[package]] -name = "triomphe" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fe1b3800b35f9b936c28dc59dbda91b195371269396784d931fe2a5a2be3d2f" - [[package]] name = "try-lock" version = "0.2.3" @@ -6468,15 +5710,6 @@ dependencies = [ "utf-8", ] -[[package]] -name = "twoway" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59b11b2b5241ba34be09c3cc85a36e56e48f9888862e19cedf23336d35316ed1" -dependencies = [ - "memchr", -] - [[package]] name = "twox-hash" version = "1.6.3" @@ -6538,10 +5771,10 @@ dependencies = [ ] [[package]] -name = "unicode-width" -version = "0.1.9" +name = 
"unicode-segmentation" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ed742d4ea2bd1176e236172c8429aaf54486e7ac098db29ffe6529e0ce50973" +checksum = "7e8820f5d777f6224dc4be3632222971ac30164d4a258d595640799554ebfd99" [[package]] name = "unicode-xid" @@ -6596,15 +5829,6 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" -[[package]] -name = "uuid" -version = "1.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd6469f4314d5f1ffec476e05f17cc9a78bc7a27a6a857842170bdf8d6f98d2f" -dependencies = [ - "getrandom 0.2.7", -] - [[package]] name = "valuable" version = "0.1.0" @@ -6612,19 +5836,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" [[package]] -name = "version_check" -version = "0.9.4" +name = "vcpkg" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" +checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" [[package]] -name = "wait-timeout" -version = "0.2.0" +name = "version_check" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f200f5b12eb75f8c1ed65abd4b2db8a6e1b138a20de009dacee265a2498f3f6" -dependencies = [ - "libc", -] +checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" [[package]] name = "walkdir" @@ -6653,12 +5874,6 @@ version = "0.9.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" -[[package]] -name = "wasi" -version = "0.10.2+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" - [[package]] name = "wasi" version = "0.11.0+wasi-snapshot-preview1" @@ -6791,6 +6006,17 @@ dependencies = [ "webpki 0.22.0", ] +[[package]] +name = "which" +version = "4.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c4fb54e6113b6a8772ee41c3404fb0301ac79604489467e0a9ce1f3e97c24ae" +dependencies = [ + "either", + "lazy_static", + "libc", +] + [[package]] name = "winapi" version = "0.3.9" @@ -6874,15 +6100,6 @@ dependencies = [ "tap", ] -[[package]] -name = "yaml-rust" -version = "0.4.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56c1936c4cc7a1c9ab21a1ebb602eb942ba868cbd44a99cb7cdc5892335e1c85" -dependencies = [ - "linked-hash-map", -] - [[package]] name = "zeroize" version = "1.5.7" diff --git a/Cargo.toml b/Cargo.toml index eaf4e09dfc..e6336aa79b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -4,24 +4,6 @@ resolver = "2" members = [ "modules", - "relayer", - "relayer-cli", - "relayer-rest", - "telemetry", "proto", - "tools/integration-test", - "tools/test-framework", -] - -exclude = [ - "ci/no-std-check", "proto-compiler" -] - -# [patch.crates-io] -# tendermint = { git = "https://github.com/informalsystems/tendermint-rs", branch = "v0.23.x" } -# tendermint-rpc = { git = "https://github.com/informalsystems/tendermint-rs", branch = "v0.23.x" } -# tendermint-proto = { git = "https://github.com/informalsystems/tendermint-rs", branch = "v0.23.x" } -# tendermint-light-client = { git = 
"https://github.com/informalsystems/tendermint-rs", branch = "v0.23.x" } -# tendermint-light-client-verifier = { git = "https://github.com/informalsystems/tendermint-rs", branch = "v0.23.x" } -# tendermint-testgen = { git = "https://github.com/informalsystems/tendermint-rs", branch = "v0.23.x" } +] \ No newline at end of file diff --git a/ci/.gitignore b/ci/.gitignore deleted file mode 100644 index 699ff5ada7..0000000000 --- a/ci/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -/gaia/**/* -data/**/* -*.log \ No newline at end of file diff --git a/ci/README.md b/ci/README.md deleted file mode 100644 index 087686ece8..0000000000 --- a/ci/README.md +++ /dev/null @@ -1,230 +0,0 @@ -# Continous Integration for ibc-rs - -This folder contains the files required to run the End to end testing in [Github Actions](https://github.com/informalsystems/ibc-rs/actions) - -## End to end (e2e) testing - -The [End to end (e2e) testing workflow](https://github.com/informalsystems/ibc-rs/actions?query=workflow%3A%22End+to+End+testing%22) spins up two `gaia` chains (`ibc-0` and `ibc-1`) in Docker containers and one container that runs the relayer. There's a script that configures the relayer (e.g. configure light clients and add keys) and runs transactions and queries. A successful run of this script ensures that the relayer is working properly with two chains that support `IBC`. - -### Testing Ethermint-based networks -At this moment, the automated E2E workflow does not spin up a network with the (post-Stargate) Ethermint module. In the meantime, you can test it manually by following one of the resources below: - -- [the official documentation on ethermint.dev](https://docs.ethermint.zone/quickstart/run_node.html) -- [using the tweaked E2E scripts from the Injective's fork](https://github.com/InjectiveLabs/ibc-rs/commit/669535617a6e45be9916387e292d45a77e7d23d2) -- [using the nix-based integration test scripts in the Cronos project](https://github.com/crypto-org-chain/cronos#quitck-start) - -### Running an End to end (e2e) test locally - -If you want to run the end to end test locally, you will need [Docker](https://www.docker.com/) installed on your machine. - -Follow these steps to run the e2e test locally: - -__Note__: This assumes you are running this the first time, if not, please ensure you follow the steps to [clean up the containers](#cleaning-up) before running it again. - -1. Clone the `ibc-rs` repo. - -2. Go into the repository folder: - - `cd ibc-rs` - -3. Build the current up-to-date relayer container image. Make sure you have a - copy of the `hermes` binary in the root of the repo, as the docker script - expects this. If you don't, you can run `cp ./target/debug/hermes .`. This - command might take a few minutes since it will do a fresh compile and build - of all modules: - - `docker-compose -f ci/docker-compose-gaia-current.yml build relayer` - -> Note: If you're looking to test on a legacy version of gaia, run: -> `docker-compose -f ci/docker-compose-gaia-legacy.yml build relayer`. -> You'll use the `docker-compose-gaia-legacy.yml` configuration instead of -> `docker-compose-gaia-current.yml` in all cases. - -4. 
Run all the containers (three containers: one for each chain and one for the relayer) - - `docker-compose -f ci/docker-compose-gaia-current.yml up -d ibc-0 ibc-1 relayer` - - If everything is successful you should see among the output something - similar to this: - ```shell - Network ci_relaynet Created - Container ibc-1 Started - Container ibc-0 Started - Container relayer Started - ``` - - If you want to ensure they are all running properly you can use the command: - - `docker-compose -f ci/docker-compose-gaia-current.yml ps` - - You should see the message below. Please ensure all containers are in a `running` state. - ```shell - - ibc-0 "/chain/gaia/run-gaia..." ibc-0 running - ibc-1 "/chain/gaia/run-gaia..." ibc-1 running - relayer "/bin/sh" relayer running - ``` - - __Note__: If this is the first time you're running this command, the `informaldev/ibc-0:[RELEASE TAG]` and `informaldev/ibc-1:[RELEASE TAG]` container images will be pulled from Docker Hub. For instructions on how to update these images in Docker Hub please see the [upgrading the release](#upgrading-chains) section. - - -5. Run the command below to execute the relayer end to end (e2e) test. This command will execute the `e2e.sh` script on the relayer container. The script will configure the light clients for both chains, add the private keys for both chains and run transactions on both chains (e.g. create-client transaction). - - `docker exec relayer /bin/sh -c /relayer/e2e.sh` - - If the script runs successfully you should see an output similar to this one: -```shell -================================================================================================================= - INITIALIZE -================================================================================================================= ------------------------------------------------------------------------------------------------------------------ -Show relayer version ------------------------------------------------------------------------------------------------------------------ -relayer-cli 0.2.0 ------------------------------------------------------------------------------------------------------------------ -Setting up chains ------------------------------------------------------------------------------------------------------------------ -Config: /relayer/simple_config.toml - Chain: ibc-0 - creating chain store folder: [/data/ibc-0] - Chain: ibc-1 [/data/ibc-1] - creating chain store folder: [/data/ibc-1] -Waiting 20 seconds for chains to generate blocks... 
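# Note (hedged sketch, shown as shell comments so the captured transcript above stays intact):
# instead of a fixed 20-second sleep, the wait could poll the chain RPC until the first block
# is produced. This assumes `curl` and `jq` are available and that the ibc-0 RPC endpoint
# (tcp://ibc-0:26657, as used by the light client commands below) is reachable from where
# the check runs, e.g. inside the relayer container:
#
#   until curl -s http://ibc-0:26657/status \
#       | jq -e '.result.sync_info.latest_block_height | tonumber >= 1' >/dev/null; do
#     sleep 1
#   done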
-================================================================================================================= - CONFIGURATION -================================================================================================================= ------------------------------------------------------------------------------------------------------------------ -Add keys for chains ------------------------------------------------------------------------------------------------------------------ -key add result: "Added key testkey (cosmos1tc3vcuxyyac0dmayf887v95tdg7qpyql48w7gj) on ibc-0 chain" -key add result: "Added key testkey (cosmos1zv3etpuk4n7p54r8fhsct0qys8eqf5zqw4pqp5) on ibc-1 chain" ------------------------------------------------------------------------------------------------------------------ -Set the primary peers for clients on each chain ------------------------------------------------------------------------------------------------------------------ -Executing: hermes -c /relayer/simple_config.toml light add tcp://ibc-0:26657 -c ibc-0 -s /data/ibc-0 -p -y -f - Success Added light client: - chain id: ibc-0 - address: tcp://ibc-0:26657 - peer id: DF766B47325BE49E27F9DF325327853AAFB5BBCA - height: 731 - hash: 22639F0B84C0E95D51AB70D900E7BC0CBFBDF642F3F945093FF7AEB8120CC8DC - primary: true ------------------------------------------------------------------------------------------------------------------ -Executing: hermes -c /relayer/simple_config.toml light add tcp://ibc-1:26657 -c ibc-1 -s /data/ibc-1 -p -y -f - Success Added light client: - chain id: ibc-1 - address: tcp://ibc-1:26657 - peer id: B7A2809AA8AA825225D618DDD5200AB9BA236797 - height: 733 - hash: D5C190747A1A0805C4840FBF66EC3339E0C30520359EF36553508DBD775A6EEF - primary: true ------------------------------------------------------------------------------------------------------------------ -Set the secondary peers for clients on each chain ------------------------------------------------------------------------------------------------------------------ -Executing: hermes -c /relayer/simple_config.toml light add tcp://ibc-0:26657 -c ibc-0 -s /data/ibc-0 --peer-id 17D46D8C1576A79203A6733F63B2C9B7235DD559 -y - Success Added light client: - chain id: ibc-0 - address: tcp://ibc-0:26657 - peer id: 17D46D8C1576A79203A6733F63B2C9B7235DD559 - height: 735 - hash: 463691EED61772C333D38C5DC5F267946341F98ADE8EF9FBBE501A96022E5F1A - primary: false ------------------------------------------------------------------------------------------------------------------ -Executing: hermes -c /relayer/simple_config.toml light add tcp://ibc-1:26657 -c ibc-1 -s /data/ibc-1 --peer-id A885BB3D3DFF6101188B462466AE926E7A6CD51E -y - Success Added light client: - chain id: ibc-1 - address: tcp://ibc-1:26657 - peer id: A885BB3D3DFF6101188B462466AE926E7A6CD51E - height: 737 - hash: 5377D2FDCD1886129AF32AABFDFC4C80112B2465F49814E91C25FD325DE54DCC - primary: false -================================================================================================================= - CLIENTS -================================================================================================================= ------------------------------------------------------------------------------------------------------------------ -Create client transactions ------------------------------------------------------------------------------------------------------------------ -Creating ibc-1 client on chain ibc-0 -Message CreateClient for source chain: ChainId { id: "ibc-1", 
version: 1 }, on destination chain: ChainId { id: "ibc-0", version: 0 } -Jan 21 18:46:57.259 INFO relayer::event::monitor: running listener chain.id=ibc-1 -Jan 21 18:46:57.278 INFO relayer::event::monitor: running listener chain.id=ibc-0 -{"status":"success","result":[{"CreateClient":{"client_id":"07-tendermint-0","client_type":"Tendermint","consensus_height":{"revision_height":739,"revision_number":1},"height":"1"}}]} ------------------------------------------------------------------------------------------------------------------ -Creating ibc-0 client on chain ibc-1 -Message CreateClient for source chain: ChainId { id: "ibc-0", version: 0 }, on destination chain: ChainId { id: "ibc-1", version: 1 } -Jan 21 18:46:58.299 INFO relayer::event::monitor: running listener chain.id=ibc-0 -Jan 21 18:46:58.324 INFO relayer::event::monitor: running listener chain.id=ibc-1 -{"status":"success","result":[{"CreateClient":{"client_id":"07-tendermint-0","client_type":"Tendermint","consensus_height":{"revision_height":740,"revision_number":0},"height":"1"}}]} -``` - -### [Cleaning up](#cleaning-up) - -In order to clear up the testing environment and stop the containers you can run the command below - -`docker-compose -f ci/docker-compose-gaia-current.yml down` - -If the command executes successfully you should see the message below: - -```shell -Container relayer Removed -Container ibc-1 Removed -Container ibc-0 Removed -Network ci_relaynet Removed -``` - -### [Upgrading the gaia chains release and generating new container images](#upgrading-chains) - -The repository stores the files used to configure and build the chains for the containers. For example, the files for a `gaia` chain release `v5.0.0` can be seen [here](./chains/gaia) - -> Note: Please ensure you have gaiad installed on your machine and it matches the version that you're trying to upgrade. -> You can check by running `gaiad version` in your machine. -> -> If you need to upgrade your local `gaiad` binary, you can follow the steps -> listed in the Cosmos Hub documentation on [installing the binary](https://hub.cosmos.network/main/getting-started/installation.html). - -If you need to generate configuration files for a new gaia release and new containers, please follow the steps below: - -1. Move into the `ci` folder - - `cd ci` - - -2. Open the `build-ibc-chains.sh` file and change the release. Just replace the value for the `GAIA_BRANCH` parameter. For example to set it to release `v5.0.0` use: - `GAIA_BRANCH="v5.0.0"` - - -3. Run the `build-ibc-chains.sh` script: - - `./build-ibc-chains.sh` - - -__Note__: This will generate the files for the chains in the `/ci/chains/gaia` folder and build the Docker containers. At the end of the script it will ask if you want to push these new images to Docker Hub. In order to do so you need to have Docker login configured on your machine with permissions to push the container. If you don't want to push them (just have them built locally) just cancel the script execution (by hitting `CTRL+C`) - - -4. Committing the release files. **You have to** add the new chain files generated to the ibc-rs repository, just `git commit` the files, otherwise the CI might fail because private keys don't match. - - -5. Update the release for Docker Compose. If this new release should be the default release for running the end to end (e2e) test you need to update the release version in the `docker-compose.yml` file in the `ci` folder of the repository. 
Open the file and change the release version in all the places required (image name and RELEASE variables. For example, if current release is `v4.0.0` and the new one is `v5.0.0` just do a find and replace with these two values. - -Change the version in the image for ibc-0 and ibc-1 services: - - ``` - image: "informaldev/ibc-0:v4.0.0" - ``` - -And in the relayer service: - - ``` - args: - RELEASE: v4.0.0 - ``` - -6. Update the CI workflow - -There are currently two CI workflows, for running the E2E tests against two versions of gaiad: - - legacy release: `.github\workflows\e2e-gaia-legacy-release.yaml`, and - - current release: `.github\workflows\e2e-gaia-current-release.yaml`. - -Depending on which of the two setups you have upgraded at the prior steps, change the `name` key in the corresponding workflow file to match with the version of the upgraded gaia used, e.g.: - -`name: End to End testing (Gaia - v6.0.0)` diff --git a/ci/bootstrap_gaia.sh b/ci/bootstrap_gaia.sh deleted file mode 100755 index c1f9cd4509..0000000000 --- a/ci/bootstrap_gaia.sh +++ /dev/null @@ -1,94 +0,0 @@ -#!/bin/sh - -# This file can be used to initialize a chain - -# coins to add to each account -coins="100000000000stake,100000000000samoleans" -STAKE="100000000000stake" -# - the user also needs stake to perform actions -USER_COINS="${STAKE},${SAMOLEANS}samoleans" -#home="/chain" - -echo Node: "$MONIKER" -echo Chain: "$CHAIN_ID" -echo Chain IP: "$CHAIN_IP" -echo RPC Port: "$RPC_PORT" -echo GRPC Port: "$GRPC_PORT" -echo Home_Dir: "$CHAIN_HOME" - -# Clean home dir if exists -rm -Rf "$CHAIN_HOME" - -# Create home dir -mkdir -p "$CHAIN_HOME" - -ls -allh "$CHAIN_HOME" - -# Check gaia version -echo "-------------------------------------------------------------------------------------------------------------------" -echo "Gaiad version" -echo "-------------------------------------------------------------------------------------------------------------------" -gaiad version --long - -echo "-------------------------------------------------------------------------------------------------------------------" -echo "Initialize chain $CHAIN_ID" -echo "-------------------------------------------------------------------------------------------------------------------" -gaiad init "$MONIKER" --chain-id "$CHAIN_ID" --home "$CHAIN_HOME" - -echo "-------------------------------------------------------------------------------------------------------------------" -echo "Replace addresses and ports in the config file and some performance tweaks" -echo "-------------------------------------------------------------------------------------------------------------------" -sed -i 's#"tcp://127.0.0.1:26657"#"tcp://'"$CHAIN_IP"':'"$RPC_PORT"'"#g' "$CHAIN_HOME"/config/config.toml -#sed -i 's#"tcp://0.0.0.0:26656"#"tcp://'"$CHAIN_ID"':'"$P2P_PORT"'"#g' "$CHAIN_HOME"/config/config.toml -#sed -i 's#grpc_laddr = ""#grpc_laddr = "tcp://'"$CHAIN_IP"':'"$GRPC_PORT"'"#g' "$CHAIN_HOME"/config/config.toml -sed -i 's/timeout_commit = "5s"/timeout_commit = "1s"/g' "$CHAIN_HOME"/config/config.toml -sed -i 's/timeout_propose = "3s"/timeout_propose = "1s"/g' "$CHAIN_HOME"/config/config.toml -sed -i 's/index_all_keys = false/index_all_keys = true/g' "$CHAIN_HOME"/config/config.toml - -echo "-------------------------------------------------------------------------------------------------------------------" -echo "Adding validator key" -echo "-------------------------------------------------------------------------------------------------------------------" 
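# Note (illustrative, assuming jq is available): the key material created below is written out
# as JSON seed files (validator_seed.json, user_seed.json, user2_seed.json). The mnemonic can be
# recovered from a seed file later, for example when restoring a key elsewhere:
#
#   jq -r .mnemonic "$CHAIN_HOME"/validator_seed.json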
-gaiad keys add validator --keyring-backend="test" --home "$CHAIN_HOME" --output json > "$CHAIN_HOME"/validator_seed.json -cat "$CHAIN_HOME"/validator_seed.json - -echo "-------------------------------------------------------------------------------------------------------------------" -echo "Adding user key" -echo "-------------------------------------------------------------------------------------------------------------------" -gaiad keys add user --keyring-backend="test" --home $CHAIN_HOME --output json > "$CHAIN_HOME"/user_seed.json -cat "$CHAIN_HOME"/user_seed.json - -echo "-------------------------------------------------------------------------------------------------------------------" -echo "Adding user2 key" -echo "-------------------------------------------------------------------------------------------------------------------" -gaiad keys add user2 --keyring-backend="test" --home $CHAIN_HOME --output json > "$CHAIN_HOME"/user2_seed.json -cat "$CHAIN_HOME"/user2_seed.json - -echo "-------------------------------------------------------------------------------------------------------------------" -echo "Adding user account to genesis" -echo "-------------------------------------------------------------------------------------------------------------------" -gaiad --home "$CHAIN_HOME" add-genesis-account $(gaiad --home "$CHAIN_HOME" keys --keyring-backend="test" show user -a) 1000000000stake -echo "Done!" - -echo "-------------------------------------------------------------------------------------------------------------------" -echo "Adding user2 account to genesis" -echo "-------------------------------------------------------------------------------------------------------------------" -gaiad --home "$CHAIN_HOME" add-genesis-account $(gaiad --home "$CHAIN_HOME" keys --keyring-backend="test" show user2 -a) 1000000000stake -echo "Done!" - -echo "-------------------------------------------------------------------------------------------------------------------" -echo "Adding validator account to genesis" -echo "-------------------------------------------------------------------------------------------------------------------" -gaiad --home "$CHAIN_HOME" add-genesis-account $(gaiad --home "$CHAIN_HOME" keys --keyring-backend="test" show validator -a) 1000000000stake,1000000000validatortoken -echo "Done!" - -echo "-------------------------------------------------------------------------------------------------------------------" -echo "Generate a genesis transaction that creates a validator with a self-delegation" -echo "-------------------------------------------------------------------------------------------------------------------" -gaiad --home "$CHAIN_HOME" gentx validator 1000000000stake --keyring-backend="test" --chain-id "$CHAIN_ID" -echo "Done!" - -echo "-------------------------------------------------------------------------------------------------------------------" -echo "Collect genesis txs and output a genesis.json file" -echo "-------------------------------------------------------------------------------------------------------------------" -gaiad collect-gentxs --home "$CHAIN_HOME" -echo "Done!" diff --git a/ci/build-chains.sh b/ci/build-chains.sh deleted file mode 100755 index 2ec002b273..0000000000 --- a/ci/build-chains.sh +++ /dev/null @@ -1,44 +0,0 @@ -#!/usr/bin/env bash - -## Programmatic list for creating Gaia Hub chains for testing IBC. -## Instead of blindly running this code, read it line by line and understand the dependecies and tasks. 
-## Prerequisites: Log into Docker Hub -set -eou pipefail -GAIA_BRANCH="v7.0.1" # Requires a version with the `--keyring-backend` option. v2.1 and above. - -echo "*** Requirements" -which git && which go && which make && which sed && which jq && which docker - -echo "*** Fetch gaiad source code" -git clone https://github.com/cosmos/gaia || echo "Already cloned." -cd gaia -git checkout "${GAIA_BRANCH}" -q - -echo "*** Build binary" -GOOS=linux CGO_ENABLED=0 make build - -echo "*** Create config using the built binary" -docker run -it --rm -v $(pwd)/build:/root:z alpine /root/gaiad testnet -o /root/chain_a --v 1 --chain-id chain_A --keyring-backend test -sed -i.bak -e 's/^index_all_keys[[:space:]]*=.*/index_all_keys = true/' build/chain_a/node0/gaiad/config/config.toml -sed -i.bak -e 's/^timeout_commit[[:space:]]*=.*/timeout_commit = "1s"/' build/chain_a/node0/gaiad/config/config.toml -sed -i.bak -e 's/^timeout_propose[[:space:]]*=.*/timeout_propose = "1s"/' build/chain_a/node0/gaiad/config/config.toml - -docker run -it --rm -v $(pwd)/build:/root:z alpine /root/gaiad testnet -o /root/chain_b --v 1 --chain-id chain_B --keyring-backend test -sed -i.bak -e 's/^index_all_keys[[:space:]]*=.*/index_all_keys = true/' build/chain_b/node0/gaiad/config/config.toml -sed -i.bak -e 's/^timeout_commit[[:space:]]*=.*/timeout_commit = "1s"/' build/chain_b/node0/gaiad/config/config.toml -sed -i.bak -e 's/^timeout_propose[[:space:]]*=.*/timeout_propose = "1s"/' build/chain_b/node0/gaiad/config/config.toml - -echo "*** Create Docker image and upload to Docker Hub" -cd .. -docker build -t informaldev/chain_a -f chain_a.Dockerfile . -docker build -t informaldev/chain_b -f chain_b.Dockerfile . - -# Get details from the config files -echo SECRET_A=$(jq -r .secret gaia/build/chain_a/node0/gaiad/key_seed.json) -echo SECRET_B=$(jq -r .secret gaia/build/chain_b/node0/gaiad/key_seed.json) -echo NODEID_A=$(jq -r .app_state.genutil.gen_txs[0].body.memo gaia/build/chain_a/node0/gaiad/config/genesis.json) -echo NODEID_B=$(jq -r .app_state.genutil.gen_txs[0].body.memo gaia/build/chain_b/node0/gaiad/config/genesis.json) - -read -p "Press ENTER to push image to Docker Hub or CTRL-C to cancel. " dontcare -docker push informaldev/chain_a -docker push informaldev/chain_b diff --git a/ci/build-ibc-chains.sh b/ci/build-ibc-chains.sh deleted file mode 100755 index 0fbcbde456..0000000000 --- a/ci/build-ibc-chains.sh +++ /dev/null @@ -1,80 +0,0 @@ -#!/usr/bin/env bash - -## Programmatic list for creating Gaia Hub chains for testing IBC. -## Instead of blindly running this code, read it line by line and understand the dependencies and tasks. -## Prerequisites: Log into Docker Hub -set -eou pipefail - -## After updating the gaia version below, double-check the following (see readme.md also): -## - the new version made it to docker hub, and is available for download, e.g. `docker pull informaldev/ibc-1:v4.0.0` -## - the image versions and the relayer release in `docker-compose.yml` are consistent with the new version - -# For building current gaia use this -# GAIA_BRANCH="v5.0.8" # Requires a version with the `--keyring-backend` option. v2.1 and above. - -# For future gaia use this -GAIA_BRANCH="v7.0.1" # Requires a version with the `--keyring-backend` option. v2.1 and above. - -# Check if gaiad is installed and if the versions match -if ! [ -x "$(command -v gaiad)" ]; then - echo 'Error: gaiad is not installed.' 
>&2 - exit 1 -fi - -CURRENT_GAIA="$(gaiad version 2>&1)" -echo "Current Gaia Version: $CURRENT_GAIA" - -if [ "$GAIA_BRANCH" != "$CURRENT_GAIA" ]; then - echo "Error: gaiad installed is different than target gaiad ($CURRENT_GAIA != $GAIA_BRANCH)" - exit 1 -else - echo "Gaiad installed matches desired version ($CURRENT_GAIA = $GAIA_BRANCH)" -fi - -BASE_DIR="$(dirname $0)" -ONE_CHAIN="$BASE_DIR/../scripts/one-chain" - -echo "*** Building config folders" - -CHAIN_HOME="./chains/gaia/$GAIA_BRANCH" - -# Clean home dir if exists -rm -Rf "$CHAIN_HOME" - -# Create home dir -mkdir -p "$CHAIN_HOME" - -ls -allh "$CHAIN_HOME" - -# Check gaia version -echo "-------------------------------------------------------------------------------------------------------------------" -echo "Gaiad version" -echo "-------------------------------------------------------------------------------------------------------------------" -gaiad version --long --log_level error - -MONIKER=node_ibc_0 -CHAIN_ID=ibc-0 -CHAIN_IP=172.25.0.10 -RPC_PORT=26657 -GRPC_PORT=9090 -CHAIN_SAMOLEANS=100000000000 -"$ONE_CHAIN" gaiad "$CHAIN_ID" "$CHAIN_HOME" "$RPC_PORT" 26656 6060 "$GRPC_PORT" "$CHAIN_SAMOLEANS" - -MONIKER=node_ibc_1 -CHAIN_ID=ibc-1 -CHAIN_IP=172.25.0.11 -RPC_PORT=26657 -GRPC_PORT=9090 -CHAIN_SAMOLEANS=100000000000 -"$ONE_CHAIN" gaiad "$CHAIN_ID" "$CHAIN_HOME" "$RPC_PORT" 26656 6060 "$GRPC_PORT" "$CHAIN_SAMOLEANS" - -echo "*** Requirements" -which docker - -echo "*** Create Docker image and upload to Docker Hub" -docker build --build-arg CHAIN=gaia --build-arg RELEASE=$GAIA_BRANCH --build-arg NAME=ibc-0 --no-cache -t informaldev/ibc-0:$GAIA_BRANCH -f "$BASE_DIR/gaia.Dockerfile" . -docker build --build-arg CHAIN=gaia --build-arg RELEASE=$GAIA_BRANCH --build-arg NAME=ibc-1 --no-cache -t informaldev/ibc-1:$GAIA_BRANCH -f "$BASE_DIR/gaia.Dockerfile" . - -read -p "Press ANY KEY to push image to Docker Hub, or CTRL-C to cancel. " dontcare -docker push informaldev/ibc-0:$GAIA_BRANCH -docker push informaldev/ibc-1:$GAIA_BRANCH diff --git a/ci/build-simd.sh b/ci/build-simd.sh deleted file mode 100755 index 390cda7161..0000000000 --- a/ci/build-simd.sh +++ /dev/null @@ -1,24 +0,0 @@ -#!/usr/bin/env bash - -## Programmatic list for creating a simd chain for testing IBC. -## Instead of blindly running this code, read it line by line and understand the dependecies and tasks. -## Prerequisites: Log into Docker Hub -set -eou pipefail -GAIA_BRANCH="master" - -echo "*** Requirements" -which git && which go && which make && which docker - -echo "*** Fetch gaiad source code" -git clone https://github.com/cosmos/cosmos-sdk || echo "Already cloned." -cd cosmos-sdk -git checkout "${GAIA_BRANCH}" -q - -echo "*** Build binary" -GOOS=linux make build-simd - -echo "*** Create Docker image and upload to Docker Hub" -cd .. -docker build -t informaldev/simd -f simd.Dockerfile . -read -p "Press ENTER to push image to Docker Hub or CTRL-C to cancel. 
" dontcare -docker push informaldev/simd diff --git a/ci/chain_a.Dockerfile b/ci/chain_a.Dockerfile deleted file mode 100644 index 9eae1fcb5a..0000000000 --- a/ci/chain_a.Dockerfile +++ /dev/null @@ -1,14 +0,0 @@ -FROM alpine -LABEL maintainer="hello@informal.systems" - -EXPOSE 26656 26657 26660 - -ENTRYPOINT ["/usr/bin/gaiad"] - -CMD ["start"] - -VOLUME [ "/root" ] - -COPY gaia/build/gaiad /usr/bin/gaiad -COPY gaia/build/chain_a/node0/gaiad /root/.gaia -COPY gaia/build/chain_a/node0/gaiad/key_seed.json /root/key_seed.json diff --git a/ci/chain_b.Dockerfile b/ci/chain_b.Dockerfile deleted file mode 100644 index 7afbbacfcf..0000000000 --- a/ci/chain_b.Dockerfile +++ /dev/null @@ -1,14 +0,0 @@ -FROM alpine -LABEL maintainer="hello@informal.systems" - -EXPOSE 26656 26657 26660 - -ENTRYPOINT ["/usr/bin/gaiad"] - -CMD ["start"] - -VOLUME [ "/root" ] - -COPY gaia/build/gaiad /usr/bin/gaiad -COPY gaia/build/chain_b/node0/gaiad /root/.gaia -COPY gaia/build/chain_b/node0/gaiad/key_seed.json /root/key_seed.json diff --git a/ci/chains/gaia/v3.0.0/ibc-0/config/app.toml b/ci/chains/gaia/v3.0.0/ibc-0/config/app.toml deleted file mode 100644 index 54de73e392..0000000000 --- a/ci/chains/gaia/v3.0.0/ibc-0/config/app.toml +++ /dev/null @@ -1,152 +0,0 @@ -# This is a TOML config file. -# For more information, see https://github.com/toml-lang/toml - -############################################################################### -### Base Configuration ### -############################################################################### - -# The minimum gas prices a validator is willing to accept for processing a -# transaction. A transaction's fees must meet the minimum of any denomination -# specified in this config (e.g. 0.25token1;0.0001token2). -minimum-gas-prices = "" - -# default: the last 100 states are kept in addition to every 500th state; pruning at 10 block intervals -# nothing: all historic states will be saved, nothing will be deleted (i.e. archiving node) -# everything: all saved states will be deleted, storing only the current state; pruning at 10 block intervals -# custom: allow pruning options to be manually specified through 'pruning-keep-recent', 'pruning-keep-every', and 'pruning-interval' -pruning = "default" - -# These are applied if and only if the pruning strategy is custom. -pruning-keep-recent = "0" -pruning-keep-every = "0" -pruning-interval = "0" - -# HaltHeight contains a non-zero block height at which a node will gracefully -# halt and shutdown that can be used to assist upgrades and testing. -# -# Note: Commitment of state will be attempted on the corresponding block. -halt-height = 0 - -# HaltTime contains a non-zero minimum block time (in Unix seconds) at which -# a node will gracefully halt and shutdown that can be used to assist upgrades -# and testing. -# -# Note: Commitment of state will be attempted on the corresponding block. -halt-time = 0 - -# MinRetainBlocks defines the minimum block height offset from the current -# block being committed, such that all blocks past this offset are pruned -# from Tendermint. It is used as part of the process of determining the -# ResponseCommit.RetainHeight value during ABCI Commit. A value of 0 indicates -# that no blocks should be pruned. -# -# This configuration value is only responsible for pruning Tendermint blocks. -# It has no bearing on application state pruning which is determined by the -# "pruning-*" configurations. 
-# -# Note: Tendermint block pruning is dependant on this parameter in conunction -# with the unbonding (safety threshold) period, state pruning and state sync -# snapshot parameters to determine the correct minimum value of -# ResponseCommit.RetainHeight. -min-retain-blocks = 0 - -# InterBlockCache enables inter-block caching. -inter-block-cache = true - -# IndexEvents defines the set of events in the form {eventType}.{attributeKey}, -# which informs Tendermint what to index. If empty, all events will be indexed. -# -# Example: -# ["message.sender", "message.recipient"] -index-events = [] - -############################################################################### -### Telemetry Configuration ### -############################################################################### - -[telemetry] - -# Prefixed with keys to separate services. -service-name = "" - -# Enabled enables the application telemetry functionality. When enabled, -# an in-memory sink is also enabled by default. Operators may also enabled -# other sinks such as Prometheus. -enabled = false - -# Enable prefixing gauge values with hostname. -enable-hostname = false - -# Enable adding hostname to labels. -enable-hostname-label = false - -# Enable adding service to labels. -enable-service-label = false - -# PrometheusRetentionTime, when positive, enables a Prometheus metrics sink. -prometheus-retention-time = 0 - -# GlobalLabels defines a global set of name/value label tuples applied to all -# metrics emitted using the wrapper functions defined in telemetry package. -# -# Example: -# [["chain_id", "cosmoshub-1"]] -global-labels = [ -] - -############################################################################### -### API Configuration ### -############################################################################### - -[api] - -# Enable defines if the API server should be enabled. -enable = false - -# Swagger defines if swagger documentation should automatically be registered. -swagger = false - -# Address defines the API server to listen on. -address = "tcp://0.0.0.0:1317" - -# MaxOpenConnections defines the number of maximum open connections. -max-open-connections = 1000 - -# RPCReadTimeout defines the Tendermint RPC read timeout (in seconds). -rpc-read-timeout = 10 - -# RPCWriteTimeout defines the Tendermint RPC write timeout (in seconds). -rpc-write-timeout = 0 - -# RPCMaxBodyBytes defines the Tendermint maximum response body (in bytes). -rpc-max-body-bytes = 1000000 - -# EnableUnsafeCORS defines if CORS should be enabled (unsafe - use it at your own risk). -enabled-unsafe-cors = false - -############################################################################### -### gRPC Configuration ### -############################################################################### - -[grpc] - -# Enable defines if the gRPC server should be enabled. -enable = true - -# Address defines the gRPC server address to bind to. -address = "0.0.0.0:9090" - -############################################################################### -### State Sync Configuration ### -############################################################################### - -# State sync snapshots allow other nodes to rapidly join the network without replaying historical -# blocks, instead downloading and applying a snapshot of the application state at a given height. -[state-sync] - -# snapshot-interval specifies the block interval at which local state sync snapshots are -# taken (0 to disable). Must be a multiple of pruning-keep-every. 
-snapshot-interval = 0 - -# snapshot-keep-recent specifies the number of recent snapshots to keep and serve (0 to keep all). -snapshot-keep-recent = 2 diff --git a/ci/chains/gaia/v3.0.0/ibc-0/config/config.toml b/ci/chains/gaia/v3.0.0/ibc-0/config/config.toml deleted file mode 100644 index 03b013dc29..0000000000 --- a/ci/chains/gaia/v3.0.0/ibc-0/config/config.toml +++ /dev/null @@ -1,393 +0,0 @@ -# This is a TOML config file. -# For more information, see https://github.com/toml-lang/toml - -# NOTE: Any path below can be absolute (e.g. "/var/myawesomeapp/data") or -# relative to the home directory (e.g. "data"). The home directory is -# "$HOME/.tendermint" by default, but could be changed via $TMHOME env variable -# or --home cmd flag. - -####################################################################### -### Main Base Config Options ### -####################################################################### - -# TCP or UNIX socket address of the ABCI application, -# or the name of an ABCI application compiled in with the Tendermint binary -proxy_app = "tcp://127.0.0.1:26658" - -# A custom human readable name for this node -moniker = "ibc-0" - -# If this node is many blocks behind the tip of the chain, FastSync -# allows them to catchup quickly by downloading blocks in parallel -# and verifying their commits -fast_sync = true - -# Database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb -# * goleveldb (github.com/syndtr/goleveldb - most popular implementation) -# - pure go -# - stable -# * cleveldb (uses levigo wrapper) -# - fast -# - requires gcc -# - use cleveldb build tag (go build -tags cleveldb) -# * boltdb (uses etcd's fork of bolt - github.com/etcd-io/bbolt) -# - EXPERIMENTAL -# - may be faster is some use-cases (random reads - indexer) -# - use boltdb build tag (go build -tags boltdb) -# * rocksdb (uses github.com/tecbot/gorocksdb) -# - EXPERIMENTAL -# - requires gcc -# - use rocksdb build tag (go build -tags rocksdb) -# * badgerdb (uses github.com/dgraph-io/badger) -# - EXPERIMENTAL -# - use badgerdb build tag (go build -tags badgerdb) -db_backend = "goleveldb" - -# Database directory -db_dir = "data" - -# Output level for logging, including package level options -log_level = "info" - -# Output format: 'plain' (colored text) or 'json' -log_format = "plain" - -##### additional base config options ##### - -# Path to the JSON file containing the initial validator set and other meta data -genesis_file = "config/genesis.json" - -# Path to the JSON file containing the private key to use as a validator in the consensus protocol -priv_validator_key_file = "config/priv_validator_key.json" - -# Path to the JSON file containing the last sign state of a validator -priv_validator_state_file = "data/priv_validator_state.json" - -# TCP or UNIX socket address for Tendermint to listen on for -# connections from an external PrivValidator process -priv_validator_laddr = "" - -# Path to the JSON file containing the private key to use for node authentication in the p2p protocol -node_key_file = "config/node_key.json" - -# Mechanism to connect to the ABCI application: socket | grpc -abci = "socket" - -# If true, query the ABCI app on connecting to a new peer -# so the app can decide if we should keep the connection or not -filter_peers = false - - -####################################################################### -### Advanced Configuration Options ### -####################################################################### - 
-####################################################### -### RPC Server Configuration Options ### -####################################################### -[rpc] - -# TCP or UNIX socket address for the RPC server to listen on -laddr = "tcp://0.0.0.0:26657" - -# A list of origins a cross-domain request can be executed from -# Default value '[]' disables cors support -# Use '["*"]' to allow any origin -cors_allowed_origins = [] - -# A list of methods the client is allowed to use with cross-domain requests -cors_allowed_methods = ["HEAD", "GET", "POST", ] - -# A list of non simple headers the client is allowed to use with cross-domain requests -cors_allowed_headers = ["Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time", ] - -# TCP or UNIX socket address for the gRPC server to listen on -# NOTE: This server only supports /broadcast_tx_commit -grpc_laddr = "" - -# Maximum number of simultaneous connections. -# Does not include RPC (HTTP&WebSocket) connections. See max_open_connections -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} -# 1024 - 40 - 10 - 50 = 924 = ~900 -grpc_max_open_connections = 900 - -# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool -unsafe = false - -# Maximum number of simultaneous connections (including WebSocket). -# Does not include gRPC connections. See grpc_max_open_connections -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} -# 1024 - 40 - 10 - 50 = 924 = ~900 -max_open_connections = 900 - -# Maximum number of unique clientIDs that can /subscribe -# If you're using /broadcast_tx_commit, set to the estimated maximum number -# of broadcast_tx_commit calls per block. -max_subscription_clients = 100 - -# Maximum number of unique queries a given client can /subscribe to -# If you're using GRPC (or Local RPC client) and /broadcast_tx_commit, set to -# the estimated # maximum number of broadcast_tx_commit calls per block. -max_subscriptions_per_client = 5 - -# How long to wait for a tx to be committed during /broadcast_tx_commit. -# WARNING: Using a value larger than 10s will result in increasing the -# global HTTP write timeout, which applies to all connections and endpoints. -# See https://github.com/tendermint/tendermint/issues/3435 -timeout_broadcast_tx_commit = "10s" - -# Maximum size of request body, in bytes -max_body_bytes = 1000000 - -# Maximum size of request header, in bytes -max_header_bytes = 1048576 - -# The path to a file containing certificate that is used to create the HTTPS server. -# Migth be either absolute path or path related to tendermint's config directory. -# If the certificate is signed by a certificate authority, -# the certFile should be the concatenation of the server's certificate, any intermediates, -# and the CA's certificate. -# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. -# Otherwise, HTTP server is run. -tls_cert_file = "" - -# The path to a file containing matching private key that is used to create the HTTPS server. -# Migth be either absolute path or path related to tendermint's config directory. 
-# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. -# Otherwise, HTTP server is run. -tls_key_file = "" - -# pprof listen address (https://golang.org/pkg/net/http/pprof) -pprof_laddr = "localhost:6060" - -####################################################### -### P2P Configuration Options ### -####################################################### -[p2p] - -# Address to listen for incoming connections -laddr = "tcp://0.0.0.0:26656" - -# Address to advertise to peers for them to dial -# If empty, will use the same port as the laddr, -# and will introspect on the listener or use UPnP -# to figure out the address. -external_address = "" - -# Comma separated list of seed nodes to connect to -seeds = "" - -# Comma separated list of nodes to keep persistent connections to -persistent_peers = "" - -# UPNP port forwarding -upnp = false - -# Path to address book -addr_book_file = "config/addrbook.json" - -# Set true for strict address routability rules -# Set false for private or local networks -addr_book_strict = true - -# Maximum number of inbound peers -max_num_inbound_peers = 40 - -# Maximum number of outbound peers to connect to, excluding persistent peers -max_num_outbound_peers = 10 - -# List of node IDs, to which a connection will be (re)established ignoring any existing limits -unconditional_peer_ids = "" - -# Maximum pause when redialing a persistent peer (if zero, exponential backoff is used) -persistent_peers_max_dial_period = "0s" - -# Time to wait before flushing messages out on the connection -flush_throttle_timeout = "100ms" - -# Maximum size of a message packet payload, in bytes -max_packet_msg_payload_size = 1024 - -# Rate at which packets can be sent, in bytes/second -send_rate = 5120000 - -# Rate at which packets can be received, in bytes/second -recv_rate = 5120000 - -# Set true to enable the peer-exchange reactor -pex = true - -# Seed mode, in which node constantly crawls the network and looks for -# peers. If another node asks it for addresses, it responds and disconnects. -# -# Does not work if the peer-exchange reactor is disabled. -seed_mode = false - -# Comma separated list of peer IDs to keep private (will not be gossiped to other peers) -private_peer_ids = "" - -# Toggle to disable guard against peers connecting from the same ip. -allow_duplicate_ip = false - -# Peer connection configuration. -handshake_timeout = "20s" -dial_timeout = "3s" - -####################################################### -### Mempool Configurattion Option ### -####################################################### -[mempool] - -recheck = true -broadcast = true -wal_dir = "" - -# Maximum number of transactions in the mempool -size = 5000 - -# Limit the total size of all txs in the mempool. -# This only accounts for raw transactions (e.g. given 1MB transactions and -# max_txs_bytes=5MB, mempool will only accept 5 transactions). -max_txs_bytes = 1073741824 - -# Size of the cache (used to filter transactions we saw earlier) in transactions -cache_size = 10000 - -# Do not remove invalid transactions from the cache (default: false) -# Set to true if it's not possible for any invalid transaction to become valid -# again in the future. -keep-invalid-txs-in-cache = false - -# Maximum size of a single transaction. -# NOTE: the max size of a tx transmitted over the network is {max_tx_bytes}. -max_tx_bytes = 1048576 - -# Maximum size of a batch of transactions to send to a peer -# Including space needed by encoding (one varint per transaction). 
-# XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796 -max_batch_bytes = 0 - -####################################################### -### State Sync Configuration Options ### -####################################################### -[statesync] -# State sync rapidly bootstraps a new node by discovering, fetching, and restoring a state machine -# snapshot from peers instead of fetching and replaying historical blocks. Requires some peers in -# the network to take and serve state machine snapshots. State sync is not attempted if the node -# has any local state (LastBlockHeight > 0). The node will have a truncated block history, -# starting from the height of the snapshot. -enable = false - -# RPC servers (comma-separated) for light client verification of the synced state machine and -# retrieval of state data for node bootstrapping. Also needs a trusted height and corresponding -# header hash obtained from a trusted source, and a period during which validators can be trusted. -# -# For Cosmos SDK-based chains, trust_period should usually be about 2/3 of the unbonding time (~2 -# weeks) during which they can be financially punished (slashed) for misbehavior. -rpc_servers = "" -trust_height = 0 -trust_hash = "" -trust_period = "168h0m0s" - -# Time to spend discovering snapshots before initiating a restore. -discovery_time = "15s" - -# Temporary directory for state sync snapshot chunks, defaults to the OS tempdir (typically /tmp). -# Will create a new, randomly named directory within, and remove it when done. -temp_dir = "" - -####################################################### -### Fast Sync Configuration Connections ### -####################################################### -[fastsync] - -# Fast Sync version to use: -# 1) "v0" (default) - the legacy fast sync implementation -# 2) "v1" - refactor of v0 version for better testability -# 2) "v2" - complete redesign of v0, optimized for testability & readability -version = "v0" - -####################################################### -### Consensus Configuration Options ### -####################################################### -[consensus] - -wal_file = "data/cs.wal/wal" - -# How long we wait for a proposal block before prevoting nil -timeout_propose = "1s" -# How much timeout_propose increases with each round -timeout_propose_delta = "500ms" -# How long we wait after receiving +2/3 prevotes for “anything” (ie. not a single block or nil) -timeout_prevote = "1s" -# How much the timeout_prevote increases with each round -timeout_prevote_delta = "500ms" -# How long we wait after receiving +2/3 precommits for “anything” (ie. not a single block or nil) -timeout_precommit = "1s" -# How much the timeout_precommit increases with each round -timeout_precommit_delta = "500ms" -# How long we wait after committing a block, before starting on the new -# height (this gives us a chance to receive some more precommits, even -# though we already have +2/3). -timeout_commit = "1s" - -# How many blocks to look back to check existence of the node's consensus votes before joining consensus -# When non-zero, the node will panic upon restart -# if the same consensus key was used to sign {double_sign_check_height} last blocks. -# So, validators should stop the state machine, wait for some blocks, and then restart the state machine to avoid panic. 
-double_sign_check_height = 0 - -# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) -skip_timeout_commit = false - -# EmptyBlocks mode and possible interval between empty blocks -create_empty_blocks = true -create_empty_blocks_interval = "0s" - -# Reactor sleep duration parameters -peer_gossip_sleep_duration = "100ms" -peer_query_maj23_sleep_duration = "2s" - -####################################################### -### Transaction Indexer Configuration Options ### -####################################################### -[tx_index] - -# What indexer to use for transactions -# -# The application will set which txs to index. In some cases a node operator will be able -# to decide which txs to index based on configuration set in the application. -# -# Options: -# 1) "null" -# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). -# - When "kv" is chosen "tx.height" and "tx.hash" will always be indexed. -indexer = "kv" - -####################################################### -### Instrumentation Configuration Options ### -####################################################### -[instrumentation] - -# When true, Prometheus metrics are served under /metrics on -# PrometheusListenAddr. -# Check out the documentation for the list of available metrics. -prometheus = false - -# Address to listen for Prometheus collector(s) connections -prometheus_listen_addr = ":26660" - -# Maximum number of simultaneous connections. -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -max_open_connections = 3 - -# Instrumentation namespace -namespace = "tendermint" diff --git a/ci/chains/gaia/v3.0.0/ibc-0/config/genesis.json b/ci/chains/gaia/v3.0.0/ibc-0/config/genesis.json deleted file mode 100644 index 617b5db01a..0000000000 --- a/ci/chains/gaia/v3.0.0/ibc-0/config/genesis.json +++ /dev/null @@ -1,285 +0,0 @@ -{ - "genesis_time": "2021-01-29T14:17:55.021324268Z", - "chain_id": "ibc-0", - "initial_height": "1", - "consensus_params": { - "block": { - "max_bytes": "22020096", - "max_gas": "-1", - "time_iota_ms": "1000" - }, - "evidence": { - "max_age_num_blocks": "100000", - "max_age_duration": "172800000000000", - "max_bytes": "1048576" - }, - "validator": { - "pub_key_types": [ - "ed25519" - ] - }, - "version": {} - }, - "app_hash": "", - "app_state": { - "auth": { - "params": { - "max_memo_characters": "256", - "tx_sig_limit": "7", - "tx_size_cost_per_byte": "10", - "sig_verify_cost_ed25519": "590", - "sig_verify_cost_secp256k1": "1000" - }, - "accounts": [ - { - "@type": "/cosmos.auth.v1beta1.BaseAccount", - "address": "cosmos1g88sephz2r6ym6aqjgfaflknhhpj4reev9k28z", - "pub_key": null, - "account_number": "0", - "sequence": "0" - }, - { - "@type": "/cosmos.auth.v1beta1.BaseAccount", - "address": "cosmos1atnl2j4w2adr7xpa8kercfrr55fg4kcfkf583u", - "pub_key": null, - "account_number": "0", - "sequence": "0" - } - ] - }, - "bank": { - "params": { - "send_enabled": [], - "default_send_enabled": true - }, - "balances": [ - { - "address": "cosmos1g88sephz2r6ym6aqjgfaflknhhpj4reev9k28z", - "coins": [ - { - "denom": "samoleans", - "amount": "100000000000" - }, - { - "denom": "stake", - "amount": "100000000000" - } - ] - }, - { - "address": "cosmos1atnl2j4w2adr7xpa8kercfrr55fg4kcfkf583u", - "coins": [ - { - "denom": "stake", - "amount": "100000000000" - } - ] - } - ], - "supply": [], - "denom_metadata": [] - }, - "capability": { - "index": "1", - "owners": [] - 
}, - "crisis": { - "constant_fee": { - "denom": "stake", - "amount": "1000" - } - }, - "distribution": { - "params": { - "community_tax": "0.020000000000000000", - "base_proposer_reward": "0.010000000000000000", - "bonus_proposer_reward": "0.040000000000000000", - "withdraw_addr_enabled": true - }, - "fee_pool": { - "community_pool": [] - }, - "delegator_withdraw_infos": [], - "previous_proposer": "", - "outstanding_rewards": [], - "validator_accumulated_commissions": [], - "validator_historical_rewards": [], - "validator_current_rewards": [], - "delegator_starting_infos": [], - "validator_slash_events": [] - }, - "evidence": { - "evidence": [] - }, - "genutil": { - "gen_txs": [ - { - "body": { - "messages": [ - { - "@type": "/cosmos.staking.v1beta1.MsgCreateValidator", - "description": { - "moniker": "ibc-0", - "identity": "", - "website": "", - "security_contact": "", - "details": "" - }, - "commission": { - "rate": "0.100000000000000000", - "max_rate": "0.200000000000000000", - "max_change_rate": "0.010000000000000000" - }, - "min_self_delegation": "1", - "delegator_address": "cosmos1atnl2j4w2adr7xpa8kercfrr55fg4kcfkf583u", - "validator_address": "cosmosvaloper1atnl2j4w2adr7xpa8kercfrr55fg4kcfnaqja0", - "pubkey": { - "@type": "/cosmos.crypto.ed25519.PubKey", - "key": "pOI/9x+U+bRejes4VPOlHl2OALAaG6yUoebv5gmXdww=" - }, - "value": { - "denom": "stake", - "amount": "100000000000" - } - } - ], - "memo": "d2c710169cd596180da34bc6dde5d28fd663e48c@192.168.1.213:26656", - "timeout_height": "0", - "extension_options": [], - "non_critical_extension_options": [] - }, - "auth_info": { - "signer_infos": [ - { - "public_key": { - "@type": "/cosmos.crypto.secp256k1.PubKey", - "key": "A0SaYYx4qCJyjdew0+nHDykQMNQ8mXzuj6MM5JAaY3oC" - }, - "mode_info": { - "single": { - "mode": "SIGN_MODE_DIRECT" - } - }, - "sequence": "0" - } - ], - "fee": { - "amount": [], - "gas_limit": "200000", - "payer": "", - "granter": "" - } - }, - "signatures": [ - "MukV92ZBXAEPxNl+mHk7Gu+NRflkxmAOEmQPoyOS/Akz0vlVkHCgUagdiHpDpRNLL/6v18w9IA7BAaSht6233w==" - ] - } - ] - }, - "gov": { - "starting_proposal_id": "1", - "deposits": [], - "votes": [], - "proposals": [], - "deposit_params": { - "min_deposit": [ - { - "denom": "stake", - "amount": "10000000" - } - ], - "max_deposit_period": "172800s" - }, - "voting_params": { - "voting_period": "172800s" - }, - "tally_params": { - "quorum": "0.334000000000000000", - "threshold": "0.500000000000000000", - "veto_threshold": "0.334000000000000000" - } - }, - "ibc": { - "client_genesis": { - "clients": [], - "clients_consensus": [], - "clients_metadata": [], - "params": { - "allowed_clients": [ - "06-solomachine", - "07-tendermint" - ] - }, - "create_localhost": false, - "next_client_sequence": "0" - }, - "connection_genesis": { - "connections": [], - "client_connection_paths": [], - "next_connection_sequence": "0" - }, - "channel_genesis": { - "channels": [], - "acknowledgements": [], - "commitments": [], - "receipts": [], - "send_sequences": [], - "recv_sequences": [], - "ack_sequences": [], - "next_channel_sequence": "0" - } - }, - "mint": { - "minter": { - "inflation": "0.130000000000000000", - "annual_provisions": "0.000000000000000000" - }, - "params": { - "mint_denom": "stake", - "inflation_rate_change": "0.130000000000000000", - "inflation_max": "0.200000000000000000", - "inflation_min": "0.070000000000000000", - "goal_bonded": "0.670000000000000000", - "blocks_per_year": "6311520" - } - }, - "params": null, - "slashing": { - "params": { - "signed_blocks_window": "100", - 
"min_signed_per_window": "0.500000000000000000", - "downtime_jail_duration": "600s", - "slash_fraction_double_sign": "0.050000000000000000", - "slash_fraction_downtime": "0.010000000000000000" - }, - "signing_infos": [], - "missed_blocks": [] - }, - "staking": { - "params": { - "unbonding_time": "1814400s", - "max_validators": 100, - "max_entries": 7, - "historical_entries": 10000, - "bond_denom": "stake" - }, - "last_total_power": "0", - "last_validator_powers": [], - "validators": [], - "delegations": [], - "unbonding_delegations": [], - "redelegations": [], - "exported": false - }, - "transfer": { - "port_id": "transfer", - "denom_traces": [], - "params": { - "send_enabled": true, - "receive_enabled": true - } - }, - "upgrade": {}, - "vesting": {} - } -} \ No newline at end of file diff --git a/ci/chains/gaia/v3.0.0/ibc-0/config/gentx/gentx-d2c710169cd596180da34bc6dde5d28fd663e48c.json b/ci/chains/gaia/v3.0.0/ibc-0/config/gentx/gentx-d2c710169cd596180da34bc6dde5d28fd663e48c.json deleted file mode 100644 index 8604a7903f..0000000000 --- a/ci/chains/gaia/v3.0.0/ibc-0/config/gentx/gentx-d2c710169cd596180da34bc6dde5d28fd663e48c.json +++ /dev/null @@ -1 +0,0 @@ -{"body":{"messages":[{"@type":"/cosmos.staking.v1beta1.MsgCreateValidator","description":{"moniker":"ibc-0","identity":"","website":"","security_contact":"","details":""},"commission":{"rate":"0.100000000000000000","max_rate":"0.200000000000000000","max_change_rate":"0.010000000000000000"},"min_self_delegation":"1","delegator_address":"cosmos1atnl2j4w2adr7xpa8kercfrr55fg4kcfkf583u","validator_address":"cosmosvaloper1atnl2j4w2adr7xpa8kercfrr55fg4kcfnaqja0","pubkey":{"@type":"/cosmos.crypto.ed25519.PubKey","key":"pOI/9x+U+bRejes4VPOlHl2OALAaG6yUoebv5gmXdww="},"value":{"denom":"stake","amount":"100000000000"}}],"memo":"d2c710169cd596180da34bc6dde5d28fd663e48c@192.168.1.213:26656","timeout_height":"0","extension_options":[],"non_critical_extension_options":[]},"auth_info":{"signer_infos":[{"public_key":{"@type":"/cosmos.crypto.secp256k1.PubKey","key":"A0SaYYx4qCJyjdew0+nHDykQMNQ8mXzuj6MM5JAaY3oC"},"mode_info":{"single":{"mode":"SIGN_MODE_DIRECT"}},"sequence":"0"}],"fee":{"amount":[],"gas_limit":"200000","payer":"","granter":""}},"signatures":["MukV92ZBXAEPxNl+mHk7Gu+NRflkxmAOEmQPoyOS/Akz0vlVkHCgUagdiHpDpRNLL/6v18w9IA7BAaSht6233w=="]} diff --git a/ci/chains/gaia/v3.0.0/ibc-0/config/node_key.json b/ci/chains/gaia/v3.0.0/ibc-0/config/node_key.json deleted file mode 100644 index a34f5bed89..0000000000 --- a/ci/chains/gaia/v3.0.0/ibc-0/config/node_key.json +++ /dev/null @@ -1 +0,0 @@ -{"priv_key":{"type":"tendermint/PrivKeyEd25519","value":"6uGEFm3VE1D/rPPet3qiUZJdMT/aVF2fmU/Yc5kqnlJ6uhSV/KX+Wdu9uHUjfg26zFdA4xByM9eyRHZNUEp8+Q=="}} \ No newline at end of file diff --git a/ci/chains/gaia/v3.0.0/ibc-0/config/priv_validator_key.json b/ci/chains/gaia/v3.0.0/ibc-0/config/priv_validator_key.json deleted file mode 100644 index d5077632d0..0000000000 --- a/ci/chains/gaia/v3.0.0/ibc-0/config/priv_validator_key.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "address": "11BA3491738A2B92BED24B71332AD77F00024369", - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "pOI/9x+U+bRejes4VPOlHl2OALAaG6yUoebv5gmXdww=" - }, - "priv_key": { - "type": "tendermint/PrivKeyEd25519", - "value": "p4xRp3MT8KEPctLJWuY7oOw5sAuVazyY3+hBvmUrXpqk4j/3H5T5tF6N6zhU86UeXY4AsBobrJSh5u/mCZd3DA==" - } -} \ No newline at end of file diff --git a/ci/chains/gaia/v3.0.0/ibc-0/key_seed.json b/ci/chains/gaia/v3.0.0/ibc-0/key_seed.json deleted file mode 100644 index 
ab144f2101..0000000000 --- a/ci/chains/gaia/v3.0.0/ibc-0/key_seed.json +++ /dev/null @@ -1 +0,0 @@ -{"name":"user","type":"local","address":"cosmos1g88sephz2r6ym6aqjgfaflknhhpj4reev9k28z","pubkey":"cosmospub1addwnpepqt3t47qa3hzpf9au0nrxhs0h2tu7g5utlpl3nfka98m73za9h2gs6lrm4k9","mnemonic":"odor sibling reason pistol carry injury end mountain gauge velvet trouble beef casual find cigar before pen drift noise meat slush pistol chronic wool"} diff --git a/ci/chains/gaia/v3.0.0/ibc-0/keyring-test/41cf0c86e250f44deba09213d4fed3bdc32a8f39.address b/ci/chains/gaia/v3.0.0/ibc-0/keyring-test/41cf0c86e250f44deba09213d4fed3bdc32a8f39.address deleted file mode 100644 index bca91e0431..0000000000 --- a/ci/chains/gaia/v3.0.0/ibc-0/keyring-test/41cf0c86e250f44deba09213d4fed3bdc32a8f39.address +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wMS0yOSAwOToxNzo1Ny40NDI3NTI1NzEgLTA1MDAgRVNUIG09KzAuMTg0NjA2MjI3IiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiN0F6d0dMeWZmNXloTDB1ZiJ9.aZw80hDsm42xGZWfEt7_cp4qTV1Y9L-9KYFWdlf2Z4zPjISmDmSHOA.xCs1eEcwnMasy0jx.7yonWaNsLRaKXDa3K8FhhbOrQGyCh1765MgDuwV7EMFxlywGzpnhpB0_PadOytHs89i25fuwfPvw8KGIUYbBrF-9NE6E76dtcBSd49Fcg8Sq9UdYEyBG4C__TgK-6dWD8I3kn4TLVZ8yt87U0OH7vQfMqrhceCzQvZhWro6TcEUBQpbo4QZZDIEH_TA81z_JWNzEreXZ3F-auQHObl238jwCVzry55W1-btoHZV3L9NLYQ.jQaUCrcV9ZU7XeJ6BVj28g \ No newline at end of file diff --git a/ci/chains/gaia/v3.0.0/ibc-0/keyring-test/eae7f54aae575a3f183d3db23c2463a5128adb09.address b/ci/chains/gaia/v3.0.0/ibc-0/keyring-test/eae7f54aae575a3f183d3db23c2463a5128adb09.address deleted file mode 100644 index 0a2f5f945a..0000000000 --- a/ci/chains/gaia/v3.0.0/ibc-0/keyring-test/eae7f54aae575a3f183d3db23c2463a5128adb09.address +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wMS0yOSAwOToxNzo1Ni4yMjczNDUxMzIgLTA1MDAgRVNUIG09KzAuMTkzNzA4NTA0IiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiNUp5RmZUQ2JLWVlxUkNqNCJ9.PAlCYv_dN1e4tGyNVeQlsXheYzOEvacBj4N94SrBd3Q0NIsnPShJoA.AH6A91TU4D74ONPs.BiMujTsrIou2KpIWQ_XRMgjPO_QThzujRVHmFgL5VBe4OhwKI86XwQdPtib8BQHSHId-oGHuIoYZ9pzsRZXDZdDZ6uTAX2Agq8s9CMRcT9tDGfgFTiQs3zPbO1joL1BrYSqGtr_b75Wems159TzQUGI1yZZcJgqbLswSg03ZEEZDxFoCUga7q6RPS0B7cWZx3o7GdBAx-iObsdmNp9h2Ok6qAf6RzTAqSJDgKFBdJLnt4DDwe1H530ik.7JFKm6ZogyEfJHgNAIpvzg \ No newline at end of file diff --git a/ci/chains/gaia/v3.0.0/ibc-0/keyring-test/user.info b/ci/chains/gaia/v3.0.0/ibc-0/keyring-test/user.info deleted file mode 100644 index 584392be6e..0000000000 --- a/ci/chains/gaia/v3.0.0/ibc-0/keyring-test/user.info +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wMS0yOSAwOToxNzo1Ny40MjU3NzU4MTcgLTA1MDAgRVNUIG09KzAuMTY3NjI5NDQ3IiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoicFM2Ty1XVXB6eXhQQXN1TSJ9.w5_RrMMhj-33GL6dn07zRrX-hv92nMREh2c0-sTg6afQdFtoyJ9Sjw.uT0r5JSAcITVV072.38D79yPbUn8pElVmfiGtbZDwx1Ii6WWsQGdHFmeooNzp8Qa9XnbjgOhr5bbcb1CRGGY9DnR9VI_8EMF0tskC2b82ZsSbS90HoqvwFXRLELrI3avI2P8RDSH60SsOPYHhBHFHtRYSnA6oxjamy9nfZ7bbM_WU0HZ2jWio2cptpuGbXzyE3TFPrOvyioTp5KUh80gkD0cuKq2NhNnH_wHxWURSj3UxEdTBFXFvXn70oLD3rj8MvVr4-1cXPfV1GI7MLaEkLT8BH9RNBkaKyOSYLm1Id_2bCDJJw-14ka3JorihwfbLmrbF9uEkXIp3zdnzY-mG-Mh51nWzUt3seJQ-C8gqE8FE-1U.j_H_QFmqyTl6o97oxeTN8g \ No newline at end of file diff --git a/ci/chains/gaia/v3.0.0/ibc-0/keyring-test/validator.info b/ci/chains/gaia/v3.0.0/ibc-0/keyring-test/validator.info deleted file mode 100644 index 931a9f1251..0000000000 --- a/ci/chains/gaia/v3.0.0/ibc-0/keyring-test/validator.info +++ /dev/null @@ -1 +0,0 @@ 
-eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wMS0yOSAwOToxNzo1Ni4yMDY4NDUxOTYgLTA1MDAgRVNUIG09KzAuMTczMjA4NDU1IiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiZWVyTVRDZ1FkT3hobzRJbCJ9.fLybsw-F-KD13P8G7I99yJwb3htJYjf2gR11_Ek27nSGPGNgJOXKZg.Pf0kNyUPLuqW0k5y.hDrnr6Cfu0m3dPLXr7UONiuWsGnx5gwQW6GWzZk3hYPd2ziAYOt79fBSwodY1VVohHftzZgwzvO07eBcvK6HzAB4e4uk_MME2uxHoS8gEC5FvwoJyKgSLYnMcJmK8XRUjgAKL9LV6ptwF0YmmxL_50I3N2qDsVRwldEqIiNm9HmuQ2PhfUPbnWfl0lMPIZK9pY4KDyA-Aw35vtEgF1A9ns2CsteRxJnC31apqfrirHSk127K-P7n04caAx033TjGIKVTLOgEdjCHCB4xJ9BIlkxyy1KFgelJrrwOkxkBKRFyHOtfs4c__nk5d8arZiKTzLnwju_s2CEF9cB4iRmyqhbaGTfejtTAnHalosBKs_qwt2F1.Oa8WLImfXbdXsJi4PWajgw \ No newline at end of file diff --git a/ci/chains/gaia/v3.0.0/ibc-0/validator_seed.json b/ci/chains/gaia/v3.0.0/ibc-0/validator_seed.json deleted file mode 100644 index ea4713ffea..0000000000 --- a/ci/chains/gaia/v3.0.0/ibc-0/validator_seed.json +++ /dev/null @@ -1 +0,0 @@ -{"name":"validator","type":"local","address":"cosmos1atnl2j4w2adr7xpa8kercfrr55fg4kcfkf583u","pubkey":"cosmospub1addwnpepqdzf5cvv0z5zyu5d67cd86w8pu53qvx58jvhem505vxwfyq6vdaqy6tfsfg","mnemonic":"frown boost win vacuum wink excuse frog cube visit cousin hover cook broccoli gift usual useful fork vault during name dog suspect much another"} diff --git a/ci/chains/gaia/v3.0.0/ibc-1/config/app.toml b/ci/chains/gaia/v3.0.0/ibc-1/config/app.toml deleted file mode 100644 index 54de73e392..0000000000 --- a/ci/chains/gaia/v3.0.0/ibc-1/config/app.toml +++ /dev/null @@ -1,152 +0,0 @@ -# This is a TOML config file. -# For more information, see https://github.com/toml-lang/toml - -############################################################################### -### Base Configuration ### -############################################################################### - -# The minimum gas prices a validator is willing to accept for processing a -# transaction. A transaction's fees must meet the minimum of any denomination -# specified in this config (e.g. 0.25token1;0.0001token2). -minimum-gas-prices = "" - -# default: the last 100 states are kept in addition to every 500th state; pruning at 10 block intervals -# nothing: all historic states will be saved, nothing will be deleted (i.e. archiving node) -# everything: all saved states will be deleted, storing only the current state; pruning at 10 block intervals -# custom: allow pruning options to be manually specified through 'pruning-keep-recent', 'pruning-keep-every', and 'pruning-interval' -pruning = "default" - -# These are applied if and only if the pruning strategy is custom. -pruning-keep-recent = "0" -pruning-keep-every = "0" -pruning-interval = "0" - -# HaltHeight contains a non-zero block height at which a node will gracefully -# halt and shutdown that can be used to assist upgrades and testing. -# -# Note: Commitment of state will be attempted on the corresponding block. -halt-height = 0 - -# HaltTime contains a non-zero minimum block time (in Unix seconds) at which -# a node will gracefully halt and shutdown that can be used to assist upgrades -# and testing. -# -# Note: Commitment of state will be attempted on the corresponding block. -halt-time = 0 - -# MinRetainBlocks defines the minimum block height offset from the current -# block being committed, such that all blocks past this offset are pruned -# from Tendermint. It is used as part of the process of determining the -# ResponseCommit.RetainHeight value during ABCI Commit. A value of 0 indicates -# that no blocks should be pruned. 
-# -# This configuration value is only responsible for pruning Tendermint blocks. -# It has no bearing on application state pruning which is determined by the -# "pruning-*" configurations. -# -# Note: Tendermint block pruning is dependant on this parameter in conunction -# with the unbonding (safety threshold) period, state pruning and state sync -# snapshot parameters to determine the correct minimum value of -# ResponseCommit.RetainHeight. -min-retain-blocks = 0 - -# InterBlockCache enables inter-block caching. -inter-block-cache = true - -# IndexEvents defines the set of events in the form {eventType}.{attributeKey}, -# which informs Tendermint what to index. If empty, all events will be indexed. -# -# Example: -# ["message.sender", "message.recipient"] -index-events = [] - -############################################################################### -### Telemetry Configuration ### -############################################################################### - -[telemetry] - -# Prefixed with keys to separate services. -service-name = "" - -# Enabled enables the application telemetry functionality. When enabled, -# an in-memory sink is also enabled by default. Operators may also enabled -# other sinks such as Prometheus. -enabled = false - -# Enable prefixing gauge values with hostname. -enable-hostname = false - -# Enable adding hostname to labels. -enable-hostname-label = false - -# Enable adding service to labels. -enable-service-label = false - -# PrometheusRetentionTime, when positive, enables a Prometheus metrics sink. -prometheus-retention-time = 0 - -# GlobalLabels defines a global set of name/value label tuples applied to all -# metrics emitted using the wrapper functions defined in telemetry package. -# -# Example: -# [["chain_id", "cosmoshub-1"]] -global-labels = [ -] - -############################################################################### -### API Configuration ### -############################################################################### - -[api] - -# Enable defines if the API server should be enabled. -enable = false - -# Swagger defines if swagger documentation should automatically be registered. -swagger = false - -# Address defines the API server to listen on. -address = "tcp://0.0.0.0:1317" - -# MaxOpenConnections defines the number of maximum open connections. -max-open-connections = 1000 - -# RPCReadTimeout defines the Tendermint RPC read timeout (in seconds). -rpc-read-timeout = 10 - -# RPCWriteTimeout defines the Tendermint RPC write timeout (in seconds). -rpc-write-timeout = 0 - -# RPCMaxBodyBytes defines the Tendermint maximum response body (in bytes). -rpc-max-body-bytes = 1000000 - -# EnableUnsafeCORS defines if CORS should be enabled (unsafe - use it at your own risk). -enabled-unsafe-cors = false - -############################################################################### -### gRPC Configuration ### -############################################################################### - -[grpc] - -# Enable defines if the gRPC server should be enabled. -enable = true - -# Address defines the gRPC server address to bind to. 
-address = "0.0.0.0:9090" - -############################################################################### -### State Sync Configuration ### -############################################################################### - -# State sync snapshots allow other nodes to rapidly join the network without replaying historical -# blocks, instead downloading and applying a snapshot of the application state at a given height. -[state-sync] - -# snapshot-interval specifies the block interval at which local state sync snapshots are -# taken (0 to disable). Must be a multiple of pruning-keep-every. -snapshot-interval = 0 - -# snapshot-keep-recent specifies the number of recent snapshots to keep and serve (0 to keep all). -snapshot-keep-recent = 2 diff --git a/ci/chains/gaia/v3.0.0/ibc-1/config/config.toml b/ci/chains/gaia/v3.0.0/ibc-1/config/config.toml deleted file mode 100644 index 1ba972916c..0000000000 --- a/ci/chains/gaia/v3.0.0/ibc-1/config/config.toml +++ /dev/null @@ -1,393 +0,0 @@ -# This is a TOML config file. -# For more information, see https://github.com/toml-lang/toml - -# NOTE: Any path below can be absolute (e.g. "/var/myawesomeapp/data") or -# relative to the home directory (e.g. "data"). The home directory is -# "$HOME/.tendermint" by default, but could be changed via $TMHOME env variable -# or --home cmd flag. - -####################################################################### -### Main Base Config Options ### -####################################################################### - -# TCP or UNIX socket address of the ABCI application, -# or the name of an ABCI application compiled in with the Tendermint binary -proxy_app = "tcp://127.0.0.1:26658" - -# A custom human readable name for this node -moniker = "ibc-1" - -# If this node is many blocks behind the tip of the chain, FastSync -# allows them to catchup quickly by downloading blocks in parallel -# and verifying their commits -fast_sync = true - -# Database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb -# * goleveldb (github.com/syndtr/goleveldb - most popular implementation) -# - pure go -# - stable -# * cleveldb (uses levigo wrapper) -# - fast -# - requires gcc -# - use cleveldb build tag (go build -tags cleveldb) -# * boltdb (uses etcd's fork of bolt - github.com/etcd-io/bbolt) -# - EXPERIMENTAL -# - may be faster is some use-cases (random reads - indexer) -# - use boltdb build tag (go build -tags boltdb) -# * rocksdb (uses github.com/tecbot/gorocksdb) -# - EXPERIMENTAL -# - requires gcc -# - use rocksdb build tag (go build -tags rocksdb) -# * badgerdb (uses github.com/dgraph-io/badger) -# - EXPERIMENTAL -# - use badgerdb build tag (go build -tags badgerdb) -db_backend = "goleveldb" - -# Database directory -db_dir = "data" - -# Output level for logging, including package level options -log_level = "info" - -# Output format: 'plain' (colored text) or 'json' -log_format = "plain" - -##### additional base config options ##### - -# Path to the JSON file containing the initial validator set and other meta data -genesis_file = "config/genesis.json" - -# Path to the JSON file containing the private key to use as a validator in the consensus protocol -priv_validator_key_file = "config/priv_validator_key.json" - -# Path to the JSON file containing the last sign state of a validator -priv_validator_state_file = "data/priv_validator_state.json" - -# TCP or UNIX socket address for Tendermint to listen on for -# connections from an external PrivValidator process -priv_validator_laddr = "" - -# Path to the JSON 
file containing the private key to use for node authentication in the p2p protocol -node_key_file = "config/node_key.json" - -# Mechanism to connect to the ABCI application: socket | grpc -abci = "socket" - -# If true, query the ABCI app on connecting to a new peer -# so the app can decide if we should keep the connection or not -filter_peers = false - - -####################################################################### -### Advanced Configuration Options ### -####################################################################### - -####################################################### -### RPC Server Configuration Options ### -####################################################### -[rpc] - -# TCP or UNIX socket address for the RPC server to listen on -laddr = "tcp://0.0.0.0:26657" - -# A list of origins a cross-domain request can be executed from -# Default value '[]' disables cors support -# Use '["*"]' to allow any origin -cors_allowed_origins = [] - -# A list of methods the client is allowed to use with cross-domain requests -cors_allowed_methods = ["HEAD", "GET", "POST", ] - -# A list of non simple headers the client is allowed to use with cross-domain requests -cors_allowed_headers = ["Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time", ] - -# TCP or UNIX socket address for the gRPC server to listen on -# NOTE: This server only supports /broadcast_tx_commit -grpc_laddr = "" - -# Maximum number of simultaneous connections. -# Does not include RPC (HTTP&WebSocket) connections. See max_open_connections -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} -# 1024 - 40 - 10 - 50 = 924 = ~900 -grpc_max_open_connections = 900 - -# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool -unsafe = false - -# Maximum number of simultaneous connections (including WebSocket). -# Does not include gRPC connections. See grpc_max_open_connections -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} -# 1024 - 40 - 10 - 50 = 924 = ~900 -max_open_connections = 900 - -# Maximum number of unique clientIDs that can /subscribe -# If you're using /broadcast_tx_commit, set to the estimated maximum number -# of broadcast_tx_commit calls per block. -max_subscription_clients = 100 - -# Maximum number of unique queries a given client can /subscribe to -# If you're using GRPC (or Local RPC client) and /broadcast_tx_commit, set to -# the estimated # maximum number of broadcast_tx_commit calls per block. -max_subscriptions_per_client = 5 - -# How long to wait for a tx to be committed during /broadcast_tx_commit. -# WARNING: Using a value larger than 10s will result in increasing the -# global HTTP write timeout, which applies to all connections and endpoints. -# See https://github.com/tendermint/tendermint/issues/3435 -timeout_broadcast_tx_commit = "10s" - -# Maximum size of request body, in bytes -max_body_bytes = 1000000 - -# Maximum size of request header, in bytes -max_header_bytes = 1048576 - -# The path to a file containing certificate that is used to create the HTTPS server. -# Migth be either absolute path or path related to tendermint's config directory. 
-# If the certificate is signed by a certificate authority, -# the certFile should be the concatenation of the server's certificate, any intermediates, -# and the CA's certificate. -# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. -# Otherwise, HTTP server is run. -tls_cert_file = "" - -# The path to a file containing matching private key that is used to create the HTTPS server. -# Migth be either absolute path or path related to tendermint's config directory. -# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. -# Otherwise, HTTP server is run. -tls_key_file = "" - -# pprof listen address (https://golang.org/pkg/net/http/pprof) -pprof_laddr = "localhost:6060" - -####################################################### -### P2P Configuration Options ### -####################################################### -[p2p] - -# Address to listen for incoming connections -laddr = "tcp://0.0.0.0:26656" - -# Address to advertise to peers for them to dial -# If empty, will use the same port as the laddr, -# and will introspect on the listener or use UPnP -# to figure out the address. -external_address = "" - -# Comma separated list of seed nodes to connect to -seeds = "" - -# Comma separated list of nodes to keep persistent connections to -persistent_peers = "" - -# UPNP port forwarding -upnp = false - -# Path to address book -addr_book_file = "config/addrbook.json" - -# Set true for strict address routability rules -# Set false for private or local networks -addr_book_strict = true - -# Maximum number of inbound peers -max_num_inbound_peers = 40 - -# Maximum number of outbound peers to connect to, excluding persistent peers -max_num_outbound_peers = 10 - -# List of node IDs, to which a connection will be (re)established ignoring any existing limits -unconditional_peer_ids = "" - -# Maximum pause when redialing a persistent peer (if zero, exponential backoff is used) -persistent_peers_max_dial_period = "0s" - -# Time to wait before flushing messages out on the connection -flush_throttle_timeout = "100ms" - -# Maximum size of a message packet payload, in bytes -max_packet_msg_payload_size = 1024 - -# Rate at which packets can be sent, in bytes/second -send_rate = 5120000 - -# Rate at which packets can be received, in bytes/second -recv_rate = 5120000 - -# Set true to enable the peer-exchange reactor -pex = true - -# Seed mode, in which node constantly crawls the network and looks for -# peers. If another node asks it for addresses, it responds and disconnects. -# -# Does not work if the peer-exchange reactor is disabled. -seed_mode = false - -# Comma separated list of peer IDs to keep private (will not be gossiped to other peers) -private_peer_ids = "" - -# Toggle to disable guard against peers connecting from the same ip. -allow_duplicate_ip = false - -# Peer connection configuration. -handshake_timeout = "20s" -dial_timeout = "3s" - -####################################################### -### Mempool Configurattion Option ### -####################################################### -[mempool] - -recheck = true -broadcast = true -wal_dir = "" - -# Maximum number of transactions in the mempool -size = 5000 - -# Limit the total size of all txs in the mempool. -# This only accounts for raw transactions (e.g. given 1MB transactions and -# max_txs_bytes=5MB, mempool will only accept 5 transactions). 
-max_txs_bytes = 1073741824 - -# Size of the cache (used to filter transactions we saw earlier) in transactions -cache_size = 10000 - -# Do not remove invalid transactions from the cache (default: false) -# Set to true if it's not possible for any invalid transaction to become valid -# again in the future. -keep-invalid-txs-in-cache = false - -# Maximum size of a single transaction. -# NOTE: the max size of a tx transmitted over the network is {max_tx_bytes}. -max_tx_bytes = 1048576 - -# Maximum size of a batch of transactions to send to a peer -# Including space needed by encoding (one varint per transaction). -# XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796 -max_batch_bytes = 0 - -####################################################### -### State Sync Configuration Options ### -####################################################### -[statesync] -# State sync rapidly bootstraps a new node by discovering, fetching, and restoring a state machine -# snapshot from peers instead of fetching and replaying historical blocks. Requires some peers in -# the network to take and serve state machine snapshots. State sync is not attempted if the node -# has any local state (LastBlockHeight > 0). The node will have a truncated block history, -# starting from the height of the snapshot. -enable = false - -# RPC servers (comma-separated) for light client verification of the synced state machine and -# retrieval of state data for node bootstrapping. Also needs a trusted height and corresponding -# header hash obtained from a trusted source, and a period during which validators can be trusted. -# -# For Cosmos SDK-based chains, trust_period should usually be about 2/3 of the unbonding time (~2 -# weeks) during which they can be financially punished (slashed) for misbehavior. -rpc_servers = "" -trust_height = 0 -trust_hash = "" -trust_period = "168h0m0s" - -# Time to spend discovering snapshots before initiating a restore. -discovery_time = "15s" - -# Temporary directory for state sync snapshot chunks, defaults to the OS tempdir (typically /tmp). -# Will create a new, randomly named directory within, and remove it when done. -temp_dir = "" - -####################################################### -### Fast Sync Configuration Connections ### -####################################################### -[fastsync] - -# Fast Sync version to use: -# 1) "v0" (default) - the legacy fast sync implementation -# 2) "v1" - refactor of v0 version for better testability -# 2) "v2" - complete redesign of v0, optimized for testability & readability -version = "v0" - -####################################################### -### Consensus Configuration Options ### -####################################################### -[consensus] - -wal_file = "data/cs.wal/wal" - -# How long we wait for a proposal block before prevoting nil -timeout_propose = "1s" -# How much timeout_propose increases with each round -timeout_propose_delta = "500ms" -# How long we wait after receiving +2/3 prevotes for “anything” (ie. not a single block or nil) -timeout_prevote = "1s" -# How much the timeout_prevote increases with each round -timeout_prevote_delta = "500ms" -# How long we wait after receiving +2/3 precommits for “anything” (ie. 
not a single block or nil) -timeout_precommit = "1s" -# How much the timeout_precommit increases with each round -timeout_precommit_delta = "500ms" -# How long we wait after committing a block, before starting on the new -# height (this gives us a chance to receive some more precommits, even -# though we already have +2/3). -timeout_commit = "1s" - -# How many blocks to look back to check existence of the node's consensus votes before joining consensus -# When non-zero, the node will panic upon restart -# if the same consensus key was used to sign {double_sign_check_height} last blocks. -# So, validators should stop the state machine, wait for some blocks, and then restart the state machine to avoid panic. -double_sign_check_height = 0 - -# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) -skip_timeout_commit = false - -# EmptyBlocks mode and possible interval between empty blocks -create_empty_blocks = true -create_empty_blocks_interval = "0s" - -# Reactor sleep duration parameters -peer_gossip_sleep_duration = "100ms" -peer_query_maj23_sleep_duration = "2s" - -####################################################### -### Transaction Indexer Configuration Options ### -####################################################### -[tx_index] - -# What indexer to use for transactions -# -# The application will set which txs to index. In some cases a node operator will be able -# to decide which txs to index based on configuration set in the application. -# -# Options: -# 1) "null" -# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). -# - When "kv" is chosen "tx.height" and "tx.hash" will always be indexed. -indexer = "kv" - -####################################################### -### Instrumentation Configuration Options ### -####################################################### -[instrumentation] - -# When true, Prometheus metrics are served under /metrics on -# PrometheusListenAddr. -# Check out the documentation for the list of available metrics. -prometheus = false - -# Address to listen for Prometheus collector(s) connections -prometheus_listen_addr = ":26660" - -# Maximum number of simultaneous connections. -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. 
-max_open_connections = 3 - -# Instrumentation namespace -namespace = "tendermint" diff --git a/ci/chains/gaia/v3.0.0/ibc-1/config/genesis.json b/ci/chains/gaia/v3.0.0/ibc-1/config/genesis.json deleted file mode 100644 index 35096d1758..0000000000 --- a/ci/chains/gaia/v3.0.0/ibc-1/config/genesis.json +++ /dev/null @@ -1,285 +0,0 @@ -{ - "genesis_time": "2021-01-29T14:18:07.030169146Z", - "chain_id": "ibc-1", - "initial_height": "1", - "consensus_params": { - "block": { - "max_bytes": "22020096", - "max_gas": "-1", - "time_iota_ms": "1000" - }, - "evidence": { - "max_age_num_blocks": "100000", - "max_age_duration": "172800000000000", - "max_bytes": "1048576" - }, - "validator": { - "pub_key_types": [ - "ed25519" - ] - }, - "version": {} - }, - "app_hash": "", - "app_state": { - "auth": { - "params": { - "max_memo_characters": "256", - "tx_sig_limit": "7", - "tx_size_cost_per_byte": "10", - "sig_verify_cost_ed25519": "590", - "sig_verify_cost_secp256k1": "1000" - }, - "accounts": [ - { - "@type": "/cosmos.auth.v1beta1.BaseAccount", - "address": "cosmos1y5yvcjvzjukdrk3993xlumt05468mytrwt5v4j", - "pub_key": null, - "account_number": "0", - "sequence": "0" - }, - { - "@type": "/cosmos.auth.v1beta1.BaseAccount", - "address": "cosmos1y8sa4m3pv69nlfraxuh2zg2ghvua2wfddnnkkr", - "pub_key": null, - "account_number": "0", - "sequence": "0" - } - ] - }, - "bank": { - "params": { - "send_enabled": [], - "default_send_enabled": true - }, - "balances": [ - { - "address": "cosmos1y8sa4m3pv69nlfraxuh2zg2ghvua2wfddnnkkr", - "coins": [ - { - "denom": "stake", - "amount": "100000000000" - } - ] - }, - { - "address": "cosmos1y5yvcjvzjukdrk3993xlumt05468mytrwt5v4j", - "coins": [ - { - "denom": "samoleans", - "amount": "100000000000" - }, - { - "denom": "stake", - "amount": "100000000000" - } - ] - } - ], - "supply": [], - "denom_metadata": [] - }, - "capability": { - "index": "1", - "owners": [] - }, - "crisis": { - "constant_fee": { - "denom": "stake", - "amount": "1000" - } - }, - "distribution": { - "params": { - "community_tax": "0.020000000000000000", - "base_proposer_reward": "0.010000000000000000", - "bonus_proposer_reward": "0.040000000000000000", - "withdraw_addr_enabled": true - }, - "fee_pool": { - "community_pool": [] - }, - "delegator_withdraw_infos": [], - "previous_proposer": "", - "outstanding_rewards": [], - "validator_accumulated_commissions": [], - "validator_historical_rewards": [], - "validator_current_rewards": [], - "delegator_starting_infos": [], - "validator_slash_events": [] - }, - "evidence": { - "evidence": [] - }, - "genutil": { - "gen_txs": [ - { - "body": { - "messages": [ - { - "@type": "/cosmos.staking.v1beta1.MsgCreateValidator", - "description": { - "moniker": "ibc-1", - "identity": "", - "website": "", - "security_contact": "", - "details": "" - }, - "commission": { - "rate": "0.100000000000000000", - "max_rate": "0.200000000000000000", - "max_change_rate": "0.010000000000000000" - }, - "min_self_delegation": "1", - "delegator_address": "cosmos1y8sa4m3pv69nlfraxuh2zg2ghvua2wfddnnkkr", - "validator_address": "cosmosvaloper1y8sa4m3pv69nlfraxuh2zg2ghvua2wfdg88r6s", - "pubkey": { - "@type": "/cosmos.crypto.ed25519.PubKey", - "key": "Sl/6UDZu7Ttd9MOsUnMO4xswFYfVbXyUDRRNbAXH2kQ=" - }, - "value": { - "denom": "stake", - "amount": "100000000000" - } - } - ], - "memo": "c15eddb601e66c008c241ccfc80f7cf3358cfa7f@192.168.1.213:26656", - "timeout_height": "0", - "extension_options": [], - "non_critical_extension_options": [] - }, - "auth_info": { - "signer_infos": [ - { - "public_key": { - 
"@type": "/cosmos.crypto.secp256k1.PubKey", - "key": "AwgqmhChUIpyAo157HyCorxiB8s0KKH0UtoHYazZdZal" - }, - "mode_info": { - "single": { - "mode": "SIGN_MODE_DIRECT" - } - }, - "sequence": "0" - } - ], - "fee": { - "amount": [], - "gas_limit": "200000", - "payer": "", - "granter": "" - } - }, - "signatures": [ - "4mnUAljJh9m6Wd1Yl7aLDwoR4gezH9ugjSW4SqSOtp5FCnUoX+UR3oAR0kQpzX5S4qLbV4nfANmwj6Cgw9hrOw==" - ] - } - ] - }, - "gov": { - "starting_proposal_id": "1", - "deposits": [], - "votes": [], - "proposals": [], - "deposit_params": { - "min_deposit": [ - { - "denom": "stake", - "amount": "10000000" - } - ], - "max_deposit_period": "172800s" - }, - "voting_params": { - "voting_period": "172800s" - }, - "tally_params": { - "quorum": "0.334000000000000000", - "threshold": "0.500000000000000000", - "veto_threshold": "0.334000000000000000" - } - }, - "ibc": { - "client_genesis": { - "clients": [], - "clients_consensus": [], - "clients_metadata": [], - "params": { - "allowed_clients": [ - "06-solomachine", - "07-tendermint" - ] - }, - "create_localhost": false, - "next_client_sequence": "0" - }, - "connection_genesis": { - "connections": [], - "client_connection_paths": [], - "next_connection_sequence": "0" - }, - "channel_genesis": { - "channels": [], - "acknowledgements": [], - "commitments": [], - "receipts": [], - "send_sequences": [], - "recv_sequences": [], - "ack_sequences": [], - "next_channel_sequence": "0" - } - }, - "mint": { - "minter": { - "inflation": "0.130000000000000000", - "annual_provisions": "0.000000000000000000" - }, - "params": { - "mint_denom": "stake", - "inflation_rate_change": "0.130000000000000000", - "inflation_max": "0.200000000000000000", - "inflation_min": "0.070000000000000000", - "goal_bonded": "0.670000000000000000", - "blocks_per_year": "6311520" - } - }, - "params": null, - "slashing": { - "params": { - "signed_blocks_window": "100", - "min_signed_per_window": "0.500000000000000000", - "downtime_jail_duration": "600s", - "slash_fraction_double_sign": "0.050000000000000000", - "slash_fraction_downtime": "0.010000000000000000" - }, - "signing_infos": [], - "missed_blocks": [] - }, - "staking": { - "params": { - "unbonding_time": "1814400s", - "max_validators": 100, - "max_entries": 7, - "historical_entries": 10000, - "bond_denom": "stake" - }, - "last_total_power": "0", - "last_validator_powers": [], - "validators": [], - "delegations": [], - "unbonding_delegations": [], - "redelegations": [], - "exported": false - }, - "transfer": { - "port_id": "transfer", - "denom_traces": [], - "params": { - "send_enabled": true, - "receive_enabled": true - } - }, - "upgrade": {}, - "vesting": {} - } -} \ No newline at end of file diff --git a/ci/chains/gaia/v3.0.0/ibc-1/config/gentx/gentx-c15eddb601e66c008c241ccfc80f7cf3358cfa7f.json b/ci/chains/gaia/v3.0.0/ibc-1/config/gentx/gentx-c15eddb601e66c008c241ccfc80f7cf3358cfa7f.json deleted file mode 100644 index 526c658967..0000000000 --- a/ci/chains/gaia/v3.0.0/ibc-1/config/gentx/gentx-c15eddb601e66c008c241ccfc80f7cf3358cfa7f.json +++ /dev/null @@ -1 +0,0 @@ 
-{"body":{"messages":[{"@type":"/cosmos.staking.v1beta1.MsgCreateValidator","description":{"moniker":"ibc-1","identity":"","website":"","security_contact":"","details":""},"commission":{"rate":"0.100000000000000000","max_rate":"0.200000000000000000","max_change_rate":"0.010000000000000000"},"min_self_delegation":"1","delegator_address":"cosmos1y8sa4m3pv69nlfraxuh2zg2ghvua2wfddnnkkr","validator_address":"cosmosvaloper1y8sa4m3pv69nlfraxuh2zg2ghvua2wfdg88r6s","pubkey":{"@type":"/cosmos.crypto.ed25519.PubKey","key":"Sl/6UDZu7Ttd9MOsUnMO4xswFYfVbXyUDRRNbAXH2kQ="},"value":{"denom":"stake","amount":"100000000000"}}],"memo":"c15eddb601e66c008c241ccfc80f7cf3358cfa7f@192.168.1.213:26656","timeout_height":"0","extension_options":[],"non_critical_extension_options":[]},"auth_info":{"signer_infos":[{"public_key":{"@type":"/cosmos.crypto.secp256k1.PubKey","key":"AwgqmhChUIpyAo157HyCorxiB8s0KKH0UtoHYazZdZal"},"mode_info":{"single":{"mode":"SIGN_MODE_DIRECT"}},"sequence":"0"}],"fee":{"amount":[],"gas_limit":"200000","payer":"","granter":""}},"signatures":["4mnUAljJh9m6Wd1Yl7aLDwoR4gezH9ugjSW4SqSOtp5FCnUoX+UR3oAR0kQpzX5S4qLbV4nfANmwj6Cgw9hrOw=="]} diff --git a/ci/chains/gaia/v3.0.0/ibc-1/config/node_key.json b/ci/chains/gaia/v3.0.0/ibc-1/config/node_key.json deleted file mode 100644 index 39c903be39..0000000000 --- a/ci/chains/gaia/v3.0.0/ibc-1/config/node_key.json +++ /dev/null @@ -1 +0,0 @@ -{"priv_key":{"type":"tendermint/PrivKeyEd25519","value":"V6YnjXHp7KVvKGjeiVnpo2gXYTz/L7nlxBqJACkGIejLNOwsxmAX3JtFYT0tmUXBdFtVsv8V9oCl1tdHRnLtkQ=="}} \ No newline at end of file diff --git a/ci/chains/gaia/v3.0.0/ibc-1/config/priv_validator_key.json b/ci/chains/gaia/v3.0.0/ibc-1/config/priv_validator_key.json deleted file mode 100644 index 4ba202f381..0000000000 --- a/ci/chains/gaia/v3.0.0/ibc-1/config/priv_validator_key.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "address": "574E7B0DC5C0C1D726059ADC9CF3EDCF06815C46", - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "Sl/6UDZu7Ttd9MOsUnMO4xswFYfVbXyUDRRNbAXH2kQ=" - }, - "priv_key": { - "type": "tendermint/PrivKeyEd25519", - "value": "EteHYnSwj1wLWwDYKJpnxOE1aeEDrlpOTVE7IMCb4idKX/pQNm7tO130w6xScw7jGzAVh9VtfJQNFE1sBcfaRA==" - } -} \ No newline at end of file diff --git a/ci/chains/gaia/v3.0.0/ibc-1/key_seed.json b/ci/chains/gaia/v3.0.0/ibc-1/key_seed.json deleted file mode 100644 index cef64cfb7f..0000000000 --- a/ci/chains/gaia/v3.0.0/ibc-1/key_seed.json +++ /dev/null @@ -1 +0,0 @@ -{"name":"user","type":"local","address":"cosmos1y5yvcjvzjukdrk3993xlumt05468mytrwt5v4j","pubkey":"cosmospub1addwnpepqvfmqqc0ghq7rjwe5kqyg2n9d9xclfcwvvcvemscqxqdkh68xcqfsaz5m4c","mnemonic":"stick blossom laugh account diamond tribe tool joy fox anger strategy trash vendor eager clog core office device patch install turkey muscle conduct city"} diff --git a/ci/chains/gaia/v3.0.0/ibc-1/keyring-test/21e1daee21668b3fa47d372ea12148bb39d5392d.address b/ci/chains/gaia/v3.0.0/ibc-1/keyring-test/21e1daee21668b3fa47d372ea12148bb39d5392d.address deleted file mode 100644 index 18fabf8b58..0000000000 --- a/ci/chains/gaia/v3.0.0/ibc-1/keyring-test/21e1daee21668b3fa47d372ea12148bb39d5392d.address +++ /dev/null @@ -1 +0,0 @@ 
-eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wMS0yOSAwOToxODowOC4xODA5NDQ5MjQgLTA1MDAgRVNUIG09KzAuMTM3NjA0NTE3IiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiWUNHNnFUekJtVVVqZExiTCJ9.MZFL8-n9JInB8KXYiYrgUbowFxAn1Phgh-GjJ7ZkqKuHP4LQxe6H5g.xYWRM_9wTncgcwPM.RTCU1R9HB_21CeQ19AB7tzqM02iXYw6zJzMJNQ1HXWVu95CtDcpY5Pq4qfLD_C4G7CLOWfIFa80MOVpamL6oY5LoilnRd7HGAiI-DBFuMTJkDJfjrtOuU6I4eo9s3O7ixB4JvxoKI9pDlXv9lXkAQwBwAvQQ0VSZItm2JuQp0NxFb0cIVX7rx6_lBTq5YPg10vrjp3QQ9zssGH1Ljc7PhB3NUe20lGrY3g_1GvrBqt3hhJIih03Ai3Fv.w1RB9aOyRJDHA_dA876pBQ \ No newline at end of file diff --git a/ci/chains/gaia/v3.0.0/ibc-1/keyring-test/2508cc4982972cd1da252c4dfe6d6fa5747d9163.address b/ci/chains/gaia/v3.0.0/ibc-1/keyring-test/2508cc4982972cd1da252c4dfe6d6fa5747d9163.address deleted file mode 100644 index 001e62a194..0000000000 --- a/ci/chains/gaia/v3.0.0/ibc-1/keyring-test/2508cc4982972cd1da252c4dfe6d6fa5747d9163.address +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wMS0yOSAwOToxODowOS4zNzI3NzQxOTMgLTA1MDAgRVNUIG09KzAuMTc1NzQzMjU2IiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiSUlBb3k0ZzV2ZlVRbV9sSSJ9.V7DHUp6H5PWzVX2xoS6MFpIAElsf0OpA2sD6xlPWRQCEKzCy3NGBdQ.nZ8t10M-8IxPvrI_.Fl0jClobEpAwJ-TOyj3dmbaMW2wRo_w8GP8PXnfUOaFJVWvrIW0yrlYp-M8r5zR1sa0OQy3QnOuu63xVMw9ouGqa1NznLHOjqi1CGP4dWrCFe9a1-owLk28U5lYIrI58kJWaor7REJjcypG0Y4D8eum2n4tXx82C-z_M3tHvhBrMUCONSPcjr36HxkXlc3RQYLn13QH6Qlrq6oxF3W51qN3LmJgtaStAnpEpn8LsABurRQ.4vt3nXFsGLbzAJ0EjW3KJw \ No newline at end of file diff --git a/ci/chains/gaia/v3.0.0/ibc-1/keyring-test/user.info b/ci/chains/gaia/v3.0.0/ibc-1/keyring-test/user.info deleted file mode 100644 index f9e01b2aed..0000000000 --- a/ci/chains/gaia/v3.0.0/ibc-1/keyring-test/user.info +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wMS0yOSAwOToxODowOS4zNTYxMTkwMDMgLTA1MDAgRVNUIG09KzAuMTU5MDg4MDM2IiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiVGJQcUZETEwxTWRTZGhleiJ9._pcGFxyxfLg910MktjRBqMqgfYa8xijq5HHYwJ2LnCC3a69LX4woMw.Ubual1F2FZV3gW2T.S9nk-LnI3fcvbXcTzyj_ghMAK14dZC0JMPqwRtwiXD9fhOlNBnWrKrACS1OYi2P3_KbA9F3diFnSSiCj77DW0HTw1Ez6bQf0nFOP1usveK1EzXFSnuhWqRa8xGjLtaggN5m_wdeC8kWimKXAaigqxGVwHkcfx3EzMI0GLArkPPXBIzs4uOXnsMn98GDLlN85iPEWEBiIhHerRiI0V5ftdpDNeW2ZWgj5GXafxpALAMve38jlob79Hplq73PJqSMmRu-Yczr0YxwErhKeZqFCjaVl6bCoxQo9pASsEsc9gJNrBewHGQAZnOXKFSaJzcePGCwlROf-isxqdxBnlqUekg5nlRsb4sc.xmoB_vY5tCmBQ6eqYitrHw \ No newline at end of file diff --git a/ci/chains/gaia/v3.0.0/ibc-1/keyring-test/validator.info b/ci/chains/gaia/v3.0.0/ibc-1/keyring-test/validator.info deleted file mode 100644 index d8c9886640..0000000000 --- a/ci/chains/gaia/v3.0.0/ibc-1/keyring-test/validator.info +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wMS0yOSAwOToxODowOC4xNzQ5ODM0MTcgLTA1MDAgRVNUIG09KzAuMTMxNjQzMDEwIiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoid0h4dDRWbFMxVDV3TGs3ZSJ9.8D3_glu4tLCAGBAIYciFA5gWr9TlSyw28IjQqBg72WqQ9weaVNCdMg.DqW1vdFKv8LMYlHY.AvlW-jmxVZuQZ1i1EOazHlDBmQZJUxQGhM1fJ3AMNMD9cFkC65TF97KhcNUE2Tpe8xMpMTOPz6Ys8gfs4hE-LhkyQ_y_pWw9y-Pz3vC1jA7dRrRYJ-YKV29PdFGHXTyp_hnlAyFEaJ8c-sjjVs1Hz1szSfyNKWzG9w4rRJdGPtFeEbb9TjhXLDoAWAdXYIFOZ2qjKl5XFycMbhlacHO9FMQ5rS1xcFUhL1wF4_wxRvbZ3CqhQXxsVb4mlb8kMXRAkLYoPUjKIAez0J0arMc_2DscfTT5kt9XUjcst8zs5ssBMBgpVHmaB4pxlUNUiSQwiKJ3lA4m0iturK-csJ82CUL5LTJSIq1GpqlSMK27XbOEQXs5.g4pBKgKgTOGnGowaK-kx1A \ No newline at end of file diff --git a/ci/chains/gaia/v3.0.0/ibc-1/validator_seed.json b/ci/chains/gaia/v3.0.0/ibc-1/validator_seed.json deleted file mode 100644 index 
ff4f3ed72c..0000000000 --- a/ci/chains/gaia/v3.0.0/ibc-1/validator_seed.json +++ /dev/null @@ -1 +0,0 @@ -{"name":"validator","type":"local","address":"cosmos1y8sa4m3pv69nlfraxuh2zg2ghvua2wfddnnkkr","pubkey":"cosmospub1addwnpepqvyz4xss59gg5usz34u7clyz527xyp7txs52razjmgrkrtxewkt22myg9hq","mnemonic":"hill corn gravity veteran pond unaware sure blame fringe enable torch myself hint tag provide entire first auto hidden tide like glory bread ramp"} diff --git a/ci/chains/gaia/v4.0.0/ibc-0/config/app.toml b/ci/chains/gaia/v4.0.0/ibc-0/config/app.toml deleted file mode 100644 index 54de73e392..0000000000 --- a/ci/chains/gaia/v4.0.0/ibc-0/config/app.toml +++ /dev/null @@ -1,152 +0,0 @@ -# This is a TOML config file. -# For more information, see https://github.com/toml-lang/toml - -############################################################################### -### Base Configuration ### -############################################################################### - -# The minimum gas prices a validator is willing to accept for processing a -# transaction. A transaction's fees must meet the minimum of any denomination -# specified in this config (e.g. 0.25token1;0.0001token2). -minimum-gas-prices = "" - -# default: the last 100 states are kept in addition to every 500th state; pruning at 10 block intervals -# nothing: all historic states will be saved, nothing will be deleted (i.e. archiving node) -# everything: all saved states will be deleted, storing only the current state; pruning at 10 block intervals -# custom: allow pruning options to be manually specified through 'pruning-keep-recent', 'pruning-keep-every', and 'pruning-interval' -pruning = "default" - -# These are applied if and only if the pruning strategy is custom. -pruning-keep-recent = "0" -pruning-keep-every = "0" -pruning-interval = "0" - -# HaltHeight contains a non-zero block height at which a node will gracefully -# halt and shutdown that can be used to assist upgrades and testing. -# -# Note: Commitment of state will be attempted on the corresponding block. -halt-height = 0 - -# HaltTime contains a non-zero minimum block time (in Unix seconds) at which -# a node will gracefully halt and shutdown that can be used to assist upgrades -# and testing. -# -# Note: Commitment of state will be attempted on the corresponding block. -halt-time = 0 - -# MinRetainBlocks defines the minimum block height offset from the current -# block being committed, such that all blocks past this offset are pruned -# from Tendermint. It is used as part of the process of determining the -# ResponseCommit.RetainHeight value during ABCI Commit. A value of 0 indicates -# that no blocks should be pruned. -# -# This configuration value is only responsible for pruning Tendermint blocks. -# It has no bearing on application state pruning which is determined by the -# "pruning-*" configurations. -# -# Note: Tendermint block pruning is dependant on this parameter in conunction -# with the unbonding (safety threshold) period, state pruning and state sync -# snapshot parameters to determine the correct minimum value of -# ResponseCommit.RetainHeight. -min-retain-blocks = 0 - -# InterBlockCache enables inter-block caching. -inter-block-cache = true - -# IndexEvents defines the set of events in the form {eventType}.{attributeKey}, -# which informs Tendermint what to index. If empty, all events will be indexed. 
-# -# Example: -# ["message.sender", "message.recipient"] -index-events = [] - -############################################################################### -### Telemetry Configuration ### -############################################################################### - -[telemetry] - -# Prefixed with keys to separate services. -service-name = "" - -# Enabled enables the application telemetry functionality. When enabled, -# an in-memory sink is also enabled by default. Operators may also enabled -# other sinks such as Prometheus. -enabled = false - -# Enable prefixing gauge values with hostname. -enable-hostname = false - -# Enable adding hostname to labels. -enable-hostname-label = false - -# Enable adding service to labels. -enable-service-label = false - -# PrometheusRetentionTime, when positive, enables a Prometheus metrics sink. -prometheus-retention-time = 0 - -# GlobalLabels defines a global set of name/value label tuples applied to all -# metrics emitted using the wrapper functions defined in telemetry package. -# -# Example: -# [["chain_id", "cosmoshub-1"]] -global-labels = [ -] - -############################################################################### -### API Configuration ### -############################################################################### - -[api] - -# Enable defines if the API server should be enabled. -enable = false - -# Swagger defines if swagger documentation should automatically be registered. -swagger = false - -# Address defines the API server to listen on. -address = "tcp://0.0.0.0:1317" - -# MaxOpenConnections defines the number of maximum open connections. -max-open-connections = 1000 - -# RPCReadTimeout defines the Tendermint RPC read timeout (in seconds). -rpc-read-timeout = 10 - -# RPCWriteTimeout defines the Tendermint RPC write timeout (in seconds). -rpc-write-timeout = 0 - -# RPCMaxBodyBytes defines the Tendermint maximum response body (in bytes). -rpc-max-body-bytes = 1000000 - -# EnableUnsafeCORS defines if CORS should be enabled (unsafe - use it at your own risk). -enabled-unsafe-cors = false - -############################################################################### -### gRPC Configuration ### -############################################################################### - -[grpc] - -# Enable defines if the gRPC server should be enabled. -enable = true - -# Address defines the gRPC server address to bind to. -address = "0.0.0.0:9090" - -############################################################################### -### State Sync Configuration ### -############################################################################### - -# State sync snapshots allow other nodes to rapidly join the network without replaying historical -# blocks, instead downloading and applying a snapshot of the application state at a given height. -[state-sync] - -# snapshot-interval specifies the block interval at which local state sync snapshots are -# taken (0 to disable). Must be a multiple of pruning-keep-every. -snapshot-interval = 0 - -# snapshot-keep-recent specifies the number of recent snapshots to keep and serve (0 to keep all). -snapshot-keep-recent = 2 diff --git a/ci/chains/gaia/v4.0.0/ibc-0/config/config.toml b/ci/chains/gaia/v4.0.0/ibc-0/config/config.toml deleted file mode 100644 index 4714078da1..0000000000 --- a/ci/chains/gaia/v4.0.0/ibc-0/config/config.toml +++ /dev/null @@ -1,393 +0,0 @@ -# This is a TOML config file. 
-# For more information, see https://github.com/toml-lang/toml - -# NOTE: Any path below can be absolute (e.g. "/var/myawesomeapp/data") or -# relative to the home directory (e.g. "data"). The home directory is -# "$HOME/.tendermint" by default, but could be changed via $TMHOME env variable -# or --home cmd flag. - -####################################################################### -### Main Base Config Options ### -####################################################################### - -# TCP or UNIX socket address of the ABCI application, -# or the name of an ABCI application compiled in with the Tendermint binary -proxy_app = "tcp://127.0.0.1:26658" - -# A custom human readable name for this node -moniker = "ibc-0" - -# If this node is many blocks behind the tip of the chain, FastSync -# allows them to catchup quickly by downloading blocks in parallel -# and verifying their commits -fast_sync = true - -# Database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb -# * goleveldb (github.com/syndtr/goleveldb - most popular implementation) -# - pure go -# - stable -# * cleveldb (uses levigo wrapper) -# - fast -# - requires gcc -# - use cleveldb build tag (go build -tags cleveldb) -# * boltdb (uses etcd's fork of bolt - github.com/etcd-io/bbolt) -# - EXPERIMENTAL -# - may be faster is some use-cases (random reads - indexer) -# - use boltdb build tag (go build -tags boltdb) -# * rocksdb (uses github.com/tecbot/gorocksdb) -# - EXPERIMENTAL -# - requires gcc -# - use rocksdb build tag (go build -tags rocksdb) -# * badgerdb (uses github.com/dgraph-io/badger) -# - EXPERIMENTAL -# - use badgerdb build tag (go build -tags badgerdb) -db_backend = "goleveldb" - -# Database directory -db_dir = "data" - -# Output level for logging, including package level options -log_level = "info" - -# Output format: 'plain' (colored text) or 'json' -log_format = "plain" - -##### additional base config options ##### - -# Path to the JSON file containing the initial validator set and other meta data -genesis_file = "config/genesis.json" - -# Path to the JSON file containing the private key to use as a validator in the consensus protocol -priv_validator_key_file = "config/priv_validator_key.json" - -# Path to the JSON file containing the last sign state of a validator -priv_validator_state_file = "data/priv_validator_state.json" - -# TCP or UNIX socket address for Tendermint to listen on for -# connections from an external PrivValidator process -priv_validator_laddr = "" - -# Path to the JSON file containing the private key to use for node authentication in the p2p protocol -node_key_file = "config/node_key.json" - -# Mechanism to connect to the ABCI application: socket | grpc -abci = "socket" - -# If true, query the ABCI app on connecting to a new peer -# so the app can decide if we should keep the connection or not -filter_peers = false - - -####################################################################### -### Advanced Configuration Options ### -####################################################################### - -####################################################### -### RPC Server Configuration Options ### -####################################################### -[rpc] - -# TCP or UNIX socket address for the RPC server to listen on -laddr = "tcp://0.0.0.0:26657" - -# A list of origins a cross-domain request can be executed from -# Default value '[]' disables cors support -# Use '["*"]' to allow any origin -cors_allowed_origins = [] - -# A list of methods the client is allowed to use 
with cross-domain requests -cors_allowed_methods = ["HEAD", "GET", "POST", ] - -# A list of non simple headers the client is allowed to use with cross-domain requests -cors_allowed_headers = ["Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time", ] - -# TCP or UNIX socket address for the gRPC server to listen on -# NOTE: This server only supports /broadcast_tx_commit -grpc_laddr = "" - -# Maximum number of simultaneous connections. -# Does not include RPC (HTTP&WebSocket) connections. See max_open_connections -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} -# 1024 - 40 - 10 - 50 = 924 = ~900 -grpc_max_open_connections = 900 - -# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool -unsafe = false - -# Maximum number of simultaneous connections (including WebSocket). -# Does not include gRPC connections. See grpc_max_open_connections -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} -# 1024 - 40 - 10 - 50 = 924 = ~900 -max_open_connections = 900 - -# Maximum number of unique clientIDs that can /subscribe -# If you're using /broadcast_tx_commit, set to the estimated maximum number -# of broadcast_tx_commit calls per block. -max_subscription_clients = 100 - -# Maximum number of unique queries a given client can /subscribe to -# If you're using GRPC (or Local RPC client) and /broadcast_tx_commit, set to -# the estimated # maximum number of broadcast_tx_commit calls per block. -max_subscriptions_per_client = 5 - -# How long to wait for a tx to be committed during /broadcast_tx_commit. -# WARNING: Using a value larger than 10s will result in increasing the -# global HTTP write timeout, which applies to all connections and endpoints. -# See https://github.com/tendermint/tendermint/issues/3435 -timeout_broadcast_tx_commit = "10s" - -# Maximum size of request body, in bytes -max_body_bytes = 1000000 - -# Maximum size of request header, in bytes -max_header_bytes = 1048576 - -# The path to a file containing certificate that is used to create the HTTPS server. -# Might be either absolute path or path related to Tendermint's config directory. -# If the certificate is signed by a certificate authority, -# the certFile should be the concatenation of the server's certificate, any intermediates, -# and the CA's certificate. -# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. -# Otherwise, HTTP server is run. -tls_cert_file = "" - -# The path to a file containing matching private key that is used to create the HTTPS server. -# Might be either absolute path or path related to Tendermint's config directory. -# NOTE: both tls-cert-file and tls-key-file must be present for Tendermint to create HTTPS server. -# Otherwise, HTTP server is run. 
-tls_key_file = "" - -# pprof listen address (https://golang.org/pkg/net/http/pprof) -pprof_laddr = "localhost:6060" - -####################################################### -### P2P Configuration Options ### -####################################################### -[p2p] - -# Address to listen for incoming connections -laddr = "tcp://0.0.0.0:26656" - -# Address to advertise to peers for them to dial -# If empty, will use the same port as the laddr, -# and will introspect on the listener or use UPnP -# to figure out the address. -external_address = "" - -# Comma separated list of seed nodes to connect to -seeds = "" - -# Comma separated list of nodes to keep persistent connections to -persistent_peers = "" - -# UPNP port forwarding -upnp = false - -# Path to address book -addr_book_file = "config/addrbook.json" - -# Set true for strict address routability rules -# Set false for private or local networks -addr_book_strict = true - -# Maximum number of inbound peers -max_num_inbound_peers = 40 - -# Maximum number of outbound peers to connect to, excluding persistent peers -max_num_outbound_peers = 10 - -# List of node IDs, to which a connection will be (re)established ignoring any existing limits -unconditional_peer_ids = "" - -# Maximum pause when redialing a persistent peer (if zero, exponential backoff is used) -persistent_peers_max_dial_period = "0s" - -# Time to wait before flushing messages out on the connection -flush_throttle_timeout = "100ms" - -# Maximum size of a message packet payload, in bytes -max_packet_msg_payload_size = 1024 - -# Rate at which packets can be sent, in bytes/second -send_rate = 5120000 - -# Rate at which packets can be received, in bytes/second -recv_rate = 5120000 - -# Set true to enable the peer-exchange reactor -pex = true - -# Seed mode, in which node constantly crawls the network and looks for -# peers. If another node asks it for addresses, it responds and disconnects. -# -# Does not work if the peer-exchange reactor is disabled. -seed_mode = false - -# Comma separated list of peer IDs to keep private (will not be gossiped to other peers) -private_peer_ids = "" - -# Toggle to disable guard against peers connecting from the same ip. -allow_duplicate_ip = false - -# Peer connection configuration. -handshake_timeout = "20s" -dial_timeout = "3s" - -####################################################### -### Mempool Configuration Option ### -####################################################### -[mempool] - -recheck = true -broadcast = true -wal_dir = "" - -# Maximum number of transactions in the mempool -size = 5000 - -# Limit the total size of all txs in the mempool. -# This only accounts for raw transactions (e.g. given 1MB transactions and -# max_txs_bytes=5MB, mempool will only accept 5 transactions). -max_txs_bytes = 1073741824 - -# Size of the cache (used to filter transactions we saw earlier) in transactions -cache_size = 10000 - -# Do not remove invalid transactions from the cache (default: false) -# Set to true if it's not possible for any invalid transaction to become valid -# again in the future. -keep-invalid-txs-in-cache = false - -# Maximum size of a single transaction. -# NOTE: the max size of a tx transmitted over the network is {max_tx_bytes}. -max_tx_bytes = 1048576 - -# Maximum size of a batch of transactions to send to a peer -# Including space needed by encoding (one varint per transaction). 
-# XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796 -max_batch_bytes = 0 - -####################################################### -### State Sync Configuration Options ### -####################################################### -[statesync] -# State sync rapidly bootstraps a new node by discovering, fetching, and restoring a state machine -# snapshot from peers instead of fetching and replaying historical blocks. Requires some peers in -# the network to take and serve state machine snapshots. State sync is not attempted if the node -# has any local state (LastBlockHeight > 0). The node will have a truncated block history, -# starting from the height of the snapshot. -enable = false - -# RPC servers (comma-separated) for light client verification of the synced state machine and -# retrieval of state data for node bootstrapping. Also needs a trusted height and corresponding -# header hash obtained from a trusted source, and a period during which validators can be trusted. -# -# For Cosmos SDK-based chains, trust_period should usually be about 2/3 of the unbonding time (~2 -# weeks) during which they can be financially punished (slashed) for misbehavior. -rpc_servers = "" -trust_height = 0 -trust_hash = "" -trust_period = "168h0m0s" - -# Time to spend discovering snapshots before initiating a restore. -discovery_time = "15s" - -# Temporary directory for state sync snapshot chunks, defaults to the OS tempdir (typically /tmp). -# Will create a new, randomly named directory within, and remove it when done. -temp_dir = "" - -####################################################### -### Fast Sync Configuration Connections ### -####################################################### -[fastsync] - -# Fast Sync version to use: -# 1) "v0" (default) - the legacy fast sync implementation -# 2) "v1" - refactor of v0 version for better testability -# 2) "v2" - complete redesign of v0, optimized for testability & readability -version = "v0" - -####################################################### -### Consensus Configuration Options ### -####################################################### -[consensus] - -wal_file = "data/cs.wal/wal" - -# How long we wait for a proposal block before prevoting nil -timeout_propose = "1s" -# How much timeout_propose increases with each round -timeout_propose_delta = "500ms" -# How long we wait after receiving +2/3 prevotes for “anything” (ie. not a single block or nil) -timeout_prevote = "1s" -# How much the timeout_prevote increases with each round -timeout_prevote_delta = "500ms" -# How long we wait after receiving +2/3 precommits for “anything” (ie. not a single block or nil) -timeout_precommit = "1s" -# How much the timeout_precommit increases with each round -timeout_precommit_delta = "500ms" -# How long we wait after committing a block, before starting on the new -# height (this gives us a chance to receive some more precommits, even -# though we already have +2/3). -timeout_commit = "1s" - -# How many blocks to look back to check existence of the node's consensus votes before joining consensus -# When non-zero, the node will panic upon restart -# if the same consensus key was used to sign {double_sign_check_height} last blocks. -# So, validators should stop the state machine, wait for some blocks, and then restart the state machine to avoid panic. 
-double_sign_check_height = 0 - -# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) -skip_timeout_commit = false - -# EmptyBlocks mode and possible interval between empty blocks -create_empty_blocks = true -create_empty_blocks_interval = "0s" - -# Reactor sleep duration parameters -peer_gossip_sleep_duration = "100ms" -peer_query_maj23_sleep_duration = "2s" - -####################################################### -### Transaction Indexer Configuration Options ### -####################################################### -[tx_index] - -# What indexer to use for transactions -# -# The application will set which txs to index. In some cases a node operator will be able -# to decide which txs to index based on configuration set in the application. -# -# Options: -# 1) "null" -# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). -# - When "kv" is chosen "tx.height" and "tx.hash" will always be indexed. -indexer = "kv" - -####################################################### -### Instrumentation Configuration Options ### -####################################################### -[instrumentation] - -# When true, Prometheus metrics are served under /metrics on -# PrometheusListenAddr. -# Check out the documentation for the list of available metrics. -prometheus = false - -# Address to listen for Prometheus collector(s) connections -prometheus_listen_addr = ":26660" - -# Maximum number of simultaneous connections. -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -max_open_connections = 3 - -# Instrumentation namespace -namespace = "tendermint" diff --git a/ci/chains/gaia/v4.0.0/ibc-0/config/genesis.json b/ci/chains/gaia/v4.0.0/ibc-0/config/genesis.json deleted file mode 100644 index 8041ce39a5..0000000000 --- a/ci/chains/gaia/v4.0.0/ibc-0/config/genesis.json +++ /dev/null @@ -1,294 +0,0 @@ -{ - "genesis_time": "2021-02-03T14:50:54.649630107Z", - "chain_id": "ibc-0", - "initial_height": "1", - "consensus_params": { - "block": { - "max_bytes": "22020096", - "max_gas": "-1", - "time_iota_ms": "1000" - }, - "evidence": { - "max_age_num_blocks": "100000", - "max_age_duration": "172800000000000", - "max_bytes": "1048576" - }, - "validator": { - "pub_key_types": [ - "ed25519" - ] - }, - "version": {} - }, - "app_hash": "", - "app_state": { - "auth": { - "params": { - "max_memo_characters": "256", - "tx_sig_limit": "7", - "tx_size_cost_per_byte": "10", - "sig_verify_cost_ed25519": "590", - "sig_verify_cost_secp256k1": "1000" - }, - "accounts": [ - { - "@type": "/cosmos.auth.v1beta1.BaseAccount", - "address": "cosmos15f5wafvkhp9yczqw55y996qnc37w8wm3jk82ht", - "pub_key": null, - "account_number": "0", - "sequence": "0" - }, - { - "@type": "/cosmos.auth.v1beta1.BaseAccount", - "address": "cosmos1ej0c20022zj9xr702a8ytjwmgx6ayy6jwaqkjt", - "pub_key": null, - "account_number": "0", - "sequence": "0" - } - ] - }, - "bank": { - "params": { - "send_enabled": [], - "default_send_enabled": true - }, - "balances": [ - { - "address": "cosmos15f5wafvkhp9yczqw55y996qnc37w8wm3jk82ht", - "coins": [ - { - "denom": "samoleans", - "amount": "100000000000" - }, - { - "denom": "stake", - "amount": "100000000000" - } - ] - }, - { - "address": "cosmos1ej0c20022zj9xr702a8ytjwmgx6ayy6jwaqkjt", - "coins": [ - { - "denom": "stake", - "amount": "100000000000" - } - ] - } - ], - "supply": [ - { - "denom": "samoleans", - "amount": "100000000000" - }, - { - "denom": 
"stake", - "amount": "200000000000" - } - ], - "denom_metadata": [] - }, - "capability": { - "index": "1", - "owners": [] - }, - "crisis": { - "constant_fee": { - "denom": "stake", - "amount": "1000" - } - }, - "distribution": { - "params": { - "community_tax": "0.020000000000000000", - "base_proposer_reward": "0.010000000000000000", - "bonus_proposer_reward": "0.040000000000000000", - "withdraw_addr_enabled": true - }, - "fee_pool": { - "community_pool": [] - }, - "delegator_withdraw_infos": [], - "previous_proposer": "", - "outstanding_rewards": [], - "validator_accumulated_commissions": [], - "validator_historical_rewards": [], - "validator_current_rewards": [], - "delegator_starting_infos": [], - "validator_slash_events": [] - }, - "evidence": { - "evidence": [] - }, - "genutil": { - "gen_txs": [ - { - "body": { - "messages": [ - { - "@type": "/cosmos.staking.v1beta1.MsgCreateValidator", - "description": { - "moniker": "ibc-0", - "identity": "", - "website": "", - "security_contact": "", - "details": "" - }, - "commission": { - "rate": "0.100000000000000000", - "max_rate": "0.200000000000000000", - "max_change_rate": "0.010000000000000000" - }, - "min_self_delegation": "1", - "delegator_address": "cosmos1ej0c20022zj9xr702a8ytjwmgx6ayy6jwaqkjt", - "validator_address": "cosmosvaloper1ej0c20022zj9xr702a8ytjwmgx6ayy6jtf5r7c", - "pubkey": { - "@type": "/cosmos.crypto.ed25519.PubKey", - "key": "T4iSX7p9Zfb9ZJS9KmsjSj3ik7Anman99Z9kjdZiNpc=" - }, - "value": { - "denom": "stake", - "amount": "100000000000" - } - } - ], - "memo": "f3e4bdc90fb01a1167636bb40362967569fa150f@192.168.1.213:26656", - "timeout_height": "0", - "extension_options": [], - "non_critical_extension_options": [] - }, - "auth_info": { - "signer_infos": [ - { - "public_key": { - "@type": "/cosmos.crypto.secp256k1.PubKey", - "key": "AxhLYE7FGQkOnVaFBpY9pxwBUUBr3D2IQVyguaU9Rb2w" - }, - "mode_info": { - "single": { - "mode": "SIGN_MODE_DIRECT" - } - }, - "sequence": "0" - } - ], - "fee": { - "amount": [], - "gas_limit": "200000", - "payer": "", - "granter": "" - } - }, - "signatures": [ - "dx/A9Z4TjhlLnS3P5aYMSea2b0IKZP8X7p1zHoLHV+4QCEhKOZMwXY0dKyuCTuafpq+/9VJyOWLqvRnvx+M5ig==" - ] - } - ] - }, - "gov": { - "starting_proposal_id": "1", - "deposits": [], - "votes": [], - "proposals": [], - "deposit_params": { - "min_deposit": [ - { - "denom": "stake", - "amount": "10000000" - } - ], - "max_deposit_period": "172800s" - }, - "voting_params": { - "voting_period": "172800s" - }, - "tally_params": { - "quorum": "0.334000000000000000", - "threshold": "0.500000000000000000", - "veto_threshold": "0.334000000000000000" - } - }, - "ibc": { - "client_genesis": { - "clients": [], - "clients_consensus": [], - "clients_metadata": [], - "params": { - "allowed_clients": [ - "06-solomachine", - "07-tendermint" - ] - }, - "create_localhost": false, - "next_client_sequence": "0" - }, - "connection_genesis": { - "connections": [], - "client_connection_paths": [], - "next_connection_sequence": "0" - }, - "channel_genesis": { - "channels": [], - "acknowledgements": [], - "commitments": [], - "receipts": [], - "send_sequences": [], - "recv_sequences": [], - "ack_sequences": [], - "next_channel_sequence": "0" - } - }, - "mint": { - "minter": { - "inflation": "0.130000000000000000", - "annual_provisions": "0.000000000000000000" - }, - "params": { - "mint_denom": "stake", - "inflation_rate_change": "0.130000000000000000", - "inflation_max": "0.200000000000000000", - "inflation_min": "0.070000000000000000", - "goal_bonded": "0.670000000000000000", - 
"blocks_per_year": "6311520" - } - }, - "params": null, - "slashing": { - "params": { - "signed_blocks_window": "100", - "min_signed_per_window": "0.500000000000000000", - "downtime_jail_duration": "600s", - "slash_fraction_double_sign": "0.050000000000000000", - "slash_fraction_downtime": "0.010000000000000000" - }, - "signing_infos": [], - "missed_blocks": [] - }, - "staking": { - "params": { - "unbonding_time": "1814400s", - "max_validators": 100, - "max_entries": 7, - "historical_entries": 10000, - "bond_denom": "stake" - }, - "last_total_power": "0", - "last_validator_powers": [], - "validators": [], - "delegations": [], - "unbonding_delegations": [], - "redelegations": [], - "exported": false - }, - "transfer": { - "port_id": "transfer", - "denom_traces": [], - "params": { - "send_enabled": true, - "receive_enabled": true - } - }, - "upgrade": {}, - "vesting": {} - } -} \ No newline at end of file diff --git a/ci/chains/gaia/v4.0.0/ibc-0/config/gentx/gentx-f3e4bdc90fb01a1167636bb40362967569fa150f.json b/ci/chains/gaia/v4.0.0/ibc-0/config/gentx/gentx-f3e4bdc90fb01a1167636bb40362967569fa150f.json deleted file mode 100644 index c07fca93cd..0000000000 --- a/ci/chains/gaia/v4.0.0/ibc-0/config/gentx/gentx-f3e4bdc90fb01a1167636bb40362967569fa150f.json +++ /dev/null @@ -1 +0,0 @@ -{"body":{"messages":[{"@type":"/cosmos.staking.v1beta1.MsgCreateValidator","description":{"moniker":"ibc-0","identity":"","website":"","security_contact":"","details":""},"commission":{"rate":"0.100000000000000000","max_rate":"0.200000000000000000","max_change_rate":"0.010000000000000000"},"min_self_delegation":"1","delegator_address":"cosmos1ej0c20022zj9xr702a8ytjwmgx6ayy6jwaqkjt","validator_address":"cosmosvaloper1ej0c20022zj9xr702a8ytjwmgx6ayy6jtf5r7c","pubkey":{"@type":"/cosmos.crypto.ed25519.PubKey","key":"T4iSX7p9Zfb9ZJS9KmsjSj3ik7Anman99Z9kjdZiNpc="},"value":{"denom":"stake","amount":"100000000000"}}],"memo":"f3e4bdc90fb01a1167636bb40362967569fa150f@192.168.1.213:26656","timeout_height":"0","extension_options":[],"non_critical_extension_options":[]},"auth_info":{"signer_infos":[{"public_key":{"@type":"/cosmos.crypto.secp256k1.PubKey","key":"AxhLYE7FGQkOnVaFBpY9pxwBUUBr3D2IQVyguaU9Rb2w"},"mode_info":{"single":{"mode":"SIGN_MODE_DIRECT"}},"sequence":"0"}],"fee":{"amount":[],"gas_limit":"200000","payer":"","granter":""}},"signatures":["dx/A9Z4TjhlLnS3P5aYMSea2b0IKZP8X7p1zHoLHV+4QCEhKOZMwXY0dKyuCTuafpq+/9VJyOWLqvRnvx+M5ig=="]} diff --git a/ci/chains/gaia/v4.0.0/ibc-0/config/node_key.json b/ci/chains/gaia/v4.0.0/ibc-0/config/node_key.json deleted file mode 100644 index 75d41cbcd7..0000000000 --- a/ci/chains/gaia/v4.0.0/ibc-0/config/node_key.json +++ /dev/null @@ -1 +0,0 @@ -{"priv_key":{"type":"tendermint/PrivKeyEd25519","value":"SFxLztE9i3reZ1yF6QUN1VbLtNSU1Tr6sMAQ8YMUVhpQSacMoM8IOrsCpZi28Svst8tPZQOmC+0qwYCdw+4HVA=="}} \ No newline at end of file diff --git a/ci/chains/gaia/v4.0.0/ibc-0/config/priv_validator_key.json b/ci/chains/gaia/v4.0.0/ibc-0/config/priv_validator_key.json deleted file mode 100644 index e0b9bb1ffa..0000000000 --- a/ci/chains/gaia/v4.0.0/ibc-0/config/priv_validator_key.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "address": "3E23CAA4341C954786AE114941796B95B563EAFE", - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "T4iSX7p9Zfb9ZJS9KmsjSj3ik7Anman99Z9kjdZiNpc=" - }, - "priv_key": { - "type": "tendermint/PrivKeyEd25519", - "value": "Y4CimyecevlH/DCP1f3Dg8t1T4PVsYrIWR9CuqMxYy5PiJJfun1l9v1klL0qayNKPeKTsCeZqf31n2SN1mI2lw==" - } -} \ No newline at end of file diff --git 
a/ci/chains/gaia/v4.0.0/ibc-0/key_seed.json b/ci/chains/gaia/v4.0.0/ibc-0/key_seed.json deleted file mode 100644 index 145e5db799..0000000000 --- a/ci/chains/gaia/v4.0.0/ibc-0/key_seed.json +++ /dev/null @@ -1 +0,0 @@ -{"name":"user","type":"local","address":"cosmos15f5wafvkhp9yczqw55y996qnc37w8wm3jk82ht","pubkey":"cosmospub1addwnpepq2vk4rpd39kuzklulxx9llkcwcr628m6qe54yyuy0q4vpy30ujlas7cze0u","mnemonic":"split mixture arrive polar point entire luggage analyst thank toy dove olive correct choice day dawn increase canal disease price dentist alcohol sense item"} diff --git a/ci/chains/gaia/v4.0.0/ibc-0/keyring-test/a268eea596b84a4c080ea50852e813c47ce3bb71.address b/ci/chains/gaia/v4.0.0/ibc-0/keyring-test/a268eea596b84a4c080ea50852e813c47ce3bb71.address deleted file mode 100644 index 2805a8a34a..0000000000 --- a/ci/chains/gaia/v4.0.0/ibc-0/keyring-test/a268eea596b84a4c080ea50852e813c47ce3bb71.address +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wMi0wMyAwOTo1MDo1Ni45ODY3NTkxMDcgLTA1MDAgRVNUIG09KzAuMTM4NDA3MDgyIiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiSWFqZ25zVDZwZmZWT0ZaOSJ9.Au0cr2CiSPSn_L6qZ0JvOGph0M8733QvSmQv8AdCx3AF66JinLxobA.E2zOZfMPndHrF_iq.1NDhA42TdW6_gUPgsN2T4ycMY8-fd8oqZDrg-T_uz6Yt99z31zohe8vVLAMJJSg_Z9n82yv9wSk1_K8y_PqoRMKOI-nYq8ed5iiN63p72IczObndCd6IiEURlEE4I5CkuTeVjxuGNh_cqS2AFBjWnX0x5d27Jf__7YjkJG1OkaNX_8gVS0cRs7I0YTyYSs410vvK4HX1YEGNOWCT-SYX68alGuIMv7WecU1p3dT3hzChlw.tkE1W32vYIj2XMfyRb2A4Q \ No newline at end of file diff --git a/ci/chains/gaia/v4.0.0/ibc-0/keyring-test/cc9f853dea50a4530fcf574e45c9db41b5d21352.address b/ci/chains/gaia/v4.0.0/ibc-0/keyring-test/cc9f853dea50a4530fcf574e45c9db41b5d21352.address deleted file mode 100644 index ed76c2030a..0000000000 --- a/ci/chains/gaia/v4.0.0/ibc-0/keyring-test/cc9f853dea50a4530fcf574e45c9db41b5d21352.address +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wMi0wMyAwOTo1MDo1NS44MjExMDg5NTkgLTA1MDAgRVNUIG09KzAuMTU4NjEzMDQ3IiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiYjRzYUVOendvSkV3UVZtTSJ9.UZbI7pYbcENIDtx5Hsxh0Lci8Z_y1YwSdapRDeZE6BfKxUXlfAi2lA.ElMF8zQ1ot39SWZb.34UIgm11oh4WDlerswbv8vIJlTaicVP9PKH9WeeDzpppBLu01ge21YeEEBRm51-0EHBw8KSCT14CK20DbX_RpaQ-R3rB0A64orUhh-BdAouB_7URaAcJzl_kWHtrvDOvaOR-PIUA6PPJ3SNJEgodaYnXR8_zX21uLdcim6mEkTbhgYka80tpsMu0m8OP0jy8_IMAak9Obx80xPdMoK6EDKX94bH8IyJGMJ5idLjndURi9mvkDqS28nTU.8W6Eael_wwHzE1hfDjn4EA \ No newline at end of file diff --git a/ci/chains/gaia/v4.0.0/ibc-0/keyring-test/user.info b/ci/chains/gaia/v4.0.0/ibc-0/keyring-test/user.info deleted file mode 100644 index 6ab1f30d9f..0000000000 --- a/ci/chains/gaia/v4.0.0/ibc-0/keyring-test/user.info +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wMi0wMyAwOTo1MDo1Ni45NzUyMTQ0ODggLTA1MDAgRVNUIG09KzAuMTI2ODYyNDczIiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiX2VtTGdadWNCQ3k0VmhWZSJ9._2XDBX2GLEQPSwHnx3KUphkyVRiOPqONkUNgSZRKiPfX_tKZduKR6A.-ICHk1c9gjagrUBD.zR6_o_XKCUxiYNh6HK01pANtaTHQV__9ASbPTGoBH3rpNfCGYUysD5iWEL2HmvsLIamMRWDOC_FvmS1iop5_GUM_ZiMx4Y8rJa9Q69S34xtRdizjfr6rdTSeNlDHZSHosnJdEd7qWhWT8eppS5z_xPl0ENoD5KHCvvFU1SqxjQ0f1ufWwWnb-qvqHJgAEX9lQXyhXLR_DN4HMhGo7WyZVqN_xYqPvD7mBzKwQXBLMvfK8DI4SrDp1KZLhoHxnYOKaSIDjz0ntvmfRi-ISSpqJvImSw71GLZ2JcS5akx9M_TOXxi16AVW--YhQcDJahT8ATOyxFqj-hJIVbU6oUU1vk-74w2I76Y.O_5Pgh2CGkrCaJuNPOlBtA \ No newline at end of file diff --git a/ci/chains/gaia/v4.0.0/ibc-0/keyring-test/validator.info b/ci/chains/gaia/v4.0.0/ibc-0/keyring-test/validator.info deleted file mode 100644 index f266a39123..0000000000 
--- a/ci/chains/gaia/v4.0.0/ibc-0/keyring-test/validator.info +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wMi0wMyAwOTo1MDo1NS44MDUxMDMwOCAtMDUwMCBFU1QgbT0rMC4xNDI2MDcxMzQiLCJlbmMiOiJBMjU2R0NNIiwicDJjIjo4MTkyLCJwMnMiOiJfbkZVNVBkRlZLTXhkOEtlIn0.FQeeUZLy5q5dYQK_7lIglUGB_bm3EsqLcUosojtkxbqf2QDGCPgmLw.eouNcsGAooGILwmP.izRK3eBdPfQlgz7cLUbekdvyzSewoObzazGgfFZu2ffqKZqapij2ckzmGqOzJ4XKQrA-8FGWPq2ZANTAoOsCExFaV9ZhPBBgZGi1YOgHyVjaQ8xDkNLswZ16m5Z9P80HZR3-C5CixiqxQMCNUy-T7rj24aWvYaFxRVQ4QoonRA0O0WcGUZZUCRRFnBj0SBE4-i_NoOpGyM69KmuQmI7hW_dqmDxJqaFB9GhxtQdWF1dA5fClsCAj-s9dbBJOUzdd30NsZ0bSYKdohUJe9HBOiZrEKT_1uO0cYQMEqps7DcUqoIky8XAAv7153SrU8BzCUicgWRYWim1QLRtP8B6YWY_bP-EzAE7wThv-2iLNCjbU-cRD.3oMuadqNJ7yxVez17l_Bxg \ No newline at end of file diff --git a/ci/chains/gaia/v4.0.0/ibc-0/validator_seed.json b/ci/chains/gaia/v4.0.0/ibc-0/validator_seed.json deleted file mode 100644 index 0b90e2606c..0000000000 --- a/ci/chains/gaia/v4.0.0/ibc-0/validator_seed.json +++ /dev/null @@ -1 +0,0 @@ -{"name":"validator","type":"local","address":"cosmos1ej0c20022zj9xr702a8ytjwmgx6ayy6jwaqkjt","pubkey":"cosmospub1addwnpepqvvykczwc5vsjr5a26zsd93a5uwqz52qd0wrmzzptjstnffagk7mqqyjvjs","mnemonic":"front fix fury drip kind cluster hire hedgehog balcony gown oyster rug pond quality exact spread ship match kiwi gold beyond fame car ill"} diff --git a/ci/chains/gaia/v4.0.0/ibc-1/config/app.toml b/ci/chains/gaia/v4.0.0/ibc-1/config/app.toml deleted file mode 100644 index 54de73e392..0000000000 --- a/ci/chains/gaia/v4.0.0/ibc-1/config/app.toml +++ /dev/null @@ -1,152 +0,0 @@ -# This is a TOML config file. -# For more information, see https://github.com/toml-lang/toml - -############################################################################### -### Base Configuration ### -############################################################################### - -# The minimum gas prices a validator is willing to accept for processing a -# transaction. A transaction's fees must meet the minimum of any denomination -# specified in this config (e.g. 0.25token1;0.0001token2). -minimum-gas-prices = "" - -# default: the last 100 states are kept in addition to every 500th state; pruning at 10 block intervals -# nothing: all historic states will be saved, nothing will be deleted (i.e. archiving node) -# everything: all saved states will be deleted, storing only the current state; pruning at 10 block intervals -# custom: allow pruning options to be manually specified through 'pruning-keep-recent', 'pruning-keep-every', and 'pruning-interval' -pruning = "default" - -# These are applied if and only if the pruning strategy is custom. -pruning-keep-recent = "0" -pruning-keep-every = "0" -pruning-interval = "0" - -# HaltHeight contains a non-zero block height at which a node will gracefully -# halt and shutdown that can be used to assist upgrades and testing. -# -# Note: Commitment of state will be attempted on the corresponding block. -halt-height = 0 - -# HaltTime contains a non-zero minimum block time (in Unix seconds) at which -# a node will gracefully halt and shutdown that can be used to assist upgrades -# and testing. -# -# Note: Commitment of state will be attempted on the corresponding block. -halt-time = 0 - -# MinRetainBlocks defines the minimum block height offset from the current -# block being committed, such that all blocks past this offset are pruned -# from Tendermint. It is used as part of the process of determining the -# ResponseCommit.RetainHeight value during ABCI Commit. 
A value of 0 indicates -# that no blocks should be pruned. -# -# This configuration value is only responsible for pruning Tendermint blocks. -# It has no bearing on application state pruning which is determined by the -# "pruning-*" configurations. -# -# Note: Tendermint block pruning is dependant on this parameter in conunction -# with the unbonding (safety threshold) period, state pruning and state sync -# snapshot parameters to determine the correct minimum value of -# ResponseCommit.RetainHeight. -min-retain-blocks = 0 - -# InterBlockCache enables inter-block caching. -inter-block-cache = true - -# IndexEvents defines the set of events in the form {eventType}.{attributeKey}, -# which informs Tendermint what to index. If empty, all events will be indexed. -# -# Example: -# ["message.sender", "message.recipient"] -index-events = [] - -############################################################################### -### Telemetry Configuration ### -############################################################################### - -[telemetry] - -# Prefixed with keys to separate services. -service-name = "" - -# Enabled enables the application telemetry functionality. When enabled, -# an in-memory sink is also enabled by default. Operators may also enabled -# other sinks such as Prometheus. -enabled = false - -# Enable prefixing gauge values with hostname. -enable-hostname = false - -# Enable adding hostname to labels. -enable-hostname-label = false - -# Enable adding service to labels. -enable-service-label = false - -# PrometheusRetentionTime, when positive, enables a Prometheus metrics sink. -prometheus-retention-time = 0 - -# GlobalLabels defines a global set of name/value label tuples applied to all -# metrics emitted using the wrapper functions defined in telemetry package. -# -# Example: -# [["chain_id", "cosmoshub-1"]] -global-labels = [ -] - -############################################################################### -### API Configuration ### -############################################################################### - -[api] - -# Enable defines if the API server should be enabled. -enable = false - -# Swagger defines if swagger documentation should automatically be registered. -swagger = false - -# Address defines the API server to listen on. -address = "tcp://0.0.0.0:1317" - -# MaxOpenConnections defines the number of maximum open connections. -max-open-connections = 1000 - -# RPCReadTimeout defines the Tendermint RPC read timeout (in seconds). -rpc-read-timeout = 10 - -# RPCWriteTimeout defines the Tendermint RPC write timeout (in seconds). -rpc-write-timeout = 0 - -# RPCMaxBodyBytes defines the Tendermint maximum response body (in bytes). -rpc-max-body-bytes = 1000000 - -# EnableUnsafeCORS defines if CORS should be enabled (unsafe - use it at your own risk). -enabled-unsafe-cors = false - -############################################################################### -### gRPC Configuration ### -############################################################################### - -[grpc] - -# Enable defines if the gRPC server should be enabled. -enable = true - -# Address defines the gRPC server address to bind to. 
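# Illustrative sketch (hypothetical values): enabling the REST API alongside gRPC,
# e.g. for a node that also serves HTTP queries to a local client. The addresses
# are examples only; the defaults in this file are tcp://0.0.0.0:1317 (API) and
# 0.0.0.0:9090 (gRPC).
[api]
enable = true
swagger = true
address = "tcp://127.0.0.1:1317"
max-open-connections = 100
[grpc]
enable = true
address = "127.0.0.1:9090"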
-address = "0.0.0.0:9090" - -############################################################################### -### State Sync Configuration ### -############################################################################### - -# State sync snapshots allow other nodes to rapidly join the network without replaying historical -# blocks, instead downloading and applying a snapshot of the application state at a given height. -[state-sync] - -# snapshot-interval specifies the block interval at which local state sync snapshots are -# taken (0 to disable). Must be a multiple of pruning-keep-every. -snapshot-interval = 0 - -# snapshot-keep-recent specifies the number of recent snapshots to keep and serve (0 to keep all). -snapshot-keep-recent = 2 diff --git a/ci/chains/gaia/v4.0.0/ibc-1/config/config.toml b/ci/chains/gaia/v4.0.0/ibc-1/config/config.toml deleted file mode 100644 index c32f66cf63..0000000000 --- a/ci/chains/gaia/v4.0.0/ibc-1/config/config.toml +++ /dev/null @@ -1,393 +0,0 @@ -# This is a TOML config file. -# For more information, see https://github.com/toml-lang/toml - -# NOTE: Any path below can be absolute (e.g. "/var/myawesomeapp/data") or -# relative to the home directory (e.g. "data"). The home directory is -# "$HOME/.tendermint" by default, but could be changed via $TMHOME env variable -# or --home cmd flag. - -####################################################################### -### Main Base Config Options ### -####################################################################### - -# TCP or UNIX socket address of the ABCI application, -# or the name of an ABCI application compiled in with the Tendermint binary -proxy_app = "tcp://127.0.0.1:26658" - -# A custom human readable name for this node -moniker = "ibc-1" - -# If this node is many blocks behind the tip of the chain, FastSync -# allows them to catchup quickly by downloading blocks in parallel -# and verifying their commits -fast_sync = true - -# Database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb -# * goleveldb (github.com/syndtr/goleveldb - most popular implementation) -# - pure go -# - stable -# * cleveldb (uses levigo wrapper) -# - fast -# - requires gcc -# - use cleveldb build tag (go build -tags cleveldb) -# * boltdb (uses etcd's fork of bolt - github.com/etcd-io/bbolt) -# - EXPERIMENTAL -# - may be faster is some use-cases (random reads - indexer) -# - use boltdb build tag (go build -tags boltdb) -# * rocksdb (uses github.com/tecbot/gorocksdb) -# - EXPERIMENTAL -# - requires gcc -# - use rocksdb build tag (go build -tags rocksdb) -# * badgerdb (uses github.com/dgraph-io/badger) -# - EXPERIMENTAL -# - use badgerdb build tag (go build -tags badgerdb) -db_backend = "goleveldb" - -# Database directory -db_dir = "data" - -# Output level for logging, including package level options -log_level = "info" - -# Output format: 'plain' (colored text) or 'json' -log_format = "plain" - -##### additional base config options ##### - -# Path to the JSON file containing the initial validator set and other meta data -genesis_file = "config/genesis.json" - -# Path to the JSON file containing the private key to use as a validator in the consensus protocol -priv_validator_key_file = "config/priv_validator_key.json" - -# Path to the JSON file containing the last sign state of a validator -priv_validator_state_file = "data/priv_validator_state.json" - -# TCP or UNIX socket address for Tendermint to listen on for -# connections from an external PrivValidator process -priv_validator_laddr = "" - -# Path to the JSON 
file containing the private key to use for node authentication in the p2p protocol -node_key_file = "config/node_key.json" - -# Mechanism to connect to the ABCI application: socket | grpc -abci = "socket" - -# If true, query the ABCI app on connecting to a new peer -# so the app can decide if we should keep the connection or not -filter_peers = false - - -####################################################################### -### Advanced Configuration Options ### -####################################################################### - -####################################################### -### RPC Server Configuration Options ### -####################################################### -[rpc] - -# TCP or UNIX socket address for the RPC server to listen on -laddr = "tcp://0.0.0.0:26657" - -# A list of origins a cross-domain request can be executed from -# Default value '[]' disables cors support -# Use '["*"]' to allow any origin -cors_allowed_origins = [] - -# A list of methods the client is allowed to use with cross-domain requests -cors_allowed_methods = ["HEAD", "GET", "POST", ] - -# A list of non simple headers the client is allowed to use with cross-domain requests -cors_allowed_headers = ["Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time", ] - -# TCP or UNIX socket address for the gRPC server to listen on -# NOTE: This server only supports /broadcast_tx_commit -grpc_laddr = "" - -# Maximum number of simultaneous connections. -# Does not include RPC (HTTP&WebSocket) connections. See max_open_connections -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} -# 1024 - 40 - 10 - 50 = 924 = ~900 -grpc_max_open_connections = 900 - -# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool -unsafe = false - -# Maximum number of simultaneous connections (including WebSocket). -# Does not include gRPC connections. See grpc_max_open_connections -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} -# 1024 - 40 - 10 - 50 = 924 = ~900 -max_open_connections = 900 - -# Maximum number of unique clientIDs that can /subscribe -# If you're using /broadcast_tx_commit, set to the estimated maximum number -# of broadcast_tx_commit calls per block. -max_subscription_clients = 100 - -# Maximum number of unique queries a given client can /subscribe to -# If you're using GRPC (or Local RPC client) and /broadcast_tx_commit, set to -# the estimated # maximum number of broadcast_tx_commit calls per block. -max_subscriptions_per_client = 5 - -# How long to wait for a tx to be committed during /broadcast_tx_commit. -# WARNING: Using a value larger than 10s will result in increasing the -# global HTTP write timeout, which applies to all connections and endpoints. -# See https://github.com/tendermint/tendermint/issues/3435 -timeout_broadcast_tx_commit = "10s" - -# Maximum size of request body, in bytes -max_body_bytes = 1000000 - -# Maximum size of request header, in bytes -max_header_bytes = 1048576 - -# The path to a file containing certificate that is used to create the HTTPS server. -# Might be either absolute path or path related to Tendermint's config directory. 
-# If the certificate is signed by a certificate authority, -# the certFile should be the concatenation of the server's certificate, any intermediates, -# and the CA's certificate. -# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. -# Otherwise, HTTP server is run. -tls_cert_file = "" - -# The path to a file containing matching private key that is used to create the HTTPS server. -# Might be either absolute path or path related to Tendermint's config directory. -# NOTE: both tls-cert-file and tls-key-file must be present for Tendermint to create HTTPS server. -# Otherwise, HTTP server is run. -tls_key_file = "" - -# pprof listen address (https://golang.org/pkg/net/http/pprof) -pprof_laddr = "localhost:6060" - -####################################################### -### P2P Configuration Options ### -####################################################### -[p2p] - -# Address to listen for incoming connections -laddr = "tcp://0.0.0.0:26656" - -# Address to advertise to peers for them to dial -# If empty, will use the same port as the laddr, -# and will introspect on the listener or use UPnP -# to figure out the address. -external_address = "" - -# Comma separated list of seed nodes to connect to -seeds = "" - -# Comma separated list of nodes to keep persistent connections to -persistent_peers = "" - -# UPNP port forwarding -upnp = false - -# Path to address book -addr_book_file = "config/addrbook.json" - -# Set true for strict address routability rules -# Set false for private or local networks -addr_book_strict = true - -# Maximum number of inbound peers -max_num_inbound_peers = 40 - -# Maximum number of outbound peers to connect to, excluding persistent peers -max_num_outbound_peers = 10 - -# List of node IDs, to which a connection will be (re)established ignoring any existing limits -unconditional_peer_ids = "" - -# Maximum pause when redialing a persistent peer (if zero, exponential backoff is used) -persistent_peers_max_dial_period = "0s" - -# Time to wait before flushing messages out on the connection -flush_throttle_timeout = "100ms" - -# Maximum size of a message packet payload, in bytes -max_packet_msg_payload_size = 1024 - -# Rate at which packets can be sent, in bytes/second -send_rate = 5120000 - -# Rate at which packets can be received, in bytes/second -recv_rate = 5120000 - -# Set true to enable the peer-exchange reactor -pex = true - -# Seed mode, in which node constantly crawls the network and looks for -# peers. If another node asks it for addresses, it responds and disconnects. -# -# Does not work if the peer-exchange reactor is disabled. -seed_mode = false - -# Comma separated list of peer IDs to keep private (will not be gossiped to other peers) -private_peer_ids = "" - -# Toggle to disable guard against peers connecting from the same ip. -allow_duplicate_ip = false - -# Peer connection configuration. -handshake_timeout = "20s" -dial_timeout = "3s" - -####################################################### -### Mempool Configuration Option ### -####################################################### -[mempool] - -recheck = true -broadcast = true -wal_dir = "" - -# Maximum number of transactions in the mempool -size = 5000 - -# Limit the total size of all txs in the mempool. -# This only accounts for raw transactions (e.g. given 1MB transactions and -# max_txs_bytes=5MB, mempool will only accept 5 transactions). 
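# Worked sizing example for the mempool caps nearby (defaults taken from this file,
# reasoning added for illustration): with max_txs_bytes = 1073741824 (1 GiB) and
# max_tx_bytes = 1048576 (1 MiB), the byte cap admits at most
# 1073741824 / 1048576 = 1024 maximum-size transactions, so it binds before the
# count cap of size = 5000; for small ~250-byte transfers, 5000 txs occupy only
# about 1.25 MB, so the count cap binds first. The "1MB / 5MB" figures in the
# comment above are just an example ratio.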
-max_txs_bytes = 1073741824 - -# Size of the cache (used to filter transactions we saw earlier) in transactions -cache_size = 10000 - -# Do not remove invalid transactions from the cache (default: false) -# Set to true if it's not possible for any invalid transaction to become valid -# again in the future. -keep-invalid-txs-in-cache = false - -# Maximum size of a single transaction. -# NOTE: the max size of a tx transmitted over the network is {max_tx_bytes}. -max_tx_bytes = 1048576 - -# Maximum size of a batch of transactions to send to a peer -# Including space needed by encoding (one varint per transaction). -# XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796 -max_batch_bytes = 0 - -####################################################### -### State Sync Configuration Options ### -####################################################### -[statesync] -# State sync rapidly bootstraps a new node by discovering, fetching, and restoring a state machine -# snapshot from peers instead of fetching and replaying historical blocks. Requires some peers in -# the network to take and serve state machine snapshots. State sync is not attempted if the node -# has any local state (LastBlockHeight > 0). The node will have a truncated block history, -# starting from the height of the snapshot. -enable = false - -# RPC servers (comma-separated) for light client verification of the synced state machine and -# retrieval of state data for node bootstrapping. Also needs a trusted height and corresponding -# header hash obtained from a trusted source, and a period during which validators can be trusted. -# -# For Cosmos SDK-based chains, trust_period should usually be about 2/3 of the unbonding time (~2 -# weeks) during which they can be financially punished (slashed) for misbehavior. -rpc_servers = "" -trust_height = 0 -trust_hash = "" -trust_period = "168h0m0s" - -# Time to spend discovering snapshots before initiating a restore. -discovery_time = "15s" - -# Temporary directory for state sync snapshot chunks, defaults to the OS tempdir (typically /tmp). -# Will create a new, randomly named directory within, and remove it when done. -temp_dir = "" - -####################################################### -### Fast Sync Configuration Connections ### -####################################################### -[fastsync] - -# Fast Sync version to use: -# 1) "v0" (default) - the legacy fast sync implementation -# 2) "v1" - refactor of v0 version for better testability -# 2) "v2" - complete redesign of v0, optimized for testability & readability -version = "v0" - -####################################################### -### Consensus Configuration Options ### -####################################################### -[consensus] - -wal_file = "data/cs.wal/wal" - -# How long we wait for a proposal block before prevoting nil -timeout_propose = "1s" -# How much timeout_propose increases with each round -timeout_propose_delta = "500ms" -# How long we wait after receiving +2/3 prevotes for “anything” (ie. not a single block or nil) -timeout_prevote = "1s" -# How much the timeout_prevote increases with each round -timeout_prevote_delta = "500ms" -# How long we wait after receiving +2/3 precommits for “anything” (ie. 
not a single block or nil) -timeout_precommit = "1s" -# How much the timeout_precommit increases with each round -timeout_precommit_delta = "500ms" -# How long we wait after committing a block, before starting on the new -# height (this gives us a chance to receive some more precommits, even -# though we already have +2/3). -timeout_commit = "1s" - -# How many blocks to look back to check existence of the node's consensus votes before joining consensus -# When non-zero, the node will panic upon restart -# if the same consensus key was used to sign {double_sign_check_height} last blocks. -# So, validators should stop the state machine, wait for some blocks, and then restart the state machine to avoid panic. -double_sign_check_height = 0 - -# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) -skip_timeout_commit = false - -# EmptyBlocks mode and possible interval between empty blocks -create_empty_blocks = true -create_empty_blocks_interval = "0s" - -# Reactor sleep duration parameters -peer_gossip_sleep_duration = "100ms" -peer_query_maj23_sleep_duration = "2s" - -####################################################### -### Transaction Indexer Configuration Options ### -####################################################### -[tx_index] - -# What indexer to use for transactions -# -# The application will set which txs to index. In some cases a node operator will be able -# to decide which txs to index based on configuration set in the application. -# -# Options: -# 1) "null" -# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). -# - When "kv" is chosen "tx.height" and "tx.hash" will always be indexed. -indexer = "kv" - -####################################################### -### Instrumentation Configuration Options ### -####################################################### -[instrumentation] - -# When true, Prometheus metrics are served under /metrics on -# PrometheusListenAddr. -# Check out the documentation for the list of available metrics. -prometheus = false - -# Address to listen for Prometheus collector(s) connections -prometheus_listen_addr = ":26660" - -# Maximum number of simultaneous connections. -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. 
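# Illustrative sketch (hypothetical values): enabling Prometheus metrics on this
# node. With these settings the node serves /metrics on port 26660, allows up to
# 10 concurrent scrapers, and prefixes metric names with the namespace
# (e.g. tendermint_consensus_height).
[instrumentation]
prometheus = true
prometheus_listen_addr = ":26660"
max_open_connections = 10
namespace = "tendermint"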
-max_open_connections = 3 - -# Instrumentation namespace -namespace = "tendermint" diff --git a/ci/chains/gaia/v4.0.0/ibc-1/config/genesis.json b/ci/chains/gaia/v4.0.0/ibc-1/config/genesis.json deleted file mode 100644 index 2c49ab669c..0000000000 --- a/ci/chains/gaia/v4.0.0/ibc-1/config/genesis.json +++ /dev/null @@ -1,294 +0,0 @@ -{ - "genesis_time": "2021-02-03T14:51:06.54169119Z", - "chain_id": "ibc-1", - "initial_height": "1", - "consensus_params": { - "block": { - "max_bytes": "22020096", - "max_gas": "-1", - "time_iota_ms": "1000" - }, - "evidence": { - "max_age_num_blocks": "100000", - "max_age_duration": "172800000000000", - "max_bytes": "1048576" - }, - "validator": { - "pub_key_types": [ - "ed25519" - ] - }, - "version": {} - }, - "app_hash": "", - "app_state": { - "auth": { - "params": { - "max_memo_characters": "256", - "tx_sig_limit": "7", - "tx_size_cost_per_byte": "10", - "sig_verify_cost_ed25519": "590", - "sig_verify_cost_secp256k1": "1000" - }, - "accounts": [ - { - "@type": "/cosmos.auth.v1beta1.BaseAccount", - "address": "cosmos1vwxc6zytm7evqagh27keefldqtnpntrkhdqrgy", - "pub_key": null, - "account_number": "0", - "sequence": "0" - }, - { - "@type": "/cosmos.auth.v1beta1.BaseAccount", - "address": "cosmos1l5akq8gvv7c35l3cmgu3xvje6m7df7tf5m8hm3", - "pub_key": null, - "account_number": "0", - "sequence": "0" - } - ] - }, - "bank": { - "params": { - "send_enabled": [], - "default_send_enabled": true - }, - "balances": [ - { - "address": "cosmos1vwxc6zytm7evqagh27keefldqtnpntrkhdqrgy", - "coins": [ - { - "denom": "samoleans", - "amount": "100000000000" - }, - { - "denom": "stake", - "amount": "100000000000" - } - ] - }, - { - "address": "cosmos1l5akq8gvv7c35l3cmgu3xvje6m7df7tf5m8hm3", - "coins": [ - { - "denom": "stake", - "amount": "100000000000" - } - ] - } - ], - "supply": [ - { - "denom": "samoleans", - "amount": "100000000000" - }, - { - "denom": "stake", - "amount": "200000000000" - } - ], - "denom_metadata": [] - }, - "capability": { - "index": "1", - "owners": [] - }, - "crisis": { - "constant_fee": { - "denom": "stake", - "amount": "1000" - } - }, - "distribution": { - "params": { - "community_tax": "0.020000000000000000", - "base_proposer_reward": "0.010000000000000000", - "bonus_proposer_reward": "0.040000000000000000", - "withdraw_addr_enabled": true - }, - "fee_pool": { - "community_pool": [] - }, - "delegator_withdraw_infos": [], - "previous_proposer": "", - "outstanding_rewards": [], - "validator_accumulated_commissions": [], - "validator_historical_rewards": [], - "validator_current_rewards": [], - "delegator_starting_infos": [], - "validator_slash_events": [] - }, - "evidence": { - "evidence": [] - }, - "genutil": { - "gen_txs": [ - { - "body": { - "messages": [ - { - "@type": "/cosmos.staking.v1beta1.MsgCreateValidator", - "description": { - "moniker": "ibc-1", - "identity": "", - "website": "", - "security_contact": "", - "details": "" - }, - "commission": { - "rate": "0.100000000000000000", - "max_rate": "0.200000000000000000", - "max_change_rate": "0.010000000000000000" - }, - "min_self_delegation": "1", - "delegator_address": "cosmos1l5akq8gvv7c35l3cmgu3xvje6m7df7tf5m8hm3", - "validator_address": "cosmosvaloper1l5akq8gvv7c35l3cmgu3xvje6m7df7tf30nzhz", - "pubkey": { - "@type": "/cosmos.crypto.ed25519.PubKey", - "key": "GuS1ygd8w+B00mMo7N/+PqNBH7jLbkJjGNo9eA/oW34=" - }, - "value": { - "denom": "stake", - "amount": "100000000000" - } - } - ], - "memo": "b6f08e59e6071a9ddfb7f66d08723b1bef442f71@192.168.1.213:26656", - "timeout_height": "0", - 
"extension_options": [], - "non_critical_extension_options": [] - }, - "auth_info": { - "signer_infos": [ - { - "public_key": { - "@type": "/cosmos.crypto.secp256k1.PubKey", - "key": "AgQZuntbPUcfQZHy0NhGerdUa6dmFxcZO8rGkLPa3zrB" - }, - "mode_info": { - "single": { - "mode": "SIGN_MODE_DIRECT" - } - }, - "sequence": "0" - } - ], - "fee": { - "amount": [], - "gas_limit": "200000", - "payer": "", - "granter": "" - } - }, - "signatures": [ - "WXaiZujqnOXrUUEBxkofpTZlxrUV9Nfn90GwhJaQ/4MsJKx95Yhang0BNHy68GDLNiWzGjYS2Kz4c5V/lRcjGA==" - ] - } - ] - }, - "gov": { - "starting_proposal_id": "1", - "deposits": [], - "votes": [], - "proposals": [], - "deposit_params": { - "min_deposit": [ - { - "denom": "stake", - "amount": "10000000" - } - ], - "max_deposit_period": "172800s" - }, - "voting_params": { - "voting_period": "172800s" - }, - "tally_params": { - "quorum": "0.334000000000000000", - "threshold": "0.500000000000000000", - "veto_threshold": "0.334000000000000000" - } - }, - "ibc": { - "client_genesis": { - "clients": [], - "clients_consensus": [], - "clients_metadata": [], - "params": { - "allowed_clients": [ - "06-solomachine", - "07-tendermint" - ] - }, - "create_localhost": false, - "next_client_sequence": "0" - }, - "connection_genesis": { - "connections": [], - "client_connection_paths": [], - "next_connection_sequence": "0" - }, - "channel_genesis": { - "channels": [], - "acknowledgements": [], - "commitments": [], - "receipts": [], - "send_sequences": [], - "recv_sequences": [], - "ack_sequences": [], - "next_channel_sequence": "0" - } - }, - "mint": { - "minter": { - "inflation": "0.130000000000000000", - "annual_provisions": "0.000000000000000000" - }, - "params": { - "mint_denom": "stake", - "inflation_rate_change": "0.130000000000000000", - "inflation_max": "0.200000000000000000", - "inflation_min": "0.070000000000000000", - "goal_bonded": "0.670000000000000000", - "blocks_per_year": "6311520" - } - }, - "params": null, - "slashing": { - "params": { - "signed_blocks_window": "100", - "min_signed_per_window": "0.500000000000000000", - "downtime_jail_duration": "600s", - "slash_fraction_double_sign": "0.050000000000000000", - "slash_fraction_downtime": "0.010000000000000000" - }, - "signing_infos": [], - "missed_blocks": [] - }, - "staking": { - "params": { - "unbonding_time": "1814400s", - "max_validators": 100, - "max_entries": 7, - "historical_entries": 10000, - "bond_denom": "stake" - }, - "last_total_power": "0", - "last_validator_powers": [], - "validators": [], - "delegations": [], - "unbonding_delegations": [], - "redelegations": [], - "exported": false - }, - "transfer": { - "port_id": "transfer", - "denom_traces": [], - "params": { - "send_enabled": true, - "receive_enabled": true - } - }, - "upgrade": {}, - "vesting": {} - } -} \ No newline at end of file diff --git a/ci/chains/gaia/v4.0.0/ibc-1/config/gentx/gentx-b6f08e59e6071a9ddfb7f66d08723b1bef442f71.json b/ci/chains/gaia/v4.0.0/ibc-1/config/gentx/gentx-b6f08e59e6071a9ddfb7f66d08723b1bef442f71.json deleted file mode 100644 index 82f090e98e..0000000000 --- a/ci/chains/gaia/v4.0.0/ibc-1/config/gentx/gentx-b6f08e59e6071a9ddfb7f66d08723b1bef442f71.json +++ /dev/null @@ -1 +0,0 @@ 
-{"body":{"messages":[{"@type":"/cosmos.staking.v1beta1.MsgCreateValidator","description":{"moniker":"ibc-1","identity":"","website":"","security_contact":"","details":""},"commission":{"rate":"0.100000000000000000","max_rate":"0.200000000000000000","max_change_rate":"0.010000000000000000"},"min_self_delegation":"1","delegator_address":"cosmos1l5akq8gvv7c35l3cmgu3xvje6m7df7tf5m8hm3","validator_address":"cosmosvaloper1l5akq8gvv7c35l3cmgu3xvje6m7df7tf30nzhz","pubkey":{"@type":"/cosmos.crypto.ed25519.PubKey","key":"GuS1ygd8w+B00mMo7N/+PqNBH7jLbkJjGNo9eA/oW34="},"value":{"denom":"stake","amount":"100000000000"}}],"memo":"b6f08e59e6071a9ddfb7f66d08723b1bef442f71@192.168.1.213:26656","timeout_height":"0","extension_options":[],"non_critical_extension_options":[]},"auth_info":{"signer_infos":[{"public_key":{"@type":"/cosmos.crypto.secp256k1.PubKey","key":"AgQZuntbPUcfQZHy0NhGerdUa6dmFxcZO8rGkLPa3zrB"},"mode_info":{"single":{"mode":"SIGN_MODE_DIRECT"}},"sequence":"0"}],"fee":{"amount":[],"gas_limit":"200000","payer":"","granter":""}},"signatures":["WXaiZujqnOXrUUEBxkofpTZlxrUV9Nfn90GwhJaQ/4MsJKx95Yhang0BNHy68GDLNiWzGjYS2Kz4c5V/lRcjGA=="]} diff --git a/ci/chains/gaia/v4.0.0/ibc-1/config/node_key.json b/ci/chains/gaia/v4.0.0/ibc-1/config/node_key.json deleted file mode 100644 index 479ecaf48f..0000000000 --- a/ci/chains/gaia/v4.0.0/ibc-1/config/node_key.json +++ /dev/null @@ -1 +0,0 @@ -{"priv_key":{"type":"tendermint/PrivKeyEd25519","value":"AQYU4jfG59xE9H1aL/u+uAgSRlzJM499W9lp6B9O5TDkkvxTFY45gBOVxKRd97cOM922/yJHYEhfRoHs3ooXQQ=="}} \ No newline at end of file diff --git a/ci/chains/gaia/v4.0.0/ibc-1/config/priv_validator_key.json b/ci/chains/gaia/v4.0.0/ibc-1/config/priv_validator_key.json deleted file mode 100644 index 5fb082a9f8..0000000000 --- a/ci/chains/gaia/v4.0.0/ibc-1/config/priv_validator_key.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "address": "83241810ACDABC2880B48E29AF113AC5907A20DA", - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "GuS1ygd8w+B00mMo7N/+PqNBH7jLbkJjGNo9eA/oW34=" - }, - "priv_key": { - "type": "tendermint/PrivKeyEd25519", - "value": "4Vuni8zdWWJQdT7iwXrpxuPFSlwX9s9P8TBBTCXRuxwa5LXKB3zD4HTSYyjs3/4+o0EfuMtuQmMY2j14D+hbfg==" - } -} \ No newline at end of file diff --git a/ci/chains/gaia/v4.0.0/ibc-1/key_seed.json b/ci/chains/gaia/v4.0.0/ibc-1/key_seed.json deleted file mode 100644 index 2224cd7eec..0000000000 --- a/ci/chains/gaia/v4.0.0/ibc-1/key_seed.json +++ /dev/null @@ -1 +0,0 @@ -{"name":"user","type":"local","address":"cosmos1vwxc6zytm7evqagh27keefldqtnpntrkhdqrgy","pubkey":"cosmospub1addwnpepqtwa3rh2rl90q2ctm9yvp565vg7mpkfcpvej5t3kxe3shk3lxsx7zugmyzf","mnemonic":"vintage protect level embrace pencil often casual couple allow snake bronze quality delay earn nothing approve embrace margin jelly private obvious color bomb float"} diff --git a/ci/chains/gaia/v4.0.0/ibc-1/keyring-test/638d8d088bdfb2c0751757ad9ca7ed02e619ac76.address b/ci/chains/gaia/v4.0.0/ibc-1/keyring-test/638d8d088bdfb2c0751757ad9ca7ed02e619ac76.address deleted file mode 100644 index 8ca62ee00a..0000000000 --- a/ci/chains/gaia/v4.0.0/ibc-1/keyring-test/638d8d088bdfb2c0751757ad9ca7ed02e619ac76.address +++ /dev/null @@ -1 +0,0 @@ 
-eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wMi0wMyAwOTo1MTowOC45MDU0NDAzMjMgLTA1MDAgRVNUIG09KzAuMTYxMDgyMzc2IiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiM2liRE0teEpreEIyTGNxcyJ9.rsJFZy_PrEDTPLPX2LNKgonmOv3kxFkRuTuDjLgZehSamY7JIZs69g.M1lDKbntKiMxA1RS.2cVYYt5cql1jk7e8SJ62bFOF6ARdV4GlPbi6Z3cc6ua-tveq46qDB7gzLGYWFkEPWl96-CyjbTIi75p5fn0moBiqLX9P_Y97mQPgXGfQjnbjaNPRk1zDREICdCpPvKdqjWD5gQJQBIjJzKurGen3l3tPRj3BrRtY5_Pv0stjSMRegNKErnZ6IE1fsxi_8uiuQDfdhFwjTm9bPLPomYRSSGOhPmahQzsEuv7Pw1ePba0hMA.MlBJiRSqxY7nuhkDai2rRQ \ No newline at end of file diff --git a/ci/chains/gaia/v4.0.0/ibc-1/keyring-test/fd3b601d0c67b11a7e38da39133259d6fcd4f969.address b/ci/chains/gaia/v4.0.0/ibc-1/keyring-test/fd3b601d0c67b11a7e38da39133259d6fcd4f969.address deleted file mode 100644 index 458fed6584..0000000000 --- a/ci/chains/gaia/v4.0.0/ibc-1/keyring-test/fd3b601d0c67b11a7e38da39133259d6fcd4f969.address +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wMi0wMyAwOTo1MTowNy43MTcxODgyNzcgLTA1MDAgRVNUIG09KzAuMTYyMzI1MTI1IiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiX2lHWmtzNFAyZVlKUjlsViJ9.XyxB43Bi5WC5hitA_xVez_ZULcED4AwogGLxzE4CWQ_JEN3M4kQu6g.JEA6TzEZep_qpLUw.zHOpL5lR2hKKHspGA3zPL0H3s4B3Ru-8F0G7DWfwSbGuXSfu4AtjxOys0bbyB-DEBd9x-l4OpFyrG-ccwHNNKp5AAX99kVLbJW-c2hz19e0iaBrq83ycsQ0anvwy1yckaujDxrJ1hlG0ASEvcKQqWViq1u_ujuG0HX2N6qsCMtfSHR0Pi2-JryGlaKZKrXoKLz02Fu_h6jQNpIQ7oHHos0NTzA-KJxsUcqKHt5r1ScbPEKxr-2SZ4MTo.kz2rKiUHOihpTQYjKPvaOw \ No newline at end of file diff --git a/ci/chains/gaia/v4.0.0/ibc-1/keyring-test/user.info b/ci/chains/gaia/v4.0.0/ibc-1/keyring-test/user.info deleted file mode 100644 index fe235e09cd..0000000000 --- a/ci/chains/gaia/v4.0.0/ibc-1/keyring-test/user.info +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wMi0wMyAwOTo1MTowOC44ODkyMzg0MDMgLTA1MDAgRVNUIG09KzAuMTQ0ODgwNDYzIiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiTGhQYXZUdDRxMlMwVWw1ViJ9.C397HbHnLZQfC5K9cNM3NHWcoLZhiAElUxQQVH2nHOZCOVZAE2aARg.F6B2C7YoAIIVxZSi.0Gqp7ZcXWIDsGQL3K2n3DIZ4VwcRfSXRijIk1v8ZuKbH02H0EBAopKDIszA-IbYNwlllE1_58tuFde143yKfbQ3G04upv5KYBVr5VCpun2jBtO6DcUkzbyGgBQiIgJ54bnfrw7AaQrTzfgtequY0jidm8sOhMKYW68zyBDPBhcAxWcxmODvq6brYq2-xyvtHFJ5pMJKymmdvRyGugL1YkawLjJkhfkWgCz6lys9DZ9kQvvCJdx47xpufniSxm72Nia0bvitSC67zp6Vs1UDovisX7a_P9wFIsCCr3x9iBDrgjV3RhUjoWuVDC2kR731Bs1XbQMJBIF5ovWgwgIunaQb4muNhbmE.gvVp69e7BLcqCc594sV7jQ \ No newline at end of file diff --git a/ci/chains/gaia/v4.0.0/ibc-1/keyring-test/validator.info b/ci/chains/gaia/v4.0.0/ibc-1/keyring-test/validator.info deleted file mode 100644 index a4abaf81cb..0000000000 --- a/ci/chains/gaia/v4.0.0/ibc-1/keyring-test/validator.info +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wMi0wMyAwOTo1MTowNy43MDEwMjA5OTYgLTA1MDAgRVNUIG09KzAuMTQ2MTU3ODI3IiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiaTN6aU03SlFHWGI4RnlITyJ9.aFLyxaSlXM2028jnoET9m66Kv4OG0pNXf-daElLM9AJ3ONm7yMzWwA.gZGLlKKLxfDTcZ0x.HnlMnQ6zuqPtdF3YbbTrptmN9YgRrCh58QFCbDB5oEE4udj0hPZDhIGv7PFYGddSV04oEcgqQuo7h0x-UG119wLtfm_1oXvaDWIJY8vSfMbX_ZItOQahpfpSjE2-eU5ZqFKsVOoxV8o1vw5XqUHsvNRVWM9CoeM-bB3emHLKySyq-Pgzh26jp-HF27Ive8kEYg59rWFq62kWfaaAveBqVHJUccUlo9vtHLwAEd9doUaFj97zS26-8_grRfNgwLZjyHFFWbGlbfGLsn3yQNTFWNtTJ4VW_ZwvEuAxJ69Gy33evTJbYQawXB-PNrq907lIfzbFlPtQNmKq6Ou7M1ofnWmJJNfRXwUyF933mhJp-cxwjvWv.K3KPLo19G-up65pcbtCnHw \ No newline at end of file diff --git a/ci/chains/gaia/v4.0.0/ibc-1/validator_seed.json b/ci/chains/gaia/v4.0.0/ibc-1/validator_seed.json deleted file mode 100644 index 
ae03aa3240..0000000000 --- a/ci/chains/gaia/v4.0.0/ibc-1/validator_seed.json +++ /dev/null @@ -1 +0,0 @@ -{"name":"validator","type":"local","address":"cosmos1l5akq8gvv7c35l3cmgu3xvje6m7df7tf5m8hm3","pubkey":"cosmospub1addwnpepqgzpnwnmtv75w86pj8edpkzx02m4g6a8vct3wxfmetrfpv76muavz507azh","mnemonic":"column pupil truly plastic trial profit arena wonder alien worry food tonight alien member merit kitchen actress inside blossom clump age resource repair street"} diff --git a/ci/chains/gaia/v4.1.0/ibc-0/config/addrbook.json b/ci/chains/gaia/v4.1.0/ibc-0/config/addrbook.json deleted file mode 100644 index df217df04b..0000000000 --- a/ci/chains/gaia/v4.1.0/ibc-0/config/addrbook.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "key": "a345b77cdfaa7499fb5d6514", - "addrs": [] -} \ No newline at end of file diff --git a/ci/chains/gaia/v4.1.0/ibc-0/config/app.toml b/ci/chains/gaia/v4.1.0/ibc-0/config/app.toml deleted file mode 100644 index 54de73e392..0000000000 --- a/ci/chains/gaia/v4.1.0/ibc-0/config/app.toml +++ /dev/null @@ -1,152 +0,0 @@ -# This is a TOML config file. -# For more information, see https://github.com/toml-lang/toml - -############################################################################### -### Base Configuration ### -############################################################################### - -# The minimum gas prices a validator is willing to accept for processing a -# transaction. A transaction's fees must meet the minimum of any denomination -# specified in this config (e.g. 0.25token1;0.0001token2). -minimum-gas-prices = "" - -# default: the last 100 states are kept in addition to every 500th state; pruning at 10 block intervals -# nothing: all historic states will be saved, nothing will be deleted (i.e. archiving node) -# everything: all saved states will be deleted, storing only the current state; pruning at 10 block intervals -# custom: allow pruning options to be manually specified through 'pruning-keep-recent', 'pruning-keep-every', and 'pruning-interval' -pruning = "default" - -# These are applied if and only if the pruning strategy is custom. -pruning-keep-recent = "0" -pruning-keep-every = "0" -pruning-interval = "0" - -# HaltHeight contains a non-zero block height at which a node will gracefully -# halt and shutdown that can be used to assist upgrades and testing. -# -# Note: Commitment of state will be attempted on the corresponding block. -halt-height = 0 - -# HaltTime contains a non-zero minimum block time (in Unix seconds) at which -# a node will gracefully halt and shutdown that can be used to assist upgrades -# and testing. -# -# Note: Commitment of state will be attempted on the corresponding block. -halt-time = 0 - -# MinRetainBlocks defines the minimum block height offset from the current -# block being committed, such that all blocks past this offset are pruned -# from Tendermint. It is used as part of the process of determining the -# ResponseCommit.RetainHeight value during ABCI Commit. A value of 0 indicates -# that no blocks should be pruned. -# -# This configuration value is only responsible for pruning Tendermint blocks. -# It has no bearing on application state pruning which is determined by the -# "pruning-*" configurations. -# -# Note: Tendermint block pruning is dependant on this parameter in conunction -# with the unbonding (safety threshold) period, state pruning and state sync -# snapshot parameters to determine the correct minimum value of -# ResponseCommit.RetainHeight. 
-min-retain-blocks = 0 - -# InterBlockCache enables inter-block caching. -inter-block-cache = true - -# IndexEvents defines the set of events in the form {eventType}.{attributeKey}, -# which informs Tendermint what to index. If empty, all events will be indexed. -# -# Example: -# ["message.sender", "message.recipient"] -index-events = [] - -############################################################################### -### Telemetry Configuration ### -############################################################################### - -[telemetry] - -# Prefixed with keys to separate services. -service-name = "" - -# Enabled enables the application telemetry functionality. When enabled, -# an in-memory sink is also enabled by default. Operators may also enabled -# other sinks such as Prometheus. -enabled = false - -# Enable prefixing gauge values with hostname. -enable-hostname = false - -# Enable adding hostname to labels. -enable-hostname-label = false - -# Enable adding service to labels. -enable-service-label = false - -# PrometheusRetentionTime, when positive, enables a Prometheus metrics sink. -prometheus-retention-time = 0 - -# GlobalLabels defines a global set of name/value label tuples applied to all -# metrics emitted using the wrapper functions defined in telemetry package. -# -# Example: -# [["chain_id", "cosmoshub-1"]] -global-labels = [ -] - -############################################################################### -### API Configuration ### -############################################################################### - -[api] - -# Enable defines if the API server should be enabled. -enable = false - -# Swagger defines if swagger documentation should automatically be registered. -swagger = false - -# Address defines the API server to listen on. -address = "tcp://0.0.0.0:1317" - -# MaxOpenConnections defines the number of maximum open connections. -max-open-connections = 1000 - -# RPCReadTimeout defines the Tendermint RPC read timeout (in seconds). -rpc-read-timeout = 10 - -# RPCWriteTimeout defines the Tendermint RPC write timeout (in seconds). -rpc-write-timeout = 0 - -# RPCMaxBodyBytes defines the Tendermint maximum response body (in bytes). -rpc-max-body-bytes = 1000000 - -# EnableUnsafeCORS defines if CORS should be enabled (unsafe - use it at your own risk). -enabled-unsafe-cors = false - -############################################################################### -### gRPC Configuration ### -############################################################################### - -[grpc] - -# Enable defines if the gRPC server should be enabled. -enable = true - -# Address defines the gRPC server address to bind to. -address = "0.0.0.0:9090" - -############################################################################### -### State Sync Configuration ### -############################################################################### - -# State sync snapshots allow other nodes to rapidly join the network without replaying historical -# blocks, instead downloading and applying a snapshot of the application state at a given height. -[state-sync] - -# snapshot-interval specifies the block interval at which local state sync snapshots are -# taken (0 to disable). Must be a multiple of pruning-keep-every. -snapshot-interval = 0 - -# snapshot-keep-recent specifies the number of recent snapshots to keep and serve (0 to keep all). 
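# Illustrative sketch (hypothetical values): serving state sync snapshots from this
# node. snapshot-interval must be a multiple of pruning-keep-every, so with custom
# pruning that keeps every 500th state, an interval of 1000 (= 2 x 500) satisfies
# the constraint, while snapshot-keep-recent = 2 bounds disk use.
pruning = "custom"
pruning-keep-recent = "100"
pruning-keep-every = "500"
pruning-interval = "10"
[state-sync]
snapshot-interval = 1000
snapshot-keep-recent = 2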
-snapshot-keep-recent = 2 diff --git a/ci/chains/gaia/v4.1.0/ibc-0/config/config.toml b/ci/chains/gaia/v4.1.0/ibc-0/config/config.toml deleted file mode 100644 index 4714078da1..0000000000 --- a/ci/chains/gaia/v4.1.0/ibc-0/config/config.toml +++ /dev/null @@ -1,393 +0,0 @@ -# This is a TOML config file. -# For more information, see https://github.com/toml-lang/toml - -# NOTE: Any path below can be absolute (e.g. "/var/myawesomeapp/data") or -# relative to the home directory (e.g. "data"). The home directory is -# "$HOME/.tendermint" by default, but could be changed via $TMHOME env variable -# or --home cmd flag. - -####################################################################### -### Main Base Config Options ### -####################################################################### - -# TCP or UNIX socket address of the ABCI application, -# or the name of an ABCI application compiled in with the Tendermint binary -proxy_app = "tcp://127.0.0.1:26658" - -# A custom human readable name for this node -moniker = "ibc-0" - -# If this node is many blocks behind the tip of the chain, FastSync -# allows them to catchup quickly by downloading blocks in parallel -# and verifying their commits -fast_sync = true - -# Database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb -# * goleveldb (github.com/syndtr/goleveldb - most popular implementation) -# - pure go -# - stable -# * cleveldb (uses levigo wrapper) -# - fast -# - requires gcc -# - use cleveldb build tag (go build -tags cleveldb) -# * boltdb (uses etcd's fork of bolt - github.com/etcd-io/bbolt) -# - EXPERIMENTAL -# - may be faster is some use-cases (random reads - indexer) -# - use boltdb build tag (go build -tags boltdb) -# * rocksdb (uses github.com/tecbot/gorocksdb) -# - EXPERIMENTAL -# - requires gcc -# - use rocksdb build tag (go build -tags rocksdb) -# * badgerdb (uses github.com/dgraph-io/badger) -# - EXPERIMENTAL -# - use badgerdb build tag (go build -tags badgerdb) -db_backend = "goleveldb" - -# Database directory -db_dir = "data" - -# Output level for logging, including package level options -log_level = "info" - -# Output format: 'plain' (colored text) or 'json' -log_format = "plain" - -##### additional base config options ##### - -# Path to the JSON file containing the initial validator set and other meta data -genesis_file = "config/genesis.json" - -# Path to the JSON file containing the private key to use as a validator in the consensus protocol -priv_validator_key_file = "config/priv_validator_key.json" - -# Path to the JSON file containing the last sign state of a validator -priv_validator_state_file = "data/priv_validator_state.json" - -# TCP or UNIX socket address for Tendermint to listen on for -# connections from an external PrivValidator process -priv_validator_laddr = "" - -# Path to the JSON file containing the private key to use for node authentication in the p2p protocol -node_key_file = "config/node_key.json" - -# Mechanism to connect to the ABCI application: socket | grpc -abci = "socket" - -# If true, query the ABCI app on connecting to a new peer -# so the app can decide if we should keep the connection or not -filter_peers = false - - -####################################################################### -### Advanced Configuration Options ### -####################################################################### - -####################################################### -### RPC Server Configuration Options ### -####################################################### -[rpc] - -# TCP or UNIX 
socket address for the RPC server to listen on -laddr = "tcp://0.0.0.0:26657" - -# A list of origins a cross-domain request can be executed from -# Default value '[]' disables cors support -# Use '["*"]' to allow any origin -cors_allowed_origins = [] - -# A list of methods the client is allowed to use with cross-domain requests -cors_allowed_methods = ["HEAD", "GET", "POST", ] - -# A list of non simple headers the client is allowed to use with cross-domain requests -cors_allowed_headers = ["Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time", ] - -# TCP or UNIX socket address for the gRPC server to listen on -# NOTE: This server only supports /broadcast_tx_commit -grpc_laddr = "" - -# Maximum number of simultaneous connections. -# Does not include RPC (HTTP&WebSocket) connections. See max_open_connections -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} -# 1024 - 40 - 10 - 50 = 924 = ~900 -grpc_max_open_connections = 900 - -# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool -unsafe = false - -# Maximum number of simultaneous connections (including WebSocket). -# Does not include gRPC connections. See grpc_max_open_connections -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} -# 1024 - 40 - 10 - 50 = 924 = ~900 -max_open_connections = 900 - -# Maximum number of unique clientIDs that can /subscribe -# If you're using /broadcast_tx_commit, set to the estimated maximum number -# of broadcast_tx_commit calls per block. -max_subscription_clients = 100 - -# Maximum number of unique queries a given client can /subscribe to -# If you're using GRPC (or Local RPC client) and /broadcast_tx_commit, set to -# the estimated # maximum number of broadcast_tx_commit calls per block. -max_subscriptions_per_client = 5 - -# How long to wait for a tx to be committed during /broadcast_tx_commit. -# WARNING: Using a value larger than 10s will result in increasing the -# global HTTP write timeout, which applies to all connections and endpoints. -# See https://github.com/tendermint/tendermint/issues/3435 -timeout_broadcast_tx_commit = "10s" - -# Maximum size of request body, in bytes -max_body_bytes = 1000000 - -# Maximum size of request header, in bytes -max_header_bytes = 1048576 - -# The path to a file containing certificate that is used to create the HTTPS server. -# Might be either absolute path or path related to Tendermint's config directory. -# If the certificate is signed by a certificate authority, -# the certFile should be the concatenation of the server's certificate, any intermediates, -# and the CA's certificate. -# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. -# Otherwise, HTTP server is run. -tls_cert_file = "" - -# The path to a file containing matching private key that is used to create the HTTPS server. -# Might be either absolute path or path related to Tendermint's config directory. -# NOTE: both tls-cert-file and tls-key-file must be present for Tendermint to create HTTPS server. -# Otherwise, HTTP server is run. 
-tls_key_file = "" - -# pprof listen address (https://golang.org/pkg/net/http/pprof) -pprof_laddr = "localhost:6060" - -####################################################### -### P2P Configuration Options ### -####################################################### -[p2p] - -# Address to listen for incoming connections -laddr = "tcp://0.0.0.0:26656" - -# Address to advertise to peers for them to dial -# If empty, will use the same port as the laddr, -# and will introspect on the listener or use UPnP -# to figure out the address. -external_address = "" - -# Comma separated list of seed nodes to connect to -seeds = "" - -# Comma separated list of nodes to keep persistent connections to -persistent_peers = "" - -# UPNP port forwarding -upnp = false - -# Path to address book -addr_book_file = "config/addrbook.json" - -# Set true for strict address routability rules -# Set false for private or local networks -addr_book_strict = true - -# Maximum number of inbound peers -max_num_inbound_peers = 40 - -# Maximum number of outbound peers to connect to, excluding persistent peers -max_num_outbound_peers = 10 - -# List of node IDs, to which a connection will be (re)established ignoring any existing limits -unconditional_peer_ids = "" - -# Maximum pause when redialing a persistent peer (if zero, exponential backoff is used) -persistent_peers_max_dial_period = "0s" - -# Time to wait before flushing messages out on the connection -flush_throttle_timeout = "100ms" - -# Maximum size of a message packet payload, in bytes -max_packet_msg_payload_size = 1024 - -# Rate at which packets can be sent, in bytes/second -send_rate = 5120000 - -# Rate at which packets can be received, in bytes/second -recv_rate = 5120000 - -# Set true to enable the peer-exchange reactor -pex = true - -# Seed mode, in which node constantly crawls the network and looks for -# peers. If another node asks it for addresses, it responds and disconnects. -# -# Does not work if the peer-exchange reactor is disabled. -seed_mode = false - -# Comma separated list of peer IDs to keep private (will not be gossiped to other peers) -private_peer_ids = "" - -# Toggle to disable guard against peers connecting from the same ip. -allow_duplicate_ip = false - -# Peer connection configuration. -handshake_timeout = "20s" -dial_timeout = "3s" - -####################################################### -### Mempool Configuration Option ### -####################################################### -[mempool] - -recheck = true -broadcast = true -wal_dir = "" - -# Maximum number of transactions in the mempool -size = 5000 - -# Limit the total size of all txs in the mempool. -# This only accounts for raw transactions (e.g. given 1MB transactions and -# max_txs_bytes=5MB, mempool will only accept 5 transactions). -max_txs_bytes = 1073741824 - -# Size of the cache (used to filter transactions we saw earlier) in transactions -cache_size = 10000 - -# Do not remove invalid transactions from the cache (default: false) -# Set to true if it's not possible for any invalid transaction to become valid -# again in the future. -keep-invalid-txs-in-cache = false - -# Maximum size of a single transaction. -# NOTE: the max size of a tx transmitted over the network is {max_tx_bytes}. -max_tx_bytes = 1048576 - -# Maximum size of a batch of transactions to send to a peer -# Including space needed by encoding (one varint per transaction). 
-# XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796 -max_batch_bytes = 0 - -####################################################### -### State Sync Configuration Options ### -####################################################### -[statesync] -# State sync rapidly bootstraps a new node by discovering, fetching, and restoring a state machine -# snapshot from peers instead of fetching and replaying historical blocks. Requires some peers in -# the network to take and serve state machine snapshots. State sync is not attempted if the node -# has any local state (LastBlockHeight > 0). The node will have a truncated block history, -# starting from the height of the snapshot. -enable = false - -# RPC servers (comma-separated) for light client verification of the synced state machine and -# retrieval of state data for node bootstrapping. Also needs a trusted height and corresponding -# header hash obtained from a trusted source, and a period during which validators can be trusted. -# -# For Cosmos SDK-based chains, trust_period should usually be about 2/3 of the unbonding time (~2 -# weeks) during which they can be financially punished (slashed) for misbehavior. -rpc_servers = "" -trust_height = 0 -trust_hash = "" -trust_period = "168h0m0s" - -# Time to spend discovering snapshots before initiating a restore. -discovery_time = "15s" - -# Temporary directory for state sync snapshot chunks, defaults to the OS tempdir (typically /tmp). -# Will create a new, randomly named directory within, and remove it when done. -temp_dir = "" - -####################################################### -### Fast Sync Configuration Connections ### -####################################################### -[fastsync] - -# Fast Sync version to use: -# 1) "v0" (default) - the legacy fast sync implementation -# 2) "v1" - refactor of v0 version for better testability -# 2) "v2" - complete redesign of v0, optimized for testability & readability -version = "v0" - -####################################################### -### Consensus Configuration Options ### -####################################################### -[consensus] - -wal_file = "data/cs.wal/wal" - -# How long we wait for a proposal block before prevoting nil -timeout_propose = "1s" -# How much timeout_propose increases with each round -timeout_propose_delta = "500ms" -# How long we wait after receiving +2/3 prevotes for “anything” (ie. not a single block or nil) -timeout_prevote = "1s" -# How much the timeout_prevote increases with each round -timeout_prevote_delta = "500ms" -# How long we wait after receiving +2/3 precommits for “anything” (ie. not a single block or nil) -timeout_precommit = "1s" -# How much the timeout_precommit increases with each round -timeout_precommit_delta = "500ms" -# How long we wait after committing a block, before starting on the new -# height (this gives us a chance to receive some more precommits, even -# though we already have +2/3). -timeout_commit = "1s" - -# How many blocks to look back to check existence of the node's consensus votes before joining consensus -# When non-zero, the node will panic upon restart -# if the same consensus key was used to sign {double_sign_check_height} last blocks. -# So, validators should stop the state machine, wait for some blocks, and then restart the state machine to avoid panic. 
-double_sign_check_height = 0 - -# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) -skip_timeout_commit = false - -# EmptyBlocks mode and possible interval between empty blocks -create_empty_blocks = true -create_empty_blocks_interval = "0s" - -# Reactor sleep duration parameters -peer_gossip_sleep_duration = "100ms" -peer_query_maj23_sleep_duration = "2s" - -####################################################### -### Transaction Indexer Configuration Options ### -####################################################### -[tx_index] - -# What indexer to use for transactions -# -# The application will set which txs to index. In some cases a node operator will be able -# to decide which txs to index based on configuration set in the application. -# -# Options: -# 1) "null" -# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). -# - When "kv" is chosen "tx.height" and "tx.hash" will always be indexed. -indexer = "kv" - -####################################################### -### Instrumentation Configuration Options ### -####################################################### -[instrumentation] - -# When true, Prometheus metrics are served under /metrics on -# PrometheusListenAddr. -# Check out the documentation for the list of available metrics. -prometheus = false - -# Address to listen for Prometheus collector(s) connections -prometheus_listen_addr = ":26660" - -# Maximum number of simultaneous connections. -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -max_open_connections = 3 - -# Instrumentation namespace -namespace = "tendermint" diff --git a/ci/chains/gaia/v4.1.0/ibc-0/config/genesis.json b/ci/chains/gaia/v4.1.0/ibc-0/config/genesis.json deleted file mode 100644 index 4c200cdc3e..0000000000 --- a/ci/chains/gaia/v4.1.0/ibc-0/config/genesis.json +++ /dev/null @@ -1,294 +0,0 @@ -{ - "genesis_time": "2021-03-10T23:11:19.074673019Z", - "chain_id": "ibc-0", - "initial_height": "1", - "consensus_params": { - "block": { - "max_bytes": "22020096", - "max_gas": "-1", - "time_iota_ms": "1000" - }, - "evidence": { - "max_age_num_blocks": "100000", - "max_age_duration": "172800000000000", - "max_bytes": "1048576" - }, - "validator": { - "pub_key_types": [ - "ed25519" - ] - }, - "version": {} - }, - "app_hash": "", - "app_state": { - "auth": { - "params": { - "max_memo_characters": "256", - "tx_sig_limit": "7", - "tx_size_cost_per_byte": "10", - "sig_verify_cost_ed25519": "590", - "sig_verify_cost_secp256k1": "1000" - }, - "accounts": [ - { - "@type": "/cosmos.auth.v1beta1.BaseAccount", - "address": "cosmos19gk88alnakp2ccfls3vxshpclqqv5gs7ca3uf4", - "pub_key": null, - "account_number": "0", - "sequence": "0" - }, - { - "@type": "/cosmos.auth.v1beta1.BaseAccount", - "address": "cosmos1wlmpgkdk8suvyr2f8k00rfhrtzyax4m0t2jt3v", - "pub_key": null, - "account_number": "0", - "sequence": "0" - } - ] - }, - "bank": { - "params": { - "send_enabled": [], - "default_send_enabled": true - }, - "balances": [ - { - "address": "cosmos19gk88alnakp2ccfls3vxshpclqqv5gs7ca3uf4", - "coins": [ - { - "denom": "samoleans", - "amount": "100000000000" - }, - { - "denom": "stake", - "amount": "100000000000" - } - ] - }, - { - "address": "cosmos1wlmpgkdk8suvyr2f8k00rfhrtzyax4m0t2jt3v", - "coins": [ - { - "denom": "stake", - "amount": "100000000000" - } - ] - } - ], - "supply": [ - { - "denom": "samoleans", - "amount": "100000000000" - }, - { - "denom": 
"stake", - "amount": "200000000000" - } - ], - "denom_metadata": [] - }, - "capability": { - "index": "1", - "owners": [] - }, - "crisis": { - "constant_fee": { - "denom": "stake", - "amount": "1000" - } - }, - "distribution": { - "params": { - "community_tax": "0.020000000000000000", - "base_proposer_reward": "0.010000000000000000", - "bonus_proposer_reward": "0.040000000000000000", - "withdraw_addr_enabled": true - }, - "fee_pool": { - "community_pool": [] - }, - "delegator_withdraw_infos": [], - "previous_proposer": "", - "outstanding_rewards": [], - "validator_accumulated_commissions": [], - "validator_historical_rewards": [], - "validator_current_rewards": [], - "delegator_starting_infos": [], - "validator_slash_events": [] - }, - "evidence": { - "evidence": [] - }, - "genutil": { - "gen_txs": [ - { - "body": { - "messages": [ - { - "@type": "/cosmos.staking.v1beta1.MsgCreateValidator", - "description": { - "moniker": "ibc-0", - "identity": "", - "website": "", - "security_contact": "", - "details": "" - }, - "commission": { - "rate": "0.100000000000000000", - "max_rate": "0.200000000000000000", - "max_change_rate": "0.010000000000000000" - }, - "min_self_delegation": "1", - "delegator_address": "cosmos1wlmpgkdk8suvyr2f8k00rfhrtzyax4m0t2jt3v", - "validator_address": "cosmosvaloper1wlmpgkdk8suvyr2f8k00rfhrtzyax4m0w7x7al", - "pubkey": { - "@type": "/cosmos.crypto.ed25519.PubKey", - "key": "iuhBmH0fkvkZDJBMxhbsVyws433RY+RBVfpaK7m/QgY=" - }, - "value": { - "denom": "stake", - "amount": "100000000000" - } - } - ], - "memo": "45cc9bede9a7d77955b8770b47267069f9d910b6@192.168.1.214:26656", - "timeout_height": "0", - "extension_options": [], - "non_critical_extension_options": [] - }, - "auth_info": { - "signer_infos": [ - { - "public_key": { - "@type": "/cosmos.crypto.secp256k1.PubKey", - "key": "A09doPsGZIeiIE4QdZxt28539imOelzvTf5wlQrcFB6V" - }, - "mode_info": { - "single": { - "mode": "SIGN_MODE_DIRECT" - } - }, - "sequence": "0" - } - ], - "fee": { - "amount": [], - "gas_limit": "200000", - "payer": "", - "granter": "" - } - }, - "signatures": [ - "D8Rb+8eIVmz222pe212HLryE/6h5Kg5aerEU77Mw09p4TSkOEKPuAca8WvjrboRcFDpDc9Qh9m37fuGwQmm/Aw==" - ] - } - ] - }, - "gov": { - "starting_proposal_id": "1", - "deposits": [], - "votes": [], - "proposals": [], - "deposit_params": { - "min_deposit": [ - { - "denom": "stake", - "amount": "10000000" - } - ], - "max_deposit_period": "172800s" - }, - "voting_params": { - "voting_period": "172800s" - }, - "tally_params": { - "quorum": "0.334000000000000000", - "threshold": "0.500000000000000000", - "veto_threshold": "0.334000000000000000" - } - }, - "ibc": { - "client_genesis": { - "clients": [], - "clients_consensus": [], - "clients_metadata": [], - "params": { - "allowed_clients": [ - "06-solomachine", - "07-tendermint" - ] - }, - "create_localhost": false, - "next_client_sequence": "0" - }, - "connection_genesis": { - "connections": [], - "client_connection_paths": [], - "next_connection_sequence": "0" - }, - "channel_genesis": { - "channels": [], - "acknowledgements": [], - "commitments": [], - "receipts": [], - "send_sequences": [], - "recv_sequences": [], - "ack_sequences": [], - "next_channel_sequence": "0" - } - }, - "mint": { - "minter": { - "inflation": "0.130000000000000000", - "annual_provisions": "0.000000000000000000" - }, - "params": { - "mint_denom": "stake", - "inflation_rate_change": "0.130000000000000000", - "inflation_max": "0.200000000000000000", - "inflation_min": "0.070000000000000000", - "goal_bonded": "0.670000000000000000", - 
"blocks_per_year": "6311520" - } - }, - "params": null, - "slashing": { - "params": { - "signed_blocks_window": "100", - "min_signed_per_window": "0.500000000000000000", - "downtime_jail_duration": "600s", - "slash_fraction_double_sign": "0.050000000000000000", - "slash_fraction_downtime": "0.010000000000000000" - }, - "signing_infos": [], - "missed_blocks": [] - }, - "staking": { - "params": { - "unbonding_time": "1814400s", - "max_validators": 100, - "max_entries": 7, - "historical_entries": 10000, - "bond_denom": "stake" - }, - "last_total_power": "0", - "last_validator_powers": [], - "validators": [], - "delegations": [], - "unbonding_delegations": [], - "redelegations": [], - "exported": false - }, - "transfer": { - "port_id": "transfer", - "denom_traces": [], - "params": { - "send_enabled": true, - "receive_enabled": true - } - }, - "upgrade": {}, - "vesting": {} - } -} \ No newline at end of file diff --git a/ci/chains/gaia/v4.1.0/ibc-0/config/gentx/gentx-45cc9bede9a7d77955b8770b47267069f9d910b6.json b/ci/chains/gaia/v4.1.0/ibc-0/config/gentx/gentx-45cc9bede9a7d77955b8770b47267069f9d910b6.json deleted file mode 100644 index 60d77586ea..0000000000 --- a/ci/chains/gaia/v4.1.0/ibc-0/config/gentx/gentx-45cc9bede9a7d77955b8770b47267069f9d910b6.json +++ /dev/null @@ -1 +0,0 @@ -{"body":{"messages":[{"@type":"/cosmos.staking.v1beta1.MsgCreateValidator","description":{"moniker":"ibc-0","identity":"","website":"","security_contact":"","details":""},"commission":{"rate":"0.100000000000000000","max_rate":"0.200000000000000000","max_change_rate":"0.010000000000000000"},"min_self_delegation":"1","delegator_address":"cosmos1wlmpgkdk8suvyr2f8k00rfhrtzyax4m0t2jt3v","validator_address":"cosmosvaloper1wlmpgkdk8suvyr2f8k00rfhrtzyax4m0w7x7al","pubkey":{"@type":"/cosmos.crypto.ed25519.PubKey","key":"iuhBmH0fkvkZDJBMxhbsVyws433RY+RBVfpaK7m/QgY="},"value":{"denom":"stake","amount":"100000000000"}}],"memo":"45cc9bede9a7d77955b8770b47267069f9d910b6@192.168.1.214:26656","timeout_height":"0","extension_options":[],"non_critical_extension_options":[]},"auth_info":{"signer_infos":[{"public_key":{"@type":"/cosmos.crypto.secp256k1.PubKey","key":"A09doPsGZIeiIE4QdZxt28539imOelzvTf5wlQrcFB6V"},"mode_info":{"single":{"mode":"SIGN_MODE_DIRECT"}},"sequence":"0"}],"fee":{"amount":[],"gas_limit":"200000","payer":"","granter":""}},"signatures":["D8Rb+8eIVmz222pe212HLryE/6h5Kg5aerEU77Mw09p4TSkOEKPuAca8WvjrboRcFDpDc9Qh9m37fuGwQmm/Aw=="]} diff --git a/ci/chains/gaia/v4.1.0/ibc-0/config/node_key.json b/ci/chains/gaia/v4.1.0/ibc-0/config/node_key.json deleted file mode 100644 index 802b32756b..0000000000 --- a/ci/chains/gaia/v4.1.0/ibc-0/config/node_key.json +++ /dev/null @@ -1 +0,0 @@ -{"priv_key":{"type":"tendermint/PrivKeyEd25519","value":"cL2y+gGxn5GpzI9gXcG4swoe0MGiNHEJFejbnWjXiSKerCGitavdUVLWBuug74FbAA6G/hpw+7y3Ng/EP1tGnQ=="}} \ No newline at end of file diff --git a/ci/chains/gaia/v4.1.0/ibc-0/config/priv_validator_key.json b/ci/chains/gaia/v4.1.0/ibc-0/config/priv_validator_key.json deleted file mode 100644 index 535087bf4f..0000000000 --- a/ci/chains/gaia/v4.1.0/ibc-0/config/priv_validator_key.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "address": "028FA4039CA625CDCF67FA42CD1252A0ED1B58F3", - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "iuhBmH0fkvkZDJBMxhbsVyws433RY+RBVfpaK7m/QgY=" - }, - "priv_key": { - "type": "tendermint/PrivKeyEd25519", - "value": "IbIVd94XuhcfqBr5KRZgZy3TiolDo0kuerw6PXMfDv6K6EGYfR+S+RkMkEzGFuxXLCzjfdFj5EFV+lorub9CBg==" - } -} \ No newline at end of file diff --git 
a/ci/chains/gaia/v4.1.0/ibc-0/key_seed.json b/ci/chains/gaia/v4.1.0/ibc-0/key_seed.json deleted file mode 100644 index 74515d1149..0000000000 --- a/ci/chains/gaia/v4.1.0/ibc-0/key_seed.json +++ /dev/null @@ -1 +0,0 @@ -{"name":"user","type":"local","address":"cosmos19gk88alnakp2ccfls3vxshpclqqv5gs7ca3uf4","pubkey":"cosmospub1addwnpepq2f7zqadncegg3m5ke7dpa064p5xmltlg39pnplylztlmsxaa5e9zcwwcc9","mnemonic":"meat domain tube damp notice pioneer useful cherry develop harbor identify yard police distance quote volume number kind lamp word region lumber motor donate"} diff --git a/ci/chains/gaia/v4.1.0/ibc-0/keyring-test/2a2c73f7f3ed82ac613f8458685c38f800ca221e.address b/ci/chains/gaia/v4.1.0/ibc-0/keyring-test/2a2c73f7f3ed82ac613f8458685c38f800ca221e.address deleted file mode 100644 index 7b4bbf9521..0000000000 --- a/ci/chains/gaia/v4.1.0/ibc-0/keyring-test/2a2c73f7f3ed82ac613f8458685c38f800ca221e.address +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wMy0xMCAxODoxMToyMS40MzQ2NTcxMTUgLTA1MDAgRVNUIG09KzAuMTU5Nzk5Njc3IiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiN1VaQkYwTUQtMF9RTG5XcCJ9.7ul__VTSeFv8NGFhFNgg0NeToZbM9_9mDbSeuf1mBDpcM4R51ind-g.f6U0SMd-AX-F3Vpr.fTPtpbW9Tw_fP6SO9_oW9MAUcTh8ce8Uhy5VScoEJmp0_XSFANCMX4GCUgjCmidZHWVZs3I2w059POgVGx2j3QETW8Ha3YEzq_v9NPV7Cp4wupKHWCuMVXMQDJxzepBXXvos8chliSHQJSJXg2kmrkU3gSKKrARmfphvDVR94SkoeFMICQK_3PkbIsAApdclkj6_16LVlCzQWZJ4c1B4-OIjTxDdRdtbQJOQkSI9p4PHOQ.uTjBcYGCjzEPzWkqEyPNdA \ No newline at end of file diff --git a/ci/chains/gaia/v4.1.0/ibc-0/keyring-test/77f61459b63c38c20d493d9ef1a6e35889d3576f.address b/ci/chains/gaia/v4.1.0/ibc-0/keyring-test/77f61459b63c38c20d493d9ef1a6e35889d3576f.address deleted file mode 100644 index 54ada7e01a..0000000000 --- a/ci/chains/gaia/v4.1.0/ibc-0/keyring-test/77f61459b63c38c20d493d9ef1a6e35889d3576f.address +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wMy0xMCAxODoxMToyMC4yNDY4ODM4NjMgLTA1MDAgRVNUIG09KzAuMTYxMjA4MDQxIiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoicTY1a3dYN3ZRbmFTX2JtRCJ9.ZiY-spyh7NYc9Pd83APYLg3Gf24UCnQvexiM2HOjKxy54fpZceQggA.eC5y8lYbobmLehya.hdZnBJdY8Z6TD45gmlbsCBN3VhE003weVCk8_-cK0AhdjU6vz4mM8uIDE-HZ5RmCN6xjPxjQkUdNqdIyLQxv5AUo6T3dNAKK3K0j3wpRLIvNMzF4E1aGB0xBdW7DjEsFIRyrvHf0UCDPwrALLXUYRhgZgdgDP7dkxRGjBP9vXx2l7Y7vHOfPTerAENtSW-uj30ck4L5M-Ni8Ps9WHwuoFuQ7iR8CD-zXimuKdW7eusTdfL-mf7ZhT6F9.4EHrhYHsSGMEVnChYcGcWA \ No newline at end of file diff --git a/ci/chains/gaia/v4.1.0/ibc-0/keyring-test/user.info b/ci/chains/gaia/v4.1.0/ibc-0/keyring-test/user.info deleted file mode 100644 index 9551a79564..0000000000 --- a/ci/chains/gaia/v4.1.0/ibc-0/keyring-test/user.info +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wMy0xMCAxODoxMToyMS40MTg1ODA0NiAtMDUwMCBFU1QgbT0rMC4xNDM3MjI5OTUiLCJlbmMiOiJBMjU2R0NNIiwicDJjIjo4MTkyLCJwMnMiOiJoRkNwQmIwbTUwajNielhHIn0.C3PQTfSyFpuK7rbdEL6Z6zN2reOBRnDGHUV-jN_P9imE_1A933e7lA.9XKKOi6b1Nlw_90U.yoQd3cWpPpeYmX0cfpq_y1lPJ7gmLfo321FCYoD9keRhJFe9QVlxIQl7v_l2A5Uah92I55_Is1BxFJhlMEyHWaasdCHQ9OXzfO52Kwrm5v_IPyngJcCKk90yLa6oeoV12XmCBjOyZqIy7Ux10IOGxQoykznl68JLxb1aACyLNQ7ECOh87lUqLlcgu4LzMerlA6gY6oMW7FjxzV_kFLCCHHyF-fP4gy3vs6_lfozVHH0fOOWLQCDc8eWQcs3USltaLRmJDvHTqoZj6h3B8TLtJKqHc48jRdK9jBC3qgo9EZuLvBobef2M0XGgu8kiAv6WfkYsyEldilnfj7xkPn4BacAZXo3V1gg.kk405OOVQZDLVWoHPgaywA \ No newline at end of file diff --git a/ci/chains/gaia/v4.1.0/ibc-0/keyring-test/validator.info b/ci/chains/gaia/v4.1.0/ibc-0/keyring-test/validator.info deleted file mode 100644 index 043b8d9379..0000000000 
--- a/ci/chains/gaia/v4.1.0/ibc-0/keyring-test/validator.info +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wMy0xMCAxODoxMToyMC4yMzAyNzQ2NTQgLTA1MDAgRVNUIG09KzAuMTQ0NTk4ODU4IiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiMkNkZ28zekhfb3B3bk9RLSJ9.q6n61AXUzUb6qNDNRbytFs0pdd0psGHO6xxWPJa91KFHhCo6e1lZbA.ZK_aytbygZCmJicK.yCj0FFc9w1m5eVknK9t5RlD38yqRw0VbO2VGMVTLOp86QtriWoTeQ4xblUKv-5EneXhM6uTzrG_vgf8_xTQbfaad44hWm8EXKV0Fg0AZhdoAqMWlN3DbzIUh5JVdmtL3JywB4blJvM3ugRsYM-CDyDOoUeam03ci7gFu-VzGn510_dEff7y4MTTUlvgrNeF7emCKTt-lrneBoknc6CqdvqgfhGpcwhT5XkPpeZgHm22jn-pLOtQifvsvU4T_gCK2SnNNPaorDkeMA7gWjiEqvBB4JPZTsO8Ps7Ua46KPmLqBaFlq_jba1rJvQSLhU4jTOH6WSbTdNm0cCyQ8zzUOv9dQT8nJGvOiWJBwLFZNu-S4_Ktd.MfJdPHsR-lchlIXrr-9bBA \ No newline at end of file diff --git a/ci/chains/gaia/v4.1.0/ibc-0/validator_seed.json b/ci/chains/gaia/v4.1.0/ibc-0/validator_seed.json deleted file mode 100644 index 1116869c59..0000000000 --- a/ci/chains/gaia/v4.1.0/ibc-0/validator_seed.json +++ /dev/null @@ -1 +0,0 @@ -{"name":"validator","type":"local","address":"cosmos1wlmpgkdk8suvyr2f8k00rfhrtzyax4m0t2jt3v","pubkey":"cosmospub1addwnpepqd84mg8mqejg0g3qfcg8t8rdm0880a3f3ea9em6dlecf2zkuzs0f287l3tw","mnemonic":"picnic busy spike vintage various below sight faculty kitten require share swamp member injury trouble mean desk scrap version unfair engine success ring still"} diff --git a/ci/chains/gaia/v4.1.0/ibc-1/config/app.toml b/ci/chains/gaia/v4.1.0/ibc-1/config/app.toml deleted file mode 100644 index 54de73e392..0000000000 --- a/ci/chains/gaia/v4.1.0/ibc-1/config/app.toml +++ /dev/null @@ -1,152 +0,0 @@ -# This is a TOML config file. -# For more information, see https://github.com/toml-lang/toml - -############################################################################### -### Base Configuration ### -############################################################################### - -# The minimum gas prices a validator is willing to accept for processing a -# transaction. A transaction's fees must meet the minimum of any denomination -# specified in this config (e.g. 0.25token1;0.0001token2). -minimum-gas-prices = "" - -# default: the last 100 states are kept in addition to every 500th state; pruning at 10 block intervals -# nothing: all historic states will be saved, nothing will be deleted (i.e. archiving node) -# everything: all saved states will be deleted, storing only the current state; pruning at 10 block intervals -# custom: allow pruning options to be manually specified through 'pruning-keep-recent', 'pruning-keep-every', and 'pruning-interval' -pruning = "default" - -# These are applied if and only if the pruning strategy is custom. -pruning-keep-recent = "0" -pruning-keep-every = "0" -pruning-interval = "0" - -# HaltHeight contains a non-zero block height at which a node will gracefully -# halt and shutdown that can be used to assist upgrades and testing. -# -# Note: Commitment of state will be attempted on the corresponding block. -halt-height = 0 - -# HaltTime contains a non-zero minimum block time (in Unix seconds) at which -# a node will gracefully halt and shutdown that can be used to assist upgrades -# and testing. -# -# Note: Commitment of state will be attempted on the corresponding block. -halt-time = 0 - -# MinRetainBlocks defines the minimum block height offset from the current -# block being committed, such that all blocks past this offset are pruned -# from Tendermint. 
It is used as part of the process of determining the -# ResponseCommit.RetainHeight value during ABCI Commit. A value of 0 indicates -# that no blocks should be pruned. -# -# This configuration value is only responsible for pruning Tendermint blocks. -# It has no bearing on application state pruning which is determined by the -# "pruning-*" configurations. -# -# Note: Tendermint block pruning is dependant on this parameter in conunction -# with the unbonding (safety threshold) period, state pruning and state sync -# snapshot parameters to determine the correct minimum value of -# ResponseCommit.RetainHeight. -min-retain-blocks = 0 - -# InterBlockCache enables inter-block caching. -inter-block-cache = true - -# IndexEvents defines the set of events in the form {eventType}.{attributeKey}, -# which informs Tendermint what to index. If empty, all events will be indexed. -# -# Example: -# ["message.sender", "message.recipient"] -index-events = [] - -############################################################################### -### Telemetry Configuration ### -############################################################################### - -[telemetry] - -# Prefixed with keys to separate services. -service-name = "" - -# Enabled enables the application telemetry functionality. When enabled, -# an in-memory sink is also enabled by default. Operators may also enabled -# other sinks such as Prometheus. -enabled = false - -# Enable prefixing gauge values with hostname. -enable-hostname = false - -# Enable adding hostname to labels. -enable-hostname-label = false - -# Enable adding service to labels. -enable-service-label = false - -# PrometheusRetentionTime, when positive, enables a Prometheus metrics sink. -prometheus-retention-time = 0 - -# GlobalLabels defines a global set of name/value label tuples applied to all -# metrics emitted using the wrapper functions defined in telemetry package. -# -# Example: -# [["chain_id", "cosmoshub-1"]] -global-labels = [ -] - -############################################################################### -### API Configuration ### -############################################################################### - -[api] - -# Enable defines if the API server should be enabled. -enable = false - -# Swagger defines if swagger documentation should automatically be registered. -swagger = false - -# Address defines the API server to listen on. -address = "tcp://0.0.0.0:1317" - -# MaxOpenConnections defines the number of maximum open connections. -max-open-connections = 1000 - -# RPCReadTimeout defines the Tendermint RPC read timeout (in seconds). -rpc-read-timeout = 10 - -# RPCWriteTimeout defines the Tendermint RPC write timeout (in seconds). -rpc-write-timeout = 0 - -# RPCMaxBodyBytes defines the Tendermint maximum response body (in bytes). -rpc-max-body-bytes = 1000000 - -# EnableUnsafeCORS defines if CORS should be enabled (unsafe - use it at your own risk). -enabled-unsafe-cors = false - -############################################################################### -### gRPC Configuration ### -############################################################################### - -[grpc] - -# Enable defines if the gRPC server should be enabled. -enable = true - -# Address defines the gRPC server address to bind to. 
-address = "0.0.0.0:9090" - -############################################################################### -### State Sync Configuration ### -############################################################################### - -# State sync snapshots allow other nodes to rapidly join the network without replaying historical -# blocks, instead downloading and applying a snapshot of the application state at a given height. -[state-sync] - -# snapshot-interval specifies the block interval at which local state sync snapshots are -# taken (0 to disable). Must be a multiple of pruning-keep-every. -snapshot-interval = 0 - -# snapshot-keep-recent specifies the number of recent snapshots to keep and serve (0 to keep all). -snapshot-keep-recent = 2 diff --git a/ci/chains/gaia/v4.1.0/ibc-1/config/config.toml b/ci/chains/gaia/v4.1.0/ibc-1/config/config.toml deleted file mode 100644 index c32f66cf63..0000000000 --- a/ci/chains/gaia/v4.1.0/ibc-1/config/config.toml +++ /dev/null @@ -1,393 +0,0 @@ -# This is a TOML config file. -# For more information, see https://github.com/toml-lang/toml - -# NOTE: Any path below can be absolute (e.g. "/var/myawesomeapp/data") or -# relative to the home directory (e.g. "data"). The home directory is -# "$HOME/.tendermint" by default, but could be changed via $TMHOME env variable -# or --home cmd flag. - -####################################################################### -### Main Base Config Options ### -####################################################################### - -# TCP or UNIX socket address of the ABCI application, -# or the name of an ABCI application compiled in with the Tendermint binary -proxy_app = "tcp://127.0.0.1:26658" - -# A custom human readable name for this node -moniker = "ibc-1" - -# If this node is many blocks behind the tip of the chain, FastSync -# allows them to catchup quickly by downloading blocks in parallel -# and verifying their commits -fast_sync = true - -# Database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb -# * goleveldb (github.com/syndtr/goleveldb - most popular implementation) -# - pure go -# - stable -# * cleveldb (uses levigo wrapper) -# - fast -# - requires gcc -# - use cleveldb build tag (go build -tags cleveldb) -# * boltdb (uses etcd's fork of bolt - github.com/etcd-io/bbolt) -# - EXPERIMENTAL -# - may be faster is some use-cases (random reads - indexer) -# - use boltdb build tag (go build -tags boltdb) -# * rocksdb (uses github.com/tecbot/gorocksdb) -# - EXPERIMENTAL -# - requires gcc -# - use rocksdb build tag (go build -tags rocksdb) -# * badgerdb (uses github.com/dgraph-io/badger) -# - EXPERIMENTAL -# - use badgerdb build tag (go build -tags badgerdb) -db_backend = "goleveldb" - -# Database directory -db_dir = "data" - -# Output level for logging, including package level options -log_level = "info" - -# Output format: 'plain' (colored text) or 'json' -log_format = "plain" - -##### additional base config options ##### - -# Path to the JSON file containing the initial validator set and other meta data -genesis_file = "config/genesis.json" - -# Path to the JSON file containing the private key to use as a validator in the consensus protocol -priv_validator_key_file = "config/priv_validator_key.json" - -# Path to the JSON file containing the last sign state of a validator -priv_validator_state_file = "data/priv_validator_state.json" - -# TCP or UNIX socket address for Tendermint to listen on for -# connections from an external PrivValidator process -priv_validator_laddr = "" - -# Path to the JSON 
file containing the private key to use for node authentication in the p2p protocol -node_key_file = "config/node_key.json" - -# Mechanism to connect to the ABCI application: socket | grpc -abci = "socket" - -# If true, query the ABCI app on connecting to a new peer -# so the app can decide if we should keep the connection or not -filter_peers = false - - -####################################################################### -### Advanced Configuration Options ### -####################################################################### - -####################################################### -### RPC Server Configuration Options ### -####################################################### -[rpc] - -# TCP or UNIX socket address for the RPC server to listen on -laddr = "tcp://0.0.0.0:26657" - -# A list of origins a cross-domain request can be executed from -# Default value '[]' disables cors support -# Use '["*"]' to allow any origin -cors_allowed_origins = [] - -# A list of methods the client is allowed to use with cross-domain requests -cors_allowed_methods = ["HEAD", "GET", "POST", ] - -# A list of non simple headers the client is allowed to use with cross-domain requests -cors_allowed_headers = ["Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time", ] - -# TCP or UNIX socket address for the gRPC server to listen on -# NOTE: This server only supports /broadcast_tx_commit -grpc_laddr = "" - -# Maximum number of simultaneous connections. -# Does not include RPC (HTTP&WebSocket) connections. See max_open_connections -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} -# 1024 - 40 - 10 - 50 = 924 = ~900 -grpc_max_open_connections = 900 - -# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool -unsafe = false - -# Maximum number of simultaneous connections (including WebSocket). -# Does not include gRPC connections. See grpc_max_open_connections -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} -# 1024 - 40 - 10 - 50 = 924 = ~900 -max_open_connections = 900 - -# Maximum number of unique clientIDs that can /subscribe -# If you're using /broadcast_tx_commit, set to the estimated maximum number -# of broadcast_tx_commit calls per block. -max_subscription_clients = 100 - -# Maximum number of unique queries a given client can /subscribe to -# If you're using GRPC (or Local RPC client) and /broadcast_tx_commit, set to -# the estimated # maximum number of broadcast_tx_commit calls per block. -max_subscriptions_per_client = 5 - -# How long to wait for a tx to be committed during /broadcast_tx_commit. -# WARNING: Using a value larger than 10s will result in increasing the -# global HTTP write timeout, which applies to all connections and endpoints. -# See https://github.com/tendermint/tendermint/issues/3435 -timeout_broadcast_tx_commit = "10s" - -# Maximum size of request body, in bytes -max_body_bytes = 1000000 - -# Maximum size of request header, in bytes -max_header_bytes = 1048576 - -# The path to a file containing certificate that is used to create the HTTPS server. -# Might be either absolute path or path related to Tendermint's config directory. 
-# If the certificate is signed by a certificate authority, -# the certFile should be the concatenation of the server's certificate, any intermediates, -# and the CA's certificate. -# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. -# Otherwise, HTTP server is run. -tls_cert_file = "" - -# The path to a file containing matching private key that is used to create the HTTPS server. -# Might be either absolute path or path related to Tendermint's config directory. -# NOTE: both tls-cert-file and tls-key-file must be present for Tendermint to create HTTPS server. -# Otherwise, HTTP server is run. -tls_key_file = "" - -# pprof listen address (https://golang.org/pkg/net/http/pprof) -pprof_laddr = "localhost:6060" - -####################################################### -### P2P Configuration Options ### -####################################################### -[p2p] - -# Address to listen for incoming connections -laddr = "tcp://0.0.0.0:26656" - -# Address to advertise to peers for them to dial -# If empty, will use the same port as the laddr, -# and will introspect on the listener or use UPnP -# to figure out the address. -external_address = "" - -# Comma separated list of seed nodes to connect to -seeds = "" - -# Comma separated list of nodes to keep persistent connections to -persistent_peers = "" - -# UPNP port forwarding -upnp = false - -# Path to address book -addr_book_file = "config/addrbook.json" - -# Set true for strict address routability rules -# Set false for private or local networks -addr_book_strict = true - -# Maximum number of inbound peers -max_num_inbound_peers = 40 - -# Maximum number of outbound peers to connect to, excluding persistent peers -max_num_outbound_peers = 10 - -# List of node IDs, to which a connection will be (re)established ignoring any existing limits -unconditional_peer_ids = "" - -# Maximum pause when redialing a persistent peer (if zero, exponential backoff is used) -persistent_peers_max_dial_period = "0s" - -# Time to wait before flushing messages out on the connection -flush_throttle_timeout = "100ms" - -# Maximum size of a message packet payload, in bytes -max_packet_msg_payload_size = 1024 - -# Rate at which packets can be sent, in bytes/second -send_rate = 5120000 - -# Rate at which packets can be received, in bytes/second -recv_rate = 5120000 - -# Set true to enable the peer-exchange reactor -pex = true - -# Seed mode, in which node constantly crawls the network and looks for -# peers. If another node asks it for addresses, it responds and disconnects. -# -# Does not work if the peer-exchange reactor is disabled. -seed_mode = false - -# Comma separated list of peer IDs to keep private (will not be gossiped to other peers) -private_peer_ids = "" - -# Toggle to disable guard against peers connecting from the same ip. -allow_duplicate_ip = false - -# Peer connection configuration. -handshake_timeout = "20s" -dial_timeout = "3s" - -####################################################### -### Mempool Configuration Option ### -####################################################### -[mempool] - -recheck = true -broadcast = true -wal_dir = "" - -# Maximum number of transactions in the mempool -size = 5000 - -# Limit the total size of all txs in the mempool. -# This only accounts for raw transactions (e.g. given 1MB transactions and -# max_txs_bytes=5MB, mempool will only accept 5 transactions). 
-max_txs_bytes = 1073741824 - -# Size of the cache (used to filter transactions we saw earlier) in transactions -cache_size = 10000 - -# Do not remove invalid transactions from the cache (default: false) -# Set to true if it's not possible for any invalid transaction to become valid -# again in the future. -keep-invalid-txs-in-cache = false - -# Maximum size of a single transaction. -# NOTE: the max size of a tx transmitted over the network is {max_tx_bytes}. -max_tx_bytes = 1048576 - -# Maximum size of a batch of transactions to send to a peer -# Including space needed by encoding (one varint per transaction). -# XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796 -max_batch_bytes = 0 - -####################################################### -### State Sync Configuration Options ### -####################################################### -[statesync] -# State sync rapidly bootstraps a new node by discovering, fetching, and restoring a state machine -# snapshot from peers instead of fetching and replaying historical blocks. Requires some peers in -# the network to take and serve state machine snapshots. State sync is not attempted if the node -# has any local state (LastBlockHeight > 0). The node will have a truncated block history, -# starting from the height of the snapshot. -enable = false - -# RPC servers (comma-separated) for light client verification of the synced state machine and -# retrieval of state data for node bootstrapping. Also needs a trusted height and corresponding -# header hash obtained from a trusted source, and a period during which validators can be trusted. -# -# For Cosmos SDK-based chains, trust_period should usually be about 2/3 of the unbonding time (~2 -# weeks) during which they can be financially punished (slashed) for misbehavior. -rpc_servers = "" -trust_height = 0 -trust_hash = "" -trust_period = "168h0m0s" - -# Time to spend discovering snapshots before initiating a restore. -discovery_time = "15s" - -# Temporary directory for state sync snapshot chunks, defaults to the OS tempdir (typically /tmp). -# Will create a new, randomly named directory within, and remove it when done. -temp_dir = "" - -####################################################### -### Fast Sync Configuration Connections ### -####################################################### -[fastsync] - -# Fast Sync version to use: -# 1) "v0" (default) - the legacy fast sync implementation -# 2) "v1" - refactor of v0 version for better testability -# 2) "v2" - complete redesign of v0, optimized for testability & readability -version = "v0" - -####################################################### -### Consensus Configuration Options ### -####################################################### -[consensus] - -wal_file = "data/cs.wal/wal" - -# How long we wait for a proposal block before prevoting nil -timeout_propose = "1s" -# How much timeout_propose increases with each round -timeout_propose_delta = "500ms" -# How long we wait after receiving +2/3 prevotes for “anything” (ie. not a single block or nil) -timeout_prevote = "1s" -# How much the timeout_prevote increases with each round -timeout_prevote_delta = "500ms" -# How long we wait after receiving +2/3 precommits for “anything” (ie. 
not a single block or nil) -timeout_precommit = "1s" -# How much the timeout_precommit increases with each round -timeout_precommit_delta = "500ms" -# How long we wait after committing a block, before starting on the new -# height (this gives us a chance to receive some more precommits, even -# though we already have +2/3). -timeout_commit = "1s" - -# How many blocks to look back to check existence of the node's consensus votes before joining consensus -# When non-zero, the node will panic upon restart -# if the same consensus key was used to sign {double_sign_check_height} last blocks. -# So, validators should stop the state machine, wait for some blocks, and then restart the state machine to avoid panic. -double_sign_check_height = 0 - -# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) -skip_timeout_commit = false - -# EmptyBlocks mode and possible interval between empty blocks -create_empty_blocks = true -create_empty_blocks_interval = "0s" - -# Reactor sleep duration parameters -peer_gossip_sleep_duration = "100ms" -peer_query_maj23_sleep_duration = "2s" - -####################################################### -### Transaction Indexer Configuration Options ### -####################################################### -[tx_index] - -# What indexer to use for transactions -# -# The application will set which txs to index. In some cases a node operator will be able -# to decide which txs to index based on configuration set in the application. -# -# Options: -# 1) "null" -# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). -# - When "kv" is chosen "tx.height" and "tx.hash" will always be indexed. -indexer = "kv" - -####################################################### -### Instrumentation Configuration Options ### -####################################################### -[instrumentation] - -# When true, Prometheus metrics are served under /metrics on -# PrometheusListenAddr. -# Check out the documentation for the list of available metrics. -prometheus = false - -# Address to listen for Prometheus collector(s) connections -prometheus_listen_addr = ":26660" - -# Maximum number of simultaneous connections. -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. 
-max_open_connections = 3 - -# Instrumentation namespace -namespace = "tendermint" diff --git a/ci/chains/gaia/v4.1.0/ibc-1/config/genesis.json b/ci/chains/gaia/v4.1.0/ibc-1/config/genesis.json deleted file mode 100644 index 56b21ad6d2..0000000000 --- a/ci/chains/gaia/v4.1.0/ibc-1/config/genesis.json +++ /dev/null @@ -1,294 +0,0 @@ -{ - "genesis_time": "2021-03-10T23:11:30.807005731Z", - "chain_id": "ibc-1", - "initial_height": "1", - "consensus_params": { - "block": { - "max_bytes": "22020096", - "max_gas": "-1", - "time_iota_ms": "1000" - }, - "evidence": { - "max_age_num_blocks": "100000", - "max_age_duration": "172800000000000", - "max_bytes": "1048576" - }, - "validator": { - "pub_key_types": [ - "ed25519" - ] - }, - "version": {} - }, - "app_hash": "", - "app_state": { - "auth": { - "params": { - "max_memo_characters": "256", - "tx_sig_limit": "7", - "tx_size_cost_per_byte": "10", - "sig_verify_cost_ed25519": "590", - "sig_verify_cost_secp256k1": "1000" - }, - "accounts": [ - { - "@type": "/cosmos.auth.v1beta1.BaseAccount", - "address": "cosmos1448z7w3j5gsht5fa099wzdqtz9hv7ryur05rm8", - "pub_key": null, - "account_number": "0", - "sequence": "0" - }, - { - "@type": "/cosmos.auth.v1beta1.BaseAccount", - "address": "cosmos1l7yp6p7gmhvcjwngh6vvxcw2s8l60hkeznfarz", - "pub_key": null, - "account_number": "0", - "sequence": "0" - } - ] - }, - "bank": { - "params": { - "send_enabled": [], - "default_send_enabled": true - }, - "balances": [ - { - "address": "cosmos1448z7w3j5gsht5fa099wzdqtz9hv7ryur05rm8", - "coins": [ - { - "denom": "samoleans", - "amount": "100000000000" - }, - { - "denom": "stake", - "amount": "100000000000" - } - ] - }, - { - "address": "cosmos1l7yp6p7gmhvcjwngh6vvxcw2s8l60hkeznfarz", - "coins": [ - { - "denom": "stake", - "amount": "100000000000" - } - ] - } - ], - "supply": [ - { - "denom": "samoleans", - "amount": "100000000000" - }, - { - "denom": "stake", - "amount": "200000000000" - } - ], - "denom_metadata": [] - }, - "capability": { - "index": "1", - "owners": [] - }, - "crisis": { - "constant_fee": { - "denom": "stake", - "amount": "1000" - } - }, - "distribution": { - "params": { - "community_tax": "0.020000000000000000", - "base_proposer_reward": "0.010000000000000000", - "bonus_proposer_reward": "0.040000000000000000", - "withdraw_addr_enabled": true - }, - "fee_pool": { - "community_pool": [] - }, - "delegator_withdraw_infos": [], - "previous_proposer": "", - "outstanding_rewards": [], - "validator_accumulated_commissions": [], - "validator_historical_rewards": [], - "validator_current_rewards": [], - "delegator_starting_infos": [], - "validator_slash_events": [] - }, - "evidence": { - "evidence": [] - }, - "genutil": { - "gen_txs": [ - { - "body": { - "messages": [ - { - "@type": "/cosmos.staking.v1beta1.MsgCreateValidator", - "description": { - "moniker": "ibc-1", - "identity": "", - "website": "", - "security_contact": "", - "details": "" - }, - "commission": { - "rate": "0.100000000000000000", - "max_rate": "0.200000000000000000", - "max_change_rate": "0.010000000000000000" - }, - "min_self_delegation": "1", - "delegator_address": "cosmos1l7yp6p7gmhvcjwngh6vvxcw2s8l60hkeznfarz", - "validator_address": "cosmosvaloper1l7yp6p7gmhvcjwngh6vvxcw2s8l60hke88ag03", - "pubkey": { - "@type": "/cosmos.crypto.ed25519.PubKey", - "key": "ECbqm7nyI8nJgB24gc9fdBb7v3a3OGSx6pfmMpy2XNA=" - }, - "value": { - "denom": "stake", - "amount": "100000000000" - } - } - ], - "memo": "b56974b6f467580c92b7a0fe752797eca18915d0@192.168.1.214:26656", - "timeout_height": "0", - 
"extension_options": [], - "non_critical_extension_options": [] - }, - "auth_info": { - "signer_infos": [ - { - "public_key": { - "@type": "/cosmos.crypto.secp256k1.PubKey", - "key": "A7t/fqE7mjd+KK1dCev7LtDxy3cQ2v6NCzRrVXUGxBR2" - }, - "mode_info": { - "single": { - "mode": "SIGN_MODE_DIRECT" - } - }, - "sequence": "0" - } - ], - "fee": { - "amount": [], - "gas_limit": "200000", - "payer": "", - "granter": "" - } - }, - "signatures": [ - "hsNHETTZp2u1Q9lhtR98bnupI+jYJx+zcqlJcNv7CIkIqtOOIWFSUy7WKGhvYPzSslHt691KG+VHph63JMjw/A==" - ] - } - ] - }, - "gov": { - "starting_proposal_id": "1", - "deposits": [], - "votes": [], - "proposals": [], - "deposit_params": { - "min_deposit": [ - { - "denom": "stake", - "amount": "10000000" - } - ], - "max_deposit_period": "172800s" - }, - "voting_params": { - "voting_period": "172800s" - }, - "tally_params": { - "quorum": "0.334000000000000000", - "threshold": "0.500000000000000000", - "veto_threshold": "0.334000000000000000" - } - }, - "ibc": { - "client_genesis": { - "clients": [], - "clients_consensus": [], - "clients_metadata": [], - "params": { - "allowed_clients": [ - "06-solomachine", - "07-tendermint" - ] - }, - "create_localhost": false, - "next_client_sequence": "0" - }, - "connection_genesis": { - "connections": [], - "client_connection_paths": [], - "next_connection_sequence": "0" - }, - "channel_genesis": { - "channels": [], - "acknowledgements": [], - "commitments": [], - "receipts": [], - "send_sequences": [], - "recv_sequences": [], - "ack_sequences": [], - "next_channel_sequence": "0" - } - }, - "mint": { - "minter": { - "inflation": "0.130000000000000000", - "annual_provisions": "0.000000000000000000" - }, - "params": { - "mint_denom": "stake", - "inflation_rate_change": "0.130000000000000000", - "inflation_max": "0.200000000000000000", - "inflation_min": "0.070000000000000000", - "goal_bonded": "0.670000000000000000", - "blocks_per_year": "6311520" - } - }, - "params": null, - "slashing": { - "params": { - "signed_blocks_window": "100", - "min_signed_per_window": "0.500000000000000000", - "downtime_jail_duration": "600s", - "slash_fraction_double_sign": "0.050000000000000000", - "slash_fraction_downtime": "0.010000000000000000" - }, - "signing_infos": [], - "missed_blocks": [] - }, - "staking": { - "params": { - "unbonding_time": "1814400s", - "max_validators": 100, - "max_entries": 7, - "historical_entries": 10000, - "bond_denom": "stake" - }, - "last_total_power": "0", - "last_validator_powers": [], - "validators": [], - "delegations": [], - "unbonding_delegations": [], - "redelegations": [], - "exported": false - }, - "transfer": { - "port_id": "transfer", - "denom_traces": [], - "params": { - "send_enabled": true, - "receive_enabled": true - } - }, - "upgrade": {}, - "vesting": {} - } -} \ No newline at end of file diff --git a/ci/chains/gaia/v4.1.0/ibc-1/config/gentx/gentx-b56974b6f467580c92b7a0fe752797eca18915d0.json b/ci/chains/gaia/v4.1.0/ibc-1/config/gentx/gentx-b56974b6f467580c92b7a0fe752797eca18915d0.json deleted file mode 100644 index afb3aaad30..0000000000 --- a/ci/chains/gaia/v4.1.0/ibc-1/config/gentx/gentx-b56974b6f467580c92b7a0fe752797eca18915d0.json +++ /dev/null @@ -1 +0,0 @@ 
-{"body":{"messages":[{"@type":"/cosmos.staking.v1beta1.MsgCreateValidator","description":{"moniker":"ibc-1","identity":"","website":"","security_contact":"","details":""},"commission":{"rate":"0.100000000000000000","max_rate":"0.200000000000000000","max_change_rate":"0.010000000000000000"},"min_self_delegation":"1","delegator_address":"cosmos1l7yp6p7gmhvcjwngh6vvxcw2s8l60hkeznfarz","validator_address":"cosmosvaloper1l7yp6p7gmhvcjwngh6vvxcw2s8l60hke88ag03","pubkey":{"@type":"/cosmos.crypto.ed25519.PubKey","key":"ECbqm7nyI8nJgB24gc9fdBb7v3a3OGSx6pfmMpy2XNA="},"value":{"denom":"stake","amount":"100000000000"}}],"memo":"b56974b6f467580c92b7a0fe752797eca18915d0@192.168.1.214:26656","timeout_height":"0","extension_options":[],"non_critical_extension_options":[]},"auth_info":{"signer_infos":[{"public_key":{"@type":"/cosmos.crypto.secp256k1.PubKey","key":"A7t/fqE7mjd+KK1dCev7LtDxy3cQ2v6NCzRrVXUGxBR2"},"mode_info":{"single":{"mode":"SIGN_MODE_DIRECT"}},"sequence":"0"}],"fee":{"amount":[],"gas_limit":"200000","payer":"","granter":""}},"signatures":["hsNHETTZp2u1Q9lhtR98bnupI+jYJx+zcqlJcNv7CIkIqtOOIWFSUy7WKGhvYPzSslHt691KG+VHph63JMjw/A=="]} diff --git a/ci/chains/gaia/v4.1.0/ibc-1/config/node_key.json b/ci/chains/gaia/v4.1.0/ibc-1/config/node_key.json deleted file mode 100644 index 095f1d5de5..0000000000 --- a/ci/chains/gaia/v4.1.0/ibc-1/config/node_key.json +++ /dev/null @@ -1 +0,0 @@ -{"priv_key":{"type":"tendermint/PrivKeyEd25519","value":"WJ0+t+9V9WQZtXI0lGu9lMjTNmWn/KNv8oVvbxu3BnwwPzd52rKilEaJUh6B2tj4+dKQbcmiWAB9tZKwlR9vIw=="}} \ No newline at end of file diff --git a/ci/chains/gaia/v4.1.0/ibc-1/config/priv_validator_key.json b/ci/chains/gaia/v4.1.0/ibc-1/config/priv_validator_key.json deleted file mode 100644 index 826fedc842..0000000000 --- a/ci/chains/gaia/v4.1.0/ibc-1/config/priv_validator_key.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "address": "DB69AC7415E648FEE0FBBA6325B465C9BF722043", - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "ECbqm7nyI8nJgB24gc9fdBb7v3a3OGSx6pfmMpy2XNA=" - }, - "priv_key": { - "type": "tendermint/PrivKeyEd25519", - "value": "YpdaGAiOAWB6OUvkV+1A24bPbP9UsEKqWIKnDvpdWr4QJuqbufIjycmAHbiBz190Fvu/drc4ZLHql+YynLZc0A==" - } -} \ No newline at end of file diff --git a/ci/chains/gaia/v4.1.0/ibc-1/key_seed.json b/ci/chains/gaia/v4.1.0/ibc-1/key_seed.json deleted file mode 100644 index 61ad598158..0000000000 --- a/ci/chains/gaia/v4.1.0/ibc-1/key_seed.json +++ /dev/null @@ -1 +0,0 @@ -{"name":"user","type":"local","address":"cosmos1448z7w3j5gsht5fa099wzdqtz9hv7ryur05rm8","pubkey":"cosmospub1addwnpepqw98q342d8vad42pu8yv9u5nyekrrjzxcft4cu02qtwgvljck6mu75tz9j2","mnemonic":"trip double kite slam town swim train nice vivid torch soon artwork capital sweet struggle raise moral vault unique tape cloth elephant tenant lazy"} diff --git a/ci/chains/gaia/v4.1.0/ibc-1/keyring-test/ad4e2f3a32a22175d13d794ae1340b116ecf0c9c.address b/ci/chains/gaia/v4.1.0/ibc-1/keyring-test/ad4e2f3a32a22175d13d794ae1340b116ecf0c9c.address deleted file mode 100644 index 2d31e6a69d..0000000000 --- a/ci/chains/gaia/v4.1.0/ibc-1/keyring-test/ad4e2f3a32a22175d13d794ae1340b116ecf0c9c.address +++ /dev/null @@ -1 +0,0 @@ 
-eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wMy0xMCAxODoxMTozMy4xNzA0ODQ5NzYgLTA1MDAgRVNUIG09KzAuMTYyMTY5MTY1IiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiV3lSNHhGcS1LX1FVS3VEViJ9.uJXyQ92NvMUNARblxv7LkNkCJr13S0RSQEde8TJgBkyPC-tyxjTSqA.2pM_o8dJOrMO65VY.IyhKLjlCLDCNivHsToX4hDd8MN_Gj_tyqk1Eoww_pqjY0l09sgFZZPbeJkSAXYkzqjn731Ex2EnoWz1xaI3MTahSFBeQzCp8pEzph1gnzoW7xj3cxsPloHOnnMubtUFi30WX_F0M0GrnsMeFIu8AGgg2WiRbFRLKKvglRwcOvImqvL_BGvjtOeyTpEAgNcpTFV-HG4dQ7KFpktD1xjrZGaNZnUkiKXyxopXjFb91iK6_gA.b5X94oQnSbvvABVmetKeUg \ No newline at end of file diff --git a/ci/chains/gaia/v4.1.0/ibc-1/keyring-test/ff881d07c8ddd9893a68be98c361ca81ffa7ded9.address b/ci/chains/gaia/v4.1.0/ibc-1/keyring-test/ff881d07c8ddd9893a68be98c361ca81ffa7ded9.address deleted file mode 100644 index d0cfc02049..0000000000 --- a/ci/chains/gaia/v4.1.0/ibc-1/keyring-test/ff881d07c8ddd9893a68be98c361ca81ffa7ded9.address +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wMy0xMCAxODoxMTozMS45ODA3ODk4MTEgLTA1MDAgRVNUIG09KzAuMTYyMTcxNzQ1IiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiM1FmLUxhZHhPSy1DQUhCUiJ9.UKlMcMJ92PPqca6Qp1PCqdIpRnmUQ0qS3azHrUWvQusjk0x2-meSdQ.RvQGnB2W_wd0vM2y.pP6wnn21-BCh4Nnvr3cfY7uAQ8fq8WTx82r_TuAi4c1v7Ht_bbqlUo00z3IASHw8pbaabjUG2G2EzwdQXRocdofEhlObmDSs1zt6vvq3_dNQcQicCwchzxXQg4YOtTDjGKXfStHquF7Mp3r_AJWC1P180TB304onvNqAvDQ4Cgcm2NvTcKaCsBrsyTG5FlUrx5iWGFcdcLCNmsJya0AZRLJPv9_AydM8mJjNyledAMOwx2J-xpNpv1l8.H3Yb2CX4QXxg62Dsz9bdGw \ No newline at end of file diff --git a/ci/chains/gaia/v4.1.0/ibc-1/keyring-test/user.info b/ci/chains/gaia/v4.1.0/ibc-1/keyring-test/user.info deleted file mode 100644 index a6cc1b40fd..0000000000 --- a/ci/chains/gaia/v4.1.0/ibc-1/keyring-test/user.info +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wMy0xMCAxODoxMTozMy4xNTQyNzYyNjggLTA1MDAgRVNUIG09KzAuMTQ1OTYwNDU0IiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiY1JHRS1zSURoTXpQSTl5MCJ9.wnB46OdjIw8sjPlHz210M8KdMT3fV8IEDYs80XPT3hYuWVyujpVQxQ.F4UkwlZvSudsxCox._PTGfc7oXgB_t3oLzLZPZIPVhADRE2bPkF_vr2sgdx5Z_ZH_sIh5QN8-ai7d0nP_o9JIAmt9omdUSbbrCxRuBFcX9jjOoSGOVOetPWL3z3DvwpsJd2URXa5axUYWd_xpXL-qKUvDWSyAdLGNuzKvROezsfCeMHxUnVD4k1bxC8BnFjxasFxcbYYVeAEoMPAZvQTkl8tou0k8I48lZla3NoqADKxteVb9nlOR9ETMcEDvNmyJ9lJmtlFWWc2Z29Zn1vhw9AyLGMjf0sGDdRx6KrZZSVvHqPSEb_hqATAQRydhi0btRo5GRifJIKdIJc5o6HA2VmH010cUSZlHWUpecam9D9SjpRM.deJYBz0D8wmMX3KNsibaEg \ No newline at end of file diff --git a/ci/chains/gaia/v4.1.0/ibc-1/keyring-test/validator.info b/ci/chains/gaia/v4.1.0/ibc-1/keyring-test/validator.info deleted file mode 100644 index cc3daec056..0000000000 --- a/ci/chains/gaia/v4.1.0/ibc-1/keyring-test/validator.info +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wMy0xMCAxODoxMTozMS45NjQ1NzExNzQgLTA1MDAgRVNUIG09KzAuMTQ1OTUzMDg3IiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiTnlnUVNnNGQ3dzRhUnN5eCJ9.kGy2rUdFlT6NNF60b2sBUIwQhM3AFwy4tQEcKHdeDBqvHcNNm5JgBA.7WHjfrN3sJuiY0Vx.NQGQRCEc9xy4m_XCjMLVyzLmgxU02kATCzE6i7l7DV554MmELFcNVvh-4PEhT95Gr2t3zZOOlePj3TQnQsQiGJWdaeJLhx8ki9KNHakH1-gX_GNkJH7SwMbs1CZ-sf6w9Q5AJHUU3OSLe5WyzLM1PEGB4ioje1W9eLkg7mxgVkGd9XtbXzZ4HBqigwFI6GyBgCXZREvFp4JHjJzqTwZ0DAEpzAz6rkjwk-0C_1snQdknLVD_cHyVgphrJx2d_2gDXXfu5py6SOOai2yakFeDfqn7qetS1OFl6JJGJlwjO7LeKUlftmG3NsirQz7p1_tXCJ2UbOk4UxzEB0apv4IEN7gooSjmiNGgFFBkguiQxBtgo7rt.W07CCcXAsSr25-1d2zM3iQ \ No newline at end of file diff --git a/ci/chains/gaia/v4.1.0/ibc-1/validator_seed.json b/ci/chains/gaia/v4.1.0/ibc-1/validator_seed.json deleted file mode 100644 index 
ab63235057..0000000000 --- a/ci/chains/gaia/v4.1.0/ibc-1/validator_seed.json +++ /dev/null @@ -1 +0,0 @@ -{"name":"validator","type":"local","address":"cosmos1l7yp6p7gmhvcjwngh6vvxcw2s8l60hkeznfarz","pubkey":"cosmospub1addwnpepqwah7l4p8wdrwl3g44wsn6lm9mg0rjmhzrd0argtx3442agxcs28vay605h","mnemonic":"climb sight despair thank anxiety brown busy south fit buffalo follow bronze episode length few spirit auction salad donate elegant problem logic tiger ocean"} diff --git a/ci/chains/gaia/v4.2.0/ibc-0/config/addrbook.json b/ci/chains/gaia/v4.2.0/ibc-0/config/addrbook.json deleted file mode 100644 index f7a8c71e83..0000000000 --- a/ci/chains/gaia/v4.2.0/ibc-0/config/addrbook.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "key": "af219e789fd5acf4bfdc46d0", - "addrs": [] -} \ No newline at end of file diff --git a/ci/chains/gaia/v4.2.0/ibc-0/config/app.toml b/ci/chains/gaia/v4.2.0/ibc-0/config/app.toml deleted file mode 100644 index 54de73e392..0000000000 --- a/ci/chains/gaia/v4.2.0/ibc-0/config/app.toml +++ /dev/null @@ -1,152 +0,0 @@ -# This is a TOML config file. -# For more information, see https://github.com/toml-lang/toml - -############################################################################### -### Base Configuration ### -############################################################################### - -# The minimum gas prices a validator is willing to accept for processing a -# transaction. A transaction's fees must meet the minimum of any denomination -# specified in this config (e.g. 0.25token1;0.0001token2). -minimum-gas-prices = "" - -# default: the last 100 states are kept in addition to every 500th state; pruning at 10 block intervals -# nothing: all historic states will be saved, nothing will be deleted (i.e. archiving node) -# everything: all saved states will be deleted, storing only the current state; pruning at 10 block intervals -# custom: allow pruning options to be manually specified through 'pruning-keep-recent', 'pruning-keep-every', and 'pruning-interval' -pruning = "default" - -# These are applied if and only if the pruning strategy is custom. -pruning-keep-recent = "0" -pruning-keep-every = "0" -pruning-interval = "0" - -# HaltHeight contains a non-zero block height at which a node will gracefully -# halt and shutdown that can be used to assist upgrades and testing. -# -# Note: Commitment of state will be attempted on the corresponding block. -halt-height = 0 - -# HaltTime contains a non-zero minimum block time (in Unix seconds) at which -# a node will gracefully halt and shutdown that can be used to assist upgrades -# and testing. -# -# Note: Commitment of state will be attempted on the corresponding block. -halt-time = 0 - -# MinRetainBlocks defines the minimum block height offset from the current -# block being committed, such that all blocks past this offset are pruned -# from Tendermint. It is used as part of the process of determining the -# ResponseCommit.RetainHeight value during ABCI Commit. A value of 0 indicates -# that no blocks should be pruned. -# -# This configuration value is only responsible for pruning Tendermint blocks. -# It has no bearing on application state pruning which is determined by the -# "pruning-*" configurations. -# -# Note: Tendermint block pruning is dependant on this parameter in conunction -# with the unbonding (safety threshold) period, state pruning and state sync -# snapshot parameters to determine the correct minimum value of -# ResponseCommit.RetainHeight. -min-retain-blocks = 0 - -# InterBlockCache enables inter-block caching. 
-inter-block-cache = true - -# IndexEvents defines the set of events in the form {eventType}.{attributeKey}, -# which informs Tendermint what to index. If empty, all events will be indexed. -# -# Example: -# ["message.sender", "message.recipient"] -index-events = [] - -############################################################################### -### Telemetry Configuration ### -############################################################################### - -[telemetry] - -# Prefixed with keys to separate services. -service-name = "" - -# Enabled enables the application telemetry functionality. When enabled, -# an in-memory sink is also enabled by default. Operators may also enabled -# other sinks such as Prometheus. -enabled = false - -# Enable prefixing gauge values with hostname. -enable-hostname = false - -# Enable adding hostname to labels. -enable-hostname-label = false - -# Enable adding service to labels. -enable-service-label = false - -# PrometheusRetentionTime, when positive, enables a Prometheus metrics sink. -prometheus-retention-time = 0 - -# GlobalLabels defines a global set of name/value label tuples applied to all -# metrics emitted using the wrapper functions defined in telemetry package. -# -# Example: -# [["chain_id", "cosmoshub-1"]] -global-labels = [ -] - -############################################################################### -### API Configuration ### -############################################################################### - -[api] - -# Enable defines if the API server should be enabled. -enable = false - -# Swagger defines if swagger documentation should automatically be registered. -swagger = false - -# Address defines the API server to listen on. -address = "tcp://0.0.0.0:1317" - -# MaxOpenConnections defines the number of maximum open connections. -max-open-connections = 1000 - -# RPCReadTimeout defines the Tendermint RPC read timeout (in seconds). -rpc-read-timeout = 10 - -# RPCWriteTimeout defines the Tendermint RPC write timeout (in seconds). -rpc-write-timeout = 0 - -# RPCMaxBodyBytes defines the Tendermint maximum response body (in bytes). -rpc-max-body-bytes = 1000000 - -# EnableUnsafeCORS defines if CORS should be enabled (unsafe - use it at your own risk). -enabled-unsafe-cors = false - -############################################################################### -### gRPC Configuration ### -############################################################################### - -[grpc] - -# Enable defines if the gRPC server should be enabled. -enable = true - -# Address defines the gRPC server address to bind to. -address = "0.0.0.0:9090" - -############################################################################### -### State Sync Configuration ### -############################################################################### - -# State sync snapshots allow other nodes to rapidly join the network without replaying historical -# blocks, instead downloading and applying a snapshot of the application state at a given height. -[state-sync] - -# snapshot-interval specifies the block interval at which local state sync snapshots are -# taken (0 to disable). Must be a multiple of pruning-keep-every. -snapshot-interval = 0 - -# snapshot-keep-recent specifies the number of recent snapshots to keep and serve (0 to keep all). 
-snapshot-keep-recent = 2 diff --git a/ci/chains/gaia/v4.2.0/ibc-0/config/config.toml b/ci/chains/gaia/v4.2.0/ibc-0/config/config.toml deleted file mode 100644 index 4714078da1..0000000000 --- a/ci/chains/gaia/v4.2.0/ibc-0/config/config.toml +++ /dev/null @@ -1,393 +0,0 @@ -# This is a TOML config file. -# For more information, see https://github.com/toml-lang/toml - -# NOTE: Any path below can be absolute (e.g. "/var/myawesomeapp/data") or -# relative to the home directory (e.g. "data"). The home directory is -# "$HOME/.tendermint" by default, but could be changed via $TMHOME env variable -# or --home cmd flag. - -####################################################################### -### Main Base Config Options ### -####################################################################### - -# TCP or UNIX socket address of the ABCI application, -# or the name of an ABCI application compiled in with the Tendermint binary -proxy_app = "tcp://127.0.0.1:26658" - -# A custom human readable name for this node -moniker = "ibc-0" - -# If this node is many blocks behind the tip of the chain, FastSync -# allows them to catchup quickly by downloading blocks in parallel -# and verifying their commits -fast_sync = true - -# Database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb -# * goleveldb (github.com/syndtr/goleveldb - most popular implementation) -# - pure go -# - stable -# * cleveldb (uses levigo wrapper) -# - fast -# - requires gcc -# - use cleveldb build tag (go build -tags cleveldb) -# * boltdb (uses etcd's fork of bolt - github.com/etcd-io/bbolt) -# - EXPERIMENTAL -# - may be faster is some use-cases (random reads - indexer) -# - use boltdb build tag (go build -tags boltdb) -# * rocksdb (uses github.com/tecbot/gorocksdb) -# - EXPERIMENTAL -# - requires gcc -# - use rocksdb build tag (go build -tags rocksdb) -# * badgerdb (uses github.com/dgraph-io/badger) -# - EXPERIMENTAL -# - use badgerdb build tag (go build -tags badgerdb) -db_backend = "goleveldb" - -# Database directory -db_dir = "data" - -# Output level for logging, including package level options -log_level = "info" - -# Output format: 'plain' (colored text) or 'json' -log_format = "plain" - -##### additional base config options ##### - -# Path to the JSON file containing the initial validator set and other meta data -genesis_file = "config/genesis.json" - -# Path to the JSON file containing the private key to use as a validator in the consensus protocol -priv_validator_key_file = "config/priv_validator_key.json" - -# Path to the JSON file containing the last sign state of a validator -priv_validator_state_file = "data/priv_validator_state.json" - -# TCP or UNIX socket address for Tendermint to listen on for -# connections from an external PrivValidator process -priv_validator_laddr = "" - -# Path to the JSON file containing the private key to use for node authentication in the p2p protocol -node_key_file = "config/node_key.json" - -# Mechanism to connect to the ABCI application: socket | grpc -abci = "socket" - -# If true, query the ABCI app on connecting to a new peer -# so the app can decide if we should keep the connection or not -filter_peers = false - - -####################################################################### -### Advanced Configuration Options ### -####################################################################### - -####################################################### -### RPC Server Configuration Options ### -####################################################### -[rpc] - -# TCP or UNIX 
socket address for the RPC server to listen on -laddr = "tcp://0.0.0.0:26657" - -# A list of origins a cross-domain request can be executed from -# Default value '[]' disables cors support -# Use '["*"]' to allow any origin -cors_allowed_origins = [] - -# A list of methods the client is allowed to use with cross-domain requests -cors_allowed_methods = ["HEAD", "GET", "POST", ] - -# A list of non simple headers the client is allowed to use with cross-domain requests -cors_allowed_headers = ["Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time", ] - -# TCP or UNIX socket address for the gRPC server to listen on -# NOTE: This server only supports /broadcast_tx_commit -grpc_laddr = "" - -# Maximum number of simultaneous connections. -# Does not include RPC (HTTP&WebSocket) connections. See max_open_connections -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} -# 1024 - 40 - 10 - 50 = 924 = ~900 -grpc_max_open_connections = 900 - -# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool -unsafe = false - -# Maximum number of simultaneous connections (including WebSocket). -# Does not include gRPC connections. See grpc_max_open_connections -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} -# 1024 - 40 - 10 - 50 = 924 = ~900 -max_open_connections = 900 - -# Maximum number of unique clientIDs that can /subscribe -# If you're using /broadcast_tx_commit, set to the estimated maximum number -# of broadcast_tx_commit calls per block. -max_subscription_clients = 100 - -# Maximum number of unique queries a given client can /subscribe to -# If you're using GRPC (or Local RPC client) and /broadcast_tx_commit, set to -# the estimated # maximum number of broadcast_tx_commit calls per block. -max_subscriptions_per_client = 5 - -# How long to wait for a tx to be committed during /broadcast_tx_commit. -# WARNING: Using a value larger than 10s will result in increasing the -# global HTTP write timeout, which applies to all connections and endpoints. -# See https://github.com/tendermint/tendermint/issues/3435 -timeout_broadcast_tx_commit = "10s" - -# Maximum size of request body, in bytes -max_body_bytes = 1000000 - -# Maximum size of request header, in bytes -max_header_bytes = 1048576 - -# The path to a file containing certificate that is used to create the HTTPS server. -# Might be either absolute path or path related to Tendermint's config directory. -# If the certificate is signed by a certificate authority, -# the certFile should be the concatenation of the server's certificate, any intermediates, -# and the CA's certificate. -# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. -# Otherwise, HTTP server is run. -tls_cert_file = "" - -# The path to a file containing matching private key that is used to create the HTTPS server. -# Might be either absolute path or path related to Tendermint's config directory. -# NOTE: both tls-cert-file and tls-key-file must be present for Tendermint to create HTTPS server. -# Otherwise, HTTP server is run. 
-tls_key_file = "" - -# pprof listen address (https://golang.org/pkg/net/http/pprof) -pprof_laddr = "localhost:6060" - -####################################################### -### P2P Configuration Options ### -####################################################### -[p2p] - -# Address to listen for incoming connections -laddr = "tcp://0.0.0.0:26656" - -# Address to advertise to peers for them to dial -# If empty, will use the same port as the laddr, -# and will introspect on the listener or use UPnP -# to figure out the address. -external_address = "" - -# Comma separated list of seed nodes to connect to -seeds = "" - -# Comma separated list of nodes to keep persistent connections to -persistent_peers = "" - -# UPNP port forwarding -upnp = false - -# Path to address book -addr_book_file = "config/addrbook.json" - -# Set true for strict address routability rules -# Set false for private or local networks -addr_book_strict = true - -# Maximum number of inbound peers -max_num_inbound_peers = 40 - -# Maximum number of outbound peers to connect to, excluding persistent peers -max_num_outbound_peers = 10 - -# List of node IDs, to which a connection will be (re)established ignoring any existing limits -unconditional_peer_ids = "" - -# Maximum pause when redialing a persistent peer (if zero, exponential backoff is used) -persistent_peers_max_dial_period = "0s" - -# Time to wait before flushing messages out on the connection -flush_throttle_timeout = "100ms" - -# Maximum size of a message packet payload, in bytes -max_packet_msg_payload_size = 1024 - -# Rate at which packets can be sent, in bytes/second -send_rate = 5120000 - -# Rate at which packets can be received, in bytes/second -recv_rate = 5120000 - -# Set true to enable the peer-exchange reactor -pex = true - -# Seed mode, in which node constantly crawls the network and looks for -# peers. If another node asks it for addresses, it responds and disconnects. -# -# Does not work if the peer-exchange reactor is disabled. -seed_mode = false - -# Comma separated list of peer IDs to keep private (will not be gossiped to other peers) -private_peer_ids = "" - -# Toggle to disable guard against peers connecting from the same ip. -allow_duplicate_ip = false - -# Peer connection configuration. -handshake_timeout = "20s" -dial_timeout = "3s" - -####################################################### -### Mempool Configuration Option ### -####################################################### -[mempool] - -recheck = true -broadcast = true -wal_dir = "" - -# Maximum number of transactions in the mempool -size = 5000 - -# Limit the total size of all txs in the mempool. -# This only accounts for raw transactions (e.g. given 1MB transactions and -# max_txs_bytes=5MB, mempool will only accept 5 transactions). -max_txs_bytes = 1073741824 - -# Size of the cache (used to filter transactions we saw earlier) in transactions -cache_size = 10000 - -# Do not remove invalid transactions from the cache (default: false) -# Set to true if it's not possible for any invalid transaction to become valid -# again in the future. -keep-invalid-txs-in-cache = false - -# Maximum size of a single transaction. -# NOTE: the max size of a tx transmitted over the network is {max_tx_bytes}. -max_tx_bytes = 1048576 - -# Maximum size of a batch of transactions to send to a peer -# Including space needed by encoding (one varint per transaction). 
-# XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796 -max_batch_bytes = 0 - -####################################################### -### State Sync Configuration Options ### -####################################################### -[statesync] -# State sync rapidly bootstraps a new node by discovering, fetching, and restoring a state machine -# snapshot from peers instead of fetching and replaying historical blocks. Requires some peers in -# the network to take and serve state machine snapshots. State sync is not attempted if the node -# has any local state (LastBlockHeight > 0). The node will have a truncated block history, -# starting from the height of the snapshot. -enable = false - -# RPC servers (comma-separated) for light client verification of the synced state machine and -# retrieval of state data for node bootstrapping. Also needs a trusted height and corresponding -# header hash obtained from a trusted source, and a period during which validators can be trusted. -# -# For Cosmos SDK-based chains, trust_period should usually be about 2/3 of the unbonding time (~2 -# weeks) during which they can be financially punished (slashed) for misbehavior. -rpc_servers = "" -trust_height = 0 -trust_hash = "" -trust_period = "168h0m0s" - -# Time to spend discovering snapshots before initiating a restore. -discovery_time = "15s" - -# Temporary directory for state sync snapshot chunks, defaults to the OS tempdir (typically /tmp). -# Will create a new, randomly named directory within, and remove it when done. -temp_dir = "" - -####################################################### -### Fast Sync Configuration Connections ### -####################################################### -[fastsync] - -# Fast Sync version to use: -# 1) "v0" (default) - the legacy fast sync implementation -# 2) "v1" - refactor of v0 version for better testability -# 2) "v2" - complete redesign of v0, optimized for testability & readability -version = "v0" - -####################################################### -### Consensus Configuration Options ### -####################################################### -[consensus] - -wal_file = "data/cs.wal/wal" - -# How long we wait for a proposal block before prevoting nil -timeout_propose = "1s" -# How much timeout_propose increases with each round -timeout_propose_delta = "500ms" -# How long we wait after receiving +2/3 prevotes for “anything” (ie. not a single block or nil) -timeout_prevote = "1s" -# How much the timeout_prevote increases with each round -timeout_prevote_delta = "500ms" -# How long we wait after receiving +2/3 precommits for “anything” (ie. not a single block or nil) -timeout_precommit = "1s" -# How much the timeout_precommit increases with each round -timeout_precommit_delta = "500ms" -# How long we wait after committing a block, before starting on the new -# height (this gives us a chance to receive some more precommits, even -# though we already have +2/3). -timeout_commit = "1s" - -# How many blocks to look back to check existence of the node's consensus votes before joining consensus -# When non-zero, the node will panic upon restart -# if the same consensus key was used to sign {double_sign_check_height} last blocks. -# So, validators should stop the state machine, wait for some blocks, and then restart the state machine to avoid panic. 
-double_sign_check_height = 0 - -# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) -skip_timeout_commit = false - -# EmptyBlocks mode and possible interval between empty blocks -create_empty_blocks = true -create_empty_blocks_interval = "0s" - -# Reactor sleep duration parameters -peer_gossip_sleep_duration = "100ms" -peer_query_maj23_sleep_duration = "2s" - -####################################################### -### Transaction Indexer Configuration Options ### -####################################################### -[tx_index] - -# What indexer to use for transactions -# -# The application will set which txs to index. In some cases a node operator will be able -# to decide which txs to index based on configuration set in the application. -# -# Options: -# 1) "null" -# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). -# - When "kv" is chosen "tx.height" and "tx.hash" will always be indexed. -indexer = "kv" - -####################################################### -### Instrumentation Configuration Options ### -####################################################### -[instrumentation] - -# When true, Prometheus metrics are served under /metrics on -# PrometheusListenAddr. -# Check out the documentation for the list of available metrics. -prometheus = false - -# Address to listen for Prometheus collector(s) connections -prometheus_listen_addr = ":26660" - -# Maximum number of simultaneous connections. -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -max_open_connections = 3 - -# Instrumentation namespace -namespace = "tendermint" diff --git a/ci/chains/gaia/v4.2.0/ibc-0/config/genesis.json b/ci/chains/gaia/v4.2.0/ibc-0/config/genesis.json deleted file mode 100644 index b855cf5be0..0000000000 --- a/ci/chains/gaia/v4.2.0/ibc-0/config/genesis.json +++ /dev/null @@ -1,314 +0,0 @@ -{ - "genesis_time": "2021-06-14T13:12:59.924746991Z", - "chain_id": "ibc-0", - "initial_height": "1", - "consensus_params": { - "block": { - "max_bytes": "22020096", - "max_gas": "-1", - "time_iota_ms": "1000" - }, - "evidence": { - "max_age_num_blocks": "100000", - "max_age_duration": "172800000000000", - "max_bytes": "1048576" - }, - "validator": { - "pub_key_types": [ - "ed25519" - ] - }, - "version": {} - }, - "app_hash": "", - "app_state": { - "auth": { - "params": { - "max_memo_characters": "256", - "tx_sig_limit": "7", - "tx_size_cost_per_byte": "10", - "sig_verify_cost_ed25519": "590", - "sig_verify_cost_secp256k1": "1000" - }, - "accounts": [ - { - "@type": "/cosmos.auth.v1beta1.BaseAccount", - "address": "cosmos13sg5tlgu37spr854gekelgnnyzlucrq740y9us", - "pub_key": null, - "account_number": "0", - "sequence": "0" - }, - { - "@type": "/cosmos.auth.v1beta1.BaseAccount", - "address": "cosmos174vqlk0w3xsqkzg8anuct387hlqkwtfahqu9j8", - "pub_key": null, - "account_number": "0", - "sequence": "0" - }, - { - "@type": "/cosmos.auth.v1beta1.BaseAccount", - "address": "cosmos1pggja8l6rcpknqwpgyjysk7a2sm9fv6rhguha2", - "pub_key": null, - "account_number": "0", - "sequence": "0" - } - ] - }, - "bank": { - "params": { - "send_enabled": [], - "default_send_enabled": true - }, - "balances": [ - { - "address": "cosmos1pggja8l6rcpknqwpgyjysk7a2sm9fv6rhguha2", - "coins": [ - { - "denom": "stake", - "amount": "100000000000" - } - ] - }, - { - "address": "cosmos13sg5tlgu37spr854gekelgnnyzlucrq740y9us", - "coins": [ - { - "denom": "samoleans", - "amount": 
"100000000000" - }, - { - "denom": "stake", - "amount": "100000000000" - } - ] - }, - { - "address": "cosmos174vqlk0w3xsqkzg8anuct387hlqkwtfahqu9j8", - "coins": [ - { - "denom": "samoleans", - "amount": "100000000000" - }, - { - "denom": "stake", - "amount": "100000000000" - } - ] - } - ], - "supply": [ - { - "denom": "samoleans", - "amount": "200000000000" - }, - { - "denom": "stake", - "amount": "300000000000" - } - ], - "denom_metadata": [] - }, - "capability": { - "index": "1", - "owners": [] - }, - "crisis": { - "constant_fee": { - "denom": "stake", - "amount": "1000" - } - }, - "distribution": { - "params": { - "community_tax": "0.020000000000000000", - "base_proposer_reward": "0.010000000000000000", - "bonus_proposer_reward": "0.040000000000000000", - "withdraw_addr_enabled": true - }, - "fee_pool": { - "community_pool": [] - }, - "delegator_withdraw_infos": [], - "previous_proposer": "", - "outstanding_rewards": [], - "validator_accumulated_commissions": [], - "validator_historical_rewards": [], - "validator_current_rewards": [], - "delegator_starting_infos": [], - "validator_slash_events": [] - }, - "evidence": { - "evidence": [] - }, - "genutil": { - "gen_txs": [ - { - "body": { - "messages": [ - { - "@type": "/cosmos.staking.v1beta1.MsgCreateValidator", - "description": { - "moniker": "ibc-0", - "identity": "", - "website": "", - "security_contact": "", - "details": "" - }, - "commission": { - "rate": "0.100000000000000000", - "max_rate": "0.200000000000000000", - "max_change_rate": "0.010000000000000000" - }, - "min_self_delegation": "1", - "delegator_address": "cosmos1pggja8l6rcpknqwpgyjysk7a2sm9fv6rhguha2", - "validator_address": "cosmosvaloper1pggja8l6rcpknqwpgyjysk7a2sm9fv6rjugz3e", - "pubkey": { - "@type": "/cosmos.crypto.ed25519.PubKey", - "key": "IWE690GXTZ+i+l2cqDS4AO7U5YZEViGea7WNjcxH61I=" - }, - "value": { - "denom": "stake", - "amount": "100000000000" - } - } - ], - "memo": "240c6c414bfb15dab6e170c7b1781de818736374@192.168.50.214:26656", - "timeout_height": "0", - "extension_options": [], - "non_critical_extension_options": [] - }, - "auth_info": { - "signer_infos": [ - { - "public_key": { - "@type": "/cosmos.crypto.secp256k1.PubKey", - "key": "AojvMkfmkHDkXDlQcQvl/55kHyREFMo6ML90610yb/yu" - }, - "mode_info": { - "single": { - "mode": "SIGN_MODE_DIRECT" - } - }, - "sequence": "0" - } - ], - "fee": { - "amount": [], - "gas_limit": "200000", - "payer": "", - "granter": "" - } - }, - "signatures": [ - "7Q0wEGWmQH9AwFRhcCJMNgnFrsho37/4EKhpafKWWOMvAGub2LfAt8iLuHck1mStJG4h3mO9eHUx5Kw2xpyFsA==" - ] - } - ] - }, - "gov": { - "starting_proposal_id": "1", - "deposits": [], - "votes": [], - "proposals": [], - "deposit_params": { - "min_deposit": [ - { - "denom": "stake", - "amount": "10000000" - } - ], - "max_deposit_period": "200s" - }, - "voting_params": { - "voting_period": "200s" - }, - "tally_params": { - "quorum": "0.334000000000000000", - "threshold": "0.500000000000000000", - "veto_threshold": "0.334000000000000000" - } - }, - "ibc": { - "client_genesis": { - "clients": [], - "clients_consensus": [], - "clients_metadata": [], - "params": { - "allowed_clients": [ - "06-solomachine", - "07-tendermint" - ] - }, - "create_localhost": false, - "next_client_sequence": "0" - }, - "connection_genesis": { - "connections": [], - "client_connection_paths": [], - "next_connection_sequence": "0" - }, - "channel_genesis": { - "channels": [], - "acknowledgements": [], - "commitments": [], - "receipts": [], - "send_sequences": [], - "recv_sequences": [], - "ack_sequences": [], - 
"next_channel_sequence": "0" - } - }, - "mint": { - "minter": { - "inflation": "0.130000000000000000", - "annual_provisions": "0.000000000000000000" - }, - "params": { - "mint_denom": "stake", - "inflation_rate_change": "0.130000000000000000", - "inflation_max": "0.200000000000000000", - "inflation_min": "0.070000000000000000", - "goal_bonded": "0.670000000000000000", - "blocks_per_year": "6311520" - } - }, - "params": null, - "slashing": { - "params": { - "signed_blocks_window": "100", - "min_signed_per_window": "0.500000000000000000", - "downtime_jail_duration": "600s", - "slash_fraction_double_sign": "0.050000000000000000", - "slash_fraction_downtime": "0.010000000000000000" - }, - "signing_infos": [], - "missed_blocks": [] - }, - "staking": { - "params": { - "unbonding_time": "1814400s", - "max_validators": 100, - "max_entries": 7, - "historical_entries": 10000, - "bond_denom": "stake" - }, - "last_total_power": "0", - "last_validator_powers": [], - "validators": [], - "delegations": [], - "unbonding_delegations": [], - "redelegations": [], - "exported": false - }, - "transfer": { - "port_id": "transfer", - "denom_traces": [], - "params": { - "send_enabled": true, - "receive_enabled": true - } - }, - "upgrade": {}, - "vesting": {} - } -} \ No newline at end of file diff --git a/ci/chains/gaia/v4.2.0/ibc-0/config/gentx/gentx-240c6c414bfb15dab6e170c7b1781de818736374.json b/ci/chains/gaia/v4.2.0/ibc-0/config/gentx/gentx-240c6c414bfb15dab6e170c7b1781de818736374.json deleted file mode 100644 index 21c082511e..0000000000 --- a/ci/chains/gaia/v4.2.0/ibc-0/config/gentx/gentx-240c6c414bfb15dab6e170c7b1781de818736374.json +++ /dev/null @@ -1 +0,0 @@ -{"body":{"messages":[{"@type":"/cosmos.staking.v1beta1.MsgCreateValidator","description":{"moniker":"ibc-0","identity":"","website":"","security_contact":"","details":""},"commission":{"rate":"0.100000000000000000","max_rate":"0.200000000000000000","max_change_rate":"0.010000000000000000"},"min_self_delegation":"1","delegator_address":"cosmos1pggja8l6rcpknqwpgyjysk7a2sm9fv6rhguha2","validator_address":"cosmosvaloper1pggja8l6rcpknqwpgyjysk7a2sm9fv6rjugz3e","pubkey":{"@type":"/cosmos.crypto.ed25519.PubKey","key":"IWE690GXTZ+i+l2cqDS4AO7U5YZEViGea7WNjcxH61I="},"value":{"denom":"stake","amount":"100000000000"}}],"memo":"240c6c414bfb15dab6e170c7b1781de818736374@192.168.50.214:26656","timeout_height":"0","extension_options":[],"non_critical_extension_options":[]},"auth_info":{"signer_infos":[{"public_key":{"@type":"/cosmos.crypto.secp256k1.PubKey","key":"AojvMkfmkHDkXDlQcQvl/55kHyREFMo6ML90610yb/yu"},"mode_info":{"single":{"mode":"SIGN_MODE_DIRECT"}},"sequence":"0"}],"fee":{"amount":[],"gas_limit":"200000","payer":"","granter":""}},"signatures":["7Q0wEGWmQH9AwFRhcCJMNgnFrsho37/4EKhpafKWWOMvAGub2LfAt8iLuHck1mStJG4h3mO9eHUx5Kw2xpyFsA=="]} diff --git a/ci/chains/gaia/v4.2.0/ibc-0/config/node_key.json b/ci/chains/gaia/v4.2.0/ibc-0/config/node_key.json deleted file mode 100644 index b60e32e105..0000000000 --- a/ci/chains/gaia/v4.2.0/ibc-0/config/node_key.json +++ /dev/null @@ -1 +0,0 @@ -{"priv_key":{"type":"tendermint/PrivKeyEd25519","value":"I7b2zBJrrLurcrW3JZ2TkCsOpHsfe44Ql5T3ZCBTU5C28pjlytBjgkuIocu5QpqrSpUuK51NzI8NG+Bq++4PRQ=="}} \ No newline at end of file diff --git a/ci/chains/gaia/v4.2.0/ibc-0/config/priv_validator_key.json b/ci/chains/gaia/v4.2.0/ibc-0/config/priv_validator_key.json deleted file mode 100644 index 6cb3161da0..0000000000 --- a/ci/chains/gaia/v4.2.0/ibc-0/config/priv_validator_key.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "address": 
"8D52C26DC85F80E344534DF3053969135B1DA272", - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "IWE690GXTZ+i+l2cqDS4AO7U5YZEViGea7WNjcxH61I=" - }, - "priv_key": { - "type": "tendermint/PrivKeyEd25519", - "value": "07mTOftTScXOU8vtsUMSxwKXeKWOmjfXUj5YEse1EeohYTr3QZdNn6L6XZyoNLgA7tTlhkRWIZ5rtY2NzEfrUg==" - } -} \ No newline at end of file diff --git a/ci/chains/gaia/v4.2.0/ibc-0/keyring-test/0a112e9ffa1e036981c14124485bdd543654b343.address b/ci/chains/gaia/v4.2.0/ibc-0/keyring-test/0a112e9ffa1e036981c14124485bdd543654b343.address deleted file mode 100644 index 807da328b1..0000000000 --- a/ci/chains/gaia/v4.2.0/ibc-0/keyring-test/0a112e9ffa1e036981c14124485bdd543654b343.address +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wNi0xNCAwOToxMzowMC45OTg2MjcyOSAtMDQwMCBFRFQgbT0rMC4wNjQxMzE5MzgiLCJlbmMiOiJBMjU2R0NNIiwicDJjIjo4MTkyLCJwMnMiOiJPSVNaOTlHUUNkcVl6LVN0In0.CZMPhgSbRxrf1aA2NdW3IWxCc99W46ckA_ANwO9aUMmYT8bDupGlfA.rXtMktmJBPPqyv0l.GDbFv3ps--6uIZdI_jEG0Ig27i-GGe12arGeDBQ0_eoMw2rY-Mb8oaMamFJtIsc2eNGrCfWLx3QetD0sp423GGgKWzlG_hoNawgL_QRRy_rwwZEJz3cbfvHRPHQv7VY0BwMCElfInRmYypIu91eME_Ui5QEHwBDVGgdF0vyunaozo2yMOHtUDEjUmh-TzjpDSUG3RTefveKYK95kFlpzCB-k6M8j5LZSz0uD_5EPzdkaL4dMcGGOyzgI.yGMc9I2f50IikMt5YAG2Tw \ No newline at end of file diff --git a/ci/chains/gaia/v4.2.0/ibc-0/keyring-test/8c1145fd1c8fa0119e95466d9fa27320bfcc0c1e.address b/ci/chains/gaia/v4.2.0/ibc-0/keyring-test/8c1145fd1c8fa0119e95466d9fa27320bfcc0c1e.address deleted file mode 100644 index 911af8519e..0000000000 --- a/ci/chains/gaia/v4.2.0/ibc-0/keyring-test/8c1145fd1c8fa0119e95466d9fa27320bfcc0c1e.address +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wNi0xNCAwOToxMzowMi4wNzg0ODc0ODggLTA0MDAgRURUIG09KzAuMDY1MjI3ODA3IiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiTEZZbk1zTFNSWVp5X1Y2XyJ9.eNZUQlok-yxmYnWAAUPnRvoHBFbj5cRsERmqbnFdqiAZNZ7g-b9iaQ.hZsRJ0mY3FFR0PmG.tuODM50B7-Zx3hGWIc84Ch6SUS05UieIDpAwRk85fBe5o6LQ-COy-f_wmrSYfnet9Onp23CnLVxUuJnbw4hiAZVFIRkLN5h_aOBlQ_Dg0edvFcSLSdDDgbHuCkPG2AuygmpR749FTBWqvSInNP-4JyJNrmrpUvq6mVitXdrLRU-1c8KWDwJVuw4BcyTHGNIyT9D1aaZZaI_BApZDXzJhRHTS5PigamexjzNB2KiBHtJiUQ.25_lUD1O7rjmqUeL-9_tLg \ No newline at end of file diff --git a/ci/chains/gaia/v4.2.0/ibc-0/keyring-test/f5580fd9ee89a00b0907ecf985c4febfc1672d3d.address b/ci/chains/gaia/v4.2.0/ibc-0/keyring-test/f5580fd9ee89a00b0907ecf985c4febfc1672d3d.address deleted file mode 100644 index 508eb654ee..0000000000 --- a/ci/chains/gaia/v4.2.0/ibc-0/keyring-test/f5580fd9ee89a00b0907ecf985c4febfc1672d3d.address +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wNi0xNCAwOToxMzowMy4xNTg4MjA0MzQgLTA0MDAgRURUIG09KzAuMDY1NzEzNTkxIiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiNTNrdmxEYVJ0dmNsOF90UCJ9.9kn3dNjpXGDJ3XMxUyGUWMllP4QiKhIdrZCbG-OwjGjyRNlPwJ32fA.utuMYFv7LPDz6P1l.djEBvcAW3PgPGoWpCzGWjF14TlICbGQ6Nu3zzPMMMhhyP0Ed8MA_hF2jnC-UYrlkK03VME55c6Bt2XjJLt6gX-fKKMOoVdzyb-a4bHvywH3DwU-CtmlxwSZUxClpiTjriX13yUCdgS1HszRvIbpiWL6lZc8294PYbVJ9EzMhZkv3tSWWYGl97vuHB_1dP7cGNJvp1FOvokoYxf_pfHbh7lVHxd2CZieOfL-g-rZ9bqWFmw7-sC0.t3J9an5u-70LvsshXfFHeQ \ No newline at end of file diff --git a/ci/chains/gaia/v4.2.0/ibc-0/keyring-test/user.info b/ci/chains/gaia/v4.2.0/ibc-0/keyring-test/user.info deleted file mode 100644 index 91dc1fba44..0000000000 --- a/ci/chains/gaia/v4.2.0/ibc-0/keyring-test/user.info +++ /dev/null @@ -1 +0,0 @@ 
-eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wNi0xNCAwOToxMzowMi4wNzEzMDM1OTUgLTA0MDAgRURUIG09KzAuMDU4MDQzODk0IiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiN0JpWkVEdWNtX3B3cXBUViJ9.TGKX2_vYpVCPcHWwWcgBbx3OweraZ_PIWjqmOE7qdExo8QslTTEWeg.8aZQFoUOMq-OhVYj.yol_qfmJdXXTFqvrw7LzjCR_0Bm3tLhJ6N68p5xHWHsQ_GAFpGGVLqPdKN5_3dDK_ObhlDMqEBsmqS4HIYT1vxmsKd5YmdWGXY823E_cdWQ-H9_p6BBdAsxak0ckWEeVuEZAT4963viFTSMnuylgMfZwzSXybipSoI86-PuqqVweSpsG_IRln7mUOU5EruMfSvkstzLsz5gBEJ1GnUXNUHiqXfZiki9eQ4JiUYeZLEPFkB6rsYt47xUMdxT__Bbi5fh-OvlwlRyQWv8dLHgaR-QHGN1qiYh_ZPZ-HRnJ1tgirXWN5pQBK4BU6KIb3DoqbVt4ZYRSHocVWaS0cX0vkfcNN9QBERQ.wQ7RopDwE-thWkBhKyGoKg \ No newline at end of file diff --git a/ci/chains/gaia/v4.2.0/ibc-0/keyring-test/user2.info b/ci/chains/gaia/v4.2.0/ibc-0/keyring-test/user2.info deleted file mode 100644 index 5603cdb824..0000000000 --- a/ci/chains/gaia/v4.2.0/ibc-0/keyring-test/user2.info +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wNi0xNCAwOToxMzowMy4xNTE5NDA4MTUgLTA0MDAgRURUIG09KzAuMDU4ODMzOTU4IiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiQkpVWThIdS1JSi10ZFVkeSJ9.CzGLBhnREbPwATFvjx5OsQ3xcvw6RU3BMJv10aao28PdpTspHzcKxQ.c1JJAuOzl_TdiaGg.ZsG0fQmAoF0j_WAlMZbVB3rhc2scGkYKxewcR8wyMZ792vZtdys-whsqLhv8NF0jc0HyIiS4gk_1E-QCatZl-cMWjjRZaCtk599aYA5CRKOEFAfhLond0nGVEjM6HbL5ChgAulyEsq74DQnFoyrO6uZN0usI8G-9ghPFehRAB5Fen5crxROWO-Dr3Rs_-DLLCB1D_jLeWGSR6V1p4pFH0kYGoJ504FuarV_hPqQ4ftwRPFSDVDtiSun0Lec9TLd1a0VzaAueINpZl8ueVL0qxZY2BWsedU4QZjYDLxsxWq2aQY9VSmm3rJfpBLyMLfHr-HaWedfeqIrvnusKIGKgDdpxI0lCU0jy.V7H5m657PtOwyuLaoJOycw \ No newline at end of file diff --git a/ci/chains/gaia/v4.2.0/ibc-0/keyring-test/validator.info b/ci/chains/gaia/v4.2.0/ibc-0/keyring-test/validator.info deleted file mode 100644 index 0589f1a12e..0000000000 --- a/ci/chains/gaia/v4.2.0/ibc-0/keyring-test/validator.info +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wNi0xNCAwOToxMzowMC45OTE2ODEyMjggLTA0MDAgRURUIG09KzAuMDU3MTg1ODc3IiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiWFRYNkVfdEJ1MUhXbXhzUCJ9.PphVXieI66NRMofk3D5FuOz3KwyZVlBdEBHiM-BWL5vf09ZgZOi_Zg.BqzzLuv2WbgZ3LbV.gASPxItpiGSMFeBdFyoVU_f4y-gflUKWmRWh2wEAnyI6J0wYPv9QixO4l-WEFPbWyebv0rBeZv8vz80xo7GH7W67CI-A8zpAgLaH2sULflU9Aqs84-MGQfdogFeoz46TsHJHEbN9Qsxe05be2eF-RWzJTeeVdJYSuo133IHJCc3EJesgYpn0Ve1_WjxYL4egHLBMaH9DJ5clNl8D01D8R0Vd_bjyiNqkXbSkcYTKOi3wtgyQ_SxjBkg981yANpLqjzEnKy4MuiWqw09BYOkHZC3xHBzpACHrm4bWIF4DzMHMsA7I-LI1finIf61K6dFGjywJ31FfhUE8-l6iTXFsQ4yf8fnur_Xyh3PUxxp4IKkBtJ-o.w46r1h6ErVI8ut26RUuPKQ \ No newline at end of file diff --git a/ci/chains/gaia/v4.2.0/ibc-0/user2_seed.json b/ci/chains/gaia/v4.2.0/ibc-0/user2_seed.json deleted file mode 100644 index 1cec6d4120..0000000000 --- a/ci/chains/gaia/v4.2.0/ibc-0/user2_seed.json +++ /dev/null @@ -1 +0,0 @@ -{"name":"user2","type":"local","address":"cosmos174vqlk0w3xsqkzg8anuct387hlqkwtfahqu9j8","pubkey":"cosmospub1addwnpepqvs7pf9n7gexuyw424aptzypnj5rtjt70hsdrwf8gejd7gtpswgm793a7zs","mnemonic":"engage verb play circle divide large identify ceiling gentle myself tortoise decrease matrix leave captain want deposit credit gossip warm denial copy food derive"} diff --git a/ci/chains/gaia/v4.2.0/ibc-0/user_seed.json b/ci/chains/gaia/v4.2.0/ibc-0/user_seed.json deleted file mode 100644 index 72edb9690c..0000000000 --- a/ci/chains/gaia/v4.2.0/ibc-0/user_seed.json +++ /dev/null @@ -1 +0,0 @@ 
-{"name":"user","type":"local","address":"cosmos13sg5tlgu37spr854gekelgnnyzlucrq740y9us","pubkey":"cosmospub1addwnpepqvcrrazdvt6knarm90hr93c6a685hp64cvvx6t75r940n5uxw9nqxp5fvt6","mnemonic":"isolate inmate used fitness smart delay tower famous hotel lift blame because uphold record stairs million acid taste kit together genre win refuse anger"} diff --git a/ci/chains/gaia/v4.2.0/ibc-0/validator_seed.json b/ci/chains/gaia/v4.2.0/ibc-0/validator_seed.json deleted file mode 100644 index bcf04513df..0000000000 --- a/ci/chains/gaia/v4.2.0/ibc-0/validator_seed.json +++ /dev/null @@ -1 +0,0 @@ -{"name":"validator","type":"local","address":"cosmos1pggja8l6rcpknqwpgyjysk7a2sm9fv6rhguha2","pubkey":"cosmospub1addwnpepq2yw7vj8u6g8pezu89g8zzl9l70xg8eygs2v5w3sha6wkhfjdl72u89slhs","mnemonic":"return crystal shoe ceiling elite pen include hurt original boss garment code lawsuit auction people swarm horn shoe zoo stereo crop sleep caution struggle"} diff --git a/ci/chains/gaia/v4.2.0/ibc-1/config/app.toml b/ci/chains/gaia/v4.2.0/ibc-1/config/app.toml deleted file mode 100644 index 54de73e392..0000000000 --- a/ci/chains/gaia/v4.2.0/ibc-1/config/app.toml +++ /dev/null @@ -1,152 +0,0 @@ -# This is a TOML config file. -# For more information, see https://github.com/toml-lang/toml - -############################################################################### -### Base Configuration ### -############################################################################### - -# The minimum gas prices a validator is willing to accept for processing a -# transaction. A transaction's fees must meet the minimum of any denomination -# specified in this config (e.g. 0.25token1;0.0001token2). -minimum-gas-prices = "" - -# default: the last 100 states are kept in addition to every 500th state; pruning at 10 block intervals -# nothing: all historic states will be saved, nothing will be deleted (i.e. archiving node) -# everything: all saved states will be deleted, storing only the current state; pruning at 10 block intervals -# custom: allow pruning options to be manually specified through 'pruning-keep-recent', 'pruning-keep-every', and 'pruning-interval' -pruning = "default" - -# These are applied if and only if the pruning strategy is custom. -pruning-keep-recent = "0" -pruning-keep-every = "0" -pruning-interval = "0" - -# HaltHeight contains a non-zero block height at which a node will gracefully -# halt and shutdown that can be used to assist upgrades and testing. -# -# Note: Commitment of state will be attempted on the corresponding block. -halt-height = 0 - -# HaltTime contains a non-zero minimum block time (in Unix seconds) at which -# a node will gracefully halt and shutdown that can be used to assist upgrades -# and testing. -# -# Note: Commitment of state will be attempted on the corresponding block. -halt-time = 0 - -# MinRetainBlocks defines the minimum block height offset from the current -# block being committed, such that all blocks past this offset are pruned -# from Tendermint. It is used as part of the process of determining the -# ResponseCommit.RetainHeight value during ABCI Commit. A value of 0 indicates -# that no blocks should be pruned. -# -# This configuration value is only responsible for pruning Tendermint blocks. -# It has no bearing on application state pruning which is determined by the -# "pruning-*" configurations. 
-# -# Note: Tendermint block pruning is dependant on this parameter in conunction -# with the unbonding (safety threshold) period, state pruning and state sync -# snapshot parameters to determine the correct minimum value of -# ResponseCommit.RetainHeight. -min-retain-blocks = 0 - -# InterBlockCache enables inter-block caching. -inter-block-cache = true - -# IndexEvents defines the set of events in the form {eventType}.{attributeKey}, -# which informs Tendermint what to index. If empty, all events will be indexed. -# -# Example: -# ["message.sender", "message.recipient"] -index-events = [] - -############################################################################### -### Telemetry Configuration ### -############################################################################### - -[telemetry] - -# Prefixed with keys to separate services. -service-name = "" - -# Enabled enables the application telemetry functionality. When enabled, -# an in-memory sink is also enabled by default. Operators may also enabled -# other sinks such as Prometheus. -enabled = false - -# Enable prefixing gauge values with hostname. -enable-hostname = false - -# Enable adding hostname to labels. -enable-hostname-label = false - -# Enable adding service to labels. -enable-service-label = false - -# PrometheusRetentionTime, when positive, enables a Prometheus metrics sink. -prometheus-retention-time = 0 - -# GlobalLabels defines a global set of name/value label tuples applied to all -# metrics emitted using the wrapper functions defined in telemetry package. -# -# Example: -# [["chain_id", "cosmoshub-1"]] -global-labels = [ -] - -############################################################################### -### API Configuration ### -############################################################################### - -[api] - -# Enable defines if the API server should be enabled. -enable = false - -# Swagger defines if swagger documentation should automatically be registered. -swagger = false - -# Address defines the API server to listen on. -address = "tcp://0.0.0.0:1317" - -# MaxOpenConnections defines the number of maximum open connections. -max-open-connections = 1000 - -# RPCReadTimeout defines the Tendermint RPC read timeout (in seconds). -rpc-read-timeout = 10 - -# RPCWriteTimeout defines the Tendermint RPC write timeout (in seconds). -rpc-write-timeout = 0 - -# RPCMaxBodyBytes defines the Tendermint maximum response body (in bytes). -rpc-max-body-bytes = 1000000 - -# EnableUnsafeCORS defines if CORS should be enabled (unsafe - use it at your own risk). -enabled-unsafe-cors = false - -############################################################################### -### gRPC Configuration ### -############################################################################### - -[grpc] - -# Enable defines if the gRPC server should be enabled. -enable = true - -# Address defines the gRPC server address to bind to. -address = "0.0.0.0:9090" - -############################################################################### -### State Sync Configuration ### -############################################################################### - -# State sync snapshots allow other nodes to rapidly join the network without replaying historical -# blocks, instead downloading and applying a snapshot of the application state at a given height. -[state-sync] - -# snapshot-interval specifies the block interval at which local state sync snapshots are -# taken (0 to disable). Must be a multiple of pruning-keep-every. 
-snapshot-interval = 0 - -# snapshot-keep-recent specifies the number of recent snapshots to keep and serve (0 to keep all). -snapshot-keep-recent = 2 diff --git a/ci/chains/gaia/v4.2.0/ibc-1/config/config.toml b/ci/chains/gaia/v4.2.0/ibc-1/config/config.toml deleted file mode 100644 index c32f66cf63..0000000000 --- a/ci/chains/gaia/v4.2.0/ibc-1/config/config.toml +++ /dev/null @@ -1,393 +0,0 @@ -# This is a TOML config file. -# For more information, see https://github.com/toml-lang/toml - -# NOTE: Any path below can be absolute (e.g. "/var/myawesomeapp/data") or -# relative to the home directory (e.g. "data"). The home directory is -# "$HOME/.tendermint" by default, but could be changed via $TMHOME env variable -# or --home cmd flag. - -####################################################################### -### Main Base Config Options ### -####################################################################### - -# TCP or UNIX socket address of the ABCI application, -# or the name of an ABCI application compiled in with the Tendermint binary -proxy_app = "tcp://127.0.0.1:26658" - -# A custom human readable name for this node -moniker = "ibc-1" - -# If this node is many blocks behind the tip of the chain, FastSync -# allows them to catchup quickly by downloading blocks in parallel -# and verifying their commits -fast_sync = true - -# Database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb -# * goleveldb (github.com/syndtr/goleveldb - most popular implementation) -# - pure go -# - stable -# * cleveldb (uses levigo wrapper) -# - fast -# - requires gcc -# - use cleveldb build tag (go build -tags cleveldb) -# * boltdb (uses etcd's fork of bolt - github.com/etcd-io/bbolt) -# - EXPERIMENTAL -# - may be faster is some use-cases (random reads - indexer) -# - use boltdb build tag (go build -tags boltdb) -# * rocksdb (uses github.com/tecbot/gorocksdb) -# - EXPERIMENTAL -# - requires gcc -# - use rocksdb build tag (go build -tags rocksdb) -# * badgerdb (uses github.com/dgraph-io/badger) -# - EXPERIMENTAL -# - use badgerdb build tag (go build -tags badgerdb) -db_backend = "goleveldb" - -# Database directory -db_dir = "data" - -# Output level for logging, including package level options -log_level = "info" - -# Output format: 'plain' (colored text) or 'json' -log_format = "plain" - -##### additional base config options ##### - -# Path to the JSON file containing the initial validator set and other meta data -genesis_file = "config/genesis.json" - -# Path to the JSON file containing the private key to use as a validator in the consensus protocol -priv_validator_key_file = "config/priv_validator_key.json" - -# Path to the JSON file containing the last sign state of a validator -priv_validator_state_file = "data/priv_validator_state.json" - -# TCP or UNIX socket address for Tendermint to listen on for -# connections from an external PrivValidator process -priv_validator_laddr = "" - -# Path to the JSON file containing the private key to use for node authentication in the p2p protocol -node_key_file = "config/node_key.json" - -# Mechanism to connect to the ABCI application: socket | grpc -abci = "socket" - -# If true, query the ABCI app on connecting to a new peer -# so the app can decide if we should keep the connection or not -filter_peers = false - - -####################################################################### -### Advanced Configuration Options ### -####################################################################### - 
-####################################################### -### RPC Server Configuration Options ### -####################################################### -[rpc] - -# TCP or UNIX socket address for the RPC server to listen on -laddr = "tcp://0.0.0.0:26657" - -# A list of origins a cross-domain request can be executed from -# Default value '[]' disables cors support -# Use '["*"]' to allow any origin -cors_allowed_origins = [] - -# A list of methods the client is allowed to use with cross-domain requests -cors_allowed_methods = ["HEAD", "GET", "POST", ] - -# A list of non simple headers the client is allowed to use with cross-domain requests -cors_allowed_headers = ["Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time", ] - -# TCP or UNIX socket address for the gRPC server to listen on -# NOTE: This server only supports /broadcast_tx_commit -grpc_laddr = "" - -# Maximum number of simultaneous connections. -# Does not include RPC (HTTP&WebSocket) connections. See max_open_connections -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} -# 1024 - 40 - 10 - 50 = 924 = ~900 -grpc_max_open_connections = 900 - -# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool -unsafe = false - -# Maximum number of simultaneous connections (including WebSocket). -# Does not include gRPC connections. See grpc_max_open_connections -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} -# 1024 - 40 - 10 - 50 = 924 = ~900 -max_open_connections = 900 - -# Maximum number of unique clientIDs that can /subscribe -# If you're using /broadcast_tx_commit, set to the estimated maximum number -# of broadcast_tx_commit calls per block. -max_subscription_clients = 100 - -# Maximum number of unique queries a given client can /subscribe to -# If you're using GRPC (or Local RPC client) and /broadcast_tx_commit, set to -# the estimated # maximum number of broadcast_tx_commit calls per block. -max_subscriptions_per_client = 5 - -# How long to wait for a tx to be committed during /broadcast_tx_commit. -# WARNING: Using a value larger than 10s will result in increasing the -# global HTTP write timeout, which applies to all connections and endpoints. -# See https://github.com/tendermint/tendermint/issues/3435 -timeout_broadcast_tx_commit = "10s" - -# Maximum size of request body, in bytes -max_body_bytes = 1000000 - -# Maximum size of request header, in bytes -max_header_bytes = 1048576 - -# The path to a file containing certificate that is used to create the HTTPS server. -# Might be either absolute path or path related to Tendermint's config directory. -# If the certificate is signed by a certificate authority, -# the certFile should be the concatenation of the server's certificate, any intermediates, -# and the CA's certificate. -# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. -# Otherwise, HTTP server is run. -tls_cert_file = "" - -# The path to a file containing matching private key that is used to create the HTTPS server. -# Might be either absolute path or path related to Tendermint's config directory. 
-# NOTE: both tls-cert-file and tls-key-file must be present for Tendermint to create HTTPS server. -# Otherwise, HTTP server is run. -tls_key_file = "" - -# pprof listen address (https://golang.org/pkg/net/http/pprof) -pprof_laddr = "localhost:6060" - -####################################################### -### P2P Configuration Options ### -####################################################### -[p2p] - -# Address to listen for incoming connections -laddr = "tcp://0.0.0.0:26656" - -# Address to advertise to peers for them to dial -# If empty, will use the same port as the laddr, -# and will introspect on the listener or use UPnP -# to figure out the address. -external_address = "" - -# Comma separated list of seed nodes to connect to -seeds = "" - -# Comma separated list of nodes to keep persistent connections to -persistent_peers = "" - -# UPNP port forwarding -upnp = false - -# Path to address book -addr_book_file = "config/addrbook.json" - -# Set true for strict address routability rules -# Set false for private or local networks -addr_book_strict = true - -# Maximum number of inbound peers -max_num_inbound_peers = 40 - -# Maximum number of outbound peers to connect to, excluding persistent peers -max_num_outbound_peers = 10 - -# List of node IDs, to which a connection will be (re)established ignoring any existing limits -unconditional_peer_ids = "" - -# Maximum pause when redialing a persistent peer (if zero, exponential backoff is used) -persistent_peers_max_dial_period = "0s" - -# Time to wait before flushing messages out on the connection -flush_throttle_timeout = "100ms" - -# Maximum size of a message packet payload, in bytes -max_packet_msg_payload_size = 1024 - -# Rate at which packets can be sent, in bytes/second -send_rate = 5120000 - -# Rate at which packets can be received, in bytes/second -recv_rate = 5120000 - -# Set true to enable the peer-exchange reactor -pex = true - -# Seed mode, in which node constantly crawls the network and looks for -# peers. If another node asks it for addresses, it responds and disconnects. -# -# Does not work if the peer-exchange reactor is disabled. -seed_mode = false - -# Comma separated list of peer IDs to keep private (will not be gossiped to other peers) -private_peer_ids = "" - -# Toggle to disable guard against peers connecting from the same ip. -allow_duplicate_ip = false - -# Peer connection configuration. -handshake_timeout = "20s" -dial_timeout = "3s" - -####################################################### -### Mempool Configuration Option ### -####################################################### -[mempool] - -recheck = true -broadcast = true -wal_dir = "" - -# Maximum number of transactions in the mempool -size = 5000 - -# Limit the total size of all txs in the mempool. -# This only accounts for raw transactions (e.g. given 1MB transactions and -# max_txs_bytes=5MB, mempool will only accept 5 transactions). -max_txs_bytes = 1073741824 - -# Size of the cache (used to filter transactions we saw earlier) in transactions -cache_size = 10000 - -# Do not remove invalid transactions from the cache (default: false) -# Set to true if it's not possible for any invalid transaction to become valid -# again in the future. -keep-invalid-txs-in-cache = false - -# Maximum size of a single transaction. -# NOTE: the max size of a tx transmitted over the network is {max_tx_bytes}. -max_tx_bytes = 1048576 - -# Maximum size of a batch of transactions to send to a peer -# Including space needed by encoding (one varint per transaction). 
-# XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796 -max_batch_bytes = 0 - -####################################################### -### State Sync Configuration Options ### -####################################################### -[statesync] -# State sync rapidly bootstraps a new node by discovering, fetching, and restoring a state machine -# snapshot from peers instead of fetching and replaying historical blocks. Requires some peers in -# the network to take and serve state machine snapshots. State sync is not attempted if the node -# has any local state (LastBlockHeight > 0). The node will have a truncated block history, -# starting from the height of the snapshot. -enable = false - -# RPC servers (comma-separated) for light client verification of the synced state machine and -# retrieval of state data for node bootstrapping. Also needs a trusted height and corresponding -# header hash obtained from a trusted source, and a period during which validators can be trusted. -# -# For Cosmos SDK-based chains, trust_period should usually be about 2/3 of the unbonding time (~2 -# weeks) during which they can be financially punished (slashed) for misbehavior. -rpc_servers = "" -trust_height = 0 -trust_hash = "" -trust_period = "168h0m0s" - -# Time to spend discovering snapshots before initiating a restore. -discovery_time = "15s" - -# Temporary directory for state sync snapshot chunks, defaults to the OS tempdir (typically /tmp). -# Will create a new, randomly named directory within, and remove it when done. -temp_dir = "" - -####################################################### -### Fast Sync Configuration Connections ### -####################################################### -[fastsync] - -# Fast Sync version to use: -# 1) "v0" (default) - the legacy fast sync implementation -# 2) "v1" - refactor of v0 version for better testability -# 2) "v2" - complete redesign of v0, optimized for testability & readability -version = "v0" - -####################################################### -### Consensus Configuration Options ### -####################################################### -[consensus] - -wal_file = "data/cs.wal/wal" - -# How long we wait for a proposal block before prevoting nil -timeout_propose = "1s" -# How much timeout_propose increases with each round -timeout_propose_delta = "500ms" -# How long we wait after receiving +2/3 prevotes for “anything” (ie. not a single block or nil) -timeout_prevote = "1s" -# How much the timeout_prevote increases with each round -timeout_prevote_delta = "500ms" -# How long we wait after receiving +2/3 precommits for “anything” (ie. not a single block or nil) -timeout_precommit = "1s" -# How much the timeout_precommit increases with each round -timeout_precommit_delta = "500ms" -# How long we wait after committing a block, before starting on the new -# height (this gives us a chance to receive some more precommits, even -# though we already have +2/3). -timeout_commit = "1s" - -# How many blocks to look back to check existence of the node's consensus votes before joining consensus -# When non-zero, the node will panic upon restart -# if the same consensus key was used to sign {double_sign_check_height} last blocks. -# So, validators should stop the state machine, wait for some blocks, and then restart the state machine to avoid panic. 
-double_sign_check_height = 0 - -# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) -skip_timeout_commit = false - -# EmptyBlocks mode and possible interval between empty blocks -create_empty_blocks = true -create_empty_blocks_interval = "0s" - -# Reactor sleep duration parameters -peer_gossip_sleep_duration = "100ms" -peer_query_maj23_sleep_duration = "2s" - -####################################################### -### Transaction Indexer Configuration Options ### -####################################################### -[tx_index] - -# What indexer to use for transactions -# -# The application will set which txs to index. In some cases a node operator will be able -# to decide which txs to index based on configuration set in the application. -# -# Options: -# 1) "null" -# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). -# - When "kv" is chosen "tx.height" and "tx.hash" will always be indexed. -indexer = "kv" - -####################################################### -### Instrumentation Configuration Options ### -####################################################### -[instrumentation] - -# When true, Prometheus metrics are served under /metrics on -# PrometheusListenAddr. -# Check out the documentation for the list of available metrics. -prometheus = false - -# Address to listen for Prometheus collector(s) connections -prometheus_listen_addr = ":26660" - -# Maximum number of simultaneous connections. -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -max_open_connections = 3 - -# Instrumentation namespace -namespace = "tendermint" diff --git a/ci/chains/gaia/v4.2.0/ibc-1/config/genesis.json b/ci/chains/gaia/v4.2.0/ibc-1/config/genesis.json deleted file mode 100644 index 07644cdfeb..0000000000 --- a/ci/chains/gaia/v4.2.0/ibc-1/config/genesis.json +++ /dev/null @@ -1,314 +0,0 @@ -{ - "genesis_time": "2021-06-14T13:13:13.056631194Z", - "chain_id": "ibc-1", - "initial_height": "1", - "consensus_params": { - "block": { - "max_bytes": "22020096", - "max_gas": "-1", - "time_iota_ms": "1000" - }, - "evidence": { - "max_age_num_blocks": "100000", - "max_age_duration": "172800000000000", - "max_bytes": "1048576" - }, - "validator": { - "pub_key_types": [ - "ed25519" - ] - }, - "version": {} - }, - "app_hash": "", - "app_state": { - "auth": { - "params": { - "max_memo_characters": "256", - "tx_sig_limit": "7", - "tx_size_cost_per_byte": "10", - "sig_verify_cost_ed25519": "590", - "sig_verify_cost_secp256k1": "1000" - }, - "accounts": [ - { - "@type": "/cosmos.auth.v1beta1.BaseAccount", - "address": "cosmos1fvfhg5hj208gackwmqch4zmwtg924em2xg55r2", - "pub_key": null, - "account_number": "0", - "sequence": "0" - }, - { - "@type": "/cosmos.auth.v1beta1.BaseAccount", - "address": "cosmos14qcemug6e6c6tnwr42ewfn7fhju00quv5uk6z3", - "pub_key": null, - "account_number": "0", - "sequence": "0" - }, - { - "@type": "/cosmos.auth.v1beta1.BaseAccount", - "address": "cosmos1w7wq24hmvel4946ekaql0v56n8rlpuv9ns6ctv", - "pub_key": null, - "account_number": "0", - "sequence": "0" - } - ] - }, - "bank": { - "params": { - "send_enabled": [], - "default_send_enabled": true - }, - "balances": [ - { - "address": "cosmos1fvfhg5hj208gackwmqch4zmwtg924em2xg55r2", - "coins": [ - { - "denom": "samoleans", - "amount": "100000000000" - }, - { - "denom": "stake", - "amount": "100000000000" - } - ] - }, - { - "address": 
"cosmos1w7wq24hmvel4946ekaql0v56n8rlpuv9ns6ctv", - "coins": [ - { - "denom": "stake", - "amount": "100000000000" - } - ] - }, - { - "address": "cosmos14qcemug6e6c6tnwr42ewfn7fhju00quv5uk6z3", - "coins": [ - { - "denom": "samoleans", - "amount": "100000000000" - }, - { - "denom": "stake", - "amount": "100000000000" - } - ] - } - ], - "supply": [ - { - "denom": "samoleans", - "amount": "200000000000" - }, - { - "denom": "stake", - "amount": "300000000000" - } - ], - "denom_metadata": [] - }, - "capability": { - "index": "1", - "owners": [] - }, - "crisis": { - "constant_fee": { - "denom": "stake", - "amount": "1000" - } - }, - "distribution": { - "params": { - "community_tax": "0.020000000000000000", - "base_proposer_reward": "0.010000000000000000", - "bonus_proposer_reward": "0.040000000000000000", - "withdraw_addr_enabled": true - }, - "fee_pool": { - "community_pool": [] - }, - "delegator_withdraw_infos": [], - "previous_proposer": "", - "outstanding_rewards": [], - "validator_accumulated_commissions": [], - "validator_historical_rewards": [], - "validator_current_rewards": [], - "delegator_starting_infos": [], - "validator_slash_events": [] - }, - "evidence": { - "evidence": [] - }, - "genutil": { - "gen_txs": [ - { - "body": { - "messages": [ - { - "@type": "/cosmos.staking.v1beta1.MsgCreateValidator", - "description": { - "moniker": "ibc-1", - "identity": "", - "website": "", - "security_contact": "", - "details": "" - }, - "commission": { - "rate": "0.100000000000000000", - "max_rate": "0.200000000000000000", - "max_change_rate": "0.010000000000000000" - }, - "min_self_delegation": "1", - "delegator_address": "cosmos1w7wq24hmvel4946ekaql0v56n8rlpuv9ns6ctv", - "validator_address": "cosmosvaloper1w7wq24hmvel4946ekaql0v56n8rlpuv9kywd8l", - "pubkey": { - "@type": "/cosmos.crypto.ed25519.PubKey", - "key": "oUdI91TXlwS/qQXrrc/aLb9u/RESrYAhKKJY9iFyZ4U=" - }, - "value": { - "denom": "stake", - "amount": "100000000000" - } - } - ], - "memo": "69dd5b43951124b2e6e2d744328041a907f3968c@192.168.50.214:26656", - "timeout_height": "0", - "extension_options": [], - "non_critical_extension_options": [] - }, - "auth_info": { - "signer_infos": [ - { - "public_key": { - "@type": "/cosmos.crypto.secp256k1.PubKey", - "key": "Aqfj+Tm7Sin/Uxw4PObXXqN8OpYJH2O92T+IOLSj77Th" - }, - "mode_info": { - "single": { - "mode": "SIGN_MODE_DIRECT" - } - }, - "sequence": "0" - } - ], - "fee": { - "amount": [], - "gas_limit": "200000", - "payer": "", - "granter": "" - } - }, - "signatures": [ - "5mu7NcH4HWs9Nx7tAqLMjjrAG/EBOIHwByB/x4CFi6ZPoHo7YnWwDMKWrOK6z5Adays/rr9l25/p3lVzZ2mEvA==" - ] - } - ] - }, - "gov": { - "starting_proposal_id": "1", - "deposits": [], - "votes": [], - "proposals": [], - "deposit_params": { - "min_deposit": [ - { - "denom": "stake", - "amount": "10000000" - } - ], - "max_deposit_period": "200s" - }, - "voting_params": { - "voting_period": "200s" - }, - "tally_params": { - "quorum": "0.334000000000000000", - "threshold": "0.500000000000000000", - "veto_threshold": "0.334000000000000000" - } - }, - "ibc": { - "client_genesis": { - "clients": [], - "clients_consensus": [], - "clients_metadata": [], - "params": { - "allowed_clients": [ - "06-solomachine", - "07-tendermint" - ] - }, - "create_localhost": false, - "next_client_sequence": "0" - }, - "connection_genesis": { - "connections": [], - "client_connection_paths": [], - "next_connection_sequence": "0" - }, - "channel_genesis": { - "channels": [], - "acknowledgements": [], - "commitments": [], - "receipts": [], - "send_sequences": [], - 
"recv_sequences": [], - "ack_sequences": [], - "next_channel_sequence": "0" - } - }, - "mint": { - "minter": { - "inflation": "0.130000000000000000", - "annual_provisions": "0.000000000000000000" - }, - "params": { - "mint_denom": "stake", - "inflation_rate_change": "0.130000000000000000", - "inflation_max": "0.200000000000000000", - "inflation_min": "0.070000000000000000", - "goal_bonded": "0.670000000000000000", - "blocks_per_year": "6311520" - } - }, - "params": null, - "slashing": { - "params": { - "signed_blocks_window": "100", - "min_signed_per_window": "0.500000000000000000", - "downtime_jail_duration": "600s", - "slash_fraction_double_sign": "0.050000000000000000", - "slash_fraction_downtime": "0.010000000000000000" - }, - "signing_infos": [], - "missed_blocks": [] - }, - "staking": { - "params": { - "unbonding_time": "1814400s", - "max_validators": 100, - "max_entries": 7, - "historical_entries": 10000, - "bond_denom": "stake" - }, - "last_total_power": "0", - "last_validator_powers": [], - "validators": [], - "delegations": [], - "unbonding_delegations": [], - "redelegations": [], - "exported": false - }, - "transfer": { - "port_id": "transfer", - "denom_traces": [], - "params": { - "send_enabled": true, - "receive_enabled": true - } - }, - "upgrade": {}, - "vesting": {} - } -} \ No newline at end of file diff --git a/ci/chains/gaia/v4.2.0/ibc-1/config/gentx/gentx-69dd5b43951124b2e6e2d744328041a907f3968c.json b/ci/chains/gaia/v4.2.0/ibc-1/config/gentx/gentx-69dd5b43951124b2e6e2d744328041a907f3968c.json deleted file mode 100644 index 7b398f46d4..0000000000 --- a/ci/chains/gaia/v4.2.0/ibc-1/config/gentx/gentx-69dd5b43951124b2e6e2d744328041a907f3968c.json +++ /dev/null @@ -1 +0,0 @@ -{"body":{"messages":[{"@type":"/cosmos.staking.v1beta1.MsgCreateValidator","description":{"moniker":"ibc-1","identity":"","website":"","security_contact":"","details":""},"commission":{"rate":"0.100000000000000000","max_rate":"0.200000000000000000","max_change_rate":"0.010000000000000000"},"min_self_delegation":"1","delegator_address":"cosmos1w7wq24hmvel4946ekaql0v56n8rlpuv9ns6ctv","validator_address":"cosmosvaloper1w7wq24hmvel4946ekaql0v56n8rlpuv9kywd8l","pubkey":{"@type":"/cosmos.crypto.ed25519.PubKey","key":"oUdI91TXlwS/qQXrrc/aLb9u/RESrYAhKKJY9iFyZ4U="},"value":{"denom":"stake","amount":"100000000000"}}],"memo":"69dd5b43951124b2e6e2d744328041a907f3968c@192.168.50.214:26656","timeout_height":"0","extension_options":[],"non_critical_extension_options":[]},"auth_info":{"signer_infos":[{"public_key":{"@type":"/cosmos.crypto.secp256k1.PubKey","key":"Aqfj+Tm7Sin/Uxw4PObXXqN8OpYJH2O92T+IOLSj77Th"},"mode_info":{"single":{"mode":"SIGN_MODE_DIRECT"}},"sequence":"0"}],"fee":{"amount":[],"gas_limit":"200000","payer":"","granter":""}},"signatures":["5mu7NcH4HWs9Nx7tAqLMjjrAG/EBOIHwByB/x4CFi6ZPoHo7YnWwDMKWrOK6z5Adays/rr9l25/p3lVzZ2mEvA=="]} diff --git a/ci/chains/gaia/v4.2.0/ibc-1/config/node_key.json b/ci/chains/gaia/v4.2.0/ibc-1/config/node_key.json deleted file mode 100644 index 1d542369f4..0000000000 --- a/ci/chains/gaia/v4.2.0/ibc-1/config/node_key.json +++ /dev/null @@ -1 +0,0 @@ -{"priv_key":{"type":"tendermint/PrivKeyEd25519","value":"YJV/qvkkALKFIQeZCJMDVGXxb6MmHpUPFjWkhhSD5V9jaJsfupWnsJH+PvOGvlHJGvjKX1plnB9N2l+T655b4g=="}} \ No newline at end of file diff --git a/ci/chains/gaia/v4.2.0/ibc-1/config/priv_validator_key.json b/ci/chains/gaia/v4.2.0/ibc-1/config/priv_validator_key.json deleted file mode 100644 index 786b60801a..0000000000 --- a/ci/chains/gaia/v4.2.0/ibc-1/config/priv_validator_key.json +++ 
/dev/null @@ -1,11 +0,0 @@ -{ - "address": "70FF5C5D426D127BEC43AA1AB3FCDFE3862E4E4D", - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "oUdI91TXlwS/qQXrrc/aLb9u/RESrYAhKKJY9iFyZ4U=" - }, - "priv_key": { - "type": "tendermint/PrivKeyEd25519", - "value": "muN9d4dS7CR/WEXEsNc85JbqgEyC4RLbNssH3v+IF2mhR0j3VNeXBL+pBeutz9otv279ERKtgCEoolj2IXJnhQ==" - } -} \ No newline at end of file diff --git a/ci/chains/gaia/v4.2.0/ibc-1/keyring-test/4b137452f253ce8ee2ced8317a8b6e5a0aaae76a.address b/ci/chains/gaia/v4.2.0/ibc-1/keyring-test/4b137452f253ce8ee2ced8317a8b6e5a0aaae76a.address deleted file mode 100644 index 82d1370527..0000000000 --- a/ci/chains/gaia/v4.2.0/ibc-1/keyring-test/4b137452f253ce8ee2ced8317a8b6e5a0aaae76a.address +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wNi0xNCAwOToxMzoxNS4yMDkwNDc2OTQgLTA0MDAgRURUIG09KzAuMDY1MDcyOTE1IiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoibmVCandtSHFyQlF0Z0w2XyJ9.BTl2Woy2rvSdMNL-ADRy9BtSPCNnRM2J4NKXx44NSW5YjJCl79hOgA.O6V5W8isgg82NWZe.rNXOtM3r3Z-nYKJkY6j8tE5MDCKeiSUIIhfFhjrmzy5u7KdjGQaXAO4Hb9aYqXnh5GiymD_SN3xSGFx1KElCFY8jm7FMdluvsGFQ5UIgP90c6wWqBubZ4iNboRrQ3qn6B7tcDwe2UBGFdb6q9rub9sf3AqOUDsGPMe26613oQbk7lRUPYGfbVYk532JCCFojIDftg9F3XlhPA3r9MRZW7z9jD1n4Iv-PmBvXKJTa5CZKEA.JB9sZ58QQODQHkd2vo_I4w \ No newline at end of file diff --git a/ci/chains/gaia/v4.2.0/ibc-1/keyring-test/779c0556fb667f52d759b741f7b29a99c7f0f185.address b/ci/chains/gaia/v4.2.0/ibc-1/keyring-test/779c0556fb667f52d759b741f7b29a99c7f0f185.address deleted file mode 100644 index 84691d39e9..0000000000 --- a/ci/chains/gaia/v4.2.0/ibc-1/keyring-test/779c0556fb667f52d759b741f7b29a99c7f0f185.address +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wNi0xNCAwOToxMzoxNC4xMzAzNDcxNDIgLTA0MDAgRURUIG09KzAuMDY2Mjc3NDcwIiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiQVZSVGVOanVmYUFjUW5XVCJ9.Kg5WAienYLoK6Zqf1lD7EY0b03IIpC0R3a0YClewgyldQcbp_ISvYw.gw0rXmdDXySw1jip.OLke5XiNiCpKqtQLC2iAr_4Id4iusXIH_dzHGoirlw22nLt4OMsPbmVQeRMcNDrnK5VSlJXXFPvizqqlgJutwz4yFMTifJ4azGUzXUnYNaABe8r8Y2UfRQMLM5x4ySFS1Cb55C6eCF7DLVd_9Vv15VOeEEDkACZ-kzGOwbI2JkYpXqveklAf1PLICtIDuafYyW9Rtr59d14BTQDKlJGlQcA8bCuvQxewh_JDxvCOfP7oEUlzU1fNl517.d-yhzyHzDVy3jyaMqcaw7A \ No newline at end of file diff --git a/ci/chains/gaia/v4.2.0/ibc-1/keyring-test/a8319df11aceb1a5cdc3aab2e4cfc9bcb8f7838c.address b/ci/chains/gaia/v4.2.0/ibc-1/keyring-test/a8319df11aceb1a5cdc3aab2e4cfc9bcb8f7838c.address deleted file mode 100644 index 33b21e2606..0000000000 --- a/ci/chains/gaia/v4.2.0/ibc-1/keyring-test/a8319df11aceb1a5cdc3aab2e4cfc9bcb8f7838c.address +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wNi0xNCAwOToxMzoxNi4yOTAxMTQwMjEgLTA0MDAgRURUIG09KzAuMDY2ODQzNzY0IiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiNWJMZWYwMkJLWXFiRDl0NyJ9.4EVMebPoQfn0yGMEFR9u-PTdO2UKdoFfY7vSwfILVDqiE2VHeezWRQ.KA3J7bJHbTxI9ASJ.0dFNohEbV4PD2tlow_N5knExrP3DHzcYkZ_g1LB-RIwXsZSD0Ywhh3YxgZF662YvGYYvaZ4IrFmVFJ149eRdI80i35m7uAoQfJdQdZlhGk-EwlElOUrO8ZFAQikvFd7u0z6hGSXKz1kpe2ATSvH1ExM23Py2OxursQ1r8ZF7ySCC1l3yK1wiT8Xhd2VWYX0svExcQ2LOTafewu8OmC49w3J8R2h-jMJLC_gnSn-7VYUB7d5zvkI.wnIdgljYgH8fQOBu3WCf_A \ No newline at end of file diff --git a/ci/chains/gaia/v4.2.0/ibc-1/keyring-test/user.info b/ci/chains/gaia/v4.2.0/ibc-1/keyring-test/user.info deleted file mode 100644 index ef17944c9b..0000000000 --- a/ci/chains/gaia/v4.2.0/ibc-1/keyring-test/user.info +++ /dev/null @@ -1 +0,0 @@ 
-eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wNi0xNCAwOToxMzoxNS4yMDIxMTM2ODMgLTA0MDAgRURUIG09KzAuMDU4MTM4OTAzIiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiRWNGRFZaU1N4UEF3Y01JcSJ9.UsJ_17tC0mifeG3WCHDQMTF0R4pAgEUavyaUEyJpvh8llc0LepVMMg.t_Kpzpq-jsb-rGak.w1B31ctpUT04gDoqed4b5cp3Z7gR5JWqTfxFBvqJgS6Yi9vhjdkguvKp9L2MfefwpK6EYbUC3i0VpPLOUY51RfciA4Ap58WeQSYBDLlF6RG_lbKCmv6_FEJxR742J7CCYqKZhU7slGmYXZPORZWsv6Cp6Ba7aJatlTG1sqiU6394mdFHGQkNXAOTJTqSSA99ajaWm-0Qeazp1RZRY1K0mdjxd-JkT8MRiNPpLeh0DRnIGJ5n94z7afNTFZ2Fb2U2QbaN7LujJAjw1JHA7Nw5a0PQjUH02Wp5femiG8i5e41ME5oGvU-YuX801_-VafXzh0uzv6RfD6tq1i9dRJ-beGVQFKxLWL4.mcR8HWvCYlRnLLYW0wM2lg \ No newline at end of file diff --git a/ci/chains/gaia/v4.2.0/ibc-1/keyring-test/user2.info b/ci/chains/gaia/v4.2.0/ibc-1/keyring-test/user2.info deleted file mode 100644 index 59b1e80368..0000000000 --- a/ci/chains/gaia/v4.2.0/ibc-1/keyring-test/user2.info +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wNi0xNCAwOToxMzoxNi4yODMwODg5NzYgLTA0MDAgRURUIG09KzAuMDU5ODE4NzE1IiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiRGNkNjNHWGZNLVktd1ppbiJ9.bieeQsp_9NDEx6PL5z15KR7HAm53spmwmylcayAcZyFXjHNqe1r_OQ.BnYcBQhGjPBav1MU.mVTHXLCelV1YPfZYm9848FR5STbC8Xm1kQjPgOWZ78aAa1VGbD6qsvlnTNhi7t1MIzPnMAy-O9sbsrtNFimuGe8PpuGD0pVeIwe_rE7w335lCFJ-y_GFeSRNBparwPFHpy08DEB8asozIIj_O3cVjcW1tUIcqLl-5XoePhKZZkchXUOYvDEquCEe3ZKj8PW-gFyEtjn4vK5JFRgj-LmA3T-bvHg4V7jPkdlITuecqwuHh_OxUX19jHtK6vm1YHA9V0FLyEMoX4l0G728UxxbS1xPB48oQNty4cIgLRmxnztCU52bSpEDq8W5VfcU5hNQpk0cL2lh8K7FyjStc79svZTSCIY7dINW.6SE8M1RrjHBghHh3iUDq0g \ No newline at end of file diff --git a/ci/chains/gaia/v4.2.0/ibc-1/keyring-test/validator.info b/ci/chains/gaia/v4.2.0/ibc-1/keyring-test/validator.info deleted file mode 100644 index 0e0d63b85a..0000000000 --- a/ci/chains/gaia/v4.2.0/ibc-1/keyring-test/validator.info +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wNi0xNCAwOToxMzoxNC4xMjMzMTE4NyAtMDQwMCBFRFQgbT0rMC4wNTkyNDIyMDYiLCJlbmMiOiJBMjU2R0NNIiwicDJjIjo4MTkyLCJwMnMiOiJOcmE2dTRqV0NWMVlLbnd4In0.JlwFC_IeZIvb554blz3jrw8-k1CiXyHh7cR71u9M47hpylHmh3mZFQ.dwgvcK0kSJu4TkYJ.TSxVfxFpGvPV4fPx-l7agEGjQ_dxsoRffPJG0YoAdKG8WA0qTKofGWoVgg5jyMuBfOFUMVAPXWhnP9Gr3pCfa4SldrEZ2OoXYYtB47R6XBpZBfAXsiyioj4580vfrmTFQfPUwocBYZFja9S8G2kmP_zpP3tv3mOs5q_16fQ5UxK-kI4_i69NzL_F32iYgo85tUAjkrTGR55xBP9uW3Ev5t7AoKdozR9zjSm8MBgAX5XsuIFGCue74NqSZiynSA3prDNDBJ2ATo4ccNDUmz12RwlkF-D7bIlNzKTCFvMqLrqgHqDKmCnrhd_YQhrZwjlLBQLzZAbMbLuaSS4cBjb_mTV406dbYBuygVR3iOxPAJhYjhwu.UjRwllvnVNPFbMDvsNno4A \ No newline at end of file diff --git a/ci/chains/gaia/v4.2.0/ibc-1/user2_seed.json b/ci/chains/gaia/v4.2.0/ibc-1/user2_seed.json deleted file mode 100644 index b175597f92..0000000000 --- a/ci/chains/gaia/v4.2.0/ibc-1/user2_seed.json +++ /dev/null @@ -1 +0,0 @@ -{"name":"user2","type":"local","address":"cosmos14qcemug6e6c6tnwr42ewfn7fhju00quv5uk6z3","pubkey":"cosmospub1addwnpepqdg569jjx96hyx46053u3ks6uk8lvnkh9xnqkdyevhxvqgfxuh2p52aw30k","mnemonic":"miracle index era tobacco birth submit powder echo bubble mandate chuckle knee jewel hurry session narrow believe cake boring cancel medal announce mad bronze"} diff --git a/ci/chains/gaia/v4.2.0/ibc-1/user_seed.json b/ci/chains/gaia/v4.2.0/ibc-1/user_seed.json deleted file mode 100644 index 7edbb11221..0000000000 --- a/ci/chains/gaia/v4.2.0/ibc-1/user_seed.json +++ /dev/null @@ -1 +0,0 @@ 
-{"name":"user","type":"local","address":"cosmos1fvfhg5hj208gackwmqch4zmwtg924em2xg55r2","pubkey":"cosmospub1addwnpepqfakz5yae5sawd6muhrczgr6mvdd0hqsdhc07c2ua70vxl4xcxwssw9dvya","mnemonic":"satoshi shield jungle victory danger amateur design push leisure current couch capable moral october glide design erase hen divorce venue devote fresh early exchange"} diff --git a/ci/chains/gaia/v4.2.0/ibc-1/validator_seed.json b/ci/chains/gaia/v4.2.0/ibc-1/validator_seed.json deleted file mode 100644 index 51cedcf7b2..0000000000 --- a/ci/chains/gaia/v4.2.0/ibc-1/validator_seed.json +++ /dev/null @@ -1 +0,0 @@ -{"name":"validator","type":"local","address":"cosmos1w7wq24hmvel4946ekaql0v56n8rlpuv9ns6ctv","pubkey":"cosmospub1addwnpepq2n787fehd9znl6nrsureekht63hcw5kpy0k80we87yr3d9ra76wz05qrfn","mnemonic":"dentist display eagle twin apology joke alpha timber bachelor quantum old enough extend average caught actual distance trouble helmet real raw illegal time slim"} diff --git a/ci/chains/gaia/v5.0.5/ibc-0/config/addrbook.json b/ci/chains/gaia/v5.0.5/ibc-0/config/addrbook.json deleted file mode 100644 index cce9f7f27c..0000000000 --- a/ci/chains/gaia/v5.0.5/ibc-0/config/addrbook.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "key": "d551bab9ec950b3c71733c80", - "addrs": [] -} \ No newline at end of file diff --git a/ci/chains/gaia/v5.0.5/ibc-0/config/app.toml b/ci/chains/gaia/v5.0.5/ibc-0/config/app.toml deleted file mode 100644 index 54de73e392..0000000000 --- a/ci/chains/gaia/v5.0.5/ibc-0/config/app.toml +++ /dev/null @@ -1,152 +0,0 @@ -# This is a TOML config file. -# For more information, see https://github.com/toml-lang/toml - -############################################################################### -### Base Configuration ### -############################################################################### - -# The minimum gas prices a validator is willing to accept for processing a -# transaction. A transaction's fees must meet the minimum of any denomination -# specified in this config (e.g. 0.25token1;0.0001token2). -minimum-gas-prices = "" - -# default: the last 100 states are kept in addition to every 500th state; pruning at 10 block intervals -# nothing: all historic states will be saved, nothing will be deleted (i.e. archiving node) -# everything: all saved states will be deleted, storing only the current state; pruning at 10 block intervals -# custom: allow pruning options to be manually specified through 'pruning-keep-recent', 'pruning-keep-every', and 'pruning-interval' -pruning = "default" - -# These are applied if and only if the pruning strategy is custom. -pruning-keep-recent = "0" -pruning-keep-every = "0" -pruning-interval = "0" - -# HaltHeight contains a non-zero block height at which a node will gracefully -# halt and shutdown that can be used to assist upgrades and testing. -# -# Note: Commitment of state will be attempted on the corresponding block. -halt-height = 0 - -# HaltTime contains a non-zero minimum block time (in Unix seconds) at which -# a node will gracefully halt and shutdown that can be used to assist upgrades -# and testing. -# -# Note: Commitment of state will be attempted on the corresponding block. -halt-time = 0 - -# MinRetainBlocks defines the minimum block height offset from the current -# block being committed, such that all blocks past this offset are pruned -# from Tendermint. It is used as part of the process of determining the -# ResponseCommit.RetainHeight value during ABCI Commit. A value of 0 indicates -# that no blocks should be pruned. 
-# -# This configuration value is only responsible for pruning Tendermint blocks. -# It has no bearing on application state pruning which is determined by the -# "pruning-*" configurations. -# -# Note: Tendermint block pruning is dependant on this parameter in conunction -# with the unbonding (safety threshold) period, state pruning and state sync -# snapshot parameters to determine the correct minimum value of -# ResponseCommit.RetainHeight. -min-retain-blocks = 0 - -# InterBlockCache enables inter-block caching. -inter-block-cache = true - -# IndexEvents defines the set of events in the form {eventType}.{attributeKey}, -# which informs Tendermint what to index. If empty, all events will be indexed. -# -# Example: -# ["message.sender", "message.recipient"] -index-events = [] - -############################################################################### -### Telemetry Configuration ### -############################################################################### - -[telemetry] - -# Prefixed with keys to separate services. -service-name = "" - -# Enabled enables the application telemetry functionality. When enabled, -# an in-memory sink is also enabled by default. Operators may also enabled -# other sinks such as Prometheus. -enabled = false - -# Enable prefixing gauge values with hostname. -enable-hostname = false - -# Enable adding hostname to labels. -enable-hostname-label = false - -# Enable adding service to labels. -enable-service-label = false - -# PrometheusRetentionTime, when positive, enables a Prometheus metrics sink. -prometheus-retention-time = 0 - -# GlobalLabels defines a global set of name/value label tuples applied to all -# metrics emitted using the wrapper functions defined in telemetry package. -# -# Example: -# [["chain_id", "cosmoshub-1"]] -global-labels = [ -] - -############################################################################### -### API Configuration ### -############################################################################### - -[api] - -# Enable defines if the API server should be enabled. -enable = false - -# Swagger defines if swagger documentation should automatically be registered. -swagger = false - -# Address defines the API server to listen on. -address = "tcp://0.0.0.0:1317" - -# MaxOpenConnections defines the number of maximum open connections. -max-open-connections = 1000 - -# RPCReadTimeout defines the Tendermint RPC read timeout (in seconds). -rpc-read-timeout = 10 - -# RPCWriteTimeout defines the Tendermint RPC write timeout (in seconds). -rpc-write-timeout = 0 - -# RPCMaxBodyBytes defines the Tendermint maximum response body (in bytes). -rpc-max-body-bytes = 1000000 - -# EnableUnsafeCORS defines if CORS should be enabled (unsafe - use it at your own risk). -enabled-unsafe-cors = false - -############################################################################### -### gRPC Configuration ### -############################################################################### - -[grpc] - -# Enable defines if the gRPC server should be enabled. -enable = true - -# Address defines the gRPC server address to bind to. 
-address = "0.0.0.0:9090" - -############################################################################### -### State Sync Configuration ### -############################################################################### - -# State sync snapshots allow other nodes to rapidly join the network without replaying historical -# blocks, instead downloading and applying a snapshot of the application state at a given height. -[state-sync] - -# snapshot-interval specifies the block interval at which local state sync snapshots are -# taken (0 to disable). Must be a multiple of pruning-keep-every. -snapshot-interval = 0 - -# snapshot-keep-recent specifies the number of recent snapshots to keep and serve (0 to keep all). -snapshot-keep-recent = 2 diff --git a/ci/chains/gaia/v5.0.5/ibc-0/config/client.toml b/ci/chains/gaia/v5.0.5/ibc-0/config/client.toml deleted file mode 100644 index 222695a3f8..0000000000 --- a/ci/chains/gaia/v5.0.5/ibc-0/config/client.toml +++ /dev/null @@ -1,17 +0,0 @@ -# This is a TOML config file. -# For more information, see https://github.com/toml-lang/toml - -############################################################################### -### Client Configuration ### -############################################################################### - -# The network chain ID -chain-id = "" -# The keyring's backend, where the keys are stored (os|file|kwallet|pass|test|memory) -keyring-backend = "os" -# CLI output format (text|json) -output = "text" -# : to Tendermint RPC interface for this chain -node = "tcp://localhost:26657" -# Transaction broadcasting mode (sync|async|block) -broadcast-mode = "sync" diff --git a/ci/chains/gaia/v5.0.5/ibc-0/config/config.toml b/ci/chains/gaia/v5.0.5/ibc-0/config/config.toml deleted file mode 100644 index 071a9c4199..0000000000 --- a/ci/chains/gaia/v5.0.5/ibc-0/config/config.toml +++ /dev/null @@ -1,400 +0,0 @@ -# This is a TOML config file. -# For more information, see https://github.com/toml-lang/toml - -# NOTE: Any path below can be absolute (e.g. "/var/myawesomeapp/data") or -# relative to the home directory (e.g. "data"). The home directory is -# "$HOME/.tendermint" by default, but could be changed via $TMHOME env variable -# or --home cmd flag. 
- -####################################################################### -### Main Base Config Options ### -####################################################################### - -# TCP or UNIX socket address of the ABCI application, -# or the name of an ABCI application compiled in with the Tendermint binary -proxy_app = "tcp://127.0.0.1:26658" - -# A custom human readable name for this node -moniker = "ibc-0" - -# If this node is many blocks behind the tip of the chain, FastSync -# allows them to catchup quickly by downloading blocks in parallel -# and verifying their commits -fast_sync = true - -# Database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb -# * goleveldb (github.com/syndtr/goleveldb - most popular implementation) -# - pure go -# - stable -# * cleveldb (uses levigo wrapper) -# - fast -# - requires gcc -# - use cleveldb build tag (go build -tags cleveldb) -# * boltdb (uses etcd's fork of bolt - github.com/etcd-io/bbolt) -# - EXPERIMENTAL -# - may be faster is some use-cases (random reads - indexer) -# - use boltdb build tag (go build -tags boltdb) -# * rocksdb (uses github.com/tecbot/gorocksdb) -# - EXPERIMENTAL -# - requires gcc -# - use rocksdb build tag (go build -tags rocksdb) -# * badgerdb (uses github.com/dgraph-io/badger) -# - EXPERIMENTAL -# - use badgerdb build tag (go build -tags badgerdb) -db_backend = "goleveldb" - -# Database directory -db_dir = "data" - -# Output level for logging, including package level options -log_level = "info" - -# Output format: 'plain' (colored text) or 'json' -log_format = "plain" - -##### additional base config options ##### - -# Path to the JSON file containing the initial validator set and other meta data -genesis_file = "config/genesis.json" - -# Path to the JSON file containing the private key to use as a validator in the consensus protocol -priv_validator_key_file = "config/priv_validator_key.json" - -# Path to the JSON file containing the last sign state of a validator -priv_validator_state_file = "data/priv_validator_state.json" - -# TCP or UNIX socket address for Tendermint to listen on for -# connections from an external PrivValidator process -priv_validator_laddr = "" - -# Path to the JSON file containing the private key to use for node authentication in the p2p protocol -node_key_file = "config/node_key.json" - -# Mechanism to connect to the ABCI application: socket | grpc -abci = "socket" - -# If true, query the ABCI app on connecting to a new peer -# so the app can decide if we should keep the connection or not -filter_peers = false - - -####################################################################### -### Advanced Configuration Options ### -####################################################################### - -####################################################### -### RPC Server Configuration Options ### -####################################################### -[rpc] - -# TCP or UNIX socket address for the RPC server to listen on -laddr = "tcp://0.0.0.0:26657" - -# A list of origins a cross-domain request can be executed from -# Default value '[]' disables cors support -# Use '["*"]' to allow any origin -cors_allowed_origins = [] - -# A list of methods the client is allowed to use with cross-domain requests -cors_allowed_methods = ["HEAD", "GET", "POST", ] - -# A list of non simple headers the client is allowed to use with cross-domain requests -cors_allowed_headers = ["Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time", ] - -# TCP or UNIX socket address for the gRPC 
server to listen on -# NOTE: This server only supports /broadcast_tx_commit -grpc_laddr = "" - -# Maximum number of simultaneous connections. -# Does not include RPC (HTTP&WebSocket) connections. See max_open_connections -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} -# 1024 - 40 - 10 - 50 = 924 = ~900 -grpc_max_open_connections = 900 - -# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool -unsafe = false - -# Maximum number of simultaneous connections (including WebSocket). -# Does not include gRPC connections. See grpc_max_open_connections -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} -# 1024 - 40 - 10 - 50 = 924 = ~900 -max_open_connections = 900 - -# Maximum number of unique clientIDs that can /subscribe -# If you're using /broadcast_tx_commit, set to the estimated maximum number -# of broadcast_tx_commit calls per block. -max_subscription_clients = 100 - -# Maximum number of unique queries a given client can /subscribe to -# If you're using GRPC (or Local RPC client) and /broadcast_tx_commit, set to -# the estimated # maximum number of broadcast_tx_commit calls per block. -max_subscriptions_per_client = 5 - -# How long to wait for a tx to be committed during /broadcast_tx_commit. -# WARNING: Using a value larger than 10s will result in increasing the -# global HTTP write timeout, which applies to all connections and endpoints. -# See https://github.com/tendermint/tendermint/issues/3435 -timeout_broadcast_tx_commit = "10s" - -# Maximum size of request body, in bytes -max_body_bytes = 1000000 - -# Maximum size of request header, in bytes -max_header_bytes = 1048576 - -# The path to a file containing certificate that is used to create the HTTPS server. -# Might be either absolute path or path related to Tendermint's config directory. -# If the certificate is signed by a certificate authority, -# the certFile should be the concatenation of the server's certificate, any intermediates, -# and the CA's certificate. -# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. -# Otherwise, HTTP server is run. -tls_cert_file = "" - -# The path to a file containing matching private key that is used to create the HTTPS server. -# Might be either absolute path or path related to Tendermint's config directory. -# NOTE: both tls-cert-file and tls-key-file must be present for Tendermint to create HTTPS server. -# Otherwise, HTTP server is run. -tls_key_file = "" - -# pprof listen address (https://golang.org/pkg/net/http/pprof) -pprof_laddr = "localhost:6060" - -####################################################### -### P2P Configuration Options ### -####################################################### -[p2p] - -# Address to listen for incoming connections -laddr = "tcp://0.0.0.0:26656" - -# Address to advertise to peers for them to dial -# If empty, will use the same port as the laddr, -# and will introspect on the listener or use UPnP -# to figure out the address. 
-external_address = "" - -# Comma separated list of seed nodes to connect to -seeds = "" - -# Comma separated list of nodes to keep persistent connections to -persistent_peers = "" - -# UPNP port forwarding -upnp = false - -# Path to address book -addr_book_file = "config/addrbook.json" - -# Set true for strict address routability rules -# Set false for private or local networks -addr_book_strict = true - -# Maximum number of inbound peers -max_num_inbound_peers = 40 - -# Maximum number of outbound peers to connect to, excluding persistent peers -max_num_outbound_peers = 10 - -# List of node IDs, to which a connection will be (re)established ignoring any existing limits -unconditional_peer_ids = "" - -# Maximum pause when redialing a persistent peer (if zero, exponential backoff is used) -persistent_peers_max_dial_period = "0s" - -# Time to wait before flushing messages out on the connection -flush_throttle_timeout = "100ms" - -# Maximum size of a message packet payload, in bytes -max_packet_msg_payload_size = 1024 - -# Rate at which packets can be sent, in bytes/second -send_rate = 5120000 - -# Rate at which packets can be received, in bytes/second -recv_rate = 5120000 - -# Set true to enable the peer-exchange reactor -pex = true - -# Seed mode, in which node constantly crawls the network and looks for -# peers. If another node asks it for addresses, it responds and disconnects. -# -# Does not work if the peer-exchange reactor is disabled. -seed_mode = false - -# Comma separated list of peer IDs to keep private (will not be gossiped to other peers) -private_peer_ids = "" - -# Toggle to disable guard against peers connecting from the same ip. -allow_duplicate_ip = false - -# Peer connection configuration. -handshake_timeout = "20s" -dial_timeout = "3s" - -####################################################### -### Mempool Configuration Option ### -####################################################### -[mempool] - -recheck = true -broadcast = true -wal_dir = "" - -# Maximum number of transactions in the mempool -size = 5000 - -# Limit the total size of all txs in the mempool. -# This only accounts for raw transactions (e.g. given 1MB transactions and -# max_txs_bytes=5MB, mempool will only accept 5 transactions). -max_txs_bytes = 1073741824 - -# Size of the cache (used to filter transactions we saw earlier) in transactions -cache_size = 10000 - -# Do not remove invalid transactions from the cache (default: false) -# Set to true if it's not possible for any invalid transaction to become valid -# again in the future. -keep-invalid-txs-in-cache = false - -# Maximum size of a single transaction. -# NOTE: the max size of a tx transmitted over the network is {max_tx_bytes}. -max_tx_bytes = 1048576 - -# Maximum size of a batch of transactions to send to a peer -# Including space needed by encoding (one varint per transaction). -# XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796 -max_batch_bytes = 0 - -####################################################### -### State Sync Configuration Options ### -####################################################### -[statesync] -# State sync rapidly bootstraps a new node by discovering, fetching, and restoring a state machine -# snapshot from peers instead of fetching and replaying historical blocks. Requires some peers in -# the network to take and serve state machine snapshots. State sync is not attempted if the node -# has any local state (LastBlockHeight > 0). 
The node will have a truncated block history, -# starting from the height of the snapshot. -enable = false - -# RPC servers (comma-separated) for light client verification of the synced state machine and -# retrieval of state data for node bootstrapping. Also needs a trusted height and corresponding -# header hash obtained from a trusted source, and a period during which validators can be trusted. -# -# For Cosmos SDK-based chains, trust_period should usually be about 2/3 of the unbonding time (~2 -# weeks) during which they can be financially punished (slashed) for misbehavior. -rpc_servers = "" -trust_height = 0 -trust_hash = "" -trust_period = "168h0m0s" - -# Time to spend discovering snapshots before initiating a restore. -discovery_time = "15s" - -# Temporary directory for state sync snapshot chunks, defaults to the OS tempdir (typically /tmp). -# Will create a new, randomly named directory within, and remove it when done. -temp_dir = "" - -# The timeout duration before re-requesting a chunk, possibly from a different -# peer (default: 1 minute). -chunk_request_timeout = "10s" - -# The number of concurrent chunk fetchers to run (default: 1). -chunk_fetchers = "4" - -####################################################### -### Fast Sync Configuration Connections ### -####################################################### -[fastsync] - -# Fast Sync version to use: -# 1) "v0" (default) - the legacy fast sync implementation -# 2) "v1" - refactor of v0 version for better testability -# 2) "v2" - complete redesign of v0, optimized for testability & readability -version = "v0" - -####################################################### -### Consensus Configuration Options ### -####################################################### -[consensus] - -wal_file = "data/cs.wal/wal" - -# How long we wait for a proposal block before prevoting nil -timeout_propose = "1s" -# How much timeout_propose increases with each round -timeout_propose_delta = "500ms" -# How long we wait after receiving +2/3 prevotes for “anything” (ie. not a single block or nil) -timeout_prevote = "1s" -# How much the timeout_prevote increases with each round -timeout_prevote_delta = "500ms" -# How long we wait after receiving +2/3 precommits for “anything” (ie. not a single block or nil) -timeout_precommit = "1s" -# How much the timeout_precommit increases with each round -timeout_precommit_delta = "500ms" -# How long we wait after committing a block, before starting on the new -# height (this gives us a chance to receive some more precommits, even -# though we already have +2/3). -timeout_commit = "1s" - -# How many blocks to look back to check existence of the node's consensus votes before joining consensus -# When non-zero, the node will panic upon restart -# if the same consensus key was used to sign {double_sign_check_height} last blocks. -# So, validators should stop the state machine, wait for some blocks, and then restart the state machine to avoid panic. 
-double_sign_check_height = 0 - -# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) -skip_timeout_commit = false - -# EmptyBlocks mode and possible interval between empty blocks -create_empty_blocks = true -create_empty_blocks_interval = "0s" - -# Reactor sleep duration parameters -peer_gossip_sleep_duration = "100ms" -peer_query_maj23_sleep_duration = "2s" - -####################################################### -### Transaction Indexer Configuration Options ### -####################################################### -[tx_index] - -# What indexer to use for transactions -# -# The application will set which txs to index. In some cases a node operator will be able -# to decide which txs to index based on configuration set in the application. -# -# Options: -# 1) "null" -# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). -# - When "kv" is chosen "tx.height" and "tx.hash" will always be indexed. -indexer = "kv" - -####################################################### -### Instrumentation Configuration Options ### -####################################################### -[instrumentation] - -# When true, Prometheus metrics are served under /metrics on -# PrometheusListenAddr. -# Check out the documentation for the list of available metrics. -prometheus = false - -# Address to listen for Prometheus collector(s) connections -prometheus_listen_addr = ":26660" - -# Maximum number of simultaneous connections. -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -max_open_connections = 3 - -# Instrumentation namespace -namespace = "tendermint" diff --git a/ci/chains/gaia/v5.0.5/ibc-0/config/genesis.json b/ci/chains/gaia/v5.0.5/ibc-0/config/genesis.json deleted file mode 100644 index 0fd111e818..0000000000 --- a/ci/chains/gaia/v5.0.5/ibc-0/config/genesis.json +++ /dev/null @@ -1,342 +0,0 @@ -{ - "genesis_time": "2021-08-06T15:55:09.233573359Z", - "chain_id": "ibc-0", - "initial_height": "1", - "consensus_params": { - "block": { - "max_bytes": "22020096", - "max_gas": "-1", - "time_iota_ms": "1000" - }, - "evidence": { - "max_age_num_blocks": "100000", - "max_age_duration": "172800000000000", - "max_bytes": "1048576" - }, - "validator": { - "pub_key_types": [ - "ed25519" - ] - }, - "version": {} - }, - "app_hash": "", - "app_state": { - "auth": { - "params": { - "max_memo_characters": "256", - "tx_sig_limit": "7", - "tx_size_cost_per_byte": "10", - "sig_verify_cost_ed25519": "590", - "sig_verify_cost_secp256k1": "1000" - }, - "accounts": [ - { - "@type": "/cosmos.auth.v1beta1.BaseAccount", - "address": "cosmos18hxd9gnkkfp6du3479jrpw69s2ycjwpd5kwm3m", - "pub_key": null, - "account_number": "0", - "sequence": "0" - }, - { - "@type": "/cosmos.auth.v1beta1.BaseAccount", - "address": "cosmos1fk5g3yrm6resm26esff2a2t28t5hh9lwv65hdr", - "pub_key": null, - "account_number": "0", - "sequence": "0" - }, - { - "@type": "/cosmos.auth.v1beta1.BaseAccount", - "address": "cosmos1m9h2rkke0h2qw5wpmr7d2a7nanxg8prankvar9", - "pub_key": null, - "account_number": "0", - "sequence": "0" - } - ] - }, - "bank": { - "params": { - "send_enabled": [], - "default_send_enabled": true - }, - "balances": [ - { - "address": "cosmos18hxd9gnkkfp6du3479jrpw69s2ycjwpd5kwm3m", - "coins": [ - { - "denom": "samoleans", - "amount": "100000000000" - }, - { - "denom": "stake", - "amount": "100000000000" - } - ] - }, - { - "address": 
"cosmos1fk5g3yrm6resm26esff2a2t28t5hh9lwv65hdr", - "coins": [ - { - "denom": "samoleans", - "amount": "100000000000" - }, - { - "denom": "stake", - "amount": "100000000000" - } - ] - }, - { - "address": "cosmos1m9h2rkke0h2qw5wpmr7d2a7nanxg8prankvar9", - "coins": [ - { - "denom": "stake", - "amount": "100000000000" - } - ] - } - ], - "supply": [ - { - "denom": "samoleans", - "amount": "200000000000" - }, - { - "denom": "stake", - "amount": "300000000000" - } - ], - "denom_metadata": [] - }, - "capability": { - "index": "1", - "owners": [] - }, - "crisis": { - "constant_fee": { - "denom": "stake", - "amount": "1000" - } - }, - "distribution": { - "params": { - "community_tax": "0.020000000000000000", - "base_proposer_reward": "0.010000000000000000", - "bonus_proposer_reward": "0.040000000000000000", - "withdraw_addr_enabled": true - }, - "fee_pool": { - "community_pool": [] - }, - "delegator_withdraw_infos": [], - "previous_proposer": "", - "outstanding_rewards": [], - "validator_accumulated_commissions": [], - "validator_historical_rewards": [], - "validator_current_rewards": [], - "delegator_starting_infos": [], - "validator_slash_events": [] - }, - "evidence": { - "evidence": [] - }, - "genutil": { - "gen_txs": [ - { - "body": { - "messages": [ - { - "@type": "/cosmos.staking.v1beta1.MsgCreateValidator", - "description": { - "moniker": "ibc-0", - "identity": "", - "website": "", - "security_contact": "", - "details": "" - }, - "commission": { - "rate": "0.100000000000000000", - "max_rate": "0.200000000000000000", - "max_change_rate": "0.010000000000000000" - }, - "min_self_delegation": "1", - "delegator_address": "cosmos1m9h2rkke0h2qw5wpmr7d2a7nanxg8prankvar9", - "validator_address": "cosmosvaloper1m9h2rkke0h2qw5wpmr7d2a7nanxg8prakzcg0k", - "pubkey": { - "@type": "/cosmos.crypto.ed25519.PubKey", - "key": "UWzLnjUlTmMPHJISY+GWB5I4wJKRjMGzNf/o0rB50vo=" - }, - "value": { - "denom": "stake", - "amount": "100000000000" - } - } - ], - "memo": "0a67e76f6743997bde26b522ef43e886b1baa95d@192.168.50.214:26656", - "timeout_height": "0", - "extension_options": [], - "non_critical_extension_options": [] - }, - "auth_info": { - "signer_infos": [ - { - "public_key": { - "@type": "/cosmos.crypto.secp256k1.PubKey", - "key": "A8om4wMSv7tTzlhjxv9wF/Pc4SZTZh1tb6kqRF4OOBJp" - }, - "mode_info": { - "single": { - "mode": "SIGN_MODE_DIRECT" - } - }, - "sequence": "0" - } - ], - "fee": { - "amount": [], - "gas_limit": "200000", - "payer": "", - "granter": "" - } - }, - "signatures": [ - "dbccwhF7NQmGAHDr4+5JiVlNL/KqevVYSFCIGlSqYWZ9QOSxSJ+J40pzGWsaAyeVpo91xnQkl8CpuTofBzfXkQ==" - ] - } - ] - }, - "gov": { - "starting_proposal_id": "1", - "deposits": [], - "votes": [], - "proposals": [], - "deposit_params": { - "min_deposit": [ - { - "denom": "stake", - "amount": "10000000" - } - ], - "max_deposit_period": "200s" - }, - "voting_params": { - "voting_period": "200s" - }, - "tally_params": { - "quorum": "0.334000000000000000", - "threshold": "0.500000000000000000", - "veto_threshold": "0.334000000000000000" - } - }, - "ibc": { - "client_genesis": { - "clients": [], - "clients_consensus": [], - "clients_metadata": [], - "params": { - "allowed_clients": [ - "06-solomachine", - "07-tendermint" - ] - }, - "create_localhost": false, - "next_client_sequence": "0" - }, - "connection_genesis": { - "connections": [], - "client_connection_paths": [], - "next_connection_sequence": "0" - }, - "channel_genesis": { - "channels": [], - "acknowledgements": [], - "commitments": [], - "receipts": [], - "send_sequences": [], - 
"recv_sequences": [], - "ack_sequences": [], - "next_channel_sequence": "0" - } - }, - "liquidity": { - "params": { - "pool_types": [ - { - "id": 1, - "name": "StandardLiquidityPool", - "min_reserve_coin_num": 2, - "max_reserve_coin_num": 2, - "description": "Standard liquidity pool with pool price function X/Y, ESPM constraint, and two kinds of reserve coins" - } - ], - "min_init_deposit_amount": "1000000", - "init_pool_coin_mint_amount": "1000000", - "max_reserve_coin_amount": "0", - "pool_creation_fee": [ - { - "denom": "stake", - "amount": "40000000" - } - ], - "swap_fee_rate": "0.003000000000000000", - "withdraw_fee_rate": "0.000000000000000000", - "max_order_amount_ratio": "0.100000000000000000", - "unit_batch_height": 1, - "circuit_breaker_enabled": false - }, - "pool_records": [] - }, - "mint": { - "minter": { - "inflation": "0.130000000000000000", - "annual_provisions": "0.000000000000000000" - }, - "params": { - "mint_denom": "stake", - "inflation_rate_change": "0.130000000000000000", - "inflation_max": "0.200000000000000000", - "inflation_min": "0.070000000000000000", - "goal_bonded": "0.670000000000000000", - "blocks_per_year": "6311520" - } - }, - "params": null, - "slashing": { - "params": { - "signed_blocks_window": "100", - "min_signed_per_window": "0.500000000000000000", - "downtime_jail_duration": "600s", - "slash_fraction_double_sign": "0.050000000000000000", - "slash_fraction_downtime": "0.010000000000000000" - }, - "signing_infos": [], - "missed_blocks": [] - }, - "staking": { - "params": { - "unbonding_time": "1814400s", - "max_validators": 100, - "max_entries": 7, - "historical_entries": 10000, - "bond_denom": "stake" - }, - "last_total_power": "0", - "last_validator_powers": [], - "validators": [], - "delegations": [], - "unbonding_delegations": [], - "redelegations": [], - "exported": false - }, - "transfer": { - "port_id": "transfer", - "denom_traces": [], - "params": { - "send_enabled": true, - "receive_enabled": true - } - }, - "upgrade": {}, - "vesting": {} - } -} \ No newline at end of file diff --git a/ci/chains/gaia/v5.0.5/ibc-0/config/gentx/gentx-0a67e76f6743997bde26b522ef43e886b1baa95d.json b/ci/chains/gaia/v5.0.5/ibc-0/config/gentx/gentx-0a67e76f6743997bde26b522ef43e886b1baa95d.json deleted file mode 100644 index 36cc8ac3d0..0000000000 --- a/ci/chains/gaia/v5.0.5/ibc-0/config/gentx/gentx-0a67e76f6743997bde26b522ef43e886b1baa95d.json +++ /dev/null @@ -1 +0,0 @@ 
-{"body":{"messages":[{"@type":"/cosmos.staking.v1beta1.MsgCreateValidator","description":{"moniker":"ibc-0","identity":"","website":"","security_contact":"","details":""},"commission":{"rate":"0.100000000000000000","max_rate":"0.200000000000000000","max_change_rate":"0.010000000000000000"},"min_self_delegation":"1","delegator_address":"cosmos1m9h2rkke0h2qw5wpmr7d2a7nanxg8prankvar9","validator_address":"cosmosvaloper1m9h2rkke0h2qw5wpmr7d2a7nanxg8prakzcg0k","pubkey":{"@type":"/cosmos.crypto.ed25519.PubKey","key":"UWzLnjUlTmMPHJISY+GWB5I4wJKRjMGzNf/o0rB50vo="},"value":{"denom":"stake","amount":"100000000000"}}],"memo":"0a67e76f6743997bde26b522ef43e886b1baa95d@192.168.50.214:26656","timeout_height":"0","extension_options":[],"non_critical_extension_options":[]},"auth_info":{"signer_infos":[{"public_key":{"@type":"/cosmos.crypto.secp256k1.PubKey","key":"A8om4wMSv7tTzlhjxv9wF/Pc4SZTZh1tb6kqRF4OOBJp"},"mode_info":{"single":{"mode":"SIGN_MODE_DIRECT"}},"sequence":"0"}],"fee":{"amount":[],"gas_limit":"200000","payer":"","granter":""}},"signatures":["dbccwhF7NQmGAHDr4+5JiVlNL/KqevVYSFCIGlSqYWZ9QOSxSJ+J40pzGWsaAyeVpo91xnQkl8CpuTofBzfXkQ=="]} diff --git a/ci/chains/gaia/v5.0.5/ibc-0/config/node_key.json b/ci/chains/gaia/v5.0.5/ibc-0/config/node_key.json deleted file mode 100644 index 1ad4894060..0000000000 --- a/ci/chains/gaia/v5.0.5/ibc-0/config/node_key.json +++ /dev/null @@ -1 +0,0 @@ -{"priv_key":{"type":"tendermint/PrivKeyEd25519","value":"XSr6UzSuk0yJGLPr8HNJmOvR0Ll4lXd0Q1f3JwCTsarMfTCiPkwdB7XuIaUYZ+EqPMQlGswfcX1MsmQnwT7tiA=="}} \ No newline at end of file diff --git a/ci/chains/gaia/v5.0.5/ibc-0/config/priv_validator_key.json b/ci/chains/gaia/v5.0.5/ibc-0/config/priv_validator_key.json deleted file mode 100644 index 5acf485cbc..0000000000 --- a/ci/chains/gaia/v5.0.5/ibc-0/config/priv_validator_key.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "address": "3CE3FE24E325D665404E83E08F52B6F690F53696", - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "UWzLnjUlTmMPHJISY+GWB5I4wJKRjMGzNf/o0rB50vo=" - }, - "priv_key": { - "type": "tendermint/PrivKeyEd25519", - "value": "aHKU0D6vWVoOY/yGbG99aTwunQw/OLIWSXq+RnmLihRRbMueNSVOYw8ckhJj4ZYHkjjAkpGMwbM1/+jSsHnS+g==" - } -} \ No newline at end of file diff --git a/ci/chains/gaia/v5.0.5/ibc-0/keyring-test/3dccd2a276b243a6f235f16430bb45828989382d.address b/ci/chains/gaia/v5.0.5/ibc-0/keyring-test/3dccd2a276b243a6f235f16430bb45828989382d.address deleted file mode 100644 index 0ee685e76d..0000000000 --- a/ci/chains/gaia/v5.0.5/ibc-0/keyring-test/3dccd2a276b243a6f235f16430bb45828989382d.address +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wOC0wNiAxMTo1NToxMS40MDg4OTA0OTQgLTA0MDAgRURUIG09KzAuMDcyNDk0NTU0IiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiVDJ0N0FLQlU3M1dFbU9vVSJ9.c_tFqk0Us6om3ARhb75Nc6Sbfr3pKIoAMN9iFbMTnxZClTMEjWQlrw.p3d8hSE0wlkjQJFq.TVz3yHFFHaaYbe2Tvxm2ZjFXHZchdMSNmOwqQpOx9kEA7E-lsDJtMTVZ67CHca6hONRQNN1qNGwmnc_qGt4EsFFkLSs8FoP4CbhZbpeiFP8hCGifaJeB3V-7rB1QqaCHXv6bWjo22J8R4Ab2FYSsmIzkVDsbJ0bwF6X1zvy-HK31DaX-7pXiZ_ZpHPLPulbNkuQOuw1Ko60pn7gddsd2KtVb8XGdfF2AwPbwm1JwJat1dA.6gvhCUm2fYsEaC83zEaArw \ No newline at end of file diff --git a/ci/chains/gaia/v5.0.5/ibc-0/keyring-test/4da888907bd0f30dab598252aea96a3ae97b97ee.address b/ci/chains/gaia/v5.0.5/ibc-0/keyring-test/4da888907bd0f30dab598252aea96a3ae97b97ee.address deleted file mode 100644 index 10aca78a47..0000000000 --- a/ci/chains/gaia/v5.0.5/ibc-0/keyring-test/4da888907bd0f30dab598252aea96a3ae97b97ee.address +++ /dev/null @@ -1 +0,0 @@ 
-eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wOC0wNiAxMTo1NToxMi40OTEwMDU5NTYgLTA0MDAgRURUIG09KzAuMDY4MTU1MzM3IiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiV1FRdjdLTVJZTlh4NF9kZyJ9.sYBSSj4WLgksCMvTmINlA29D5xOghK-f2MAW1jFdVdIRhjKJ8pvFVw.zVwXJihE_WugBihe.ZYweIxXQ4BfXomdUiYjrNsmBn4N1J2SqHeDTC21Od6oy-tjq5xOMxKSDCqbPl9828txXEB9eCOzCkDMQkcEz7dYWGjQcBkGsln9JefEK_xchdSmeK3pfy2hVpQvqtt0OO8EsDlcIsm6gGloTq7ACJ9vJCtTiP2H0gTdNTqmX5ERRvy6KCVypZSCvPg7znXCTeW7vG8x7be9VA33ZT6KJIuphk91EamFFYOqJPQ9konae3u9OKP4.9gG1lM3T0U-TEVvH1NHF6A \ No newline at end of file diff --git a/ci/chains/gaia/v5.0.5/ibc-0/keyring-test/d96ea1dad97dd40751c1d8fcd577d3eccc83847d.address b/ci/chains/gaia/v5.0.5/ibc-0/keyring-test/d96ea1dad97dd40751c1d8fcd577d3eccc83847d.address deleted file mode 100644 index d97809d0a3..0000000000 --- a/ci/chains/gaia/v5.0.5/ibc-0/keyring-test/d96ea1dad97dd40751c1d8fcd577d3eccc83847d.address +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wOC0wNiAxMTo1NToxMC4zMTkyMjcyMzYgLTA0MDAgRURUIG09KzAuMDc2Mjg1MTUwIiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiZnZiVzhmdERKcTJWdEdseiJ9.5XAKEXhLRZMMdbEJtqUUTcXqwkxICMQUfYnKdzyUmO6oItkgvgSAdw._w8dTWEfuIPQW82G._kamePXu2Pkx3reyUjSK1Abw4gn199CXbP8YHU1Zur5cu4P9tVIen3-tu0VJ9ou8sA-QUVdvcG3_kVxIu1mvcUdgQOGJidA6dndOuPevkFDWAvvtnUQaTKBzdiQaM3e7arGMgjXvFJlDM0IrCjmBBHiBCG7_kx-VLF-FC4D9OvypINRk-d-HVCpAOHfyrdMj08EcpRvpFQu0Q7WcEaZsl8Wl0u-TdQslkVZupPXSNfT79qxg1LwLvKzY.UA2-tzWmzhuGS83LLNFTJQ \ No newline at end of file diff --git a/ci/chains/gaia/v5.0.5/ibc-0/keyring-test/user.info b/ci/chains/gaia/v5.0.5/ibc-0/keyring-test/user.info deleted file mode 100644 index 6378434dba..0000000000 --- a/ci/chains/gaia/v5.0.5/ibc-0/keyring-test/user.info +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wOC0wNiAxMTo1NToxMS40MDI1NjU5NCAtMDQwMCBFRFQgbT0rMC4wNjYxNjk5OTYiLCJlbmMiOiJBMjU2R0NNIiwicDJjIjo4MTkyLCJwMnMiOiJFam1uQmRUT3VHOVJFdGRuIn0.5BCtBSgTthznCJgVzxwvR6LV8-AsVAfkKJgaR3MahCuhcKjgM6H7ig.qO-A9NhUkrc3OcZM.lHVVATkQuFo34HYnmBu5r5uOeCt9FfYpArvr4SEwaltfzucjVPkunpNTG6VEuVkM-QNo86pwTi-nx3K-QydzkCpcjYM8vn-R-HuROwKp2yDgRdNr-QK9XLoYJqL0rrDabpwxJnyghuth0D6iJOlTkZyGn6Gkl7GkF3ewI7ny9qKwdTwv5AkbNI4UmN1cEDikFUgJ-EdtvjZB6iy3DsBuacXQspNZLw4IsVNS6G5y3ODBomdt4VrBlsYmfKVO_UFlmnuAhLHUbRw6pCibQgWhWll1IoC0zx491fqoNX049gFfJN2uDcCX7vy5svG2__4omQvBdCr5QtRwsvCFZPChNf0vuLmNLhg.di2QoHAwspqXNXe0Kwzm1Q \ No newline at end of file diff --git a/ci/chains/gaia/v5.0.5/ibc-0/keyring-test/user2.info b/ci/chains/gaia/v5.0.5/ibc-0/keyring-test/user2.info deleted file mode 100644 index fde543a535..0000000000 --- a/ci/chains/gaia/v5.0.5/ibc-0/keyring-test/user2.info +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wOC0wNiAxMTo1NToxMi40ODQ2OTQ0NTggLTA0MDAgRURUIG09KzAuMDYxODQzODI1IiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiTE9BMXRvanItQ0lsZHZjdSJ9.QvxZZ5KouVuuw39bKORfHDJRGSep8hdZQyuP-Xuvxu2pmfYYQ_1YYQ.W7y3-VIQfHW6SsWs.WlSMpDVl0Ej8pVq2HpoxiyEVZSdWygRWFHSM-zteFePNLjYUymhtg3jGx8s3jX5d6NwtX0PxmPVYYClLNMKWDHJH9GC5AfZgUCoCPeya0VkmOc9V9u8hBVBMDisnDCkE5IRlQzi5bd_hf-AJH_8vGNHvqsI9mtoeLF-BNAFTma_MUUd5wJNSxZYL3rsnhPmzBvNklAf3noCKQmmnIpNenMP23BwMSyki6y3pbYyJrERuo7noqHJ749vL2QG7AuogM70FUfiAABJXkL3EVZX7lOXJLxx6GCOlWMTLrAQE9bzxbCSYDISlmEfQgjhRgKPzGRqb8um98nXN7aa0eYGWL5OEiN7RBPTd.Tq-m5jicmGvDOuUNI2B_IQ \ No newline at end of file diff --git a/ci/chains/gaia/v5.0.5/ibc-0/keyring-test/validator.info b/ci/chains/gaia/v5.0.5/ibc-0/keyring-test/validator.info deleted file mode 100644 index 3f1a7adf11..0000000000 --- 
a/ci/chains/gaia/v5.0.5/ibc-0/keyring-test/validator.info +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wOC0wNiAxMTo1NToxMC4zMTAwNzkyNTEgLTA0MDAgRURUIG09KzAuMDY3MTM3MTYyIiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiNXZDekQ5cFBPbHQ3T3E1ciJ9.LQcsbodiuLHFDBxBMqGqec2nsjt3GFMeV4ePzX4YyGJz7bUyoCw3Mw.m4YUYOicLq53eixI.dPpXWczIiJnumz44yxHMkaU1-tYYjPsN9Sn_9PC_c8T3mDXPWCLWdaywYif8BP1jpQ__tF8iHh6HiOO9r6yp3mH6SGodkj2ey3FwBQwBwQ0L7N1sx6D4ASCaBaAcsjsc2aHew427LyDJhpMoW6luf4XzzzJsCRN0QXBWtpdffE23JAkahgIgrHARAKxYyqRkqRnesItEjJZXwjobm_ENEcyaUPdg7N5Ys3VDEp55FySa7shyBABprk4BdSu8N9ivaz6nk0sjvIzEDuINMCg9JdneErkIyr33mebIhAzaK3Aw6i0Umg1BTYvruai2eMp9ryh8nryMVrVDhBnf8DHaEkfiKUDPrP0rvzgAyjpWAleKrrb1.w1S63MsoNXdNVFgRuo0DCw \ No newline at end of file diff --git a/ci/chains/gaia/v5.0.5/ibc-0/user2_seed.json b/ci/chains/gaia/v5.0.5/ibc-0/user2_seed.json deleted file mode 100644 index f14517d186..0000000000 --- a/ci/chains/gaia/v5.0.5/ibc-0/user2_seed.json +++ /dev/null @@ -1 +0,0 @@ -{"name":"user2","type":"local","address":"cosmos1fk5g3yrm6resm26esff2a2t28t5hh9lwv65hdr","pubkey":"cosmospub1addwnpepqgssups8ywx8rp2h85xt0y7cfunpeadzwh9r5ss9w6awask3q257zld37vn","mnemonic":"dynamic life add hill bundle hold tone square damage library wrist news used text circle imitate walnut control curious cactus canal employ load half"} diff --git a/ci/chains/gaia/v5.0.5/ibc-0/user_seed.json b/ci/chains/gaia/v5.0.5/ibc-0/user_seed.json deleted file mode 100644 index bcba356514..0000000000 --- a/ci/chains/gaia/v5.0.5/ibc-0/user_seed.json +++ /dev/null @@ -1 +0,0 @@ -{"name":"user","type":"local","address":"cosmos18hxd9gnkkfp6du3479jrpw69s2ycjwpd5kwm3m","pubkey":"cosmospub1addwnpepqd4gesy8r0wyvay4a2m3x34xlhnds853tnhal0mwnzxdmlls3lukk4gwgjs","mnemonic":"regret area gas slab game feel corn picture click match myself flip fragile shuffle field toddler rack cycle shield blouse cup prefer depart picnic"} diff --git a/ci/chains/gaia/v5.0.5/ibc-0/validator_seed.json b/ci/chains/gaia/v5.0.5/ibc-0/validator_seed.json deleted file mode 100644 index f816aa20ac..0000000000 --- a/ci/chains/gaia/v5.0.5/ibc-0/validator_seed.json +++ /dev/null @@ -1 +0,0 @@ -{"name":"validator","type":"local","address":"cosmos1m9h2rkke0h2qw5wpmr7d2a7nanxg8prankvar9","pubkey":"cosmospub1addwnpepq09zdccrz2lmk57wtp3udlmszleaecfx2dnp6mt04y4yghsw8qfxju3rfl3","mnemonic":"robust nerve resist tip spoil grid poem invest unfold wrong helmet change poverty network popular strategy vague consider valid shrug salad health flush staff"} diff --git a/ci/chains/gaia/v5.0.5/ibc-1/config/app.toml b/ci/chains/gaia/v5.0.5/ibc-1/config/app.toml deleted file mode 100644 index 54de73e392..0000000000 --- a/ci/chains/gaia/v5.0.5/ibc-1/config/app.toml +++ /dev/null @@ -1,152 +0,0 @@ -# This is a TOML config file. -# For more information, see https://github.com/toml-lang/toml - -############################################################################### -### Base Configuration ### -############################################################################### - -# The minimum gas prices a validator is willing to accept for processing a -# transaction. A transaction's fees must meet the minimum of any denomination -# specified in this config (e.g. 0.25token1;0.0001token2). -minimum-gas-prices = "" - -# default: the last 100 states are kept in addition to every 500th state; pruning at 10 block intervals -# nothing: all historic states will be saved, nothing will be deleted (i.e. 
archiving node) -# everything: all saved states will be deleted, storing only the current state; pruning at 10 block intervals -# custom: allow pruning options to be manually specified through 'pruning-keep-recent', 'pruning-keep-every', and 'pruning-interval' -pruning = "default" - -# These are applied if and only if the pruning strategy is custom. -pruning-keep-recent = "0" -pruning-keep-every = "0" -pruning-interval = "0" - -# HaltHeight contains a non-zero block height at which a node will gracefully -# halt and shutdown that can be used to assist upgrades and testing. -# -# Note: Commitment of state will be attempted on the corresponding block. -halt-height = 0 - -# HaltTime contains a non-zero minimum block time (in Unix seconds) at which -# a node will gracefully halt and shutdown that can be used to assist upgrades -# and testing. -# -# Note: Commitment of state will be attempted on the corresponding block. -halt-time = 0 - -# MinRetainBlocks defines the minimum block height offset from the current -# block being committed, such that all blocks past this offset are pruned -# from Tendermint. It is used as part of the process of determining the -# ResponseCommit.RetainHeight value during ABCI Commit. A value of 0 indicates -# that no blocks should be pruned. -# -# This configuration value is only responsible for pruning Tendermint blocks. -# It has no bearing on application state pruning which is determined by the -# "pruning-*" configurations. -# -# Note: Tendermint block pruning is dependant on this parameter in conunction -# with the unbonding (safety threshold) period, state pruning and state sync -# snapshot parameters to determine the correct minimum value of -# ResponseCommit.RetainHeight. -min-retain-blocks = 0 - -# InterBlockCache enables inter-block caching. -inter-block-cache = true - -# IndexEvents defines the set of events in the form {eventType}.{attributeKey}, -# which informs Tendermint what to index. If empty, all events will be indexed. -# -# Example: -# ["message.sender", "message.recipient"] -index-events = [] - -############################################################################### -### Telemetry Configuration ### -############################################################################### - -[telemetry] - -# Prefixed with keys to separate services. -service-name = "" - -# Enabled enables the application telemetry functionality. When enabled, -# an in-memory sink is also enabled by default. Operators may also enabled -# other sinks such as Prometheus. -enabled = false - -# Enable prefixing gauge values with hostname. -enable-hostname = false - -# Enable adding hostname to labels. -enable-hostname-label = false - -# Enable adding service to labels. -enable-service-label = false - -# PrometheusRetentionTime, when positive, enables a Prometheus metrics sink. -prometheus-retention-time = 0 - -# GlobalLabels defines a global set of name/value label tuples applied to all -# metrics emitted using the wrapper functions defined in telemetry package. -# -# Example: -# [["chain_id", "cosmoshub-1"]] -global-labels = [ -] - -############################################################################### -### API Configuration ### -############################################################################### - -[api] - -# Enable defines if the API server should be enabled. -enable = false - -# Swagger defines if swagger documentation should automatically be registered. -swagger = false - -# Address defines the API server to listen on. 
-address = "tcp://0.0.0.0:1317" - -# MaxOpenConnections defines the number of maximum open connections. -max-open-connections = 1000 - -# RPCReadTimeout defines the Tendermint RPC read timeout (in seconds). -rpc-read-timeout = 10 - -# RPCWriteTimeout defines the Tendermint RPC write timeout (in seconds). -rpc-write-timeout = 0 - -# RPCMaxBodyBytes defines the Tendermint maximum response body (in bytes). -rpc-max-body-bytes = 1000000 - -# EnableUnsafeCORS defines if CORS should be enabled (unsafe - use it at your own risk). -enabled-unsafe-cors = false - -############################################################################### -### gRPC Configuration ### -############################################################################### - -[grpc] - -# Enable defines if the gRPC server should be enabled. -enable = true - -# Address defines the gRPC server address to bind to. -address = "0.0.0.0:9090" - -############################################################################### -### State Sync Configuration ### -############################################################################### - -# State sync snapshots allow other nodes to rapidly join the network without replaying historical -# blocks, instead downloading and applying a snapshot of the application state at a given height. -[state-sync] - -# snapshot-interval specifies the block interval at which local state sync snapshots are -# taken (0 to disable). Must be a multiple of pruning-keep-every. -snapshot-interval = 0 - -# snapshot-keep-recent specifies the number of recent snapshots to keep and serve (0 to keep all). -snapshot-keep-recent = 2 diff --git a/ci/chains/gaia/v5.0.5/ibc-1/config/client.toml b/ci/chains/gaia/v5.0.5/ibc-1/config/client.toml deleted file mode 100644 index 222695a3f8..0000000000 --- a/ci/chains/gaia/v5.0.5/ibc-1/config/client.toml +++ /dev/null @@ -1,17 +0,0 @@ -# This is a TOML config file. -# For more information, see https://github.com/toml-lang/toml - -############################################################################### -### Client Configuration ### -############################################################################### - -# The network chain ID -chain-id = "" -# The keyring's backend, where the keys are stored (os|file|kwallet|pass|test|memory) -keyring-backend = "os" -# CLI output format (text|json) -output = "text" -# : to Tendermint RPC interface for this chain -node = "tcp://localhost:26657" -# Transaction broadcasting mode (sync|async|block) -broadcast-mode = "sync" diff --git a/ci/chains/gaia/v5.0.5/ibc-1/config/config.toml b/ci/chains/gaia/v5.0.5/ibc-1/config/config.toml deleted file mode 100644 index bef1cae571..0000000000 --- a/ci/chains/gaia/v5.0.5/ibc-1/config/config.toml +++ /dev/null @@ -1,400 +0,0 @@ -# This is a TOML config file. -# For more information, see https://github.com/toml-lang/toml - -# NOTE: Any path below can be absolute (e.g. "/var/myawesomeapp/data") or -# relative to the home directory (e.g. "data"). The home directory is -# "$HOME/.tendermint" by default, but could be changed via $TMHOME env variable -# or --home cmd flag. 
- -####################################################################### -### Main Base Config Options ### -####################################################################### - -# TCP or UNIX socket address of the ABCI application, -# or the name of an ABCI application compiled in with the Tendermint binary -proxy_app = "tcp://127.0.0.1:26658" - -# A custom human readable name for this node -moniker = "ibc-1" - -# If this node is many blocks behind the tip of the chain, FastSync -# allows them to catchup quickly by downloading blocks in parallel -# and verifying their commits -fast_sync = true - -# Database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb -# * goleveldb (github.com/syndtr/goleveldb - most popular implementation) -# - pure go -# - stable -# * cleveldb (uses levigo wrapper) -# - fast -# - requires gcc -# - use cleveldb build tag (go build -tags cleveldb) -# * boltdb (uses etcd's fork of bolt - github.com/etcd-io/bbolt) -# - EXPERIMENTAL -# - may be faster is some use-cases (random reads - indexer) -# - use boltdb build tag (go build -tags boltdb) -# * rocksdb (uses github.com/tecbot/gorocksdb) -# - EXPERIMENTAL -# - requires gcc -# - use rocksdb build tag (go build -tags rocksdb) -# * badgerdb (uses github.com/dgraph-io/badger) -# - EXPERIMENTAL -# - use badgerdb build tag (go build -tags badgerdb) -db_backend = "goleveldb" - -# Database directory -db_dir = "data" - -# Output level for logging, including package level options -log_level = "info" - -# Output format: 'plain' (colored text) or 'json' -log_format = "plain" - -##### additional base config options ##### - -# Path to the JSON file containing the initial validator set and other meta data -genesis_file = "config/genesis.json" - -# Path to the JSON file containing the private key to use as a validator in the consensus protocol -priv_validator_key_file = "config/priv_validator_key.json" - -# Path to the JSON file containing the last sign state of a validator -priv_validator_state_file = "data/priv_validator_state.json" - -# TCP or UNIX socket address for Tendermint to listen on for -# connections from an external PrivValidator process -priv_validator_laddr = "" - -# Path to the JSON file containing the private key to use for node authentication in the p2p protocol -node_key_file = "config/node_key.json" - -# Mechanism to connect to the ABCI application: socket | grpc -abci = "socket" - -# If true, query the ABCI app on connecting to a new peer -# so the app can decide if we should keep the connection or not -filter_peers = false - - -####################################################################### -### Advanced Configuration Options ### -####################################################################### - -####################################################### -### RPC Server Configuration Options ### -####################################################### -[rpc] - -# TCP or UNIX socket address for the RPC server to listen on -laddr = "tcp://0.0.0.0:26657" - -# A list of origins a cross-domain request can be executed from -# Default value '[]' disables cors support -# Use '["*"]' to allow any origin -cors_allowed_origins = [] - -# A list of methods the client is allowed to use with cross-domain requests -cors_allowed_methods = ["HEAD", "GET", "POST", ] - -# A list of non simple headers the client is allowed to use with cross-domain requests -cors_allowed_headers = ["Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time", ] - -# TCP or UNIX socket address for the gRPC 
server to listen on -# NOTE: This server only supports /broadcast_tx_commit -grpc_laddr = "" - -# Maximum number of simultaneous connections. -# Does not include RPC (HTTP&WebSocket) connections. See max_open_connections -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} -# 1024 - 40 - 10 - 50 = 924 = ~900 -grpc_max_open_connections = 900 - -# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool -unsafe = false - -# Maximum number of simultaneous connections (including WebSocket). -# Does not include gRPC connections. See grpc_max_open_connections -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} -# 1024 - 40 - 10 - 50 = 924 = ~900 -max_open_connections = 900 - -# Maximum number of unique clientIDs that can /subscribe -# If you're using /broadcast_tx_commit, set to the estimated maximum number -# of broadcast_tx_commit calls per block. -max_subscription_clients = 100 - -# Maximum number of unique queries a given client can /subscribe to -# If you're using GRPC (or Local RPC client) and /broadcast_tx_commit, set to -# the estimated # maximum number of broadcast_tx_commit calls per block. -max_subscriptions_per_client = 5 - -# How long to wait for a tx to be committed during /broadcast_tx_commit. -# WARNING: Using a value larger than 10s will result in increasing the -# global HTTP write timeout, which applies to all connections and endpoints. -# See https://github.com/tendermint/tendermint/issues/3435 -timeout_broadcast_tx_commit = "10s" - -# Maximum size of request body, in bytes -max_body_bytes = 1000000 - -# Maximum size of request header, in bytes -max_header_bytes = 1048576 - -# The path to a file containing certificate that is used to create the HTTPS server. -# Might be either absolute path or path related to Tendermint's config directory. -# If the certificate is signed by a certificate authority, -# the certFile should be the concatenation of the server's certificate, any intermediates, -# and the CA's certificate. -# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. -# Otherwise, HTTP server is run. -tls_cert_file = "" - -# The path to a file containing matching private key that is used to create the HTTPS server. -# Might be either absolute path or path related to Tendermint's config directory. -# NOTE: both tls-cert-file and tls-key-file must be present for Tendermint to create HTTPS server. -# Otherwise, HTTP server is run. -tls_key_file = "" - -# pprof listen address (https://golang.org/pkg/net/http/pprof) -pprof_laddr = "localhost:6060" - -####################################################### -### P2P Configuration Options ### -####################################################### -[p2p] - -# Address to listen for incoming connections -laddr = "tcp://0.0.0.0:26656" - -# Address to advertise to peers for them to dial -# If empty, will use the same port as the laddr, -# and will introspect on the listener or use UPnP -# to figure out the address. 
-external_address = "" - -# Comma separated list of seed nodes to connect to -seeds = "" - -# Comma separated list of nodes to keep persistent connections to -persistent_peers = "" - -# UPNP port forwarding -upnp = false - -# Path to address book -addr_book_file = "config/addrbook.json" - -# Set true for strict address routability rules -# Set false for private or local networks -addr_book_strict = true - -# Maximum number of inbound peers -max_num_inbound_peers = 40 - -# Maximum number of outbound peers to connect to, excluding persistent peers -max_num_outbound_peers = 10 - -# List of node IDs, to which a connection will be (re)established ignoring any existing limits -unconditional_peer_ids = "" - -# Maximum pause when redialing a persistent peer (if zero, exponential backoff is used) -persistent_peers_max_dial_period = "0s" - -# Time to wait before flushing messages out on the connection -flush_throttle_timeout = "100ms" - -# Maximum size of a message packet payload, in bytes -max_packet_msg_payload_size = 1024 - -# Rate at which packets can be sent, in bytes/second -send_rate = 5120000 - -# Rate at which packets can be received, in bytes/second -recv_rate = 5120000 - -# Set true to enable the peer-exchange reactor -pex = true - -# Seed mode, in which node constantly crawls the network and looks for -# peers. If another node asks it for addresses, it responds and disconnects. -# -# Does not work if the peer-exchange reactor is disabled. -seed_mode = false - -# Comma separated list of peer IDs to keep private (will not be gossiped to other peers) -private_peer_ids = "" - -# Toggle to disable guard against peers connecting from the same ip. -allow_duplicate_ip = false - -# Peer connection configuration. -handshake_timeout = "20s" -dial_timeout = "3s" - -####################################################### -### Mempool Configuration Option ### -####################################################### -[mempool] - -recheck = true -broadcast = true -wal_dir = "" - -# Maximum number of transactions in the mempool -size = 5000 - -# Limit the total size of all txs in the mempool. -# This only accounts for raw transactions (e.g. given 1MB transactions and -# max_txs_bytes=5MB, mempool will only accept 5 transactions). -max_txs_bytes = 1073741824 - -# Size of the cache (used to filter transactions we saw earlier) in transactions -cache_size = 10000 - -# Do not remove invalid transactions from the cache (default: false) -# Set to true if it's not possible for any invalid transaction to become valid -# again in the future. -keep-invalid-txs-in-cache = false - -# Maximum size of a single transaction. -# NOTE: the max size of a tx transmitted over the network is {max_tx_bytes}. -max_tx_bytes = 1048576 - -# Maximum size of a batch of transactions to send to a peer -# Including space needed by encoding (one varint per transaction). -# XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796 -max_batch_bytes = 0 - -####################################################### -### State Sync Configuration Options ### -####################################################### -[statesync] -# State sync rapidly bootstraps a new node by discovering, fetching, and restoring a state machine -# snapshot from peers instead of fetching and replaying historical blocks. Requires some peers in -# the network to take and serve state machine snapshots. State sync is not attempted if the node -# has any local state (LastBlockHeight > 0). 
The node will have a truncated block history, -# starting from the height of the snapshot. -enable = false - -# RPC servers (comma-separated) for light client verification of the synced state machine and -# retrieval of state data for node bootstrapping. Also needs a trusted height and corresponding -# header hash obtained from a trusted source, and a period during which validators can be trusted. -# -# For Cosmos SDK-based chains, trust_period should usually be about 2/3 of the unbonding time (~2 -# weeks) during which they can be financially punished (slashed) for misbehavior. -rpc_servers = "" -trust_height = 0 -trust_hash = "" -trust_period = "168h0m0s" - -# Time to spend discovering snapshots before initiating a restore. -discovery_time = "15s" - -# Temporary directory for state sync snapshot chunks, defaults to the OS tempdir (typically /tmp). -# Will create a new, randomly named directory within, and remove it when done. -temp_dir = "" - -# The timeout duration before re-requesting a chunk, possibly from a different -# peer (default: 1 minute). -chunk_request_timeout = "10s" - -# The number of concurrent chunk fetchers to run (default: 1). -chunk_fetchers = "4" - -####################################################### -### Fast Sync Configuration Connections ### -####################################################### -[fastsync] - -# Fast Sync version to use: -# 1) "v0" (default) - the legacy fast sync implementation -# 2) "v1" - refactor of v0 version for better testability -# 2) "v2" - complete redesign of v0, optimized for testability & readability -version = "v0" - -####################################################### -### Consensus Configuration Options ### -####################################################### -[consensus] - -wal_file = "data/cs.wal/wal" - -# How long we wait for a proposal block before prevoting nil -timeout_propose = "1s" -# How much timeout_propose increases with each round -timeout_propose_delta = "500ms" -# How long we wait after receiving +2/3 prevotes for “anything” (ie. not a single block or nil) -timeout_prevote = "1s" -# How much the timeout_prevote increases with each round -timeout_prevote_delta = "500ms" -# How long we wait after receiving +2/3 precommits for “anything” (ie. not a single block or nil) -timeout_precommit = "1s" -# How much the timeout_precommit increases with each round -timeout_precommit_delta = "500ms" -# How long we wait after committing a block, before starting on the new -# height (this gives us a chance to receive some more precommits, even -# though we already have +2/3). -timeout_commit = "1s" - -# How many blocks to look back to check existence of the node's consensus votes before joining consensus -# When non-zero, the node will panic upon restart -# if the same consensus key was used to sign {double_sign_check_height} last blocks. -# So, validators should stop the state machine, wait for some blocks, and then restart the state machine to avoid panic. 
-double_sign_check_height = 0 - -# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) -skip_timeout_commit = false - -# EmptyBlocks mode and possible interval between empty blocks -create_empty_blocks = true -create_empty_blocks_interval = "0s" - -# Reactor sleep duration parameters -peer_gossip_sleep_duration = "100ms" -peer_query_maj23_sleep_duration = "2s" - -####################################################### -### Transaction Indexer Configuration Options ### -####################################################### -[tx_index] - -# What indexer to use for transactions -# -# The application will set which txs to index. In some cases a node operator will be able -# to decide which txs to index based on configuration set in the application. -# -# Options: -# 1) "null" -# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). -# - When "kv" is chosen "tx.height" and "tx.hash" will always be indexed. -indexer = "kv" - -####################################################### -### Instrumentation Configuration Options ### -####################################################### -[instrumentation] - -# When true, Prometheus metrics are served under /metrics on -# PrometheusListenAddr. -# Check out the documentation for the list of available metrics. -prometheus = false - -# Address to listen for Prometheus collector(s) connections -prometheus_listen_addr = ":26660" - -# Maximum number of simultaneous connections. -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -max_open_connections = 3 - -# Instrumentation namespace -namespace = "tendermint" diff --git a/ci/chains/gaia/v5.0.5/ibc-1/config/genesis.json b/ci/chains/gaia/v5.0.5/ibc-1/config/genesis.json deleted file mode 100644 index 5857a4c3fa..0000000000 --- a/ci/chains/gaia/v5.0.5/ibc-1/config/genesis.json +++ /dev/null @@ -1,342 +0,0 @@ -{ - "genesis_time": "2021-08-06T15:55:22.411963459Z", - "chain_id": "ibc-1", - "initial_height": "1", - "consensus_params": { - "block": { - "max_bytes": "22020096", - "max_gas": "-1", - "time_iota_ms": "1000" - }, - "evidence": { - "max_age_num_blocks": "100000", - "max_age_duration": "172800000000000", - "max_bytes": "1048576" - }, - "validator": { - "pub_key_types": [ - "ed25519" - ] - }, - "version": {} - }, - "app_hash": "", - "app_state": { - "auth": { - "params": { - "max_memo_characters": "256", - "tx_sig_limit": "7", - "tx_size_cost_per_byte": "10", - "sig_verify_cost_ed25519": "590", - "sig_verify_cost_secp256k1": "1000" - }, - "accounts": [ - { - "@type": "/cosmos.auth.v1beta1.BaseAccount", - "address": "cosmos1gz8hnf4l6yz7k3mmyseus3v69t4f8s60hth0fx", - "pub_key": null, - "account_number": "0", - "sequence": "0" - }, - { - "@type": "/cosmos.auth.v1beta1.BaseAccount", - "address": "cosmos1lt47c46ydlmqs6tq5y4djkndx9u8mu3x6mqhje", - "pub_key": null, - "account_number": "0", - "sequence": "0" - }, - { - "@type": "/cosmos.auth.v1beta1.BaseAccount", - "address": "cosmos1xs9mn3pfxgx4s2ncu0yejl0dh8lrquyzekqwz3", - "pub_key": null, - "account_number": "0", - "sequence": "0" - } - ] - }, - "bank": { - "params": { - "send_enabled": [], - "default_send_enabled": true - }, - "balances": [ - { - "address": "cosmos1xs9mn3pfxgx4s2ncu0yejl0dh8lrquyzekqwz3", - "coins": [ - { - "denom": "stake", - "amount": "100000000000" - } - ] - }, - { - "address": "cosmos1gz8hnf4l6yz7k3mmyseus3v69t4f8s60hth0fx", - "coins": [ - { - "denom": "samoleans", - "amount": 
"100000000000" - }, - { - "denom": "stake", - "amount": "100000000000" - } - ] - }, - { - "address": "cosmos1lt47c46ydlmqs6tq5y4djkndx9u8mu3x6mqhje", - "coins": [ - { - "denom": "samoleans", - "amount": "100000000000" - }, - { - "denom": "stake", - "amount": "100000000000" - } - ] - } - ], - "supply": [ - { - "denom": "samoleans", - "amount": "200000000000" - }, - { - "denom": "stake", - "amount": "300000000000" - } - ], - "denom_metadata": [] - }, - "capability": { - "index": "1", - "owners": [] - }, - "crisis": { - "constant_fee": { - "denom": "stake", - "amount": "1000" - } - }, - "distribution": { - "params": { - "community_tax": "0.020000000000000000", - "base_proposer_reward": "0.010000000000000000", - "bonus_proposer_reward": "0.040000000000000000", - "withdraw_addr_enabled": true - }, - "fee_pool": { - "community_pool": [] - }, - "delegator_withdraw_infos": [], - "previous_proposer": "", - "outstanding_rewards": [], - "validator_accumulated_commissions": [], - "validator_historical_rewards": [], - "validator_current_rewards": [], - "delegator_starting_infos": [], - "validator_slash_events": [] - }, - "evidence": { - "evidence": [] - }, - "genutil": { - "gen_txs": [ - { - "body": { - "messages": [ - { - "@type": "/cosmos.staking.v1beta1.MsgCreateValidator", - "description": { - "moniker": "ibc-1", - "identity": "", - "website": "", - "security_contact": "", - "details": "" - }, - "commission": { - "rate": "0.100000000000000000", - "max_rate": "0.200000000000000000", - "max_change_rate": "0.010000000000000000" - }, - "min_self_delegation": "1", - "delegator_address": "cosmos1xs9mn3pfxgx4s2ncu0yejl0dh8lrquyzekqwz3", - "validator_address": "cosmosvaloper1xs9mn3pfxgx4s2ncu0yejl0dh8lrquyzuz5mwz", - "pubkey": { - "@type": "/cosmos.crypto.ed25519.PubKey", - "key": "uEJtNPYlKV1WAsKJV+ntucge/3FNb23OT74fNR4SB4A=" - }, - "value": { - "denom": "stake", - "amount": "100000000000" - } - } - ], - "memo": "1feb1ad566fa381f6e6b4afd639559e0505fd9d0@192.168.50.214:26656", - "timeout_height": "0", - "extension_options": [], - "non_critical_extension_options": [] - }, - "auth_info": { - "signer_infos": [ - { - "public_key": { - "@type": "/cosmos.crypto.secp256k1.PubKey", - "key": "ArYgz5c42qZWEE37Ltf/g1ikRbRjlED+9u/9uPzzeWp+" - }, - "mode_info": { - "single": { - "mode": "SIGN_MODE_DIRECT" - } - }, - "sequence": "0" - } - ], - "fee": { - "amount": [], - "gas_limit": "200000", - "payer": "", - "granter": "" - } - }, - "signatures": [ - "licdn52xp1gR8oNcVzqZAWwDaOsdGK6A/+B9bkw8K/hakKmHiTe9wRXnOD3m532E5SYyPkK93vukLEhPbtx+wg==" - ] - } - ] - }, - "gov": { - "starting_proposal_id": "1", - "deposits": [], - "votes": [], - "proposals": [], - "deposit_params": { - "min_deposit": [ - { - "denom": "stake", - "amount": "10000000" - } - ], - "max_deposit_period": "200s" - }, - "voting_params": { - "voting_period": "200s" - }, - "tally_params": { - "quorum": "0.334000000000000000", - "threshold": "0.500000000000000000", - "veto_threshold": "0.334000000000000000" - } - }, - "ibc": { - "client_genesis": { - "clients": [], - "clients_consensus": [], - "clients_metadata": [], - "params": { - "allowed_clients": [ - "06-solomachine", - "07-tendermint" - ] - }, - "create_localhost": false, - "next_client_sequence": "0" - }, - "connection_genesis": { - "connections": [], - "client_connection_paths": [], - "next_connection_sequence": "0" - }, - "channel_genesis": { - "channels": [], - "acknowledgements": [], - "commitments": [], - "receipts": [], - "send_sequences": [], - "recv_sequences": [], - "ack_sequences": [], - 
"next_channel_sequence": "0" - } - }, - "liquidity": { - "params": { - "pool_types": [ - { - "id": 1, - "name": "StandardLiquidityPool", - "min_reserve_coin_num": 2, - "max_reserve_coin_num": 2, - "description": "Standard liquidity pool with pool price function X/Y, ESPM constraint, and two kinds of reserve coins" - } - ], - "min_init_deposit_amount": "1000000", - "init_pool_coin_mint_amount": "1000000", - "max_reserve_coin_amount": "0", - "pool_creation_fee": [ - { - "denom": "stake", - "amount": "40000000" - } - ], - "swap_fee_rate": "0.003000000000000000", - "withdraw_fee_rate": "0.000000000000000000", - "max_order_amount_ratio": "0.100000000000000000", - "unit_batch_height": 1, - "circuit_breaker_enabled": false - }, - "pool_records": [] - }, - "mint": { - "minter": { - "inflation": "0.130000000000000000", - "annual_provisions": "0.000000000000000000" - }, - "params": { - "mint_denom": "stake", - "inflation_rate_change": "0.130000000000000000", - "inflation_max": "0.200000000000000000", - "inflation_min": "0.070000000000000000", - "goal_bonded": "0.670000000000000000", - "blocks_per_year": "6311520" - } - }, - "params": null, - "slashing": { - "params": { - "signed_blocks_window": "100", - "min_signed_per_window": "0.500000000000000000", - "downtime_jail_duration": "600s", - "slash_fraction_double_sign": "0.050000000000000000", - "slash_fraction_downtime": "0.010000000000000000" - }, - "signing_infos": [], - "missed_blocks": [] - }, - "staking": { - "params": { - "unbonding_time": "1814400s", - "max_validators": 100, - "max_entries": 7, - "historical_entries": 10000, - "bond_denom": "stake" - }, - "last_total_power": "0", - "last_validator_powers": [], - "validators": [], - "delegations": [], - "unbonding_delegations": [], - "redelegations": [], - "exported": false - }, - "transfer": { - "port_id": "transfer", - "denom_traces": [], - "params": { - "send_enabled": true, - "receive_enabled": true - } - }, - "upgrade": {}, - "vesting": {} - } -} \ No newline at end of file diff --git a/ci/chains/gaia/v5.0.5/ibc-1/config/gentx/gentx-1feb1ad566fa381f6e6b4afd639559e0505fd9d0.json b/ci/chains/gaia/v5.0.5/ibc-1/config/gentx/gentx-1feb1ad566fa381f6e6b4afd639559e0505fd9d0.json deleted file mode 100644 index 06efed20fc..0000000000 --- a/ci/chains/gaia/v5.0.5/ibc-1/config/gentx/gentx-1feb1ad566fa381f6e6b4afd639559e0505fd9d0.json +++ /dev/null @@ -1 +0,0 @@ -{"body":{"messages":[{"@type":"/cosmos.staking.v1beta1.MsgCreateValidator","description":{"moniker":"ibc-1","identity":"","website":"","security_contact":"","details":""},"commission":{"rate":"0.100000000000000000","max_rate":"0.200000000000000000","max_change_rate":"0.010000000000000000"},"min_self_delegation":"1","delegator_address":"cosmos1xs9mn3pfxgx4s2ncu0yejl0dh8lrquyzekqwz3","validator_address":"cosmosvaloper1xs9mn3pfxgx4s2ncu0yejl0dh8lrquyzuz5mwz","pubkey":{"@type":"/cosmos.crypto.ed25519.PubKey","key":"uEJtNPYlKV1WAsKJV+ntucge/3FNb23OT74fNR4SB4A="},"value":{"denom":"stake","amount":"100000000000"}}],"memo":"1feb1ad566fa381f6e6b4afd639559e0505fd9d0@192.168.50.214:26656","timeout_height":"0","extension_options":[],"non_critical_extension_options":[]},"auth_info":{"signer_infos":[{"public_key":{"@type":"/cosmos.crypto.secp256k1.PubKey","key":"ArYgz5c42qZWEE37Ltf/g1ikRbRjlED+9u/9uPzzeWp+"},"mode_info":{"single":{"mode":"SIGN_MODE_DIRECT"}},"sequence":"0"}],"fee":{"amount":[],"gas_limit":"200000","payer":"","granter":""}},"signatures":["licdn52xp1gR8oNcVzqZAWwDaOsdGK6A/+B9bkw8K/hakKmHiTe9wRXnOD3m532E5SYyPkK93vukLEhPbtx+wg=="]} diff --git 
a/ci/chains/gaia/v5.0.5/ibc-1/config/node_key.json b/ci/chains/gaia/v5.0.5/ibc-1/config/node_key.json deleted file mode 100644 index 3405e42380..0000000000 --- a/ci/chains/gaia/v5.0.5/ibc-1/config/node_key.json +++ /dev/null @@ -1 +0,0 @@ -{"priv_key":{"type":"tendermint/PrivKeyEd25519","value":"gPIctCwq106NQ4vqEEFczd1lw//4MoNXqV4bPHEZKIepCHj5EMHC8YlvENm0Ge0RPexjMxgANTrbfU2Lb+tmgw=="}} \ No newline at end of file diff --git a/ci/chains/gaia/v5.0.5/ibc-1/config/priv_validator_key.json b/ci/chains/gaia/v5.0.5/ibc-1/config/priv_validator_key.json deleted file mode 100644 index ecf31ea63d..0000000000 --- a/ci/chains/gaia/v5.0.5/ibc-1/config/priv_validator_key.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "address": "C2BD2207A3FF8B7BBBCF3E1F3554FD4CCBF5B9AD", - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "uEJtNPYlKV1WAsKJV+ntucge/3FNb23OT74fNR4SB4A=" - }, - "priv_key": { - "type": "tendermint/PrivKeyEd25519", - "value": "8AatfvD3y+/RBRVCjA+OsegXGdiYg2afP4D2NmqYCJu4Qm009iUpXVYCwolX6e25yB7/cU1vbc5Pvh81HhIHgA==" - } -} \ No newline at end of file diff --git a/ci/chains/gaia/v5.0.5/ibc-1/keyring-test/340bb9c429320d582a78e3c9997dedb9fe307082.address b/ci/chains/gaia/v5.0.5/ibc-1/keyring-test/340bb9c429320d582a78e3c9997dedb9fe307082.address deleted file mode 100644 index 1113f7071b..0000000000 --- a/ci/chains/gaia/v5.0.5/ibc-1/keyring-test/340bb9c429320d582a78e3c9997dedb9fe307082.address +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wOC0wNiAxMTo1NToyMy40OTE5OTA3MyAtMDQwMCBFRFQgbT0rMC4wNjk3MzYxNTkiLCJlbmMiOiJBMjU2R0NNIiwicDJjIjo4MTkyLCJwMnMiOiJRSkhjMnBBR214bnY2aWkyIn0.4zfntRhv4PSLNCRq3eGlC3mKHhIsSveK1tKoxtDZINfuPicEOyq1og.9IeSNTJ5u3lMmiqS.4ehEfNzu0HwJyquVTY10QMOXjZnZTHvtuNH7jk_KqNNXqatsNg1rppABQUr4HjTFymGCOb0PzFWzLz8_4Fv5JwqYoU7j04G5uvvLALi6OVaguuEVs0B0qaD32_J8OTV9Bp8zbYr3dJHwPM6n7ntH5zIvQgC6kVaZUipglJAQjkZN2h0u2WHQ1Z0GPIeeE00dX52jJtgY0zhZdA_p1ZYAO_AJJ_fQQMfHkV10rnbErYHPFCqbawhP6ZIV.0NWwcw1QgdcihcSS7XyOdg \ No newline at end of file diff --git a/ci/chains/gaia/v5.0.5/ibc-1/keyring-test/408f79a6bfd105eb477b2433c8459a2aea93c34f.address b/ci/chains/gaia/v5.0.5/ibc-1/keyring-test/408f79a6bfd105eb477b2433c8459a2aea93c34f.address deleted file mode 100644 index a3ed0bc21e..0000000000 --- a/ci/chains/gaia/v5.0.5/ibc-1/keyring-test/408f79a6bfd105eb477b2433c8459a2aea93c34f.address +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wOC0wNiAxMTo1NToyNC41NjkyNTA1OTUgLTA0MDAgRURUIG09KzAuMDYzNjAyNDM2IiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoibTZ5UGlIMS1zOVd1WmF2QSJ9.qRrycHfHVdBkFepPKyfZH-7VpIjVqJxcHiquC-heLERJqKOfwZKD0A.ESc_YX4SF-KGfXX0.81lArzJU37dL_0U6QX2aU4dZIOcVJqdu6pUscTwIWO12lbYMZnEt7QwVVxGFEpCb81enwmSOFyZI03mWloeaiVDuCPsOWjY_H-ivvGV1EooeaWynnyJGEpO2fucneP11szaJO2j2Pen9x9jgQdvn2Xuvr3gi4O6jMyiHnV8Ibmx7x80cwMpoQF9uPtodIG9aP7N1xWHV6_l_O-emv9qfnuTuLwiPGYB7jsHNXaDJfUZ9-w.i1aJxWM7HoDFIirXbYZuKQ \ No newline at end of file diff --git a/ci/chains/gaia/v5.0.5/ibc-1/keyring-test/faebec57446ff6086960a12ad95a6d31787df226.address b/ci/chains/gaia/v5.0.5/ibc-1/keyring-test/faebec57446ff6086960a12ad95a6d31787df226.address deleted file mode 100644 index 8c3f521aad..0000000000 --- a/ci/chains/gaia/v5.0.5/ibc-1/keyring-test/faebec57446ff6086960a12ad95a6d31787df226.address +++ /dev/null @@ -1 +0,0 @@ 
-eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wOC0wNiAxMTo1NToyNS42Njk1MzAxMDEgLTA0MDAgRURUIG09KzAuMDg1NzIwMjA3IiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiYXVLSVRmbGZZdm9DV2tURyJ9.ndHS7gKlrYCO78G9ZIpB2jEqdv85zfQRZAJoYa-00hRZlHEr7dF7RQ.F--xWwL-JrjkxipY.jv_CYB2Jg_ppO9hr_j7oyTvyFy0gzvLiBDRDmRXajJJXvm-4Rx_oxx1hH-MQGF-mhmOhXn_UPB-NsVl3S6YrFq5C7BxHpa-H48yCQ8YHROrb44TmgWf26lDWRKyGdGH3AkKiUtxRGQ968vfNOLu5FCPcK1zysHDrmLMlN88c2NLNzE8F8F_iAWWwuE6Wt0vpm85qtsAyX73pTi-NracluzPMoy7XqAguUAHWI4tod2h56-H2Gls.Tt6Q5d4jNAXdwvXJk4QUTg \ No newline at end of file diff --git a/ci/chains/gaia/v5.0.5/ibc-1/keyring-test/user.info b/ci/chains/gaia/v5.0.5/ibc-1/keyring-test/user.info deleted file mode 100644 index b79bac9b9d..0000000000 --- a/ci/chains/gaia/v5.0.5/ibc-1/keyring-test/user.info +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wOC0wNiAxMTo1NToyNC41NjMyODgyNCAtMDQwMCBFRFQgbT0rMC4wNTc2NDAwNjUiLCJlbmMiOiJBMjU2R0NNIiwicDJjIjo4MTkyLCJwMnMiOiJQTVRlSGNMT0M5MUc4Rk50In0.bjQxqrY4tQJ8T8qA1V73gNw0HPW8Ajz6myOrIlX8jj9PCsD9duBsbQ.UzOo26N2xEPLM2Jg.ijNc2B_c5igQwBxzMk9EnZDPVK36JrC0dZ1InnsBuY_ZXktKbNnW_x9Ipx5irWM9oCW_7f7lA4QblxzVtvxrFNVgNja1ub91SM0cwtNABlRx1cNoAq0qi-2PZQGBO0aPJZwrRCMfEVS9snkZu1JMG74ezMtA72O8okt3YJdQjNTcKJ7YsP69qAy9F0cchlHmB58U8bXDoaddPrx41NVCWdiDmPZh4A_8wn0YMpoXY7k7xdDTA9CQb5JWalg2YmGg9j1h_9-1nicyxsDAItG2OOE7JnfU7kbg3-y4usAvEVViqskBZcah_XPVOLiYl3GgpPzTmZr9AeMfyQIYUVPkoBHNr_LR6es.RVtp5fYSTZhXmxxzPWJDvw \ No newline at end of file diff --git a/ci/chains/gaia/v5.0.5/ibc-1/keyring-test/user2.info b/ci/chains/gaia/v5.0.5/ibc-1/keyring-test/user2.info deleted file mode 100644 index 4812a16689..0000000000 --- a/ci/chains/gaia/v5.0.5/ibc-1/keyring-test/user2.info +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wOC0wNiAxMTo1NToyNS42NTU1MzUwNDUgLTA0MDAgRURUIG09KzAuMDcxNzI1MTc0IiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiQXc5WXZSdWNCT3cyOGdMWCJ9.u-If16vmX554Dc3b1vEQvxy074ipGJZClyrZmFzJ4pLJg3h002ahbA.GDOrfF1gPcL0DtKA.KEFCtKYMZSqJqHcu8-5QWz8XnFyYF1WN9p7H_lUDUJD-rcGU0WR4nZhAR02qGM9unBU1BF8PqHoVkK5oNO9nAqg7G8pb4pi83AurRtOtdeNJOQBmlZeI12rLJorvtG2RxjsDfObWHlh-LW5TzAZbRRFSO8s8XK6hfVrNybcCMiBa8NuPDynSvEoTir7IrjmP_gKrNdeE6LHYafiED8cUSME30LX33SrJ2Fof8NTUTWRKV1pZ0a-vcrUkL4k2M-u0dvYPxRSmlJn6PH3uR31ex1ncoSSGIA5Sb1YYBhna_wqlrMzM2EpSKQlrjAPNAWCMLu2MjdheeAOfgK15V9JSyXP-wS8AJvSJ.QtanYYmmeLuW0m1sD7Cjzw \ No newline at end of file diff --git a/ci/chains/gaia/v5.0.5/ibc-1/keyring-test/validator.info b/ci/chains/gaia/v5.0.5/ibc-1/keyring-test/validator.info deleted file mode 100644 index 278a657d45..0000000000 --- a/ci/chains/gaia/v5.0.5/ibc-1/keyring-test/validator.info +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wOC0wNiAxMTo1NToyMy40ODU2NDEyODcgLTA0MDAgRURUIG09KzAuMDYzMzg2NzI1IiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiSEZiLTRrV3ZYVUY0bFFPVyJ9.aX-P2pGvLb0TZEMmPiAjvInmOnTeZVq-JjJUyL4Ox0_XfC8v2ho0VQ.ViPeDShGm714yf0I.b1M2DratDW_NpHqK3Gy7mXiJvJlS5JcCC3jn9TpciR6U3RCE9XXLQJBnAj622ABdM4-NbjqTbjIxhSw1kIX77ADBjWg5G0GzZ5beXku0haIds9NgBwv49oWpLS7SQFtnGZkZtae3uCPj65lnRaY2rf10h9ia5qcakzisxRWjxXZdBKzDOJqIWX3M_7x8W2M4IrH8bDqvaKiEjaIcD1lg95F7NNfLUGJHDds-4Y4fvqVpIbapbSsMr54WOQQ_DLpMz9b7ztataH6EIYs35GtuKaBfDFp-Rk7s2QGELp3N31EGLLcz2119fR8tw0LLrjcUpouFGUlFTPl-g1J1LefXcRKTOGbnh3dAASUoUNWV3E-zccSR.jTU1Bg5x6yQP5_zT0N5QqA \ No newline at end of file diff --git a/ci/chains/gaia/v5.0.5/ibc-1/user2_seed.json b/ci/chains/gaia/v5.0.5/ibc-1/user2_seed.json deleted file mode 100644 index 730bf0e099..0000000000 --- 
a/ci/chains/gaia/v5.0.5/ibc-1/user2_seed.json +++ /dev/null @@ -1 +0,0 @@ -{"name":"user2","type":"local","address":"cosmos1lt47c46ydlmqs6tq5y4djkndx9u8mu3x6mqhje","pubkey":"cosmospub1addwnpepqtxdta64tecx2rdatsn0shkcascj4xu7a0x9p0cynqgn7jywrq905p244ce","mnemonic":"pact obvious seminar path address segment motion they artwork clever poet local normal dinosaur celery few hold hire charge chaos claw fine grow impact"} diff --git a/ci/chains/gaia/v5.0.5/ibc-1/user_seed.json b/ci/chains/gaia/v5.0.5/ibc-1/user_seed.json deleted file mode 100644 index 004dbc5b34..0000000000 --- a/ci/chains/gaia/v5.0.5/ibc-1/user_seed.json +++ /dev/null @@ -1 +0,0 @@ -{"name":"user","type":"local","address":"cosmos1gz8hnf4l6yz7k3mmyseus3v69t4f8s60hth0fx","pubkey":"cosmospub1addwnpepqw2jrvfe98frdgf9yfvvjk6px4mtat5km4r7l6j8vhd0hlmd2akv25zdlk5","mnemonic":"host federal present budget champion erode wreck category journey bamboo entry degree destroy rice warm champion acid ability ramp cactus food shaft mimic vague"} diff --git a/ci/chains/gaia/v5.0.5/ibc-1/validator_seed.json b/ci/chains/gaia/v5.0.5/ibc-1/validator_seed.json deleted file mode 100644 index 0e3b8ebb2a..0000000000 --- a/ci/chains/gaia/v5.0.5/ibc-1/validator_seed.json +++ /dev/null @@ -1 +0,0 @@ -{"name":"validator","type":"local","address":"cosmos1xs9mn3pfxgx4s2ncu0yejl0dh8lrquyzekqwz3","pubkey":"cosmospub1addwnpepq2mzpnuh8rd2v4ssfhaja4llsdv2g3d5vw2yplhkal7m3l8n0948ud4ufa6","mnemonic":"salad column stone sphere tuna teach trash amused cake menu pyramid half tackle scan initial entire resist become cost loan trade tonight manual release"} diff --git a/ci/chains/gaia/v5.0.8/ibc-0/config/addrbook.json b/ci/chains/gaia/v5.0.8/ibc-0/config/addrbook.json deleted file mode 100644 index 2c14bbdc95..0000000000 --- a/ci/chains/gaia/v5.0.8/ibc-0/config/addrbook.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "key": "82c133df9d6d66a97fe64ae9", - "addrs": [] -} \ No newline at end of file diff --git a/ci/chains/gaia/v5.0.8/ibc-0/config/app.toml b/ci/chains/gaia/v5.0.8/ibc-0/config/app.toml deleted file mode 100644 index 54de73e392..0000000000 --- a/ci/chains/gaia/v5.0.8/ibc-0/config/app.toml +++ /dev/null @@ -1,152 +0,0 @@ -# This is a TOML config file. -# For more information, see https://github.com/toml-lang/toml - -############################################################################### -### Base Configuration ### -############################################################################### - -# The minimum gas prices a validator is willing to accept for processing a -# transaction. A transaction's fees must meet the minimum of any denomination -# specified in this config (e.g. 0.25token1;0.0001token2). -minimum-gas-prices = "" - -# default: the last 100 states are kept in addition to every 500th state; pruning at 10 block intervals -# nothing: all historic states will be saved, nothing will be deleted (i.e. archiving node) -# everything: all saved states will be deleted, storing only the current state; pruning at 10 block intervals -# custom: allow pruning options to be manually specified through 'pruning-keep-recent', 'pruning-keep-every', and 'pruning-interval' -pruning = "default" - -# These are applied if and only if the pruning strategy is custom. -pruning-keep-recent = "0" -pruning-keep-every = "0" -pruning-interval = "0" - -# HaltHeight contains a non-zero block height at which a node will gracefully -# halt and shutdown that can be used to assist upgrades and testing. -# -# Note: Commitment of state will be attempted on the corresponding block. 
-halt-height = 0 - -# HaltTime contains a non-zero minimum block time (in Unix seconds) at which -# a node will gracefully halt and shutdown that can be used to assist upgrades -# and testing. -# -# Note: Commitment of state will be attempted on the corresponding block. -halt-time = 0 - -# MinRetainBlocks defines the minimum block height offset from the current -# block being committed, such that all blocks past this offset are pruned -# from Tendermint. It is used as part of the process of determining the -# ResponseCommit.RetainHeight value during ABCI Commit. A value of 0 indicates -# that no blocks should be pruned. -# -# This configuration value is only responsible for pruning Tendermint blocks. -# It has no bearing on application state pruning which is determined by the -# "pruning-*" configurations. -# -# Note: Tendermint block pruning is dependant on this parameter in conunction -# with the unbonding (safety threshold) period, state pruning and state sync -# snapshot parameters to determine the correct minimum value of -# ResponseCommit.RetainHeight. -min-retain-blocks = 0 - -# InterBlockCache enables inter-block caching. -inter-block-cache = true - -# IndexEvents defines the set of events in the form {eventType}.{attributeKey}, -# which informs Tendermint what to index. If empty, all events will be indexed. -# -# Example: -# ["message.sender", "message.recipient"] -index-events = [] - -############################################################################### -### Telemetry Configuration ### -############################################################################### - -[telemetry] - -# Prefixed with keys to separate services. -service-name = "" - -# Enabled enables the application telemetry functionality. When enabled, -# an in-memory sink is also enabled by default. Operators may also enabled -# other sinks such as Prometheus. -enabled = false - -# Enable prefixing gauge values with hostname. -enable-hostname = false - -# Enable adding hostname to labels. -enable-hostname-label = false - -# Enable adding service to labels. -enable-service-label = false - -# PrometheusRetentionTime, when positive, enables a Prometheus metrics sink. -prometheus-retention-time = 0 - -# GlobalLabels defines a global set of name/value label tuples applied to all -# metrics emitted using the wrapper functions defined in telemetry package. -# -# Example: -# [["chain_id", "cosmoshub-1"]] -global-labels = [ -] - -############################################################################### -### API Configuration ### -############################################################################### - -[api] - -# Enable defines if the API server should be enabled. -enable = false - -# Swagger defines if swagger documentation should automatically be registered. -swagger = false - -# Address defines the API server to listen on. -address = "tcp://0.0.0.0:1317" - -# MaxOpenConnections defines the number of maximum open connections. -max-open-connections = 1000 - -# RPCReadTimeout defines the Tendermint RPC read timeout (in seconds). -rpc-read-timeout = 10 - -# RPCWriteTimeout defines the Tendermint RPC write timeout (in seconds). -rpc-write-timeout = 0 - -# RPCMaxBodyBytes defines the Tendermint maximum response body (in bytes). -rpc-max-body-bytes = 1000000 - -# EnableUnsafeCORS defines if CORS should be enabled (unsafe - use it at your own risk). 
-enabled-unsafe-cors = false - -############################################################################### -### gRPC Configuration ### -############################################################################### - -[grpc] - -# Enable defines if the gRPC server should be enabled. -enable = true - -# Address defines the gRPC server address to bind to. -address = "0.0.0.0:9090" - -############################################################################### -### State Sync Configuration ### -############################################################################### - -# State sync snapshots allow other nodes to rapidly join the network without replaying historical -# blocks, instead downloading and applying a snapshot of the application state at a given height. -[state-sync] - -# snapshot-interval specifies the block interval at which local state sync snapshots are -# taken (0 to disable). Must be a multiple of pruning-keep-every. -snapshot-interval = 0 - -# snapshot-keep-recent specifies the number of recent snapshots to keep and serve (0 to keep all). -snapshot-keep-recent = 2 diff --git a/ci/chains/gaia/v5.0.8/ibc-0/config/client.toml b/ci/chains/gaia/v5.0.8/ibc-0/config/client.toml deleted file mode 100644 index 222695a3f8..0000000000 --- a/ci/chains/gaia/v5.0.8/ibc-0/config/client.toml +++ /dev/null @@ -1,17 +0,0 @@ -# This is a TOML config file. -# For more information, see https://github.com/toml-lang/toml - -############################################################################### -### Client Configuration ### -############################################################################### - -# The network chain ID -chain-id = "" -# The keyring's backend, where the keys are stored (os|file|kwallet|pass|test|memory) -keyring-backend = "os" -# CLI output format (text|json) -output = "text" -# : to Tendermint RPC interface for this chain -node = "tcp://localhost:26657" -# Transaction broadcasting mode (sync|async|block) -broadcast-mode = "sync" diff --git a/ci/chains/gaia/v5.0.8/ibc-0/config/config.toml b/ci/chains/gaia/v5.0.8/ibc-0/config/config.toml deleted file mode 100644 index eb6f4b2952..0000000000 --- a/ci/chains/gaia/v5.0.8/ibc-0/config/config.toml +++ /dev/null @@ -1,401 +0,0 @@ -# This is a TOML config file. -# For more information, see https://github.com/toml-lang/toml - -# NOTE: Any path below can be absolute (e.g. "/var/myawesomeapp/data") or -# relative to the home directory (e.g. "data"). The home directory is -# "$HOME/.tendermint" by default, but could be changed via $TMHOME env variable -# or --home cmd flag. 
- -####################################################################### -### Main Base Config Options ### -####################################################################### - -# TCP or UNIX socket address of the ABCI application, -# or the name of an ABCI application compiled in with the Tendermint binary -proxy_app = "tcp://127.0.0.1:26658" - -# A custom human readable name for this node -moniker = "ibc-0" - -# If this node is many blocks behind the tip of the chain, FastSync -# allows them to catchup quickly by downloading blocks in parallel -# and verifying their commits -fast_sync = true - -# Database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb -# * goleveldb (github.com/syndtr/goleveldb - most popular implementation) -# - pure go -# - stable -# * cleveldb (uses levigo wrapper) -# - fast -# - requires gcc -# - use cleveldb build tag (go build -tags cleveldb) -# * boltdb (uses etcd's fork of bolt - github.com/etcd-io/bbolt) -# - EXPERIMENTAL -# - may be faster is some use-cases (random reads - indexer) -# - use boltdb build tag (go build -tags boltdb) -# * rocksdb (uses github.com/tecbot/gorocksdb) -# - EXPERIMENTAL -# - requires gcc -# - use rocksdb build tag (go build -tags rocksdb) -# * badgerdb (uses github.com/dgraph-io/badger) -# - EXPERIMENTAL -# - use badgerdb build tag (go build -tags badgerdb) -db_backend = "goleveldb" - -# Database directory -db_dir = "data" - -# Output level for logging, including package level options -log_level = "info" - -# Output format: 'plain' (colored text) or 'json' -log_format = "plain" - -##### additional base config options ##### - -# Path to the JSON file containing the initial validator set and other meta data -genesis_file = "config/genesis.json" - -# Path to the JSON file containing the private key to use as a validator in the consensus protocol -priv_validator_key_file = "config/priv_validator_key.json" - -# Path to the JSON file containing the last sign state of a validator -priv_validator_state_file = "data/priv_validator_state.json" - -# TCP or UNIX socket address for Tendermint to listen on for -# connections from an external PrivValidator process -priv_validator_laddr = "" - -# Path to the JSON file containing the private key to use for node authentication in the p2p protocol -node_key_file = "config/node_key.json" - -# Mechanism to connect to the ABCI application: socket | grpc -abci = "socket" - -# If true, query the ABCI app on connecting to a new peer -# so the app can decide if we should keep the connection or not -filter_peers = false - - -####################################################################### -### Advanced Configuration Options ### -####################################################################### - -####################################################### -### RPC Server Configuration Options ### -####################################################### -[rpc] - -# TCP or UNIX socket address for the RPC server to listen on -laddr = "tcp://0.0.0.0:26657" - -# A list of origins a cross-domain request can be executed from -# Default value '[]' disables cors support -# Use '["*"]' to allow any origin -cors_allowed_origins = [] - -# A list of methods the client is allowed to use with cross-domain requests -cors_allowed_methods = ["HEAD", "GET", "POST", ] - -# A list of non simple headers the client is allowed to use with cross-domain requests -cors_allowed_headers = ["Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time", ] - -# TCP or UNIX socket address for the gRPC 
server to listen on -# NOTE: This server only supports /broadcast_tx_commit -grpc_laddr = "" - -# Maximum number of simultaneous connections. -# Does not include RPC (HTTP&WebSocket) connections. See max_open_connections -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} -# 1024 - 40 - 10 - 50 = 924 = ~900 -grpc_max_open_connections = 900 - -# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool -unsafe = false - -# Maximum number of simultaneous connections (including WebSocket). -# Does not include gRPC connections. See grpc_max_open_connections -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} -# 1024 - 40 - 10 - 50 = 924 = ~900 -max_open_connections = 900 - -# Maximum number of unique clientIDs that can /subscribe -# If you're using /broadcast_tx_commit, set to the estimated maximum number -# of broadcast_tx_commit calls per block. -max_subscription_clients = 100 - -# Maximum number of unique queries a given client can /subscribe to -# If you're using GRPC (or Local RPC client) and /broadcast_tx_commit, set to -# the estimated # maximum number of broadcast_tx_commit calls per block. -max_subscriptions_per_client = 5 - -# How long to wait for a tx to be committed during /broadcast_tx_commit. -# WARNING: Using a value larger than 10s will result in increasing the -# global HTTP write timeout, which applies to all connections and endpoints. -# See https://github.com/tendermint/tendermint/issues/3435 -timeout_broadcast_tx_commit = "10s" - -# Maximum size of request body, in bytes -max_body_bytes = 1000000 - -# Maximum size of request header, in bytes -max_header_bytes = 1048576 - -# The path to a file containing certificate that is used to create the HTTPS server. -# Might be either absolute path or path related to Tendermint's config directory. -# If the certificate is signed by a certificate authority, -# the certFile should be the concatenation of the server's certificate, any intermediates, -# and the CA's certificate. -# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. -# Otherwise, HTTP server is run. -tls_cert_file = "" - -# The path to a file containing matching private key that is used to create the HTTPS server. -# Might be either absolute path or path related to Tendermint's config directory. -# NOTE: both tls-cert-file and tls-key-file must be present for Tendermint to create HTTPS server. -# Otherwise, HTTP server is run. -tls_key_file = "" - -# pprof listen address (https://golang.org/pkg/net/http/pprof) -pprof_laddr = "localhost:6060" - -####################################################### -### P2P Configuration Options ### -####################################################### -[p2p] - -# Address to listen for incoming connections -laddr = "tcp://0.0.0.0:26656" - -# Address to advertise to peers for them to dial -# If empty, will use the same port as the laddr, -# and will introspect on the listener or use UPnP -# to figure out the address. 
ip and port are required -# example: 159.89.10.97:26656 -external_address = "" - -# Comma separated list of seed nodes to connect to -seeds = "" - -# Comma separated list of nodes to keep persistent connections to -persistent_peers = "" - -# UPNP port forwarding -upnp = false - -# Path to address book -addr_book_file = "config/addrbook.json" - -# Set true for strict address routability rules -# Set false for private or local networks -addr_book_strict = true - -# Maximum number of inbound peers -max_num_inbound_peers = 40 - -# Maximum number of outbound peers to connect to, excluding persistent peers -max_num_outbound_peers = 10 - -# List of node IDs, to which a connection will be (re)established ignoring any existing limits -unconditional_peer_ids = "" - -# Maximum pause when redialing a persistent peer (if zero, exponential backoff is used) -persistent_peers_max_dial_period = "0s" - -# Time to wait before flushing messages out on the connection -flush_throttle_timeout = "100ms" - -# Maximum size of a message packet payload, in bytes -max_packet_msg_payload_size = 1024 - -# Rate at which packets can be sent, in bytes/second -send_rate = 5120000 - -# Rate at which packets can be received, in bytes/second -recv_rate = 5120000 - -# Set true to enable the peer-exchange reactor -pex = true - -# Seed mode, in which node constantly crawls the network and looks for -# peers. If another node asks it for addresses, it responds and disconnects. -# -# Does not work if the peer-exchange reactor is disabled. -seed_mode = false - -# Comma separated list of peer IDs to keep private (will not be gossiped to other peers) -private_peer_ids = "" - -# Toggle to disable guard against peers connecting from the same ip. -allow_duplicate_ip = false - -# Peer connection configuration. -handshake_timeout = "20s" -dial_timeout = "3s" - -####################################################### -### Mempool Configuration Option ### -####################################################### -[mempool] - -recheck = true -broadcast = true -wal_dir = "" - -# Maximum number of transactions in the mempool -size = 5000 - -# Limit the total size of all txs in the mempool. -# This only accounts for raw transactions (e.g. given 1MB transactions and -# max_txs_bytes=5MB, mempool will only accept 5 transactions). -max_txs_bytes = 1073741824 - -# Size of the cache (used to filter transactions we saw earlier) in transactions -cache_size = 10000 - -# Do not remove invalid transactions from the cache (default: false) -# Set to true if it's not possible for any invalid transaction to become valid -# again in the future. -keep-invalid-txs-in-cache = false - -# Maximum size of a single transaction. -# NOTE: the max size of a tx transmitted over the network is {max_tx_bytes}. -max_tx_bytes = 1048576 - -# Maximum size of a batch of transactions to send to a peer -# Including space needed by encoding (one varint per transaction). -# XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796 -max_batch_bytes = 0 - -####################################################### -### State Sync Configuration Options ### -####################################################### -[statesync] -# State sync rapidly bootstraps a new node by discovering, fetching, and restoring a state machine -# snapshot from peers instead of fetching and replaying historical blocks. Requires some peers in -# the network to take and serve state machine snapshots. State sync is not attempted if the node -# has any local state (LastBlockHeight > 0). 
The node will have a truncated block history, -# starting from the height of the snapshot. -enable = false - -# RPC servers (comma-separated) for light client verification of the synced state machine and -# retrieval of state data for node bootstrapping. Also needs a trusted height and corresponding -# header hash obtained from a trusted source, and a period during which validators can be trusted. -# -# For Cosmos SDK-based chains, trust_period should usually be about 2/3 of the unbonding time (~2 -# weeks) during which they can be financially punished (slashed) for misbehavior. -rpc_servers = "" -trust_height = 0 -trust_hash = "" -trust_period = "168h0m0s" - -# Time to spend discovering snapshots before initiating a restore. -discovery_time = "15s" - -# Temporary directory for state sync snapshot chunks, defaults to the OS tempdir (typically /tmp). -# Will create a new, randomly named directory within, and remove it when done. -temp_dir = "" - -# The timeout duration before re-requesting a chunk, possibly from a different -# peer (default: 1 minute). -chunk_request_timeout = "10s" - -# The number of concurrent chunk fetchers to run (default: 1). -chunk_fetchers = "4" - -####################################################### -### Fast Sync Configuration Connections ### -####################################################### -[fastsync] - -# Fast Sync version to use: -# 1) "v0" (default) - the legacy fast sync implementation -# 2) "v1" - refactor of v0 version for better testability -# 2) "v2" - complete redesign of v0, optimized for testability & readability -version = "v0" - -####################################################### -### Consensus Configuration Options ### -####################################################### -[consensus] - -wal_file = "data/cs.wal/wal" - -# How long we wait for a proposal block before prevoting nil -timeout_propose = "1s" -# How much timeout_propose increases with each round -timeout_propose_delta = "500ms" -# How long we wait after receiving +2/3 prevotes for “anything” (ie. not a single block or nil) -timeout_prevote = "1s" -# How much the timeout_prevote increases with each round -timeout_prevote_delta = "500ms" -# How long we wait after receiving +2/3 precommits for “anything” (ie. not a single block or nil) -timeout_precommit = "1s" -# How much the timeout_precommit increases with each round -timeout_precommit_delta = "500ms" -# How long we wait after committing a block, before starting on the new -# height (this gives us a chance to receive some more precommits, even -# though we already have +2/3). -timeout_commit = "1s" - -# How many blocks to look back to check existence of the node's consensus votes before joining consensus -# When non-zero, the node will panic upon restart -# if the same consensus key was used to sign {double_sign_check_height} last blocks. -# So, validators should stop the state machine, wait for some blocks, and then restart the state machine to avoid panic. 
-double_sign_check_height = 0 - -# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) -skip_timeout_commit = false - -# EmptyBlocks mode and possible interval between empty blocks -create_empty_blocks = true -create_empty_blocks_interval = "0s" - -# Reactor sleep duration parameters -peer_gossip_sleep_duration = "100ms" -peer_query_maj23_sleep_duration = "2s" - -####################################################### -### Transaction Indexer Configuration Options ### -####################################################### -[tx_index] - -# What indexer to use for transactions -# -# The application will set which txs to index. In some cases a node operator will be able -# to decide which txs to index based on configuration set in the application. -# -# Options: -# 1) "null" -# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). -# - When "kv" is chosen "tx.height" and "tx.hash" will always be indexed. -indexer = "kv" - -####################################################### -### Instrumentation Configuration Options ### -####################################################### -[instrumentation] - -# When true, Prometheus metrics are served under /metrics on -# PrometheusListenAddr. -# Check out the documentation for the list of available metrics. -prometheus = false - -# Address to listen for Prometheus collector(s) connections -prometheus_listen_addr = ":26660" - -# Maximum number of simultaneous connections. -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -max_open_connections = 3 - -# Instrumentation namespace -namespace = "tendermint" diff --git a/ci/chains/gaia/v5.0.8/ibc-0/config/genesis.json b/ci/chains/gaia/v5.0.8/ibc-0/config/genesis.json deleted file mode 100644 index e2bdd082e4..0000000000 --- a/ci/chains/gaia/v5.0.8/ibc-0/config/genesis.json +++ /dev/null @@ -1,342 +0,0 @@ -{ - "genesis_time": "2021-11-04T15:47:29.231543194Z", - "chain_id": "ibc-0", - "initial_height": "1", - "consensus_params": { - "block": { - "max_bytes": "22020096", - "max_gas": "-1", - "time_iota_ms": "1000" - }, - "evidence": { - "max_age_num_blocks": "100000", - "max_age_duration": "172800000000000", - "max_bytes": "1048576" - }, - "validator": { - "pub_key_types": [ - "ed25519" - ] - }, - "version": {} - }, - "app_hash": "", - "app_state": { - "auth": { - "params": { - "max_memo_characters": "256", - "tx_sig_limit": "7", - "tx_size_cost_per_byte": "10", - "sig_verify_cost_ed25519": "590", - "sig_verify_cost_secp256k1": "1000" - }, - "accounts": [ - { - "@type": "/cosmos.auth.v1beta1.BaseAccount", - "address": "cosmos1exw8hfhy3y09nqfkvvr0y7crd0asmxcmauvuvd", - "pub_key": null, - "account_number": "0", - "sequence": "0" - }, - { - "@type": "/cosmos.auth.v1beta1.BaseAccount", - "address": "cosmos1vnatnf5m0g27uz2lk0dgq4dxg3gcscr7uajmv7", - "pub_key": null, - "account_number": "0", - "sequence": "0" - }, - { - "@type": "/cosmos.auth.v1beta1.BaseAccount", - "address": "cosmos1tgzhx34m3mamgzd5wjuwhhdptzu37crsmle9cz", - "pub_key": null, - "account_number": "0", - "sequence": "0" - } - ] - }, - "bank": { - "params": { - "send_enabled": [], - "default_send_enabled": true - }, - "balances": [ - { - "address": "cosmos1tgzhx34m3mamgzd5wjuwhhdptzu37crsmle9cz", - "coins": [ - { - "denom": "stake", - "amount": "100000000000" - } - ] - }, - { - "address": "cosmos1vnatnf5m0g27uz2lk0dgq4dxg3gcscr7uajmv7", - "coins": [ - { - "denom": "samoleans", - "amount": 
"100000000000" - }, - { - "denom": "stake", - "amount": "100000000000" - } - ] - }, - { - "address": "cosmos1exw8hfhy3y09nqfkvvr0y7crd0asmxcmauvuvd", - "coins": [ - { - "denom": "samoleans", - "amount": "100000000000" - }, - { - "denom": "stake", - "amount": "100000000000" - } - ] - } - ], - "supply": [ - { - "denom": "samoleans", - "amount": "200000000000" - }, - { - "denom": "stake", - "amount": "300000000000" - } - ], - "denom_metadata": [] - }, - "capability": { - "index": "1", - "owners": [] - }, - "crisis": { - "constant_fee": { - "denom": "stake", - "amount": "1000" - } - }, - "distribution": { - "params": { - "community_tax": "0.020000000000000000", - "base_proposer_reward": "0.010000000000000000", - "bonus_proposer_reward": "0.040000000000000000", - "withdraw_addr_enabled": true - }, - "fee_pool": { - "community_pool": [] - }, - "delegator_withdraw_infos": [], - "previous_proposer": "", - "outstanding_rewards": [], - "validator_accumulated_commissions": [], - "validator_historical_rewards": [], - "validator_current_rewards": [], - "delegator_starting_infos": [], - "validator_slash_events": [] - }, - "evidence": { - "evidence": [] - }, - "genutil": { - "gen_txs": [ - { - "body": { - "messages": [ - { - "@type": "/cosmos.staking.v1beta1.MsgCreateValidator", - "description": { - "moniker": "ibc-0", - "identity": "", - "website": "", - "security_contact": "", - "details": "" - }, - "commission": { - "rate": "0.100000000000000000", - "max_rate": "0.200000000000000000", - "max_change_rate": "0.010000000000000000" - }, - "min_self_delegation": "1", - "delegator_address": "cosmos1tgzhx34m3mamgzd5wjuwhhdptzu37crsmle9cz", - "validator_address": "cosmosvaloper1tgzhx34m3mamgzd5wjuwhhdptzu37crs7tds53", - "pubkey": { - "@type": "/cosmos.crypto.ed25519.PubKey", - "key": "b0Upk7Jh+8qlBg9/nBABHW57gwE9jRwXcVK2bw9UcWQ=" - }, - "value": { - "denom": "stake", - "amount": "100000000000" - } - } - ], - "memo": "e644f6ada6dc23fa6927484cca6e00183dc3fe0b@192.168.50.214:26656", - "timeout_height": "0", - "extension_options": [], - "non_critical_extension_options": [] - }, - "auth_info": { - "signer_infos": [ - { - "public_key": { - "@type": "/cosmos.crypto.secp256k1.PubKey", - "key": "A50bVFQ0B4TqK6Jf9m0D9NhVEO0HqJFgoIhPpcY/5RAD" - }, - "mode_info": { - "single": { - "mode": "SIGN_MODE_DIRECT" - } - }, - "sequence": "0" - } - ], - "fee": { - "amount": [], - "gas_limit": "200000", - "payer": "", - "granter": "" - } - }, - "signatures": [ - "W0OZ0bf1Hs6gz8vCofrI267vAe8KT+AdaJpH8gTGx7lDK94R+3wwItI9HDak61KZSScQqln0gXjXIpXMu4BkfQ==" - ] - } - ] - }, - "gov": { - "starting_proposal_id": "1", - "deposits": [], - "votes": [], - "proposals": [], - "deposit_params": { - "min_deposit": [ - { - "denom": "stake", - "amount": "10000000" - } - ], - "max_deposit_period": "200s" - }, - "voting_params": { - "voting_period": "200s" - }, - "tally_params": { - "quorum": "0.334000000000000000", - "threshold": "0.500000000000000000", - "veto_threshold": "0.334000000000000000" - } - }, - "ibc": { - "client_genesis": { - "clients": [], - "clients_consensus": [], - "clients_metadata": [], - "params": { - "allowed_clients": [ - "06-solomachine", - "07-tendermint" - ] - }, - "create_localhost": false, - "next_client_sequence": "0" - }, - "connection_genesis": { - "connections": [], - "client_connection_paths": [], - "next_connection_sequence": "0" - }, - "channel_genesis": { - "channels": [], - "acknowledgements": [], - "commitments": [], - "receipts": [], - "send_sequences": [], - "recv_sequences": [], - "ack_sequences": [], - 
"next_channel_sequence": "0" - } - }, - "liquidity": { - "params": { - "pool_types": [ - { - "id": 1, - "name": "StandardLiquidityPool", - "min_reserve_coin_num": 2, - "max_reserve_coin_num": 2, - "description": "Standard liquidity pool with pool price function X/Y, ESPM constraint, and two kinds of reserve coins" - } - ], - "min_init_deposit_amount": "1000000", - "init_pool_coin_mint_amount": "1000000", - "max_reserve_coin_amount": "0", - "pool_creation_fee": [ - { - "denom": "stake", - "amount": "40000000" - } - ], - "swap_fee_rate": "0.003000000000000000", - "withdraw_fee_rate": "0.000000000000000000", - "max_order_amount_ratio": "0.100000000000000000", - "unit_batch_height": 1, - "circuit_breaker_enabled": false - }, - "pool_records": [] - }, - "mint": { - "minter": { - "inflation": "0.130000000000000000", - "annual_provisions": "0.000000000000000000" - }, - "params": { - "mint_denom": "stake", - "inflation_rate_change": "0.130000000000000000", - "inflation_max": "0.200000000000000000", - "inflation_min": "0.070000000000000000", - "goal_bonded": "0.670000000000000000", - "blocks_per_year": "6311520" - } - }, - "params": null, - "slashing": { - "params": { - "signed_blocks_window": "100", - "min_signed_per_window": "0.500000000000000000", - "downtime_jail_duration": "600s", - "slash_fraction_double_sign": "0.050000000000000000", - "slash_fraction_downtime": "0.010000000000000000" - }, - "signing_infos": [], - "missed_blocks": [] - }, - "staking": { - "params": { - "unbonding_time": "1814400s", - "max_validators": 100, - "max_entries": 7, - "historical_entries": 10000, - "bond_denom": "stake" - }, - "last_total_power": "0", - "last_validator_powers": [], - "validators": [], - "delegations": [], - "unbonding_delegations": [], - "redelegations": [], - "exported": false - }, - "transfer": { - "port_id": "transfer", - "denom_traces": [], - "params": { - "send_enabled": true, - "receive_enabled": true - } - }, - "upgrade": {}, - "vesting": {} - } -} \ No newline at end of file diff --git a/ci/chains/gaia/v5.0.8/ibc-0/config/gentx/gentx-e644f6ada6dc23fa6927484cca6e00183dc3fe0b.json b/ci/chains/gaia/v5.0.8/ibc-0/config/gentx/gentx-e644f6ada6dc23fa6927484cca6e00183dc3fe0b.json deleted file mode 100644 index a5a6bb6187..0000000000 --- a/ci/chains/gaia/v5.0.8/ibc-0/config/gentx/gentx-e644f6ada6dc23fa6927484cca6e00183dc3fe0b.json +++ /dev/null @@ -1 +0,0 @@ -{"body":{"messages":[{"@type":"/cosmos.staking.v1beta1.MsgCreateValidator","description":{"moniker":"ibc-0","identity":"","website":"","security_contact":"","details":""},"commission":{"rate":"0.100000000000000000","max_rate":"0.200000000000000000","max_change_rate":"0.010000000000000000"},"min_self_delegation":"1","delegator_address":"cosmos1tgzhx34m3mamgzd5wjuwhhdptzu37crsmle9cz","validator_address":"cosmosvaloper1tgzhx34m3mamgzd5wjuwhhdptzu37crs7tds53","pubkey":{"@type":"/cosmos.crypto.ed25519.PubKey","key":"b0Upk7Jh+8qlBg9/nBABHW57gwE9jRwXcVK2bw9UcWQ="},"value":{"denom":"stake","amount":"100000000000"}}],"memo":"e644f6ada6dc23fa6927484cca6e00183dc3fe0b@192.168.50.214:26656","timeout_height":"0","extension_options":[],"non_critical_extension_options":[]},"auth_info":{"signer_infos":[{"public_key":{"@type":"/cosmos.crypto.secp256k1.PubKey","key":"A50bVFQ0B4TqK6Jf9m0D9NhVEO0HqJFgoIhPpcY/5RAD"},"mode_info":{"single":{"mode":"SIGN_MODE_DIRECT"}},"sequence":"0"}],"fee":{"amount":[],"gas_limit":"200000","payer":"","granter":""}},"signatures":["W0OZ0bf1Hs6gz8vCofrI267vAe8KT+AdaJpH8gTGx7lDK94R+3wwItI9HDak61KZSScQqln0gXjXIpXMu4BkfQ=="]} diff --git 
a/ci/chains/gaia/v5.0.8/ibc-0/config/node_key.json b/ci/chains/gaia/v5.0.8/ibc-0/config/node_key.json deleted file mode 100644 index 65bec3d2d6..0000000000 --- a/ci/chains/gaia/v5.0.8/ibc-0/config/node_key.json +++ /dev/null @@ -1 +0,0 @@ -{"priv_key":{"type":"tendermint/PrivKeyEd25519","value":"PnKW6NeBJUMFjHt4zFCFcoODQ6igtYYqMjFp6MJNGKdb7ynzjZlXJoFDdL31vKFVjfqBLTT5J64JFslespTbkw=="}} \ No newline at end of file diff --git a/ci/chains/gaia/v5.0.8/ibc-0/config/priv_validator_key.json b/ci/chains/gaia/v5.0.8/ibc-0/config/priv_validator_key.json deleted file mode 100644 index 8b5954526b..0000000000 --- a/ci/chains/gaia/v5.0.8/ibc-0/config/priv_validator_key.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "address": "A129A9C5DCC0E76B98FEE6CC94007F5A9DD9FFCB", - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "b0Upk7Jh+8qlBg9/nBABHW57gwE9jRwXcVK2bw9UcWQ=" - }, - "priv_key": { - "type": "tendermint/PrivKeyEd25519", - "value": "bllmLWHxWHFQecOBGM976AuAN9lD3RFHRTgrcY0au2JvRSmTsmH7yqUGD3+cEAEdbnuDAT2NHBdxUrZvD1RxZA==" - } -} \ No newline at end of file diff --git a/ci/chains/gaia/v5.0.8/ibc-0/keyring-test/5a057346bb8efbb409b474b8ebdda158b91f6070.address b/ci/chains/gaia/v5.0.8/ibc-0/keyring-test/5a057346bb8efbb409b474b8ebdda158b91f6070.address deleted file mode 100644 index f82461c049..0000000000 --- a/ci/chains/gaia/v5.0.8/ibc-0/keyring-test/5a057346bb8efbb409b474b8ebdda158b91f6070.address +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0xMS0wNCAxMTo0NzozMC4zMTcyMTgyNDggLTA0MDAgRURUIG09KzAuMDczMzM0NDQxIiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiZkJwSWlqbzZ3UHRMUDRhYiJ9.Xd813npMEw-9lrIGXKwsFu0CejBjnuQlCk5L1_mKs2fCACcHeyeESg.eT2fnPglp8HNTBKd.DBxSUHsnMx08T2rBia1ps1adGRxtH44DscMgTgL-D-xu0cHLLxkziH4U6pK1nD9TTA5p7OnFIv7-QnWpTvVhfbmPlmpmDqM-UkAPhMFIm7mtkBxUq1jf5qqRDHyT-5ikSEn0tIceZHC0cXg0C_son_QpW-LSCh-YUoNKpTBZCTX6u0jrZnQISzU4ztgYKpX5uSCIJLZel-3zqSw50p8HWneex9H5bQBbGItSBxEo6bARvGNCDD3N3rLq.0UVZuIkn97EnR1AL_Yrb7Q \ No newline at end of file diff --git a/ci/chains/gaia/v5.0.8/ibc-0/keyring-test/64fab9a69b7a15ee095fb3da8055a6445188607e.address b/ci/chains/gaia/v5.0.8/ibc-0/keyring-test/64fab9a69b7a15ee095fb3da8055a6445188607e.address deleted file mode 100644 index 4e62c6a822..0000000000 --- a/ci/chains/gaia/v5.0.8/ibc-0/keyring-test/64fab9a69b7a15ee095fb3da8055a6445188607e.address +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0xMS0wNCAxMTo0NzozMi40ODAzMTc0NjggLTA0MDAgRURUIG09KzAuMDY0ODk5ODA1IiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiaDhIT3drRGw0UnplckxvOCJ9.vTyBa21aNqfLtvvdm0QftogEJQQp2FerqFR9tflolVBcE5-O59FuAQ.lJ6VZ4mXuG5M5K9P.ff22Ek0KiqEvRn1QY1oCKYd2qFPDgnJ3gccM1UWmoh6fIUx8Pbsktrnu45HtwHpnfEn8Gxp24snmRv0ad3Qq_h_j5lSS98mnoss7d1CLhtF7P7DBYwLNMDSNCbMjgD0-Bx0ZhdgTGoBW9_t23aEQH2yBVt6pfkse2Kd9hKvy31-MovUt-WyQ3DzxjyFwH3utdazTnIwyCmCH5fW95VraXeX2-ynI47pYQoZ3vA0StfYawE59CI4.RkuJB1gfEF2wWRwnk-1oKA \ No newline at end of file diff --git a/ci/chains/gaia/v5.0.8/ibc-0/keyring-test/c99c7ba6e4891e5981366306f27b036bfb0d9b1b.address b/ci/chains/gaia/v5.0.8/ibc-0/keyring-test/c99c7ba6e4891e5981366306f27b036bfb0d9b1b.address deleted file mode 100644 index af2a4f4748..0000000000 --- a/ci/chains/gaia/v5.0.8/ibc-0/keyring-test/c99c7ba6e4891e5981366306f27b036bfb0d9b1b.address +++ /dev/null @@ -1 +0,0 @@ 
-eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0xMS0wNCAxMTo0NzozMS40MDI1NjU1MjcgLTA0MDAgRURUIG09KzAuMDcwNTE1NTI0IiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiNjNjVEtlNF9QZi1WNW9CdiJ9.ZnIuGZ7P72EPIxV-hnmbpKNJxNkM23GUizPhBP520TSAQvGY_XsBsw.e4wvOVOnnoNdApXM.Joe1jjxS6ej_zICXtu6WpfcsZdXRqLoXdqK6_dPQslinSSfKKdekMb62zYCNOxeyjXb9RSu3EbYRGq3TAG8OWZaqpbsYuenjAvrlIRYqGY1jxwIFQwK6rTgGrZwzb_KxdB9mI99kGBwLP-T8VsvI-bWtBBri5sIIb-UEf_F4NPfI9vs4Bkco-PV520xPUN8T1e6oycJFYQYy2H99vcFddQ8ENrF2T8xr0v0HLeMd9fbnSQ.NSXG5ji268uoENc0zVHUmA \ No newline at end of file diff --git a/ci/chains/gaia/v5.0.8/ibc-0/keyring-test/user.info b/ci/chains/gaia/v5.0.8/ibc-0/keyring-test/user.info deleted file mode 100644 index ab21145159..0000000000 --- a/ci/chains/gaia/v5.0.8/ibc-0/keyring-test/user.info +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0xMS0wNCAxMTo0NzozMS4zOTY1MzkzOTggLTA0MDAgRURUIG09KzAuMDY0NDg5MzkxIiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiZ0t6ZjgtZHNDVktwRHo2ZCJ9.pVu0Mtx4jNLLIDS3pgoN02A3GI1NOyof03z2kQBthJkkLIFn8aoKYg.YV7kj3HXaHRBuaS_.o2ODJ4i0huip-aamiyMiDmuZc9ZOc65R79fKmB46efEl8jcjog7Zql5z0WoKtFh9qfkHAUYnMM1dVZ3O6d_gSZU-r-nEoq4WqZep__zLGJ7ETX-WPz11y-oi4ZeOpXeEh7gCDFb9kZhIzIDvoBv-qYVfOFC33OoN6Bz2OGtC4aIjVPf5vwcTwaRhZMaM_atH5smS18LODhppcSmJAjV9xjsca3KGQtAAYLYuVT3PG36W7p4JAww2m3AeeqHP0tdQQwa6sg8PLOQg9Zxuw9H614ZEgGX7-Ao9iVUkqP8BCOCoS9RnffAkfP0WCR7KNoA5IxJAdk8kKBWJfdsKb7xdyO5RNSIzbuk.xkWnt_f8cV9kh7it2xXK7A \ No newline at end of file diff --git a/ci/chains/gaia/v5.0.8/ibc-0/keyring-test/user2.info b/ci/chains/gaia/v5.0.8/ibc-0/keyring-test/user2.info deleted file mode 100644 index e30f9f7b76..0000000000 --- a/ci/chains/gaia/v5.0.8/ibc-0/keyring-test/user2.info +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0xMS0wNCAxMTo0NzozMi40NzQzNDY3MTcgLTA0MDAgRURUIG09KzAuMDU4OTI5MDM3IiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiVmNSTG9SMmhQWFhtM01oNiJ9.qcJz1u3RDq-IFS9eLisFpeU3mKBn4TuQNzr7WcMurqjW5EmNmUEtKA.7RRHagT_GFcnaFxJ.pkLQs5JLngcUu9FIaM6dpx3TIWmw8hMLi9DVq9DjVPdwO-_79CBYAUf5-p62iw7czNl5f5l0YwtE-_d2pwcsS3sl0F6Ez2RsxVCvZrC755XCuFktkWWL-5uXbUpFNyvZs-6ATOxhr0aoAkF5ZGkUUFweTMZrglLGh5HlKymVrSx9MXSqVrZOB3CFhZVxX-EobcxKm4x9Jph7LpDj1JODpDcOawB6zZwMF0bqYPywTfKnrZlB7xlmhcwv_43vh8Yh_EhXeCj4ps4i7PqMiBLg2Wn9Mi8SYL5P-C_J2gi_PvsPU5zKkVDVuqEGrlMCTWEz4s0tAeEwoJFk77MpPK323jXv4U87o4pc.W5JsaLin4N_eJ9rFryQJEQ \ No newline at end of file diff --git a/ci/chains/gaia/v5.0.8/ibc-0/keyring-test/validator.info b/ci/chains/gaia/v5.0.8/ibc-0/keyring-test/validator.info deleted file mode 100644 index 5aac0f6eba..0000000000 --- a/ci/chains/gaia/v5.0.8/ibc-0/keyring-test/validator.info +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0xMS0wNCAxMTo0NzozMC4zMTA4MDQ2NDcgLTA0MDAgRURUIG09KzAuMDY2OTIwODYwIiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiejF4dkoycjE5SjNMXzhCUiJ9.n8QDC3Qcju_iKHP-15BXBPKcAedMbGd6L8MmglKV1gkneTZoFWAIvw.a2J96EcNQ6dLaguQ.2QRIomjZxj5ROCWXfe7wgxnWl0_MncasnkmsbP5yH0m-lmjd_sRFIQ53MpIsFqAHQj1xFBmJRP2GqXQZkQt-QeIrXyzylzmDK9FtlKL-gvCxuoOKync4Il62SmnClMkFGPObebRKIAluQoZ8DzVYWSklWyP51eIBewz2FyONvQkOS5ZTUYcTzqhnK_ZwQM1t_uzBQ9TvP1sUvABRsAwvfWZke8iP4I-uBHlMXO-34bCQCmYRRo9TF49ti0T3tbAt7rDjmhv9N0BrGv4Ir_o81wo-4Zd0xgdtZlu_Nt8_99qnF9hQkAvkUD6u6NyVOGC3Gpppjl6D3KkWxarGkbJFiFVubMLPmkO_3fll7kOsEEFMjcKR.2TUybiWXhefRJJF6cPTGHw \ No newline at end of file diff --git a/ci/chains/gaia/v5.0.8/ibc-0/user2_seed.json b/ci/chains/gaia/v5.0.8/ibc-0/user2_seed.json deleted file mode 100644 index 20fa1e4754..0000000000 --- 
a/ci/chains/gaia/v5.0.8/ibc-0/user2_seed.json +++ /dev/null @@ -1 +0,0 @@ -{"name":"user2","type":"local","address":"cosmos1vnatnf5m0g27uz2lk0dgq4dxg3gcscr7uajmv7","pubkey":"cosmospub1addwnpepqg2wvn5a6kn7yjqqxvchav8g9yz46xywfvn248qvmj56jpxypjvakjqsc3m","mnemonic":"naive early elbow chat model athlete lottery unfold comfort scare portion army era patch yard penalty two time student jazz middle endless execute relax"} diff --git a/ci/chains/gaia/v5.0.8/ibc-0/user_seed.json b/ci/chains/gaia/v5.0.8/ibc-0/user_seed.json deleted file mode 100644 index d99cc314dc..0000000000 --- a/ci/chains/gaia/v5.0.8/ibc-0/user_seed.json +++ /dev/null @@ -1 +0,0 @@ -{"name":"user","type":"local","address":"cosmos1exw8hfhy3y09nqfkvvr0y7crd0asmxcmauvuvd","pubkey":"cosmospub1addwnpepq28jx6t5xgfl3pkvgydru2h68pwuqhunkm70hfttqp2esswctdzv6echc75","mnemonic":"lock border during undo menu crouch ticket absurd slight remove sock more nominee ketchup night parrot firm future essence need devote client sugar stadium"} diff --git a/ci/chains/gaia/v5.0.8/ibc-0/validator_seed.json b/ci/chains/gaia/v5.0.8/ibc-0/validator_seed.json deleted file mode 100644 index 1c00c62d5f..0000000000 --- a/ci/chains/gaia/v5.0.8/ibc-0/validator_seed.json +++ /dev/null @@ -1 +0,0 @@ -{"name":"validator","type":"local","address":"cosmos1tgzhx34m3mamgzd5wjuwhhdptzu37crsmle9cz","pubkey":"cosmospub1addwnpepqww3k4z5xsrcf63t5f0lvmgr7nv92y8dq75fzc9q3p86t33lu5gqx5vfz4y","mnemonic":"trigger liar blast diesel fall mention quantum vast walnut mail rally there village crop timber assault bachelor scene taste hover top caught concert rather"} diff --git a/ci/chains/gaia/v5.0.8/ibc-1/config/app.toml b/ci/chains/gaia/v5.0.8/ibc-1/config/app.toml deleted file mode 100644 index 54de73e392..0000000000 --- a/ci/chains/gaia/v5.0.8/ibc-1/config/app.toml +++ /dev/null @@ -1,152 +0,0 @@ -# This is a TOML config file. -# For more information, see https://github.com/toml-lang/toml - -############################################################################### -### Base Configuration ### -############################################################################### - -# The minimum gas prices a validator is willing to accept for processing a -# transaction. A transaction's fees must meet the minimum of any denomination -# specified in this config (e.g. 0.25token1;0.0001token2). -minimum-gas-prices = "" - -# default: the last 100 states are kept in addition to every 500th state; pruning at 10 block intervals -# nothing: all historic states will be saved, nothing will be deleted (i.e. archiving node) -# everything: all saved states will be deleted, storing only the current state; pruning at 10 block intervals -# custom: allow pruning options to be manually specified through 'pruning-keep-recent', 'pruning-keep-every', and 'pruning-interval' -pruning = "default" - -# These are applied if and only if the pruning strategy is custom. -pruning-keep-recent = "0" -pruning-keep-every = "0" -pruning-interval = "0" - -# HaltHeight contains a non-zero block height at which a node will gracefully -# halt and shutdown that can be used to assist upgrades and testing. -# -# Note: Commitment of state will be attempted on the corresponding block. -halt-height = 0 - -# HaltTime contains a non-zero minimum block time (in Unix seconds) at which -# a node will gracefully halt and shutdown that can be used to assist upgrades -# and testing. -# -# Note: Commitment of state will be attempted on the corresponding block. 
-halt-time = 0 - -# MinRetainBlocks defines the minimum block height offset from the current -# block being committed, such that all blocks past this offset are pruned -# from Tendermint. It is used as part of the process of determining the -# ResponseCommit.RetainHeight value during ABCI Commit. A value of 0 indicates -# that no blocks should be pruned. -# -# This configuration value is only responsible for pruning Tendermint blocks. -# It has no bearing on application state pruning which is determined by the -# "pruning-*" configurations. -# -# Note: Tendermint block pruning is dependant on this parameter in conunction -# with the unbonding (safety threshold) period, state pruning and state sync -# snapshot parameters to determine the correct minimum value of -# ResponseCommit.RetainHeight. -min-retain-blocks = 0 - -# InterBlockCache enables inter-block caching. -inter-block-cache = true - -# IndexEvents defines the set of events in the form {eventType}.{attributeKey}, -# which informs Tendermint what to index. If empty, all events will be indexed. -# -# Example: -# ["message.sender", "message.recipient"] -index-events = [] - -############################################################################### -### Telemetry Configuration ### -############################################################################### - -[telemetry] - -# Prefixed with keys to separate services. -service-name = "" - -# Enabled enables the application telemetry functionality. When enabled, -# an in-memory sink is also enabled by default. Operators may also enabled -# other sinks such as Prometheus. -enabled = false - -# Enable prefixing gauge values with hostname. -enable-hostname = false - -# Enable adding hostname to labels. -enable-hostname-label = false - -# Enable adding service to labels. -enable-service-label = false - -# PrometheusRetentionTime, when positive, enables a Prometheus metrics sink. -prometheus-retention-time = 0 - -# GlobalLabels defines a global set of name/value label tuples applied to all -# metrics emitted using the wrapper functions defined in telemetry package. -# -# Example: -# [["chain_id", "cosmoshub-1"]] -global-labels = [ -] - -############################################################################### -### API Configuration ### -############################################################################### - -[api] - -# Enable defines if the API server should be enabled. -enable = false - -# Swagger defines if swagger documentation should automatically be registered. -swagger = false - -# Address defines the API server to listen on. -address = "tcp://0.0.0.0:1317" - -# MaxOpenConnections defines the number of maximum open connections. -max-open-connections = 1000 - -# RPCReadTimeout defines the Tendermint RPC read timeout (in seconds). -rpc-read-timeout = 10 - -# RPCWriteTimeout defines the Tendermint RPC write timeout (in seconds). -rpc-write-timeout = 0 - -# RPCMaxBodyBytes defines the Tendermint maximum response body (in bytes). -rpc-max-body-bytes = 1000000 - -# EnableUnsafeCORS defines if CORS should be enabled (unsafe - use it at your own risk). -enabled-unsafe-cors = false - -############################################################################### -### gRPC Configuration ### -############################################################################### - -[grpc] - -# Enable defines if the gRPC server should be enabled. -enable = true - -# Address defines the gRPC server address to bind to. 
-address = "0.0.0.0:9090" - -############################################################################### -### State Sync Configuration ### -############################################################################### - -# State sync snapshots allow other nodes to rapidly join the network without replaying historical -# blocks, instead downloading and applying a snapshot of the application state at a given height. -[state-sync] - -# snapshot-interval specifies the block interval at which local state sync snapshots are -# taken (0 to disable). Must be a multiple of pruning-keep-every. -snapshot-interval = 0 - -# snapshot-keep-recent specifies the number of recent snapshots to keep and serve (0 to keep all). -snapshot-keep-recent = 2 diff --git a/ci/chains/gaia/v5.0.8/ibc-1/config/client.toml b/ci/chains/gaia/v5.0.8/ibc-1/config/client.toml deleted file mode 100644 index 222695a3f8..0000000000 --- a/ci/chains/gaia/v5.0.8/ibc-1/config/client.toml +++ /dev/null @@ -1,17 +0,0 @@ -# This is a TOML config file. -# For more information, see https://github.com/toml-lang/toml - -############################################################################### -### Client Configuration ### -############################################################################### - -# The network chain ID -chain-id = "" -# The keyring's backend, where the keys are stored (os|file|kwallet|pass|test|memory) -keyring-backend = "os" -# CLI output format (text|json) -output = "text" -# : to Tendermint RPC interface for this chain -node = "tcp://localhost:26657" -# Transaction broadcasting mode (sync|async|block) -broadcast-mode = "sync" diff --git a/ci/chains/gaia/v5.0.8/ibc-1/config/config.toml b/ci/chains/gaia/v5.0.8/ibc-1/config/config.toml deleted file mode 100644 index bd592119ba..0000000000 --- a/ci/chains/gaia/v5.0.8/ibc-1/config/config.toml +++ /dev/null @@ -1,401 +0,0 @@ -# This is a TOML config file. -# For more information, see https://github.com/toml-lang/toml - -# NOTE: Any path below can be absolute (e.g. "/var/myawesomeapp/data") or -# relative to the home directory (e.g. "data"). The home directory is -# "$HOME/.tendermint" by default, but could be changed via $TMHOME env variable -# or --home cmd flag. 
- -####################################################################### -### Main Base Config Options ### -####################################################################### - -# TCP or UNIX socket address of the ABCI application, -# or the name of an ABCI application compiled in with the Tendermint binary -proxy_app = "tcp://127.0.0.1:26658" - -# A custom human readable name for this node -moniker = "ibc-1" - -# If this node is many blocks behind the tip of the chain, FastSync -# allows them to catchup quickly by downloading blocks in parallel -# and verifying their commits -fast_sync = true - -# Database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb -# * goleveldb (github.com/syndtr/goleveldb - most popular implementation) -# - pure go -# - stable -# * cleveldb (uses levigo wrapper) -# - fast -# - requires gcc -# - use cleveldb build tag (go build -tags cleveldb) -# * boltdb (uses etcd's fork of bolt - github.com/etcd-io/bbolt) -# - EXPERIMENTAL -# - may be faster is some use-cases (random reads - indexer) -# - use boltdb build tag (go build -tags boltdb) -# * rocksdb (uses github.com/tecbot/gorocksdb) -# - EXPERIMENTAL -# - requires gcc -# - use rocksdb build tag (go build -tags rocksdb) -# * badgerdb (uses github.com/dgraph-io/badger) -# - EXPERIMENTAL -# - use badgerdb build tag (go build -tags badgerdb) -db_backend = "goleveldb" - -# Database directory -db_dir = "data" - -# Output level for logging, including package level options -log_level = "info" - -# Output format: 'plain' (colored text) or 'json' -log_format = "plain" - -##### additional base config options ##### - -# Path to the JSON file containing the initial validator set and other meta data -genesis_file = "config/genesis.json" - -# Path to the JSON file containing the private key to use as a validator in the consensus protocol -priv_validator_key_file = "config/priv_validator_key.json" - -# Path to the JSON file containing the last sign state of a validator -priv_validator_state_file = "data/priv_validator_state.json" - -# TCP or UNIX socket address for Tendermint to listen on for -# connections from an external PrivValidator process -priv_validator_laddr = "" - -# Path to the JSON file containing the private key to use for node authentication in the p2p protocol -node_key_file = "config/node_key.json" - -# Mechanism to connect to the ABCI application: socket | grpc -abci = "socket" - -# If true, query the ABCI app on connecting to a new peer -# so the app can decide if we should keep the connection or not -filter_peers = false - - -####################################################################### -### Advanced Configuration Options ### -####################################################################### - -####################################################### -### RPC Server Configuration Options ### -####################################################### -[rpc] - -# TCP or UNIX socket address for the RPC server to listen on -laddr = "tcp://0.0.0.0:26657" - -# A list of origins a cross-domain request can be executed from -# Default value '[]' disables cors support -# Use '["*"]' to allow any origin -cors_allowed_origins = [] - -# A list of methods the client is allowed to use with cross-domain requests -cors_allowed_methods = ["HEAD", "GET", "POST", ] - -# A list of non simple headers the client is allowed to use with cross-domain requests -cors_allowed_headers = ["Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time", ] - -# TCP or UNIX socket address for the gRPC 
server to listen on -# NOTE: This server only supports /broadcast_tx_commit -grpc_laddr = "" - -# Maximum number of simultaneous connections. -# Does not include RPC (HTTP&WebSocket) connections. See max_open_connections -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} -# 1024 - 40 - 10 - 50 = 924 = ~900 -grpc_max_open_connections = 900 - -# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool -unsafe = false - -# Maximum number of simultaneous connections (including WebSocket). -# Does not include gRPC connections. See grpc_max_open_connections -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} -# 1024 - 40 - 10 - 50 = 924 = ~900 -max_open_connections = 900 - -# Maximum number of unique clientIDs that can /subscribe -# If you're using /broadcast_tx_commit, set to the estimated maximum number -# of broadcast_tx_commit calls per block. -max_subscription_clients = 100 - -# Maximum number of unique queries a given client can /subscribe to -# If you're using GRPC (or Local RPC client) and /broadcast_tx_commit, set to -# the estimated # maximum number of broadcast_tx_commit calls per block. -max_subscriptions_per_client = 5 - -# How long to wait for a tx to be committed during /broadcast_tx_commit. -# WARNING: Using a value larger than 10s will result in increasing the -# global HTTP write timeout, which applies to all connections and endpoints. -# See https://github.com/tendermint/tendermint/issues/3435 -timeout_broadcast_tx_commit = "10s" - -# Maximum size of request body, in bytes -max_body_bytes = 1000000 - -# Maximum size of request header, in bytes -max_header_bytes = 1048576 - -# The path to a file containing certificate that is used to create the HTTPS server. -# Might be either absolute path or path related to Tendermint's config directory. -# If the certificate is signed by a certificate authority, -# the certFile should be the concatenation of the server's certificate, any intermediates, -# and the CA's certificate. -# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. -# Otherwise, HTTP server is run. -tls_cert_file = "" - -# The path to a file containing matching private key that is used to create the HTTPS server. -# Might be either absolute path or path related to Tendermint's config directory. -# NOTE: both tls-cert-file and tls-key-file must be present for Tendermint to create HTTPS server. -# Otherwise, HTTP server is run. -tls_key_file = "" - -# pprof listen address (https://golang.org/pkg/net/http/pprof) -pprof_laddr = "localhost:6060" - -####################################################### -### P2P Configuration Options ### -####################################################### -[p2p] - -# Address to listen for incoming connections -laddr = "tcp://0.0.0.0:26656" - -# Address to advertise to peers for them to dial -# If empty, will use the same port as the laddr, -# and will introspect on the listener or use UPnP -# to figure out the address. 
ip and port are required -# example: 159.89.10.97:26656 -external_address = "" - -# Comma separated list of seed nodes to connect to -seeds = "" - -# Comma separated list of nodes to keep persistent connections to -persistent_peers = "" - -# UPNP port forwarding -upnp = false - -# Path to address book -addr_book_file = "config/addrbook.json" - -# Set true for strict address routability rules -# Set false for private or local networks -addr_book_strict = true - -# Maximum number of inbound peers -max_num_inbound_peers = 40 - -# Maximum number of outbound peers to connect to, excluding persistent peers -max_num_outbound_peers = 10 - -# List of node IDs, to which a connection will be (re)established ignoring any existing limits -unconditional_peer_ids = "" - -# Maximum pause when redialing a persistent peer (if zero, exponential backoff is used) -persistent_peers_max_dial_period = "0s" - -# Time to wait before flushing messages out on the connection -flush_throttle_timeout = "100ms" - -# Maximum size of a message packet payload, in bytes -max_packet_msg_payload_size = 1024 - -# Rate at which packets can be sent, in bytes/second -send_rate = 5120000 - -# Rate at which packets can be received, in bytes/second -recv_rate = 5120000 - -# Set true to enable the peer-exchange reactor -pex = true - -# Seed mode, in which node constantly crawls the network and looks for -# peers. If another node asks it for addresses, it responds and disconnects. -# -# Does not work if the peer-exchange reactor is disabled. -seed_mode = false - -# Comma separated list of peer IDs to keep private (will not be gossiped to other peers) -private_peer_ids = "" - -# Toggle to disable guard against peers connecting from the same ip. -allow_duplicate_ip = false - -# Peer connection configuration. -handshake_timeout = "20s" -dial_timeout = "3s" - -####################################################### -### Mempool Configuration Option ### -####################################################### -[mempool] - -recheck = true -broadcast = true -wal_dir = "" - -# Maximum number of transactions in the mempool -size = 5000 - -# Limit the total size of all txs in the mempool. -# This only accounts for raw transactions (e.g. given 1MB transactions and -# max_txs_bytes=5MB, mempool will only accept 5 transactions). -max_txs_bytes = 1073741824 - -# Size of the cache (used to filter transactions we saw earlier) in transactions -cache_size = 10000 - -# Do not remove invalid transactions from the cache (default: false) -# Set to true if it's not possible for any invalid transaction to become valid -# again in the future. -keep-invalid-txs-in-cache = false - -# Maximum size of a single transaction. -# NOTE: the max size of a tx transmitted over the network is {max_tx_bytes}. -max_tx_bytes = 1048576 - -# Maximum size of a batch of transactions to send to a peer -# Including space needed by encoding (one varint per transaction). -# XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796 -max_batch_bytes = 0 - -####################################################### -### State Sync Configuration Options ### -####################################################### -[statesync] -# State sync rapidly bootstraps a new node by discovering, fetching, and restoring a state machine -# snapshot from peers instead of fetching and replaying historical blocks. Requires some peers in -# the network to take and serve state machine snapshots. State sync is not attempted if the node -# has any local state (LastBlockHeight > 0). 
The node will have a truncated block history, -# starting from the height of the snapshot. -enable = false - -# RPC servers (comma-separated) for light client verification of the synced state machine and -# retrieval of state data for node bootstrapping. Also needs a trusted height and corresponding -# header hash obtained from a trusted source, and a period during which validators can be trusted. -# -# For Cosmos SDK-based chains, trust_period should usually be about 2/3 of the unbonding time (~2 -# weeks) during which they can be financially punished (slashed) for misbehavior. -rpc_servers = "" -trust_height = 0 -trust_hash = "" -trust_period = "168h0m0s" - -# Time to spend discovering snapshots before initiating a restore. -discovery_time = "15s" - -# Temporary directory for state sync snapshot chunks, defaults to the OS tempdir (typically /tmp). -# Will create a new, randomly named directory within, and remove it when done. -temp_dir = "" - -# The timeout duration before re-requesting a chunk, possibly from a different -# peer (default: 1 minute). -chunk_request_timeout = "10s" - -# The number of concurrent chunk fetchers to run (default: 1). -chunk_fetchers = "4" - -####################################################### -### Fast Sync Configuration Connections ### -####################################################### -[fastsync] - -# Fast Sync version to use: -# 1) "v0" (default) - the legacy fast sync implementation -# 2) "v1" - refactor of v0 version for better testability -# 2) "v2" - complete redesign of v0, optimized for testability & readability -version = "v0" - -####################################################### -### Consensus Configuration Options ### -####################################################### -[consensus] - -wal_file = "data/cs.wal/wal" - -# How long we wait for a proposal block before prevoting nil -timeout_propose = "1s" -# How much timeout_propose increases with each round -timeout_propose_delta = "500ms" -# How long we wait after receiving +2/3 prevotes for “anything” (ie. not a single block or nil) -timeout_prevote = "1s" -# How much the timeout_prevote increases with each round -timeout_prevote_delta = "500ms" -# How long we wait after receiving +2/3 precommits for “anything” (ie. not a single block or nil) -timeout_precommit = "1s" -# How much the timeout_precommit increases with each round -timeout_precommit_delta = "500ms" -# How long we wait after committing a block, before starting on the new -# height (this gives us a chance to receive some more precommits, even -# though we already have +2/3). -timeout_commit = "1s" - -# How many blocks to look back to check existence of the node's consensus votes before joining consensus -# When non-zero, the node will panic upon restart -# if the same consensus key was used to sign {double_sign_check_height} last blocks. -# So, validators should stop the state machine, wait for some blocks, and then restart the state machine to avoid panic. 
-double_sign_check_height = 0 - -# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) -skip_timeout_commit = false - -# EmptyBlocks mode and possible interval between empty blocks -create_empty_blocks = true -create_empty_blocks_interval = "0s" - -# Reactor sleep duration parameters -peer_gossip_sleep_duration = "100ms" -peer_query_maj23_sleep_duration = "2s" - -####################################################### -### Transaction Indexer Configuration Options ### -####################################################### -[tx_index] - -# What indexer to use for transactions -# -# The application will set which txs to index. In some cases a node operator will be able -# to decide which txs to index based on configuration set in the application. -# -# Options: -# 1) "null" -# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). -# - When "kv" is chosen "tx.height" and "tx.hash" will always be indexed. -indexer = "kv" - -####################################################### -### Instrumentation Configuration Options ### -####################################################### -[instrumentation] - -# When true, Prometheus metrics are served under /metrics on -# PrometheusListenAddr. -# Check out the documentation for the list of available metrics. -prometheus = false - -# Address to listen for Prometheus collector(s) connections -prometheus_listen_addr = ":26660" - -# Maximum number of simultaneous connections. -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -max_open_connections = 3 - -# Instrumentation namespace -namespace = "tendermint" diff --git a/ci/chains/gaia/v5.0.8/ibc-1/config/genesis.json b/ci/chains/gaia/v5.0.8/ibc-1/config/genesis.json deleted file mode 100644 index d1177e4ac8..0000000000 --- a/ci/chains/gaia/v5.0.8/ibc-1/config/genesis.json +++ /dev/null @@ -1,342 +0,0 @@ -{ - "genesis_time": "2021-11-04T15:47:42.444060916Z", - "chain_id": "ibc-1", - "initial_height": "1", - "consensus_params": { - "block": { - "max_bytes": "22020096", - "max_gas": "-1", - "time_iota_ms": "1000" - }, - "evidence": { - "max_age_num_blocks": "100000", - "max_age_duration": "172800000000000", - "max_bytes": "1048576" - }, - "validator": { - "pub_key_types": [ - "ed25519" - ] - }, - "version": {} - }, - "app_hash": "", - "app_state": { - "auth": { - "params": { - "max_memo_characters": "256", - "tx_sig_limit": "7", - "tx_size_cost_per_byte": "10", - "sig_verify_cost_ed25519": "590", - "sig_verify_cost_secp256k1": "1000" - }, - "accounts": [ - { - "@type": "/cosmos.auth.v1beta1.BaseAccount", - "address": "cosmos1cyy7533l8l3alkmdnqph5rqeu2mdgn9q85ulcx", - "pub_key": null, - "account_number": "0", - "sequence": "0" - }, - { - "@type": "/cosmos.auth.v1beta1.BaseAccount", - "address": "cosmos1j6q82mthnkytmdr63u942g43xkqjcwssw6e2x9", - "pub_key": null, - "account_number": "0", - "sequence": "0" - }, - { - "@type": "/cosmos.auth.v1beta1.BaseAccount", - "address": "cosmos1xaz9g9p9a6lcqznyakd3nc5qu85cz6zuyw5ytv", - "pub_key": null, - "account_number": "0", - "sequence": "0" - } - ] - }, - "bank": { - "params": { - "send_enabled": [], - "default_send_enabled": true - }, - "balances": [ - { - "address": "cosmos1xaz9g9p9a6lcqznyakd3nc5qu85cz6zuyw5ytv", - "coins": [ - { - "denom": "stake", - "amount": "100000000000" - } - ] - }, - { - "address": "cosmos1j6q82mthnkytmdr63u942g43xkqjcwssw6e2x9", - "coins": [ - { - "denom": "samoleans", - "amount": 
"100000000000" - }, - { - "denom": "stake", - "amount": "100000000000" - } - ] - }, - { - "address": "cosmos1cyy7533l8l3alkmdnqph5rqeu2mdgn9q85ulcx", - "coins": [ - { - "denom": "samoleans", - "amount": "100000000000" - }, - { - "denom": "stake", - "amount": "100000000000" - } - ] - } - ], - "supply": [ - { - "denom": "samoleans", - "amount": "200000000000" - }, - { - "denom": "stake", - "amount": "300000000000" - } - ], - "denom_metadata": [] - }, - "capability": { - "index": "1", - "owners": [] - }, - "crisis": { - "constant_fee": { - "denom": "stake", - "amount": "1000" - } - }, - "distribution": { - "params": { - "community_tax": "0.020000000000000000", - "base_proposer_reward": "0.010000000000000000", - "bonus_proposer_reward": "0.040000000000000000", - "withdraw_addr_enabled": true - }, - "fee_pool": { - "community_pool": [] - }, - "delegator_withdraw_infos": [], - "previous_proposer": "", - "outstanding_rewards": [], - "validator_accumulated_commissions": [], - "validator_historical_rewards": [], - "validator_current_rewards": [], - "delegator_starting_infos": [], - "validator_slash_events": [] - }, - "evidence": { - "evidence": [] - }, - "genutil": { - "gen_txs": [ - { - "body": { - "messages": [ - { - "@type": "/cosmos.staking.v1beta1.MsgCreateValidator", - "description": { - "moniker": "ibc-1", - "identity": "", - "website": "", - "security_contact": "", - "details": "" - }, - "commission": { - "rate": "0.100000000000000000", - "max_rate": "0.200000000000000000", - "max_change_rate": "0.010000000000000000" - }, - "min_self_delegation": "1", - "delegator_address": "cosmos1xaz9g9p9a6lcqznyakd3nc5qu85cz6zuyw5ytv", - "validator_address": "cosmosvaloper1xaz9g9p9a6lcqznyakd3nc5qu85cz6zup6q38l", - "pubkey": { - "@type": "/cosmos.crypto.ed25519.PubKey", - "key": "Q3lqhqi32SHvUpEqoVMHiRGXbThJ7Sbz98IYFIM+guo=" - }, - "value": { - "denom": "stake", - "amount": "100000000000" - } - } - ], - "memo": "b2617c1100350fcb281cc6749f1f79956222be5a@192.168.50.214:26656", - "timeout_height": "0", - "extension_options": [], - "non_critical_extension_options": [] - }, - "auth_info": { - "signer_infos": [ - { - "public_key": { - "@type": "/cosmos.crypto.secp256k1.PubKey", - "key": "AgfKYVHZJ3hgLBEDTuPJ6yf8akWkCVlsLunRmZ9PVh4w" - }, - "mode_info": { - "single": { - "mode": "SIGN_MODE_DIRECT" - } - }, - "sequence": "0" - } - ], - "fee": { - "amount": [], - "gas_limit": "200000", - "payer": "", - "granter": "" - } - }, - "signatures": [ - "1KY7yzImO543wGvV8kIlxDgUxm6Tcc3ajtLLYV89X/tTX5A12F6IdBBibcU52joSM9eAFg2pUDhWXlFRybqaZA==" - ] - } - ] - }, - "gov": { - "starting_proposal_id": "1", - "deposits": [], - "votes": [], - "proposals": [], - "deposit_params": { - "min_deposit": [ - { - "denom": "stake", - "amount": "10000000" - } - ], - "max_deposit_period": "200s" - }, - "voting_params": { - "voting_period": "200s" - }, - "tally_params": { - "quorum": "0.334000000000000000", - "threshold": "0.500000000000000000", - "veto_threshold": "0.334000000000000000" - } - }, - "ibc": { - "client_genesis": { - "clients": [], - "clients_consensus": [], - "clients_metadata": [], - "params": { - "allowed_clients": [ - "06-solomachine", - "07-tendermint" - ] - }, - "create_localhost": false, - "next_client_sequence": "0" - }, - "connection_genesis": { - "connections": [], - "client_connection_paths": [], - "next_connection_sequence": "0" - }, - "channel_genesis": { - "channels": [], - "acknowledgements": [], - "commitments": [], - "receipts": [], - "send_sequences": [], - "recv_sequences": [], - "ack_sequences": [], - 
"next_channel_sequence": "0" - } - }, - "liquidity": { - "params": { - "pool_types": [ - { - "id": 1, - "name": "StandardLiquidityPool", - "min_reserve_coin_num": 2, - "max_reserve_coin_num": 2, - "description": "Standard liquidity pool with pool price function X/Y, ESPM constraint, and two kinds of reserve coins" - } - ], - "min_init_deposit_amount": "1000000", - "init_pool_coin_mint_amount": "1000000", - "max_reserve_coin_amount": "0", - "pool_creation_fee": [ - { - "denom": "stake", - "amount": "40000000" - } - ], - "swap_fee_rate": "0.003000000000000000", - "withdraw_fee_rate": "0.000000000000000000", - "max_order_amount_ratio": "0.100000000000000000", - "unit_batch_height": 1, - "circuit_breaker_enabled": false - }, - "pool_records": [] - }, - "mint": { - "minter": { - "inflation": "0.130000000000000000", - "annual_provisions": "0.000000000000000000" - }, - "params": { - "mint_denom": "stake", - "inflation_rate_change": "0.130000000000000000", - "inflation_max": "0.200000000000000000", - "inflation_min": "0.070000000000000000", - "goal_bonded": "0.670000000000000000", - "blocks_per_year": "6311520" - } - }, - "params": null, - "slashing": { - "params": { - "signed_blocks_window": "100", - "min_signed_per_window": "0.500000000000000000", - "downtime_jail_duration": "600s", - "slash_fraction_double_sign": "0.050000000000000000", - "slash_fraction_downtime": "0.010000000000000000" - }, - "signing_infos": [], - "missed_blocks": [] - }, - "staking": { - "params": { - "unbonding_time": "1814400s", - "max_validators": 100, - "max_entries": 7, - "historical_entries": 10000, - "bond_denom": "stake" - }, - "last_total_power": "0", - "last_validator_powers": [], - "validators": [], - "delegations": [], - "unbonding_delegations": [], - "redelegations": [], - "exported": false - }, - "transfer": { - "port_id": "transfer", - "denom_traces": [], - "params": { - "send_enabled": true, - "receive_enabled": true - } - }, - "upgrade": {}, - "vesting": {} - } -} \ No newline at end of file diff --git a/ci/chains/gaia/v5.0.8/ibc-1/config/gentx/gentx-b2617c1100350fcb281cc6749f1f79956222be5a.json b/ci/chains/gaia/v5.0.8/ibc-1/config/gentx/gentx-b2617c1100350fcb281cc6749f1f79956222be5a.json deleted file mode 100644 index f9b223e349..0000000000 --- a/ci/chains/gaia/v5.0.8/ibc-1/config/gentx/gentx-b2617c1100350fcb281cc6749f1f79956222be5a.json +++ /dev/null @@ -1 +0,0 @@ -{"body":{"messages":[{"@type":"/cosmos.staking.v1beta1.MsgCreateValidator","description":{"moniker":"ibc-1","identity":"","website":"","security_contact":"","details":""},"commission":{"rate":"0.100000000000000000","max_rate":"0.200000000000000000","max_change_rate":"0.010000000000000000"},"min_self_delegation":"1","delegator_address":"cosmos1xaz9g9p9a6lcqznyakd3nc5qu85cz6zuyw5ytv","validator_address":"cosmosvaloper1xaz9g9p9a6lcqznyakd3nc5qu85cz6zup6q38l","pubkey":{"@type":"/cosmos.crypto.ed25519.PubKey","key":"Q3lqhqi32SHvUpEqoVMHiRGXbThJ7Sbz98IYFIM+guo="},"value":{"denom":"stake","amount":"100000000000"}}],"memo":"b2617c1100350fcb281cc6749f1f79956222be5a@192.168.50.214:26656","timeout_height":"0","extension_options":[],"non_critical_extension_options":[]},"auth_info":{"signer_infos":[{"public_key":{"@type":"/cosmos.crypto.secp256k1.PubKey","key":"AgfKYVHZJ3hgLBEDTuPJ6yf8akWkCVlsLunRmZ9PVh4w"},"mode_info":{"single":{"mode":"SIGN_MODE_DIRECT"}},"sequence":"0"}],"fee":{"amount":[],"gas_limit":"200000","payer":"","granter":""}},"signatures":["1KY7yzImO543wGvV8kIlxDgUxm6Tcc3ajtLLYV89X/tTX5A12F6IdBBibcU52joSM9eAFg2pUDhWXlFRybqaZA=="]} diff --git 
a/ci/chains/gaia/v5.0.8/ibc-1/config/node_key.json b/ci/chains/gaia/v5.0.8/ibc-1/config/node_key.json deleted file mode 100644 index 1e4f16abfb..0000000000 --- a/ci/chains/gaia/v5.0.8/ibc-1/config/node_key.json +++ /dev/null @@ -1 +0,0 @@ -{"priv_key":{"type":"tendermint/PrivKeyEd25519","value":"iEFaFp4baiAkuUQGKLM6cDiEaJ1wNdWKcZmK/vXqmmcDMwlXEQKRIRv/2RDKG75dRAm9vL8oMa448/NUUpJDCA=="}} \ No newline at end of file diff --git a/ci/chains/gaia/v5.0.8/ibc-1/config/priv_validator_key.json b/ci/chains/gaia/v5.0.8/ibc-1/config/priv_validator_key.json deleted file mode 100644 index eec913ba7f..0000000000 --- a/ci/chains/gaia/v5.0.8/ibc-1/config/priv_validator_key.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "address": "AEC2C6BCE9572CB47610ADE77F3C075DC6B3D717", - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "Q3lqhqi32SHvUpEqoVMHiRGXbThJ7Sbz98IYFIM+guo=" - }, - "priv_key": { - "type": "tendermint/PrivKeyEd25519", - "value": "1+GCG7iNUe8caJWcaPhMHxUIcUZPdUo9MIN7UGSv8PZDeWqGqLfZIe9SkSqhUweJEZdtOEntJvP3whgUgz6C6g==" - } -} \ No newline at end of file diff --git a/ci/chains/gaia/v5.0.8/ibc-1/keyring-test/3744541425eebf800a64ed9b19e280e1e981685c.address b/ci/chains/gaia/v5.0.8/ibc-1/keyring-test/3744541425eebf800a64ed9b19e280e1e981685c.address deleted file mode 100644 index 518db809e2..0000000000 --- a/ci/chains/gaia/v5.0.8/ibc-1/keyring-test/3744541425eebf800a64ed9b19e280e1e981685c.address +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0xMS0wNCAxMTo0Nzo0My41Mjc4NTQxMTUgLTA0MDAgRURUIG09KzAuMDc0MTQyODI1IiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiT0tGblVsckZOQnBwY1hiOCJ9.v4uS3F3sRZaXHOjSqg7ikPWiPaRcVwlfjvNUFnDhN7ud7ZNn9h29TQ.y3FESL0YTY7Qb198.UBdUf8mh2WtewKbMiDZ1fcw-TvuoMMMkKDyAjhHJRMa0nemizWCyqx_5gdzKkmD00IgZwDKfrT4BCu-axk-Ou6_O__XVerJGVQrgg28gi6dkWWy9Uju3b8KpB-oucibpwffc6JpDMD6KuqhawtzZEHq43yZob-IBIOPJba3MdWaCGgEdf12sHX_ckghjy-ZYExg92M1WID8JwbEfjBp_14FmJcvRICEsfkacs1ceZK0C0Gt63swZ59ue.6saz2fRViVfN_eCAzpS2Iw \ No newline at end of file diff --git a/ci/chains/gaia/v5.0.8/ibc-1/keyring-test/9680756d779d88bdb47a8f0b5522b135812c3a10.address b/ci/chains/gaia/v5.0.8/ibc-1/keyring-test/9680756d779d88bdb47a8f0b5522b135812c3a10.address deleted file mode 100644 index 30eb62ba8c..0000000000 --- a/ci/chains/gaia/v5.0.8/ibc-1/keyring-test/9680756d779d88bdb47a8f0b5522b135812c3a10.address +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0xMS0wNCAxMTo0Nzo0NS42ODk2NDExNiAtMDQwMCBFRFQgbT0rMC4wNjIzMjY4NzEiLCJlbmMiOiJBMjU2R0NNIiwicDJjIjo4MTkyLCJwMnMiOiJnWVV5YlV3b2lLb3lEaGlxIn0.QCgBZNULcfesHuKvVDbDJQDK_LmptOir7iRwtsFlhqe9jvxv2FPNAg.ZuySvpSIgoKLCoRs.cugISI2Y5eEjl2T93YxTy5zZQrK5O5c0FKR-lvlRfiEMd8cDqei4pykU8RiHYKU4H-CPsuLLhRX3S7iooasDoCLdOo9a9MKo861yBx9vayMc0sz-zZN8OCoIyDQ9pa5iHNuzL4kSJOJGUrlQMk3bKNcjALx_-W9qBYQlQ6DN8wnowPL0NqblfdE92aD6cq8DOA1gT8lltMs7Q7psLPKKt18OCyhYi8KSEjQu3HALbPPOiRJHcTE.YSN_T3AQZwJx7k5MMxqhzg \ No newline at end of file diff --git a/ci/chains/gaia/v5.0.8/ibc-1/keyring-test/c109ea463f3fe3dfdb6d98037a0c19e2b6d44ca0.address b/ci/chains/gaia/v5.0.8/ibc-1/keyring-test/c109ea463f3fe3dfdb6d98037a0c19e2b6d44ca0.address deleted file mode 100644 index 8c2f468fc3..0000000000 --- a/ci/chains/gaia/v5.0.8/ibc-1/keyring-test/c109ea463f3fe3dfdb6d98037a0c19e2b6d44ca0.address +++ /dev/null @@ -1 +0,0 @@ 
-eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0xMS0wNCAxMTo0Nzo0NC42MTM2NzQ5MiAtMDQwMCBFRFQgbT0rMC4wNjg3NjczMTYiLCJlbmMiOiJBMjU2R0NNIiwicDJjIjo4MTkyLCJwMnMiOiJFdFBGZURzSUgzWjlBeE50In0.KwEj3f-Mmxb0t-i5GlKga5zM7jka30UbzYW-Sngyz7jecvjdflkEsw.mTTn8vgmlEJTa9OZ.e3yxoN3gqXU_eUXoeYICL-QcEevYvmp44oOBDTovuQK2jNVJPwLmS0Bp6Pzqe-WHiumBB2OSrA51kHPvj6hWIHRCmO7VQicODLMLSi1o4n3p3jR3pQJepz0JEoc8nMRr-QknJALFNoL1JXR6hSGum2QXFl4u9aBXOwkPStUCJG85UZ36SnXKCRsXvDkC7Z5DLntNxp_WGq2mfh20wJRYcqoPN9o0nVr2YF1JShqIiKfe5g.5e1lpxCz9Fi3pks7vWVFoQ \ No newline at end of file diff --git a/ci/chains/gaia/v5.0.8/ibc-1/keyring-test/user.info b/ci/chains/gaia/v5.0.8/ibc-1/keyring-test/user.info deleted file mode 100644 index 7b7f6c3d9c..0000000000 --- a/ci/chains/gaia/v5.0.8/ibc-1/keyring-test/user.info +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0xMS0wNCAxMTo0Nzo0NC42MDYxNDA1NTggLTA0MDAgRURUIG09KzAuMDYxMjMyOTQzIiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoidTJpNEQ3RmFqZHlXRXFDeCJ9.ylqk8B5MaEwCYmLIukes3XkXxGKkaB6kFMVYNcV0_-5z29vt9WUQ0Q.bvkej9imldLoFWcm.jRKIDMaq2T5grW8s7TtiK8_hMM538Ivi7soGIyDxU3PomUqLlL3ul2L2mgwmcmICm210AToxc3Fx4KIQbf6bB-hesPcfXNQgFsJuTpXAtZ8DrZg22xs_8ZzgllN14LWx6wAGmWsinFUMS_RLslD3CJFddzO39j2UWFJIBKLo8zkfnvIDWbe2Vda0RVrUPiOMbgzqaJM73DDgXxpir-aER2UtSaeBHFbi_DxxNyLrVeS90Lu8A5LfY3WWc681Fv_Cvs8XTIm0VI9dVsq-qFejZNy9dN--jz-x9SJcEMwb_QQ32El7sB1Yo6VfBVWCRvWmGQjDqV68Wg7UNM0biKc4CHzOac2OIHM.Vr8XbTJkkAOAW4cFuZ28OA \ No newline at end of file diff --git a/ci/chains/gaia/v5.0.8/ibc-1/keyring-test/user2.info b/ci/chains/gaia/v5.0.8/ibc-1/keyring-test/user2.info deleted file mode 100644 index 45b722ac06..0000000000 --- a/ci/chains/gaia/v5.0.8/ibc-1/keyring-test/user2.info +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0xMS0wNCAxMTo0Nzo0NS42ODM2MzI1OTUgLTA0MDAgRURUIG09KzAuMDU2MzE4MzI3IiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiYXgwVTVtZ29sRmxUMEZFNCJ9.yaKh9b08Pi26qhmKuwG2GgMgmDWY_KV6zb1yJ_I8-JZHlg61QN146Q.AFqz4pfoU65bFkza.vHSXUU9ElCcFpHuyOr_lf3FuSfobS6Nn98YXNjOcsj5tcw08zFb3jTeMd90U4b2yVl-g_jUzBwz6tVTsMvKXjEyyQP4zbLK9RiYAvo2d4Qfz6Cqof7lpFE1nrs8hzER7rTkyEoZ37Bfl6D-cHWLZip4Wlv8v1GEGrbQnvRRpUNhBWRNHoR_ZvD47C_KRZH2DdT-ieLJRMB1pIVWMjgATQdhfimBwlfBNFph54ajPnc33nEx5YVwgUXqmZBeJYoiNUO5MmwR1EYHyLtKQUFzvkvj_nzkANbog2xJLYcLuUaw2JLc0faPp5gsUZnUer5ms18FoFKsd80eRaZ4nCXWfvERDRl_812qj.IIfR3WCV_7WYXUbytSzgvA \ No newline at end of file diff --git a/ci/chains/gaia/v5.0.8/ibc-1/keyring-test/validator.info b/ci/chains/gaia/v5.0.8/ibc-1/keyring-test/validator.info deleted file mode 100644 index 95b59b0c61..0000000000 --- a/ci/chains/gaia/v5.0.8/ibc-1/keyring-test/validator.info +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0xMS0wNCAxMTo0Nzo0My41MTkwMDQyNzIgLTA0MDAgRURUIG09KzAuMDY1MjkyOTkxIiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiVUFZVzROV0NGU0NIMG5fUCJ9.jgdjIazR7B2dqQ-d2G_C8ViJN6jcaVzwSHZJyD4huBtEa7LkpzCxxQ.pgLPbRYyu2l6RUKY.FXQaljl1rrsxyMqjGKyJzN2GQQdrB34LYqQWSSYSnc5TfevEYyc7QwFbgeu-DIAqo_xxmmBTLGftY1X_R5WCpmXqyhOkxVKpPSsrHsSwBdNalxa9AHyQB27RbMOe-gqte3MyUd0XXaTl365nLA7vCPiJnGsNUDjOLbu-lEJWm3bN36daH43Ifb1BACYH7TndPU3c8hOOmyxHVlvYHyZutVfqCh9kr80GIXVTYwWTV5LMcKGtgfc-VmljYGAaI7IqdeiMlsZcr_l8TSeHZHNFGxZXc8jBHezMxr8qnD_p2UKnYylIPTfu4U_Ea0yeWHuIQBCyWTXyf02Bf88foTXGJNiScIWJxLTLdbVtem59pT6OCA5x.IErhLYYhkKMX_UlVlDtXxQ \ No newline at end of file diff --git a/ci/chains/gaia/v5.0.8/ibc-1/user2_seed.json b/ci/chains/gaia/v5.0.8/ibc-1/user2_seed.json deleted file mode 100644 index 48cde56108..0000000000 --- 
a/ci/chains/gaia/v5.0.8/ibc-1/user2_seed.json +++ /dev/null @@ -1 +0,0 @@ -{"name":"user2","type":"local","address":"cosmos1j6q82mthnkytmdr63u942g43xkqjcwssw6e2x9","pubkey":"cosmospub1addwnpepqdjxwx0t2kj2qyw4q6gj8mr40y6mtljdsa7u6hhhteaw32vshnucj59wprp","mnemonic":"nurse ordinary pulp find square senior club great balcony monster faint glass arrest curve message orange fan syrup pepper smoke medal tent sheriff grocery"} diff --git a/ci/chains/gaia/v5.0.8/ibc-1/user_seed.json b/ci/chains/gaia/v5.0.8/ibc-1/user_seed.json deleted file mode 100644 index 4b94f9d064..0000000000 --- a/ci/chains/gaia/v5.0.8/ibc-1/user_seed.json +++ /dev/null @@ -1 +0,0 @@ -{"name":"user","type":"local","address":"cosmos1cyy7533l8l3alkmdnqph5rqeu2mdgn9q85ulcx","pubkey":"cosmospub1addwnpepqd8rasvzjylp56ezk3ydvrte0k8s9gw3a3au00rdqapu80tqqk66q4sae27","mnemonic":"toddler strategy wine bridge short position animal brown cream slogan merry beach dust exhibit stem wire once offer name van pilot code course observe"} diff --git a/ci/chains/gaia/v5.0.8/ibc-1/validator_seed.json b/ci/chains/gaia/v5.0.8/ibc-1/validator_seed.json deleted file mode 100644 index eeb2b00014..0000000000 --- a/ci/chains/gaia/v5.0.8/ibc-1/validator_seed.json +++ /dev/null @@ -1 +0,0 @@ -{"name":"validator","type":"local","address":"cosmos1xaz9g9p9a6lcqznyakd3nc5qu85cz6zuyw5ytv","pubkey":"cosmospub1addwnpepqgru5c23mynhscpvzyp5ac7favnlc6j95sy4jmpwa8gen8602c0rqemmd7c","mnemonic":"prepare detect federal maple ability require blood slam hazard universe soon bubble simple canal rapid style proud thing horn warfare galaxy wood exhaust advance"} diff --git a/ci/chains/gaia/v6.0.0/ibc-0/config/addrbook.json b/ci/chains/gaia/v6.0.0/ibc-0/config/addrbook.json deleted file mode 100644 index 388c554d6a..0000000000 --- a/ci/chains/gaia/v6.0.0/ibc-0/config/addrbook.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "key": "abcfca3214aae21d8fb33c29", - "addrs": [] -} \ No newline at end of file diff --git a/ci/chains/gaia/v6.0.0/ibc-0/config/app.toml b/ci/chains/gaia/v6.0.0/ibc-0/config/app.toml deleted file mode 100644 index 1fc0b791f4..0000000000 --- a/ci/chains/gaia/v6.0.0/ibc-0/config/app.toml +++ /dev/null @@ -1,192 +0,0 @@ -# This is a TOML config file. -# For more information, see https://github.com/toml-lang/toml - -############################################################################### -### Base Configuration ### -############################################################################### - -# The minimum gas prices a validator is willing to accept for processing a -# transaction. A transaction's fees must meet the minimum of any denomination -# specified in this config (e.g. 0.25token1;0.0001token2). -minimum-gas-prices = "" - -# default: the last 100 states are kept in addition to every 500th state; pruning at 10 block intervals -# nothing: all historic states will be saved, nothing will be deleted (i.e. archiving node) -# everything: all saved states will be deleted, storing only the current state; pruning at 10 block intervals -# custom: allow pruning options to be manually specified through 'pruning-keep-recent', 'pruning-keep-every', and 'pruning-interval' -pruning = "default" - -# These are applied if and only if the pruning strategy is custom. -pruning-keep-recent = "0" -pruning-keep-every = "0" -pruning-interval = "0" - -# HaltHeight contains a non-zero block height at which a node will gracefully -# halt and shutdown that can be used to assist upgrades and testing. -# -# Note: Commitment of state will be attempted on the corresponding block. 
-halt-height = 0 - -# HaltTime contains a non-zero minimum block time (in Unix seconds) at which -# a node will gracefully halt and shutdown that can be used to assist upgrades -# and testing. -# -# Note: Commitment of state will be attempted on the corresponding block. -halt-time = 0 - -# MinRetainBlocks defines the minimum block height offset from the current -# block being committed, such that all blocks past this offset are pruned -# from Tendermint. It is used as part of the process of determining the -# ResponseCommit.RetainHeight value during ABCI Commit. A value of 0 indicates -# that no blocks should be pruned. -# -# This configuration value is only responsible for pruning Tendermint blocks. -# It has no bearing on application state pruning which is determined by the -# "pruning-*" configurations. -# -# Note: Tendermint block pruning is dependant on this parameter in conunction -# with the unbonding (safety threshold) period, state pruning and state sync -# snapshot parameters to determine the correct minimum value of -# ResponseCommit.RetainHeight. -min-retain-blocks = 0 - -# InterBlockCache enables inter-block caching. -inter-block-cache = true - -# IndexEvents defines the set of events in the form {eventType}.{attributeKey}, -# which informs Tendermint what to index. If empty, all events will be indexed. -# -# Example: -# ["message.sender", "message.recipient"] -index-events = [] - -############################################################################### -### Telemetry Configuration ### -############################################################################### - -[telemetry] - -# Prefixed with keys to separate services. -service-name = "" - -# Enabled enables the application telemetry functionality. When enabled, -# an in-memory sink is also enabled by default. Operators may also enabled -# other sinks such as Prometheus. -enabled = false - -# Enable prefixing gauge values with hostname. -enable-hostname = false - -# Enable adding hostname to labels. -enable-hostname-label = false - -# Enable adding service to labels. -enable-service-label = false - -# PrometheusRetentionTime, when positive, enables a Prometheus metrics sink. -prometheus-retention-time = 0 - -# GlobalLabels defines a global set of name/value label tuples applied to all -# metrics emitted using the wrapper functions defined in telemetry package. -# -# Example: -# [["chain_id", "cosmoshub-1"]] -global-labels = [ -] - -############################################################################### -### API Configuration ### -############################################################################### - -[api] - -# Enable defines if the API server should be enabled. -enable = false - -# Swagger defines if swagger documentation should automatically be registered. -swagger = false - -# Address defines the API server to listen on. -address = "tcp://0.0.0.0:1317" - -# MaxOpenConnections defines the number of maximum open connections. -max-open-connections = 1000 - -# RPCReadTimeout defines the Tendermint RPC read timeout (in seconds). -rpc-read-timeout = 10 - -# RPCWriteTimeout defines the Tendermint RPC write timeout (in seconds). -rpc-write-timeout = 0 - -# RPCMaxBodyBytes defines the Tendermint maximum response body (in bytes). -rpc-max-body-bytes = 1000000 - -# EnableUnsafeCORS defines if CORS should be enabled (unsafe - use it at your own risk). 
-enabled-unsafe-cors = false - -############################################################################### -### Rosetta Configuration ### -############################################################################### - -[rosetta] - -# Enable defines if the Rosetta API server should be enabled. -enable = false - -# Address defines the Rosetta API server to listen on. -address = ":8080" - -# Network defines the name of the blockchain that will be returned by Rosetta. -blockchain = "app" - -# Network defines the name of the network that will be returned by Rosetta. -network = "network" - -# Retries defines the number of retries when connecting to the node before failing. -retries = 3 - -# Offline defines if Rosetta server should run in offline mode. -offline = false - -############################################################################### -### gRPC Configuration ### -############################################################################### - -[grpc] - -# Enable defines if the gRPC server should be enabled. -enable = true - -# Address defines the gRPC server address to bind to. -address = "0.0.0.0:9090" - -############################################################################### -### gRPC Web Configuration ### -############################################################################### - -[grpc-web] - -# GRPCWebEnable defines if the gRPC-web should be enabled. -# NOTE: gRPC must also be enabled, otherwise, this configuration is a no-op. -enable = true - -# Address defines the gRPC-web server address to bind to. -address = "0.0.0.0:9091" - -# EnableUnsafeCORS defines if CORS should be enabled (unsafe - use it at your own risk). -enable-unsafe-cors = false - -############################################################################### -### State Sync Configuration ### -############################################################################### - -# State sync snapshots allow other nodes to rapidly join the network without replaying historical -# blocks, instead downloading and applying a snapshot of the application state at a given height. -[state-sync] - -# snapshot-interval specifies the block interval at which local state sync snapshots are -# taken (0 to disable). Must be a multiple of pruning-keep-every. -snapshot-interval = 0 - -# snapshot-keep-recent specifies the number of recent snapshots to keep and serve (0 to keep all). -snapshot-keep-recent = 2 diff --git a/ci/chains/gaia/v6.0.0/ibc-0/config/client.toml b/ci/chains/gaia/v6.0.0/ibc-0/config/client.toml deleted file mode 100644 index 222695a3f8..0000000000 --- a/ci/chains/gaia/v6.0.0/ibc-0/config/client.toml +++ /dev/null @@ -1,17 +0,0 @@ -# This is a TOML config file. 
-# For more information, see https://github.com/toml-lang/toml - -############################################################################### -### Client Configuration ### -############################################################################### - -# The network chain ID -chain-id = "" -# The keyring's backend, where the keys are stored (os|file|kwallet|pass|test|memory) -keyring-backend = "os" -# CLI output format (text|json) -output = "text" -# : to Tendermint RPC interface for this chain -node = "tcp://localhost:26657" -# Transaction broadcasting mode (sync|async|block) -broadcast-mode = "sync" diff --git a/ci/chains/gaia/v6.0.0/ibc-0/config/config.toml b/ci/chains/gaia/v6.0.0/ibc-0/config/config.toml deleted file mode 100644 index eb6f4b2952..0000000000 --- a/ci/chains/gaia/v6.0.0/ibc-0/config/config.toml +++ /dev/null @@ -1,401 +0,0 @@ -# This is a TOML config file. -# For more information, see https://github.com/toml-lang/toml - -# NOTE: Any path below can be absolute (e.g. "/var/myawesomeapp/data") or -# relative to the home directory (e.g. "data"). The home directory is -# "$HOME/.tendermint" by default, but could be changed via $TMHOME env variable -# or --home cmd flag. - -####################################################################### -### Main Base Config Options ### -####################################################################### - -# TCP or UNIX socket address of the ABCI application, -# or the name of an ABCI application compiled in with the Tendermint binary -proxy_app = "tcp://127.0.0.1:26658" - -# A custom human readable name for this node -moniker = "ibc-0" - -# If this node is many blocks behind the tip of the chain, FastSync -# allows them to catchup quickly by downloading blocks in parallel -# and verifying their commits -fast_sync = true - -# Database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb -# * goleveldb (github.com/syndtr/goleveldb - most popular implementation) -# - pure go -# - stable -# * cleveldb (uses levigo wrapper) -# - fast -# - requires gcc -# - use cleveldb build tag (go build -tags cleveldb) -# * boltdb (uses etcd's fork of bolt - github.com/etcd-io/bbolt) -# - EXPERIMENTAL -# - may be faster is some use-cases (random reads - indexer) -# - use boltdb build tag (go build -tags boltdb) -# * rocksdb (uses github.com/tecbot/gorocksdb) -# - EXPERIMENTAL -# - requires gcc -# - use rocksdb build tag (go build -tags rocksdb) -# * badgerdb (uses github.com/dgraph-io/badger) -# - EXPERIMENTAL -# - use badgerdb build tag (go build -tags badgerdb) -db_backend = "goleveldb" - -# Database directory -db_dir = "data" - -# Output level for logging, including package level options -log_level = "info" - -# Output format: 'plain' (colored text) or 'json' -log_format = "plain" - -##### additional base config options ##### - -# Path to the JSON file containing the initial validator set and other meta data -genesis_file = "config/genesis.json" - -# Path to the JSON file containing the private key to use as a validator in the consensus protocol -priv_validator_key_file = "config/priv_validator_key.json" - -# Path to the JSON file containing the last sign state of a validator -priv_validator_state_file = "data/priv_validator_state.json" - -# TCP or UNIX socket address for Tendermint to listen on for -# connections from an external PrivValidator process -priv_validator_laddr = "" - -# Path to the JSON file containing the private key to use for node authentication in the p2p protocol -node_key_file = "config/node_key.json" - -# 
Mechanism to connect to the ABCI application: socket | grpc -abci = "socket" - -# If true, query the ABCI app on connecting to a new peer -# so the app can decide if we should keep the connection or not -filter_peers = false - - -####################################################################### -### Advanced Configuration Options ### -####################################################################### - -####################################################### -### RPC Server Configuration Options ### -####################################################### -[rpc] - -# TCP or UNIX socket address for the RPC server to listen on -laddr = "tcp://0.0.0.0:26657" - -# A list of origins a cross-domain request can be executed from -# Default value '[]' disables cors support -# Use '["*"]' to allow any origin -cors_allowed_origins = [] - -# A list of methods the client is allowed to use with cross-domain requests -cors_allowed_methods = ["HEAD", "GET", "POST", ] - -# A list of non simple headers the client is allowed to use with cross-domain requests -cors_allowed_headers = ["Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time", ] - -# TCP or UNIX socket address for the gRPC server to listen on -# NOTE: This server only supports /broadcast_tx_commit -grpc_laddr = "" - -# Maximum number of simultaneous connections. -# Does not include RPC (HTTP&WebSocket) connections. See max_open_connections -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} -# 1024 - 40 - 10 - 50 = 924 = ~900 -grpc_max_open_connections = 900 - -# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool -unsafe = false - -# Maximum number of simultaneous connections (including WebSocket). -# Does not include gRPC connections. See grpc_max_open_connections -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} -# 1024 - 40 - 10 - 50 = 924 = ~900 -max_open_connections = 900 - -# Maximum number of unique clientIDs that can /subscribe -# If you're using /broadcast_tx_commit, set to the estimated maximum number -# of broadcast_tx_commit calls per block. -max_subscription_clients = 100 - -# Maximum number of unique queries a given client can /subscribe to -# If you're using GRPC (or Local RPC client) and /broadcast_tx_commit, set to -# the estimated # maximum number of broadcast_tx_commit calls per block. -max_subscriptions_per_client = 5 - -# How long to wait for a tx to be committed during /broadcast_tx_commit. -# WARNING: Using a value larger than 10s will result in increasing the -# global HTTP write timeout, which applies to all connections and endpoints. -# See https://github.com/tendermint/tendermint/issues/3435 -timeout_broadcast_tx_commit = "10s" - -# Maximum size of request body, in bytes -max_body_bytes = 1000000 - -# Maximum size of request header, in bytes -max_header_bytes = 1048576 - -# The path to a file containing certificate that is used to create the HTTPS server. -# Might be either absolute path or path related to Tendermint's config directory. -# If the certificate is signed by a certificate authority, -# the certFile should be the concatenation of the server's certificate, any intermediates, -# and the CA's certificate. 
-# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. -# Otherwise, HTTP server is run. -tls_cert_file = "" - -# The path to a file containing matching private key that is used to create the HTTPS server. -# Might be either absolute path or path related to Tendermint's config directory. -# NOTE: both tls-cert-file and tls-key-file must be present for Tendermint to create HTTPS server. -# Otherwise, HTTP server is run. -tls_key_file = "" - -# pprof listen address (https://golang.org/pkg/net/http/pprof) -pprof_laddr = "localhost:6060" - -####################################################### -### P2P Configuration Options ### -####################################################### -[p2p] - -# Address to listen for incoming connections -laddr = "tcp://0.0.0.0:26656" - -# Address to advertise to peers for them to dial -# If empty, will use the same port as the laddr, -# and will introspect on the listener or use UPnP -# to figure out the address. ip and port are required -# example: 159.89.10.97:26656 -external_address = "" - -# Comma separated list of seed nodes to connect to -seeds = "" - -# Comma separated list of nodes to keep persistent connections to -persistent_peers = "" - -# UPNP port forwarding -upnp = false - -# Path to address book -addr_book_file = "config/addrbook.json" - -# Set true for strict address routability rules -# Set false for private or local networks -addr_book_strict = true - -# Maximum number of inbound peers -max_num_inbound_peers = 40 - -# Maximum number of outbound peers to connect to, excluding persistent peers -max_num_outbound_peers = 10 - -# List of node IDs, to which a connection will be (re)established ignoring any existing limits -unconditional_peer_ids = "" - -# Maximum pause when redialing a persistent peer (if zero, exponential backoff is used) -persistent_peers_max_dial_period = "0s" - -# Time to wait before flushing messages out on the connection -flush_throttle_timeout = "100ms" - -# Maximum size of a message packet payload, in bytes -max_packet_msg_payload_size = 1024 - -# Rate at which packets can be sent, in bytes/second -send_rate = 5120000 - -# Rate at which packets can be received, in bytes/second -recv_rate = 5120000 - -# Set true to enable the peer-exchange reactor -pex = true - -# Seed mode, in which node constantly crawls the network and looks for -# peers. If another node asks it for addresses, it responds and disconnects. -# -# Does not work if the peer-exchange reactor is disabled. -seed_mode = false - -# Comma separated list of peer IDs to keep private (will not be gossiped to other peers) -private_peer_ids = "" - -# Toggle to disable guard against peers connecting from the same ip. -allow_duplicate_ip = false - -# Peer connection configuration. -handshake_timeout = "20s" -dial_timeout = "3s" - -####################################################### -### Mempool Configuration Option ### -####################################################### -[mempool] - -recheck = true -broadcast = true -wal_dir = "" - -# Maximum number of transactions in the mempool -size = 5000 - -# Limit the total size of all txs in the mempool. -# This only accounts for raw transactions (e.g. given 1MB transactions and -# max_txs_bytes=5MB, mempool will only accept 5 transactions). 
-max_txs_bytes = 1073741824 - -# Size of the cache (used to filter transactions we saw earlier) in transactions -cache_size = 10000 - -# Do not remove invalid transactions from the cache (default: false) -# Set to true if it's not possible for any invalid transaction to become valid -# again in the future. -keep-invalid-txs-in-cache = false - -# Maximum size of a single transaction. -# NOTE: the max size of a tx transmitted over the network is {max_tx_bytes}. -max_tx_bytes = 1048576 - -# Maximum size of a batch of transactions to send to a peer -# Including space needed by encoding (one varint per transaction). -# XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796 -max_batch_bytes = 0 - -####################################################### -### State Sync Configuration Options ### -####################################################### -[statesync] -# State sync rapidly bootstraps a new node by discovering, fetching, and restoring a state machine -# snapshot from peers instead of fetching and replaying historical blocks. Requires some peers in -# the network to take and serve state machine snapshots. State sync is not attempted if the node -# has any local state (LastBlockHeight > 0). The node will have a truncated block history, -# starting from the height of the snapshot. -enable = false - -# RPC servers (comma-separated) for light client verification of the synced state machine and -# retrieval of state data for node bootstrapping. Also needs a trusted height and corresponding -# header hash obtained from a trusted source, and a period during which validators can be trusted. -# -# For Cosmos SDK-based chains, trust_period should usually be about 2/3 of the unbonding time (~2 -# weeks) during which they can be financially punished (slashed) for misbehavior. -rpc_servers = "" -trust_height = 0 -trust_hash = "" -trust_period = "168h0m0s" - -# Time to spend discovering snapshots before initiating a restore. -discovery_time = "15s" - -# Temporary directory for state sync snapshot chunks, defaults to the OS tempdir (typically /tmp). -# Will create a new, randomly named directory within, and remove it when done. -temp_dir = "" - -# The timeout duration before re-requesting a chunk, possibly from a different -# peer (default: 1 minute). -chunk_request_timeout = "10s" - -# The number of concurrent chunk fetchers to run (default: 1). -chunk_fetchers = "4" - -####################################################### -### Fast Sync Configuration Connections ### -####################################################### -[fastsync] - -# Fast Sync version to use: -# 1) "v0" (default) - the legacy fast sync implementation -# 2) "v1" - refactor of v0 version for better testability -# 2) "v2" - complete redesign of v0, optimized for testability & readability -version = "v0" - -####################################################### -### Consensus Configuration Options ### -####################################################### -[consensus] - -wal_file = "data/cs.wal/wal" - -# How long we wait for a proposal block before prevoting nil -timeout_propose = "1s" -# How much timeout_propose increases with each round -timeout_propose_delta = "500ms" -# How long we wait after receiving +2/3 prevotes for “anything” (ie. not a single block or nil) -timeout_prevote = "1s" -# How much the timeout_prevote increases with each round -timeout_prevote_delta = "500ms" -# How long we wait after receiving +2/3 precommits for “anything” (ie. 
not a single block or nil) -timeout_precommit = "1s" -# How much the timeout_precommit increases with each round -timeout_precommit_delta = "500ms" -# How long we wait after committing a block, before starting on the new -# height (this gives us a chance to receive some more precommits, even -# though we already have +2/3). -timeout_commit = "1s" - -# How many blocks to look back to check existence of the node's consensus votes before joining consensus -# When non-zero, the node will panic upon restart -# if the same consensus key was used to sign {double_sign_check_height} last blocks. -# So, validators should stop the state machine, wait for some blocks, and then restart the state machine to avoid panic. -double_sign_check_height = 0 - -# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) -skip_timeout_commit = false - -# EmptyBlocks mode and possible interval between empty blocks -create_empty_blocks = true -create_empty_blocks_interval = "0s" - -# Reactor sleep duration parameters -peer_gossip_sleep_duration = "100ms" -peer_query_maj23_sleep_duration = "2s" - -####################################################### -### Transaction Indexer Configuration Options ### -####################################################### -[tx_index] - -# What indexer to use for transactions -# -# The application will set which txs to index. In some cases a node operator will be able -# to decide which txs to index based on configuration set in the application. -# -# Options: -# 1) "null" -# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). -# - When "kv" is chosen "tx.height" and "tx.hash" will always be indexed. -indexer = "kv" - -####################################################### -### Instrumentation Configuration Options ### -####################################################### -[instrumentation] - -# When true, Prometheus metrics are served under /metrics on -# PrometheusListenAddr. -# Check out the documentation for the list of available metrics. -prometheus = false - -# Address to listen for Prometheus collector(s) connections -prometheus_listen_addr = ":26660" - -# Maximum number of simultaneous connections. -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. 
-max_open_connections = 3 - -# Instrumentation namespace -namespace = "tendermint" diff --git a/ci/chains/gaia/v6.0.0/ibc-0/config/genesis.json b/ci/chains/gaia/v6.0.0/ibc-0/config/genesis.json deleted file mode 100644 index caa8b3f630..0000000000 --- a/ci/chains/gaia/v6.0.0/ibc-0/config/genesis.json +++ /dev/null @@ -1,356 +0,0 @@ -{ - "genesis_time": "2021-11-24T19:33:05.288086182Z", - "chain_id": "ibc-0", - "initial_height": "1", - "consensus_params": { - "block": { - "max_bytes": "22020096", - "max_gas": "-1", - "time_iota_ms": "1000" - }, - "evidence": { - "max_age_num_blocks": "100000", - "max_age_duration": "172800000000000", - "max_bytes": "1048576" - }, - "validator": { - "pub_key_types": [ - "ed25519" - ] - }, - "version": {} - }, - "app_hash": "", - "app_state": { - "auth": { - "params": { - "max_memo_characters": "256", - "tx_sig_limit": "7", - "tx_size_cost_per_byte": "10", - "sig_verify_cost_ed25519": "590", - "sig_verify_cost_secp256k1": "1000" - }, - "accounts": [ - { - "@type": "/cosmos.auth.v1beta1.BaseAccount", - "address": "cosmos1n0gwen2cakk64kk74yguhw3au6rctlyxf5rvmj", - "pub_key": null, - "account_number": "0", - "sequence": "0" - }, - { - "@type": "/cosmos.auth.v1beta1.BaseAccount", - "address": "cosmos1rdy5sg55yp45q3rklpqwhprg5zdlg7tzuxdlhj", - "pub_key": null, - "account_number": "0", - "sequence": "0" - }, - { - "@type": "/cosmos.auth.v1beta1.BaseAccount", - "address": "cosmos1da0qjh7qptrscq59uaqjwtte0ru7mnfp2ndcxu", - "pub_key": null, - "account_number": "0", - "sequence": "0" - } - ] - }, - "authz": { - "authorization": [] - }, - "bank": { - "params": { - "send_enabled": [], - "default_send_enabled": true - }, - "balances": [ - { - "address": "cosmos1rdy5sg55yp45q3rklpqwhprg5zdlg7tzuxdlhj", - "coins": [ - { - "denom": "samoleans", - "amount": "100000000000" - }, - { - "denom": "stake", - "amount": "100000000000" - } - ] - }, - { - "address": "cosmos1da0qjh7qptrscq59uaqjwtte0ru7mnfp2ndcxu", - "coins": [ - { - "denom": "stake", - "amount": "100000000000" - } - ] - }, - { - "address": "cosmos1n0gwen2cakk64kk74yguhw3au6rctlyxf5rvmj", - "coins": [ - { - "denom": "samoleans", - "amount": "100000000000" - }, - { - "denom": "stake", - "amount": "100000000000" - } - ] - } - ], - "supply": [ - { - "denom": "samoleans", - "amount": "200000000000" - }, - { - "denom": "stake", - "amount": "300000000000" - } - ], - "denom_metadata": [] - }, - "capability": { - "index": "1", - "owners": [] - }, - "crisis": { - "constant_fee": { - "denom": "stake", - "amount": "1000" - } - }, - "distribution": { - "params": { - "community_tax": "0.020000000000000000", - "base_proposer_reward": "0.010000000000000000", - "bonus_proposer_reward": "0.040000000000000000", - "withdraw_addr_enabled": true - }, - "fee_pool": { - "community_pool": [] - }, - "delegator_withdraw_infos": [], - "previous_proposer": "", - "outstanding_rewards": [], - "validator_accumulated_commissions": [], - "validator_historical_rewards": [], - "validator_current_rewards": [], - "delegator_starting_infos": [], - "validator_slash_events": [] - }, - "evidence": { - "evidence": [] - }, - "feegrant": { - "allowances": [] - }, - "genutil": { - "gen_txs": [ - { - "body": { - "messages": [ - { - "@type": "/cosmos.staking.v1beta1.MsgCreateValidator", - "description": { - "moniker": "ibc-0", - "identity": "", - "website": "", - "security_contact": "", - "details": "" - }, - "commission": { - "rate": "0.100000000000000000", - "max_rate": "0.200000000000000000", - "max_change_rate": "0.010000000000000000" - }, - 
"min_self_delegation": "1", - "delegator_address": "cosmos1da0qjh7qptrscq59uaqjwtte0ru7mnfp2ndcxu", - "validator_address": "cosmosvaloper1da0qjh7qptrscq59uaqjwtte0ru7mnfp08ed20", - "pubkey": { - "@type": "/cosmos.crypto.ed25519.PubKey", - "key": "NYFNoMsPbFYg4H/WodS6spPKJuYsFWOz/uEQMxiA7Mk=" - }, - "value": { - "denom": "stake", - "amount": "100000000000" - } - } - ], - "memo": "f8825df2c406207c64cbc02b1f543a53a46094e3@192.168.50.214:26656", - "timeout_height": "0", - "extension_options": [], - "non_critical_extension_options": [] - }, - "auth_info": { - "signer_infos": [ - { - "public_key": { - "@type": "/cosmos.crypto.secp256k1.PubKey", - "key": "AoMRzDhz5CdelMYvXq/YUvy/pJTub9JJ8SV8yqTlyOmc" - }, - "mode_info": { - "single": { - "mode": "SIGN_MODE_DIRECT" - } - }, - "sequence": "0" - } - ], - "fee": { - "amount": [], - "gas_limit": "200000", - "payer": "", - "granter": "" - } - }, - "signatures": [ - "fo1Cp9mv2cOpF+bpO0z+emIzeALBVhyyslZ29mvP2QJU7kCbq1KPeQ6xrQMJWNo5IiEvs0JBdhRHEC08Fbbp6Q==" - ] - } - ] - }, - "gov": { - "starting_proposal_id": "1", - "deposits": [], - "votes": [], - "proposals": [], - "deposit_params": { - "min_deposit": [ - { - "denom": "stake", - "amount": "10000000" - } - ], - "max_deposit_period": "200s" - }, - "voting_params": { - "voting_period": "200s" - }, - "tally_params": { - "quorum": "0.334000000000000000", - "threshold": "0.500000000000000000", - "veto_threshold": "0.334000000000000000" - } - }, - "ibc": { - "client_genesis": { - "clients": [], - "clients_consensus": [], - "clients_metadata": [], - "params": { - "allowed_clients": [ - "06-solomachine", - "07-tendermint" - ] - }, - "create_localhost": false, - "next_client_sequence": "0" - }, - "connection_genesis": { - "connections": [], - "client_connection_paths": [], - "next_connection_sequence": "0", - "params": { - "max_expected_time_per_block": "30000000000" - } - }, - "channel_genesis": { - "channels": [], - "acknowledgements": [], - "commitments": [], - "receipts": [], - "send_sequences": [], - "recv_sequences": [], - "ack_sequences": [], - "next_channel_sequence": "0" - } - }, - "liquidity": { - "params": { - "pool_types": [ - { - "id": 1, - "name": "StandardLiquidityPool", - "min_reserve_coin_num": 2, - "max_reserve_coin_num": 2, - "description": "Standard liquidity pool with pool price function X/Y, ESPM constraint, and two kinds of reserve coins" - } - ], - "min_init_deposit_amount": "1000000", - "init_pool_coin_mint_amount": "1000000", - "max_reserve_coin_amount": "0", - "pool_creation_fee": [ - { - "denom": "stake", - "amount": "40000000" - } - ], - "swap_fee_rate": "0.003000000000000000", - "withdraw_fee_rate": "0.000000000000000000", - "max_order_amount_ratio": "0.100000000000000000", - "unit_batch_height": 1, - "circuit_breaker_enabled": false - }, - "pool_records": [] - }, - "mint": { - "minter": { - "inflation": "0.130000000000000000", - "annual_provisions": "0.000000000000000000" - }, - "params": { - "mint_denom": "stake", - "inflation_rate_change": "0.130000000000000000", - "inflation_max": "0.200000000000000000", - "inflation_min": "0.070000000000000000", - "goal_bonded": "0.670000000000000000", - "blocks_per_year": "6311520" - } - }, - "packetfowardmiddleware": { - "params": { - "fee_percentage": "0.000000000000000000" - } - }, - "params": null, - "slashing": { - "params": { - "signed_blocks_window": "100", - "min_signed_per_window": "0.500000000000000000", - "downtime_jail_duration": "600s", - "slash_fraction_double_sign": "0.050000000000000000", - "slash_fraction_downtime": 
"0.010000000000000000" - }, - "signing_infos": [], - "missed_blocks": [] - }, - "staking": { - "params": { - "unbonding_time": "1814400s", - "max_validators": 100, - "max_entries": 7, - "historical_entries": 10000, - "bond_denom": "stake" - }, - "last_total_power": "0", - "last_validator_powers": [], - "validators": [], - "delegations": [], - "unbonding_delegations": [], - "redelegations": [], - "exported": false - }, - "transfer": { - "port_id": "transfer", - "denom_traces": [], - "params": { - "send_enabled": true, - "receive_enabled": true - } - }, - "upgrade": {}, - "vesting": {} - } -} \ No newline at end of file diff --git a/ci/chains/gaia/v6.0.0/ibc-0/config/gentx/gentx-f8825df2c406207c64cbc02b1f543a53a46094e3.json b/ci/chains/gaia/v6.0.0/ibc-0/config/gentx/gentx-f8825df2c406207c64cbc02b1f543a53a46094e3.json deleted file mode 100644 index 685abc3aef..0000000000 --- a/ci/chains/gaia/v6.0.0/ibc-0/config/gentx/gentx-f8825df2c406207c64cbc02b1f543a53a46094e3.json +++ /dev/null @@ -1 +0,0 @@ -{"body":{"messages":[{"@type":"/cosmos.staking.v1beta1.MsgCreateValidator","description":{"moniker":"ibc-0","identity":"","website":"","security_contact":"","details":""},"commission":{"rate":"0.100000000000000000","max_rate":"0.200000000000000000","max_change_rate":"0.010000000000000000"},"min_self_delegation":"1","delegator_address":"cosmos1da0qjh7qptrscq59uaqjwtte0ru7mnfp2ndcxu","validator_address":"cosmosvaloper1da0qjh7qptrscq59uaqjwtte0ru7mnfp08ed20","pubkey":{"@type":"/cosmos.crypto.ed25519.PubKey","key":"NYFNoMsPbFYg4H/WodS6spPKJuYsFWOz/uEQMxiA7Mk="},"value":{"denom":"stake","amount":"100000000000"}}],"memo":"f8825df2c406207c64cbc02b1f543a53a46094e3@192.168.50.214:26656","timeout_height":"0","extension_options":[],"non_critical_extension_options":[]},"auth_info":{"signer_infos":[{"public_key":{"@type":"/cosmos.crypto.secp256k1.PubKey","key":"AoMRzDhz5CdelMYvXq/YUvy/pJTub9JJ8SV8yqTlyOmc"},"mode_info":{"single":{"mode":"SIGN_MODE_DIRECT"}},"sequence":"0"}],"fee":{"amount":[],"gas_limit":"200000","payer":"","granter":""}},"signatures":["fo1Cp9mv2cOpF+bpO0z+emIzeALBVhyyslZ29mvP2QJU7kCbq1KPeQ6xrQMJWNo5IiEvs0JBdhRHEC08Fbbp6Q=="]} diff --git a/ci/chains/gaia/v6.0.0/ibc-0/config/node_key.json b/ci/chains/gaia/v6.0.0/ibc-0/config/node_key.json deleted file mode 100644 index c5ecc004ac..0000000000 --- a/ci/chains/gaia/v6.0.0/ibc-0/config/node_key.json +++ /dev/null @@ -1 +0,0 @@ -{"priv_key":{"type":"tendermint/PrivKeyEd25519","value":"ZMa5+ovMRjEApthUCunbPP4UIS4BkLwrN7ZUYqNsI6mXanUuQ3pszUuvB1YT4uXgQSNRUVVZ+0yITLieUxXSDA=="}} \ No newline at end of file diff --git a/ci/chains/gaia/v6.0.0/ibc-0/config/priv_validator_key.json b/ci/chains/gaia/v6.0.0/ibc-0/config/priv_validator_key.json deleted file mode 100644 index 02126f7d9c..0000000000 --- a/ci/chains/gaia/v6.0.0/ibc-0/config/priv_validator_key.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "address": "858D7DD17C7EF440D4AE4288C5BAA80EAA51D7FD", - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "NYFNoMsPbFYg4H/WodS6spPKJuYsFWOz/uEQMxiA7Mk=" - }, - "priv_key": { - "type": "tendermint/PrivKeyEd25519", - "value": "7pxUnNSpT3r1VnjhjgbMBIekqm3HlhqWzj72wp4/VZI1gU2gyw9sViDgf9ah1Lqyk8om5iwVY7P+4RAzGIDsyQ==" - } -} \ No newline at end of file diff --git a/ci/chains/gaia/v6.0.0/ibc-0/keyring-test/1b49482294206b404476f840eb8468a09bf47962.address b/ci/chains/gaia/v6.0.0/ibc-0/keyring-test/1b49482294206b404476f840eb8468a09bf47962.address deleted file mode 100644 index ab43a81270..0000000000 --- 
a/ci/chains/gaia/v6.0.0/ibc-0/keyring-test/1b49482294206b404476f840eb8468a09bf47962.address +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0xMS0yNCAxNDozMzowOC41NTA0NTQxMzMgLTA1MDAgRVNUIG09KzAuMDcxMDg5NDgzIiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiUE1YZkJzdHdtTEZISDFTUCJ9.e2R6TeUCkYqRtQcpeOIZ9MH7LwWmKYsF-9azSTSBBGSkPjEmY49gTA.dzTMUVpONW5lmbuZ.2KL3qYiMBjqzewecmeI8ooDqhYw8nTb2MhBL9CZ3Blm-1PrQR19YQvcrYEzIhydKUNS-tm_vDDxQ7b-FtEWwGuZak7ErpvrKhsZFQiLEyHg0F4UEvWp5W4fY6hvIso8cf5HlIAHvSnffej1WfngiTSV23u2KbgvoiDU7Ctf5uYpBjaruHRbzk738ruKSTRyWPrag5L3IU-5uRNfhxXH-pGa7ftZcmPwMhTxCGojghgSmg9ZkGgQ.2UgcRahsvtKKqlHlNtnZsg \ No newline at end of file diff --git a/ci/chains/gaia/v6.0.0/ibc-0/keyring-test/6f5e095fc00ac70c0285e741272d7978f9edcd21.address b/ci/chains/gaia/v6.0.0/ibc-0/keyring-test/6f5e095fc00ac70c0285e741272d7978f9edcd21.address deleted file mode 100644 index cbd24616ab..0000000000 --- a/ci/chains/gaia/v6.0.0/ibc-0/keyring-test/6f5e095fc00ac70c0285e741272d7978f9edcd21.address +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0xMS0yNCAxNDozMzowNi4zNjMwOTU1NTkgLTA1MDAgRVNUIG09KzAuMDY1Nzg0NzI2IiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoicVFEZXh1UFllT3l2RG95UCJ9.80eJIBA0YgpL8j7mOFmajvU3VwJyv8xSVdhJOIn3KEwOwWIPAdvuZA.JOxBKB717wxiUuEN.VLZqTx5iM7dUvFQ75UbVhz4a-Wz1cgaRmf_-3cUD32-6c84jk8FyDuFlc1Tb1umMf8H1vwIE9higSQFMtZeuzCxIG08a9zTfXPyqWHfs5aodiDi9tDhMH1t2FQz1uHlf0UaAz1_tLyFeY0LuK_pXk_-err9r2ACgjDKnMUI_y5bmfxS1EGOLvylcKea2s1kROzYz_h2avUodhHg3Nx-qNtwlTxr6Hv74E21ZJPEtLe5PAb8Rm2LGrPpO.s7eBd83I848F6Vj-WcS3fQ \ No newline at end of file diff --git a/ci/chains/gaia/v6.0.0/ibc-0/keyring-test/9bd0eccd58edadaadadea911cbba3de68785fc86.address b/ci/chains/gaia/v6.0.0/ibc-0/keyring-test/9bd0eccd58edadaadadea911cbba3de68785fc86.address deleted file mode 100644 index f9bb589ad5..0000000000 --- a/ci/chains/gaia/v6.0.0/ibc-0/keyring-test/9bd0eccd58edadaadadea911cbba3de68785fc86.address +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0xMS0yNCAxNDozMzowNy40NjE0MjQ3NyAtMDUwMCBFU1QgbT0rMC4wODQ4MTQ4MzMiLCJlbmMiOiJBMjU2R0NNIiwicDJjIjo4MTkyLCJwMnMiOiJ6aFh6S0haLVVQOWhnRVFnIn0.xXrGsXcX7D1hn6m_nAwcGfAkdcgjbmLz_UEmlE0W45mFUAyljxjRkQ.KMAJFWKlCnYw4mvX.DyhKx-xyqWKJrUS42VAtNkSWKVqkogh2q-joJOFPJcF6f1IcN73JqZxdYmvsai3dRGs5xiTQsjsiM6tjvDKSFEaT3vYqpPwxTOJu2pvo_vWbNlNzauQyWgNiSiAPvtEWZI6PCTdk7FuyPBfm3dE4SkWuQBUuYZNoUnkkEXWDmqD3lKZ2ZIYcc0hqA4FQVP1A7RL4J16kn2BeXOEBPsUH8JZbplUDXA4UAnCFAtF_JdCgug.6K4vdxOdRJ9rbZa7cKxJwg \ No newline at end of file diff --git a/ci/chains/gaia/v6.0.0/ibc-0/keyring-test/user.info b/ci/chains/gaia/v6.0.0/ibc-0/keyring-test/user.info deleted file mode 100644 index 9fb08e779e..0000000000 --- a/ci/chains/gaia/v6.0.0/ibc-0/keyring-test/user.info +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0xMS0yNCAxNDozMzowNy40NDc4MTY1MDUgLTA1MDAgRVNUIG09KzAuMDcxMjA2NTUyIiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoibl94Mm1fOWVjbzhpeUxvQiJ9.ikzAEDoLYycFQVexbskD1kRzstVFXpR-YNz_kRrLMSUNSgkhhZGZVQ.7zH7erlv7nJbS5gX.xAIev6M3iU17fXOFC4SU8Al5JJp8_M7oaambw5ZxgmFDyajwffxsuXYIW7Y9MLGJNJ8yVHic-iN9HXdbxNIBfMsP138D6VXpksxAlbEWoVRJihwvqF_iqEHcuEyD212WEwIPTY6hH822xZV4l7yQlEHoGlQtUnMuWQz1IzvcKFYESUUavfiiSfP0CFbY5MAd5JremAtH-vN3xJ9pqcL0hOl9hmeKmc5pxZtnrbiTyVYiDJI8zPYeWFKTT_AvKoT9aoa1Ouo4qZ2whbUYufG9dC_DDhCcUATJocFBqNegahBOgegaJxDVzbv2PLqgWTrwpaMi0XiKMo0xt4HYt0LVNtbrpgJjHD4.PK-PK_u4eJmtbGd4y4G5Yg \ No newline at end of file diff --git a/ci/chains/gaia/v6.0.0/ibc-0/keyring-test/user2.info 
b/ci/chains/gaia/v6.0.0/ibc-0/keyring-test/user2.info deleted file mode 100644 index 5f864632e6..0000000000 --- a/ci/chains/gaia/v6.0.0/ibc-0/keyring-test/user2.info +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0xMS0yNCAxNDozMzowOC41NDEyNTcxMTQgLTA1MDAgRVNUIG09KzAuMDYxODkyNDY0IiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiMXU3TFdzdjBZSHdwbkNqciJ9.fMo5HmXetkpNLdBkXlnIZk4ApD-nFYMME6dmdFarg7zhFL7QCFN7KQ._8e7Jva2GXFtzSt8.I2ta6Y6HKwoHkTx8-rPjLcajthmS90-iVMA_0FIup9lNd5LBLQpNAHY2hDbhCUbr3rNBPhTDVz6fD_S3ImfGjg4rQePDvOGgjntteAJ_e4-h1w0PCliyLlUM24Bw83aJmT1tubJ_bLe1Jkowg2FFv7b4QIY4sEslkoM0mMFmjgghZgVYzK_7gHY-KfXzXhEt-7lNypUbm7Te8ZQoZfzqcjwiJuwHKmXA2F5aWlNZK2ry6ivWWgRVUzwS0VWBRaDlx60R4PIrR0kqF_H8H4SDa_yIJsFyoLLliMgUz4N8COs6dHc3rf-jfopVtFriVDF7qRuKvfh4cL2FPiD6A1_kGBG4QxTWytCx.uiXHyY__OXUydn-keVhDyw \ No newline at end of file diff --git a/ci/chains/gaia/v6.0.0/ibc-0/keyring-test/validator.info b/ci/chains/gaia/v6.0.0/ibc-0/keyring-test/validator.info deleted file mode 100644 index cd874c66c9..0000000000 --- a/ci/chains/gaia/v6.0.0/ibc-0/keyring-test/validator.info +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0xMS0yNCAxNDozMzowNi4zNTYxNDUzNjYgLTA1MDAgRVNUIG09KzAuMDU4ODM0NTI0IiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiT1d6Q3h2a2lleHNUUXhXWiJ9.sMP2Opeadzy1NIqSFinrOosc9ueNGl6eoXzSlj1PuyjzKVDNPJBG9w.ymFHLPlxDdlwKchG.v7aqEHKE7zGY0DHDrV80QujgwP0dCVJG8PRsZv30pE_49NxiFjMJF_leVa52oiUN_8HCJsitXTIcs8Kku8eqL1FYPrFG-_pNZduDHM8w3dU40rYRhpEqo3NApemWXYeX5RQdnuXTHqriwIxOAsTbpoHxls7kMGH3AyDxz1yzfeATF6C0wKIUj1sssYg1dNX6PK2Vo2yyVWdmNFFX5UdUZZhSHmHN0t97k-GiYOYSfu92NLy4Nv1QQDchYuTGKdLdxMV_NApAlYUFFijFZthtEdABNzUqiSi296uCj7pw1uR8K9X-bzITkWb3_Yh-ELHbKbLAdF5FBST9Sw76jiqSGAQoZsunDNcnsJSgsAMcnGRtzBa8.FBJD9ZKZOHhaaeO9_y8byw \ No newline at end of file diff --git a/ci/chains/gaia/v6.0.0/ibc-0/user2_seed.json b/ci/chains/gaia/v6.0.0/ibc-0/user2_seed.json deleted file mode 100644 index 6a2856a3d4..0000000000 --- a/ci/chains/gaia/v6.0.0/ibc-0/user2_seed.json +++ /dev/null @@ -1 +0,0 @@ -{"name":"user2","type":"local","address":"cosmos1rdy5sg55yp45q3rklpqwhprg5zdlg7tzuxdlhj","pubkey":"{\"@type\":\"/cosmos.crypto.secp256k1.PubKey\",\"key\":\"A6qQIsP43N+JyGeXtjYV10mUC3gfk1QcxEredSeNlezv\"}","mnemonic":"response group easily vicious master immense circle flock bridge chest cannon fog acoustic million plastic cinnamon rotate butter minute fatigue seed stick cousin mule"} diff --git a/ci/chains/gaia/v6.0.0/ibc-0/user_seed.json b/ci/chains/gaia/v6.0.0/ibc-0/user_seed.json deleted file mode 100644 index 05c5482efa..0000000000 --- a/ci/chains/gaia/v6.0.0/ibc-0/user_seed.json +++ /dev/null @@ -1 +0,0 @@ -{"name":"user","type":"local","address":"cosmos1n0gwen2cakk64kk74yguhw3au6rctlyxf5rvmj","pubkey":"{\"@type\":\"/cosmos.crypto.secp256k1.PubKey\",\"key\":\"ArOL1s7aGOyN+cvnOzf/b+F0aSX0gcNOmo/KKTxFqZ/a\"}","mnemonic":"mass symbol wrap wear typical romance machine cart club famous celery impose fancy chief emotion excess figure wet insane muffin tone awful coconut romance"} diff --git a/ci/chains/gaia/v6.0.0/ibc-0/validator_seed.json b/ci/chains/gaia/v6.0.0/ibc-0/validator_seed.json deleted file mode 100644 index 558019b078..0000000000 --- a/ci/chains/gaia/v6.0.0/ibc-0/validator_seed.json +++ /dev/null @@ -1 +0,0 @@ -{"name":"validator","type":"local","address":"cosmos1da0qjh7qptrscq59uaqjwtte0ru7mnfp2ndcxu","pubkey":"{\"@type\":\"/cosmos.crypto.secp256k1.PubKey\",\"key\":\"AoMRzDhz5CdelMYvXq/YUvy/pJTub9JJ8SV8yqTlyOmc\"}","mnemonic":"dizzy lobster gaze luggage 
strategy drama negative bright trumpet favorite upset chief month popular hungry potato mad nephew peanut ship city purchase method metal"} diff --git a/ci/chains/gaia/v6.0.0/ibc-1/config/app.toml b/ci/chains/gaia/v6.0.0/ibc-1/config/app.toml deleted file mode 100644 index 1fc0b791f4..0000000000 --- a/ci/chains/gaia/v6.0.0/ibc-1/config/app.toml +++ /dev/null @@ -1,192 +0,0 @@ -# This is a TOML config file. -# For more information, see https://github.com/toml-lang/toml - -############################################################################### -### Base Configuration ### -############################################################################### - -# The minimum gas prices a validator is willing to accept for processing a -# transaction. A transaction's fees must meet the minimum of any denomination -# specified in this config (e.g. 0.25token1;0.0001token2). -minimum-gas-prices = "" - -# default: the last 100 states are kept in addition to every 500th state; pruning at 10 block intervals -# nothing: all historic states will be saved, nothing will be deleted (i.e. archiving node) -# everything: all saved states will be deleted, storing only the current state; pruning at 10 block intervals -# custom: allow pruning options to be manually specified through 'pruning-keep-recent', 'pruning-keep-every', and 'pruning-interval' -pruning = "default" - -# These are applied if and only if the pruning strategy is custom. -pruning-keep-recent = "0" -pruning-keep-every = "0" -pruning-interval = "0" - -# HaltHeight contains a non-zero block height at which a node will gracefully -# halt and shutdown that can be used to assist upgrades and testing. -# -# Note: Commitment of state will be attempted on the corresponding block. -halt-height = 0 - -# HaltTime contains a non-zero minimum block time (in Unix seconds) at which -# a node will gracefully halt and shutdown that can be used to assist upgrades -# and testing. -# -# Note: Commitment of state will be attempted on the corresponding block. -halt-time = 0 - -# MinRetainBlocks defines the minimum block height offset from the current -# block being committed, such that all blocks past this offset are pruned -# from Tendermint. It is used as part of the process of determining the -# ResponseCommit.RetainHeight value during ABCI Commit. A value of 0 indicates -# that no blocks should be pruned. -# -# This configuration value is only responsible for pruning Tendermint blocks. -# It has no bearing on application state pruning which is determined by the -# "pruning-*" configurations. -# -# Note: Tendermint block pruning is dependant on this parameter in conunction -# with the unbonding (safety threshold) period, state pruning and state sync -# snapshot parameters to determine the correct minimum value of -# ResponseCommit.RetainHeight. -min-retain-blocks = 0 - -# InterBlockCache enables inter-block caching. -inter-block-cache = true - -# IndexEvents defines the set of events in the form {eventType}.{attributeKey}, -# which informs Tendermint what to index. If empty, all events will be indexed. -# -# Example: -# ["message.sender", "message.recipient"] -index-events = [] - -############################################################################### -### Telemetry Configuration ### -############################################################################### - -[telemetry] - -# Prefixed with keys to separate services. -service-name = "" - -# Enabled enables the application telemetry functionality. 
When enabled, -# an in-memory sink is also enabled by default. Operators may also enabled -# other sinks such as Prometheus. -enabled = false - -# Enable prefixing gauge values with hostname. -enable-hostname = false - -# Enable adding hostname to labels. -enable-hostname-label = false - -# Enable adding service to labels. -enable-service-label = false - -# PrometheusRetentionTime, when positive, enables a Prometheus metrics sink. -prometheus-retention-time = 0 - -# GlobalLabels defines a global set of name/value label tuples applied to all -# metrics emitted using the wrapper functions defined in telemetry package. -# -# Example: -# [["chain_id", "cosmoshub-1"]] -global-labels = [ -] - -############################################################################### -### API Configuration ### -############################################################################### - -[api] - -# Enable defines if the API server should be enabled. -enable = false - -# Swagger defines if swagger documentation should automatically be registered. -swagger = false - -# Address defines the API server to listen on. -address = "tcp://0.0.0.0:1317" - -# MaxOpenConnections defines the number of maximum open connections. -max-open-connections = 1000 - -# RPCReadTimeout defines the Tendermint RPC read timeout (in seconds). -rpc-read-timeout = 10 - -# RPCWriteTimeout defines the Tendermint RPC write timeout (in seconds). -rpc-write-timeout = 0 - -# RPCMaxBodyBytes defines the Tendermint maximum response body (in bytes). -rpc-max-body-bytes = 1000000 - -# EnableUnsafeCORS defines if CORS should be enabled (unsafe - use it at your own risk). -enabled-unsafe-cors = false - -############################################################################### -### Rosetta Configuration ### -############################################################################### - -[rosetta] - -# Enable defines if the Rosetta API server should be enabled. -enable = false - -# Address defines the Rosetta API server to listen on. -address = ":8080" - -# Network defines the name of the blockchain that will be returned by Rosetta. -blockchain = "app" - -# Network defines the name of the network that will be returned by Rosetta. -network = "network" - -# Retries defines the number of retries when connecting to the node before failing. -retries = 3 - -# Offline defines if Rosetta server should run in offline mode. -offline = false - -############################################################################### -### gRPC Configuration ### -############################################################################### - -[grpc] - -# Enable defines if the gRPC server should be enabled. -enable = true - -# Address defines the gRPC server address to bind to. -address = "0.0.0.0:9090" - -############################################################################### -### gRPC Web Configuration ### -############################################################################### - -[grpc-web] - -# GRPCWebEnable defines if the gRPC-web should be enabled. -# NOTE: gRPC must also be enabled, otherwise, this configuration is a no-op. -enable = true - -# Address defines the gRPC-web server address to bind to. -address = "0.0.0.0:9091" - -# EnableUnsafeCORS defines if CORS should be enabled (unsafe - use it at your own risk). 
-enable-unsafe-cors = false - -############################################################################### -### State Sync Configuration ### -############################################################################### - -# State sync snapshots allow other nodes to rapidly join the network without replaying historical -# blocks, instead downloading and applying a snapshot of the application state at a given height. -[state-sync] - -# snapshot-interval specifies the block interval at which local state sync snapshots are -# taken (0 to disable). Must be a multiple of pruning-keep-every. -snapshot-interval = 0 - -# snapshot-keep-recent specifies the number of recent snapshots to keep and serve (0 to keep all). -snapshot-keep-recent = 2 diff --git a/ci/chains/gaia/v6.0.0/ibc-1/config/client.toml b/ci/chains/gaia/v6.0.0/ibc-1/config/client.toml deleted file mode 100644 index 222695a3f8..0000000000 --- a/ci/chains/gaia/v6.0.0/ibc-1/config/client.toml +++ /dev/null @@ -1,17 +0,0 @@ -# This is a TOML config file. -# For more information, see https://github.com/toml-lang/toml - -############################################################################### -### Client Configuration ### -############################################################################### - -# The network chain ID -chain-id = "" -# The keyring's backend, where the keys are stored (os|file|kwallet|pass|test|memory) -keyring-backend = "os" -# CLI output format (text|json) -output = "text" -# : to Tendermint RPC interface for this chain -node = "tcp://localhost:26657" -# Transaction broadcasting mode (sync|async|block) -broadcast-mode = "sync" diff --git a/ci/chains/gaia/v6.0.0/ibc-1/config/config.toml b/ci/chains/gaia/v6.0.0/ibc-1/config/config.toml deleted file mode 100644 index bd592119ba..0000000000 --- a/ci/chains/gaia/v6.0.0/ibc-1/config/config.toml +++ /dev/null @@ -1,401 +0,0 @@ -# This is a TOML config file. -# For more information, see https://github.com/toml-lang/toml - -# NOTE: Any path below can be absolute (e.g. "/var/myawesomeapp/data") or -# relative to the home directory (e.g. "data"). The home directory is -# "$HOME/.tendermint" by default, but could be changed via $TMHOME env variable -# or --home cmd flag. 
- -####################################################################### -### Main Base Config Options ### -####################################################################### - -# TCP or UNIX socket address of the ABCI application, -# or the name of an ABCI application compiled in with the Tendermint binary -proxy_app = "tcp://127.0.0.1:26658" - -# A custom human readable name for this node -moniker = "ibc-1" - -# If this node is many blocks behind the tip of the chain, FastSync -# allows them to catchup quickly by downloading blocks in parallel -# and verifying their commits -fast_sync = true - -# Database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb -# * goleveldb (github.com/syndtr/goleveldb - most popular implementation) -# - pure go -# - stable -# * cleveldb (uses levigo wrapper) -# - fast -# - requires gcc -# - use cleveldb build tag (go build -tags cleveldb) -# * boltdb (uses etcd's fork of bolt - github.com/etcd-io/bbolt) -# - EXPERIMENTAL -# - may be faster is some use-cases (random reads - indexer) -# - use boltdb build tag (go build -tags boltdb) -# * rocksdb (uses github.com/tecbot/gorocksdb) -# - EXPERIMENTAL -# - requires gcc -# - use rocksdb build tag (go build -tags rocksdb) -# * badgerdb (uses github.com/dgraph-io/badger) -# - EXPERIMENTAL -# - use badgerdb build tag (go build -tags badgerdb) -db_backend = "goleveldb" - -# Database directory -db_dir = "data" - -# Output level for logging, including package level options -log_level = "info" - -# Output format: 'plain' (colored text) or 'json' -log_format = "plain" - -##### additional base config options ##### - -# Path to the JSON file containing the initial validator set and other meta data -genesis_file = "config/genesis.json" - -# Path to the JSON file containing the private key to use as a validator in the consensus protocol -priv_validator_key_file = "config/priv_validator_key.json" - -# Path to the JSON file containing the last sign state of a validator -priv_validator_state_file = "data/priv_validator_state.json" - -# TCP or UNIX socket address for Tendermint to listen on for -# connections from an external PrivValidator process -priv_validator_laddr = "" - -# Path to the JSON file containing the private key to use for node authentication in the p2p protocol -node_key_file = "config/node_key.json" - -# Mechanism to connect to the ABCI application: socket | grpc -abci = "socket" - -# If true, query the ABCI app on connecting to a new peer -# so the app can decide if we should keep the connection or not -filter_peers = false - - -####################################################################### -### Advanced Configuration Options ### -####################################################################### - -####################################################### -### RPC Server Configuration Options ### -####################################################### -[rpc] - -# TCP or UNIX socket address for the RPC server to listen on -laddr = "tcp://0.0.0.0:26657" - -# A list of origins a cross-domain request can be executed from -# Default value '[]' disables cors support -# Use '["*"]' to allow any origin -cors_allowed_origins = [] - -# A list of methods the client is allowed to use with cross-domain requests -cors_allowed_methods = ["HEAD", "GET", "POST", ] - -# A list of non simple headers the client is allowed to use with cross-domain requests -cors_allowed_headers = ["Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time", ] - -# TCP or UNIX socket address for the gRPC 
server to listen on -# NOTE: This server only supports /broadcast_tx_commit -grpc_laddr = "" - -# Maximum number of simultaneous connections. -# Does not include RPC (HTTP&WebSocket) connections. See max_open_connections -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} -# 1024 - 40 - 10 - 50 = 924 = ~900 -grpc_max_open_connections = 900 - -# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool -unsafe = false - -# Maximum number of simultaneous connections (including WebSocket). -# Does not include gRPC connections. See grpc_max_open_connections -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} -# 1024 - 40 - 10 - 50 = 924 = ~900 -max_open_connections = 900 - -# Maximum number of unique clientIDs that can /subscribe -# If you're using /broadcast_tx_commit, set to the estimated maximum number -# of broadcast_tx_commit calls per block. -max_subscription_clients = 100 - -# Maximum number of unique queries a given client can /subscribe to -# If you're using GRPC (or Local RPC client) and /broadcast_tx_commit, set to -# the estimated # maximum number of broadcast_tx_commit calls per block. -max_subscriptions_per_client = 5 - -# How long to wait for a tx to be committed during /broadcast_tx_commit. -# WARNING: Using a value larger than 10s will result in increasing the -# global HTTP write timeout, which applies to all connections and endpoints. -# See https://github.com/tendermint/tendermint/issues/3435 -timeout_broadcast_tx_commit = "10s" - -# Maximum size of request body, in bytes -max_body_bytes = 1000000 - -# Maximum size of request header, in bytes -max_header_bytes = 1048576 - -# The path to a file containing certificate that is used to create the HTTPS server. -# Might be either absolute path or path related to Tendermint's config directory. -# If the certificate is signed by a certificate authority, -# the certFile should be the concatenation of the server's certificate, any intermediates, -# and the CA's certificate. -# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. -# Otherwise, HTTP server is run. -tls_cert_file = "" - -# The path to a file containing matching private key that is used to create the HTTPS server. -# Might be either absolute path or path related to Tendermint's config directory. -# NOTE: both tls-cert-file and tls-key-file must be present for Tendermint to create HTTPS server. -# Otherwise, HTTP server is run. -tls_key_file = "" - -# pprof listen address (https://golang.org/pkg/net/http/pprof) -pprof_laddr = "localhost:6060" - -####################################################### -### P2P Configuration Options ### -####################################################### -[p2p] - -# Address to listen for incoming connections -laddr = "tcp://0.0.0.0:26656" - -# Address to advertise to peers for them to dial -# If empty, will use the same port as the laddr, -# and will introspect on the listener or use UPnP -# to figure out the address. 
ip and port are required -# example: 159.89.10.97:26656 -external_address = "" - -# Comma separated list of seed nodes to connect to -seeds = "" - -# Comma separated list of nodes to keep persistent connections to -persistent_peers = "" - -# UPNP port forwarding -upnp = false - -# Path to address book -addr_book_file = "config/addrbook.json" - -# Set true for strict address routability rules -# Set false for private or local networks -addr_book_strict = true - -# Maximum number of inbound peers -max_num_inbound_peers = 40 - -# Maximum number of outbound peers to connect to, excluding persistent peers -max_num_outbound_peers = 10 - -# List of node IDs, to which a connection will be (re)established ignoring any existing limits -unconditional_peer_ids = "" - -# Maximum pause when redialing a persistent peer (if zero, exponential backoff is used) -persistent_peers_max_dial_period = "0s" - -# Time to wait before flushing messages out on the connection -flush_throttle_timeout = "100ms" - -# Maximum size of a message packet payload, in bytes -max_packet_msg_payload_size = 1024 - -# Rate at which packets can be sent, in bytes/second -send_rate = 5120000 - -# Rate at which packets can be received, in bytes/second -recv_rate = 5120000 - -# Set true to enable the peer-exchange reactor -pex = true - -# Seed mode, in which node constantly crawls the network and looks for -# peers. If another node asks it for addresses, it responds and disconnects. -# -# Does not work if the peer-exchange reactor is disabled. -seed_mode = false - -# Comma separated list of peer IDs to keep private (will not be gossiped to other peers) -private_peer_ids = "" - -# Toggle to disable guard against peers connecting from the same ip. -allow_duplicate_ip = false - -# Peer connection configuration. -handshake_timeout = "20s" -dial_timeout = "3s" - -####################################################### -### Mempool Configuration Option ### -####################################################### -[mempool] - -recheck = true -broadcast = true -wal_dir = "" - -# Maximum number of transactions in the mempool -size = 5000 - -# Limit the total size of all txs in the mempool. -# This only accounts for raw transactions (e.g. given 1MB transactions and -# max_txs_bytes=5MB, mempool will only accept 5 transactions). -max_txs_bytes = 1073741824 - -# Size of the cache (used to filter transactions we saw earlier) in transactions -cache_size = 10000 - -# Do not remove invalid transactions from the cache (default: false) -# Set to true if it's not possible for any invalid transaction to become valid -# again in the future. -keep-invalid-txs-in-cache = false - -# Maximum size of a single transaction. -# NOTE: the max size of a tx transmitted over the network is {max_tx_bytes}. -max_tx_bytes = 1048576 - -# Maximum size of a batch of transactions to send to a peer -# Including space needed by encoding (one varint per transaction). -# XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796 -max_batch_bytes = 0 - -####################################################### -### State Sync Configuration Options ### -####################################################### -[statesync] -# State sync rapidly bootstraps a new node by discovering, fetching, and restoring a state machine -# snapshot from peers instead of fetching and replaying historical blocks. Requires some peers in -# the network to take and serve state machine snapshots. State sync is not attempted if the node -# has any local state (LastBlockHeight > 0). 
The node will have a truncated block history, -# starting from the height of the snapshot. -enable = false - -# RPC servers (comma-separated) for light client verification of the synced state machine and -# retrieval of state data for node bootstrapping. Also needs a trusted height and corresponding -# header hash obtained from a trusted source, and a period during which validators can be trusted. -# -# For Cosmos SDK-based chains, trust_period should usually be about 2/3 of the unbonding time (~2 -# weeks) during which they can be financially punished (slashed) for misbehavior. -rpc_servers = "" -trust_height = 0 -trust_hash = "" -trust_period = "168h0m0s" - -# Time to spend discovering snapshots before initiating a restore. -discovery_time = "15s" - -# Temporary directory for state sync snapshot chunks, defaults to the OS tempdir (typically /tmp). -# Will create a new, randomly named directory within, and remove it when done. -temp_dir = "" - -# The timeout duration before re-requesting a chunk, possibly from a different -# peer (default: 1 minute). -chunk_request_timeout = "10s" - -# The number of concurrent chunk fetchers to run (default: 1). -chunk_fetchers = "4" - -####################################################### -### Fast Sync Configuration Connections ### -####################################################### -[fastsync] - -# Fast Sync version to use: -# 1) "v0" (default) - the legacy fast sync implementation -# 2) "v1" - refactor of v0 version for better testability -# 2) "v2" - complete redesign of v0, optimized for testability & readability -version = "v0" - -####################################################### -### Consensus Configuration Options ### -####################################################### -[consensus] - -wal_file = "data/cs.wal/wal" - -# How long we wait for a proposal block before prevoting nil -timeout_propose = "1s" -# How much timeout_propose increases with each round -timeout_propose_delta = "500ms" -# How long we wait after receiving +2/3 prevotes for “anything” (ie. not a single block or nil) -timeout_prevote = "1s" -# How much the timeout_prevote increases with each round -timeout_prevote_delta = "500ms" -# How long we wait after receiving +2/3 precommits for “anything” (ie. not a single block or nil) -timeout_precommit = "1s" -# How much the timeout_precommit increases with each round -timeout_precommit_delta = "500ms" -# How long we wait after committing a block, before starting on the new -# height (this gives us a chance to receive some more precommits, even -# though we already have +2/3). -timeout_commit = "1s" - -# How many blocks to look back to check existence of the node's consensus votes before joining consensus -# When non-zero, the node will panic upon restart -# if the same consensus key was used to sign {double_sign_check_height} last blocks. -# So, validators should stop the state machine, wait for some blocks, and then restart the state machine to avoid panic. 
-double_sign_check_height = 0 - -# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) -skip_timeout_commit = false - -# EmptyBlocks mode and possible interval between empty blocks -create_empty_blocks = true -create_empty_blocks_interval = "0s" - -# Reactor sleep duration parameters -peer_gossip_sleep_duration = "100ms" -peer_query_maj23_sleep_duration = "2s" - -####################################################### -### Transaction Indexer Configuration Options ### -####################################################### -[tx_index] - -# What indexer to use for transactions -# -# The application will set which txs to index. In some cases a node operator will be able -# to decide which txs to index based on configuration set in the application. -# -# Options: -# 1) "null" -# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). -# - When "kv" is chosen "tx.height" and "tx.hash" will always be indexed. -indexer = "kv" - -####################################################### -### Instrumentation Configuration Options ### -####################################################### -[instrumentation] - -# When true, Prometheus metrics are served under /metrics on -# PrometheusListenAddr. -# Check out the documentation for the list of available metrics. -prometheus = false - -# Address to listen for Prometheus collector(s) connections -prometheus_listen_addr = ":26660" - -# Maximum number of simultaneous connections. -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -max_open_connections = 3 - -# Instrumentation namespace -namespace = "tendermint" diff --git a/ci/chains/gaia/v6.0.0/ibc-1/config/genesis.json b/ci/chains/gaia/v6.0.0/ibc-1/config/genesis.json deleted file mode 100644 index 2f2a75c350..0000000000 --- a/ci/chains/gaia/v6.0.0/ibc-1/config/genesis.json +++ /dev/null @@ -1,356 +0,0 @@ -{ - "genesis_time": "2021-11-24T19:33:18.421970195Z", - "chain_id": "ibc-1", - "initial_height": "1", - "consensus_params": { - "block": { - "max_bytes": "22020096", - "max_gas": "-1", - "time_iota_ms": "1000" - }, - "evidence": { - "max_age_num_blocks": "100000", - "max_age_duration": "172800000000000", - "max_bytes": "1048576" - }, - "validator": { - "pub_key_types": [ - "ed25519" - ] - }, - "version": {} - }, - "app_hash": "", - "app_state": { - "auth": { - "params": { - "max_memo_characters": "256", - "tx_sig_limit": "7", - "tx_size_cost_per_byte": "10", - "sig_verify_cost_ed25519": "590", - "sig_verify_cost_secp256k1": "1000" - }, - "accounts": [ - { - "@type": "/cosmos.auth.v1beta1.BaseAccount", - "address": "cosmos1y4e9e3srnuqcul7839cmttc4rjct2gece5y34l", - "pub_key": null, - "account_number": "0", - "sequence": "0" - }, - { - "@type": "/cosmos.auth.v1beta1.BaseAccount", - "address": "cosmos139lr5vxkr94lf85nr4zc4dvmj52qmmn0er2hrf", - "pub_key": null, - "account_number": "0", - "sequence": "0" - }, - { - "@type": "/cosmos.auth.v1beta1.BaseAccount", - "address": "cosmos1zx9v7cdvltsgyzymc6wwd070nzje4duvkzmfem", - "pub_key": null, - "account_number": "0", - "sequence": "0" - } - ] - }, - "authz": { - "authorization": [] - }, - "bank": { - "params": { - "send_enabled": [], - "default_send_enabled": true - }, - "balances": [ - { - "address": "cosmos1zx9v7cdvltsgyzymc6wwd070nzje4duvkzmfem", - "coins": [ - { - "denom": "stake", - "amount": "100000000000" - } - ] - }, - { - "address": "cosmos1y4e9e3srnuqcul7839cmttc4rjct2gece5y34l", - "coins": [ - { 
- "denom": "samoleans", - "amount": "100000000000" - }, - { - "denom": "stake", - "amount": "100000000000" - } - ] - }, - { - "address": "cosmos139lr5vxkr94lf85nr4zc4dvmj52qmmn0er2hrf", - "coins": [ - { - "denom": "samoleans", - "amount": "100000000000" - }, - { - "denom": "stake", - "amount": "100000000000" - } - ] - } - ], - "supply": [ - { - "denom": "samoleans", - "amount": "200000000000" - }, - { - "denom": "stake", - "amount": "300000000000" - } - ], - "denom_metadata": [] - }, - "capability": { - "index": "1", - "owners": [] - }, - "crisis": { - "constant_fee": { - "denom": "stake", - "amount": "1000" - } - }, - "distribution": { - "params": { - "community_tax": "0.020000000000000000", - "base_proposer_reward": "0.010000000000000000", - "bonus_proposer_reward": "0.040000000000000000", - "withdraw_addr_enabled": true - }, - "fee_pool": { - "community_pool": [] - }, - "delegator_withdraw_infos": [], - "previous_proposer": "", - "outstanding_rewards": [], - "validator_accumulated_commissions": [], - "validator_historical_rewards": [], - "validator_current_rewards": [], - "delegator_starting_infos": [], - "validator_slash_events": [] - }, - "evidence": { - "evidence": [] - }, - "feegrant": { - "allowances": [] - }, - "genutil": { - "gen_txs": [ - { - "body": { - "messages": [ - { - "@type": "/cosmos.staking.v1beta1.MsgCreateValidator", - "description": { - "moniker": "ibc-1", - "identity": "", - "website": "", - "security_contact": "", - "details": "" - }, - "commission": { - "rate": "0.100000000000000000", - "max_rate": "0.200000000000000000", - "max_change_rate": "0.010000000000000000" - }, - "min_self_delegation": "1", - "delegator_address": "cosmos1zx9v7cdvltsgyzymc6wwd070nzje4duvkzmfem", - "validator_address": "cosmosvaloper1zx9v7cdvltsgyzymc6wwd070nzje4duvnk0u4g", - "pubkey": { - "@type": "/cosmos.crypto.ed25519.PubKey", - "key": "3v9tEIBIsTBYs45lfoaBa1DtropTwNPjxWRKNyh1unI=" - }, - "value": { - "denom": "stake", - "amount": "100000000000" - } - } - ], - "memo": "bb5010f06487a9d315952e3e57c6dadac4ac425b@192.168.50.214:26656", - "timeout_height": "0", - "extension_options": [], - "non_critical_extension_options": [] - }, - "auth_info": { - "signer_infos": [ - { - "public_key": { - "@type": "/cosmos.crypto.secp256k1.PubKey", - "key": "AobtY30Q9FSuaB83i0DaK8gMU89R5BF9D5EiQZxltYj9" - }, - "mode_info": { - "single": { - "mode": "SIGN_MODE_DIRECT" - } - }, - "sequence": "0" - } - ], - "fee": { - "amount": [], - "gas_limit": "200000", - "payer": "", - "granter": "" - } - }, - "signatures": [ - "xpbYGwOWxrHHn45537a7C3wJJPs/I5Ccx3uq9ZzoEHMyOssoynjbqPsNVNj9ymcLqy4OYVMojbfqmSbMJuVe/Q==" - ] - } - ] - }, - "gov": { - "starting_proposal_id": "1", - "deposits": [], - "votes": [], - "proposals": [], - "deposit_params": { - "min_deposit": [ - { - "denom": "stake", - "amount": "10000000" - } - ], - "max_deposit_period": "200s" - }, - "voting_params": { - "voting_period": "200s" - }, - "tally_params": { - "quorum": "0.334000000000000000", - "threshold": "0.500000000000000000", - "veto_threshold": "0.334000000000000000" - } - }, - "ibc": { - "client_genesis": { - "clients": [], - "clients_consensus": [], - "clients_metadata": [], - "params": { - "allowed_clients": [ - "06-solomachine", - "07-tendermint" - ] - }, - "create_localhost": false, - "next_client_sequence": "0" - }, - "connection_genesis": { - "connections": [], - "client_connection_paths": [], - "next_connection_sequence": "0", - "params": { - "max_expected_time_per_block": "30000000000" - } - }, - "channel_genesis": { - "channels": [], - 
"acknowledgements": [], - "commitments": [], - "receipts": [], - "send_sequences": [], - "recv_sequences": [], - "ack_sequences": [], - "next_channel_sequence": "0" - } - }, - "liquidity": { - "params": { - "pool_types": [ - { - "id": 1, - "name": "StandardLiquidityPool", - "min_reserve_coin_num": 2, - "max_reserve_coin_num": 2, - "description": "Standard liquidity pool with pool price function X/Y, ESPM constraint, and two kinds of reserve coins" - } - ], - "min_init_deposit_amount": "1000000", - "init_pool_coin_mint_amount": "1000000", - "max_reserve_coin_amount": "0", - "pool_creation_fee": [ - { - "denom": "stake", - "amount": "40000000" - } - ], - "swap_fee_rate": "0.003000000000000000", - "withdraw_fee_rate": "0.000000000000000000", - "max_order_amount_ratio": "0.100000000000000000", - "unit_batch_height": 1, - "circuit_breaker_enabled": false - }, - "pool_records": [] - }, - "mint": { - "minter": { - "inflation": "0.130000000000000000", - "annual_provisions": "0.000000000000000000" - }, - "params": { - "mint_denom": "stake", - "inflation_rate_change": "0.130000000000000000", - "inflation_max": "0.200000000000000000", - "inflation_min": "0.070000000000000000", - "goal_bonded": "0.670000000000000000", - "blocks_per_year": "6311520" - } - }, - "packetfowardmiddleware": { - "params": { - "fee_percentage": "0.000000000000000000" - } - }, - "params": null, - "slashing": { - "params": { - "signed_blocks_window": "100", - "min_signed_per_window": "0.500000000000000000", - "downtime_jail_duration": "600s", - "slash_fraction_double_sign": "0.050000000000000000", - "slash_fraction_downtime": "0.010000000000000000" - }, - "signing_infos": [], - "missed_blocks": [] - }, - "staking": { - "params": { - "unbonding_time": "1814400s", - "max_validators": 100, - "max_entries": 7, - "historical_entries": 10000, - "bond_denom": "stake" - }, - "last_total_power": "0", - "last_validator_powers": [], - "validators": [], - "delegations": [], - "unbonding_delegations": [], - "redelegations": [], - "exported": false - }, - "transfer": { - "port_id": "transfer", - "denom_traces": [], - "params": { - "send_enabled": true, - "receive_enabled": true - } - }, - "upgrade": {}, - "vesting": {} - } -} \ No newline at end of file diff --git a/ci/chains/gaia/v6.0.0/ibc-1/config/gentx/gentx-bb5010f06487a9d315952e3e57c6dadac4ac425b.json b/ci/chains/gaia/v6.0.0/ibc-1/config/gentx/gentx-bb5010f06487a9d315952e3e57c6dadac4ac425b.json deleted file mode 100644 index 74f5f94556..0000000000 --- a/ci/chains/gaia/v6.0.0/ibc-1/config/gentx/gentx-bb5010f06487a9d315952e3e57c6dadac4ac425b.json +++ /dev/null @@ -1 +0,0 @@ 
-{"body":{"messages":[{"@type":"/cosmos.staking.v1beta1.MsgCreateValidator","description":{"moniker":"ibc-1","identity":"","website":"","security_contact":"","details":""},"commission":{"rate":"0.100000000000000000","max_rate":"0.200000000000000000","max_change_rate":"0.010000000000000000"},"min_self_delegation":"1","delegator_address":"cosmos1zx9v7cdvltsgyzymc6wwd070nzje4duvkzmfem","validator_address":"cosmosvaloper1zx9v7cdvltsgyzymc6wwd070nzje4duvnk0u4g","pubkey":{"@type":"/cosmos.crypto.ed25519.PubKey","key":"3v9tEIBIsTBYs45lfoaBa1DtropTwNPjxWRKNyh1unI="},"value":{"denom":"stake","amount":"100000000000"}}],"memo":"bb5010f06487a9d315952e3e57c6dadac4ac425b@192.168.50.214:26656","timeout_height":"0","extension_options":[],"non_critical_extension_options":[]},"auth_info":{"signer_infos":[{"public_key":{"@type":"/cosmos.crypto.secp256k1.PubKey","key":"AobtY30Q9FSuaB83i0DaK8gMU89R5BF9D5EiQZxltYj9"},"mode_info":{"single":{"mode":"SIGN_MODE_DIRECT"}},"sequence":"0"}],"fee":{"amount":[],"gas_limit":"200000","payer":"","granter":""}},"signatures":["xpbYGwOWxrHHn45537a7C3wJJPs/I5Ccx3uq9ZzoEHMyOssoynjbqPsNVNj9ymcLqy4OYVMojbfqmSbMJuVe/Q=="]} diff --git a/ci/chains/gaia/v6.0.0/ibc-1/config/node_key.json b/ci/chains/gaia/v6.0.0/ibc-1/config/node_key.json deleted file mode 100644 index 4649f65656..0000000000 --- a/ci/chains/gaia/v6.0.0/ibc-1/config/node_key.json +++ /dev/null @@ -1 +0,0 @@ -{"priv_key":{"type":"tendermint/PrivKeyEd25519","value":"aqiLE2A0F5uKvpi0NoZTd+SN0hU1sJdb/ZecNbkmv9q6yo1iuGVhveMMsAnzrPmox4lyLlgINPny0nOhizn2Zw=="}} \ No newline at end of file diff --git a/ci/chains/gaia/v6.0.0/ibc-1/config/priv_validator_key.json b/ci/chains/gaia/v6.0.0/ibc-1/config/priv_validator_key.json deleted file mode 100644 index 807e3970c2..0000000000 --- a/ci/chains/gaia/v6.0.0/ibc-1/config/priv_validator_key.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "address": "8B39C329C83C7DE2A2F345C384B36F05A84F2AC0", - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "3v9tEIBIsTBYs45lfoaBa1DtropTwNPjxWRKNyh1unI=" - }, - "priv_key": { - "type": "tendermint/PrivKeyEd25519", - "value": "GBxwdlqDA1bc4mgTEtS/+NMxRXOucVfeOlFtaheA6YXe/20QgEixMFizjmV+hoFrUO2uilPA0+PFZEo3KHW6cg==" - } -} \ No newline at end of file diff --git a/ci/chains/gaia/v6.0.0/ibc-1/keyring-test/118acf61acfae082089bc69ce6bfcf98a59ab78c.address b/ci/chains/gaia/v6.0.0/ibc-1/keyring-test/118acf61acfae082089bc69ce6bfcf98a59ab78c.address deleted file mode 100644 index 7230415964..0000000000 --- a/ci/chains/gaia/v6.0.0/ibc-1/keyring-test/118acf61acfae082089bc69ce6bfcf98a59ab78c.address +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0xMS0yNCAxNDozMzoxOS41MDEyNDE5ODMgLTA1MDAgRVNUIG09KzAuMDY5NzExMzQzIiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiU25yZDdpRkliTkpYZ1c5RyJ9.lZtxW-Goo8loYIEykGq-NVYIohLZxK2FeO3jlQnon1I3HVi_3UCTGw.JPk6bE9slrEzRd1Y.DbE0LNFkwPCjhaIpHiVYywZRMkR5cs1Q1ma9L_bc1dqflzYG2ZtdkPpgE02e92zdkjRCaOz-U2l4k4AH95Li6rzP7p61XiR-RFo7qt-YIaOZvRoAyNfoCdZyO26JrzAX2b1hcP3re_WYxPz3k8hArJE_hNOdR6hxC2EopEC5CbCfXqilaPkEgLePCJKrqbMVW3vzcAVO1fjbn8pxVxvld3H26BB31ZVuDHR_e6upKg7XDyFq6F9cpbWG.I9JzJS5586b9eQyGVEmHbg \ No newline at end of file diff --git a/ci/chains/gaia/v6.0.0/ibc-1/keyring-test/25725cc6039f018e7fc78971b5af151cb0b52338.address b/ci/chains/gaia/v6.0.0/ibc-1/keyring-test/25725cc6039f018e7fc78971b5af151cb0b52338.address deleted file mode 100644 index 74a2a9c79a..0000000000 --- a/ci/chains/gaia/v6.0.0/ibc-1/keyring-test/25725cc6039f018e7fc78971b5af151cb0b52338.address +++ /dev/null @@ -1 +0,0 @@ 
-eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0xMS0yNCAxNDozMzoyMC41Nzc2NjE0IC0wNTAwIEVTVCBtPSswLjA2MjE1NTkzMCIsImVuYyI6IkEyNTZHQ00iLCJwMmMiOjgxOTIsInAycyI6IjdVMmFaa01mZmlDazJWVlUifQ._Eam3zPOJ_9z31QMf2vGarrSCy-8OO6R8V3oiFPoPLJWs9kDLZs4KQ.G1a_5-gTtaFLTzNe.8e8vQWKINIYpXRg2sEW2QNrh2AhiyM39U5lqDQoLmeaozfle33Yyl2b-0eRYvOxUL9KoeUpLGtOa6ZoWAjla94TdUfWbUzHegNkP8-ibRD-u1LoHizRzIBHsTi5QoVdQGVJqxW22vb23t3OVjH2VVrzsQDrwGliFMR-nQT3GccTO6LowDGa7atVjJnc0qyEsrZ9NqvBJRM0A1N3ns0_MTCVIFlRCQr2mQ8j4NZmr8e-4Ww.wsvyHcDA3JyxpHTO9El4kA \ No newline at end of file diff --git a/ci/chains/gaia/v6.0.0/ibc-1/keyring-test/897e3a30d6196bf49e931d458ab59b95140dee6f.address b/ci/chains/gaia/v6.0.0/ibc-1/keyring-test/897e3a30d6196bf49e931d458ab59b95140dee6f.address deleted file mode 100644 index 47b98bfec9..0000000000 --- a/ci/chains/gaia/v6.0.0/ibc-1/keyring-test/897e3a30d6196bf49e931d458ab59b95140dee6f.address +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0xMS0yNCAxNDozMzoyMS42NjQ0NTUxOTMgLTA1MDAgRVNUIG09KzAuMDczNjg5OTMxIiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiM09NSHpGS2ZzcnRZcUpiaCJ9.JKnoJPHUxymhduvxIJ9Y3uIpjlDXAhdzM0s6h2V2YwkL1PKPr0MJWA.MVCYrgojv-yJgZe8.r_z_gW4tANrbjpRj4k37TOxYrm3mHs7aJAwHtW6iVgOHhLljrcVJP0JdoqzvRYZpDxpayIovEFlEXpnopVjgZV0XpqJZoLmjFfaP2yOqbsrSOk7j86CEZPoGxLZqThL7IUQ1Fji3jYoplTSoOfaYQS40fBQgoLBgv4ZD_Vh2FEa45fP6-PEzHWo0-_kMyWxWXMM6KbzGDaN02aU8AAThF98iUo1GHHGCzjuRGW1OyvuHBR198nk.x1JgVMjEXbH5qEZf1-tNfg \ No newline at end of file diff --git a/ci/chains/gaia/v6.0.0/ibc-1/keyring-test/user.info b/ci/chains/gaia/v6.0.0/ibc-1/keyring-test/user.info deleted file mode 100644 index e53db9c699..0000000000 --- a/ci/chains/gaia/v6.0.0/ibc-1/keyring-test/user.info +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0xMS0yNCAxNDozMzoyMC41NzIwMjI0MzggLTA1MDAgRVNUIG09KzAuMDU2NTE2OTY2IiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiNmhYeVNDaXNFNDRmYU50NSJ9.yXnz86KYtSpKGF9_xqy80tu_X3j9azA50Gy0IlYUkDFJzhOBt8J85w.hWiGnawDFEJ21SRW.Pmkw-7YANEweAfCoO4cwwiiuaRvUcRReXkk3MYqoSctKKJc5M1STXIYhxKq8R1nxhMeX6bzdRpJwmVcWU6wEUPBgNnJENykxDwxpxisMsZn1orL3ySc1r60Yh7-AgriYoLXfdyBcHM3xi0wglQV9cRqxNhp6cB6qM0YP3WIfuW48yh5k-1Ke3SDrEOLHbAHMLR_KjCnI7NuyPFvrL-FbcyNsYNkI3NIJ8uDyMtaVxNwB-8C2WuIBmPae47iqAG4YXhAxVG7XXYmYKmQH08EvGsHiXB4IL6Omr2VHYKvSZIVIvzWX9W1UwlYpCIu_hf1yPEbBE0287_3Ymd06J2ZUB5R9Gz7wP8w.6dnU167tXcmROvvJnZWNxw \ No newline at end of file diff --git a/ci/chains/gaia/v6.0.0/ibc-1/keyring-test/user2.info b/ci/chains/gaia/v6.0.0/ibc-1/keyring-test/user2.info deleted file mode 100644 index 097d150a78..0000000000 --- a/ci/chains/gaia/v6.0.0/ibc-1/keyring-test/user2.info +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0xMS0yNCAxNDozMzoyMS42NTM4MzY1MDkgLTA1MDAgRVNUIG09KzAuMDYzMDcxMjMyIiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiUHJqSDV5OFFueHQwRDRpaSJ9.jcO3HbJ7y45QeK4grQ5oET_W8LMAwcCAhjuji_MH4b7a8gwVGhNdiQ.AncSbV6zoozvh9B2._VmB-1kpDhTFQkcj3p4vYjO4r8PP_hUBAiaTKA9MMWDhc6wJgUbZe4ETgAic8KpVsD6MRIEwtaUPZQ5A6OJILNsojKpEOw1bZlWGVCugvOvX_yn6bGwjDWd3hQ6m1NB7Pq9AqL7LMQ4Vq5PhoHyKqprJVeIVnq8emTMQsnM3jF5xJEnVNRixAhaA7UJQkXZrKyfmsUmDSLyCS647TNLnjltQLx_wBO08gggYSkEavsWs9klWyqNF7_Yil8GNKnCtOV1EDJfJBUW7k3bDb9KkNtqVRgmQLBdrQUQKQHeKyR9ufIXasjsuF_uuwyRYZfEcRGu-4JaL1WDkbkBBQCCAebeZfCyxYHSa.IUZFTm6h9sgvk8enEW-7Lw \ No newline at end of file diff --git a/ci/chains/gaia/v6.0.0/ibc-1/keyring-test/validator.info b/ci/chains/gaia/v6.0.0/ibc-1/keyring-test/validator.info deleted file mode 100644 index 5284d5847b..0000000000 --- 
a/ci/chains/gaia/v6.0.0/ibc-1/keyring-test/validator.info +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0xMS0yNCAxNDozMzoxOS40OTM2MzgxNDUgLTA1MDAgRVNUIG09KzAuMDYyMTA3NTAxIiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiM3k0NDJvdU5PemotQmtaaCJ9.AypJs2QRWNAionV2YbpSj4iMUlXK6ve5AbW6dnqsKw_J_CHcjqWzig.WDpu5XC5HpWC43r4._f8eYLRJEFjyYTrAZS0N6dCyeEwpz-is9_1kjHmfWgVw5PYZxVrdI83S4t12Lh_jRhk2REPDVMbiG5-E0dEXNuV6o1tymAWZT1Vkw9EbzS-J51hzeDt4b-SvIe7XUJqwE67smbK22_o6dW8AcEd10p96jIqzO9jjoJQ_KXkWmYKgOv561K40wLew8thC3bdyMpwB50z0aywSTPHmfzBcI_ugni_RyIqtvLREraDHBcTAq9gwQTESKDbhtmOdvSUez9GBHhbHHf-95qz0d7VnuJXTg4h6dOwo2IF5dK5KENw9Ry_61nTvHTV3-tkh826gWWg4MkXjZGdQ5H-a0age-wN8N8oiJeIiE6WZXrHL9UpdwGX8.d7VnQTd1I5Ct5IcwkXbYEg \ No newline at end of file diff --git a/ci/chains/gaia/v6.0.0/ibc-1/user2_seed.json b/ci/chains/gaia/v6.0.0/ibc-1/user2_seed.json deleted file mode 100644 index 44a4a4ab38..0000000000 --- a/ci/chains/gaia/v6.0.0/ibc-1/user2_seed.json +++ /dev/null @@ -1 +0,0 @@ -{"name":"user2","type":"local","address":"cosmos139lr5vxkr94lf85nr4zc4dvmj52qmmn0er2hrf","pubkey":"{\"@type\":\"/cosmos.crypto.secp256k1.PubKey\",\"key\":\"AquM1jLXtEDMVXLMvXr8Ny/lDySZ8GbJdQemhfRewpnA\"}","mnemonic":"cousin group crucial ready save drama piece time ordinary language one mind begin sound clever express broom limb foil size choose salute lazy gold"} diff --git a/ci/chains/gaia/v6.0.0/ibc-1/user_seed.json b/ci/chains/gaia/v6.0.0/ibc-1/user_seed.json deleted file mode 100644 index 6c2ad92777..0000000000 --- a/ci/chains/gaia/v6.0.0/ibc-1/user_seed.json +++ /dev/null @@ -1 +0,0 @@ -{"name":"user","type":"local","address":"cosmos1y4e9e3srnuqcul7839cmttc4rjct2gece5y34l","pubkey":"{\"@type\":\"/cosmos.crypto.secp256k1.PubKey\",\"key\":\"A8c8vRcad30nDg3JJV8Iczt5Jz+wmaJYAwHJ3R18whn+\"}","mnemonic":"horn wait grace stool course spot one inject gloom fat crime usual horn kitten canoe glove foam cake reopen oblige silver favorite uniform cry"} diff --git a/ci/chains/gaia/v6.0.0/ibc-1/validator_seed.json b/ci/chains/gaia/v6.0.0/ibc-1/validator_seed.json deleted file mode 100644 index 13fe0a134c..0000000000 --- a/ci/chains/gaia/v6.0.0/ibc-1/validator_seed.json +++ /dev/null @@ -1 +0,0 @@ -{"name":"validator","type":"local","address":"cosmos1zx9v7cdvltsgyzymc6wwd070nzje4duvkzmfem","pubkey":"{\"@type\":\"/cosmos.crypto.secp256k1.PubKey\",\"key\":\"AobtY30Q9FSuaB83i0DaK8gMU89R5BF9D5EiQZxltYj9\"}","mnemonic":"latin trial chronic shadow sugar raw clay village face hurry world melody choose escape post angry casino scale unit emotion beyond cheese shiver addict"} diff --git a/ci/chains/gaia/v7.0.1/ibc-0/config/addrbook.json b/ci/chains/gaia/v7.0.1/ibc-0/config/addrbook.json deleted file mode 100644 index 06fb719cd9..0000000000 --- a/ci/chains/gaia/v7.0.1/ibc-0/config/addrbook.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "key": "f79fd5952365514d9426b513", - "addrs": [] -} \ No newline at end of file diff --git a/ci/chains/gaia/v7.0.1/ibc-0/config/app.toml b/ci/chains/gaia/v7.0.1/ibc-0/config/app.toml deleted file mode 100644 index 7585d6e7ca..0000000000 --- a/ci/chains/gaia/v7.0.1/ibc-0/config/app.toml +++ /dev/null @@ -1,196 +0,0 @@ -# This is a TOML config file. -# For more information, see https://github.com/toml-lang/toml - -############################################################################### -### Base Configuration ### -############################################################################### - -# The minimum gas prices a validator is willing to accept for processing a -# transaction. 
A transaction's fees must meet the minimum of any denomination -# specified in this config (e.g. 0.25token1;0.0001token2). -minimum-gas-prices = "" - -# default: the last 100 states are kept in addition to every 500th state; pruning at 10 block intervals -# nothing: all historic states will be saved, nothing will be deleted (i.e. archiving node) -# everything: all saved states will be deleted, storing only the current and previous state; pruning at 10 block intervals -# custom: allow pruning options to be manually specified through 'pruning-keep-recent', 'pruning-keep-every', and 'pruning-interval' -pruning = "default" - -# These are applied if and only if the pruning strategy is custom. -pruning-keep-recent = "0" -pruning-keep-every = "0" -pruning-interval = "0" - -# HaltHeight contains a non-zero block height at which a node will gracefully -# halt and shutdown that can be used to assist upgrades and testing. -# -# Note: Commitment of state will be attempted on the corresponding block. -halt-height = 0 - -# HaltTime contains a non-zero minimum block time (in Unix seconds) at which -# a node will gracefully halt and shutdown that can be used to assist upgrades -# and testing. -# -# Note: Commitment of state will be attempted on the corresponding block. -halt-time = 0 - -# MinRetainBlocks defines the minimum block height offset from the current -# block being committed, such that all blocks past this offset are pruned -# from Tendermint. It is used as part of the process of determining the -# ResponseCommit.RetainHeight value during ABCI Commit. A value of 0 indicates -# that no blocks should be pruned. -# -# This configuration value is only responsible for pruning Tendermint blocks. -# It has no bearing on application state pruning which is determined by the -# "pruning-*" configurations. -# -# Note: Tendermint block pruning is dependant on this parameter in conunction -# with the unbonding (safety threshold) period, state pruning and state sync -# snapshot parameters to determine the correct minimum value of -# ResponseCommit.RetainHeight. -min-retain-blocks = 0 - -# InterBlockCache enables inter-block caching. -inter-block-cache = true - -# IndexEvents defines the set of events in the form {eventType}.{attributeKey}, -# which informs Tendermint what to index. If empty, all events will be indexed. -# -# Example: -# ["message.sender", "message.recipient"] -index-events = [] - -# IavlCacheSize set the size of the iavl tree cache. -# Default cache size is 50mb. -iavl-cache-size = 781250 - -############################################################################### -### Telemetry Configuration ### -############################################################################### - -[telemetry] - -# Prefixed with keys to separate services. -service-name = "" - -# Enabled enables the application telemetry functionality. When enabled, -# an in-memory sink is also enabled by default. Operators may also enabled -# other sinks such as Prometheus. -enabled = false - -# Enable prefixing gauge values with hostname. -enable-hostname = false - -# Enable adding hostname to labels. -enable-hostname-label = false - -# Enable adding service to labels. -enable-service-label = false - -# PrometheusRetentionTime, when positive, enables a Prometheus metrics sink. -prometheus-retention-time = 0 - -# GlobalLabels defines a global set of name/value label tuples applied to all -# metrics emitted using the wrapper functions defined in telemetry package. 
-# -# Example: -# [["chain_id", "cosmoshub-1"]] -global-labels = [ -] - -############################################################################### -### API Configuration ### -############################################################################### - -[api] - -# Enable defines if the API server should be enabled. -enable = false - -# Swagger defines if swagger documentation should automatically be registered. -swagger = false - -# Address defines the API server to listen on. -address = "tcp://0.0.0.0:1317" - -# MaxOpenConnections defines the number of maximum open connections. -max-open-connections = 1000 - -# RPCReadTimeout defines the Tendermint RPC read timeout (in seconds). -rpc-read-timeout = 10 - -# RPCWriteTimeout defines the Tendermint RPC write timeout (in seconds). -rpc-write-timeout = 0 - -# RPCMaxBodyBytes defines the Tendermint maximum response body (in bytes). -rpc-max-body-bytes = 1000000 - -# EnableUnsafeCORS defines if CORS should be enabled (unsafe - use it at your own risk). -enabled-unsafe-cors = false - -############################################################################### -### Rosetta Configuration ### -############################################################################### - -[rosetta] - -# Enable defines if the Rosetta API server should be enabled. -enable = false - -# Address defines the Rosetta API server to listen on. -address = ":8080" - -# Network defines the name of the blockchain that will be returned by Rosetta. -blockchain = "app" - -# Network defines the name of the network that will be returned by Rosetta. -network = "network" - -# Retries defines the number of retries when connecting to the node before failing. -retries = 3 - -# Offline defines if Rosetta server should run in offline mode. -offline = false - -############################################################################### -### gRPC Configuration ### -############################################################################### - -[grpc] - -# Enable defines if the gRPC server should be enabled. -enable = true - -# Address defines the gRPC server address to bind to. -address = "0.0.0.0:9090" - -############################################################################### -### gRPC Web Configuration ### -############################################################################### - -[grpc-web] - -# GRPCWebEnable defines if the gRPC-web should be enabled. -# NOTE: gRPC must also be enabled, otherwise, this configuration is a no-op. -enable = false - -# Address defines the gRPC-web server address to bind to. -address = "0.0.0.0:9091" - -# EnableUnsafeCORS defines if CORS should be enabled (unsafe - use it at your own risk). -enable-unsafe-cors = false - -############################################################################### -### State Sync Configuration ### -############################################################################### - -# State sync snapshots allow other nodes to rapidly join the network without replaying historical -# blocks, instead downloading and applying a snapshot of the application state at a given height. -[state-sync] - -# snapshot-interval specifies the block interval at which local state sync snapshots are -# taken (0 to disable). Must be a multiple of pruning-keep-every. -snapshot-interval = 1000 - -# snapshot-keep-recent specifies the number of recent snapshots to keep and serve (0 to keep all). 
-snapshot-keep-recent = 10 diff --git a/ci/chains/gaia/v7.0.1/ibc-0/config/client.toml b/ci/chains/gaia/v7.0.1/ibc-0/config/client.toml deleted file mode 100644 index 222695a3f8..0000000000 --- a/ci/chains/gaia/v7.0.1/ibc-0/config/client.toml +++ /dev/null @@ -1,17 +0,0 @@ -# This is a TOML config file. -# For more information, see https://github.com/toml-lang/toml - -############################################################################### -### Client Configuration ### -############################################################################### - -# The network chain ID -chain-id = "" -# The keyring's backend, where the keys are stored (os|file|kwallet|pass|test|memory) -keyring-backend = "os" -# CLI output format (text|json) -output = "text" -# : to Tendermint RPC interface for this chain -node = "tcp://localhost:26657" -# Transaction broadcasting mode (sync|async|block) -broadcast-mode = "sync" diff --git a/ci/chains/gaia/v7.0.1/ibc-0/config/config.toml b/ci/chains/gaia/v7.0.1/ibc-0/config/config.toml deleted file mode 100644 index 473abd0295..0000000000 --- a/ci/chains/gaia/v7.0.1/ibc-0/config/config.toml +++ /dev/null @@ -1,428 +0,0 @@ -# This is a TOML config file. -# For more information, see https://github.com/toml-lang/toml - -# NOTE: Any path below can be absolute (e.g. "/var/myawesomeapp/data") or -# relative to the home directory (e.g. "data"). The home directory is -# "$HOME/.tendermint" by default, but could be changed via $TMHOME env variable -# or --home cmd flag. - -####################################################################### -### Main Base Config Options ### -####################################################################### - -# TCP or UNIX socket address of the ABCI application, -# or the name of an ABCI application compiled in with the Tendermint binary -proxy_app = "tcp://127.0.0.1:26658" - -# A custom human readable name for this node -moniker = "ibc-0" - -# If this node is many blocks behind the tip of the chain, FastSync -# allows them to catchup quickly by downloading blocks in parallel -# and verifying their commits -fast_sync = true - -# Database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb -# * goleveldb (github.com/syndtr/goleveldb - most popular implementation) -# - pure go -# - stable -# * cleveldb (uses levigo wrapper) -# - fast -# - requires gcc -# - use cleveldb build tag (go build -tags cleveldb) -# * boltdb (uses etcd's fork of bolt - github.com/etcd-io/bbolt) -# - EXPERIMENTAL -# - may be faster is some use-cases (random reads - indexer) -# - use boltdb build tag (go build -tags boltdb) -# * rocksdb (uses github.com/tecbot/gorocksdb) -# - EXPERIMENTAL -# - requires gcc -# - use rocksdb build tag (go build -tags rocksdb) -# * badgerdb (uses github.com/dgraph-io/badger) -# - EXPERIMENTAL -# - use badgerdb build tag (go build -tags badgerdb) -db_backend = "goleveldb" - -# Database directory -db_dir = "data" - -# Output level for logging, including package level options -log_level = "info" - -# Output format: 'plain' (colored text) or 'json' -log_format = "plain" - -##### additional base config options ##### - -# Path to the JSON file containing the initial validator set and other meta data -genesis_file = "config/genesis.json" - -# Path to the JSON file containing the private key to use as a validator in the consensus protocol -priv_validator_key_file = "config/priv_validator_key.json" - -# Path to the JSON file containing the last sign state of a validator -priv_validator_state_file = 
"data/priv_validator_state.json" - -# TCP or UNIX socket address for Tendermint to listen on for -# connections from an external PrivValidator process -priv_validator_laddr = "" - -# Path to the JSON file containing the private key to use for node authentication in the p2p protocol -node_key_file = "config/node_key.json" - -# Mechanism to connect to the ABCI application: socket | grpc -abci = "socket" - -# If true, query the ABCI app on connecting to a new peer -# so the app can decide if we should keep the connection or not -filter_peers = false - - -####################################################################### -### Advanced Configuration Options ### -####################################################################### - -####################################################### -### RPC Server Configuration Options ### -####################################################### -[rpc] - -# TCP or UNIX socket address for the RPC server to listen on -laddr = "tcp://0.0.0.0:26657" - -# A list of origins a cross-domain request can be executed from -# Default value '[]' disables cors support -# Use '["*"]' to allow any origin -cors_allowed_origins = [] - -# A list of methods the client is allowed to use with cross-domain requests -cors_allowed_methods = ["HEAD", "GET", "POST", ] - -# A list of non simple headers the client is allowed to use with cross-domain requests -cors_allowed_headers = ["Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time", ] - -# TCP or UNIX socket address for the gRPC server to listen on -# NOTE: This server only supports /broadcast_tx_commit -grpc_laddr = "" - -# Maximum number of simultaneous connections. -# Does not include RPC (HTTP&WebSocket) connections. See max_open_connections -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} -# 1024 - 40 - 10 - 50 = 924 = ~900 -grpc_max_open_connections = 900 - -# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool -unsafe = false - -# Maximum number of simultaneous connections (including WebSocket). -# Does not include gRPC connections. See grpc_max_open_connections -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} -# 1024 - 40 - 10 - 50 = 924 = ~900 -max_open_connections = 900 - -# Maximum number of unique clientIDs that can /subscribe -# If you're using /broadcast_tx_commit, set to the estimated maximum number -# of broadcast_tx_commit calls per block. -max_subscription_clients = 100 - -# Maximum number of unique queries a given client can /subscribe to -# If you're using GRPC (or Local RPC client) and /broadcast_tx_commit, set to -# the estimated # maximum number of broadcast_tx_commit calls per block. -max_subscriptions_per_client = 5 - -# Experimental parameter to specify the maximum number of events a node will -# buffer, per subscription, before returning an error and closing the -# subscription. Must be set to at least 100, but higher values will accommodate -# higher event throughput rates (and will use more memory). -experimental_subscription_buffer_size = 200 - -# Experimental parameter to specify the maximum number of RPC responses that -# can be buffered per WebSocket client. 
If clients cannot read from the -# WebSocket endpoint fast enough, they will be disconnected, so increasing this -# parameter may reduce the chances of them being disconnected (but will cause -# the node to use more memory). -# -# Must be at least the same as "experimental_subscription_buffer_size", -# otherwise connections could be dropped unnecessarily. This value should -# ideally be somewhat higher than "experimental_subscription_buffer_size" to -# accommodate non-subscription-related RPC responses. -experimental_websocket_write_buffer_size = 200 - -# If a WebSocket client cannot read fast enough, at present we may -# silently drop events instead of generating an error or disconnecting the -# client. -# -# Enabling this experimental parameter will cause the WebSocket connection to -# be closed instead if it cannot read fast enough, allowing for greater -# predictability in subscription behaviour. -experimental_close_on_slow_client = false - -# How long to wait for a tx to be committed during /broadcast_tx_commit. -# WARNING: Using a value larger than 10s will result in increasing the -# global HTTP write timeout, which applies to all connections and endpoints. -# See https://github.com/tendermint/tendermint/issues/3435 -timeout_broadcast_tx_commit = "10s" - -# Maximum size of request body, in bytes -max_body_bytes = 1000000 - -# Maximum size of request header, in bytes -max_header_bytes = 1048576 - -# The path to a file containing certificate that is used to create the HTTPS server. -# Might be either absolute path or path related to Tendermint's config directory. -# If the certificate is signed by a certificate authority, -# the certFile should be the concatenation of the server's certificate, any intermediates, -# and the CA's certificate. -# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. -# Otherwise, HTTP server is run. -tls_cert_file = "" - -# The path to a file containing matching private key that is used to create the HTTPS server. -# Might be either absolute path or path related to Tendermint's config directory. -# NOTE: both tls-cert-file and tls-key-file must be present for Tendermint to create HTTPS server. -# Otherwise, HTTP server is run. -tls_key_file = "" - -# pprof listen address (https://golang.org/pkg/net/http/pprof) -pprof_laddr = "localhost:6060" - -####################################################### -### P2P Configuration Options ### -####################################################### -[p2p] - -# Address to listen for incoming connections -laddr = "tcp://0.0.0.0:26656" - -# Address to advertise to peers for them to dial -# If empty, will use the same port as the laddr, -# and will introspect on the listener or use UPnP -# to figure out the address. 
ip and port are required -# example: 159.89.10.97:26656 -external_address = "" - -# Comma separated list of seed nodes to connect to -seeds = "" - -# Comma separated list of nodes to keep persistent connections to -persistent_peers = "" - -# UPNP port forwarding -upnp = false - -# Path to address book -addr_book_file = "config/addrbook.json" - -# Set true for strict address routability rules -# Set false for private or local networks -addr_book_strict = true - -# Maximum number of inbound peers -max_num_inbound_peers = 40 - -# Maximum number of outbound peers to connect to, excluding persistent peers -max_num_outbound_peers = 10 - -# List of node IDs, to which a connection will be (re)established ignoring any existing limits -unconditional_peer_ids = "" - -# Maximum pause when redialing a persistent peer (if zero, exponential backoff is used) -persistent_peers_max_dial_period = "0s" - -# Time to wait before flushing messages out on the connection -flush_throttle_timeout = "100ms" - -# Maximum size of a message packet payload, in bytes -max_packet_msg_payload_size = 1024 - -# Rate at which packets can be sent, in bytes/second -send_rate = 5120000 - -# Rate at which packets can be received, in bytes/second -recv_rate = 5120000 - -# Set true to enable the peer-exchange reactor -pex = true - -# Seed mode, in which node constantly crawls the network and looks for -# peers. If another node asks it for addresses, it responds and disconnects. -# -# Does not work if the peer-exchange reactor is disabled. -seed_mode = false - -# Comma separated list of peer IDs to keep private (will not be gossiped to other peers) -private_peer_ids = "" - -# Toggle to disable guard against peers connecting from the same ip. -allow_duplicate_ip = false - -# Peer connection configuration. -handshake_timeout = "20s" -dial_timeout = "3s" - -####################################################### -### Mempool Configuration Option ### -####################################################### -[mempool] - -recheck = true -broadcast = true -wal_dir = "" - -# Maximum number of transactions in the mempool -size = 5000 - -# Limit the total size of all txs in the mempool. -# This only accounts for raw transactions (e.g. given 1MB transactions and -# max_txs_bytes=5MB, mempool will only accept 5 transactions). -max_txs_bytes = 1073741824 - -# Size of the cache (used to filter transactions we saw earlier) in transactions -cache_size = 10000 - -# Do not remove invalid transactions from the cache (default: false) -# Set to true if it's not possible for any invalid transaction to become valid -# again in the future. -keep-invalid-txs-in-cache = false - -# Maximum size of a single transaction. -# NOTE: the max size of a tx transmitted over the network is {max_tx_bytes}. -max_tx_bytes = 1048576 - -# Maximum size of a batch of transactions to send to a peer -# Including space needed by encoding (one varint per transaction). -# XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796 -max_batch_bytes = 0 - -####################################################### -### State Sync Configuration Options ### -####################################################### -[statesync] -# State sync rapidly bootstraps a new node by discovering, fetching, and restoring a state machine -# snapshot from peers instead of fetching and replaying historical blocks. Requires some peers in -# the network to take and serve state machine snapshots. State sync is not attempted if the node -# has any local state (LastBlockHeight > 0). 
The node will have a truncated block history, -# starting from the height of the snapshot. -enable = false - -# RPC servers (comma-separated) for light client verification of the synced state machine and -# retrieval of state data for node bootstrapping. Also needs a trusted height and corresponding -# header hash obtained from a trusted source, and a period during which validators can be trusted. -# -# For Cosmos SDK-based chains, trust_period should usually be about 2/3 of the unbonding time (~2 -# weeks) during which they can be financially punished (slashed) for misbehavior. -rpc_servers = "" -trust_height = 0 -trust_hash = "" -trust_period = "168h0m0s" - -# Time to spend discovering snapshots before initiating a restore. -discovery_time = "15s" - -# Temporary directory for state sync snapshot chunks, defaults to the OS tempdir (typically /tmp). -# Will create a new, randomly named directory within, and remove it when done. -temp_dir = "" - -# The timeout duration before re-requesting a chunk, possibly from a different -# peer (default: 1 minute). -chunk_request_timeout = "10s" - -# The number of concurrent chunk fetchers to run (default: 1). -chunk_fetchers = "4" - -####################################################### -### Fast Sync Configuration Connections ### -####################################################### -[fastsync] - -# Fast Sync version to use: -# 1) "v0" (default) - the legacy fast sync implementation -# 2) "v1" - refactor of v0 version for better testability -# 2) "v2" - complete redesign of v0, optimized for testability & readability -version = "v0" - -####################################################### -### Consensus Configuration Options ### -####################################################### -[consensus] - -wal_file = "data/cs.wal/wal" - -# How long we wait for a proposal block before prevoting nil -timeout_propose = "1s" -# How much timeout_propose increases with each round -timeout_propose_delta = "500ms" -# How long we wait after receiving +2/3 prevotes for “anything” (ie. not a single block or nil) -timeout_prevote = "1s" -# How much the timeout_prevote increases with each round -timeout_prevote_delta = "500ms" -# How long we wait after receiving +2/3 precommits for “anything” (ie. not a single block or nil) -timeout_precommit = "1s" -# How much the timeout_precommit increases with each round -timeout_precommit_delta = "500ms" -# How long we wait after committing a block, before starting on the new -# height (this gives us a chance to receive some more precommits, even -# though we already have +2/3). -timeout_commit = "1s" - -# How many blocks to look back to check existence of the node's consensus votes before joining consensus -# When non-zero, the node will panic upon restart -# if the same consensus key was used to sign {double_sign_check_height} last blocks. -# So, validators should stop the state machine, wait for some blocks, and then restart the state machine to avoid panic. 
-double_sign_check_height = 0 - -# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) -skip_timeout_commit = false - -# EmptyBlocks mode and possible interval between empty blocks -create_empty_blocks = true -create_empty_blocks_interval = "0s" - -# Reactor sleep duration parameters -peer_gossip_sleep_duration = "100ms" -peer_query_maj23_sleep_duration = "2s" - -####################################################### -### Transaction Indexer Configuration Options ### -####################################################### -[tx_index] - -# What indexer to use for transactions -# -# The application will set which txs to index. In some cases a node operator will be able -# to decide which txs to index based on configuration set in the application. -# -# Options: -# 1) "null" -# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). -# - When "kv" is chosen "tx.height" and "tx.hash" will always be indexed. -indexer = "kv" - -####################################################### -### Instrumentation Configuration Options ### -####################################################### -[instrumentation] - -# When true, Prometheus metrics are served under /metrics on -# PrometheusListenAddr. -# Check out the documentation for the list of available metrics. -prometheus = false - -# Address to listen for Prometheus collector(s) connections -prometheus_listen_addr = ":26660" - -# Maximum number of simultaneous connections. -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -max_open_connections = 3 - -# Instrumentation namespace -namespace = "tendermint" diff --git a/ci/chains/gaia/v7.0.1/ibc-0/config/genesis.json b/ci/chains/gaia/v7.0.1/ibc-0/config/genesis.json deleted file mode 100644 index 70889fa4a4..0000000000 --- a/ci/chains/gaia/v7.0.1/ibc-0/config/genesis.json +++ /dev/null @@ -1,375 +0,0 @@ -{ - "genesis_time": "2022-05-06T10:04:49.897046Z", - "chain_id": "ibc-0", - "initial_height": "1", - "consensus_params": { - "block": { - "max_bytes": "22020096", - "max_gas": "-1", - "time_iota_ms": "1000" - }, - "evidence": { - "max_age_num_blocks": "100000", - "max_age_duration": "172800000000000", - "max_bytes": "1048576" - }, - "validator": { - "pub_key_types": [ - "ed25519" - ] - }, - "version": {} - }, - "app_hash": "", - "app_state": { - "auth": { - "params": { - "max_memo_characters": "256", - "tx_sig_limit": "7", - "tx_size_cost_per_byte": "10", - "sig_verify_cost_ed25519": "590", - "sig_verify_cost_secp256k1": "1000" - }, - "accounts": [ - { - "@type": "/cosmos.auth.v1beta1.BaseAccount", - "address": "cosmos16njyvav3vw3pdwqp9r3hrn9s46gam669pd09g9", - "pub_key": null, - "account_number": "0", - "sequence": "0" - }, - { - "@type": "/cosmos.auth.v1beta1.BaseAccount", - "address": "cosmos1zj5weejyt0phez2uml5d4qpc2q0xvlrs47wc6c", - "pub_key": null, - "account_number": "0", - "sequence": "0" - }, - { - "@type": "/cosmos.auth.v1beta1.BaseAccount", - "address": "cosmos1sphll46wcyjw46wsfpf5qgnsjhm826gguh4rcw", - "pub_key": null, - "account_number": "0", - "sequence": "0" - } - ] - }, - "authz": { - "authorization": [] - }, - "bank": { - "params": { - "send_enabled": [], - "default_send_enabled": true - }, - "balances": [ - { - "address": "cosmos1zj5weejyt0phez2uml5d4qpc2q0xvlrs47wc6c", - "coins": [ - { - "denom": "samoleans", - "amount": "100000000000" - }, - { - "denom": "stake", - "amount": "100000000000" - } - ] - }, - { - "address": 
"cosmos1sphll46wcyjw46wsfpf5qgnsjhm826gguh4rcw", - "coins": [ - { - "denom": "stake", - "amount": "100000000000" - } - ] - }, - { - "address": "cosmos16njyvav3vw3pdwqp9r3hrn9s46gam669pd09g9", - "coins": [ - { - "denom": "samoleans", - "amount": "100000000000" - }, - { - "denom": "stake", - "amount": "100000000000" - } - ] - } - ], - "supply": [ - { - "denom": "samoleans", - "amount": "200000000000" - }, - { - "denom": "stake", - "amount": "300000000000" - } - ], - "denom_metadata": [] - }, - "capability": { - "index": "1", - "owners": [] - }, - "crisis": { - "constant_fee": { - "denom": "stake", - "amount": "1000" - } - }, - "distribution": { - "params": { - "community_tax": "0.020000000000000000", - "base_proposer_reward": "0.010000000000000000", - "bonus_proposer_reward": "0.040000000000000000", - "withdraw_addr_enabled": true - }, - "fee_pool": { - "community_pool": [] - }, - "delegator_withdraw_infos": [], - "previous_proposer": "", - "outstanding_rewards": [], - "validator_accumulated_commissions": [], - "validator_historical_rewards": [], - "validator_current_rewards": [], - "delegator_starting_infos": [], - "validator_slash_events": [] - }, - "evidence": { - "evidence": [] - }, - "feegrant": { - "allowances": [] - }, - "genutil": { - "gen_txs": [ - { - "body": { - "messages": [ - { - "@type": "/cosmos.staking.v1beta1.MsgCreateValidator", - "description": { - "moniker": "ibc-0", - "identity": "", - "website": "", - "security_contact": "", - "details": "" - }, - "commission": { - "rate": "0.100000000000000000", - "max_rate": "0.200000000000000000", - "max_change_rate": "0.010000000000000000" - }, - "min_self_delegation": "1", - "delegator_address": "cosmos1sphll46wcyjw46wsfpf5qgnsjhm826gguh4rcw", - "validator_address": "cosmosvaloper1sphll46wcyjw46wsfpf5qgnsjhm826ggerpk5a", - "pubkey": { - "@type": "/cosmos.crypto.ed25519.PubKey", - "key": "DjvU3nA6MHikZs0esilo5NfNi7e3PpAhWtRK/HS8tEA=" - }, - "value": { - "denom": "stake", - "amount": "100000000000" - } - } - ], - "memo": "3494ad7d61f006035641dbcd204dcbdb25904c2b@192.168.1.80:26656", - "timeout_height": "0", - "extension_options": [], - "non_critical_extension_options": [] - }, - "auth_info": { - "signer_infos": [ - { - "public_key": { - "@type": "/cosmos.crypto.secp256k1.PubKey", - "key": "A9JDjNFV9GL3nqTdRRlWjOZXO4MDSqVkcFsN0Q8zsWuC" - }, - "mode_info": { - "single": { - "mode": "SIGN_MODE_DIRECT" - } - }, - "sequence": "0" - } - ], - "fee": { - "amount": [], - "gas_limit": "200000", - "payer": "", - "granter": "" - } - }, - "signatures": [ - "dZpQRC0ydRoumK+s6NQUD+mu2BLxGpsvglEbl2QRMTlIfL2F47an/4lKswZ6dv+SBzKMvAYKzSzcj59lbRVDIA==" - ] - } - ] - }, - "gov": { - "starting_proposal_id": "1", - "deposits": [], - "votes": [], - "proposals": [], - "deposit_params": { - "min_deposit": [ - { - "denom": "stake", - "amount": "10000000" - } - ], - "max_deposit_period": "200s" - }, - "voting_params": { - "voting_period": "200s" - }, - "tally_params": { - "quorum": "0.334000000000000000", - "threshold": "0.500000000000000000", - "veto_threshold": "0.334000000000000000" - } - }, - "ibc": { - "client_genesis": { - "clients": [], - "clients_consensus": [], - "clients_metadata": [], - "params": { - "allowed_clients": [ - "06-solomachine", - "07-tendermint" - ] - }, - "create_localhost": false, - "next_client_sequence": "0" - }, - "connection_genesis": { - "connections": [], - "client_connection_paths": [], - "next_connection_sequence": "0", - "params": { - "max_expected_time_per_block": "30000000000" - } - }, - "channel_genesis": { - "channels": 
[], - "acknowledgements": [], - "commitments": [], - "receipts": [], - "send_sequences": [], - "recv_sequences": [], - "ack_sequences": [], - "next_channel_sequence": "0" - } - }, - "interchainaccounts": { - "controller_genesis_state": { - "active_channels": [], - "interchain_accounts": [], - "ports": [], - "params": { - "controller_enabled": true - } - }, - "host_genesis_state": { - "active_channels": [], - "interchain_accounts": [], - "port": "icahost", - "params": { - "host_enabled": true, - "allow_messages": [] - } - } - }, - "liquidity": { - "params": { - "pool_types": [ - { - "id": 1, - "name": "StandardLiquidityPool", - "min_reserve_coin_num": 2, - "max_reserve_coin_num": 2, - "description": "Standard liquidity pool with pool price function X/Y, ESPM constraint, and two kinds of reserve coins" - } - ], - "min_init_deposit_amount": "1000000", - "init_pool_coin_mint_amount": "1000000", - "max_reserve_coin_amount": "0", - "pool_creation_fee": [ - { - "denom": "stake", - "amount": "40000000" - } - ], - "swap_fee_rate": "0.003000000000000000", - "withdraw_fee_rate": "0.000000000000000000", - "max_order_amount_ratio": "0.100000000000000000", - "unit_batch_height": 1, - "circuit_breaker_enabled": false - }, - "pool_records": [] - }, - "mint": { - "minter": { - "inflation": "0.130000000000000000", - "annual_provisions": "0.000000000000000000" - }, - "params": { - "mint_denom": "stake", - "inflation_rate_change": "0.130000000000000000", - "inflation_max": "0.200000000000000000", - "inflation_min": "0.070000000000000000", - "goal_bonded": "0.670000000000000000", - "blocks_per_year": "6311520" - } - }, - "packetfowardmiddleware": { - "params": { - "fee_percentage": "0.000000000000000000" - } - }, - "params": null, - "slashing": { - "params": { - "signed_blocks_window": "100", - "min_signed_per_window": "0.500000000000000000", - "downtime_jail_duration": "600s", - "slash_fraction_double_sign": "0.050000000000000000", - "slash_fraction_downtime": "0.010000000000000000" - }, - "signing_infos": [], - "missed_blocks": [] - }, - "staking": { - "params": { - "unbonding_time": "1814400s", - "max_validators": 100, - "max_entries": 7, - "historical_entries": 10000, - "bond_denom": "stake" - }, - "last_total_power": "0", - "last_validator_powers": [], - "validators": [], - "delegations": [], - "unbonding_delegations": [], - "redelegations": [], - "exported": false - }, - "transfer": { - "port_id": "transfer", - "denom_traces": [], - "params": { - "send_enabled": true, - "receive_enabled": true - } - }, - "upgrade": {}, - "vesting": {} - } -} \ No newline at end of file diff --git a/ci/chains/gaia/v7.0.1/ibc-0/config/gentx/gentx-3494ad7d61f006035641dbcd204dcbdb25904c2b.json b/ci/chains/gaia/v7.0.1/ibc-0/config/gentx/gentx-3494ad7d61f006035641dbcd204dcbdb25904c2b.json deleted file mode 100644 index c6bed88969..0000000000 --- a/ci/chains/gaia/v7.0.1/ibc-0/config/gentx/gentx-3494ad7d61f006035641dbcd204dcbdb25904c2b.json +++ /dev/null @@ -1 +0,0 @@ 
-{"body":{"messages":[{"@type":"/cosmos.staking.v1beta1.MsgCreateValidator","description":{"moniker":"ibc-0","identity":"","website":"","security_contact":"","details":""},"commission":{"rate":"0.100000000000000000","max_rate":"0.200000000000000000","max_change_rate":"0.010000000000000000"},"min_self_delegation":"1","delegator_address":"cosmos1sphll46wcyjw46wsfpf5qgnsjhm826gguh4rcw","validator_address":"cosmosvaloper1sphll46wcyjw46wsfpf5qgnsjhm826ggerpk5a","pubkey":{"@type":"/cosmos.crypto.ed25519.PubKey","key":"DjvU3nA6MHikZs0esilo5NfNi7e3PpAhWtRK/HS8tEA="},"value":{"denom":"stake","amount":"100000000000"}}],"memo":"3494ad7d61f006035641dbcd204dcbdb25904c2b@192.168.1.80:26656","timeout_height":"0","extension_options":[],"non_critical_extension_options":[]},"auth_info":{"signer_infos":[{"public_key":{"@type":"/cosmos.crypto.secp256k1.PubKey","key":"A9JDjNFV9GL3nqTdRRlWjOZXO4MDSqVkcFsN0Q8zsWuC"},"mode_info":{"single":{"mode":"SIGN_MODE_DIRECT"}},"sequence":"0"}],"fee":{"amount":[],"gas_limit":"200000","payer":"","granter":""}},"signatures":["dZpQRC0ydRoumK+s6NQUD+mu2BLxGpsvglEbl2QRMTlIfL2F47an/4lKswZ6dv+SBzKMvAYKzSzcj59lbRVDIA=="]} diff --git a/ci/chains/gaia/v7.0.1/ibc-0/config/node_key.json b/ci/chains/gaia/v7.0.1/ibc-0/config/node_key.json deleted file mode 100644 index a68b2e4f43..0000000000 --- a/ci/chains/gaia/v7.0.1/ibc-0/config/node_key.json +++ /dev/null @@ -1 +0,0 @@ -{"priv_key":{"type":"tendermint/PrivKeyEd25519","value":"IMD/3+Fiy+a+cJcCpn1YylTJhiqwCnRW7MrGJaxqxWtngftDoTFOGbEXGVsrTAUBG41JjsfjkOmLmUm2mDeYWw=="}} \ No newline at end of file diff --git a/ci/chains/gaia/v7.0.1/ibc-0/config/priv_validator_key.json b/ci/chains/gaia/v7.0.1/ibc-0/config/priv_validator_key.json deleted file mode 100644 index 1e40fbcbd1..0000000000 --- a/ci/chains/gaia/v7.0.1/ibc-0/config/priv_validator_key.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "address": "5C8DC809794222F5F6AE922BB7B1763397F50CC2", - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "DjvU3nA6MHikZs0esilo5NfNi7e3PpAhWtRK/HS8tEA=" - }, - "priv_key": { - "type": "tendermint/PrivKeyEd25519", - "value": "XOm68Zz6f0OCUw1BDUuD2bYP76mx12XBbaHaKxmimOMOO9TecDoweKRmzR6yKWjk182Lt7c+kCFa1Er8dLy0QA==" - } -} \ No newline at end of file diff --git a/ci/chains/gaia/v7.0.1/ibc-0/keyring-test/14a8ece6445bc37c895cdfe8da8038501e667c70.address b/ci/chains/gaia/v7.0.1/ibc-0/keyring-test/14a8ece6445bc37c895cdfe8da8038501e667c70.address deleted file mode 100644 index a6366a58fd..0000000000 --- a/ci/chains/gaia/v7.0.1/ibc-0/keyring-test/14a8ece6445bc37c895cdfe8da8038501e667c70.address +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMi0wNS0wNiAxMjowNDo1My4xODkzODQgKzAyMDAgQ0VTVCBtPSswLjA1MjcyMzU0MCIsImVuYyI6IkEyNTZHQ00iLCJwMmMiOjgxOTIsInAycyI6Im96WmhEVnBJSXZ4RTNUN0YifQ.9TuQlMEndxJpH_l9rlzqmz6_xJMRU8JL1LwuUkgdfY_WtHb75oFQqA.zegihqG91bm2Q-Wu.dXK6_FULn11-n4PxiagGZVLt1rP6agaZTjT5j4PmyVuP3Rl6jW1Vti0mdnFz_ULtj7uXZAPOwC7iSmI_0Ta1DApaxSaTJFO1J2atgg0TrVpQQY-ifjD4duD8r_PWDahxicsbw1TwEWRWwBzrCsuGsEP-2oGIiAWZuBaU4DjbyLpEKkF0UByNy1jAcIkq9uZFht-1hWKqvug00JkEzBq2Oq96xTEy6gUNI3rD5kouDY-WNgUBF5M.3sXBzzHa6KCcqr3w1bUDMw \ No newline at end of file diff --git a/ci/chains/gaia/v7.0.1/ibc-0/keyring-test/806fffd74ec124eae9d0485340227095f6756908.address b/ci/chains/gaia/v7.0.1/ibc-0/keyring-test/806fffd74ec124eae9d0485340227095f6756908.address deleted file mode 100644 index f5596e8975..0000000000 --- a/ci/chains/gaia/v7.0.1/ibc-0/keyring-test/806fffd74ec124eae9d0485340227095f6756908.address +++ /dev/null @@ -1 +0,0 @@ 
-eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMi0wNS0wNiAxMjowNDo1MS4wMDA5MjkgKzAyMDAgQ0VTVCBtPSswLjA3MTg0NDEwMCIsImVuYyI6IkEyNTZHQ00iLCJwMmMiOjgxOTIsInAycyI6IlhvM2JXdXBNSXNScEI1QXgifQ.ndlbfgx_pi8Okmvhy4kxNASy15rGXUPgn25_FTiQrIhxrSdYOiCFGQ.efoaTYk4NOoLvJei.RnHvhAgBZpn_yOES2N8HNBpcK6buXZbDbR5HuYUOVQlr9i0CCZtpij7tlJ52FuOOxjj2qf2NPAqDFq2FMWlm6h4XT_Yd_X2O1uH9EaP2Hyb-FZsXAQDDmJm7Ns6qG4CNV5qXK37z_0GrCnhYxs8en7VxESRSFQBPsKN2cCZ6_pkAENfnK8hehekQsIi7G9ltTuqWXAkl-HL2AvX_kZK4uNkSJyQEAEgCAtm763gI-pDl7v7tLmIRudhI.geHt2gy-LGm-Ukyhp4ppIg \ No newline at end of file diff --git a/ci/chains/gaia/v7.0.1/ibc-0/keyring-test/d4e446759163a216b80128e371ccb0ae91ddeb45.address b/ci/chains/gaia/v7.0.1/ibc-0/keyring-test/d4e446759163a216b80128e371ccb0ae91ddeb45.address deleted file mode 100644 index 17b4394e78..0000000000 --- a/ci/chains/gaia/v7.0.1/ibc-0/keyring-test/d4e446759163a216b80128e371ccb0ae91ddeb45.address +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMi0wNS0wNiAxMjowNDo1Mi4wOTg4OSArMDIwMCBDRVNUIG09KzAuMDYyMzE4Njg1IiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiOE8zbUYtcDI1cG9VM2FOWCJ9.d6Pd5xSMIIzL2BGSu7eZ6XmETqXW9cNbhnO4q0d_QcFonORm9QrEIw.BoBvr9OIe60yDpR-.lAStGSI2Qlwa_DycEKRwVG2ruv1xb5TA0wcF_1XMdorszAu_6n9QwDkikWWA5btcUraaCD9IjJbGpMmp2QBW6mFymwaKngTb8yr4aHkObruhQq_Kl_bB7vckmFDpOzcOBcl_ImQSrFdf-pCFaofRkyley9K7cCSUxD8aLgqy9PDjCd7GEXfIo3NW4s0njxUqpfPMrZdOOz-8ncb7Qsq9NmV3FDk08ezgf0a9L_OJnd7Wuw.3Sdzjy4E62NhjVk-2p7x_w \ No newline at end of file diff --git a/ci/chains/gaia/v7.0.1/ibc-0/keyring-test/user.info b/ci/chains/gaia/v7.0.1/ibc-0/keyring-test/user.info deleted file mode 100644 index 46206e6ef1..0000000000 --- a/ci/chains/gaia/v7.0.1/ibc-0/keyring-test/user.info +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMi0wNS0wNiAxMjowNDo1Mi4wOTM1MjkgKzAyMDAgQ0VTVCBtPSswLjA1Njk1NzE3NiIsImVuYyI6IkEyNTZHQ00iLCJwMmMiOjgxOTIsInAycyI6ImYwMHVnS2FKMmJxV2FyVXUifQ.ErunVpmVMSz4PXguTcg-JXH653Kh0Ay8DR9LsSFbBUYjXOpB5lgr1g.0nDxh8n9F7zdzCI6.MmP_lgaMBJbm0zvwlUjzSyVSJjZlK47yEsWkTgIA_L4PjhpSRXLic5vMF1pBreuI8FGYrV_9QGa_hpZHNFct3YtCGLKz5haw2lXb9O42U-NA5xP_lqC4tBJggaK2o6LB06iE8o-nHU6WqHVNLFsodsq7M0v17p8G4WkCvTqmQeRmWd7HJBfhJBDE7P4jtPfLiw8tX8hGGsMDCGFtalYeVfOpExhPSawjPZ3T7X20dMAmdZfFSjRCm-io540jlo6MoG30oTVpml2RjeYTQO98fqfUcSSDOmdUxQnkAj1ODYlbTrnAORWYp8lPK_ZxgKjmO0DlKruGDshegmLKeXSNBRF-1DdIEO8.jpzhxNdkgpheBstdI_BoLw \ No newline at end of file diff --git a/ci/chains/gaia/v7.0.1/ibc-0/keyring-test/user2.info b/ci/chains/gaia/v7.0.1/ibc-0/keyring-test/user2.info deleted file mode 100644 index 8781cc1ced..0000000000 --- a/ci/chains/gaia/v7.0.1/ibc-0/keyring-test/user2.info +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMi0wNS0wNiAxMjowNDo1My4xODQyMTcgKzAyMDAgQ0VTVCBtPSswLjA0NzU1NTkxMiIsImVuYyI6IkEyNTZHQ00iLCJwMmMiOjgxOTIsInAycyI6Img0VGJoNW1PaWh4Vi1xM0sifQ.ZFXGm6O4wc9k-Z4TT3d9oDnTo8tN0Jd9U64XXBjH0sQvTT-LEpqCCA.pGUBYNikHGTsV817.tsbXA6vDHeiVjoixvpusx2fbznnF34O8kPVTdO2iACNGheU9GFtqZEoC7NENSFyx9l8OlJ6rXNyuBP4G-v65qo2kHwR46vF5stIH_Vsi1uuBjxlzGdbM-i6xMA_hYiHizHTcKwFni6kLKUVm2e6tkg-pbWmekC9JG5wd65UcsbbX8onWWZimNHuI7zy18cw79oSjuOz7LcOJgyGFjLQd1eOf1WloJsqi1GCjpJtIGp60Fp0C4nBN6aQjvsZsY7KE5v6yxIW8jkKLRts0DliqrVJ45OrCBLi2xElu_xA9uUPpDklTtRE-im9uPAFY4fPGtkzZjHhlmef4GFh0j1hULBSwAHwcxp1o.ZhDB2WAdJnquIRiolF8btg \ No newline at end of file diff --git a/ci/chains/gaia/v7.0.1/ibc-0/keyring-test/validator.info b/ci/chains/gaia/v7.0.1/ibc-0/keyring-test/validator.info deleted file mode 100644 index d9eda6ac5a..0000000000 --- 
a/ci/chains/gaia/v7.0.1/ibc-0/keyring-test/validator.info +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMi0wNS0wNiAxMjowNDo1MC45OTQ4NDEgKzAyMDAgQ0VTVCBtPSswLjA2NTc1NTg5NyIsImVuYyI6IkEyNTZHQ00iLCJwMmMiOjgxOTIsInAycyI6ImZLZXdhNERPVVZLb3dvY0cifQ.HOh51oKn3a-uakDKp5AzitIiHc7s1USNaDsl6DoCAZVcnA65tupmGQ.bZEWLUq4BQxGWHfX.Eltt1QI7XgQ-3jz5z1jXY-PlYgKJQxET2eNwknpQpUewCjNNlLbgiIlU4EG-35XlqC4j9beUirAIC-zHuD_Knzk78N2w88CHOKnyJQp4RbaDjZIftHTCtAXm--eAH3sDYWnHm2HGvV-pPRYmsy2aP2KQtBi9npBTvzAMHKcpdqZBVwkii4KzKSiD3OkIgiukceVvp_sMCKDIv3FfoxhIeqakop5LcZCjYkUywV6ArF8uWtWGC4Huy6fR8MkEC7K2G5hSVt6RMKFBfegdKwmZ43Owm0KnUoIZglNloZr-NBcdxAQ39QIPbNS2UpjfhdSAqi5yQcTWGDwC-6myHqwYb1Thgh9Uv1S-hGU0QNW7str6rdjy.-N7Jpvj3HlD6YvRz7kljHg \ No newline at end of file diff --git a/ci/chains/gaia/v7.0.1/ibc-0/user2_seed.json b/ci/chains/gaia/v7.0.1/ibc-0/user2_seed.json deleted file mode 100644 index e0e81ee9bb..0000000000 --- a/ci/chains/gaia/v7.0.1/ibc-0/user2_seed.json +++ /dev/null @@ -1 +0,0 @@ -{"name":"user2","type":"local","address":"cosmos1zj5weejyt0phez2uml5d4qpc2q0xvlrs47wc6c","pubkey":"{\"@type\":\"/cosmos.crypto.secp256k1.PubKey\",\"key\":\"A7T4E+QhhEj1GkBDfiHEYcJZpptfnt9hSlSZMO1DBjky\"}","mnemonic":"amateur copy funny cabin thank aerobic despair actress exist sadness reopen science deny output extend huge motion slow blouse spoon salmon hollow spawn great"} diff --git a/ci/chains/gaia/v7.0.1/ibc-0/user_seed.json b/ci/chains/gaia/v7.0.1/ibc-0/user_seed.json deleted file mode 100644 index 4f732a1532..0000000000 --- a/ci/chains/gaia/v7.0.1/ibc-0/user_seed.json +++ /dev/null @@ -1 +0,0 @@ -{"name":"user","type":"local","address":"cosmos16njyvav3vw3pdwqp9r3hrn9s46gam669pd09g9","pubkey":"{\"@type\":\"/cosmos.crypto.secp256k1.PubKey\",\"key\":\"A/8Q8+2Evq0x+A4MmkkQI/eX1NwcmHmbRgAJTgEQSVU0\"}","mnemonic":"annual usage original ten orbit squirrel physical click hammer bomb inside bulb insane review creek record delay treat law evoke shoe cattle else certain"} diff --git a/ci/chains/gaia/v7.0.1/ibc-0/validator_seed.json b/ci/chains/gaia/v7.0.1/ibc-0/validator_seed.json deleted file mode 100644 index a2dadca940..0000000000 --- a/ci/chains/gaia/v7.0.1/ibc-0/validator_seed.json +++ /dev/null @@ -1 +0,0 @@ -{"name":"validator","type":"local","address":"cosmos1sphll46wcyjw46wsfpf5qgnsjhm826gguh4rcw","pubkey":"{\"@type\":\"/cosmos.crypto.secp256k1.PubKey\",\"key\":\"A9JDjNFV9GL3nqTdRRlWjOZXO4MDSqVkcFsN0Q8zsWuC\"}","mnemonic":"brown dress cage biology skin magnet wire spread cream rookie aspect october neutral sea core panic thrive sample grow tool isolate squirrel mesh actress"} diff --git a/ci/chains/gaia/v7.0.1/ibc-1/config/app.toml b/ci/chains/gaia/v7.0.1/ibc-1/config/app.toml deleted file mode 100644 index 7585d6e7ca..0000000000 --- a/ci/chains/gaia/v7.0.1/ibc-1/config/app.toml +++ /dev/null @@ -1,196 +0,0 @@ -# This is a TOML config file. -# For more information, see https://github.com/toml-lang/toml - -############################################################################### -### Base Configuration ### -############################################################################### - -# The minimum gas prices a validator is willing to accept for processing a -# transaction. A transaction's fees must meet the minimum of any denomination -# specified in this config (e.g. 0.25token1;0.0001token2). 
-minimum-gas-prices = "" - -# default: the last 100 states are kept in addition to every 500th state; pruning at 10 block intervals -# nothing: all historic states will be saved, nothing will be deleted (i.e. archiving node) -# everything: all saved states will be deleted, storing only the current and previous state; pruning at 10 block intervals -# custom: allow pruning options to be manually specified through 'pruning-keep-recent', 'pruning-keep-every', and 'pruning-interval' -pruning = "default" - -# These are applied if and only if the pruning strategy is custom. -pruning-keep-recent = "0" -pruning-keep-every = "0" -pruning-interval = "0" - -# HaltHeight contains a non-zero block height at which a node will gracefully -# halt and shutdown that can be used to assist upgrades and testing. -# -# Note: Commitment of state will be attempted on the corresponding block. -halt-height = 0 - -# HaltTime contains a non-zero minimum block time (in Unix seconds) at which -# a node will gracefully halt and shutdown that can be used to assist upgrades -# and testing. -# -# Note: Commitment of state will be attempted on the corresponding block. -halt-time = 0 - -# MinRetainBlocks defines the minimum block height offset from the current -# block being committed, such that all blocks past this offset are pruned -# from Tendermint. It is used as part of the process of determining the -# ResponseCommit.RetainHeight value during ABCI Commit. A value of 0 indicates -# that no blocks should be pruned. -# -# This configuration value is only responsible for pruning Tendermint blocks. -# It has no bearing on application state pruning which is determined by the -# "pruning-*" configurations. -# -# Note: Tendermint block pruning is dependant on this parameter in conunction -# with the unbonding (safety threshold) period, state pruning and state sync -# snapshot parameters to determine the correct minimum value of -# ResponseCommit.RetainHeight. -min-retain-blocks = 0 - -# InterBlockCache enables inter-block caching. -inter-block-cache = true - -# IndexEvents defines the set of events in the form {eventType}.{attributeKey}, -# which informs Tendermint what to index. If empty, all events will be indexed. -# -# Example: -# ["message.sender", "message.recipient"] -index-events = [] - -# IavlCacheSize set the size of the iavl tree cache. -# Default cache size is 50mb. -iavl-cache-size = 781250 - -############################################################################### -### Telemetry Configuration ### -############################################################################### - -[telemetry] - -# Prefixed with keys to separate services. -service-name = "" - -# Enabled enables the application telemetry functionality. When enabled, -# an in-memory sink is also enabled by default. Operators may also enabled -# other sinks such as Prometheus. -enabled = false - -# Enable prefixing gauge values with hostname. -enable-hostname = false - -# Enable adding hostname to labels. -enable-hostname-label = false - -# Enable adding service to labels. -enable-service-label = false - -# PrometheusRetentionTime, when positive, enables a Prometheus metrics sink. -prometheus-retention-time = 0 - -# GlobalLabels defines a global set of name/value label tuples applied to all -# metrics emitted using the wrapper functions defined in telemetry package. 
-# -# Example: -# [["chain_id", "cosmoshub-1"]] -global-labels = [ -] - -############################################################################### -### API Configuration ### -############################################################################### - -[api] - -# Enable defines if the API server should be enabled. -enable = false - -# Swagger defines if swagger documentation should automatically be registered. -swagger = false - -# Address defines the API server to listen on. -address = "tcp://0.0.0.0:1317" - -# MaxOpenConnections defines the number of maximum open connections. -max-open-connections = 1000 - -# RPCReadTimeout defines the Tendermint RPC read timeout (in seconds). -rpc-read-timeout = 10 - -# RPCWriteTimeout defines the Tendermint RPC write timeout (in seconds). -rpc-write-timeout = 0 - -# RPCMaxBodyBytes defines the Tendermint maximum response body (in bytes). -rpc-max-body-bytes = 1000000 - -# EnableUnsafeCORS defines if CORS should be enabled (unsafe - use it at your own risk). -enabled-unsafe-cors = false - -############################################################################### -### Rosetta Configuration ### -############################################################################### - -[rosetta] - -# Enable defines if the Rosetta API server should be enabled. -enable = false - -# Address defines the Rosetta API server to listen on. -address = ":8080" - -# Network defines the name of the blockchain that will be returned by Rosetta. -blockchain = "app" - -# Network defines the name of the network that will be returned by Rosetta. -network = "network" - -# Retries defines the number of retries when connecting to the node before failing. -retries = 3 - -# Offline defines if Rosetta server should run in offline mode. -offline = false - -############################################################################### -### gRPC Configuration ### -############################################################################### - -[grpc] - -# Enable defines if the gRPC server should be enabled. -enable = true - -# Address defines the gRPC server address to bind to. -address = "0.0.0.0:9090" - -############################################################################### -### gRPC Web Configuration ### -############################################################################### - -[grpc-web] - -# GRPCWebEnable defines if the gRPC-web should be enabled. -# NOTE: gRPC must also be enabled, otherwise, this configuration is a no-op. -enable = false - -# Address defines the gRPC-web server address to bind to. -address = "0.0.0.0:9091" - -# EnableUnsafeCORS defines if CORS should be enabled (unsafe - use it at your own risk). -enable-unsafe-cors = false - -############################################################################### -### State Sync Configuration ### -############################################################################### - -# State sync snapshots allow other nodes to rapidly join the network without replaying historical -# blocks, instead downloading and applying a snapshot of the application state at a given height. -[state-sync] - -# snapshot-interval specifies the block interval at which local state sync snapshots are -# taken (0 to disable). Must be a multiple of pruning-keep-every. -snapshot-interval = 1000 - -# snapshot-keep-recent specifies the number of recent snapshots to keep and serve (0 to keep all). 
-snapshot-keep-recent = 10 diff --git a/ci/chains/gaia/v7.0.1/ibc-1/config/client.toml b/ci/chains/gaia/v7.0.1/ibc-1/config/client.toml deleted file mode 100644 index 222695a3f8..0000000000 --- a/ci/chains/gaia/v7.0.1/ibc-1/config/client.toml +++ /dev/null @@ -1,17 +0,0 @@ -# This is a TOML config file. -# For more information, see https://github.com/toml-lang/toml - -############################################################################### -### Client Configuration ### -############################################################################### - -# The network chain ID -chain-id = "" -# The keyring's backend, where the keys are stored (os|file|kwallet|pass|test|memory) -keyring-backend = "os" -# CLI output format (text|json) -output = "text" -# : to Tendermint RPC interface for this chain -node = "tcp://localhost:26657" -# Transaction broadcasting mode (sync|async|block) -broadcast-mode = "sync" diff --git a/ci/chains/gaia/v7.0.1/ibc-1/config/config.toml b/ci/chains/gaia/v7.0.1/ibc-1/config/config.toml deleted file mode 100644 index b2c4196901..0000000000 --- a/ci/chains/gaia/v7.0.1/ibc-1/config/config.toml +++ /dev/null @@ -1,428 +0,0 @@ -# This is a TOML config file. -# For more information, see https://github.com/toml-lang/toml - -# NOTE: Any path below can be absolute (e.g. "/var/myawesomeapp/data") or -# relative to the home directory (e.g. "data"). The home directory is -# "$HOME/.tendermint" by default, but could be changed via $TMHOME env variable -# or --home cmd flag. - -####################################################################### -### Main Base Config Options ### -####################################################################### - -# TCP or UNIX socket address of the ABCI application, -# or the name of an ABCI application compiled in with the Tendermint binary -proxy_app = "tcp://127.0.0.1:26658" - -# A custom human readable name for this node -moniker = "ibc-1" - -# If this node is many blocks behind the tip of the chain, FastSync -# allows them to catchup quickly by downloading blocks in parallel -# and verifying their commits -fast_sync = true - -# Database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb -# * goleveldb (github.com/syndtr/goleveldb - most popular implementation) -# - pure go -# - stable -# * cleveldb (uses levigo wrapper) -# - fast -# - requires gcc -# - use cleveldb build tag (go build -tags cleveldb) -# * boltdb (uses etcd's fork of bolt - github.com/etcd-io/bbolt) -# - EXPERIMENTAL -# - may be faster is some use-cases (random reads - indexer) -# - use boltdb build tag (go build -tags boltdb) -# * rocksdb (uses github.com/tecbot/gorocksdb) -# - EXPERIMENTAL -# - requires gcc -# - use rocksdb build tag (go build -tags rocksdb) -# * badgerdb (uses github.com/dgraph-io/badger) -# - EXPERIMENTAL -# - use badgerdb build tag (go build -tags badgerdb) -db_backend = "goleveldb" - -# Database directory -db_dir = "data" - -# Output level for logging, including package level options -log_level = "info" - -# Output format: 'plain' (colored text) or 'json' -log_format = "plain" - -##### additional base config options ##### - -# Path to the JSON file containing the initial validator set and other meta data -genesis_file = "config/genesis.json" - -# Path to the JSON file containing the private key to use as a validator in the consensus protocol -priv_validator_key_file = "config/priv_validator_key.json" - -# Path to the JSON file containing the last sign state of a validator -priv_validator_state_file = 
"data/priv_validator_state.json" - -# TCP or UNIX socket address for Tendermint to listen on for -# connections from an external PrivValidator process -priv_validator_laddr = "" - -# Path to the JSON file containing the private key to use for node authentication in the p2p protocol -node_key_file = "config/node_key.json" - -# Mechanism to connect to the ABCI application: socket | grpc -abci = "socket" - -# If true, query the ABCI app on connecting to a new peer -# so the app can decide if we should keep the connection or not -filter_peers = false - - -####################################################################### -### Advanced Configuration Options ### -####################################################################### - -####################################################### -### RPC Server Configuration Options ### -####################################################### -[rpc] - -# TCP or UNIX socket address for the RPC server to listen on -laddr = "tcp://0.0.0.0:26657" - -# A list of origins a cross-domain request can be executed from -# Default value '[]' disables cors support -# Use '["*"]' to allow any origin -cors_allowed_origins = [] - -# A list of methods the client is allowed to use with cross-domain requests -cors_allowed_methods = ["HEAD", "GET", "POST", ] - -# A list of non simple headers the client is allowed to use with cross-domain requests -cors_allowed_headers = ["Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time", ] - -# TCP or UNIX socket address for the gRPC server to listen on -# NOTE: This server only supports /broadcast_tx_commit -grpc_laddr = "" - -# Maximum number of simultaneous connections. -# Does not include RPC (HTTP&WebSocket) connections. See max_open_connections -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} -# 1024 - 40 - 10 - 50 = 924 = ~900 -grpc_max_open_connections = 900 - -# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool -unsafe = false - -# Maximum number of simultaneous connections (including WebSocket). -# Does not include gRPC connections. See grpc_max_open_connections -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} -# 1024 - 40 - 10 - 50 = 924 = ~900 -max_open_connections = 900 - -# Maximum number of unique clientIDs that can /subscribe -# If you're using /broadcast_tx_commit, set to the estimated maximum number -# of broadcast_tx_commit calls per block. -max_subscription_clients = 100 - -# Maximum number of unique queries a given client can /subscribe to -# If you're using GRPC (or Local RPC client) and /broadcast_tx_commit, set to -# the estimated # maximum number of broadcast_tx_commit calls per block. -max_subscriptions_per_client = 5 - -# Experimental parameter to specify the maximum number of events a node will -# buffer, per subscription, before returning an error and closing the -# subscription. Must be set to at least 100, but higher values will accommodate -# higher event throughput rates (and will use more memory). -experimental_subscription_buffer_size = 200 - -# Experimental parameter to specify the maximum number of RPC responses that -# can be buffered per WebSocket client. 
If clients cannot read from the -# WebSocket endpoint fast enough, they will be disconnected, so increasing this -# parameter may reduce the chances of them being disconnected (but will cause -# the node to use more memory). -# -# Must be at least the same as "experimental_subscription_buffer_size", -# otherwise connections could be dropped unnecessarily. This value should -# ideally be somewhat higher than "experimental_subscription_buffer_size" to -# accommodate non-subscription-related RPC responses. -experimental_websocket_write_buffer_size = 200 - -# If a WebSocket client cannot read fast enough, at present we may -# silently drop events instead of generating an error or disconnecting the -# client. -# -# Enabling this experimental parameter will cause the WebSocket connection to -# be closed instead if it cannot read fast enough, allowing for greater -# predictability in subscription behaviour. -experimental_close_on_slow_client = false - -# How long to wait for a tx to be committed during /broadcast_tx_commit. -# WARNING: Using a value larger than 10s will result in increasing the -# global HTTP write timeout, which applies to all connections and endpoints. -# See https://github.com/tendermint/tendermint/issues/3435 -timeout_broadcast_tx_commit = "10s" - -# Maximum size of request body, in bytes -max_body_bytes = 1000000 - -# Maximum size of request header, in bytes -max_header_bytes = 1048576 - -# The path to a file containing certificate that is used to create the HTTPS server. -# Might be either absolute path or path related to Tendermint's config directory. -# If the certificate is signed by a certificate authority, -# the certFile should be the concatenation of the server's certificate, any intermediates, -# and the CA's certificate. -# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. -# Otherwise, HTTP server is run. -tls_cert_file = "" - -# The path to a file containing matching private key that is used to create the HTTPS server. -# Might be either absolute path or path related to Tendermint's config directory. -# NOTE: both tls-cert-file and tls-key-file must be present for Tendermint to create HTTPS server. -# Otherwise, HTTP server is run. -tls_key_file = "" - -# pprof listen address (https://golang.org/pkg/net/http/pprof) -pprof_laddr = "localhost:6060" - -####################################################### -### P2P Configuration Options ### -####################################################### -[p2p] - -# Address to listen for incoming connections -laddr = "tcp://0.0.0.0:26656" - -# Address to advertise to peers for them to dial -# If empty, will use the same port as the laddr, -# and will introspect on the listener or use UPnP -# to figure out the address. 
ip and port are required -# example: 159.89.10.97:26656 -external_address = "" - -# Comma separated list of seed nodes to connect to -seeds = "" - -# Comma separated list of nodes to keep persistent connections to -persistent_peers = "" - -# UPNP port forwarding -upnp = false - -# Path to address book -addr_book_file = "config/addrbook.json" - -# Set true for strict address routability rules -# Set false for private or local networks -addr_book_strict = true - -# Maximum number of inbound peers -max_num_inbound_peers = 40 - -# Maximum number of outbound peers to connect to, excluding persistent peers -max_num_outbound_peers = 10 - -# List of node IDs, to which a connection will be (re)established ignoring any existing limits -unconditional_peer_ids = "" - -# Maximum pause when redialing a persistent peer (if zero, exponential backoff is used) -persistent_peers_max_dial_period = "0s" - -# Time to wait before flushing messages out on the connection -flush_throttle_timeout = "100ms" - -# Maximum size of a message packet payload, in bytes -max_packet_msg_payload_size = 1024 - -# Rate at which packets can be sent, in bytes/second -send_rate = 5120000 - -# Rate at which packets can be received, in bytes/second -recv_rate = 5120000 - -# Set true to enable the peer-exchange reactor -pex = true - -# Seed mode, in which node constantly crawls the network and looks for -# peers. If another node asks it for addresses, it responds and disconnects. -# -# Does not work if the peer-exchange reactor is disabled. -seed_mode = false - -# Comma separated list of peer IDs to keep private (will not be gossiped to other peers) -private_peer_ids = "" - -# Toggle to disable guard against peers connecting from the same ip. -allow_duplicate_ip = false - -# Peer connection configuration. -handshake_timeout = "20s" -dial_timeout = "3s" - -####################################################### -### Mempool Configuration Option ### -####################################################### -[mempool] - -recheck = true -broadcast = true -wal_dir = "" - -# Maximum number of transactions in the mempool -size = 5000 - -# Limit the total size of all txs in the mempool. -# This only accounts for raw transactions (e.g. given 1MB transactions and -# max_txs_bytes=5MB, mempool will only accept 5 transactions). -max_txs_bytes = 1073741824 - -# Size of the cache (used to filter transactions we saw earlier) in transactions -cache_size = 10000 - -# Do not remove invalid transactions from the cache (default: false) -# Set to true if it's not possible for any invalid transaction to become valid -# again in the future. -keep-invalid-txs-in-cache = false - -# Maximum size of a single transaction. -# NOTE: the max size of a tx transmitted over the network is {max_tx_bytes}. -max_tx_bytes = 1048576 - -# Maximum size of a batch of transactions to send to a peer -# Including space needed by encoding (one varint per transaction). -# XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796 -max_batch_bytes = 0 - -####################################################### -### State Sync Configuration Options ### -####################################################### -[statesync] -# State sync rapidly bootstraps a new node by discovering, fetching, and restoring a state machine -# snapshot from peers instead of fetching and replaying historical blocks. Requires some peers in -# the network to take and serve state machine snapshots. State sync is not attempted if the node -# has any local state (LastBlockHeight > 0). 
The node will have a truncated block history, -# starting from the height of the snapshot. -enable = false - -# RPC servers (comma-separated) for light client verification of the synced state machine and -# retrieval of state data for node bootstrapping. Also needs a trusted height and corresponding -# header hash obtained from a trusted source, and a period during which validators can be trusted. -# -# For Cosmos SDK-based chains, trust_period should usually be about 2/3 of the unbonding time (~2 -# weeks) during which they can be financially punished (slashed) for misbehavior. -rpc_servers = "" -trust_height = 0 -trust_hash = "" -trust_period = "168h0m0s" - -# Time to spend discovering snapshots before initiating a restore. -discovery_time = "15s" - -# Temporary directory for state sync snapshot chunks, defaults to the OS tempdir (typically /tmp). -# Will create a new, randomly named directory within, and remove it when done. -temp_dir = "" - -# The timeout duration before re-requesting a chunk, possibly from a different -# peer (default: 1 minute). -chunk_request_timeout = "10s" - -# The number of concurrent chunk fetchers to run (default: 1). -chunk_fetchers = "4" - -####################################################### -### Fast Sync Configuration Connections ### -####################################################### -[fastsync] - -# Fast Sync version to use: -# 1) "v0" (default) - the legacy fast sync implementation -# 2) "v1" - refactor of v0 version for better testability -# 2) "v2" - complete redesign of v0, optimized for testability & readability -version = "v0" - -####################################################### -### Consensus Configuration Options ### -####################################################### -[consensus] - -wal_file = "data/cs.wal/wal" - -# How long we wait for a proposal block before prevoting nil -timeout_propose = "1s" -# How much timeout_propose increases with each round -timeout_propose_delta = "500ms" -# How long we wait after receiving +2/3 prevotes for “anything” (ie. not a single block or nil) -timeout_prevote = "1s" -# How much the timeout_prevote increases with each round -timeout_prevote_delta = "500ms" -# How long we wait after receiving +2/3 precommits for “anything” (ie. not a single block or nil) -timeout_precommit = "1s" -# How much the timeout_precommit increases with each round -timeout_precommit_delta = "500ms" -# How long we wait after committing a block, before starting on the new -# height (this gives us a chance to receive some more precommits, even -# though we already have +2/3). -timeout_commit = "1s" - -# How many blocks to look back to check existence of the node's consensus votes before joining consensus -# When non-zero, the node will panic upon restart -# if the same consensus key was used to sign {double_sign_check_height} last blocks. -# So, validators should stop the state machine, wait for some blocks, and then restart the state machine to avoid panic. 
-double_sign_check_height = 0 - -# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) -skip_timeout_commit = false - -# EmptyBlocks mode and possible interval between empty blocks -create_empty_blocks = true -create_empty_blocks_interval = "0s" - -# Reactor sleep duration parameters -peer_gossip_sleep_duration = "100ms" -peer_query_maj23_sleep_duration = "2s" - -####################################################### -### Transaction Indexer Configuration Options ### -####################################################### -[tx_index] - -# What indexer to use for transactions -# -# The application will set which txs to index. In some cases a node operator will be able -# to decide which txs to index based on configuration set in the application. -# -# Options: -# 1) "null" -# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). -# - When "kv" is chosen "tx.height" and "tx.hash" will always be indexed. -indexer = "kv" - -####################################################### -### Instrumentation Configuration Options ### -####################################################### -[instrumentation] - -# When true, Prometheus metrics are served under /metrics on -# PrometheusListenAddr. -# Check out the documentation for the list of available metrics. -prometheus = false - -# Address to listen for Prometheus collector(s) connections -prometheus_listen_addr = ":26660" - -# Maximum number of simultaneous connections. -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -max_open_connections = 3 - -# Instrumentation namespace -namespace = "tendermint" diff --git a/ci/chains/gaia/v7.0.1/ibc-1/config/genesis.json b/ci/chains/gaia/v7.0.1/ibc-1/config/genesis.json deleted file mode 100644 index 1055a5a68d..0000000000 --- a/ci/chains/gaia/v7.0.1/ibc-1/config/genesis.json +++ /dev/null @@ -1,375 +0,0 @@ -{ - "genesis_time": "2022-05-06T10:05:03.049703Z", - "chain_id": "ibc-1", - "initial_height": "1", - "consensus_params": { - "block": { - "max_bytes": "22020096", - "max_gas": "-1", - "time_iota_ms": "1000" - }, - "evidence": { - "max_age_num_blocks": "100000", - "max_age_duration": "172800000000000", - "max_bytes": "1048576" - }, - "validator": { - "pub_key_types": [ - "ed25519" - ] - }, - "version": {} - }, - "app_hash": "", - "app_state": { - "auth": { - "params": { - "max_memo_characters": "256", - "tx_sig_limit": "7", - "tx_size_cost_per_byte": "10", - "sig_verify_cost_ed25519": "590", - "sig_verify_cost_secp256k1": "1000" - }, - "accounts": [ - { - "@type": "/cosmos.auth.v1beta1.BaseAccount", - "address": "cosmos16gawv9t4xf92lt98qjjzgp6wq8ed55gf49wqef", - "pub_key": null, - "account_number": "0", - "sequence": "0" - }, - { - "@type": "/cosmos.auth.v1beta1.BaseAccount", - "address": "cosmos1m3x6hk7nkds6yquvldpsvl35n2q9cmq0fumhh6", - "pub_key": null, - "account_number": "0", - "sequence": "0" - }, - { - "@type": "/cosmos.auth.v1beta1.BaseAccount", - "address": "cosmos12gs7gsx968sgy8fwnzf37veqj02rd6rm96u087", - "pub_key": null, - "account_number": "0", - "sequence": "0" - } - ] - }, - "authz": { - "authorization": [] - }, - "bank": { - "params": { - "send_enabled": [], - "default_send_enabled": true - }, - "balances": [ - { - "address": "cosmos12gs7gsx968sgy8fwnzf37veqj02rd6rm96u087", - "coins": [ - { - "denom": "stake", - "amount": "100000000000" - } - ] - }, - { - "address": "cosmos16gawv9t4xf92lt98qjjzgp6wq8ed55gf49wqef", - "coins": [ - { - 
"denom": "samoleans", - "amount": "100000000000" - }, - { - "denom": "stake", - "amount": "100000000000" - } - ] - }, - { - "address": "cosmos1m3x6hk7nkds6yquvldpsvl35n2q9cmq0fumhh6", - "coins": [ - { - "denom": "samoleans", - "amount": "100000000000" - }, - { - "denom": "stake", - "amount": "100000000000" - } - ] - } - ], - "supply": [ - { - "denom": "samoleans", - "amount": "200000000000" - }, - { - "denom": "stake", - "amount": "300000000000" - } - ], - "denom_metadata": [] - }, - "capability": { - "index": "1", - "owners": [] - }, - "crisis": { - "constant_fee": { - "denom": "stake", - "amount": "1000" - } - }, - "distribution": { - "params": { - "community_tax": "0.020000000000000000", - "base_proposer_reward": "0.010000000000000000", - "bonus_proposer_reward": "0.040000000000000000", - "withdraw_addr_enabled": true - }, - "fee_pool": { - "community_pool": [] - }, - "delegator_withdraw_infos": [], - "previous_proposer": "", - "outstanding_rewards": [], - "validator_accumulated_commissions": [], - "validator_historical_rewards": [], - "validator_current_rewards": [], - "delegator_starting_infos": [], - "validator_slash_events": [] - }, - "evidence": { - "evidence": [] - }, - "feegrant": { - "allowances": [] - }, - "genutil": { - "gen_txs": [ - { - "body": { - "messages": [ - { - "@type": "/cosmos.staking.v1beta1.MsgCreateValidator", - "description": { - "moniker": "ibc-1", - "identity": "", - "website": "", - "security_contact": "", - "details": "" - }, - "commission": { - "rate": "0.100000000000000000", - "max_rate": "0.200000000000000000", - "max_change_rate": "0.010000000000000000" - }, - "min_self_delegation": "1", - "delegator_address": "cosmos12gs7gsx968sgy8fwnzf37veqj02rd6rm96u087", - "validator_address": "cosmosvaloper12gs7gsx968sgy8fwnzf37veqj02rd6rmqwg6td", - "pubkey": { - "@type": "/cosmos.crypto.ed25519.PubKey", - "key": "RMhhlBtQSs1rDz3bVzuJarNZHNRjukKEF7r9qqCgARQ=" - }, - "value": { - "denom": "stake", - "amount": "100000000000" - } - } - ], - "memo": "56790e64fa1c26c79a5c03ae6432973f04e293cd@192.168.1.80:26656", - "timeout_height": "0", - "extension_options": [], - "non_critical_extension_options": [] - }, - "auth_info": { - "signer_infos": [ - { - "public_key": { - "@type": "/cosmos.crypto.secp256k1.PubKey", - "key": "AxsUINyw8wVnTzhmDJ3v6GBUPT7xQ+7yaf7PCm3Y2ic/" - }, - "mode_info": { - "single": { - "mode": "SIGN_MODE_DIRECT" - } - }, - "sequence": "0" - } - ], - "fee": { - "amount": [], - "gas_limit": "200000", - "payer": "", - "granter": "" - } - }, - "signatures": [ - "2MAVzXWc/buNQlM+tEM0Xxtkfv8FgF34yk0fw1B1Prt17G7AoB7cPX33GwhlvVknNtSNCg8VsTDphYocXqCDvQ==" - ] - } - ] - }, - "gov": { - "starting_proposal_id": "1", - "deposits": [], - "votes": [], - "proposals": [], - "deposit_params": { - "min_deposit": [ - { - "denom": "stake", - "amount": "10000000" - } - ], - "max_deposit_period": "200s" - }, - "voting_params": { - "voting_period": "200s" - }, - "tally_params": { - "quorum": "0.334000000000000000", - "threshold": "0.500000000000000000", - "veto_threshold": "0.334000000000000000" - } - }, - "ibc": { - "client_genesis": { - "clients": [], - "clients_consensus": [], - "clients_metadata": [], - "params": { - "allowed_clients": [ - "06-solomachine", - "07-tendermint" - ] - }, - "create_localhost": false, - "next_client_sequence": "0" - }, - "connection_genesis": { - "connections": [], - "client_connection_paths": [], - "next_connection_sequence": "0", - "params": { - "max_expected_time_per_block": "30000000000" - } - }, - "channel_genesis": { - "channels": [], - 
"acknowledgements": [], - "commitments": [], - "receipts": [], - "send_sequences": [], - "recv_sequences": [], - "ack_sequences": [], - "next_channel_sequence": "0" - } - }, - "interchainaccounts": { - "controller_genesis_state": { - "active_channels": [], - "interchain_accounts": [], - "ports": [], - "params": { - "controller_enabled": true - } - }, - "host_genesis_state": { - "active_channels": [], - "interchain_accounts": [], - "port": "icahost", - "params": { - "host_enabled": true, - "allow_messages": [] - } - } - }, - "liquidity": { - "params": { - "pool_types": [ - { - "id": 1, - "name": "StandardLiquidityPool", - "min_reserve_coin_num": 2, - "max_reserve_coin_num": 2, - "description": "Standard liquidity pool with pool price function X/Y, ESPM constraint, and two kinds of reserve coins" - } - ], - "min_init_deposit_amount": "1000000", - "init_pool_coin_mint_amount": "1000000", - "max_reserve_coin_amount": "0", - "pool_creation_fee": [ - { - "denom": "stake", - "amount": "40000000" - } - ], - "swap_fee_rate": "0.003000000000000000", - "withdraw_fee_rate": "0.000000000000000000", - "max_order_amount_ratio": "0.100000000000000000", - "unit_batch_height": 1, - "circuit_breaker_enabled": false - }, - "pool_records": [] - }, - "mint": { - "minter": { - "inflation": "0.130000000000000000", - "annual_provisions": "0.000000000000000000" - }, - "params": { - "mint_denom": "stake", - "inflation_rate_change": "0.130000000000000000", - "inflation_max": "0.200000000000000000", - "inflation_min": "0.070000000000000000", - "goal_bonded": "0.670000000000000000", - "blocks_per_year": "6311520" - } - }, - "packetfowardmiddleware": { - "params": { - "fee_percentage": "0.000000000000000000" - } - }, - "params": null, - "slashing": { - "params": { - "signed_blocks_window": "100", - "min_signed_per_window": "0.500000000000000000", - "downtime_jail_duration": "600s", - "slash_fraction_double_sign": "0.050000000000000000", - "slash_fraction_downtime": "0.010000000000000000" - }, - "signing_infos": [], - "missed_blocks": [] - }, - "staking": { - "params": { - "unbonding_time": "1814400s", - "max_validators": 100, - "max_entries": 7, - "historical_entries": 10000, - "bond_denom": "stake" - }, - "last_total_power": "0", - "last_validator_powers": [], - "validators": [], - "delegations": [], - "unbonding_delegations": [], - "redelegations": [], - "exported": false - }, - "transfer": { - "port_id": "transfer", - "denom_traces": [], - "params": { - "send_enabled": true, - "receive_enabled": true - } - }, - "upgrade": {}, - "vesting": {} - } -} \ No newline at end of file diff --git a/ci/chains/gaia/v7.0.1/ibc-1/config/gentx/gentx-56790e64fa1c26c79a5c03ae6432973f04e293cd.json b/ci/chains/gaia/v7.0.1/ibc-1/config/gentx/gentx-56790e64fa1c26c79a5c03ae6432973f04e293cd.json deleted file mode 100644 index 61d9fd7d25..0000000000 --- a/ci/chains/gaia/v7.0.1/ibc-1/config/gentx/gentx-56790e64fa1c26c79a5c03ae6432973f04e293cd.json +++ /dev/null @@ -1 +0,0 @@ 
-{"body":{"messages":[{"@type":"/cosmos.staking.v1beta1.MsgCreateValidator","description":{"moniker":"ibc-1","identity":"","website":"","security_contact":"","details":""},"commission":{"rate":"0.100000000000000000","max_rate":"0.200000000000000000","max_change_rate":"0.010000000000000000"},"min_self_delegation":"1","delegator_address":"cosmos12gs7gsx968sgy8fwnzf37veqj02rd6rm96u087","validator_address":"cosmosvaloper12gs7gsx968sgy8fwnzf37veqj02rd6rmqwg6td","pubkey":{"@type":"/cosmos.crypto.ed25519.PubKey","key":"RMhhlBtQSs1rDz3bVzuJarNZHNRjukKEF7r9qqCgARQ="},"value":{"denom":"stake","amount":"100000000000"}}],"memo":"56790e64fa1c26c79a5c03ae6432973f04e293cd@192.168.1.80:26656","timeout_height":"0","extension_options":[],"non_critical_extension_options":[]},"auth_info":{"signer_infos":[{"public_key":{"@type":"/cosmos.crypto.secp256k1.PubKey","key":"AxsUINyw8wVnTzhmDJ3v6GBUPT7xQ+7yaf7PCm3Y2ic/"},"mode_info":{"single":{"mode":"SIGN_MODE_DIRECT"}},"sequence":"0"}],"fee":{"amount":[],"gas_limit":"200000","payer":"","granter":""}},"signatures":["2MAVzXWc/buNQlM+tEM0Xxtkfv8FgF34yk0fw1B1Prt17G7AoB7cPX33GwhlvVknNtSNCg8VsTDphYocXqCDvQ=="]} diff --git a/ci/chains/gaia/v7.0.1/ibc-1/config/node_key.json b/ci/chains/gaia/v7.0.1/ibc-1/config/node_key.json deleted file mode 100644 index 3daaff2931..0000000000 --- a/ci/chains/gaia/v7.0.1/ibc-1/config/node_key.json +++ /dev/null @@ -1 +0,0 @@ -{"priv_key":{"type":"tendermint/PrivKeyEd25519","value":"c6g3gMTe4rl8L4rYw/LnIaCnYpNrXa+gG18Ob8ozBbWMh5p4zIqFZJGL2G1gkEGphTagNwScUOlI4a4DnurdEg=="}} \ No newline at end of file diff --git a/ci/chains/gaia/v7.0.1/ibc-1/config/priv_validator_key.json b/ci/chains/gaia/v7.0.1/ibc-1/config/priv_validator_key.json deleted file mode 100644 index 91b7267961..0000000000 --- a/ci/chains/gaia/v7.0.1/ibc-1/config/priv_validator_key.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "address": "39D793D7F075E3BFD3142828B88DF5B13A194B5A", - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "RMhhlBtQSs1rDz3bVzuJarNZHNRjukKEF7r9qqCgARQ=" - }, - "priv_key": { - "type": "tendermint/PrivKeyEd25519", - "value": "g4brql1ymthMphMQd9txX8o9FmV6cccCl7B+oRTDaUREyGGUG1BKzWsPPdtXO4lqs1kc1GO6QoQXuv2qoKABFA==" - } -} \ No newline at end of file diff --git a/ci/chains/gaia/v7.0.1/ibc-1/keyring-test/5221e440c5d1e0821d2e98931f332093d436e87b.address b/ci/chains/gaia/v7.0.1/ibc-1/keyring-test/5221e440c5d1e0821d2e98931f332093d436e87b.address deleted file mode 100644 index 6b44695ff3..0000000000 --- a/ci/chains/gaia/v7.0.1/ibc-1/keyring-test/5221e440c5d1e0821d2e98931f332093d436e87b.address +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMi0wNS0wNiAxMjowNTowNC4xNTMyNTcgKzAyMDAgQ0VTVCBtPSswLjA2OTEyODQ2OCIsImVuYyI6IkEyNTZHQ00iLCJwMmMiOjgxOTIsInAycyI6IlNBNHEydGtDVXB4X0Y3X08ifQ.adYJ-_sAjRtuxIyeySQTySat5Zp1ZEbrVVEqfA4pse0qwwIEtHeL6g.MQDEaLhHPF4jR5QT.Cca1U5asQ8JLJ2qgeZr5I2BZd5naAgc2DEWrniEJTDvoS-uFndSrlR-edtZJLfOEDZQ34oeGLKMEvy6AqZAUYojA2Zqu85Xl87wq9IOaiZ7JRMjTDPAx9AMS9ZYDal1p53DQC40Z9enfqWO36KGvNjpWRlzh3pHVwopZWKJp4clyqXDvC1tVALQ76RxUpVF9_iHcsULI2gscNovc9HqJhC3PFNISVx_eDMahK713SffzD8XDWE7-fcpv.PepmuUZxDNqe2HNvr1E0Fw \ No newline at end of file diff --git a/ci/chains/gaia/v7.0.1/ibc-1/keyring-test/d23ae61575324aafaca704a424074e01f2da5109.address b/ci/chains/gaia/v7.0.1/ibc-1/keyring-test/d23ae61575324aafaca704a424074e01f2da5109.address deleted file mode 100644 index 1a727853d5..0000000000 --- a/ci/chains/gaia/v7.0.1/ibc-1/keyring-test/d23ae61575324aafaca704a424074e01f2da5109.address +++ /dev/null @@ -1 +0,0 @@ 
-eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMi0wNS0wNiAxMjowNTowNS4yODkyNDggKzAyMDAgQ0VTVCBtPSswLjA1MTg5NjMyNyIsImVuYyI6IkEyNTZHQ00iLCJwMmMiOjgxOTIsInAycyI6InE1UUJSSkpDQ3pDZmEtUmYifQ.rrONBnY5Aj8aT7dz0UXOGzyUQ66I1Ac2zP5PjKleSowQedk0FcdMuQ.gxMk313lDafB83M0.3iiecezUwejqiJxgO7ck6NMebrVHdmTiXhcM2So82p7Ln1z75l_oj-KpproRYYrMxr-dgxQHtld3-VgdPJpzvuKr3rdrIDi_zhMHHRzZxa1Wj8L3z3-r-npYAY1sivaRpgP8Q34_qZAjABTvE36DFbIUecORBhr-QohA4hggFszvBSHvUnzfly03QLJlTvWwtbXHDqtPISBVfoOxtzM0NZ1EYH211KZY83QYO4f6xCnZZA.vGjEm8-w40mu6EW_ii6D9g \ No newline at end of file diff --git a/ci/chains/gaia/v7.0.1/ibc-1/keyring-test/dc4dabdbd3b361a2038cfb43067e349a805c6c0f.address b/ci/chains/gaia/v7.0.1/ibc-1/keyring-test/dc4dabdbd3b361a2038cfb43067e349a805c6c0f.address deleted file mode 100644 index 07d547336e..0000000000 --- a/ci/chains/gaia/v7.0.1/ibc-1/keyring-test/dc4dabdbd3b361a2038cfb43067e349a805c6c0f.address +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMi0wNS0wNiAxMjowNTowNi40MjExNSArMDIwMCBDRVNUIG09KzAuMDUwOTg1MjA0IiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiZXMwemowdHJBekFIN3pheiJ9.vPUBN-P-3YbENnEEZ4oZpiQXKIdE9FuTmqy7FocumRXR47lTR2l_9g.TMUNiFS4n7qvTgYn.7eIywlD-vrfJrkahp9Kfz9xn8ShKKRAH8L-KYJRmNrfNfN0fVQC7tiEuo4tjQkYw20Umsxx_9wuEVi3xarjm4aERw-StCqp5VbOE0QQtI6Sr0Pemfpx4yVj5OvVZIpiw3ho4cDWH_kWCksnsjZMqOga8zkn4E07ZdpGiBWsdBowvK9E6mAyX2yqLLsPSvaTq2wP54eZbTfRyd5u8L3qSNzKb9fm1CvJ4O1E7vdBJD-sa_AmXf60.rQ1AfXNGhQunz5Q7LQWtpQ \ No newline at end of file diff --git a/ci/chains/gaia/v7.0.1/ibc-1/keyring-test/user.info b/ci/chains/gaia/v7.0.1/ibc-1/keyring-test/user.info deleted file mode 100644 index 4665ed2142..0000000000 --- a/ci/chains/gaia/v7.0.1/ibc-1/keyring-test/user.info +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMi0wNS0wNiAxMjowNTowNS4yNzU5MTcgKzAyMDAgQ0VTVCBtPSswLjAzODU2NTUxMiIsImVuYyI6IkEyNTZHQ00iLCJwMmMiOjgxOTIsInAycyI6Iks5UlMxNlVXSUF5ZUxCbl8ifQ.YCQkyMtkR3SbDmhnJDA0MZi96fsLjtOz3DF1xHxglUQVbY9cMA9CdA.n4NJ4cDazjGjE80c._rusIrVHv3Ln2e7XamD9pzoC1nS1q0sOw2iAZZjA847Bp9ZglIUg32vMR4F60pQaN1xkYlOnyUDcCn9mD4H0GZCg079tyHyMIye-mhmISHnj1b6p01vzlh7C3c5vTzCVlAwZgtb3PcwNO9v-tD6DVE8NRzSnVsQeTdWrFqhwSUZuIFO5Xq-pdBhGvgm1yfwyEk7XEeUZREiQ3s8z14fapmXjHUnyvesCEBkBCfTEIjsimaltcDMkrXfF3gYxyZmWfWnFPFfj6OiPxoTRlHwejZt4sTIrrT6meWYyUndp8aAVVM2YhZU0sB1brxn-RbuZx_AXDP6bZYVOD6yPht2OjQqLtXakumk.j-3-OZISZNQ64Q4wQYYVIA \ No newline at end of file diff --git a/ci/chains/gaia/v7.0.1/ibc-1/keyring-test/user2.info b/ci/chains/gaia/v7.0.1/ibc-1/keyring-test/user2.info deleted file mode 100644 index f3cb21a755..0000000000 --- a/ci/chains/gaia/v7.0.1/ibc-1/keyring-test/user2.info +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMi0wNS0wNiAxMjowNTowNi40MTAzMSArMDIwMCBDRVNUIG09KzAuMDQwMTQ1NDUzIiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiT3VHWU5ZcHlwOENoc2RRYSJ9.J_QPFA8vaBIXZTMcvPIEzSWQriqJCukc5Cw8nXx9r2-AqilX--tvfw.kRbNuGXPwl_5LYWu.q-WqlXg_1ohgqyILJraRhcu4Zv1T06t4oYCi0WxzF_ukhCZ9LkDcpi9NGFuDwxjlmXXXrFsJFoz2naY0ppHPQg74iS7DKw8hav1rv6JW1z_kPKTKVzvej4JdWmoFFe8mJXPyDp3-8PNcyX0gndyLV20MQ1LSIjjaUMeDaYXlitLU5uAAxHHARnFf-uBIkq94-Ud_tLFWbnbgvs8bp0S5M86apksT20UqcqiqgdYQKOduwajeunm76f_PPuJGetkLfkmfceLeTjATkYe7vnDFLWNKb7vg2bCeCTH1sbaMHEgKtvkD-qzuSf0w7QAq1Vhd0oXj2i5AebPsmoDN7VmZGu_IRVfYK1vr.fdQwhvoluadIf1mIi8qYBQ \ No newline at end of file diff --git a/ci/chains/gaia/v7.0.1/ibc-1/keyring-test/validator.info b/ci/chains/gaia/v7.0.1/ibc-1/keyring-test/validator.info deleted file mode 100644 index b15476ee06..0000000000 --- 
a/ci/chains/gaia/v7.0.1/ibc-1/keyring-test/validator.info +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMi0wNS0wNiAxMjowNTowNC4xMzUzNDcgKzAyMDAgQ0VTVCBtPSswLjA1MTIxODA5OCIsImVuYyI6IkEyNTZHQ00iLCJwMmMiOjgxOTIsInAycyI6Ik1kcGhWd2RrRl9PMEw0SXUifQ.aWwkNEtb7SToBnDyxHdNx3Aj8L2RyOSbpGTPB2ZkEryPJGFJOz-hDA.CL-kA95E4_Sv12Gx.jMgUN0U13X85kSWHvUnS5wnPTty9ZMm-8Kos3dUxMBB1XTZFwPiFbehGjUgcKDtHaH3a-FYqtzeFvNTmL93hU7O2Gg_35WLbAGXGVlxMFe_t0gHUYc7nyn7lP6sElCgY-WBruyrLFFPe7TA5IISrHQGjXaOwUTK_DTDklDus75zvTtgusGEL3_J9Y3zRiFG3oE1cp8nN7yTnEHEt2ddOh46WmWpfbWrQDOvozaRkTr67iN2xsJ2dwhzJkeVUL1OVhlVfAfS6AmCm6OJjwf6k38CyR8s6FD6eQTd3kHfi_D4rD-vsca4cHc1poRfzT7qJqASv7CysA9W8DvJv4FgffhMu0rgzE3ihx880Kl01ol8w25ev.s6QGa1w6n9013h0w6e_UGg \ No newline at end of file diff --git a/ci/chains/gaia/v7.0.1/ibc-1/user2_seed.json b/ci/chains/gaia/v7.0.1/ibc-1/user2_seed.json deleted file mode 100644 index 17f84353e5..0000000000 --- a/ci/chains/gaia/v7.0.1/ibc-1/user2_seed.json +++ /dev/null @@ -1 +0,0 @@ -{"name":"user2","type":"local","address":"cosmos1m3x6hk7nkds6yquvldpsvl35n2q9cmq0fumhh6","pubkey":"{\"@type\":\"/cosmos.crypto.secp256k1.PubKey\",\"key\":\"A/ufAZXN5V+SzW5b8o1bRW6EHofOmvW4h2lb49uG1k9o\"}","mnemonic":"tube castle all pistol animal liar news reform imitate tennis cup onion garbage glide circle fun benefit dutch worry title garbage record charge version"} diff --git a/ci/chains/gaia/v7.0.1/ibc-1/user_seed.json b/ci/chains/gaia/v7.0.1/ibc-1/user_seed.json deleted file mode 100644 index b478d34db4..0000000000 --- a/ci/chains/gaia/v7.0.1/ibc-1/user_seed.json +++ /dev/null @@ -1 +0,0 @@ -{"name":"user","type":"local","address":"cosmos16gawv9t4xf92lt98qjjzgp6wq8ed55gf49wqef","pubkey":"{\"@type\":\"/cosmos.crypto.secp256k1.PubKey\",\"key\":\"AsU2Pk5VDeJ+qmzGZqTDOyl/gZPQxRk2EuA1O6WFhXmD\"}","mnemonic":"gold eight fury outdoor sword catch unique eagle monster april worry layer dismiss cheese balcony bounce tribe simple tank cave radar fitness reward hundred"} diff --git a/ci/chains/gaia/v7.0.1/ibc-1/validator_seed.json b/ci/chains/gaia/v7.0.1/ibc-1/validator_seed.json deleted file mode 100644 index 1327e403f8..0000000000 --- a/ci/chains/gaia/v7.0.1/ibc-1/validator_seed.json +++ /dev/null @@ -1 +0,0 @@ -{"name":"validator","type":"local","address":"cosmos12gs7gsx968sgy8fwnzf37veqj02rd6rm96u087","pubkey":"{\"@type\":\"/cosmos.crypto.secp256k1.PubKey\",\"key\":\"AxsUINyw8wVnTzhmDJ3v6GBUPT7xQ+7yaf7PCm3Y2ic/\"}","mnemonic":"expire island increase truck party border elite exit effort layer spatial scout direct grace dream dry change hill impose nation smoke snow either pioneer"} diff --git a/ci/docker-compose-gaia-current.yml b/ci/docker-compose-gaia-current.yml deleted file mode 100644 index d162c6fa3c..0000000000 --- a/ci/docker-compose-gaia-current.yml +++ /dev/null @@ -1,61 +0,0 @@ -version: '3' - -services: - - ibc-0: - container_name: ibc-0 - image: "informaldev/ibc-0:v7.0.1" - stdin_open: true - tty: true - entrypoint: "/chain/gaia/run-gaiad.sh" - networks: - relaynet: - ipv4_address: 172.25.0.10 - environment: - - CHAIN_ID=ibc-0 - - ibc-1: - container_name: ibc-1 - image: "informaldev/ibc-1:v7.0.1" - stdin_open: true - tty: true - entrypoint: "/chain/gaia/run-gaiad.sh" - networks: - relaynet: - ipv4_address: 172.25.0.11 - environment: - - CHAIN_ID=ibc-1 - - relayer: - depends_on: - - ibc-0 - - ibc-1 - container_name: relayer - stdin_open: true - tty: true - build: - context: ../ - dockerfile: ci/relayer.Dockerfile - args: - RELEASE: v7.0.1 - environment: - - CHAIN_A=ibc-0 - - 
CHAIN_A_HOME=/data/ibc-0 - - CHAIN_A_PORT=26657 - - CHAIN_B=ibc-1 - - CHAIN_B_HOME=/data/ibc-1 - - CHAIN_B_PORT=26657 - - CONFIG=simple_config.toml - - RELAYER_DIR=/relayer - - RELEASE=v7.0.1 - networks: - relaynet: - ipv4_address: 172.25.0.12 - -networks: - relaynet: - driver: bridge - ipam: - driver: default - config: - - subnet: 172.25.0.0/16 diff --git a/ci/docker-compose-gaia-legacy.yml b/ci/docker-compose-gaia-legacy.yml deleted file mode 100644 index c9743f7e24..0000000000 --- a/ci/docker-compose-gaia-legacy.yml +++ /dev/null @@ -1,61 +0,0 @@ -version: '3' - -services: - - ibc-0: - container_name: ibc-0 - image: "informaldev/ibc-0:v6.0.0" - stdin_open: true - tty: true - entrypoint: "/chain/gaia/run-gaiad.sh" - networks: - relaynet: - ipv4_address: 172.25.0.10 - environment: - - CHAIN_ID=ibc-0 - - ibc-1: - container_name: ibc-1 - image: "informaldev/ibc-1:v6.0.0" - stdin_open: true - tty: true - entrypoint: "/chain/gaia/run-gaiad.sh" - networks: - relaynet: - ipv4_address: 172.25.0.11 - environment: - - CHAIN_ID=ibc-1 - - relayer: - depends_on: - - ibc-0 - - ibc-1 - container_name: relayer - stdin_open: true - tty: true - build: - context: ../ - dockerfile: ci/relayer.Dockerfile - args: - RELEASE: v6.0.0 - environment: - - CHAIN_A=ibc-0 - - CHAIN_A_HOME=/data/ibc-0 - - CHAIN_A_PORT=26657 - - CHAIN_B=ibc-1 - - CHAIN_B_HOME=/data/ibc-1 - - CHAIN_B_PORT=26657 - - CONFIG=simple_config.toml - - RELAYER_DIR=/relayer - - RELEASE=v6.0.0 - networks: - relaynet: - ipv4_address: 172.25.0.12 - -networks: - relaynet: - driver: bridge - ipam: - driver: default - config: - - subnet: 172.25.0.0/16 diff --git a/ci/e2e.sh b/ci/e2e.sh deleted file mode 100755 index cd8b26a387..0000000000 --- a/ci/e2e.sh +++ /dev/null @@ -1,47 +0,0 @@ -#!/bin/sh - -set -e - -RELAYER_CMD=/usr/bin/hermes - -echo "=================================================================================================================" -echo " INITIALIZE " -echo "=================================================================================================================" -echo "-----------------------------------------------------------------------------------------------------------------" -echo "Show config path" -echo "-----------------------------------------------------------------------------------------------------------------" -# Configuration file -CONFIG_PATH="$RELAYER_DIR"/"$CONFIG" -echo "-----------------------------------------------------------------------------------------------------------------" -echo "Show relayer version" -echo "-----------------------------------------------------------------------------------------------------------------" -echo Config: "$CONFIG_PATH" -$RELAYER_CMD -c "$CONFIG_PATH" version -echo "-----------------------------------------------------------------------------------------------------------------" -echo "Setting up chains" -echo "-----------------------------------------------------------------------------------------------------------------" -echo " Chain:" "$CHAIN_A" -echo " creating chain store folder: "["$CHAIN_A_HOME"] -mkdir -p "$CHAIN_A_HOME" -echo " Chain:" "$CHAIN_B" ["$CHAIN_B_HOME"] -echo " creating chain store folder: "["$CHAIN_B_HOME"] -mkdir -p "$CHAIN_B_HOME" -echo Waiting 20 seconds for chains to generate blocks... 
-sleep 20 -echo "=================================================================================================================" -echo " CONFIGURATION " -echo "=================================================================================================================" -echo "-----------------------------------------------------------------------------------------------------------------" -echo "Add keys for chains" -echo "-----------------------------------------------------------------------------------------------------------------" -hermes -c "$CONFIG_PATH" keys add "$CHAIN_A" -f user_seed_"$CHAIN_A".json -hermes -c "$CONFIG_PATH" keys add "$CHAIN_B" -f user_seed_"$CHAIN_B".json -hermes -c "$CONFIG_PATH" keys add "$CHAIN_A" -f user2_seed_"$CHAIN_A".json -k user2 -hermes -c "$CONFIG_PATH" keys add "$CHAIN_B" -f user2_seed_"$CHAIN_B".json -k user2 - -echo "=================================================================================================================" -echo " END-TO-END TESTS " -echo "=================================================================================================================" - -python3 /relayer/e2e/run.py -c "$CONFIG_PATH" --cmd "$RELAYER_CMD" - diff --git a/ci/gaia.Dockerfile b/ci/gaia.Dockerfile deleted file mode 100644 index fce8641dc8..0000000000 --- a/ci/gaia.Dockerfile +++ /dev/null @@ -1,54 +0,0 @@ -################################################################################################### -# Build image -################################################################################################### -FROM golang:alpine AS build-env - -# Add dependencies -RUN apk add --no-cache curl make git libc-dev bash gcc linux-headers eudev-dev python3 - -# Gaia Branch or Release -ARG RELEASE - -# Clone repository -RUN git clone https://github.com/cosmos/gaia /go/src/github.com/cosmos/gaia - -# Set working directory for the build -WORKDIR /go/src/github.com/cosmos/gaia - -# Checkout branch -RUN git checkout $RELEASE - -# Install minimum necessary dependencies, build Cosmos SDK, remove packages -RUN apk add --no-cache $PACKAGES && \ - make install - -# Show version -RUN gaiad version --long - -################################################################################################### -# Final image -################################################################################################### -FROM alpine:edge -LABEL maintainer="hello@informal.systems" - -ARG RELEASE -ARG CHAIN -ARG NAME - -# Add jq for debugging -RUN apk add --no-cache jq curl tree - -WORKDIR /$NAME - -# Copy over binaries from the build-env -COPY --from=build-env /go/bin/gaiad /usr/bin/gaiad - -COPY --chown=root:root ./chains/$CHAIN/$RELEASE/$NAME /chain/$CHAIN - -# Copy entrypoint script -COPY ./run-gaiad.sh /chain/$CHAIN -RUN chmod 755 /chain/$CHAIN/run-gaiad.sh - -RUN tree -pug /chain - -ENTRYPOINT "/bin/sh" diff --git a/ci/hermes.Dockerfile b/ci/hermes.Dockerfile deleted file mode 100644 index cbee1188d6..0000000000 --- a/ci/hermes.Dockerfile +++ /dev/null @@ -1,28 +0,0 @@ -# informalsystems/hermes image -# -# Used for running hermes in docker containers -# -# Usage: -# docker build . 
--build-arg TAG=v0.3.0 -t informalsystems/hermes:0.3.0 -f hermes.Dockerfile - -FROM rust:1.52-buster AS build-env - -ARG TAG -WORKDIR /root - -RUN git clone https://github.com/informalsystems/ibc-rs -RUN cd ibc-rs && git checkout $TAG && cargo build --release - - -FROM debian:buster-slim -LABEL maintainer="hello@informal.systems" - -RUN useradd -m hermes -s /bin/bash -WORKDIR /home/hermes -USER hermes:hermes -ENTRYPOINT ["/usr/bin/hermes"] - -COPY --chown=0:0 --from=build-env /usr/lib/x86_64-linux-gnu/libssl.so.1.1 /usr/lib/x86_64-linux-gnu/libssl.so.1.1 -COPY --chown=0:0 --from=build-env /usr/lib/x86_64-linux-gnu/libcrypto.so.1.1 /usr/lib/x86_64-linux-gnu/libcrypto.so.1.1 -COPY --chown=0:0 --from=build-env /root/ibc-rs/target/release/hermes /usr/bin/hermes -COPY --chown=0:0 --from=build-env /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt diff --git a/ci/no-std-check/.gitignore b/ci/no-std-check/.gitignore deleted file mode 100644 index 2f7896d1d1..0000000000 --- a/ci/no-std-check/.gitignore +++ /dev/null @@ -1 +0,0 @@ -target/ diff --git a/ci/no-std-check/Cargo.lock b/ci/no-std-check/Cargo.lock deleted file mode 100644 index 08f7e5024c..0000000000 --- a/ci/no-std-check/Cargo.lock +++ /dev/null @@ -1,2540 +0,0 @@ -# This file is automatically @generated by Cargo. -# It is not intended for manual editing. -version = 3 - -[[package]] -name = "Inflector" -version = "0.11.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe438c63458706e03479442743baae6c88256498e6431708f6dfc520a26515d3" -dependencies = [ - "lazy_static", - "regex", -] - -[[package]] -name = "addr2line" -version = "0.17.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9ecd88a8c8378ca913a680cd98f0f13ac67383d35993f86c90a70e3f137816b" -dependencies = [ - "gimli", -] - -[[package]] -name = "adler" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" - -[[package]] -name = "ahash" -version = "0.7.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47" -dependencies = [ - "getrandom 0.2.4", - "once_cell", - "version_check", -] - -[[package]] -name = "aho-corasick" -version = "0.7.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e37cfd5e7657ada45f742d6e99ca5788580b5c529dc78faf11ece6dc702656f" -dependencies = [ - "memchr", -] - -[[package]] -name = "ansi_term" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2" -dependencies = [ - "winapi", -] - -[[package]] -name = "anyhow" -version = "1.0.53" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94a45b455c14666b85fc40a019e8ab9eb75e3a124e05494f5397122bc9eb06e0" - -[[package]] -name = "arrayref" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544" - -[[package]] -name = "arrayvec" -version = "0.4.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd9fd44efafa8690358b7408d253adf110036b88f55672a933f01d616ad9b1b9" -dependencies = [ - "nodrop", -] - -[[package]] -name = "arrayvec" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" - -[[package]] -name = "arrayvec" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6" - -[[package]] -name = "async-trait" -version = "0.1.52" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "061a7acccaa286c011ddc30970520b98fa40e00c9d644633fb26b5fc63a265e3" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "autocfg" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" - -[[package]] -name = "backtrace" -version = "0.3.64" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e121dee8023ce33ab248d9ce1493df03c3b38a659b240096fcbd7048ff9c31f" -dependencies = [ - "addr2line", - "cc", - "cfg-if", - "libc", - "miniz_oxide", - "object", - "rustc-demangle", -] - -[[package]] -name = "base58" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6107fe1be6682a68940da878d9e9f5e90ca5745b3dec9fd1bb393c8777d4f581" - -[[package]] -name = "base64" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" - -[[package]] -name = "bitflags" -version = "1.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" - -[[package]] -name = "bitvec" -version = "0.20.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7774144344a4faa177370406a7ff5f1da24303817368584c6206c8303eb07848" -dependencies = [ - "funty", - "radium", - "tap", - "wyz", -] - -[[package]] -name = "blake2-rfc" -version = "0.2.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d6d530bdd2d52966a6d03b7a964add7ae1a288d25214066fd4b600f0f796400" -dependencies = [ - "arrayvec 0.4.12", - "constant_time_eq", -] - -[[package]] -name = "block-buffer" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0940dc441f31689269e10ac70eb1002a3a1d3ad1390e030043662eb7fe4688b" -dependencies = [ - "block-padding 0.1.5", - "byte-tools", - "byteorder", - "generic-array 0.12.4", -] - -[[package]] -name = "block-buffer" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" -dependencies = [ - "block-padding 0.2.1", - "generic-array 0.14.5", -] - -[[package]] -name = "block-buffer" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03588e54c62ae6d763e2a80090d50353b785795361b4ff5b3bf0a5097fc31c0b" -dependencies = [ - "generic-array 0.14.5", -] - -[[package]] -name = "block-padding" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa79dedbb091f449f1f39e53edf88d5dbe95f895dae6135a8d7b881fb5af73f5" -dependencies = [ - "byte-tools", -] - -[[package]] -name = "block-padding" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d696c370c750c948ada61c69a0ee2cbbb9c50b1019ddb86d9317157a99c2cae" - -[[package]] -name = "bumpalo" -version = "3.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a4a45a46ab1f2412e53d3a0ade76ffad2025804294569aae387231a0cd6e0899" - -[[package]] -name = "byte-slice-cast" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d30c751592b77c499e7bce34d99d67c2c11bdc0574e9a488ddade14150a4698" - -[[package]] -name = "byte-tools" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" - -[[package]] -name = "byteorder" -version = "1.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" - -[[package]] -name = "bytes" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4872d67bab6358e59559027aa3b9157c53d9358c51423c17554809a8858e0f8" - -[[package]] -name = "cc" -version = "1.0.72" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22a9137b95ea06864e018375b72adfb7db6e6f68cfc8df5a04d00288050485ee" - -[[package]] -name = "cfg-if" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" - -[[package]] -name = "chrono" -version = "0.4.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "670ad68c9088c2a963aaa298cb369688cf3f9465ce5e2d4ca10e6e0098a1ce73" -dependencies = [ - "libc", - "num-integer", - "num-traits", - "winapi", -] - -[[package]] -name = "constant_time_eq" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" - -[[package]] -name = "cpufeatures" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95059428f66df56b63431fdb4e1947ed2190586af5c5a8a8b71122bdf5a7f469" -dependencies = [ - "libc", -] - -[[package]] -name = "crunchy" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" - -[[package]] -name = "crypto-common" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57952ca27b5e3606ff4dd79b0020231aaf9d6aa76dc05fd30137538c50bd3ce8" -dependencies = [ - "generic-array 0.14.5", - "typenum", -] - -[[package]] -name = "crypto-mac" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b584a330336237c1eecd3e94266efb216c56ed91225d634cb2991c5f3fd1aeab" -dependencies = [ - "generic-array 0.14.5", - "subtle", -] - -[[package]] -name = "crypto-mac" -version = "0.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1d1a86f49236c215f271d40892d5fc950490551400b02ef360692c29815c714" -dependencies = [ - "generic-array 0.14.5", - "subtle", -] - -[[package]] -name = "curve25519-dalek" -version = "2.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a9b85542f99a2dfa2a1b8e192662741c9859a846b296bef1c92ef9b58b5a216" -dependencies = [ - "byteorder", - "digest 0.8.1", - "rand_core 0.5.1", - "subtle", - "zeroize", -] - -[[package]] -name = "curve25519-dalek" -version = "3.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b9fdf9972b2bd6af2d913799d9ebc165ea4d2e65878e329d9c6b372c4491b61" -dependencies = [ - "byteorder", - "digest 0.9.0", - "rand_core 0.5.1", - "subtle", - "zeroize", 
-] - -[[package]] -name = "derive_more" -version = "0.99.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "digest" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3d0c8c8752312f9713efd397ff63acb9f85585afbf179282e720e7704954dd5" -dependencies = [ - "generic-array 0.12.4", -] - -[[package]] -name = "digest" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" -dependencies = [ - "generic-array 0.14.5", -] - -[[package]] -name = "digest" -version = "0.10.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2fb860ca6fafa5552fb6d0e816a69c8e49f0908bf524e30a90d97c85892d506" -dependencies = [ - "block-buffer 0.10.1", - "crypto-common", -] - -[[package]] -name = "downcast-rs" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ea835d29036a4087793836fa931b08837ad5e957da9e23886b29586fb9b6650" - -[[package]] -name = "dyn-clonable" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e9232f0e607a262ceb9bd5141a3dfb3e4db6994b31989bbfd845878cba59fd4" -dependencies = [ - "dyn-clonable-impl", - "dyn-clone", -] - -[[package]] -name = "dyn-clonable-impl" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "558e40ea573c374cf53507fd240b7ee2f5477df7cfebdb97323ec61c719399c5" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "dyn-clone" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee2626afccd7561a06cf1367e2950c4718ea04565e20fb5029b6c7d8ad09abcf" - -[[package]] -name = "ed25519" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74e1069e39f1454367eb2de793ed062fac4c35c2934b76a81d90dd9abcd28816" -dependencies = [ - "signature", -] - -[[package]] -name = "ed25519-dalek" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c762bae6dcaf24c4c84667b8579785430908723d5c889f469d76a41d59cc7a9d" -dependencies = [ - "curve25519-dalek 3.2.0", - "ed25519", - "rand 0.7.3", - "serde", - "sha2 0.9.9", - "zeroize", -] - -[[package]] -name = "either" -version = "1.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" - -[[package]] -name = "environmental" -version = "1.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68b91989ae21441195d7d9b9993a2f9295c7e1a8c96255d8b729accddc124797" - -[[package]] -name = "fake-simd" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e88a8acf291dafb59c2d96e8f59828f3838bb1a70398823ade51a84de6a6deed" - -[[package]] -name = "fixed-hash" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfcf0ed7fe52a17a03854ec54a9f76d6d84508d1c0e66bc1793301c73fc8493c" -dependencies = [ - "byteorder", - "rand 0.8.4", - "rustc-hex", - "static_assertions", -] - -[[package]] -name = "flex-error" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c606d892c9de11507fa0dcffc116434f94e105d0bbdc4e405b61519464c49d7b" -dependencies = [ - "paste", -] - -[[package]] -name = "funty" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fed34cd105917e91daa4da6b3728c47b068749d6a62c59811f06ed2ac71d9da7" - -[[package]] -name = "futures" -version = "0.3.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f73fe65f54d1e12b726f517d3e2135ca3125a437b6d998caf1962961f7172d9e" -dependencies = [ - "futures-channel", - "futures-core", - "futures-executor", - "futures-io", - "futures-sink", - "futures-task", - "futures-util", -] - -[[package]] -name = "futures-channel" -version = "0.3.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3083ce4b914124575708913bca19bfe887522d6e2e6d0952943f5eac4a74010" -dependencies = [ - "futures-core", - "futures-sink", -] - -[[package]] -name = "futures-core" -version = "0.3.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c09fd04b7e4073ac7156a9539b57a484a8ea920f79c7c675d05d289ab6110d3" - -[[package]] -name = "futures-executor" -version = "0.3.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9420b90cfa29e327d0429f19be13e7ddb68fa1cccb09d65e5706b8c7a749b8a6" -dependencies = [ - "futures-core", - "futures-task", - "futures-util", - "num_cpus", -] - -[[package]] -name = "futures-io" -version = "0.3.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc4045962a5a5e935ee2fdedaa4e08284547402885ab326734432bed5d12966b" - -[[package]] -name = "futures-macro" -version = "0.3.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33c1e13800337f4d4d7a316bf45a567dbcb6ffe087f16424852d97e97a91f512" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "futures-sink" -version = "0.3.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21163e139fa306126e6eedaf49ecdb4588f939600f0b1e770f4205ee4b7fa868" - -[[package]] -name = "futures-task" -version = "0.3.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57c66a976bf5909d801bbef33416c41372779507e7a6b3a5e25e4749c58f776a" - -[[package]] -name = "futures-util" -version = "0.3.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8b7abd5d659d9b90c8cba917f6ec750a74e2dc23902ef9cd4cc8c8b22e6036a" -dependencies = [ - "futures-channel", - "futures-core", - "futures-io", - "futures-macro", - "futures-sink", - "futures-task", - "memchr", - "pin-project-lite", - "pin-utils", - "slab", -] - -[[package]] -name = "generic-array" -version = "0.12.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffdf9f34f1447443d37393cc6c2b8313aebddcd96906caf34e54c68d8e57d7bd" -dependencies = [ - "typenum", -] - -[[package]] -name = "generic-array" -version = "0.14.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd48d33ec7f05fbfa152300fdad764757cbded343c1aa1cff2fbaf4134851803" -dependencies = [ - "typenum", - "version_check", -] - -[[package]] -name = "getrandom" -version = "0.1.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" -dependencies = [ - "cfg-if", - "js-sys", - "libc", - "wasi 0.9.0+wasi-snapshot-preview1", - "wasm-bindgen", -] - -[[package]] -name = "getrandom" -version = "0.2.4" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "418d37c8b1d42553c93648be529cb70f920d3baf8ef469b74b9638df426e0b4c" -dependencies = [ - "cfg-if", - "libc", - "wasi 0.10.2+wasi-snapshot-preview1", -] - -[[package]] -name = "gimli" -version = "0.26.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78cc372d058dcf6d5ecd98510e7fbc9e5aec4d21de70f65fea8fecebcd881bd4" - -[[package]] -name = "hash-db" -version = "0.15.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d23bd4e7b5eda0d0f3a307e8b381fdc8ba9000f26fbe912250c0a4cc3956364a" - -[[package]] -name = "hash256-std-hasher" -version = "0.15.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92c171d55b98633f4ed3860808f004099b36c1cc29c42cfc53aa8591b21efcf2" -dependencies = [ - "crunchy", -] - -[[package]] -name = "hashbrown" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e" -dependencies = [ - "ahash", -] - -[[package]] -name = "hashbrown" -version = "0.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c21d40587b92fa6a6c6e3c1bdbf87d75511db5672f9c93175574b3a00df1758" -dependencies = [ - "ahash", -] - -[[package]] -name = "hermit-abi" -version = "0.1.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" -dependencies = [ - "libc", -] - -[[package]] -name = "hex" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" - -[[package]] -name = "hmac" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "126888268dcc288495a26bf004b38c5fdbb31682f992c84ceb046a1f0fe38840" -dependencies = [ - "crypto-mac 0.8.0", - "digest 0.9.0", -] - -[[package]] -name = "hmac" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a2a2320eb7ec0ebe8da8f744d7812d9fc4cb4d09344ac01898dbcb6a20ae69b" -dependencies = [ - "crypto-mac 0.11.1", - "digest 0.9.0", -] - -[[package]] -name = "hmac-drbg" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17ea0a1394df5b6574da6e0c1ade9e78868c9fb0a4e5ef4428e32da4676b85b1" -dependencies = [ - "digest 0.9.0", - "generic-array 0.14.5", - "hmac 0.8.1", -] - -[[package]] -name = "ibc" -version = "0.15.0" -dependencies = [ - "bytes", - "derive_more", - "flex-error", - "ibc-proto", - "ics23", - "num-traits", - "prost", - "prost-types", - "safe-regex", - "serde", - "serde_derive", - "serde_json", - "sha2 0.10.2", - "subtle-encoding", - "tendermint", - "tendermint-light-client-verifier", - "tendermint-proto", - "time", - "tracing", - "uint", -] - -[[package]] -name = "ibc-proto" -version = "0.18.0" -dependencies = [ - "base64", - "bytes", - "prost", - "prost-types", - "serde", - "tendermint-proto", -] - -[[package]] -name = "ics23" -version = "0.8.0-alpha" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18a435f2471c1b2ce14771da465d47321c5905fac866d0effa9e0a3eb5d94fcf" -dependencies = [ - "anyhow", - "bytes", - "hex", - "prost", - "ripemd160", - "sha2 0.9.9", - "sha3", - "sp-std 3.0.0", -] - -[[package]] -name = "impl-codec" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"161ebdfec3c8e3b52bf61c4f3550a1eea4f9579d10dc1b936f3171ebdcd6c443" -dependencies = [ - "parity-scale-codec", -] - -[[package]] -name = "impl-serde" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4551f042f3438e64dbd6226b20527fc84a6e1fe65688b58746a2f53623f25f5c" -dependencies = [ - "serde", -] - -[[package]] -name = "impl-trait-for-tuples" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "instant" -version = "0.1.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" -dependencies = [ - "cfg-if", -] - -[[package]] -name = "integer-sqrt" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "276ec31bcb4a9ee45f58bec6f9ec700ae4cf4f4f8f2fa7e06cb406bd5ffdd770" -dependencies = [ - "num-traits", -] - -[[package]] -name = "itertools" -version = "0.10.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9a9d19fa1e79b6215ff29b9d6880b706147f16e9b1dbb1e4e5947b5b02bc5e3" -dependencies = [ - "either", -] - -[[package]] -name = "itoa" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1aab8fc367588b89dcee83ab0fd66b72b50b72fa1904d7095045ace2b0c81c35" - -[[package]] -name = "js-sys" -version = "0.3.56" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a38fc24e30fd564ce974c02bf1d337caddff65be6cc4735a1f7eab22a7440f04" -dependencies = [ - "wasm-bindgen", -] - -[[package]] -name = "keccak" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67c21572b4949434e4fc1e1978b99c5f77064153c59d998bf13ecd96fb5ecba7" - -[[package]] -name = "lazy_static" -version = "1.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" - -[[package]] -name = "libc" -version = "0.2.117" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e74d72e0f9b65b5b4ca49a346af3976df0f9c61d550727f349ecd559f251a26c" - -[[package]] -name = "libsecp256k1" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0452aac8bab02242429380e9b2f94ea20cea2b37e2c1777a1358799bbe97f37" -dependencies = [ - "arrayref", - "base64", - "digest 0.9.0", - "hmac-drbg", - "libsecp256k1-core", - "libsecp256k1-gen-ecmult", - "libsecp256k1-gen-genmult", - "rand 0.8.4", - "serde", - "sha2 0.9.9", - "typenum", -] - -[[package]] -name = "libsecp256k1-core" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5be9b9bb642d8522a44d533eab56c16c738301965504753b03ad1de3425d5451" -dependencies = [ - "crunchy", - "digest 0.9.0", - "subtle", -] - -[[package]] -name = "libsecp256k1-gen-ecmult" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3038c808c55c87e8a172643a7d87187fc6c4174468159cb3090659d55bcb4809" -dependencies = [ - "libsecp256k1-core", -] - -[[package]] -name = "libsecp256k1-gen-genmult" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3db8d6ba2cec9eacc40e6e8ccc98931840301f1006e95647ceb2dd5c3aa06f7c" -dependencies = [ - "libsecp256k1-core", -] - 
-[[package]] -name = "lock_api" -version = "0.4.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88943dd7ef4a2e5a4bfa2753aaab3013e34ce2533d1996fb18ef591e315e2b3b" -dependencies = [ - "scopeguard", -] - -[[package]] -name = "log" -version = "0.4.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710" -dependencies = [ - "cfg-if", -] - -[[package]] -name = "matchers" -version = "0.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f099785f7595cc4b4553a174ce30dd7589ef93391ff414dbb67f62392b9e0ce1" -dependencies = [ - "regex-automata", -] - -[[package]] -name = "memchr" -version = "2.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "308cc39be01b73d0d18f82a0e7b2a3df85245f84af96fdddc5d202d27e47b86a" - -[[package]] -name = "memory-db" -version = "0.28.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d505169b746dacf02f7d14d8c80b34edfd8212159c63d23c977739a0d960c626" -dependencies = [ - "hash-db", - "hashbrown 0.11.2", - "parity-util-mem", -] - -[[package]] -name = "memory_units" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71d96e3f3c0b6325d8ccd83c33b28acb183edcb6c67938ba104ec546854b0882" - -[[package]] -name = "merlin" -version = "2.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e261cf0f8b3c42ded9f7d2bb59dea03aa52bc8a1cbc7482f9fc3fd1229d3b42" -dependencies = [ - "byteorder", - "keccak", - "rand_core 0.5.1", - "zeroize", -] - -[[package]] -name = "miniz_oxide" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a92518e98c078586bc6c934028adcca4c92a53d6a958196de835170a01d84e4b" -dependencies = [ - "adler", - "autocfg", -] - -[[package]] -name = "no-std-check" -version = "0.1.0" -dependencies = [ - "ibc", - "ibc-proto", - "sp-core", - "sp-io", - "sp-runtime", - "sp-std 4.0.0", - "tendermint", - "tendermint-light-client-verifier", - "tendermint-proto", -] - -[[package]] -name = "nodrop" -version = "0.1.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72ef4a56884ca558e5ddb05a1d1e7e1bfd9a68d9ed024c21704cc98872dae1bb" - -[[package]] -name = "num-bigint" -version = "0.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "090c7f9998ee0ff65aa5b723e4009f7b217707f1fb5ea551329cc4d6231fb304" -dependencies = [ - "autocfg", - "num-integer", - "num-traits", -] - -[[package]] -name = "num-derive" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "876a53fff98e03a936a674b29568b0e605f06b29372c2489ff4de23f1949743d" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "num-integer" -version = "0.1.44" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2cc698a63b549a70bc047073d2949cce27cd1c7b0a4a862d08a8031bc2801db" -dependencies = [ - "autocfg", - "num-traits", -] - -[[package]] -name = "num-rational" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c000134b5dbf44adc5cb772486d335293351644b801551abe8f75c84cfa4aef" -dependencies = [ - "autocfg", - "num-bigint", - "num-integer", - "num-traits", -] - -[[package]] -name = "num-traits" -version = "0.2.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd" -dependencies = [ - "autocfg", -] - -[[package]] -name = "num_cpus" -version = "1.13.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19e64526ebdee182341572e50e9ad03965aa510cd94427a4549448f285e957a1" -dependencies = [ - "hermit-abi", - "libc", -] - -[[package]] -name = "num_threads" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97ba99ba6393e2c3734791401b66902d981cb03bf190af674ca69949b6d5fb15" -dependencies = [ - "libc", -] - -[[package]] -name = "object" -version = "0.27.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67ac1d3f9a1d3616fd9a60c8d74296f22406a238b6a72f5cc1e6f314df4ffbf9" -dependencies = [ - "memchr", -] - -[[package]] -name = "once_cell" -version = "1.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da32515d9f6e6e489d7bc9d84c71b060db7247dc035bbe44eac88cf87486d8d5" - -[[package]] -name = "opaque-debug" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2839e79665f131bdb5782e51f2c6c9599c133c6098982a54c794358bf432529c" - -[[package]] -name = "opaque-debug" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" - -[[package]] -name = "parity-scale-codec" -version = "2.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "373b1a4c1338d9cd3d1fa53b3a11bdab5ab6bd80a20f7f7becd76953ae2be909" -dependencies = [ - "arrayvec 0.7.2", - "bitvec", - "byte-slice-cast", - "impl-trait-for-tuples", - "parity-scale-codec-derive", - "serde", -] - -[[package]] -name = "parity-scale-codec-derive" -version = "2.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1557010476e0595c9b568d16dcfb81b93cdeb157612726f5170d31aa707bed27" -dependencies = [ - "proc-macro-crate", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "parity-util-mem" -version = "0.10.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f4cb4e169446179cbc6b8b6320cc9fca49bd2e94e8db25f25f200a8ea774770" -dependencies = [ - "cfg-if", - "hashbrown 0.11.2", - "impl-trait-for-tuples", - "parity-util-mem-derive", - "parking_lot", - "primitive-types", - "winapi", -] - -[[package]] -name = "parity-util-mem-derive" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f557c32c6d268a07c921471619c0295f5efad3a0e76d4f97a05c091a51d110b2" -dependencies = [ - "proc-macro2", - "syn", - "synstructure", -] - -[[package]] -name = "parity-wasm" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be5e13c266502aadf83426d87d81a0f5d1ef45b8027f5a471c360abfe4bfae92" - -[[package]] -name = "parking_lot" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99" -dependencies = [ - "instant", - "lock_api", - "parking_lot_core", -] - -[[package]] -name = "parking_lot_core" -version = "0.8.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d76e8e1493bcac0d2766c42737f34458f1c8c50c0d23bcb24ea953affb273216" -dependencies = [ - "cfg-if", - "instant", - "libc", - "redox_syscall", - "smallvec", - "winapi", -] - -[[package]] -name = "paste" -version = "1.0.6" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "0744126afe1a6dd7f394cb50a716dbe086cb06e255e53d8d0185d82828358fb5" - -[[package]] -name = "pbkdf2" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "216eaa586a190f0a738f2f918511eecfa90f13295abec0e457cdebcceda80cbd" -dependencies = [ - "crypto-mac 0.8.0", -] - -[[package]] -name = "pbkdf2" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d95f5254224e617595d2cc3cc73ff0a5eaf2637519e25f03388154e9378b6ffa" -dependencies = [ - "crypto-mac 0.11.1", -] - -[[package]] -name = "pin-project-lite" -version = "0.2.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e280fbe77cc62c91527259e9442153f4688736748d24660126286329742b4c6c" - -[[package]] -name = "pin-utils" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" - -[[package]] -name = "ppv-lite86" -version = "0.2.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb9f9e6e233e5c4a35559a617bf40a4ec447db2e84c20b55a6f83167b7e57872" - -[[package]] -name = "primitive-types" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05e4722c697a58a99d5d06a08c30821d7c082a4632198de1eaa5a6c22ef42373" -dependencies = [ - "fixed-hash", - "impl-codec", - "impl-serde", - "scale-info", - "uint", -] - -[[package]] -name = "proc-macro-crate" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ebace6889caf889b4d3f76becee12e90353f2b8c7d875534a71e5742f8f6f83" -dependencies = [ - "thiserror", - "toml", -] - -[[package]] -name = "proc-macro2" -version = "1.0.36" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7342d5883fbccae1cc37a2353b09c87c9b0f3afd73f5fb9bba687a1f733b029" -dependencies = [ - "unicode-xid", -] - -[[package]] -name = "prost" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a07b0857a71a8cb765763950499cae2413c3f9cede1133478c43600d9e146890" -dependencies = [ - "bytes", - "prost-derive", -] - -[[package]] -name = "prost-derive" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b670f45da57fb8542ebdbb6105a925fe571b67f9e7ed9f47a06a84e72b4e7cc" -dependencies = [ - "anyhow", - "itertools", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "prost-types" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d0a014229361011dc8e69c8a1ec6c2e8d0f2af7c91e3ea3f5b2170298461e68" -dependencies = [ - "bytes", - "prost", -] - -[[package]] -name = "quote" -version = "1.0.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "864d3e96a899863136fc6e99f3d7cae289dafe43bf2c5ac19b70df7210c0a145" -dependencies = [ - "proc-macro2", -] - -[[package]] -name = "radium" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "643f8f41a8ebc4c5dc4515c82bb8abd397b527fc20fd681b7c011c2aee5d44fb" - -[[package]] -name = "rand" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" -dependencies = [ - "getrandom 0.1.16", - "libc", - "rand_chacha 0.2.2", - "rand_core 0.5.1", - "rand_hc 0.2.0", - "rand_pcg", -] - 
-[[package]] -name = "rand" -version = "0.8.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e7573632e6454cf6b99d7aac4ccca54be06da05aca2ef7423d22d27d4d4bcd8" -dependencies = [ - "libc", - "rand_chacha 0.3.1", - "rand_core 0.6.3", - "rand_hc 0.3.1", -] - -[[package]] -name = "rand_chacha" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" -dependencies = [ - "ppv-lite86", - "rand_core 0.5.1", -] - -[[package]] -name = "rand_chacha" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" -dependencies = [ - "ppv-lite86", - "rand_core 0.6.3", -] - -[[package]] -name = "rand_core" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" -dependencies = [ - "getrandom 0.1.16", -] - -[[package]] -name = "rand_core" -version = "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7" -dependencies = [ - "getrandom 0.2.4", -] - -[[package]] -name = "rand_hc" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" -dependencies = [ - "rand_core 0.5.1", -] - -[[package]] -name = "rand_hc" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d51e9f596de227fda2ea6c84607f5558e196eeaf43c986b724ba4fb8fdf497e7" -dependencies = [ - "rand_core 0.6.3", -] - -[[package]] -name = "rand_pcg" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16abd0c1b639e9eb4d7c50c0b8100b0d0f849be2349829c740fe8e6eb4816429" -dependencies = [ - "rand_core 0.5.1", -] - -[[package]] -name = "redox_syscall" -version = "0.2.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8383f39639269cde97d255a32bdb68c047337295414940c68bdd30c2e13203ff" -dependencies = [ - "bitflags", -] - -[[package]] -name = "ref-cast" -version = "1.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "300f2a835d808734ee295d45007adacb9ebb29dd3ae2424acfa17930cae541da" -dependencies = [ - "ref-cast-impl", -] - -[[package]] -name = "ref-cast-impl" -version = "1.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c38e3aecd2b21cb3959637b883bb3714bc7e43f0268b9a29d3743ee3e55cdd2" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "regex" -version = "1.5.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d83f127d94bdbcda4c8cc2e50f6f84f4b611f69c902699ca385a39c3a75f9ff1" -dependencies = [ - "aho-corasick", - "memchr", - "regex-syntax", -] - -[[package]] -name = "regex-automata" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" -dependencies = [ - "regex-syntax", -] - -[[package]] -name = "regex-syntax" -version = "0.6.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49b3de9ec5dc0a3417da371aab17d729997c15010e7fd24ff707773a33bddb64" - -[[package]] -name = "ripemd160" -version = "0.9.1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "2eca4ecc81b7f313189bf73ce724400a07da2a6dac19588b03c8bd76a2dcc251" -dependencies = [ - "block-buffer 0.9.0", - "digest 0.9.0", - "opaque-debug 0.3.0", -] - -[[package]] -name = "rustc-demangle" -version = "0.1.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ef03e0a2b150c7a90d01faf6254c9c48a41e95fb2a8c2ac1c6f0d2b9aefc342" - -[[package]] -name = "rustc-hash" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" - -[[package]] -name = "rustc-hex" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6" - -[[package]] -name = "ryu" -version = "1.0.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73b4b750c782965c211b42f022f59af1fbceabdd026623714f104152f1ec149f" - -[[package]] -name = "safe-proc-macro2" -version = "1.0.36" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "814c536dcd27acf03296c618dab7ad62d28e70abd7ba41d3f34a2ce707a2c666" -dependencies = [ - "unicode-xid", -] - -[[package]] -name = "safe-quote" -version = "1.0.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77e530f7831f3feafcd5f1aae406ac205dd998436b4007c8e80f03eca78a88f7" -dependencies = [ - "safe-proc-macro2", -] - -[[package]] -name = "safe-regex" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a15289bf322e0673d52756a18194167f2378ec1a15fe884af6e2d2cb934822b0" -dependencies = [ - "safe-regex-macro", -] - -[[package]] -name = "safe-regex-compiler" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fba76fae590a2aa665279deb1f57b5098cbace01a0c5e60e262fcf55f7c51542" -dependencies = [ - "safe-proc-macro2", - "safe-quote", -] - -[[package]] -name = "safe-regex-macro" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96c2e96b5c03f158d1b16ba79af515137795f4ad4e8de3f790518aae91f1d127" -dependencies = [ - "safe-proc-macro2", - "safe-regex-compiler", -] - -[[package]] -name = "scale-info" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c55b744399c25532d63a0d2789b109df8d46fc93752d46b0782991a931a782f" -dependencies = [ - "bitvec", - "cfg-if", - "derive_more", - "parity-scale-codec", - "scale-info-derive", -] - -[[package]] -name = "scale-info-derive" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baeb2780690380592f86205aa4ee49815feb2acad8c2f59e6dd207148c3f1fcd" -dependencies = [ - "proc-macro-crate", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "schnorrkel" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "021b403afe70d81eea68f6ea12f6b3c9588e5d536a94c3bf80f15e7faa267862" -dependencies = [ - "arrayref", - "arrayvec 0.5.2", - "curve25519-dalek 2.1.3", - "getrandom 0.1.16", - "merlin", - "rand 0.7.3", - "rand_core 0.5.1", - "sha2 0.8.2", - "subtle", - "zeroize", -] - -[[package]] -name = "scopeguard" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" - -[[package]] -name = "secrecy" -version = "0.8.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "9bd1c54ea06cfd2f6b63219704de0b9b4f72dcc2b8fdef820be6cd799780e91e" -dependencies = [ - "zeroize", -] - -[[package]] -name = "serde" -version = "1.0.136" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce31e24b01e1e524df96f1c2fdd054405f8d7376249a5110886fb4b658484789" -dependencies = [ - "serde_derive", -] - -[[package]] -name = "serde_bytes" -version = "0.11.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16ae07dd2f88a366f15bd0632ba725227018c69a1c8550a927324f8eb8368bb9" -dependencies = [ - "serde", -] - -[[package]] -name = "serde_derive" -version = "1.0.136" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08597e7152fcd306f41838ed3e37be9eaeed2b61c42e2117266a554fab4662f9" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "serde_json" -version = "1.0.78" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d23c1ba4cf0efd44be32017709280b32d1cea5c3f1275c3b6d9e8bc54f758085" -dependencies = [ - "itoa", - "ryu", - "serde", -] - -[[package]] -name = "serde_repr" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98d0516900518c29efa217c298fa1f4e6c6ffc85ae29fd7f4ee48f176e1a9ed5" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "sha2" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a256f46ea78a0c0d9ff00077504903ac881a1dafdc20da66545699e7776b3e69" -dependencies = [ - "block-buffer 0.7.3", - "digest 0.8.1", - "fake-simd", - "opaque-debug 0.2.3", -] - -[[package]] -name = "sha2" -version = "0.9.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d58a1e1bf39749807d89cf2d98ac2dfa0ff1cb3faa38fbb64dd88ac8013d800" -dependencies = [ - "block-buffer 0.9.0", - "cfg-if", - "cpufeatures", - "digest 0.9.0", - "opaque-debug 0.3.0", -] - -[[package]] -name = "sha2" -version = "0.10.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55deaec60f81eefe3cce0dc50bda92d6d8e88f2a27df7c5033b42afeb1ed2676" -dependencies = [ - "cfg-if", - "cpufeatures", - "digest 0.10.3", -] - -[[package]] -name = "sha3" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f81199417d4e5de3f04b1e871023acea7389672c4135918f05aa9cbf2f2fa809" -dependencies = [ - "block-buffer 0.9.0", - "digest 0.9.0", - "keccak", - "opaque-debug 0.3.0", -] - -[[package]] -name = "sharded-slab" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "900fba806f70c630b0a382d0d825e17a0f19fcd059a2ade1ff237bcddf446b31" -dependencies = [ - "lazy_static", -] - -[[package]] -name = "signature" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f054c6c1a6e95179d6f23ed974060dcefb2d9388bb7256900badad682c499de4" - -[[package]] -name = "slab" -version = "0.4.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9def91fd1e018fe007022791f865d0ccc9b3a0d5001e01aabb8b40e46000afb5" - -[[package]] -name = "smallvec" -version = "1.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2dd574626839106c320a323308629dcb1acfc96e32a8cba364ddc61ac23ee83" - -[[package]] -name = "sp-application-crypto" -version = "5.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"69c8dfffb7a15ac2c802bc9ce972b8449c8ce0a15254ae4d77a012a3a8c278d1" -dependencies = [ - "parity-scale-codec", - "scale-info", - "serde", - "sp-core", - "sp-io", - "sp-std 4.0.0", -] - -[[package]] -name = "sp-arithmetic" -version = "4.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa92b9707afdaa807bcb985fcc70645ebbe6fbb2442620d61dc47e7f3553a7ae" -dependencies = [ - "integer-sqrt", - "num-traits", - "parity-scale-codec", - "scale-info", - "serde", - "sp-debug-derive", - "sp-std 4.0.0", - "static_assertions", -] - -[[package]] -name = "sp-core" -version = "5.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e2a372e6c3a40464f490f76843dfc331d08e43f7672d73377c0458f5b5db7ed" -dependencies = [ - "base58", - "bitflags", - "blake2-rfc", - "byteorder", - "dyn-clonable", - "ed25519-dalek", - "futures", - "hash-db", - "hash256-std-hasher", - "hex", - "impl-serde", - "lazy_static", - "libsecp256k1", - "log", - "merlin", - "num-traits", - "parity-scale-codec", - "parity-util-mem", - "parking_lot", - "primitive-types", - "rand 0.7.3", - "regex", - "scale-info", - "schnorrkel", - "secrecy", - "serde", - "sha2 0.10.2", - "sp-core-hashing", - "sp-debug-derive", - "sp-externalities", - "sp-runtime-interface", - "sp-std 4.0.0", - "sp-storage", - "ss58-registry", - "substrate-bip39", - "thiserror", - "tiny-bip39", - "tiny-keccak", - "twox-hash", - "wasmi", - "zeroize", -] - -[[package]] -name = "sp-core-hashing" -version = "4.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec864a6a67249f0c8dd3d5acab43623a61677e85ff4f2f9b04b802d2fe780e83" -dependencies = [ - "blake2-rfc", - "byteorder", - "sha2 0.9.9", - "sp-std 4.0.0", - "tiny-keccak", - "twox-hash", -] - -[[package]] -name = "sp-debug-derive" -version = "4.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d676664972e22a0796176e81e7bec41df461d1edf52090955cdab55f2c956ff2" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "sp-externalities" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54f0fe949490000a97f367dc2acf4d2e7d0473ec0e13ce1775d0804bb64d3c98" -dependencies = [ - "environmental", - "parity-scale-codec", - "sp-std 4.0.0", - "sp-storage", -] - -[[package]] -name = "sp-io" -version = "5.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dece853333a36d405be49d72a5190c1aa3fa6fbc442a8f28c59b50c0351b21d6" -dependencies = [ - "futures", - "hash-db", - "libsecp256k1", - "log", - "parity-scale-codec", - "parking_lot", - "sp-core", - "sp-externalities", - "sp-keystore", - "sp-runtime-interface", - "sp-state-machine", - "sp-std 4.0.0", - "sp-tracing", - "sp-trie", - "sp-wasm-interface", - "tracing", - "tracing-core", -] - -[[package]] -name = "sp-keystore" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3c3376dede1940014254f6d10c9fca20c4deb1fae930a98edfdab0190a06ed4" -dependencies = [ - "async-trait", - "futures", - "merlin", - "parity-scale-codec", - "parking_lot", - "schnorrkel", - "sp-core", - "sp-externalities", - "thiserror", -] - -[[package]] -name = "sp-panic-handler" -version = "4.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2101f3c555fceafcfcfb0e61c55ea9ed80dc60bd77d54d9f25b369edb029e9a4" -dependencies = [ - "backtrace", - "lazy_static", - "regex", -] - -[[package]] -name = "sp-runtime" -version = "5.0.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "3772a765d076ac548c0c37f6d84dc8873bf64d0cfcf8b7b7322b5374d635ceff" -dependencies = [ - "either", - "hash256-std-hasher", - "impl-trait-for-tuples", - "log", - "parity-scale-codec", - "parity-util-mem", - "paste", - "rand 0.7.3", - "scale-info", - "serde", - "sp-application-crypto", - "sp-arithmetic", - "sp-core", - "sp-io", - "sp-std 4.0.0", -] - -[[package]] -name = "sp-runtime-interface" -version = "5.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab1f6625996742e3e5c0e6a6a2b5c0db363e2c52f4a71ea6cd6d963a10a4bdd6" -dependencies = [ - "impl-trait-for-tuples", - "parity-scale-codec", - "primitive-types", - "sp-externalities", - "sp-runtime-interface-proc-macro", - "sp-std 4.0.0", - "sp-storage", - "sp-tracing", - "sp-wasm-interface", - "static_assertions", -] - -[[package]] -name = "sp-runtime-interface-proc-macro" -version = "4.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b58cc6060b2d2f35061db5b4172f4a47353c3f01a89f281699a6c3f05d1267a" -dependencies = [ - "Inflector", - "proc-macro-crate", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "sp-state-machine" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e66851fd082cccc6f66dddf6eda458f30766a3febd769f1b5592120aa8072fd" -dependencies = [ - "hash-db", - "log", - "num-traits", - "parity-scale-codec", - "parking_lot", - "rand 0.7.3", - "smallvec", - "sp-core", - "sp-externalities", - "sp-panic-handler", - "sp-std 4.0.0", - "sp-trie", - "thiserror", - "tracing", - "trie-db", - "trie-root", -] - -[[package]] -name = "sp-std" -version = "3.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35391ea974fa5ee869cb094d5b437688fbf3d8127d64d1b9fed5822a1ed39b12" - -[[package]] -name = "sp-std" -version = "4.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14804d6069ee7a388240b665f17908d98386ffb0b5d39f89a4099fc7a2a4c03f" - -[[package]] -name = "sp-storage" -version = "5.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "838ec2a757a8e7b903684a71f649dfbd6449d91e3e5d93979fc8d2ee77d56fee" -dependencies = [ - "impl-serde", - "parity-scale-codec", - "ref-cast", - "serde", - "sp-debug-derive", - "sp-std 4.0.0", -] - -[[package]] -name = "sp-tracing" -version = "4.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4688fceac497cee7e9b72c387fef20fa517e2bf6a3bf52a4a45dcc9391d6201" -dependencies = [ - "parity-scale-codec", - "sp-std 4.0.0", - "tracing", - "tracing-core", - "tracing-subscriber", -] - -[[package]] -name = "sp-trie" -version = "5.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a7ad44bbb93fb9af94ea9869ec13602884f7d2360c245377aed7c6c92c6834e" -dependencies = [ - "hash-db", - "memory-db", - "parity-scale-codec", - "scale-info", - "sp-core", - "sp-std 4.0.0", - "trie-db", - "trie-root", -] - -[[package]] -name = "sp-wasm-interface" -version = "5.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60951479e2f26018c4c315a6f48956b9e2a3803953517bf8930e69b9a7a159df" -dependencies = [ - "impl-trait-for-tuples", - "log", - "parity-scale-codec", - "sp-std 4.0.0", - "wasmi", -] - -[[package]] -name = "ss58-registry" -version = "1.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8319f44e20b42e5c11b88b1ad4130c35fe2974665a007b08b02322070177136a" -dependencies = [ - "Inflector", - "proc-macro2", - "quote", - "serde", - "serde_json", - "unicode-xid", -] - -[[package]] -name = "static_assertions" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" - -[[package]] -name = "substrate-bip39" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49eee6965196b32f882dd2ee85a92b1dbead41b04e53907f269de3b0dc04733c" -dependencies = [ - "hmac 0.11.0", - "pbkdf2 0.8.0", - "schnorrkel", - "sha2 0.9.9", - "zeroize", -] - -[[package]] -name = "subtle" -version = "2.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" - -[[package]] -name = "subtle-encoding" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7dcb1ed7b8330c5eed5441052651dd7a12c75e2ed88f2ec024ae1fa3a5e59945" -dependencies = [ - "zeroize", -] - -[[package]] -name = "syn" -version = "1.0.86" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a65b3f4ffa0092e9887669db0eae07941f023991ab58ea44da8fe8e2d511c6b" -dependencies = [ - "proc-macro2", - "quote", - "unicode-xid", -] - -[[package]] -name = "synstructure" -version = "0.12.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f" -dependencies = [ - "proc-macro2", - "quote", - "syn", - "unicode-xid", -] - -[[package]] -name = "tap" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" - -[[package]] -name = "tendermint" -version = "0.23.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ca881fa4dedd2b46334f13be7fbc8cc1549ba4be5a833fe4e73d1a1baaf7949" -dependencies = [ - "async-trait", - "bytes", - "ed25519", - "ed25519-dalek", - "flex-error", - "futures", - "num-traits", - "once_cell", - "prost", - "prost-types", - "serde", - "serde_bytes", - "serde_json", - "serde_repr", - "sha2 0.9.9", - "signature", - "subtle", - "subtle-encoding", - "tendermint-proto", - "time", - "zeroize", -] - -[[package]] -name = "tendermint-light-client-verifier" -version = "0.23.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ae030a759b89cca84860d497d4d4e491615d8a9243cc04c61cd89335ba9b593" -dependencies = [ - "derive_more", - "flex-error", - "serde", - "tendermint", - "time", -] - -[[package]] -name = "tendermint-proto" -version = "0.23.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b71f925d74903f4abbdc4af0110635a307b3cb05b175fdff4a7247c14a4d0874" -dependencies = [ - "bytes", - "flex-error", - "num-derive", - "num-traits", - "prost", - "prost-types", - "serde", - "serde_bytes", - "subtle-encoding", - "time", -] - -[[package]] -name = "thiserror" -version = "1.0.30" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "854babe52e4df1653706b98fcfc05843010039b406875930a70e4d9644e5c417" -dependencies = [ - "thiserror-impl", -] - -[[package]] -name = "thiserror-impl" -version = "1.0.30" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa32fd3f627f367fe16f893e2597ae3c05020f8bba2666a4e6ea73d377e5714b" -dependencies = [ - 
"proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "thread_local" -version = "1.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5516c27b78311c50bf42c071425c560ac799b11c30b31f87e3081965fe5e0180" -dependencies = [ - "once_cell", -] - -[[package]] -name = "time" -version = "0.3.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "004cbc98f30fa233c61a38bc77e96a9106e65c88f2d3bef182ae952027e5753d" -dependencies = [ - "libc", - "num_threads", - "time-macros", -] - -[[package]] -name = "time-macros" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25eb0ca3468fc0acc11828786797f6ef9aa1555e4a211a60d64cc8e4d1be47d6" - -[[package]] -name = "tiny-bip39" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffc59cb9dfc85bb312c3a78fd6aa8a8582e310b0fa885d5bb877f6dcc601839d" -dependencies = [ - "anyhow", - "hmac 0.8.1", - "once_cell", - "pbkdf2 0.4.0", - "rand 0.7.3", - "rustc-hash", - "sha2 0.9.9", - "thiserror", - "unicode-normalization", - "wasm-bindgen", - "zeroize", -] - -[[package]] -name = "tiny-keccak" -version = "2.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237" -dependencies = [ - "crunchy", -] - -[[package]] -name = "tinyvec" -version = "1.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c1c1d5a42b6245520c249549ec267180beaffcc0615401ac8e31853d4b6d8d2" -dependencies = [ - "tinyvec_macros", -] - -[[package]] -name = "tinyvec_macros" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" - -[[package]] -name = "toml" -version = "0.5.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a31142970826733df8241ef35dc040ef98c679ab14d7c3e54d827099b3acecaa" -dependencies = [ - "serde", -] - -[[package]] -name = "tracing" -version = "0.1.34" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d0ecdcb44a79f0fe9844f0c4f33a342cbcbb5117de8001e6ba0dc2351327d09" -dependencies = [ - "cfg-if", - "pin-project-lite", - "tracing-attributes", - "tracing-core", -] - -[[package]] -name = "tracing-attributes" -version = "0.1.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e65ce065b4b5c53e73bb28912318cb8c9e9ad3921f1d669eb0e68b4c8143a2b" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "tracing-core" -version = "0.1.22" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03cfcb51380632a72d3111cb8d3447a8d908e577d31beeac006f836383d29a23" -dependencies = [ - "lazy_static", - "valuable", -] - -[[package]] -name = "tracing-log" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6923477a48e41c1951f1999ef8bb5a3023eb723ceadafe78ffb65dc366761e3" -dependencies = [ - "lazy_static", - "log", - "tracing-core", -] - -[[package]] -name = "tracing-serde" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc6b213177105856957181934e4920de57730fc69bf42c37ee5bb664d406d9e1" -dependencies = [ - "serde", - "tracing-core", -] - -[[package]] -name = "tracing-subscriber" -version = "0.2.25" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"0e0d2eaa99c3c2e41547cfa109e910a68ea03823cccad4a0525dcbc9b01e8c71" -dependencies = [ - "ansi_term", - "chrono", - "lazy_static", - "matchers", - "regex", - "serde", - "serde_json", - "sharded-slab", - "smallvec", - "thread_local", - "tracing", - "tracing-core", - "tracing-log", - "tracing-serde", -] - -[[package]] -name = "trie-db" -version = "0.23.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d32d034c0d3db64b43c31de38e945f15b40cd4ca6d2dcfc26d4798ce8de4ab83" -dependencies = [ - "hash-db", - "hashbrown 0.12.0", - "log", - "rustc-hex", - "smallvec", -] - -[[package]] -name = "trie-root" -version = "0.17.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a36c5ca3911ed3c9a5416ee6c679042064b93fc637ded67e25f92e68d783891" -dependencies = [ - "hash-db", -] - -[[package]] -name = "twox-hash" -version = "1.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ee73e6e4924fe940354b8d4d98cad5231175d615cd855b758adc658c0aac6a0" -dependencies = [ - "cfg-if", - "rand 0.8.4", - "static_assertions", -] - -[[package]] -name = "typenum" -version = "1.15.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987" - -[[package]] -name = "uint" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12f03af7ccf01dd611cc450a0d10dbc9b745770d096473e2faf0ca6e2d66d1e0" -dependencies = [ - "byteorder", - "crunchy", - "hex", - "static_assertions", -] - -[[package]] -name = "unicode-normalization" -version = "0.1.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d54590932941a9e9266f0832deed84ebe1bf2e4c9e4a3554d393d18f5e854bf9" -dependencies = [ - "tinyvec", -] - -[[package]] -name = "unicode-xid" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3" - -[[package]] -name = "valuable" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" - -[[package]] -name = "version_check" -version = "0.9.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" - -[[package]] -name = "wasi" -version = "0.9.0+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" - -[[package]] -name = "wasi" -version = "0.10.2+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" - -[[package]] -name = "wasm-bindgen" -version = "0.2.79" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25f1af7423d8588a3d840681122e72e6a24ddbcb3f0ec385cac0d12d24256c06" -dependencies = [ - "cfg-if", - "wasm-bindgen-macro", -] - -[[package]] -name = "wasm-bindgen-backend" -version = "0.2.79" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b21c0df030f5a177f3cba22e9bc4322695ec43e7257d865302900290bcdedca" -dependencies = [ - "bumpalo", - "lazy_static", - "log", - "proc-macro2", - "quote", - "syn", - "wasm-bindgen-shared", -] - -[[package]] -name = "wasm-bindgen-macro" -version = "0.2.79" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f4203d69e40a52ee523b2529a773d5ffc1dc0071801c87b3d270b471b80ed01" -dependencies = [ - "quote", - "wasm-bindgen-macro-support", -] - -[[package]] -name = "wasm-bindgen-macro-support" -version = "0.2.79" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfa8a30d46208db204854cadbb5d4baf5fcf8071ba5bf48190c3e59937962ebc" -dependencies = [ - "proc-macro2", - "quote", - "syn", - "wasm-bindgen-backend", - "wasm-bindgen-shared", -] - -[[package]] -name = "wasm-bindgen-shared" -version = "0.2.79" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d958d035c4438e28c70e4321a2911302f10135ce78a9c7834c0cab4123d06a2" - -[[package]] -name = "wasmi" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca00c5147c319a8ec91ec1a0edbec31e566ce2c9cc93b3f9bb86a9efd0eb795d" -dependencies = [ - "downcast-rs", - "libc", - "memory_units", - "num-rational", - "num-traits", - "parity-wasm", - "wasmi-validation", -] - -[[package]] -name = "wasmi-validation" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "165343ecd6c018fc09ebcae280752702c9a2ef3e6f8d02f1cfcbdb53ef6d7937" -dependencies = [ - "parity-wasm", -] - -[[package]] -name = "winapi" -version = "0.3.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" -dependencies = [ - "winapi-i686-pc-windows-gnu", - "winapi-x86_64-pc-windows-gnu", -] - -[[package]] -name = "winapi-i686-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" - -[[package]] -name = "winapi-x86_64-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" - -[[package]] -name = "wyz" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85e60b0d1b5f99db2556934e21937020776a5d31520bf169e851ac44e6420214" - -[[package]] -name = "zeroize" -version = "1.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c88870063c39ee00ec285a2f8d6a966e5b6fb2becc4e8dac77ed0d370ed6006" -dependencies = [ - "zeroize_derive", -] - -[[package]] -name = "zeroize_derive" -version = "1.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81e8f13fef10b63c06356d65d416b070798ddabcadc10d3ece0c5be9b3c7eddb" -dependencies = [ - "proc-macro2", - "quote", - "syn", - "synstructure", -] diff --git a/ci/no-std-check/Cargo.toml b/ci/no-std-check/Cargo.toml deleted file mode 100644 index b1567d863e..0000000000 --- a/ci/no-std-check/Cargo.toml +++ /dev/null @@ -1,37 +0,0 @@ -[package] -name = "no-std-check" -version = "0.1.0" -edition = "2021" -resolver = "2" - -[dependencies] -ibc = { path = "../../modules", default-features = false } -ibc-proto = { path = "../../proto", default-features = false } -tendermint = { version = "0.23.7", default-features = false } -tendermint-proto = { version = "0.23.7", default-features = false } -tendermint-light-client-verifier = { version = "0.23.7", default-features = false } - -sp-core = { version = "5.0.0", default-features = false, optional = true } -sp-io = { version = "5.0.0", default-features = false, optional = true } -sp-runtime = { version = 
"5.0.0", default-features = false, optional = true } -sp-std = { version = "4.0.0", default-features = false, optional = true } - -[features] -panic-handler = [] -use-substrate = [ - "sp-core", - "sp-io", - "sp-runtime", - "sp-std", -] -substrate-std = [ - "sp-core/std", - "sp-io/std", - "sp-runtime/std", - "sp-std/std", -] - -# [patch.crates-io] -# tendermint = { git = "https://github.com/informalsystems/tendermint-rs", branch = "v0.23.x" } -# tendermint-proto = { git = "https://github.com/informalsystems/tendermint-rs", branch = "v0.23.x" } -# tendermint-light-client-verifier = { git = "https://github.com/informalsystems/tendermint-rs", branch = "v0.23.x" } diff --git a/ci/no-std-check/Makefile b/ci/no-std-check/Makefile deleted file mode 100644 index b41cbf75f4..0000000000 --- a/ci/no-std-check/Makefile +++ /dev/null @@ -1,34 +0,0 @@ -NIGHTLY_VERSION=nightly - -setup: - rustup install $(NIGHTLY_VERSION) - rustup target add wasm32-unknown-unknown --toolchain $(NIGHTLY_VERSION) - -build-substrate: - cargo build \ - --no-default-features \ - --features use-substrate,substrate-std - -check-panic-conflict: - cargo build \ - --no-default-features \ - --features panic-handler - -check-cargo-build-std: - rustup run $(NIGHTLY_VERSION) -- \ - cargo build -Z build-std=core,alloc \ - --no-default-features \ - --target x86_64-unknown-linux-gnu - -check-wasm: - rustup run $(NIGHTLY_VERSION) -- \ - cargo build \ - --features panic-handler \ - --target wasm32-unknown-unknown - -check-substrate: - rustup run $(NIGHTLY_VERSION) -- \ - cargo build \ - --no-default-features \ - --features use-substrate \ - --target wasm32-unknown-unknown diff --git a/ci/no-std-check/README.md b/ci/no-std-check/README.md deleted file mode 100644 index 16468ae76f..0000000000 --- a/ci/no-std-check/README.md +++ /dev/null @@ -1,191 +0,0 @@ -# `no_std` Compliance Check - -This crate checks the `no_std` compliance of the supported crates in ibc-rs. - -## Make Recipes - -- `check-panic-conflict` - Check for `no_std` compliance by installing a panic handler, and any other crate importing `std` will cause a conflict. Runs on default target. - -- `check-cargo-build-std` - Check for `no_std` compliance using Cargo nightly's `build-std` feature. Runs on the target `x86_64-unknown-linux-gnu`. - -- `check-wasm` - Check for WebAssembly and `no_std` compliance by building on the target `wasm32-unknown-unknown` and installing a panic handler. - -- `check-substrate` - Check for Substrate, WebAssembly, and `no_std` compliance by importing Substrate crates and building on `wasm32-unknown-unknown`. Any crate using `std` will cause a conflict on the panic and out-of-memory (OOM) handlers installed by `sp-io`. - -## Checking Single Unsupported Dependency - -By default, the check scripts try to build all unsupported dependencies and will fail. To test if a particular crate still fails the no_std check, edit the `use-unsupported` list in [Cargo.toml](./Cargo.toml) to uncomment all crates except the crate that we are interested to check. 
For example, to check for only the `getrandom` crate:
-
-```toml
-use-unsupported = [
-  # "tonic",
-  # "socket2",
-  "getrandom",
-  # "serde",
-  # ...,
-]
-```
-
-## Adding New Dependencies
-
-For a crate named `my-package-1.2.3`, first try adding the crate to the [Cargo.toml](./Cargo.toml) of this project as:
-
-```toml
-my-package = { version = "1.2.3" }
-```
-
-Then comment out the `use-unsupported` list in the `[features]` section of Cargo.toml and replace it with an empty list temporarily for testing:
-
-```toml
-[features]
-...
-use-unsupported = []
-# use-unsupported = [
-#   "tonic",
-#   "socket2",
-#   "getrandom",
-#   ...
-# ]
-```
-
-Then import the package in [src/lib.rs](./src/lib.rs):
-
-```rust
-use my_package;
-```
-
-Note that you must import the package in `lib.rs`, otherwise Cargo will skip linking the crate and fail to check for the panic handler conflicts.
-
-Then run all of the check scripts and see if any of them fails. If a check script fails, try disabling the default features and run the checks again:
-
-```toml
-my-package = { version = "1.2.3", default-features = false }
-```
-
-You may also need other tweaks, such as enabling custom features, to make it run on Wasm.
-At this point, if the checks pass, we have verified the no_std compliance of `my-package`. Restore the original `use-unsupported` list and commit the code.
-
-Otherwise, if it fails, we have found a dependency that does not support `no_std`. Update Cargo.toml to make the crate optional:
-
-```toml
-my-package = { version = "1.2.3", optional = true, default-features = false }
-```
-
-Now we have to modify [lib.rs](./src/lib.rs) again and only import the crate if it is enabled:
-
-```rust
-#[cfg(feature = "my-package")]
-use my_package;
-```
-
-Restore the original `use-unsupported` list, and add `my-package` to the end of the list:
-
-```toml
-use-unsupported = [
-  "tonic",
-  "socket2",
-  "getrandom",
-  ...,
-  "my-package",
-]
-```
-
-Commit the changes so that we can track whether a newer version of the crate supports no_std in the future.
-
-## Conflict Detection Methods
-
-There are two methods that we use to detect `std` conflicts:
-
-### Panic Handler Conflict
-
-This follows the outline of the guide by
-[Danilo Bargen](https://blog.dbrgn.ch/2019/12/24/testing-for-no-std-compatibility/)
-to register a panic handler in the `no-std-check` crate.
-Any crate imported by `no-std-check` that uses `std` will cause a compile error that
-looks like the following:
-
-```
-$ cargo build
-   Updating crates.io index
-   Compiling no-std-check v0.1.0 (/data/development/informal/ibc-rs/no-std-check)
-error[E0152]: found duplicate lang item `panic_impl`
-  --> src/lib.rs:31:1
-   |
-31 | fn panic(_info: &PanicInfo) -> ! {
-   | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-   |
-   = note: the lang item is first defined in crate `std` (which `prost` depends on)
-   = note: first definition in `std` loaded from /home/ubuntu/.rustup/toolchains/stable-x86_64-unknown-linux-gnu/lib/rustlib/x86_64-unknown-linux-gnu/lib/libstd-b6b48477bfa8c673.rlib
-   = note: second definition in the local crate (`no_std_check`)
-
-error: aborting due to previous error
-
-For more information about this error, try `rustc --explain E0152`.
-error: could not compile `no-std-check`
-```
-
-- Pros:
-  - Can be tested using Rust stable.
-- Cons:
-  - Crates must be listed in both `Cargo.toml` and `lib.rs`.
-  - Crates that are listed in `Cargo.toml` but not imported inside `lib.rs` are not checked.
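-
-The gist of this technique is small enough to sketch inline. The snippet below is an illustrative, minimal reproduction of the approach (the real handler lives in [src/lib.rs](./src/lib.rs), gated behind the `panic-handler` feature); it is not a replacement for that file:
-
-```rust
-// Minimal sketch: a `no_std` crate that registers its own panic handler.
-// If any (transitive) dependency pulls in `std`, the compiler reports a
-// duplicate `panic_impl` lang item and the build fails.
-#![no_std]
-
-use core::panic::PanicInfo;
-
-#[panic_handler]
-fn panic(_info: &PanicInfo) -> ! {
-    loop {}
-}
-```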
-
-### Override std crates using Cargo Nightly
-
-This uses the unstable `build-std` feature provided by
-[Cargo Nightly](https://doc.rust-lang.org/nightly/cargo/reference/unstable.html#build-std).
-With this we can explicitly pass the std crates we want to support, `core` and `alloc`,
-via the command line, and exclude the `std` crate.
-
-If any of the dependencies use `std`, they will fail to compile, albeit with
-confusing error messages:
-
-```
-$ rustup run nightly -- cargo build -j1 -Z build-std=core,alloc --target x86_64-unknown-linux-gnu
-   ...
-   Compiling bytes v1.0.1
-error[E0773]: attempted to define built-in macro more than once
-  --> /home/ubuntu/.rustup/toolchains/nightly-x86_64-unknown-linux-gnu/lib/rustlib/src/rust/library/core/src/macros/mod.rs:1201:5
-   |
-1201 | / macro_rules! cfg {
-1202 | | ($($cfg:tt)*) => {
-1203 | | /* compiler built-in */
-1204 | | };
-1205 | | }
-   | |_____^
-   |
-note: previously defined here
-  --> /home/ubuntu/.rustup/toolchains/nightly-x86_64-unknown-linux-gnu/lib/rustlib/src/rust/library/core/src/macros/mod.rs:1201:5
-   |
-1201 | / macro_rules! cfg {
-1202 | | ($($cfg:tt)*) => {
-1203 | | /* compiler built-in */
-1204 | | };
-1205 | | }
-   | |_____^
-
-error: duplicate lang item in crate `core` (which `std` depends on): `bool`.
-   |
-   = note: the lang item is first defined in crate `core` (which `bytes` depends on)
-   = note: first definition in `core` loaded from /data/development/informal/ibc-rs/no-std-check/target/x86_64-unknown-linux-gnu/debug/deps/libcore-c00d94870d25cd7e.rmeta
-   = note: second definition in `core` loaded from /home/ubuntu/.rustup/toolchains/nightly-x86_64-unknown-linux-gnu/lib/rustlib/x86_64-unknown-linux-gnu/lib/libcore-9924c22ae1efcf66.rlib
-
-error: duplicate lang item in crate `core` (which `std` depends on): `char`.
-   |
-   = note: the lang item is first defined in crate `core` (which `bytes` depends on)
-   = note: first definition in `core` loaded from /data/development/informal/ibc-rs/no-std-check/target/x86_64-unknown-linux-gnu/debug/deps/libcore-c00d94870d25cd7e.rmeta
-   = note: second definition in `core` loaded from /home/ubuntu/.rustup/toolchains/nightly-x86_64-unknown-linux-gnu/lib/rustlib/x86_64-unknown-linux-gnu/lib/libcore-9924c22ae1efcf66.rlib
-...
-```
-
-The above errors are shown when building the `bytes` crate. This is caused by `bytes` using imports from `std`,
-which causes `std` to be included, producing conflicts with the `core` crate that is explicitly built by Cargo.
-This produces very long error messages, so we may want to use tools like `less` to scroll through the errors.
-
-Pros:
-  - Directly identifies use of `std` in dependencies.
-  - Error is raised on the first dependency that imports `std`.
-
-Cons:
-  - Nightly-only feature that is subject to change.
-  - Confusing and long error messages.
diff --git a/ci/no-std-check/src/lib.rs b/ci/no-std-check/src/lib.rs deleted file mode 100644 index c45dffb897..0000000000 --- a/ci/no-std-check/src/lib.rs +++ /dev/null @@ -1,52 +0,0 @@ -// ensure_no_std/src/main.rs -#![no_std] -#![allow(unused_imports)] - -extern crate alloc; - -// Import the crates that we want to check if they are fully no-std compliance - -use ibc; -use ibc_proto; -use tendermint; -use tendermint_proto; -use tendermint_light_client_verifier; - -#[cfg(feature = "sp-core")] -use sp_core; - -#[cfg(feature = "sp-io")] -use sp_io; - -#[cfg(feature = "sp-runtime")] -use sp_runtime; - -#[cfg(feature = "sp-std")] -use sp_std; - -use core::panic::PanicInfo; - -/* - -This function definition checks for the compliance of no-std in -dependencies by causing a compile error if this crate is -linked with `std`. When that happens, you should see error messages -such as follows: - -``` -error[E0152]: found duplicate lang item `panic_impl` - --> no-std-check/src/lib.rs - | -12 | fn panic(_info: &PanicInfo) -> ! { - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - | - = note: the lang item is first defined in crate `std` (which `offending-crate` depends on) -``` - - */ -#[cfg(feature="panic-handler")] -#[panic_handler] -#[no_mangle] -fn panic(_info: &PanicInfo) -> ! { - loop {} -} diff --git a/ci/relayer.Dockerfile b/ci/relayer.Dockerfile deleted file mode 100644 index 265446581b..0000000000 --- a/ci/relayer.Dockerfile +++ /dev/null @@ -1,40 +0,0 @@ -##################################################### -#### Relayer image #### -##################################################### -FROM --platform=linux/amd64 ubuntu:21.04 -LABEL maintainer="hello@informal.systems" - -ARG RELEASE - -# Add Python 3 and Ping and Telnet (for testing) -RUN apt-get update -y && apt-get install python3 python3-toml iputils-ping telnet -y - -# Copy relayer executable -COPY ./hermes /usr/bin/hermes - -# Relayer folder -WORKDIR /relayer - -# Copy configuration file -COPY ci/simple_config.toml . - -# Copy setup script -COPY ci/e2e.sh . 
- -# Copy end-to-end testing script -COPY e2e ./e2e - -# Copy key files -COPY ci/chains/gaia/$RELEASE/ibc-0/user_seed.json ./user_seed_ibc-0.json -RUN cat ./user_seed_ibc-0.json -COPY ci/chains/gaia/$RELEASE/ibc-1/user_seed.json ./user_seed_ibc-1.json -RUN cat ./user_seed_ibc-1.json -COPY ci/chains/gaia/$RELEASE/ibc-0/user2_seed.json ./user2_seed_ibc-0.json -RUN cat ./user2_seed_ibc-0.json -COPY ci/chains/gaia/$RELEASE/ibc-1/user2_seed.json ./user2_seed_ibc-1.json -RUN cat ./user2_seed_ibc-1.json - -# Make it executable -RUN chmod +x e2e.sh - -ENTRYPOINT ["/bin/sh"] diff --git a/ci/run-gaiad.sh b/ci/run-gaiad.sh deleted file mode 100755 index 78ac334b64..0000000000 --- a/ci/run-gaiad.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/sh - -gaiad start --home=/chain/gaia --grpc.address=$CHAIN_ID:9090 --pruning=nothing --log_level error 2>&1 | tee /chain/gaia/gaiad.log \ No newline at end of file diff --git a/ci/simd.Dockerfile b/ci/simd.Dockerfile deleted file mode 100644 index 2a235cfc5a..0000000000 --- a/ci/simd.Dockerfile +++ /dev/null @@ -1,14 +0,0 @@ -FROM alpine -LABEL maintainer="hello@informal.systems" - -EXPOSE 26656 26657 26660 1317 - -ENTRYPOINT ["/usr/bin/simd"] - -CMD ["--home", "/root/.simapp", "start"] - -VOLUME [ "/root" ] - -#Commit ID: c2d40e1099d2c00c02f68bc156c57603640e3590 -COPY cosmos-sdk/build/simd /usr/bin/simd -COPY simapp/ /root/.simapp diff --git a/ci/simple_config.toml b/ci/simple_config.toml deleted file mode 100644 index 0eea92af7a..0000000000 --- a/ci/simple_config.toml +++ /dev/null @@ -1,60 +0,0 @@ -[global] -log_level = 'trace' - -[mode] - -[mode.clients] -enabled = true -refresh = true -misbehaviour = true - -[mode.connections] -enabled = false - -[mode.channels] -enabled = false - -[mode.packets] -enabled = true -clear_interval = 100 -clear_on_start = true -tx_confirmation = true - -[telemetry] -enabled = false -host = '127.0.0.1' -port = 3001 - -[[chains]] -id = 'ibc-0' -rpc_addr = 'http://ibc-0:26657' -grpc_addr = 'http://ibc-0:9090' -websocket_addr = 'ws://ibc-0:26657/websocket' -rpc_timeout = '10s' -account_prefix = 'cosmos' -key_name = 'testkey' -store_prefix = 'ibc' -max_gas = 3000000 -max_msg_num = 30 -max_tx_size = 2097152 -gas_price = { price = 0.001, denom = 'stake' } -clock_drift = '5s' -trusting_period = '14days' -trust_threshold = { numerator = '1', denominator = '3' } - -[[chains]] -id = 'ibc-1' -rpc_addr = 'http://ibc-1:26657' -grpc_addr = 'http://ibc-1:9090' -websocket_addr = 'ws://ibc-1:26657/websocket' -rpc_timeout = '10s' -account_prefix = 'cosmos' -key_name = 'testkey' -store_prefix = 'ibc' -max_gas = 3000000 -max_msg_num = 30 -max_tx_size = 2097152 -gas_price = { price = 0.001, denom = 'stake' } -clock_drift = '5s' -trusting_period = '14days' -trust_threshold = { numerator = '1', denominator = '3' } diff --git a/docs/spec/README.md b/docs/spec/README.md deleted file mode 100644 index 6adf6bb24d..0000000000 --- a/docs/spec/README.md +++ /dev/null @@ -1,7 +0,0 @@ -# Specification and verification of IBC protocols - - * [tla](./tla) comprises TLA+ specification for the IBC specification. - - * [connection-handshake](./connection-handshake) [Deprecated] contains English and TLA+ specifications for the IBC Connection Handshake Protocol (ICS 003). - - * [relayer](./relayer) contains English specification of the relayer. 
diff --git a/docs/spec/connection-handshake/CHANGELOG.md b/docs/spec/connection-handshake/CHANGELOG.md deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/docs/spec/connection-handshake/L1_2.md b/docs/spec/connection-handshake/L1_2.md deleted file mode 100644 index 62bd5bcb75..0000000000 --- a/docs/spec/connection-handshake/L1_2.md +++ /dev/null @@ -1,628 +0,0 @@ -# IBC Connection Handshake (ICS3) English Spec - -## L1. Problem statement / outside view - -Suppose there are two chains (e.g., distributed ledgers) that execute independently of each other. -What should be an abstraction for representing a path of communication between the two? -We use a __connection__ abstraction to represent this path of communication (see the [ICS 003 spec](#references)). - -The IBC connection handshake protocol, at the highest level of abstraction, enables two chains to establish -- i.e., __open__ -- a connection with each other. -Once open, connections cannot be closed, so a closing handshake does not exist. -Whenever we speak of connection handshake, we implicitly mean the __opening__ handshake. -Throughout the rest of this document, we will often use the abbreviation "ICS3" to stand for __connection handshake__ problem (or protocol, depending on the context). - -A connection involves the two __parties__ (the respective chains) involved in the communication, as well as a __relayer__ which handles message transmission between the chains. -The relayer carries a central responsibility in handshaking and maintaining a connection. -That is, a connection handshake is essentially a three-party protocol: two chains, plus a relayer. -This document focuses on the functionality of the two chains, while providing a minimal description of the relayer. - -#### Sequential problem statement - -###### Definitions -A chain comprises three elements that are relevant for connections: - -- a __module__, or the "connection handshake module" or "ICS3 module": this is a process (running as a component of the chain) which implements the ICS3 protocol; -- the __store__ of the chain, alternatively called the "consensus state" or just "state" of the chain; -- a __client__: a process running on the chain, which represents a window onto the store of the other chain. - -We give concrete names, Alice and Bob, to the chains involved in our problem, to simplify description. -The diagram below sketches the system model of the connection handshake problem. -Note that the _client_ on Alice points to the _store_ on Bob; vice versa is true for the _client_ on Bob (but there is no corresponding arrow to keep the figure cleaner). -Alice and Bob have no direct medium for communicating with each other, so a relayer process sits between the two chains and enables their communication (more details on this will follow). 
- - - -``` - IBC Connection Handshake - High-level Model - Alice Bob - +-----------+ +-----------+ - | Chain | +---------+ | Chain | - | |<--->| Relayer |<--->| | - | +-------+ | +---------+ | +-------+ | - | |Module | | | |Module | | - | +-------+ | | +-------+ | - | +-------+ | | +-------+ | - | | Store | | -->| Store | | - | +-------+ | -------/ | +-------+ | - |+---------+| --------/ |+---------+| - || Client |---/ || Client || - |+---------+| |+---------+| - +-----------+ +-----------+ -``` - -###### Problem statement and guarantees -A connection handshake allows, briefly, that the modules on two chains agree on a consistent view of their state (i.e., their store), and each module allocates a new connection associated with this state. - -We identify two basic (high-level) requirements that a connection handshake should guarantee: - -- **[ICS3-Seq-1-Agreement]** If Alice finishes a handshake by opening a new connection `c1` with Bob, and Bob finishes the handshake opening the connection `c2`, then `c1 == c2`. -In other words, if a connection handshake finishes, the two parties have a consistent view of this connection. We say that Alice stores one end of the connection, label `c1`, and Bob stores the other end of the same connection, label `c2`. - -- **[ICS3-Seq-2-Termination]** Eventually, the modules of Alice and Bob should both finish the handshake, opening a connection. -Opening a connection means that Alice and Bob both allocate in their local store a new connection. - -Requirement **[ICS3-Seq-1-Agreement]** represents the basic safety guarantee, while **[ICS3-Seq-2-Termination]** is the basic liveness guarantee we would like to have. - - -## L2. Protocol specification / protocol view - -### 2.1 System model specification - -#### System model - -A **chain** is a deterministic state machine. -Each chain may be replicated, i.e., consisting of a replicated state machine (RSM), or it may be a standalone process. -As a side note, there are certain requirements a chain must satisfy in practice(e.g., see [[ICS 024](#references)]); at this level of specification, however, we model each chain as consisting of those three components we described earlier: module, store, and a client. - -In ICS3, __actors__ represent entities that may trigger this protocol and provide external feedback to the protocol. -To quote from IBC terminology: - -> An actor, or a user (used interchangeably), is an entity interacting with the IBC protocol. An actor can be a human end-user, a module or smart contract running on a blockchain, or an off-chain relayer process capable of signing transactions. [[IBC terminology](#references)] - -###### Communication assumptions - -A relayer intermediates communication between Alice and Bob. -We model communication as a shared mutable state. -Each chain exposes an API comprising read, write, as well as a queue (FIFO) functionality. -So there are two parts to the communication API: - - 1. a read/write store: - - this holds the entire state of the chain; - - each module can write to this store, and we primarily concerned with the function `setConnection` that write a new connection into the store; - - external processes -- in our case, any relayer -- can read from the store via a function `getConnection`; - - 2. a queue of datagrams, or messages: - - each module can `pop` datagrams stored in this queue; - - external processes (relayers) can `push` to this queue. - -We describe the API in the code snippet below. -Connection handshake modules implement this interface. 
-Note that we use the modifiers 'private' and 'public' to denote which API functions can be invoked by external processes (e.g., a relayer) and which functions are private to the module implementing this interface.
-The data types (`Identifier` and `ConnectionEnd`) are defined below.
-
-```golang
-type CommunicationLayer interface {
-    // Write a new value (a connection) to the local store, indexed by the
-    // identifier of the connection on the local party.
-    private setConnection(i Identifier, e ConnectionEnd)
-
-    // Relayer invokes this to `read` from a chain's local store.
-    public getConnection(i Identifier) -> ConnectionEnd
-
-    // Relayer invokes this to append a datagram into a chain's
-    // local store.
-    public push("outstandingDatagrams", g Datagram)
-
-    // A module consumes elements from its local store
-    // "outstandingDatagrams".
-    private pop("outstandingDatagrams") -> Datagram
-    // The typical use case is that for each datagram, this module invokes the
-    // corresponding handler, e.g., ConnTryHandler.
-}
-```
-
-Preconditions:
-  - the datagram `g` as well as the Identifier `i` and Connection `e` (respectively for `push`, `setConnection`, and `getConnection`) are non-null;
-
-Postconditions:
-  - for `setConnection`: the local store on the module implementing this function stores the value `e` at position `i`;
-  - for `push`: the Datagram `g` is appended to the `outstandingDatagrams` queue.
-
-Error conditions:
-  - `getConnection` may fail (returning `null`) if no `ConnectionEnd` exists for the input parameter `i`;
-  - before invoking the function `setConnection`, the caller should make sure that the client in the `Identifier` field was created;
-
-
-Properties of the communication layer:
-
-- **[ICS3-CommAssumption-1-RWIntegrity]** If some connection `e` is returned from `getConnection(i)`, then `setConnection(i, e)` was previously invoked.
-
-- **[ICS3-CommAssumption-2-QueueIntegrity]** If some module `d` returns a datagram `g` from `Pop("outstandingDatagrams")`, then a process previously invoked `Push("outstandingDatagrams", g)`.
-
-- **[ICS3-CommAssumption-3-QueueNoLoss]** If some process invokes `Push("outstandingDatagrams", g)`, then eventually a `Pop` operation will return `g`.
-
-
-Refinement remarks (relation to L3):
-  - The `push` function is a more abstract version of `submitDatagram` ([ICS 018](#references)).
-  - The `setConnection` function is a more abstract version of the functionality accomplished by `provableStore.set` and `addConnectionToClient` ([ICS 003](#references)).
-  - The `getConnection` function is analogous to `getConnection` ([ICS 018](#references)).
-
-
-###### Fault assumptions
-The chains Alice and Bob (and their module, local store, and client, respectively) may be subject to arbitrary (Byzantine) faults.
-The properties we define below are guaranteed to hold iff both Alice and Bob behave correctly, i.e., do not deviate from any protocol we describe below.
-Besides these two parties, the system comprises an arbitrary number of other parties, any of which may be Byzantine; these other chains may, for instance, try to impersonate Alice or Bob or attack them in some way.
-The termination property of the ICS3 protocol depends on the existence of at least one correct relayer.
-Additionally, there may be an arbitrary number of relayers that can communicate (i.e., [read/push](#communication-assumptions)) with Alice and Bob and are subject to Byzantine faults.
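-
-As a side note for implementers, the `CommunicationLayer` interface above could be rendered in Rust roughly as follows. This is only an illustrative sketch with names of our choosing; it is not part of the specification, nor of any existing ibc-rs API:
-
-```rust
-// Illustrative Rust rendering of the abstract CommunicationLayer interface.
-// All type and method names here are hypothetical.
-pub type Identifier = String;
-
-pub struct ConnectionEnd; // stands in for the ConnectionEnd type defined later
-pub struct Datagram;      // an opaque handshake datagram
-
-pub trait CommunicationLayer {
-    // Private to the module: write a connection end into the local store,
-    // indexed by the identifier of the connection on the local party.
-    fn set_connection(&mut self, id: Identifier, end: ConnectionEnd);
-
-    // Public: a relayer reads a connection end from the chain's store;
-    // returns None if no connection exists for this identifier.
-    fn get_connection(&self, id: &Identifier) -> Option<ConnectionEnd>;
-
-    // Public: a relayer appends a datagram to the "outstandingDatagrams" queue.
-    fn push_datagram(&mut self, datagram: Datagram);
-
-    // Private to the module: consume the next outstanding datagram, if any.
-    fn pop_datagram(&mut self) -> Option<Datagram>;
-}
-```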
- - -###### Additional remarks on the system model -We explicitly overlook here two assumptions of synchrony that may be necessary for achieving liveness of **other** protocols, but they are not necessary within the ICS3 protocol. -These assumptions are: (i) partial synchrony is necessary for achieving liveness of a chain if that chain is implemented using a deterministic consensus protocol (e.g., Tendermint Core); (ii) synchrony is necessary for guaranteeing message delivery in any connection-oriented protocol such as TCP, which is most likely the underlying network transport layer (implementing the `CommunicationLayer` between modules and relayers). - - -#### Properties - -###### Preliminary abstractions -We introduce several abstractions that are relevant at this point: - -1. A `ConnectionParty` is a data type encapsulating the details that identify a party. - -```golang -type ConnectionParty interface { - connectionIdentifier Identifier - clientIdentifier Identifier - prefix Prefix -} -``` - -2. A `ConnectionState` captures the state of a connection, and may have one of the following values. - -```typescript -enum ConnectionState { - UNINIT, - INIT, - TRYOPEN, - OPEN, -} -``` - -3. A `ConnectionEnd` is a data type that captures all the details of a connection at a party. -This includes a _remote_ and a _local_ field, so that the local party is the one storing this object, and the remote party is the other one. -One of the goals of the connection handshake protocol is to allocate an object of this type at each of Alice and Bob. - -```golang -type ConnectionEnd interface { - state ConnectionState - remoteParty ConnectionParty - localParty ConnectionParty - version []String -} -``` - - -4. The `ConnProof` and `ClientProof` are two data types that abstracts over the details of a cryptographic proof that any module can create, and another module can verify. -Intuitively, a proof is helpful for some module to guarantee that it is in a certain state. -We are concerned with connection proofs (`ConnProof` type) and client proofs (`ClientProof`) here. - -```golang -type ConnProof struct { - proof CommitmentProof // The proof for the connection that the remote party claims to have. -} -``` - -```golang -type ClientProof struct { - proof CommitmentProof // The proof for the client that the remote party claims to have. - height uint64 // The height which the client (on remote party) claims having. -} -``` - -We now restate the connection handshake problem in a slightly more precise (or lower level) formulation: - -__A connection handshake enables two ICS3 modules to agree on a consistent view of their chain state, to verify each other's state, and to allocate a new connection.__ - - -###### Guarantees -We refine the safety guarantee **[ICS3-Seq-1-Agreement]** (defined in the [sequential problem statement](#sequential-problem-statement)) by splitting this into three complementary safety properties: - -- **[ICS3-Proto-1-ConnectionUniqueness]** -A module accepts (i.e., initializes on) a `ConnectionEnd` `e` at most once. - -- **[ICS3-Proto-2-ConnectionIntegrity]** -If any two modules open a connection `e`, then either one module or the other or both modules accepted (i.e., initialized with) `e`. - -- **[ICS3-Proto-3-StateConsistency]** -If any two modules open a connection, then the client in the first module is consistent with the state of the second module. 
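-
-(As an aside for implementers: the `ConnectionEnd` and related abstractions that the above properties quantify over could look roughly as follows in Rust. This is an illustrative sketch only; the names mirror the pseudocode above, not any existing ibc-rs definitions.)
-
-```rust
-// Illustrative Rust rendering of the ConnectionParty / ConnectionState /
-// ConnectionEnd abstractions defined above. Names are not normative.
-pub type Identifier = String;
-pub type Prefix = Vec<u8>; // hypothetical concrete representation of Prefix
-
-pub struct ConnectionParty {
-    pub connection_identifier: Identifier,
-    pub client_identifier: Identifier,
-    pub prefix: Prefix,
-}
-
-pub enum ConnectionState {
-    Uninit,
-    Init,
-    TryOpen,
-    Open,
-}
-
-pub struct ConnectionEnd {
-    pub state: ConnectionState,
-    pub remote_party: ConnectionParty,
-    pub local_party: ConnectionParty,
-    pub version: Vec<String>,
-}
-```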
-
-The liveness property below is equivalent to **[ICS3-Seq-2-Termination]** (the liveness property in the sequential specification above):
-
-- **[ICS3-Proto-4-Termination]**
-If the two parties involved in a connection handshake are correct, then the connection handshake eventually terminates.
-Specifically, termination implies that each module allocates in the local store a new `ConnectionEnd` object with the `state` field set to `OPEN`.
-
-
-_Remarks_:
-
-- The uniqueness property essentially provides a safeguard against overwriting a connection in the store with some new set of parameters.
-
-- The integrity property, in conjunction with uniqueness, ensures that there is continuity between the connections that a module initializes and the connections that this module opens.
-
-- One of the mechanisms that enforces the connection integrity property consists of cryptographic proofs, specifically the `ConnProof` type, which asserts that a certain chain stores a certain connection in a certain state.
-
-- The `ClientProof` type guarantees the state consistency property, by asserting that the client on a chain is updated to a consensus state of the remote chain.
-
-- Note that the consistency property works both ways.
-This property applies to _any two modules_ (there is no predefined "first" or "second" module here).
-
-
-### 2.2 Protocol
-
-The ICS3 protocol comprises four steps, summarized below.
-
-1. An actor invokes the `ConnInitHandler` handler at the ICS3 module in one of the chains; this sets off the connection handshake protocol.
-In our example, we will use Alice as the party to execute `ConnInitHandler`.
-Once Alice does so, this handler marks the connection on her end as initialized (i.e., `INIT`).
-
-2. This comprises two sub-steps:
-
-    2.a. Upon observing that Alice has executed `ConnInitHandler` (meaning that her state contains a connection that is `INIT`), the relayer constructs a datagram of type `ConnOpenTry` and pushes it to Bob's module;
-
-    2.b. Bob handles this datagram via the `ConnTryHandler` handler. Once this handler finishes, the connection on his end is marked as `TRYOPEN`.
-
-3. This comprises two sub-steps:
-
-    3.a. Upon observing that Bob has finished executing `ConnTryHandler` (i.e., his state contains a `TRYOPEN` connection), the relayer constructs a datagram of type `ConnOpenAck` and pushes it to Alice's module;
-
-    3.b. Alice handles this datagram by invoking the `ConnAckHandler` handler. Once this handler finishes, Alice considers her `ConnectionEnd` to be in state `OPEN`.
-
-4. This comprises two sub-steps:
-
-    4.a. When the relayer observes that Alice finished handling the `ConnOpenAck` datagram (and consequently the connection is open on her side), the relayer constructs a `ConnOpenConfirm` datagram and pushes it to Bob's side.
-
-    4.b. Finally, Bob's module processes the `ConnOpenConfirm` datagram through an eponymous handler, which sets the connection state to `OPEN` for him and thereby marks the termination of the ICS3 protocol.
-
-The diagram below sketches these four steps of the handshake protocol.
- -``` - The four steps comprising - IBC Connection Handshake Protocol - Alice Bob - +-----------+ +-----------+ - |ICS3 Module| |ICS3 Module| - | | | | - | step 1 | | - | ConnInitHandler | | - | ►INIT | | | - | |<-2.a--Relayer-----2.a-->| | - | | | | | - | | \-> step 2.b | - | | ConnTryHandler | - | | | ►TRYOPEN | - | <--3.a------Relayer--3.a->| | - | | | | | - | step 3.b <---/ | | - | ConnAckHandler | | - | ►OPEN | | | - | |<-4.a---Relayer----4.a-->| | - | | | | | - | | \-> step 4.b | - | | ConnConfirmHandler | - | | | ►OPEN | - +-----------+ +-----------+ -``` - - -#### Protocol handler signatures - -We first present the signatures of the four protocol handlers; the connection handshake module at each party implements and exposes these handlers. - -```golang -func ConnInitHandler( - local ConnectionParty, - remote ConnectionParty) - -func ConnTryHandler( - local ConnectionParty, - remote ConnectionParty, - remoteVersions []String, - proofsHeight uint64, - remoteConnectionProof ConnProof, - remoteClientProof ClientProof) - -func ConnAckHandler( - local ConnectionParty, - remoteVersion String, - proofsHeight uint64, - remoteConnectionProof ConnProof, - remoteClientProof ClientProof) - -func ConnConfirmHandler( - local ConnectionParty, - proofsHeight uint64, - remoteConnectionProof ConnProof) -``` - - - -#### Main Protocol Handlers - -We first define the four protocol handlers, and then describe the datagrams. -Finally, we discuss some helper functions. - -###### ConnInitHandler - -This is step 1. - -```golang -func ConnInitHandler(local ConnectionParty, remote ConnectionParty) -{ - // Create local end of the connection in the state 'INIT'. - connectionEnd = newConnectionEnd('INIT', local, remote, getCompatibleVersions()) - - // Uniqueness check. - // The Init handler for a given connection may run only once. - // Abort, unless this is the first time initializing this connection. - abortTransactionUnless(getConnection(local.connectionIdentifier) == nil) - - // Now save this connection end in the local store. - // This connectionEnd is in state INIT, the relayer will continue with - // next steps of the connection handshake protocol. - setConnection(local.connectionIdentifier, connectionEnd) -} -``` - -Preconditions: - - The parameters `local` and `remote` should be valid, i.e., a syntactically correct (see [validation](#Validation)). - - This handler must not have executed previously with the same `local` input parameter (in particular, the same `local.connectionIdentifier` field). - -Postconditions: - - The module executing this handler stores a `ConnectionEnd` (matching with the input arguments `local` and `remote`) in state `INIT`. - - Uniqueness is satisfied because this handler aborts if a key with the identifier `local.connectionIdentifier` already exists in the store (at-most-once semantics). - - To ensure termination of the ICS3 protocol, this handler must execute either on one of the chains or both (at-least-once semantics). - - Handler produces no output; may abort if preconditions not met. - - -###### ConnTryHandler - -This is step 2.b. - -```golang -func ConnTryHandler( - local ConnectionParty, - remote ConnectionParty, - remoteVersions []String, - proofsHeight uint64, - remoteConnectionProof ConnProof, - remoteClientProof ClientProof) -{ - // Create local end of the connection. - connectionEnd = newConnectionEnd('TRYOPEN', local, remote, pickVersion(remoteVersions)) - - // Uniqueness check. 
-    // Verify that if there is a connection then the Init handler previously executed
-    // with these same parameters.
-    current = getConnection(local.connectionIdentifier)
-    abortTransactionUnless(
-        current == nil ||
-        current.state == 'INIT' && matchingEnds(connectionEnd, current))
-
-    // Verify proofs.
-    expectedRemoteConnection = newConnectionEnd('INIT', remote, local, remoteVersions)
-    abortTransactionUnless(verifyProofs(
-        expectedRemoteConnection, proofsHeight, remoteConnectionProof, remoteClientProof))
-
-    // Handler done, store the updated connectionEnd.
-    setConnection(local.connectionIdentifier, connectionEnd)
-}
-```
-
-Preconditions:
-  - The input parameters should be valid; among other validation criteria, it is important that the module running this handler supports at least one of the versions supplied in the input list `remoteVersions` (see [validation](#Validation)).
-  - The two proofs `remoteConnectionProof` and `remoteClientProof` should be correct. This is necessary in connection with properties [ICS3-Proto-2-ConnectionIntegrity] and [ICS3-Proto-3-StateConsistency]. Correctness of proofs means that they pass verification (`verifyProof` function).
-
-Postconditions:
-  - The module executing this handler stores a `ConnectionEnd` (matching the arguments `local` and `remote`) in state `TRYOPEN`.
-  - No output; may abort if preconditions not met.
-
-
-###### ConnAckHandler
-
-Step 3.b.
-
-```golang
-func ConnAckHandler(
-    local ConnectionParty,
-    remoteVersion String,
-    proofsHeight uint64,
-    remoteConnectionProof ConnProof,
-    remoteClientProof ClientProof)
-{
-    // Search the details of this connection in the local store.
-    connectionEnd = getConnection(local.connectionIdentifier)
-
-    // This verification helps guarantee uniqueness and integrity.
-    abortTransactionUnless(connectionEnd != nil && connectionEnd.local == local)
-
-    // Should not overwrite a connection that is already `OPEN`.
-    abortTransactionUnless(connectionEnd.state == INIT || connectionEnd.state == TRYOPEN)
-
-    // Verify proofs.
-    expectedRemoteConnection = newConnectionEnd('TRYOPEN', remote, local, remoteVersion)
-    abortTransactionUnless(verifyProofs(
-        expectedRemoteConnection, proofsHeight, remoteConnectionProof, remoteClientProof))
-
-    // Termination (partial -- only at this end).
-    connectionEnd.state = OPEN
-    connectionEnd.version = remoteVersion
-
-    // Update the connectionEnd in local state.
-    setConnection(local.connectionIdentifier, connectionEnd)
-}
-```
-
-Preconditions:
-  - The module executing this handler already has a `ConnectionEnd` stored locally, matching the `local` argument, and in state `INIT` or `TRYOPEN`.
-  - The input parameters are valid (see [validation](#Validation)).
-  - The two proofs `remoteConnectionProof` and `remoteClientProof` are correct; this is needed for guaranteeing properties [ICS3-Proto-2-ConnectionIntegrity] and [ICS3-Proto-3-StateConsistency].
-
-Postconditions:
-  - The module executing this handler stores a `ConnectionEnd` (matching the argument `local`) in state `OPEN`.
-  - No output; may abort if preconditions not met.
-
-
-###### ConnConfirmHandler
-
-Step 4.b.
-
-```golang
-func ConnConfirmHandler(
-    local ConnectionParty,
-    proofsHeight uint64,
-    remoteConnectionProof ConnProof)
-{
-    // Search the details of this connection in the local store.
-    connectionEnd = getConnection(local.connectionIdentifier)
-
-    // Helps guarantee integrity and uniqueness.
-    abortTransactionUnless(connectionEnd != nil && connectionEnd.local == local)
-
-    // Integrity check: the handler should not overwrite a connection that is
-    // already OPEN, and should not act on a connection that is still in INIT.
-    // The only acceptable state is TRYOPEN.
-    abortTransactionUnless(connectionEnd.state == 'TRYOPEN')
-
-    // Connection proof verification.
-    // The remote party is the one stored in the local connection end.
-    expectedRemoteConnection = newConnectionEnd('OPEN', connectionEnd.remote, local, connectionEnd.version)
-    abortTransactionUnless(verifyProofs(
-        expectedRemoteConnection, proofsHeight, remoteConnectionProof, nil))
-
-    // Termination.
-    connectionEnd.state = 'OPEN'
-    setConnection(local.connectionIdentifier, connectionEnd)
-}
-```
-
-Preconditions:
- - The module executing this handler has a `ConnectionEnd` stored locally (matching the input argument `local`) and in state `TRYOPEN`.
- - The input parameters are valid (see [validation](#Validation)).
- - The input proof `remoteConnectionProof` is correct; this is needed to guarantee property [ICS3-Proto-2-ConnectionIntegrity].
-
-Postconditions:
- - The module executing this handler stores a `ConnectionEnd` (matching the input argument `local`) in state `OPEN`.
- - No output; may abort if preconditions not met.
-
-
-##### Datagrams
-
-A correct relayer can push the following datagrams to a chain.
-Upon popping a datagram of a certain type, e.g., `ConnOpenConfirm`, a correct chain handles the datagram by first validating it (syntactic validation, e.g., well-formed parameters, see [validation](#validation)) and then invoking the corresponding handler.
-
-```golang
-type ConnOpenInit struct {
-    local ConnectionParty
-    remote ConnectionParty
-}
-
-type ConnOpenTry struct {
-    local ConnectionParty
-    remote ConnectionParty
-    remoteVersions []String
-    proofsHeight uint64
-    remoteConnectionProof ConnProof
-    remoteClientProof ClientProof
-}
-
-type ConnOpenAck struct {
-    local ConnectionParty
-    remoteVersion String
-    proofsHeight uint64
-    remoteConnectionProof ConnProof
-    remoteClientProof ClientProof
-}
-
-type ConnOpenConfirm struct {
-    local ConnectionParty
-    proofsHeight uint64
-    remoteConnectionProof ConnProof
-}
-```
-
-Notice that the fields in each datagram match the corresponding protocol handler signature.
-
-#### Helper functions & application-level predicates
-
-###### `matchingEnds(endLocal, endRemote)`
-
-Checks that the connection ends in the local and remote arguments match.
-(NB: This does not check the connection state, only the connection parties.)
-
-###### Validation
-By validation we mean a verification that is done before a handler executes, checking that the input parameters of that handler are syntactically correct, e.g., a valid `local` connection party, non-null proofs, a non-zero proofs height.
-With respect to the `local` parameter, similar in functionality to `validateConnectionIdentifier` from [ICS 003](#references), we also validate the `local.connectionIdentifier` field.
-Additionally, if a `version` or `remoteVersion` input parameter is present, it should also be validated prior to invoking a handler with that version.
-
-###### `pickVersion(counterpartyVersions)`
-This is identical to the one in [ICS 003](#references).
-
-###### `getCompatibleVersions()`
-This is identical to the one in [ICS 003](#references).
-
-###### Proof Verification
-
-This function handles everything related to proof verification.
-It requires four arguments: the connection end as it is expected to be on the remote party, the height at which the proofs were taken, a connection proof, and a client proof; the last argument may be nil.
-This function invokes a lower-level method `client.verifyProof`, which executes in the context of a client and handles the cryptographic verification of a given proof; this method is analogous to `verifyMembership` from [ICS 023](#references), except that it must additionally obtain the commitment root from the consensus state and serialize the expected data.
-The pseudocode for `verifyProofs` is provided below.
-
-```golang
-func verifyProofs(
-    expectedConnection ConnectionEnd,
-    proofsHeight uint64,
-    remoteConnectionProof ConnProof,
-    remoteClientProof ClientProof)
-{
-    // Proofs verification.
-    // Generic proof check: the local client should exist and be updated with
-    // the height which the two proofs target (proofsHeight).
-    client = clientState(expectedConnection.remote.clientIdentifier)
-    abortTransactionUnless(client != nil)
-    clientConsensusState = clientConsensusState(expectedConnection.remote.clientIdentifier, proofsHeight)
-    abortTransactionUnless(clientConsensusState != nil)
-
-    // Connection proof specific verification.
-    abortTransactionUnless(client.verifyProof(
-        clientConsensusState, remoteConnectionProof,
-        connectionPath(expectedConnection.local.connectionIdentifier), expectedConnection))
-
-    if remoteClientProof == nil {
-        return // Client proof is missing, so skip any further verification.
-    }
-
-    // Client proof verification ensures state consistency: the remote
-    // client should be consistent with the state of the local party.
-    expectedRemoteClientConsensus = localConsensusState(remoteClientProof.height)
-    abortTransactionUnless(remoteClientProof.height <= getCurrentHeight())
-    abortTransactionUnless(client.verifyProof(
-        clientConsensusState, remoteClientProof,
-        clientPath(expectedConnection.local.clientIdentifier, remoteClientProof.height),
-        expectedRemoteClientConsensus))
-}
-```
-
-###### `localConsensusState` and `clientConsensusState`
-
-These are analogous to the `getConsensusState` and `queryClient` functions.
-The `clientConsensusState` function, in particular, has an implicit requirement that it may not return the consensus state of a client if that height is older than a certain threshold; specifically, the `proofsHeight` should not be outdated by more than `100` heights. (This means that proofs must not be too old.)
-
-
-### Open questions:
-
-- Standard cryptographic assumptions (collision-resistant hash functions, public-key signatures). We should mention these once we have more details about the implementation.
-
-- How to capture aborts or incorrect termination? See [issue raised by Anca](https://github.com/informalsystems/ibc-rs/pull/42#discussion_r397077901). Is this necessary? To inform this discussion, more implementation details are needed.
-
-- Verification of the unbonding period in `ConnTryHandler`. See [ICS/#403](https://github.com/cosmos/ibc/issues/403).
-
-- Missing link to L3: what is the mechanism that implements the `pop` functionality at the implementation/L3 level (hint: it's in the SDK, the layer sitting between the consensus module and IBC Handler)?
-
-## References
-
-- [ICS 003] Interchain Standards [ICS 003 Connection Semantics](https://github.com/cosmos/ibc/tree/master/spec/core/ics-003-connection-semantics).
-- [ICS 024] Interchain Standards [ICS 024 Host Requirements](https://github.com/cosmos/ibc/tree/master/spec/core/ics-024-host-requirements).
-- [ICS 018] Interchain Standards [ICS 024 Host Requirements](https://github.com/cosmos/ibc/tree/master/spec/relayer/ics-018-relayer-algorithms). diff --git a/docs/spec/connection-handshake/L2-tla/Environment.tla b/docs/spec/connection-handshake/L2-tla/Environment.tla deleted file mode 100644 index ed05bea075..0000000000 --- a/docs/spec/connection-handshake/L2-tla/Environment.tla +++ /dev/null @@ -1,406 +0,0 @@ ----------------------------- MODULE Environment ---------------------------- - -(*************************************************************************** - - This module is part of the TLA+ specification for the IBC Connection - Handshake protocol (identifier 'ICS3'). This is a high-level spec of ICS3. - - This module captures the operators and actions outside of the ICS3 protocol - itself (i.e., the environment). - Among others, the environment does the following: - - creates two instances of ICS3Module; - - wires these instances together; - - provides the initialization step for ICS3 protocol, concretely a - "ICS3MsgInit" message, so that the two instances can perform the protocol; - - some relayer functionality: passes any outgoing message from a chain - into the ingoing buffer of the other (destination) chain and correspondingly - updates the client of the destination chain; - - also, advances the chain of each instance non-deterministically; - - if `Concurrency` is TRUE, then this module can take non-deterministic - steps, by updating the client on a chain. - - ***************************************************************************) - -EXTENDS Naturals, FiniteSets, Sequences, ICS3Utils - - -CONSTANT MaxHeight, \* Maximum height of any chain in the system. - MaxBufLen, \* Length (size) of message buffers. - Concurrency, \* Flag for enabling concurrent relayers. - MaxVersionNr, \* Maximum version number. - VersionPickMode \* The mode for picking versions. - - -ASSUME MaxHeight > 4 -ASSUME MaxBufLen >= 1 -ASSUME VersionPickMode \in {"overwrite", "onTryDet", "onTryNonDet", "onAckDet", "onAckNonDet"} - -(* -VersionPickMode: - * "overwrite" -- the version is picked deterministically when handling - ICS3MsgTry from the intersection of versions sent in the - message and locally supported versions. The picked version - is sent to the counterparty chain in ICS3MsgAck, which overwrites its - own version with the one from the message - * "onTryDet" -- the version is picked deterministically when handling - ICS3MsgTry from the intersection of versions sent in the - message and locally supported versions. The picked version - is sent to the counterparty chain in ICS3MsgAck, which accepts it - * "onTryNonDet" -- same as "onTryDet", except the version is picked - non-deterministically - * "onAckDet" -- the version is picked deterministically when handling - ICS3MsgAck from the intersection of versions sent in the - message and locally supported versions. The picked version - is sent to the counterparty chain in ICS3MsgConfirm, which accepts it - * "onAckNonDet" -- same as "onAckDet", except the version is picked - non-deterministically -*) - -VARIABLES - inBufChainA, \* A buffer (sequence) for messages inbound to chain A. - inBufChainB, \* A buffer for messages inbound to chain B. - outBufChainA, \* A buffer for messages outgoing from chain A. - outBufChainB, \* A buffer for messages outgoing from chain B. - storeChainA, \* The local store of chain A. - storeChainB \* The local store of chain B. 
- -(************* ChainAConnectionEnds & ChainBConnectionEnds ***************** - - The set of records that each chain can use as a valid local connection - end. For each chain, this set contains one record, since we are - modeling a single connection in this specification. - - ***************************************************************************) - -AllChainIDs == - { "chainA", "chainB" } - -AllVersionSeqs == - {<<>>} \union - {<> : a \in 1..MaxVersionNr} \union - {<> \in (1..MaxVersionNr) \X (1..MaxVersionNr) : a /= b} - -ChainAConnectionEnds == - [ - connectionID : { "connAtoB" }, - clientID : { "clientOnAToB" } - ] -ChainBConnectionEnds == - [ - connectionID : { "connBtoA" }, - clientID : { "clientOnBToA" } - ] - -AllConnectionEnds == - ChainAConnectionEnds \union ChainBConnectionEnds - -AllClientIDs == - { x.clientID : x \in AllConnectionEnds } - -AllConnectionIDs == - { x.connectionID : x \in AllConnectionEnds } - -ChainAClientIDs == - { x.clientID : x \in ChainAConnectionEnds } - -ChainBClientIDs == - { x.clientID : x \in ChainBConnectionEnds } - -ChainAConnectionIDs == - { x.connectionID : x \in ChainAConnectionEnds } - -ChainBConnectionIDs == - { x.connectionID : x \in ChainBConnectionEnds } - -(* Bundle with variables that chain A has access to. *) -chainAVars == <> (* The local chain store. *) - -(* Bundle with variables that chain B has access to. *) -chainBVars == <> (* Local chain store. *) - -(* All variables specific to both chains. *) -chainStoreVars == <> - -allVars == <> - - -(* This is a separate module comprising common type definitions. *) -INSTANCE ICS3Types - -chmA == INSTANCE ICS3Module - WITH MaxChainHeight <- MaxHeight, - inBuf <- inBufChainA, - outBuf <- outBufChainA, - store <- storeChainA, - ConnectionIDs <- ChainAConnectionIDs, - ClientIDs <- ChainAClientIDs, - ChainID <- "chainA" - - -chmB == INSTANCE ICS3Module - WITH MaxChainHeight <- MaxHeight, - inBuf <- inBufChainB, - outBuf <- outBufChainB, - store <- storeChainB, - ConnectionIDs <- ChainBConnectionIDs, - ClientIDs <- ChainBClientIDs, - ChainID <- "chainB" - - -(*************************************************************************** - Environment actions. - ***************************************************************************) - - -(* Environment initialization. - - This action kick-starts the ICS3 protocol by assigning an ICS3MsgInit - msg to either of the two chains (or both). - - *) -InitEnv == - /\ \/ /\ inBufChainA \in {<> : (* ICS3MsgInit to chain A. *) - msg \in InitMsgs(ChainAConnectionEnds, ChainBConnectionEnds)} - /\ inBufChainB = <<>> - \/ /\ inBufChainB \in {<> : (* ICS3MsgInit to chain B. *) - msg \in InitMsgs(ChainBConnectionEnds, ChainAConnectionEnds)} - /\ inBufChainA = <<>> - \/ /\ inBufChainA \in {<> : (* ICS3MsgInit to both chains. *) - msg \in InitMsgs(ChainAConnectionEnds, ChainBConnectionEnds)} - /\ inBufChainB \in {<> : - msg \in InitMsgs(ChainBConnectionEnds, ChainAConnectionEnds)} - /\ outBufChainA = <<>> (* Output buffers should be empty initially. *) - /\ outBufChainB = <<>> - - -(* Message relaying functionality of the environment. - - This is part of the RelayNextEnv sub-action of the environment. - This performs a basic relaying step, that is, passing a message from the - output buffer of one of the chains (paramter 'from') into the input buffer - of another chain (parameter 'to'). 
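- Note that the relaying step is enabled only if the destination buffer
- has room left, i.e., Len(to) < MaxBufLen - 1.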
- - *) -RelayMessage(from, to) == - /\ from # <<>> - /\ Len(to) < MaxBufLen - 1 - /\ to' = Append(to, Head(from)) - /\ from' = Tail(from) - - -(* Default next step for environment. - - This step may change (non-deterministically) either of the store of chain A - or B, by advancing the height of that chain. This can only enable if the - respective chain has ample steps left, i.e., the chain height is not within 4 steps - of the maximum height. This precondition disallow continuos advancing of chain heights, - and therefore allows chains to take meaningful steps (executing the ICS3 protocol to - completion). - - *) -DefaultNextEnv == - \/ /\ MaxHeight - storeChainA.latestHeight > 4 - /\ chmA!AdvanceChainHeight - /\ UNCHANGED<> - \/ /\ MaxHeight - storeChainB.latestHeight > 4 - /\ chmB!AdvanceChainHeight - /\ UNCHANGED<> - - -(* A concurrent UpdateClient step for the environment. - - This updates the client on one of the chains with the latest height of the other chain. - This step helps to simulate the conditions of having multiple relayers acting in parallel. - -*) -ConcurrentUpdateClient == - \/ /\ chmB!CanUpdateClient(storeChainA.latestHeight) - /\ chmB!UpdateClient(storeChainA.latestHeight) - /\ UNCHANGED<> - \/ /\ chmA!CanUpdateClient(storeChainB.latestHeight) - /\ chmA!UpdateClient(storeChainB.latestHeight) - /\ UNCHANGED<> - - -(* Relaying step for the environment. - - This step performs a relay: moving a message between the output - buffer of a chain to the input buffer of the other chain, and updating accordingly - the client on the latter chain. - - *) -RelayNextEnv == - (* Relay direction: from chain A to chain B. *) - \/ LET msg == Head(outBufChainA) - targetHeight == IF MessageTypeIncludesConnProof(msg.type) - THEN msg.proofHeight - ELSE storeChainA.latestHeight - IN /\ RelayMessage(outBufChainA, inBufChainB) - /\ \/ chmB!CanUpdateClient(targetHeight) - /\ chmB!UpdateClient(targetHeight) - \/ ~ chmB!CanUpdateClient(targetHeight) - /\ UNCHANGED storeChainB - /\ UNCHANGED<> - (* Relay direction: from chain B to chain A. *) - \/ LET msg == Head(outBufChainB) - targetHeight == IF MessageTypeIncludesConnProof(msg.type) - THEN msg.proofHeight - ELSE storeChainB.latestHeight - IN /\ RelayMessage(outBufChainB, inBufChainA) - /\ \/ chmA!CanUpdateClient(targetHeight) - /\ chmA!UpdateClient(targetHeight) - \/ ~ chmA!CanUpdateClient(targetHeight) - /\ UNCHANGED storeChainA - /\ UNCHANGED<> - - -(* Environment next action. - - There are three possible actions that the environment may perform: - - 1. If `Concurrency` flag is TRUE, then the environment may update the - client on one of the two chains. This effectively models what happens - when more than a relayer triggers the `UpdateClient` action of a chain, - a condition that can lead to liveness (termination) problems in ICS3. - - 2. A 'DefaultNextEnv' step, that simply advances the height of one of - the chains unless the chain has just a few (namely, `4`) heights left. - - 3. The environment may perform a relaying step, that is: - if there is a message in the ougoing buffer of a chain, the relayer - moves this message to the ingoing buffer of the other chain, and also - updates the client on the latter chain. - - *) -NextEnv == - \/ Concurrency /\ ConcurrentUpdateClient - \/ DefaultNextEnv - \/ RelayNextEnv - \/ UNCHANGED allVars - - -(* Enables when the connection is open on both chains. - - State predicate signaling that the protocol terminated correctly. 
- - *) -ICS3ReachedOpenConnection == - /\ storeChainA.connection.state = "OPEN" - /\ storeChainB.connection.state = "OPEN" - /\ UNCHANGED allVars - - -(* Enables when both chains are stuck, i.e., unable to progress while - their connection is not opened. - - State predicate signaling that the protocol terminated unsucessfully. - - *) -ICS3ImpossibleToAdvance == - /\ \/ (~ chmA!CanAdvance /\ storeChainA.connection.state # "OPEN") - \/ (~ chmB!CanAdvance /\ storeChainB.connection.state # "OPEN") - /\ UNCHANGED allVars - - -(****************************************************************************** - - Main spec. The system comprises the environment plus the two instances of - ICS3 modules. - - *****************************************************************************) - - -(* Initializes both chains, attributing to each a chainID and a client. - The ChainVersionsOverlap predicate is a necessary assumption for termination. - *) -Init == - /\ chmA!Init - /\ chmB!Init - /\ ChainVersionsOverlap(storeChainA, storeChainB) - /\ InitEnv - - -(* The two ICS3 modules and the environment alternate their steps - non-deterministically. Eventually, the execution ends with either - successful (ICS3ReachedOpenConnection sub-action) or unsuccesfull - (ICS3ImpossibleToAdvance sub-action) termination. -*) -Next == - \/ ICS3ReachedOpenConnection - \/ ICS3ImpossibleToAdvance - \/ NextEnv - \/ chmA!Next /\ UNCHANGED chainBVars - \/ chmB!Next /\ UNCHANGED chainAVars - - -FairProgress == - /\ chmA!Fairness - /\ chmB!Fairness - /\ WF_<>(RelayNextEnv) - - -Spec == - /\ Init - /\ [][Next]_<> - /\ FairProgress - - -TypeInvariant == - /\ chmA!TypeInvariant - /\ chmB!TypeInvariant - - -(* Liveness property. - - We expect to eventually always reach an OPEN connection on both chains. - - Naturally, this property may not hold if the two chains do not have - sufficient number of heights they can advance to. In other words, the - `MaxHeight` constant should be at least `4` for this property to hold. - - The `Concurrency` constant may also affect liveness. -*) -Termination == - <> [](/\ \/ storeChainA.connection.state = "OPEN" - \/ storeChainA.latestHeight = MaxHeight - /\ \/ storeChainB.connection.state = "OPEN" - \/ storeChainB.latestHeight = MaxHeight) - -(* Safety property. - - If the connections in the two chains are not null, then the - connection parameters must always match. - *) -ConsistencyProperty == - /\ storeChainA.connection.state # "UNINIT" - /\ storeChainB.connection.state # "UNINIT" - => storeChainA.connection.parameters - = chmB!FlipConnectionParameters(storeChainB.connection.parameters) - - -Consistency == - [] ConsistencyProperty - -(* Complementary to the safety property above. - - If the connections in the two chains are both OPEN, then the - connection version must be identical. 
- *) -VersionInvariant == - /\ storeChainA.connection.state = "OPEN" - /\ storeChainB.connection.state = "OPEN" - => /\ Len(storeChainA.connection.version) = 1 - /\ Len(storeChainB.connection.version) = 1 - /\ storeChainA.connection.version = storeChainB.connection.version - -============================================================================= -\* Modification History -\* Last modified Fri Aug 28 09:11:35 CEST 2020 by adi -\* Last modified Tue Aug 25 17:48:37 CEST 2020 by ilinastoilkovska -\* Created Fri Apr 24 18:51:07 CEST 2020 by adi \ No newline at end of file diff --git a/docs/spec/connection-handshake/L2-tla/ICS3Module.tla b/docs/spec/connection-handshake/L2-tla/ICS3Module.tla deleted file mode 100644 index 1716b4d8fb..0000000000 --- a/docs/spec/connection-handshake/L2-tla/ICS3Module.tla +++ /dev/null @@ -1,580 +0,0 @@ --------------------------- MODULE ICS3Module ------------------------------ - -(*************************************************************************** - - This module is part of the TLA+ specification for the - IBC Connection Handshake protocol (ICS3). - - This module captures the actions and operators of the ICS3 protocol. - Typically, it is an IBC module running on a chain that would implement - the logic in this TLA+ module, hence the name "ICS3Module". - sometimes abbreviated to "chModule" or "chm". - - This module deals with a high-level spec of the ICS3 protocol, so it is - a simplification with respect to ICS3 proper in several regards: - - - the modules assumes to run on a chain which we model as a simple - advancing height, plus a few more critical fields (see the 'store'), - but without any state (e.g., blockchain, transactions, consensus core); - - - we model a single connection; establishing multiple connections is not - possible; - - - we do not perform any cryptographic proof verifications; - - - the abstractions we use are higher-level, and slightly different from - the ones in ICS3 (see e.g., ConnectionEnd and Connection records). - - - the client colocated with the module is simplified, comprising only - a set of heights (not the actual blockchain headers). - - ***************************************************************************) - -EXTENDS Naturals, FiniteSets, Sequences, ICS3Utils, ICS3Types - - -CONSTANTS MaxChainHeight, \* Maximum height of the local chain. - ConnectionIDs, \* The set of valid connection IDs. - ClientIDs, \* The set of valid client IDs. - MaxBufLen, \* Maximum length of the input and output buffers. - MaxVersionNr, \* Maximum version number - ChainID, \* The chainID - VersionPickMode \* the mode for picking versions - -ASSUME Cardinality(ConnectionIDs) >= 1 -ASSUME Cardinality(ClientIDs) >= 1 - - -VARIABLES -(******************************* Store ***************************** - - The store record of a chain contains the following fields: - - - chainID -- a string. - Stores the identifier of the chain where this module executes. - - - latestHeight -- a natural number in the range 1..MaxHeight. - Describes the current height of the chain. - - - connection -- a connection record. - Captures all the details of the connection on this chain. - For a full description of a connection record, see the - 'Environment.Connections' set. - - - client -- a client record. - Specifies the state of the client running on this chain. - - A client record contains the following fields: - - - consensusHeights -- a set of heights. - Stores the set of all heights (i.e., consensus states) that this - client observed. 
- - - clientID -- a string. - The identifier of the client. - - - latestHeight -- a natural number in the range 1..MaxHeight. - Stores the latest height among all the heights in consensusHeights. - - For more details on how clients are initialized, see the operator - ICS3Types.InitClients. - - ***************************************************************************) - store, - (* A buffer (Sequence) holding any message(s) incoming to this module. *) - inBuf, - (* A buffer (Sequence) holding outbound message(s) from this module. *) - outBuf - - -moduleVars == - <> - - -(*************************************************************************** - Helper operators. - ***************************************************************************) - - -(* Simple computation returning the maximum out of two numbers 'a' and 'b'. - *) -MAX(a, b) == - IF a > b THEN a ELSE b - -MAXSet(S) == - CHOOSE x \in S: \A y \in S: y <= x - - -(* Validates a connection parameter. - - Returns true if 'para' matches the parameters in the local connection, - and returns false otherwise. - - *) -ValidConnectionParameters(para) == - LET local == store.connection.parameters.localEnd - remote == store.connection.parameters.remoteEnd - IN /\ local.connectionID = para.localEnd.connectionID - /\ remote.connectionID = para.remoteEnd.connectionID - /\ local.clientID = para.localEnd.clientID - /\ remote.clientID = para.remoteEnd.clientID - - -(* Validates a connection parameter local end. - - Expects as input a ConnectionParameter 'para' and returns true or false. - This is a basic validation step, making sure that the local end in 'para' - is valid with respect to module-level constants ConnectionIDs and ClientIDs. - -*) -ValidLocalEnd(para) == - /\ para.localEnd.connectionID \in ConnectionIDs - /\ para.localEnd.clientID \in ClientIDs - -(* Operator for reversing the connection ends. - - Given a ConnectionParameters record 'para', returns a new set - of parameters where the local and remote ends are - flipped (i.e., reversed). - *) -FlipConnectionParameters(para) == - [localEnd |-> para.remoteEnd, - remoteEnd |-> para.localEnd] - - -(* Operator for constructing a connection proof. - - The connection proof is used to demonstrate to another chain that the - local store on this chain comprises a connection in a certain state. - *) -GetConnProof(myConnection) == - [connection |-> myConnection] - - -(* Operator for constructing a client proof. - *) -GetClientProof == - [latestHeight |-> store.client.latestHeight, - consensusHeights |-> store.client.consensusHeights] - - -(* Verification of a connection proof. - - This is a state predicate returning true if the following holds: - - the state of connection in this proof should match with input parameter - 'expectedState'; and - - the connection parameters in this proof should match with the flipped version - of the input 'expectedParams'. - - *) -VerifyConnProof(cp, expectedState, expectedParams) == - /\ cp.connection.state = expectedState - /\ cp.connection.parameters = FlipConnectionParameters(expectedParams) - - -(* Verification of a client proof. - - This is a state predicate returning true if the following holds: the height - reported in the client proof must not exceed the current (latestHeight) of - this chain. - *) -VerifyClientProof(cp) == - /\ cp.latestHeight <= store.latestHeight (* Consistency height check. *) - /\ cp.latestHeight \in cp.consensusHeights (* Client verification step. *) - - -(* Get all possible version sequences from a set of versions. 
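- This is used by PickVersionOnTry when version picking is deferred to the
- Ack step (modes "onAckDet" and "onAckNonDet"), to turn the set of feasible
- versions into the candidate version sequences for the outgoing message.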
- *) -VersionSetAsVersionSequences(S) == - LET E == 1..Cardinality(S) IN - LET AllSeqs == [E -> S] IN - {seq \in AllSeqs : seq \in AllVersionSeqs} - -(*************************************************************************** - Connection Handshake Module actions & operators. - ***************************************************************************) - - -(* Modifies the local store. - - Replaces the connection in the store with the argument 'newCon'. - This action also advances the chain height. - *) -NewStore(newCon) == - [store EXCEPT !.connection = newCon, - !.latestHeight = @ + 1] - - -(********************************** - ICS3 spec related to Init messages. - **********************************) - -(* State predicate, guarding the handler for the Init msg. - - If any of these preconditions does not hold, the message - is dropped. - *) -PreconditionsInitMsg(m) == - /\ ValidLocalEnd(m.parameters) (* Basic validation of localEnd in parameters. *) - /\ store.connection.state = "UNINIT" - -(* Reply message to an ICS3MsgInit message. - *) -MsgInitReply(chainStore) == - LET conn == chainStore.connection - myConnProof == GetConnProof(conn) - myClientProof == GetClientProof - replyMsg == [parameters |-> FlipConnectionParameters(conn.parameters), - type |-> "ICS3MsgTry", - proofHeight |-> chainStore.latestHeight, - connProof |-> myConnProof, - clientProof |-> myClientProof, - version |-> conn.version] IN - replyMsg - -(* Handles a "ICS3MsgInit" message 'm'. - - Primes the store.connection to become initialized with the parameters - specified in 'm'. Also creates a reply message, enqueued on the outgoing - buffer. This reply message will include proofs that match the height of - this chain (i.e., current store.latestHeight + 1). - *) -HandleInitMsg(m) == - LET newCon == [parameters |-> m.parameters, - state |-> "INIT", - version |-> store.connection.version] - newStore == NewStore(newCon) IN - IF PreconditionsInitMsg(m) - THEN {newStore} - ELSE {store} - - -(********************************** - ICS3 spec related to Try messages. - **********************************) - -(* State predicate, guarding the handler for the Try msg. - - If any of these preconditions does not hold, the message - is dropped. - *) -PreconditionsTryMsg(m) == - /\ \/ /\ store.connection.state = "UNINIT" - /\ ValidLocalEnd(m.parameters) - \/ /\ store.connection.state = "INIT" - /\ ValidConnectionParameters(m.parameters) - /\ m.proofHeight \in store.client.consensusHeights (* Consistency height check. 
*) - /\ VerifyConnProof(m.connProof, "INIT", m.parameters) - /\ VerifyClientProof(m.clientProof) - \* check if the locally stored versions overlap with the versions sent in - \* the ICS3MsgTry message - /\ VersionSequencesOverlap(store.connection.version, m.version) - -(* Pick a version depending on the value of the constant VersionPickMode - - - if VersionPickMode = "onTryNonDet" or VersionPickMode = "overwrite" - -> pick a version from (m.version \intersect store.connection.version) non-deterministically, - send the picked version to counterparty in ICS3MsgAck - - if VersionPickMode = "onTryNonDet" - -> pick a version from (m.version \intersect store.connection.version) deterministically - (e.g., maximum), send the picked version to counterparty in ICS3MsgAck - - otherwise (version picking is done when handling ICS3MsgAck) - -> send the value of the intersection (m.version \intersect store.connection.version) - to counterparty in ICS3MsgConfirm -*) -PickVersionOnTry(m) == - \* get a set of feasible versions -- - \* the intersection between the local and the versions sent in the message - LET feasibleVersions == SequenceAsSet(m.version) - \intersect - SequenceAsSet(store.connection.version) IN - - IF feasibleVersions /= {} - THEN IF \/ VersionPickMode = "overwrite" - \/ VersionPickMode = "onTryNonDet" - \* the version is picked non-deterministically - THEN {<> : newVersion \in feasibleVersions} - ELSE IF VersionPickMode = "onTryDet" - \* the version is picked deterministically, - \* using MAXSet as a deterministic choice function - THEN {<>} - \* the version will be picked when handling ICS3MsgAck, - \* send a sequence which consists of elements in the - \* set feasibleVersions - ELSE VersionSetAsVersionSequences(feasibleVersions) - ELSE {} - -(* Reply message to an ICS3MsgTry message. - *) -MsgTryReply(chainStore) == - LET conn == chainStore.connection - myConnProof == GetConnProof(conn) - myClientProof == GetClientProof - replyMsg == [parameters |-> FlipConnectionParameters(conn.parameters), - type |-> "ICS3MsgAck", - proofHeight |-> chainStore.latestHeight, - connProof |-> myConnProof, - clientProof |-> myClientProof, - version |-> conn.version] IN - replyMsg - -(* Handles a "ICS3MsgTry" message. - *) -HandleTryMsg(m) == - \* create a set of new connections, whose versions - \* were picked in OnTryPickVersion - LET newConnSet == [parameters : {m.parameters}, - state : {"TRYOPEN"}, - version : PickVersionOnTry(m)] - newStoreSet == {NewStore(newConn) : newConn \in newConnSet} IN - - IF /\ PreconditionsTryMsg(m) - /\ newStoreSet /= {} - THEN newStoreSet - ELSE {store} - - -(********************************** - ICS3 spec related to Ack messages. - **********************************) - -(* State predicate, guarding the handler for the Ack msg. - *) -PreconditionsAckMsg(m) == - /\ \/ store.connection.state = "INIT" - \/ store.connection.state = "TRYOPEN" - /\ ValidConnectionParameters(m.parameters) - /\ m.proofHeight \in store.client.consensusHeights (* Consistency height check. 
*) - /\ VerifyConnProof(m.connProof, "TRYOPEN", m.parameters) - /\ VerifyClientProof(m.clientProof) - /\ IF VersionPickMode /= "overwrite" - \* check if the locally stored versions overlap with the versions sent in - \* the ICS3MsgAck message if VersionPickMode /= "overwrite" - THEN VersionSequencesOverlap(store.connection.version, m.version) - \* if VersionPickMode = "overwrite", do not check for version overlap - ELSE TRUE - -(* Pick a version depending on the value of the constant VersionPickMode - - - if VersionPickMode = "overwrite" - -> take the picked version from the message - - if VersionPickMode = "onAckNonDet" - -> pick a version from (m.version \intersect store.connection.version) non-deterministically, - send the picked version to counterparty in ICS3MsgConfirm - - if VersionPickMode = "onAckDet" - -> pick a version from (m.version \intersect store.connection.version) deterministically - (e.g., maximum), send the picked version to counterparty in ICS3MsgConfirm - - otherwise (version picking was done when handling ICS3MsgTry) - -> use m.version if (m.version \intersect store.connection.version) is not empty - (checked in PreconditionsAckMsg) - -*) -PickVersionOnAck(m) == - \* get a set of feasible versions -- - \* the intersection between the local and the versions sent in the message - LET feasibleVersions == SequenceAsSet(m.version) - \intersect - SequenceAsSet(store.connection.version) IN - - IF VersionPickMode = "overwrite" - \* take the picked version from the message - THEN {m.version} - ELSE IF feasibleVersions /= {} - THEN IF VersionPickMode = "onAckNonDet" - \* the version is picked non-deterministically - THEN {<> : newVersion \in feasibleVersions} - ELSE IF VersionPickMode = "onAckDet" - \* the version is picked deterministically, - \* using MAXSet as a deterministic choice function - THEN {<>} - \* the version was picked when handling ICS3MsgTry, - \* use the picked version from the ICS3MsgAck message - ELSE {m.version} - ELSE {} - -(* Reply message to an ICS3MsgAck message. - *) -MsgAckReply(chainStore) == - LET conn == chainStore.connection - myConnProof == GetConnProof(conn) - replyMsg == [parameters |-> FlipConnectionParameters(conn.parameters), - proofHeight |-> chainStore.latestHeight, - type |-> "ICS3MsgConfirm", - connProof |-> myConnProof, - version |-> conn.version] IN - replyMsg - -(* Handles a "ICS3MsgAck" message. - *) -HandleAckMsg(m) == - LET newConnSet == [parameters : {m.parameters}, - state : {"OPEN"}, - version : PickVersionOnAck(m)] - newStoreSet == {NewStore(newConn) : newConn \in newConnSet} IN - - IF /\ PreconditionsAckMsg(m) - /\ newStoreSet /= {} - THEN newStoreSet - ELSE {store} - - -(************************************** - ICS3 spec related to Confirm messages. - **************************************) - -(* State predicate, guarding the handler for the Confirm msg. - *) -PreconditionsConfirmMsg(m) == - /\ store.connection.state = "TRYOPEN" - /\ ValidConnectionParameters(m.parameters) - /\ m.proofHeight \in store.client.consensusHeights (* Consistency height check. 
*) - /\ VerifyConnProof(m.connProof, "OPEN", m.parameters) - /\ IF VersionPickMode /= "overwrite" - \* check if the locally stored versions overlap with the versions sent in - \* the ICS3MsgConfirm message if VersionPickMode /= "overwrite" - THEN IF \/ VersionPickMode = "onAckNonDet" - \/ VersionPickMode = "onAckDet" - \* if the version was picked on handling ICS3MsgAck, check for intersection - THEN VersionSequencesOverlap(store.connection.version, m.version) - \* if the version was picked on handling ICS3MsgTry, check for equality - ELSE store.connection.version = m.version - \* if VersionPickMode = "overwrite", do not check for version overlap - ELSE TRUE - -(* Pick a version depending on the value of the constant VersionPickMode - - - if VersionPickMode = "overwrite" - -> take the picked version from the message - - if VersionPickMode = "onAckNonDet" - -> pick a version from store.connection.version non-deterministically - - if VersionPickMode = "onAckDet" - -> pick a version from store.connection.version deterministically - (e.g., maximum) - - otherwise - -> use store.connection.version if - * version picking was done when handling ICS3MsgAck and - (m.version \intersect store.connection.version) is not empty - * version picking was done when handling ICS3MsgTry and - m.version = store.connection.version - (both conditions checked in PreconditionsAckMsg) - -*) -PickVersionOnConfirm(m) == - IF VersionPickMode = "overwrite" - \* take the picked version from the message - THEN {m.version} - ELSE IF VersionPickMode = "onAckNonDet" - \* the version is picked non-deterministically - THEN {<> : newVersion \in SequenceAsSet(store.connection.version)} - ELSE IF VersionPickMode = "onAckDet" - \* the version is picked deterministically, - \* using MAXSet as a deterministic choice function - THEN {<>} - \* the version was picked when handling ICS3MsgTry, - \* use the picked version from the ICS3MsgAck message - ELSE {store.connection.version} - -(* Handles a "ICS3MsgConfirm" message. - *) -HandleConfirmMsg(m) == - LET newConnSet == [parameters : {m.parameters}, - state : {"OPEN"}, - version : PickVersionOnConfirm(m)] - newStoreSet == {NewStore(newConn) : newConn \in newConnSet} IN - - IF /\ PreconditionsConfirmMsg(m) - /\ newStoreSet /= {} - THEN newStoreSet - ELSE {store} - - -(* Action for advancing the current height (latestHeight) of the chain. - - The environment triggers this as part of the GoodNextEnv action. - *) -AdvanceChainHeight == - store' = [store EXCEPT !.latestHeight = @ + 1] - - -(* State predicate returning true if MaxChainHeight not yet attained. - *) -CanAdvance == - store.latestHeight < MaxChainHeight - - -(* Action for updating the local client on this chain with a new height. - - This primes the store; leaves the chain buffers unchanged. - This will also advance the chain height. - *) -UpdateClient(height) == - /\ store' = [store EXCEPT !.latestHeight = @ + 1, - !.client.consensusHeights = @ \cup {height}, - !.client.latestHeight = MAX(height, store.client.latestHeight)] - - -(* State predicate guarding the UpdateClient action. - - This requires client updates to be monotonic (prevents updates with older - heights). - *) -CanUpdateClient(newHeight) == - /\ CanAdvance - /\ newHeight > store.client.latestHeight - - -(* Generic action for handling any type of inbound message. - - Expects as parameter a message. - Takes care of priming the 'store' and adding any reply msg in 'outBuf'. 
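- A handler may return several candidate stores (e.g., when a version is
- picked non-deterministically); one of them is then selected via
- store' \in resStores.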
- This action assumes the message type is valid, therefore one of the - disjunctions (in the CASE statements) will always enable. - *) -ProcessMsg == - /\ inBuf /= <<>> - /\ CanAdvance - /\ LET m == Head(inBuf) - resStores == CASE m.type = "ICS3MsgInit" -> HandleInitMsg(m) - [] m.type = "ICS3MsgTry" -> HandleTryMsg(m) - [] m.type = "ICS3MsgAck" -> HandleAckMsg(m) - [] m.type = "ICS3MsgConfirm" -> HandleConfirmMsg(m) IN - /\ store' \in resStores - /\ outBuf' = CASE m.type = "ICS3MsgInit" (* Get reply to the Init msg. *) - /\ store'.connection.state = "INIT" -> Append(outBuf, MsgInitReply(store')) - [] m.type = "ICS3MsgTry" (* Get reply to the Try msg. *) - /\ store'.connection.state = "TRYOPEN" -> Append(outBuf, MsgTryReply(store')) - [] m.type = "ICS3MsgAck" (* Get reply to the Ack msg. *) - /\ store'.connection.state = "OPEN" -> Append(outBuf, MsgAckReply(store')) - [] TRUE -> outBuf (* Default case: no reply necessary. *) - /\ inBuf' = Tail(inBuf) - - -(*************************************************************************** - Connection Handshake Module (ICS3) main spec. - ***************************************************************************) - -Init == - store \in [chainID : {ChainID}, - latestHeight : {1}, - connection : NullConnections, - client : InitClients(ClientIDs)] - -Next == - \/ ProcessMsg - \/ UNCHANGED moduleVars - -Fairness == - WF_moduleVars(ProcessMsg) - - -TypeInvariant == - /\ inBuf \in Seq(ConnectionHandshakeMessages) \union {<<>>} - /\ outBuf \in Seq(ConnectionHandshakeMessages) \union {<<>>} - /\ store \in Stores - - -============================================================================= -\* Modification History -\* Last modified Thu Aug 27 16:00:21 CEST 2020 by adi -\* Last modified Wed Aug 26 17:05:35 CEST 2020 by ilinastoilkovska -\* Created Fri Apr 24 19:08:19 CEST 2020 by adi \ No newline at end of file diff --git a/docs/spec/connection-handshake/L2-tla/ICS3Types.tla b/docs/spec/connection-handshake/L2-tla/ICS3Types.tla deleted file mode 100644 index fed0d0c045..0000000000 --- a/docs/spec/connection-handshake/L2-tla/ICS3Types.tla +++ /dev/null @@ -1,364 +0,0 @@ ------------------------------ MODULE ICS3Types ----------------------------- - -(*************************************************************************** - - This module is part of the TLA+ high-level specification for the - IBC Connection Handshake protocol (ICS3). - - This module includes common domain definitions that other modules will - extend. - - ***************************************************************************) - -EXTENDS Naturals, Sequences - -CONSTANTS MaxHeight, - AllConnectionIDs, - AllClientIDs, - AllChainIDs, - AllVersionSeqs - - -(******************************* InitClients ******************************** - - A set of records describing the possible initial values for the - clients on a chain. - - A client record contains the following fields: - - - consensusHeights -- a set of heights - Stores the set of all heights (i.e., consensus states) that this - client observed. At initialization time, the client only observes - the first height, so the only possible value for this record is - {1}. - - - clientID -- a string - The identifier of the client. This is expected as a parameter, since - it is a chain-specific field at initialization time. - - - latestHeight -- a number representing a (consensus) height - Stores the latest height among all the heights in consensusHeights. - Initialized to 1. 
- - ***************************************************************************) -InitClients(specificClientIDs) == - [ - consensusHeights : {{1}}, - clientID : specificClientIDs, - latestHeight : {1} - ] - - -(***************************** InitMsgs *********************************** - - The set of ConnectionHandshakeMessage records where message type is - ICS3MsgInit. - - This operator returns the set of all initialization messages, such that - the local end is the set 'le', and the remote end is set 're'. - - ***************************************************************************) -InitMsgs(le, re) == - [ - type : {"ICS3MsgInit"}, - parameters : [ - localEnd : le, - remoteEnd : re - ] - ] - - -(***************************** ICS3MessageTypes **************************** - - The set of valid message types that the ICS3Module can - handle, e.g., as incoming or outgoing messages. - - In the low-level connection handshake protocol, the four messages have - types: ConnOpenInit, ConnOpenTry, ConnOpenAck, ConnOpenConfirm. - In this high-level specification, we choose slightly different names, to - make an explicit distinction to the low-level protocol. Message types - are as follows: - ICS3MsgInit, ICS3MsgTry, ICS3MsgAck, and ICS3MsgConfirm. - For a complete description of the message record, see - ConnectionHandshakeMessage below. - - ***************************************************************************) -ICS3MessageTypes == - { - "ICS3MsgInit", - "ICS3MsgTry", - "ICS3MsgAck", - "ICS3MsgConfirm" - } - - -(******************************* ICS3ConnectionStates ********************** - - The set of valid states that a connection can be in. - - ***************************************************************************) -ICS3ConnectionStates == - { - "UNINIT", - "INIT", - "TRYOPEN", - "OPEN" - } - - -NullClientID == - "NULLClientID" - -NullConnectionID == - "NULLConnectionID" - - -(******************************* NullConnectionEnd ************************* - - A special record defining an uninitialized connection end record. - - ***************************************************************************) -NullConnectionEnd == - [ - connectionID |-> NullConnectionID, - clientID |-> NullClientID - ] - - -(******************************* NullConnectionParameters ****************** - - A record defining the special null connection parameters record. - - ***************************************************************************) -NullConnectionParameters == - [ - localEnd |-> NullConnectionEnd, - remoteEnd |-> NullConnectionEnd - ] - - -(******************************* ConnectionEnds ***************************** - - A set of connection end records. - A connection end record contains the following fields: - - - connectionID -- a string - Stores the identifier of this connection, specific to a chain. - - - clientID -- a string - Stores the identifier of the client running on this chain. - - ***************************************************************************) -ConnectionEnds == - [ - connectionID : AllConnectionIDs, - clientID : AllClientIDs - ] - - -(******************************* ConnectionParameters ********************** - - A set of connection parameter records. - A connection parameter record contains the following fields: - - - localEnd -- a connection end - Specifies the local connection details (i.e., connection ID and - client ID). - - - remoteEnd -- a connection end - Specifies the remote connection details. 
- - ***************************************************************************) -ConnectionParameters == - [ - localEnd : ConnectionEnds, - remoteEnd : ConnectionEnds - ] - \union - { - NullConnectionParameters - } - - -(******************************* NullConnection **************************** - - Initially, the connection on both chains is uninitialized, defined as - this special record. - - ***************************************************************************) -NullConnections == [ - parameters : {NullConnectionParameters}, - state : {"UNINIT"}, - version : AllVersionSeqs \ {<<>>} -] - - -(******************************* Connections ******************************* - - The set of possible connection records. - A connection record contains the following fields: - - - parameters -- a connection parameters record - Specifies the local plus remote ends. - - - state -- a connection state (see ConnectionStates set). - - ***************************************************************************) -Connections == - [ - parameters : ConnectionParameters, - state : ICS3ConnectionStates, - version : AllVersionSeqs - ] - - -(******************************* ConnProof ********************************* - - A set of records describing the possible values for connection proofs. - - A connection proof record contains a single field: - - - connection -- a connection record - This is the connection (in the local store of a chain) at the moment - when the module created this proof. - - ***************************************************************************) -ConnProofs == - [ - connection : Connections - ] - - -(******************************* Heights *********************************** - - The set of all possible heights that a chain can assume throughout any - execution. - - ***************************************************************************) -Heights == - 1..MaxHeight - - -(******************************* ClientProofs ******************************* - - A set of records describing the possible values for client proofs. - - A client proof record contains two fields: - - - latestHeight -- a number representing a height - The current height (latestHeight) of the client (in the local store of a - chain) at the moment when the ICS3 module created this proof. - - - consensusHeights -- a set of heights - The set of heights of the client colocated with module which created - this proof. - - ***************************************************************************) -ClientProofs == - [ - latestHeight : Heights, - consensusHeights : SUBSET Heights - ] - - -(*********************** ConnectionHandshakeMessages *********************** - - The set of ConnectionHandshakeMessage records. - These are connection handshake specific messages that two chains exchange - while executing the ICS3 protocol. 
- - ***************************************************************************) -ConnectionHandshakeMessages == - [ - type : {"ICS3MsgInit"}, - parameters : ConnectionParameters - ] - \union - [ - type : {"ICS3MsgTry"}, - parameters : ConnectionParameters, - proofHeight : Heights, - connProof : ConnProofs, - clientProof : ClientProofs, - version : AllVersionSeqs - ] - \union - [ - type : {"ICS3MsgAck"}, - parameters : ConnectionParameters, - proofHeight : Heights, - connProof : ConnProofs, - clientProof : ClientProofs, - version : AllVersionSeqs - ] - \union - [ - type : {"ICS3MsgConfirm"}, - parameters : ConnectionParameters, - proofHeight : Heights, - connProof : ConnProofs - ] - - - -(********************** MessageTypeIncludesConnProof *********************** - - Operator that evaluates to true if the message type (input parameter - 'type') refers to a message that includes a connection proof. - - ***************************************************************************) -MessageTypeIncludesConnProof(type) == - type \in {"ICS3MsgTry", "ICS3MsgAck", "ICS3MsgConfirm"} - - -(******************************* Clients *********************************** - - A set of records describing all the possible values for the - clients on a chain. - - See client record description above (within the InitClients operator). - - ***************************************************************************) -Clients == - [ - consensusHeights : SUBSET Heights, - clientID : AllClientIDs \union { NullClientID }, - latestHeight : Heights - ] - -(******************************* Stores ************************************* - - The set of store records. - A store record represents the local storage of a chain. This record - contains the following fields: - - - chainID -- a string - Stores the identifier of the chain where this module executes. - - - latestHeight -- a number representing a height - Describes the current height of the chain. - - - connection -- a connection record - Captures all the details of the connection on this chain. - For a full description of a connection record, see the - 'Environment.Connections' set. - - - client -- a client record. - Specifies the state of the client running on this chain. - - ***************************************************************************) -Stores == - [ - chainID : AllChainIDs, - latestHeight : Heights, - connection : Connections \union NullConnections, - client : Clients - ] - - -============================================================================= -\* Modification History -\* Last modified Thu Aug 20 14:14:03 CEST 2020 by ilinastoilkovska -\* Last modified Tue Jun 23 13:47:17 CEST 2020 by adi -\* Created Mon May 18 17:53:08 CEST 2020 by adi - diff --git a/docs/spec/connection-handshake/L2-tla/ICS3Utils.tla b/docs/spec/connection-handshake/L2-tla/ICS3Utils.tla deleted file mode 100644 index 98a52eb6d3..0000000000 --- a/docs/spec/connection-handshake/L2-tla/ICS3Utils.tla +++ /dev/null @@ -1,40 +0,0 @@ ------------------------------ MODULE ICS3Utils ----------------------------- - -(*************************************************************************** - - This module is part of the TLA+ high-level specification for the - IBC Connection Handshake protocol (ICS3). - - This module includes common action definitions that other modules need. - - ***************************************************************************) - -EXTENDS Naturals, FiniteSets, Sequences - - -(* Obtain a set from the given sequence. 
- *) -SequenceAsSet(seq) == - {seq[x] : x \in DOMAIN seq} - - -(* Checks if two version sequences overlap by taking the intersection of their - set representation. - *) -VersionSequencesOverlap(versionSeq1, versionSeq2) == - SequenceAsSet(versionSeq1) - \intersect - SequenceAsSet(versionSeq2) /= {} - - -(* Checks if the versions of the two chain stores overlap; a wrapper over the - base action 'VersionSequencesOverlap'. - *) -ChainVersionsOverlap(chainStore, otherChainStore) == - VersionSequencesOverlap(chainStore.connection.version, otherChainStore.connection.version) - - -============================================================================= -\* Modification History -\* Last modified Thu Aug 27 16:02:28 CEST 2020 by adi -\* Created Thu Aug 27 15:39:01 CEST 2020 by adi diff --git a/docs/spec/connection-handshake/L2-tla/README.md b/docs/spec/connection-handshake/L2-tla/README.md deleted file mode 100644 index fdb4b772ac..0000000000 --- a/docs/spec/connection-handshake/L2-tla/README.md +++ /dev/null @@ -1,64 +0,0 @@ -# IBC Connection Handshake (ICS3) TLA+ spec - - -This is a high-level TLA+ spec for the IBC Connection Handshake (ICS3) protocol. -The spec has four modules: - - - `Environment.tla` (main model lives here). - - `ICS3Module.tla` (the spec for the ICS3 module). - - `ICS3Types.tla` (common domain definitions). - - `ICS3Utils.tla` (common actions live here). - -To run this spec: - -1. add the modules in a new specification in the toolbox -2. specify values for constants `MaxHeight`, `MaxBufLen`, and `Concurrency`. -Two additional constants serve the version negotiation algorithm in the handshake: - - `MaxVersionNr` -- typical value `2`; or set to `1` to make version negotiation trivial; - - `VersionPickMode` -- typical value `"onAckDet"`; parametrizes the strategy for negotiating versions (see [below](#version-negotiation-modes)). - -Note the assumptions: - -``` -ASSUME MaxHeight > 4 -ASSUME MaxBufLen >= 1 -ASSUME VersionPickMode \in - {"overwrite", - "onTryDet", - "onTryNonDet", - "onAckDet", - "onAckNonDet"} -``` - -Typical values could be: `MaxHeight = 5` and `MaxBufLen = 2`. -The `Concurrency` flag enables/disables some non-determinsm of the environment, -specifically: - -- if TRUE, then the environment can non-deterministically update the light client of a chain. -This configuration simulates a liveness problem caused by the way relayers use `UpdateClient`, and will lead the model to stutter. -To be clear: the stuttering is not caused by a bug in the ICS3 protocol itself; this model simply captures the original faulty relayer algorithms surrounding the ICS3 protocol. -See more details in the [disclosure log](https://github.com/informalsystems/ibc-rs/pull/83). -- if FALSE, then the model should check correctly. - -3. add the invariant `ConsistencyInv` and `TypeInvariant` as well as the property (temporal formula) `Termination`. - -4. run the model checker. - -## Version negotiation modes - -We introduce different version picking modes, which are used to parameterize the way in which versions are picked during the connection handshake. 
That is, the constant `VersionPickMode` can take one of the following values:
- - `overwrite` : a version is picked non-deterministically when handling `ICS3MsgTry`, local version gets overwritten with version(s) sent in datagrams;
- - `onTryNonDet` : a version is picked non-deterministically when handling `ICS3MsgTry`, local version is chosen from intersection of local and datagram versions;
- - `onTryDet` : a version is picked deterministically when handling `ICS3MsgTry`, local version is chosen from intersection of local and datagram versions;
- - `onAckNonDet` : a version is picked non-deterministically when handling `ICS3MsgAck`, local version is chosen from intersection of local and datagram versions;
- - `onAckDet` : a version is picked deterministically when handling `ICS3MsgAck`, local version is chosen from intersection of local and datagram versions.
-
- The table below details these modes:
-
-| Mode\Action | `HandleMsgTry(m)` | `HandleMsgAck(m)` | `HandleMsgConfirm(m)` |
-|-------------|-----------------------------------------|-----------------------------|-----------------------------|
-|`overwrite` | pick a version from `m.versions \intersect conn.versions` non-deterministically, send the picked version to counterparty in `ICS3MsgAck` | store `m.version` locally, send it to counterparty in `ICS3MsgConfirm` | store `m.version` locally |
-|`onTryNonDet`| pick a version from `m.versions \intersect conn.versions` non-deterministically, send the picked version to counterparty in `ICS3MsgAck` | check if received version in `ICS3MsgAck` is in list of local versions, accept it if it is, send it to counterparty in `ICS3MsgConfirm` | check if received version is the same as one stored in connection end|
-|`onTryDet` | pick a version from `m.versions \intersect conn.versions` deterministically (e.g. maximum), store & send the picked version to counterparty in `ICS3MsgAck` | check if received version in `ICS3MsgAck` is in list of local versions, accept & store it if it is, then send it to counterparty in `ICS3MsgConfirm` | check if received version is the same as one stored in connection end|
-|`onAckNonDet`| send the value of `m.versions \intersect conn.versions` to counterparty in `ICS3MsgAck`, store the intersection locally | pick a version from `m.versions \intersect conn.versions` non-deterministically, send the intersection to counterparty in `ICS3MsgConfirm` | pick a version from `conn.versions` non-deterministically |
-|`onAckDet`| send the value of the intersection `m.versions \intersect conn.versions` to counterparty in `ICS3MsgAck`, store the intersection locally | pick a version from `m.versions \intersect conn.versions` deterministically (e.g. maximum), send the intersection to counterparty in `ICS3MsgConfirm` | pick a version from `conn.versions` deterministically (e.g. maximum)|
diff --git a/docs/spec/connection-handshake/README.md b/docs/spec/connection-handshake/README.md
deleted file mode 100644
index 928fa1716b..0000000000
--- a/docs/spec/connection-handshake/README.md
+++ /dev/null
@@ -1,11 +0,0 @@
-# IBC Connection Handshake Spec
-
-## Specification roadmap
-
-In this folder you will find a collection of documents representing English & TLA+ specifications for the IBC connection handshake problem [[ICS-003](https://github.com/cosmos/ibc/tree/master/spec/core/ics-003-connection-semantics)].
- -We currently cover two levels of abstraction of ICS2, in accordance with the [VDD workflow](https://github.com/informalsystems/VDD/blob/master/guide/guide.md): _level 1_ (abstract), _level 2_ (system model & distributed protocol). -Consequently, we break this work across the following documents: - -- [L1_2.md](./L1_2.md) covers the highest level of abstraction (level 1) and also includes an English spec of the system model and protocol (level 2); -- [L2-tla](./L2-tla/) is a directory with the TLA+ spec for level 2. diff --git a/docs/spec/relayer/Definitions.md b/docs/spec/relayer/Definitions.md deleted file mode 100644 index 5d8830a717..0000000000 --- a/docs/spec/relayer/Definitions.md +++ /dev/null @@ -1,292 +0,0 @@ -# Data structure and helper function definitions - -This document defines data types and helper functions used by the relayer logic. - -## Data Types - -### Chain - -Chain is a data structure that captures relayer's perspective of a given chain and contains all important -information that allows relayer to communicate with a chain. A provider is a Tendermint full node through -which a relayer read information about the given chain and submit transactions. A relayer maintains a list -of full nodes (*peerList*) as a current provider could be faulty, so it can be replaced by another full node. -For each chain a relayer is connected to, the relayer has a light client that provides the relayer -access to the trusted headers (used as part of data verification). - -```go -type Chain { - chainID string - clientID Identifier - peerList List> - provider Pair - lc LightClient -} -``` - -### Client state and consensus state - -```go -type ClientState { - chainID string - validatorSet List> - trustLevel Rational - trustingPeriod uint64 - unbondingPeriod uint64 - latestHeight Height - latestTimestamp uint64 - frozenHeight Maybe - upgradeCommitmentPrefix CommitmentPrefix - upgradeKey []byte - maxClockDrift uint64 - proofSpecs []ProofSpec -} -``` - -```go -type ConsensusState { - timestamp uint64 - validatorSet List> - commitmentRoot []byte -} -``` - -### Membership proof - -```go -type MembershipProof struct { - Height Height - Proof Proof -} -``` - -### Connection - -```go -type ConnectionEnd { - state ConnectionState - counterpartyConnectionIdentifier Identifier - counterpartyPrefix CommitmentPrefix - clientIdentifier Identifier - counterpartyClientIdentifier Identifier - version []string -} - -enum ConnectionState { - INIT, - TRYOPEN, - OPEN, -} -``` - -### Channel - -```go -type ChannelEnd { - state ChannelState - ordering ChannelOrder - counterpartyPortIdentifier Identifier - counterpartyChannelIdentifier Identifier - connectionHops [Identifier] - version string -} - -enum ChannelState { - INIT, - TRYOPEN, - OPEN, - CLOSED, -} - -enum ChannelOrder { - ORDERED, - UNORDERED, -} -``` - -```go -type Packet { - sequence uint64 - timeoutHeight Height - timeoutTimestamp uint64 - sourcePort Identifier - sourceChannel Identifier - destPort Identifier - destChannel Identifier - data []byte -} -``` - -```go -type PacketRecv { - packet Packet - proof CommitmentProof - proofHeight Height -} -``` - -```go -type PacketAcknowledgement { - packet Packet - acknowledgement byte[] - proof CommitmentProof - proofHeight Height -} -``` - -## Helper functions - -We assume the existence of the following helper functions: - -```go -// Returns channel end with a commitment proof. 
-GetChannel(chain Chain, - portId Identifier, - channelId Identifier, - proofHeight Height) (ChannelEnd, CommitmentProof, Error) - -// Returns connection end with a commitment proof. -GetConnection(chain Chain, - connectionId Identifier, - proofHeight Height) (ConnectionEnd, CommitmentProof, Error) - - -// Returns client state with a commitment proof. -GetClientState(chain Chain, - clientId Identifier, - proofHeight Height) (ClientState, CommitmentProof, Error) - -// Returns consensus state with a commitment proof. -GetConsensusState(chain Chain, - clientId Identifier, - targetHeight Height, - proofHeight Height) (ConsensusState, CommitmentProof, Error) - - -// Returns packet commitment with a commitment proof. -GetPacketCommitment(chain Chain, - portId Identifier, - channelId Identifier, - sequence uint64, - proofHeight Height) (bytes, CommitmentProof, Error) - -// Returns next recv sequence number with a commitment proof. -GetNextSequenceRecv(chain Chain, - portId Identifier, - channelId Identifier, - proofHeight Height) (uint64, CommitmentProof, Error) - - -// Returns next recv sequence number with a commitment proof. -GetNextSequenceAck(chain Chain, - portId Identifier, - channelId Identifier, - proofHeight Height) (uint64, CommitmentProof, Error) - - -// Returns packet acknowledgment with a commitment proof. -GetPacketAcknowledgement(chain Chain, - portId Identifier, - channelId Identifier, - sequence uint64, - proofHeight Height) (bytes, CommitmentProof, Error) - - -// Returns packet receipt with a commitment proof. -GetPacketReceipt(chain Chain, - portId Identifier, - channelId Identifier, - sequence uint64, - proofHeight Height) (String, CommitmentProof, Error) - - -// Returns estimate of the consensus height on the given chain. -GetConsensusHeight(chain Chain) Height - -// Returns estimate of the current time on the given chain. -GetCurrentTimestamp(chainB) uint64 - -// Verify that the data is written at the given path using provided membership proof and the root hash. -VerifyMembership(rootHash []byte, - proofHeight Height, - proof MembershipProof, - path String, - data []byte) boolean - -// Create IBC datagram as part of processing event at chainA. -CreateDatagram(ev IBCEvent, - chainA Chain, - chainB Chain, - installedHeight Height) (IBCDatagram, Error) - -// Create UpdateClient datagrams from the list of signed headers -CreateUpdateClientDatagrams(shs []SignedHeader) IBCDatagram[] - -// Submit given datagram to a given chain -Submit(chain Chain, datagram IBCDatagram) Error - -// Return the correspondin chain for a given chainID -// We assume that the relayer maintains a map of known chainIDs and the corresponding chains. -GetChain(chainID String) Chain -``` - -For functions that return proof, if `error == nil`, then the returned value is being verified. -The value is being verified using the header's app hash that is provided by the corresponding light client. - -Helper functions listed above assume querying (parts of the) application state using Tendermint RPC. For example, -`GetChannel` relies on `QueryChannel`. RPC calls can fail if: - -- no response is received within some timeout or -- malformed response is received. - -In both cases, error handling logic should be defined by the caller. For example, in the former case, the caller might -retry sending the same request to a same provider (full node), while in the latter case the request might be sent to -some other provider node. 
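To illustrate this caller-side policy, here is a sketch in the same Go-style pseudocode as the helpers above (the failover wrapper, the `TIMEOUT` error value, `maxAttempts`, and `PickNewProvider` are assumptions introduced for illustration, not part of the specification):

```go
// Illustrative sketch only: retry the same provider on a timeout, switch to
// another full node from chain.peerList on a malformed response, and give up
// with Error.BADPROVIDER after a bounded number of attempts.
func QueryChannelWithFailover(chain Chain,
                              portId Identifier,
                              channelId Identifier,
                              proofHeight Height,
                              maxAttempts uint64) (ChannelEnd, CommitmentProof, Error) {
    attempts = 0
    while attempts < maxAttempts {
        channel, proof, error = QueryChannel(chain.provider, portId, channelId, proofHeight)
        if error == nil { return (channel, proof, nil) }

        if error == TIMEOUT {
            // no response within the timeout: retry the same provider
            attempts = attempts + 1
        } else {
            // malformed response: replace the provider with another peer
            chain.provider = PickNewProvider(chain.peerList)
            attempts = attempts + 1
        }
    }
    return (nil, nil, Error.BADPROVIDER)
}
```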
Although these kinds of errors could be due to network infrastructure issues, it is normally -simpler to blame the provider (assume implicitly network is always correct and reliable). Therefore, correct provider -always respond timely with a correct response, while in case of errors we consider the provider node faulty, and then -we replace it with a different node. - -We assume the following error types: - -```golang -enum Error { - RETRY, // transient processing error (for example due to optimistic send); function can be retried later - DROP, // event has already been received by the destination chain so it should be dropped - BADPROVIDER, // provider does not reply timely or with a correct data; it normally leads to replacing provider - BADLIGHTCLIENT // light client does not reply timely or with a correct data -} -``` - -We now show the pseudocode for one of those functions: - -```go -func GetChannel(chain Chain, - portId Identifier, - channelId Identifier, - proofHeight Height) (ChannelEnd, CommitmentProof, Error) { - - // Query provable store exposed by the full node of chain. - // The path for the channel end is at channelEnds/ports/{portId}/channels/{channelId}". - // The channel and the membership proof returned is read at height proofHeight - 1. - channel, proof, error = QueryChannel(chain.provider, portId, channelId, proofHeight) - if error != nil { return (nil, nil, Error.BADPROVIDER) } - - header, error = GetHeader(chain.lc, proofHeight) // get header for height proofHeight using light client - if error != nil { return (nil, nil, Error.BADLIGHTCLIENT) } // return if light client can't provide header for the given height - - // verify membership of the channel at path channelEnds/ports/{portId}/channels/{channelId} using - // the root hash header.AppHash - if !VerifyMembership(header.AppHash, proofHeight, proof, channelPath(portId, channelId), channel) { - // membership check fails; therefore provider is faulty. Try to elect new provider - return (nil, nil, Error.BadProvider) - } - - return (channel, proof, nil) -} -``` - -If *LATEST_HEIGHT* is passed as a parameter, the data should be read (and the corresponding proof created) -at the most recent height. - - - - diff --git a/docs/spec/relayer/Packets.md b/docs/spec/relayer/Packets.md deleted file mode 100644 index b9e24633ef..0000000000 --- a/docs/spec/relayer/Packets.md +++ /dev/null @@ -1,196 +0,0 @@ -# IBC packet handling - -This document specifies datagram creation logic for packets. It is used by the relayer. - -## Packet related IBC events - -```go -type SendPacketEvent { - height Height - sequence uint64 - timeoutHeight Height - timeoutTimestamp uint64 - sourcePort Identifier - sourceChannel Identifier - destPort Identifier - destChannel Identifier - data []byte -} -``` - -```go -type WriteAcknowledgementEvent { - height Height - port Identifier - channel Identifier - sequence uint64 - timeoutHeight Height - timeoutTimestamp uint64 - data []byte - acknowledgement []byte -} -``` - -## Event handlers - -### SendPacketEvent handler - -Successful handling of *SendPacketEvent* leads to *PacketRecv* datagram creation. - -// NOTE: Stateful relayer might keep packet that are not acked in the state so the following logic -// can be a bit simpler. 
- -```golang -func CreateDatagram(ev SendPacketEvent, - chainA Chain, // source chain - chainB Chain, // destination chain - proofHeight Height) (PacketRecv, Error) { - - // Stage 1 - // Verify if packet is committed to chain A and it is still pending (commitment exists) - - packetCommitment, packetCommitmentProof, error = - GetPacketCommitment(chainA, ev.sourcePort, ev.sourceChannel, ev.sequence, proofHeight) - if error != nil { return (nil, error) } - - if packetCommitment == nil OR - packetCommitment != hash(concat(ev.data, ev.timeoutHeight, ev.timeoutTimestamp)) { - // invalid event; bad provider - return (nil, Error.BADPROVIDER) - } - - // Stage 2 - // Execute checks IBC handler on chainB will execute - - channel, proof, error = GetChannel(chainB, ev.destPort, ev.destChannel, LATEST_HEIGHT) - if error != nil { return (nil, error) } - - if channel != nil AND - (channel.state == CLOSED OR - ev.sourcePort != channel.counterpartyPortIdentifier OR - ev.sourceChannel != channel.counterpartyChannelIdentifier) { return (nil, Error.DROP) } - - if channel == nil OR channel.state != OPEN { return (nil, Error.RETRY) } - // TODO: Maybe we shouldn't even enter handle loop for packets if the corresponding channel is not open! - - connectionId = channel.connectionHops[0] - connection, proof, error = GetConnection(chainB, connectionId, LATEST_HEIGHT) - if error != nil { return (nil, error) } - - if connection == nil OR connection.state != OPEN { return (nil, Error.RETRY) } - - if ev.timeoutHeight != 0 AND GetConsensusHeight(chainB) >= ev.timeoutHeight { return (nil, Error.DROP) } - if ev.timeoutTimestamp != 0 AND GetCurrentTimestamp(chainB) >= ev.timeoutTimestamp { return (nil, Error.DROP) } - - // we now check if this packet is already received by the destination chain - if channel.ordering === ORDERED { - nextSequenceRecv, proof, error = GetNextSequenceRecv(chainB, ev.destPort, ev.destChannel, LATEST_HEIGHT) - if error != nil { return (nil, error) } - - if ev.sequence != nextSequenceRecv { return (nil, Error.DROP) } // packet has already been delivered by another relayer - - } else { - // Note that absence of receipt (packetReceipt == nil) is also proven also and we should be able to verify it. - packetReceipt, proof, error = - GetPacketReceipt(chainB, ev.destPort, ev.destChannel, ev.sequence, LATEST_HEIGHT) - if error != nil { return (nil, error) } - - if packetReceipt != nil { return (nil, Error.DROP) } // packet has already been delivered by another relayer - } - - // Stage 3 - // Build datagram as all checks has passed - packet = Packet { - sequence: ev.sequence, - timeoutHeight: ev.timeoutHeight, - timeoutTimestamp: ev.timeoutTimestamp, - sourcePort: ev.sourcePort, - sourceChannel: ev.sourceChannel, - destPort: ev.destPort, - destChannel: ev.destChannel, - data: ev.data - } - - return (PacketRecv { packet, packetCommitmentProof, proofHeight }, nil) -} -``` - -### WriteAcknowledgementEvent handler - -Successful handling of *WriteAcknowledgementEvent* leads to *PacketAcknowledgement* datagram creation. 
- -```golang -func CreateDatagram(ev WriteAcknowledgementEvent, - chainA Chain, // source chain - chainB Chain, // destination chain - proofHeight Height) (PacketAcknowledgement, Error) { - - // Stage 1 - // Verify if acknowledment is committed to chain A and it is still pending - packetAck, PacketStateProof, error = - GetPacketAcknowledgement(chainA, ev.port, ev.channel, ev.sequence, proofHeight) - if error != nil { return (nil, error) } - - if packetAck == nil OR packetAck != hash(ev.acknowledgement) { - // invalid event; bad provider - return (nil, Error.BADPROVIDER) - } - - // Stage 2 - // Execute checks IBC handler on chainB will execute - - // Fetch channelEnd from the chainA to be able to compute port and chain ids on destination chain - channelA, proof, error = GetChannel(chainA, ev.port, ev.channel, ev.height) - if error != nil { return (nil, error) } - - channelB, proof, error = - GetChannel(chainB, channelA.counterpartyPortIdentifier, channelA.counterpartyChannelIdentifier, LATEST_HEIGHT) - if error != nil { return (nil, error) } - - if channelB == nil OR channel.state != OPEN { (nil, Error.DROP) } - // Note that we checked implicitly above that counterparty identifiers match each other - - connectionId = channelB.connectionHops[0] - connection, proof, error = GetConnection(chainB, connectionId, LATEST_HEIGHT) - if error != nil { return (nil, error) } - - if connection == nil OR connection.state != OPEN { return (nil, Error.DROP) } - - // verify the packet is sent by chainB and hasn't been cleared out yet - packetCommitment, packetCommitmentProof, error = - GetPacketCommitment(chainB, channelA.counterpartyPortIdentifier, - channelA.counterpartyChannelIdentifier, ev.sequence, LATEST_HEIGHT) - if error != nil { return (nil, error) } - - if packetCommitment == nil OR - packetCommitment != hash(concat(ev.data, ev.timeoutHeight, ev.timeoutTimestamp)) { - // invalid event; bad provider - return (nil, Error.BADPROVIDER) - } - - // abort transaction unless acknowledgement is processed in order - if channelB.ordering === ORDERED { - nextSequenceAck, proof, error = - GetNextSequenceAck(chainB, channelA.counterpartyPortIdentifier, - channelA.counterpartyChannelIdentifier, ev.sequence, LATEST_HEIGHT) - if error != nil { return (nil, error) } - - if ev.sequence != nextSequenceAck { return (nil, Error.DROP) } - } - - // Stage 3 - // Build datagram as all checks has passed - packet = Packet { - sequence: ev.sequence, - timeoutHeight: ev.timeoutHeight, - timeoutTimestamp: ev.timeoutTimestamp, - sourcePort: channelA.counterpartyPortIdentifier, - sourceChannel: channelA.counterpartyChannelIdentifier, - destPort: ev.port, - destChannel: ev.channel, - data: ev.data - } - - return (PacketAcknowledgement { packet, ev.acknowledgement, PacketStateProof, proofHeight }, nil) -} -``` diff --git a/docs/spec/relayer/Relayer.md b/docs/spec/relayer/Relayer.md deleted file mode 100644 index 4158c38dfe..0000000000 --- a/docs/spec/relayer/Relayer.md +++ /dev/null @@ -1,200 +0,0 @@ -# Relayer Specification - -Relayers are processes that provide connection layer of the IBC protocol. In the IBC protocol, on chain -modules do not have a way of directly sending a message to each other; this is the responsibility of relayer -processes. Modules signal its intention to send a message by writing data in its data store at the -defined location, and make those data (with corresponding proofs) available to external parties. 
-Relayer processes read (we also say scan) the state of each chain, construct appropriate IBC datagrams, -verify the corresponding proofs and submit valid datagrams to the destination chain. -We assume the existence of multiple relayers, where some relayers could be faulty (behave arbitrarily), -but there is always at least a single correct relayer. We don't make assumptions on the maximum number of -faulty relayers. - -For the purpose of this specification we assume the existence of two on-chain modules A and B that execute the -IBC protocol. We say that a module A (or B) sends an IBC datagram m to a module B (or A) when a correct -relayer can construct a valid datagram m by scanning the state of the chain A. We say that a module A receives -an IBC datagram m, when m was processed by the module A on-chain. We assume that modules -are correct. - -Correct relayers need to ensure the following properties: - -**[ICS18-Delivery]**: If a module A sends an IBC datagram m to a module B, then m is -eventually received by the module B. - -**[ICS18-Validity]**: If a module B receives an IBC datagram m from a module A, -then m was sent by the module A to the module B. - -## System model - -We assume that a correct relayer operates in the following model: - -### Connected chains - -Relayer transfers data between two chains: chainA and chainB. For simplicity, we assume Tendermint chains. -Each chain operates under the Tendermint security model: -- given a block b at height h committed at time `t = b.Header.Time`, `+2/3` of voting power behaves correctly -at least before `t + UNBONDING_PERIOD`, where `UNBONDING_PERIOD` is a system parameter (typically on the order of weeks). -Validator sets can be changed in every block, and we don't assume any constraint on the way validators are changed -(application specific logic). - -Furthermore, we assume that blockchain applications that operate on top of chainA and chainB write -relevant data into a Merkleised data store (for example IBC packets), and that parts of the store are publicly -available (so relayers can access it). - -In order to access IBC relevant data, a relayer needs to establish connections with (correct) full nodes from -both chains. Note that there is no constraint on the number of faulty full nodes: we can only assume that a correct relayer -will eventually have access to a correct full node. - -### Data availability - -Note that data written to a store at height *h* as part of executing block *b* (`b.Height = h`) is effectively committed by -the next block (at height h+1). The reason is that the data store root hash resulting from executing the block at -height h is part of the block header at height h+1. Therefore, data read at height h is available until time -`t = b.Header.Time + UNBONDING_PERIOD`, where `b.Header.Height = h+1`. After time *t* we cannot trust that data anymore. -Note that data present in the store are re-validated by each new block: data added/modified at block *h* are still -valid even if not altered after, as they are still "covered" by the root hash of the store. - -Therefore, UNBONDING_PERIOD gives an absolute time bound during which the relayer needs to transfer data read at the source chain -to the destination chain. As we will explain below, due to fork detection and accountability protocols, the effective -data availability period will be shorter than UNBONDING_PERIOD. - -### Data verification - -As connected chains in IBC do not blindly trust each other, data coming from the opposite chain must be verified at -the destination before being acted upon.
Data verification in IBC is implemented by relying on the concept of a light client. -A light client is a process that, starting from an initial trusted header (subjective initialisation), verifies and maintains -a set of trusted headers. Note that a light client does not maintain the full blockchain and does not execute (verify) application -transitions. It operates by relying on the Tendermint security model, and by applying header verification logic that operates -only on signed headers (header + corresponding commit). - -More details about light client assumptions and protocols can be found -[here](https://github.com/tendermint/spec/tree/master/rust-spec/lightclient). For the purpose of this document, we assume -that a relayer has access to a light client node that provides trusted headers. -Given data d read at a given path at height h with a proof p, we assume the existence of a function -`VerifyMembership(header.AppHash, h, proof, path, d)` that returns `true` if the data was committed by the corresponding -chain at height *h*. The trusted header is provided by the corresponding light client. - -## Relayer algorithm - -The main relayer event loop is a pipeline of four stages. Assuming some IBC event at height `h` on `chainA`, -the relayer: - -1. Determines the destination chain (`chainB`). -2. Updates (on `chainB`) the IBC client for `chainA` to a certain height `H` where `H >= h+1`. -3. Creates the IBC datagram at height `H-1`. -4. Submits the datagram from stage (3) to `chainB`. - -Note that an IBC event at height `h` corresponds to the modifications to the data store made as part of executing -block at height `h`. The corresponding proof (that data is indeed written to the data store) can be verified using -the data store root hash that is part of the header at height `h+1`. - -Once stage 2 finishes correctly, stage 3 should succeed assuming that `chainB` has not already processed the event. The -interface between stage 2 and stage 3 is just the height `H`. Once stage 3 finishes correctly, stage 4 should -succeed. The interface between stage 3 and stage 4 is an IBC datagram. - -We assume that the corresponding light client is correctly installed on each chain. - -Data structures and helper function definitions are provided -[here](https://github.com/informalsystems/ibc-rs/blob/master/docs/spec/relayer/Definitions.md). - -```golang -func handleEvent(ev, chainA) Error { - // NOTE: we don't verify if event data are valid at this point. We trust the full node we are connected to - // until some verification fails. - - // Stage 1. - // Determine the destination chain - chainB, error = getDestinationInfo(ev, chainA) - if error != nil { return error } - - // Stage 2. - // Update on `chainB` the IBC client for `chainA` to height `>= targetHeight`. - targetHeight = ev.height + 1 - // See the code for `updateIBCClient` below. - proofHeight, error := updateIBCClient(chainB, chainA, targetHeight) - if error != nil { return error } - - // Stage 3. - // Create the IBC datagrams including `ev` & verify them. - datagram, error = CreateDatagram(ev, chainA, chainB, proofHeight) - if error != nil { return error } - - // Stage 4. - // Submit datagrams.
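    // The datagram submitted below is the one built in Stage 3; any error (for example a
    // transient Error.RETRY) is returned to the caller, which may re-run handleEvent for
    // this event later.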
- error = Submit(chainB, datagram) - if error != nil { return error } -} - -func getDestinationInfo(ev IBCEvent, chain Chain) (Chain, Error) { - switch ev.type { - case SendPacketEvent: - chainId, error = getChainId(chain, ev.sourcePort, ev.sourceChannel, ev.Height) - if error != nil { return (nil, error) } - - chain = GetChain(chainId) - if chain == nil { return (nil, Error.DROP) } - - return (chain, nil) - - case WriteAcknowledgementEvent: - chainId, error = getChainId(chain, ev.Port, ev.Channel, ev.Height) - if error != nil { return (nil, error) } - - chain = GetChain(chainId) - if chain == nil { nil, Error.DROP } - - return (chain, nil) - } -} - -// Return chaindId of the destination chain based on port and channel info for the given chain -func getChainId(chain Chain, port Identifier, channel Identifier, height Height) (String, Error) { - channel, proof, error = GetChannel(chain, port, channel, height) - if error != nil { return (nil, error) } - - connectionId = channel.connectionHops[0] - connection, proof, error = GetConnection(chain, connectionId, height) - if error != nil { return (nil, error) } - - clientState, proof, error = GetClientState(chain, connection.clientIdentifier, height) - if error != nil { return (nil, error) } - - return (clientState.chainID, error) -} - -// Perform an update on `dest` chain for the IBC client for `src` chain. -// Preconditions: -// - `src` chain has height greater or equal to `targetHeight` -// Postconditions: -// - returns the installedHeight >= targetHeight -// - return error if some of verification steps fail -func updateIBCClient(dest Chain, src Chain, targetHeight Height) -> (Height, Error) { - - clientState, proof, error = GetClientState(dest, dest.clientId, LATEST_HEIGHT) - if error != nil { return (nil, error) } - // NOTE: What if a full node we are connected to send us stale (but correct) information regarding targetHeight? - - // if installed height is smaller than the targetHeight, we need to update client with targetHeight - while (clientState.latestHeight < targetHeight) { - // Do an update to IBC client for `src` on `dest`. - shs, error = src.lc.getMinimalSet(clientState.latestHeight, targetHeight) - if error != nil { return (nil, error) } - - error = dest.submit(createUpdateClientDatagrams(shs)) - if error != nil { return (nil, error) } - - clientState, proof, error = GetClientState(dest, dest.clientId, LATEST_HEIGHT) - if error != nil { return (nil, error) } - } - - // NOTE: semantic check of the installed header is done using fork detection component - return { clientState.Height, nil } -} -``` - - - - - - - diff --git a/docs/spec/tla/client/Chain.tla b/docs/spec/tla/client/Chain.tla deleted file mode 100644 index 765c9b2450..0000000000 --- a/docs/spec/tla/client/Chain.tla +++ /dev/null @@ -1,137 +0,0 @@ ------------------------------ MODULE Chain ------------------------------ - -(*************************************************************************** - This module models the behavior of a chain running the IBC Core Client - Protocol. 
-****************************************************************************) - -EXTENDS Integers, FiniteSets, ICS02ClientHandlers, ICS02Definitions - -CONSTANTS - MaxHeight, \* maximal chain height - ChainID, \* chain identifier - NrClients, \* number of clients that will be created on the chain - ClientIDs \* a set of counterparty client IDs - -VARIABLES - chainStore, \* chain store, containing a client state for each client - incomingDatagrams, \* set of incoming datagrams - history \* history variable - -vars == <> -Heights == 1..MaxHeight \* set of possible heights of the chains in the system - -\* @type: (CHAINSTORE, Str) => Int; -GetClientNr(store, clientID) == - IF \E clientNr \in DOMAIN chainStore.clientStates : - store.clientStates[clientNr].clientID = clientID - THEN CHOOSE clientNr \in DOMAIN store.clientStates : - store.clientStates[clientNr].clientID = clientID - ELSE 0 - -(*************************************************************************** - Client update operators - ***************************************************************************) -\* Update the clients on chain with chainID, -\* using the client datagrams generated by the relayer -\* (Handler operators defined in ClientHandlers.tla) -LightClientUpdate(chainID, store, clientID, datagrams) == - \* create client - LET clientCreatedStore == HandleCreateClient(store, clientID, datagrams) IN - \* update client - LET clientUpdatedStore == HandleClientUpdate(clientCreatedStore, clientID, datagrams, MaxHeight) IN - - clientUpdatedStore - -(*************************************************************************** - Chain actions - ***************************************************************************) -\* Advance the height of the chain until MaxHeight is reached -AdvanceChain == - /\ chainStore.height + 1 \in Heights - /\ chainStore' = [chainStore EXCEPT !.height = chainStore.height + 1] - /\ UNCHANGED <> - -\* Handle the datagrams and update the chain state -HandleIncomingDatagrams == - /\ incomingDatagrams /= {} - /\ \E clientID \in ClientIDs : - /\ chainStore' = LightClientUpdate(ChainID, chainStore, clientID, incomingDatagrams) - /\ history' = [history EXCEPT ![clientID] = - LET clientNr == GetClientNr(chainStore', clientID) IN - IF /\ clientNr /= 0 - /\ ~history[clientID].created - /\ chainStore.clientStates[clientNr].clientID = nullClientID - /\ chainStore'.clientStates[clientNr].clientID /= nullClientID - THEN [created |-> TRUE, updated |-> history[clientID].updated] - ELSE IF /\ clientNr /= 0 - /\ history[clientID].created - /\ chainStore.clientStates[clientNr].heights /= chainStore'.clientStates[clientNr].heights - /\ chainStore.clientStates[clientNr].heights \subseteq chainStore'.clientStates[clientNr].heights - THEN [created |-> history[clientID].created, updated |-> TRUE] - ELSE history[clientID] - ] - /\ incomingDatagrams' = {dgr \in incomingDatagrams : dgr.clientID /= clientID} - -(*************************************************************************** - Specification - ***************************************************************************) -\* Initial state predicate -\* Initially -\* - each chain is initialized to InitChain (defined in RelayerDefinitions.tla) -\* - pendingDatagrams for each chain is empty -\* - the packetSeq is set to 1 -Init == - /\ chainStore = ICS02InitChainStore(NrClients, ClientIDs) - /\ incomingDatagrams = {} - -\* Next state action -\* The chain either -\* - advances its height -\* - receives datagrams and updates its state -\* - sends a packet if the 
appPacketSeq is not bigger than MaxPacketSeq -\* - acknowledges a packet -Next == - \/ AdvanceChain - \/ HandleIncomingDatagrams - \/ UNCHANGED vars - -Fairness == - /\ WF_vars(AdvanceChain) - /\ WF_vars(HandleIncomingDatagrams) - -(*************************************************************************** - Invariants - ***************************************************************************) -\* Type invariant -\* ChainStores and Datagrams are defined in RelayerDefinitions.tla -TypeOK == - /\ chainStore \in ChainStores(NrClients, ClientIDs, MaxHeight) - /\ incomingDatagrams \in SUBSET Datagrams(ClientIDs, MaxHeight) - -\* two clients with the same ID cannot be created -CreatedClientsHaveDifferentIDs == - (\A clientNr \in 1..NrClients : - chainStore.clientStates[clientNr].clientID /= nullClientID) - => (\A clientNr1 \in 1..NrClients : \A clientNr2 \in 1..NrClients : - clientNr1 /= clientNr2 - => chainStore.clientStates[clientNr1].clientID /= - chainStore.clientStates[clientNr2].clientID) - -\* only created clients can be updated -UpdatedClientsAreCreated == - \A clID \in ClientIDs : - history[clID].updated => history[clID].created - -(*************************************************************************** - Properties - ***************************************************************************) -\* it ALWAYS holds that the height of the chain does not EVENTUALLY decrease -HeightDoesntDecrease == - [](\A h \in Heights : chainStore.height = h - => <>(chainStore.height >= h)) - -============================================================================= -\* Modification History -\* Last modified Thu Apr 15 12:17:59 CEST 2021 by ilinastoilkovska -\* Created Fri Jun 05 16:56:21 CET 2020 by ilinastoilkovska diff --git a/docs/spec/tla/client/ICS02ClientHandlers.tla b/docs/spec/tla/client/ICS02ClientHandlers.tla deleted file mode 100644 index 2e31ed2df5..0000000000 --- a/docs/spec/tla/client/ICS02ClientHandlers.tla +++ /dev/null @@ -1,97 +0,0 @@ ------------------------ MODULE ICS02ClientHandlers ------------------------- - -(*************************************************************************** - This module contains definitions of operators that are used to handle - client create and update datagrams. 
- ***************************************************************************) - -EXTENDS Integers, FiniteSets, ICS02Definitions - -(*************************************************************************** - Client datagram handlers - ***************************************************************************) - -\* Handle "CreateClient" datagrams -\* @type: (CHAINSTORE, Str, Set(DATAGRAM)) => CHAINSTORE; -HandleCreateClient(chain, clientID, datagrams) == - \* get "CreateClient" datagrams with valid clientID - LET createClientDgrs == {dgr \in datagrams : - /\ dgr.type = "CreateClient" - /\ dgr.clientID = clientID} IN - \* get heights in datagrams with correct counterparty clientID for chainID - LET createClientHeights == {dgr.height : dgr \in createClientDgrs} IN - \* get next available client number where a client can be created - LET nextClientNr == - IF /\ \A clientNr \in DOMAIN chain.clientStates : - chain.clientStates[clientNr].clientID /= clientID - /\ \E clientNr \in DOMAIN chain.clientStates : - chain.clientStates[clientNr].clientID = nullClientID - THEN CHOOSE clientNr \in DOMAIN chain.clientStates : - \/ /\ clientNr = 1 - /\ chain.clientStates[clientNr].clientID = nullClientID - \/ /\ clientNr - 1 \in DOMAIN chain.clientStates - /\ chain.clientStates[clientNr - 1].clientID /= nullClientID - /\ chain.clientStates[clientNr].clientID = nullClientID - ELSE 0 IN - - \* new chain record with client created - LET clientCreateChain == - IF nextClientNr \in DOMAIN chain.clientStates - THEN [chain EXCEPT !.clientStates = - [chain.clientStates EXCEPT ![nextClientNr] = - \* if the slot at nextClientNr is an empty slot - IF /\ chain.clientStates[nextClientNr].clientID = nullClientID - \* if the set of heights from datagrams is not empty - /\ createClientHeights /= {} - \* then create a client with clientID at the slot nextClientNr - THEN [clientID |-> clientID, - heights |-> {Max(createClientHeights)}] - \* otherwise, discard CreateClient datagrams - ELSE chain.clientStates[nextClientNr] - ]] - ELSE chain IN - - clientCreateChain - -\* Handle "ClientUpdate" datagrams -\* @type: (CHAINSTORE, Str, Set(DATAGRAM), Int) => CHAINSTORE; -HandleClientUpdate(chain, clientID, datagrams, MaxHeight) == - \* get the client number of the client with clientID - LET clientNr == IF \E clientNr \in DOMAIN chain.clientStates : - chain.clientStates[clientNr].clientID = clientID - THEN CHOOSE clientNr \in DOMAIN chain.clientStates : - chain.clientStates[clientNr].clientID = clientID - ELSE 0 IN - \* max client height of client ID - LET maxClientHeight == IF clientNr /= 0 - THEN Max(chain.clientStates[clientNr].heights) - ELSE MaxHeight IN - \* get "ClientUpdate" datagrams with valid clientID - LET updateClientDgrs == {dgr \in datagrams : - /\ dgr.type = "ClientUpdate" - /\ dgr.clientID = clientID - /\ maxClientHeight < dgr.height} IN - \* get heights in datagrams with correct counterparty clientID for chainID - LET updateClientHeights == {dgr.height : dgr \in updateClientDgrs} IN - - \* new chain record with client updated - LET clientUpdatedChain == - IF clientNr \in DOMAIN chain.clientStates - THEN [chain EXCEPT !.clientStates = - [chain.clientStates EXCEPT ![clientNr] = - \* if clientNr is a valid client number - IF /\ clientNr \in DOMAIN chain.clientStates - \* if the slot at clientNr holds a client with clientID - /\ chain.clientStates[clientNr].clientID = clientID - THEN [chain.clientStates[clientNr] EXCEPT !.heights = - chain.clientStates[clientNr].heights \union updateClientHeights] - ELSE 
chain.clientStates[clientNr] - ]] - ELSE chain IN - - clientUpdatedChain - -============================================================================= -\* Modification History -\* Last modified Wed Apr 14 18:46:39 CEST 2021 by ilinastoilkovska -\* Created Tue Apr 07 16:42:47 CEST 2020 by ilinastoilkovska diff --git a/docs/spec/tla/client/ICS02Definitions.tla b/docs/spec/tla/client/ICS02Definitions.tla deleted file mode 100644 index d7b367166a..0000000000 --- a/docs/spec/tla/client/ICS02Definitions.tla +++ /dev/null @@ -1,120 +0,0 @@ --------------------------- MODULE ICS02Definitions -------------------------- - -(*************************************************************************** - This module contains definitions of operators that are shared between the - different modules, and which are relevant for ICS02. - ***************************************************************************) - -EXTENDS Integers, FiniteSets, Sequences - -(************************ TYPE ALIASES FOR SNOWCAT *************************) -(* @typeAlias: CLIENTSTATE = - [ - clientID: Str, - heights: Set(Int) - ]; -*) -(* @typeAlias: CHAINSTORE = - [ - height: Int, - clientStates: Int -> CLIENTSTATE - ]; -*) -(* @typeAlias: DATAGRAM = - [ - type: Str, - clientID: Str, - height: Int - ]; -*) - -(********************** Common operator definitions ***********************) -ChainIDs == {"chainA", "chainB"} - -nullHeight == 0 -nullClientID == "none" - -Max(S) == CHOOSE x \in S: \A y \in S: y <= x - -BoundedSeq(S, bound) == UNION {[1..n -> S] : n \in 1..bound} - -SetHeights(h1, h2) == {h \in 1..10 : h1 <= h /\ h <= h2} - -(****************************** ClientStates ******************************* - A client state is a set of heights - ***************************************************************************) -ClientStates(ClientIDs, maxHeight) == - [ - clientID : ClientIDs, - heights : SUBSET(1..maxHeight) - ] - -NullClientState == - [ - clientID |-> nullClientID, - heights |-> {} - ] - -(******************************** ChainStores ****************************** - A set of chain store records, with fields relevant for ICS02. - A chain store record contains the following fields: - - - height : an integer between nullHeight and MaxHeight. - Stores the current height of the chain. - - - counterpartyClientHeights : a set of integers between 1 and MaxHeight - Stores the heights of the client for the counterparty chain. - - ***************************************************************************) -ChainStores(NrClients, ClientIDs, maxHeight) == - [ - height : 1..maxHeight, - clientStates : [1..NrClients -> ClientStates(ClientIDs, maxHeight) \union {NullClientState}] - ] - -(******************************** Datagrams ********************************) -\* Set of datagrams -Datagrams(ClientIDs, maxHeight) == - [type : {"CreateClient"}, clientID : ClientIDs, height : 1..maxHeight] - \union - [type : {"ClientUpdate"}, clientID : ClientIDs, height : 1..maxHeight] - -\* Set of client datagrams for a specific set ClientIDs of client IDs. 
-ClientDatagrams(ClientIDs, Heights) == - [type : {"CreateClient"}, clientID : ClientIDs, height : Heights] - \union - [type : {"ClientUpdate"}, clientID : ClientIDs, height : Heights] - -\* Null datagram -NullDatagram == - [type |-> "null"] - -(*************************************************************************** - Initial value of a chain store for ICS02 - ***************************************************************************) -\* Initial value of the chain store for ICS02: -\* - height is initialized to 1 -\* - the counterparty clients are uninitialized -ICS02InitChainStore(NrClients, ClientIDs) == - [ - height |-> 1, - clientStates |-> [clientNr \in 1..NrClients |-> NullClientState] - ] - -(*************************************************************************** - Client helper operators - ***************************************************************************) - -\* get the ID of chainID's counterparty chain -GetCounterpartyChainID(chainID) == - IF chainID = "chainA" THEN "chainB" ELSE "chainA" - -\* get the latest height of chainID -\* @type: (CHAINSTORE) => Int; -GetLatestHeight(chain) == - chain.height - -========================================================================= -\* Modification History -\* Last modified Thu Apr 15 12:17:55 CEST 2021 by ilinastoilkovska -\* Created Tue Oct 06 16:26:25 CEST 2020 by ilinastoilkovska diff --git a/docs/spec/tla/client/ICS02SingleChainEnvironment.tla b/docs/spec/tla/client/ICS02SingleChainEnvironment.tla deleted file mode 100644 index 41a58038ee..0000000000 --- a/docs/spec/tla/client/ICS02SingleChainEnvironment.tla +++ /dev/null @@ -1,107 +0,0 @@ --------------------- MODULE ICS02SingleChainEnvironment -------------------- - -(*************************************************************************** - A TLA+ specification of the IBC client protocol (ICS02). This module models - a system consisting of one chain that can create multiple clients, and which - operates in an environment that overapproximates the behavior of a correct - relayer. 
- ***************************************************************************) - -EXTENDS Integers, FiniteSets, Sequences, ICS02Definitions - -CONSTANTS - \* @type: Int; - MaxHeight, \* maximal height of all the chains in the system - \* @type: Int; - NrClientsChainA, \* number of clients that will be created on the chain - \* @type: Set(Str); - ClientIDsChainA \* a set of counterparty client IDs for the chain - -ASSUME MaxHeight < 10 - -VARIABLES - \* @type: CHAINSTORE; - chainAstore, \* store of ChainA - \* @type: Set(DATAGRAM); - datagramsChainA, \* set of datagrams incoming to ChainA - \* @type: Str -> [created: Bool, updated: Bool]; - history \* history variable - -vars == <> - -(*************************************************************************** - Instances of ICS02Chain - ***************************************************************************) - -\* We suppose there is a single chain, ChainA -\* ChainA -- Instance of Chain.tla -ChainA == INSTANCE Chain - WITH ChainID <- "chainA", - NrClients <- NrClientsChainA, - ClientIDs <- ClientIDsChainA, - chainStore <- chainAstore, - incomingDatagrams <- datagramsChainA - -(*************************************************************************** - ICS02Environment actions - ***************************************************************************) - -\* non-deterministically create datagrams -CreateDatagrams == - \* pick a sequence from the set of client datagrams non-deterministically - /\ datagramsChainA' \in - SUBSET ClientDatagrams( - ClientIDsChainA, - SetHeights(1, MaxHeight) - ) - - /\ UNCHANGED <> - - -(*************************************************************************** - Component actions - ***************************************************************************) - -\* ChainAction: the chain takes a step -ChainAction == - ChainA!Next - -\* EnvironmentAction: non-deterministically create datagrams -EnvironmentAction == - CreateDatagrams - -(*************************************************************************** - Specification - ***************************************************************************) -\* Initial state predicate -Init == - /\ ChainA!Init - /\ history = [clientID \in ClientIDsChainA |-> [created |-> FALSE, updated |-> FALSE]] - -\* Next state action -Next == - \/ ChainAction - \/ EnvironmentAction - \/ UNCHANGED vars - -\* Specification formula -Spec == Init /\ [][Next]_vars - -(*************************************************************************** - Invariants - ***************************************************************************) - -\* type invariant -TypeOK == - /\ ChainA!TypeOK - /\ history \in [ClientIDsChainA -> [created : BOOLEAN, updated : BOOLEAN]] - -\* conjunction of invariants -ICS02SingleChainInv == - /\ ChainA!CreatedClientsHaveDifferentIDs - /\ ChainA!UpdatedClientsAreCreated - -============================================================================= -\* Modification History -\* Last modified Thu Apr 15 12:16:46 CEST 2021 by ilinastoilkovska -\* Created Fri Oct 02 12:57:19 CEST 2020 by ilinastoilkovska diff --git a/docs/spec/tla/client/ICS02TwoChainsEnvironment.tla b/docs/spec/tla/client/ICS02TwoChainsEnvironment.tla deleted file mode 100644 index f8ed9f67b1..0000000000 --- a/docs/spec/tla/client/ICS02TwoChainsEnvironment.tla +++ /dev/null @@ -1,167 +0,0 @@ ----------------------- MODULE ICS02TwoChainsEnvironment ---------------------- - -(*************************************************************************** - A TLA+ specification of the IBC client 
protocol (ICS02). This module models - a system consisting of two chain that can create multiple clients, and which - operate in an environment that overapproximates the behavior of a correct - relayer. - ***************************************************************************) - -EXTENDS Integers, FiniteSets, Sequences, ICS02Definitions - -CONSTANTS - \* @type: Int; - MaxHeight, \* maximal height of all the chains in the system - \* @type: Int; - NrClientsChainA, \* number of clients that will be created on ChainA - \* @type: Int; - NrClientsChainB, \* number of clients that will be created on ChainB - \* @type: Set(Str); - ClientIDsChainA, \* a set of counterparty client IDs for ChainA - \* @type: Set(Str); - ClientIDsChainB \* a set of counterparty client IDs for ChainB - -ASSUME MaxHeight < 10 -ASSUME ClientIDsChainA \intersect ClientIDsChainB = {} - -VARIABLES - \* @type: CHAINSTORE; - chainAstore, \* store of ChainA - \* @type: CHAINSTORE; - chainBstore, \* store of ChainB - \* @type: Set(DATAGRAM); - datagramsChainA, \* set of datagrams incoming to ChainA - \* @type: Set(DATAGRAM); - datagramsChainB, \* set of datagrams incoming to ChainB - \* @type: Str -> [created: Bool, updated: Bool]; - history \* history variable - -chainAvars == <> -chainBvars == <> -vars == <> - -(*************************************************************************** - Instances of ICS02Chain - ***************************************************************************) - -\* We suppose there are two chains that communicate, ChainA and ChainB -\* ChainA -- Instance of Chain.tla -ChainA == INSTANCE Chain - WITH ChainID <- "chainA", - NrClients <- NrClientsChainA, - ClientIDs <- ClientIDsChainA, - chainStore <- chainAstore, - incomingDatagrams <- datagramsChainA - -\* ChainB -- Instance of Chain.tla -ChainB == INSTANCE Chain - WITH ChainID <- "chainB", - NrClients <- NrClientsChainB, - ClientIDs <- ClientIDsChainB, - chainStore <- chainBstore, - incomingDatagrams <- datagramsChainB - -GetChainByID(chainID) == - IF chainID = "chainA" - THEN chainAstore - ELSE chainBstore - -GetNrClientsByID(chainID) == - IF chainID = "chainA" - THEN NrClientsChainA - ELSE NrClientsChainB - - -(*************************************************************************** - ICS02Environment actions - ***************************************************************************) - -\* non-deterministically create datagrams -CreateDatagrams == - \* pick a sequence from the set of client datagrams non-deterministically - \* for each chain - /\ datagramsChainA = {} - /\ datagramsChainB = {} - /\ datagramsChainA' \in - SUBSET ClientDatagrams( - ClientIDsChainA, - SetHeights(1, GetLatestHeight(GetChainByID("chainB"))) - ) - /\ datagramsChainB' \in - SUBSET ClientDatagrams( - ClientIDsChainB, - SetHeights(1, GetLatestHeight(GetChainByID("chainA"))) - ) - - /\ UNCHANGED <> - /\ UNCHANGED history - - -(*************************************************************************** - Component actions - ***************************************************************************) - -\* ChainAction: either chain takes a step, leaving the other -\* variables unchanged -ChainAction == - \/ /\ ChainA!Next - /\ UNCHANGED chainBvars - \/ /\ ChainB!Next - /\ UNCHANGED chainAvars - -\* EnvironmentAction: non-deterministically create datagrams -EnvironmentAction == - CreateDatagrams - -(*************************************************************************** - Specification - 
***************************************************************************) -\* Initial state predicate -Init == - /\ ChainA!Init - /\ ChainB!Init - /\ history = [clientID \in (ClientIDsChainA \union ClientIDsChainB) |-> - [created |-> FALSE, updated |-> FALSE]] - -\* Next state action -Next == - \/ ChainAction - \/ EnvironmentAction - \/ UNCHANGED vars - -\* Specification formula -Spec == Init /\ [][Next]_vars - -(*************************************************************************** -Invariants - ***************************************************************************) - -\* type invariant -TypeOK == - /\ ChainA!TypeOK - /\ ChainB!TypeOK - /\ history \in [ClientIDsChainA -> [created : BOOLEAN, updated : BOOLEAN]] - -\* the maximum client height is less than or equal to the current height of -\* the counterparty chain -ClientHeightsAreBelowCounterpartyHeight == - \A chainID \in ChainIDs : - \A clientNr \in 1..GetNrClientsByID(chainID) : - (GetChainByID(chainID).clientStates[clientNr].heights /= {} - => (Max(GetChainByID(chainID).clientStates[clientNr].heights) - <= GetLatestHeight(GetChainByID(GetCounterpartyChainID(chainID))))) - -\* conjunction of invariants -ICS02TwoChainsInv == - /\ ChainA!CreatedClientsHaveDifferentIDs - /\ ChainA!UpdatedClientsAreCreated - /\ ChainB!CreatedClientsHaveDifferentIDs - /\ ChainB!UpdatedClientsAreCreated - /\ ClientHeightsAreBelowCounterpartyHeight - -============================================================================= -\* Modification History -\* Last modified Wed Apr 14 19:08:27 CEST 2021 by ilinastoilkovska -\* Created Fri Oct 02 12:57:19 CEST 2020 by ilinastoilkovska diff --git a/docs/spec/tla/client/MC_SingleChain.tla b/docs/spec/tla/client/MC_SingleChain.tla deleted file mode 100644 index 68dd22c294..0000000000 --- a/docs/spec/tla/client/MC_SingleChain.tla +++ /dev/null @@ -1,17 +0,0 @@ ---------------------------- MODULE MC_SingleChain --------------------------- - -MaxHeight == 4 -NrClientsChainA == 2 -ClientIDsChainA == {"B1", "B2"} - -VARIABLES - \* @type: CHAINSTORE; - chainAstore, \* store of ChainA - \* @type: Set(DATAGRAM); - datagramsChainA, \* set of datagrams incoming to ChainA - \* @type: Str -> [created: Bool, updated: Bool]; - history \* history variable - -INSTANCE ICS02SingleChainEnvironment - -============================================================================= diff --git a/docs/spec/tla/client/MC_TwoChains.tla b/docs/spec/tla/client/MC_TwoChains.tla deleted file mode 100644 index 7bbf436552..0000000000 --- a/docs/spec/tla/client/MC_TwoChains.tla +++ /dev/null @@ -1,22 +0,0 @@ ----------------------------- MODULE MC_TwoChains ---------------------------- - -MaxHeight == 4 -NrClientsChainA == 2 -NrClientsChainB == 2 -ClientIDsChainA == {"B1", "B2"} -ClientIDsChainB == {"A1", "A2"} - -VARIABLES - \* @type: CHAINSTORE; - chainAstore, \* store of ChainA - \* @type: CHAINSTORE; - chainBstore, \* store of ChainB - \* @type: Set(DATAGRAM); - datagramsChainA, \* set of datagrams incoming to ChainA - \* @type: Set(DATAGRAM); - datagramsChainB, \* set of datagrams incoming to ChainB - \* @type: Str -> [created: Bool, updated: Bool]; - history \* history variable - -INSTANCE ICS02TwoChainsEnvironment -============================================================================= \ No newline at end of file diff --git a/docs/spec/tla/client/README.md b/docs/spec/tla/client/README.md deleted file mode 100644 index fbfbbbbece..0000000000 --- a/docs/spec/tla/client/README.md +++ /dev/null @@ -1,80 +0,0 @@ -# TLA+ 
specification of the IBC Core Client Protocol - -This document describes the TLA+ models of the core logic of the English specification -[ICS02](https://github.com/cosmos/ibc/tree/5877197dc03e844542cb8628dd52674a37ca6ff9/spec/ics-002-client-semantics). -We start by discussing [the model of the -protocol](#the-model-of-the-protocol). -Then, we discuss the [invariants](#invariants) that we formalize, and finally, we -discuss how to [use the model](#using-the-model). - -## The Model of the Protocol - -We present models of two different systems, which are used to check -different invariants: -1. The first system, specified in [ICS02SingleChainEnvironment.tla](ICS02SingleChainEnvironment.tla), consists of a single chain that can -create multiple clients. -The chain operates in an environment that overapproximates the -behavior of a correct relayer. -2. The second system, specified in [ICS02TwoChainsEnvironment.tla](ICS02TwoChainsEnvironment.tla), consists of two chains that can -create multiple clients. -The relayer is again overapproximated using an environment. - -Both systems extend the following modules: -- [Chain.tla](Chain.tla), which models the behavior of a chain running the IBC Core Client Protocol. -- [ICS02ClientHandlers.tla](ICS02ClientHandlers.tla), which contains definitions of -operators that are used to handle client creation and client update events. -- [ICS02Definitions.tla](ICS02Definitions.tla), which contains definitions of operators that are shared between the - different modules, and which are relevant for ICS02. - -## Invariants - -The module [Chain.tla](Chain.tla) defines the following invariants: -- `TypeOK`, the type invariant, -- `CreatedClientsHaveDifferentIDs`, which ensures that two clients with the same ID cannot be created, -- `UpdatedClientsAreCreated`, which ensures that only created clients can be updated. - -These invariants are checked for a system with a single chain in [ICS02SingleChainEnvironment.tla](ICS02SingleChainEnvironment.tla), and for a system of two chains in [ICS02TwoChainsEnvironment.tla](ICS02TwoChainsEnvironment.tla). -Additionally, [ICS02TwoChainsEnvironment.tla](ICS02TwoChainsEnvironment.tla) checks the invariant: -- `ClientHeightsAreBelowCounterpartyHeight`, which ensures that the maximum client -height is less than or equal to the current height of the counterparty chain. - - -## Using the Model - -### Constants - -The modules `ICS02SingleChainEnvironment.tla` and `ICS02TwoChainsEnvironment.tla` -are parameterized by the constants: - - `MaxHeight`, a natural number denoting the maximal height of the chains, - - `NrClientsChainA`, the number of clients that will be created on ChainA, - - `NrClientsChainB`, the number of clients that will be created on ChainB, - - `ClientIDsChainA`, a set of counterparty client IDs for ChainA, - - `ClientIDsChainB`, a set of counterparty client IDs for ChainB. - -We assume that the sets `ClientIDsChainA` and `ClientIDsChainB` contain distinct -client IDs.
- - -### Importing the specification into TLA+ toolbox - -To import the specification in the TLA+ toolbox and run TLC: - - add a new spec in TLA+ toolbox with the root-module file `ICS02SingleChainEnvironment.tla` (or `ICS02TwoChainsEnvironment.tla`) - - create a model - - assign a value to the constants (example values can be found in `ICS02SingleChainEnvironment.cfg` (or `ICS02TwoChainsEnvironment.tla`)) - - choose "Temporal formula" as the behavior spec, and use the formula `Spec` - - choose invariants/properties that should be checked - - run TLC on the model - -#### Basic checks with TLC - -We ran TLC on `ICS02SingleChainEnvironment.tla` using the constants defined -in `ICS02SingleChainEnvironment.cfg`. -We were able to check the invariants described above within seconds. - -#### Apalache - -The specification contains type annotations for the -model checker [Apalache](https://github.com/informalsystems/apalache). -The specification passes the type check using the type checker [Snowcat](https://apalache.informal.systems/docs/apalache/typechecker-snowcat.html) -integrated in Apalache. - diff --git a/docs/spec/tla/fungible-token-transfer/Bank.tla b/docs/spec/tla/fungible-token-transfer/Bank.tla deleted file mode 100644 index 20e1af41ff..0000000000 --- a/docs/spec/tla/fungible-token-transfer/Bank.tla +++ /dev/null @@ -1,115 +0,0 @@ --------------------------------- MODULE Bank -------------------------------- - -(*************************************************************************** - This module contains definitions of operators that model the behavior of - the bank module. - ***************************************************************************) - -EXTENDS Integers, FiniteSets - -\* subtract coins from account -\* @type: (ACCOUNT -> Int, ACCOUNT, Int) => ACCOUNT -> Int; -SubtractCoins(accounts, accountID, amount) == - [accounts EXCEPT ![accountID] = accounts[accountID] - amount] - -\* add coins to account -\* @type: (ACCOUNT -> Int, ACCOUNT, Int) => ACCOUNT -> Int; -AddCoins(accounts, accountID, amount) == - LET newDomain == (DOMAIN accounts) \union {accountID} IN - - \* if an account with accountID exists - IF accountID \in DOMAIN accounts - \* add amount to account - THEN [accounts EXCEPT ![accountID] = accounts[accountID] + amount] - \* otherwise create a new account with balance equal to amount - \* and add it to the map - ELSE [accID \in newDomain |-> - IF accID = accountID - THEN amount - ELSE accounts[accID] - ] - - -\* Transfer coins from senderAccounts to receiverAccounts, depeding on -\* the sender addressees, receiver addressees and denomination -\* - senderAccounts is a map from sender addresses and denominations -\* to account balances -\* - receiverAccounts is a map from receiver addresses and denominations -\* to account balances -(* @type: (ACCOUNT -> Int, Str, ACCOUNT -> Int, Str, Seq(Str), Int) => - [senderAccounts: ACCOUNT -> Int, receiverAccounts: ACCOUNT -> Int, error: Bool]; -*) -TransferCoins(senderAccounts, senderAddr, - receiverAccounts, receiverAddr, - denomination, amount) == - LET senderAccountID == <> IN - LET receiverAccountID == <> IN - - LET senderBalance == senderAccounts[senderAccountID] IN - - \* if the sender account exists and its balance is sufficient - IF /\ senderAccountID \in DOMAIN senderAccounts - /\ senderBalance - amount >= 0 - \* subtract coins from senderAccountID and add coins to receiverAccountID - THEN [ - senderAccounts |-> SubtractCoins(senderAccounts, senderAccountID, amount), - receiverAccounts |-> 
AddCoins(receiverAccounts, receiverAccountID, amount), - error |-> FALSE - ] - \* otherwise report an error - ELSE [ - senderAccounts |-> senderAccounts, - receiverAccounts |-> receiverAccounts, - error |-> TRUE - ] - - -\* Burn coins on accounts, depending on the address and -\* denomination -\* - accounts is a map from addresses and denominations -\* to account balances -\* @type: (ACCOUNT -> Int, Str, Seq(Str), Int) => [accounts: ACCOUNT -> Int, error: Bool]; -BurnCoins(accounts, address, denomination, amount) == - LET accountID == <> IN - LET balance == accounts[accountID] IN - - \* if the account exists and its balance is sufficient - IF /\ accountID \in DOMAIN accounts - /\ balance - amount >= 0 - \* subtract coins from accountID - THEN [ - accounts |-> SubtractCoins(accounts, accountID, amount), - error |-> FALSE - ] - \* otherwise report an error - ELSE [ - accounts |-> accounts, - error |-> TRUE - ] - - -\* Mint new coins of denomination to account with the given address -\* @type: (ACCOUNT -> Int, Str, Seq(Str), Int, Int) => [accounts: ACCOUNT -> Int, error: Bool]; -MintCoins(accounts, address, denomination, amount, maxBalance) == - LET accountID == <> IN - - \* if the new balance does not exceed maxBalance - IF \/ /\ accountID \notin DOMAIN accounts - /\ amount <= maxBalance - \/ /\ accountID \in DOMAIN accounts - /\ accounts[accountID] + amount <= maxBalance - \* add coins to accountID - THEN [ - accounts |-> AddCoins(accounts, accountID, amount), - error |-> FALSE - ] - \* otherwise report an error - ELSE [ - accounts |-> accounts, - error |-> TRUE - ] - -============================================================================= -\* Modification History -\* Last modified Wed Apr 14 14:50:41 CEST 2021 by ilinastoilkovska -\* Created Thu Oct 28 19:49:56 CET 2020 by ilinastoilkovska diff --git a/docs/spec/tla/fungible-token-transfer/Chain.tla b/docs/spec/tla/fungible-token-transfer/Chain.tla deleted file mode 100644 index af779b2999..0000000000 --- a/docs/spec/tla/fungible-token-transfer/Chain.tla +++ /dev/null @@ -1,203 +0,0 @@ -------------------------------- MODULE Chain ------------------------------- - -(*************************************************************************** - This module models the behavior of a chain running the IBC Token Transfer - Protocol. - - The chain state is represented by a chain store, which is a snapshot of the - provable and private stores, to the extent necessary for ICS20. Additionally, - a chain has a dedicated datagram container for packet datagrams (given by a - queue of datagrams that models the order in which the datagrams were submitted - by some relayer). 
-***************************************************************************) - -EXTENDS Integers, FiniteSets, Sequences, IBCTokenTransferDefinitions, - ICS04PacketHandlers, ICS20FungibleTokenTransferHandlers - -CONSTANTS - MaxHeight, \* maximal chain height - MaxPacketSeq, \* maximal packet sequence number - MaxBalance, \* maximal account balance - ChainID, \* a chain ID - NativeDenomination \* native denomination of tokens at ChainID - - -VARIABLES - chainStore, \* chain store, containing client heights, a channel end - incomingPacketDatagrams, \* sequence of incoming packet datagrams - appPacketSeq, \* packet sequence number from the application on the chain - packetLog, \* packet log - accounts, \* a map from chainIDs and denominations to account balances - escrowAccounts \* a map from channelIDs and denominations to escrow account balances - - -vars == <> -Heights == 1..MaxHeight \* set of possible heights of the chains in the system - -(*************************************************************************** - Token transfer operators - ***************************************************************************) -\* Create a packet: Abstract away from timestamp. -\* Assume timeoutHeight is MaxHeight -CreatePacket(packetData) == - LET channelEnd == chainStore.channelEnd IN - [ - sequence |-> appPacketSeq, - timeoutHeight |-> MaxHeight, - data |-> packetData, - srcPortID |-> channelEnd.portID, - srcChannelID |-> channelEnd.channelID, - dstPortID |-> channelEnd.counterpartyPortID, - dstChannelID |-> channelEnd.counterpartyChannelID - ] - - -\* Update the chain store and packet log with ICS20 packet datagrams -\* @type: (Str, DATAGRAM, Seq(LOGENTRY)) => [store: CHAINSTORE, log: Seq(LOGENTRY), accounts: ACCOUNT -> Int, escrowAccounts: ACCOUNT -> Int]; -TokenTransferUpdate(chainID, packetDatagram, log) == - LET packet == packetDatagram.packet IN - \* get the new updated store, packet log, and accounts - LET tokenTransferUpdate == - IF packetDatagram.type = "PacketRecv" - THEN HandlePacketRecv(chainID, chainStore, packetDatagram, log, accounts, escrowAccounts, MaxBalance) - ELSE IF packetDatagram.type = "PacketAck" - THEN HandlePacketAck(chainStore, packetDatagram, log, accounts, escrowAccounts, MaxBalance) - ELSE [store |-> chainStore, - log |-> log, - accounts |-> accounts, - escrowAccounts |-> escrowAccounts] - IN - - LET tokenTransferStore == tokenTransferUpdate.store IN - - \* update height - LET updatedStore == - IF tokenTransferStore.height + 1 \in Heights - THEN [tokenTransferStore EXCEPT !.height = tokenTransferStore.height + 1] - ELSE tokenTransferStore - IN - - [store |-> updatedStore, - log |-> tokenTransferUpdate.log, - accounts |-> tokenTransferUpdate.accounts, - escrowAccounts |-> tokenTransferUpdate.escrowAccounts] - -(*************************************************************************** - Chain actions - ***************************************************************************) -\* Advance the height of the chain until MaxHeight is reached -AdvanceChain == - /\ chainStore.height + 1 \in Heights - /\ chainStore' = [chainStore EXCEPT !.height = chainStore.height + 1] - /\ UNCHANGED <> - /\ UNCHANGED <> - -\* handle the incoming packet datagrams -HandlePacketDatagrams == - \* enabled if incomingPacketDatagrams is not empty - /\ incomingPacketDatagrams /= <<>> - /\ LET tokenTransferUpdate == TokenTransferUpdate(ChainID, Head(incomingPacketDatagrams), packetLog) IN - /\ chainStore' = tokenTransferUpdate.store - /\ packetLog' = tokenTransferUpdate.log - /\ accounts' = 
tokenTransferUpdate.accounts - /\ escrowAccounts' = tokenTransferUpdate.escrowAccounts - /\ incomingPacketDatagrams' = Tail(incomingPacketDatagrams) - /\ UNCHANGED appPacketSeq - -\* Send a packet -SendPacket == - \* enabled if appPacketSeq is not bigger than MaxPacketSeq - /\ appPacketSeq <= MaxPacketSeq - \* Create packet data - /\ LET createOutgoingPacketOutcome == - CreateOutgoingPacketData(accounts, - escrowAccounts, - <>, - MaxBalance, - ChainID, - GetCounterpartyChainID(ChainID)) IN - \* do nothing if there is an error - \/ /\ createOutgoingPacketOutcome.error - /\ UNCHANGED vars - \* if there is no error, send packet - \/ /\ ~createOutgoingPacketOutcome.error - /\ LET packet == CreatePacket(createOutgoingPacketOutcome.packetData) IN - LET updatedChainStore == WritePacketCommitment(chainStore, packet) IN - \* if writing the packet commitment was successful - /\ chainStore /= updatedChainStore - \* update chain store with packet committment - /\ chainStore' = updatedChainStore - \* log sent packet - /\ packetLog' = Append(packetLog, [ - type |-> "PacketSent", - srcChainID |-> ChainID, - sequence |-> packet.sequence, - timeoutHeight |-> packet.timeoutHeight, - data |-> packet.data - ]) - \* update bank accounts - /\ accounts' = createOutgoingPacketOutcome.accounts - \* update escrow accounts - /\ escrowAccounts' = createOutgoingPacketOutcome.escrowAccounts - \* increase application packet sequence - /\ appPacketSeq' = appPacketSeq + 1 - /\ UNCHANGED incomingPacketDatagrams - - - -\* Acknowledge a packet -AcknowledgePacket == - /\ chainStore.packetsToAcknowledge /= <<>> - \* write acknowledgements to chain store - /\ chainStore' = WriteAcknowledgement(chainStore, Head(chainStore.packetsToAcknowledge)) - \* log acknowledgement - /\ packetLog' = LogAcknowledgement(ChainID, chainStore, packetLog, Head(chainStore.packetsToAcknowledge)) - /\ UNCHANGED <> - /\ UNCHANGED <> - -(*************************************************************************** - Specification - ***************************************************************************) -\* Initial state predicate -\* Initially -\* - the chain store is initialized to -\* ICS20InitChainStore(ChainID, <>) -\* (defined in IBCTokenTransferDefinitions.tla) -\* - incomingPacketDatagrams is an empty sequence -\* - the appPacketSeq is set to 1 -Init == - /\ chainStore = ICS20InitChainStore(ChainID) - /\ incomingPacketDatagrams = <<>> - /\ appPacketSeq = 1 - -\* Next state action -\* The chain either -\* - advances its height -\* - receives datagrams and updates its state -\* - sends a packet -\* - acknowledges a packet -Next == - \/ AdvanceChain - \/ HandlePacketDatagrams - \/ SendPacket - \/ AcknowledgePacket - \/ UNCHANGED vars - -Fairness == - /\ WF_vars(Next) - -(*************************************************************************** - Invariants - ***************************************************************************) -\* Type invariant -\* ChainStores, Datagrams, PacketLogEntries are defined in IBCTokenTransferDefinitions.tla -TypeOK == - /\ chainStore \in ChainStores(Heights, MaxPacketSeq, MaxBalance, {NativeDenomination}) - /\ appPacketSeq \in 1..(MaxPacketSeq + 1) - - -============================================================================= -\* Modification History -\* Last modified Wed Apr 14 15:33:11 CEST 2021 by ilinastoilkovska -\* Created Mon Oct 17 13:01:03 CEST 2020 by ilinastoilkovska diff --git a/docs/spec/tla/fungible-token-transfer/IBCTokenTransfer.cfg 
b/docs/spec/tla/fungible-token-transfer/IBCTokenTransfer.cfg deleted file mode 100644 index aa0fe31d8d..0000000000 --- a/docs/spec/tla/fungible-token-transfer/IBCTokenTransfer.cfg +++ /dev/null @@ -1,16 +0,0 @@ -CONSTANTS - MaxHeight = 5 - MaxPacketSeq = 5 - MaxBalance = 5 - NativeDenominationChainA = "atom" - NativeDenominationChainB = "eth" - -INIT Init -NEXT Next - -INVARIANTS - TypeOK - ICS20Inv - -PROPERTIES - ICS20Prop \ No newline at end of file diff --git a/docs/spec/tla/fungible-token-transfer/IBCTokenTransfer.tla b/docs/spec/tla/fungible-token-transfer/IBCTokenTransfer.tla deleted file mode 100644 index ef1df144f5..0000000000 --- a/docs/spec/tla/fungible-token-transfer/IBCTokenTransfer.tla +++ /dev/null @@ -1,440 +0,0 @@ -------------------------- MODULE IBCTokenTransfer ------------------------- - -(*************************************************************************** - A TLA+ specification of the IBC Fungible Token Transfer Protocol (ICS20). - This module is the main module in the specification and models a - system of two chains, where each chain perofmrs a transaction that sends - 1 token to the respective counterparty. - - The specification also contains type annotations for the model checker - Apalache. - ***************************************************************************) - -EXTENDS Integers, FiniteSets, Sequences, IBCTokenTransferDefinitions - -CONSTANTS - \* @type: Int; - MaxHeight, \* maximal height of all the chains in the system - \* @type: Int; - MaxPacketSeq, \* maximal packet sequence number - \* @type: Int; - MaxBalance, \* maximal account balance - \* @type: Str; - NativeDenominationChainA, \* native denomination of tokens at ChainA - \* @type: Str; - NativeDenominationChainB \* native denomination of tokens at ChainA - -VARIABLES - \* @type: CHAINSTORE; - chainAstore, \* store of ChainA - \* @type: CHAINSTORE; - chainBstore, \* store of ChainB - \* @type: Seq(DATAGRAM); - packetDatagramsChainA, \* sequence of packet datagrams incoming to ChainA - \* @type: Seq(DATAGRAM); - packetDatagramsChainB, \* sequence of packet datagrams incoming to ChainB - \* @type: Seq(LOGENTRY); - packetLog, \* packet log - \* @type: Int; - appPacketSeqChainA, \* packet sequence number from the application on ChainA - \* @type: Int; - appPacketSeqChainB, \* packet sequence number from the application on ChainB - \* @type: ACCOUNT -> Int; - accounts, \* a map from chainIDs and denominations to account balances - \* @type: ACCOUNT -> Int; - escrowAccounts \* a map from channelIDs and denominations to escrow account balances - -chainAvars == <> -chainBvars == <> -vars == <> - -Heights == 1..MaxHeight -NativeDenominations == {NativeDenominationChainA, NativeDenominationChainB} -AllDenominations == Seq(ChannelIDs \union PortIDs \union NativeDenominations) - -(*************************************************************************** - Instances of ICS20Chain - ***************************************************************************) - -\* We suppose there are two chains that communicate, ChainA and ChainB -\* ChainA -- Instance of Chain.tla -ChainA == INSTANCE Chain - WITH ChainID <- "chainA", - NativeDenomination <- NativeDenominationChainA, - chainStore <- chainAstore, - incomingPacketDatagrams <- packetDatagramsChainA, - appPacketSeq <- appPacketSeqChainA - -\* ChainB -- Instance of Chain.tla -ChainB == INSTANCE Chain - WITH ChainID <- "chainB", - NativeDenomination <- NativeDenominationChainB, - chainStore <- chainBstore, - incomingPacketDatagrams <- 
packetDatagramsChainB, - appPacketSeq <- appPacketSeqChainB - - (*************************************************************************** - Environment operators - ***************************************************************************) - -\* get chain store by ID -GetChainByID(chainID) == - IF chainID = "chainA" - THEN chainAstore - ELSE chainBstore - -\* update the client height of the client for the counterparty chain of chainID -UpdateClientHeights(chainID) == - - /\ \/ /\ chainID = "chainA" - /\ chainAstore' = [chainAstore EXCEPT - !.counterpartyClientHeights = - chainAstore.counterpartyClientHeights - \union - {chainBstore.height} - ] - /\ UNCHANGED chainBstore - \/ /\ chainID = "chainB" - /\ chainBstore' = [chainBstore EXCEPT - !.counterpartyClientHeights = - chainBstore.counterpartyClientHeights - \union - {chainAstore.height} - ] - /\ UNCHANGED chainAstore - /\ UNCHANGED <> - /\ UNCHANGED <> - - -\* Compute a packet datagram designated for dstChainID, based on the packetLogEntry -\* @type: (Str, Str, LOGENTRY) => DATAGRAM; -PacketDatagram(srcChainID, dstChainID, packetLogEntry) == - - LET srcChannelID == GetChannelID(srcChainID) IN \* "chanAtoB" (if srcChainID = "chainA") - LET dstChannelID == GetChannelID(dstChainID) IN \* "chanBtoA" (if dstChainID = "chainB") - - LET srcPortID == GetPortID(srcChainID) IN \* "portA" (if srcChainID = "chainA") - LET dstPortID == GetPortID(dstChainID) IN \* "portB" (if dstChainID = "chainB") - - LET srcHeight == GetLatestHeight(GetChainByID(srcChainID)) IN - - \* the source chain of the packet that is received by dstChainID is srcChainID - LET recvPacket == [ - sequence |-> packetLogEntry.sequence, - timeoutHeight |-> packetLogEntry.timeoutHeight, - srcChannelID |-> srcChannelID, - srcPortID |-> srcPortID, - dstChannelID |-> dstChannelID, - dstPortID |-> dstPortID, - data |-> packetLogEntry.data - ] IN - - \* the source chain of the packet that is acknowledged by srcChainID is dstChainID - LET ackPacket == [ - sequence |-> packetLogEntry.sequence, - timeoutHeight |-> packetLogEntry.timeoutHeight, - srcChannelID |-> dstChannelID, - srcPortID |-> dstPortID, - dstChannelID |-> srcChannelID, - dstPortID |-> srcPortID, - data |-> packetLogEntry.data - ] IN - - IF packetLogEntry.type = "PacketSent" - THEN [ - type |-> "PacketRecv", - packet |-> recvPacket, - proofHeight |-> srcHeight - ] - ELSE IF packetLogEntry.type = "WriteAck" - THEN [ - type |-> "PacketAck", - packet |-> ackPacket, - acknowledgement |-> packetLogEntry.acknowledgement, - proofHeight |-> srcHeight - ] - ELSE NullDatagram - -(*************************************************************************** - Environment actions - ***************************************************************************) - \* update the client height of some chain - UpdateClients == - \E chainID \in ChainIDs : UpdateClientHeights(chainID) - -\* create datagrams depending on packet log -CreateDatagrams == - /\ packetLog /= <<>> - /\ LET packetLogEntry == Head(packetLog) IN - LET srcChainID == packetLogEntry.srcChainID IN - LET dstChainID == GetCounterpartyChainID(srcChainID) IN - LET packetDatagram == PacketDatagram(srcChainID, dstChainID, packetLogEntry) IN - /\ \/ /\ packetDatagram = NullDatagram - /\ UNCHANGED <> - \/ /\ packetDatagram /= NullDatagram - /\ srcChainID = "chainA" - /\ packetDatagramsChainB' = - Append(packetDatagramsChainB, packetDatagram) - /\ UNCHANGED packetDatagramsChainA - \/ /\ packetDatagram /= NullDatagram - /\ srcChainID = "chainB" - /\ packetDatagramsChainA' = - 
Append(packetDatagramsChainA, - PacketDatagram(srcChainID, dstChainID, packetLogEntry)) - /\ UNCHANGED packetDatagramsChainB - - /\ packetLog' = Tail(packetLog) - /\ UNCHANGED <> - /\ UNCHANGED <> - /\ UNCHANGED <> - -(*************************************************************************** - Component actions - ***************************************************************************) - -\* ChainAction: either chain takes a step, leaving the other -\* variables unchange -ChainAction == - \/ /\ ChainA!Next - /\ UNCHANGED chainBvars - \/ /\ ChainB!Next - /\ UNCHANGED chainAvars - -\* EnvironmentAction: create packet datagrams if packet log is not empty -EnvironmentAction == - \/ CreateDatagrams - \/ UpdateClients - -(*************************************************************************** - Specification - ***************************************************************************) - -\* Initial state predicate -Init == - /\ ChainA!Init - /\ ChainB!Init - \* bank accounts for each chain ID and its native denomination have MaxBalance - /\ accounts = - [<> \in {<<"chainA", <>>>, - <<"chainB", <>>>} - |-> MaxBalance] - \* escrow accounts for each channel ID and the chain's native denomination have balance 0 - /\ escrowAccounts = - [<> \in {<<"chanBtoA", <>>>, - <<"chanAtoB", <>>>} - |-> 0] - /\ packetLog = <<>> - - -\* Next state action -Next == - \/ ChainAction - \/ EnvironmentAction - \/ UNCHANGED vars - -\* fairness constraint -Fairness == - /\ ChainA!Fairness - /\ ChainB!Fairness - /\ WF_vars(Next) - -Spec == Init /\ [][Next]_vars /\ Fairness - -(*************************************************************************** - Helper operators used in properties and invariants - ***************************************************************************) - -RECURSIVE Sum(_) - -\* sum of elements in a set -Sum(S) == - IF S = {} - THEN 0 - ELSE LET x == CHOOSE y \in S: TRUE IN - x + Sum(S \ {x}) - -\* get the native denomination based on chainID -GetNativeDenomination(chainID) == - IF chainID = "chainA" - THEN NativeDenominationChainA - ELSE NativeDenominationChainB - -\* set of prefixed denominations given a native denomination -\* @type: (Str) => Set(Seq(Str)); -PrefixedDenoms(nativeDenomination) == - {<> : portID \in PortIDs, channelID \in ChannelIDs} - -\* set of escrow account IDs -\* @type: Set(<>); -EscrowAccountsDomain == - {<>>> : - chainID \in ChainIDs} - -\* set of all denominations -Denominations == - {<>, <>} - \union - PrefixedDenoms(NativeDenominationChainA) - \union - PrefixedDenoms(NativeDenominationChainB) - -\* create expected packet receipt for a given packet commitment -\* @type: (Str, PACKETCOMM) => [channelID: Str, portID: Str, sequence: Int]; -PacketReceipt(chainID, packetCommitment) == - [ - channelID |-> GetCounterpartyChannelID(chainID), - portID |-> GetCounterpartyPortID(chainID), - sequence |-> packetCommitment.sequence - ] - -\* get the escrow account IDs for the native denomination -\* @type: (Str) => Set(<>); -EscrowAccountIDs(nativeDenomination) == - {<>>> : channelID \in ChannelIDs} - -\* a packet is in flight if a packet commitment exists, but a -\* corresponding packet receipt is not on the counterparty chain -\* @type: (Str, Str) => Set(Int); -GetAmountsInFlight(chainID, nativeDenom) == - - \* get packet commitments of chainID and packet receipts of its counterparty - LET packetCommittments == GetChainByID(chainID).packetCommitments IN - LET counterpartyChainID == GetCounterpartyChainID(chainID) IN - LET counterpartyPacketReceipts == 
GetChainByID(counterpartyChainID).packetReceipts IN - - \* get packet commitments for packets in flight - LET inFlight == {pc \in packetCommittments : - PacketReceipt(chainID, pc) \notin counterpartyPacketReceipts} IN - - \* get packet data for packets in flight - LET inFlightData == {pc.data : pc \in inFlight} IN - - \* get packet data for packets in flight that have a prefixed denomination, - \* where the last field is the native denomination of chainID - LET inFlightDataOfDenomination == {d \in inFlightData : - d.denomination[Len(d.denomination)] = nativeDenom} IN - - \* compute set of amounts of the packets in flight that have - \* the desired denomination - {d.amount : d \in inFlightDataOfDenomination} - -\* compute sum over accounts that have chainID's native denomination -SumOverLocalAccounts(chainID) == - \* get the native denomination of chainID - LET nativeDenomination == GetNativeDenomination(chainID) IN - \* get counterparty channel ID - LET counterpartyChannelID == GetCounterpartyChannelID(chainID) IN - - \* compute the sum over bank accounts and escrow accounts with - \* native denomination - accounts[<>>>] + - escrowAccounts[<>>>] - -\* compute the sum over the amounts in escrow accounts -\* @type: (Str) => Int; -SumOverEscrowAccounts(chainID) == - \* get the native denomination of chainID - LET nativeDenomination == GetNativeDenomination(chainID) IN - - \* get the escrow account IDs for the native denomination - LET escrowAccountIDs == EscrowAccountIDs(nativeDenomination) IN - \* get the amounts in escrow accounts for the native denomination - LET escrowAccountAmounts == {escrowAccounts[accountID] : - accountID \in (escrowAccountIDs \intersect DOMAIN escrowAccounts)} IN - - \* compute the sum over the amounts in escrow accounts - Sum(escrowAccountAmounts) - -\* compute the sum over the amounts of packets in flight -SumOverPacketsInFlight(chainID) == - \* get the native denomination of chainID - LET nativeDenomination == GetNativeDenomination(chainID) IN - - \* get the set of amounts of packets in flight for each chain - LET amountsInFlight == UNION {GetAmountsInFlight(chID, nativeDenomination) : chID \in ChainIDs} IN - - \* compute the sum over the amounts of packets in flight - Sum(amountsInFlight) - -\* compute the sum over the amounts in bank accounts for prefixed denomination -SumOverBankAccountsWithPrefixedDenoms(chainID) == - \* get the native denomination of chainID - LET nativeDenomination == GetNativeDenomination(chainID) IN - - \* compute the set of prefixed denominations - LET prefixedDenominations == {pd \in PrefixedDenoms(nativeDenomination) : - /\ Len(pd) > 1 - /\ pd[Len(pd)] = nativeDenomination} IN - - \* get the bank account IDs for the prefixed denominations - LET accountIDs == {<> : - chID \in ChainIDs, prefixedDenomination \in prefixedDenominations} IN - - \* get the amounts in bank accounts for the prefixed denominations - LET accountAmounts == {accounts[accountID] : - accountID \in (accountIDs \intersect DOMAIN accounts)} IN - - \* compute the sum over the amounts in bank accounts - Sum(accountAmounts) - -(*************************************************************************** - Properties and invariants - ***************************************************************************) - -\* Type invariant -TypeOK == - /\ chainAstore \in ChainStores(Heights, MaxPacketSeq, MaxBalance, NativeDenominations) - /\ chainBstore \in ChainStores(Heights, MaxPacketSeq, MaxBalance, NativeDenominations) - /\ appPacketSeqChainA \in 1..(MaxPacketSeq + 1) - /\ 
appPacketSeqChainB \in 1..(MaxPacketSeq + 1) - /\ packetDatagramsChainA \in Seq(Datagrams(Heights, MaxPacketSeq, MaxBalance, NativeDenominations)) - /\ packetDatagramsChainB \in Seq(Datagrams(Heights, MaxPacketSeq, MaxBalance, NativeDenominations)) - /\ packetLog \in Seq(PacketLogEntries(Heights, MaxPacketSeq, MaxBalance, NativeDenominations)) - /\ DOMAIN accounts \subseteq ChainIDs \X AllDenominations - /\ \A accountID \in DOMAIN accounts : accounts[accountID] \in 0..MaxBalance - /\ DOMAIN escrowAccounts \subseteq EscrowAccountsDomain - /\ \A accountID \in DOMAIN escrowAccounts : escrowAccounts[accountID] \in 0..MaxBalance - -\* There are MaxBalance coins of the native denomination in bank and escrow accounts -\* for a given chain -\* Note: this property still holds if the counterparty chain is malicious -PreservationOfTotalSupplyLocal == - \A chainID \in ChainIDs : - SumOverLocalAccounts(chainID) = MaxBalance - -\* The amount in nativeDenomination in escrow accounts -\* is equal to the sum of: -\* * the amounts in-flight packets in a (prefixed or unprefixed) denomination ending -\* in nativeDenomination, and -\* * the amounts in accounts in a prefixed denomination ending in -\* nativeDenomination, in which it is not native -\* Note: this property is satisfied only if both chains are correct -PreservationOfTotalSupplyGlobal == - \A chainID \in ChainIDs : - SumOverEscrowAccounts(chainID) = - SumOverPacketsInFlight(chainID) + SumOverBankAccountsWithPrefixedDenoms(chainID) - -\* A violation of this property is an execution where fungibility is preserved, -\* where a return payment is effectuated -\* Note: this property should also be violated if the counterparty chain is malicious -\* and effectuates a return payment -NonPreservationOfFungibility == - \A accountID \in EscrowAccountsDomain : - [](escrowAccounts[accountID] > 0 - => [](escrowAccounts[accountID] > 0)) - -\* ICS20Inv invariant: conjunction of invariants -ICS20Inv == - /\ PreservationOfTotalSupplyLocal - /\ PreservationOfTotalSupplyGlobal - -\* ICS20Prop property: conjunction of properties -ICS20Prop == - NonPreservationOfFungibility - -============================================================================= -\* Modification History -\* Last modified Wed Apr 14 15:24:26 CEST 2021 by ilinastoilkovska -\* Created Mon Oct 17 13:00:24 CEST 2020 by ilinastoilkovska diff --git a/docs/spec/tla/fungible-token-transfer/IBCTokenTransferDefinitions.tla b/docs/spec/tla/fungible-token-transfer/IBCTokenTransferDefinitions.tla deleted file mode 100644 index 8e3f3c5641..0000000000 --- a/docs/spec/tla/fungible-token-transfer/IBCTokenTransferDefinitions.tla +++ /dev/null @@ -1,426 +0,0 @@ --------------------- MODULE IBCTokenTransferDefinitions -------------------- - -(*************************************************************************** - This module contains definitions of operators that are shared between the - different modules, and which are relevant for ICS20. 
- ***************************************************************************) - -EXTENDS Integers, FiniteSets, Sequences - -(************************ TYPE ALIASES FOR SNOWCAT *************************) -(* @typeAlias: CHAN = - [ - state: Str, - order: Str, - portID: Str, - channelID: Str, - counterpartyPortID: Str, - counterpartyChannelID: Str, - version: Str - ]; -*) -(* @typeAlias: PACKETDATA = - [ - denomination: Seq(Str), - amount: Int, - sender: Str, - receiver: Str - ]; -*) -(* @typeAlias: PACKET = - [ - sequence: Int, - timeoutHeight: Int, - data: PACKETDATA, - srcPortID: Str, - srcChannelID: Str, - dstPortID: Str, - dstChannelID: Str - ]; -*) -(* @typeAlias: PACKETCOMM = - [ - portID: Str, - channelID: Str, - data: PACKETDATA, - sequence: Int, - timeoutHeight: Int - ]; -*) -(* @typeAlias: PACKETREC = - [ - portID: Str, - channelID: Str, - sequence: Int - ]; -*) -(* @typeAlias: PACKETACK = - [ - portID: Str, - channelID: Str, - sequence: Int, - acknowledgement: Bool - ]; -*) -(* @typeAlias: ACCOUNT = - <>; -*) -(* @typeAlias: PACKETTOACK = - <>; -*) -(* @typeAlias: CHAINSTORE = - [ - height: Int, - counterpartyClientHeights: Set(Int), - channelEnd: CHAN, - packetCommitments: Set(PACKETCOMM), - packetsToAcknowledge: Seq(PACKETTOACK), - packetReceipts: Set(PACKETREC), - packetAcknowledgements: Set(PACKETACK), - escrowAccounts: ACCOUNT -> Int - ]; -*) -(* @typeAlias: DATAGRAM = - [ - type: Str, - height: Int, - proofHeight: Int, - consensusHeight: Int, - clientID: Str, - counterpartyClientID: Str, - connectionID: Str, - counterpartyConnectionID: Str, - versions: Set(Int), - portID: Str, - channelID: Str, - counterpartyPortID: Str, - counterpartyChannelID: Str, - packet: PACKET, - acknowledgement: Bool - ]; -*) -(* @typeAlias: LOGENTRY = - [ - type: Str, - srcChainID: Str, - sequence: Int, - timeoutHeight: Int, - acknowledgement: Bool, - data: PACKETDATA - ]; -*) -(* @typeAlias: HISTORY = - [ - connInit: Bool, - connTryOpen: Bool, - connOpen: Bool, - chanInit: Bool, - chanTryOpen: Bool, - chanOpen: Bool, - chanClosed: Bool - ]; -*) - -(********************** Common operator definitions ***********************) -ChainIDs == {"chainA", "chainB"} -ChannelIDs == {"chanAtoB", "chanBtoA"} -PortIDs == {"portA", "portB"} -ChannelStates == {"UNINIT", "INIT", "TRYOPEN", "OPEN", "CLOSED"} - -nullHeight == 0 -nullChannelID == "none" -nullPortID == "none" -nullEscrowAddress == "none" - -Max(S) == CHOOSE x \in S: \A y \in S: y <= x - -(******************************* ChannelEnds ******************************* - A set of channel end records. - A channel end record contains the following fields: - - - state -- a string - Stores the current state of this channel end. It has one of the - following values: "UNINIT", "INIT", "TRYOPEN", "OPEN", "CLOSED". - - - order -- a string - Stores whether the channel end is ordered or unordered. It has one of the - following values: "UNORDERED", "ORDERED" - - * for ICS20 we require that the channels are unordered - - - portID -- a port identifier - Stores the port identifier of this channel end. - - - channelID -- a channel identifier - Stores the channel identifier of this channel end. - - - counterpartyPortID -- a port identifier - Stores the port identifier of the counterparty channel end. - - - counterpartyChannelID -- a channel identifier - Stores the channel identifier of the counterparty channel end. 
- - - version -- a string - The version is "ics20-1" for fungible token transfer - ***************************************************************************) - -ChannelEnds == - [ - state : ChannelStates, - order : {"UNORDERED"}, - portID : PortIDs \union {nullPortID}, - channelID : ChannelIDs \union {nullChannelID}, - counterpartyPortID : PortIDs \union {nullPortID}, - counterpartyChannelID : ChannelIDs \union {nullChannelID}, - version : {"ics20-1"} - ] - -(************************* FungibleTokenPacketData ************************* - A set of records defining ICS20 packet data. - - Denominations are defined as Seq(ChannelIDs \union PortIDs \union NativeDenominations), - where NativeDenominations is the set of native denominations of the two chains. - ***************************************************************************) -\* @type: (Int, Set(Seq(Str))) => Set(PACKETDATA); -FungibleTokenPacketData(maxBalance, Denominations) == - [ - denomination : Denominations, - amount : 0..maxBalance, - sender : ChainIDs, - receiver : ChainIDs - ] - -(******* PacketCommitments, PacketReceipts, PacketAcknowledgements *********) -\* Set of packet commitments -\* @type: (Set(Int), Int, Int, Set(Seq(Str))) => Set(PACKETCOMM); -PacketCommitments(Heights, maxPacketSeq, maxBalance, Denominations) == - [ - channelID : ChannelIDs, - portID : PortIDs, - sequence : 1..maxPacketSeq, - data : FungibleTokenPacketData(maxBalance, Denominations), - timeoutHeight : Heights - ] - -\* Set of packet receipts -\* @type: (Int) => Set(PACKETREC); -PacketReceipts(maxPacketSeq) == - [ - channelID : ChannelIDs, - portID : PortIDs, - sequence : 1..maxPacketSeq - ] - -\* Set of packet acknowledgements -\* @type: (Int) => Set(PACKETACK); -PacketAcknowledgements(maxPacketSeq) == - [ - channelID : ChannelIDs, - portID : PortIDs, - sequence : 1..maxPacketSeq, - acknowledgement : BOOLEAN - ] - -(********************************* Packets *********************************) -\* Set of packets -\* @type: (Set(Int), Int, Int, Set(Seq(Str))) => Set(PACKET); -Packets(Heights, maxPacketSeq, maxBalance, Denominations) == - [ - sequence : 1..maxPacketSeq, - timeoutHeight : Heights, - data : FungibleTokenPacketData(maxBalance, Denominations), - srcPortID : PortIDs, - srcChannelID : ChannelIDs, - dstPortID : PortIDs, - dstChannelID : ChannelIDs - ] - -(******************************** ChainStores ****************************** - A set of chain store records, with fields relevant for ICS20. - A chain store record contains the following fields: - - - height : an integer between nullHeight and MaxHeight. - Stores the current height of the chain. - - - counterpartyClientHeights : a set of integers between 1 and MaxHeight - Stores the heights of the client for the counterparty chain. - - - channelEnd : a channel end - Stores data about the channel with the counterparty chain. - - - packetCommitments : a set of packet commitments - A packet commitment is added to this set when a chain sends a packet - to the counterparty. - - - packetReceipts : a set of packet receipts - A packet receipt is added to this set when a chain received a packet - from the counterparty chain. 
- - - packetAcknowledgements : a set of packet acknowledgements - A packet acknowledgement is added to this set when a chain writes an - acknowledgement for a packet it received from the counterparty - - - packetsToAcknowledge : a sequence of pairs <> - A pair <>, where ack is a Boolean value, is added - to this sequence when a chain successfully receives a PacketRecv - datagram - - A chain store is the combination of the provable and private stores. - - ***************************************************************************) -\* @type: (Set(Int), Int, Int, Set(Str)) => Set(CHAINSTORE); -ChainStores(Heights, maxPacketSeq, maxBalance, NativeDenominations) == - [ - height : Heights, - counterpartyClientHeights : SUBSET(Heights), - channelEnd : ChannelEnds, - - packetCommitments : SUBSET(PacketCommitments(Heights, maxPacketSeq, maxBalance, - Seq(ChannelIDs \union PortIDs \union NativeDenominations))), - packetReceipts : SUBSET(PacketReceipts(maxPacketSeq)), - packetAcknowledgements : SUBSET(PacketAcknowledgements(maxPacketSeq)), - packetsToAcknowledge : Seq(Packets(Heights, maxPacketSeq, maxBalance, - Seq(ChannelIDs \union PortIDs \union NativeDenominations)) - \X - BOOLEAN) - ] - -(******************************** Datagrams ********************************) -\* Set of datagrams -Datagrams(Heights, maxPacketSeq, maxBalance, NativeDenominations) == - [type : {"PacketRecv"}, - packet : Packets(Heights, maxPacketSeq, maxBalance, - Seq(ChannelIDs \union PortIDs \union NativeDenominations)), - proofHeight : Heights] - \union - [type : {"PacketAck"}, - packet : Packets(Heights, maxPacketSeq, maxBalance, - Seq(ChannelIDs \union PortIDs \union NativeDenominations)), - acknowledgement : BOOLEAN, - proofHeight : Heights] - -\* Null datagram -NullDatagram == - [type |-> "null"] - -(**************************** PacketLogEntries *****************************) -\* Set of packet log entries -PacketLogEntries(Heights, maxPacketSeq, maxBalance, NativeDenominations) == - [ - type : {"PacketSent"}, - srcChainID : ChainIDs, - sequence : 1..maxPacketSeq, - timeoutHeight : Heights, - data : FungibleTokenPacketData(maxBalance, - Seq(ChannelIDs \union PortIDs \union NativeDenominations)) - ] \union [ - type : {"PacketRecv"}, - srcChainID : ChainIDs, - sequence : 1..maxPacketSeq, - portID : PortIDs, - channelID : ChannelIDs, - timeoutHeight : Heights - ] \union [ - type : {"WriteAck"}, - srcChainID : ChainIDs, - sequence : 1..maxPacketSeq, - portID : PortIDs, - channelID : ChannelIDs, - timeoutHeight : Heights, - data : FungibleTokenPacketData(maxBalance, - Seq(ChannelIDs \union PortIDs \union NativeDenominations)), - acknowledgement : BOOLEAN - ] - -(*************************************************************************** - Chain helper operators - ***************************************************************************) - -\* get the ID of chainID's counterparty chain -GetCounterpartyChainID(chainID) == - IF chainID = "chainA" THEN "chainB" ELSE "chainA" - -\* get the maximal height of the client for chainID's counterparty chain -\* @type: (CHAINSTORE) => Int; -GetMaxCounterpartyClientHeight(chain) == - IF chain.counterpartyClientHeights /= {} - THEN Max(chain.counterpartyClientHeights) - ELSE nullHeight - -\* get the channel ID of the channel end at chainID -GetChannelID(chainID) == - IF chainID = "chainA" - THEN "chanAtoB" - ELSE IF chainID = "chainB" - THEN "chanBtoA" - ELSE nullChannelID - -\* get the channel ID of the channel end at chainID's counterparty chain -GetCounterpartyChannelID(chainID) == - 
IF chainID = "chainA" - THEN "chanBtoA" - ELSE IF chainID = "chainB" - THEN "chanAtoB" - ELSE nullChannelID - -\* get the port ID at chainID -GetPortID(chainID) == - IF chainID = "chainA" - THEN "portA" - ELSE IF chainID = "chainB" - THEN "portB" - ELSE nullPortID - -\* get the port ID at chainID's counterparty chain -GetCounterpartyPortID(chainID) == - IF chainID = "chainA" - THEN "portB" - ELSE IF chainID = "chainB" - THEN "portA" - ELSE nullPortID - -\* get the latest height of chain -\* @type: (CHAINSTORE) => Int; -GetLatestHeight(chain) == - chain.height - -(*************************************************************************** - Initial values of a channel end, chain store, accounts for ICS20 - ***************************************************************************) -\* Initial value of a channel end: -\* - state is "OPEN" (we assume channel handshake has successfully finished) -\* - order is "UNORDERED" (requirement of ICS20) -\* - channelID, counterpartyChannelID -InitUnorderedChannelEnd(ChainID) == - [ - state |-> "OPEN", - order |-> "UNORDERED", - portID |-> GetPortID(ChainID), - channelID |-> GetChannelID(ChainID), - counterpartyPortID |-> GetCounterpartyPortID(ChainID), - counterpartyChannelID |-> GetCounterpartyChannelID(ChainID), - version |-> "ics20-1" - ] - -\* A set of initial values of the chain store for ICS20: -\* - height is initialized to 1 -\* - counterpartyClientHeights is the set of installed client heights -\* - the channelEnd is initialized to InitUnorderedChannelEnd -\* - the packet committments, receipts, acknowledgements, and packets -\* to acknowledge are empty -ICS20InitChainStore(ChainID) == - [ - height |-> 1, - counterpartyClientHeights |-> {}, - channelEnd |-> InitUnorderedChannelEnd(ChainID), - - packetCommitments |-> {}, - packetReceipts |-> {}, - packetAcknowledgements |-> {}, - packetsToAcknowledge |-> <<>> - ] - -============================================================================= -\* Modification History -\* Last modified Wed Apr 14 15:27:35 CEST 2021 by ilinastoilkovska -\* Created Mon Oct 17 13:01:38 CEST 2020 by ilinastoilkovska diff --git a/docs/spec/tla/fungible-token-transfer/ICS04PacketHandlers.tla b/docs/spec/tla/fungible-token-transfer/ICS04PacketHandlers.tla deleted file mode 100644 index cfcd4883a8..0000000000 --- a/docs/spec/tla/fungible-token-transfer/ICS04PacketHandlers.tla +++ /dev/null @@ -1,359 +0,0 @@ ------------------------- MODULE ICS04PacketHandlers ------------------------ - -(*************************************************************************** - This module contains definitions of operators that are used to handle - packet datagrams. 
- ***************************************************************************) - -EXTENDS Integers, FiniteSets, IBCTokenTransferDefinitions, - ICS20FungibleTokenTransferHandlers - -(*************************************************************************** - Packet datagram handlers - ***************************************************************************) - -\* Handle "PacketRecv" datagrams -(* @type: (Str, CHAINSTORE, DATAGRAM, Seq(LOGENTRY), ACCOUNT -> Int, ACCOUNT -> Int, Int) - => [store: CHAINSTORE, log: Seq(LOGENTRY), accounts: ACCOUNT -> Int, escrowAccounts: ACCOUNT -> Int]; -*) -HandlePacketRecv(chainID, chain, packetDatagram, log, accounts, escrowAccounts, maxBalance) == - \* get chainID's channel end - LET channelEnd == chain.channelEnd IN - \* get packet - LET packet == packetDatagram.packet IN - - LET packetRecvUpdates == - IF \* if the channel end is open for packet transmission - /\ channelEnd.state = "OPEN" - \* if the packet has not passed the timeout height - /\ \/ packet.timeoutHeight = 0 - \/ chain.height < packet.timeoutHeight - \* if the "PacketRecv" datagram has valid port and channel IDs - /\ packet.srcPortID = channelEnd.counterpartyPortID - /\ packet.srcChannelID = channelEnd.counterpartyChannelID - /\ packet.dstPortID = channelEnd.portID - /\ packet.dstChannelID = channelEnd.channelID - THEN \* call application function OnPacketRecv - LET OnPacketRecvOutcome == - OnPacketRecv(chain, accounts, escrowAccounts, packet, maxBalance) IN - \* if OnPacketRecv is successful - IF /\ ~OnPacketRecvOutcome.error - \* if the packet has not been received - /\ [ - portID |-> packet.dstPortID, - channelID |-> packet.dstChannelID, - sequence |-> packet.sequence - ] \notin chain.packetReceipts - THEN \* construct log entry for packet log - LET logEntry == [ - type |-> "PacketRecv", - srcChainID |-> chainID, - sequence |-> packet.sequence, - portID |-> packet.dstPortID, - channelID |-> packet.dstChannelID, - timeoutHeight |-> packet.timeoutHeight - ] IN - - LET updatedChainStore == [chain EXCEPT - \* record that the packet has been received - !.packetReceipts = - chain.packetReceipts - \union - {[ - channelID |-> packet.dstChannelID, - portID |-> packet.dstPortID, - sequence |-> packet.sequence - ]}, - \* add packet to the set of packets for which an acknowledgement - \* should be written - !.packetsToAcknowledge = - Append(chain.packetsToAcknowledge, - <>) - ] IN - - \* update the chain store, packet log, and bank accounts - [store |-> updatedChainStore, - log |-> Append(log, logEntry), - accounts |-> OnPacketRecvOutcome.accounts, - escrowAccounts |-> OnPacketRecvOutcome.escrowAccounts] - - \* otherwise, do not update the chain store, the log, the accounts - ELSE [store |-> chain, - log |-> log, - accounts |-> accounts, - escrowAccounts |-> escrowAccounts] - ELSE [store |-> chain, - log |-> log, - accounts |-> accounts, - escrowAccounts |-> escrowAccounts] - IN - - packetRecvUpdates - -\* Handle "PacketAck" datagrams -(* @type: (CHAINSTORE, DATAGRAM, Seq(LOGENTRY), ACCOUNT -> Int, ACCOUNT -> Int, Int) - => [store: CHAINSTORE, log: Seq(LOGENTRY), accounts: ACCOUNT -> Int, escrowAccounts: ACCOUNT -> Int]; -*) -HandlePacketAck(chain, packetDatagram, log, accounts, escrowAccounts, maxBalance) == - \* get chainID's channel end - LET channelEnd == chain.channelEnd IN - \* get packet - LET packet == packetDatagram.packet IN - \* get acknowledgement - LET ack == packetDatagram.acknowledgement IN - \* get packet committment that should be in chain store - LET packetCommitment == [ - 
portID |-> packet.srcPortID, - channelID |-> packet.srcChannelID, - sequence |-> packet.sequence, - timeoutHeight |-> packet.timeoutHeight - ] IN - - \* call application function OnPacketAck - LET OnPacketAckOutcome == - OnPaketAck(accounts, escrowAccounts, packet, ack, maxBalance) IN - - IF \* if the channel and connection ends are open for packet transmission - /\ channelEnd.state = "OPEN" - \* if the packet commitment exists in the chain store - /\ packetCommitment \in chain.packetCommitments - \* if the "PacketAck" datagram has valid port and channel IDs - /\ packet.srcPortID = channelEnd.portID - /\ packet.srcChannelID = channelEnd.channelID - /\ packet.dstPortID = channelEnd.counterpartyPortID - /\ packet.dstChannelID = channelEnd.counterpartyChannelID - \* remove packet commitment - THEN LET updatedChainStore == - [chain EXCEPT - !.packetCommitments = - chain.packetCommitments \ {packetCommitment}] - IN - - [store |-> updatedChainStore, - log |-> log, - accounts |-> OnPacketAckOutcome.accounts, - escrowAccounts |-> OnPacketAckOutcome.escrowAccounts] - - \* otherwise, do not update the chain store, log and accounts - ELSE [store |-> chain, - log |-> log, - accounts |-> accounts, - escrowAccounts |-> escrowAccounts] - - -\* write packet committments to chain store -\* @type: (CHAINSTORE, PACKET) => CHAINSTORE; -WritePacketCommitment(chain, packet) == - \* get channel end - LET channelEnd == chain.channelEnd IN - \* get latest client height - LET latestClientHeight == GetMaxCounterpartyClientHeight(chain) IN - - IF \* channel end is neither null nor closed - /\ channelEnd.state \notin {"UNINIT", "CLOSED"} - \* there exists a counterparty client - \* (used to abstract the check if the connection end is not in UNINIT) - /\ latestClientHeight /= 0 - \* if the packet has valid port and channel IDs - /\ packet.srcPortID = channelEnd.portID - /\ packet.srcChannelID = channelEnd.channelID - /\ packet.dstPortID = channelEnd.counterpartyPortID - /\ packet.dstChannelID = channelEnd.counterpartyChannelID - \* timeout height has not passed - /\ \/ packet.timeoutHeight = 0 - \/ latestClientHeight < packet.timeoutHeight - THEN [chain EXCEPT - !.packetCommitments = - chain.packetCommitments - \union - {[portID |-> packet.srcPortID, - channelID |-> packet.srcChannelID, - data |-> packet.data, - sequence |-> packet.sequence, - timeoutHeight |-> packet.timeoutHeight]} - ] - \* otherwise, do not update the chain store - ELSE chain - -\* write acknowledgements to chain store -\* @type: (CHAINSTORE, PACKETTOACK) => CHAINSTORE; -WriteAcknowledgement(chain, packetToAck) == - \* packetToack is a pair of a packet and its acknowledgement - LET packet == packetToAck[1] IN - LET ack == packetToAck[2] IN - - \* create a packet acknowledgement for this packet - LET packetAcknowledgement == [ - portID |-> packet.dstPortID, - channelID |-> packet.dstChannelID, - sequence |-> packet.sequence, - acknowledgement |-> ack - ] IN - - \* if the acknowledgement for the packet has not been written - IF packetAcknowledgement \notin chain.packetAcknowledgements - THEN \* write the acknowledgement to the chain store and remove - \* the packet from the set of packets to acknowledge - [chain EXCEPT !.packetAcknowledgements = - chain.packetAcknowledgements - \union - {packetAcknowledgement}, - !.packetsToAcknowledge = - Tail(chain.packetsToAcknowledge)] - - \* remove the packet from the sequence of packets to acknowledge - ELSE [chain EXCEPT !.packetsToAcknowledge = - Tail(chain.packetsToAcknowledge)] - -\* log acknowledgements to 
packet Log -\* @type: (Str, CHAINSTORE, Seq(LOGENTRY), PACKETTOACK) => Seq(LOGENTRY); -LogAcknowledgement(chainID, chain, log, packetToAck) == - \* packetToAck is a pair of a packet and its acknowledgement - LET packet == packetToAck[1] IN - LET ack == packetToAck[2] IN - - \* create a packet acknowledgement for this packet - LET packetAcknowledgement == [ - portID |-> packet.dstPortID, - channelID |-> packet.dstChannelID, - sequence |-> packet.sequence, - acknowledgement |-> ack - ] IN - - \* if the acknowledgement for the packet has not been written - IF packetAcknowledgement \notin chain.packetAcknowledgements - THEN \* append a "WriteAck" log entry to the log - LET packetLogEntry == [ - type |-> "WriteAck", - srcChainID |-> chainID, - sequence |-> packet.sequence, - portID |-> packet.dstPortID, - channelID |-> packet.dstChannelID, - timeoutHeight |-> packet.timeoutHeight, - acknowledgement |-> ack, - data |-> packet.data - ] IN - Append(log, packetLogEntry) - \* do not add anything to the log - ELSE log - -\* check if a packet timed out -(* @type: (CHAINSTORE, CHAINSTORE, ACCOUNT -> Int, ACCOUNT -> Int, PACKET, Int, Int) - => [store: CHAINSTORE, accounts: ACCOUNT -> Int, escrowAccounts: ACCOUNT -> Int]; -*) -TimeoutPacket(chain, counterpartyChain, accounts, escrowAccounts, - packet, proofHeight, maxBalance) == - \* get channel end - LET channelEnd == chain.channelEnd IN - \* get packet committment that should be in chain store - LET packetCommitment == [ - portID |-> packet.srcPortID, - channelID |-> packet.srcChannelID, - data |-> packet.data, - sequence |-> packet.sequence, - timeoutHeight |-> packet.timeoutHeight - ] IN - \* get packet receipt that should be absent in counterparty chain store - LET packetReceipt == [ - portID |-> packet.dstPortID, - channelID |-> packet.dstChannelID, - sequence |-> packet.sequence - ] IN - - \* call application function OnTimeoutPacket - LET OnTimeoutPacketOutcome == - OnTimeoutPacket(accounts, escrowAccounts, packet, maxBalance) IN - - \* if channel end is open - IF /\ channelEnd.state = "OPEN" - \* srcChannelID and srcPortID match channel and port IDs - /\ packet.srcPortID = channelEnd.portID - /\ packet.srcChannelID = channelEnd.channelID - \* dstChannelID and dstPortID match counterparty channel and port IDs - /\ packet.dstPortID = channelEnd.counterpartyPortID - /\ packet.dstChannelID = channelEnd.counterpartyChannelID - \* packet has timed out - /\ packet.timeoutHeight > 0 - /\ proofHeight >= packet.timeoutHeight - \* chain has sent the packet - /\ packetCommitment \in chain.packetCommitments - \* counterparty chain has not received the packet - /\ packetReceipt \notin counterpartyChain.packetReceipts - \* remove packet commitment - THEN LET updatedChainStore == - [chain EXCEPT !.packetCommitments = - chain.packetCommitments \ {packetCommitment}] IN - [store |-> updatedChainStore, - accounts |-> OnTimeoutPacketOutcome.accounts, - escrowAccounts |-> OnTimeoutPacketOutcome.escrowAccounts] - - \* otherwise, do not update the chain store and accounts - ELSE [store |-> chain, - accounts |-> accounts, - escrowAccounts |-> escrowAccounts] - -\* check if a packet timed out on close -(* @type: (CHAINSTORE, CHAINSTORE, ACCOUNT -> Int, ACCOUNT -> Int, PACKET, Int, Int) - => [store: CHAINSTORE, accounts: ACCOUNT -> Int, escrowAccounts: ACCOUNT -> Int]; -*) -TimeoutOnClose(chain, counterpartyChain, accounts, escrowAccounts, - packet, proofHeight, maxBalance) == - \* get channel end - LET channelEnd == chain.channelEnd IN - \* get counterparty channel end - 
LET counterpartyChannelEnd == counterpartyChain.channelEnd IN - - \* get packet committment that should be in chain store - LET packetCommitment == [ - portID |-> packet.srcPortID, - channelID |-> packet.srcChannelID, - data |-> packet.data, - sequence |-> packet.sequence, - timeoutHeight |-> packet.timeoutHeight - ] IN - \* get packet receipt that should be absent in counterparty chain store - LET packetReceipt == [ - portID |-> packet.dstPortID, - channelID |-> packet.dstChannelID, - sequence |-> packet.sequence - ] IN - - \* call application function OnTimeoutPacket - LET OnTimeoutPacketOutcome == - OnTimeoutPacket(accounts, escrowAccounts, packet, maxBalance) IN - - \* if srcChannelID and srcPortID match channel and port IDs - IF /\ packet.srcPortID = channelEnd.portID - /\ packet.srcChannelID = channelEnd.channelID - \* if dstChannelID and dstPortID match counterparty channel and port IDs - /\ packet.dstPort = channelEnd.counterpartyPortID - /\ packet.dstChannelID = channelEnd.counterpartyChannelID - \* chain has sent the packet - /\ packetCommitment \in chain.packetCommitments - \* counterparty channel end is closed and its fields are as expected - /\ counterpartyChannelEnd.state = "CLOSED" - /\ counterpartyChannelEnd.order = "UNORDERED" - /\ counterpartyChannelEnd.portID = packet.dstPortID - /\ counterpartyChannelEnd.channelID = packet.dstChannelID - /\ counterpartyChannelEnd.counterpartyChannelID = packet.srcChannelID - /\ counterpartyChannelEnd.counterpartyPortID = packet.srcPortID - /\ counterpartyChannelEnd.version = channelEnd.version - \* counterparty chain has not received the packet - /\ packetReceipt \notin counterpartyChain.packetReceipts - \* remove packet commitment - THEN LET updatedChainStore == - [chain EXCEPT !.packetCommitments = - chain.packetCommitments \ {packetCommitment}] IN - [store |-> updatedChainStore, - accounts |-> OnTimeoutPacketOutcome.accounts, - escrowAccounts |-> OnTimeoutPacketOutcome.escrowAccounts] - - \* otherwise, do not update the chain store and accounts - ELSE [store |-> chain, - accounts |-> accounts, - escrowAccounts |-> escrowAccounts] - -============================================================================= -\* Modification History -\* Last modified Wed Apr 14 15:36:57 CEST 2021 by ilinastoilkovska -\* Created Thu Oct 19 18:29:58 CET 2020 by ilinastoilkovska diff --git a/docs/spec/tla/fungible-token-transfer/ICS20FungibleTokenTransferHandlers.tla b/docs/spec/tla/fungible-token-transfer/ICS20FungibleTokenTransferHandlers.tla deleted file mode 100644 index 846a3066c6..0000000000 --- a/docs/spec/tla/fungible-token-transfer/ICS20FungibleTokenTransferHandlers.tla +++ /dev/null @@ -1,200 +0,0 @@ ------------------ MODULE ICS20FungibleTokenTransferHandlers ---------------- - -(*************************************************************************** - This module contains definitions of operators that are used to handle - ICS20 packet datagrams. 
- ***************************************************************************) - -EXTENDS Integers, FiniteSets, Sequences, Bank, IBCTokenTransferDefinitions - -\* create outgoing packet data -\* - accounts is the map of bank accounts -\* - escrowAccounts is the map of escrow accounts -\* - sender, receiver are chain IDs (used as addresses) -\* @type: (ACCOUNT -> Int, ACCOUNT -> Int, Seq(Str), Int, Str, Str) => [denomination: Seq(Str), amount: Int, sender: Str, receiver: Str]; -CreateOutgoingPacketData(accounts, escrowAccounts, denomination, amount, sender, receiver) == - \* sending chain is source if the denomination is of length 1 - \* or if the denomination is not prefixed by the sender's port and channel ID - LET source == \/ Len(denomination) = 1 - \/ SubSeq(denomination, 1, 2) /= <> IN - - \* create packet data - LET data == - [ - denomination |-> denomination, - amount |-> amount, - sender |-> sender, - receiver |-> receiver - ] IN - - \* get the outcome of TransferCoins from the sender account to the escrow account - LET transferCoinsOutcome == - TransferCoins(accounts, sender, escrowAccounts, GetCounterpartyChannelID(sender), denomination, amount) IN - - \* get the outcome of BurnCoins applied to the sender account - LET burnCoinsOutcome == - BurnCoins(accounts, sender, denomination, amount) IN - - IF /\ source - /\ ~transferCoinsOutcome.error - \* if source and the coin transfer is successful, - \* update bank accounts and escrow accounts using the outcome from TransferCoins - THEN [ - packetData |-> data, - accounts |-> transferCoinsOutcome.senderAccounts, - escrowAccounts |-> transferCoinsOutcome.receiverAccounts, - error |-> FALSE - ] - \* if not source and the coin burning is successful, - \* update bank accounts using the outcome from BurnCoins - ELSE IF /\ ~source - /\ ~burnCoinsOutcome.error - THEN [ - packetData |-> data, - accounts |-> burnCoinsOutcome.accounts, - escrowAccounts |-> escrowAccounts, - error |-> FALSE - ] - \* otherwise, there is an error - ELSE [ - packetData |-> data, - accounts |-> accounts, - escrowAccounts |-> escrowAccounts, - error |-> TRUE - ] - -\* receive an ICS20 packet -(* @type: (CHAINSTORE, ACCOUNT -> Int, ACCOUNT -> Int, PACKET, Int) => - [packetAck: Bool, accounts: ACCOUNT -> Int, escrowAccounts: ACCOUNT -> Int, error: Bool]; -*) -OnPacketRecv(chain, accounts, escrowAccounts, packet, maxBalance) == - \* get packet data and denomination - LET data == packet.data IN - LET denomination == data.denomination IN - - \* receiving chain is source if - \* the denomination is prefixed by srcPortID and srcChannelID - LET source == /\ Len(denomination) > 1 - /\ SubSeq(denomination, 1, 2) = <> IN - - LET unprefixedDenomination == SubSeq(denomination, 3, Len(denomination)) IN - LET prefixedDenomination == <> \o denomination IN - - \* get the outcome of TransferCoins from the escrow - \* to the receiver account - LET transferCoinsOutcome == - TransferCoins( - escrowAccounts, packet.dstChannelID, - accounts, data.receiver, - unprefixedDenomination, data.amount - ) IN - - \* get the outcome of MintCoins with prefixedDenomination - \* to the receiver account - LET mintCoinsOutcome == - MintCoins( - accounts, data.receiver, - prefixedDenomination, data.amount, - maxBalance - ) IN - - IF /\ source - /\ ~transferCoinsOutcome.error - \* if source and the coin transfer is successful, - \* update bank accounts and escrow accounts using the outcome from TransferCoins - THEN - [ - packetAck |-> TRUE, - accounts |-> transferCoinsOutcome.receiverAccounts, - 
escrowAccounts |-> transferCoinsOutcome.senderAccounts, - error |-> FALSE - ] - \* if not source and minting coins is successful - \* update bank accounts using the outcome from MintCoins - ELSE IF /\ ~source - /\ ~mintCoinsOutcome.error - THEN [ - packetAck |-> TRUE, - accounts |-> mintCoinsOutcome.accounts, - escrowAccounts |-> escrowAccounts, - error |-> FALSE - ] - \* otherwise, there is an error - ELSE [ - packetAck |-> FALSE, - accounts |-> accounts, - escrowAccounts |-> escrowAccounts, - error |-> TRUE - ] - -\* refund tokens on unsuccessful ack -(* @type: (ACCOUNT -> Int, ACCOUNT -> Int, PACKET, Int) => - [accounts: ACCOUNT -> Int, escrowAccounts: ACCOUNT -> Int]; -*) -RefundTokens(accounts, escrowAccounts, packet, maxBalance) == - \* get packet data and denomination - LET data == packet.data IN - LET denomination == data.denomination IN - - \* chain is source if the denomination is of length 1 - \* or if the denomination is not prefixed by srcPortID and srcChannelID - LET source == \/ Len(denomination) = 1 - \/ SubSeq(denomination, 1, 2) /= <> IN - - \* get the outcome of TransferCoins from the escrow - \* to the sender account - LET transferCoinsOutcome == - TransferCoins( - escrowAccounts, packet.srcChannelID, - accounts, data.sender, - denomination, data.amount - ) IN - - \* get the outcome of MintCoins with denomination - \* to the sender account - LET mintCoinsOutcome == - MintCoins( - accounts, data.sender, - denomination, data.amount, - maxBalance - ) IN - - IF /\ source - /\ ~transferCoinsOutcome.error - \* if source and the coin transfer is successful, - \* update bank accounts and escrow accounts using the outcome from TransferCoins - THEN [ - accounts |-> transferCoinsOutcome.receiverAccounts, - escrowAccounts |-> transferCoinsOutcome.senderAccounts - ] - \* if not source and minting coins is successful - \* update bank accounts using the outcome from MintCoins - ELSE IF /\ ~source - /\ ~mintCoinsOutcome.error - THEN [ - accounts |-> mintCoinsOutcome.accounts, - escrowAccounts |-> escrowAccounts - ] - \* otherwise, do not update anything - ELSE [ - accounts |-> accounts, - escrowAccounts |-> escrowAccounts - ] - -\* acknowledge an ICS20 packet -OnPaketAck(accounts, escrowAccounts, packet, ack, maxBalance) == - IF ~ack - THEN RefundTokens(accounts, escrowAccounts, packet, maxBalance) - ELSE [ - accounts |-> accounts, - escrowAccounts |-> escrowAccounts - ] - -\* timeout an ICS20 packet -OnTimeoutPacket(accounts, escrowAccounts, packet, maxBalance) == - RefundTokens(accounts, escrowAccounts, packet, maxBalance) - -============================================================================= -\* Modification History -\* Last modified Wed Apr 14 15:37:37 CEST 2021 by ilinastoilkovska -\* Created Mon Oct 17 13:02:01 CEST 2020 by ilinastoilkovska diff --git a/docs/spec/tla/fungible-token-transfer/MC_IBCTokenTransfer.tla b/docs/spec/tla/fungible-token-transfer/MC_IBCTokenTransfer.tla deleted file mode 100644 index dcd99c6e28..0000000000 --- a/docs/spec/tla/fungible-token-transfer/MC_IBCTokenTransfer.tla +++ /dev/null @@ -1,31 +0,0 @@ -------------------------- MODULE MC_IBCTokenTransfer ------------------------ - -MaxHeight == 5 -MaxPacketSeq == 5 -MaxBalance == 5 -NativeDenominationChainA == "atom" -NativeDenominationChainB == "eth" - -VARIABLES - \* @type: CHAINSTORE; - chainAstore, \* store of ChainA - \* @type: CHAINSTORE; - chainBstore, \* store of ChainB - \* @type: Seq(DATAGRAM); - packetDatagramsChainA, \* sequence of packet datagrams incoming to ChainA - \* @type: 
Seq(DATAGRAM); - packetDatagramsChainB, \* sequence of packet datagrams incoming to ChainB - \* @type: Seq(LOGENTRY); - packetLog, \* packet log - \* @type: Int; - appPacketSeqChainA, \* packet sequence number from the application on ChainA - \* @type: Int; - appPacketSeqChainB, \* packet sequence number from the application on ChainB - \* @type: ACCOUNT -> Int; - accounts, \* a map from chainIDs and denominations to account balances - \* @type: ACCOUNT -> Int; - escrowAccounts \* a map from channelIDs and denominations to escrow account balances - -INSTANCE IBCTokenTransfer - -============================================================================= \ No newline at end of file diff --git a/docs/spec/tla/fungible-token-transfer/README.md b/docs/spec/tla/fungible-token-transfer/README.md deleted file mode 100644 index 5d7a7199d4..0000000000 --- a/docs/spec/tla/fungible-token-transfer/README.md +++ /dev/null @@ -1,218 +0,0 @@ -# TLA+ specification of the IBC Fungible Token Transfer Protocol - -This document describes the TLA+ model of the core logic of the English -specification [ICS -20](https://github.com/cosmos/ibc/tree/5877197dc03e844542cb8628dd52674a37ca6ff9/spec/ics-020-fungible-token-transfer). We -start by discussing [the model of the -protocol](#the-model-of-the-protocol). - Then this document provides links to our TLA+ formalization of [Properties and -invariants](#properties-and-invariants) that formalizes what a fungible -token transfer protocol is supposed to achieve. -After that we discuss how to [use the model](#using-the-model). - -## The Model of the Protocol - - Mirroring -the structure of the English specification, we start by discussing -initialization ([Port and Channel Setup & Channel lifecycle management](#port-and-channel-setup-and-channel-lifecycle-management)), and then provide the links to the TLA+ modules that -implement [packet relay](#packet-relay), that is, the core callback functions. - -As the application "fungible token transfer" uses the underlying IBC -infrastructure, we also modeled it to the extent necessary in [helper -modules](#helper-modules). - -### Port and Channel Setup and Channel lifecycle management - - -In the model we assume that the [`setup()`](https://github.com/cosmos/ibc/tree/5877197dc03e844542cb8628dd52674a37ca6ff9/spec/ics-020-fungible-token-transfer#port--channel-setup) function has been called -before. The [channel handshake -functions](https://github.com/cosmos/ibc/tree/5877197dc03e844542cb8628dd52674a37ca6ff9/spec/ics-020-fungible-token-transfer#channel-lifecycle-management) -are also assumed to have already been executed. Our -model starts from a state where the channel handshake has completed -successfully. - -### Packet Relay - -The [core callback functions](https://github.com/cosmos/ibc/tree/5877197dc03e844542cb8628dd52674a37ca6ff9/spec/ics-020-fungible-token-transfer#packet-relay) -`createOutgoingPacket()`, `onRecvPacket()`, `onAcknowledgePacket()` and - `onTimeoutPacket()`, as well as the auxiliary function `refundTokens()` - are modeled in - [ICS20FungibleTokenTransferHandlers.tla](ICS20FungibleTokenTransferHandlers.tla). - -### Helper modules - -In order to completely specify the behavior of fungible token -transfer, we encoded the required additional functionalities of IBC in -the TLA+ modules discussed below. From -the viewpoint of TLA+, [IBCTokenTransfer.tla](IBCTokenTransfer.tla) is -the main module that brings together all other modules that are -discussed here. We will discuss it last. - - -#### [ICS04PacketHandlers.tla](ICS04PacketHandlers.tla) - -This module captures the functions -specifying packet flow and handling from [ICS -04](https://github.com/cosmos/ibc/tree/5877197dc03e844542cb8628dd52674a37ca6ff9/spec/ics-004-channel-and-packet-semantics). - -#### [Bank.tla](Bank.tla) -The bank module encodes functions defined by the Cosmos bank - application. - -#### [Chain.tla](Chain.tla) - -This module captures the relevant - Cosmos SDK functionality, that is, the context in which token - transfer runs. In the complete TLA+ model it is instantiated twice, - once for each chain participating in the token transfer. - The transition relation is defined by - -```tla -Next == - \/ AdvanceChain - \/ HandlePacketDatagrams - \/ SendPacket - \/ AcknowledgePacket -``` - -- `AdvanceChain`: increments the height of the chain -- `HandlePacketDatagrams`: based on the datagram type of the next - incoming datagram (created in - [IBCTokenTransfer.tla](IBCTokenTransfer.tla); see below), it calls the - appropriate datagram handlers from ICS 04 - ([ICS04PacketHandlers.tla](ICS04PacketHandlers.tla)), which in turn call the - ICS 20 module callbacks specified in - [ICS20FungibleTokenTransferHandlers.tla](ICS20FungibleTokenTransferHandlers.tla). - This results in an update of the application state (bank accounts, - packet log, provable and private store). -- `SendPacket`: models that a user wants to initiate a transfer -- `AcknowledgePacket`: writes an acknowledgement for a received packet - on the packet log. - - -#### [IBCTokenTransfer.tla](IBCTokenTransfer.tla) -This is the main module that - brings everything together. It specifies a transition system - consisting of two chains ([Chain.tla](Chain.tla)) and a - relayer node (modelled here). -```tla -Next == - \/ ChainAction - \/ EnvironmentAction - \/ UNCHANGED vars -``` - -- `ChainAction` performs an action of one non-deterministically chosen - chain. - -- `EnvironmentAction` performs the relayer logic, that is, reads the - packet log and creates an appropriate datagram for the destination - chain (`CreateDatagrams`). - - -### Properties and invariants - -The English specification provides informal requirements as "[desired properties]( -https://github.com/cosmos/ibc/tree/5877197dc03e844542cb8628dd52674a37ca6ff9/spec/ics-020-fungible-token-transfer#desired-properties)". - -#### Preservation of fungibility - -We understand that for establishing "Preservation of fungibility" it -is sufficient to establish that if -some tokens have been transferred from chain A to chain B, and the receiver -on chain B wants to return them, then the tokens can be returned. - -For this we require the assumption (which is somewhat implicit in - its [correctness -argument](https://github.com/cosmos/ibc/tree/5877197dc03e844542cb8628dd52674a37ca6ff9/spec/ics-020-fungible-token-transfer#correctness)) that the source chain only performs valid transitions. - -This is implemented in the property `ICS20Prop` in the file [IBCTokenTransfer.tla](IBCTokenTransfer.tla). - - -#### Preservation of total supply - -We understand "Preservation of total supply" as the conjunction of two -properties: - -- For each native denomination of a chain: the sum of the amounts in - user accounts in this denomination and the amounts in escrow - accounts in this denomination is constant. - -The following intuitive property can only be specified and guaranteed -if all involved chains only perform valid transitions: - -- The amount in denomination *d* in escrow accounts in the chain in which *d* is native -is equal to the sum of: - * the amounts of in-flight packets in a (prefixed or unprefixed) denomination ending with *d* - * the amounts in accounts in a prefixed denomination ending with *d*, on chains in which *d* is -**not** native - -These two properties are implemented in the invariant `ICS20Inv` in the file -[IBCTokenTransfer.tla](IBCTokenTransfer.tla). - -#### No Whitelist - -This is a design requirement, and not a correctness property that can be expressed -in temporal logic. - - -#### Symmetric - -This is not a temporal property but a property on the local transition -relation. It is satisfied by construction (of both the code and the -model). - - -#### No Byzantine Inflation - -This should be implied by the first property of preservation of total -supply. This is under the assumption that the property found in ICS 20, -"Fault containment: prevents Byzantine-inflation of tokens originating -on chain A, as a result of chain B’s Byzantine behavior (though any -users who sent tokens to chain B may be at risk)." is purely -understood in terms of inflation **on chain A**. - -We note that chain B can send an unbounded amount of tokens that it -claims originate from A to some chain C. - - -## Using the Model - - -### Constants - -The module `IBCTokenTransfer.tla` is parameterized by the constants: - - `MaxHeight`, a natural number denoting the maximal height of the chains, - - `MaxPacketSeq`, a natural number denoting the maximal packet sequence number, - - `MaxBalance`, a natural number denoting the maximal bank account balance, - - `NativeDenominationChainA`, a string denoting the native denomination of `ChainA`, - - `NativeDenominationChainB`, a string denoting the native denomination of `ChainB`. - - We assume that the native denominations of the chains are different. - - -### Importing the specification into TLA+ toolbox - -To import the specification into the TLA+ toolbox and run TLC: - - add a new spec in TLA+ toolbox with the root-module file `IBCTokenTransfer.tla` - - create a model - - assign a value to the constants (example values can be found in `IBCTokenTransfer.cfg`) - - choose "Temporal formula" as the behavior spec, and use the formula `Spec` - - run TLC on the model - -#### Basic checks with TLC - -We ran TLC using the constants defined in `IBCTokenTransfer.cfg` and verified the invariants `TypeOK` and `ICS20Inv` in 1min21s and the property `ICS20Prop` in 9min34s. -We note that the specification currently models two transfers: one from `ChainA` to `ChainB`, and vice versa, in their respective native denominations. -Both chains are correct, and there is no malicious relayer. -The relayer implements the logic from [ICS 18](https://github.com/cosmos/ibc/tree/5877197dc03e844542cb8628dd52674a37ca6ff9/spec/ics-018-relayer-algorithms), in particular, it does not -relay timeouts. -However, the packet timeout handlers are specified in [`ICS04PacketHandlers.tla`](ICS04PacketHandlers.tla) -for future use. - -#### Apalache - -The specification contains type annotations for the -model checker [Apalache](https://github.com/informalsystems/apalache). -The specification passes the type check using the type checker [Snowcat](https://apalache.informal.systems/docs/apalache/typechecker-snowcat.html) -integrated in Apalache.
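For orientation, a TLC model configuration along the lines just described might look as follows. This is an illustrative sketch only: the actual `IBCTokenTransfer.cfg` is not reproduced in this patch, so its layout is an assumption; the constant values are taken from `MC_IBCTokenTransfer.tla` above, and the invariant and property names are the `TypeOK`, `ICS20Inv`, and `ICS20Prop` operators mentioned in the README.

```
\* Illustrative TLC configuration (assumed layout; the real IBCTokenTransfer.cfg may differ).
\* The README recommends using the temporal formula Spec as the behavior spec.
SPECIFICATION Spec

CONSTANTS
    MaxHeight = 5
    MaxPacketSeq = 5
    MaxBalance = 5
    NativeDenominationChainA = "atom"
    NativeDenominationChainB = "eth"

INVARIANTS
    TypeOK
    ICS20Inv

PROPERTIES
    ICS20Prop
```

An `INIT Init` / `NEXT Next` style configuration (as used by `IBCCore.cfg` later in this patch) would suffice for the invariants alone, but the temporal property is checked against the full `Spec` formula, consistent with the README's instruction to choose "Temporal formula" with `Spec` in the Toolbox.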
diff --git a/docs/spec/tla/ibc-core/Chain.tla b/docs/spec/tla/ibc-core/Chain.tla deleted file mode 100644 index 500c985dec..0000000000 --- a/docs/spec/tla/ibc-core/Chain.tla +++ /dev/null @@ -1,290 +0,0 @@ ----------------------------- MODULE Chain ---------------------------- - -(*************************************************************************** - This module models the behavior of a chain running the IBC Core Protocols. - - The chain state is represented by a chain store, which is a snapshot of the - provable and private stores, to the extent necessary for IBC. Additionally, - a chain has dedicated datagram containers for: - 1. client, connection, and channel datagrams (given by a set of datagrams), - 2. packet datagrams (given by a queue of datagrams that models the order - in which the datagrams were submitted by the relayer). - -***************************************************************************) - -EXTENDS Integers, FiniteSets, IBCCoreDefinitions, - ICS02ClientHandlers, ICS03ConnectionHandlers, - ICS04ChannelHandlers, ICS04PacketHandlers - -CONSTANTS - MaxHeight, \* maximal chain height - ChainID, \* chain identifier - ChannelOrdering, \* indicate whether the channels are ordered or unordered - MaxVersion, \* maximal connection / channel version (we assume versions are integers) - MaxPacketSeq \* maximal packet sequence number - -VARIABLES - chainStore, \* chain store, containing client heights, a connection end, a channel end - incomingDatagrams, \* set of incoming datagrams - incomingPacketDatagrams, \* sequence of incoming packet datagrams - history, \* history variable - packetLog, \* packet log - appPacketSeq \* packet sequence number from the application on the chain - -vars == <> -Heights == 1..MaxHeight \* set of possible heights of the chains in the system -Versions == 1..MaxVersion \* set of possible connection versions - -(*************************************************************************** - Client update operators - ***************************************************************************) -\* Update the clients on chain with chainID, -\* using the client datagrams generated by the relayer -\* (Handler operators defined in ICS02ClientHandlers.tla) -LightClientUpdate(chainID, store, datagrams) == - \* create clients - LET clientCreatedStore == HandleCreateClient(chainID, store, datagrams) IN - \* update clients - LET clientUpdatedStore == HandleClientUpdate(chainID, clientCreatedStore, datagrams) IN - - clientUpdatedStore - -(*************************************************************************** - Connection update operators - ***************************************************************************) -\* Update the connections on chain with chainID, -\* using the connection datagrams generated by the relayer -\* (Handler operators defined in ICS03ConnectionHandlers.tla) -ConnectionUpdate(chainID, store, datagrams) == - \* update the chain store with "ConnOpenInit" datagrams - LET connOpenInitStore == HandleConnOpenInit(chainID, store, datagrams) IN - - \* update the chain store with "ConnOpenTry" datagrams - LET connOpenTryStore == HandleConnOpenTry(chainID, connOpenInitStore, datagrams) IN - - \* update the chain store with "ConnOpenAck" datagrams - LET connOpenAckStore == HandleConnOpenAck(chainID, connOpenTryStore, datagrams) IN - - \* update the chain store with "ConnOpenConfirm" datagrams - LET connOpenConfirmStore == HandleConnOpenConfirm(chainID, connOpenAckStore, datagrams) IN - - \* output the updated chain store - 
connOpenConfirmStore - -(*************************************************************************** - Channel update operators - ***************************************************************************) -\* Update the channel on chain with chainID, -\* using the channel datagrams generated by the relayer -\* (Handler operators defined in ICS04ChannelHandlers.tla) -ChannelUpdate(chainID, store, datagrams) == - \* update the chain store with "ChanOpenInit" datagrams - LET chanOpenInitStore == HandleChanOpenInit(chainID, store, datagrams) IN - - \* update the chain store with "ChanOpenTry" datagrams - LET chanOpenTryStore == HandleChanOpenTry(chainID, chanOpenInitStore, datagrams) IN - - \* update the chain store with "ChanOpenAck" datagrams - LET chanOpenAckStore == HandleChanOpenAck(chainID, chanOpenTryStore, datagrams) IN - - \* update the chain store with "ChanOpenConfirm" datagrams - LET chanOpenConfirmStore == HandleChanOpenConfirm(chainID, chanOpenAckStore, datagrams) IN - - \* update the chain store with "ChanCloseInit" datagrams - LET chanCloseInitStore == HandleChanCloseInit(chainID, chanOpenConfirmStore, datagrams) IN - - \* update the chain store with "ChanCloseConfirm" datagrams - LET chanCloseConfirmStore == HandleChanCloseConfirm(chainID, chanCloseInitStore, datagrams) IN - - chanCloseConfirmStore - -(*************************************************************************** - Packet update operators - ***************************************************************************) -\* Update the chain store of the chain with chainID and the packet log, -\* using the packet datagrams generated by the relayer -\* (Handler operators defined in ICS04PacketHandlers.tla) -\* @type: (Str, CHAINSTORE, Seq(DATAGRAM), Seq(LOGENTRY)) => [chainStore: CHAINSTORE, packetLog: Seq(LOGENTRY)]; -PacketUpdate(chainID, store, packetDatagrams, log) == - \* if the sequence of packet datagrams is not empty - IF packetDatagrams /= <<>> - THEN \* process the packet datagram at the head of the sequence - LET packetDatagram == Head(packetDatagrams) IN - LET packet == packetDatagram.packet IN - \* get the new updated store and packet log entry - LET newStoreAndLog == - IF packetDatagram.type = "PacketRecv" - THEN HandlePacketRecv(chainID, store, packetDatagram, log) - ELSE IF packetDatagram.type = "PacketAck" - THEN HandlePacketAck(chainID, store, packetDatagram, log) - ELSE [chainStore|-> store, packetLogEntry |-> log] IN - newStoreAndLog - ELSE [chainStore |-> store, packetLog |->log] - -(*************************************************************************** - Chain update operators - ***************************************************************************) -\* Update chainID with the received datagrams -\* Supports ICS02 (Clients), ICS03 (Connections), and ICS04 (Channels & Packets). 
-UpdateChainStoreAndPacketLog(chainID, chain, datagrams, packetDatagrams, log) == - - \* ICS02: Client updates - LET clientUpdatedStore == LightClientUpdate(chainID, chain, datagrams) IN - - \* ICS03: Connection updates - LET connectionUpdatedStore == ConnectionUpdate(chainID, clientUpdatedStore, datagrams) IN - - \* ICS04: Channel updates - LET channelUpdatedStore == ChannelUpdate(chainID, connectionUpdatedStore, datagrams) IN - - \* ICS04: Packet transmission - LET packetUpdatedStoreAndLog == PacketUpdate(chainID, channelUpdatedStore, packetDatagrams, log) IN - LET packetUpdatedStore == packetUpdatedStoreAndLog.chainStore IN - - \* update height - LET updatedChainStore == - IF /\ chainStore /= packetUpdatedStore - /\ chainStore.height + 1 \in Heights - THEN [packetUpdatedStore EXCEPT !.height = chainStore.height + 1] - ELSE packetUpdatedStore - IN - - [chainStore |-> updatedChainStore, - packetLog |-> packetUpdatedStoreAndLog.packetLog] - -(*************************************************************************** - Chain actions - ***************************************************************************) -\* Advance the height of the chain until MaxHeight is reached -AdvanceChain == - /\ chainStore.height + 1 \in Heights - /\ chainStore' = [chainStore EXCEPT !.height = chainStore.height + 1] - /\ UNCHANGED <> - /\ UNCHANGED <> - -\* Send a packet -SendPacket == - \* enabled if appPacketSeq is not bigger than MaxPacketSeq - /\ appPacketSeq <= MaxPacketSeq - \* Create a packet: Abstract away from packet data, ports, and timestamp. - \* Assume timeoutHeight is MaxHeight - /\ LET packet == [ - sequence |-> appPacketSeq, - timeoutHeight |-> MaxHeight, - srcPortID |-> chainStore.connectionEnd.channelEnd.portID, - srcChannelID |-> chainStore.connectionEnd.channelEnd.channelID, - dstPortID |-> chainStore.connectionEnd.channelEnd.counterpartyPortID, - dstChannelID |-> chainStore.connectionEnd.channelEnd.counterpartyChannelID - ] IN - LET updatedChainStore == WritePacketCommitment(chainStore, packet) IN - \* if writing the packet commitment was successful - /\ chainStore /= updatedChainStore - \* update chain store with packet committment - /\ chainStore' = updatedChainStore - \* log sent packet - /\ packetLog' = Append(packetLog, [ - type |-> "PacketSent", - srcChainID |-> ChainID, - sequence |-> packet.sequence, - timeoutHeight |-> packet.timeoutHeight - ]) - \* increase application packet sequence - /\ appPacketSeq' = appPacketSeq + 1 - /\ UNCHANGED <> - -\* write a packet acknowledgement on the packet log and chain store -AcknowledgePacket == - /\ chainStore.packetsToAcknowledge /= <<>> - /\ chainStore' = WriteAcknowledgement(chainStore, Head(chainStore.packetsToAcknowledge)) - /\ packetLog' = LogAcknowledgement(ChainID, chainStore, packetLog, Head(chainStore.packetsToAcknowledge)) - /\ UNCHANGED <> - /\ UNCHANGED appPacketSeq - -\* Handle the datagrams and update the chain state -HandleIncomingDatagrams == - /\ \/ incomingDatagrams /= {} - \/ incomingPacketDatagrams /= <<>> - /\ LET updatedChainStoreAndPacketLog == - UpdateChainStoreAndPacketLog(ChainID, chainStore, incomingDatagrams, incomingPacketDatagrams, packetLog) IN - /\ chainStore' = updatedChainStoreAndPacketLog.chainStore - /\ packetLog' = updatedChainStoreAndPacketLog.packetLog - /\ incomingDatagrams' = {} - /\ incomingPacketDatagrams' = IF incomingPacketDatagrams /= <<>> - THEN Tail(incomingPacketDatagrams) - ELSE incomingPacketDatagrams - /\ history' = CASE chainStore'.connectionEnd.state = "INIT" - -> [history EXCEPT 
!.connInit = TRUE] - [] chainStore'.connectionEnd.state = "TRYOPEN" - -> [history EXCEPT !.connTryOpen = TRUE] - [] chainStore'.connectionEnd.state = "OPEN" - -> [history EXCEPT !.connOpen = TRUE] - [] chainStore'.connectionEnd.channelEnd.state = "INIT" - -> [history EXCEPT !.chanInit = TRUE] - [] chainStore'.connectionEnd.channelEnd.state = "TRYOPEN" - -> [history EXCEPT !.chanTryOpen = TRUE] - [] chainStore'.connectionEnd.channelEnd.state = "OPEN" - -> [history EXCEPT !.chanOpen = TRUE] - [] chainStore'.connectionEnd.channelEnd.state = "CLOSED" - -> [history EXCEPT !.chanClosed = TRUE] - [] OTHER - -> history - /\ UNCHANGED appPacketSeq - -(*************************************************************************** - Specification - ***************************************************************************) -\* Initial state predicate -\* Initially -\* - each chain is initialized to some element of the set -\* InitChainStores (defined in IBCCoreDefinitions.tla) -\* - pendingDatagrams for each chain is empty -\* - the packetSeq is set to 1 -Init == - /\ chainStore \in InitChainStore(Versions, ChannelOrdering) - /\ incomingDatagrams = {} - /\ incomingPacketDatagrams = <<>> - /\ history = InitHistory - /\ appPacketSeq = 1 - -\* Next state action -\* The chain either -\* - advances its height -\* - receives datagrams and updates its state -\* - sends a packet if the appPacketSeq is not bigger than MaxPacketSeq -\* - acknowledges a packet -Next == - \/ AdvanceChain - \/ HandleIncomingDatagrams - \/ SendPacket - \/ AcknowledgePacket - \/ UNCHANGED vars - -Fairness == - /\ WF_vars(Next) - -(*************************************************************************** - Invariants - ***************************************************************************) -\* Type invariant -\* ChainStores, Datagrams, PacketLogEntries are defined in IBCCoreDefinitions.tla -TypeOK == - /\ chainStore \in ChainStores(Heights, ChannelOrdering, MaxPacketSeq, Versions) - /\ incomingDatagrams \in SUBSET Datagrams(Heights, MaxPacketSeq, Versions) - /\ incomingPacketDatagrams \in Seq(Datagrams(Heights, MaxPacketSeq, Versions)) - /\ history \in Histories - /\ appPacketSeq \in 1..(MaxPacketSeq + 1) - /\ packetLog \in Seq(PacketLogEntries(Heights, MaxPacketSeq)) - -(*************************************************************************** - Properties - ***************************************************************************) -\* it ALWAYS holds that the height of the chain does not EVENTUALLY decrease -HeightDoesntDecrease == - [](\A h \in Heights : chainStore.height = h - => <>(chainStore.height >= h)) - -============================================================================= -\* Modification History -\* Last modified Fri Feb 05 13:46:33 CET 2021 by ilinastoilkovska -\* Created Fri Jun 05 16:56:21 CET 2020 by ilinastoilkovska diff --git a/docs/spec/tla/ibc-core/IBCCore.cfg b/docs/spec/tla/ibc-core/IBCCore.cfg deleted file mode 100644 index 180958f961..0000000000 --- a/docs/spec/tla/ibc-core/IBCCore.cfg +++ /dev/null @@ -1,24 +0,0 @@ -CONSTANTS - MaxHeight = 2 - MaxVersion = 1 - MaxPacketSeq = 1 - ClientDatagramsRelayer1 = TRUE - ClientDatagramsRelayer2 = FALSE - ConnectionDatagramsRelayer1 = TRUE - ConnectionDatagramsRelayer2 = FALSE - ChannelDatagramsRelayer1 = TRUE - ChannelDatagramsRelayer2 = FALSE - PacketDatagramsRelayer1 = TRUE - PacketDatagramsRelayer2 = FALSE - ChannelOrdering = "UNORDERED" - -INIT Init -NEXT Next - -INVARIANTS - TypeOK - IBCInv - -\* PROPERTIES -\* IBCSafety -\* IBCDelivery \ No 
newline at end of file diff --git a/docs/spec/tla/ibc-core/IBCCore.tla b/docs/spec/tla/ibc-core/IBCCore.tla deleted file mode 100644 index 2c3447bc60..0000000000 --- a/docs/spec/tla/ibc-core/IBCCore.tla +++ /dev/null @@ -1,610 +0,0 @@ ------------------------------- MODULE IBCCore ------------------------------ - -(*************************************************************************** - A TLA+ specification of the IBC Core protocols (ICS02, ICS03, ICS04, ICS18). - This module is the main module in the specification and models a - system consisting of two chains and two relayers. - - The model allows to express concurrency aspects of a system with multiple - (correct) relayers. The specification is written in a modular way, in order - to facilitate future formal verification of properties and invariants in - an adversarial setting. - - The specification also contains type annotations for the model checker - Apalache. - ***************************************************************************) - -EXTENDS Integers, FiniteSets, Sequences, IBCCoreDefinitions - -CONSTANTS - \* @type: Int; - MaxHeight, \* maximal height of all the chains in the system - \* @type: Int; - MaxVersion, \* maximal connection / channel version (we assume versions are integers) - \* @type: Int; - MaxPacketSeq, \* maximal packet sequence number - \* @type: Bool; - ClientDatagramsRelayer1, \* toggle generation of client datagrams for Relayer1 - \* @type: Bool; - ClientDatagramsRelayer2, \* toggle generation of client datagrams for Relayer2 - \* @type: Bool; - ConnectionDatagramsRelayer1, \* toggle generation of connection datagrams for Relayer1 - \* @type: Bool; - ConnectionDatagramsRelayer2, \* toggle generation of connection datagrams for Relayer2 - \* @type: Bool; - ChannelDatagramsRelayer1, \* toggle generation of channel datagrams for Relayer1 - \* @type: Bool; - ChannelDatagramsRelayer2, \* toggle generation of channel datagrams for Relayer2 - \* @type: Bool; - PacketDatagramsRelayer1, \* toggle generation of packet datagrams for Relayer1 - \* @type: Bool; - PacketDatagramsRelayer2, \* toggle generation of packet datagrams for Relayer2 - \* @type: Str; - ChannelOrdering \* indicate whether the channels are ordered or unordered - -VARIABLES - \* @type: CHAINSTORE; - chainAstore, \* chain store of ChainA - \* @type: CHAINSTORE; - chainBstore, \* chain store of ChainB - \* @type: Set(DATAGRAM); - incomingDatagramsChainA, \* set of (client, connection, channel) datagrams incoming to ChainA - \* @type: Set(DATAGRAM); - incomingDatagramsChainB, \* set of (client, connection, channel) datagrams incoming to ChainB - \* @type: Seq(DATAGRAM); - incomingPacketDatagramsChainA, \* sequence of packet datagrams incoming to ChainA - \* @type: Seq(DATAGRAM); - incomingPacketDatagramsChainB, \* sequence of packet datagrams incoming to ChainB - \* @type: Str -> Int; - relayer1Heights, \* the client heights of Relayer1 - \* @type: Str -> Int; - relayer2Heights, \* the client heights of Relayer2 - \* @type: Str -> Set(DATAGRAM); - outgoingDatagrams, \* sets of (client, connection, channel) datagrams outgoing of the relayers - \* @type: Str -> Seq(DATAGRAM); - outgoingPacketDatagrams, \* sequences of packet datagrams outgoing of the relayers - \* @type: Bool; - closeChannelA, \* flag that triggers closing of the channel end at ChainA - \* @type: Bool; - closeChannelB, \* flag that triggers closing of the channel end at ChainB - \* @type: HISTORY; - historyChainA, \* history variables for ChainA - \* @type: HISTORY; - 
historyChainB, \* history variables for ChainB - \* @type: Seq(LOGENTRY); - packetLog, \* packet log - \* @type: Int; - appPacketSeqChainA, \* packet sequence number from the application on ChainA - \* @type: Int; - appPacketSeqChainB \* packet sequence number from the application on ChainB - -vars == <> - -chainAvars == <> -chainBvars == <> -relayerVars == <> -Heights == 1..MaxHeight \* set of possible heights of the chains in the system - - -(*************************************************************************** - Instances of Relayer and Chain - ***************************************************************************) - -\* We suppose there are two correct relayers in the system, Relayer1 and Relayer2 -\* Relayer1 -- Instance of ICS18Relayer.tla -Relayer1 == INSTANCE ICS18Relayer - WITH GenerateClientDatagrams <- ClientDatagramsRelayer1, - GenerateConnectionDatagrams <- ConnectionDatagramsRelayer1, - GenerateChannelDatagrams <- ChannelDatagramsRelayer1, - GeneratePacketDatagrams <- PacketDatagramsRelayer1, - relayerHeights <- relayer1Heights - -\* Relayer2 -- Instance of ICS18Relayer.tla -Relayer2 == INSTANCE ICS18Relayer - WITH GenerateClientDatagrams <- ClientDatagramsRelayer2, - GenerateConnectionDatagrams <- ConnectionDatagramsRelayer2, - GenerateChannelDatagrams <- ChannelDatagramsRelayer2, - GeneratePacketDatagrams <- PacketDatagramsRelayer2, - relayerHeights <- relayer2Heights - -\* We suppose there are two chains that communicate, ChainA and ChainB -\* ChainA -- Instance of Chain.tla -ChainA == INSTANCE Chain - WITH ChainID <- "chainA", - chainStore <- chainAstore, - incomingDatagrams <- incomingDatagramsChainA, - incomingPacketDatagrams <- incomingPacketDatagramsChainA, - history <- historyChainA, - appPacketSeq <- appPacketSeqChainA - -\* ChainB -- Instance of Chain.tla -ChainB == INSTANCE Chain - WITH ChainID <- "chainB", - chainStore <- chainBstore, - incomingDatagrams <- incomingDatagramsChainB, - incomingPacketDatagrams <- incomingPacketDatagramsChainB, - history <- historyChainB, - appPacketSeq <- appPacketSeqChainB - -(*************************************************************************** - Component actions - ***************************************************************************) - -\* RelayerAction: either correct relayer takes a step, leaving the other -\* variables unchanged -RelayerAction == - \/ /\ Relayer1!Next - /\ UNCHANGED chainAvars - /\ UNCHANGED chainBvars - /\ UNCHANGED relayer2Heights - /\ UNCHANGED <> - \/ /\ Relayer2!Next - /\ UNCHANGED chainAvars - /\ UNCHANGED chainBvars - /\ UNCHANGED relayer1Heights - /\ UNCHANGED <> - -\* ChainAction: either chain takes a step, leaving the other -\* variables unchanged -ChainAction == - \/ /\ ChainA!Next - /\ UNCHANGED chainBvars - /\ UNCHANGED relayerVars - /\ UNCHANGED <> - \/ /\ ChainB!Next - /\ UNCHANGED chainAvars - /\ UNCHANGED relayerVars - /\ UNCHANGED <> - -(*************************************************************************** - IBCCore Environment actions - ***************************************************************************) -\* Submit datagrams from relayers to chains -SubmitDatagrams == - /\ incomingDatagramsChainA' = incomingDatagramsChainA \union outgoingDatagrams["chainA"] - /\ incomingDatagramsChainB' = incomingDatagramsChainB \union outgoingDatagrams["chainB"] - /\ outgoingDatagrams' = [chainID \in ChainIDs |-> {}] - /\ incomingPacketDatagramsChainA' = incomingPacketDatagramsChainA \o outgoingPacketDatagrams["chainA"] - /\ incomingPacketDatagramsChainB' = 
incomingPacketDatagramsChainB \o outgoingPacketDatagrams["chainB"] - /\ outgoingPacketDatagrams' = [chainID \in ChainIDs |-> <<>>] - /\ UNCHANGED <> - /\ UNCHANGED <> - /\ UNCHANGED <> - /\ UNCHANGED <> - -\* Non-deterministically set channel closing flags -CloseChannels == - \/ /\ closeChannelA = FALSE - /\ closeChannelA' \in BOOLEAN - /\ UNCHANGED <> - /\ UNCHANGED <> - /\ UNCHANGED closeChannelB - /\ UNCHANGED <> - /\ UNCHANGED <> - /\ UNCHANGED <> - \/ /\ closeChannelB = FALSE - /\ closeChannelB' \in BOOLEAN - /\ UNCHANGED <> - /\ UNCHANGED <> - /\ UNCHANGED closeChannelA - /\ UNCHANGED <> - /\ UNCHANGED <> - /\ UNCHANGED <> - -EnvironmentAction == - \/ SubmitDatagrams - \/ CloseChannels - -(*************************************************************************** - Specification - ***************************************************************************) -\* Initial state predicate -Init == - /\ ChainA!Init - /\ ChainB!Init - /\ Relayer1!Init - /\ Relayer2!Init - /\ closeChannelA = FALSE - /\ closeChannelB = FALSE - /\ packetLog = <<>> - -\* Next state action -Next == - \/ ChainAction - \/ RelayerAction - \/ EnvironmentAction - \/ UNCHANGED vars - -\* Fairness constraint -Fairness == - /\ WF_vars(SubmitDatagrams) - /\ ChainA!Fairness - /\ ChainB!Fairness - /\ Relayer1!Fairness - /\ Relayer2!Fairness - /\ <>[]closeChannelA - /\ <>[]closeChannelB - -\* Specification formula -Spec == Init /\ [][Next]_vars /\ Fairness - -(*************************************************************************** - Invariants - ***************************************************************************) - -\* Type invariant -TypeOK == - /\ ChainA!TypeOK - /\ ChainB!TypeOK - /\ Relayer1!TypeOK - /\ Relayer2!TypeOK - /\ closeChannelA \in BOOLEAN - /\ closeChannelB \in BOOLEAN - -(*************************************************************************** - Helper operators used in properties - ***************************************************************************) -\* get chain store by ID -\* @type: (Str) => CHAINSTORE; -GetChainByID(chainID) == - IF chainID = "chainA" - THEN chainAstore - ELSE chainBstore - -\* returns true if there is a "ClientUpdate" datagram -\* in the incoming datagrams for chainID -IsClientUpdateInIncomingDatagrams(chainID, h) == - LET clID == GetCounterpartyClientID(chainID) IN - IF chainID = "chainA" - THEN [type |-> "ClientUpdate", clientID |-> clID, height |-> h] - \in incomingDatagramsChainA - ELSE [type |-> "ClientUpdate", clientID |-> clID, height |-> h] - \in incomingDatagramsChainB - -\* returns true if there is a "ClientUpdate" datagram -\* in the outgoing datagrams for chainID -IsClientUpdateInOutgoingDatagrams(chainID, h) == - LET clID == GetCounterpartyClientID(chainID) IN - [type |-> "ClientUpdate", clientID |-> clID, height |-> h] - \in outgoingDatagrams[chainID] - -\* returns true if there is a "ConnOpenInit" datagram -\* in outgoing datagrams for chainID -IsConnOpenInitInOutgoingDatagrams(chainID) == - LET clID == GetClientID(chainID) IN - LET counterpartyClID == GetCounterpartyClientID(chainID) IN - LET connID == GetConnectionID(chainID) IN - LET counterpartyConnID == GetCounterpartyConnectionID(chainID) IN - - [type |-> "ConnOpenInit", - connectionID |-> connID, - clientID |-> clID, - counterpartyConnectionID |-> counterpartyConnID, - counterpartyClientID |-> counterpartyClID] \in outgoingDatagrams[chainID] - -\* returns true if there is a "ChanOpenInit" datagram -\* in outgoing datagrams for chainID -IsChanOpenInitInOutgoingDatagrams(chainID) == - LET 
chanID == GetChannelID(chainID) IN - LET counterpartyChanID == GetCounterpartyChannelID(chainID) IN - [type |-> "ChanOpenInit", - channelID |-> chanID, - counterpartyChannelID |-> counterpartyChanID] \in outgoingDatagrams[chainID] - -\* returns true if there is a "ChanCloseInit" datagram -\* in outgoing datagrams for chainID -IsChanCloseInitInOutgoingDatagrams(chainID) == - LET chanID == GetChannelID(chainID) IN - [type |-> "ChanCloseInit", - channelID |-> chanID] \in outgoingDatagrams[chainID] - - ----------------------------------------------------------------------------- -(*************************************************************************** - Invariants & Properties - ***************************************************************************) -(*************************************************************************** - Invariants: connection datagrams - ***************************************************************************) -\* once connInit is set to TRUE in the history variable, -\* the connection never goes to UNINIT -ConnectionInitInv == - /\ historyChainA.connInit => ~IsConnectionUninit(chainAstore) - /\ historyChainB.connInit => ~IsConnectionUninit(GetChainByID("chainB")) - -\* once connTryOpen is set to TRUE in the history variable, -\* the connection never goes to UNINIT -ConnectionTryOpenInv == - /\ historyChainA.connTryOpen => ~IsConnectionUninit(chainAstore) - /\ historyChainB.connTryOpen => ~IsConnectionUninit(GetChainByID("chainB")) - -\* once connOpen is set to TRUE in the history variable, -\* the connection never goes to UNINIT, INIT, or TRYOPEN -ConnectionOpenInv == - /\ historyChainA.connOpen => (/\ ~IsConnectionUninit(chainAstore) - /\ ~IsConnectionInit(chainAstore) - /\ ~IsConnectionTryOpen(chainAstore)) - /\ historyChainB.connOpen => (/\ ~IsConnectionUninit(GetChainByID("chainB")) - /\ ~IsConnectionInit(GetChainByID("chainB")) - /\ ~IsConnectionTryOpen(GetChainByID("chainB"))) - -(*************************************************************************** - Invariants: channel datagrams - ***************************************************************************) -\* once chanInit is set to TRUE in the history variable, -\* the channel never goes to UNINIT -ChannelInitInv == - /\ historyChainA.chanInit => ~IsChannelUninit(chainAstore) - /\ historyChainB.chanInit => ~IsChannelUninit(chainBstore) - -\* once chanTryOpen is set to TRUE in the history variable, -\* the channel never goes to UNINIT -ChannelTryOpenInv == - /\ historyChainA.chanTryOpen => ~IsChannelUninit(chainAstore) - /\ historyChainB.chanTryOpen => ~IsChannelUninit(chainBstore) - -\* once chanOpen is set to TRUE in the history variable, -\* the channel never goes to UNINIT, INIT, or TRYOPEN -ChannelOpenInv == - /\ historyChainA.chanOpen => (/\ ~IsChannelUninit(chainAstore) - /\ ~IsChannelInit(chainAstore) - /\ ~IsChannelTryOpen(chainAstore)) - /\ historyChainB.chanOpen => (/\ ~IsChannelUninit(chainBstore) - /\ ~IsChannelInit(chainBstore) - /\ ~IsChannelTryOpen(chainBstore)) - -\* once chanClosed is set to TRUE in the history variable, -\* the channel never goes to UNINIT, INIT, TRYOPEN, or OPEN -ChannelCloseInv == - /\ historyChainA.chanClosed => (/\ ~IsChannelUninit(chainAstore) - /\ ~IsChannelInit(chainAstore) - /\ ~IsChannelTryOpen(chainAstore) - /\ ~IsChannelOpen(chainAstore)) - /\ historyChainB.chanClosed => (/\ ~IsChannelUninit(chainBstore) - /\ ~IsChannelInit(chainBstore) - /\ ~IsChannelTryOpen(chainBstore) - /\ ~IsChannelOpen(chainBstore)) - 
-(*************************************************************************** - Invariant [IBCInv] - ***************************************************************************) -\* IBCInv invariant: conjunction of invariants -IBCInv == - \* at least one relayer creates connection datagrams - /\ (ConnectionDatagramsRelayer1 \/ ConnectionDatagramsRelayer2) - => /\ ConnectionInitInv - /\ ConnectionTryOpenInv - /\ ConnectionOpenInv - \* at least one relayer creates channel datagrams - /\ (ChannelDatagramsRelayer1 \/ ChannelDatagramsRelayer2) - => /\ ChannelInitInv - /\ ChannelTryOpenInv - /\ ChannelOpenInv - /\ ChannelCloseInv - - -(*************************************************************************** - Safety: client datagrams - ***************************************************************************) - -\* it ALWAYS holds that, for every chainID and every height h: -\* - if -\* * there is a "ClientUpdate" datagram for chainID and height h and -\* * the height h is smaller than the maximal counterparty client height -\* at chainID -\* - then -\* * the height h is NEVER added to the counterparty client heights -\* -\* Note: this property does not hold when it is allowed to install older headers -ClientUpdateSafety == - [](\A chainID \in ChainIDs : \A h \in Heights : - (/\ IsClientUpdateInIncomingDatagrams(chainID, h) - /\ h < GetMaxCounterpartyClientHeight(GetChainByID(chainID))) - => [](~IsCounterpartyClientHeightOnChain(GetChainByID(chainID), h))) - -(*************************************************************************** - Safety: connection datagrams - ***************************************************************************) - -\* it ALWAYS holds that, for every chainID -\* - if -\* * the connection end is in INIT -\* - then -\* * it NEVER goes to UNINIT -ConnectionInitSafety == - [](\A chainID \in ChainIDs: - /\ IsConnectionInit(GetChainByID(chainID)) - => [](~IsConnectionUninit(GetChainByID(chainID)))) - -\* it ALWAYS holds that, for every chainID -\* - if -\* * the connection end is in TRYOPEN -\* - then -\* * it NEVER goes to UNINIT ] -ConnectionTryOpenSafety == - [](\A chainID \in ChainIDs: - /\ IsConnectionTryOpen(GetChainByID(chainID)) - => [](~IsConnectionUninit(GetChainByID(chainID)))) - -\* it ALWAYS holds that, for every chainID -\* - if -\* * the connection end is in OPEN -\* - then -\* * it NEVER goes to UNINIT, INIT, or TRYOPEN -ConnectionOpenSafety == - [](\A chainID \in ChainIDs: - /\ IsConnectionOpen(GetChainByID(chainID)) - => [](/\ ~IsConnectionUninit(GetChainByID(chainID)) - /\ ~IsConnectionInit(GetChainByID(chainID)) - /\ ~IsConnectionTryOpen(GetChainByID(chainID)))) - -(*************************************************************************** - Safety: channels datagrams - ***************************************************************************) - -\* it ALWAYS holds that, for every chainID -\* - if -\* * the channel end is in INIT -\* - then -\* * it NEVER goes to UNINIT -ChannelInitSafety == - [](\A chainID \in ChainIDs: - /\ IsChannelInit(GetChainByID(chainID)) - => [](~IsChannelUninit(GetChainByID(chainID)))) - -\* it ALWAYS holds that, for every chainID -\* - if -\* * the channel end is in TRYOPEN -\* - then -\* * it NEVER goes to UNINIT -ChannelTryOpenSafety == - [](\A chainID \in ChainIDs: - /\ IsChannelTryOpen(GetChainByID(chainID)) - => [](~IsChannelUninit(GetChainByID(chainID)))) - -\* it ALWAYS holds that, for every chainID -\* - if -\* * the channel end is in OPEN -\* - then -\* * it NEVER goes to UNINIT, INIT, or TRYOPEN 
-ChannelOpenSafety == - [](\A chainID \in ChainIDs: - /\ IsChannelOpen(GetChainByID(chainID)) - => [](/\ ~IsChannelUninit(GetChainByID(chainID)) - /\ ~IsChannelInit(GetChainByID(chainID)) - /\ ~IsChannelTryOpen(GetChainByID(chainID)))) - -\* it ALWAYS holds that, for every chainID -\* - if -\* * the channel end is in CLOSED -\* - then -\* * it NEVER goes to UNINIT, INIT, TRYOPEN, or OPEN -ChannelCloseSafety == - [](\A chainID \in ChainIDs: - /\ IsChannelClosed(GetChainByID(chainID)) - => [](/\ ~IsChannelUninit(GetChainByID(chainID)) - /\ ~IsChannelInit(GetChainByID(chainID)) - /\ ~IsChannelTryOpen(GetChainByID(chainID)) - /\ ~IsChannelOpen(GetChainByID(chainID)))) - -(*************************************************************************** - Safety [IBCSafety]: - Bad datagrams are not used to update the chain stores - ***************************************************************************) -\* IBCSafety property: conjunction of safety properties -IBCSafety == - \* at least one relayer creates client datagrams - /\ (ClientDatagramsRelayer1 \/ ClientDatagramsRelayer2) - => ClientUpdateSafety - \* at least one relayer creates connection datagrams - /\ (ConnectionDatagramsRelayer1 \/ ConnectionDatagramsRelayer2) - => /\ ConnectionInitSafety - /\ ConnectionTryOpenSafety - /\ ConnectionOpenSafety - \* at least one relayer creates channel datagrams - /\ (ChannelDatagramsRelayer1 \/ ChannelDatagramsRelayer2) - => /\ ChannelInitSafety - /\ ChannelTryOpenSafety - /\ ChannelOpenSafety - /\ ChannelCloseSafety - -(*************************************************************************** - Liveness: Eventual delivery of client datagrams - ***************************************************************************) - -\* it ALWAYS holds that, for every chainID: -\* - if -\* * the counterparty client is not initialized -\* - then -\* * the chain EVENTUALLY creates the counterparty client -CreateClientDelivery == - [](\A chainID \in ChainIDs : - (GetCounterpartyClientHeights(GetChainByID(chainID)) = {}) - => <>(IsCounterpartyClientOnChain(GetChainByID(chainID)))) - -\* it ALWAYS holds that, for every chainID and every height h -\* - if -\* * EVENTUALLY a ClientUpdate for height h is sent to chainID -\* - then -\* * EVENTUALLY height h is added to counterparty client heights of chainID -ClientUpdateDelivery == - [](\A chainID \in ChainIDs : \A h \in Heights : - (<>IsClientUpdateInOutgoingDatagrams(chainID, h) - => <>(IsCounterpartyClientHeightOnChain(GetChainByID(chainID), h)))) - -(*************************************************************************** - Liveness: Eventual delivery of connection datagrams - ***************************************************************************) - -\* it ALWAYS holds that, for every chainID -\* - if -\* * EVENTUALLY a ConnOpenInit is sent to chainID -\* - then -\* * EVENTUALLY the connections at chainID and its counterparty are open -ConnOpenInitDelivery == - [](\A chainID \in ChainIDs : - (<>IsConnOpenInitInOutgoingDatagrams(chainID) - => <>(/\ IsConnectionOpen(GetChainByID(chainID)) - /\ IsConnectionOpen(GetChainByID(GetCounterpartyChainID(chainID)))))) - -(*************************************************************************** - Liveness: Eventual delivery of channel datagrams - ***************************************************************************) -\* it ALWAYS holds that, for every chainID -\* - if -\* * EVENTUALLY a ChanOpenInit is sent to chainID -\* - then -\* * EVENTUALLY the channels at chainID and its counterparty are open 
-ChanOpenInitDelivery == - [](\A chainID \in ChainIDs : - (<>IsChanOpenInitInOutgoingDatagrams(chainID) - => <>(/\ IsChannelOpen(GetChainByID(chainID)) - /\ IsChannelOpen(GetChainByID(GetCounterpartyChainID(chainID)))))) - -\* it ALWAYS holds that, for every chainID -\* - if -\* * EVENTUALLY a ChanCloseInit is sent to chainID -\* - then -\* * EVENTUALLY the channels at chainID and its counterparty are closed -ChanCloseInitDelivery == - [](\A chainID \in ChainIDs : - (<>IsChanCloseInitInOutgoingDatagrams(chainID) - => <>(/\ IsChannelClosed(GetChainByID(chainID)) - /\ IsChannelClosed(GetChainByID(GetCounterpartyChainID(chainID)))))) - -(*************************************************************************** - Liveness [IBCDelivery]: - If ChainA sends a datagram to ChainB, then ChainB eventually receives - the datagram - - * ChainA sends a datagram iff a correct relayer constructs the datagram by - scanning ChainA's store - * ChainB receives a datagram iff it acts upon this datagram - ***************************************************************************) -\* IBCDelivery property: conjunction of delivery properties -IBCDelivery == - \* at least one relayer creates client datagrams - /\ (ClientDatagramsRelayer1 \/ ClientDatagramsRelayer2) - => /\ CreateClientDelivery - /\ ClientUpdateDelivery - \* at least one relayer creates connection datagrams - /\ (ConnectionDatagramsRelayer1 \/ ConnectionDatagramsRelayer2) - => ConnOpenInitDelivery - \* at least one relayer creates channel datagrams - /\ (ChannelDatagramsRelayer1 \/ ChannelDatagramsRelayer2) - => /\ ChanOpenInitDelivery - /\ ChanCloseInitDelivery - -============================================================================= -\* Modification History -\* Last modified Mon Apr 12 14:05:32 CEST 2021 by ilinastoilkovska -\* Created Fri Jun 05 16:48:22 CET 2020 by ilinastoilkovska diff --git a/docs/spec/tla/ibc-core/IBCCoreDefinitions.tla b/docs/spec/tla/ibc-core/IBCCoreDefinitions.tla deleted file mode 100644 index 758a046817..0000000000 --- a/docs/spec/tla/ibc-core/IBCCoreDefinitions.tla +++ /dev/null @@ -1,703 +0,0 @@ ------------------------- MODULE IBCCoreDefinitions ------------------------- - -(*************************************************************************** - This module contains definitions of operators that are shared between the - different modules. 
- ***************************************************************************) - -EXTENDS Integers, FiniteSets, Sequences - -(************************ TYPE ALIASES FOR SNOWCAT *************************) -(* @typeAlias: CHAN = - [ - state: Str, - order: Str, - portID: Str, - channelID: Str, - counterpartyPortID: Str, - counterpartyChannelID: Str, - nextSendSeq: Int, - nextRcvSeq: Int, - nextAckSeq: Int - ]; -*) -(* @typeAlias: CONN = - [ - state: Str, - connectionID: Str, - clientID: Str, - counterpartyConnectionID: Str, - counterpartyClientID: Str, - channelEnd: CHAN, - versions: Set(Int) - ]; -*) -(* @typeAlias: PACKET = - [ - sequence: Int, - timeoutHeight: Int, - srcPortID: Str, - srcChannelID: Str, - dstPortID: Str, - dstChannelID: Str - ]; -*) -(* @typeAlias: PACKETCOMM = - [ - portID: Str, - channelID: Str, - sequence: Int, - timeoutHeight: Int - ]; -*) -(* @typeAlias: PACKETREC = - [ - portID: Str, - channelID: Str, - sequence: Int - ]; -*) -(* @typeAlias: PACKETACK = - [ - portID: Str, - channelID: Str, - sequence: Int, - acknowledgement: Bool - ]; -*) -(* @typeAlias: CHAINSTORE = - [ - height: Int, - counterpartyClientHeights: Set(Int), - connectionEnd: CONN, - packetCommitments: Set(PACKETCOMM), - packetsToAcknowledge: Seq(PACKET), - packetReceipts: Set(PACKETREC), - packetAcknowledgements: Set(PACKETACK) - ]; -*) -(* @typeAlias: DATAGRAM = - [ - type: Str, - height: Int, - proofHeight: Int, - consensusHeight: Int, - clientID: Str, - counterpartyClientID: Str, - connectionID: Str, - counterpartyConnectionID: Str, - versions: Set(Int), - portID: Str, - channelID: Str, - counterpartyPortID: Str, - counterpartyChannelID: Str, - packet: PACKET, - acknowledgement: Bool - ]; -*) -(* @typeAlias: LOGENTRY = - [ - type: Str, - srcChainID: Str, - sequence: Int, - timeoutHeight: Int, - acknowledgement: Bool - ]; -*) -(* @typeAlias: HISTORY = - [ - connInit: Bool, - connTryOpen: Bool, - connOpen: Bool, - chanInit: Bool, - chanTryOpen: Bool, - chanOpen: Bool, - chanClosed: Bool - ]; -*) - -(********************** Common operator definitions ***********************) -ChainIDs == {"chainA", "chainB"} -ClientIDs == {"clA", "clB"} -ConnectionIDs == {"connAtoB", "connBtoA"} -ChannelIDs == {"chanAtoB", "chanBtoA"} -PortIDs == {"portA", "portB"} - -nullHeight == 0 -nullClientID == "none" -nullConnectionID == "none" -nullChannelID == "none" -nullPortID == "none" - -ConnectionStates == {"UNINIT", "INIT", "TRYOPEN", "OPEN"} -ChannelStates == {"UNINIT", "INIT", "TRYOPEN", "OPEN", "CLOSED"} -ChannelOrder == {"ORDERED", "UNORDERED"} - -Max(S) == CHOOSE x \in S: \A y \in S: y <= x -Min(S) == CHOOSE x \in S: \A y \in S: y >= x - -(******************************* ChannelEnds ******************************* - A set of channel end records. - A channel end record contains the following fields: - - - state -- a string - Stores the current state of this channel end. It has one of the - following values: "UNINIT", "INIT", "TRYOPEN", "OPEN", "CLOSED". - - - order -- a string - Stores whether the channel end is ordered or unordered. It has one - of the following values: "UNORDERED", "ORDERED". - - * ordered channels have three additional packet sequence fields: - nextSendSeq -- stores the sequence number of the next packet that - is going to be sent, - nextRcvSeq -- stores the sequence number of the next packet that - is going to be received, - nextAckSeq -- stores the sequence number of the next packet that - is going to be acknowledged. 
- - - portID -- a port identifier - Stores the port identifier of this channel end. - - - channelID -- a channel identifier - Stores the channel identifier of this channel end. - - - counterpartyPortID -- a port identifier - Stores the port identifier of the counterparty channel end. - - - counterpartyChannelID -- a channel identifier - Stores the channel identifier of the counterparty channel end. - - Note: we omit channel versions and connection hops. - ***************************************************************************) -ChannelEnds(channelOrdering, maxPacketSeq) == - IF channelOrdering = "UNORDERED" - THEN \* set of unordered channels - [ - state : ChannelStates, - order : {"UNORDERED"}, - portID : PortIDs \union {nullPortID}, - channelID : ChannelIDs \union {nullChannelID}, - counterpartyPortID : PortIDs \union {nullPortID}, - counterpartyChannelID : ChannelIDs \union {nullChannelID} - ] - ELSE \* set of ordered channels - [ - state : ChannelStates, - order : {"ORDERED"}, - nextSendSeq : 0..maxPacketSeq, - nextRcvSeq : 0..maxPacketSeq, - nextAckSeq : 0..maxPacketSeq, - portID : PortIDs \union {nullPortID}, - channelID : ChannelIDs \union {nullChannelID}, - counterpartyPortID : PortIDs \union {nullPortID}, - counterpartyChannelID : ChannelIDs \union {nullChannelID} - ] - - -(******* PacketCommitments, PacketReceipts, PacketAcknowledgements *********) -\* Set of packet commitments -PacketCommitments(Heights, maxPacketSeq) == - [ - portID : PortIDs, - channelID : ChannelIDs, - sequence : 1..maxPacketSeq, - timeoutHeight : Heights - ] - -\* Set of packet receipts -PacketReceipts(maxPacketSeq) == - [ - portID : PortIDs, - channelID : ChannelIDs, - sequence : 1..maxPacketSeq - ] - -\* Set of packet acknowledgements -PacketAcknowledgements(maxPacketSeq) == - [ - portID : PortIDs, - channelID : ChannelIDs, - sequence : 1..maxPacketSeq, - acknowledgement : BOOLEAN - ] - -(***************************** ConnectionEnds ***************************** - A set of connection end records. - A connection end record contains the following fields: - - - state -- a string - Stores the current state of this connection end. It has one of the - following values: "UNINIT", "INIT", "TRYOPEN", "OPEN". - - - connectionID -- a connection identifier - Stores the connection identifier of this connection end. - - - counterpartyConnectionID -- a connection identifier - Stores the connection identifier of the counterparty connection end. - - - clientID -- a client identifier - Stores the client identifier associated with this connection end. - - - counterpartyClientID -- a client identifier - Stores the counterparty client identifier associated with this connection end. - - - versions -- a set of versions - Stores the set of supported connection versions. At the end of a handshake, - it should be a singleton set. - - - channelEnd : a channel end record - Stores data about the channel associated with this connection end. 
- ***************************************************************************) -ConnectionEnds(channelOrdering, maxPacketSeq, Versions) == - [ - state : ConnectionStates, - connectionID : ConnectionIDs \union {nullConnectionID}, - counterpartyConnectionID : ConnectionIDs \union {nullConnectionID}, - clientID : ClientIDs \union {nullClientID}, - counterpartyClientID : ClientIDs \union {nullClientID}, - versions : (SUBSET Versions) \ {{}}, - channelEnd : ChannelEnds(channelOrdering, maxPacketSeq) - ] - -(********************************* Packets *********************************) -\* Set of packets -Packets(Heights, maxPacketSeq) == - [ - sequence : 1..maxPacketSeq, - timeoutHeight : Heights, - srcPortID : PortIDs, - srcChannelID : ChannelIDs, - dstPortID : PortIDs, - dstChannelID : ChannelIDs - ] - -(******************************** ChainStores ****************************** - A set of chain store records. - A chain store record contains the following fields: - - - height : an integer between nullHeight and MaxHeight. - Stores the current height of the chain. - - - counterpartyClientHeights : a set of integers between 1 and MaxHeight - Stores the heights of the client for the counterparty chain. - - - connectionEnd : a connection end record - Stores data about the connection with the counterparty chain. - - - packetCommitments : a set of packet commitments - A packet commitment is added to this set when a chain sends a packet - to the counterparty. - - - packetReceipts : a set of packet receipts - A packet receipt is added to this set when a chain received a packet - from the counterparty chain. - - - packetsToAcknowledge : a sequence of packets - A packet is added to this sequence when a chain receives it and is used - later for the receiver chain to write an acknowledgement for the packet. - - - packetAcknowledgements : a set of packet acknowledgements - A packet acknowledgement is added to this set when a chain writes an - acknowledgement for a packet it received from the counterparty. - - A chain store is the combination of the provable and private stores. 
- ***************************************************************************) -ChainStores(Heights, channelOrdering, maxPacketSeq, Versions) == - [ - height : Heights, - counterpartyClientHeights : SUBSET(Heights), - connectionEnd : ConnectionEnds(channelOrdering, maxPacketSeq, Versions), - packetCommitments : SUBSET(PacketCommitments(Heights, maxPacketSeq)), - packetReceipts : SUBSET(PacketReceipts(maxPacketSeq)), - packetsToAcknowledge : Seq(Packets(Heights, maxPacketSeq)), - packetAcknowledgements : SUBSET(PacketAcknowledgements(maxPacketSeq)) - ] - -(******************************** Datagrams ********************************) -\* Set of datagrams -Datagrams(Heights, maxPacketSeq, Versions) == - [ - type : {"ClientCreate"}, - clientID : ClientIDs, - height : Heights - ] \union [ - type : {"ClientUpdate"}, - clientID : ClientIDs, - height : Heights - ] \union [ - type : {"ConnOpenInit"}, - connectionID : ConnectionIDs, - counterpartyConnectionID : ConnectionIDs, - clientID : ClientIDs, - counterpartyClientID : ClientIDs - ] \union [ - type : {"ConnOpenTry"}, - desiredConnectionID : ConnectionIDs, - counterpartyConnectionID : ConnectionIDs, - clientID : ClientIDs, - counterpartyClientID : ClientIDs, - versions : SUBSET (Versions), - proofHeight : Heights, - consensusHeight : Heights - ] \union [ - type : {"ConnOpenAck"}, - connectionID : ConnectionIDs, - versions : SUBSET (Versions), - proofHeight : Heights, - consensusHeight : Heights - ] \union [ - type : {"ConnOpenConfirm"}, - connectionID : ConnectionIDs, - proofHeight : Heights - ] \union [ - type : {"ChanOpenInit"}, - portID : PortIDs, - channelID : ChannelIDs, - counterpartyPortID : PortIDs, - counterpartyChannelID : ChannelIDs - ] \union [ - type : {"ChanOpenTry"}, - portID : PortIDs, - channelID : ChannelIDs, - counterpartyPortID : PortIDs, - counterpartyChannelID : ChannelIDs, - proofHeight : Heights - ] \union [ - type : {"ChanOpenAck"}, - portID : PortIDs, - channelID : ChannelIDs, - proofHeight : Heights - ] \union [ - type : {"ChanOpenConfirm"}, - portID : PortIDs, - channelID : ChannelIDs, - proofHeight : Heights - ] \union [ - type : {"ChanCloseInit"}, - portID : PortIDs, - channelID : ChannelIDs - ] \union [ - type : {"ChanCloseConfirm"}, - portID : PortIDs, - channelID : ChannelIDs, - proofHeight : Heights - ] \union [ - type : {"PacketRecv"}, - packet : Packets(Heights, maxPacketSeq), - proofHeight : Heights - ] \union [ - type : {"PacketAck"}, - packet : Packets(Heights, maxPacketSeq), - acknowledgement : BOOLEAN, - proofHeight : Heights - ] - -\* Null datagram -NullDatagram == - [type |-> "null"] - -(**************************** PacketLogEntries *****************************) -\* Set of packet log entries -PacketLogEntries(Heights, maxPacketSeq) == - [ - type : {"PacketSent"}, - srcChainID : ChainIDs, - sequence : 1..maxPacketSeq, - timeoutHeight : Heights - ] \union [ - type : {"PacketRecv"}, - srcChainID : ChainIDs, - sequence : 1..maxPacketSeq, - portID : PortIDs, - channelID : ChannelIDs, - timeoutHeight : Heights - ] \union [ - type : {"WriteAck"}, - srcChainID : ChainIDs, - sequence : 1..maxPacketSeq, - portID : PortIDs, - channelID : ChannelIDs, - timeoutHeight : Heights, - acknowledgement : BOOLEAN - ] - -\* Null packet log entry -NullPacketLogEntry == - [type |-> "null"] - -(******************************* Histories ********************************) -\* Set of history variable records -Histories == - [ - connInit : BOOLEAN, - connTryOpen : BOOLEAN, - connOpen : BOOLEAN, - chanInit : BOOLEAN, - chanTryOpen : 
BOOLEAN, - chanOpen : BOOLEAN, - chanClosed : BOOLEAN - ] - -(*************************************************************************** - Initial values of a channel end, connection end, chain - ***************************************************************************) -\* Initial value of an unordered channel end: -\* - state is "UNINIT" -\* - order is "UNORDERED" -\* - channelID, counterpartyPortID, counterpartyChannelID are uninitialized -InitUnorderedChannelEnd == - [ - state |-> "UNINIT", - order |-> "UNORDERED", - portID |-> nullPortID, - channelID |-> nullChannelID, - counterpartyPortID |-> nullPortID, - counterpartyChannelID |-> nullChannelID - ] - -\* Initial value of an ordered channel end: -\* - state is "UNINIT" -\* - order is "ORDERED" -\* - nextSendSeq, nextRcvSeq, nextAckSeq are set to 0 -\* - channelID, counterpartyPortID, counterpartyChannelID are uninitialized -InitOrderedChannelEnd == - [ - state |-> "UNINIT", - order |-> "ORDERED", - nextSendSeq |-> 0, - nextRcvSeq |-> 0, - nextAckSeq |-> 0, - portID |-> nullPortID, - channelID |-> nullChannelID, - counterpartyPortID |-> nullPortID, - counterpartyChannelID |-> nullChannelID - ] - -\* Initial value of a connection end: -\* - state is "UNINIT" -\* - connectionID, counterpartyConnectionID are uninitialized -\* - clientID, counterpartyClientID are uninitialized -\* - versions is an arbitrary (non-empty) subset of the set {1, .., maxVersion} -\* - channelEnd is initialized based on channelOrdering -InitConnectionEnds(Versions, channelOrdering) == - IF channelOrdering = "ORDERED" - THEN [ - state : {"UNINIT"}, - connectionID : {nullConnectionID}, - clientID : {nullClientID}, - counterpartyConnectionID : {nullConnectionID}, - counterpartyClientID : {nullClientID}, - versions : (SUBSET Versions) \ {{}}, - channelEnd : {InitOrderedChannelEnd} - ] - ELSE [ - state : {"UNINIT"}, - connectionID : {nullConnectionID}, - clientID : {nullClientID}, - counterpartyConnectionID : {nullConnectionID}, - counterpartyClientID : {nullClientID}, - versions : (SUBSET Versions) \ {{}}, - channelEnd : {InitUnorderedChannelEnd} - ] - -\* Initial value of the chain store: -\* - height is initialized to 1 -\* - the counterparty light client is uninitialized -\* - the connection end is initialized to InitConnectionEnd -\* - the packet committments, receipts, acknowledgements, and -\* packets to acknowledge are empty -InitChainStore(Versions, channelOrdering) == - [ - height : {1}, - counterpartyClientHeights : {{}}, - connectionEnd : InitConnectionEnds(Versions, channelOrdering), - - packetCommitments : {{}}, - packetReceipts : {{}}, - packetAcknowledgements : {{}}, - packetsToAcknowledge : {<<>>} - - ] - -\* Initial value of history flags -InitHistory == - [ - connInit |-> FALSE, - connTryOpen |-> FALSE, - connOpen |-> FALSE, - chanInit |-> FALSE, - chanTryOpen |-> FALSE, - chanOpen |-> FALSE, - chanClosed |-> FALSE - ] - -(*************************************************************************** - Client helper operators - ***************************************************************************) - -\* get the ID of chainID's counterparty chain -GetCounterpartyChainID(chainID) == - \* IF chainID = "chainA" THEN AsID("chainB") ELSE AsID("chainA") - IF chainID = "chainA" THEN "chainB" ELSE "chainA" - -\* get the client ID of the client for chainID -GetClientID(chainID) == - \* IF chainID = "chainA" THEN AsID("clA") ELSE AsID("clB") - IF chainID = "chainA" THEN "clA" ELSE "clB" - -\* get the client ID of the client for chainID's counterparty 
chain -GetCounterpartyClientID(chainID) == - \* IF chainID = "chainA" THEN AsID("clB") ELSE AsID("clA") - IF chainID = "chainA" THEN "clB" ELSE "clA" - -\* get the latest height of chainID -\* @type: (CHAINSTORE) => Int; -GetLatestHeight(chain) == - chain.height - -\* get the maximal height of the client for chainID's counterparty chain -\* @type: (CHAINSTORE) => Int; -GetMaxCounterpartyClientHeight(chain) == - IF chain.counterpartyClientHeights /= {} - THEN Max(chain.counterpartyClientHeights) - ELSE nullHeight - -\* get the set of heights of the client for chainID's counterparty chain -\* @type: (CHAINSTORE) => Set(Int); -GetCounterpartyClientHeights(chain) == - chain.counterpartyClientHeights - -\* returns true if the counterparty client is initialized on chainID -\* @type: (CHAINSTORE) => Bool; -IsCounterpartyClientOnChain(chain) == - chain.counterpartyClientHeights /= {} - -\* returns true if the height h is in counterparty client heights on chainID -\* @type: (CHAINSTORE, Int) => Bool; -IsCounterpartyClientHeightOnChain(chain, h) == - h \in chain.counterpartyClientHeights - -(*************************************************************************** - Connection helper operators - ***************************************************************************) - -\* get the connection ID of the connection end at chainID -GetConnectionID(chainID) == - IF chainID = "chainA" - THEN "connAtoB" - ELSE IF chainID = "chainB" - THEN "connBtoA" - ELSE nullConnectionID - -\* get the connection ID of the connection end at chainID's counterparty chain -GetCounterpartyConnectionID(chainID) == - IF chainID = "chainA" - THEN "connBtoA" - ELSE IF chainID = "chainB" - THEN "connAtoB" - ELSE nullConnectionID - -\* get the connection end at chainID -\* @type: (CHAINSTORE) => CONN; -GetConnectionEnd(chain) == - chain.connectionEnd - -\* pick the minimal version from a set of versions -PickVersion(versions) == - IF versions /= {} - THEN LET minVersion == Min(versions) IN - {minVersion} - ELSE {} - - -\* returns true if the connection end on chainID is UNINIT -\* @type: (CHAINSTORE) => Bool; -IsConnectionUninit(chain) == - chain.connectionEnd.state = "UNINIT" - -\* returns true if the connection end on chainID is INIT -\* @type: (CHAINSTORE) => Bool; -IsConnectionInit(chain) == - chain.connectionEnd.state = "INIT" - -\* returns true if the connection end on chainID is TRYOPEN -\* @type: (CHAINSTORE) => Bool; -IsConnectionTryOpen(chain) == - chain.connectionEnd.state = "TRYOPEN" - -\* returns true if the connection end on chainID is OPEN -\* @type: (CHAINSTORE) => Bool; -IsConnectionOpen(chain) == - chain.connectionEnd.state = "OPEN" - -(*************************************************************************** - Channel helper operators - ***************************************************************************) - -\* get the channel ID of the channel end at the connection end of chainID -GetChannelID(chainID) == - IF chainID = "chainA" - THEN "chanAtoB" - ELSE IF chainID = "chainB" - THEN "chanBtoA" - ELSE nullChannelID - -\* get the channel ID of the channel end at chainID's counterparty chain -GetCounterpartyChannelID(chainID) == - IF chainID = "chainA" - THEN "chanBtoA" - ELSE IF chainID = "chainB" - THEN "chanAtoB" - ELSE nullChannelID - -\* get the port ID at chainID -GetPortID(chainID) == - IF chainID = "chainA" - THEN "portA" - ELSE IF chainID = "chainB" - THEN "portB" - ELSE nullPortID - -\* get the port ID at chainID's counterparty chain -GetCounterpartyPortID(chainID) == - IF chainID = "chainA" 
- THEN "portB" - ELSE IF chainID = "chainB" - THEN "portA" - ELSE nullPortID - -\* get the channel end at the connection end of chainID -\* @type: (CHAINSTORE) => CHAN; -GetChannelEnd(chain) == - chain.connectionEnd.channelEnd - -\* returns true if the channel end on chainID is UNINIT -\* @type: (CHAINSTORE) => Bool; -IsChannelUninit(chain) == - chain.connectionEnd.channelEnd.state = "UNINIT" - -\* returns true if the channel end on chainID is INIT -\* @type: (CHAINSTORE) => Bool; -IsChannelInit(chain) == - chain.connectionEnd.channelEnd.state = "INIT" - -\* returns true if the channel end on chainID is TRYOPEN -\* @type: (CHAINSTORE) => Bool; -IsChannelTryOpen(chain) == - chain.connectionEnd.channelEnd.state = "TRYOPEN" - -\* returns true if the channel end on chainID is OPEN -\* @type: (CHAINSTORE) => Bool; -IsChannelOpen(chain) == - chain.connectionEnd.channelEnd.state = "OPEN" - -\* returns true if the channel end on chainID is CLOSED -\* @type: (CHAINSTORE) => Bool; -IsChannelClosed(chain) == - chain.connectionEnd.channelEnd.state = "CLOSED" - -============================================================================= -\* Modification History -\* Last modified Mon Apr 12 14:26:47 CEST 2021 by ilinastoilkovska -\* Created Fri Jun 05 16:56:21 CET 2020 by ilinastoilkovska \ No newline at end of file diff --git a/docs/spec/tla/ibc-core/ICS02ClientHandlers.tla b/docs/spec/tla/ibc-core/ICS02ClientHandlers.tla deleted file mode 100644 index c9d1489748..0000000000 --- a/docs/spec/tla/ibc-core/ICS02ClientHandlers.tla +++ /dev/null @@ -1,75 +0,0 @@ ------------------------ MODULE ICS02ClientHandlers ------------------------- - -(*************************************************************************** - This module contains definitions of operators that are used to handle - client create and update datagrams. 
- ***************************************************************************) - -EXTENDS Integers, FiniteSets, IBCCoreDefinitions - -(*************************************************************************** - Client datagram handlers - ***************************************************************************) - -\* Handle "CreateClient" datagrams -\* @type: (Str, CHAINSTORE, Set(DATAGRAM)) => CHAINSTORE; -HandleCreateClient(chainID, chain, datagrams) == - \* get "CreateClient" datagrams with valid clientID - LET createClientDgrs == {dgr \in datagrams : - /\ dgr.type = "ClientCreate" - /\ dgr.clientID = GetCounterpartyClientID(chainID)} IN - \* get heights in datagrams with correct counterparty clientID for chainID - LET createClientHeights == {dgr.height : dgr \in createClientDgrs} IN - - \* new chain record with clients created - LET clientCreateChain == [ - chain EXCEPT !.counterpartyClientHeights = - \* if the set of counterparty client heights is not empty or - \* if the set of heights from datagrams is empty - IF \/ chain.counterpartyClientHeights /= {} - \/ createClientHeights = {} - \* then discard CreateClient datagrams - THEN chain.counterpartyClientHeights - \* otherwise, create counterparty client with height Max(createClientHeights) - ELSE {Max(createClientHeights)} - ] IN - - clientCreateChain - -\* Handle "ClientUpdate" datagrams -\* @type: (Str, CHAINSTORE, Set(DATAGRAM)) => CHAINSTORE; -HandleClientUpdate(chainID, chain, datagrams) == - \* max client height for counterparty chain - LET maxClientHeight == GetMaxCounterpartyClientHeight(chain) IN - \* get "ClientUpdate" datagrams with valid clientID - LET updateClientDgrs == {dgr \in datagrams : - /\ dgr.type = "ClientUpdate" - /\ dgr.clientID = GetCounterpartyClientID(chainID) - \* Note: the check maxClientHeight < dgr.height can be commented out in case - \* older headers can be installed for the client - /\ maxClientHeight < dgr.height - } IN - \* get heights in datagrams with correct counterparty clientID for chainID - LET updateClientHeights == {dgr.height : dgr \in updateClientDgrs} IN - - \* new chain record with clients updated - LET clientUpdatedChain == [ - chain EXCEPT !.counterpartyClientHeights = - \* if set of counterparty client heights is empty - IF chain.counterpartyClientHeights = {} - \* then discard ClientUpdate datagrams - THEN chain.counterpartyClientHeights - \* otherwise, if set of heights from datagrams is not empty - ELSE IF updateClientHeights /= {} - \* then update counterparty client heights with updateClientHeights - THEN chain.counterpartyClientHeights \union updateClientHeights - \* otherwise, do not update client heights - ELSE chain.counterpartyClientHeights - ] IN - - clientUpdatedChain - -============================================================================= -\* Modification History -\* Last modified Mon Apr 12 14:23:14 CEST 2021 by ilinastoilkovska -\* Created Tue Apr 07 16:42:47 CEST 2020 by ilinastoilkovska diff --git a/docs/spec/tla/ibc-core/ICS03ConnectionHandlers.tla b/docs/spec/tla/ibc-core/ICS03ConnectionHandlers.tla deleted file mode 100644 index 72fc205aa3..0000000000 --- a/docs/spec/tla/ibc-core/ICS03ConnectionHandlers.tla +++ /dev/null @@ -1,167 +0,0 @@ ----------------------- MODULE ICS03ConnectionHandlers ---------------------- - -(*************************************************************************** - This module contains definitions of operators that are used to handle - connection handshake datagrams. 
- ***************************************************************************) - -EXTENDS Integers, FiniteSets, IBCCoreDefinitions - -(*************************************************************************** - Connection datagram handlers - ***************************************************************************) - -\* Handle "ConnOpenInit" datagrams -\* @type: (Str, CHAINSTORE, Set(DATAGRAM)) => CHAINSTORE; -HandleConnOpenInit(chainID, chain, datagrams) == - \* get "ConnOpenInit" datagrams, with a valid connection ID - LET connOpenInitDgrs == {dgr \in datagrams : - /\ dgr.type = "ConnOpenInit" - /\ dgr.connectionID = GetConnectionID(chainID)} IN - - \* if there are valid "ConnOpenInit" datagrams, create a new connection end - \* and update the chain store - IF /\ connOpenInitDgrs /= {} - /\ chain.connectionEnd.state = "UNINIT" - THEN LET connOpenInitDgr == CHOOSE dgr \in connOpenInitDgrs : TRUE IN - LET connOpenInitConnectionEnd == [ - state |-> "INIT", - connectionID |-> connOpenInitDgr.connectionID, - counterpartyConnectionID |-> connOpenInitDgr.counterpartyConnectionID, - clientID |-> connOpenInitDgr.clientID, - counterpartyClientID |-> connOpenInitDgr.counterpartyClientID, - versions |-> chain.connectionEnd.versions, - channelEnd |-> chain.connectionEnd.channelEnd - ] IN - LET connOpenInitChain == [ - chain EXCEPT !.connectionEnd = connOpenInitConnectionEnd - ] IN - - connOpenInitChain - - \* otherwise, do not update the chain store - ELSE chain - - -\* Handle "ConnOpenTry" datagrams -\* @type: (Str, CHAINSTORE, Set(DATAGRAM)) => CHAINSTORE; -HandleConnOpenTry(chainID, chain, datagrams) == - \* get "ConnOpenTry" datagrams, with a valid connection ID and valid height - LET connOpenTryDgrs == {dgr \in datagrams : - /\ dgr.type = "ConnOpenTry" - /\ dgr.desiredConnectionID = GetConnectionID(chainID) - /\ dgr.consensusHeight <= chain.height - /\ dgr.proofHeight \in chain.counterpartyClientHeights} IN - - IF connOpenTryDgrs /= {} - \* if there are valid "ConnOpenTry" datagrams, update the connection end - THEN LET connOpenTryDgr == CHOOSE dgr \in connOpenTryDgrs : TRUE IN - LET versionIntersection == chain.connectionEnd.versions \intersect connOpenTryDgr.versions IN - - \* if the versions from the datagram overlap with the supported versions of the connnection end - IF /\ versionIntersection /= {} - \* if the connection end is uninitialized - /\ \/ chain.connectionEnd.state = "UNINIT" - \* of if it is initialized, and all fields match the datagram fields - \/ /\ chain.connectionEnd.state = "INIT" - /\ chain.connectionEnd.connectionID - = connOpenTryDgr.desiredConnectionID - /\ chain.connectionEnd.counterpartyConnectionID - = connOpenTryDgr.counterpartyConnectionID - /\ chain.connectionEnd.clientID - = connOpenTryDgr.clientID - /\ chain.connectionEnd.counterpartyClientID - = connOpenTryDgr.counterpartyClientID - \* update the connection end in the chain store - THEN LET connOpenTryConnectionEnd == [ - state |-> "TRYOPEN", - connectionID |-> connOpenTryDgr.desiredConnectionID, - counterpartyConnectionID |-> connOpenTryDgr.counterpartyConnectionID, - clientID |-> connOpenTryDgr.clientID, - counterpartyClientID |-> connOpenTryDgr.counterpartyClientID, - versions |-> PickVersion(versionIntersection), - channelEnd |-> chain.connectionEnd.channelEnd - ] IN - LET connOpenTryChain == [ - chain EXCEPT !.connectionEnd = connOpenTryConnectionEnd - ] IN - - connOpenTryChain - - \* otherwise, do not update the chain store - ELSE chain - ELSE chain - -\* Handle "ConnOpenAck" datagrams -\* @type: 
(Str, CHAINSTORE, Set(DATAGRAM)) => CHAINSTORE; -HandleConnOpenAck(chainID, chain, datagrams) == - \* get existing connection end - LET connectionEnd == GetConnectionEnd(chain) IN - \* get "ConnOpenAck" datagrams, with a valid connection ID and valid height - LET connOpenAckDgrs == {dgr \in datagrams : - /\ dgr.type = "ConnOpenAck" - /\ dgr.connectionID = connectionEnd.connectionID - /\ dgr.consensusHeight <= chain.height - /\ dgr.proofHeight \in chain.counterpartyClientHeights} IN - - \* if there are valid "ConnOpenAck" datagrams, update the connection end - IF connOpenAckDgrs /= {} - THEN LET connOpenAckDgr == CHOOSE dgr \in connOpenAckDgrs : TRUE IN - \* if the connection end on the chain is in "INIT" and the version set - \* from the datagram is a subset of the supported versions in the connection end - IF \/ /\ connectionEnd.state = "INIT" - /\ connOpenAckDgr.versions \subseteq connectionEnd.versions - \* or the connection end is in "TRYOPEN" and the version set - \* from the datagram is equal to the version set in the connection end - \/ /\ connectionEnd.state = "TRYOPEN" - /\ connOpenAckDgr.versions = connectionEnd.versions - \* update the connection end - THEN LET connOpenAckConnectionEnd == [ - connectionEnd EXCEPT !.state = "OPEN", - !.versions = connOpenAckDgr.versions - ] IN - LET connOpenAckChain == [ - chain EXCEPT !.connectionEnd = connOpenAckConnectionEnd - ] IN - - connOpenAckChain - - \* otherwise, do not update the chain store - ELSE chain - ELSE chain - - - -\* Handle "ConnOpenConfirm" datagrams -\* @type: (Str, CHAINSTORE, Set(DATAGRAM)) => CHAINSTORE; -HandleConnOpenConfirm(chainID, chain, datagrams) == - \* get existing connection end - LET connectionEnd == GetConnectionEnd(chain) IN - \* get "ConnOpenConfirm" datagrams, with a valid connection ID and valid height - LET connOpenConfirmDgrs == {dgr \in datagrams : - /\ dgr.type = "ConnOpenConfirm" - /\ dgr.connectionID = connectionEnd.connectionID - /\ dgr.proofHeight \in chain.counterpartyClientHeights} IN - - IF connOpenConfirmDgrs /= {} - \* if there are valid "connOpenConfirmDgrs" datagrams, update the connection end - THEN IF connectionEnd.state = "TRYOPEN" - \* if the connection end on the chain is in "TRYOPEN", update the connection end - THEN LET connOpenConfirmDgr == CHOOSE dgr \in connOpenConfirmDgrs : TRUE IN - LET connOpenConfirmConnectionEnd == [ - connectionEnd EXCEPT !.state = "OPEN" - ] IN - LET connOpenConfirmChain == [ - chain EXCEPT !.connectionEnd = connOpenConfirmConnectionEnd - ] IN - - connOpenConfirmChain - - \* otherwise, do not update the chain store - ELSE chain - ELSE chain - -============================================================================= -\* Modification History -\* Last modified Mon Apr 12 14:22:53 CEST 2021 by ilinastoilkovska -\* Created Tue Apr 07 16:09:26 CEST 2020 by ilinastoilkovska diff --git a/docs/spec/tla/ibc-core/ICS04ChannelHandlers.tla b/docs/spec/tla/ibc-core/ICS04ChannelHandlers.tla deleted file mode 100644 index c659d61f67..0000000000 --- a/docs/spec/tla/ibc-core/ICS04ChannelHandlers.tla +++ /dev/null @@ -1,275 +0,0 @@ ------------------------- MODULE ICS04ChannelHandlers ----------------------- - -(*************************************************************************** - This module contains definitions of operators that are used to handle - channel handshake datagrams. 
- ***************************************************************************) - -EXTENDS Integers, FiniteSets, IBCCoreDefinitions - -(*************************************************************************** - Channel datagram handlers - ***************************************************************************) - -\* Handle "ChanOpenInit" datagrams -\* @type: (Str, CHAINSTORE, Set(DATAGRAM)) => CHAINSTORE; -HandleChanOpenInit(chainID, chain, datagrams) == - \* get chainID's connection end - LET connectionEnd == GetConnectionEnd(chain) IN - \* get "ChanOpenInit" datagrams, with a valid port and channel ID - LET chanOpenInitDgrs == {dgr \in datagrams : - /\ dgr.type = "ChanOpenInit" - /\ dgr.portID = GetPortID(chainID) - /\ dgr.channelID = GetChannelID(chainID)} IN - - \* if there are valid "ChanOpenInit" datagrams and the connection is not "UNINIT", - \* initialize the channel end and update the chain - IF /\ chanOpenInitDgrs /= {} - /\ connectionEnd.state /= "UNINIT" - /\ connectionEnd.channelEnd.state = "UNINIT" - THEN LET chanOpenInitDgr == CHOOSE dgr \in chanOpenInitDgrs : TRUE IN - LET chanOpenInitChannelEnd == - IF connectionEnd.channelEnd.order = "ORDERED" - THEN [ - state |-> "INIT", - order |-> "ORDERED", - nextSendSeq |-> 1, - nextRcvSeq |-> 1, - nextAckSeq |-> 1, - portID |-> chanOpenInitDgr.portID, - channelID |-> chanOpenInitDgr.channelID, - counterpartyPortID |-> chanOpenInitDgr.counterpartyPortID, - counterpartyChannelID |-> chanOpenInitDgr.counterpartyChannelID - ] - ELSE [ - state |-> "INIT", - order |-> "UNORDERED", - portID |-> chanOpenInitDgr.portID, - channelID |-> chanOpenInitDgr.channelID, - counterpartyPortID |-> chanOpenInitDgr.counterpartyPortID, - counterpartyChannelID |-> chanOpenInitDgr.counterpartyChannelID - ] IN - LET chanOpenInitConnectionEnd == [ - chain.connectionEnd EXCEPT !.channelEnd = chanOpenInitChannelEnd - ] IN - LET chanOpenInitChain == [ - chain EXCEPT !.connectionEnd = chanOpenInitConnectionEnd - ] IN - - chanOpenInitChain - - \* otherwise, do not update the chain store - ELSE chain - -\* Handle "ChanOpenTry" datagrams -\* @type: (Str, CHAINSTORE, Set(DATAGRAM)) => CHAINSTORE; -HandleChanOpenTry(chainID, chain, datagrams) == - \* get chainID's connection end - LET connectionEnd == GetConnectionEnd(chain) IN - \* get "ChanOpenTry" datagrams, with a valid port and channel ID - LET chanOpenTryDgrs == {dgr \in datagrams : - /\ dgr.type = "ChanOpenTry" - /\ dgr.portID = GetPortID(chainID) - /\ dgr.channelID = GetChannelID(chainID) - /\ dgr.proofHeight \in chain.counterpartyClientHeights} IN - - \* if there are valid "ChanOpenTry" datagrams and the connection is "OPEN", - \* update the channel end - IF /\ chanOpenTryDgrs /= {} - /\ chain.connectionEnd.state = "OPEN" - THEN LET chanOpenTryDgr == CHOOSE dgr \in chanOpenTryDgrs : TRUE IN - \* if the channel end is uninitialized - IF \/ connectionEnd.channelEnd.state = "UNINIT" - \* of if it is initialized, and all fields match the datagram fields - \/ /\ connectionEnd.channelEnd.state = "INIT" - /\ connectionEnd.channelEnd.counterpartyPortID - = chanOpenTryDgr.counterpartyPortID - /\ connectionEnd.channelEnd.counterpartyChannelID - = chanOpenTryDgr.counterpartyChannelID - \* update the channel end in the chain store - THEN LET chanOpenTryChannelEnd == - IF connectionEnd.channelEnd.order = "ORDERED" - THEN [ - state |-> "TRYOPEN", - order |-> "ORDERED", - nextSendSeq |-> 1, - nextRcvSeq |-> 1, - nextAckSeq |-> 1, - portID |-> chanOpenTryDgr.portID, - channelID |-> chanOpenTryDgr.channelID, - 
counterpartyPortID |-> chanOpenTryDgr.counterpartyPortID, - counterpartyChannelID |-> chanOpenTryDgr.counterpartyChannelID - ] - ELSE [ - state |-> "TRYOPEN", - order |-> "UNORDERED", - portID |-> chanOpenTryDgr.portID, - channelID |-> chanOpenTryDgr.channelID, - counterpartyPortID |-> chanOpenTryDgr.counterpartyPortID, - counterpartyChannelID |-> chanOpenTryDgr.counterpartyChannelID - ] IN - - LET chanOpenTryConnectionEnd == [ - connectionEnd EXCEPT !.channelEnd = chanOpenTryChannelEnd - ] IN - - LET chanOpenTryChain == [ - chain EXCEPT !.connectionEnd = chanOpenTryConnectionEnd - ] IN - - chanOpenTryChain - - \* otherwise, do not update the chain store - ELSE chain - ELSE chain - -\* Handle "ChanOpenAck" datagrams -\* @type: (Str, CHAINSTORE, Set(DATAGRAM)) => CHAINSTORE; -HandleChanOpenAck(chainID, chain, datagrams) == - \* get chainID's connection end - LET connectionEnd == GetConnectionEnd(chain) IN - \* get chainID's channel end - LET channelEnd == GetChannelEnd(chain) IN - \* get "ChanOpenAck" datagrams, with a valid channel ID - LET chanOpenAckDgrs == {dgr \in datagrams : - /\ dgr.type = "ChanOpenAck" - /\ dgr.portID = channelEnd.portID - /\ dgr.channelID = channelEnd.channelID - /\ dgr.proofHeight \in chain.counterpartyClientHeights} IN - - \* if there are valid "ChanOpenAck" datagrams, update the channel end - IF /\ chanOpenAckDgrs /= {} - /\ connectionEnd.state = "OPEN" - THEN \* if the channel end on the chain is in "INIT" or it is in "TRYOPEN", - \* update the channel end - IF \/ channelEnd.state = "INIT" - \/ channelEnd.state = "TRYOPEN" - THEN LET chanOpenAckDgr == CHOOSE dgr \in chanOpenAckDgrs : TRUE IN - LET chanOpenAckChannelEnd == [ - channelEnd EXCEPT !.state = "OPEN" - ] IN - LET chanOpenAckConnectionEnd == [ - connectionEnd EXCEPT !.channelEnd = chanOpenAckChannelEnd - ] IN - LET chanOpenAckChain == [ - chain EXCEPT !.connectionEnd = chanOpenAckConnectionEnd - ] IN - - chanOpenAckChain - - \* otherwise, do not update the chain store - ELSE chain - ELSE chain - - -\* Handle "ChanOpenConfirm" datagrams -\* @type: (Str, CHAINSTORE, Set(DATAGRAM)) => CHAINSTORE; -HandleChanOpenConfirm(chainID, chain, datagrams) == - \* get chainID's connection end - LET connectionEnd == GetConnectionEnd(chain) IN - \* get chainID's channel end - LET channelEnd == GetChannelEnd(chain) IN - \* get "ChanOpenConfirm" datagrams, with a valid channel ID - LET chanOpenConfirmDgrs == {dgr \in datagrams : - /\ dgr.type = "ChanOpenConfirm" - /\ dgr.portID = channelEnd.portID - /\ dgr.channelID = channelEnd.channelID - /\ dgr.proofHeight \in chain.counterpartyClientHeights} IN - - \* if there are valid "ChanOpenConfirm" datagrams, update the channel end - IF /\ chanOpenConfirmDgrs /= {} - /\ connectionEnd.state = "OPEN" - THEN \* if the channel end on the chain is in "TRYOPEN", update the channel end - IF channelEnd.state = "TRYOPEN" - THEN LET chanOpenConfirmDgr == CHOOSE dgr \in chanOpenConfirmDgrs : TRUE IN - LET chanOpenConfirmChannelEnd == [ - channelEnd EXCEPT !.state = "OPEN" - ] IN - LET chanOpenConfirmConnectionEnd == [ - connectionEnd EXCEPT !.channelEnd = chanOpenConfirmChannelEnd - ] IN - LET chanOpenConfirmChain == [ - chain EXCEPT !.connectionEnd = chanOpenConfirmConnectionEnd - ] IN - - chanOpenConfirmChain - - \* otherwise, do not update the chain store - ELSE chain - ELSE chain - -\* Handle "ChanCloseInit" datagrams -\* @type: (Str, CHAINSTORE, Set(DATAGRAM)) => CHAINSTORE; -HandleChanCloseInit(chainID, chain, datagrams) == - \* get chainID's connection end - LET connectionEnd == 
GetConnectionEnd(chain) IN - \* get chainID's channel end - LET channelEnd == GetChannelEnd(chain) IN - \* get "ChanCloseInit" datagrams, with a valid channel ID - LET chanCloseInitDgrs == {dgr \in datagrams : - /\ dgr.type = "ChanCloseInit" - /\ dgr.portID = channelEnd.portID - /\ dgr.channelID = channelEnd.channelID} IN - - \* if there are valid "ChanCloseInit" datagrams - IF /\ chanCloseInitDgrs /= {} - \* and the channel end is neither UNINIT nor CLOSED - /\ channelEnd.state \notin {"UNINIT", "CLOSED"} - \* and the connection end is OPEN - /\ connectionEnd.state = "OPEN" - THEN \* then close the channel end - LET chanCloseInitChannelEnd == [ - channelEnd EXCEPT !.state = "CLOSED" - ] IN - LET chanCloseInitConnectionEnd == [ - connectionEnd EXCEPT !.channelEnd = chanCloseInitChannelEnd - ] IN - LET chanCloseInitChain == [ - chain EXCEPT !.connectionEnd = chanCloseInitConnectionEnd - ] IN - - chanCloseInitChain - - \* otherwise, do not update the chain store - ELSE chain - -\* Handle "ChanCloseConfirm" datagrams -\* @type: (Str, CHAINSTORE, Set(DATAGRAM)) => CHAINSTORE; -HandleChanCloseConfirm(chainID, chain, datagrams) == - \* get chainID's connection end - LET connectionEnd == GetConnectionEnd(chain) IN - \* get chainID's channel end - LET channelEnd == GetChannelEnd(chain) IN - \* get "ChanCloseConfirm" datagrams, with a valid channel ID - LET chanCloseConfirmDgrs == {dgr \in datagrams : - /\ dgr.type = "ChanCloseConfirm" - /\ dgr.portID = channelEnd.portID - /\ dgr.channelID = channelEnd.channelID - /\ dgr.proofHeight \in chain.counterpartyClientHeights} IN - - \* if there are valid "ChanCloseConfirm" datagrams - IF /\ chanCloseConfirmDgrs /= {} - \* and the channel end is neither UNINIT nor CLOSED - /\ channelEnd.state \notin {"UNINIT", "CLOSED"} - \* and the connection end is OPEN - /\ connectionEnd.state = "OPEN" - THEN \* then close the channel end - LET chanCloseConfirmChannelEnd == [ - channelEnd EXCEPT !.state = "CLOSED" - ] IN - LET chanCloseConfirmConnectionEnd == [ - connectionEnd EXCEPT !.channelEnd = chanCloseConfirmChannelEnd - ] IN - LET chanCloseConfirmChain == [ - chain EXCEPT !.connectionEnd = chanCloseConfirmConnectionEnd - ] IN - - chanCloseConfirmChain - - \* otherwise, do not update the chain store - ELSE chain - -============================================================================= -\* Modification History -\* Last modified Mon Apr 12 14:22:44 CEST 2021 by ilinastoilkovska -\* Created Tue Apr 07 16:58:02 CEST 2020 by ilinastoilkovska diff --git a/docs/spec/tla/ibc-core/ICS04PacketHandlers.tla b/docs/spec/tla/ibc-core/ICS04PacketHandlers.tla deleted file mode 100644 index 4e4808f040..0000000000 --- a/docs/spec/tla/ibc-core/ICS04PacketHandlers.tla +++ /dev/null @@ -1,368 +0,0 @@ ------------------------- MODULE ICS04PacketHandlers ------------------------ - -(*************************************************************************** - This module contains definitions of operators that are used to handle - packet datagrams. 
- ***************************************************************************) - -EXTENDS Integers, FiniteSets, Sequences, IBCCoreDefinitions - -(*************************************************************************** - Packet datagram handlers - ***************************************************************************) - -\* Handle "PacketRecv" datagrams -\* @type: (Str, CHAINSTORE, DATAGRAM, Seq(LOGENTRY)) => [chainStore: CHAINSTORE, packetLog: Seq(LOGENTRY)]; -HandlePacketRecv(chainID, chain, packetDatagram, log) == - \* get chainID's connection end - LET connectionEnd == GetConnectionEnd(chain) IN - \* get chainID's channel end - LET channelEnd == connectionEnd.channelEnd IN - \* get packet - LET packet == packetDatagram.packet IN - - IF \* if the channel and connection ends are open for packet transmission - /\ channelEnd.state = "OPEN" - /\ connectionEnd.state = "OPEN" - \* if the packet has not passed the timeout height - /\ \/ packet.timeoutHeight = 0 - \/ chain.height < packet.timeoutHeight - \* if the "PacketRecv" datagram has valid port and channel IDs - /\ packet.srcPortID = channelEnd.counterpartyPortID - /\ packet.srcChannelID = channelEnd.counterpartyChannelID - /\ packet.dstPortID = channelEnd.portID - /\ packet.dstChannelID = channelEnd.channelID - \* if "PacketRecv" datagram can be verified - /\ packetDatagram.proofHeight \in chain.counterpartyClientHeights - THEN \* construct log entry for packet log - LET logEntry == [ - type |-> "PacketRecv", - srcChainID |-> chainID, - sequence |-> packet.sequence, - portID |-> packet.dstPortID, - channelID |-> packet.dstChannelID, - timeoutHeight |-> packet.timeoutHeight - ] IN - - \* if the channel is unordered and the packet has not been received - IF /\ channelEnd.order = "UNORDERED" - /\ [ - portID |-> packet.dstPortID, - channelID |-> packet.dstChannelID, - sequence |-> packet.sequence - ] \notin chain.packetReceipts - THEN LET newChainStore == [chain EXCEPT - \* record that the packet has been received - !.packetReceipts = - chain.packetReceipts - \union - {[ - channelID |-> packet.dstChannelID, - portID |-> packet.dstPortID, - sequence |-> packet.sequence - ]}, - \* add packet to the set of packets for which an acknowledgement should be written - !.packetsToAcknowledge = Append(chain.packetsToAcknowledge, packet)] IN - - [chainStore |-> newChainStore, packetLog |-> Append(log, logEntry)] - - ELSE \* if the channel is ordered and the packet sequence is nextRcvSeq - IF /\ channelEnd.order = "ORDERED" - /\ packet.sequence = channelEnd.nextRcvSeq - THEN LET newChainStore == [chain EXCEPT - \* increase the nextRcvSeq - !.connectionEnd.channelEnd.nextRcvSeq = - chain.connectionEnd.channelEnd.nextRcvSeq + 1, - \* add packet to the set of packets for which an acknowledgement should be written - !.packetsToAcknowledge = Append(chain.packetsToAcknowledge, packet)] IN - - [chainStore |-> newChainStore, packetLog |-> Append(log, logEntry)] - - - \* otherwise, do not update the chain store and the log - ELSE [chainStore |-> chain, packetLog |-> log] - ELSE [chainStore |-> chain, packetLog |-> log] - - -\* Handle "PacketAck" datagrams -\* @type: (Str, CHAINSTORE, DATAGRAM, Seq(LOGENTRY)) => [chainStore: CHAINSTORE, packetLog: Seq(LOGENTRY)]; -HandlePacketAck(chainID, chain, packetDatagram, log) == - \* get chainID's connection end - LET connectionEnd == GetConnectionEnd(chain) IN - \* get chainID's channel end - LET channelEnd == GetChannelEnd(chain) IN - \* get packet - LET packet == packetDatagram.packet IN - \* get packet 
commitment that should be in chain store - LET packetCommitment == [ - portID |-> packet.srcPortID, - channelID |-> packet.srcChannelID, - sequence |-> packet.sequence, - timeoutHeight |-> packet.timeoutHeight - ] IN - - IF \* if the channel and connection ends are open for packet transmission - /\ channelEnd.state = "OPEN" - /\ connectionEnd.state = "OPEN" - \* if the packet commitment exists in the chain store - /\ packetCommitment \in chain.packetCommitments - \* if the "PacketRecv" datagram has valid port and channel IDs - /\ packet.srcPortID = channelEnd.portID - /\ packet.srcChannelID = channelEnd.channelID - /\ packet.dstPortID = channelEnd.counterpartyPortID - /\ packet.dstChannelID = channelEnd.counterpartyChannelID - \* if the "PacketAck" datagram can be verified - /\ packetDatagram.proofHeight \in chain.counterpartyClientHeights - THEN \* if the channel is ordered and the packet sequence is nextAckSeq - LET newChainStore == - IF /\ channelEnd.order = "ORDERED" - /\ packet.sequence = channelEnd.nextAckSeq - THEN \* increase the nextAckSeq and remove packet commitment - [chain EXCEPT - !.connectionEnd.channelEnd.nextAckSeq = - chain.connectionEnd.channelEnd.nextAckSeq + 1, - !.packetCommitments = chain.packetCommitments \ {packetCommitment}] - \* if the channel is unordered, remove packet commitment - ELSE IF channelEnd.order = "UNORDERED" - THEN [chain EXCEPT - !.packetCommitments = chain.packetCommitments \ {packetCommitment}] - \* otherwise, do not update the chain store - ELSE chain IN - - [chainStore |-> newChainStore, packetLog |-> log] - - \* otherwise, do not update the chain store and the log - ELSE [chainStore |-> chain, packetLog |-> log] - - -\* write packet commitments to chain store -\* @type: (CHAINSTORE, PACKET) => CHAINSTORE; -WritePacketCommitment(chain, packet) == - \* get chainID's connection end - LET connectionEnd == GetConnectionEnd(chain) IN - \* get chainID's channel end - LET channelEnd == GetChannelEnd(chain) IN - \* get latest counterparty client height - LET latestClientHeight == GetMaxCounterpartyClientHeight(chain) IN - - IF \* channel end is neither null nor closed - /\ channelEnd.state \notin {"UNINIT", "CLOSED"} - \* connection end is initialized - /\ connectionEnd.state /= "UNINIT" - \* if the packet has valid port and channel IDs - /\ packet.srcPortID = channelEnd.portID - /\ packet.srcChannelID = channelEnd.channelID - /\ packet.dstPortID = channelEnd.counterpartyPortID - /\ packet.dstChannelID = channelEnd.counterpartyChannelID - \* timeout height has not passed - /\ \/ packet.timeoutHeight = 0 - \/ latestClientHeight < packet.timeoutHeight - THEN IF \* if the channel is ordered, check if packetSeq is nextSendSeq, - \* add a packet commitment in the chain store, and increase nextSendSeq - /\ channelEnd.order = "ORDERED" - /\ packet.sequence = channelEnd.nextSendSeq - THEN [chain EXCEPT - !.packetCommitments = - chain.packetCommitments \union {[portID |-> packet.srcPortID, - channelID |-> packet.srcChannelID, - sequence |-> packet.sequence, - timeoutHeight |-> packet.timeoutHeight]}, - !.connectionEnd.channelEnd.nextSendSeq = channelEnd.nextSendSeq + 1 - ] - \* otherwise, do not update the chain store - ELSE IF \* if the channel is unordered, - \* add a packet commitment in the chain store - /\ channelEnd.order = "UNORDERED" - THEN [chain EXCEPT - !.packetCommitments = - chain.packetCommitments \union {[portID |-> packet.srcPortID, - channelID |-> packet.srcChannelID, - sequence |-> packet.sequence, - timeoutHeight |-> 
packet.timeoutHeight]} - ] - \* otherwise, do not update the chain store - ELSE chain - ELSE chain - -\* write acknowledgements to chain store -\* @type: (CHAINSTORE, PACKET) => CHAINSTORE; -WriteAcknowledgement(chain, packet) == - \* create a packet acknowledgement for this packet - LET packetAcknowledgement == [ - portID |-> packet.dstPortID, - channelID |-> packet.dstChannelID, - sequence |-> packet.sequence, - acknowledgement |-> TRUE - ] IN - - \* if the acknowledgement for the packet has not been written - IF packetAcknowledgement \notin chain.packetAcknowledgements - THEN \* write the acknowledgement to the chain store and remove - \* the packet from the set of packets to acknowledge - [chain EXCEPT !.packetAcknowledgements = - chain.packetAcknowledgements - \union - {packetAcknowledgement}, - !.packetsToAcknowledge = - Tail(chain.packetsToAcknowledge)] - - \* remove the packet from the sequence of packets to acknowledge - ELSE [chain EXCEPT !.packetsToAcknowledge = - Tail(chain.packetsToAcknowledge)] - -\* log acknowledgements to packet Log -\* @type: (Str, CHAINSTORE, Seq(LOGENTRY), PACKET) => Seq(LOGENTRY); -LogAcknowledgement(chainID, chain, log, packet) == - \* create a packet acknowledgement for this packet - LET packetAcknowledgement == [ - portID |-> packet.dstPortID, - channelID |-> packet.dstChannelID, - sequence |-> packet.sequence, - acknowledgement |-> TRUE - ] IN - - \* if the acknowledgement for the packet has not been written - IF packetAcknowledgement \notin chain.packetAcknowledgements - THEN \* append a "WriteAck" log entry to the log - LET packetLogEntry == [ - type |-> "WriteAck", - srcChainID |-> chainID, - sequence |-> packet.sequence, - portID |-> packet.dstPortID, - channelID |-> packet.dstChannelID, - timeoutHeight |-> packet.timeoutHeight, - acknowledgement |-> TRUE - ] IN - Append(log, packetLogEntry) - \* do not add anything to the log - ELSE log - - -\* check if a packet timed out -\* @type: (CHAINSTORE, CHAINSTORE, PACKET, Int) => CHAINSTORE; -TimeoutPacket(chain, counterpartyChain, packet, proofHeight) == - \* get connection end - LET connectionEnd == GetConnectionEnd(chain) IN - \* get channel end - LET channelEnd == GetChannelEnd(chain) IN - \* get counterparty channel end - LET counterpartyChannelEnd == GetChannelEnd(counterpartyChain) IN - - \* get packet committment that should be in chain store - LET packetCommitment == [ - portID |-> packet.srcPortID, - channelID |-> packet.srcChannelID, - sequence |-> packet.sequence, - timeoutHeight |-> packet.timeoutHeight - ] IN - \* get packet receipt that should be absent in counterparty chain store - LET packetReceipt == [ - portID |-> packet.dstPortID, - channelID |-> packet.dstChannelID, - sequence |-> packet.sequence - ] IN - - \* if channel end is open - IF /\ channelEnd.state = "OPEN" - \* srcChannelID and srcPortID match channel and port IDs - /\ packet.srcPortID = channelEnd.portID - /\ packet.srcChannelID = channelEnd.channelID - \* dstChannelID and dstPortID match counterparty channel and port IDs - /\ packet.dstPortID = channelEnd.counterpartyPortID - /\ packet.dstChannelID = channelEnd.counterpartyChannelID - \* packet has timed out - /\ packet.timeoutHeight > 0 - /\ proofHeight >= packet.timeoutHeight - \* chain has sent the packet - /\ packetCommitment \in chain.packetCommitments - \* counterparty chain has not received the packet - /\ \/ /\ channelEnd.order = "ORDERED" - /\ counterpartyChannelEnd.nextRcvSeq <= packet.sequence - \/ /\ channelEnd.order = "UNORDERED" - /\ packetReceipt \notin 
counterpartyChain.packetReceipts - \* counterparty channel end has dstPortID and dstChannelID - /\ counterpartyChannelEnd.portID = packet.dstPortID - /\ counterpartyChannelEnd.channelID = packet.dstChannelID - \* close ordered channel and remove packet commitment - THEN LET updatedChannelEnd == [channelEnd EXCEPT - !.state = IF channelEnd.order = "ORDERED" - THEN "CLOSED" - ELSE channelEnd.state] IN - LET updatedConnectionEnd == [connectionEnd EXCEPT - !.channelEnd = updatedChannelEnd] IN - LET updatedChainStore == [chain EXCEPT - !.packetCommitments = - chain.packetCommitments \ {packetCommitment}, - !.connectionEnd = updatedConnectionEnd] IN - - updatedChainStore - - \* otherwise, do not update the chain store - ELSE chain - -\* check if a packet timed out on close -\* @type: (CHAINSTORE, CHAINSTORE, PACKET, Int) => CHAINSTORE; -TimeoutOnClose(chain, counterpartyChain, packet, proofHeight) == - \* get connection end - LET connectionEnd == GetConnectionEnd(chain) IN - \* get channel end - LET channelEnd == GetChannelEnd(chain) IN - \* get counterparty channel end - LET counterpartyChannelEnd == GetChannelEnd(counterpartyChain) IN - - \* get packet commitment that should be in chain store - LET packetCommitment == [ - portID |-> packet.srcPortID, - channelID |-> packet.srcChannelID, - sequence |-> packet.sequence, - timeoutHeight |-> packet.timeoutHeight - ] IN - \* get packet receipt that should be absent in counterparty chain store - LET packetReceipt == [ - portID |-> packet.dstPortID, - channelID |-> packet.dstChannelID, - sequence |-> packet.sequence - ] IN - - - \* if srcChannelID and srcPortID match channel and port IDs - IF /\ packet.srcPortID = channelEnd.portID - /\ packet.srcChannelID = channelEnd.channelID - \* if dstChannelID and dstPortID match counterparty channel and port IDs - /\ packet.dstPortID = channelEnd.counterpartyPortID - /\ packet.dstChannelID = channelEnd.counterpartyChannelID - \* chain has sent the packet - /\ packetCommitment \in chain.packetCommitments - \* counterparty channel end is closed and its fields are as expected - /\ counterpartyChannelEnd.state = "CLOSED" - /\ counterpartyChannelEnd.portID = packet.dstPortID - /\ counterpartyChannelEnd.channelID = packet.dstChannelID - /\ counterpartyChannelEnd.counterpartyPortID = packet.srcPortID - /\ counterpartyChannelEnd.counterpartyChannelID = packet.srcChannelID - \* counterparty chain has not received the packet - /\ \/ /\ channelEnd.order = "ORDERED" - /\ counterpartyChannelEnd.nextRcvSeq <= packet.sequence - \/ /\ channelEnd.order = "UNORDERED" - /\ packetReceipt \notin counterpartyChain.packetReceipts - \* close ordered channel and remove packet commitment - THEN LET updatedChannelEnd == [channelEnd EXCEPT - !.state = IF channelEnd.order = "ORDERED" - THEN "CLOSED" - ELSE channelEnd.state] IN - LET updatedConnectionEnd == [connectionEnd EXCEPT - !.channelEnd = updatedChannelEnd] IN - LET updatedChainStore == [chain EXCEPT - !.packetCommitments = - chain.packetCommitments \ {packetCommitment}, - !.connectionEnd = updatedConnectionEnd] IN - - updatedChainStore - - \* otherwise, do not update the chain store - ELSE chain - - -============================================================================= -\* Modification History -\* Last modified Mon Apr 12 14:22:40 CEST 2021 by ilinastoilkovska -\* Created Wed Jul 29 14:30:04 CEST 2020 by ilinastoilkovska diff --git a/docs/spec/tla/ibc-core/ICS18Relayer.tla b/docs/spec/tla/ibc-core/ICS18Relayer.tla deleted file mode 100644 index b3d9e8cf40..0000000000 --- 
a/docs/spec/tla/ibc-core/ICS18Relayer.tla +++ /dev/null @@ -1,448 +0,0 @@ ---------------------------- MODULE ICS18Relayer ---------------------------- - -(*************************************************************************** - This module contains the specification of a relayer, which is an off-chain - process running a relayer algorithm. - ***************************************************************************) - -EXTENDS Integers, FiniteSets, Sequences, IBCCoreDefinitions - -CONSTANTS GenerateClientDatagrams, \* toggle generation of client datagrams - GenerateConnectionDatagrams, \* toggle generation of connection datagrams - GenerateChannelDatagrams, \* toggle generation of channel datagrams - GeneratePacketDatagrams \* toggle generation of packet datagrams - -ASSUME /\ GenerateClientDatagrams \in BOOLEAN - /\ GenerateConnectionDatagrams \in BOOLEAN - /\ GenerateChannelDatagrams \in BOOLEAN - /\ GeneratePacketDatagrams \in BOOLEAN - -CONSTANTS - MaxHeight, \* maximal height of the chains in the system - MaxVersion, \* maximal connection / channel version (we assume versions are integers) - MaxPacketSeq \* maximal packet sequence number - -VARIABLES - chainAstore, \* store of ChainA - chainBstore, \* store of ChainB - outgoingDatagrams, \* a function that assigns a set of pending datagrams - \* outgoing from the relayer to each chainID - outgoingPacketDatagrams, \* a dedicated datagrams channel for packet datagrams - relayerHeights, \* a function that assigns a height to each chainID - closeChannelA, \* flag that triggers closing of the channel end at ChainA - closeChannelB, \* flag that triggers closing of the channel end at ChainB - packetLog \* packet log - -vars == <<chainAstore, chainBstore, outgoingDatagrams, outgoingPacketDatagrams, relayerHeights, closeChannelA, closeChannelB, packetLog>> -Heights == 1..MaxHeight \* set of possible heights of the chains in the system -Versions == 1..MaxVersion \* set of possible connection versions - -\* @type: (Str) => CHAINSTORE; -GetChainByID(chainID) == - IF chainID = "chainA" - THEN chainAstore - ELSE chainBstore - -\* @type: (Str) => Bool; -GetCloseChannelFlag(chainID) == - IF chainID = "chainA" - THEN closeChannelA - ELSE closeChannelB - -(*************************************************************************** - Client datagrams - ***************************************************************************) -\* Compute client datagrams designated for dstChainID. -\* These are used to update the client for srcChainID on dstChainID. 
-\* Some client updates might trigger an update of the height that -\* the relayer stores for srcChainID -\* @type: (Str, Str, Str -> Int) => [datagrams: Set(DATAGRAM), relayerUpdate: Str -> Int]; -ClientDatagrams(srcChainID, dstChainID, relayer) == - LET srcChain == GetChainByID(srcChainID) IN - LET dstChain == GetChainByID(dstChainID) IN - LET srcChainHeight == srcChain.height IN - LET srcClientHeight == GetMaxCounterpartyClientHeight(dstChain) IN - LET srcClientID == GetClientID(srcChainID) IN - - LET emptySetDatagrams == {} IN - - \* check if the relayer chain height for srcChainID should be updated - LET srcRelayerChainHeight == - IF relayer[srcChainID] < srcChainHeight - THEN srcChainHeight - ELSE relayer[srcChainID] IN - - \* create an updated relayer - LET updatedRelayer == - [relayer EXCEPT ![srcChainID] = srcRelayerChainHeight] IN - - \* generate datagrams for dstChainID - LET dstDatagrams == - IF srcClientHeight = nullHeight - THEN \* the src client does not exist on dstChainID - {[ - type |-> "ClientCreate", - height |-> srcChainHeight, - clientID |-> srcClientID - ]} - ELSE \* the src client exists on dstChainID - IF srcClientHeight < srcChainHeight - THEN \* the height of the src client on dstChainID is smaller than the height of the src chain - {[ - type |-> "ClientUpdate", - height |-> srcChainHeight, - clientID |-> srcClientID - ]} - ELSE emptySetDatagrams IN - - [datagrams|-> dstDatagrams, relayerUpdate |-> updatedRelayer] - -(*************************************************************************** - Connection datagrams - ***************************************************************************) -\* Compute connection datagrams designated for dstChainID. -\* These are used to update the connection end on dstChainID. -ConnectionDatagrams(srcChainID, dstChainID) == - LET srcChain == GetChainByID(srcChainID) IN - LET dstChain == GetChainByID(dstChainID) IN - - LET srcConnectionEnd == srcChain.connectionEnd IN - LET dstConnectionEnd == dstChain.connectionEnd IN - - LET srcConnectionID == GetConnectionID(srcChainID) IN - LET dstConnectionID == GetConnectionID(dstChainID) IN - - LET srcHeight == srcChain.height IN - LET srcClientHeight == GetMaxCounterpartyClientHeight(srcChain) IN - - LET emptySetDatagrams == {} IN - - LET dstDatagrams == - IF dstConnectionEnd.state = "UNINIT" /\ srcConnectionEnd.state = "UNINIT" THEN - {[ - type |-> "ConnOpenInit", - connectionID |-> dstConnectionID, \* "connBtoA" (if srcChainID = "chainA", dstChainID = "chainB") - clientID |-> GetCounterpartyClientID(dstChainID), \* "clA" - counterpartyConnectionID |-> srcConnectionID, \* "connAtoB" - counterpartyClientID |-> GetCounterpartyClientID(srcChainID) \* "clB" - ]} - - ELSE IF /\ srcClientHeight /= nullHeight - /\ srcConnectionEnd.state = "INIT" /\ \/ dstConnectionEnd.state = "UNINIT" - \/ dstConnectionEnd.state = "INIT" THEN - {[ - type |-> "ConnOpenTry", - desiredConnectionID |-> srcConnectionEnd.counterpartyConnectionID, \* "connBtoA" (if srcChainID = "chainA", dstChainID = "chainB") - counterpartyConnectionID |-> srcConnectionEnd.connectionID, \* "connAtoB" - clientID |-> srcConnectionEnd.counterpartyClientID, \* "clA" - counterpartyClientID |-> srcConnectionEnd.clientID, \* "clB" - versions |-> srcConnectionEnd.versions, - proofHeight |-> srcHeight, - consensusHeight |-> srcClientHeight - ]} - - ELSE IF /\ srcClientHeight /= nullHeight - /\ srcConnectionEnd.state = "TRYOPEN" /\ \/ dstConnectionEnd.state = "INIT" - \/ dstConnectionEnd.state = "TRYOPEN" THEN - {[ - type |-> "ConnOpenAck", 
- connectionID |-> dstConnectionID, \* "connBtoA" (if srcChainID = "chainA", dstChainID = "chainB") - versions |-> srcConnectionEnd.versions, - proofHeight |-> srcHeight, - consensusHeight |-> srcClientHeight - ]} - - ELSE IF srcConnectionEnd.state = "OPEN" /\ dstConnectionEnd.state = "TRYOPEN" THEN - {[ - type |-> "ConnOpenConfirm", - connectionID |-> dstConnectionEnd.connectionID, \* "connBtoA" (if srcChainID = "chainA", dstChainID = "chainB") - proofHeight |-> srcHeight - ]} - ELSE emptySetDatagrams IN - - dstDatagrams - -(*************************************************************************** - Channel handshake datagrams - ***************************************************************************) -\* Compute channel datagrams designated for dstChainID. -\* These are used to update the channel end on dstChainID. -ChannelDatagrams(srcChainID, dstChainID) == - LET srcChain == GetChainByID(srcChainID) IN - LET dstChain == GetChainByID(dstChainID) IN - - LET srcChannelEnd == GetChannelEnd(srcChain) IN - LET dstChannelEnd == GetChannelEnd(dstChain) IN - - LET srcPortID == GetPortID(srcChainID) IN - LET dstPortID == GetPortID(dstChainID) IN - - LET srcChannelID == GetChannelID(srcChainID) IN - LET dstChannelID == GetChannelID(dstChainID) IN - - LET srcHeight == srcChain.height IN - - LET emptySetDatagrams == {} IN - - LET dstDatagrams == - IF dstChannelEnd.state = "UNINIT" /\ srcChannelEnd.state = "UNINIT" THEN - {[ - type |-> "ChanOpenInit", - portID |-> dstPortID, \* "portB" (if srcChainID = "chainA", dstChainID = "chainB") - channelID |-> dstChannelID, \* "chanBtoA" - counterpartyPortID |-> srcPortID, \* "portA" - counterpartyChannelID |-> srcChannelID \* "chanAtoB" - ]} - - ELSE IF srcChannelEnd.state = "INIT" /\ \/ dstChannelEnd.state = "UNINIT" - \/ dstChannelEnd.state = "INIT" THEN - {[ - type |-> "ChanOpenTry", - portID |-> dstPortID, \* "portB" (if srcChainID = "chainA", dstChainID = "chainB") - channelID |-> dstChannelID, \* "chanBtoA" - counterpartyPortID |-> srcPortID, \* "portA" - counterpartyChannelID |-> srcChannelID, \* "chanAtoB" - proofHeight |-> srcHeight - ]} - - ELSE IF srcChannelEnd.state = "TRYOPEN" /\ \/ dstChannelEnd.state = "INIT" - \/ dstChannelEnd.state = "TRYOPEN" THEN - {[ - type |-> "ChanOpenAck", - portID |-> dstChannelEnd.portID, \* "portB" (if srcChainID = "chainA", dstChainID = "chainB") - channelID |-> dstChannelEnd.channelID, \* "chanBtoA" - proofHeight |-> srcHeight - ]} - - ELSE IF srcChannelEnd.state = "OPEN" /\ dstChannelEnd.state = "TRYOPEN" THEN - {[ - type |-> "ChanOpenConfirm", - portID |-> dstChannelEnd.portID, \* "portB" (if srcChainID = "chainA", dstChainID = "chainB") - channelID |-> dstChannelEnd.channelID, \* "chanBtoA" - proofHeight |-> srcHeight - ]} - - \* channel closing datagrams creation only for open channels - ELSE IF dstChannelEnd.state = "OPEN" /\ GetCloseChannelFlag(dstChannelID) THEN - {[ - type |-> "ChanCloseInit", - portID |-> dstChannelEnd.portID, \* "portB" (if srcChainID = "chainA", dstChainID = "chainB") - channelID |-> dstChannelEnd.channelID \* "chanBtoA" - ]} - - ELSE IF /\ srcChannelEnd.state = "CLOSED" - /\ dstChannelEnd.state /= "CLOSED" - /\ dstChannelEnd.state /= "UNINIT" THEN - {[ - type |-> "ChanCloseConfirm", - portID |-> dstChannelEnd.portID, \* "portB" (if srcChainID = "chainA", dstChainID = "chainB") - channelID |-> dstChannelEnd.channelID, \* "chanBtoA" - proofHeight |-> srcHeight - ]} - - ELSE emptySetDatagrams IN - - dstDatagrams - 
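For concreteness, here is an illustrative sketch (not part of the original module) of the value `ChannelDatagrams("chainA", "chainB")` takes in one reachable state, read directly off the definitions above: when chainA's channel end is in `INIT` and chainB's is still `UNINIT`, the relayer emits a single `ChanOpenTry` datagram for chainB, built from the fixed identifiers returned by `GetPortID`/`GetChannelID`; `srcHeight` below stands for chainA's current height.

```tla
\* Illustrative only: expected result of ChannelDatagrams("chainA", "chainB")
\* when srcChannelEnd.state = "INIT" and dstChannelEnd.state = "UNINIT";
\* srcHeight denotes chainA's current height.
{[
    type                  |-> "ChanOpenTry",
    portID                |-> "portB",     \* GetPortID("chainB")
    channelID             |-> "chanBtoA",  \* GetChannelID("chainB")
    counterpartyPortID    |-> "portA",     \* GetPortID("chainA")
    counterpartyChannelID |-> "chanAtoB",  \* GetChannelID("chainA")
    proofHeight           |-> srcHeight
]}
```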
-(*************************************************************************** - Packet datagrams - ***************************************************************************) -\* Compute a packet datagram based on the packetLogEntry -\* @type: (LOGENTRY) => DATAGRAM; -PacketDatagram(packetLogEntry) == - \* get chainID and its channel end - LET chainID == packetLogEntry.srcChainID IN - LET channelEnd == GetChainByID(chainID).connectionEnd.channelEnd IN - - \* get portID and counterpartyPortID - LET portID == channelEnd.portID IN \* "portA" (if srcChainID = "chainA") - LET counterpartyPortID == channelEnd.counterpartyPortID IN \* "portB" (if srcChainID = "chainA") - - \* get channelID and counterpartyChannelID - LET channelID == channelEnd.channelID IN \* "chanAtoB" (if srcChainID = "chainA") - LET counterpartyChannelID == channelEnd.counterpartyChannelID IN \* "chanBtoA" (if srcChainID = "chainA") - - LET srcHeight == GetChainByID(chainID).height IN - - \* the srcChannelID of the packet that is received is channelID, - \* the dstChannelID of the packet that is received is counterpartyChannelID - LET recvPacket == [ - sequence |-> packetLogEntry.sequence, - timeoutHeight |-> packetLogEntry.timeoutHeight, - srcPortID |-> portID, - srcChannelID |-> channelID, - dstPortID |-> counterpartyPortID, - dstChannelID |-> counterpartyChannelID - ] IN - - \* the srcChannelID of the packet that is acknowledged is counterpartyChannelID, - \* the dstChannelID of the packet that is acknowledged is channelID - LET ackPacket == [ - sequence |-> packetLogEntry.sequence, - timeoutHeight |-> packetLogEntry.timeoutHeight, - srcPortID |-> counterpartyPortID, - srcChannelID |-> counterpartyChannelID, - dstPortID |-> portID, - dstChannelID |-> channelID - ] IN - - IF packetLogEntry.type = "PacketSent" - THEN [ - type |-> "PacketRecv", - packet |-> recvPacket, - proofHeight |-> srcHeight - ] - ELSE IF packetLogEntry.type = "WriteAck" - THEN [ - type |-> "PacketAck", - packet |-> ackPacket, - acknowledgement |-> packetLogEntry.acknowledgement, - proofHeight |-> srcHeight - ] - ELSE NullDatagram - -(*************************************************************************** - Compute client, connection, channel datagrams (from srcChainID to dstChainID) - ***************************************************************************) -\* Currently supporting: -\* - ICS 02: Client updates -\* - ICS 03: Connection handshake -\* - ICS 04: Channel handshake -ComputeDatagrams(srcChainID, dstChainID) == - \* ICS 02 : Clients - \* - Determine if light clients needs to be updated - LET clientDatagrams == - IF GenerateClientDatagrams - THEN ClientDatagrams(srcChainID, dstChainID, relayerHeights) - ELSE [datagrams |-> {}, relayerUpdate |-> relayerHeights] IN - - \* ICS 03 : Connections - \* - Determine if any connection handshakes are in progress - LET connectionDatagrams == - IF GenerateConnectionDatagrams - THEN ConnectionDatagrams(srcChainID, dstChainID) - ELSE {} IN - - \* ICS 04 : Channels & Packets - \* - Determine if any channel handshakes are in progress - LET channelDatagrams == - IF GenerateChannelDatagrams - THEN ChannelDatagrams(srcChainID, dstChainID) - ELSE {} IN - - [datagrams |-> clientDatagrams.datagrams \union - connectionDatagrams \union - channelDatagrams, - relayerUpdate |-> clientDatagrams.relayerUpdate] - -(*************************************************************************** - Relayer actions - ***************************************************************************) -\* Update the height of the 
relayer client for some chainID -UpdateRelayerClientHeight(chainID) == - LET chainLatestHeight == GetChainByID(chainID).height IN - /\ relayerHeights[chainID] < chainLatestHeight - /\ relayerHeights' = [relayerHeights EXCEPT - ![chainID] = GetChainByID(chainID).height - ] - /\ UNCHANGED <> - /\ UNCHANGED <> - -\* for two chains, srcChainID and dstChainID, where srcChainID /= dstChainID, -\* create the pending datagrams and update the corresponding sets of pending datagrams -Relay(srcChainID, dstChainID) == - LET datagramsAndRelayerUpdate == ComputeDatagrams(srcChainID, dstChainID) IN - /\ srcChainID /= dstChainID - /\ outgoingDatagrams' = - [outgoingDatagrams EXCEPT - ![dstChainID] = outgoingDatagrams[dstChainID] - \union - datagramsAndRelayerUpdate.datagrams - ] - /\ relayerHeights' = datagramsAndRelayerUpdate.relayerUpdate - /\ UNCHANGED <> - /\ UNCHANGED <> - -\* given an entry from the packet log, create a packet datagram and -\* append it to the outgoing packet datagram queue for dstChainID -RelayPacketDatagram(srcChainID, dstChainID) == - /\ packetLog /= <<>> - /\ GeneratePacketDatagrams - /\ LET packetLogEntry == Head(packetLog) IN - LET packetDatagram == PacketDatagram(packetLogEntry) IN - \* if srcChainID matches the one from the log entry - /\ packetLogEntry.srcChainID = srcChainID - \* if dstChainID is the counterparty chain of srcChainID - /\ dstChainID = GetCounterpartyChainID(packetLogEntry.srcChainID) - /\ packetDatagram /= NullDatagram - /\ outgoingPacketDatagrams' = [outgoingPacketDatagrams EXCEPT - ![dstChainID] = Append(outgoingPacketDatagrams[dstChainID], - packetDatagram)] - /\ packetLog' = Tail(packetLog) - /\ UNCHANGED <> - /\ UNCHANGED <> - -\* update the relayer client heights -UpdateClient == - \E chainID \in ChainIDs : UpdateRelayerClientHeight(chainID) - -\* create client, connection, channel datagrams -CreateDatagrams == - \E srcChainID \in ChainIDs : \E dstChainID \in ChainIDs : - \* relay client, connection, channel datagrams - Relay(srcChainID, dstChainID) - -\* create packet datagrams -CreatePacketDatagrams == - \E srcChainID \in ChainIDs : \E dstChainID \in ChainIDs : - \* relay packet datagrams - RelayPacketDatagram(srcChainID, dstChainID) - - -(*************************************************************************** - Specification - ***************************************************************************) -\* Initial state predicate -\* Initially: -\* - the relayer heights are uninitialized (i.e., their height is nullHeight) -\* - there are no datagrams -Init == - /\ relayerHeights = [chainID \in ChainIDs |-> nullHeight] - /\ outgoingDatagrams = [chainID \in ChainIDs |-> {}] - /\ outgoingPacketDatagrams = [chainID \in ChainIDs |-> <<>>] - -\* Next state action -\* The relayer either: -\* - updates its clients, or -\* - creates datagrams, or -\* - does nothing -Next == - \/ UpdateClient - \/ CreateDatagrams - \/ CreatePacketDatagrams - \/ UNCHANGED vars - -\* Fairness constraints -Fairness == - /\ \A chainID \in ChainIDs : - WF_vars(UpdateRelayerClientHeight(chainID)) - /\ \A srcChainID \in ChainIDs : \A dstChainID \in ChainIDs : - WF_vars(Relay(srcChainID, dstChainID)) - /\ \A srcChainID \in ChainIDs : \A dstChainID \in ChainIDs : - WF_vars(RelayPacketDatagram(srcChainID, dstChainID)) - - - -(*************************************************************************** - Invariants - ***************************************************************************) -\* Type invariant -TypeOK == - /\ relayerHeights \in [ChainIDs -> Heights \union 
{nullHeight}] - /\ outgoingDatagrams \in [ChainIDs -> SUBSET Datagrams(Heights, MaxPacketSeq, Versions)] - /\ outgoingPacketDatagrams \in [ChainIDs -> Seq(Datagrams(Heights, MaxPacketSeq, Versions))] - -============================================================================= -\* Modification History -\* Last modified Mon Apr 12 14:30:40 CEST 2021 by ilinastoilkovska -\* Created Fri Mar 06 09:23:12 CET 2020 by ilinastoilkovska diff --git a/docs/spec/tla/ibc-core/MC_IBCCore.tla b/docs/spec/tla/ibc-core/MC_IBCCore.tla deleted file mode 100644 index d9edd8e323..0000000000 --- a/docs/spec/tla/ibc-core/MC_IBCCore.tla +++ /dev/null @@ -1,53 +0,0 @@ ------------------------------ MODULE MC_IBCCore ----------------------------- - -MaxHeight == 2 -MaxVersion == 2 -MaxPacketSeq == 1 -ClientDatagramsRelayer1 == TRUE -ClientDatagramsRelayer2 == FALSE -ConnectionDatagramsRelayer1 == TRUE -ConnectionDatagramsRelayer2 == FALSE -ChannelDatagramsRelayer1 == TRUE -ChannelDatagramsRelayer2 == FALSE -PacketDatagramsRelayer1 == TRUE -PacketDatagramsRelayer2 == FALSE -ChannelOrdering == "UNORDERED" - -VARIABLES - \* @type: CHAINSTORE; - chainAstore, \* chain store of ChainA - \* @type: CHAINSTORE; - chainBstore, \* chain store of ChainB - \* @type: Set(DATAGRAM); - incomingDatagramsChainA, \* set of (client, connection, channel) datagrams incoming to ChainA - \* @type: Set(DATAGRAM); - incomingDatagramsChainB, \* set of (client, connection, channel) datagrams incoming to ChainB - \* @type: Seq(DATAGRAM); - incomingPacketDatagramsChainA, \* sequence of packet datagrams incoming to ChainA - \* @type: Seq(DATAGRAM); - incomingPacketDatagramsChainB, \* sequence of packet datagrams incoming to ChainB - \* @type: Str -> Int; - relayer1Heights, \* the client heights of Relayer1 - \* @type: Str -> Int; - relayer2Heights, \* the client heights of Relayer2 - \* @type: Str -> Set(DATAGRAM); - outgoingDatagrams, \* sets of (client, connection, channel) datagrams outgoing of the relayers - \* @type: Str -> Seq(DATAGRAM); - outgoingPacketDatagrams, \* sequences of packet datagrams outgoing of the relayers - \* @type: Bool; - closeChannelA, \* flag that triggers closing of the channel end at ChainA - \* @type: Bool; - closeChannelB, \* flag that triggers closing of the channel end at ChainB - \* @type: HISTORY; - historyChainA, \* history variables for ChainA - \* @type: HISTORY; - historyChainB, \* history variables for ChainB - \* @type: Seq(LOGENTRY); - packetLog, \* packet log - \* @type: Int; - appPacketSeqChainA, \* packet sequence number from the application on ChainA - \* @type: Int; - appPacketSeqChainB \* packet sequence number from the application on ChainB - -INSTANCE IBCCore -============================================================================= \ No newline at end of file diff --git a/docs/spec/tla/ibc-core/README.md b/docs/spec/tla/ibc-core/README.md deleted file mode 100644 index 388dc4711c..0000000000 --- a/docs/spec/tla/ibc-core/README.md +++ /dev/null @@ -1,221 +0,0 @@ -# TLA+ specification of the IBC Core protocols - -A TLA+ specification of the IBC Core protocols ([ICS02](https://github.com/cosmos/ibc/tree/5877197dc03e844542cb8628dd52674a37ca6ff9/spec/ics-002-client-semantics), [ICS03](https://github.com/cosmos/ibc/tree/5877197dc03e844542cb8628dd52674a37ca6ff9/spec/ics-003-connection-semantics), [ICS04](https://github.com/cosmos/ibc/tree/5877197dc03e844542cb8628dd52674a37ca6ff9/spec/ics-004-channel-and-packet-semantics), 
[ICS18](https://github.com/cosmos/ibc/tree/5877197dc03e844542cb8628dd52674a37ca6ff9/spec/ics-018-relayer-algorithms)).
-In particular, the main module, [IBCCore.tla](IBCCore.tla), models a
-system consisting of two chains and two relayers.
-The model makes it possible to express concurrency aspects of a system with multiple (correct) relayers.
-The specification is written in a modular way, in order to facilitate future
-formal verification of properties and invariants in an adversarial setting.
-
-## Modules
-
-The specification has the following modules:
-  - `IBCCore.tla` (the main module)
-  - `ICS18Relayer.tla`
-  - `Chain.tla`
-  - `ICS02ClientHandlers.tla`
-  - `ICS03ConnectionHandlers.tla`
-  - `ICS04ChannelHandlers.tla`
-  - `ICS04PacketHandlers.tla`
-  - `IBCCoreDefinitions.tla`
-
-### [`ICS18Relayer.tla`](ICS18Relayer.tla)
-A relayer relays datagrams between the two chains. Its transition relation is defined by the formula:
-```tla
-Next ==
-    \/ UpdateClient
-    \/ CreateDatagrams
-    \/ UNCHANGED vars
-```
-where `UpdateClient` and `CreateDatagrams` are scheduled non-deterministically.
-`UpdateClient` picks a light client on the relayer for some chain and updates it. `CreateDatagrams` picks a direction (a pair of source and destination chains) and
-creates client, connection, channel, and packet datagrams (i.e., it captures the
-logic of [`pendingDatagrams()`](https://github.com/cosmos/ibc/tree/5877197dc03e844542cb8628dd52674a37ca6ff9/spec/ics-018-relayer-algorithms#pending-datagrams)).
-
-### [`Chain.tla`](Chain.tla)
-The chain state is represented by a chain store, which is a snapshot of the provable and private stores, to the extent necessary for IBC. Additionally, a chain has dedicated
-datagram containers for:
-1. client, connection, and channel datagrams (given by a set of datagrams),
-2. packet datagrams (given by a queue of datagrams that models the order in which the datagrams were submitted by the relayer).
-
-Its transition relation is defined by the formula:
-```tla
-Next ==
-    \/ AdvanceChain
-    \/ HandleIncomingDatagrams
-    \/ SendPacket
-    \/ AcknowledgePacket
-    \/ UNCHANGED vars
-```
-where:
-- `AdvanceChain`: increments the height of the chain,
-- `HandleIncomingDatagrams`: dispatches the datagrams to the appropriate handlers.
-This captures the logic of the [routing module](https://github.com/cosmos/ibc/tree/5877197dc03e844542cb8628dd52674a37ca6ff9/spec/ics-026-routing-module).
-- `SendPacket`: models user/application-defined calls to send a packet. As this specification does not have a specific application in mind, we abstract away from the packet data and allow sending packets non-deterministically.
-The packet commitment is written in the chain store, and the sent packet is logged,
-which triggers the relayer to create a `PacketRecv` datagram.
-- `AcknowledgePacket`: writes an acknowledgement for a received packet
-  in the chain store and on the packet log, which triggers the relayer to create a `PacketAck` datagram.
-
-### [`ICS02ClientHandlers.tla`](ICS02ClientHandlers.tla), [`ICS03ConnectionHandlers.tla`](ICS03ConnectionHandlers.tla), [`ICS04ChannelHandlers.tla`](ICS04ChannelHandlers.tla), [`ICS04PacketHandlers.tla`](ICS04PacketHandlers.tla)
-These TLA+ modules contain definitions of
-operators that handle client, connection handshake, channel handshake, and packet
-datagrams, respectively.
-These operators capture the logic of the handlers defined in [ICS02](https://github.com/cosmos/ibc/tree/5877197dc03e844542cb8628dd52674a37ca6ff9/spec/ics-002-client-semantics), [ICS03](https://github.com/cosmos/ibc/tree/5877197dc03e844542cb8628dd52674a37ca6ff9/spec/ics-003-connection-semantics), and
-[ICS04](https://github.com/cosmos/ibc/tree/5877197dc03e844542cb8628dd52674a37ca6ff9/spec/ics-004-channel-and-packet-semantics).
-
-
-The module `Relayer.tla` contains the specification of the relayer algorithm.
-The module `Chain.tla` captures the chain logic.
-It extends the modules `ClientHandlers.tla`,
-`ConnectionHandlers.tla`, `ChannelHandlers.tla`, and
-`PacketHandlers.tla`, which contain definitions of
-operators that handle client, connection handshake, channel handshake, and packet
-datagrams, respectively.
-The module `RelayerDefinitions.tla` contains definitions of operators that are used across all the
-modules.
-
-## Properties and Invariants
-
-### System-level properties
-
-We specify three kinds of properties for the IBC core protocols in the module [IBCCore.tla](IBCCore.tla):
-
-- `IBCSafety`: Bad datagrams are not used to update the chain stores.
-
-- `IBCDelivery`: If `ChainA` sends a datagram to `ChainB`, then `ChainB` eventually receives the datagram.
-
-### Packets
-
-[ICS04](https://github.com/cosmos/ibc/tree/5877197dc03e844542cb8628dd52674a37ca6ff9/spec/ics-004-channel-and-packet-semantics) specifies the following list of ["Desired Properties"](https://github.com/cosmos/ibc/tree/5877197dc03e844542cb8628dd52674a37ca6ff9/spec/ics-004-channel-and-packet-semantics#desired-properties):
-
-#### [Efficiency](https://github.com/cosmos/ibc/tree/5877197dc03e844542cb8628dd52674a37ca6ff9/spec/ics-004-channel-and-packet-semantics#efficiency)
-
-Efficiency seems too vague to formalize. In particular, the
-formulation ignores the relayers, which are the active components in packet
-transmission. It is not clear how to formalize it in a suitable way.
-
-#### [Exactly-once delivery](https://github.com/cosmos/ibc/tree/5877197dc03e844542cb8628dd52674a37ca6ff9/spec/ics-004-channel-and-packet-semantics#exactly-once-delivery)
-
-These properties are also vague, as:
-
-* in the absence of a relayer, no packets can be delivered,
-* they ignore timeouts,
-* it is unspecified what "sent" means. We suggest it means that a packet commitment is written in the provable store (in our model `ChainStore`), rather than that `SendPacket` is executed.
-
-As a result, we suggest that the property should be decomposed into two properties:
-
-* (at most once) For each packet `p`, if a chain performs `RecvPacket(p)` successfully (without abort), it will
-  not perform `RecvPacket(p)` successfully in the future.
-
-* (typical case) If
-  * the sender and receiver chains are valid, and
-  * there is a correct relayer, and
-  * communication is bounded in time, and
-  * the `timeoutHeights` and times are chosen favorably, and
-  * the receiver chain does not censor the packet,
-
-  then the packet will be delivered.
-
-The second property ignores that timeouts can happen.
-
-If this is the confirmed intended behavior, these properties can be expressed
-and verified by a slight modification of the specification, in particular, of
-the way in which the packet receipts are stored in the chain store (in a set vs. in a sequence).
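-
-As an illustration, a minimal sketch of the first (at most once) property is given below. It assumes that the chain store of `ChainA` keeps the receipts of received packets in a set-valued field, here called `packetReceipts` (the actual field name in the specification may differ), and it only states that this set grows monotonically; together with the receive handler's check that a receipt is not yet present, this yields the at-most-once behavior.
-
-```tla
-\* Sketch only, not a definition from IBCCore.tla:
-\* once a receipt is recorded in ChainA's store, no step removes it,
-\* so RecvPacket cannot succeed twice for the same packet
-AtMostOnceSketch ==
-    [][chainAstore.packetReceipts \subseteq chainAstore'.packetReceipts]_chainAstore
-```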
-
-#### [Ordering](https://github.com/cosmos/ibc/tree/5877197dc03e844542cb8628dd52674a37ca6ff9/spec/ics-004-channel-and-packet-semantics#ordering)
-
-- ordered channels: It is not clear what "if packet x is sent before packet y by a channel end on chain A" means in a context where chain A performs invalid transitions: in that case, a packet with sequence number *i* can be sent after *i+1*. If this happens, the IBC implementation may be broken (depending on the relayer).
-
-In the context of two valid chains, this property can be
-expressed and verified by adding a history
-variable on the receiving side, which is modified by transitions of the receiving chain.
-
-- unordered channels: no ordering property is defined.
-
-#### [Permissioning](https://github.com/cosmos/ibc/tree/5877197dc03e844542cb8628dd52674a37ca6ff9/spec/ics-004-channel-and-packet-semantics#permissioning)
-
-This property is about capabilities. We do not capture capabilities in the TLA+ specification.
-
-### Channel
-
-As there are no explicit properties regarding channels given in [ICS 04](https://github.com/cosmos/ibc/tree/5877197dc03e844542cb8628dd52674a37ca6ff9/spec/ics-004-channel-and-packet-semantics) in textual form, we have formalized that the channel handshake does not deviate from the channel lifecycle provided as a [figure](https://github.com/cosmos/ibc/tree/5877197dc03e844542cb8628dd52674a37ca6ff9/spec/ics-004-channel-and-packet-semantics/channel-state-machine.png). The resulting properties are given in [IBCCore.tla](IBCCore.tla) under the names:
-
-- `ChannelInitSafety`
-- `ChannelTryOpenSafety`
-- `ChannelOpenSafety`
-- `ChannelCloseSafety`
-
-### Connection Handshake
-
-Similarly to the channel handshake, we have formalized that the connection handshake does not deviate from the connection lifecycle provided as a [figure](https://github.com/cosmos/ibc/tree/5877197dc03e844542cb8628dd52674a37ca6ff9/spec/ics-003-connection-semantics/state.png). The resulting properties are given in [IBCCore.tla](IBCCore.tla) under the names:
-
-- `ConnectionInitSafety`
-- `ConnectionTryOpenSafety`
-- `ConnectionOpenSafety`
-
-We formalize [these properties](https://github.com/cosmos/ibc/tree/5877197dc03e844542cb8628dd52674a37ca6ff9/spec/ics-003-connection-semantics#properties--invariants) as follows:
-
-> Connection identifiers are first-come-first-serve: once a connection has been negotiated, a unique identifier pair exists between two chains.
-
-[ICS3-Proto-1-ConnectionUniqueness](https://github.com/informalsystems/ibc-rs/blob/master/docs/spec/connection-handshake/L1_2.md#guarantees): A module accepts (i.e., initializes on) a connection end at most once.
-
-> The connection handshake cannot be man-in-the-middled by another blockchain's IBC handler.
-
-The scenario is not clear, so we did not formalize it.
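-
-For the uniqueness property above, the following is an illustrative sketch (not the actual formalization in [IBCCore.tla](IBCCore.tla)) of the kind of action property involved: once the connection end of `ChainA` has left its initial state, no step takes it back, i.e., the chain initializes on its connection end at most once. The field `connectionEnd.state` and the state name `"UNINIT"` are assumptions about the chain-store encoding.
-
-```tla
-\* Sketch only: the connection end is initialized at most once
-ConnectionUniquenessSketch ==
-    [][chainAstore.connectionEnd.state /= "UNINIT"
-        => chainAstore'.connectionEnd.state /= "UNINIT"]_chainAstore
-```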
-
-
-## Using the Model
-
-### Constants
-
-The module `IBCCore.tla` is parameterized by the constants:
- - `ClientDatagramsRelayer_i`, for `i in {1, 2}`, a Boolean flag defining whether `Relayer_i` creates client datagrams,
- - `ConnectionDatagramsRelayer_i`, for `i in {1, 2}`, a Boolean flag defining whether `Relayer_i` creates connection datagrams,
- - `ChannelDatagramsRelayer_i`, for `i in {1, 2}`, a Boolean flag defining whether `Relayer_i` creates channel datagrams,
- - `PacketDatagramsRelayer_i`, for `i in {1, 2}`, a Boolean flag defining whether `Relayer_i` creates packet datagrams,
- - `MaxHeight`, a natural number denoting the maximal height of the chains,
- - `MaxVersion`, a natural number denoting the maximal connection / channel version supported,
- - `MaxPacketSeq`, a natural number denoting the maximal packet sequence number,
- - `ChannelOrdering`, a string indicating whether the channels are ordered or unordered.
-
-#### Assigning values to the constants
-
-The Boolean flags, defined as constants in the module `IBCCore.tla`, allow us to run experiments in different settings. For example, if we set both `ClientDatagramsRelayer1` and `ClientDatagramsRelayer2` to `TRUE` in a TLC model, then the two relayers in the system concurrently create datagrams related to client creation and client update, and the model checker will check the temporal properties related to client datagrams.
-
-Observe that the setting where, for example, `ClientDatagramsRelayer1 = TRUE`, `ConnectionDatagramsRelayer2 = TRUE`, `ChannelDatagramsRelayer1 = TRUE`, `PacketDatagramsRelayer1 = TRUE`, and the remaining Boolean flags are `FALSE`, is equivalent to having a single relayer. An example TLC configuration for this single-relayer setting is sketched at the end of this document.
-
-### Importing the specification into the TLA+ Toolbox
-
-To import the specification into the TLA+ Toolbox and run TLC:
- - add a new spec in the TLA+ Toolbox with the root-module file `IBCCore.tla`
- - create a model
- - assign values to the constants (example values can be found in `IBCCore.cfg`)
- - choose "Temporal formula" as the behavior spec, and use the formula `Spec`
- - add the properties `IBCSafety` and `IBCDelivery`
- - run TLC on the model
-
-#### Basic checks with TLC
-
-We ran TLC using the constants defined in `IBCCore.cfg` and verified the invariant `TypeOK` in 14 minutes and the invariant `IBCInv` in 11 minutes.
-As TLC usually takes longer to check safety and liveness properties, we have not
-conducted extensive experiments to check `IBCSafety` and `IBCDelivery` with TLC yet.
-
-#### Apalache
-
-The specification contains type annotations for the
-model checker [Apalache](https://github.com/informalsystems/apalache).
-The specification passes the type check using the type checker [Snowcat](https://apalache.informal.systems/docs/apalache/typechecker-snowcat.html)
-integrated in Apalache.
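-
-As referenced above, the following is an illustrative TLC configuration for the single-relayer setting described in the section on assigning values to the constants. The concrete values are examples only; the `IBCCore.cfg` file in the repository may differ.
-
-```
-SPECIFICATION Spec
-
-CONSTANTS
-    MaxHeight = 2
-    MaxVersion = 2
-    MaxPacketSeq = 1
-    ClientDatagramsRelayer1 = TRUE
-    ClientDatagramsRelayer2 = FALSE
-    ConnectionDatagramsRelayer1 = FALSE
-    ConnectionDatagramsRelayer2 = TRUE
-    ChannelDatagramsRelayer1 = TRUE
-    ChannelDatagramsRelayer2 = FALSE
-    PacketDatagramsRelayer1 = TRUE
-    PacketDatagramsRelayer2 = FALSE
-    ChannelOrdering = "UNORDERED"
-
-INVARIANTS
-    TypeOK
-
-PROPERTIES
-    IBCSafety
-    IBCDelivery
-```
-
-Checking the liveness property `IBCDelivery` typically also requires the fairness constraints to be part of the behavior specification; for a quick invariant-only run, the `PROPERTIES` section can be omitted.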
diff --git a/docs/spec/tla/packet-delay/Chain.tla b/docs/spec/tla/packet-delay/Chain.tla deleted file mode 100644 index c21ad200d0..0000000000 --- a/docs/spec/tla/packet-delay/Chain.tla +++ /dev/null @@ -1,159 +0,0 @@ -------------------------------- MODULE Chain ------------------------------- - -EXTENDS Integers, FiniteSets, Sequences, ICS04PacketHandlers, IBCPacketDelayDefinitions - -CONSTANTS - MaxHeight, \* maximal chain height - ChannelOrdering, \* indicate whether the channels are ordered or unordered - MaxPacketSeq, \* maximal packet sequence number - MaxDelay, \* maximal packet delay - ChainID \* a chain ID - -VARIABLES - chainStore, \* chain store, containing client heights and a channel end - incomingPacketDatagrams, \* sequence of incoming packet datagrams - appPacketSeq, \* packet sequence number from the application on the chain - packetLog, \* packet log - packetDatagramTimestamp \* history variable that tracks when packet datagrams were processed - -vars == <> -Heights == 1..MaxHeight \* set of possible heights of the chains in the system - -(*************************************************************************** - Packet update operators - ***************************************************************************) -\* Update the chain store and packet log with packet datagrams -(* @type: (Str, DATAGRAM, Seq(LOGENTRY)) => - [chainStore: CHAINSTORE, packetLog: Seq(LOGENTRY), datagramTimestamp: <> -> Int]; -*) -PacketUpdate(chainID, packetDatagram, log) == - - LET packet == packetDatagram.packet IN - \* get the new updated store, packet log - LET packetUpdate == - IF packetDatagram.type = "PacketRecv" - THEN HandlePacketRecv(chainID, chainStore, packetDatagram, MaxDelay, log, packetDatagramTimestamp) - ELSE IF packetDatagram.type = "PacketAck" - THEN HandlePacketAck(chainID, chainStore, packetDatagram, MaxDelay, log, packetDatagramTimestamp) - ELSE [chainStore |-> chainStore, - packetLog |-> log, - datagramTimestamp |-> packetDatagramTimestamp] - IN - - LET packetUpdateStore == packetUpdate.chainStore IN - - \* update height and timestamp - LET updatedStore == - IF packetUpdateStore.height + 1 \in Heights - THEN [packetUpdateStore EXCEPT - !.height = packetUpdateStore.height + 1, - !.timestamp = packetUpdateStore.timestamp + 1] - ELSE [packetUpdateStore EXCEPT - !.timestamp = packetUpdateStore.timestamp + 1] - IN - - [chainStore |-> updatedStore, - packetLog |-> packetUpdate.packetLog, - datagramTimestamp |-> packetUpdate.datagramTimestamp] - -(*************************************************************************** - Chain actions - ***************************************************************************) -\* Advance the height of the chain until MaxHeight is reached -AdvanceChain == - /\ chainStore.height + 1 \in Heights - /\ chainStore' = [chainStore EXCEPT - !.height = chainStore.height + 1, - !.timestamp = chainStore.timestamp + 1] - /\ UNCHANGED <> - -\* handle the incoming packet datagrams -HandlePacketDatagrams == - \* enabled if incomingPacketDatagrams is not empty - /\ incomingPacketDatagrams /= <<>> - /\ LET packetUpdate == PacketUpdate(ChainID, Head(incomingPacketDatagrams), packetLog) IN - /\ chainStore' = packetUpdate.chainStore - /\ packetLog' = packetUpdate.packetLog - /\ incomingPacketDatagrams' = Tail(incomingPacketDatagrams) - /\ packetDatagramTimestamp' = packetUpdate.datagramTimestamp - /\ UNCHANGED appPacketSeq - -\* Send a packet -SendPacket == - \* enabled if appPacketSeq is not bigger than MaxPacketSeq - /\ appPacketSeq <= MaxPacketSeq - \* 
Create packet - /\ LET packet == [ - sequence |-> appPacketSeq, - timeoutHeight |-> MaxHeight, - srcPortID |-> chainStore.channelEnd.portID, - srcChannelID |-> chainStore.channelEnd.channelID, - dstPortID |-> chainStore.channelEnd.counterpartyPortID, - dstChannelID |-> chainStore.channelEnd.counterpartyChannelID] IN - \* update chain store with packet committment - /\ chainStore' = WritePacketCommitment(chainStore, packet) - \* log sent packet - /\ packetLog' = Append(packetLog, - [type |-> "PacketSent", - srcChainID |-> ChainID, - sequence |-> packet.sequence, - timeoutHeight |-> packet.timeoutHeight] - ) - \* increase application packet sequence - /\ appPacketSeq' = appPacketSeq + 1 - /\ UNCHANGED <> - - - -\* Acknowledge a packet -AcknowledgePacket == - /\ chainStore.packetsToAcknowledge /= <<>> - \* write acknowledgements to chain store - /\ chainStore' = WriteAcknowledgement(chainStore, Head(chainStore.packetsToAcknowledge)) - \* log acknowledgement - /\ packetLog' = LogAcknowledgement(ChainID, chainStore, packetLog, Head(chainStore.packetsToAcknowledge)) - /\ UNCHANGED <> - -(*************************************************************************** - Specification - ***************************************************************************) -\* Initial state predicate -\* Initially -\* - the chain store is initialized to -\* InitChainStore(ChainID, ChannelOrdering, MaxDelay) -\* (defined in IBCPacketDelayDefinitions.tla) -\* - incomingPacketDatagrams is an empty sequence -\* - the appPacketSeq is set to 1 -Init == - /\ chainStore = InitChainStore(ChainID, Heights, ChannelOrdering, MaxDelay) - /\ incomingPacketDatagrams = <<>> - /\ appPacketSeq = 1 - -\* Next state action -\* The chain either -\* - advances its height -\* - receives datagrams and updates its state -\* - sends a packet -\* - acknowledges a packet -Next == - \/ AdvanceChain - \/ HandlePacketDatagrams - \/ SendPacket - \/ AcknowledgePacket - \/ UNCHANGED vars - -(*************************************************************************** - Invariants - ***************************************************************************) - -\* type invariant -TypeOK == - /\ chainStore \in ChainStores(Heights, ChannelOrdering, MaxPacketSeq) - /\ incomingPacketDatagrams \in Seq(Datagrams(Heights, MaxPacketSeq)) - /\ appPacketSeq \in Int - /\ packetLog \in Seq(PacketLogEntries(Heights, MaxPacketSeq)) - -============================================================================= -\* Modification History -\* Last modified Mon Apr 19 15:44:24 CEST 2021 by ilinastoilkovska -\* Created Thu Dec 10 13:52:13 CET 2020 by ilinastoilkovska diff --git a/docs/spec/tla/packet-delay/IBCPacketDelay.cfg b/docs/spec/tla/packet-delay/IBCPacketDelay.cfg deleted file mode 100644 index 6b6fab9bd3..0000000000 --- a/docs/spec/tla/packet-delay/IBCPacketDelay.cfg +++ /dev/null @@ -1,12 +0,0 @@ -CONSTANTS - MaxHeight = 3 - ChannelOrdering = "UNORDERED" - MaxPacketSeq = 1 - MaxDelay = 1 - -INIT Init -NEXT Next - -INVARIANTS - TypeOK - Inv \ No newline at end of file diff --git a/docs/spec/tla/packet-delay/IBCPacketDelay.tla b/docs/spec/tla/packet-delay/IBCPacketDelay.tla deleted file mode 100644 index c52903bb35..0000000000 --- a/docs/spec/tla/packet-delay/IBCPacketDelay.tla +++ /dev/null @@ -1,332 +0,0 @@ ---------------------------- MODULE IBCPacketDelay --------------------------- - -(*************************************************************************** - A TLA+ specification of the IBC packet transmission with packet delays. 
- Packet delays ensure that packet-related data should be accepted only - after some delay has passed since the corresponding header is installed. -***************************************************************************) - -EXTENDS Integers, FiniteSets, Sequences, IBCPacketDelayDefinitions - -CONSTANTS - \* @type: Int; - MaxHeight, \* maximal height of all the chains in the system - \* @type: Str; - ChannelOrdering, \* indicate whether the channels are ordered or unordered - \* @type: Int; - MaxPacketSeq, \* maximal packet sequence number - \* @type: Int; - MaxDelay \* maximal packet delay - -VARIABLES - \* @type: CHAINSTORE; - chainAstore, \* store of ChainA - \* @type: CHAINSTORE; - chainBstore, \* store of ChainB - \* @type: Seq(DATAGRAM); - packetDatagramsChainA, \* sequence of packet datagrams incoming to ChainA - \* @type: Seq(DATAGRAM); - packetDatagramsChainB, \* sequence of packet datagrams incoming to ChainB - \* @type: Str -> Seq(DATAGRAM); - outgoingPacketDatagrams, \* packet datagrams created by the relayer but not submitted - \* @type: Seq(LOGENTRY); - packetLog, \* packet log - \* @type: Int; - appPacketSeqChainA, \* packet sequence number from the application on ChainA - \* @type: Int; - appPacketSeqChainB, \* packet sequence number from the application on ChainB - \* @type: <> -> Int; - packetDatagramTimestamp \* history variable that tracks when packet datagrams were processed - -chainAvars == <> -chainBvars == <> -vars == <> - -Heights == 1..MaxHeight - -(*************************************************************************** - Instances of Chain - ***************************************************************************) - -\* We suppose there are two chains that communicate, ChainA and ChainB -\* ChainA -- Instance of Chain.tla -ChainA == INSTANCE Chain - WITH ChainID <- "chainA", - chainStore <- chainAstore, - incomingPacketDatagrams <- packetDatagramsChainA, - appPacketSeq <- appPacketSeqChainA - -\* ChainB -- Instance of Chain.tla -ChainB == INSTANCE Chain - WITH ChainID <- "chainB", - chainStore <- chainBstore, - incomingPacketDatagrams <- packetDatagramsChainB, - appPacketSeq <- appPacketSeqChainB - - (*************************************************************************** - Environment operators - ***************************************************************************) - -\* get chain store by ID -\* @type: (Str) => CHAINSTORE; -GetChainByID(chainID) == - IF chainID = "chainA" - THEN chainAstore - ELSE chainBstore - -\* update the client height of the client for the counterparty chain of chainID -UpdateClientHeights(chainID) == - /\ \/ /\ chainID = "chainA" - /\ chainAstore.counterpartyClientHeights[chainBstore.height] = 0 - /\ chainAstore' = [chainAstore EXCEPT - !.counterpartyClientHeights = [chainAstore.counterpartyClientHeights EXCEPT - ![chainBstore.height] = chainAstore.timestamp], - !.timestamp = chainAstore.timestamp + 1 - ] - /\ UNCHANGED chainBstore - \/ /\ chainID = "chainB" - /\ chainBstore.counterpartyClientHeights[chainAstore.height] = 0 - /\ chainBstore' = [chainBstore EXCEPT - !.counterpartyClientHeights = [chainBstore.counterpartyClientHeights EXCEPT - ![chainAstore.height] = chainBstore.timestamp], - !.timestamp = chainBstore.timestamp + 1 - ] - /\ UNCHANGED chainAstore - \/ UNCHANGED <> - /\ UNCHANGED <> - /\ UNCHANGED <> - - -\* Compute a packet datagram designated for dstChainID, based on the packetLogEntry -\* @type: (Str, Str, LOGENTRY) => DATAGRAM; -PacketDatagram(srcChainID, dstChainID, packetLogEntry) == - - LET 
srcChannelID == GetChannelID(srcChainID) IN \* "chanAtoB" (if srcChainID = "chainA") - LET dstChannelID == GetChannelID(dstChainID) IN \* "chanBtoA" (if dstChainID = "chainB") - - LET srcPortID == GetPortID(srcChainID) IN \* "portA" (if srcChainID = "chainA") - LET dstPortID == GetPortID(dstChainID) IN \* "portB" (if dstChainID = "chainB") - - LET srcHeight == GetLatestHeight(GetChainByID(srcChainID)) IN - - \* the source chain of the packet that is received by dstChainID is srcChainID - LET recvPacket == [ - sequence |-> packetLogEntry.sequence, - timeoutHeight |-> packetLogEntry.timeoutHeight, - srcChannelID |-> srcChannelID, - srcPortID |-> srcPortID, - dstChannelID |-> dstChannelID, - dstPortID |-> dstPortID - ] IN - - \* the source chain of the packet that is acknowledged by srcChainID is dstChainID - LET ackPacket == [ - sequence |-> packetLogEntry.sequence, - timeoutHeight |-> packetLogEntry.timeoutHeight, - srcChannelID |-> dstChannelID, - srcPortID |-> dstPortID, - dstChannelID |-> srcChannelID, - dstPortID |-> srcPortID - ] IN - - IF packetLogEntry.type = "PacketSent" - THEN [ - type |-> "PacketRecv", - packet |-> recvPacket, - proofHeight |-> srcHeight - ] - ELSE IF packetLogEntry.type = "WriteAck" - THEN [ - type |-> "PacketAck", - packet |-> ackPacket, - acknowledgement |-> packetLogEntry.acknowledgement, - proofHeight |-> srcHeight - ] - ELSE NullDatagram - -\* submit a packet datagram if a delay has passed -\* or install the appropriate height if it is missing -(* @type: (Str) => -[ - datagramsChainA: Seq(DATAGRAM), datagramsChainB: Seq(DATAGRAM), - outgoingDatagrams: Str -> Seq(DATAGRAM), - chainA: CHAINSTORE, chainB: CHAINSTORE -]; -*) -SubmitDatagramOrInstallClientHeight(chainID) == - LET packetDatagram == Head(outgoingPacketDatagrams[chainID]) IN - LET chain == GetChainByID(chainID) IN - - \* if the proof height of the packet datagram is installed on the chain, - \* then clientHeightTimestamp is the timestamp, denoting the time when this - \* height was installed on the chain; - \* otherwise it is 0, denoting that this height is not installed on the chain - LET clientHeightTimestamp == chain.counterpartyClientHeights[packetDatagram.proofHeight] IN - - \* packetDatagram.proof height is installed on chain - IF clientHeightTimestamp /= 0 - \* the delay has passed - THEN IF clientHeightTimestamp + MaxDelay < chain.timestamp - \* submit the datagram to the corresponding chain - THEN LET datagramsChainA == IF chainID = "chainA" - THEN Append(packetDatagramsChainA, packetDatagram) - ELSE packetDatagramsChainA IN - LET datagramsChainB == IF chainID = "chainB" - THEN Append(packetDatagramsChainB, packetDatagram) - ELSE packetDatagramsChainB IN - LET outgoingDatagrams == [outgoingPacketDatagrams EXCEPT - ![chainID] = Tail(outgoingPacketDatagrams[chainID])] IN - - [datagramsChainA |-> datagramsChainA, - datagramsChainB |-> datagramsChainB, - outgoingDatagrams |-> outgoingDatagrams, - chainA |-> chainAstore, - chainB |-> chainBstore] - \* the client height is installed, but the delay has not passed - \* do not submit and do not install any new heights - ELSE [datagramsChainA |-> packetDatagramsChainA, - datagramsChainB |-> packetDatagramsChainB, - outgoingDatagrams |-> outgoingPacketDatagrams, - chainA |-> chainAstore, - chainB |-> chainBstore] - \* packetDatagram.proof height is not installed on chain, install it - ELSE LET chainA == IF chainID = "chainA" - THEN [chainAstore EXCEPT - !.counterpartyClientHeights = - [chainAstore.counterpartyClientHeights EXCEPT - 
![packetDatagram.proofHeight] = chainAstore.timestamp], - !.timestamp = chainAstore.timestamp + 1 - ] - ELSE chainAstore IN - LET chainB == IF chainID = "chainB" - THEN [chainBstore EXCEPT - !.counterpartyClientHeights = - [chainBstore.counterpartyClientHeights EXCEPT - ![packetDatagram.proofHeight] = chainBstore.timestamp], - !.timestamp = chainBstore.timestamp + 1 - ] - ELSE chainBstore IN - - [datagramsChainA |-> packetDatagramsChainA, - datagramsChainB |-> packetDatagramsChainB, - outgoingDatagrams |-> outgoingPacketDatagrams, - chainA |-> chainA, - chainB |-> chainB] - -(*************************************************************************** - Environment actions - ***************************************************************************) - \* update the client height of some chain - UpdateClients == - \E chainID \in ChainIDs : UpdateClientHeights(chainID) - -\* create datagrams depending on packet log -CreateDatagrams == - /\ packetLog /= <<>> - /\ LET packetLogEntry == Head(packetLog) IN - LET srcChainID == packetLogEntry.srcChainID IN - LET dstChainID == GetCounterpartyChainID(srcChainID) IN - LET packetDatagram == PacketDatagram(srcChainID, dstChainID, packetLogEntry) IN - /\ \/ /\ packetDatagram = NullDatagram - /\ UNCHANGED outgoingPacketDatagrams - \/ /\ packetDatagram /= NullDatagram - /\ outgoingPacketDatagrams' = - [chainID \in ChainIDs |-> - IF chainID = dstChainID - THEN Append(outgoingPacketDatagrams[chainID], packetDatagram) - ELSE outgoingPacketDatagrams[chainID] - ] - /\ packetLog' = Tail(packetLog) - /\ UNCHANGED <> - /\ UNCHANGED <> - /\ UNCHANGED <> - -\* submit datagrams if delay has passed -SubmitDatagramsWithDelay == - \E chainID \in ChainIDs : - /\ outgoingPacketDatagrams[chainID] /= <<>> - /\ LET submitted == SubmitDatagramOrInstallClientHeight(chainID) IN - /\ packetDatagramsChainA' = submitted.datagramsChainA - /\ packetDatagramsChainB' = submitted.datagramsChainB - /\ outgoingPacketDatagrams' = submitted.outgoingDatagrams - /\ chainAstore' = submitted.chainA - /\ chainBstore' = submitted.chainB - /\ UNCHANGED <> - -(*************************************************************************** - Component actions - ***************************************************************************) - -\* ChainAction: either chain takes a step, leaving the other -\* variables unchange -ChainAction == - \/ /\ ChainA!Next - /\ UNCHANGED chainBvars - /\ UNCHANGED outgoingPacketDatagrams - \/ /\ ChainB!Next - /\ UNCHANGED chainAvars - /\ UNCHANGED outgoingPacketDatagrams - -\* EnvironmentAction: either -\* - create packet datagrams if packet log is not empty, or -\* - update counterparty clients, or -\* - submit datagrams after their delay has passed -EnvironmentAction == - \/ CreateDatagrams - \/ UpdateClients - \/ SubmitDatagramsWithDelay - -(*************************************************************************** - Specification - ***************************************************************************) - -\* Initial state predicate -Init == - /\ ChainA!Init - /\ ChainB!Init - /\ outgoingPacketDatagrams = [chainID \in ChainIDs |-> <<>>] - /\ packetLog = <<>> - /\ packetDatagramTimestamp = [<> \in ChainIDs \X Heights |-> 0] - -\* Next state action -Next == - \/ ChainAction - \/ EnvironmentAction - \/ UNCHANGED vars - -Spec == Init /\ [][Next]_vars - -(*************************************************************************** - Invariants - ***************************************************************************) - -\* type invariant -TypeOK == - /\ 
ChainA!TypeOK - /\ ChainB!TypeOK - /\ outgoingPacketDatagrams \in [ChainIDs -> Seq(Datagrams(Heights, MaxPacketSeq))] - /\ packetDatagramTimestamp \in [ChainIDs \X Heights -> Int] - -\* each packet datagam is processed at time t (stored in packetDatagramTimestamp), -\* such that t >= ht + delay, where -\* ht is the time when the client height is installed -PacketDatagramsDelay == - \A chainID \in ChainIDs : - \A h \in Heights : - /\ GetChainByID(chainID).counterpartyClientHeights[h] /= 0 - /\ packetDatagramTimestamp[<>] /= 0 - => - packetDatagramTimestamp[<>] >= GetChainByID(chainID).counterpartyClientHeights[h] + MaxDelay - -\* a conjunction of all invariants -Inv == - /\ PacketDatagramsDelay - -============================================================================= -\* Modification History -\* Last modified Mon Apr 19 15:43:40 CEST 2021 by ilinastoilkovska -\* Created Thu Dec 10 13:44:21 CET 2020 by ilinastoilkovska diff --git a/docs/spec/tla/packet-delay/IBCPacketDelayDefinitions.tla b/docs/spec/tla/packet-delay/IBCPacketDelayDefinitions.tla deleted file mode 100644 index cad0aee7ea..0000000000 --- a/docs/spec/tla/packet-delay/IBCPacketDelayDefinitions.tla +++ /dev/null @@ -1,419 +0,0 @@ ---------------------- MODULE IBCPacketDelayDefinitions --------------------- - -EXTENDS Integers, FiniteSets, Sequences - -(************************ TYPE ALIASES FOR SNOWCAT *************************) -(* @typeAlias: CHAN = - [ - state: Str, - order: Str, - portID: Str, - channelID: Str, - counterpartyPortID: Str, - counterpartyChannelID: Str, - nextSendSeq: Int, - nextRcvSeq: Int, - nextAckSeq: Int - ]; -*) -(* @typeAlias: PACKET = - [ - sequence: Int, - timeoutHeight: Int, - srcPortID: Str, - srcChannelID: Str, - dstPortID: Str, - dstChannelID: Str - ]; -*) -(* @typeAlias: PACKETCOMM = - [ - portID: Str, - channelID: Str, - sequence: Int, - timeoutHeight: Int - ]; -*) -(* @typeAlias: PACKETREC = - [ - portID: Str, - channelID: Str, - sequence: Int - ]; -*) -(* @typeAlias: PACKETACK = - [ - portID: Str, - channelID: Str, - sequence: Int, - acknowledgement: Bool - ]; -*) -(* @typeAlias: CHAINSTORE = - [ - height: Int, - timestamp: Int, - counterpartyClientHeights: Int -> Int, - channelEnd: CHAN, - packetCommitments: Set(PACKETCOMM), - packetsToAcknowledge: Seq(PACKET), - packetReceipts: Set(PACKETREC), - packetAcknowledgements: Set(PACKETACK) - ]; -*) -(* @typeAlias: DATAGRAM = - [ - type: Str, - packet: PACKET, - proofHeight: Int, - acknowledgement: Bool - ]; -*) -(* @typeAlias: LOGENTRY = - [ - type: Str, - srcChainID: Str, - sequence: Int, - timeoutHeight: Int, - acknowledgement: Bool - ]; -*) - -(********************** Common operator definitions ***********************) -ChainIDs == {"chainA", "chainB"} -ChannelIDs == {"chanAtoB", "chanBtoA"} -PortIDs == {"portA", "portB"} -ChannelStates == {"OPEN", "CLOSED"} - -nullHeight == 0 -nullChannelID == "none" -nullPortID == "none" -nullEscrowAddress == "none" - -Max(S) == CHOOSE x \in S: \A y \in S: y <= x - -(******************************* ChannelEnds ******************************* - A set of channel end records. - A channel end record contains the following fields: - - - state -- a string - Stores the current state of this channel end. We assume that channel - handshake has successfully finished, that is, the state is either - "OPEN" or "CLOSED" - - - order -- a string - Stores whether the channel end is ordered or unordered. It has one - of the following values: "UNORDERED", "ORDERED". 
- - * ordered channels have three additional packet sequence fields: - nextSendSeq -- stores the sequence number of the next packet that - is going to be sent, - nextRcvSeq -- stores the sequence number of the next packet that - is going to be received, - nextAckSeq -- stores the sequence number of the next packet that - is going to be acknowledged. - - - portID -- a port identifier - Stores the port identifier of this channel end. - - - channelID -- a channel identifier - Stores the channel identifier of this channel end. - - - counterpartyPortID -- a port identifier - Stores the port identifier of the counterparty channel end. - - - counterpartyChannelID -- a channel identifier - Stores the channel identifier of the counterparty channel end. - - Note: we omit channel versions and connection hops. - ***************************************************************************) -\* @type: (Str, Int) => Set(CHAN); -ChannelEnds(channelOrdering, maxPacketSeq) == - IF channelOrdering = "UNORDERED" - THEN \* set of unordered channels - [ - state : ChannelStates, - order : {"UNORDERED"}, - portID : PortIDs \union {nullPortID}, - channelID : ChannelIDs \union {nullChannelID}, - counterpartyPortID : PortIDs \union {nullPortID}, - counterpartyChannelID : ChannelIDs \union {nullChannelID} - ] - ELSE \* set of ordered channels - [ - state : ChannelStates, - order : {"ORDERED"}, - nextSendSeq : 0..maxPacketSeq, - nextRcvSeq : 0..maxPacketSeq, - nextAckSeq : 0..maxPacketSeq, - portID : PortIDs \union {nullPortID}, - channelID : ChannelIDs \union {nullChannelID}, - counterpartyPortID : PortIDs \union {nullPortID}, - counterpartyChannelID : ChannelIDs \union {nullChannelID} - ] - - -(******* PacketCommitments, PacketReceipts, PacketAcknowledgements *********) -\* Set of packet commitments -\* @type: (Set(Int), Int) => Set(PACKETCOMM); -PacketCommitments(Heights, maxPacketSeq) == - [ - channelID : ChannelIDs, - portID : PortIDs, - sequence : 1..maxPacketSeq, - timeoutHeight : Heights - ] - -\* Set of packet receipts -\* @type: (Int) => Set(PACKETREC); -PacketReceipts(maxPacketSeq) == - [ - channelID : ChannelIDs, - portID : PortIDs, - sequence : 1..maxPacketSeq - ] - -\* Set of packet acknowledgements -\* @type: (Int) => Set(PACKETACK); -PacketAcknowledgements(maxPacketSeq) == - [ - channelID : ChannelIDs, - portID : PortIDs, - sequence : 1..maxPacketSeq, - acknowledgement : BOOLEAN - ] - -(********************************* Packets *********************************) -\* Set of packets -\* @type: (Set(Int), Int) => Set(PACKET); -Packets(Heights, maxPacketSeq) == - [ - sequence : 1..maxPacketSeq, - timeoutHeight : Heights, - srcPortID : PortIDs, - srcChannelID : ChannelIDs, - dstPortID : PortIDs, - dstChannelID : ChannelIDs - ] - -(******************************** ChainStores ****************************** - A set of chain store records. - A chain store record contains the following fields: - - - height : an integer between nullHeight and MaxHeight. - Stores the current height of the chain. - - - counterpartyClientHeights : a set of integers between 1 and MaxHeight - Stores the heights of the client for the counterparty chain. - - - connectionEnd : a connection end record - Stores data about the connection with the counterparty chain. - - - packetCommitments : a set of packet commitments - A packet commitment is added to this set when a chain sends a packet - to the counterparty. 
- - - packetReceipts : a set of packet receipts - A packet receipt is added to this set when a chain received a packet - from the counterparty chain. - - - packetsToAcknowledge : a sequence of packets - A packet is added to this sequence when a chain receives it and is used - later for the receiver chain to write an acknowledgement for the packet. - - - packetAcknowledgements : a set of packet acknowledgements - A packet acknowledgement is added to this set when a chain writes an - acknowledgement for a packet it received from the counterparty. - - A chain store is the combination of the provable and private stores. - ***************************************************************************) -\* @type: (Set(Int), Str, Int) => Set(CHAINSTORE); -ChainStores(Heights, channelOrdering, maxPacketSeq) == - [ - height : Heights, - timestamp : Int, - counterpartyClientHeights : [Heights -> Int], - channelEnd : ChannelEnds(channelOrdering, maxPacketSeq), - packetCommitments : SUBSET(PacketCommitments(Heights, maxPacketSeq)), - packetReceipts : SUBSET(PacketReceipts(maxPacketSeq)), - packetsToAcknowledge : Seq(Packets(Heights, maxPacketSeq)), - packetAcknowledgements : SUBSET(PacketAcknowledgements(maxPacketSeq)) - ] - -(******************************** Datagrams ********************************) -\* Set of datagrams (we consider only packet datagrams) -\* @type: (Set(Int), Int) => Set(DATAGRAM); -Datagrams(Heights, maxPacketSeq) == - [ - type : {"PacketRecv"}, - packet : Packets(Heights, maxPacketSeq), - proofHeight : Heights - ] \union [ - type : {"PacketAck"}, - packet : Packets(Heights, maxPacketSeq), - acknowledgement : BOOLEAN, - proofHeight : Heights - ] - -\* Null datagram -NullDatagram == - [type |-> "null"] - -(**************************** PacketLogEntries *****************************) -\* Set of packet log entries -\* @type: (Set(Int), Int) => Set(LOGENTRY); -PacketLogEntries(Heights, maxPacketSeq) == - [ - type : {"PacketSent"}, - srcChainID : ChainIDs, - sequence : 1..maxPacketSeq, - timeoutHeight : Heights - ] \union [ - type : {"PacketRecv"}, - srcChainID : ChainIDs, - sequence : 1..maxPacketSeq, - portID : PortIDs, - channelID : ChannelIDs, - timeoutHeight : Heights - ] \union [ - type : {"WriteAck"}, - srcChainID : ChainIDs, - sequence : 1..maxPacketSeq, - portID : PortIDs, - channelID : ChannelIDs, - timeoutHeight : Heights, - acknowledgement : BOOLEAN - ] - -\* Null packet log entry -NullPacketLogEntry == - [type |-> "null"] - - -(*************************************************************************** - Chain helper operators - ***************************************************************************) - -\* get the ID of chainID's counterparty chain -\* @type: (Str) => Str; -GetCounterpartyChainID(chainID) == - IF chainID = "chainA" THEN "chainB" ELSE "chainA" - -\* get the maximal height of the client for chainID's counterparty chain -\* @type: (CHAINSTORE) => Int; -GetMaxCounterpartyClientHeight(chain) == - IF DOMAIN chain.counterpartyClientHeights /= {} - THEN Max(DOMAIN chain.counterpartyClientHeights) - ELSE nullHeight - -\* get the channel ID of the channel end at chainID -\* @type: (Str) => Str; -GetChannelID(chainID) == - IF chainID = "chainA" - THEN "chanAtoB" - ELSE IF chainID = "chainB" - THEN "chanBtoA" - ELSE nullChannelID - -\* get the channel ID of the channel end at chainID's counterparty chain -\* @type: (Str) => Str; -GetCounterpartyChannelID(chainID) == - IF chainID = "chainA" - THEN "chanBtoA" - ELSE IF chainID = "chainB" - THEN "chanAtoB" - ELSE 
nullChannelID - -\* get the port ID at chainID -\* @type: (Str) => Str; -GetPortID(chainID) == - IF chainID = "chainA" - THEN "portA" - ELSE IF chainID = "chainB" - THEN "portB" - ELSE nullPortID - -\* get the port ID at chainID's counterparty chain -\* @type: (Str) => Str; -GetCounterpartyPortID(chainID) == - IF chainID = "chainA" - THEN "portB" - ELSE IF chainID = "chainB" - THEN "portA" - ELSE nullPortID - -\* get the latest height of chain -\* @type: (CHAINSTORE) => Int; -GetLatestHeight(chain) == - chain.height - -(*************************************************************************** - Initial values of a channel end, connection end, chain store - ***************************************************************************) -\* Initial value of an unordered channel end: -\* - state is "OPEN" (we assume channel handshake has successfully finished) -\* - order is "UNORDERED" -\* - portID, channelID, counterpartyPortID, counterpartyChannelID depend on ChainID -\* @type: (Str) => CHAN; -InitUnorderedChannelEnd(ChainID) == - [ - state |-> "OPEN", - order |-> "UNORDERED", - portID |-> GetPortID(ChainID), - channelID |-> GetChannelID(ChainID), - counterpartyPortID |-> GetCounterpartyPortID(ChainID), - counterpartyChannelID |-> GetCounterpartyChannelID(ChainID) - ] - -\* Initial value of an ordered channel end: -\* - state is "OPEN" (we assume channel handshake has successfully finished) -\* - order is "ORDERED" -\* - nextSendSeq, nextRcvSeq, nextAckSeq are set to 0 -\* - portID, channelID, counterpartyPortID, counterpartyChannelID depend on ChainID -\* @type: (Str) => CHAN; -InitOrderedChannelEnd(ChainID) == - [ - state |-> "OPEN", - order |-> "ORDERED", - nextSendSeq |-> 0, - nextRcvSeq |-> 0, - nextAckSeq |-> 0, - portID |-> GetPortID(ChainID), - channelID |-> GetChannelID(ChainID), - counterpartyPortID |-> GetCounterpartyPortID(ChainID), - counterpartyChannelID |-> GetCounterpartyChannelID(ChainID) - ] - -\* Initial value of a channel end, based on the channel ordering -\* @type: (Str, Str) => CHAN; -InitChannelEnd(ChainID, ChannelOrdering) == - IF ChannelOrdering = "ORDERED" - THEN InitOrderedChannelEnd(ChainID) - ELSE InitUnorderedChannelEnd(ChainID) - -\* Initial value of the chain store: -\* - height is initialized to 1 -\* - timestamp is initialized to 1 -\* - there are no installed client heights -\* - the channel end is initialized to InitChannelEnd -\* - the packet committments, receipts, acknowledgements, and packets -\* to acknowledge are empty -\* @type: (Str, Set(Int), Str, Int) => CHAINSTORE; -InitChainStore(ChainID, Heights, ChannelOrdering, MaxDelay) == - [ - height |-> 1, - timestamp |-> 1, - counterpartyClientHeights |-> [h \in Heights |-> 0], - channelEnd |-> InitChannelEnd(ChainID, ChannelOrdering), - - packetCommitments |-> {}, - packetReceipts |-> {}, - packetAcknowledgements |-> {}, - packetsToAcknowledge |-> <<>> - ] - -============================================================================= -\* Modification History -\* Last modified Mon Apr 19 15:46:15 CEST 2021 by ilinastoilkovska -\* Created Thu Dec 10 14:06:33 CET 2020 by ilinastoilkovska - \ No newline at end of file diff --git a/docs/spec/tla/packet-delay/ICS04PacketHandlers.tla b/docs/spec/tla/packet-delay/ICS04PacketHandlers.tla deleted file mode 100644 index 22a3b359be..0000000000 --- a/docs/spec/tla/packet-delay/ICS04PacketHandlers.tla +++ /dev/null @@ -1,387 +0,0 @@ ------------------------- MODULE ICS04PacketHandlers ------------------------ - -EXTENDS Integers, FiniteSets, Sequences, 
IBCPacketDelayDefinitions - -(*************************************************************************** - Packet datagram handlers - ***************************************************************************) - -\* Handle "PacketRecv" datagrams -(* @type: (Str, CHAINSTORE, DATAGRAM, Int, Seq(LOGENTRY), <> -> Int) => - [chainStore: CHAINSTORE, packetLog: Seq(LOGENTRY), datagramTimestamp: <> -> Int]; -*) -HandlePacketRecv(chainID, chain, packetDatagram, delay, log, datagramTimestamp) == - \* get chainID's channel end - LET channelEnd == chain.channelEnd IN - \* get packet - LET packet == packetDatagram.packet IN - - \* if the proof height of the packet datagram is installed on the chain, - \* then clientHeightTimestamp is the timestamp, denoting the time when this - \* height was installed on the chain; - \* otherwise it is 0, denoting that this height is not installed on the chain - LET clientHeightTimestamp == chain.counterpartyClientHeights[packetDatagram.proofHeight] IN - - IF \* if the channel end is open for packet transmission - /\ channelEnd.state = "OPEN" - \* if the packet has not passed the timeout height - /\ \/ packet.timeoutHeight = 0 - \/ chain.height < packet.timeoutHeight - \* if the "PacketRecv" datagram has valid port and channel IDs - /\ packet.srcPortID = channelEnd.counterpartyPortID - /\ packet.srcChannelID = channelEnd.counterpartyChannelID - /\ packet.dstPortID = channelEnd.portID - /\ packet.dstChannelID = channelEnd.channelID - \* if "PacketRecv" datagram can be verified (i.e., proofHeight is installed) - /\ clientHeightTimestamp /= 0 - \* the "PacketRecv" datagram was received after packet delay - /\ clientHeightTimestamp + delay < chain.timestamp - THEN \* construct log entry for packet log - LET logEntry == [ - type |-> "PacketRecv", - srcChainID |-> chainID, - sequence |-> packet.sequence, - portID |-> packet.dstPortID, - channelID |-> packet.dstChannelID, - timeoutHeight |-> packet.timeoutHeight - ] IN - - \* if the channel is unordered and the packet has not been received - IF /\ channelEnd.order = "UNORDERED" - /\ [ - portID |-> packet.dstPortID, - channelID |-> packet.dstChannelID, - sequence |-> packet.sequence - ] \notin chain.packetReceipts - THEN LET newChainStore == [chain EXCEPT - \* record that the packet has been received - !.packetReceipts = - chain.packetReceipts - \union - {[ - channelID |-> packet.dstChannelID, - portID |-> packet.dstPortID, - sequence |-> packet.sequence - ]}, - \* add packet to the set of packets for which an acknowledgement should be written - !.packetsToAcknowledge = Append(chain.packetsToAcknowledge, packet)] IN - \* record the timestamp in the history variable - LET newDatagramTimestamp == [datagramTimestamp EXCEPT - ![<>] = chain.timestamp - ] IN - - [ - chainStore |-> newChainStore, - packetLog |-> Append(log, logEntry), - datagramTimestamp |-> newDatagramTimestamp - ] - - ELSE \* if the channel is ordered and the packet sequence is nextRcvSeq - IF /\ channelEnd.order = "ORDERED" - /\ packet.sequence = channelEnd.nextRcvSeq - THEN LET newChainStore == [chain EXCEPT - \* increase the nextRcvSeq - !.channelEnd.nextRcvSeq = - channelEnd.nextRcvSeq + 1, - \* add packet to the set of packets for which an acknowledgement should be written - !.packetsToAcknowledge = Append(chain.packetsToAcknowledge, packet)] IN - \* record the timestamp in the history variable - LET newDatagramTimestamp == [datagramTimestamp EXCEPT - ![<>] = chain.timestamp - ] IN - - [ - chainStore |-> newChainStore, - packetLog |-> Append(log, logEntry), 
- datagramTimestamp |-> newDatagramTimestamp - ] - - - \* otherwise, do not update the chain store and the log - ELSE [chainStore |-> chain, packetLog |-> log, datagramTimestamp |-> datagramTimestamp] - ELSE [chainStore |-> chain, packetLog |-> log, datagramTimestamp |-> datagramTimestamp] - - -\* Handle "PacketAck" datagrams -(* @type: (Str, CHAINSTORE, DATAGRAM, Int, Seq(LOGENTRY), <> -> Int) => - [chainStore: CHAINSTORE, packetLog: Seq(LOGENTRY), datagramTimestamp: <> -> Int]; -*) -HandlePacketAck(chainID, chain, packetDatagram, delay, log, datagramTimestamp) == - \* get chainID's channel end - LET channelEnd == chain.channelEnd IN - \* get packet - LET packet == packetDatagram.packet IN - \* get packet committment that should be in chain store - LET packetCommitment == [portID |-> packet.srcPortID, - channelID |-> packet.srcChannelID, - sequence |-> packet.sequence, - timeoutHeight |-> packet.timeoutHeight] IN - - \* if the proof height of the packet datagram is installed on the chain, - \* then clientHeightTimestamp is the timestamp, denoting the time when this - \* height was installed on the chain; - \* otherwise it is 0, denoting that this height is not installed on the chain - LET clientHeightTimestamp == chain.counterpartyClientHeights[packetDatagram.proofHeight] IN - - IF \* if the channel end is open for packet transmission - /\ channelEnd.state = "OPEN" - \* if the packet committment exists in the chain store - /\ packetCommitment \in chain.packetCommitments - \* if the "PacketRecv" datagram has valid port and channel IDs - /\ packet.srcPortID = channelEnd.portID - /\ packet.srcChannelID = channelEnd.channelID - /\ packet.dstPortID = channelEnd.counterpartyPortID - /\ packet.dstChannelID = channelEnd.counterpartyChannelID - \* if "PacketAck" datagram can be verified (i.e., proofHeight is installed) - /\ clientHeightTimestamp /= 0 - \* the "PacketAck" datagram was received after packet delay - /\ clientHeightTimestamp + delay < chain.timestamp - THEN \* if the channel is ordered and the packet sequence is nextAckSeq - LET newChainStore == - IF /\ channelEnd.order = "ORDERED" - /\ packet.sequence = channelEnd.nextAckSeq - THEN \* increase the nextAckSeq and remove packet commitment - [chain EXCEPT - !.channelEnd.nextAckSeq = - channelEnd.nextAckSeq + 1, - !.packetCommitments = chain.packetCommitments \ {packetCommitment}] - \* if the channel is unordered, remove packet commitment - ELSE IF channelEnd.order = "UNORDERED" - THEN [chain EXCEPT - !.packetCommitments = chain.packetCommitments \ {packetCommitment}] - \* otherwise, do not update the chain store - ELSE chain IN - - \* record the timestamp in the history variable - LET newDatagramTimestamp == [datagramTimestamp EXCEPT - ![<>] = chain.timestamp - ] IN - - [ - chainStore |-> newChainStore, - packetLog |-> log, - datagramTimestamp |-> newDatagramTimestamp - ] - - \* otherwise, do not update the chain store and the log - ELSE [chainStore |-> chain, packetLog |-> log, datagramTimestamp |-> datagramTimestamp] - - -\* write packet committments to chain store -\* @type: (CHAINSTORE, PACKET) => CHAINSTORE; -WritePacketCommitment(chain, packet) == - \* get channel end - LET channelEnd == chain.channelEnd IN - \* get latest counterparty client height - LET latestClientHeight == GetMaxCounterpartyClientHeight(chain) IN - - IF \* channel end is neither null nor closed - /\ channelEnd.state \notin {"UNINIT", "CLOSED"} - \* if the packet has valid port and channel IDs - /\ packet.srcPortID = channelEnd.portID - /\ packet.srcChannelID = 
channelEnd.channelID - /\ packet.dstPortID = channelEnd.counterpartyPortID - /\ packet.dstChannelID = channelEnd.counterpartyChannelID - \* timeout height has not passed - /\ \/ packet.timeoutHeight = 0 - \/ latestClientHeight < packet.timeoutHeight - THEN IF \* if the channel is ordered, check if packetSeq is nextSendSeq, - \* add a packet committment in the chain store, and increase nextSendSeq - /\ channelEnd.order = "ORDERED" - /\ packet.sequence = channelEnd.nextSendSeq - THEN [chain EXCEPT - !.packetCommitments = - chain.packetCommitments \union {[portID |-> packet.srcPortID, - channelID |-> packet.srcChannelID, - sequence |-> packet.sequence, - timeoutHeight |-> packet.timeoutHeight]}, - !.channelEnd = - [channelEnd EXCEPT !.nextSendSeq = channelEnd.nextSendSeq + 1], - !.timestamp = - chain.timestamp + 1 - ] - \* otherwise, do not update the chain store - ELSE chain - ELSE IF \* if the channel is unordered, - \* add a packet committment in the chain store - /\ channelEnd.order = "UNORDERED" - THEN [chain EXCEPT - !.packetCommitments = - chain.packetCommitments \union {[portID |-> packet.srcPortID, - channelID |-> packet.srcChannelID, - sequence |-> packet.sequence, - timeoutHeight |-> packet.timeoutHeight]}, - !.timestamp = - chain.timestamp + 1 - ] - \* otherwise, do not update the chain store - ELSE chain - -\* write acknowledgements to chain store -\* @type: (CHAINSTORE, PACKET) => CHAINSTORE; -WriteAcknowledgement(chain, packet) == - \* create a packet acknowledgement for this packet - LET packetAcknowledgement == [ - portID |-> packet.dstPortID, - channelID |-> packet.dstChannelID, - sequence |-> packet.sequence, - acknowledgement |-> TRUE - ] IN - - \* if the acknowledgement for the packet has not been written - IF packetAcknowledgement \notin chain.packetAcknowledgements - THEN \* write the acknowledgement to the chain store and remove - \* the packet from the set of packets to acknowledge - [chain EXCEPT !.packetAcknowledgements = - chain.packetAcknowledgements - \union - {packetAcknowledgement}, - !.packetsToAcknowledge = - Tail(chain.packetsToAcknowledge), - !.timestamp = - chain.timestamp + 1] - - \* remove the packet from the sequence of packets to acknowledge - ELSE [chain EXCEPT !.packetsToAcknowledge = - Tail(chain.packetsToAcknowledge), - !.timestamp = - chain.timestamp + 1] - -\* log acknowledgements to packet Log -\* @type: (Str, CHAINSTORE, Seq(LOGENTRY), PACKET) => Seq(LOGENTRY); -LogAcknowledgement(chainID, chain, log, packet) == - \* create a packet acknowledgement for this packet - LET packetAcknowledgement == [ - portID |-> packet.dstPortID, - channelID |-> packet.dstChannelID, - sequence |-> packet.sequence, - acknowledgement |-> TRUE - ] IN - - \* if the acknowledgement for the packet has not been written - IF packetAcknowledgement \notin chain.packetAcknowledgements - THEN \* append a "WriteAck" log entry to the log - LET packetLogEntry == - [type |-> "WriteAck", - srcChainID |-> chainID, - sequence |-> packet.sequence, - portID |-> packet.dstPortID, - channelID |-> packet.dstChannelID, - timeoutHeight |-> packet.timeoutHeight, - acknowledgement |-> TRUE] IN - Append(log, packetLogEntry) - \* do not add anything to the log - ELSE log - - -\* check if a packet timed out -\* @type: (CHAINSTORE, CHAINSTORE, PACKET, Int) => CHAINSTORE; -TimeoutPacket(chain, counterpartyChain, packet, proofHeight) == - \* get channel end - LET channelEnd == chain.channelEnd IN - \* get counterparty channel end - LET counterpartyChannelEnd == counterpartyChain.channelEnd IN - - 
\* get packet committment that should be in chain store - LET packetCommitment == [portID |-> packet.srcPortID, - channelID |-> packet.srcChannelID, - sequence |-> packet.sequence, - timeoutHeight |-> packet.timeoutHeight] IN - \* get packet receipt that should be absent in counterparty chain store - LET packetReceipt == [portID |-> packet.dstPortID, - channelID |-> packet.dstChannelID, - sequence |-> packet.sequence] IN - - \* if channel end is open - IF /\ channelEnd.state = "OPEN" - \* srcChannelID and srcPortID match channel and port IDs - /\ packet.srcPortID = channelEnd.portID - /\ packet.srcChannelID = channelEnd.channelID - \* dstChannelID and dstPortID match counterparty channel and port IDs - /\ packet.dstPortID = channelEnd.counterpartyPortID - /\ packet.dstChannelID = channelEnd.counterpartyChannelID - \* packet has timed out - /\ packet.timeoutHeight > 0 - /\ proofHeight >= packet.timeoutHeight - \* chain has sent the packet - /\ packetCommitment \in chain.packetCommitments - \* counterparty chain has not received the packet - /\ \/ /\ channelEnd.order = "ORDERED" - /\ counterpartyChannelEnd.nextRcvSeq <= packet.sequence - \/ /\ channelEnd.order = "UNORDERED" - /\ packetReceipt \notin counterpartyChain.packetReceipts - \* counterparty channel end has dstPortID and dstChannelID - /\ counterpartyChannelEnd.portID = packet.dstPortID - /\ counterpartyChannelEnd.channelID = packet.dstChannelID - \* close ordered channel and remove packet commitment - THEN LET updatedChannelEnd == [channelEnd EXCEPT - !.state = IF channelEnd.order = "ORDERED" - THEN "CLOSED" - ELSE channelEnd.state] IN - LET updatedChainStore == [chain EXCEPT - !.channelEnd = updatedChannelEnd, - !.packetCommitments = - chain.packetCommitments \ {packetCommitment}] IN - - updatedChainStore - - \* otherwise, do not update the chain store - ELSE chain - -\* check if a packet timed out on close -\* @type: (CHAINSTORE, CHAINSTORE, PACKET, Int) => CHAINSTORE; -TimeoutOnClose(chain, counterpartyChain, packet, proofHeight) == - \* get channel end - LET channelEnd == chain.channelEnd IN - \* get counterparty channel end - LET counterpartyChannelEnd == counterpartyChain.channelEnd IN - - \* get packet committment that should be in chain store - LET packetCommitment == [portID |-> packet.srcPortID, - channelID |-> packet.srcChannelID, - sequence |-> packet.sequence, - timeoutHeight |-> packet.timeoutHeight] IN - \* get packet receipt that should be absent in counterparty chain store - LET packetReceipt == [portID |-> packet.dstPortID, - channelID |-> packet.dstChannelID, - sequence |-> packet.sequence] IN - - - \* if srcChannelID and srcPortID match channel and port IDs - IF /\ packet.srcPortID = channelEnd.portID - /\ packet.srcChannelID = channelEnd.channelID - \* if dstChannelID and dstPortID match counterparty channel and port IDs - /\ packet.dstPort = channelEnd.counterpartyPortID - /\ packet.dstChannelID = channelEnd.counterpartyChannelID - \* chain has sent the packet - /\ packetCommitment \in chain.packetCommitments - \* counterparty channel end is closed and its fields are as expected - /\ counterpartyChannelEnd.state = "CLOSED" - /\ counterpartyChannelEnd.portID = packet.dstPortID - /\ counterpartyChannelEnd.channelID = packet.dstChannelID - /\ counterpartyChannelEnd.counterpartyPortID = packet.srcPortID - /\ counterpartyChannelEnd.counterpartyChannelID = packet.srcChannelID - \* counterparty chain has not received the packet - /\ \/ /\ channelEnd.order = "ORDERED" - /\ counterpartyChannelEnd.nextRcvSeq <= 
packet.sequence - \/ /\ channelEnd.order = "UNORDERED" - /\ packetReceipt \notin counterpartyChain.packetReceipts - \* close ordered channel and remove packet commitment - THEN LET updatedChannelEnd == [channelEnd EXCEPT - !.state = IF channelEnd.order = "ORDERED" - THEN "CLOSED" - ELSE channelEnd.state] IN - LET updatedChainStore == [chain EXCEPT - !.channelEnd = updatedChannelEnd, - !.packetCommitments = - chain.packetCommitments \ {packetCommitment}] IN - - updatedChainStore - - \* otherwise, do not update the chain store - ELSE chain - -============================================================================= -\* Modification History -\* Last modified Mon Apr 19 15:46:42 CEST 2021 by ilinastoilkovska -\* Created Thu Dec 10 15:12:41 CET 2020 by ilinastoilkovska diff --git a/docs/spec/tla/packet-delay/MC_IBCPacketDelay.tla b/docs/spec/tla/packet-delay/MC_IBCPacketDelay.tla deleted file mode 100644 index 1411f647c7..0000000000 --- a/docs/spec/tla/packet-delay/MC_IBCPacketDelay.tla +++ /dev/null @@ -1,30 +0,0 @@ --------------------------- MODULE MC_IBCPacketDelay ------------------------- - -MaxHeight == 3 -ChannelOrdering == "UNORDERED" -MaxPacketSeq == 1 -MaxDelay == 1 - -VARIABLES - \* @type: CHAINSTORE; - chainAstore, \* store of ChainA - \* @type: CHAINSTORE; - chainBstore, \* store of ChainB - \* @type: Seq(DATAGRAM); - packetDatagramsChainA, \* sequence of packet datagrams incoming to ChainA - \* @type: Seq(DATAGRAM); - packetDatagramsChainB, \* sequence of packet datagrams incoming to ChainB - \* @type: Str -> Seq(DATAGRAM); - outgoingPacketDatagrams, \* packet datagrams created by the relayer but not submitted - \* @type: Seq(LOGENTRY); - packetLog, \* packet log - \* @type: Int; - appPacketSeqChainA, \* packet sequence number from the application on ChainA - \* @type: Int; - appPacketSeqChainB, \* packet sequence number from the application on ChainB - \* @type: <> -> Int; - packetDatagramTimestamp \* history variable that tracks when packet datagrams were processed - -INSTANCE IBCPacketDelay - -============================================================================= diff --git a/docs/spec/tla/packet-delay/README.md b/docs/spec/tla/packet-delay/README.md deleted file mode 100644 index b01cdc44c0..0000000000 --- a/docs/spec/tla/packet-delay/README.md +++ /dev/null @@ -1,121 +0,0 @@ -# TLA+ Specification of IBC Packet Transmission with Packet Delay (deprecated) - -This document describes the TLA+ specification of an IBC packet transmission with -packet delays. -IBC packet transmission with packet delays ensures that -packet-related data should be accepted only after some delay has passed since the corresponding header is installed. -This allows a correct relayer to intervene if the header is from a fork and shutdown the IBC handler, preventing damage at the application level. - -This TLA+ specification was used during the [design process](https://github.com/cosmos/cosmos-sdk/pull/7884) of the IBC connection-specified delay, where packet delay was a time duration. -Later, this design was augmented by adding a second delay parameter, in -terms of number of blocks; called [hybrid packet delay](https://github.com/cosmos/ibc/issues/539). - -## The Model of the Protocol - -We model a system where packet datagrams are both **submitted** by a -relayer and **handled** by a chain after a delay period has passed. 
-The system contains the following modules: -- [IBCPacketDelay.tla](IBCPacketDelay.tla), the main module, which -instantiates two chains and models the behavior of a correct relayer -as the environment where the two chains operate; -- [Chain.tla](Chain.tla), which models the behavior of a chain; -- [IBCPacketDelayDefinitions.tla](IBCPacketDelayDefinitions.tla), which contains definitions of operators that are shared between the - different modules; -- [ICS04PacketHandlers.tla](ICS04PacketHandlers.tla), which contains definitions of operators that specify packet transmission and packet datagram handling. - -### Timestamps - -To be able to enforce packet datagram submission and handling after a given delay, -we introduce a `timestamp` field in the chain store. -This `timestamp` is initially 1, and is incremented when a chain takes a step, that is, when it advances its height, or when it processes datagrams. - -Further, we need to keep track of the time when a counterparty client height -is installed on a chain. -That is, instead of keeping track of a set of counterparty client heights, in the -chain store, we store for each client height -the timestamp at which it was installed. -A counterparty client height whose timestamp is 0 has -not yet been installed on the chain. - - -### Relayer - -In this specification, the relayer is a part of the environment in which the two chains operate. -We define three actions that the environment (i.e., the relayer) can take: -- `UpdateClients`, which updates the counterparty client -heights of some chain. This action abstracts the -transmission of client datagrams. -- `CreateDatagrams`, which creates datagrams depending -on the packet log. This action scans the packet log and -adds the created packet datagram to the outgoing packet -datagram queue of the appropriate chain. -- `SubmitDatagramsWithDelay`, which submits datagrams if -delay has passed. This action scans the outgoing packet datagram queue -of a given chain, and -checks if the `proofHeight` of the datagram is a -client height that is installed on the chain. -The following cases are possible: - - if `proofHeight` is installed, then check if a `MaxDelay` period - has passed between the timestamp when the client height was - installed and the current `timestamp`, stored in the chain store. If - this is the case -- submit the datagram to the incoming packet - datagram queue of the chain; otherwise -- do nothing. - - if `proofHeight` is not installed, then install the it. - -### Packet handlers - -On the packet handling side, the chain also checks if the incoming -`PacketRecv` or `PacketAck` datagram has a valid `proofHeight` field. -This means that the `proofHeight` of the datagram should be installed on the -chain, and there should be `MaxDelay` period between the timestamp when the `proofHeight` was -installed and the current `timestamp` of the chain. - -### History variable - -We define a history variable, called `packetDatagramTimestamp`, where we store -for each `chainID` and each `proofHeight`, the timestamp of the chain `chainID` when a datagram with this `proofHeight` was processed. -We use this history variable in the invariant `PacketDatagramsDelay`, -described below. - - -## Invariants - -The module [IBCPacketDelay.tla](IBCPacketDelay.tla) defines the following invariants: -- `TypeOK`, the type invariant, -- `PacketDatagramsDelay`, which ensures that each packet -datagram is processed after a delay period. 
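
The delay rule described above (in `SubmitDatagramsWithDelay` and in the packet handlers) boils down to: a datagram carrying a given `proofHeight` is accepted only once that client height is installed on the chain and at least a `MaxDelay` period has elapsed since its installation timestamp. Below is a minimal, purely illustrative Python sketch of that check; it is **not** part of the TLA+ specification or the relayer, and all names in it (`ChainStore`, `client_installed_at`, `delay_has_passed`, `MAX_DELAY`) are hypothetical stand-ins for the spec's chain-store fields and `MaxDelay` constant. The exact comparison used by the spec is defined by its TLA+ operators; this sketch only mirrors the prose description.

```python
from dataclasses import dataclass, field
from typing import Dict

MAX_DELAY = 1  # hypothetical counterpart of the spec's MaxDelay constant


@dataclass
class ChainStore:
    timestamp: int = 1
    # proofHeight -> chain timestamp at which that counterparty client height
    # was installed; 0 (or absence) means "not installed yet"
    client_installed_at: Dict[int, int] = field(default_factory=dict)


def delay_has_passed(chain: ChainStore, proof_height: int) -> bool:
    """True iff proof_height is installed and a MaxDelay period has elapsed
    since it was installed -- the condition for submitting or handling a
    packet datagram with this proofHeight."""
    installed_at = chain.client_installed_at.get(proof_height, 0)
    if installed_at == 0:
        return False  # not installed: the relayer installs it and retries later
    return chain.timestamp - installed_at >= MAX_DELAY


# Example: a client height installed at timestamp 3 becomes usable for packet
# datagrams once the chain's timestamp reaches 3 + MAX_DELAY.
store = ChainStore(timestamp=4, client_installed_at={5: 3})
assert delay_has_passed(store, proof_height=5)
```
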
- -## Using the Model - -### Constants - -The module `IBCPacketDelay.tla` is parameterized by the constants: - - `MaxHeight`, a natural number denoting the maximal height of the chains, - - `ChannelOrdering`, a string denoting whether the channels are ordered or unordered, - - `MaxPacketSeq`, a natural number denoting the maximal packet sequence number - - `MaxDelay`, a natural number denoting the maximal packet delay - -### Importing the specification into TLA+ toolbox - -To import the specification in the TLA+ toolbox and run TLC: - - add a new spec in TLA+ toolbox with the root-module file `IBCPacketDelay.tla` - - create a model - - assign a value to the constants (example values can be found in `IBCPacketDelay.cfg`) - - choose "Temporal formula" as the behavior spec, and use the formula `Spec` - - choose invariants/properties that should be checked - - run TLC on the model - -#### Basic checks with TLC - -We ran TLC on `IBCPacketDelay.tla` using the constants defined -in `IBCPacketDelay.cfg`. -We were able to check the invariants described above within seconds. - -#### Apalache - -The specification contains type annotations for the -model checker [Apalache](https://github.com/informalsystems/apalache). -The specification passes the type check using the type checker [Snowcat](https://apalache.informal.systems/docs/apalache/typechecker-snowcat.html) -integrated in Apalache. - - diff --git a/e2e/e2e/__init__.py b/e2e/e2e/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/e2e/e2e/channel.py b/e2e/e2e/channel.py deleted file mode 100644 index 90921aff7b..0000000000 --- a/e2e/e2e/channel.py +++ /dev/null @@ -1,638 +0,0 @@ -from typing import Optional, Tuple -import toml - -from .cmd import * -from .common import * - -import e2e.relayer as relayer - -@dataclass -class TxChanOpenInitRes: - channel_id: ChannelId - connection_id: ConnectionId - counterparty_channel_id: Optional[ChannelId] - counterparty_port_id: PortId - height: BlockHeight - port_id: PortId - - -@cmd("tx raw chan-open-init") -@dataclass -class TxChanOpenInit(Cmd[TxChanOpenInitRes]): - dst_chain_id: ChainId - src_chain_id: ChainId - connection_id: ConnectionId - dst_port_id: PortId - src_port_id: PortId - ordering: Optional[Ordering] = None - - def args(self) -> List[str]: - args = [self.dst_chain_id, self.src_chain_id, - self.connection_id, - self.dst_port_id, self.src_port_id] - - if self.ordering is not None: - args.extend(['--ordering', str(self.ordering)]) - - return args - - def process(self, result: Any) -> TxChanOpenInitRes: - return from_dict(TxChanOpenInitRes, result['OpenInitChannel']) - - -# ----------------------------------------------------------------------------- - - -@dataclass -class TxChanOpenTryRes: - channel_id: ChannelId - connection_id: ConnectionId - counterparty_channel_id: ChannelId - counterparty_port_id: ChannelId - height: BlockHeight - port_id: PortId - - -@cmd("tx raw chan-open-try") -@dataclass -class TxChanOpenTry(Cmd[TxChanOpenTryRes]): - dst_chain_id: ChainId - src_chain_id: ChainId - connection_id: ConnectionId - dst_port_id: PortId - src_port_id: PortId - src_channel_id: ChannelId - ordering: Optional[Ordering] = None - - def args(self) -> List[str]: - args = [self.dst_chain_id, self.src_chain_id, - self.connection_id, - self.dst_port_id, self.src_port_id, - "-s", self.src_channel_id] - - if self.ordering is not None: - args.extend(['--ordering', str(self.ordering)]) - - return args - - def process(self, result: Any) -> TxChanOpenTryRes: - return 
from_dict(TxChanOpenTryRes, result['OpenTryChannel']) - - -# ----------------------------------------------------------------------------- - - -@dataclass -class TxChanOpenAckRes: - channel_id: ChannelId - connection_id: ConnectionId - counterparty_channel_id: ChannelId - counterparty_port_id: ChannelId - height: BlockHeight - port_id: PortId - - -@cmd("tx raw chan-open-ack") -@dataclass -class TxChanOpenAck(Cmd[TxChanOpenAckRes]): - dst_chain_id: ChainId - src_chain_id: ChainId - connection_id: ConnectionId - dst_port_id: PortId - src_port_id: PortId - dst_channel_id: ChannelId - src_channel_id: ChannelId - - def args(self) -> List[str]: - args = [self.dst_chain_id, self.src_chain_id, - self.connection_id, - self.dst_port_id, self.src_port_id, - "-d", self.dst_channel_id, - "-s", self.src_channel_id] - - return args - - def process(self, result: Any) -> TxChanOpenAckRes: - return from_dict(TxChanOpenAckRes, result['OpenAckChannel']) - - -# ----------------------------------------------------------------------------- - - -@dataclass -class TxChanOpenConfirmRes: - channel_id: ChannelId - connection_id: ConnectionId - counterparty_channel_id: ChannelId - counterparty_port_id: ChannelId - height: BlockHeight - port_id: PortId - - -@cmd("tx raw chan-open-confirm") -@dataclass -class TxChanOpenConfirm(Cmd[TxChanOpenConfirmRes]): - dst_chain_id: ChainId - src_chain_id: ChainId - connection_id: ConnectionId - dst_port_id: PortId - src_port_id: PortId - dst_channel_id: ChannelId - src_channel_id: ChannelId - - def args(self) -> List[str]: - args = [self.dst_chain_id, self.src_chain_id, - self.connection_id, - self.dst_port_id, self.src_port_id, - "-d", self.dst_channel_id, - "-s", self.src_channel_id] - - return args - - def process(self, result: Any) -> TxChanOpenConfirmRes: - return from_dict(TxChanOpenConfirmRes, result['OpenConfirmChannel']) - -# ----------------------------------------------------------------------------- - - -@dataclass -class TxChanCloseInitRes: - channel_id: ChannelId - connection_id: ConnectionId - counterparty_channel_id: ChannelId - counterparty_port_id: ChannelId - height: BlockHeight - port_id: PortId - - -@cmd("tx raw chan-close-init") -@dataclass -class TxChanCloseInit(Cmd[TxChanCloseInitRes]): - dst_chain_id: ChainId - src_chain_id: ChainId - dst_conn_id: ConnectionId - dst_port_id: PortId - src_port_id: PortId - dst_chan_id: ChannelId - src_chan_id: ChannelId - - def args(self) -> List[str]: - args = [self.dst_chain_id, self.src_chain_id, - self.dst_conn_id, - self.dst_port_id, self.src_port_id, - "-d", self.dst_chan_id, - "-s", self.src_chan_id] - - return args - - def process(self, result: Any) -> TxChanCloseInitRes: - print(result) - return from_dict(TxChanCloseConfirmRes, result['CloseInitChannel']) - -# ----------------------------------------------------------------------------- - - -@dataclass -class TxChanCloseConfirmRes: - channel_id: ChannelId - connection_id: ConnectionId - counterparty_channel_id: ChannelId - counterparty_port_id: ChannelId - height: BlockHeight - port_id: PortId - - -@cmd("tx raw chan-close-confirm") -@dataclass -class TxChanCloseConfirm(Cmd[TxChanCloseConfirmRes]): - dst_chain_id: ChainId - src_chain_id: ChainId - dst_conn_id: ConnectionId - dst_port_id: PortId - src_port_id: PortId - dst_chan_id: ChannelId - src_chan_id: ChannelId - - def args(self) -> List[str]: - args = [self.dst_chain_id, self.src_chain_id, - self.dst_conn_id, - self.dst_port_id, self.src_port_id, - "-d", self.dst_chan_id, - "-s", self.src_chan_id] - - return 
args - - def process(self, result: Any) -> TxChanCloseConfirmRes: - print(result) - return from_dict(TxChanCloseConfirmRes, result['CloseConfirmChannel']) - - -# ----------------------------------------------------------------------------- - - -@ dataclass -class Remote: - channel_id: ChannelId - port_id: PortId - - -@ dataclass -class ChannelEnd: - connection_hops: List[Any] - ordering: str - remote: Remote - state: str - version: str - -@ dataclass -class ChannelEnds: - chain_id: str - client_id: str - connection_id: str - channel_id: str - port_id: str - - counterparty_chain_id: str - counterparty_client_id: str - counterparty_connection_id: str - counterparty_channel_id: str - counterparty_port_id: str - - -@ cmd("query channel end") -@ dataclass -class QueryChannelEnd(Cmd[ChannelEnd]): - chain_id: ChainId - port_id: PortId - channel_id: ChannelId - - def args(self) -> List[str]: - return [self.chain_id, self.port_id, self.channel_id] - - def process(self, result: Any) -> ChannelEnd: - return from_dict(ChannelEnd, result) - -@ cmd("query channel ends") -@ dataclass -class QueryChannelEnds(Cmd[ChannelEnds]): - chain_id: ChainId - port_id: PortId - channel_id: ChannelId - - def args(self) -> List[str]: - return [self.chain_id, self.port_id, self.channel_id] - - def process(self, result: Any) -> ChannelEnds: - return from_dict(ChannelEnds, result) - -# ============================================================================= -# CHANNEL handshake -# ============================================================================= - - -def chan_open_init(c: Config, - dst: ChainId, src: ChainId, - dst_conn: ConnectionId, - dst_port: PortId = PortId('transfer'), - src_port: PortId = PortId('transfer'), - ordering: Optional[Ordering] = None - ) -> ChannelId: - cmd = TxChanOpenInit(dst_chain_id=dst, src_chain_id=src, - connection_id=dst_conn, - dst_port_id=dst_port, src_port_id=src_port, - ordering=ordering) - - res = cmd.run(c).success() - l.info( - f'ChanOpenInit submitted to {dst} and obtained channel id {res.channel_id}') - return res.channel_id - - -def chan_open_try(c: Config, - dst: ChainId, - src: ChainId, - dst_conn: ConnectionId, - dst_port: PortId, - src_port: PortId, - src_chan: ChannelId, - ordering: Optional[Ordering] = None - ) -> ChannelId: - cmd = TxChanOpenTry(dst_chain_id=dst, src_chain_id=src, - connection_id=dst_conn, - dst_port_id=dst_port, src_port_id=src_port, - src_channel_id=src_chan, - ordering=ordering) - - res = cmd.run(c).success() - l.info( - f'ChanOpenTry submitted to {dst} and obtained channel id {res.channel_id}') - return res.channel_id - - -def chan_open_ack(c: Config, - dst: ChainId, src: ChainId, - dst_conn: ConnectionId, - dst_port: PortId, - src_port: PortId, - dst_chan: ChannelId, - src_chan: ChannelId, - ) -> ChannelId: - cmd = TxChanOpenAck(dst_chain_id=dst, src_chain_id=src, - connection_id=dst_conn, - dst_port_id=dst_port, src_port_id=src_port, - dst_channel_id=dst_chan, - src_channel_id=src_chan) - - res = cmd.run(c).success() - l.info( - f'ChanOpenAck submitted to {dst} and got channel id {res.channel_id}') - return res.channel_id - - -def chan_open_confirm( - c: Config, - dst: ChainId, - src: ChainId, - dst_conn: ConnectionId, - dst_port: PortId, - src_port: PortId, - dst_chan: ChannelId, - src_chan: ChannelId -) -> ChannelId: - cmd = TxChanOpenConfirm(dst_chain_id=dst, src_chain_id=src, - connection_id=dst_conn, - dst_port_id=dst_port, src_port_id=src_port, - dst_channel_id=dst_chan, - src_channel_id=src_chan) - - res = cmd.run(c).success() - 
l.info( - f'ChanOpenConfirm submitted to {dst} and got channel id {res.channel_id}') - return res.channel_id - -# ============================================================================= -# CHANNEL close -# ============================================================================= - - -def chan_close_init( - c: Config, - dst: ChainId, - src: ChainId, - dst_conn: ConnectionId, - dst_port: PortId, - src_port: PortId, - dst_chan: ChannelId, - src_chan: ChannelId -) -> ChannelId: - cmd = TxChanCloseInit(dst_chain_id=dst, src_chain_id=src, - dst_conn_id=dst_conn, - dst_port_id=dst_port, src_port_id=src_port, - dst_chan_id=dst_chan, - src_chan_id=src_chan) - - res = cmd.run(c).success() - l.info( - f'ChannelCloseInit submitted to {dst} and got channel id {res.channel_id}') - return res.channel_id - - -def chan_close_confirm( - c: Config, - dst: ChainId, - src: ChainId, - dst_conn: ConnectionId, - dst_port: PortId, - src_port: PortId, - dst_chan: ChannelId, - src_chan: ChannelId -) -> ChannelId: - cmd = TxChanCloseConfirm(dst_chain_id=dst, src_chain_id=src, - dst_conn_id=dst_conn, - dst_port_id=dst_port, src_port_id=src_port, - dst_chan_id=dst_chan, - src_chan_id=src_chan) - - res = cmd.run(c).success() - l.info( - f'ChannelCloseConfirm submitted to {dst} and got channel id {res.channel_id}') - return res.channel_id - - -def close( - c: Config, - dst: ChainId, - src: ChainId, - dst_conn: ConnectionId, - src_conn: ConnectionId, - dst_chan: ChannelId, - src_chan: ChannelId, - dst_port: PortId = PortId('transfer'), - src_port: PortId = PortId('transfer'), -): - chan_close_init(c, dst, src, dst_conn, dst_port, - src_port, dst_chan, src_chan) - - chan_close_confirm(c, src, dst, src_conn, src_port, - dst_port, src_chan, dst_chan) - - -# ============================================================================= -# CHANNEL handshake -# ============================================================================= - - -def handshake( - c: Config, - side_a: ChainId, side_b: ChainId, - conn_a: ConnectionId, conn_b: ConnectionId, - port_id: PortId -) -> Tuple[ChannelId, ChannelId]: - a_chan_id = chan_open_init(c, dst=side_a, src=side_b, dst_conn=conn_a) - - split() - - b_chan_id = chan_open_try( - c, dst=side_b, src=side_a, dst_conn=conn_b, dst_port=port_id, src_port=port_id, - src_chan=a_chan_id) - - split() - - ack_res = chan_open_ack(c, dst=side_a, src=side_b, dst_port=port_id, src_port=port_id, - dst_conn=conn_a, dst_chan=a_chan_id, src_chan=b_chan_id) - - if ack_res != a_chan_id: - l.error( - f'Incorrect channel id returned from chan open ack: expected={a_chan_id} got={ack_res}') - exit(1) - - confirm_res = chan_open_confirm( - c, dst=side_b, src=side_a, dst_port=port_id, src_port=port_id, - dst_conn=conn_b, dst_chan=b_chan_id, src_chan=a_chan_id) - - if confirm_res != b_chan_id: - l.error( - f'Incorrect channel id returned from chan open confirm: expected={b_chan_id} got={confirm_res}') - exit(1) - - split() - - a_chan_end = query_channel_end(c, side_a, port_id, a_chan_id) - if a_chan_end.state != 'Open': - l.error( - f'Channel end with id {a_chan_id} on chain {side_a} is not in Open state, got: {a_chan_end.state}') - exit(1) - - b_chan_end = query_channel_end(c, side_b, port_id, b_chan_id) - if b_chan_end.state != 'Open': - l.error( - f'Channel end with id {b_chan_id} on chain {side_b} is not in Open state, got: {b_chan_end.state}') - exit(1) - - a_chan_ends = query_channel_ends(c, side_a, port_id, a_chan_id) - l.debug(f'query channel ends result: {a_chan_ends}') - - assert 
a_chan_ends.chain_id == side_a - assert a_chan_ends.connection_id == conn_a - assert a_chan_ends.port_id == port_id - assert a_chan_ends.channel_id == a_chan_id - - assert a_chan_ends.counterparty_chain_id == side_b - assert a_chan_ends.counterparty_connection_id == conn_b - assert a_chan_ends.counterparty_port_id == port_id - assert a_chan_ends.counterparty_channel_id == b_chan_id - - b_chan_ends = query_channel_ends(c, side_b, port_id, b_chan_id) - l.debug(f'query channel ends result: {b_chan_ends}') - - assert b_chan_ends.chain_id == side_b - assert b_chan_ends.connection_id == conn_b - assert b_chan_ends.port_id == port_id - assert b_chan_ends.channel_id == b_chan_id - - assert b_chan_ends.counterparty_chain_id == side_a - assert b_chan_ends.counterparty_connection_id == conn_a - assert b_chan_ends.counterparty_port_id == port_id - assert b_chan_ends.counterparty_channel_id == a_chan_id - - return a_chan_id, b_chan_id - - -# ============================================================================= -# CHANNEL END query -# ============================================================================= - - -def query_channel_end(c: Config, chain_id: ChainId, port: PortId, chan_id: ChannelId) -> ChannelEnd: - cmd = QueryChannelEnd(chain_id, port, chan_id) - res = cmd.run(c).success() - - l.debug(f'Status of channel end {chan_id}: {res}') - - return res - - -# ============================================================================= -# CHANNEL ENDS query -# ============================================================================= - -def query_channel_ends(c: Config, chain_id: ChainId, port: PortId, chan_id: ChannelId) -> ChannelEnd: - cmd = QueryChannelEnds(chain_id, port, chan_id) - res = cmd.run(c).success() - - l.debug(f'Status of channel ends {chan_id}: {res}') - - return res - - -# ============================================================================= -# Passive CHANNEL relayer tests -# ============================================================================= - -def verify_state(c: Config, - ibc1: ChainId, ibc0: ChainId, - ibc1_chan_id: ChannelId, port_id: PortId): - - mode = toml.load(c.config_file)['mode'] - clients_enabled = mode['clients']['enabled'] - conn_enabled = mode['connections']['enabled'] - chan_enabled = mode['channels']['enabled'] - packets_enabled = mode['packets']['enabled'] - - # verify connection state on both chains, should be 'Open' or 'Init' depending on config 'mode' - if clients_enabled and conn_enabled and chan_enabled and packets_enabled: - sleep(10.0) - for i in range(20): - sleep(2.0) - ibc1_chan_end = query_channel_end(c, ibc1, port_id, ibc1_chan_id) - ibc0_chan_id = ibc1_chan_end.remote.channel_id - ibc0_chan_end = query_channel_end(c, ibc0, port_id, ibc0_chan_id) - if ibc0_chan_end.state == 'Open' and ibc1_chan_end.state == 'Open': - break - else: - assert (ibc0_chan_end.state == 'Open'), (ibc0_chan_end, "state is not Open") - assert (ibc1_chan_end.state == 'Open'), (ibc1_chan_end, "state is not Open") - - else: - sleep(5.0) - ibc1_chan_end = query_channel_end(c, ibc1, port_id, ibc1_chan_id) - assert (ibc1_chan_end.state == 'Init'), (ibc1_chan_end, "state is not Init") - - -def passive_channel_start_then_init(c: Config, - ibc1: ChainId, ibc0: ChainId, - ibc1_conn_id: ConnectionId, port_id: PortId): - - # 1. start hermes - proc = relayer.start(c) - sleep(2.0) - - # 2. create a channel in Init state - ibc1_chan_id = chan_open_init(c, dst=ibc1, src=ibc0, dst_conn=ibc1_conn_id) - - # 3. 
wait for channel handshake to finish and verify channel state on both chains - verify_state(c, ibc1, ibc0, ibc1_chan_id, port_id) - - # 4. All good, stop the relayer - proc.kill() - - -def passive_channel_init_then_start(c: Config, - ibc1: ChainId, ibc0: ChainId, - ibc1_conn_id: ConnectionId, port_id: PortId): - - # 1. create a channel in Init state - ibc1_chan_id = chan_open_init(c, dst=ibc1, src=ibc0, dst_conn=ibc1_conn_id) - sleep(2.0) - - # 2. start relaying - proc = relayer.start(c) - - # 3. wait for channel handshake to finish and verify channel state on both chains - verify_state(c, ibc1, ibc0, ibc1_chan_id, port_id) - - # 4. All good, stop the relayer - proc.kill() - - -def passive_channel_try_then_start(c: Config, - ibc1: ChainId, - ibc0: ChainId, - ibc1_conn_id: ConnectionId, - ibc0_conn_id: ConnectionId, - port_id: PortId): - - # 1. create a channel in Try state - ibc1_chan_id = chan_open_init(c, dst=ibc1, src=ibc0, dst_conn=ibc1_conn_id) - sleep(2.0) - ibc0_chan_id = chan_open_try(c, dst=ibc0, src=ibc1, dst_conn=ibc0_conn_id, src_port=port_id, dst_port=port_id, src_chan=ibc1_chan_id) - sleep(2.0) - - # 2. start relaying - proc = relayer.start(c) - - # 3. wait for channel handshake to finish and verify channel state on both chains - verify_state(c, ibc1, ibc0, ibc1_chan_id, port_id) - - # 4. All good, stop the relayer - proc.kill() diff --git a/e2e/e2e/client.py b/e2e/e2e/client.py deleted file mode 100644 index fd71a5ae18..0000000000 --- a/e2e/e2e/client.py +++ /dev/null @@ -1,132 +0,0 @@ -from typing import Optional, Tuple - -from .cmd import * -from .common import * - - -@dataclass -class ClientCreated: - client_id: ClientId - client_type: ClientType - consensus_height: Height - height: BlockHeight - - -@dataclass -@cmd("tx raw create-client") -class TxCreateClient(Cmd[ClientCreated]): - dst_chain_id: ChainId - src_chain_id: ChainId - - def args(self) -> List[str]: - return [self.dst_chain_id, self.src_chain_id] - - def process(self, result: Any) -> ClientCreated: - return from_dict(ClientCreated, result['CreateClient']) - - -# ----------------------------------------------------------------------------- - - -@dataclass -class ClientUpdated: - client_id: ClientId - client_type: ClientType - consensus_height: Height - height: BlockHeight - - -@dataclass -@cmd("tx raw update-client") -class TxUpdateClient(Cmd[ClientUpdated]): - dst_chain_id: ChainId - dst_client_id: ClientId - - def args(self) -> List[str]: - return [self.dst_chain_id, self.dst_client_id] - - def process(self, result: Any) -> ClientUpdated: - return from_dict(ClientUpdated, result[-1]['UpdateClient']['common']) - - -# ----------------------------------------------------------------------------- - -@dataclass -class AllowUpdate: - after_expiry: bool - after_misbehaviour: bool - - -@dataclass -class ClientState: - chain_id: ChainId - frozen_height: Optional[Height] - latest_height: Height - max_clock_drift: Duration - trust_level: TrustLevel - trusting_period: Duration - unbonding_period: Duration - upgrade_path: List[str] - allow_update: AllowUpdate - - -@dataclass -@cmd("query client state") -class QueryClientState(Cmd[ClientState]): - chain_id: ChainId - client_id: ClientId - height: Optional[int] = None - proof: bool = False - - def args(self) -> List[str]: - args = [] - - if self.height is not None: - args.extend(['--height', str(self.height)]) - if self.proof: - args.append('--proof') - - args.extend([self.chain_id, self.client_id]) - - return args - - def process(self, result: Any) -> ClientState: - return 
from_dict(ClientState, result) - -# ============================================================================= -# CLIENT creation and manipulation -# ============================================================================= - - -def create_client(c: Config, dst: ChainId, src: ChainId) -> ClientCreated: - cmd = TxCreateClient(dst_chain_id=dst, src_chain_id=src) - client = cmd.run(c).success() - l.info(f'Created client: {client.client_id}') - return client - - -def update_client(c: Config, dst: ChainId, client_id: ClientId) -> ClientUpdated: - cmd = TxUpdateClient(dst_chain_id=dst, - dst_client_id=client_id) - res = cmd.run(c).success() - l.info(f'Updated client to: {res.consensus_height}') - return res - - -def query_client_state(c: Config, chain_id: ChainId, client_id: ClientId) -> Tuple[ClientId, ClientState]: - cmd = QueryClientState(chain_id, client_id) - res = cmd.run(c).success() - l.debug(f'State of client {client_id} is: {res}') - return client_id, res - - -def create_update_query_client(c: Config, dst: ChainId, src: ChainId) -> ClientId: - client = create_client(c, dst, src) - split() - query_client_state(c, dst, client.client_id) - split() - update_client(c, dst, client.client_id) - split() - query_client_state(c, dst, client.client_id) - split() - return client.client_id diff --git a/e2e/e2e/cmd.py b/e2e/e2e/cmd.py deleted file mode 100644 index 6750aab58b..0000000000 --- a/e2e/e2e/cmd.py +++ /dev/null @@ -1,106 +0,0 @@ -#!/usr/bin/env python3 - -import json -import logging as l -import subprocess -from dataclasses import dataclass, fields as datafields, is_dataclass -from pathlib import Path -from typing import Any, List, TypeVar, Generic, Type, Callable - - -@dataclass -class Config: - config_file: Path - relayer_cmd: str - log_level: str - max_retries: int = 10 - - -T = TypeVar('T') - - -@dataclass -class CmdResult(Generic[T]): - cmd: 'Cmd' - config: Config - result: Any - retries: int = 0 - - def success(self) -> T: - status = self.result.get('status') or 'unknown' - result = self.result.get('result') or {} - - if status == "success": - data = self.cmd.process(result) - l.debug(str(data)) - return data - elif self.retries < self.config.max_retries: - left = self.config.max_retries - self.retries - l.warn(f'Command failed: retrying (retries left: {left})') - return self.cmd.retry(self.config, self.retries).success() - else: - raise ExpectedSuccess(self.cmd, status, result) - - -class Cmd(Generic[T]): - name: str - - def process(self, result: Any) -> Any: - raise NotImplementedError("Cmd::process") - - def args(self) -> List[str]: - raise NotImplementedError("Cmd::args") - - def to_cmd(self) -> str: - return f"{self.name} {' '.join(self.args())}" - - def run(self, config: Config, retries: int = 0) -> CmdResult[T]: - full_cmd = f'{config.relayer_cmd} -c {config.config_file} --json'.split(' ') - full_cmd.extend(self.name.split(' ')) - full_cmd.extend(self.args()) - l.debug(' '.join(full_cmd)) - - res = subprocess.run(full_cmd, capture_output=True, text=True) - lines = res.stdout.splitlines() - last_line = ''.join(lines[-1:]) - l.debug(last_line) - - return CmdResult(cmd=self, config=config, retries=retries, result=json.loads(last_line)) - - def retry(self, config: Config, retries: int) -> CmdResult[T]: - return self.run(config, retries + 1) - - -C = TypeVar('C', bound=Cmd) - - -def cmd(name: str) -> Callable[[Type[C]], Type[C]]: - def decorator(klass: Type[C]) -> Type[C]: - klass.name = name - return klass - - return decorator - - -def from_dict(klass, dikt) -> Any: - if 
is_dataclass(klass): - fields = datafields(klass) - args = {f.name: from_dict(f.type, dikt[f.name]) for f in fields} - return klass(**args) - else: - return dikt - - -class ExpectedSuccess(Exception): - cmd: Any - status: str - result: Any - - def __init__(self, cmd: Any, status: str, result: Any) -> None: - self.cmd = cmd - self.status = status - self.result = result - - super().__init__( - f"Command '{cmd}' failed. Expected 'success', got '{status}'. Message: {result}" - ) diff --git a/e2e/e2e/common.py b/e2e/e2e/common.py deleted file mode 100644 index 10b96b7f51..0000000000 --- a/e2e/e2e/common.py +++ /dev/null @@ -1,44 +0,0 @@ -from dataclasses import dataclass -from enum import Enum -from time import sleep -from typing import NewType - - -@dataclass -class Height: - revision_height: int - revision_number: int - - -@dataclass -class Duration: - nanos: int - secs: int - - -@dataclass -class TrustLevel: - denominator: int - numerator: int - - -class Ordering(Enum): - UNORDERED = 'UNORDERED' - ORDERED = 'ORDERED' - - -PortId = NewType('PortId', str) -ChainId = NewType('ChainId', str) -ClientId = NewType('ClientId', str) -ChannelId = NewType('ChannelId', str) -ConnectionId = NewType('ConnectionId', str) - -Hex = NewType('Hex', str) -Sequence = NewType('Sequence', str) -Timestamp = NewType('Timestamp', int) -ClientType = NewType('ClientType', str) -BlockHeight = NewType('BlockHeight', str) - -def split(): - sleep(0.5) - print() diff --git a/e2e/e2e/connection.py b/e2e/e2e/connection.py deleted file mode 100644 index 834cfb6440..0000000000 --- a/e2e/e2e/connection.py +++ /dev/null @@ -1,346 +0,0 @@ -from typing import Tuple -import toml - -from .cmd import * -from .common import * - -import e2e.relayer as relayer - - - -@dataclass -class TxConnInitRes: - connection_id: ConnectionId - - -@cmd("tx raw conn-init") -@dataclass -class TxConnInit(Cmd[TxConnInitRes]): - dst_chain_id: ChainId - src_chain_id: ChainId - dst_client_id: ClientId - src_client_id: ClientId - - def args(self) -> List[str]: - return [self.dst_chain_id, self.src_chain_id, - self.dst_client_id, self.src_client_id] - - def process(self, result: Any) -> TxConnInitRes: - return from_dict(TxConnInitRes, result['OpenInitConnection']) - - -# ----------------------------------------------------------------------------- - -@dataclass -class TxConnTryRes: - connection_id: ConnectionId - - -@cmd("tx raw conn-try") -@dataclass -class TxConnTry(Cmd[TxConnTryRes]): - dst_chain_id: ChainId - src_chain_id: ChainId - dst_client_id: ClientId - src_client_id: ClientId - src_conn_id: ConnectionId - - def args(self) -> List[str]: - return [self.dst_chain_id, self.src_chain_id, - self.dst_client_id, self.src_client_id, - "-s", self.src_conn_id] - - def process(self, result: Any) -> TxConnTryRes: - return from_dict(TxConnTryRes, result['OpenTryConnection']) - - -# ----------------------------------------------------------------------------- - -@dataclass -class TxConnAckRes: - connection_id: ConnectionId - - -@cmd("tx raw conn-ack") -@dataclass -class TxConnAck(Cmd[TxConnAckRes]): - dst_chain_id: ChainId - src_chain_id: ChainId - dst_client_id: ClientId - src_client_id: ClientId - dst_conn_id: ConnectionId - src_conn_id: ConnectionId - - def args(self) -> List[str]: - return [self.dst_chain_id, self.src_chain_id, - self.dst_client_id, self.src_client_id, - "-d", self.dst_conn_id, - "-s", self.src_conn_id] - - def process(self, result: Any) -> TxConnAckRes: - return from_dict(TxConnAckRes, result['OpenAckConnection']) - - -# 
----------------------------------------------------------------------------- - -@dataclass -class TxConnConfirmRes: - connection_id: ConnectionId - - -@cmd("tx raw conn-confirm") -@dataclass -class TxConnConfirm(Cmd[TxConnConfirmRes]): - dst_chain_id: ChainId - src_chain_id: ChainId - dst_client_id: ClientId - src_client_id: ClientId - dst_conn_id: ConnectionId - src_conn_id: ConnectionId - - def args(self) -> List[str]: - return [self.dst_chain_id, self.src_chain_id, - self.dst_client_id, self.src_client_id, - "-d", self.dst_conn_id, - "-s", self.src_conn_id] - - def process(self, result: Any) -> TxConnConfirmRes: - return from_dict(TxConnConfirmRes, result['OpenConfirmConnection']) - - -# ----------------------------------------------------------------------------- - -@dataclass -class Version: - features: List[str] - identifier: str - - -@dataclass -class Counterparty: - client_id: ClientId - connection_id: ConnectionId - prefix: str - - -@dataclass -class ConnectionEnd: - client_id: ClientId - counterparty: Counterparty - delay_period: int - state: str - versions: List[Version] - - -@cmd("query connection end") -@dataclass -class QueryConnectionEnd(Cmd[ConnectionEnd]): - chain_id: ChainId - connection_id: ConnectionId - - def args(self) -> List[str]: - return [self.chain_id, self.connection_id] - - def process(self, result: Any) -> ConnectionEnd: - return from_dict(ConnectionEnd, result) - - -# ============================================================================= -# CONNECTION handshake -# ============================================================================= - - -def conn_init(c: Config, - dst: ChainId, src: ChainId, - dst_client: ClientId, src_client: ClientId - ) -> ConnectionId: - cmd = TxConnInit(dst_chain_id=dst, src_chain_id=src, - dst_client_id=dst_client, src_client_id=src_client) - res = cmd.run(c).success() - l.info( - f'ConnOpen init submitted to {dst} and obtained connection id {res.connection_id}') - return res.connection_id - - -def conn_try(c: Config, - dst: ChainId, src: ChainId, - dst_client: ClientId, src_client: ClientId, - src_conn: ConnectionId - ) -> ConnectionId: - cmd = TxConnTry(dst_chain_id=dst, src_chain_id=src, dst_client_id=dst_client, src_client_id=src_client, - src_conn_id=src_conn) - res = cmd.run(c).success() - l.info( - f'ConnOpen try submitted to {dst} and obtained connection id {res.connection_id}') - return res.connection_id - - -def conn_ack(c: Config, - dst: ChainId, src: ChainId, - dst_client: ClientId, src_client: ClientId, - dst_conn: ConnectionId, src_conn: ConnectionId - ) -> ConnectionId: - cmd = TxConnAck(dst_chain_id=dst, src_chain_id=src, dst_client_id=dst_client, src_client_id=src_client, - dst_conn_id=dst_conn, src_conn_id=src_conn) - res = cmd.run(c).success() - l.info( - f'ConnOpen ack submitted to {dst} and obtained connection id {res.connection_id}') - return res.connection_id - - -def conn_confirm(c: Config, - dst: ChainId, src: ChainId, - dst_client: ClientId, src_client: ClientId, - dst_conn: ConnectionId, src_conn: ConnectionId - ) -> ConnectionId: - cmd = TxConnConfirm(dst_chain_id=dst, src_chain_id=src, dst_client_id=dst_client, src_client_id=src_client, - dst_conn_id=dst_conn, src_conn_id=src_conn) - res = cmd.run(c).success() - l.info( - f'ConnOpen confirm submitted to {dst} and obtained connection id {res.connection_id}') - return res.connection_id - - -def handshake(c: Config, - side_a: ChainId, side_b: ChainId, - client_a: ClientId, client_b: ClientId - ) -> Tuple[ConnectionId, ConnectionId]: - a_conn_id 
= conn_init(c, side_a, side_b, client_a, client_b) - split() - b_conn_id = conn_try(c, side_b, side_a, client_b, client_a, a_conn_id) - split() - ack_res = conn_ack( - c, side_a, side_b, client_a, client_b, a_conn_id, b_conn_id) - - if ack_res != a_conn_id: - l.error( - f'Incorrect connection id returned from conn ack: expected=({a_conn_id})/got=({ack_res})') - exit(1) - - split() - - confirm_res = conn_confirm( - c, side_b, side_a, client_b, client_a, b_conn_id, a_conn_id) - - if confirm_res != b_conn_id: - l.error( - f'Incorrect connection id returned from conn confirm: expected=({b_conn_id})/got=({confirm_res})') - exit(1) - - a_conn_end = query_connection_end(c, side_a, a_conn_id) - if a_conn_end.state != 'Open': - l.error( - f'Connection end with id {a_conn_id} is not in Open state, got: {a_conn_end.state}') - exit(1) - - b_conn_end = query_connection_end(c, side_b, b_conn_id) - if b_conn_end.state != 'Open': - l.error( - f'Connection end with id {b_conn_id} is not in Open state, got: {b_conn_end.state}') - exit(1) - - return a_conn_id, b_conn_id - - -# ============================================================================= -# CONNECTION END query -# ============================================================================= - - -def query_connection_end(c: Config, chain_id: ChainId, conn_id: ConnectionId) -> ConnectionEnd: - cmd = QueryConnectionEnd(chain_id, conn_id) - res = cmd.run(c).success() - - l.debug(f'Status of connection end {conn_id}: {res}') - - return res - -# ============================================================================= -# Passive CONNECTION relayer tests -# ============================================================================= - -def verify_state(c: Config, - ibc1: ChainId, ibc0: ChainId, - ibc1_conn_id: ConnectionId): - - mode = toml.load(c.config_file)['mode'] - clients_enabled = mode['clients']['enabled'] - conn_enabled = mode['connections']['enabled'] - chan_enabled = mode['channels']['enabled'] - packets_enabled = mode['packets']['enabled'] - - # verify connection state on both chains, should be 'Open' or 'Init' depending on config 'mode' - if clients_enabled and conn_enabled and chan_enabled and packets_enabled: - sleep(10.0) - for i in range(20): - sleep(5.0) - ibc1_conn_end = query_connection_end(c, ibc1, ibc1_conn_id) - ibc0_conn_id = ibc1_conn_end.counterparty.connection_id - ibc0_conn_end = query_connection_end(c, ibc0, ibc0_conn_id) - if ibc0_conn_end.state == 'Open' and ibc1_conn_end.state == 'Open': - break - else: - assert (ibc0_conn_end.state == 'Open'), (ibc0_conn_end, "state is not Open") - assert (ibc1_conn_end.state == 'Open'), (ibc1_conn_end, "state is not Open") - - else: - sleep(5.0) - ibc1_conn_end = query_connection_end(c, ibc1, ibc1_conn_id) - assert (ibc1_conn_end.state == 'Init'), (ibc1_conn_end, "state is not Init") - -def passive_connection_start_then_init(c: Config, - ibc1: ChainId, ibc0: ChainId, - ibc1_client_id: ClientId, ibc0_client_id: ClientId )-> ConnectionId: - - # 1. start hermes - proc = relayer.start(c) - sleep(2.0) - - # 2. create a connection in Init state - ibc1_conn_id_a = conn_init(c, dst=ibc1, src=ibc0, dst_client=ibc1_client_id, src_client=ibc0_client_id) - - # 3. wait for connection handshake to finish and verify connection state on both chains - verify_state(c, ibc1, ibc0, ibc1_conn_id_a) - - # 4. 
All good, stop the relayer - proc.kill() - - return ibc1_conn_id_a - -def passive_connection_init_then_start(c: Config, - ibc1: ChainId, ibc0: ChainId, - ibc1_client_id: ClientId, ibc0_client_id: ClientId ): - - # 1. create a connection in Init state - ibc1_conn_id_a = conn_init(c, dst=ibc1, src=ibc0, dst_client=ibc1_client_id, src_client=ibc0_client_id) - - # 2. start hermes - proc = relayer.start(c) - sleep(10.0) - - # 3. wait for connection handshake to finish and verify connection state on both chains - verify_state(c, ibc1, ibc0, ibc1_conn_id_a) - - # 4. All good, stop the relayer - proc.kill() - - -def passive_connection_try_then_start(c: Config, - ibc1: ChainId, ibc0: ChainId, - ibc1_client_id: ClientId, ibc0_client_id: ClientId ): - - # 1. create a connection in Init state - ibc1_conn_id_a = conn_init(c, dst=ibc1, src=ibc0, dst_client=ibc1_client_id, src_client=ibc0_client_id) - - # 2. create a connection in Try-Open state - ibc0_conn_id_b = conn_try(c, dst=ibc0, src=ibc1, dst_client=ibc0_client_id, src_client=ibc1_client_id, src_conn=ibc1_conn_id_a) - - # 2. start hermes - proc = relayer.start(c) - sleep(10.0) - - # 3. wait for connection handshake to finish and verify connection state on both chains - verify_state(c, ibc1, ibc0, ibc1_conn_id_a) - - # 4. All good, stop the relayer - proc.kill() \ No newline at end of file diff --git a/e2e/e2e/packet.py b/e2e/e2e/packet.py deleted file mode 100644 index 278aaf1f04..0000000000 --- a/e2e/e2e/packet.py +++ /dev/null @@ -1,328 +0,0 @@ -from typing import Optional - -from .cmd import * -from .common import * - - -@dataclass -class Packet: - sequence: Sequence - source_port: PortId - source_channel: ChannelId - destination_port: PortId - destination_channel: ChannelId - data: Hex - timeout_height: Height - timeout_timestamp: Timestamp - - -@dataclass -class TxPacketSendRes: - height: BlockHeight - packet: Packet - - -@cmd("tx raw ft-transfer") -@dataclass -class TxPacketSend(Cmd[TxPacketSendRes]): - dst_chain_id: ChainId - src_chain_id: ChainId - src_port: PortId - src_channel: ChannelId - amount: int - height_offset: int - number_msgs: Optional[int] = None - key: Optional[str] = None - - def args(self) -> List[str]: - args = [ - self.dst_chain_id, - self.src_chain_id, - self.src_port, - self.src_channel, - str(self.amount), - "-o", str(self.height_offset), - ] - - if self.number_msgs != None: - args.extend(['-n', str(self.number_msgs)]) - - if self.key != None: - args.extend(['-k', str(self.key)]) - - return args - - def process(self, result: Any) -> TxPacketSendRes: - entry = find_entry(result, 'SendPacket') - return from_dict(TxPacketSendRes, entry) - -# ----------------------------------------------------------------------------- - - -@dataclass -class TxPacketRecvRes: - height: BlockHeight - packet: Packet - ack: Hex - - -@cmd("tx raw packet-recv") -@dataclass -class TxPacketRecv(Cmd[TxPacketRecvRes]): - dst_chain_id: ChainId - src_chain_id: ChainId - src_port: PortId - src_channel: ChannelId - - def args(self) -> List[str]: - return [self.dst_chain_id, self.src_chain_id, self.src_port, self.src_channel] - - def process(self, result: Any) -> TxPacketRecvRes: - entry = find_entry(result, 'WriteAcknowledgement') - return from_dict(TxPacketRecvRes, entry) - -# ----------------------------------------------------------------------------- - - -@dataclass -class TxPacketTimeoutRes: - height: BlockHeight - packet: Packet - - -@cmd("tx raw packet-recv") -@dataclass -class TxPacketTimeout(Cmd[TxPacketTimeoutRes]): - dst_chain_id: ChainId - 
src_chain_id: ChainId - src_port: PortId - src_channel: ChannelId - - def args(self) -> List[str]: - return [self.dst_chain_id, self.src_chain_id, self.src_port, self.src_channel] - - def process(self, result: Any) -> TxPacketTimeoutRes: - entry = find_entry(result, 'TimeoutPacket') - return from_dict(TxPacketTimeoutRes, entry) - - -# ----------------------------------------------------------------------------- - - -@dataclass -class TxPacketAckRes: - height: BlockHeight - packet: Packet - - -@cmd("tx raw packet-ack") -@dataclass -class TxPacketAck(Cmd[TxPacketAckRes]): - dst_chain_id: ChainId - src_chain_id: ChainId - src_port: PortId - src_channel: ChannelId - - def args(self) -> List[str]: - return [self.dst_chain_id, self.src_chain_id, self.src_port, self.src_channel] - - def process(self, result: Any) -> TxPacketAckRes: - entry = find_entry(result, 'AcknowledgePacket') - return from_dict(TxPacketAckRes, entry) - - -# ----------------------------------------------------------------------------- - -@cmd("query packet unreceived-packets") -@dataclass -class QueryUnreceivedPackets(Cmd[List[int]]): - chain: ChainId - port: PortId - channel: ChannelId - - def args(self) -> List[str]: - return [self.chain, self.port, self.channel] - - def process(self, result: Any) -> List[int]: - return from_dict(List[int], result) - - -def query_unreceived_packets( - c: Config, - chain: ChainId, - port: PortId, - channel: ChannelId, -) -> List[int]: - cmd = QueryUnreceivedPackets( - chain=chain, port=port, channel=channel) - - return cmd.run(c).success() - -# ----------------------------------------------------------------------------- - - -@cmd("query packet unreceived-acks") -@dataclass -class QueryUnreceivedAcks(Cmd[List[int]]): - chain: ChainId - port: PortId - channel: ChannelId - - def args(self) -> List[str]: - return [self.chain, self.port, self.channel] - - def process(self, result: Any) -> List[int]: - return from_dict(List[int], result) - - -def query_unreceived_acks( - c: Config, - chain: ChainId, - port: PortId, - channel: ChannelId, -) -> List[int]: - cmd = QueryUnreceivedAcks( - chain=chain, port=port, channel=channel) - - return cmd.run(c).success() - - -# TRANSFER (packet send) -# ============================================================================= - - -def packet_send(c: Config, src: ChainId, dst: ChainId, - src_port: PortId, src_channel: ChannelId, - amount: int, height_offset: int, number_msgs: Optional[int] = None, - key: Optional[str] = 'user2') -> Packet: - - cmd = TxPacketSend(dst_chain_id=dst, src_chain_id=src, - src_port=src_port, src_channel=src_channel, - amount=amount, - number_msgs=number_msgs, - height_offset=height_offset, - key=key) - - res = cmd.run(c).success() - l.info( - f'PacketSend to {src} and obtained sequence number {res.packet.sequence}') - - return res.packet - - -def packet_recv(c: Config, dst: ChainId, src: ChainId, src_port: PortId, src_channel: ChannelId) -> Packet: - cmd = TxPacketRecv(dst_chain_id=dst, src_chain_id=src, - src_port=src_port, src_channel=src_channel) - - res = cmd.run(c).success() - l.info( - f'PacketRecv to {dst} done for sequence number {res.packet.sequence}') - - return res.packet - - -def packet_timeout(c: Config, dst: ChainId, src: ChainId, src_port: PortId, src_channel: ChannelId) -> Packet: - cmd = TxPacketTimeout(dst_chain_id=dst, src_chain_id=src, - src_port=src_port, src_channel=src_channel) - - res = cmd.run(c).success() - l.info( - f'Timeout to {src} done for sequence number {res.packet.sequence}') - - return res.packet - 
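# --- Editor's illustrative sketch (not part of the original script) ----------
# The helpers above (packet_send / packet_recv / packet_ack) compose into a
# one-way transfer round-trip: send a packet on one chain, deliver it to the
# counterparty, then relay the acknowledgement back. `ping_pong` below
# exercises this in both directions; the hypothetical `one_way_transfer`
# sketched here shows a single direction, assuming an open channel pair
# (a_chan on side_a, b_chan on side_b) over the given port.


def one_way_transfer(c: Config,
                     side_a: ChainId, side_b: ChainId,
                     a_chan: ChannelId, b_chan: ChannelId,
                     port_id: PortId = PortId('transfer')) -> None:
    # send a fungible-token-transfer packet on side_a's channel end
    sent = packet_send(c, side_a, side_b, port_id, a_chan,
                       amount=1000, height_offset=1000)
    split()
    # deliver the packet to side_b, which writes the acknowledgement
    received = packet_recv(c, side_b, side_a, port_id, a_chan)
    split()
    # relay the acknowledgement written on side_b back to side_a
    acked = packet_ack(c, side_a, side_b, port_id, b_chan)

    if not (sent.sequence == received.sequence == acked.sequence):
        l.error(f'Mismatched sequence numbers: sent={sent.sequence} '
                f'recv={received.sequence} ack={acked.sequence}')
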
- -def packet_ack(c: Config, dst: ChainId, src: ChainId, src_port: PortId, src_channel: ChannelId) -> Packet: - cmd = TxPacketAck(dst_chain_id=dst, src_chain_id=src, - src_port=src_port, src_channel=src_channel) - - res = cmd.run(c).success() - l.info( - f'PacketAck to {dst} done for sequence number {res.packet.sequence}') - - return res.packet - - -def ping_pong(c: Config, - side_a: ChainId, side_b: ChainId, - a_chan: ChannelId, b_chan: ChannelId, - port_id: PortId = PortId('transfer')): - - pkt_send_a = packet_send(c, side_a, side_b, port_id, - a_chan, amount=9999, height_offset=1000) - - split() - - pkt_recv_b = packet_recv(c, side_b, side_a, port_id, a_chan) - - if pkt_send_a.sequence != pkt_recv_b.sequence: - l.error( - f'Mismatched sequence numbers for path {side_a} -> {side_b} : Sent={pkt_send_a.sequence} versus Received={pkt_recv_b.sequence}') - - split() - - # write the ack - pkt_ack_a = packet_ack(c, side_a, side_b, port_id, b_chan) - - if pkt_recv_b.sequence != pkt_ack_a.sequence: - l.error( - f'Mismatched sequence numbers for ack on path {side_a} -> {side_b} : Recv={pkt_recv_b.sequence} versus Ack={pkt_ack_a.sequence}') - - split() - - pkt_send_b = packet_send(c, side_b, side_a, port_id, - b_chan, amount=9999, height_offset=1000) - - split() - - pkt_recv_a = packet_recv(c, side_a, side_b, port_id, b_chan) - - if pkt_send_b.sequence != pkt_recv_a.sequence: - l.error( - f'Mismatched sequence numbers for path {side_b} -> {side_a} : Sent={pkt_send_b.sequence} versus Received={pkt_recv_a.sequence}') - - split() - - pkt_ack_b = packet_ack(c, side_b, side_a, port_id, a_chan) - - if pkt_recv_a.sequence != pkt_ack_b.sequence: - l.error( - f'Mismatched sequence numbers for ack on path {side_a} -> {side_b} : Recv={pkt_recv_a.sequence} versus Ack={pkt_ack_b.sequence}') - - -def timeout(c: Config, - side_a: ChainId, side_b: ChainId, - a_chan: ChannelId, b_chan: ChannelId, - port_id: PortId = PortId('transfer')): - - pkt_send_a = packet_send(c, side_a, side_b, port_id, - a_chan, amount=9999, height_offset=1) - - split() - - pkt_timeout_a = packet_timeout(c, side_b, side_a, port_id, a_chan) - - if pkt_send_a.sequence != pkt_timeout_a.sequence: - l.error( - f'Mismatched sequence numbers for path {side_a} -> {side_b} : Sent={pkt_send_a.sequence} versus Timeout={pkt_timeout_a.sequence}') - - split() - - pkt_send_b = packet_send(c, side_b, side_a, port_id, - b_chan, amount=9999, height_offset=1) - - split() - - pkt_timeout_b = packet_timeout(c, side_a, side_b, port_id, b_chan) - - if pkt_send_b.sequence != pkt_timeout_b.sequence: - l.error( - f'Mismatched sequence numbers for path {side_b} -> {side_a} : Sent={pkt_send_b.sequence} versus Timeout={pkt_timeout_b.sequence}') - - split() - - -def find_entry(result: Any, key: str) -> Any: - for entry in result: - if key in entry: - return entry[key] diff --git a/e2e/e2e/relayer.py b/e2e/e2e/relayer.py deleted file mode 100644 index 1f72d09c11..0000000000 --- a/e2e/e2e/relayer.py +++ /dev/null @@ -1,11 +0,0 @@ - -from subprocess import Popen -import logging as l - -from .cmd import Config - - -def start(c: Config) -> Popen: - full_cmd = f'{c.relayer_cmd} -c {c.config_file} -j start'.split(' ') - l.debug(' '.join(full_cmd)) - return Popen(full_cmd) diff --git a/e2e/pyrightconfig.json b/e2e/pyrightconfig.json deleted file mode 100644 index 2c63c08510..0000000000 --- a/e2e/pyrightconfig.json +++ /dev/null @@ -1,2 +0,0 @@ -{ -} diff --git a/e2e/run.py b/e2e/run.py deleted file mode 100755 index 721be3f386..0000000000 --- a/e2e/run.py +++ /dev/null @@ -1,277 
+0,0 @@ -#!/usr/bin/env python3 - -import argparse -import logging as l -from typing import Tuple -from pathlib import Path -import toml - -import e2e.channel as channel -import e2e.client as client -import e2e.connection as connection -import e2e.packet as packet -import e2e.relayer as relayer -from e2e.cmd import Config -from e2e.common import * - - -def passive_packets( - c: Config, - ibc0: ChainId, ibc1: ChainId, port_id: PortId, - ibc0_channel_id: ChannelId, ibc1_channel_id: ChannelId): - - # 1. create some unreceived acks - - # hermes tx raw ft-transfer ibc-1 ibc-0 transfer channel-0 10000 -o 1000 -n 2 - packet.packet_send(c, src=ibc0, dst=ibc1, src_port=port_id, - src_channel=ibc0_channel_id, amount=10000, height_offset=1000, number_msgs=2) - - # hermes tx raw ft-transfer ibc-0 ibc-1 transfer channel-1 10000 -o 1000 -n 2 - packet.packet_send(c, src=ibc1, dst=ibc0, src_port=port_id, - src_channel=ibc1_channel_id, amount=10000, height_offset=1000, number_msgs=2) - sleep(5.0) - - # hermes tx raw packet-recv ibc-1 ibc-0 transfer channel-0 - packet.packet_recv(c, src=ibc0, dst=ibc1, - src_port=port_id, src_channel=ibc0_channel_id) - - # hermes tx raw packet-recv ibc-0 ibc-1 transfer channel-1 - packet.packet_recv(c, src=ibc1, dst=ibc0, - src_port=port_id, src_channel=ibc1_channel_id) - - # 2. create some unreceived packets - - # hermes tx raw ft-transfer ibc-0 ibc-1 transfer channel-1 10000 -o 1000 -n 3 - packet.packet_send(c, src=ibc1, dst=ibc0, src_port=port_id, - src_channel=ibc1_channel_id, amount=10000, height_offset=1000, number_msgs=3) - - # hermes tx raw ft-transfer ibc-1 ibc-0 transfer channel-0 10000 -o 1000 -n 4 - packet.packet_send(c, src=ibc0, dst=ibc1, src_port=port_id, - src_channel=ibc0_channel_id, amount=10000, height_offset=1000, number_msgs=4) - - sleep(10.0) - - # 3. verify the expected number of unreceived packets and acks on each channel end - - # hermes query packet unreceived-packets ibc-0 transfer channel-0 - unreceived = packet.query_unreceived_packets( - c, chain=ibc0, port=port_id, channel=ibc0_channel_id) - - assert (len(unreceived) == 3), (unreceived, "unreceived packet mismatch") - - # hermes query packet unreceived-acks ibc-1 transfer channel-1 - unreceived = packet.query_unreceived_acks( - c, chain=ibc1, port=port_id, channel=ibc1_channel_id) - - assert (len(unreceived) == 2), (unreceived, "unreceived packet mismatch") - - # hermes query packet unreceived-packets ibc-1 transfer channel-1 - unreceived = packet.query_unreceived_packets( - c, chain=ibc1, port=port_id, channel=ibc1_channel_id) - - assert (len(unreceived) == 4), (unreceived, "unreceived packet mismatch") - - # hermes query packet unreceived-acks ibc-0 transfer channel-0 - unreceived = packet.query_unreceived_acks( - c, chain=ibc0, port=port_id, channel=ibc0_channel_id) - - assert (len(unreceived) == 2), (unreceived, "unreceived packet mismatch") - - # 4. start relaying - it should clear the unreceived packets - proc = relayer.start(c) - - # 5. wait for the relayer to initialize and pick up pending packets - sleep(20.0) - - # 6. 
verify that there are no pending packets - # hermes query packet unreceived-packets ibc-1 transfer channel-1 - unreceived = packet.query_unreceived_packets( - c, chain=ibc1, port=port_id, channel=ibc1_channel_id) - - assert (len(unreceived) == 0), (unreceived, - "unreceived packets mismatch (expected 0)") - - # hermes query packet unreceived-acks ibc-1 transfer channel-1 - unreceived = packet.query_unreceived_acks( - c, chain=ibc1, port=port_id, channel=ibc1_channel_id) - - assert (len(unreceived) == 0), (unreceived, - "unreceived acks mismatch (expected 0)") - - # hermes query packet unreceived-packets ibc-0 transfer channel-0 - unreceived = packet.query_unreceived_packets( - c, chain=ibc0, port=port_id, channel=ibc0_channel_id) - - assert (len(unreceived) == 0), (unreceived, - "unreceived packets mismatch (expected 0)") - - # hermes query packet unreceived-acks ibc-0 transfer channel-0 - unreceived = packet.query_unreceived_acks( - c, chain=ibc0, port=port_id, channel=ibc0_channel_id) - - assert (len(unreceived) == 0), (unreceived, - "unreceived acks mismatch (expected 0)") - - # 7. send some packets - # hermes tx raw ft-transfer ibc-0 ibc-1 transfer channel-1 10000 1000 -n 3 - packet.packet_send(c, src=ibc1, dst=ibc0, src_port=port_id, - src_channel=ibc1_channel_id, amount=10000, height_offset=1000, number_msgs=3) - - # hermes tx raw ft-transfer ibc-1 ibc-0 transfer channel-0 10000 1000 -n 4 - packet.packet_send(c, src=ibc0, dst=ibc1, src_port=port_id, - src_channel=ibc0_channel_id, amount=10000, height_offset=1000, number_msgs=4) - - sleep(20.0) - - # 8. verify that there are no pending packets - # hermes query packet unreceived-packets ibc-1 transfer channel-1 - unreceived = packet.query_unreceived_packets( - c, chain=ibc1, port=port_id, channel=ibc1_channel_id) - - assert (len(unreceived) == 0), (unreceived, - "unreceived packets mismatch (expected 0)") - - # hermes query packet unreceived-acks ibc-1 transfer channel-1 - unreceived = packet.query_unreceived_acks( - c, chain=ibc1, port=port_id, channel=ibc1_channel_id) - - assert (len(unreceived) == 0), (unreceived, - "unreceived acks mismatch (expected 0)") - - # hermes query packet unreceived-packets ibc-0 transfer channel-0 - unreceived = packet.query_unreceived_packets( - c, chain=ibc0, port=port_id, channel=ibc0_channel_id) - - assert (len(unreceived) == 0), (unreceived, - "unreceived packets mismatch (expected 0)") - - # hermes query packet unreceived-acks ibc-0 transfer channel-0 - unreceived = packet.query_unreceived_acks( - c, chain=ibc0, port=port_id, channel=ibc0_channel_id) - - assert (len(unreceived) == 0), (unreceived, - "unreceived acks mismatch (expected 0)") - - # 9.Stop the relayer - proc.kill() - - -def raw(c: Config, ibc0: ChainId, ibc1: ChainId, port_id: PortId) -> Tuple[ClientId, ConnectionId, ChannelId, ClientId, ConnectionId, ChannelId]: - ibc0_client_id = client.create_update_query_client(c, ibc0, ibc1) - - # Allocate first IDs on ibc-1 - ibc1_client_id = client.create_update_query_client(c, ibc1, ibc0) - ibc1_conn_id = connection.conn_init( - c, ibc1, ibc0, ibc1_client_id, ibc0_client_id) - ibc1_chan_id = channel.chan_open_init( - c, dst=ibc1, src=ibc0, dst_conn=ibc1_conn_id) - - ibc1_client_id = client.create_update_query_client(c, ibc1, ibc0) - - split() - - ibc0_conn_id, ibc1_conn_id = connection.handshake( - c, ibc0, ibc1, ibc0_client_id, ibc1_client_id) - - split() - - ibc0_chan_id, ibc1_chan_id = channel.handshake( - c, ibc0, ibc1, ibc0_conn_id, ibc1_conn_id, port_id) - - split() - - packet.ping_pong(c, 
ibc0, ibc1, ibc0_chan_id, ibc1_chan_id) - - split() - - sleep(5) - - packet.timeout(c, ibc0, ibc1, ibc0_chan_id, ibc1_chan_id) - - split() - - # The ChannelCloseInit message is currently denied by Gaia, - # and requires a patch to be accepted. - # channel.close(c, ibc0, ibc1, ibc0_conn_id, - # ibc1_conn_id, ibc0_chan_id, ibc1_chan_id) - - return ibc0_client_id, ibc0_conn_id, ibc0_chan_id, ibc1_client_id, ibc1_conn_id, ibc1_chan_id - - -def main(): - parser = argparse.ArgumentParser( - description='Test all relayer commands, end-to-end') - - parser.add_argument('-c', '--config', - help='configuration file for the relayer', - metavar='CONFIG_FILE', - required=True, - type=Path) - - parser.add_argument('--cmd', - help='command to run the relayer (default: cargo run --bin hermes --)', - metavar='CMD', - default='cargo run --bin hermes --') - - parser.add_argument('--log-level', - help='minimum log level (default: debug)', - metavar='LOG', - choices=['notset', 'debug', 'info', - 'warning', 'error', 'critical'], - default='debug') - - args = parser.parse_args() - - if not args.config.exists(): - print( - f'error: supplied configuration file does not exist: {args.config}') - exit(1) - - config = Config(config_file=args.config, relayer_cmd=args.cmd, - log_level=args.log_level.upper()) - - l.basicConfig( - level=config.log_level, - format='%(asctime)s [%(levelname)8s] %(message)s', - datefmt='%Y-%m-%d %H:%M:%S') - - chains = toml.load(config.config_file)['chains'] - - ibc0 = chains[0]['id'] - ibc1 = chains[1]['id'] - port_id = PortId('transfer') - - ibc0_client_id, ibc0_conn_id, ibc0_chan_id, ibc1_client_id, ibc1_conn_id, ibc1_chan_id = raw( - config, ibc0, ibc1, port_id) - sleep(2.0) - - passive_packets(config, ibc0, ibc1, port_id, ibc0_chan_id, ibc1_chan_id) - sleep(2.0) - - connection.passive_connection_init_then_start( - config, ibc1, ibc0, ibc1_client_id, ibc0_client_id) - sleep(2.0) - - connection.passive_connection_start_then_init( - config, ibc1, ibc0, ibc1_client_id, ibc0_client_id) - sleep(2.0) - - connection.passive_connection_try_then_start( - config, ibc1, ibc0, ibc1_client_id, ibc0_client_id) - sleep(2.0) - - channel.passive_channel_start_then_init( - config, ibc1, ibc0, ibc1_conn_id, port_id) - sleep(2.0) - - channel.passive_channel_init_then_start( - config, ibc1, ibc0, ibc1_conn_id, port_id) - sleep(2.0) - - channel.passive_channel_try_then_start( - config, ibc1, ibc0, ibc1_conn_id, ibc0_conn_id, port_id) - sleep(2.0) - - -if __name__ == "__main__": - main() diff --git a/guide/.gitignore b/guide/.gitignore deleted file mode 100644 index 7585238efe..0000000000 --- a/guide/.gitignore +++ /dev/null @@ -1 +0,0 @@ -book diff --git a/guide/README.md b/guide/README.md deleted file mode 100644 index c5aa9dd797..0000000000 --- a/guide/README.md +++ /dev/null @@ -1,75 +0,0 @@ -# Hermes Guide - -Hermes is the name of the binary that comes packaged with the -[IBC Relayer CLI](https://crates.io/crates/ibc-relayer-cli) crate. - -This directory comprises a comprehensive guide to Hermes. -In order to build and view this guide you need to install -[`mdBook`](https://github.com/rust-lang/mdBook). -mdBook is a utility to create modern online books from Markdown files. - -The latest stable version of this guide is deployed at -[hermes.informal.systems](https://hermes.informal.systems). - -Current version: `0.15.0`. - -The version of this guide is aligned with the [versioning of the ibc crates](../README.md).
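The deleted `e2e/run.py` above is the driver that strings the removed packet, channel, and connection helpers together. As a quick orientation for reviewers of this removal, here is a minimal sketch of that wiring, condensed from the `main()` shown in the hunk; it assumes the (also removed) `e2e` package is importable, that `PortId` is exported by `e2e.common` (as the wildcard import in the deleted script suggests), and that `config.toml` is a placeholder relayer config path:

```python
# Sketch only -- condensed from the deleted e2e/run.py entry point.
# Assumptions: the removed `e2e` package is on PYTHONPATH, PortId comes
# from e2e.common, and 'config.toml' is a placeholder relayer config.
from pathlib import Path

import toml

from e2e.cmd import Config
from e2e.common import PortId

config = Config(config_file=Path('config.toml'),
                relayer_cmd='cargo run --bin hermes --',
                log_level='DEBUG')

# The first two chain ids from the relayer config drive every test.
chains = toml.load(config.config_file)['chains']
ibc0, ibc1 = chains[0]['id'], chains[1]['id']
port_id = PortId('transfer')

# raw() in the deleted script then creates clients, a connection and a
# channel, after which ping_pong/timeout and the passive tests run on them.
```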
- -## Local deployment - -This section describes how to deploy this guide locally on your system. - -### Pre-requisites - -Install `mdBook` using [`cargo`](https://doc.rust-lang.org/cargo/): - -```bash -cargo install mdbook -``` - -You also need to install the `mdbook-mermaid` plug-in for [`mermaid`](https://mermaid-js.github.io/mermaid/#/) to generate graphs and diagrams, and the `mdbook-toc` plug-in for generating a table of contents: - -```bash -cargo install mdbook-mermaid -cargo install mdbook-toc -``` - -### Building and viewing the guide locally - -In order to build and view the guide on your local machine, please follow these instructions. - -#### Change to the `guide` dir - -This assumes your current directory is the `ibc-rs` repository: - -```bash -cd guide -``` - -#### Build the guide - -To build and view the guide in your browser, run the `mdbook` command below: - -```bash -mdbook serve -``` - -#### View the guide - -This will serve the guide on your local machine. Open your browser and navigate to: - -```bash -http://localhost:3000 -``` - -## Adding or editing new content to the guide - -Please check the [mdBook documentation](https://rust-lang.github.io/mdBook/index.html) for additional information on how to add new content to the guide. - -To add new content to the guide, add an entry to the `SUMMARY.md` Markdown file, which is the TOC page. Then create a page for the entry you've added to `SUMMARY.md`. If you don't create the page but save the `SUMMARY.md` file and build again, `mdBook` will create the page automatically for you. - -#### Local development -If you are adding content in your favorite IDE and have a terminal open running `mdbook serve`, its watch functionality will trigger a rebuild whenever local files change; refresh the guide in your browser to see the updated pages. - -#### Submit your changes -Once you finish adding the new content, commit your changes (`git commit`) and push them to the repository (`git push`). diff --git a/guide/book.toml b/guide/book.toml deleted file mode 100644 index c11547b50c..0000000000 --- a/guide/book.toml +++ /dev/null @@ -1,19 +0,0 @@ -[book] -authors = ["Informal Systems Inc."] -language = "en" -multilingual = false -src = "src" -title = "Hermes (IBC Relayer CLI) Documentation" - -[preprocessor.mermaid] -command = "mdbook-mermaid" - -[preprocessor.toc] -command = "mdbook-toc" -renderer = ["html"] - -[output.html] -additional-js = ["mermaid.min.js", "mermaid-init.js"] - -# Uncomment to trigger the link check -# [output.linkcheck] diff --git a/guide/mermaid-init.js b/guide/mermaid-init.js deleted file mode 100644 index 72f038e237..0000000000 --- a/guide/mermaid-init.js +++ /dev/null @@ -1,16 +0,0 @@ -// Adapt Mermaid theme to rustdoc theme.
-// https://github.com/mersinvald/aquamarine/blob/ce24cd6e3a84e4f80a60c21e218b9c6f26b001fa/src/attrs.rs#L89-L101 - -function get_mermaid_theme() { - let is_dark = /.*(dark|coal|navy|ayu).*/.test(document.documentElement.className); - if (is_dark) { - return 'dark'; - } else { - return 'default'; - } -} - -mermaid.initialize({ - startOnLoad: true, - theme: get_mermaid_theme() -}); diff --git a/guide/mermaid.min.js b/guide/mermaid.min.js deleted file mode 100644 index 8d71a81caf..0000000000 --- a/guide/mermaid.min.js +++ /dev/null @@ -1,32 +0,0 @@ -!function(t,e){"object"==typeof exports&&"object"==typeof module?module.exports=e():"function"==typeof define&&define.amd?define([],e):"object"==typeof exports?exports.mermaid=e():t.mermaid=e()}("undefined"!=typeof self?self:this,(function(){return function(t){var e={};function n(r){if(e[r])return e[r].exports;var i=e[r]={i:r,l:!1,exports:{}};return t[r].call(i.exports,i,i.exports,n),i.l=!0,i.exports}return n.m=t,n.c=e,n.d=function(t,e,r){n.o(t,e)||Object.defineProperty(t,e,{enumerable:!0,get:r})},n.r=function(t){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(t,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(t,"__esModule",{value:!0})},n.t=function(t,e){if(1&e&&(t=n(t)),8&e)return t;if(4&e&&"object"==typeof t&&t&&t.__esModule)return t;var r=Object.create(null);if(n.r(r),Object.defineProperty(r,"default",{enumerable:!0,value:t}),2&e&&"string"!=typeof t)for(var i in t)n.d(r,i,function(e){return t[e]}.bind(null,i));return r},n.n=function(t){var e=t&&t.__esModule?function(){return t.default}:function(){return t};return n.d(e,"a",e),e},n.o=function(t,e){return Object.prototype.hasOwnProperty.call(t,e)},n.p="",n(n.s=383)}([function(t,e,n){"use strict";n.r(e);var r=function(t,e){return te?1:t>=e?0:NaN},i=function(t){var e;return 1===t.length&&(e=t,t=function(t,n){return r(e(t),n)}),{left:function(e,n,r,i){for(null==r&&(r=0),null==i&&(i=e.length);r>>1;t(e[a],n)<0?r=a+1:i=a}return r},right:function(e,n,r,i){for(null==r&&(r=0),null==i&&(i=e.length);r>>1;t(e[a],n)>0?i=a:r=a+1}return r}}};var a=i(r),o=a.right,s=a.left,c=o,u=function(t,e){null==e&&(e=l);for(var n=0,r=t.length-1,i=t[0],a=new Array(r<0?0:r);nt?1:e>=t?0:NaN},d=function(t){return null===t?NaN:+t},p=function(t,e){var n,r,i=t.length,a=0,o=-1,s=0,c=0;if(null==e)for(;++o1)return c/(a-1)},g=function(t,e){var n=p(t,e);return n?Math.sqrt(n):n},y=function(t,e){var n,r,i,a=t.length,o=-1;if(null==e){for(;++o=n)for(r=i=n;++on&&(r=n),i=n)for(r=i=n;++on&&(r=n),i0)return[t];if((r=e0)for(t=Math.ceil(t/o),e=Math.floor(e/o),a=new Array(i=Math.ceil(e-t+1));++s=0?(a>=w?10:a>=E?5:a>=T?2:1)*Math.pow(10,i):-Math.pow(10,-i)/(a>=w?10:a>=E?5:a>=T?2:1)}function A(t,e,n){var r=Math.abs(e-t)/Math.max(0,n),i=Math.pow(10,Math.floor(Math.log(r)/Math.LN10)),a=r/i;return a>=w?i*=10:a>=E?i*=5:a>=T&&(i*=2),eh;)f.pop(),--d;var p,g=new Array(d+1);for(i=0;i<=d;++i)(p=g[i]=[]).x0=i>0?f[i-1]:l,p.x1=i=1)return+n(t[r-1],r-1,t);var r,i=(r-1)*e,a=Math.floor(i),o=+n(t[a],a,t);return o+(+n(t[a+1],a+1,t)-o)*(i-a)}},N=function(t,e,n){return t=b.call(t,d).sort(r),Math.ceil((n-e)/(2*(D(t,.75)-D(t,.25))*Math.pow(t.length,-1/3)))},B=function(t,e,n){return Math.ceil((n-e)/(3.5*g(t)*Math.pow(t.length,-1/3)))},L=function(t,e){var n,r,i=t.length,a=-1;if(null==e){for(;++a=n)for(r=n;++ar&&(r=n)}else for(;++a=n)for(r=n;++ar&&(r=n);return r},F=function(t,e){var n,r=t.length,i=r,a=-1,o=0;if(null==e)for(;++a=0;)for(e=(r=t[i]).length;--e>=0;)n[--o]=r[e];return n},j=function(t,e){var 
n,r,i=t.length,a=-1;if(null==e){for(;++a=n)for(r=n;++an&&(r=n)}else for(;++a=n)for(r=n;++an&&(r=n);return r},R=function(t,e){for(var n=e.length,r=new Array(n);n--;)r[n]=t[e[n]];return r},Y=function(t,e){if(n=t.length){var n,i,a=0,o=0,s=t[o];for(null==e&&(e=r);++a=0&&(n=t.slice(r+1),t=t.slice(0,r)),t&&!e.hasOwnProperty(t))throw new Error("unknown type: "+t);return{type:t,name:n}}))}function ct(t,e){for(var n,r=0,i=t.length;r0)for(var n,r,i=new Array(n),a=0;ae?1:t>=e?0:NaN}var _t="http://www.w3.org/1999/xhtml",kt={svg:"http://www.w3.org/2000/svg",xhtml:_t,xlink:"http://www.w3.org/1999/xlink",xml:"http://www.w3.org/XML/1998/namespace",xmlns:"http://www.w3.org/2000/xmlns/"},wt=function(t){var e=t+="",n=e.indexOf(":");return n>=0&&"xmlns"!==(e=t.slice(0,n))&&(t=t.slice(n+1)),kt.hasOwnProperty(e)?{space:kt[e],local:t}:t};function Et(t){return function(){this.removeAttribute(t)}}function Tt(t){return function(){this.removeAttributeNS(t.space,t.local)}}function Ct(t,e){return function(){this.setAttribute(t,e)}}function St(t,e){return function(){this.setAttributeNS(t.space,t.local,e)}}function At(t,e){return function(){var n=e.apply(this,arguments);null==n?this.removeAttribute(t):this.setAttribute(t,n)}}function Mt(t,e){return function(){var n=e.apply(this,arguments);null==n?this.removeAttributeNS(t.space,t.local):this.setAttributeNS(t.space,t.local,n)}}var Ot=function(t){return t.ownerDocument&&t.ownerDocument.defaultView||t.document&&t||t.defaultView};function Dt(t){return function(){this.style.removeProperty(t)}}function Nt(t,e,n){return function(){this.style.setProperty(t,e,n)}}function Bt(t,e,n){return function(){var r=e.apply(this,arguments);null==r?this.style.removeProperty(t):this.style.setProperty(t,r,n)}}function Lt(t,e){return t.style.getPropertyValue(e)||Ot(t).getComputedStyle(t,null).getPropertyValue(e)}function Ft(t){return function(){delete this[t]}}function Pt(t,e){return function(){this[t]=e}}function It(t,e){return function(){var n=e.apply(this,arguments);null==n?delete this[t]:this[t]=n}}function jt(t){return t.trim().split(/^|\s+/)}function Rt(t){return t.classList||new Yt(t)}function Yt(t){this._node=t,this._names=jt(t.getAttribute("class")||"")}function zt(t,e){for(var n=Rt(t),r=-1,i=e.length;++r=0&&(this._names.splice(e,1),this._node.setAttribute("class",this._names.join(" ")))},contains:function(t){return this._names.indexOf(t)>=0}};function Ht(){this.textContent=""}function Gt(t){return function(){this.textContent=t}}function qt(t){return function(){var e=t.apply(this,arguments);this.textContent=null==e?"":e}}function Xt(){this.innerHTML=""}function Zt(t){return function(){this.innerHTML=t}}function Jt(t){return function(){var e=t.apply(this,arguments);this.innerHTML=null==e?"":e}}function Qt(){this.nextSibling&&this.parentNode.appendChild(this)}function Kt(){this.previousSibling&&this.parentNode.insertBefore(this,this.parentNode.firstChild)}function te(t){return function(){var e=this.ownerDocument,n=this.namespaceURI;return n===_t&&e.documentElement.namespaceURI===_t?e.createElement(t):e.createElementNS(n,t)}}function ee(t){return function(){return this.ownerDocument.createElementNS(t.space,t.local)}}var ne=function(t){var e=wt(t);return(e.local?ee:te)(e)};function re(){return null}function ie(){var t=this.parentNode;t&&t.removeChild(this)}function ae(){var t=this.cloneNode(!1),e=this.parentNode;return e?e.insertBefore(t,this.nextSibling):t}function oe(){var t=this.cloneNode(!0),e=this.parentNode;return e?e.insertBefore(t,this.nextSibling):t}var 
se={},ce=null;"undefined"!=typeof document&&("onmouseenter"in document.documentElement||(se={mouseenter:"mouseover",mouseleave:"mouseout"}));function ue(t,e,n){return t=le(t,e,n),function(e){var n=e.relatedTarget;n&&(n===this||8&n.compareDocumentPosition(this))||t.call(this,e)}}function le(t,e,n){return function(r){var i=ce;ce=r;try{t.call(this,this.__data__,e,n)}finally{ce=i}}}function he(t){return t.trim().split(/^|\s+/).map((function(t){var e="",n=t.indexOf(".");return n>=0&&(e=t.slice(n+1),t=t.slice(0,n)),{type:t,name:e}}))}function fe(t){return function(){var e=this.__on;if(e){for(var n,r=0,i=-1,a=e.length;r=_&&(_=x+1);!(b=v[_])&&++_=0;)(r=i[a])&&(o&&4^r.compareDocumentPosition(o)&&o.parentNode.insertBefore(r,o),o=r);return this},sort:function(t){function e(e,n){return e&&n?t(e.__data__,n.__data__):!e-!n}t||(t=xt);for(var n=this._groups,r=n.length,i=new Array(r),a=0;a1?this.each((null==e?Dt:"function"==typeof e?Bt:Nt)(t,e,null==n?"":n)):Lt(this.node(),t)},property:function(t,e){return arguments.length>1?this.each((null==e?Ft:"function"==typeof e?It:Pt)(t,e)):this.node()[t]},classed:function(t,e){var n=jt(t+"");if(arguments.length<2){for(var r=Rt(this.node()),i=-1,a=n.length;++i>8&15|e>>4&240,e>>4&15|240&e,(15&e)<<4|15&e,1):8===n?new qe(e>>24&255,e>>16&255,e>>8&255,(255&e)/255):4===n?new qe(e>>12&15|e>>8&240,e>>8&15|e>>4&240,e>>4&15|240&e,((15&e)<<4|15&e)/255):null):(e=Le.exec(t))?new qe(e[1],e[2],e[3],1):(e=Fe.exec(t))?new qe(255*e[1]/100,255*e[2]/100,255*e[3]/100,1):(e=Pe.exec(t))?Ve(e[1],e[2],e[3],e[4]):(e=Ie.exec(t))?Ve(255*e[1]/100,255*e[2]/100,255*e[3]/100,e[4]):(e=je.exec(t))?Qe(e[1],e[2]/100,e[3]/100,1):(e=Re.exec(t))?Qe(e[1],e[2]/100,e[3]/100,e[4]):Ye.hasOwnProperty(t)?We(Ye[t]):"transparent"===t?new qe(NaN,NaN,NaN,0):null}function We(t){return new qe(t>>16&255,t>>8&255,255&t,1)}function Ve(t,e,n,r){return r<=0&&(t=e=n=NaN),new qe(t,e,n,r)}function He(t){return t instanceof Me||(t=$e(t)),t?new qe((t=t.rgb()).r,t.g,t.b,t.opacity):new qe}function Ge(t,e,n,r){return 1===arguments.length?He(t):new qe(t,e,n,null==r?1:r)}function qe(t,e,n,r){this.r=+t,this.g=+e,this.b=+n,this.opacity=+r}function Xe(){return"#"+Je(this.r)+Je(this.g)+Je(this.b)}function Ze(){var t=this.opacity;return(1===(t=isNaN(t)?1:Math.max(0,Math.min(1,t)))?"rgb(":"rgba(")+Math.max(0,Math.min(255,Math.round(this.r)||0))+", "+Math.max(0,Math.min(255,Math.round(this.g)||0))+", "+Math.max(0,Math.min(255,Math.round(this.b)||0))+(1===t?")":", "+t+")")}function Je(t){return((t=Math.max(0,Math.min(255,Math.round(t)||0)))<16?"0":"")+t.toString(16)}function Qe(t,e,n,r){return r<=0?t=e=n=NaN:n<=0||n>=1?t=e=NaN:e<=0&&(t=NaN),new en(t,e,n,r)}function Ke(t){if(t instanceof en)return new en(t.h,t.s,t.l,t.opacity);if(t instanceof Me||(t=$e(t)),!t)return new en;if(t instanceof en)return t;var e=(t=t.rgb()).r/255,n=t.g/255,r=t.b/255,i=Math.min(e,n,r),a=Math.max(e,n,r),o=NaN,s=a-i,c=(a+i)/2;return s?(o=e===a?(n-r)/s+6*(n0&&c<1?0:o,new en(o,s,c,t.opacity)}function tn(t,e,n,r){return 1===arguments.length?Ke(t):new en(t,e,n,null==r?1:r)}function en(t,e,n,r){this.h=+t,this.s=+e,this.l=+n,this.opacity=+r}function nn(t,e,n){return 255*(t<60?e+(n-e)*t/60:t<180?n:t<240?e+(n-e)*(240-t)/60:e)}function rn(t,e,n,r,i){var a=t*t,o=a*t;return((1-3*t+3*a-o)*e+(4-6*a+3*o)*n+(1+3*t+3*a-3*o)*r+o*i)/6}Se(Me,$e,{copy:function(t){return Object.assign(new this.constructor,this,t)},displayable:function(){return this.rgb().displayable()},hex:ze,formatHex:ze,formatHsl:function(){return 
Ke(this).formatHsl()},formatRgb:Ue,toString:Ue}),Se(qe,Ge,Ae(Me,{brighter:function(t){return t=null==t?1/.7:Math.pow(1/.7,t),new qe(this.r*t,this.g*t,this.b*t,this.opacity)},darker:function(t){return t=null==t?.7:Math.pow(.7,t),new qe(this.r*t,this.g*t,this.b*t,this.opacity)},rgb:function(){return this},displayable:function(){return-.5<=this.r&&this.r<255.5&&-.5<=this.g&&this.g<255.5&&-.5<=this.b&&this.b<255.5&&0<=this.opacity&&this.opacity<=1},hex:Xe,formatHex:Xe,formatRgb:Ze,toString:Ze})),Se(en,tn,Ae(Me,{brighter:function(t){return t=null==t?1/.7:Math.pow(1/.7,t),new en(this.h,this.s,this.l*t,this.opacity)},darker:function(t){return t=null==t?.7:Math.pow(.7,t),new en(this.h,this.s,this.l*t,this.opacity)},rgb:function(){var t=this.h%360+360*(this.h<0),e=isNaN(t)||isNaN(this.s)?0:this.s,n=this.l,r=n+(n<.5?n:1-n)*e,i=2*n-r;return new qe(nn(t>=240?t-240:t+120,i,r),nn(t,i,r),nn(t<120?t+240:t-120,i,r),this.opacity)},displayable:function(){return(0<=this.s&&this.s<=1||isNaN(this.s))&&0<=this.l&&this.l<=1&&0<=this.opacity&&this.opacity<=1},formatHsl:function(){var t=this.opacity;return(1===(t=isNaN(t)?1:Math.max(0,Math.min(1,t)))?"hsl(":"hsla(")+(this.h||0)+", "+100*(this.s||0)+"%, "+100*(this.l||0)+"%"+(1===t?")":", "+t+")")}}));var an=function(t){var e=t.length-1;return function(n){var r=n<=0?n=0:n>=1?(n=1,e-1):Math.floor(n*e),i=t[r],a=t[r+1],o=r>0?t[r-1]:2*i-a,s=r180||n<-180?n-360*Math.round(n/360):n):sn(isNaN(t)?e:t)}function ln(t){return 1==(t=+t)?hn:function(e,n){return n-e?function(t,e,n){return t=Math.pow(t,n),e=Math.pow(e,n)-t,n=1/n,function(r){return Math.pow(t+r*e,n)}}(e,n,t):sn(isNaN(e)?n:e)}}function hn(t,e){var n=e-t;return n?cn(t,n):sn(isNaN(t)?e:t)}var fn=function t(e){var n=ln(e);function r(t,e){var r=n((t=Ge(t)).r,(e=Ge(e)).r),i=n(t.g,e.g),a=n(t.b,e.b),o=hn(t.opacity,e.opacity);return function(e){return t.r=r(e),t.g=i(e),t.b=a(e),t.opacity=o(e),t+""}}return r.gamma=t,r}(1);function dn(t){return function(e){var n,r,i=e.length,a=new Array(i),o=new Array(i),s=new Array(i);for(n=0;na&&(i=e.slice(a,i),s[o]?s[o]+=i:s[++o]=i),(n=n[0])===(r=r[0])?s[o]?s[o]+=r:s[++o]=r:(s[++o]=null,c.push({i:o,x:_n(n,r)})),a=En.lastIndex;return a=0&&e._call.call(null,t),e=e._next;--Bn}function Hn(){In=(Pn=Rn.now())+jn,Bn=Ln=0;try{Vn()}finally{Bn=0,function(){var t,e,n=Tn,r=1/0;for(;n;)n._call?(r>n._time&&(r=n._time),t=n,n=n._next):(e=n._next,n._next=null,n=t?t._next=e:Tn=e);Cn=t,qn(r)}(),In=0}}function Gn(){var t=Rn.now(),e=t-Pn;e>1e3&&(jn-=e,Pn=t)}function qn(t){Bn||(Ln&&(Ln=clearTimeout(Ln)),t-In>24?(t<1/0&&(Ln=setTimeout(Hn,t-Rn.now()-jn)),Fn&&(Fn=clearInterval(Fn))):(Fn||(Pn=Rn.now(),Fn=setInterval(Gn,1e3)),Bn=1,Yn(Hn)))}$n.prototype=Wn.prototype={constructor:$n,restart:function(t,e,n){if("function"!=typeof t)throw new TypeError("callback is not a function");n=(null==n?zn():+n)+(null==e?0:+e),this._next||Cn===this||(Cn?Cn._next=this:Tn=this,Cn=this),this._call=t,this._time=n,qn()},stop:function(){this._call&&(this._call=null,this._time=1/0,qn())}};var Xn=function(t,e,n){var r=new $n;return e=null==e?0:+e,r.restart((function(n){r.stop(),t(n+e)}),e,n),r},Zn=lt("start","end","cancel","interrupt"),Jn=[],Qn=function(t,e,n,r,i,a){var o=t.__transition;if(o){if(n in o)return}else t.__transition={};!function(t,e,n){var r,i=t.__transition;function a(c){var u,l,h,f;if(1!==n.state)return s();for(u in i)if((f=i[u]).name===n.name){if(3===f.state)return Xn(a);4===f.state?(f.state=6,f.timer.stop(),f.on.call("interrupt",t,t.__data__,f.index,f.group),delete i[u]):+u0)throw new Error("too late; already 
scheduled");return n}function tr(t,e){var n=er(t,e);if(n.state>3)throw new Error("too late; already running");return n}function er(t,e){var n=t.__transition;if(!n||!(n=n[e]))throw new Error("transition not found");return n}var nr,rr,ir,ar,or=function(t,e){var n,r,i,a=t.__transition,o=!0;if(a){for(i in e=null==e?null:e+"",a)(n=a[i]).name===e?(r=n.state>2&&n.state<5,n.state=6,n.timer.stop(),n.on.call(r?"interrupt":"cancel",t,t.__data__,n.index,n.group),delete a[i]):o=!1;o&&delete t.__transition}},sr=180/Math.PI,cr={translateX:0,translateY:0,rotate:0,skewX:0,scaleX:1,scaleY:1},ur=function(t,e,n,r,i,a){var o,s,c;return(o=Math.sqrt(t*t+e*e))&&(t/=o,e/=o),(c=t*n+e*r)&&(n-=t*c,r-=e*c),(s=Math.sqrt(n*n+r*r))&&(n/=s,r/=s,c/=s),t*r180?e+=360:e-t>180&&(t+=360),a.push({i:n.push(i(n)+"rotate(",null,r)-2,x:_n(t,e)})):e&&n.push(i(n)+"rotate("+e+r)}(a.rotate,o.rotate,s,c),function(t,e,n,a){t!==e?a.push({i:n.push(i(n)+"skewX(",null,r)-2,x:_n(t,e)}):e&&n.push(i(n)+"skewX("+e+r)}(a.skewX,o.skewX,s,c),function(t,e,n,r,a,o){if(t!==n||e!==r){var s=a.push(i(a)+"scale(",null,",",null,")");o.push({i:s-4,x:_n(t,n)},{i:s-2,x:_n(e,r)})}else 1===n&&1===r||a.push(i(a)+"scale("+n+","+r+")")}(a.scaleX,a.scaleY,o.scaleX,o.scaleY,s,c),a=o=null,function(t){for(var e,n=-1,r=c.length;++n=0&&(t=t.slice(0,e)),!t||"start"===t}))}(e)?Kn:tr;return function(){var o=a(this,t),s=o.on;s!==r&&(i=(r=s).copy()).on(e,n),o.on=i}}var Br=_e.prototype.constructor;function Lr(t){return function(){this.style.removeProperty(t)}}function Fr(t,e,n){return function(r){this.style.setProperty(t,e.call(this,r),n)}}function Pr(t,e,n){var r,i;function a(){var a=e.apply(this,arguments);return a!==i&&(r=(i=a)&&Fr(t,a,n)),r}return a._value=e,a}function Ir(t){return function(e){this.textContent=t.call(this,e)}}function jr(t){var e,n;function r(){var r=t.apply(this,arguments);return r!==n&&(e=(n=r)&&Ir(r)),e}return r._value=t,r}var Rr=0;function Yr(t,e,n,r){this._groups=t,this._parents=e,this._name=n,this._id=r}function zr(t){return _e().transition(t)}function Ur(){return++Rr}var $r=_e.prototype;function Wr(t){return t*t*t}function Vr(t){return--t*t*t+1}function Hr(t){return((t*=2)<=1?t*t*t:(t-=2)*t*t+2)/2}Yr.prototype=zr.prototype={constructor:Yr,select:function(t){var e=this._name,n=this._id;"function"!=typeof t&&(t=ft(t));for(var r=this._groups,i=r.length,a=new Array(i),o=0;o1&&n.name===e)return new Yr([[t]],Xr,e,+r);return null},Jr=function(t){return function(){return t}},Qr=function(t,e,n){this.target=t,this.type=e,this.selection=n};function Kr(){ce.stopImmediatePropagation()}var ti=function(){ce.preventDefault(),ce.stopImmediatePropagation()},ei={name:"drag"},ni={name:"space"},ri={name:"handle"},ii={name:"center"};function ai(t){return[+t[0],+t[1]]}function oi(t){return[ai(t[0]),ai(t[1])]}function si(t){return function(e){return Dn(e,ce.touches,t)}}var ci={name:"x",handles:["w","e"].map(yi),input:function(t,e){return null==t?null:[[+t[0],e[0][1]],[+t[1],e[1][1]]]},output:function(t){return t&&[t[0][0],t[1][0]]}},ui={name:"y",handles:["n","s"].map(yi),input:function(t,e){return null==t?null:[[e[0][0],+t[0]],[e[1][0],+t[1]]]},output:function(t){return t&&[t[0][1],t[1][1]]}},li={name:"xy",handles:["n","w","e","s","nw","ne","sw","se"].map(yi),input:function(t){return null==t?null:oi(t)},output:function(t){return 
t}},hi={overlay:"crosshair",selection:"move",n:"ns-resize",e:"ew-resize",s:"ns-resize",w:"ew-resize",nw:"nwse-resize",ne:"nesw-resize",se:"nwse-resize",sw:"nesw-resize"},fi={e:"w",w:"e",nw:"ne",ne:"nw",se:"sw",sw:"se"},di={n:"s",s:"n",nw:"sw",ne:"se",se:"ne",sw:"nw"},pi={overlay:1,selection:1,n:null,e:1,s:null,w:-1,nw:-1,ne:1,se:1,sw:-1},gi={overlay:1,selection:1,n:-1,e:null,s:1,w:null,nw:-1,ne:-1,se:1,sw:1};function yi(t){return{type:t}}function vi(){return!ce.ctrlKey&&!ce.button}function mi(){var t=this.ownerSVGElement||this;return t.hasAttribute("viewBox")?[[(t=t.viewBox.baseVal).x,t.y],[t.x+t.width,t.y+t.height]]:[[0,0],[t.width.baseVal.value,t.height.baseVal.value]]}function bi(){return navigator.maxTouchPoints||"ontouchstart"in this}function xi(t){for(;!t.__brush;)if(!(t=t.parentNode))return;return t.__brush}function _i(t){return t[0][0]===t[1][0]||t[0][1]===t[1][1]}function ki(t){var e=t.__brush;return e?e.dim.output(e.selection):null}function wi(){return Ci(ci)}function Ei(){return Ci(ui)}var Ti=function(){return Ci(li)};function Ci(t){var e,n=mi,r=vi,i=bi,a=!0,o=lt("start","brush","end"),s=6;function c(e){var n=e.property("__brush",g).selectAll(".overlay").data([yi("overlay")]);n.enter().append("rect").attr("class","overlay").attr("pointer-events","all").attr("cursor",hi.overlay).merge(n).each((function(){var t=xi(this).extent;ke(this).attr("x",t[0][0]).attr("y",t[0][1]).attr("width",t[1][0]-t[0][0]).attr("height",t[1][1]-t[0][1])})),e.selectAll(".selection").data([yi("selection")]).enter().append("rect").attr("class","selection").attr("cursor",hi.selection).attr("fill","#777").attr("fill-opacity",.3).attr("stroke","#fff").attr("shape-rendering","crispEdges");var r=e.selectAll(".handle").data(t.handles,(function(t){return t.type}));r.exit().remove(),r.enter().append("rect").attr("class",(function(t){return"handle handle--"+t.type})).attr("cursor",(function(t){return hi[t.type]})),e.each(u).attr("fill","none").attr("pointer-events","all").on("mousedown.brush",f).filter(i).on("touchstart.brush",f).on("touchmove.brush",d).on("touchend.brush touchcancel.brush",p).style("touch-action","none").style("-webkit-tap-highlight-color","rgba(0,0,0,0)")}function u(){var t=ke(this),e=xi(this).selection;e?(t.selectAll(".selection").style("display",null).attr("x",e[0][0]).attr("y",e[0][1]).attr("width",e[1][0]-e[0][0]).attr("height",e[1][1]-e[0][1]),t.selectAll(".handle").style("display",null).attr("x",(function(t){return"e"===t.type[t.type.length-1]?e[1][0]-s/2:e[0][0]-s/2})).attr("y",(function(t){return"s"===t.type[0]?e[1][1]-s/2:e[0][1]-s/2})).attr("width",(function(t){return"n"===t.type||"s"===t.type?e[1][0]-e[0][0]+s:s})).attr("height",(function(t){return"e"===t.type||"w"===t.type?e[1][1]-e[0][1]+s:s}))):t.selectAll(".selection,.handle").style("display","none").attr("x",null).attr("y",null).attr("width",null).attr("height",null)}function l(t,e,n){return!n&&t.__brush.emitter||new h(t,e)}function h(t,e){this.that=t,this.args=e,this.state=t.__brush,this.active=0}function f(){if((!e||ce.touches)&&r.apply(this,arguments)){var 
n,i,o,s,c,h,f,d,p,g,y,v=this,m=ce.target.__data__.type,b="selection"===(a&&ce.metaKey?m="overlay":m)?ei:a&&ce.altKey?ii:ri,x=t===ui?null:pi[m],_=t===ci?null:gi[m],k=xi(v),w=k.extent,E=k.selection,T=w[0][0],C=w[0][1],S=w[1][0],A=w[1][1],M=0,O=0,D=x&&_&&a&&ce.shiftKey,N=ce.touches?si(ce.changedTouches[0].identifier):Nn,B=N(v),L=B,F=l(v,arguments,!0).beforestart();"overlay"===m?(E&&(p=!0),k.selection=E=[[n=t===ui?T:B[0],o=t===ci?C:B[1]],[c=t===ui?S:n,f=t===ci?A:o]]):(n=E[0][0],o=E[0][1],c=E[1][0],f=E[1][1]),i=n,s=o,h=c,d=f;var P=ke(v).attr("pointer-events","none"),I=P.selectAll(".overlay").attr("cursor",hi[m]);if(ce.touches)F.moved=R,F.ended=z;else{var j=ke(ce.view).on("mousemove.brush",R,!0).on("mouseup.brush",z,!0);a&&j.on("keydown.brush",U,!0).on("keyup.brush",$,!0),Te(ce.view)}Kr(),or(v),u.call(v),F.start()}function R(){var t=N(v);!D||g||y||(Math.abs(t[0]-L[0])>Math.abs(t[1]-L[1])?y=!0:g=!0),L=t,p=!0,ti(),Y()}function Y(){var t;switch(M=L[0]-B[0],O=L[1]-B[1],b){case ni:case ei:x&&(M=Math.max(T-n,Math.min(S-c,M)),i=n+M,h=c+M),_&&(O=Math.max(C-o,Math.min(A-f,O)),s=o+O,d=f+O);break;case ri:x<0?(M=Math.max(T-n,Math.min(S-n,M)),i=n+M,h=c):x>0&&(M=Math.max(T-c,Math.min(S-c,M)),i=n,h=c+M),_<0?(O=Math.max(C-o,Math.min(A-o,O)),s=o+O,d=f):_>0&&(O=Math.max(C-f,Math.min(A-f,O)),s=o,d=f+O);break;case ii:x&&(i=Math.max(T,Math.min(S,n-M*x)),h=Math.max(T,Math.min(S,c+M*x))),_&&(s=Math.max(C,Math.min(A,o-O*_)),d=Math.max(C,Math.min(A,f+O*_)))}h0&&(n=i-M),_<0?f=d-O:_>0&&(o=s-O),b=ni,I.attr("cursor",hi.selection),Y());break;default:return}ti()}function $(){switch(ce.keyCode){case 16:D&&(g=y=D=!1,Y());break;case 18:b===ii&&(x<0?c=h:x>0&&(n=i),_<0?f=d:_>0&&(o=s),b=ri,Y());break;case 32:b===ni&&(ce.altKey?(x&&(c=h-M*x,n=i+M*x),_&&(f=d-O*_,o=s+O*_),b=ii):(x<0?c=h:x>0&&(n=i),_<0?f=d:_>0&&(o=s),b=ri),I.attr("cursor",hi[m]),Y());break;default:return}ti()}}function d(){l(this,arguments).moved()}function p(){l(this,arguments).ended()}function g(){var e=this.__brush||{selection:null};return e.extent=oi(n.apply(this,arguments)),e.dim=t,e}return c.move=function(e,n){e.selection?e.on("start.brush",(function(){l(this,arguments).beforestart().start()})).on("interrupt.brush end.brush",(function(){l(this,arguments).end()})).tween("brush",(function(){var e=this,r=e.__brush,i=l(e,arguments),a=r.selection,o=t.input("function"==typeof n?n.apply(this,arguments):n,r.extent),s=An(a,o);function c(t){r.selection=1===t&&null===o?null:s(t),u.call(e),i.brush()}return null!==a&&null!==o?c:c(1)})):e.each((function(){var e=this,r=arguments,i=e.__brush,a=t.input("function"==typeof n?n.apply(e,r):n,i.extent),o=l(e,r).beforestart();or(e),i.selection=null===a?null:a,u.call(e),o.start().brush().end()}))},c.clear=function(t){c.move(t,null)},h.prototype={beforestart:function(){return 1==++this.active&&(this.state.emitter=this,this.starting=!0),this},start:function(){return this.starting?(this.starting=!1,this.emit("start")):this.emit("brush"),this},brush:function(){return this.emit("brush"),this},end:function(){return 0==--this.active&&(delete this.state.emitter,this.emit("end")),this},emit:function(e){pe(new Qr(c,e,t.output(this.state.selection)),o.apply,o,[e,this.that,this.args])}},c.extent=function(t){return arguments.length?(n="function"==typeof t?t:Jr(oi(t)),c):n},c.filter=function(t){return arguments.length?(r="function"==typeof t?t:Jr(!!t),c):r},c.touchable=function(t){return arguments.length?(i="function"==typeof t?t:Jr(!!t),c):i},c.handleSize=function(t){return arguments.length?(s=+t,c):s},c.keyModifiers=function(t){return 
arguments.length?(a=!!t,c):a},c.on=function(){var t=o.on.apply(o,arguments);return t===o?c:t},c}var Si=Math.cos,Ai=Math.sin,Mi=Math.PI,Oi=Mi/2,Di=2*Mi,Ni=Math.max;function Bi(t){return function(e,n){return t(e.source.value+e.target.value,n.source.value+n.target.value)}}var Li=function(){var t=0,e=null,n=null,r=null;function i(i){var a,o,s,c,u,l,h=i.length,f=[],d=k(h),p=[],g=[],y=g.groups=new Array(h),v=new Array(h*h);for(a=0,u=-1;++u1e-6)if(Math.abs(l*s-c*u)>1e-6&&i){var f=n-a,d=r-o,p=s*s+c*c,g=f*f+d*d,y=Math.sqrt(p),v=Math.sqrt(h),m=i*Math.tan((Ii-Math.acos((p+h-g)/(2*y*v)))/2),b=m/v,x=m/y;Math.abs(b-1)>1e-6&&(this._+="L"+(t+b*u)+","+(e+b*l)),this._+="A"+i+","+i+",0,0,"+ +(l*f>u*d)+","+(this._x1=t+x*s)+","+(this._y1=e+x*c)}else this._+="L"+(this._x1=t)+","+(this._y1=e);else;},arc:function(t,e,n,r,i,a){t=+t,e=+e,a=!!a;var o=(n=+n)*Math.cos(r),s=n*Math.sin(r),c=t+o,u=e+s,l=1^a,h=a?r-i:i-r;if(n<0)throw new Error("negative radius: "+n);null===this._x1?this._+="M"+c+","+u:(Math.abs(this._x1-c)>1e-6||Math.abs(this._y1-u)>1e-6)&&(this._+="L"+c+","+u),n&&(h<0&&(h=h%ji+ji),h>Ri?this._+="A"+n+","+n+",0,1,"+l+","+(t-o)+","+(e-s)+"A"+n+","+n+",0,1,"+l+","+(this._x1=c)+","+(this._y1=u):h>1e-6&&(this._+="A"+n+","+n+",0,"+ +(h>=Ii)+","+l+","+(this._x1=t+n*Math.cos(i))+","+(this._y1=e+n*Math.sin(i))))},rect:function(t,e,n,r){this._+="M"+(this._x0=this._x1=+t)+","+(this._y0=this._y1=+e)+"h"+ +n+"v"+ +r+"h"+-n+"Z"},toString:function(){return this._}};var Ui=zi;function $i(t){return t.source}function Wi(t){return t.target}function Vi(t){return t.radius}function Hi(t){return t.startAngle}function Gi(t){return t.endAngle}var qi=function(){var t=$i,e=Wi,n=Vi,r=Hi,i=Gi,a=null;function o(){var o,s=Fi.call(arguments),c=t.apply(this,s),u=e.apply(this,s),l=+n.apply(this,(s[0]=c,s)),h=r.apply(this,s)-Oi,f=i.apply(this,s)-Oi,d=l*Si(h),p=l*Ai(h),g=+n.apply(this,(s[0]=u,s)),y=r.apply(this,s)-Oi,v=i.apply(this,s)-Oi;if(a||(a=o=Ui()),a.moveTo(d,p),a.arc(0,0,l,h,f),h===y&&f===v||(a.quadraticCurveTo(0,0,g*Si(y),g*Ai(y)),a.arc(0,0,g,y,v)),a.quadraticCurveTo(0,0,d,p),a.closePath(),o)return a=null,o+""||null}return o.radius=function(t){return arguments.length?(n="function"==typeof t?t:Pi(+t),o):n},o.startAngle=function(t){return arguments.length?(r="function"==typeof t?t:Pi(+t),o):r},o.endAngle=function(t){return arguments.length?(i="function"==typeof t?t:Pi(+t),o):i},o.source=function(e){return arguments.length?(t=e,o):t},o.target=function(t){return arguments.length?(e=t,o):e},o.context=function(t){return arguments.length?(a=null==t?null:t,o):a},o};function Xi(){}function Zi(t,e){var n=new Xi;if(t instanceof Xi)t.each((function(t,e){n.set(e,t)}));else if(Array.isArray(t)){var r,i=-1,a=t.length;if(null==e)for(;++i=r.length)return null!=t&&n.sort(t),null!=e?e(n):n;for(var c,u,l,h=-1,f=n.length,d=r[i++],p=Ji(),g=o();++hr.length)return n;var o,s=i[a-1];return null!=e&&a>=r.length?o=n.entries():(o=[],n.each((function(e,n){o.push({key:n,values:t(e,a)})}))),null!=s?o.sort((function(t,e){return s(t.key,e.key)})):o}(a(t,0,ea,na),0)},key:function(t){return r.push(t),n},sortKeys:function(t){return i[r.length-1]=t,n},sortValues:function(e){return t=e,n},rollup:function(t){return e=t,n}}};function Ki(){return{}}function ta(t,e,n){t[e]=n}function ea(){return Ji()}function na(t,e,n){t.set(e,n)}function ra(){}var ia=Ji.prototype;function aa(t,e){var n=new ra;if(t instanceof ra)t.each((function(t){n.add(t)}));else if(t){var r=-1,i=t.length;if(null==e)for(;++r6/29*(6/29)*(6/29)?Math.pow(t,1/3):t/(6/29*3*(6/29))+4/29}function va(t){return 
t>6/29?t*t*t:6/29*3*(6/29)*(t-4/29)}function ma(t){return 255*(t<=.0031308?12.92*t:1.055*Math.pow(t,1/2.4)-.055)}function ba(t){return(t/=255)<=.04045?t/12.92:Math.pow((t+.055)/1.055,2.4)}function xa(t){if(t instanceof wa)return new wa(t.h,t.c,t.l,t.opacity);if(t instanceof ga||(t=fa(t)),0===t.a&&0===t.b)return new wa(NaN,0r!=d>r&&n<(f-u)*(r-l)/(d-l)+u&&(i=-i)}return i}function Ia(t,e,n){var r,i,a,o;return function(t,e,n){return(e[0]-t[0])*(n[1]-t[1])==(n[0]-t[0])*(e[1]-t[1])}(t,e,n)&&(i=t[r=+(t[0]===e[0])],a=n[r],o=e[r],i<=a&&a<=o||o<=a&&a<=i)}var ja=function(){},Ra=[[],[[[1,1.5],[.5,1]]],[[[1.5,1],[1,1.5]]],[[[1.5,1],[.5,1]]],[[[1,.5],[1.5,1]]],[[[1,1.5],[.5,1]],[[1,.5],[1.5,1]]],[[[1,.5],[1,1.5]]],[[[1,.5],[.5,1]]],[[[.5,1],[1,.5]]],[[[1,1.5],[1,.5]]],[[[.5,1],[1,.5]],[[1.5,1],[1,1.5]]],[[[1.5,1],[1,.5]]],[[[.5,1],[1.5,1]]],[[[1,1.5],[1.5,1]]],[[[.5,1],[1,1.5]]],[]],Ya=function(){var t=1,e=1,n=M,r=s;function i(t){var e=n(t);if(Array.isArray(e))e=e.slice().sort(Ba);else{var r=y(t),i=r[0],o=r[1];e=A(i,o,e),e=k(Math.floor(i/e)*e,Math.floor(o/e)*e,e)}return e.map((function(e){return a(t,e)}))}function a(n,i){var a=[],s=[];return function(n,r,i){var a,s,c,u,l,h,f=new Array,d=new Array;a=s=-1,u=n[0]>=r,Ra[u<<1].forEach(p);for(;++a=r,Ra[c|u<<1].forEach(p);Ra[u<<0].forEach(p);for(;++s=r,l=n[s*t]>=r,Ra[u<<1|l<<2].forEach(p);++a=r,h=l,l=n[s*t+a+1]>=r,Ra[c|u<<1|l<<2|h<<3].forEach(p);Ra[u|l<<3].forEach(p)}a=-1,l=n[s*t]>=r,Ra[l<<2].forEach(p);for(;++a=r,Ra[l<<2|h<<3].forEach(p);function p(t){var e,n,r=[t[0][0]+a,t[0][1]+s],c=[t[1][0]+a,t[1][1]+s],u=o(r),l=o(c);(e=d[u])?(n=f[l])?(delete d[e.end],delete f[n.start],e===n?(e.ring.push(c),i(e.ring)):f[e.start]=d[n.end]={start:e.start,end:n.end,ring:e.ring.concat(n.ring)}):(delete d[e.end],e.ring.push(c),d[e.end=l]=e):(e=f[l])?(n=d[u])?(delete f[e.start],delete d[n.end],e===n?(e.ring.push(c),i(e.ring)):f[n.start]=d[e.end]={start:n.start,end:e.end,ring:n.ring.concat(e.ring)}):(delete f[e.start],e.ring.unshift(r),f[e.start=u]=e):f[u]=d[l]={start:u,end:l,ring:[r,c]}}Ra[l<<3].forEach(p)}(n,i,(function(t){r(t,n,i),function(t){for(var e=0,n=t.length,r=t[n-1][1]*t[0][0]-t[n-1][0]*t[0][1];++e0?a.push([t]):s.push(t)})),s.forEach((function(t){for(var e,n=0,r=a.length;n0&&o0&&s0&&a>0))throw new Error("invalid size");return t=r,e=a,i},i.thresholds=function(t){return arguments.length?(n="function"==typeof t?t:Array.isArray(t)?La(Na.call(t)):La(t),i):n},i.smooth=function(t){return arguments.length?(r=t?s:ja,i):r===s},i};function za(t,e,n){for(var r=t.width,i=t.height,a=1+(n<<1),o=0;o=n&&(s>=a&&(c-=t.data[s-a+o*r]),e.data[s-n+o*r]=c/Math.min(s+1,r-1+a-s,a))}function Ua(t,e,n){for(var r=t.width,i=t.height,a=1+(n<<1),o=0;o=n&&(s>=a&&(c-=t.data[o+(s-a)*r]),e.data[o+(s-n)*r]=c/Math.min(s+1,i-1+a-s,a))}function $a(t){return t[0]}function Wa(t){return t[1]}function Va(){return 1}var Ha=function(){var t=$a,e=Wa,n=Va,r=960,i=500,a=20,o=2,s=3*a,c=r+2*s>>o,u=i+2*s>>o,l=La(20);function h(r){var i=new Float32Array(c*u),h=new Float32Array(c*u);r.forEach((function(r,a,l){var h=+t(r,a,l)+s>>o,f=+e(r,a,l)+s>>o,d=+n(r,a,l);h>=0&&h=0&&f>o),Ua({width:c,height:u,data:h},{width:c,height:u,data:i},a>>o),za({width:c,height:u,data:i},{width:c,height:u,data:h},a>>o),Ua({width:c,height:u,data:h},{width:c,height:u,data:i},a>>o),za({width:c,height:u,data:i},{width:c,height:u,data:h},a>>o),Ua({width:c,height:u,data:h},{width:c,height:u,data:i},a>>o);var d=l(i);if(!Array.isArray(d)){var p=L(i);d=A(0,p,d),(d=k(0,Math.floor(p/d)*d,d)).shift()}return Ya().thresholds(d).size([c,u])(i).map(f)}function 
f(t){return t.value*=Math.pow(2,-2*o),t.coordinates.forEach(d),t}function d(t){t.forEach(p)}function p(t){t.forEach(g)}function g(t){t[0]=t[0]*Math.pow(2,o)-s,t[1]=t[1]*Math.pow(2,o)-s}function y(){return c=r+2*(s=3*a)>>o,u=i+2*s>>o,h}return h.x=function(e){return arguments.length?(t="function"==typeof e?e:La(+e),h):t},h.y=function(t){return arguments.length?(e="function"==typeof t?t:La(+t),h):e},h.weight=function(t){return arguments.length?(n="function"==typeof t?t:La(+t),h):n},h.size=function(t){if(!arguments.length)return[r,i];var e=Math.ceil(t[0]),n=Math.ceil(t[1]);if(!(e>=0||e>=0))throw new Error("invalid size");return r=e,i=n,y()},h.cellSize=function(t){if(!arguments.length)return 1<=1))throw new Error("invalid cell size");return o=Math.floor(Math.log(t)/Math.LN2),y()},h.thresholds=function(t){return arguments.length?(l="function"==typeof t?t:Array.isArray(t)?La(Na.call(t)):La(t),h):l},h.bandwidth=function(t){if(!arguments.length)return Math.sqrt(a*(a+1));if(!((t=+t)>=0))throw new Error("invalid bandwidth");return a=Math.round((Math.sqrt(4*t*t+1)-1)/2),y()},h},Ga=function(t){return function(){return t}};function qa(t,e,n,r,i,a,o,s,c,u){this.target=t,this.type=e,this.subject=n,this.identifier=r,this.active=i,this.x=a,this.y=o,this.dx=s,this.dy=c,this._=u}function Xa(){return!ce.ctrlKey&&!ce.button}function Za(){return this.parentNode}function Ja(t){return null==t?{x:ce.x,y:ce.y}:t}function Qa(){return navigator.maxTouchPoints||"ontouchstart"in this}qa.prototype.on=function(){var t=this._.on.apply(this._,arguments);return t===this._?this:t};var Ka=function(){var t,e,n,r,i=Xa,a=Za,o=Ja,s=Qa,c={},u=lt("start","drag","end"),l=0,h=0;function f(t){t.on("mousedown.drag",d).filter(s).on("touchstart.drag",y).on("touchmove.drag",v).on("touchend.drag touchcancel.drag",m).style("touch-action","none").style("-webkit-tap-highlight-color","rgba(0,0,0,0)")}function d(){if(!r&&i.apply(this,arguments)){var o=b("mouse",a.apply(this,arguments),Nn,this,arguments);o&&(ke(ce.view).on("mousemove.drag",p,!0).on("mouseup.drag",g,!0),Te(ce.view),we(),n=!1,t=ce.clientX,e=ce.clientY,o("start"))}}function p(){if(Ee(),!n){var r=ce.clientX-t,i=ce.clientY-e;n=r*r+i*i>h}c.mouse("drag")}function g(){ke(ce.view).on("mousemove.drag mouseup.drag",null),Ce(ce.view,n),Ee(),c.mouse("end")}function y(){if(i.apply(this,arguments)){var t,e,n=ce.changedTouches,r=a.apply(this,arguments),o=n.length;for(t=0;t9999?"+"+io(e,6):io(e,4))+"-"+io(t.getUTCMonth()+1,2)+"-"+io(t.getUTCDate(),2)+(a?"T"+io(n,2)+":"+io(r,2)+":"+io(i,2)+"."+io(a,3)+"Z":i?"T"+io(n,2)+":"+io(r,2)+":"+io(i,2)+"Z":r||n?"T"+io(n,2)+":"+io(r,2)+"Z":"")}var oo=function(t){var e=new RegExp('["'+t+"\n\r]"),n=t.charCodeAt(0);function r(t,e){var r,i=[],a=t.length,o=0,s=0,c=a<=0,u=!1;function l(){if(c)return eo;if(u)return u=!1,to;var e,r,i=o;if(34===t.charCodeAt(i)){for(;o++=a?c=!0:10===(r=t.charCodeAt(o++))?u=!0:13===r&&(u=!0,10===t.charCodeAt(o)&&++o),t.slice(i+1,e-1).replace(/""/g,'"')}for(;o=(a=(g+v)/2))?g=a:v=a,(l=n>=(o=(y+m)/2))?y=o:m=o,i=d,!(d=d[h=l<<1|u]))return i[h]=p,t;if(s=+t._x.call(null,d.data),c=+t._y.call(null,d.data),e===s&&n===c)return p.next=d,i?i[h]=p:t._root=p,t;do{i=i?i[h]=new Array(4):t._root=new Array(4),(u=e>=(a=(g+v)/2))?g=a:v=a,(l=n>=(o=(y+m)/2))?y=o:m=o}while((h=l<<1|u)==(f=(c>=o)<<1|s>=a));return i[f]=d,i[h]=p,t}var _s=function(t,e,n,r,i){this.node=t,this.x0=e,this.y0=n,this.x1=r,this.y1=i};function ks(t){return t[0]}function ws(t){return t[1]}function Es(t,e,n){var r=new Ts(null==e?ks:e,null==n?ws:n,NaN,NaN,NaN,NaN);return 
null==t?r:r.addAll(t)}function Ts(t,e,n,r,i,a){this._x=t,this._y=e,this._x0=n,this._y0=r,this._x1=i,this._y1=a,this._root=void 0}function Cs(t){for(var e={data:t.data},n=e;t=t.next;)n=n.next={data:t.data};return e}var Ss=Es.prototype=Ts.prototype;function As(t){return t.x+t.vx}function Ms(t){return t.y+t.vy}Ss.copy=function(){var t,e,n=new Ts(this._x,this._y,this._x0,this._y0,this._x1,this._y1),r=this._root;if(!r)return n;if(!r.length)return n._root=Cs(r),n;for(t=[{source:r,target:n._root=new Array(4)}];r=t.pop();)for(var i=0;i<4;++i)(e=r.source[i])&&(e.length?t.push({source:e,target:r.target[i]=new Array(4)}):r.target[i]=Cs(e));return n},Ss.add=function(t){var e=+this._x.call(null,t),n=+this._y.call(null,t);return xs(this.cover(e,n),e,n,t)},Ss.addAll=function(t){var e,n,r,i,a=t.length,o=new Array(a),s=new Array(a),c=1/0,u=1/0,l=-1/0,h=-1/0;for(n=0;nl&&(l=r),ih&&(h=i));if(c>l||u>h)return this;for(this.cover(c,u).cover(l,h),n=0;nt||t>=i||r>e||e>=a;)switch(s=(ef||(a=c.y0)>d||(o=c.x1)=v)<<1|t>=y)&&(c=p[p.length-1],p[p.length-1]=p[p.length-1-u],p[p.length-1-u]=c)}else{var m=t-+this._x.call(null,g.data),b=e-+this._y.call(null,g.data),x=m*m+b*b;if(x=(s=(p+y)/2))?p=s:y=s,(l=o>=(c=(g+v)/2))?g=c:v=c,e=d,!(d=d[h=l<<1|u]))return this;if(!d.length)break;(e[h+1&3]||e[h+2&3]||e[h+3&3])&&(n=e,f=h)}for(;d.data!==t;)if(r=d,!(d=d.next))return this;return(i=d.next)&&delete d.next,r?(i?r.next=i:delete r.next,this):e?(i?e[h]=i:delete e[h],(d=e[0]||e[1]||e[2]||e[3])&&d===(e[3]||e[2]||e[1]||e[0])&&!d.length&&(n?n[f]=d:this._root=d),this):(this._root=i,this)},Ss.removeAll=function(t){for(var e=0,n=t.length;ec+d||iu+d||as.index){var p=c-o.x-o.vx,g=u-o.y-o.vy,y=p*p+g*g;yt.r&&(t.r=t[e].r)}function s(){if(e){var r,i,a=e.length;for(n=new Array(a),r=0;r1?(null==n?s.remove(t):s.set(t,d(n)),e):s.get(t)},find:function(e,n,r){var i,a,o,s,c,u=0,l=t.length;for(null==r?r=1/0:r*=r,u=0;u1?(u.on(t,n),e):u.on(t)}}},js=function(){var t,e,n,r,i=ms(-30),a=1,o=1/0,s=.81;function c(r){var i,a=t.length,o=Es(t,Ls,Fs).visitAfter(l);for(n=r,i=0;i=o)){(t.data!==e||t.next)&&(0===l&&(d+=(l=bs())*l),0===h&&(d+=(h=bs())*h),d1?r[0]+r.slice(2):r,+t.slice(n+1)]},$s=function(t){return(t=Us(Math.abs(t)))?t[1]:NaN},Ws=/^(?:(.)?([<>=^]))?([+\-( ])?([$#])?(0)?(\d+)?(,)?(\.\d+)?(~)?([a-z%])?$/i;function Vs(t){if(!(e=Ws.exec(t)))throw new Error("invalid format: "+t);var e;return new Hs({fill:e[1],align:e[2],sign:e[3],symbol:e[4],zero:e[5],width:e[6],comma:e[7],precision:e[8]&&e[8].slice(1),trim:e[9],type:e[10]})}function Hs(t){this.fill=void 0===t.fill?" 
":t.fill+"",this.align=void 0===t.align?">":t.align+"",this.sign=void 0===t.sign?"-":t.sign+"",this.symbol=void 0===t.symbol?"":t.symbol+"",this.zero=!!t.zero,this.width=void 0===t.width?void 0:+t.width,this.comma=!!t.comma,this.precision=void 0===t.precision?void 0:+t.precision,this.trim=!!t.trim,this.type=void 0===t.type?"":t.type+""}Vs.prototype=Hs.prototype,Hs.prototype.toString=function(){return this.fill+this.align+this.sign+this.symbol+(this.zero?"0":"")+(void 0===this.width?"":Math.max(1,0|this.width))+(this.comma?",":"")+(void 0===this.precision?"":"."+Math.max(0,0|this.precision))+(this.trim?"~":"")+this.type};var Gs,qs,Xs,Zs,Js=function(t,e){var n=Us(t,e);if(!n)return t+"";var r=n[0],i=n[1];return i<0?"0."+new Array(-i).join("0")+r:r.length>i+1?r.slice(0,i+1)+"."+r.slice(i+1):r+new Array(i-r.length+2).join("0")},Qs={"%":function(t,e){return(100*t).toFixed(e)},b:function(t){return Math.round(t).toString(2)},c:function(t){return t+""},d:function(t){return Math.round(t).toString(10)},e:function(t,e){return t.toExponential(e)},f:function(t,e){return t.toFixed(e)},g:function(t,e){return t.toPrecision(e)},o:function(t){return Math.round(t).toString(8)},p:function(t,e){return Js(100*t,e)},r:Js,s:function(t,e){var n=Us(t,e);if(!n)return t+"";var r=n[0],i=n[1],a=i-(Gs=3*Math.max(-8,Math.min(8,Math.floor(i/3))))+1,o=r.length;return a===o?r:a>o?r+new Array(a-o+1).join("0"):a>0?r.slice(0,a)+"."+r.slice(a):"0."+new Array(1-a).join("0")+Us(t,Math.max(0,e+a-1))[0]},X:function(t){return Math.round(t).toString(16).toUpperCase()},x:function(t){return Math.round(t).toString(16)}},Ks=function(t){return t},tc=Array.prototype.map,ec=["y","z","a","f","p","n","µ","m","","k","M","G","T","P","E","Z","Y"],nc=function(t){var e,n,r=void 0===t.grouping||void 0===t.thousands?Ks:(e=tc.call(t.grouping,Number),n=t.thousands+"",function(t,r){for(var i=t.length,a=[],o=0,s=e[0],c=0;i>0&&s>0&&(c+s+1>r&&(s=Math.max(1,r-c)),a.push(t.substring(i-=s,i+s)),!((c+=s+1)>r));)s=e[o=(o+1)%e.length];return a.reverse().join(n)}),i=void 0===t.currency?"":t.currency[0]+"",a=void 0===t.currency?"":t.currency[1]+"",o=void 0===t.decimal?".":t.decimal+"",s=void 0===t.numerals?Ks:function(t){return function(e){return e.replace(/[0-9]/g,(function(e){return t[+e]}))}}(tc.call(t.numerals,String)),c=void 0===t.percent?"%":t.percent+"",u=void 0===t.minus?"-":t.minus+"",l=void 0===t.nan?"NaN":t.nan+"";function h(t){var e=(t=Vs(t)).fill,n=t.align,h=t.sign,f=t.symbol,d=t.zero,p=t.width,g=t.comma,y=t.precision,v=t.trim,m=t.type;"n"===m?(g=!0,m="g"):Qs[m]||(void 0===y&&(y=12),v=!0,m="g"),(d||"0"===e&&"="===n)&&(d=!0,e="0",n="=");var b="$"===f?i:"#"===f&&/[boxX]/.test(m)?"0"+m.toLowerCase():"",x="$"===f?a:/[%p]/.test(m)?c:"",_=Qs[m],k=/[defgprs%]/.test(m);function w(t){var i,a,c,f=b,w=x;if("c"===m)w=_(t)+w,t="";else{var E=(t=+t)<0;if(t=isNaN(t)?l:_(Math.abs(t),y),v&&(t=function(t){t:for(var e,n=t.length,r=1,i=-1;r0&&(i=0)}return i>0?t.slice(0,i)+t.slice(e+1):t}(t)),E&&0==+t&&(E=!1),f=(E?"("===h?h:u:"-"===h||"("===h?"":h)+f,w=("s"===m?ec[8+Gs/3]:"")+w+(E&&"("===h?")":""),k)for(i=-1,a=t.length;++i(c=t.charCodeAt(i))||c>57){w=(46===c?o+t.slice(i+1):t.slice(i))+w,t=t.slice(0,i);break}}g&&!d&&(t=r(t,1/0));var T=f.length+t.length+w.length,C=T>1)+f+t+w+C.slice(T);break;default:t=C+f+t+w}return s(t)}return y=void 0===y?6:/[gprs]/.test(m)?Math.max(1,Math.min(21,y)):Math.max(0,Math.min(20,y)),w.toString=function(){return t+""},w}return{format:h,formatPrefix:function(t,e){var 
3:this.point(this._x3,this._y3),this.point(this._x4,this._y4),this.point(this._x5,this._y5)}},point:function(t,e){if(t=+t,e=+e,this._point){var n=this._x2-t,r=this._y2-e;this._l23_a=Math.sqrt(this._l23_2a=Math.pow(n*n+r*r,this._alpha))}switch(this._point){case 0:this._point=1,this._x3=t,this._y3=e;break;case 1:this._point=2,this._context.moveTo(this._x4=t,this._y4=e);break;case 2:this._point=3,this._x5=t,this._y5=e;break;default:Y_(this,t,e)}this._l01_a=this._l12_a,this._l12_a=this._l23_a,this._l01_2a=this._l12_2a,this._l12_2a=this._l23_2a,this._x0=this._x1,this._x1=this._x2,this._x2=t,this._y0=this._y1,this._y1=this._y2,this._y2=e}};var W_=function t(e){function n(t){return e?new $_(t,e):new P_(t,0)}return n.alpha=function(e){return t(+e)},n}(.5);function V_(t,e){this._context=t,this._alpha=e}V_.prototype={areaStart:function(){this._line=0},areaEnd:function(){this._line=NaN},lineStart:function(){this._x0=this._x1=this._x2=this._y0=this._y1=this._y2=NaN,this._l01_a=this._l12_a=this._l23_a=this._l01_2a=this._l12_2a=this._l23_2a=this._point=0},lineEnd:function(){(this._line||0!==this._line&&3===this._point)&&this._context.closePath(),this._line=1-this._line},point:function(t,e){if(t=+t,e=+e,this._point){var n=this._x2-t,r=this._y2-e;this._l23_a=Math.sqrt(this._l23_2a=Math.pow(n*n+r*r,this._alpha))}switch(this._point){case 0:this._point=1;break;case 1:this._point=2;break;case 2:this._point=3,this._line?this._context.lineTo(this._x2,this._y2):this._context.moveTo(this._x2,this._y2);break;case 3:this._point=4;default:Y_(this,t,e)}this._l01_a=this._l12_a,this._l12_a=this._l23_a,this._l01_2a=this._l12_2a,this._l12_2a=this._l23_2a,this._x0=this._x1,this._x1=this._x2,this._x2=t,this._y0=this._y1,this._y1=this._y2,this._y2=e}};var H_=function t(e){function n(t){return e?new V_(t,e):new j_(t,0)}return n.alpha=function(e){return t(+e)},n}(.5);function G_(t){this._context=t}G_.prototype={areaStart:w_,areaEnd:w_,lineStart:function(){this._point=0},lineEnd:function(){this._point&&this._context.closePath()},point:function(t,e){t=+t,e=+e,this._point?this._context.lineTo(t,e):(this._point=1,this._context.moveTo(t,e))}};var q_=function(t){return new G_(t)};function X_(t){return t<0?-1:1}function Z_(t,e,n){var r=t._x1-t._x0,i=e-t._x1,a=(t._y1-t._y0)/(r||i<0&&-0),o=(n-t._y1)/(i||r<0&&-0),s=(a*i+o*r)/(r+i);return(X_(a)+X_(o))*Math.min(Math.abs(a),Math.abs(o),.5*Math.abs(s))||0}function J_(t,e){var n=t._x1-t._x0;return n?(3*(t._y1-t._y0)/n-e)/2:e}function Q_(t,e,n){var r=t._x0,i=t._y0,a=t._x1,o=t._y1,s=(a-r)/3;t._context.bezierCurveTo(r+s,i+s*e,a-s,o-s*n,a,o)}function K_(t){this._context=t}function tk(t){this._context=new ek(t)}function ek(t){this._context=t}function nk(t){return new K_(t)}function rk(t){return new tk(t)}function ik(t){this._context=t}function ak(t){var e,n,r=t.length-1,i=new Array(r),a=new Array(r),o=new Array(r);for(i[0]=0,a[0]=2,o[0]=t[0]+2*t[1],e=1;e=0;--e)i[e]=(o[e]-i[e+1])/a[e];for(a[r-1]=(t[r]+i[r-1])/2,e=0;e=0&&(this._t=1-this._t,this._line=1-this._line)},point:function(t,e){switch(t=+t,e=+e,this._point){case 0:this._point=1,this._line?this._context.lineTo(t,e):this._context.moveTo(t,e);break;case 1:this._point=2;default:if(this._t<=0)this._context.lineTo(this._x,e),this._context.lineTo(t,e);else{var n=this._x*(1-this._t)+t*this._t;this._context.lineTo(n,this._y),this._context.lineTo(n,e)}}this._x=t,this._y=e}};var ck=function(t){return new sk(t,.5)};function uk(t){return new sk(t,0)}function lk(t){return new sk(t,1)}var hk=function(t,e){if((i=t.length)>1)for(var 
n,r,i,a=1,o=t[e[0]],s=o.length;a=0;)n[e]=e;return n};function dk(t,e){return t[e]}var pk=function(){var t=fx([]),e=fk,n=hk,r=dk;function i(i){var a,o,s=t.apply(this,arguments),c=i.length,u=s.length,l=new Array(u);for(a=0;a0){for(var n,r,i,a=0,o=t[0].length;a0)for(var n,r,i,a,o,s,c=0,u=t[e[0]].length;c0?(r[0]=a,r[1]=a+=i):i<0?(r[1]=o,r[0]=o+=i):(r[0]=0,r[1]=i)},vk=function(t,e){if((n=t.length)>0){for(var n,r=0,i=t[e[0]],a=i.length;r0&&(r=(n=t[e[0]]).length)>0){for(var n,r,i,a=0,o=1;oa&&(a=e,r=n);return r}var _k=function(t){var e=t.map(kk);return fk(t).sort((function(t,n){return e[t]-e[n]}))};function kk(t){for(var e,n=0,r=-1,i=t.length;++r0)){if(a/=f,f<0){if(a0){if(a>h)return;a>l&&(l=a)}if(a=r-c,f||!(a<0)){if(a/=f,f<0){if(a>h)return;a>l&&(l=a)}else if(f>0){if(a0)){if(a/=d,d<0){if(a0){if(a>h)return;a>l&&(l=a)}if(a=i-u,d||!(a<0)){if(a/=d,d<0){if(a>h)return;a>l&&(l=a)}else if(d>0){if(a0||h<1)||(l>0&&(t[0]=[c+l*f,u+l*d]),h<1&&(t[1]=[c+h*f,u+h*d]),!0)}}}}}function Uk(t,e,n,r,i){var a=t[1];if(a)return!0;var o,s,c=t[0],u=t.left,l=t.right,h=u[0],f=u[1],d=l[0],p=l[1],g=(h+d)/2,y=(f+p)/2;if(p===f){if(g=r)return;if(h>d){if(c){if(c[1]>=i)return}else c=[g,n];a=[g,i]}else{if(c){if(c[1]1)if(h>d){if(c){if(c[1]>=i)return}else c=[(n-s)/o,n];a=[(i-s)/o,i]}else{if(c){if(c[1]=r)return}else c=[e,o*e+s];a=[r,o*r+s]}else{if(c){if(c[0]=-lw)){var d=c*c+u*u,p=l*l+h*h,g=(h*d-u*p)/f,y=(c*p-l*d)/f,v=Gk.pop()||new qk;v.arc=t,v.site=i,v.x=g+o,v.y=(v.cy=y+s)+Math.sqrt(g*g+y*y),t.circle=v;for(var m=null,b=sw._;b;)if(v.yuw)s=s.L;else{if(!((i=a-iw(s,o))>uw)){r>-uw?(e=s.P,n=s):i>-uw?(e=s,n=s.N):e=n=s;break}if(!s.R){e=s;break}s=s.R}!function(t){ow[t.index]={site:t,halfedges:[]}}(t);var c=Kk(t);if(aw.insert(e,c),e||n){if(e===n)return Zk(e),n=Kk(e.site),aw.insert(c,n),c.edge=n.edge=jk(e.site,c.site),Xk(e),void Xk(n);if(n){Zk(e),Zk(n);var u=e.site,l=u[0],h=u[1],f=t[0]-l,d=t[1]-h,p=n.site,g=p[0]-l,y=p[1]-h,v=2*(f*y-d*g),m=f*f+d*d,b=g*g+y*y,x=[(y*m-d*b)/v+l,(f*b-g*m)/v+h];Yk(n.edge,u,p,x),c.edge=jk(u,t,null,x),n.edge=jk(t,p,null,x),Xk(e),Xk(n)}else c.edge=jk(e.site,c.site)}}function rw(t,e){var n=t.site,r=n[0],i=n[1],a=i-e;if(!a)return r;var o=t.P;if(!o)return-1/0;var s=(n=o.site)[0],c=n[1],u=c-e;if(!u)return s;var l=s-r,h=1/a-1/u,f=l/u;return h?(-f+Math.sqrt(f*f-2*h*(l*l/(-2*u)-c+u/2+i-a/2)))/h+r:(r+s)/2}function iw(t,e){var n=t.N;if(n)return rw(n,e);var r=t.site;return r[1]===e?r[0]:1/0}var aw,ow,sw,cw,uw=1e-6,lw=1e-12;function hw(t,e){return e[1]-t[1]||e[0]-t[0]}function fw(t,e){var n,r,i,a=t.sort(hw).pop();for(cw=[],ow=new Array(t.length),aw=new Ik,sw=new Ik;;)if(i=Hk,a&&(!i||a[1]uw||Math.abs(i[0][1]-i[1][1])>uw)||delete cw[a]}(o,s,c,u),function(t,e,n,r){var i,a,o,s,c,u,l,h,f,d,p,g,y=ow.length,v=!0;for(i=0;iuw||Math.abs(g-f)>uw)&&(c.splice(s,0,cw.push(Rk(o,d,Math.abs(p-t)uw?[t,Math.abs(h-t)uw?[Math.abs(f-r)uw?[n,Math.abs(h-n)uw?[Math.abs(f-e)=s)return null;var c=t-i.site[0],u=e-i.site[1],l=c*c+u*u;do{i=a.cells[r=o],o=null,i.halfedges.forEach((function(n){var r=a.edges[n],s=r.left;if(s!==i.site&&s||(s=r.right)){var c=t-s[0],u=e-s[1],h=c*c+u*u;hr?(r+i)/2:Math.min(0,r)||Math.max(0,i),o>a?(a+o)/2:Math.min(0,a)||Math.max(0,o))}var Sw=function(){var t,e,n=_w,r=kw,i=Cw,a=Ew,o=Tw,s=[0,1/0],c=[[-1/0,-1/0],[1/0,1/0]],u=250,l=fp,h=lt("start","zoom","end"),f=0;function d(t){t.property("__zoom",ww).on("wheel.zoom",x).on("mousedown.zoom",_).on("dblclick.zoom",k).filter(o).on("touchstart.zoom",w).on("touchmove.zoom",E).on("touchend.zoom 
touchcancel.zoom",T).style("touch-action","none").style("-webkit-tap-highlight-color","rgba(0,0,0,0)")}function p(t,e){return(e=Math.max(s[0],Math.min(s[1],e)))===t.k?t:new yw(e,t.x,t.y)}function g(t,e,n){var r=e[0]-n[0]*t.k,i=e[1]-n[1]*t.k;return r===t.x&&i===t.y?t:new yw(t.k,r,i)}function y(t){return[(+t[0][0]+ +t[1][0])/2,(+t[0][1]+ +t[1][1])/2]}function v(t,e,n){t.on("start.zoom",(function(){m(this,arguments).start()})).on("interrupt.zoom end.zoom",(function(){m(this,arguments).end()})).tween("zoom",(function(){var t=this,i=arguments,a=m(t,i),o=r.apply(t,i),s=null==n?y(o):"function"==typeof n?n.apply(t,i):n,c=Math.max(o[1][0]-o[0][0],o[1][1]-o[0][1]),u=t.__zoom,h="function"==typeof e?e.apply(t,i):e,f=l(u.invert(s).concat(c/u.k),h.invert(s).concat(c/h.k));return function(t){if(1===t)t=h;else{var e=f(t),n=c/e[2];t=new yw(n,s[0]-e[0]*n,s[1]-e[1]*n)}a.zoom(null,t)}}))}function m(t,e,n){return!n&&t.__zooming||new b(t,e)}function b(t,e){this.that=t,this.args=e,this.active=0,this.extent=r.apply(t,e),this.taps=0}function x(){if(n.apply(this,arguments)){var t=m(this,arguments),e=this.__zoom,r=Math.max(s[0],Math.min(s[1],e.k*Math.pow(2,a.apply(this,arguments)))),o=Nn(this);if(t.wheel)t.mouse[0][0]===o[0]&&t.mouse[0][1]===o[1]||(t.mouse[1]=e.invert(t.mouse[0]=o)),clearTimeout(t.wheel);else{if(e.k===r)return;t.mouse=[o,e.invert(o)],or(this),t.start()}xw(),t.wheel=setTimeout(u,150),t.zoom("mouse",i(g(p(e,r),t.mouse[0],t.mouse[1]),t.extent,c))}function u(){t.wheel=null,t.end()}}function _(){if(!e&&n.apply(this,arguments)){var t=m(this,arguments,!0),r=ke(ce.view).on("mousemove.zoom",u,!0).on("mouseup.zoom",l,!0),a=Nn(this),o=ce.clientX,s=ce.clientY;Te(ce.view),bw(),t.mouse=[a,this.__zoom.invert(a)],or(this),t.start()}function u(){if(xw(),!t.moved){var e=ce.clientX-o,n=ce.clientY-s;t.moved=e*e+n*n>f}t.zoom("mouse",i(g(t.that.__zoom,t.mouse[0]=Nn(t.that),t.mouse[1]),t.extent,c))}function l(){r.on("mousemove.zoom mouseup.zoom",null),Ce(ce.view,t.moved),xw(),t.end()}}function k(){if(n.apply(this,arguments)){var t=this.__zoom,e=Nn(this),a=t.invert(e),o=t.k*(ce.shiftKey?.5:2),s=i(g(p(t,o),e,a),r.apply(this,arguments),c);xw(),u>0?ke(this).transition().duration(u).call(v,s,e):ke(this).call(d.transform,s)}}function w(){if(n.apply(this,arguments)){var e,r,i,a,o=ce.touches,s=o.length,c=m(this,arguments,ce.changedTouches.length===s);for(bw(),r=0;rh&&A.push("'"+this.terminals_[T]+"'");O=p.showPosition?"Parse error on line "+(c+1)+":\n"+p.showPosition()+"\nExpecting "+A.join(", ")+", got '"+(this.terminals_[x]||x)+"'":"Parse error on line "+(c+1)+": Unexpected "+(x==f?"end of input":"'"+(this.terminals_[x]||x)+"'"),this.parseError(O,{text:p.match,token:this.terminals_[x]||x,line:p.yylineno,loc:v,expected:A})}if(w[0]instanceof Array&&w.length>1)throw new Error("Parse Error: multiple actions possible at state: "+k+", token: "+x);switch(w[0]){case 1:n.push(x),i.push(p.yytext),a.push(p.yylloc),n.push(w[1]),x=null,_?(x=_,_=null):(u=p.yyleng,s=p.yytext,c=p.yylineno,v=p.yylloc,l>0&&l--);break;case 2:if(C=this.productions_[w[1]][1],M.$=i[i.length-C],M._$={first_line:a[a.length-(C||1)].first_line,last_line:a[a.length-1].last_line,first_column:a[a.length-(C||1)].first_column,last_column:a[a.length-1].last_column},m&&(M._$.range=[a[a.length-(C||1)].range[0],a[a.length-1].range[1]]),void 0!==(E=this.performAction.apply(M,[s,u,c,g.yy,w[1],i,a].concat(d))))return 
E;C&&(n=n.slice(0,-1*C*2),i=i.slice(0,-1*C),a=a.slice(0,-1*C)),n.push(this.productions_[w[1]][0]),i.push(M.$),a.push(M._$),S=o[n[n.length-2]][n[n.length-1]],n.push(S);break;case 3:return!0}}return!0}},M={EOF:1,parseError:function(t,e){if(!this.yy.parser)throw new Error(t);this.yy.parser.parseError(t,e)},setInput:function(t,e){return this.yy=e||this.yy||{},this._input=t,this._more=this._backtrack=this.done=!1,this.yylineno=this.yyleng=0,this.yytext=this.matched=this.match="",this.conditionStack=["INITIAL"],this.yylloc={first_line:1,first_column:0,last_line:1,last_column:0},this.options.ranges&&(this.yylloc.range=[0,0]),this.offset=0,this},input:function(){var t=this._input[0];return this.yytext+=t,this.yyleng++,this.offset++,this.match+=t,this.matched+=t,t.match(/(?:\r\n?|\n).*/g)?(this.yylineno++,this.yylloc.last_line++):this.yylloc.last_column++,this.options.ranges&&this.yylloc.range[1]++,this._input=this._input.slice(1),t},unput:function(t){var e=t.length,n=t.split(/(?:\r\n?|\n)/g);this._input=t+this._input,this.yytext=this.yytext.substr(0,this.yytext.length-e),this.offset-=e;var r=this.match.split(/(?:\r\n?|\n)/g);this.match=this.match.substr(0,this.match.length-1),this.matched=this.matched.substr(0,this.matched.length-1),n.length-1&&(this.yylineno-=n.length-1);var i=this.yylloc.range;return this.yylloc={first_line:this.yylloc.first_line,last_line:this.yylineno+1,first_column:this.yylloc.first_column,last_column:n?(n.length===r.length?this.yylloc.first_column:0)+r[r.length-n.length].length-n[0].length:this.yylloc.first_column-e},this.options.ranges&&(this.yylloc.range=[i[0],i[0]+this.yyleng-e]),this.yyleng=this.yytext.length,this},more:function(){return this._more=!0,this},reject:function(){return this.options.backtrack_lexer?(this._backtrack=!0,this):this.parseError("Lexical error on line "+(this.yylineno+1)+". 
You can only invoke reject() in the lexer when the lexer is of the backtracking persuasion (options.backtrack_lexer = true).\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},less:function(t){this.unput(this.match.slice(t))},pastInput:function(){var t=this.matched.substr(0,this.matched.length-this.match.length);return(t.length>20?"...":"")+t.substr(-20).replace(/\n/g,"")},upcomingInput:function(){var t=this.match;return t.length<20&&(t+=this._input.substr(0,20-t.length)),(t.substr(0,20)+(t.length>20?"...":"")).replace(/\n/g,"")},showPosition:function(){var t=this.pastInput(),e=new Array(t.length+1).join("-");return t+this.upcomingInput()+"\n"+e+"^"},test_match:function(t,e){var n,r,i;if(this.options.backtrack_lexer&&(i={yylineno:this.yylineno,yylloc:{first_line:this.yylloc.first_line,last_line:this.last_line,first_column:this.yylloc.first_column,last_column:this.yylloc.last_column},yytext:this.yytext,match:this.match,matches:this.matches,matched:this.matched,yyleng:this.yyleng,offset:this.offset,_more:this._more,_input:this._input,yy:this.yy,conditionStack:this.conditionStack.slice(0),done:this.done},this.options.ranges&&(i.yylloc.range=this.yylloc.range.slice(0))),(r=t[0].match(/(?:\r\n?|\n).*/g))&&(this.yylineno+=r.length),this.yylloc={first_line:this.yylloc.last_line,last_line:this.yylineno+1,first_column:this.yylloc.last_column,last_column:r?r[r.length-1].length-r[r.length-1].match(/\r?\n?/)[0].length:this.yylloc.last_column+t[0].length},this.yytext+=t[0],this.match+=t[0],this.matches=t,this.yyleng=this.yytext.length,this.options.ranges&&(this.yylloc.range=[this.offset,this.offset+=this.yyleng]),this._more=!1,this._backtrack=!1,this._input=this._input.slice(t[0].length),this.matched+=t[0],n=this.performAction.call(this,this.yy,this,e,this.conditionStack[this.conditionStack.length-1]),this.done&&this._input&&(this.done=!1),n)return n;if(this._backtrack){for(var a in i)this[a]=i[a];return!1}return!1},next:function(){if(this.done)return this.EOF;var t,e,n,r;this._input||(this.done=!0),this._more||(this.yytext="",this.match="");for(var i=this._currentRules(),a=0;ae[0].length)){if(e=n,r=a,this.options.backtrack_lexer){if(!1!==(t=this.test_match(n,i[a])))return t;if(this._backtrack){e=!1;continue}return!1}if(!this.options.flex)break}return e?!1!==(t=this.test_match(e,i[r]))&&t:""===this._input?this.EOF:this.parseError("Lexical error on line "+(this.yylineno+1)+". 
Unrecognized text.\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},lex:function(){var t=this.next();return t||this.lex()},begin:function(t){this.conditionStack.push(t)},popState:function(){return this.conditionStack.length-1>0?this.conditionStack.pop():this.conditionStack[0]},_currentRules:function(){return this.conditionStack.length&&this.conditionStack[this.conditionStack.length-1]?this.conditions[this.conditionStack[this.conditionStack.length-1]].rules:this.conditions.INITIAL.rules},topState:function(t){return(t=this.conditionStack.length-1-Math.abs(t||0))>=0?this.conditionStack[t]:"INITIAL"},pushState:function(t){this.begin(t)},stateStackSize:function(){return this.conditionStack.length},options:{"case-insensitive":!0},performAction:function(t,e,n,r){switch(n){case 0:return this.begin("open_directive"),56;case 1:return this.begin("type_directive"),57;case 2:return this.popState(),this.begin("arg_directive"),14;case 3:return this.popState(),this.popState(),59;case 4:return 58;case 5:return 5;case 6:case 7:case 8:case 9:case 10:break;case 11:return this.begin("ID"),16;case 12:return e.yytext=e.yytext.trim(),this.begin("ALIAS"),48;case 13:return this.popState(),this.popState(),this.begin("LINE"),18;case 14:return this.popState(),this.popState(),5;case 15:return this.begin("LINE"),27;case 16:return this.begin("LINE"),29;case 17:return this.begin("LINE"),30;case 18:return this.begin("LINE"),31;case 19:return this.begin("LINE"),36;case 20:return this.begin("LINE"),33;case 21:return this.begin("LINE"),35;case 22:return this.popState(),19;case 23:return 28;case 24:return 43;case 25:return 44;case 26:return 39;case 27:return 37;case 28:return this.begin("ID"),22;case 29:return this.begin("ID"),23;case 30:return 25;case 31:return 7;case 32:return 21;case 33:return 42;case 34:return 5;case 35:return e.yytext=e.yytext.trim(),48;case 36:return 51;case 37:return 52;case 38:return 49;case 39:return 50;case 40:return 53;case 41:return 54;case 42:return 55;case 43:return 46;case 44:return 47;case 45:return 5;case 46:return"INVALID"}},rules:[/^(?:%%\{)/i,/^(?:((?:(?!\}%%)[^:.])*))/i,/^(?::)/i,/^(?:\}%%)/i,/^(?:((?:(?!\}%%).|\n)*))/i,/^(?:[\n]+)/i,/^(?:\s+)/i,/^(?:((?!\n)\s)+)/i,/^(?:#[^\n]*)/i,/^(?:%(?!\{)[^\n]*)/i,/^(?:[^\}]%%[^\n]*)/i,/^(?:participant\b)/i,/^(?:[^\->:\n,;]+?(?=((?!\n)\s)+as(?!\n)\s|[#\n;]|$))/i,/^(?:as\b)/i,/^(?:(?:))/i,/^(?:loop\b)/i,/^(?:rect\b)/i,/^(?:opt\b)/i,/^(?:alt\b)/i,/^(?:else\b)/i,/^(?:par\b)/i,/^(?:and\b)/i,/^(?:(?:[:]?(?:no)?wrap)?[^#\n;]*)/i,/^(?:end\b)/i,/^(?:left of\b)/i,/^(?:right of\b)/i,/^(?:over\b)/i,/^(?:note\b)/i,/^(?:activate\b)/i,/^(?:deactivate\b)/i,/^(?:title\b)/i,/^(?:sequenceDiagram\b)/i,/^(?:autonumber\b)/i,/^(?:,)/i,/^(?:;)/i,/^(?:[^\+\->:\n,;]+((?!(-x|--x))[\-]*[^\+\->:\n,;]+)*)/i,/^(?:->>)/i,/^(?:-->>)/i,/^(?:->)/i,/^(?:-->)/i,/^(?:-[x])/i,/^(?:--[x])/i,/^(?::(?:(?:no)?wrap)?[^#\n;]+)/i,/^(?:\+)/i,/^(?:-)/i,/^(?:$)/i,/^(?:.)/i],conditions:{open_directive:{rules:[1,8],inclusive:!1},type_directive:{rules:[2,3,8],inclusive:!1},arg_directive:{rules:[3,4,8],inclusive:!1},ID:{rules:[7,8,12],inclusive:!1},ALIAS:{rules:[7,8,13,14],inclusive:!1},LINE:{rules:[7,8,22],inclusive:!1},INITIAL:{rules:[0,5,6,8,9,10,11,15,16,17,18,19,20,21,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46],inclusive:!0}}};function O(){this.yy={}}return A.lexer=M,O.prototype=A,A.Parser=O,new O}();e.parser=i,e.Parser=i.Parser,e.parse=function(){return i.parse.apply(i,arguments)},e.main=function(r){r[1]||(console.log("Usage: "+r[0]+" 
FILE"),t.exit(1));var i=n(19).readFileSync(n(20).normalize(r[1]),"utf8");return e.parser.parse(i)},n.c[n.s]===r&&e.main(t.argv.slice(1))}).call(this,n(14),n(7)(t))},function(t,e,n){var r=n(198);t.exports={Graph:r.Graph,json:n(301),alg:n(302),version:r.version}},function(t,e,n){var r;try{r={cloneDeep:n(313),constant:n(86),defaults:n(154),each:n(87),filter:n(128),find:n(314),flatten:n(156),forEach:n(126),forIn:n(319),has:n(93),isUndefined:n(139),last:n(320),map:n(140),mapValues:n(321),max:n(322),merge:n(324),min:n(329),minBy:n(330),now:n(331),pick:n(161),range:n(162),reduce:n(142),sortBy:n(338),uniqueId:n(163),values:n(147),zipObject:n(343)}}catch(t){}r||(r=window._),t.exports=r},function(t,e){var n=Array.isArray;t.exports=n},function(t,e,n){ -/** - * @license - * Copyright (c) 2012-2013 Chris Pettitt - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. 
- */ -t.exports={graphlib:n(311),dagre:n(153),intersect:n(368),render:n(370),util:n(12),version:n(382)}},function(t,e){t.exports=function(t){return t.webpackPolyfill||(t.deprecate=function(){},t.paths=[],t.children||(t.children=[]),Object.defineProperty(t,"loaded",{enumerable:!0,get:function(){return t.l}}),Object.defineProperty(t,"id",{enumerable:!0,get:function(){return t.i}}),t.webpackPolyfill=1),t}},function(t,e,n){"use strict";var r=n(4),i=n(17).Graph;function a(t,e,n,i){var a;do{a=r.uniqueId(i)}while(t.hasNode(a));return n.dummy=e,t.setNode(a,n),a}function o(t){return r.max(r.map(t.nodes(),(function(e){var n=t.node(e).rank;if(!r.isUndefined(n))return n})))}t.exports={addDummyNode:a,simplify:function(t){var e=(new i).setGraph(t.graph());return r.forEach(t.nodes(),(function(n){e.setNode(n,t.node(n))})),r.forEach(t.edges(),(function(n){var r=e.edge(n.v,n.w)||{weight:0,minlen:1},i=t.edge(n);e.setEdge(n.v,n.w,{weight:r.weight+i.weight,minlen:Math.max(r.minlen,i.minlen)})})),e},asNonCompoundGraph:function(t){var e=new i({multigraph:t.isMultigraph()}).setGraph(t.graph());return r.forEach(t.nodes(),(function(n){t.children(n).length||e.setNode(n,t.node(n))})),r.forEach(t.edges(),(function(n){e.setEdge(n,t.edge(n))})),e},successorWeights:function(t){var e=r.map(t.nodes(),(function(e){var n={};return r.forEach(t.outEdges(e),(function(e){n[e.w]=(n[e.w]||0)+t.edge(e).weight})),n}));return r.zipObject(t.nodes(),e)},predecessorWeights:function(t){var e=r.map(t.nodes(),(function(e){var n={};return r.forEach(t.inEdges(e),(function(e){n[e.v]=(n[e.v]||0)+t.edge(e).weight})),n}));return r.zipObject(t.nodes(),e)},intersectRect:function(t,e){var n,r,i=t.x,a=t.y,o=e.x-i,s=e.y-a,c=t.width/2,u=t.height/2;if(!o&&!s)throw new Error("Not possible to find intersection inside of the rectangle");Math.abs(s)*c>Math.abs(o)*u?(s<0&&(u=-u),n=u*o/s,r=u):(o<0&&(c=-c),n=c,r=c*s/o);return{x:i+n,y:a+r}},buildLayerMatrix:function(t){var e=r.map(r.range(o(t)+1),(function(){return[]}));return r.forEach(t.nodes(),(function(n){var i=t.node(n),a=i.rank;r.isUndefined(a)||(e[a][i.order]=n)})),e},normalizeRanks:function(t){var e=r.min(r.map(t.nodes(),(function(e){return t.node(e).rank})));r.forEach(t.nodes(),(function(n){var i=t.node(n);r.has(i,"rank")&&(i.rank-=e)}))},removeEmptyRanks:function(t){var e=r.min(r.map(t.nodes(),(function(e){return t.node(e).rank}))),n=[];r.forEach(t.nodes(),(function(r){var i=t.node(r).rank-e;n[i]||(n[i]=[]),n[i].push(r)}));var i=0,a=t.graph().nodeRankFactor;r.forEach(n,(function(e,n){r.isUndefined(e)&&n%a!=0?--i:i&&r.forEach(e,(function(e){t.node(e).rank+=i}))}))},addBorderNode:function(t,e,n,r){var i={width:0,height:0};arguments.length>=4&&(i.rank=n,i.order=r);return a(t,"border",i,e)},maxRank:o,partition:function(t,e){var n={lhs:[],rhs:[]};return r.forEach(t,(function(t){e(t)?n.lhs.push(t):n.rhs.push(t)})),n},time:function(t,e){var n=r.now();try{return e()}finally{console.log(t+" time: "+(r.now()-n)+"ms")}},notime:function(t,e){return e()}}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(173),i=n(174),a=n(175),o={channel:r.default,lang:i.default,unit:a.default};e.default=o},function(t,e,n){var r;try{r={clone:n(199),constant:n(86),each:n(87),filter:n(128),has:n(93),isArray:n(5),isEmpty:n(276),isFunction:n(37),isUndefined:n(139),keys:n(30),map:n(140),reduce:n(142),size:n(279),transform:n(285),union:n(286),values:n(147)}}catch(t){}r||(r=window._),t.exports=r},function(t,e){t.exports=function(t){var e=typeof t;return 
null!=t&&("object"==e||"function"==e)}},function(t,e,n){var r=n(43);t.exports={isSubgraph:function(t,e){return!!t.children(e).length},edgeToId:function(t){return a(t.v)+":"+a(t.w)+":"+a(t.name)},applyStyle:function(t,e){e&&t.attr("style",e)},applyClass:function(t,e,n){e&&t.attr("class",e).attr("class",n+" "+t.attr("class"))},applyTransition:function(t,e){var n=e.graph();if(r.isPlainObject(n)){var i=n.transition;if(r.isFunction(i))return i(t)}return t}};var i=/:/g;function a(t){return t?String(t).replace(i,"\\:"):""}},function(t,e,n){(function(t,r){var i=function(){var t=function(t,e,n,r){for(n=n||{},r=t.length;r--;n[t[r]]=e);return n},e=[1,7],n=[1,6],r=[1,14],i=[1,25],a=[1,28],o=[1,26],s=[1,27],c=[1,29],u=[1,30],l=[1,31],h=[1,33],f=[1,34],d=[1,35],p=[10,19],g=[1,47],y=[1,48],v=[1,49],m=[1,50],b=[1,51],x=[1,52],_=[10,19,25,32,33,41,44,45,46,47,48,49],k=[10,19,23,25,32,33,37,41,44,45,46,47,48,49,66,67,68],w=[10,13,17,19],E=[41,66,67,68],T=[41,48,49,66,67,68],C=[41,44,45,46,47,66,67,68],S=[10,19,25],A=[1,81],M={trace:function(){},yy:{},symbols_:{error:2,start:3,mermaidDoc:4,directive:5,graphConfig:6,openDirective:7,typeDirective:8,closeDirective:9,NEWLINE:10,":":11,argDirective:12,open_directive:13,type_directive:14,arg_directive:15,close_directive:16,CLASS_DIAGRAM:17,statements:18,EOF:19,statement:20,className:21,alphaNumToken:22,GENERICTYPE:23,relationStatement:24,LABEL:25,classStatement:26,methodStatement:27,annotationStatement:28,clickStatement:29,cssClassStatement:30,CLASS:31,STYLE_SEPARATOR:32,STRUCT_START:33,members:34,STRUCT_STOP:35,ANNOTATION_START:36,ANNOTATION_END:37,MEMBER:38,SEPARATOR:39,relation:40,STR:41,relationType:42,lineType:43,AGGREGATION:44,EXTENSION:45,COMPOSITION:46,DEPENDENCY:47,LINE:48,DOTTED_LINE:49,CALLBACK:50,LINK:51,CSSCLASS:52,commentToken:53,textToken:54,graphCodeTokens:55,textNoTagsToken:56,TAGSTART:57,TAGEND:58,"==":59,"--":60,PCT:61,DEFAULT:62,SPACE:63,MINUS:64,keywords:65,UNICODE_TEXT:66,NUM:67,ALPHA:68,$accept:0,$end:1},terminals_:{2:"error",10:"NEWLINE",11:":",13:"open_directive",14:"type_directive",15:"arg_directive",16:"close_directive",17:"CLASS_DIAGRAM",19:"EOF",23:"GENERICTYPE",25:"LABEL",31:"CLASS",32:"STYLE_SEPARATOR",33:"STRUCT_START",35:"STRUCT_STOP",36:"ANNOTATION_START",37:"ANNOTATION_END",38:"MEMBER",39:"SEPARATOR",41:"STR",44:"AGGREGATION",45:"EXTENSION",46:"COMPOSITION",47:"DEPENDENCY",48:"LINE",49:"DOTTED_LINE",50:"CALLBACK",51:"LINK",52:"CSSCLASS",55:"graphCodeTokens",57:"TAGSTART",58:"TAGEND",59:"==",60:"--",61:"PCT",62:"DEFAULT",63:"SPACE",64:"MINUS",65:"keywords",66:"UNICODE_TEXT",67:"NUM",68:"ALPHA"},productions_:[0,[3,1],[3,2],[4,1],[5,4],[5,6],[7,1],[8,1],[12,1],[9,1],[6,4],[18,1],[18,2],[18,3],[21,1],[21,2],[21,3],[21,2],[20,1],[20,2],[20,1],[20,1],[20,1],[20,1],[20,1],[20,1],[26,2],[26,4],[26,5],[26,7],[28,4],[34,1],[34,2],[27,1],[27,2],[27,1],[27,1],[24,3],[24,4],[24,4],[24,5],[40,3],[40,2],[40,2],[40,1],[42,1],[42,1],[42,1],[42,1],[43,1],[43,1],[29,3],[29,4],[29,3],[29,4],[30,3],[53,1],[53,1],[54,1],[54,1],[54,1],[54,1],[54,1],[54,1],[54,1],[56,1],[56,1],[56,1],[56,1],[22,1],[22,1],[22,1]],performAction:function(t,e,n,r,i,a,o){var s=a.length-1;switch(i){case 6:r.parseDirective("%%{","open_directive");break;case 7:r.parseDirective(a[s],"type_directive");break;case 8:a[s]=a[s].trim().replace(/'/g,'"'),r.parseDirective(a[s],"arg_directive");break;case 9:r.parseDirective("}%%","close_directive","class");break;case 14:this.$=a[s];break;case 15:this.$=a[s-1]+a[s];break;case 16:this.$=a[s-2]+"~"+a[s-1]+a[s];break;case 
17:this.$=a[s-1]+"~"+a[s];break;case 18:r.addRelation(a[s]);break;case 19:a[s-1].title=r.cleanupLabel(a[s]),r.addRelation(a[s-1]);break;case 26:r.addClass(a[s]);break;case 27:r.addClass(a[s-2]),r.setCssClass(a[s-2],a[s]);break;case 28:r.addClass(a[s-3]),r.addMembers(a[s-3],a[s-1]);break;case 29:r.addClass(a[s-5]),r.setCssClass(a[s-5],a[s-3]),r.addMembers(a[s-5],a[s-1]);break;case 30:r.addAnnotation(a[s],a[s-2]);break;case 31:this.$=[a[s]];break;case 32:a[s].push(a[s-1]),this.$=a[s];break;case 33:break;case 34:r.addMember(a[s-1],r.cleanupLabel(a[s]));break;case 35:case 36:break;case 37:this.$={id1:a[s-2],id2:a[s],relation:a[s-1],relationTitle1:"none",relationTitle2:"none"};break;case 38:this.$={id1:a[s-3],id2:a[s],relation:a[s-1],relationTitle1:a[s-2],relationTitle2:"none"};break;case 39:this.$={id1:a[s-3],id2:a[s],relation:a[s-2],relationTitle1:"none",relationTitle2:a[s-1]};break;case 40:this.$={id1:a[s-4],id2:a[s],relation:a[s-2],relationTitle1:a[s-3],relationTitle2:a[s-1]};break;case 41:this.$={type1:a[s-2],type2:a[s],lineType:a[s-1]};break;case 42:this.$={type1:"none",type2:a[s],lineType:a[s-1]};break;case 43:this.$={type1:a[s-1],type2:"none",lineType:a[s]};break;case 44:this.$={type1:"none",type2:"none",lineType:a[s]};break;case 45:this.$=r.relationType.AGGREGATION;break;case 46:this.$=r.relationType.EXTENSION;break;case 47:this.$=r.relationType.COMPOSITION;break;case 48:this.$=r.relationType.DEPENDENCY;break;case 49:this.$=r.lineType.LINE;break;case 50:this.$=r.lineType.DOTTED_LINE;break;case 51:this.$=a[s-2],r.setClickEvent(a[s-1],a[s],void 0);break;case 52:this.$=a[s-3],r.setClickEvent(a[s-2],a[s-1],a[s]);break;case 53:this.$=a[s-2],r.setLink(a[s-1],a[s],void 0);break;case 54:this.$=a[s-3],r.setLink(a[s-2],a[s-1],a[s]);break;case 55:r.setCssClass(a[s-1],a[s])}},table:[{3:1,4:2,5:3,6:4,7:5,13:e,17:n},{1:[3]},{1:[2,1]},{3:8,4:2,5:3,6:4,7:5,13:e,17:n},{1:[2,3]},{8:9,14:[1,10]},{10:[1,11]},{14:[2,6]},{1:[2,2]},{9:12,11:[1,13],16:r},t([11,16],[2,7]),{5:23,7:5,13:e,18:15,20:16,21:24,22:32,24:17,26:18,27:19,28:20,29:21,30:22,31:i,36:a,38:o,39:s,50:c,51:u,52:l,66:h,67:f,68:d},{10:[1,36]},{12:37,15:[1,38]},{10:[2,9]},{19:[1,39]},{10:[1,40],19:[2,11]},t(p,[2,18],{25:[1,41]}),t(p,[2,20]),t(p,[2,21]),t(p,[2,22]),t(p,[2,23]),t(p,[2,24]),t(p,[2,25]),t(p,[2,33],{40:42,42:45,43:46,25:[1,44],41:[1,43],44:g,45:y,46:v,47:m,48:b,49:x}),{21:53,22:32,66:h,67:f,68:d},t(p,[2,35]),t(p,[2,36]),{22:54,66:h,67:f,68:d},{21:55,22:32,66:h,67:f,68:d},{21:56,22:32,66:h,67:f,68:d},{41:[1,57]},t(_,[2,14],{22:32,21:58,23:[1,59],66:h,67:f,68:d}),t(k,[2,69]),t(k,[2,70]),t(k,[2,71]),t(w,[2,4]),{9:60,16:r},{16:[2,8]},{1:[2,10]},{5:23,7:5,13:e,18:61,19:[2,12],20:16,21:24,22:32,24:17,26:18,27:19,28:20,29:21,30:22,31:i,36:a,38:o,39:s,50:c,51:u,52:l,66:h,67:f,68:d},t(p,[2,19]),{21:62,22:32,41:[1,63],66:h,67:f,68:d},{40:64,42:45,43:46,44:g,45:y,46:v,47:m,48:b,49:x},t(p,[2,34]),{43:65,48:b,49:x},t(E,[2,44],{42:66,44:g,45:y,46:v,47:m}),t(T,[2,45]),t(T,[2,46]),t(T,[2,47]),t(T,[2,48]),t(C,[2,49]),t(C,[2,50]),t(p,[2,26],{32:[1,67],33:[1,68]}),{37:[1,69]},{41:[1,70]},{41:[1,71]},{22:72,66:h,67:f,68:d},t(_,[2,15]),t(_,[2,17],{22:32,21:73,66:h,67:f,68:d}),{10:[1,74]},{19:[2,13]},t(S,[2,37]),{21:75,22:32,66:h,67:f,68:d},{21:76,22:32,41:[1,77],66:h,67:f,68:d},t(E,[2,43],{42:78,44:g,45:y,46:v,47:m}),t(E,[2,42]),{22:79,66:h,67:f,68:d},{34:80,38:A},{21:82,22:32,66:h,67:f,68:d},t(p,[2,51],{41:[1,83]}),t(p,[2,53],{41:[1,84]}),t(p,[2,55]),t(_,[2,16]),t(w,[2,5]),t(S,[2,39]),t(S,[2,38]),{21:85,22:32,66:h,67:f,68:d},t(E,[2,41]),t(p,[2,27],{33:[1,
86]}),{35:[1,87]},{34:88,35:[2,31],38:A},t(p,[2,30]),t(p,[2,52]),t(p,[2,54]),t(S,[2,40]),{34:89,38:A},t(p,[2,28]),{35:[2,32]},{35:[1,90]},t(p,[2,29])],defaultActions:{2:[2,1],4:[2,3],7:[2,6],8:[2,2],14:[2,9],38:[2,8],39:[2,10],61:[2,13],88:[2,32]},parseError:function(t,e){if(!e.recoverable){var n=new Error(t);throw n.hash=e,n}this.trace(t)},parse:function(t){var e=this,n=[0],r=[],i=[null],a=[],o=this.table,s="",c=0,u=0,l=0,h=2,f=1,d=a.slice.call(arguments,1),p=Object.create(this.lexer),g={yy:{}};for(var y in this.yy)Object.prototype.hasOwnProperty.call(this.yy,y)&&(g.yy[y]=this.yy[y]);p.setInput(t,g.yy),g.yy.lexer=p,g.yy.parser=this,void 0===p.yylloc&&(p.yylloc={});var v=p.yylloc;a.push(v);var m=p.options&&p.options.ranges;function b(){var t;return"number"!=typeof(t=r.pop()||p.lex()||f)&&(t instanceof Array&&(t=(r=t).pop()),t=e.symbols_[t]||t),t}"function"==typeof g.yy.parseError?this.parseError=g.yy.parseError:this.parseError=Object.getPrototypeOf(this).parseError;for(var x,_,k,w,E,T,C,S,A,M={};;){if(k=n[n.length-1],this.defaultActions[k]?w=this.defaultActions[k]:(null==x&&(x=b()),w=o[k]&&o[k][x]),void 0===w||!w.length||!w[0]){var O="";for(T in A=[],o[k])this.terminals_[T]&&T>h&&A.push("'"+this.terminals_[T]+"'");O=p.showPosition?"Parse error on line "+(c+1)+":\n"+p.showPosition()+"\nExpecting "+A.join(", ")+", got '"+(this.terminals_[x]||x)+"'":"Parse error on line "+(c+1)+": Unexpected "+(x==f?"end of input":"'"+(this.terminals_[x]||x)+"'"),this.parseError(O,{text:p.match,token:this.terminals_[x]||x,line:p.yylineno,loc:v,expected:A})}if(w[0]instanceof Array&&w.length>1)throw new Error("Parse Error: multiple actions possible at state: "+k+", token: "+x);switch(w[0]){case 1:n.push(x),i.push(p.yytext),a.push(p.yylloc),n.push(w[1]),x=null,_?(x=_,_=null):(u=p.yyleng,s=p.yytext,c=p.yylineno,v=p.yylloc,l>0&&l--);break;case 2:if(C=this.productions_[w[1]][1],M.$=i[i.length-C],M._$={first_line:a[a.length-(C||1)].first_line,last_line:a[a.length-1].last_line,first_column:a[a.length-(C||1)].first_column,last_column:a[a.length-1].last_column},m&&(M._$.range=[a[a.length-(C||1)].range[0],a[a.length-1].range[1]]),void 0!==(E=this.performAction.apply(M,[s,u,c,g.yy,w[1],i,a].concat(d))))return E;C&&(n=n.slice(0,-1*C*2),i=i.slice(0,-1*C),a=a.slice(0,-1*C)),n.push(this.productions_[w[1]][0]),i.push(M.$),a.push(M._$),S=o[n[n.length-2]][n[n.length-1]],n.push(S);break;case 3:return!0}}return!0}},O={EOF:1,parseError:function(t,e){if(!this.yy.parser)throw new Error(t);this.yy.parser.parseError(t,e)},setInput:function(t,e){return this.yy=e||this.yy||{},this._input=t,this._more=this._backtrack=this.done=!1,this.yylineno=this.yyleng=0,this.yytext=this.matched=this.match="",this.conditionStack=["INITIAL"],this.yylloc={first_line:1,first_column:0,last_line:1,last_column:0},this.options.ranges&&(this.yylloc.range=[0,0]),this.offset=0,this},input:function(){var t=this._input[0];return this.yytext+=t,this.yyleng++,this.offset++,this.match+=t,this.matched+=t,t.match(/(?:\r\n?|\n).*/g)?(this.yylineno++,this.yylloc.last_line++):this.yylloc.last_column++,this.options.ranges&&this.yylloc.range[1]++,this._input=this._input.slice(1),t},unput:function(t){var e=t.length,n=t.split(/(?:\r\n?|\n)/g);this._input=t+this._input,this.yytext=this.yytext.substr(0,this.yytext.length-e),this.offset-=e;var r=this.match.split(/(?:\r\n?|\n)/g);this.match=this.match.substr(0,this.match.length-1),this.matched=this.matched.substr(0,this.matched.length-1),n.length-1&&(this.yylineno-=n.length-1);var i=this.yylloc.range;return 
this.yylloc={first_line:this.yylloc.first_line,last_line:this.yylineno+1,first_column:this.yylloc.first_column,last_column:n?(n.length===r.length?this.yylloc.first_column:0)+r[r.length-n.length].length-n[0].length:this.yylloc.first_column-e},this.options.ranges&&(this.yylloc.range=[i[0],i[0]+this.yyleng-e]),this.yyleng=this.yytext.length,this},more:function(){return this._more=!0,this},reject:function(){return this.options.backtrack_lexer?(this._backtrack=!0,this):this.parseError("Lexical error on line "+(this.yylineno+1)+". You can only invoke reject() in the lexer when the lexer is of the backtracking persuasion (options.backtrack_lexer = true).\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},less:function(t){this.unput(this.match.slice(t))},pastInput:function(){var t=this.matched.substr(0,this.matched.length-this.match.length);return(t.length>20?"...":"")+t.substr(-20).replace(/\n/g,"")},upcomingInput:function(){var t=this.match;return t.length<20&&(t+=this._input.substr(0,20-t.length)),(t.substr(0,20)+(t.length>20?"...":"")).replace(/\n/g,"")},showPosition:function(){var t=this.pastInput(),e=new Array(t.length+1).join("-");return t+this.upcomingInput()+"\n"+e+"^"},test_match:function(t,e){var n,r,i;if(this.options.backtrack_lexer&&(i={yylineno:this.yylineno,yylloc:{first_line:this.yylloc.first_line,last_line:this.last_line,first_column:this.yylloc.first_column,last_column:this.yylloc.last_column},yytext:this.yytext,match:this.match,matches:this.matches,matched:this.matched,yyleng:this.yyleng,offset:this.offset,_more:this._more,_input:this._input,yy:this.yy,conditionStack:this.conditionStack.slice(0),done:this.done},this.options.ranges&&(i.yylloc.range=this.yylloc.range.slice(0))),(r=t[0].match(/(?:\r\n?|\n).*/g))&&(this.yylineno+=r.length),this.yylloc={first_line:this.yylloc.last_line,last_line:this.yylineno+1,first_column:this.yylloc.last_column,last_column:r?r[r.length-1].length-r[r.length-1].match(/\r?\n?/)[0].length:this.yylloc.last_column+t[0].length},this.yytext+=t[0],this.match+=t[0],this.matches=t,this.yyleng=this.yytext.length,this.options.ranges&&(this.yylloc.range=[this.offset,this.offset+=this.yyleng]),this._more=!1,this._backtrack=!1,this._input=this._input.slice(t[0].length),this.matched+=t[0],n=this.performAction.call(this,this.yy,this,e,this.conditionStack[this.conditionStack.length-1]),this.done&&this._input&&(this.done=!1),n)return n;if(this._backtrack){for(var a in i)this[a]=i[a];return!1}return!1},next:function(){if(this.done)return this.EOF;var t,e,n,r;this._input||(this.done=!0),this._more||(this.yytext="",this.match="");for(var i=this._currentRules(),a=0;ae[0].length)){if(e=n,r=a,this.options.backtrack_lexer){if(!1!==(t=this.test_match(n,i[a])))return t;if(this._backtrack){e=!1;continue}return!1}if(!this.options.flex)break}return e?!1!==(t=this.test_match(e,i[r]))&&t:""===this._input?this.EOF:this.parseError("Lexical error on line "+(this.yylineno+1)+". 
Unrecognized text.\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},lex:function(){var t=this.next();return t||this.lex()},begin:function(t){this.conditionStack.push(t)},popState:function(){return this.conditionStack.length-1>0?this.conditionStack.pop():this.conditionStack[0]},_currentRules:function(){return this.conditionStack.length&&this.conditionStack[this.conditionStack.length-1]?this.conditions[this.conditionStack[this.conditionStack.length-1]].rules:this.conditions.INITIAL.rules},topState:function(t){return(t=this.conditionStack.length-1-Math.abs(t||0))>=0?this.conditionStack[t]:"INITIAL"},pushState:function(t){this.begin(t)},stateStackSize:function(){return this.conditionStack.length},options:{},performAction:function(t,e,n,r){switch(n){case 0:return this.begin("open_directive"),13;case 1:return this.begin("type_directive"),14;case 2:return this.popState(),this.begin("arg_directive"),11;case 3:return this.popState(),this.popState(),16;case 4:return 15;case 5:case 6:break;case 7:return 10;case 8:break;case 9:case 10:return 17;case 11:return this.begin("struct"),33;case 12:return"EOF_IN_STRUCT";case 13:return"OPEN_IN_STRUCT";case 14:return this.popState(),35;case 15:break;case 16:return"MEMBER";case 17:return 31;case 18:return 52;case 19:return 50;case 20:return 51;case 21:return 36;case 22:return 37;case 23:this.begin("generic");break;case 24:this.popState();break;case 25:return"GENERICTYPE";case 26:this.begin("string");break;case 27:this.popState();break;case 28:return"STR";case 29:case 30:return 45;case 31:case 32:return 47;case 33:return 46;case 34:return 44;case 35:return 48;case 36:return 49;case 37:return 25;case 38:return 32;case 39:return 64;case 40:return"DOT";case 41:return"PLUS";case 42:return 61;case 43:case 44:return"EQUALS";case 45:return 68;case 46:return"PUNCTUATION";case 47:return 67;case 48:return 66;case 49:return 63;case 50:return 
19}},rules:[/^(?:%%\{)/,/^(?:((?:(?!\}%%)[^:.])*))/,/^(?::)/,/^(?:\}%%)/,/^(?:((?:(?!\}%%).|\n)*))/,/^(?:%%(?!\{)*[^\n]*(\r?\n?)+)/,/^(?:%%[^\n]*(\r?\n)*)/,/^(?:(\r?\n)+)/,/^(?:\s+)/,/^(?:classDiagram-v2\b)/,/^(?:classDiagram\b)/,/^(?:[{])/,/^(?:$)/,/^(?:[{])/,/^(?:[}])/,/^(?:[\n])/,/^(?:[^{}\n]*)/,/^(?:class\b)/,/^(?:cssClass\b)/,/^(?:callback\b)/,/^(?:link\b)/,/^(?:<<)/,/^(?:>>)/,/^(?:[~])/,/^(?:[~])/,/^(?:[^~]*)/,/^(?:["])/,/^(?:["])/,/^(?:[^"]*)/,/^(?:\s*<\|)/,/^(?:\s*\|>)/,/^(?:\s*>)/,/^(?:\s*<)/,/^(?:\s*\*)/,/^(?:\s*o\b)/,/^(?:--)/,/^(?:\.\.)/,/^(?::{1}[^:\n;]+)/,/^(?::{3})/,/^(?:-)/,/^(?:\.)/,/^(?:\+)/,/^(?:%)/,/^(?:=)/,/^(?:=)/,/^(?:\w+)/,/^(?:[!"#$%&'*+,-.`?\\/])/,/^(?:[0-9]+)/,/^(?:[\u00AA\u00B5\u00BA\u00C0-\u00D6\u00D8-\u00F6]|[\u00F8-\u02C1\u02C6-\u02D1\u02E0-\u02E4\u02EC\u02EE\u0370-\u0374\u0376\u0377]|[\u037A-\u037D\u0386\u0388-\u038A\u038C\u038E-\u03A1\u03A3-\u03F5]|[\u03F7-\u0481\u048A-\u0527\u0531-\u0556\u0559\u0561-\u0587\u05D0-\u05EA]|[\u05F0-\u05F2\u0620-\u064A\u066E\u066F\u0671-\u06D3\u06D5\u06E5\u06E6\u06EE]|[\u06EF\u06FA-\u06FC\u06FF\u0710\u0712-\u072F\u074D-\u07A5\u07B1\u07CA-\u07EA]|[\u07F4\u07F5\u07FA\u0800-\u0815\u081A\u0824\u0828\u0840-\u0858\u08A0]|[\u08A2-\u08AC\u0904-\u0939\u093D\u0950\u0958-\u0961\u0971-\u0977]|[\u0979-\u097F\u0985-\u098C\u098F\u0990\u0993-\u09A8\u09AA-\u09B0\u09B2]|[\u09B6-\u09B9\u09BD\u09CE\u09DC\u09DD\u09DF-\u09E1\u09F0\u09F1\u0A05-\u0A0A]|[\u0A0F\u0A10\u0A13-\u0A28\u0A2A-\u0A30\u0A32\u0A33\u0A35\u0A36\u0A38\u0A39]|[\u0A59-\u0A5C\u0A5E\u0A72-\u0A74\u0A85-\u0A8D\u0A8F-\u0A91\u0A93-\u0AA8]|[\u0AAA-\u0AB0\u0AB2\u0AB3\u0AB5-\u0AB9\u0ABD\u0AD0\u0AE0\u0AE1\u0B05-\u0B0C]|[\u0B0F\u0B10\u0B13-\u0B28\u0B2A-\u0B30\u0B32\u0B33\u0B35-\u0B39\u0B3D\u0B5C]|[\u0B5D\u0B5F-\u0B61\u0B71\u0B83\u0B85-\u0B8A\u0B8E-\u0B90\u0B92-\u0B95\u0B99]|[\u0B9A\u0B9C\u0B9E\u0B9F\u0BA3\u0BA4\u0BA8-\u0BAA\u0BAE-\u0BB9\u0BD0]|[\u0C05-\u0C0C\u0C0E-\u0C10\u0C12-\u0C28\u0C2A-\u0C33\u0C35-\u0C39\u0C3D]|[\u0C58\u0C59\u0C60\u0C61\u0C85-\u0C8C\u0C8E-\u0C90\u0C92-\u0CA8\u0CAA-\u0CB3]|[\u0CB5-\u0CB9\u0CBD\u0CDE\u0CE0\u0CE1\u0CF1\u0CF2\u0D05-\u0D0C\u0D0E-\u0D10]|[\u0D12-\u0D3A\u0D3D\u0D4E\u0D60\u0D61\u0D7A-\u0D7F\u0D85-\u0D96\u0D9A-\u0DB1]|[\u0DB3-\u0DBB\u0DBD\u0DC0-\u0DC6\u0E01-\u0E30\u0E32\u0E33\u0E40-\u0E46\u0E81]|[\u0E82\u0E84\u0E87\u0E88\u0E8A\u0E8D\u0E94-\u0E97\u0E99-\u0E9F\u0EA1-\u0EA3]|[\u0EA5\u0EA7\u0EAA\u0EAB\u0EAD-\u0EB0\u0EB2\u0EB3\u0EBD\u0EC0-\u0EC4\u0EC6]|[\u0EDC-\u0EDF\u0F00\u0F40-\u0F47\u0F49-\u0F6C\u0F88-\u0F8C\u1000-\u102A]|[\u103F\u1050-\u1055\u105A-\u105D\u1061\u1065\u1066\u106E-\u1070\u1075-\u1081]|[\u108E\u10A0-\u10C5\u10C7\u10CD\u10D0-\u10FA\u10FC-\u1248\u124A-\u124D]|[\u1250-\u1256\u1258\u125A-\u125D\u1260-\u1288\u128A-\u128D\u1290-\u12B0]|[\u12B2-\u12B5\u12B8-\u12BE\u12C0\u12C2-\u12C5\u12C8-\u12D6\u12D8-\u1310]|[\u1312-\u1315\u1318-\u135A\u1380-\u138F\u13A0-\u13F4\u1401-\u166C]|[\u166F-\u167F\u1681-\u169A\u16A0-\u16EA\u1700-\u170C\u170E-\u1711]|[\u1720-\u1731\u1740-\u1751\u1760-\u176C\u176E-\u1770\u1780-\u17B3\u17D7]|[\u17DC\u1820-\u1877\u1880-\u18A8\u18AA\u18B0-\u18F5\u1900-\u191C]|[\u1950-\u196D\u1970-\u1974\u1980-\u19AB\u19C1-\u19C7\u1A00-\u1A16]|[\u1A20-\u1A54\u1AA7\u1B05-\u1B33\u1B45-\u1B4B\u1B83-\u1BA0\u1BAE\u1BAF]|[\u1BBA-\u1BE5\u1C00-\u1C23\u1C4D-\u1C4F\u1C5A-\u1C7D\u1CE9-\u1CEC]|[\u1CEE-\u1CF1\u1CF5\u1CF6\u1D00-\u1DBF\u1E00-\u1F15\u1F18-\u1F1D]|[\u1F20-\u1F45\u1F48-\u1F4D\u1F50-\u1F57\u1F59\u1F5B\u1F5D\u1F5F-\u1F7D]|[\u1F80-\u1FB4\u1FB6-\u1FBC\u1FBE\u1FC2-\u1FC4\u1FC6-\u1FCC\u1FD0-\u1FD3]|[\u1FD6-\u1FDB\u1FE0-\u1FEC\u1FF2-\u1FF4\u1FF6-\u1FFC\u2071\u207F]
|[\u2090-\u209C\u2102\u2107\u210A-\u2113\u2115\u2119-\u211D\u2124\u2126\u2128]|[\u212A-\u212D\u212F-\u2139\u213C-\u213F\u2145-\u2149\u214E\u2183\u2184]|[\u2C00-\u2C2E\u2C30-\u2C5E\u2C60-\u2CE4\u2CEB-\u2CEE\u2CF2\u2CF3]|[\u2D00-\u2D25\u2D27\u2D2D\u2D30-\u2D67\u2D6F\u2D80-\u2D96\u2DA0-\u2DA6]|[\u2DA8-\u2DAE\u2DB0-\u2DB6\u2DB8-\u2DBE\u2DC0-\u2DC6\u2DC8-\u2DCE]|[\u2DD0-\u2DD6\u2DD8-\u2DDE\u2E2F\u3005\u3006\u3031-\u3035\u303B\u303C]|[\u3041-\u3096\u309D-\u309F\u30A1-\u30FA\u30FC-\u30FF\u3105-\u312D]|[\u3131-\u318E\u31A0-\u31BA\u31F0-\u31FF\u3400-\u4DB5\u4E00-\u9FCC]|[\uA000-\uA48C\uA4D0-\uA4FD\uA500-\uA60C\uA610-\uA61F\uA62A\uA62B]|[\uA640-\uA66E\uA67F-\uA697\uA6A0-\uA6E5\uA717-\uA71F\uA722-\uA788]|[\uA78B-\uA78E\uA790-\uA793\uA7A0-\uA7AA\uA7F8-\uA801\uA803-\uA805]|[\uA807-\uA80A\uA80C-\uA822\uA840-\uA873\uA882-\uA8B3\uA8F2-\uA8F7\uA8FB]|[\uA90A-\uA925\uA930-\uA946\uA960-\uA97C\uA984-\uA9B2\uA9CF\uAA00-\uAA28]|[\uAA40-\uAA42\uAA44-\uAA4B\uAA60-\uAA76\uAA7A\uAA80-\uAAAF\uAAB1\uAAB5]|[\uAAB6\uAAB9-\uAABD\uAAC0\uAAC2\uAADB-\uAADD\uAAE0-\uAAEA\uAAF2-\uAAF4]|[\uAB01-\uAB06\uAB09-\uAB0E\uAB11-\uAB16\uAB20-\uAB26\uAB28-\uAB2E]|[\uABC0-\uABE2\uAC00-\uD7A3\uD7B0-\uD7C6\uD7CB-\uD7FB\uF900-\uFA6D]|[\uFA70-\uFAD9\uFB00-\uFB06\uFB13-\uFB17\uFB1D\uFB1F-\uFB28\uFB2A-\uFB36]|[\uFB38-\uFB3C\uFB3E\uFB40\uFB41\uFB43\uFB44\uFB46-\uFBB1\uFBD3-\uFD3D]|[\uFD50-\uFD8F\uFD92-\uFDC7\uFDF0-\uFDFB\uFE70-\uFE74\uFE76-\uFEFC]|[\uFF21-\uFF3A\uFF41-\uFF5A\uFF66-\uFFBE\uFFC2-\uFFC7\uFFCA-\uFFCF]|[\uFFD2-\uFFD7\uFFDA-\uFFDC])/,/^(?:\s)/,/^(?:$)/],conditions:{string:{rules:[27,28],inclusive:!1},generic:{rules:[24,25],inclusive:!1},struct:{rules:[12,13,14,15,16],inclusive:!1},open_directive:{rules:[1],inclusive:!1},type_directive:{rules:[2,3],inclusive:!1},arg_directive:{rules:[3,4],inclusive:!1},INITIAL:{rules:[0,5,6,7,8,9,10,11,17,18,19,20,21,22,23,26,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],inclusive:!0}}};function D(){this.yy={}}return M.lexer=O,D.prototype=M,M.Parser=D,new D}();e.parser=i,e.Parser=i.Parser,e.parse=function(){return i.parse.apply(i,arguments)},e.main=function(r){r[1]||(console.log("Usage: "+r[0]+" FILE"),t.exit(1));var i=n(19).readFileSync(n(20).normalize(r[1]),"utf8");return e.parser.parse(i)},n.c[n.s]===r&&e.main(t.argv.slice(1))}).call(this,n(14),n(7)(t))},function(t,e){var n,r,i=t.exports={};function a(){throw new Error("setTimeout has not been defined")}function o(){throw new Error("clearTimeout has not been defined")}function s(t){if(n===setTimeout)return setTimeout(t,0);if((n===a||!n)&&setTimeout)return n=setTimeout,setTimeout(t,0);try{return n(t,0)}catch(e){try{return n.call(null,t,0)}catch(e){return n.call(this,t,0)}}}!function(){try{n="function"==typeof setTimeout?setTimeout:a}catch(t){n=a}try{r="function"==typeof clearTimeout?clearTimeout:o}catch(t){r=o}}();var c,u=[],l=!1,h=-1;function f(){l&&c&&(l=!1,c.length?u=c.concat(u):h=-1,u.length&&d())}function d(){if(!l){var t=s(f);l=!0;for(var e=u.length;e;){for(c=u,u=[];++h1)for(var n=1;n=0;r--){var i=t[r];"."===i?t.splice(r,1):".."===i?(t.splice(r,1),n++):n&&(t.splice(r,1),n--)}if(e)for(;n--;n)t.unshift("..");return t}function r(t,e){if(t.filter)return t.filter(e);for(var n=[],r=0;r=-1&&!i;a--){var o=a>=0?arguments[a]:t.cwd();if("string"!=typeof o)throw new TypeError("Arguments to path.resolve must be strings");o&&(e=o+"/"+e,i="/"===o.charAt(0))}return(i?"/":"")+(e=n(r(e.split("/"),(function(t){return!!t})),!i).join("/"))||"."},e.normalize=function(t){var 
21:r.count+=1}},table:[{3:1,4:[1,2]},{1:[3]},{5:[1,3],8:[1,4]},{6:5,7:e,9:6,12:n},{5:[1,8]},{7:[1,9]},t(r,[2,7],{10:10,11:[1,11]}),t(i,[2,6]),{6:12,7:e,9:6,12:n},{1:[2,1]},{7:[2,4],12:[1,15],13:13,14:14,15:[1,16],17:[1,17],19:[1,18],20:[1,19],21:[1,20]},t(i,[2,5]),{7:[1,21]},t(r,[2,8]),{12:[1,22]},t(r,[2,10]),{12:[2,16],16:23,23:[1,24]},{18:[1,25]},{18:[1,26]},{18:[1,27]},{18:[1,30],22:28,24:[1,29]},{1:[2,2]},t(r,[2,9]),{12:[2,11]},{12:[2,17]},{12:[2,12]},{12:[2,13]},{12:[2,14]},{12:[2,15]},{12:a,25:31,26:o},{12:a,25:33,26:o},{12:[2,18]},{12:a,25:34,26:o},{12:[2,19]},{12:[2,21]}],defaultActions:{9:[2,1],21:[2,2],23:[2,11],24:[2,17],25:[2,12],26:[2,13],27:[2,14],28:[2,15],31:[2,18],33:[2,19],34:[2,21]},parseError:function(t,e){if(!e.recoverable){var n=new Error(t);throw n.hash=e,n}this.trace(t)},parse:function(t){var e=this,n=[0],r=[],i=[null],a=[],o=this.table,s="",c=0,u=0,l=0,h=2,f=1,d=a.slice.call(arguments,1),p=Object.create(this.lexer),g={yy:{}};for(var y in this.yy)Object.prototype.hasOwnProperty.call(this.yy,y)&&(g.yy[y]=this.yy[y]);p.setInput(t,g.yy),g.yy.lexer=p,g.yy.parser=this,void 0===p.yylloc&&(p.yylloc={});var v=p.yylloc;a.push(v);var m=p.options&&p.options.ranges;function b(){var t;return"number"!=typeof(t=r.pop()||p.lex()||f)&&(t instanceof Array&&(t=(r=t).pop()),t=e.symbols_[t]||t),t}"function"==typeof g.yy.parseError?this.parseError=g.yy.parseError:this.parseError=Object.getPrototypeOf(this).parseError;for(var x,_,k,w,E,T,C,S,A,M={};;){if(k=n[n.length-1],this.defaultActions[k]?w=this.defaultActions[k]:(null==x&&(x=b()),w=o[k]&&o[k][x]),void 0===w||!w.length||!w[0]){var O="";for(T in A=[],o[k])this.terminals_[T]&&T>h&&A.push("'"+this.terminals_[T]+"'");O=p.showPosition?"Parse error on line "+(c+1)+":\n"+p.showPosition()+"\nExpecting "+A.join(", ")+", got '"+(this.terminals_[x]||x)+"'":"Parse error on line "+(c+1)+": Unexpected "+(x==f?"end of input":"'"+(this.terminals_[x]||x)+"'"),this.parseError(O,{text:p.match,token:this.terminals_[x]||x,line:p.yylineno,loc:v,expected:A})}if(w[0]instanceof Array&&w.length>1)throw new Error("Parse Error: multiple actions possible at state: "+k+", token: "+x);switch(w[0]){case 1:n.push(x),i.push(p.yytext),a.push(p.yylloc),n.push(w[1]),x=null,_?(x=_,_=null):(u=p.yyleng,s=p.yytext,c=p.yylineno,v=p.yylloc,l>0&&l--);break;case 2:if(C=this.productions_[w[1]][1],M.$=i[i.length-C],M._$={first_line:a[a.length-(C||1)].first_line,last_line:a[a.length-1].last_line,first_column:a[a.length-(C||1)].first_column,last_column:a[a.length-1].last_column},m&&(M._$.range=[a[a.length-(C||1)].range[0],a[a.length-1].range[1]]),void 0!==(E=this.performAction.apply(M,[s,u,c,g.yy,w[1],i,a].concat(d))))return E;C&&(n=n.slice(0,-1*C*2),i=i.slice(0,-1*C),a=a.slice(0,-1*C)),n.push(this.productions_[w[1]][0]),i.push(M.$),a.push(M._$),S=o[n[n.length-2]][n[n.length-1]],n.push(S);break;case 3:return!0}}return!0}},c={EOF:1,parseError:function(t,e){if(!this.yy.parser)throw new Error(t);this.yy.parser.parseError(t,e)},setInput:function(t,e){return this.yy=e||this.yy||{},this._input=t,this._more=this._backtrack=this.done=!1,this.yylineno=this.yyleng=0,this.yytext=this.matched=this.match="",this.conditionStack=["INITIAL"],this.yylloc={first_line:1,first_column:0,last_line:1,last_column:0},this.options.ranges&&(this.yylloc.range=[0,0]),this.offset=0,this},input:function(){var t=this._input[0];return 
this.yytext+=t,this.yyleng++,this.offset++,this.match+=t,this.matched+=t,t.match(/(?:\r\n?|\n).*/g)?(this.yylineno++,this.yylloc.last_line++):this.yylloc.last_column++,this.options.ranges&&this.yylloc.range[1]++,this._input=this._input.slice(1),t},unput:function(t){var e=t.length,n=t.split(/(?:\r\n?|\n)/g);this._input=t+this._input,this.yytext=this.yytext.substr(0,this.yytext.length-e),this.offset-=e;var r=this.match.split(/(?:\r\n?|\n)/g);this.match=this.match.substr(0,this.match.length-1),this.matched=this.matched.substr(0,this.matched.length-1),n.length-1&&(this.yylineno-=n.length-1);var i=this.yylloc.range;return this.yylloc={first_line:this.yylloc.first_line,last_line:this.yylineno+1,first_column:this.yylloc.first_column,last_column:n?(n.length===r.length?this.yylloc.first_column:0)+r[r.length-n.length].length-n[0].length:this.yylloc.first_column-e},this.options.ranges&&(this.yylloc.range=[i[0],i[0]+this.yyleng-e]),this.yyleng=this.yytext.length,this},more:function(){return this._more=!0,this},reject:function(){return this.options.backtrack_lexer?(this._backtrack=!0,this):this.parseError("Lexical error on line "+(this.yylineno+1)+". You can only invoke reject() in the lexer when the lexer is of the backtracking persuasion (options.backtrack_lexer = true).\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},less:function(t){this.unput(this.match.slice(t))},pastInput:function(){var t=this.matched.substr(0,this.matched.length-this.match.length);return(t.length>20?"...":"")+t.substr(-20).replace(/\n/g,"")},upcomingInput:function(){var t=this.match;return t.length<20&&(t+=this._input.substr(0,20-t.length)),(t.substr(0,20)+(t.length>20?"...":"")).replace(/\n/g,"")},showPosition:function(){var t=this.pastInput(),e=new Array(t.length+1).join("-");return t+this.upcomingInput()+"\n"+e+"^"},test_match:function(t,e){var n,r,i;if(this.options.backtrack_lexer&&(i={yylineno:this.yylineno,yylloc:{first_line:this.yylloc.first_line,last_line:this.last_line,first_column:this.yylloc.first_column,last_column:this.yylloc.last_column},yytext:this.yytext,match:this.match,matches:this.matches,matched:this.matched,yyleng:this.yyleng,offset:this.offset,_more:this._more,_input:this._input,yy:this.yy,conditionStack:this.conditionStack.slice(0),done:this.done},this.options.ranges&&(i.yylloc.range=this.yylloc.range.slice(0))),(r=t[0].match(/(?:\r\n?|\n).*/g))&&(this.yylineno+=r.length),this.yylloc={first_line:this.yylloc.last_line,last_line:this.yylineno+1,first_column:this.yylloc.last_column,last_column:r?r[r.length-1].length-r[r.length-1].match(/\r?\n?/)[0].length:this.yylloc.last_column+t[0].length},this.yytext+=t[0],this.match+=t[0],this.matches=t,this.yyleng=this.yytext.length,this.options.ranges&&(this.yylloc.range=[this.offset,this.offset+=this.yyleng]),this._more=!1,this._backtrack=!1,this._input=this._input.slice(t[0].length),this.matched+=t[0],n=this.performAction.call(this,this.yy,this,e,this.conditionStack[this.conditionStack.length-1]),this.done&&this._input&&(this.done=!1),n)return n;if(this._backtrack){for(var a in i)this[a]=i[a];return!1}return!1},next:function(){if(this.done)return this.EOF;var t,e,n,r;this._input||(this.done=!0),this._more||(this.yytext="",this.match="");for(var i=this._currentRules(),a=0;ae[0].length)){if(e=n,r=a,this.options.backtrack_lexer){if(!1!==(t=this.test_match(n,i[a])))return t;if(this._backtrack){e=!1;continue}return!1}if(!this.options.flex)break}return e?!1!==(t=this.test_match(e,i[r]))&&t:""===this._input?this.EOF:this.parseError("Lexical error on line 
"+(this.yylineno+1)+". Unrecognized text.\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},lex:function(){var t=this.next();return t||this.lex()},begin:function(t){this.conditionStack.push(t)},popState:function(){return this.conditionStack.length-1>0?this.conditionStack.pop():this.conditionStack[0]},_currentRules:function(){return this.conditionStack.length&&this.conditionStack[this.conditionStack.length-1]?this.conditions[this.conditionStack[this.conditionStack.length-1]].rules:this.conditions.INITIAL.rules},topState:function(t){return(t=this.conditionStack.length-1-Math.abs(t||0))>=0?this.conditionStack[t]:"INITIAL"},pushState:function(t){this.begin(t)},stateStackSize:function(){return this.conditionStack.length},options:{"case-insensitive":!0},performAction:function(t,e,n,r){switch(n){case 0:return 12;case 1:case 2:case 3:break;case 4:return 4;case 5:return 15;case 6:return 17;case 7:return 20;case 8:return 21;case 9:return 19;case 10:case 11:return 8;case 12:return 5;case 13:return 26;case 14:this.begin("options");break;case 15:this.popState();break;case 16:return 11;case 17:this.begin("string");break;case 18:this.popState();break;case 19:return 23;case 20:return 18;case 21:return 7}},rules:[/^(?:(\r?\n)+)/i,/^(?:\s+)/i,/^(?:#[^\n]*)/i,/^(?:%[^\n]*)/i,/^(?:gitGraph\b)/i,/^(?:commit\b)/i,/^(?:branch\b)/i,/^(?:merge\b)/i,/^(?:reset\b)/i,/^(?:checkout\b)/i,/^(?:LR\b)/i,/^(?:BT\b)/i,/^(?::)/i,/^(?:\^)/i,/^(?:options\r?\n)/i,/^(?:end\r?\n)/i,/^(?:[^\n]+\r?\n)/i,/^(?:["])/i,/^(?:["])/i,/^(?:[^"]*)/i,/^(?:[a-zA-Z][-_\.a-zA-Z0-9]*[-_a-zA-Z0-9])/i,/^(?:$)/i],conditions:{options:{rules:[15,16],inclusive:!1},string:{rules:[18,19],inclusive:!1},INITIAL:{rules:[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,17,20,21],inclusive:!0}}};function u(){this.yy={}}return s.lexer=c,u.prototype=s,s.Parser=u,new u}();e.parser=i,e.Parser=i.Parser,e.parse=function(){return i.parse.apply(i,arguments)},e.main=function(r){r[1]||(console.log("Usage: "+r[0]+" FILE"),t.exit(1));var i=n(19).readFileSync(n(20).normalize(r[1]),"utf8");return e.parser.parse(i)},n.c[n.s]===r&&e.main(t.argv.slice(1))}).call(this,n(14),n(7)(t))},function(t,e,n){(function(t,r){var i=function(){var t=function(t,e,n,r){for(n=n||{},r=t.length;r--;n[t[r]]=e);return n},e=[6,9,10],n={trace:function(){},yy:{},symbols_:{error:2,start:3,info:4,document:5,EOF:6,line:7,statement:8,NL:9,showInfo:10,$accept:0,$end:1},terminals_:{2:"error",4:"info",6:"EOF",9:"NL",10:"showInfo"},productions_:[0,[3,3],[5,0],[5,2],[7,1],[7,1],[8,1]],performAction:function(t,e,n,r,i,a,o){a.length;switch(i){case 1:return r;case 4:break;case 6:r.setInfo(!0)}},table:[{3:1,4:[1,2]},{1:[3]},t(e,[2,2],{5:3}),{6:[1,4],7:5,8:6,9:[1,7],10:[1,8]},{1:[2,1]},t(e,[2,3]),t(e,[2,4]),t(e,[2,5]),t(e,[2,6])],defaultActions:{4:[2,1]},parseError:function(t,e){if(!e.recoverable){var n=new Error(t);throw n.hash=e,n}this.trace(t)},parse:function(t){var e=this,n=[0],r=[],i=[null],a=[],o=this.table,s="",c=0,u=0,l=0,h=2,f=1,d=a.slice.call(arguments,1),p=Object.create(this.lexer),g={yy:{}};for(var y in this.yy)Object.prototype.hasOwnProperty.call(this.yy,y)&&(g.yy[y]=this.yy[y]);p.setInput(t,g.yy),g.yy.lexer=p,g.yy.parser=this,void 0===p.yylloc&&(p.yylloc={});var v=p.yylloc;a.push(v);var m=p.options&&p.options.ranges;function b(){var t;return"number"!=typeof(t=r.pop()||p.lex()||f)&&(t instanceof Array&&(t=(r=t).pop()),t=e.symbols_[t]||t),t}"function"==typeof g.yy.parseError?this.parseError=g.yy.parseError:this.parseError=Object.getPrototypeOf(this).parseError;for(var 
x,_,k,w,E,T,C,S,A,M={};;){if(k=n[n.length-1],this.defaultActions[k]?w=this.defaultActions[k]:(null==x&&(x=b()),w=o[k]&&o[k][x]),void 0===w||!w.length||!w[0]){var O="";for(T in A=[],o[k])this.terminals_[T]&&T>h&&A.push("'"+this.terminals_[T]+"'");O=p.showPosition?"Parse error on line "+(c+1)+":\n"+p.showPosition()+"\nExpecting "+A.join(", ")+", got '"+(this.terminals_[x]||x)+"'":"Parse error on line "+(c+1)+": Unexpected "+(x==f?"end of input":"'"+(this.terminals_[x]||x)+"'"),this.parseError(O,{text:p.match,token:this.terminals_[x]||x,line:p.yylineno,loc:v,expected:A})}if(w[0]instanceof Array&&w.length>1)throw new Error("Parse Error: multiple actions possible at state: "+k+", token: "+x);switch(w[0]){case 1:n.push(x),i.push(p.yytext),a.push(p.yylloc),n.push(w[1]),x=null,_?(x=_,_=null):(u=p.yyleng,s=p.yytext,c=p.yylineno,v=p.yylloc,l>0&&l--);break;case 2:if(C=this.productions_[w[1]][1],M.$=i[i.length-C],M._$={first_line:a[a.length-(C||1)].first_line,last_line:a[a.length-1].last_line,first_column:a[a.length-(C||1)].first_column,last_column:a[a.length-1].last_column},m&&(M._$.range=[a[a.length-(C||1)].range[0],a[a.length-1].range[1]]),void 0!==(E=this.performAction.apply(M,[s,u,c,g.yy,w[1],i,a].concat(d))))return E;C&&(n=n.slice(0,-1*C*2),i=i.slice(0,-1*C),a=a.slice(0,-1*C)),n.push(this.productions_[w[1]][0]),i.push(M.$),a.push(M._$),S=o[n[n.length-2]][n[n.length-1]],n.push(S);break;case 3:return!0}}return!0}},r={EOF:1,parseError:function(t,e){if(!this.yy.parser)throw new Error(t);this.yy.parser.parseError(t,e)},setInput:function(t,e){return this.yy=e||this.yy||{},this._input=t,this._more=this._backtrack=this.done=!1,this.yylineno=this.yyleng=0,this.yytext=this.matched=this.match="",this.conditionStack=["INITIAL"],this.yylloc={first_line:1,first_column:0,last_line:1,last_column:0},this.options.ranges&&(this.yylloc.range=[0,0]),this.offset=0,this},input:function(){var t=this._input[0];return this.yytext+=t,this.yyleng++,this.offset++,this.match+=t,this.matched+=t,t.match(/(?:\r\n?|\n).*/g)?(this.yylineno++,this.yylloc.last_line++):this.yylloc.last_column++,this.options.ranges&&this.yylloc.range[1]++,this._input=this._input.slice(1),t},unput:function(t){var e=t.length,n=t.split(/(?:\r\n?|\n)/g);this._input=t+this._input,this.yytext=this.yytext.substr(0,this.yytext.length-e),this.offset-=e;var r=this.match.split(/(?:\r\n?|\n)/g);this.match=this.match.substr(0,this.match.length-1),this.matched=this.matched.substr(0,this.matched.length-1),n.length-1&&(this.yylineno-=n.length-1);var i=this.yylloc.range;return this.yylloc={first_line:this.yylloc.first_line,last_line:this.yylineno+1,first_column:this.yylloc.first_column,last_column:n?(n.length===r.length?this.yylloc.first_column:0)+r[r.length-n.length].length-n[0].length:this.yylloc.first_column-e},this.options.ranges&&(this.yylloc.range=[i[0],i[0]+this.yyleng-e]),this.yyleng=this.yytext.length,this},more:function(){return this._more=!0,this},reject:function(){return this.options.backtrack_lexer?(this._backtrack=!0,this):this.parseError("Lexical error on line "+(this.yylineno+1)+". 
You can only invoke reject() in the lexer when the lexer is of the backtracking persuasion (options.backtrack_lexer = true).\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},less:function(t){this.unput(this.match.slice(t))},pastInput:function(){var t=this.matched.substr(0,this.matched.length-this.match.length);return(t.length>20?"...":"")+t.substr(-20).replace(/\n/g,"")},upcomingInput:function(){var t=this.match;return t.length<20&&(t+=this._input.substr(0,20-t.length)),(t.substr(0,20)+(t.length>20?"...":"")).replace(/\n/g,"")},showPosition:function(){var t=this.pastInput(),e=new Array(t.length+1).join("-");return t+this.upcomingInput()+"\n"+e+"^"},test_match:function(t,e){var n,r,i;if(this.options.backtrack_lexer&&(i={yylineno:this.yylineno,yylloc:{first_line:this.yylloc.first_line,last_line:this.last_line,first_column:this.yylloc.first_column,last_column:this.yylloc.last_column},yytext:this.yytext,match:this.match,matches:this.matches,matched:this.matched,yyleng:this.yyleng,offset:this.offset,_more:this._more,_input:this._input,yy:this.yy,conditionStack:this.conditionStack.slice(0),done:this.done},this.options.ranges&&(i.yylloc.range=this.yylloc.range.slice(0))),(r=t[0].match(/(?:\r\n?|\n).*/g))&&(this.yylineno+=r.length),this.yylloc={first_line:this.yylloc.last_line,last_line:this.yylineno+1,first_column:this.yylloc.last_column,last_column:r?r[r.length-1].length-r[r.length-1].match(/\r?\n?/)[0].length:this.yylloc.last_column+t[0].length},this.yytext+=t[0],this.match+=t[0],this.matches=t,this.yyleng=this.yytext.length,this.options.ranges&&(this.yylloc.range=[this.offset,this.offset+=this.yyleng]),this._more=!1,this._backtrack=!1,this._input=this._input.slice(t[0].length),this.matched+=t[0],n=this.performAction.call(this,this.yy,this,e,this.conditionStack[this.conditionStack.length-1]),this.done&&this._input&&(this.done=!1),n)return n;if(this._backtrack){for(var a in i)this[a]=i[a];return!1}return!1},next:function(){if(this.done)return this.EOF;var t,e,n,r;this._input||(this.done=!0),this._more||(this.yytext="",this.match="");for(var i=this._currentRules(),a=0;ae[0].length)){if(e=n,r=a,this.options.backtrack_lexer){if(!1!==(t=this.test_match(n,i[a])))return t;if(this._backtrack){e=!1;continue}return!1}if(!this.options.flex)break}return e?!1!==(t=this.test_match(e,i[r]))&&t:""===this._input?this.EOF:this.parseError("Lexical error on line "+(this.yylineno+1)+". 
Unrecognized text.\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},lex:function(){var t=this.next();return t||this.lex()},begin:function(t){this.conditionStack.push(t)},popState:function(){return this.conditionStack.length-1>0?this.conditionStack.pop():this.conditionStack[0]},_currentRules:function(){return this.conditionStack.length&&this.conditionStack[this.conditionStack.length-1]?this.conditions[this.conditionStack[this.conditionStack.length-1]].rules:this.conditions.INITIAL.rules},topState:function(t){return(t=this.conditionStack.length-1-Math.abs(t||0))>=0?this.conditionStack[t]:"INITIAL"},pushState:function(t){this.begin(t)},stateStackSize:function(){return this.conditionStack.length},options:{"case-insensitive":!0},performAction:function(t,e,n,r){switch(n){case 0:return 4;case 1:return 9;case 2:return"space";case 3:return 10;case 4:return 6;case 5:return"TXT"}},rules:[/^(?:info\b)/i,/^(?:[\s\n\r]+)/i,/^(?:[\s]+)/i,/^(?:showInfo\b)/i,/^(?:$)/i,/^(?:.)/i],conditions:{INITIAL:{rules:[0,1,2,3,4,5],inclusive:!0}}};function i(){this.yy={}}return n.lexer=r,i.prototype=n,n.Parser=i,new i}();e.parser=i,e.Parser=i.Parser,e.parse=function(){return i.parse.apply(i,arguments)},e.main=function(r){r[1]||(console.log("Usage: "+r[0]+" FILE"),t.exit(1));var i=n(19).readFileSync(n(20).normalize(r[1]),"utf8");return e.parser.parse(i)},n.c[n.s]===r&&e.main(t.argv.slice(1))}).call(this,n(14),n(7)(t))},function(t,e,n){(function(t,r){var i=function(){var t=function(t,e,n,r){for(n=n||{},r=t.length;r--;n[t[r]]=e);return n},e=[1,4],n=[1,5],r=[1,6],i=[1,7],a=[1,9],o=[1,10,12,19,20,21,22],s=[1,6,10,12,19,20,21,22],c=[19,20,21],u=[1,22],l=[6,19,20,21,22],h={trace:function(){},yy:{},symbols_:{error:2,start:3,eol:4,directive:5,PIE:6,document:7,line:8,statement:9,txt:10,value:11,title:12,title_value:13,openDirective:14,typeDirective:15,closeDirective:16,":":17,argDirective:18,NEWLINE:19,";":20,EOF:21,open_directive:22,type_directive:23,arg_directive:24,close_directive:25,$accept:0,$end:1},terminals_:{2:"error",6:"PIE",10:"txt",11:"value",12:"title",13:"title_value",17:":",19:"NEWLINE",20:";",21:"EOF",22:"open_directive",23:"type_directive",24:"arg_directive",25:"close_directive"},productions_:[0,[3,2],[3,2],[3,2],[7,0],[7,2],[8,2],[9,0],[9,2],[9,2],[9,1],[5,3],[5,5],[4,1],[4,1],[4,1],[14,1],[15,1],[18,1],[16,1]],performAction:function(t,e,n,r,i,a,o){var s=a.length-1;switch(i){case 6:this.$=a[s-1];break;case 8:r.addSection(a[s-1],r.cleanupValue(a[s]));break;case 9:this.$=a[s].trim(),r.setTitle(this.$);break;case 16:r.parseDirective("%%{","open_directive");break;case 17:r.parseDirective(a[s],"type_directive");break;case 18:a[s]=a[s].trim().replace(/'/g,'"'),r.parseDirective(a[s],"arg_directive");break;case 19:r.parseDirective("}%%","close_directive","pie")}},table:[{3:1,4:2,5:3,6:e,14:8,19:n,20:r,21:i,22:a},{1:[3]},{3:10,4:2,5:3,6:e,14:8,19:n,20:r,21:i,22:a},{3:11,4:2,5:3,6:e,14:8,19:n,20:r,21:i,22:a},t(o,[2,4],{7:12}),t(s,[2,13]),t(s,[2,14]),t(s,[2,15]),{15:13,23:[1,14]},{23:[2,16]},{1:[2,1]},{1:[2,2]},t(c,[2,7],{14:8,8:15,9:16,5:19,1:[2,3],10:[1,17],12:[1,18],22:a}),{16:20,17:[1,21],25:u},t([17,25],[2,17]),t(o,[2,5]),{4:23,19:n,20:r,21:i},{11:[1,24]},{13:[1,25]},t(c,[2,10]),t(l,[2,11]),{18:26,24:[1,27]},t(l,[2,19]),t(o,[2,6]),t(c,[2,8]),t(c,[2,9]),{16:28,25:u},{25:[2,18]},t(l,[2,12])],defaultActions:{9:[2,16],10:[2,1],11:[2,2],27:[2,18]},parseError:function(t,e){if(!e.recoverable){var n=new Error(t);throw n.hash=e,n}this.trace(t)},parse:function(t){var 
e=this,n=[0],r=[],i=[null],a=[],o=this.table,s="",c=0,u=0,l=0,h=2,f=1,d=a.slice.call(arguments,1),p=Object.create(this.lexer),g={yy:{}};for(var y in this.yy)Object.prototype.hasOwnProperty.call(this.yy,y)&&(g.yy[y]=this.yy[y]);p.setInput(t,g.yy),g.yy.lexer=p,g.yy.parser=this,void 0===p.yylloc&&(p.yylloc={});var v=p.yylloc;a.push(v);var m=p.options&&p.options.ranges;function b(){var t;return"number"!=typeof(t=r.pop()||p.lex()||f)&&(t instanceof Array&&(t=(r=t).pop()),t=e.symbols_[t]||t),t}"function"==typeof g.yy.parseError?this.parseError=g.yy.parseError:this.parseError=Object.getPrototypeOf(this).parseError;for(var x,_,k,w,E,T,C,S,A,M={};;){if(k=n[n.length-1],this.defaultActions[k]?w=this.defaultActions[k]:(null==x&&(x=b()),w=o[k]&&o[k][x]),void 0===w||!w.length||!w[0]){var O="";for(T in A=[],o[k])this.terminals_[T]&&T>h&&A.push("'"+this.terminals_[T]+"'");O=p.showPosition?"Parse error on line "+(c+1)+":\n"+p.showPosition()+"\nExpecting "+A.join(", ")+", got '"+(this.terminals_[x]||x)+"'":"Parse error on line "+(c+1)+": Unexpected "+(x==f?"end of input":"'"+(this.terminals_[x]||x)+"'"),this.parseError(O,{text:p.match,token:this.terminals_[x]||x,line:p.yylineno,loc:v,expected:A})}if(w[0]instanceof Array&&w.length>1)throw new Error("Parse Error: multiple actions possible at state: "+k+", token: "+x);switch(w[0]){case 1:n.push(x),i.push(p.yytext),a.push(p.yylloc),n.push(w[1]),x=null,_?(x=_,_=null):(u=p.yyleng,s=p.yytext,c=p.yylineno,v=p.yylloc,l>0&&l--);break;case 2:if(C=this.productions_[w[1]][1],M.$=i[i.length-C],M._$={first_line:a[a.length-(C||1)].first_line,last_line:a[a.length-1].last_line,first_column:a[a.length-(C||1)].first_column,last_column:a[a.length-1].last_column},m&&(M._$.range=[a[a.length-(C||1)].range[0],a[a.length-1].range[1]]),void 0!==(E=this.performAction.apply(M,[s,u,c,g.yy,w[1],i,a].concat(d))))return E;C&&(n=n.slice(0,-1*C*2),i=i.slice(0,-1*C),a=a.slice(0,-1*C)),n.push(this.productions_[w[1]][0]),i.push(M.$),a.push(M._$),S=o[n[n.length-2]][n[n.length-1]],n.push(S);break;case 3:return!0}}return!0}},f={EOF:1,parseError:function(t,e){if(!this.yy.parser)throw new Error(t);this.yy.parser.parseError(t,e)},setInput:function(t,e){return this.yy=e||this.yy||{},this._input=t,this._more=this._backtrack=this.done=!1,this.yylineno=this.yyleng=0,this.yytext=this.matched=this.match="",this.conditionStack=["INITIAL"],this.yylloc={first_line:1,first_column:0,last_line:1,last_column:0},this.options.ranges&&(this.yylloc.range=[0,0]),this.offset=0,this},input:function(){var t=this._input[0];return this.yytext+=t,this.yyleng++,this.offset++,this.match+=t,this.matched+=t,t.match(/(?:\r\n?|\n).*/g)?(this.yylineno++,this.yylloc.last_line++):this.yylloc.last_column++,this.options.ranges&&this.yylloc.range[1]++,this._input=this._input.slice(1),t},unput:function(t){var e=t.length,n=t.split(/(?:\r\n?|\n)/g);this._input=t+this._input,this.yytext=this.yytext.substr(0,this.yytext.length-e),this.offset-=e;var r=this.match.split(/(?:\r\n?|\n)/g);this.match=this.match.substr(0,this.match.length-1),this.matched=this.matched.substr(0,this.matched.length-1),n.length-1&&(this.yylineno-=n.length-1);var i=this.yylloc.range;return this.yylloc={first_line:this.yylloc.first_line,last_line:this.yylineno+1,first_column:this.yylloc.first_column,last_column:n?(n.length===r.length?this.yylloc.first_column:0)+r[r.length-n.length].length-n[0].length:this.yylloc.first_column-e},this.options.ranges&&(this.yylloc.range=[i[0],i[0]+this.yyleng-e]),this.yyleng=this.yytext.length,this},more:function(){return 
this._more=!0,this},reject:function(){return this.options.backtrack_lexer?(this._backtrack=!0,this):this.parseError("Lexical error on line "+(this.yylineno+1)+". You can only invoke reject() in the lexer when the lexer is of the backtracking persuasion (options.backtrack_lexer = true).\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},less:function(t){this.unput(this.match.slice(t))},pastInput:function(){var t=this.matched.substr(0,this.matched.length-this.match.length);return(t.length>20?"...":"")+t.substr(-20).replace(/\n/g,"")},upcomingInput:function(){var t=this.match;return t.length<20&&(t+=this._input.substr(0,20-t.length)),(t.substr(0,20)+(t.length>20?"...":"")).replace(/\n/g,"")},showPosition:function(){var t=this.pastInput(),e=new Array(t.length+1).join("-");return t+this.upcomingInput()+"\n"+e+"^"},test_match:function(t,e){var n,r,i;if(this.options.backtrack_lexer&&(i={yylineno:this.yylineno,yylloc:{first_line:this.yylloc.first_line,last_line:this.last_line,first_column:this.yylloc.first_column,last_column:this.yylloc.last_column},yytext:this.yytext,match:this.match,matches:this.matches,matched:this.matched,yyleng:this.yyleng,offset:this.offset,_more:this._more,_input:this._input,yy:this.yy,conditionStack:this.conditionStack.slice(0),done:this.done},this.options.ranges&&(i.yylloc.range=this.yylloc.range.slice(0))),(r=t[0].match(/(?:\r\n?|\n).*/g))&&(this.yylineno+=r.length),this.yylloc={first_line:this.yylloc.last_line,last_line:this.yylineno+1,first_column:this.yylloc.last_column,last_column:r?r[r.length-1].length-r[r.length-1].match(/\r?\n?/)[0].length:this.yylloc.last_column+t[0].length},this.yytext+=t[0],this.match+=t[0],this.matches=t,this.yyleng=this.yytext.length,this.options.ranges&&(this.yylloc.range=[this.offset,this.offset+=this.yyleng]),this._more=!1,this._backtrack=!1,this._input=this._input.slice(t[0].length),this.matched+=t[0],n=this.performAction.call(this,this.yy,this,e,this.conditionStack[this.conditionStack.length-1]),this.done&&this._input&&(this.done=!1),n)return n;if(this._backtrack){for(var a in i)this[a]=i[a];return!1}return!1},next:function(){if(this.done)return this.EOF;var t,e,n,r;this._input||(this.done=!0),this._more||(this.yytext="",this.match="");for(var i=this._currentRules(),a=0;ae[0].length)){if(e=n,r=a,this.options.backtrack_lexer){if(!1!==(t=this.test_match(n,i[a])))return t;if(this._backtrack){e=!1;continue}return!1}if(!this.options.flex)break}return e?!1!==(t=this.test_match(e,i[r]))&&t:""===this._input?this.EOF:this.parseError("Lexical error on line "+(this.yylineno+1)+". 
Unrecognized text.\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},lex:function(){var t=this.next();return t||this.lex()},begin:function(t){this.conditionStack.push(t)},popState:function(){return this.conditionStack.length-1>0?this.conditionStack.pop():this.conditionStack[0]},_currentRules:function(){return this.conditionStack.length&&this.conditionStack[this.conditionStack.length-1]?this.conditions[this.conditionStack[this.conditionStack.length-1]].rules:this.conditions.INITIAL.rules},topState:function(t){return(t=this.conditionStack.length-1-Math.abs(t||0))>=0?this.conditionStack[t]:"INITIAL"},pushState:function(t){this.begin(t)},stateStackSize:function(){return this.conditionStack.length},options:{"case-insensitive":!0},performAction:function(t,e,n,r){switch(n){case 0:return this.begin("open_directive"),22;case 1:return this.begin("type_directive"),23;case 2:return this.popState(),this.begin("arg_directive"),17;case 3:return this.popState(),this.popState(),25;case 4:return 24;case 5:case 6:break;case 7:return 19;case 8:case 9:break;case 10:return this.begin("title"),12;case 11:return this.popState(),"title_value";case 12:this.begin("string");break;case 13:this.popState();break;case 14:return"txt";case 15:return 6;case 16:return"value";case 17:return 21}},rules:[/^(?:%%\{)/i,/^(?:((?:(?!\}%%)[^:.])*))/i,/^(?::)/i,/^(?:\}%%)/i,/^(?:((?:(?!\}%%).|\n)*))/i,/^(?:%%(?!\{)[^\n]*)/i,/^(?:[^\}]%%[^\n]*)/i,/^(?:[\n\r]+)/i,/^(?:%%[^\n]*)/i,/^(?:[\s]+)/i,/^(?:title\b)/i,/^(?:(?!\n||)*[^\n]*)/i,/^(?:["])/i,/^(?:["])/i,/^(?:[^"]*)/i,/^(?:pie\b)/i,/^(?::[\s]*[\d]+(?:\.[\d]+)?)/i,/^(?:$)/i],conditions:{close_directive:{rules:[],inclusive:!1},arg_directive:{rules:[3,4],inclusive:!1},type_directive:{rules:[2,3],inclusive:!1},open_directive:{rules:[1],inclusive:!1},title:{rules:[11],inclusive:!1},string:{rules:[13,14],inclusive:!1},INITIAL:{rules:[0,5,6,7,8,9,10,12,15,16,17],inclusive:!0}}};function d(){this.yy={}}return h.lexer=f,d.prototype=h,h.Parser=d,new d}();e.parser=i,e.Parser=i.Parser,e.parse=function(){return i.parse.apply(i,arguments)},e.main=function(r){r[1]||(console.log("Usage: "+r[0]+" FILE"),t.exit(1));var i=n(19).readFileSync(n(20).normalize(r[1]),"utf8");return e.parser.parse(i)},n.c[n.s]===r&&e.main(t.argv.slice(1))}).call(this,n(14),n(7)(t))},function(t,e,n){(function(t,r){var i=function(){var t=function(t,e,n,r){for(n=n||{},r=t.length;r--;n[t[r]]=e);return n},e=[1,2],n=[1,5],r=[6,9,11,20,30],i=[1,17],a=[1,20],o=[1,24],s=[1,25],c=[1,26],u=[1,27],l=[20,27,28],h=[4,6,9,11,20,30],f=[23,24,25,26],d={trace:function(){},yy:{},symbols_:{error:2,start:3,ER_DIAGRAM:4,document:5,EOF:6,directive:7,line:8,SPACE:9,statement:10,NEWLINE:11,openDirective:12,typeDirective:13,closeDirective:14,":":15,argDirective:16,entityName:17,relSpec:18,role:19,ALPHANUM:20,cardinality:21,relType:22,ZERO_OR_ONE:23,ZERO_OR_MORE:24,ONE_OR_MORE:25,ONLY_ONE:26,NON_IDENTIFYING:27,IDENTIFYING:28,WORD:29,open_directive:30,type_directive:31,arg_directive:32,close_directive:33,$accept:0,$end:1},terminals_:{2:"error",4:"ER_DIAGRAM",6:"EOF",9:"SPACE",11:"NEWLINE",15:":",20:"ALPHANUM",23:"ZERO_OR_ONE",24:"ZERO_OR_MORE",25:"ONE_OR_MORE",26:"ONLY_ONE",27:"NON_IDENTIFYING",28:"IDENTIFYING",29:"WORD",30:"open_directive",31:"type_directive",32:"arg_directive",33:"close_directive"},productions_:[0,[3,3],[3,2],[5,0],[5,2],[8,2],[8,1],[8,1],[8,1],[7,4],[7,6],[10,1],[10,5],[10,1],[17,1],[18,3],[21,1],[21,1],[21,1],[21,1],[22,1],[22,1],[19,1],[19,1],[12,1],[13,1],[16,1],[14,1]],performAction:function(t,e,n,r,i,a,o){var 
s=a.length-1;switch(i){case 1:break;case 3:this.$=[];break;case 4:a[s-1].push(a[s]),this.$=a[s-1];break;case 5:case 6:this.$=a[s];break;case 7:case 8:this.$=[];break;case 12:r.addEntity(a[s-4]),r.addEntity(a[s-2]),r.addRelationship(a[s-4],a[s],a[s-2],a[s-3]);break;case 13:r.addEntity(a[s]);break;case 14:this.$=a[s];break;case 15:this.$={cardA:a[s],relType:a[s-1],cardB:a[s-2]};break;case 16:this.$=r.Cardinality.ZERO_OR_ONE;break;case 17:this.$=r.Cardinality.ZERO_OR_MORE;break;case 18:this.$=r.Cardinality.ONE_OR_MORE;break;case 19:this.$=r.Cardinality.ONLY_ONE;break;case 20:this.$=r.Identification.NON_IDENTIFYING;break;case 21:this.$=r.Identification.IDENTIFYING;break;case 22:this.$=a[s].replace(/"/g,"");break;case 23:this.$=a[s];break;case 24:r.parseDirective("%%{","open_directive");break;case 25:r.parseDirective(a[s],"type_directive");break;case 26:a[s]=a[s].trim().replace(/'/g,'"'),r.parseDirective(a[s],"arg_directive");break;case 27:r.parseDirective("}%%","close_directive","er")}},table:[{3:1,4:e,7:3,12:4,30:n},{1:[3]},t(r,[2,3],{5:6}),{3:7,4:e,7:3,12:4,30:n},{13:8,31:[1,9]},{31:[2,24]},{6:[1,10],7:15,8:11,9:[1,12],10:13,11:[1,14],12:4,17:16,20:i,30:n},{1:[2,2]},{14:18,15:[1,19],33:a},t([15,33],[2,25]),t(r,[2,8],{1:[2,1]}),t(r,[2,4]),{7:15,10:21,12:4,17:16,20:i,30:n},t(r,[2,6]),t(r,[2,7]),t(r,[2,11]),t(r,[2,13],{18:22,21:23,23:o,24:s,25:c,26:u}),t([6,9,11,15,20,23,24,25,26,30],[2,14]),{11:[1,28]},{16:29,32:[1,30]},{11:[2,27]},t(r,[2,5]),{17:31,20:i},{22:32,27:[1,33],28:[1,34]},t(l,[2,16]),t(l,[2,17]),t(l,[2,18]),t(l,[2,19]),t(h,[2,9]),{14:35,33:a},{33:[2,26]},{15:[1,36]},{21:37,23:o,24:s,25:c,26:u},t(f,[2,20]),t(f,[2,21]),{11:[1,38]},{19:39,20:[1,41],29:[1,40]},{20:[2,15]},t(h,[2,10]),t(r,[2,12]),t(r,[2,22]),t(r,[2,23])],defaultActions:{5:[2,24],7:[2,2],20:[2,27],30:[2,26],37:[2,15]},parseError:function(t,e){if(!e.recoverable){var n=new Error(t);throw n.hash=e,n}this.trace(t)},parse:function(t){var e=this,n=[0],r=[],i=[null],a=[],o=this.table,s="",c=0,u=0,l=0,h=2,f=1,d=a.slice.call(arguments,1),p=Object.create(this.lexer),g={yy:{}};for(var y in this.yy)Object.prototype.hasOwnProperty.call(this.yy,y)&&(g.yy[y]=this.yy[y]);p.setInput(t,g.yy),g.yy.lexer=p,g.yy.parser=this,void 0===p.yylloc&&(p.yylloc={});var v=p.yylloc;a.push(v);var m=p.options&&p.options.ranges;function b(){var t;return"number"!=typeof(t=r.pop()||p.lex()||f)&&(t instanceof Array&&(t=(r=t).pop()),t=e.symbols_[t]||t),t}"function"==typeof g.yy.parseError?this.parseError=g.yy.parseError:this.parseError=Object.getPrototypeOf(this).parseError;for(var x,_,k,w,E,T,C,S,A,M={};;){if(k=n[n.length-1],this.defaultActions[k]?w=this.defaultActions[k]:(null==x&&(x=b()),w=o[k]&&o[k][x]),void 0===w||!w.length||!w[0]){var O="";for(T in A=[],o[k])this.terminals_[T]&&T>h&&A.push("'"+this.terminals_[T]+"'");O=p.showPosition?"Parse error on line "+(c+1)+":\n"+p.showPosition()+"\nExpecting "+A.join(", ")+", got '"+(this.terminals_[x]||x)+"'":"Parse error on line "+(c+1)+": Unexpected "+(x==f?"end of input":"'"+(this.terminals_[x]||x)+"'"),this.parseError(O,{text:p.match,token:this.terminals_[x]||x,line:p.yylineno,loc:v,expected:A})}if(w[0]instanceof Array&&w.length>1)throw new Error("Parse Error: multiple actions possible at state: "+k+", token: "+x);switch(w[0]){case 1:n.push(x),i.push(p.yytext),a.push(p.yylloc),n.push(w[1]),x=null,_?(x=_,_=null):(u=p.yyleng,s=p.yytext,c=p.yylineno,v=p.yylloc,l>0&&l--);break;case 
2:if(C=this.productions_[w[1]][1],M.$=i[i.length-C],M._$={first_line:a[a.length-(C||1)].first_line,last_line:a[a.length-1].last_line,first_column:a[a.length-(C||1)].first_column,last_column:a[a.length-1].last_column},m&&(M._$.range=[a[a.length-(C||1)].range[0],a[a.length-1].range[1]]),void 0!==(E=this.performAction.apply(M,[s,u,c,g.yy,w[1],i,a].concat(d))))return E;C&&(n=n.slice(0,-1*C*2),i=i.slice(0,-1*C),a=a.slice(0,-1*C)),n.push(this.productions_[w[1]][0]),i.push(M.$),a.push(M._$),S=o[n[n.length-2]][n[n.length-1]],n.push(S);break;case 3:return!0}}return!0}},p={EOF:1,parseError:function(t,e){if(!this.yy.parser)throw new Error(t);this.yy.parser.parseError(t,e)},setInput:function(t,e){return this.yy=e||this.yy||{},this._input=t,this._more=this._backtrack=this.done=!1,this.yylineno=this.yyleng=0,this.yytext=this.matched=this.match="",this.conditionStack=["INITIAL"],this.yylloc={first_line:1,first_column:0,last_line:1,last_column:0},this.options.ranges&&(this.yylloc.range=[0,0]),this.offset=0,this},input:function(){var t=this._input[0];return this.yytext+=t,this.yyleng++,this.offset++,this.match+=t,this.matched+=t,t.match(/(?:\r\n?|\n).*/g)?(this.yylineno++,this.yylloc.last_line++):this.yylloc.last_column++,this.options.ranges&&this.yylloc.range[1]++,this._input=this._input.slice(1),t},unput:function(t){var e=t.length,n=t.split(/(?:\r\n?|\n)/g);this._input=t+this._input,this.yytext=this.yytext.substr(0,this.yytext.length-e),this.offset-=e;var r=this.match.split(/(?:\r\n?|\n)/g);this.match=this.match.substr(0,this.match.length-1),this.matched=this.matched.substr(0,this.matched.length-1),n.length-1&&(this.yylineno-=n.length-1);var i=this.yylloc.range;return this.yylloc={first_line:this.yylloc.first_line,last_line:this.yylineno+1,first_column:this.yylloc.first_column,last_column:n?(n.length===r.length?this.yylloc.first_column:0)+r[r.length-n.length].length-n[0].length:this.yylloc.first_column-e},this.options.ranges&&(this.yylloc.range=[i[0],i[0]+this.yyleng-e]),this.yyleng=this.yytext.length,this},more:function(){return this._more=!0,this},reject:function(){return this.options.backtrack_lexer?(this._backtrack=!0,this):this.parseError("Lexical error on line "+(this.yylineno+1)+". 
You can only invoke reject() in the lexer when the lexer is of the backtracking persuasion (options.backtrack_lexer = true).\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},less:function(t){this.unput(this.match.slice(t))},pastInput:function(){var t=this.matched.substr(0,this.matched.length-this.match.length);return(t.length>20?"...":"")+t.substr(-20).replace(/\n/g,"")},upcomingInput:function(){var t=this.match;return t.length<20&&(t+=this._input.substr(0,20-t.length)),(t.substr(0,20)+(t.length>20?"...":"")).replace(/\n/g,"")},showPosition:function(){var t=this.pastInput(),e=new Array(t.length+1).join("-");return t+this.upcomingInput()+"\n"+e+"^"},test_match:function(t,e){var n,r,i;if(this.options.backtrack_lexer&&(i={yylineno:this.yylineno,yylloc:{first_line:this.yylloc.first_line,last_line:this.last_line,first_column:this.yylloc.first_column,last_column:this.yylloc.last_column},yytext:this.yytext,match:this.match,matches:this.matches,matched:this.matched,yyleng:this.yyleng,offset:this.offset,_more:this._more,_input:this._input,yy:this.yy,conditionStack:this.conditionStack.slice(0),done:this.done},this.options.ranges&&(i.yylloc.range=this.yylloc.range.slice(0))),(r=t[0].match(/(?:\r\n?|\n).*/g))&&(this.yylineno+=r.length),this.yylloc={first_line:this.yylloc.last_line,last_line:this.yylineno+1,first_column:this.yylloc.last_column,last_column:r?r[r.length-1].length-r[r.length-1].match(/\r?\n?/)[0].length:this.yylloc.last_column+t[0].length},this.yytext+=t[0],this.match+=t[0],this.matches=t,this.yyleng=this.yytext.length,this.options.ranges&&(this.yylloc.range=[this.offset,this.offset+=this.yyleng]),this._more=!1,this._backtrack=!1,this._input=this._input.slice(t[0].length),this.matched+=t[0],n=this.performAction.call(this,this.yy,this,e,this.conditionStack[this.conditionStack.length-1]),this.done&&this._input&&(this.done=!1),n)return n;if(this._backtrack){for(var a in i)this[a]=i[a];return!1}return!1},next:function(){if(this.done)return this.EOF;var t,e,n,r;this._input||(this.done=!0),this._more||(this.yytext="",this.match="");for(var i=this._currentRules(),a=0;ae[0].length)){if(e=n,r=a,this.options.backtrack_lexer){if(!1!==(t=this.test_match(n,i[a])))return t;if(this._backtrack){e=!1;continue}return!1}if(!this.options.flex)break}return e?!1!==(t=this.test_match(e,i[r]))&&t:""===this._input?this.EOF:this.parseError("Lexical error on line "+(this.yylineno+1)+". 
Unrecognized text.\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},lex:function(){var t=this.next();return t||this.lex()},begin:function(t){this.conditionStack.push(t)},popState:function(){return this.conditionStack.length-1>0?this.conditionStack.pop():this.conditionStack[0]},_currentRules:function(){return this.conditionStack.length&&this.conditionStack[this.conditionStack.length-1]?this.conditions[this.conditionStack[this.conditionStack.length-1]].rules:this.conditions.INITIAL.rules},topState:function(t){return(t=this.conditionStack.length-1-Math.abs(t||0))>=0?this.conditionStack[t]:"INITIAL"},pushState:function(t){this.begin(t)},stateStackSize:function(){return this.conditionStack.length},options:{"case-insensitive":!0},performAction:function(t,e,n,r){switch(n){case 0:return this.begin("open_directive"),30;case 1:return this.begin("type_directive"),31;case 2:return this.popState(),this.begin("arg_directive"),15;case 3:return this.popState(),this.popState(),33;case 4:return 32;case 5:case 6:break;case 7:return 11;case 8:break;case 9:return 9;case 10:return 29;case 11:return 4;case 12:return 23;case 13:return 24;case 14:return 25;case 15:return 26;case 16:return 23;case 17:return 24;case 18:return 25;case 19:return 27;case 20:return 28;case 21:case 22:return 27;case 23:return 20;case 24:return e.yytext[0];case 25:return 6}},rules:[/^(?:%%\{)/i,/^(?:((?:(?!\}%%)[^:.])*))/i,/^(?::)/i,/^(?:\}%%)/i,/^(?:((?:(?!\}%%).|\n)*))/i,/^(?:%(?!\{)[^\n]*)/i,/^(?:[^\}]%%[^\n]*)/i,/^(?:[\n]+)/i,/^(?:\s+)/i,/^(?:[\s]+)/i,/^(?:"[^"]*")/i,/^(?:erDiagram\b)/i,/^(?:\|o\b)/i,/^(?:\}o\b)/i,/^(?:\}\|)/i,/^(?:\|\|)/i,/^(?:o\|)/i,/^(?:o\{)/i,/^(?:\|\{)/i,/^(?:\.\.)/i,/^(?:--)/i,/^(?:\.-)/i,/^(?:-\.)/i,/^(?:[A-Za-z][A-Za-z0-9\-_]*)/i,/^(?:.)/i,/^(?:$)/i],conditions:{open_directive:{rules:[1],inclusive:!1},type_directive:{rules:[2,3],inclusive:!1},arg_directive:{rules:[3,4],inclusive:!1},INITIAL:{rules:[0,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25],inclusive:!0}}};function g(){this.yy={}}return d.lexer=p,g.prototype=d,d.Parser=g,new g}();e.parser=i,e.Parser=i.Parser,e.parse=function(){return i.parse.apply(i,arguments)},e.main=function(r){r[1]||(console.log("Usage: "+r[0]+" FILE"),t.exit(1));var i=n(19).readFileSync(n(20).normalize(r[1]),"utf8");return e.parser.parse(i)},n.c[n.s]===r&&e.main(t.argv.slice(1))}).call(this,n(14),n(7)(t))},function(t,e,n){"use strict";var r;Object.defineProperty(e,"__esModule",{value:!0}),function(t){t[t.ALL=0]="ALL",t[t.RGB=1]="RGB",t[t.HSL=2]="HSL"}(r||(r={})),e.TYPE=r},function(t,e,n){"use strict";var r=n(10);t.exports=i;function i(t){this._isDirected=!r.has(t,"directed")||t.directed,this._isMultigraph=!!r.has(t,"multigraph")&&t.multigraph,this._isCompound=!!r.has(t,"compound")&&t.compound,this._label=void 0,this._defaultNodeLabelFn=r.constant(void 0),this._defaultEdgeLabelFn=r.constant(void 0),this._nodes={},this._isCompound&&(this._parent={},this._children={},this._children["\0"]={}),this._in={},this._preds={},this._out={},this._sucs={},this._edgeObjs={},this._edgeLabels={}}function a(t,e){t[e]?t[e]++:t[e]=1}function o(t,e){--t[e]||delete t[e]}function s(t,e,n,i){var a=""+e,o=""+n;if(!t&&a>o){var s=a;a=o,o=s}return a+""+o+""+(r.isUndefined(i)?"\0":i)}function c(t,e,n,r){var i=""+e,a=""+n;if(!t&&i>a){var o=i;i=a,a=o}var s={v:i,w:a};return r&&(s.name=r),s}function u(t,e){return s(t,e.v,e.w,e.name)}i.prototype._nodeCount=0,i.prototype._edgeCount=0,i.prototype.isDirected=function(){return this._isDirected},i.prototype.isMultigraph=function(){return 
this._isMultigraph},i.prototype.isCompound=function(){return this._isCompound},i.prototype.setGraph=function(t){return this._label=t,this},i.prototype.graph=function(){return this._label},i.prototype.setDefaultNodeLabel=function(t){return r.isFunction(t)||(t=r.constant(t)),this._defaultNodeLabelFn=t,this},i.prototype.nodeCount=function(){return this._nodeCount},i.prototype.nodes=function(){return r.keys(this._nodes)},i.prototype.sources=function(){var t=this;return r.filter(this.nodes(),(function(e){return r.isEmpty(t._in[e])}))},i.prototype.sinks=function(){var t=this;return r.filter(this.nodes(),(function(e){return r.isEmpty(t._out[e])}))},i.prototype.setNodes=function(t,e){var n=arguments,i=this;return r.each(t,(function(t){n.length>1?i.setNode(t,e):i.setNode(t)})),this},i.prototype.setNode=function(t,e){return r.has(this._nodes,t)?(arguments.length>1&&(this._nodes[t]=e),this):(this._nodes[t]=arguments.length>1?e:this._defaultNodeLabelFn(t),this._isCompound&&(this._parent[t]="\0",this._children[t]={},this._children["\0"][t]=!0),this._in[t]={},this._preds[t]={},this._out[t]={},this._sucs[t]={},++this._nodeCount,this)},i.prototype.node=function(t){return this._nodes[t]},i.prototype.hasNode=function(t){return r.has(this._nodes,t)},i.prototype.removeNode=function(t){var e=this;if(r.has(this._nodes,t)){var n=function(t){e.removeEdge(e._edgeObjs[t])};delete this._nodes[t],this._isCompound&&(this._removeFromParentsChildList(t),delete this._parent[t],r.each(this.children(t),(function(t){e.setParent(t)})),delete this._children[t]),r.each(r.keys(this._in[t]),n),delete this._in[t],delete this._preds[t],r.each(r.keys(this._out[t]),n),delete this._out[t],delete this._sucs[t],--this._nodeCount}return this},i.prototype.setParent=function(t,e){if(!this._isCompound)throw new Error("Cannot set parent in a non-compound graph");if(r.isUndefined(e))e="\0";else{for(var n=e+="";!r.isUndefined(n);n=this.parent(n))if(n===t)throw new Error("Setting "+e+" as parent of "+t+" would create a cycle");this.setNode(e)}return this.setNode(t),this._removeFromParentsChildList(t),this._parent[t]=e,this._children[e][t]=!0,this},i.prototype._removeFromParentsChildList=function(t){delete this._children[this._parent[t]][t]},i.prototype.parent=function(t){if(this._isCompound){var e=this._parent[t];if("\0"!==e)return e}},i.prototype.children=function(t){if(r.isUndefined(t)&&(t="\0"),this._isCompound){var e=this._children[t];if(e)return r.keys(e)}else{if("\0"===t)return this.nodes();if(this.hasNode(t))return[]}},i.prototype.predecessors=function(t){var e=this._preds[t];if(e)return r.keys(e)},i.prototype.successors=function(t){var e=this._sucs[t];if(e)return r.keys(e)},i.prototype.neighbors=function(t){var e=this.predecessors(t);if(e)return r.union(e,this.successors(t))},i.prototype.isLeaf=function(t){return 0===(this.isDirected()?this.successors(t):this.neighbors(t)).length},i.prototype.filterNodes=function(t){var e=new this.constructor({directed:this._isDirected,multigraph:this._isMultigraph,compound:this._isCompound});e.setGraph(this.graph());var n=this;r.each(this._nodes,(function(n,r){t(r)&&e.setNode(r,n)})),r.each(this._edgeObjs,(function(t){e.hasNode(t.v)&&e.hasNode(t.w)&&e.setEdge(t,n.edge(t))}));var i={};return this._isCompound&&r.each(e.nodes(),(function(t){e.setParent(t,function t(r){var a=n.parent(r);return void 0===a||e.hasNode(a)?(i[r]=a,a):a in i?i[a]:t(a)}(t))})),e},i.prototype.setDefaultEdgeLabel=function(t){return 
r.isFunction(t)||(t=r.constant(t)),this._defaultEdgeLabelFn=t,this},i.prototype.edgeCount=function(){return this._edgeCount},i.prototype.edges=function(){return r.values(this._edgeObjs)},i.prototype.setPath=function(t,e){var n=this,i=arguments;return r.reduce(t,(function(t,r){return i.length>1?n.setEdge(t,r,e):n.setEdge(t,r),r})),this},i.prototype.setEdge=function(){var t,e,n,i,o=!1,u=arguments[0];"object"==typeof u&&null!==u&&"v"in u?(t=u.v,e=u.w,n=u.name,2===arguments.length&&(i=arguments[1],o=!0)):(t=u,e=arguments[1],n=arguments[3],arguments.length>2&&(i=arguments[2],o=!0)),t=""+t,e=""+e,r.isUndefined(n)||(n=""+n);var l=s(this._isDirected,t,e,n);if(r.has(this._edgeLabels,l))return o&&(this._edgeLabels[l]=i),this;if(!r.isUndefined(n)&&!this._isMultigraph)throw new Error("Cannot set a named edge when isMultigraph = false");this.setNode(t),this.setNode(e),this._edgeLabels[l]=o?i:this._defaultEdgeLabelFn(t,e,n);var h=c(this._isDirected,t,e,n);return t=h.v,e=h.w,Object.freeze(h),this._edgeObjs[l]=h,a(this._preds[e],t),a(this._sucs[t],e),this._in[e][l]=h,this._out[t][l]=h,this._edgeCount++,this},i.prototype.edge=function(t,e,n){var r=1===arguments.length?u(this._isDirected,arguments[0]):s(this._isDirected,t,e,n);return this._edgeLabels[r]},i.prototype.hasEdge=function(t,e,n){var i=1===arguments.length?u(this._isDirected,arguments[0]):s(this._isDirected,t,e,n);return r.has(this._edgeLabels,i)},i.prototype.removeEdge=function(t,e,n){var r=1===arguments.length?u(this._isDirected,arguments[0]):s(this._isDirected,t,e,n),i=this._edgeObjs[r];return i&&(t=i.v,e=i.w,delete this._edgeLabels[r],delete this._edgeObjs[r],o(this._preds[e],t),o(this._sucs[t],e),delete this._in[e][r],delete this._out[t][r],this._edgeCount--),this},i.prototype.inEdges=function(t,e){var n=this._in[t];if(n){var i=r.values(n);return e?r.filter(i,(function(t){return t.v===e})):i}},i.prototype.outEdges=function(t,e){var n=this._out[t];if(n){var i=r.values(n);return e?r.filter(i,(function(t){return t.w===e})):i}},i.prototype.nodeEdges=function(t,e){var n=this.inEdges(t,e);if(n)return n.concat(this.outEdges(t,e))}},function(t,e,n){var r=n(33)(n(16),"Map");t.exports=r},function(t,e,n){var r=n(217),i=n(224),a=n(226),o=n(227),s=n(228);function c(t){var e=-1,n=null==t?0:t.length;for(this.clear();++e-1&&t%1==0&&t<=9007199254740991}},function(t,e,n){(function(t){var r=n(109),i=e&&!e.nodeType&&e,a=i&&"object"==typeof t&&t&&!t.nodeType&&t,o=a&&a.exports===i&&r.process,s=function(){try{var t=a&&a.require&&a.require("util").types;return t||o&&o.binding&&o.binding("util")}catch(t){}}();t.exports=s}).call(this,n(7)(t))},function(t,e,n){var r=n(62),i=n(234),a=Object.prototype.hasOwnProperty;t.exports=function(t){if(!r(t))return i(t);var e=[];for(var n in Object(t))a.call(t,n)&&"constructor"!=n&&e.push(n);return e}},function(t,e,n){var r=n(116),i=n(117),a=Object.prototype.propertyIsEnumerable,o=Object.getOwnPropertySymbols,s=o?function(t){return null==t?[]:(t=Object(t),r(o(t),(function(e){return a.call(t,e)})))}:i;t.exports=s},function(t,e){t.exports=function(t,e){for(var n=-1,r=e.length,i=t.length;++n0&&a(l)?n>1?t(l,n-1,a,o,s):r(s,l):o||(s[s.length]=l)}return s}},function(t,e,n){var r=n(42);t.exports=function(t,e,n){for(var i=-1,a=t.length;++i4,u=c?1:17,l=c?8:4,h=s?0:-1,f=c?255:15;return i.default.set({r:(r>>l*(h+3)&f)*u,g:(r>>l*(h+2)&f)*u,b:(r>>l*(h+1)&f)*u,a:s?(r&f)*u/255:1},t)}}},stringify:function(t){return 
t.a<1?"#"+a.DEC2HEX[Math.round(t.r)]+a.DEC2HEX[Math.round(t.g)]+a.DEC2HEX[Math.round(t.b)]+r.default.unit.frac2hex(t.a):"#"+a.DEC2HEX[Math.round(t.r)]+a.DEC2HEX[Math.round(t.g)]+a.DEC2HEX[Math.round(t.b)]}};e.default=o},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(9),i=n(45),a=n(15);e.default=function(t,e,n,o){void 0===o&&(o=1);var s=i.default.set({h:r.default.channel.clamp.h(t),s:r.default.channel.clamp.s(e),l:r.default.channel.clamp.l(n),a:r.default.channel.clamp.a(o)});return a.default.stringify(s)}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(29);e.default=function(t){return r.default(t,"a")}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(9),i=n(15);e.default=function(t){var e=i.default.parse(t),n=e.r,a=e.g,o=e.b,s=.2126*r.default.channel.toLinear(n)+.7152*r.default.channel.toLinear(a)+.0722*r.default.channel.toLinear(o);return r.default.lang.round(s)}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(102);e.default=function(t){return r.default(t)>=.5}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(32);e.default=function(t,e){return r.default(t,"a",e)}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(32);e.default=function(t,e){return r.default(t,"a",-e)}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(15),i=n(52);e.default=function(t,e){var n=r.default.parse(t),a={};for(var o in e)e[o]&&(a[o]=n[o]+e[o]);return i.default(t,a)}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(15),i=n(51);e.default=function(t,e,n){void 0===n&&(n=50);var a=r.default.parse(t),o=a.r,s=a.g,c=a.b,u=a.a,l=r.default.parse(e),h=l.r,f=l.g,d=l.b,p=l.a,g=n/100,y=2*g-1,v=u-p,m=((y*v==-1?y:(y+v)/(1+y*v))+1)/2,b=1-m,x=o*m+h*b,_=s*m+f*b,k=c*m+d*b,w=u*g+p*(1-g);return i.default(x,_,k,w)}},function(t,e,n){var r=n(53),i=n(79),a=n(58),o=n(229),s=n(235),c=n(114),u=n(115),l=n(238),h=n(239),f=n(119),d=n(240),p=n(41),g=n(244),y=n(245),v=n(124),m=n(5),b=n(39),x=n(249),_=n(11),k=n(251),w=n(30),E={};E["[object Arguments]"]=E["[object Array]"]=E["[object ArrayBuffer]"]=E["[object DataView]"]=E["[object Boolean]"]=E["[object Date]"]=E["[object Float32Array]"]=E["[object Float64Array]"]=E["[object Int8Array]"]=E["[object Int16Array]"]=E["[object Int32Array]"]=E["[object Map]"]=E["[object Number]"]=E["[object Object]"]=E["[object RegExp]"]=E["[object Set]"]=E["[object String]"]=E["[object Symbol]"]=E["[object Uint8Array]"]=E["[object Uint8ClampedArray]"]=E["[object Uint16Array]"]=E["[object Uint32Array]"]=!0,E["[object Error]"]=E["[object Function]"]=E["[object WeakMap]"]=!1,t.exports=function t(e,n,T,C,S,A){var M,O=1&n,D=2&n,N=4&n;if(T&&(M=S?T(e,C,S,A):T(e)),void 0!==M)return M;if(!_(e))return e;var B=m(e);if(B){if(M=g(e),!O)return u(e,M)}else{var L=p(e),F="[object Function]"==L||"[object GeneratorFunction]"==L;if(b(e))return c(e,O);if("[object Object]"==L||"[object Arguments]"==L||F&&!S){if(M=D||F?{}:v(e),!O)return D?h(e,s(M,e)):l(e,o(M,e))}else{if(!E[L])return S?e:{};M=y(e,L,O)}}A||(A=new r);var P=A.get(e);if(P)return P;A.set(e,M),k(e)?e.forEach((function(r){M.add(t(r,n,T,r,e,A))})):x(e)&&e.forEach((function(r,i){M.set(i,t(r,n,T,i,e,A))}));var I=N?D?d:f:D?keysIn:w,j=B?void 0:I(e);return i(j||e,(function(r,i){j&&(r=e[i=r]),a(M,i,t(r,n,T,i,e,A))})),M}},function(t,e,n){(function(e){var n="object"==typeof 
e&&e&&e.Object===Object&&e;t.exports=n}).call(this,n(211))},function(t,e){var n=Function.prototype.toString;t.exports=function(t){if(null!=t){try{return n.call(t)}catch(t){}try{return t+""}catch(t){}}return""}},function(t,e,n){var r=n(33),i=function(){try{var t=r(Object,"defineProperty");return t({},"",{}),t}catch(t){}}();t.exports=i},function(t,e,n){var r=n(230),i=n(47),a=n(5),o=n(39),s=n(60),c=n(48),u=Object.prototype.hasOwnProperty;t.exports=function(t,e){var n=a(t),l=!n&&i(t),h=!n&&!l&&o(t),f=!n&&!l&&!h&&c(t),d=n||l||h||f,p=d?r(t.length,String):[],g=p.length;for(var y in t)!e&&!u.call(t,y)||d&&("length"==y||h&&("offset"==y||"parent"==y)||f&&("buffer"==y||"byteLength"==y||"byteOffset"==y)||s(y,g))||p.push(y);return p}},function(t,e){t.exports=function(t,e){return function(n){return t(e(n))}}},function(t,e,n){(function(t){var r=n(16),i=e&&!e.nodeType&&e,a=i&&"object"==typeof t&&t&&!t.nodeType&&t,o=a&&a.exports===i?r.Buffer:void 0,s=o?o.allocUnsafe:void 0;t.exports=function(t,e){if(e)return t.slice();var n=t.length,r=s?s(n):new t.constructor(n);return t.copy(r),r}}).call(this,n(7)(t))},function(t,e){t.exports=function(t,e){var n=-1,r=t.length;for(e||(e=Array(r));++nl))return!1;var f=c.get(t);if(f&&c.get(e))return f==e;var d=-1,p=!0,g=2&n?new r:void 0;for(c.set(t,e),c.set(e,t);++d0&&(a=c.removeMin(),(o=s[a]).distance!==Number.POSITIVE_INFINITY);)r(a).forEach(u);return s}(t,String(e),n||a,r||function(e){return t.outEdges(e)})};var a=r.constant(1)},function(t,e,n){var r=n(10);function i(){this._arr=[],this._keyIndices={}}t.exports=i,i.prototype.size=function(){return this._arr.length},i.prototype.keys=function(){return this._arr.map((function(t){return t.key}))},i.prototype.has=function(t){return r.has(this._keyIndices,t)},i.prototype.priority=function(t){var e=this._keyIndices[t];if(void 0!==e)return this._arr[e].priority},i.prototype.min=function(){if(0===this.size())throw new Error("Queue underflow");return this._arr[0].key},i.prototype.add=function(t,e){var n=this._keyIndices;if(t=String(t),!r.has(n,t)){var i=this._arr,a=i.length;return n[t]=a,i.push({key:t,priority:e}),this._decrease(a),!0}return!1},i.prototype.removeMin=function(){this._swap(0,this._arr.length-1);var t=this._arr.pop();return delete this._keyIndices[t.key],this._heapify(0),t.key},i.prototype.decrease=function(t,e){var n=this._keyIndices[t];if(e>this._arr[n].priority)throw new Error("New priority is greater than current priority. 
Key: "+t+" Old: "+this._arr[n].priority+" New: "+e);this._arr[n].priority=e,this._decrease(n)},i.prototype._heapify=function(t){var e=this._arr,n=2*t,r=n+1,i=t;n>1].priority2?e[2]:void 0;for(u&&a(e[0],e[1],u)&&(r=1);++n1&&o.sort((function(t,e){var r=t.x-n.x,i=t.y-n.y,a=Math.sqrt(r*r+i*i),o=e.x-n.x,s=e.y-n.y,c=Math.sqrt(o*o+s*s);return aMath.abs(o)*u?(s<0&&(u=-u),n=0===s?0:u*o/s,r=u):(o<0&&(c=-c),n=c,r=0===o?0:c*s/o);return{x:i+n,y:a+r}}},function(t,e,n){t.exports=function t(e){"use strict";var n=/^\0+/g,r=/[\0\r\f]/g,i=/: */g,a=/zoo|gra/,o=/([,: ])(transform)/g,s=/,+\s*(?![^(]*[)])/g,c=/ +\s*(?![^(]*[)])/g,u=/ *[\0] */g,l=/,\r+?/g,h=/([\t\r\n ])*\f?&/g,f=/:global\(((?:[^\(\)\[\]]*|\[.*\]|\([^\(\)]*\))*)\)/g,d=/\W+/g,p=/@(k\w+)\s*(\S*)\s*/,g=/::(place)/g,y=/:(read-only)/g,v=/\s+(?=[{\];=:>])/g,m=/([[}=:>])\s+/g,b=/(\{[^{]+?);(?=\})/g,x=/\s{2,}/g,_=/([^\(])(:+) */g,k=/[svh]\w+-[tblr]{2}/,w=/\(\s*(.*)\s*\)/g,E=/([\s\S]*?);/g,T=/-self|flex-/g,C=/[^]*?(:[rp][el]a[\w-]+)[^]*/,S=/stretch|:\s*\w+\-(?:conte|avail)/,A=/([^-])(image-set\()/,M="-webkit-",O="-moz-",D="-ms-",N=1,B=1,L=0,F=1,P=1,I=1,j=0,R=0,Y=0,z=[],U=[],$=0,W=null,V=0,H=1,G="",q="",X="";function Z(t,e,i,a,o){for(var s,c,l=0,h=0,f=0,d=0,v=0,m=0,b=0,x=0,k=0,E=0,T=0,C=0,S=0,A=0,O=0,D=0,j=0,U=0,W=0,Q=i.length,it=Q-1,at="",ot="",st="",ct="",ut="",lt="";O0&&(ot=ot.replace(r,"")),ot.trim().length>0)){switch(b){case 32:case 9:case 59:case 13:case 10:break;default:ot+=i.charAt(O)}b=59}if(1===j)switch(b){case 123:case 125:case 59:case 34:case 39:case 40:case 41:case 44:j=0;case 9:case 13:case 10:case 32:break;default:for(j=0,W=O,v=b,O--,b=59;W0&&(++O,b=v);case 123:W=Q}}switch(b){case 123:for(v=(ot=ot.trim()).charCodeAt(0),T=1,W=++O;O0&&(ot=ot.replace(r,"")),m=ot.charCodeAt(1)){case 100:case 109:case 115:case 45:s=e;break;default:s=z}if(W=(st=Z(e,s,st,m,o+1)).length,Y>0&&0===W&&(W=ot.length),$>0&&(c=nt(3,st,s=J(z,ot,U),e,B,N,W,m,o,a),ot=s.join(""),void 0!==c&&0===(W=(st=c.trim()).length)&&(m=0,st="")),W>0)switch(m){case 115:ot=ot.replace(w,et);case 100:case 109:case 45:st=ot+"{"+st+"}";break;case 107:st=(ot=ot.replace(p,"$1 $2"+(H>0?G:"")))+"{"+st+"}",st=1===P||2===P&&tt("@"+st,3)?"@"+M+st+"@"+st:"@"+st;break;default:st=ot+st,112===a&&(ct+=st,st="")}else st="";break;default:st=Z(e,J(e,ot,U),st,a,o+1)}ut+=st,C=0,j=0,A=0,D=0,U=0,S=0,ot="",st="",b=i.charCodeAt(++O);break;case 125:case 59:if((W=(ot=(D>0?ot.replace(r,""):ot).trim()).length)>1)switch(0===A&&(45===(v=ot.charCodeAt(0))||v>96&&v<123)&&(W=(ot=ot.replace(" ",":")).length),$>0&&void 0!==(c=nt(1,ot,e,t,B,N,ct.length,a,o,a))&&0===(W=(ot=c.trim()).length)&&(ot="\0\0"),v=ot.charCodeAt(0),m=ot.charCodeAt(1),v){case 0:break;case 64:if(105===m||99===m){lt+=ot+i.charAt(O);break}default:if(58===ot.charCodeAt(W-1))break;ct+=K(ot,v,m,ot.charCodeAt(2))}C=0,j=0,A=0,D=0,U=0,ot="",b=i.charCodeAt(++O)}}switch(b){case 13:case 10:if(h+d+f+l+R===0)switch(E){case 41:case 39:case 34:case 64:case 126:case 62:case 42:case 43:case 47:case 45:case 58:case 44:case 59:case 123:case 125:break;default:A>0&&(j=1)}47===h?h=0:F+C===0&&107!==a&&ot.length>0&&(D=1,ot+="\0"),$*V>0&&nt(0,ot,e,t,B,N,ct.length,a,o,a),N=1,B++;break;case 59:case 125:if(h+d+f+l===0){N++;break}default:switch(N++,at=i.charAt(O),b){case 9:case 32:if(d+l+h===0)switch(x){case 44:case 58:case 9:case 32:at="";break;default:32!==b&&(at=" ")}break;case 0:at="\\0";break;case 12:at="\\f";break;case 11:at="\\v";break;case 38:d+h+l===0&&F>0&&(U=1,D=1,at="\f"+at);break;case 108:if(d+h+l+L===0&&A>0)switch(O-A){case 2:112===x&&58===i.charCodeAt(O-3)&&(L=x);case 
8:111===k&&(L=k)}break;case 58:d+h+l===0&&(A=O);break;case 44:h+f+d+l===0&&(D=1,at+="\r");break;case 34:case 39:0===h&&(d=d===b?0:0===d?b:d);break;case 91:d+h+f===0&&l++;break;case 93:d+h+f===0&&l--;break;case 41:d+h+l===0&&f--;break;case 40:if(d+h+l===0){if(0===C)switch(2*x+3*k){case 533:break;default:T=0,C=1}f++}break;case 64:h+f+d+l+A+S===0&&(S=1);break;case 42:case 47:if(d+l+f>0)break;switch(h){case 0:switch(2*b+3*i.charCodeAt(O+1)){case 235:h=47;break;case 220:W=O,h=42}break;case 42:47===b&&42===x&&W+2!==O&&(33===i.charCodeAt(W+2)&&(ct+=i.substring(W,O+1)),at="",h=0)}}if(0===h){if(F+d+l+S===0&&107!==a&&59!==b)switch(b){case 44:case 126:case 62:case 43:case 41:case 40:if(0===C){switch(x){case 9:case 32:case 10:case 13:at+="\0";break;default:at="\0"+at+(44===b?"":"\0")}D=1}else switch(b){case 40:A+7===O&&108===x&&(A=0),C=++T;break;case 41:0==(C=--T)&&(D=1,at+="\0")}break;case 9:case 32:switch(x){case 0:case 123:case 125:case 59:case 44:case 12:case 9:case 32:case 10:case 13:break;default:0===C&&(D=1,at+="\0")}}ot+=at,32!==b&&9!==b&&(E=b)}}k=x,x=b,O++}if(W=ct.length,Y>0&&0===W&&0===ut.length&&0===e[0].length==0&&(109!==a||1===e.length&&(F>0?q:X)===e[0])&&(W=e.join(",").length+2),W>0){if(s=0===F&&107!==a?function(t){for(var e,n,i=0,a=t.length,o=Array(a);i1)){if(f=c.charCodeAt(c.length-1),d=n.charCodeAt(0),e="",0!==l)switch(f){case 42:case 126:case 62:case 43:case 32:case 40:break;default:e=" "}switch(d){case 38:n=e+q;case 126:case 62:case 43:case 32:case 41:case 40:break;case 91:n=e+n+q;break;case 58:switch(2*n.charCodeAt(1)+3*n.charCodeAt(2)){case 530:if(I>0){n=e+n.substring(8,h-1);break}default:(l<1||s[l-1].length<1)&&(n=e+q+n)}break;case 44:e="";default:n=h>1&&n.indexOf(":")>0?e+n.replace(_,"$1"+q+"$2"):e+n+q}c+=n}o[i]=c.replace(r,"").trim()}return o}(e):e,$>0&&void 0!==(c=nt(2,ct,s,t,B,N,W,a,o,a))&&0===(ct=c).length)return lt+ct+ut;if(ct=s.join(",")+"{"+ct+"}",P*L!=0){switch(2!==P||tt(ct,2)||(L=0),L){case 111:ct=ct.replace(y,":-moz-$1")+ct;break;case 112:ct=ct.replace(g,"::-webkit-input-$1")+ct.replace(g,"::-moz-$1")+ct.replace(g,":-ms-input-$1")+ct}L=0}}return lt+ct+ut}function J(t,e,n){var r=e.trim().split(l),i=r,a=r.length,o=t.length;switch(o){case 0:case 1:for(var s=0,c=0===o?"":t[0]+" ";s0&&F>0)return i.replace(f,"$1").replace(h,"$1"+X);break;default:return t.trim()+i.replace(h,"$1"+t.trim())}default:if(n*F>0&&i.indexOf("\f")>0)return i.replace(h,(58===t.charCodeAt(0)?"":"$1")+t.trim())}return t+i}function K(t,e,n,r){var u,l=0,h=t+";",f=2*e+3*n+4*r;if(944===f)return function(t){var e=t.length,n=t.indexOf(":",9)+1,r=t.substring(0,n).trim(),i=t.substring(n,e-1).trim();switch(t.charCodeAt(9)*H){case 0:break;case 45:if(110!==t.charCodeAt(10))break;default:var a=i.split((i="",s)),o=0;for(n=0,e=a.length;o64&&h<90||h>96&&h<123||95===h||45===h&&45!==u.charCodeAt(1)))switch(isNaN(parseFloat(u))+(-1!==u.indexOf("("))){case 1:switch(u){case"infinite":case"alternate":case"backwards":case"running":case"normal":case"forwards":case"both":case"none":case"linear":case"ease":case"ease-in":case"ease-out":case"ease-in-out":case"paused":case"reverse":case"alternate-reverse":case"inherit":case"initial":case"unset":case"step-start":case"step-end":break;default:u+=G}}l[n++]=u}i+=(0===o?"":",")+l.join(" ")}}return i=r+i+";",1===P||2===P&&tt(i,1)?M+i+i:i}(h);if(0===P||2===P&&!tt(h,1))return h;switch(f){case 1015:return 97===h.charCodeAt(10)?M+h+h:h;case 951:return 116===h.charCodeAt(3)?M+h+h:h;case 963:return 110===h.charCodeAt(5)?M+h+h:h;case 1009:if(100!==h.charCodeAt(4))break;case 969:case 942:return 
M+h+h;case 978:return M+h+O+h+h;case 1019:case 983:return M+h+O+h+D+h+h;case 883:return 45===h.charCodeAt(8)?M+h+h:h.indexOf("image-set(",11)>0?h.replace(A,"$1-webkit-$2")+h:h;case 932:if(45===h.charCodeAt(4))switch(h.charCodeAt(5)){case 103:return M+"box-"+h.replace("-grow","")+M+h+D+h.replace("grow","positive")+h;case 115:return M+h+D+h.replace("shrink","negative")+h;case 98:return M+h+D+h.replace("basis","preferred-size")+h}return M+h+D+h+h;case 964:return M+h+D+"flex-"+h+h;case 1023:if(99!==h.charCodeAt(8))break;return u=h.substring(h.indexOf(":",15)).replace("flex-","").replace("space-between","justify"),M+"box-pack"+u+M+h+D+"flex-pack"+u+h;case 1005:return a.test(h)?h.replace(i,":"+M)+h.replace(i,":"+O)+h:h;case 1e3:switch(l=(u=h.substring(13).trim()).indexOf("-")+1,u.charCodeAt(0)+u.charCodeAt(l)){case 226:u=h.replace(k,"tb");break;case 232:u=h.replace(k,"tb-rl");break;case 220:u=h.replace(k,"lr");break;default:return h}return M+h+D+u+h;case 1017:if(-1===h.indexOf("sticky",9))return h;case 975:switch(l=(h=t).length-10,f=(u=(33===h.charCodeAt(l)?h.substring(0,l):h).substring(t.indexOf(":",7)+1).trim()).charCodeAt(0)+(0|u.charCodeAt(7))){case 203:if(u.charCodeAt(8)<111)break;case 115:h=h.replace(u,M+u)+";"+h;break;case 207:case 102:h=h.replace(u,M+(f>102?"inline-":"")+"box")+";"+h.replace(u,M+u)+";"+h.replace(u,D+u+"box")+";"+h}return h+";";case 938:if(45===h.charCodeAt(5))switch(h.charCodeAt(6)){case 105:return u=h.replace("-items",""),M+h+M+"box-"+u+D+"flex-"+u+h;case 115:return M+h+D+"flex-item-"+h.replace(T,"")+h;default:return M+h+D+"flex-line-pack"+h.replace("align-content","").replace(T,"")+h}break;case 973:case 989:if(45!==h.charCodeAt(3)||122===h.charCodeAt(4))break;case 931:case 953:if(!0===S.test(t))return 115===(u=t.substring(t.indexOf(":")+1)).charCodeAt(0)?K(t.replace("stretch","fill-available"),e,n,r).replace(":fill-available",":stretch"):h.replace(u,M+u)+h.replace(u,O+u.replace("fill-",""))+h;break;case 962:if(h=M+h+(102===h.charCodeAt(5)?D+h:"")+h,n+r===211&&105===h.charCodeAt(13)&&h.indexOf("transform",10)>0)return h.substring(0,h.indexOf(";",27)+1).replace(o,"$1-webkit-$2")+h}return h}function tt(t,e){var n=t.indexOf(1===e?":":"{"),r=t.substring(0,3!==e?n:10),i=t.substring(n+1,t.length-1);return W(2!==e?r:r.replace(C,"$1"),i,e)}function et(t,e){var n=K(e,e.charCodeAt(0),e.charCodeAt(1),e.charCodeAt(2));return n!==e+";"?n.replace(E," or ($1)").substring(4):"("+e+")"}function nt(t,e,n,r,i,a,o,s,c,u){for(var l,h=0,f=e;h<$;++h)switch(l=U[h].call(at,t,f,n,r,i,a,o,s,c,u)){case void 0:case!1:case!0:case null:break;default:f=l}if(f!==e)return f}function rt(t,e,n,r){for(var i=e+1;i0&&(G=i.replace(d,91===a?"":"-")),a=1,1===F?X=i:q=i;var o,s=[X];$>0&&void 0!==(o=nt(-1,n,s,s,B,N,0,0,0,0))&&"string"==typeof o&&(n=o);var c=Z(z,s,n,0,0);return $>0&&void 0!==(o=nt(-2,c,s,s,B,N,c.length,0,0,0))&&"string"!=typeof(c=o)&&(a=0),G="",X="",q="",L=0,B=1,N=1,j*a==0?c:function(t){return t.replace(r,"").replace(v,"").replace(m,"$1").replace(b,"$1").replace(x," ")}(c)}return at.use=function t(e){switch(e){case void 0:case null:$=U.length=0;break;default:if("function"==typeof e)U[$++]=e;else if("object"==typeof e)for(var n=0,r=e.length;n=255?255:t<0?0:t},g:function(t){return t>=255?255:t<0?0:t},b:function(t){return t>=255?255:t<0?0:t},h:function(t){return t%360},s:function(t){return t>=100?100:t<0?0:t},l:function(t){return t>=100?100:t<0?0:t},a:function(t){return t>=1?1:t<0?0:t}},toLinear:function(t){var e=t/255;return 
t>.03928?Math.pow((e+.055)/1.055,2.4):e/12.92},hue2rgb:function(t,e,n){return n<0&&(n+=1),n>1&&(n-=1),n<1/6?t+6*(e-t)*n:n<.5?e:n<2/3?t+(e-t)*(2/3-n)*6:t},hsl2rgb:function(t,e){var n=t.h,i=t.s,a=t.l;if(100===i)return 2.55*a;n/=360,i/=100;var o=(a/=100)<.5?a*(1+i):a+i-a*i,s=2*a-o;switch(e){case"r":return 255*r.hue2rgb(s,o,n+1/3);case"g":return 255*r.hue2rgb(s,o,n);case"b":return 255*r.hue2rgb(s,o,n-1/3)}},rgb2hsl:function(t,e){var n=t.r,r=t.g,i=t.b;n/=255,r/=255,i/=255;var a=Math.max(n,r,i),o=Math.min(n,r,i),s=(a+o)/2;if("l"===e)return 100*s;if(a===o)return 0;var c=a-o;if("s"===e)return 100*(s>.5?c/(2-a-o):c/(a+o));switch(a){case n:return 60*((r-i)/c+(r1?e:"0"+e},dec2hex:function(t){var e=Math.round(t).toString(16);return e.length>1?e:"0"+e}};e.default=r},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(9),i=n(75),a=n(177),o=function(){function t(t,e){this.color=e,this.changed=!1,this.data=t,this.type=new a.default}return t.prototype.set=function(t,e){return this.color=e,this.changed=!1,this.data=t,this.type.type=i.TYPE.ALL,this},t.prototype._ensureHSL=function(){void 0===this.data.h&&(this.data.h=r.default.channel.rgb2hsl(this.data,"h")),void 0===this.data.s&&(this.data.s=r.default.channel.rgb2hsl(this.data,"s")),void 0===this.data.l&&(this.data.l=r.default.channel.rgb2hsl(this.data,"l"))},t.prototype._ensureRGB=function(){void 0===this.data.r&&(this.data.r=r.default.channel.hsl2rgb(this.data,"r")),void 0===this.data.g&&(this.data.g=r.default.channel.hsl2rgb(this.data,"g")),void 0===this.data.b&&(this.data.b=r.default.channel.hsl2rgb(this.data,"b"))},Object.defineProperty(t.prototype,"r",{get:function(){return this.type.is(i.TYPE.HSL)||void 0===this.data.r?(this._ensureHSL(),r.default.channel.hsl2rgb(this.data,"r")):this.data.r},set:function(t){this.type.set(i.TYPE.RGB),this.changed=!0,this.data.r=t},enumerable:!0,configurable:!0}),Object.defineProperty(t.prototype,"g",{get:function(){return this.type.is(i.TYPE.HSL)||void 0===this.data.g?(this._ensureHSL(),r.default.channel.hsl2rgb(this.data,"g")):this.data.g},set:function(t){this.type.set(i.TYPE.RGB),this.changed=!0,this.data.g=t},enumerable:!0,configurable:!0}),Object.defineProperty(t.prototype,"b",{get:function(){return this.type.is(i.TYPE.HSL)||void 0===this.data.b?(this._ensureHSL(),r.default.channel.hsl2rgb(this.data,"b")):this.data.b},set:function(t){this.type.set(i.TYPE.RGB),this.changed=!0,this.data.b=t},enumerable:!0,configurable:!0}),Object.defineProperty(t.prototype,"h",{get:function(){return this.type.is(i.TYPE.RGB)||void 0===this.data.h?(this._ensureRGB(),r.default.channel.rgb2hsl(this.data,"h")):this.data.h},set:function(t){this.type.set(i.TYPE.HSL),this.changed=!0,this.data.h=t},enumerable:!0,configurable:!0}),Object.defineProperty(t.prototype,"s",{get:function(){return this.type.is(i.TYPE.RGB)||void 0===this.data.s?(this._ensureRGB(),r.default.channel.rgb2hsl(this.data,"s")):this.data.s},set:function(t){this.type.set(i.TYPE.HSL),this.changed=!0,this.data.s=t},enumerable:!0,configurable:!0}),Object.defineProperty(t.prototype,"l",{get:function(){return this.type.is(i.TYPE.RGB)||void 0===this.data.l?(this._ensureRGB(),r.default.channel.rgb2hsl(this.data,"l")):this.data.l},set:function(t){this.type.set(i.TYPE.HSL),this.changed=!0,this.data.l=t},enumerable:!0,configurable:!0}),Object.defineProperty(t.prototype,"a",{get:function(){return this.data.a},set:function(t){this.changed=!0,this.data.a=t},enumerable:!0,configurable:!0}),t}();e.default=o},function(t,e,n){"use 
strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(75),i=function(){function t(){this.type=r.TYPE.ALL}return t.prototype.get=function(){return this.type},t.prototype.set=function(t){if(this.type&&this.type!==t)throw new Error("Cannot change both RGB and HSL channels at the same time");this.type=t},t.prototype.reset=function(){this.type=r.TYPE.ALL},t.prototype.is=function(t){return this.type===t},t}();e.default=i},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(9),i={};e.DEC2HEX=i;for(var a=0;a<=255;a++)i[a]=r.default.unit.dec2hex(a)},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(99),i={colors:{aliceblue:"#f0f8ff",antiquewhite:"#faebd7",aqua:"#00ffff",aquamarine:"#7fffd4",azure:"#f0ffff",beige:"#f5f5dc",bisque:"#ffe4c4",black:"#000000",blanchedalmond:"#ffebcd",blue:"#0000ff",blueviolet:"#8a2be2",brown:"#a52a2a",burlywood:"#deb887",cadetblue:"#5f9ea0",chartreuse:"#7fff00",chocolate:"#d2691e",coral:"#ff7f50",cornflowerblue:"#6495ed",cornsilk:"#fff8dc",crimson:"#dc143c",cyanaqua:"#00ffff",darkblue:"#00008b",darkcyan:"#008b8b",darkgoldenrod:"#b8860b",darkgray:"#a9a9a9",darkgreen:"#006400",darkgrey:"#a9a9a9",darkkhaki:"#bdb76b",darkmagenta:"#8b008b",darkolivegreen:"#556b2f",darkorange:"#ff8c00",darkorchid:"#9932cc",darkred:"#8b0000",darksalmon:"#e9967a",darkseagreen:"#8fbc8f",darkslateblue:"#483d8b",darkslategray:"#2f4f4f",darkslategrey:"#2f4f4f",darkturquoise:"#00ced1",darkviolet:"#9400d3",deeppink:"#ff1493",deepskyblue:"#00bfff",dimgray:"#696969",dimgrey:"#696969",dodgerblue:"#1e90ff",firebrick:"#b22222",floralwhite:"#fffaf0",forestgreen:"#228b22",fuchsia:"#ff00ff",gainsboro:"#dcdcdc",ghostwhite:"#f8f8ff",gold:"#ffd700",goldenrod:"#daa520",gray:"#808080",green:"#008000",greenyellow:"#adff2f",grey:"#808080",honeydew:"#f0fff0",hotpink:"#ff69b4",indianred:"#cd5c5c",indigo:"#4b0082",ivory:"#fffff0",khaki:"#f0e68c",lavender:"#e6e6fa",lavenderblush:"#fff0f5",lawngreen:"#7cfc00",lemonchiffon:"#fffacd",lightblue:"#add8e6",lightcoral:"#f08080",lightcyan:"#e0ffff",lightgoldenrodyellow:"#fafad2",lightgray:"#d3d3d3",lightgreen:"#90ee90",lightgrey:"#d3d3d3",lightpink:"#ffb6c1",lightsalmon:"#ffa07a",lightseagreen:"#20b2aa",lightskyblue:"#87cefa",lightslategray:"#778899",lightslategrey:"#778899",lightsteelblue:"#b0c4de",lightyellow:"#ffffe0",lime:"#00ff00",limegreen:"#32cd32",linen:"#faf0e6",magenta:"#ff00ff",maroon:"#800000",mediumaquamarine:"#66cdaa",mediumblue:"#0000cd",mediumorchid:"#ba55d3",mediumpurple:"#9370db",mediumseagreen:"#3cb371",mediumslateblue:"#7b68ee",mediumspringgreen:"#00fa9a",mediumturquoise:"#48d1cc",mediumvioletred:"#c71585",midnightblue:"#191970",mintcream:"#f5fffa",mistyrose:"#ffe4e1",moccasin:"#ffe4b5",navajowhite:"#ffdead",navy:"#000080",oldlace:"#fdf5e6",olive:"#808000",olivedrab:"#6b8e23",orange:"#ffa500",orangered:"#ff4500",orchid:"#da70d6",palegoldenrod:"#eee8aa",palegreen:"#98fb98",paleturquoise:"#afeeee",palevioletred:"#db7093",papayawhip:"#ffefd5",peachpuff:"#ffdab9",peru:"#cd853f",pink:"#ffc0cb",plum:"#dda0dd",powderblue:"#b0e0e6",purple:"#800080",rebeccapurple:"#663399",red:"#ff0000",rosybrown:"#bc8f8f",royalblue:"#4169e1",saddlebrown:"#8b4513",salmon:"#fa8072",sandybrown:"#f4a460",seagreen:"#2e8b57",seashell:"#fff5ee",sienna:"#a0522d",silver:"#c0c0c0",skyblue:"#87ceeb",slateblue:"#6a5acd",slategray:"#708090",slategrey:"#708090",snow:"#fffafa",springgreen:"#00ff7f",tan:"#d2b48c",teal:"#008080",thistle:"#d8bfd8",transparent:"#00000000",turquoise:"#40e0d0",violet:"#ee82ee",wheat:"#f5deb
3",white:"#ffffff",whitesmoke:"#f5f5f5",yellow:"#ffff00",yellowgreen:"#9acd32"},parse:function(t){t=t.toLowerCase();var e=i.colors[t];if(e)return r.default.parse(e)},stringify:function(t){var e=r.default.stringify(t);for(var n in i.colors)if(i.colors[n]===e)return n}};e.default=i},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(9),i=n(45),a={re:/^rgba?\(\s*?(-?(?:\d+(?:\.\d+)?|(?:\.\d+))(?:e\d+)?(%?))\s*?(?:,|\s)\s*?(-?(?:\d+(?:\.\d+)?|(?:\.\d+))(?:e\d+)?(%?))\s*?(?:,|\s)\s*?(-?(?:\d+(?:\.\d+)?|(?:\.\d+))(?:e\d+)?(%?))(?:\s*?(?:,|\/)\s*?\+?(-?(?:\d+(?:\.\d+)?|(?:\.\d+))(?:e\d+)?(%?)))?\s*?\)$/i,parse:function(t){var e=t.charCodeAt(0);if(114===e||82===e){var n=t.match(a.re);if(n){var o=n[1],s=n[2],c=n[3],u=n[4],l=n[5],h=n[6],f=n[7],d=n[8];return i.default.set({r:r.default.channel.clamp.r(s?2.55*parseFloat(o):parseFloat(o)),g:r.default.channel.clamp.g(u?2.55*parseFloat(c):parseFloat(c)),b:r.default.channel.clamp.b(h?2.55*parseFloat(l):parseFloat(l)),a:f?r.default.channel.clamp.a(d?parseFloat(f)/100:parseFloat(f)):1},t)}}},stringify:function(t){return t.a<1?"rgba("+r.default.lang.round(t.r)+", "+r.default.lang.round(t.g)+", "+r.default.lang.round(t.b)+", "+r.default.lang.round(t.a)+")":"rgb("+r.default.lang.round(t.r)+", "+r.default.lang.round(t.g)+", "+r.default.lang.round(t.b)+")"}};e.default=a},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(9),i=n(45),a={re:/^hsla?\(\s*?(-?(?:\d+(?:\.\d+)?|(?:\.\d+))(?:e-?\d+)?(?:deg|grad|rad|turn)?)\s*?(?:,|\s)\s*?(-?(?:\d+(?:\.\d+)?|(?:\.\d+))(?:e-?\d+)?%)\s*?(?:,|\s)\s*?(-?(?:\d+(?:\.\d+)?|(?:\.\d+))(?:e-?\d+)?%)(?:\s*?(?:,|\/)\s*?\+?(-?(?:\d+(?:\.\d+)?|(?:\.\d+))(?:e-?\d+)?(%)?))?\s*?\)$/i,hueRe:/^(.+?)(deg|grad|rad|turn)$/i,_hue2deg:function(t){var e=t.match(a.hueRe);if(e){var n=e[1];switch(e[2]){case"grad":return r.default.channel.clamp.h(.9*parseFloat(n));case"rad":return r.default.channel.clamp.h(180*parseFloat(n)/Math.PI);case"turn":return r.default.channel.clamp.h(360*parseFloat(n))}}return r.default.channel.clamp.h(parseFloat(t))},parse:function(t){var e=t.charCodeAt(0);if(104===e||72===e){var n=t.match(a.re);if(n){var o=n[1],s=n[2],c=n[3],u=n[4],l=n[5];return i.default.set({h:a._hue2deg(o),s:r.default.channel.clamp.s(parseFloat(s)),l:r.default.channel.clamp.l(parseFloat(c)),a:u?r.default.channel.clamp.a(l?parseFloat(u)/100:parseFloat(u)):1},t)}}},stringify:function(t){return t.a<1?"hsla("+r.default.lang.round(t.h)+", "+r.default.lang.round(t.s)+"%, "+r.default.lang.round(t.l)+"%, "+t.a+")":"hsl("+r.default.lang.round(t.h)+", "+r.default.lang.round(t.s)+"%, "+r.default.lang.round(t.l)+"%)"}};e.default=a},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(29);e.default=function(t){return r.default(t,"r")}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(29);e.default=function(t){return r.default(t,"g")}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(29);e.default=function(t){return r.default(t,"b")}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(29);e.default=function(t){return r.default(t,"h")}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(29);e.default=function(t){return r.default(t,"s")}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(29);e.default=function(t){return r.default(t,"l")}},function(t,e,n){"use 
strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(103);e.default=function(t){return!r.default(t)}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(15);e.default=function(t){try{return r.default.parse(t),!0}catch(t){return!1}}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(32);e.default=function(t,e){return r.default(t,"s",e)}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(32);e.default=function(t,e){return r.default(t,"s",-e)}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(32);e.default=function(t,e){return r.default(t,"l",e)}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(32);e.default=function(t,e){return r.default(t,"l",-e)}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(32);e.default=function(t){return r.default(t,"h",180)}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(52);e.default=function(t){return r.default(t,{s:0})}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(15),i=n(107);e.default=function(t,e){void 0===e&&(e=100);var n=r.default.parse(t);return n.r=255-n.r,n.g=255-n.g,n.b=255-n.b,i.default(n,t,e)}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(9),i=n(15),a=n(106);e.default=function(t,e){var n,o,s,c=i.default.parse(t),u={};for(var l in e)u[l]=(n=c[l],o=e[l],s=r.default.channel.max[l],o>0?(s-n)*o/100:n*o/100);return a.default(t,u)}},function(t,e,n){t.exports={Graph:n(76),version:n(300)}},function(t,e,n){var r=n(108);t.exports=function(t){return r(t,4)}},function(t,e){t.exports=function(){this.__data__=[],this.size=0}},function(t,e,n){var r=n(55),i=Array.prototype.splice;t.exports=function(t){var e=this.__data__,n=r(e,t);return!(n<0)&&(n==e.length-1?e.pop():i.call(e,n,1),--this.size,!0)}},function(t,e,n){var r=n(55);t.exports=function(t){var e=this.__data__,n=r(e,t);return n<0?void 0:e[n][1]}},function(t,e,n){var r=n(55);t.exports=function(t){return r(this.__data__,t)>-1}},function(t,e,n){var r=n(55);t.exports=function(t,e){var n=this.__data__,i=r(n,t);return i<0?(++this.size,n.push([t,e])):n[i][1]=e,this}},function(t,e,n){var r=n(54);t.exports=function(){this.__data__=new r,this.size=0}},function(t,e){t.exports=function(t){var e=this.__data__,n=e.delete(t);return this.size=e.size,n}},function(t,e){t.exports=function(t){return this.__data__.get(t)}},function(t,e){t.exports=function(t){return this.__data__.has(t)}},function(t,e,n){var r=n(54),i=n(77),a=n(78);t.exports=function(t,e){var n=this.__data__;if(n instanceof r){var o=n.__data__;if(!i||o.length<199)return o.push([t,e]),this.size=++n.size,this;n=this.__data__=new a(o)}return n.set(t,e),this.size=n.size,this}},function(t,e,n){var r=n(37),i=n(214),a=n(11),o=n(110),s=/^\[object .+?Constructor\]$/,c=Function.prototype,u=Object.prototype,l=c.toString,h=u.hasOwnProperty,f=RegExp("^"+l.call(h).replace(/[\\^$.*+?()[\]{}|]/g,"\\$&").replace(/hasOwnProperty|(function).*?(?=\\\()| for .+?(?=\\\])/g,"$1.*?")+"$");t.exports=function(t){return!(!a(t)||i(t))&&(r(t)?f:s).test(o(t))}},function(t,e){var n;n=function(){return this}();try{n=n||new Function("return this")()}catch(t){"object"==typeof window&&(n=window)}t.exports=n},function(t,e,n){var r=n(38),i=Object.prototype,a=i.hasOwnProperty,o=i.toString,s=r?r.toStringTag:void 0;t.exports=function(t){var 
e=a.call(t,s),n=t[s];try{t[s]=void 0;var r=!0}catch(t){}var i=o.call(t);return r&&(e?t[s]=n:delete t[s]),i}},function(t,e){var n=Object.prototype.toString;t.exports=function(t){return n.call(t)}},function(t,e,n){var r,i=n(215),a=(r=/[^.]+$/.exec(i&&i.keys&&i.keys.IE_PROTO||""))?"Symbol(src)_1."+r:"";t.exports=function(t){return!!a&&a in t}},function(t,e,n){var r=n(16)["__core-js_shared__"];t.exports=r},function(t,e){t.exports=function(t,e){return null==t?void 0:t[e]}},function(t,e,n){var r=n(218),i=n(54),a=n(77);t.exports=function(){this.size=0,this.__data__={hash:new r,map:new(a||i),string:new r}}},function(t,e,n){var r=n(219),i=n(220),a=n(221),o=n(222),s=n(223);function c(t){var e=-1,n=null==t?0:t.length;for(this.clear();++e0){if(++e>=800)return arguments[0]}else e=0;return t.apply(void 0,arguments)}}},function(t,e,n){var r=n(131),i=n(292),a=n(296),o=n(132),s=n(297),c=n(90);t.exports=function(t,e,n){var u=-1,l=i,h=t.length,f=!0,d=[],p=d;if(n)f=!1,l=a;else if(h>=200){var g=e?null:s(t);if(g)return c(g);f=!1,l=o,p=new r}else p=e?[]:d;t:for(;++u-1}},function(t,e,n){var r=n(145),i=n(294),a=n(295);t.exports=function(t,e,n){return e==e?a(t,e,n):r(t,i,n)}},function(t,e){t.exports=function(t){return t!=t}},function(t,e){t.exports=function(t,e,n){for(var r=n-1,i=t.length;++r1||1===e.length&&t.hasEdge(e[0],e[0])}))}},function(t,e,n){var r=n(10);t.exports=function(t,e,n){return function(t,e,n){var r={},i=t.nodes();return i.forEach((function(t){r[t]={},r[t][t]={distance:0},i.forEach((function(e){t!==e&&(r[t][e]={distance:Number.POSITIVE_INFINITY})})),n(t).forEach((function(n){var i=n.v===t?n.w:n.v,a=e(n);r[t][i]={distance:a,predecessor:t}}))})),i.forEach((function(t){var e=r[t];i.forEach((function(n){var a=r[n];i.forEach((function(n){var r=a[t],i=e[n],o=a[n],s=r.distance+i.distance;s0;){if(n=c.removeMin(),r.has(s,n))o.setEdge(n,s[n]);else{if(l)throw new Error("Input graph is not connected: "+t);l=!0}t.nodeEdges(n).forEach(u)}return o}},function(t,e,n){var r;try{r=n(3)}catch(t){}r||(r=window.graphlib),t.exports=r},function(t,e,n){"use strict";var r=n(4),i=n(345),a=n(348),o=n(349),s=n(8).normalizeRanks,c=n(351),u=n(8).removeEmptyRanks,l=n(352),h=n(353),f=n(354),d=n(355),p=n(364),g=n(8),y=n(17).Graph;t.exports=function(t,e){var n=e&&e.debugTiming?g.time:g.notime;n("layout",(function(){var e=n(" buildLayoutGraph",(function(){return function(t){var e=new y({multigraph:!0,compound:!0}),n=C(t.graph());return e.setGraph(r.merge({},m,T(n,v),r.pick(n,b))),r.forEach(t.nodes(),(function(n){var i=C(t.node(n));e.setNode(n,r.defaults(T(i,x),_)),e.setParent(n,t.parent(n))})),r.forEach(t.edges(),(function(n){var i=C(t.edge(n));e.setEdge(n,r.merge({},w,T(i,k),r.pick(i,E)))})),e}(t)}));n(" runLayout",(function(){!function(t,e){e(" makeSpaceForEdgeLabels",(function(){!function(t){var e=t.graph();e.ranksep/=2,r.forEach(t.edges(),(function(n){var r=t.edge(n);r.minlen*=2,"c"!==r.labelpos.toLowerCase()&&("TB"===e.rankdir||"BT"===e.rankdir?r.width+=r.labeloffset:r.height+=r.labeloffset)}))}(t)})),e(" removeSelfEdges",(function(){!function(t){r.forEach(t.edges(),(function(e){if(e.v===e.w){var n=t.node(e.v);n.selfEdges||(n.selfEdges=[]),n.selfEdges.push({e:e,label:t.edge(e)}),t.removeEdge(e)}}))}(t)})),e(" acyclic",(function(){i.run(t)})),e(" nestingGraph.run",(function(){l.run(t)})),e(" rank",(function(){o(g.asNonCompoundGraph(t))})),e(" injectEdgeLabelProxies",(function(){!function(t){r.forEach(t.edges(),(function(e){var n=t.edge(e);if(n.width&&n.height){var 
r=t.node(e.v),i={rank:(t.node(e.w).rank-r.rank)/2+r.rank,e:e};g.addDummyNode(t,"edge-proxy",i,"_ep")}}))}(t)})),e(" removeEmptyRanks",(function(){u(t)})),e(" nestingGraph.cleanup",(function(){l.cleanup(t)})),e(" normalizeRanks",(function(){s(t)})),e(" assignRankMinMax",(function(){!function(t){var e=0;r.forEach(t.nodes(),(function(n){var i=t.node(n);i.borderTop&&(i.minRank=t.node(i.borderTop).rank,i.maxRank=t.node(i.borderBottom).rank,e=r.max(e,i.maxRank))})),t.graph().maxRank=e}(t)})),e(" removeEdgeLabelProxies",(function(){!function(t){r.forEach(t.nodes(),(function(e){var n=t.node(e);"edge-proxy"===n.dummy&&(t.edge(n.e).labelRank=n.rank,t.removeNode(e))}))}(t)})),e(" normalize.run",(function(){a.run(t)})),e(" parentDummyChains",(function(){c(t)})),e(" addBorderSegments",(function(){h(t)})),e(" order",(function(){d(t)})),e(" insertSelfEdges",(function(){!function(t){var e=g.buildLayerMatrix(t);r.forEach(e,(function(e){var n=0;r.forEach(e,(function(e,i){var a=t.node(e);a.order=i+n,r.forEach(a.selfEdges,(function(e){g.addDummyNode(t,"selfedge",{width:e.label.width,height:e.label.height,rank:a.rank,order:i+ ++n,e:e.e,label:e.label},"_se")})),delete a.selfEdges}))}))}(t)})),e(" adjustCoordinateSystem",(function(){f.adjust(t)})),e(" position",(function(){p(t)})),e(" positionSelfEdges",(function(){!function(t){r.forEach(t.nodes(),(function(e){var n=t.node(e);if("selfedge"===n.dummy){var r=t.node(n.e.v),i=r.x+r.width/2,a=r.y,o=n.x-i,s=r.height/2;t.setEdge(n.e,n.label),t.removeNode(e),n.label.points=[{x:i+2*o/3,y:a-s},{x:i+5*o/6,y:a-s},{x:i+o,y:a},{x:i+5*o/6,y:a+s},{x:i+2*o/3,y:a+s}],n.label.x=n.x,n.label.y=n.y}}))}(t)})),e(" removeBorderNodes",(function(){!function(t){r.forEach(t.nodes(),(function(e){if(t.children(e).length){var n=t.node(e),i=t.node(n.borderTop),a=t.node(n.borderBottom),o=t.node(r.last(n.borderLeft)),s=t.node(r.last(n.borderRight));n.width=Math.abs(s.x-o.x),n.height=Math.abs(a.y-i.y),n.x=o.x+n.width/2,n.y=i.y+n.height/2}})),r.forEach(t.nodes(),(function(e){"border"===t.node(e).dummy&&t.removeNode(e)}))}(t)})),e(" normalize.undo",(function(){a.undo(t)})),e(" fixupEdgeLabelCoords",(function(){!function(t){r.forEach(t.edges(),(function(e){var n=t.edge(e);if(r.has(n,"x"))switch("l"!==n.labelpos&&"r"!==n.labelpos||(n.width-=n.labeloffset),n.labelpos){case"l":n.x-=n.width/2+n.labeloffset;break;case"r":n.x+=n.width/2+n.labeloffset}}))}(t)})),e(" undoCoordinateSystem",(function(){f.undo(t)})),e(" translateGraph",(function(){!function(t){var e=Number.POSITIVE_INFINITY,n=0,i=Number.POSITIVE_INFINITY,a=0,o=t.graph(),s=o.marginx||0,c=o.marginy||0;function u(t){var r=t.x,o=t.y,s=t.width,c=t.height;e=Math.min(e,r-s/2),n=Math.max(n,r+s/2),i=Math.min(i,o-c/2),a=Math.max(a,o+c/2)}r.forEach(t.nodes(),(function(e){u(t.node(e))})),r.forEach(t.edges(),(function(e){var n=t.edge(e);r.has(n,"x")&&u(n)})),e-=s,i-=c,r.forEach(t.nodes(),(function(n){var r=t.node(n);r.x-=e,r.y-=i})),r.forEach(t.edges(),(function(n){var a=t.edge(n);r.forEach(a.points,(function(t){t.x-=e,t.y-=i})),r.has(a,"x")&&(a.x-=e),r.has(a,"y")&&(a.y-=i)})),o.width=n-e+s,o.height=a-i+c}(t)})),e(" assignNodeIntersects",(function(){!function(t){r.forEach(t.edges(),(function(e){var n,r,i=t.edge(e),a=t.node(e.v),o=t.node(e.w);i.points?(n=i.points[0],r=i.points[i.points.length-1]):(i.points=[],n=o,r=a),i.points.unshift(g.intersectRect(a,n)),i.points.push(g.intersectRect(o,r))}))}(t)})),e(" reversePoints",(function(){!function(t){r.forEach(t.edges(),(function(e){var n=t.edge(e);n.reversed&&n.points.reverse()}))}(t)})),e(" 
acyclic.undo",(function(){i.undo(t)}))}(e,n)})),n(" updateInputGraph",(function(){!function(t,e){r.forEach(t.nodes(),(function(n){var r=t.node(n),i=e.node(n);r&&(r.x=i.x,r.y=i.y,e.children(n).length&&(r.width=i.width,r.height=i.height))})),r.forEach(t.edges(),(function(n){var i=t.edge(n),a=e.edge(n);i.points=a.points,r.has(a,"x")&&(i.x=a.x,i.y=a.y)})),t.graph().width=e.graph().width,t.graph().height=e.graph().height}(t,e)}))}))};var v=["nodesep","edgesep","ranksep","marginx","marginy"],m={ranksep:50,edgesep:20,nodesep:50,rankdir:"tb"},b=["acyclicer","ranker","rankdir","align"],x=["width","height"],_={width:0,height:0},k=["minlen","weight","width","height","labeloffset"],w={minlen:1,weight:1,width:0,height:0,labeloffset:10,labelpos:"r"},E=["labelpos"];function T(t,e){return r.mapValues(r.pick(t,e),Number)}function C(t){var e={};return r.forEach(t,(function(t,n){e[n.toLowerCase()]=t})),e}},function(t,e,n){var r=n(108);t.exports=function(t){return r(t,5)}},function(t,e,n){var r=n(315)(n(316));t.exports=r},function(t,e,n){var r=n(25),i=n(24),a=n(30);t.exports=function(t){return function(e,n,o){var s=Object(e);if(!i(e)){var c=r(n,3);e=a(e),n=function(t){return c(s[t],t,s)}}var u=t(e,n,o);return u>-1?s[c?e[u]:u]:void 0}}},function(t,e,n){var r=n(145),i=n(25),a=n(317),o=Math.max;t.exports=function(t,e,n){var s=null==t?0:t.length;if(!s)return-1;var c=null==n?0:a(n);return c<0&&(c=o(s+c,0)),r(t,i(e,3),c)}},function(t,e,n){var r=n(155);t.exports=function(t){var e=r(t),n=e%1;return e==e?n?e-n:e:0}},function(t,e,n){var r=n(11),i=n(42),a=/^\s+|\s+$/g,o=/^[-+]0x[0-9a-f]+$/i,s=/^0b[01]+$/i,c=/^0o[0-7]+$/i,u=parseInt;t.exports=function(t){if("number"==typeof t)return t;if(i(t))return NaN;if(r(t)){var e="function"==typeof t.valueOf?t.valueOf():t;t=r(e)?e+"":e}if("string"!=typeof t)return 0===t?t:+t;t=t.replace(a,"");var n=s.test(t);return n||c.test(t)?u(t.slice(2),n?2:8):o.test(t)?NaN:+t}},function(t,e,n){var r=n(89),i=n(127),a=n(40);t.exports=function(t,e){return null==t?t:r(t,i(e),a)}},function(t,e){t.exports=function(t){var e=null==t?0:t.length;return e?t[e-1]:void 0}},function(t,e,n){var r=n(59),i=n(88),a=n(25);t.exports=function(t,e){var n={};return e=a(e,3),i(t,(function(t,i,a){r(n,i,e(t,i,a))})),n}},function(t,e,n){var r=n(95),i=n(323),a=n(35);t.exports=function(t){return t&&t.length?r(t,a,i):void 0}},function(t,e){t.exports=function(t,e){return t>e}},function(t,e,n){var r=n(325),i=n(328)((function(t,e,n){r(t,e,n)}));t.exports=i},function(t,e,n){var r=n(53),i=n(157),a=n(89),o=n(326),s=n(11),c=n(40),u=n(159);t.exports=function t(e,n,l,h,f){e!==n&&a(n,(function(a,c){if(f||(f=new r),s(a))o(e,n,c,l,t,h,f);else{var d=h?h(u(e,c),a,c+"",e,n,f):void 0;void 0===d&&(d=a),i(e,c,d)}}),c)}},function(t,e,n){var r=n(157),i=n(114),a=n(123),o=n(115),s=n(124),c=n(47),u=n(5),l=n(146),h=n(39),f=n(37),d=n(11),p=n(158),g=n(48),y=n(159),v=n(327);t.exports=function(t,e,n,m,b,x,_){var k=y(t,n),w=y(e,n),E=_.get(w);if(E)r(t,n,E);else{var T=x?x(k,w,n+"",t,e,_):void 0,C=void 0===T;if(C){var S=u(w),A=!S&&h(w),M=!S&&!A&&g(w);T=w,S||A||M?u(k)?T=k:l(k)?T=o(k):A?(C=!1,T=i(w,!0)):M?(C=!1,T=a(w,!0)):T=[]:p(w)||c(w)?(T=k,c(k)?T=v(k):d(k)&&!f(k)||(T=s(w))):C=!1}C&&(_.set(w,T),b(T,w,m,x,_),_.delete(w)),r(t,n,T)}}},function(t,e,n){var r=n(46),i=n(40);t.exports=function(t){return r(t,i(t))}},function(t,e,n){var r=n(67),i=n(68);t.exports=function(t){return r((function(e,n){var r=-1,a=n.length,o=a>1?n[a-1]:void 0,s=a>2?n[2]:void 0;for(o=t.length>3&&"function"==typeof o?(a--,o):void 0,s&&i(n[0],n[1],s)&&(o=a<3?void 
0:o,a=1),e=Object(e);++r1&&o(t,e[0],e[1])?e=[]:n>2&&o(e[0],e[1],e[2])&&(e=[e[0]]),i(t,r(e,1),[])}));t.exports=s},function(t,e,n){var r=n(66),i=n(25),a=n(141),o=n(340),s=n(61),c=n(341),u=n(35);t.exports=function(t,e,n){var l=-1;e=r(e.length?e:[u],s(i));var h=a(t,(function(t,n,i){return{criteria:r(e,(function(e){return e(t)})),index:++l,value:t}}));return o(h,(function(t,e){return c(t,e,n)}))}},function(t,e){t.exports=function(t,e){var n=t.length;for(t.sort(e);n--;)t[n]=t[n].value;return t}},function(t,e,n){var r=n(342);t.exports=function(t,e,n){for(var i=-1,a=t.criteria,o=e.criteria,s=a.length,c=n.length;++i=c?u:u*("desc"==n[i]?-1:1)}return t.index-e.index}},function(t,e,n){var r=n(42);t.exports=function(t,e){if(t!==e){var n=void 0!==t,i=null===t,a=t==t,o=r(t),s=void 0!==e,c=null===e,u=e==e,l=r(e);if(!c&&!l&&!o&&t>e||o&&s&&u&&!c&&!l||i&&s&&u||!n&&u||!a)return 1;if(!i&&!o&&!l&&t0;--c)if(r=e[c].dequeue()){i=i.concat(s(t,e,n,r,!0));break}}return i}(n.graph,n.buckets,n.zeroIdx);return r.flatten(r.map(u,(function(e){return t.outEdges(e.v,e.w)})),!0)};var o=r.constant(1);function s(t,e,n,i,a){var o=a?[]:void 0;return r.forEach(t.inEdges(i.v),(function(r){var i=t.edge(r),s=t.node(r.v);a&&o.push({v:r.v,w:r.w}),s.out-=i,c(e,n,s)})),r.forEach(t.outEdges(i.v),(function(r){var i=t.edge(r),a=r.w,o=t.node(a);o.in-=i,c(e,n,o)})),t.removeNode(i.v),o}function c(t,e,n){n.out?n.in?t[n.out-n.in+e].enqueue(n):t[t.length-1].enqueue(n):t[0].enqueue(n)}},function(t,e){function n(){var t={};t._next=t._prev=t,this._sentinel=t}function r(t){t._prev._next=t._next,t._next._prev=t._prev,delete t._next,delete t._prev}function i(t,e){if("_next"!==t&&"_prev"!==t)return e}t.exports=n,n.prototype.dequeue=function(){var t=this._sentinel,e=t._prev;if(e!==t)return r(e),e},n.prototype.enqueue=function(t){var e=this._sentinel;t._prev&&t._next&&r(t),t._next=e._next,e._next._prev=t,e._next=t,t._prev=e},n.prototype.toString=function(){for(var t=[],e=this._sentinel,n=e._prev;n!==e;)t.push(JSON.stringify(n,i)),n=n._prev;return"["+t.join(", ")+"]"}},function(t,e,n){"use strict";var r=n(4),i=n(8);t.exports={run:function(t){t.graph().dummyChains=[],r.forEach(t.edges(),(function(e){!function(t,e){var n,r,a,o=e.v,s=t.node(o).rank,c=e.w,u=t.node(c).rank,l=e.name,h=t.edge(e),f=h.labelRank;if(u===s+1)return;for(t.removeEdge(e),a=0,++s;sc.lim&&(u=c,l=!0);var h=r.filter(e.edges(),(function(e){return l===m(t,t.node(e.v),u)&&l!==m(t,t.node(e.w),u)}));return r.minBy(h,(function(t){return a(e,t)}))}function v(t,e,n,i){var a=n.v,o=n.w;t.removeEdge(a,o),t.setEdge(i.v,i.w,{}),d(t),h(t,e),function(t,e){var n=r.find(t.nodes(),(function(t){return!e.node(t).parent})),i=s(t,n);i=i.slice(1),r.forEach(i,(function(n){var r=t.node(n).parent,i=e.edge(n,r),a=!1;i||(i=e.edge(r,n),a=!0),e.node(n).rank=e.node(r).rank+(a?i.minlen:-i.minlen)}))}(t,e)}function m(t,e,n){return n.low<=e.lim&&e.lim<=n.lim}t.exports=l,l.initLowLimValues=d,l.initCutValues=h,l.calcCutValue=f,l.leaveEdge=g,l.enterEdge=y,l.exchangeEdges=v},function(t,e,n){var r=n(4);t.exports=function(t){var e=function(t){var e={},n=0;function i(a){var o=n;r.forEach(t.children(a),i),e[a]={low:o,lim:n++}}return r.forEach(t.children(),i),e}(t);r.forEach(t.graph().dummyChains,(function(n){for(var r=t.node(n),i=r.edgeObj,a=function(t,e,n,r){var 
i,a,o=[],s=[],c=Math.min(e[n].low,e[r].low),u=Math.max(e[n].lim,e[r].lim);i=n;do{i=t.parent(i),o.push(i)}while(i&&(e[i].low>c||u>e[i].lim));a=i,i=r;for(;(i=t.parent(i))!==a;)s.push(i);return{path:o.concat(s.reverse()),lca:a}}(t,e,i.v,i.w),o=a.path,s=a.lca,c=0,u=o[c],l=!0;n!==i.w;){if(r=t.node(n),l){for(;(u=o[c])!==s&&t.node(u).maxRank=2),s=l.buildLayerMatrix(t);var y=a(t,s);y0;)e%2&&(n+=c[e+1]),c[e=e-1>>1]+=t.weight;u+=t.weight*n}))),u}t.exports=function(t,e){for(var n=0,r=1;r=t.barycenter)&&function(t,e){var n=0,r=0;t.weight&&(n+=t.barycenter*t.weight,r+=t.weight);e.weight&&(n+=e.barycenter*e.weight,r+=e.weight);t.vs=e.vs.concat(t.vs),t.barycenter=n/r,t.weight=r,t.i=Math.min(e.i,t.i),e.merged=!0}(t,e)}}function i(e){return function(n){n.in.push(e),0==--n.indegree&&t.push(n)}}for(;t.length;){var a=t.pop();e.push(a),r.forEach(a.in.reverse(),n(a)),r.forEach(a.out,i(a))}return r.map(r.filter(e,(function(t){return!t.merged})),(function(t){return r.pick(t,["vs","i","barycenter","weight"])}))}(r.filter(n,(function(t){return!t.indegree})))}},function(t,e,n){var r=n(4),i=n(8);function a(t,e,n){for(var i;e.length&&(i=r.last(e)).i<=n;)e.pop(),t.push(i.vs),n++;return n}t.exports=function(t,e){var n=i.partition(t,(function(t){return r.has(t,"barycenter")})),o=n.lhs,s=r.sortBy(n.rhs,(function(t){return-t.i})),c=[],u=0,l=0,h=0;o.sort((f=!!e,function(t,e){return t.barycentere.barycenter?1:f?e.i-t.i:t.i-e.i})),h=a(c,s,h),r.forEach(o,(function(t){h+=t.vs.length,c.push(t.vs),u+=t.barycenter*t.weight,l+=t.weight,h=a(c,s,h)}));var f;var d={vs:r.flatten(c,!0)};l&&(d.barycenter=u/l,d.weight=l);return d}},function(t,e,n){var r=n(4),i=n(17).Graph;t.exports=function(t,e,n){var a=function(t){var e;for(;t.hasNode(e=r.uniqueId("_root")););return e}(t),o=new i({compound:!0}).setGraph({root:a}).setDefaultNodeLabel((function(e){return t.node(e)}));return r.forEach(t.nodes(),(function(i){var s=t.node(i),c=t.parent(i);(s.rank===e||s.minRank<=e&&e<=s.maxRank)&&(o.setNode(i),o.setParent(i,c||a),r.forEach(t[n](i),(function(e){var n=e.v===i?e.w:e.v,a=o.edge(n,i),s=r.isUndefined(a)?0:a.weight;o.setEdge(n,i,{weight:t.edge(e).weight+s})})),r.has(s,"minRank")&&o.setNode(i,{borderLeft:s.borderLeft[e],borderRight:s.borderRight[e]}))})),o}},function(t,e,n){var r=n(4);t.exports=function(t,e,n){var i,a={};r.forEach(n,(function(n){for(var r,o,s=t.parent(n);s;){if((r=t.parent(s))?(o=a[r],a[r]=s):(o=i,i=s),o&&o!==s)return void e.setEdge(o,s);s=r}}))}},function(t,e,n){"use strict";var r=n(4),i=n(8),a=n(365).positionX;t.exports=function(t){(function(t){var e=i.buildLayerMatrix(t),n=t.graph().ranksep,a=0;r.forEach(e,(function(e){var i=r.max(r.map(e,(function(e){return t.node(e).height})));r.forEach(e,(function(e){t.node(e).y=a+i/2})),a+=i+n}))})(t=i.asNonCompoundGraph(t)),r.forEach(a(t),(function(e,n){t.node(n).x=e}))}},function(t,e,n){"use strict";var r=n(4),i=n(17).Graph,a=n(8);function o(t,e){var n={};return r.reduce(e,(function(e,i){var a=0,o=0,s=e.length,u=r.last(i);return r.forEach(i,(function(e,l){var h=function(t,e){if(t.node(e).dummy)return r.find(t.predecessors(e),(function(e){return t.node(e).dummy}))}(t,e),f=h?t.node(h).order:s;(h||e===u)&&(r.forEach(i.slice(o,l+1),(function(e){r.forEach(t.predecessors(e),(function(r){var i=t.node(r),o=i.order;!(os)&&c(n,e,u)}))}))}return r.reduce(e,(function(e,n){var a,o=-1,s=0;return r.forEach(n,(function(r,c){if("border"===t.node(r).dummy){var u=t.predecessors(r);u.length&&(a=t.node(u[0]).order,i(n,s,c,o,a),s=c,o=a)}i(n,s,n.length,a,e.length)})),n})),n}function c(t,e,n){if(e>n){var 
r=e;e=n,n=r}var i=t[e];i||(t[e]=i={}),i[n]=!0}function u(t,e,n){if(e>n){var i=e;e=n,n=i}return r.has(t[e],n)}function l(t,e,n,i){var a={},o={},s={};return r.forEach(e,(function(t){r.forEach(t,(function(t,e){a[t]=t,o[t]=t,s[t]=e}))})),r.forEach(e,(function(t){var e=-1;r.forEach(t,(function(t){var c=i(t);if(c.length)for(var l=((c=r.sortBy(c,(function(t){return s[t]}))).length-1)/2,h=Math.floor(l),f=Math.ceil(l);h<=f;++h){var d=c[h];o[t]===t&&e0}t.exports=function(t,e,r,i){var a,o,s,c,u,l,h,f,d,p,g,y,v;if(a=e.y-t.y,s=t.x-e.x,u=e.x*t.y-t.x*e.y,d=a*r.x+s*r.y+u,p=a*i.x+s*i.y+u,0!==d&&0!==p&&n(d,p))return;if(o=i.y-r.y,c=r.x-i.x,l=i.x*r.y-r.x*i.y,h=o*t.x+c*t.y+l,f=o*e.x+c*e.y+l,0!==h&&0!==f&&n(h,f))return;if(0===(g=a*c-o*s))return;return y=Math.abs(g/2),{x:(v=s*l-c*u)<0?(v-y)/g:(v+y)/g,y:(v=o*u-a*l)<0?(v-y)/g:(v+y)/g}}},function(t,e,n){var r=n(43),i=n(31),a=n(153).layout;t.exports=function(){var t=n(371),e=n(374),i=n(375),u=n(376),l=n(377),h=n(378),f=n(379),d=n(380),p=n(381),g=function(n,g){!function(t){t.nodes().forEach((function(e){var n=t.node(e);r.has(n,"label")||t.children(e).length||(n.label=e),r.has(n,"paddingX")&&r.defaults(n,{paddingLeft:n.paddingX,paddingRight:n.paddingX}),r.has(n,"paddingY")&&r.defaults(n,{paddingTop:n.paddingY,paddingBottom:n.paddingY}),r.has(n,"padding")&&r.defaults(n,{paddingLeft:n.padding,paddingRight:n.padding,paddingTop:n.padding,paddingBottom:n.padding}),r.defaults(n,o),r.each(["paddingLeft","paddingRight","paddingTop","paddingBottom"],(function(t){n[t]=Number(n[t])})),r.has(n,"width")&&(n._prevWidth=n.width),r.has(n,"height")&&(n._prevHeight=n.height)})),t.edges().forEach((function(e){var n=t.edge(e);r.has(n,"label")||(n.label=""),r.defaults(n,s)}))}(g);var y=c(n,"output"),v=c(y,"clusters"),m=c(y,"edgePaths"),b=i(c(y,"edgeLabels"),g),x=t(c(y,"nodes"),g,d);a(g),l(x,g),h(b,g),u(m,g,p);var _=e(v,g);f(_,g),function(t){r.each(t.nodes(),(function(e){var n=t.node(e);r.has(n,"_prevWidth")?n.width=n._prevWidth:delete n.width,r.has(n,"_prevHeight")?n.height=n._prevHeight:delete n.height,delete n._prevWidth,delete n._prevHeight}))}(g)};return g.createNodes=function(e){return arguments.length?(t=e,g):t},g.createClusters=function(t){return arguments.length?(e=t,g):e},g.createEdgeLabels=function(t){return arguments.length?(i=t,g):i},g.createEdgePaths=function(t){return arguments.length?(u=t,g):u},g.shapes=function(t){return arguments.length?(d=t,g):d},g.arrows=function(t){return arguments.length?(p=t,g):p},g};var o={paddingLeft:10,paddingRight:10,paddingTop:10,paddingBottom:10,rx:0,ry:0,shape:"rect"},s={arrowhead:"normal",curve:i.curveLinear};function c(t,e){var n=t.select("g."+e);return n.empty()&&(n=t.append("g").attr("class",e)),n}},function(t,e,n){"use strict";var r=n(43),i=n(97),a=n(12),o=n(31);t.exports=function(t,e,n){var s,c=e.nodes().filter((function(t){return!a.isSubgraph(e,t)})),u=t.selectAll("g.node").data(c,(function(t){return t})).classed("update",!0);u.exit().remove(),u.enter().append("g").attr("class","node").style("opacity",0),(u=t.selectAll("g.node")).each((function(t){var s=e.node(t),c=o.select(this);a.applyClass(c,s.class,(c.classed("update")?"update ":"")+"node"),c.select("g.label").remove();var 
u=c.append("g").attr("class","label"),l=i(u,s),h=n[s.shape],f=r.pick(l.node().getBBox(),"width","height");s.elem=this,s.id&&c.attr("id",s.id),s.labelId&&u.attr("id",s.labelId),r.has(s,"width")&&(f.width=s.width),r.has(s,"height")&&(f.height=s.height),f.width+=s.paddingLeft+s.paddingRight,f.height+=s.paddingTop+s.paddingBottom,u.attr("transform","translate("+(s.paddingLeft-s.paddingRight)/2+","+(s.paddingTop-s.paddingBottom)/2+")");var d=o.select(this);d.select(".label-container").remove();var p=h(d,f,s).classed("label-container",!0);a.applyStyle(p,s.style);var g=p.node().getBBox();s.width=g.width,s.height=g.height})),s=u.exit?u.exit():u.selectAll(null);return a.applyTransition(s,e).style("opacity",0).remove(),u}},function(t,e,n){var r=n(12);t.exports=function(t,e){for(var n=t.append("text"),i=function(t){for(var e,n="",r=!1,i=0;i0&&void 0!==arguments[0]?arguments[0]:"fatal";isNaN(t)&&(t=t.toLowerCase(),void 0!==h[t]&&(t=h[t])),f.trace=function(){},f.debug=function(){},f.info=function(){},f.warn=function(){},f.error=function(){},f.fatal=function(){},t<=h.fatal&&(f.fatal=console.error?console.error.bind(console,p("FATAL"),"color: orange"):console.log.bind(console,"",p("FATAL"))),t<=h.error&&(f.error=console.error?console.error.bind(console,p("ERROR"),"color: orange"):console.log.bind(console,"",p("ERROR"))),t<=h.warn&&(f.warn=console.warn?console.warn.bind(console,p("WARN"),"color: orange"):console.log.bind(console,"",p("WARN"))),t<=h.info&&(f.info=console.info?console.info.bind(console,p("INFO"),"color: lightblue"):console.log.bind(console,"",p("INFO"))),t<=h.debug&&(f.debug=console.debug?console.debug.bind(console,p("DEBUG"),"color: lightgreen"):console.log.bind(console,"",p("DEBUG")))},p=function(t){var e=l()().format("ss.SSS");return"%c".concat(e," : ").concat(t," : ")},g=n(70),y=function(t){for(var e="",n=0;n>=0;){if(!((n=t.indexOf("=0)){e+=t,n=-1;break}e+=t.substr(0,n),(n=(t=t.substr(n+1)).indexOf("<\/script>"))>=0&&(n+=9,t=t.substr(n))}return e},v=//gi,m=function(t){return t.replace(v,"#br#")},b=function(t){return t.replace(/#br#/g,"
")},x={getRows:function(t){if(!t)return 1;var e=m(t);return(e=e.replace(/\\n/g,"#br#")).split("#br#")},sanitizeText:function(t,e){var n=t,r=!0;if(!e.flowchart||!1!==e.flowchart.htmlLabels&&"false"!==e.flowchart.htmlLabels||(r=!1),r){var i=e.securityLevel;"antiscript"===i?n=y(n):"loose"!==i&&(n=(n=(n=m(n)).replace(//g,">")).replace(/=/g,"="),n=b(n))}return n},hasBreaks:function(t){return//gi.test(t)},splitBreaks:function(t){return t.split(//gi)},lineBreakRegex:v,removeScript:y};function _(t){return(_="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(t){return typeof t}:function(t){return t&&"function"==typeof Symbol&&t.constructor===Symbol&&t!==Symbol.prototype?"symbol":typeof t})(t)}function k(t){return function(t){if(Array.isArray(t)){for(var e=0,n=new Array(t.length);e1&&void 0!==arguments[1]?arguments[1]:null;try{var n=new RegExp("[%]{2}(?![{]".concat(T.source,")(?=[}][%]{2}).*\n"),"ig");t=t.trim().replace(n,"").replace(/'/gm,'"'),f.debug("Detecting diagram directive".concat(null!==e?" type:"+e:""," based on the text:").concat(t));for(var r,i=[];null!==(r=E.exec(t));)if(r.index===E.lastIndex&&E.lastIndex++,r&&!e||e&&r[1]&&r[1].match(e)||e&&r[2]&&r[2].match(e)){var a=r[1]?r[1]:r[2],o=r[3]?r[3].trim():r[4]?JSON.parse(r[4].trim()):null;i.push({type:a,args:o})}return 0===i.length&&i.push({type:t,args:null}),1===i.length?i[0]:i}catch(n){return f.error("ERROR: ".concat(n.message," - Unable to parse directive").concat(null!==e?" type:"+e:""," based on the text:").concat(t)),{type:null,args:null}}},A=function(t){return t=t.replace(E,"").replace(C,"\n"),f.debug("Detecting diagram type based on the text "+t),t.match(/^\s*sequenceDiagram/)?"sequence":t.match(/^\s*gantt/)?"gantt":t.match(/^\s*classDiagram-v2/)?"classDiagram":t.match(/^\s*classDiagram/)?"class":t.match(/^\s*stateDiagram-v2/)?"stateDiagram":t.match(/^\s*stateDiagram/)?"state":t.match(/^\s*gitGraph/)?"git":t.match(/^\s*flowchart/)?"flowchart-v2":t.match(/^\s*info/)?"info":t.match(/^\s*pie/)?"pie":t.match(/^\s*erDiagram/)?"er":t.match(/^\s*journey/)?"journey":"flowchart"},M=function(t,e){var n={};return function(){for(var r=arguments.length,i=new Array(r),a=0;a"},n),x.lineBreakRegex.test(t))return t;var r=t.split(" "),i=[],a="";return r.forEach((function(t,o){var s=Y("".concat(t," "),n),c=Y(a,n);if(s>e){var u=R(t,e,"-",n),l=u.hyphenatedStrings,h=u.remainingWord;i.push.apply(i,[a].concat(k(l))),a=h}else c+s>=e?(i.push(a),a=t):a=[a,t].filter(Boolean).join(" ");o+1===r.length&&i.push(a)})),i.filter((function(t){return""!==t})).join(n.joinWith)}),(function(t,e,n){return"".concat(t,"-").concat(e,"-").concat(n.fontSize,"-").concat(n.fontWeight,"-").concat(n.fontFamily,"-").concat(n.joinWith)})),R=M((function(t,e){var n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:"-",r=arguments.length>3?arguments[3]:void 0;r=Object.assign({fontSize:12,fontWeight:400,fontFamily:"Arial",margin:0},r);var i=t.split(""),a=[],o="";return i.forEach((function(t,s){var c="".concat(o).concat(t);if(Y(c,r)>=e){var u=s+1,l=i.length===u,h="".concat(c).concat(n);a.push(l?c:h),o=""}else o=c})),{hyphenatedStrings:a,remainingWord:o}}),(function(t,e){var n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:"-",r=arguments.length>3?arguments[3]:void 0;return"".concat(t,"-").concat(e,"-").concat(n,"-").concat(r.fontSize,"-").concat(r.fontWeight,"-").concat(r.fontFamily)})),Y=function(t,e){return e=Object.assign({fontSize:12,fontWeight:400,fontFamily:"Arial"},e),z(t,e).width},z=M((function(t,e){var 
n=e=Object.assign({fontSize:12,fontWeight:400,fontFamily:"Arial"},e),r=n.fontSize,i=n.fontFamily,a=n.fontWeight;if(!t)return{width:0,height:0};var o=["sans-serif",i],c=t.split(x.lineBreakRegex),u=[],l=Object(s.select)("body");if(!l.remove)return{width:0,height:0,lineHeight:0};for(var h=l.append("svg"),f=0,d=o;fu[1].height&&u[0].width>u[1].width&&u[0].lineHeight>u[1].lineHeight?0:1]}),(function(t,e){return"".concat(t,"-").concat(e.fontSize,"-").concat(e.fontWeight,"-").concat(e.fontFamily)})),U=function(t,e,n){var r=new Map;return r.set("height",t),n?(r.set("width","100%"),r.set("style","max-width: ".concat(e,"px;"))):r.set("width",e),r},$=function(t,e,n,r){!function(t,e){var n=!0,r=!1,i=void 0;try{for(var a,o=e[Symbol.iterator]();!(n=(a=o.next()).done);n=!0){var s=a.value;t.attr(s[0],s[1])}}catch(t){r=!0,i=t}finally{try{n||null==o.return||o.return()}finally{if(r)throw i}}}(t,U(e,n,r))},W={assignWithDepth:P,wrapLabel:j,calculateTextHeight:function(t,e){return e=Object.assign({fontSize:12,fontWeight:400,fontFamily:"Arial",margin:15},e),z(t,e).height},calculateTextWidth:Y,calculateTextDimensions:z,calculateSvgSizeAttrs:U,configureSvgSize:$,detectInit:function(t){var e=S(t,/(?:init\b)|(?:initialize\b)/),n={};if(Array.isArray(e)){var r=e.map((function(t){return t.args}));n=P(n,k(r))}else n=e.args;if(n){var i=A(t);["config"].forEach((function(t){void 0!==n[t]&&("flowchart-v2"===i&&(i="flowchart"),n[i]=n[t],delete n[t])}))}return n},detectDirective:S,detectType:A,isSubstringInArray:function(t,e){for(var n=0;n=1&&(i={x:t.x,y:t.y}),a>0&&a<1&&(i={x:(1-a)*e.x+a*t.x,y:(1-a)*e.y+a*t.y})}}e=t})),i}(t)},calcCardinalityPosition:function(t,e,n){var r;f.info("our points",e),e[0]!==n&&(e=e.reverse()),e.forEach((function(t){D(t,r),r=t}));var i,a=25;r=void 0,e.forEach((function(t){if(r&&!i){var e=D(t,r);if(e=1&&(i={x:t.x,y:t.y}),n>0&&n<1&&(i={x:(1-n)*r.x+n*t.x,y:(1-n)*r.y+n*t.y})}}r=t}));var o=t?10:5,s=Math.atan2(e[0].y-i.y,e[0].x-i.x),c={x:0,y:0};return c.x=Math.sin(s)*o+(e[0].x+i.x)/2,c.y=-Math.cos(s)*o+(e[0].y+i.y)/2,c},calcTerminalLabelPosition:function(t,e,n){var r,i=JSON.parse(JSON.stringify(n));f.info("our points",i),"start_left"!==e&&"start_right"!==e&&(i=i.reverse()),i.forEach((function(t){D(t,r),r=t}));var a,o=25;r=void 0,i.forEach((function(t){if(r&&!a){var e=D(t,r);if(e=1&&(a={x:t.x,y:t.y}),n>0&&n<1&&(a={x:(1-n)*r.x+n*t.x,y:(1-n)*r.y+n*t.y})}}r=t}));var s=10,c=Math.atan2(i[0].y-a.y,i[0].x-a.x),u={x:0,y:0};return u.x=Math.sin(c)*s+(i[0].x+a.x)/2,u.y=-Math.cos(c)*s+(i[0].y+a.y)/2,"start_left"===e&&(u.x=Math.sin(c+Math.PI)*s+(i[0].x+a.x)/2,u.y=-Math.cos(c+Math.PI)*s+(i[0].y+a.y)/2),"end_right"===e&&(u.x=Math.sin(c-Math.PI)*s+(i[0].x+a.x)/2-5,u.y=-Math.cos(c-Math.PI)*s+(i[0].y+a.y)/2-5),"end_left"===e&&(u.x=Math.sin(c)*s+(i[0].x+a.x)/2-5,u.y=-Math.cos(c)*s+(i[0].y+a.y)/2-5),u},formatUrl:function(t,e){var n=t.trim();if(n)return"loose"!==e.securityLevel?Object(g.sanitizeUrl)(n):n},getStylesFromArray:N,generateId:L,random:F,memoize:M,runFunc:function(t){for(var e,n=t.split("."),r=n.length-1,i=n[r],a=window,o=0;o1?s-1:0),u=1;u=0&&(n=!0)})),n},Gt=function(t,e){var n=[];return t.nodes.forEach((function(r,i){Ht(e,r)||n.push(t.nodes[i])})),{nodes:n}},qt={parseDirective:function(t,e,n){$o.parseDirective(this,t,e,n)},defaultConfig:function(){return pt.flowchart},addVertex:function(t,e,n,r,i){var a,o=t;void 0!==o&&0!==o.trim().length&&(void 0===Mt[o]&&(Mt[o]={id:o,domId:"flowchart-"+o+"-"+St,styles:[],classes:[]}),St++,void 
0!==e?(At=xt(),'"'===(a=x.sanitizeText(e.trim(),At))[0]&&'"'===a[a.length-1]&&(a=a.substring(1,a.length-1)),Mt[o].text=a):void 0===Mt[o].text&&(Mt[o].text=t),void 0!==n&&(Mt[o].type=n),null!=r&&r.forEach((function(t){Mt[o].styles.push(t)})),null!=i&&i.forEach((function(t){Mt[o].classes.push(t)})))},lookUpDomId:jt,addLink:function(t,e,n,r){var i,a;for(i=0;i/)&&(Tt="LR"),Tt.match(/.*v/)&&(Tt="TB")},setClass:Yt,getTooltip:function(t){return Lt[t]},setClickEvent:function(t,e,n){t.split(",").forEach((function(t){!function(t,e){var n=jt(t);"loose"===xt().securityLevel&&void 0!==e&&void 0!==Mt[t]&&(Mt[t].haveCallback=!0,It.push((function(){var r=document.querySelector('[id="'.concat(n,'"]'));null!==r&&r.addEventListener("click",(function(){W.runFunc(e,t)}),!1)})))}(t,e)})),zt(t,n),Yt(t,"clickable")},setLink:function(t,e,n,r){t.split(",").forEach((function(t){void 0!==Mt[t]&&(Mt[t].link=W.formatUrl(e,At),Mt[t].linkTarget=r)})),zt(t,n),Yt(t,"clickable")},bindFunctions:function(t){It.forEach((function(e){e(t)}))},getDirection:function(){return Tt.trim()},getVertices:function(){return Mt},getEdges:function(){return Ot},getClasses:function(){return Dt},clear:function(t){Mt={},Dt={},Ot=[],(It=[]).push(Ut),Nt=[],Bt={},Ft=0,Lt=[],Pt=!0,Ct=t||"gen-1"},setGen:function(t){Ct=t||"gen-1"},defaultStyle:function(){return"fill:#ffa;stroke: #f66; stroke-width: 3px; stroke-dasharray: 5, 5;fill:#ffa;stroke: #666;"},addSubGraph:function(t,e,n){var r=t.trim(),i=n;t===n&&n.match(/\s/)&&(r=void 0);var a,o,s,c=[];if(a=c.concat.apply(c,e),o={boolean:{},number:{},string:{}},s=[],c=a.filter((function(t){var e=Et(t);return""!==t.trim()&&(e in o?!o[e].hasOwnProperty(t)&&(o[e][t]=!0):!(s.indexOf(t)>=0)&&s.push(t))})),"gen-1"===Ct){f.warn("LOOKING UP");for(var u=0;u0&&function t(e,n){var r=Nt[n].nodes;if(!((Wt+=1)>2e3)){if(Vt[Wt]=n,Nt[n].id===e)return{result:!0,count:0};for(var i=0,a=1;i=0){var s=t(e,o);if(s.result)return{result:!0,count:a+s.count};a+=s.count}i+=1}return{result:!1,count:a}}}("none",Nt.length-1)},getSubGraphs:function(){return Nt},destructLink:function(t,e){var n,r=function(t){var e=t.trim(),n=e.slice(0,-1),r="arrow_open";switch(e.slice(-1)){case"x":r="arrow_cross","x"===e[0]&&(r="double_"+r,n=n.slice(1));break;case">":r="arrow_point","<"===e[0]&&(r="double_"+r,n=n.slice(1));break;case"o":r="arrow_circle","o"===e[0]&&(r="double_"+r,n=n.slice(1))}var i="normal",a=n.length-1;"="===n[0]&&(i="thick");var o=function(t,e){for(var n=e.length,r=0,i=0;in.height/2-a)){var o=a*a*(1-r*r/(i*i));0!=o&&(o=Math.sqrt(o)),o=a-o,t.y-n.y>0&&(o=-o),e.y+=o}return e},c}function fe(t,e,n,r){return t.insert("polygon",":first-child").attr("points",r.map((function(t){return t.x+","+t.y})).join(" ")).attr("transform","translate("+-e/2+","+n/2+")")}var de={addToRender:function(t){t.shapes().question=ee,t.shapes().hexagon=ne,t.shapes().stadium=ue,t.shapes().subroutine=le,t.shapes().cylinder=he,t.shapes().rect_left_inv_arrow=re,t.shapes().lean_right=ie,t.shapes().lean_left=ae,t.shapes().trapezoid=oe,t.shapes().inv_trapezoid=se,t.shapes().rect_right_inv_arrow=ce},addToRenderV2:function(t){t({question:ee}),t({hexagon:ne}),t({stadium:ue}),t({subroutine:le}),t({cylinder:he}),t({rect_left_inv_arrow:re}),t({lean_right:ie}),t({lean_left:ae}),t({trapezoid:oe}),t({inv_trapezoid:se}),t({rect_right_inv_arrow:ce})}},pe={},ge=function(t,e,n){var r=Object(s.select)('[id="'.concat(n,'"]'));Object.keys(t).forEach((function(n){var i=t[n],a="default";i.classes.length>0&&(a=i.classes.join(" "));var o,s=N(i.styles),c=void 
0!==i.text?i.text:i.id;if(xt().flowchart.htmlLabels){var u={label:c.replace(/fa[lrsb]?:fa-[\w-]+/g,(function(t){return"")}))};(o=te()(r,u).node()).parentNode.removeChild(o)}else{var l=document.createElementNS("http://www.w3.org/2000/svg","text");l.setAttribute("style",s.labelStyle.replace("color:","fill:"));for(var h=c.split(x.lineBreakRegex),d=0;d').concat(a.text.replace(/fa[lrsb]?:fa-[\w-]+/g,(function(t){return"")})),"")):(l.labelType="text",l.label=a.text.replace(x.lineBreakRegex,"\n"),void 0===a.style&&(l.style=l.style||"stroke: #333; stroke-width: 1.5px;fill:none"),l.labelStyle=l.labelStyle.replace("color:","fill:"))),l.id=o,l.class=c+" "+u,l.minlen=a.length||1,e.setEdge(qt.lookUpDomId(a.start),qt.lookUpDomId(a.end),l,i)}))},ve=function(t){for(var e=Object.keys(t),n=0;n=0;h--)i=l[h],qt.addVertex(i.id,i.title,"group",void 0,i.classes);var d=qt.getVertices();f.warn("Get vertices",d);var p=qt.getEdges(),g=0;for(g=l.length-1;g>=0;g--){i=l[g],Object(s.selectAll)("cluster").append("text");for(var y=0;y"),f.info("vertexText"+i),function(t){var e,n,r=Object(s.select)(document.createElementNS("http://www.w3.org/2000/svg","foreignObject")),i=r.append("xhtml:div"),a=t.label,o=t.isNode?"nodeLabel":"edgeLabel";return i.html(''+a+""),e=i,(n=t.labelStyle)&&e.attr("style",n),i.style("display","inline-block"),i.style("white-space","nowrap"),i.attr("xmlns","http://www.w3.org/1999/xhtml"),r.node()}({isNode:r,label:i.replace(/fa[lrsb]?:fa-[\w-]+/g,(function(t){return"")})),labelStyle:e.replace("fill:","color:")});var a=document.createElementNS("http://www.w3.org/2000/svg","text");a.setAttribute("style",e.replace("color:","fill:"));var o=[];o="string"==typeof i?i.split(/\\n|\n|/gi):Array.isArray(i)?i:[];for(var c=0;c0)t(a,n,r,i);else{var o=n.node(a);f.info("cp ",a," to ",i," with parent ",e),r.setNode(a,o),i!==n.parent(a)&&(f.warn("Setting parent",a,n.parent(a)),r.setParent(a,n.parent(a))),e!==i&&a!==e?(f.debug("Setting parent",a,e),r.setParent(a,e)):(f.info("In copy ",e,"root",i,"data",n.node(e),i),f.debug("Not Setting parent for node=",a,"cluster!==rootId",e!==i,"node!==clusterId",a!==e));var s=n.edges(a);f.debug("Copying Edges",s),s.forEach((function(t){f.info("Edge",t);var a=n.edge(t.v,t.w,t.name);f.info("Edge data",a,i);try{!function(t,e){return f.info("Decendants of ",e," is ",Me[e]),f.info("Edge is ",t),t.v!==e&&(t.w!==e&&(Me[e]?(f.info("Here "),Me[e].indexOf(t.v)>=0||(!!De(t.v,e)||(!!De(t.w,e)||Me[e].indexOf(t.w)>=0))):(f.debug("Tilt, ",e,",not in decendants"),!1)))}(t,i)?f.info("Skipping copy of edge ",t.v,"--\x3e",t.w," rootId: ",i," clusterId:",e):(f.info("Copying as ",t.v,t.w,a,t.name),r.setEdge(t.v,t.w,a,t.name),f.info("newGraph edges ",r.edges(),r.edge(r.edges()[0])))}catch(t){f.error(t)}}))}f.debug("Removing node",a),n.removeNode(a)}))},Be=function t(e,n){f.trace("Searching",e);var r=n.children(e);if(f.trace("Searching children of id ",e,r),r.length<1)return f.trace("This is a valid node",e),e;for(var i=0;i ",a),a}},Le=function(t){return Ae[t]&&Ae[t].externalConnections&&Ae[t]?Ae[t].id:t},Fe=function(t,e){!t||e>10?f.debug("Opting out, no graph "):(f.debug("Opting in, graph "),t.nodes().forEach((function(e){t.children(e).length>0&&(f.warn("Cluster identified",e," Replacement id in edges: ",Be(e,t)),Me[e]=function t(e,n){for(var r=n.children(e),i=[].concat(r),a=0;a0?(f.debug("Cluster identified",e,Me),r.forEach((function(t){t.v!==e&&t.w!==e&&(De(t.v,e)^De(t.w,e)&&(f.warn("Edge: ",t," leaves cluster ",e),f.warn("Decendants of XXX ",e,": ",Me[e]),Ae[e].externalConnections=!0))}))):f.debug("Not 
a cluster ",e,Me)})),t.edges().forEach((function(e){var n=t.edge(e);f.warn("Edge "+e.v+" -> "+e.w+": "+JSON.stringify(e)),f.warn("Edge "+e.v+" -> "+e.w+": "+JSON.stringify(t.edge(e)));var r=e.v,i=e.w;f.warn("Fix XXX",Ae,"ids:",e.v,e.w,"Translateing: ",Ae[e.v]," --- ",Ae[e.w]),(Ae[e.v]||Ae[e.w])&&(f.warn("Fixing and trixing - removing XXX",e.v,e.w,e.name),r=Le(e.v),i=Le(e.w),t.removeEdge(e.v,e.w,e.name),r!==e.v&&(n.fromCluster=e.v),i!==e.w&&(n.toCluster=e.w),f.warn("Fix Replacing with XXX",r,i,e.name),t.setEdge(r,i,n,e.name))})),f.warn("Adjusted Graph",H.a.json.write(t)),Pe(t,0),f.trace(Ae))},Pe=function t(e,n){if(f.warn("extractor - ",n,H.a.json.write(e),e.children("D")),n>10)f.error("Bailing out");else{for(var r=e.nodes(),i=!1,a=0;a0}if(i){f.debug("Nodes = ",r,n);for(var c=0;c0){f.warn("Cluster without external connections, without a parent and with children",u,n);var l=e.graph(),h=new H.a.Graph({multigraph:!0,compound:!0}).setGraph({rankdir:"TB"===l.rankdir?"LR":"TB",nodesep:50,ranksep:50,marginx:8,marginy:8}).setDefaultEdgeLabel((function(){return{}}));f.warn("Old graph before copy",H.a.json.write(e)),Ne(u,e,h,u),e.setNode(u,{clusterNode:!0,id:u,clusterData:Ae[u].clusterData,labelText:Ae[u].labelText,graph:h}),f.warn("New graph after copy node: (",u,")",H.a.json.write(h)),f.debug("Old graph after copy",H.a.json.write(e))}else f.warn("Cluster ** ",u," **not meeting the criteria !externalConnections:",!Ae[u].externalConnections," no parent: ",!e.parent(u)," children ",e.children(u)&&e.children(u).length>0,e.children("D"),n),f.debug(Ae);else f.debug("Not a cluster",u,n)}r=e.nodes(),f.warn("New list of nodes",r);for(var d=0;d0}var Ue=function(t,e,n,r){var i,a,o,s,c,u,l,h,f,d,p,g,y;if(i=e.y-t.y,o=t.x-e.x,c=e.x*t.y-t.x*e.y,f=i*n.x+o*n.y+c,d=i*r.x+o*r.y+c,!(0!==f&&0!==d&&ze(f,d)||(a=r.y-n.y,s=n.x-r.x,u=r.x*n.y-n.x*r.y,l=a*t.x+s*t.y+u,h=a*e.x+s*e.y+u,0!==l&&0!==h&&ze(l,h)||0==(p=i*s-a*o))))return g=Math.abs(p/2),{x:(y=o*u-s*c)<0?(y-g)/p:(y+g)/p,y:(y=a*c-i*u)<0?(y-g)/p:(y+g)/p}},$e=function(t,e,n){var r=t.x,i=t.y,a=[],o=Number.POSITIVE_INFINITY,s=Number.POSITIVE_INFINITY;"function"==typeof e.forEach?e.forEach((function(t){o=Math.min(o,t.x),s=Math.min(s,t.y)})):(o=Math.min(o,e.x),s=Math.min(s,e.y));for(var c=r-t.width/2-o,u=i-t.height/2-s,l=0;l1&&a.sort((function(t,e){var r=t.x-n.x,i=t.y-n.y,a=Math.sqrt(r*r+i*i),o=e.x-n.x,s=e.y-n.y,c=Math.sqrt(o*o+s*s);return aMath.abs(o)*u?(s<0&&(u=-u),n=0===s?0:u*o/s,r=u):(o<0&&(c=-c),n=c,r=0===o?0:c*s/o),{x:i+n,y:a+r}},Ve={node:n.n(je).a,circle:Ye,ellipse:Re,polygon:$e,rect:We},He=function(t,e){var n=Te(t,e,"node "+e.classes,!0),r=n.shapeSvg,i=n.bbox,a=n.halfPadding;f.info("Classes = ",e.classes);var o=r.insert("rect",":first-child");return o.attr("rx",e.rx).attr("ry",e.ry).attr("x",-i.width/2-a).attr("y",-i.height/2-a).attr("width",i.width+e.padding).attr("height",i.height+e.padding),Ce(e,o),e.intersect=function(t){return Ve.rect(e,t)},r},Ge=[],qe={},Xe=0,Ze=[],Je=function(t){var e="",n=t;if(t.indexOf("~")>0){var r=t.split("~");n=r[0],e=r[1]}return{className:n,type:e}},Qe=function(t){var e=Je(t);void 0===qe[e.className]&&(qe[e.className]={id:e.className,type:e.type,cssClasses:[],methods:[],members:[],annotations:[],domId:"classid-"+e.className+"-"+Xe},Xe++)},Ke=function(t){for(var e=Object.keys(qe),n=0;n>")?r.annotations.push(i.substring(2,i.length-2)):i.indexOf(")")>0?r.methods.push(i):i&&r.members.push(i)}},en=function(t,e){t.split(",").forEach((function(t){var n=t;t[0].match(/\d/)&&(n="classid-"+n),void 
0!==qe[n]&&qe[n].cssClasses.push(e)}))},nn=function(t,e,n){var r=xt(),i=t,a=Ke(i);"loose"===r.securityLevel&&void 0!==e&&void 0!==qe[i]&&(n&&(qe[i].tooltip=x.sanitizeText(n,r)),Ze.push((function(){var t=document.querySelector('[id="'.concat(a,'"]'));null!==t&&t.addEventListener("click",(function(){W.runFunc(e,a)}),!1)})))},rn={AGGREGATION:0,EXTENSION:1,COMPOSITION:2,DEPENDENCY:3},an=function(t){var e=Object(s.select)(".mermaidTooltip");null===(e._groups||e)[0][0]&&(e=Object(s.select)("body").append("div").attr("class","mermaidTooltip").style("opacity",0)),Object(s.select)(t).select("svg").selectAll("g.node").on("mouseover",(function(){var t=Object(s.select)(this);if(null!==t.attr("title")){var n=this.getBoundingClientRect();e.transition().duration(200).style("opacity",".9"),e.html(t.attr("title")).style("left",window.scrollX+n.left+(n.right-n.left)/2+"px").style("top",window.scrollY+n.top-14+document.body.scrollTop+"px"),t.classed("hover",!0)}})).on("mouseout",(function(){e.transition().duration(500).style("opacity",0),Object(s.select)(this).classed("hover",!1)}))};Ze.push(an);var on={parseDirective:function(t,e,n){$o.parseDirective(this,t,e,n)},getConfig:function(){return xt().class},addClass:Qe,bindFunctions:function(t){Ze.forEach((function(e){e(t)}))},clear:function(){Ge=[],qe={},(Ze=[]).push(an)},getClass:function(t){return qe[t]},getClasses:function(){return qe},addAnnotation:function(t,e){var n=Je(t).className;qe[n].annotations.push(e)},getRelations:function(){return Ge},addRelation:function(t){f.debug("Adding relation: "+JSON.stringify(t)),Qe(t.id1),Qe(t.id2),t.id1=Je(t.id1).className,t.id2=Je(t.id2).className,Ge.push(t)},addMember:tn,addMembers:function(t,e){Array.isArray(e)&&(e.reverse(),e.forEach((function(e){return tn(t,e)})))},cleanupLabel:function(t){return":"===t.substring(0,1)?t.substr(1).trim():t.trim()},lineType:{LINE:0,DOTTED_LINE:1},relationType:rn,setClickEvent:function(t,e,n){t.split(",").forEach((function(t){nn(t,e,n),qe[t].haveCallback=!0})),en(t,"clickable")},setCssClass:en,setLink:function(t,e,n){var r=xt();t.split(",").forEach((function(t){var i=t;t[0].match(/\d/)&&(i="classid-"+i),void 0!==qe[i]&&(qe[i].link=W.formatUrl(e,r),n&&(qe[i].tooltip=x.sanitizeText(n,r)))})),en(t,"clickable")},lookUpDomId:Ke},sn=0,cn=function(t){var e=t.match(/(\+|-|~|#)?(\w+)(~\w+~|\[\])?\s+(\w+)/),n=t.match(/^([+|\-|~|#])?(\w+) *\( *(.*)\) *(\*|\$)? *(\w*[~|[\]]*\s*\w*~?)$/);return e&&!n?un(e):n?ln(n):hn(t)},un=function(t){var e="";try{e=(t[1]?t[1].trim():"")+(t[2]?t[2].trim():"")+(t[3]?dn(t[3].trim()):"")+" "+(t[4]?t[4].trim():"")}catch(n){e=t}return{displayText:e,cssStyle:""}},ln=function(t){var e="",n="";try{var r=t[1]?t[1].trim():"",i=t[2]?t[2].trim():"",a=t[3]?dn(t[3].trim()):"",o=t[4]?t[4].trim():"";n=r+i+"("+a+")"+(t[5]?" 
: "+dn(t[5]).trim():""),e=pn(o)}catch(e){n=t}return{displayText:n,cssStyle:e}},hn=function(t){var e="",n="",r="",i=t.indexOf("("),a=t.indexOf(")");if(i>1&&a>i&&a<=t.length){var o="",s="",c=t.substring(0,1);c.match(/\w/)?s=t.substring(0,i).trim():(c.match(/\+|-|~|#/)&&(o=c),s=t.substring(1,i).trim());var u=t.substring(i+1,a),l=t.substring(a+1,1);n=pn(l),e=o+s+"("+dn(u.trim())+")",a<"".length&&""!==(r=t.substring(a+2).trim())&&(r=" : "+dn(r))}else e=dn(t);return{displayText:e,cssStyle:n}},fn=function(t,e,n,r){var i=cn(e),a=t.append("tspan").attr("x",r.padding).text(i.displayText);""!==i.cssStyle&&a.attr("style",i.cssStyle),n||a.attr("dy",r.textHeight)},dn=function t(e){var n=e;return-1!=e.indexOf("~")?t(n=(n=n.replace("~","<")).replace("~",">")):n},pn=function(t){switch(t){case"*":return"font-style:italic;";case"$":return"text-decoration:underline;";default:return""}},gn=function(t,e,n){f.info("Rendering class "+e);var r,i=e.id,a={id:i,label:e.id,width:0,height:0},o=t.append("g").attr("id",Ke(i)).attr("class","classGroup");r=e.link?o.append("svg:a").attr("xlink:href",e.link).attr("target","_blank").append("text").attr("y",n.textHeight+n.padding).attr("x",0):o.append("text").attr("y",n.textHeight+n.padding).attr("x",0);var s=!0;e.annotations.forEach((function(t){var e=r.append("tspan").text("«"+t+"»");s||e.attr("dy",n.textHeight),s=!1}));var c=e.id;void 0!==e.type&&""!==e.type&&(c+="<"+e.type+">");var u=r.append("tspan").text(c).attr("class","title");s||u.attr("dy",n.textHeight);var l=r.node().getBBox().height,h=o.append("line").attr("x1",0).attr("y1",n.padding+l+n.dividerMargin/2).attr("y2",n.padding+l+n.dividerMargin/2),d=o.append("text").attr("x",n.padding).attr("y",l+n.dividerMargin+n.textHeight).attr("fill","white").attr("class","classText");s=!0,e.members.forEach((function(t){fn(d,t,s,n),s=!1}));var p=d.node().getBBox(),g=o.append("line").attr("x1",0).attr("y1",n.padding+l+n.dividerMargin+p.height).attr("y2",n.padding+l+n.dividerMargin+p.height),y=o.append("text").attr("x",n.padding).attr("y",l+2*n.dividerMargin+p.height+n.textHeight).attr("fill","white").attr("class","classText");s=!0,e.methods.forEach((function(t){fn(y,t,s,n),s=!1}));var v=o.node().getBBox(),m=" ";e.cssClasses.length>0&&(m+=e.cssClasses.join(" "));var b=o.insert("rect",":first-child").attr("x",0).attr("y",0).attr("width",v.width+2*n.padding).attr("height",v.height+n.padding+.5*n.dividerMargin).attr("class",m).node().getBBox().width;return r.node().childNodes.forEach((function(t){t.setAttribute("x",(b-t.getBBox().width)/2)})),e.tooltip&&r.insert("title").text(e.tooltip),h.attr("x2",b),g.attr("x2",b),a.width=b,a.height=v.height+n.padding+.5*n.dividerMargin,a},yn=function(t,e,n,r){var i=function(t){switch(t){case rn.AGGREGATION:return"aggregation";case rn.EXTENSION:return"extension";case rn.COMPOSITION:return"composition";case rn.DEPENDENCY:return"dependency"}};e.points=e.points.filter((function(t){return!Number.isNaN(t.y)}));var a,o,c=e.points,u=Object(s.line)().x((function(t){return t.x})).y((function(t){return t.y})).curve(s.curveBasis),l=t.append("path").attr("d",u(c)).attr("id","edge"+sn).attr("class","relation"),h="";r.arrowMarkerAbsolute&&(h=(h=(h=window.location.protocol+"//"+window.location.host+window.location.pathname+window.location.search).replace(/\(/g,"\\(")).replace(/\)/g,"\\)")),1==n.relation.lineType&&l.attr("class","relation 
dashed-line"),"none"!==n.relation.type1&&l.attr("marker-start","url("+h+"#"+i(n.relation.type1)+"Start)"),"none"!==n.relation.type2&&l.attr("marker-end","url("+h+"#"+i(n.relation.type2)+"End)");var d,p,g,y,v=e.points.length,m=W.calcLabelPosition(e.points);if(a=m.x,o=m.y,v%2!=0&&v>1){var b=W.calcCardinalityPosition("none"!==n.relation.type1,e.points,e.points[0]),x=W.calcCardinalityPosition("none"!==n.relation.type2,e.points,e.points[v-1]);f.debug("cardinality_1_point "+JSON.stringify(b)),f.debug("cardinality_2_point "+JSON.stringify(x)),d=b.x,p=b.y,g=x.x,y=x.y}if(void 0!==n.title){var _=t.append("g").attr("class","classLabel"),k=_.append("text").attr("class","label").attr("x",a).attr("y",o).attr("fill","red").attr("text-anchor","middle").text(n.title);window.label=k;var w=k.node().getBBox();_.insert("rect",":first-child").attr("class","box").attr("x",w.x-r.padding/2).attr("y",w.y-r.padding/2).attr("width",w.width+r.padding).attr("height",w.height+r.padding)}(f.info("Rendering relation "+JSON.stringify(n)),void 0!==n.relationTitle1&&"none"!==n.relationTitle1)&&t.append("g").attr("class","cardinality").append("text").attr("class","type1").attr("x",d).attr("y",p).attr("fill","black").attr("font-size","6").text(n.relationTitle1);void 0!==n.relationTitle2&&"none"!==n.relationTitle2&&t.append("g").attr("class","cardinality").append("text").attr("class","type2").attr("x",g).attr("y",y).attr("fill","black").attr("font-size","6").text(n.relationTitle2);sn++},vn=function(t,e,n){var r=t.insert("g").attr("class","node default").attr("id",e.domId||e.id),i=70,a=10;"LR"===n&&(i=10,a=70);var o=r.append("rect").style("stroke","black").style("fill","black").attr("x",-1*i/2).attr("y",-1*a/2).attr("width",i).attr("height",a).attr("class","fork-join");return Ce(e,o),e.height=e.height+e.padding/2,e.width=e.width+e.padding/2,e.intersect=function(t){return Ve.rect(e,t)},r},mn={question:function(t,e){var n=Te(t,e,void 0,!0),r=n.shapeSvg,i=n.bbox,a=i.width+e.padding+(i.height+e.padding),o=[{x:a/2,y:0},{x:a,y:-a/2},{x:a/2,y:-a},{x:0,y:-a/2}];f.info("Question main (Circle)");var s=Se(r,a,a,o);return Ce(e,s),e.intersect=function(t){return f.warn("Intersect called"),Ve.polygon(e,o,t)},r},rect:function(t,e){var n=Te(t,e,"node "+e.classes,!0),r=n.shapeSvg,i=n.bbox,a=n.halfPadding;f.trace("Classes = ",e.classes);var o=r.insert("rect",":first-child");return o.attr("class","basic label-container").attr("style",e.style).attr("rx",e.rx).attr("ry",e.ry).attr("x",-i.width/2-a).attr("y",-i.height/2-a).attr("width",i.width+e.padding).attr("height",i.height+e.padding),Ce(e,o),e.intersect=function(t){return Ve.rect(e,t)},r},rectWithTitle:function(t,e){var n;n=e.classes?"node "+e.classes:"node default";var r=t.insert("g").attr("class",n).attr("id",e.domId||e.id),i=r.insert("rect",":first-child"),a=r.insert("line"),o=r.insert("g").attr("class","label"),c=e.labelText.flat();f.info("Label text",c[0]);var u,l=o.node().appendChild(Ee(c[0],e.labelStyle,!0,!0));if(xt().flowchart.htmlLabels){var h=l.children[0],d=Object(s.select)(l);u=h.getBoundingClientRect(),d.attr("width",u.width),d.attr("height",u.height)}f.info("Text 2",c);var p=c.slice(1,c.length),g=l.getBBox(),y=o.node().appendChild(Ee(p.join("
"),e.labelStyle,!0,!0));if(xt().flowchart.htmlLabels){var v=y.children[0],m=Object(s.select)(y);u=v.getBoundingClientRect(),m.attr("width",u.width),m.attr("height",u.height)}var b=e.padding/2;return Object(s.select)(y).attr("transform","translate( "+(u.width>g.width?0:(g.width-u.width)/2)+", "+(g.height+b+5)+")"),Object(s.select)(l).attr("transform","translate( "+(u.widthe.height/2-s)){var i=s*s*(1-r*r/(o*o));0!=i&&(i=Math.sqrt(i)),i=s-i,t.y-e.y>0&&(i=-i),n.y+=i}return n},r},start:function(t,e){var n=t.insert("g").attr("class","node default").attr("id",e.domId||e.id),r=n.insert("circle",":first-child");return r.attr("class","state-start").attr("r",7).attr("width",14).attr("height",14),Ce(e,r),e.intersect=function(t){return Ve.circle(e,7,t)},n},end:function(t,e){var n=t.insert("g").attr("class","node default").attr("id",e.domId||e.id),r=n.insert("circle",":first-child"),i=n.insert("circle",":first-child");return i.attr("class","state-start").attr("r",7).attr("width",14).attr("height",14),r.attr("class","state-end").attr("r",5).attr("width",10).attr("height",10),Ce(e,i),e.intersect=function(t){return Ve.circle(e,7,t)},n},note:He,subroutine:function(t,e){var n=Te(t,e,void 0,!0),r=n.shapeSvg,i=n.bbox,a=i.width+e.padding,o=i.height+e.padding,s=Se(r,a,o,[{x:0,y:0},{x:a,y:0},{x:a,y:-o},{x:0,y:-o},{x:0,y:0},{x:-8,y:0},{x:a+8,y:0},{x:a+8,y:-o},{x:-8,y:-o},{x:-8,y:0}]);return Ce(e,s),e.intersect=function(t){return Ve.polygon(e,t)},r},fork:vn,join:vn,class_box:function(t,e){var n,r=e.padding/2;n=e.classes?"node "+e.classes:"node default";var i=t.insert("g").attr("class",n).attr("id",e.domId||e.id),a=i.insert("rect",":first-child"),o=i.insert("line"),c=i.insert("line"),u=0,l=4,h=i.insert("g").attr("class","label"),f=0,d=e.classData.annotations&&e.classData.annotations[0],p=e.classData.annotations[0]?"«"+e.classData.annotations[0]+"»":"",g=h.node().appendChild(Ee(p,e.labelStyle,!0,!0)),y=g.getBBox();if(xt().flowchart.htmlLabels){var v=g.children[0],m=Object(s.select)(g);y=v.getBoundingClientRect(),m.attr("width",y.width),m.attr("height",y.height)}e.classData.annotations[0]&&(l+=y.height+4,u+=y.width);var b=e.classData.id;void 0!==e.classData.type&&""!==e.classData.type&&(b+="<"+e.classData.type+">");var x=h.node().appendChild(Ee(b,e.labelStyle,!0,!0));Object(s.select)(x).attr("class","classTitle");var _=x.getBBox();if(xt().flowchart.htmlLabels){var k=x.children[0],w=Object(s.select)(x);_=k.getBoundingClientRect(),w.attr("width",_.width),w.attr("height",_.height)}l+=_.height+4,_.width>u&&(u=_.width);var E=[];e.classData.members.forEach((function(t){var n=cn(t).displayText,r=h.node().appendChild(Ee(n,e.labelStyle,!0,!0)),i=r.getBBox();if(xt().flowchart.htmlLabels){var a=r.children[0],o=Object(s.select)(r);i=a.getBoundingClientRect(),o.attr("width",i.width),o.attr("height",i.height)}i.width>u&&(u=i.width),l+=i.height+4,E.push(r)})),l+=8;var T=[];if(e.classData.methods.forEach((function(t){var n=cn(t).displayText,r=h.node().appendChild(Ee(n,e.labelStyle,!0,!0)),i=r.getBBox();if(xt().flowchart.htmlLabels){var a=r.children[0],o=Object(s.select)(r);i=a.getBoundingClientRect(),o.attr("width",i.width),o.attr("height",i.height)}i.width>u&&(u=i.width),l+=i.height+4,T.push(r)})),l+=8,d){var C=(u-y.width)/2;Object(s.select)(g).attr("transform","translate( "+(-1*u/2+C)+", "+-1*l/2+")"),f=y.height+4}var S=(u-_.width)/2;return Object(s.select)(x).attr("transform","translate( "+(-1*u/2+S)+", 
"+(-1*l/2+f)+")"),f+=_.height+4,o.attr("class","divider").attr("x1",-u/2-r).attr("x2",u/2+r).attr("y1",-l/2-r+8+f).attr("y2",-l/2-r+8+f),f+=8,E.forEach((function(t){Object(s.select)(t).attr("transform","translate( "+-u/2+", "+(-1*l/2+f+4)+")"),f+=_.height+4})),f+=8,c.attr("class","divider").attr("x1",-u/2-r).attr("x2",u/2+r).attr("y1",-l/2-r+8+f).attr("y2",-l/2-r+8+f),f+=8,T.forEach((function(t){Object(s.select)(t).attr("transform","translate( "+-u/2+", "+(-1*l/2+f)+")"),f+=_.height+4})),a.attr("class","outer title-state").attr("x",-u/2-r).attr("y",-l/2-r).attr("width",u+e.padding).attr("height",l+e.padding),Ce(e,a),e.intersect=function(t){return Ve.rect(e,t)},i}},bn={},xn=function(t){var e=bn[t.id];f.trace("Transforming node",t,"translate("+(t.x-t.width/2-5)+", "+(t.y-t.height/2-5)+")");t.clusterNode?e.attr("transform","translate("+(t.x-t.width/2-8)+", "+(t.y-t.height/2-8)+")"):e.attr("transform","translate("+t.x+", "+t.y+")")},_n={rect:function(t,e){f.trace("Creating subgraph rect for ",e.id,e);var n=t.insert("g").attr("class","cluster"+(e.class?" "+e.class:"")).attr("id",e.id),r=n.insert("rect",":first-child"),i=n.insert("g").attr("class","cluster-label"),a=i.node().appendChild(Ee(e.labelText,e.labelStyle,void 0,!0)),o=a.getBBox();if(xt().flowchart.htmlLabels){var c=a.children[0],u=Object(s.select)(a);o=c.getBoundingClientRect(),u.attr("width",o.width),u.attr("height",o.height)}var l=0*e.padding,h=l/2;f.trace("Data ",e,JSON.stringify(e)),r.attr("rx",e.rx).attr("ry",e.ry).attr("x",e.x-e.width/2-h).attr("y",e.y-e.height/2-h).attr("width",e.width+l).attr("height",e.height+l),i.attr("transform","translate("+(e.x-o.width/2)+", "+(e.y-e.height/2-e.padding/3+3)+")");var d=r.node().getBBox();return e.width=d.width,e.height=d.height,e.intersect=function(t){return We(e,t)},n},roundedWithTitle:function(t,e){var n=t.insert("g").attr("class",e.classes).attr("id",e.id),r=n.insert("rect",":first-child"),i=n.insert("g").attr("class","cluster-label"),a=n.append("rect"),o=i.node().appendChild(Ee(e.labelText,e.labelStyle,void 0,!0)),c=o.getBBox();if(xt().flowchart.htmlLabels){var u=o.children[0],l=Object(s.select)(o);c=u.getBoundingClientRect(),l.attr("width",c.width),l.attr("height",c.height)}c=o.getBBox();var h=0*e.padding,f=h/2;r.attr("class","outer").attr("x",e.x-e.width/2-f).attr("y",e.y-e.height/2-f).attr("width",e.width+h).attr("height",e.height+h),a.attr("class","inner").attr("x",e.x-e.width/2-f).attr("y",e.y-e.height/2-f+c.height-1).attr("width",e.width+h).attr("height",e.height+h-c.height-3),i.attr("transform","translate("+(e.x-c.width/2)+", "+(e.y-e.height/2-e.padding/3+(xt().flowchart.htmlLabels?5:3))+")");var d=r.node().getBBox();return e.width=d.width,e.height=d.height,e.intersect=function(t){return We(e,t)},n},noteGroup:function(t,e){var n=t.insert("g").attr("class","note-cluster").attr("id",e.id),r=n.insert("rect",":first-child"),i=0*e.padding,a=i/2;r.attr("rx",e.rx).attr("ry",e.ry).attr("x",e.x-e.width/2-a).attr("y",e.y-e.height/2-a).attr("width",e.width+i).attr("height",e.height+i).attr("fill","none");var o=r.node().getBBox();return e.width=o.width,e.height=o.height,e.intersect=function(t){return We(e,t)},n},divider:function(t,e){var n=t.insert("g").attr("class",e.classes).attr("id",e.id),r=n.insert("rect",":first-child"),i=0*e.padding,a=i/2;r.attr("class","divider").attr("x",e.x-e.width/2-a).attr("y",e.y-e.height/2).attr("width",e.width+i).attr("height",e.height+i);var o=r.node().getBBox();return e.width=o.width,e.height=o.height,e.intersect=function(t){return 
We(e,t)},n}},kn={},wn={},En={},Tn=function(t,e){var n=t.x,r=t.y,i=Math.abs(e.x-n),a=Math.abs(e.y-r),o=t.width/2,s=t.height/2;return i>=o||a>=s},Cn=function(t,e,n){f.warn("intersection calc o:",e," i:",n,t);var r=t.x,i=t.y,a=Math.abs(r-n.x),o=t.width/2,s=n.xMath.abs(r-e.x)*c){var y=n.y0&&f.info("Recursive edges",n.edge(n.edges()[0]));var c=o.insert("g").attr("class","clusters"),u=o.insert("g").attr("class","edgePaths"),l=o.insert("g").attr("class","edgeLabels"),h=o.insert("g").attr("class","nodes");return n.nodes().forEach((function(e){var o=n.node(e);if(void 0!==i){var s=JSON.parse(JSON.stringify(i.clusterData));f.info("Setting data for cluster XXX (",e,") ",s,i),n.setNode(i.id,s),n.parent(e)||(f.warn("Setting parent",e,i.id),n.setParent(e,i.id,s))}if(f.info("(Insert) Node XXX"+e+": "+JSON.stringify(n.node(e))),o&&o.clusterNode){f.info("Cluster identified",e,o,n.node(e));var c=t(h,o.graph,r,n.node(e));Ce(o,c),function(t,e){bn[e.id]=t}(c,o),f.warn("Recursive render complete",c,o)}else n.children(e).length>0?(f.info("Cluster - the non recursive path XXX",e,o.id,o,n),f.info(Be(o.id,n)),Ae[o.id]={id:Be(o.id,n),node:o}):(f.info("Node - the non recursive path",e,o.id,o),function(t,e,n){var r,i;e.link?(r=t.insert("svg:a").attr("xlink:href",e.link).attr("target",e.linkTarget||"_blank"),i=mn[e.shape](r,e,n)):r=i=mn[e.shape](t,e,n),e.tooltip&&i.attr("title",e.tooltip),e.class&&i.attr("class","node default "+e.class),bn[e.id]=r,e.haveCallback&&bn[e.id].attr("class",bn[e.id].attr("class")+" clickable")}(h,n.node(e),a))})),n.edges().forEach((function(t){var e=n.edge(t.v,t.w,t.name);f.info("Edge "+t.v+" -> "+t.w+": "+JSON.stringify(t)),f.info("Edge "+t.v+" -> "+t.w+": ",t," ",JSON.stringify(n.edge(t))),f.info("Fix",Ae,"ids:",t.v,t.w,"Translateing: ",Ae[t.v],Ae[t.w]),function(t,e){var n=Ee(e.label,e.labelStyle),r=t.insert("g").attr("class","edgeLabel"),i=r.insert("g").attr("class","label");i.node().appendChild(n);var a=n.getBBox();if(xt().flowchart.htmlLabels){var o=n.children[0],c=Object(s.select)(n);a=o.getBoundingClientRect(),c.attr("width",a.width),c.attr("height",a.height)}if(i.attr("transform","translate("+-a.width/2+", "+-a.height/2+")"),wn[e.id]=r,e.width=a.width,e.height=a.height,e.startLabelLeft){var u=Ee(e.startLabelLeft,e.labelStyle),l=t.insert("g").attr("class","edgeTerminals"),h=l.insert("g").attr("class","inner");h.node().appendChild(u);var f=u.getBBox();h.attr("transform","translate("+-f.width/2+", "+-f.height/2+")"),En[e.id]||(En[e.id]={}),En[e.id].startLeft=l}if(e.startLabelRight){var d=Ee(e.startLabelRight,e.labelStyle),p=t.insert("g").attr("class","edgeTerminals"),g=p.insert("g").attr("class","inner");p.node().appendChild(d),g.node().appendChild(d);var y=d.getBBox();g.attr("transform","translate("+-y.width/2+", "+-y.height/2+")"),En[e.id]||(En[e.id]={}),En[e.id].startRight=p}if(e.endLabelLeft){var v=Ee(e.endLabelLeft,e.labelStyle),m=t.insert("g").attr("class","edgeTerminals"),b=m.insert("g").attr("class","inner");b.node().appendChild(v);var x=v.getBBox();b.attr("transform","translate("+-x.width/2+", "+-x.height/2+")"),m.node().appendChild(v),En[e.id]||(En[e.id]={}),En[e.id].endLeft=m}if(e.endLabelRight){var _=Ee(e.endLabelRight,e.labelStyle),k=t.insert("g").attr("class","edgeTerminals"),w=k.insert("g").attr("class","inner");w.node().appendChild(_);var E=_.getBBox();w.attr("transform","translate("+-E.width/2+", "+-E.height/2+")"),k.node().appendChild(_),En[e.id]||(En[e.id]={}),En[e.id].endRight=k}}(l,e)})),n.edges().forEach((function(t){f.info("Edge "+t.v+" -> "+t.w+": 
"+JSON.stringify(t))})),f.info("#############################################"),f.info("### Layout ###"),f.info("#############################################"),f.info(n),_e.a.layout(n),f.info("Graph after layout:",H.a.json.write(n)),Ie(n).forEach((function(t){var e=n.node(t);f.info("Position "+t+": "+JSON.stringify(n.node(t))),f.info("Position "+t+": ("+e.x,","+e.y,") width: ",e.width," height: ",e.height),e&&e.clusterNode?xn(e):n.children(t).length>0?(!function(t,e){f.trace("Inserting cluster");var n=e.shape||"rect";kn[e.id]=_n[n](t,e)}(c,e),Ae[e.id].node=e):xn(e)})),n.edges().forEach((function(t){var e=n.edge(t);f.info("Edge "+t.v+" -> "+t.w+": "+JSON.stringify(e),e);var i=function(t,e,n,r,i,a){var o=n.points,c=!1,u=a.node(e.v),l=a.node(e.w);if(l.intersect&&u.intersect&&((o=o.slice(1,n.points.length-1)).unshift(u.intersect(o[0])),f.info("Last point",o[o.length-1],l,l.intersect(o[o.length-1])),o.push(l.intersect(o[o.length-1]))),n.toCluster){var h;f.trace("edge",n),f.trace("to cluster",r[n.toCluster]),o=[];var d=!1;n.points.forEach((function(t){var e=r[n.toCluster].node;if(Tn(e,t)||d)d||o.push(t);else{f.trace("inside",n.toCluster,t,h);var i=Cn(e,h,t),a=!1;o.forEach((function(t){a=a||t.x===i.x&&t.y===i.y})),o.find((function(t){return t.x===i.x&&t.y===i.y}))?f.warn("no intersect",i,o):o.push(i),d=!0}h=t})),c=!0}if(n.fromCluster){f.trace("edge",n),f.warn("from cluster",r[n.fromCluster]);for(var p,g=[],y=!1,v=o.length-1;v>=0;v--){var m=o[v],b=r[n.fromCluster].node;if(Tn(b,m)||y)f.trace("Outside point",m),y||g.unshift(m);else{f.warn("inside",n.fromCluster,m,b);var x=Cn(b,p,m);g.unshift(x),y=!0}p=m}o=g,c=!0}var _,k=o.filter((function(t){return!Number.isNaN(t.y)})),w=Object(s.line)().x((function(t){return t.x})).y((function(t){return t.y})).curve(s.curveBasis);switch(n.thickness){case"normal":_="edge-thickness-normal";break;case"thick":_="edge-thickness-thick";break;default:_=""}switch(n.pattern){case"solid":_+=" edge-pattern-solid";break;case"dotted":_+=" edge-pattern-dotted";break;case"dashed":_+=" edge-pattern-dashed"}var E=t.append("path").attr("d",w(k)).attr("id",n.id).attr("class"," "+_+(n.classes?" 
"+n.classes:"")).attr("style",n.style),T="";switch(xt().state.arrowMarkerAbsolute&&(T=(T=(T=window.location.protocol+"//"+window.location.host+window.location.pathname+window.location.search).replace(/\(/g,"\\(")).replace(/\)/g,"\\)")),f.info("arrowTypeStart",n.arrowTypeStart),f.info("arrowTypeEnd",n.arrowTypeEnd),n.arrowTypeStart){case"arrow_cross":E.attr("marker-start","url("+T+"#"+i+"-crossStart)");break;case"arrow_point":E.attr("marker-start","url("+T+"#"+i+"-pointStart)");break;case"arrow_barb":E.attr("marker-start","url("+T+"#"+i+"-barbStart)");break;case"arrow_circle":E.attr("marker-start","url("+T+"#"+i+"-circleStart)");break;case"aggregation":E.attr("marker-start","url("+T+"#"+i+"-aggregationStart)");break;case"extension":E.attr("marker-start","url("+T+"#"+i+"-extensionStart)");break;case"composition":E.attr("marker-start","url("+T+"#"+i+"-compositionStart)");break;case"dependency":E.attr("marker-start","url("+T+"#"+i+"-dependencyStart)")}switch(n.arrowTypeEnd){case"arrow_cross":E.attr("marker-end","url("+T+"#"+i+"-crossEnd)");break;case"arrow_point":E.attr("marker-end","url("+T+"#"+i+"-pointEnd)");break;case"arrow_barb":E.attr("marker-end","url("+T+"#"+i+"-barbEnd)");break;case"arrow_circle":E.attr("marker-end","url("+T+"#"+i+"-circleEnd)");break;case"aggregation":E.attr("marker-end","url("+T+"#"+i+"-aggregationEnd)");break;case"extension":E.attr("marker-end","url("+T+"#"+i+"-extensionEnd)");break;case"composition":E.attr("marker-end","url("+T+"#"+i+"-compositionEnd)");break;case"dependency":E.attr("marker-end","url("+T+"#"+i+"-dependencyEnd)")}var C={};return c&&(C.updatedPath=o),C.originalPath=n.points,C}(u,t,e,Ae,r,n);!function(t,e){f.info("Moving label",t.id,t.label,wn[t.id]);var n=e.updatedPath?e.updatedPath:e.originalPath;if(t.label){var r=wn[t.id],i=t.x,a=t.y;if(n){var o=W.calcLabelPosition(n);f.info("Moving label from (",i,",",a,") to (",o.x,",",o.y,")")}r.attr("transform","translate("+i+", "+a+")")}if(t.startLabelLeft){var s=En[t.id].startLeft,c=t.x,u=t.y;if(n){var l=W.calcTerminalLabelPosition(0,"start_left",n);c=l.x,u=l.y}s.attr("transform","translate("+c+", "+u+")")}if(t.startLabelRight){var h=En[t.id].startRight,d=t.x,p=t.y;if(n){var g=W.calcTerminalLabelPosition(0,"start_right",n);d=g.x,p=g.y}h.attr("transform","translate("+d+", "+p+")")}if(t.endLabelLeft){var y=En[t.id].endLeft,v=t.x,m=t.y;if(n){var b=W.calcTerminalLabelPosition(0,"end_left",n);v=b.x,m=b.y}y.attr("transform","translate("+v+", "+m+")")}if(t.endLabelRight){var x=En[t.id].endRight,_=t.x,k=t.y;if(n){var w=W.calcTerminalLabelPosition(0,"end_right",n);_=w.x,k=w.y}x.attr("transform","translate("+_+", "+k+")")}}(e,i)})),o},An=function(t,e,n,r,i){we(t,n,r,i),bn={},wn={},En={},kn={},Me={},Oe={},Ae={},f.warn("Graph at first:",H.a.json.write(e)),Fe(e),f.warn("Graph after:",H.a.json.write(e)),Sn(t,e,r)},Mn={},On=function(t,e,n){var r=Object(s.select)('[id="'.concat(n,'"]'));Object.keys(t).forEach((function(n){var i=t[n],a="default";i.classes.length>0&&(a=i.classes.join(" "));var o,s=N(i.styles),c=void 0!==i.text?i.text:i.id;if(xt().flowchart.htmlLabels){var u={label:c.replace(/fa[lrsb]?:fa-[\w-]+/g,(function(t){return"")}))};(o=te()(r,u).node()).parentNode.removeChild(o)}else{var l=document.createElementNS("http://www.w3.org/2000/svg","text");l.setAttribute("style",s.labelStyle.replace("color:","fill:"));for(var h=c.split(x.lineBreakRegex),d=0;d=0;h--)i=l[h],f.info("Subgraph - ",i),qt.addVertex(i.id,i.title,"group",void 0,i.classes);var d=qt.getVertices(),p=qt.getEdges();f.info(p);var 
g=0;for(g=l.length-1;g>=0;g--){i=l[g],Object(s.selectAll)("cluster").append("text");for(var y=0;y0)switch(e.valign){case"top":case"start":s=function(){return Math.round(e.y+e.textMargin)};break;case"middle":case"center":s=function(){return Math.round(e.y+(n+r+e.textMargin)/2)};break;case"bottom":case"end":s=function(){return Math.round(e.y+(n+r+2*e.textMargin)-e.textMargin)}}if(void 0!==e.anchor&&void 0!==e.textMargin&&void 0!==e.width)switch(e.anchor){case"left":case"start":e.x=Math.round(e.x+e.textMargin),e.anchor="start",e.dominantBaseline="text-after-edge",e.alignmentBaseline="middle";break;case"middle":case"center":e.x=Math.round(e.x+e.width/2),e.anchor="middle",e.dominantBaseline="middle",e.alignmentBaseline="middle";break;case"right":case"end":e.x=Math.round(e.x+e.width-e.textMargin),e.anchor="end",e.dominantBaseline="text-before-edge",e.alignmentBaseline="middle"}for(var c=0;c0&&(r+=(l._groups||l)[0][0].getBBox().height,n=r),a.push(l)}return a},Pn=function(t,e){var n,r,i,a,o,s=t.append("polygon");return s.attr("points",(n=e.x,r=e.y,i=e.width,a=e.height,n+","+r+" "+(n+i)+","+r+" "+(n+i)+","+(r+a-(o=7))+" "+(n+i-1.2*o)+","+(r+a)+" "+n+","+(r+a))),s.attr("class","labelBox"),e.y=e.y+e.height/2,Fn(t,e),s},In=-1,jn=function(){return{x:0,y:0,fill:void 0,anchor:void 0,style:"#666",width:void 0,height:void 0,textMargin:0,rx:0,ry:0,tspan:!0,valign:void 0}},Rn=function(){return{x:0,y:0,fill:"#EDF2AE",stroke:"#666",width:100,anchor:"start",height:100,rx:0,ry:0}},Yn=function(){function t(t,e,n,i,a,o,s){r(e.append("text").attr("x",n+a/2).attr("y",i+o/2+5).style("text-anchor","middle").text(t),s)}function e(t,e,n,i,a,o,s,c){for(var u=c.actorFontSize,l=c.actorFontFamily,h=c.actorFontWeight,f=t.split(x.lineBreakRegex),d=0;d2&&void 0!==arguments[2]?arguments[2]:{text:void 0,wrap:void 0},r=arguments.length>3?arguments[3]:void 0;if(r===nr.ACTIVE_END){var i=Kn(t.actor);if(i<1){var a=new Error("Trying to inactivate an inactive participant ("+t.actor+")");throw a.hash={text:"->>-",token:"->>-",line:"1",loc:{first_line:1,last_line:1,first_column:1,last_column:1},expected:["'ACTIVE_PARTICIPANT'"]},a}}return Hn.push({from:t,to:e,message:n.text,wrap:void 0===n.wrap&&er()||!!n.wrap,type:r}),!0},er=function(){return Jn},nr={SOLID:0,DOTTED:1,NOTE:2,SOLID_CROSS:3,DOTTED_CROSS:4,SOLID_OPEN:5,DOTTED_OPEN:6,LOOP_START:10,LOOP_END:11,ALT_START:12,ALT_ELSE:13,ALT_END:14,OPT_START:15,OPT_END:16,ACTIVE_START:17,ACTIVE_END:18,PAR_START:19,PAR_AND:20,PAR_END:21,RECT_START:22,RECT_END:23},rr=function(t,e,n){var r={actor:t,placement:e,message:n.text,wrap:void 0===n.wrap&&er()||!!n.wrap},i=[].concat(t,t);Gn.push(r),Hn.push({from:i[0],to:i[1],message:n.text,wrap:void 0===n.wrap&&er()||!!n.wrap,type:nr.NOTE,placement:e})},ir=function(t){qn=t.text,Xn=void 0===t.wrap&&er()||!!t.wrap},ar={addActor:Qn,addMessage:function(t,e,n,r){Hn.push({from:t,to:e,message:n.text,wrap:void 0===n.wrap&&er()||!!n.wrap,answer:r})},addSignal:tr,autoWrap:er,setWrap:function(t){Jn=t},enableSequenceNumbers:function(){Zn=!0},showSequenceNumbers:function(){return Zn},getMessages:function(){return Hn},getActors:function(){return Vn},getActor:function(t){return Vn[t]},getActorKeys:function(){return Object.keys(Vn)},getTitle:function(){return qn},parseDirective:function(t,e,n){$o.parseDirective(this,t,e,n)},getConfig:function(){return xt().sequence},getTitleWrapped:function(){return Xn},clear:function(){Vn={},Hn=[]},parseMessage:function(t){var 
e=t.trim(),n={text:e.replace(/^[:]?(?:no)?wrap:/,"").trim(),wrap:null===e.match(/^[:]?(?:no)?wrap:/)?x.hasBreaks(e)||void 0:null!==e.match(/^[:]?wrap:/)||null===e.match(/^[:]?nowrap:/)&&void 0};return f.debug("parseMessage:",n),n},LINETYPE:nr,ARROWTYPE:{FILLED:0,OPEN:1},PLACEMENT:{LEFTOF:0,RIGHTOF:1,OVER:2},addNote:rr,setTitle:ir,apply:function t(e){if(e instanceof Array)e.forEach((function(e){t(e)}));else switch(e.type){case"addActor":Qn(e.actor,e.actor,e.description);break;case"activeStart":case"activeEnd":tr(e.actor,void 0,void 0,e.signalType);break;case"addNote":rr(e.actor,e.placement,e.text);break;case"addMessage":tr(e.from,e.to,e.msg,e.signalType);break;case"loopStart":tr(void 0,void 0,e.loopText,e.signalType);break;case"loopEnd":tr(void 0,void 0,void 0,e.signalType);break;case"rectStart":tr(void 0,void 0,e.color,e.signalType);break;case"rectEnd":tr(void 0,void 0,void 0,e.signalType);break;case"optStart":tr(void 0,void 0,e.optText,e.signalType);break;case"optEnd":tr(void 0,void 0,void 0,e.signalType);break;case"altStart":case"else":tr(void 0,void 0,e.altText,e.signalType);break;case"altEnd":tr(void 0,void 0,void 0,e.signalType);break;case"setTitle":ir(e.text);break;case"parStart":case"and":tr(void 0,void 0,e.parText,e.signalType);break;case"parEnd":tr(void 0,void 0,void 0,e.signalType)}}};Un.parser.yy=ar;var or={},sr={data:{startx:void 0,stopx:void 0,starty:void 0,stopy:void 0},verticalPos:0,sequenceItems:[],activations:[],models:{getHeight:function(){return Math.max.apply(null,0===this.actors.length?[0]:this.actors.map((function(t){return t.height||0})))+(0===this.loops.length?0:this.loops.map((function(t){return t.height||0})).reduce((function(t,e){return t+e})))+(0===this.messages.length?0:this.messages.map((function(t){return t.height||0})).reduce((function(t,e){return t+e})))+(0===this.notes.length?0:this.notes.map((function(t){return t.height||0})).reduce((function(t,e){return t+e})))},clear:function(){this.actors=[],this.loops=[],this.messages=[],this.notes=[]},addActor:function(t){this.actors.push(t)},addLoop:function(t){this.loops.push(t)},addMessage:function(t){this.messages.push(t)},addNote:function(t){this.notes.push(t)},lastActor:function(){return this.actors[this.actors.length-1]},lastLoop:function(){return this.loops[this.loops.length-1]},lastMessage:function(){return this.messages[this.messages.length-1]},lastNote:function(){return this.notes[this.notes.length-1]},actors:[],loops:[],messages:[],notes:[]},init:function(){this.sequenceItems=[],this.activations=[],this.models.clear(),this.data={startx:void 0,stopx:void 0,starty:void 0,stopy:void 0},this.verticalPos=0,fr(Un.parser.yy.getConfig())},updateVal:function(t,e,n,r){void 0===t[e]?t[e]=n:t[e]=r(n,t[e])},updateBounds:function(t,e,n,r){var i=this,a=0;function o(o){return function(s){a++;var c=i.sequenceItems.length-a+1;i.updateVal(s,"starty",e-c*or.boxMargin,Math.min),i.updateVal(s,"stopy",r+c*or.boxMargin,Math.max),i.updateVal(sr.data,"startx",t-c*or.boxMargin,Math.min),i.updateVal(sr.data,"stopx",n+c*or.boxMargin,Math.max),"activation"!==o&&(i.updateVal(s,"startx",t-c*or.boxMargin,Math.min),i.updateVal(s,"stopx",n+c*or.boxMargin,Math.max),i.updateVal(sr.data,"starty",e-c*or.boxMargin,Math.min),i.updateVal(sr.data,"stopy",r+c*or.boxMargin,Math.max))}}this.sequenceItems.forEach(o()),this.activations.forEach(o("activation"))},insert:function(t,e,n,r){var 
i=Math.min(t,n),a=Math.max(t,n),o=Math.min(e,r),s=Math.max(e,r);this.updateVal(sr.data,"startx",i,Math.min),this.updateVal(sr.data,"starty",o,Math.min),this.updateVal(sr.data,"stopx",a,Math.max),this.updateVal(sr.data,"stopy",s,Math.max),this.updateBounds(i,o,a,s)},newActivation:function(t,e,n){var r=n[t.from.actor],i=dr(t.from.actor).length||0,a=r.x+r.width/2+(i-1)*or.activationWidth/2;this.activations.push({startx:a,starty:this.verticalPos+2,stopx:a+or.activationWidth,stopy:void 0,actor:t.from.actor,anchored:zn.anchorElement(e)})},endActivation:function(t){var e=this.activations.map((function(t){return t.actor})).lastIndexOf(t.from.actor);return this.activations.splice(e,1)[0]},createLoop:function(){var t=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{message:void 0,wrap:!1,width:void 0},e=arguments.length>1?arguments[1]:void 0;return{startx:void 0,starty:this.verticalPos,stopx:void 0,stopy:void 0,title:t.message,wrap:t.wrap,width:t.width,height:0,fill:e}},newLoop:function(){var t=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{message:void 0,wrap:!1,width:void 0},e=arguments.length>1?arguments[1]:void 0;this.sequenceItems.push(this.createLoop(t,e))},endLoop:function(){return this.sequenceItems.pop()},addSectionToLoop:function(t){var e=this.sequenceItems.pop();e.sections=e.sections||[],e.sectionTitles=e.sectionTitles||[],e.sections.push({y:sr.getVerticalPos(),height:0}),e.sectionTitles.push(t),this.sequenceItems.push(e)},bumpVerticalPos:function(t){this.verticalPos=this.verticalPos+t,this.data.stopy=this.verticalPos},getVerticalPos:function(){return this.verticalPos},getBounds:function(){return{bounds:this.data,models:this.models}}},cr=function(t){return{fontFamily:t.messageFontFamily,fontSize:t.messageFontSize,fontWeight:t.messageFontWeight}},ur=function(t){return{fontFamily:t.noteFontFamily,fontSize:t.noteFontSize,fontWeight:t.noteFontWeight}},lr=function(t){return{fontFamily:t.actorFontFamily,fontSize:t.actorFontSize,fontWeight:t.actorFontWeight}},hr=function(t,e,n,r){for(var i=0,a=0,o=0;o0&&o.forEach((function(r){if(n=r,i.startx===i.stopx){var a=e[t.from],o=e[t.to];n.from=Math.min(a.x-i.width/2,a.x-a.width/2,n.from),n.to=Math.max(o.x+i.width/2,o.x+a.width/2,n.to),n.width=Math.max(n.width,Math.abs(n.to-n.from))-or.labelBoxWidth}else n.from=Math.min(i.startx,n.from),n.to=Math.max(i.stopx,n.to),n.width=Math.max(n.width,i.width)-or.labelBoxWidth})))})),sr.activations=[],f.debug("Loop type widths:",a),a},br={bounds:sr,drawActors:hr,setConf:fr,draw:function(t,e){or=xt().sequence,Un.parser.yy.clear(),Un.parser.yy.setWrap(or.wrap),Un.parser.parse(t+"\n"),sr.init(),f.debug("C:".concat(JSON.stringify(or,null,2)));var n=Object(s.select)('[id="'.concat(e,'"]')),r=Un.parser.yy.getActors(),i=Un.parser.yy.getActorKeys(),a=Un.parser.yy.getMessages(),o=Un.parser.yy.getTitle(),c=yr(r,a);or.height=vr(r,c),hr(n,r,i,0);var u=mr(a,r,c);zn.insertArrowHead(n),zn.insertArrowCrossHead(n),zn.insertSequenceNumber(n);var l=1;a.forEach((function(t){var e,i,a;switch(t.type){case Un.parser.yy.LINETYPE.NOTE:i=t.noteModel,function(t,e){sr.bumpVerticalPos(or.boxMargin),e.height=or.boxMargin,e.starty=sr.getVerticalPos();var n=zn.getNoteRect();n.x=e.startx,n.y=e.starty,n.width=e.width||or.width,n.class="note";var 
r=t.append("g"),i=zn.drawRect(r,n),a=zn.getTextObj();a.x=e.startx,a.y=e.starty,a.width=n.width,a.dy="1em",a.text=e.message,a.class="noteText",a.fontFamily=or.noteFontFamily,a.fontSize=or.noteFontSize,a.fontWeight=or.noteFontWeight,a.anchor=or.noteAlign,a.textMargin=or.noteMargin,a.valign=or.noteAlign,a.wrap=!0;var o=Fn(r,a),s=Math.round(o.map((function(t){return(t._groups||t)[0][0].getBBox().height})).reduce((function(t,e){return t+e})));i.attr("height",s+2*or.noteMargin),e.height+=s+2*or.noteMargin,sr.bumpVerticalPos(s+2*or.noteMargin),e.stopy=e.starty+s+2*or.noteMargin,e.stopx=e.startx+n.width,sr.insert(e.startx,e.starty,e.stopx,e.stopy),sr.models.addNote(e)}(n,i);break;case Un.parser.yy.LINETYPE.ACTIVE_START:sr.newActivation(t,n,r);break;case Un.parser.yy.LINETYPE.ACTIVE_END:!function(t,e){var r=sr.endActivation(t);r.starty+18>e&&(r.starty=e-6,e+=12),zn.drawActivation(n,r,e,or,dr(t.from.actor).length),sr.insert(r.startx,e-10,r.stopx,e)}(t,sr.getVerticalPos());break;case Un.parser.yy.LINETYPE.LOOP_START:gr(u,t,or.boxMargin,or.boxMargin+or.boxTextMargin,(function(t){return sr.newLoop(t)}));break;case Un.parser.yy.LINETYPE.LOOP_END:e=sr.endLoop(),zn.drawLoop(n,e,"loop",or),sr.bumpVerticalPos(e.stopy-sr.getVerticalPos()),sr.models.addLoop(e);break;case Un.parser.yy.LINETYPE.RECT_START:gr(u,t,or.boxMargin,or.boxMargin,(function(t){return sr.newLoop(void 0,t.message)}));break;case Un.parser.yy.LINETYPE.RECT_END:e=sr.endLoop(),zn.drawBackgroundRect(n,e),sr.models.addLoop(e),sr.bumpVerticalPos(e.stopy-sr.getVerticalPos());break;case Un.parser.yy.LINETYPE.OPT_START:gr(u,t,or.boxMargin,or.boxMargin+or.boxTextMargin,(function(t){return sr.newLoop(t)}));break;case Un.parser.yy.LINETYPE.OPT_END:e=sr.endLoop(),zn.drawLoop(n,e,"opt",or),sr.bumpVerticalPos(e.stopy-sr.getVerticalPos()),sr.models.addLoop(e);break;case Un.parser.yy.LINETYPE.ALT_START:gr(u,t,or.boxMargin,or.boxMargin+or.boxTextMargin,(function(t){return sr.newLoop(t)}));break;case Un.parser.yy.LINETYPE.ALT_ELSE:gr(u,t,or.boxMargin+or.boxTextMargin,or.boxMargin,(function(t){return sr.addSectionToLoop(t)}));break;case Un.parser.yy.LINETYPE.ALT_END:e=sr.endLoop(),zn.drawLoop(n,e,"alt",or),sr.bumpVerticalPos(e.stopy-sr.getVerticalPos()),sr.models.addLoop(e);break;case Un.parser.yy.LINETYPE.PAR_START:gr(u,t,or.boxMargin,or.boxMargin+or.boxTextMargin,(function(t){return sr.newLoop(t)}));break;case Un.parser.yy.LINETYPE.PAR_AND:gr(u,t,or.boxMargin+or.boxTextMargin,or.boxMargin,(function(t){return sr.addSectionToLoop(t)}));break;case Un.parser.yy.LINETYPE.PAR_END:e=sr.endLoop(),zn.drawLoop(n,e,"par",or),sr.bumpVerticalPos(e.stopy-sr.getVerticalPos()),sr.models.addLoop(e);break;default:try{(a=t.msgModel).starty=sr.getVerticalPos(),a.sequenceIndex=l,function(t,e){sr.bumpVerticalPos(10);var n=e.startx,r=e.stopx,i=e.starty,a=e.message,o=e.type,s=e.sequenceIndex,c=e.wrap,u=x.splitBreaks(a).length,l=W.calculateTextDimensions(a,cr(or)),h=l.height/u;e.height+=h,sr.bumpVerticalPos(h);var f=zn.getTextObj();f.x=n,f.y=i+10,f.width=r-n,f.class="messageText",f.dy="1em",f.text=a,f.fontFamily=or.messageFontFamily,f.fontSize=or.messageFontSize,f.fontWeight=or.messageFontWeight,f.anchor=or.messageAlign,f.valign=or.messageAlign,f.textMargin=or.wrapPadding,f.tspan=!1,f.wrap=c,Fn(t,f);var d,p,g=l.height-10,y=l.width;if(n===r){p=sr.getVerticalPos()+g,or.rightAngles?d=t.append("path").attr("d","M ".concat(n,",").concat(p," H ").concat(n+Math.max(or.width/2,y/2)," V ").concat(p+25," H ").concat(n)):(g+=or.boxMargin,p=sr.getVerticalPos()+g,d=t.append("path").attr("d","M 
"+n+","+p+" C "+(n+60)+","+(p-10)+" "+(n+60)+","+(p+30)+" "+n+","+(p+20))),g+=30;var v=Math.max(y/2,or.width/2);sr.insert(n-v,sr.getVerticalPos()-10+g,r+v,sr.getVerticalPos()+30+g)}else g+=or.boxMargin,p=sr.getVerticalPos()+g,(d=t.append("line")).attr("x1",n),d.attr("y1",p),d.attr("x2",r),d.attr("y2",p),sr.insert(n,p-10,r,p);o===Un.parser.yy.LINETYPE.DOTTED||o===Un.parser.yy.LINETYPE.DOTTED_CROSS||o===Un.parser.yy.LINETYPE.DOTTED_OPEN?(d.style("stroke-dasharray","3, 3"),d.attr("class","messageLine1")):d.attr("class","messageLine0");var m="";or.arrowMarkerAbsolute&&(m=(m=(m=window.location.protocol+"//"+window.location.host+window.location.pathname+window.location.search).replace(/\(/g,"\\(")).replace(/\)/g,"\\)")),d.attr("stroke-width",2),d.attr("stroke","none"),d.style("fill","none"),o!==Un.parser.yy.LINETYPE.SOLID&&o!==Un.parser.yy.LINETYPE.DOTTED||d.attr("marker-end","url("+m+"#arrowhead)"),o!==Un.parser.yy.LINETYPE.SOLID_CROSS&&o!==Un.parser.yy.LINETYPE.DOTTED_CROSS||d.attr("marker-end","url("+m+"#crosshead)"),(ar.showSequenceNumbers()||or.showSequenceNumbers)&&(d.attr("marker-start","url("+m+"#sequencenumber)"),t.append("text").attr("x",n).attr("y",p+4).attr("font-family","sans-serif").attr("font-size","12px").attr("text-anchor","middle").attr("textLength","16px").attr("class","sequenceNumber").text(s)),sr.bumpVerticalPos(g),e.height+=g,e.stopy=e.starty+e.height,sr.insert(e.fromBounds,e.starty,e.toBounds,e.stopy)}(n,a),sr.models.addMessage(a)}catch(t){f.error("error while drawing message",t)}}[Un.parser.yy.LINETYPE.SOLID_OPEN,Un.parser.yy.LINETYPE.DOTTED_OPEN,Un.parser.yy.LINETYPE.SOLID,Un.parser.yy.LINETYPE.DOTTED,Un.parser.yy.LINETYPE.SOLID_CROSS,Un.parser.yy.LINETYPE.DOTTED_CROSS].includes(t.type)&&l++})),or.mirrorActors&&(sr.bumpVerticalPos(2*or.boxMargin),hr(n,r,i,sr.getVerticalPos()));var h=sr.getBounds().bounds;f.debug("For line height fix Querying: #"+e+" .actor-line"),Object(s.selectAll)("#"+e+" .actor-line").attr("y2",h.stopy);var d=h.stopy-h.starty+2*or.diagramMarginY;or.mirrorActors&&(d=d-or.boxMargin+or.bottomMarginAdj);var p=h.stopx-h.startx+2*or.diagramMarginX;o&&n.append("text").text(o).attr("x",(h.stopx-h.startx)/2-2*or.diagramMarginX).attr("y",-25),$(n,d,p,or.useMaxWidth);var g=o?40:0;n.attr("viewBox",h.startx-or.diagramMarginX+" -"+(or.diagramMarginY+g)+" "+p+" "+(d+g)),f.debug("models:",sr.models)}},xr=n(27),_r=n.n(xr);function kr(t){return function(t){if(Array.isArray(t)){for(var e=0,n=new Array(t.length);e=6&&n.indexOf("weekends")>=0||(n.indexOf(t.format("dddd").toLowerCase())>=0||n.indexOf(t.format(e.trim()))>=0)},jr=function(t,e,n){if(n.length&&!t.manualEndTime){var r=l()(t.startTime,e,!0);r.add(1,"d");var i=l()(t.endTime,e,!0),a=Rr(r,i,e,n);t.endTime=i.toDate(),t.renderEndTime=a}},Rr=function(t,e,n,r){for(var i=!1,a=null;t<=e;)i||(a=e.toDate()),(i=Ir(t,n,r))&&e.add(1,"d"),t.add(1,"d");return a},Yr=function(t,e,n){n=n.trim();var r=/^after\s+([\d\w- ]+)/.exec(n.trim());if(null!==r){var i=null;if(r[1].split(" ").forEach((function(t){var e=Gr(t);void 0!==e&&(i?e.endTime>i.endTime&&(i=e):i=e)})),i)return i.endTime;var a=new Date;return a.setHours(0,0,0,0),a}var o=l()(n,e.trim(),!0);return o.isValid()?o.toDate():(f.debug("Invalid date:"+n),f.debug("With date format:"+e.trim()),new Date)},zr=function(t,e){if(null!==t)switch(t[2]){case"s":e.add(t[1],"seconds");break;case"m":e.add(t[1],"minutes");break;case"h":e.add(t[1],"hours");break;case"d":e.add(t[1],"days");break;case"w":e.add(t[1],"weeks")}return e.toDate()},Ur=function(t,e,n,r){r=r||!1,n=n.trim();var 
i=l()(n,e.trim(),!0);return i.isValid()?(r&&i.add(1,"d"),i.toDate()):zr(/^([\d]+)([wdhms])/.exec(n.trim()),l()(t))},$r=0,Wr=function(t){return void 0===t?"task"+($r+=1):t},Vr=[],Hr={},Gr=function(t){var e=Hr[t];return Vr[e]},qr=function(){for(var t=function(t){var e=Vr[t],n="";switch(Vr[t].raw.startTime.type){case"prevTaskEnd":var r=Gr(e.prevTaskId);e.startTime=r.endTime;break;case"getStartDate":(n=Yr(0,Tr,Vr[t].raw.startTime.startData))&&(Vr[t].startTime=n)}return Vr[t].startTime&&(Vr[t].endTime=Ur(Vr[t].startTime,Tr,Vr[t].raw.endTime.data,Fr),Vr[t].endTime&&(Vr[t].processed=!0,Vr[t].manualEndTime=l()(Vr[t].raw.endTime.data,"YYYY-MM-DD",!0).isValid(),jr(Vr[t],Tr,Ar))),Vr[t].processed},e=!0,n=0;nr?i=1:n0&&(e=t.classes.join(" "));for(var n=0,r=0;rn-e?n+a+1.5*ti.leftPadding>u?e+r-5:n+r+5:(n-e)/2+e+r})).attr("y",(function(t,r){return t.order*e+ti.barHeight/2+(ti.fontSize/2-2)+n})).attr("text-height",i).attr("class",(function(t){var e=o(t.startTime),n=o(t.endTime);t.milestone&&(n=e+i);var r=this.getBBox().width,a="";t.classes.length>0&&(a=t.classes.join(" "));for(var s=0,l=0;ln-e?n+r+1.5*ti.leftPadding>u?a+" taskTextOutsideLeft taskTextOutside"+s+" "+h:a+" taskTextOutsideRight taskTextOutside"+s+" "+h+" width-"+r:a+" taskText taskText"+s+" "+h+" width-"+r}))}(t,i,u,f,r,0,e),function(t,e){for(var n=[],r=0,i=0;i0&&a.setAttribute("dy","1em"),a.textContent=e[i],r.appendChild(a)}return r})).attr("x",10).attr("y",(function(i,a){if(!(a>0))return i[1]*t/2+e;for(var o=0;o "+t.w+": "+JSON.stringify(i.edge(t))),yn(r,i.edge(t),i.edge(t).relation,oi))}));var h=r.node().getBBox(),d=h.width+40,p=h.height+40;$(r,p,d,oi.useMaxWidth);var g="".concat(h.x-20," ").concat(h.y-20," ").concat(d," ").concat(p);f.debug("viewBox ".concat(g)),r.attr("viewBox",g)};ri.parser.yy=on;var li={dividerMargin:10,padding:5,textHeight:10},hi=function(t){Object.keys(t).forEach((function(e){li[e]=t[e]}))},fi=function(t,e){f.info("Drawing class"),on.clear(),ri.parser.parse(t);var n=xt().flowchart;f.info("config:",n);var r=n.nodeSpacing||50,i=n.rankSpacing||50,a=new H.a.Graph({multigraph:!0,compound:!0}).setGraph({rankdir:"TD",nodesep:r,ranksep:i,marginx:8,marginy:8}).setDefaultEdgeLabel((function(){return{}})),o=on.getClasses(),c=on.getRelations();f.info(c),function(t,e){var n=Object.keys(t);f.info("keys:",n),f.info(t),n.forEach((function(n){var r=t[n],i="";r.cssClasses.length>0&&(i=i+" "+r.cssClasses.join(" "));var a={labelStyle:""},o=void 0!==r.text?r.text:r.id,s="";switch(r.type){case"class":s="class_box";break;default:s="class_box"}e.setNode(r.id,{labelStyle:a.labelStyle,shape:s,labelText:o,classData:r,rx:0,ry:0,class:i,style:a.style,id:r.id,domId:r.domId,haveCallback:r.haveCallback,link:r.link,width:"group"===r.type?500:void 0,type:r.type,padding:xt().flowchart.padding}),f.info("setNode",{labelStyle:a.labelStyle,shape:s,labelText:o,rx:0,ry:0,class:i,style:a.style,id:r.id,width:"group"===r.type?500:void 0,type:r.type,padding:xt().flowchart.padding})}))}(o,a),function(t,e){var n=0;t.forEach((function(r){n++;var i={classes:"relation"};i.pattern=1==r.relation.lineType?"dashed":"solid",i.id="id"+n,"arrow_open"===r.type?i.arrowhead="none":i.arrowhead="normal",f.info(i,r),i.startLabelRight="none"===r.relationTitle1?"":r.relationTitle1,i.endLabelLeft="none"===r.relationTitle2?"":r.relationTitle2,i.arrowTypeStart=di(r.relation.type1),i.arrowTypeEnd=di(r.relation.type2);var a="",o="";if(void 0!==r.style){var c=N(r.style);a=c.style,o=c.labelStyle}else a="fill:none";i.style=a,i.labelStyle=o,void 
0!==r.interpolate?i.curve=O(r.interpolate,s.curveLinear):void 0!==t.defaultInterpolate?i.curve=O(t.defaultInterpolate,s.curveLinear):i.curve=O(li.curve,s.curveLinear),r.text=r.title,void 0===r.text?void 0!==r.style&&(i.arrowheadStyle="fill: #333"):(i.arrowheadStyle="fill: #333",i.labelpos="c",xt().flowchart.htmlLabels,i.labelType="text",i.label=r.text.replace(x.lineBreakRegex,"\n"),void 0===r.style&&(i.style=i.style||"stroke: #333; stroke-width: 1.5px;fill:none"),i.labelStyle=i.labelStyle.replace("color:","fill:")),e.setEdge(r.id1,r.id2,i,n)}))}(c,a);var u=Object(s.select)('[id="'.concat(e,'"]'));u.attr("xmlns:xlink","http://www.w3.org/1999/xlink");var l=Object(s.select)("#"+e+" g");An(l,a,["aggregation","extension","composition","dependency"],"classDiagram",e);var h=u.node().getBBox(),d=h.width+16,p=h.height+16;if(f.debug("new ViewBox 0 0 ".concat(d," ").concat(p),"translate(".concat(8-a._label.marginx,", ").concat(8-a._label.marginy,")")),$(u,p,d,n.useMaxWidth),u.attr("viewBox","0 0 ".concat(d," ").concat(p)),u.select("g").attr("transform","translate(".concat(8-a._label.marginx,", ").concat(8-h.y,")")),!n.htmlLabels)for(var g=document.querySelectorAll('[id="'+e+'"] .edgeLabel .label'),y=0;y0&&o.length>0){var c={stmt:"state",id:L(),type:"divider",doc:yi(o)};i.push(yi(c)),n.doc=i}n.doc.forEach((function(e){return t(n,e,!0)}))}}({id:"root"},{id:"root",doc:vi},!0),{id:"root",doc:vi}},extract:function(t){var e;e=t.doc?t.doc:t,f.info(e),ki(),f.info("Extract",e),e.forEach((function(t){"state"===t.stmt&&_i(t.id,t.type,t.doc,t.description,t.note),"relation"===t.stmt&&wi(t.state1.id,t.state2.id,t.description)}))},trimColon:function(t){return t&&":"===t[0]?t.substr(1).trim():t.trim()}},Ai=n(22),Mi=n.n(Ai),Oi={},Di=function(t,e){Oi[t]=e},Ni=function(t,e){var n=t.append("text").attr("x",2*xt().state.padding).attr("y",xt().state.textHeight+1.3*xt().state.padding).attr("font-size",xt().state.fontSize).attr("class","state-title").text(e.descriptions[0]).node().getBBox(),r=n.height,i=t.append("text").attr("x",xt().state.padding).attr("y",r+.4*xt().state.padding+xt().state.dividerMargin+xt().state.textHeight).attr("class","state-description"),a=!0,o=!0;e.descriptions.forEach((function(t){a||(!function(t,e,n){var r=t.append("tspan").attr("x",2*xt().state.padding).text(e);n||r.attr("dy",xt().state.textHeight)}(i,t,o),o=!1),a=!1}));var s=t.append("line").attr("x1",xt().state.padding).attr("y1",xt().state.padding+r+xt().state.dividerMargin/2).attr("y2",xt().state.padding+r+xt().state.dividerMargin/2).attr("class","descr-divider"),c=i.node().getBBox(),u=Math.max(c.width,n.width);return s.attr("x2",u+3*xt().state.padding),t.insert("rect",":first-child").attr("x",xt().state.padding).attr("y",xt().state.padding).attr("width",u+2*xt().state.padding).attr("height",c.height+r+2*xt().state.padding).attr("rx",xt().state.radius),t},Bi=function(t,e,n){var r,i=xt().state.padding,a=2*xt().state.padding,o=t.node().getBBox(),s=o.width,c=o.x,u=t.append("text").attr("x",0).attr("y",xt().state.titleShift).attr("font-size",xt().state.fontSize).attr("class","state-title").text(e.id),l=u.node().getBBox().width+a,h=Math.max(l,s);h===s&&(h+=a);var f=t.node().getBBox();e.doc,r=c-i,l>s&&(r=(s-h)/2+i),Math.abs(c-f.x)s&&(r=c-(l-s)/2);var d=1-xt().state.textHeight;return 
t.insert("rect",":first-child").attr("x",r).attr("y",d).attr("class",n?"alt-composit":"composit").attr("width",h).attr("height",f.height+xt().state.textHeight+xt().state.titleShift+1).attr("rx","0"),u.attr("x",r+i),l<=s&&u.attr("x",c+(h-a)/2-l/2+i),t.insert("rect",":first-child").attr("x",r).attr("y",xt().state.titleShift-xt().state.textHeight-xt().state.padding).attr("width",h).attr("height",3*xt().state.textHeight).attr("rx",xt().state.radius),t.insert("rect",":first-child").attr("x",r).attr("y",xt().state.titleShift-xt().state.textHeight-xt().state.padding).attr("width",h).attr("height",f.height+3+2*xt().state.textHeight).attr("rx",xt().state.radius),t},Li=function(t,e){e.attr("class","state-note");var n=e.append("rect").attr("x",0).attr("y",xt().state.padding),r=function(t,e,n,r){var i=0,a=r.append("text");a.style("text-anchor","start"),a.attr("class","noteText");var o=t.replace(/\r\n/g,"
"),s=(o=o.replace(/\n/g,"
")).split(x.lineBreakRegex),c=1.25*xt().state.noteMargin,u=!0,l=!1,h=void 0;try{for(var f,d=s[Symbol.iterator]();!(u=(f=d.next()).done);u=!0){var p=f.value.trim();if(p.length>0){var g=a.append("tspan");if(g.text(p),0===c)c+=g.node().getBBox().height;i+=c,g.attr("x",e+xt().state.noteMargin),g.attr("y",n+i+1.25*xt().state.noteMargin)}}}catch(t){l=!0,h=t}finally{try{u||null==d.return||d.return()}finally{if(l)throw h}}return{textWidth:a.node().getBBox().width,textHeight:i}}(t,0,0,e.append("g")),i=r.textWidth,a=r.textHeight;return n.attr("height",a+2*xt().state.noteMargin),n.attr("width",i+2*xt().state.noteMargin),n},Fi=function(t,e){var n=e.id,r={id:n,label:e.id,width:0,height:0},i=t.append("g").attr("id",n).attr("class","stateGroup");"start"===e.type&&function(t){t.append("circle").attr("class","start-state").attr("r",xt().state.sizeUnit).attr("cx",xt().state.padding+xt().state.sizeUnit).attr("cy",xt().state.padding+xt().state.sizeUnit)}(i),"end"===e.type&&function(t){t.append("circle").attr("class","end-state-outer").attr("r",xt().state.sizeUnit+xt().state.miniPadding).attr("cx",xt().state.padding+xt().state.sizeUnit+xt().state.miniPadding).attr("cy",xt().state.padding+xt().state.sizeUnit+xt().state.miniPadding),t.append("circle").attr("class","end-state-inner").attr("r",xt().state.sizeUnit).attr("cx",xt().state.padding+xt().state.sizeUnit+2).attr("cy",xt().state.padding+xt().state.sizeUnit+2)}(i),"fork"!==e.type&&"join"!==e.type||function(t,e){var n=xt().state.forkWidth,r=xt().state.forkHeight;if(e.parentId){var i=n;n=r,r=i}t.append("rect").style("stroke","black").style("fill","black").attr("width",n).attr("height",r).attr("x",xt().state.padding).attr("y",xt().state.padding)}(i,e),"note"===e.type&&Li(e.note.text,i),"divider"===e.type&&function(t){t.append("line").style("stroke","grey").style("stroke-dasharray","3").attr("x1",xt().state.textHeight).attr("class","divider").attr("x2",2*xt().state.textHeight).attr("y1",0).attr("y2",0)}(i),"default"===e.type&&0===e.descriptions.length&&function(t,e){var n=t.append("text").attr("x",2*xt().state.padding).attr("y",xt().state.textHeight+2*xt().state.padding).attr("font-size",xt().state.fontSize).attr("class","state-title").text(e.id),r=n.node().getBBox();t.insert("rect",":first-child").attr("x",xt().state.padding).attr("y",xt().state.padding).attr("width",r.width+2*xt().state.padding).attr("height",r.height+2*xt().state.padding).attr("rx",xt().state.radius)}(i,e),"default"===e.type&&e.descriptions.length>0&&Ni(i,e);var a=i.node().getBBox();return r.width=a.width+2*xt().state.padding,r.height=a.height+2*xt().state.padding,Di(n,r),r},Pi=0;Ai.parser.yy=Si;var Ii={},ji=function t(e,n,r,i){var a,o=new H.a.Graph({compound:!0,multigraph:!0}),c=!0;for(a=0;a "+t.w+": "+JSON.stringify(o.edge(t))),function(t,e,n){e.points=e.points.filter((function(t){return!Number.isNaN(t.y)}));var r=e.points,i=Object(s.line)().x((function(t){return t.x})).y((function(t){return t.y})).curve(s.curveBasis),a=t.append("path").attr("d",i(r)).attr("id","edge"+Pi).attr("class","transition"),o="";if(xt().state.arrowMarkerAbsolute&&(o=(o=(o=window.location.protocol+"//"+window.location.host+window.location.pathname+window.location.search).replace(/\(/g,"\\(")).replace(/\)/g,"\\)")),a.attr("marker-end","url("+o+"#"+function(t){switch(t){case Si.relationType.AGGREGATION:return"aggregation";case Si.relationType.EXTENSION:return"extension";case Si.relationType.COMPOSITION:return"composition";case Si.relationType.DEPENDENCY:return"dependency"}}(Si.relationType.DEPENDENCY)+"End)"),void 
0!==n.title){for(var c=t.append("g").attr("class","stateLabel"),u=W.calcLabelPosition(e.points),l=u.x,h=u.y,d=x.getRows(n.title),p=0,g=[],y=0,v=0,m=0;m<=d.length;m++){var b=c.append("text").attr("text-anchor","middle").text(d[m]).attr("x",l).attr("y",h+p),_=b.node().getBBox();if(y=Math.max(y,_.width),v=Math.min(v,_.x),f.info(_.x,l,h+p),0===p){var k=b.node().getBBox();p=k.height,f.info("Title height",p,h)}g.push(b)}var w=p*d.length;if(d.length>1){var E=(d.length-1)*p*.5;g.forEach((function(t,e){return t.attr("y",h+e*p-E)})),w=p*d.length}var T=c.node().getBBox();c.insert("rect",":first-child").attr("class","box").attr("x",l-y/2-xt().state.padding/2).attr("y",h-w/2-xt().state.padding/2-3.5).attr("width",y+xt().state.padding).attr("height",w+xt().state.padding),f.info(T)}Pi++}(n,o.edge(t),o.edge(t).relation))})),w=k.getBBox();var E={id:r||"root",label:r||"root",width:0,height:0};return E.width=w.width+2*gi.padding,E.height=w.height+2*gi.padding,f.debug("Doc rendered",E,o),E},Ri=function(){},Yi=function(t,e){gi=xt().state,Ai.parser.yy.clear(),Ai.parser.parse(t),f.debug("Rendering diagram "+t);var n=Object(s.select)("[id='".concat(e,"']"));n.append("defs").append("marker").attr("id","dependencyEnd").attr("refX",19).attr("refY",7).attr("markerWidth",20).attr("markerHeight",28).attr("orient","auto").append("path").attr("d","M 19,7 L9,13 L14,7 L9,1 Z"),new H.a.Graph({multigraph:!0,compound:!0,rankdir:"RL"}).setDefaultEdgeLabel((function(){return{}}));var r=Si.getRootDoc();ji(r,n,void 0,!1);var i=gi.padding,a=n.node().getBBox(),o=a.width+2*i,c=a.height+2*i;$(n,c,1.75*o,gi.useMaxWidth),n.attr("viewBox","".concat(a.x-gi.padding," ").concat(a.y-gi.padding," ")+o+" "+c)},zi={},Ui={},$i=function(t,e,n,r){if("root"!==n.id){var i="rect";!0===n.start&&(i="start"),!1===n.start&&(i="end"),"default"!==n.type&&(i=n.type),Ui[n.id]||(Ui[n.id]={id:n.id,shape:i,description:n.id,classes:"statediagram-state"}),n.description&&(Array.isArray(Ui[n.id].description)?(Ui[n.id].shape="rectWithTitle",Ui[n.id].description.push(n.description)):Ui[n.id].description.length>0?(Ui[n.id].shape="rectWithTitle",Ui[n.id].description===n.id?Ui[n.id].description=[n.description]:Ui[n.id].description=[Ui[n.id].description,n.description]):(Ui[n.id].shape="rect",Ui[n.id].description=n.description)),!Ui[n.id].type&&n.doc&&(f.info("Setting cluser for ",n.id),Ui[n.id].type="group",Ui[n.id].shape="divider"===n.type?"divider":"roundedWithTitle",Ui[n.id].classes=Ui[n.id].classes+" "+(r?"statediagram-cluster statediagram-cluster-alt":"statediagram-cluster"));var a={labelStyle:"",shape:Ui[n.id].shape,labelText:Ui[n.id].description,classes:Ui[n.id].classes,style:"",id:n.id,domId:"state-"+n.id+"-"+Wi,type:Ui[n.id].type,padding:15};if(n.note){var o={labelStyle:"",shape:"note",labelText:n.note.text,classes:"statediagram-note",style:"",id:n.id+"----note",domId:"state-"+n.id+"----note-"+Wi,type:Ui[n.id].type,padding:15},s={labelStyle:"",shape:"noteGroup",labelText:n.note.text,classes:Ui[n.id].classes,style:"",id:n.id+"----parent",domId:"state-"+n.id+"----parent-"+Wi,type:"group",padding:0};Wi++,t.setNode(n.id+"----parent",s),t.setNode(o.id,o),t.setNode(n.id,a),t.setParent(n.id,n.id+"----parent"),t.setParent(o.id,n.id+"----parent");var c=n.id,u=o.id;"left of"===n.note.position&&(c=o.id,u=n.id),t.setEdge(c,u,{arrowhead:"none",arrowType:"",style:"fill:none",labelStyle:"",classes:"transition note-edge",arrowheadStyle:"fill: #333",labelpos:"c",labelType:"text",thickness:"normal"})}else t.setNode(n.id,a)}e&&"root"!==e.id&&(f.info("Setting node ",n.id," to be 
child of its parent ",e.id),t.setParent(n.id,e.id)),n.doc&&(f.info("Adding nodes children "),Vi(t,n,n.doc,!r))},Wi=0,Vi=function(t,e,n,r){Wi=0,f.trace("items",n),n.forEach((function(n){if("state"===n.stmt||"default"===n.stmt)$i(t,e,n,r);else if("relation"===n.stmt){$i(t,e,n.state1,r),$i(t,e,n.state2,r);var i={id:"edge"+Wi,arrowhead:"normal",arrowTypeEnd:"arrow_barb",style:"fill:none",labelStyle:"",label:n.description,arrowheadStyle:"fill: #333",labelpos:"c",labelType:"text",thickness:"normal",classes:"transition"},a=n.state1.id,o=n.state2.id;t.setEdge(a,o,i,Wi),Wi++}}))},Hi=function(t){for(var e=Object.keys(t),n=0;ne.seq?t:e}),t[0]),n="";t.forEach((function(t){n+=t===e?"\t*":"\t|"}));var r,i,a,o=[n,e.id,e.seq];for(var s in Zi)Zi[s]===e.id&&o.push(s);if(f.debug(o.join(" ")),Array.isArray(e.parent)){var c=qi[e.parent[0]];ra(t,e,c),t.push(qi[e.parent[1]])}else{if(null==e.parent)return;var u=qi[e.parent];ra(t,e,u)}r=t,i=function(t){return t.id},a=Object.create(null),ia(t=r.reduce((function(t,e){var n=i(e);return a[n]||(a[n]=!0,t.push(e)),t}),[]))}var aa,oa=function(){var t=Object.keys(qi).map((function(t){return qi[t]}));return t.forEach((function(t){f.debug(t.id)})),t.sort((function(t,e){return e.seq-t.seq})),t},sa={setDirection:function(t){Qi=t},setOptions:function(t){f.debug("options str",t),t=(t=t&&t.trim())||"{}";try{na=JSON.parse(t)}catch(t){f.error("error while parsing gitGraph options",t.message)}},getOptions:function(){return na},commit:function(t){var e={id:ta(),message:t,seq:Ki++,parent:null==Xi?null:Xi.id};Xi=e,qi[e.id]=e,Zi[Ji]=e.id,f.debug("in pushCommit "+e.id)},branch:function(t){Zi[t]=null!=Xi?Xi.id:null,f.debug("in createBranch")},merge:function(t){var e=qi[Zi[Ji]],n=qi[Zi[t]];if(function(t,e){return t.seq>e.seq&&ea(e,t)}(e,n))f.debug("Already merged");else{if(ea(e,n))Zi[Ji]=Zi[t],Xi=qi[Zi[Ji]];else{var r={id:ta(),message:"merged branch "+t+" into "+Ji,seq:Ki++,parent:[null==Xi?null:Xi.id,Zi[t]]};Xi=r,qi[r.id]=r,Zi[Ji]=r.id}f.debug(Zi),f.debug("in mergeBranch")}},checkout:function(t){f.debug("in checkout");var e=Zi[Ji=t];Xi=qi[e]},reset:function(t){f.debug("in reset",t);var e=t.split(":")[0],n=parseInt(t.split(":")[1]),r="HEAD"===e?Xi:qi[Zi[e]];for(f.debug(r,n);n>0;)if(n--,!(r=qi[r.parent])){var i="Critical error - unique parent commit not found during reset";throw f.error(i),i}Xi=r,Zi[Ji]=r.id},prettyPrint:function(){f.debug(qi),ia([oa()[0]])},clear:function(){qi={},Zi={master:Xi=null},Ji="master",Ki=0},getBranchesAsObjArray:function(){var t=[];for(var e in Zi)t.push({name:e,commit:qi[Zi[e]]});return t},getBranches:function(){return Zi},getCommits:function(){return qi},getCommitsArray:oa,getCurrentBranch:function(){return Ji},getDirection:function(){return Qi},getHead:function(){return Xi}},ca=n(71),ua=n.n(ca),la={},ha={nodeSpacing:150,nodeFillColor:"yellow",nodeStrokeWidth:2,nodeStrokeColor:"grey",lineStrokeWidth:4,branchOffset:50,lineColor:"grey",leftMargin:50,branchColors:["#442f74","#983351","#609732","#AA9A39"],nodeRadius:10,nodeLabel:{width:75,height:100,x:-25,y:0}},fa={};function da(t,e,n,r){var i=O(r,s.curveBasis),a=ha.branchColors[n%ha.branchColors.length],o=Object(s.line)().x((function(t){return Math.round(t.x)})).y((function(t){return Math.round(t.y)})).curve(i);t.append("svg:path").attr("d",o(e)).style("stroke",a).style("stroke-width",ha.lineStrokeWidth).style("fill","none")}function pa(t,e){e=e||t.node().getBBox();var n=t.node().getCTM();return{left:n.e+e.x*n.a,top:n.f+e.y*n.d,width:e.width,height:e.height}}function ga(t,e,n,r,i){f.debug("svgDrawLineForCommits: 
",e,n);var a=pa(t.select("#node-"+e+" circle")),o=pa(t.select("#node-"+n+" circle"));switch(r){case"LR":if(a.left-o.left>ha.nodeSpacing){var s={x:a.left-ha.nodeSpacing,y:o.top+o.height/2};da(t,[s,{x:o.left+o.width,y:o.top+o.height/2}],i,"linear"),da(t,[{x:a.left,y:a.top+a.height/2},{x:a.left-ha.nodeSpacing/2,y:a.top+a.height/2},{x:a.left-ha.nodeSpacing/2,y:s.y},s],i)}else da(t,[{x:a.left,y:a.top+a.height/2},{x:a.left-ha.nodeSpacing/2,y:a.top+a.height/2},{x:a.left-ha.nodeSpacing/2,y:o.top+o.height/2},{x:o.left+o.width,y:o.top+o.height/2}],i);break;case"BT":if(o.top-a.top>ha.nodeSpacing){var c={x:o.left+o.width/2,y:a.top+a.height+ha.nodeSpacing};da(t,[c,{x:o.left+o.width/2,y:o.top}],i,"linear"),da(t,[{x:a.left+a.width/2,y:a.top+a.height},{x:a.left+a.width/2,y:a.top+a.height+ha.nodeSpacing/2},{x:o.left+o.width/2,y:c.y-ha.nodeSpacing/2},c],i)}else da(t,[{x:a.left+a.width/2,y:a.top+a.height},{x:a.left+a.width/2,y:a.top+ha.nodeSpacing/2},{x:o.left+o.width/2,y:o.top-ha.nodeSpacing/2},{x:o.left+o.width/2,y:o.top}],i)}}function ya(t,e){return t.select(e).node().cloneNode(!0)}function va(t,e,n,r){var i,a=Object.keys(la).length;if("string"==typeof e)do{if(i=la[e],f.debug("in renderCommitHistory",i.id,i.seq),t.select("#node-"+e).size()>0)return;t.append((function(){return ya(t,"#def-commit")})).attr("class","commit").attr("id",(function(){return"node-"+i.id})).attr("transform",(function(){switch(r){case"LR":return"translate("+(i.seq*ha.nodeSpacing+ha.leftMargin)+", "+aa*ha.branchOffset+")";case"BT":return"translate("+(aa*ha.branchOffset+ha.leftMargin)+", "+(a-i.seq)*ha.nodeSpacing+")"}})).attr("fill",ha.nodeFillColor).attr("stroke",ha.nodeStrokeColor).attr("stroke-width",ha.nodeStrokeWidth);var o=void 0;for(var s in n)if(n[s].commit===i){o=n[s];break}o&&(f.debug("found branch ",o.name),t.select("#node-"+i.id+" p").append("xhtml:span").attr("class","branch-label").text(o.name+", ")),t.select("#node-"+i.id+" p").append("xhtml:span").attr("class","commit-id").text(i.id),""!==i.message&&"BT"===r&&t.select("#node-"+i.id+" p").append("xhtml:span").attr("class","commit-msg").text(", "+i.message),e=i.parent}while(e&&la[e]);Array.isArray(e)&&(f.debug("found merge commmit",e),va(t,e[0],n,r),aa++,va(t,e[1],n,r),aa--)}function ma(t,e,n,r){for(r=r||0;e.seq>0&&!e.lineDrawn;)"string"==typeof e.parent?(ga(t,e.id,e.parent,n,r),e.lineDrawn=!0,e=la[e.parent]):Array.isArray(e.parent)&&(ga(t,e.id,e.parent[0],n,r),ga(t,e.id,e.parent[1],n,r+1),ma(t,la[e.parent[1]],n,r+1),e.lineDrawn=!0,e=la[e.parent[0]])}var ba,xa=function(t){fa=t},_a=function(t,e,n){try{var r=ua.a.parser;r.yy=sa,r.yy.clear(),f.debug("in gitgraph renderer",t+"\n","id:",e,n),r.parse(t+"\n"),ha=Object.assign(ha,fa,sa.getOptions()),f.debug("effective options",ha);var i=sa.getDirection();la=sa.getCommits();var a=sa.getBranchesAsObjArray();"BT"===i&&(ha.nodeLabel.x=a.length*ha.branchOffset,ha.nodeLabel.width="100%",ha.nodeLabel.y=-2*ha.nodeRadius);var o=Object(s.select)('[id="'.concat(e,'"]'));for(var c in function(t){t.append("defs").append("g").attr("id","def-commit").append("circle").attr("r",ha.nodeRadius).attr("cx",0).attr("cy",0),t.select("#def-commit").append("foreignObject").attr("width",ha.nodeLabel.width).attr("height",ha.nodeLabel.height).attr("x",ha.nodeLabel.x).attr("y",ha.nodeLabel.y).attr("class","node-label").attr("requiredFeatures","http://www.w3.org/TR/SVG11/feature#Extensibility").append("p").html("")}(o),aa=1,a){var 
u=a[c];va(o,u.commit.id,a,i),ma(o,u.commit,i),aa++}o.attr("height",(function(){return"BT"===i?Object.keys(la).length*ha.nodeSpacing:(a.length+1)*ha.branchOffset}))}catch(t){f.error("Error while rendering gitgraph"),f.error(t.message)}},ka="",wa=!1,Ea={setMessage:function(t){f.debug("Setting message to: "+t),ka=t},getMessage:function(){return ka},setInfo:function(t){wa=t},getInfo:function(){return wa}},Ta=n(72),Ca=n.n(Ta),Sa={},Aa=function(t){Object.keys(t).forEach((function(e){Sa[e]=t[e]}))},Ma=function(t,e,n){try{var r=Ca.a.parser;r.yy=Ea,f.debug("Renering info diagram\n"+t),r.parse(t),f.debug("Parsed info diagram");var i=Object(s.select)("#"+e);i.append("g").append("text").attr("x",100).attr("y",40).attr("class","version").attr("font-size","32px").style("text-anchor","middle").text("v "+n),i.attr("height",100),i.attr("width",400)}catch(t){f.error("Error while rendering info diagram"),f.error(t.message)}},Oa={},Da=function(t){Object.keys(t).forEach((function(e){Oa[e]=t[e]}))},Na=function(t,e){try{f.debug("Renering svg for syntax error\n");var n=Object(s.select)("#"+t),r=n.append("g");r.append("path").attr("class","error-icon").attr("d","m411.313,123.313c6.25-6.25 6.25-16.375 0-22.625s-16.375-6.25-22.625,0l-32,32-9.375,9.375-20.688-20.688c-12.484-12.5-32.766-12.5-45.25,0l-16,16c-1.261,1.261-2.304,2.648-3.31,4.051-21.739-8.561-45.324-13.426-70.065-13.426-105.867,0-192,86.133-192,192s86.133,192 192,192 192-86.133 192-192c0-24.741-4.864-48.327-13.426-70.065 1.402-1.007 2.79-2.049 4.051-3.31l16-16c12.5-12.492 12.5-32.758 0-45.25l-20.688-20.688 9.375-9.375 32.001-31.999zm-219.313,100.687c-52.938,0-96,43.063-96,96 0,8.836-7.164,16-16,16s-16-7.164-16-16c0-70.578 57.422-128 128-128 8.836,0 16,7.164 16,16s-7.164,16-16,16z"),r.append("path").attr("class","error-icon").attr("d","m459.02,148.98c-6.25-6.25-16.375-6.25-22.625,0s-6.25,16.375 0,22.625l16,16c3.125,3.125 7.219,4.688 11.313,4.688 4.094,0 8.188-1.563 11.313-4.688 6.25-6.25 6.25-16.375 0-22.625l-16.001-16z"),r.append("path").attr("class","error-icon").attr("d","m340.395,75.605c3.125,3.125 7.219,4.688 11.313,4.688 4.094,0 8.188-1.563 11.313-4.688 6.25-6.25 6.25-16.375 0-22.625l-16-16c-6.25-6.25-16.375-6.25-22.625,0s-6.25,16.375 0,22.625l15.999,16z"),r.append("path").attr("class","error-icon").attr("d","m400,64c8.844,0 16-7.164 16-16v-32c0-8.836-7.156-16-16-16-8.844,0-16,7.164-16,16v32c0,8.836 7.156,16 16,16z"),r.append("path").attr("class","error-icon").attr("d","m496,96.586h-32c-8.844,0-16,7.164-16,16 0,8.836 7.156,16 16,16h32c8.844,0 16-7.164 16-16 0-8.836-7.156-16-16-16z"),r.append("path").attr("class","error-icon").attr("d","m436.98,75.605c3.125,3.125 7.219,4.688 11.313,4.688 4.094,0 8.188-1.563 11.313-4.688l32-32c6.25-6.25 6.25-16.375 0-22.625s-16.375-6.25-22.625,0l-32,32c-6.251,6.25-6.251,16.375-0.001,22.625z"),r.append("text").attr("class","error-text").attr("x",1240).attr("y",250).attr("font-size","150px").style("text-anchor","middle").text("Syntax error in graph"),r.append("text").attr("class","error-text").attr("x",1050).attr("y",400).attr("font-size","100px").style("text-anchor","middle").text("mermaid version "+e),n.attr("height",100),n.attr("width",400),n.attr("viewBox","768 0 512 512")}catch(t){f.error("Error while rendering info diagram"),f.error(t.message)}},Ba={},La="",Fa={parseDirective:function(t,e,n){$o.parseDirective(this,t,e,n)},getConfig:function(){return xt().pie},addSection:function(t,e){void 0===Ba[t]&&(Ba[t]=e,f.debug("Added new section :",t))},getSections:function(){return 
Ba},cleanupValue:function(t){return":"===t.substring(0,1)?(t=t.substring(1).trim(),Number(t.trim())):Number(t.trim())},clear:function(){Ba={},La=""},setTitle:function(t){La=t},getTitle:function(){return La}},Pa=n(73),Ia=n.n(Pa),ja={},Ra=function(t){Object.keys(t).forEach((function(e){ja[e]=t[e]}))},Ya=function(t,e){try{var n=Ia.a.parser;n.yy=Fa,f.debug("Rendering info diagram\n"+t),n.yy.clear(),n.parse(t),f.debug("Parsed info diagram");var r=document.getElementById(e);void 0===(ba=r.parentElement.offsetWidth)&&(ba=1200),void 0!==ja.useWidth&&(ba=ja.useWidth);var i=Object(s.select)("#"+e);$(i,450,ba,ja.useMaxWidth),r.setAttribute("viewBox","0 0 "+ba+" 450");var a=Math.min(ba,450)/2-40,o=i.append("g").attr("transform","translate("+ba/2+",225)"),c=Fa.getSections(),u=0;Object.keys(c).forEach((function(t){u+=c[t]}));var l=Object(s.scaleOrdinal)().domain(c).range(s.schemeSet2),h=Object(s.pie)().value((function(t){return t.value}))(Object(s.entries)(c)),d=Object(s.arc)().innerRadius(0).outerRadius(a);o.selectAll("mySlices").data(h).enter().append("path").attr("d",d).attr("fill",(function(t){return l(t.data.key)})).attr("stroke","black").style("stroke-width","2px").style("opacity",.7),o.selectAll("mySlices").data(h).enter().append("text").text((function(t){return(t.data.value/u*100).toFixed(0)+"%"})).attr("transform",(function(t){return"translate("+d.centroid(t)+")"})).style("text-anchor","middle").attr("class","slice").style("font-size",17),o.append("text").text(n.yy.getTitle()).attr("x",0).attr("y",-200).attr("class","pieTitleText");var p=o.selectAll(".legend").data(l.domain()).enter().append("g").attr("class","legend").attr("transform",(function(t,e){return"translate(216,"+(22*e-22*l.domain().length/2)+")"}));p.append("rect").attr("width",18).attr("height",18).style("fill",l).style("stroke",l),p.append("text").attr("x",22).attr("y",14).text((function(t){return t}))}catch(t){f.error("Error while rendering info diagram"),f.error(t)}},za={},Ua=[],$a="",Wa={Cardinality:{ZERO_OR_ONE:"ZERO_OR_ONE",ZERO_OR_MORE:"ZERO_OR_MORE",ONE_OR_MORE:"ONE_OR_MORE",ONLY_ONE:"ONLY_ONE"},Identification:{NON_IDENTIFYING:"NON_IDENTIFYING",IDENTIFYING:"IDENTIFYING"},parseDirective:function(t,e,n){$o.parseDirective(this,t,e,n)},getConfig:function(){return xt().er},addEntity:function(t){void 0===za[t]&&(za[t]=t,f.debug("Added new entity :",t))},getEntities:function(){return za},addRelationship:function(t,e,n,r){var i={entityA:t,roleA:e,entityB:n,relSpec:r};Ua.push(i),f.debug("Added new relationship :",i)},getRelationships:function(){return Ua},clear:function(){za={},Ua=[],$a=""},setTitle:function(t){$a=t},getTitle:function(){return $a}},Va=n(74),Ha=n.n(Va),Ga={ONLY_ONE_START:"ONLY_ONE_START",ONLY_ONE_END:"ONLY_ONE_END",ZERO_OR_ONE_START:"ZERO_OR_ONE_START",ZERO_OR_ONE_END:"ZERO_OR_ONE_END",ONE_OR_MORE_START:"ONE_OR_MORE_START",ONE_OR_MORE_END:"ONE_OR_MORE_END",ZERO_OR_MORE_START:"ZERO_OR_MORE_START",ZERO_OR_MORE_END:"ZERO_OR_MORE_END"},qa=Ga,Xa=function(t,e){var n;t.append("defs").append("marker").attr("id",Ga.ONLY_ONE_START).attr("refX",0).attr("refY",9).attr("markerWidth",18).attr("markerHeight",18).attr("orient","auto").append("path").attr("stroke",e.stroke).attr("fill","none").attr("d","M9,0 L9,18 M15,0 L15,18"),t.append("defs").append("marker").attr("id",Ga.ONLY_ONE_END).attr("refX",18).attr("refY",9).attr("markerWidth",18).attr("markerHeight",18).attr("orient","auto").append("path").attr("stroke",e.stroke).attr("fill","none").attr("d","M3,0 L3,18 M9,0 
L9,18"),(n=t.append("defs").append("marker").attr("id",Ga.ZERO_OR_ONE_START).attr("refX",0).attr("refY",9).attr("markerWidth",30).attr("markerHeight",18).attr("orient","auto")).append("circle").attr("stroke",e.stroke).attr("fill","white").attr("cx",21).attr("cy",9).attr("r",6),n.append("path").attr("stroke",e.stroke).attr("fill","none").attr("d","M9,0 L9,18"),(n=t.append("defs").append("marker").attr("id",Ga.ZERO_OR_ONE_END).attr("refX",30).attr("refY",9).attr("markerWidth",30).attr("markerHeight",18).attr("orient","auto")).append("circle").attr("stroke",e.stroke).attr("fill","white").attr("cx",9).attr("cy",9).attr("r",6),n.append("path").attr("stroke",e.stroke).attr("fill","none").attr("d","M21,0 L21,18"),t.append("defs").append("marker").attr("id",Ga.ONE_OR_MORE_START).attr("refX",18).attr("refY",18).attr("markerWidth",45).attr("markerHeight",36).attr("orient","auto").append("path").attr("stroke",e.stroke).attr("fill","none").attr("d","M0,18 Q 18,0 36,18 Q 18,36 0,18 M42,9 L42,27"),t.append("defs").append("marker").attr("id",Ga.ONE_OR_MORE_END).attr("refX",27).attr("refY",18).attr("markerWidth",45).attr("markerHeight",36).attr("orient","auto").append("path").attr("stroke",e.stroke).attr("fill","none").attr("d","M3,9 L3,27 M9,18 Q27,0 45,18 Q27,36 9,18"),(n=t.append("defs").append("marker").attr("id",Ga.ZERO_OR_MORE_START).attr("refX",18).attr("refY",18).attr("markerWidth",57).attr("markerHeight",36).attr("orient","auto")).append("circle").attr("stroke",e.stroke).attr("fill","white").attr("cx",48).attr("cy",18).attr("r",6),n.append("path").attr("stroke",e.stroke).attr("fill","none").attr("d","M0,18 Q18,0 36,18 Q18,36 0,18"),(n=t.append("defs").append("marker").attr("id",Ga.ZERO_OR_MORE_END).attr("refX",39).attr("refY",18).attr("markerWidth",57).attr("markerHeight",36).attr("orient","auto")).append("circle").attr("stroke",e.stroke).attr("fill","white").attr("cx",9).attr("cy",18).attr("r",6),n.append("path").attr("stroke",e.stroke).attr("fill","none").attr("d","M21,18 Q39,0 57,18 Q39,36 21,18")},Za={},Ja=function(t){return(t.entityA+t.roleA+t.entityB).replace(/\s/g,"")},Qa=0,Ka=function(t){for(var e=Object.keys(t),n=0;n/gi," "),r=t.append("text");r.attr("x",e.x),r.attr("y",e.y),r.attr("class","legend"),r.style("text-anchor",e.anchor),void 0!==e.class&&r.attr("class",e.class);var i=r.append("tspan");return i.attr("x",e.x+2*e.textMargin),i.text(n),r},go=-1,yo=function(){return{x:0,y:0,width:100,anchor:"start",height:100,rx:0,ry:0}},vo=function(){function t(t,e,n,i,a,o,s,c){r(e.append("text").attr("x",n+a/2).attr("y",i+o/2+5).style("font-color",c).style("text-anchor","middle").text(t),s)}function e(t,e,n,i,a,o,s,c,u){for(var l=c.taskFontSize,h=c.taskFontFamily,f=t.split(//gi),d=0;d3?function(t){var e=Object(s.arc)().startAngle(Math.PI/2).endAngle(Math.PI/2*3).innerRadius(7.5).outerRadius(15/2.2);t.append("path").attr("class","mouth").attr("d",e).attr("transform","translate("+o.cx+","+(o.cy+2)+")")}(c):o.score<3?function(t){var e=Object(s.arc)().startAngle(3*Math.PI/2).endAngle(Math.PI/2*5).innerRadius(7.5).outerRadius(15/2.2);t.append("path").attr("class","mouth").attr("d",e).attr("transform","translate("+o.cx+","+(o.cy+7)+")")}(c):function(t){t.append("line").attr("class","mouth").attr("stroke",2).attr("x1",o.cx-5).attr("y1",o.cy+7).attr("x2",o.cx+5).attr("y2",o.cy+7).attr("class","mouth").attr("stroke-width","1px").attr("stroke","#666")}(c);var u=yo();u.x=e.x,u.y=e.y,u.fill=e.fill,u.width=n.width,u.height=n.height,u.class="task task-type-"+e.num,u.rx=3,u.ry=3,ho(i,u);var 
l=e.x+14;e.people.forEach((function(t){var n=e.actors[t],r={cx:l,cy:e.y,r:7,fill:n,stroke:"#000",title:t};fo(i,r),l+=10})),vo(n)(e.task,i,u.x,u.y,u.width,u.height,{class:"task"},n,e.colour)},ko=function(t){t.append("defs").append("marker").attr("id","arrowhead").attr("refX",5).attr("refY",2).attr("markerWidth",6).attr("markerHeight",4).attr("orient","auto").append("path").attr("d","M 0,0 V 4 L6,2 Z")};eo.parser.yy=lo;var wo={leftMargin:150,diagramMarginX:50,diagramMarginY:20,taskMargin:50,width:150,height:50,taskFontSize:14,taskFontFamily:'"Open-Sans", "sans-serif"',boxMargin:10,boxTextMargin:5,noteMargin:10,messageMargin:35,messageAlign:"center",bottomMarginAdj:1,activationWidth:10,textPlacement:"fo",actorColours:["#8FBC8F","#7CFC00","#00FFFF","#20B2AA","#B0E0E6","#FFFFE0"],sectionFills:["#191970","#8B008B","#4B0082","#2F4F4F","#800000","#8B4513","#00008B"],sectionColours:["#fff"]},Eo={};var To=wo.leftMargin,Co={data:{startx:void 0,stopx:void 0,starty:void 0,stopy:void 0},verticalPos:0,sequenceItems:[],init:function(){this.sequenceItems=[],this.data={startx:void 0,stopx:void 0,starty:void 0,stopy:void 0},this.verticalPos=0},updateVal:function(t,e,n,r){void 0===t[e]?t[e]=n:t[e]=r(n,t[e])},updateBounds:function(t,e,n,r){var i,a=this,o=0;this.sequenceItems.forEach((function(s){o++;var c=a.sequenceItems.length-o+1;a.updateVal(s,"starty",e-c*wo.boxMargin,Math.min),a.updateVal(s,"stopy",r+c*wo.boxMargin,Math.max),a.updateVal(Co.data,"startx",t-c*wo.boxMargin,Math.min),a.updateVal(Co.data,"stopx",n+c*wo.boxMargin,Math.max),"activation"!==i&&(a.updateVal(s,"startx",t-c*wo.boxMargin,Math.min),a.updateVal(s,"stopx",n+c*wo.boxMargin,Math.max),a.updateVal(Co.data,"starty",e-c*wo.boxMargin,Math.min),a.updateVal(Co.data,"stopy",r+c*wo.boxMargin,Math.max))}))},insert:function(t,e,n,r){var i=Math.min(t,n),a=Math.max(t,n),o=Math.min(e,r),s=Math.max(e,r);this.updateVal(Co.data,"startx",i,Math.min),this.updateVal(Co.data,"starty",o,Math.min),this.updateVal(Co.data,"stopx",a,Math.max),this.updateVal(Co.data,"stopy",s,Math.max),this.updateBounds(i,o,a,s)},bumpVerticalPos:function(t){this.verticalPos=this.verticalPos+t,this.data.stopy=this.verticalPos},getVerticalPos:function(){return this.verticalPos},getBounds:function(){return this.data}},So=wo.sectionFills,Ao=wo.sectionColours,Mo=function(t,e,n){for(var r="",i=n+(2*wo.height+wo.diagramMarginY),a=0,o="#CCC",s="black",c=0,u=0;u tspan {\n fill: ").concat(t.actorTextColor,";\n stroke: none;\n }\n\n .actor-line {\n stroke: ").concat(t.actorLineColor,";\n }\n\n .messageLine0 {\n stroke-width: 1.5;\n stroke-dasharray: none;\n stroke: ").concat(t.signalColor,";\n }\n\n .messageLine1 {\n stroke-width: 1.5;\n stroke-dasharray: 2, 2;\n stroke: ").concat(t.signalColor,";\n }\n\n #arrowhead path {\n fill: ").concat(t.signalColor,";\n stroke: ").concat(t.signalColor,";\n }\n\n .sequenceNumber {\n fill: ").concat(t.sequenceNumberColor,";\n }\n\n #sequencenumber {\n fill: ").concat(t.signalColor,";\n }\n\n #crosshead path {\n fill: ").concat(t.signalColor,";\n stroke: ").concat(t.signalColor,";\n }\n\n .messageText {\n fill: ").concat(t.signalTextColor,";\n stroke: ").concat(t.signalTextColor,";\n }\n\n .labelBox {\n stroke: ").concat(t.labelBoxBorderColor,";\n fill: ").concat(t.labelBoxBkgColor,";\n }\n\n .labelText, .labelText > tspan {\n fill: ").concat(t.labelTextColor,";\n stroke: none;\n }\n\n .loopText, .loopText > tspan {\n fill: ").concat(t.loopTextColor,";\n stroke: none;\n }\n\n .loopLine {\n stroke-width: 2px;\n stroke-dasharray: 2, 2;\n stroke: 
").concat(t.labelBoxBorderColor,";\n fill: ").concat(t.labelBoxBorderColor,";\n }\n\n .note {\n //stroke: #decc93;\n stroke: ").concat(t.noteBorderColor,";\n fill: ").concat(t.noteBkgColor,";\n }\n\n .noteText, .noteText > tspan {\n fill: ").concat(t.noteTextColor,";\n stroke: none;\n }\n\n .activation0 {\n fill: ").concat(t.activationBkgColor,";\n stroke: ").concat(t.activationBorderColor,";\n }\n\n .activation1 {\n fill: ").concat(t.activationBkgColor,";\n stroke: ").concat(t.activationBorderColor,";\n }\n\n .activation2 {\n fill: ").concat(t.activationBkgColor,";\n stroke: ").concat(t.activationBorderColor,";\n }\n")},gantt:function(t){return'\n .mermaid-main-font {\n font-family: "trebuchet ms", verdana, arial;\n font-family: var(--mermaid-font-family);\n }\n\n .section {\n stroke: none;\n opacity: 0.2;\n }\n\n .section0 {\n fill: '.concat(t.sectionBkgColor,";\n }\n\n .section2 {\n fill: ").concat(t.sectionBkgColor2,";\n }\n\n .section1,\n .section3 {\n fill: ").concat(t.altSectionBkgColor,";\n opacity: 0.2;\n }\n\n .sectionTitle0 {\n fill: ").concat(t.titleColor,";\n }\n\n .sectionTitle1 {\n fill: ").concat(t.titleColor,";\n }\n\n .sectionTitle2 {\n fill: ").concat(t.titleColor,";\n }\n\n .sectionTitle3 {\n fill: ").concat(t.titleColor,";\n }\n\n .sectionTitle {\n text-anchor: start;\n font-size: 11px;\n text-height: 14px;\n font-family: 'trebuchet ms', verdana, arial;\n font-family: var(--mermaid-font-family);\n\n }\n\n\n /* Grid and axis */\n\n .grid .tick {\n stroke: ").concat(t.gridColor,";\n opacity: 0.8;\n shape-rendering: crispEdges;\n text {\n font-family: ").concat(t.fontFamily,";\n fill: ").concat(t.textColor,";\n }\n }\n\n .grid path {\n stroke-width: 0;\n }\n\n\n /* Today line */\n\n .today {\n fill: none;\n stroke: ").concat(t.todayLineColor,";\n stroke-width: 2px;\n }\n\n\n /* Task styling */\n\n /* Default task */\n\n .task {\n stroke-width: 2;\n }\n\n .taskText {\n text-anchor: middle;\n font-family: 'trebuchet ms', verdana, arial;\n font-family: var(--mermaid-font-family);\n }\n\n .taskText:not([font-size]) {\n font-size: 11px;\n }\n\n .taskTextOutsideRight {\n fill: ").concat(t.taskTextDarkColor,";\n text-anchor: start;\n font-size: 11px;\n font-family: 'trebuchet ms', verdana, arial;\n font-family: var(--mermaid-font-family);\n\n }\n\n .taskTextOutsideLeft {\n fill: ").concat(t.taskTextDarkColor,";\n text-anchor: end;\n font-size: 11px;\n }\n\n /* Special case clickable */\n .task.clickable {\n cursor: pointer;\n }\n .taskText.clickable {\n cursor: pointer;\n fill: ").concat(t.taskTextClickableColor," !important;\n font-weight: bold;\n }\n\n .taskTextOutsideLeft.clickable {\n cursor: pointer;\n fill: ").concat(t.taskTextClickableColor," !important;\n font-weight: bold;\n }\n\n .taskTextOutsideRight.clickable {\n cursor: pointer;\n fill: ").concat(t.taskTextClickableColor," !important;\n font-weight: bold;\n }\n\n /* Specific task settings for the sections*/\n\n .taskText0,\n .taskText1,\n .taskText2,\n .taskText3 {\n fill: ").concat(t.taskTextColor,";\n }\n\n .task0,\n .task1,\n .task2,\n .task3 {\n fill: ").concat(t.taskBkgColor,";\n stroke: ").concat(t.taskBorderColor,";\n }\n\n .taskTextOutside0,\n .taskTextOutside2\n {\n fill: ").concat(t.taskTextOutsideColor,";\n }\n\n .taskTextOutside1,\n .taskTextOutside3 {\n fill: ").concat(t.taskTextOutsideColor,";\n }\n\n\n /* Active task */\n\n .active0,\n .active1,\n .active2,\n .active3 {\n fill: ").concat(t.activeTaskBkgColor,";\n stroke: ").concat(t.activeTaskBorderColor,";\n }\n\n .activeText0,\n .activeText1,\n 
.activeText2,\n .activeText3 {\n fill: ").concat(t.taskTextDarkColor," !important;\n }\n\n\n /* Completed task */\n\n .done0,\n .done1,\n .done2,\n .done3 {\n stroke: ").concat(t.doneTaskBorderColor,";\n fill: ").concat(t.doneTaskBkgColor,";\n stroke-width: 2;\n }\n\n .doneText0,\n .doneText1,\n .doneText2,\n .doneText3 {\n fill: ").concat(t.taskTextDarkColor," !important;\n }\n\n\n /* Tasks on the critical line */\n\n .crit0,\n .crit1,\n .crit2,\n .crit3 {\n stroke: ").concat(t.critBorderColor,";\n fill: ").concat(t.critBkgColor,";\n stroke-width: 2;\n }\n\n .activeCrit0,\n .activeCrit1,\n .activeCrit2,\n .activeCrit3 {\n stroke: ").concat(t.critBorderColor,";\n fill: ").concat(t.activeTaskBkgColor,";\n stroke-width: 2;\n }\n\n .doneCrit0,\n .doneCrit1,\n .doneCrit2,\n .doneCrit3 {\n stroke: ").concat(t.critBorderColor,";\n fill: ").concat(t.doneTaskBkgColor,";\n stroke-width: 2;\n cursor: pointer;\n shape-rendering: crispEdges;\n }\n\n .milestone {\n transform: rotate(45deg) scale(0.8,0.8);\n }\n\n .milestoneText {\n font-style: italic;\n }\n .doneCritText0,\n .doneCritText1,\n .doneCritText2,\n .doneCritText3 {\n fill: ").concat(t.taskTextDarkColor," !important;\n }\n\n .activeCritText0,\n .activeCritText1,\n .activeCritText2,\n .activeCritText3 {\n fill: ").concat(t.taskTextDarkColor," !important;\n }\n\n .titleText {\n text-anchor: middle;\n font-size: 18px;\n fill: ").concat(t.textColor," ;\n font-family: 'trebuchet ms', verdana, arial;\n font-family: var(--mermaid-font-family);\n }\n")},classDiagram:No,"classDiagram-v2":No,class:No,stateDiagram:Lo,state:Lo,git:function(){return"\n .commit-id,\n .commit-msg,\n .branch-label {\n fill: lightgrey;\n color: lightgrey;\n font-family: 'trebuchet ms', verdana, arial;\n font-family: var(--mermaid-font-family);\n }\n"},info:function(){return""},pie:function(t){return".pieTitleText {\n text-anchor: middle;\n font-size: 25px;\n fill: ".concat(t.taskTextDarkColor,";\n font-family: ").concat(t.fontFamily,";\n }\n .slice {\n font-family: ").concat(t.fontFamily,";\n fill: ").concat(t.textColor,";\n // fill: white;\n }\n .legend text {\n fill: ").concat(t.taskTextDarkColor,";\n font-family: ").concat(t.fontFamily,";\n font-size: 17px;\n }\n")},er:function(t){return"\n .entityBox {\n fill: ".concat(t.mainBkg,";\n stroke: ").concat(t.nodeBorder,";\n }\n\n .relationshipLabelBox {\n fill: ").concat(t.tertiaryColor,";\n opacity: 0.7;\n background-color: ").concat(t.tertiaryColor,";\n rect {\n opacity: 0.5;\n }\n }\n\n .relationshipLine {\n stroke: ").concat(t.lineColor,";\n }\n")},journey:function(t){return".label {\n font-family: 'trebuchet ms', verdana, arial;\n font-family: var(--mermaid-font-family);\n color: ".concat(t.textColor,";\n }\n .mouth {\n stroke: #666;\n }\n\n line {\n stroke: ").concat(t.textColor,"\n }\n\n .legend {\n fill: ").concat(t.textColor,";\n }\n\n .label text {\n fill: #333;\n }\n .label {\n color: ").concat(t.textColor,"\n }\n\n .face {\n fill: #FFF8DC;\n stroke: #999;\n }\n\n .node rect,\n .node circle,\n .node ellipse,\n .node polygon,\n .node path {\n fill: ").concat(t.mainBkg,";\n stroke: ").concat(t.nodeBorder,";\n stroke-width: 1px;\n }\n\n .node .label {\n text-align: center;\n }\n .node.clickable {\n cursor: pointer;\n }\n\n .arrowheadPath {\n fill: ").concat(t.arrowheadColor,";\n }\n\n .edgePath .path {\n stroke: ").concat(t.lineColor,";\n stroke-width: 1.5px;\n }\n\n .flowchart-link {\n stroke: ").concat(t.lineColor,";\n fill: none;\n }\n\n .edgeLabel {\n background-color: ").concat(t.edgeLabelBackground,";\n rect {\n 
opacity: 0.5;\n }\n text-align: center;\n }\n\n .cluster rect {\n }\n\n .cluster text {\n fill: ").concat(t.titleColor,";\n }\n\n div.mermaidTooltip {\n position: absolute;\n text-align: center;\n max-width: 200px;\n padding: 2px;\n font-family: 'trebuchet ms', verdana, arial;\n font-family: var(--mermaid-font-family);\n font-size: 12px;\n background: ").concat(t.tertiaryColor,";\n border: 1px solid ").concat(t.border2,";\n border-radius: 2px;\n pointer-events: none;\n z-index: 100;\n }\n\n .task-type-0, .section-type-0 {\n ").concat(t.fillType0?"fill: ".concat(t.fillType0):"",";\n }\n .task-type-1, .section-type-1 {\n ").concat(t.fillType0?"fill: ".concat(t.fillType1):"",";\n }\n .task-type-2, .section-type-2 {\n ").concat(t.fillType0?"fill: ".concat(t.fillType2):"",";\n }\n .task-type-3, .section-type-3 {\n ").concat(t.fillType0?"fill: ".concat(t.fillType3):"",";\n }\n .task-type-4, .section-type-4 {\n ").concat(t.fillType0?"fill: ".concat(t.fillType4):"",";\n }\n .task-type-5, .section-type-5 {\n ").concat(t.fillType0?"fill: ".concat(t.fillType5):"",";\n }\n .task-type-6, .section-type-6 {\n ").concat(t.fillType0?"fill: ".concat(t.fillType6):"",";\n }\n .task-type-7, .section-type-7 {\n ").concat(t.fillType0?"fill: ".concat(t.fillType7):"",";\n }\n")}},Po=function(t,e,n){return" {\n font-family: ".concat(n.fontFamily,";\n font-size: ").concat(n.fontSize,";\n fill: ").concat(n.textColor,"\n }\n\n /* Classes common for multiple diagrams */\n\n .error-icon {\n fill: ").concat(n.errorBkgColor,";\n }\n .error-text {\n fill: ").concat(n.errorTextColor,";\n stroke: ").concat(n.errorTextColor,";\n }\n\n .edge-thickness-normal {\n stroke-width: 2px;\n }\n .edge-thickness-thick {\n stroke-width: 3.5px\n }\n .edge-pattern-solid {\n stroke-dasharray: 0;\n }\n\n .edge-pattern-dashed{\n stroke-dasharray: 3;\n }\n .edge-pattern-dotted {\n stroke-dasharray: 2;\n }\n\n .marker {\n fill: ").concat(n.lineColor,";\n }\n .marker.cross {\n stroke: ").concat(n.lineColor,";\n }\n\n svg {\n font-family: ").concat(n.fontFamily,";\n font-size: ").concat(n.fontSize,";\n }\n\n ").concat(Fo[t](n),"\n\n ").concat(e,"\n\n ").concat(t," { fill: apa;}\n")};function Io(t){return(Io="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(t){return typeof t}:function(t){return t&&"function"==typeof Symbol&&t.constructor===Symbol&&t!==Symbol.prototype?"symbol":typeof t})(t)}var jo={},Ro=function(t,e,n){switch(f.debug("Directive type=".concat(e.type," with args:"),e.args),e.type){case"init":case"initialize":["config"].forEach((function(t){void 0!==e.args[t]&&("flowchart-v2"===n&&(n="flowchart"),e.args[n]=e.args[t],delete e.args[t])})),e.args,kt(e.args);break;case"wrap":case"nowrap":t&&t.setWrap&&t.setWrap("wrap"===e.type);break;default:f.warn("Unhandled directive: source: '%%{".concat(e.type,": ").concat(JSON.stringify(e.args?e.args:{}),"}%%"),e)}};function Yo(t){xa(t.git),ve(t.flowchart),Nn(t.flowchart),void 0!==t.sequenceDiagram&&br.setConf(P(t.sequence,t.sequenceDiagram)),br.setConf(t.sequence),ei(t.gantt),ci(t.class),Ri(t.state),Hi(t.state),Aa(t.class),Ra(t.class),Ka(t.er),Oo(t.journey),Da(t.class)}function zo(){}var Uo=Object.freeze({render:function(t,e,n,r){wt();var i=e,a=W.detectInit(i);a&&kt(a);var u=xt();if(e.length>u.maxTextSize&&(i="graph TB;a[Maximum text size in diagram exceeded];style a fill:#faa"),void 0!==r)r.innerHTML="",Object(s.select)(r).append("div").attr("id","d"+t).attr("style","font-family: 
"+u.fontFamily).append("svg").attr("id",t).attr("width","100%").attr("xmlns","http://www.w3.org/2000/svg").append("g");else{var l=document.getElementById(t);l&&l.remove();var h=document.querySelector("#d"+t);h&&h.remove(),Object(s.select)("body").append("div").attr("id","d"+t).append("svg").attr("id",t).attr("width","100%").attr("xmlns","http://www.w3.org/2000/svg").append("g")}window.txt=i,i=function(t){var e=t;return e=(e=(e=e.replace(/style.*:\S*#.*;/g,(function(t){return t.substring(0,t.length-1)}))).replace(/classDef.*:\S*#.*;/g,(function(t){return t.substring(0,t.length-1)}))).replace(/#\w+;/g,(function(t){var e=t.substring(1,t.length-1);return/^\+?\d+$/.test(e)?"fl°°"+e+"¶ß":"fl°"+e+"¶ß"}))}(i);var d=Object(s.select)("#d"+t).node(),p=W.detectType(i),g=d.firstChild,y=g.firstChild,v="";if(void 0!==u.themeCSS&&(v+="\n".concat(u.themeCSS)),void 0!==u.fontFamily&&(v+="\n:root { --mermaid-font-family: ".concat(u.fontFamily,"}")),void 0!==u.altFontFamily&&(v+="\n:root { --mermaid-alt-font-family: ".concat(u.altFontFamily,"}")),"flowchart"===p||"flowchart-v2"===p||"graph"===p){var m=me(i);for(var b in m)v+="\n.".concat(b," > * { ").concat(m[b].styles.join(" !important; ")," !important; }"),m[b].textStyles&&(v+="\n.".concat(b," tspan { ").concat(m[b].textStyles.join(" !important; ")," !important; }"))}var x=(new o.a)("#".concat(t),Po(p,v,u.themeVariables)),_=document.createElement("style");_.innerHTML=x,g.insertBefore(_,y);try{switch(p){case"git":u.flowchart.arrowMarkerAbsolute=u.arrowMarkerAbsolute,xa(u.git),_a(i,t,!1);break;case"flowchart":u.flowchart.arrowMarkerAbsolute=u.arrowMarkerAbsolute,ve(u.flowchart),be(i,t,!1);break;case"flowchart-v2":u.flowchart.arrowMarkerAbsolute=u.arrowMarkerAbsolute,Nn(u.flowchart),Bn(i,t,!1);break;case"sequence":u.sequence.arrowMarkerAbsolute=u.arrowMarkerAbsolute,u.sequenceDiagram?(br.setConf(Object.assign(u.sequence,u.sequenceDiagram)),console.error("`mermaid config.sequenceDiagram` has been renamed to `config.sequence`. 
Please update your mermaid config.")):br.setConf(u.sequence),br.draw(i,t);break;case"gantt":u.gantt.arrowMarkerAbsolute=u.arrowMarkerAbsolute,ei(u.gantt),ni(i,t);break;case"class":u.class.arrowMarkerAbsolute=u.arrowMarkerAbsolute,ci(u.class),ui(i,t);break;case"classDiagram":u.class.arrowMarkerAbsolute=u.arrowMarkerAbsolute,hi(u.class),fi(i,t);break;case"state":u.class.arrowMarkerAbsolute=u.arrowMarkerAbsolute,Ri(u.state),Yi(i,t);break;case"stateDiagram":u.class.arrowMarkerAbsolute=u.arrowMarkerAbsolute,Hi(u.state),Gi(i,t);break;case"info":u.class.arrowMarkerAbsolute=u.arrowMarkerAbsolute,Aa(u.class),Ma(i,t,c.version);break;case"pie":u.class.arrowMarkerAbsolute=u.arrowMarkerAbsolute,Ra(u.pie),Ya(i,t,c.version);break;case"er":Ka(u.er),to(i,t,c.version);break;case"journey":Oo(u.journey),Do(i,t,c.version)}}catch(e){throw Na(t,c.version),e}Object(s.select)('[id="'.concat(t,'"]')).selectAll("foreignobject > *").attr("xmlns","http://www.w3.org/1999/xhtml");var k=Object(s.select)("#d"+t).node().innerHTML;if(f.debug("cnf.arrowMarkerAbsolute",u.arrowMarkerAbsolute),u.arrowMarkerAbsolute&&"false"!==u.arrowMarkerAbsolute||(k=k.replace(/marker-end="url\(.*?#/g,'marker-end="url(#',"g")),k=function(t){var e=t;return e=(e=(e=e.replace(/fl°°/g,(function(){return"&#"}))).replace(/fl°/g,(function(){return"&"}))).replace(/¶ß/g,(function(){return";"}))}(k),void 0!==n)switch(p){case"flowchart":case"flowchart-v2":n(k,qt.bindFunctions);break;case"gantt":n(k,Jr.bindFunctions);break;case"class":case"classDiagram":n(k,on.bindFunctions);break;default:n(k)}else f.debug("CB = undefined!");var w=Object(s.select)("#d"+t).node();return null!==w&&"function"==typeof w.remove&&Object(s.select)("#d"+t).node().remove(),k},parse:function(t){var e=W.detectInit(t);e&&f.debug("reinit ",e);var n,r=W.detectType(t);switch(f.debug("Type "+r),r){case"git":(n=ua.a).parser.yy=sa;break;case"flowchart":case"flowchart-v2":qt.clear(),(n=Zt.a).parser.yy=qt;break;case"sequence":(n=$n.a).parser.yy=ar;break;case"gantt":(n=_r.a).parser.yy=Jr;break;case"class":case"classDiagram":(n=ii.a).parser.yy=on;break;case"state":case"stateDiagram":(n=Mi.a).parser.yy=Si;break;case"info":f.debug("info info info"),(n=Ca.a).parser.yy=Ea;break;case"pie":f.debug("pie"),(n=Ia.a).parser.yy=Fa;break;case"er":f.debug("er"),(n=Ha.a).parser.yy=Wa;break;case"journey":f.debug("Journey"),(n=no.a).parser.yy=lo}return n.parser.yy.graphType=r,n.parser.yy.parseError=function(t,e){throw{str:t,hash:e}},n.parse(t),n},parseDirective:function(t,e,n,r){try{if(void 0!==e)switch(e=e.trim(),n){case"open_directive":jo={};break;case"type_directive":jo.type=e.toLowerCase();break;case"arg_directive":jo.args=JSON.parse(e);break;case"close_directive":Ro(t,jo,r),jo=null}}catch(t){f.error("Error while rendering sequenceDiagram directive: ".concat(e," jison context: ").concat(n)),f.error(t.message)}},initialize:function(t){t&&t.fontFamily&&(t.themeVariables&&t.themeVariables.fontFamily||(t.themeVariables={fontFamily:t.fontFamily})),ft=P({},t),t&&t.theme&<[t.theme]?t.themeVariables=lt[t.theme].getThemeVariables(t.themeVariables):t&&(t.themeVariables=lt.default.getThemeVariables(t.themeVariables));var e="object"===Io(t)?function(t){return gt=P({},pt),gt=P(gt,t),t.theme&&(gt.themeVariables=lt[t.theme].getThemeVariables(t.themeVariables)),vt=mt(gt,yt),gt}(t):bt();Yo(e),d(e.logLevel)},reinitialize:zo,getConfig:xt,setConfig:function(t){return P(vt,t),xt()},getSiteConfig:bt,updateSiteConfig:function(t){return 
gt=P(gt,t),mt(gt,yt),gt},reset:function(){wt()},globalReset:function(){wt(),Yo(xt())},defaultConfig:pt});d(xt().logLevel),wt(xt());var $o=Uo,Wo=function(){Vo.startOnLoad?$o.getConfig().startOnLoad&&Vo.init():void 0===Vo.startOnLoad&&(f.debug("In start, no config"),$o.getConfig().startOnLoad&&Vo.init())};"undefined"!=typeof document&& -/*! - * Wait for document loaded before starting the execution - */ -window.addEventListener("load",(function(){Wo()}),!1);var Vo={startOnLoad:!0,htmlLabels:!0,mermaidAPI:$o,parse:$o.parse,render:$o.render,init:function(){var t,e,n,r=this,a=$o.getConfig();arguments.length>=2?( -/*! sequence config was passed as #1 */ -void 0!==arguments[0]&&(Vo.sequenceConfig=arguments[0]),t=arguments[1]):t=arguments[0],"function"==typeof arguments[arguments.length-1]?(e=arguments[arguments.length-1],f.debug("Callback function found")):void 0!==a.mermaid&&("function"==typeof a.mermaid.callback?(e=a.mermaid.callback,f.debug("Callback function found")):f.debug("No Callback function found")),t=void 0===t?document.querySelectorAll(".mermaid"):"string"==typeof t?document.querySelectorAll(t):t instanceof window.Node?[t]:t,f.debug("Start On Load before: "+Vo.startOnLoad),void 0!==Vo.startOnLoad&&(f.debug("Start On Load inner: "+Vo.startOnLoad),$o.updateSiteConfig({startOnLoad:Vo.startOnLoad})),void 0!==Vo.ganttConfig&&$o.updateSiteConfig({gantt:Vo.ganttConfig});for(var o=function(a){var o=t[a]; -/*! Check if previously processed */if(o.getAttribute("data-processed"))return"continue";o.setAttribute("data-processed",!0);var s="mermaid-".concat(Date.now());n=i(n=o.innerHTML).trim().replace(//gi,"
");var c=W.detectInit(n);c&&f.debug("Detected early reinit: ",c);try{$o.render(s,n,(function(t,n){o.innerHTML=t,void 0!==e&&e(s),n&&n(o)}),o)}catch(t){f.warn("Syntax Error rendering"),f.warn(t),r.parseError&&r.parseError(t)}},s=0;s - -DESCRIPTION: - validate the relayer configuration -``` - -__Example__ - -Validate the default config file, the path inferred automatically to be -`$HOME/.hermes/config.toml`. - -```shell -hermes config validate -``` - -```text -hermes config validate -Jul 12 16:31:07.017 INFO using default configuration from '$HOME/.hermes/config.toml' -Success: "validation passed successfully" -``` - -Validate a config file at an arbitrary location: - -```shell -hermes -c ./config.toml config validate -``` - -This one fails validation because we mistakenly added two separate sections for -the same chain `ibc-1`: - -```text -hermes -c ./config.toml config validate -error: hermes fatal error: config error: config file has duplicate entry for the chain with id ibc-1 -``` diff --git a/guide/src/commands/global.md b/guide/src/commands/global.md deleted file mode 100644 index 6c302987f0..0000000000 --- a/guide/src/commands/global.md +++ /dev/null @@ -1,113 +0,0 @@ -# Global options - -Hermes accepts global options which affect all commands. - -```shell -hermes 0.15.0 -Informal Systems -Implementation of `hermes`, an IBC Relayer developed in Rust. - -FLAGS: - -c, --config CONFIG path to configuration file - -j, --json enable JSON output -``` - -The flags must be specified right after the `hermes` command and before any subcommand. - -__Example__ - -To start the relayer using the configuration file at `/home/my_chain.toml` and enable JSON output: - -```shell -hermes -c /home/my_chain.toml --json start -``` - -## JSON output - -If the `--json` option is supplied, all relayer commands will output single-line JSON values instead of plain text. - -Log messages will be written to `stderr`, while the final result will be written to `stdout`, and everything -will be formatted as JSON. -This allows processing only the final output using [`jq`](https://stedolan.github.io/jq/). -To process all the output using `jq`, one can redirect `stderr` to `stdout` with `hermes --json COMMAND 2>&1 | jq`. - -__Example__ - -```shell -hermes -c /home/my_chain.toml --json create client ibc-0 ibc-1 -``` - -```json -{"timestamp":"Apr 13 20:46:31.921","level":"INFO","fields":{"message":"Using default configuration from: '.hermes/config.toml'"},"target":"ibc_relayer_cli::commands"} -{"timestamp":"Apr 13 20:46:31.961","level":"INFO","fields":{"message":"running listener","chain.id":"ibc-1"},"target":"ibc_relayer::event::monitor"} -{"timestamp":"Apr 13 20:46:31.989","level":"INFO","fields":{"message":"running listener","chain.id":"ibc-0"},"target":"ibc_relayer::event::monitor"} -{"result":{"CreateClient":{"client_id":"07-tendermint-1","client_type":"Tendermint","consensus_height":{"revision_height":10060,"revision_number":1},"height":{"revision_height":10072,"revision_number":0}}},"status":"success"} -``` - -The first three lines are printed to `stderr`, while the last line with a `"result"` key is printed to `stdout`. 
- -__Example__ - -To improve the readability, pipe all of the output to `jq`: - -``` -hermes -c /home/my_chain.toml --json create client ibc-0 ibc-1 2>&1 | jq -``` - -```json -{ - "timestamp": "Apr 13 20:52:26.060", - "level": "INFO", - "fields": { - "message": "Using default configuration from: '.hermes/config.toml'" - }, - "target": "ibc_relayer_cli::commands" -} -{ - "timestamp": "Apr 13 20:52:26.082", - "level": "INFO", - "fields": { - "message": "running listener", - "chain.id": "ibc-1" - }, - "target": "ibc_relayer::event::monitor" -} -{ - "timestamp": "Apr 13 20:52:26.088", - "level": "INFO", - "fields": { - "message": "running listener", - "chain.id": "ibc-0" - }, - "target": "ibc_relayer::event::monitor" -} -{ - "result": { - "CreateClient": { - "client_id": "07-tendermint-5", - "client_type": "Tendermint", - "consensus_height": { - "revision_height": 10364, - "revision_number": 1 - }, - "height": { - "revision_height": 10375, - "revision_number": 0 - } - } - }, - "status": "success" -} -``` - -__Example__ - -To extract the identifer of the newly created client above: - -``` -hermes -c /home/my_chain.toml --json create client ibc-0 ibc-1 | jq '.result.CreateClient.client_id' -``` - -``` -"07-tendermint-2" -``` diff --git a/guide/src/commands/index.md b/guide/src/commands/index.md deleted file mode 100644 index 9f7ede994e..0000000000 --- a/guide/src/commands/index.md +++ /dev/null @@ -1,41 +0,0 @@ -# Commands - -The `Commands` section presents the commands current available in Hermes - -## Sections - -**[Keys](./keys/index.md)** - -Commands to manage keys (private keys) for each chain. - -**[Config](./config.md)** - -Commands to manage configuration file, in particular to validate it. - -**[Path Setup](./path-setup/index.md)** - -Commands to manage clients, connections, channels. - -**[Relaying](./relaying/index.md)** - -Commands to start the relayer and relay packets. - -**[Listen Mode](./listen/index.md)** - -Commands to listen for IBC events - -**[Upgrade](./upgrade/index.md)** - -Commands to perform client upgrade - -**[Monitor](./misbehaviour/index.md)** - -Commands to monitor clients and submit evidence of misbehaviour - -**[Queries](./queries/index.md)** - -Commands to execute queries on configured chains - -**[Raw Transactions](./raw/index.md)** - -Commands to submit individual transactions to configured chains diff --git a/guide/src/commands/keys/index.md b/guide/src/commands/keys/index.md deleted file mode 100644 index e246d0ef4a..0000000000 --- a/guide/src/commands/keys/index.md +++ /dev/null @@ -1,344 +0,0 @@ -# Adding Keys to the Relayer - -> __WARNING__: Currently the relayer does NOT support a `keyring` store to securely -> store the private key file. The key file will be stored on the local file system -> in the user __$HOME__ folder under `$HOME/.hermes/keys/` - -> __BREAKING__: As of Hermes v1.0.0, the sub-command `keys restore` has been removed. -> Please use the sub-command `keys add` in order to restore a key. - ---- - -Using the `keys` command you can add and list keys. - -#### Show usage - -To see the available sub-commands for the `keys` command run: - -```shell -hermes help keys -``` - -The available sub-commands are the following: - -```shell -USAGE: - hermes keys - -DESCRIPTION: - Manage keys in the relayer for each chain - -SUBCOMMANDS: - help Get usage information - add Adds key to a configured chain or restores a key to a configured chain - using a mnemonic - balance Query balance for a key from a configured chain. 
If no key is given, the - key is retrieved from the configuration file - delete Delete key(s) from a configured chain - list List keys configured on a chain -``` - -### Key Seed file (Private Key) - -In order to execute the command below you need a private key file (JSON). The relayer uses the private key file to sign the transactions submitted to the chain. - -The private key file can be obtained by using the `keys add` on a Cosmos chain. For example, the command for `gaiad` is: - -```shell -# The `key_name` parameter is the name of the key that will be found in the json output -# For example, in the "Two Local Chains" tutorial, we use "testkey". -gaiad keys add --output json -``` - -The command outputs a JSON similar to the one below. - -```json -{ - "name": "testkey", - "type": "local", - "address": "cosmos1tc3vcuxyyac0dmayf887t95tdg7qpyql48w7gj", - "pubkey": "cosmospub1addwnpepqgg7ng4ycm60pdxfzdfh4hjvkwcr3da59mr8k883vsstx60ruv7kur4525u", - "mnemonic": "[24 words mnemonic]" -} -``` - -You can save this to a file (e.g. `key_seed.json`) and use it to add to the relayer with `hermes keys add -f key_seed.json`. See the `Adding Keys` section for more details. - -### Adding and restoring Keys - -The command `keys add` has two exclusive flags, `--key-file` and `--mnemonic-file` which are respectively used to add and restore a key. - -```shell - hermes keys add [OPTIONS] --key-file --mnemonic-file - -DESCRIPTION: - Adds key to a configured chain or restores a key to a configured chain using a mnemonic - -ARGS: - chain_id identifier of the chain - -FLAGS: - -f, --key-file - path to the key file - - -m, --mnemonic-file - path to file containing mnemonic to restore the key from - -OPTIONS: - -k, --key-name - name of the key (defaults to the `key_name` defined in the config) - - -p, --hd-path - derivation path for this key [default: m/44'/118'/0'/0/0] -``` - -#### Add a private key to a chain from a key file - -```shell - hermes keys add [OPTIONS] --key-file - -DESCRIPTION: - Adds key to a configured chain or restores a key to a configured chain using a mnemonic - -ARGS: - chain_id identifier of the chain - -FLAGS: - -f, --key-file - path to the key file - -OPTIONS: - -k, --key-name - name of the key (defaults to the `key_name` defined in the config) - - -p, --hd-path - derivation path for this key [default: m/44'/118'/0'/0/0] -``` - -To add a private key file to a chain: - -```shell -hermes -c config.toml keys add [CHAIN_ID] -f [PRIVATE_KEY_FILE] -``` - -The content of the file key should have the same format as the output of the `gaiad keys add` command: - -```json -{ - "name": "testkey", - "type": "local", - "address": "cosmos1tc3vcuxyyac0dmayf887t95tdg7qpyql48w7gj", - "pubkey": "cosmospub1addwnpepqgg7ng4ycm60pdxfzdfh4hjvkwcr3da59mr8k883vsstx60ruv7kur4525u", - "mnemonic": "[24 words mnemonic]" -} -``` - -If the command is successful a message similar to the one below will be displayed: - -```json -Success: Added key testkey ([ADDRESS]) on [CHAIN ID] chain -``` - -> **Key name:** -> By default, the key will be named after the `key_name` property specified in the configuration file. -> To use a different key name, specify the `--key-name` option when invoking `keys add`. 
-> -> ``` -> hermes -c config.toml keys add [CHAINID] -f [PRIVATE_KEY_FILE] -k [KEY_NAME] -> ``` - -#### Restore a private key to a chain from a mnemonic - -```shell - hermes keys add [OPTIONS] --mnemonic-file - -DESCRIPTION: - Adds key to a configured chain or restores a key to a configured chain using a mnemonic - -ARGS: - chain_id identifier of the chain - -FLAGS: - -m, --mnemonic-file - path to file containing mnemonic to restore the key from - -OPTIONS: - -k, --key-name - name of the key (defaults to the `key_name` defined in the config) - - -p, --hd-path - derivation path for this key [default: m/44'/118'/0'/0/0] -``` - -To restore a key from its mnemonic: - -```shell -hermes -c config.toml keys add [CHAIN_ID] -m "[MNEMONIC_FILE]" -``` - -or using an explicit [derivation path](https://github.com/satoshilabs/slips/blob/master/slip-0044.md), for example -an Ethereum coin type (used for Evmos, Injective, Umee, Cronos, and -possibly other networks): - -```shell -hermes -c config.toml keys add --mnemonic-file --hd-path "m/44'/60'/0'/0/0" -``` - -The mnemonic file needs to have the 24 mnemonic words on the same line, separated by a white space. So the content should have the following format: -``` -word1 word2 word3 ... word24 -``` - -If the command is successful a message similar to the one below will be displayed: - -```json -Success: Restore key testkey ([ADDRESS]) on [CHAIN ID] chain -``` - -> **Key name:** -> By default, the key will be named after the `key_name` property specified in the configuration file. -> To use a different key name, specify the `--key-name` option when invoking `keys add`. -> -> ``` -> hermes -c config.toml keys add [CHAINID] -m "[MNEMONIC_FILE]" -k [KEY_NAME] -> ``` - -### Delete keys - -In order to delete the private keys added to chains use the `keys delete` command - -```shell -USAGE: - hermes keys delete - -DESCRIPTION: - Delete key(s) from a configured chain - -POSITIONAL ARGUMENTS: - chain_id identifier of the chain - -FLAGS: - -n, --name NAME name of the key - -a, --all delete all keys -``` - -#### Delete private keys that was previously added to a chain - -To delete a single private key by name: - -```shell -hermes -c config.toml keys delete [CHAIN_ID] -n [KEY_NAME] -``` - -Alternatively, to delete all private keys added to a chain: - -```shell -hermes -c config.toml keys delete [CHAIN_ID] -a -``` - -### List keys - -In order to list the private keys added to chains use the `keys list` command - -```shell -USAGE: - hermes keys list - -DESCRIPTION: - List keys configured on a chain - -POSITIONAL ARGUMENTS: - chain_id identifier of the chain -``` - -#### Listing the private key that was added to a chain - -To list the private key file that was added to a chain: - -```shell -hermes -c config.toml keys list [CHAIN_ID] -``` - -If the command is successful a message similar to the one below will be displayed: - -``` -Success: -- user2 (cosmos1attn9fxrcvjz483w3tu4cfz77ldmlyujly3q3k) -- testkey (cosmos1dw88vdekeeuta5u50p6n5lt5v5c6y2we0pu8nz) -``` - -**JSON:** - -```shell -hermes --json -c config.toml keys list [CHAIN_ID] | jq -``` - -If the command is successful a message similar to the one below will be displayed: - -```json -{ - "result": { - "testkey": { - "account": "cosmos1dw88vdekeeuta5u50p6n5lt5v5c6y2we0pu8nz", - "address": [ 107, 142, 118, 55, 54, 206, 120, 190, 211, 148, 120, 117, 58, 125, 116, 101, 49, 162, 41, 217 ], - "coin_type": 118, - "private_key": "(snip)", - "public_key": 
"xpub6Gc7ZUt2q1BiQYjhUextPv5bZLwosHigZYqEquPD6FkAGmHDrLiBgE5Xnh8XGZp79rAXtZn1Dt3DNQHxxgCgVQqfRMfVsRiXn6mwULBnYq7" - }, - "user2": { - "account": "cosmos1attn9fxrcvjz483w3tu4cfz77ldmlyujly3q3k", - "address": [ 234, 215, 50, 164, 195, 195, 36, 42, 158, 46, 138, 249, 92, 36, 94, 247, 219, 191, 147, 146 ], - "coin_type": 118, - "private_key": "(snip)", - "public_key": "xpub6FmDbeGTWVjSvHrqHfrpnMTZxpPX1V7XFiq5nMuvgwX9jumt1yUuwNAUQo8Nn36unbFShg6iSjkfMBgeY49wik7rF91N2SHvarpX62ByWMf" - } - }, - "status": "success" -} -``` -### Query balance - -In order to retrieve the balance of an account associated with a key use the `keys balance` command - -```shell -USAGE: - hermes keys balance [OPTIONS] - -DESCRIPTION: - Query balance for a key from a configured chain. If no key is given, the key is retrieved from the configuration file - -ARGS: - chain_id identifier of the chain - -OPTIONS: - -k, --key-name (optional) name of the key (defaults to the `key_name` defined in the config) -``` - -If the command is successful a message with the following format will be displayed: - -``` -Success: balance for key `KEY_NAME`: 100000000000 stake -``` - -**JSON:** - -```shell - hermes --json keys balance [OPTIONS] -``` -or - -```shell - hermes -j keys balance [OPTIONS] -``` - -If the command is successful a message with the following format will be displayed: - -```json -{ - "result": { - "amount": "99989207", - "denom": "stake" - }, - "status": "success" -} -``` \ No newline at end of file diff --git a/guide/src/commands/listen/index.md b/guide/src/commands/listen/index.md deleted file mode 100644 index dd4fc8472d..0000000000 --- a/guide/src/commands/listen/index.md +++ /dev/null @@ -1,162 +0,0 @@ -# Relayer Listen Mode - -The relayer can be started in `listen` mode to display the events emitted by a given chain. `NewBlock` and `Tx` IBC events are shown. - -```shell -USAGE: - hermes listen - -DESCRIPTION: - Listen to and display IBC events emitted by a chain - -POSITIONAL ARGUMENTS: - chain_id Identifier of the chain to listen for events from - -FLAGS: - -e, --event EVENT Add an event type to listen for, can be repeated. Listen for all events by default (available: Tx, NewBlock) -``` - -__Example__ - -Start the relayer in listen mode for all `ibc-0` events and observe the output: - -```shell -hermes listen ibc-0 -``` - -```json -EventBatch { - chain_id: ChainId { - id: "ibc-0", - version: 0, - }, - height: block::Height(10914), - events: [ - NewBlock( - NewBlock { - height: block::Height(10914), - }, - ), - ], -} -EventBatch { - chain_id: ChainId { - id: "ibc-0", - version: 0, - }, - height: block::Height(10915), - events: [ - OpenInitConnection( - OpenInit( - Attributes { - height: block::Height(10915), - connection_id: Some( - ConnectionId( - "connection-3", - ), - ), - client_id: ClientId( - "07-tendermint-3", - ), - counterparty_connection_id: None, - counterparty_client_id: ClientId( - "07-tendermint-5", - ), - }, - ), - ), - ], - -... - -EventBatch { - chain_id: ChainId { - id: "ibc-0", - version: 0, - }, - height: block::Height(10919), - events: [ - UpdateClient( - UpdateClient( - Attributes { - height: block::Height(10919), - client_id: ClientId( - "07-tendermint-3", - ), - client_type: Tendermint, - consensus_height: Height { - revision: 1, - height: 10907, - }, - }, - ), - ), - ], -} - -... 
- -EventBatch { - chain_id: ChainId { - id: "ibc-0", - version: 0, - }, - height: block::Height(10924), - events: [ - UpdateClient( - UpdateClient( - Attributes { - height: block::Height(10924), - client_id: ClientId( - "07-tendermint-3", - ), - client_type: Tendermint, - consensus_height: Height { - revision: 1, - height: 10912, - }, - }, - ), - ), - OpenAckConnection( - OpenAck( - Attributes { - height: block::Height(10924), - connection_id: Some( - ConnectionId( - "connection-3", - ), - ), - client_id: ClientId( - "07-tendermint-3", - ), - counterparty_connection_id: Some( - ConnectionId( - "connection-5", - ), - ), - counterparty_client_id: ClientId( - "07-tendermint-5", - ), - }, - ), - ), - ], -} -``` - -## Filter events - -The `listen` command accepts an `--event` flag to specify which event types to listen for. - -At the moment, two event types are available: -- `NewBlock` -- `Tx` - -The `--event` flag can be repeated to specify more than one event type. - -- To listen for only `NewBlock` events on `ibc-0`, invoke `hermes listen ibc-0 --event NewBlock` -- To listen for only `Tx` events on `ibc-0`, invoke `hermes listen ibc-0 --event Tx` -- To listen for both `NewBlock` and `Tx` events on `ibc-0`, invoke `hermes listen ibc-0 --e NewBlock --event Tx` - -If the `--event` flag is omitted, the relayer will subscribe to all event types. diff --git a/guide/src/commands/misbehaviour/index.md b/guide/src/commands/misbehaviour/index.md deleted file mode 100644 index 1d7f48cbad..0000000000 --- a/guide/src/commands/misbehaviour/index.md +++ /dev/null @@ -1,127 +0,0 @@ -# Misbehaviour - -## Table of Contents - - -## Monitoring Misbehaviour and Evidence Submission -Use the `mishbehaviour` command to monitor the updates for a given client, detect certain types of misbehaviour and -submit evidence to the chain. If the evidence passes the on-chain validation, the client is frozen. Further packets -cannot be relayed using the frozen client. - -```shell -USAGE: - hermes misbehaviour - -DESCRIPTION: - Listen to client update IBC events and handles misbehaviour - -POSITIONAL ARGUMENTS: - chain_id identifier of the chain where client updates are monitored for misbehaviour - client_id identifier of the client to be monitored for misbehaviour -``` - -The misbehaviour monitor starts by analyzing all headers used in prior client updates. -Once finished it registers for update client events and checks any new headers for misbehaviour. -If it detects evidence of misbehaviour, it submits a transaction with the evidence to the chain. -If the chain validates the transaction then the monitor exits. - -> This is an experimental feature. - -The following types of misbehaviour are handled: -1. **Fork** - - Assumes at least one consensus state before the fork point exists. - Let existing consensus states on chain B be: `[Sn,.., Sf, Sf-1, S0]` with `Sf-1` being - the most recent state before the fork. - Chain A is queried for a header `Hf'` at `Sf.height` and if it is different than the `Hf` - in the event for the client update (the one that has generated `Sf` on chain), then the two - headers are included in the evidence and submitted. - Note that in this case the headers are different but have the same height. - -2. **BFT time violation for an unavailable header** - - Some header with a height that is higher than the latest - height on chain `A` has been accepted and a consensus state was created on `B`. Note that this implies - that the timestamp of this header must be within the `clock_drift` of the client. 
- Assume the client on `B` has been updated with `h2`(not present on/ produced by chain `A`) - and it has a timestamp of `t2` that is at most `clock_drift` in the future. - Then the latest header from `A` is fetched, let it be `h1`, with a timestamp of `t1`. - If `t1 >= t2` then evidence of misbehavior is submitted to A. - -__Example__ - -The `hermes misbehaviour` outputs an error message displaying `MISBEHAVIOUR DETECTED`: - -```shell -hermes misbehaviour ibc-0 07-tendermint-0 -``` - -```json -Apr 13 20:04:03.347 INFO ibc_relayer::foreign_client: checking misbehaviour for consensus state heights [Height { revision: 1, height: 195 }, Height { revision: 1, height: 85 }, Height { revision: 1, height: 28 }] -Apr 13 20:04:04.425 ERROR ibc_relayer::foreign_client: MISBEHAVIOUR DETECTED ClientId("07-tendermint-0") h1: Height { revision: 1, height: 195 }-Height { revision: 1, height: 85 } h2: Height { revision: 1, height: 195 }-Height { revision: 1, height: 85 }, sending evidence -Apr 13 20:04:05.070 INFO ibc_relayer_cli::commands::misbehaviour: evidence submission result [ClientMisbehaviour(ClientMisbehaviour(Attributes { height: Height { revision: 0, height: 1521 }, client_id: ClientId("07-tendermint-0"), client_type: Tendermint, consensus_height: Height { revision: 1, height: 195 } }))] - -Success: Some( - ClientMisbehaviour( - ClientMisbehaviour( - Attributes { - height: Height { - revision: 0, - height: 1521, - }, - client_id: ClientId( - "07-tendermint-0", - ), - client_type: Tendermint, - consensus_height: Height { - revision: 1, - height: 195, - }, - }, - ), - ), -) -``` - -Querying client state from this point will show the client is in frozen state, with `frozen_height` indicating the height at which the client was frozen: -```shell -hermes query client state ibc-0 07-tendermint-0 | jq -``` -```json -{ - "result": { - "allow_update_after_expiry": true, - "allow_update_after_misbehaviour": true, - "chain_id": "ibc-1", - "frozen_height": { - "revision_height": 16, - "revision_number": 1 - }, - "latest_height": { - "revision_height": 16, - "revision_number": 1 - }, - "max_clock_drift": { - "nanos": 0, - "secs": 3 - }, - "trust_level": { - "denominator": "3", - "numerator": "1" - }, - "trusting_period": { - "nanos": 0, - "secs": 1209600 - }, - "unbonding_period": { - "nanos": 0, - "secs": 1814400 - }, - "upgrade_path": [ - "upgrade", - "upgradedIBCState" - ] - }, - "status": "success" -} -``` diff --git a/guide/src/commands/path-setup/channels.md b/guide/src/commands/path-setup/channels.md deleted file mode 100644 index ca47b1c116..0000000000 --- a/guide/src/commands/path-setup/channels.md +++ /dev/null @@ -1,486 +0,0 @@ -# Channel - -## Table of Contents - - - -## Establish Channel - -Use the `create channel` command to establish a new channel. - -```shell -USAGE: - hermes create channel [OPTIONS] --port-a --port-b [CONNECTION_A] - -DESCRIPTION: - Create a new channel between two chains using a pre-existing connection. - Alternatively, create a new client and a new connection underlying the new channel if a pre-existing connection is not provided. 
- -POSITIONAL ARGUMENTS: - Identifier of the side `a` chain for the new channel - Identifier of the connection on chain `a` to use in creating the new channel - -FLAGS: - -c, --chain-b Identifier of the side `b` chain for the new channel - -h, --help Print help information - --new-client-connection Indicates that a new client and connection will be created underlying the new channel - -o, --order The channel ordering, valid options 'unordered' (default) and 'ordered' [default: ORDER_UNORDERED] - --port-a Identifier of the side `a` port for the new channel - --port-b Identifier of the side `b` port for the new channel - -v, --channel-version The version for the new channel -``` - -## Examples - -### New channel over an existing connection - -This is the preferred way to create a new channel, by leveraging an existing -connection. - -Create a new unordered channel between `ibc-0` and `ibc-1` over an existing connection, -specifically the one we just created in the example above, with port name -`transfer` on both sides: - -```shell -hermes create channel ibc-0 --connection-a connection-0 --port-a transfer --port-b transfer -o unordered -``` - -Notice that one can omit the destination chain parameter, as Hermes will automatically -figure it out by looking up the given connection on `ibc-0`. - -```json -🥳 ibc-0 => OpenInitChannel( - OpenInit( - Attributes { - height: Height { revision: 0, height: 129 }, - port_id: PortId("transfer"), - channel_id: Some(ChannelId("channel-1")), - connection_id: ConnectionId("connection-0"), - counterparty_port_id: PortId("transfer"), - counterparty_channel_id: None - } - ) -) -🥳 ibc-1 => OpenTryChannel( - OpenTry( - Attributes { - height: Height { revision: 1, height: 126 }, - port_id: PortId("transfer"), - channel_id: Some(ChannelId("channel-1")), - connection_id: ConnectionId("connection-0"), - counterparty_port_id: PortId("transfer"), - counterparty_channel_id: Some(ChannelId("channel-1")) - } - ) -) -🥳 ibc-0 => OpenAckChannel( - OpenAck( - Attributes { - height: Height { revision: 0, height: 137 }, - port_id: PortId("transfer"), - channel_id: Some(ChannelId("channel-1")), - connection_id: ConnectionId("connection-0"), - counterparty_port_id: PortId("transfer"), - counterparty_channel_id: Some(ChannelId("channel-1")) - } - ) -) -🥳 ibc-1 => OpenConfirmChannel( - OpenConfirm( - Attributes { - height: Height { revision: 1, height: 129 }, - port_id: PortId("transfer"), - channel_id: Some(ChannelId("channel-1")), - connection_id: ConnectionId("connection-0"), - counterparty_port_id: PortId("transfer"), - counterparty_channel_id: Some(ChannelId("channel-1")) - } - ) -) -🥳 🥳 🥳 Channel handshake finished for Channel { - ordering: Unordered, - a_side: ChannelSide { - chain: ProdChainHandle { - chain_id: ChainId { - id: "ibc-0", - version: 0, - }, - runtime_sender: Sender { .. }, - }, - client_id: ClientId( - "07-tendermint-0", - ), - connection_id: ConnectionId( - "connection-0", - ), - port_id: PortId( - "transfer", - ), - channel_id: ChannelId( - "channel-1", - ), - }, - b_side: ChannelSide { - chain: ProdChainHandle { - chain_id: ChainId { - id: "ibc-1", - version: 1, - }, - runtime_sender: Sender { .. 
}, - }, - client_id: ClientId( - "07-tendermint-0", - ), - connection_id: ConnectionId( - "connection-0", - ), - port_id: PortId( - "transfer", - ), - channel_id: ChannelId( - "channel-1", - ), - }, - connection_delay: 0s, -} -Success: Channel { - ordering: Unordered, - a_side: ChannelSide { - chain: ProdChainHandle { - chain_id: ChainId { - id: "ibc-0", - version: 0, - }, - runtime_sender: Sender { .. }, - }, - client_id: ClientId( - "07-tendermint-0", - ), - connection_id: ConnectionId( - "connection-0", - ), - port_id: PortId( - "transfer", - ), - channel_id: ChannelId( - "channel-1", - ), - }, - b_side: ChannelSide { - chain: ProdChainHandle { - chain_id: ChainId { - id: "ibc-1", - version: 1, - }, - runtime_sender: Sender { .. }, - }, - client_id: ClientId( - "07-tendermint-0", - ), - connection_id: ConnectionId( - "connection-0", - ), - port_id: PortId( - "transfer", - ), - channel_id: ChannelId( - "channel-1", - ), - }, - connection_delay: 0s, -} -``` - -### New channel over a new connection - -Should you specifically want to create a new client and a new connection as part -of the `create channel` flow, that option exists, though this is the -less-preferred option over the previous flow, as creating new clients and -connections should only be done in certain specific circumstances so as not to -create redundant resources. - -Create a new unordered channel between `ibc-0` and `ibc-1` over a new -connection, using port name `transfer` on both sides and accepting the -interactive prompt that pops up notifying you that a new client and a new -connection will be initialized as part of the process: - -```shell -hermes create channel ibc-0 --chain-b ibc-1 --port-a transfer --port-b transfer -o unordered --new-client-connection -``` - -```json -🥂 ibc-0 => OpenInitConnection( - OpenInit( - Attributes { - height: Height { revision: 0, height: 66 }, - connection_id: Some( - ConnectionId( - "connection-0", - ), - ), - client_id: ClientId( - "07-tendermint-0", - ), - counterparty_connection_id: None, - counterparty_client_id: ClientId( - "07-tendermint-0", - ), - }, - ), -) - -🥂 ibc-1 => OpenTryConnection( - OpenTry( - Attributes { - height: Height { revision: 1, height: 64 }, - connection_id: Some( - ConnectionId( - "connection-0", - ), - ), - client_id: ClientId( - "07-tendermint-0", - ), - counterparty_connection_id: Some( - ConnectionId( - "connection-0", - ), - ), - counterparty_client_id: ClientId( - "07-tendermint-0", - ), - }, - ), -) - -🥂 ibc-0 => OpenAckConnection( - OpenAck( - Attributes { - height: Height { revision: 0, height: 76 }, - connection_id: Some( - ConnectionId( - "connection-0", - ), - ), - client_id: ClientId( - "07-tendermint-0", - ), - counterparty_connection_id: Some( - ConnectionId( - "connection-0", - ), - ), - counterparty_client_id: ClientId( - "07-tendermint-0", - ), - }, - ), -) - -🥂 ibc-1 => OpenConfirmConnection( - OpenConfirm( - Attributes { - height: Height { revision: 1, height: 68 }, - connection_id: Some( - ConnectionId( - "connection-0", - ), - ), - client_id: ClientId( - "07-tendermint-0", - ), - counterparty_connection_id: Some( - ConnectionId( - "connection-0", - ), - ), - counterparty_client_id: ClientId( - "07-tendermint-0", - ), - }, - ), -) - -🥂🥂🥂 Connection handshake finished for [Connection { - delay_period: 0s, - a_side: ConnectionSide { - chain: ProdChainHandle { - chain_id: ChainId { - id: "ibc-0", - version: 0, - }, - runtime_sender: Sender { .. 
}, - }, - client_id: ClientId( - "07-tendermint-0", - ), - connection_id: ConnectionId( - "connection-0", - ), - }, - b_side: ConnectionSide { - chain: ProdChainHandle { - chain_id: ChainId { - id: "ibc-1", - version: 1, - }, - runtime_sender: Sender { .. }, - }, - client_id: ClientId( - "07-tendermint-0", - ), - connection_id: ConnectionId( - "connection-0", - ), - }, -}] - -🥳 ibc-0 => OpenInitChannel( - OpenInit( - Attributes { - height: Height { revision: 0, height: 78 }, - port_id: PortId("transfer"), - channel_id: Some(ChannelId("channel-0")), - connection_id: ConnectionId("connection-0"), - counterparty_port_id: PortId("transfer"), - counterparty_channel_id: None - } - ) -) - -🥳 ibc-1 => OpenTryChannel( - OpenTry( - Attributes { - height: Height { revision: 1, height: 70 }, - port_id: PortId("transfer"), - channel_id: Some(ChannelId("channel-0")), - connection_id: ConnectionId("connection-0"), - counterparty_port_id: PortId("transfer"), - counterparty_channel_id: Some(ChannelId("channel-0")) - } - ) -) - -🥳 ibc-0 => OpenAckChannel( - OpenAck( - Attributes { - height: Height { revision: 0, height: 81 }, - port_id: PortId("transfer"), - channel_id: Some(ChannelId("channel-0")), - connection_id: ConnectionId("connection-0"), - counterparty_port_id: PortId("transfer"), - counterparty_channel_id: Some(ChannelId("channel-0")) - } - ) -) - -🥳 ibc-1 => OpenConfirmChannel - OpenConfirm - Attributes { - height: Height { revision: 1, height: 73 }, - port_id: PortId("transfer"), - channel_id: Some(ChannelId("channel-0")), - connection_id: ConnectionId("connection-0"), - counterparty_port_id: PortId("transfer"), - counterparty_channel_id: Some(ChannelId("channel-0")) - } - ) -) - -🥳 🥳 🥳 Channel handshake finished for Channel { - ordering: Unordered, - a_side: ChannelSide { - chain: ProdChainHandle { - chain_id: ChainId { - id: "ibc-0", - version: 0, - }, - runtime_sender: Sender { .. }, - }, - client_id: ClientId( - "07-tendermint-0", - ), - connection_id: ConnectionId( - "connection-0", - ), - port_id: PortId( - "transfer", - ), - channel_id: ChannelId( - "channel-0", - ), - }, - b_side: ChannelSide { - chain: ProdChainHandle { - chain_id: ChainId { - id: "ibc-1", - version: 1, - }, - runtime_sender: Sender { .. }, - }, - client_id: ClientId( - "07-tendermint-0", - ), - connection_id: ConnectionId( - "connection-0", - ), - port_id: PortId( - "transfer", - ), - channel_id: ChannelId( - "channel-0", - ), - }, - connection_delay: 0s, -} - -Success: Channel { - ordering: Unordered, - a_side: ChannelSide { - chain: ProdChainHandle { - chain_id: ChainId { - id: "ibc-0", - version: 0, - }, - runtime_sender: Sender { .. }, - }, - client_id: ClientId( - "07-tendermint-0", - ), - connection_id: ConnectionId( - "connection-0", - ), - port_id: PortId( - "transfer", - ), - channel_id: ChannelId( - "channel-0", - ), - }, - b_side: ChannelSide { - chain: ProdChainHandle { - chain_id: ChainId { - id: "ibc-1", - version: 1, - }, - runtime_sender: Sender { .. }, - }, - client_id: ClientId( - "07-tendermint-0", - ), - connection_id: ConnectionId( - "connection-0", - ), - port_id: PortId( - "transfer", - ), - channel_id: ChannelId( - "channel-0", - ), - }, - connection_delay: 0s, -} -``` - -A new channel with identifier `channel-0` on both sides has been established on -a new connection with identifier `connection-0` on both sides. 
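-
-As an optional sanity check, the newly established channel can be inspected with the query commands described later in this guide. The identifiers used below (`channel-0`, `connection-0`, port `transfer`) are the ones reported in the output above and may differ in your own setup.
-
-```shell
-# List all channels on ibc-0; the new channel should appear on port `transfer`
-hermes query channels ibc-0
-
-# Inspect the channel end; once the handshake has completed its state should be Open
-hermes query channel end ibc-0 transfer channel-0
-```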
diff --git a/guide/src/commands/path-setup/clients.md b/guide/src/commands/path-setup/clients.md deleted file mode 100644 index bfecbc7913..0000000000 --- a/guide/src/commands/path-setup/clients.md +++ /dev/null @@ -1,152 +0,0 @@ -# Client - -## Table of Contents - - - -## Create Client - -Use the `create client` command to create a new client on a destination chain, -tracking the state of the source chain. - -```shell -USAGE: - hermes create client [OPTIONS] - -ARGS: - - identifier of the destination chain - - - identifier of the source chain - -OPTIONS: - -d, --clock-drift - The maximum allowed clock drift for this client. - - The clock drift is a correction parameter. It helps deal with clocks that are only - approximately synchronized between the source and destination chains of this client. The - destination chain for this client uses the clock drift parameter when deciding to accept - or reject a new header (originating from the source chain) for this client. If this - option is not specified, a suitable clock drift value is derived from the chain - configurations. - - -p, --trusting-period - Override the trusting period specified in the config. - - The trusting period specifies how long a validator set is trusted for (must be shorter - than the chain's unbonding period). - - -t, --trust-threshold - Override the trust threshold specified in the configuration. - - The trust threshold defines what fraction of the total voting power of a known and - trusted validator set is sufficient for a commit to be accepted going forward. -``` - -__Example__ - -Create a new client on `ibc-0` which tracks `ibc-1`: - -```shell -hermes create client ibc-0 ibc-1 -``` - -```json - CreateClient( - Attributes { - height: Height { - revision: 0, - height: 286, - }, - client_id: ClientId( - "07-tendermint-0", - ), - client_type: Tendermint, - consensus_height: Height { - revision: 1, - height: 274, - }, - }, - ), -) -``` - -A new client is created with identifier `07-tendermint-1` - -## Update Client - -Use the `update client` command to update an existing client with a new consensus state. -Specific update and trusted heights can be specified. - -```shell -USAGE: - hermes update client [OPTIONS] - -ARGS: - identifier of the destination chain - identifier of the client to be updated on destination chain - -OPTIONS: - -h, --help Print help information - -H, --target-height the target height of the client update - -t, --trusted-height the trusted height of the client update -``` - -__Update client with latest header__ - -the client on `ibc-0` with latest header of `ibc-1`: - -```shell -hermes update client ibc-0 07-tendermint-9 -``` - -```json -Success: UpdateClient( - UpdateClient { - common: Attributes { - height: Height { revision: 0, height: 303 }, - client_id: ClientId( - "07-tendermint-1", - ), - client_type: Tendermint, - consensus_height: Height { revision: 1, height: 293 }, - }, - header: Some( - Tendermint( - Header {...}, - ), - ), - }, -) -``` - -The client with identifier `07-tendermint-1` has been updated with the consensus state at height `1-293`. 
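-
-To double-check the update (optional), the consensus heights stored for the client can be listed with the `query client consensus` command described in the Queries section; height `1-293` should now appear among them:
-
-```shell
-# list the consensus heights recorded for client 07-tendermint-1 on ibc-0
-hermes query client consensus ibc-0 07-tendermint-1 --heights-only
-```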
- -__Update a client to a specific target height__ - -```shell -hermes update client ibc-0 07-tendermint-1 --target-height 320 --trusted-height 293 -``` - -```json -Success: UpdateClient( - UpdateClient { - common: Attributes { - height: Height { revision: 0, height: 555 }, - client_id: ClientId( - "07-tendermint-1", - ), - client_type: Tendermint, - consensus_height: Height { revision: 1, height: 320 }, - }, - header: Some( - Tendermint( - Header {...}, - ), - ), - }, -) -``` - -The client with identifier `07-tendermint-1` has been updated with the consensus state at height `1-320`, as specified. diff --git a/guide/src/commands/path-setup/connections.md b/guide/src/commands/path-setup/connections.md deleted file mode 100644 index 2c8ec5a735..0000000000 --- a/guide/src/commands/path-setup/connections.md +++ /dev/null @@ -1,215 +0,0 @@ -# Connection - -## Table of Contents - - - -## Establish Connection -Use the `create connection` command to create a new connection. - -```shell -USAGE: - hermes create connection - -DESCRIPTION: - Create a new connection between two chains - -POSITIONAL ARGUMENTS: - chain_a_id identifier of the side `a` chain for the new connection - chain_b_id identifier of the side `b` chain for the new connection - -FLAGS: - --client-a CLIENT-A identifier of client hosted on chain `a`; default: None (creates a new client) - --client-b CLIENT-B identifier of client hosted on chain `b`; default: None (creates a new client) - --delay DELAY delay period parameter for the new connection (seconds) (default: 0) -``` - -## Examples - -### New connection over new clients - -Create a new connection between `ibc-0` and `ibc-1` over new clients: - -```shell -hermes create connection ibc-0 ibc-1 -``` - -```json -🥂 ibc-0 => OpenInitConnection( - OpenInit( - Attributes { - height: Height { revision: 0, height: 4073 }, - connection_id: Some( - ConnectionId( - "connection-8", - ), - ), - client_id: ClientId( - "07-tendermint-8", - ), - counterparty_connection_id: None, - counterparty_client_id: ClientId( - "07-tendermint-8", - ), - }, - ), -) - -🥂 ibc-1 => OpenTryConnection( - OpenTry( - Attributes { - height: Height { revision: 1, height: 4069 }, - connection_id: Some( - ConnectionId( - "connection-8", - ), - ), - client_id: ClientId( - "07-tendermint-8", - ), - counterparty_connection_id: Some( - ConnectionId( - "connection-8", - ), - ), - counterparty_client_id: ClientId( - "07-tendermint-8", - ), - }, - ), -) - -🥂 ibc-0 => OpenAckConnection( - OpenAck( - Attributes { - height: Height { revision: 0, height: 4081 }, - connection_id: Some( - ConnectionId( - "connection-8", - ), - ), - client_id: ClientId( - "07-tendermint-8", - ), - counterparty_connection_id: Some( - ConnectionId( - "connection-8", - ), - ), - counterparty_client_id: ClientId( - "07-tendermint-8", - ), - }, - ), -) - -🥂 ibc-1 => OpenConfirmConnection( - OpenConfirm( - Attributes { - height: Height { revision: 1, height: 4073 }, - connection_id: Some( - ConnectionId( - "connection-8", - ), - ), - client_id: ClientId( - "07-tendermint-8", - ), - counterparty_connection_id: Some( - ConnectionId( - "connection-8", - ), - ), - counterparty_client_id: ClientId( - "07-tendermint-8", - ), - }, - ), -) - -🥂🥂🥂 Connection handshake finished for [Connection { - delay_period: 0s, - a_side: ConnectionSide { - chain: ProdChainHandle { - chain_id: ChainId { - id: "ibc-0", - version: 0, - }, - runtime_sender: Sender { .. 
}, - }, - client_id: ClientId( - "07-tendermint-8", - ), - connection_id: ConnectionId( - "connection-8", - ), - }, - b_side: ConnectionSide { - chain: ProdChainHandle { - chain_id: ChainId { - id: "ibc-1", - version: 1, - }, - runtime_sender: Sender { .. }, - }, - client_id: ClientId( - "07-tendermint-8", - ), - connection_id: ConnectionId( - "connection-8", - ), - }, -}] - -Success: Connection { - delay_period: 0s, - a_side: ConnectionSide { - chain: ProdChainHandle { - chain_id: ChainId { - id: "ibc-0", - version: 0, - }, - runtime_sender: Sender { .. }, - }, - client_id: ClientId( - "07-tendermint-8", - ), - connection_id: ConnectionId( - "connection-8", - ), - }, - b_side: ConnectionSide { - chain: ProdChainHandle { - chain_id: ChainId { - id: "ibc-1", - version: 1, - }, - runtime_sender: Sender { .. }, - }, - client_id: ClientId( - "07-tendermint-8", - ), - connection_id: ConnectionId( - "connection-8", - ), - }, -} -``` - -### New connection over existing clients - -Create a new connection between `ibc-0` and `ibc-1` over existing clients, -both with client id `07-tendermint-0`: - -```shell -hermes create connection ibc-0 --client-a 07-tendermint-0 --client-b -07-tendermint-0 -``` - - -Notice that one can omit the destination chain parameter, as Hermes will automatically -figure it out by looking up the given client on `ibc-0`. - -## Non-zero Delay Connection - -A connection can be created with a delay period parameter. This parameter specifies a period of time that must elpase after a successful client state update and before a packet with proofs using its commitment root can pe processed on chain. For more information see [how packet delay works](../relaying/index.md#packet-delay) and the [connection delay specification](https://github.com/cosmos/ibc/tree/master/spec/core/ics-003-connection-semantics). diff --git a/guide/src/commands/path-setup/index.md b/guide/src/commands/path-setup/index.md deleted file mode 100644 index ae654ca77a..0000000000 --- a/guide/src/commands/path-setup/index.md +++ /dev/null @@ -1,43 +0,0 @@ -# Path Setup - -This section describes a number of commands that can be used to manage clients, connections, channels. - -| CLI name | Description | -| ---------------------- | --------------------------------------------------------------------------------------------------------------- | -| `create client` | [Create a client for source chain on destination chain](./clients.md#create-client) | -| `update client` | [Update the specified client on destination chain](./clients.md#md-client) | -| `create connection` | [Establish a connection using existing or new clients](./connections.md#establish-connection) | -| `create channel` | [Establish a channel using a pre-existing connection, or alternatively create a new client and a new connection underlying the new channel](./channels.md#establish-channel) | - - -## Create -Use the `create` commands to create new clients, connections, and channels. - -```shell -USAGE: - hermes create - -DESCRIPTION: - Create objects (client, connection, or channel) on chains - -SUBCOMMANDS: - help Get usage information - client Create a new IBC client - connection Create a new connection between two chains - channel Create a new channel between two chains -``` - -## Update -Use the `update` commands to update a client. 
- -```shell -USAGE: - hermes update - -DESCRIPTION: - Update objects (clients) on chains - -SUBCOMMANDS: - help Get usage information - client Update an IBC client -``` diff --git a/guide/src/commands/queries/channel.md b/guide/src/commands/queries/channel.md deleted file mode 100644 index fed5deb4ea..0000000000 --- a/guide/src/commands/queries/channel.md +++ /dev/null @@ -1,413 +0,0 @@ -# Table of Contents - - - -# Query Channels - -Use the `query channels` command to query the identifiers of all channels on a given chain. - -```shell -USAGE: - hermes query channels - -DESCRIPTION: - Query the identifiers of all channels on a given chain - -POSITIONAL ARGUMENTS: - chain_id identifier of the chain to query -``` - -__Example__ - -Query all channels on `ibc-1`: - -```shell -hermes query channels ibc-1 -``` - -```json -Success: [ - PortChannelId { - channel_id: ChannelId( - "channel-0", - ), - port_id: PortId( - "transfer", - ), - }, - PortChannelId { - channel_id: ChannelId( - "channel-1", - ), - port_id: PortId( - "transfer", - ), - }, -] -``` - -# Query Channel Data - -Use the `query channel` commands to query the information about a specific channel. - -```shell -USAGE: - hermes query channel - -DESCRIPTION: - Query information about channels - -SUBCOMMANDS: - client Query channel's client state - end Query channel end - ends Query channel ends and underlying connection and client objects -``` - -## Query the channel end data - -Use the `query channel end` command to query the channel end: - -```shell -USAGE: - hermes query channel end - -DESCRIPTION: - Query channel end - -POSITIONAL ARGUMENTS: - chain_id identifier of the chain to query - port_id identifier of the port to query - channel_id identifier of the channel to query - -FLAGS: - -H, --height HEIGHT height of the state to query -``` - -__Example__ - -Query the channel end of channel `channel-1` on port `transfer` on `ibc-1`: - -```shell -hermes query channel end ibc-1 transfer channel-1 -``` - -```json -Success: ChannelEnd { - state: Open, - ordering: Unordered, - remote: Counterparty { - port_id: PortId( - "transfer", - ), - channel_id: Some( - ChannelId( - "channel-0", - ), - ), - }, - connection_hops: [ - ConnectionId( - "connection-1", - ), - ], - version: "ics20-1", -} -``` - -## Query the channel data for both ends of a channel - - -Use the `query channel ends` command to obtain both ends of a channel: - -```shell -USAGE: - hermes query channel ends - -DESCRIPTION: - Query channel ends and underlying connection and client objects - -POSITIONAL ARGUMENTS: - chain_id identifier of the chain to query - port_id identifier of the port to query - channel_id identifier of the channel to query - -FLAGS: - -H, --height HEIGHT height of the state to query - -v, --verbose enable verbose output, displaying all details of channels, connections & clients -``` - -__Example__ - -Query the channel end of channel `channel-1` on port `transfer` on `ibc-0`: - -```shell -hermes query channel ends ibc-0 transfer channel-1 -``` - -```json -Success: ChannelEndsSummary { - chain_id: ChainId { - id: "ibc-0", - version: 0, - }, - client_id: ClientId( - "07-tendermint-1", - ), - connection_id: ConnectionId( - "connection-1", - ), - channel_id: ChannelId( - "channel-1", - ), - port_id: PortId( - "transfer", - ), - counterparty_chain_id: ChainId { - id: "ibc-2", - version: 2, - }, - counterparty_client_id: ClientId( - "07-tendermint-1", - ), - counterparty_connection_id: ConnectionId( - "connection-1", - ), - counterparty_channel_id: ChannelId( - 
"channel-1", - ), - counterparty_port_id: PortId( - "transfer", - ), -} -``` - -Passing the `-v` flag will additionally print all the details of the -channel, connection, and client on both ends. - -## Query the channel client state - -Use the `query channel client` command to obtain the channel's client state: - -```shell -USAGE: - hermes query channel client --port-id --channel-id - -DESCRIPTION: - Query channel's client state - -ARGS: - identifier of the chain to query - -FLAGS: - --channel-id identifier of the channel to query - --port-id identifier of the port to query -``` - -If the command is successful a message with the following format will be displayed: -``` -Success: Some( - IdentifiedAnyClientState { - client_id: ClientId( - "07-tendermint-0", - ), - client_state: Tendermint( - ClientState { - chain_id: ChainId { - id: "network2", - version: 0, - }, - trust_level: TrustThreshold { - numerator: 1, - denominator: 3, - }, - trusting_period: 1209600s, - unbonding_period: 1814400s, - max_clock_drift: 40s, - latest_height: Height { - revision: 0, - height: 2775, - }, - proof_specs: ProofSpecs( - [ - ProofSpec( - ProofSpec { - leaf_spec: Some( - LeafOp { - hash: Sha256, - prehash_key: NoHash, - prehash_value: Sha256, - length: VarProto, - prefix: [ - 0, - ], - }, - ), - inner_spec: Some( - InnerSpec { - child_order: [ - 0, - 1, - ], - child_size: 33, - min_prefix_length: 4, - max_prefix_length: 12, - empty_child: [], - hash: Sha256, - }, - ), - max_depth: 0, - min_depth: 0, - }, - ), - ProofSpec( - ProofSpec { - leaf_spec: Some( - LeafOp { - hash: Sha256, - prehash_key: NoHash, - prehash_value: Sha256, - length: VarProto, - prefix: [ - 0, - ], - }, - ), - inner_spec: Some( - InnerSpec { - child_order: [ - 0, - 1, - ], - child_size: 32, - min_prefix_length: 1, - max_prefix_length: 1, - empty_child: [], - hash: Sha256, - }, - ), - max_depth: 0, - min_depth: 0, - }, - ), - ], - ), - upgrade_path: [ - "upgrade", - "upgradedIBCState", - ], - allow_update: AllowUpdate { - after_expiry: true, - after_misbehaviour: true, - }, - frozen_height: None, - }, - ), - }, -) -``` - -**JSON:** - -```shell - hermes --json query channel client --port-id --channel-id -``` -or - -```shell - hermes -j query channel client --port-id --channel-id -``` - -If the command is successful a message with the following format will be displayed: - -```json -{ - "result": - { - "client_id":"07-tendermint-0", - "client_state": - { - "allow_update": - { - "after_expiry":true, - "after_misbehaviour":true - }, - "chain_id":"network2", - "frozen_height":null, - "latest_height": - { - "revision_height":2775, - "revision_number":0 - }, - "max_clock_drift": - { - "nanos":0, - "secs":40 - }, - "proof_specs": - [ - { - "inner_spec": - { - "child_order":[0,1], - "child_size":33, - "empty_child":"", - "hash":1, - "max_prefix_length":12, - "min_prefix_length":4 - }, - "leaf_spec": - { - "hash":1, - "length":1, - "prefix":"AA==", - "prehash_key":0, - "prehash_value":1 - }, - "max_depth":0, - "min_depth":0 - }, - { - "inner_spec": - { - "child_order":[0,1], - "child_size":32, - "empty_child":"", - "hash":1, - "max_prefix_length":1, - "min_prefix_length":1 - }, - "leaf_spec": - { - "hash":1, - "length":1, - "prefix":"AA==", - "prehash_key":0, - "prehash_value":1 - }, - "max_depth":0, - "min_depth":0 - } - ], - "trust_level": - { - "denominator":3, - "numerator":1 - }, - "trusting_period": - { - "nanos":0, - "secs":1209600 - }, - "type":"Tendermint", - "unbonding_period": - { - "nanos":0, - "secs":1814400 - }, - 
"upgrade_path":["upgrade","upgradedIBCState"] - }, - "type":"IdentifiedAnyClientState" - }, - "status":"success" -} -``` \ No newline at end of file diff --git a/guide/src/commands/queries/client.md b/guide/src/commands/queries/client.md deleted file mode 100644 index 1603f8332e..0000000000 --- a/guide/src/commands/queries/client.md +++ /dev/null @@ -1,306 +0,0 @@ - -# Table of Contents - - - -# Query Clients - -Use the `query clients` command to query the identifiers of all clients on a given chain. - -```shell -USAGE: - hermes query clients - -DESCRIPTION: - Query the identifiers of all clients on a chain - -POSITIONAL ARGUMENTS: - chain_id identifier of the chain to query - -FLAGS: - -s, --src-chain-id ID filter for clients which target a specific chain id (implies '-o') - -o, --omit-chain-ids omit printing the source chain for each client (default: false) -``` - -__Example__ - -Query all clients on `ibc-1`: - -```shell -hermes query clients ibc-1 -``` - -```json -Success: [ - ClientChain { - client_id: ClientId( - "07-tendermint-0", - ), - chain_id: ChainId { - id: "ibc-0", - version: 0, - }, - }, - ClientChain { - client_id: ClientId( - "07-tendermint-1", - ), - chain_id: ChainId { - id: "ibc-2", - version: 2, - }, - }, -] -``` - -Query all clients on `ibc-1` having `ibc-2` as their source chain: - -```shell -hermes query clients ibc-1 -s ibc-2 -``` - -```json -Success: [ - ClientId( - "07-tendermint-1", - ), -] -``` - -# Query Client Data - -Use the `query client` command to query the information about a specific client. - -```shell -USAGE: - hermes query client - -DESCRIPTION: - Query information about clients - -SUBCOMMANDS: - state query client full state - consensus query client consensus - connections query client connections -``` - -## Query the client state - -Use the `query client state` command to query the client state of a client: - -```shell -USAGE: - hermes query client state - -DESCRIPTION: - Query client full state - -POSITIONAL ARGUMENTS: - chain_id identifier of the chain to query - client_id identifier of the client to query - -FLAGS: - -H, --height HEIGHT the chain height which this query should reflect -``` - -__Example__ - -Query the state of client `07-tendermint-2` on `ibc-1`: - -```shell -hermes query client state ibc-1 07-tendermint-1 -``` - -```json -Success: ClientState { - chain_id: ChainId { - id: "ibc-2", - version: 2, - }, - trust_level: TrustThresholdFraction { - numerator: 1, - denominator: 3, - }, - trusting_period: 1209600s, - unbonding_period: 1814400s, - max_clock_drift: 3s, - frozen_height: Height { - revision: 0, - height: 0, - }, - latest_height: Height { - revision: 2, - height: 3069, - }, - upgrade_path: [ - "upgrade", - "upgradedIBCState", - ], - allow_update_after_expiry: true, - allow_update_after_misbehaviour: true, -} -``` - -## Query the client consensus state - -Use the `query client consensus` command to query the consensus states of a given client, or the state at a specified height: - -```shell -USAGE: - hermes query client consensus - -DESCRIPTION: - Query client consensus state - -POSITIONAL ARGUMENTS: - chain_id identifier of the chain to query - client_id identifier of the client to query - -FLAGS: - -c, --consensus-height CONSENSUS-HEIGHT - -s, --heights-only show only consensus heights - -H, --height HEIGHT the chain height context to be used, applicable only to a specific height -``` - -__Example__ - -Query the states of client `07-tendermint-0` on `ibc-0`: - -```shell -hermes query client consensus ibc-0 07-tendermint-0 
--heights-only -``` - -```json -Success: [ - Height { - revision: 1, - height: 3049, - }, - Height { - revision: 1, - height: 2888, - }, - Height { - revision: 1, - height: 2736, - }, - Height { - revision: 1, - height: 2729, - }, - Height { - revision: 1, - height: 2724, - }, - Height { - revision: 1, - height: 2717, - }, -] -``` - -Query `ibc-0` at height `2800` for the consensus state for height `2724`: - -```shell -hermes query client consensus ibc-0 07-tendermint-0 -c 2724 -h 2800 -``` - -```json -Success: ConsensusState { - timestamp: Time( - 2021-04-13T14:11:20.969154Z - ), - root: CommitmentRoot( - "371DD19003221B60162D42C78FD86ABF95A572F3D9497084584B75F97B05B70C" - ), - next_validators_hash: Hash::Sha256( - 740950668B6705A136D041914FC219045B1D0AD1C6A284C626BF5116005A98A7 - ), -} -``` - -## Query the identifiers of all connections associated with a given client - -Use the `query client connections` command to query the connections associated with a given client: - -```shell -USAGE: - hermes query client connections - -DESCRIPTION: - Query client connections - -POSITIONAL ARGUMENTS: - chain_id identifier of the chain to query - client_id identifier of the client to query - -FLAGS: - -H, --height HEIGHT the chain height which this query should reflect -``` - -__Example__ - -Query the connections of client `07-tendermint-0` on `ibc-0`: - -```shell -hermes query client connections ibc-0 07-tendermint-0 -``` - -```json -Success: [ - ConnectionId("connection-0"), - ConnectionId("connection-1"), -] -``` - -## Query for the header used in a client update at a certain height - -``` -USAGE: - hermes query client header - -DESCRIPTION: - Query for the header used in a client update at a certain height - -POSITIONAL ARGUMENTS: - chain_id identifier of the chain to query - client_id identifier of the client to query - consensus_height height of header to query - -FLAGS: - -H, --height HEIGHT the chain height context for the query -``` - -__Example__ - -Query for the header used in the `07-tendermint-0` client update at height 2724 on `ibc-0`: - -```shell -hermes query client header ibc-0 07-tendermint-0 2724 -``` - -```json -Success: [ - UpdateClient( - UpdateClient { - common: Attributes { - height: Height { - revision: 0, - height: 0, - }, - client_id: ClientId( - "07-tendermint-0", - ), - client_type: Tendermint, - consensus_height: Height { - revision: 1, - height: 2724, - }, - }, - header: Some( - Tendermint(...), - ), - }, - ), -] -``` diff --git a/guide/src/commands/queries/connection.md b/guide/src/commands/queries/connection.md deleted file mode 100644 index 2f2c4ad61f..0000000000 --- a/guide/src/commands/queries/connection.md +++ /dev/null @@ -1,155 +0,0 @@ -# Table of Contents - - - -# Query Connections - -Use the `query connections` command to query the identifiers of all connections on a given chain. - -```shell -USAGE: - hermes query connections - -DESCRIPTION: - Query the identifiers of all connections on a chain - -POSITIONAL ARGUMENTS: - chain_id identifier of the chain to query -``` - -__Example__ - -Query all connections on `ibc-1`: - -```shell -hermes query connections ibc-1 -``` - -```json -Success: [ - ConnectionId( - "connection-0", - ), - ConnectionId( - "connection-1", - ), -] -``` - -# Query Connection Data - -Use the `query connection` commands to query a specific connection. 
- -```shell -USAGE: - hermes query connection - -DESCRIPTION: - Query information about connection(s) - -SUBCOMMANDS: - end query connection end - channels query connection channels -``` - -## Query the connection end data - -Use the `query connection end` command to query the connection end: - -```shell -USAGE: - hermes query connection end - -DESCRIPTION: - query connection end - -POSITIONAL ARGUMENTS: - chain_id identifier of the chain to query - connection_id identifier of the connection to query - -FLAGS: - -H, --height HEIGHT height of the state to query -``` - -__Example__ - -Query the connection end of connection `connection-1` on `ibc-1`: - -```shell -hermes query connection end ibc-1 connection-1 -``` - -```json -Success: ConnectionEnd { - state: Open, - client_id: ClientId( - "07-tendermint-1", - ), - counterparty: Counterparty { - client_id: ClientId( - "07-tendermint-0", - ), - connection_id: Some( - ConnectionId( - "connection-0", - ), - ), - prefix: ibc, - }, - versions: [ - Version { - identifier: "1", - features: [ - "ORDER_ORDERED", - "ORDER_UNORDERED", - ], - }, - ], - delay_period: 0s, -} -``` - -## Query the identifiers of all channels associated with a given connection - -Use the `query connection channels` command to query the identifiers of the channels associated with a given connection: - -```shell -USAGE: - hermes query connection channels - -DESCRIPTION: - query connection channels - -POSITIONAL ARGUMENTS: - chain_id identifier of the chain to query - connection_id identifier of the connection to query -``` - -__Example__ - -Query the channels associated with connection `connection-1` on `ibc-1`: - -```shell -hermes query connection channels ibc-1 connection-1 -``` - -```json -Success: [ - PortChannelId { - channel_id: ChannelId( - "channel-0", - ), - port_id: PortId( - "transfer", - ), - }, - PortChannelId { - channel_id: ChannelId( - "channel-1", - ), - port_id: PortId( - "transfer", - ), - }, -] -``` diff --git a/guide/src/commands/queries/index.md b/guide/src/commands/queries/index.md deleted file mode 100644 index 67eeeba748..0000000000 --- a/guide/src/commands/queries/index.md +++ /dev/null @@ -1,35 +0,0 @@ -# Queries - -Hermes supports querying for different objects that exist on a configured chain. 
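-
-As with the commands shown earlier in this guide, the global `--json` (or `-j`) flag can be combined with most of the queries below to obtain machine-readable output, which is convenient to post-process with `jq`. For example:
-
-```shell
-# same result as `hermes query channels ibc-1`, but emitted as JSON and piped through jq
-hermes --json query channels ibc-1 | jq
-```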
- -The `query` command provides the following sub-commands: - -| CLI name | Description | -| ---------------------- | ------------------------------------------------------------------------------ | -| `client` | [Query information about clients](./client.md) | -| `clients` | [Query all clients](./client.md) | -| `connection` | [Query information about connections](./connection.md) | -| `connections` | [Query the identifiers of all connections on a chain](./connection.md) | -| `channel` | [Query information about channels](./channel.md) | -| `channels` | [Query the identifiers of all channels on a given chain](./channel.md) | -| `packet` | [Query information about packets](./packet.md) | -| `tx` | [Query information about transactions](./tx.md) | - -## Usage - -``` -USAGE: - hermes query - -DESCRIPTION: - Query objects from the chain - -SUBCOMMANDS: - client Query information about clients - clients Query clients - connection Query information about connections - connections Query the identifiers of all connections on a chain - channel Query information about channels - channels Query the identifiers of all channels on a given chain - packet Query information about packets -``` diff --git a/guide/src/commands/queries/packet.md b/guide/src/commands/queries/packet.md deleted file mode 100644 index fa7b8a9b5f..0000000000 --- a/guide/src/commands/queries/packet.md +++ /dev/null @@ -1,290 +0,0 @@ - -# Packet Queries - -Use the `query packet` commands to query information about packets. - - -```shell -USAGE: - hermes query packet - -OPTIONS: - -h, --help Print help information - -SUBCOMMANDS: - ack Query packet acknowledgment - acks Query packet acknowledgments - commitment Query packet commitment - commitments Query packet commitments - pending Output a summary of pending packets in both directions - unreceived-acks Query unreceived acknowledgments - unreceived-packets Query unreceived packets - help Print this message or the help of the given subcommand(s) -``` - -## Table of Contents - - - - -## Pending Packets - -Use the `query packet pending` command to query the sequence numbers of all packets that have not yet been received or acknowledged, at both ends of a channel. - -```shell -USAGE: - hermes query packet pending - -ARGS: - identifier of the chain at one end of the channel - port identifier on the chain given by - channel identifier on the chain given by -``` - -__Example__ - -Query the sequence numbers of all packets that either not yet been received or not yet been acknowledged, at both ends of the channel `channel-1`. - -```shell -$ hermes query packet pending ibc-0 tranfer channel-1 -``` - -```json -Success: Summary { - forward: PendingPackets { - unreceived_packets: [ - 2203, - ... - 2212, - ], - unreceived_acks: [ - 2183, - ... - 2202, - ], - }, - reverse: PendingPackets { - unreceived_packets: [ - 14, - ... - 23, - ], - unreceived_acks: [ - 4, - ... - 13, - ], - }, -} -``` - - -## Packet Commitments - -Use the `query packet commitments` command to query the sequence numbers of all packets that have been sent but not yet acknowledged (these are the packets that still have their commitments stored). 
- -```shell -USAGE: - hermes query packet commitments - -DESCRIPTION: - Query packet commitments - -POSITIONAL ARGUMENTS: - chain_id identifier of the chain to query - port_id identifier of the port to query - channel_id identifier of the channel to query -``` - -__Example__ - -Query `ibc-0` for the sequence numbers of packets that still have commitments on `ibc-0` and that were sent on `transfer` port and `channel-0`: - -```shell -hermes query packet commitments ibc-0 transfer channel-0 -``` - -```json -Success: PacketSeqs { - height: Height { - revision: 0, - height: 9154, - }, - seqs: [ - 1, - 2, - 3 - ], -} -``` - -## Packet Commitment with Sequence - -Use the `query packet commitment` command to query the commitment value of a packet with a given sequence number. - -```shell -USAGE: - hermes query packet commitment - -DESCRIPTION: - Query packet commitment - -POSITIONAL ARGUMENTS: - chain_id identifier of the chain to query - port_id identifier of the port to query - channel_id identifier of the channel to query - sequence sequence of packet to query - -FLAGS: - -H, --height HEIGHT height of the state to query -``` - -__Example__ - -Query `ibc-0` for the commitment of packet with sequence `3` sent on `transfer` port and `channel-0`: - -```shell -hermes query packet commitment ibc-0 transfer channel-0 3 -``` - -```json -Success: "F9458DC7EBEBCD6D18E983FCAB5BD752CC2A74532BBD50B812DB229997739EFC" -``` - -## Packet Acknowledgments - -Use the `query packet acknowledgments` command to query the sequence numbers of all packets that have been acknowledged. - -```shell -USAGE: - hermes query packet acks - -DESCRIPTION: - Query packet acknowledgments - -POSITIONAL ARGUMENTS: - chain_id identifier of the chain to query - port_id identifier of the port to query - channel_id identifier of the channel to query -``` - -__Example__ - -Query `ibc-1` for the sequence numbers of packets acknowledged that were received on `transfer` port and `channel-1`: - -```shell -hermes query packet acks ibc-1 transfer channel-1 -``` - -```json -Success: PacketSeqs { - height: Height { - revision: 1, - height: 9547, - }, - seqs: [ - 1, - 2, - 3 - ], -} -``` - -## Packet Acknowledgment with Sequence - -Use the `query packet acknowledgment` command to query the acknowledgment value of a packet with a given sequence number. - -```shell -USAGE: - hermes query packet ack - -DESCRIPTION: - Query packet acknowledgment - -POSITIONAL ARGUMENTS: - chain_id identifier of the chain to query - port_id identifier of the port to query - channel_id identifier of the channel to query - sequence sequence of packet to query - -FLAGS: - -H, --height HEIGHT height of the state to query -``` - -__Example__ - -Query `ibc-1` for the acknowledgment of packet with sequence `2` received on `transfer` port and `channel-1`: - -```shell -hermes query packet ack ibc-1 transfer channel-1 2 -``` - -```json -Success: "08F7557ED51826FE18D84512BF24EC75001EDBAF2123A477DF72A0A9F3640A7C" -``` - -## Unreceived Packets - -Use the `query packet unreceived-packets` command to query the sequence numbers of all packets that have been sent on the source chain but not yet received on the destination chain. 
- -```shell -USAGE: - hermes query packet unreceived-packets - -DESCRIPTION: - Query unreceived packets - -POSITIONAL ARGUMENTS: - chain_id identifier of the chain for the unreceived sequences - port_id port identifier - channel_id channel identifier -``` - -__Example__ - -Query `transfer` port and `channel-1` on `ibc-1` for the sequence numbers of packets sent on `ibc-0` but not yet received: - -```shell -hermes query packet unreceived-packets ibc-1 transfer channel-1 -``` - -```json -Success: [ - 1, - 2, - 3 -] -``` - -## Unreceived Acknowledgments - -Use the `query packet unreceived-acks` command to query the sequence numbers of all packets that have not yet been acknowledged. - -```shell -USAGE: - hermes query packet unreceived-acks - -DESCRIPTION: - Query unreceived acknowledgments - -POSITIONAL ARGUMENTS: - chain_id identifier of the chain to query the unreceived acknowledgments - port_id port identifier - channel_id channel identifier -``` - -__Example__ - -Query `transfer` port and `channel-0` on `ibc-0` for the sequence numbers of packets received by `ibc-1` but not yet acknowledged on `ibc-0`: - -```shell -hermes query packet unreceived-acks ibc-0 transfer channel-0 -``` - -```json -Success: [ - 1, - 2, - 3 -] -``` diff --git a/guide/src/commands/queries/tx.md b/guide/src/commands/queries/tx.md deleted file mode 100644 index 7af7c4c2a4..0000000000 --- a/guide/src/commands/queries/tx.md +++ /dev/null @@ -1,60 +0,0 @@ -# Tx Queries - -Use the `query tx` command to query information about transaction(s). - - -```shell -USAGE: - hermes query tx - -DESCRIPTION: - Query information about transactions - -SUBCOMMANDS: - events Query the events emitted by transaction -``` - -## Table of Contents - - - - -## Transaction Events - -Use the `query tx events` command to obtain a list of events that a chain generated as a consequence of -delivering a transaction. - -```shell -USAGE: - hermes query tx events - -DESCRIPTION: - Query the events emitted by transaction - -POSITIONAL ARGUMENTS: - chain_id identifier of the chain to query - hash transaction hash to query -``` - -__Example__ - -Query chain `ibc-0` for the events emitted due to transaction with hash -`6EDBBCBCB779F9FC9D6884ACDC4350E69720C4B362E4ACE6C576DE792F837490`: - -```shell -hermes query tx events ibc-0 6EDBBCBCB779F9FC9D6884ACDC4350E69720C4B362E4ACE6C576DE792F837490 -``` - -```json -Success: [ - SendPacket( - SendPacket { - height: Height { - revision: 4, - height: 6628239, - }, - packet: PortId("transfer") ChannelId("channel-139") Sequence(2), - }, - ), -] -``` \ No newline at end of file diff --git a/guide/src/commands/raw/channel-close.md b/guide/src/commands/raw/channel-close.md deleted file mode 100644 index b04befeeb0..0000000000 --- a/guide/src/commands/raw/channel-close.md +++ /dev/null @@ -1,134 +0,0 @@ -# Channel Close Handshake - -The channel close handshake involves two steps: init and confirm. - -## Table of Contents - - - -## Channel Close Init - -Use the `chan-close-init` command to initialize the closure of a channel. 
- -```shell -USAGE: - hermes tx raw chan-close-init - -DESCRIPTION: - Initiate the closing of a channel (ChannelCloseInit) - -POSITIONAL ARGUMENTS: - dst_chain_id identifier of the destination chain - src_chain_id identifier of the source chain - dst_conn_id identifier of the destination connection - dst_port_id identifier of the destination port - src_port_id identifier of the source port - -FLAGS: - -d, --dst-chan-id ID identifier of the destination channel (required) - -s, --src-chan-id ID identifier of the source channel (required) -``` - -__Example__ - -```shell -hermes tx raw chan-close-init ibc-0 ibc-1 connection-0 transfer transfer -d channel-0 -s channel-1 -``` - -```json -Success: CloseInitChannel( - CloseInit( - Attributes { - height: Height { - revision: 0, - height: 77, - }, - port_id: PortId( - "transfer", - ), - channel_id: Some( - ChannelId( - "channel-0", - ), - ), - connection_id: ConnectionId( - "connection-0", - ), - counterparty_port_id: PortId( - "transfer", - ), - counterparty_channel_id: Some( - ChannelId( - "channel-1", - ), - ), - }, - ), -) -``` - -## Channel Close Confirm - -Use the `chan-close-confirm` command to confirm the closure of a channel. - -```shell -USAGE: - hermes tx raw chan-close-confirm - -DESCRIPTION: - Confirm the closing of a channel (ChannelCloseConfirm) - -POSITIONAL ARGUMENTS: - dst_chain_id identifier of the destination chain - src_chain_id identifier of the source chain - dst_conn_id identifier of the destination connection - dst_port_id identifier of the destination port - src_port_id identifier of the source port - -FLAGS: - -d, --dst-chan-id ID identifier of the destination channel (required) - -s, --src-chan-id ID identifier of the source channel (required) -``` - -__Example__ - -```shell -hermes tx raw chan-close-confirm ibc-1 ibc-0 connection-1 transfer transfer -d channel-1 -s channel-0 -``` - -```json -Success: CloseConfirmChannel( - CloseConfirm( - Attributes { - height: Height { - revision: 1, - height: 551, - }, - port_id: PortId( - "transfer", - ), - channel_id: Some( - ChannelId( - "channel-1", - ), - ), - connection_id: ConnectionId( - "connection-1", - ), - counterparty_port_id: PortId( - "transfer", - ), - counterparty_channel_id: Some( - ChannelId( - "channel-0", - ), - ), - }, - ), -) -``` - -__NOTE__: The `cosmos-sdk` transfer module implementation does not allow the user (`hermes` in this case) to initiate the closing of channels. -Therefore, when using the Gaia release image, the `chan-close-init` command -fails as the `MsgChannelCloseInit` message included in the transaction is rejected. -To be able to test channel closure, you need to [patch](../../help.md#patching-gaia) your gaia deployments. diff --git a/guide/src/commands/raw/channel-open.md b/guide/src/commands/raw/channel-open.md deleted file mode 100644 index 86bfb569d1..0000000000 --- a/guide/src/commands/raw/channel-open.md +++ /dev/null @@ -1,293 +0,0 @@ -# Channel Open Handshake - -The `tx raw` commands can be used to establish a channel for a given connection. Only `unordered` channels are currently supported. - -
- -```mermaid -sequenceDiagram - autonumber - participant A as ibc-1 - participant B as ibc-0 - Note over A, B: No channel - A->>B: ChannelOpenInit - Note over B: channel: channel-0 - Note over B: channel: counterparty: none - B->>A: ChannelOpenTry - Note over A: channel: channel-1 - Note over A: channel: counterparty: channel-0 - A->>B: ChannelOpenAck - note over B: channel: channel-0 - note over B: counterparty: channel-1 - B->>A: ChannelOpenConfirm - Note over A, B: Channel open -``` - -
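-
-For reference, the four datagrams in the diagram above map onto the following `tx raw` command sequence. This is only a sketch, assuming the `connection-0`/`connection-1` connection pair and the channel identifiers produced in the step-by-step examples below.
-
-```shell
-hermes tx raw chan-open-init    ibc-0 ibc-1 connection-0 transfer transfer
-hermes tx raw chan-open-try     ibc-1 ibc-0 connection-1 transfer transfer -s channel-0
-hermes tx raw chan-open-ack     ibc-0 ibc-1 connection-0 transfer transfer -d channel-0 -s channel-1
-hermes tx raw chan-open-confirm ibc-1 ibc-0 connection-1 transfer transfer -d channel-1 -s channel-0
-```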
- -## Table of Contents - - - -## Channel Open Init - -Use the `chan-open-init` command to initialize a new channel. - -```shell -USAGE: - hermes tx raw chan-open-init - -DESCRIPTION: - Initialize a channel (ChannelOpenInit) - -POSITIONAL ARGUMENTS: - dst_chain_id identifier of the destination chain - src_chain_id identifier of the source chain - dst_conn_id identifier of the destination connection - dst_port_id identifier of the destination port - src_port_id identifier of the source port - -FLAGS: - -o, --order ORDER the channel ordering, valid options 'unordered' (default) and 'ordered' -``` - -__Example__ - -First, let's initialize the channel on `ibc-0` using an existing connection identified by `connection-0`: - -```shell -hermes tx raw chan-open-init ibc-0 ibc-1 connection-0 transfer transfer -``` - -```json -Success: OpenInitChannel( - OpenInit( - Attributes { - height: Height { - revision: 0, - height: 3091 - }, - port_id: PortId( - "transfer", - ), - channel_id: Some( - ChannelId( - "channel-0", - ), - ), - connection_id: ConnectionId( - "connection-0", - ), - counterparty_port_id: PortId( - "transfer", - ), - counterparty_channel_id: None, - }, - ), -) -``` - -A new channel has been initialized on `ibc-1` with identifier `channel-0`. - -> Note that the `counterparty_channel_id` field is currently empty. - - -## Channel Open Try - -Use the `chan-open-try` command to establish a counterparty to the channel on the other chain. - -```shell -USAGE: - hermes tx raw chan-open-try - -DESCRIPTION: - Relay the channel attempt (ChannelOpenTry) - -POSITIONAL ARGUMENTS: - dst_chain_id identifier of the destination chain - src_chain_id identifier of the source chain - dst_conn_id identifier of the destination connection - dst_port_id identifier of the destination port - src_port_id identifier of the source port - -FLAGS: - -s, --src-chan-id ID identifier of the source channel (required) -``` - -__Example__ - -Let's now create the counterparty to `channel-0` on chain `ibc-1`: - -```shell -hermes tx raw chan-open-try ibc-1 ibc-0 connection-1 transfer transfer -s channel-0 -``` - -```json -Success: OpenTryChannel( - OpenTry( - Attributes { - height: Height { - revision: 1, - height: 3213 - }, - port_id: PortId( - "transfer", - ), - channel_id: Some( - ChannelId( - "channel-1", - ), - ), - connection_id: ConnectionId( - "connection-1", - ), - counterparty_port_id: PortId( - "transfer", - ), - counterparty_channel_id: Some( - ChannelId( - "channel-0", - ), - ), - }, - ), -) -``` - -A new channel has been created on `ibc-1` with identifier `channel-1`. - -> Note that the field `counterparty_channel_id` points to the channel on `ibc-0`. - - -## Channel Open Ack - -Use the `chan-open-ack` command to acknowledge the channel on the initial chain. 
- -```shell -USAGE: - hermes tx raw chan-open-ack - -DESCRIPTION: - Relay acknowledgment of a channel attempt (ChannelOpenAck) - -POSITIONAL ARGUMENTS: - dst_chain_id identifier of the destination chain - src_chain_id identifier of the source chain - dst_conn_id identifier of the destination connection - dst_port_id identifier of the destination port - src_port_id identifier of the source port - -FLAGS: - -d, --dst-chan-id ID identifier of the destination channel (required) - -s, --src-chan-id ID identifier of the source channel (required) -``` - -__Example__ - -We can now acknowledge on `ibc-0` that `ibc-1` has accepted the opening of the channel: - -```shell -hermes tx raw chan-open-ack ibc-0 ibc-1 connection-0 transfer transfer -d channel-0 -s channel-1 -``` - -```json -Success: OpenAckChannel( - OpenAck( - Attributes { - height: Height { - revision: 0, - height: 3301 - }, - port_id: PortId( - "transfer", - ), - channel_id: Some( - ChannelId( - "channel-0", - ), - ), - connection_id: ConnectionId( - "connection-0", - ), - counterparty_port_id: PortId( - "transfer", - ), - counterparty_channel_id: Some( - ChannelId( - "channel-1", - ), - ), - }, - ), -) -``` - -> Note that the field `counterparty_channel_id` now points to the channel on `ibc-1`. - - -## Channel Open Confirm - -Use the `chan-open-confirm` command to confirm that the channel has been acknowledged, -and finish the handshake, after which the channel is open on both chains. - -```shell -USAGE: - hermes tx raw chan-open-confirm - -DESCRIPTION: - Confirm opening of a channel (ChannelOpenConfirm) - -POSITIONAL ARGUMENTS: - dst_chain_id identifier of the destination chain - src_chain_id identifier of the source chain - dst_conn_id identifier of the destination connection - dst_port_id identifier of the destination port - src_port_id identifier of the source port - -FLAGS: - -d, --dst-chan-id ID identifier of the destination channel (required) - -s, --src-chan-id ID identifier of the source channel (required) -``` - -__Example__ - -Confirm on `ibc-1` that `ibc-0` has accepted the opening of the channel, -after which the channel is open on both chains. - -```shell -hermes tx raw chan-open-confirm ibc-1 ibc-0 connection-1 transfer transfer -d channel-1 -s channel-0 -``` - -```json - OpenConfirm( - Attributes { - height: Height { - revision: 1, - height: 3483 - }, - port_id: PortId( - "transfer", - ), - channel_id: Some( - ChannelId( - "channel-1", - ), - ), - connection_id: ConnectionId( - "connection-1", - ), - counterparty_port_id: PortId( - "transfer", - ), - counterparty_channel_id: Some( - ChannelId( - "channel-0", - ), - ), - }, - ), -) -``` - -We have now successfully opened a channel over an existing connection between the two chains. - diff --git a/guide/src/commands/raw/client.md b/guide/src/commands/raw/client.md deleted file mode 100644 index b0041246ef..0000000000 --- a/guide/src/commands/raw/client.md +++ /dev/null @@ -1,98 +0,0 @@ -# Client -The `tx raw` commands can be used to create and update the on-chain IBC clients. - -## Table of Contents - - -## Create Client -Use the `create-client` command to create a new client. 
- -```shell -USAGE: - hermes tx raw create-client - -DESCRIPTION: - Create a client for source chain on destination chain - -POSITIONAL ARGUMENTS: - dst_chain_id identifier of the destination chain - src_chain_id identifier of the source chain - -``` - -__Example__ - -Create a new client of `ibc-1` on `ibc-0`: - -```shell -hermes tx raw create-client ibc-0 ibc-1 -``` - -```json -{ - Success: CreateClient( - CreateClient( - Attributes { - height: Height { revision: 0, height: 43 }, - client_id: ClientId( - "07-tendermint-0", - ), - client_type: Tendermint, - consensus_height: Height { revision: 1, height: 32 }, - }, - ), - ) -} -``` - -A new client is created with identifier `07-tendermint-0` - -## Update Client -Use the `update-client` command to update an existing client with a new consensus state. -Specific update and trusted heights can be specified. - -```shell -USAGE: - hermes tx raw update-client - -DESCRIPTION: - Update the specified client on destination chain - -POSITIONAL ARGUMENTS: - dst_chain_id identifier of the destination chain - dst_client_id identifier of the client to be updated on destination chain - -FLAGS: - -H, --target-height TARGET-HEIGHT - -t, --trusted-height TRUSTED-HEIGHT -``` - -__Example__ - -Update the client on `ibc-0` with latest header of `ibc-1` - -```shell -hermes tx raw update-client ibc-0 07-tendermint-0 -``` - -```json -Success: UpdateClient( - UpdateClient { - common: Attributes { - height: Height { revision: 0, height: 110 }, - client_id: ClientId( - "07-tendermint-0", - ), - client_type: Tendermint, - consensus_height: Height { revision: 1, height: 109 }, - }, - header: Some( - Tendermint( - Header {...}, - ), - ), - }, -) -``` - -The client with identifier `07-tendermint-0` has been updated with the consensus state at height `1-273`. diff --git a/guide/src/commands/raw/connection.md b/guide/src/commands/raw/connection.md deleted file mode 100644 index ce20d223fd..0000000000 --- a/guide/src/commands/raw/connection.md +++ /dev/null @@ -1,277 +0,0 @@ -# Connection Handshake - -The `tx raw` commands can be used to establish a connection between two clients. - -
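-
-The handshake below assumes that a client already exists on each chain. If needed, one can be created in each direction with the `create-client` command described above; this is a sketch, and the resulting client identifiers (e.g. `07-tendermint-0`) depend on how many clients already exist on each chain.
-
-```shell
-# client tracking ibc-1, hosted on ibc-0
-hermes tx raw create-client ibc-0 ibc-1
-# client tracking ibc-0, hosted on ibc-1
-hermes tx raw create-client ibc-1 ibc-0
-```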
- -```mermaid -sequenceDiagram - autonumber - participant A as ibc-1 - participant B as ibc-0 - Note over A, B: No connection - A->>B: ConnectionOpenInit - Note over B: connection: connection-0 - Note over B: counterparty: none - B->>A: ConnectionOpenTry - Note over A: connection: connection-1 - Note over A: counterparty: connection-0 - A->>B: ConnectionOpenAck - note over B: connection: connection-0 - note over B: counterparty: connection-1 - B->>A: ConnectionOpenConfirm - Note over A, B: Connection open -``` - -
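-
-For reference, the four datagrams in the diagram above correspond to the following `tx raw` command sequence. This is only a sketch, assuming the client identifiers `07-tendermint-0`/`07-tendermint-1` and the connection identifiers produced in the step-by-step examples below.
-
-```shell
-hermes tx raw conn-init    ibc-0 ibc-1 07-tendermint-0 07-tendermint-1
-hermes tx raw conn-try     ibc-1 ibc-0 07-tendermint-1 07-tendermint-0 -s connection-0
-hermes tx raw conn-ack     ibc-0 ibc-1 07-tendermint-0 07-tendermint-1 -d connection-0 -s connection-1
-hermes tx raw conn-confirm ibc-1 ibc-0 07-tendermint-1 07-tendermint-0 -d connection-1 -s connection-0
-```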
- -## Table of Contents - - - -## Connection Init - -Use the `conn-init` command to initialize a new connection on a chain. - -```shell -USAGE: - hermes tx raw conn-init - -DESCRIPTION: - Initialize a connection (ConnectionOpenInit) - -POSITIONAL ARGUMENTS: - dst_chain_id identifier of the destination chain - src_chain_id identifier of the source chain - dst_client_id identifier of the destination client - src_client_id identifier of the source client -``` - -__Example__ - -Given that two clients were previously created with identifier `07-tendermint-0` on chain `ibc-0` and -identifier `07-tendermint-1` on chain `ibc-1`, we can initialize a connection between the two clients. - -First, let's initialize the connection on `ibc-0`: - -```shell -hermes tx raw conn-init ibc-0 ibc-1 07-tendermint-0 07-tendermint-1 -``` - -```json -Success: OpenInitConnection( - OpenInit( - Attributes { - height: Height { - revision: 0, - height: 73, - }, - connection_id: Some( - ConnectionId( - "connection-0", - ), - ), - client_id: ClientId( - "07-tendermint-0", - ), - counterparty_connection_id: None, - counterparty_client_id: ClientId( - "07-tendermint-1", - ), - }, - ), -) -``` - -A new connection has been initialized on `ibc-0` with identifier `connection-0`. - -> Note that the `counterparty_connection_id` field is currently empty. - - -## Connection Try - -Use the `conn-try` command to establish a counterparty to the connection on the other chain. - -```shell -USAGE: - hermes tx raw conn-try - -DESCRIPTION: - Relay the connection attempt (ConnectionOpenTry) - -POSITIONAL ARGUMENTS: - dst_chain_id identifier of the destination chain - src_chain_id identifier of the source chain - dst_client_id identifier of the destination client - src_client_id identifier of the source client - -FLAGS: - -s, --src-conn-id ID identifier of the source connection (required) -``` - -__Example__ - -Let's now create the counterparty to `connection-0` on chain `ibc-1`: - -```shell -hermes tx raw conn-try ibc-1 ibc-0 07-tendermint-1 07-tendermint-0 -s connection-0 -``` - -```json -Success: OpenTryConnection( - OpenTry( - Attributes { - height: Height { - revision: 1, - height: 88, - }, - connection_id: Some( - ConnectionId( - "connection-1", - ), - ), - client_id: ClientId( - "07-tendermint-1", - ), - counterparty_connection_id: Some( - ConnectionId( - "connection-0", - ), - ), - counterparty_client_id: ClientId( - "07-tendermint-0", - ), - }, - ), -) -``` - -A new connection has been created on `ibc-1` with identifier `connection-1`. - -> Note that the field `counterparty_connection_id` points to the connection on `ibc-0`. - - -## Connection Ack - -Use the `conn-ack` command to acknowledge the connection on the initial chain. 
- -```shell -USAGE: - hermes tx raw conn-ack - -DESCRIPTION: - Relay acknowledgment of a connection attempt (ConnectionOpenAck) - -POSITIONAL ARGUMENTS: - dst_chain_id identifier of the destination chain - src_chain_id identifier of the source chain - dst_client_id identifier of the destination client - src_client_id identifier of the source client - -FLAGS: - -d, --dst-conn-id ID identifier of the destination connection (required) - -s, --src-conn-id ID identifier of the source connection (required) -``` - -__Example__ - -We can now acknowledge on `ibc-0` that `ibc-1` has accepted the connection attempt: - -```shell -hermes tx raw conn-ack ibc-0 ibc-1 07-tendermint-0 07-tendermint-1 -d connection-0 -s connection-1 -``` - -```json -Success: OpenAckConnection( - OpenAck( - Attributes { - height: Height { - revision: 0, - height: 206, - }, - connection_id: Some( - ConnectionId( - "connection-0", - ), - ), - client_id: ClientId( - "07-tendermint-0", - ), - counterparty_connection_id: Some( - ConnectionId( - "connection-1", - ), - ), - counterparty_client_id: ClientId( - "07-tendermint-1", - ), - }, - ), -) -``` - -> Note that the field `counterparty_connection_id` now points to the connection on `ibc-1`. - - -## Connection Confirm - -Use the `conn-confirm` command to confirm that the connection has been acknowledged, -and finish the handshake, after which the connection is open on both chains. - -```shell -USAGE: - hermes tx raw conn-confirm - -DESCRIPTION: - Confirm opening of a connection (ConnectionOpenConfirm) - -POSITIONAL ARGUMENTS: - dst_chain_id identifier of the destination chain - src_chain_id identifier of the source chain - dst_client_id identifier of the destination client - src_client_id identifier of the source client - -FLAGS: - -d, --dst-conn-id ID identifier of the destination connection (required) - -s, --src-conn-id ID identifier of the source connection (required) -``` - -__Example__ - -Confirm on `ibc-1` that `ibc-0` has accepted the connection attempt. - -```shell -hermes tx raw conn-confirm ibc-1 ibc-0 07-tendermint-1 07-tendermint-0 -d connection-1 -s connection-0 -``` - -```json -Success: OpenConfirmConnection( - OpenConfirm( - Attributes { - height: Height { - revision: 1, - height: 239, - }, - connection_id: Some( - ConnectionId( - "connection-1", - ), - ), - client_id: ClientId( - "07-tendermint-1", - ), - counterparty_connection_id: Some( - ConnectionId( - "connection-0", - ), - ), - counterparty_client_id: ClientId( - "07-tendermint-0", - ), - }, - ), -) -``` - -We have now successfully established a connection between the two chains. - diff --git a/guide/src/commands/raw/index.md b/guide/src/commands/raw/index.md deleted file mode 100644 index c4c4093015..0000000000 --- a/guide/src/commands/raw/index.md +++ /dev/null @@ -1,85 +0,0 @@ -# Raw Transactions - -There are a number of simple commands that perform minimal validation, build and send IBC transactions. 
-
-The `tx raw` command provides the following sub-commands:
-
-| CLI name             | Description                                                                                                 |
-| -------------------- | ----------------------------------------------------------------------------------------------------------- |
-| `create-client`      | [Create a client for source chain on destination chain](./client.md#create-client)                           |
-| `update-client`      | [Update the specified client on destination chain](./client.md#update-client)                                |
-| `conn-init`          | [Initialize a connection (ConnectionOpenInit)](./connection.md#connection-init)                              |
-| `conn-try`           | [Relay the connection attempt (ConnectionOpenTry)](./connection.md#connection-try)                           |
-| `conn-ack`           | [Relay acknowledgment of a connection attempt (ConnectionOpenAck)](./connection.md#connection-ack)           |
-| `conn-confirm`       | [Confirm opening of a connection (ConnectionOpenConfirm)](./connection.md#connection-confirm)                |
-| `chan-open-init`     | [Initialize a channel (ChannelOpenInit)](./channel-open.md#channel-open-init)                                |
-| `chan-open-try`      | [Relay the channel attempt (ChannelOpenTry)](./channel-open.md#channel-open-try)                             |
-| `chan-open-ack`      | [Relay acknowledgment of a channel attempt (ChannelOpenAck)](./channel-open.md#channel-open-ack)             |
-| `chan-open-confirm`  | [Confirm opening of a channel (ChannelOpenConfirm)](./channel-open.md#channel-open-confirm)                  |
-| `chan-close-init`    | [Initiate the closing of a channel (ChannelCloseInit)](./channel-close.md#channel-close-init)                |
-| `chan-close-confirm` | [Confirm the closing of a channel (ChannelCloseConfirm)](./channel-close.md#channel-close-confirm)           |
-| `ft-transfer`        | [Send a fungible token transfer test transaction (ICS20 MsgTransfer)](./packet.md#fungible-token-transfer)   |
-| `packet-recv`        | [Relay receive or timeout packets](./packet.md#relay-receive-and-timeout-packets)                            |
-| `packet-ack`         | [Relay acknowledgment packets](./packet.md#relay-acknowledgment-packets)                                     |
-
-The main purpose of these commands is to support development, testing, and continuous integration. These CLIs take quite a few parameters, which are explained in the individual sub-sections.
-
-At a high level, most commands follow this template:
-
-```shell
-hermes tx raw <ibc-datagram> <dst-chain-id> <src-chain-id> <dst-obj-id> <src-obj-id> [-d <dst-obj-id> -s <src-obj-id>]*
-```
-
-In the command template above:
-
-- `ibc-datagram` - identifies the "main" IBC message that is being sent, e.g. `conn-init`, `conn-try`, `chan-open-init`, etc. To ensure successful processing on the receiving chain, the majority of these commands build and send two messages: one `UpdateClient` message followed by the actual IBC message. These two messages are included in a single transaction. This is done for all IBC datagrams that include proofs collected from the source chain.
-
-    The messages that do not require proofs are:
-    - `MsgCreateClient` (`create-client` command),
-    - `MsgConnectionOpenInit` (`conn-init` command),
-    - `MsgChannelOpenInit` (`chan-open-init` command),
-    - `MsgChannelCloseInit` (`chan-close-init` command) and
-    - `MsgTransfer` (`ft-transfer` command).
-
-- `dst-chain-id` - the identifier of the chain where the transaction will be sent.
-
-- `src-chain-id` - the identifier of the chain that is queried for the data included in the transaction, e.g. connection data, client proofs, etc. To ensure correct on-chain state, the relayer also queries the destination chain; however, it does not include this information in the transaction sent to the destination chain.
-
-- `dst-obj-id` - the identifier of an object on the destination chain required by the datagram, e.g. the `client-id` associated with the connection on the destination chain in connection datagrams, or the `connection-id` in a `ConnOpenAck` datagram.
-
-- `src-obj-id` - the identifier of an object on the source chain required by the datagram, e.g. the `client-id` of the connection on the source chain.
-
-- More details about the `tx raw` commands can be found in the following sections:
-    - [Client](./client.md)
-    - [Connection](./connection.md)
-    - [Channel Open](./channel-open.md)
-    - [Channel Close](./channel-close.md)
-    - [Packet](./packet.md)
-
-## Usage
-
-```shell
-USAGE:
-    hermes tx raw
-
-DESCRIPTION:
-    Raw commands for sending transactions to a configured chain.
-
-SUBCOMMANDS:
-    help                 Get usage information
-    create-client        Create a client for source chain on destination chain
-    update-client        Update the specified client on destination chain
-    conn-init            Initialize a connection (ConnectionOpenInit)
-    conn-try             Relay the connection attempt (ConnectionOpenTry)
-    conn-ack             Relay acknowledgment of a connection attempt (ConnectionOpenAck)
-    conn-confirm         Confirm opening of a connection (ConnectionOpenConfirm)
-    chan-open-init       Initialize a channel (ChannelOpenInit)
-    chan-open-try        Relay the channel attempt (ChannelOpenTry)
-    chan-open-ack        Relay acknowledgment of a channel attempt (ChannelOpenAck)
-    chan-open-confirm    Confirm opening of a channel (ChannelOpenConfirm)
-    chan-close-init      Initiate the closing of a channel (ChannelCloseInit)
-    chan-close-confirm   Confirm the closing of a channel (ChannelCloseConfirm)
-    ft-transfer          Send a fungible token transfer test transaction (ICS20 MsgTransfer)
-    packet-recv          Relay receive or timeout packets
-    packet-ack           Relay acknowledgment packets
-```
diff --git a/guide/src/commands/raw/packet.md b/guide/src/commands/raw/packet.md
deleted file mode 100644
index e87bbacf29..0000000000
--- a/guide/src/commands/raw/packet.md
+++ /dev/null
@@ -1,272 +0,0 @@
-# Packet Tx Commands
-
-## Table of Contents
-
-
-
-## Fungible token transfer
-
-Use the `tx raw ft-transfer` command to send ICS-20 fungible token transfer packets.
-__NOTE:__ This command is mainly used for testing the packet features of the relayer.
-
-```shell
-USAGE:
-    hermes tx raw ft-transfer <dst_chain_id> <src_chain_id> <src_port_id> <src_channel_id> <amount>
-
-DESCRIPTION:
-    Send a fungible token transfer test transaction (ICS20 MsgTransfer)
-
-POSITIONAL ARGUMENTS:
-    dst_chain_id              identifier of the destination chain
-    src_chain_id              identifier of the source chain
-    src_port_id               identifier of the source port
-    src_channel_id            identifier of the source channel
-    amount                    amount of coins (samoleans, by default) to send (e.g. `100000`)
-
-FLAGS:
-    -o, --timeout-height-offset TIMEOUT-HEIGHT-OFFSET    timeout in number of blocks since current
-    -t, --timeout-seconds TIMEOUT-SECONDS                 timeout in seconds since current
-    -r, --receiver RECEIVER                               receiving account address on the destination chain
-    -d, --denom DENOM                                     denomination of the coins to send (default: samoleans)
-    -n, --number-msgs NUMBER-MSGS                         number of messages to send
-    -k, --key KEY                                         use the given signing key (default: `key_name` config)
-```
-
-__Example__
-
-Send two transfer packets from the `transfer` module and `channel-0` of `ibc-0` to `ibc-1`. Each transfer is for `9999` samoleans (the default denomination) with a timeout offset of `1000` blocks. The transfer fee is paid by the relayer account on `ibc-1`.
- -```shell -hermes tx raw ft-transfer ibc-1 ibc-0 transfer channel-0 9999 -o 1000 -n 2 -``` - -```json -Success: [ - SendPacket( - SendPacket { - height: Height { - revision: 0, - height: 431, - }, - packet: PortId("transfer") ChannelId("channel-0") Sequence(4), - }, - ), - SendPacket( - SendPacket { - height: Height { - revision: 0, - height: 431, - }, - packet: PortId("transfer") ChannelId("channel-0") Sequence(5), - }, - ), -] -``` - -The transfer packets are stored on `ibc-0` and can be relayed. - -> To send transfer packets with a custom receiver address use the `--receiver | -r` flag. - -```shell -hermes tx raw ft-transfer ibc-1 ibc-0 transfer channel-0 9999 -o 1000 -n 1 -r board:1938586739 -``` - -```json -Success: [ - SendPacket( - SendPacket { - height: Height { - revision: 0, - height: 546, - }, - packet: PortId("transfer") ChannelId("channel-0") Sequence(7), - }, - ), -] -``` - -## Relay receive and timeout packets - -Use the `tx raw packet-recv` command to relay the packets sent but not yet received. If the sent packets have timed out then a timeout packet is sent to the source chain. - -```shell -USAGE: - hermes tx raw packet-recv - -DESCRIPTION: - Relay receive or timeout packets - -POSITIONAL ARGUMENTS: - dst_chain_id identifier of the destination chain - src_chain_id identifier of the source chain - src_port_id identifier of the source port - src_channel_id identifier of the source channel -``` - -__Example__ - -Send the two transfer packets to the `ibc-1` module bound to the `transfer` port and the `channel-0`'s counterparty. - -__NOTE__: The relayer prepends a client update message before the receive messages. - -```shell -hermes tx raw packet-recv ibc-1 ibc-0 transfer channel-0 -``` - -```json -Success: [ - UpdateClient( - UpdateClient { - common: Attributes { - height: Height { - revision: 1, - height: 439, - }, - client_id: ClientId( - "07-tendermint-1", - ), - client_type: Tendermint, - consensus_height: Height { - revision: 0, - height: 449, - }, - }, - header: Some( - Tendermint(...), - ), - }, - ), - WriteAcknowledgement( - WriteAcknowledgement { - height: Height { - revision: 1, - height: 439, - }, - packet: PortId("transfer") ChannelId("channel-0") Sequence(4), - ack: [ - 123, - 34, - 114, - 101, - 115, - 117, - 108, - 116, - 34, - 58, - 34, - 65, - 81, - 61, - 61, - 34, - 125, - ], - }, - ), - WriteAcknowledgement( - WriteAcknowledgement { - height: Height { - revision: 1, - height: 439, - }, - packet: PortId("transfer") ChannelId("channel-0") Sequence(5), - ack: [ - 123, - 34, - 114, - 101, - 115, - 117, - 108, - 116, - 34, - 58, - 34, - 65, - 81, - 61, - 61, - 34, - 125, - ], - }, - ), -] -``` - -Both packets have been relayed to `ibc-1` and acknowledged. - -## Relay acknowledgment packets - -Use the `tx raw packet-ack` command to relay acknowledgments to the original source of the packets. - -```shell -USAGE: - hermes tx raw packet-ack - -DESCRIPTION: - Relay acknowledgment packets - -POSITIONAL ARGUMENTS: - dst_chain_id identifier of the destination chain - src_chain_id identifier of the source chain - src_port_id identifier of the source port - src_channel_id identifier of the source channel -``` - -__Example__ - -Send the acknowledgments to the `ibc-0` module bound to the `transfer` port and the `channel-1`'s counterparty. - -__NOTE__: The relayer prepends a client update message before the acknowledgments. 
- -```shell -hermes tx raw packet-ack ibc-0 ibc-1 transfer channel-1 -``` - -```json -Success: [ - UpdateClient( - UpdateClient { - common: Attributes { - height: Height { - revision: 0, - height: 495, - }, - client_id: ClientId( - "07-tendermint-0", - ), - client_type: Tendermint, - consensus_height: Height { - revision: 1, - height: 483, - }, - }, - header: Some( - Tendermint(...), - ), - }, - ), - AcknowledgePacket( - AcknowledgePacket { - height: Height { - revision: 0, - height: 495, - }, - packet: PortId("transfer") ChannelId("channel-0") Sequence(4), - }, - ), - AcknowledgePacket( - AcknowledgePacket { - height: Height { - revision: 0, - height: 495, - }, - packet: PortId("transfer") ChannelId("channel-0") Sequence(5), - }, - ), -] -``` - -Both acknowledgments have been received on `ibc-0`. diff --git a/guide/src/commands/relaying/clear.md b/guide/src/commands/relaying/clear.md deleted file mode 100644 index a9f2c581cf..0000000000 --- a/guide/src/commands/relaying/clear.md +++ /dev/null @@ -1,442 +0,0 @@ -# Clearing Packets - -## `clear packets` - -This command clears outstanding packets on a given channel in both directions, -by issuing the appropriate [packet-recvs](../raw/packet.md#relay-receive-and-timeout-packets) -and [packet-acks](../raw/packet.md#relay-acknowledgment-packets). - -### Usage - -``` -Clear outstanding packets (i.e. packet-recv and packet-ack) on a given channel in both directions. - -The channel is identified by the chain, port, and channel IDs at one of its ends - -USAGE: - hermes clear packets - -ARGS: - identifier of the chain - identifier of the port - identifier of the channel - -OPTIONS: - -h, --help Print help information -``` - -### Example - -1. Without Hermes running, send 3 packets over a channel, here `channel-13`: - -``` -❯ hermes tx raw ft-transfer ibc1 ibc0 transfer channel-13 9999 -o 1000 -n 3 -2022-02-24T14:16:28.295526Z INFO ThreadId(01) using default configuration from '/Users/coromac/.hermes/config.toml' -2022-02-24T14:16:28.330860Z INFO ThreadId(15) send_tx{id=ibc0}: refresh: retrieved account sequence=61 number=1 -2022-02-24T14:16:28.350022Z INFO ThreadId(15) wait_for_block_commits: waiting for commit of tx hashes(s) AE4C3186778488E45670EB7303FA77E69B39F4E7C7494B05EC51E55136A373D6 id=ibc0 -Success: [ - SendPacket( - SendPacket { - height: Height { - revision: 0, - height: 86208, - }, - packet: Packet { - sequence: Sequence( - 14, - ), - source_port: PortId( - "transfer", - ), - source_channel: ChannelId( - "channel-13", - ), - destination_port: PortId( - "transfer", - ), - destination_channel: ChannelId( - "channel-12", - ), - data: [ ... ], - timeout_height: Height { - revision: 0, - height: 87203, - }, - timeout_timestamp: Timestamp { - time: None, - }, - }, - }, - ), - SendPacket( - SendPacket { - height: Height { - revision: 0, - height: 86208, - }, - packet: Packet { - sequence: Sequence( - 15, - ), - source_port: PortId( - "transfer", - ), - source_channel: ChannelId( - "channel-13", - ), - destination_port: PortId( - "transfer", - ), - destination_channel: ChannelId( - "channel-12", - ), - data: [ ... 
], - timeout_height: Height { - revision: 0, - height: 87203, - }, - timeout_timestamp: Timestamp { - time: None, - }, - }, - }, - ), - SendPacket( - SendPacket { - height: Height { - revision: 0, - height: 86208, - }, - packet: Packet { - sequence: Sequence( - 16, - ), - source_port: PortId( - "transfer", - ), - source_channel: ChannelId( - "channel-13", - ), - destination_port: PortId( - "transfer", - ), - destination_channel: ChannelId( - "channel-12", - ), - data: [ ... ], - timeout_height: Height { - revision: 0, - height: 87203, - }, - timeout_timestamp: Timestamp { - time: None, - }, - }, - }, - ), -] -``` - -2. Because the relayer is not running these packets won't be relayed, -as can be seen with the `query packet unreceived-packets` command: - -``` -❯ hermes query packet unreceived-packets ibc1 transfer channel-13 -2022-02-24T14:21:28.874190Z INFO ThreadId(01) using default configuration from '/Users/coromac/.hermes/config.toml' -Success: [ - 14, - 15, - 16, -] -``` - -3. We can clear them manually using the `clear packets` command: - -``` -❯ hermes clear packets ibc0 transfer channel-13 -2022-02-24T14:17:25.748422Z INFO ThreadId(01) using default configuration from '/Users/coromac/.hermes/config.toml' -2022-02-24T14:17:25.799704Z INFO ThreadId(01) PacketRecvCmd{src_chain=ibc0 src_port=transfer src_channel=channel-13 dst_chain=ibc1}: found unprocessed SendPacket events for [Sequence(14), Sequence(15), Sequence(16)] (first 10 shown here; total=3) -2022-02-24T14:17:25.827177Z INFO ThreadId(01) PacketRecvCmd{src_chain=ibc0 src_port=transfer src_channel=channel-13 dst_chain=ibc1}: ready to fetch a scheduled op. data with batch of size 3 targeting Destination -2022-02-24T14:17:26.504798Z INFO ThreadId(01) PacketRecvCmd{src_chain=ibc0 src_port=transfer src_channel=channel-13 dst_chain=ibc1}:relay{odata=E96CV_cA5P ->Destination @0-86218; len=3}: assembled batch of 4 message(s) -2022-02-24T14:17:26.508873Z INFO ThreadId(29) send_tx{id=ibc1}: refresh: retrieved account sequence=54 number=1 -2022-02-24T14:17:26.561715Z INFO ThreadId(29) wait_for_block_commits: waiting for commit of tx hashes(s) 07AA83524257105CC476063932A560893BE8F4E94C679BFD00F970FC248647E0 id=ibc1 -2022-02-24T14:17:31.948950Z INFO ThreadId(01) PacketRecvCmd{src_chain=ibc0 src_port=transfer src_channel=channel-13 dst_chain=ibc1}:relay{odata=E96CV_cA5P ->Destination @0-86218; len=3}: [Sync->ibc1] result events: - UpdateClientEv(h: 0-86215, cs_h: 07-tendermint-3(0-86219)) - WriteAcknowledgementEv(WriteAcknowledgement - h:0-86215, seq:14, path:channel-13/transfer->channel-12/transfer, toh:0-87203, tos:Timestamp(NoTimestamp))) - WriteAcknowledgementEv(WriteAcknowledgement - h:0-86215, seq:15, path:channel-13/transfer->channel-12/transfer, toh:0-87203, tos:Timestamp(NoTimestamp))) - WriteAcknowledgementEv(WriteAcknowledgement - h:0-86215, seq:16, path:channel-13/transfer->channel-12/transfer, toh:0-87203, tos:Timestamp(NoTimestamp))) - - -2022-02-24T14:17:31.949192Z INFO ThreadId(01) PacketRecvCmd{src_chain=ibc0 src_port=transfer src_channel=channel-13 dst_chain=ibc1}:relay{odata=E96CV_cA5P ->Destination @0-86218; len=3}: success -2022-02-24T14:17:31.989215Z INFO ThreadId(01) PacketAckCmd{src_chain=ibc1 src_port=transfer src_channel=channel-12 dst_chain=ibc0}: found unprocessed WriteAcknowledgement events for [Sequence(14), Sequence(15), Sequence(16)] (first 10 shown here; total=3) -2022-02-24T14:17:32.013500Z INFO ThreadId(01) PacketAckCmd{src_chain=ibc1 src_port=transfer src_channel=channel-12 dst_chain=ibc0}: ready to fetch 
a scheduled op. data with batch of size 3 targeting Destination -2022-02-24T14:17:33.211930Z INFO ThreadId(01) PacketAckCmd{src_chain=ibc1 src_port=transfer src_channel=channel-12 dst_chain=ibc0}:relay{odata=L4fnSXkxL_ ->Destination @0-86215; len=3}: assembled batch of 4 message(s) -2022-02-24T14:17:33.215803Z INFO ThreadId(15) send_tx{id=ibc0}: refresh: retrieved account sequence=62 number=1 -2022-02-24T14:17:33.245229Z INFO ThreadId(15) wait_for_block_commits: waiting for commit of tx hashes(s) 62C69B1C46AF45182D5D99C6CB5EB301F8A402726772BA4EE067B18C68F2A4D6 id=ibc0 -2022-02-24T14:17:37.465489Z INFO ThreadId(01) PacketAckCmd{src_chain=ibc1 src_port=transfer src_channel=channel-12 dst_chain=ibc0}:relay{odata=L4fnSXkxL_ ->Destination @0-86215; len=3}: [Sync->ibc0] result events: - UpdateClientEv(h: 0-86221, cs_h: 07-tendermint-3(0-86216)) - AcknowledgePacketEv(h:0-86221, seq:14, path:channel-13/transfer->channel-12/transfer, toh:0-87203, tos:Timestamp(NoTimestamp))) - AcknowledgePacketEv(h:0-86221, seq:15, path:channel-13/transfer->channel-12/transfer, toh:0-87203, tos:Timestamp(NoTimestamp))) - AcknowledgePacketEv(h:0-86221, seq:16, path:channel-13/transfer->channel-12/transfer, toh:0-87203, tos:Timestamp(NoTimestamp))) - - -2022-02-24T14:17:37.465802Z INFO ThreadId(01) PacketAckCmd{src_chain=ibc1 src_port=transfer src_channel=channel-12 dst_chain=ibc0}:relay{odata=L4fnSXkxL_ ->Destination @0-86215; len=3}: success -Success: [ - UpdateClient( - UpdateClient { - common: Attributes { - height: Height { - revision: 0, - height: 86215, - }, - client_id: ClientId( - "07-tendermint-3", - ), - client_type: Tendermint, - consensus_height: Height { - revision: 0, - height: 86219, - }, - }, - header: Some( - Tendermint( - Header {...}, - ), - ), - }, - ), - WriteAcknowledgement( - WriteAcknowledgement { - height: Height { - revision: 0, - height: 86215, - }, - packet: Packet { - sequence: Sequence( - 14, - ), - source_port: PortId( - "transfer", - ), - source_channel: ChannelId( - "channel-13", - ), - destination_port: PortId( - "transfer", - ), - destination_channel: ChannelId( - "channel-12", - ), - data: [ ... ], - timeout_height: Height { - revision: 0, - height: 87203, - }, - timeout_timestamp: Timestamp { - time: None, - }, - }, - ack: [ ... ], - }, - ), - WriteAcknowledgement( - WriteAcknowledgement { - height: Height { - revision: 0, - height: 86215, - }, - packet: Packet { - sequence: Sequence( - 15, - ), - source_port: PortId( - "transfer", - ), - source_channel: ChannelId( - "channel-13", - ), - destination_port: PortId( - "transfer", - ), - destination_channel: ChannelId( - "channel-12", - ), - data: [ ... ], - timeout_height: Height { - revision: 0, - height: 87203, - }, - timeout_timestamp: Timestamp { - time: None, - }, - }, - ack: [ ... ], - }, - ), - WriteAcknowledgement( - WriteAcknowledgement { - height: Height { - revision: 0, - height: 86215, - }, - packet: Packet { - sequence: Sequence( - 16, - ), - source_port: PortId( - "transfer", - ), - source_channel: ChannelId( - "channel-13", - ), - destination_port: PortId( - "transfer", - ), - destination_channel: ChannelId( - "channel-12", - ), - data: [ ... ], - timeout_height: Height { - revision: 0, - height: 87203, - }, - timeout_timestamp: Timestamp { - time: None, - }, - }, - ack: [ ... 
], - }, - ), - UpdateClient( - UpdateClient { - common: Attributes { - height: Height { - revision: 0, - height: 86221, - }, - client_id: ClientId( - "07-tendermint-3", - ), - client_type: Tendermint, - consensus_height: Height { - revision: 0, - height: 86216, - }, - }, - header: Some( - Tendermint( - Header {...}, - ), - ), - }, - ), - AcknowledgePacket( - AcknowledgePacket { - height: Height { - revision: 0, - height: 86221, - }, - packet: Packet { - sequence: Sequence( - 14, - ), - source_port: PortId( - "transfer", - ), - source_channel: ChannelId( - "channel-13", - ), - destination_port: PortId( - "transfer", - ), - destination_channel: ChannelId( - "channel-12", - ), - data: [], - timeout_height: Height { - revision: 0, - height: 87203, - }, - timeout_timestamp: Timestamp { - time: None, - }, - }, - }, - ), - AcknowledgePacket( - AcknowledgePacket { - height: Height { - revision: 0, - height: 86221, - }, - packet: Packet { - sequence: Sequence( - 15, - ), - source_port: PortId( - "transfer", - ), - source_channel: ChannelId( - "channel-13", - ), - destination_port: PortId( - "transfer", - ), - destination_channel: ChannelId( - "channel-12", - ), - data: [], - timeout_height: Height { - revision: 0, - height: 87203, - }, - timeout_timestamp: Timestamp { - time: None, - }, - }, - }, - ), - AcknowledgePacket( - AcknowledgePacket { - height: Height { - revision: 0, - height: 86221, - }, - packet: Packet { - sequence: Sequence( - 16, - ), - source_port: PortId( - "transfer", - ), - source_channel: ChannelId( - "channel-13", - ), - destination_port: PortId( - "transfer", - ), - destination_channel: ChannelId( - "channel-12", - ), - data: [], - timeout_height: Height { - revision: 0, - height: 87203, - }, - timeout_timestamp: Timestamp { - time: None, - }, - }, - }, - ), -] -``` - -4. The packets have now been successfully relayed: - -``` -❯ hermes query packet unreceived-packets ibc1 transfer channel-13 -2022-02-24T14:21:28.874190Z INFO ThreadId(01) using default configuration from '/Users/coromac/.hermes/config.toml' -Success: [] -``` - diff --git a/guide/src/commands/relaying/handshakes.md b/guide/src/commands/relaying/handshakes.md deleted file mode 100644 index a504cffafd..0000000000 --- a/guide/src/commands/relaying/handshakes.md +++ /dev/null @@ -1,59 +0,0 @@ -# Relaying of Handshake Messages - -This section describes the configuration and commands that can be used to start the relayer and relay both handshake and packets -for connections and channels. - -## The `start` Command - -To relay packets and handshake messages configure the `mode` section of the configuration file like so: -```toml -[global] -log_level = 'info' - -[mode] - -[mode.clients] -enabled = true -# ... - -[mode.connections] -enabled = true - -[mode.channels] -enabled = true - -[mode.packets] -enabled = true -# ... -``` - -Then start hermes using the start command: - -```shell -hermes start -``` - -The relayer sends handshake and packet transactions triggered by IBC events. - -## Completing Channel Handshakes - -After the relayer is started using the `start` command, it scans the chain state and will resume the handshake for any -channels or connections that are not in open state. It then listens to IBC events emitted by any of -the configured chains. - -Assuming the events are coming from a `source` chain, the relayer determines the `destination` chain and builds the handshake messages based on these events. These are then sent to the `destination` chain. 
-
-In addition to the events described in [Packet Relaying](packets.md#packet-relaying), the following IBC events may be handled:
-
-- Channels (if `mode.channels.enabled=true`):
-    - `chan_open_init`: the relayer builds a `MsgChannelOpenTry` message
-    - `chan_open_try`: the relayer builds a `MsgChannelOpenAck` message
-    - `chan_open_ack`: the relayer builds a `MsgChannelOpenConfirm` message
-    - `chan_open_confirm`: no message is sent out, channel opening is finished
-
-- Connections (if `mode.connections.enabled=true`):
-    - `conn_open_init`: the relayer builds a `MsgConnectionOpenTry` message
-    - `conn_open_try`: the relayer builds a `MsgConnectionOpenAck` message
-    - `conn_open_ack`: the relayer builds a `MsgConnectionOpenConfirm` message
-    - `conn_open_confirm`: no message is sent out, connection opening is finished
-
diff --git a/guide/src/commands/relaying/index.md b/guide/src/commands/relaying/index.md
deleted file mode 100644
index c9d4d06b06..0000000000
--- a/guide/src/commands/relaying/index.md
+++ /dev/null
@@ -1,20 +0,0 @@
-# Relaying
-This section describes the types of relaying that hermes can perform.
-
-Hermes can send transactions triggered by IBC events. It currently handles channel handshake and packet events:
- - [packet messages only](./packets.md#packet-relaying)
- - [channel and packet messages](./handshakes.md)
-
-## The `start` Command
-
-The `start` command can be used to start hermes in IBC event listen mode.
-
-```shell
-USAGE:
-    hermes start
-
-DESCRIPTION:
-    Start the relayer in multi-chain mode. Relays packets and channel handshake messages between all chains in the config.
-```
-
-As described in the next sub-sections, the type of relaying can be configured in the `[mode]` section of the configuration file, by enabling the relevant modes.
diff --git a/guide/src/commands/relaying/packets.md b/guide/src/commands/relaying/packets.md
deleted file mode 100644
index 760e51297d..0000000000
--- a/guide/src/commands/relaying/packets.md
+++ /dev/null
@@ -1,64 +0,0 @@
-# Packet Relaying
-
-This section describes the configuration and commands that can be used to start the relayer and relay packets over one or multiple paths.
-
-## Table of Contents
-
-
-
-## The `start` Command
-
-To relay packets only, configure the `mode` section of the configuration file like so:
-```toml
-[global]
-log_level = 'info'
-
-[mode]
-
-[mode.clients]
-enabled = true
-# ...
-
-[mode.connections]
-enabled = false
-
-[mode.channels]
-enabled = false
-
-[mode.packets]
-enabled = true
-# ...
-```
-
-Then start hermes using the start command:
-
-```shell
-hermes start
-```
-
-The relayer sends packet transactions triggered by IBC packet events for all open channels between the configured chains.
-This is also referred to as packet streaming.
-
-## Packet Streaming
-
-After the relayer is started using the `start` command, it listens to IBC packet events emitted by any of
-the configured chains. Assuming the events are coming from a `source` chain, the relayer builds packet messages
-based on these events and sends them either to the `source` chain or to the counterparty (`destination`) chain.
-
-Current events and actions are:
-
-- `send_packet`: the relayer builds a packet message with the `packet` obtained from the event and any required proofs obtained from the counterparty of the chain where the message is sent. 
The concrete packet is: - - `MsgRecvPacket`, sent to `destination` chain if the channel is in open state on the `destination` chain, and a timeout has not occurred, - - `MsgTimeout`, sent to the `source` chain if the channel is in open state on the `destination` chain, but a timeout has occurred. - - `MsgTimeoutOnClose`, sent to the `source` chain if the channel is in closed state on the `destination` chain. -- `write_acknowledgement`: the relayer builds a `MsgAcknowledgement` packet that is sent to the `destination` chain. - -In addition to these events, the relayer will also handle channel closing events: -- `chan_close_init`: the relayer builds a `MsgChannelCloseConfirm` and sends it to the `destination` chain - -## Packet Delay - -If the relay path is using a non-zero delay connection, then `hermes` will delay all packet transactions. The delay is relative to the submission time for the client update at the height required by the packet proof. -The delay is used to prevent light client attacks and ensures that misbehavior detection finalizes before the transaction is submitted. -For more information on the misbehavior detector see [the misbehaviour section](../misbehaviour/index.md#monitoring-misbehaviour-and-evidence-submission). - diff --git a/guide/src/commands/upgrade/index.md b/guide/src/commands/upgrade/index.md deleted file mode 100644 index 135d9c137e..0000000000 --- a/guide/src/commands/upgrade/index.md +++ /dev/null @@ -1,21 +0,0 @@ -# Client Upgrade - -## Client Upgrade Command - -Use the `upgrade client` command to upgrade a client after a chain upgrade. - -```shell -USAGE: - hermes upgrade client - -DESCRIPTION: - Upgrade an IBC client - -POSITIONAL ARGUMENTS: - dst_chain_id identifier of the destination chain - dst_client_id identifier of the client to be upgraded on destination chain -``` - -__Example__ - -Here is [an example](./test.md) of a chain upgrade proposal submission and client upgrade. diff --git a/guide/src/commands/upgrade/test.md b/guide/src/commands/upgrade/test.md deleted file mode 100644 index 56a2f3ccb3..0000000000 --- a/guide/src/commands/upgrade/test.md +++ /dev/null @@ -1,232 +0,0 @@ -# Testing Client Upgrade - -## Prerequisites - -- gaiad `(v4.2.*)`, for example: - -```shell -gaiad version --log_level error --long | head -n4 -``` - -```shell -name: gaia -server_name: gaiad -version: v4.2.0 -commit: 535be14a8bdbfeb0d950914b5baa2dc72c6b081c -``` - -## Testing procedure - -1. Start two gaia instances and initialize hermes: - - ```shell - ./scripts/dev-env ~/.hermes/config.toml ibc-0 ibc-1 - ``` - The `one-chain` script is invoked for each chain and modifies the `genesis.json` file to use a short window for governance proposals (`200s` for `max_deposit_period` and `voting_period`). Therefore, an upgrade proposal can be submitted, voted on and accepted within a short time. - -2. Create one client on `ibc-1` for `ibc-0`: - - ```shell - hermes create client ibc-1 ibc-0 - ``` - - ```json - Success: CreateClient( - CreateClient( - Attributes { - height: Height { revision: 1, height: 9 }, - client_id: ClientId( - "07-tendermint-0", - ), - client_type: Tendermint, - consensus_height: Height { revision: 0, height: 18 }, - }, - ), - ) - ``` - -3. Create and submit an upgrade plan for chain `ibc-0`: - - Use the hermes test command to make an upgrade proposal. In the example below a software upgrade proposal is made for `ibc-0`, for the height `300` blocks from latest height. `10000000stake` is deposited. 
- The proposal includes the upgraded client state constructed from the state of `07-tendermint-0` client on `ibc-1` that was created in the previous step. In addition, the `unbonding_period` of the client is set to some new value (`400h`) - - ```shell - hermes tx raw upgrade-chain ibc-0 ibc-1 07-tendermint-0 10000000 300 - ``` - - ```text - Success: transaction::Hash(CE98D8D98091BA8016BD852D18056E54C4CB3C4525E7F40DD3C40B4FD0F2482B) - ``` - - Note that the height offset should be picked such that the proposal plan height is reached after the `200s` voting period. - - 4. Verify that the proposal was accepted: - - Query the upgrade plan to check that it was submitted correctly. Note the `height` at which the proposal will take effect (chain halts). Also `status: PROPOSAL_STATUS_VOTING_PERIOD`. - - ```shell - gaiad query gov proposal 1 --home data/ibc-0/ - ``` - - ```text - content: - '@type': /cosmos.upgrade.v1beta1.SoftwareUpgradeProposal - description: upgrade the chain software and unbonding period - plan: - height: "332" - info: upgrade the chain software and unbonding period - name: test - time: "0001-01-01T00:00:00Z" - upgraded_client_state: - '@type': /ibc.lightclients.tendermint.v1.ClientState - allow_update_after_expiry: true - allow_update_after_misbehaviour: true - chain_id: ibc-0 - frozen_height: - revision_height: "0" - revision_number: "0" - latest_height: - revision_height: "333" - revision_number: "0" - max_clock_drift: 0s - proof_specs: - - inner_spec: - child_order: - - 0 - - 1 - child_size: 33 - empty_child: null - hash: SHA256 - max_prefix_length: 12 - min_prefix_length: 4 - leaf_spec: - hash: SHA256 - length: VAR_PROTO - prefix: AA== - prehash_key: NO_HASH - prehash_value: SHA256 - max_depth: 0 - min_depth: 0 - - inner_spec: - child_order: - - 0 - - 1 - child_size: 32 - empty_child: null - hash: SHA256 - max_prefix_length: 1 - min_prefix_length: 1 - leaf_spec: - hash: SHA256 - length: VAR_PROTO - prefix: AA== - prehash_key: NO_HASH - prehash_value: SHA256 - max_depth: 0 - min_depth: 0 - trust_level: - denominator: "0" - numerator: "0" - trusting_period: 0s - unbonding_period: 1440000s - upgrade_path: - - upgrade - - upgradedIBCState - title: upgrade_ibc_clients - deposit_end_time: "2021-04-12T16:33:37.187389Z" - final_tally_result: - abstain: "0" - "no": "0" - no_with_veto: "0" - "yes": "0" - proposal_id: "1" - status: PROPOSAL_STATUS_VOTING_PERIOD - submit_time: "2021-04-12T16:30:17.187389Z" - total_deposit: - - amount: "10000000" - denom: stake - voting_end_time: "2021-04-12T16:33:37.187389Z" - voting_start_time: "2021-04-12T16:30:17.187389Z" - ``` - - 5. Vote on the proposal - - The parameter `1` should match the `proposal_id:` from the upgrade proposal submitted at step 3. - This command must be issued while the proposal status is `PROPOSAL_STATUS_VOTING_PERIOD`. Confirm transaction when prompted. 
- - ```shell - gaiad tx gov vote 1 yes --home data/ibc-0/data/ --keyring-backend test --keyring-dir data/ibc-0/ --chain-id ibc-0 --from validator - ``` - - ```text - confirm transaction before signing and broadcasting [y/N]: y - - {"height":"85","txhash":"AC24D80B1BFE0832769DECFDD3B3DF999A363D5E4390B0B673344FFDED9150B2","codespace":"","code":0,"data":"0A060A04766F7465","raw_log":"[{\"events\":[{\"type\":\"message\",\"attributes\":[{\"key\":\"action\",\"value\":\"vote\"},{\"key\":\"module\",\"value\":\"governance\"},{\"key\":\"sender\",\"value\":\"cosmos1srfzw0jkyyn7wf0ps4zy0tuvdaclfj2ufgp6w3\"}]},{\"type\":\"proposal_vote\",\"attributes\":[{\"key\":\"option\",\"value\":\"VOTE_OPTION_YES\"},{\"key\":\"proposal_id\",\"value\":\"1\"}]}]}]","logs":[{"msg_index":0,"log":"","events":[{"type":"message","attributes":[{"key":"action","value":"vote"},{"key":"module","value":"governance"},{"key":"sender","value":"cosmos1srfzw0jkyyn7wf0ps4zy0tuvdaclfj2ufgp6w3"}]},{"type":"proposal_vote","attributes":[{"key":"option","value":"VOTE_OPTION_YES"},{"key":"proposal_id","value":"1"}]}]}],"info":"","gas_wanted":"200000","gas_used":"43716","tx":null,"timestamp":""} - ``` - - 6. Wait approximately 200 seconds until the proposal changes status to `PROPOSAL_STATUS_PASSED`. - Note the `final tally_result` that includes the vote submitted in the previous step. - - ```shell - gaiad query gov proposal 1 --home data/ibc-0/ - ``` - - ```text - content: - '@type': /cosmos.upgrade.v1beta1.SoftwareUpgradeProposal - description: upgrade the chain software and unbonding period - ... - final_tally_result: - abstain: "0" - "no": "0" - no_with_veto: "0" - "yes": "100000000000" - proposal_id: "1" - status: PROPOSAL_STATUS_PASSED - submit_time: "2021-04-12T16:30:17.187389Z" - total_deposit: - - amount: "10000000" - denom: stake - voting_end_time: "2021-04-12T16:33:37.187389Z" - voting_start_time: "2021-04-12T16:30:17.187389Z" - ``` - -6. Test the `upgrade client` CLI - - The following command performs the upgrade for client `07-tendermint-0`. It outputs two events, one for the updated client state, - and another for the upgraded state. - - ```shell - hermes upgrade client ibc-1 07-tendermint-0 - ``` - ```json - Success: [ - UpdateClient( - UpdateClient { - common: Attributes { - height: Height { revision: 1, height: 438 }, - client_id: ClientId( - "07-tendermint-0", - ), - client_type: Tendermint, - consensus_height: Height { revision: 0, height: 440 }, - }, - header: Some( - Tendermint(..) - ), - }, - ), - UpgradeClient( - UpgradeClient( - Attributes { - height: Height { revision: 1, height: 438 }, - client_id: ClientId( - "07-tendermint-0", - ), - client_type: Tendermint, - consensus_height: Height { revision: 0, height: 441 }, - }, - ), - ), - ] - ``` diff --git a/guide/src/config.md b/guide/src/config.md deleted file mode 100644 index cdde0904bd..0000000000 --- a/guide/src/config.md +++ /dev/null @@ -1,183 +0,0 @@ -# Configuration - -In order to run Hermes, you will need to have a configuration file. - -The format supported for the configuration file is [TOML](https://toml.io/en/). - -By default, Hermes expects the configuration file to be located at `$HOME/.hermes/config.toml`. - -This can be overridden by supplying the `-c` flag when invoking `hermes`, before the -name of the command to run, eg. `hermes -c my_config.toml query connection channels ibc-1 connection-1`. - -> The current version of Hermes does not support managing the configuration file programmatically. 
-
-> You will need to use a text editor to create the file and add content to it.
-
-```bash
-hermes [-c CONFIG_FILE] COMMAND
-```
-
-## Table of contents
-
-
-
-## Configuration
-
-The configuration file must have one `global` section and one `chains` section for each chain.
-
-> **Note:** As of 0.6.0, the Hermes configuration file is self-documented.
-> Please read the configuration file [`config.toml`](https://github.com/informalsystems/ibc-rs/blob/v0.15.0/config.toml)
-> itself for the most up-to-date documentation of parameters.
-
-By default, Hermes will relay on all channels available between all the configured chains.
-In this way, every configured chain will act as a source (in the sense that Hermes listens for events)
-and as a destination (to relay packets that other chains have sent).
-
-For example, if there are only two chains configured, then Hermes will only relay packets between those two,
-i.e. the two chains will serve as a source for each other, and likewise as a destination for each other's relevant events.
-Hermes will ignore all events that pertain to chains which are unknown (i.e. not present in `config.toml`).
-
-To restrict relaying to specific channels, or to relay uni-directionally, you can use [packet filtering policies](https://github.com/informalsystems/ibc-rs/blob/v0.15.0/config.toml#L207-L224).
-
-## Adding private keys
-
-For each configured chain you need to add a private key for that chain in order to submit [transactions](./commands/raw/index.md).
-Please refer to the [Keys](./commands/keys/index.md) section to learn how to add the private keys that are used by the relayer.
-
-## Connecting via TLS
-
-Hermes supports connection via TLS for use-cases such as connecting from behind
-a proxy or a load balancer. In order to enable this, you'll want to set the
-`rpc_addr`, `grpc_addr`, or `websocket_addr` parameters to specify a TLS
-connection via HTTPS using the following scheme (the port number 443 is only
-used as an example):
-```toml
-rpc_addr = 'https://domain.com:443'
-grpc_addr = 'https://domain.com:443'
-websocket_addr = 'wss://domain.com:443/websocket'
-```
-
-## Support for Interchain Accounts
-
-As of version 0.13.0, Hermes supports relaying on [Interchain Accounts][ica] channels.
-
-If the `packet_filter` option in the chain configuration is disabled, then
-Hermes will relay on all existing and future channels, including ICA channels.
-
-There are two kinds of ICA channels:
-
-1. The host channels, whose port is `icahost`
-2. The controller channels, whose port starts with `icacontroller-` followed
-   by the owner account address. [See the spec for more details][ica].
-
-If you wish to relay only on a few specific standard channels (here `channel-0` and `channel-1`),
-but also relay on all ICA channels, you can specify the following packet filter:
-
-> Note the use of wildcards in the port and channel identifiers (`['ica*', '*']`)
-> to match over all the possible ICA ports.
- -```toml -[chains.packet_filter] -policy = 'allow' -list = [ - ['ica*', '*'], # allow relaying on all channels whose port starts with `ica` - ['transfer', 'channel-0'], - ['transfer', 'channel-1'], - # Add any other port/channel pairs you wish to relay on -] -``` - -If you wish to relay on all channels but not on ICA channels, you can use -the following packet filter configuration: - -```toml -[chains.packet_filter] -policy = 'deny' -list = [ - ['ica*', '*'], # deny relaying on all channels whose port starts with `ica` -] -``` - -## Update the configuration without restarting Hermes - -> ⚠️ This feature has been removed in Hermes v0.12.0. - -Before Hermes 0.6.1, the only way to get Hermes to pick up a change in the -configuration was to stop and restart Hermes. - -As of version 0.6.1, Hermes will react to receiving a `SIGHUP` signal -by reloading the `[chains]` section of the configuration, and -stopping, starting or restarting the affected workers. - -> **Warning:** the configuration reload feature only supports -> adding, removing, or updating configuration of chains. It does -> not support dynamically changing global features, such as the -> filtering mechanism or logging level. - -For example, say you start with the configuration given in the previous section -in `~/.hermes/config.toml`, ie. with two chains `ibc-0` and `ibc-1`. - -1. Start three chains `ibc-0`, `ibc-1` and `ibc-2`: - - ```shell - ./scripts/dev-env ibc-0 ibc-1 ibc-2 - ``` - -2. Start Hermes - - ```shell - hermes start - ``` - -3. Add the configuration for the chain `ibc-2` to the configuration file: - - ```toml - [[chains]] - id = 'ibc-2' - rpc_addr = 'http://127.0.0.1:26457' - grpc_addr = 'http://127.0.0.1:9092' - websocket_addr = 'ws://127.0.0.1:26457/websocket' - rpc_timeout = '10s' - account_prefix = 'cosmos' - key_name = 'testkey' - store_prefix = 'ibc' - max_gas = 20000000 - gas_price = { price = 0.001, denom = 'stake' } - clock_drift = '5s' - trusting_period = '14days' - ``` - -4. Change the configuration of the chain `ibc-0`, eg. the `max_gas` property. - -5. Send a `SIGHUP` signal to the `hermes` process: - - > ⚠️ **Warning:** the command below will send a `SIGHUP` signal to the first - > process in the list emitted by `ps aux` which contains the string `hermes`. - > Alternatively, you can look up the process ID (PID) of the `hermes` process - > you want to target and use `kill -SIGHUP PID`. - - ```shell - ps aux | rg hermes | awk '{ print $2 }' | head -n1 | xargs -I{} kill -SIGHUP {} - ``` - -6. Watch the output of Hermes, it will show that Hermes has picked up the changes in - the config. Hermes is now relaying between the three chains and using the new - maximum amount of gas specified for `ibc-0`. - - ``` - ... - - INFO reloading configuration (triggered by SIGHUP) - INFO configuration successfully reloaded - INFO updating existing chain chain.id=ibc-1 - INFO adding new chain chain.id=ibc-2 - ``` - -To make sure Hermes ends up in the expected state, check out the documentation -on [inspecting the relayer state](help.md#inspecting-the-relayer-state). - -## Next steps - -Now that you learned how to build the relayer and how to create a configuration file, you can go to the [`Two Chains`](./tutorials/local-chains/index.md) tutorial to learn how to perform some local testing connecting the relayer to two local chains. 
- -[log-level]: ./help.md#parametrizing-the-log-output-level -[ica]: https://github.com/cosmos/ibc/blob/master/spec/app/ics-027-interchain-accounts/README.md diff --git a/guide/src/example-config.md b/guide/src/example-config.md deleted file mode 100644 index a6a1095f3a..0000000000 --- a/guide/src/example-config.md +++ /dev/null @@ -1,7 +0,0 @@ -# Example Configuration File - -Here is a full example of a configuration file with two chains configured: - -```toml -{{#include ../../config.toml}} -``` diff --git a/guide/src/features.md b/guide/src/features.md deleted file mode 100644 index 5dcb606a2f..0000000000 --- a/guide/src/features.md +++ /dev/null @@ -1,62 +0,0 @@ -# Features - -This section includes a summary of the supported and planned features. -A feature matrix and comparison between the Rust and Go relayer implementations can be found in the [Feature Matrix](./features/matrix.md) - -> **Cosmos SDK compatibility:** -> Hermes supports Cosmos SDK chains implementing the [IBC v1.1][ibcv1] protocol specification. -> Cosmos SDK versions `0.41.3` to `0.44.x` are officially supported. -> In case Hermes finds an incompatible SDK version, it will output a log warning. - -[ibcv1]: https://github.com/cosmos/ibc-go - -## Supported Features - -- Basic features - - create and update clients - - refresh clients to prevent expiration - - establish connections with new or existing clients - - establish channels with new or existing connection - - channel closing handshake - - relay packets, acknowledgments, timeout and timeout-on-close packets, with zero or non-zero delay. - - queries for all objects -- Packet relaying over: - - multiple paths, for the chains in `config.toml` -- Restart support - - clear packets - - resume channel handshake if configured to relay `all` - - resume connection handshake if configured to relay `all` -- Client upgrade - - upgrading clients after a counterparty chain has performed an upgrade for IBC breaking changes -- Packet delay: - - establish path over non-zero delay connection - - relay all packets with the specified delay -- Monitor and submit misbehaviour for clients - - monitor client updates for misbehaviour (fork and BFT time violation) - - submit misbehaviour evidence to the on-chain IBC client. 
- > misbehaviour submission to full node not yet supported -- Individual commands that build and send transactions for: - - creating and updating IBC Tendermint light clients - - sending connection open handshake datagrams - - sending channel open handshake datagrams - - sending channel closing handshake datagrams - - initiating a cross chain transfer (mainly for testing) - - relaying sent packets, acknowledgments and timeouts - - client upgrade -- Channel handshake for existing channel that is not in `Open` state -- Connection handshake for existing connection that is not in `Open` state -- Telemetry support - -## Upcoming / Unsupported Features - -Planned features: -- Full Passive mode: relay from all IBC events - - Connection handshake for existing connection that is not in `Open` state -- Relayer support for management application (add RPC server) -- Dynamic configuration management - -Not planned: -- Relayer management application -- Create clients with user chosen parameters (such as UpgradePath) -- Use IBC light clients other than Tendermint such as Solo Machine -- Support non cosmos-SDK chains diff --git a/guide/src/features/matrix.md b/guide/src/features/matrix.md deleted file mode 100644 index 77573acaf8..0000000000 --- a/guide/src/features/matrix.md +++ /dev/null @@ -1,65 +0,0 @@ -# Feature Matrix -This section gives more details about the features and implementation status -of Hermes in comparison with the [cosmos-go-relayer]. - -__Legend__: - -| Term | Description | -| ----- | ----------- | -| ❌ | feature not supported | -| ✅ | feature is supported | -| `Chain` | chain related | -| `Cl` | client related | -| `Conn` | connection related | -| `Chan` | channel related | -| `.._Handshake_..` | can execute all transactions required to finish a handshake from a single command | -| `..__A` | building and sending `msg` from a command that scans chain state | -| `..__P` | building and sending `msg` from IBC event; doesn't apply to `.._Init` and `FT_Transfer` features | - -__Feature comparison between Hermes and the Go relayer__ - -| Features \ Status | Hermes | Cosmos Go | Feature Details | -| ---------------------- | :---: | :----: |:-------| -| Restart | ✅ | ✅ | replays any IBC events that happened before restart -| Multiple_Paths | ✅ | ✅ | relays on multiple paths concurrently -| | | | -| Connection Delay | ✅ | ❌ | -| Cl_Misbehavior | ✅ | ❌ | monitors and submits IBC client misbehavior -| Cl_Refresh | ✅ | ❌ | periodically refresh an on-chain client to prevent expiration -| Packet Delay | ✅ | ❌ | -| | | | -| Chan_Unordered | ✅ | ✅ | -| Chan_Ordered | ✅ | ❓ | -| | | | -| Cl_Tendermint_Create | ✅ | ✅ | tendermint light client creation -| Cl_Tendermint_Update | ✅ | ✅ | tendermint light client update -| Cl_Tendermint_Upgrade | ✅ | ✅ | tendermint light client upgrade -| | | | -| Conn_Open_Handshake_A | ✅ | ✅ | -| Conn_Open_Handshake_P | ✅ | ❌ | -| | | | -| Chan_Open_Handshake_A | ✅ | ✅ | -| Chan_Open_Handshake_P | ✅ | ❌ | -| Chan_Open_Handshake_Optimistic | ❌ | ❌ | open a channel on a non-Open connection -| | | | -| Chan_Close_Handshake_P | ✅ | ✅ | -| Chan_Close_Handshake_A | ✅ | ❌ | -| | | | -| FT_Transfer | ✅ | ✅ | can submit an ICS-20 fungible token transfer message -| ICA_Relay | ✅ | ❌ | can relay ICS-27 Interchain account packets -| Packet_Recv_A | ✅ | ✅ | -| Packet_Recv_P | ✅ | ✅ | -| Packet_Timeout_A | ✅ | ✅ | -| Packet_Timeout_P | ✅ | ✅ | -| Packet_TimeoutClose_A | ✅ | ❓ | -| Packet_TimeoutClose_P | ✅ | ❓ | -| Packet_Optimistic | ❌ | ❓ | relay packets over non-Open channels -| | | 
| -| Cl_Non_Tendermint | ❌ | ❌ | supports non tendermint IBC light clients -| Chain_Non_Cosmos | ❌ | ❌ | supports non cosmos-SDK chains -| | | | -| Mgmt_Static | ✅ | ✅ | provides means for configuration prior to being started -| Mgmt_Dynamic | ❌ | ❌ | provides means for configuration and monitoring during runtime - - -[cosmos-go-relayer]: https://github.com/cosmos/relayer diff --git a/guide/src/getting_started.md b/guide/src/getting_started.md deleted file mode 100644 index 0c1f747fbd..0000000000 --- a/guide/src/getting_started.md +++ /dev/null @@ -1,13 +0,0 @@ -# Getting Started - -In order to run Hermes, please make sure you have all the -[pre-requisites](./pre_requisites.md) installed on your machine. - -Once you have these pre-requisites, you can -[build and run Hermes](./installation.md). - -> The instructions in this guide have been tested on `Linux` and `MacOS` -> environments. Most of the commands should work on both environments. Even -> though you can build and run the relayer on `Windows` (since we develop it -> in Rust and it supports cross platform compilation) we have not tested the -> relayer on `Windows` and we do not support this operating system at this time. \ No newline at end of file diff --git a/guide/src/glossary.md b/guide/src/glossary.md deleted file mode 100644 index 89f406494b..0000000000 --- a/guide/src/glossary.md +++ /dev/null @@ -1,14 +0,0 @@ -# Glossary - -These are some of the definitions used in this guide: - -| Term | Definition | -|------|------------| -|IBC transaction| A transaction that includes IBC datagrams (including packets). This is constructed by the relayer and sent over the physical network to a chain according to the chain rules. For example, for tendermint chains a broadcast_tx_commit request is sent to a tendermint RPC server.| -|IBC datagram| An element of the transaction payload sent by the relayer; it includes client, connection, channel and IBC packet data. Multiple IBC datagrams may be included in an IBC transaction.| -|IBC packet| A particular type of IBC datagram that includes the application packet and its commitment proof.| -|IBC Client| Client code running on chain, typically only the light client verification related functionality.| -|Relayer Light Client| Full light client functionality, including connecting to at least one provider (full node), storing and verifying headers, etc.| -|Source chain| The chain from which the relayer reads data to fill an IBC datagram.| -|Destination chain| The chain where the relayer submits transactions that include the IBC datagram.| - diff --git a/guide/src/help.md b/guide/src/help.md deleted file mode 100644 index e2e7bfcf45..0000000000 --- a/guide/src/help.md +++ /dev/null @@ -1,586 +0,0 @@ -# Help - -This section provides guidelines regarding troubleshooting and general -resources for getting help with `hermes`. -For this purpose, we recommend a few ideas that could be of help: - -- [hermes help][help] command, providing a CLI - documentation for all `hermes` commands. 
-- [profile][profiling] your relayer binary to identify slow methods; -- [configure][log-level] the `log_level` to help with debugging; -- [patch][patching] your local gaia chain(s) to enable some corner-case methods - (e.g., channel close); - -And if the above options do not address your specific problem: -- you can [request a new feature][feature]; -- or consult the [list of reported issues][issues] and search by relevant - keywords to see if you're dealing with a known problem; -- we would be grateful if you can submit a [bug report][bug-report] - discussing any problem you find, and from there on we can look at the - problem together; - -Lastly, for general questions, you can reach us at `hello@informal.systems`, -or on Twitter [@informalinc][twitter]. - -## Table of contents - - - -## Help command - -The CLI comprises a special `help` command, which accepts as parameter other commands, and provides guidance on what is the correct way to invoke those commands. - -For instance, - -```shell -hermes help create -``` - -will provide details about all the valid invocations of the `create` CLI command. - -``` -USAGE: - hermes create - -DESCRIPTION: - Create objects (client, connection, or channel) on chains - -SUBCOMMANDS: - help Get usage information - client Create a new IBC client - connection Create a new connection between two chains - channel Create a new channel between two chains -``` - -This can provide further specific guidance if we add additional parameters, e.g., - -```shell -hermes help create channel -``` - -``` -USAGE: - hermes create channel - -DESCRIPTION: - Create a new channel between two chains - -POSITIONAL ARGUMENTS: - chain_a_id identifier of the side `a` chain for the new channel - chain_b_id identifier of the side `b` chain for the new channel (optional) - -FLAGS: - -c, --connection-a CONNECTION-A - --port-a PORT-A identifier of the side `a` port for the new channel - --port-b PORT-B identifier of the side `b` port for the new channel - -o, --order ORDER the channel ordering, valid options 'unordered' (default) and 'ordered' - -v, --channel-version VERSION the version for the new channel -``` - -Additionally, the `-h`/`--help` flags typical for CLI applications work on -all commands. - -## Parametrizing the log output level - -The relayer configuration file permits parametrization of output verbosity via the knob called `log_level`. -This file is loaded by default from `$HOME/.hermes/config.toml`, but can be overridden in all commands -with the `-c` flag, eg. `hermes -c ./path/to/my/config.toml some command`. - -Relevant snippet: - -```toml -[global] -log_level = 'error' -``` - -Valid options for `log_level` are: 'error', 'warn', 'info', 'debug', 'trace'. -These levels correspond to the tracing sub-component of the relayer-cli, -[see here](https://docs.rs/tracing-core/0.1.17/tracing_core/struct.Level.html). - -The relayer will _always_ print a last line summarizing the result of its -operation for queries or transactions. In addition to this last line, -arbitrary debug, info, or other outputs may be produced. - -## Overriding the tracing filter using `RUST_LOG` - -For debugging purposes, we may want to inspect which RPC queries the relayer is making. -The relayer makes use of the `tendermint-rpc` library to issue RPC queries, but -the output of this library is by default turned off in order to keep the logs more -readable. 
- -Using the `RUST_LOG` environment variable, we can turn logging on for the -`tendermint-rpc` library, as follows: - -``` -RUST_LOG=tendermint-rpc=debug,info hermes start -``` - -Setting the `RUST_LOG` environment variable to `tendermint_rpc=debug,info` instructs -the relayer to set the log level of the `tendermint_rpc` crate to `debug` and otherwise -use the `info` log level. - -> **Note:** While the `tendermint-rpc` contains a dash in its name, the logging filter -> expects a module name, which can only contain alphanumeric characters and underscores, -> hence why the filter above is written `tendermint_rpc=debug`. - -**Example:** - -``` -❯ RUST_LOG=tendermint_rpc=debug,info hermes start -2022-02-24T14:32:14.039555Z INFO ThreadId(01) using default configuration from '/Users/coromac/.hermes/config.toml' -2022-02-24T14:32:14.043500Z INFO ThreadId(01) telemetry service running, exposing metrics at http://127.0.0.1:3001/metrics -2022-02-24T14:32:14.043542Z INFO ThreadId(01) [rest] address not configured, REST server disabled -2022-02-24T14:32:14.049759Z DEBUG ThreadId(01) Incoming response: { - "jsonrpc": "2.0", - "id": "143b4580-c49e-47c1-81b2-4e7090f6e762", - "result": { - "node_info": { - "protocol_version": { - "p2p": "8", - "block": "11", - "app": "0" - }, - "id": "73f9134539f9845cd253dc302e36d48ee4c0f32d", - "listen_addr": "tcp://0.0.0.0:27003", - "network": "ibc0", - "version": "v0.34.14", - "channels": "40202122233038606100", - "moniker": "ibc0", - "other": { - "tx_index": "on", - "rpc_address": "tcp://0.0.0.0:27000" - } - }, - "sync_info": { - "latest_block_hash": "8396B93E355AD80EED8167A04BB9858A315A8BEB482547DE16A6CD82BC11551B", - "latest_app_hash": "22419E041D6997EE75FF66F7F537A3D36122B220EAB89A9C246FEF680FB1C97A", - "latest_block_height": "86392", - "latest_block_time": "2022-02-24T14:32:08.673989Z", - "earliest_block_hash": "0A73CFE8566D4D4FBFE3178D9BCBAD483FD689854CA8012FF1457F8EC4598132", - "earliest_app_hash": "E3B0C44298FC1C149AFBF4C8996FB92427AE41E4649B934CA495991B7852B855", - "earliest_block_height": "1", - "earliest_block_time": "2022-01-20T09:04:21.549736Z", - "catching_up": false - }, - "validator_info": { - "address": "6FD56E6AA1EEDAD227AFAB6B9DE631719D4A3691", - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "mR5V/QWOv/mJYyNmlsl3mfxKy1PNaOzdztyas4NF2BA=" - }, - "voting_power": "10" - } - } -} -2022-02-24T14:32:14.052503Z DEBUG ThreadId(21) Incoming response: { - "jsonrpc": "2.0", - "id": "0ca35e64-ea98-4fbf-bd66-c3291128ace9", - "result": {} -} - -... -``` - -The two DEBUG log lines above were emitted by the `tendermint-rpc` crate. - -## Inspecting the relayer state - -To get a little bit of insight into the state of the relayer, -Hermes will react to a `SIGUSR1` signal by dumping its state to -the console, either in plain text form or as a JSON object if Hermes -was started with the `--json` option. - -To send a `SIGUSR1` signal to Hermes, look up its process ID (below PID) -and use the following command: - -```shell -kill -SIGUSR1 PID -``` - -Hermes will print some information about the workers which are currently running. 
- -For example, with three chains configured and one channel between each pair of chains: - -```text -INFO Dumping state (triggered by SIGUSR1) -INFO -INFO * Chains: ibc-0, ibc-1, ibc-2 -INFO * Client workers: -INFO - client::ibc-0->ibc-1:07-tendermint-0 (id: 5) -INFO - client::ibc-0->ibc-2:07-tendermint-0 (id: 9) -INFO - client::ibc-1->ibc-0:07-tendermint-0 (id: 1) -INFO - client::ibc-1->ibc-2:07-tendermint-1 (id: 11) -INFO - client::ibc-2->ibc-0:07-tendermint-1 (id: 3) -INFO - client::ibc-2->ibc-1:07-tendermint-1 (id: 7) -INFO * Packet workers: -INFO - packet::channel-0/transfer:ibc-0->ibc-1 (id: 2) -INFO - packet::channel-0/transfer:ibc-1->ibc-0 (id: 6) -INFO - packet::channel-0/transfer:ibc-2->ibc-0 (id: 10) -INFO - packet::channel-1/transfer:ibc-0->ibc-2 (id: 4) -INFO - packet::channel-1/transfer:ibc-1->ibc-2 (id: 8) -INFO - packet::channel-1/transfer:ibc-2->ibc-1 (id: 12) -``` - -or in JSON form (prettified): - -```json -{ - "timestamp": "Jul 12 17:04:37.244", - "level": "INFO", - "fields": { - "message": "Dumping state (triggered by SIGUSR1)" - } -} -{ - "chains": [ - "ibc-0", - "ibc-1", - "ibc-2" - ], - "workers": { - "Client": [ - { - "id": 5, - "object": { - "type": "Client", - "dst_chain_id": "ibc-1", - "dst_client_id": "07-tendermint-0", - "src_chain_id": "ibc-0" - } - }, - { - "id": 9, - "object": { - "type": "Client", - "dst_chain_id": "ibc-2", - "dst_client_id": "07-tendermint-0", - "src_chain_id": "ibc-0" - } - }, - { - "id": 1, - "object": { - "type": "Client", - "dst_chain_id": "ibc-0", - "dst_client_id": "07-tendermint-0", - "src_chain_id": "ibc-1" - } - }, - { - "id": 11, - "object": { - "type": "Client", - "dst_chain_id": "ibc-2", - "dst_client_id": "07-tendermint-1", - "src_chain_id": "ibc-1" - } - }, - { - "id": 3, - "object": { - "type": "Client", - "dst_chain_id": "ibc-0", - "dst_client_id": "07-tendermint-1", - "src_chain_id": "ibc-2" - } - }, - { - "id": 7, - "object": { - "type": "Client", - "dst_chain_id": "ibc-1", - "dst_client_id": "07-tendermint-1", - "src_chain_id": "ibc-2" - } - } - ], - "Packet": [ - { - "id": 2, - "object": { - "type": "Packet", - "dst_chain_id": "ibc-1", - "src_chain_id": "ibc-0", - "src_channel_id": "channel-0", - "src_port_id": "transfer" - } - }, - { - "id": 6, - "object": { - "type": "Packet", - "dst_chain_id": "ibc-0", - "src_chain_id": "ibc-1", - "src_channel_id": "channel-0", - "src_port_id": "transfer" - } - }, - { - "id": 10, - "object": { - "type": "Packet", - "dst_chain_id": "ibc-0", - "src_chain_id": "ibc-2", - "src_channel_id": "channel-0", - "src_port_id": "transfer" - } - }, - { - "id": 4, - "object": { - "type": "Packet", - "dst_chain_id": "ibc-2", - "src_chain_id": "ibc-0", - "src_channel_id": "channel-1", - "src_port_id": "transfer" - } - }, - { - "id": 8, - "object": { - "type": "Packet", - "dst_chain_id": "ibc-2", - "src_chain_id": "ibc-1", - "src_channel_id": "channel-1", - "src_port_id": "transfer" - } - }, - { - "id": 12, - "object": { - "type": "Packet", - "dst_chain_id": "ibc-1", - "src_chain_id": "ibc-2", - "src_channel_id": "channel-1", - "src_port_id": "transfer" - } - } - ] - } -} -``` - -## Patching `gaia` to support `ChanCloseInit` - -The guide below refers specifically to patching your gaia chain so that the -relayer can initiate the closing of channels by submitting a [`ChanCloseInit`][chan-close] message. -Without this modification, the transaction will be rejected. -We also describe how to test the channel closing feature. 
-
-- Clone the Cosmos SDK
-
-  ```shell
-  git clone https://github.com/cosmos/cosmos-sdk.git ~/go/src/github.com/cosmos/cosmos-sdk
-  cd ~/go/src/github.com/cosmos/cosmos-sdk
-  ```
-
-- Apply this diff:
-
-  ```
-  --- a/x/ibc/applications/transfer/module.go
-  +++ b/x/ibc/applications/transfer/module.go
-  @@ -305,7 +305,7 @@ func (am AppModule) OnChanCloseInit(
-  	channelID string,
-  ) error {
-  	// Disallow user-initiated channel closing for transfer channels
-  -	return sdkerrors.Wrap(sdkerrors.ErrInvalidRequest, "user cannot close channel")
-  +	return nil
-  }
-  ```
-
-- Append the line below (watch for the placeholder `<username>`) as the last line
-  in your `go.mod` in the gaia clone:
-
-```replace github.com/cosmos/cosmos-sdk => /Users/<username>/go/src/github.com/cosmos/cosmos-sdk```
-
-- Now `make build` and `make install` your local copy of gaia
-
-To test that channel closing works correctly, perform the steps below.
-
-- the channel should be in state open-open:
-
-- transfer of 5555 samoleans from `ibc-1` to `ibc-0`. This results in a
-  Tx to `ibc-1` for a `MsgTransfer` packet.
-  Make sure you're not relaying this packet (the relayer should not be running on
-  this path).
-
-  ```shell
-  hermes tx raw ft-transfer ibc-0 ibc-1 transfer channel-1 5555 -o 1000 -n 1 -d samoleans
-  ```
-
-- now do the first step of channel closing: the channel will transition
-  to close-open:
-
-  ```shell
-  hermes -c config.toml tx raw chan-close-init ibc-0 ibc-1 connection-0 transfer transfer channel-0 channel-1
-  ```
-
-- trigger timeout on close to ibc-1
-
-  ```shell
-  hermes -c config.toml tx raw packet-recv ibc-0 ibc-1 transfer channel-1
-  ```
-
-- close-close
-
-  ```shell
-  hermes -c config.toml tx raw chan-close-confirm ibc-1 ibc-0 connection-1 transfer transfer channel-1 channel-0
-  ```
-
-- verify that the two ends are in Close state:
-
-  ```shell
-  hermes -c config.toml query channel end ibc-0 transfer channel-0
-  hermes -c config.toml query channel end ibc-1 transfer channel-1
-  ```
-
-
-## New Feature Request
-
-If you would like a feature to be added to `hermes`, don't hesitate
-to open a discussion about that via the [feature request][feature-request]
-issue template.
-
-> Note that Hermes is packaged as part of the `ibc-relayer-cli` crate.
-
-
-## Profiling
-
-The `relayer` crate provides a `time!` macro which can be used to measure how much time is spent between the invocation of the macro and the end of the enclosing scope.
-
-### Setup
-
-The `time!` macro has no effect unless the `profiling` feature of the `relayer` crate is enabled.
-
-To enable it, one must compile the `relayer-cli` crate with the `--features=profiling` flag.
-
-a) One way is to build the `relayer` binary and update the `hermes` alias to point to the executable:
-
-```shell
-cd relayer-cli/
-cargo build --features=profiling
-```
-
-b) Alternatively, one can use the `cargo run` command and update the alias accordingly:
-
-```shell
-alias hermes='cargo run --features=profiling --manifest-path=relayer-cli/Cargo.toml --'
-```
-
-The `--manifest-path=relayer-cli/Cargo.toml` flag is needed for `cargo run` to accept the `--features` flag.
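-
-For context, the sketch below shows one way a scope-based timing macro of this kind can be put
-together: the macro logs a start line and binds a guard value whose `Drop` implementation logs the
-elapsed time when the enclosing scope ends. The type name, macro body and log format here are
-illustrative assumptions for this guide, not the actual implementation of the `relayer` crate's
-`time!` macro.
-
-```rust
-/// Guard that logs the elapsed time when it is dropped, i.e. at the end of
-/// the scope in which the timing macro was invoked.
-struct ScopeTimer {
-    label: String,
-    start: std::time::Instant,
-}
-
-impl Drop for ScopeTimer {
-    fn drop(&mut self) {
-        println!("⏳ {} - elapsed: {}ms", self.label, self.start.elapsed().as_millis());
-    }
-}
-
-/// Hypothetical stand-in for the `time!` macro: prints a start line and keeps
-/// a guard alive until the end of the enclosing scope.
-macro_rules! time {
-    ($($arg:tt)*) => {
-        let _scope_timer = {
-            let label = format!($($arg)*);
-            println!("⏳ {} - start", label);
-            ScopeTimer { label, start: std::time::Instant::now() }
-        };
-    };
-}
-
-fn main() {
-    time!("outer scope");
-    {
-        time!("inner scope");
-        std::thread::sleep(std::time::Duration::from_millis(50));
-    } // "inner scope" elapsed is logged here
-} // "outer scope" elapsed is logged here
-```
-
-With a shape like this, the `profiling` feature flag can simply switch the macro between the
-expansion above and a no-op, which matches the behaviour described in the setup section.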
-
-### Example
-
-```rust
-fn my_function(x: u32) -> u32 {
-    time!("myfunction: x={}", x); // A
-
-    std::thread::sleep(Duration::from_secs(1));
-
-    {
-        time!("inner operation"); // B
-
-        std::thread::sleep(Duration::from_secs(2));
-
-        // timer B ends here
-    }
-
-    x + 1
-
-    // timer A ends here
-}
-```
-
-#### Output
-
-```
-Jan 20 11:28:46.841 INFO relayer::macros::profiling: ⏳ myfunction: x=42 - start
-Jan 20 11:28:47.842 INFO relayer::macros::profiling: ⏳ inner operation - start
-Jan 20 11:28:49.846 INFO relayer::macros::profiling: ⏳ inner operation - elapsed: 2004ms
-Jan 20 11:28:49.847 INFO relayer::macros::profiling: ⏳ myfunction: x=42 - elapsed: 3005ms
-```
-
-Profiling is useful for tracking down unusually slow methods.
-Each transaction or query usually consists of multiple lower-level methods,
-and it's often not clear which of these is the culprit for low performance.
-With profiling enabled, `hermes` will output timing information for individual
-methods involved in a command.
-
-__NOTE__: To be able to see the profiling output, the relayer needs to be compiled with
-the `profiling` feature and the [log level][log-level] should be `info` level or lower.
-
-#### Example output for `tx raw conn-init` command
-
-```
-hermes -c config.toml tx raw conn-init ibc-0 ibc-1 07-tendermint-0 07-tendermint-0
-```
-
-```
-Apr 13 20:58:21.225 INFO ibc_relayer::macros::profiling: ⏳ init_light_client - start
-Apr 13 20:58:21.230 INFO ibc_relayer::macros::profiling: ⏳ init_light_client - elapsed: 4ms
-Apr 13 20:58:21.230 INFO ibc_relayer::macros::profiling: ⏳ init_event_monitor - start
-Apr 13 20:58:21.235 INFO ibc_relayer::macros::profiling: ⏳ init_event_monitor - elapsed: 5ms
-Apr 13 20:58:21.235 INFO ibc_relayer::event::monitor: running listener chain.id=ibc-1
-Apr 13 20:58:21.236 INFO ibc_relayer::macros::profiling: ⏳ init_light_client - start
-Apr 13 20:58:21.239 INFO ibc_relayer::macros::profiling: ⏳ init_light_client - elapsed: 2ms
-Apr 13 20:58:21.239 INFO ibc_relayer::macros::profiling: ⏳ init_event_monitor - start
-Apr 13 20:58:21.244 INFO ibc_relayer::macros::profiling: ⏳ init_event_monitor - elapsed: 4ms
-Apr 13 20:58:21.244 INFO ibc_relayer::event::monitor: running listener chain.id=ibc-0
-Apr 13 20:58:21.244 INFO ibc_relayer::macros::profiling: ⏳ get_signer - start
-Apr 13 20:58:21.246 INFO ibc_relayer::macros::profiling: ⏳ get_signer - elapsed: 1ms
-Apr 13 20:58:21.246 INFO ibc_relayer::macros::profiling: ⏳ query_latest_height - start
-Apr 13 20:58:21.246 INFO ibc_relayer::macros::profiling: ⏳ block_on - start
-Apr 13 20:58:21.248 INFO ibc_relayer::macros::profiling: ⏳ block_on - elapsed: 1ms
-Apr 13 20:58:21.249 INFO ibc_relayer::macros::profiling: ⏳ query_latest_height - elapsed: 3ms
-Apr 13 20:58:21.250 INFO ibc_relayer::macros::profiling: ⏳ unbonding_period - start
-Apr 13 20:58:21.250 INFO ibc_relayer::macros::profiling: ⏳ block_on - start
-Apr 13 20:58:21.251 INFO ibc_relayer::macros::profiling: ⏳ block_on - elapsed: 0ms
-Apr 13 20:58:21.270 INFO ibc_relayer::macros::profiling: ⏳ block_on - start
-Apr 13 20:58:21.273 INFO ibc_relayer::macros::profiling: ⏳ block_on - elapsed: 2ms
-Apr 13 20:58:21.273 INFO ibc_relayer::macros::profiling: ⏳ unbonding_period - elapsed: 23ms
-Apr 13 20:58:21.279 INFO ibc_relayer::macros::profiling: ⏳ build_consensus_state - start
-Apr 13 20:58:21.280 INFO ibc_relayer::macros::profiling: ⏳ build_consensus_state - elapsed: 0ms
-Apr 13 20:58:21.280 INFO ibc_relayer::macros::profiling: ⏳ send_msgs - start
-Apr 13 20:58:21.280 INFO ibc_relayer::macros::profiling: ⏳ send_tx - start
-Apr 13 20:58:21.282 INFO ibc_relayer::macros::profiling: ⏳ PK "03f17d2c094ee68cfcedb2c2f2b7dec6cd82ea158ac1c32d3de0ca8b288a3c8bfa" - start
-Apr 13 20:58:21.282 INFO ibc_relayer::macros::profiling: ⏳ block_on - start
-Apr 13 20:58:21.285 INFO ibc_relayer::macros::profiling: ⏳ block_on - elapsed: 3ms
-Apr 13 20:58:21.296 INFO ibc_relayer::macros::profiling: ⏳ block_on - start
-Apr 13 20:58:22.664 INFO ibc_relayer::macros::profiling: ⏳ block_on - elapsed: 1367ms
-Apr 13 20:58:22.664 INFO ibc_relayer::macros::profiling: ⏳ PK "03f17d2c094ee68cfcedb2c2f2b7dec6cd82ea158ac1c32d3de0ca8b288a3c8bfa" - elapsed: 1382ms
-Apr 13 20:58:22.664 INFO ibc_relayer::macros::profiling: ⏳ send_tx - elapsed: 1384ms
-Apr 13 20:58:22.664 INFO ibc_relayer::macros::profiling: ⏳ send_msgs - elapsed: 1384ms
-Success: CreateClient(
-    CreateClient(
-        Attributes {
-            height: Height {
-                revision: 0,
-                height: 10675,
-            },
-            client_id: ClientId(
-                "07-tendermint-7",
-            ),
-            client_type: Tendermint,
-            consensus_height: Height {
-                revision: 1,
-                height: 10663,
-            },
-        },
-    ),
-)
-```
-
-
-
-[help]: ./help.md#help-command
-[feature-request]: https://github.com/informalsystems/ibc-rs/issues/new?assignees=&labels=&template=feature-request.md
-[bug-report]: https://github.com/informalsystems/ibc-rs/issues/new?assignees=&labels=&template=bug-report.md
-[twitter]: https://twitter.com/informalinc
-[twitter-image]: https://abs.twimg.com/errors/logo23x19.png
-[website]: https://informal.systems
-[log-level]: ./help.md#parametrizing-the-log-output-level
-[issues]: https://github.com/informalsystems/ibc-rs/issues
-[profiling]: ./help.md#profiling
-[feature]: ./help.md#new-feature-request
-[patching]: ./help.md#patching-gaia
-[chan-close]: ./commands/raw/channel-close.md#channel-close-init
diff --git a/guide/src/images/grafana.png b/guide/src/images/grafana.png
deleted file mode 100644
index 5d2882ec52f10624f31e20c77f8de612b3b406a6..0000000000000000000000000000000000000000
GIT binary patch
zy!H~V5tAKxMjg+1wC@kKow<=i48gVtP&`FzE?=>bc9G~Y$VS@8#%}z%a*(WWcF1~>?=?W!ryVQ zv(9zcWN2(6b;HSF8`t7E_szkSH;Npn$w3_IeMy_T102s zppMI|>mtsqyiX7hePb)rlVp+{|%*piO{=_3`@t7%BV51%wv+$ZF^hO0aRZ_?6z zc|i58XHZ0uJho(ep=hKnuhdwX*%Mldv_@7QEOfxoh({E-9S?Oa42^1OId*@4d+4{b z5T17;@jAFLL&jMsK-up1a67P75lL&_8y~6gZQrzFo`yuwz~mOfv~HDM>I??8^dM~d z@?RiUl{+iE5@fdc9c#caM{a)yRvw1qj<6F`$*wT^oU}#I)JuZak zfjOE8f%(SV9xReY)u>=ZN4$ch<6_%l*@2$)=bJB8XkH6p)lu?@f)Q*$m6>8mTf7)K zB(kHV@*>^0N9*|=VYAQHybvgdw)J@7E{@dZ5?O3(c_=nn(&0wiE4soip2gKfYaz7a zb)tz@5!|`J;E@t2?mRh{fgaPmr8zFZS~6YWxaqH!?cle#ZR&k{k3y=<5__A7E;1) zrnL8wtlr=K5Nyabw#}f~A=6#WDM)zqez}3D5lae)^C9w(Iqa zVTX7;Ef0mX^n%At1G`b{b^qY6~8wQ zcOZkiqTEC*b_Xs91*hle21E`RpzCszavV|p=rp7-&S@g~AMY?iuuf6Tt4Qc|S7<}* zaP$U6%AJkdsJ109vc$HB=xj)IZ3>_OOhmjQAyrQl@sOv63yo`E}fztVGr9)ly4Ao@ivPEVprJocU8xq|4qF0qk3GBqyMzaQfg;t z9leYRj<6vDS2v0tB7dL-A;=+5OyW2mmwtp#Nu zHdghE2*5&{~u7&MtmoWnw0$KGQrs&}`FTBX~ z+S8ZXpE(foo`b+GVt>+w+eq;hsC)k|=7(Fb<=Fuj+olEowl!55Y~g>mT?Gt#h0EF? zB|I2k2qtpMu9Fhbgr`y$Su!S6)75r0H&x1cZjriTYZrI%qgzAidU$4yUh?-T9m|ph zy?)zX_`g2j6d0#72PsPS_<%ZzmtQmk>Mi@GDJRw&x!+&1?8}N%R4Z9+J@PTNb*14I z9d7V-jgA9xt|Ho+;IjK2ZZ<*8ojVy~*7De(&Op^txnl@}JQ>Vv?+`Y9KW#z5D2-OI z;hNR6U`>hLuDYl0M9zM6r zhfT-sDaScMDpEh$LFUPpF>{Tv^2g_`yko;@(o!tDZopBdj&Iz>KR(!0R(YprITdz{ zNV6i1kZZVKs&yQHZ6lu$!RE!k7(tq;z{B>u_nWuk2t<+{J6OvNP^KOxW?BV5^ppA8 z$J4L*0d=RA^KWLn(%I*fYUaHL{DHJ#xWT4y2dlWVRyekfvF{(34ER@k+O#-GDGBu^ zuD*J{&s64ktODetrf-8@sF404B_y+UKUjc{LCNT|V8Utrzx<8l7Pb#D;XrBm=D#;ph-o8kqs z(Gsh$zST-^c%^*8uF!4d%2Rnemz7x-v)Np&qOt^&jt(1}Kh(8gH2ixcQp8OM2AJQV-X@ZQn;ji!<=o?3Qi?+IA`sS5E*5#h>zyai*l15iy@U7DKIagO7IzKKMMVmHu=}*lb5{Hd*^fG*rweh9oeNEciQNd$GStBub!hd zo;#b}D6Pz$RaQJlo=3y*QB_jXJpj1$CcTs=d*#xmX93si?NqG@rGvwpNBp0U-+Ose@=;rlRQ0W_}NIw+@&?N zB~~rKWN-qD+qtXdUUoo0uk5Pj+ryB?9c_OPR=ajr6GNdpMA`W7!uHvH@RTGj;ZIm% zG1Cef4uPqi2r+5&C^#{#(UNT0WYRf+_di|ZWeEndu7VA{zkd?5s#Ok95ncV$RUsY} z76Wn!c0aTaqT~ut@B*d0P@A5$AfyK}6!rXlE13f|W_TDO!C4x;KH*|LfnoJq90`lx z%$9_lGb|c74uy}isk@n-H=xIZ&zykGeJD?BgQUZ8>5nj`&oG`BAvS}*{s4SewV{k^ z!1ZiJk&aF53UhizteT`qN94rc7FL8J7)u&O)-)i`H+c#$CT;x?yAK5G0&3AeC2xeO z%sBL@hu6u;&QwkA%+9jRC40G>SeAhV^-|Qfxvj+2S=PnzY>a%SQNF<6ii(gk(P3Wd zK)hj40D8qkUXQ#XC1`s@NRz~TS)jSN$#$}x!)ty(I2J<{^%s9$Gnk#UUR*faR8`7? 
zv%DX|f3D4?2dz)x z3rzcD7VK74Ot8AqTS_fk%(D(t=k=Os`zPLp^iRwi)cXj7;v7hP#K;7E@Spq>zH@&5DoLIPgM>!o~Q58;s8J`wVPkVVxzcxDM15+r5(3*)^A_@=>k2~xjL=wZSM zYU@?z+tiTzf;0lZBHIDbzY5YF&m%6o=R-7#5UVlPonoXY(4Yllp`F=(q;@blN|3=4 z?e%3y21KIs+sgCTQ$hIxpnn?l2gTP zsvrR-;{K74`R$?o`Hx3nA#2BZJVDSCxET1{Gp`h8f6J} z#jneWP)Bnw1+WA{DXn~*^emgwEpQ8*zjz^&*sBA;VDq1zg>cnYG%X1N3$s8LYH#qg zI*?$p_D#@1@h+83k{)aZRnG%{N*jVV_cBWN3#dMW#96kzauzEh81Ife>5~l_U@*y1 zF+H?*SC9cG+JKfb1KiscK+3pD#Jhj~Ej|?_pv|61lEhf&K96r~pzS`Q;||y>7w9dz zZ$SW2?3Q;;11Z}|JOVn}8NI7H#2NrnAH09^C&Y>O50rwTx9X&-gOnz~+^us1MtZWH8E{Yd#=?lf z5a}Pb(gX=em=kpt`-e2ba>@jR?;F253)J}iXMbnF0BHA?>SGuL^z7xc5;bUno#n=) zk2Ww9z@-bZ(Y&N6h#(Xb3IhG4N88VWTS#UrBIdM^fdG`k3mZz8gXz{y%d{Wz9) z2TekQZBG(1gW z{dRUQ+o6S`8wvU1vPY)`3H3ZE#tY`{)zxJr&qjpgf&?h^S+CfVdO`dP&;XS6G9-wt zU`YS)2FwAdD9~4(KEr%Tgj9HsW+7BY-YulZnZ?e7X|?R_>)NC!5c32nh|td$BAMcF zf?H6%ZV#a&d*lz4tR&FbJ;u}=h-V=}=(*T3clK@IxUznGVtS~PgPhT`E^`tBKYAk3 z$m42^m|oed^_7nSV(s;h4>tux@$F0}^NHKC29lB@8-FT3&fyms@fExk5Lu!>+nqkr zXIaWKlfKWC-unQljZZ5@8#P6?ky1b)jX8N|sO{v1(%12eBZ5vkn&WbU zPSIg|HvU&%*e>ANW=0+zZ=axUQrbTex4AOUDEW-*$bDX8v4Um#)d9pF@A20ro0ERL zI(AxrI}cseZ4IxUd=RU_gN`KIF$>{CG`V&_@XJ4o!h*<@O;O+F_qQX-h0t8d;fB}p z=b=I2zpwsF-5$I9BZbNPK;;%;^8Guh)SjfA0NRy~;IR|_qoM!%_x{hd|NoT;TKNBS zkD!GA-`5BPdGs11;Qa6g?7Zd++-R25yQGYLy+2ZrxiIac4$UpA!uWzzdk2adG!vfCL zA8_b5$z$Ab8w*tE?2dh=yZaA(ao?TNFMG8p09-1F#DrYTAh3r(e{s7ROIixm|qg7gvv5$U~$qHI7#h)RJfWc(;2?yw#sS7IZks z^UFPy02H*hR%P9uDcNAEf8VkQej*mZBupWb_nwWnbIGpkpGPb5`{r}!zJKB z_w7GQuY%u^4k5%iXclmM@Y4LX zI{_ceXQEc=PhGzMaviOggD7P2Suro@C=9)x7|2!*sov|R)@a7OZG^o}$SL)}>!!uJ zS;=gC9(&#Da-{gO-PT(ZlN3#FEnb&Nd&TBcFnO*2|F8To;4n;X<%`=4yXvebr|;m- zIne7_C=(h0+|wW3{c8AnYDj188My-G;+mQo&tUW~Y?0 z`;ed91q~yRv}ix;`E#%4IwEy;b$h1$m&b~?k@2!7Q!h^xpj`A@0L)&|2F0>{l)-dP zA5By#{Ko4!ru(f5AGjL87_$r78?>AVWYLjXx+jg|_O+m??AA}s`<*ubhhlloC2-DH zYC2f<=P*s;%|uXDM-VYyk8f*bkBkjI4sN+T!-Mxc8T8L-a_&i(Cxc(t?f*Fn_O1E= zq`EE?f4@}rgU`NTDN@-bu0D-(E;X(i1I4O7Tz!0fzdVIxPM(N&+8Ym)e|;%Z@AB>7 zGYqPGgs*O(DrnGp)?&xi#zY;9C{vRpoVU1~8Dm@FVv?}qJIPsQly%&UTEysR! 
z#Dj`z4D+fsX5%!%O+tu$2~ZMuf6HS5jo&LmHf81Q@qmziv}TlPM7>~R;X+LaC+t;yi6F5^s-fo1fAoe~@(}c@Kk4TUkFQxtG2*V3EZXP5*E4@sH~%!x6_73l<#YLy)yw zGV$J%IQ;%D?v@8+Cf0HZTCldCHs;qL2HHH{0@e3d0~-T<&Oo61?(n^nO8jwEXgW+> z8IVIZg8pct8Ykhtm%-!aaO2#H*|rkAJifsZ*=&|TE5O%13A&i657hgab&jR2jYL~- zNE&Z5XuXLtid0HV~TDrZGHrK-0J;7pULS+ zqc@-O~W=0qeI$O1^2*F(4#S=kxuR$I}J}P4PK0H*jO}{1ubYh2ClVHZ=pcw|B?9k$%mJGTWQ2 ztE4@mXrnT>-2Bz0DqPb$P?@h$o=|L;xh3r1|9%lvZy(I=RPxG`YosfF>BHiD5(HRZ%+K^ATV-`2?bT$yO zdM4r%xq+xr5}-$<#L*kcPK1q3k(DYHe;G|J-|G&-3})r=W@$)lwNc3jH^QV!2X;57 zKflAP@L!A>Eo$gfCvjfQDSZ{^x^22tF+$zEA}>!|C~^~-Ev|2gVMO8z8^!Qn71?KC zxN4k0^QwRx(csOthuz`c7vF0E+~?iIB+w%nT-5X35+DR2_HT0YbhFKgr_Fc`xcB!E zew(efK8_ZGQ-;-Vhsjm+ zS{TJwPcnPHw*7Q^Ep` zZdEM0CsDDhyaY8Tadt4Jnk9zn$AJ>b-kK5}#8`v=@@%!Gb2(jAt+~>g@geQ5<+{&@ zU#*E-=8d>=<&qfi`Fbo*+%WF#?YmN?JS`oe#V0o{UYj$Qp!G;@@-6u!M4+}ORq&2a z0zZ5qbGW^VfEZMBe|vocl(N+H4j*l|4EYI1P*ethqX}qcvznh^O^^S<`DCs;lHPk| zoegSO)YOo*s5wL1!0ZQ(eJ5-<$cfO`-Q!V7?cKX~!-I>abY5j>R{N+G-F>Qwl&ePA z2(9(|S;xABaZu1<3f8T({Y4L(Hmxd0C@W}suM{4BTGIGtZUV-nCF&`+bb=O->_q?h zZO2gR#Qmk%)zz%`jNGZa$!`sJn%oC3Gbt#OHsm?-Se4R?NCT6HX&z2PP!ENpM|-To zJ9}nqD(vP#aa72a2x63+K71^1^D_<^rT<{%qf*`#BjPRw)^fL%)lde$9pZ_&o`XF# zNx3A=wddX}D$c0f-JTv}lkWnzDRJduV0KtIx~eJp>G=jdFKKoZ_#z9E5iz*}@iCt9?i@M1u^rSy zXowFh*wXf3Vq&5F7N{sW$s}zRNB06PbvV3MAIs_iIz)eTO^_38WpRvf-^bP3&S*Uy zDfvPp|0n$S0q`HwS-X+%m&JbgulJRnw?ERrW`>9JB&L2DecO1fJx?uauzIZ!(Oke8 ztIn9qO}np|`|U$RsQG%WQv!XD)b21fLhcY~IO*EL?(O=tN`hPd*8Xr2a%w2rCPM2s z7LT^+cP}FS`Ml-`(OLXRcK)>y|5a-)1Eq;vlK>V*r0gnkFQYl~4$s+m&GJ}?q&=ZT z3&~zauOK`54YM)6(IP~J!`L~bd$uwc70gR{BO$Nj1gp=D?( z1YWWHbWm>B%55QkeP)~B&!j7&?pErgG*KJe7gz4_%wp5pRbGvzIwhQiPV!L#6mNm+ zMr!HH9-z8OYYorM5zDg4xzPFC+Q)kyW8|;V@@L~MQDUct!n)Gt{rUWPK%+;P@yNZP zhq-IAGT+^j^msZ2qQxfUrd}sFT%G;ldQgf!sCSC>lmd8fx|;ur9^=xx_E^_0-0JhL zHLh(X{@PS4-i_^E8$77LJ{#oPlnT`0=m`Bo70mn5U1hPM3oavG-E$vPOrqF~PIoEe z9_d{w`J``3vJQGQhG?kD@cRtOReYJ+W-@$F7s?o~al|wgQ-U^>-#oqRM-}N%|5d;w zOPMp6=uOFb#xl1=1@B?5AVn~&SelW(Ap0rb(0<^O{W?SBG#yHZreEfY@SneL)Z%8? 
zdPe@7UFBm_!rC#b@~$(@W&=OR;J@h|oP4sZUjJUv5wTcvJo4^%JHe(s>RbWHU*jmM&>c7*>aS{TmQw_F{0Q~ z#e7J^__s#e7HnFhZb)Igm}l-t;r7Ys+x&8UE4=T3tcxP#P@0I!6c&I&ty6w zN2^-X{L2!IX^)FhXIZ-0Qs1iDXbCm&DX7(aRxyZ=P2al83vWPW1Sjj z_k8KvtL63_AzwW^9zN~U_jwGh*FrqmI}TwkdrQgXT#hEQ4IgUiy@wNs;GvyMsarDI zU4Ue(Z%uib_X~Ro)hxfMmEV6B>oolJU5jOob01INkVELYNl*HNZHxs@-Q1U-L4{}0 zdwV*W#89e?5K?uM>NsQR_0MG@3X(R%<+@)VgZ3T< zgjZ(l9^?CNpuWCAFy%U5QBTMADzNi+1;Wt38#?vf!)J)Q2Bp!ZI=*SJyg z=9=jp_l^%=?TIy;Q#WR93AozM_-TrtGI|PV25>aA(j;&8r)W+F{{f5#o#aHzMQFCC zBOxt)G1mPH!h`0{UBz>luwnIltH|NZBy-XB(Hc)}uZMsiqR-j{V=0eyM5-ns*3r@Q zPqIBihnp=(dz|ue#D^q^NAc~uJtr070A9c}Joe$&;-gU07_)(6Mklw6qs{bsi&Ao` zZBIFP#V|K0YaLX`E@&rgj>gfo8@Vqg=a2Sk8!--9m6^ah((~Z2LBo!7{MB29A3T@b zS7y1jOS3olHFK})LG8lhijYzq>@?uES;IyfCwptxkg^^_shu~_A?*x;)Yk^D*EAs8 zuRhTyOPAl8FviCeb@2~pS~(*c+KNwF>%A+@4?nCnWa&62>w1e1j9E6Wp-|(|4k0Ys zs&v9!>z{p67DrM_D}m-h20R4aGYZOCtNb+!4$X)__G+JM(D=3)V<8RDiB^O)mv2Hl zi`di$9rtieo*->mep)~DNd-ff3ij3l&Ukg2{D@JR9H$)+GicV-Hekf!qJKUHl4wZ{ z;;D;xwC5k=sdWz-(izWWi~4#1n{_im&TD0nKU+&TIhcKPEIl5da-{_O1Lqqr0tLz+Q4(9A6IvS zsuu${__DDuwx8-|(BCHHyVDcu1H-`N3hn0ikl>5b#*T}c+D>yM7JARcbDH$~`KzW> z?H7BsXXsR=7T>>TB>iw zV$vGISJlZ?E6LN%U2Z+lT_ZMR7Y7a3BTLVCL?y);X)&==+`R@+l<965&(wyNVhot(RB|Y+VQ=Y^Smear%KFBVdo3sCRLXn|LzG56w zpg`%$t$qIf`GO%ExFAr{ueO#O>5LEbe&9M4jEgog;;>+m0cpu{c91~jRh#Pl=mUeJ zNS2O0oRala#YXeX!zIv`=M8cfq$A=}{9u5B*^=zyl+ z(1K&K2EGXcYhK~W{xGIWl%Du-e!EwWnlrOTDP>W4T4TJV#Jc~>>MAmO+Honx>+YuP zG-$3}pt#Sd#9QJN3muGVR@4l5O3egf&Z4rl9Aa_qhk6=AWTdqA2bvh%LmN-0#nJnC zeXG}*NaJaKYoc-|ZAG6-LvCdm;*bzqE3Mw|u~1%spXJHpIh+IpK`sI z*S62vxS{#8x8 z98W?iz0MGTS(r~SIbJQpWohbW$yvxl8Gc$IP|@YeHsOu$IBwG;QsYiPWex)tL`5!6 ze_H5JC%3kOGEcl5ezWyLj+<`3`m*CDWI>0qa^V?CNpUUq)bQDy-PH!nYkAaMwD_ot ztNkHVwah5a<9wR2+z|2}I?}a&4E+f+tUISk)LO`Rk&_wUC<7#kUF#S1Rt0JVJ zBF*lqrhnrH1BrZ{#C8=dcQ)T4A#Ht=PKj?{I9|J6p-p6S`#e#wgG6&1S(=yhjqPnL zY}LaUc_IdUaI%;Z;$9V~NB1J{Y<7)@T$uZ{C5?4}oFE0zon%7Dkj(>`rDr7T4M74y z6|?baZnYx(=Fod(hOR_StIF+&W{teP`DqC^;@xW%P7|e7>@V45J?Ih1aStF~44|d* zwf$k-HYbQve`k8Yt2<*{HeVHvvIEbprs)q z$W)+aWsq6Mt#1+^m6mYQ$9;K!(tdEe(f!6M&s|0pMlA*9))d@rsnJXRp^jOT5jK$q z-pU+Hr{=N_*?a^Ho!Jn+)K&8l73jBle{??HG zC#(uy+bK*K5Xa;bDH@Si>V)sDAo@LuXc0@{FXpukT^)7v9f7lqATEXFR>p8IZqCb2 zuA}!gvgJ|tg^&F~K=aC*Q? 
z>UlOl1I|j1kt5!AcHOKV-iK`cO zkcD#S7m5C%ME>>Ri{!sty^O#cpvUe~N6zgJ*W)d7Yit%g>*`I<7Z$gh>n(~{V^Oo% zQ{O<~@4m(dtA+l8if}rnHyj~Saxde@e-g;ii62P#n8KIw7R>S1ZaW4+V%fUgl_ zSx$#~=oOUDR~FMo+qV=Ve@k=R9C6f+_3ArqE-rOO!O+{E0rL%`(muvR5b&Q=|F-g~ zl^v0wUh|Ao#5YivV~_!#pI zB+;bc{$R!iKf!4_VY+3v-+23pGsd|>@Qj%K8;{K;)*+i(4Xc9nru=b3*Wn0=xsc`^ zp8V8s9cq1#&5(94J=q#F^WLGFP3bG%rGs<+i0uRr!KJ@*zrLv<;>Gh_T#cf)KU(Cp zXVcf#W{^_?u@|DG=s&d9GZImMNrBp#0OK)KRZU}GOzQ_hlh?C@MH&E(t)p~U9jB7E zNj!w)i651WQ=`oIM|Wy%;U_$C+lW2;X{vN$TaKF4RKDNJ^DcjQmEA!A-vKjPt5fD+ zI#xVs{rJ5s12>^YiN~J}WKF;!$}wX?T1MAZQ}U@6?qV!Qb}8Wq@kfv6fVqjL-&-n zWHlGli`a7Y$4NF+YT*GX-P`nu!^W8Pm5Cmw;l4$2Gs1cAyVM6>Ht0y`w1) zm&(i%&IWnoc2hK0Fmg7-{e=pTx!*374M0Cla=$+?k-E?F)lv%+{9_`)A=0bMNbOVx zVZ`h`l<;M39}DoOm?2BlR}u>k7wo-eZ29u8*JQzsNo-pD{j|Yq&}x}l{LTcTd)aJa z@A>W$1JQdueDAU8xI`RpafU<5s1si>=Jx!)%;HQLgSMVz_gHQ>AHnOczA2%bK|Sk` z8p@*62Xu=rti2>YVrNKm*28-rZqCqCOl1p#@jEf21?`*JBJ?pfUk{|^B9<3nZYLHwP?wdSXjLibNSw@9jQC$XS<9)39P-r3j}Fl-SL zTeOxk)TF6S$4zY$Zlt;kBFhLgAECp=G^#H|qhB=Vy3T25-UVRKjE*&O$`)N834xhd$K31Tj6SIaAa*H>Su7jnHd5u$J|?;G+f&j8 zoX{g+5h6s-`yTb-8&Kqk=LT zHCN4*Z`j{G)=0jwt%x7?-YsD8A*%{G9MftI5@k}uBFS@!$*kH zvXxt_BU~WlU^7vj?;m66V50}YWW9=N0?I4eN3}5AoybXV$yo)1b zNnneGO?I$p0`y4Y-a!Bz^w;M1K3Tb7y!5Da;3kzQblu`N+fR&9v1;ZS_@a>%BJD;GXl*p0HshF7RM(#3C?FVWx8_sQvP&;3U% z>F93vZp!g_b57O-$a=n7xx=kKC%6zxw46!2mRinmH+L47mG)`4a?*IMjGNDa$qCnN z53wy@&)rK~K(!t8d}n$zRJPzDb%k%Wz1AqiZ*#0lH|ijUGm%p^a=vuH}$`dxfQxOk}RbVSQ~GRh%AkXDfIUw|BRw1GjqxQQT4f z)?eNT&-itR*N+DU8XfoAQQmpzl>{wGK+5ml-)H5fP4r%AYX!#v@|s~jh1ZM5hzS1E zj39QST%?~m7U_3P_5x@)9$qMg#eK0Z8;+%WEl{#1)a=3M&R;bQ!z4Bagb}*ZsJJNN z2i?2zinH2Ue8W@Yje$*>Sve0R-hlXC^#E1=tdRsSXxZC67P;nX8JHt3(0A8oK13Q@ zY_L1SMLm+{;v=?KbSEoy)B&~Lyy73%7F>_zOgQf2xh1e|Y~DM?0h5%A0N(RcoZZgQ z>LfLC+6Rrg*Yo4{{11@*JeLPt>2_{4LjIJg0XJ;?*S(9kPQGhMK+1tciA8*gn2i6C zSl5=A9|iAAb*l1&+qCU&Fp-oZgMYowD|jD8fv&cXypM5wtp2px^{PCHo;2_ zzB7jaVd)mvg67CKsj2v{@dbCYg8AW1>f5t5VbtrX=hj5UN403UFBj3Gbvay#snLVN zm#9k6sT|gKpU(p8e`hJq{Ixb@TidF%xv${s<(Lv|gsSZJro{d?j!z;Wr#n~M|9Bqy zR^-rQvjmeaH&G_z{coS>^lMzH4EbZJx4PbCt@aW3&w`fM9$C)Z)XAQE=QcMVwVXD4 zJsaOkhY|d=XmQR10nTEKAV$4d}Gyy$|y8_Fl8*v^%adtlibQM4m zp(BYZC$z+WN`oE$Mnk4cp(cGpzQlfx%BmcnT7VJoKHbXV{;UWYO0+8JJvbG|E%q%) z0RD2ZbU4{gRZqLHQjbY)_lE0fX1ZnJMy&hrtF6B|)JGOWbCRt-U+&C@#!-j(WmPy7=iyk-bX z(zY=9crPucFmBvRdsg2v=PZ9&82V=wj3^t~kAADxcK1DN-p-Ug$LJtD=h%B4Op>6qjMvo0K}+bCByCa76yTdKRfy;QY-j_!Vys+8*zFH_%0 zkvoj7eiLmZ`yk%k;&!8K_4EI=mAd#gleFg(Oi~LR5MH<}nIr8h<@T*j$Z6~4qt1RM zfe6VWh?P!lbA*2TlztjTLPGA@i#546i6X0MipY3}d0QOKC^C9G+d$OYU|DW8p|DV> zqShwSxu(?G6hEo@q$j_g7VSp_aA!7l!BHN|-jkiwUzFY&xly68n_vDYypQ5A- zpWslW#I?SWxGQH=e5Fdptp++wG2n>vda^iJ;;?oiP^6-T@_%ObpI`FaTQRbX(+V{^ zl`${syGukZP5bF!p;b7aE1xY!NPD<^wnD8c&Bb*KpA@`dkxJK%7axYkSoLO=@j~DR zhuP)Nm3xn>dUFSE$X>Uwv`m4F`NfPluB2;B1^NY0jL}J~sAc5l+LQ(e{4HVpL5(-f^4nga(c|3}i+34vQU#%oh-&FCrIap>Zfa{FmoQ-w1C}L?< zF4uDz@}kSgJbQsvNBn;DHfq^01fndPNhFb6U08ilViqT-$Jmj;UOg-SYu)h2*ZCKV zo(rbvpi9)!(rTAQvAr*y&*5bE+DeKQ^Kg5skxSZxg>}5`khQJZ4luCWALtY!G^f=p>fU zZ4cb4rRg#y5#{p{{YL&jsLDTkpXUY?_vsRs;Il?6SrAnk56|&^c|_QdjrBJa2*QF( zKxZswl=mn+tl;i;pDCSdBM|IIo^kr-4W9Z4Z=qN&5u_cp{PRZ|QkiCQC5q)aoe1Yvd z`38w9W}-b8f4mOuR$gc+1ITQ-3uoxS=s=#TmK$L7Uz2Yuk*F$Rwl{_=Qmn_fY>LR! z<)vn!;8^QEDLgv*jIO@;RlxeG$}!_5iyrfb@&+uanf9fbC2CCOWV=(8wErd2S9DNY z^5Xa5%B6E7`dZmK=lw#YrOKW556MT~IJU?Gj9zyhu0A-D5*HZEe913>hI*7P{j6Tj zzA9fQW9jlOcXxN{u@y}NiqZj(`A72=xaw2r6ARSOKOY&ld~#*r3MtuEG|7{Ymv2bm zw*%e>pFHQAcT{2K<#+OJGPg4v2BjgffE*G=YjV*yPq4vs{`1js%c(m9S8tPT{fc~; z%uVl1Pl+WuqVzeYs`FT)k^S_=0r$v}=n2HJ#9YsbRis?#V?S8rU9@V1b=ixMjZ#MA*@gGw<;Qvrejd(!bTseb*3npLD_zYrw9UEb!!1QFAMm*KqMpLSXF*`kX! 
z!1(c#4S>49a+0}0+`Ub=zz$tN!0o{XN~9N>8GsGsFI+fF2{zD=+q30gs9(S-u@vL&JEuBsPEB+^6WvXP*e z+gR3;ws2l8R=j@KEI#%;S|z&QPIab}Kqr45n3GVX9N}RAon%s*sNA;;b>ynrY!6XV zq)_WPI&EhldWP<5%JV>Uo#hOA!3>?JuZiZIszdi}+i6mg%JjY?*QCEq0-?qP5bmu? zjMd3(+3jTZ_B5qPv0>NN9?xW@$Xs=OeQ-97od=1GKD}T+aG(_9JrB*>RGAjatko3< zc)ERpk0HmbsDm|YInWCmlIC#tbrN*?Z@F$bsC$e%d_jM zNPB1&guCRlch=ea>uLaw8q|6y0^Oe{c19osD$*w$XF6SL(TehjmF< zWVkrgXk(g5$mt7ST|)ykCHH@1cQV)&yw=`PPDh;0vu)Y#Kcx~j!O<#mTBegGTn_*3 zlf(Sf%)Ibft6GeT1Gb+hnGmpeb_20@VNTkScu9V@&0Sqnvv*n6*6HN<^E6?mOp7QI zcxi{ft@Ft`?YIgxO$6W#Q+{YxC<%P7!#`8+vTq)r4i676T+OXQfPgtSu0}d9 z(Nibc4{*g_^Xfey;lF3}GK%FMJ|7i2MG{CO-nc}cY-=t;J}r>LH*dnNU=XN#XG8+3 z5%9TPwa#J$3A}O|iJbrnQo<aC7&$^WXoTWKZ4N@`qdpzk*8<8dVibhd>wh- z@q5`{uaiJZE?#5x2lKvurQkL-Sn2l^&h+5V)JaWe_<%}LI@$B}45^HD@G*2TkXGwg zYO0oiZA7IEP(bmNT)Hls1D3k)o1OAmFgW7KdNKI3{-;J!YcN>#}oY=O@uB#%*Jwd^T$UEDJ$4wKc~vzK!k zLg*Ydp53w#6!o!e(e^1S>z9P<-S4c8#`n~AoOSIWu`oGPttv~(=9U4I(4?KT%$&UM zCA<~A^HQVt96Xh~t4G`~@82YAVYlePXBq>{@Km9`9x?yCj9{_w;0a4^6J=|St-m4} zYPdyL-nk*#L}GEpfVWh|mok2xf{@7$A+!&9lx z=ePu=X@#y9*+AEYU9$`^S-n$x*=Heyju8v=mW8OK{CtbHykps*C|pmu3t5TpMGogU z2dDL&3@SA`taiX%?MIjEG}w+$i=Ow*Q?^^aCP@m_{Od<9QW?!NG^QnYz-BZxMBY0( zp4I*6O4cp1t-%Y7@Q3$EAmN8^=<)%MWh_`uQ77BV6k%?+cn9>uOMijz6LlJ<&o3Gr zBTomsB~b=Awc>X|1lWCKWp>RDNCAt0R@EinJezGt4+(%CLYT5ffYKHpmMj5}dMx5g z4nVR->}Go=0e{*Lmm_F_cs9mq1OaJnw#V>`1IS71s=^mS0%vRuu6lpTH?3`@&*cnx z`ryf=cpWh1AO?XL#rq@>j>GLP1>kjc@P+P8;FpaPt(bU#{QaH`1WYHt(EL6iU(UBW z3%EJ>5Km0YO%jL+8KSfRF!?XY?|^XMv5y{lL;BG1-2*!=FzN8?FwamRV0TZ=R;z;v zaIKY#n*;e1vG3yX2lDqjvM~@a!JhI~1~9){@s2@YlbY2qY}#Oc4DMZg0bqVNf;|m@ z1Zhu6mNA3Hp%GoO2NvD%-R$sHie(X+oV6U_8hAx@Ki~mBT|yS9GPX=q28T8u=y>q^4dLC6aHoZdKCDvj&q@ z(*ez8hIpxr8R51p1cVcKTC@{TWoT*mAflu7HNK~{XK~|Ao=eecfdh11(78$$lQpsc z&1Qjk-5;?*Zdr&+v-7lSvul(M3Bw1${lj*APg-%zCU8?6FGE9($z+z*S!}5E8`1Ou z(UTF&qRkVj`Jzze$LU-)Ma~rKZhxHV>}!1Q?AqdbNZy9fYK1X$ z{SS+yf^)Or%#z>+8+R;oAtM6Vv68Q9I@CZx9=ZZe0mdgYIG_RSP~M;I5QlHtpY11L zot(Of_fkM6`AQ6AiuhkiVN@aVJHCKW2!<<%}YfK`V)7GsWbReP!ni7yx>eaM@z>eSI zg=U12z`1Dy3iW_z#b-RBlmcYZ{AyV5ZzSPGn`62+oFxayKBq9J(w!60r*}g=qNob zZS-c`>)Uuy20;%L1>ED5GxwQh^uwla%BQyGlzTFs(L*#&d=gh4&k}!-_VEE!`PQ2Z zTQXG*cKa+^A<*58jXyP*gs!^yQuLDdvgpWmYK}BiS^pz;i4uZ}vmLB#ME{$VA}yn= z^xk{zd7k%FYy>}El<)4&S4#NKJ4e0BCUo&dtRnl?oMM+b_9O)LKAGKXJd0gYA9qqU zhf_z708TTuz^Z~zj6XNyy^2IMYaC3!XMg>8 zfy^!Q3eCMXz$B{|jGtcsU;f*VWMJ$$ioi=(edmtLDRQd%QW~vvNEWyObU&U1 zei<2iDhNbmnu{=9Q@{)VJvoFV{&H({=siGA5q=Ul1ti-cTQb85jF!6LatCPtUyxrW zki@qjbyknkBwDBh4qo7_BNuQ;wqoW0(sqJhUlD+H4#oBWL10jsCkmaXfH3}^e49j- z<3`)vs!O$N}ph+7Pd%;gR%g}GKlexX7iU1}z zr8u1dVqbTX6j~C-STf0T>?gFk-}gFg|7ij?mh@rm>SnGdo7?Cuo65&fQPu?4W=z(f zi=~E4{NEA2QIY3)KO{C4epE);t0_n9Gl@l(TlohupyslJFqn&1BN|%;JtzV^9 zu|t(z;pX$lAgCXth^1wbzae~SDN4{E5^GhXBvTUq$b4PcSMQAclGtqkYzyjS1ehSQ zOm#ZDTNZyVmc_8N+b=$L_>#acy!dB?@E<}4Ko?)it(>jbT9X z`Yu{`z%MkYUE$&+g?7FQpr!&G^4E{AkBn!0yn5vlaJo8!ULFFES%3SH3XFYmoF$@; zB=g1=OPmaF5ZZ)inFR3)>bf++zm!!X$H2^kEdZ#S{D$az2khVP$)`zWf@v+0iIf;OQNEdH{zO&|M%m3 zJ?_J5ig6CnEbcvDyKM;TDatWo2T>#hd{5om5fQwymmS3?G=}pfpI#G2Mg&W{gmsKt z%*0RgL?C*`h$^6@!~`P_+gDR%Am;MIX|Yck1a~MbwQ>x&1mIgQ3_N>(W5i7;83JpK zZTGX#0~?#OI+4i_v^Wo4ra9Qq(yqX)O2A6FGU=(#0B1|F=gt8@E#;8-x&dh1$FVvo zejqyX`8hWg*mi$=2-K_W*3YCgYLm zVgQ`4B6|H#A+i1P0$d9`@ikP`5`;1+jqOSu3f8+~^v~9IQ(ca!fwc$9Fjt#?a;Sa~Owj&&$ zBD$)UE;AWAR1{mH`OD2dA8q}-GL1MhSwRTN==LhwxrVO7?fA)DU6jKPR}>lOxFTm{ zJt*dLYdk*6toFS5KNC3wlQ-NP6*%ja)yTtsq;4*#ez(J?wBJD~3OXM)zKU}8OGfw7hCSV1`!=9q zLs*Qf-_;dP2+wMPQIDGKYXI5YMAH`MqDymljrLx>IU`m+amIEHV4*aDQ{3&qfH@XT zyp{u;_t%eX0BvwmbGNAiq#|tW&j!Byw;w&p+~(xSBN;xhDb%dy$}3IpEKW7g5-%Kt 
zgO9d`#7VUoZuZf|$!&IM2w~gt_3Ph5{F(6W+lM#iho<5?0;|$wFS1HGy@ko`sj0Av z@|Jr~jwcC0r@oC7KAhleYYk#NgexLe{2`b6q;^L0JkZBfHyd4VPE=__AfsPG?N8Ipc+Db%)H>p+)`6T;;8 z?uv~R$MEVF>fAn-L-g-y*&Vi9wJy}m8!d!xJDi5@`HpwH%Wr3}3~nCD$jJ%!_M)?? z2ItGa=V5V_Ebi$KwYA0d8=0hlZ-n~WF?t7(Wco#5<)5C2EDI@=wl%2N1{h2I{a7}D z0|PM479fz)L+i?ymg+!Kve`HE`BG$XC~>>5EIBpxc1Xr<3Ti>^TMdf+-Nxb&!gF<| zP0}oGkT=l&RBptG%h-*~xqv&AeYBxZ_eG$Q7;O#MoV-Gp%x?9gMcp)5#G%z$O58R~ z4k(~C)9be?fZekZGx!SPuj_yP2qLZPPp)0s24D8Ro&5!Xt-t+frwZ1=Y`pMA4^3jc z*Qjc;c3zHYQKxw1x_JM&N0ly%dKEM6cp=M~BHA|x=Oegzc&5QEu(#A*@>LVN3o#Ib z9iD+QV^%g)Rzc&Sdxhhu@qx--t=Aq@8LBJIJ&W;=dBKOy(01KRxU)T|SAafD5^C%5r zUyp+XisLMQH%K5JP6ai7!tQ6DRj}UU93z>#6=}c^s?(k8A(L$a+({u6SRAkr3% zN-E_gmFavx-V5SbC2voPJ_So3L6HZ-SXusI>4LyrD7xr&5XIt4z8S0wpx34PtV19% zeg2X~@r?Pbt?S$N4QmcM8|(|y!kPKGFT4DE<9=T@pH-d*Zy%iWG(c5V7Gs5#fAna1 zT@%yVmK}vWj7yW<`jA~)e|TlIYIQ1BSk)S;zr4RQpdU84w_85pl}{ z;KV}p{MK$Yk5_=&!;KaiH&dMk)lTX+ zXCcdul2c#c2M~?0!#12Rw)+&EjuLwIIe#4ETba~5S2@vpooJ;dPJ>98I$||arES+Q zy*jGAq^9yh;T@Rg9lPNf5dDy z3Vi#f?{GZu0F~**1H%D6R-*X42*~JPk&ow3VQnZkBD^Siruj<}Y|$YNeSH6X6dV!;_Sm6dI?gtq+p zR!TIdp}cPCh>fRi(bHg-A>#7W&0%Mp6ShAi6k7d_Y~yaTsfk3=YRFRwV$)a0h6iFH>jOolbn-pUhrwvp0O0aT#FHGfcW0!2n&hTnm@Q z08L~6&U8!yfKHZ&1H#|Q?@8is-wmyq1hQd&>M8LGkcKdk0UjGLTKw?_>^(>l{et|$ z_mV1X_v`b*>8_YIY7~*5dQ6w))-J8*lK@q8_D{mqcP&D zU4*aZ%0iZ9`#TO}%dncF!*418Bf2 z*jEK`1V7f%C5a;2P48=|L09#hgl?Xp|9w{Z8*shS%rA`8dS!E2wok0r2VeB1n!f|6 zK;PRlaP1p@@f?gPK7q@Xh=�O^^)$55%-7U9dZ6_i25JWnTOhq>Lzs8$?%^3&rLk z!+hVc#-n8K+uB#>T9?W@QdT|JY~X}r*y6U!^_8KjJ4w(=C-Z|7Z_=rw9EO!n;e&&E z6%Knk?%sVJk6?%v<+rjM!#p~!LwuprNtCIg!;NRXD6)aIefQ}C6Nr->a_-^*zlrnbF+F?ASoMjmqX_22mac>C(GD7SBaI#fU{ysXaBmdKcP~8v(abNm_CmlD$gn=2tZO z-lJ}_t#;ubY5Mo5+HTp_hRt!4>5US`?d9!w*WFZmRJ+0*q} zXcxDq;Z`*AkZLZvsw6y5ucBqB+@=)`_oz--_h?j1G!CG3jC8S|Z3$14x68A1e^XGP z3XrKzy9b3z(^XEheAS=arqN6sX_Sh12DIAk6hYSWfG-K}^2AGJP3~LPW2|BQ1g7^TFzI zj^}Jyqj7uoH?N1`nCg=t6V%_SFmmqwYVG<}GOCXM55Zpwk)`6+8M`*E(U{QD<9nA7 z)7<&WMi`(0#Qjf(YI-=FBCAi%y`%bmAT$7^@BXCkP6PGl7jBq{2V4jST#L&f&p;s; z=s*R$B7d^XH_CuAm#_4i164|;n&N*O5bRCr{R|)ALdf_~q0YS@n`7|ZjvOW)xC`|} zaqSKk+RR#JWiFga0bS4{Zw?s;dHdlfalKWi2BEm%h^ubW;L7=qwZ+pG=-Hcd1x`Ic zqguROyzu+96o%3}#%3{D5K?8nY=2iBOtXhCM&JNr{glV-4#;c%WI9r=_GyMu!zHf7 z3TtMPAzpbgtsR#vu2N@l?94MMC71E^)6wgJRy^-kxB{#Knd?wIFZkuSo3l|Mjv@c1 zu}T(rMX4|Bp}$X)VTvdO^3L7?>hN7k(5+r`NoDqqmbsBn zg3+syKmR=5>MU4I;jLHf5kNv@U(F<32J%Hl*3WbW_`_u89WQ_ly?g7GX!vi_AdzSO zlic->@_fH(Awz=i2(X@8%-1+bVYElIZEn97<8Y-qqV$e&UI zC{b__=Y9h?#h*;ym6a&WFqt;78`$ybs-E1l+m2Fdj!E_;H(5~FGDOQ4>+MyBL>yl6 z)h^kDJUO6b(mqd)qynMn`XmS^1F~{TbXg$|C>e!>_5Vx*8mXoVvYi44iz~Gy69}fI z;_Tl7ky24M4ow9V{>gM1;lr&W`A|ikgA3A4f!vxc!w^v}071?le}G=l*C|6IQS^c~ zl;57`0>L(bs`>k|1D+}V6VCutP4`bwHB=zX_hU#7*SXJ&b4dZ#Z1{=qD-nmN8j~3h zc#*oNF+kyf6udD{{wpX}9G$akmP#xKaIgYJW+}i8J?Kq?XdX~09H;~xU@p1*skX}i z3*|N0TVDiG>>u;{zexkHo9lrfeyfw?B5UWtoJs8VHpwaTjBk7=`lm=_=wX2O41N-A zpnBZTCG~efj=T+0Oy5xh|Dq*+n_p=cKk;F0Scnz?!z{%7Watn`oyuXJ?z`LyvB>MC%L%p%sx1nJ;&@6na5=k#nr>PC${tnJA2%7PK_mU_ zFEoB}jS!_oE20-?-DTLop&_1hrNdMKC9-hJ#bp8cnXNk5!R0zyE^I#n9G+~~OwRAq zgdnx^@2B&l!&Ahf5*>aoDJ-}d=yPk(YiEH670K zP?zPUJGW2t@|`=AXG>2Aa(?_tJ0OU?P*2HKOn1E)f@D5mN!#a?2P<$^9rPI8djaH& zFGJEw3mniN5uTsEw-8tn`KJ&7zCqrW>&fis=4Z@!DXJ~x*e|nOW020dyBhSDb@Yg? 
z4o!?b`6W;W8hLT&{daSkf6o)6N;R#LO!1T&bw~6yJ*w_5$L*x5Lvj_EY4DUOB^2)P z65Gm43}Sb35h?b8G<-4J9gkqZtN^(9gCQkIOymD4iTd{!{s~5T)SKPjow&Wg;t{aI zw0>MJlXNhd@ad@mnj1eEPtrT#6oE~@8Z-WlNUs44Xq`u(0ZJ6AZyUfp@lTPri7Wri ze1Eq%WLF2que6_Bko>5!uXK8M&MQV>woQri;>mvR@e|ugqxVZos95sdu-#t|l_}}> zLX#h-kxwWPT~lm>KtX4<++W_@4c?5aSOWaUTxO|6O01oK z(h~gAL{?3|&LWLISNe=i`_8=07KLylBq(cF=SKBjd{uqN8q(0>sP`rO$kZm^-o5yB zoEM)Zs>rfa!FY!|gaA)zrF_pkD=SNZ^X#8C{eSrCCo6R&F)MU6@dpd=)5g6;5K{}4vMJx)Y)e8|@%h_<4+ z+v!ka#N)8;PTh%vbH{J;ng9+jV6L-c>L8%`qdKBp9gd{$6Z9{y`pX$W3MrKOEc-MS zxEE}U&r4%2Ezel7iP_sv^A6tOW28!G@c94Z#fj`h#VLgxa0e$ zofnC$4n672>IKeI@u!2l3;Ks0quSg5SWBntS`PsB{?`xx8WtE{QS9tmd%dznOnQ;t zE16HpRQ#y(ljO;}Kc!99P7ywI8Dkk0WJ6=leukJnwrEg0<|!SzUahUzSu?PPtZCzN zdI^v2UWYrnZ*tqW(oVvrHx6L5`iHGUc8xn0f}&m)eG_x5L7|a(qQcGZN{`|0*KA}b zFi%{hi7wj*C>*fw@RbBofFu4FX_?IOiq%=ZsLz!#tG~1}uRjwl+yH5v+l2^M@5Pi) zhpZv(T#t|;@FNrJawGTRge37|%he)}#?9$SeUh+&;$l=IM^YFc40fKx`-IVBLa-y) zQpSB^-1`z90_*+R)#)s<;(N6aaXUr%n4P$2=EHB;`iD(| zTzV6h+Tk1TNZdD>W{aM@fSb$m411JqJ&D0~Z8ebGj@~|6dnLlRd7SXU6oZci)EEmp zgo7$a#z+|eqEU(FI=$Up`3I23nl)XLuW14gy*?tpRXi+Q5F){%&%XnJ?;51Ieq_J| zp7#E9o)36G3S?xe08+<=ab7+FATDOlDN7IlHyVpVJ`qs$*N5jGFU@_)SmGXB{GxVb zcWjC3J+4}LiH)7<4{Gg0X}AtUE?ouzTc60NC|OXVnxkE;x?DM{gqp25F5WP!!6uIE z?iA7CU;dgu{ug9TwDqfhE4lBHs@=p^xih1SP)s$9 zRiwJ^$b}&I2*-m{7}arst^0;+NH^Sjn6(f!TNj-ic_o3vEmoezw@g#5N;Tnbd7eT$ z<8qK&*SswrV{A ^EGvFVG)rdy2c#$_ELF!CmQ$5#1V)_B;yGm#cJKdTKd&wepT> z%|^|K{lE(StBFTVXRdH_$AJ9gNixBu<59m$-|*C1`Ku($%?D5C!$?D3O}EDI?Ktn# zs-^9l#^WZhR4U{JT) zJ$)b+%b)(i?fb(RfI@fvSN!dX+YCI8tD?iRtF%+zMov+sGn`>H_q>qi1ZPsHXe1K1 z#*EPTFO&K}a-N+XOa&Y&=S4!hmN_%L(W=#hd$HG+A%6wgE|3Au+w72!N+?eb-dinRzZHnY&v^w^67T$G2s5@LkH~Kg<>m?pbK7%@BE3Z z%NjtU<^OWSqH70qGM!8OQ5oq+8YBCS0gAwKBfnx;fD+50<5f~0-dKY#3v7D*cT(E8 z6UBDEssRiDwaaW`>GT0z;yK;qCx^!9CI)Hq@ixW}YN{2Mo88OY1|+-QNaF8)t0S52 zQK@^nz(#zDy-qrWcqEk4HryTYP-RtYKe@FLq?NWuq*I@V6WF4aFN(s9Ax=+U@6l*#CR?gJ#iKl0Wr7 zK#W5)I`^BEraxCp%K$C5vEcT&&`t4@L*T*V&tYTxn9`Tnkw|2F@2Vd$R^BCNp^A)C zmcTk{8+!Hy!98bfa^2jEqhF71PQXTYbrvq^=d#S0RXNX_Eo)XbKw!_LfNh!jm(A0fNw!hSGfReL7}2mNqZR~q}94| zg+aqXG8KElU-pI0Y5Ljb9vga&xho(T!dI6yJgU0E15oJ5$unnjyj_p3%g_q`-NNjmP5a z7~Y5UpCD~Oi7eA0$?>h#P0-+w6MSkaF$Y>K7ef8m*wMUhVVgwgzNhOr%ZJ(1+&lRP z;PX>dd24qsp!cOmmf3I$06oIV983wuI4=DFYW>@#>&JJ5zueQ1i!I#|+=8kV8{e2i zhqh`^;M_WP8`?_f~xBn69pjr_3Oh^SXV`?Ms2Y{R0*@7#3C1+uI|mrv+H<@ z*%NstMe(SetbamS2k*8*oU@*g8*6idfiYo*~}cYRXy`7py- zOb}DYVANp-n0->o_FLz`dI{(Yqzpl@BUI6fu;Hq}wphfxr%dL4n31n0EX` zbCv--eU??H9$5FEP2a8c%MdMH;lwBER}edVP~LVB5prCLLUO=TO|29jcF0F_o&Zk< zN=GkWHk%74JChPZ>lL+F-I#Ps@_0B#g+5NOT;Ks?N#| zHwqET=@$*4wM|WMJHq|2ZT6D#%IOPNAC|E8UFauV)$Q9A)5a2Y+aloCEnD?{C}1i; z@Co=|M&2#a`5^W1^>cyk8L&~j%tw@)i>eMBw>LK(Ku_Fc8vLQ@oEX;CAVj2 zFv`(xXiaP9@#rp>UG-nvM`ZTLQL~P+>TE7^acJg*-D;$z)de3#N^WHc*g|6p=K8kB zOt;TaUzfupqNH#aPqBVjTw(28{8>EyQGx6md94eWK2ROxG1h$L%G&A7b7RIquR-Xk z&AVR=RDe=lQREK-vB8Ejua}rWVCdC^apCXN=Rx@Y$BEm2&+E#%X>0c0^>qH=Q((~O zh}v1QH}22ikk1y+j_>O^#F`(NU#;=p+nr68)_5A>p?ck0$oMM(CC?LQjMdT8BQb@F z)QO}ssS9kaC4*Mnmc2QzjP~h99u4blE5@MFsf|Hm;n$h`@%of#MAZF5b4KlZRv8L* zB0}SboYqW?rrq|J*?wyXgYMmc<gJkU_5wQ;q@VyhW~7uKCCDs{?Wj# zyL!D!ntAt`oD|#(tiRb?ky%9|8nfZmDr)l0?a#i}0@V1M06K=*B(Gba?PeefO0Dt+@;y_a;JxJOHKeNu_hHsK}uDKQWQAZWV*urO}d#1M(n7Bod{w(xE#h`U8CyD5_-0@2z-C`rAG^BvP>r4Vyj2S$`zDhiiE%QKly|M> zIz6^WxN4H^bRN&WEbNT(JjCbcgk|SNmynYaB_Hp36;7k%uUhORxU3JaY`zNOrlvkq zX(|SgvC*gklz%6vm_Ya4fm&Et@WkxCi;Nh&2pZi`bAGQA! 
z(T~QhA^*MZ{@6mXt2?k{+HRg4+r&4ReR>&|6m62scYOUGgb>_hY%w;g@r@^msRe9V z`!MD=%)O$oln!^j-}+wjiUsOkKF`9#5aH%DBY4;rb&1W6(0WkjO50(~!6MtNtcHe0 z-8SNjEPXS6k|LJ6$G)+FHQhyS*NN=JIkuV6etrXeCF48gBJ}t6 z717^qUm+0=`d84Z`I&&5qoC-qMI+?WS@-SZ-mb=@0ga*8~} zUuLxFNaF4kUv<)WOewMX1VbarkBn5%3zWC~(cIvBeGkZqCd}ClH_{2+ zH(BS^c?02D&%ea8t|H?z)xKP=PG`Jp6ITndd8;V(ZYM7AVwE|Te$|VCC+}okg}xor zvWu21Gb)V;O3&1k%a9A81Kk=d%=Mon!SO&vXy3z>fV-Vhw6q-v-@37ph3&q1#cIE5 z0(N-DV;)1_LvGL-?)>+<@PDt#mUXE(@{dBZTPotME*hD^QKOxEiUJK4$vz)$IKdFs zgUy{Z-;WdkiEN#TlKpzf;O?%bX!Txn*^=iP($TY8BNSfEVqK^&QXCr(*O-LCn+F%E z^{r-RERj3&mMH%H21tDQt~OuwUL2ymdJSo6U9A}muV%G@t)z+^b`TFw@7R-sr_s4H zbXzB65uZPKcSQKpw`}Nl@3r)Qkp}NJns;<-fK>MH-WROidtEV{w1%{H6w?fWi=|t` z%|wc0VmrwvVV!e>3$*%6CNuW+JM;E?Tds`|x7c0X>(zS+)m0;FNGK5MmvFHRn~d#L z5z*D1VH0>=NdzP*k;BUarQqO=j(KY&Mv{?<`lMcR{sQR;5d*>3{}kGPhoEkvlUYA2 z=l@QTyKoiT^xSj(OIH#FR+veK_nvab)u_|bpBy4g1gGZbEeLRD$IZw_PV98SV=5$VM!xqts)9bFsMgtb(%2&xTz{!yyXQ8Um6($miK$NWUdns+2U|pnSN}nk4Z2b^ zn1;vnkyrnY*FbAB+)Yz#-#J$98ai>Dt$HsoFj``W1o04NA}|Vqtt)t6=jrsVrl;MC zcjhZGOZFNeIxf3VuIfE&@8F}gp1X?bTA&2!`o9Jad?bdFj+9D+O{UVMn_H$~Ci9=z z(oC&_y#DX~>7)djb5DS;dnEnqIIk5RP%M{^RQWM|kGH8u^`l-rZfY zpwW~CITPkceF9NpX%#=#{>#=oz}7Xu*5`lxlRg+p=rsRd)9pXaB>z^!5l8rG&b)NR zUEUx16;gd4aeKLHh7y!eX#_nz5$zUPgD>Zvulroki(16-aRsSb!tbZOK^50O)78IH zC}N6b>sqm*A}pJXDcnlU-4S!)(q5(MnUCnKPBv~@*+YU)yR|{-w;g?Iy$Ht^ytGyo z#Q_o{`5gHXs98AzA=wQefrpeTUaj~`@o4Xh>PCiUqEV^9(aZzj(ZtB+6A&t zG#&L^?O@S)K#mwq(~W6R=2cq7kpyW4-+N}<}16)ZzP09VW`O#K?A8;yB zx$Q!7E-taj*8%KZ&MPmz{L$S?EYKOYA5@})7z)OeGXT`@C6k(5!O?uu7EH;1;z`O4#Kglb%M+E z0I5oZ8I%TwuwNPKdstOM%NNU5G*OAPl{dVpK3;&x3k*H38~X;YITGqRKj5S*e)0Cv zpxSYGO!6+|?`vEDYcvFF1jpx-az7oQq_Us;FrEUpM%<@{v8TWq8Sw%yhk-Q`1ez#< zHL4xZTmm5`f1C!p3~+o`Jc&sFrcx5is9X)W8f4-D0pM8%pZuXK;H(3a>m<*we(4LW z81GKpf5b8>Y{AxyI5u1h4C}d{#Ovd@#O})kfkR1mzW`Wk>tf^4mE^116uToTHce>O zR}rrFJVm&(LsIzh;^iy}0Fb1=9GwLS$ZnYO{0D1R1LbSqTSjePSnd5~fKL{x?;K|U z`;~dX=>%$&6DMX@!*JZ!`EER}P#a#It3IX_^RVA?-~CplB&llT(uGsuv_e5s{_soV zD>}X6bZxQSZL?P^)Q$dVb3hdUS+Tm%UWDp>u)YD0RHb7)TM`EsU zZ>4Iyx{Uo^qU0^u@pQKUG>#kI?kZiarKiCxYAqiq0naTt72>*GwH+>`IxYBzt@2hn zz9uanGW>%I4MXtW{Hd}|T=3poi8nw2OZP7qu>7Z-W3|4oZFMfhp)SLNiYU2yZ%DWK zU=+SCEslw&yl?}Ju~I+u(#d*nj8%P5r^|DhUl~AYH9kq4!v%3YtXui_z}q#>VM~#L zYO2#X^*rQ2KS`Y-dli6@G=urcL<0;pXviW0FsgrL=)PLKwrcJ&Xy;9EtAAIPM4s%(R}i zi<-aZxyu`069obEh&Z7KhnHDaAr6-LaX>i^?@%@nG4?Zl;dX!?2@lIszCVlu*jrPX zo!g){kp>NM8Xu_qZx37bGX!FKlSa-Ggi_-f#!uG+YT4e5>;;Vg_$5DH2qaAHN1qNc zIGueYQL<(aIHBv6);k-5{z~KvuM2s>n!cQ1Cpv1Fb!A9=c#rE6k|Mt`|E#9*H*gd{ z$Eke>9k&3s;qYQr>jp4V3B<*@;FtPXKAQapTIW-=7e{%M$X_WmZ-2TwkjKToWB&|j z%U%7Z^Gr%_Ks{Rc{f`DzE%I@?_DJmWMaq-}z)vNTAo+kWDY&f4et(Dr%enXOaJaC$ z`h|d1=lzv9z@JfP^J+AXA!{#vK$W*=;5i0%=H>~8r}&Hh>5=p(394fQMN7>MmZ$Lh z-%l#yVmYG=J^zXr`y2f5?|w-bzy6K7j6tt$;et_bz4-tovraf5hEr-q;CT0=847($ ziCm=PS+NRebbhfH8|JrHy<)unuLMIC|nnmM2SWo4(?d+-c)LBfGGJ2 z4DeS|5p{GItRcI{f=hzocYW839=UJwI={2ZXzpvM%b1=vLG>nVPe;wp@VHy_v^SW5 zW-Cf3&@JlYsxc2a!qhpsnaI0$bb^67W4egY0M;2PeaGhqLjcl5-jy@}K+HahlvVKQ z=$&kSs#BH#iCKc-2k!wl!|(PZ3!~8Y9q;y-_u4H&yuCMaaxg*qVpa@x5&GA?wdttQ z(i?Yh`remug4Aicna6sC1E8_Q-;-fx!#jWE&ol&*z7GM>-<*W$yXdS?=;yM1gBWl6@1~-R;?!c9wlLi_>1fxR_7LXQ5GPtmX`~)6umTmOoCQvAUEEE!A^opLXbL6 zXG&uDw*DAEPSwB9c_+`pRjwy@%2hMsbTtgaeY@MX#6H| zjeFc}1HhjoK(T{QNAC2+NB6Qk*rl6sa2vPZ1FP7(p{N$H6PM20J$kx2U3`+);W^w~ z?#5x&$&_S(^RI)BKn&{ryrt>RlfgFbWKbqO7~M(@R2jU(c!xK42Gj{9b7E&+B1y4E z*h*`aEVe3pH4fVJKs7z9s0lBhY8B%yyT@<4&3ySOm`c-iM5coXh2dFH|SVNIy%pTXWGb!m0FpA~-6sA+h znT%@=iNnm2TvV;g*Y*;;9JYkVBm@uJ3PIqNHZ3RT9%!R)vKu(fv$yW4xXM*&;k%#u4tO=a{34D zeGhh?bS8Z`IgI1o2{+VsO`Fk9kDmx(QZt5y=%W)DOo`Q?Cf+4C1kIO~hv-;0x`naK 
zM!8vXv@9w7;TpW_Hz*7jwAFq0lTm1sLZ5#3)Sja73gaXzl#6_N(>FumYOlx6n6IDR z-nWRp!??~Cg8TacT9jffvcFb{7YI6XNEuABN&_G2oO1{lD0QIl3y!A&@KfnA!II0s z0VCtLwgaJcTA$gkZ-HV{`C8U|4pY7wuvuvqwbk4}2AVkbc z77+{9cIZql8Zo0UNo)7~WQkv_4UoKhOhlZr)o*kB0Hdg+L{seG@f#7;7`9!n;4J=&nLXT|-#gD( zHb7lZKrvOA4#&fWtz0IkT(zg2N_|A(P>6ss=uZF6OsRac)Cq6JtleERk4pTibMmed zsO}!p7x-KO9p=uz`sda&?@=U$Vs`3N-!I4h8{1MwV|80?whPefQv z_ju}!s-E63=$!hE{sFL|w5gf5e`}XV1{Bp#dhLmU%8SsmeSAQQ70i|8LGz(s8Gc?~ z@TZ1zA1ci;iMl#Pc~v1KOcmTTNX0Bb(J$Cs3dma+V{HB%6SLL#Xy_Ttq;z;YEW3iC z%g3QEHCCKxbb1Hb^KOqM>_(>;_kFkCxpyWX4t77d9lZ8w<(%G zrz!#P+9PB{3mvh#R5QtK7zrn2`>vdUu_fsE4V)^%lrP&4%}9Bjd!4+w+V6k|%B)+A#dpo9$etwv3ja$n3Dwyl+ z#C1JT8=yFuD)!5`a+-H~Y0R<(TT#nxC$2z3ZG^ZwU1Th>H~{|7sSr*5K_^}Pz2JT& z*NtUHdws3Th3QqQ0+t%_DBi9w&^|@0H0hWYnfSSpDxLSEZ>8v@E8xZLBMGIt(Vo=} z+WjnjgVl$3KU|WJx#msfE#rz&3={%OKog!H={L||;K5quN;PChA?KMl6YxI>L>ego zp@Kec9rss~?p$u!51p)`IZJ?feu_*Pk)4tvQzemKtZ}ZK{1(m~Dco~9gW5_WSrZyk zLHLK0D%{EvXV0!-+<|H0BK#Oj8J`I=oTYE{GVKa(Hg)P(Fm+5aOX}R(WUI6n>)hWq z>EGRN8ltSN;2LE)=%{f zVyA(oI9)qO3jRBxZGL_y))aCU>+#+*oF>cLx{nqMy#rko6ck=l9aXwBm@| zhC(@a2T!?2Pi(9aOc|%iBwr<(N;PVs!kg@8OR}=)%btEzVsm`t%ulk&AF42XIjBbI zVC6Oxn$*kMz?Y(!cwKtl)U>6APP56OPgf(&*(sQotwwB@tY2T9*`p$PDTi)*^<7u> z;hNzw%beShpvIQqEMgfIuwe5zI+VsUHbD_G1&t)?s+3z@3#7g{x;o*+myACO7mF~Ofy^! zaMbfgR4BG!ijsk+C+r@AY1ntCm9N8tNwT=vHicA~S){6uSEc*Q3@Zb$XuwwjDt9*$ zLgwt2UpSadZW%U@s!jC1VPSe#VxLpxwq5RckCudY$#)oqB#|N{@#sfQ6Ry1LA_hbe z+n+EtA1I2Qnz0UV#Usg44Qgb)7s(VNB!^mGHyOjNO4#fheH?Lcv}m7JRHW{j;HDYn z;eV!57mt`~f^)@+FPB2A9kwQRyOch}$*lwyTRR>=J45pTIgjKMKI_I5hJ%TU!!cGI zkHc_r2oA?)xdqN2GghsPRRXH_Zc0KH&_q^ZhK!GbK7)^S3PTnULH5&(IvB%$ZLG`% z-=yk|$~z2=xSKoQ8ptBvQVKqqE^B`N)=iCK94Y~sm`fSRm~|m!6p_xU>EO_HlWyW| zD&=@S%lVg<-RVOX!Q7XHtp@XzG;N;O*j(dXA)@vq7Db{mY>6^SLX+tmi>%n(wwI$k zMJqL~49Kgc%R4HD93aNoD8(MVFZ8Uf_ci%u0=e1AO(!J7ZVPs z>9nqa+1;baWI9_hnX{ZYIscsJJU2MtmC@Ka2 zY4%fL5OUbZ*B^14%*zPi8h}phkbr!@(_wv&&p}~8vuNZI8=byD4!h4>3%uoAM5n&ZbZy7wg@=bTuGCbkYjWzrd1#FaaqJ4Z(zfSjJrK?#2eHFZpQJsNAxRYZ|`9mXXveWnH;` zDWe?esi&^XY@TI44R>1pdes*n`mm}z`YX)kGJ8skcv~Rq8fQJ=Y(rdwo7ZuhEbm>? zWCvVA&OuQ3;)(>+2=!j|I^dT3w=OE)!L*}#<>wa&xJBx8-FwDUVSUdzIn~v@5$xI* z{mE6l5qFIZ-n<3W`SS5v&U36OKE~6h$;p)&;O28#c5+n#mJk2mxH-E>@?HNL=wUQ zvwkMN4`9!d+|==#ek!q%SuNW_G0RWFI z@H#$1O^c|gN%(;SjxA*g}2wwe-he!&g6A zMu8J-K^i<72RqVz*2fMkYZR>RE07k;?@3F_Z77M=pss7^H}!>ShJ zhS1zjiieE}ZM!zNv>u4_Q-oJnL*yq!C|oNtnmJyz#)> zcGQ;H2!Haxd9eOzA3SwQYGtIm@B?)y;m#pRA!L1`>;tejA95^}&7`2d+WikYf)IFx zdAAbDU5G49d}156OwP+?y=lFHjftzv@fT;QhMJ0RNC@^Vw7y>Z|$X zL1zF0ud@19Jgp0nwv?|Zo%niObP{_;qF2JE*OJL#gNVy|e<-up{_+D~OOZ-fhpwsZ z#DZzmGFNKZohb*{?vv$=!EhcXD&^MK<`QX`d{?~2Z@#F(@;s_rTxk=m3;{>pm>nx@ zin6d?a!3$#LFQY`gv27^blhmo6rm0|aV|!z88b-TS;`1VBFt>$YctG+ug|4nN@I3D zR=^Qu0d03ky6%E3cby4NM8rYt6U`RQP8ebDrAYS#N7QzP7lXKaKGdv5B2@GSlY*z4 zVaXZs8owKzlyugw1nF!P1`^CAxZb5>M&b3O9;Qv+IU?NIB`NeOoN227BB#e;HdEFN zi*@L8=$+E&Yl#)KMp>8+J0$YCAdBrxgX0t8G;D^=1s!iqS2MO=qzMM;M}EiJ9W9?$N; z!7`&Imd&^|N>p1$TRRY8z?WRTrh#Cn>?h$4 zSWb+sL{$=qlM4RbbICvhcD~5%0Ss!<G4O#(V;Z^+>V+_Rda1W}+@;$C0S+2@de z9&XMqz5v4b3GsD6A6RX;xS0}g>R4y$dyl~0y$%K446!_?__#zhgti}YXE=&0mRPk# z2u2)nRXQ%qJ{o(JNaiD?xjr|yMBRcEC5av=Rx|FcDGl`4?lu;GD8WUVfhMc*@$jU( z7{SgONg+x1R3zp(dN}5lREBPPxIdW?eK+$zM#i-nw-m9VW8Mm(2)vvW%w%F0B4;0L zeo++F`=@yBLQ{NWxWr#Knk;KiQLWB}^?6^o`Jj892_8^v$xwAS!9X_szLS_9qqzI7 zQLVb~^~9Ijn(tQgiKga9@a0YK#*^HW4j*yXnP$tdY|5eXuI8_!8`im$AgXre7Vu`g%w-kiYvJ#$wX zCU8a7*4i03f2H`SQIgANpjXdA$qJ%>_kMfE=8bNXWi}m?`F)+#)ShH*u+V4{DrIvC zjfKRgUz{1k7QJ4J#^T`f-StLLsm)8=i$vhPec5pD=|4smxomQZ#aEMUec9j*QMwC+ z5s=(gv!StOm@P}6>&GdL1r()#Gs;4@xj5;X3$nt!fk-UYMWHZ`s&ZqVKI4h7X31d! 
zn|;!*jN{UfRYLhZXAjES;v#^8+?b3x`j@F$5uP+h`kPgjESal9U{V}bkQOGL~? zs41MPzebCRk~{kZ*mWU|p!{$MPf7%a3o=XlWfUSDGZ83OMWkU)bDL$nBUkDQpP8Jj z?F)ps)r*5wq!O9Lb)P4&-J))IfwB5%w4E}zl`qc@nzTIC?T1Y1uV*RT0n1Z9SzdnW z8UIN?^zyb04`{kM!Sc`o%H@KrdTJV+jQEFK_w|RX{fjyXiATg~Iq8`xKpk=uVkZ?f zp$oIdLhTs}B<9SmVtPh=DOaOZ7raDz&dJCtyb6)-jZit%mMu&e;PG1Hj}`GMBwOZQ z^9TcKJ<1yoNW8~!YZC_If=sV?+eqiL8?WglpbD9Gk7s0{zH0KMrnmnwDmb!d&e-ew zoJtu6iO9}9)@_Cz@$s7DO(9b)7Q`W_Y%d~(I7}D5tbx7GD8#(JWQ1whOGJ8FM$24P zXiP+>726>HO!K0b=VjboKi2CU;(a}lWOI-2h*A#2(`bfnN>W@MX5g-=l-*ifui5Qs zc!yccd7C$-%RBoZo)9~yT!QDwAaD1wAQqMh)>IVpk8B@7nA&%YB?It_!^bj+fl&xx zzupjr;X)+4s)i42rexGbBN=*17ttasdmudms;a7tr}cFhIckosJaP48r++ZKqaI?V zrj%Ei5>;d!%@9;Q#K2t@!GY^nf9hr4BLYBC!Oz}u*x5W@S16x5VKwgr^jChdH|_@P zWKQ1aRz0P$vKPzmin7qI-%R3mK~{RE2PYxoG;Kv^pIT>WnlE;14%R&VA*l7V}yz zcM6d^qGs;)FkRQlfI~1xigtGumWY}Vj%W5N1x&(ELzHQ<_6Dsn3z%2UQg9JR0fL{M z?_He(3Vv=X;XNMKR570OV%!s;!JpVE-Us(^ZEO)og$i(!5)x#~@y8I%#iOd+0gj}{ zyV*}jU;vO3Mi)lG9&I)f%BaBOR1X&?;xSw{gOf-hz+Px#2kW_$uJQm^(cI2;h1d>N zzGMundhB5*Ng934=hAY!iEOMI3k}H8*P1ArejYWkJ;w~hHJgY<&db>qZ#w0HlIOr6 z%H&PiJAl0EW|ORK^?Ds~-Lu0W8pDMTHd4_UjhKhtDr``Kwr7_v7qY-BAfI#AA|QqH zxeJe)v+|;D*bhx1+bR}>EK%8((`_^YE_{W{6&-Pid2PWPjSA5AoXWgHR(OSGN1Lr% zY#;xU_I)SuC15bo48RE(p!uG$pPA^!kZBjp-bky&x>Q8m=G~)C2z<9^2F;##PS|sG zkWkWI9=*C@DyGI;^uQ^TJb%ywuI=;6EDaZdvSA?foCosdka^zd46un|IO5<&mhs=X zk%0?HxzI~4-C7uM=FD1f-+3`9Xq&X6I{TWjV3<(>u2@;UVOI|nNOvtvd>%Cq!bdQU z1sH)@r$5n0{WYm)+C(woM+S~I&u9pJ^l_RNxG6-`^nqE|XS%x+gyC{n|I}MP&8z{6Er8r{3gJw*RZZ57#BszbOOcLr9!~gZo?Vrw^y=aS=_H@g-cTqs3-&wkj_iX1iGm z=HrveAlICnc~j;!+|bq#$&vrEL>mhVU^KZt?@6(2LD1l4*w;IhU}g8NT$G^!l0(eQ zuL7(jyV@zd^8vs$!hWIPet|xtI>sB58Jst1t)bc|xN5AhJis6XkK?@m+T#R#AveN{ z5?J7IOiuny5XstuKCLB6vDWmb>3RDmH9f6Nth6^*N4Qb9#D0N`>+{LajrpcB`aQU43;3(-^;em5V3{%u7w_tV z_grP=SG@z4sY>8~E(R zyyD1Inxx==iCJu7#0fdXa885X!Zw{u)n=2ZYmpP(6IfP3WGkdFrRwAP` zMk$YGhT@RoEQL_bESU|(BXrDwjs5ahjr!Jp4wk)Y4aWzVSV*$Z)4s-h8I9JMq`=MN zcvzbu3H_lf_e5xlH^q^$u6>$5-V*#OJ2NGcEfCbZ^E+*dtiu9gUhlU!E9kp9qNvK-KT=ofpmZ;wdGc6N>#)5O^<@#B_GwSs zt(4AcpvacE+_asll%#!)_w4%`WsyqH3KWxQs)ME3%Y|Mz#4`sUqYFZx%Aece9w+fOQJ_ z<5BiBUzMXjui(UGL6_RIf{Rn##*O}T)0IxM&D$&OQQ_yesi_n)^ZV>`BD8cAR~!&D z$o#0OywR&@x5BTos$7kWC=&`%Tbk_l&UWsVC$?kJ14k);RnW2R>YO$nFG@p4eauMTU?Oj5a}nRe&ZaP7r(;GN>*xRHYDN<5Qj4+9pk`sJIIGmG6M&JRWjDTj70)E96ogudMZqf+d;5o5d3#5 zm5|vq;76#&7*%qCX^LrQFb(<3wD&2%SQRZ)mxfeBcH8?q09!dtw}$F_F(fN`-rAa4 zTD9Awsl>y|(i{%?_F1)q>a*sou z!&k3DSuZ}+*r7M)6eCr-57v0EfmjT@oNi6f;Ukc610Kg!5L()M-kC)TVyc7@`MxAr zQ$6@k6tzHTTH8vlb{?n)SG(R}2co z;;M?}JF4d0bL#zT>!GM6VSAJQ0~g?aPWTdJyp*mzg3hMr+NbWDB(hXF#<_1N^P%cL z9yb(P2A90SEWYbLumZ>qWjQecRp(nD!dS+(@tZA(;u2qi_+>*hwDm;qXW$|dPIPRt(GF!KSBpN|Qhnb7f1nMh!Jl{_tcL*NhN|g>X_};0PboQONQ0w@hu8K!k==Z=prm4S0~b{b z)k0pzvF9Yml`C7-2CwndNxQdNK@jAy{&s-biL)fBNNGfbdvHLnRrjW_(g&P(?ORI` z6I8bC+8G)l7ryu3sFY$67P=zX0g6x}&I*4u)(k3&_A!PCh;I;lyr&uFFLlGYX$m)@g5e{`|r8007stHPpd!Tvv>9*b4KEq=t~k4p0D4B z^Q)6U*W<)0@PISqDHeqMo6ZM*leV7V);eJFy*C|#Fc1!baWr~*0#*?35j1lEKjSd{ zwUY$c*Z`r~oIHtw-&jIewc<>rs&?_pWnr%o`cZ_abak@0lAOFeOqgg(K3u{zm)G2D zVBF3kbgRH}-#S*@B!BU1LUL+h&}Z*BUJDkbJJ{LbCNG38qaB!FDq>9>Y5IOTW?!bp zdk4T{_sq2K08U)rd`o^Z9iTjkLx+|gVB0^fm6V)5<3(V8RWg`X^5{s+1pdGJ%cVmQ z^YfgyU%msD{?_o(N(PWmwPbrX_OLdDsp+iD0x_FhA1p6QNNW?Y$En>;j?U&8BnK%i z|KL7-SX>-q0Il2mgQE%k)+3L4RCD}j)4+;l?}MmsybmQ5)H&}D0!;`JHJ8Zk-#3yo zt>Rqh<57L9S!Ekx+EMZJsh&oO?N-5Y!>UBBk&m%w*oHGZ%?Jj#R7c=Cb@f05mp9WRQ!z+!6_SH}31td-ZjZ(o)(V782 zXx*FNni>|pfiaD;I^}plty_{xcH1CBF88H2s$Cj3)hfjiE_R7ZIYKe7hY$o(vT5*X z&Y+dyrEAQOQfef4#AN2-pU$WQzdBp}sqH5w5H?dWq}BkpC-$fZG;6>&bdLObmmK(P 
zax536IYE?KKEUA@lNR@}ptF3UOpC@(^~c&JA>9WuU9hz(A4RyY?!~BThac>H@y=Gh?0)eSQKnc*-&iDCOtpP?-ovh9 zLA}Cy>V0hILH=wrll1?w_uf%Ww%NO|Ac}~BfPi$Qh;#*!UKHulr9-09q)R8Eg8~9l zrT5+my+f4Vd+#lTj+6i)l<(nvXXeZ~v*!Hfoj=ZB-&$EN!Xr=a+`C=-+WX#51_uDU zm|p!0(QLe{-IZ5cIbpl@KQXSghQ~ZGAixZtosuAW3t*Acj=aW)`E;u*G0EXY7Fr4T z#Y??gqeYxKC%g1Aj+UGWp3{%UNBmzz*JXcgL-17Y?wY?gntyryZF%!V~3UV7nE3iqQm4| zz~5oK*2D-zk5o(#EGCNi$i3q%0L`joCB0dR)5-pP5MUZN-*L9fgSxzw0LYMI9w)46 z_wU&7|8o->J1wBnG#ergH5DRNzT2~>MA*BdLYTzEaKx;#U*!}uIbgiBKxY!D*NcEnbCaG zL$|^=H?VLXNc_+L$i98QkUUog86!OCQW)rrt}Gy#i7Rp^FR71?ujmioZBYVL*SxEm z4i>>Z4_?C5e3C#l;xrKe%o-R?A|`lk`_lIn>&AWxx{vE=l9B&mjyn6(9Tl{0b{m-L>wKiX0%IOc8>fKwQXa=Thx zRq$gFm51F^$DTdgl~EwQ9)-lDZN=|yMnxw4f#QYw6i0bH@Ed?1z434;l{HMuH;~3FFXCU{d9jA6-wdQq zWd*L0j(FvTY5m{dB`!YXEfh|6+Hv)vhP-B}6dyxuV$?s7G z=z=UY^MDxpBgVHLDAxb;Wd#6lGRqP~J_Hc; zLldQCfYflri)dkz0?_K$xX2p7?-}^4K9d7tjogPdCN7{B7(;*4F81i?h1mk)vmomH z5A2}xd-LN|assS(`;&x5_QR%oUjWseEO1x*6FMZzT|sr?`E4H8)+Vf${kmCTc``{t zf(T#?jc8~PsXc9;*BI~6mAGlnL31*(|7zG>J!CxzSa5$dV?8$j+W8Ry^S~$XSqhe6 zfba9qmsuo~=E_%P=K->5@4KL{D{xsRDOiDHu(#+AHjKHO#D@cPg7r?QTi%4upw7wVV3E~awSh}vln zag`NHo|HS*1KTg;V5a_`u0JXC$H)6kas||aSAqR%1x|Ht7kg0$W4Pi^`I&+Bv&*UB z0@VYxS_?OxKi8kWQ9d6L9DEPGJ7crzw>c!(6W_m69B{ixA&L$Vd_^FKkqC9gv7pHY1S2>CRdctd`*8ggvP-pA%mmmW+7oedr zuLKiwfR9!O5$u)$mPYZT1;Qo)5=9^u|9+VTFh~+jg3WMXMR%fGpbWs}4~SG+K>b7C z3l;*G$NuH=*k&qxb5vJJa8H}HcY*y7Y`_4~yZeLY>aqV16lsxOwbImco@aYFYHIRX zI==7rlop4>^{Om4s5f#_JnS|CptL3z(|u*`G9Q|K7piT*&6wN@Nli_6Q-K6~0Qu|2 zF`?grX7|ciB`< zT6}xd*FIAM-~4-!>Ed8lfw$CzOZTeA$})w3!-4txA--fD$XJcb?fXfIl<&50yp3wO zof>dk(YxV)cy~t~$ZDqQ3=F5>)w0p<6|bs_2SQ7tgo5HOHJ73|v0)*OfDcLY_6Zv7{5|4(R;3dPQ1W+;H4>u7SR z!t)~iHa5oup1K;35y!b`nv8C2PwP=T(y%v>bau-Ix%`H^M{q0fGkNfjj1^405lQC4 z2Skve_J20QO|4|HjxYj#O1dzvM37n zayzhwtMiZ5)&0FQ%@Q)x$KN^JmJ+i!*3kBgX9FFRI1xnFAA{Lz%yFYp{?B*qPgLbi ze|Hd&jRr3dkI`l?Kk#H}jl1~xh-o4%ZD-**`um&wVJw&E!qpe%tJH{L1+ECr1pW@& zH!olP&!Fso$8ATXsaSmd)8DBta5sj^mC@svpukK`C=|V=e05kl%eNMNFd_1n@9M(_ z#5C-voe58`dySSZS?wd~%M5XU>qx?u`KbOM!N-4EsRS5{Ql&t(emjv`@OW~?sO3cDf6EEzq%Ewh=x(-ukMu_QR;Q-q!b7c+Voz3;h%MOiJSRNm4>T-?a%1FytuJ} z#HYDmj_9BE;y+rp`>jtBW7ST<)c>JL{Ntv-rLNKUq#)~G{EqagFrdnKHXahr_*b^( zdLNta0B{#ym__Ozi}(+}?X~i1EKEPQ`>Q(elN?Yd_YcTSKp?uLPGDD1$C+@GQ z?0gIoY$ z7Uj`nSR_;H1QGTUJozm^hwP$I&!2qlJovh0_D}Oy(eoO(X+FV{o1vhr9Mjtijfkn{ zO=Godi~{=9>zP#M8V)Wu)vyHK>SK@fnd#%2GJ*jqqM}kFWr#mb7uM?YIK(D;K(!zz zbC848l@&%SGTSMDq|Uth`KrH;VgwY4OhKEr?lhA-iDeYbejc$}3Sw^rouN(bPH3t% zb;+PZWg6`03qhnDhJZzOOOi1BJW1I2wB%RX*!jH@FD1xDpN%Jr)^|$IYX|e=$3s?5 z&L|%rY{(K?j>mN^_=eWEystRReFYbEvpMkRTJlASZ(Jw$>?Ltgp;dLm2kbKXD-AN^ z1zH1LAXVFN%egYC-9{t*Wu$zA^%dc4c-xMdOz~zf*$MK8Ho25pf ziZ}ChQO}%SRGN=Ph+S~~*cEkE7|HJIrdflZgowCZH6mb!qGr>Xd2gXhq&ccx^VajT zf(Mg~gl*GVZgYzY+>t@t6uQ7%j7UUJQ>3^7e|~qq2RVo-1}xZYCV*Eo+gLAgcCtYP z?HcgHTaTS6D3Wdhxt~cDwWgntcV2kb3vWU-*V$E)8eZDimb`^tX2nL%*E_?Gwx&4O zULJR2XwT~%!{?tbJ2|Ou{H+T(x58sF^)ZjpBqAzW!|l0qKL@(w6t1P;dgyg$Jyr{| z7kJ_PWO^kiOJQ}K>WbQmuZNFp@HuR0mJfYS?ZE*m;7qj#)y}I4>sf~=btKv)AxyB3 zgrXN9m3vBd-%tvZ2A;0bhnysj8-+V&8B#8gD}z1q;rmsT8FJ^<(W;P81j$=<(apKTtqw)o{ z^MAh=AQK|TCk2K|Ed{NPnl%AX7(gJwp0t0@wVH7{_{kU=!ylrFBb$kZ%99(A+Zy+e z-bIYp&rxoo#4Rq}#3 zR~Ukho*I8zM>4CKy0O}zqchZ2<_tvC=yZ%Rsr@2pd?NZ@U5PT<`q=6uX8@`&JUram zUoe+?HpN1x;7=md&1twB0kpykI7`xu!fl$t5H%0tpvyUS~ zH==ZTIf-LBwANQ^Hv%q>`7UmgTZ1=n^C_@-?3d$nRJ_?eBg!HmP#pdlVa4vEYvm!d zzcB8gb;Y>P2SN4wjRg=qDwW}vc8CCr#M+bXW8qTn?1ICR1;(8ddDkm^S7&leab8+S z343wGtlPf*&619jr^9 z#K2a*U1AHZIZpTTw_OS){|&v%OXZ%Ptousg8_jC(@)+RB9+C^K@g+87r=+xmc33#x za_D29(AzZW4hr9hRWB}YkoBSMI1+~O>%!MZQN0U7@HZE8)!?4ZDE2I^1$^p;Pxo{6 
zO*g_hxux@f?e0Hdz=Kk&cQp9*04B;sJUdah7PVElXu>nz>i~FFy;3&HZAXu_vCHgl zG1))na?QUvX5>BRUr@ml9#wwNeLf@V+$lrHVK;8ysPU~U7g$`>7H0;-=3C8 zTZNyA_`pD6!^9w%a_q5xMR@tZ&J&oThYrB>Ce{ul^Dm(9r@K(9P>tHqb2kVn4v!2M z)rWepbZ9%jtE97O6x5^jJo*6anExF8$Auwo-tuIT7rP*xYHLMw-M4`TdQQE{2-Ma@ zcL2Lz6n7}fsCgtZv+skyu=6^V(4x;lX~(p`;~75ST@!5TC6)HhsBk?yf8ra@R23n< zmUyz;tR?ITz|isQcF{jY@~lBHiVGP*xTy^*8&||UGRa*c^wS<}WIlFl7VE&D^eRd` zn9ah0fQF;MBl>OJ%N~eMDH^@BPm*}De}OrcOQA*jz2v?^caO#mr@p$0-sqr1VbfiAh8}b-5E}qzH>4`VL$IAHgJ9d?^!*wkTKUFyiKJiQQhD-@ML~q zUIwciB2#%huW|m{y13er_6} zqf^FcSP)m3R(7Amb~BZV>(M4VBdoDs$8FGCnmsdJAa92)Ou|a(yjA(;3 zFV8Gu-OiX!)w>A1LPF-sI?~*^?tK5VjgnAm@w#SzOKS9|w zQN4OGg$>h+x;aj}+c)51?)j7a@_DDZu|2Ac#lX{oGzG7NYHq826!3{;=zDm%i6euL zB!Q@XS1$J|^7cEFh0*Gr6-Z0@hyL>&or8&l4gqPBltg-7a0;9KJZQ0jz(%4HUWW#l zg(Q<)&)}z{6$5=#dgQwtBZ`6k8WqE^){0Qu)xxu3@k5yoT_sSdSNy^+7)^M zy{M~HD*x1GGAg?VbJ{iOO-829D4sc$dz>0}AL&FyMKN1w!Xi|)z)OV$oO-n%zv}aE z=KnS@+OF}>T=|xic(N%rN9g7}&W8KmTN67U$w#&Z!1L9J$1y?)`eF`YmeZ&9g?&2Y zH?Ry2;yaPZ8$Z6Xz9CW(h3VT!u_k^9(ii~j)9=>7jS)pn5aWHuffRo z+Vk&C=#)D6)GEbX+hFfGvk*HVe@D%9;DE?+J~C>n=sl8qXbK%S5omaUSyApBlBtdY z&S1*Jljpj%Y#TL{evv3h05@VI2Ar~kPo+{!qFqv!)-I4J7J%A=UncX5VEA8Tzjc+| z4hjndw^0{ijkTBWEyqMBYa;lgUn7HVTY4I!vwXSi;uUtRiknO-jy6ZQr`P%qnwwu! zjKV_?AHP`hD;x#U|42daH8K(|fRD%Umm&{%VaM8aC+p0|6ZturnF5mK+#%b~ztW}& zqy0dKyfxVSt0*ux{0T0fH8tt4rj?@rcCM%te#@r|PtGqY&C1sfw((&CTPX=IR6z-9 z#|>_{TU|ksIXcdoRz@_)KTkuN*Nt&lnI6{Lz{9DE=?rY`k?11lSuei(X$d(SoTI0uNbF`|?zsDd~m#t`KFfy?R7n5`BC1%;&ctncD6e6QQg zCBA>r>Nz_z#qdXN^Ox+*RCVD7&Bug_xq(B3&K^-fHxNp`1&!b~7z^BDTL6vf$`SA{ z9}u@_cZhrG7&tar2i_KzpN3x;iIZ`;DAP9B6dopu`>+F!(h7pH7^L&uyvkHeXvu&0 zF~d=-iQWHxoH z?p!VqU+O&18rO!Mv=*gGMmMg-*G=FEU{Z227q@ob*zw%xj>!(YKZv0=!Jg5oy9`KW z!nCTz#BmE9q?Ke)4R(#jMjQoiby11aH`jy;>j9vCg zq;hS+#zL!6TO7P)j|Mn21_0l;|86MJ>fJzr0bX%+Z`X>q$)+i2r&PnTw;WD0cq!c# z7Kzv^5S)+laCl zmraA%oDS`oVfWVfg#1KLggfR$Vc~E9k8=%}8VNWs6BALSk{d8F(w?ovZOXX}I>xr; zv!P<1(+M{ARNaY0Aq+)vJ@AJQ28;rEhbK z*LS~H9tF4sQf1P|KfA)k%0d-yRBhrl8>xh zcD)7`d7n#;Rj-}dEp{8er0*$^$}87N;EZlGkaNh|+Lqp<6Uyzr&+Ue*eugEz#ElYd($}<0^Z0XIp=K(>v3DhbrKYTb-@OCSB+IU{EDsN zk?WP@aH(OAyz`8!`LO+P9V)G?;HSC#j^6kW-4Emx7aigX)w`mJQjPT~R(!Uk8v1>Q zUU<356AFI%Y`%W?>__+DC)PR@pI{I=ih)kBH!O_?Y0o% z-5n}`1veZEnU2g@H@%AHx)fQqcDlLs!>l~fk#vvVz;;C|yWe4#xE4AlU+dKR*>r4_ z!alRuv3e{@yfAF5xHf-zt^aV)W^=E4UJIkW2NUt(1dOW%)e%xI#+x|VrPJ{Kvi@MO z2L9EidFuv#ZsLTIrw763xSPCuQHSP?_Kl>mxnrLg@{vEb3Mr%bKMS+4!@?Jr%>bOn z9+6nv1L%#5am{s9->U4sj_QRR?Yw%xpN6Jv6}seq>VJ718aV9%dw6@%U2w6JH_-G# zit^>B^?M_ilU;JXlRU^7frSW%4q0c3N2#(J)=3Z(~OHT0Q%#w8&&cA^a?UNK@T>DoKmGsU&n2u~S&F5TNQAmfVOD=z%(68XZqYm*ux0 zM1Mp~^8>xw=DLj4+HT(Bfn0A+&H@qj2F6q>4_S)Q6nyAXNA>F%*;}3 z^X?}=8nhZ)wPUn5-#832t(M13a)aV|47On+Xh@4PkK5$pKce_>)gF)4m1fh0b~!XE zxb7@)kj;aV3Bbp5m*||Ao+zgGUwQj? 
z2ycmDjtsZ>!=EU4JH|BdS!l}4T@rfMz3Sf>pVOh+*vbSP9)PMPV6b#2K2y5DI> zn3{Wq7CH*KTuT3Hw=h{|QaC6FN)%B9&hrOOOOQ^7r2KjSIOnv5hH_pvRd}+dq=Ovu zDftuSDCzG65dbo)P1Ty3!?0QX43fZW?Jf``7y(b4Ylo z_}SppSd(joJGmK#=rVcs@3k+-ztz4vApLYe8?z{^>U?UhKT`ZMZ=0a$ILQ@OL7BmB zB3d){!DQ(=``H1KReNNm$Mw(St55~_TgdZ|*HVin2Uy)$Kx#E=|17oXu_d9N?O@Hs z#%rfn@G;B98t|Q3u5S9KQ+GvP1K1nD!zx*)X826Pu_H0y=9dY7LS@tgLn zghunJPT}+L@T|7$?S<7jZgnyeiqh96ycC+zIET(B30L)e>7Whw^gQ*>%+BtbJDKCA z5q7exa&?~9I{=0>2oLOtzXo5u7x!GMH8ncu;nv1vcJ~?X{s8(BO2~U=Y#Lctt}avm zj=db)g-8+i-1^b3JHa4!aDp~TakmzH>jfgh88AVS#>$KQ7#s|tE(*|_NXxV`( zFwkJ&t()$A;-TO6LbWn5DuqGP&tR9@bfrB75t!zih#Z;+|D9!oDT3i1MZvU?DEmYw z;8gvI=utpsPL{B<_Rd0%48(EUP%#OeeduA%F9jA}zPn|eLzVgJ;`TwSc#2~@X6HM1 z<0`{c8N?p#0i??SVBT>U?y>{wu5dk@^Fyq(+Ib=Ra8;_@ zWPnr*4m2F?%1<82y;-gKIf+!EW5E%qtZfZUW!z~!^dmQ($h8p=g`R|mXIw6BAOA|f z#bdoahwwi=1lvTHdw43LEieV0JN_4AgU2nGcQ=E$x79a7OsojR^=U6UMwB9>8^612 z$+=>*F_Sa4vE*Z35XwGCO4>OQ5LP&D=^q3J1u++BUM@C-pe}Xyuky@n8W!sC^XFzMp(27G z8hVoj8IOk1f`1=((bIf_J`$(vF)*}d1rJ0n-UYBu3^)mNIS)-`Yj}wug;)=_-n43m ze!qsA9J*YSh8sBcNx)`8*<+^^j5Pm17;o|^&aEPoCIZs|IZV&kCuykmZ6mLZiKG0G zLd+FFkz(_KFu+c7KeXEeLUfF#&y4D5r6xBn=nRsy z5hyq93uB?912?J8RPC((lDS%XF)L;6LixfdR^MTtIf^o4`$>UK)1}&O1!WS$+)$xy z3u`%i`;+LMyag$8Ok(Tgp7|+wwy0ly21cH`9cY@Lm_9W!u zASlc3YGF58WNX)5#E1KV4Mmf{Tkiru!jL$`t za7VWsON!e3PxHM`{G#FqgBJ&Ye@1lVoo0fsumBIa6g3~YBV5*>B&wHO7Ox43eGHfY zI9W&%z+R8Eo@Xn&YdleO<0-b9l2yJ&=L&rPFb;!1j6<+sW8LvO3E?ZU`+ThI1Y+;j zu#{+=JG+wNyxblMEMON!3KbChmvUdj37vLn-sM&xfoXkpW%Kx7n1XA2(@fX6zkA%( zZIOOIH$w)bt8mjdLbCeOm-@~&-!IM3vBwT5U;=_)|iXRYiBQL0?eACMy};noCZ zT+x&d^*5c0ovjJk!_T6>eB`2-9xT-(J(Az0*qkTNqZ_-Y$gmkMSbFFZ6Bl5(ymJ*PH>@t;T{c z6x`a^plR9!cnX=mS@-k)g4%=L1|i@qnUMMRN{rXSwd;U&Z$SXg{F)Q3-M1L^xQ54Y zHW(6e(O-~k9xOBkOs|<*h9-yJekZ!sp8J<8?+Z9U%@#Pl`*p;`tt9OyV*{{jeXcQ{Z+hC`VG;;wt#f(y8-VP zDZ@TV*r_qe_Y3+mg+CSaUF8szCwpcf)h2QKmXySx#f@JMo+y~Q;dj3BIo`R(@}Aq;U<$N(uw=TSC3iR^{1n9B)pf%TQXsJz)aV4dt*Kc>nb)kq?`K z7gRiQxwf^^)!hPlnEE7$?q=kFeMgD9#}ZiPb*yYAKX7Ed{>#_!ZLGU0BhrG;$eARt z|I1gH^&J(jc99nf4m5y!U>CZm#wxDI6gBSm2k(1%=wB!xqoZ6moFh!fm$;3=hWj zY;x-BJ003chs!kZSyj`&H2iK3DuW!ZIXDv{b^R2HR(H;M>m_r@u%-xY$aF8QNQ>s$vwko=nf;{(hd zoF&ZRAVYqXn%VQRo*79ah`2aen24*}Yr+`qH8qwQcJ!~B9?+3VBIHRC=Z9C!bnfTj z)B?XZ#J6`Zo03u;85$3suSCyipYzArV*(5zx#xRL!YQlWaqJ>WOgWlNWxJ(r#9*<~ z?w8@l9Xe-@Ei?LxhZPfv_{euM*U zw4J#_86&gQB1_L6J^nZ9@>4wI#;x3q5?Nm3Zl;}?%J3AAOC7&^6i=>BJg;*0KB6|G zu%D0fFPv+!Y33&q>~UyED?~xn>YTUoZ=S4AVd&T;W@DzTNAGDGCXG!={ci!mTr+OLYJP*+V>6FkL%#GBBO`*2NWr1a7+OA{|XIYrmqN4STW>4{W zoOeasF?nMX%n_C9$vr=~I1TE6Jk()p+~Aw#!ds&VQ_B-$e zkFz`uaOvx&%QLEp1_PI+uev|fnXK1^)m$(#AGvXg#Ms_VTp6eKkplWp z4)RMk2&Zc8t1YL>qzK$EYa>sXkYUzi`5NzN#Urg|s+empdqz5?D<86Xo8N1^fP6&#Lwn(6Bro!P0LPRwL4ETE)4H zUnlRTvkTl+k$NgIrtw(ElbCr|R`bcQ?E8Rd`eyG=jOP^wOxdOJu{@j-`~!-G*%wY_)1cY`2*VC|x^b*nGOug2(SS#}m>3my{*StaFW$mEL=;P} zGNctVEvmV6y@Z@*UE=s{n)>4+V-Gn@ff;d15GBI&fS@sj`^XgMnf{I#UaItyc;>)Zp(5r2LnzyLcK;(J)RnXHR_>#%Kr;6I>Kl~~Q!h+w z`Bm*7GQQNW_*8nHclpZeY`-H#_K=KoCAz0}?Qtbew5jWMh!)H2L5!~3@|ZB(-MFDt z00c#LqmG_M=Sb@?$t*rfg1;FTjaCk@9z6H}Co&Ko88}hkZhU7-#{HIgffS!bJ9Nz_g`8?($u>F(*n zrXv3~EQUqkJe5$Ct7Co0-lm00mD5{kx_YJKfu}s8 ziMt{YY>lM$wFetLLb+OAd5f`+B)t@U&v-FC1?Y#MyS+Lmx;zS{el=#AdmMCKEW5LGCj2XDN{3eTn}3z^wT$YxnQ69xi;> zsvZ&+A_4>f`}}7X<>ViXGNI;Jf*jQ?f_P$EVke2CFt8TbzXlc5;*Stvj%`F|CpaY5 zybp@mr@@|*zE34TlrGiE(Z3>+m~f*~x6(9I>d>USY?F)Pxn1U44@iW>L%k}ub>kG3 z^;6Hs>vO5QBq#M7m3_knT6?G7A5UdDu{IGgU@i-g0W&<897kU4_>Hz#eD0-CJLg%` zk6gj-SXD+uGWN++tiv8Vjc5+FEO}$AtViR48w_}{9t>HWsmcG#EqvpM>a=-v*6m_w zYjhX6Tm6TEY~YXz2q8Gb9YYZ$4Ahx!lQsfl{YSRNMqR|i^;)0iLu#L_C3e|@*1WG) zku$*Kr0+Pz`LrlWPTD@rN+cg&`-Qy4oOBzlAy8Ux)$lkS3tc&0{Hg 
z(K7&Fw<=f6f0(M~Wxn~^PK*Tb21>NU@{>_LG4dwNZ@o$H-Bt08k@u`jn$7r1H$=oF zKZs-DSfuCAF{T*DqnNFjLL2^Tw${Gum#=??aYq1t-fw2+U4GKCBy+h(OIVxD+f%%i zE~RoKVW)&~pNZ|Wb=R<;to}eA(FZrj=Gq~!K%4U$h36H__grHDAn%d-PDdnvu=J}7hTa=#!J33mXe7z2S&*9&_3J4UTh zT9Kp(sT=^ZsoRkDzBRg9H%gXDIkXeqmryZSEjzT_)%f*`KR@bL(7{(aPD%j}#+P5T zg$4|q&9F8jvJ|2m7l^!9&U0?RxOgLvukvfpGGq?4`9=LwEizP(27g$qohoQw6O**P`h);rhB5_Yid8amwtT5_ZnIzz6r8eqsBbu078?_>SopZ#-pay4(;^Wv(Z* z1~Lr+!t`sk!eBA<_Tnq_l0RK~MlhO2ybjqPr7or#y_Q&I*zvM&s<=^4Qn)8=&r&LcuY6 z4`}t>_S>*+6NplqhndJkml}>7ACV94UY)NfPnA2vko))dC7+^;J^e+>y{{c{9xFy-?=KVd5Z^yR+@`q93F^zm@}-3=r9F*<-%1~LD>Qe& zq`t@J*4^%0)s{;B0JXyZ|`{Q`Esv57FET>D9 z!1g4-%y>&av-!e24uh(RQT(FJPdJ>xg)}19k9>Mn=^1J+j%N_*L0qG#pIvsNlg~4* zVSFWOJEBcN7Qq%}wA?DkV{x3g&T)FSTe&k89xpid%2;vo$oJT>B#4i&>f*Lw$OKEg zM>ARK8HI3o4*lVk_Q|p_0XciaNM)*H^Ebdo=kt(nN*?TtwXx$?yZ9VO?YDodKKhtc z-BAd1fXD>Xl{Tjlb{EC&Ojq_i4_-hk6jr3e+AP4|9`%H0$JD-Yn13rEK9F-~rP|#E z5`CB9vqK$S;-NYK*%(+=#kTop&2vr;JyH<9{l1B7cfOIOLgR4HAWgTVE*cJ3LPN&i z`1a$SA6a>{XYhj-(o-boUH9D0W{Mjvn1A!Y9qtC#*(r`&tY(CYM+cjC1i08+W7-;< zfQINg)K*eDLh#?4v;DO>+e;CPOIF<~gQBRu6j@GTk2=r0YbC2Szde_|i~DO7$VoT!o|d|IhP{g5wrL8e@x<)#$B7wc zm%V)1^V~c@_AYzDH=Q3U5A=4MANK+eWn#p^F2%{MlHED|w$k0K(V6az4yQ0%$bS<+ zD0=!nmUI|>x}uyZ*`XBTQwN0HJ5^h4?i9F>r_WGSMn8YPTKhppUaVWIk90@mkMVp` z$#gNfY!HNEmHV3g16G9E# z;%s}WTxPn$YR`qPgjT2Y^)0Jc1l}q#1hgVdIUXXcw{}@m;WB#Uz7LGH*N5y-V`)1v zLdCcWCsH_VwRc#IzCr9OKvAR6lo*GXU8^V}O;R$Q+5Q8%j9K@*L~nnx z0TT4WQbnAB=FaJKUfbB>Ws9n& zC0_~vkTgAc#yx9l#gQDb$Xnrg`W~R~so{w?*1x~Inn38y2er~?M|2J<+Bq*8M8nd1=onNP#{dVKC`xo&5zwu%N7D+z> z1%~78(NbTsxL@GBobJxhd@c&yAxbjMA+W@@iTp$R#SaU36Zg|n6QV{@r}|?Wqn}=vK3`w2&StY5weD06W;cDC&Uf0Wk6x##6!h+vj@(t%}=CVldk&i%Cn zo4Y5RvYuTLS>`<_szkO+_|vV6uWqhhV)jC1>1IYlDfyl>%je>4SJHS(VUDd7O9GTz zuTDv_j%wS2$h#{i^chp!$|U3YY|;jzxGsKppcm`+;1eGk1AcA>LAJ0P_rjFVB4IL_KD;FRV@j zh{{~{)!c()s7s<6hyo0Izp+Sgsy4(rXs+P3mz{`Da*gCD4!s^-Vw9Pdg9`bf2Cs9? 
[Base85-encoded binary patch data (git binary-patch "literal" lines) — line structure lost during extraction and not human-readable; omitted.]
ztlE|p(z|8g_|vA#Sux>f&)9Caf#ou~8+7mn#NnQTap!Mj859Q+T8r&0aOb(KBFh({ zJm(`7SF-?aeNj5(;3LSCUYAk+K4{-Q?)32u_|{j&1K9{Pnf0v~@NNL-BnN+&twHJa z<0Q%)E3;v%-!#0(3;AoW#b8U!N1xXBXLq9dblp{G$N1Rhb$h&>DQStc9Q-)VMzsoy zic+hqwfy@CTlL+K!>c*BgvQ1eXt3K zPdCpi3~i&W+B#G7^8gGhHvU2hkad4b_FzD8n~vW08QyupY2GW77F?qNU}H97(vs&g z9>`xYPw^|{!tJfC;o<1<*Bi;)-xOPph!++XR^J(Onc|9hyx5L$atoV`U)kq^5Z^?_ z?)zw67m?*S2jh|r=cS71msMLAn%grk^XJ;JZyT&lidnzsHMVenHRpqKVhlA7#w*~pUp)8r0k19U{W`}KZMB%>LgHn}l(8?qSkD3l zHNENZ5SqDM{8TO3(BaIU826iihgUu5^PvTt>*G!5gLk98FVQt95)rM-{5$>g34dFC z0|U&G$S?4Xk*b{#&6VoL1GMgzT25Q+9;^Cjivf+8ZKS}e+5Mg|+VM+oeU zN;GIWzs1MLcS!QNwObsXTJ9h_Q@Fl*HCXC^+8jF4OhAK@`EXm)stJbK zHz*oUnmRh7j31)rVn0m!RQY>I5;ETCtM<3I#nNDdO5v~b?trnxAqa{AFuAO(PbDs# z>>cHsM%Q7#T*M25QAmlF(%y|}u-|UHHwX*ZZz-pPITXCP1g##kiOAI0OR1O)AO zW1x>~2p!u$gOlun=!|e~Oau-vQ7h~JvvbUbp2FXGx}RHo+FhqyKoU_}a5ZNhqbm?xNT%JdQtU%o_!An5MEZAFBqndU?<)zlOY4jez4IC+55rZ1 zSDfzzI(OVU;OBK6f3V_kqJJQ1Vqy|#cJl%HkK?2gA^0oz^9PT;hX4Cs%uhXk?72JN z&EC#38+@CVps9WGTBg?IWzPK@@_2%#*nX$}DjDKl1Ej4?6IMw*z1M9?d}=H;-wsLK z+ZzIFMYjQUp@>ULEx^DnLvoUJD{5LvDVXndt(HTx;b;kw_TwO?!)3k|HuqN&R0}*t z8!r?>n?diUpJrG%qwH1+;3URDOHNkKcd!&H3qyDl6~B^m!;ljH7m>)X=L1{GM>Mv6 zxhl~bTvWS_uF#sQh01M;6?3|gx*r>{JvB4%b_bWEo@jW%GY!{qrMBylto68vujDT+ zf60}@gq=~=s+Cb~Zi`|1^i84C1_56Rw`W>x`x+~|-|o4GbP64$G;opK@D~0GUPB=n zlNrK9X~nO^unC7>(5b&`gjHxVCjK16_|qOla2H(qt;PGj*V&*k(YYlqd?gAh!?`$D zxO|0AeMHk~46_h(V|I{qCRx-eoy%kJ;N8cW7Pdae9S#qY580{$r7zY4D<}+Yvfl!; ze~z&GrGu+JpX>d`HIXdpAu;I8E2Je63jS7f0cAU4>NA zx41gP<=ROkhdvBHjn3ZMudeP#FUSuI3)9FiI+_;XP-}9JN)gJ?*3!x$3A91YP)c&X zN5XA-ZDX!IQks>&R@{Lnh?Mo<$jP07fjKff==)RxrO<@sfPQd|f%Fn<^WVab+?B2j zI0hX^C+jy}81(|It?s<$u_Y!4}eiTE-Aim;f^pM?wt)uScM6Lw$sx$yC?WfTxAanZENuz2&FmH|r%L;c3>Z1Uz?j%o`TQ+^f}k zSu}AUL^FLq>$zKJ(%oNY5)S{h_6x~AaPV%Ws4ILYl2d z{Ur|r(Xp?iE0mc(afU|zQv15ty?rc1g1}Ji4eI;N706#DC=TLIBOHJKsMZ%(P|c`n zjk`p<4EwYVp}@qx8i7}{aaBT_Mq+E8ab{-5+GXsj=`hFB-2rw0V}o7n|7>bv?!eWP z5Av4k#L%}VoV>wO<&f=C_Q3OEb^#ii7u2)fuN&IBL!4I2ug-iQ#EMB*btf7^=GO3@ ze~}^m5h7EkAp`OSW4krTJN7?@s(gTaW8$$9kvjz3KP6v^%&(&OBEL;;oyU}PklPmP z5)EbZ#%Ogk>mo4?{OKlE5<}_52HvilJlUq>#Y%ecrfJd__*N|8Tydo>u_tc21yTl% zOSPA3U6i6C%(wEBxGglC1DJJnrpMl3s%dCf>k#G*hS|-uMjeH03($^?4=ZEjiGvoPOUf3VM6cIf|N>n!*UnFH}bsNI85&VgwFBE!{#55^jK6UXB>}$O=#YlzeBxOm0ai-EARv z=k5VT&2;X;N>~MDHJ*6((!H~u!%Iv9uk}icO(UkT;|V{}D)52Q!6hxx%em{RRNNkw zMoKX`+vsv-P9M{y1EdX}JjrAVbe*0L<4RQ|`AqgCU5?MWhgECWm0Y{uwer!-^?_oB zBV^_8%JHPa|H495X#Wcf)&Kt+7OI}KNF6Oh{0DUXcfju7Wsm>MM)TX%l6K1x!_OlZ zTN-+@r{@`2Bil)P&F&bBwaac4rP`P`6^og#atc1E)NJ+F>!v91?A}#7?3Pul_mP_# z>$6}8z>bS*+R0}0l~X!lb?74^1b2r9K)?c>>xNvWT9@NOJMLCCv1*j8b>!;S<~4AQ zYO6P|$DNrX8`SLS*_zoxmV2=C*kQ|3`G`Zse8shElA8*<~sa?0f66pCqJg{ftFq5(w7yqy~CM# z_KgAtk^*%l8{f1F_T^R8Lg@f>9r1(2tVwIc zeHU9DWEG?j+a>ql9fG~y?vd4pKI@0FYRxUr$1*1kH46p`d?@C}Iu0VbWm63cAFKE= zy}*ud@Rd_*@VO_u_Ylf<7ywZw6?Q!;QJcvdYNR`I=KdL=ifzJG!0G#yzTg^lOj07% zgkX2-h4_K7rpRbrT4)cbG&(+@^?Wz>sy#(mMFZ?&R7wU1B#CRkOmh5A9sZYd4iu&N z&J;tsL6w`B>f7x^?E1qciWCQwx?gZ!Tui1Is0fAqB<2koEc;TkduU9YL(PZ+?8ku` z;!oJB`R$)$tCo0-I4aXHbN^3as@yWVHr5-{#X~FMFvryrw==@9VT8ygPuky~Ld{?% znY}y)I41%eh<3gqWv#i3GJIA;(L=BuXxt3og=Rs*OKpqp-7M}~bmtq@==9ZwI1yQa z9nBxsdYb!txB}A`sBJ1AxY*uuvu&Tsu55~DxvZNIq^rHLo5jMOe!f91eM+s5ACVQ% zHt+Ruv!p$5#9&q6VylmeP^;2gmZ&gU(gY)`+Z$NPu_3n?xK8U%rVFKyQ#DSPkp2A5 zL<7j6{+r|wXw{niO{>=Qibr}$-9*F_2qw!V=`#qSsc@c5ju_DDfdS9FfSmtRGL_8l zG&6P1V0J3_a^r`3ugk(ck+)9klMC!orX19lW=Udq?LqDwB=1<8-U1dF{1R;+GA?Nx<$f2&zwU5=6>2ccA z?p}Lt7KwXqF;Ja>qJ3OiN`5wBR%Iz*Fs^;EWk1P1Dm}maKHy>Q zON+&0hC>#o*OhmobD98YxCh*S?{K;*(uE zx<4n291NDvT(IsH4VISEN#u7wid^rWd|pEIX+QyrY9!Z}ova#8du3ZuFV3{lUD_lq 
z+C$4w_`eHJyti#v@Z>|UvK)Fd9j$Z3JZZHh!BN-dK+UL}JZ&2)Yd0ubh@*MA>33w% zTY3rOg0P`s`36GiRJ`H#Jt;VH+#6)v5K@v5pBJM6HI}M-+)oeqKxHy~rL#~wM7@+A z2%)5Sy$G8v=+0K0PfUc7@#2A!xaub>j6@Mf)N;F}6} zQ2up&o6<)vw%ksUfm7Kn{qf8sx(R_ghEpQ@gQc1miJ$g~79P(fO|QIV2?Ml%GJ9bK z%sJVn{urOGe3?4<@*LV;L;O9FVKmSGDEPnV1s13cz+a@G<&uWvC;bwb)o@4T3IlKj zpX!Mp8_3KndirZ;fW;`+SlE1kr+8!Xrn0_34n@bCq z8#*x`X?1FkPQk4uZsS8r@$q zR9FGA+TmoA{@BwVeKRbE9cxa{m1a-v;%0}|8oYNXISw}Sd5OiP} zDq*xZglJcTJy5!+EgtbmM10-S>YYlb`JVI+=Wezou)^ZkFKfbdox?^hwn*CPY^7=S=j~p3L$d zo`C#?t536#U2LygZnf_(o2f=^RvnVeKT#Iio8C*Gd0Q)#mKfTOeZwP*QTw!&+GEJn zV|1r(hiCR>);R6C6sX!OO?^@^0z=PXo$UV=_Gubl-hu^YLi~bqD0CoX1M$?vqybU; zH-Oup;&Hi`N8hR2m<+rQH<+8=9o6x@z~^YjqEr1Tx}>P6XlB=}q29DT@)WtA9mTS4 zs7;9B0|NB^F}A77=}NI%n0vWiAPGtS5du=D=KUc#;ud(?gcs#?RIpBk%MprnVD0d5 zhyZ^g{}1pdlF&~|d+C23n<}}!kI*7kGJG}=i%J}$1h=Ur((Ztaq4t(*2Ir#;y8II9 z+DLnf>bJeUH;Dy2bM9lvZoJ8Vn}<1AVzH>b{xNuqR5-_svb(c$5sy`*E4-8Db| z-8;XsS^qctGf6!ePxLAzwGKY%);{WK;45%*m2Dc;E(ms*J&z)S_j%ph;4|5MT`<)> zp?E5)a0W8})>Gtea^oNetG8Em;i_nkQ~j-aVi zh_JP{i=DF4$dBq=@vtFJ^WtN#hzO(BtO^dA>|Ld?veOa!-r-`C@D;P7XLf}ym)e6A)G-Ms_f^`m%7ui+0(B>;V6C-lek_h=T9MDO8PT!47ZsmP|l z+QpX5wnktodvr-FhfG&nM%$`$H;aca`g)^MbXV^fBVwWRbLxfFp48&49sm4&j;EQZ zF18F7M>Uh#$5`460A471c+xXIn|nTW2*KBC6|@KdRP#`=qUcs%R(BfGzAAywhFNj4 znq|>%#dWSS^Vx+yr*x+(qHtCK;H4{TnNqskXzY0+mQ7a+Le3;%J9Cs0N4L_o=XT%T zg{w_X)=R%U-3$>cOW4-p2q~d45|?E5{h^yEw8vB?WY;j+|K+GajXgfm{6*%WcpjNQ z_soU#mjHWg5W6i>0&Y)Z{Bg@o&>pR8_{!%&d;UPa(cU*)O=dFk`uUQ(jQo6IV^Cq% zqg)p*Gm)jFm6Yn7>L|IJ1O%R5AJVMrRs&KN-%B+4JC3#RU6cuFU+hI!ynveF`9P%z z&=RI^)-er{wi=zzM4c+bk~Zt{`&z$Rw=Y?N_79KtZMj%hm;H~1$XrCI9-*WiQ4v3_ z&B=!b{X$1)$--Js$tT;Lfc(l0n_MRsTjnPtN0Zshhgz8w^4d~^L4s>NL1fzVD7t_3 z$A@dD;w9ZUGSu`s{ZNsFt2z1&P4>eir~MKp-OX^uCmD+zWg?!Nw`Q;HZ6({JmVZ0z zxbwcsV5@5dbRR=u&>tXpwJ#snXnq(+6aJZrdXKm2WZsm zDHvU>1sA7#O`Q%*CkeXY_Uf->FUTN7x(qF9A!Qi5qs}S`tKrmsyGUv0;1*R#WWv>i9iclHs#Y<< z)kq0CJ@jYV@62(^xxbSOg9+_Tvy7)HA{RC8 zdFy$vwVwIi9xlp~{0c|ju1eAKyyN9Mh0>kdg_R&Cyc>Lb`ybCJsuCUqU)(UH_hd2| zDT&)8w+){r;Eb>ub&wK_J60YqfxD<~K77mb0IBDjgDHOFDTldI#ywoo-W-c`3BGxX ze+>%pVJv0`fKxf5ZE+F-Yy;JNVd)q|YgTV^h<+nO0&qm#d`l0c`UZWGuP%^T!*cMz zM`L@lJMTgQ@JI5S&60cJDD&?$NqG;8T~m^uAn=)WXo_@>uWuh2*AD6@c%0)j=}`?L zam-zMXG64iJXT~dt4k}bZek)_b#zh)^OHj7i745?r`rCHPo-+1yvA|;Ra`vOcI6uL z^Bi4Nh+?|WOl5bkWt&m_&j7kuiA;+@fhZ(gVKUFey@V1*htYVeS430zR3+rAP*3 z{`Ls69WjUt-zIk?#=ys9nxE+60~vE+DFy(EeKjJq?Q%*dw3a-nF-Y%EPs&rT zkVEV~NOU=xxDA)tbQWVTpZ62C^mdv5QdWC7+qPl&-G63%`>3ormNM#DaTU~`UcctH zzKX68_>#ut-f18gt9(JOUzBQz^4j++5_I?Akm)mujMHEdO4h?fX8`f!b<5cZ(s&M< zM%UlSl>mO#eyBwWaI?TLLFaS`P`YUvw(h|e=;w>GSpN^xfnu%s>(t?%Acf8 z^O#eK1*kQ*5!FTzNUNo4a`#CHQ}OXh*p03T^G=4bDHb}ub=mwxY52um5UT_iT|u4?n%K5fW1SlIG{ ze@~_q9ob%%>aob>bjIkrq#H8bkER#f-C%2}3D= zB%^&dv-Bo2snI__FYthJpumh#y~sMj<#?2TZ}#o;AGMoX=kS+5i|;r1$ZJ0Rb}c~Y zNI~$Gby7s61$GT!0{>#!A}6ryOXRNrpD26RTVnx|Ut65XYALV?DV!OpEKm|4y3!H% zD>)3OuG_%irsY3Fuo$3u{bD!za|6lT_7P#aA#|rQB4q4xg)ur;PW3_~_O+0)az8T8 z$%pgSpK%JU-;+7c9rGDvXU1~6gx)0mE6lIWeD6?L>J0tcBK5Q5Hb(n9| zipVil=5u7*1z@*R;TuI9mwFUgsI2~1*E3!C@IyAX0~!j|RC|-GEa%u*WK(#Jj(aIKVPnkQ znlY(STu#5y`g(Aq&XYnL{mvRQ{otv_GIxqvLA#f%=Y)|^E_ooKQ6VA!$A6xji!i?) 
z36aWl5(gqAvUQhrVPAqfC$ca_Wo{&8gEuqc zJfl;OBx`DJX_+VEJQ3Od&iEfdzq)L36UrZbVDP(ea#CO&+J91ioZ+Cbck{S-@^QWQ zZhAoj>tEO&&=l5R#5&vyG_AE{-1~5M2wAUL`r=_9EgSXj`|kwb=$oi zq1K$}+A!ugX{}FLh`A5s8m(X6T82fD+lPvs>?s`Yr5~i^n43+zvUxym9!dx(p_JY3 z(I0nBhTr9Q)TGXC(EoVluzxW>RGmX8qq0zKoJDfFrI-T{lAk3?H+y3P`DsCr#Npt zKC#{nult#;nKEqku@x_VkCcVOc1INrE*gB@`AGkVG+giawYjZWXc!Rx)MbpMPk?vJ z@$@CH#qm3GH(#6Z%$j}!>MT$MC!yCN z+=1-*4`k@_%TuY-;sY7eh}$|3>Z#Gqfh1mJUW7&F~l} zvM#E!M3=49YOAtpn#F>$zkhBU#<>2!JIgyQT7y2(p`ptLWLryzRT;vVpo_`}}x~48IkqtQ3qE5e% z&X4M9kAn!2B7`RCs5PS$0UUlo&dw$nba9z#*Bnt-)J00m*pP-qVhLm_5R;5#!WVf z6+{+5J#AIBhy@_76hyr)30Enkb@(&B1=fEt7Cw^+GV@jaiQy)EY#-}QUb4vB+q}E+ z(y6VUzO60KHvpi~&>FS1_=bgX;utHL_S0mW2voMk+D~&IsX%ONq!QV5^6=$Wj1!F2 zh9w~-RjeRDj5J0?9>Fgha~Rf-LumEbTzvq{$-?i+S3T1I*VEgPjBt%zo-YM+Ermmn zxzq?`)NtDjmI7 z{HDW@kX4-_Th&UYY8PN>DXzg%Jotp%Yc1;cJ|UTQX{8D7yx}zeDmmL32SV=vLT^dN zqqBXmb*%V1p^ta_c+zVZYa@vFXt=zfY;u2V(N!mAhH4x})VGot7|}r^z4cpLjsa

xo_F>v>0Y0lEGl<=X;>+|9lVBseu}|cXuxU8|(egB7xsR1s7@s9aWa|qt4odh>a|HM)We733s7K z7UY!0gk%QZYAj4mVa-G*)F#gtxua+W8vVx4;d8gimhbM;MYYn|bW;jzzk8tfVXK(T z0Ez2)3aw21wUNr5koGv9Ru8=Wp7N=+^fW2B7j@d#P&_3P5Qm*v6HOIY>TRG_L(n@HFGP7qqJeCjs7ef z{(l%q{l>mugD!5EDB^mq{Kwmies*J#sti|>?d*J^b-h2n>NM`3zUH5Wammq8%w{m! z`4{x12k!q2i;mOWI(&Q4xFxVJDATC}^}qURxHT8L*^+i}6;%^l7^_Ok&pe`Wowf7T zm5Va5uNv-ZVgBbB^Cv&NdCDH|rP#7ZR|K55!zzl5rN8*U!BQP3{K<#^a~%7F0Zv74 zt^cd1FJZtQufFd{`)Az#Q&cU?3QuntNzD9LPgCJS$41;MLH5d@zV;t!f`<+~z0e-i z@?SkId<}*)PcIm_{f~a^&t{|sp6-bwj}-lHp7xZ488;FPJMQOy{@QkhvNBl z-~a0A&`ThFxJ%}zvP-3b(ymz?0xx9~(?qM{lYq*LCZMM7t_@BWJ6ZMSROF}^-O6Dv1T{+TP;n%OFLt!|#bxIF+T$!4g#E)et+*7V{7=i{xgWo?U#>7tjFLFn zeEeatRLEicN{!)Hl|FO#^1%)Hr$;?KK`Md+Ptp^nGu{S=3TzK+-dY;I*PEw) zb?ySCqGRj|A>QZdMLl0ihB@7*ew-Fv$^eE-fiWz&t&P>)^{IcG!!d7w(wmaID4F{t zC+_9)amR%bjr>yE1nvo>+`GjF>DlB$D_-Yo`kG)K-+4I_ZqH0o&5MX@)M4G2ZOgQ? zmd4k(>Le0_~X=*wR_5m1=)@=dxI{oaJPWq!yV3v+9kL56x{Kvz?|l;$6euLFGds)zZhI=3wDNlbt@&TK^6(<_BR@GU7o=lR zSVw)T)#cg8+=ExOUYGCL?O0Lg3b|=79G-ueI1``9MbbOHxLv>D+`D~e$vy9jnp3ZZ?Sy<|~jx~B0=12VsaM!ZNfK9&cUFiCpt%B=JRpH&QGS!QAdpnJkG*}*E zWw;NYEF{zF?zy%yaAk94H+P#pZ%kZ0{oXbwxRqMn@usG%j!t4c=eRQO_Os)zO53>W z7pxF#u`=abvsrT`0*^+rN@VsH!$*@94=dGl-R><{ECvl)X5QkN&cR{-)~2V@i6`*t z=zHI~uQY4c6VtlP^wxnn?Q!gQ<;3B4dNuKOU2Bo@dzQq=frw*n?k4+5Dbz69wPil5 z=G+r!rLK+m&(c(Qx22AUf|;)*f9cyDi`*THG0;tNTc@npZ)purm@R(@$1y@-@9O3@>c*TH zCo5c(e~2(F)pS!$|Dxp9x!#SWZ6@~J?66y^Zbf6Yaq4+r@&%3W0xgKX<-z8s86 zst0gHJ&$j0-FbU@L=kU#Vtc-sm7qVA-xV-(rrs9pTVkv=9#}Li?BLvQSaPFC06ePsfTk zcYR^B#&_7RuSUiD?GTEjG18VNgL~8ka=W}rfdu5Qt@&hECCCOON-amOv+@h&ng&`> zx@>IqtshreSvgDQRp{lAJsy8QTChvi15PRwxnL|Fc;TaARp|)y^Zm{F{xh{0M)|lB%G@sh_`&DXBJs&-(97zTs>#7u$aFs zBy4$ElkMsr4n897>T^Ui=mr0J z>v-U_+`Sl_&)>}6$SrPc*u2@gX+(?Mj#xj4ijg~WFH#(olJOkg(;VWG{55xV*5i|5 znN7G6FmkL2_lxz^D{);=T5KLX+srS0cU)JURjIFUlXw3(<2ik8a+6I^#D%*(qZ{`Q z_8a?GO{%u}oIJ){4`lbIM8ow@YBL`4@@|W0z%<$7(MicFO2s*~++T&o?yEgw31A>CIx%L7Vqe zZh1IOHlt3L)mEjD)ILR5XdHVI4eTB*JiT1e#c5sMZ`jUz1m4}uw-Pf=u2JwtGH*XA zbbab#YiZ51Mnc@r-xfp5$?Cy#4#P|ll^{Ye){dCAhN%{`+QJHC$ws%ykvpw;wB!fh@-Ri+S&Uq7P zT8WjKgQ%C3jp;IKs40hj;<{xLI%7#mWxBz0v$&rZ7;CL!7L_kWnrudl6?xqdX^g#h z;AFU@`*gwe+r((?oue%^-6pTSlg3f?I%$<+_d^^_95htTPij~S$b63iA}m9e(WOG+ zKZg0`3-5QQvgrxwUwdG4WBPCHCynJ4bu0@w+wHvD84VoUU0e3c+(?*fiH|Mh_}?`7vX$EE)Psz3 znwHG5!*lH2It5*ys>K8)RH`E1X6lS2UGT8jM7w$Oox9b*UQ6pf|C}cBD@9W}obQnv zxe@I5TnqdjrN7VpM5&j*y*Y(xC+eebl<(zUS~8{w!*9EjYAb;>@zOvnV(Yn^ogp=M6uFh^)`2OB~Ln zb-cQ&rfUPnVSuiy`kw2_ld?bzr8Mbf()C9dUAMB-w);kyu{&u5J?|9vEq884`3B&3 zuP*jxP>;EvB-CD6RT<6N(%ck`_S@E%ip9>~9xRk~W>8v&yF&AYW||d)eP(d7+_g)c zytk3|zPv~6%8Qi$X3pN~bWc^i*gSBwq`vcNqul4CUCP=M7J?Q*%Qpw^L`>Q=1BHg+ ztnilaSnj9W`%}SffzPNz9&I}sHT%kcp-rY}x*f6eT)mS4UUv1Bp^a3m6jU6q%f0BKiuA5Wi=*n~P(mN0B&YKXf2)}^okZoGqP_j^nZy{=~kg?2r6-Q$LBJ8QbEk~;&YAJU+>>osf~ z+YC#}p@jCEB_VrHCpuGx;L*ut7~W}-iefUp8x+qw4U-qP&v4-+i=M>YjT0-i9Juz@ z_o7s|-E-Wz1e@)F_1-+5>48M;t-bl8xmE%-Ee2h8R~eG8nW41p<$g*t4JX4X(fpa} zGtHcHj#L%rJ{8qGd~-Z4l$ds0Wn&yWCnV^gUTB?b88UNf^8P>e-ZChzZd)4#LIQ-~ z0fIY(U;%==1$Vb53Blc+#wB>r;O@cQI(UEp!KHDx#%Ubx;ywG^?|xPH>>vC0saij} zsAASybJ#PUG3J=t3=G=q$Cl&RVP;+!71sId+bhls%H&8Vw!kLR#r(_|N$hFOaLjb! 
z|C#1(!8r~J+?Aph-4tX>(X%}2uWfhCOq_4nb-p;UuI#*c`m@X)tiSc?&z{`7uD$;( z(_9bQ%NTm=6)dy=fI>0pr_F5i&OYG_3Y1l!%Z^qWlWe?*Z@GriK_C+_N0#{_D(q2L z%RO^nzEKX^wZ~+n$t%0Og!0d{WaPhq%DH%p^HBkjV#WtCt)DvY_POn5Zaa}Qm=^_Y zyEIrrJZ%r&!l*Q*HNcUmdoxD9W8{jfhFGhYA`dEE#WN-<5aIc=l6wn6vjw$o{trmZJISJ97Gf#<%3t)zlg zMqt>uJ{krUT%uUJ%u>exvt!-sN+e|JYHga+yZuVYM(a^W;t*@A0Tc@^a(j-)LQvmZ ztaQ(GV7oc5NX##JgT0RuDm64CGhME``FmAjyi}LQXP)%-YDLTfT54gBOZ5J0&}(Mr zSGUn<8Uv*NRL z-O;E}dw~7#z|UP(JFstqGH&ZKn#O)+<;gpkDj^0u7vHEOb#hDp(`tGlXUshO10JlZ z2Z?9p8+8!#%6ChE49d;*KQymDiLoPb0Zs^;GAV4IcOm2Q&0X+5`*{krgUA**Dqkr( zbZkm@Vn|Ug!KQx3@h&^&#T#tn_A2p>$}zX*A#hdCGnrVD$cBcpeIQ`bXGsZ~m1hsd z%|;?`yUOsG{$bIf!?78R6iGLfRjR2NA&*m}JH)NlAzq*|c5wo9D#`vV#XqN<XHULC*Wp4vpAGU4s9l*NSFkaru-o>ux~d3@Ri}gskeBnogzO z9kG2E`#b7Gi8{m$*PTm3r_Q0=uFma?wQeFF$2nasBCMB^|7}*S?IF2QYy}Jo85JU) ze69z@AIkMPH8jYTsgc4zRf76xWG5xtUPl001cW6mZoEABC}S#Vn8f4{>hj)q-qGu%oE{_Wu&3qZ((Qod(t+ikWFiO! zWO6dkOlL_7$i$?(+WQKKEI)(gr6H$iISa_eO2;EwLn7-#w9cPIydT@!PEX})&Zd$# z3e)W3Je9d7k+kz`lNpW#K7}p#9Qp7IBtT3z9d#R=<8T`usE*%;S7u?mS})UgqhS4o z-o>{)DXbXzPuskfpSvck(;`iIS z7O5;ISYH#``4OXo5faas8EOW+m;pTw7u?oW(okF1p`K;h)-CxQ5ociXBthM{x-WY@ z;ci}1^7b8x{O~u2a-0(&RUZA6&?&^~+&n2Tbr&Wk}8_Mbr0j9jioVi`^*&A}a@2 z;Y9#C>tyB?D?~8a*8l!iIJP?!vjlD8!cqSLdcp2~ZL?TjO)BzW{JjB}Z3r9Htdbry z9Z@rw1%99_^1YuT9fts{@f0I=*?pCwk`zbEBHg^|Z`DQ`zfZiNX)bkdR3E9Ow&6oM?PCx21MW@tBr3%NtJI|^ctsBvvwcq+U}>*q6a8H83KSz z*vu=ofrd?|Mb0Z+(0j+&VzI4SxZZh_pU=oc|Nbg3aJ<1pU7%%&+i34G=Vt0($$%0;Q-yi;*)?M;!aw8JUXntu(tt)pdrw;!)+yw@k@wXg~fW z)VmDP6A5|Cq`EWB*y5hg*|j&zXTrv$va-RV~@hg^p1kyRF<|P7rmMz&xtu=8; zGubE{*hI_ue5fV>v~riy&He#WK%m(4X1cH*hkfJh2GxEYpjJ2glf+cnqNi;$eO^-Y zbquNB(y?PRjLrnBueE(1qx(8Zk*2<3eeNj;80hCCa5+F(ORNs3S24(i>=lbP$&W80GRC6`5 zwx?1x-~Cn=A;XfHY10&7y!`syr+fh~=Iv1l_<8!ak|Pcfg}A68F<9?nzW*HeO!#2L zJ;$ACz|vDzjn*{Bw6^H>_-s5|mXq%AQ63oUtmS*XnV(@m&TJs*3EwM=x$tTTZF^Xr zK^lPy3~AZ@9r}K-eCh9O4ILCN2rdF(Spw$>uY~ncE0EX3>f5#}`rg8LN;D3`{XWe8 zXf$vBG!S3eaVJhZQyIS{h($~2@7|~_K>`A9)DiBA>kiG=to^w48mt~BDhRbI2jnza zF6&kDB&U&Ut?^Wl1|h-GlNxIb|2U6(LcX8>iPryxd|TU7r&k4sVr5{43S8`F@;i*(^L8rP?e_|zFnbc? 
z*TXK8?=)s3u}JUmVju%zPe$bJfkINDRg#V?`N)QFmmVxnXEq|gF`p76eIUTo7|wWhMx>}`^SR;Xh1&G=Xpo!4O>taL;|Fh zJ#ekHHmBh^$@`C0ott0u40F=JxKh+;pV)(dk^;Ez6LJbYE1zsoz1{oA&DH3CgHi$x z^V^L>=JS`YLjDV-6&75fBN5qD3T5unVBs^Ct{%;HD28B>ugqho+cmGrHpc^RW{y45 zs5ZhpVPG%#1gKyfSYPKJ};PGL$r$}Is@Ex-Sv5F62@pk_iXOi zheSSkjg%AB*guda(nmNSO-Yk`7r)0G+KSpYIVfKBOapFX1YW+3rKYduZ@ZW4#M(~n z2){g#Bhg_o#w4NI9r?;{E zfmH^;go{SZQ@s|;_CsZ=Z?=nVai^{~1z%g=rYU&)77|?2oT}HC4KxB{&y@S4HfVti z*>gtdQf?VhqK;vCeUQYnyM=3WvJygXW_(=Qg$p}XB>MV=m+fPqji8!*eL?V<7 zTjax~$T4wx7e@cUL=yiBJNK3}ikzm=fjQpeM;=5|Q~Gma%5QUFj6luKBs)y_6EJ9ns} zL!MQECMj+L@Oam!@nPygT2b;r!b2>$X(~t-uWxW(lWDl(!Dg&5BY{P`6U@vzOag4N zkJrU<*7nEKr(Mo~xHm)WFlJuL6R^bUP85Yi^qrw8JUIjm7KQ^`=kI3*PY76lz`|?H zK=Yq8a;(nuw13##t3Dj?gHql=kg_akCj+n=5Tu}H$5Q|ldoeS!v@JMP7)J~Rav*@O z3t{=_mLoJ1brw|o3ADg+H@=(=jx9w=Ke#`ncsNVi-}L%&|7Y}c)f z$R2By+4_`d)w=q6@XAx0 z+Ec3(TlW(tBG=_9H)M)WG*6`BdQyTBqZYLqvmtU-V`ifkokHb|*Qy2|jG1~sRyp4qP$V$nAAsc(B+YQ^z_w)0A3 z4ipWFG=U+acY~rxw#Xb%(BfX76D_uaADmbr==MZ`vi8Z^-5a8>O7u;ieDsMidbmow z&U{(&J`kPJ>n9cC=isR=8y=e=)W4v1730)D)Uk6B(TvNw-@M;BLs^HZw>R91YBL0WZ?+-|TdJ}sRC?L^_{)bPv ztRK>3k2TM9Dzc2HpK+p!8Ru5k<5t z%rWcyk9UJMHU;^usNx@22AnSPM`t--C{@g{eQX}2xzqQi2th||3{bpS3Ul3 zv-)54_&>Sqdm=t`t(3klVwmEy=19ZtU z;cLAv;b}Z}N$Vo_377?(Lps<^XCnf%M~WWx!&DHU{G$(tEi)WJAPfX^UL!PleJ`~^ z9tDDTeGn};(O|rOxIOv`jerWx2({z(eUlx)?R`qVzrtEv9B*NbLl-lK z3~Z3oZ=?dQ%i$YfTc25P*@~J_m4wM|)Kl8fUlA(+61y%BIWs_qo9SmQqMJ%w<)&l8T$2~6@m;4t3$li<75L(0!d`#%SG83_Q4 ze2`gt6ontm6{z>aPlqZYgJC#%8>1qRlZl{4+o-RjNs1lUTl#bjrKFF9xcyw-YbX5$ z8oLZ%m)f46_L122I4G{yT2^+Pey^B)8VEU4>$6t-oysye`KQm#dD0W*({H}mkkNJq z7Z6WFMP$arG#gIUMfLtNQ@yHc% z)N9qI(#RM}Vx(DYaTyEuI^QYjv8ik^OAs}C;?jT{YSp{i?;I~QwFkzGjfOgWZokge zY@YU0fP>n;lkz#q4Seq{_(kR>1COX3dhkKnxR^1*Vlr*MT=LogE?G`YOO!ber2xme zErX4KgoR$@;mY`T%>kZy9`RJ!!1n^6$EOlft-K36S?`zWM0&1yul@V+;X%GstAzTf z{j#IZdWv7spz+yurdy`-;jCCq`VXm(QN%P1%9+j)EeRuQhHi zC@3e26cdf6nt)b?OjP~;$ml9tm}Teeo^!u)8jnpZI%|_?Dwh@2VvAcMiQ{%scs%{k z-8DML&G)sIw#b8iN?NP&{OY?)Naav&Zvr3PM)jm&v&Ts^70p8CA&7XWf zKe(_i|7Ifm??527D4=Kvq~X;QIX+$`2FV7X{sJGLdqH$gCSs)qw`0){bf34ziIl8=<1kEtt@X7#*UFq3~ zHE?agL9I(WC9fOadM~nMfXSC2luzpA?gQZG#U_ImSUEX8=_lSla`w{Wx>zc`|Hvza zVbEZ>;lHDO_ToG~g0-Bs?ybA>yd0Loa~OmBIT?J zl0_n-bjoBW*@?(KnYYbJ)gG3&T%wZGrcq)bb^L{Bb(hs}vwr4*Bt7xX>fCt2(1zUw z1#)q*dfBwic#n&KN|~0T#awQ4G>6%U;13R$QvLI)Z4S%wPoNNr>kYiAkI(86E|P&F zIH;goizASxN6&A*&cbPTDKCv6TPcNCmVj9&rBnO$a<9o(5pKIf(&mSo{SVk@s^TBV z(PprMi1kw(G)!Nr&|GyV;~4I?(D&eQ6F% zg{^B?!1dXk({pcY`-6Ssn#b<@U1|gu;|wNk)*t*P-ybOP2>4WnOSsFIV|Drl=D+{X zZl4zU2}0-oKw?5WVU#RJ?GvDB;FQw{sQE5bZUoV)VWfJUv{#{jS@IRKkYbdDqm*BS z;Ii@yQLRqqv*BeZI$_iR*9W^Lg}up&g0oaXk8gi~d#a!Kgh%{KvyV2fwF=@58Ry zK*p^ni&8(DJ6oUAIHvlf{H%B5Vj-5;!b^Wz!6H!H{!4J(9+%YsVHS9GIaH_KVZCoK zZ#Uy+9r+Qh9!Jja1~{vtmi-Bl?u`$KRTZRHrcqKZG~MN7^8sbCihUdq$5J3F%ZGFwEo4tvz$&&B^&szH>nju7I^d}7w3jO{aLDxKt?MC=Z zFUqiHsg?i?|CM-Sx(x3$c1WJE&g$muj8R$4Tv>}rztg`n`jv1XP0g$_@y#u(Y}Qk{ z_ml%1t^nACLr!Cj?R_}U3gX7JzTH^Y5894D&aEG4(q)tp-y!Q_n^L{WS=|NwZF0yf zQ0WxsPkX0XHl#{t&Pi2k^JTacKcJwG5je$ldQ^!+{>A|s4TGMeH{|xp?Z^OD{wge-&eN&gemA#M0yrj)huYn&y5l-PgP~dT)U>Rn~u* z!~9|EKLLxEw6ZwC7nye2m+A8#;D@x|!|ymO){qx}9do~|%-bcR{?hJQu<<)i0vxMu z@be9D5!t_yYc)NP^_Pd6e(_bx#YF|$Fu>+Zt(VWPmmePpRbjTjGxSG@A4kUNgt4`I&zWQ{{^ zrT5lY{;pqW(vPV{qb4z_ePmy-Hdl`R1T`5j-(rzIHEqpmV$5<=bGpl&-F$hmxjE#P zmaej-v?9i1;P+`gDeY{qaoLw4%lpRSw-;+Fp|d;UtE1T4ON!X_{+Q6RfJZ^(ipK$` zK=0$#jufCk(j=KzCQzjdJ|LiND7h$;@?NP*wtc2KE=G&_Ndfl$U0sQ;X~wXcG=Q}v zLQ~B*2{e4CZ5%z`vrrsD3CFh%tfYwI7Nl2^i+^t~IE`ZLctxT7qVT;E1K4&)D*DAX zi$DS+--{{jL3B%u$K7P3dDh=pKHBwrT=)Iesz6XPIsTM2^VdW^dB}XzA1}vhhE=V? 
zny7uT!Vty7>e9B|=7CwiLDJFCx0xV?pcXWeYxg^G-_b)64JcTqP(4S-Pk`G%_rB&J z7vp7;umSb`&^q%uIdVo-wRhSjwCaTliF{7mKi0n7kd)l_S(am0oN`%B#{$dzPt#rjl1L z)DR=%S%-Ct;ne7U+uKeAGg#W^{cRD6y}Q6aUKa6{^v&?yG;BFeg`v_o!2G5`CQBMi z&>r%Nf?Fexn9s3}96fZ6VBl(;k!9<<$m3n@gIku24wT}scW>RW$n)gQ|IB}h?mh8+ zh|mttddu;`DCJ*cRjS4;uX{~qbH6`!JhUXRUIc+lKDE^ed0$oGcGbDQPU@7GNaOyw zNl(|&B~qS-;Gi*MyC~#ks9*2cOaNXr*;DQZ<0d>~v&||ih_WiCi^idpBM~(B6FmO) z>?WO&gw066^ZU_=y0VLZ_J7X;;0f|aNz5)6 z>;9<6%a4wJ5F)8wA)<;*QrXrfahA&NRQQ@nOQYSn zxpNe2NYieqjxWCvV^;avaT9rs6^~M7soFSHj&oA=wC4s;H}8-0@`ooBqMnsiC=nm%4o6#C4Og}-2U+~BJh;+y-j_kR+F;n z@p^f}=ED}N`%VAR(@mzy(}{JY(=Mg!)6MhK$tof|`>c+~2XX)iR1y%)tqI`uKh(F^ ziJNnL=}~DGdW0czV;)7L1HU&hnC&H}MS5@1j_nfSh_yP+hSXy`p*6bogg43xm3 z9l5PZ|K|;H*t6U7T_*WtR<+D8;MhXNwEY9|a6U!ds-ck#e#^@DTW=1jP?G?`9K2l; z7tCiYZ#&-7XbADeKx{J`N#A_S!3=6^lA92#VT_9okNJRod%3qLAbJi!s|I_k-?5(u z1I?nsY%nQkUl9q)K(eKpQLFgEvRYmDx4=nPBUcX}Jt?9{*%biPEG$q0CgH}w+1dm~ zSVs`DOlC@$wI2s4q;X61TTIuLd`XdjKFqu9-|hYhY2F5_vFQSDU4{kq1~BehJbERL zv3m#`%v}|_lx7D@g2M!M)_3|(y0{a5x5Zmh%<_oK(tYC21&&ck1pyERlSvdd{WBs# z#Rk*iz&J+uT@(pV!qT9ykrxcY|0wggpjjLX8X-Fw+v<$WtzVuXVv^J|>tWK(Z6df6 zzFa^SBl(lcXNayE_2$a?CfmG{=%RDhbYYfV&;5BE3ewHaPiTV$H2BT4dr+@j;sxK? z_8?PbLXN}>*-Sx?KU5iq5>svsM}HYb7488L*+LPFO^&pKAtHY#joWsCmgZyhOYF5D?43z0`USDLGVA1GI<>ADugLfn?AD3BE)NTW z>KaSfm9%_bfhivjMLxy0o%E4B7=yT}VR*_#6oJL|S5=?o1YHWi%(<0fKCd}tJj5|O zAdzNH=fiYz!g~?oB? zNG^$aae9+QQsqx*w7_~?y7!ve2BoNOE9D<#F}p#w%XVYwKG`Q^9%|elE&J#nxRKGg zd_vLCZ#=4;#;QD3wofrpX9E>;g}`oSDd$fjAbzOaxy=x3M9Y0{8%;2~OihVLD4IL` zl^Xe3Ly0RU=2#@1HS1WUy7KlznCTK86QU*U6CBl5^K8Jnjaq4)@>yOm18tp-MF%SA zDj6e}zx@uJKIk8uce3h7Wkr#Qdd8rfvG>^Yhq3v`a;ifv6hb2waJ*9R_rBiI5Y z_+KY~<0vH;^hZm5Be=|pF!koE0*hQYsH+Nrb(`$tCMz`6WBlJoOJ(Nhq_Ok7gv~TI zCcItMCksN-0CU<0{?FAJ$`GJx5k>VUab(ibFdwQvd(~*ugJyd|nQcDRq9~g1gR^i*k>{RaC8hmYOqgQohgvK~L`BBkxpw_u z>|P8yb;f(9=qlA3@M2Y(dOxJx#t~(=#F<2u`5+nCr|ugU#4e(EatNpCAKbQ{z&B0> zxU}1ha41N7E77}ir^sS3VZgycL_2HZKH17GN+Si=LsRPH4z)iu7V z^(JS1D-#bs?twwtzky1w_}_|#?B5i-Ss3<+6nKBy!yBr4W48o-!CD}Lz+UySd8E_u zIvX59>fRFd!cP-izt)p+s#`y5FxXUosq$-65evKh82lAsOs#f{rS8oltC{FyhuoFn zAK}+k7-KR=OFnn~{B>4ELZ5E1kZrPuhe`|9qE^^7GHlY&2dO{t%x+P(>C}9UBN#xD zyfZ^_^($Ci{W%_FewfE@{BdZ}4WC2ynJ<-N$eX5wo42`UF^4w`ITE4yOX?U%YkV-j z?r)p&7wy3X3ts2PXdPp=*hy3?Gyzj(Gx@T%!Y_YC?DM=@|Y<#8{MCBG~vk*@QJO|}ZDvM;Ei zK@DFNKdwFKLTA&UU}DCcYg{va9*OeDXFtiUZo5z+v$&_`TAUu;S1WDX)*gvQG&sy< z0Jw4v$Z=mtpBzd2MRoX=jIEfpQ%@0il?z@Lrpl_XD%cv=^}5x0SM7&@o$v26I9gHl^O3ZhIfuZ;%Y(Tx!IzD%cu5rn zkZE7dhie)UCD`<|9*+ybu1hLxFvRGD?M3Zt8EHf>IDEtFy2h4_KKgyTleqBbjmCQO zeuR^1zB*{QcX&Fm|5e|^!Z`9WMX_<+89BqPa?CacwBJmu+ML2TLJ4xflU^JoYlD-{mNVG(CP!y1m#Ux7)Rt`M7MDWjBL{?Qz2}qg)^c21B-*P`0414cO2box znwDmHx<<|Wli{4*BZM=bBlKWSozU0WFYs1{{|zoGM^o`5w#p@nH3Hg?!l>w#GMUr3 zZQ>T1E0e{Z-7;Z03ESy3h~hq8VK`v5ytLBV+-Lo_ZZ|0vkkjP^Fod6_zpG>lxGG)WJl#yD)@R;-nOb_b!}B8txeWqBE!; zZqH#1`oO#khZ6=p0a4;c6C{WG`UEp<YQCOX^(dwb8FzAu)-0 z!>-2t&aVFX%h$2dWb;;7u$@b-0wk0Vt6+&dT25cMXRx?%1=`_E3@-`d=rrjjN z-uU*x3E&5SmgJ0I##BLQ<`LC{>acfoU)yBDxmsO3^vFGT#41bz73jb{RpH3kQozZe`vE4L+5x5>X3H%UuqTW(9&AM zwC)QQ{g&Ay9Ea{3j8aGnC8VrnQspwJ8s~h>WJqZD@@j8WIAFNjm`wcG2UBc+BOI>| z)&43B2T30Te=48?2MJ|QrnM7 z)%BM_Kn;YpJ#+VGCI*FE^;&O!{GQvekQKVhm^PapuaTFLB$(4|#Nfien_0+o2U^r9 zc(wA3AL6nW3$m25TddBZ>v0@EdmnN4~^cyXAEA5gi7Vj?OJPxUG z#@m`;QWpyIOh-qHdF@OzA{!CM;KxRFFKxl?^xA?r&yNYLnsOppV<>{Tf{$xFsn|AI zI|URHK3bJ}hZI9fS;Fc`toro2wWd=PZ+>jUin4s^>n&$av_;Yp@NpoI4*~#vntG@0 zG1li)n{>)-)0RWPb`rKc!JJ9RvvgI-U;w5PBf&liIaMNlt(o3O5N6!Sa8tIOkZd-I zHM(QF*t%E!a*fD=t|89`B_9p{%anG=pvZzl8kaEX4=%3;gYwK_#Wb$CE-aDqcw^G& z6JGHu#ntXWvca;)?JOTEsFA0vtyaLad4CQ6I_jgO7x;NZI3<#PlYP!j_KuvzR88~p 
zI)~tj>(Sxuao3Ozb&5Rgx`siG4LELz0)BSpyrsW;()RiGs_lFK7vTd}`Ko36ym)xJ zPnHya@+CiJ&5$E8O}4A8)BbrAn`$BSZkn8d|6bsGGQ0mU)p!TX{5DR-zCX&L_R_qM zvBceN*u8&N@`=eY=lajZR!xz6@aaY4@^5(oka*l@iLxY?a|3*wtiy4xsQ#-eqW4Ju zvp+;Y3!;iWMVh?B+KT(4`EuY|cN8OV?_BB8bB$-N+PU#y&SU4CL#?D6O@%7oa=yIz z0NS{0L?D@zmdGF#womETh3*nGnD|Z(g|0&s6^``Hc#8r~)$vxVEuX-0%{{-fjaT%> z6~=#){7cgFZe1WrCgjauivcI7y4gCt;>Cq~$x)4oJnhV8^8Py%bZM&Wpii$pkFE7` z_}(G6!)Q_cb`aPou>;!Q5-WRQ=V_Lx-DmNRC70oSyy|ZgWe15Kd~HCretfteFLqG( zAznfOC?qN{6G_Q(RU839nzOQyrgDUDwTRgvz`vW63UdM69|Zfl>DFV? z7yg(5uwH^M++p(vD_`;85%#QhNOq%{l;T4(4MhT=oTA_pH3BGi*D!YxFwBGYw>@yM-Cly7}2H`qklJbgvBVa7A z-MO%)@Nb{*WL}IkZb%tt{_#qTyGL)wNwMnr8~sFt@)$kG`zOvk1}uoGoccj3>Ftp_ zy@Aj0RE@d+sQOZ8AcgER7bEJRC+8+{?c*s@cE*)!-+nd4ye>#Kipvzr z8Z&EKkak@*=ByoZf|BPZ@%Q&5x@-OLOCAx@$gu7#g_YE~KwS~xR^vZ9mf^0JhqA!E zgS(Xc)oAm$mn(P8 zPIW7PeWS%TxOIH;0}$8_syGF#!472vd0oQn!rx4)+P|W(fg|X81YGvABWiEW&K82* zcD^N+zQvjrxh+GE+v>yEKkt~R9efBw1cj{H@czpI1)|(sSZNIqSsq*grx4_lD}?O^ z3p4xV+lBgio9%(vm{p80$uVSFfG@Libar2wn%7}ns92FQ~?A~Exe#8YFj*Qtn zUgmG6VyrZ4w${ewOe1J_Urn!8Werb z$NJSEViE%3r{rY!I^2}clF8UElUeAy)c!V&DpqAZ!gc-*>2z(aIUl9DHt6P(_dVn` zi~Qjv+*LN~z~6s@Q#_;Lnzz|L63>m5iv;>!CW4#R!zb-l^#1s4^$d|KL!CS-*D2ML zCf;<~yQJy&-*kL1X&JVIIea;W(0GvHy}=wiw;7wquNYSJ+cWXD5aC?;>~p%o_~LU9 z1m3R*@5jI~`xW+Y=wVvy5Ya(&75$jf?}uyZ6ECoHglA5SR^#}{VRpQTdpv^^G?H$e zo$wXiQfJ-!n#DGAKkVnZ8f|`c&p^H()R#iA^%|^MjKKF%KKus_9u6xS3|?rPfcXr( z+4T8~xtYi?$~uTn)z#!MInu!t3Nx61TFZXk{(6rQB{Ae$Acz&yll+;Y@=ayC(DMa4$##W^op;YR3bA&z$M|9qDk3nq38P)YN741rB~5u_RCeQ z0Np<2qr0>I2r|l&YD4R^&4a2a1P4q>e$?YP(}rl4lyYKzJTmLM_|`RyB)?9lJvSO@k5)vQag zy|bBo+i zc&C=zR9E4ccKN!#Rc=PLn)#`|%G=p;f5h3xu!;e}EA^@{UL z3*u9A5yKaIQ|jd419sR2A-R%6duM0Z3+vl&Tf$vFfuA7nFR4ZApHCDt!l43Zd_~-!D);ACH+c;kQY{dO>O4&~6-xm(71t)1P&0 zi_X@#iw_v)+o7LA4omLt+!XdwP|T~Ha^HK;-|r~BuQvhW{F;<~G)QIiR;Kp`@rQJE z?WYI9X8637^S5f_eiZK}#MLp6?aVTfhxg+9_T2ceE4pu`F9a{C+aZxDZ)ZV@&4u7s zT*B-p2S!EJlDw?0BBn9wiwA6z_p2sLc!LkHef~uowtEU_|7Dzn{p}|u^)~mh4~jMN zK243VUf2Q8c&vlSifa~C!Gyum5HG4nFcZ4N!qzlz7J29SF#N7N!T3hZ%i44#_!fhU z-hLgs)Gh+<5!BL*rqUkC)_ozn>Yj48wOmsbgKzXPiio5dMKc}+{O=koDPQ|15M~Pt zO>N(Eyw294pjdKVvM}4#^!r9PP@T^apiAtzJ1*rYRl#)ry$)!)5rDNkH(PtJ-!|$z0$u+~%@2SRU4YT$K;=2y>(T_w8kQrdLC4^e-sHjV zO-$V;b5%bCHt>V@yhLirn>NcY;ioG=gkA+50A2rWS0Ag44vf(h+y_4(A_*{~v|Dak z-XqPBHi8#Hf2qhNJ6QBSOkC$0ht3!;NVtY+_A7>PZ<1|A6QDQF_9+sa6$Z^5$uD znTj_$s45xOwmGL&o>46fktzcTW{*l)+?uu>*QPArxR*%tnqw3|4S%~M5b)(AKrU07 zjNO-BYsr;om7=8LdD z-56M*b&SUSMWo!GWBpz+_bq0*PKN?(>wy~h8%4(>XvybD=!Xb62PlYzKCGK|3%X?> zCyokENMyP%Y)j!@@(BBeX;w)Q#KFTsSsBHg!o)kpf|uLc)P|8biX&`O{B=TbCyq66T%s#HJU!E>@Et%jb~!i@ zb7qtB+&Z<0w2gR)&q11}b?$0>rb}ZE9`1EcowvI0Z{SI|bV%C?<}~Yc>GvO<3HAX? 
zWGHHGOLo9MeW^{b)-?RnL0BjNddI!Mi*pg~s<6`6Sz;$aG$tGq+ec0kMarkARc;X8 z&F@ZR6yjx?($;;y_AI)7FVNC^Cm12-@+vIJ|9|Eb{gA(i0ZlTmakJ+i-EETyxY-`8 zPfVC!4Q;H#*M5Q4CVQ|hZl7|f7T5j5O9f&JA6P+;#HLO3<8e2ZT1DHaRpBb`0pi74 z1Y1(QC7Mo9!K1s4A;kCRSM{L~@K=`WUBKXyWqcO_;^01oJ#X`{_2q7P&e{3iWu^e<%NNG(q@U0|I-AwcRwD<5cho%0Zu+%aHrJ{wW_BnHX~#ETD6Y9J z7G%)dUpc80ND@dSRynK}DsII-xOj<$S{XKd%7omx#2<7N{>gNxD_n63nQpclUyJgUQOCX7A_jNhU<;3#SP?G0@%Lx8rM zAI!RKGSDiPFKn87XuEUWjmT~Qr$`zM?Hd{$GnSqTeKzG%xTW)!z_kM#ByqR%nnT}j zZ_3w!rLR!JBf*iFDZk%`sDk|CdsF0MP-U}Y!MAZSGy)uJoLb@IQ3?jcQk?PG6LFi+ zK^~1endt7k&A(JnBSj9!Knt)BJ2CSOHf*6Wq$d>IqlOHKdmFNNwBU!kn`VUSFU*0- z!ejfjXPBF+K3z`h&oXzh)mJ@GKvz4|LeGo4Lok8CK^;uY!jV`EN~xsWIT>}&6@8fB(Y#hHp%D=TxL(VcU67F)-axR+3jrjf-q8H~FEi&~>x@Dobi zCI3ZEEbQ~baA8}d(ZOk#jaM_|4<`>0-E@Jl!>HbU zCxh_I2-=hr*0mRnSX!FDq$4{&-$uhYZ028mhTtgl`G^>ZPzXL{w&E;ZiNU-;Vym;9 z{1&Uz#+Sr4ea8eaC8`7j{ENsfkDx1CBn`IpA^Xu+2}H6x695CDc4~xkoz8dmbr6`E zLTf_$?~ZSv%KyuVs1a$9Kghe!v!!+EDZa*80Fl1>S3=~J_TJ(9-LH<5FVzVFjx=kqG$ELQ)!} zzH0lTAb+IiNRkA3(lK!+Lq2OpJ#JoNW`fUX zmiDHszV4k(+#%_;y**qgp;59>MLT?k+~|e39oTEcYdX)Rrhh!%P=)Uvf<`DF&YeC! z$7cPnp3AYZKU^I-NtiHzaZ*>Cdsx$A!S3}L&;CqPe{SxuyIdI)O;WOR5VFub#Wg&> z_V(PcJ2d9_lI!>Hu-OTDKTAQzhx4;hwH9Yx!^;&B&O+r0@|vo2w-a39%68XunM0bM zn0f?!k7zOhy~B4JGM6UDOQm^R)bIGN{@;8wE+R&<@gIkhVXF7?Y|9h>n0LmYM%>AuF@anr38#1 z4k$+oh(~a_DBb+7?y=S@jflsf@G^{|t@fgq@Hx5M1dV8Rw8`g)*DTc6cCCsl?Jp58 z#bEp{n?Og$%xxjq6*oI~RD3xYp_*DUt@82P@zuW?kJ^sr$Qq~Pg|M>076Da<^E`;th6_wiiR%mPhZjvO4z3@ zw?yH!N24w9HleBb)Z>S5cmy^5zZR!1(rDy9)t`=yNa4g|H#Yuw^xSsWJ^J58EowTT z9q1*RiVeFC(8GJdv7t_{oZx+Zx|@6r)F4ZrV>F!nx+RswLIq~w;h>`Cq1m!w?oE|3 zDW-jxo@@31l?*6*=`B&^iW{rT)wdD>Ig~RvqMBQG* zJt4>01P0Ox+dZ8_5kBs*Pu+lY2scK=c2)mX;Ge?lEhv#FCUkBZ0s5DBJtaOZ<`^?% zEZv0{AWVh(GonB`8&(Qz#y-|tPFB^rEC^pf8Fi~Z<*YZa1OjeUpWAK{X;USWa_o`C zs6oTbET|0LZWH}HIkX@O=jgaUbQ9q5?vdnrx_v@#M=`fRifo8``QdmQuJs07L2@xd zAUVp8SD1YyF|gp#A)nVj5%AZJ>aDbge(brehbVG*9?Y(khTk7_IlUaE3OlujP`90* zGL6@oWivp~+1gaNZ44?$AvfShhQ*ePzfhfTWC1-Y%psUW%veI_+0p3AaoE>lm{L2dqg$ z(muZrB6$j=tv&$ZbqF#0<1{1t`^#nTaB6*QJB9bBnh8h~7E@^q z{s=E;#MAhI``l?i87kMd^KejR23&}+EuEz_2?t5z9K*wIkR+f761uu8lyU+wd)6o} zq`f$g$M*&?0%yX@BJ@`(@?kGPto-4s5jtN!whpZ_8(s1wGdW%Sv zt{A*lj#nnqxlZUL_}z%@Qk$1OE*@dZJJO$Dia!appH2%&1NxBrtzC4S^I;-AO z!7|9K2r6uQiF}K}!*4MfyuUqs_bYSAiwGrWd$Z{zG8 z(gT&Tj#rm|M1QOX+}r6G_eI4Q$a{a8wBL2l{P(8yM8!_$mHq~3*r8zApXO2?+#^?`L^O9zt(b4{hj7u<&6+7TT%;Y5jdmc(sbZoc@=BZqJY^$>Qf4F7qj^5s(JyZjest?(XhxP`bOjdr{xTzV{yQ^Ng|Yz2Eom z_X|aaW39z?&U4PA=2WHMr1FqGC8twl5|>`%qV0r7wfYV~F>P)R{NlFA_K;0UhSf$_ zmjy--!>g&WEXA6_k^Afvhm`r6%~c7q`#7O%#g!xJWdDQS;C<^Y2H56)@z3a=my$Wk zh7%4h^n>((^J+y5)tP6m!2$n1unE9lOt=lL?~fqsmx%VV!c`0R{Ic-Re$(Sad~?siA1;Bu!S+`Sn~&*jv|PFa9Q4_^in31VF{ zc)!QLYV!VZlx*{7YdIZ9V^1qP< zqYavc7q(D|xYeCBF9I=+F1DB1JIQ!{GuAD>pGdvAGCj>-%#x5>8u5Y?ZgGB7t@icT z>r}qJ5P}@I?!MNiFZ61qg>rr`2y0aj9(dVJCWeWcuj#(I-&}r(5RI5ubf#m-P2Xj% zvPC%Qzz)L3j&@SMo8{w!@khiFi6R}5Xu51-8QnwaW-v=y-4sv+KB3I`90-@|sE@>R zDSaP~&k0Ww$PGtn9T|4IFx|PZjVnE$H+^Qy`}Smdvr}uRUv_PJ{AkpR#L`9^lTqM5 zarq2^a7A}EWZkF%ID^*NW zdLJs%f=?F_(Nf2JjqXK3kLnw}Z*rcLs-aK|F-yd5`9ZYx(Kb`?Pak9)q9}hLgW=0{*h!W3@g2 zNa(h>cNt@0;L8nvw%3g~vNr)Hn~rZlq{NSOl>a2B(e&-Fk1}Wtx zGvV6g8uekW)ptvK1|bpN2T!>U1=hsr0fm83LmZIvh3>weyu1KkaJU>Aj%>C3Q?H_A zTGvn$h;^WI!QwYjzhIucYxgoS?#-QSIl+qm>&gw0oX@db;a9Bm9UK*X{mRN~O^L=svW&k26K+8@oxI&{8ZBoP4k>&*SB zLa8M7R_+&S@%x{|95|tJy?biD=E?PkH7beCKRhQ96h82DWAkE-i;Z?^<9%0Q5WU+~ z?lrpcm!5m8hhJ0-9gjj#pWAs@eXiQLN;BX0!yCfMIC>TO0~ar-uM!p`Izz{Yf<>mca^xq4XF-DEKsRBjvPUToQW`*i;8 zg~+iBH9(-=_H}eP%Y^&#fM}&CK;7)^FN#n|@t#4cmqz#}Gd8pn)ekd3KQ~#Tp`nIh zKb8B5r^S~-d%wnV1q_(gGDd{(!{P?Dn(QO34Tg60blk3Djt#?!??i9PYD}jt>rxoG 
zQB@2wPaPsI8bGnC48v_0# z9OMm_StK22o#~2D@Sh57yA}SCebdgW@@(qOpGN36tn>JG`wMkx7B-HYgdFzOI*WP` zY$esxGRDeNdcht($Bw%l(~&WepD4&ooo$xE=w^R$L;Oeqw#BPuvM3y~d}d>LDJ@E5 zn^%c|zwg~`I@}sC2~UieIo>mB)|cC!OpB3gik2UEpY;Na&t|hnY_m?Ag%6=x=s2mP zr5dT9%k^$XT3)^7sTZnxqhGU=rJ^2uno)UpWmBBCGnc-thWgo@VOr5nH$Go)`^Cnb zq07JvP&qN*DR{B?hMK8DK-@Fkel{R7O3W=^p~jlpc(Fc`6m6c2)~`6ZEAX#^mf`KE zzgR!kXj|@X(fM|+H1#B$_Y1IZ)M)i&1?aT99!?)`xGge~<*hK(=^Z9_$If&2VJJ)p zM%&Vk<*Z7zno#E_*#8exJ%7hxrmnL;U69qsa@_}OPh4d#x^b$?XAVK(WTKZ z3V6q2QFp|a{K<)+Exu9ooapEg2Q0h!7U)D?I-FQw#J(WStFU|9oE6%2jlo6}@aW1} zPcmx>i)FRKF=E~wWXp77N31`H#3n9C@=pxf-2H$Ze8~+U&p@-+!zOXhD@@1%^f+e* z^@gtpQScyFM@P>$seqN|4PMI5f5e?lpy@z@p^56mZvlZ6VPPWr^jc3dW3*>~VG^d~ zngJ8E_I0B9VHXc{EWQ6I{rn5$>?BlNEsEG4(hk!{j?{_#9*bPjWU4WAZYs0f=zzzi z_I%w*gw%ty!=U{@+`Q)fL%6_G{)Vp$MG$o@JU!GaZ$`oi_+o*vSX1gZO~%^kvlu>Q z46_pnkKcO0c+HQGH!D&v1~;??P`(xOtT_3mSDcETTCN2!VeSQCyQ_mv{LLLN=*!G7 zgg=1nzk(`4AJAjV)G4yWLnB{oobP1uPwPY6zRO*RiDrC!>^AJzdf8tfYTjf$#b&_CnLkSZZZM5F9eq`|q#9M?H z#zwPhCNZHKvmC(gu3s#1?USmhdw1)vZp7E!PjwElqH);;1{bap&h%cTV(=@4Fci5|A59e37A&1TEF!K?HSp7kTH>r{HG`o!R!~gYP2$Oj}IQm@j}>WF}ncJZRl7M zjiJFBuee^+K>Uv~p)vzsd`OAxrloKm++J7k!dV%Erth>Z5_OrU>`I5L(c98rF$HLf;fZHYXaA$VX3T0fO)T;UUC9@sC)iCS}RH0>y?}P81q=N}`-;t1PRz@Ga1gmlh;pC6O*QO6{@opJW1B z>zMDqHTJ8u56RX05l>nG{BHcNBepM`hzj5-%{{d`O6x2Za(&B1U@19xW>d}A*v4?1 zxaNEx$A`6-mpA8%)a0BVD*7%hPJ>C`2W!`X&CLzp@w;1B?G z0<~vI|BN5L4?k(!2iZw^umV6S8PC6_Q-S^x?GJ9H7wAK8rmyrC#=;>(g9w1G^fi*324z2 z^kU(N*PtwW!8E@d#-nQ=#?qB?q0&|e{m~NcS*Y5wyd6JDQj;*_5+ATpNh|d+5(+l% zH0WqEatr_!9x`!BiA|>q8DBJW!wS%xv-w9X1T&%3)qu%>(P2e}Hgwf!rc|pD2v4o$MBYrX7LAC%_zBe1CP~R-qRYyVdPg)tu#Qr={02-NKkdA!xSKA+ zE%RDzlI8UR{`^DDj5gLHk<^tQ_}O9i#p7&+-V^Vzjl)GO<6zRot7j`0nA8=CBL3Cf zBw(G`s>@aJ?wu^?Q3%F&Nluj(WcYA&Pdfl6YYw&g#h4vhEq6H|o{`qB&ISPWP%3k6 z9&LodhnFr*X&)R?uWv&of4u{vI~Mm`w5p#)J=t0`le(Tl5wK6*Ky?6CLjwSMd9tlo z{z0gbj-n)>DWiLJ20lJ0yTNqI4fa!(=q`F{nvG8L<6r=~ufokEV@o|Kzyu;rk zYZ6HmAzn`xE^`(A_buz<2P#g>Jh$R@XgNqEzfa2fD$9Su@P7cgLWM*rZ#f;RFu!P8 zOM&MAG``%MP?O0Q9xetSGt>KKWd%#HUHh1G)wc^3Rzsx-*c!6ojsTrgRMz?>&fJ9< zC1$XsHDg>*-tK^rV`!~QLKtaCIEOD~1IYB25&6l`<*51*Bp*V974U$x=#=+xJ~Z&r zd?%VlTX3^w@7-M0jn6BLU?t6a{Ma&3c6TTG2O#S*?z$k}TtW~21$$mB?WWbqZ%hZD zk`fZXcpd|TEh57CpVY~Jb_L_*y*ddw9S>ms+D<-KZ~RNCI-XRdNyye=e@3bDxgAQv zt6pn0lw*5L7{El(_Dw5BI)+w$J6#MK1R?qkg1z~8p7tzUD*TM*uWb+T%$=*@d{5Eo z-EP-fUzO!qICW6zECYT;iymIhHb3`ORO+AH&X@6mA7cohI%^;9E(Us}hijhOI|5Fg zB~Pd{v6>S*H>eH?Gr?4+{s`#yJ_e&Aa7urGxj5^*Ih6WOnLH_#L)#;`yEfbzS8=4d zER1yOQ6ita2g2zK)8+qWYHnV}h$m)a8)_=-WXe5KqFN8GX>oY*+WI#-&!XLombI`G z+tZJg)`$_G)q_sAU!WCp>pycl{Y@t=aoaFIS8qG+#do*1dITV$!f?mRdbkF8`&k68 z4_1v(GEqCqcdci=rJkl8v#%)qB!%-$19RmTMeELF-f#0{ z9GA&o1ztPy0>JsyocS^qleu%6;r*>Dh1AI%^Zgq`8TMzeb)${b=CJ|sre={xxHxOw z6NkN7?c#Rbnsk3Zi;y#KIM~6JAS|5AQdPTMvqpKZa+~=-3oE|AT)6~;n5dF8~B=8B1Xk&V*}ku4?DDMUMc=L^P0KAg2Ar3mc4g$7|wt zf8_9@2D>}!Bh#8Vefy%?6qWhO9iEG>;CdF@W6?wL3q`a4)o4SK1`E{4@W`hXR%D!t@#NX?wbbN#)F zRM6zbZOFEeqLu;+dpl0=MmXj!1Y@NR?1#!a?~LcK`Bn-j$OeR8r8WVQx%{30tw#G_ zfX>^d`J>}*)8Y7#{%HD>^>4PBUCw{P#H;ve@EdWSXWxPirF*1RZgoW-*l|o_!LZ!+ zW}p~))o38FMAKxlI0c-;tbs}HrB4}g`1pF~d_ceC%!ScryO~^F3wq6;%zbo2zWBQ1 z|Hk0VDuCHf4()0jP#6e3fYTNC40CHwjcOYRfs2rZ9gf;x&|V}xuLR(W z9lTUZ%-CgjK;MD^A|PYcOYO&oRu?9Qo^-pvHk7`59}UZz)J_#19j#?~81)iHGpC|^ z2YeEU!sH3}Z~kmybt3?$)l{`40toeva9DPe06Zm>fZW-|TL+^^?(bdM=w&aD+f1x( zizGRU*U>`V00I4?8sQ;e=KPyR3n>$Xk=(oj+PY-v|H)o(-Hvzwhg|Ic^*Z~y%7-q-(YjNf`P5Q|{7ELcqX_Xp^|eIngNK-R`aiI_Us zf4WEi^CuEu1Ed-DI>A&p|J%3vfAvjt%a`Y_4p-Oh|MgS&KR-|Y=f#4v^QL=0^1r?9 zzkO={fBArnDVIsE`BSTvGHe2Y2N6Jh41Pzq5hW;#s9Y>B^oRT4z0*yL!&bYO56f>J 
zD%K$z5z@WcjFinbzBb-#N4h%z()f_69=jIVo?w_t_fjOWXyp03{;K8a}N*qUPZ_XXTOb%zUWa?2hba>{qz+}1$iQEBtJw9_pRO5PvYb>88BpKV*9 zcn-Xrfm*z}K1dXpr%j!O4lv(t4m$-FYt?}*+8*s2o*EA}x74&F0e@+VJ;^URALaeN z=4+T329@ZSwB?s-h)ViEOOYr5L|srYQTot3EtJ$rx4PbK)~$H_0*rY2@&#vVA$uRr zl4pT3mdb$tSF#bM>e0q@d4&Z}U8N(F9^%_^PUe<={QEy~0%@8*o}5g;DpXQoM-a&;NA}PM;68;Nx+`rdul82!w2*;Ba8Yup`pHsSgMR zV*W&lvO&f4Bd|utxBkO59AQI?M>w6m1=R1m9;&JQ%Qv7K0n>GRgNCa zl;vFRjgO%TJka*5PANTyX0KzmLIxR>OCO4=f2@6jtoooST1hDxjxEo%$l$)GpzwlR z2TP^+Rny_%YUq8-+ylZyA}QwWb|uqX0iZdl9O^wzGB}By(R{vMJL*#~4oHdzt5ycu z)-Nb#Z@niNwsK-$g+X6xV?#~xFrK9#ee#c3IfG3P5>CG1$T6!mAe4e_I3n5cEzK%2 z9?#AgjA1}r*VZ0Pq()BRw~0k1}d`J|p3|hoyS7GV@VLX6}HJ^MH+B$+hN7U_2(x^4~ITp4^36Mei8H zgR`m(Jw`zT8-ONX9th|XyHgt{^LPq35fvLVOmhR9&)3RePw#afIV=h-HQMHWe3lde z1Tz^xbZrk^{XtFiIgZIxu@rD;=9INU5?`Ybi+(Va&06tYQOrFoDm9SZdv!2VLUF4q zb(L0Ywk+weWQSK{HkSxQ-V23)9|8Y*Czl}>jGK4Kn6v+&XiW?gS?qN2{c_pmWMbT4 zNRb$?&Z_-SoxJ;G?GcZ!+_*r`P>JELU9oLa@YP4l>y1du&~!X)$JD2X3pskl>f2Aq zeYGV?d#EgZx~chUZ9-p(DVMJ6-Gx;!3Lpxis<$%VgKd`*CU#b4P`wTY&}31gN!@QL z5?R~`m)vfa#Lpcuiq1`^LRKQVe{?z@#D22fNpTn~wm(`Fa|50L<=SLas&)is=#W=` z|3w#p8Hx=h>|jG;2b_}cd{tOkPOj*+2rWaW7&v_R`gyj-#$WkUv+Z6bd8TT8#|is+ zU0wjzfRvzLX1{se(z^gHk8zJu&A|Rop1U=l(Frn^k+z0~eEdfNI!b?jrt#CF^Fc|a zCV4VXsh#5LhP3&BiQt! zKCj=TyBs&rC}3ZnP>A?{rZ>cOx-%f;EN?p`+73u_hY;8uPR=<>iD*eYr!u<}Z{e7T zeitFyU)uSkfnycIqf)4xo4>E%>t`@awB#}hPz8qIAnddEOiZay*w2E3VhQuLrfIar z6M>C(2l7eNCHfJ7Bx!uocp}-mNn3_aM~Lxf+QUABf`kt>6o7nr#1xK~`pK@nihP$| zYcv+*e05BRn2$yCDYqv$F8h=u=lBqKA~fp1@*50Y2f&K9L`OvJ)`!B_xB8uDl~O}Z zov81zxba~EHnSR==}3<&6Mr1z{Qi3`GY*o+ZRb`KZFxZr-*poIA-4U(=Hl} zo>8vi0Q@2>FBd{@AL-Ubtuf`{(MUymf^Z`ZJQ^e`0v1JzR2d80TN+tQ=#+BDz5Rki zY|HGD)T^BR7U#JdqLo63T2Rphzbw*14snE(-)n8-_;?o1`;jDkU|T8FN>GZb$AL{7 z&yi7(wUzu#!_D0r_0a^(q1Wp%er_Q126GQhxQFP7SK?2yZ4r5JryxR^LX~R#xY3_v zE~De*IdSWiH2zOoYx|f_uFVCvi=88jAGT4 z#>*p)6~ZjbqcuU>! 
zg9!f66?scSO9r}drDEKi2`sg!6S&mnbAoW*R3_~Q z+rv&u&ZG>6w>?s!?-`nc+*d;(uEDJlSXv!FTg5`F2`BzTo z1B+3dHZUvSE0+=}!N6i29N+ioRbPXQ;wOm+IhbORaNfWD6i z;8ZTK#%cVq=G~mT5d=323|M0)z=p*#6=1v4@h-7*1I&V*gG*KuZ#bG(4?~sI*(>S>O_x{C2z0w2}cBd!(R&O?GLS zjaT~zJyJM*KyCO2{!;!m^=*2s2?^y|ZV=HYYCp+6tM?AaG`qKFSmHWzQux?VQx?SK z7mU$lb|nb;LcP*;4V=_gaz{=q9W2Ts!2dCS=jFZtGb2|Haf0l*egxvXV2**XN}us; zDJqX940$RQ2k~v!RU$7*0S%Wk7|Y3>1we&24#`zY$#wea zDC|*m_5Rgw{M$kRe3N|qZFXo>xy-%Te0FH;d36g zAqMSHqD+#f{)r0@LpD%&|8ei9*li!Fi!ANTk~B~p&Wjbd>Y6`S>Sq*xCe40?#uCsF zJurA#2*h-Of5*@ZwL3DI%Vso^O5&xtu@GY}^;w0ehe_hd|90L zj;!ZA{Zi*l4C>pP$oYjdng@0hvD=^ig_KW@Ap8a=7t474r4_`(>Cb7v$iPG5mfCqk;uY?*c-4#@9pkryg4*K~JL z^M1?z^|0`BQJ57eyqLCo!cWvU1wVWHO48Lc+7S=NvYO<#3(3QQm>*fPqXnAqDGApH z>%nBYUvJ}aXK)Q!*!BM;2QK4uPXhjGg$^JdNy#6THIW3$Zr%4RTHtwNvhkA!y;dc* zN_e3-u-Gr_4jH%bkP>oK^C&*3_JhGS1)w#}lcuY!r|2`}n*)KEAxiE2S-Fxgbv2>; z4k+i&BikrK2v5KT8Tz#D=NWW|3Ch*v_2>vN1ml_G91(3ghza_w7tO7(^x53MfwpGP z$aOy73}nXnn&;U5?&g;q(({SuRx^At8IH}y6U;poc-WQ*i;5*|ZF%iEM>y)^(N6u@dQED9vQ6>RYy^m(vgUOBE#w-qCV4bu42b40f< z$^L!>xB99O2JH07sTkiLk!h#8P>Km&EdYI4Kvb4HHq;eS26>zH7JZOTg*PAz{&W*y zH5KN$Uw5qRVte08+V|%2Q`^XSJE*@3c|E}mwrphDVz%f&`$14(qz>#MA)G4Y1}&hM zG-Lnb-Wb)O`YA&z9vT$WTHe|7aMaWGv9H6DS4O+hR_bY~4|y6A>=NnhG*hVQ8uoui zhJSm6Ewo|dJ)KFW-AWSK3f%6|y|SbxzDLmpEsRQ0pR_#l$AE4MD^!?q&E=9gDFT>} z-Uk}U8yl^(lJGJ0tKD`rvPNV%UmoJ9+nHY#;zT|8^-QLLo@D#s=`Gab3mI!Ue|kl; z+xC+?pY`BvvBhxU>HrZUCJm#c+0B3$CiC^xf&UU2?zu!W8|&D0{p=s2?sh$I`RRHo zr91d1x?eN{nR6Jkg1ij}rA?TO<#z*WOHFYnOO_|mj=vN6l7jF+?{Kw9*-pFcrvSZd zh0229eSu~Hrq^N)3hY)2k3$e=7h@o?N|AB~ke4eDEayaFBf>EpUCq#i#T83t#*z=l zVUUJ_zl=K*Iu8jdG|sv!0JM-Hd8c&B#BNlS~#yoE+*PLg6tc8c~yuVA_z1O%qy+}J(njSDRJ&Qu-{mlU5qOFC-E?1ApdI{*>pqR zAsL{Fyn=(uAln@x{eL_S+BJNznPz7EjWH~V4xCu*O|;Tve;i?c(iMKK04OfE zoI*C#%WJ3-9B{6DuHP_FBMb*kz9zQC)Kw1MFNK{<$3H+Xg$1>f3 z7*Q8iw=yzdBzRkABjGsNKtR=Y#6j2UJ;O?+SrA42cFm#FH_(?m{)0>I+e~`aABw%? 
zZ8st3?R5kk_91Vzy*8n`@LSC9?dR-;dfV&OYQESxeR2bwGCm(&XVC`%i4!;V1_`E~ zVcs9DuKr1zo#EIG+JIAXLA-t)&%qcpjMUJ6QZ!}@`{T(-dZf;3#RW0tkWWBvOm1YB zxIe~Ov|PE}0XzL$gz;UfazLsuak@se$#TrX-B?CG&QFafbe?_t>ds{~o#L_so-AL< ztFP^Wklxj_8$l@5dGu6FUj1@}LKtywu`Rlit9LspXAu7;=hS=TG~e-ToqEj& z+QtAFcoLL_!27|{78{;1mR_v10wy;+CkU7Z;%DQ6OnS?LM6(rCUg?D<2HFQ5^rhT* z`Y&^*%Di?Pi>inMk(+!Sko7Uz-XXFJ*y!XdAC`|m_O&iZ&wR0bInr}K=XltyeedS#qeZ?u?&jy^GP3OR%5#kJEJ+AJ zD(g!&wdw`aJ@lz(^I|B9S_ar5&OHwAO&0v|s9cqwgOy}dAW=H@J>7*NbfLl< z*Z(#FV~Bbp=3~Hq$cZu;P3NyM8`JzgQ)ytz{o^#9S2Y_m&l=zhLCOu%Y(C09<+ojr z4^B2+2&m44@?M21O-#BSGi7rzQZ(t{1YQMn@AEJ3LInyP(7P$zBDo};bnuYJ7!x_F zhGA%Ski1E8i(@`B23V{#ex~hOrjSCF|2Xl9u61>>P^E~P1-8%qLX5a9w;vI_CX{Zp zQ0k^0F<13{90;VvwFf^#iFE_HPWySVUb)0qds80{lb^a|=M@Yl5c6m0fuwYWC_$)k zjXDnJ(R**&l3`OGdE^B8)F(dEY2r>w(*F1&SPBK5;hC;Ab(;19Q!uP@5qrijqfu3y z0;!z5){J_@^~%b3p+s1?o<}IYfY7mM$gi+OgMAL}mq~=YpX;g@Ji--{9|aE#Q@cdM zeo{Vi3Q9rg#(k8$72B`8hg!JojT#FK+_yc(N_p#}W9hd!l80eXJz^=vM(-l<=nRcD<8RBK{S+bU)S}-gpK=|QZY^qysL=fTio2U={?OWY4 z7;ertQ8Qs=0^l+ZGh*C7`|v;18z{lojAmQQO%4I4q0TTBC_m#6fGZ@4w$YjH74pp0 zK^s$95r65g&sPB^$TG1n%X#4qmu&bo2sn7#chI!*&fSGjzKZ9C525nVOfpkcT(!6`wrxaGH2!o4R z@edTw_qX@U<=6U5!OZ-lA(>4b(y;_+>XVeYE z47xn(6-@3!MmfERzPqBf)l8mmoHLX^8leko@JJ7FPje}r_pl!30M0rtb|6H-eC(jk z*`i8G@G3DZ?#IkReFh|9qUAH+5d4|X;kkFXe6u=>hJXa0hD4>#I-Cq>q;62|<3`p~ z7mu+VnJ;zy%j6qC z2*w13koWE_Fu~J-uqhzOozN)K9wa<=lx7OyaNfGY87PHDAo>Bfr4AhNawWZgynwM%AisxY5t{-756BEHy?NRidC25 zNB`>DH`8&sY;K*MKO~5^otK>CGvAOuPr~&(y4{P(_*Fzf&j7^isE_vp0*uugjo}XE zIczALLJB~C!aRum7fhk`v&VnM-jfr&0K5KlfMkG_~{uJn72N*p3$4A#KjKe8N2buex=LY99O8dM1W zoyZ<}WBP7o*A2@g!e)Cohs|_G;Ve^3Y{l|>goDECXgJ(##@mtw;_z*BlaeX}cQ9Fg z8>Lt<4;)J=(D+mtxcH{4BYcD2|5hwtsKpo%a)VI$edO43wC(9CN>+r-=SOBBIhv0s ziip7e{M8#4a7wI6_M82gEFM?|lH6LynHaM&?G}@KCOzaui={@)T6f&Fsbr4j-Fz(U#bO$!8RU<`C+?MUL|y=?OWsb_y#nj*Pf`K1fZXuIqo3uAyz~4Gr&dM7LpKK1645-q( zM`fWQNgqKwjN2cSMBD3pdOYD%)H5 z>iZQ^S6m904SP>dpG*y3^VjU{g|aq&F}s5)j-V59L9zBUcWw!#yx-|JzCrBUzZ-== ze<{d_rTsiFpV;FX`C(^8x?cQUE=+{u(WKNTp@G3VLHrhe7VS^y^dq!n+qVZz7CUyuXY2jRP8Yk{EF5)4bIC!?50xRP0>n?8!;876>mA^X z{pa5WmUOx^OwPjSUmhWqth(W3?)PyCvap!5K)=w9EQSWGge^hoF`!+&-p6r1qYMD) z_Vxs)1!+t#q|H2KjHF39ElY>p{XC$_mL?6{(GAz?M2L-oojfpK9KBDNF}zE6k4TM+ z3HL;@ajpf2xd-y5q*~eujP0LD3b{g)z?bPIt=RkBzsyR$UtIb*x>qDF@gBTygr zvfS>wIUg;#?>Eq@O?Q~|_e{(es$)kA5pKnANCUAqw6IoR{U^l!AOBWQ{$m0_7#X4> zxu*xf1jh|iaD~}g{2qJsp<1p{swYnr#)yHl7O=|b1z)Po#Qf37Sk9pbtb^qhVH;3k zY2W8}0gFncQ{e8PzpFy^a9#PkMSz0hXsH7eIZvi0JE7Jc4rhC2p|Q@+`W(z6QCQQb zVs{!94S_7q%|}=|%YQ-)?<*5Hk)Wv&^vVIZ!x(^WZB^hbN^;ic#d#mul3BPiKG^6%1gYa2DD|l3AAIXW5G0!x_WV zdQMO#l-3s@bQF$O2>V?D!oL`S4C2?`sIqDma0zd+*_VpveOt>tF8dWs#)}(B#VkMw z)^*?~-!I0C=Q*s4!1?fP++Nu?kdF%t*I+vP_{%66cNdpYlUx!xVei2YSvgC*gjw-u zvfvlph?3mE1K~#_Ng8l!bf5Ez*-C{{{NRtj2Jc$dWN^^s0km0kJ^dq_P1 zof5vU&Aa+$um7jR2VS8fQ%oNd@VB7P7g?Abd{3Sjh&CD0N3Rr`?u;*zrTJ=x1zV0^ zu>}OO6?-kYh@c{oU%{hQ)Be%GB)rUhJ1!D5ERB?ntyjY;gXO!68qrcT#rx7Z zN8Gk3rfu}8gg$mPN^Ghcp-BPo<7a@7+yK*lu&_SZGqu6a2kbh5McY>j)q`IX{f7KWsYzjQ!`EqJS+EgenDP1p&puu=!ntY413Vnn? 
z+u-=M%TTO2?0Pz0)j1=&+!Rf8e^CHll5Nb=3tFl#wC%rl@pHvF0&&(AXpyR=manQi z;f@F{iY@l}?N+2Mm2_OssM$;wMZ+m4vmWm&brCrq5XReVSutw8#Rb%IN1hu&_}y`% zRu%jAg{gM$6;P+19xloK@e+2Dc{Hg| zFIT-#e7MiB|cBtm)*XEqzl{MCZS&R~oTrodP&x9xs% z!07j>cb9%{4CAQda0#fnaTwjE`9)s!lcjF(KCr<*)M@+JB8VzDb7iMsCch{zD9t3d z+F56m#_mjVT-ClBCtL4OCTOnpOSe0)6%Z=j(??C=MO7IcPWUj<6O30N+%p&$H9gQ7 z6d!n9r9`{cIbEUynzR$q$U2Cuu)BVIZq}j2brXBBmUt&kPA&f9Y+oHM z_-oT?s1`m6uVLSqvWp3iEySg{`?|wS;0^nr%iDEU(^El&^1xc1wNu}r3mq{i1@GZ< z!ER(TC9ZJE;niF*T^1-v)*}$!HUJ(H?G{^{0j@kaWd}Q*9EtE_<$`Z;P795Y(oP|= zm5g-vyK@6Ox`CCukKPo6lbHrGpV?!WkELYQ&$md}>I$4@25Cm`b4ue2#=9Iph=piS zN@c9`EvVk9`OUQXgaiupvhc|j>0NccLipXtT}uQtg#=o9_HATTw)izv?25d(rJPn@ zik&)ztO?xepcFQfOYSJ7s*~1x?fi%l4(;@cq?4AYt1ir&Gd+Ooyae)Y2Jiuu2%3u( zOn27HZjsi9jDTqi^TD#`^{jl|i`1E?oPN>m>%w!DjjR-p%+9?P)mC33CJ1NleF6Ar zq|^h}>A1r58RABL#o2$`cZKCV(4i^dm`kUSsh&wc$DqBq&`R|{&4eS#7rqZNE;NmN zYEA8Zv)H60&}HYB^7Njp8|^#_ualEcW5d}xo-pZZ2Q4O?5Z-P@R?FtT_5v}PxsGVz zp_U=~cc3R>cbhHIufe^QzD+$5Yi9RRo;ctK5Yd5OZD+@Al!11)m)l%civ}e z9vhY!{uMogYq@4?4T;GDDSBjoF^`Ry9U6wUz_r@D*L%u@(yh+N|+yn zHV?1G9r)spD(I6)SAivNm(E?i5IQH~Y!dR!2}%O&Bi!LM!+hFrUe5r#frS*uV>~dP zqYK;I>~taN_fds8-{f1cb=Bt@#5zAWGqEr~%lOvva7N~g=mHd29y9nB=PAMkIL$mb z?rfMS%Y$h~gGT(>4CfI3>?Ad4`t^Kg4`9DpfoYw5OOcKCg79eL)DUr?9uI3CrHt#t zK%ej#TQ;ouA2~W2QUQwjlY`JMTx=nJIK-(U99nhH`QW1}GUr{22EW?*_0K)wuuxNl zzZo%*iqHc2GXkXP1s+|tLyRxGvE(Ebs}*f7C&HD#aF0f-Ytua$vXq{w^k%!ePm-JM?)LaK3&E z6RtyqPFCEnG4%_`lkqn?c@sv{fcW9 zM+-z+QVjEv2qYjnXdC4>!@{z=7|6Pj*g&%R3~X7tO-%~I0g21EvLz+bP_n5ePTWA6g#FGZiwx1I zZdJy<%OSwI^P&N=E+z#cds8J7-n0ESd;%9v;FetM7gfE8qYom}>3oC7kOpSNd4w657o%Gs1lqwX_0~dAH@{DEMqd4{(N=CKZZ^YF^lZtkekkGZXbtuE`LB zQjLFr3eB5-%;1sf`c&fWOV@*KnoM2*C8QZH>1E$stJ9(gJ0=Px#F1f8E9o>g7J87W z;}Xd^x(>zToDRe*Jj=vG3A%9%j1B!SEDFAtBsqX8Bn5aDKA<GZy1M9rkldnd^g* z;UTzr7IzQ#D=zsGBicV%3ueyuH3FNqtezg63YEFC6AON@Fr))opq6T|85DBxy-bbW zKU5TEL{EOTTZW^qON0434qt!~kwx#@-kc%Q0Bz6%LMC0UDLla^f_}nhzw?sY*TxvR z2>ArCLO94G%wo{70-F5FbHg2twr)QB0A^i2Av6z#`9tGvm-i&C5clA9KUH8r{On-) zrzTWu84X%9AYOKcyg|2Vf!pq246Y)j#X{Aba<`Lz+13m1K4;sHDGTtz9lS2{N^WDga z@Ek`r*vL*xz)(zZYUH!IOxb!7i)<(wFmGcOjxuf<2xNcg7Sdc2gcf;LGAD09$S*t+ z-rKIBr`Y=5Ld>a(oW!ZR0T&7oo51>__AnrtmO9)TY>{?z_OD$H z3k$1wYrzmhmU5Z?XHGXG4|$!fKU~$m1Q8gRLC0b>#ynzl4FAC4G`O&JWijM^mTj&~ zR0^ud-*?(#R`H(A_~SsSV4Pn5uCq#vsyfwS#NZH3alaB@5#2Eg{#g0;Uijipzh#=z2gZtc&G7nc^ zaj4=IFEGcZ)~@I+l1b*zItg-2-Z-CSMwBuiPv&~xf}Gtg`h*~Hi8jBFRJIHx1g{Qc zI05+rKtaBNEb2`?QN1*k#lJwV>p9Gz+y^aRwiz^gm3Ens?=rIqg1`y~*0k6*r6|uJ zl(IN07y~<^o!=wpLXYGiCyJ1XD!c4^>T@41zLmziKwwa%89qYDr_D>V_d5l@K!G_6 z$zHrI>awyQW0RVZo?VLN`=}|j{xD*Tebv5n)uwAA{D@HuR-m@MYtWR(5 zDzcVkGDZKbA_seGIH8MNZ+{`tJ098YYd(Ev`4ZE<5`c+e*y`%Y)fq*`Q5%;DdO_>H z4W6B4!EU1zeG+p7?y4M=kRPVg#W}_khWM~w-eWr)%)`aKEr5M|wLQsUwIuu0$)!>T zTvri|Vj5noAxU$Q?8WPQ$hCk>lFoh@pFnnL1OD5=C;%FL7R&J^S$Hdi{g`d_EBD*M z80N>_uXl##G+rUA9dmFNNif6O= zsodXbzf3LtQ`zN;SsQ}UDUk?-S@r=^Bts;eC_REnA|d|qsb;~;FV#jY-aiu*%iz|U zjjPa&)eBIC?CNFb)Vex1B!0aM&6nEfJ$L>6#cmf+XUJf`rYU8E!ZAP?cgGkhInlU# z0j!n=Gmhn6j$T!%C%45S?3Y?st|+7r4AA?DQw+QO8QsBg(C=;AT!!^6ik5sNdwHc5WOaa0`FR2#G7aHw|uE>~28RMHR zwlr3~mE6`FDW?h=m?inhNqB-M)s5p?(=O0>PhU*|3t=!l(<4eyo=~)ly&7uh{PNVE z#kSVAcqE^#sjxl+Z|WA)qFyw}lMuMG3Lhn$Th}0;B_fN-KyJclzs6yeV;oG#IOM@> zOrL)*)LUG=FLR|57-7;r7smf6atnFbtXZ=YbPM0c&Jr@Ex^GOr-DY zmNP*&)NJ_K#3@rmSYd(f>K~_&gE#qN7vEXAXmYst1lqPfrSYen`CkLmo7B|@KE5Ws zA~*#c)fI>n00b*HJD!Q&?s=@<{3;vl8mgOsMp|c3B*Ny4QOu?!*1;65wthzmNJyp- zkvDx94{Zbzl9;rb-v$t-k;7zoV{l(l9;u5{x1ZV|>&(MOCUQQl%NqG9*g zLK6h=$iV8Vo0~3XiZSRh=mFrk83G)e4MAiY4yq9Dbq4t_J{NYOR}iX5!b1i?R=d*b z7NM2NI?Fh^#_#aXEN<&{N25(#DQhbv;4Ls;z{H@n{x+wOT*?gFpDrz-F?Dj?5>PV& 
zh|}1aVH%*qz3jk+HQ=F^4Dn!C-_W-XsxQ1TxBHV!R?9A4kEN5E9)5lF0U;W}-`)Qy zFKNGM0BFK^jVZUV7F2xlUlakb=Q(rHgyE%~c-drk)am~pdv6&OXV<0w2KV3+0--~2 zhXi+bXxst>2*KT50>L#90>RxSxF=|E4-N_LPSA$4d7gL9%v82=8EQjNpQ?r3r=7Se4e;tl#iamadBdxvXl>zC^@G_8~7 zn}cSH(|oLxYySH??(@A_y=Nwjt~hx6l_Wyebg*4R*1k$;IAY&OzbXP8rwj;*;~5%g zC|@o6tIcA&4yD3K^Cw+PpD2*0J$!Oe=OYa-%06waiKxDAmx-0RDCs!w>2$ZT3@vCm zY{PUu?vONzRy8`i2K_6evnLPdL7lUz2GfC`&cA+%CZ^s>370Y$Xf~`73tV#W>*m3*rXPmp6yw_aTwm!_q}vGhoL@Y%pH6lBU`XHj+aASY2ht# z?#FG0Mbd5a@3E`~8ypDg1tg?gB6_F)vGzI&|E8!cvdF&8+i$OIKJQSGiG<~LH?F}j z`_683OR5DKYex-`57zC{WHRq$u)j(GST&ZZJR)MF?e5PZ=aS10DOzFeMAjfWOwna0 z=!bzOgJZSXP%en3R)}{oVAcwFFkR)489Y``E=4tfXb+~cE7F|8wkJSaQN~AAxgJPp zo%bSGqikZ|Cvc0#p4u;K7Xtpg+>96(=-lpvy>2wW3yh7Y9|jZT6#>kyg8lirAe*q} z!&M(+{)OSJM}9uYecViNNfefF>sw*b z++zLfE62$KEg4DMoSC2QN25kxF+sDm{A*r`SjkQ8wWiIpol26Nq5RNONO~Y@p+X$# z-HU6vC!(cOec9LUA8x+;d_P0l)u}d{DAjRV^El7yk#Q1Mo1UsqAO4r@}rLgw#JgcOYyRU+MBjN8rUq*JOKm z>Y?Wj+W==-060E%jWrd3RZu_iOPpyOM^5%iWez z))F$kM(dHT@Z{w6@cW-l-pU^?UH0`CqwxH^*NXd=Nn5#{ICkY zCCqJ!ddiuduP7dI3ZV*QLL5g{9|MP2n)=rtcJj*KnMLf z4h{p6jxGla!GC-&<1i2F$swwLk3pbMfGa_6Zedvw!wF0O}0Pd-h!0N zIdrDu;#smC@D{9lT#0+rdD~||!zYJ|6%lo2P>N93C`n}Ut4w;vf#zk)Ry6g2w4|L{ z#!^Af=lwZ_+m1fwm>s~k7yx{WYWfxaW(+z1JDh{1rNlQl*`kAC5i!I&M#^fdQWa0v zjB?psc3!BG0-^nrUq*|;i zt6>|{uF>eXJ7hGHRiqv)Q@%a|9=E+oo%rD@LYMo`z9;Ey#Gl4P7a&;KtU1vt`Cy0s z!57jo)=*W2+$bGI&%kHCP)P)9&R1hu8ri@H zDTfVL0J=UZo8ft)X#ySfvYjfX{vAv-)9AFhmWoxoOeq--tw>))>yrFzKy>-^3s-po z|J~e)Z|8MXM+GS}q5CPCLOqe*zf$3uxZE!8WehEg~a?=tTK z<86L3v0RelzmGHtMkmU4ST#01_~zNs)t3O-xEME;Q<>XL>rNATds>;~DZ>DFF7=FivIUE*Rp{^ekE;-T|F1LasNFszB_I&? zHHKzNW^bX?r_qu%18uZ9k4+U9?={JZ{D_;JSc>}luzNGu_Jgrj4`HlfM zRsI?B)5yqxR?h$Jlp0QMY;Kt!~EE*yH^nobD0R%z8 zcBT;eTAOJDSVAsa`+6F;a-q9WN~cU`DvCPyPMFPa*4GV?&IaaCYUG7x^+C*D-3gvW@y(O;wfW=uI4sI`Tsyj)R-#&YZX}` zWIp-2u!P+UJuPOMqYQlqT3o0mmJv3&D0+WeFg~Pvp2()~IJAh4e&cjX#(u^}HP2a2 zuLe|Xe6^PNqQrmBnIFg1H(Eo)HH(N$am;a~w2HdyND>Z3@(-q}LqtDe>Q~(@)H)HL zZ_g_mIPZSZTw|5^Ye-w>vi&{-n9wLC(<8< z`(Y{mAvi%I;CuMxI`Wlnp2>MxoliZZ(esO4Cw@34Tb&)}9>S3b8N8_2s6GeMn-4%*!HBQG1L$cWr`1v*aKKW= z5N@Lc)lm5IFy2+_Gey2SKA$cqlpN5EKcanMP5~jsA}&tWUQqvl1VZPPQC;$sf|wyy z?ib@2ti=P-%Z3?zlEA4XRK$@ShUfV4eYvoJUDby2puMG$-@N35tcTKB@^ z{rSG`f)cF?)v)-yDbuSco7MSx8+<7|@dme`Ys+O{b|WK=+0x4G#cJBb3LI5pLHl2i zYOSZFda%YH4@xK%UPO~|>gI>k3HUCMc&_~p>6{}38^HcN60R#rz*_#7a%+q!iL||k zNvz~cA1|-OccAj$O8jcNu6!znT_(_(^vbV5ssr^5jF}_(Zjl zo?|iOG#L$c{QTqz%VM^jX^8v!ZisTq2G^kIWG<1H-sS+V%H`to%ov9-j2Z!lB`e$} z?*~aPMKZBv)-UY>TfzQM`?x8rsP^VLO~xDtPxZ`uyCf#6M+oF3c;=ejD#!*zG`XMl zQ~^zc+}BcH<o`Tcr@eAQ}}Kx~7=O0(v> zf|e7NH4Gp%`eMsJfK7^0m3HO(#gmtw>Xa)lZnL*1%w$ZZf}CCsB7MexL(b=%??Z9u z6dCBtVZHUn^Pq7PFHVZx&DdAiX(KZ3LyB9y^&-P$D$0}1a|F8f-f;F`0BM8K`HPla z>&&CN!aEDD>tGsqbM@{pTN847xhXpB#=~k1db=`LNqW#Oi-D;QixJ53rGr?%_ClL8 z+TUcU-%4fneWbx7uwq{(W>x7A-3*$RXKj^v@gc+jLc(XwQ)bxyJ(PGGfpHn$qwwUn zL3n7A+gG!JcW`jVpjy%@h&sG% zFpc-OUHUZlP{Kr{Wfae4Ou4W86mAi?D12ILb-&|D)!yhUW}fd1qeHqoRL=O+jCNTz z@zi4h3L8AE7GOKuMJZ}2y{<8!1(Pk$1{n~%@^8I)veX_f;Zxko?-71E-H#>M1Wb%j zD66mjs$-nyG~25XFcQvMhrerD9P$36(Ki+64SX>^ANRLfTEwvx;JMEMopxvH$zQIb z0(5jgVj+aycUO08h-drJoh;h7kBmyBhUSR0Qk4TT6F99jlk3ZQ%!vnzT3j7O3OzY4 z@SRt>Wc7@b^%rte-TJkbNynM7^GXYepb7D4GQLPBn*|^^iCSe*Coc>s^mUA|(hW8l z=8G!r;HeR#n`^N^CQOc1z>+A)t(0>3Hx__l_)JxXYb?E*R6p$xy5;b%nF>&B&iS0# zY7ED*oU`sEL%bjM+U`{xg)hbOaJkG!apr0~sx>M@)5Ti@oZZ&z8J*X*ROAC2(v5&Q zm(rQq|PaTQ`)Ccaio z7f>Fvsb7-H@cpBcO-wTQd#IgojPXZ@a1X$2onT~0)~FqiV;bDa6dTE(+(~THL<+$y}Cjh8AxkBvU~NlqlHZ}zSy2_q z^H^Ho4P(_#ZFF20RtJ8f2uAO4TvX{ttW*lAYzFDCEA4R=lQ}pAL>R8SNBD~hIU4A= 
zIpMjl8;oc&mnIX3xHC#o&vs_tGpN(Co`%L4%7RLl%E~vD=O3nQ3#kvC|BX_&w%w(=^d4LBPW$L?ce)~m}0d!+7%&5uRIaCycNym^+jbiP&3&ov2?n38d}#`EwsN^}$3 zt~Q<)>lYYstatQrz(;gNMrX1YSu%dI_(@q-oo@Ir90HOfK$C75+sGTQS`d@Mb zVN@n(d67-?^a9}jC`rN=#06g`fl`}JJYJq;cuBT&>7c}nN*irJc6z->r31G z5PmW*p-KRm>VS~$cp|F7Q8e}0eu=gs}+>+!$;*Z;Y4|NCq8pMUfJqbnDi z@z4a}Fde>q1X?3?YVEd|L&RWDNwzB|0XixGnDsR8(>w#0-|SeK(f%KIkg z0~wFg^}MeY2|0imAV~dnT>zf)D8}#heX&NFT#{d-!c%VBY;?l@i8QxGSw^kW?0eXu zU(l{biAI**T}!ag?U6X2!^zuksj4g~`pKZ+^kvY=4r2NN3`SckBfd)*Hfp_DEl3dCsnC+q!V%EDwN{+2a;mG3Z9_0wt8^`{|HT=X-gblLvl}5=BfzL z@6aoz_>IzL_=@#L`#9*Ux6i-!)?bi7Cp;;l&Bzt-I2rZxy~i%ospblHRP>oGWXC`4 zTEZ4afBo$*m=07!qS!Z;fot4lGlAGpUI($9R+z0ZR%M@G864lH)7(PM0)1+%uTDbE zHiOO|=3e?uXRoaO?0xZU3**^J{U=2=l^Ae~hCyUuwBrTJ%KFXD*@k{MQNZY}_;6`W zv!*KyOc@9Ln&UfI~pf+b&0>iWrU#3z>~&hqdQmQ&`R}%h|wV46%4@< zx4`WGL0pXXwqUA6^GAG7Z$<75yNRl3QCh?st&MO(2IxroqUqXF>kX>oJpVA7zO>dv zuB#?oRNe4EL-9V`L)3?@kufdF>WiGj%R67oTtN z4B`W!3TmWy?)S;_(+F-eG!}0$&?6U zy>2yC9xZdQ4YpE8MX^a(mAWpx(T`N3&^SvXG*7@jbwsh`s| zRz_ahIUkAw8F)G`y=qr=LxcOZ+AVPTm|6lObatf@?at3KaWEB&7u^^!lu;{CbV5Jy zWX|(OswMF~DXEYk$NGdg35rgQV!37_j$j0d6#qFOFWH%DE}+U(N_~|~#A*=V;g--; zE#K#0{X6g~fqDJ~9?rR{m|XTM*W;NhqW%~;tOvi;L+}3{FTCfdSt)~8QAMQ|@B0=P zF>Sa#e#Vhn)99o4=koqgXi_GN-XOHU{cnJ5DO;2LSB+Mgz`xq-H^;; zS0m{Sg`s-OzhO+@3!JxA3Gm5Pe*(XD6;)Kq#L(yCj2iD*)%OzD@1e(1{3CHMX%`uO zWV7LPSWT7^=D$jLY4$h;D6`@gE((+<@-kRe0JVuq7M(cLAkss%zQ`Dt>%mH;2B{B( zpsi1$a0tIaEYT=n@wpkQ@3D*3_`+#LL!fv%kc1zJvGOh2pU-GPn*mzs4~bMNxCxfC zQdf5=(#BZV75=dQI|TCyQ%=_zg~)uJ%>}-o$^)j!a=Kh{f37YIc7r|oxymFDeeyv! zEmsk2kSVKPY2{8ijyMTgQ8`s6CiGr)){}nUL{=p~Sh+_OG3+Lg^-9HfMxYb3C}<%y zJ^=6>nPg?z7JA_Lm&LABro;WwRiT|9unJYX!;vi{f4`F!aRImm?>ib}@GT*Hkd1BE z0k=Vm@9njATsi+U!@&QD=;D24B)vtg*)mF|k@GIzjm^GThr3i1Q`)1lb0E3etb1UO zGc|tI>L05q#E1PT;7#+NxUWJ9iRHJh!NWicM?jM&LoM}z`v{IeB;A{%o@#jETYa*1 zeg0qQKBVUuT}+=TNomeM^h8DEi6LS10payz_C(-l{+@1qSZ#?GUL7f*CltrEw@<0x;y>D?L9apZ$9=fCB2#m4qen^Mx23d~;n~T-st8Sryk}@35oGGiE2=kP< zJ-+o8X+@3bx&FLH+xCp>uXW+>&w-mcsG{QiuOE8vwyoEm4yxS_xoCx-KR(p_XoY3E zAuNysOlc^-C~Bd>e899M$FF!nJ=dyVj;x?LmcU}zR!YpdryVz# znLy-hgnYH$cS@j7WJwDhb$Q9uvf)cf47*(Az24cau(?UiK;mP1v^kjkKAMbIj+oWJ znK96T9C5nWQ2@Zi$p4g%ulyv6?CK7fZkk$k=T*vJNmKV1a@l!WtX7|{E`lVAV_C@N zBPt-|OWhq^Me~S~JW{lvnC9n%7B!A#xI_C>fr%h?NVc!u#t*q`~%9)r9`?m*%%OEh(g zl8NUOuHh)wj>1>DNB57lhrPaCtFUt)2v-WPbSc!c zv*WFj9FX4`Hl1y6Res*he3z;&?rCu-D<9HD7RpP7N90Tf^-WL`@sXRPz13u073e?E z#8$c}KrLKGrQQNZP?g(o!W!2h%O>exCek93Upg<1xrzDLH=VIY9z$?fgW)eZwJHo; z&~0CPUH%TCQ|nPU>nq669j^AhC4Qk2gJKIC2>(^+?O5n2jvxqzK-8+=cYxtgOT!S4t8Ra$H6HBy8pf<5U*|8IdHfST@TWvIRuRn6Mvy!#a*j{~?_)_1#r3G}X%_t^U6vXYdbjKE@6K09r;2zd{RZK9t-Sd_u7>T9*3jk9pzXo+wZduj$C|;Q z<+HZ4$K*Hcc@4YSmTpZOg#7}?JUK+iBo3Sew}S=x70h}n$A@aw@@J_ipv|$XSE{E1 z6zm6}8+GVV5MkCVrNbf{HS9s@d&y9opS7}EOHP*^%TRy!kY<)u!)=(}vEq3Dxy^WB zFA$lf!JjXK&xfy@Ic7Sie-)%8jEM)U2NSer!8>^liT+AP<%_UeW;p5 z;n5Q99BB_H$x*tXS2T5BB}h7?R5FvJCGyd5?}D3cw{vzT*`6vNn0gIfH*gTFE)8_HoCb;*cPO@8aTN75oq@0e3f)(J8Sh7P%DgG7$*O%viUCZwr)B165U`R(mqe_Q*6*O8a@r)&YR)A zgVfo~17jcU@xbf^<@HpD9I zT!2KCEEP>kF4d(b^c~QM3u7TNIWgaUmg^~l4D;CDhTg5Mwm_Mi1S-7#k1GgkK&$X! z6otY&Qwiz%4<8MUDH%f1Q5o(FM2{WOPGV7`G2swY>bFk!s{I=zU*tbAm@Fi9Uu`-SP;Hksb&(Z+fE<1B;6j zJ<3o@8g|GkWdKHOMxO8~cc~jyX46nNgH6RUNl8{=lu+S3C>P9s{d#`Z*PB67;NoWu zEYg!l=g1$FC$hxN<}MtzOcC1eBH{u<5QJg!w3&4vcrlV#oG#7%0Q#ZPEEGC8&$2UX z?dy$3PAu$esd^ofv6(J|3vI0d$YH3w-0(Hpiit-NzY~T1wU1yF5KJI>e7g8+9O;d? 
-> NOTE: The binary may be initially prevented from running if you're
-> on MacOS.
-> See the ["Open Anyway" instructions from this support forum][developer-app]
-> if that is the case.
-
-You should now be able to run Hermes by invoking the `hermes` executable.
-
-```shell
-hermes version
-```
-
-```
-hermes 0.15.0
-```
-
-## Install via Cargo
-
-> NOTE: This approach assumes you have installed all
-> the [pre-requisites](./pre_requisites.md) on your machine.
-
-Hermes is packaged in the `ibc-relayer-cli` Rust crate.
-To install the latest release of Hermes, run the following command in a terminal:
-
-```shell
-cargo install ibc-relayer-cli --bin hermes --locked
-```
-
-This will download and build the crate `ibc-relayer-cli`, and install the
-`hermes` binary in `$HOME/.cargo/bin`.
-
-> If you have not installed Rust and Cargo via [rustup.rs](https://rustup.rs), you may need to
-> add the `$HOME/.cargo/bin` directory to your `PATH` environment variable.
-> For most shells, this can be done by adding the following line to your
-> `.bashrc` or `.zshrc` configuration file:
->
-> ```shell
-> export PATH="$HOME/.cargo/bin:$PATH"
-> ```
-
-You should now be able to run Hermes by invoking the `hermes` executable.
-
-```shell
-hermes version
-```
-
-```
-hermes 0.15.0
-```
-
-## Build from source
-
-### Clone the repository
-
-Open a terminal and clone the `ibc-rs` repository:
-
-```shell
-git clone https://github.com/informalsystems/ibc-rs.git
-```
-
-Change to the repository directory
-```shell
-cd ibc-rs
-```
-
-### Checkout the latest release
-
-Go to the [ibc-rs releases](https://github.com/informalsystems/ibc-rs/releases) page to see what is the most recent release.
-
-Then checkout the release, for example if the most recent release is `v0.15.0` then execute the command:
-
-```shell
-git checkout v0.15.0
-```
-
-### Building with `cargo build`
-
-This command builds all the crates from the [__`ibc-rs`__](https://github.com/informalsystems/ibc-rs) repository, namely: the [__`ibc`__](https://github.com/informalsystems/ibc-rs/tree/master/modules) modules crate, [__`ibc-relayer`__](https://github.com/informalsystems/ibc-rs/tree/master/relayer) crate, [__`ibc-proto`__](https://github.com/informalsystems/ibc-rs/tree/master/proto) crate, and the [__`ibc-relayer-cli`__](https://github.com/informalsystems/ibc-rs/tree/master/relayer-cli) crate.
-The last of these crates contains the `hermes` binary.
-
-```shell
-cargo build --release --bin hermes
-```
-
-
-> By default, Hermes bundles a [telemetry service and server](./telemetry.md).
-> To build Hermes without telemetry support, and get a smaller executable, -> supply the `--no-default-features flag` to `cargo build`: -> -> ```shell -> cargo build --release --no-default-features --bin hermes -> ``` - -If the build is successful, the `hermes` executable will be located in the following location: - -```shell -./target/release/hermes -``` - -__Troubleshooting__: -In case the `cargo build` command above fails, as a first course of action we -recommend trying to run the same command with the additional `locked` flag: - -```shell -cargo build --release --bin hermes --locked -``` - -### Running for the first time - -If you run the `hermes` without any additional parameters you should see the usage and help information: - -```shell -./target/release/hermes -``` - -``` -hermes 0.15.0 -Informal Systems - -USAGE: - hermes - -SUBCOMMANDS: - help Get usage information - config Validate Hermes configuration file - keys Manage keys in the relayer for each chain - create Create objects (client, connection, or channel) on chains - update Update objects (clients) on chains - upgrade Upgrade objects (clients) after chain upgrade - start Start the relayer - query Query objects from the chain - tx Create and send IBC transactions - listen Listen to and display IBC events emitted by a chain - misbehaviour Listen to client update IBC events and handles misbehaviour - version Display version information -``` - -### Creating an alias for the executable - -It might be easier to create an alias for `hermes` so you can just run it by specifying the executable name instead of the whole path. In order to create an alias execute the following command: - -```shell -alias hermes='cargo run --release --bin hermes --' -``` - -## Shell auto-completions - -The `completions` subcommand of Hermes can be used to output a completion script -for a choice of widely used command-line shells. -Refer to `hermes completions --help` for the list. Some shell-specific examples -of setting up auto-completion with this command are provided below; check your -shell configuration to decide on the suitable directory in which to install the script -and any further necessary modifications to the shell's startup files. - -### Bash - -```sh -hermes completions bash > ~/.local/share/bash-completion/completions/hermes -``` - -On a MacOS installation with Homebrew `bash-completion` formula installed, use - -```sh -hermes completions bash > $(brew --prefix)/etc/bash_completion.d/hermes.bash-completion -``` - -### Zsh - -```sh -hermes completions zsh > ~/.zfunc/_hermes -``` - -To make the shell load the script on initialization, add the directory to `fpath` -in your `~/.zshrc` before `compinit`: - -``` -fpath+=~/.zfunc -``` - -## Next Steps - -Go to the [`Configuration`](./config.md) section to learn how to create a configuration file to be used by Hermes. - - -[releases]: https://github.com/informalsystems/ibc-rs/releases -[developer-app]: https://support.apple.com/en-gb/HT202491 diff --git a/guide/src/pre_requisites.md b/guide/src/pre_requisites.md deleted file mode 100644 index 02d06077a3..0000000000 --- a/guide/src/pre_requisites.md +++ /dev/null @@ -1,43 +0,0 @@ -# Pre-requisites - -## 1. Rust - -The IBC Relayer is developed with the [Rust](https://www.rust-lang.org) programming language. In order to build and run the relayer you need to install and configure `Rust` on your machine. 
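If you prefer to lock a checkout to the compiler version that Hermes is tested against, `rustup` can do that with a directory override. This is an optional convenience, not a step required by this guide; the sketch below assumes `rustup` is already installed (the official installation instructions are linked in the next section), and the version number should be adjusted to the current requirement.

```shell
# Optional: pin this working copy to a specific Rust toolchain so that
# rustup selects it automatically whenever cargo is run in this directory.
rustup toolchain install 1.60
rustup override set 1.60
rustc --version   # should now report the pinned toolchain
```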
-
-### Fresh Rust installation
-
-For instructions on how to install `Rust` on your machine please follow the official [`Notes about Rust Installation`](https://www.rust-lang.org/tools/install).
-
-The provided instructions will install the full Rust toolchain, including `rustc`, `cargo`, and `rustup`, which are required to build the project.
-
-### Version requirements
-
-Hermes is developed and tested using the latest version of Rust, `1.60` at
-the moment. To check that your toolchain is up-to-date run:
-
-```shell
-rustc --version
-```
-
-If you have already installed the Rust toolchain in the past, you can
-update your installation by running `rustup update`.
-
-### Testing the installation
-
-After you install the `Rust` toolchain you can execute the following command:
-
-```shell
-cargo version
-```
-
-This should display the `cargo` version and confirm the proper installation.
-
-## 2. Golang
-
-You will also need the __Go__ programming language installed and configured on your machine. This is a requirement for the section [Installing Gaia](./tutorials/local-chains/gaia.md) in the [Two Local Chains](./tutorials/local-chains/index.md) tutorial.
-
-To install and configure Golang on your machine please follow the [Golang official documentation](https://golang.org/doc/install).
-
-## Next Steps
-
-Next, go to the [Installation](./installation.md) section to learn how to build Hermes.
diff --git a/guide/src/relayer.md b/guide/src/relayer.md
deleted file mode 100644
index 0c3f813561..0000000000
--- a/guide/src/relayer.md
+++ /dev/null
@@ -1,25 +0,0 @@
-# What is Hermes?
-
-Hermes is an open-source Rust implementation of a relayer for the
-[Inter-Blockchain Communication protocol](https://ibc.cosmos.network) (IBC),
-released under the [ibc-relayer-cli](https://crates.io/crates/ibc-relayer-cli) crate.
-
-The **Inter-Blockchain Communication protocol** is an end-to-end, connection-oriented,
-stateful protocol for reliable, ordered, and authenticated communication between modules
-on separate distributed ledgers. [^ibc]
-
-An IBC **relayer** is an off-chain process responsible for relaying IBC datagrams between any two chains.
-The way it does so is by scanning chain states, building transactions based on these states,
-and submitting the transactions to the chains involved in the network.
-
-The relayer is a central element in the IBC network architecture. This is because chain modules
-in this architecture are not directly sending messages to each other over networking infrastructure,
-but instead they create and store the data to be retrieved and used by a relayer to build the IBC datagrams.
-
-We sometimes refer to Hermes as "IBC Relayer CLI", to make it clear that this
-is a relayer CLI (i.e., a binary) and to distinguish it from the relayer core library
-(that is, the crate called [`ibc-relayer`](https://crates.io/crates/ibc-relayer)).
-
-Hermes is actively developed and maintained by [Informal Systems](https://informal.systems) in the [ibc-rs](https://github.com/informalsystems/ibc-rs) repository.
-
-[^ibc]: [The Interblockchain Communication Protocol: An Overview](https://arxiv.org/pdf/2006.15918.pdf)
diff --git a/guide/src/rest-api.md b/guide/src/rest-api.md
deleted file mode 100644
index 3593b1f2ae..0000000000
--- a/guide/src/rest-api.md
+++ /dev/null
@@ -1,177 +0,0 @@
-# REST API
-
-*Since version 0.7.0.*
-
-Hermes features a built-in HTTP server which exposes information
-about the relayer configuration and state via a REST API.
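As a quick illustration of how the endpoints documented below are typically consumed, the following one-liner summarizes the active workers reported by the `/state` endpoint. It is only a sketch: it assumes the REST server has been enabled on the default `127.0.0.1:3000` address shown in the configuration section below, and that `jq` is installed.

```shell
# Count the active workers per worker type from the /state response.
curl -s http://127.0.0.1:3000/state | jq '.result.workers | map_values(length)'
```

The individual endpoints, including `/state` itself, are described in the sections that follow.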
-
-## Table of Contents
-
-
-
-## Configuration
-
-The REST API is not active by default, and must be enabled in the relayer configuration:
-
-```toml
-[rest]
-enabled = true
-host = '127.0.0.1'
-port = 3000
-```
-
-Please see the [relevant section in the *Configuration* page](./config.md#rest) for details about the configuration options.
-
-## Endpoints
-
-### GET `/version`
-
-This endpoint returns the version of Hermes (under the `ibc-relayer` key) as well
-as the version of the REST server itself (under the `ibc-relayer-rest` key).
-
-**Example**
-
-```
-❯ curl -s -X GET 'http://127.0.0.1:3000/version' | jq
-```
-
-```json
-[
-  {
-    "name": "ibc-relayer",
-    "version": "0.15.0"
-  },
-  {
-    "name": "ibc-relayer-rest",
-    "version": "0.1.0"
-  }
-]
-```
-
-### GET `/chains`
-
-This endpoint returns the identifiers of the chains that Hermes is connected to.
-Those identifiers can be used with the `/chain/:id` endpoint to gather more
-information about each chain's configuration. See the next section for more details.
-
-**Example**
-
-```
-❯ curl -s -X GET 'http://127.0.0.1:3000/chains' | jq
-```
-
-```json
-{
-  "status": "success",
-  "result": [
-    "ibc-0",
-    "ibc-1"
-  ]
-}
-```
-
-### GET `/chain/:id`
-
-This endpoint returns the configuration of the chain with the given identifier,
-where `:id` stands for the identifier.
-
-**Example**
-
-```
-❯ curl -s -X GET 'http://127.0.0.1:3000/chain/ibc-0' | jq
-```
-
-```json
-{
-  "status": "success",
-  "result": {
-    "id": "ibc-0",
-    "rpc_addr": "http://127.0.0.1:26657/",
-    "websocket_addr": "ws://127.0.0.1:26657/websocket",
-    "grpc_addr": "http://127.0.0.1:9090/",
-    "rpc_timeout": "10s",
-    "account_prefix": "cosmos",
-    "key_name": "testkey",
-    "store_prefix": "ibc",
-    "max_gas": 900000000,
-    "gas_adjustment": null,
-    "max_msg_num": 60,
-    "max_tx_size": 2097152,
-    "clock_drift": "5s",
-    "trusting_period": "14days",
-    "trust_threshold": {
-      "numerator": "1",
-      "denominator": "3"
-    },
-    "gas_price": {
-      "price": 0.001,
-      "denom": "stake"
-    },
-    "packet_filter": {
-      "policy": "allowall"
-    }
-  }
-}
-```
-
-### GET `/state`
-
-This endpoint returns the current state of the relayer,
-namely which chains it is connected to, as well as a description
-of all the workers which are currently active.
- -``` -❯ curl -s -X GET 'http://127.0.0.1:3000/state' | jq -``` - -```json -{ - "status": "success", - "result": { - "chains": [ - "ibc-0", - "ibc-1" - ], - "workers": { - "Client": [ - { - "id": 3, - "object": { - "type": "Client", - "dst_chain_id": "ibc-1", - "dst_client_id": "07-tendermint-0", - "src_chain_id": "ibc-0" - } - }, - { - "id": 4, - "object": { - "type": "Client", - "dst_chain_id": "ibc-1", - "dst_client_id": "07-tendermint-1", - "src_chain_id": "ibc-0" - } - }, - { - "id": 1, - "object": { - "type": "Client", - "dst_chain_id": "ibc-0", - "dst_client_id": "07-tendermint-0", - "src_chain_id": "ibc-1" - } - }, - { - "id": 2, - "object": { - "type": "Client", - "dst_chain_id": "ibc-0", - "dst_client_id": "07-tendermint-1", - "src_chain_id": "ibc-1" - } - } - ] - } - } -} -``` diff --git a/guide/src/telemetry.md b/guide/src/telemetry.md deleted file mode 100644 index fcc7ca7deb..0000000000 --- a/guide/src/telemetry.md +++ /dev/null @@ -1,190 +0,0 @@ -# Telemetry - -*Since version 0.4.0.* - -To gain a better understanding of the status and activity of the relayer, -Hermes features a built-in telemetry service based on the [OpenTelemetry][opentelemetry] observability framework, -whose metrics can be exposed over HTTP for integration with the [Prometheus][prometheus] monitoring system. - -The official Hermes builds for Linux and macOS come with telemetry support since version 0.4.0, -and can be [downloaded directly from the GitHub Releases][gh-releases] page. - -[gh-releases]: https://github.com/informalsystems/ibc-rs/releases -[opentelemetry]: https://opentelemetry.io -[prometheus]: https://prometheus.io - -## Configuration - -The telemetry service is not active by default, and must be enabled in the relayer configuration: - -```toml -[telemetry] -enabled = true -host = '127.0.0.1' -port = 3001 -``` - -Please see the [relevant section in the *Configuration* page](./config.md#telemetry) for details about the configuration options. - -## Metrics - -The following table describes the metrics currently tracked by the telemetry service: - -| Name | Description | OpenTelemetry type | -| ---------------------------- | ---------------------------------------------------- | ------------------- | -| `workers` | Number of workers per object | `i64` UpDownCounter | -| `ibc_client_updates` | Number of client updates performed per client | `u64` Counter | -| `ibc_client_misbehaviours` | Number of misbehaviours detected per client | `u64` Counter | -| `ibc_receive_packets` | Number of receive packets relayed per channel | `u64` Counter | -| `ibc_acknowledgment_packets` | Number of acknowledgment packets relayed per channel | `u64` Counter | -| `ibc_timeout_packets` | Number of timeout packets relayed per channel | `u64` Counter | -| `wallet_balance` | How much balance (coins) there is left in each wallet key that Hermes is using. | `u64` ValueRecorder | -| `ws_events` | How many IBC events did Hermes receive via the websocket subscription, in total since starting up, per chain. | Counter | -| `ws_reconnect` | Number of times Hermes had to reconnect to the WebSocket endpoint | Counter | -| `tx_latency_submitted` | Latency for all transactions submitted to a chain (i.e., difference between the moment when Hermes received an event until the corresponding transaction(s) were submitted). 
| `u64` ValueRecorder | -| `tx_latency_confirmed` | Latency for all transactions confirmed by a chain (i.e., difference between the moment when Hermes received an event until the corresponding transaction(s) were confirmed). Requires `tx_confirmation = true`. | `u64` ValueRecorder | -| `msg_num` | How many messages Hermes submitted to a specific chain. | `u64` Counter | - -## Integration with Prometheus - -With the settings , the telemetry service will be enabled and will serve the metrics using -the Prometheus encoder over HTTP at [`http://localhost:3001/metrics`](http://localhost:3001/metrics). - -After starting Hermes with `hermes start`, and letting it run for a while to relay packets, -open [`http://localhost:3001/metrics`](http://localhost:3001/metrics) in a browser, you should -see Prometheus-encoded metrics. - -For example, with two channels and after transferring some tokens between the chains: - -```text -# HELP cache_hits Number of cache hits for queries emitted by the relayer, per chain and query type -# TYPE cache_hits counter -cache_hits{chain="ibc-0",query_type="query_channel"} 276 -cache_hits{chain="ibc-0",query_type="query_client_state"} 177 -cache_hits{chain="ibc-0",query_type="query_connection"} 160 -cache_hits{chain="ibc-1",query_type="query_channel"} 240 -cache_hits{chain="ibc-1",query_type="query_client_state"} 173 -cache_hits{chain="ibc-1",query_type="query_connection"} 160 -# HELP ibc_acknowledgment_packets Number of acknowledgment packets relayed per channel -# TYPE ibc_acknowledgment_packets counter -ibc_acknowledgment_packets{src_chain="ibc-0",src_channel="channel-0",src_port="transfer"} 0 -ibc_acknowledgment_packets{src_chain="ibc-0",src_channel="channel-1",src_port="transfer"} 42 -ibc_acknowledgment_packets{src_chain="ibc-1",src_channel="channel-0",src_port="transfer"} 110 -ibc_acknowledgment_packets{src_chain="ibc-1",src_channel="channel-1",src_port="transfer"} 0 -# HELP ibc_receive_packets Number of receive packets relayed per channel -# TYPE ibc_receive_packets counter -ibc_receive_packets{src_chain="ibc-0",src_channel="channel-0",src_port="transfer"} 110 -ibc_receive_packets{src_chain="ibc-0",src_channel="channel-1",src_port="transfer"} 0 -ibc_receive_packets{src_chain="ibc-1",src_channel="channel-0",src_port="transfer"} 0 -ibc_receive_packets{src_chain="ibc-1",src_channel="channel-1",src_port="transfer"} 42 -# HELP ibc_timeout_packets Number of timeout packets relayed per channel -# TYPE ibc_timeout_packets counter -ibc_timeout_packets{src_chain="ibc-0",src_channel="channel-0",src_port="transfer"} 0 -ibc_timeout_packets{src_chain="ibc-0",src_channel="channel-1",src_port="transfer"} 0 -ibc_timeout_packets{src_chain="ibc-1",src_channel="channel-0",src_port="transfer"} 0 -ibc_timeout_packets{src_chain="ibc-1",src_channel="channel-1",src_port="transfer"} 0 -# HELP msg_num How many messages Hermes submitted to the chain, per chain -# TYPE msg_num counter -msg_num{chain="ibc-0"} 168 -msg_num{chain="ibc-1"} 156 -# HELP queries Number of queries emitted by the relayer, per chain and query type -# TYPE queries counter -queries{chain="ibc-0",query_type="query_application_status"} 23 -queries{chain="ibc-0",query_type="query_channel"} 88 -queries{chain="ibc-0",query_type="query_client_connections"} 2 -queries{chain="ibc-0",query_type="query_client_state"} 383 -queries{chain="ibc-0",query_type="query_clients"} 1 -queries{chain="ibc-0",query_type="query_connection"} 2 -queries{chain="ibc-0",query_type="query_connection_channels"} 2 
-queries{chain="ibc-0",query_type="query_consensus_state"} 392 -queries{chain="ibc-0",query_type="query_consensus_states"} 2 -queries{chain="ibc-0",query_type="query_latest_height"} 1 -queries{chain="ibc-0",query_type="query_packet_acknowledgements"} 5 -queries{chain="ibc-0",query_type="query_packet_commitments"} 10 -queries{chain="ibc-0",query_type="query_staking_params"} 2 -queries{chain="ibc-0",query_type="query_txs"} 76 -queries{chain="ibc-0",query_type="query_unreceived_acknowledgements"} 241 -queries{chain="ibc-0",query_type="query_unreceived_packets"} 127 -queries{chain="ibc-1",query_type="query_application_status"} 20 -queries{chain="ibc-1",query_type="query_channel"} 224 -queries{chain="ibc-1",query_type="query_client_connections"} 2 -queries{chain="ibc-1",query_type="query_client_state"} 387 -queries{chain="ibc-1",query_type="query_clients"} 1 -queries{chain="ibc-1",query_type="query_connection"} 2 -queries{chain="ibc-1",query_type="query_connection_channels"} 2 -queries{chain="ibc-1",query_type="query_consensus_state"} 394 -queries{chain="ibc-1",query_type="query_consensus_states"} 3 -queries{chain="ibc-1",query_type="query_latest_height"} 1 -queries{chain="ibc-1",query_type="query_packet_acknowledgements"} 5 -queries{chain="ibc-1",query_type="query_packet_commitments"} 10 -queries{chain="ibc-1",query_type="query_staking_params"} 2 -queries{chain="ibc-1",query_type="query_txs"} 56 -queries{chain="ibc-1",query_type="query_unreceived_acknowledgements"} 127 -queries{chain="ibc-1",query_type="query_unreceived_packets"} 292 -# HELP tx_latency_confirmed The latency for all transactions submitted to a specific chain, i.e. the difference between the moment when Hermes received a batch of events until the corresponding transaction(s) were confirmed. Milliseconds. 
-# TYPE tx_latency_confirmed histogram -tx_latency_confirmed_bucket{chain="ibc-0",channel="channel-0",counterparty="ibc-1",port="transfer",le="0.5"} 0 -tx_latency_confirmed_bucket{chain="ibc-0",channel="channel-0",counterparty="ibc-1",port="transfer",le="0.9"} 0 -tx_latency_confirmed_bucket{chain="ibc-0",channel="channel-0",counterparty="ibc-1",port="transfer",le="0.99"} 0 -tx_latency_confirmed_bucket{chain="ibc-0",channel="channel-0",counterparty="ibc-1",port="transfer",le="+Inf"} 4 -tx_latency_confirmed_sum{chain="ibc-0",channel="channel-0",counterparty="ibc-1",port="transfer"} 22466 -tx_latency_confirmed_count{chain="ibc-0",channel="channel-0",counterparty="ibc-1",port="transfer"} 4 -tx_latency_confirmed_bucket{chain="ibc-0",channel="channel-1",counterparty="ibc-1",port="transfer",le="0.5"} 0 -tx_latency_confirmed_bucket{chain="ibc-0",channel="channel-1",counterparty="ibc-1",port="transfer",le="0.9"} 0 -tx_latency_confirmed_bucket{chain="ibc-0",channel="channel-1",counterparty="ibc-1",port="transfer",le="0.99"} 0 -tx_latency_confirmed_bucket{chain="ibc-0",channel="channel-1",counterparty="ibc-1",port="transfer",le="+Inf"} 1 -tx_latency_confirmed_sum{chain="ibc-0",channel="channel-1",counterparty="ibc-1",port="transfer"} 4256 -tx_latency_confirmed_count{chain="ibc-0",channel="channel-1",counterparty="ibc-1",port="transfer"} 1 -tx_latency_confirmed_bucket{chain="ibc-1",channel="channel-0",counterparty="ibc-0",port="transfer",le="0.5"} 0 -tx_latency_confirmed_bucket{chain="ibc-1",channel="channel-0",counterparty="ibc-0",port="transfer",le="0.9"} 0 -tx_latency_confirmed_bucket{chain="ibc-1",channel="channel-0",counterparty="ibc-0",port="transfer",le="0.99"} 0 -tx_latency_confirmed_bucket{chain="ibc-1",channel="channel-0",counterparty="ibc-0",port="transfer",le="+Inf"} 2 -tx_latency_confirmed_sum{chain="ibc-1",channel="channel-0",counterparty="ibc-0",port="transfer"} 9408 -tx_latency_confirmed_count{chain="ibc-1",channel="channel-0",counterparty="ibc-0",port="transfer"} 2 -tx_latency_confirmed_bucket{chain="ibc-1",channel="channel-1",counterparty="ibc-0",port="transfer",le="0.5"} 0 -tx_latency_confirmed_bucket{chain="ibc-1",channel="channel-1",counterparty="ibc-0",port="transfer",le="0.9"} 0 -tx_latency_confirmed_bucket{chain="ibc-1",channel="channel-1",counterparty="ibc-0",port="transfer",le="0.99"} 0 -tx_latency_confirmed_bucket{chain="ibc-1",channel="channel-1",counterparty="ibc-0",port="transfer",le="+Inf"} 1 -tx_latency_confirmed_sum{chain="ibc-1",channel="channel-1",counterparty="ibc-0",port="transfer"} 3173 -tx_latency_confirmed_count{chain="ibc-1",channel="channel-1",counterparty="ibc-0",port="transfer"} 1 -# HELP tx_latency_submitted The latency for all transactions submitted to a specific chain, i.e. the difference between the moment when Hermes received a batch of events and when it submitted the corresponding transaction(s). Milliseconds. 
-# TYPE tx_latency_submitted histogram -tx_latency_submitted_bucket{chain="ibc-0",channel="channel-0",counterparty="ibc-1",port="transfer",le="0.5"} 0 -tx_latency_submitted_bucket{chain="ibc-0",channel="channel-0",counterparty="ibc-1",port="transfer",le="0.9"} 0 -tx_latency_submitted_bucket{chain="ibc-0",channel="channel-0",counterparty="ibc-1",port="transfer",le="0.99"} 0 -tx_latency_submitted_bucket{chain="ibc-0",channel="channel-0",counterparty="ibc-1",port="transfer",le="+Inf"} 5 -tx_latency_submitted_sum{chain="ibc-0",channel="channel-0",counterparty="ibc-1",port="transfer"} 14428 -tx_latency_submitted_count{chain="ibc-0",channel="channel-0",counterparty="ibc-1",port="transfer"} 5 -tx_latency_submitted_bucket{chain="ibc-0",channel="channel-1",counterparty="ibc-1",port="transfer",le="0.5"} 0 -tx_latency_submitted_bucket{chain="ibc-0",channel="channel-1",counterparty="ibc-1",port="transfer",le="0.9"} 0 -tx_latency_submitted_bucket{chain="ibc-0",channel="channel-1",counterparty="ibc-1",port="transfer",le="0.99"} 0 -tx_latency_submitted_bucket{chain="ibc-0",channel="channel-1",counterparty="ibc-1",port="transfer",le="+Inf"} 1 -tx_latency_submitted_sum{chain="ibc-0",channel="channel-1",counterparty="ibc-1",port="transfer"} 729 -tx_latency_submitted_count{chain="ibc-0",channel="channel-1",counterparty="ibc-1",port="transfer"} 1 -tx_latency_submitted_bucket{chain="ibc-1",channel="channel-0",counterparty="ibc-0",port="transfer",le="0.5"} 0 -tx_latency_submitted_bucket{chain="ibc-1",channel="channel-0",counterparty="ibc-0",port="transfer",le="0.9"} 0 -tx_latency_submitted_bucket{chain="ibc-1",channel="channel-0",counterparty="ibc-0",port="transfer",le="0.99"} 0 -tx_latency_submitted_bucket{chain="ibc-1",channel="channel-0",counterparty="ibc-0",port="transfer",le="+Inf"} 2 -tx_latency_submitted_sum{chain="ibc-1",channel="channel-0",counterparty="ibc-0",port="transfer"} 1706 -tx_latency_submitted_count{chain="ibc-1",channel="channel-0",counterparty="ibc-0",port="transfer"} 2 -tx_latency_submitted_bucket{chain="ibc-1",channel="channel-1",counterparty="ibc-0",port="transfer",le="0.5"} 0 -tx_latency_submitted_bucket{chain="ibc-1",channel="channel-1",counterparty="ibc-0",port="transfer",le="0.9"} 0 -tx_latency_submitted_bucket{chain="ibc-1",channel="channel-1",counterparty="ibc-0",port="transfer",le="0.99"} 0 -tx_latency_submitted_bucket{chain="ibc-1",channel="channel-1",counterparty="ibc-0",port="transfer",le="+Inf"} 1 -tx_latency_submitted_sum{chain="ibc-1",channel="channel-1",counterparty="ibc-0",port="transfer"} 791 -tx_latency_submitted_count{chain="ibc-1",channel="channel-1",counterparty="ibc-0",port="transfer"} 1 -# HELP wallet_balance The balance in each wallet that Hermes is using, per wallet, denom and chain -# TYPE wallet_balance gauge -wallet_balance{account="cosmos1934akx97773lsjjs9x74dr03uuam29hcc9grp3",chain="ibc-0",denom="stake"} 99999970473 -wallet_balance{account="cosmos1hngzqscyg476nd68qggxps8r2aq56lne45ps8n",chain="ibc-1",denom="stake"} 99999978431 -# HELP workers Number of workers per object -# TYPE workers gauge -workers{type="client"} 4 -workers{type="packet"} 4 -workers{type="wallet"} 2 -# HELP ws_events How many IBC events did Hermes receive via the WebSocket subscription, per chain -# TYPE ws_events counter -ws_events{chain="ibc-0"} 443 -ws_events{chain="ibc-1"} 370 -``` - diff --git a/guide/src/tutorials/index.md b/guide/src/tutorials/index.md deleted file mode 100644 index 8eee017999..0000000000 --- a/guide/src/tutorials/index.md +++ /dev/null @@ -1,9 +0,0 @@ -# Tutorials 
- -This section includes tutorials for some common relayer uses cases and commands. You can also refer to the [Commands Reference](../commands/index.md) section to learn more about individual commands. - -## Basic tutorials - -**[Two Local Chains](./local-chains/index.md)** - -In this tutorial you will learn how to start two local [`Cosmos Gaia`](https://github.com/cosmos/gaia) chains that support the `IBC` protocol and start relaying packets between them. diff --git a/guide/src/tutorials/local-chains/gaia.md b/guide/src/tutorials/local-chains/gaia.md deleted file mode 100644 index 8736d48af2..0000000000 --- a/guide/src/tutorials/local-chains/gaia.md +++ /dev/null @@ -1,42 +0,0 @@ -# Install Gaia - -The script to start the chains requires gaia to be installed. - -> __NOTE__: This assumes you have `Golang` programming language installed on -> your machine. If not, please ensure you install before proceeding. See -> more details in the [Pre-requisites](../../pre_requisites.md#2-golang) section. - -#### Clone gaia - -Clone the repository from Github: - -```shell -git clone https://github.com/cosmos/gaia.git ~/go/src/github.com/cosmos/gaia -``` - -#### Build and Install - -Run the `make` command to build and install `gaiad` - -```shell -cd ~/go/src/github.com/cosmos/gaia -git checkout v4.2.1 -make install -``` - -If the command above is successful you can run the following command to ensure it was properly installed: - -```shell -gaiad version --log_level error --long | head -n4 -``` -Output: -```shell -name: gaia -server_name: gaiad -version: v4.2.1 -commit: dbd8a6fb522c571debf958837f9113c56d418f6b -``` - -## Next Steps - -In the next section you will learn how to [start two local chains](./start.md) diff --git a/guide/src/tutorials/local-chains/identifiers.md b/guide/src/tutorials/local-chains/identifiers.md deleted file mode 100644 index b72682d7b1..0000000000 --- a/guide/src/tutorials/local-chains/identifiers.md +++ /dev/null @@ -1,130 +0,0 @@ -# Identifiers - - -A chain allocates identifiers when it creates clients, connections and channels. These identifiers can subsequently be used to refer to existing clients, connections and channels. - -> NOTE: If you want to ensure you get the same identifiers while following the tutorials, run the each of the three commands below __once__ on `ibc-1`. This will ensure that when going through the tutorial, a second channel on `ibc-1` with identifier `channel-1` will created. - -Chains allocate identifiers using a chain specific allocation scheme. Currently, *cosmos-sdk* implementation uses the follow identifiers: - -### 1. Client Identifiers - -__`07-tendermint-`__ for tendermint clients - -For example `07-tendermint-0` is assigned to the first client created on `ibc-1`: - - ```shell -hermes tx raw create-client ibc-1 ibc-0 - ``` - - ```json -Success: CreateClient( - CreateClient( - Attributes { - height: Height { - revision: 1, - height: 103, - }, - client_id: ClientId( - "07-tendermint-0", - ), - client_type: Tendermint, - consensus_height: Height { - revision: 0, - height: 112, - }, - }, - ), -) - ``` - -We will create a second client on `ibc-1` with identifier `07-tendermint-1` in the client tutorial. - -### 2. 
Connection Identifiers - -__`connection-`__ for connections - -For example `connection-0` is assigned to the first connection created on `ibc-1`: - -```shell -hermes tx raw conn-init ibc-1 ibc-0 07-tendermint-0 07-tendermint-0 -``` - -```json -Success: OpenInitConnection( - OpenInit( - Attributes { - height: Height { - revision: 1, - height: 119, - }, - connection_id: Some( - ConnectionId( - "connection-0", - ), - ), - client_id: ClientId( - "07-tendermint-0", - ), - counterparty_connection_id: None, - counterparty_client_id: ClientId( - "07-tendermint-0", - ), - }, - ), -) -``` -We will create a second connection on `ibc-1` with identifier `connection-1` in the connection tutorial. - -### 3. Channel Identifiers - -`channel-` for channels - -For example `channel-0` is assigned to the first channel created on `ibc-1`: - -```shell -hermes tx raw chan-open-init ibc-1 ibc-0 connection-0 transfer transfer -``` - -```json -Success: OpenInitChannel( - OpenInit( - Attributes { - height: Height { - revision: 1, - height: 225, - }, - port_id: PortId( - "transfer", - ), - channel_id: Some( - ChannelId( - "channel-0", - ), - ), - connection_id: ConnectionId( - "connection-0", - ), - counterparty_port_id: PortId( - "transfer", - ), - counterparty_channel_id: None, - }, - ), -) -``` - -In the following tutorials the __`ibc-0`__ and __`ibc-1`__ chains are setup and configured. - -For clarity, the tutorials run on a setup where the identifiers allocated to the client, connection and channel on __`ibc-0`__ are __`07-tendermint-0`__, __`connection-0`__ and __`channel-0`__ respectively. Identifiers allocated to the client, connection and channel on __`ibc-1`__ are __`07-tendermint-1`__, __`connection-1`__ and __`channel-1`__ respectively. - -Before going over the next sections, please ensure the commands above are executed. - -### Next Steps - -The following sections describe the commands to connect and relay packets between two chains. You can: - -1. use a [simplified approach](./relay-paths/index.md) for managing relaying paths, or -2. use [individual (raw) transactions](./raw/index.md) to create - all the necessary chain objects (clients, connections, channels) and relay packets. - diff --git a/guide/src/tutorials/local-chains/index.md b/guide/src/tutorials/local-chains/index.md deleted file mode 100644 index 4ffdfa025f..0000000000 --- a/guide/src/tutorials/local-chains/index.md +++ /dev/null @@ -1,7 +0,0 @@ -# Tutorial: Relayer with two local chains - -In this tutorial we will show how you can test the relayer against two chains, we provide a script that can start two separate chains and configure them automatically. This is the easiest way to get started. - -The script starts two [`gaia`](https://github.com/cosmos/gaia) chains that support the `IBC` protocol. - -Follow the steps in this tutorial section starting with the [Install Gaia](./gaia.md) section.# Local chains diff --git a/guide/src/tutorials/local-chains/raw/channel.md b/guide/src/tutorials/local-chains/raw/channel.md deleted file mode 100644 index 6902d98ae4..0000000000 --- a/guide/src/tutorials/local-chains/raw/channel.md +++ /dev/null @@ -1,46 +0,0 @@ -# 3. 
Channel Handshake - -## 3.1 `chan-open-init` - -Initialize a new unordered channel on `ibc-0`: -```shell -hermes tx raw chan-open-init ibc-0 ibc-1 connection-0 transfer transfer -o UNORDERED -``` - -## 3.2 `chan-open-try` - -Send a channel open try to `ibc-1`: -```shell -hermes tx raw chan-open-try ibc-1 ibc-0 connection-1 transfer transfer -s channel-0 -``` - -Take note of the ID allocated by the chain, e.g. `channel-1` on `ibc-1`. Use in the `chan-open-ack` CLI - -## 3.3 `chan-open-ack` - -Send a channel open acknowledgment to `ibc-0`: -```shell -hermes tx raw chan-open-ack ibc-0 ibc-1 connection-0 transfer transfer -d channel-0 -s channel-1 -``` - -## 3.4 `chan-open-confirm` - -Send the open confirmation to `ibc-1`: -```shell -hermes tx raw chan-open-confirm ibc-1 ibc-0 connection-1 transfer transfer -d channel-1 -s channel-0 -``` - -## 3.5 `query channel` -To verify that the two ends are in `Open` state: - -```shell -hermes query channel end ibc-0 transfer channel-0 -``` - -```shell -hermes query channel end ibc-1 transfer channel-1 -``` - -## Next Steps - -In the next section, we'll start to [relay packets](./packet.md) diff --git a/guide/src/tutorials/local-chains/raw/client.md b/guide/src/tutorials/local-chains/raw/client.md deleted file mode 100644 index b29ebe1e08..0000000000 --- a/guide/src/tutorials/local-chains/raw/client.md +++ /dev/null @@ -1,117 +0,0 @@ -# 1. Configuring clients - -### 1.1. `create client` - -First you will need to create a client for each chain: - -This command submits a transaction to a destination chain (`ibc-0`) with a request to create a client for a source chain (`ibc-1`): - -```shell -hermes tx raw create-client ibc-0 ibc-1 -``` - -if the command is successful a message similar to the one below will be displayed `status:success`: - -```json -{ - Success: CreateClient( - CreateClient( - Attributes { - height: Height { revision: 0, height: 43 }, - client_id: ClientId( - "07-tendermint-0", - ), - client_type: Tendermint, - consensus_height: Height { revision: 1, height: 32 }, - }, - ), - ) -} -``` - -> Please note the `client_id` value returned. You will need that for other commands. - -You can also execute a __query__ to view the client state on destination chain `ibc-0` by specifying the `client_id` value `07-tendermint-0`: - -```shell -hermes query client state ibc-0 07-tendermint-0 -``` - -which show a message similar to the one below: - -```json -Success: ClientState { - chain_id: ChainId { - id: "ibc-1", - version: 1, - }, - trust_level: TrustThresholdFraction { - numerator: 1, - denominator: 3, - }, - trusting_period: 1209600s, - unbonding_period: 1814400s, - max_clock_drift: 3s, - frozen_height: Height { - revision: 0, - height: 0, - }, - latest_height: Height { - revision: 1, - height: 38, - }, - upgrade_path: [ - "upgrade", - "upgradedIBCState", - ], - allow_update_after_expiry: true, - allow_update_after_misbehaviour: true, -} -``` - -Now let's do the same for `ibc-1` as the destination chain: - -```shell -hermes tx raw create-client ibc-1 ibc-0 -``` - -Take note of the `client_id` allocated for this client. In the examples we assume is `07-tendermint-1` (this client identity is obtained by creating two clients on ibc-1 for ibc-0). 
- -As before, if the command is successful a message with `status:success` is displayed: - -```json -Success: CreateClient( - CreateClient( - Attributes { - height: Height { - revision: 1, - height: 135, - }, - client_id: ClientId( - "07-tendermint-1", - ), - client_type: Tendermint, - consensus_height: Height { - revision: 0, - height: 145, - }, - }, - ), -) -``` - -### 1.2 `update-client` - -Client states can be updated by sending an `update-client` transaction: - -```shell -hermes tx raw update-client ibc-0 07-tendermint-0 -``` - -```shell -hermes tx raw update-client ibc-1 07-tendermint-1 -``` - -## Next Steps - -In the next section, we'll establish the [Connection Handshake](./connection.md) diff --git a/guide/src/tutorials/local-chains/raw/connection.md b/guide/src/tutorials/local-chains/raw/connection.md deleted file mode 100644 index f93c35e074..0000000000 --- a/guide/src/tutorials/local-chains/raw/connection.md +++ /dev/null @@ -1,50 +0,0 @@ -# 2. Connection Handshake - -## 2.1 `conn-init` - -Initialize a new connection on `ibc-0`: -```shell -hermes tx raw conn-init ibc-0 ibc-1 07-tendermint-0 07-tendermint-1 -``` - -Take note of the ID allocated by the chain, e.g. `connection-0` on `ibc-0` in order to use it in the `conn-try` command below. - -## 2.2 `conn-try` - -Send a connection try to `ibc-1`: -```shell -hermes tx raw conn-try ibc-1 ibc-0 07-tendermint-1 07-tendermint-0 -s connection-0 -``` - -Take note of the ID allocated by the chain, e.g. `connection-1` on `ibc-1`. Use in the `conn-ack` CLI - -## 2.3 `conn-ack` - -Send a connection open acknowledgment to `ibc-0`: -```shell -hermes tx raw conn-ack ibc-0 ibc-1 07-tendermint-0 07-tendermint-1 -d connection-0 -s connection-1 -``` - -## 2.4 `conn-confirm` - -Send the open confirmation to `ibc-1`: -```shell -hermes tx raw conn-confirm ibc-1 ibc-0 07-tendermint-1 07-tendermint-0 -d connection-1 -s connection-0 -``` - -## 2.5 `query connection` - -To verify that the two ends are in `Open` state: - -```shell -hermes query connection end ibc-0 connection-0 -``` - -```shell -hermes query connection end ibc-1 connection-1 -``` - - -## Next Steps - -In the next section, we'll [establish a new channel](./channel.md) diff --git a/guide/src/tutorials/local-chains/raw/index.md b/guide/src/tutorials/local-chains/raw/index.md deleted file mode 100644 index bc0fbeebaa..0000000000 --- a/guide/src/tutorials/local-chains/raw/index.md +++ /dev/null @@ -1,12 +0,0 @@ -# Connecting the chains - -In the rest of this section we will show how to create the clients, establish a connection and a channel between the two chains, and relay packets over the channel. But first, make sure you followed the steps in the [start the local chains](../start.md) and [Identifiers section](../identifiers.md) - -## Steps to start relaying packets between the two local chains - -In order to start relaying packets please follow the steps below: - -* [Configure Clients](./client.md) -* [Connection Handshake](./connection.md) -* [Open the Channel](./channel.md) -* [Relay Packets](./packet.md) diff --git a/guide/src/tutorials/local-chains/raw/packet.md b/guide/src/tutorials/local-chains/raw/packet.md deleted file mode 100644 index 426b374d50..0000000000 --- a/guide/src/tutorials/local-chains/raw/packet.md +++ /dev/null @@ -1,92 +0,0 @@ -# 4. 
Relay Packets - -### 4.1 Query balances - -- balance at ibc-0 - - ```shell - gaiad --node tcp://localhost:26657 query bank balances $(gaiad --home data/ibc-0 keys --keyring-backend="test" show user -a) - ``` - -- balance at ibc-1 - - ```shell - gaiad --node tcp://localhost:26557 query bank balances $(gaiad --home data/ibc-1 keys --keyring-backend="test" show user -a) - ``` - -> Note that the addresses used in the two commands above are configured in `dev-env`. - -### 4.2 Packet relaying - -First, we'll send `9999` `samoleans` from `ibc-0` to `ibc-1`. - -- start the transfer of 9999 samoleans from `ibc-0` to `ibc-1`. This sends a `MsgTransfer` in a transaction to `ibc-0` - - ```shell - hermes tx raw ft-transfer ibc-1 ibc-0 transfer channel-0 9999 -o 1000 -n 1 -d samoleans - ``` - -- query packet commitments on `ibc-0` - - ```shell - hermes query packet commitments ibc-0 transfer channel-0 - ``` - -- query unreceived packets on `ibc-1` - - ```shell - hermes query packet unreceived-packets ibc-1 transfer channel-1 - ``` - -- send `recv_packet` to `ibc-1` - - ```shell - hermes tx raw packet-recv ibc-1 ibc-0 transfer channel-0 - ``` - -- query unreceived acks on `ibc-0` - - ```shell - hermes query packet unreceived-acks ibc-0 transfer channel-0 - ``` - -- send acknowledgement to `ibc-0` - - ```shell - hermes tx raw packet-ack ibc-0 ibc-1 transfer channel-1 - ``` - -Send those samoleans back, from `ibc-1` to `ibc-0`. - -```shell -hermes tx raw ft-transfer ibc-0 ibc-1 transfer channel-1 9999 -o 1000 -n 1 -d ibc/49D321B40FCF56B0370E5673CF090389C8E9CD185209FBE1BEE5D94E58E69BDC -hermes tx raw packet-recv ibc-0 ibc-1 transfer channel-1 -hermes tx raw packet-ack ibc-1 ibc-0 transfer channel-0 -``` - -The `ibc/49D321B40FCF56B0370E5673CF090389C8E9CD185209FBE1BEE5D94E58E69BDC` denominator above can be obtained by querying the balance at `ibc-1` after the transfer from `ibc-0` to `ibc-1` is concluded. - -Next we will test the packet timeouts. -- send 1 packet with low timeout height offset to ibc-0 - - ```shell - hermes tx raw ft-transfer ibc-1 ibc-0 transfer channel-0 9999 -o 2 -n 1 - ``` - -- send timeout to `ibc-0` - - ```shell - hermes tx raw packet-recv ibc-1 ibc-0 transfer channel-0 - ``` - -- send 1 packet with 2 second timeout to ibc-0 - - ```shell - hermes tx raw ft-transfer ibc-1 ibc-0 transfer channel-0 9999 -t 2 -n 1 - ``` - -- send timeout to `ibc-0` - - ```shell - hermes tx raw packet-recv ibc-1 ibc-0 transfer channel-0 - ``` \ No newline at end of file diff --git a/guide/src/tutorials/local-chains/relay-paths/create-new-path.md b/guide/src/tutorials/local-chains/relay-paths/create-new-path.md deleted file mode 100644 index 1ddc70699e..0000000000 --- a/guide/src/tutorials/local-chains/relay-paths/create-new-path.md +++ /dev/null @@ -1,65 +0,0 @@ -# Create a new path - -Perform client creation, connection and channel handshake to establish a new path between the `transfer` ports on `ibc-0` and `ibc-1` chains. - -```shell -hermes create channel ibc-0 -c ibc-1 --port-a transfer --port-b transfer --new-client-connection -``` - -If all the handshakes are performed successfully you should see a message similar to the one below: - -```json -Success: Channel { - ordering: Unordered, - a_side: ChannelSide { - chain: ProdChainHandle { - chain_id: ChainId { - id: "ibc-0", - version: 0, - }, - runtime_sender: Sender { .. 
}, - }, - client_id: ClientId( - "07-tendermint-0", - ), - connection_id: ConnectionId( - "connection-0", - ), - port_id: PortId( - "transfer", - ), - channel_id: ChannelId( - "channel-0", - ), - }, - b_side: ChannelSide { - chain: ProdChainHandle { - chain_id: ChainId { - id: "ibc-1", - version: 1, - }, - runtime_sender: Sender { .. }, - }, - client_id: ClientId( - "07-tendermint-1", - ), - connection_id: ConnectionId( - "connection-1", - ), - port_id: PortId( - "transfer", - ), - channel_id: ChannelId( - "channel-1", - ), - }, - connection_delay: 0s, - version: Some( - "ics20-1", - ), -} - -``` - -Note that for each side, *a_side* (__ibc-0__) and *b_side* (__ibc-1__) there are a __client_id__, __connection_id__, __channel_id__ and __port_id__. -With all these established, you have [a path that you can relay packets over](./multiple-paths.md). diff --git a/guide/src/tutorials/local-chains/relay-paths/index.md b/guide/src/tutorials/local-chains/relay-paths/index.md deleted file mode 100644 index bcd7dfa3aa..0000000000 --- a/guide/src/tutorials/local-chains/relay-paths/index.md +++ /dev/null @@ -1,12 +0,0 @@ - -# Connect the chains using relay paths - -A relay path refers to a specific channel used to interconnect two chains and over which packets are being sent. - -Hermes can be started to listen for packet events on the two ends of multiple paths and relay packets over these paths. -This can be done over a new path or over existing paths. - -- [Create a new path](./create-new-path.md) -- [Packet relaying on multiple paths](./multiple-paths.md) - -Before proceeding to the sections above, please first, make sure you followed the steps in the [Identifiers section](../identifiers.md) diff --git a/guide/src/tutorials/local-chains/relay-paths/multiple-paths.md b/guide/src/tutorials/local-chains/relay-paths/multiple-paths.md deleted file mode 100644 index e314197603..0000000000 --- a/guide/src/tutorials/local-chains/relay-paths/multiple-paths.md +++ /dev/null @@ -1,328 +0,0 @@ -# Relay packets on multiple paths - -Hermes can relay packets over all current or future paths between the configured set of chains. - -Follow the steps below to connect three chains together and relay packets between them: - -1. 
Paste the following configuration in the standard Hermes configuration file at `~/.hermes/config.toml`: - - ```toml - [global] - log_level = 'info' - - [mode] - - [mode.clients] - enabled = true - refresh = true - misbehaviour = true - - [mode.connections] - enabled = false - - [mode.channels] - enabled = false - - [mode.packets] - enabled = true - clear_interval = 100 - clear_on_start = true - tx_confirmation = true - - [[chains]] - id = 'ibc-0' - rpc_addr = 'http://127.0.0.1:26657' - grpc_addr = 'http://127.0.0.1:9090' - websocket_addr = 'ws://127.0.0.1:26657/websocket' - rpc_timeout = '10s' - account_prefix = 'cosmos' - key_name = 'testkey' - store_prefix = 'ibc' - max_gas = 2000000 - gas_price = { price = 0.001, denom = 'stake' } - gas_adjustment = 0.1 - clock_drift = '5s' - trusting_period = '14days' - trust_threshold = { numerator = '1', denominator = '3' } - - [[chains]] - id = 'ibc-1' - rpc_addr = 'http://127.0.0.1:26557' - grpc_addr = 'http://127.0.0.1:9091' - websocket_addr = 'ws://127.0.0.1:26557/websocket' - rpc_timeout = '10s' - account_prefix = 'cosmos' - key_name = 'testkey' - store_prefix = 'ibc' - max_gas = 2000000 - gas_price = { price = 0.001, denom = 'stake' } - gas_adjustment = 0.1 - clock_drift = '5s' - trusting_period = '14days' - trust_threshold = { numerator = '1', denominator = '3' } - - [[chains]] - id = 'ibc-2' - rpc_addr = 'http://127.0.0.1:26457' - grpc_addr = 'http://127.0.0.1:9092' - websocket_addr = 'ws://127.0.0.1:26457/websocket' - rpc_timeout = '10s' - account_prefix = 'cosmos' - key_name = 'testkey' - store_prefix = 'ibc' - max_gas = 2000000 - gas_price = { price = 0.001, denom = 'stake' } - gas_adjustment = 0.1 - clock_drift = '5s' - trusting_period = '14days' - trust_threshold = { numerator = '1', denominator = '3' } - ``` - - This configuration has three chains `ibc-0`, `ibc-1` and `ibc-2`. - -2. Run the `dev-env` script with the parameters below to start three chains: - - ```bash - ./scripts/dev-env ~/.hermes/config.toml ibc-0 ibc-1 ibc-2 - ``` - - > __NOTE__: The script will prompt you to delete the data folder, double check the path and - > if it points to the `data` directory in the current directory, answer __'yes'__. - - The script configures and starts three __`gaiad`__ instances, named __`ibc-0`__, and __`ibc-1`__, and __`ibc-2`__. - - -3. Create a channel between `ibc-0` and `ibc-1`. Since this is the first time - we're connecting these two chains, we'll need to spin up a client and a - connection between them as well. The `create channel` command gives us the - convenient option to create a client and a connection. Keep in mind that this - is not the default behavior of `create channel`, but in this case we're - making an exception. Execute the following command: - - ```shell - hermes create channel ibc-0 --chain-b ibc-1 --port-a transfer --port-b transfer --new-client-connection - ``` - - Then respond 'yes' to the prompt that pops up. Once the command has run to - completion, you should see the following among the output logs: - - ```json - (...) - - Success: Channel { - ordering: Unordered, - a_side: ChannelSide { - chain: ProdChainHandle { - chain_id: ChainId { - id: "ibc-0", - version: 0, - }, - runtime_sender: Sender { .. 
}, - }, - client_id: ClientId( - "07-tendermint-0", - ), - connection_id: ConnectionId( - "connection-0", - ), - port_id: PortId( - "transfer", - ), - channel_id: ChannelId( - "channel-0", - ), - }, - b_side: ChannelSide { - chain: ProdChainHandle { - chain_id: ChainId { - id: "ibc-1", - version: 1, - }, - runtime_sender: Sender { .. }, - }, - client_id: ClientId( - "07-tendermint-0", - ), - connection_id: ConnectionId( - "connection-0", - ), - port_id: PortId( - "transfer", - ), - channel_id: ChannelId( - "channel-0", - ), - }, - connection_delay: 0s, - version: Some( - "ics20-1", - ), - } - ``` - - Note that the channel identifier on both `ibc-0` and `ibc-1` is `channel-0`. - -4. Create a channel between `ibc-1` and `ibc-2` using the structure of the - previous invocation we used to create a channel between `ibc-0` and `ibc-1`: - - ```shell - hermes create channel ibc-1 --chain-b ibc-2 --port-a transfer --port-b transfer --new-client-connection - ``` - - ```json - (...) - - Success: Channel { - ordering: Unordered, - a_side: ChannelSide { - chain: ProdChainHandle { - chain_id: ChainId { - id: "ibc-1", - version: 1, - }, - runtime_sender: Sender { .. }, - }, - client_id: ClientId( - "07-tendermint-1", - ), - connection_id: ConnectionId( - "connection-1", - ), - port_id: PortId( - "transfer", - ), - channel_id: ChannelId( - "channel-1", - ), - }, - b_side: ChannelSide { - chain: ProdChainHandle { - chain_id: ChainId { - id: "ibc-2", - version: 2, - }, - runtime_sender: Sender { .. }, - }, - client_id: ClientId( - "07-tendermint-0", - ), - connection_id: ConnectionId( - "connection-0", - ), - port_id: PortId( - "transfer", - ), - channel_id: ChannelId( - "channel-0", - ), - }, - connection_delay: 0s, - version: Some( - "ics20-1", - ), - } - ``` - - Note that the channel identifier on `ibc-1` is `channel-1`, and on `ibc-2` it is `channel-0`. - -5. Start Hermes using the `start` command: - - ```shell - hermes start - ``` - - Hermes will first relay the pending packets that have not been relayed and then - start passive relaying by listening to and acting on packet events. - -6. In a separate terminal, use the `ft-transfer` command to send: - - - Two packets from `ibc-0` to `ibc-1` from source channel `channel-0` - - ```shell - hermes tx raw ft-transfer ibc-1 ibc-0 transfer channel-0 9999 -o 1000 -n 2 - ``` - - ```json - Success: [ - SendPacket( - SendPacket { - height: revision: 0, height: 3056, - packet: PortId("transfer") ChannelId("channel-0") Sequence(3), - }, - ), - SendPacket( - SendPacket { - height: revision: 0, height: 3056, - packet: PortId("transfer") ChannelId("channel-0") Sequence(4), - }, - ), - ] - ``` - - - Two packets from `ibc-1` to `ibc-2` from source channel `channel-1` - - ```shell - hermes tx raw ft-transfer ibc-2 ibc-1 transfer channel-1 9999 -o 1000 -n 2 - ``` - - ```json - Success: [ - SendPacket( - SendPacket { - height: revision: 1, height: 3076, - packet: PortId("transfer") ChannelId("channel-1") Sequence(3), - }, - ), - SendPacket( - SendPacket { - height: revision: 1, height: 3076, - packet: PortId("transfer") ChannelId("channel-1") Sequence(4), - }, - ), - ] - ``` - -7. Observe the output on the relayer terminal, verify that the send events are processed, and that the `recv_packets` are sent out. - - ```text - (...) 
- - INFO ibc_relayer::link: [ibc-0 -> ibc-1] result events: - UpdateClientEv(ev_h:1-3048, 07-tendermint-0(0-3057), ) - WriteAcknowledgementEv(h:1-3048, seq:3, path:channel-0/transfer->channel-0/transfer, toh:1-4045, tos:0)) - WriteAcknowledgementEv(h:1-3048, seq:4, path:channel-0/transfer->channel-0/transfer, toh:1-4045, tos:0)) - INFO ibc_relayer::link: [ibc-0 -> ibc-1] success - - (...) - - INFO ibc_relayer::link: [ibc-1 -> ibc-0] clearing old packets - INFO ibc_relayer::link: [ibc-1 -> ibc-0] received from query_txs [] - INFO ibc_relayer::link: [ibc-1 -> ibc-0] finished clearing pending packets - INFO ibc_relayer::link: [ibc-1 -> ibc-0] generate messages from batch with 2 events - INFO ibc_relayer::link: [ibc-1 -> ibc-0] scheduling op. data with 2 msg(s) for Destination chain (height 1-3049) - INFO ibc_relayer::link: [ibc-1 -> ibc-0] relay op. data to Destination, proofs height 1-3048, (delayed by: 2.154603ms) [try 1/10] - INFO ibc_relayer::link: [ibc-1 -> ibc-0] prepending Destination client update @ height 1-3049 - INFO ibc_relayer::link: [ibc-1 -> ibc-0] assembled batch of 3 message(s) - INFO ibc_relayer::link: [ibc-1 -> ibc-0] result events: - UpdateClientEv(ev_h:0-3059, 07-tendermint-0(1-3049), ) - AcknowledgePacketEv(h:0-3059, seq:3, path:channel-0/transfer->channel-0/transfer, toh:1-4045, tos:0)) - AcknowledgePacketEv(h:0-3059, seq:4, path:channel-0/transfer->channel-0/transfer, toh:1-4045, tos:0)) - INFO ibc_relayer::link: [ibc-1 -> ibc-0] success - - (...) - ``` - -8. Query the unreceived packets and acknowledgments on `ibc-1` and `ibc-2` from a different terminal: - - ```shell - hermes query packet unreceived-packets ibc-1 transfer channel-0 - hermes query packet unreceived-acks ibc-0 transfer channel-0 - hermes query packet unreceived-packets ibc-2 transfer channel-0 - hermes query packet unreceived-acks ibc-1 transfer channel-1 - ``` - - If everything went well, each of these commands should result in: - - ``` - Success: [] - ``` diff --git a/guide/src/tutorials/local-chains/start.md b/guide/src/tutorials/local-chains/start.md deleted file mode 100644 index 791d977fb3..0000000000 --- a/guide/src/tutorials/local-chains/start.md +++ /dev/null @@ -1,222 +0,0 @@ -# Start the local chains - -In this chapter, you will learn how to spawn two Gaia chains, and use Hermes to relay packets between them. -To spawn the chains and configure Hermes accordingly, we will make use of script bundled in the `ibc-rs` repository. - -To this end, clone the `ibc-rs` repository and check out the current version: - -```bash -git clone git@github.com:informalsystems/ibc-rs.git -cd ibc-rs -git checkout v0.15.0 -``` - -### Stop existing `gaiad` processes - -If this is not the first time you are running the script, you can manually stop the two gaia instances executing the following command to kill all `gaiad` processes: - -```shell -killall gaiad -``` - -> __NOTE__: If you have any `Docker` containers running that might be using the same ports as `gaiad` (e.g. port 26657 or port 9090), please ensure you stop them first before proceeding to the next step. - -### Configuration file - -In order to run the script, you will need a `TOML` configuration file to be passed as a parameter. Please check the [`Configuration`](../../config.md) section for more information about the relayer configuration file. 
- -The following configuration file in the `ibc-rs` repository folder can be used for running the local chains: - -__config.toml__ - -```toml -{{#include ../../../../config.toml}} -``` - -#### Saving the configuration file - -##### Create the config.toml file - -```shell -mkdir -p $HOME/.hermes && touch $HOME/.hermes/config.toml -``` - -##### Add content to the configuration file: - -You can use your preferred text editor. If using `vi` you can run: - -```shell -vi ~/.hermes/config.toml -``` - -Then just __`copy`__ the content for `config.toml` above and __`paste`__ into this file. - -### Generating private keys -Next, we will need to associate a private key with chains `ibc-0` and `ibc-1` which `hermes` will use to sign transactions. There -are two steps involved. For each chain, -1. Generate a *Key Seed file* using `gaiad` -2. Use the *Key Seed file* to associate the corresponding private key with the chain - -In this tutorial, we will only generate a single *Key Seed file*, which we will use with both chains. - -#### Generate a Key Seed file -We will generate the Key Seed file for a key that we will call `testkey`. - -```shell -gaiad keys add testkey --output json -``` -This will generate an output similar to the one below (albeit all on the same line): -```json -{ - "name": "testkey", - "type": "local", - "address": "cosmos1tc3vcuxyyac0dmayf887t95tdg7qpyql48w7gj", - "pubkey": "cosmospub1addwnpepqgg7ng4ycm60pdxfzdfh4hjvkwcr3da59mr8k883vsstx60ruv7kur4525u", - "mnemonic": "[24 words mnemonic]" -} -``` - -Next, copy and paste the output to a new file called `key_seed.json`. This file contains all the information necessary for `hermes` to -derive a private key from. - -#### Associate a private key with each chain -Our config file specifies two chains: `ibc-0` and `ibc-1`. We will need to specify a private key that `hermes` will use for each chain. As -previously mentioned, in this tutorial we will use the same private key for both chains. - -```shell -hermes keys add ibc-0 -f key_seed.json -hermes keys add ibc-1 -f key_seed.json -``` -If successful, both commands should show an output similar to: - -``` -Success: Added key testkey ([ADDRESS]) on [CHAIN ID] chain -``` - -And that's it! `hermes` will now be able to sign transactions to be sent to both chains. `key_seed.json` can safely be disposed of. - -### Running the script to start the chains - -From the `ibc-rs` repository folder run the following script with the parameters below to start the chains (`ibc-0` and `ibc-1`) -and import the signing keys into the keyring: - -```bash -./scripts/dev-env ~/.hermes/config.toml ibc-0 ibc-1 -``` - -> __NOTE__: If the script above prompts you to delete the data folder just answer __'yes'__ - -The script configures and starts two __`gaiad`__ instances, one named __`ibc-0`__ and the other __`ibc-1`__ - -```mermaid -graph TD - A[dev-env] -->|run| C(start chains) - C -->|gaiad| D[ibc-0] - C -->|gaiad| F[ibc-1] -``` - -If the script runs successfully you should see a message similar to the one below in the terminal: - -```shell -GAIA VERSION INFO: v4.2.1 -Generating gaia configurations... -Creating gaiad instance: home=./data | chain-id=ibc-0 | p2p=:26656 | rpc=:26657 | profiling=:6060 | grpc=:9090 | samoleans=:100000000000 -Change settings in config.toml file... -Start gaia on grpc port: 9090... 
-Balances for validator 'cosmos15cugtww7rwmayvshfznuxam55jsv23xh3jdeqv' @ 'tcp://localhost:26657'
-balances:
-- amount: "0"
-  denom: stake
-pagination:
-  next_key: null
-  total: "0"
-Balances for user 'cosmos1usn8g2rj9q48y245pql9589zf9m8srcpxtzklg' @ 'tcp://localhost:26657'
-balances:
-- amount: "100000000000"
-  denom: samoleans
-- amount: "100000000000"
-  denom: stake
-pagination:
-  next_key: null
-  total: "0"
-Creating gaiad instance: home=./data | chain-id=ibc-1 | p2p=:26556 | rpc=:26557 | profiling=:6061 | grpc=:9091 | samoleans=:100000000000
-Change settings in config.toml file...
-Start gaia on grpc port: 9091...
-Balances for validator 'cosmos1zdmr04w7c04ef4vkuur9c0vyvl78q45qjncmja' @ 'tcp://localhost:26557'
-balances:
-- amount: "0"
-  denom: stake
-pagination:
-  next_key: null
-  total: "0"
-Balances for user 'cosmos12p6k2dta0lsd6n80tpz34yepfpv7u7fvedm5mp' @ 'tcp://localhost:26557'
-balances:
-- amount: "100000000000"
-  denom: samoleans
-- amount: "100000000000"
-  denom: stake
-pagination:
-  next_key: null
-  total: "0"
-ibc-0 initialized. Watch file /Users/ancaz/rust/ibc-rs/data/ibc-0.log to see its execution.
-ibc-1 initialized. Watch file /Users/ancaz/rust/ibc-rs/data/ibc-1.log to see its execution.
-Building the Rust relayer...
-Importing keys...
-Success: Added key 'testkey' (cosmos1usn8g2rj9q48y245pql9589zf9m8srcpxtzklg) on chain ibc-0
-Success: Added key 'testkey' (cosmos12p6k2dta0lsd6n80tpz34yepfpv7u7fvedm5mp) on chain ibc-1
-Done!
-```
-
-### Data directory
-The script creates a __`data`__ directory in the current directory. The __`data`__ directory contains the chain stores and configuration files.
-
-The __`data`__ directory has a tree structure similar to the one below:
-
-```shell
-data
-├── ibc-0
-│   ├── config
-│   ├── data
-│   ├── keyring-test
-│   ├── user_seed.json
-│   ├── user2_seed.json
-│   └── validator_seed.json
-├── ibc-0.log
-├── ibc-1
-│   ├── config
-│   ├── data
-│   ├── keyring-test
-│   ├── user_seed.json
-│   ├── user2_seed.json
-│   └── validator_seed.json
-└── ibc-1.log
-
-```
-
-> __Tip__: You can use the command `tree ./data/ -L 2` to view the folder structure above.
-
-### $HOME/.hermes directory
-
-By default, `hermes` expects the configuration file to be in the __`$HOME/.hermes`__ folder.
-
-It also stores the private keys for each chain in this folder, as outlined in the [Keys](../../commands/keys/index.md) section.
-
-After executing the __`dev-env`__ script, this is what the folder should look like:
-
-```shell
-$HOME/.hermes/
-├── config.toml
-└── keys
-    ├── ibc-0
-    │   └── keyring-test
-    │       └── testkey.json
-    └── ibc-1
-        └── keyring-test
-            └── testkey.json
-```
-
-#### Next Steps
-
-[The next section](./identifiers.md) describes how identifiers for clients, connections and channels
-are allocated, and will walk you through how to pre-allocate some identifiers
-to help you match them with their corresponding chains for the purpose of this tutorial.
diff --git a/guide/theme/css/chrome.css b/guide/theme/css/chrome.css
deleted file mode 100644
index 825b2bd029..0000000000
--- a/guide/theme/css/chrome.css
+++ /dev/null
@@ -1,495 +0,0 @@
-/* CSS for UI elements (a.k.a.
chrome) */ - -@import 'variables.css'; - -::-webkit-scrollbar { - background: var(--bg); -} -::-webkit-scrollbar-thumb { - background: var(--scrollbar); -} -html { - scrollbar-color: var(--scrollbar) var(--bg); -} -#searchresults a, -.content a:link, -a:visited, -a > .hljs { - color: var(--links); -} - -/* Menu Bar */ - -#menu-bar, -#menu-bar-hover-placeholder { - z-index: 101; - margin: auto calc(0px - var(--page-padding)); -} -#menu-bar { - position: relative; - display: flex; - flex-wrap: wrap; - background-color: var(--bg); - border-bottom-color: var(--bg); - border-bottom-width: 1px; - border-bottom-style: solid; -} -#menu-bar.sticky, -.js #menu-bar-hover-placeholder:hover + #menu-bar, -.js #menu-bar:hover, -.js.sidebar-visible #menu-bar { - position: -webkit-sticky; - position: sticky; - top: 0 !important; -} -#menu-bar-hover-placeholder { - position: sticky; - position: -webkit-sticky; - top: 0; - height: var(--menu-bar-height); -} -#menu-bar.bordered { - border-bottom-color: var(--table-border-color); -} -#menu-bar i, #menu-bar .icon-button { - position: relative; - padding: 0 8px; - z-index: 10; - line-height: var(--menu-bar-height); - cursor: pointer; - transition: color 0.5s; -} -@media only screen and (max-width: 420px) { - #menu-bar i, #menu-bar .icon-button { - padding: 0 5px; - } -} - -.icon-button { - border: none; - background: none; - padding: 0; - color: inherit; -} -.icon-button i { - margin: 0; -} - -.right-buttons { - margin: 0 15px; -} -.right-buttons a { - text-decoration: none; -} - -.left-buttons { - display: flex; - margin: 0 5px; -} -.no-js .left-buttons { - display: none; -} - -.menu-title { - display: inline-block; - font-weight: 200; - font-size: 2rem; - line-height: var(--menu-bar-height); - text-align: center; - margin: 0; - flex: 1; - white-space: nowrap; - overflow: hidden; - text-overflow: ellipsis; -} -.js .menu-title { - cursor: pointer; -} - -.menu-bar, -.menu-bar:visited, -.nav-chapters, -.nav-chapters:visited, -.mobile-nav-chapters, -.mobile-nav-chapters:visited, -.menu-bar .icon-button, -.menu-bar a i { - color: var(--icons); -} - -.menu-bar i:hover, -.menu-bar .icon-button:hover, -.nav-chapters:hover, -.mobile-nav-chapters i:hover { - color: var(--icons-hover); -} - -/* Nav Icons */ - -.nav-chapters { - font-size: 2.5em; - text-align: center; - text-decoration: none; - - position: fixed; - top: 0; - bottom: 0; - margin: 0; - max-width: 150px; - min-width: 90px; - - display: flex; - justify-content: center; - align-content: center; - flex-direction: column; - - transition: color 0.5s, background-color 0.5s; -} - -.nav-chapters:hover { - text-decoration: none; - background-color: var(--theme-hover); - transition: background-color 0.15s, color 0.15s; -} - -.nav-wrapper { - margin-top: 50px; - display: none; -} - -.mobile-nav-chapters { - font-size: 2.5em; - text-align: center; - text-decoration: none; - width: 90px; - border-radius: 5px; - background-color: var(--nav-chapter); -} - -.previous { - float: left; -} - -.next { - float: right; - right: var(--page-padding); -} - -@media only screen and (max-width: 1080px) { - .nav-wide-wrapper { display: none; } - .nav-wrapper { display: block; } -} - -@media only screen and (max-width: 1380px) { - .sidebar-visible .nav-wide-wrapper { display: none; } - .sidebar-visible .nav-wrapper { display: block; } -} - -/* Inline code */ - -:not(pre) > .hljs { - display: inline; - padding: 0.1em 0.3em; - border-radius: 3px; -} - -:not(pre):not(a) > .hljs { - color: var(--inline-code-color); - overflow-x: initial; -} - 
-a:hover > .hljs { - text-decoration: underline; -} - -pre { - position: relative; -} -pre > .buttons { - position: absolute; - z-index: 100; - right: 5px; - top: 5px; - - color: var(--buttons); - cursor: pointer; -} -pre > .buttons :hover { - color: var(--buttons-active); -} -pre > .buttons i { - margin-left: 8px; -} -pre > .buttons button { - color: inherit; - background: transparent; - border: none; - cursor: inherit; -} -pre > .result { - margin-top: 10px; -} - -/* Search */ - -#searchresults a { - text-decoration: none; -} - -mark { - border-radius: 2px; - padding: 0 3px 1px 3px; - margin: 0 -3px -1px -3px; - background-color: var(--search-mark-bg); - transition: background-color 300ms linear; - cursor: pointer; -} - -mark.fade-out { - background-color: rgba(0,0,0,0) !important; - cursor: auto; -} - -.searchbar-outer { - margin-left: auto; - margin-right: auto; - max-width: var(--content-max-width); -} - -#searchbar { - width: 100%; - margin: 5px auto 0px auto; - padding: 10px 16px; - transition: box-shadow 300ms ease-in-out; - border: 1px solid var(--searchbar-border-color); - border-radius: 3px; - background-color: var(--searchbar-bg); - color: var(--searchbar-fg); -} -#searchbar:focus, -#searchbar.active { - box-shadow: 0 0 3px var(--searchbar-shadow-color); -} - -.searchresults-header { - font-weight: bold; - font-size: 1em; - padding: 18px 0 0 5px; - color: var(--searchresults-header-fg); -} - -.searchresults-outer { - margin-left: auto; - margin-right: auto; - max-width: var(--content-max-width); - border-bottom: 1px dashed var(--searchresults-border-color); -} - -ul#searchresults { - list-style: none; - padding-left: 20px; -} -ul#searchresults li { - margin: 10px 0px; - padding: 2px; - border-radius: 2px; -} -ul#searchresults li.focus { - background-color: var(--searchresults-li-bg); -} -ul#searchresults span.teaser { - display: block; - clear: both; - margin: 5px 0 0 20px; - font-size: 0.8em; -} -ul#searchresults span.teaser em { - font-weight: bold; - font-style: normal; -} - -/* Sidebar */ - -.sidebar { - position: fixed; - left: 0; - top: 0; - bottom: 0; - width: var(--sidebar-width); - font-size: 0.875em; - box-sizing: border-box; - -webkit-overflow-scrolling: touch; - overscroll-behavior-y: contain; - background-color: var(--sidebar-bg); - color: var(--sidebar-fg); -} -.sidebar-resizing { - -moz-user-select: none; - -webkit-user-select: none; - -ms-user-select: none; - user-select: none; -} -.js:not(.sidebar-resizing) .sidebar { - transition: transform 0.3s; /* Animation: slide away */ -} -.sidebar code { - line-height: 2em; -} -.sidebar .sidebar-scrollbox { - overflow-y: auto; - position: absolute; - top: 0; - bottom: 0; - left: 0; - right: 0; - padding: 10px 10px; -} -.sidebar .sidebar-resize-handle { - position: absolute; - cursor: col-resize; - width: 0; - right: 0; - top: 0; - bottom: 0; -} -.js .sidebar .sidebar-resize-handle { - cursor: col-resize; - width: 5px; -} -.sidebar-hidden .sidebar { - transform: translateX(calc(0px - var(--sidebar-width))); -} -.sidebar::-webkit-scrollbar { - background: var(--sidebar-bg); -} -.sidebar::-webkit-scrollbar-thumb { - background: var(--scrollbar); -} - -.sidebar-visible .page-wrapper { - transform: translateX(var(--sidebar-width)); -} -@media only screen and (min-width: 620px) { - .sidebar-visible .page-wrapper { - transform: none; - margin-left: var(--sidebar-width); - } -} - -.chapter { - list-style: none outside none; - padding-left: 0; - line-height: 2.2em; -} - -.chapter ol { - width: 100%; -} - -.chapter li { - display: 
flex; - color: var(--sidebar-non-existant); -} -.chapter li a { - display: block; - padding: 0; - text-decoration: none; - color: var(--sidebar-fg); -} - -.chapter li a:hover { - color: var(--sidebar-active); -} - -.chapter li a.active { - color: var(--sidebar-active); -} - -.chapter li > a.toggle { - cursor: pointer; - display: block; - margin-left: auto; - padding: 0 10px; - user-select: none; - opacity: 0.68; -} - -.chapter li > a.toggle div { - transition: transform 0.5s; -} - -/* collapse the section */ -.chapter li:not(.expanded) + li > ol { - display: none; -} - -.chapter li.chapter-item { - line-height: 1.5em; - margin-top: 0.6em; -} - -.chapter li.expanded > a.toggle div { - transform: rotate(90deg); -} - -.spacer { - width: 100%; - height: 3px; - margin: 5px 0px; -} -.chapter .spacer { - background-color: var(--sidebar-spacer); -} - -@media (-moz-touch-enabled: 1), (pointer: coarse) { - .chapter li a { padding: 5px 0; } - .spacer { margin: 10px 0; } -} - -.section { - list-style: none outside none; - padding-left: 20px; - line-height: 1.9em; -} - -/* Theme Menu Popup */ - -.theme-popup { - position: absolute; - left: 10px; - top: var(--menu-bar-height); - z-index: 1000; - border-radius: 4px; - font-size: 0.7em; - color: var(--fg); - background: var(--theme-popup-bg); - border: 1px solid var(--theme-popup-border); - margin: 0; - padding: 0; - list-style: none; - display: none; -} -.theme-popup .default { - color: var(--icons); -} -.theme-popup .theme { - width: 100%; - border: 0; - margin: 0; - padding: 2px 10px; - line-height: 25px; - white-space: nowrap; - text-align: left; - cursor: pointer; - color: inherit; - background: inherit; - font-size: inherit; -} -.theme-popup .theme:hover { - background-color: var(--theme-hover); -} -.theme-popup .theme:hover:first-child, -.theme-popup .theme:hover:last-child { - border-top-left-radius: inherit; - border-top-right-radius: inherit; -} diff --git a/guide/theme/css/variables.css b/guide/theme/css/variables.css deleted file mode 100644 index 776cb8c9bf..0000000000 --- a/guide/theme/css/variables.css +++ /dev/null @@ -1,257 +0,0 @@ - -/* Globals */ - -:root { - --sidebar-width: 300px; - --page-padding: 15px; - --content-max-width: 750px; - --menu-bar-height: 50px; -} -/* Themes */ - -.ayu { - --bg: hsl(210, 25%, 8%); - --fg: #c5c5c5; - - --sidebar-bg: #14191f; - --sidebar-fg: #c8c9db; - --sidebar-non-existant: #5c6773; - --sidebar-active: #ffb454; - --sidebar-spacer: #2d334f; - - --scrollbar: var(--sidebar-fg); - - --icons: #737480; - --icons-hover: #b7b9cc; - - --links: #0096cf; - - --inline-code-color: #ffb454; - - --theme-popup-bg: #14191f; - --theme-popup-border: #5c6773; - --theme-hover: #191f26; - - --quote-bg: hsl(226, 15%, 17%); - --quote-border: hsl(226, 15%, 22%); - - --table-border-color: hsl(210, 25%, 13%); - --table-header-bg: hsl(210, 25%, 28%); - --table-alternate-bg: hsl(210, 25%, 11%); - - --searchbar-border-color: #848484; - --searchbar-bg: #424242; - --searchbar-fg: #fff; - --searchbar-shadow-color: #d4c89f; - --searchresults-header-fg: #666; - --searchresults-border-color: #888; - --searchresults-li-bg: #252932; - --search-mark-bg: #e3b171; -} - -.coal { - --bg: hsl(200, 7%, 8%); - --fg: #98a3ad; - - --sidebar-bg: #292c2f; - --sidebar-fg: #a1adb8; - --sidebar-non-existant: #505254; - --sidebar-active: #3473ad; - --sidebar-spacer: #393939; - - --scrollbar: var(--sidebar-fg); - - --icons: #43484d; - --icons-hover: #b3c0cc; - - --links: #2b79a2; - - --inline-code-color: #c5c8c6;; - - --theme-popup-bg: #141617; - 
--theme-popup-border: #43484d; - --theme-hover: #1f2124; - - --quote-bg: hsl(234, 21%, 18%); - --quote-border: hsl(234, 21%, 23%); - - --table-border-color: hsl(200, 7%, 13%); - --table-header-bg: hsl(200, 7%, 28%); - --table-alternate-bg: hsl(200, 7%, 11%); - - --searchbar-border-color: #aaa; - --searchbar-bg: #b7b7b7; - --searchbar-fg: #000; - --searchbar-shadow-color: #aaa; - --searchresults-header-fg: #666; - --searchresults-border-color: #98a3ad; - --searchresults-li-bg: #2b2b2f; - --search-mark-bg: #355c7d; -} - -.light { - --bg: hsl(0, 0%, 97%); - --fg: #181818; - - --sidebar-bg: #335787; - --sidebar-fg: #ffffff; - --sidebar-non-existant: #aaaaaa; - --sidebar-active: #9cefff; - --sidebar-spacer: #e5e5e5; - - --buttons: #898989; - --buttons-active: #8faaef; - - --scrollbar: var(--sidebar-fg); - - --icons: #2f2f2f; - --icons-hover: #333333; - - --nav-chapter: #82aee8; - - --links: #267CB9; - - --inline-code-color: #2d2b26; - - --theme-popup-bg: #fafafa; - --theme-popup-border: #cccccc; - --theme-hover: #e6e6e6; - - --quote-bg: hsl(212, 52%, 86%); - --quote-border: hsl(220, 83%, 67%); - - --table-border-color: hsl(0, 0%, 95%); - --table-header-bg: hsl(0, 0%, 80%); - --table-alternate-bg: hsl(0, 0%, 97%); - - --searchbar-border-color: #aaa; - --searchbar-bg: #fafafa; - --searchbar-fg: #000; - --searchbar-shadow-color: #aaa; - --searchresults-header-fg: #666; - --searchresults-border-color: #888; - --searchresults-li-bg: #e4f2fe; - --search-mark-bg: #a2cff5; -} - -.navy { - --bg: hsl(226, 23%, 11%); - --fg: #bcbdd0; - - --sidebar-bg: #282d3f; - --sidebar-fg: #c8c9db; - --sidebar-non-existant: #505274; - --sidebar-active: #2b79a2; - --sidebar-spacer: #2d334f; - - --scrollbar: var(--sidebar-fg); - - --icons: #737480; - --icons-hover: #b7b9cc; - - --links: #2b79a2; - - --inline-code-color: #c5c8c6;; - - --theme-popup-bg: #161923; - --theme-popup-border: #737480; - --theme-hover: #282e40; - - --quote-bg: hsl(226, 15%, 17%); - --quote-border: hsl(226, 15%, 22%); - - --table-border-color: hsl(226, 23%, 16%); - --table-header-bg: hsl(226, 23%, 31%); - --table-alternate-bg: hsl(226, 23%, 14%); - - --searchbar-border-color: #aaa; - --searchbar-bg: #aeaec6; - --searchbar-fg: #000; - --searchbar-shadow-color: #aaa; - --searchresults-header-fg: #5f5f71; - --searchresults-border-color: #5c5c68; - --searchresults-li-bg: #242430; - --search-mark-bg: #a2cff5; -} - -.rust { - --bg: hsl(60, 9%, 87%); - --fg: #262625; - - --sidebar-bg: #3b2e2a; - --sidebar-fg: #c8c9db; - --sidebar-non-existant: #505254; - --sidebar-active: #e69f67; - --sidebar-spacer: #45373a; - - --scrollbar: var(--sidebar-fg); - - --icons: #737480; - --icons-hover: #262625; - - --links: #2b79a2; - - --inline-code-color: #6e6b5e; - - --theme-popup-bg: #e1e1db; - --theme-popup-border: #b38f6b; - --theme-hover: #99908a; - - --quote-bg: hsl(60, 5%, 75%); - --quote-border: hsl(60, 5%, 70%); - - --table-border-color: hsl(60, 9%, 82%); - --table-header-bg: #b3a497; - --table-alternate-bg: hsl(60, 9%, 84%); - - --searchbar-border-color: #aaa; - --searchbar-bg: #fafafa; - --searchbar-fg: #000; - --searchbar-shadow-color: #aaa; - --searchresults-header-fg: #666; - --searchresults-border-color: #888; - --searchresults-li-bg: #dec2a2; - --search-mark-bg: #e69f67; -} - -@media (prefers-color-scheme: dark) { - .light.no-js { - --bg: hsl(200, 7%, 8%); - --fg: #98a3ad; - - --sidebar-bg: #292c2f; - --sidebar-fg: #a1adb8; - --sidebar-non-existant: #505254; - --sidebar-active: #3473ad; - --sidebar-spacer: #393939; - - --scrollbar: var(--sidebar-fg); - - 
--icons: #43484d; - --icons-hover: #b3c0cc; - - --links: #2b79a2; - - --inline-code-color: #c5c8c6;; - - --theme-popup-bg: #141617; - --theme-popup-border: #43484d; - --theme-hover: #1f2124; - - --quote-bg: hsl(234, 21%, 18%); - --quote-border: hsl(234, 21%, 23%); - - --table-border-color: hsl(200, 7%, 13%); - --table-header-bg: hsl(200, 7%, 28%); - --table-alternate-bg: hsl(200, 7%, 11%); - - --searchbar-border-color: #aaa; - --searchbar-bg: #b7b7b7; - --searchbar-fg: #000; - --searchbar-shadow-color: #aaa; - --searchresults-header-fg: #666; - --searchresults-border-color: #98a3ad; - --searchresults-li-bg: #2b2b2f; - --search-mark-bg: #355c7d; - } -} diff --git a/relayer-cli/.gitignore b/relayer-cli/.gitignore deleted file mode 100644 index 27128c443f..0000000000 --- a/relayer-cli/.gitignore +++ /dev/null @@ -1,5 +0,0 @@ -/target -**/*.rs.bk - -*.sh -*.db diff --git a/relayer-cli/Cargo.toml b/relayer-cli/Cargo.toml deleted file mode 100644 index 9ad50602e4..0000000000 --- a/relayer-cli/Cargo.toml +++ /dev/null @@ -1,90 +0,0 @@ -[package] -name = "ibc-relayer-cli" -version = "0.15.0" -edition = "2021" -license = "Apache-2.0" -readme = "README.md" -keywords = ["blockchain", "consensus", "cosmos", "ibc", "tendermint"] -homepage = "https://hermes.informal.systems/" -repository = "https://github.com/informalsystems/ibc-rs" -authors = ["Informal Systems "] -rust-version = "1.60" -description = """ - Hermes is an IBC Relayer written in Rust -""" -default-run = "hermes" - -[[bin]] -name = "hermes" - -[features] -default = ["telemetry", "rest-server", "std", "eyre_tracer",] -std = ["flex-error/std"] -eyre_tracer = ["flex-error/eyre_tracer"] -profiling = ["ibc-relayer/profiling"] -telemetry = ["ibc-relayer/telemetry", "ibc-telemetry"] -rest-server = ["ibc-relayer-rest"] - -[dependencies] -ibc = { version = "0.15.0", path = "../modules", features = ["std", "clock"] } -ibc-relayer = { version = "0.15.0", path = "../relayer" } -ibc-proto = { version = "0.18.0", path = "../proto" } -ibc-telemetry = { version = "0.15.0", path = "../telemetry", optional = true } -ibc-relayer-rest = { version = "0.15.0", path = "../relayer-rest", optional = true } - -clap = { version = "3.2", features = ["cargo"] } -clap_complete = "3.2" -humantime = "2.1" -serde = { version = "1.0", features = ["serde_derive"] } -tokio = { version = "1.0", features = ["full"] } -tracing = "0.1.34" -tracing-subscriber = { version = "0.3.11", features = ["fmt", "env-filter", "json"]} -eyre = "0.6.8" -color-eyre = "0.6.2" -oneline-eyre = "0.1" -futures = "0.3.21" -toml = "0.5.9" -serde_derive = "1.0.116" -serde_json = "1" -hex = "0.4" -crossbeam-channel = "0.5.4" -subtle-encoding = "0.5" -dirs-next = "2.0.0" -itertools = "0.10.3" -atty = "0.2.14" -flex-error = { version = "0.4.4", default-features = false, features = ["std", "eyre_tracer"] } -signal-hook = "0.3.14" -dialoguer = "0.10.1" -console = "0.15.0" - -[dependencies.tendermint-proto] -git = "https://github.com/composableFi/tendermint-rs" -rev = "5a74e0f8da4d3dab83cc04b5f1363b018cf3d9e8" - -[dependencies.tendermint] -git = "https://github.com/composableFi/tendermint-rs" -rev = "5a74e0f8da4d3dab83cc04b5f1363b018cf3d9e8" -features = ["secp256k1"] - -[dependencies.tendermint-rpc] -git = "https://github.com/composableFi/tendermint-rs" -rev = "5a74e0f8da4d3dab83cc04b5f1363b018cf3d9e8" -features = ["http-client", "websocket-client"] - -[dependencies.tendermint-light-client] -git = "https://github.com/composableFi/tendermint-rs" -rev = "5a74e0f8da4d3dab83cc04b5f1363b018cf3d9e8" -features = 
["unstable"] - -[dependencies.tendermint-light-client-verifier] -git = "https://github.com/composableFi/tendermint-rs" -rev = "5a74e0f8da4d3dab83cc04b5f1363b018cf3d9e8" - -[dependencies.abscissa_core] -version = "=0.6.0" -features = ["options"] - -[dev-dependencies] -abscissa_core = { version = "=0.6.0", features = ["testing"] } -once_cell = "1.12" -regex = "1.5" diff --git a/relayer-cli/README.md b/relayer-cli/README.md deleted file mode 100644 index 47e5a9c17a..0000000000 --- a/relayer-cli/README.md +++ /dev/null @@ -1,44 +0,0 @@ -# Hermes: IBC Relayer CLI - -[![Crate][crate-image]][crate-link] -[![Docs][docs-image]][docs-link] -[![Build Status][build-image]][build-link] -[![End to End testing][e2e-image]][e2e-link] -[![Apache 2.0 Licensed][license-image]][license-link] -![Rust Stable][rustc-image] -![Rust 1.51+][rustc-version] - -This is the repository for the CLI of the IBC Relayer built in Rust, called -`hermes`. - -For any information about the relayer binary, please read the comprehensive -guide available at [hermes.informal.systems](https://hermes.informal.systems). - - -## License - -Copyright © 2021 Informal Systems Inc. and ibc-rs authors. - -Licensed under the Apache License, Version 2.0 (the "License"); you may not use the files in this repository except in compliance with the License. You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - -[//]: # (badges) - -[crate-image]: https://img.shields.io/crates/v/ibc-relayer-cli.svg -[crate-link]: https://crates.io/crates/ibc-relayer-cli -[docs-image]: https://docs.rs/ibc-relayer-cli/badge.svg -[docs-link]: https://docs.rs/ibc-relayer-cli/ - -[build-image]: https://github.com/informalsystems/ibc-rs/workflows/Rust/badge.svg -[build-link]: https://github.com/informalsystems/ibc-rs/actions?query=workflow%3ARust -[e2e-image]: https://github.com/informalsystems/ibc-rs/workflows/End%20to%20End%20testing/badge.svg -[e2e-link]: https://github.com/informalsystems/ibc-rs/actions?query=workflow%3A%22End+to+End+testing%22 - -[license-image]: https://img.shields.io/badge/license-Apache2.0-blue.svg -[license-link]: https://github.com/informalsystems/ibc-rs/blob/master/LICENSE -[rustc-image]: https://img.shields.io/badge/rustc-stable-blue.svg -[rustc-version]: https://img.shields.io/badge/rustc-1.51+-blue.svg - diff --git a/relayer-cli/build.rs b/relayer-cli/build.rs deleted file mode 100644 index 29413121ab..0000000000 --- a/relayer-cli/build.rs +++ /dev/null @@ -1,81 +0,0 @@ -use std::env; - -use git::Handle as GitHandle; - -fn main() { - // Overwrites the package `version` to include metadata, and package `name` to be 'hermes' - // (the binary name), instead of 'ibc-relayer-cli' (which is the crate name), so that abscissa - // outputs consistent usage and help messages. - // https://github.com/informalsystems/ibc-rs/issues/590 - // Note: This can potentially break the normal cargo (or crates.io) workflow. - println!("cargo:rustc-env=CARGO_PKG_NAME=hermes"); - println!("cargo:rustc-env=CARGO_PKG_VERSION={}", version()); -} - -// returns a valid semver string optionally suffixed with the current git branch and last commit -// hash. e.g. 
0.4.0 or 0.4.0+cli-git-hash-eb0e94fc-dirty -fn version() -> String { - let mut vers = env::var("CARGO_PKG_VERSION").unwrap(); - if !is_ci_release() { - if let Some(git) = GitHandle::new() { - println!("cargo:rustc-rerun-if-changed=.git/HEAD"); - vers.push('+'); - vers.push_str(&git.last_commit_hash()); - if git.is_dirty() { - vers.push_str("-dirty"); - } - vers = vers.replace('\n', ""); - } - } - vers -} - -fn is_ci_release() -> bool { - matches!(env::var("GITHUB_JOB"), Ok(job) if job == "create-release") -} - -mod git { - use core::marker::PhantomData; - use std::ffi::OsStr; - use std::process::{Command, Output}; - - // A wrapper over a git shell command that is only constructable if git is available & the - // current directory is a git repository - pub struct Handle(PhantomData); - - impl Handle { - pub fn new() -> Option { - if Self::is_git_repo() { - Some(Handle(PhantomData)) - } else { - None - } - } - - // Returns the short hash of the last git commit - pub fn last_commit_hash(&self) -> String { - let commit = Self::command(&["rev-parse", "--short", "HEAD"]); - String::from_utf8_lossy(&commit.stdout).into_owned() - } - - // Checks if the git repo is dirty - pub fn is_dirty(&self) -> bool { - !Self::command(&["diff-index", "--quiet", "HEAD", "--"]) - .status - .success() - } - - #[inline] - fn command(args: impl IntoIterator>) -> Output { - Command::new("git").args(args).output().unwrap() - } - - // returns true iff git is installed and current directory is a git repo - fn is_git_repo() -> bool { - Command::new("git") - .args(&["rev-parse", "--git-dir"]) - .output() - .map_or(false, |o| o.status.success()) - } - } -} diff --git a/relayer-cli/src/application.rs b/relayer-cli/src/application.rs deleted file mode 100644 index ff478e148d..0000000000 --- a/relayer-cli/src/application.rs +++ /dev/null @@ -1,190 +0,0 @@ -//! Cli Abscissa Application - -use std::path::PathBuf; - -use abscissa_core::{ - application::{self, AppCell}, - component::Component, - config::{self, CfgCell}, - terminal::component::Terminal, - terminal::ColorChoice, - Application, Configurable, FrameworkError, FrameworkErrorKind, StandardPaths, -}; -use ibc_relayer::config::Config; - -use crate::{ - components::{JsonTracing, PrettyTracing}, - config::validate_config, - entry::EntryPoint, -}; - -/// Application state -pub static APPLICATION: AppCell = AppCell::new(); - -/// Obtain a read-only (multi-reader) lock on the application state. -/// -/// Panics if the application state has not been initialized. -pub fn app_reader() -> &'static CliApp { - &APPLICATION -} - -/// Obtain a read-only (multi-reader) lock on the application configuration. -/// -/// Panics if the application configuration has not been loaded. -pub fn app_config() -> config::Reader { - APPLICATION.config.read() -} - -/// Cli Application -#[derive(Debug)] -pub struct CliApp { - /// Application configuration. - config: CfgCell, - - /// Application state. - state: application::State, - - /// Toggle json output on/off. Changed with the global config option `-j` / `--json`. - json_output: bool, - - /// Path to the config file. - config_path: Option, -} - -/// Initialize a new application instance. -/// -/// By default no configuration is loaded, and the framework state is -/// initialized to a default, empty state (no components, threads, etc). 
-impl Default for CliApp { - fn default() -> Self { - Self { - config: CfgCell::default(), - state: application::State::default(), - json_output: false, - config_path: None, - } - } -} - -impl CliApp { - /// Whether or not JSON output is enabled - pub fn json_output(&self) -> bool { - self.json_output - } - - /// Returns the path to the configuration file - pub fn config_path(&self) -> Option<&PathBuf> { - self.config_path.as_ref() - } -} - -impl Application for CliApp { - /// Entrypoint command for this application. - type Cmd = EntryPoint; - - /// Application configuration. - type Cfg = Config; - - /// Paths to resources within the application. - type Paths = StandardPaths; - - /// Accessor for application configuration. - fn config(&self) -> config::Reader { - self.config.read() - } - - /// Borrow the application state immutably. - fn state(&self) -> &application::State { - &self.state - } - - /// Register all components used by this application. - /// - /// If you would like to add additional components to your application - /// beyond the default ones provided by the framework, this is the place - /// to do so. - fn register_components(&mut self, command: &Self::Cmd) -> Result<(), FrameworkError> { - let framework_components = self.framework_components(command)?; - let mut app_components = self.state.components_mut(); - app_components.register(framework_components) - } - - /// Post-configuration lifecycle callback. - /// - /// Called regardless of whether config is loaded to indicate this is the - /// time in app lifecycle when configuration would be loaded if - /// possible. - fn after_config(&mut self, config: Self::Cfg) -> Result<(), FrameworkError> { - use crate::config::Diagnostic; - - // Configure components - let mut components = self.state.components_mut(); - components.after_config(&config)?; - - if let Err(diagnostic) = validate_config(&config) { - match diagnostic { - Diagnostic::Warning(e) => { - tracing::warn!("relayer may be misconfigured: {}", e); - } - Diagnostic::Error(e) => { - return Err(FrameworkErrorKind::ConfigError.context(e).into()); - } - } - }; - - self.config.set_once(config); - - Ok(()) - } - - /// Overrides the default abscissa components, so that we can setup tracing on our own. See - /// also `register_components`. 
- fn framework_components( - &mut self, - command: &Self::Cmd, - ) -> Result>>, FrameworkError> { - let terminal = Terminal::new(self.term_colors(command)); - - let config_path = command.config_path(); - self.config_path = config_path.clone(); - - let config = config_path - .map(|path| self.load_config(&path)) - .transpose() - .map_err(|err| { - let path = self.config_path.clone().unwrap_or_default(); - eprintln!( - "The Hermes configuration file at path '{}' is invalid, reason: {}", - path.to_string_lossy(), - err - ); - eprintln!( - "Please see the example configuration for detailed information about the \ - supported configuration options: \ - https://github.com/informalsystems/ibc-rs/blob/master/config.toml" - ); - std::process::exit(1); - }) - .expect("invalid config") - .unwrap_or_default(); - - // Update the `json_output` flag used by `conclude::Output` - self.json_output = command.json; - - if command.json { - // Enable JSON by using the crate-level `Tracing` - let tracing = JsonTracing::new(config.global)?; - Ok(vec![Box::new(terminal), Box::new(tracing)]) - } else { - // Use abscissa's tracing, which pretty-prints to the terminal obeying log levels - let tracing = PrettyTracing::new(config.global)?; - Ok(vec![Box::new(terminal), Box::new(tracing)]) - } - } - - // Disable color support due to - // https://github.com/iqlusioninc/abscissa/issues/589 - fn term_colors(&self, _command: &Self::Cmd) -> ColorChoice { - ColorChoice::Never - } -} diff --git a/relayer-cli/src/bin/hermes/main.rs b/relayer-cli/src/bin/hermes/main.rs deleted file mode 100644 index 4cdb1744fc..0000000000 --- a/relayer-cli/src/bin/hermes/main.rs +++ /dev/null @@ -1,36 +0,0 @@ -//! Main entry point for Cli - -#![deny(warnings, missing_docs, trivial_casts, unused_qualifications)] -#![forbid(unsafe_code)] - -use ibc_relayer_cli::application::APPLICATION; -use ibc_relayer_cli::components::enable_ansi; - -fn main() -> eyre::Result<()> { - install_error_reporter()?; - - abscissa_core::boot(&APPLICATION); -} - -fn install_error_reporter() -> eyre::Result<()> { - if !backtrace_enabled() { - // If backtraces are disabled, display errors in single line. - oneline_eyre::install() - } else if enable_ansi() { - // Else, if backtraces are enabled and we are in a terminal - // supporting color, display full error logs in color. - color_eyre::install() - } else { - // Otherwise, backtraces are enabled and we are piping to logs, so use the - // default error report handler, which displays multiline errors - // without color. - Ok(()) - } -} - -fn backtrace_enabled() -> bool { - match std::env::var("RUST_BACKTRACE").as_deref() { - Ok("" | "0") | Err(_) => false, - Ok(_) => true, - } -} diff --git a/relayer-cli/src/cli_utils.rs b/relayer-cli/src/cli_utils.rs deleted file mode 100644 index 3be90d5eba..0000000000 --- a/relayer-cli/src/cli_utils.rs +++ /dev/null @@ -1,91 +0,0 @@ -use alloc::sync::Arc; - -use tokio::runtime::Runtime as TokioRuntime; - -use ibc::core::ics02_client::client_state::ClientState; -use ibc::core::ics24_host::identifier::{ChainId, ChannelId, PortId}; - -use ibc_relayer::{ - chain::{ - counterparty::{channel_connection_client, ChannelConnectionClient}, - handle::{BaseChainHandle, ChainHandle}, - }, - config::Config, - spawn, -}; - -use crate::error::Error; - -#[derive(Clone, Debug)] -/// Pair of chain handles that are used by most CLIs. 
-pub struct ChainHandlePair { - /// Source chain handle - pub src: Chain, - /// Destination chain handle - pub dst: Chain, -} - -impl ChainHandlePair { - /// Spawn the source and destination chain runtime from the configuration and chain identifiers, - /// and return the pair of associated handles. - pub fn spawn_generic( - config: &Config, - src_chain_id: &ChainId, - dst_chain_id: &ChainId, - ) -> Result { - let src = spawn_chain_runtime_generic(config, src_chain_id)?; - let dst = spawn_chain_runtime_generic(config, dst_chain_id)?; - - Ok(ChainHandlePair { src, dst }) - } -} - -impl ChainHandlePair { - pub fn spawn( - config: &Config, - src_chain_id: &ChainId, - dst_chain_id: &ChainId, - ) -> Result { - Self::spawn_generic(config, src_chain_id, dst_chain_id) - } -} - -/// Spawns a chain runtime from the configuration and given a chain identifier. -/// Returns the corresponding handle if successful. -pub fn spawn_chain_runtime(config: &Config, chain_id: &ChainId) -> Result { - spawn_chain_runtime_generic::(config, chain_id) -} - -pub fn spawn_chain_runtime_generic( - config: &Config, - chain_id: &ChainId, -) -> Result { - let rt = Arc::new(TokioRuntime::new().unwrap()); - spawn::spawn_chain_runtime(config, chain_id, rt).map_err(Error::spawn) -} - -/// Spawns a chain runtime for specified chain identifier, queries the counterparty chain associated -/// with specified port and channel id, and spawns a chain runtime for the counterparty chain. -/// Returns a tuple with a pair of associated chain handles and the ChannelEnd -pub fn spawn_chain_counterparty( - config: &Config, - chain_id: &ChainId, - port_id: &PortId, - channel_id: &ChannelId, -) -> Result<(ChainHandlePair, ChannelConnectionClient), Error> { - let chain = spawn_chain_runtime_generic::(config, chain_id)?; - let channel_connection_client = - channel_connection_client(&chain, port_id, channel_id).map_err(Error::supervisor)?; - let counterparty_chain = { - let counterparty_chain_id = channel_connection_client.client.client_state.chain_id(); - spawn_chain_runtime_generic::(config, &counterparty_chain_id)? - }; - - Ok(( - ChainHandlePair { - src: chain, - dst: counterparty_chain, - }, - channel_connection_client, - )) -} diff --git a/relayer-cli/src/commands.rs b/relayer-cli/src/commands.rs deleted file mode 100644 index c2da281dcc..0000000000 --- a/relayer-cli/src/commands.rs +++ /dev/null @@ -1,169 +0,0 @@ -//! Cli Subcommands -//! -//! This is where you specify the subcommands of your application. -//! -//! See the `impl Configurable` below for how to specify the path to the -//! application's configuration file. 
- -mod clear; -mod completions; -mod config; -mod create; -mod health; -mod keys; -mod listen; -mod misbehaviour; -mod query; -mod start; -mod tx; -mod update; -mod upgrade; -mod version; - -use self::{ - clear::ClearCmds, completions::CompletionsCmd, config::ConfigCmd, create::CreateCmds, - health::HealthCheckCmd, keys::KeysCmd, listen::ListenCmd, misbehaviour::MisbehaviourCmd, - query::QueryCmd, start::StartCmd, tx::TxCmd, update::UpdateCmds, upgrade::UpgradeCmds, - version::VersionCmd, -}; - -use core::time::Duration; -use std::path::PathBuf; - -use abscissa_core::clap::Parser; -use abscissa_core::{config::Override, Command, Configurable, FrameworkError, Runnable}; -use tracing::{error, info}; - -use crate::DEFAULT_CONFIG_PATH; -use ibc_relayer::config::Config; - -/// Default configuration file path -pub fn default_config_file() -> Option { - dirs_next::home_dir().map(|home| home.join(DEFAULT_CONFIG_PATH)) -} - -/// Cli Subcommands -#[derive(Command, Parser, Debug, Runnable)] -pub enum CliCmd { - /// Validate Hermes configuration file - #[clap(subcommand)] - Config(ConfigCmd), - - /// Manage keys in the relayer for each chain - #[clap(subcommand)] - Keys(KeysCmd), - - /// Create objects (client, connection, or channel) on chains - #[clap(subcommand)] - Create(CreateCmds), - - /// Update objects (clients) on chains - #[clap(subcommand)] - Update(UpdateCmds), - - /// Upgrade objects (clients) after chain upgrade - #[clap(subcommand)] - Upgrade(UpgradeCmds), - - /// Clear objects, such as outstanding packets on a channel. - #[clap(subcommand)] - Clear(ClearCmds), - - /// Start the relayer in multi-chain mode. - /// - /// Relays packets and open handshake messages between all chains in the config. - Start(StartCmd), - - /// Query objects from the chain - #[clap(subcommand)] - Query(QueryCmd), - - /// Create and send IBC transactions - #[clap(subcommand)] - Tx(TxCmd), - - /// Listen to and display IBC events emitted by a chain - Listen(ListenCmd), - - /// Listen to client update IBC events and handles misbehaviour - Misbehaviour(MisbehaviourCmd), - - /// The `version` subcommand, retained for backward compatibility. - Version(VersionCmd), - - /// Performs a health check of all chains in the the config - HealthCheck(HealthCheckCmd), - - /// Generate auto-complete scripts for different shells. - #[clap(display_order = 1000)] - Completions(CompletionsCmd), -} - -/// This trait allows you to define how application configuration is loaded. -impl Configurable for CliCmd { - /// Location of the configuration file - /// This is called only when the `-c` command-line option is omitted. - fn config_path(&self) -> Option { - let path = default_config_file(); - - match path { - Some(path) if path.exists() => { - info!("using default configuration from '{}'", path.display()); - Some(path) - } - Some(path) => { - // No file exists at the config path - error!("could not find configuration file at '{}'", path.display()); - error!("for an example, please see https://hermes.informal.systems/config.html#example-configuration-file"); - None - } - None => { - // The path to the default config file could not be found - error!("could not find default configuration file"); - error!( - "please create one at '~/{}' or specify it with the '-c'/'--config' flag", - DEFAULT_CONFIG_PATH - ); - error!("for an example, please see https://hermes.informal.systems/config.html#example-configuration-file"); - None - } - } - } - - /// Apply changes to the config after it's been loaded, e.g. 
overriding - /// values in a config file using command-line options. - /// - /// This can be safely deleted if you don't want to override config - /// settings from command-line options. - fn process_config(&self, mut config: Config) -> Result { - // Alter the memo for all chains to include a suffix with Hermes build details - let web = "https://hermes.informal.systems"; - let suffix = format!("{} {} ({})", CliCmd::name(), clap::crate_version!(), web); - for ccfg in config.chains.iter_mut() { - ccfg.memo_prefix.apply_suffix(&suffix); - } - - // For all commands except for `start` Hermes retries - // for a prolonged period of time. - if !matches!(self, CliCmd::Start(_)) { - for c in config.chains.iter_mut() { - c.rpc_timeout = Duration::from_secs(120); - } - } - - match self { - CliCmd::Tx(cmd) => cmd.override_config(config), - // CliCmd::Help(cmd) => cmd.override_config(config), - // CliCmd::Keys(cmd) => cmd.override_config(config), - // CliCmd::Create(cmd) => cmd.override_config(config), - // CliCmd::Update(cmd) => cmd.override_config(config), - // CliCmd::Upgrade(cmd) => cmd.override_config(config), - // CliCmd::Start(cmd) => cmd.override_config(config), - // CliCmd::Query(cmd) => cmd.override_config(config), - // CliCmd::Listen(cmd) => cmd.override_config(config), - // CliCmd::Misbehaviour(cmd) => cmd.override_config(config), - // CliCmd::Version(cmd) => cmd.override_config(config), - _ => Ok(config), - } - } -} diff --git a/relayer-cli/src/commands/clear.rs b/relayer-cli/src/commands/clear.rs deleted file mode 100644 index eff01641da..0000000000 --- a/relayer-cli/src/commands/clear.rs +++ /dev/null @@ -1,91 +0,0 @@ -use abscissa_core::clap::Parser; -use abscissa_core::{Command, Runnable}; - -use ibc::core::ics24_host::identifier::{ChainId, ChannelId, PortId}; -use ibc::events::IbcEvent; -use ibc_relayer::chain::handle::BaseChainHandle; -use ibc_relayer::link::error::LinkError; -use ibc_relayer::link::{Link, LinkParameters}; - -use crate::application::app_config; -use crate::cli_utils::spawn_chain_counterparty; -use crate::conclude::Output; -use crate::error::Error; - -/// `clear` subcommands -#[derive(Command, Debug, Parser, Runnable)] -pub enum ClearCmds { - /// Clear outstanding packets (i.e., packet-recv and packet-ack) - /// on a given channel in both directions. The channel is identified - /// by the chain, port, and channel IDs at one of its ends. - Packets(ClearPacketsCmd), -} - -#[derive(Debug, Parser)] -pub struct ClearPacketsCmd { - #[clap(required = true, help = "identifier of the chain")] - chain_id: ChainId, - - #[clap(required = true, help = "identifier of the port")] - port_id: PortId, - - #[clap(required = true, help = "identifier of the channel")] - channel_id: ChannelId, -} - -impl Runnable for ClearPacketsCmd { - fn run(&self) { - let config = app_config(); - - let chains = match spawn_chain_counterparty::( - &config, - &self.chain_id, - &self.port_id, - &self.channel_id, - ) { - Ok((chains, _)) => chains, - Err(e) => Output::error(format!("{}", e)).exit(), - }; - - let mut ev_list = vec![]; - - // Construct links in both directions. 
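        // A forward link is built from this chain's port/channel towards the
        // counterparty, then reversed to obtain the opposite direction, so
        // pending packets can be cleared both ways over the same channel.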
- let opts = LinkParameters { - src_port_id: self.port_id.clone(), - src_channel_id: self.channel_id, - }; - let fwd_link = match Link::new_from_opts(chains.src.clone(), chains.dst, opts, false) { - Ok(link) => link, - Err(e) => Output::error(format!("{}", e)).exit(), - }; - let rev_link = match fwd_link.reverse(false) { - Ok(link) => link, - Err(e) => Output::error(format!("{}", e)).exit(), - }; - - // Schedule RecvPacket messages for pending packets in both directions. - // This may produce pending acks which will be processed in the next phase. - run_and_collect_events(&mut ev_list, || { - fwd_link.relay_recv_packet_and_timeout_messages() - }); - run_and_collect_events(&mut ev_list, || { - rev_link.relay_recv_packet_and_timeout_messages() - }); - - // Schedule AckPacket messages in both directions. - run_and_collect_events(&mut ev_list, || fwd_link.relay_ack_packet_messages()); - run_and_collect_events(&mut ev_list, || rev_link.relay_ack_packet_messages()); - - Output::success(ev_list).exit() - } -} - -fn run_and_collect_events(ev_list: &mut Vec, f: F) -where - F: FnOnce() -> Result, LinkError>, -{ - match f() { - Ok(mut ev) => ev_list.append(&mut ev), - Err(e) => Output::error(Error::link(e)).exit(), - }; -} diff --git a/relayer-cli/src/commands/completions.rs b/relayer-cli/src/commands/completions.rs deleted file mode 100644 index 392cd9bb13..0000000000 --- a/relayer-cli/src/commands/completions.rs +++ /dev/null @@ -1,20 +0,0 @@ -use crate::entry::EntryPoint; -use abscissa_core::clap::Parser; -use abscissa_core::Runnable; -use clap::IntoApp; -use clap_complete::Shell; -use std::io; - -#[derive(Debug, Parser)] -pub struct CompletionsCmd { - #[clap(arg_enum)] - shell: Shell, -} - -impl Runnable for CompletionsCmd { - fn run(&self) { - let mut app = EntryPoint::command(); - let app_name = app.get_name().to_owned(); - clap_complete::generate(self.shell, &mut app, app_name, &mut io::stdout()); - } -} diff --git a/relayer-cli/src/commands/config.rs b/relayer-cli/src/commands/config.rs deleted file mode 100644 index 2359b389c0..0000000000 --- a/relayer-cli/src/commands/config.rs +++ /dev/null @@ -1,13 +0,0 @@ -//! `config` subcommand - -use abscissa_core::clap::Parser; -use abscissa_core::{Command, Runnable}; - -mod validate; - -/// `config` subcommand -#[derive(Command, Debug, Parser, Runnable)] -pub enum ConfigCmd { - /// Validate the relayer configuration - Validate(validate::ValidateCmd), -} diff --git a/relayer-cli/src/commands/config/validate.rs b/relayer-cli/src/commands/config/validate.rs deleted file mode 100644 index 671c4b0021..0000000000 --- a/relayer-cli/src/commands/config/validate.rs +++ /dev/null @@ -1,49 +0,0 @@ -use std::fs; - -use abscissa_core::clap::Parser; -use abscissa_core::{Command, Runnable}; - -use crate::conclude::Output; -use crate::config; -use crate::prelude::*; - -/// In order to validate the configuration file the command will check that the file exists, -/// that it is readable and not empty. It will then check the validity of the fields inside -/// the file. -#[derive(Command, Debug, Parser)] -pub struct ValidateCmd {} - -impl Runnable for ValidateCmd { - /// Validate the loaded configuration. - fn run(&self) { - let config = app_config(); - trace!("loaded configuration: {:#?}", *config); - - // Verify that the configuration file has been found. - match config::config_path() { - Some(p) => { - // If there is a configuration file, verify that it is readable and not empty. 
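                // The whole file is read into a string: an I/O error or an
                // empty file both terminate the command with an error output.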
- match fs::read_to_string(p.clone()) { - Ok(content) => { - if content.is_empty() { - Output::error("the configuration file is empty").exit(); - } - } - Err(e) => Output::error(format!( - "error reading the configuration file {:?}: {}", - p, e - )) - .exit(), - } - } - None => Output::error("no configuration file found").exit(), - } - - // No need to output the underlying error, this is done already when the application boots. - // See `application::CliApp::after_config`. - match config::validate_config(&config) { - Ok(_) => Output::success("configuration is valid").exit(), - Err(_) => Output::error("configuration is invalid").exit(), - } - } -} diff --git a/relayer-cli/src/commands/create.rs b/relayer-cli/src/commands/create.rs deleted file mode 100644 index f2e9ea3a95..0000000000 --- a/relayer-cli/src/commands/create.rs +++ /dev/null @@ -1,25 +0,0 @@ -//! `create` subcommand -use abscissa_core::clap::Parser; -use abscissa_core::{Command, Runnable}; - -use crate::commands::create::channel::CreateChannelCommand; -use crate::commands::create::connection::CreateConnectionCommand; -use crate::commands::tx::client::TxCreateClientCmd; - -mod channel; -mod connection; - -/// `create` subcommands -#[derive(Command, Debug, Parser, Runnable)] -pub enum CreateCmds { - /// Create a new IBC client - Client(TxCreateClientCmd), - - /// Create a new connection between two chains - Connection(CreateConnectionCommand), - - /// Create a new channel between two chains using a pre-existing connection. - /// Alternatively, create a new client and a new connection underlying - /// the new channel if a pre-existing connection is not provided. - Channel(CreateChannelCommand), -} diff --git a/relayer-cli/src/commands/create/channel.rs b/relayer-cli/src/commands/create/channel.rs deleted file mode 100644 index 837724b457..0000000000 --- a/relayer-cli/src/commands/create/channel.rs +++ /dev/null @@ -1,238 +0,0 @@ -use abscissa_core::clap::Parser; -use abscissa_core::{Command, Runnable}; - -use console::style; -use dialoguer::Confirm; - -use ibc::core::ics02_client::client_state::ClientState; -use ibc::core::ics03_connection::connection::IdentifiedConnectionEnd; -use ibc::core::ics04_channel::channel::Order; -use ibc::core::ics04_channel::Version; -use ibc::core::ics24_host::identifier::{ChainId, ConnectionId, PortId}; -use ibc::Height; -use ibc_relayer::chain::handle::ChainHandle; -use ibc_relayer::chain::requests::{IncludeProof, QueryClientStateRequest, QueryConnectionRequest}; -use ibc_relayer::channel::Channel; -use ibc_relayer::connection::Connection; -use ibc_relayer::foreign_client::ForeignClient; - -use crate::cli_utils::{spawn_chain_runtime, ChainHandlePair}; -use crate::conclude::{exit_with_unrecoverable_error, Output}; -use crate::prelude::*; -use ibc_relayer::config::default::connection_delay; - -static PROMPT: &str = "Are you sure you want a new connection & clients to be created? Hermes will use default security parameters."; -static HINT: &str = "Consider using the default invocation\n\nhermes create channel --port-a --port-b \n\nto re-use a pre-existing connection."; - -/// The data structure that represents all the possible options when invoking -/// the `create channel` CLI command. -/// -/// There are two possible ways to invoke this command: -/// -/// `create channel --port-a --port-b ` is the default -/// way in which this command should be used, specifying a `Connection-ID` for this new channel -/// to re-use. The command expects that `Connection-ID` is associated with chain A. 
-/// -/// `create channel --port-a --port-b --new-client-connection` -/// to indicate that a new connection/client pair is being created as part of this new channel. -/// This brings up an interactive yes/no prompt to ensure that the operator at least -/// considers the fact that they're initializing a new connection with the channel. -/// -/// Note that `Connection-ID`s have to be considered based off of the chain's perspective. Although -/// chain A and chain B might refer to the connection with different names, they are actually referring -/// to the same connection. -#[derive(Clone, Command, Debug, Parser)] -#[clap(disable_version_flag = true)] -pub struct CreateChannelCommand { - #[clap( - required = true, - help = "Identifier of the side `a` chain for the new channel" - )] - chain_a: ChainId, - - #[clap( - short, - long, - help = "Identifier of the side `b` chain for the new channel" - )] - chain_b: Option, - - /// Identifier of the connection on chain `a` to use in creating the new channel. - connection_a: Option, - - #[clap( - long, - required = true, - help = "Identifier of the side `a` port for the new channel" - )] - port_a: PortId, - - #[clap( - long, - required = true, - help = "Identifier of the side `b` port for the new channel" - )] - port_b: PortId, - - #[clap( - short, - long, - help = "The channel ordering, valid options 'unordered' (default) and 'ordered'", - default_value_t - )] - order: Order, - - #[clap( - short, - long = "channel-version", - alias = "version", - help = "The version for the new channel" - )] - version: Option, - - #[clap( - long, - help = "Indicates that a new client and connection will be created underlying the new channel" - )] - new_client_connection: bool, -} - -impl Runnable for CreateChannelCommand { - fn run(&self) { - match &self.connection_a { - Some(conn) => self.run_reusing_connection(conn), - None => match &self.chain_b { - Some(chain_b) => { - if self.new_client_connection { - match Confirm::new() - .with_prompt(format!( - "{}: {}\n{}: {}", - style("WARN").yellow(), - PROMPT, - style("Hint").cyan(), - HINT - )) - .interact() - { - Ok(confirm) => { - if confirm { - self.run_using_new_connection(chain_b); - } else { - Output::error("You elected not to create new clients and connections. Please re-invoke `create channel` with a pre-existing connection ID".to_string()).exit(); - } - } - Err(e) => { - Output::error(format!( - "An error occurred while waiting for user input: {}", - e - )); - } - } - } else { - Output::error( - "The `--new-client-connection` flag is required if invoking with `--chain-b`".to_string() - ) - .exit(); - } - } - None => Output::error("Missing one of `` or ``".to_string()) - .exit(), - }, - } - } -} - -impl CreateChannelCommand { - /// Creates a new channel, as well as a new underlying connection and clients. - fn run_using_new_connection(&self, chain_b: &ChainId) { - let config = app_config(); - - let chains = ChainHandlePair::spawn(&config, &self.chain_a, chain_b) - .unwrap_or_else(exit_with_unrecoverable_error); - - info!( - "Creating new clients, new connection, and a new channel with order {}", - self.order - ); - - let client_a = ForeignClient::new(chains.src.clone(), chains.dst.clone()) - .unwrap_or_else(exit_with_unrecoverable_error); - let client_b = ForeignClient::new(chains.dst.clone(), chains.src) - .unwrap_or_else(exit_with_unrecoverable_error); - - // Create the connection. 
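        // The connection handshake runs with the default connection delay;
        // the resulting connection is then used to open the channel with the
        // requested ordering, ports and version.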
- let con = Connection::new(client_a, client_b, connection_delay()) - .unwrap_or_else(exit_with_unrecoverable_error); - - // Finally create the channel. - let channel = Channel::new( - con, - self.order, - self.port_a.clone(), - self.port_b.clone(), - self.version.clone(), - ) - .unwrap_or_else(exit_with_unrecoverable_error); - - Output::success(channel).exit(); - } - - /// Creates a new channel, reusing an already existing connection and its clients. - fn run_reusing_connection(&self, connection_a: &ConnectionId) { - let config = app_config(); - - // Validate & spawn runtime for side a. - let chain_a = spawn_chain_runtime(&config, &self.chain_a) - .unwrap_or_else(exit_with_unrecoverable_error); - - // Query the connection end. - let height = Height::new(chain_a.id().version(), 0); - let (conn_end, _) = chain_a - .query_connection( - QueryConnectionRequest { - connection_id: connection_a.clone(), - height, - }, - IncludeProof::No, - ) - .unwrap_or_else(exit_with_unrecoverable_error); - - // Query the client state, obtain the identifier of chain b. - let chain_b = chain_a - .query_client_state( - QueryClientStateRequest { - client_id: conn_end.client_id().clone(), - height, - }, - IncludeProof::No, - ) - .map(|(cs, _)| cs.chain_id()) - .unwrap_or_else(exit_with_unrecoverable_error); - - // Spawn the runtime for side b. - let chain_b = - spawn_chain_runtime(&config, &chain_b).unwrap_or_else(exit_with_unrecoverable_error); - - // Create the foreign client handles. - let client_a = ForeignClient::find(chain_b.clone(), chain_a.clone(), conn_end.client_id()) - .unwrap_or_else(exit_with_unrecoverable_error); - let client_b = ForeignClient::find(chain_a, chain_b, conn_end.counterparty().client_id()) - .unwrap_or_else(exit_with_unrecoverable_error); - - let identified_end = IdentifiedConnectionEnd::new(connection_a.clone(), conn_end); - - let connection = Connection::find(client_a, client_b, &identified_end) - .unwrap_or_else(exit_with_unrecoverable_error); - - let channel = Channel::new( - connection, - self.order, - self.port_a.clone(), - self.port_b.clone(), - self.version.clone(), - ) - .unwrap_or_else(exit_with_unrecoverable_error); - - Output::success(channel).exit(); - } -} diff --git a/relayer-cli/src/commands/create/connection.rs b/relayer-cli/src/commands/create/connection.rs deleted file mode 100644 index f694a5538d..0000000000 --- a/relayer-cli/src/commands/create/connection.rs +++ /dev/null @@ -1,166 +0,0 @@ -use core::time::Duration; - -use abscissa_core::clap::Parser; -use abscissa_core::{Command, Runnable}; - -use ibc::core::ics02_client::client_state::ClientState; -use ibc::core::ics24_host::identifier::{ChainId, ClientId}; -use ibc::Height; -use ibc_relayer::chain::handle::ChainHandle; -use ibc_relayer::chain::requests::{IncludeProof, QueryClientStateRequest}; -use ibc_relayer::connection::Connection; -use ibc_relayer::foreign_client::ForeignClient; - -use crate::cli_utils::{spawn_chain_runtime, ChainHandlePair}; -use crate::conclude::{exit_with_unrecoverable_error, Output}; -use crate::prelude::*; - -#[derive(Clone, Command, Debug, Parser)] -pub struct CreateConnectionCommand { - #[clap( - required = true, - help = "identifier of the side `a` chain for the new connection" - )] - chain_a_id: ChainId, - - #[clap(help = "identifier of the side `b` chain for the new connection")] - chain_b_id: Option, - - #[clap( - long, - help = "identifier of client hosted on chain `a`; default: None (creates a new client)" - )] - client_a: Option, - - #[clap( - long, - help = "identifier of client 
hosted on chain `b`; default: None (creates a new client)" - )] - client_b: Option, - - #[clap( - long, - help = "delay period parameter for the new connection (seconds)", - default_value = "0" - )] - delay: u64, -} - -// cargo run --bin hermes -- create connection ibc-0 ibc-1 -// cargo run --bin hermes -- create connection ibc-0 ibc-1 --delay 100 -// cargo run --bin hermes -- create connection ibc-0 --client-a-id 07-tendermint-0 --client-b-id 07-tendermint-0 -impl Runnable for CreateConnectionCommand { - fn run(&self) { - match &self.chain_b_id { - Some(side_b) => self.run_using_new_clients(side_b), - None => self.run_reusing_clients(), - } - } -} - -impl CreateConnectionCommand { - /// Creates a connection that uses newly created clients on each side. - fn run_using_new_clients(&self, chain_b_id: &ChainId) { - let config = app_config(); - - let chains = ChainHandlePair::spawn(&config, &self.chain_a_id, chain_b_id) - .unwrap_or_else(exit_with_unrecoverable_error); - - // Validate the other options. Bail if the CLI was invoked with incompatible options. - if self.client_a.is_some() { - Output::error("Option `` is incompatible with `--client-a`".to_string()) - .exit(); - } - if self.client_b.is_some() { - Output::error("Option `` is incompatible with `--client-b`".to_string()) - .exit(); - } - - info!( - "Creating new clients hosted on chains {} and {}", - self.chain_a_id, chain_b_id - ); - - let client_a = ForeignClient::new(chains.src.clone(), chains.dst.clone()) - .unwrap_or_else(exit_with_unrecoverable_error); - let client_b = ForeignClient::new(chains.dst.clone(), chains.src) - .unwrap_or_else(exit_with_unrecoverable_error); - - // Finally, execute the connection handshake. - let delay = Duration::from_secs(self.delay); - match Connection::new(client_a, client_b, delay) { - Ok(conn) => Output::success(conn).exit(), - Err(e) => Output::error(format!("{}", e)).exit(), - } - } - - /// Create a connection reusing pre-existing clients on both chains. - fn run_reusing_clients(&self) { - let config = app_config(); - - // Validate & spawn runtime for chain_a. - let chain_a = match spawn_chain_runtime(&config, &self.chain_a_id) { - Ok(handle) => handle, - Err(e) => Output::error(format!("{}", e)).exit(), - }; - - // Unwrap the identifier of the client on chain_a. - let client_a_id = match &self.client_a { - Some(c) => c, - None => Output::error( - "Option `--client-a` is necessary when is missing".to_string(), - ) - .exit(), - }; - - // Query client state. Extract the target chain (chain_id which this client is verifying). - let height = Height::new(chain_a.id().version(), 0); - let chain_b_id = match chain_a.query_client_state( - QueryClientStateRequest { - client_id: client_a_id.clone(), - height, - }, - IncludeProof::No, - ) { - Ok((cs, _)) => cs.chain_id(), - Err(e) => Output::error(format!( - "failed while querying client '{}' on chain '{}' with error: {}", - client_a_id, self.chain_a_id, e - )) - .exit(), - }; - - // Validate & spawn runtime for chain_b. - let chain_b = match spawn_chain_runtime(&config, &chain_b_id) { - Ok(handle) => handle, - Err(e) => Output::error(format!("{}", e)).exit(), - }; - - // Unwrap the identifier of the client on chain_b. - let client_b_id = match &self.client_b { - Some(c) => c, - None => Output::error( - "Option `--client-b` is necessary when is missing".to_string(), - ) - .exit(), - }; - - info!( - "Creating a new connection with pre-existing clients {} and {}", - client_a_id, client_b_id - ); - - // Get the two ForeignClient objects. 
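        // Each client id is looked up and wrapped in a `ForeignClient` handle;
        // any lookup failure aborts the command.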
- let client_a = ForeignClient::find(chain_b.clone(), chain_a.clone(), client_a_id) - .unwrap_or_else(exit_with_unrecoverable_error); - let client_b = ForeignClient::find(chain_a, chain_b, client_b_id) - .unwrap_or_else(exit_with_unrecoverable_error); - - // All verification passed. Create the Connection object & do the handshake. - let delay = Duration::from_secs(self.delay); - match Connection::new(client_a, client_b, delay) { - Ok(conn) => Output::success(conn).exit(), - Err(e) => Output::error(format!("{}", e)).exit(), - } - } -} diff --git a/relayer-cli/src/commands/health.rs b/relayer-cli/src/commands/health.rs deleted file mode 100644 index badca4ff95..0000000000 --- a/relayer-cli/src/commands/health.rs +++ /dev/null @@ -1,41 +0,0 @@ -use abscissa_core::clap::Parser; -use abscissa_core::{Command, Runnable}; - -use ibc_relayer::chain::endpoint::HealthCheck::*; -use ibc_relayer::chain::handle::ChainHandle; - -use crate::cli_utils::spawn_chain_runtime; -use crate::conclude::{exit_with_unrecoverable_error, Output}; -use crate::prelude::*; - -#[derive(Clone, Command, Debug, Parser)] -pub struct HealthCheckCmd {} - -impl Runnable for HealthCheckCmd { - fn run(&self) { - let config = (*app_config()).clone(); - - for ch in &config.chains { - info!("[{}] performing health check...", ch.id); - - let chain = - spawn_chain_runtime(&config, &ch.id).unwrap_or_else(exit_with_unrecoverable_error); - - match chain.health_check() { - Ok(Healthy) => info!(chain = %ch.id, "chain is healthy"), - Ok(Unhealthy(_)) => { - // No need to print the error here as it's already printed in `Chain::health_check` - // TODO(romac): Move the printing code here and in the supervisor/registry - warn!("[{}] chain is unhealthy", ch.id) - } - Err(e) => error!( - "[{}] failed to perform health check, reason: {}", - ch.id, - e.detail() - ), - } - } - - Output::success_msg("performed health check for all chains in the config").exit() - } -} diff --git a/relayer-cli/src/commands/keys.rs b/relayer-cli/src/commands/keys.rs deleted file mode 100644 index 20a67429b1..0000000000 --- a/relayer-cli/src/commands/keys.rs +++ /dev/null @@ -1,24 +0,0 @@ -//! `keys` subcommand -use abscissa_core::clap::Parser; -use abscissa_core::{Command, Runnable}; - -mod add; -mod balance; -mod delete; -mod list; - -/// `keys` subcommand -#[derive(Command, Debug, Parser, Runnable)] -pub enum KeysCmd { - /// Adds key to a configured chain or restores a key to a configured chain using a mnemonic - Add(add::KeysAddCmd), - - /// Delete key(s) from a configured chain - Delete(delete::KeysDeleteCmd), - - /// List keys configured on a chain - List(list::KeysListCmd), - - /// Query balance for a key from a configured chain. If no key is given, the key is retrieved from the configuration file. - Balance(balance::KeyBalanceCmd), -} diff --git a/relayer-cli/src/commands/keys/add.rs b/relayer-cli/src/commands/keys/add.rs deleted file mode 100644 index 1e85269899..0000000000 --- a/relayer-cli/src/commands/keys/add.rs +++ /dev/null @@ -1,181 +0,0 @@ -use core::str::FromStr; -use std::{ - fs, - path::{Path, PathBuf}, -}; - -use abscissa_core::clap::Parser; -use abscissa_core::{Command, Runnable}; - -use ibc::core::ics24_host::identifier::ChainId; -use ibc_relayer::{ - config::{ChainConfig, Config}, - keyring::{HDPath, KeyEntry, KeyRing, Store}, -}; - -use crate::application::app_config; -use crate::conclude::Output; - -/// The data structure that represents the arguments when invoking the `keys add` CLI command. 
-/// -/// The command has one argument and two exclusive flags: -/// -/// The command to add a key from a file: -/// -/// `keys add [OPTIONS] --key-file ` -/// -/// The command to restore a key from a file containing mnemonic: -/// -/// `keys add [OPTIONS] --mnemonic-file ` -/// -/// The key-file and mnemonic-file flags can't be given at the same time, this will cause a terminating error. -/// If successful the key will be created or restored, depending on which flag was given. -#[derive(Clone, Command, Debug, Parser)] -pub struct KeysAddCmd { - #[clap(required = true, help = "identifier of the chain")] - chain_id: ChainId, - - #[clap( - short = 'f', - long, - required = true, - help = "path to the key file", - group = "add-restore" - )] - key_file: Option, - - #[clap( - short, - long, - required = true, - help = "path to file containing mnemonic to restore the key from", - group = "add-restore" - )] - mnemonic_file: Option, - - #[clap( - short, - long, - help = "name of the key (defaults to the `key_name` defined in the config)" - )] - key_name: Option, - - #[clap( - short = 'p', - long, - help = "derivation path for this key", - default_value = "m/44'/118'/0'/0/0" - )] - hd_path: String, -} - -impl KeysAddCmd { - fn options(&self, config: &Config) -> Result> { - let chain_config = config - .find_chain(&self.chain_id) - .ok_or_else(|| format!("chain '{}' not found in configuration file", self.chain_id))?; - - let name = self - .key_name - .clone() - .unwrap_or_else(|| chain_config.key_name.clone()); - - let hd_path = HDPath::from_str(&self.hd_path) - .map_err(|_| format!("invalid derivation path: {}", self.hd_path))?; - - Ok(KeysAddOptions { - config: chain_config.clone(), - name, - hd_path, - }) - } -} - -#[derive(Clone, Debug)] -pub struct KeysAddOptions { - pub name: String, - pub config: ChainConfig, - pub hd_path: HDPath, -} - -impl Runnable for KeysAddCmd { - fn run(&self) { - let config = app_config(); - - let opts = match self.options(&config) { - Err(err) => Output::error(err).exit(), - Ok(result) => result, - }; - - // Check if --file or --mnemonic was given as input. - match (self.key_file.clone(), self.mnemonic_file.clone()) { - (Some(key_file), _) => { - let key = add_key(&opts.config, &opts.name, &key_file, &opts.hd_path); - match key { - Ok(key) => Output::success_msg(format!( - "Added key '{}' ({}) on chain {}", - opts.name, key.account, opts.config.id - )) - .exit(), - Err(e) => Output::error(format!( - "An error occurred adding the key on chain {} from file {:?}: {}", - self.chain_id, key_file, e - )) - .exit(), - } - } - (_, Some(mnemonic_file)) => { - let key = restore_key(&mnemonic_file, &opts.name, &opts.hd_path, &opts.config); - - match key { - Ok(key) => Output::success_msg(format!( - "Restored key '{}' ({}) on chain {}", - opts.name, key.account, opts.config.id - )) - .exit(), - Err(e) => Output::error(format!( - "An error occurred restoring the key on chain {} from file {:?}: {}", - self.chain_id, mnemonic_file, e - )) - .exit(), - } - } - // This case should never trigger. - // The 'required' parameter for the flags will trigger an error if both flags have not been given. - // And the 'group' parameter for the flags will trigger an error if both flags are given. 
- _ => Output::error("--mnemonic-file and --key-file can't both be None".to_string()) - .exit(), - } - } -} - -pub fn add_key( - config: &ChainConfig, - key_name: &str, - file: &Path, - hd_path: &HDPath, -) -> Result> { - let mut keyring = KeyRing::new(Store::Test, &config.account_prefix, &config.id)?; - - let key_contents = fs::read_to_string(file).map_err(|_| "error reading the key file")?; - let key = keyring.key_from_seed_file(&key_contents, hd_path)?; - - keyring.add_key(key_name, key.clone())?; - Ok(key) -} - -pub fn restore_key( - mnemonic: &Path, - key_name: &str, - hdpath: &HDPath, - config: &ChainConfig, -) -> Result> { - let mnemonic_content = - fs::read_to_string(mnemonic).map_err(|_| "error reading the mnemonic file")?; - - let mut keyring = KeyRing::new(Store::Test, &config.account_prefix, &config.id)?; - let key_entry = keyring.key_from_mnemonic(&mnemonic_content, hdpath, &config.address_type)?; - - keyring.add_key(key_name, key_entry.clone())?; - Ok(key_entry) -} diff --git a/relayer-cli/src/commands/keys/balance.rs b/relayer-cli/src/commands/keys/balance.rs deleted file mode 100644 index 2b0558f27e..0000000000 --- a/relayer-cli/src/commands/keys/balance.rs +++ /dev/null @@ -1,66 +0,0 @@ -use abscissa_core::clap::Parser; -use abscissa_core::{Command, Runnable}; - -use ibc::core::ics24_host::identifier::ChainId; -use ibc_relayer::chain::handle::ChainHandle; - -use crate::application::app_config; -use crate::cli_utils::spawn_chain_runtime; -use crate::conclude::{exit_with_unrecoverable_error, json, Output}; - -/// The data structure that represents the arguments when invoking the `keys balance` CLI command. -/// -/// The command has one argument and one optional flag: -/// -/// `keys balance --key-name ` -/// -/// If no key name is given, it will be taken from the configuration file. -/// If successful the balance and denominator of the account, associated with the key name -/// on the given chain, will be displayed. -#[derive(Clone, Command, Debug, Parser)] -pub struct KeyBalanceCmd { - #[clap(required = true, help = "identifier of the chain")] - chain_id: ChainId, - - #[clap( - long, - short, - help = "(optional) name of the key (defaults to the `key_name` defined in the config)" - )] - key_name: Option, -} - -impl Runnable for KeyBalanceCmd { - fn run(&self) { - let config = app_config(); - - let chain = spawn_chain_runtime(&config, &self.chain_id) - .unwrap_or_else(exit_with_unrecoverable_error); - let key_name = self.key_name.clone(); - - match chain.query_balance(key_name.clone()) { - Ok(balance) if json() => Output::success(balance).exit(), - Ok(balance) => { - // Retrieve the key name string to output. 
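                // Falls back to the `key_name` from the chain's configuration
                // when no key name was passed on the command line.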
- let key_name_str = match key_name { - Some(name) => name, - None => { - let chain_config = - chain.config().unwrap_or_else(exit_with_unrecoverable_error); - chain_config.key_name - } - }; - Output::success_msg(format!( - "balance for key `{}`: {} {}", - key_name_str, balance.amount, balance.denom - )) - .exit() - } - Err(e) => Output::error(format!( - "there was a problem querying the chain balance: {}", - e - )) - .exit(), - } - } -} diff --git a/relayer-cli/src/commands/keys/delete.rs b/relayer-cli/src/commands/keys/delete.rs deleted file mode 100644 index 717f620b53..0000000000 --- a/relayer-cli/src/commands/keys/delete.rs +++ /dev/null @@ -1,108 +0,0 @@ -use abscissa_core::clap::Parser; -use abscissa_core::{Command, Runnable}; - -use ibc::core::ics24_host::identifier::ChainId; -use ibc_relayer::{ - config::{ChainConfig, Config}, - keyring::{KeyRing, Store}, -}; - -use crate::application::app_config; -use crate::conclude::Output; - -#[derive(Clone, Command, Debug, Parser)] -pub struct KeysDeleteCmd { - #[clap(required = true, help = "identifier of the chain")] - chain_id: ChainId, - - #[clap(short = 'n', long, help = "name of the key")] - name: Option, - - #[clap(short = 'a', long, help = "delete all keys")] - all: bool, -} - -impl KeysDeleteCmd { - fn options( - &self, - config: &Config, - ) -> Result, Box> { - let chain_config = config - .find_chain(&self.chain_id) - .ok_or_else(|| format!("chain '{}' not found in configuration file", self.chain_id))?; - - let id = match (self.all, &self.name) { - (true, Some(_)) => { - return Err("cannot set both -n/--name and -a/--all".to_owned().into()); - } - (false, None) => { - return Err("must provide either -n/--name or -a/--all" - .to_owned() - .into()); - } - (true, None) => KeysDeleteId::All, - (false, Some(ref name)) => KeysDeleteId::Named(name), - }; - - Ok(KeysDeleteOptions { - config: chain_config.clone(), - id, - }) - } -} - -#[derive(Clone, Debug)] -struct KeysDeleteOptions<'a> { - id: KeysDeleteId<'a>, - config: ChainConfig, -} - -#[derive(Clone, Debug)] -enum KeysDeleteId<'a> { - All, - Named(&'a str), -} - -impl Runnable for KeysDeleteCmd { - fn run(&self) { - let config = app_config(); - - let opts = match self.options(&config) { - Err(err) => Output::error(err).exit(), - Ok(result) => result, - }; - - match opts.id { - KeysDeleteId::All => match delete_all_keys(&opts.config) { - Ok(_) => { - Output::success_msg(format!("Removed all keys on chain {}", opts.config.id)) - .exit() - } - Err(e) => Output::error(format!("{}", e)).exit(), - }, - KeysDeleteId::Named(name) => match delete_key(&opts.config, name) { - Ok(_) => Output::success_msg(format!( - "Removed key ({}) on chain {}", - name, opts.config.id - )) - .exit(), - Err(e) => Output::error(format!("{}", e)).exit(), - }, - }; - } -} - -pub fn delete_key(config: &ChainConfig, key_name: &str) -> Result<(), Box> { - let mut keyring = KeyRing::new(Store::Test, &config.account_prefix, &config.id)?; - keyring.remove_key(key_name)?; - Ok(()) -} - -pub fn delete_all_keys(config: &ChainConfig) -> Result<(), Box> { - let mut keyring = KeyRing::new(Store::Test, &config.account_prefix, &config.id)?; - let keys = keyring.keys()?; - for key in keys { - keyring.remove_key(&key.0)?; - } - Ok(()) -} diff --git a/relayer-cli/src/commands/keys/list.rs b/relayer-cli/src/commands/keys/list.rs deleted file mode 100644 index b627fd77cb..0000000000 --- a/relayer-cli/src/commands/keys/list.rs +++ /dev/null @@ -1,70 +0,0 @@ -use alloc::collections::btree_map::BTreeMap as HashMap; - -use 
abscissa_core::clap::Parser; -use abscissa_core::{Command, Runnable}; - -use ibc::core::ics24_host::identifier::ChainId; -use ibc_relayer::{ - config::{ChainConfig, Config}, - keyring::{KeyEntry, KeyRing, Store}, -}; - -use crate::conclude::Output; -use crate::{application::app_config, conclude::json}; - -#[derive(Clone, Command, Debug, Parser)] -pub struct KeysListCmd { - #[clap(required = true, help = "identifier of the chain")] - chain_id: ChainId, -} - -impl KeysListCmd { - fn options(&self, config: &Config) -> Result { - let chain_config = config - .find_chain(&self.chain_id) - .ok_or_else(|| format!("chain '{}' not found in configuration file", self.chain_id))?; - - Ok(KeysListOptions { - chain_config: chain_config.clone(), - }) - } -} - -impl Runnable for KeysListCmd { - fn run(&self) { - let config = app_config(); - - let opts = match self.options(&config) { - Err(err) => Output::error(err).exit(), - Ok(result) => result, - }; - - match list_keys(opts.chain_config) { - Ok(keys) if json() => { - let keys = keys.into_iter().collect::>(); - Output::success(keys).exit() - } - Ok(keys) => { - let mut msg = String::new(); - for (name, key) in keys { - msg.push_str(&format!("\n- {} ({})", name, key.account)); - } - Output::success_msg(msg).exit() - } - Err(e) => Output::error(e).exit(), - } - } -} - -#[derive(Clone, Debug)] -pub struct KeysListOptions { - pub chain_config: ChainConfig, -} - -pub fn list_keys( - config: ChainConfig, -) -> Result, Box> { - let keyring = KeyRing::new(Store::Test, &config.account_prefix, &config.id)?; - let keys = keyring.keys()?; - Ok(keys) -} diff --git a/relayer-cli/src/commands/listen.rs b/relayer-cli/src/commands/listen.rs deleted file mode 100644 index 9a5c448116..0000000000 --- a/relayer-cli/src/commands/listen.rs +++ /dev/null @@ -1,161 +0,0 @@ -use alloc::sync::Arc; -use core::{fmt, ops::Deref, str::FromStr}; -use std::thread; - -use abscissa_core::clap::Parser; -use abscissa_core::{application::fatal_error, Runnable}; -use itertools::Itertools; -use tokio::runtime::Runtime as TokioRuntime; -use tracing::{error, info}; - -use ibc::{core::ics24_host::identifier::ChainId, events::IbcEvent}; - -use ibc_relayer::{ - config::ChainConfig, - event::monitor::{EventMonitor, EventReceiver}, -}; - -use crate::prelude::*; - -#[derive(Copy, Clone, Debug, PartialEq, Eq)] -pub enum EventFilter { - NewBlock, - Tx, -} - -impl EventFilter { - pub fn matches(&self, event: &IbcEvent) -> bool { - match self { - EventFilter::NewBlock => matches!(event, IbcEvent::NewBlock(_)), - EventFilter::Tx => { - !(matches!( - event, - IbcEvent::NewBlock(_) | IbcEvent::Empty(_) | IbcEvent::ChainError(_) - )) - } - } - } -} - -impl fmt::Display for EventFilter { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - Self::NewBlock => write!(f, "NewBlock"), - Self::Tx => write!(f, "Tx"), - } - } -} - -impl FromStr for EventFilter { - type Err = Box; - - fn from_str(s: &str) -> Result { - match s { - "NewBlock" => Ok(Self::NewBlock), - "Tx" => Ok(Self::Tx), - invalid => Err(format!("unrecognized event type: {}", invalid).into()), - } - } -} - -#[derive(Debug, Parser)] -pub struct ListenCmd { - /// Identifier of the chain to listen for events from - chain_id: ChainId, - - /// Add an event type to listen for, can be repeated. - /// Listen for all events by default (available: Tx, NewBlock). 
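    /// For example: `--event Tx` shows only transaction events, `--event NewBlock` only new-block events.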
- #[clap(short = 'e', long = "event", value_name = "EVENT")] - events: Vec, -} - -impl ListenCmd { - fn cmd(&self) -> Result<(), Box> { - let config = app_config(); - - let chain_config = config - .find_chain(&self.chain_id) - .ok_or_else(|| format!("chain '{}' not found in configuration", self.chain_id))?; - - let events = if self.events.is_empty() { - &[EventFilter::Tx, EventFilter::NewBlock] - } else { - self.events.as_slice() - }; - - listen(chain_config, events) - } -} - -impl Runnable for ListenCmd { - fn run(&self) { - self.cmd() - .unwrap_or_else(|e| fatal_error(app_reader().deref(), &*e)); - } -} - -/// Listen to events -pub fn listen( - config: &ChainConfig, - filters: &[EventFilter], -) -> Result<(), Box> { - let rt = Arc::new(TokioRuntime::new()?); - let (event_monitor, rx) = subscribe(config, rt)?; - - info!( - "[{}] listening for queries {}", - config.id, - event_monitor.queries().iter().format(", "), - ); - - thread::spawn(|| event_monitor.run()); - - while let Ok(event_batch) = rx.recv() { - match event_batch { - Ok(batch) => { - let matching_events = batch - .events - .into_iter() - .filter(|e| event_match(e, filters)) - .collect_vec(); - - if matching_events.is_empty() { - continue; - } - - info!("- event batch at height {}", batch.height); - - for event in matching_events { - info!("+ {:#?}", event); - } - - info!(""); - } - Err(e) => error!("- error: {}", e), - } - } - - Ok(()) -} - -fn event_match(event: &IbcEvent, filters: &[EventFilter]) -> bool { - filters.iter().any(|f| f.matches(event)) -} - -fn subscribe( - chain_config: &ChainConfig, - rt: Arc, -) -> Result<(EventMonitor, EventReceiver), Box> { - let (mut event_monitor, rx, _) = EventMonitor::new( - chain_config.id.clone(), - chain_config.websocket_addr.clone(), - rt, - ) - .map_err(|e| format!("could not initialize event monitor: {}", e))?; - - event_monitor - .subscribe() - .map_err(|e| format!("could not initialize subscriptions: {}", e))?; - - Ok((event_monitor, rx)) -} diff --git a/relayer-cli/src/commands/misbehaviour.rs b/relayer-cli/src/commands/misbehaviour.rs deleted file mode 100644 index db33522873..0000000000 --- a/relayer-cli/src/commands/misbehaviour.rs +++ /dev/null @@ -1,132 +0,0 @@ -use abscissa_core::clap::Parser; -use abscissa_core::{Command, Runnable}; -use ibc::core::ics02_client::events::UpdateClient; -use ibc::core::ics02_client::height::Height; -use ibc::core::ics24_host::identifier::{ChainId, ClientId}; -use ibc::events::IbcEvent; -use ibc_relayer::chain::handle::ChainHandle; -use ibc_relayer::chain::requests::{IncludeProof, QueryClientStateRequest}; -use ibc_relayer::config::Config; -use ibc_relayer::foreign_client::{ForeignClient, MisbehaviourResults}; -use std::ops::Deref; - -use crate::cli_utils::{spawn_chain_runtime, spawn_chain_runtime_generic}; -use crate::conclude::Output; -use crate::prelude::*; -use ibc::core::ics02_client::client_state::ClientState; - -#[derive(Clone, Command, Debug, Parser)] -pub struct MisbehaviourCmd { - #[clap( - required = true, - help = "identifier of the chain where client updates are monitored for misbehaviour" - )] - chain_id: ChainId, - - #[clap( - required = true, - help = "identifier of the client to be monitored for misbehaviour" - )] - client_id: ClientId, -} - -impl Runnable for MisbehaviourCmd { - fn run(&self) { - let config = app_config(); - - let res = monitor_misbehaviour(&self.chain_id, &self.client_id, &config); - match res { - Ok(some_event) => Output::success(some_event).exit(), - Err(e) => Output::error(format!("{}", e)).exit(), - } - 
} -} - -pub fn monitor_misbehaviour( - chain_id: &ChainId, - client_id: &ClientId, - config: &Config, -) -> Result, Box> { - let chain = spawn_chain_runtime(config, chain_id) - .map_err(|e| format!("could not spawn the chain runtime for {}: {}", chain_id, e))?; - - let subscription = chain.subscribe()?; - - // check previous updates that may have been missed - misbehaviour_handling(chain.clone(), config, client_id.clone(), None)?; - - // process update client events - while let Ok(event_batch) = subscription.recv() { - match event_batch.deref() { - Ok(event_batch) => { - for event in &event_batch.events { - match event { - IbcEvent::UpdateClient(update) => { - debug!("{:?}", update); - misbehaviour_handling( - chain.clone(), - config, - update.client_id().clone(), - Some(update.clone()), - )?; - } - - IbcEvent::CreateClient(_create) => { - // TODO - get header from full node, consensus state from chain, compare - } - - IbcEvent::ClientMisbehaviour(ref _misbehaviour) => { - // TODO - submit misbehaviour to the witnesses (our full node) - return Ok(Some(event.clone())); - } - - _ => {} - } - } - } - Err(e) => { - dbg!(e); - } - } - } - - Ok(None) -} - -fn misbehaviour_handling( - chain: Chain, - config: &Config, - client_id: ClientId, - update: Option, -) -> Result<(), Box> { - let (client_state, _) = chain - .query_client_state( - QueryClientStateRequest { - client_id: client_id.clone(), - height: Height::zero(), - }, - IncludeProof::No, - ) - .map_err(|e| format!("could not query client state for {}: {}", client_id, e))?; - - if client_state.is_frozen() { - return Err(format!("client {} is already frozen", client_id).into()); - } - - let counterparty_chain = spawn_chain_runtime_generic::(config, &client_state.chain_id()) - .map_err(|e| { - format!( - "could not spawn the chain runtime for {}: {}", - client_state.chain_id(), - e - ) - })?; - - let client = ForeignClient::restore(client_id, chain, counterparty_chain); - let result = client.detect_misbehaviour_and_submit_evidence(update); - if let MisbehaviourResults::EvidenceSubmitted(events) = result { - info!("evidence submission result {:?}", events); - } - - Ok(()) -} diff --git a/relayer-cli/src/commands/query.rs b/relayer-cli/src/commands/query.rs deleted file mode 100644 index e8d0580da5..0000000000 --- a/relayer-cli/src/commands/query.rs +++ /dev/null @@ -1,89 +0,0 @@ -//! 
`query` subcommand - -use abscissa_core::clap::Parser; -use abscissa_core::{Command, Runnable}; - -use crate::commands::query::channel_client::QueryChannelClientCmd; -use crate::commands::query::channel_ends::QueryChannelEndsCmd; -use crate::commands::query::channels::QueryChannelsCmd; -use crate::commands::query::packet::QueryPacketCmds; - -mod channel; -mod channel_client; -mod channel_ends; -mod channels; -mod client; -mod clients; -mod connection; -mod connections; -mod packet; -mod tx; - -/// `query` subcommand -#[derive(Command, Debug, Parser, Runnable)] -pub enum QueryCmd { - /// Query information about clients - #[clap(subcommand)] - Client(QueryClientCmds), - - /// Query the identifiers of all clients on a chain - Clients(clients::QueryAllClientsCmd), - - /// Query information about connections - #[clap(subcommand)] - Connection(QueryConnectionCmds), - - /// Query the identifiers of all connections on a chain - Connections(connections::QueryConnectionsCmd), - - /// Query information about channels - #[clap(subcommand)] - Channel(QueryChannelCmds), - - /// Query the identifiers of all channels on a given chain - Channels(QueryChannelsCmd), - - /// Query information about packets - #[clap(subcommand)] - Packet(QueryPacketCmds), - - /// Query information about transactions - #[clap(subcommand)] - Tx(tx::QueryTxCmd), -} - -#[derive(Command, Debug, Parser, Runnable)] -pub enum QueryClientCmds { - /// Query the client full state - State(client::QueryClientStateCmd), - - /// Query the client consensus state - Consensus(client::QueryClientConsensusCmd), - - /// Query for the header used in a client update at a certain height - Header(client::QueryClientHeaderCmd), - - /// Query the client connections - Connections(client::QueryClientConnectionsCmd), -} - -#[derive(Command, Debug, Parser, Runnable)] -pub enum QueryConnectionCmds { - /// Query connection end - End(connection::QueryConnectionEndCmd), - - /// Query connection channels - Channels(connection::QueryConnectionChannelsCmd), -} - -#[derive(Command, Debug, Parser, Runnable)] -pub enum QueryChannelCmds { - /// Query channel's client state - Client(QueryChannelClientCmd), - - /// Query channel end - End(channel::QueryChannelEndCmd), - - /// Query channel ends and underlying connection and client objects - Ends(QueryChannelEndsCmd), -} diff --git a/relayer-cli/src/commands/query/channel.rs b/relayer-cli/src/commands/query/channel.rs deleted file mode 100644 index c62a732c14..0000000000 --- a/relayer-cli/src/commands/query/channel.rs +++ /dev/null @@ -1,61 +0,0 @@ -use abscissa_core::clap::Parser; -use abscissa_core::{Command, Runnable}; -use ibc_relayer::chain::handle::ChainHandle; - -use ibc::core::ics24_host::identifier::ChainId; -use ibc::core::ics24_host::identifier::{ChannelId, PortId}; -use ibc_relayer::chain::requests::{IncludeProof, QueryChannelRequest}; - -use crate::cli_utils::spawn_chain_runtime; -use crate::conclude::{exit_with_unrecoverable_error, Output}; -use crate::prelude::*; -use ibc::core::ics04_channel::channel::State; - -#[derive(Clone, Command, Debug, Parser)] -pub struct QueryChannelEndCmd { - #[clap(required = true, help = "identifier of the chain to query")] - chain_id: ChainId, - - #[clap(required = true, help = "identifier of the port to query")] - port_id: PortId, - - #[clap(required = true, help = "identifier of the channel to query")] - channel_id: ChannelId, - - #[clap(short = 'H', long, help = "height of the state to query")] - height: Option, -} - -impl Runnable for QueryChannelEndCmd { - fn run(&self) 
{ - let config = app_config(); - - debug!("Options: {:?}", self); - - let chain = spawn_chain_runtime(&config, &self.chain_id) - .unwrap_or_else(exit_with_unrecoverable_error); - - let res = chain.query_channel( - QueryChannelRequest { - port_id: self.port_id.clone(), - channel_id: self.channel_id, - height: ibc::Height::new(chain.id().version(), self.height.unwrap_or(0_u64)), - }, - IncludeProof::No, - ); - match res { - Ok((channel_end, _)) => { - if channel_end.state_matches(&State::Uninitialized) { - Output::error(format!( - "port '{}' & channel '{}' does not exist", - self.port_id, self.channel_id - )) - .exit() - } else { - Output::success(channel_end).exit() - } - } - Err(e) => Output::error(format!("{}", e)).exit(), - } - } -} diff --git a/relayer-cli/src/commands/query/channel_client.rs b/relayer-cli/src/commands/query/channel_client.rs deleted file mode 100644 index fef376e3a7..0000000000 --- a/relayer-cli/src/commands/query/channel_client.rs +++ /dev/null @@ -1,44 +0,0 @@ -use abscissa_core::clap::Parser; -use abscissa_core::{Command, Runnable}; - -use ibc::core::ics24_host::identifier::{ChainId, ChannelId, PortId}; -use ibc_relayer::chain::handle::ChainHandle; -use ibc_relayer::chain::requests::QueryChannelClientStateRequest; - -use crate::application::app_config; -use crate::cli_utils::spawn_chain_runtime; -use crate::conclude::{exit_with_unrecoverable_error, Output}; - -/// The data structure that represents the arguments when invoking the `query channel client` CLI command. -/// -/// `query channel client --port-id --channel-id ` -/// -/// If successful the channel's client state is displayed. -#[derive(Clone, Command, Debug, Parser)] -pub struct QueryChannelClientCmd { - #[clap(required = true, help = "identifier of the chain to query")] - chain_id: ChainId, - - #[clap(required = true, long, help = "identifier of the port to query")] - port_id: PortId, - - #[clap(required = true, long, help = "identifier of the channel to query")] - channel_id: ChannelId, -} - -impl Runnable for QueryChannelClientCmd { - fn run(&self) { - let config = app_config(); - - let chain = spawn_chain_runtime(&config, &self.chain_id) - .unwrap_or_else(exit_with_unrecoverable_error); - - match chain.query_channel_client_state(QueryChannelClientStateRequest { - port_id: self.port_id.clone(), - channel_id: self.channel_id, - }) { - Ok(cs) => Output::success(cs).exit(), - Err(e) => Output::error(format!("{}", e)).exit(), - } - } -} diff --git a/relayer-cli/src/commands/query/channel_ends.rs b/relayer-cli/src/commands/query/channel_ends.rs deleted file mode 100644 index c38e21b9e0..0000000000 --- a/relayer-cli/src/commands/query/channel_ends.rs +++ /dev/null @@ -1,220 +0,0 @@ -use abscissa_core::clap::Parser; -use abscissa_core::{Command, Runnable}; -use serde::{Deserialize, Serialize}; - -use ibc::core::ics02_client::client_state::{AnyClientState, ClientState}; -use ibc::core::ics03_connection::connection::ConnectionEnd; -use ibc::core::ics04_channel::channel::{ChannelEnd, State}; -use ibc::core::ics24_host::identifier::ChainId; -use ibc::core::ics24_host::identifier::{ChannelId, ClientId, ConnectionId, PortId}; -use ibc::Height; -use ibc_relayer::chain::handle::{BaseChainHandle, ChainHandle}; -use ibc_relayer::chain::requests::{ - IncludeProof, QueryChannelRequest, QueryClientStateRequest, QueryConnectionRequest, -}; -use ibc_relayer::registry::Registry; - -use crate::conclude::Output; -use crate::prelude::*; - -#[derive(Clone, Command, Debug, Parser)] -pub struct QueryChannelEndsCmd { - 
#[clap(required = true, help = "identifier of the chain to query")] - chain_id: ChainId, - - #[clap(required = true, help = "identifier of the port to query")] - port_id: PortId, - - #[clap(required = true, help = "identifier of the channel to query")] - channel_id: ChannelId, - - #[clap(short = 'H', long, help = "height of the state to query")] - height: Option, - - #[clap( - short = 'v', - long, - help = "enable verbose output, displaying all details of channels, connections & clients" - )] - verbose: bool, -} - -#[derive(Clone, Debug, Serialize, Deserialize)] -pub struct ChannelEnds { - pub channel_end: ChannelEnd, - pub connection_end: ConnectionEnd, - pub client_state: AnyClientState, - pub counterparty_channel_end: ChannelEnd, - pub counterparty_connection_end: ConnectionEnd, - pub counterparty_client_state: AnyClientState, -} - -#[derive(Clone, Debug, Serialize, Deserialize)] -pub struct ChannelEndsSummary { - chain_id: ChainId, - client_id: ClientId, - connection_id: ConnectionId, - channel_id: ChannelId, - port_id: PortId, - - counterparty_chain_id: ChainId, - counterparty_client_id: ClientId, - counterparty_connection_id: ConnectionId, - counterparty_channel_id: ChannelId, - counterparty_port_id: PortId, -} - -fn do_run(cmd: &QueryChannelEndsCmd) -> Result<(), Box> { - debug!("Options: {:?}", cmd); - - let config = app_config(); - - let chain_id = cmd.chain_id.clone(); - let port_id = cmd.port_id.clone(); - let channel_id = cmd.channel_id; - - let mut registry = >::new((*config).clone()); - let chain = registry.get_or_spawn(&chain_id)?; - - let chain_height = match cmd.height { - Some(height) => Height::new(chain.id().version(), height), - None => chain.query_latest_height()?, - }; - - let (channel_end, _) = chain.query_channel( - QueryChannelRequest { - port_id: port_id.clone(), - channel_id, - height: chain_height, - }, - IncludeProof::No, - )?; - if channel_end.state_matches(&State::Uninitialized) { - return Err(format!( - "{}/{} on chain {} @ {:?} is uninitialized", - port_id, channel_id, chain_id, chain_height - ) - .into()); - } - - let connection_id = channel_end - .connection_hops - .first() - .ok_or_else(|| { - format!( - "missing connection_hops for {}/{} on chain {} @ {:?}", - port_id, channel_id, chain_id, chain_height - ) - })? 
- .clone(); - - let (connection_end, _) = chain.query_connection( - QueryConnectionRequest { - connection_id: connection_id.clone(), - height: chain_height, - }, - IncludeProof::No, - )?; - - let client_id = connection_end.client_id().clone(); - - let (client_state, _) = chain.query_client_state( - QueryClientStateRequest { - client_id: client_id.clone(), - height: chain_height, - }, - IncludeProof::No, - )?; - - let channel_counterparty = channel_end.counterparty().clone(); - let connection_counterparty = connection_end.counterparty().clone(); - - let counterparty_client_id = connection_counterparty.client_id().clone(); - - let counterparty_connection_id = connection_counterparty.connection_id.ok_or_else(|| { - format!( - "connection end for {} on chain {} @ {:?} does not have counterparty connection id: {:?}", - connection_id, - chain_id, - chain_height, - connection_end - ) - })?; - - let counterparty_port_id = channel_counterparty.port_id().clone(); - - let counterparty_channel_id = channel_counterparty.channel_id.ok_or_else(|| { - format!( - "channel end for {}/{} on chain {} @ {:?} does not have counterparty channel id: {:?}", - port_id, channel_id, chain_id, chain_height, channel_end - ) - })?; - - let counterparty_chain_id = client_state.chain_id(); - let counterparty_chain = registry.get_or_spawn(&counterparty_chain_id)?; - let counterparty_chain_height = counterparty_chain.query_latest_height()?; - - let (counterparty_connection_end, _) = counterparty_chain.query_connection( - QueryConnectionRequest { - connection_id: counterparty_connection_id.clone(), - height: counterparty_chain_height, - }, - IncludeProof::No, - )?; - - let (counterparty_client_state, _) = counterparty_chain.query_client_state( - QueryClientStateRequest { - client_id: counterparty_client_id.clone(), - height: counterparty_chain_height, - }, - IncludeProof::No, - )?; - - let (counterparty_channel_end, _) = counterparty_chain.query_channel( - QueryChannelRequest { - port_id: counterparty_port_id.clone(), - channel_id: counterparty_channel_id, - height: counterparty_chain_height, - }, - IncludeProof::No, - )?; - - if cmd.verbose { - let res = ChannelEnds { - channel_end, - connection_end, - client_state, - - counterparty_channel_end, - counterparty_connection_end, - counterparty_client_state, - }; - - Output::success(res).exit(); - } else { - let res = ChannelEndsSummary { - chain_id, - client_id, - connection_id, - channel_id, - port_id, - - counterparty_chain_id, - counterparty_client_id, - counterparty_connection_id, - counterparty_channel_id, - counterparty_port_id, - }; - - Output::success(res).exit(); - } -} - -impl Runnable for QueryChannelEndsCmd { - fn run(&self) { - match do_run::(self) { - Ok(()) => {} - Err(e) => Output::error(format!("{}", e)).exit(), - } - } -} diff --git a/relayer-cli/src/commands/query/channels.rs b/relayer-cli/src/commands/query/channels.rs deleted file mode 100644 index a35199d887..0000000000 --- a/relayer-cli/src/commands/query/channels.rs +++ /dev/null @@ -1,270 +0,0 @@ -use core::fmt::{Debug, Error, Formatter}; - -use abscissa_core::clap::Parser; -use abscissa_core::Runnable; -use serde::Serialize; - -use ibc::core::ics02_client::client_state::ClientState; -use ibc::core::ics04_channel::channel::{ChannelEnd, State}; -use ibc::core::ics24_host::identifier::{ChainId, ChannelId, ConnectionId, PortChannelId, PortId}; -use ibc::Height; -use ibc_relayer::chain::handle::{BaseChainHandle, ChainHandle}; -use ibc_relayer::chain::requests::{ - IncludeProof, PageRequest, 
QueryChannelRequest, QueryChannelsRequest, QueryClientStateRequest, - QueryConnectionRequest, -}; -use ibc_relayer::registry::Registry; - -use crate::commands::query::channel_ends::ChannelEnds; -use crate::conclude::Output; -use crate::prelude::*; - -#[derive(Clone, Command, Debug, Parser)] -pub struct QueryChannelsCmd { - #[clap(required = true, help = "identifier of the chain to query")] - chain_id: ChainId, - - #[clap( - short = 'd', - long, - help = "identifier of the channel's destination chain" - )] - destination_chain: Option, - - #[clap( - short = 'v', - long, - help = "enable verbose output, displaying all client and connection ids" - )] - verbose: bool, -} - -fn run_query_channels( - cmd: &QueryChannelsCmd, -) -> Result> { - debug!("Options: {:?}", cmd); - - let mut output = if cmd.verbose { - QueryChannelsOutput::verbose() - } else { - QueryChannelsOutput::summary() - }; - - let config = app_config(); - let chain_id = cmd.chain_id.clone(); - - let mut registry = >::new((*config).clone()); - let chain = registry.get_or_spawn(&cmd.chain_id)?; - let chain_height = chain.query_latest_height()?; - - let identified_channels = chain.query_channels(QueryChannelsRequest { - pagination: Some(PageRequest::all()), - })?; - - for identified_channel in identified_channels { - let port_id = identified_channel.port_id; - let channel_id = identified_channel.channel_id; - let chain_id = chain_id.clone(); - let channel_end = identified_channel.channel_end; - - if channel_end.state_matches(&State::Uninitialized) { - return Err(format!( - "{}/{} on chain {} @ {:?} is uninitialized", - port_id, channel_id, chain_id, chain_height - ) - .into()); - } - - let connection_id = channel_end - .connection_hops - .first() - .ok_or_else(|| { - format!( - "missing connection_hops for {}/{} on chain {} @ {:?}", - port_id, channel_id, chain_id, chain_height - ) - })? 
- .clone(); - - if cmd.verbose { - let channel_ends = query_channel_ends( - &mut registry, - &chain, - cmd.destination_chain.as_ref(), - channel_end, - connection_id, - chain_id, - port_id, - channel_id, - chain_height, - ); - - match channel_ends { - Ok(channel_ends) => output.push_verbose(channel_ends), - Err(e) => error!("failed to query channel ends: {}", e), - } - } else { - output.push_summary(PortChannelId { - channel_id, - port_id, - }); - } - } - - Ok(output) -} - -#[allow(clippy::too_many_arguments)] -fn query_channel_ends( - registry: &mut Registry, - chain: &Chain, - destination_chain: Option<&ChainId>, - channel_end: ChannelEnd, - connection_id: ConnectionId, - chain_id: ChainId, - port_id: PortId, - channel_id: ChannelId, - chain_height: Height, -) -> Result> { - let (connection_end, _) = chain.query_connection( - QueryConnectionRequest { - connection_id: connection_id.clone(), - height: chain_height, - }, - IncludeProof::No, - )?; - let client_id = connection_end.client_id().clone(); - let (client_state, _) = chain.query_client_state( - QueryClientStateRequest { - client_id, - height: chain_height, - }, - IncludeProof::No, - )?; - let counterparty_chain_id = client_state.chain_id(); - - if let Some(dst_chain_id) = destination_chain { - if dst_chain_id != &counterparty_chain_id { - return Err(format!( - "mismatch between supplied destination chain ({}) and counterparty chain ({})", - dst_chain_id, counterparty_chain_id - ) - .into()); - } - } - - let channel_counterparty = channel_end.counterparty().clone(); - let connection_counterparty = connection_end.counterparty().clone(); - let counterparty_client_id = connection_counterparty.client_id().clone(); - - let counterparty_connection_id = connection_counterparty.connection_id.ok_or_else(|| { - format!( - "connection end for {} on chain {} @ {:?} does not have counterparty connection id: {:?}", - connection_id, - chain_id, - chain_height, - connection_end - ) - })?; - - let counterparty_port_id = channel_counterparty.port_id().clone(); - - let counterparty_channel_id = channel_counterparty.channel_id.ok_or_else(|| { - format!( - "channel end for {}/{} on chain {} @ {:?} does not have counterparty channel id: {:?}", - port_id, channel_id, chain_id, chain_height, channel_end - ) - })?; - - let counterparty_chain = registry.get_or_spawn(&counterparty_chain_id)?; - let counterparty_chain_height = counterparty_chain.query_latest_height()?; - - let (counterparty_connection_end, _) = counterparty_chain.query_connection( - QueryConnectionRequest { - connection_id: counterparty_connection_id, - height: counterparty_chain_height, - }, - IncludeProof::No, - )?; - - let (counterparty_client_state, _) = counterparty_chain.query_client_state( - QueryClientStateRequest { - client_id: counterparty_client_id, - height: counterparty_chain_height, - }, - IncludeProof::No, - )?; - - let (counterparty_channel_end, _) = counterparty_chain.query_channel( - QueryChannelRequest { - port_id: counterparty_port_id, - channel_id: counterparty_channel_id, - height: counterparty_chain_height, - }, - IncludeProof::No, - )?; - - Ok(ChannelEnds { - channel_end, - connection_end, - client_state, - counterparty_channel_end, - counterparty_connection_end, - counterparty_client_state, - }) -} - -impl Runnable for QueryChannelsCmd { - fn run(&self) { - match run_query_channels::(self) { - Ok(output) => Output::success(output).exit(), - Err(e) => Output::error(format!("{}", e)).exit(), - } - } -} - -#[derive(Serialize)] -#[serde(untagged)] -enum QueryChannelsOutput 
{
-    Verbose(Vec<ChannelEnds>),
-    Summary(Vec<PortChannelId>),
-}
-
-impl QueryChannelsOutput {
-    fn verbose() -> Self {
-        Self::Verbose(Vec::new())
-    }
-
-    fn summary() -> Self {
-        Self::Summary(Vec::new())
-    }
-
-    fn push_verbose(&mut self, ce: ChannelEnds) {
-        assert!(matches!(self, Self::Verbose(_)));
-
-        if let Self::Verbose(ref mut ces) = self {
-            ces.push(ce);
-        } else {
-            unreachable!();
-        }
-    }
-
-    fn push_summary(&mut self, pc: PortChannelId) {
-        assert!(matches!(self, Self::Summary(_)));
-
-        if let Self::Summary(ref mut pcs) = self {
-            pcs.push(pc);
-        } else {
-            unreachable!();
-        }
-    }
-}
-
-impl Debug for QueryChannelsOutput {
-    fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> {
-        match self {
-            QueryChannelsOutput::Verbose(output) => write!(f, "{:#?}", output),
-            QueryChannelsOutput::Summary(output) => write!(f, "{:#?}", output),
-        }
-    }
-}
diff --git a/relayer-cli/src/commands/query/client.rs b/relayer-cli/src/commands/query/client.rs
deleted file mode 100644
index 823cb95cf7..0000000000
--- a/relayer-cli/src/commands/query/client.rs
+++ /dev/null
@@ -1,252 +0,0 @@
-use abscissa_core::clap::Parser;
-use abscissa_core::{Command, Runnable};
-use tracing::debug;
-
-use ibc_relayer::chain::handle::ChainHandle;
-use ibc_relayer::chain::requests::{
-    IncludeProof, PageRequest, QueryClientConnectionsRequest, QueryClientStateRequest,
-    QueryConsensusStateRequest, QueryConsensusStatesRequest,
-};
-
-use ibc::core::ics02_client::client_consensus::QueryClientEventRequest;
-use ibc::core::ics02_client::client_state::ClientState;
-use ibc::core::ics24_host::identifier::ChainId;
-use ibc::core::ics24_host::identifier::ClientId;
-use ibc::events::WithBlockDataType;
-use ibc::query::QueryTxRequest;
-use ibc::Height;
-
-use crate::application::app_config;
-use crate::cli_utils::spawn_chain_runtime;
-use crate::conclude::{exit_with_unrecoverable_error, Output};
-
-/// Query client state command
-#[derive(Clone, Command, Debug, Parser)]
-pub struct QueryClientStateCmd {
-    #[clap(required = true, help = "identifier of the chain to query")]
-    chain_id: ChainId,
-
-    #[clap(required = true, help = "identifier of the client to query")]
-    client_id: ClientId,
-
-    #[clap(short = 'H', long, help = "the chain height context for the query")]
-    height: Option<u64>,
-}
-
-/// Command for querying a client's state.
-/// hermes query client state ibc-1 07-tendermint-0 --height 3 -impl Runnable for QueryClientStateCmd { - fn run(&self) { - let config = app_config(); - - let chain = spawn_chain_runtime(&config, &self.chain_id) - .unwrap_or_else(exit_with_unrecoverable_error); - - let height = ibc::Height::new(chain.id().version(), self.height.unwrap_or(0_u64)); - - match chain.query_client_state( - QueryClientStateRequest { - client_id: self.client_id.clone(), - height, - }, - IncludeProof::No, - ) { - Ok((cs, _)) => Output::success(cs).exit(), - Err(e) => Output::error(format!("{}", e)).exit(), - } - } -} - -/// Query client consensus command -#[derive(Clone, Command, Debug, Parser)] -pub struct QueryClientConsensusCmd { - #[clap(required = true, help = "identifier of the chain to query")] - chain_id: ChainId, - - #[clap(required = true, help = "identifier of the client to query")] - client_id: ClientId, - - #[clap( - short = 'c', - long, - help = "height of the client's consensus state to query" - )] - consensus_height: Option, - - #[clap(short = 's', long, help = "show only consensus heights")] - heights_only: bool, - - #[clap( - short = 'H', - long, - help = "the chain height context to be used, applicable only to a specific height" - )] - height: Option, -} - -/// Implementation of the query for a client's consensus state at a certain height. -/// hermes query client consensus ibc-0 07-tendermint-0 -c 22 -impl Runnable for QueryClientConsensusCmd { - fn run(&self) { - let config = app_config(); - - debug!("Options: {:?}", self); - - let chain = spawn_chain_runtime(&config, &self.chain_id) - .unwrap_or_else(exit_with_unrecoverable_error); - - let counterparty_chain = match chain.query_client_state( - QueryClientStateRequest { - client_id: self.client_id.clone(), - height: Height::zero(), - }, - IncludeProof::No, - ) { - Ok((cs, _)) => cs.chain_id(), - Err(e) => Output::error(format!( - "failed while querying client '{}' on chain '{}' with error: {}", - self.client_id, self.chain_id, e - )) - .exit(), - }; - - match self.consensus_height { - Some(cs_height) => { - let height = ibc::Height::new(chain.id().version(), self.height.unwrap_or(0_u64)); - let consensus_height = ibc::Height::new(counterparty_chain.version(), cs_height); - - let res = chain - .query_consensus_state( - QueryConsensusStateRequest { - client_id: self.client_id.clone(), - consensus_height, - query_height: height, - }, - IncludeProof::No, - ) - .map(|(consensus_state, _)| consensus_state); - - match res { - Ok(cs) => Output::success(cs).exit(), - Err(e) => Output::error(format!("{}", e)).exit(), - } - } - None => { - let res = chain.query_consensus_states(QueryConsensusStatesRequest { - client_id: self.client_id.clone(), - pagination: Some(PageRequest::all()), - }); - - match res { - Ok(states) => { - if self.heights_only { - let heights: Vec = states.iter().map(|cs| cs.height).collect(); - Output::success(heights).exit() - } else { - Output::success(states).exit() - } - } - Err(e) => Output::error(format!("{}", e)).exit(), - } - } - } - } -} - -#[derive(Clone, Command, Debug, Parser)] -pub struct QueryClientHeaderCmd { - #[clap(required = true, help = "identifier of the chain to query")] - chain_id: ChainId, - - #[clap(required = true, help = "identifier of the client to query")] - client_id: ClientId, - - #[clap(required = true, help = "height of header to query")] - consensus_height: u64, - - #[clap(short = 'H', long, help = "the chain height context for the query")] - height: Option, -} - -/// Implementation of the query for 
the header used in a client update at a certain height. -/// hermes query client header ibc-0 07-tendermint-0 22 -impl Runnable for QueryClientHeaderCmd { - fn run(&self) { - let config = app_config(); - - debug!("Options: {:?}", self); - - let chain = spawn_chain_runtime(&config, &self.chain_id) - .unwrap_or_else(exit_with_unrecoverable_error); - - let counterparty_chain = match chain.query_client_state( - QueryClientStateRequest { - client_id: self.client_id.clone(), - height: Height::zero(), - }, - IncludeProof::No, - ) { - Ok((cs, _)) => cs.chain_id(), - Err(e) => Output::error(format!( - "failed while querying client '{}' on chain '{}' with error: {}", - self.client_id, self.chain_id, e - )) - .exit(), - }; - - let consensus_height = - ibc::Height::new(counterparty_chain.version(), self.consensus_height); - - let height = ibc::Height::new(chain.id().version(), self.height.unwrap_or(0_u64)); - - let res = chain.query_txs(QueryTxRequest::Client(QueryClientEventRequest { - height, - event_id: WithBlockDataType::UpdateClient, - client_id: self.client_id.clone(), - consensus_height, - })); - - match res { - Ok(header) => Output::success(header).exit(), - Err(e) => Output::error(format!("{}", e)).exit(), - } - } -} - -/// Query client connections command -#[derive(Clone, Command, Debug, Parser)] -pub struct QueryClientConnectionsCmd { - #[clap(required = true, help = "identifier of the chain to query")] - chain_id: ChainId, - - #[clap(required = true, help = "identifier of the client to query")] - client_id: ClientId, - - #[clap( - short = 'H', - long, - help = "the chain height which this query should reflect" - )] - height: Option, -} - -// hermes query connections ibc-0 -impl Runnable for QueryClientConnectionsCmd { - fn run(&self) { - let config = app_config(); - - debug!("Options: {:?}", self); - - let chain = spawn_chain_runtime(&config, &self.chain_id) - .unwrap_or_else(exit_with_unrecoverable_error); - - let res = chain.query_client_connections(QueryClientConnectionsRequest { - client_id: self.client_id.clone(), - }); - - match res { - Ok(ce) => Output::success(ce).exit(), - Err(e) => Output::error(format!("{}", e)).exit(), - } - } -} diff --git a/relayer-cli/src/commands/query/clients.rs b/relayer-cli/src/commands/query/clients.rs deleted file mode 100644 index bee5f9263c..0000000000 --- a/relayer-cli/src/commands/query/clients.rs +++ /dev/null @@ -1,103 +0,0 @@ -use abscissa_core::clap::Parser; -use abscissa_core::{Command, Runnable}; -use ibc_relayer::chain::handle::ChainHandle; -use serde::Serialize; - -use ibc::core::ics02_client::client_state::ClientState; -use ibc::core::ics24_host::identifier::{ChainId, ClientId}; -use ibc_relayer::chain::requests::{PageRequest, QueryClientStatesRequest}; - -use crate::cli_utils::spawn_chain_runtime; -use crate::conclude::{exit_with_unrecoverable_error, Output}; -use crate::error::Error; -use crate::prelude::*; - -/// Query clients command -#[derive(Clone, Command, Debug, Parser)] -pub struct QueryAllClientsCmd { - #[clap(required = true, help = "identifier of the chain to query")] - chain_id: ChainId, - - #[clap( - short, - long, - help = "filter for clients which target a specific chain id (implies '-o')", - value_name = "ID" - )] - src_chain_id: Option, - - #[clap(short, long, help = "omit printing the source chain for each client")] - omit_chain_ids: bool, -} - -#[derive(Debug, Serialize)] -struct ClientChain { - client_id: ClientId, - chain_id: ChainId, -} - -/// Command for querying all clients. 
-/// hermes -c cfg.toml query clients ibc-1 -impl Runnable for QueryAllClientsCmd { - fn run(&self) { - let config = app_config(); - - debug!("Options: {:?}", self); - - let chain = spawn_chain_runtime(&config, &self.chain_id) - .unwrap_or_else(exit_with_unrecoverable_error); - - let res: Result<_, Error> = chain - .query_clients(QueryClientStatesRequest { - pagination: Some(PageRequest::all()), - }) - .map_err(Error::relayer); - - match res { - Ok(clients) => { - match self.src_chain_id.clone() { - None => { - match self.omit_chain_ids { - true => { - // Omit chain identifiers - debug!( - "printing identifiers of all clients hosted on chain {}", - self.chain_id - ); - let out: Vec = - clients.into_iter().map(|cs| cs.client_id).collect(); - Output::success(out).exit() - } - false => { - // Include chain identifiers - debug!("printing identifiers (and target chain identifiers) of all clients hosted on chain {}", self.chain_id); - let out: Vec = clients - .into_iter() - .map(|cs| ClientChain { - client_id: cs.client_id, - chain_id: cs.client_state.chain_id(), - }) - .collect(); - Output::success(out).exit() - } - }; - } - Some(source_chain_id) => { - debug!( - "printing identifiers of all clients hosted on chain {} which target chain {}", - self.chain_id, source_chain_id - ); - // Filter and omit chain ids - let out: Vec = clients - .into_iter() - .filter(|cs| cs.client_state.chain_id().eq(&source_chain_id)) - .map(|cs| cs.client_id) - .collect(); - Output::success(out).exit() - } - } - } - Err(e) => Output::error(format!("{}", e)).exit(), - } - } -} diff --git a/relayer-cli/src/commands/query/connection.rs b/relayer-cli/src/commands/query/connection.rs deleted file mode 100644 index 7925c4800b..0000000000 --- a/relayer-cli/src/commands/query/connection.rs +++ /dev/null @@ -1,108 +0,0 @@ -use abscissa_core::clap::Parser; -use abscissa_core::{Command, Runnable}; -use ibc_relayer::chain::handle::ChainHandle; -use ibc_relayer::chain::requests::{ - IncludeProof, PageRequest, QueryConnectionChannelsRequest, QueryConnectionRequest, -}; - -use ibc::core::{ - ics03_connection::connection::State, - ics24_host::identifier::ConnectionId, - ics24_host::identifier::{ChainId, PortChannelId}, -}; - -use crate::cli_utils::spawn_chain_runtime; -use crate::conclude::{exit_with_unrecoverable_error, Output}; -use crate::error::Error; -use crate::prelude::*; - -#[derive(Clone, Command, Debug, Parser)] -pub struct QueryConnectionEndCmd { - #[clap(required = true, help = "identifier of the chain to query")] - chain_id: ChainId, - - #[clap(required = true, help = "identifier of the connection to query")] - connection_id: ConnectionId, - - #[clap(short = 'H', long, help = "height of the state to query")] - height: Option, -} - -// cargo run --bin hermes -- query connection end ibc-test connectionidone --height 3 -impl Runnable for QueryConnectionEndCmd { - fn run(&self) { - let config = app_config(); - - debug!("Options: {:?}", self); - - let chain = spawn_chain_runtime(&config, &self.chain_id) - .unwrap_or_else(exit_with_unrecoverable_error); - - let height = ibc::Height::new(chain.id().version(), self.height.unwrap_or(0_u64)); - let res = chain.query_connection( - QueryConnectionRequest { - connection_id: self.connection_id.clone(), - height, - }, - IncludeProof::No, - ); - match res { - Ok((connection_end, _)) => { - if connection_end.state_matches(&State::Uninitialized) { - Output::error(format!( - "connection '{}' does not exist", - self.connection_id - )) - .exit() - } else { - 
Output::success(connection_end).exit() - } - } - Err(e) => Output::error(format!("{}", e)).exit(), - } - } -} - -/// Command for querying the channel identifiers associated with a connection. -/// Sample invocation: -/// `cargo run --bin hermes -- query connection channels ibc-0 connection-0` -#[derive(Clone, Command, Debug, Parser)] -pub struct QueryConnectionChannelsCmd { - #[clap(required = true, help = "identifier of the chain to query")] - chain_id: ChainId, - - #[clap(required = true, help = "identifier of the connection to query")] - connection_id: ConnectionId, -} - -impl Runnable for QueryConnectionChannelsCmd { - fn run(&self) { - let config = app_config(); - - debug!("Options: {:?}", self); - - let chain = spawn_chain_runtime(&config, &self.chain_id) - .unwrap_or_else(exit_with_unrecoverable_error); - - let res: Result<_, Error> = chain - .query_connection_channels(QueryConnectionChannelsRequest { - connection_id: self.connection_id.clone(), - pagination: Some(PageRequest::all()), - }) - .map_err(Error::relayer); - - match res { - Ok(channels) => { - let ids: Vec = channels - .into_iter() - .map(|identified_channel| PortChannelId { - port_id: identified_channel.port_id, - channel_id: identified_channel.channel_id, - }) - .collect(); - Output::success(ids).exit() - } - Err(e) => Output::error(format!("{}", e)).exit(), - } - } -} diff --git a/relayer-cli/src/commands/query/connections.rs b/relayer-cli/src/commands/query/connections.rs deleted file mode 100644 index 72f04f433a..0000000000 --- a/relayer-cli/src/commands/query/connections.rs +++ /dev/null @@ -1,44 +0,0 @@ -use abscissa_core::clap::Parser; -use abscissa_core::Runnable; - -use ibc::core::ics24_host::identifier::{ChainId, ConnectionId}; -use ibc_relayer::chain::handle::ChainHandle; -use ibc_relayer::chain::requests::{PageRequest, QueryConnectionsRequest}; - -use crate::cli_utils::spawn_chain_runtime; -use crate::conclude::{exit_with_unrecoverable_error, Output}; -use crate::prelude::*; - -#[derive(Clone, Command, Debug, Parser)] -pub struct QueryConnectionsCmd { - #[clap(required = true, help = "identifier of the chain to query")] - chain_id: ChainId, -} - -// hermes query connections ibc-0 -impl Runnable for QueryConnectionsCmd { - fn run(&self) { - let config = app_config(); - - debug!("Options: {:?}", self); - - let chain = spawn_chain_runtime(&config, &self.chain_id) - .unwrap_or_else(exit_with_unrecoverable_error); - - let res = chain.query_connections(QueryConnectionsRequest { - pagination: Some(PageRequest::all()), - }); - - match res { - Ok(connections) => { - let ids: Vec = connections - .into_iter() - .map(|identified_connection| identified_connection.connection_id) - .collect(); - - Output::success(ids).exit() - } - Err(e) => Output::error(format!("{}", e)).exit(), - } - } -} diff --git a/relayer-cli/src/commands/query/packet.rs b/relayer-cli/src/commands/query/packet.rs deleted file mode 100644 index 5ef8b97c42..0000000000 --- a/relayer-cli/src/commands/query/packet.rs +++ /dev/null @@ -1,34 +0,0 @@ -use abscissa_core::clap::Parser; -use abscissa_core::{Command, Runnable}; - -mod ack; -mod acks; -mod commitment; -mod commitments; -mod pending; -mod unreceived_acks; -mod unreceived_packets; - -#[derive(Command, Debug, Parser, Runnable)] -pub enum QueryPacketCmds { - /// Query packet commitments - Commitments(commitments::QueryPacketCommitmentsCmd), - - /// Query packet commitment - Commitment(commitment::QueryPacketCommitmentCmd), - - /// Query packet acknowledgments - 
Acks(acks::QueryPacketAcknowledgementsCmd), - - /// Query packet acknowledgment - Ack(ack::QueryPacketAcknowledgmentCmd), - - /// Query unreceived packets - UnreceivedPackets(unreceived_packets::QueryUnreceivedPacketsCmd), - - /// Query unreceived acknowledgments - UnreceivedAcks(unreceived_acks::QueryUnreceivedAcknowledgementCmd), - - /// Output a summary of pending packets in both directions - Pending(pending::QueryPendingPacketsCmd), -} diff --git a/relayer-cli/src/commands/query/packet/ack.rs b/relayer-cli/src/commands/query/packet/ack.rs deleted file mode 100644 index cfd45e3b3f..0000000000 --- a/relayer-cli/src/commands/query/packet/ack.rs +++ /dev/null @@ -1,68 +0,0 @@ -use abscissa_core::clap::Parser; -use abscissa_core::{Command, Runnable}; -use ibc_relayer::chain::requests::{IncludeProof, QueryPacketAcknowledgementRequest}; -use subtle_encoding::{Encoding, Hex}; - -use ibc::core::ics04_channel::packet::Sequence; -use ibc::core::ics24_host::identifier::{ChainId, ChannelId, PortId}; -use ibc::Height; -use ibc_relayer::chain::handle::ChainHandle; - -use crate::cli_utils::spawn_chain_runtime; -use crate::conclude::Output; -use crate::error::Error; -use crate::prelude::*; - -#[derive(Clone, Command, Debug, Parser)] -pub struct QueryPacketAcknowledgmentCmd { - #[clap(required = true, help = "identifier of the chain to query")] - chain_id: ChainId, - - #[clap(required = true, help = "identifier of the port to query")] - port_id: PortId, - - #[clap(required = true, help = "identifier of the channel to query")] - channel_id: ChannelId, - - #[clap(required = true, help = "sequence of packet to query")] - sequence: Sequence, - - #[clap(short = 'H', long, help = "height of the state to query")] - height: Option, -} - -impl QueryPacketAcknowledgmentCmd { - fn execute(&self) -> Result { - let config = app_config(); - - debug!("Options: {:?}", self); - - let chain = spawn_chain_runtime(&config, &self.chain_id)?; - - chain - .query_packet_acknowledgement( - QueryPacketAcknowledgementRequest { - port_id: self.port_id.clone(), - channel_id: self.channel_id, - sequence: self.sequence, - height: Height::new(chain.id().version(), self.height.unwrap_or(0_u64)), - }, - IncludeProof::No, - ) - .map_err(Error::relayer) - .map(|(bytes, _)| { - Hex::upper_case() - .encode_to_string(bytes.clone()) - .unwrap_or_else(|_| format!("{:?}", bytes)) - }) - } -} - -impl Runnable for QueryPacketAcknowledgmentCmd { - fn run(&self) { - match self.execute() { - Ok(hex) => Output::success(hex).exit(), - Err(e) => Output::error(format!("{}", e)).exit(), - } - } -} diff --git a/relayer-cli/src/commands/query/packet/acks.rs b/relayer-cli/src/commands/query/packet/acks.rs deleted file mode 100644 index d7179dc79d..0000000000 --- a/relayer-cli/src/commands/query/packet/acks.rs +++ /dev/null @@ -1,63 +0,0 @@ -use abscissa_core::clap::Parser; -use abscissa_core::{Command, Runnable}; -use ibc::core::ics04_channel::packet::Sequence; -use serde::Serialize; - -use ibc::core::ics24_host::identifier::{ChainId, ChannelId, PortId}; -use ibc::Height; -use ibc_relayer::chain::handle::BaseChainHandle; - -use crate::cli_utils::spawn_chain_counterparty; -use crate::conclude::Output; -use crate::error::Error; -use crate::prelude::*; -use ibc_relayer::chain::counterparty::acknowledgements_on_chain; - -#[derive(Serialize, Debug)] -struct PacketSeqs { - height: Height, - seqs: Vec, -} - -#[derive(Clone, Command, Debug, Parser)] -pub struct QueryPacketAcknowledgementsCmd { - #[clap(required = true, help = "identifier of the chain to query")] - 
chain_id: ChainId, - - #[clap(required = true, help = "identifier of the port to query")] - port_id: PortId, - - #[clap(required = true, help = "identifier of the channel to query")] - channel_id: ChannelId, -} - -impl QueryPacketAcknowledgementsCmd { - fn execute(&self) -> Result { - let config = app_config(); - - debug!("Options: {:?}", self); - - let (chains, chan_conn_cli) = spawn_chain_counterparty::( - &config, - &self.chain_id, - &self.port_id, - &self.channel_id, - )?; - - let (seqs, height) = - acknowledgements_on_chain(&chains.src, &chains.dst, &chan_conn_cli.channel) - .map_err(Error::supervisor)?; - - Ok(PacketSeqs { seqs, height }) - } -} - -// cargo run --bin hermes -- query packet acknowledgements ibc-0 transfer ibconexfer --height 3 -impl Runnable for QueryPacketAcknowledgementsCmd { - fn run(&self) { - match self.execute() { - Ok(ps) => Output::success(ps).exit(), - Err(e) => Output::error(format!("{}", e)).exit(), - } - } -} diff --git a/relayer-cli/src/commands/query/packet/commitment.rs b/relayer-cli/src/commands/query/packet/commitment.rs deleted file mode 100644 index 47c941df79..0000000000 --- a/relayer-cli/src/commands/query/packet/commitment.rs +++ /dev/null @@ -1,78 +0,0 @@ -use abscissa_core::clap::Parser; -use abscissa_core::{Command, Runnable}; -use ibc_relayer::chain::requests::{IncludeProof, QueryPacketCommitmentRequest}; -use serde::Serialize; -use subtle_encoding::{Encoding, Hex}; - -use ibc::core::ics04_channel::packet::Sequence; -use ibc::core::ics24_host::identifier::{ChainId, ChannelId, PortId}; -use ibc::Height; -use ibc_relayer::chain::handle::ChainHandle; - -use crate::cli_utils::spawn_chain_runtime; -use crate::conclude::Output; -use crate::error::Error; -use crate::prelude::*; - -#[derive(Serialize, Debug)] -struct PacketSeqs { - height: Height, - seqs: Vec, -} - -#[derive(Clone, Command, Debug, Parser)] -pub struct QueryPacketCommitmentCmd { - #[clap(required = true, help = "identifier of the chain to query")] - chain_id: ChainId, - - #[clap(required = true, help = "identifier of the port to query")] - port_id: PortId, - - #[clap(required = true, help = "identifier of the channel to query")] - channel_id: ChannelId, - - #[clap(required = true, help = "sequence of packet to query")] - sequence: Sequence, - - #[clap(short = 'H', long, help = "height of the state to query")] - height: Option, -} - -impl QueryPacketCommitmentCmd { - fn execute(&self) -> Result { - let config = app_config(); - - debug!("Options: {:?}", self); - - let chain = spawn_chain_runtime(&config, &self.chain_id)?; - - let (bytes, _) = chain - .query_packet_commitment( - QueryPacketCommitmentRequest { - port_id: self.port_id.clone(), - channel_id: self.channel_id, - sequence: self.sequence, - height: Height::new(chain.id().version(), self.height.unwrap_or(0_u64)), - }, - IncludeProof::No, - ) - .map_err(Error::relayer)?; - - if bytes.is_empty() { - Ok("None".to_owned()) - } else { - Ok(Hex::upper_case() - .encode_to_string(bytes.clone()) - .unwrap_or_else(|_| format!("{:?}", bytes))) - } - } -} - -impl Runnable for QueryPacketCommitmentCmd { - fn run(&self) { - match self.execute() { - Ok(hex) => Output::success(hex).exit(), - Err(e) => Output::error(format!("{}", e)).exit(), - } - } -} diff --git a/relayer-cli/src/commands/query/packet/commitments.rs b/relayer-cli/src/commands/query/packet/commitments.rs deleted file mode 100644 index 15dfcac521..0000000000 --- a/relayer-cli/src/commands/query/packet/commitments.rs +++ /dev/null @@ -1,58 +0,0 @@ -use abscissa_core::clap::Parser; 
-use abscissa_core::{Command, Runnable}; -use ibc::core::ics04_channel::packet::Sequence; -use serde::Serialize; - -use ibc::core::ics24_host::identifier::{ChainId, ChannelId, PortId}; -use ibc::Height; -use ibc_relayer::chain::counterparty::commitments_on_chain; - -use crate::cli_utils::spawn_chain_runtime; -use crate::conclude::Output; -use crate::error::Error; -use crate::prelude::*; - -#[derive(Serialize, Debug)] -struct PacketSeqs { - height: Height, - seqs: Vec, -} - -#[derive(Clone, Command, Debug, Parser)] -pub struct QueryPacketCommitmentsCmd { - #[clap(required = true, help = "identifier of the chain to query")] - chain_id: ChainId, - - #[clap(required = true, help = "identifier of the port to query")] - port_id: PortId, - - #[clap(required = true, help = "identifier of the channel to query")] - channel_id: ChannelId, -} - -impl QueryPacketCommitmentsCmd { - fn execute(&self) -> Result { - let config = app_config(); - - debug!("Options: {:?}", self); - - let chain = spawn_chain_runtime(&config, &self.chain_id)?; - - commitments_on_chain(&chain, &self.port_id, &self.channel_id) - .map_err(Error::supervisor) - .map(|(seqs_vec, height)| PacketSeqs { - height, - seqs: seqs_vec, - }) - } -} - -// cargo run --bin hermes -- query packet commitments ibc-0 transfer ibconexfer --height 3 -impl Runnable for QueryPacketCommitmentsCmd { - fn run(&self) { - match self.execute() { - Ok(p) => Output::success(p).exit(), - Err(e) => Output::error(format!("{}", e)).exit(), - } - } -} diff --git a/relayer-cli/src/commands/query/packet/pending.rs b/relayer-cli/src/commands/query/packet/pending.rs deleted file mode 100644 index e59a7a1986..0000000000 --- a/relayer-cli/src/commands/query/packet/pending.rs +++ /dev/null @@ -1,95 +0,0 @@ -use abscissa_core::clap::Parser; -use abscissa_core::{Command, Runnable}; -use serde::Serialize; - -use ibc::core::ics24_host::identifier::{ChainId, ChannelId, PortId}; -use ibc_relayer::chain::counterparty::{ - channel_on_destination, pending_packet_summary, PendingPackets, -}; -use ibc_relayer::chain::handle::BaseChainHandle; - -use crate::cli_utils::spawn_chain_counterparty; -use crate::conclude::Output; -use crate::error::Error; -use crate::prelude::*; - -/// A structure to display pending packet commitment sequence IDs -/// at both ends of a channel. -#[derive(Debug, Serialize)] -struct Summary { - /// The packets sent on the source chain as identified by the command. - src: PendingPackets, - /// The packets sent on the counterparty chain. - dst: PendingPackets, -} - -/// This command does the following: -/// -/// 1. queries the chain to get its counterparty chain, channel and port identifiers (needed in 2) -/// 2. queries both chains for all packet commitments/ sequences for the given port and channel -/// and its counterparty. -/// 3. queries both chains for the unreceived sequences and acks out of the lists obtained in 2. 
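// A conceptual sketch of step 3 above, shown for illustration only and not taken from
// the relayer sources: once the packet commitments found on the source chain (step 2)
// and the sequences the counterparty reports as received are known, the pending packets
// are simply the commitments with no matching receipt. Sequences appear as bare u64
// values here for simplicity; the command below relies on the `pending_packet_summary`
// helper instead.
fn pending_sequences(commitments_on_src: &[u64], received_on_dst: &[u64]) -> Vec<u64> {
    commitments_on_src
        .iter()
        .copied()
        .filter(|seq| !received_on_dst.contains(seq))
        .collect()
}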
-#[derive(Clone, Command, Debug, Parser)] -pub struct QueryPendingPacketsCmd { - #[clap( - required = true, - help = "identifier of the chain at one end of the channel" - )] - chain_id: ChainId, - - #[clap( - required = true, - help = "port identifier on the chain given by " - )] - port_id: PortId, - - #[clap( - required = true, - help = "channel identifier on the chain given by " - )] - channel_id: ChannelId, -} - -impl QueryPendingPacketsCmd { - fn execute(&self) -> Result { - let config = app_config(); - - let (chains, chan_conn_cli) = spawn_chain_counterparty::( - &config, - &self.chain_id, - &self.port_id, - &self.channel_id, - )?; - - debug!( - chain=%self.chain_id, - "fetched channel from source chain: {:?}", - chan_conn_cli.channel - ); - - let src_summary = pending_packet_summary(&chains.src, &chains.dst, &chan_conn_cli.channel) - .map_err(Error::supervisor)?; - let counterparty_channel = channel_on_destination( - &chan_conn_cli.channel, - &chan_conn_cli.connection, - &chains.dst, - ) - .map_err(Error::supervisor)? - .ok_or_else(|| Error::missing_counterparty_channel_id(chan_conn_cli.channel))?; - let dst_summary = pending_packet_summary(&chains.dst, &chains.src, &counterparty_channel) - .map_err(Error::supervisor)?; - Ok(Summary { - src: src_summary, - dst: dst_summary, - }) - } -} - -impl Runnable for QueryPendingPacketsCmd { - fn run(&self) { - match self.execute() { - Ok(pending) => Output::success(pending).exit(), - Err(e) => Output::error(format!("{}", e)).exit(), - } - } -} diff --git a/relayer-cli/src/commands/query/packet/unreceived_acks.rs b/relayer-cli/src/commands/query/packet/unreceived_acks.rs deleted file mode 100644 index a27ee607b3..0000000000 --- a/relayer-cli/src/commands/query/packet/unreceived_acks.rs +++ /dev/null @@ -1,63 +0,0 @@ -use abscissa_core::clap::Parser; -use abscissa_core::{Command, Runnable}; - -use ibc::core::ics04_channel::packet::Sequence; -use ibc::core::ics24_host::identifier::{ChainId, ChannelId, PortId}; -use ibc_relayer::chain::counterparty::unreceived_acknowledgements; -use ibc_relayer::chain::handle::BaseChainHandle; - -use crate::cli_utils::spawn_chain_counterparty; -use crate::conclude::Output; -use crate::error::Error; -use crate::prelude::*; - -/// This command does the following: -/// 1. queries the chain to get its counterparty, channel and port identifiers (needed in 2) -/// 2. queries the chain for all packet commitments/ sequences for a given port and channel -/// 3. queries the counterparty chain for the unacknowledged sequences out of the list obtained in 2. 
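// Example invocation, following the convention used by the other packet subcommands in
// this module (the chain, port and channel identifiers are placeholders, and the
// subcommand name assumes clap's default kebab-case renaming of `UnreceivedAcks`):
// cargo run --bin hermes -- query packet unreceived-acks ibc-0 transfer channel-0
// On success the command prints the resulting list of packet sequence numbers.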
-#[derive(Clone, Command, Debug, Parser)] -pub struct QueryUnreceivedAcknowledgementCmd { - #[clap( - required = true, - help = "identifier of the chain to query the unreceived acknowledgments" - )] - chain_id: ChainId, - - #[clap(required = true, help = "port identifier")] - port_id: PortId, - - #[clap(required = true, help = "channel identifier")] - channel_id: ChannelId, -} - -impl QueryUnreceivedAcknowledgementCmd { - fn execute(&self) -> Result, Error> { - let config = app_config(); - debug!("Options: {:?}", self); - - let (chains, chan_conn_cli) = spawn_chain_counterparty::( - &config, - &self.chain_id, - &self.port_id, - &self.channel_id, - )?; - - debug!( - "fetched from source chain {} the following channel {:?}", - self.chain_id, chan_conn_cli.channel, - ); - - unreceived_acknowledgements(&chains.src, &chains.dst, &(&chan_conn_cli.channel).into()) - .map(|(sns, _)| sns) - .map_err(Error::supervisor) - } -} - -impl Runnable for QueryUnreceivedAcknowledgementCmd { - fn run(&self) { - match self.execute() { - Ok(seqs) => Output::success(seqs).exit(), - Err(e) => Output::error(format!("{}", e)).exit(), - } - } -} diff --git a/relayer-cli/src/commands/query/packet/unreceived_packets.rs b/relayer-cli/src/commands/query/packet/unreceived_packets.rs deleted file mode 100644 index fe027c50d7..0000000000 --- a/relayer-cli/src/commands/query/packet/unreceived_packets.rs +++ /dev/null @@ -1,63 +0,0 @@ -use abscissa_core::clap::Parser; -use abscissa_core::{Command, Runnable}; - -use ibc::core::ics04_channel::packet::Sequence; -use ibc::core::ics24_host::identifier::{ChainId, ChannelId, PortId}; -use ibc_relayer::chain::counterparty::unreceived_packets; -use ibc_relayer::chain::handle::BaseChainHandle; - -use crate::cli_utils::spawn_chain_counterparty; -use crate::conclude::Output; -use crate::error::Error; -use crate::prelude::*; - -/// This command does the following: -/// 1. queries the chain to get its counterparty chain, channel and port identifiers (needed in 2) -/// 2. queries the counterparty chain for all packet commitments/ sequences for a given port and channel -/// 3. queries the chain for the unreceived sequences out of the list obtained in 2. -#[derive(Clone, Command, Debug, Parser)] -pub struct QueryUnreceivedPacketsCmd { - #[clap( - required = true, - help = "identifier of the chain for the unreceived sequences" - )] - chain_id: ChainId, - - #[clap(required = true, help = "port identifier")] - port_id: PortId, - - #[clap(required = true, help = "channel identifier")] - channel_id: ChannelId, -} - -impl QueryUnreceivedPacketsCmd { - fn execute(&self) -> Result, Error> { - let config = app_config(); - debug!("Options: {:?}", self); - - let (chains, chan_conn_cli) = spawn_chain_counterparty::( - &config, - &self.chain_id, - &self.port_id, - &self.channel_id, - )?; - - debug!( - "fetched from source chain {} the following channel {:?}", - self.chain_id, chan_conn_cli.channel - ); - - unreceived_packets(&chains.src, &chains.dst, &(&chan_conn_cli.channel).into()) - .map_err(Error::supervisor) - .map(|(seq, _)| seq) - } -} - -impl Runnable for QueryUnreceivedPacketsCmd { - fn run(&self) { - match self.execute() { - Ok(seqs) => Output::success(seqs).exit(), - Err(e) => Output::error(format!("{}", e)).exit(), - } - } -} diff --git a/relayer-cli/src/commands/query/tx.rs b/relayer-cli/src/commands/query/tx.rs deleted file mode 100644 index 0a7849ed0e..0000000000 --- a/relayer-cli/src/commands/query/tx.rs +++ /dev/null @@ -1,13 +0,0 @@ -//! 
`query tx` subcommand - -use abscissa_core::clap::Parser; -use abscissa_core::{Command, Runnable}; - -mod events; - -/// `query tx` subcommand -#[derive(Command, Debug, Parser, Runnable)] -pub enum QueryTxCmd { - /// Query the events emitted by transaction - Events(events::QueryTxEventsCmd), -} diff --git a/relayer-cli/src/commands/query/tx/events.rs b/relayer-cli/src/commands/query/tx/events.rs deleted file mode 100644 index ed22017250..0000000000 --- a/relayer-cli/src/commands/query/tx/events.rs +++ /dev/null @@ -1,52 +0,0 @@ -use core::str::FromStr; - -use abscissa_core::clap::Parser; -use abscissa_core::{Command, Runnable}; -use tracing::debug; - -use tendermint::abci::transaction::Hash; - -use ibc::core::ics24_host::identifier::ChainId; -use ibc::query::{QueryTxHash, QueryTxRequest}; - -use ibc_relayer::chain::handle::ChainHandle; - -use crate::cli_utils::spawn_chain_runtime; -use crate::conclude::{exit_with_unrecoverable_error, Output}; -use crate::error::Error; -use crate::prelude::app_config; - -/// Query the events emitted by transaction -#[derive(Clone, Command, Debug, Parser)] -pub struct QueryTxEventsCmd { - #[clap(required = true, help = "identifier of the chain to query")] - chain_id: ChainId, - - #[clap(required = true, help = "transaction hash to query")] - hash: String, -} - -// cargo run --bin hermes -- query tx events ibc-0 B8E78AD83810239E21863AC7B5FC4F99396ABB39EB534F721EEF43A4979C2821 -impl Runnable for QueryTxEventsCmd { - fn run(&self) { - let config = app_config(); - - debug!("Options: {:?}", self); - - let chain = spawn_chain_runtime(&config, &self.chain_id) - .unwrap_or_else(exit_with_unrecoverable_error); - - let res = Hash::from_str(self.hash.as_str()) - .map_err(|e| Error::invalid_hash(self.hash.clone(), e)) - .and_then(|h| { - chain - .query_txs(QueryTxRequest::Transaction(QueryTxHash(h))) - .map_err(Error::relayer) - }); - - match res { - Ok(res) => Output::success(res).exit(), - Err(e) => Output::error(format!("{}", e)).exit(), - } - } -} diff --git a/relayer-cli/src/commands/start.rs b/relayer-cli/src/commands/start.rs deleted file mode 100644 index 2ff33a4945..0000000000 --- a/relayer-cli/src/commands/start.rs +++ /dev/null @@ -1,190 +0,0 @@ -use ibc_relayer::supervisor::SupervisorOptions; -use std::error::Error; -use std::io; - -use abscissa_core::clap::Parser; -use abscissa_core::{Command, Runnable}; -use crossbeam_channel::Sender; - -use ibc_relayer::chain::handle::{CachingChainHandle, ChainHandle}; -use ibc_relayer::config::Config; -use ibc_relayer::registry::SharedRegistry; -use ibc_relayer::rest; -use ibc_relayer::supervisor::{cmd::SupervisorCmd, spawn_supervisor, SupervisorHandle}; - -use crate::conclude::json; -use crate::conclude::Output; -use crate::prelude::*; - -#[derive(Clone, Command, Debug, Parser)] -pub struct StartCmd { - #[clap( - short = 'f', - long = "full-scan", - help = "Force a full scan of the chains for clients, connections and channels" - )] - full_scan: bool, -} - -impl Runnable for StartCmd { - fn run(&self) { - let config = (*app_config()).clone(); - - let supervisor_handle = make_supervisor::(config, self.full_scan) - .unwrap_or_else(|e| { - Output::error(format!("Hermes failed to start, last error: {}", e)).exit() - }); - - match crate::config::config_path() { - Some(_) => { - register_signals(supervisor_handle.sender.clone()).unwrap_or_else(|e| { - warn!("failed to install signal handler: {}", e); - }); - } - None => { - warn!("cannot figure out configuration path, skipping registration of signal handlers"); - } - }; - - 
info!("Hermes has started"); - - supervisor_handle.wait(); - } -} - -/// Register the SIGHUP and SIGUSR1 signals, and notify the supervisor. -/// - [DEPRECATED] SIGHUP: Trigger a reload of the configuration. -/// - SIGUSR1: Ask the supervisor to dump its state and print it to the console. -fn register_signals(tx_cmd: Sender) -> Result<(), io::Error> { - use signal_hook::{consts::signal::*, iterator::Signals}; - - let sigs = vec![ - SIGHUP, // Reload of configuration (disabled) - SIGUSR1, // Dump state - ]; - - let mut signals = Signals::new(&sigs)?; - - std::thread::spawn(move || { - for signal in &mut signals { - match signal { - SIGHUP => warn!( - "configuration reloading via SIGHUP has been disabled, \ - the signal handler will be removed in the future" - ), - SIGUSR1 => { - info!("dumping state (triggered by SIGUSR1)"); - - let (tx, rx) = crossbeam_channel::bounded(1); - tx_cmd.try_send(SupervisorCmd::DumpState(tx)).unwrap(); - - std::thread::spawn(move || { - if let Ok(state) = rx.recv() { - if json() { - match serde_json::to_string(&state) { - Ok(out) => println!("{}", out), - Err(e) => { - error!("failed to serialize relayer state to JSON: {}", e) - } - } - } else { - state.print_info(); - } - } - }); - } - - _ => (), - } - } - }); - - Ok(()) -} - -#[cfg(feature = "rest-server")] -fn spawn_rest_server(config: &Config) -> Option { - let rest = config.rest.clone(); - - if rest.enabled { - let rest_config = ibc_relayer_rest::Config::new(rest.host, rest.port); - let (_, rest_receiver) = ibc_relayer_rest::server::spawn(rest_config); - Some(rest_receiver) - } else { - info!("[rest] address not configured, REST server disabled"); - None - } -} - -#[cfg(not(feature = "rest-server"))] -fn spawn_rest_server(config: &Config) -> Option { - let rest = config.rest.clone(); - - if rest.enabled { - warn!( - "REST server enabled in the config but Hermes was built without REST support, \ - build Hermes with --features=rest-server to enable REST support." - ); - - None - } else { - None - } -} - -#[cfg(feature = "telemetry")] -fn spawn_telemetry_server(config: &Config) -> Result<(), Box> { - let state = ibc_telemetry::global(); - - let telemetry = config.telemetry.clone(); - if telemetry.enabled { - match ibc_telemetry::spawn((telemetry.host, telemetry.port), state.clone()) { - Ok((addr, _)) => { - info!( - "telemetry service running, exposing metrics at http://{}/metrics", - addr - ); - } - Err(e) => { - error!("telemetry service failed to start: {}", e); - return Err(e); - } - } - } - - Ok(()) -} - -#[cfg(not(feature = "telemetry"))] -fn spawn_telemetry_server( - config: &Arc>, -) -> Result<(), Box> { - if config.read().expect("poisoned lock").telemetry.enabled { - warn!( - "telemetry enabled in the config but Hermes was built without telemetry support, \ - build Hermes with --features=telemetry to enable telemetry support." - ); - } - - Ok(()) -} - -fn make_supervisor( - config: Config, - force_full_scan: bool, -) -> Result> { - let registry = SharedRegistry::::new(config.clone()); - spawn_telemetry_server(&config)?; - - let rest = spawn_rest_server(&config); - - Ok(spawn_supervisor( - config, - registry, - rest, - SupervisorOptions { - health_check: true, - force_full_scan, - }, - )?) -} diff --git a/relayer-cli/src/commands/tx.rs b/relayer-cli/src/commands/tx.rs deleted file mode 100644 index 64c3ee31b4..0000000000 --- a/relayer-cli/src/commands/tx.rs +++ /dev/null @@ -1,98 +0,0 @@ -//! 
`tx` subcommand -use abscissa_core::clap::Parser; -use abscissa_core::{config::Override, Command, Runnable}; -use ibc_relayer::config::Config; - -use crate::commands::tx::client::{ - TxCreateClientCmd, TxUpdateClientCmd, TxUpgradeClientCmd, TxUpgradeClientsCmd, -}; - -mod channel; -pub(crate) mod client; -mod connection; -mod packet; -mod transfer; -mod upgrade; - -/// `tx` subcommand -#[allow(clippy::large_enum_variant)] -#[derive(Command, Debug, Parser, Runnable)] -pub enum TxCmd { - /// Raw commands for sending transactions to a configured chain. - #[clap(subcommand)] - Raw(TxRawCommands), -} - -#[derive(Command, Debug, Parser, Runnable)] -pub enum TxRawCommands { - /// Create a client for source chain on destination chain - CreateClient(TxCreateClientCmd), - - /// Update the specified client on destination chain - UpdateClient(TxUpdateClientCmd), - - /// Upgrade the specified client on destination chain - UpgradeClient(TxUpgradeClientCmd), - - /// Upgrade all IBC clients that target a specific chain - UpgradeClients(TxUpgradeClientsCmd), - - /// Initialize a connection (ConnectionOpenInit) - ConnInit(connection::TxRawConnInitCmd), - - /// Relay the connection attempt (ConnectionOpenTry) - ConnTry(connection::TxRawConnTryCmd), - - /// Relay acknowledgment of a connection attempt (ConnectionOpenAck) - ConnAck(connection::TxRawConnAckCmd), - - /// Confirm opening of a connection (ConnectionOpenConfirm) - ConnConfirm(connection::TxRawConnConfirmCmd), - - /// Initialize a channel (ChannelOpenInit) - ChanOpenInit(channel::TxRawChanOpenInitCmd), - - /// Relay the channel attempt (ChannelOpenTry) - ChanOpenTry(channel::TxRawChanOpenTryCmd), - - /// Relay acknowledgment of a channel attempt (ChannelOpenAck) - ChanOpenAck(channel::TxRawChanOpenAckCmd), - - /// Confirm opening of a channel (ChannelOpenConfirm) - ChanOpenConfirm(channel::TxRawChanOpenConfirmCmd), - - /// Initiate the closing of a channel (ChannelCloseInit) - ChanCloseInit(channel::TxRawChanCloseInitCmd), - - /// Confirm the closing of a channel (ChannelCloseConfirm) - ChanCloseConfirm(channel::TxRawChanCloseConfirmCmd), - - /// Send a fungible token transfer test transaction (ICS20 MsgTransfer) - FtTransfer(transfer::TxIcs20MsgTransferCmd), - - /// Relay receive or timeout packets - PacketRecv(packet::TxRawPacketRecvCmd), - - /// Relay acknowledgment packets - PacketAck(packet::TxRawPacketAckCmd), - - /// Send an IBC upgrade plan - UpgradeChain(upgrade::TxIbcUpgradeChainCmd), -} - -impl Override for TxCmd { - fn override_config(&self, config: Config) -> Result { - match self { - Self::Raw(cmd) => cmd.override_config(config), - } - } -} - -impl Override for TxRawCommands { - fn override_config(&self, config: Config) -> Result { - match self { - Self::FtTransfer(cmd) => cmd.override_config(config), - _ => Ok(config), - } - } -} diff --git a/relayer-cli/src/commands/tx/channel.rs b/relayer-cli/src/commands/tx/channel.rs deleted file mode 100644 index d0c2187b34..0000000000 --- a/relayer-cli/src/commands/tx/channel.rs +++ /dev/null @@ -1,471 +0,0 @@ -use abscissa_core::clap::Parser; -use abscissa_core::{Command, Runnable}; - -use ibc::core::ics03_connection::connection::ConnectionEnd; -use ibc::core::ics04_channel::channel::Order; -use ibc::core::ics24_host::identifier::{ChainId, ChannelId, ClientId, ConnectionId, PortId}; -use ibc::events::IbcEvent; -use ibc::Height; -use ibc_relayer::chain::handle::ChainHandle; -use ibc_relayer::chain::requests::{IncludeProof, QueryConnectionRequest}; -use ibc_relayer::channel::{Channel, 
ChannelSide}; - -use crate::cli_utils::ChainHandlePair; -use crate::conclude::Output; -use crate::error::Error; -use crate::prelude::*; - -macro_rules! tx_chan_cmd { - ($dbg_string:literal, $func:ident, $self:expr, $chan:expr) => { - let config = app_config(); - - let chains = match ChainHandlePair::spawn(&config, &$self.src_chain_id, &$self.dst_chain_id) - { - Ok(chains) => chains, - Err(e) => Output::error(format!("{}", e)).exit(), - }; - - // Retrieve the connection - let dst_connection = match chains.dst.query_connection( - QueryConnectionRequest { - connection_id: $self.dst_conn_id.clone(), - height: Height::default(), - }, - IncludeProof::No, - ) { - Ok((connection, _)) => connection, - Err(e) => Output::error(format!("{}", e)).exit(), - }; - - let channel = $chan(chains, dst_connection); - - info!("Message {}: {:?}", $dbg_string, channel); - - let res: Result = channel.$func().map_err(Error::channel); - - match res { - Ok(receipt) => Output::success(receipt).exit(), - Err(e) => Output::error(format!("{}", e)).exit(), - } - }; -} - -#[derive(Clone, Command, Debug, Parser)] -pub struct TxRawChanOpenInitCmd { - #[clap(required = true, help = "identifier of the destination chain")] - dst_chain_id: ChainId, - - #[clap(required = true, help = "identifier of the source chain")] - src_chain_id: ChainId, - - #[clap(required = true, help = "identifier of the destination connection")] - dst_conn_id: ConnectionId, - - #[clap(required = true, help = "identifier of the destination port")] - dst_port_id: PortId, - - #[clap(required = true, help = "identifier of the source port")] - src_port_id: PortId, - - #[clap( - short, - long, - default_value_t, - help = "the channel ordering, valid options 'unordered' (default) and 'ordered'" - )] - order: Order, -} - -impl Runnable for TxRawChanOpenInitCmd { - fn run(&self) { - let config = app_config(); - - let chains = match ChainHandlePair::spawn(&config, &self.src_chain_id, &self.dst_chain_id) { - Ok(chains) => chains, - Err(e) => Output::error(format!("{}", e)).exit(), - }; - - // Retrieve the connection - let dst_connection = match chains.dst.query_connection( - QueryConnectionRequest { - connection_id: self.dst_conn_id.clone(), - height: Height::default(), - }, - IncludeProof::No, - ) { - Ok((connection, _)) => connection, - Err(e) => Output::error(format!("{}", e)).exit(), - }; - - let channel = Channel { - connection_delay: Default::default(), - ordering: self.order, - a_side: ChannelSide::new( - chains.src, - ClientId::default(), - ConnectionId::default(), - self.src_port_id.clone(), - None, - None, - ), - b_side: ChannelSide::new( - chains.dst, - dst_connection.client_id().clone(), - self.dst_conn_id.clone(), - self.dst_port_id.clone(), - None, - None, - ), - }; - - info!("Message ChanOpenInit: {:?}", channel); - - let res: Result = channel - .build_chan_open_init_and_send() - .map_err(Error::channel); - - match res { - Ok(receipt) => Output::success(receipt).exit(), - Err(e) => Output::error(format!("{}", e)).exit(), - } - } -} - -#[derive(Clone, Command, Debug, Parser)] -pub struct TxRawChanOpenTryCmd { - #[clap(required = true, help = "identifier of the destination chain")] - dst_chain_id: ChainId, - - #[clap(required = true, help = "identifier of the source chain")] - src_chain_id: ChainId, - - #[clap(required = true, help = "identifier of the destination connection")] - dst_conn_id: ConnectionId, - - #[clap(required = true, help = "identifier of the destination port")] - dst_port_id: PortId, - - #[clap(required = true, help = "identifier of 
the source port")] - src_port_id: PortId, - - #[clap( - short = 's', - long, - required = true, - help = "identifier of the source channel (required)", - value_name = "ID" - )] - src_chan_id: ChannelId, - - #[clap( - short = 'd', - long, - help = "identifier of the destination channel (optional)", - value_name = "ID" - )] - dst_chan_id: Option, -} - -impl Runnable for TxRawChanOpenTryCmd { - fn run(&self) { - tx_chan_cmd!( - "ChanOpenTry", - build_chan_open_try_and_send, - self, - |chains: ChainHandlePair, dst_connection: ConnectionEnd| { - Channel { - connection_delay: Default::default(), - ordering: Order::default(), - a_side: ChannelSide::new( - chains.src, - ClientId::default(), - ConnectionId::default(), - self.src_port_id.clone(), - Some(self.src_chan_id), - None, - ), - b_side: ChannelSide::new( - chains.dst, - dst_connection.client_id().clone(), - self.dst_conn_id.clone(), - self.dst_port_id.clone(), - self.dst_chan_id, - None, - ), - } - } - ); - } -} - -#[derive(Clone, Command, Debug, Parser)] -pub struct TxRawChanOpenAckCmd { - #[clap(required = true, help = "identifier of the destination chain")] - dst_chain_id: ChainId, - - #[clap(required = true, help = "identifier of the source chain")] - src_chain_id: ChainId, - - #[clap(required = true, help = "identifier of the destination connection")] - dst_conn_id: ConnectionId, - - #[clap(required = true, help = "identifier of the destination port")] - dst_port_id: PortId, - - #[clap(required = true, help = "identifier of the source port")] - src_port_id: PortId, - - #[clap( - short = 'd', - long, - required = true, - help = "identifier of the destination channel (required)", - value_name = "ID" - )] - dst_chan_id: ChannelId, - - #[clap( - short = 's', - long, - required = true, - help = "identifier of the source channel (required)", - value_name = "ID" - )] - src_chan_id: ChannelId, -} - -impl Runnable for TxRawChanOpenAckCmd { - fn run(&self) { - tx_chan_cmd!( - "ChanOpenAck", - build_chan_open_ack_and_send, - self, - |chains: ChainHandlePair, dst_connection: ConnectionEnd| { - Channel { - connection_delay: Default::default(), - ordering: Order::default(), - a_side: ChannelSide::new( - chains.src, - ClientId::default(), - ConnectionId::default(), - self.src_port_id.clone(), - Some(self.src_chan_id), - None, - ), - b_side: ChannelSide::new( - chains.dst, - dst_connection.client_id().clone(), - self.dst_conn_id.clone(), - self.dst_port_id.clone(), - Some(self.dst_chan_id), - None, - ), - } - } - ); - } -} - -#[derive(Clone, Command, Debug, Parser)] -pub struct TxRawChanOpenConfirmCmd { - #[clap(required = true, help = "identifier of the destination chain")] - dst_chain_id: ChainId, - - #[clap(required = true, help = "identifier of the source chain")] - src_chain_id: ChainId, - - #[clap(required = true, help = "identifier of the destination connection")] - dst_conn_id: ConnectionId, - - #[clap(required = true, help = "identifier of the destination port")] - dst_port_id: PortId, - - #[clap(required = true, help = "identifier of the source port")] - src_port_id: PortId, - - #[clap( - short = 'd', - long, - required = true, - help = "identifier of the destination channel (required)", - value_name = "ID" - )] - dst_chan_id: ChannelId, - - #[clap( - short = 's', - long, - required = true, - help = "identifier of the source channel (required)", - value_name = "ID" - )] - src_chan_id: ChannelId, -} - -impl Runnable for TxRawChanOpenConfirmCmd { - fn run(&self) { - tx_chan_cmd!( - "ChanOpenConfirm", - build_chan_open_confirm_and_send, - self, 
- |chains: ChainHandlePair, dst_connection: ConnectionEnd| { - Channel { - connection_delay: Default::default(), - ordering: Order::default(), - a_side: ChannelSide::new( - chains.src, - ClientId::default(), - ConnectionId::default(), - self.src_port_id.clone(), - Some(self.src_chan_id), - None, - ), - b_side: ChannelSide::new( - chains.dst, - dst_connection.client_id().clone(), - self.dst_conn_id.clone(), - self.dst_port_id.clone(), - Some(self.dst_chan_id), - None, - ), - } - } - ); - } -} - -#[derive(Clone, Command, Debug, Parser)] -pub struct TxRawChanCloseInitCmd { - #[clap(required = true, help = "identifier of the destination chain")] - dst_chain_id: ChainId, - - #[clap(required = true, help = "identifier of the source chain")] - src_chain_id: ChainId, - - #[clap(required = true, help = "identifier of the destination connection")] - dst_conn_id: ConnectionId, - - #[clap(required = true, help = "identifier of the destination port")] - dst_port_id: PortId, - - #[clap(required = true, help = "identifier of the source port")] - src_port_id: PortId, - - #[clap( - short = 'd', - long, - required = true, - help = "identifier of the destination channel (required)", - value_name = "ID" - )] - dst_chan_id: ChannelId, - - #[clap( - short = 's', - long, - required = true, - help = "identifier of the source channel (required)", - value_name = "ID" - )] - src_chan_id: ChannelId, -} - -impl Runnable for TxRawChanCloseInitCmd { - fn run(&self) { - tx_chan_cmd!( - "ChanCloseInit", - build_chan_close_init_and_send, - self, - |chains: ChainHandlePair, dst_connection: ConnectionEnd| { - Channel { - connection_delay: Default::default(), - ordering: Order::default(), - a_side: ChannelSide::new( - chains.src, - ClientId::default(), - ConnectionId::default(), - self.src_port_id.clone(), - Some(self.src_chan_id), - None, - ), - b_side: ChannelSide::new( - chains.dst, - dst_connection.client_id().clone(), - self.dst_conn_id.clone(), - self.dst_port_id.clone(), - Some(self.dst_chan_id), - None, - ), - } - } - ); - } -} - -#[derive(Clone, Command, Debug, Parser)] -pub struct TxRawChanCloseConfirmCmd { - #[clap(required = true, help = "identifier of the destination chain")] - dst_chain_id: ChainId, - - #[clap(required = true, help = "identifier of the source chain")] - src_chain_id: ChainId, - - #[clap(required = true, help = "identifier of the destination connection")] - dst_conn_id: ConnectionId, - - #[clap(required = true, help = "identifier of the destination port")] - dst_port_id: PortId, - - #[clap(required = true, help = "identifier of the source port")] - src_port_id: PortId, - - #[clap( - short = 'd', - long, - required = true, - help = "identifier of the destination channel (required)", - value_name = "ID" - )] - dst_chan_id: ChannelId, - - #[clap( - short = 's', - long, - required = true, - help = "identifier of the source channel (required)", - value_name = "ID" - )] - src_chan_id: ChannelId, -} - -impl Runnable for TxRawChanCloseConfirmCmd { - fn run(&self) { - tx_chan_cmd!( - "ChanCloseConfirm", - build_chan_close_confirm_and_send, - self, - |chains: ChainHandlePair, dst_connection: ConnectionEnd| { - Channel { - connection_delay: Default::default(), - ordering: Order::default(), - a_side: ChannelSide::new( - chains.src, - ClientId::default(), - ConnectionId::default(), - self.src_port_id.clone(), - Some(self.src_chan_id), - None, - ), - b_side: ChannelSide::new( - chains.dst, - dst_connection.client_id().clone(), - self.dst_conn_id.clone(), - self.dst_port_id.clone(), - Some(self.dst_chan_id), - 
None, - ), - } - } - ); - } -} diff --git a/relayer-cli/src/commands/tx/client.rs b/relayer-cli/src/commands/tx/client.rs deleted file mode 100644 index e791ec21c7..0000000000 --- a/relayer-cli/src/commands/tx/client.rs +++ /dev/null @@ -1,394 +0,0 @@ -use core::fmt; - -use abscissa_core::clap::Parser; -use abscissa_core::{Command, Runnable}; - -use ibc::core::ics02_client::client_state::ClientState; -use ibc::core::ics24_host::identifier::{ChainId, ClientId}; -use ibc::events::IbcEvent; -use ibc_relayer::chain::handle::ChainHandle; -use ibc_relayer::chain::requests::{ - IncludeProof, PageRequest, QueryClientStateRequest, QueryClientStatesRequest, -}; -use ibc_relayer::config::Config; -use ibc_relayer::foreign_client::{CreateOptions, ForeignClient}; -use tendermint_light_client_verifier::types::TrustThreshold; - -use crate::application::app_config; -use crate::cli_utils::{spawn_chain_runtime, spawn_chain_runtime_generic, ChainHandlePair}; -use crate::conclude::{exit_with_unrecoverable_error, Output}; -use crate::error::Error; - -#[derive(Clone, Command, Debug, Parser)] -pub struct TxCreateClientCmd { - #[clap(required = true, help = "identifier of the destination chain")] - dst_chain_id: ChainId, - - #[clap(required = true, help = "identifier of the source chain")] - src_chain_id: ChainId, - - /// The maximum allowed clock drift for this client. - /// - /// The clock drift is a correction parameter. It helps deal with clocks - /// that are only approximately synchronized between the source and destination chains - /// of this client. - /// The destination chain for this client uses the clock drift parameter when deciding - /// to accept or reject a new header (originating from the source chain) for this client. - /// If this option is not specified, a suitable clock drift value is derived from the chain - /// configurations. - #[clap(short = 'd', long)] - clock_drift: Option, - - /// Override the trusting period specified in the config. - /// - /// The trusting period specifies how long a validator set is trusted for - /// (must be shorter than the chain's unbonding period). - #[clap(short = 'p', long)] - trusting_period: Option, - - /// Override the trust threshold specified in the configuration. - /// - /// The trust threshold defines what fraction of the total voting power of a known - /// and trusted validator set is sufficient for a commit to be accepted going forward. 
- #[clap(short = 't', long, parse(try_from_str = parse_trust_threshold))] - trust_threshold: Option, -} - -/// Sample to run this tx: -/// `hermes tx raw create-client ibc-0 ibc-1` -impl Runnable for TxCreateClientCmd { - fn run(&self) { - let config = app_config(); - - if self.src_chain_id == self.dst_chain_id { - Output::error("source and destination chains must be different".to_string()).exit() - } - - let chains = match ChainHandlePair::spawn(&config, &self.src_chain_id, &self.dst_chain_id) { - Ok(chains) => chains, - Err(e) => Output::error(format!("{}", e)).exit(), - }; - - let client = ForeignClient::restore(ClientId::default(), chains.dst, chains.src); - - let options = CreateOptions { - max_clock_drift: self.clock_drift.map(Into::into), - trusting_period: self.trusting_period.map(Into::into), - trust_threshold: self.trust_threshold.map(Into::into), - }; - - // Trigger client creation via the "build" interface, so that we obtain the resulting event - let res: Result = client - .build_create_client_and_send(options) - .map_err(Error::foreign_client); - - match res { - Ok(receipt) => Output::success(receipt).exit(), - Err(e) => Output::error(format!("{}", e)).exit(), - } - } -} - -#[derive(Clone, Command, Debug, Parser)] -pub struct TxUpdateClientCmd { - #[clap(required = true, help = "identifier of the destination chain")] - dst_chain_id: ChainId, - - #[clap( - required = true, - help = "identifier of the client to be updated on destination chain" - )] - dst_client_id: ClientId, - - #[clap(short = 'H', long, help = "the target height of the client update")] - target_height: Option, - - #[clap(short = 't', long, help = "the trusted height of the client update")] - trusted_height: Option, -} - -impl Runnable for TxUpdateClientCmd { - fn run(&self) { - let config = app_config(); - - let dst_chain = match spawn_chain_runtime(&config, &self.dst_chain_id) { - Ok(handle) => handle, - Err(e) => Output::error(format!("{}", e)).exit(), - }; - - let src_chain_id = match dst_chain.query_client_state( - QueryClientStateRequest { - client_id: self.dst_client_id.clone(), - height: ibc::Height::zero(), - }, - IncludeProof::No, - ) { - Ok((cs, _)) => cs.chain_id(), - Err(e) => { - Output::error(format!( - "Query of client '{}' on chain '{}' failed with error: {}", - self.dst_client_id, self.dst_chain_id, e - )) - .exit(); - } - }; - - let src_chain = match spawn_chain_runtime(&config, &src_chain_id) { - Ok(handle) => handle, - Err(e) => Output::error(format!("{}", e)).exit(), - }; - - let height = match self.target_height { - Some(height) => ibc::Height::new(src_chain.id().version(), height), - None => ibc::Height::zero(), - }; - - let trusted_height = match self.trusted_height { - Some(height) => ibc::Height::new(src_chain.id().version(), height), - None => ibc::Height::zero(), - }; - - let client = ForeignClient::find(src_chain, dst_chain, &self.dst_client_id) - .unwrap_or_else(exit_with_unrecoverable_error); - - let res = client - .build_update_client_and_send(height, trusted_height) - .map_err(Error::foreign_client); - - match res { - Ok(events) => Output::success(events).exit(), - Err(e) => Output::error(format!("{}", e)).exit(), - } - } -} - -#[derive(Clone, Command, Debug, Parser)] -pub struct TxUpgradeClientCmd { - #[clap( - required = true, - help = "identifier of the chain that hosts the client" - )] - chain_id: ChainId, - - #[clap(required = true, help = "identifier of the client to be upgraded")] - client_id: ClientId, -} - -impl Runnable for TxUpgradeClientCmd { - fn run(&self) { - let 
config = app_config(); - - let dst_chain = match spawn_chain_runtime(&config, &self.chain_id) { - Ok(handle) => handle, - Err(e) => Output::error(format!("{}", e)).exit(), - }; - - let src_chain_id = match dst_chain.query_client_state( - QueryClientStateRequest { - client_id: self.client_id.clone(), - height: ibc::Height::zero(), - }, - IncludeProof::No, - ) { - Ok((cs, _)) => cs.chain_id(), - Err(e) => { - Output::error(format!( - "Query of client '{}' on chain '{}' failed with error: {}", - self.client_id, self.chain_id, e - )) - .exit(); - } - }; - - let src_chain = match spawn_chain_runtime(&config, &src_chain_id) { - Ok(handle) => handle, - Err(e) => Output::error(format!("{}", e)).exit(), - }; - - let client = ForeignClient::find(src_chain, dst_chain, &self.client_id) - .unwrap_or_else(exit_with_unrecoverable_error); - - let outcome = client.upgrade(); - - match outcome { - Ok(receipt) => Output::success(receipt).exit(), - Err(e) => Output::error(format!("{}", e)).exit(), - } - } -} - -#[derive(Clone, Command, Debug, Parser)] -pub struct TxUpgradeClientsCmd { - #[clap( - required = true, - help = "identifier of the chain that underwent an upgrade; all clients targeting this chain will be upgraded" - )] - src_chain_id: ChainId, -} - -impl Runnable for TxUpgradeClientsCmd { - fn run(&self) { - let config = app_config(); - let src_chain = match spawn_chain_runtime(&config, &self.src_chain_id) { - Ok(handle) => handle, - Err(e) => Output::error(format!("{}", e)).exit(), - }; - - let results = config - .chains - .iter() - .filter_map(|chain| { - (self.src_chain_id != chain.id) - .then(|| self.upgrade_clients_for_chain(&config, src_chain.clone(), &chain.id)) - }) - .collect(); - - let output = OutputBuffer(results); - match output.into_result() { - Ok(events) => Output::success(events).exit(), - Err(e) => Output::error(e).exit(), - } - } -} - -impl TxUpgradeClientsCmd { - fn upgrade_clients_for_chain( - &self, - config: &Config, - src_chain: Chain, - dst_chain_id: &ChainId, - ) -> UpgradeClientsForChainResult { - let dst_chain = spawn_chain_runtime_generic::(config, dst_chain_id)?; - - let req = QueryClientStatesRequest { - pagination: Some(PageRequest::all()), - }; - let outputs = dst_chain - .query_clients(req) - .map_err(Error::relayer)? 
- .into_iter() - .filter_map(|c| (self.src_chain_id == c.client_state.chain_id()).then(|| c.client_id)) - .map(|id| TxUpgradeClientsCmd::upgrade_client(id, dst_chain.clone(), src_chain.clone())) - .collect(); - - Ok(outputs) - } - - fn upgrade_client( - client_id: ClientId, - dst_chain: Chain, - src_chain: Chain, - ) -> Result, Error> { - let client = ForeignClient::restore(client_id, dst_chain, src_chain); - client.upgrade().map_err(Error::foreign_client) - } -} - -fn parse_trust_threshold(input: &str) -> Result { - let (num_part, denom_part) = input.split_once('/').ok_or_else(|| { - Error::cli_arg("expected a fractional argument, two numbers separated by '/'".into()) - })?; - let numerator = num_part - .trim() - .parse() - .map_err(|_| Error::cli_arg("invalid numerator for the fraction".into()))?; - let denominator = denom_part - .trim() - .parse() - .map_err(|_| Error::cli_arg("invalid denominator for the fraction".into()))?; - TrustThreshold::new(numerator, denominator) - .map_err(|e| Error::cli_arg(format!("invalid trust threshold fraction: {}", e))) -} - -type UpgradeClientResult = Result, Error>; -type UpgradeClientsForChainResult = Result, Error>; - -struct OutputBuffer(Vec); - -impl fmt::Display for OutputBuffer { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fn sep<'a>(pos: usize, len: usize, other: &'a str, last: &'a str) -> &'a str { - if pos != len - 1 { - other - } else { - last - } - } - - let outer_results = &self.0; - writeln!(f, ".")?; - for (o, outer_result) in outer_results.iter().enumerate() { - write!(f, "{}", sep(o, outer_results.len(), "├─", "└─"))?; - match outer_result { - Ok(inner_results) => { - writeln!(f, ".")?; - for (i, inner_result) in inner_results.iter().enumerate() { - write!( - f, - "{} {} ", - sep(o, outer_results.len(), "│", " "), - sep(i, inner_results.len(), "├─", "└─"), - )?; - match inner_result { - Ok(events) => writeln!(f, "{:#?}", events)?, - Err(e) => writeln!(f, "{}", e)?, - } - } - } - Err(e) => writeln!(f, " {}", e)?, - } - } - Ok(()) - } -} - -impl OutputBuffer { - fn into_result(self) -> Result>, Self> { - let mut all_events = vec![]; - let mut has_err = false; - 'outer: for outer_result in &self.0 { - match outer_result { - Ok(inner_results) => { - for inner_result in inner_results { - match inner_result { - Ok(events) => all_events.push(events.clone()), - Err(_) => { - has_err = true; - break 'outer; - } - } - } - } - Err(_) => { - has_err = true; - break 'outer; - } - } - } - if has_err { - Err(self) - } else { - Ok(all_events) - } - } -} - -#[cfg(test)] -mod tests { - use super::parse_trust_threshold; - - #[test] - fn test_parse_trust_threshold() { - let threshold = parse_trust_threshold("3/5").unwrap(); - assert_eq!(threshold.numerator(), 3); - assert_eq!(threshold.denominator(), 5); - - let threshold = parse_trust_threshold("3 / 5").unwrap(); - assert_eq!(threshold.numerator(), 3); - assert_eq!(threshold.denominator(), 5); - - let threshold = parse_trust_threshold("\t3 / 5 ").unwrap(); - assert_eq!(threshold.numerator(), 3); - assert_eq!(threshold.denominator(), 5); - } -} diff --git a/relayer-cli/src/commands/tx/connection.rs b/relayer-cli/src/commands/tx/connection.rs deleted file mode 100644 index dc48202400..0000000000 --- a/relayer-cli/src/commands/tx/connection.rs +++ /dev/null @@ -1,240 +0,0 @@ -use abscissa_core::clap::Parser; -use abscissa_core::{Command, Runnable}; - -use ibc::core::ics24_host::identifier::{ChainId, ClientId, ConnectionId}; -use ibc::events::IbcEvent; -use ibc::timestamp::ZERO_DURATION; 
-use ibc_relayer::connection::{Connection, ConnectionSide}; - -use crate::cli_utils::ChainHandlePair; -use crate::conclude::Output; -use crate::error::Error; -use crate::prelude::*; - -macro_rules! conn_open_cmd { - ($dbg_string:literal, $func:ident, $self:expr, $conn:expr) => { - let config = app_config(); - - let chains = match ChainHandlePair::spawn(&config, &$self.src_chain_id, &$self.dst_chain_id) - { - Ok(chains) => chains, - Err(e) => Output::error(format!("{}", e)).exit(), - }; - - let connection = $conn(chains); - - debug!("Message {}: {:?}", $dbg_string, connection); - - let res: Result = connection.$func().map_err(Error::connection); - - match res { - Ok(receipt) => Output::success(receipt).exit(), - Err(e) => Output::error(format!("{}", e)).exit(), - } - }; -} - -#[derive(Clone, Command, Debug, Parser)] -pub struct TxRawConnInitCmd { - #[clap(required = true, help = "identifier of the destination chain")] - dst_chain_id: ChainId, - - #[clap(required = true, help = "identifier of the source chain")] - src_chain_id: ChainId, - - #[clap(required = true, help = "identifier of the destination client")] - dst_client_id: ClientId, - - #[clap(required = true, help = "identifier of the source client")] - src_client_id: ClientId, -} - -impl Runnable for TxRawConnInitCmd { - fn run(&self) { - conn_open_cmd!( - "ConnOpenInit", - build_conn_init_and_send, - self, - |chains: ChainHandlePair| { - Connection { - delay_period: ZERO_DURATION, - a_side: ConnectionSide::new(chains.src, self.src_client_id.clone(), None), - b_side: ConnectionSide::new(chains.dst, self.dst_client_id.clone(), None), - } - } - ); - } -} - -#[derive(Clone, Command, Debug, Parser)] -pub struct TxRawConnTryCmd { - #[clap(required = true, help = "identifier of the destination chain")] - dst_chain_id: ChainId, - - #[clap(required = true, help = "identifier of the source chain")] - src_chain_id: ChainId, - - #[clap(required = true, help = "identifier of the destination client")] - dst_client_id: ClientId, - - #[clap(required = true, help = "identifier of the source client")] - src_client_id: ClientId, - - #[clap( - short = 's', - long, - required = true, - help = "identifier of the source connection (required)", - value_name = "ID" - )] - src_conn_id: ConnectionId, - - #[clap( - short = 'd', - long, - help = "identifier of the destination connection (optional)", - value_name = "ID" - )] - dst_conn_id: Option, -} - -impl Runnable for TxRawConnTryCmd { - fn run(&self) { - conn_open_cmd!( - "ConnOpenTry", - build_conn_try_and_send, - self, - |chains: ChainHandlePair| { - Connection { - delay_period: ZERO_DURATION, - a_side: ConnectionSide::new( - chains.src, - self.src_client_id.clone(), - Some(self.src_conn_id.clone()), - ), - b_side: ConnectionSide::new( - chains.dst, - self.dst_client_id.clone(), - self.dst_conn_id.clone(), - ), - } - } - ); - } -} - -#[derive(Clone, Command, Debug, Parser)] -pub struct TxRawConnAckCmd { - #[clap(required = true, help = "identifier of the destination chain")] - dst_chain_id: ChainId, - - #[clap(required = true, help = "identifier of the source chain")] - src_chain_id: ChainId, - - #[clap(required = true, help = "identifier of the destination client")] - dst_client_id: ClientId, - - #[clap(required = true, help = "identifier of the source client")] - src_client_id: ClientId, - - #[clap( - short = 'd', - long, - required = true, - help = "identifier of the destination connection (required)", - value_name = "ID" - )] - dst_conn_id: ConnectionId, - - #[clap( - short = 's', - long, - required = 
true, - help = "identifier of the source connection (required)", - value_name = "ID" - )] - src_conn_id: ConnectionId, -} - -impl Runnable for TxRawConnAckCmd { - fn run(&self) { - conn_open_cmd!( - "ConnOpenAck", - build_conn_ack_and_send, - self, - |chains: ChainHandlePair| { - Connection { - delay_period: ZERO_DURATION, - a_side: ConnectionSide::new( - chains.src, - self.src_client_id.clone(), - Some(self.src_conn_id.clone()), - ), - b_side: ConnectionSide::new( - chains.dst, - self.dst_client_id.clone(), - Some(self.dst_conn_id.clone()), - ), - } - } - ); - } -} - -#[derive(Clone, Command, Debug, Parser)] -pub struct TxRawConnConfirmCmd { - #[clap(required = true, help = "identifier of the destination chain")] - dst_chain_id: ChainId, - - #[clap(required = true, help = "identifier of the source chain")] - src_chain_id: ChainId, - - #[clap(required = true, help = "identifier of the destination client")] - dst_client_id: ClientId, - - #[clap(required = true, help = "identifier of the source client")] - src_client_id: ClientId, - - #[clap( - short = 'd', - long, - required = true, - help = "identifier of the destination connection (required)", - value_name = "ID" - )] - dst_conn_id: ConnectionId, - - #[clap( - short = 's', - long, - required = true, - help = "identifier of the source connection (required)", - value_name = "ID" - )] - src_conn_id: ConnectionId, -} - -impl Runnable for TxRawConnConfirmCmd { - fn run(&self) { - conn_open_cmd!( - "ConnOpenConfirm", - build_conn_confirm_and_send, - self, - |chains: ChainHandlePair| { - Connection { - delay_period: ZERO_DURATION, - a_side: ConnectionSide::new( - chains.src, - self.src_client_id.clone(), - Some(self.src_conn_id.clone()), - ), - b_side: ConnectionSide::new( - chains.dst, - self.dst_client_id.clone(), - Some(self.dst_conn_id.clone()), - ), - } - } - ); - } -} diff --git a/relayer-cli/src/commands/tx/packet.rs b/relayer-cli/src/commands/tx/packet.rs deleted file mode 100644 index 607703b4f6..0000000000 --- a/relayer-cli/src/commands/tx/packet.rs +++ /dev/null @@ -1,98 +0,0 @@ -use abscissa_core::clap::Parser; -use abscissa_core::{Command, Runnable}; - -use ibc::core::ics24_host::identifier::{ChainId, ChannelId, PortId}; -use ibc::events::IbcEvent; -use ibc_relayer::link::{Link, LinkParameters}; - -use crate::cli_utils::ChainHandlePair; -use crate::conclude::Output; -use crate::error::Error; -use crate::prelude::*; - -#[derive(Clone, Command, Debug, Parser)] -pub struct TxRawPacketRecvCmd { - #[clap(required = true, help = "identifier of the destination chain")] - dst_chain_id: ChainId, - - #[clap(required = true, help = "identifier of the source chain")] - src_chain_id: ChainId, - - #[clap(required = true, help = "identifier of the source port")] - src_port_id: PortId, - - #[clap(required = true, help = "identifier of the source channel")] - src_channel_id: ChannelId, -} - -impl Runnable for TxRawPacketRecvCmd { - fn run(&self) { - let config = app_config(); - - let chains = match ChainHandlePair::spawn(&config, &self.src_chain_id, &self.dst_chain_id) { - Ok(chains) => chains, - Err(e) => Output::error(format!("{}", e)).exit(), - }; - - let opts = LinkParameters { - src_port_id: self.src_port_id.clone(), - src_channel_id: self.src_channel_id, - }; - let link = match Link::new_from_opts(chains.src, chains.dst, opts, false) { - Ok(link) => link, - Err(e) => Output::error(format!("{}", e)).exit(), - }; - - let res: Result, Error> = link - .relay_recv_packet_and_timeout_messages() - .map_err(Error::link); - - match res { - Ok(ev) => 
Output::success(ev).exit(), - Err(e) => Output::error(format!("{}", e)).exit(), - } - } -} - -#[derive(Clone, Command, Debug, Parser)] -pub struct TxRawPacketAckCmd { - #[clap(required = true, help = "identifier of the destination chain")] - dst_chain_id: ChainId, - - #[clap(required = true, help = "identifier of the source chain")] - src_chain_id: ChainId, - - #[clap(required = true, help = "identifier of the source port")] - src_port_id: PortId, - - #[clap(required = true, help = "identifier of the source channel")] - src_channel_id: ChannelId, -} - -impl Runnable for TxRawPacketAckCmd { - fn run(&self) { - let config = app_config(); - - let chains = match ChainHandlePair::spawn(&config, &self.src_chain_id, &self.dst_chain_id) { - Ok(chains) => chains, - Err(e) => Output::error(format!("{}", e)).exit(), - }; - - let opts = LinkParameters { - src_port_id: self.src_port_id.clone(), - src_channel_id: self.src_channel_id, - }; - let link = match Link::new_from_opts(chains.src, chains.dst, opts, false) { - Ok(link) => link, - Err(e) => Output::error(format!("{}", e)).exit(), - }; - - let res: Result, Error> = - link.relay_ack_packet_messages().map_err(Error::link); - - match res { - Ok(ev) => Output::success(ev).exit(), - Err(e) => Output::error(format!("{}", e)).exit(), - } - } -} diff --git a/relayer-cli/src/commands/tx/transfer.rs b/relayer-cli/src/commands/tx/transfer.rs deleted file mode 100644 index 77c92a9ace..0000000000 --- a/relayer-cli/src/commands/tx/transfer.rs +++ /dev/null @@ -1,255 +0,0 @@ -use abscissa_core::clap::Parser; -use abscissa_core::{config::Override, Command, FrameworkErrorKind, Runnable}; - -use core::time::Duration; -use ibc::{ - applications::transfer::Amount, - core::{ - ics02_client::client_state::ClientState, - ics02_client::height::Height, - ics24_host::identifier::{ChainId, ChannelId, PortId}, - }, - events::IbcEvent, -}; -use ibc_relayer::chain::handle::ChainHandle; -use ibc_relayer::chain::requests::{ - IncludeProof, QueryChannelRequest, QueryClientStateRequest, QueryConnectionRequest, -}; -use ibc_relayer::{ - config::Config, - transfer::{build_and_send_transfer_messages, TransferOptions}, -}; - -use crate::cli_utils::ChainHandlePair; -use crate::conclude::{exit_with_unrecoverable_error, Output}; -use crate::error::Error; -use crate::prelude::*; - -#[derive(Clone, Command, Debug, Parser)] -pub struct TxIcs20MsgTransferCmd { - #[clap(required = true, help = "identifier of the destination chain")] - dst_chain_id: ChainId, - - #[clap(required = true, help = "identifier of the source chain")] - src_chain_id: ChainId, - - #[clap(required = true, help = "identifier of the source port")] - src_port_id: PortId, - - #[clap(required = true, help = "identifier of the source channel")] - src_channel_id: ChannelId, - - #[clap( - required = true, - help = "amount of coins (samoleans, by default) to send (e.g. 
`100000`)" - )] - amount: Amount, - - #[clap( - short = 'o', - long, - default_value = "0", - help = "timeout in number of blocks since current" - )] - timeout_height_offset: u64, - - #[clap( - short = 't', - long, - default_value = "0", - help = "timeout in seconds since current" - )] - timeout_seconds: u64, - - #[clap( - short = 'r', - long, - help = "receiving account address on the destination chain" - )] - receiver: Option, - - #[clap( - short = 'd', - long, - help = "denomination of the coins to send", - default_value = "samoleans" - )] - denom: String, - - #[clap(short = 'n', long, help = "number of messages to send")] - number_msgs: Option, - - #[clap( - short = 'k', - long, - help = "use the given signing key (default: `key_name` config)" - )] - key: Option, -} - -impl Override for TxIcs20MsgTransferCmd { - fn override_config(&self, mut config: Config) -> Result { - let src_chain_config = config.find_chain_mut(&self.src_chain_id).ok_or_else(|| { - FrameworkErrorKind::ComponentError.context(format!( - "missing configuration for source chain '{}'", - self.src_chain_id - )) - })?; - - if let Some(ref key_name) = self.key { - src_chain_config.key_name = key_name.to_string(); - } - - Ok(config) - } -} - -impl TxIcs20MsgTransferCmd { - fn validate_options( - &self, - config: &Config, - ) -> Result> { - config.find_chain(&self.src_chain_id).ok_or_else(|| { - format!( - "missing configuration for source chain '{}'", - self.src_chain_id - ) - })?; - - config.find_chain(&self.dst_chain_id).ok_or_else(|| { - format!( - "missing configuration for destination chain '{}'", - self.dst_chain_id - ) - })?; - - let denom = self.denom.clone(); - - let number_msgs = self.number_msgs.unwrap_or(1); - if number_msgs == 0 { - return Err("number of messages should be greater than zero".into()); - } - - if self.timeout_height_offset == 0 && self.timeout_seconds == 0 { - return Err( - "packet timeout height and packet timeout timestamp cannot both be 0, \ - please specify either --timeout-height-offset or --timeout-seconds" - .into(), - ); - } - - let opts = TransferOptions { - packet_src_port_id: self.src_port_id.clone(), - packet_src_channel_id: self.src_channel_id, - amount: self.amount, - denom, - receiver: self.receiver.clone(), - timeout_height_offset: self.timeout_height_offset, - timeout_duration: Duration::from_secs(self.timeout_seconds), - number_msgs, - }; - - Ok(opts) - } -} - -impl Runnable for TxIcs20MsgTransferCmd { - fn run(&self) { - let config = app_config(); - - let opts = match self.validate_options(&config) { - Err(err) => Output::error(err).exit(), - Ok(result) => result, - }; - - debug!("Message: {:?}", opts); - - let chains = ChainHandlePair::spawn(&config, &self.src_chain_id, &self.dst_chain_id) - .unwrap_or_else(exit_with_unrecoverable_error); - - // Double check that channels and chain identifiers match. - // To do this, fetch from the source chain the channel end, then the associated connection - // end, and then the underlying client state; finally, check that this client is verifying - // headers for the destination chain. 
- let (channel_end_src, _) = chains - .src - .query_channel( - QueryChannelRequest { - port_id: opts.packet_src_port_id.clone(), - channel_id: opts.packet_src_channel_id, - height: Height::zero(), - }, - IncludeProof::No, - ) - .unwrap_or_else(exit_with_unrecoverable_error); - if !channel_end_src.is_open() { - Output::error(format!( - "the requested port/channel ('{}'/'{}') on chain id '{}' is in state '{}'; expected 'open' state", - opts.packet_src_port_id, - opts.packet_src_channel_id, - self.src_chain_id, - channel_end_src.state - )) - .exit(); - } - - let conn_id = match channel_end_src.connection_hops.first() { - None => { - Output::error(format!( - "could not retrieve the connection hop underlying port/channel '{}'/'{}' on chain '{}'", - opts.packet_src_port_id, opts.packet_src_channel_id, self.src_chain_id - )) - .exit(); - } - Some(cid) => cid, - }; - - let (conn_end, _) = chains - .src - .query_connection( - QueryConnectionRequest { - connection_id: conn_id.clone(), - height: Height::zero(), - }, - IncludeProof::No, - ) - .unwrap_or_else(exit_with_unrecoverable_error); - - debug!("connection hop underlying the channel: {:?}", conn_end); - - let (src_chain_client_state, _) = chains - .src - .query_client_state( - QueryClientStateRequest { - client_id: conn_end.client_id().clone(), - height: Height::zero(), - }, - IncludeProof::No, - ) - .unwrap_or_else(exit_with_unrecoverable_error); - - debug!( - "client state underlying the channel: {:?}", - src_chain_client_state - ); - - if src_chain_client_state.chain_id() != self.dst_chain_id { - Output::error( - format!("the requested port/channel ('{}'/'{}') provides a path from chain '{}' to \ - chain '{}' (not to the destination chain '{}'). Bailing due to mismatching arguments.", - opts.packet_src_port_id, opts.packet_src_channel_id, - self.src_chain_id, - src_chain_client_state.chain_id(), self.dst_chain_id)).exit(); - } - - // Checks pass, build and send the tx - let res: Result, Error> = - build_and_send_transfer_messages(&chains.src, &chains.dst, &opts) - .map_err(Error::transfer); - - match res { - Ok(ev) => Output::success(ev).exit(), - Err(e) => Output::error(format!("{}", e)).exit(), - } - } -} diff --git a/relayer-cli/src/commands/tx/upgrade.rs b/relayer-cli/src/commands/tx/upgrade.rs deleted file mode 100644 index 1422a6c8ab..0000000000 --- a/relayer-cli/src/commands/tx/upgrade.rs +++ /dev/null @@ -1,133 +0,0 @@ -use core::time::Duration; - -use abscissa_core::clap::Parser; -use abscissa_core::{Command, Runnable}; - -use ibc::core::ics24_host::identifier::{ChainId, ClientId}; -use ibc_relayer::config::Config; -use ibc_relayer::upgrade_chain::{build_and_send_ibc_upgrade_proposal, UpgradePlanOptions}; - -use crate::cli_utils::spawn_chain_runtime; -use crate::conclude::{exit_with_unrecoverable_error, Output}; -use crate::error::Error; -use crate::prelude::*; - -#[derive(Clone, Command, Debug, Parser)] -pub struct TxIbcUpgradeChainCmd { - #[clap(required = true, help = "identifier of the chain to upgrade")] - dst_chain_id: ChainId, - - #[clap(required = true, help = "identifier of the source chain")] - src_chain_id: ChainId, - - #[clap( - required = true, - help = "identifier of the client on source chain from which the plan is created" - )] - src_client_id: ClientId, - - #[clap(required = true, help = "amount of stake")] - amount: u64, - - #[clap( - required = true, - help = "upgrade height offset in number of blocks since current" - )] - height_offset: u64, - - #[clap( - short = 'c', - long, - value_name = "CHAIN-ID", - help = 
"new chain identifier to assign to the upgrading chain (optional)" - )] - new_chain_id: Option, - - #[clap( - short = 'u', - long, - value_name = "PERIOD", - help = "new unbonding period to assign to the upgrading chain, in seconds (optional)" - )] - new_unbonding: Option, - - #[clap( - short = 'n', - long, - value_name = "NAME", - help = "a string to name the upgrade proposal plan (default: 'plan')" - )] - upgrade_name: Option, - - #[clap( - short = 'd', - long, - help = "denomination for the deposit (default: 'stake')" - )] - denom: Option, -} - -impl TxIbcUpgradeChainCmd { - fn validate_options(&self, config: &Config) -> Result { - let src_chain_config = config.find_chain(&self.src_chain_id).ok_or_else(|| { - format!( - "missing configuration for source chain '{}'", - self.src_chain_id - ) - })?; - - let dst_chain_config = config.find_chain(&self.dst_chain_id).ok_or_else(|| { - format!( - "missing configuration for destination chain '{}'", - self.dst_chain_id - ) - })?; - - let opts = UpgradePlanOptions { - dst_chain_config: dst_chain_config.clone(), - src_chain_config: src_chain_config.clone(), - src_client_id: self.src_client_id.clone(), - amount: self.amount, - denom: self.denom.as_deref().unwrap_or("stake").into(), - height_offset: self.height_offset, - upgraded_chain_id: self - .new_chain_id - .clone() - .unwrap_or_else(|| self.dst_chain_id.clone()), - upgraded_unbonding_period: self.new_unbonding.map(Duration::from_secs), - upgrade_plan_name: self - .upgrade_name - .clone() - .unwrap_or_else(|| "plan".to_string()), - }; - - Ok(opts) - } -} - -impl Runnable for TxIbcUpgradeChainCmd { - fn run(&self) { - let config = app_config(); - - let opts = match self.validate_options(&config) { - Err(err) => Output::error(err).exit(), - Ok(result) => result, - }; - - info!("Message {:?}", opts); - - let src_chain = spawn_chain_runtime(&config, &self.src_chain_id) - .unwrap_or_else(exit_with_unrecoverable_error); - - let dst_chain = spawn_chain_runtime(&config, &self.dst_chain_id) - .unwrap_or_else(exit_with_unrecoverable_error); - - let res = build_and_send_ibc_upgrade_proposal(dst_chain, src_chain, &opts) - .map_err(Error::upgrade_chain); - - match res { - Ok(ev) => Output::success(ev).exit(), - Err(e) => Output::error(format!("{}", e)).exit(), - } - } -} diff --git a/relayer-cli/src/commands/update.rs b/relayer-cli/src/commands/update.rs deleted file mode 100644 index 351c764b0f..0000000000 --- a/relayer-cli/src/commands/update.rs +++ /dev/null @@ -1,12 +0,0 @@ -//! `update` subcommand - -use abscissa_core::clap::Parser; -use abscissa_core::{Command, Runnable}; - -use crate::commands::tx::client::TxUpdateClientCmd; - -#[derive(Command, Debug, Parser, Runnable)] -pub enum UpdateCmds { - /// Update an IBC client - Client(TxUpdateClientCmd), -} diff --git a/relayer-cli/src/commands/upgrade.rs b/relayer-cli/src/commands/upgrade.rs deleted file mode 100644 index f69f904142..0000000000 --- a/relayer-cli/src/commands/upgrade.rs +++ /dev/null @@ -1,15 +0,0 @@ -//! 
`upgrade` subcommand - -use abscissa_core::clap::Parser; -use abscissa_core::{Command, Runnable}; - -use crate::commands::tx::client::{TxUpgradeClientCmd, TxUpgradeClientsCmd}; - -#[derive(Command, Debug, Parser, Runnable)] -pub enum UpgradeCmds { - /// Upgrade an IBC client - Client(TxUpgradeClientCmd), - - /// Upgrade all IBC clients that target a specific chain - Clients(TxUpgradeClientsCmd), -} diff --git a/relayer-cli/src/commands/version.rs b/relayer-cli/src/commands/version.rs deleted file mode 100644 index 427afd3808..0000000000 --- a/relayer-cli/src/commands/version.rs +++ /dev/null @@ -1,21 +0,0 @@ -//! `version` subcommand - -use super::CliCmd; -use abscissa_core::clap::Parser; -use abscissa_core::{Command, Runnable}; - -/// `version` subcommand -/// -/// This subcommand is implemented for backward compatibility reasons. -/// Its behavior should be the same as that of the `--version` flag which -/// is handled internally by clap. -#[derive(Command, Debug, Default, Parser)] -#[clap(hide = true)] -pub struct VersionCmd {} - -impl Runnable for VersionCmd { - /// Print version message - fn run(&self) { - println!("{} {}", CliCmd::name(), clap::crate_version!()); - } -} diff --git a/relayer-cli/src/components.rs b/relayer-cli/src/components.rs deleted file mode 100644 index 645cc50e75..0000000000 --- a/relayer-cli/src/components.rs +++ /dev/null @@ -1,110 +0,0 @@ -use abscissa_core::{Component, FrameworkError, FrameworkErrorKind}; -use tracing_subscriber::{filter::EnvFilter, util::SubscriberInitExt, FmtSubscriber}; - -use ibc_relayer::config::{GlobalConfig, LogLevel}; - -use crate::config::Error; - -/// The name of the environment variable through which one can override -/// the tracing filter built in [`build_tracing_filter`]. -const HERMES_LOG_VAR: &str = "RUST_LOG"; - -/// A custom component for parametrizing `tracing` in the relayer. -/// Primarily used for: -/// -/// - Customizing the log output level, for filtering the output produced via tracing macros -/// (`debug!`, `info!`, etc.) or abscissa macros (`status_err`, `status_info`, etc.). -/// - Enabling JSON-formatted output without coloring -#[derive(Component, Debug)] -pub struct JsonTracing; - -impl JsonTracing { - /// Creates a new [`JsonTracing`] component - pub fn new(cfg: GlobalConfig) -> Result { - let filter = build_tracing_filter(cfg.log_level)?; - // Note: JSON formatter is un-affected by ANSI 'color' option. Set to 'false'. - let use_color = false; - - // Construct a tracing subscriber with the supplied filter and enable reloading. - let builder = FmtSubscriber::builder() - .with_target(false) - .with_env_filter(filter) - .with_writer(std::io::stderr) - .with_ansi(use_color) - .with_thread_ids(true) - .json(); - - let subscriber = builder.finish(); - subscriber.init(); - - Ok(Self) - } -} - -#[derive(Component, Debug)] -pub struct PrettyTracing; - -impl PrettyTracing { - /// Creates a new [`PrettyTracing`] component - pub fn new(cfg: GlobalConfig) -> Result { - let filter = build_tracing_filter(cfg.log_level)?; - - // Construct a tracing subscriber with the supplied filter and enable reloading. - let builder = FmtSubscriber::builder() - .with_target(false) - .with_env_filter(filter) - .with_writer(std::io::stderr) - .with_ansi(enable_ansi()) - .with_thread_ids(true); - - let subscriber = builder.finish(); - subscriber.init(); - - Ok(Self) - } -} - -/// Check if both stdout and stderr are proper terminal (tty), -/// so that we know whether or not to enable colored output, -/// using ANSI escape codes. 
If either is not, eg. because -/// stdout is redirected to a file, we don't enable colored output. -pub fn enable_ansi() -> bool { - atty::is(atty::Stream::Stdout) && atty::is(atty::Stream::Stderr) -} - -/// The relayer crates targeted by the default log level. -const TARGET_CRATES: [&str; 2] = ["ibc_relayer", "ibc_relayer_cli"]; - -/// Build a tracing directive setting the log level for the relayer crates to the -/// given `log_level`. -fn default_directive(log_level: LogLevel) -> String { - use itertools::Itertools; - - TARGET_CRATES - .iter() - .map(|&c| format!("{}={}", c, log_level)) - .join(",") -} - -/// Builds a tracing filter based on the input `log_level`. -/// Enables tracing exclusively for the relayer crates. -/// Returns error if the filter failed to build. -fn build_tracing_filter(default_level: LogLevel) -> Result { - let directive = - std::env::var(HERMES_LOG_VAR).unwrap_or_else(|_| default_directive(default_level)); - - // Build the filter directive - match EnvFilter::try_new(&directive) { - Ok(out) => Ok(out), - Err(e) => { - eprintln!( - "ERROR: unable to initialize Hermes with log filtering directive {:?}: {}", - directive, e - ); - - Err(FrameworkErrorKind::ConfigError - .context(Error::invalid_log_directive(directive, e)) - .into()) - } - } -} diff --git a/relayer-cli/src/conclude.rs b/relayer-cli/src/conclude.rs deleted file mode 100644 index 294c20e764..0000000000 --- a/relayer-cli/src/conclude.rs +++ /dev/null @@ -1,268 +0,0 @@ -//! Custom-made solution to output a JSON return message and ensure a return code -//! from a CLI command. The main use-case for this module is to provide a consistent output for -//! queries and transactions. -//! -//! The examples below rely on crate-private methods (for this reason, doctests are ignored). -//! They are intended for contributors to crate `relayer-cli`, and _not_ for users of this binary. -//! -//! ## Examples on how to use the quick-access constructors: -//! -//! - Exit from a query/tx with a `String` error: -//! -//! ```ignore -//! let e = String::from("error message"); -//! Output::error(e).exit(); -//! // or as an alternative: -//! Output::error(json!("error occurred")).exit(); -//! ``` -//! -//! - Exit from a query/tx with an error of type `anomaly`: -//! In the case where the error is a complex type such as anomaly (including backtraces), it is -//! better to simplify the output and only write out the chain of error sources, which we can -//! achieve with `format!("{}", e)`. The complete solution is as follows: -//! -//! ```ignore -//! let e: Error = Kind::Query.into(); -//! Output::error(format!("{}", e)).exit(); -//! ``` -//! -//! #### Note: -//! The resulting output that this approach generates is determined by the 'error' annotation given -//! to the error object `Kind::Query`. If this error object comprises any positional arguments, -//! e.g. as achieved by `Query(String, String)`, then it is important to cover these arguments -//! in the `error` annotation, for instance: -//! ```ignore -//! #[derive(Debug, Error)] -//! pub enum Kind { -//! #[error("failed with underlying causes: {0}, {1}")] -//! Query(String, String), -//! // ... -//! } -//! ``` -//! -//! - Exit from a query/tx with success: -//! -//! ```ignore -//! let cs = ChannelEnd::default(); -//! Output::success(cs).exit(); -//! ``` -//! -//! - Exit from a query/tx with success and multiple objects in the result: -//! -//! ```ignore -//! let h = Height::default(); -//! let end = ConnectionEnd::default(); -//! 
Output::success(h).with_result(end).exit(); -//! ``` - -use core::fmt; - -use serde::Serialize; -use tracing::warn; - -use crate::prelude::app_reader; - -/// Functional-style method to exit a program. -/// -/// ## Note: See `Output::exit()` for the preferred method of exiting a relayer command. -pub fn exit_with(out: Output) -> ! { - let status = out.status; - - // Handle the output message - if json() { - println!("{}", serde_json::to_string(&out.into_json()).unwrap()); - } else { - println!("{}: {}", out.status, out.result); - } - - // The return code - if status == Status::Error { - std::process::exit(1); - } else { - std::process::exit(0); - } -} - -/// Returns true if the application global json flag `-j` or `--json` is enabled. -/// Returns false otherwise. -pub fn json() -> bool { - let a = app_reader(); - a.json_output() -} - -/// Exits the program. Useful when a type produces an error which can no longer be propagated, and -/// the program must exit instead. -/// -/// ## Example of use -/// - Without this function: -/// ```ignore -/// let res = ForeignClient::new(chains.src.clone(), chains.dst.clone()); -/// let client = match res { -/// Ok(client) => client, -/// Err(e) => Output::error(format!("{}", e)).exit(), -/// }; -/// ``` -/// - With support from `exit_with_unrecoverable_error`: -/// ```ignore -/// let client_a = ForeignClient::new(chains.src.clone(), chains.dst.clone()) -/// .unwrap_or_else(exit_with_unrecoverable_error); -/// ``` -pub fn exit_with_unrecoverable_error(err: E) -> T { - Output::error(format!("{}", err)).exit() -} - -/// The result to display before quitting, can either be a JSON value, some plain text, -/// a value to print with its Debug instance, or nothing. -#[derive(Debug)] -pub enum Result { - Json(serde_json::Value), - Value(Box), - Text(String), - Nothing, -} - -impl fmt::Display for Result { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - Result::Json(v) => write!(f, "{}", serde_json::to_string(v).unwrap()), - Result::Value(v) => write!(f, "{:#?}", v), - Result::Text(t) => write!(f, "{}", t), - Result::Nothing => write!(f, "no output"), - } - } -} - -/// A CLI output with support for JSON serialization. The only mandatory field is the `status`, -/// which typically signals a success (UNIX process return code `0`) or an error (code `1`). An -/// optional `result` can be added to an output. -/// -pub struct Output { - /// The return status - pub status: Status, - - /// The result of a command, such as the output from a query or transaction. - pub result: Result, -} - -impl Output { - /// Constructs a new `Output` with the provided `status` and an empty `result`. - pub fn new(status: Status) -> Self { - Output { - status, - result: Result::Nothing, - } - } - - /// Constructor that returns a new `Output` having a `Success` status and empty `result`. - pub fn with_success() -> Self { - Output::new(Status::Success) - } - - /// Constructor that returns a new `Output` having an `Error` status and empty `result`. - pub fn with_error() -> Self { - Output::new(Status::Error) - } - - /// Builder-style method for attaching a result to an output object. - pub fn with_result(mut self, result: R) -> Self - where - R: Serialize + core::fmt::Debug + 'static, - { - if json() { - self.result = Result::Json(serialize_result(result)); - } else { - self.result = Result::Value(Box::new(result)); - } - - self - } - - /// Builder-style method for attaching a plain text message to an output object. 
- pub fn with_msg(mut self, msg: impl ToString) -> Self { - self.result = Result::Text(msg.to_string()); - self - } - - /// Quick-access constructor for an output signalling a success `status` and tagged with the - /// input `result`. - pub fn success(result: R) -> Self - where - R: Serialize + core::fmt::Debug + 'static, - { - Output::with_success().with_result(result) - } - - /// Quick-access constructor for an output message signalling a error `status`. - pub fn error(msg: impl ToString) -> Self { - Output::with_error().with_msg(msg) - } - - /// Quick-access constructor for an output signalling a success `status` and tagged with the - /// input `result`. - pub fn success_msg(msg: impl ToString) -> Self { - Output::with_success().with_msg(msg) - } - - /// Exits from the process with the current output. Convenience wrapper over `exit_with`. - pub fn exit(self) -> ! { - exit_with(self); - } - - /// Convert this output value to a JSON value - pub fn into_json(self) -> serde_json::Value { - let mut map = serde_json::Map::new(); - - map.insert( - "status".to_string(), - serde_json::to_value(self.status).unwrap(), - ); - - let value = match self.result { - Result::Json(v) => v, - Result::Value(v) => serde_json::Value::String(format!("{:#?}", v)), - Result::Text(v) => serde_json::Value::String(v), - Result::Nothing => serde_json::Value::String("no output".to_string()), - }; - - map.insert("result".to_string(), value); - - serde_json::Value::Object(map) - } -} - -/// Helper to serialize a result into a `serde_json::Value`. -fn serialize_result(res: impl Serialize + core::fmt::Debug) -> serde_json::Value { - let last_resort = format!("{:#?}", res); - - match serde_json::to_value(res) { - Ok(json_val) => json_val, - Err(e) => { - // Signal the serialization error - warn!( - "Output constructor failed with non-recoverable error {} for input {}", - e, last_resort - ); - // Package the result with the infallible `Debug` instead of `JSON` - serde_json::Value::String(last_resort) - } - } -} - -/// Represents the exit status of any CLI command -#[derive(Copy, Clone, Debug, PartialEq, Eq, Serialize)] -pub enum Status { - #[serde(rename(serialize = "success"))] - Success, - - #[serde(rename(serialize = "error"))] - Error, -} - -impl fmt::Display for Status { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - Status::Success => write!(f, "Success"), - Status::Error => write!(f, "Error"), - } - } -} diff --git a/relayer-cli/src/config.rs b/relayer-cli/src/config.rs deleted file mode 100644 index 05bf4c94c1..0000000000 --- a/relayer-cli/src/config.rs +++ /dev/null @@ -1,170 +0,0 @@ -//! Cli Config -//! -//! See instructions in `commands.rs` to specify the path to your -//! application's configuration file and/or command-line options -//! for specifying it. - -use alloc::collections::BTreeSet; -use std::path::PathBuf; - -use flex_error::{define_error, TraceError}; -use ibc::core::ics24_host::identifier::ChainId; -use ibc_relayer::config::{Config, ModeConfig}; -use tendermint_light_client_verifier::types::TrustThreshold; -use tracing_subscriber::filter::ParseError; - -use crate::application::app_reader; - -/// Get the path to configuration file -pub fn config_path() -> Option { - let app = app_reader(); - app.config_path().cloned() -} - -// Specifies all the possible syntactic errors -// that a Hermes configuration file could contain. -define_error! 
{ - Error { - ZeroChain - |_| { "config file does not specify any chain" }, - - InvalidLogDirective - { directive: String, } - [ TraceError ] - |e| { - format!("invalid log directive: {0:?}", e.directive) - }, - - InvalidMode - { reason: String, } - |e| { - format!("config file specifies invalid mode config, caused by: {0}", - e.reason) - }, - - DuplicateChains - { chain_id: ChainId } - |e| { - format!("config file has duplicate entry for the chain with id {0}", - e.chain_id) - }, - - InvalidTrustThreshold - { - threshold: TrustThreshold, - chain_id: ChainId, - reason: String - } - |e| { - format!("config file specifies an invalid `trust_threshold` ({0}) for the chain with id {1}, caused by: {2}", - e.threshold, e.chain_id, e.reason) - }, - - InvalidGasAdjustment - { - gas_adjustment: f64, - chain_id: ChainId, - reason: String - } - |e| { - format!("config file specifies an invalid `gas_adjustment` ({0}) for the chain with id {1}, caused by: {2}", - e.gas_adjustment, e.chain_id, e.reason) - }, - } -} - -#[derive(Clone, Debug)] -pub enum Diagnostic { - Warning(E), - Error(E), -} - -/// Method for syntactic validation of the input configuration file. -pub fn validate_config(config: &Config) -> Result<(), Diagnostic> { - // Check for duplicate chain configuration and invalid trust thresholds - let mut unique_chain_ids = BTreeSet::new(); - for c in config.chains.iter() { - let already_present = !unique_chain_ids.insert(c.id.clone()); - if already_present { - return Err(Diagnostic::Error(Error::duplicate_chains(c.id.clone()))); - } - - validate_trust_threshold(&c.id, c.trust_threshold)?; - - // Validate gas-related settings - validate_gas_settings(&c.id, c.gas_adjustment)?; - } - - // Check for invalid mode config - validate_mode(&config.mode)?; - - Ok(()) -} - -fn validate_mode(mode: &ModeConfig) -> Result<(), Diagnostic> { - if mode.all_disabled() { - return Err(Diagnostic::Warning(Error::invalid_mode( - "all operation modes of Hermes are disabled, relayer won't perform any action aside from subscribing to events".to_string(), - ))); - } - - if mode.clients.enabled && !mode.clients.refresh && !mode.clients.misbehaviour { - return Err(Diagnostic::Error(Error::invalid_mode( - "either `refresh` or `misbehaviour` must be set to true if `clients.enabled` is set to true".to_string(), - ))); - } - - Ok(()) -} - -/// Check that the trust threshold is: -/// -/// a) non-zero -/// b) greater or equal to 1/3 -/// c) strictly less than 1 -fn validate_trust_threshold( - id: &ChainId, - trust_threshold: TrustThreshold, -) -> Result<(), Diagnostic> { - if trust_threshold.denominator() == 0 { - return Err(Diagnostic::Error(Error::invalid_trust_threshold( - trust_threshold, - id.clone(), - "trust threshold denominator cannot be zero".to_string(), - ))); - } - - if trust_threshold.numerator() * 3 < trust_threshold.denominator() { - return Err(Diagnostic::Error(Error::invalid_trust_threshold( - trust_threshold, - id.clone(), - "trust threshold cannot be < 1/3".to_string(), - ))); - } - - if trust_threshold.numerator() >= trust_threshold.denominator() { - return Err(Diagnostic::Error(Error::invalid_trust_threshold( - trust_threshold, - id.clone(), - "trust threshold cannot be >= 1".to_string(), - ))); - } - - Ok(()) -} - -fn validate_gas_settings( - id: &ChainId, - gas_adjustment: Option, -) -> Result<(), Diagnostic> { - match gas_adjustment { - Some(gas_adjustment) if !(0.0..=1.0).contains(&gas_adjustment) => { - Err(Diagnostic::Error(Error::invalid_gas_adjustment( - gas_adjustment, - id.clone(), - "gas 
adjustment must be between 0.0 and 1.0 inclusive".to_string(), - ))) - } - _ => Ok(()), - } -} diff --git a/relayer-cli/src/entry.rs b/relayer-cli/src/entry.rs deleted file mode 100644 index 312d43dfe2..0000000000 --- a/relayer-cli/src/entry.rs +++ /dev/null @@ -1,72 +0,0 @@ -use std::path::PathBuf; -use std::process; - -use abscissa_core::clap::Parser; -use abscissa_core::{Command, Configurable, FrameworkError, Runnable}; -use clap::IntoApp; -use ibc_relayer::config::Config; - -use crate::commands::CliCmd; - -/// Entry point for Hermes CLI. -#[derive(Command, Debug, Parser)] -#[clap(author, about, version)] -pub struct EntryPoint { - /// Path to the configuration file - #[clap(short = 'c', long, help = "path to configuration file")] - pub config: Option, - - /// Toggle JSON output mode one verbosity setting - #[clap(short = 'j', long, help = "enable JSON output")] - pub json: bool, - - /// Subcommand to execute. - /// - /// The `command` option will delegate option parsing to the command type, - /// starting at the first free argument. - #[clap(subcommand)] - pub command: Option, -} - -impl Runnable for EntryPoint { - fn run(&self) { - match &self.command { - Some(cmd) => cmd.run(), - None => { - EntryPoint::command().print_help().unwrap(); - process::exit(0); - } - } - } -} - -impl Configurable for EntryPoint { - /// Path to the command's configuration file - fn config_path(&self) -> Option { - // Skip config processing for `completions` - // and the legacy `version` subcommand. - match &self.command { - Some(CliCmd::Completions(_)) | Some(CliCmd::Version(_)) => { - return None; - } - _ => {} - } - - match &self.config { - // Use explicit `-c`/`--config` argument if passed - Some(cfg) => Some(cfg.clone()), - - // Otherwise defer to the toplevel command's config path logic - None => self.command.as_ref().and_then(|cmd| cmd.config_path()), - } - } - - /// Process the configuration after it has been loaded, potentially - /// modifying it or returning an error if options are incompatible - fn process_config(&self, config: Config) -> Result { - match &self.command { - Some(cmd) => cmd.process_config(config), - None => Ok(config), - } - } -} diff --git a/relayer-cli/src/error.rs b/relayer-cli/src/error.rs deleted file mode 100644 index de6f176fc2..0000000000 --- a/relayer-cli/src/error.rs +++ /dev/null @@ -1,104 +0,0 @@ -use flex_error::define_error; - -use tendermint::Error as TendermintError; - -use ibc::core::ics04_channel::channel::IdentifiedChannelEnd; -use ibc::core::ics24_host::identifier::ChainId; - -use ibc_relayer::channel::ChannelError; -use ibc_relayer::connection::ConnectionError; -use ibc_relayer::error::Error as RelayerError; -use ibc_relayer::foreign_client::ForeignClientError; -use ibc_relayer::link::error::LinkError; -use ibc_relayer::spawn::SpawnError; -use ibc_relayer::supervisor::Error as SupervisorError; -use ibc_relayer::transfer::TransferError; -use ibc_relayer::upgrade_chain::UpgradeChainError; - -define_error! 
{ - /// An error raised within the relayer CLI - Error { - Config - |_| { "config error" }, - - Io - |_| { "I/O error" }, - - Query - |_| { "query error" }, - - Runtime - |_| { "chain runtime error" }, - - Tx - |_| { "tx error" }, - - InvalidHash - { hash: String } - [ TendermintError ] - | e | { - format_args!("CLI argument error: could not parse '{}' into a valid hash", - e.hash) - }, - - CliArg - { reason: String } - | e | { - format_args!("CLI argument error: {0}", - e.reason) - }, - - Keys - |_| { "keys error" }, - - MissingChainConfig - { chain_id: ChainId } - | e | { - format_args!("missing chain with id '{}' in configuration file", - e.chain_id) - }, - - MissingCounterpartyChannelId - { channel_end: IdentifiedChannelEnd } - | e | { - format_args!("this channel's counterparty has no channel id: {:?}", - e.channel_end) - }, - - Relayer - [ RelayerError ] - |_| { "relayer error" }, - - Spawn - [ SpawnError ] - |_| { "failed to spawn chain runtime" }, - - Connection - [ ConnectionError ] - |_| { "connection error" }, - - Transfer - [ TransferError ] - |_| { "transfer error" }, - - Channel - [ ChannelError ] - |_| { "channel error" }, - - ForeignClient - [ ForeignClientError ] - |_| { "foreign client error" }, - - Supervisor - [ SupervisorError ] - |_| { "supervisor error" }, - - Link - [ LinkError ] - |_| { "link error" }, - - UpgradeChain - [ UpgradeChainError ] - |_| { "upgrade chain error" }, - } -} diff --git a/relayer-cli/src/lib.rs b/relayer-cli/src/lib.rs deleted file mode 100644 index cab10648d9..0000000000 --- a/relayer-cli/src/lib.rs +++ /dev/null @@ -1,39 +0,0 @@ -//! Hermes: IBC Relayer CLI built in Rust -//! -//! The Hermes binary is a wrapper over the [ibc-relayer] library. This binary builds on -//! the [Abscissa] framework. -//! -//! For a comprehensive guide to using Hermes, the authoritative resource is -//! at [hermes.informal.systems]. -//! -//! [ibc-relayer]: https://docs.rs/ibc-relayer -//! [Abscissa]: https://github.com/iqlusioninc/abscissa -//! [hermes.informal.systems]: https://hermes.informal.systems - -// Tip: Deny warnings with `RUSTFLAGS="-D warnings"` environment variable in CI - -#![forbid(unsafe_code)] -#![deny( - rust_2018_idioms, - trivial_casts, - unused_lifetimes, - unused_qualifications -)] -#![allow(deprecated)] - -extern crate alloc; - -pub mod application; -pub mod cli_utils; -pub mod commands; -pub mod components; -pub mod config; -pub mod prelude; - -pub mod error; - -pub(crate) mod conclude; -pub(crate) mod entry; - -/// The path to the default configuration file, relative to the home directory. -pub const DEFAULT_CONFIG_PATH: &str = ".hermes/config.toml"; diff --git a/relayer-cli/src/prelude.rs b/relayer-cli/src/prelude.rs deleted file mode 100644 index bc45c61b63..0000000000 --- a/relayer-cli/src/prelude.rs +++ /dev/null @@ -1,9 +0,0 @@ -//! Application-local prelude: conveniently import types/functions/macros -//! which are generally useful and should be available in every module with -//! `use crate::prelude::*; - -/// Abscissa core prelude -pub use abscissa_core::prelude::*; - -/// Application state accessors -pub use crate::application::{app_config, app_reader}; diff --git a/relayer-cli/tests/acceptance.rs b/relayer-cli/tests/acceptance.rs deleted file mode 100644 index d179078721..0000000000 --- a/relayer-cli/tests/acceptance.rs +++ /dev/null @@ -1,59 +0,0 @@ -//! Acceptance test: runs the application as a subprocess and asserts its -//! output for given argument combinations matches what is expected. -//! -//! 
Modify and/or delete these as you see fit to test the specific needs of -//! your application. -//! -//! For more information, see: -//! - -// Tip: Deny warnings with `RUSTFLAGS="-D warnings"` environment variable in CI - -#![forbid(unsafe_code)] -#![warn( - missing_docs, - rust_2018_idioms, - trivial_casts, - unused_lifetimes, - unused_qualifications -)] - -use abscissa_core::testing::prelude::*; -use once_cell::sync::Lazy; - -/// Executes your application binary via `cargo run`. -/// -/// Storing this value as a [`Lazy`] static ensures that all instances of -/// the runner acquire a mutex when executing commands and inspecting -/// exit statuses, serializing what would otherwise be multithreaded -/// invocations as `cargo test` executes tests in parallel by default. -pub static RUNNER: Lazy = Lazy::new(CmdRunner::default); - -/// Use `Config::default()` value if no config or args -#[cfg(not(tarpaulin))] -#[test] -fn start_no_args() { - let mut runner = RUNNER.clone(); - let mut cmd = runner.capture_stdout().run(); - cmd.stdout().expect_regex( - format!( - "^[^ ]*{} {}", - env!("CARGO_PKG_NAME"), - regex::escape(env!("CARGO_PKG_VERSION")) - ) - .as_str(), - ); // Todo: find out how to disable colored output and then remove the `[^ ]*` part from the regexp. - cmd.wait().unwrap().expect_success(); -} - -#[cfg(not(tarpaulin))] -#[test] -fn example_configuration_is_valid() { - let mut runner = RUNNER.clone(); - let mut cmd = runner - .capture_stdout() - .args(["--config", "../config.toml", "config", "validate"]) - .run(); - cmd.stdout().expect_regex("configuration is valid"); - cmd.wait().unwrap().expect_success(); -} diff --git a/relayer-cli/tests/fixtures/two_chains.toml b/relayer-cli/tests/fixtures/two_chains.toml deleted file mode 100644 index 6ce815451c..0000000000 --- a/relayer-cli/tests/fixtures/two_chains.toml +++ /dev/null @@ -1,55 +0,0 @@ -[global] -log_level = 'error' # valid options: 'error', 'warn', 'info', 'debug', 'trace' - -[mode] - -[mode.clients] -enabled = true -refresh = true -misbehaviour = true - -[mode.connections] -enabled = false - -[mode.channels] -enabled = false - -[mode.packets] -enabled = true -clear_interval = 100 -clear_on_start = true -tx_confirmation = true - -[[chains]] -id = 'ibc-0' -rpc_addr = 'http://127.0.0.1:26657' -grpc_addr = 'http://127.0.0.1:9090' -websocket_addr = 'ws://127.0.0.1:26657/websocket' -rpc_timeout = '10s' -account_prefix = 'cosmos' -key_name = 'testkey' -store_prefix = 'ibc' -client_ids = ['cla1', 'cla2'] -clock_drift = '5s' -trusting_period = '14days' - -[chains.trust_threshold] -numerator = '1' -denominator = '3' - -[[chains]] -id = 'ibc-1' -rpc_addr = 'http://127.0.0.1:26657' -grpc_addr = 'http://127.0.0.1:9090' -websocket_addr = 'ws://127.0.0.1:26657/websocket' -rpc_timeout = '10s' -account_prefix = 'cosmos' -key_name = 'testkey' -store_prefix = 'ibc' -client_ids = ['clb1'] -clock_drift = '5s' -trusting_period = '14days' - -[chains.trust_threshold] -numerator = '1' -denominator = '3' diff --git a/relayer-rest/Cargo.toml b/relayer-rest/Cargo.toml deleted file mode 100644 index 60e4d9bc7e..0000000000 --- a/relayer-rest/Cargo.toml +++ /dev/null @@ -1,28 +0,0 @@ -[package] -name = "ibc-relayer-rest" -version = "0.15.0" -authors = ["Informal Systems "] -edition = "2021" -license = "Apache-2.0" -readme = "README.md" -keywords = ["ibc", "rest", "api", "cosmos", "tendermint"] -homepage = "https://hermes.informal.systems/" -repository = "https://github.com/informalsystems/ibc-rs" -rust-version = "1.60" -description = """ - Rust 
implementation of a RESTful API server for Hermes -""" - -[dependencies] -ibc = { version = "0.15.0", path = "../modules" } -ibc-relayer = { version = "0.15.0", path = "../relayer" } - -crossbeam-channel = "0.5" -rouille = "3.5" -serde = "1.0" -tracing = "0.1" - -[dev-dependencies] -serde_json = "1.0.81" -toml = "0.5.9" -ureq = "2.4.0" diff --git a/relayer-rest/README.md b/relayer-rest/README.md deleted file mode 100644 index 6de476697a..0000000000 --- a/relayer-rest/README.md +++ /dev/null @@ -1,42 +0,0 @@ -# IBC Relayer REST Server - -[![Crate][crate-image]][crate-link] -[![Docs][docs-image]][docs-link] -[![Build Status][build-image]][build-link] -[![End to End testing][e2e-image]][e2e-link] -[![Apache 2.0 Licensed][license-image]][license-link] -![Rust Stable][rustc-image] -![Rust 1.60+][rustc-version] - -This is the repository for the IBC REST server for use in the Hermes IBC relayer. - -See the [REST server][rest-doc] section in the Hermes guide for more information. - -## License - -Copyright © 2021 Informal Systems Inc. and ibc-rs authors. - -Licensed under the Apache License, Version 2.0 (the "License"); you may not use the files in this repository except in compliance with the License. You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - - -[//]: # (badges) - -[crate-image]: https://img.shields.io/crates/v/ibc-relayer-rest.svg -[crate-link]: https://crates.io/crates/ibc-relayer-rest -[docs-image]: https://docs.rs/ibc-relayer-rest/badge.svg -[docs-link]: https://docs.rs/ibc-relayer-rest/ - -[build-image]: https://github.com/informalsystems/ibc-rs/workflows/Rust/badge.svg -[build-link]: https://github.com/informalsystems/ibc-rs/actions?query=workflow%3ARust -[e2e-image]: https://github.com/informalsystems/ibc-rs/workflows/End%20to%20End%20testing/badge.svg -[e2e-link]: https://github.com/informalsystems/ibc-rs/actions?query=workflow%3A%22End+to+End+testing%22 -[telemetry-doc]: https://hermes.informal.systems/rest-api.html - -[license-image]: https://img.shields.io/badge/license-Apache2.0-blue.svg -[license-link]: https://github.com/informalsystems/ibc-rs/blob/master/LICENSE -[rustc-image]: https://img.shields.io/badge/rustc-stable-blue.svg -[rustc-version]: https://img.shields.io/badge/rustc-1.60+-blue.svg diff --git a/relayer-rest/src/config.rs b/relayer-rest/src/config.rs deleted file mode 100644 index 1949fef04a..0000000000 --- a/relayer-rest/src/config.rs +++ /dev/null @@ -1,24 +0,0 @@ -use core::fmt; - -/// REST server configuration -#[derive(Clone, Debug)] -pub struct Config { - pub host: String, - pub port: u16, -} - -impl Config { - pub fn new(host: String, port: u16) -> Self { - Self { host, port } - } - - pub fn address(&self) -> (&str, u16) { - (&self.host, self.port) - } -} - -impl fmt::Display for Config { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}:{}", self.host, self.port) - } -} diff --git a/relayer-rest/src/handle.rs b/relayer-rest/src/handle.rs deleted file mode 100644 index 3d3b047b0b..0000000000 --- a/relayer-rest/src/handle.rs +++ /dev/null @@ -1,87 +0,0 @@ -use core::fmt::Debug; - -use tracing::error; - -use crossbeam_channel as channel; - -use ibc::core::ics24_host::identifier::ChainId; 
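// NOTE (added for illustration; not part of the original file): the helpers below
// (`all_chain_ids`, `chain_config`, `supervisor_state`, `assemble_version_info`) share
// one request/reply pattern: build a `Request` variant carrying a one-shot reply
// channel, send it over the crossbeam `Sender`, and block until the relayer answers.
// A hypothetical caller could look like:
//
//     fn print_chains(sender: &channel::Sender<Request>) -> Result<(), RestApiError> {
//         for id in all_chain_ids(sender)? {
//             println!("relayer is tracking chain {}", id);
//         }
//         Ok(())
//     }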
-use ibc_relayer::supervisor::dump_state::SupervisorState; -use ibc_relayer::{ - config::ChainConfig, - rest::{ - request::{reply_channel, ReplySender, Request, VersionInfo}, - RestApiError, - }, -}; - -pub const NAME: &str = env!( - "CARGO_PKG_NAME", - "the env. variable CARGO_PKG_NAME of ibc-relayer-rest is not set!" -); -pub const VER: &str = env!( - "CARGO_PKG_VERSION", - "the env. variable CARGO_PKG_VERSION of ibc-relayer-rest is not set!" -); - -fn submit_request(request_sender: &channel::Sender, f: F) -> Result -where - F: FnOnce(ReplySender) -> Request, - O: Debug, -{ - let (reply_sender, reply_receiver) = reply_channel(); - - // Construct the request, which is simply an enum variant - let req = f(reply_sender); - - // Send the request - request_sender - .send(req) - .map_err(|e| RestApiError::ChannelSend(e.to_string()))?; - - // Wait for the reply - reply_receiver - .recv() - .map_err(|e| RestApiError::ChannelRecv(e.to_string()))? -} - -pub fn all_chain_ids(sender: &channel::Sender) -> Result, RestApiError> { - submit_request(sender, |reply_to| Request::GetChains { reply_to }) -} - -pub fn chain_config( - sender: &channel::Sender, - chain_id: &str, -) -> Result { - submit_request(sender, |reply_to| Request::GetChain { - chain_id: ChainId::from_string(chain_id), - reply_to, - }) -} - -pub fn supervisor_state( - sender: &channel::Sender, -) -> Result { - submit_request(sender, |reply_to| Request::State { reply_to }) -} - -pub fn assemble_version_info(sender: &channel::Sender) -> Vec { - // Fetch the relayer library version - let lib_version = submit_request(sender, |reply_to| Request::Version { reply_to }) - .map_err(|e| { - error!( - "[rest-server] failed while fetching relayer lib version info: {}", - e - ) - }) - .unwrap_or(VersionInfo { - name: "[ibc relayer library]".to_string(), - version: "[failed to fetch the version]".to_string(), - }); - // Append the REST API version info - let rest_api_version = VersionInfo { - name: NAME.to_string(), - version: VER.to_string(), - }; - - vec![lib_version, rest_api_version] -} diff --git a/relayer-rest/src/lib.rs b/relayer-rest/src/lib.rs deleted file mode 100644 index 58787b8285..0000000000 --- a/relayer-rest/src/lib.rs +++ /dev/null @@ -1,9 +0,0 @@ -#[macro_use] -extern crate rouille; - -mod config; -pub use config::Config; - -pub mod server; - -pub(crate) mod handle; diff --git a/relayer-rest/src/server.rs b/relayer-rest/src/server.rs deleted file mode 100644 index 89ac24199a..0000000000 --- a/relayer-rest/src/server.rs +++ /dev/null @@ -1,96 +0,0 @@ -use std::thread; - -use crossbeam_channel as channel; -use serde::{Deserialize, Serialize}; -use tracing::{info, trace}; - -use ibc_relayer::rest::request::Request; - -use crate::{ - handle::{all_chain_ids, assemble_version_info, chain_config, supervisor_state}, - Config, -}; - -pub struct ServerHandle { - join_handle: thread::JoinHandle<()>, - tx_stop: std::sync::mpsc::Sender<()>, -} - -impl ServerHandle { - pub fn join(self) -> std::thread::Result<()> { - self.join_handle.join() - } - - pub fn stop(&self) { - self.tx_stop.send(()).unwrap(); - } -} - -pub fn spawn(config: Config) -> (ServerHandle, channel::Receiver) { - let (req_tx, req_rx) = channel::unbounded::(); - - info!("starting REST API server listening at http://{}", config); - let handle = run(config, req_tx); - - (handle, req_rx) -} - -#[derive(Debug, Serialize, Deserialize)] -#[serde(tag = "status", content = "result")] -#[serde(rename_all = "lowercase")] -enum JsonResult { - Success(R), - Error(E), -} - -impl From> for 
JsonResult { - fn from(r: Result) -> Self { - match r { - Ok(a) => Self::Success(a), - Err(e) => Self::Error(e), - } - } -} - -#[allow(clippy::manual_strip)] -fn run(config: Config, sender: channel::Sender) -> ServerHandle { - let server = rouille::Server::new(config.address(), move |request| { - router!(request, - (GET) (/version) => { - trace!("[rest/server] GET /version"); - let result = assemble_version_info(&sender); - rouille::Response::json(&result) - }, - - (GET) (/chains) => { - // TODO(Soares): Add a `into_detail` to consume the error and obtain - // the underlying detail, so that we avoid doing `e.0` - trace!("[rest] GET /chains"); - let result = all_chain_ids(&sender); - rouille::Response::json(&JsonResult::from(result)) - }, - - (GET) (/chain/{id: String}) => { - trace!("[rest] GET /chain/{}", id); - let result = chain_config(&sender, &id); - rouille::Response::json(&JsonResult::from(result)) - }, - - (GET) (/state) => { - trace!("[rest] GET /state"); - let result = supervisor_state(&sender); - rouille::Response::json(&JsonResult::from(result)) - }, - - _ => rouille::Response::empty_404(), - ) - }) - .unwrap(); - - let (join_handle, tx_stop) = server.stoppable(); - - ServerHandle { - join_handle, - tx_stop, - } -} diff --git a/relayer-rest/tests/mock.rs b/relayer-rest/tests/mock.rs deleted file mode 100644 index 7fc6e891ec..0000000000 --- a/relayer-rest/tests/mock.rs +++ /dev/null @@ -1,139 +0,0 @@ -use std::str::FromStr; - -use serde::{Deserialize, Serialize}; - -use ibc::core::ics24_host::identifier::ChainId; -use ibc_relayer::{ - config::ChainConfig, - rest::request::{Request, VersionInfo}, - supervisor::dump_state::SupervisorState, -}; - -use ibc_relayer_rest::{server::spawn, Config}; - -enum TestResult { - Success, - WrongRequest(Request), -} - -#[derive(Debug, Serialize, Deserialize)] -#[serde(tag = "status", content = "result")] -#[serde(rename_all = "lowercase")] -enum JsonResult { - Success(R), - Error(E), -} - -fn run_test(port: u16, path: &str, expected: R, handler: F) -where - R: Serialize, - F: FnOnce(Request) -> TestResult + Send + 'static, -{ - let config = Config::new("127.0.0.1".to_string(), port); - - let (handle, rx) = spawn(config); - - std::thread::spawn(move || match rx.recv() { - Ok(r) => match handler(r) { - TestResult::Success => (), // all good - TestResult::WrongRequest(r) => panic!("got the wrong request: {:?}", r), - }, - Err(e) => panic!("got an error: {}", e), - }); - - let response = ureq::get(&format!("http://127.0.0.1:{}{}", port, path)) - .call() - .unwrap() - .into_string() - .unwrap(); - - let expected_json = serde_json::to_string(&expected).unwrap(); - assert_eq!(response, expected_json); - - handle.stop(); - handle.join().unwrap(); -} - -#[test] -fn version() { - let version = VersionInfo { - name: "mock".to_string(), - version: "0.0.0".to_string(), - }; - - let rest_api_version = VersionInfo { - name: "ibc-relayer-rest".to_string(), - version: "0.15.0".to_string(), - }; - - let result = vec![version.clone(), rest_api_version]; - - run_test(19101, "/version", result, |req| match req { - Request::Version { reply_to } => { - reply_to.send(Ok(version)).unwrap(); - TestResult::Success - } - req => TestResult::WrongRequest(req), - }) -} - -#[test] -fn get_chains() { - let chain_id = ChainId::from_str("mock-0").unwrap(); - let result: JsonResult<_, ()> = JsonResult::Success(vec![chain_id.clone()]); - - run_test(19102, "/chains", result, |req| match req { - Request::GetChains { reply_to } => { - reply_to.send(Ok(vec![chain_id])).unwrap(); - 
TestResult::Success - } - req => TestResult::WrongRequest(req), - }); -} - -const MOCK_CHAIN_CONFIG: &str = r#" -id = 'mock-0' -rpc_addr = 'http://127.0.0.1:26557' -grpc_addr = 'http://127.0.0.1:9091' -websocket_addr = 'ws://127.0.0.1:26557/websocket' -rpc_timeout = '10s' -account_prefix = 'cosmos' -key_name = 'testkey' -store_prefix = 'ibc' -max_gas = 3000000 -gas_price = { price = 0.001, denom = 'stake' } -gas_adjustment = 0.1 -max_msg_num = 30 -max_tx_size = 2097152 -clock_drift = '5s' -trusting_period = '14days' -trust_threshold = { numerator = '1', denominator = '3' } -"#; - -#[test] -fn get_chain() { - let config: ChainConfig = toml::de::from_str(MOCK_CHAIN_CONFIG).unwrap(); - let result: JsonResult<_, ()> = JsonResult::Success(config.clone()); - - run_test(19103, "/chain/mock-0", result, |req| match req { - Request::GetChain { chain_id, reply_to } if chain_id.to_string().as_str() == "mock-0" => { - reply_to.send(Ok(config)).unwrap(); - TestResult::Success - } - req => TestResult::WrongRequest(req), - }); -} - -#[test] -fn state() { - let state = SupervisorState::new(vec!["mock-0".parse().unwrap()], std::iter::empty()); - let result: JsonResult<_, ()> = JsonResult::Success(state.clone()); - - run_test(19104, "/state", result, |req| match req { - Request::State { reply_to } => { - reply_to.send(Ok(state)).unwrap(); - TestResult::Success - } - req => TestResult::WrongRequest(req), - }); -} diff --git a/relayer/Cargo.toml b/relayer/Cargo.toml deleted file mode 100644 index dc934b7f8a..0000000000 --- a/relayer/Cargo.toml +++ /dev/null @@ -1,108 +0,0 @@ -[package] -name = "ibc-relayer" -version = "0.15.0" -edition = "2021" -license = "Apache-2.0" -readme = "README.md" -keywords = ["blockchain", "consensus", "cosmos", "ibc", "tendermint"] -repository = "https://github.com/informalsystems/ibc-rs" -authors = ["Informal Systems "] -rust-version = "1.60" -description = """ - Implementation of an IBC Relayer in Rust, as a library -""" - -[package.metadata.docs.rs] -all-features = true - -[features] -default = ["flex-error/std", "flex-error/eyre_tracer"] -profiling = [] -telemetry = ["ibc-telemetry"] - -[dependencies] -ibc = { version = "0.15.0", path = "../modules" } -ibc-proto = { version = "0.18.0", path = "../proto" } -ibc-telemetry = { version = "0.15.0", path = "../telemetry", optional = true } - -subtle-encoding = "0.5" -humantime-serde = "1.1.1" -serde = "1.0" -serde_derive = "1.0" -thiserror = "1.0.30" -toml = "0.5" -tracing = "0.1.34" -tokio = { version = "1.0", features = ["rt-multi-thread", "time", "sync"] } -serde_json = { version = "1" } -bytes = "1.1.0" -prost = { version = "0.10" } -prost-types = { version = "0.10" } -tonic = { version = "0.7.2", features = ["tls", "tls-roots"] } -futures = "0.3.21" -crossbeam-channel = "0.5.4" -k256 = { version = "0.10.4", features = ["ecdsa-core", "ecdsa", "sha256"]} -hex = "0.4" -bitcoin = { version = "=0.28", features = ["use-serde"] } -tiny-bip39 = "0.8.0" -hdpath = { version = "0.6.1" } -sha2 = "0.10.2" -tiny-keccak = { version = "2.0.2", features = ["keccak"], default-features = false } -ripemd160 = "0.9.1" -bech32 = "0.9.0" -itertools = "0.10.3" -dirs-next = "2.0.0" -retry = { version = "1.3.1", default-features = false } -async-stream = "0.3.3" -http = "0.2.8" -flex-error = { version = "0.4.4", default-features = false } -signature = "1.4.0" -anyhow = "1.0" -semver = "1.0" -humantime = "2.1.0" -nanoid = "0.4.0" -regex = "1.5.5" -moka = "0.8.5" -uuid = { version = "1.1.2", features = ["v4"] } - -[dependencies.num-bigint] -version = "0.4" 
-features = ["serde"] - -[dependencies.num-rational] -version = "0.4.0" -features = ["num-bigint", "serde"] - -[dependencies.tendermint] -git = "https://github.com/composableFi/tendermint-rs" -rev = "5a74e0f8da4d3dab83cc04b5f1363b018cf3d9e8" -features = ["secp256k1"] - -[dependencies.tendermint-rpc] -git = "https://github.com/composableFi/tendermint-rs" -rev = "5a74e0f8da4d3dab83cc04b5f1363b018cf3d9e8" -features = ["http-client", "websocket-client"] - -[dependencies.tendermint-light-client] -git = "https://github.com/composableFi/tendermint-rs" -rev = "5a74e0f8da4d3dab83cc04b5f1363b018cf3d9e8" -default-features = false -features = ["rpc-client", "secp256k1", "unstable"] - -[dependencies.tendermint-light-client-verifier] -git = "https://github.com/composableFi/tendermint-rs" -rev = "5a74e0f8da4d3dab83cc04b5f1363b018cf3d9e8" -default-features = false - -[dependencies.tendermint-proto] -git = "https://github.com/composableFi/tendermint-rs" -rev = "5a74e0f8da4d3dab83cc04b5f1363b018cf3d9e8" - -[dev-dependencies] -ibc = { version = "0.15.0", path = "../modules", features = ["mocks"] } -serial_test = "0.7.0" -env_logger = "0.9.0" -tracing-subscriber = { version = "0.3.11", features = ["fmt", "env-filter", "json"] } -test-log = { version = "0.2.10", features = ["trace"] } - -# Needed for generating (synthetic) light blocks. -tendermint-testgen = { git = "https://github.com/composableFi/tendermint-rs", rev = "5a74e0f8da4d3dab83cc04b5f1363b018cf3d9e8"} diff --git a/relayer/README.md b/relayer/README.md deleted file mode 100644 index fd902c9094..0000000000 --- a/relayer/README.md +++ /dev/null @@ -1,40 +0,0 @@ -# IBC Relayer (library) - -[![Crate][crate-image]][crate-link] -[![Docs][docs-image]][docs-link] -[![Build Status][build-image]][build-link] -[![End to End testing][e2e-image]][e2e-link] -[![Apache 2.0 Licensed][license-image]][license-link] -![Rust Stable][rustc-image] -![Rust 1.51+][rustc-version] - -This is the repository for the IBC Relayer built in Rust, as a library. - - -## License - -Copyright © 2021 Informal Systems Inc. and ibc-rs authors. - -Licensed under the Apache License, Version 2.0 (the "License"); you may not use the files in this repository except in compliance with the License. You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
- - -[//]: # (badges) - -[crate-image]: https://img.shields.io/crates/v/ibc-relayer.svg -[crate-link]: https://crates.io/crates/ibc-relayer -[docs-image]: https://docs.rs/ibc-relayer/badge.svg -[docs-link]: https://docs.rs/ibc-relayer/ - -[build-image]: https://github.com/informalsystems/ibc-rs/workflows/Rust/badge.svg -[build-link]: https://github.com/informalsystems/ibc-rs/actions?query=workflow%3ARust -[e2e-image]: https://github.com/informalsystems/ibc-rs/workflows/End%20to%20End%20testing/badge.svg -[e2e-link]: https://github.com/informalsystems/ibc-rs/actions?query=workflow%3A%22End+to+End+testing%22 - -[license-image]: https://img.shields.io/badge/license-Apache2.0-blue.svg -[license-link]: https://github.com/informalsystems/ibc-rs/blob/master/LICENSE -[rustc-image]: https://img.shields.io/badge/rustc-stable-blue.svg -[rustc-version]: https://img.shields.io/badge/rustc-1.51+-blue.svg \ No newline at end of file diff --git a/relayer/src/account.rs b/relayer/src/account.rs deleted file mode 100644 index 8ee076ab6e..0000000000 --- a/relayer/src/account.rs +++ /dev/null @@ -1,12 +0,0 @@ -//! Data structures related to the accounts used by the relayer. - -use serde::{Deserialize, Serialize}; - -/// The balance for a specific denom -#[derive(Clone, Debug, Serialize, Deserialize)] -pub struct Balance { - /// The amount of coins in the account, as a string to allow for large amounts - pub amount: String, - /// The denomination for that coin - pub denom: String, -} diff --git a/relayer/src/cache.rs b/relayer/src/cache.rs deleted file mode 100644 index 29fca4898a..0000000000 --- a/relayer/src/cache.rs +++ /dev/null @@ -1,183 +0,0 @@ -//! Module to provide caching support for the relayer. -//! -//! Utilizes the [`moka`](https://docs.rs/moka) crate, which provides full -//! concurrency of retrievals and a high expected concurrency for updates. -use core::fmt::Formatter; -use std::fmt; -use std::time::Duration; - -use moka::sync::Cache as MokaCache; - -use ibc::core::ics02_client::client_state::AnyClientState; -use ibc::core::ics02_client::height::Height; -use ibc::core::ics03_connection::connection::ConnectionEnd; -use ibc::core::ics04_channel::channel::ChannelEnd; -use ibc::core::ics24_host::identifier::{ClientId, ConnectionId, PortChannelId}; - -const CHANNEL_CACHE_TTL: Duration = Duration::from_secs(60); -const CONNECTION_CACHE_TTL: Duration = Duration::from_secs(10 * 60); -const CLIENT_STATE_CACHE_TTL: Duration = Duration::from_millis(500); -const LATEST_HEIGHT_CACHE_TTL: Duration = Duration::from_millis(200); - -const CHANNEL_CACHE_CAPACITY: u64 = 10_000; -const CONNECTION_CACHE_CAPACITY: u64 = 10_000; -const CLIENT_STATE_CACHE_CAPACITY: u64 = 10_000; - -/// Whether or not a result was in cache (ie. a cache hit) -#[derive(Copy, Clone, Debug, PartialEq, Eq)] -pub enum CacheStatus { - Hit, - Miss, -} - -/// Alias for a result and its cache status. -pub type CacheResult = Result<(A, CacheStatus), E>; - -/// The main cache data structure, which comprises multiple sub-caches for caching -/// different chain components, each with different time-to-live values. -/// -/// There should be one `Cache` instantiated per every chain runtime. -#[derive(Clone)] -pub struct Cache { - /// Cache storing [`ChannelEnd`]s keyed by their [`PortChannelId`]s. - channels: MokaCache, - /// Cache storing [`ConnectionEnd`]s keyed by their [`ConnectionId`]s. - connections: MokaCache, - /// Cache storing [`AnyClientState`]s keyed by their [`ClientId`]s. 
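    // NOTE (annotation, not in the original source): the TTLs above are deliberately
    // asymmetric. Channel and connection ends are cached only once they are open and
    // rarely change afterwards, hence 60 s and 10 min, while client states (500 ms)
    // and the latest height (200 ms) move every block and are cached only to absorb
    // bursts of identical queries.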
- client_states: MokaCache, - /// The latest `Height` associated with the chain runtime this `Cache` is associated with. - latest_height: MokaCache<(), Height>, -} - -impl Default for Cache { - fn default() -> Self { - Self::new() - } -} - -impl Cache { - /// Initializes a new empty [`Cache`] with default time-to-live values. - pub fn new() -> Cache { - let channels = MokaCache::builder() - .time_to_live(CHANNEL_CACHE_TTL) - .max_capacity(CHANNEL_CACHE_CAPACITY) - .build(); - - let connections = MokaCache::builder() - .time_to_live(CONNECTION_CACHE_TTL) - .max_capacity(CONNECTION_CACHE_CAPACITY) - .build(); - - let client_states = MokaCache::builder() - .time_to_live(CLIENT_STATE_CACHE_TTL) - .max_capacity(CLIENT_STATE_CACHE_CAPACITY) - .build(); - - let latest_height = MokaCache::builder() - .time_to_live(LATEST_HEIGHT_CACHE_TTL) - .max_capacity(1) - .build(); - - Cache { - channels, - connections, - client_states, - latest_height, - } - } - - /// Return a cached [`ChannelEnd`] via its [`PortChannelId`] if it exists in the cache. - /// Otherwise, attempts to fetch it via the supplied fetcher function `F`. If `F` - /// returns successfully with the channel end in an open state, a copy of it is stored in - /// the cache before it is returned. - pub fn get_or_try_insert_channel_with( - &self, - id: &PortChannelId, - f: F, - ) -> CacheResult - where - F: FnOnce() -> Result, - { - if let Some(chan) = self.channels.get(id) { - // If cache hit, return it. - Ok((chan, CacheStatus::Hit)) - } else { - // Only cache a channel end if the channel is open. - let chan = f()?; - if chan.state().is_open() { - self.channels.insert(id.clone(), chan.clone()); - } - Ok((chan, CacheStatus::Miss)) - } - } - - /// Return a cached [`ConnectionEnd`] via its [`ConnectionId`] if it exists in the cache. - /// Otherwise, attempts to fetch it via the supplied fetcher function `F`. If `F` - /// returns successfully with the connection end in an open state, a copy of it is - /// in the cache before it is returned. - pub fn get_or_try_insert_connection_with( - &self, - id: &ConnectionId, - f: F, - ) -> CacheResult - where - F: FnOnce() -> Result, - { - if let Some(conn) = self.connections.get(id) { - Ok((conn, CacheStatus::Hit)) - } else { - let conn = f()?; - if conn.state().is_open() { - self.connections.insert(id.clone(), conn.clone()); - } - Ok((conn, CacheStatus::Miss)) - } - } - - /// Return a cached [`AnyClientState`] via its [`ClientId`] if it exists in the cache. - /// Otherwise, attempts to fetch it via the supplied fetcher function `F`. If `F` - /// returns successfully with the client state, a copy of it is stored in the cache - /// before it is returned. - pub fn get_or_try_insert_client_state_with( - &self, - id: &ClientId, - f: F, - ) -> CacheResult - where - F: FnOnce() -> Result, - { - if let Some(state) = self.client_states.get(id) { - Ok((state, CacheStatus::Hit)) - } else { - let state = f()?; - self.client_states.insert(id.clone(), state.clone()); - Ok((state, CacheStatus::Miss)) - } - } - - /// Returns the latest [`Height`] value if it exists in the cache. - /// Otherwise, attempts to fetch it via the supplied fetcher function `F`. If - /// `F` returns successfully with the latest height, a copy of it is stored in the - /// cache before it is returned. - /// - /// This value is cached with a small time-to-live so that the latest height - /// query returns the same height if the same query is repeated within a small time frame. 
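    // A minimal sketch (hypothetical caller and fetcher, not from the original source)
    // of how the chain runtime is expected to use these helpers; the closure only runs
    // on a cache miss, and only open channel/connection ends are retained:
    //
    //     let (chan, status) = cache.get_or_try_insert_channel_with(&port_channel_id, || {
    //         query_channel_from_chain(&port_channel_id) // hypothetical fetcher
    //     })?;
    //     if status == CacheStatus::Miss {
    //         // first sight of this channel, or the cached entry had expired
    //     }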
- pub fn get_or_try_update_latest_height_with(&self, f: F) -> CacheResult - where - F: FnOnce() -> Result, - { - if let Some(height) = self.latest_height.get(&()) { - Ok((height, CacheStatus::Hit)) - } else { - let height = f()?; - self.latest_height.insert((), height); - Ok((height, CacheStatus::Miss)) - } - } -} - -impl fmt::Debug for Cache { - fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result { - f.debug_struct("Cache").finish_non_exhaustive() - } -} diff --git a/relayer/src/chain.rs b/relayer/src/chain.rs deleted file mode 100644 index 37b075e4e4..0000000000 --- a/relayer/src/chain.rs +++ /dev/null @@ -1,76 +0,0 @@ -pub mod client; -pub mod cosmos; -pub mod counterparty; -pub mod endpoint; -pub mod handle; -pub mod requests; -pub mod runtime; -pub mod tracking; - -#[cfg(test)] -pub mod mock; - -use serde::{de::Error, Deserialize, Serialize}; - -// NOTE(new): When adding a variant to `ChainType`, make sure to update -// the `Deserialize` implementation below and the tests. -// See the NOTE(new) comments below. - -#[derive(Copy, Clone, Debug, Serialize)] -/// Types of chains the relayer can relay to and from -pub enum ChainType { - /// Chains based on the Cosmos SDK - CosmosSdk, - - /// Mock chain used for testing - #[cfg(test)] - Mock, -} - -impl<'de> Deserialize<'de> for ChainType { - fn deserialize(deserializer: D) -> Result - where - D: serde::Deserializer<'de>, - { - let original = String::deserialize(deserializer)?; - let s = original.to_ascii_lowercase().replace('-', ""); - - match s.as_str() { - "cosmossdk" => Ok(Self::CosmosSdk), - - #[cfg(test)] - "mock" => Ok(Self::Mock), - - // NOTE(new): Add a case here - _ => Err(D::Error::unknown_variant(&original, &["cosmos-sdk"])), // NOTE(new): mention the new variant here - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[derive(Copy, Clone, Debug, Serialize, Deserialize)] - pub struct Config { - tpe: ChainType, - } - - fn parse(variant: &str) -> Result { - toml::from_str::(&format!("tpe = '{variant}'")).map(|e| e.tpe) - } - - #[test] - fn deserialize() { - use ChainType::*; - - assert!(matches!(parse("CosmosSdk"), Ok(CosmosSdk))); - assert!(matches!(parse("cosmossdk"), Ok(CosmosSdk))); - assert!(matches!(parse("cosmos-sdk"), Ok(CosmosSdk))); - assert!(matches!(parse("mock"), Ok(Mock))); - - // NOTE(new): Add tests here - - assert!(matches!(parse("hello-world"), Err(_))); - } -} diff --git a/relayer/src/chain/client.rs b/relayer/src/chain/client.rs deleted file mode 100644 index b791c9d199..0000000000 --- a/relayer/src/chain/client.rs +++ /dev/null @@ -1,33 +0,0 @@ -//! Data structures and logic to set up IBC client's parameters. - -use crate::chain::cosmos; -use crate::config::ChainConfig; -use crate::foreign_client::CreateOptions; - -/// Client parameters for the `build_create_client` operation. -/// -/// The parameters are specialized for each supported chain type. -#[derive(Clone, Debug)] -pub enum ClientSettings { - Tendermint(cosmos::client::Settings), -} - -impl ClientSettings { - /// Takes the settings from the user-supplied options if they have been specified, - /// falling back to defaults using the configuration of the source - /// and the destination chain. - pub fn for_create_command( - options: CreateOptions, - src_chain_config: &ChainConfig, - dst_chain_config: &ChainConfig, - ) -> Self { - // Currently, only Tendermint chain pairs are supported by - // ForeignClient::build_create_client_and_send. Support for - // heterogeneous chains is left for future revisions. 
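        // NOTE (annotation, not in the original source): `Settings::for_create_command`
        // below fills in whatever the user left out of `CreateOptions` with defaults
        // derived from the source and destination `ChainConfig`s, as the doc comment
        // above describes; only the Tendermint variant exists in this patch.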
- ClientSettings::Tendermint(cosmos::client::Settings::for_create_command( - options, - src_chain_config, - dst_chain_config, - )) - } -} diff --git a/relayer/src/chain/cosmos.rs b/relayer/src/chain/cosmos.rs deleted file mode 100644 index 93b500a871..0000000000 --- a/relayer/src/chain/cosmos.rs +++ /dev/null @@ -1,1749 +0,0 @@ -use alloc::sync::Arc; -use bytes::{Buf, Bytes}; -use core::{ - convert::{TryFrom, TryInto}, - future::Future, - str::FromStr, - time::Duration, -}; -use num_bigint::BigInt; -use std::thread; - -use bitcoin::hashes::hex::ToHex; -use tendermint::block::Height; -use tendermint::{ - abci::{Event, Path as TendermintABCIPath}, - node::info::TxIndexStatus, -}; -use tendermint_light_client_verifier::types::LightBlock as TmLightBlock; -use tendermint_proto::Protobuf; -use tendermint_rpc::{ - endpoint::broadcast::tx_sync::Response, endpoint::status, Client, HttpClient, Order, -}; -use tokio::runtime::Runtime as TokioRuntime; -use tonic::codegen::http::Uri; -use tracing::{error, span, warn, Level}; - -use ibc::clients::ics07_tendermint::consensus_state::ConsensusState as TMConsensusState; -use ibc::clients::ics07_tendermint::header::Header as TmHeader; -use ibc::core::ics02_client::client_consensus::{AnyConsensusState, AnyConsensusStateWithHeight}; -use ibc::core::ics02_client::client_state::{AnyClientState, IdentifiedAnyClientState}; -use ibc::core::ics02_client::client_type::ClientType; -use ibc::core::ics02_client::error::Error as ClientError; -use ibc::core::ics03_connection::connection::{ConnectionEnd, IdentifiedConnectionEnd}; -use ibc::core::ics04_channel::channel::{ - ChannelEnd, IdentifiedChannelEnd, QueryPacketEventDataRequest, -}; -use ibc::core::ics04_channel::events as ChannelEvents; -use ibc::core::ics04_channel::packet::{Packet, Sequence}; -use ibc::core::ics23_commitment::commitment::CommitmentPrefix; -use ibc::core::ics24_host::identifier::{ChainId, ClientId, ConnectionId}; -use ibc::core::ics24_host::path::{ - AcksPath, ChannelEndsPath, ClientConsensusStatePath, ClientStatePath, CommitmentsPath, - ConnectionsPath, ReceiptsPath, SeqRecvsPath, -}; -use ibc::core::ics24_host::{ClientUpgradePath, Path, IBC_QUERY_PATH, SDK_UPGRADE_QUERY_PATH}; -use ibc::events::IbcEvent; -use ibc::query::QueryBlockRequest; -use ibc::query::QueryTxRequest; -use ibc::signer::Signer; -use ibc::Height as ICSHeight; -use ibc::{ - clients::ics07_tendermint::client_state::{AllowUpdate, ClientState}, - core::ics23_commitment::merkle::MerkleProof, -}; -use ibc_proto::cosmos::staking::v1beta1::Params as StakingParams; - -use crate::account::Balance; -use crate::chain::client::ClientSettings; -use crate::chain::cosmos::batch::{ - send_batched_messages_and_wait_check_tx, send_batched_messages_and_wait_commit, -}; -use crate::chain::cosmos::encode::encode_to_bech32; -use crate::chain::cosmos::gas::{calculate_fee, mul_ceil}; -use crate::chain::cosmos::query::account::get_or_fetch_account; -use crate::chain::cosmos::query::balance::query_balance; -use crate::chain::cosmos::query::status::query_status; -use crate::chain::cosmos::query::tx::query_txs; -use crate::chain::cosmos::query::{abci_query, fetch_version_specs, packet_query, QueryResponse}; -use crate::chain::cosmos::types::account::Account; -use crate::chain::cosmos::types::config::TxConfig; -use crate::chain::cosmos::types::gas::{default_gas_from_config, max_gas_from_config}; -use crate::chain::endpoint::{ChainEndpoint, ChainStatus, HealthCheck}; -use crate::chain::tracking::TrackedMsgs; -use crate::config::ChainConfig; -use 
crate::error::Error; -use crate::event::monitor::{EventMonitor, EventReceiver, TxMonitorCmd}; -use crate::keyring::{KeyEntry, KeyRing}; -use crate::light_client::tendermint::LightClient as TmLightClient; -use crate::light_client::{LightClient, Verified}; - -use super::requests::{ - IncludeProof, QueryChannelClientStateRequest, QueryChannelRequest, QueryChannelsRequest, - QueryClientConnectionsRequest, QueryClientStateRequest, QueryClientStatesRequest, - QueryConnectionChannelsRequest, QueryConnectionRequest, QueryConnectionsRequest, - QueryConsensusStateRequest, QueryConsensusStatesRequest, QueryHostConsensusStateRequest, - QueryNextSequenceReceiveRequest, QueryPacketAcknowledgementRequest, - QueryPacketAcknowledgementsRequest, QueryPacketCommitmentRequest, - QueryPacketCommitmentsRequest, QueryPacketReceiptRequest, QueryUnreceivedAcksRequest, - QueryUnreceivedPacketsRequest, QueryUpgradedClientStateRequest, - QueryUpgradedConsensusStateRequest, -}; - -pub mod batch; -pub mod client; -pub mod compatibility; -pub mod encode; -pub mod estimate; -pub mod gas; -pub mod query; -pub mod retry; -pub mod simulate; -pub mod tx; -pub mod types; -pub mod version; -pub mod wait; - -/// fraction of the maximum block size defined in the Tendermint core consensus parameters. -pub const GENESIS_MAX_BYTES_MAX_FRACTION: f64 = 0.9; -// https://github.com/cosmos/cosmos-sdk/blob/v0.44.0/types/errors/errors.go#L115-L117 - -pub struct CosmosSdkChain { - config: ChainConfig, - tx_config: TxConfig, - rpc_client: HttpClient, - grpc_addr: Uri, - rt: Arc, - keybase: KeyRing, - /// A cached copy of the account information - account: Option, -} - -impl CosmosSdkChain { - /// Get a reference to the configuration for this chain. - pub fn config(&self) -> &ChainConfig { - &self.config - } - - /// Performs validation of chain-specific configuration - /// parameters against the chain's genesis configuration. - /// - /// Currently, validates the following: - /// - the configured `max_tx_size` is appropriate - /// - the trusting period is greater than zero - /// - the trusting period is smaller than the unbonding period - /// - the default gas is smaller than the max gas - /// - /// Emits a log warning in case any error is encountered and - /// exits early without doing subsequent validations. - pub fn validate_params(&self) -> Result<(), Error> { - let unbonding_period = self.unbonding_period()?; - let trusting_period = self.trusting_period(unbonding_period); - - // Check that the trusting period is greater than zero - if trusting_period <= Duration::ZERO { - return Err(Error::config_validation_trusting_period_smaller_than_zero( - self.id().clone(), - trusting_period, - )); - } - - // Check that the trusting period is smaller than the unbounding period - if trusting_period >= unbonding_period { - return Err( - Error::config_validation_trusting_period_greater_than_unbonding_period( - self.id().clone(), - trusting_period, - unbonding_period, - ), - ); - } - - let max_gas = max_gas_from_config(&self.config); - let default_gas = default_gas_from_config(&self.config); - - // If the default gas is strictly greater than the max gas and the tx simulation fails, - // Hermes won't be able to ever submit that tx because the gas amount wanted will be - // greater than the max gas. 
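        // NOTE (illustration, not in the original source): a config with, say,
        // `default_gas = 400_000` but `max_gas = 300_000` could never produce a usable
        // fallback fee, so it is rejected up front here instead of failing later
        // whenever a simulation falls back to the default gas.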
- if default_gas > max_gas { - return Err(Error::config_validation_default_gas_too_high( - self.id().clone(), - default_gas, - max_gas, - )); - } - - // Get the latest height and convert to tendermint Height - let latest_height = Height::try_from(self.query_chain_latest_height()?.revision_height) - .map_err(Error::invalid_height)?; - - // Check on the configured max_tx_size against the consensus parameters at latest height - let result = self - .block_on(self.rpc_client.consensus_params(latest_height)) - .map_err(|e| { - Error::config_validation_json_rpc( - self.id().clone(), - self.config.rpc_addr.to_string(), - "/consensus_params".to_string(), - e, - ) - })?; - - let max_bound = result.consensus_params.block.max_bytes; - let max_allowed = mul_ceil(max_bound, GENESIS_MAX_BYTES_MAX_FRACTION); - let max_tx_size = BigInt::from(self.max_tx_size()); - - if max_tx_size > max_allowed { - return Err(Error::config_validation_tx_size_out_of_bounds( - self.id().clone(), - self.max_tx_size(), - max_bound, - )); - } - - // Check that the configured max gas is lower or equal to the consensus params max gas. - let consensus_max_gas = result.consensus_params.block.max_gas; - - // If the consensus max gas is < 0, we don't need to perform the check. - if consensus_max_gas >= 0 { - let consensus_max_gas: u64 = consensus_max_gas - .try_into() - .expect("cannot over or underflow because it is positive"); - - let max_gas = max_gas_from_config(&self.config); - - if max_gas > consensus_max_gas { - return Err(Error::config_validation_max_gas_too_high( - self.id().clone(), - max_gas, - result.consensus_params.block.max_gas, - )); - } - } - - Ok(()) - } - - /// Query the chain staking parameters - pub fn query_staking_params(&self) -> Result { - crate::time!("query_staking_params"); - crate::telemetry!(query, self.id(), "query_staking_params"); - - let mut client = self - .block_on( - ibc_proto::cosmos::staking::v1beta1::query_client::QueryClient::connect( - self.grpc_addr.clone(), - ), - ) - .map_err(Error::grpc_transport)?; - - let request = - tonic::Request::new(ibc_proto::cosmos::staking::v1beta1::QueryParamsRequest {}); - - let response = self - .block_on(client.params(request)) - .map_err(Error::grpc_status)?; - - let params = response - .into_inner() - .params - .ok_or_else(|| Error::grpc_response_param("no staking params".to_string()))?; - - Ok(params) - } - - /// The unbonding period of this chain - pub fn unbonding_period(&self) -> Result { - crate::time!("unbonding_period"); - - let unbonding_time = self.query_staking_params()?.unbonding_time.ok_or_else(|| { - Error::grpc_response_param("no unbonding time in staking params".to_string()) - })?; - - Ok(Duration::new( - unbonding_time.seconds as u64, - unbonding_time.nanos as u32, - )) - } - - /// The number of historical entries kept by this chain - pub fn historical_entries(&self) -> Result { - crate::time!("historical_entries"); - - self.query_staking_params().map(|p| p.historical_entries) - } - - /// Run a future to completion on the Tokio runtime. 
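    // NOTE (annotation, not in the original source): the `ChainEndpoint` implementation
    // further below is synchronous, so every async gRPC/RPC call in this file is funnelled
    // through this helper onto the chain's shared Tokio runtime. Tokio's `Runtime::block_on`
    // panics if called from within an async context, so these methods must stay on plain
    // (non-async) threads.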
- fn block_on(&self, f: F) -> F::Output { - crate::time!("block_on"); - self.rt.block_on(f) - } - - /// The maximum size of any transaction sent by the relayer to this chain - fn max_tx_size(&self) -> usize { - self.config.max_tx_size.into() - } - - fn query( - &self, - data: impl Into, - height: ICSHeight, - prove: bool, - ) -> Result { - crate::time!("query"); - - // SAFETY: Creating a Path from a constant; this should never fail - let path = TendermintABCIPath::from_str(IBC_QUERY_PATH) - .expect("Turning IBC query path constant into a Tendermint ABCI path"); - - let height = Height::try_from(height.revision_height).map_err(Error::invalid_height)?; - - let data = data.into(); - if !data.is_provable() & prove { - return Err(Error::private_store()); - } - - let response = self.block_on(abci_query( - &self.rpc_client, - &self.config.rpc_addr, - path, - data.to_string(), - height, - prove, - ))?; - - // TODO - Verify response proof, if requested. - if prove {} - - Ok(response) - } - - /// Perform an ABCI query against the client upgrade sub-store. - /// Fetches both the target data, as well as the proof. - /// - /// The data is returned in its raw format `Vec`, and is either - /// the client state (if the target path is [`UpgradedClientState`]), - /// or the client consensus state ([`UpgradedClientConsensusState`]). - fn query_client_upgrade_state( - &self, - data: ClientUpgradePath, - height: Height, - ) -> Result<(Vec, MerkleProof), Error> { - let prev_height = Height::try_from(height.value() - 1).map_err(Error::invalid_height)?; - - // SAFETY: Creating a Path from a constant; this should never fail - let path = TendermintABCIPath::from_str(SDK_UPGRADE_QUERY_PATH) - .expect("Turning SDK upgrade query path constant into a Tendermint ABCI path"); - let response: QueryResponse = self.block_on(abci_query( - &self.rpc_client, - &self.config.rpc_addr, - path, - Path::Upgrade(data).to_string(), - prev_height, - true, - ))?; - - let proof = response.proof.ok_or_else(Error::empty_response_proof)?; - - Ok((response.value, proof)) - } - - fn key(&self) -> Result { - self.keybase() - .get_key(&self.config.key_name) - .map_err(Error::key_base) - } - - fn trusting_period(&self, unbonding_period: Duration) -> Duration { - self.config - .trusting_period - .unwrap_or(2 * unbonding_period / 3) - } - - /// Query the chain status via an RPC query. - /// - /// Returns an error if the node is still syncing and has not caught up, - /// ie. if `sync_info.catching_up` is `true`. 
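    // NOTE (worked example, not in the original source): with the fallback in
    // `trusting_period` above, a chain whose unbonding period is the common 21 days gets
    // a 14-day trusting period (2/3 of unbonding) unless `trusting_period` is set
    // explicitly; the fixtures earlier in this patch pin exactly that value ('14days').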
- fn chain_status(&self) -> Result { - let status = self - .block_on(self.rpc_client.status()) - .map_err(|e| Error::rpc(self.config.rpc_addr.clone(), e))?; - - if status.sync_info.catching_up { - return Err(Error::chain_not_caught_up( - self.config.rpc_addr.to_string(), - self.config().id.clone(), - )); - } - - Ok(status) - } - - /// Query the chain's latest height - pub fn query_chain_latest_height(&self) -> Result { - crate::time!("query_latest_height"); - crate::telemetry!(query, self.id(), "query_latest_height"); - - let status = self.rt.block_on(query_status( - self.id(), - &self.rpc_client, - &self.config.rpc_addr, - ))?; - - Ok(status.height) - } - - async fn do_send_messages_and_wait_commit( - &mut self, - tracked_msgs: TrackedMsgs, - ) -> Result, Error> { - crate::time!("send_messages_and_wait_commit"); - - let _span = - span!(Level::DEBUG, "send_tx_commit", id = %tracked_msgs.tracking_id()).entered(); - - let proto_msgs = tracked_msgs.msgs; - - let key_entry = self.key()?; - - let account = - get_or_fetch_account(&self.grpc_addr, &key_entry.account, &mut self.account).await?; - - send_batched_messages_and_wait_commit( - &self.tx_config, - self.config.max_msg_num, - self.config.max_tx_size, - &key_entry, - account, - &self.config.memo_prefix, - proto_msgs, - ) - .await - } - - async fn do_send_messages_and_wait_check_tx( - &mut self, - tracked_msgs: TrackedMsgs, - ) -> Result, Error> { - crate::time!("send_messages_and_wait_check_tx"); - - let span = span!(Level::DEBUG, "send_tx_check", id = %tracked_msgs.tracking_id()); - let _enter = span.enter(); - - let proto_msgs = tracked_msgs.msgs; - - let key_entry = self.key()?; - - let account = - get_or_fetch_account(&self.grpc_addr, &key_entry.account, &mut self.account).await?; - - send_batched_messages_and_wait_check_tx( - &self.tx_config, - self.config.max_msg_num, - self.config.max_tx_size, - &key_entry, - account, - &self.config.memo_prefix, - proto_msgs, - ) - .await - } -} - -impl ChainEndpoint for CosmosSdkChain { - type LightBlock = TmLightBlock; - type Header = TmHeader; - type ConsensusState = TMConsensusState; - type ClientState = ClientState; - type LightClient = TmLightClient; - - fn bootstrap(config: ChainConfig, rt: Arc) -> Result { - let rpc_client = HttpClient::new(config.rpc_addr.clone()) - .map_err(|e| Error::rpc(config.rpc_addr.clone(), e))?; - - // Initialize key store and load key - let keybase = KeyRing::new(config.key_store_type, &config.account_prefix, &config.id) - .map_err(Error::key_base)?; - - let grpc_addr = Uri::from_str(&config.grpc_addr.to_string()) - .map_err(|e| Error::invalid_uri(config.grpc_addr.to_string(), e))?; - - let tx_config = TxConfig::try_from(&config)?; - - // Retrieve the version specification of this chain - - let chain = Self { - config, - rpc_client, - grpc_addr, - rt, - keybase, - account: None, - tx_config, - }; - - Ok(chain) - } - - fn init_light_client(&self) -> Result { - use tendermint_light_client_verifier::types::PeerId; - - crate::time!("init_light_client"); - - let peer_id: PeerId = self - .rt - .block_on(self.rpc_client.status()) - .map(|s| s.node_info.id) - .map_err(|e| Error::rpc(self.config.rpc_addr.clone(), e))?; - - let light_client = TmLightClient::from_config(&self.config, peer_id)?; - - Ok(light_client) - } - - fn init_event_monitor( - &self, - rt: Arc, - ) -> Result<(EventReceiver, TxMonitorCmd), Error> { - crate::time!("init_event_monitor"); - - let (mut event_monitor, event_receiver, monitor_tx) = EventMonitor::new( - self.config.id.clone(), - 
self.config.websocket_addr.clone(), - rt, - ) - .map_err(Error::event_monitor)?; - - event_monitor.subscribe().map_err(Error::event_monitor)?; - - thread::spawn(move || event_monitor.run()); - - Ok((event_receiver, monitor_tx)) - } - - fn shutdown(self) -> Result<(), Error> { - Ok(()) - } - - fn id(&self) -> &ChainId { - &self.config().id - } - - fn keybase(&self) -> &KeyRing { - &self.keybase - } - - fn keybase_mut(&mut self) -> &mut KeyRing { - &mut self.keybase - } - - /// Does multiple RPC calls to the full node, to check for - /// reachability and some basic APIs are available. - /// - /// Currently this checks that: - /// - the node responds OK to `/health` RPC call; - /// - the node has transaction indexing enabled; - /// - the SDK version is supported; - /// - /// Emits a log warning in case anything is amiss. - /// Exits early if any health check fails, without doing any - /// further checks. - fn health_check(&self) -> Result { - if let Err(e) = do_health_check(self) { - warn!("Health checkup for chain '{}' failed", self.id()); - warn!(" Reason: {}", e.detail()); - warn!(" Some Hermes features may not work in this mode!"); - - return Ok(HealthCheck::Unhealthy(Box::new(e))); - } - - if let Err(e) = self.validate_params() { - warn!("Hermes might be misconfigured for chain '{}'", self.id()); - warn!(" Reason: {}", e.detail()); - warn!(" Some Hermes features may not work in this mode!"); - - return Ok(HealthCheck::Unhealthy(Box::new(e))); - } - - Ok(HealthCheck::Healthy) - } - - /// Send one or more transactions that include all the specified messages. - /// The `proto_msgs` are split in transactions such they don't exceed the configured maximum - /// number of messages per transaction and the maximum transaction size. - /// Then `send_tx()` is called with each Tx. `send_tx()` determines the fee based on the - /// on-chain simulation and if this exceeds the maximum gas specified in the configuration file - /// then it returns error. - /// TODO - more work is required here for a smarter split maybe iteratively accumulating/ evaluating - /// msgs in a Tx until any of the max size, max num msgs, max fee are exceeded. 
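    // NOTE (illustration, not in the original source): with the mock configuration shown
    // earlier in this patch (`max_msg_num = 30`, `max_tx_size = 2097152`), submitting 70
    // small messages would yield three transactions of 30, 30 and 10 messages, each
    // simulated for gas and rejected if the estimate exceeds the configured maximum.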
- fn send_messages_and_wait_commit( - &mut self, - tracked_msgs: TrackedMsgs, - ) -> Result, Error> { - let runtime = self.rt.clone(); - - runtime.block_on(self.do_send_messages_and_wait_commit(tracked_msgs)) - } - - fn send_messages_and_wait_check_tx( - &mut self, - tracked_msgs: TrackedMsgs, - ) -> Result, Error> { - let runtime = self.rt.clone(); - - runtime.block_on(self.do_send_messages_and_wait_check_tx(tracked_msgs)) - } - - /// Get the account for the signer - fn get_signer(&mut self) -> Result { - crate::time!("get_signer"); - - // Get the key from key seed file - let key = self - .keybase() - .get_key(&self.config.key_name) - .map_err(|e| Error::key_not_found(self.config.key_name.clone(), e))?; - - let bech32 = encode_to_bech32(&key.address.to_hex(), &self.config.account_prefix)?; - bech32 - .parse() - .map_err(|e| Error::ics02(ClientError::signer(e))) - } - - /// Get the chain configuration - fn config(&self) -> ChainConfig { - self.config.clone() - } - - /// Get the signing key - fn get_key(&mut self) -> Result { - crate::time!("get_key"); - - // Get the key from key seed file - let key = self - .keybase() - .get_key(&self.config.key_name) - .map_err(|e| Error::key_not_found(self.config.key_name.clone(), e))?; - - Ok(key) - } - - fn add_key(&mut self, key_name: &str, key: KeyEntry) -> Result<(), Error> { - self.keybase_mut() - .add_key(key_name, key) - .map_err(Error::key_base)?; - - Ok(()) - } - - fn ibc_version(&self) -> Result, Error> { - let version_specs = self.block_on(fetch_version_specs(self.id(), &self.grpc_addr))?; - Ok(version_specs.ibc_go_version) - } - - fn query_balance(&self, key_name: Option) -> Result { - // If a key_name is given, extract the account hash. - // Else retrieve the account from the configuration file. - let account = match key_name { - Some(account) => { - let key = self.keybase().get_key(&account).map_err(Error::key_base)?; - key.account - } - _ => { - let key = self.key()?; - key.account - } - }; - - let balance = self.block_on(query_balance( - &self.grpc_addr, - &account, - &self.config.gas_price.denom, - ))?; - - Ok(balance) - } - - fn query_commitment_prefix(&self) -> Result { - crate::time!("query_commitment_prefix"); - crate::telemetry!(query, self.id(), "query_commitment_prefix"); - - // TODO - do a real chain query - CommitmentPrefix::try_from(self.config().store_prefix.as_bytes().to_vec()) - .map_err(|_| Error::ics02(ClientError::empty_prefix())) - } - - /// Query the application status - fn query_application_status(&self) -> Result { - crate::time!("query_application_status"); - crate::telemetry!(query, self.id(), "query_application_status"); - - // We cannot rely on `/status` endpoint to provide details about the latest block. - // Instead, we need to pull block height via `/abci_info` and then fetch block - // metadata at the given height via `/blockchain` endpoint. - let abci_info = self - .block_on(self.rpc_client.abci_info()) - .map_err(|e| Error::rpc(self.config.rpc_addr.clone(), e))?; - - // Query `/blockchain` endpoint to pull the block metadata corresponding to - // the latest block that the application committed. - // TODO: Replace this query with `/header`, once it's available. - // https://github.com/informalsystems/tendermint-rs/pull/1101 - let blocks = self - .block_on( - self.rpc_client - .blockchain(abci_info.last_block_height, abci_info.last_block_height), - ) - .map_err(|e| Error::rpc(self.config.rpc_addr.clone(), e))? 
- .block_metas; - - return if let Some(latest_app_block) = blocks.first() { - let height = ICSHeight { - revision_number: ChainId::chain_version(latest_app_block.header.chain_id.as_str()), - revision_height: u64::from(abci_info.last_block_height), - }; - let timestamp = latest_app_block.header.time.into(); - - Ok(ChainStatus { height, timestamp }) - } else { - // The `/blockchain` query failed to return the header we wanted - Err(Error::query( - "/blockchain endpoint for latest app. block".to_owned(), - )) - }; - } - - fn query_clients( - &self, - request: QueryClientStatesRequest, - ) -> Result, Error> { - crate::time!("query_clients"); - crate::telemetry!(query, self.id(), "query_clients"); - - let mut client = self - .block_on( - ibc_proto::ibc::core::client::v1::query_client::QueryClient::connect( - self.grpc_addr.clone(), - ), - ) - .map_err(Error::grpc_transport)?; - - let request = tonic::Request::new(request.into()); - let response = self - .block_on(client.client_states(request)) - .map_err(Error::grpc_status)? - .into_inner(); - - // Deserialize into domain type - let mut clients: Vec = response - .client_states - .into_iter() - .filter_map(|cs| IdentifiedAnyClientState::try_from(cs).ok()) - .collect(); - - // Sort by client identifier counter - clients.sort_by_cached_key(|c| client_id_suffix(&c.client_id).unwrap_or(0)); - - Ok(clients) - } - - fn query_client_state( - &self, - request: QueryClientStateRequest, - include_proof: IncludeProof, - ) -> Result<(AnyClientState, Option), Error> { - crate::time!("query_client_state"); - crate::telemetry!(query, self.id(), "query_client_state"); - - let res = self.query( - ClientStatePath(request.client_id.clone()), - request.height, - matches!(include_proof, IncludeProof::Yes), - )?; - let client_state = AnyClientState::decode_vec(&res.value).map_err(Error::decode)?; - - match include_proof { - IncludeProof::Yes => { - let proof = res.proof.ok_or_else(Error::empty_response_proof)?; - Ok((client_state, Some(proof))) - } - IncludeProof::No => Ok((client_state, None)), - } - } - - fn query_upgraded_client_state( - &self, - request: QueryUpgradedClientStateRequest, - ) -> Result<(AnyClientState, MerkleProof), Error> { - crate::time!("query_upgraded_client_state"); - crate::telemetry!(query, self.id(), "query_upgraded_client_state"); - - // Query for the value and the proof. - let tm_height = - Height::try_from(request.height.revision_height).map_err(Error::invalid_height)?; - let (upgraded_client_state_raw, proof) = self.query_client_upgrade_state( - ClientUpgradePath::UpgradedClientState(request.height.revision_height), - tm_height, - )?; - - let client_state = AnyClientState::decode_vec(&upgraded_client_state_raw) - .map_err(Error::conversion_from_any)?; - - Ok((client_state, proof)) - } - - fn query_upgraded_consensus_state( - &self, - request: QueryUpgradedConsensusStateRequest, - ) -> Result<(AnyConsensusState, MerkleProof), Error> { - crate::time!("query_upgraded_consensus_state"); - crate::telemetry!(query, self.id(), "query_upgraded_consensus_state"); - - let tm_height = - Height::try_from(request.height.revision_height).map_err(Error::invalid_height)?; - - // Fetch the consensus state and its proof. 
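        // NOTE (annotation, not in the original source): as with the upgraded client state
        // above, `query_client_upgrade_state` runs the ABCI query against the SDK upgrade
        // sub-store at `height - 1` with `prove = true` (see its definition earlier in this
        // file), presumably so the returned `MerkleProof` verifies against the app hash
        // committed at the requested upgrade height.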
- let (upgraded_consensus_state_raw, proof) = self.query_client_upgrade_state( - ClientUpgradePath::UpgradedClientConsensusState(request.height.revision_height), - tm_height, - )?; - - let consensus_state = AnyConsensusState::decode_vec(&upgraded_consensus_state_raw) - .map_err(Error::conversion_from_any)?; - - Ok((consensus_state, proof)) - } - - /// Performs a query to retrieve the identifiers of all connections. - fn query_consensus_states( - &self, - request: QueryConsensusStatesRequest, - ) -> Result, Error> { - crate::time!("query_consensus_states"); - crate::telemetry!(query, self.id(), "query_consensus_states"); - - let mut client = self - .block_on( - ibc_proto::ibc::core::client::v1::query_client::QueryClient::connect( - self.grpc_addr.clone(), - ), - ) - .map_err(Error::grpc_transport)?; - - let request = tonic::Request::new(request.into()); - let response = self - .block_on(client.consensus_states(request)) - .map_err(Error::grpc_status)? - .into_inner(); - - let mut consensus_states: Vec = response - .consensus_states - .into_iter() - .filter_map(|cs| TryFrom::try_from(cs).ok()) - .collect(); - consensus_states.sort_by(|a, b| a.height.cmp(&b.height)); - consensus_states.reverse(); - Ok(consensus_states) - } - - fn query_consensus_state( - &self, - request: QueryConsensusStateRequest, - include_proof: IncludeProof, - ) -> Result<(AnyConsensusState, Option), Error> { - crate::time!("query_consensus_state"); - crate::telemetry!(query, self.id(), "query_consensus_state"); - - let res = self.query( - ClientConsensusStatePath { - client_id: request.client_id.clone(), - epoch: request.consensus_height.revision_number, - height: request.consensus_height.revision_height, - }, - request.query_height, - matches!(include_proof, IncludeProof::Yes), - )?; - - let consensus_state = AnyConsensusState::decode_vec(&res.value).map_err(Error::decode)?; - - if !matches!(consensus_state, AnyConsensusState::Tendermint(_)) { - return Err(Error::consensus_state_type_mismatch( - ClientType::Tendermint, - consensus_state.client_type(), - )); - } - - match include_proof { - IncludeProof::Yes => { - let proof = res.proof.ok_or_else(Error::empty_response_proof)?; - Ok((consensus_state, Some(proof))) - } - IncludeProof::No => Ok((consensus_state, None)), - } - } - - fn query_client_connections( - &self, - request: QueryClientConnectionsRequest, - ) -> Result, Error> { - crate::time!("query_client_connections"); - crate::telemetry!(query, self.id(), "query_client_connections"); - - let mut client = self - .block_on( - ibc_proto::ibc::core::connection::v1::query_client::QueryClient::connect( - self.grpc_addr.clone(), - ), - ) - .map_err(Error::grpc_transport)?; - - let request = tonic::Request::new(request.into()); - - let response = match self.block_on(client.client_connections(request)) { - Ok(res) => res.into_inner(), - Err(e) if e.code() == tonic::Code::NotFound => return Ok(vec![]), - Err(e) => return Err(Error::grpc_status(e)), - }; - - // TODO: add warnings for any identifiers that fail to parse (below). - // similar to the parsing in `query_connection_channels`. 
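        // A hypothetical sketch of what the TODO above could look like; `warn!` is already
        // imported from `tracing` at the top of this file:
        //
        //     let ids = response
        //         .connection_paths
        //         .iter()
        //         .filter_map(|raw| match ConnectionId::from_str(raw) {
        //             Ok(id) => Some(id),
        //             Err(e) => {
        //                 warn!("ignoring malformed connection id '{}': {}", raw, e);
        //                 None
        //             }
        //         })
        //         .collect();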
- - let ids = response - .connection_paths - .iter() - .filter_map(|id| ConnectionId::from_str(id).ok()) - .collect(); - - Ok(ids) - } - - fn query_connections( - &self, - request: QueryConnectionsRequest, - ) -> Result, Error> { - crate::time!("query_connections"); - crate::telemetry!(query, self.id(), "query_connections"); - - let mut client = self - .block_on( - ibc_proto::ibc::core::connection::v1::query_client::QueryClient::connect( - self.grpc_addr.clone(), - ), - ) - .map_err(Error::grpc_transport)?; - - let request = tonic::Request::new(request.into()); - - let response = self - .block_on(client.connections(request)) - .map_err(Error::grpc_status)? - .into_inner(); - - // TODO: add warnings for any identifiers that fail to parse (below). - // similar to the parsing in `query_connection_channels`. - - let connections = response - .connections - .into_iter() - .filter_map(|co| IdentifiedConnectionEnd::try_from(co).ok()) - .collect(); - - Ok(connections) - } - - fn query_connection( - &self, - request: QueryConnectionRequest, - include_proof: IncludeProof, - ) -> Result<(ConnectionEnd, Option), Error> { - crate::time!("query_connection"); - crate::telemetry!(query, self.id(), "query_connection"); - - async fn do_query_connection( - chain: &CosmosSdkChain, - connection_id: &ConnectionId, - height: ICSHeight, - ) -> Result { - use ibc_proto::ibc::core::connection::v1 as connection; - use tonic::IntoRequest; - - let mut client = - connection::query_client::QueryClient::connect(chain.grpc_addr.clone()) - .await - .map_err(Error::grpc_transport)?; - - let mut request = connection::QueryConnectionRequest { - connection_id: connection_id.to_string(), - } - .into_request(); - - let height_param = - str::parse(&height.revision_height.to_string()).map_err(Error::invalid_metadata)?; - - request - .metadata_mut() - .insert("x-cosmos-block-height", height_param); - - let response = client.connection(request).await.map_err(|e| { - if e.code() == tonic::Code::NotFound { - Error::connection_not_found(connection_id.clone()) - } else { - Error::grpc_status(e) - } - })?; - - match response.into_inner().connection { - Some(raw_connection) => { - let connection_end = raw_connection.try_into().map_err(Error::ics03)?; - - Ok(connection_end) - } - None => { - // When no connection is found, the GRPC call itself should return - // the NotFound error code. Nevertheless even if the call is successful, - // the connection field may not be present, because in protobuf3 - // everything is optional. 
- Err(Error::connection_not_found(connection_id.clone())) - } - } - } - - match include_proof { - IncludeProof::Yes => { - let res = self.query( - ConnectionsPath(request.connection_id.clone()), - request.height, - true, - )?; - let connection_end = - ConnectionEnd::decode_vec(&res.value).map_err(Error::decode)?; - - Ok(( - connection_end, - Some(res.proof.ok_or_else(Error::empty_response_proof)?), - )) - } - IncludeProof::No => self - .block_on(async { - do_query_connection(self, &request.connection_id, request.height).await - }) - .map(|conn_end| (conn_end, None)), - } - } - - fn query_connection_channels( - &self, - request: QueryConnectionChannelsRequest, - ) -> Result, Error> { - crate::time!("query_connection_channels"); - crate::telemetry!(query, self.id(), "query_connection_channels"); - - let mut client = self - .block_on( - ibc_proto::ibc::core::channel::v1::query_client::QueryClient::connect( - self.grpc_addr.clone(), - ), - ) - .map_err(Error::grpc_transport)?; - - let request = tonic::Request::new(request.into()); - - let response = self - .block_on(client.connection_channels(request)) - .map_err(Error::grpc_status)? - .into_inner(); - - // TODO: add warnings for any identifiers that fail to parse (below). - // https://github.com/informalsystems/ibc-rs/pull/506#discussion_r555945560 - - let channels = response - .channels - .into_iter() - .filter_map(|ch| IdentifiedChannelEnd::try_from(ch).ok()) - .collect(); - Ok(channels) - } - - fn query_channels( - &self, - request: QueryChannelsRequest, - ) -> Result, Error> { - crate::time!("query_channels"); - crate::telemetry!(query, self.id(), "query_channels"); - - let mut client = self - .block_on( - ibc_proto::ibc::core::channel::v1::query_client::QueryClient::connect( - self.grpc_addr.clone(), - ), - ) - .map_err(Error::grpc_transport)?; - - let request = tonic::Request::new(request.into()); - - let response = self - .block_on(client.channels(request)) - .map_err(Error::grpc_status)? - .into_inner(); - - let channels = response - .channels - .into_iter() - .filter_map(|ch| IdentifiedChannelEnd::try_from(ch).ok()) - .collect(); - Ok(channels) - } - - fn query_channel( - &self, - request: QueryChannelRequest, - include_proof: IncludeProof, - ) -> Result<(ChannelEnd, Option), Error> { - crate::time!("query_channel"); - crate::telemetry!(query, self.id(), "query_channel"); - - let res = self.query( - ChannelEndsPath(request.port_id, request.channel_id), - request.height, - matches!(include_proof, IncludeProof::Yes), - )?; - - let channel_end = ChannelEnd::decode_vec(&res.value).map_err(Error::decode)?; - - match include_proof { - IncludeProof::Yes => { - let proof = res.proof.ok_or_else(Error::empty_response_proof)?; - Ok((channel_end, Some(proof))) - } - IncludeProof::No => Ok((channel_end, None)), - } - } - - fn query_channel_client_state( - &self, - request: QueryChannelClientStateRequest, - ) -> Result, Error> { - crate::time!("query_channel_client_state"); - crate::telemetry!(query, self.id(), "query_channel_client_state"); - - let mut client = self - .block_on( - ibc_proto::ibc::core::channel::v1::query_client::QueryClient::connect( - self.grpc_addr.clone(), - ), - ) - .map_err(Error::grpc_transport)?; - - let request = tonic::Request::new(request.into()); - - let response = self - .block_on(client.channel_client_state(request)) - .map_err(Error::grpc_status)? 
- .into_inner(); - - let client_state: Option = response - .identified_client_state - .map_or_else(|| None, |proto_cs| proto_cs.try_into().ok()); - - Ok(client_state) - } - - fn query_packet_commitment( - &self, - request: QueryPacketCommitmentRequest, - include_proof: IncludeProof, - ) -> Result<(Vec, Option), Error> { - let res = self.query( - CommitmentsPath { - port_id: request.port_id, - channel_id: request.channel_id, - sequence: request.sequence, - }, - request.height, - matches!(include_proof, IncludeProof::Yes), - )?; - - match include_proof { - IncludeProof::Yes => { - let proof = res.proof.ok_or_else(Error::empty_response_proof)?; - - Ok((res.value, Some(proof))) - } - IncludeProof::No => Ok((res.value, None)), - } - } - - /// Queries the packet commitment hashes associated with a channel. - fn query_packet_commitments( - &self, - request: QueryPacketCommitmentsRequest, - ) -> Result<(Vec, ICSHeight), Error> { - crate::time!("query_packet_commitments"); - crate::telemetry!(query, self.id(), "query_packet_commitments"); - - let mut client = self - .block_on( - ibc_proto::ibc::core::channel::v1::query_client::QueryClient::connect( - self.grpc_addr.clone(), - ), - ) - .map_err(Error::grpc_transport)?; - - let request = tonic::Request::new(request.into()); - - let response = self - .block_on(client.packet_commitments(request)) - .map_err(Error::grpc_status)? - .into_inner(); - - let mut commitment_sequences: Vec = response - .commitments - .into_iter() - .map(|v| v.sequence.into()) - .collect(); - commitment_sequences.sort_unstable(); - - let height = response - .height - .ok_or_else(|| Error::grpc_response_param("height".to_string()))? - .into(); - - Ok((commitment_sequences, height)) - } - - fn query_packet_receipt( - &self, - request: QueryPacketReceiptRequest, - include_proof: IncludeProof, - ) -> Result<(Vec, Option), Error> { - let res = self.query( - ReceiptsPath { - port_id: request.port_id, - channel_id: request.channel_id, - sequence: request.sequence, - }, - request.height, - matches!(include_proof, IncludeProof::Yes), - )?; - - match include_proof { - IncludeProof::Yes => { - let proof = res.proof.ok_or_else(Error::empty_response_proof)?; - - Ok((res.value, Some(proof))) - } - IncludeProof::No => Ok((res.value, None)), - } - } - - /// Queries the unreceived packet sequences associated with a channel. - fn query_unreceived_packets( - &self, - request: QueryUnreceivedPacketsRequest, - ) -> Result, Error> { - crate::time!("query_unreceived_packets"); - crate::telemetry!(query, self.id(), "query_unreceived_packets"); - - let mut client = self - .block_on( - ibc_proto::ibc::core::channel::v1::query_client::QueryClient::connect( - self.grpc_addr.clone(), - ), - ) - .map_err(Error::grpc_transport)?; - - let request = tonic::Request::new(request.into()); - - let mut response = self - .block_on(client.unreceived_packets(request)) - .map_err(Error::grpc_status)? 
- .into_inner(); - - response.sequences.sort_unstable(); - Ok(response - .sequences - .into_iter() - .map(|seq| seq.into()) - .collect()) - } - - fn query_packet_acknowledgement( - &self, - request: QueryPacketAcknowledgementRequest, - include_proof: IncludeProof, - ) -> Result<(Vec, Option), Error> { - let res = self.query( - AcksPath { - port_id: request.port_id, - channel_id: request.channel_id, - sequence: request.sequence, - }, - request.height, - matches!(include_proof, IncludeProof::Yes), - )?; - - match include_proof { - IncludeProof::Yes => { - let proof = res.proof.ok_or_else(Error::empty_response_proof)?; - - Ok((res.value, Some(proof))) - } - IncludeProof::No => Ok((res.value, None)), - } - } - - /// Queries the packet acknowledgment hashes associated with a channel. - fn query_packet_acknowledgements( - &self, - request: QueryPacketAcknowledgementsRequest, - ) -> Result<(Vec, ICSHeight), Error> { - crate::time!("query_packet_acknowledgements"); - crate::telemetry!(query, self.id(), "query_packet_acknowledgements"); - - let mut client = self - .block_on( - ibc_proto::ibc::core::channel::v1::query_client::QueryClient::connect( - self.grpc_addr.clone(), - ), - ) - .map_err(Error::grpc_transport)?; - - let request = tonic::Request::new(request.into()); - - let response = self - .block_on(client.packet_acknowledgements(request)) - .map_err(Error::grpc_status)? - .into_inner(); - - let acks_sequences = response - .acknowledgements - .into_iter() - .map(|v| v.sequence.into()) - .collect(); - - let height = response - .height - .ok_or_else(|| Error::grpc_response_param("height".to_string()))? - .into(); - - Ok((acks_sequences, height)) - } - - /// Queries the unreceived acknowledgements sequences associated with a channel. - fn query_unreceived_acknowledgements( - &self, - request: QueryUnreceivedAcksRequest, - ) -> Result, Error> { - crate::time!("query_unreceived_acknowledgements"); - crate::telemetry!(query, self.id(), "query_unreceived_acknowledgements"); - - let mut client = self - .block_on( - ibc_proto::ibc::core::channel::v1::query_client::QueryClient::connect( - self.grpc_addr.clone(), - ), - ) - .map_err(Error::grpc_transport)?; - - let request = tonic::Request::new(request.into()); - - let mut response = self - .block_on(client.unreceived_acks(request)) - .map_err(Error::grpc_status)? - .into_inner(); - - response.sequences.sort_unstable(); - Ok(response - .sequences - .into_iter() - .map(|seq| seq.into()) - .collect()) - } - - fn query_next_sequence_receive( - &self, - request: QueryNextSequenceReceiveRequest, - include_proof: IncludeProof, - ) -> Result<(Sequence, Option), Error> { - crate::time!("query_next_sequence_receive"); - crate::telemetry!(query, self.id(), "query_next_sequence_receive"); - - match include_proof { - IncludeProof::Yes => { - let res = self.query( - SeqRecvsPath(request.port_id, request.channel_id), - request.height, - true, - )?; - - // Note: We expect the return to be a u64 encoded in big-endian. 
Refer to ibc-go: - // https://github.com/cosmos/ibc-go/blob/25767f6bdb5bab2c2a116b41d92d753c93e18121/modules/core/04-channel/client/utils/utils.go#L191 - if res.value.len() != 8 { - return Err(Error::query("next_sequence_receive".into())); - } - let seq: Sequence = Bytes::from(res.value).get_u64().into(); - - let proof = res.proof.ok_or_else(Error::empty_response_proof)?; - - Ok((seq, Some(proof))) - } - IncludeProof::No => { - let mut client = self - .block_on( - ibc_proto::ibc::core::channel::v1::query_client::QueryClient::connect( - self.grpc_addr.clone(), - ), - ) - .map_err(Error::grpc_transport)?; - - let request = tonic::Request::new(request.into()); - - let response = self - .block_on(client.next_sequence_receive(request)) - .map_err(Error::grpc_status)? - .into_inner(); - - Ok((Sequence::from(response.next_sequence_receive), None)) - } - } - } - - /// This function queries transactions for events matching certain criteria. - /// 1. Client Update request - returns a vector with at most one update client event - /// 2. Packet event request - returns at most one packet event for each sequence specified - /// in the request. - /// Note - there is no way to format the packet query such that it asks for Tx-es with either - /// sequence (the query conditions can only be AND-ed). - /// There is a possibility to include "<=" and ">=" conditions but it doesn't work with - /// string attributes (sequence is emmitted as a string). - /// Therefore, for packets we perform one tx_search for each sequence. - /// Alternatively, a single query for all packets could be performed but it would return all - /// packets ever sent. - fn query_txs(&self, request: QueryTxRequest) -> Result, Error> { - crate::time!("query_txs"); - crate::telemetry!(query, self.id(), "query_txs"); - - self.block_on(query_txs( - self.id(), - &self.rpc_client, - &self.config.rpc_addr, - request, - )) - } - - fn query_blocks( - &self, - request: QueryBlockRequest, - ) -> Result<(Vec, Vec), Error> { - crate::time!("query_blocks"); - crate::telemetry!(query, self.id(), "query_blocks"); - - match request { - QueryBlockRequest::Packet(request) => { - crate::time!("query_blocks: query block packet events"); - - let mut begin_block_events: Vec = vec![]; - let mut end_block_events: Vec = vec![]; - - for seq in &request.sequences { - let response = self - .block_on(self.rpc_client.block_search( - packet_query(&request, *seq), - 1, - 1, // there should only be a single match for this query - Order::Ascending, - )) - .map_err(|e| Error::rpc(self.config.rpc_addr.clone(), e))?; - - assert!( - response.blocks.len() <= 1, - "block_results: unexpected number of blocks" - ); - - if let Some(block) = response.blocks.first().map(|first| &first.block) { - let response_height = - ICSHeight::new(self.id().version(), u64::from(block.header.height)); - - if request.height != ICSHeight::zero() && response_height > request.height { - continue; - } - - let response = self - .block_on(self.rpc_client.block_results(block.header.height)) - .map_err(|e| Error::rpc(self.config.rpc_addr.clone(), e))?; - - begin_block_events.append( - &mut response - .begin_block_events - .unwrap_or_default() - .into_iter() - .filter_map(|ev| filter_matching_event(ev, &request, *seq)) - .collect(), - ); - - end_block_events.append( - &mut response - .end_block_events - .unwrap_or_default() - .into_iter() - .filter_map(|ev| filter_matching_event(ev, &request, *seq)) - .collect(), - ); - } - } - Ok((begin_block_events, end_block_events)) - } - } - } - - fn 
query_host_consensus_state( - &self, - request: QueryHostConsensusStateRequest, - ) -> Result { - let height = - Height::try_from(request.height.revision_height).map_err(Error::invalid_height)?; - - // TODO(hu55a1n1): use the `/header` RPC endpoint instead when we move to tendermint v0.35.x - let rpc_call = match height.value() { - 0 => self.rpc_client.latest_block(), - _ => self.rpc_client.block(height), - }; - let response = self - .block_on(rpc_call) - .map_err(|e| Error::rpc(self.config.rpc_addr.clone(), e))?; - Ok(response.block.header.into()) - } - - fn build_client_state( - &self, - height: ICSHeight, - settings: ClientSettings, - ) -> Result { - let ClientSettings::Tendermint(settings) = settings; - let unbonding_period = self.unbonding_period()?; - let trusting_period = settings - .trusting_period - .unwrap_or_else(|| self.trusting_period(unbonding_period)); - - // Build the client state. - ClientState::new( - self.id().clone(), - settings.trust_threshold, - trusting_period, - unbonding_period, - settings.max_clock_drift, - height, - self.config.proof_specs.clone(), - vec!["upgrade".to_string(), "upgradedIBCState".to_string()], - AllowUpdate { - after_expiry: true, - after_misbehaviour: true, - }, - ) - .map_err(Error::ics07) - } - - fn build_consensus_state( - &self, - light_block: Self::LightBlock, - ) -> Result { - crate::time!("build_consensus_state"); - - Ok(TMConsensusState::from(light_block.signed_header.header)) - } - - fn build_header( - &self, - trusted_height: ICSHeight, - target_height: ICSHeight, - client_state: &AnyClientState, - light_client: &mut Self::LightClient, - ) -> Result<(Self::Header, Vec), Error> { - crate::time!("build_header"); - - // Get the light block at target_height from chain. - let Verified { target, supporting } = - light_client.header_and_minimal_set(trusted_height, target_height, client_state)?; - - Ok((target, supporting)) - } -} - -fn filter_matching_event( - event: Event, - request: &QueryPacketEventDataRequest, - seq: Sequence, -) -> Option { - fn matches_packet( - request: &QueryPacketEventDataRequest, - seq: Sequence, - packet: &Packet, - ) -> bool { - packet.source_port == request.source_port_id - && packet.source_channel == request.source_channel_id - && packet.destination_port == request.destination_port_id - && packet.destination_channel == request.destination_channel_id - && packet.sequence == seq - } - - if event.type_str != request.event_id.as_str() { - return None; - } - - let ibc_event = ChannelEvents::try_from_tx(&event)?; - match ibc_event { - IbcEvent::SendPacket(ref send_ev) if matches_packet(request, seq, &send_ev.packet) => { - Some(ibc_event) - } - IbcEvent::WriteAcknowledgement(ref ack_ev) - if matches_packet(request, seq, &ack_ev.packet) => - { - Some(ibc_event) - } - _ => None, - } -} - -/// Returns the suffix counter for a CosmosSDK client id. -/// Returns `None` if the client identifier is malformed -/// and the suffix could not be parsed. 
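A usage sketch of the suffix parsing described above (the actual helper, which operates on the `ClientId` domain type, follows); the client identifiers here are hypothetical:

    fn suffix(client_id: &str) -> Option<u64> {
        client_id.split('-').last().and_then(|s| s.parse::<u64>().ok())
    }

    fn main() {
        assert_eq!(suffix("07-tendermint-42"), Some(42));
        assert_eq!(suffix("malformed"), None);
        println!("ok");
    }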
-fn client_id_suffix(client_id: &ClientId) -> Option { - client_id - .as_str() - .split('-') - .last() - .and_then(|e| e.parse::().ok()) -} - -fn do_health_check(chain: &CosmosSdkChain) -> Result<(), Error> { - let chain_id = chain.id(); - let grpc_address = chain.grpc_addr.to_string(); - let rpc_address = chain.config.rpc_addr.to_string(); - - // Checkup on the self-reported health endpoint - chain.block_on(chain.rpc_client.health()).map_err(|e| { - Error::health_check_json_rpc( - chain_id.clone(), - rpc_address.clone(), - "/health".to_string(), - e, - ) - })?; - - // Check that the staking module maintains some historical entries, meaning that - // local header information is stored in the IBC state and therefore client - // proofs that are part of the connection handshake messages can be verified. - if chain.historical_entries()? == 0 { - return Err(Error::no_historical_entries(chain_id.clone())); - } - - let status = chain.chain_status()?; - - // Check that transaction indexing is enabled - if status.node_info.other.tx_index != TxIndexStatus::On { - return Err(Error::tx_indexing_disabled(chain_id.clone())); - } - - // Check that the chain identifier matches the network name - if !status.node_info.network.as_str().eq(chain_id.as_str()) { - // Log the error, continue optimistically - error!("/status endpoint from chain id '{}' reports network identifier to be '{}': this is usually a sign of misconfiguration, check your config.toml", - chain_id, status.node_info.network); - } - - let version_specs = chain.block_on(fetch_version_specs(&chain.config.id, &chain.grpc_addr))?; - - // Checkup on the underlying SDK & IBC-go versions - if let Err(diagnostic) = compatibility::run_diagnostic(&version_specs) { - return Err(Error::sdk_module_version( - chain_id.clone(), - grpc_address, - diagnostic.to_string(), - )); - } - - Ok(()) -} - -#[cfg(test)] -mod tests { - use ibc::{ - core::{ - ics02_client::client_state::{AnyClientState, IdentifiedAnyClientState}, - ics02_client::client_type::ClientType, - ics24_host::identifier::ClientId, - }, - mock::client_state::MockClientState, - mock::header::MockHeader, - Height, - }; - - use crate::{chain::cosmos::client_id_suffix, config::GasPrice}; - - use super::calculate_fee; - - #[test] - fn mul_ceil() { - // Because 0.001 cannot be expressed precisely - // as a 64-bit floating point number (it is - // stored as 0.001000000047497451305389404296875), - // `num_rational::BigRational` will represent it as - // 1152921504606847/1152921504606846976 instead - // which will sometimes round up to the next - // integer in the computations below. - // This is not a problem for the way we compute the fee - // and gas adjustment as those are already based on simulated - // gas which is not 100% precise. 
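To make the rounding concrete, here is a standalone copy of the `mul_ceil` helper (defined in relayer/src/chain/cosmos/gas.rs later in this diff), assuming the `num-bigint` and `num-rational` crates already used by the relayer; the original assertions follow.

    use num_bigint::BigInt;
    use num_rational::BigRational;

    fn mul_ceil(a: u64, f: f64) -> BigInt {
        assert!(f.is_finite());
        let a = BigInt::from(a);
        let f = BigRational::from_float(f).expect("f is finite");
        // The rational form of the f64 0.001 is slightly above 1/1000,
        // so the ceiling rounds the product up.
        (f * a).ceil().to_integer()
    }

    fn main() {
        assert_eq!(mul_ceil(300_000, 0.001), 301.into());
        println!("ok");
    }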
- assert_eq!(super::mul_ceil(300_000, 0.001), 301.into()); - assert_eq!(super::mul_ceil(300_004, 0.001), 301.into()); - assert_eq!(super::mul_ceil(300_040, 0.001), 301.into()); - assert_eq!(super::mul_ceil(300_400, 0.001), 301.into()); - assert_eq!(super::mul_ceil(304_000, 0.001), 305.into()); - assert_eq!(super::mul_ceil(340_000, 0.001), 341.into()); - assert_eq!(super::mul_ceil(340_001, 0.001), 341.into()); - } - - /// Before https://github.com/informalsystems/ibc-rs/pull/1568, - /// this test would have panic'ed with: - /// - /// thread 'chain::cosmos::tests::fee_overflow' panicked at 'attempt to multiply with overflow' - #[test] - fn fee_overflow() { - let gas_amount = 90000000000000_u64; - let gas_price = GasPrice { - price: 1000000000000.0, - denom: "uatom".to_string(), - }; - - let fee = calculate_fee(gas_amount, &gas_price); - assert_eq!(&fee.amount, "90000000000000000000000000"); - } - - #[test] - fn sort_clients_id_suffix() { - let mut clients: Vec = vec![ - IdentifiedAnyClientState::new( - ClientId::new(ClientType::Tendermint, 4).unwrap(), - AnyClientState::Mock(MockClientState::new(MockHeader::new(Height::new(0, 0)))), - ), - IdentifiedAnyClientState::new( - ClientId::new(ClientType::Tendermint, 1).unwrap(), - AnyClientState::Mock(MockClientState::new(MockHeader::new(Height::new(0, 0)))), - ), - IdentifiedAnyClientState::new( - ClientId::new(ClientType::Tendermint, 7).unwrap(), - AnyClientState::Mock(MockClientState::new(MockHeader::new(Height::new(0, 0)))), - ), - ]; - clients.sort_by_cached_key(|c| client_id_suffix(&c.client_id).unwrap_or(0)); - assert_eq!( - client_id_suffix(&clients.first().unwrap().client_id).unwrap(), - 1 - ); - assert_eq!(client_id_suffix(&clients[1].client_id).unwrap(), 4); - assert_eq!( - client_id_suffix(&clients.last().unwrap().client_id).unwrap(), - 7 - ); - } -} diff --git a/relayer/src/chain/cosmos/batch.rs b/relayer/src/chain/cosmos/batch.rs deleted file mode 100644 index 867a1989b3..0000000000 --- a/relayer/src/chain/cosmos/batch.rs +++ /dev/null @@ -1,151 +0,0 @@ -use ibc::events::IbcEvent; -use ibc_proto::google::protobuf::Any; -use prost::Message; -use tendermint_rpc::endpoint::broadcast::tx_sync::Response; - -use crate::chain::cosmos::retry::send_tx_with_account_sequence_retry; -use crate::chain::cosmos::types::account::Account; -use crate::chain::cosmos::types::config::TxConfig; -use crate::chain::cosmos::types::tx::TxSyncResult; -use crate::chain::cosmos::wait::wait_for_block_commits; -use crate::config::types::{MaxMsgNum, MaxTxSize, Memo}; -use crate::error::Error; -use crate::keyring::KeyEntry; - -pub async fn send_batched_messages_and_wait_commit( - config: &TxConfig, - max_msg_num: MaxMsgNum, - max_tx_size: MaxTxSize, - key_entry: &KeyEntry, - account: &mut Account, - tx_memo: &Memo, - messages: Vec, -) -> Result, Error> { - if messages.is_empty() { - return Ok(Vec::new()); - } - - let mut tx_sync_results = send_messages_as_batches( - config, - max_msg_num, - max_tx_size, - key_entry, - account, - tx_memo, - messages, - ) - .await?; - - wait_for_block_commits( - &config.chain_id, - &config.rpc_client, - &config.rpc_address, - &config.rpc_timeout, - &mut tx_sync_results, - ) - .await?; - - let events = tx_sync_results - .into_iter() - .flat_map(|el| el.events) - .collect(); - - Ok(events) -} - -pub async fn send_batched_messages_and_wait_check_tx( - config: &TxConfig, - max_msg_num: MaxMsgNum, - max_tx_size: MaxTxSize, - key_entry: &KeyEntry, - account: &mut Account, - tx_memo: &Memo, - messages: Vec, -) -> Result, Error> { - if 
messages.is_empty() { - return Ok(Vec::new()); - } - - let batches = batch_messages(max_msg_num, max_tx_size, messages)?; - - let mut responses = Vec::new(); - - for batch in batches { - let response = - send_tx_with_account_sequence_retry(config, key_entry, account, tx_memo, batch, 0) - .await?; - - responses.push(response); - } - - Ok(responses) -} - -async fn send_messages_as_batches( - config: &TxConfig, - max_msg_num: MaxMsgNum, - max_tx_size: MaxTxSize, - key_entry: &KeyEntry, - account: &mut Account, - tx_memo: &Memo, - messages: Vec, -) -> Result, Error> { - if messages.is_empty() { - return Ok(Vec::new()); - } - - let batches = batch_messages(max_msg_num, max_tx_size, messages)?; - - let mut tx_sync_results = Vec::new(); - - for batch in batches { - let events_per_tx = vec![IbcEvent::default(); batch.len()]; - - let response = - send_tx_with_account_sequence_retry(config, key_entry, account, tx_memo, batch, 0) - .await?; - - let tx_sync_result = TxSyncResult { - response, - events: events_per_tx, - }; - - tx_sync_results.push(tx_sync_result); - } - - Ok(tx_sync_results) -} - -fn batch_messages( - max_msg_num: MaxMsgNum, - max_tx_size: MaxTxSize, - messages: Vec, -) -> Result>, Error> { - let max_message_count = max_msg_num.to_usize(); - let max_tx_size = max_tx_size.into(); - - let mut batches = vec![]; - - let mut current_count = 0; - let mut current_size = 0; - let mut current_batch = vec![]; - - for message in messages.into_iter() { - current_count += 1; - current_size += message.encoded_len(); - current_batch.push(message); - - if current_count >= max_message_count || current_size >= max_tx_size { - let insert_batch = current_batch.drain(..).collect(); - batches.push(insert_batch); - current_count = 0; - current_size = 0; - } - } - - if !current_batch.is_empty() { - batches.push(current_batch); - } - - Ok(batches) -} diff --git a/relayer/src/chain/cosmos/client.rs b/relayer/src/chain/cosmos/client.rs deleted file mode 100644 index 9b00dda7b4..0000000000 --- a/relayer/src/chain/cosmos/client.rs +++ /dev/null @@ -1,58 +0,0 @@ -//! Cosmos-specific client settings. - -use core::time::Duration; - -use tracing::warn; - -use ibc::core::ics02_client::trust_threshold::TrustThreshold; - -use crate::config::ChainConfig; -use crate::foreign_client::CreateOptions; - -/// Cosmos-specific client parameters for the `build_client_state` operation. -#[derive(Clone, Debug, Default)] -pub struct Settings { - pub max_clock_drift: Duration, - pub trusting_period: Option, - pub trust_threshold: TrustThreshold, -} - -impl Settings { - pub fn for_create_command( - options: CreateOptions, - src_chain_config: &ChainConfig, - dst_chain_config: &ChainConfig, - ) -> Self { - let max_clock_drift = match options.max_clock_drift { - None => calculate_client_state_drift(src_chain_config, dst_chain_config), - Some(user_value) => { - if user_value > dst_chain_config.max_block_time { - warn!( - "user specified max_clock_drift ({:?}) exceeds max_block_time \ - of the destination chain {}", - user_value, dst_chain_config.id, - ); - } - user_value - } - }; - let trust_threshold = options - .trust_threshold - .unwrap_or_else(|| src_chain_config.trust_threshold.into()); - Settings { - max_clock_drift, - trusting_period: options.trusting_period, - trust_threshold, - } - } -} - -/// The client state clock drift must account for destination -/// chain block frequency and clock drift on source and dest. 
-/// https://github.com/informalsystems/ibc-rs/issues/1445 -fn calculate_client_state_drift( - src_chain_config: &ChainConfig, - dst_chain_config: &ChainConfig, -) -> Duration { - src_chain_config.clock_drift + dst_chain_config.clock_drift + dst_chain_config.max_block_time -} diff --git a/relayer/src/chain/cosmos/compatibility.rs b/relayer/src/chain/cosmos/compatibility.rs deleted file mode 100644 index 28869bc95b..0000000000 --- a/relayer/src/chain/cosmos/compatibility.rs +++ /dev/null @@ -1,87 +0,0 @@ -//! Cosmos-SDK compatibility constants and diagnostic methods. - -use thiserror::Error; -use tracing::debug; - -use super::version; - -/// Specifies the SDK module version requirement. -/// -/// # Note: Should be consistent with [features] guide page. -/// -/// [features]: https://hermes.informal.systems/features.html -const SDK_MODULE_VERSION_REQ: &str = ">=0.41, <0.46"; - -/// Specifies the IBC-go module version requirement. -/// At the moment, we support both chains with and without -/// the standalone ibc-go module, i.e., it's not an error -/// if the chain binary does not build with this module. -/// -/// # Note: Should be consistent with [features] guide page. -/// -/// [features]: https://hermes.informal.systems/features.html -const IBC_GO_MODULE_VERSION_REQ: &str = ">=1.1, <=3"; - -#[derive(Error, Debug)] -pub enum Diagnostic { - #[error( - "SDK module at version '{found}' does not meet compatibility requirements {requirements}" - )] - MismatchingSdkModuleVersion { requirements: String, found: String }, - - #[error("Ibc-Go module at version '{found}' does not meet compatibility requirements {requirements}")] - MismatchingIbcGoModuleVersion { requirements: String, found: String }, -} - -/// Runs a diagnostic check on the provided [`VersionInfo`] -/// to ensure that the Sdk & IBC-go modules version match -/// the predefined requirements. -/// -/// Returns `None` upon success, or a [`Diagnostic`] upon -/// an error. -/// -/// Relies on the constant [`SDK_MODULE_NAME`] to find the -/// Sdk module by name, as well as the constants -/// [`SDK_MODULE_VERSION_REQ`] and [`IBC_GO_MODULE_VERSION_REQ`] -/// for establishing compatibility requirements. -pub(crate) fn run_diagnostic(v: &version::Specs) -> Result<(), Diagnostic> { - debug!("running diagnostic on version info {:?}", v); - sdk_diagnostic(v.sdk_version.clone())?; - ibc_go_diagnostic(v.ibc_go_version.clone())?; - Ok(()) -} - -fn sdk_diagnostic(version: semver::Version) -> Result<(), Diagnostic> { - // Parse the SDK requirements into a semver - let sdk_reqs = semver::VersionReq::parse(SDK_MODULE_VERSION_REQ) - .expect("parsing the SDK module requirements into semver"); - - // Finally, check the version requirements - match sdk_reqs.matches(&version) { - true => Ok(()), - false => Err(Diagnostic::MismatchingSdkModuleVersion { - requirements: SDK_MODULE_VERSION_REQ.to_string(), - found: version.to_string(), - }), - } -} - -fn ibc_go_diagnostic(version_info: Option) -> Result<(), Diagnostic> { - // Parse the IBC-go module requirements into a semver - let ibc_reqs = semver::VersionReq::parse(IBC_GO_MODULE_VERSION_REQ) - .expect("parsing the IBC-Go module requirements into semver"); - - // Find the Ibc-Go module - match version_info { - // If binary lacks the ibc-go dependency it is _not_ an error, - // we support chains without the standalone ibc-go module. 
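The requirement strings above are plain semver ranges, so each compatibility check reduces to parsing the reported module version and testing it against the range. A minimal sketch assuming the `semver` crate (already a relayer dependency), reusing the SDK requirement string verbatim:

    use semver::{Version, VersionReq};

    fn sdk_version_ok(found: &str) -> bool {
        // Same requirement string as SDK_MODULE_VERSION_REQ above.
        let reqs = VersionReq::parse(">=0.41, <0.46").expect("valid semver requirement");
        Version::parse(found).map(|v| reqs.matches(&v)).unwrap_or(false)
    }

    fn main() {
        assert!(sdk_version_ok("0.45.1"));
        assert!(!sdk_version_ok("0.40.0"));
        assert!(!sdk_version_ok("0.46.0"));
        println!("ok");
    }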
- None => Ok(()), - Some(v) => match ibc_reqs.matches(&v) { - true => Ok(()), - false => Err(Diagnostic::MismatchingIbcGoModuleVersion { - requirements: IBC_GO_MODULE_VERSION_REQ.to_string(), - found: v.to_string(), - }), - }, - } -} diff --git a/relayer/src/chain/cosmos/encode.rs b/relayer/src/chain/cosmos/encode.rs deleted file mode 100644 index 416bc049d0..0000000000 --- a/relayer/src/chain/cosmos/encode.rs +++ /dev/null @@ -1,179 +0,0 @@ -use bech32::{ToBase32, Variant}; -use core::str::FromStr; -use ibc::core::ics24_host::identifier::ChainId; -use ibc_proto::cosmos::tx::v1beta1::mode_info::{Single, Sum}; -use ibc_proto::cosmos::tx::v1beta1::{AuthInfo, Fee, ModeInfo, SignDoc, SignerInfo, TxBody, TxRaw}; -use ibc_proto::google::protobuf::Any; -use tendermint::account::Id as AccountId; - -use crate::chain::cosmos::types::account::{Account, AccountNumber, AccountSequence}; -use crate::chain::cosmos::types::config::TxConfig; -use crate::chain::cosmos::types::tx::SignedTx; -use crate::config::types::Memo; -use crate::config::AddressType; -use crate::error::Error; -use crate::keyring::{sign_message, KeyEntry}; - -pub fn sign_and_encode_tx( - config: &TxConfig, - key_entry: &KeyEntry, - account: &Account, - tx_memo: &Memo, - messages: Vec, - fee: &Fee, -) -> Result, Error> { - let signed_tx = sign_tx(config, key_entry, account, tx_memo, messages, fee)?; - - let tx_raw = TxRaw { - body_bytes: signed_tx.body_bytes, - auth_info_bytes: signed_tx.auth_info_bytes, - signatures: signed_tx.signatures, - }; - - encode_tx_raw(tx_raw) -} - -pub fn sign_tx( - config: &TxConfig, - key_entry: &KeyEntry, - account: &Account, - tx_memo: &Memo, - messages: Vec, - fee: &Fee, -) -> Result { - let key_bytes = encode_key_bytes(key_entry)?; - - let signer = encode_signer_info(&config.address_type, account.sequence, key_bytes)?; - - let (body, body_bytes) = tx_body_and_bytes(messages, tx_memo)?; - - let (auth_info, auth_info_bytes) = auth_info_and_bytes(signer, fee.clone())?; - - let signed_doc = encode_sign_doc( - &config.chain_id, - key_entry, - &config.address_type, - account.number, - auth_info_bytes.clone(), - body_bytes.clone(), - )?; - - Ok(SignedTx { - body, - body_bytes, - auth_info, - auth_info_bytes, - signatures: vec![signed_doc], - }) -} - -fn encode_key_bytes(key: &KeyEntry) -> Result, Error> { - let mut pk_buf = Vec::new(); - - prost::Message::encode(&key.public_key.to_pub().to_bytes(), &mut pk_buf) - .map_err(|e| Error::protobuf_encode("PublicKey".into(), e))?; - - Ok(pk_buf) -} - -fn encode_sign_doc( - chain_id: &ChainId, - key: &KeyEntry, - address_type: &AddressType, - account_number: AccountNumber, - auth_info_bytes: Vec, - body_bytes: Vec, -) -> Result, Error> { - let sign_doc = SignDoc { - body_bytes, - auth_info_bytes, - chain_id: chain_id.to_string(), - account_number: account_number.to_u64(), - }; - - // A protobuf serialization of a SignDoc - let mut signdoc_buf = Vec::new(); - prost::Message::encode(&sign_doc, &mut signdoc_buf).unwrap(); - - let signed = sign_message(key, signdoc_buf, address_type).map_err(Error::key_base)?; - - Ok(signed) -} - -fn encode_signer_info( - address_type: &AddressType, - sequence: AccountSequence, - key_bytes: Vec, -) -> Result { - let pk_type = match address_type { - AddressType::Cosmos => "/cosmos.crypto.secp256k1.PubKey".to_string(), - AddressType::Ethermint { pk_type } => pk_type.clone(), - }; - // Create a MsgSend proto Any message - let pk_any = Any { - type_url: pk_type, - value: key_bytes, - }; - - let single = Single { mode: 1 }; - let sum_single = 
Some(Sum::Single(single)); - let mode = Some(ModeInfo { sum: sum_single }); - let signer_info = SignerInfo { - public_key: Some(pk_any), - mode_info: mode, - sequence: sequence.to_u64(), - }; - Ok(signer_info) -} - -fn encode_tx_raw(tx_raw: TxRaw) -> Result, Error> { - let mut tx_bytes = Vec::new(); - prost::Message::encode(&tx_raw, &mut tx_bytes) - .map_err(|e| Error::protobuf_encode("Transaction".to_string(), e))?; - - Ok(tx_bytes) -} - -pub fn encode_to_bech32(address: &str, account_prefix: &str) -> Result { - let account = AccountId::from_str(address) - .map_err(|e| Error::invalid_key_address(address.to_string(), e))?; - - let encoded = bech32::encode(account_prefix, account.to_base32(), Variant::Bech32) - .map_err(Error::bech32_encoding)?; - - Ok(encoded) -} - -fn auth_info_and_bytes(signer_info: SignerInfo, fee: Fee) -> Result<(AuthInfo, Vec), Error> { - let auth_info = AuthInfo { - signer_infos: vec![signer_info], - fee: Some(fee), - }; - - // A protobuf serialization of a AuthInfo - let mut auth_buf = Vec::new(); - - prost::Message::encode(&auth_info, &mut auth_buf) - .map_err(|e| Error::protobuf_encode(String::from("AuthInfo"), e))?; - - Ok((auth_info, auth_buf)) -} - -fn tx_body_and_bytes(proto_msgs: Vec, memo: &Memo) -> Result<(TxBody, Vec), Error> { - // Create TxBody - let body = TxBody { - messages: proto_msgs.to_vec(), - memo: memo.to_string(), - timeout_height: 0_u64, - extension_options: Vec::::new(), - non_critical_extension_options: Vec::::new(), - }; - - // A protobuf serialization of a TxBody - let mut body_buf = Vec::new(); - - prost::Message::encode(&body, &mut body_buf) - .map_err(|e| Error::protobuf_encode(String::from("TxBody"), e))?; - - Ok((body, body_buf)) -} diff --git a/relayer/src/chain/cosmos/estimate.rs b/relayer/src/chain/cosmos/estimate.rs deleted file mode 100644 index 3b463b28e9..0000000000 --- a/relayer/src/chain/cosmos/estimate.rs +++ /dev/null @@ -1,158 +0,0 @@ -use ibc::core::ics24_host::identifier::ChainId; -use ibc_proto::cosmos::tx::v1beta1::{Fee, Tx}; -use ibc_proto::google::protobuf::Any; -use tonic::codegen::http::Uri; -use tracing::{debug, error, span, warn, Level}; - -use crate::chain::cosmos::encode::sign_tx; -use crate::chain::cosmos::gas::{gas_amount_to_fees, PrettyFee}; -use crate::chain::cosmos::simulate::send_tx_simulate; -use crate::chain::cosmos::types::account::Account; -use crate::chain::cosmos::types::config::TxConfig; -use crate::chain::cosmos::types::gas::GasConfig; -use crate::config::types::Memo; -use crate::error::Error; -use crate::keyring::KeyEntry; - -pub async fn estimate_tx_fees( - config: &TxConfig, - key_entry: &KeyEntry, - account: &Account, - tx_memo: &Memo, - messages: Vec, -) -> Result { - let gas_config = &config.gas_config; - - debug!( - "max fee, for use in tx simulation: {}", - PrettyFee(&gas_config.max_fee) - ); - - let signed_tx = sign_tx( - config, - key_entry, - account, - tx_memo, - messages, - &gas_config.max_fee, - )?; - - let tx = Tx { - body: Some(signed_tx.body), - auth_info: Some(signed_tx.auth_info), - signatures: signed_tx.signatures, - }; - - let estimated_fee = - estimate_fee_with_tx(gas_config, &config.grpc_address, &config.chain_id, tx).await?; - - Ok(estimated_fee) -} - -async fn estimate_fee_with_tx( - gas_config: &GasConfig, - grpc_address: &Uri, - chain_id: &ChainId, - tx: Tx, -) -> Result { - let estimated_gas = estimate_gas_with_tx(gas_config, grpc_address, tx).await?; - - if estimated_gas > gas_config.max_gas { - debug!( - id = %chain_id, estimated = ?estimated_gas, max = 
?gas_config.max_gas, - "send_tx: estimated gas is higher than max gas" - ); - - return Err(Error::tx_simulate_gas_estimate_exceeded( - chain_id.clone(), - estimated_gas, - gas_config.max_gas, - )); - } - - let adjusted_fee = gas_amount_to_fees(gas_config, estimated_gas); - - debug!( - id = %chain_id, - "send_tx: using {} gas, fee {}", - estimated_gas, - PrettyFee(&adjusted_fee) - ); - - Ok(adjusted_fee) -} - -/// Try to simulate the given tx in order to estimate how much gas will be needed to submit it. -/// -/// It is possible that a batch of messages are fragmented by the caller (`send_msgs`) such that -/// they do not individually verify. For example for the following batch: -/// [`MsgUpdateClient`, `MsgRecvPacket`, ..., `MsgRecvPacket`] -/// -/// If the batch is split in two TX-es, the second one will fail the simulation in `deliverTx` check. -/// In this case we use the `default_gas` param. -async fn estimate_gas_with_tx( - gas_config: &GasConfig, - grpc_address: &Uri, - tx: Tx, -) -> Result { - let simulated_gas = send_tx_simulate(grpc_address, tx) - .await - .map(|sr| sr.gas_info); - - let _span = span!(Level::ERROR, "estimate_gas").entered(); - - match simulated_gas { - Ok(Some(gas_info)) => { - debug!( - "tx simulation successful, gas amount used: {:?}", - gas_info.gas_used - ); - - Ok(gas_info.gas_used) - } - - Ok(None) => { - warn!( - "tx simulation successful but no gas amount used was returned, falling back on default gas: {}", - gas_config.default_gas - ); - - Ok(gas_config.default_gas) - } - - // If there is a chance that the tx will be accepted once actually submitted, we fall - // back on the default gas and will attempt to send it anyway. - // See `can_recover_from_simulation_failure` for more info. - Err(e) if can_recover_from_simulation_failure(&e) => { - warn!( - "failed to simulate tx, falling back on default gas because the error is potentially recoverable: {}", - e.detail() - ); - - Ok(gas_config.default_gas) - } - - Err(e) => { - error!( - "failed to simulate tx. propagating error to caller: {}", - e.detail() - ); - // Propagate the error, the retrying mechanism at caller may catch & retry. - Err(e) - } - } -} - -/// Determine whether the given error yielded by `tx_simulate` -/// can be recovered from by submitting the tx anyway. 
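The branches above reduce to a small decision: use the simulated gas amount when the node returns one, fall back to the configured `default_gas` when the failure is recoverable (including an empty gas report), and propagate anything else. A sketch of that decision, with a simplified outcome enum standing in for the gas-info and error cases:

    #[derive(Debug)]
    enum SimulationOutcome {
        Gas(u64),
        NoGasReturned,
        RecoverableFailure,
        UnrecoverableFailure,
    }

    fn gas_to_use(outcome: SimulationOutcome, default_gas: u64) -> Option<u64> {
        match outcome {
            SimulationOutcome::Gas(gas) => Some(gas),
            // Recoverable cases fall back to the configured default gas.
            SimulationOutcome::NoGasReturned | SimulationOutcome::RecoverableFailure => {
                Some(default_gas)
            }
            // Unrecoverable simulation errors are propagated to the caller.
            SimulationOutcome::UnrecoverableFailure => None,
        }
    }

    fn main() {
        assert_eq!(gas_to_use(SimulationOutcome::Gas(75_000), 100_000), Some(75_000));
        assert_eq!(gas_to_use(SimulationOutcome::NoGasReturned, 100_000), Some(100_000));
        assert_eq!(gas_to_use(SimulationOutcome::UnrecoverableFailure, 100_000), None);
        println!("ok");
    }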
-fn can_recover_from_simulation_failure(e: &Error) -> bool { - use crate::error::ErrorDetail::*; - - match e.detail() { - GrpcStatus(detail) => { - detail.is_client_state_height_too_low() - || detail.is_account_sequence_mismatch_that_can_be_ignored() - } - _ => false, - } -} diff --git a/relayer/src/chain/cosmos/gas.rs b/relayer/src/chain/cosmos/gas.rs deleted file mode 100644 index 9b881845db..0000000000 --- a/relayer/src/chain/cosmos/gas.rs +++ /dev/null @@ -1,74 +0,0 @@ -use core::cmp::min; -use core::fmt; -use ibc_proto::cosmos::base::v1beta1::Coin; -use ibc_proto::cosmos::tx::v1beta1::Fee; -use num_bigint::BigInt; -use num_rational::BigRational; - -use crate::chain::cosmos::types::gas::GasConfig; -use crate::config::GasPrice; - -pub struct PrettyFee<'a>(pub &'a Fee); - -pub fn gas_amount_to_fees(config: &GasConfig, gas_amount: u64) -> Fee { - let adjusted_gas_limit = adjust_gas_with_simulated_fees(config, gas_amount); - - // The fee in coins based on gas amount - let amount = calculate_fee(adjusted_gas_limit, &config.gas_price); - - Fee { - amount: vec![amount], - gas_limit: adjusted_gas_limit, - payer: "".to_string(), - granter: config.fee_granter.clone(), - } -} - -pub fn calculate_fee(adjusted_gas_amount: u64, gas_price: &GasPrice) -> Coin { - let fee_amount = mul_ceil(adjusted_gas_amount, gas_price.price); - - Coin { - denom: gas_price.denom.to_string(), - amount: fee_amount.to_string(), - } -} - -/// Multiply `a` with `f` and round the result up to the nearest integer. -pub fn mul_ceil(a: u64, f: f64) -> BigInt { - assert!(f.is_finite()); - - let a = BigInt::from(a); - let f = BigRational::from_float(f).expect("f is finite"); - (f * a).ceil().to_integer() -} - -/// Adjusts the fee based on the configured `gas_adjustment` to prevent out of gas errors. -/// The actual gas cost, when a transaction is executed, may be slightly higher than the -/// one returned by the simulation. 
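A back-of-the-envelope version of the adjustment documented above, using plain floating point instead of the BigInt-based `mul_ceil` (the original implementation follows): the simulated amount is padded by `gas_amount * gas_adjustment`, rounded up, and capped at the configured maximum. The numeric values are illustrative only.

    fn adjust_gas(gas_amount: u64, gas_adjustment: f64, max_gas: u64) -> u64 {
        assert!(gas_adjustment <= 1.0);
        // Padding, rounded up (the real code computes this with mul_ceil).
        let padding = (gas_amount as f64 * gas_adjustment).ceil() as u64;
        let padded = gas_amount.checked_add(padding).unwrap_or(u64::MAX);
        padded.min(max_gas)
    }

    fn main() {
        // 80_000 simulated gas with a 0.5 adjustment becomes 120_000,
        // but never exceeds the configured max_gas.
        assert_eq!(adjust_gas(80_000, 0.5, 400_000), 120_000);
        assert_eq!(adjust_gas(80_000, 0.5, 100_000), 100_000);
        println!("ok");
    }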
-fn adjust_gas_with_simulated_fees(config: &GasConfig, gas_amount: u64) -> u64 { - let gas_adjustment = config.gas_adjustment; - - assert!(gas_adjustment <= 1.0); - - let (_, digits) = mul_ceil(gas_amount, gas_adjustment).to_u64_digits(); - assert!(digits.len() == 1); - - let adjustment = digits[0]; - let gas = gas_amount.checked_add(adjustment).unwrap_or(u64::MAX); - - min(gas, config.max_gas) -} - -impl fmt::Display for PrettyFee<'_> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let amount = match self.0.amount.get(0) { - Some(coin) => format!("{}{}", coin.amount, coin.denom), - None => "".to_string(), - }; - - f.debug_struct("Fee") - .field("amount", &amount) - .field("gas_limit", &self.0.gas_limit) - .finish() - } -} diff --git a/relayer/src/chain/cosmos/query.rs b/relayer/src/chain/cosmos/query.rs deleted file mode 100644 index 67b8b868de..0000000000 --- a/relayer/src/chain/cosmos/query.rs +++ /dev/null @@ -1,157 +0,0 @@ -use http::uri::Uri; -use ibc::core::ics02_client::client_consensus::QueryClientEventRequest; -use ibc::core::ics04_channel::channel::QueryPacketEventDataRequest; -use ibc::core::ics04_channel::packet::Sequence; -use ibc::core::ics23_commitment::merkle::{convert_tm_to_ics_merkle_proof, MerkleProof}; -use ibc::core::ics24_host::identifier::ChainId; -use ibc::query::QueryTxHash; -use ibc_proto::cosmos::base::tendermint::v1beta1::service_client::ServiceClient; -use ibc_proto::cosmos::base::tendermint::v1beta1::GetNodeInfoRequest; -use tendermint::abci::Path as TendermintABCIPath; -use tendermint::block::Height; -use tendermint_rpc::query::Query; -use tendermint_rpc::{Client, HttpClient, Url}; - -use crate::chain::cosmos::version::Specs; -use crate::error::Error; - -pub mod account; -pub mod balance; -pub mod status; -pub mod tx; - -/// Generic query response type -#[derive(Clone, Debug, PartialEq)] -pub struct QueryResponse { - pub value: Vec, - pub proof: Option, - pub height: Height, -} - -pub fn packet_query(request: &QueryPacketEventDataRequest, seq: Sequence) -> Query { - Query::eq( - format!("{}.packet_src_channel", request.event_id.as_str()), - request.source_channel_id.to_string(), - ) - .and_eq( - format!("{}.packet_src_port", request.event_id.as_str()), - request.source_port_id.to_string(), - ) - .and_eq( - format!("{}.packet_dst_channel", request.event_id.as_str()), - request.destination_channel_id.to_string(), - ) - .and_eq( - format!("{}.packet_dst_port", request.event_id.as_str()), - request.destination_port_id.to_string(), - ) - .and_eq( - format!("{}.packet_sequence", request.event_id.as_str()), - seq.to_string(), - ) -} - -pub fn header_query(request: &QueryClientEventRequest) -> Query { - Query::eq( - format!("{}.client_id", request.event_id.as_str()), - request.client_id.to_string(), - ) - .and_eq( - format!("{}.consensus_height", request.event_id.as_str()), - format!( - "{}-{}", - request.consensus_height.revision_number, request.consensus_height.revision_height - ), - ) -} - -pub fn tx_hash_query(request: &QueryTxHash) -> Query { - Query::eq("tx.hash", request.0.to_string()) -} - -/// Perform a generic `abci_query`, and return the corresponding deserialized response data. -pub async fn abci_query( - rpc_client: &HttpClient, - rpc_address: &Url, - path: TendermintABCIPath, - data: String, - height: Height, - prove: bool, -) -> Result { - let height = if height.value() == 0 { - None - } else { - Some(height) - }; - - // Use the Tendermint-rs RPC client to do the query. 
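The query builders above combine per-attribute equality conditions with AND; a packet query for a single sequence looks roughly like the sketch below, assuming the `tendermint-rpc` crate used by the relayer (the channel, port, and sequence values are hypothetical):

    use tendermint_rpc::query::Query;

    fn example_packet_query() -> Query {
        Query::eq("send_packet.packet_src_channel".to_string(), "channel-0".to_string())
            .and_eq("send_packet.packet_src_port".to_string(), "transfer".to_string())
            .and_eq("send_packet.packet_sequence".to_string(), 7.to_string())
    }

    fn main() {
        // The query renders to the condition string passed to tx_search / block_search.
        println!("{}", example_packet_query());
    }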
- let response = rpc_client - .abci_query(Some(path), data.into_bytes(), height, prove) - .await - .map_err(|e| Error::rpc(rpc_address.clone(), e))?; - - if !response.code.is_ok() { - // Fail with response log. - return Err(Error::abci_query(response)); - } - - if prove && response.proof.is_none() { - // Fail due to empty proof - return Err(Error::empty_response_proof()); - } - - let proof = response - .proof - .map(|p| convert_tm_to_ics_merkle_proof(&p)) - .transpose() - .map_err(Error::ics23)?; - - let response = QueryResponse { - value: response.value, - height: response.height, - proof, - }; - - Ok(response) -} - -/// Queries the chain to obtain the version information. -pub async fn fetch_version_specs(chain_id: &ChainId, grpc_address: &Uri) -> Result { - let grpc_addr_string = grpc_address.to_string(); - - // Construct a gRPC client - let mut client = ServiceClient::connect(grpc_address.clone()) - .await - .map_err(|e| { - Error::fetch_version_grpc_transport( - chain_id.clone(), - grpc_addr_string.clone(), - "tendermint::ServiceClient".to_string(), - e, - ) - })?; - - let request = tonic::Request::new(GetNodeInfoRequest {}); - - let response = client.get_node_info(request).await.map_err(|e| { - Error::fetch_version_grpc_status( - chain_id.clone(), - grpc_addr_string.clone(), - "tendermint::ServiceClient".to_string(), - e, - ) - })?; - - let version = response.into_inner().application_version.ok_or_else(|| { - Error::fetch_version_invalid_version_response( - chain_id.clone(), - grpc_addr_string.clone(), - "tendermint::GetNodeInfoRequest".to_string(), - ) - })?; - - // Parse the raw version info into a domain-type `version::Specs` - version - .try_into() - .map_err(|e| Error::fetch_version_parsing(chain_id.clone(), grpc_addr_string.clone(), e)) -} diff --git a/relayer/src/chain/cosmos/query/account.rs b/relayer/src/chain/cosmos/query/account.rs deleted file mode 100644 index 15cdd8f467..0000000000 --- a/relayer/src/chain/cosmos/query/account.rs +++ /dev/null @@ -1,83 +0,0 @@ -use http::uri::Uri; -use ibc_proto::cosmos::auth::v1beta1::query_client::QueryClient; -use ibc_proto::cosmos::auth::v1beta1::{BaseAccount, EthAccount, QueryAccountRequest}; -use prost::Message; -use tracing::info; - -use crate::chain::cosmos::types::account::Account; -use crate::error::Error; - -/// Get a `&mut Account` from an `&mut Option` if it is `Some(Account)`. -/// Otherwise query for the account information, update the `Option` to `Some`, -/// and return the underlying `&mut` reference. -pub async fn get_or_fetch_account<'a>( - grpc_address: &'a Uri, - account_address: &'a str, - m_account: &'a mut Option, -) -> Result<&'a mut Account, Error> { - match m_account { - Some(account) => Ok(account), - None => { - let account = query_account(grpc_address, account_address).await?; - *m_account = Some(account.into()); - - Ok(m_account - .as_mut() - .expect("account was supposedly just cached")) - } - } -} - -/// Refresh the account sequence behind the `&mut Account` by refetching the -/// account and updating the `&mut` reference. 
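The cache-then-borrow pattern in `get_or_fetch_account` above can also be written with `Option::get_or_insert_with` from the standard library; a minimal sketch with a simplified `Account` type and a stubbed-out query:

    #[derive(Debug)]
    struct Account {
        sequence: u64,
    }

    fn query_account_stub() -> Account {
        // Stands in for the gRPC account query.
        Account { sequence: 42 }
    }

    fn get_or_fetch_account(cache: &mut Option<Account>) -> &mut Account {
        // Equivalent to the explicit match above: fetch once, then reuse the cached value.
        cache.get_or_insert_with(query_account_stub)
    }

    fn main() {
        let mut cache: Option<Account> = None;
        get_or_fetch_account(&mut cache).sequence += 1;
        println!("{:?}", cache); // Some(Account { sequence: 43 })
    }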
-pub async fn refresh_account<'a>( - grpc_address: &Uri, - account_address: &str, - m_account: &'a mut Account, -) -> Result<(), Error> { - let account = query_account(grpc_address, account_address).await?; - - info!( - sequence = %account.sequence, - number = %account.account_number, - "refresh: retrieved account", - ); - - *m_account = account.into(); - - Ok(()) -} - -/// Uses the GRPC client to retrieve the account sequence -pub async fn query_account( - grpc_address: &Uri, - account_address: &str, -) -> Result { - let mut client = QueryClient::connect(grpc_address.clone()) - .await - .map_err(Error::grpc_transport)?; - - let request = tonic::Request::new(QueryAccountRequest { - address: account_address.to_string(), - }); - - let response = client.account(request).await; - - // Querying for an account might fail, i.e. if the account doesn't actually exist - let resp_account = match response.map_err(Error::grpc_status)?.into_inner().account { - Some(account) => account, - None => return Err(Error::empty_query_account(account_address.to_string())), - }; - - if resp_account.type_url == "/cosmos.auth.v1beta1.BaseAccount" { - Ok(BaseAccount::decode(resp_account.value.as_slice()) - .map_err(|e| Error::protobuf_decode("BaseAccount".to_string(), e))?) - } else if resp_account.type_url.ends_with(".EthAccount") { - Ok(EthAccount::decode(resp_account.value.as_slice()) - .map_err(|e| Error::protobuf_decode("EthAccount".to_string(), e))? - .base_account - .ok_or_else(Error::empty_base_account)?) - } else { - Err(Error::unknown_account_type(resp_account.type_url)) - } -} diff --git a/relayer/src/chain/cosmos/query/balance.rs b/relayer/src/chain/cosmos/query/balance.rs deleted file mode 100644 index e59cd4618d..0000000000 --- a/relayer/src/chain/cosmos/query/balance.rs +++ /dev/null @@ -1,37 +0,0 @@ -use http::uri::Uri; - -use ibc_proto::cosmos::bank::v1beta1::{query_client::QueryClient, QueryBalanceRequest}; - -use crate::{account::Balance, error::Error}; - -/// Uses the GRPC client to retrieve the account balance for a specific denom -pub async fn query_balance( - grpc_address: &Uri, - account_address: &str, - denom: &str, -) -> Result { - let mut client = QueryClient::connect(grpc_address.clone()) - .await - .map_err(Error::grpc_transport)?; - - let request = tonic::Request::new(QueryBalanceRequest { - address: account_address.to_string(), - denom: denom.to_string(), - }); - - let response = client - .balance(request) - .await - .map(|r| r.into_inner()) - .map_err(Error::grpc_status)?; - - // Querying for a balance might fail, i.e. if the account doesn't actually exist - let balance = response - .balance - .ok_or_else(|| Error::empty_query_account(account_address.to_string()))?; - - Ok(Balance { - amount: balance.amount, - denom: balance.denom, - }) -} diff --git a/relayer/src/chain/cosmos/query/status.rs b/relayer/src/chain/cosmos/query/status.rs deleted file mode 100644 index 2978f4ff43..0000000000 --- a/relayer/src/chain/cosmos/query/status.rs +++ /dev/null @@ -1,40 +0,0 @@ -use ibc::core::ics24_host::identifier::ChainId; -use ibc::Height; -use tendermint_rpc::{Client, HttpClient, Url}; - -use crate::chain::endpoint::ChainStatus; -use crate::error::Error; - -/// Query the chain status via an RPC query. -/// -/// Returns an error if the node is still syncing and has not caught up, -/// ie. if `sync_info.catching_up` is `true`. 
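The account decoding above dispatches on the `type_url` of the returned `Any`: an exact match selects `BaseAccount`, a `.EthAccount` suffix selects the Ethermint wrapper, and anything else is rejected. A string-level sketch of that dispatch (the Ethermint URL shown is illustrative):

    fn account_kind(type_url: &str) -> Option<&'static str> {
        if type_url == "/cosmos.auth.v1beta1.BaseAccount" {
            Some("base")
        } else if type_url.ends_with(".EthAccount") {
            Some("eth")
        } else {
            None
        }
    }

    fn main() {
        assert_eq!(account_kind("/cosmos.auth.v1beta1.BaseAccount"), Some("base"));
        assert_eq!(account_kind("/ethermint.types.v1.EthAccount"), Some("eth"));
        assert_eq!(account_kind("/cosmos.auth.v1beta1.ModuleAccount"), None);
        println!("ok");
    }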
-pub async fn query_status( - chain_id: &ChainId, - rpc_client: &HttpClient, - rpc_address: &Url, -) -> Result { - let response = rpc_client - .status() - .await - .map_err(|e| Error::rpc(rpc_address.clone(), e))?; - - if response.sync_info.catching_up { - return Err(Error::chain_not_caught_up( - rpc_address.to_string(), - chain_id.clone(), - )); - } - - let time = response.sync_info.latest_block_time; - - let height = Height { - revision_number: ChainId::chain_version(response.node_info.network.as_str()), - revision_height: u64::from(response.sync_info.latest_block_height), - }; - - Ok(ChainStatus { - height, - timestamp: time.into(), - }) -} diff --git a/relayer/src/chain/cosmos/query/tx.rs b/relayer/src/chain/cosmos/query/tx.rs deleted file mode 100644 index dab694a53c..0000000000 --- a/relayer/src/chain/cosmos/query/tx.rs +++ /dev/null @@ -1,250 +0,0 @@ -use ibc::core::ics02_client::client_consensus::QueryClientEventRequest; -use ibc::core::ics02_client::events as ClientEvents; -use ibc::core::ics04_channel::channel::QueryPacketEventDataRequest; -use ibc::core::ics04_channel::events as ChannelEvents; -use ibc::core::ics04_channel::packet::{Packet, Sequence}; -use ibc::core::ics24_host::identifier::ChainId; -use ibc::events::{from_tx_response_event, IbcEvent}; -use ibc::query::QueryTxRequest; -use ibc::Height as ICSHeight; -use tendermint::abci::Event; -use tendermint_rpc::endpoint::tx::Response as ResultTx; -use tendermint_rpc::{Client, HttpClient, Order, Url}; - -use crate::chain::cosmos::query::{header_query, packet_query, tx_hash_query}; -use crate::error::Error; - -/// This function queries transactions for events matching certain criteria. -/// 1. Client Update request - returns a vector with at most one update client event -/// 2. Packet event request - returns at most one packet event for each sequence specified -/// in the request. -/// Note - there is no way to format the packet query such that it asks for Tx-es with either -/// sequence (the query conditions can only be AND-ed). -/// There is a possibility to include "<=" and ">=" conditions but it doesn't work with -/// string attributes (sequence is emmitted as a string). -/// Therefore, for packets we perform one tx_search for each sequence. -/// Alternatively, a single query for all packets could be performed but it would return all -/// packets ever sent. 
-pub async fn query_txs( - chain_id: &ChainId, - rpc_client: &HttpClient, - rpc_address: &Url, - request: QueryTxRequest, -) -> Result, Error> { - crate::time!("query_txs"); - crate::telemetry!(query, chain_id, "query_txs"); - - match request { - QueryTxRequest::Packet(request) => { - crate::time!("query_txs: query packet events"); - - let mut result: Vec = vec![]; - - for seq in &request.sequences { - // query first (and only) Tx that includes the event specified in the query request - let response = rpc_client - .tx_search( - packet_query(&request, *seq), - false, - 1, - 1, // get only the first Tx matching the query - Order::Ascending, - ) - .await - .map_err(|e| Error::rpc(rpc_address.clone(), e))?; - - assert!( - response.txs.len() <= 1, - "packet_from_tx_search_response: unexpected number of txs" - ); - - if response.txs.is_empty() { - continue; - } - - if let Some(event) = packet_from_tx_search_response( - chain_id, - &request, - *seq, - response.txs[0].clone(), - ) { - result.push(event); - } - } - Ok(result) - } - - QueryTxRequest::Client(request) => { - crate::time!("query_txs: single client update event"); - - // query the first Tx that includes the event matching the client request - // Note: it is possible to have multiple Tx-es for same client and consensus height. - // In this case it must be true that the client updates were performed with tha - // same header as the first one, otherwise a subsequent transaction would have - // failed on chain. Therefore only one Tx is of interest and current API returns - // the first one. - let mut response = rpc_client - .tx_search( - header_query(&request), - false, - 1, - 1, // get only the first Tx matching the query - Order::Ascending, - ) - .await - .map_err(|e| Error::rpc(rpc_address.clone(), e))?; - - if response.txs.is_empty() { - return Ok(vec![]); - } - - // the response must include a single Tx as specified in the query. - assert!( - response.txs.len() <= 1, - "packet_from_tx_search_response: unexpected number of txs" - ); - - let tx = response.txs.remove(0); - let event = update_client_from_tx_search_response(chain_id, &request, tx); - - Ok(event.into_iter().collect()) - } - - QueryTxRequest::Transaction(tx) => { - let mut response = rpc_client - .tx_search( - tx_hash_query(&tx), - false, - 1, - 1, // get only the first Tx matching the query - Order::Ascending, - ) - .await - .map_err(|e| Error::rpc(rpc_address.clone(), e))?; - - if response.txs.is_empty() { - Ok(vec![]) - } else { - let tx = response.txs.remove(0); - Ok(all_ibc_events_from_tx_search_response(chain_id, tx)) - } - } - } -} - -// Extracts from the Tx the update client event for the requested client and height. -// Note: in the Tx, there may have been multiple events, some of them may be -// for update of other clients that are not relevant to the request. -// For example, if we're querying for a transaction that includes the update for client X at -// consensus height H, it is possible that the transaction also includes an update client -// for client Y at consensus height H'. This is the reason the code iterates all event fields in the -// returned Tx to retrieve the relevant ones. -// Returns `None` if no matching event was found. 
-fn update_client_from_tx_search_response( - chain_id: &ChainId, - request: &QueryClientEventRequest, - response: ResultTx, -) -> Option { - let height = ICSHeight::new(chain_id.version(), u64::from(response.height)); - if request.height != ICSHeight::zero() && height > request.height { - return None; - } - - response - .tx_result - .events - .into_iter() - .filter(|event| event.type_str == request.event_id.as_str()) - .flat_map(|event| ClientEvents::try_from_tx(&event)) - .flat_map(|event| match event { - IbcEvent::UpdateClient(mut update) => { - update.common.height = height; - Some(update) - } - _ => None, - }) - .find(|update| { - update.common.client_id == request.client_id - && update.common.consensus_height == request.consensus_height - }) - .map(IbcEvent::UpdateClient) -} - -// Extract the packet events from the query_txs RPC response. For any given -// packet query, there is at most one Tx matching such query. Moreover, a Tx may -// contain several events, but a single one must match the packet query. -// For example, if we're querying for the packet with sequence 3 and this packet -// was committed in some Tx along with the packet with sequence 4, the response -// will include both packets. For this reason, we iterate all packets in the Tx, -// searching for those that match (which must be a single one). -fn packet_from_tx_search_response( - chain_id: &ChainId, - request: &QueryPacketEventDataRequest, - seq: Sequence, - response: ResultTx, -) -> Option { - let height = ICSHeight::new(chain_id.version(), u64::from(response.height)); - if request.height != ICSHeight::zero() && height > request.height { - return None; - } - - response - .tx_result - .events - .into_iter() - .find_map(|ev| filter_matching_event(ev, request, seq)) -} - -fn filter_matching_event( - event: Event, - request: &QueryPacketEventDataRequest, - seq: Sequence, -) -> Option { - fn matches_packet( - request: &QueryPacketEventDataRequest, - seq: Sequence, - packet: &Packet, - ) -> bool { - packet.source_port == request.source_port_id - && packet.source_channel == request.source_channel_id - && packet.destination_port == request.destination_port_id - && packet.destination_channel == request.destination_channel_id - && packet.sequence == seq - } - - if event.type_str != request.event_id.as_str() { - return None; - } - - let ibc_event = ChannelEvents::try_from_tx(&event)?; - match ibc_event { - IbcEvent::SendPacket(ref send_ev) if matches_packet(request, seq, &send_ev.packet) => { - Some(ibc_event) - } - IbcEvent::WriteAcknowledgement(ref ack_ev) - if matches_packet(request, seq, &ack_ev.packet) => - { - Some(ibc_event) - } - _ => None, - } -} - -fn all_ibc_events_from_tx_search_response(chain_id: &ChainId, response: ResultTx) -> Vec { - let height = ICSHeight::new(chain_id.version(), u64::from(response.height)); - let deliver_tx_result = response.tx_result; - if deliver_tx_result.code.is_err() { - return vec![IbcEvent::ChainError(format!( - "deliver_tx for {} reports error: code={:?}, log={:?}", - response.hash, deliver_tx_result.code, deliver_tx_result.log - ))]; - } - - let mut result = vec![]; - for event in deliver_tx_result.events { - if let Some(ibc_ev) = from_tx_response_event(height, &event) { - result.push(ibc_ev); - } - } - result -} diff --git a/relayer/src/chain/cosmos/retry.rs b/relayer/src/chain/cosmos/retry.rs deleted file mode 100644 index 9a83b61a60..0000000000 --- a/relayer/src/chain/cosmos/retry.rs +++ /dev/null @@ -1,184 +0,0 @@ -use core::future::Future; -use core::pin::Pin; -use 
core::time::Duration; -use ibc_proto::google::protobuf::Any; -use std::thread; -use tendermint::abci::Code; -use tendermint_rpc::endpoint::broadcast::tx_sync::Response; -use tracing::{debug, error, span, warn, Level}; - -use crate::chain::cosmos::query::account::refresh_account; -use crate::chain::cosmos::tx::estimate_fee_and_send_tx; -use crate::chain::cosmos::types::account::Account; -use crate::chain::cosmos::types::config::TxConfig; -use crate::config::types::Memo; -use crate::error::Error; -use crate::keyring::KeyEntry; -use crate::sdk_error::sdk_error_from_tx_sync_error_code; -use crate::telemetry; - -// Maximum number of retries for send_tx in the case of -// an account sequence mismatch at broadcast step. -const MAX_ACCOUNT_SEQUENCE_RETRY: u64 = 1; - -// Backoff multiplier to apply while retrying in the case -// of account sequence mismatch. -const BACKOFF_MULTIPLIER_ACCOUNT_SEQUENCE_RETRY: u64 = 300; - -// The error "incorrect account sequence" is defined as the unique error code 32 in cosmos-sdk: -// https://github.com/cosmos/cosmos-sdk/blob/v0.44.0/types/errors/errors.go#L115-L117 -const INCORRECT_ACCOUNT_SEQUENCE_ERR: u32 = 32; - -/// Try to `send_tx` with retry on account sequence error. -/// An account sequence error can occur if the account sequence that -/// the relayer caches becomes outdated. This may happen if the relayer -/// wallet is used concurrently elsewhere, or when tx fees are mis-configured -/// leading to transactions hanging in the mempool. -/// -/// Account sequence mismatch error can occur at two separate steps: -/// 1. as Err variant, propagated from the `estimate_gas` step. -/// 2. as an Ok variant, with an Code::Err response, propagated from -/// the `broadcast_tx_sync` step. -/// -/// We treat both cases by re-fetching the account sequence number -/// from the full node. -/// Upon case #1, we do not retry submitting the same tx (retry happens -/// nonetheless at the worker `step` level). Upon case #2, we retry -/// submitting the same transaction. -pub async fn send_tx_with_account_sequence_retry( - config: &TxConfig, - key_entry: &KeyEntry, - account: &mut Account, - tx_memo: &Memo, - messages: Vec, - retry_counter: u64, -) -> Result { - crate::time!("send_tx_with_account_sequence_retry"); - - let _span = - span!(Level::ERROR, "send_tx_with_account_sequence_retry", id = %config.chain_id).entered(); - - telemetry!(msg_num, &config.chain_id, messages.len() as u64); - - do_send_tx_with_account_sequence_retry( - config, - key_entry, - account, - tx_memo, - messages, - retry_counter, - ) - .await -} - -// We have to do explicit return of `Box` because Rust -// do not currently support recursive async functions behind the -// `async fn` syntactic sugar. -fn do_send_tx_with_account_sequence_retry<'a>( - config: &'a TxConfig, - key_entry: &'a KeyEntry, - account: &'a mut Account, - tx_memo: &'a Memo, - messages: Vec, - retry_counter: u64, -) -> Pin> + 'a>> { - Box::pin(async move { - debug!( - "sending {} messages using account sequence {}", - messages.len(), - account.sequence, - ); - - let tx_result = - estimate_fee_and_send_tx(config, key_entry, account, tx_memo, messages.clone()).await; - - match tx_result { - // Gas estimation failed with acct. s.n. mismatch at estimate gas step. - // It indicates that the account sequence cached by hermes is stale. - // This can happen when the same account is used by another agent. 
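// A self-contained sketch of the `Box::pin` pattern used by
// `do_send_tx_with_account_sequence_retry` above: an `async fn` cannot call itself
// directly, so the recursive step is written as a plain function returning a boxed
// future. Only the shape of the signature mirrors the real code; the countdown body
// is a toy stand-in, and in the real function a lifetime parameter is also needed
// because of the `&mut Account` borrow. Driving the future requires any async
// executor (the surrounding code uses tokio).
use core::future::Future;
use core::pin::Pin;

fn countdown(n: u64) -> Pin<Box<dyn Future<Output = u64> + Send>> {
    Box::pin(async move {
        if n == 0 {
            0
        } else {
            // The recursive call goes through the boxed future, which is what makes
            // the recursion acceptable to the compiler.
            countdown(n - 1).await + 1
        }
    })
}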
- Err(e) if mismatch_account_sequence_number_error_requires_refresh(&e) => { - warn!("failed at estimate_gas step mismatching account sequence: dropping the tx & refreshing account sequence number"); - refresh_account(&config.grpc_address, &key_entry.account, account).await?; - // Note: propagating error here can lead to bug & dropped packets: - // https://github.com/informalsystems/ibc-rs/issues/1153 - // But periodic packet clearing will catch any dropped packets. - Err(e) - } - - // Gas estimation succeeded. Broadcasting failed with a retry-able error. - Ok(response) if response.code == Code::Err(INCORRECT_ACCOUNT_SEQUENCE_ERR) => { - if retry_counter < MAX_ACCOUNT_SEQUENCE_RETRY { - let retry_counter = retry_counter + 1; - warn!("failed at broadcast step with incorrect account sequence. retrying ({}/{})", - retry_counter, MAX_ACCOUNT_SEQUENCE_RETRY); - // Backoff & re-fetch the account s.n. - let backoff = retry_counter * BACKOFF_MULTIPLIER_ACCOUNT_SEQUENCE_RETRY; - - thread::sleep(Duration::from_millis(backoff)); - refresh_account(&config.grpc_address, &key_entry.account, account).await?; - - // Now retry. - do_send_tx_with_account_sequence_retry( - config, - key_entry, - account, - tx_memo, - messages, - retry_counter + 1, - ) - .await - } else { - // If after the max retry we still get an account sequence mismatch error, - // we ignore the error and return the original response to downstream. - // We do not return an error here, because the current convention - // let the caller handle error responses separately. - error!("failed due to account sequence errors. the relayer wallet may be used elsewhere concurrently."); - Ok(response) - } - } - - // Catch-all arm for the Ok variant. - // This is the case when gas estimation succeeded. - Ok(response) => { - match response.code { - // Gas estimation succeeded and broadcasting was successful. - Code::Ok => { - debug!("broadcast_tx_sync: {:?}", response); - - account.sequence.increment_mut(); - Ok(response) - } - - // Gas estimation succeeded, but broadcasting failed with unrecoverable error. - Code::Err(code) => { - // Do not increase the account s.n. if CheckTx failed. - // Log the error. - error!( - "broadcast_tx_sync: {:?}: diagnostic: {:?}", - response, - sdk_error_from_tx_sync_error_code(code) - ); - Ok(response) - } - } - } - - // Catch-all case for the Err variant. - // Gas estimation failure or other unrecoverable error, propagate. - Err(e) => Err(e), - } - }) -} - -/// Determine whether the given error yielded by `tx_simulate` -/// indicates hat the current sequence number cached in Hermes -/// is smaller than the full node's version of the s.n. and therefore -/// account needs to be refreshed. 
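// A small sketch of the retry arm above: the sleep grows linearly with the retry
// counter (e.g. 1 * 300 ms on the first retry, matching the constants at the top of
// this module), and retries stop once the counter reaches the configured maximum.
// The `work` closure is a hypothetical stand-in for the broadcast step.
use core::time::Duration;

fn linear_backoff(retry_counter: u64, multiplier_ms: u64) -> Duration {
    Duration::from_millis(retry_counter * multiplier_ms)
}

fn run_with_retries<E>(
    max_retries: u64,
    multiplier_ms: u64,
    mut work: impl FnMut() -> Result<(), E>,
) -> Result<(), E> {
    let mut retry_counter = 0;
    loop {
        match work() {
            Ok(()) => return Ok(()),
            // Out of budget: hand the last error back to the caller.
            Err(e) if retry_counter >= max_retries => return Err(e),
            Err(_) => {
                retry_counter += 1;
                // Back off before the next attempt.
                std::thread::sleep(linear_backoff(retry_counter, multiplier_ms));
            }
        }
    }
}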
-fn mismatch_account_sequence_number_error_requires_refresh(e: &Error) -> bool { - use crate::error::ErrorDetail::*; - - match e.detail() { - GrpcStatus(detail) => detail.is_account_sequence_mismatch_that_requires_refresh(), - _ => false, - } -} diff --git a/relayer/src/chain/cosmos/simulate.rs b/relayer/src/chain/cosmos/simulate.rs deleted file mode 100644 index ce249a44db..0000000000 --- a/relayer/src/chain/cosmos/simulate.rs +++ /dev/null @@ -1,33 +0,0 @@ -use ibc_proto::cosmos::tx::v1beta1::service_client::ServiceClient; -use ibc_proto::cosmos::tx::v1beta1::{SimulateRequest, SimulateResponse, Tx}; -use tonic::codegen::http::Uri; - -use crate::error::Error; - -pub async fn send_tx_simulate(grpc_address: &Uri, tx: Tx) -> Result { - crate::time!("send_tx_simulate"); - - // The `tx` field of `SimulateRequest` was deprecated in Cosmos SDK 0.43 in favor of `tx_bytes`. - let mut tx_bytes = vec![]; - prost::Message::encode(&tx, &mut tx_bytes) - .map_err(|e| Error::protobuf_encode(String::from("Transaction"), e))?; - - #[allow(deprecated)] - let req = SimulateRequest { - tx: Some(tx), // needed for simulation to go through with Cosmos SDK < 0.43 - tx_bytes, // needed for simulation to go through with Cosmos SDk >= 0.43 - }; - - let mut client = ServiceClient::connect(grpc_address.clone()) - .await - .map_err(Error::grpc_transport)?; - - let request = tonic::Request::new(req); - let response = client - .simulate(request) - .await - .map_err(Error::grpc_status)? - .into_inner(); - - Ok(response) -} diff --git a/relayer/src/chain/cosmos/tx.rs b/relayer/src/chain/cosmos/tx.rs deleted file mode 100644 index e6a0519e1f..0000000000 --- a/relayer/src/chain/cosmos/tx.rs +++ /dev/null @@ -1,53 +0,0 @@ -use ibc_proto::cosmos::tx::v1beta1::Fee; -use ibc_proto::google::protobuf::Any; -use tendermint_rpc::endpoint::broadcast::tx_sync::Response; -use tendermint_rpc::{Client, HttpClient, Url}; - -use crate::chain::cosmos::encode::sign_and_encode_tx; -use crate::chain::cosmos::estimate::estimate_tx_fees; -use crate::chain::cosmos::types::account::Account; -use crate::chain::cosmos::types::config::TxConfig; -use crate::config::types::Memo; -use crate::error::Error; -use crate::keyring::KeyEntry; - -pub async fn estimate_fee_and_send_tx( - config: &TxConfig, - key_entry: &KeyEntry, - account: &Account, - tx_memo: &Memo, - messages: Vec, -) -> Result { - let fee = estimate_tx_fees(config, key_entry, account, tx_memo, messages.clone()).await?; - - send_tx_with_fee(config, key_entry, account, tx_memo, messages, &fee).await -} - -async fn send_tx_with_fee( - config: &TxConfig, - key_entry: &KeyEntry, - account: &Account, - tx_memo: &Memo, - messages: Vec, - fee: &Fee, -) -> Result { - let tx_bytes = sign_and_encode_tx(config, key_entry, account, tx_memo, messages, fee)?; - - let response = broadcast_tx_sync(&config.rpc_client, &config.rpc_address, tx_bytes).await?; - - Ok(response) -} - -/// Perform a `broadcast_tx_sync`, and return the corresponding deserialized response data. 
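// A sketch of the backward-compatibility pattern used in `send_tx_simulate` above:
// newer Cosmos SDK versions read `tx_bytes`, older ones still read the deprecated
// `tx` field, so both are populated. `FakeTx` and `FakeSimulateRequest` are
// hypothetical stand-ins for the generated protobuf types, and the "encoding" step
// is simulated by cloning the payload.
#[derive(Clone)]
struct FakeTx {
    payload: Vec<u8>,
}

struct FakeSimulateRequest {
    #[deprecated(note = "SDK >= 0.43 reads tx_bytes instead")]
    tx: Option<FakeTx>,
    tx_bytes: Vec<u8>,
}

fn build_simulate_request(tx: FakeTx) -> FakeSimulateRequest {
    // Stand-in for the prost encoding step performed by the real code.
    let tx_bytes = tx.payload.clone();

    #[allow(deprecated)]
    let req = FakeSimulateRequest {
        tx: Some(tx), // consumed by Cosmos SDK < 0.43
        tx_bytes,     // consumed by Cosmos SDK >= 0.43
    };
    req
}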
-async fn broadcast_tx_sync( - rpc_client: &HttpClient, - rpc_address: &Url, - data: Vec, -) -> Result { - let response = rpc_client - .broadcast_tx_sync(data.into()) - .await - .map_err(|e| Error::rpc(rpc_address.clone(), e))?; - - Ok(response) -} diff --git a/relayer/src/chain/cosmos/types/account.rs b/relayer/src/chain/cosmos/types/account.rs deleted file mode 100644 index 5350c7b373..0000000000 --- a/relayer/src/chain/cosmos/types/account.rs +++ /dev/null @@ -1,70 +0,0 @@ -use core::fmt; -use ibc_proto::cosmos::auth::v1beta1::BaseAccount; - -/// Wrapper for account number and sequence number. -/// -/// More fields may be added later. -#[derive(Clone, Debug, PartialEq)] -pub struct Account { - // pub address: String, - // pub pub_key: Option, - pub number: AccountNumber, - pub sequence: AccountSequence, -} - -impl From for Account { - fn from(value: BaseAccount) -> Self { - Self { - number: AccountNumber::new(value.account_number), - sequence: AccountSequence::new(value.sequence), - } - } -} - -/// Newtype for account numbers -#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)] -pub struct AccountNumber(u64); - -impl AccountNumber { - pub fn new(number: u64) -> Self { - Self(number) - } - - pub fn to_u64(self) -> u64 { - self.0 - } -} - -impl fmt::Display for AccountNumber { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{:?}", self) - } -} - -/// Newtype for account sequence numbers -#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)] -pub struct AccountSequence(u64); - -impl AccountSequence { - pub fn new(sequence: u64) -> Self { - Self(sequence) - } - - pub fn to_u64(self) -> u64 { - self.0 - } - - pub fn increment(self) -> Self { - Self(self.0 + 1) - } - - pub fn increment_mut(&mut self) { - self.0 += 1 - } -} - -impl fmt::Display for AccountSequence { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{:?}", self) - } -} diff --git a/relayer/src/chain/cosmos/types/config.rs b/relayer/src/chain/cosmos/types/config.rs deleted file mode 100644 index fa2f924d2d..0000000000 --- a/relayer/src/chain/cosmos/types/config.rs +++ /dev/null @@ -1,44 +0,0 @@ -use core::str::FromStr; -use core::time::Duration; -use http::Uri; -use ibc::core::ics24_host::identifier::ChainId; -use tendermint_rpc::{HttpClient, Url}; - -use crate::chain::cosmos::types::gas::GasConfig; -use crate::config::{AddressType, ChainConfig}; -use crate::error::Error; - -#[derive(Debug, Clone)] -pub struct TxConfig { - pub chain_id: ChainId, - pub gas_config: GasConfig, - pub rpc_client: HttpClient, - pub rpc_address: Url, - pub grpc_address: Uri, - pub rpc_timeout: Duration, - pub address_type: AddressType, -} - -impl<'a> TryFrom<&'a ChainConfig> for TxConfig { - type Error = Error; - - fn try_from(config: &'a ChainConfig) -> Result { - let rpc_client = HttpClient::new(config.rpc_addr.clone()) - .map_err(|e| Error::rpc(config.rpc_addr.clone(), e))?; - - let grpc_address = Uri::from_str(&config.grpc_addr.to_string()) - .map_err(|e| Error::invalid_uri(config.grpc_addr.to_string(), e))?; - - let gas_config = GasConfig::from(config); - - Ok(Self { - chain_id: config.id.clone(), - gas_config, - rpc_client, - rpc_address: config.rpc_addr.clone(), - grpc_address, - rpc_timeout: config.rpc_timeout, - address_type: config.address_type.clone(), - }) - } -} diff --git a/relayer/src/chain/cosmos/types/gas.rs b/relayer/src/chain/cosmos/types/gas.rs deleted file mode 100644 index 79e1001772..0000000000 --- a/relayer/src/chain/cosmos/types/gas.rs +++ /dev/null @@ -1,80 
+0,0 @@ -use ibc_proto::cosmos::tx::v1beta1::Fee; - -use crate::chain::cosmos::calculate_fee; -use crate::config::{ChainConfig, GasPrice}; - -/// Default gas limit when submitting a transaction. -const DEFAULT_MAX_GAS: u64 = 400_000; - -/// Fraction of the estimated gas to add to the estimated gas amount when submitting a transaction. -const DEFAULT_GAS_PRICE_ADJUSTMENT: f64 = 0.1; - -const DEFAULT_FEE_GRANTER: &str = ""; - -#[derive(Debug, Clone)] -pub struct GasConfig { - pub default_gas: u64, - pub max_gas: u64, - pub gas_adjustment: f64, - pub gas_price: GasPrice, - pub max_fee: Fee, - pub fee_granter: String, -} - -impl<'a> From<&'a ChainConfig> for GasConfig { - fn from(config: &'a ChainConfig) -> Self { - Self { - default_gas: default_gas_from_config(config), - max_gas: max_gas_from_config(config), - gas_adjustment: gas_adjustment_from_config(config), - gas_price: config.gas_price.clone(), - max_fee: max_fee_from_config(config), - fee_granter: fee_granter_from_config(config), - } - } -} - -/// The default amount of gas the relayer is willing to pay for a transaction, -/// when it cannot simulate the tx and therefore estimate the gas amount needed. -pub fn default_gas_from_config(config: &ChainConfig) -> u64 { - config - .default_gas - .unwrap_or_else(|| max_gas_from_config(config)) -} - -/// The maximum amount of gas the relayer is willing to pay for a transaction -pub fn max_gas_from_config(config: &ChainConfig) -> u64 { - config.max_gas.unwrap_or(DEFAULT_MAX_GAS) -} - -/// The gas price adjustment -fn gas_adjustment_from_config(config: &ChainConfig) -> f64 { - config - .gas_adjustment - .unwrap_or(DEFAULT_GAS_PRICE_ADJUSTMENT) -} - -/// Get the fee granter address -fn fee_granter_from_config(config: &ChainConfig) -> String { - config - .fee_granter - .as_deref() - .unwrap_or(DEFAULT_FEE_GRANTER) - .to_string() -} - -fn max_fee_from_config(config: &ChainConfig) -> Fee { - let max_gas = max_gas_from_config(config); - - // The maximum fee the relayer pays for a transaction - let max_fee_in_coins = calculate_fee(max_gas, &config.gas_price); - - let fee_granter = fee_granter_from_config(config); - - Fee { - amount: vec![max_fee_in_coins], - gas_limit: max_gas, - payer: "".to_string(), - granter: fee_granter, - } -} diff --git a/relayer/src/chain/cosmos/types/mod.rs b/relayer/src/chain/cosmos/types/mod.rs deleted file mode 100644 index b8fc7adc9d..0000000000 --- a/relayer/src/chain/cosmos/types/mod.rs +++ /dev/null @@ -1,4 +0,0 @@ -pub mod account; -pub mod config; -pub mod gas; -pub mod tx; diff --git a/relayer/src/chain/cosmos/types/tx.rs b/relayer/src/chain/cosmos/types/tx.rs deleted file mode 100644 index 9509c9826c..0000000000 --- a/relayer/src/chain/cosmos/types/tx.rs +++ /dev/null @@ -1,18 +0,0 @@ -use ibc::events::IbcEvent; -use ibc_proto::cosmos::tx::v1beta1::{AuthInfo, TxBody}; -use tendermint_rpc::endpoint::broadcast::tx_sync::Response; - -pub struct SignedTx { - pub body: TxBody, - pub body_bytes: Vec, - pub auth_info: AuthInfo, - pub auth_info_bytes: Vec, - pub signatures: Vec>, -} - -pub struct TxSyncResult { - // the broadcast_tx_sync response - pub response: Response, - // the events generated by a Tx once executed - pub events: Vec, -} diff --git a/relayer/src/chain/cosmos/version.rs b/relayer/src/chain/cosmos/version.rs deleted file mode 100644 index e115226e4f..0000000000 --- a/relayer/src/chain/cosmos/version.rs +++ /dev/null @@ -1,212 +0,0 @@ -//! Utilities for extracting and parsing versioning information -//! of Cosmos-SDK networks. 
The extracted version specification -//! is captured in a domain-type semver format in [`Specs`]. - -use flex_error::define_error; -use tracing::trace; - -use ibc_proto::cosmos::base::tendermint::v1beta1::VersionInfo; - -/// Specifies the SDK, IBC-go, and Tendermint modules path, as expected -/// to appear in the application version information of a -/// Cosmos-SDK network. -/// -/// The module identification is captured in a [`Module`] -/// with the following structure as an example: -/// ```json,ignore -/// Module { -/// path: "github.com/cosmos/cosmos-sdk", -/// version: "v0.42.4", -/// sum: "h1:yaD4PyOx0LnyfiWasC5egg1U76lT83GRxjJjupPo7Gk=", -/// }, -/// ``` -const SDK_MODULE_NAME: &str = "cosmos/cosmos-sdk"; -const IBC_GO_MODULE_NAME: &str = "cosmos/ibc-go"; -const TENDERMINT_MODULE_NAME: &str = "tendermint/tendermint"; - -/// Captures the version(s) specification of different -/// modules of a network. -/// -/// Assumes that the network runs on Cosmos SDK. -/// Stores both the SDK version as well as -/// the IBC-go module version (if existing). -#[derive(Debug)] -pub struct Specs { - pub sdk_version: semver::Version, - pub ibc_go_version: Option, - pub tendermint_version: semver::Version, -} - -define_error! { - Error { - SdkModuleNotFound - { - address: String, - app: AppInfo, - } - |e| { format!("failed to find the SDK module dependency ('{}') for application {}", e.address, e.app) }, - - TendermintModuleNotFound - { - address: String, - app: AppInfo, - } - |e| { format!("failed to find the Tendermint dependency ('{}') for application {}", e.address, e.app) }, - - VersionParsingFailed - { - module_path: String, - raw_version: String, - cause: String, - app: AppInfo, - } - |e| { format!("failed parsing the module path ('{}') version number '{}' into a semver for application {}; cause: {}", - e.module_path, e.raw_version, e.app, e.cause) }, - } -} - -impl TryFrom for Specs { - type Error = Error; - - fn try_from(raw_version: VersionInfo) -> Result { - // Get the Cosmos SDK version - let sdk_version = parse_sdk_version(&raw_version)?; - let ibc_go_version = parse_ibc_go_version(&raw_version)?; - let tendermint_version = parse_tendermint_version(&raw_version)?; - - trace!( - application = %raw_version.app_name, - version = %raw_version.version, - git_commit = %raw_version.git_commit, - sdk_version = %sdk_version, - ibc_go_status = ?ibc_go_version, - tendermint_version = %tendermint_version, - "parsed version specification" - ); - - Ok(Self { - sdk_version, - ibc_go_version, - tendermint_version, - }) - } -} - -fn parse_sdk_version(version_info: &VersionInfo) -> Result { - let module = version_info - .build_deps - .iter() - .find(|&m| m.path.contains(SDK_MODULE_NAME)) - .ok_or_else(|| { - Error::sdk_module_not_found(SDK_MODULE_NAME.to_string(), AppInfo::from(version_info)) - })?; - - // The raw version number has a leading 'v', trim it out; - let plain_version = module.version.trim_start_matches('v'); - - // Parse the module version - let mut version = semver::Version::parse(plain_version).map_err(|e| { - Error::version_parsing_failed( - module.path.clone(), - module.version.clone(), - e.to_string(), - AppInfo::from(version_info), - ) - })?; - - // Remove the pre-release version to ensure we treat pre-releases of the SDK - // as their normal version, eg. 0.42.0-rc2 should satisfy >=0.41.3, <= 0.42.6. 
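// A compact sketch of the normalization performed at this point, using the same
// `semver` crate API as the surrounding code: trim the leading 'v', parse, then clear
// the pre-release tag so that e.g. "v0.42.0-rc2" compares like plain 0.42.0 against
// version requirements. The helper name is illustrative, not part of the module.
fn normalize_module_version(raw: &str) -> Result<semver::Version, semver::Error> {
    let plain = raw.trim_start_matches('v');
    let mut version = semver::Version::parse(plain)?;
    version.pre = semver::Prerelease::EMPTY;
    Ok(version)
}

// normalize_module_version("v0.42.0-rc2") == Ok(semver::Version::new(0, 42, 0))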
- version.pre = semver::Prerelease::EMPTY; - - Ok(version) -} - -fn parse_ibc_go_version(version_info: &VersionInfo) -> Result, Error> { - // Find the Ibc-Go module - match version_info - .build_deps - .iter() - .find(|&m| m.path.contains(IBC_GO_MODULE_NAME)) - { - // If binary lacks the ibc-go dependency it is _not_ an error, - // we support networks without the standalone ibc-go module; typically these - // are SDK 0.42-based networks, which will eventually no longer be supported. - None => Ok(None), - Some(ibc_module) => { - // The raw version number has a leading 'v', trim it out; - let plain_version = ibc_module.version.trim_start_matches('v'); - - // Parse the Ibc-Go module version - semver::Version::parse(plain_version) - .map(|mut version| { - // Remove the pre-release identifier from the semver - version.pre = semver::Prerelease::EMPTY; - Some(version) - }) - .map_err(|e| { - Error::version_parsing_failed( - ibc_module.path.clone(), - ibc_module.version.clone(), - e.to_string(), - AppInfo::from(version_info), - ) - }) - } - } -} - -fn parse_tendermint_version(version_info: &VersionInfo) -> Result { - let module = version_info - .build_deps - .iter() - .find(|&m| m.path.contains(TENDERMINT_MODULE_NAME)) - .ok_or_else(|| { - Error::tendermint_module_not_found( - TENDERMINT_MODULE_NAME.to_string(), - AppInfo::from(version_info), - ) - })?; - - // The raw version number has a leading 'v', trim it out; - let plain_version = module.version.trim_start_matches('v'); - - // Parse the module version - let mut version = semver::Version::parse(plain_version).map_err(|e| { - Error::version_parsing_failed( - module.path.clone(), - module.version.clone(), - e.to_string(), - AppInfo::from(version_info), - ) - })?; - - // Remove the pre-release version to ensure we don't give special treatment to pre-releases. - version.pre = semver::Prerelease::EMPTY; - - Ok(version) -} - -/// Helper struct to capture all the reported information of an -/// IBC application, e.g., `gaiad`. -#[derive(Clone, Debug)] -pub struct AppInfo { - app_name: String, - version: String, - git_commit: String, -} - -impl core::fmt::Display for AppInfo { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - write!(f, "{}:{}-{}", self.app_name, self.version, self.git_commit) - } -} - -impl From<&VersionInfo> for AppInfo { - fn from(vi: &VersionInfo) -> Self { - Self { - app_name: vi.app_name.clone(), - version: vi.version.clone(), - git_commit: vi.git_commit.clone(), - } - } -} diff --git a/relayer/src/chain/cosmos/wait.rs b/relayer/src/chain/cosmos/wait.rs deleted file mode 100644 index 953395fef2..0000000000 --- a/relayer/src/chain/cosmos/wait.rs +++ /dev/null @@ -1,114 +0,0 @@ -use core::time::Duration; -use ibc::core::ics24_host::identifier::ChainId; -use ibc::events::IbcEvent; -use ibc::query::{QueryTxHash, QueryTxRequest}; -use itertools::Itertools; -use std::thread; -use std::time::Instant; -use tendermint_rpc::{HttpClient, Url}; -use tracing::{info, trace}; - -use crate::chain::cosmos::query::tx::query_txs; -use crate::chain::cosmos::types::tx::TxSyncResult; -use crate::error::Error; - -const WAIT_BACKOFF: Duration = Duration::from_millis(300); - -/// Given a vector of `TxSyncResult` elements, -/// each including a transaction response hash for one or more messages, periodically queries the chain -/// with the transaction hashes to get the list of IbcEvents included in those transactions. 
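// A stripped-down sketch of the wait loop implemented by `wait_for_block_commits`
// below: poll a condition, sleeping a fixed backoff between attempts, and give up
// once the elapsed time exceeds the timeout. The `done` closure stands in for
// "all tx results found" and the error string for `Error::tx_no_confirmation()`.
use std::time::{Duration, Instant};

fn wait_until(
    timeout: Duration,
    backoff: Duration,
    mut done: impl FnMut() -> bool,
) -> Result<(), &'static str> {
    let start_time = Instant::now();
    loop {
        if done() {
            return Ok(());
        }
        if start_time.elapsed() > timeout {
            return Err("no confirmation before timeout");
        }
        std::thread::sleep(backoff);
    }
}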
-pub async fn wait_for_block_commits( - chain_id: &ChainId, - rpc_client: &HttpClient, - rpc_address: &Url, - rpc_timeout: &Duration, - tx_sync_results: &mut [TxSyncResult], -) -> Result<(), Error> { - let start_time = Instant::now(); - - let hashes = tx_sync_results - .iter() - .map(|res| res.response.hash.to_string()) - .join(", "); - - info!( - id = %chain_id, - "wait_for_block_commits: waiting for commit of tx hashes(s) {}", - hashes - ); - - loop { - let elapsed = start_time.elapsed(); - - if all_tx_results_found(tx_sync_results) { - trace!( - id = %chain_id, - "wait_for_block_commits: retrieved {} tx results after {}ms", - tx_sync_results.len(), - elapsed.as_millis(), - ); - - return Ok(()); - } else if &elapsed > rpc_timeout { - return Err(Error::tx_no_confirmation()); - } else { - thread::sleep(WAIT_BACKOFF); - - for tx_sync_result in tx_sync_results.iter_mut() { - // ignore error - let _ = - update_tx_sync_result(chain_id, rpc_client, rpc_address, tx_sync_result).await; - } - } - } -} - -async fn update_tx_sync_result( - chain_id: &ChainId, - rpc_client: &HttpClient, - rpc_address: &Url, - tx_sync_result: &mut TxSyncResult, -) -> Result<(), Error> { - let TxSyncResult { response, events } = tx_sync_result; - - // If this transaction was not committed, determine whether it was because it failed - // or because it hasn't been committed yet. - if empty_event_present(events) { - // If the transaction failed, replace the events with an error, - // so that we don't attempt to resolve the transaction later on. - if response.code.value() != 0 { - *events = vec![IbcEvent::ChainError(format!( - "deliver_tx on chain {} for Tx hash {} reports error: code={:?}, log={:?}", - chain_id, response.hash, response.code, response.log - ))]; - } - - // Otherwise, try to resolve transaction hash to the corresponding events. - let events_per_tx = query_txs( - chain_id, - rpc_client, - rpc_address, - QueryTxRequest::Transaction(QueryTxHash(response.hash)), - ) - .await?; - - // If we get events back, progress was made, so we replace the events - // with the new ones. in both cases we will check in the next iteration - // whether or not the transaction was fully committed. 
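// A simplified sketch of how the events attached to a `TxSyncResult` are used as a
// progress marker in this loop: an `Empty` placeholder means the transaction has not
// been confirmed yet, a `ChainError` means it failed, anything else means the events
// were resolved. `SimpleEvent` is a hypothetical stand-in for `IbcEvent`.
enum SimpleEvent {
    Empty,
    ChainError(String),
    Confirmed(String),
}

#[derive(Debug, PartialEq)]
enum TxStatus {
    Pending,
    Failed,
    Committed,
}

fn classify(events: &[SimpleEvent]) -> TxStatus {
    if events.iter().any(|ev| matches!(ev, SimpleEvent::Empty)) {
        TxStatus::Pending
    } else if events.iter().any(|ev| matches!(ev, SimpleEvent::ChainError(_))) {
        TxStatus::Failed
    } else {
        TxStatus::Committed
    }
}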
- if !events_per_tx.is_empty() { - *events = events_per_tx; - } - } - - Ok(()) -} - -fn empty_event_present(events: &[IbcEvent]) -> bool { - events.iter().any(|ev| matches!(ev, IbcEvent::Empty(_))) -} - -fn all_tx_results_found(tx_sync_results: &[TxSyncResult]) -> bool { - tx_sync_results - .iter() - .all(|r| !empty_event_present(&r.events)) -} diff --git a/relayer/src/chain/counterparty.rs b/relayer/src/chain/counterparty.rs deleted file mode 100644 index acc75b26fd..0000000000 --- a/relayer/src/chain/counterparty.rs +++ /dev/null @@ -1,614 +0,0 @@ -use std::collections::HashSet; - -use ibc::core::ics04_channel::packet::Sequence; -use serde::{Deserialize, Serialize}; -use tracing::{error, trace}; - -use super::requests::{ - IncludeProof, PageRequest, QueryChannelRequest, QueryClientConnectionsRequest, - QueryClientStateRequest, QueryConnectionRequest, QueryPacketAcknowledgementsRequest, - QueryUnreceivedAcksRequest, QueryUnreceivedPacketsRequest, -}; -use super::{ - handle::ChainHandle, - requests::{QueryConnectionChannelsRequest, QueryPacketCommitmentsRequest}, -}; -use crate::channel::ChannelError; -use crate::path::PathIdentifiers; -use crate::supervisor::Error; -use ibc::{ - core::{ - ics02_client::client_state::{ClientState, IdentifiedAnyClientState}, - ics03_connection::connection::{ - ConnectionEnd, IdentifiedConnectionEnd, State as ConnectionState, - }, - ics04_channel::channel::{IdentifiedChannelEnd, State}, - ics24_host::identifier::{ - ChainId, ChannelId, ClientId, ConnectionId, PortChannelId, PortId, - }, - }, - Height, -}; - -pub fn counterparty_chain_from_connection( - src_chain: &impl ChainHandle, - src_connection_id: &ConnectionId, -) -> Result { - let (connection_end, _) = src_chain - .query_connection( - QueryConnectionRequest { - connection_id: src_connection_id.clone(), - height: Height::zero(), - }, - IncludeProof::No, - ) - .map_err(Error::relayer)?; - - let client_id = connection_end.client_id(); - let (client_state, _) = src_chain - .query_client_state( - QueryClientStateRequest { - client_id: client_id.clone(), - height: Height::zero(), - }, - IncludeProof::No, - ) - .map_err(Error::relayer)?; - - trace!( - chain_id=%src_chain.id(), connection_id=%src_connection_id, - "counterparty chain: {}", client_state.chain_id() - ); - Ok(client_state.chain_id()) -} - -fn connection_on_destination( - connection_id_on_source: &ConnectionId, - counterparty_client_id: &ClientId, - counterparty_chain: &impl ChainHandle, -) -> Result, Error> { - let counterparty_connections = counterparty_chain - .query_client_connections(QueryClientConnectionsRequest { - client_id: counterparty_client_id.clone(), - }) - .map_err(Error::relayer)?; - - for counterparty_connection in counterparty_connections.into_iter() { - let (counterparty_connection_end, _) = counterparty_chain - .query_connection( - QueryConnectionRequest { - connection_id: counterparty_connection.clone(), - height: Height::zero(), - }, - IncludeProof::No, - ) - .map_err(Error::relayer)?; - - let local_connection_end = &counterparty_connection_end.counterparty(); - if let Some(local_connection_id) = local_connection_end.connection_id() { - if local_connection_id == connection_id_on_source { - return Ok(Some(counterparty_connection_end)); - } - } - } - Ok(None) -} - -pub fn connection_state_on_destination( - connection: &IdentifiedConnectionEnd, - counterparty_chain: &impl ChainHandle, -) -> Result { - if let Some(remote_connection_id) = connection.connection_end.counterparty().connection_id() { - let (connection_end, _) = 
counterparty_chain - .query_connection( - QueryConnectionRequest { - connection_id: remote_connection_id.clone(), - height: Height::zero(), - }, - IncludeProof::No, - ) - .map_err(Error::relayer)?; - - Ok(connection_end.state) - } else { - // The remote connection id (used on `counterparty_chain`) is unknown. - // Try to retrieve this id by looking at client connections. - let counterparty_client_id = connection.connection_end.counterparty().client_id(); - - let dst_connection = connection_on_destination( - &connection.connection_id, - counterparty_client_id, - counterparty_chain, - )?; - - dst_connection.map_or_else( - || Ok(ConnectionState::Uninitialized), - |remote_connection| Ok(remote_connection.state), - ) - } -} - -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct ChannelConnectionClient { - pub channel: IdentifiedChannelEnd, - pub connection: IdentifiedConnectionEnd, - pub client: IdentifiedAnyClientState, -} - -impl ChannelConnectionClient { - pub fn new( - channel: IdentifiedChannelEnd, - connection: IdentifiedConnectionEnd, - client: IdentifiedAnyClientState, - ) -> Self { - Self { - channel, - connection, - client, - } - } -} - -/// Returns the [`ChannelConnectionClient`] associated with the -/// provided port and channel id. -pub fn channel_connection_client( - chain: &impl ChainHandle, - port_id: &PortId, - channel_id: &ChannelId, -) -> Result { - let (channel_end, _) = chain - .query_channel( - QueryChannelRequest { - port_id: port_id.clone(), - channel_id: *channel_id, - height: Height::zero(), - }, - IncludeProof::No, - ) - .map_err(Error::relayer)?; - - if channel_end.state_matches(&State::Uninitialized) { - return Err(Error::channel_uninitialized( - port_id.clone(), - *channel_id, - chain.id(), - )); - } - - let connection_id = channel_end - .connection_hops() - .first() - .ok_or_else(|| Error::missing_connection_hops(*channel_id, chain.id()))?; - - let (connection_end, _) = chain - .query_connection( - QueryConnectionRequest { - connection_id: connection_id.clone(), - height: Height::zero(), - }, - IncludeProof::No, - ) - .map_err(Error::relayer)?; - - if !connection_end.is_open() { - return Err(Error::connection_not_open( - connection_id.clone(), - *channel_id, - chain.id(), - )); - } - - let client_id = connection_end.client_id(); - let (client_state, _) = chain - .query_client_state( - QueryClientStateRequest { - client_id: client_id.clone(), - height: Height::zero(), - }, - IncludeProof::No, - ) - .map_err(Error::relayer)?; - - let client = IdentifiedAnyClientState::new(client_id.clone(), client_state); - let connection = IdentifiedConnectionEnd::new(connection_id.clone(), connection_end); - let channel = IdentifiedChannelEnd::new(port_id.clone(), *channel_id, channel_end); - - Ok(ChannelConnectionClient::new(channel, connection, client)) -} - -pub fn counterparty_chain_from_channel( - src_chain: &impl ChainHandle, - src_channel_id: &ChannelId, - src_port_id: &PortId, -) -> Result { - channel_connection_client(src_chain, src_port_id, src_channel_id) - .map(|c| c.client.client_state.chain_id()) -} - -fn fetch_channel_on_destination( - port_id: &PortId, - channel_id: &ChannelId, - counterparty_chain: &impl ChainHandle, - remote_connection_id: &ConnectionId, -) -> Result, Error> { - let counterparty_channels = counterparty_chain - .query_connection_channels(QueryConnectionChannelsRequest { - connection_id: remote_connection_id.clone(), - pagination: Some(PageRequest::all()), - }) - .map_err(Error::relayer)?; - - for counterparty_channel in 
counterparty_channels.into_iter() { - let local_channel_end = &counterparty_channel.channel_end.remote; - if let Some(local_channel_id) = local_channel_end.channel_id() { - if local_channel_id == channel_id && local_channel_end.port_id() == port_id { - return Ok(Some(counterparty_channel)); - } - } - } - Ok(None) -} - -pub fn channel_state_on_destination( - channel: &IdentifiedChannelEnd, - connection: &IdentifiedConnectionEnd, - counterparty_chain: &impl ChainHandle, -) -> Result { - let remote_channel = channel_on_destination(channel, connection, counterparty_chain)?; - Ok(remote_channel.map_or_else( - || State::Uninitialized, - |remote_channel| remote_channel.channel_end.state, - )) -} - -pub fn channel_on_destination( - channel: &IdentifiedChannelEnd, - connection: &IdentifiedConnectionEnd, - counterparty_chain: &impl ChainHandle, -) -> Result, Error> { - if let Some(remote_channel_id) = channel.channel_end.counterparty().channel_id() { - let counterparty = counterparty_chain - .query_channel( - QueryChannelRequest { - port_id: channel.channel_end.counterparty().port_id().clone(), - channel_id: *remote_channel_id, - height: Height::zero(), - }, - IncludeProof::No, - ) - .map(|(c, _)| IdentifiedChannelEnd { - port_id: channel.channel_end.counterparty().port_id().clone(), - channel_id: *remote_channel_id, - channel_end: c, - }) - .map_err(Error::relayer)?; - - Ok(Some(counterparty)) - } else if let Some(remote_connection_id) = connection.end().counterparty().connection_id() { - fetch_channel_on_destination( - &channel.port_id, - &channel.channel_id, - counterparty_chain, - remote_connection_id, - ) - } else { - Ok(None) - } -} - -/// Queries a channel end on a [`ChainHandle`], and verifies -/// that the counterparty field on that channel end matches an -/// expected counterparty. -/// Returns `Ok` if the counterparty matches, and `Err` otherwise. -pub fn check_channel_counterparty( - target_chain: impl ChainHandle, - target_pchan: &PortChannelId, - expected: &PortChannelId, -) -> Result<(), ChannelError> { - let (channel_end_dst, _) = target_chain - .query_channel( - QueryChannelRequest { - port_id: target_pchan.port_id.clone(), - channel_id: target_pchan.channel_id, - height: Height::zero(), - }, - IncludeProof::No, - ) - .map_err(|e| ChannelError::query(target_chain.id(), e))?; - - let counterparty = channel_end_dst.remote; - match counterparty.channel_id { - Some(actual_channel_id) => { - let actual = PortChannelId { - channel_id: actual_channel_id, - port_id: counterparty.port_id, - }; - if &actual != expected { - return Err(ChannelError::mismatch_channel_ends( - target_chain.id(), - target_pchan.clone(), - expected.clone(), - actual, - )); - } - } - None => { - error!( - "channel {} on chain {} has no counterparty channel id ", - target_pchan, - target_chain.id() - ); - return Err(ChannelError::incomplete_channel_state( - target_chain.id(), - target_pchan.clone(), - )); - } - } - - Ok(()) -} - -/// Returns the sequences of the packet commitments on a given chain and channel (port_id + channel_id). 
-/// These are the sequences of the packets that were either: -/// - not yet received by the counterparty chain, or -/// - received on counterparty chain but not yet acknowledged by this chain, -pub fn commitments_on_chain( - chain: &impl ChainHandle, - port_id: &PortId, - channel_id: &ChannelId, -) -> Result<(Vec, Height), Error> { - // get the packet commitments on the counterparty/ source chain - let (mut commit_sequences, response_height) = chain - .query_packet_commitments(QueryPacketCommitmentsRequest { - port_id: port_id.clone(), - channel_id: *channel_id, - pagination: Some(PageRequest::all()), - }) - .map_err(Error::relayer)?; - - commit_sequences.sort_unstable(); - - Ok((commit_sequences, response_height)) -} - -/// Returns the sequences of the packets that were sent on the counterparty chain but for which -/// `MsgRecvPacket`-s have not been received on a given chain and channel (port_id + channel_id) -pub fn unreceived_packets_sequences( - chain: &impl ChainHandle, - port_id: &PortId, - channel_id: &ChannelId, - commitments_on_counterparty: Vec, -) -> Result, Error> { - if commitments_on_counterparty.is_empty() { - return Ok(vec![]); - } - - chain - .query_unreceived_packets(QueryUnreceivedPacketsRequest { - port_id: port_id.clone(), - channel_id: *channel_id, - packet_commitment_sequences: commitments_on_counterparty, - }) - .map_err(Error::relayer) -} - -/// Returns the sequences of the written acknowledgments on a given chain and channel (port_id + channel_id), out of -/// the commitments still present on the counterparty chain. -pub fn packet_acknowledgements( - chain: &impl ChainHandle, - port_id: &PortId, - channel_id: &ChannelId, - commit_sequences: Vec, -) -> Result<(Vec, Height), Error> { - let commit_set = commit_sequences.iter().cloned().collect::>(); - - // Get the packet acknowledgments on counterparty/source chain - let (mut acked_sequences, response_height) = chain - .query_packet_acknowledgements(QueryPacketAcknowledgementsRequest { - port_id: port_id.clone(), - channel_id: *channel_id, - pagination: Some(PageRequest::all()), - packet_commitment_sequences: commit_sequences, - }) - .map_err(Error::relayer)?; - - acked_sequences.retain(|s| commit_set.contains(s)); - acked_sequences.sort_unstable(); - - Ok((acked_sequences, response_height)) -} - -/// Returns the sequences of the packets that were sent on the chain and for which: -/// - `MsgRecvPacket`-s have been received on the counterparty chain but -/// - `MsgAcknowledgement`-s have NOT been received by the chain -pub fn unreceived_acknowledgements_sequences( - chain: &impl ChainHandle, - port_id: &PortId, - channel_id: &ChannelId, - acks_on_counterparty: Vec, -) -> Result, Error> { - if acks_on_counterparty.is_empty() { - return Ok(vec![]); - } - - chain - .query_unreceived_acknowledgements(QueryUnreceivedAcksRequest { - port_id: port_id.clone(), - channel_id: *channel_id, - packet_ack_sequences: acks_on_counterparty, - }) - .map_err(Error::relayer) -} - -/// Given a channel, this method returns: -/// - The sequences of the packets _sent_ on the counterparty chain and not _received_ by -/// the (target) chain. -/// - The counterparty height at which the query was made. -/// -/// Expects an [`IdentifiedChannelEnd`] and a pair of [`ChainHandle`]s representing the chains -/// at the two ends of this channel, called a (target) chain and a counterparty chain. -/// -/// ### Implementation details -/// This method involves two separate queries: -/// -/// 1. 
It performs a [`QueryPacketCommitmentsRequest`] on the counterparty chain. -/// This query returns the sequences for the packets with stored -/// commitments in the counterparty chain's state, and the height at which the query was made -/// -/// This step relies on [`commitments_on_chain`], see that method for more details. -/// -/// 2. It performs a [`QueryUnreceivedPacketsRequest`] on the (target) chain. -/// Given the sequences of packet commitments on the counterparty (query #1), -/// this query returns the sequences of the packets which the target -/// chain has not yet _received_. -/// -/// This step relies on [`unreceived_packets_sequences`], see that method for more details. -/// -pub fn unreceived_packets( - chain: &impl ChainHandle, - counterparty_chain: &impl ChainHandle, - path: &PathIdentifiers, -) -> Result<(Vec, Height), Error> { - let (commit_sequences, h) = commitments_on_chain( - counterparty_chain, - &path.counterparty_port_id, - &path.counterparty_channel_id, - )?; - - let packet_seq_nrs = - unreceived_packets_sequences(chain, &path.port_id, &path.channel_id, commit_sequences)?; - - Ok((packet_seq_nrs, h)) -} - -pub fn acknowledgements_on_chain( - chain: &impl ChainHandle, - counterparty_chain: &impl ChainHandle, - channel: &IdentifiedChannelEnd, -) -> Result<(Vec, Height), Error> { - let counterparty = channel.channel_end.counterparty(); - let counterparty_channel_id = counterparty - .channel_id - .as_ref() - .ok_or_else(Error::missing_counterparty_channel_id)?; - - let (commitments_on_counterparty, _) = commitments_on_chain( - counterparty_chain, - &counterparty.port_id, - counterparty_channel_id, - )?; - - let (sequences, height) = packet_acknowledgements( - chain, - &channel.port_id, - &channel.channel_id, - commitments_on_counterparty, - )?; - - Ok((sequences, height)) -} - -/// Given a channel, this method returns: -/// - The sequences of all packets _received on the counterparty chain and not _acknowledged_ by -/// the (target) chain. -/// - The counterparty height at which the query was made. -/// -/// Expects an [`IdentifiedChannelEnd`] and a pair of [`ChainHandle`]s representing the chains -/// at the two ends of this channel, called a (target) chain and a counterparty chain. -/// -/// ### Implementation details -/// This method involves two separate queries: -/// -/// 1. It performs a [`QueryPacketCommitmentsRequest`] on the target chain. -/// This query returns the sequences for the packets with stored -/// commitments in the target chain's state, and the height at which the query was made -/// -/// This step relies on [`commitments_on_chain`], see that method for more details. -/// -/// 2. It performs a [`QueryPacketAcknowledgementsRequest`] on the counterparty chain. -/// Given the sequences of packet commitments on the target chain (query #1), -/// this query returns the sequences of the packets which the counterparty chain has -/// _acknowledged_. -/// -/// This step relies on [`packet_acknowledgements`], see that method for more details. -/// -/// 3. It performs a [`QueryUnreceivedAcksRequest`] on the target chain. -/// Given the sequences of packet acknowledgements on the counterparty (step #2), -/// this query fetches the subset for which acknowledgements have not been -/// received by the target chain. -/// This step relies on [`unreceived_acknowledgements_sequences`]. 
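// A set-based sketch of the three-step pipeline described above, with plain u64
// sequence numbers standing in for `Sequence` and in-memory sets standing in for the
// chain queries performed by the real helpers.
use std::collections::HashSet;

fn pending_ack_sequences(
    commitments_on_chain: &HashSet<u64>,         // step 1: commitments still on the target chain
    acks_written_on_counterparty: &HashSet<u64>, // step 2 input: acks stored by the counterparty
    acks_received_on_chain: &HashSet<u64>,       // step 3 input: acks the target chain has processed
) -> Vec<u64> {
    let mut pending: Vec<u64> = acks_written_on_counterparty
        .iter()
        .copied()
        // Step 2: only acks that still correspond to an outstanding commitment matter.
        .filter(|seq| commitments_on_chain.contains(seq))
        // Step 3: keep those whose MsgAcknowledgement has not reached the target chain.
        .filter(|seq| !acks_received_on_chain.contains(seq))
        .collect();
    pending.sort_unstable();
    pending
}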
-pub fn unreceived_acknowledgements( - chain: &impl ChainHandle, - counterparty_chain: &impl ChainHandle, - path: &PathIdentifiers, -) -> Result<(Vec, Height), Error> { - let (commitments_on_src, _) = commitments_on_chain(chain, &path.port_id, &path.channel_id)?; - - let (acks_on_counterparty, src_response_height) = packet_acknowledgements( - counterparty_chain, - &path.counterparty_port_id, - &path.counterparty_channel_id, - commitments_on_src, - )?; - - let sns = unreceived_acknowledgements_sequences( - chain, - &path.port_id, - &path.channel_id, - acks_on_counterparty, - )?; - - Ok((sns, src_response_height)) -} - -/// A structure to display pending packet commitment IDs -/// at one end of a channel. -#[derive(Debug, Serialize)] -pub struct PendingPackets { - /// Not yet received on the counterparty chain. - pub unreceived_packets: Vec, - /// Received on the counterparty chain, - /// but the acknowledgement is not yet received on the local chain. - pub unreceived_acks: Vec, -} - -pub fn pending_packet_summary( - chain: &impl ChainHandle, - counterparty_chain: &impl ChainHandle, - channel: &IdentifiedChannelEnd, -) -> Result { - let counterparty = channel.channel_end.counterparty(); - let counterparty_channel_id = counterparty - .channel_id - .as_ref() - .ok_or_else(Error::missing_counterparty_channel_id)?; - - let (commitments_on_src, _) = - commitments_on_chain(chain, &channel.port_id, &channel.channel_id)?; - - let unreceived = unreceived_packets_sequences( - counterparty_chain, - &counterparty.port_id, - counterparty_channel_id, - commitments_on_src.clone(), - )?; - - let (acks_on_counterparty, _) = packet_acknowledgements( - counterparty_chain, - &counterparty.port_id, - counterparty_channel_id, - commitments_on_src, - )?; - - let pending_acks = unreceived_acknowledgements_sequences( - chain, - &channel.port_id, - &channel.channel_id, - acks_on_counterparty, - )?; - - Ok(PendingPackets { - unreceived_packets: unreceived, - unreceived_acks: pending_acks, - }) -} diff --git a/relayer/src/chain/endpoint.rs b/relayer/src/chain/endpoint.rs deleted file mode 100644 index b63d627f06..0000000000 --- a/relayer/src/chain/endpoint.rs +++ /dev/null @@ -1,578 +0,0 @@ -use alloc::sync::Arc; -use core::convert::TryFrom; -use ibc::core::ics23_commitment::merkle::MerkleProof; - -use tokio::runtime::Runtime as TokioRuntime; - -use ibc::core::ics02_client::client_consensus::{ - AnyConsensusState, AnyConsensusStateWithHeight, ConsensusState, -}; -use ibc::core::ics02_client::client_state::{ - AnyClientState, ClientState, IdentifiedAnyClientState, -}; -use ibc::core::ics02_client::header::Header; -use ibc::core::ics03_connection::connection::{ConnectionEnd, IdentifiedConnectionEnd, State}; -use ibc::core::ics03_connection::version::{get_compatible_versions, Version}; -use ibc::core::ics04_channel::channel::{ChannelEnd, IdentifiedChannelEnd}; -use ibc::core::ics04_channel::packet::{PacketMsgType, Sequence}; -use ibc::core::ics23_commitment::commitment::{CommitmentPrefix, CommitmentProofBytes}; -use ibc::core::ics24_host::identifier::{ChainId, ChannelId, ClientId, ConnectionId, PortId}; -use ibc::events::IbcEvent; -use ibc::proofs::{ConsensusProof, Proofs}; -use ibc::query::{QueryBlockRequest, QueryTxRequest}; -use ibc::signer::Signer; -use ibc::timestamp::Timestamp; -use ibc::Height as ICSHeight; -use tendermint_rpc::endpoint::broadcast::tx_sync::Response as TxResponse; - -use crate::account::Balance; -use crate::chain::client::ClientSettings; -use crate::chain::requests::{ - 
QueryChannelClientStateRequest, QueryChannelRequest, QueryChannelsRequest, - QueryClientConnectionsRequest, QueryClientStateRequest, QueryClientStatesRequest, - QueryConnectionChannelsRequest, QueryConnectionRequest, QueryConnectionsRequest, - QueryConsensusStateRequest, QueryConsensusStatesRequest, QueryHostConsensusStateRequest, - QueryNextSequenceReceiveRequest, QueryPacketAcknowledgementsRequest, - QueryPacketCommitmentsRequest, QueryUnreceivedAcksRequest, QueryUnreceivedPacketsRequest, - QueryUpgradedClientStateRequest, QueryUpgradedConsensusStateRequest, -}; -use crate::chain::tracking::TrackedMsgs; -use crate::config::ChainConfig; -use crate::connection::ConnectionMsgType; -use crate::error::{Error, QUERY_PROOF_EXPECT_MSG}; -use crate::event::monitor::{EventReceiver, TxMonitorCmd}; -use crate::keyring::{KeyEntry, KeyRing}; -use crate::light_client::LightClient; - -use super::requests::{ - IncludeProof, QueryPacketAcknowledgementRequest, QueryPacketCommitmentRequest, - QueryPacketReceiptRequest, -}; - -/// The result of a health check. -#[derive(Debug)] -pub enum HealthCheck { - Healthy, - Unhealthy(Box), -} - -/// The result of the application status query. -#[derive(Clone, Debug)] -pub struct ChainStatus { - pub height: ICSHeight, - pub timestamp: Timestamp, -} - -/// Defines a blockchain as understood by the relayer -pub trait ChainEndpoint: Sized { - /// Type of light blocks for this chain - type LightBlock: Send + Sync; - - /// Type of headers for this chain - type Header: Header; - - /// Type of consensus state for this chain - type ConsensusState: ConsensusState; - - /// Type of the client state for this chain - type ClientState: ClientState; - - type LightClient: LightClient; - - /// Constructs the chain - fn bootstrap(config: ChainConfig, rt: Arc) -> Result; - - #[allow(clippy::type_complexity)] - /// Initializes and returns the light client (if any) associated with this chain. - fn init_light_client(&self) -> Result; - - /// Initializes and returns the event monitor (if any) associated with this chain. - fn init_event_monitor( - &self, - rt: Arc, - ) -> Result<(EventReceiver, TxMonitorCmd), Error>; - - /// Returns the chain's identifier - fn id(&self) -> &ChainId; - - /// Shutdown the chain runtime - fn shutdown(self) -> Result<(), Error>; - - /// Perform a health check - fn health_check(&self) -> Result; - - /// Returns the chain's keybase - fn keybase(&self) -> &KeyRing; - - /// Returns the chain's keybase, mutably - fn keybase_mut(&mut self) -> &mut KeyRing; - - /// Sends one or more transactions with `msgs` to chain and - /// synchronously wait for it to be committed. - fn send_messages_and_wait_commit( - &mut self, - tracked_msgs: TrackedMsgs, - ) -> Result, Error>; - - /// Sends one or more transactions with `msgs` to chain. - /// Non-blocking alternative to `send_messages_and_wait_commit` interface. - fn send_messages_and_wait_check_tx( - &mut self, - tracked_msgs: TrackedMsgs, - ) -> Result, Error>; - - fn get_signer(&mut self) -> Result; - - fn config(&self) -> ChainConfig; - - fn get_key(&mut self) -> Result; - - fn add_key(&mut self, key_name: &str, key: KeyEntry) -> Result<(), Error>; - - /// Return the version of the IBC protocol that this chain is running, if known. - fn ibc_version(&self) -> Result, Error>; - - // Queries - - /// Query the balance of the given account for the denom used to pay tx fees. - /// If no account is given, behavior must be specified, e.g. retrieve it from configuration file. 
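// A minimal sketch of the query signature convention used throughout this trait:
// state queries can optionally return a proof, so results come back as a
// `(value, Option<proof>)` pair driven by an include-proof flag. The store, key and
// proof types here are hypothetical stand-ins, not the real `IncludeProof` machinery.
enum IncludeProofFlag {
    Yes,
    No,
}

struct DummyStore;

impl DummyStore {
    fn value(&self, _key: &str) -> Vec<u8> {
        b"state".to_vec()
    }

    fn proof(&self, _key: &str) -> Vec<u8> {
        b"merkle-proof".to_vec()
    }

    // Only fetch the proof when the caller asked for it.
    fn query(&self, key: &str, include_proof: IncludeProofFlag) -> (Vec<u8>, Option<Vec<u8>>) {
        let value = self.value(key);
        let proof = match include_proof {
            IncludeProofFlag::Yes => Some(self.proof(key)),
            IncludeProofFlag::No => None,
        };
        (value, proof)
    }
}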
- fn query_balance(&self, key_name: Option) -> Result; - - fn query_commitment_prefix(&self) -> Result; - - fn query_compatible_versions(&self) -> Result, Error> { - // TODO - do a real chain query - Ok(get_compatible_versions()) - } - - /// Query the latest height and timestamp the application is at - fn query_application_status(&self) -> Result; - - /// Performs a query to retrieve the state of all clients that a chain hosts. - fn query_clients( - &self, - request: QueryClientStatesRequest, - ) -> Result, Error>; - - /// Performs a query to retrieve the state of the specified light client. A - /// proof can optionally be returned along with the result. - fn query_client_state( - &self, - request: QueryClientStateRequest, - include_proof: IncludeProof, - ) -> Result<(AnyClientState, Option), Error>; - - /// Performs a query to retrieve the consensus state for a specified height - /// `consensus_height` that the specified light client stores. - fn query_consensus_state( - &self, - request: QueryConsensusStateRequest, - include_proof: IncludeProof, - ) -> Result<(AnyConsensusState, Option), Error>; - - /// Performs a query to retrieve all the consensus states that the specified - /// light client stores. - fn query_consensus_states( - &self, - request: QueryConsensusStatesRequest, - ) -> Result, Error>; - - fn query_upgraded_client_state( - &self, - request: QueryUpgradedClientStateRequest, - ) -> Result<(AnyClientState, MerkleProof), Error>; - - fn query_upgraded_consensus_state( - &self, - request: QueryUpgradedConsensusStateRequest, - ) -> Result<(AnyConsensusState, MerkleProof), Error>; - - /// Performs a query to retrieve the identifiers of all connections. - fn query_connections( - &self, - request: QueryConnectionsRequest, - ) -> Result, Error>; - - /// Performs a query to retrieve the identifiers of all connections. - fn query_client_connections( - &self, - request: QueryClientConnectionsRequest, - ) -> Result, Error>; - - /// Performs a query to retrieve the connection associated with a given - /// connection identifier. A proof can optionally be returned along with the - /// result. - fn query_connection( - &self, - request: QueryConnectionRequest, - include_proof: IncludeProof, - ) -> Result<(ConnectionEnd, Option), Error>; - - /// Performs a query to retrieve all channels associated with a connection. - fn query_connection_channels( - &self, - request: QueryConnectionChannelsRequest, - ) -> Result, Error>; - - /// Performs a query to retrieve all the channels of a chain. - fn query_channels( - &self, - request: QueryChannelsRequest, - ) -> Result, Error>; - - /// Performs a query to retrieve the channel associated with a given channel - /// identifier. A proof can optionally be returned along with the result. - fn query_channel( - &self, - request: QueryChannelRequest, - include_proof: IncludeProof, - ) -> Result<(ChannelEnd, Option), Error>; - - /// Performs a query to retrieve the client state for the channel associated - /// with a given channel identifier. - fn query_channel_client_state( - &self, - request: QueryChannelClientStateRequest, - ) -> Result, Error>; - - /// Performs a query to retrieve a stored packet commitment hash, stored on - /// the chain at path `path::CommitmentsPath`. A proof can optionally be - /// returned along with the result. 
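// An illustrative helper for the `path::CommitmentsPath` mentioned above: packet
// commitments are stored under a key derived from the port, channel and sequence.
// The layout below follows the ICS-24 path convention; treat it as illustrative
// rather than as the canonical constructor exposed by the `ibc` crate.
fn commitments_path(port_id: &str, channel_id: &str, sequence: u64) -> String {
    format!("commitments/ports/{port_id}/channels/{channel_id}/sequences/{sequence}")
}

// commitments_path("transfer", "channel-0", 5)
//   == "commitments/ports/transfer/channels/channel-0/sequences/5"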
- fn query_packet_commitment( - &self, - request: QueryPacketCommitmentRequest, - include_proof: IncludeProof, - ) -> Result<(Vec, Option), Error>; - - /// Performs a query to retrieve all the packet commitments hashes - /// associated with a channel. Returns the corresponding packet sequence - /// numbers and the height at which they were retrieved. - fn query_packet_commitments( - &self, - request: QueryPacketCommitmentsRequest, - ) -> Result<(Vec, ICSHeight), Error>; - - /// Performs a query to retrieve a given packet receipt, stored on the chain at path - /// `path::CommitmentsPath`. A proof can optionally be returned along with the result. - fn query_packet_receipt( - &self, - request: QueryPacketReceiptRequest, - include_proof: IncludeProof, - ) -> Result<(Vec, Option), Error>; - - /// Performs a query about which IBC packets in the specified list has not - /// been received. Returns the sequence numbers of the packets that were not - /// received. - /// - /// For example, given a request with the sequence numbers `[5,6,7,8]`, a - /// response of `[7,8]` would indicate that packets 5 & 6 were received, - /// while packets 7, 8 were not. - fn query_unreceived_packets( - &self, - request: QueryUnreceivedPacketsRequest, - ) -> Result, Error>; - - /// Performs a query to retrieve a stored packet acknowledgement hash, - /// stored on the chain at path `path::AcksPath`. A proof can optionally be - /// returned along with the result. - fn query_packet_acknowledgement( - &self, - request: QueryPacketAcknowledgementRequest, - include_proof: IncludeProof, - ) -> Result<(Vec, Option), Error>; - - /// Performs a query to retrieve all the packet acknowledgements associated - /// with a channel. Returns the corresponding packet sequence numbers and - /// the height at which they were retrieved. - fn query_packet_acknowledgements( - &self, - request: QueryPacketAcknowledgementsRequest, - ) -> Result<(Vec, ICSHeight), Error>; - - /// Performs a query about which IBC packets in the specified list has not - /// been acknowledged. Returns the sequence numbers of the packets that were not - /// acknowledged. - /// - /// For example, given a request with the sequence numbers `[5,6,7,8]`, a - /// response of `[7,8]` would indicate that packets 5 & 6 were acknowledged, - /// while packets 7, 8 were not. - fn query_unreceived_acknowledgements( - &self, - request: QueryUnreceivedAcksRequest, - ) -> Result, Error>; - - /// Performs a query to retrieve `nextSequenceRecv` stored at path - /// `path::SeqRecvsPath` as defined in ICS-4. A proof can optionally be - /// returned along with the result. - fn query_next_sequence_receive( - &self, - request: QueryNextSequenceReceiveRequest, - include_proof: IncludeProof, - ) -> Result<(Sequence, Option), Error>; - - fn query_txs(&self, request: QueryTxRequest) -> Result, Error>; - - fn query_blocks( - &self, - request: QueryBlockRequest, - ) -> Result<(Vec, Vec), Error>; - - fn query_host_consensus_state( - &self, - request: QueryHostConsensusStateRequest, - ) -> Result; - - fn build_client_state( - &self, - height: ICSHeight, - settings: ClientSettings, - ) -> Result; - - fn build_consensus_state( - &self, - light_block: Self::LightBlock, - ) -> Result; - - /// Fetch, and verify the header at `target_height`, assuming we trust the - /// header at `trusted_height` with the given `client_state`. - /// - /// Returns all the supporting headers that were need to verify the target - /// header, for use when building a `ClientUpdate` message. 
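// A concrete rendering of the `[5,6,7,8]` example given above for
// `query_unreceived_packets` / `query_unreceived_acknowledgements`: the chain answers
// with the subset of the supplied sequences it has not yet seen. The `seen` set is a
// hypothetical in-memory stand-in for on-chain state.
fn unreceived(requested: &[u64], seen: &std::collections::HashSet<u64>) -> Vec<u64> {
    requested.iter().copied().filter(|seq| !seen.contains(seq)).collect()
}

#[test]
fn packets_five_and_six_were_received() {
    let seen: std::collections::HashSet<u64> = [5, 6].into_iter().collect();
    assert_eq!(unreceived(&[5, 6, 7, 8], &seen), vec![7, 8]);
}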
- fn build_header( - &self, - trusted_height: ICSHeight, - target_height: ICSHeight, - client_state: &AnyClientState, - light_client: &mut Self::LightClient, - ) -> Result<(Self::Header, Vec), Error>; - - /// Builds the required proofs and the client state for connection handshake messages. - /// The proofs and client state must be obtained from queries at same height. - fn build_connection_proofs_and_client_state( - &self, - message_type: ConnectionMsgType, - connection_id: &ConnectionId, - client_id: &ClientId, - height: ICSHeight, - ) -> Result<(Option, Proofs), Error> { - let (connection_end, maybe_connection_proof) = self.query_connection( - QueryConnectionRequest { - connection_id: connection_id.clone(), - height, - }, - IncludeProof::Yes, - )?; - let connection_proof = maybe_connection_proof.expect(QUERY_PROOF_EXPECT_MSG); - - // Check that the connection state is compatible with the message - match message_type { - ConnectionMsgType::OpenTry => { - if !connection_end.state_matches(&State::Init) - && !connection_end.state_matches(&State::TryOpen) - { - return Err(Error::bad_connection_state()); - } - } - ConnectionMsgType::OpenAck => { - if !connection_end.state_matches(&State::TryOpen) - && !connection_end.state_matches(&State::Open) - { - return Err(Error::bad_connection_state()); - } - } - ConnectionMsgType::OpenConfirm => { - if !connection_end.state_matches(&State::Open) { - return Err(Error::bad_connection_state()); - } - } - } - - let mut client_state = None; - let mut client_proof = None; - let mut consensus_proof = None; - - match message_type { - ConnectionMsgType::OpenTry | ConnectionMsgType::OpenAck => { - let (client_state_value, maybe_client_state_proof) = self.query_client_state( - QueryClientStateRequest { - client_id: client_id.clone(), - height, - }, - IncludeProof::Yes, - )?; - let client_state_proof = maybe_client_state_proof.expect(QUERY_PROOF_EXPECT_MSG); - - client_proof = Some( - CommitmentProofBytes::try_from(client_state_proof) - .map_err(Error::malformed_proof)?, - ); - - let consensus_state_proof = { - let (_, maybe_consensus_state_proof) = self.query_consensus_state( - QueryConsensusStateRequest { - client_id: client_id.clone(), - consensus_height: client_state_value.latest_height(), - query_height: height, - }, - IncludeProof::Yes, - )?; - - maybe_consensus_state_proof.expect(QUERY_PROOF_EXPECT_MSG) - }; - - consensus_proof = Option::from( - ConsensusProof::new( - CommitmentProofBytes::try_from(consensus_state_proof) - .map_err(Error::malformed_proof)?, - client_state_value.latest_height(), - ) - .map_err(Error::consensus_proof)?, - ); - - client_state = Some(client_state_value); - } - _ => {} - } - - Ok(( - client_state, - Proofs::new( - CommitmentProofBytes::try_from(connection_proof).map_err(Error::malformed_proof)?, - client_proof, - consensus_proof, - None, - height.increment(), - ) - .map_err(Error::malformed_proof)?, - )) - } - - /// Builds the proof for channel handshake messages. 
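// A distilled version of the state check performed in
// `build_connection_proofs_and_client_state` above: each handshake message is only
// valid against certain counterparty connection states. The simplified local enums
// stand in for the ICS-03 types.
enum ConnState {
    Init,
    TryOpen,
    Open,
}

enum ConnMsgType {
    OpenTry,
    OpenAck,
    OpenConfirm,
}

fn state_is_compatible(msg: &ConnMsgType, state: &ConnState) -> bool {
    match msg {
        ConnMsgType::OpenTry => matches!(state, ConnState::Init | ConnState::TryOpen),
        ConnMsgType::OpenAck => matches!(state, ConnState::TryOpen | ConnState::Open),
        ConnMsgType::OpenConfirm => matches!(state, ConnState::Open),
    }
}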
- fn build_channel_proofs( - &self, - port_id: &PortId, - channel_id: &ChannelId, - height: ICSHeight, - ) -> Result { - // Collect all proofs as required - let (_, maybe_channel_proof) = self.query_channel( - QueryChannelRequest { - port_id: port_id.clone(), - channel_id: *channel_id, - height, - }, - IncludeProof::Yes, - )?; - let channel_proof = maybe_channel_proof.expect(QUERY_PROOF_EXPECT_MSG); - let channel_proof_bytes = - CommitmentProofBytes::try_from(channel_proof).map_err(Error::malformed_proof)?; - - Proofs::new(channel_proof_bytes, None, None, None, height.increment()) - .map_err(Error::malformed_proof) - } - - /// Builds the proof for packet messages. - fn build_packet_proofs( - &self, - packet_type: PacketMsgType, - port_id: PortId, - channel_id: ChannelId, - sequence: Sequence, - height: ICSHeight, - ) -> Result { - let (maybe_packet_proof, channel_proof) = match packet_type { - PacketMsgType::Recv => { - let (_, maybe_packet_proof) = self.query_packet_commitment( - QueryPacketCommitmentRequest { - port_id, - channel_id, - sequence, - height, - }, - IncludeProof::Yes, - )?; - - (maybe_packet_proof, None) - } - PacketMsgType::Ack => { - let (_, maybe_packet_proof) = self.query_packet_acknowledgement( - QueryPacketAcknowledgementRequest { - port_id, - channel_id, - sequence, - height, - }, - IncludeProof::Yes, - )?; - - (maybe_packet_proof, None) - } - PacketMsgType::TimeoutUnordered => { - let (_, maybe_packet_proof) = self.query_packet_receipt( - QueryPacketReceiptRequest { - port_id, - channel_id, - sequence, - height, - }, - IncludeProof::Yes, - )?; - - (maybe_packet_proof, None) - } - PacketMsgType::TimeoutOrdered => { - let (_, maybe_packet_proof) = self.query_next_sequence_receive( - QueryNextSequenceReceiveRequest { - port_id, - channel_id, - height, - }, - IncludeProof::Yes, - )?; - - (maybe_packet_proof, None) - } - PacketMsgType::TimeoutOnClose => { - let channel_proof = { - let (_, maybe_channel_proof) = self.query_channel( - QueryChannelRequest { - port_id: port_id.clone(), - channel_id, - height, - }, - IncludeProof::Yes, - )?; - let channel_merkle_proof = maybe_channel_proof.expect(QUERY_PROOF_EXPECT_MSG); - Some( - CommitmentProofBytes::try_from(channel_merkle_proof) - .map_err(Error::malformed_proof)?, - ) - }; - let (_, maybe_packet_proof) = self.query_packet_receipt( - QueryPacketReceiptRequest { - port_id, - channel_id, - sequence, - height, - }, - IncludeProof::Yes, - )?; - - (maybe_packet_proof, channel_proof) - } - }; - - let packet_proof = maybe_packet_proof.expect(QUERY_PROOF_EXPECT_MSG); - - let proofs = Proofs::new( - CommitmentProofBytes::try_from(packet_proof).map_err(Error::malformed_proof)?, - None, - None, - channel_proof, - height.increment(), - ) - .map_err(Error::malformed_proof)?; - - Ok(proofs) - } -} diff --git a/relayer/src/chain/handle.rs b/relayer/src/chain/handle.rs deleted file mode 100644 index 6766590a3c..0000000000 --- a/relayer/src/chain/handle.rs +++ /dev/null @@ -1,632 +0,0 @@ -use alloc::sync::Arc; -use core::fmt::{self, Debug}; - -use crossbeam_channel as channel; -use serde::Serialize; - -use ibc::{ - core::{ - ics02_client::{ - client_consensus::{AnyConsensusState, AnyConsensusStateWithHeight}, - client_state::{AnyClientState, IdentifiedAnyClientState}, - events::UpdateClient, - header::AnyHeader, - misbehaviour::MisbehaviourEvidence, - }, - ics03_connection::{ - connection::{ConnectionEnd, IdentifiedConnectionEnd}, - version::Version, - }, - ics04_channel::{ - channel::{ChannelEnd, IdentifiedChannelEnd}, - 
packet::{PacketMsgType, Sequence}, - }, - ics23_commitment::{commitment::CommitmentPrefix, merkle::MerkleProof}, - ics24_host::identifier::{ChainId, ChannelId, ClientId, ConnectionId, PortId}, - }, - events::IbcEvent, - proofs::Proofs, - query::{QueryBlockRequest, QueryTxRequest}, - signer::Signer, - Height, -}; - -use crate::{ - account::Balance, - config::ChainConfig, - connection::ConnectionMsgType, - error::Error, - event::monitor::{EventBatch, Result as MonitorResult}, - keyring::KeyEntry, -}; - -use super::{ - client::ClientSettings, - endpoint::{ChainStatus, HealthCheck}, - requests::{ - IncludeProof, QueryChannelClientStateRequest, QueryChannelRequest, QueryChannelsRequest, - QueryClientConnectionsRequest, QueryClientStateRequest, QueryClientStatesRequest, - QueryConnectionChannelsRequest, QueryConnectionRequest, QueryConnectionsRequest, - QueryConsensusStateRequest, QueryConsensusStatesRequest, QueryHostConsensusStateRequest, - QueryNextSequenceReceiveRequest, QueryPacketAcknowledgementRequest, - QueryPacketAcknowledgementsRequest, QueryPacketCommitmentRequest, - QueryPacketCommitmentsRequest, QueryPacketReceiptRequest, QueryUnreceivedAcksRequest, - QueryUnreceivedPacketsRequest, QueryUpgradedClientStateRequest, - QueryUpgradedConsensusStateRequest, - }, - tracking::TrackedMsgs, -}; - -mod base; -mod cache; -mod counting; - -pub use base::BaseChainHandle; -pub use counting::CountingChainHandle; - -pub type CachingChainHandle = cache::CachingChainHandle; -pub type CountingAndCachingChainHandle = - cache::CachingChainHandle>; - -/// A pair of [`ChainHandle`]s. -#[derive(Clone)] -pub struct ChainHandlePair { - pub a: ChainA, - pub b: ChainB, -} - -impl ChainHandlePair { - /// Swap the two handles. - pub fn swap(self) -> ChainHandlePair { - ChainHandlePair { - a: self.b, - b: self.a, - } - } -} - -impl Debug for ChainHandlePair { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("ChainHandlePair") - .field("a", &self.a.id()) - .field("b", &self.b.id()) - .finish() - } -} - -pub type Subscription = channel::Receiver>>; - -pub type ReplyTo = channel::Sender>; -pub type Reply = channel::Receiver>; - -pub fn reply_channel() -> (ReplyTo, Reply) { - channel::bounded(1) -} - -/// Requests that a `ChainHandle` may send to a `ChainRuntime`. 
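(Editor's aside, not part of the patch: each `ChainRequest` variant below carries a `reply_to` sender created by `reply_channel()`, i.e. a one-shot request/reply exchange over crossbeam channels. The following is a minimal, self-contained sketch of that pattern; `PingRequest` and `spawn_runtime` are hypothetical stand-ins for `ChainRequest` and the chain runtime, not code from this repository.)

use crossbeam_channel as channel;
use std::thread;

// Hypothetical stand-in for `ChainRequest`: each variant carries a one-shot
// reply channel on which the runtime sends back a `Result`.
enum PingRequest {
    Ping {
        reply_to: channel::Sender<Result<u64, String>>,
    },
}

// Hypothetical stand-in for the chain runtime: drain requests and reply.
fn spawn_runtime() -> channel::Sender<PingRequest> {
    let (tx, rx) = channel::unbounded::<PingRequest>();
    thread::spawn(move || {
        for request in rx {
            match request {
                PingRequest::Ping { reply_to } => {
                    // Do the work, then answer on the per-request channel.
                    let _ = reply_to.send(Ok(42));
                }
            }
        }
    });
    tx
}

fn main() {
    let runtime = spawn_runtime();
    // Handle side: create a bounded(1) reply channel, send the request,
    // then block on the receiver, mirroring `reply_channel()` above.
    let (reply_to, reply) = channel::bounded(1);
    runtime.send(PingRequest::Ping { reply_to }).unwrap();
    assert_eq!(reply.recv().unwrap(), Ok(42));
}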
-#[derive(Clone, Debug)] -#[allow(clippy::large_enum_variant)] -pub enum ChainRequest { - Shutdown { - reply_to: ReplyTo<()>, - }, - - HealthCheck { - reply_to: ReplyTo, - }, - - Subscribe { - reply_to: ReplyTo, - }, - - SendMessagesAndWaitCommit { - tracked_msgs: TrackedMsgs, - reply_to: ReplyTo>, - }, - - SendMessagesAndWaitCheckTx { - tracked_msgs: TrackedMsgs, - reply_to: ReplyTo>, - }, - - Config { - reply_to: ReplyTo, - }, - - Signer { - reply_to: ReplyTo, - }, - - GetKey { - reply_to: ReplyTo, - }, - - AddKey { - key_name: String, - key: KeyEntry, - reply_to: ReplyTo<()>, - }, - - IbcVersion { - reply_to: ReplyTo>, - }, - - QueryBalance { - key_name: Option, - reply_to: ReplyTo, - }, - - QueryApplicationStatus { - reply_to: ReplyTo, - }, - - QueryClients { - request: QueryClientStatesRequest, - reply_to: ReplyTo>, - }, - - BuildHeader { - trusted_height: Height, - target_height: Height, - client_state: AnyClientState, - reply_to: ReplyTo<(AnyHeader, Vec)>, - }, - - BuildClientState { - height: Height, - settings: ClientSettings, - reply_to: ReplyTo, - }, - - BuildConsensusState { - trusted: Height, - target: Height, - client_state: AnyClientState, - reply_to: ReplyTo, - }, - - BuildMisbehaviour { - client_state: AnyClientState, - update_event: UpdateClient, - reply_to: ReplyTo>, - }, - - BuildConnectionProofsAndClientState { - message_type: ConnectionMsgType, - connection_id: ConnectionId, - client_id: ClientId, - height: Height, - reply_to: ReplyTo<(Option, Proofs)>, - }, - - QueryClientState { - request: QueryClientStateRequest, - include_proof: IncludeProof, - reply_to: ReplyTo<(AnyClientState, Option)>, - }, - - QueryClientConnections { - request: QueryClientConnectionsRequest, - reply_to: ReplyTo>, - }, - - QueryConsensusState { - request: QueryConsensusStateRequest, - include_proof: IncludeProof, - reply_to: ReplyTo<(AnyConsensusState, Option)>, - }, - - QueryConsensusStates { - request: QueryConsensusStatesRequest, - reply_to: ReplyTo>, - }, - - QueryUpgradedClientState { - request: QueryUpgradedClientStateRequest, - reply_to: ReplyTo<(AnyClientState, MerkleProof)>, - }, - - QueryUpgradedConsensusState { - request: QueryUpgradedConsensusStateRequest, - reply_to: ReplyTo<(AnyConsensusState, MerkleProof)>, - }, - - QueryCommitmentPrefix { - reply_to: ReplyTo, - }, - - QueryCompatibleVersions { - reply_to: ReplyTo>, - }, - - QueryConnection { - request: QueryConnectionRequest, - include_proof: IncludeProof, - reply_to: ReplyTo<(ConnectionEnd, Option)>, - }, - - QueryConnections { - request: QueryConnectionsRequest, - reply_to: ReplyTo>, - }, - - QueryConnectionChannels { - request: QueryConnectionChannelsRequest, - reply_to: ReplyTo>, - }, - - QueryChannels { - request: QueryChannelsRequest, - reply_to: ReplyTo>, - }, - - QueryChannel { - request: QueryChannelRequest, - include_proof: IncludeProof, - reply_to: ReplyTo<(ChannelEnd, Option)>, - }, - - QueryChannelClientState { - request: QueryChannelClientStateRequest, - reply_to: ReplyTo>, - }, - - QueryNextSequenceReceive { - request: QueryNextSequenceReceiveRequest, - include_proof: IncludeProof, - reply_to: ReplyTo<(Sequence, Option)>, - }, - - BuildChannelProofs { - port_id: PortId, - channel_id: ChannelId, - height: Height, - reply_to: ReplyTo, - }, - - BuildPacketProofs { - packet_type: PacketMsgType, - port_id: PortId, - channel_id: ChannelId, - sequence: Sequence, - height: Height, - reply_to: ReplyTo, - }, - - QueryPacketCommitment { - request: QueryPacketCommitmentRequest, - include_proof: IncludeProof, - reply_to: 
ReplyTo<(Vec, Option)>, - }, - - QueryPacketCommitments { - request: QueryPacketCommitmentsRequest, - reply_to: ReplyTo<(Vec, Height)>, - }, - - QueryPacketReceipt { - request: QueryPacketReceiptRequest, - include_proof: IncludeProof, - reply_to: ReplyTo<(Vec, Option)>, - }, - - QueryUnreceivedPackets { - request: QueryUnreceivedPacketsRequest, - reply_to: ReplyTo>, - }, - - QueryPacketAcknowledgement { - request: QueryPacketAcknowledgementRequest, - include_proof: IncludeProof, - reply_to: ReplyTo<(Vec, Option)>, - }, - - QueryPacketAcknowledgements { - request: QueryPacketAcknowledgementsRequest, - reply_to: ReplyTo<(Vec, Height)>, - }, - - QueryUnreceivedAcknowledgement { - request: QueryUnreceivedAcksRequest, - reply_to: ReplyTo>, - }, - - QueryPacketEventDataFromTxs { - request: QueryTxRequest, - reply_to: ReplyTo>, - }, - - QueryPacketEventDataFromBlocks { - request: QueryBlockRequest, - reply_to: ReplyTo<(Vec, Vec)>, - }, - - QueryHostConsensusState { - request: QueryHostConsensusStateRequest, - reply_to: ReplyTo, - }, -} - -pub trait ChainHandle: Clone + Send + Sync + Serialize + Debug + 'static { - fn new(chain_id: ChainId, sender: channel::Sender) -> Self; - - /// Get the [`ChainId`] of this chain. - fn id(&self) -> ChainId; - - /// Shutdown the chain runtime. - fn shutdown(&self) -> Result<(), Error>; - - /// Perform a health check - fn health_check(&self) -> Result; - - /// Subscribe to the events emitted by the chain. - fn subscribe(&self) -> Result; - - /// Send the given `msgs` to the chain, packaged as one or more transactions, - /// and return the list of events emitted by the chain after the transaction was committed. - fn send_messages_and_wait_commit( - &self, - tracked_msgs: TrackedMsgs, - ) -> Result, Error>; - - /// Submit messages asynchronously. - /// Does not block waiting on the chain to produce the - /// resulting events. Instead of events, this method - /// returns a set of transaction hashes. - fn send_messages_and_wait_check_tx( - &self, - tracked_msgs: TrackedMsgs, - ) -> Result, Error>; - - fn get_signer(&self) -> Result; - - fn config(&self) -> Result; - - fn get_key(&self) -> Result; - - fn add_key(&self, key_name: String, key: KeyEntry) -> Result<(), Error>; - - /// Return the version of the IBC protocol that this chain is running, if known. - fn ibc_version(&self) -> Result, Error>; - - /// Query the balance of the given account for the denom used to pay tx fees. - /// If no account is given, behavior must be specified, e.g. retrieve it from configuration file. - fn query_balance(&self, key_name: Option) -> Result; - - /// Query the latest height and timestamp the application is at - fn query_application_status(&self) -> Result; - - fn query_latest_height(&self) -> Result { - Ok(self.query_application_status()?.height) - } - - /// Performs a query to retrieve the state of all clients that a chain hosts. - fn query_clients( - &self, - request: QueryClientStatesRequest, - ) -> Result, Error>; - - /// Performs a query to retrieve the state of the specified light client. A - /// proof can optionally be returned along with the result. - fn query_client_state( - &self, - request: QueryClientStateRequest, - include_proof: IncludeProof, - ) -> Result<(AnyClientState, Option), Error>; - - /// Performs a query to retrieve the identifiers of all connections. 
- fn query_client_connections( - &self, - request: QueryClientConnectionsRequest, - ) -> Result, Error>; - - /// Performs a query to retrieve the consensus state for a specified height - /// `consensus_height` that the specified light client stores. - fn query_consensus_state( - &self, - request: QueryConsensusStateRequest, - include_proof: IncludeProof, - ) -> Result<(AnyConsensusState, Option), Error>; - - /// Performs a query to retrieve all the consensus states that the specified - /// light client stores. - fn query_consensus_states( - &self, - request: QueryConsensusStatesRequest, - ) -> Result, Error>; - - fn query_upgraded_client_state( - &self, - request: QueryUpgradedClientStateRequest, - ) -> Result<(AnyClientState, MerkleProof), Error>; - - fn query_upgraded_consensus_state( - &self, - request: QueryUpgradedConsensusStateRequest, - ) -> Result<(AnyConsensusState, MerkleProof), Error>; - - fn query_commitment_prefix(&self) -> Result; - - fn query_compatible_versions(&self) -> Result, Error>; - - /// Performs a query to retrieve the connection associated with a given - /// connection identifier. A proof can optionally be returned along with the - /// result. - fn query_connection( - &self, - request: QueryConnectionRequest, - include_proof: IncludeProof, - ) -> Result<(ConnectionEnd, Option), Error>; - - /// Performs a query to retrieve the identifiers of all connections. - fn query_connections( - &self, - request: QueryConnectionsRequest, - ) -> Result, Error>; - - /// Performs a query to retrieve all channels associated with a connection. - fn query_connection_channels( - &self, - request: QueryConnectionChannelsRequest, - ) -> Result, Error>; - - /// Performs a query to retrieve `nextSequenceRecv` stored at path - /// `path::SeqRecvsPath` as defined in ICS-4. A proof can optionally be - /// returned along with the result. - fn query_next_sequence_receive( - &self, - request: QueryNextSequenceReceiveRequest, - include_proof: IncludeProof, - ) -> Result<(Sequence, Option), Error>; - - /// Performs a query to retrieve all the channels of a chain. - fn query_channels( - &self, - request: QueryChannelsRequest, - ) -> Result, Error>; - - /// Performs a query to retrieve the channel associated with a given channel - /// identifier. A proof can optionally be returned along with the result. - fn query_channel( - &self, - request: QueryChannelRequest, - include_proof: IncludeProof, - ) -> Result<(ChannelEnd, Option), Error>; - - /// Performs a query to retrieve the client state for the channel associated - /// with a given channel identifier. 
- fn query_channel_client_state( - &self, - request: QueryChannelClientStateRequest, - ) -> Result, Error>; - - fn build_header( - &self, - trusted_height: Height, - target_height: Height, - client_state: AnyClientState, - ) -> Result<(AnyHeader, Vec), Error>; - - /// Constructs a client state at the given height - fn build_client_state( - &self, - height: Height, - settings: ClientSettings, - ) -> Result; - - /// Constructs a consensus state at the given height - fn build_consensus_state( - &self, - trusted: Height, - target: Height, - client_state: AnyClientState, - ) -> Result; - - fn check_misbehaviour( - &self, - update: UpdateClient, - client_state: AnyClientState, - ) -> Result, Error>; - - fn build_connection_proofs_and_client_state( - &self, - message_type: ConnectionMsgType, - connection_id: &ConnectionId, - client_id: &ClientId, - height: Height, - ) -> Result<(Option, Proofs), Error>; - - fn build_channel_proofs( - &self, - port_id: &PortId, - channel_id: &ChannelId, - height: Height, - ) -> Result; - - fn build_packet_proofs( - &self, - packet_type: PacketMsgType, - port_id: &PortId, - channel_id: &ChannelId, - sequence: Sequence, - height: Height, - ) -> Result; - - /// Performs a query to retrieve a stored packet commitment hash, stored on - /// the chain at path `path::CommitmentsPath`. A proof can optionally be - /// returned along with the result. - fn query_packet_commitment( - &self, - request: QueryPacketCommitmentRequest, - include_proof: IncludeProof, - ) -> Result<(Vec, Option), Error>; - - /// Performs a query to retrieve all the packet commitments hashes - /// associated with a channel. Returns the corresponding packet sequence - /// numbers and the height at which they were retrieved. - fn query_packet_commitments( - &self, - request: QueryPacketCommitmentsRequest, - ) -> Result<(Vec, Height), Error>; - - /// Performs a query to retrieve a given packet receipt, stored on the chain at path - /// `path::CommitmentsPath`. A proof can optionally be returned along with the result. - fn query_packet_receipt( - &self, - request: QueryPacketReceiptRequest, - include_proof: IncludeProof, - ) -> Result<(Vec, Option), Error>; - - /// Performs a query about which IBC packets in the specified list has not - /// been received. Returns the sequence numbers of the packets that were not - /// received. - /// - /// For example, given a request with the sequence numbers `[5,6,7,8]`, a - /// response of `[7,8]` would indicate that packets 5 & 6 were received, - /// while packets 7, 8 were not. - fn query_unreceived_packets( - &self, - request: QueryUnreceivedPacketsRequest, - ) -> Result, Error>; - - /// Performs a query to retrieve a stored packet acknowledgement hash, - /// stored on the chain at path `path::AcksPath`. A proof can optionally be - /// returned along with the result. - fn query_packet_acknowledgement( - &self, - request: QueryPacketAcknowledgementRequest, - include_proof: IncludeProof, - ) -> Result<(Vec, Option), Error>; - - /// Performs a query to retrieve all the packet acknowledgements associated - /// with a channel. Returns the corresponding packet sequence numbers and - /// the height at which they were retrieved. - fn query_packet_acknowledgements( - &self, - request: QueryPacketAcknowledgementsRequest, - ) -> Result<(Vec, Height), Error>; - - /// Performs a query about which IBC packets in the specified list has not - /// been acknowledged. Returns the sequence numbers of the packets that were not - /// acknowledged. 
- /// - /// For example, given a request with the sequence numbers `[5,6,7,8]`, a - /// response of `[7,8]` would indicate that packets 5 & 6 were acknowledged, - /// while packets 7, 8 were not. - fn query_unreceived_acknowledgements( - &self, - request: QueryUnreceivedAcksRequest, - ) -> Result, Error>; - - fn query_txs(&self, request: QueryTxRequest) -> Result, Error>; - - fn query_blocks( - &self, - request: QueryBlockRequest, - ) -> Result<(Vec, Vec), Error>; - - fn query_host_consensus_state( - &self, - request: QueryHostConsensusStateRequest, - ) -> Result; -} diff --git a/relayer/src/chain/handle/base.rs b/relayer/src/chain/handle/base.rs deleted file mode 100644 index 738118aaf7..0000000000 --- a/relayer/src/chain/handle/base.rs +++ /dev/null @@ -1,484 +0,0 @@ -use core::fmt::Debug; - -use crossbeam_channel as channel; -use serde::{Serialize, Serializer}; - -use ibc::{ - core::{ - ics02_client::client_consensus::{AnyConsensusState, AnyConsensusStateWithHeight}, - ics02_client::client_state::{AnyClientState, IdentifiedAnyClientState}, - ics02_client::events::UpdateClient, - ics02_client::header::AnyHeader, - ics02_client::misbehaviour::MisbehaviourEvidence, - ics03_connection::connection::{ConnectionEnd, IdentifiedConnectionEnd}, - ics03_connection::version::Version, - ics04_channel::channel::{ChannelEnd, IdentifiedChannelEnd}, - ics04_channel::packet::{PacketMsgType, Sequence}, - ics23_commitment::{commitment::CommitmentPrefix, merkle::MerkleProof}, - ics24_host::identifier::ChainId, - ics24_host::identifier::ChannelId, - ics24_host::identifier::{ClientId, ConnectionId, PortId}, - }, - events::IbcEvent, - proofs::Proofs, - query::{QueryBlockRequest, QueryTxRequest}, - signer::Signer, - Height, -}; - -use crate::{ - account::Balance, - chain::{ - client::ClientSettings, - endpoint::ChainStatus, - requests::{ - IncludeProof, QueryChannelClientStateRequest, QueryChannelRequest, - QueryChannelsRequest, QueryClientConnectionsRequest, QueryClientStateRequest, - QueryClientStatesRequest, QueryConnectionChannelsRequest, QueryConnectionRequest, - QueryConnectionsRequest, QueryConsensusStateRequest, QueryConsensusStatesRequest, - QueryHostConsensusStateRequest, QueryNextSequenceReceiveRequest, - QueryPacketAcknowledgementRequest, QueryPacketAcknowledgementsRequest, - QueryPacketCommitmentRequest, QueryPacketCommitmentsRequest, QueryPacketReceiptRequest, - QueryUnreceivedAcksRequest, QueryUnreceivedPacketsRequest, - QueryUpgradedClientStateRequest, QueryUpgradedConsensusStateRequest, - }, - tracking::TrackedMsgs, - }, - config::ChainConfig, - connection::ConnectionMsgType, - error::Error, - keyring::KeyEntry, -}; - -use super::{reply_channel, ChainHandle, ChainRequest, HealthCheck, ReplyTo, Subscription}; - -/// A basic chain handle implementation. -/// For use in interactive CLIs, e.g., `query`, `tx raw`, etc. -#[derive(Debug, Clone)] -pub struct BaseChainHandle { - /// Chain identifier - chain_id: ChainId, - - /// The handle's channel for sending requests to the runtime - runtime_sender: channel::Sender, -} - -impl BaseChainHandle { - pub fn new(chain_id: ChainId, sender: channel::Sender) -> Self { - Self { - chain_id, - runtime_sender: sender, - } - } - - fn send(&self, f: F) -> Result - where - F: FnOnce(ReplyTo) -> ChainRequest, - O: Debug, - { - let (sender, receiver) = reply_channel(); - let input = f(sender); - - self.runtime_sender.send(input).map_err(Error::send)?; - - receiver.recv().map_err(Error::channel_receive)? 
- } -} - -impl ChainHandle for BaseChainHandle { - fn new(chain_id: ChainId, sender: channel::Sender) -> Self { - Self::new(chain_id, sender) - } - - fn id(&self) -> ChainId { - self.chain_id.clone() - } - - fn health_check(&self) -> Result { - self.send(|reply_to| ChainRequest::HealthCheck { reply_to }) - } - - fn shutdown(&self) -> Result<(), Error> { - self.send(|reply_to| ChainRequest::Shutdown { reply_to }) - } - - fn subscribe(&self) -> Result { - self.send(|reply_to| ChainRequest::Subscribe { reply_to }) - } - - fn send_messages_and_wait_commit( - &self, - tracked_msgs: TrackedMsgs, - ) -> Result, Error> { - self.send(|reply_to| ChainRequest::SendMessagesAndWaitCommit { - tracked_msgs, - reply_to, - }) - } - - fn send_messages_and_wait_check_tx( - &self, - tracked_msgs: TrackedMsgs, - ) -> Result, Error> { - self.send(|reply_to| ChainRequest::SendMessagesAndWaitCheckTx { - tracked_msgs, - reply_to, - }) - } - - fn get_signer(&self) -> Result { - self.send(|reply_to| ChainRequest::Signer { reply_to }) - } - - fn config(&self) -> Result { - self.send(|reply_to| ChainRequest::Config { reply_to }) - } - - fn get_key(&self) -> Result { - self.send(|reply_to| ChainRequest::GetKey { reply_to }) - } - - fn add_key(&self, key_name: String, key: KeyEntry) -> Result<(), Error> { - self.send(|reply_to| ChainRequest::AddKey { - key_name, - key, - reply_to, - }) - } - - fn ibc_version(&self) -> Result, Error> { - self.send(|reply_to| ChainRequest::IbcVersion { reply_to }) - } - - fn query_balance(&self, key_name: Option) -> Result { - self.send(|reply_to| ChainRequest::QueryBalance { key_name, reply_to }) - } - - fn query_application_status(&self) -> Result { - self.send(|reply_to| ChainRequest::QueryApplicationStatus { reply_to }) - } - - fn query_clients( - &self, - request: QueryClientStatesRequest, - ) -> Result, Error> { - self.send(|reply_to| ChainRequest::QueryClients { request, reply_to }) - } - - fn query_client_state( - &self, - request: QueryClientStateRequest, - include_proof: IncludeProof, - ) -> Result<(AnyClientState, Option), Error> { - self.send(|reply_to| ChainRequest::QueryClientState { - request, - include_proof, - reply_to, - }) - } - - fn query_client_connections( - &self, - request: QueryClientConnectionsRequest, - ) -> Result, Error> { - self.send(|reply_to| ChainRequest::QueryClientConnections { request, reply_to }) - } - - fn query_consensus_states( - &self, - request: QueryConsensusStatesRequest, - ) -> Result, Error> { - self.send(|reply_to| ChainRequest::QueryConsensusStates { request, reply_to }) - } - - fn query_consensus_state( - &self, - request: QueryConsensusStateRequest, - include_proof: IncludeProof, - ) -> Result<(AnyConsensusState, Option), Error> { - self.send(|reply_to| ChainRequest::QueryConsensusState { - request, - include_proof, - reply_to, - }) - } - - fn query_upgraded_client_state( - &self, - request: QueryUpgradedClientStateRequest, - ) -> Result<(AnyClientState, MerkleProof), Error> { - self.send(|reply_to| ChainRequest::QueryUpgradedClientState { request, reply_to }) - } - - fn query_upgraded_consensus_state( - &self, - request: QueryUpgradedConsensusStateRequest, - ) -> Result<(AnyConsensusState, MerkleProof), Error> { - self.send(|reply_to| ChainRequest::QueryUpgradedConsensusState { request, reply_to }) - } - - fn query_commitment_prefix(&self) -> Result { - self.send(|reply_to| ChainRequest::QueryCommitmentPrefix { reply_to }) - } - - fn query_compatible_versions(&self) -> Result, Error> { - self.send(|reply_to| 
ChainRequest::QueryCompatibleVersions { reply_to }) - } - - fn query_connection( - &self, - request: QueryConnectionRequest, - include_proof: IncludeProof, - ) -> Result<(ConnectionEnd, Option), Error> { - self.send(|reply_to| ChainRequest::QueryConnection { - request, - include_proof, - reply_to, - }) - } - - fn query_connections( - &self, - request: QueryConnectionsRequest, - ) -> Result, Error> { - self.send(|reply_to| ChainRequest::QueryConnections { request, reply_to }) - } - - fn query_connection_channels( - &self, - request: QueryConnectionChannelsRequest, - ) -> Result, Error> { - self.send(|reply_to| ChainRequest::QueryConnectionChannels { request, reply_to }) - } - - fn query_next_sequence_receive( - &self, - request: QueryNextSequenceReceiveRequest, - include_proof: IncludeProof, - ) -> Result<(Sequence, Option), Error> { - self.send(|reply_to| ChainRequest::QueryNextSequenceReceive { - request, - include_proof, - reply_to, - }) - } - - fn query_channels( - &self, - request: QueryChannelsRequest, - ) -> Result, Error> { - self.send(|reply_to| ChainRequest::QueryChannels { request, reply_to }) - } - - fn query_channel( - &self, - request: QueryChannelRequest, - include_proof: IncludeProof, - ) -> Result<(ChannelEnd, Option), Error> { - self.send(|reply_to| ChainRequest::QueryChannel { - request, - include_proof, - reply_to, - }) - } - - fn query_channel_client_state( - &self, - request: QueryChannelClientStateRequest, - ) -> Result, Error> { - self.send(|reply_to| ChainRequest::QueryChannelClientState { request, reply_to }) - } - - fn build_header( - &self, - trusted_height: Height, - target_height: Height, - client_state: AnyClientState, - ) -> Result<(AnyHeader, Vec), Error> { - self.send(|reply_to| ChainRequest::BuildHeader { - trusted_height, - target_height, - client_state, - reply_to, - }) - } - - fn build_client_state( - &self, - height: Height, - settings: ClientSettings, - ) -> Result { - self.send(|reply_to| ChainRequest::BuildClientState { - height, - settings, - reply_to, - }) - } - - fn build_consensus_state( - &self, - trusted: Height, - target: Height, - client_state: AnyClientState, - ) -> Result { - self.send(|reply_to| ChainRequest::BuildConsensusState { - trusted, - target, - client_state, - reply_to, - }) - } - - fn check_misbehaviour( - &self, - update_event: UpdateClient, - client_state: AnyClientState, - ) -> Result, Error> { - self.send(|reply_to| ChainRequest::BuildMisbehaviour { - client_state, - update_event, - reply_to, - }) - } - - fn build_connection_proofs_and_client_state( - &self, - message_type: ConnectionMsgType, - connection_id: &ConnectionId, - client_id: &ClientId, - height: Height, - ) -> Result<(Option, Proofs), Error> { - self.send( - |reply_to| ChainRequest::BuildConnectionProofsAndClientState { - message_type, - connection_id: connection_id.clone(), - client_id: client_id.clone(), - height, - reply_to, - }, - ) - } - - fn build_channel_proofs( - &self, - port_id: &PortId, - channel_id: &ChannelId, - height: Height, - ) -> Result { - self.send(|reply_to| ChainRequest::BuildChannelProofs { - port_id: port_id.clone(), - channel_id: *channel_id, - height, - reply_to, - }) - } - - fn build_packet_proofs( - &self, - packet_type: PacketMsgType, - port_id: &PortId, - channel_id: &ChannelId, - sequence: Sequence, - height: Height, - ) -> Result { - self.send(|reply_to| ChainRequest::BuildPacketProofs { - packet_type, - port_id: port_id.clone(), - channel_id: *channel_id, - sequence, - height, - reply_to, - }) - } - - fn query_packet_commitment( - 
&self, - request: QueryPacketCommitmentRequest, - include_proof: IncludeProof, - ) -> Result<(Vec, Option), Error> { - self.send(|reply_to| ChainRequest::QueryPacketCommitment { - request, - include_proof, - reply_to, - }) - } - - fn query_packet_commitments( - &self, - request: QueryPacketCommitmentsRequest, - ) -> Result<(Vec, Height), Error> { - self.send(|reply_to| ChainRequest::QueryPacketCommitments { request, reply_to }) - } - - fn query_packet_receipt( - &self, - request: QueryPacketReceiptRequest, - include_proof: IncludeProof, - ) -> Result<(Vec, Option), Error> { - self.send(|reply_to| ChainRequest::QueryPacketReceipt { - request, - include_proof, - reply_to, - }) - } - - fn query_unreceived_packets( - &self, - request: QueryUnreceivedPacketsRequest, - ) -> Result, Error> { - self.send(|reply_to| ChainRequest::QueryUnreceivedPackets { request, reply_to }) - } - - fn query_packet_acknowledgement( - &self, - request: QueryPacketAcknowledgementRequest, - include_proof: IncludeProof, - ) -> Result<(Vec, Option), Error> { - self.send(|reply_to| ChainRequest::QueryPacketAcknowledgement { - request, - include_proof, - reply_to, - }) - } - - fn query_packet_acknowledgements( - &self, - request: QueryPacketAcknowledgementsRequest, - ) -> Result<(Vec, Height), Error> { - self.send(|reply_to| ChainRequest::QueryPacketAcknowledgements { request, reply_to }) - } - - fn query_unreceived_acknowledgements( - &self, - request: QueryUnreceivedAcksRequest, - ) -> Result, Error> { - self.send(|reply_to| ChainRequest::QueryUnreceivedAcknowledgement { request, reply_to }) - } - - fn query_txs(&self, request: QueryTxRequest) -> Result, Error> { - self.send(|reply_to| ChainRequest::QueryPacketEventDataFromTxs { request, reply_to }) - } - - fn query_blocks( - &self, - request: QueryBlockRequest, - ) -> Result<(Vec, Vec), Error> { - self.send(|reply_to| ChainRequest::QueryPacketEventDataFromBlocks { request, reply_to }) - } - - fn query_host_consensus_state( - &self, - request: QueryHostConsensusStateRequest, - ) -> Result { - self.send(|reply_to| ChainRequest::QueryHostConsensusState { request, reply_to }) - } -} - -impl Serialize for BaseChainHandle { - fn serialize(&self, serializer: S) -> Result<::Ok, ::Error> - where - S: Serializer, - { - self.id().serialize(serializer) - } -} diff --git a/relayer/src/chain/handle/cache.rs b/relayer/src/chain/handle/cache.rs deleted file mode 100644 index 17b7562fa1..0000000000 --- a/relayer/src/chain/handle/cache.rs +++ /dev/null @@ -1,484 +0,0 @@ -use crossbeam_channel as channel; -use ibc::core::ics02_client::client_consensus::{AnyConsensusState, AnyConsensusStateWithHeight}; -use ibc::core::ics02_client::client_state::{AnyClientState, IdentifiedAnyClientState}; -use ibc::core::ics02_client::events::UpdateClient; -use ibc::core::ics02_client::misbehaviour::MisbehaviourEvidence; -use ibc::core::ics03_connection::connection::IdentifiedConnectionEnd; -use ibc::core::ics04_channel::channel::IdentifiedChannelEnd; -use ibc::core::ics04_channel::packet::{PacketMsgType, Sequence}; -use ibc::core::ics23_commitment::merkle::MerkleProof; -use ibc::query::QueryTxRequest; -use ibc::{ - core::ics02_client::header::AnyHeader, - core::ics03_connection::connection::ConnectionEnd, - core::ics03_connection::version::Version, - core::ics04_channel::channel::ChannelEnd, - core::ics23_commitment::commitment::CommitmentPrefix, - core::ics24_host::identifier::{ - ChainId, ChannelId, ClientId, ConnectionId, PortChannelId, PortId, - }, - events::IbcEvent, - proofs::Proofs, - 
query::QueryBlockRequest, - signer::Signer, - Height, -}; -use serde::{Serialize, Serializer}; - -use crate::account::Balance; -use crate::cache::{Cache, CacheStatus}; -use crate::chain::client::ClientSettings; -use crate::chain::endpoint::{ChainStatus, HealthCheck}; -use crate::chain::handle::{ChainHandle, ChainRequest, Subscription}; -use crate::chain::requests::{ - IncludeProof, QueryChannelClientStateRequest, QueryChannelRequest, QueryChannelsRequest, - QueryClientConnectionsRequest, QueryClientStateRequest, QueryClientStatesRequest, - QueryConnectionChannelsRequest, QueryConnectionRequest, QueryConnectionsRequest, - QueryConsensusStateRequest, QueryConsensusStatesRequest, QueryHostConsensusStateRequest, - QueryNextSequenceReceiveRequest, QueryPacketAcknowledgementRequest, - QueryPacketAcknowledgementsRequest, QueryPacketCommitmentRequest, - QueryPacketCommitmentsRequest, QueryPacketReceiptRequest, QueryUnreceivedAcksRequest, - QueryUnreceivedPacketsRequest, QueryUpgradedClientStateRequest, - QueryUpgradedConsensusStateRequest, -}; -use crate::chain::tracking::TrackedMsgs; -use crate::config::ChainConfig; -use crate::connection::ConnectionMsgType; -use crate::error::Error; -use crate::keyring::KeyEntry; -use crate::telemetry; - -/// A chain handle with support for caching. -/// To be used for the passive relaying mode (i.e., `start` CLI). -#[derive(Debug, Clone)] -pub struct CachingChainHandle { - inner: Handle, - cache: Cache, -} - -impl CachingChainHandle { - pub fn new(handle: Handle) -> Self { - Self { - inner: handle, - cache: Cache::new(), - } - } - - fn inner(&self) -> &Handle { - &self.inner - } -} - -impl Serialize for CachingChainHandle { - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - self.inner.serialize(serializer) - } -} - -impl ChainHandle for CachingChainHandle { - fn new(chain_id: ChainId, sender: channel::Sender) -> Self { - Self::new(Handle::new(chain_id, sender)) - } - - fn id(&self) -> ChainId { - self.inner().id() - } - - fn shutdown(&self) -> Result<(), Error> { - self.inner().shutdown() - } - - fn health_check(&self) -> Result { - self.inner().health_check() - } - - fn subscribe(&self) -> Result { - self.inner().subscribe() - } - - fn send_messages_and_wait_commit( - &self, - tracked_msgs: TrackedMsgs, - ) -> Result, Error> { - self.inner().send_messages_and_wait_commit(tracked_msgs) - } - - fn send_messages_and_wait_check_tx( - &self, - tracked_msgs: TrackedMsgs, - ) -> Result, Error> { - self.inner().send_messages_and_wait_check_tx(tracked_msgs) - } - - fn get_signer(&self) -> Result { - self.inner().get_signer() - } - - fn config(&self) -> Result { - self.inner().config() - } - - fn get_key(&self) -> Result { - self.inner().get_key() - } - - fn add_key(&self, key_name: String, key: KeyEntry) -> Result<(), Error> { - self.inner().add_key(key_name, key) - } - - fn ibc_version(&self) -> Result, Error> { - self.inner().ibc_version() - } - - fn query_balance(&self, key_name: Option) -> Result { - self.inner().query_balance(key_name) - } - - fn query_application_status(&self) -> Result { - self.inner().query_application_status() - } - - fn query_latest_height(&self) -> Result { - let handle = self.inner(); - let (result, in_cache) = self - .cache - .get_or_try_update_latest_height_with(|| handle.query_latest_height())?; - - if in_cache == CacheStatus::Hit { - telemetry!(query_cache_hit, &self.id(), "query_latest_height"); - } - - Ok(result) - } - - fn query_clients( - &self, - request: QueryClientStatesRequest, - ) -> Result, Error> { 
- self.inner().query_clients(request) - } - - // TODO: Introduce new query_client_state_latest to separate from this one. - fn query_client_state( - &self, - request: QueryClientStateRequest, - include_proof: IncludeProof, - ) -> Result<(AnyClientState, Option), Error> { - let handle = self.inner(); - match include_proof { - IncludeProof::Yes => handle.query_client_state(request, IncludeProof::Yes), - IncludeProof::No => { - if request.height.is_zero() { - let (result, in_cache) = self.cache.get_or_try_insert_client_state_with( - &request.client_id, - || { - handle - .query_client_state(request.clone(), IncludeProof::No) - .map(|(client_state, _)| client_state) - }, - )?; - - if in_cache == CacheStatus::Hit { - telemetry!(query_cache_hit, &self.id(), "query_client_state"); - } - - Ok((result, None)) - } else { - handle.query_client_state(request, IncludeProof::No) - } - } - } - } - - fn query_client_connections( - &self, - request: QueryClientConnectionsRequest, - ) -> Result, Error> { - self.inner().query_client_connections(request) - } - - fn query_consensus_states( - &self, - request: QueryConsensusStatesRequest, - ) -> Result, Error> { - self.inner().query_consensus_states(request) - } - - fn query_consensus_state( - &self, - request: QueryConsensusStateRequest, - include_proof: IncludeProof, - ) -> Result<(AnyConsensusState, Option), Error> { - self.inner().query_consensus_state(request, include_proof) - } - - fn query_upgraded_client_state( - &self, - request: QueryUpgradedClientStateRequest, - ) -> Result<(AnyClientState, MerkleProof), Error> { - self.inner().query_upgraded_client_state(request) - } - - fn query_upgraded_consensus_state( - &self, - request: QueryUpgradedConsensusStateRequest, - ) -> Result<(AnyConsensusState, MerkleProof), Error> { - self.inner().query_upgraded_consensus_state(request) - } - - fn query_commitment_prefix(&self) -> Result { - self.inner().query_commitment_prefix() - } - - fn query_compatible_versions(&self) -> Result, Error> { - self.inner().query_compatible_versions() - } - - fn query_connection( - &self, - request: QueryConnectionRequest, - include_proof: IncludeProof, - ) -> Result<(ConnectionEnd, Option), Error> { - let handle = self.inner(); - match include_proof { - IncludeProof::Yes => handle.query_connection(request, IncludeProof::Yes), - IncludeProof::No => { - if request.height.is_zero() { - let (result, in_cache) = self.cache.get_or_try_insert_connection_with( - &request.connection_id, - || { - handle - .query_connection(request.clone(), IncludeProof::No) - .map(|(conn_end, _)| conn_end) - }, - )?; - - if in_cache == CacheStatus::Hit { - telemetry!(query_cache_hit, &self.id(), "query_connection"); - } - - Ok((result, None)) - } else { - handle.query_connection(request, IncludeProof::No) - } - } - } - } - - fn query_connections( - &self, - request: QueryConnectionsRequest, - ) -> Result, Error> { - self.inner().query_connections(request) - } - - fn query_connection_channels( - &self, - request: QueryConnectionChannelsRequest, - ) -> Result, Error> { - self.inner().query_connection_channels(request) - } - - fn query_next_sequence_receive( - &self, - request: QueryNextSequenceReceiveRequest, - include_proof: IncludeProof, - ) -> Result<(Sequence, Option), Error> { - self.inner() - .query_next_sequence_receive(request, include_proof) - } - - fn query_channels( - &self, - request: QueryChannelsRequest, - ) -> Result, Error> { - self.inner().query_channels(request) - } - - fn query_channel( - &self, - request: QueryChannelRequest, - 
include_proof: IncludeProof, - ) -> Result<(ChannelEnd, Option), Error> { - let handle = self.inner(); - match include_proof { - IncludeProof::Yes => handle.query_channel(request, IncludeProof::Yes), - IncludeProof::No => { - if request.height.is_zero() { - let (result, in_cache) = self.cache.get_or_try_insert_channel_with( - &PortChannelId::new(request.channel_id, request.port_id.clone()), - || { - handle - .query_channel(request, IncludeProof::No) - .map(|(channel_end, _)| channel_end) - }, - )?; - - if in_cache == CacheStatus::Hit { - telemetry!(query_cache_hit, &self.id(), "query_channel"); - } - - Ok((result, None)) - } else { - handle.query_channel(request, IncludeProof::No) - } - } - } - } - - fn query_channel_client_state( - &self, - request: QueryChannelClientStateRequest, - ) -> Result, Error> { - self.inner().query_channel_client_state(request) - } - - fn build_header( - &self, - trusted_height: Height, - target_height: Height, - client_state: AnyClientState, - ) -> Result<(AnyHeader, Vec), Error> { - self.inner() - .build_header(trusted_height, target_height, client_state) - } - - /// Constructs a client state at the given height - fn build_client_state( - &self, - height: Height, - settings: ClientSettings, - ) -> Result { - self.inner().build_client_state(height, settings) - } - - /// Constructs a consensus state at the given height - fn build_consensus_state( - &self, - trusted: Height, - target: Height, - client_state: AnyClientState, - ) -> Result { - self.inner() - .build_consensus_state(trusted, target, client_state) - } - - fn check_misbehaviour( - &self, - update: UpdateClient, - client_state: AnyClientState, - ) -> Result, Error> { - self.inner().check_misbehaviour(update, client_state) - } - - fn build_connection_proofs_and_client_state( - &self, - message_type: ConnectionMsgType, - connection_id: &ConnectionId, - client_id: &ClientId, - height: Height, - ) -> Result<(Option, Proofs), Error> { - self.inner().build_connection_proofs_and_client_state( - message_type, - connection_id, - client_id, - height, - ) - } - - fn build_channel_proofs( - &self, - port_id: &PortId, - channel_id: &ChannelId, - height: Height, - ) -> Result { - self.inner() - .build_channel_proofs(port_id, channel_id, height) - } - - fn build_packet_proofs( - &self, - packet_type: PacketMsgType, - port_id: &PortId, - channel_id: &ChannelId, - sequence: Sequence, - height: Height, - ) -> Result { - self.inner() - .build_packet_proofs(packet_type, port_id, channel_id, sequence, height) - } - - fn query_packet_commitment( - &self, - request: QueryPacketCommitmentRequest, - include_proof: IncludeProof, - ) -> Result<(Vec, Option), Error> { - self.inner().query_packet_commitment(request, include_proof) - } - - fn query_packet_commitments( - &self, - request: QueryPacketCommitmentsRequest, - ) -> Result<(Vec, Height), Error> { - self.inner().query_packet_commitments(request) - } - - fn query_packet_receipt( - &self, - request: QueryPacketReceiptRequest, - include_proof: IncludeProof, - ) -> Result<(Vec, Option), Error> { - self.inner().query_packet_receipt(request, include_proof) - } - - fn query_unreceived_packets( - &self, - request: QueryUnreceivedPacketsRequest, - ) -> Result, Error> { - self.inner().query_unreceived_packets(request) - } - - fn query_packet_acknowledgement( - &self, - request: QueryPacketAcknowledgementRequest, - include_proof: IncludeProof, - ) -> Result<(Vec, Option), Error> { - self.inner() - .query_packet_acknowledgement(request, include_proof) - } - - fn 
query_packet_acknowledgements( - &self, - request: QueryPacketAcknowledgementsRequest, - ) -> Result<(Vec, Height), Error> { - self.inner().query_packet_acknowledgements(request) - } - - fn query_unreceived_acknowledgements( - &self, - request: QueryUnreceivedAcksRequest, - ) -> Result, Error> { - self.inner().query_unreceived_acknowledgements(request) - } - - fn query_txs(&self, request: QueryTxRequest) -> Result, Error> { - self.inner().query_txs(request) - } - - fn query_blocks( - &self, - request: QueryBlockRequest, - ) -> Result<(Vec, Vec), Error> { - self.inner().query_blocks(request) - } - - fn query_host_consensus_state( - &self, - request: QueryHostConsensusStateRequest, - ) -> Result { - self.inner.query_host_consensus_state(request) - } -} diff --git a/relayer/src/chain/handle/counting.rs b/relayer/src/chain/handle/counting.rs deleted file mode 100644 index 1d18f1114f..0000000000 --- a/relayer/src/chain/handle/counting.rs +++ /dev/null @@ -1,468 +0,0 @@ -use crossbeam_channel as channel; -use ibc::core::ics02_client::client_consensus::{AnyConsensusState, AnyConsensusStateWithHeight}; -use ibc::core::ics02_client::client_state::{AnyClientState, IdentifiedAnyClientState}; -use ibc::core::ics02_client::events::UpdateClient; -use ibc::core::ics02_client::misbehaviour::MisbehaviourEvidence; -use ibc::core::ics03_connection::connection::IdentifiedConnectionEnd; -use ibc::core::ics04_channel::channel::IdentifiedChannelEnd; -use ibc::core::ics04_channel::packet::{PacketMsgType, Sequence}; -use ibc::core::ics23_commitment::merkle::MerkleProof; -use ibc::query::QueryTxRequest; -use ibc::{ - core::ics02_client::header::AnyHeader, - core::ics03_connection::connection::ConnectionEnd, - core::ics03_connection::version::Version, - core::ics04_channel::channel::ChannelEnd, - core::ics23_commitment::commitment::CommitmentPrefix, - core::ics24_host::identifier::{ChainId, ChannelId, ClientId, ConnectionId, PortId}, - events::IbcEvent, - proofs::Proofs, - query::QueryBlockRequest, - signer::Signer, - Height, -}; -use serde::{Serialize, Serializer}; -use std::collections::HashMap; -use std::sync::{Arc, RwLock, RwLockReadGuard}; -use tracing::debug; - -use crate::account::Balance; -use crate::chain::client::ClientSettings; -use crate::chain::endpoint::{ChainStatus, HealthCheck}; -use crate::chain::handle::{ChainHandle, ChainRequest, Subscription}; -use crate::chain::requests::{ - IncludeProof, QueryChannelClientStateRequest, QueryChannelRequest, QueryChannelsRequest, - QueryClientConnectionsRequest, QueryClientStateRequest, QueryClientStatesRequest, - QueryConnectionChannelsRequest, QueryConnectionRequest, QueryConnectionsRequest, - QueryConsensusStateRequest, QueryConsensusStatesRequest, QueryHostConsensusStateRequest, - QueryNextSequenceReceiveRequest, QueryPacketAcknowledgementRequest, - QueryPacketAcknowledgementsRequest, QueryPacketCommitmentRequest, - QueryPacketCommitmentsRequest, QueryPacketReceiptRequest, QueryUnreceivedAcksRequest, - QueryUnreceivedPacketsRequest, QueryUpgradedClientStateRequest, - QueryUpgradedConsensusStateRequest, -}; -use crate::chain::tracking::TrackedMsgs; -use crate::config::ChainConfig; -use crate::error::Error; -use crate::util::lock::LockExt; -use crate::{connection::ConnectionMsgType, keyring::KeyEntry}; - -#[derive(Debug, Clone)] -pub struct CountingChainHandle { - inner: Handle, - metrics: Arc>>, -} - -impl CountingChainHandle { - pub fn new(handle: Handle) -> Self { - Self { - inner: handle, - metrics: Arc::new(RwLock::new(HashMap::new())), - } - } - - fn 
inner(&self) -> &Handle { - &self.inner - } - - pub fn metrics(&self) -> RwLockReadGuard<'_, HashMap> { - self.metrics.acquire_read() - } - - fn inc_metric(&self, key: &str) { - let mut metrics = self.metrics.acquire_write(); - if let Some(entry) = metrics.get_mut(key) { - *entry += 1; - } else { - metrics.insert(key.to_string(), 1); - } - } -} - -impl Serialize for CountingChainHandle { - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - self.inner.serialize(serializer) - } -} - -impl ChainHandle for CountingChainHandle { - fn new(chain_id: ChainId, sender: channel::Sender) -> Self { - Self::new(Handle::new(chain_id, sender)) - } - - fn id(&self) -> ChainId { - self.inner().id() - } - - fn shutdown(&self) -> Result<(), Error> { - debug!( - "shutting down chain handle {}. usage metrics for chain: \n {:?}", - self.id(), - self.metrics() - ); - - self.inner().shutdown() - } - - fn health_check(&self) -> Result { - self.inc_metric("health_check"); - self.inner().health_check() - } - - fn subscribe(&self) -> Result { - self.inc_metric("subscribe"); - self.inner().subscribe() - } - - fn send_messages_and_wait_commit( - &self, - tracked_msgs: TrackedMsgs, - ) -> Result, Error> { - self.inc_metric("send_messages_and_wait_commit"); - self.inner().send_messages_and_wait_commit(tracked_msgs) - } - - fn send_messages_and_wait_check_tx( - &self, - tracked_msgs: TrackedMsgs, - ) -> Result, Error> { - self.inc_metric("send_messages_and_wait_check_tx"); - self.inner().send_messages_and_wait_check_tx(tracked_msgs) - } - - fn get_signer(&self) -> Result { - self.inc_metric("get_signer"); - self.inner().get_signer() - } - - fn config(&self) -> Result { - self.inc_metric("config"); - self.inner().config() - } - - fn get_key(&self) -> Result { - self.inc_metric("get_key"); - self.inner().get_key() - } - - fn add_key(&self, key_name: String, key: KeyEntry) -> Result<(), Error> { - self.inc_metric("add_key"); - self.inner().add_key(key_name, key) - } - - fn ibc_version(&self) -> Result, Error> { - self.inc_metric("ibc_version"); - self.inner().ibc_version() - } - - fn query_balance(&self, key_name: Option) -> Result { - self.inc_metric("query_balance"); - self.inner().query_balance(key_name) - } - - fn query_application_status(&self) -> Result { - self.inc_metric("query_application_status"); - self.inner().query_application_status() - } - - fn query_latest_height(&self) -> Result { - self.inc_metric("query_latest_height"); - self.inner().query_latest_height() - } - - fn query_clients( - &self, - request: QueryClientStatesRequest, - ) -> Result, Error> { - self.inc_metric("query_clients"); - self.inner().query_clients(request) - } - - fn query_client_state( - &self, - request: QueryClientStateRequest, - include_proof: IncludeProof, - ) -> Result<(AnyClientState, Option), Error> { - self.inc_metric(&format!( - "query_client_state({}, {})", - request.client_id, request.height - )); - self.inner().query_client_state(request, include_proof) - } - - fn query_client_connections( - &self, - request: QueryClientConnectionsRequest, - ) -> Result, Error> { - self.inc_metric("query_client_connections"); - self.inner().query_client_connections(request) - } - - fn query_consensus_states( - &self, - request: QueryConsensusStatesRequest, - ) -> Result, Error> { - self.inc_metric("query_consensus_states"); - self.inner().query_consensus_states(request) - } - - fn query_consensus_state( - &self, - request: QueryConsensusStateRequest, - include_proof: IncludeProof, - ) -> Result<(AnyConsensusState, 
Option), Error> { - self.inc_metric("query_consensus_state"); - self.inner().query_consensus_state(request, include_proof) - } - - fn query_upgraded_client_state( - &self, - request: QueryUpgradedClientStateRequest, - ) -> Result<(AnyClientState, MerkleProof), Error> { - self.inc_metric("query_upgraded_client_state"); - self.inner().query_upgraded_client_state(request) - } - - fn query_upgraded_consensus_state( - &self, - request: QueryUpgradedConsensusStateRequest, - ) -> Result<(AnyConsensusState, MerkleProof), Error> { - self.inc_metric("query_upgraded_consensus_state"); - self.inner().query_upgraded_consensus_state(request) - } - - fn query_commitment_prefix(&self) -> Result { - self.inc_metric("query_commitment_prefix"); - self.inner().query_commitment_prefix() - } - - fn query_compatible_versions(&self) -> Result, Error> { - self.inc_metric("query_compatible_versions"); - self.inner().query_compatible_versions() - } - - fn query_connection( - &self, - request: QueryConnectionRequest, - include_proof: IncludeProof, - ) -> Result<(ConnectionEnd, Option), Error> { - self.inc_metric("query_connection"); - self.inner().query_connection(request, include_proof) - } - - fn query_connections( - &self, - request: QueryConnectionsRequest, - ) -> Result, Error> { - self.inc_metric("query_connections"); - self.inner().query_connections(request) - } - - fn query_connection_channels( - &self, - request: QueryConnectionChannelsRequest, - ) -> Result, Error> { - self.inc_metric("query_connection_channels"); - self.inner().query_connection_channels(request) - } - - fn query_next_sequence_receive( - &self, - request: QueryNextSequenceReceiveRequest, - include_proof: IncludeProof, - ) -> Result<(Sequence, Option), Error> { - self.inc_metric("query_next_sequence_receive"); - self.inner() - .query_next_sequence_receive(request, include_proof) - } - - fn query_channels( - &self, - request: QueryChannelsRequest, - ) -> Result, Error> { - self.inc_metric("query_channels"); - self.inner().query_channels(request) - } - - fn query_channel( - &self, - request: QueryChannelRequest, - include_proof: IncludeProof, - ) -> Result<(ChannelEnd, Option), Error> { - self.inc_metric("query_channel"); - self.inner().query_channel(request, include_proof) - } - - fn query_channel_client_state( - &self, - request: QueryChannelClientStateRequest, - ) -> Result, Error> { - self.inc_metric("query_channel_client_state"); - self.inner().query_channel_client_state(request) - } - - fn build_header( - &self, - trusted_height: Height, - target_height: Height, - client_state: AnyClientState, - ) -> Result<(AnyHeader, Vec), Error> { - self.inc_metric("build_header"); - self.inner() - .build_header(trusted_height, target_height, client_state) - } - - /// Constructs a client state at the given height - fn build_client_state( - &self, - height: Height, - options: ClientSettings, - ) -> Result { - self.inc_metric("build_client_state"); - self.inner().build_client_state(height, options) - } - - /// Constructs a consensus state at the given height - fn build_consensus_state( - &self, - trusted: Height, - target: Height, - client_state: AnyClientState, - ) -> Result { - self.inc_metric("build_consensus_state"); - self.inner() - .build_consensus_state(trusted, target, client_state) - } - - fn check_misbehaviour( - &self, - update: UpdateClient, - client_state: AnyClientState, - ) -> Result, Error> { - self.inc_metric("check_misbehaviour"); - self.inner().check_misbehaviour(update, client_state) - } - - fn 
build_connection_proofs_and_client_state( - &self, - message_type: ConnectionMsgType, - connection_id: &ConnectionId, - client_id: &ClientId, - height: Height, - ) -> Result<(Option, Proofs), Error> { - self.inc_metric("build_connection_proofs_and_client_state"); - self.inner().build_connection_proofs_and_client_state( - message_type, - connection_id, - client_id, - height, - ) - } - - fn build_channel_proofs( - &self, - port_id: &PortId, - channel_id: &ChannelId, - height: Height, - ) -> Result { - self.inc_metric("build_channel_proofs"); - self.inner() - .build_channel_proofs(port_id, channel_id, height) - } - - fn build_packet_proofs( - &self, - packet_type: PacketMsgType, - port_id: &PortId, - channel_id: &ChannelId, - sequence: Sequence, - height: Height, - ) -> Result { - self.inc_metric("build_packet_proofs"); - self.inner() - .build_packet_proofs(packet_type, port_id, channel_id, sequence, height) - } - - fn query_packet_commitment( - &self, - request: QueryPacketCommitmentRequest, - include_proof: IncludeProof, - ) -> Result<(Vec, Option), Error> { - self.inc_metric("query_packet_commitment"); - self.inner().query_packet_commitment(request, include_proof) - } - - fn query_packet_commitments( - &self, - request: QueryPacketCommitmentsRequest, - ) -> Result<(Vec, Height), Error> { - self.inc_metric("query_packet_commitments"); - self.inner().query_packet_commitments(request) - } - - fn query_packet_receipt( - &self, - request: QueryPacketReceiptRequest, - include_proof: IncludeProof, - ) -> Result<(Vec, Option), Error> { - self.inc_metric("query_packet_receipt"); - self.inner().query_packet_receipt(request, include_proof) - } - - fn query_unreceived_packets( - &self, - request: QueryUnreceivedPacketsRequest, - ) -> Result, Error> { - self.inc_metric("query_unreceived_packets"); - self.inner().query_unreceived_packets(request) - } - - fn query_packet_acknowledgement( - &self, - request: QueryPacketAcknowledgementRequest, - include_proof: IncludeProof, - ) -> Result<(Vec, Option), Error> { - self.inc_metric("query_packet_acknowledgement"); - self.inner() - .query_packet_acknowledgement(request, include_proof) - } - - fn query_packet_acknowledgements( - &self, - request: QueryPacketAcknowledgementsRequest, - ) -> Result<(Vec, Height), Error> { - self.inc_metric("query_packet_acknowledgements"); - self.inner().query_packet_acknowledgements(request) - } - - fn query_unreceived_acknowledgements( - &self, - request: QueryUnreceivedAcksRequest, - ) -> Result, Error> { - self.inc_metric("query_unreceived_acknowledgement"); - self.inner().query_unreceived_acknowledgements(request) - } - - fn query_txs(&self, request: QueryTxRequest) -> Result, Error> { - self.inc_metric("query_txs"); - self.inner().query_txs(request) - } - - fn query_blocks( - &self, - request: QueryBlockRequest, - ) -> Result<(Vec, Vec), Error> { - self.inc_metric("query_blocks"); - self.inner().query_blocks(request) - } - - fn query_host_consensus_state( - &self, - request: QueryHostConsensusStateRequest, - ) -> Result { - self.inner.query_host_consensus_state(request) - } -} diff --git a/relayer/src/chain/mock.rs b/relayer/src/chain/mock.rs deleted file mode 100644 index a98461ef46..0000000000 --- a/relayer/src/chain/mock.rs +++ /dev/null @@ -1,484 +0,0 @@ -use alloc::sync::Arc; -use core::ops::Add; -use core::time::Duration; -use ibc::core::ics23_commitment::merkle::MerkleProof; - -use crossbeam_channel as channel; -use tendermint_testgen::light_block::TmLightBlock; -use tokio::runtime::Runtime; - -use 
ibc::clients::ics07_tendermint::client_state::{ - AllowUpdate, ClientState as TendermintClientState, -}; -use ibc::clients::ics07_tendermint::consensus_state::ConsensusState as TendermintConsensusState; -use ibc::clients::ics07_tendermint::header::Header as TendermintHeader; -use ibc::core::ics02_client::client_consensus::{AnyConsensusState, AnyConsensusStateWithHeight}; -use ibc::core::ics02_client::client_state::{AnyClientState, IdentifiedAnyClientState}; -use ibc::core::ics03_connection::connection::{ConnectionEnd, IdentifiedConnectionEnd}; -use ibc::core::ics04_channel::channel::{ChannelEnd, IdentifiedChannelEnd}; -use ibc::core::ics04_channel::context::ChannelReader; -use ibc::core::ics04_channel::packet::Sequence; -use ibc::core::ics23_commitment::{commitment::CommitmentPrefix, specs::ProofSpecs}; -use ibc::core::ics24_host::identifier::{ChainId, ConnectionId}; -use ibc::events::IbcEvent; -use ibc::mock::context::MockContext; -use ibc::mock::host::HostType; -use ibc::query::{QueryBlockRequest, QueryTxRequest}; -use ibc::relayer::ics18_relayer::context::Ics18Context; -use ibc::signer::Signer; -use ibc::test_utils::get_dummy_account_id; -use ibc::Height; - -use crate::account::Balance; -use crate::chain::client::ClientSettings; -use crate::chain::endpoint::{ChainEndpoint, ChainStatus, HealthCheck}; -use crate::chain::requests::{ - QueryChannelClientStateRequest, QueryChannelRequest, QueryClientStatesRequest, -}; -use crate::config::ChainConfig; -use crate::error::Error; -use crate::event::monitor::{EventReceiver, EventSender, TxMonitorCmd}; -use crate::keyring::{KeyEntry, KeyRing}; -use crate::light_client::Verified; -use crate::light_client::{mock::LightClient as MockLightClient, LightClient}; - -use super::requests::{ - IncludeProof, QueryChannelsRequest, QueryClientConnectionsRequest, QueryClientStateRequest, - QueryConnectionChannelsRequest, QueryConnectionRequest, QueryConnectionsRequest, - QueryConsensusStateRequest, QueryConsensusStatesRequest, QueryHostConsensusStateRequest, - QueryNextSequenceReceiveRequest, QueryPacketAcknowledgementRequest, - QueryPacketAcknowledgementsRequest, QueryPacketCommitmentRequest, - QueryPacketCommitmentsRequest, QueryPacketReceiptRequest, QueryUnreceivedAcksRequest, - QueryUnreceivedPacketsRequest, QueryUpgradedClientStateRequest, - QueryUpgradedConsensusStateRequest, -}; -use super::tracking::TrackedMsgs; - -/// The representation of a mocked chain as the relayer sees it. -/// The relayer runtime and the light client will engage with the MockChain to query/send tx; the -/// primary interface for doing so is captured by `ICS18Context` which this struct can access via -/// the `context` field. 
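// Illustrative sketch (editorial addition, not part of the removed file): the doc
// comment above describes `MockChain` as a thin endpoint that forwards queries and
// message submission to an in-memory context. A minimal, self-contained version of
// that shape, with toy types standing in for `MockContext` / `Ics18Context`:
struct InMemoryContext {
    height: u64,
    delivered: Vec<String>,
}

impl InMemoryContext {
    // Record the submitted messages and emit one synthetic "event" per message.
    fn send(&mut self, msgs: Vec<String>) -> Vec<String> {
        self.delivered.extend(msgs.iter().cloned());
        msgs.into_iter().map(|m| format!("event for {}", m)).collect()
    }

    fn host_height(&self) -> u64 {
        self.height
    }
}

// Like the `MockChain` defined just below, the endpoint owns the context and forwards to it.
struct ToyMockEndpoint {
    context: InMemoryContext,
}

impl ToyMockEndpoint {
    fn send_messages_and_wait_commit(&mut self, msgs: Vec<String>) -> Vec<String> {
        self.context.send(msgs)
    }

    fn query_latest_height(&self) -> u64 {
        self.context.host_height()
    }
}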
-pub struct MockChain { - config: ChainConfig, - context: MockContext, - - // keep a reference to event sender to prevent it from being dropped - _event_sender: EventSender, - event_receiver: EventReceiver, -} - -impl MockChain { - fn trusting_period(&self) -> Duration { - self.config - .trusting_period - .unwrap_or_else(|| Duration::from_secs(14 * 24 * 60 * 60)) // 14 days - } -} - -impl ChainEndpoint for MockChain { - type LightBlock = TmLightBlock; - type Header = TendermintHeader; - type ConsensusState = TendermintConsensusState; - type ClientState = TendermintClientState; - type LightClient = MockLightClient; - - fn bootstrap(config: ChainConfig, _rt: Arc) -> Result { - let (sender, receiver) = channel::unbounded(); - Ok(MockChain { - config: config.clone(), - context: MockContext::new( - config.id.clone(), - HostType::SyntheticTendermint, - 50, - Height::new(config.id.version(), 20), - ), - _event_sender: sender, - event_receiver: receiver, - }) - } - - fn init_light_client(&self) -> Result { - Ok(MockLightClient::new(self)) - } - - fn init_event_monitor( - &self, - _rt: Arc, - ) -> Result<(EventReceiver, TxMonitorCmd), Error> { - let (tx, _) = crossbeam_channel::unbounded(); - Ok((self.event_receiver.clone(), tx)) - } - - fn id(&self) -> &ChainId { - &self.config.id - } - - fn health_check(&self) -> Result { - Ok(HealthCheck::Healthy) - } - - fn shutdown(self) -> Result<(), Error> { - Ok(()) - } - - fn keybase(&self) -> &KeyRing { - unimplemented!() - } - - fn keybase_mut(&mut self) -> &mut KeyRing { - unimplemented!() - } - - fn send_messages_and_wait_commit( - &mut self, - tracked_msgs: TrackedMsgs, - ) -> Result, Error> { - // Use the ICS18Context interface to submit the set of messages. - let events = self.context.send(tracked_msgs.msgs).map_err(Error::ics18)?; - - Ok(events) - } - - fn send_messages_and_wait_check_tx( - &mut self, - _tracked_msgs: TrackedMsgs, - ) -> Result, Error> { - todo!() - } - - fn get_signer(&mut self) -> Result { - Ok(get_dummy_account_id()) - } - - fn config(&self) -> ChainConfig { - self.config.clone() - } - - fn get_key(&mut self) -> Result { - unimplemented!() - } - - fn add_key(&mut self, _key_name: &str, _key: KeyEntry) -> Result<(), Error> { - unimplemented!() - } - - fn ibc_version(&self) -> Result, Error> { - Ok(Some(semver::Version::new(3, 0, 0))) - } - - fn query_balance(&self, _key_name: Option) -> Result { - unimplemented!() - } - - fn query_commitment_prefix(&self) -> Result { - unimplemented!() - } - - fn query_application_status(&self) -> Result { - Ok(ChainStatus { - height: self.context.host_height(), - timestamp: self.context.host_timestamp(), - }) - } - - fn query_clients( - &self, - _request: QueryClientStatesRequest, - ) -> Result, Error> { - unimplemented!() - } - - fn query_client_state( - &self, - request: QueryClientStateRequest, - _include_proof: IncludeProof, - ) -> Result<(AnyClientState, Option), Error> { - // TODO: unclear what are the scenarios where we need to take height into account. 
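// Illustrative sketch (editorial addition): the `_event_sender` field above is kept
// only so that at least one sender for the event channel stays alive; once every
// sender has been dropped, the receiver reports the channel as disconnected. A
// minimal demonstration with std channels (the removed code uses crossbeam_channel,
// which behaves the same way on this point):
fn demo_channel_stays_open_while_sender_lives() {
    use std::sync::mpsc;

    // Sender kept around: the receiver keeps working.
    let (kept_sender, receiver) = mpsc::channel::<u64>();
    kept_sender.send(1).expect("receiver is alive");
    assert_eq!(receiver.recv().unwrap(), 1);

    // Only sender dropped: the receiver immediately observes disconnection.
    let (dropped_sender, receiver) = mpsc::channel::<u64>();
    drop(dropped_sender);
    assert!(receiver.recv().is_err());
}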
- let client_state = self - .context - .query_client_full_state(&request.client_id) - .ok_or_else(Error::empty_response_value)?; - - Ok((client_state, None)) - } - - fn query_upgraded_client_state( - &self, - _request: QueryUpgradedClientStateRequest, - ) -> Result<(AnyClientState, MerkleProof), Error> { - unimplemented!() - } - - fn query_connection( - &self, - _request: QueryConnectionRequest, - _include_proof: IncludeProof, - ) -> Result<(ConnectionEnd, Option), Error> { - unimplemented!() - } - - fn query_client_connections( - &self, - _request: QueryClientConnectionsRequest, - ) -> Result, Error> { - unimplemented!() - } - - fn query_connections( - &self, - _request: QueryConnectionsRequest, - ) -> Result, Error> { - unimplemented!() - } - - fn query_connection_channels( - &self, - _request: QueryConnectionChannelsRequest, - ) -> Result, Error> { - unimplemented!() - } - - fn query_channels( - &self, - _request: QueryChannelsRequest, - ) -> Result, Error> { - unimplemented!() - } - - fn query_channel( - &self, - _request: QueryChannelRequest, - _include_proof: IncludeProof, - ) -> Result<(ChannelEnd, Option), Error> { - unimplemented!() - } - - fn query_channel_client_state( - &self, - _request: QueryChannelClientStateRequest, - ) -> Result, Error> { - unimplemented!() - } - - fn query_packet_commitment( - &self, - _request: QueryPacketCommitmentRequest, - _include_proof: IncludeProof, - ) -> Result<(Vec, Option), Error> { - unimplemented!() - } - - fn query_packet_commitments( - &self, - _request: QueryPacketCommitmentsRequest, - ) -> Result<(Vec, Height), Error> { - unimplemented!() - } - - fn query_packet_receipt( - &self, - _request: QueryPacketReceiptRequest, - _include_proof: IncludeProof, - ) -> Result<(Vec, Option), Error> { - unimplemented!() - } - - fn query_unreceived_packets( - &self, - _request: QueryUnreceivedPacketsRequest, - ) -> Result, Error> { - unimplemented!() - } - - fn query_packet_acknowledgement( - &self, - _request: QueryPacketAcknowledgementRequest, - _include_proof: IncludeProof, - ) -> Result<(Vec, Option), Error> { - unimplemented!() - } - - fn query_packet_acknowledgements( - &self, - _request: QueryPacketAcknowledgementsRequest, - ) -> Result<(Vec, Height), Error> { - unimplemented!() - } - - fn query_unreceived_acknowledgements( - &self, - _request: QueryUnreceivedAcksRequest, - ) -> Result, Error> { - unimplemented!() - } - - fn query_next_sequence_receive( - &self, - _request: QueryNextSequenceReceiveRequest, - _include_proof: IncludeProof, - ) -> Result<(Sequence, Option), Error> { - unimplemented!() - } - - fn query_txs(&self, _request: QueryTxRequest) -> Result, Error> { - unimplemented!() - } - - fn query_blocks( - &self, - _request: QueryBlockRequest, - ) -> Result<(Vec, Vec), Error> { - unimplemented!() - } - - fn query_host_consensus_state( - &self, - _request: QueryHostConsensusStateRequest, - ) -> Result { - unimplemented!() - } - - fn build_client_state( - &self, - height: Height, - settings: ClientSettings, - ) -> Result { - let ClientSettings::Tendermint(settings) = settings; - let trusting_period = settings - .trusting_period - .unwrap_or_else(|| self.trusting_period()); - - let client_state = TendermintClientState::new( - self.id().clone(), - settings.trust_threshold, - trusting_period, - self.trusting_period().add(Duration::from_secs(1000)), - settings.max_clock_drift, - height, - ProofSpecs::default(), - vec!["upgrade/upgradedClient".to_string()], - AllowUpdate { - after_expiry: false, - after_misbehaviour: false, - }, - ) - 
.map_err(Error::ics07)?; - - Ok(client_state) - } - - fn build_consensus_state( - &self, - light_block: Self::LightBlock, - ) -> Result { - Ok(Self::ConsensusState::from(light_block.signed_header.header)) - } - - fn build_header( - &self, - trusted_height: Height, - target_height: Height, - client_state: &AnyClientState, - light_client: &mut Self::LightClient, - ) -> Result<(Self::Header, Vec), Error> { - let succ_trusted = light_client.fetch(trusted_height.increment())?; - - let Verified { target, supporting } = - light_client.verify(trusted_height, target_height, client_state)?; - - let target_header = Self::Header { - signed_header: target.signed_header, - validator_set: target.validators, - trusted_height, - trusted_validator_set: succ_trusted.validators.clone(), - }; - - let supporting_headers = supporting - .into_iter() - .map(|h| Self::Header { - signed_header: h.signed_header, - validator_set: h.validators, - trusted_height, - trusted_validator_set: succ_trusted.validators.clone(), - }) - .collect(); - - Ok((target_header, supporting_headers)) - } - - fn query_consensus_states( - &self, - request: QueryConsensusStatesRequest, - ) -> Result, Error> { - Ok(self.context.consensus_states(&request.client_id)) - } - - fn query_consensus_state( - &self, - request: QueryConsensusStateRequest, - include_proof: IncludeProof, - ) -> Result<(AnyConsensusState, Option), Error> { - // IncludeProof::Yes not implemented - assert!(matches!(include_proof, IncludeProof::No)); - - let consensus_states = self.context.consensus_states(&request.client_id); - let consensus_state = consensus_states - .into_iter() - .find(|s| s.height == request.consensus_height) - .ok_or_else(|| Error::query("Invalid consensus height".into()))? - .consensus_state; - Ok((consensus_state, None)) - } - - fn query_upgraded_consensus_state( - &self, - _request: QueryUpgradedConsensusStateRequest, - ) -> Result<(AnyConsensusState, MerkleProof), Error> { - unimplemented!() - } -} - -// For integration tests with the modules -#[cfg(test)] -pub mod test_utils { - use core::str::FromStr; - use core::time::Duration; - - use ibc::core::ics24_host::identifier::ChainId; - - use crate::{ - chain::ChainType, - config::{AddressType, ChainConfig, GasPrice, PacketFilter}, - }; - - /// Returns a very minimal chain configuration, to be used in initializing `MockChain`s. 
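// Illustrative usage sketch (editorial addition): how a test might combine the
// helper defined just below with the `MockChain` endpoint removed above. The exact
// signatures (`ChainEndpoint::bootstrap`, the `Error` type) are assumed to match the
// removed code; treat this as a sketch, not a drop-in test.
fn bootstrap_mock_chain_for_tests() -> Result<MockChain, Error> {
    // `ChainEndpoint` must be in scope for `bootstrap` to resolve.
    let config = test_utils::get_basic_chain_config("mockchain-0");
    let rt = std::sync::Arc::new(tokio::runtime::Runtime::new().expect("tokio runtime"));
    MockChain::bootstrap(config, rt)
}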
- pub fn get_basic_chain_config(id: &str) -> ChainConfig { - ChainConfig { - id: ChainId::from_str(id).unwrap(), - r#type: ChainType::Mock, - rpc_addr: "http://127.0.0.1:26656".parse().unwrap(), - grpc_addr: "http://127.0.0.1:9090".parse().unwrap(), - websocket_addr: "ws://127.0.0.1:26656/websocket".parse().unwrap(), - rpc_timeout: crate::config::default::rpc_timeout(), - account_prefix: "".to_string(), - key_name: "".to_string(), - store_prefix: "".to_string(), - default_gas: None, - key_store_type: Default::default(), - max_gas: None, - gas_price: GasPrice::new(0.001, "uatom".to_string()), - gas_adjustment: None, - fee_granter: None, - max_msg_num: Default::default(), - max_tx_size: Default::default(), - clock_drift: Duration::from_secs(5), - max_block_time: Duration::from_secs(10), - trusting_period: Some(Duration::from_secs(14 * 24 * 60 * 60)), // 14 days - trust_threshold: Default::default(), - packet_filter: PacketFilter::default(), - address_type: AddressType::default(), - memo_prefix: Default::default(), - proof_specs: Default::default(), - } - } -} diff --git a/relayer/src/chain/requests.rs b/relayer/src/chain/requests.rs deleted file mode 100644 index 027df4e3d4..0000000000 --- a/relayer/src/chain/requests.rs +++ /dev/null @@ -1,335 +0,0 @@ -use ibc::core::ics04_channel::packet::Sequence; -use ibc::core::ics24_host::identifier::{ChannelId, ClientId, ConnectionId, PortId}; -use ibc::Height; -use ibc_proto::cosmos::base::query::v1beta1::PageRequest as RawPageRequest; -use ibc_proto::ibc::core::channel::v1::{ - QueryChannelClientStateRequest as RawQueryChannelClientStateRequest, - QueryChannelsRequest as RawQueryChannelsRequest, - QueryConnectionChannelsRequest as RawQueryConnectionChannelsRequest, - QueryNextSequenceReceiveRequest as RawQueryNextSequenceReceiveRequest, - QueryPacketAcknowledgementsRequest as RawQueryPacketAcknowledgementsRequest, - QueryPacketCommitmentsRequest as RawQueryPacketCommitmentsRequest, - QueryUnreceivedAcksRequest as RawQueryUnreceivedAcksRequest, - QueryUnreceivedPacketsRequest as RawQueryUnreceivedPacketsRequest, -}; -use ibc_proto::ibc::core::client::v1::{ - QueryClientStatesRequest as RawQueryClientStatesRequest, - QueryConsensusStatesRequest as RawQueryConsensusStatesRequest, -}; -use ibc_proto::ibc::core::connection::v1::{ - QueryClientConnectionsRequest as RawQueryClientConnectionsRequest, - QueryConnectionsRequest as RawQueryConnectionsRequest, -}; - -use serde::{Deserialize, Serialize}; - -/// Defines a type to be used in select requests to specify whether or not a proof should be -/// returned along with the response. -#[derive(Clone, Copy, Debug, Serialize, Deserialize)] -pub enum IncludeProof { - Yes, - No, -} - -#[derive(Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize)] -pub struct PageRequest { - /// key is a value returned in PageResponse.next_key to begin - /// querying the next page most efficiently. Only one of offset or key - /// should be set. - pub key: ::prost::alloc::vec::Vec, - /// offset is a numeric offset that can be used when key is unavailable. - /// It is less efficient than using key. Only one of offset or key should - /// be set. - pub offset: u64, - /// limit is the total number of results to be returned in the result page. - /// If left empty it will default to a value to be set by each app. - pub limit: u64, - /// count_total is set to true to indicate that the result set should include - /// a count of the total number of items available for pagination in UIs. 
- /// count_total is only respected when offset is used. It is ignored when key - /// is set. - pub count_total: bool, - /// reverse is set to true if results are to be returned in the descending order. - pub reverse: bool, -} - -impl PageRequest { - pub fn all() -> PageRequest { - PageRequest { - limit: u64::MAX, - ..Default::default() - } - } -} - -impl From for RawPageRequest { - fn from(request: PageRequest) -> Self { - RawPageRequest { - key: request.key, - offset: request.offset, - limit: request.limit, - count_total: request.count_total, - reverse: request.reverse, - } - } -} - -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct QueryClientStateRequest { - pub client_id: ClientId, - pub height: Height, -} - -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct QueryClientStatesRequest { - pub pagination: Option, -} - -impl From for RawQueryClientStatesRequest { - fn from(request: QueryClientStatesRequest) -> Self { - RawQueryClientStatesRequest { - pagination: request.pagination.map(|pagination| pagination.into()), - } - } -} - -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct QueryConsensusStateRequest { - pub client_id: ClientId, - pub consensus_height: Height, - pub query_height: Height, -} - -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct QueryUpgradedClientStateRequest { - pub height: Height, -} - -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct QueryUpgradedConsensusStateRequest { - pub height: Height, -} - -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct QueryConsensusStatesRequest { - pub client_id: ClientId, - pub pagination: Option, -} - -impl From for RawQueryConsensusStatesRequest { - fn from(request: QueryConsensusStatesRequest) -> Self { - RawQueryConsensusStatesRequest { - client_id: request.client_id.to_string(), - pagination: request.pagination.map(|pagination| pagination.into()), - } - } -} - -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct QueryConnectionsRequest { - pub pagination: Option, -} - -impl From for RawQueryConnectionsRequest { - fn from(request: QueryConnectionsRequest) -> Self { - RawQueryConnectionsRequest { - pagination: request.pagination.map(|pagination| pagination.into()), - } - } -} - -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct QueryClientConnectionsRequest { - pub client_id: ClientId, -} - -impl From for RawQueryClientConnectionsRequest { - fn from(request: QueryClientConnectionsRequest) -> Self { - RawQueryClientConnectionsRequest { - client_id: request.client_id.to_string(), - } - } -} - -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct QueryConnectionRequest { - pub connection_id: ConnectionId, - pub height: Height, -} - -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct QueryConnectionChannelsRequest { - pub connection_id: ConnectionId, - pub pagination: Option, -} - -impl From for RawQueryConnectionChannelsRequest { - fn from(request: QueryConnectionChannelsRequest) -> Self { - RawQueryConnectionChannelsRequest { - connection: request.connection_id.to_string(), - pagination: request.pagination.map(|pagination| pagination.into()), - } - } -} - -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct QueryChannelsRequest { - pub pagination: Option, -} - -impl From for RawQueryChannelsRequest { - fn from(request: QueryChannelsRequest) -> Self { - 
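// Illustrative sketch (editorial addition): every request type in this file follows
// the same pattern, a strongly-typed domain struct plus a `From` impl into the
// prost-generated "Raw" request, with the optional `PageRequest` mapped through
// `.map(Into::into)`. A self-contained version of that pattern with toy raw types:
#[derive(Debug, Default)]
struct RawPage {
    offset: u64,
    limit: u64,
}

#[derive(Debug, Default)]
struct RawListRequest {
    client_id: String,
    pagination: Option<RawPage>,
}

#[derive(Clone, Debug, Default)]
struct Page {
    offset: u64,
    limit: u64,
}

impl Page {
    // Mirror of `PageRequest::all()`: fetch everything in a single page.
    fn all() -> Self {
        Page {
            limit: u64::MAX,
            ..Default::default()
        }
    }
}

impl From<Page> for RawPage {
    fn from(p: Page) -> Self {
        RawPage {
            offset: p.offset,
            limit: p.limit,
        }
    }
}

struct ListRequest {
    client_id: String,
    pagination: Option<Page>,
}

impl From<ListRequest> for RawListRequest {
    fn from(r: ListRequest) -> Self {
        RawListRequest {
            client_id: r.client_id,
            pagination: r.pagination.map(Into::into),
        }
    }
}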
RawQueryChannelsRequest { - pagination: request.pagination.map(|pagination| pagination.into()), - } - } -} - -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct QueryChannelRequest { - pub port_id: PortId, - pub channel_id: ChannelId, - pub height: Height, -} - -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct QueryChannelClientStateRequest { - pub port_id: PortId, - pub channel_id: ChannelId, -} - -impl From for RawQueryChannelClientStateRequest { - fn from(request: QueryChannelClientStateRequest) -> Self { - RawQueryChannelClientStateRequest { - port_id: request.port_id.to_string(), - channel_id: request.channel_id.to_string(), - } - } -} - -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct QueryPacketCommitmentRequest { - pub port_id: PortId, - pub channel_id: ChannelId, - pub sequence: Sequence, - pub height: Height, -} - -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct QueryPacketCommitmentsRequest { - pub port_id: PortId, - pub channel_id: ChannelId, - pub pagination: Option, -} - -impl From for RawQueryPacketCommitmentsRequest { - fn from(request: QueryPacketCommitmentsRequest) -> Self { - RawQueryPacketCommitmentsRequest { - port_id: request.port_id.to_string(), - channel_id: request.channel_id.to_string(), - pagination: request.pagination.map(|pagination| pagination.into()), - } - } -} - -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct QueryPacketReceiptRequest { - pub port_id: PortId, - pub channel_id: ChannelId, - pub sequence: Sequence, - pub height: Height, -} - -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct QueryUnreceivedPacketsRequest { - pub port_id: PortId, - pub channel_id: ChannelId, - pub packet_commitment_sequences: Vec, -} - -impl From for RawQueryUnreceivedPacketsRequest { - fn from(request: QueryUnreceivedPacketsRequest) -> Self { - RawQueryUnreceivedPacketsRequest { - port_id: request.port_id.to_string(), - channel_id: request.channel_id.to_string(), - packet_commitment_sequences: request - .packet_commitment_sequences - .into_iter() - .map(|seq| seq.into()) - .collect(), - } - } -} - -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct QueryPacketAcknowledgementRequest { - pub port_id: PortId, - pub channel_id: ChannelId, - pub sequence: Sequence, - pub height: Height, -} - -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct QueryPacketAcknowledgementsRequest { - pub port_id: PortId, - pub channel_id: ChannelId, - pub pagination: Option, - pub packet_commitment_sequences: Vec, -} - -impl From for RawQueryPacketAcknowledgementsRequest { - fn from(request: QueryPacketAcknowledgementsRequest) -> Self { - RawQueryPacketAcknowledgementsRequest { - port_id: request.port_id.to_string(), - channel_id: request.channel_id.to_string(), - pagination: request.pagination.map(|pagination| pagination.into()), - packet_commitment_sequences: request - .packet_commitment_sequences - .into_iter() - .map(|seq| seq.into()) - .collect(), - } - } -} - -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct QueryUnreceivedAcksRequest { - pub port_id: PortId, - pub channel_id: ChannelId, - pub packet_ack_sequences: Vec, -} - -impl From for RawQueryUnreceivedAcksRequest { - fn from(request: QueryUnreceivedAcksRequest) -> Self { - RawQueryUnreceivedAcksRequest { - port_id: request.port_id.to_string(), - channel_id: request.channel_id.to_string(), - packet_ack_sequences: 
request - .packet_ack_sequences - .into_iter() - .map(|seq| seq.into()) - .collect(), - } - } -} - -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct QueryNextSequenceReceiveRequest { - pub port_id: PortId, - pub channel_id: ChannelId, - pub height: Height, -} - -impl From for RawQueryNextSequenceReceiveRequest { - fn from(request: QueryNextSequenceReceiveRequest) -> Self { - RawQueryNextSequenceReceiveRequest { - port_id: request.port_id.to_string(), - channel_id: request.channel_id.to_string(), - } - } -} - -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct QueryHostConsensusStateRequest { - pub height: Height, -} diff --git a/relayer/src/chain/runtime.rs b/relayer/src/chain/runtime.rs deleted file mode 100644 index 7a721ab03b..0000000000 --- a/relayer/src/chain/runtime.rs +++ /dev/null @@ -1,897 +0,0 @@ -use alloc::sync::Arc; -use std::thread; - -use crossbeam_channel as channel; -use tokio::runtime::Runtime as TokioRuntime; -use tracing::error; - -use ibc::{ - core::{ - ics02_client::{ - client_consensus::{AnyConsensusState, AnyConsensusStateWithHeight, ConsensusState}, - client_state::{AnyClientState, ClientState, IdentifiedAnyClientState}, - events::UpdateClient, - header::{AnyHeader, Header}, - misbehaviour::MisbehaviourEvidence, - }, - ics03_connection::{ - connection::{ConnectionEnd, IdentifiedConnectionEnd}, - version::Version, - }, - ics04_channel::{ - channel::{ChannelEnd, IdentifiedChannelEnd}, - packet::{PacketMsgType, Sequence}, - }, - ics23_commitment::{commitment::CommitmentPrefix, merkle::MerkleProof}, - ics24_host::identifier::{ChannelId, ClientId, ConnectionId, PortId}, - }, - events::IbcEvent, - proofs::Proofs, - query::{QueryBlockRequest, QueryTxRequest}, - signer::Signer, - Height, -}; - -use crate::{ - account::Balance, - config::ChainConfig, - connection::ConnectionMsgType, - error::Error, - event::{ - bus::EventBus, - monitor::{EventBatch, EventReceiver, MonitorCmd, Result as MonitorResult, TxMonitorCmd}, - }, - keyring::KeyEntry, - light_client::LightClient, -}; - -use super::{ - client::ClientSettings, - endpoint::{ChainEndpoint, ChainStatus, HealthCheck}, - handle::{ChainHandle, ChainRequest, ReplyTo, Subscription}, - requests::{ - IncludeProof, QueryChannelClientStateRequest, QueryChannelRequest, QueryChannelsRequest, - QueryClientConnectionsRequest, QueryClientStateRequest, QueryClientStatesRequest, - QueryConnectionChannelsRequest, QueryConnectionRequest, QueryConnectionsRequest, - QueryConsensusStateRequest, QueryConsensusStatesRequest, QueryHostConsensusStateRequest, - QueryNextSequenceReceiveRequest, QueryPacketAcknowledgementRequest, - QueryPacketAcknowledgementsRequest, QueryPacketCommitmentRequest, - QueryPacketCommitmentsRequest, QueryPacketReceiptRequest, QueryUnreceivedAcksRequest, - QueryUnreceivedPacketsRequest, QueryUpgradedClientStateRequest, - QueryUpgradedConsensusStateRequest, - }, - tracking::TrackedMsgs, -}; - -pub struct Threads { - pub chain_runtime: thread::JoinHandle<()>, - pub event_monitor: Option>, -} - -#[derive(Debug)] -pub enum EventMonitorCtrl { - None { - /// Empty channel for when the None case - never: EventReceiver, - }, - Live { - /// Receiver channel from the event bus - event_receiver: EventReceiver, - - /// Sender channel to terminate the event monitor - tx_monitor_cmd: TxMonitorCmd, - }, -} - -impl EventMonitorCtrl { - pub fn none() -> Self { - Self::None { - never: channel::never(), - } - } - - pub fn live(event_receiver: EventReceiver, tx_monitor_cmd: TxMonitorCmd) -> 
Self { - Self::Live { - event_receiver, - tx_monitor_cmd, - } - } - - pub fn enable(&mut self, event_receiver: EventReceiver, tx_monitor_cmd: TxMonitorCmd) { - *self = Self::live(event_receiver, tx_monitor_cmd); - } - - pub fn recv(&self) -> &EventReceiver { - match self { - Self::None { ref never } => never, - Self::Live { - ref event_receiver, .. - } => event_receiver, - } - } - - pub fn shutdown(&self) -> Result<(), Error> { - match self { - Self::None { .. } => Ok(()), - Self::Live { - ref tx_monitor_cmd, .. - } => tx_monitor_cmd - .send(MonitorCmd::Shutdown) - .map_err(Error::send), - } - } - - pub fn is_live(&self) -> bool { - matches!(self, Self::Live { .. }) - } -} - -pub struct ChainRuntime { - /// The specific chain this runtime runs against - chain: Endpoint, - - /// The sender side of a channel to this runtime. Any `ChainHandle` can use this to send - /// chain requests to this runtime - request_sender: channel::Sender, - - /// The receiving side of a channel to this runtime. The runtime consumes chain requests coming - /// in through this channel. - request_receiver: channel::Receiver, - - /// An event bus, for broadcasting events that this runtime receives (via `event_receiver`) to subscribers - event_bus: EventBus>>, - - /// Interface to the event monitor - event_monitor_ctrl: EventMonitorCtrl, - - /// A handle to the light client - light_client: Endpoint::LightClient, - - #[allow(dead_code)] - rt: Arc, // Making this future-proof, so we keep the runtime around. -} - -impl ChainRuntime -where - Endpoint: ChainEndpoint + Send + 'static, -{ - /// Spawns a new runtime for a specific Chain implementation. - pub fn spawn( - config: ChainConfig, - rt: Arc, - ) -> Result { - // Similar to `from_config`. - let chain = Endpoint::bootstrap(config, rt.clone())?; - - // Start the light client - let light_client = chain.init_light_client()?; - - // Instantiate & spawn the runtime - let (handle, _) = Self::init(chain, light_client, rt); - - Ok(handle) - } - - /// Initializes a runtime for a given chain, and spawns the associated thread - fn init( - chain: Endpoint, - light_client: Endpoint::LightClient, - rt: Arc, - ) -> (Handle, thread::JoinHandle<()>) { - let chain_runtime = Self::new(chain, light_client, rt); - - // Get a handle to the runtime - let handle: Handle = chain_runtime.handle(); - - // Spawn the runtime & return - let id = handle.id(); - let thread = thread::spawn(move || { - if let Err(e) = chain_runtime.run() { - error!("failed to start runtime for chain '{}': {}", id, e); - } - }); - - (handle, thread) - } - - /// Basic constructor - fn new(chain: Endpoint, light_client: Endpoint::LightClient, rt: Arc) -> Self { - let (request_sender, request_receiver) = channel::unbounded::(); - - Self { - rt, - chain, - request_sender, - request_receiver, - event_bus: EventBus::new(), - event_monitor_ctrl: EventMonitorCtrl::none(), - light_client, - } - } - - pub fn handle(&self) -> Handle { - let chain_id = ChainEndpoint::id(&self.chain).clone(); - let sender = self.request_sender.clone(); - - Handle::new(chain_id, sender) - } - - fn run(mut self) -> Result<(), Error> { - loop { - channel::select! 
{ - recv(self.event_monitor_ctrl.recv()) -> event_batch => { - match event_batch { - Ok(event_batch) => { - self.event_bus - .broadcast(Arc::new(event_batch)); - }, - Err(e) => { - error!("received error via event bus: {}", e); - return Err(Error::channel_receive(e)); - }, - } - }, - recv(self.request_receiver) -> event => { - match event { - Ok(ChainRequest::Shutdown { reply_to }) => { - self.event_monitor_ctrl.shutdown()?; - - let res = self.chain.shutdown(); - reply_to.send(res) - .map_err(Error::send)?; - - break; - } - - Ok(ChainRequest::HealthCheck { reply_to }) => { - self.health_check(reply_to)? - }, - - Ok(ChainRequest::Subscribe { reply_to }) => { - self.subscribe(reply_to)? - }, - - Ok(ChainRequest::SendMessagesAndWaitCommit { tracked_msgs, reply_to }) => { - self.send_messages_and_wait_commit(tracked_msgs, reply_to)? - }, - - Ok(ChainRequest::SendMessagesAndWaitCheckTx { tracked_msgs, reply_to }) => { - self.send_messages_and_wait_check_tx(tracked_msgs, reply_to)? - }, - - Ok(ChainRequest::Signer { reply_to }) => { - self.get_signer(reply_to)? - } - - Ok(ChainRequest::Config { reply_to }) => { - self.get_config(reply_to)? - } - - Ok(ChainRequest::GetKey { reply_to }) => { - self.get_key(reply_to)? - } - - Ok(ChainRequest::AddKey { key_name, key, reply_to }) => { - self.add_key(key_name, key, reply_to)? - } - - Ok(ChainRequest::IbcVersion { reply_to }) => { - self.ibc_version(reply_to)? - } - - Ok(ChainRequest::BuildHeader { trusted_height, target_height, client_state, reply_to }) => { - self.build_header(trusted_height, target_height, client_state, reply_to)? - } - - Ok(ChainRequest::BuildClientState { height, settings, reply_to }) => { - self.build_client_state(height, settings, reply_to)? - } - - Ok(ChainRequest::BuildConsensusState { trusted, target, client_state, reply_to }) => { - self.build_consensus_state(trusted, target, client_state, reply_to)? - } - - Ok(ChainRequest::BuildMisbehaviour { client_state, update_event, reply_to }) => { - self.check_misbehaviour(update_event, client_state, reply_to)? - } - - Ok(ChainRequest::BuildConnectionProofsAndClientState { message_type, connection_id, client_id, height, reply_to }) => { - self.build_connection_proofs_and_client_state(message_type, connection_id, client_id, height, reply_to)? - }, - - Ok(ChainRequest::BuildChannelProofs { port_id, channel_id, height, reply_to }) => { - self.build_channel_proofs(port_id, channel_id, height, reply_to)? - }, - - Ok(ChainRequest::QueryBalance { key_name, reply_to }) => { - self.query_balance(key_name, reply_to)? - } - - Ok(ChainRequest::QueryApplicationStatus { reply_to }) => { - self.query_application_status(reply_to)? - } - - Ok(ChainRequest::QueryClients { request, reply_to }) => { - self.query_clients(request, reply_to)? - }, - - Ok(ChainRequest::QueryClientConnections { request, reply_to }) => { - self.query_client_connections(request, reply_to)? - }, - - Ok(ChainRequest::QueryClientState { request, include_proof, reply_to }) => { - self.query_client_state(request, include_proof, reply_to)? - }, - - Ok(ChainRequest::QueryConsensusStates { request, reply_to }) => { - self.query_consensus_states(request, reply_to)? - }, - - Ok(ChainRequest::QueryConsensusState { request, include_proof, reply_to }) => { - self.query_consensus_state(request, include_proof, reply_to)? - }, - - Ok(ChainRequest::QueryUpgradedClientState { request, reply_to }) => { - self.query_upgraded_client_state(request, reply_to)? 
- } - - Ok(ChainRequest::QueryUpgradedConsensusState { request, reply_to }) => { - self.query_upgraded_consensus_state(request, reply_to)? - } - - Ok(ChainRequest::QueryCommitmentPrefix { reply_to }) => { - self.query_commitment_prefix(reply_to)? - }, - - Ok(ChainRequest::QueryCompatibleVersions { reply_to }) => { - self.query_compatible_versions(reply_to)? - }, - - Ok(ChainRequest::QueryConnection { request, include_proof, reply_to }) => { - self.query_connection(request, include_proof, reply_to)? - }, - - Ok(ChainRequest::QueryConnections { request, reply_to }) => { - self.query_connections(request, reply_to)? - }, - - Ok(ChainRequest::QueryConnectionChannels { request, reply_to }) => { - self.query_connection_channels(request, reply_to)? - }, - - Ok(ChainRequest::QueryChannels { request, reply_to }) => { - self.query_channels(request, reply_to)? - }, - - Ok(ChainRequest::QueryChannel { request, include_proof, reply_to }) => { - self.query_channel(request, include_proof, reply_to)? - }, - - Ok(ChainRequest::QueryChannelClientState { request, reply_to }) => { - self.query_channel_client_state(request, reply_to)? - }, - - Ok(ChainRequest::BuildPacketProofs { packet_type, port_id, channel_id, sequence, height, reply_to }) => { - self.build_packet_proofs(packet_type, port_id, channel_id, sequence, height, reply_to)? - }, - - Ok(ChainRequest::QueryPacketCommitment { request, include_proof, reply_to }) => { - self.query_packet_commitment(request, include_proof, reply_to)? - }, - - Ok(ChainRequest::QueryPacketCommitments { request, reply_to }) => { - self.query_packet_commitments(request, reply_to)? - }, - - Ok(ChainRequest::QueryPacketReceipt { request, include_proof, reply_to }) => { - self.query_packet_receipt(request, include_proof, reply_to)? - }, - - Ok(ChainRequest::QueryUnreceivedPackets { request, reply_to }) => { - self.query_unreceived_packets(request, reply_to)? - }, - - Ok(ChainRequest::QueryPacketAcknowledgement { request, include_proof, reply_to }) => { - self.query_packet_acknowledgement(request, include_proof, reply_to)? - }, - - Ok(ChainRequest::QueryPacketAcknowledgements { request, reply_to }) => { - self.query_packet_acknowledgements(request, reply_to)? - }, - - Ok(ChainRequest::QueryUnreceivedAcknowledgement { request, reply_to }) => { - self.query_unreceived_acknowledgement(request, reply_to)? - }, - - Ok(ChainRequest::QueryNextSequenceReceive { request, include_proof, reply_to }) => { - self.query_next_sequence_receive(request, include_proof, reply_to)? - }, - - Ok(ChainRequest::QueryPacketEventDataFromTxs { request, reply_to }) => { - self.query_txs(request, reply_to)? - }, - - Ok(ChainRequest::QueryPacketEventDataFromBlocks { request, reply_to }) => { - self.query_blocks(request, reply_to)? - }, - - Ok(ChainRequest::QueryHostConsensusState { request, reply_to }) => { - self.query_host_consensus_state(request, reply_to)? 
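// Illustrative sketch (editorial addition): the loop above is an actor. Handles push
// `ChainRequest` values (each carrying a `reply_to` channel) into the runtime, and
// the runtime services them and sends the result back. A stripped-down version of
// that request/reply shape using std channels instead of crossbeam; all names here
// are invented for the sketch:
use std::sync::mpsc::{channel, Sender};
use std::thread;

enum Request {
    LatestHeight { reply_to: Sender<u64> },
    Shutdown,
}

struct Handle {
    sender: Sender<Request>,
}

impl Handle {
    fn latest_height(&self) -> u64 {
        // Each call creates its own one-shot reply channel, like `ReplyTo<T>`.
        let (reply_to, reply) = channel();
        self.sender
            .send(Request::LatestHeight { reply_to })
            .expect("runtime thread is alive");
        reply.recv().expect("runtime replied")
    }

    fn shutdown(&self) {
        let _ = self.sender.send(Request::Shutdown);
    }
}

fn spawn_runtime(mut height: u64) -> (Handle, thread::JoinHandle<()>) {
    let (sender, receiver) = channel();
    let worker = thread::spawn(move || {
        // The runtime owns the receiver and serves requests until shutdown.
        while let Ok(request) = receiver.recv() {
            match request {
                Request::LatestHeight { reply_to } => {
                    height += 1; // pretend the chain advanced
                    let _ = reply_to.send(height);
                }
                Request::Shutdown => break,
            }
        }
    });
    (Handle { sender }, worker)
}

// Usage: let (handle, worker) = spawn_runtime(0); assert_eq!(handle.latest_height(), 1);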
- }, - - Err(e) => error!("received error via chain request channel: {}", e), - } - }, - } - } - - Ok(()) - } - - fn health_check(&mut self, reply_to: ReplyTo) -> Result<(), Error> { - let result = self.chain.health_check(); - reply_to.send(result).map_err(Error::send) - } - - fn subscribe(&mut self, reply_to: ReplyTo) -> Result<(), Error> { - if !self.event_monitor_ctrl.is_live() { - self.enable_event_monitor()?; - } - - let subscription = self.event_bus.subscribe(); - reply_to.send(Ok(subscription)).map_err(Error::send) - } - - fn enable_event_monitor(&mut self) -> Result<(), Error> { - let (event_receiver, tx_monitor_cmd) = self.chain.init_event_monitor(self.rt.clone())?; - - self.event_monitor_ctrl - .enable(event_receiver, tx_monitor_cmd); - - Ok(()) - } - - fn send_messages_and_wait_commit( - &mut self, - tracked_msgs: TrackedMsgs, - reply_to: ReplyTo>, - ) -> Result<(), Error> { - let result = self.chain.send_messages_and_wait_commit(tracked_msgs); - reply_to.send(result).map_err(Error::send) - } - - fn send_messages_and_wait_check_tx( - &mut self, - tracked_msgs: TrackedMsgs, - reply_to: ReplyTo>, - ) -> Result<(), Error> { - let result = self.chain.send_messages_and_wait_check_tx(tracked_msgs); - reply_to.send(result).map_err(Error::send) - } - - fn query_balance( - &self, - key_name: Option, - reply_to: ReplyTo, - ) -> Result<(), Error> { - let balance = self.chain.query_balance(key_name); - reply_to.send(balance).map_err(Error::send) - } - - fn query_application_status(&self, reply_to: ReplyTo) -> Result<(), Error> { - let latest_timestamp = self.chain.query_application_status(); - reply_to.send(latest_timestamp).map_err(Error::send) - } - - fn get_signer(&mut self, reply_to: ReplyTo) -> Result<(), Error> { - let result = self.chain.get_signer(); - reply_to.send(result).map_err(Error::send) - } - - fn get_config(&self, reply_to: ReplyTo) -> Result<(), Error> { - let result = Ok(self.chain.config()); - reply_to.send(result).map_err(Error::send) - } - - fn get_key(&mut self, reply_to: ReplyTo) -> Result<(), Error> { - let result = self.chain.get_key(); - reply_to.send(result).map_err(Error::send) - } - - fn add_key( - &mut self, - key_name: String, - key: KeyEntry, - reply_to: ReplyTo<()>, - ) -> Result<(), Error> { - let result = self.chain.add_key(&key_name, key); - reply_to.send(result).map_err(Error::send) - } - - fn ibc_version(&mut self, reply_to: ReplyTo>) -> Result<(), Error> { - let result = self.chain.ibc_version(); - reply_to.send(result).map_err(Error::send) - } - - fn build_header( - &mut self, - trusted_height: Height, - target_height: Height, - client_state: AnyClientState, - reply_to: ReplyTo<(AnyHeader, Vec)>, - ) -> Result<(), Error> { - let result = self - .chain - .build_header( - trusted_height, - target_height, - &client_state, - &mut self.light_client, - ) - .map(|(header, support)| { - let header = header.wrap_any(); - let support = support.into_iter().map(|h| h.wrap_any()).collect(); - (header, support) - }); - - reply_to.send(result).map_err(Error::send) - } - - /// Constructs a client state for the given height - fn build_client_state( - &self, - height: Height, - settings: ClientSettings, - reply_to: ReplyTo, - ) -> Result<(), Error> { - let client_state = self - .chain - .build_client_state(height, settings) - .map(|cs| cs.wrap_any()); - - reply_to.send(client_state).map_err(Error::send) - } - - /// Constructs a consensus state for the given height - fn build_consensus_state( - &mut self, - trusted: Height, - target: Height, - client_state: 
AnyClientState, - reply_to: ReplyTo, - ) -> Result<(), Error> { - let verified = self.light_client.verify(trusted, target, &client_state)?; - - let consensus_state = self - .chain - .build_consensus_state(verified.target) - .map(|cs| cs.wrap_any()); - - reply_to.send(consensus_state).map_err(Error::send) - } - - /// Constructs AnyMisbehaviour for the update event - fn check_misbehaviour( - &mut self, - update_event: UpdateClient, - client_state: AnyClientState, - reply_to: ReplyTo>, - ) -> Result<(), Error> { - let misbehaviour = self - .light_client - .check_misbehaviour(update_event, &client_state); - - reply_to.send(misbehaviour).map_err(Error::send) - } - - fn build_connection_proofs_and_client_state( - &self, - message_type: ConnectionMsgType, - connection_id: ConnectionId, - client_id: ClientId, - height: Height, - reply_to: ReplyTo<(Option, Proofs)>, - ) -> Result<(), Error> { - let result = self.chain.build_connection_proofs_and_client_state( - message_type, - &connection_id, - &client_id, - height, - ); - - let result = result - .map(|(opt_client_state, proofs)| (opt_client_state.map(|cs| cs.wrap_any()), proofs)); - - reply_to.send(result).map_err(Error::send) - } - - fn query_clients( - &self, - request: QueryClientStatesRequest, - reply_to: ReplyTo>, - ) -> Result<(), Error> { - let result = self.chain.query_clients(request); - reply_to.send(result).map_err(Error::send) - } - - fn query_client_connections( - &self, - request: QueryClientConnectionsRequest, - reply_to: ReplyTo>, - ) -> Result<(), Error> { - let result = self.chain.query_client_connections(request); - reply_to.send(result).map_err(Error::send) - } - - fn query_client_state( - &self, - request: QueryClientStateRequest, - include_proof: IncludeProof, - reply_to: ReplyTo<(AnyClientState, Option)>, - ) -> Result<(), Error> { - let res = self - .chain - .query_client_state(request, include_proof) - .map(|(cs, proof)| (cs.wrap_any(), proof)); - - reply_to.send(res).map_err(Error::send) - } - - fn query_upgraded_client_state( - &self, - request: QueryUpgradedClientStateRequest, - reply_to: ReplyTo<(AnyClientState, MerkleProof)>, - ) -> Result<(), Error> { - let result = self - .chain - .query_upgraded_client_state(request) - .map(|(cl, proof)| (cl.wrap_any(), proof)); - - reply_to.send(result).map_err(Error::send) - } - - fn query_consensus_states( - &self, - request: QueryConsensusStatesRequest, - reply_to: ReplyTo>, - ) -> Result<(), Error> { - let consensus_states = self.chain.query_consensus_states(request); - reply_to.send(consensus_states).map_err(Error::send) - } - - fn query_consensus_state( - &self, - request: QueryConsensusStateRequest, - include_proof: IncludeProof, - reply_to: ReplyTo<(AnyConsensusState, Option)>, - ) -> Result<(), Error> { - let res = self.chain.query_consensus_state(request, include_proof); - - reply_to.send(res).map_err(Error::send) - } - - fn query_upgraded_consensus_state( - &self, - request: QueryUpgradedConsensusStateRequest, - reply_to: ReplyTo<(AnyConsensusState, MerkleProof)>, - ) -> Result<(), Error> { - let result = self - .chain - .query_upgraded_consensus_state(request) - .map(|(cs, proof)| (cs.wrap_any(), proof)); - - reply_to.send(result).map_err(Error::send) - } - - fn query_commitment_prefix(&self, reply_to: ReplyTo) -> Result<(), Error> { - let prefix = self.chain.query_commitment_prefix(); - reply_to.send(prefix).map_err(Error::send) - } - - fn query_compatible_versions(&self, reply_to: ReplyTo>) -> Result<(), Error> { - let versions = 
self.chain.query_compatible_versions(); - reply_to.send(versions).map_err(Error::send) - } - - fn query_connection( - &self, - request: QueryConnectionRequest, - include_proof: IncludeProof, - reply_to: ReplyTo<(ConnectionEnd, Option)>, - ) -> Result<(), Error> { - let connection_end = self.chain.query_connection(request, include_proof); - reply_to.send(connection_end).map_err(Error::send) - } - - fn query_connections( - &self, - request: QueryConnectionsRequest, - reply_to: ReplyTo>, - ) -> Result<(), Error> { - let result = self.chain.query_connections(request); - reply_to.send(result).map_err(Error::send) - } - - fn query_connection_channels( - &self, - request: QueryConnectionChannelsRequest, - reply_to: ReplyTo>, - ) -> Result<(), Error> { - let result = self.chain.query_connection_channels(request); - reply_to.send(result).map_err(Error::send) - } - - fn query_channels( - &self, - request: QueryChannelsRequest, - reply_to: ReplyTo>, - ) -> Result<(), Error> { - let result = self.chain.query_channels(request); - reply_to.send(result).map_err(Error::send) - } - - fn query_channel( - &self, - request: QueryChannelRequest, - include_proof: IncludeProof, - reply_to: ReplyTo<(ChannelEnd, Option)>, - ) -> Result<(), Error> { - let result = self.chain.query_channel(request, include_proof); - reply_to.send(result).map_err(Error::send) - } - - fn query_channel_client_state( - &self, - request: QueryChannelClientStateRequest, - reply_to: ReplyTo>, - ) -> Result<(), Error> { - let result = self.chain.query_channel_client_state(request); - reply_to.send(result).map_err(Error::send) - } - - fn build_channel_proofs( - &self, - port_id: PortId, - channel_id: ChannelId, - height: Height, - reply_to: ReplyTo, - ) -> Result<(), Error> { - let result = self - .chain - .build_channel_proofs(&port_id, &channel_id, height); - - reply_to.send(result).map_err(Error::send) - } - - fn build_packet_proofs( - &self, - packet_type: PacketMsgType, - port_id: PortId, - channel_id: ChannelId, - sequence: Sequence, - height: Height, - reply_to: ReplyTo, - ) -> Result<(), Error> { - let result = - self.chain - .build_packet_proofs(packet_type, port_id, channel_id, sequence, height); - - reply_to.send(result).map_err(Error::send) - } - - fn query_packet_commitment( - &self, - request: QueryPacketCommitmentRequest, - include_proof: IncludeProof, - reply_to: ReplyTo<(Vec, Option)>, - ) -> Result<(), Error> { - let result = self.chain.query_packet_commitment(request, include_proof); - reply_to.send(result).map_err(Error::send) - } - - fn query_packet_commitments( - &self, - request: QueryPacketCommitmentsRequest, - reply_to: ReplyTo<(Vec, Height)>, - ) -> Result<(), Error> { - let result = self.chain.query_packet_commitments(request); - reply_to.send(result).map_err(Error::send) - } - - fn query_packet_receipt( - &self, - request: QueryPacketReceiptRequest, - include_proof: IncludeProof, - reply_to: ReplyTo<(Vec, Option)>, - ) -> Result<(), Error> { - let result = self.chain.query_packet_receipt(request, include_proof); - reply_to.send(result).map_err(Error::send) - } - - fn query_unreceived_packets( - &self, - request: QueryUnreceivedPacketsRequest, - reply_to: ReplyTo>, - ) -> Result<(), Error> { - let result = self.chain.query_unreceived_packets(request); - reply_to.send(result).map_err(Error::send) - } - - fn query_packet_acknowledgement( - &self, - request: QueryPacketAcknowledgementRequest, - include_proof: IncludeProof, - reply_to: ReplyTo<(Vec, Option)>, - ) -> Result<(), Error> { - let result = self - .chain - 
.query_packet_acknowledgement(request, include_proof); - reply_to.send(result).map_err(Error::send) - } - - fn query_packet_acknowledgements( - &self, - request: QueryPacketAcknowledgementsRequest, - reply_to: ReplyTo<(Vec, Height)>, - ) -> Result<(), Error> { - let result = self.chain.query_packet_acknowledgements(request); - reply_to.send(result).map_err(Error::send) - } - - fn query_unreceived_acknowledgement( - &self, - request: QueryUnreceivedAcksRequest, - reply_to: ReplyTo>, - ) -> Result<(), Error> { - let result = self.chain.query_unreceived_acknowledgements(request); - reply_to.send(result).map_err(Error::send) - } - - fn query_next_sequence_receive( - &self, - request: QueryNextSequenceReceiveRequest, - include_proof: IncludeProof, - reply_to: ReplyTo<(Sequence, Option)>, - ) -> Result<(), Error> { - let result = self - .chain - .query_next_sequence_receive(request, include_proof); - reply_to.send(result).map_err(Error::send) - } - - fn query_txs( - &self, - request: QueryTxRequest, - reply_to: ReplyTo>, - ) -> Result<(), Error> { - let result = self.chain.query_txs(request); - reply_to.send(result).map_err(Error::send) - } - - fn query_blocks( - &self, - request: QueryBlockRequest, - reply_to: ReplyTo<(Vec, Vec)>, - ) -> Result<(), Error> { - let result = self.chain.query_blocks(request); - - reply_to.send(result).map_err(Error::send)?; - - Ok(()) - } - - fn query_host_consensus_state( - &self, - request: QueryHostConsensusStateRequest, - reply_to: ReplyTo, - ) -> Result<(), Error> { - let result = self - .chain - .query_host_consensus_state(request) - .map(|h| h.wrap_any()); - - reply_to.send(result).map_err(Error::send)?; - - Ok(()) - } -} diff --git a/relayer/src/chain/tracking.rs b/relayer/src/chain/tracking.rs deleted file mode 100644 index 83b7db3795..0000000000 --- a/relayer/src/chain/tracking.rs +++ /dev/null @@ -1,97 +0,0 @@ -use core::fmt; - -use ibc_proto::google::protobuf::Any; -use uuid::Uuid; - -/// Identifier used to track an `EventBatch` along -/// the relaying pipeline until the corresponding -/// transactions are submitted and/or confirmed. -#[derive(Copy, Clone, Debug)] -pub enum TrackingId { - /// Random identifier, used for tracking an event batch received over WebSocket. - Uuid(Uuid), - /// Static identifier, used as a placeholder for when there is no - /// corresponding event batch, eg. when performing actions from - /// the CLI or during packet clearing. - Static(&'static str), -} - -impl TrackingId { - /// See [`TrackingId::Uuid`] - pub fn new_uuid() -> Self { - Self::Uuid(Uuid::new_v4()) - } - - /// See [`TrackingId::Static`] - pub fn new_static(s: &'static str) -> Self { - Self::Static(s) - } -} - -impl fmt::Display for TrackingId { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - TrackingId::Uuid(u) => { - let mut s = u.to_string(); - s.truncate(8); - s.fmt(f) - } - TrackingId::Static(s) => s.fmt(f), - } - } -} - -/// A wrapper over a vector of proto-encoded messages -/// (`Vec`), which has an associated tracking -/// number. -/// -/// A [`TrackedMsgs`] correlates with a -/// [`TrackedEvents`](crate::link::operational_data::TrackedEvents) -/// by sharing the same `tracking_id`. 
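// Illustrative usage sketch (editorial addition) for the type defined just below,
// assuming the constructors and accessors shown there: a batch of proto messages
// travels through the relaying pipeline under a single tracking id, so transaction
// logs can be correlated with the event batch that produced them.
fn example_tracked_batch(msgs: Vec<Any>) {
    let tracked = TrackedMsgs::new_static(msgs, "packet-clearing");
    println!(
        "submitting {} message(s) under tracking id {}",
        tracked.messages().len(),
        tracked.tracking_id()
    );
}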
-#[derive(Debug, Clone)] -pub struct TrackedMsgs { - pub msgs: Vec, - pub tracking_id: TrackingId, -} - -impl TrackedMsgs { - pub fn new(msgs: Vec, tracking_id: TrackingId) -> Self { - Self { msgs, tracking_id } - } - - pub fn new_static(msgs: Vec, tracking_id: &'static str) -> Self { - Self { - msgs, - tracking_id: TrackingId::Static(tracking_id), - } - } - - pub fn new_uuid(msgs: Vec, tracking_id: Uuid) -> Self { - Self { - msgs, - tracking_id: TrackingId::Uuid(tracking_id), - } - } - - pub fn new_single(msg: Any, tracking_id: &'static str) -> Self { - Self { - msgs: vec![msg], - tracking_id: TrackingId::Static(tracking_id), - } - } - - pub fn new_single_uuid(msg: Any, tracking_id: Uuid) -> Self { - Self { - msgs: vec![msg], - tracking_id: TrackingId::Uuid(tracking_id), - } - } - - pub fn messages(&self) -> &Vec { - &self.msgs - } - - pub fn tracking_id(&self) -> TrackingId { - self.tracking_id - } -} diff --git a/relayer/src/channel.rs b/relayer/src/channel.rs deleted file mode 100644 index 4abe48671c..0000000000 --- a/relayer/src/channel.rs +++ /dev/null @@ -1,1439 +0,0 @@ -use core::time::Duration; - -use ibc_proto::google::protobuf::Any; -use serde::Serialize; -use tracing::{debug, error, info, warn}; - -pub use error::ChannelError; -use ibc::core::ics04_channel::channel::{ - ChannelEnd, Counterparty, IdentifiedChannelEnd, Order, State, -}; -use ibc::core::ics04_channel::msgs::chan_close_confirm::MsgChannelCloseConfirm; -use ibc::core::ics04_channel::msgs::chan_close_init::MsgChannelCloseInit; -use ibc::core::ics04_channel::msgs::chan_open_ack::MsgChannelOpenAck; -use ibc::core::ics04_channel::msgs::chan_open_confirm::MsgChannelOpenConfirm; -use ibc::core::ics04_channel::msgs::chan_open_init::MsgChannelOpenInit; -use ibc::core::ics04_channel::msgs::chan_open_try::MsgChannelOpenTry; -use ibc::core::ics04_channel::Version; -use ibc::core::ics24_host::identifier::{ChainId, ChannelId, ClientId, ConnectionId, PortId}; -use ibc::events::IbcEvent; -use ibc::tx_msg::Msg; -use ibc::Height; - -use crate::chain::counterparty::{channel_connection_client, channel_state_on_destination}; -use crate::chain::handle::ChainHandle; -use crate::chain::requests::{ - IncludeProof, PageRequest, QueryChannelRequest, QueryConnectionChannelsRequest, - QueryConnectionRequest, -}; -use crate::chain::tracking::TrackedMsgs; -use crate::connection::Connection; -use crate::foreign_client::{ForeignClient, HasExpiredOrFrozenError}; -use crate::object::Channel as WorkerChannelObject; -use crate::supervisor::error::Error as SupervisorError; -use crate::util::retry::retry_with_index; -use crate::util::retry::{retry_count, RetryResult}; -use crate::util::task::Next; - -pub mod error; -pub mod version; - -mod retry_strategy { - use core::time::Duration; - - use retry::delay::Fibonacci; - - use crate::util::retry::clamp_total; - - // Default parameters for the retrying mechanism - const MAX_DELAY: Duration = Duration::from_secs(60); // 1 minute - const MAX_TOTAL_DELAY: Duration = Duration::from_secs(10 * 60); // 10 minutes - const INITIAL_DELAY: Duration = Duration::from_secs(1); // 1 second - - pub fn default() -> impl Iterator { - clamp_total(Fibonacci::from(INITIAL_DELAY), MAX_DELAY, MAX_TOTAL_DELAY) - } -} - -pub fn from_retry_error(e: retry::Error, description: String) -> ChannelError { - match e { - retry::Error::Operation { - error, - total_delay, - tries, - } => { - let detail = error::ChannelErrorDetail::MaxRetry(error::MaxRetrySubdetail { - description, - tries, - total_delay, - source: Box::new(error.0), - }); - 
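// Illustrative sketch (editorial addition): `retry_strategy::default()` above asks
// for Fibonacci-spaced delays in which each individual delay is capped at MAX_DELAY
// and the sequence stops once MAX_TOTAL_DELAY has been spent. The real `clamp_total`
// lives in `crate::util::retry` and may differ in detail; a self-contained
// approximation of that behaviour:
use std::time::Duration;

// Cap each delay at `max_delay`, and stop yielding once the accumulated delay
// would exceed `max_total_delay`.
fn clamp_total_sketch(
    delays: impl Iterator<Item = Duration>,
    max_delay: Duration,
    max_total_delay: Duration,
) -> impl Iterator<Item = Duration> {
    let mut total = Duration::ZERO;
    delays.map(move |d| d.min(max_delay)).take_while(move |d| {
        total += *d;
        total <= max_total_delay
    })
}

// Fibonacci delays starting from `initial`: 1s, 1s, 2s, 3s, 5s, ...
fn fibonacci_delays(initial: Duration) -> impl Iterator<Item = Duration> {
    let mut current = initial;
    let mut next = initial;
    std::iter::from_fn(move || {
        let out = current;
        let sum = current + next;
        current = next;
        next = sum;
        Some(out)
    })
}

// e.g. clamp_total_sketch(fibonacci_delays(Duration::from_secs(1)),
//                         Duration::from_secs(60), Duration::from_secs(10 * 60))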
ChannelError(detail, error.1) - } - retry::Error::Internal(reason) => ChannelError::retry_internal(reason), - } -} - -#[derive(Clone, Debug, Serialize)] -pub struct ChannelSide { - pub chain: Chain, - client_id: ClientId, - connection_id: ConnectionId, - port_id: PortId, - channel_id: Option, - version: Option, -} - -impl ChannelSide { - pub fn new( - chain: Chain, - client_id: ClientId, - connection_id: ConnectionId, - port_id: PortId, - channel_id: Option, - version: Option, - ) -> ChannelSide { - Self { - chain, - client_id, - connection_id, - port_id, - channel_id, - version, - } - } - - pub fn chain_id(&self) -> ChainId { - self.chain.id() - } - - pub fn client_id(&self) -> &ClientId { - &self.client_id - } - - pub fn connection_id(&self) -> &ConnectionId { - &self.connection_id - } - - pub fn port_id(&self) -> &PortId { - &self.port_id - } - - pub fn channel_id(&self) -> Option<&ChannelId> { - self.channel_id.as_ref() - } - - pub fn version(&self) -> Option<&Version> { - self.version.as_ref() - } - - pub fn map_chain( - self, - mapper: impl Fn(Chain) -> ChainB, - ) -> ChannelSide { - ChannelSide { - chain: mapper(self.chain), - client_id: self.client_id, - connection_id: self.connection_id, - port_id: self.port_id, - channel_id: self.channel_id, - version: self.version, - } - } -} - -#[derive(Clone, Debug, Serialize)] -pub struct Channel { - pub ordering: Order, - pub a_side: ChannelSide, - pub b_side: ChannelSide, - pub connection_delay: Duration, -} - -impl Channel { - /// Creates a new channel on top of the existing connection. If the channel is not already - /// set-up on both sides of the connection, this functions also fulfils the channel handshake. - pub fn new( - connection: Connection, - ordering: Order, - a_port: PortId, - b_port: PortId, - version: Option, - ) -> Result { - let src_connection_id = connection - .src_connection_id() - .ok_or_else(|| ChannelError::missing_local_connection(connection.src_chain().id()))?; - let dst_connection_id = connection - .dst_connection_id() - .ok_or_else(|| ChannelError::missing_local_connection(connection.dst_chain().id()))?; - - let mut channel = Self { - ordering, - a_side: ChannelSide::new( - connection.src_chain(), - connection.src_client_id().clone(), - src_connection_id.clone(), - a_port, - Default::default(), - version.clone(), - ), - b_side: ChannelSide::new( - connection.dst_chain(), - connection.dst_client_id().clone(), - dst_connection_id.clone(), - b_port, - Default::default(), - version, - ), - connection_delay: connection.delay_period, - }; - - channel.handshake()?; - - Ok(channel) - } - - pub fn restore_from_event( - chain: ChainA, - counterparty_chain: ChainB, - channel_open_event: IbcEvent, - ) -> Result, ChannelError> { - let channel_event_attributes = channel_open_event - .clone() - .channel_attributes() - .ok_or_else(|| ChannelError::invalid_event(channel_open_event))?; - - let port_id = channel_event_attributes.port_id.clone(); - let channel_id = channel_event_attributes.channel_id; - - let connection_id = channel_event_attributes.connection_id.clone(); - let (connection, _) = chain - .query_connection( - QueryConnectionRequest { - connection_id: connection_id.clone(), - height: Height::zero(), - }, - IncludeProof::No, - ) - .map_err(ChannelError::relayer)?; - - let connection_counterparty = connection.counterparty(); - - let counterparty_connection_id = connection_counterparty - .connection_id() - .ok_or_else(ChannelError::missing_counterparty_connection)?; - - Ok(Channel { - // The event does not include the 
channel ordering. - // The message handlers `build_chan_open..` determine the order included in the handshake - // message from channel query. - ordering: Default::default(), - a_side: ChannelSide::new( - chain, - connection.client_id().clone(), - connection_id, - port_id, - channel_id, - // The event does not include the version. - // The message handlers `build_chan_open..` determine the version from channel query. - None, - ), - b_side: ChannelSide::new( - counterparty_chain, - connection.counterparty().client_id().clone(), - counterparty_connection_id.clone(), - channel_event_attributes.counterparty_port_id.clone(), - channel_event_attributes.counterparty_channel_id, - None, - ), - connection_delay: connection.delay_period(), - }) - } - - /// Recreates a 'Channel' object from the worker's object built from chain state scanning. - /// The channel must exist on chain and its connection must be initialized on both chains. - pub fn restore_from_state( - chain: ChainA, - counterparty_chain: ChainB, - channel: WorkerChannelObject, - height: Height, - ) -> Result<(Channel, State), ChannelError> { - let (a_channel, _) = chain - .query_channel( - QueryChannelRequest { - port_id: channel.src_port_id.clone(), - channel_id: channel.src_channel_id, - height, - }, - IncludeProof::No, - ) - .map_err(ChannelError::relayer)?; - - let a_connection_id = a_channel.connection_hops().first().ok_or_else(|| { - ChannelError::supervisor(SupervisorError::missing_connection_hops( - channel.src_channel_id, - chain.id(), - )) - })?; - - let (a_connection, _) = chain - .query_connection( - QueryConnectionRequest { - connection_id: a_connection_id.clone(), - height: Height::zero(), - }, - IncludeProof::No, - ) - .map_err(ChannelError::relayer)?; - - let b_connection_id = a_connection - .counterparty() - .connection_id() - .cloned() - .ok_or_else(|| { - ChannelError::supervisor(SupervisorError::channel_connection_uninitialized( - channel.src_channel_id, - chain.id(), - a_connection.counterparty().clone(), - )) - })?; - - let mut handshake_channel = Channel { - ordering: *a_channel.ordering(), - a_side: ChannelSide::new( - chain.clone(), - a_connection.client_id().clone(), - a_connection_id.clone(), - channel.src_port_id.clone(), - Some(channel.src_channel_id), - None, - ), - b_side: ChannelSide::new( - counterparty_chain.clone(), - a_connection.counterparty().client_id().clone(), - b_connection_id.clone(), - a_channel.remote.port_id.clone(), - a_channel.remote.channel_id, - None, - ), - connection_delay: a_connection.delay_period(), - }; - - if a_channel.state_matches(&State::Init) && a_channel.remote.channel_id.is_none() { - let channels: Vec = counterparty_chain - .query_connection_channels(QueryConnectionChannelsRequest { - connection_id: b_connection_id, - pagination: Some(PageRequest::all()), - }) - .map_err(ChannelError::relayer)?; - - for chan in channels { - if let Some(remote_channel_id) = chan.channel_end.remote.channel_id() { - if remote_channel_id == &channel.src_channel_id { - handshake_channel.b_side.channel_id = Some(chan.channel_id); - break; - } - } - } - } - - Ok((handshake_channel, a_channel.state)) - } - - pub fn src_chain(&self) -> &ChainA { - &self.a_side.chain - } - - pub fn dst_chain(&self) -> &ChainB { - &self.b_side.chain - } - - pub fn src_client_id(&self) -> &ClientId { - &self.a_side.client_id - } - - pub fn dst_client_id(&self) -> &ClientId { - &self.b_side.client_id - } - - pub fn src_connection_id(&self) -> &ConnectionId { - &self.a_side.connection_id - } - - pub fn 
dst_connection_id(&self) -> &ConnectionId { - &self.b_side.connection_id - } - - pub fn src_port_id(&self) -> &PortId { - &self.a_side.port_id - } - - pub fn dst_port_id(&self) -> &PortId { - &self.b_side.port_id - } - - pub fn src_channel_id(&self) -> Option<&ChannelId> { - self.a_side.channel_id() - } - - pub fn dst_channel_id(&self) -> Option<&ChannelId> { - self.b_side.channel_id() - } - - pub fn src_version(&self) -> Option<&Version> { - self.a_side.version.as_ref() - } - - pub fn dst_version(&self) -> Option<&Version> { - self.b_side.version.as_ref() - } - - pub fn flipped(&self) -> Channel { - Channel { - ordering: self.ordering, - a_side: self.b_side.clone(), - b_side: self.a_side.clone(), - connection_delay: self.connection_delay, - } - } - - fn do_chan_open_init_and_send(&mut self) -> Result<(), ChannelError> { - let event = self.flipped().build_chan_open_init_and_send()?; - - info!("done {} => {:#?}\n", self.src_chain().id(), event); - - let channel_id = extract_channel_id(&event)?; - self.a_side.channel_id = Some(*channel_id); - info!("successfully opened init channel"); - - Ok(()) - } - - // Check that the channel was created on a_chain - fn do_chan_open_init_and_send_with_retry(&mut self) -> Result<(), ChannelError> { - retry_with_index(retry_strategy::default(), |_| { - self.do_chan_open_init_and_send() - }) - .map_err(|err| { - error!("failed to open channel after {} retries", err); - - from_retry_error( - err, - format!("failed to finish channel open init for {:?}", self), - ) - })?; - - Ok(()) - } - - fn do_chan_open_try_and_send(&mut self) -> Result<(), ChannelError> { - let event = self.build_chan_open_try_and_send().map_err(|e| { - error!("failed ChanOpenTry {:?}: {:?}", self.b_side, e); - e - })?; - - let channel_id = extract_channel_id(&event)?; - self.b_side.channel_id = Some(*channel_id); - - println!("done {} => {:#?}\n", self.dst_chain().id(), event); - Ok(()) - } - - fn do_chan_open_try_and_send_with_retry(&mut self) -> Result<(), ChannelError> { - retry_with_index(retry_strategy::default(), |_| { - if let Err(e) = self.do_chan_open_try_and_send() { - if e.is_expired_or_frozen_error() { - RetryResult::Err(e) - } else { - RetryResult::Retry(e) - } - } else { - RetryResult::Ok(()) - } - }) - .map_err(|err| { - error!("failed to open channel after {} retries", retry_count(&err)); - - from_retry_error( - err, - format!("failed to finish channel open try for {:?}", self), - ) - })?; - - Ok(()) - } - - /// Sends the last two steps, consisting of `Ack` and `Confirm` - /// messages, for finalizing the channel open handshake. - /// - /// Assumes that the channel open handshake was previously - /// started (with `Init` & `Try` steps). - /// - /// Returns `Ok` when both channel ends are in state `Open`. - /// Also returns `Ok` if the channel is undergoing a closing handshake. - /// - /// An `Err` can signal two cases: - /// - the common-case flow for the handshake protocol was interrupted, - /// e.g., by a competing relayer. - /// - Rpc problems (a query or submitting a tx failed). - /// In both `Err` cases, there should be retry calling this method. 
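All of the `*_with_retry` helpers above share one shape built on the `retry` crate's `retry_with_index`, which this file uses via `retry_strategy::default()` and the `RetryResult` alias. A minimal, self-contained sketch of that shape, with a hypothetical `try_step` standing in for a real handshake step and a made-up fixed delay in place of `retry_strategy::default()`:

use retry::{delay::Fixed, retry_with_index, OperationResult};

// Hypothetical fallible step; stands in for e.g. do_chan_open_try_and_send().
fn try_step(attempt: u64) -> Result<(), String> {
    if attempt < 3 {
        Err(format!("transient failure on attempt {}", attempt))
    } else {
        Ok(())
    }
}

fn step_with_retry() -> Result<(), retry::Error<String>> {
    retry_with_index(Fixed::from_millis(200).take(5), |attempt| {
        match try_step(attempt) {
            Ok(()) => OperationResult::Ok(()),
            // A non-recoverable error (e.g. an expired or frozen client) would be
            // returned as OperationResult::Err(e) to abort immediately, mirroring
            // the is_expired_or_frozen_error() checks used above.
            Err(e) => OperationResult::Retry(e),
        }
    })
}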
- fn do_chan_open_finalize(&self) -> Result<(), ChannelError> { - fn query_channel_states( - channel: &Channel, - ) -> Result<(State, State), ChannelError> { - let src_channel_id = channel - .src_channel_id() - .ok_or_else(ChannelError::missing_local_channel_id)?; - - let dst_channel_id = channel - .dst_channel_id() - .ok_or_else(ChannelError::missing_counterparty_channel_id)?; - - debug!( - "do_chan_open_finalize for src_channel_id: {}, dst_channel_id: {}", - src_channel_id, dst_channel_id - ); - - // Continue loop if query error - let (a_channel, _) = channel - .src_chain() - .query_channel( - QueryChannelRequest { - port_id: channel.src_port_id().clone(), - channel_id: *src_channel_id, - height: Height::zero(), - }, - IncludeProof::No, - ) - .map_err(|e| { - ChannelError::handshake_finalize( - channel.src_port_id().clone(), - *src_channel_id, - channel.src_chain().id(), - e, - ) - })?; - - let (b_channel, _) = channel - .dst_chain() - .query_channel( - QueryChannelRequest { - port_id: channel.dst_port_id().clone(), - channel_id: *dst_channel_id, - height: Height::zero(), - }, - IncludeProof::No, - ) - .map_err(|e| { - ChannelError::handshake_finalize( - channel.dst_port_id().clone(), - *dst_channel_id, - channel.dst_chain().id(), - e, - ) - })?; - - Ok((*a_channel.state(), *b_channel.state())) - } - - fn expect_channel_states( - ctx: &Channel, - a1: State, - b1: State, - ) -> Result<(), ChannelError> { - let (a2, b2) = query_channel_states(ctx)?; - - if (a1, b1) == (a2, b2) { - Ok(()) - } else { - warn!( - "expected channels to progress to states {}, {}), instead got ({}, {})", - a1, b1, a2, b2 - ); - - debug!("returning PartialOpenHandshake to retry"); - - // One more step (confirm) left. - // Returning error signals that the caller should retry. - Err(ChannelError::partial_open_handshake(a1, b1)) - } - } - - let (a_state, b_state) = query_channel_states(self)?; - debug!( - "do_chan_open_finalize with channel states: {}, {}", - a_state, b_state - ); - - match (a_state, b_state) { - // Handle sending the Ack message to the source chain, - // then the Confirm message to the destination. - (State::Init, State::TryOpen) | (State::TryOpen, State::TryOpen) => { - self.flipped().build_chan_open_ack_and_send()?; - - expect_channel_states(self, State::Open, State::TryOpen)?; - - self.build_chan_open_confirm_and_send()?; - - expect_channel_states(self, State::Open, State::Open)?; - - Ok(()) - } - - // Handle sending the Ack message to the destination chain, - // then the Confirm to the source chain. - (State::TryOpen, State::Init) => { - self.flipped().build_chan_open_ack_and_send()?; - - expect_channel_states(self, State::TryOpen, State::Open)?; - - self.flipped().build_chan_open_confirm_and_send()?; - - expect_channel_states(self, State::Open, State::Open)?; - - Ok(()) - } - - // Handle sending the Confirm message to the destination chain. - (State::Open, State::TryOpen) => { - self.build_chan_open_confirm_and_send()?; - - expect_channel_states(self, State::Open, State::Open)?; - - Ok(()) - } - - // Send Confirm to the source chain. - (State::TryOpen, State::Open) => { - self.flipped().build_chan_open_confirm_and_send()?; - - expect_channel_states(self, State::Open, State::Open)?; - - Ok(()) - } - - (State::Open, State::Open) => { - info!("channel handshake already finished for {:#?}\n", self); - Ok(()) - } - - // In all other conditions, return Ok, since the channel open handshake does not apply. 
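For orientation, the finalize logic above is the tail of the standard four-message ICS 004 open handshake that this file drives end to end (see `handshake()` further below). The full sequence, written out as data purely for reference (a sketch, not part of the original source):

/// Which message goes to which side, and the state transition it causes.
const OPEN_HANDSHAKE: [(&str, &str, &str); 4] = [
    ("MsgChannelOpenInit", "chain A", "a_side: Uninitialized -> Init"),
    ("MsgChannelOpenTry", "chain B", "b_side: Uninitialized -> TryOpen"),
    ("MsgChannelOpenAck", "chain A", "a_side: Init -> Open"),
    ("MsgChannelOpenConfirm", "chain B", "b_side: TryOpen -> Open"),
];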
- _ => Ok(()), - } - } - - /// Takes a partially open channel and finalizes the open handshake protocol. - /// - /// Pre-condition: the channel identifiers are established on both ends - /// (i.e., `OpenInit` and `OpenTry` have executed previously for this channel). - /// - /// Post-condition: the channel state is `Open` on both ends if successful. - fn do_chan_open_finalize_with_retry(&self) -> Result<(), ChannelError> { - retry_with_index(retry_strategy::default(), |_| { - if let Err(e) = self.do_chan_open_finalize() { - if e.is_expired_or_frozen_error() { - RetryResult::Err(e) - } else { - RetryResult::Retry(e) - } - } else { - RetryResult::Ok(()) - } - }) - .map_err(|err| { - error!("failed to open channel after {} retries", err); - from_retry_error( - err, - format!("failed to finish channel handshake for {:?}", self), - ) - })?; - - Ok(()) - } - - /// Executes the channel handshake protocol (ICS004) - fn handshake(&mut self) -> Result<(), ChannelError> { - self.do_chan_open_init_and_send_with_retry()?; - self.do_chan_open_try_and_send_with_retry()?; - self.do_chan_open_finalize_with_retry() - } - - pub fn counterparty_state(&self) -> Result { - // Source channel ID must be specified - let channel_id = self - .src_channel_id() - .ok_or_else(ChannelError::missing_local_channel_id)?; - - let channel_deps = - channel_connection_client(self.src_chain(), self.src_port_id(), channel_id) - .map_err(|e| ChannelError::query_channel(*channel_id, e))?; - - channel_state_on_destination( - &channel_deps.channel, - &channel_deps.connection, - self.dst_chain(), - ) - .map_err(|e| ChannelError::query_channel(*channel_id, e)) - } - - pub fn handshake_step( - &mut self, - state: State, - ) -> Result<(Option, Next), ChannelError> { - let res = match (state, self.counterparty_state()?) { - (State::Init, State::Uninitialized) => Some(self.build_chan_open_try_and_send()?), - (State::Init, State::Init) => Some(self.build_chan_open_try_and_send()?), - (State::TryOpen, State::Init) => Some(self.build_chan_open_ack_and_send()?), - (State::TryOpen, State::TryOpen) => Some(self.build_chan_open_ack_and_send()?), - (State::Open, State::TryOpen) => Some(self.build_chan_open_confirm_and_send()?), - (State::Open, State::Open) => return Ok((None, Next::Abort)), - - // If the counterparty state is already Open but current state is TryOpen, - // return anyway as the final step is to be done by the counterparty worker. 
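Putting the pieces together, the usual external entry point is `Channel::new`, which runs the whole handshake with the retrying helpers above. A usage sketch, assuming the `ChainHandle`-bounded type parameters and the `Connection` type used throughout this crate, with other identifiers in scope as in the rest of this module (the port names are illustrative):

use core::str::FromStr;

fn open_transfer_channel<ChainA: ChainHandle, ChainB: ChainHandle>(
    connection: Connection<ChainA, ChainB>,
) -> Result<Channel<ChainA, ChainB>, ChannelError> {
    let channel = Channel::new(
        connection,
        Order::Unordered,
        PortId::from_str("transfer").unwrap(), // a_port
        PortId::from_str("transfer").unwrap(), // b_port
        None, // no explicit version: OpenInit falls back to the per-port default
    )?;

    info!("established channel: {:?}", channel.src_channel_id());
    Ok(channel)
}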
- (State::TryOpen, State::Open) => return Ok((None, Next::Abort)), - - _ => None, - }; - - Ok((res, Next::Continue)) - } - - pub fn step_state(&mut self, state: State, index: u64) -> RetryResult { - match self.handshake_step(state) { - Err(e) => { - if e.is_expired_or_frozen_error() { - error!( - "failed to establish channel handshake on frozen client: {}", - e - ); - RetryResult::Err(index) - } else { - error!("failed Chan{:?} with error: {}", state, e); - RetryResult::Retry(index) - } - } - Ok((Some(ev), handshake_completed)) => { - info!("channel handshake step completed with events: {:#?}\n", ev); - RetryResult::Ok(handshake_completed) - } - Ok((None, handshake_completed)) => RetryResult::Ok(handshake_completed), - } - } - - pub fn step_event(&mut self, event: IbcEvent, index: u64) -> RetryResult { - let state = match event { - IbcEvent::OpenInitChannel(_) => State::Init, - IbcEvent::OpenTryChannel(_) => State::TryOpen, - IbcEvent::OpenAckChannel(_) => State::Open, - IbcEvent::OpenConfirmChannel(_) => State::Open, - _ => State::Uninitialized, - }; - - self.step_state(state, index) - } - - pub fn build_update_client_on_dst(&self, height: Height) -> Result, ChannelError> { - let client = ForeignClient::restore( - self.dst_client_id().clone(), - self.dst_chain().clone(), - self.src_chain().clone(), - ); - - client.build_update_client(height).map_err(|e| { - ChannelError::client_operation(self.dst_client_id().clone(), self.dst_chain().id(), e) - }) - } - - pub fn build_chan_open_init(&self) -> Result, ChannelError> { - let signer = self - .dst_chain() - .get_signer() - .map_err(|e| ChannelError::query(self.dst_chain().id(), e))?; - - let counterparty = Counterparty::new(self.src_port_id().clone(), None); - - // If the user supplied a version, use that. - // Otherwise, either use the version defined for the `transfer` - // or an empty version if the port is non-standard. - let version = self - .dst_version() - .cloned() - .or_else(|| version::default_by_port(self.dst_port_id())) - .unwrap_or_else(|| { - warn!( - chain = %self.dst_chain().id(), - channel = ?self.dst_channel_id(), - port = %self.dst_port_id(), - "no version specified for the channel, falling back on empty version" - ); - - Version::empty() - }); - - let channel = ChannelEnd::new( - State::Init, - self.ordering, - counterparty, - vec![self.dst_connection_id().clone()], - version, - ); - - // Build the domain type message - let new_msg = MsgChannelOpenInit { - port_id: self.dst_port_id().clone(), - channel, - signer, - }; - - Ok(vec![new_msg.to_any()]) - } - - pub fn build_chan_open_init_and_send(&self) -> Result { - let dst_msgs = self.build_chan_open_init()?; - - let tm = TrackedMsgs::new_static(dst_msgs, "ChannelOpenInit"); - - let events = self - .dst_chain() - .send_messages_and_wait_commit(tm) - .map_err(|e| ChannelError::submit(self.dst_chain().id(), e))?; - - // Find the relevant event for channel open init - let result = events - .into_iter() - .find(|event| { - matches!(event, IbcEvent::OpenInitChannel(_)) - || matches!(event, IbcEvent::ChainError(_)) - }) - .ok_or_else(|| { - ChannelError::missing_event("no chan init event was in the response".to_string()) - })?; - - match result { - IbcEvent::OpenInitChannel(_) => Ok(result), - IbcEvent::ChainError(e) => Err(ChannelError::tx_response(e)), - _ => Err(ChannelError::invalid_event(result)), - } - } - - /// Retrieves the channel from destination and compares it - /// against the expected channel. built from the message type [`ChannelMsgType`]. 
- /// - /// If the expected and the destination channels are compatible, - /// returns the expected channel - /// - /// # Precondition: - /// Source and destination channel IDs must be `Some`. - fn validated_expected_channel( - &self, - msg_type: ChannelMsgType, - ) -> Result { - // Destination channel ID must be specified - let dst_channel_id = self - .dst_channel_id() - .ok_or_else(ChannelError::missing_counterparty_channel_id)?; - - // If there is a channel present on the destination chain, - // the counterparty should look like this: - let counterparty = - Counterparty::new(self.src_port_id().clone(), self.src_channel_id().cloned()); - - // The highest expected state, depends on the message type: - let highest_state = match msg_type { - ChannelMsgType::OpenAck => State::TryOpen, - ChannelMsgType::OpenConfirm => State::TryOpen, - ChannelMsgType::CloseConfirm => State::Open, - _ => State::Uninitialized, - }; - - let dst_expected_channel = ChannelEnd::new( - highest_state, - self.ordering, - counterparty, - vec![self.dst_connection_id().clone()], - Version::empty(), - ); - - // Retrieve existing channel - let (dst_channel, _) = self - .dst_chain() - .query_channel( - QueryChannelRequest { - port_id: self.dst_port_id().clone(), - channel_id: *dst_channel_id, - height: Height::zero(), - }, - IncludeProof::No, - ) - .map_err(|e| ChannelError::query(self.dst_chain().id(), e))?; - - // Check if a channel is expected to exist on destination chain - // A channel must exist on destination chain for Ack and Confirm Tx-es to succeed - if dst_channel.state_matches(&State::Uninitialized) { - return Err(ChannelError::missing_channel_on_destination()); - } - - check_destination_channel_state( - *dst_channel_id, - dst_channel, - dst_expected_channel.clone(), - )?; - - Ok(dst_expected_channel) - } - - pub fn build_chan_open_try(&self) -> Result, ChannelError> { - // Source channel ID must be specified - let src_channel_id = self - .src_channel_id() - .ok_or_else(ChannelError::missing_local_channel_id)?; - - // Channel must exist on source - let (src_channel, _) = self - .src_chain() - .query_channel( - QueryChannelRequest { - port_id: self.src_port_id().clone(), - channel_id: *src_channel_id, - height: Height::zero(), - }, - IncludeProof::No, - ) - .map_err(|e| ChannelError::query(self.src_chain().id(), e))?; - - if src_channel.counterparty().port_id() != self.dst_port_id() { - return Err(ChannelError::mismatch_port( - self.dst_chain().id(), - self.dst_port_id().clone(), - self.src_chain().id(), - src_channel.counterparty().port_id().clone(), - *src_channel_id, - )); - } - - // Connection must exist on destination - self.dst_chain() - .query_connection( - QueryConnectionRequest { - connection_id: self.dst_connection_id().clone(), - height: Height::zero(), - }, - IncludeProof::No, - ) - .map_err(|e| ChannelError::query(self.dst_chain().id(), e))?; - - let query_height = self - .src_chain() - .query_latest_height() - .map_err(|e| ChannelError::query(self.src_chain().id(), e))?; - - let proofs = self - .src_chain() - .build_channel_proofs(self.src_port_id(), src_channel_id, query_height) - .map_err(ChannelError::channel_proof)?; - - // Build message(s) to update client on destination - let mut msgs = self.build_update_client_on_dst(proofs.height())?; - - let counterparty = - Counterparty::new(self.src_port_id().clone(), self.src_channel_id().cloned()); - - // Re-use the version that was either set on ChanOpenInit or overwritten by the application. 
- let version = src_channel.version().clone(); - - let channel = ChannelEnd::new( - State::TryOpen, - *src_channel.ordering(), - counterparty, - vec![self.dst_connection_id().clone()], - version, - ); - - // Get signer - let signer = self - .dst_chain() - .get_signer() - .map_err(|e| ChannelError::fetch_signer(self.dst_chain().id(), e))?; - - let previous_channel_id = if src_channel.counterparty().channel_id.is_none() { - self.b_side.channel_id - } else { - src_channel.counterparty().channel_id - }; - - // Build the domain type message - let new_msg = MsgChannelOpenTry { - port_id: self.dst_port_id().clone(), - previous_channel_id, - counterparty_version: src_channel.version().clone(), - channel, - proofs, - signer, - }; - - msgs.push(new_msg.to_any()); - Ok(msgs) - } - - pub fn build_chan_open_try_and_send(&self) -> Result { - let dst_msgs = self.build_chan_open_try()?; - - let tm = TrackedMsgs::new_static(dst_msgs, "ChannelOpenTry"); - - let events = self - .dst_chain() - .send_messages_and_wait_commit(tm) - .map_err(|e| ChannelError::submit(self.dst_chain().id(), e))?; - - // Find the relevant event for channel open try - let result = events - .into_iter() - .find(|event| { - matches!(event, IbcEvent::OpenTryChannel(_)) - || matches!(event, IbcEvent::ChainError(_)) - }) - .ok_or_else(|| { - ChannelError::missing_event("no chan try event was in the response".to_string()) - })?; - - match result { - IbcEvent::OpenTryChannel(_) => Ok(result), - IbcEvent::ChainError(e) => Err(ChannelError::tx_response(e)), - _ => Err(ChannelError::invalid_event(result)), - } - } - - pub fn build_chan_open_ack(&self) -> Result, ChannelError> { - // Source and destination channel IDs must be specified - let src_channel_id = self - .src_channel_id() - .ok_or_else(ChannelError::missing_local_channel_id)?; - let dst_channel_id = self - .dst_channel_id() - .ok_or_else(ChannelError::missing_counterparty_channel_id)?; - - // Check that the destination chain will accept the Ack message - self.validated_expected_channel(ChannelMsgType::OpenAck)?; - - // Channel must exist on source - let (src_channel, _) = self - .src_chain() - .query_channel( - QueryChannelRequest { - port_id: self.src_port_id().clone(), - channel_id: *src_channel_id, - height: Height::zero(), - }, - IncludeProof::No, - ) - .map_err(|e| ChannelError::query(self.src_chain().id(), e))?; - - // Connection must exist on destination - self.dst_chain() - .query_connection( - QueryConnectionRequest { - connection_id: self.dst_connection_id().clone(), - height: Height::zero(), - }, - IncludeProof::No, - ) - .map_err(|e| ChannelError::query(self.dst_chain().id(), e))?; - - let query_height = self - .src_chain() - .query_latest_height() - .map_err(|e| ChannelError::query(self.src_chain().id(), e))?; - - let proofs = self - .src_chain() - .build_channel_proofs(self.src_port_id(), src_channel_id, query_height) - .map_err(ChannelError::channel_proof)?; - - // Build message(s) to update client on destination - let mut msgs = self.build_update_client_on_dst(proofs.height())?; - - // Get signer - let signer = self - .dst_chain() - .get_signer() - .map_err(|e| ChannelError::fetch_signer(self.dst_chain().id(), e))?; - - // Build the domain type message - let new_msg = MsgChannelOpenAck { - port_id: self.dst_port_id().clone(), - channel_id: *dst_channel_id, - counterparty_channel_id: *src_channel_id, - counterparty_version: src_channel.version().clone(), - proofs, - signer, - }; - - msgs.push(new_msg.to_any()); - Ok(msgs) - } - - pub fn 
build_chan_open_ack_and_send(&self) -> Result { - fn do_build_chan_open_ack_and_send( - channel: &Channel, - ) -> Result { - let dst_msgs = channel.build_chan_open_ack()?; - - let tm = TrackedMsgs::new_static(dst_msgs, "ChannelOpenAck"); - - let events = channel - .dst_chain() - .send_messages_and_wait_commit(tm) - .map_err(|e| ChannelError::submit(channel.dst_chain().id(), e))?; - - // Find the relevant event for channel open ack - let event = events - .into_iter() - .find(|event| { - matches!(event, IbcEvent::OpenAckChannel(_)) - || matches!(event, IbcEvent::ChainError(_)) - }) - .ok_or_else(|| { - ChannelError::missing_event("no chan ack event was in the response".to_string()) - })?; - - match event { - IbcEvent::OpenAckChannel(_) => { - info!( - "done with ChanAck step {} => {:#?}\n", - channel.dst_chain().id(), - event - ); - - Ok(event) - } - IbcEvent::ChainError(e) => Err(ChannelError::tx_response(e)), - _ => Err(ChannelError::invalid_event(event)), - } - } - - do_build_chan_open_ack_and_send(self).map_err(|e| { - error!("failed ChanOpenAck {:?}: {}", self.b_side, e); - e - }) - } - - pub fn build_chan_open_confirm(&self) -> Result, ChannelError> { - // Source and destination channel IDs must be specified - let src_channel_id = self - .src_channel_id() - .ok_or_else(ChannelError::missing_local_channel_id)?; - let dst_channel_id = self - .dst_channel_id() - .ok_or_else(ChannelError::missing_counterparty_channel_id)?; - - // Check that the destination chain will accept the message - self.validated_expected_channel(ChannelMsgType::OpenConfirm)?; - - // Channel must exist on source - self.src_chain() - .query_channel( - QueryChannelRequest { - port_id: self.src_port_id().clone(), - channel_id: *src_channel_id, - height: Height::zero(), - }, - IncludeProof::No, - ) - .map_err(|e| ChannelError::query(self.src_chain().id(), e))?; - - // Connection must exist on destination - self.dst_chain() - .query_connection( - QueryConnectionRequest { - connection_id: self.dst_connection_id().clone(), - height: Height::zero(), - }, - IncludeProof::No, - ) - .map_err(|e| ChannelError::query(self.dst_chain().id(), e))?; - - let query_height = self - .src_chain() - .query_latest_height() - .map_err(|e| ChannelError::query(self.src_chain().id(), e))?; - - let proofs = self - .src_chain() - .build_channel_proofs(self.src_port_id(), src_channel_id, query_height) - .map_err(ChannelError::channel_proof)?; - - // Build message(s) to update client on destination - let mut msgs = self.build_update_client_on_dst(proofs.height())?; - - // Get signer - let signer = self - .dst_chain() - .get_signer() - .map_err(|e| ChannelError::fetch_signer(self.dst_chain().id(), e))?; - - // Build the domain type message - let new_msg = MsgChannelOpenConfirm { - port_id: self.dst_port_id().clone(), - channel_id: *dst_channel_id, - proofs, - signer, - }; - - msgs.push(new_msg.to_any()); - Ok(msgs) - } - - pub fn build_chan_open_confirm_and_send(&self) -> Result { - fn do_build_chan_open_confirm_and_send( - channel: &Channel, - ) -> Result { - let dst_msgs = channel.build_chan_open_confirm()?; - - let tm = TrackedMsgs::new_static(dst_msgs, "ChannelOpenConfirm"); - let events = channel - .dst_chain() - .send_messages_and_wait_commit(tm) - .map_err(|e| ChannelError::submit(channel.dst_chain().id(), e))?; - - // Find the relevant event for channel open confirm - let event = events - .into_iter() - .find(|event| { - matches!(event, IbcEvent::OpenConfirmChannel(_)) - || matches!(event, IbcEvent::ChainError(_)) - }) - .ok_or_else(|| { - 
ChannelError::missing_event( - "no chan confirm event was in the response".to_string(), - ) - })?; - - match event { - IbcEvent::OpenConfirmChannel(_) => { - info!("done {} => {:#?}\n", channel.dst_chain().id(), event); - Ok(event) - } - IbcEvent::ChainError(e) => Err(ChannelError::tx_response(e)), - _ => Err(ChannelError::invalid_event(event)), - } - } - - do_build_chan_open_confirm_and_send(self).map_err(|e| { - error!("failed ChanOpenConfirm {:?}: {}", self.b_side, e); - e - }) - } - - pub fn build_chan_close_init(&self) -> Result, ChannelError> { - // Destination channel ID must be specified - let dst_channel_id = self - .dst_channel_id() - .ok_or_else(ChannelError::missing_counterparty_channel_id)?; - - // Channel must exist on destination - self.dst_chain() - .query_channel( - QueryChannelRequest { - port_id: self.dst_port_id().clone(), - channel_id: *dst_channel_id, - height: Height::zero(), - }, - IncludeProof::No, - ) - .map_err(|e| ChannelError::query(self.dst_chain().id(), e))?; - - let signer = self - .dst_chain() - .get_signer() - .map_err(|e| ChannelError::fetch_signer(self.dst_chain().id(), e))?; - - // Build the domain type message - let new_msg = MsgChannelCloseInit { - port_id: self.dst_port_id().clone(), - channel_id: *dst_channel_id, - signer, - }; - - Ok(vec![new_msg.to_any()]) - } - - pub fn build_chan_close_init_and_send(&self) -> Result { - let dst_msgs = self.build_chan_close_init()?; - - let tm = TrackedMsgs::new_static(dst_msgs, "ChannelCloseInit"); - - let events = self - .dst_chain() - .send_messages_and_wait_commit(tm) - .map_err(|e| ChannelError::submit(self.dst_chain().id(), e))?; - - // Find the relevant event for channel close init - let result = events - .into_iter() - .find(|event| { - matches!(event, IbcEvent::CloseInitChannel(_)) - || matches!(event, IbcEvent::ChainError(_)) - }) - .ok_or_else(|| { - ChannelError::missing_event("no chan init event was in the response".to_string()) - })?; - - match result { - IbcEvent::CloseInitChannel(_) => Ok(result), - IbcEvent::ChainError(e) => Err(ChannelError::tx_response(e)), - _ => Err(ChannelError::invalid_event(result)), - } - } - - pub fn build_chan_close_confirm(&self) -> Result, ChannelError> { - // Source and destination channel IDs must be specified - let src_channel_id = self - .src_channel_id() - .ok_or_else(ChannelError::missing_local_channel_id)?; - let dst_channel_id = self - .dst_channel_id() - .ok_or_else(ChannelError::missing_counterparty_channel_id)?; - - // Check that the destination chain will accept the message - self.validated_expected_channel(ChannelMsgType::CloseConfirm)?; - - // Channel must exist on source - self.src_chain() - .query_channel( - QueryChannelRequest { - port_id: self.src_port_id().clone(), - channel_id: *src_channel_id, - height: Height::zero(), - }, - IncludeProof::No, - ) - .map_err(|e| ChannelError::query(self.src_chain().id(), e))?; - - // Connection must exist on destination - self.dst_chain() - .query_connection( - QueryConnectionRequest { - connection_id: self.dst_connection_id().clone(), - height: Height::zero(), - }, - IncludeProof::No, - ) - .map_err(|e| ChannelError::query(self.dst_chain().id(), e))?; - - let query_height = self - .src_chain() - .query_latest_height() - .map_err(|e| ChannelError::query(self.src_chain().id(), e))?; - - let proofs = self - .src_chain() - .build_channel_proofs(self.src_port_id(), src_channel_id, query_height) - .map_err(ChannelError::channel_proof)?; - - // Build message(s) to update client on destination - let mut msgs = 
self.build_update_client_on_dst(proofs.height())?; - - // Get signer - let signer = self - .dst_chain() - .get_signer() - .map_err(|e| ChannelError::fetch_signer(self.dst_chain().id(), e))?; - - // Build the domain type message - let new_msg = MsgChannelCloseConfirm { - port_id: self.dst_port_id().clone(), - channel_id: *dst_channel_id, - proofs, - signer, - }; - - msgs.push(new_msg.to_any()); - Ok(msgs) - } - - pub fn build_chan_close_confirm_and_send(&self) -> Result { - let dst_msgs = self.build_chan_close_confirm()?; - - let tm = TrackedMsgs::new_static(dst_msgs, "ChannelCloseConfirm"); - - let events = self - .dst_chain() - .send_messages_and_wait_commit(tm) - .map_err(|e| ChannelError::submit(self.dst_chain().id(), e))?; - - // Find the relevant event for channel close confirm - let result = events - .into_iter() - .find(|event| { - matches!(event, IbcEvent::CloseConfirmChannel(_)) - || matches!(event, IbcEvent::ChainError(_)) - }) - .ok_or_else(|| { - ChannelError::missing_event("no chan confirm event was in the response".to_string()) - })?; - - match result { - IbcEvent::CloseConfirmChannel(_) => Ok(result), - IbcEvent::ChainError(e) => Err(ChannelError::tx_response(e)), - _ => Err(ChannelError::invalid_event(result)), - } - } - - pub fn map_chain( - self, - mapper_a: impl Fn(ChainA) -> ChainC, - mapper_b: impl Fn(ChainB) -> ChainD, - ) -> Channel { - Channel { - ordering: self.ordering, - a_side: self.a_side.map_chain(mapper_a), - b_side: self.b_side.map_chain(mapper_b), - connection_delay: self.connection_delay, - } - } -} - -pub fn extract_channel_id(event: &IbcEvent) -> Result<&ChannelId, ChannelError> { - match event { - IbcEvent::OpenInitChannel(ev) => ev.channel_id(), - IbcEvent::OpenTryChannel(ev) => ev.channel_id(), - IbcEvent::OpenAckChannel(ev) => ev.channel_id(), - IbcEvent::OpenConfirmChannel(ev) => ev.channel_id(), - _ => None, - } - .ok_or_else(|| ChannelError::missing_event("cannot extract channel_id from result".to_string())) -} - -/// Enumeration of proof carrying ICS4 message, helper for relayer. 
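Each `build_chan_*_and_send` method above repeats the same response-scanning pattern: find either the expected success event or a `ChainError`, then turn the latter into `ChannelError::tx_response`. That shared shape could be factored roughly like this (hypothetical helper, shown only to make the pattern explicit):

fn expect_event(
    events: Vec<IbcEvent>,
    is_expected: impl Fn(&IbcEvent) -> bool,
    step: &str,
) -> Result<IbcEvent, ChannelError> {
    let event = events
        .into_iter()
        .find(|ev| is_expected(ev) || matches!(ev, IbcEvent::ChainError(_)))
        .ok_or_else(|| {
            ChannelError::missing_event(format!("no {} event was in the response", step))
        })?;

    match event {
        // A ChainError in the response means the tx was included but failed.
        IbcEvent::ChainError(e) => Err(ChannelError::tx_response(e)),
        ev => Ok(ev),
    }
}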
-#[derive(Copy, Clone, Debug, PartialEq, Eq)] -pub enum ChannelMsgType { - OpenTry, - OpenAck, - OpenConfirm, - CloseConfirm, -} - -fn check_destination_channel_state( - channel_id: ChannelId, - existing_channel: ChannelEnd, - expected_channel: ChannelEnd, -) -> Result<(), ChannelError> { - let good_connection_hops = - existing_channel.connection_hops() == expected_channel.connection_hops(); - - // TODO: Refactor into a method - let good_state = *existing_channel.state() as u32 <= *expected_channel.state() as u32; - let good_channel_port_ids = existing_channel.counterparty().channel_id().is_none() - || existing_channel.counterparty().channel_id() - == expected_channel.counterparty().channel_id() - && existing_channel.counterparty().port_id() - == expected_channel.counterparty().port_id(); - - // TODO: Check versions - - if good_state && good_connection_hops && good_channel_port_ids { - Ok(()) - } else { - Err(ChannelError::channel_already_exist(channel_id)) - } -} diff --git a/relayer/src/channel/error.rs b/relayer/src/channel/error.rs deleted file mode 100644 index ef226b51c0..0000000000 --- a/relayer/src/channel/error.rs +++ /dev/null @@ -1,209 +0,0 @@ -use core::time::Duration; -use flex_error::define_error; -use ibc::core::ics02_client::error::Error as ClientError; -use ibc::core::ics04_channel::channel::State; -use ibc::core::ics24_host::identifier::{ChainId, ChannelId, ClientId, PortChannelId, PortId}; -use ibc::events::IbcEvent; - -use crate::error::Error; -use crate::foreign_client::{ForeignClientError, HasExpiredOrFrozenError}; -use crate::supervisor::Error as SupervisorError; - -define_error! { - ChannelError { - Relayer - [ Error ] - |_| { "relayer error" }, - - Supervisor - [ SupervisorError ] - |_| { "supervisor error" }, - - Client - [ ClientError ] - |_| { "ICS02 client error" }, - - InvalidChannel - { reason: String } - | e | { - format_args!("invalid channel: {0}", - e.reason) - }, - - MissingLocalChannelId - |_| { "failed due to missing local channel id" }, - - MissingLocalConnection - { chain_id: ChainId } - | e | { - format_args!("channel constructor failed due to missing connection id on chain id {0}", - e.chain_id) - }, - - MissingCounterpartyChannelId - |_| { "failed due to missing counterparty channel id" }, - - MissingCounterpartyConnection - |_| { "failed due to missing counterparty connection" }, - - MissingChannelOnDestination - |_| { "missing channel on destination chain" }, - - ChannelProof - [ Error ] - |_| { "failed to build channel proofs" }, - - ClientOperation - { - client_id: ClientId, - chain_id: ChainId, - } - [ ForeignClientError ] - | e | { - format_args!("failed during an operation on client '{0}' hosted by chain '{1}'", - e.client_id, e.chain_id) - }, - - FetchSigner - { chain_id: ChainId } - [ Error ] - |e| { format_args!("failed while fetching the signer for destination chain '{}'", e.chain_id) }, - - Query - { chain_id: ChainId } - [ Error ] - |e| { format_args!("failed during a query to chain '{0}'", e.chain_id) }, - - QueryChannel - { channel_id: ChannelId } - [ SupervisorError ] - |e| { format_args!("failed during a query to channel '{0}'", e.channel_id) }, - - Submit - { chain_id: ChainId } - [ Error ] - |e| { format_args!("failed during a transaction submission step to chain '{0}'", e.chain_id) }, - - HandshakeFinalize - { - port_id: PortId, - channel_id: ChannelId, - chain_id: ChainId, - } - [ Error ] - |e| { - format_args!("failed to finalize a channel open handshake while querying for channel end '{0}/{1}' on chain '{2}'", - e.port_id, 
e.channel_id, e.chain_id) - }, - - PartialOpenHandshake - { - state: State, - counterparty_state: State - } - | e | { - format_args!("the channel is partially open ({0}, {1})", - e.state, e.counterparty_state) - }, - - IncompleteChannelState - { - chain_id: ChainId, - port_channel_id: PortChannelId, - } - | e | { - format_args!("channel '{0}' on chain '{1}' has no counterparty channel id", - e.port_channel_id, e.chain_id) - }, - - ChannelAlreadyExist - { channel_id: ChannelId } - |e| { format_args!("channel '{}' already exist in an incompatible state", e.channel_id) }, - - MismatchChannelEnds - { - chain_id: ChainId, - port_channel_id: PortChannelId, - expected_counterrparty_port_channel_id: PortChannelId, - actual_counterrparty_port_channel_id: PortChannelId, - } - | e | { - format_args!("channel '{0}' on chain '{1}' expected to have counterparty '{2}' but instead has '{3}'", - e.port_channel_id, e.chain_id, - e.expected_counterrparty_port_channel_id, - e.actual_counterrparty_port_channel_id) - }, - - MismatchPort - { - destination_chain_id: ChainId, - destination_port_id: PortId, - source_chain_id: ChainId, - counterparty_port_id: PortId, - counterparty_channel_id: ChannelId, - } - | e | { - format_args!( - "channel open try to chain '{}' and destination port '{}' does not match \ - the source chain '{}' counterparty port '{}' for channel '{}'", - e.destination_chain_id, e.destination_port_id, - e.source_chain_id, - e.counterparty_port_id, - e.counterparty_channel_id) - }, - - MissingEvent - { description: String } - | e | { - format_args!("missing event: {}", e.description) - }, - - MaxRetry - { - description: String, - tries: u64, - total_delay: Duration, - source: Box, - } - | e | { - format_args!("Error after maximum retry of {} and total delay of {}s: {}", - e.tries, e.total_delay.as_secs(), e.description) - }, - - RetryInternal - { reason: String } - | e | { - format_args!("Encountered internal error during retry: {}", - e.reason) - }, - - TxResponse - { reason: String } - | e | { - format_args!("tx response error: {}", - e.reason) - }, - - InvalidEvent - { event: IbcEvent } - | e | { - format_args!("channel object cannot be built from event: {}", - e.event) - }, - } -} - -impl HasExpiredOrFrozenError for ChannelErrorDetail { - fn is_expired_or_frozen_error(&self) -> bool { - match self { - Self::ClientOperation(e) => e.source.is_expired_or_frozen_error(), - _ => false, - } - } -} - -impl HasExpiredOrFrozenError for ChannelError { - fn is_expired_or_frozen_error(&self) -> bool { - self.detail().is_expired_or_frozen_error() - } -} diff --git a/relayer/src/channel/version.rs b/relayer/src/channel/version.rs deleted file mode 100644 index dd2d74cb4d..0000000000 --- a/relayer/src/channel/version.rs +++ /dev/null @@ -1,20 +0,0 @@ -//! Helper module for the relayer channel logic. -//! -//! Provides support for resolving the appropriate -//! channel version to be used in a channel open -//! handshake. - -use ibc::{ - applications::transfer, - core::{ics04_channel::Version, ics24_host::identifier::PortId}, -}; - -/// Returns the default channel version, depending on the the given [`PortId`]. 
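Tying the version handling together: `build_chan_open_init` above resolves the handshake version through a simple precedence chain that ends at the `default_by_port` helper defined in this module. Sketched as a hypothetical free function:

fn resolve_handshake_version(user_version: Option<Version>, port_id: &PortId) -> Version {
    // 1. an explicitly supplied version wins;
    // 2. otherwise the per-port default (ics20 for the transfer port);
    // 3. otherwise an empty version (the real code also logs a warning).
    user_version
        .or_else(|| default_by_port(port_id))
        .unwrap_or_else(Version::empty)
}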
-pub fn default_by_port(port_id: &PortId) -> Option { - if port_id.as_str() == transfer::PORT_ID_STR { - // https://github.com/cosmos/ibc/tree/master/spec/app/ics-020-fungible-token-transfer#forwards-compatibility - Some(Version::ics20()) - } else { - None - } -} diff --git a/relayer/src/config.rs b/relayer/src/config.rs deleted file mode 100644 index 31d593dc9b..0000000000 --- a/relayer/src/config.rs +++ /dev/null @@ -1,431 +0,0 @@ -//! Relayer configuration - -pub mod error; -pub mod filter; -pub mod proof_specs; -pub mod types; - -use alloc::collections::BTreeMap; -use core::{fmt, time::Duration}; -use std::{fs, fs::File, io::Write, path::Path}; - -use serde_derive::{Deserialize, Serialize}; -use tendermint_light_client_verifier::types::TrustThreshold; - -use ibc::core::ics23_commitment::specs::ProofSpecs; -use ibc::core::ics24_host::identifier::{ChainId, ChannelId, PortId}; -use ibc::timestamp::ZERO_DURATION; - -use crate::chain::ChainType; -use crate::config::types::{MaxMsgNum, MaxTxSize, Memo}; -use crate::keyring::Store; - -pub use error::Error; - -pub use filter::PacketFilter; - -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] -pub struct GasPrice { - pub price: f64, - pub denom: String, -} - -impl GasPrice { - pub const fn new(price: f64, denom: String) -> Self { - Self { price, denom } - } -} - -impl fmt::Display for GasPrice { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}{}", self.price, self.denom) - } -} - -/// Defaults for various fields -pub mod default { - use super::*; - - pub fn chain_type() -> ChainType { - ChainType::CosmosSdk - } - - pub fn tx_confirmation() -> bool { - true - } - - pub fn clear_packets_interval() -> u64 { - 100 - } - - pub fn rpc_timeout() -> Duration { - Duration::from_secs(10) - } - - pub fn clock_drift() -> Duration { - Duration::from_secs(5) - } - - pub fn max_block_time() -> Duration { - Duration::from_secs(30) - } - - pub fn connection_delay() -> Duration { - ZERO_DURATION - } -} - -#[derive(Clone, Debug, Default, Deserialize, Serialize)] -#[serde(deny_unknown_fields)] -pub struct Config { - #[serde(default)] - pub global: GlobalConfig, - #[serde(default)] - pub mode: ModeConfig, - #[serde(default)] - pub rest: RestConfig, - #[serde(default)] - pub telemetry: TelemetryConfig, - #[serde(default = "Vec::new", skip_serializing_if = "Vec::is_empty")] - pub chains: Vec, -} - -impl Config { - pub fn has_chain(&self, id: &ChainId) -> bool { - self.chains.iter().any(|c| c.id == *id) - } - - pub fn find_chain(&self, id: &ChainId) -> Option<&ChainConfig> { - self.chains.iter().find(|c| c.id == *id) - } - - pub fn find_chain_mut(&mut self, id: &ChainId) -> Option<&mut ChainConfig> { - self.chains.iter_mut().find(|c| c.id == *id) - } - - /// Returns true if filtering is disabled or if packets are allowed on - /// the channel [`PortId`] [`ChannelId`] on [`ChainId`]. - /// Returns false otherwise. 
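As a quick illustration of the `GasPrice` `Display` impl above, which concatenates the price directly with the denom (values are hypothetical):

#[test]
fn gas_price_display() {
    let price = GasPrice::new(0.025, "stake".to_string());
    assert_eq!(price.to_string(), "0.025stake");
}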
- pub fn packets_on_channel_allowed( - &self, - chain_id: &ChainId, - port_id: &PortId, - channel_id: &ChannelId, - ) -> bool { - match self.find_chain(chain_id) { - Some(chain_config) => chain_config.packet_filter.is_allowed(port_id, channel_id), - None => false, - } - } - - pub fn chains_map(&self) -> BTreeMap<&ChainId, &ChainConfig> { - self.chains.iter().map(|c| (&c.id, c)).collect() - } -} - -#[derive(Copy, Clone, Debug, Deserialize, Serialize)] -#[serde(deny_unknown_fields)] -pub struct ModeConfig { - pub clients: Clients, - pub connections: Connections, - pub channels: Channels, - pub packets: Packets, -} - -impl ModeConfig { - pub fn all_disabled(&self) -> bool { - !self.clients.enabled - && !self.connections.enabled - && !self.channels.enabled - && !self.packets.enabled - } -} - -impl Default for ModeConfig { - fn default() -> Self { - Self { - clients: Clients { - enabled: true, - refresh: true, - misbehaviour: true, - }, - connections: Connections { enabled: false }, - channels: Channels { enabled: false }, - packets: Packets { - enabled: true, - clear_interval: default::clear_packets_interval(), - clear_on_start: true, - tx_confirmation: true, - }, - } - } -} - -#[derive(Copy, Clone, Debug, Default, Deserialize, Serialize)] -#[serde(deny_unknown_fields)] -pub struct Clients { - pub enabled: bool, - #[serde(default)] - pub refresh: bool, - #[serde(default)] - pub misbehaviour: bool, -} - -#[derive(Copy, Clone, Debug, Default, Deserialize, Serialize)] -#[serde(deny_unknown_fields)] -pub struct Connections { - pub enabled: bool, -} - -#[derive(Copy, Clone, Debug, Default, Deserialize, Serialize)] -#[serde(deny_unknown_fields)] -pub struct Channels { - pub enabled: bool, -} - -#[derive(Copy, Clone, Debug, Deserialize, Serialize)] -#[serde(deny_unknown_fields)] -pub struct Packets { - pub enabled: bool, - #[serde(default = "default::clear_packets_interval")] - pub clear_interval: u64, - #[serde(default)] - pub clear_on_start: bool, - #[serde(default = "default::tx_confirmation")] - pub tx_confirmation: bool, -} - -impl Default for Packets { - fn default() -> Self { - Self { - enabled: false, - clear_interval: default::clear_packets_interval(), - clear_on_start: false, - tx_confirmation: default::tx_confirmation(), - } - } -} - -/// Log levels are wrappers over [`tracing_core::Level`]. 
-/// -/// [`tracing_core::Level`]: https://docs.rs/tracing-core/0.1.17/tracing_core/struct.Level.html -#[derive(Copy, Clone, Debug, PartialEq, Deserialize, Serialize)] -#[serde(rename_all = "lowercase")] -pub enum LogLevel { - Trace, - Debug, - Info, - Warn, - Error, -} - -impl Default for LogLevel { - fn default() -> Self { - Self::Info - } -} - -impl fmt::Display for LogLevel { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - LogLevel::Trace => write!(f, "trace"), - LogLevel::Debug => write!(f, "debug"), - LogLevel::Info => write!(f, "info"), - LogLevel::Warn => write!(f, "warn"), - LogLevel::Error => write!(f, "error"), - } - } -} - -#[derive(Clone, Debug, Default, Deserialize, Serialize)] -#[serde(default, deny_unknown_fields)] -pub struct GlobalConfig { - pub log_level: LogLevel, -} - -#[derive(Clone, Debug, Deserialize, Serialize)] -#[serde(deny_unknown_fields)] -pub struct TelemetryConfig { - pub enabled: bool, - pub host: String, - pub port: u16, -} - -impl Default for TelemetryConfig { - fn default() -> Self { - Self { - enabled: false, - host: "127.0.0.1".to_string(), - port: 3001, - } - } -} - -#[derive(Clone, Debug, Deserialize, Serialize)] -#[serde(deny_unknown_fields)] -pub struct RestConfig { - pub enabled: bool, - pub host: String, - pub port: u16, -} - -impl Default for RestConfig { - fn default() -> Self { - Self { - enabled: false, - host: "127.0.0.1".to_string(), - port: 3000, - } - } -} - -/// It defines the address generation method -/// TODO: Ethermint `pk_type` to be restricted -/// after the Cosmos SDK release with ethsecp256k1 -/// -#[derive(Clone, Debug, PartialEq, Deserialize, Serialize)] -#[serde( - rename_all = "lowercase", - tag = "derivation", - content = "proto_type", - deny_unknown_fields -)] -pub enum AddressType { - Cosmos, - Ethermint { pk_type: String }, -} - -impl Default for AddressType { - fn default() -> Self { - AddressType::Cosmos - } -} - -impl fmt::Display for AddressType { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - AddressType::Cosmos => write!(f, "cosmos"), - AddressType::Ethermint { .. } => write!(f, "ethermint"), - } - } -} - -#[derive(Clone, Debug, Deserialize, Serialize)] -#[serde(deny_unknown_fields)] -pub struct ChainConfig { - pub id: ChainId, - #[serde(default = "default::chain_type")] - pub r#type: ChainType, - pub rpc_addr: tendermint_rpc::Url, - pub websocket_addr: tendermint_rpc::Url, - pub grpc_addr: tendermint_rpc::Url, - #[serde(default = "default::rpc_timeout", with = "humantime_serde")] - pub rpc_timeout: Duration, - pub account_prefix: String, - pub key_name: String, - #[serde(default)] - pub key_store_type: Store, - pub store_prefix: String, - pub default_gas: Option, - pub max_gas: Option, - pub gas_adjustment: Option, - pub fee_granter: Option, - #[serde(default)] - pub max_msg_num: MaxMsgNum, - #[serde(default)] - pub max_tx_size: MaxTxSize, - - /// A correction parameter that helps deal with clocks that are only approximately synchronized - /// between the source and destination chains for a client. - /// This parameter is used when deciding to accept or reject a new header - /// (originating from the source chain) for any client with the destination chain - /// that uses this configuration, unless it is overridden by the client-specific - /// clock drift option. 
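A few of the defaults and `Display` impls defined above, written as assertions in the style of this crate's tests (a sketch):

#[test]
fn config_type_defaults() {
    assert_eq!(LogLevel::default().to_string(), "info");
    assert_eq!(AddressType::default().to_string(), "cosmos");
    assert_eq!(default::rpc_timeout(), Duration::from_secs(10));
}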
- #[serde(default = "default::clock_drift", with = "humantime_serde")] - pub clock_drift: Duration, - - #[serde(default = "default::max_block_time", with = "humantime_serde")] - pub max_block_time: Duration, - - /// The trusting period specifies how long a validator set is trusted for - /// (must be shorter than the chain's unbonding period). - #[serde(default, with = "humantime_serde")] - pub trusting_period: Option, - - #[serde(default)] - pub memo_prefix: Memo, - #[serde(default, with = "self::proof_specs")] - pub proof_specs: ProofSpecs, - - // these two need to be last otherwise we run into `ValueAfterTable` error when serializing to TOML - /// The trust threshold defines what fraction of the total voting power of a known - /// and trusted validator set is sufficient for a commit to be accepted going forward. - #[serde(default)] - pub trust_threshold: TrustThreshold, - pub gas_price: GasPrice, - #[serde(default)] - pub packet_filter: PacketFilter, - #[serde(default)] - pub address_type: AddressType, -} - -/// Attempt to load and parse the TOML config file as a `Config`. -pub fn load(path: impl AsRef) -> Result { - let config_toml = std::fs::read_to_string(&path).map_err(Error::io)?; - - let config = toml::from_str::(&config_toml[..]).map_err(Error::decode)?; - - Ok(config) -} - -/// Serialize the given `Config` as TOML to the given config file. -pub fn store(config: &Config, path: impl AsRef) -> Result<(), Error> { - let mut file = if path.as_ref().exists() { - fs::OpenOptions::new().write(true).truncate(true).open(path) - } else { - File::create(path) - } - .map_err(Error::io)?; - - store_writer(config, &mut file) -} - -/// Serialize the given `Config` as TOML to the given writer. -pub(crate) fn store_writer(config: &Config, mut writer: impl Write) -> Result<(), Error> { - let toml_config = toml::to_string_pretty(&config).map_err(Error::encode)?; - - writeln!(writer, "{}", toml_config).map_err(Error::io)?; - - Ok(()) -} - -#[cfg(test)] -mod tests { - use super::{load, store_writer}; - use test_log::test; - - #[test] - fn parse_valid_config() { - let path = concat!( - env!("CARGO_MANIFEST_DIR"), - "/tests/config/fixtures/relayer_conf_example.toml" - ); - - let config = load(path).expect("could not parse config"); - - dbg!(config); - } - - #[test] - fn serialize_valid_config() { - let path = concat!( - env!("CARGO_MANIFEST_DIR"), - "/tests/config/fixtures/relayer_conf_example.toml" - ); - - let config = load(path).expect("could not parse config"); - - let mut buffer = Vec::new(); - store_writer(&config, &mut buffer).unwrap(); - } -} diff --git a/relayer/src/config/error.rs b/relayer/src/config/error.rs deleted file mode 100644 index a24078e666..0000000000 --- a/relayer/src/config/error.rs +++ /dev/null @@ -1,17 +0,0 @@ -use flex_error::{define_error, TraceError}; - -define_error! { - Error { - Io - [ TraceError ] - |_| { "config I/O error" }, - - Decode - [ TraceError ] - |_| { "invalid configuration" }, - - Encode - [ TraceError ] - |_| { "invalid configuration" }, - } -} diff --git a/relayer/src/config/filter.rs b/relayer/src/config/filter.rs deleted file mode 100644 index 8016dafcd7..0000000000 --- a/relayer/src/config/filter.rs +++ /dev/null @@ -1,501 +0,0 @@ -//! Custom `serde` deserializer for `FilterMatch` - -use core::fmt; -use core::str::FromStr; - -use ibc::core::ics24_host::identifier::{ChannelId, PortId}; -use itertools::Itertools; -use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; - -/// Represents the ways in which packets can be filtered. 
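Bridging the loader above and the packet filter types that follow, a typical read-and-check flow looks roughly like this (the file path and identifiers are made up; `ChainId::from_string` and the `from_str` parsers come from the `ibc` crate):

use core::str::FromStr;
use ibc::core::ics24_host::identifier::{ChainId, ChannelId, PortId};

fn packets_allowed_example() -> Result<bool, config::Error> {
    // load() parses the TOML file into a Config.
    let config = config::load("hermes-config.toml")?;

    Ok(config.packets_on_channel_allowed(
        &ChainId::from_string("ibc-0"),
        &PortId::from_str("transfer").unwrap(),
        &ChannelId::from_str("channel-0").unwrap(),
    ))
}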
-#[derive(Clone, Debug, Serialize, Deserialize)] -#[serde( - rename_all = "lowercase", - tag = "policy", - content = "list", - deny_unknown_fields -)] -pub enum PacketFilter { - /// Allow packets from the specified channels. - Allow(ChannelFilters), - /// Deny packets from the specified channels. - Deny(ChannelFilters), - /// Allow any & all packets. - AllowAll, -} - -impl Default for PacketFilter { - /// By default, allows all channels & ports. - fn default() -> Self { - Self::AllowAll - } -} - -impl PacketFilter { - /// Returns true if the packets can be relayed on the channel with [`PortId`] and [`ChannelId`], - /// false otherwise. - pub fn is_allowed(&self, port_id: &PortId, channel_id: &ChannelId) -> bool { - match self { - PacketFilter::Allow(filters) => filters.matches((port_id, channel_id)), - PacketFilter::Deny(filters) => !filters.matches((port_id, channel_id)), - PacketFilter::AllowAll => true, - } - } -} - -/// The internal representation of channel filter policies. -#[derive(Clone, Debug, Default, Deserialize)] -#[serde(deny_unknown_fields)] -pub struct ChannelFilters(Vec<(PortFilterMatch, ChannelFilterMatch)>); - -impl ChannelFilters { - /// Create a new filter from the given list of port/channel filters. - pub fn new(filters: Vec<(PortFilterMatch, ChannelFilterMatch)>) -> Self { - Self(filters) - } - - /// Indicates whether a match for the given [`PortId`]-[`ChannelId`] pair - /// exists in the filter policy. - pub fn matches(&self, channel_port: (&PortId, &ChannelId)) -> bool { - let (port_id, channel_id) = channel_port; - self.0.iter().any(|(port_filter, chan_filter)| { - port_filter.matches(port_id) && chan_filter.matches(channel_id) - }) - } - - /// Indicates whether this filter policy contains only exact patterns. - #[inline] - pub fn is_exact(&self) -> bool { - self.0.iter().all(|(port_filter, channel_filter)| { - port_filter.is_exact() && channel_filter.is_exact() - }) - } - - /// An iterator over the [`PortId`]-[`ChannelId`] pairs that don't contain wildcards. - pub fn iter_exact(&self) -> impl Iterator { - self.0.iter().filter_map(|port_chan_filter| { - if let &(FilterPattern::Exact(ref port_id), FilterPattern::Exact(ref chan_id)) = - port_chan_filter - { - Some((port_id, chan_id)) - } else { - None - } - }) - } -} - -impl fmt::Display for ChannelFilters { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!( - f, - "{}", - self.0 - .iter() - .map(|(pid, cid)| format!("{}/{}", pid, cid)) - .join(", ") - ) - } -} - -impl Serialize for ChannelFilters { - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - use serde::ser::SerializeSeq; - - struct Pair<'a> { - a: &'a FilterPattern, - b: &'a FilterPattern, - } - - impl<'a> Serialize for Pair<'a> { - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - let mut seq = serializer.serialize_seq(Some(2))?; - seq.serialize_element(self.a)?; - seq.serialize_element(self.b)?; - seq.end() - } - } - - let mut outer_seq = serializer.serialize_seq(Some(self.0.len()))?; - - for (port, channel) in &self.0 { - outer_seq.serialize_element(&Pair { - a: port, - b: channel, - })?; - } - - outer_seq.end() - } -} - -/// Newtype wrapper for expressing wildcard patterns compiled to a [`regex::Regex`]. 
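The TOML-driven tests further below exercise these types through deserialization; the same policy can also be built programmatically, which shows how `ChannelFilters::new` and the two `FilterPattern` variants fit together (a sketch mirroring the serialization test below):

use core::str::FromStr;
use ibc::core::ics24_host::identifier::{ChannelId, PortId};

fn allow_ica_and_transfer() -> PacketFilter {
    PacketFilter::Allow(ChannelFilters::new(vec![
        (
            FilterPattern::Exact(PortId::from_str("transfer").unwrap()),
            FilterPattern::Exact(ChannelId::from_str("channel-0").unwrap()),
        ),
        (
            FilterPattern::Wildcard("ica*".parse().unwrap()),
            FilterPattern::Wildcard("*".parse().unwrap()),
        ),
    ]))
}

// allow_ica_and_transfer().is_allowed(&port_id, &channel_id) then behaves like
// the `policy = 'allow'` TOML form used in the tests at the end of this file.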
-#[derive(Clone, Debug)] -pub struct Wildcard { - pattern: String, - regex: regex::Regex, -} - -impl Wildcard { - pub fn new(pattern: String) -> Result { - let escaped = regex::escape(&pattern).replace("\\*", "(?:.*)"); - let regex = format!("^{escaped}$").parse()?; - Ok(Self { pattern, regex }) - } - - #[inline] - pub fn is_match(&self, text: &str) -> bool { - self.regex.is_match(text) - } -} - -impl FromStr for Wildcard { - type Err = regex::Error; - - fn from_str(pattern: &str) -> Result { - Self::new(pattern.to_string()) - } -} - -impl fmt::Display for Wildcard { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}", self.pattern) - } -} - -impl Serialize for Wildcard { - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - serializer.serialize_str(&self.pattern) - } -} - -impl PartialEq for Wildcard { - fn eq(&self, other: &Self) -> bool { - self.pattern == other.pattern - } -} - -/// Represents a single channel to be filtered in a [`ChannelFilters`] list. -#[derive(Clone, Debug, PartialEq)] -pub enum FilterPattern { - /// A channel specified exactly with its [`PortId`] & [`ChannelId`]. - Exact(T), - /// A glob of channel(s) specified with a wildcard in either or both [`PortId`] & [`ChannelId`]. - Wildcard(Wildcard), -} - -impl FilterPattern { - /// Indicates whether this filter is specified in part with a wildcard. - pub fn is_wildcard(&self) -> bool { - matches!(self, Self::Wildcard(_)) - } - - /// Indicates whether this filter is specified as an exact match. - pub fn is_exact(&self) -> bool { - matches!(self, Self::Exact(_)) - } - - /// Matches the given value via strict equality if the filter is an `Exact`, or via - /// wildcard matching if the filter is a `Pattern`. - pub fn matches(&self, value: &T) -> bool - where - T: PartialEq + ToString, - { - match self { - FilterPattern::Exact(v) => value == v, - FilterPattern::Wildcard(regex) => regex.is_match(&value.to_string()), - } - } - - /// Returns the contained value if this filter contains an `Exact` variant, or - /// `None` if it contains a `Pattern`. - pub fn exact_value(&self) -> Option<&T> { - match self { - FilterPattern::Exact(value) => Some(value), - FilterPattern::Wildcard(_) => None, - } - } -} - -impl fmt::Display for FilterPattern { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - FilterPattern::Exact(value) => write!(f, "{}", value), - FilterPattern::Wildcard(regex) => write!(f, "{}", regex), - } - } -} - -impl Serialize for FilterPattern -where - T: ToString, -{ - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - match self { - FilterPattern::Exact(e) => serializer.serialize_str(&e.to_string()), - FilterPattern::Wildcard(t) => serializer.serialize_str(&t.to_string()), - } - } -} - -/// Type alias for a [`FilterPattern`] containing a [`PortId`]. -pub type PortFilterMatch = FilterPattern; -/// Type alias for a [`FilterPattern`] containing a [`ChannelId`]. 
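The compiled pattern above is anchored (`^...$`) and each `*` becomes `(?:.*)`, so matching behaves as follows (a small sketch, consistent with the `packet_filter_regex` test further below):

#[test]
fn wildcard_matching() {
    let w: Wildcard = "transfer*".parse().unwrap();
    assert!(w.is_match("transfer"));      // '*' also matches the empty string
    assert!(w.is_match("transfer-007"));
    assert!(!w.is_match("ft-transfer")); // anchored: a match elsewhere in the string does not count
}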
-pub type ChannelFilterMatch = FilterPattern; - -impl<'de> Deserialize<'de> for PortFilterMatch { - fn deserialize>(deserializer: D) -> Result { - deserializer.deserialize_string(port::PortFilterMatchVisitor) - } -} - -impl<'de> Deserialize<'de> for ChannelFilterMatch { - fn deserialize>(deserializer: D) -> Result { - deserializer.deserialize_string(channel::ChannelFilterMatchVisitor) - } -} - -pub(crate) mod port { - use super::*; - use ibc::core::ics24_host::identifier::PortId; - - pub struct PortFilterMatchVisitor; - - impl<'de> de::Visitor<'de> for PortFilterMatchVisitor { - type Value = PortFilterMatch; - - fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { - formatter.write_str("valid PortId or wildcard") - } - - fn visit_str(self, v: &str) -> Result { - if let Ok(port_id) = PortId::from_str(v) { - Ok(PortFilterMatch::Exact(port_id)) - } else { - let wildcard = v.parse().map_err(E::custom)?; - Ok(PortFilterMatch::Wildcard(wildcard)) - } - } - - fn visit_string(self, v: String) -> Result { - self.visit_str(&v) - } - } -} - -pub(crate) mod channel { - use super::*; - use ibc::core::ics24_host::identifier::ChannelId; - - pub struct ChannelFilterMatchVisitor; - - impl<'de> de::Visitor<'de> for ChannelFilterMatchVisitor { - type Value = ChannelFilterMatch; - - fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { - formatter.write_str("valid ChannelId or wildcard") - } - - fn visit_str(self, v: &str) -> Result { - if let Ok(channel_id) = ChannelId::from_str(v) { - Ok(ChannelFilterMatch::Exact(channel_id)) - } else { - let wildcard = v.parse().map_err(E::custom)?; - Ok(ChannelFilterMatch::Wildcard(wildcard)) - } - } - - fn visit_string(self, v: String) -> Result { - self.visit_str(&v) - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::config::PacketFilter; - - #[test] - fn deserialize_packet_filter_policy() { - let toml_content = r#" - policy = 'allow' - list = [ - ['ica*', '*'], - ['transfer', 'channel-0'], - ] - "#; - - let filter_policy: PacketFilter = - toml::from_str(toml_content).expect("could not parse filter policy"); - - dbg!(filter_policy); - } - - #[test] - fn serialize_packet_filter_policy() { - use std::str::FromStr; - - use ibc::core::ics24_host::identifier::{ChannelId, PortId}; - - let filter_policy = ChannelFilters(vec![ - ( - FilterPattern::Exact(PortId::from_str("transfer").unwrap()), - FilterPattern::Exact(ChannelId::from_str("channel-0").unwrap()), - ), - ( - FilterPattern::Wildcard("ica*".parse().unwrap()), - FilterPattern::Wildcard("*".parse().unwrap()), - ), - ]); - - let fp = PacketFilter::Allow(filter_policy); - let toml_str = toml::to_string_pretty(&fp).expect("could not serialize packet filter"); - - println!("{}", toml_str); - } - - #[test] - fn channel_filter_iter_exact() { - let toml_content = r#" - policy = 'deny' - list = [ - ['ica', 'channel-*'], - ['ica*', '*'], - ['transfer', 'channel-0'], - ['transfer*', 'channel-1'], - ['ft-transfer', 'channel-2'], - ] - "#; - - let pf: PacketFilter = toml::from_str(toml_content).expect("could not parse filter policy"); - - if let PacketFilter::Deny(channel_filters) = pf { - let exact_matches = channel_filters.iter_exact().collect::>(); - assert_eq!( - exact_matches, - vec![ - ( - &PortId::from_str("transfer").unwrap(), - &ChannelId::from_str("channel-0").unwrap() - ), - ( - &PortId::from_str("ft-transfer").unwrap(), - &ChannelId::from_str("channel-2").unwrap() - ) - ] - ); - } else { - panic!("expected `PacketFilter::Deny` variant"); - } - } - - #[test] - fn 
packet_filter_deny_policy() { - let deny_policy = r#" - policy = 'deny' - list = [ - ['ica', 'channel-*'], - ['ica*', '*'], - ['transfer', 'channel-0'], - ['transfer*', 'channel-1'], - ['ft-transfer', 'channel-2'], - ] - "#; - - let pf: PacketFilter = toml::from_str(deny_policy).expect("could not parse filter policy"); - - assert!(!pf.is_allowed( - &PortId::from_str("ft-transfer").unwrap(), - &ChannelId::from_str("channel-2").unwrap() - )); - assert!(pf.is_allowed( - &PortId::from_str("ft-transfer").unwrap(), - &ChannelId::from_str("channel-1").unwrap() - )); - assert!(pf.is_allowed( - &PortId::from_str("transfer").unwrap(), - &ChannelId::from_str("channel-2").unwrap() - )); - assert!(!pf.is_allowed( - &PortId::from_str("ica-1").unwrap(), - &ChannelId::from_str("channel-2").unwrap() - )); - } - - #[test] - fn packet_filter_allow_policy() { - let allow_policy = r#" - policy = 'allow' - list = [ - ['ica', 'channel-*'], - ['ica*', '*'], - ['transfer', 'channel-0'], - ['transfer*', 'channel-1'], - ['ft-transfer', 'channel-2'], - ] - "#; - - let pf: PacketFilter = toml::from_str(allow_policy).expect("could not parse filter policy"); - - assert!(pf.is_allowed( - &PortId::from_str("ft-transfer").unwrap(), - &ChannelId::from_str("channel-2").unwrap() - )); - assert!(!pf.is_allowed( - &PortId::from_str("ft-transfer").unwrap(), - &ChannelId::from_str("channel-1").unwrap() - )); - assert!(!pf.is_allowed( - &PortId::from_str("transfer-1").unwrap(), - &ChannelId::from_str("channel-2").unwrap() - )); - assert!(pf.is_allowed( - &PortId::from_str("ica-1").unwrap(), - &ChannelId::from_str("channel-2").unwrap() - )); - assert!(pf.is_allowed( - &PortId::from_str("ica").unwrap(), - &ChannelId::from_str("channel-1").unwrap() - )); - } - - #[test] - fn packet_filter_regex() { - let allow_policy = r#" - policy = 'allow' - list = [ - ['transfer*', 'channel-1'], - ] - "#; - - let pf: PacketFilter = toml::from_str(allow_policy).expect("could not parse filter policy"); - - assert!(!pf.is_allowed( - &PortId::from_str("ft-transfer").unwrap(), - &ChannelId::from_str("channel-1").unwrap() - )); - assert!(!pf.is_allowed( - &PortId::from_str("ft-transfer-port").unwrap(), - &ChannelId::from_str("channel-1").unwrap() - )); - } - - #[test] - fn to_string_wildcards() { - let wildcard = "ica*".parse::().unwrap(); - assert_eq!(wildcard.to_string(), "ica*".to_string()); - } -} diff --git a/relayer/src/config/proof_specs.rs b/relayer/src/config/proof_specs.rs deleted file mode 100644 index 0178ddd4d2..0000000000 --- a/relayer/src/config/proof_specs.rs +++ /dev/null @@ -1,35 +0,0 @@ -//! 
Custom `serde` deserializer for `ProofSpecs`
-
-use core::fmt;
-use ibc::core::ics23_commitment::specs::ProofSpecs;
-use serde::{de, ser, Deserializer, Serializer};
-
-pub fn serialize<S: Serializer>(
-    proof_specs: &ProofSpecs,
-    serializer: S,
-) -> Result<S::Ok, S::Error> {
-    let json_str = serde_json::to_string_pretty(proof_specs).map_err(ser::Error::custom)?;
-    serializer.serialize_str(&json_str)
-}
-
-struct ProofSpecsVisitor;
-
-impl<'de> de::Visitor<'de> for ProofSpecsVisitor {
-    type Value = ProofSpecs;
-
-    fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
-        formatter.write_str("ICS23 proof-specs serialized as a JSON array")
-    }
-
-    fn visit_str<E: de::Error>(self, v: &str) -> Result<Self::Value, E> {
-        serde_json::from_str(v).map_err(E::custom)
-    }
-
-    fn visit_string<E: de::Error>(self, v: String) -> Result<Self::Value, E> {
-        self.visit_str(&v)
-    }
-}
-
-pub fn deserialize<'de, D: Deserializer<'de>>(deserializer: D) -> Result<ProofSpecs, D::Error> {
-    deserializer.deserialize_string(ProofSpecsVisitor)
-}
diff --git a/relayer/src/config/types.rs b/relayer/src/config/types.rs
deleted file mode 100644
index 7b6a7bf5ef..0000000000
--- a/relayer/src/config/types.rs
+++ /dev/null
@@ -1,322 +0,0 @@
-//! Configuration-related types.
-//!
-//! Implements defaults, as well as serializing and
-//! deserializing with upper-bound verification.
-
-pub use max_msg_num::MaxMsgNum;
-
-pub mod max_msg_num {
-    flex_error::define_error! {
-        Error {
-            TooSmall
-                { value: usize }
-                |e| {
-                    format_args!("`max_msg_num` must be greater than or equal to {}, found {}",
-                        MaxMsgNum::MIN_BOUND, e.value)
-                },
-
-            TooBig
-                { value: usize }
-                |e| {
-                    format_args!("`max_msg_num` must be less than or equal to {}, found {}",
-                        MaxMsgNum::MAX_BOUND, e.value)
-                },
-        }
-    }
-
-    #[derive(Debug, Clone, Copy)]
-    pub struct MaxMsgNum(usize);
-
-    impl MaxMsgNum {
-        const DEFAULT: usize = 30;
-        const MIN_BOUND: usize = 1;
-        const MAX_BOUND: usize = 100;
-
-        pub fn new(value: usize) -> Result<Self, Error> {
-            if value < Self::MIN_BOUND {
-                return Err(Error::too_small(value));
-            }
-
-            if value > Self::MAX_BOUND {
-                return Err(Error::too_big(value));
-            }
-
-            Ok(Self(value))
-        }
-
-        pub fn to_usize(self) -> usize {
-            self.0
-        }
-    }
-
-    impl Default for MaxMsgNum {
-        fn default() -> Self {
-            Self(Self::DEFAULT)
-        }
-    }
-
-    use serde::de::Unexpected;
-    use serde::{de::Error as _, Deserialize, Deserializer, Serialize, Serializer};
-
-    impl<'de> Deserialize<'de> for MaxMsgNum {
-        fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
-        where
-            D: Deserializer<'de>,
-        {
-            let value = usize::deserialize(deserializer)?;
-
-            MaxMsgNum::new(value).map_err(|e| match e.detail() {
-                ErrorDetail::TooSmall(_) => D::Error::invalid_value(
-                    Unexpected::Unsigned(value as u64),
-                    &format!("a usize greater than or equal to {}", Self::MIN_BOUND).as_str(),
-                ),
-                ErrorDetail::TooBig(_) => D::Error::invalid_value(
-                    Unexpected::Unsigned(value as u64),
-                    &format!("a usize less than or equal to {}", Self::MAX_BOUND).as_str(),
-                ),
-            })
-        }
-    }
-
-    impl Serialize for MaxMsgNum {
-        fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
-        where
-            S: Serializer,
-        {
-            self.0.serialize(serializer)
-        }
-    }
-
-    impl From<MaxMsgNum> for usize {
-        fn from(m: MaxMsgNum) -> Self {
-            m.0
-        }
-    }
-}
-
-pub use max_tx_size::MaxTxSize;
-
-pub mod max_tx_size {
-    flex_error::define_error! {
-        Error {
-            TooBig
-                { value: usize }
-                |e| {
-                    format_args!("`max_tx_size` must be less than or equal to {}, found {}",
-                        MaxTxSize::MAX_BOUND, e.value)
-                },
-        }
-    }
-
-    #[derive(Debug, Clone, Copy)]
-    pub struct MaxTxSize(usize);
-
-    impl MaxTxSize {
-        const DEFAULT: usize = 2 * 1048576; // 2 MBytes
-        const MAX_BOUND: usize = 8 * 1048576; // 8 MBytes
-
-        pub fn new(value: usize) -> Result<Self, Error> {
-            if value > Self::MAX_BOUND {
-                return Err(Error::too_big(value));
-            }
-
-            Ok(Self(value))
-        }
-
-        pub fn to_usize(self) -> usize {
-            self.0
-        }
-    }
-
-    impl Default for MaxTxSize {
-        fn default() -> Self {
-            Self(Self::DEFAULT)
-        }
-    }
-
-    use serde::de::Unexpected;
-    use serde::{de::Error as _, Deserialize, Deserializer, Serialize, Serializer};
-
-    impl<'de> Deserialize<'de> for MaxTxSize {
-        fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
-        where
-            D: Deserializer<'de>,
-        {
-            let value = usize::deserialize(deserializer)?;
-
-            MaxTxSize::new(value).map_err(|e| match e.detail() {
-                ErrorDetail::TooBig(_) => D::Error::invalid_value(
-                    Unexpected::Unsigned(value as u64),
-                    &format!("a usize less than or equal to {}", Self::MAX_BOUND).as_str(),
-                ),
-            })
-        }
-    }
-
-    impl Serialize for MaxTxSize {
-        fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
-        where
-            S: Serializer,
-        {
-            self.0.serialize(serializer)
-        }
-    }
-
-    impl From<MaxTxSize> for usize {
-        fn from(m: MaxTxSize) -> Self {
-            m.0
-        }
-    }
-}
-
-pub use memo::Memo;
-
-pub mod memo {
-    flex_error::define_error! {
-        Error {
-            TooLong
-                { length: usize }
-                |e| {
-                    format_args!("`memo` must be no longer than {} characters, found length {}",
-                        Memo::MAX_LEN, e.length)
-                }
-        }
-    }
-
-    /// A memo domain-type.
-    ///
-    /// Hermes uses this type to populate the `tx.memo` field for
-    /// each transaction it submits.
-    /// The memo can be configured on a per-chain basis.
-    ///
-    #[derive(Clone, Debug, Default)]
-    pub struct Memo(String);
-
-    impl Memo {
-        const MAX_LEN: usize = 50;
-
-        pub fn new(memo: String) -> Result<Self, Error> {
-            if memo.len() > Self::MAX_LEN {
-                return Err(Error::too_long(memo.len()));
-            }
-
-            Ok(Self(memo))
-        }
-
-        pub fn apply_suffix(&mut self, suffix: &str) {
-            // Add a separator if the memo
-            // is pre-populated with some content already.
- if !self.0.is_empty() { - self.0.push_str(" | "); - } - - self.0.push_str(suffix); - } - - pub fn as_str(&self) -> &str { - &self.0 - } - } - - use serde::{de::Error as _, Deserialize, Deserializer, Serialize, Serializer}; - - impl<'de> Deserialize<'de> for Memo { - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - let value = String::deserialize(deserializer)?; - - Memo::new(value).map_err(|e| match e.detail() { - ErrorDetail::TooLong(sub) => D::Error::invalid_length( - sub.length, - &format!("a string length of at most {}", Self::MAX_LEN).as_str(), - ), - }) - } - } - - impl Serialize for Memo { - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - self.0.serialize(serializer) - } - } - - use core::fmt; - - impl fmt::Display for Memo { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}", self.as_str()) - } - } -} - -#[cfg(test)] -#[allow(dead_code)] // the fields of the structs defined below are never accessed -mod tests { - use super::*; - - use serde::Deserialize; - use test_log::test; - - #[test] - fn parse_invalid_max_msg_num_min() { - #[derive(Debug, Deserialize)] - struct DummyConfig { - max_msg_num: MaxMsgNum, - } - - let err = toml::from_str::("max_msg_num = 0") - .unwrap_err() - .to_string(); - - assert!(err.contains("expected a usize greater than or equal to")); - } - - #[test] - fn parse_invalid_max_msg_num_max() { - #[derive(Debug, Deserialize)] - struct DummyConfig { - max_msg_num: MaxMsgNum, - } - - let err = toml::from_str::("max_msg_num = 1024") - .unwrap_err() - .to_string(); - - assert!(err.contains("expected a usize less than or equal to")); - } - - #[test] - fn parse_invalid_max_tx_size() { - #[derive(Debug, Deserialize)] - struct DummyConfig { - max_tx_size: MaxTxSize, - } - - let err = toml::from_str::("max_tx_size = 9999999999") - .unwrap_err() - .to_string(); - - assert!(err.contains("expected a usize less than or equal to")); - } - - #[test] - fn parse_invalid_memo() { - #[derive(Debug, Deserialize)] - struct DummyConfig { - memo: Memo, - } - - let err = toml::from_str::( - r#"memo = "foo bar baz foo bar baz foo bar baz foo bar baz foo bar baz""#, - ) - .unwrap_err() - .to_string(); - - assert!(err.contains("a string length of at most")); - } -} diff --git a/relayer/src/connection.rs b/relayer/src/connection.rs deleted file mode 100644 index 620fe0bf4f..0000000000 --- a/relayer/src/connection.rs +++ /dev/null @@ -1,1332 +0,0 @@ -use core::time::Duration; - -use ibc_proto::google::protobuf::Any; -use serde::Serialize; -use tracing::{debug, error, info, warn}; - -use ibc::core::ics02_client::height::Height; -use ibc::core::ics03_connection::connection::{ - ConnectionEnd, Counterparty, IdentifiedConnectionEnd, State, -}; -use ibc::core::ics03_connection::msgs::conn_open_ack::MsgConnectionOpenAck; -use ibc::core::ics03_connection::msgs::conn_open_confirm::MsgConnectionOpenConfirm; -use ibc::core::ics03_connection::msgs::conn_open_init::MsgConnectionOpenInit; -use ibc::core::ics03_connection::msgs::conn_open_try::MsgConnectionOpenTry; -use ibc::core::ics24_host::identifier::{ClientId, ConnectionId}; -use ibc::events::IbcEvent; -use ibc::timestamp::ZERO_DURATION; -use ibc::tx_msg::Msg; - -use crate::chain::counterparty::connection_state_on_destination; -use crate::chain::handle::ChainHandle; -use crate::chain::requests::{ - IncludeProof, PageRequest, QueryConnectionRequest, QueryConnectionsRequest, -}; -use crate::chain::tracking::TrackedMsgs; -use crate::foreign_client::{ForeignClient, 
HasExpiredOrFrozenError}; -use crate::object::Connection as WorkerConnectionObject; -use crate::util::retry::{retry_count, retry_with_index, RetryResult}; -use crate::util::task::Next; - -mod error; -pub use error::ConnectionError; - -/// Maximum value allowed for packet delay on any new connection that the relayer establishes. -pub const MAX_PACKET_DELAY: Duration = Duration::from_secs(120); - -mod handshake_retry { - //! Provides utility methods and constants to configure the retry behavior - //! for the channel handshake algorithm. - - use crate::connection::ConnectionError; - use crate::util::retry::{clamp_total, ConstantGrowth}; - use core::time::Duration; - - /// Approximate number of retries per block. - const PER_BLOCK_RETRIES: u32 = 10; - - /// Defines the increment in delay between subsequent retries. - /// A value of `0` will make the retry delay constant. - const DELAY_INCREMENT: u64 = 0; - - /// Maximum retry delay expressed in number of blocks - const BLOCK_NUMBER_DELAY: u32 = 10; - - /// The default retry strategy. - /// We retry with a constant backoff strategy. The strategy is parametrized by the - /// maximum block time expressed as a `Duration`. - pub fn default_strategy(max_block_times: Duration) -> impl Iterator { - let retry_delay = max_block_times / PER_BLOCK_RETRIES; - - clamp_total( - ConstantGrowth::new(retry_delay, Duration::from_secs(DELAY_INCREMENT)), - retry_delay, - max_block_times * BLOCK_NUMBER_DELAY, - ) - } - - /// Translates from an error type that the `retry` mechanism threw into - /// a crate specific error of [`ConnectionError`] type. - pub fn from_retry_error( - e: retry::Error, - description: String, - ) -> ConnectionError { - match e { - retry::Error::Operation { - error: _, - total_delay, - tries, - } => ConnectionError::max_retry(description, tries, total_delay), - retry::Error::Internal(reason) => ConnectionError::retry_internal(reason), - } - } -} - -#[derive(Clone, Debug)] -pub struct ConnectionSide { - pub(crate) chain: Chain, - client_id: ClientId, - connection_id: Option, -} - -impl ConnectionSide { - pub fn new(chain: Chain, client_id: ClientId, connection_id: Option) -> Self { - Self { - chain, - client_id, - connection_id, - } - } - - pub fn connection_id(&self) -> Option<&ConnectionId> { - self.connection_id.as_ref() - } - - pub fn map_chain( - self, - mapper: impl FnOnce(Chain) -> ChainB, - ) -> ConnectionSide { - ConnectionSide { - chain: mapper(self.chain), - client_id: self.client_id, - connection_id: self.connection_id, - } - } -} - -impl Serialize for ConnectionSide { - fn serialize(&self, serializer: S) -> Result - where - S: serde::Serializer, - { - #[derive(Debug, Serialize)] - struct ConnectionSide<'a> { - client_id: &'a ClientId, - connection_id: &'a Option, - } - - let value = ConnectionSide { - client_id: &self.client_id, - connection_id: &self.connection_id, - }; - - value.serialize(serializer) - } -} - -#[derive(Clone, Debug, Serialize)] -pub struct Connection { - pub delay_period: Duration, - pub a_side: ConnectionSide, - pub b_side: ConnectionSide, -} - -impl Connection { - /// Create a new connection, ensuring that the handshake has succeeded and the two connection - /// ends exist on each side. 
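To make the retry arithmetic above concrete: with `PER_BLOCK_RETRIES = 10`, `DELAY_INCREMENT = 0`, and `BLOCK_NUMBER_DELAY = 10`, a 30-second maximum block time yields a constant 3-second delay between attempts, capped at 300 seconds of total waiting (roughly 100 tries). A small self-contained sketch of that arithmetic, using plain `Duration` math rather than the crate-internal `ConstantGrowth`/`clamp_total` helpers:

    use core::time::Duration;

    // Sketch of the schedule implied by `default_strategy`: a constant per-retry
    // delay of max_block_time / 10, with total retry time capped at 10 block times.
    fn retry_schedule(max_block_time: Duration) -> (Duration, Duration, u32) {
        let per_retry = max_block_time / 10; // PER_BLOCK_RETRIES
        let total_cap = max_block_time * 10; // BLOCK_NUMBER_DELAY
        let max_tries = (total_cap.as_millis() / per_retry.as_millis().max(1)) as u32;
        (per_retry, total_cap, max_tries)
    }

    fn main() {
        let (delay, cap, tries) = retry_schedule(Duration::from_secs(30));
        assert_eq!(delay, Duration::from_secs(3));
        assert_eq!(cap, Duration::from_secs(300));
        assert_eq!(tries, 100);
    }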
- pub fn new( - b_to_a_client: ForeignClient, - a_to_b_client: ForeignClient, - delay_period: Duration, - ) -> Result { - Self::validate_clients(&b_to_a_client, &a_to_b_client)?; - - // Validate the delay period against the upper bound - if delay_period > MAX_PACKET_DELAY { - return Err(ConnectionError::max_delay_period( - delay_period, - MAX_PACKET_DELAY, - )); - } - - let mut c = Self { - delay_period, - a_side: ConnectionSide::new( - b_to_a_client.dst_chain(), - b_to_a_client.id().clone(), - Default::default(), - ), - b_side: ConnectionSide::new( - a_to_b_client.dst_chain(), - a_to_b_client.id().clone(), - Default::default(), - ), - }; - - c.handshake()?; - - Ok(c) - } - - pub fn restore_from_event( - chain: ChainA, - counterparty_chain: ChainB, - connection_open_event: IbcEvent, - ) -> Result, ConnectionError> { - let connection_event_attributes = connection_open_event - .connection_attributes() - .ok_or_else(|| ConnectionError::invalid_event(connection_open_event.clone()))?; - - let connection_id = connection_event_attributes.connection_id.clone(); - - let counterparty_connection_id = connection_event_attributes - .counterparty_connection_id - .clone(); - - let client_id = connection_event_attributes.client_id.clone(); - let counterparty_client_id = connection_event_attributes.counterparty_client_id.clone(); - - Ok(Connection { - // The event does not include the connection delay. - delay_period: Default::default(), - a_side: ConnectionSide::new(chain, client_id, connection_id), - b_side: ConnectionSide::new( - counterparty_chain, - counterparty_client_id, - counterparty_connection_id, - ), - }) - } - - /// Recreates a 'Connection' object from the worker's object built from chain state scanning. - /// The connection must exist on chain. - pub fn restore_from_state( - chain: ChainA, - counterparty_chain: ChainB, - connection: WorkerConnectionObject, - height: Height, - ) -> Result<(Connection, State), ConnectionError> { - let (a_connection, _) = chain - .query_connection( - QueryConnectionRequest { - connection_id: connection.src_connection_id.clone(), - height, - }, - IncludeProof::No, - ) - .map_err(ConnectionError::relayer)?; - - let client_id = a_connection.client_id(); - let delay_period = a_connection.delay_period(); - - let counterparty_connection_id = a_connection.counterparty().connection_id.clone(); - - let counterparty_client_id = a_connection.counterparty().client_id(); - - let mut handshake_connection = Connection { - delay_period, - a_side: ConnectionSide::new( - chain, - client_id.clone(), - Some(connection.src_connection_id.clone()), - ), - b_side: ConnectionSide::new( - counterparty_chain.clone(), - counterparty_client_id.clone(), - counterparty_connection_id.clone(), - ), - }; - - if a_connection.state_matches(&State::Init) && counterparty_connection_id.is_none() { - let connections: Vec = counterparty_chain - .query_connections(QueryConnectionsRequest { - pagination: Some(PageRequest::all()), - }) - .map_err(ConnectionError::relayer)?; - - for conn in connections { - if !conn - .connection_end - .client_id_matches(a_connection.counterparty().client_id()) - { - continue; - } - if let Some(remote_connection_id) = - conn.connection_end.counterparty().connection_id() - { - if remote_connection_id == &connection.src_connection_id { - handshake_connection.b_side.connection_id = Some(conn.connection_id); - break; - } - } - } - } - - Ok((handshake_connection, *a_connection.state())) - } - - pub fn find( - a_client: ForeignClient, - b_client: ForeignClient, - conn_end_a: 
&IdentifiedConnectionEnd, - ) -> Result, ConnectionError> { - Self::validate_clients(&a_client, &b_client)?; - - // Validate the connection end - if conn_end_a.end().client_id().ne(a_client.id()) { - return Err(ConnectionError::connection_client_id_mismatch( - conn_end_a.end().client_id().clone(), - a_client.id().clone(), - )); - } - if conn_end_a.end().counterparty().client_id() != b_client.id() { - return Err(ConnectionError::connection_client_id_mismatch( - conn_end_a.end().counterparty().client_id().clone(), - b_client.id().clone(), - )); - } - if !conn_end_a.end().state_matches(&State::Open) { - return Err(ConnectionError::connection_not_open( - *conn_end_a.end().state(), - )); - } - let b_conn_id = conn_end_a - .end() - .counterparty() - .connection_id() - .cloned() - .ok_or_else(|| { - ConnectionError::missing_counterparty_connection_id_field( - conn_end_a.end().counterparty().clone(), - ) - })?; - - let c = Connection { - delay_period: conn_end_a.end().delay_period(), - a_side: ConnectionSide { - chain: a_client.dst_chain.clone(), - client_id: a_client.id, - connection_id: Some(conn_end_a.id().clone()), - }, - b_side: ConnectionSide { - chain: b_client.dst_chain.clone(), - client_id: b_client.id, - connection_id: Some(b_conn_id), - }, - }; - - Ok(c) - } - - // Verifies that the two clients are mutually consistent, i.e., they serve the same two chains. - fn validate_clients( - a_client: &ForeignClient, - b_client: &ForeignClient, - ) -> Result<(), ConnectionError> { - if a_client.src_chain().id() != b_client.dst_chain().id() { - return Err(ConnectionError::chain_id_mismatch( - a_client.src_chain().id(), - b_client.dst_chain().id(), - )); - } - - if a_client.dst_chain().id() != b_client.src_chain().id() { - return Err(ConnectionError::chain_id_mismatch( - a_client.dst_chain().id(), - b_client.src_chain().id(), - )); - } - - Ok(()) - } - - pub fn src_chain(&self) -> ChainA { - self.a_side.chain.clone() - } - - pub fn dst_chain(&self) -> ChainB { - self.b_side.chain.clone() - } - - pub fn a_chain(&self) -> ChainA { - self.a_side.chain.clone() - } - - pub fn b_chain(&self) -> ChainB { - self.b_side.chain.clone() - } - - pub fn src_client_id(&self) -> &ClientId { - &self.a_side.client_id - } - - pub fn dst_client_id(&self) -> &ClientId { - &self.b_side.client_id - } - - pub fn src_connection_id(&self) -> Option<&ConnectionId> { - self.a_side.connection_id() - } - - pub fn dst_connection_id(&self) -> Option<&ConnectionId> { - self.b_side.connection_id() - } - - pub fn a_connection_id(&self) -> Option<&ConnectionId> { - self.a_side.connection_id() - } - pub fn b_connection_id(&self) -> Option<&ConnectionId> { - self.b_side.connection_id() - } - - fn a_connection( - &self, - connection_id: Option<&ConnectionId>, - ) -> Result { - if let Some(id) = connection_id { - self.a_chain() - .query_connection( - QueryConnectionRequest { - connection_id: id.clone(), - height: Height::zero(), - }, - IncludeProof::No, - ) - .map(|(connection_end, _)| connection_end) - .map_err(|e| ConnectionError::chain_query(self.a_chain().id(), e)) - } else { - Ok(ConnectionEnd::default()) - } - } - - fn b_connection( - &self, - connection_id: Option<&ConnectionId>, - ) -> Result { - if let Some(id) = connection_id { - self.b_chain() - .query_connection( - QueryConnectionRequest { - connection_id: id.clone(), - height: Height::zero(), - }, - IncludeProof::No, - ) - .map(|(connection_end, _)| connection_end) - .map_err(|e| ConnectionError::chain_query(self.b_chain().id(), e)) - } else { - Ok(ConnectionEnd::default()) - 
} - } - - /// Returns a `Duration` representing the maximum value among the - /// [`ChainConfig.max_block_time`] for the two networks that - /// this connection belongs to. - fn max_block_times(&self) -> Result { - let a_block_time = self - .a_chain() - .config() - .map_err(ConnectionError::relayer)? - .max_block_time; - let b_block_time = self - .b_chain() - .config() - .map_err(ConnectionError::relayer)? - .max_block_time; - Ok(a_block_time.max(b_block_time)) - } - - pub fn flipped(&self) -> Connection { - Connection { - a_side: self.b_side.clone(), - b_side: self.a_side.clone(), - delay_period: self.delay_period, - } - } - - /// Queries the chains for latest connection end information. It verifies the relayer connection - /// IDs and updates them if needed. - /// Returns the states of the two connection ends. - /// - /// The relayer connection stores the connection identifiers on the two chains a and b. - /// These identifiers need to be cross validated with the corresponding on-chain ones at some - /// handshake steps. - /// This is required because of crossing handshake messages in the presence of multiple relayers. - /// - /// Chain a is queried with the relayer's `a_side.connection_id` (`relayer_a_id`) with result - /// `a_connection`. If the counterparty id of this connection, `a_counterparty_id`, - /// is some id then it must match the relayer's `b_side.connection_id` (`relayer_b_id`). - /// A similar check is done for the `b_side` of the connection. - /// - /// a relayer b - /// | a_side -- connection -- b_side | - /// a_id _____________> relayer_a_id relayer_b_id <______________> b_id - /// | \ / | - /// a_counterparty_id <_____________________________________/ | - /// \____________________________________> b_counterparty_id - /// - /// Case 1 (fix connection ID): - /// a b - /// | <-- Init (r1) | - /// | a_id = 1, a_counterparty_id = None | - /// | Try (r2) --> | - /// | b_id = 100, b_counterparty_id = 1 | - /// | Try (r1) --> | - /// | b_id = 101, b_counterparty_id = 1 | - /// | <-- Ack (r2) - /// | a_id = 1, a_counterparty_id = 100 - /// - /// Here relayer r1 has a_side connection 1 and b_side connection 101 - /// while on chain a the counterparty of connection 1 is 100. r1 needs to update - /// its b_side to 100 - /// - /// Case 2 (update from None to some connection ID): - /// a b - /// | <-- Init (r1) | - /// | a_id = 1, a_counterparty_id = None | - /// | Try (r2) --> | - /// | b_id = 100, b_counterparty_id = 1 | - /// | <-- Ack (r2) - /// | a_id = 1, a_counterparty_id = 100 - /// - /// Here relayer r1 has a_side connection 1 and b_side is unknown - /// while on chain a the counterparty of connection 1 is 100. r1 needs to update - /// its b_side to 100 - fn update_connection_and_query_states(&mut self) -> Result<(State, State), ConnectionError> { - let relayer_a_id = self.a_side.connection_id(); - let relayer_b_id = self.b_side.connection_id().cloned(); - - let a_connection = self.a_connection(relayer_a_id)?; - let a_counterparty_id = a_connection.counterparty().connection_id(); - - if a_counterparty_id.is_some() && a_counterparty_id != relayer_b_id.as_ref() { - warn!( - "updating the expected {:?} of side_b({}) since it is different than the \ - counterparty of {:?}: {:?}, on {}. 
This is typically caused by crossing handshake \ - messages in the presence of multiple relayers.", - relayer_b_id, - self.b_chain().id(), - relayer_a_id, - a_counterparty_id, - self.a_chain().id(), - ); - self.b_side.connection_id = a_counterparty_id.cloned(); - } - - let updated_relayer_b_id = self.b_side.connection_id(); - let b_connection = self.b_connection(updated_relayer_b_id)?; - let b_counterparty_id = b_connection.counterparty().connection_id(); - - if b_counterparty_id.is_some() && b_counterparty_id != relayer_a_id { - if updated_relayer_b_id == relayer_b_id.as_ref() { - warn!( - "updating the expected {:?} of side_b({}) since it is different than the \ - counterparty of {:?}: {:?}, on {}. This is typically caused by crossing handshake \ - messages in the presence of multiple relayers.", - relayer_a_id, - self.a_chain().id(), - updated_relayer_b_id, - b_counterparty_id, - self.b_chain().id(), - ); - self.a_side.connection_id = b_counterparty_id.cloned(); - } else { - panic!( - "mismatched connection ids in connection ends: {} - {:?} and {} - {:?}", - self.a_chain().id(), - a_connection, - self.b_chain().id(), - b_connection, - ); - } - } - Ok((*a_connection.state(), *b_connection.state())) - } - - /// Sends a connection open handshake message. - /// The message sent depends on the chain status of the connection ends. - fn do_conn_open_handshake(&mut self) -> Result<(), ConnectionError> { - let (a_state, b_state) = self.update_connection_and_query_states()?; - debug!( - "do_conn_open_handshake with connection end states: {}, {}", - a_state, b_state - ); - - match (a_state, b_state) { - // send the Init message to chain a (source) - (State::Uninitialized, State::Uninitialized) => { - let event = self.flipped().build_conn_init_and_send().map_err(|e| { - error!("failed ConnOpenInit {:?}: {:?}", self.a_side, e); - e - })?; - let connection_id = extract_connection_id(&event)?; - self.a_side.connection_id = Some(connection_id.clone()); - } - - // send the Try message to chain a (source) - (State::Uninitialized, State::Init) | (State::Init, State::Init) => { - let event = self.flipped().build_conn_try_and_send().map_err(|e| { - error!("failed ConnOpenTry {:?}: {:?}", self.a_side, e); - e - })?; - - let connection_id = extract_connection_id(&event)?; - self.a_side.connection_id = Some(connection_id.clone()); - } - - // send the Try message to chain b (destination) - (State::Init, State::Uninitialized) => { - let event = self.build_conn_try_and_send().map_err(|e| { - error!("failed ConnOpenTry {:?}: {:?}", self.b_side, e); - e - })?; - - let connection_id = extract_connection_id(&event)?; - self.b_side.connection_id = Some(connection_id.clone()); - } - - // send the Ack message to chain a (source) - (State::Init, State::TryOpen) | (State::TryOpen, State::TryOpen) => { - self.flipped().build_conn_ack_and_send().map_err(|e| { - error!("failed ConnOpenAck {:?}: {:?}", self.a_side, e); - e - })?; - } - - // send the Ack message to chain b (destination) - (State::TryOpen, State::Init) => { - self.build_conn_ack_and_send().map_err(|e| { - error!("failed ConnOpenAck {:?}: {:?}", self.b_side, e); - e - })?; - } - - // send the Confirm message to chain b (destination) - (State::Open, State::TryOpen) => { - self.build_conn_confirm_and_send().map_err(|e| { - error!("failed ConnOpenConfirm {:?}: {:?}", self.b_side, e); - e - })?; - } - - // send the Confirm message to chain a (source) - (State::TryOpen, State::Open) => { - self.flipped().build_conn_confirm_and_send().map_err(|e| { - error!("failed 
ConnOpenConfirm {:?}: {:?}", self.a_side, e); - e - })?; - } - - (State::Open, State::Open) => { - info!("connection handshake already finished for {:#?}\n", self); - return Ok(()); - } - - (a_state, b_state) => { - warn!( - "do_conn_open_handshake does not handle connection end state combination: \ - {}-{}, {}-{}. will retry to account for RPC node data availability issues.", - self.a_chain().id(), - a_state, - self.b_chain().id(), - b_state - ); - } - } - Err(ConnectionError::handshake_finalize()) - } - - /// Executes the connection handshake protocol (ICS003) - fn handshake(&mut self) -> Result<(), ConnectionError> { - let max_block_times = self.max_block_times()?; - - retry_with_index(handshake_retry::default_strategy(max_block_times), |_| { - if let Err(e) = self.do_conn_open_handshake() { - if e.is_expired_or_frozen_error() { - RetryResult::Err(e) - } else { - RetryResult::Retry(e) - } - } else { - RetryResult::Ok(()) - } - }) - .map_err(|err| { - error!( - "failed to open connection after {} retries", - retry_count(&err) - ); - handshake_retry::from_retry_error( - err, - format!("failed to finish connection handshake for {:?}", self), - ) - })?; - - Ok(()) - } - - pub fn counterparty_state(&self) -> Result { - // Source connection ID must be specified - let connection_id = self - .src_connection_id() - .ok_or_else(ConnectionError::missing_local_connection_id)?; - - let (connection_end, _) = self - .src_chain() - .query_connection( - QueryConnectionRequest { - connection_id: connection_id.clone(), - height: Height::zero(), - }, - IncludeProof::No, - ) - .map_err(|e| ConnectionError::connection_query(connection_id.clone(), e))?; - - let connection = IdentifiedConnectionEnd { - connection_end, - connection_id: connection_id.clone(), - }; - - connection_state_on_destination(&connection, &self.dst_chain()) - .map_err(ConnectionError::supervisor) - } - - pub fn handshake_step( - &mut self, - state: State, - ) -> Result<(Option, Next), ConnectionError> { - let event = match (state, self.counterparty_state()?) { - (State::Init, State::Uninitialized) => Some(self.build_conn_try_and_send()?), - (State::Init, State::Init) => Some(self.build_conn_try_and_send()?), - (State::TryOpen, State::Init) => Some(self.build_conn_ack_and_send()?), - (State::TryOpen, State::TryOpen) => Some(self.build_conn_ack_and_send()?), - (State::Open, State::TryOpen) => Some(self.build_conn_confirm_and_send()?), - (State::Open, State::Open) => return Ok((None, Next::Abort)), - - // If the counterparty state is already Open but current state is TryOpen, - // return anyway as the final step is to be done by the counterparty worker. 
- (State::TryOpen, State::Open) => return Ok((None, Next::Abort)), - - _ => None, - }; - - Ok((event, Next::Continue)) - } - - pub fn step_state(&mut self, state: State, index: u64) -> RetryResult { - match self.handshake_step(state) { - Err(e) => { - if e.is_expired_or_frozen_error() { - error!( - "failed to establish connection handshake on frozen client: {}", - e - ); - RetryResult::Err(index) - } else { - error!("failed {:?} with error {}", state, e); - RetryResult::Retry(index) - } - } - Ok((Some(ev), handshake_completed)) => { - info!( - "connection handshake step completed with events: {:#?}\n", - ev - ); - RetryResult::Ok(handshake_completed) - } - Ok((None, handshake_completed)) => RetryResult::Ok(handshake_completed), - } - } - - pub fn step_event(&mut self, event: IbcEvent, index: u64) -> RetryResult { - let state = match event { - IbcEvent::OpenInitConnection(_) => State::Init, - IbcEvent::OpenTryConnection(_) => State::TryOpen, - IbcEvent::OpenAckConnection(_) => State::Open, - IbcEvent::OpenConfirmConnection(_) => State::Open, - _ => State::Uninitialized, - }; - - self.step_state(state, index) - } - - /// Retrieves the connection from destination and compares against the expected connection - /// built from the message type (`msg_type`) and options (`opts`). - /// If the expected and the destination connections are compatible, it returns the expected connection. - fn validated_expected_connection( - &self, - msg_type: ConnectionMsgType, - ) -> Result { - let dst_connection_id = self - .dst_connection_id() - .ok_or_else(ConnectionError::missing_counterparty_connection_id)?; - - let prefix = self - .src_chain() - .query_commitment_prefix() - .map_err(|e| ConnectionError::chain_query(self.src_chain().id(), e))?; - - // If there is a connection present on the destination chain, it should look like this: - let counterparty = Counterparty::new( - self.src_client_id().clone(), - self.src_connection_id().cloned(), - prefix, - ); - - // The highest expected state, depends on the message type: - let highest_state = match msg_type { - ConnectionMsgType::OpenAck => State::TryOpen, - ConnectionMsgType::OpenConfirm => State::TryOpen, - _ => State::Uninitialized, - }; - - let versions = self - .src_chain() - .query_compatible_versions() - .map_err(|e| ConnectionError::chain_query(self.src_chain().id(), e))?; - - let dst_expected_connection = ConnectionEnd::new( - highest_state, - self.dst_client_id().clone(), - counterparty, - versions, - ZERO_DURATION, - ); - - // Retrieve existing connection if any - let (dst_connection, _) = self - .dst_chain() - .query_connection( - QueryConnectionRequest { - connection_id: dst_connection_id.clone(), - height: Height::zero(), - }, - IncludeProof::No, - ) - .map_err(|e| ConnectionError::chain_query(self.dst_chain().id(), e))?; - - // Check if a connection is expected to exist on destination chain - // A connection must exist on destination chain for Ack and Confirm Tx-es to succeed - if dst_connection.state_matches(&State::Uninitialized) { - return Err(ConnectionError::missing_connection_id( - self.dst_chain().id(), - )); - } - - check_destination_connection_state( - dst_connection_id.clone(), - dst_connection, - dst_expected_connection.clone(), - )?; - - Ok(dst_expected_connection) - } - - pub fn build_update_client_on_src(&self, height: Height) -> Result, ConnectionError> { - let client = self.restore_src_client(); - client.build_update_client(height).map_err(|e| { - ConnectionError::client_operation( - self.src_client_id().clone(), - 
self.src_chain().id(), - e, - ) - }) - } - - pub fn build_update_client_on_dst(&self, height: Height) -> Result, ConnectionError> { - let client = self.restore_dst_client(); - client.build_update_client(height).map_err(|e| { - ConnectionError::client_operation( - self.dst_client_id().clone(), - self.dst_chain().id(), - e, - ) - }) - } - - pub fn build_conn_init(&self) -> Result, ConnectionError> { - // Get signer - let signer = self - .dst_chain() - .get_signer() - .map_err(|e| ConnectionError::signer(self.dst_chain().id(), e))?; - - let prefix = self - .src_chain() - .query_commitment_prefix() - .map_err(|e| ConnectionError::chain_query(self.src_chain().id(), e))?; - - let counterparty = Counterparty::new(self.src_client_id().clone(), None, prefix); - - let version = self - .dst_chain() - .query_compatible_versions() - .map_err(|e| ConnectionError::chain_query(self.dst_chain().id(), e))?[0] - .clone(); - - // Build the domain type message - let new_msg = MsgConnectionOpenInit { - client_id: self.dst_client_id().clone(), - counterparty, - version: Some(version), - delay_period: self.delay_period, - signer, - }; - - Ok(vec![new_msg.to_any()]) - } - - pub fn build_conn_init_and_send(&self) -> Result { - let dst_msgs = self.build_conn_init()?; - - let tm = TrackedMsgs::new_static(dst_msgs, "ConnectionOpenInit"); - - let events = self - .dst_chain() - .send_messages_and_wait_commit(tm) - .map_err(|e| ConnectionError::submit(self.dst_chain().id(), e))?; - - // Find the relevant event for connection init - let event = events - .into_iter() - .find(|event| { - matches!(event, IbcEvent::OpenInitConnection(_)) - || matches!(event, IbcEvent::ChainError(_)) - }) - .ok_or_else(ConnectionError::missing_connection_init_event)?; - - // TODO - make chainError an actual error - match event { - IbcEvent::OpenInitConnection(_) => { - info!("🥂 {} => {:#?}\n", self.dst_chain().id(), event); - Ok(event) - } - IbcEvent::ChainError(e) => Err(ConnectionError::tx_response(e)), - _ => panic!("internal error"), - } - } - - /// Attempts to build a MsgConnOpenTry. 
- pub fn build_conn_try(&self) -> Result, ConnectionError> { - let src_connection_id = self - .src_connection_id() - .ok_or_else(ConnectionError::missing_local_connection_id)?; - - let (src_connection, _) = self - .src_chain() - .query_connection( - QueryConnectionRequest { - connection_id: src_connection_id.clone(), - height: Height::zero(), - }, - IncludeProof::No, - ) - .map_err(|e| ConnectionError::chain_query(self.src_chain().id(), e))?; - - // TODO - check that the src connection is consistent with the try options - - // Cross-check the delay_period - let delay = if src_connection.delay_period() != self.delay_period { - warn!("`delay_period` for ConnectionEnd @{} is {}s; delay period on local Connection object is set to {}s", - self.src_chain().id(), src_connection.delay_period().as_secs_f64(), self.delay_period.as_secs_f64()); - - warn!( - "Overriding delay period for local connection object to {}s", - src_connection.delay_period().as_secs_f64() - ); - - src_connection.delay_period() - } else { - self.delay_period - }; - - // Build add send the message(s) for updating client on source - // TODO - add check if update client is required - let src_client_target_height = self - .dst_chain() - .query_latest_height() - .map_err(|e| ConnectionError::chain_query(self.dst_chain().id(), e))?; - let client_msgs = self.build_update_client_on_src(src_client_target_height)?; - - let tm = - TrackedMsgs::new_static(client_msgs, "update client on source for ConnectionOpenTry"); - self.src_chain() - .send_messages_and_wait_commit(tm) - .map_err(|e| ConnectionError::submit(self.src_chain().id(), e))?; - - let query_height = self - .src_chain() - .query_latest_height() - .map_err(|e| ConnectionError::chain_query(self.src_chain().id(), e))?; - let (client_state, proofs) = self - .src_chain() - .build_connection_proofs_and_client_state( - ConnectionMsgType::OpenTry, - src_connection_id, - self.src_client_id(), - query_height, - ) - .map_err(ConnectionError::connection_proof)?; - - // Build message(s) for updating client on destination - let mut msgs = self.build_update_client_on_dst(proofs.height())?; - - let counterparty_versions = if src_connection.versions().is_empty() { - self.src_chain() - .query_compatible_versions() - .map_err(|e| ConnectionError::chain_query(self.src_chain().id(), e))? 
- } else { - src_connection.versions().to_vec() - }; - - // Get signer - let signer = self - .dst_chain() - .get_signer() - .map_err(|e| ConnectionError::signer(self.dst_chain().id(), e))?; - - let prefix = self - .src_chain() - .query_commitment_prefix() - .map_err(|e| ConnectionError::chain_query(self.src_chain().id(), e))?; - - let counterparty = Counterparty::new( - self.src_client_id().clone(), - self.src_connection_id().cloned(), - prefix, - ); - - let previous_connection_id = if src_connection.counterparty().connection_id.is_none() { - self.b_side.connection_id.clone() - } else { - src_connection.counterparty().connection_id.clone() - }; - - let new_msg = MsgConnectionOpenTry { - client_id: self.dst_client_id().clone(), - client_state, - previous_connection_id, - counterparty, - counterparty_versions, - proofs, - delay_period: delay, - signer, - }; - - msgs.push(new_msg.to_any()); - Ok(msgs) - } - - pub fn build_conn_try_and_send(&self) -> Result { - let dst_msgs = self.build_conn_try()?; - - let tm = TrackedMsgs::new_static(dst_msgs, "ConnectionOpenTry"); - - let events = self - .dst_chain() - .send_messages_and_wait_commit(tm) - .map_err(|e| ConnectionError::submit(self.dst_chain().id(), e))?; - - // Find the relevant event for connection try transaction - let event = events - .into_iter() - .find(|event| { - matches!(event, IbcEvent::OpenTryConnection(_)) - || matches!(event, IbcEvent::ChainError(_)) - }) - .ok_or_else(ConnectionError::missing_connection_try_event)?; - - match event { - IbcEvent::OpenTryConnection(_) => { - info!("🥂 {} => {:#?}\n", self.dst_chain().id(), event); - Ok(event) - } - IbcEvent::ChainError(e) => Err(ConnectionError::tx_response(e)), - _ => panic!("internal error"), - } - } - - /// Attempts to build a MsgConnOpenAck. - pub fn build_conn_ack(&self) -> Result, ConnectionError> { - let src_connection_id = self - .src_connection_id() - .ok_or_else(ConnectionError::missing_local_connection_id)?; - let dst_connection_id = self - .dst_connection_id() - .ok_or_else(ConnectionError::missing_counterparty_connection_id)?; - - let _expected_dst_connection = - self.validated_expected_connection(ConnectionMsgType::OpenAck)?; - - let (src_connection, _) = self - .src_chain() - .query_connection( - QueryConnectionRequest { - connection_id: src_connection_id.clone(), - height: Height::zero(), - }, - IncludeProof::No, - ) - .map_err(|e| ConnectionError::chain_query(self.src_chain().id(), e))?; - - // TODO - check that the src connection is consistent with the ack options - - // Build add **send** the message(s) for updating client on source. 
- // TODO - add check if it is required - let src_client_target_height = self - .dst_chain() - .query_latest_height() - .map_err(|e| ConnectionError::chain_query(self.dst_chain().id(), e))?; - let client_msgs = self.build_update_client_on_src(src_client_target_height)?; - - let tm = - TrackedMsgs::new_static(client_msgs, "update client on source for ConnectionOpenAck"); - - self.src_chain() - .send_messages_and_wait_commit(tm) - .map_err(|e| ConnectionError::submit(self.src_chain().id(), e))?; - - let query_height = self - .src_chain() - .query_latest_height() - .map_err(|e| ConnectionError::chain_query(self.src_chain().id(), e))?; - - let (client_state, proofs) = self - .src_chain() - .build_connection_proofs_and_client_state( - ConnectionMsgType::OpenAck, - src_connection_id, - self.src_client_id(), - query_height, - ) - .map_err(ConnectionError::connection_proof)?; - - // Build message(s) for updating client on destination - let mut msgs = self.build_update_client_on_dst(proofs.height())?; - - // Get signer - let signer = self - .dst_chain() - .get_signer() - .map_err(|e| ConnectionError::signer(self.dst_chain().id(), e))?; - - let new_msg = MsgConnectionOpenAck { - connection_id: dst_connection_id.clone(), - counterparty_connection_id: src_connection_id.clone(), - client_state, - proofs, - version: src_connection.versions()[0].clone(), - signer, - }; - - msgs.push(new_msg.to_any()); - Ok(msgs) - } - - pub fn build_conn_ack_and_send(&self) -> Result { - let dst_msgs = self.build_conn_ack()?; - - let tm = TrackedMsgs::new_static(dst_msgs, "ConnectionOpenAck"); - - let events = self - .dst_chain() - .send_messages_and_wait_commit(tm) - .map_err(|e| ConnectionError::submit(self.dst_chain().id(), e))?; - - // Find the relevant event for connection ack - let event = events - .into_iter() - .find(|event| { - matches!(event, IbcEvent::OpenAckConnection(_)) - || matches!(event, IbcEvent::ChainError(_)) - }) - .ok_or_else(ConnectionError::missing_connection_ack_event)?; - - match event { - IbcEvent::OpenAckConnection(_) => { - info!("🥂 {} => {:#?}\n", self.dst_chain().id(), event); - Ok(event) - } - IbcEvent::ChainError(e) => Err(ConnectionError::tx_response(e)), - _ => panic!("internal error"), - } - } - - /// Attempts to build a MsgConnOpenConfirm. 
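The `build_conn_try` and `build_conn_ack` helpers above, and `build_conn_confirm` below, all follow the same ordering rule: the update-client message for the destination chain is placed before the handshake message itself, so that the destination already has a consensus state at the proof height when it verifies the proofs. A rough, hypothetical sketch of that ordering, with plain `String`s standing in for the protobuf `Any` values actually used:

    // Hypothetical sketch of the message ordering; `String` stands in for
    // ibc_proto::google::protobuf::Any.
    fn assemble_handshake_tx(update_client_msgs: Vec<String>, handshake_msg: String) -> Vec<String> {
        // Client update(s) first, handshake message last, all in one transaction.
        let mut msgs = update_client_msgs;
        msgs.push(handshake_msg);
        msgs
    }

    fn main() {
        let tx = assemble_handshake_tx(
            vec!["MsgUpdateClient".to_owned()],
            "MsgConnectionOpenConfirm".to_owned(),
        );
        assert_eq!(tx, ["MsgUpdateClient", "MsgConnectionOpenConfirm"]);
    }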
- pub fn build_conn_confirm(&self) -> Result, ConnectionError> { - let src_connection_id = self - .src_connection_id() - .ok_or_else(ConnectionError::missing_local_connection_id)?; - let dst_connection_id = self - .dst_connection_id() - .ok_or_else(ConnectionError::missing_counterparty_connection_id)?; - - let _expected_dst_connection = - self.validated_expected_connection(ConnectionMsgType::OpenAck)?; - - let query_height = self - .src_chain() - .query_latest_height() - .map_err(|e| ConnectionError::chain_query(self.src_chain().id(), e))?; - - let (_src_connection, _) = self - .src_chain() - .query_connection( - QueryConnectionRequest { - connection_id: src_connection_id.clone(), - height: query_height, - }, - IncludeProof::No, - ) - .map_err(|e| ConnectionError::connection_query(src_connection_id.clone(), e))?; - - // TODO - check that the src connection is consistent with the confirm options - - let (_, proofs) = self - .src_chain() - .build_connection_proofs_and_client_state( - ConnectionMsgType::OpenConfirm, - src_connection_id, - self.src_client_id(), - query_height, - ) - .map_err(ConnectionError::connection_proof)?; - - // Build message(s) for updating client on destination - let mut msgs = self.build_update_client_on_dst(proofs.height())?; - - // Get signer - let signer = self - .dst_chain() - .get_signer() - .map_err(|e| ConnectionError::signer(self.dst_chain().id(), e))?; - - let new_msg = MsgConnectionOpenConfirm { - connection_id: dst_connection_id.clone(), - proofs, - signer, - }; - - msgs.push(new_msg.to_any()); - Ok(msgs) - } - - pub fn build_conn_confirm_and_send(&self) -> Result { - let dst_msgs = self.build_conn_confirm()?; - - let tm = TrackedMsgs::new_static(dst_msgs, "ConnectionOpenConfirm"); - - let events = self - .dst_chain() - .send_messages_and_wait_commit(tm) - .map_err(|e| ConnectionError::submit(self.dst_chain().id(), e))?; - - // Find the relevant event for connection confirm - let event = events - .into_iter() - .find(|event| { - matches!(event, IbcEvent::OpenConfirmConnection(_)) - || matches!(event, IbcEvent::ChainError(_)) - }) - .ok_or_else(ConnectionError::missing_connection_confirm_event)?; - - match event { - IbcEvent::OpenConfirmConnection(_) => { - info!("🥂 {} => {:#?}\n", self.dst_chain().id(), event); - Ok(event) - } - IbcEvent::ChainError(e) => Err(ConnectionError::tx_response(e)), - _ => panic!("internal error"), - } - } - - fn restore_src_client(&self) -> ForeignClient { - ForeignClient::restore( - self.src_client_id().clone(), - self.src_chain(), - self.dst_chain(), - ) - } - - fn restore_dst_client(&self) -> ForeignClient { - ForeignClient::restore( - self.dst_client_id().clone(), - self.dst_chain(), - self.src_chain(), - ) - } - - pub fn map_chain( - self, - mapper_a: impl Fn(ChainA) -> ChainC, - mapper_b: impl Fn(ChainB) -> ChainD, - ) -> Connection { - Connection { - delay_period: self.delay_period, - a_side: self.a_side.map_chain(mapper_a), - b_side: self.b_side.map_chain(mapper_b), - } - } -} - -pub fn extract_connection_id(event: &IbcEvent) -> Result<&ConnectionId, ConnectionError> { - match event { - IbcEvent::OpenInitConnection(ev) => ev.connection_id(), - IbcEvent::OpenTryConnection(ev) => ev.connection_id(), - IbcEvent::OpenAckConnection(ev) => ev.connection_id(), - IbcEvent::OpenConfirmConnection(ev) => ev.connection_id(), - _ => None, - } - .ok_or_else(ConnectionError::missing_connection_id_from_event) -} - -/// Enumeration of proof carrying ICS3 message, helper for relayer. 
-#[derive(Clone, Debug, PartialEq, Eq)] -pub enum ConnectionMsgType { - OpenTry, - OpenAck, - OpenConfirm, -} - -/// Verify that the destination connection exhibits the expected state. -fn check_destination_connection_state( - connection_id: ConnectionId, - existing_connection: ConnectionEnd, - expected_connection: ConnectionEnd, -) -> Result<(), ConnectionError> { - let good_client_ids = existing_connection.client_id() == expected_connection.client_id() - && existing_connection.counterparty().client_id() - == expected_connection.counterparty().client_id(); - - let good_state = *existing_connection.state() as u32 <= *expected_connection.state() as u32; - - let good_connection_ids = existing_connection.counterparty().connection_id().is_none() - || existing_connection.counterparty().connection_id() - == expected_connection.counterparty().connection_id(); - - let good_version = existing_connection.versions() == expected_connection.versions(); - - let good_counterparty_prefix = - existing_connection.counterparty().prefix() == expected_connection.counterparty().prefix(); - - if good_state - && good_client_ids - && good_connection_ids - && good_version - && good_counterparty_prefix - { - Ok(()) - } else { - Err(ConnectionError::connection_already_exists(connection_id)) - } -} diff --git a/relayer/src/connection/error.rs b/relayer/src/connection/error.rs deleted file mode 100644 index fd6ee1a695..0000000000 --- a/relayer/src/connection/error.rs +++ /dev/null @@ -1,199 +0,0 @@ -use core::time::Duration; -use flex_error::define_error; -use ibc::core::ics03_connection::connection::{Counterparty, State}; -use ibc::core::ics24_host::identifier::{ChainId, ClientId, ConnectionId}; -use ibc::events::IbcEvent; - -use crate::error::Error as RelayerError; -use crate::foreign_client::{ForeignClientError, HasExpiredOrFrozenError}; -use crate::supervisor::Error as SupervisorError; - -define_error! 
{ - ConnectionError { - Relayer - [ RelayerError ] - |e| { format_args!("relayer error: {}", e.source) }, - - MissingLocalConnectionId - |_| { "failed due to missing local connection id" }, - - MissingCounterpartyConnectionIdField - { counterparty: Counterparty } - |e| { - format!("the connection end has no connection id field in the counterparty: {:?}", - e.counterparty) - }, - - MissingCounterpartyConnectionId - |_| { "failed due to missing counterparty connection id" }, - - ChainQuery - { chain_id: ChainId } - [ RelayerError ] - |e| { - format!("failed during a query to chain id {0}", e.chain_id) - }, - - ConnectionQuery - { connection_id: ConnectionId } - [ RelayerError ] - |e| { - format!("failed to query the connection for {}", e.connection_id) - }, - - ClientOperation - { - client_id: ClientId, - chain_id: ChainId, - } - [ ForeignClientError ] - |e| { - format!("failed during an operation on client '{0}' hosted by chain '{1}'", - e.client_id, e.chain_id) - }, - - Submit - { chain_id: ChainId } - [ RelayerError ] - |e| { - format!("failed during a transaction submission step to chain '{0}'", - e.chain_id) - }, - - HandshakeFinalize - |_| { "continue handshake" }, - - MaxDelayPeriod - { - delay_period: Duration, - max_delay_period: Duration - } - |e| { - format!("invalid delay period '{:?}': should be at max '{:?}'", - e.delay_period, e.max_delay_period) - }, - - InvalidEvent - { event: IbcEvent } - |e| { - format!("a connection object cannot be built from {}", - e.event) - }, - - RetryInternal - { reason: String } - | e | { - format_args!("encountered internal error during retry: {}", - e.reason) - }, - - TxResponse - { event: String } - |e| { - format!("tx response event consists of an error: {}", - e.event) - }, - - ConnectionClientIdMismatch - { - client_id: ClientId, - foreign_client_id: ClientId - } - |e| { - format!("the client id ({}) in the connection end does not match the foreign client id ({})", - e.client_id, e.foreign_client_id) - }, - - ChainIdMismatch - { - source_chain_id: ChainId, - destination_chain_id: ChainId - } - |e| { - format!("the source chain of client a ({}) does not not match the destination chain of client b ({})", - e.source_chain_id, e.destination_chain_id) - }, - - ConnectionNotOpen - { - state: State, - } - |e| { - format!("the connection end is expected to be in state 'Open'; found state: {:?}", - e.state) - }, - - Supervisor - [ SupervisorError ] - |_| { "supervisor error" }, - - MissingConnectionId - { - chain_id: ChainId, - } - |e| { - format!("missing connection on source chain {}", - e.chain_id) - }, - - Signer - { chain_id: ChainId } - [ RelayerError ] - |e| { - format!("failed while fetching the signer for chain ({})", - e.chain_id) - }, - - MissingConnectionIdFromEvent - |_| { "cannot extract connection_id from result" }, - - MissingConnectionInitEvent - |_| { "no conn init event was in the response" }, - - MissingConnectionTryEvent - |_| { "no conn try event was in the response" }, - - MissingConnectionAckEvent - |_| { "no conn ack event was in the response" }, - - MissingConnectionConfirmEvent - |_| { "no conn confirm event was in the response" }, - - ConnectionProof - [ RelayerError ] - |_| { "failed to build connection proofs" }, - - ConnectionAlreadyExists - { connection_id: ConnectionId } - |e| { - format!("connection {} already exists in an incompatible state", e.connection_id) - }, - - MaxRetry - { - description: String, - tries: u64, - total_delay: Duration, - } - | e | { - format_args!("Error after maximum retry of {} and total 
delay of {}s: {}", - e.tries, e.total_delay.as_secs(), e.description) - }, - - } -} - -impl HasExpiredOrFrozenError for ConnectionErrorDetail { - fn is_expired_or_frozen_error(&self) -> bool { - match self { - Self::ClientOperation(e) => e.source.is_expired_or_frozen_error(), - _ => false, - } - } -} - -impl HasExpiredOrFrozenError for ConnectionError { - fn is_expired_or_frozen_error(&self) -> bool { - self.detail().is_expired_or_frozen_error() - } -} diff --git a/relayer/src/error.rs b/relayer/src/error.rs deleted file mode 100644 index d8eed2d709..0000000000 --- a/relayer/src/error.rs +++ /dev/null @@ -1,668 +0,0 @@ -//! This module defines the various errors that be raised in the relayer. - -use core::time::Duration; - -use flex_error::{define_error, DisplayOnly, TraceClone, TraceError}; -use http::uri::InvalidUri; -use humantime::format_duration; -use prost::{DecodeError, EncodeError}; -use regex::Regex; -use tendermint::Error as TendermintError; -use tendermint_light_client::components::io::IoError as LightClientIoError; -use tendermint_light_client::errors::{ - Error as LightClientError, ErrorDetail as LightClientErrorDetail, -}; -use tendermint_proto::Error as TendermintProtoError; -use tendermint_rpc::endpoint::abci_query::AbciQuery; -use tendermint_rpc::endpoint::broadcast::tx_commit::TxResult; -use tendermint_rpc::Error as TendermintRpcError; -use tonic::{ - metadata::errors::InvalidMetadataValue, transport::Error as TransportError, - Status as GrpcStatus, -}; - -use ibc::{ - clients::ics07_tendermint::error as tendermint_error, - core::{ - ics02_client::{client_type::ClientType, error as client_error}, - ics03_connection::error as connection_error, - ics23_commitment::error as commitment_error, - ics24_host::identifier::{ChainId, ChannelId, ConnectionId}, - }, - proofs::ProofError, - relayer::ics18_relayer::error as relayer_error, -}; - -use crate::chain::cosmos::version; -use crate::chain::cosmos::GENESIS_MAX_BYTES_MAX_FRACTION; -use crate::event::monitor; -use crate::keyring::errors::Error as KeyringError; -use crate::sdk_error::SdkError; - -define_error! { - Error { - Io - [ TraceError ] - |_| { "I/O error" }, - - Rpc - { url: tendermint_rpc::Url } - [ TraceClone ] - |e| { format!("RPC error to endpoint {}", e.url) }, - - AbciQuery - { query: AbciQuery } - |e| { format!("ABCI query returned an error: {:?}", e.query) }, - - CheckTx - { - detail: SdkError, - tx: TxResult - } - |e| { format!("CheckTx commit returned an error: {0}, raw result: {1:?}", e.detail, e.tx) }, - - DeliverTx - { - detail: SdkError, - tx: TxResult - } - |e| { format!("DeliverTx Commit returns error: {0}. 
RawResult: {1:?}", e.detail, e.tx) }, - - WebSocket - { url: tendermint_rpc::Url } - |e| { format!("Websocket error to endpoint {}", e.url) }, - - EventMonitor - [ monitor::Error ] - |_| { "event monitor error" }, - - Grpc - |_| { "gRPC error" }, - - GrpcStatus - { status: GrpcStatus } - |e| { format!("gRPC call failed with status: {0}", e.status) }, - - GrpcTransport - [ TraceError ] - |_| { "error in underlying transport when making gRPC call" }, - - GrpcResponseParam - { param: String } - |e| { format!("missing parameter in GRPC response: {}", e.param) }, - - Decode - [ TraceError ] - |_| { "error decoding protobuf" }, - - LightClientVerification - { chain_id: String } - [ LightClientError ] - |e| { format!("light client verification error for chain id {0}", e.chain_id) }, - - LightClientState - [ client_error::Error ] - |_| { "light client encountered error due to client state".to_string() }, - - LightClientIo - { address: String } - [ LightClientIoError ] - |e| { format!("light client error for RPC address {0}", e.address) }, - - ChainNotCaughtUp - { - address: String, - chain_id: ChainId, - } - |e| { format!("node at {} running chain {} not caught up", e.address, e.chain_id) }, - - PrivateStore - |_| { "requested proof for a path in the private store" }, - - Event - |_| { "bad notification" }, - - ConversionFromAny - [ TraceError ] - |_| { "conversion from a protobuf `Any` into a domain type failed" }, - - EmptyUpgradedClientState - |_| { "found no upgraded client state" }, - - ConsensusStateTypeMismatch - { - expected: ClientType, - got: ClientType, - } - |e| { format!("consensus state type mismatch; hint: expected client type '{0}', got '{1}'", e.expected, e.got) }, - - EmptyResponseValue - |_| { "empty response value" }, - - EmptyResponseProof - |_| { "empty response proof" }, - - MalformedProof - [ ProofError ] - |_| { "malformed proof" }, - - InvalidHeight - [ TendermintError ] - |_| { "invalid height" }, - - InvalidMetadata - [ TraceError ] - |_| { "invalid metadata" }, - - BuildClientStateFailure - |_| { "failed to create client state" }, - - CreateClient - { client_id: String } - |e| { format!("failed to create client {0}", e.client_id) }, - - ClientStateType - { client_state_type: String } - |e| { format!("unexpected client state type {0}", e.client_state_type) }, - - ConnectionNotFound - { connection_id: ConnectionId } - |e| { format!("connection not found: {0}", e.connection_id) }, - - BadConnectionState - |_| { "bad connection state" }, - - ConnOpen - { connection_id: ConnectionId, reason: String } - |e| { - format!("failed to build conn open message {0}: {1}", e.connection_id, e.reason) - }, - - ConnOpenInit - { reason: String } - |e| { format!("failed to build conn open init: {0}", e.reason) }, - - ConnOpenTry - { reason: String } - |e| { format!("failed to build conn open try: {0}", e.reason) }, - - ChanOpenAck - { channel_id: ChannelId, reason: String } - |e| { - format!("failed to build channel open ack {0}: {1}", e.channel_id, e.reason) - }, - - ChanOpenConfirm - { channel_id: ChannelId, reason: String } - |e| { - format!("failed to build channel open confirm {0}: {1}", e.channel_id, e.reason) - }, - - ConsensusProof - [ ProofError ] - |_| { "failed to build consensus proof" }, - - Packet - { channel_id: ChannelId, reason: String } - |e| { - format!("failed to build packet {0}: {1}", e.channel_id, e.reason) - }, - - RecvPacket - { channel_id: ChannelId, reason: String } - |e| { - format!("failed to build recv packet {0}: {1}", e.channel_id, e.reason) - }, - - 
AckPacket - { channel_id: ChannelId, reason: String } - |e| { - format!("failed to build acknowledge packet {0}: {1}", e.channel_id, e.reason) - }, - - TimeoutPacket - { channel_id: ChannelId, reason: String } - |e| { - format!("failed to build timeout packet {0}: {1}", e.channel_id, e.reason) - }, - - MessageTransaction - { reason: String } - |e| { format!("message transaction failure: {0}", e.reason) }, - - Query - { query: String } - |e| { format!("query error occurred (failed to query for {0})", e.query) }, - - KeyBase - [ KeyringError ] - |_| { "keyring error" }, - - KeyNotFound - { key_name: String } - [ KeyringError ] - |e| { format!("signature key not found: {}", e.key_name) }, - - Ics02 - [ client_error::Error ] - |e| { format!("ICS 02 error: {}", e.source) }, - - Ics03 - [ connection_error::Error ] - |_| { "ICS 03 error" }, - - Ics07 - [ tendermint_error::Error ] - |_| { "ICS 07 error" }, - - Ics18 - [ relayer_error::Error ] - |_| { "ICS 18 error" }, - - Ics23 - [ commitment_error::Error ] - |_| { "ICS 23 error" }, - - InvalidUri - { uri: String } - [ TraceError ] - |e| { format!("error parsing URI {}", e.uri) }, - - ChainIdentifier - { chain_id: String } - |e| { format!("invalid chain identifier format: {0}", e.chain_id) }, - - NonProvableData - |_| { "requested proof for data in the privateStore" }, - - ChannelSend - |_| { "internal message-passing failure while sending inter-thread request/response" }, - - ChannelReceive - [ TraceError ] - |_| { "internal message-passing failure while receiving inter-thread request/response" }, - - InvalidInputHeader - |_| { "the input header is not recognized as a header for this chain" }, - - TxNoConfirmation - |_| { "failed tx: no confirmation" }, - - Misbehaviour - { reason: String } - |e| { format!("error raised while submitting the misbehaviour evidence: {0}", e.reason) }, - - InvalidKeyAddress - { address: String } - [ TendermintError ] - |e| { format!("invalid key address: {0}", e.address) }, - - Bech32Encoding - [ TraceError ] - |_| { "bech32 encoding failed" }, - - ClientTypeMismatch - { - expected: ClientType, - got: ClientType, - } - |e| { - format!("client type mismatch: expected '{}', got '{}'", - e.expected, e.got) - }, - - ProtobufDecode - { payload_type: String } - [ TraceError ] - |e| { format!("error decoding protocol buffer for {}", e.payload_type) }, - - ProtobufEncode - { payload_type: String } - [ TraceError ] - |e| { format!("error encoding protocol buffer for {}", e.payload_type) }, - - TxSimulateGasEstimateExceeded - { - chain_id: ChainId, - estimated_gas: u64, - max_gas: u64, - } - |e| { - format!("{} gas estimate {} from simulated Tx exceeds the maximum configured {}", - e.chain_id, e.estimated_gas, e.max_gas) - }, - - HealthCheckJsonRpc - { - chain_id: ChainId, - address: String, - endpoint: String, - } - [ DisplayOnly ] - |e| { - format!("health check failed for endpoint {0} on the JSON-RPC interface of chain {1}:{2}", - e.endpoint, e.chain_id, e.address) - }, - - FetchVersionParsing - { - chain_id: ChainId, - address: String, - } - [ version::Error ] - |e| { - format!("failed while parsing version info for chain {0}:{1}; caused by: {2}", - e.chain_id, e.address, e.source) - }, - - FetchVersionGrpcTransport - { - chain_id: ChainId, - address: String, - endpoint: String, - } - [ DisplayOnly ] - |e| { - format!("failed while fetching version info from endpoint {0} on the gRPC interface of chain {1}:{2}", - e.endpoint, e.chain_id, e.address) - }, - - FetchVersionGrpcStatus - { - chain_id: ChainId, - address: String, 
- endpoint: String, - status: tonic::Status - } - |e| { - format!("failed while fetching version info from endpoint {0} on the gRPC interface of chain {1}:{2}; caused by: {3}", - e.endpoint, e.chain_id, e.address, e.status) - }, - - FetchVersionInvalidVersionResponse - { - chain_id: ChainId, - address: String, - endpoint: String, - } - |e| { - format!("failed while fetching version info from endpoint {0} on the gRPC interface of chain {1}:{2}; the gRPC response contains no application version information", - e.endpoint, e.chain_id, e.address) - }, - - ConfigValidationJsonRpc - { - chain_id: ChainId, - address: String, - endpoint: String, - } - [ DisplayOnly ] - |e| { - format!("semantic config validation: failed to reach endpoint {0} on the JSON-RPC interface of chain {1}:{2}", - e.endpoint, e.chain_id, e.address) - }, - - ConfigValidationTxSizeOutOfBounds - { - chain_id: ChainId, - configured_bound: usize, - genesis_bound: u64, - } - |e| { - format!("semantic config validation failed for option `max_tx_size` for chain '{}', reason: `max_tx_size` = {} is greater than {}% of the consensus parameter `max_size` = {}", - e.chain_id, e.configured_bound, GENESIS_MAX_BYTES_MAX_FRACTION * 100.0, e.genesis_bound) - }, - - ConfigValidationMaxGasTooHigh - { - chain_id: ChainId, - configured_max_gas: u64, - consensus_max_gas: i64, - } - |e| { - format!("semantic config validation failed for option `max_gas` for chain '{}', reason: `max_gas` = {} is greater than the consensus parameter `max_gas` = {}", - e.chain_id, e.configured_max_gas, e.consensus_max_gas) - }, - - ConfigValidationTrustingPeriodSmallerThanZero - { - chain_id: ChainId, - trusting_period: Duration, - } - |e| { - format!("semantic config validation failed for option `trusting_period` of chain '{}', reason: trusting period ({}) must be greater than zero", - e.chain_id, format_duration(e.trusting_period)) - }, - - ConfigValidationTrustingPeriodGreaterThanUnbondingPeriod - { - chain_id: ChainId, - trusting_period: Duration, - unbonding_period: Duration, - } - |e| { - format!("semantic config validation failed for option `trusting_period` of chain '{}', reason: trusting period ({}) must be smaller than the unbonding period ({})", - e.chain_id, format_duration(e.trusting_period), format_duration(e.unbonding_period)) - }, - - ConfigValidationDefaultGasTooHigh - { - chain_id: ChainId, - default_gas: u64, - max_gas: u64, - } - |e| { - format!("semantic config validation failed for option `default_gas` of chain '{}', reason: default gas ({}) must be smaller than the max gas ({})", - e.chain_id, e.default_gas, e.max_gas) - }, - - SdkModuleVersion - { - chain_id: ChainId, - address: String, - cause: String - } - |e| { - format!("Hermes health check failed while verifying the application compatibility for chain {0}:{1}; caused by: {2}", - e.chain_id, e.address, e.cause) - }, - - UnknownAccountType - { - type_url: String - } - |e| { - format!("Failed to deserialize account of an unknown protobuf type: {0}", e.type_url) - }, - - EmptyBaseAccount - |_| { "empty BaseAccount within EthAccount" }, - - EmptyQueryAccount - { address: String } - |e| { format!("Query/Account RPC returned an empty account for address: {}", e.address) }, - - NoHistoricalEntries - { chain_id: ChainId } - |e| { - format_args!( - "staking module for chain '{}' does not maintain any historical entries \ - (`historical_entries` staking params is set to 0)", - e.chain_id - ) - }, - - - TxIndexingDisabled - { chain_id: ChainId } - |e| { - format_args!( - "transaction indexing for 
chain '{}' is disabled (`node_info.other.tx_index` is off)", - e.chain_id - ) - }, - } -} - -impl Error { - pub fn send(_: crossbeam_channel::SendError) -> Error { - Error::channel_send() - } - - pub fn is_trusted_state_outside_trusting_period_error(&self) -> bool { - match self.detail() { - ErrorDetail::LightClientVerification(e) => matches!( - e.source, - LightClientErrorDetail::TrustedStateOutsideTrustingPeriod(_) - ), - _ => false, - } - } -} - -impl GrpcStatusSubdetail { - /// Check whether this gRPC error matches - /// - message: verification failed: ... failed packet acknowledgement verification for client: client state height < proof height ... - pub fn is_client_state_height_too_low(&self) -> bool { - // Gaia v6.0.1 (SDK 0.44.5) returns code`InvalidArgument`, whereas gaia v6.0.4 - // (SDK 0.44.6, and potentially others) returns code `Unknown`. - // Workaround by matching strictly on the status message. - // if self.status.code() != tonic::Code::InvalidArgument - // return false; - // } - - let msg = self.status.message(); - msg.contains("verification failed") && msg.contains("client state height < proof height") - } - - /// Check whether this gRPC error message starts with "account sequence mismatch". - /// - /// # Note: - /// This predicate is tested and validated against errors - /// that appear at the `estimate_gas` step. The error - /// predicate to be used at the `broadcast_tx_sync` step - /// is different & relies on parsing the Response error code. - /// - /// It is currently expected that, in the case of a match, the error message is of form: - /// "account sequence mismatch, expected E, got G: incorrect account sequence", - /// where E > G. - /// The case where E < G is considered recoverable and should have been previously handled - /// (see `is_account_sequence_mismatch_that_can_be_ignored` for which the error is ignored and - /// simulation uses default gas). - /// However, if in future cosmos-sdk releases the gRPC error message changes such that - /// it still starts with "account sequence mismatch" but the rest doesn't match the remainder of - /// the pattern (", expected E, got G: incorrect account sequence"), or - /// there are hermes code changes such that the E < G case is not previously caught anymore, - /// then this predicate will catch all "account sequence mismatch" errors - pub fn is_account_sequence_mismatch_that_requires_refresh(&self) -> bool { - self.status - .message() - .trim_start() - .starts_with("account sequence mismatch") - } - - /// Check whether this gRPC error matches: - /// "account sequence mismatch, expected E, got G", - /// where E < G. - /// It is currently expected that, in the case of a match, the error message is of form: - /// "account sequence mismatch, expected E, got G: incorrect account sequence" - /// - /// # Note: - /// This predicate is tested and validated against errors - /// that appear during the `estimate_gas` step. - /// If it evaluates to true then the error is ignored and the transaction that caused this - /// simulation error is still sent to mempool with `broadcast_tx_sync` allowing for potential - /// recovery after mempool's `recheckTxs` step. 
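// --- Illustrative sketch (not from the original sources) ----------------------
// A minimal, standalone version of the sequence-mismatch parsing that the two
// predicates above rely on. The named capture groups `expected` and `got` are an
// assumption, inferred from the `captures["expected"]` / `captures["got"]`
// lookups in `parse_sequences_in_mismatch_error_message` further below.
use regex::Regex;

/// Returns Some((expected, got)) when the message matches
/// "account sequence mismatch, expected E, got G".
fn parse_mismatch(message: &str) -> Option<(u64, u64)> {
    let re = Regex::new(
        r"account sequence mismatch, expected (?P<expected>\d+), got (?P<got>\d+)",
    )
    .ok()?;
    let caps = re.captures(message)?;
    let expected: u64 = caps["expected"].parse().ok()?;
    let got: u64 = caps["got"].parse().ok()?;
    Some((expected, got))
}

fn main() {
    // expected < got: recoverable; the simulation falls back to the default gas.
    assert_eq!(
        parse_mismatch("account sequence mismatch, expected 100, got 200: incorrect account sequence"),
        Some((100, 200))
    );
    // expected > got: the account sequence must be refreshed before retrying.
    assert_eq!(
        parse_mismatch("account sequence mismatch, expected 200, got 100: incorrect account sequence"),
        Some((200, 100))
    );
}
// -------------------------------------------------------------------------------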
- /// More details in - pub fn is_account_sequence_mismatch_that_can_be_ignored(&self) -> bool { - match parse_sequences_in_mismatch_error_message(self.status.message()) { - None => false, - Some((expected, got)) => expected < got, - } - } -} - -/// Assumes that the cosmos-sdk account sequence mismatch error message, that may be seen -/// during simulating or broadcasting a transaction, includes the following pattern: -/// "account sequence mismatch, expected E, got G". -/// If a match is found it extracts and returns (E, G). -fn parse_sequences_in_mismatch_error_message(message: &str) -> Option<(u64, u64)> { - let re = - Regex::new(r#"account sequence mismatch, expected (?P\d+), got (?P\d+)"#) - .unwrap(); - match re.captures(message) { - None => None, - Some(captures) => match (captures["expected"].parse(), captures["got"].parse()) { - (Ok(e), Ok(g)) => Some((e, g)), - _ => None, - }, - } -} - -pub const QUERY_PROOF_EXPECT_MSG: &str = - "Internal error. Requested proof with query but no proof was returned."; - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_parse_sequences_in_mismatch_error_message() { - struct Test<'a> { - name: &'a str, - message: &'a str, - result: Option<(u64, u64)>, - } - let tests: Vec> = vec![ - Test { - name: "good mismatch error, expected < got", - message: - "account sequence mismatch, expected 100, got 200: incorrect account sequence", - result: Some((100, 200)), - }, - Test { - name: "good mismatch error, expected > got", - message: - "account sequence mismatch, expected 200, got 100: incorrect account sequence", - result: Some((200, 100)), - }, - Test { - name: "good changed mismatch error, expected < got", - message: "account sequence mismatch, expected 100, got 200: this part has changed", - result: Some((100, 200)), - }, - Test { - name: "good changed mismatch error, expected > got", - message: - "account sequence mismatch, expected 200, got 100 --> this part has changed", - result: Some((200, 100)), - }, - Test { - name: "bad mismatch error, bad expected", - message: - "account sequence mismatch, expected 2a5, got 100: incorrect account sequence", - result: None, - }, - Test { - name: "bad mismatch error, bad got", - message: - "account sequence mismatch, expected 25, got -29: incorrect account sequence", - result: None, - }, - Test { - name: "not a mismatch error", - message: "some other error message", - result: None, - }, - ]; - - for test in tests { - assert_eq!( - test.result, - parse_sequences_in_mismatch_error_message(test.message), - "{}", - test.name - ) - } - } -} diff --git a/relayer/src/event.rs b/relayer/src/event.rs deleted file mode 100644 index c5f87abc85..0000000000 --- a/relayer/src/event.rs +++ /dev/null @@ -1,3 +0,0 @@ -pub mod bus; -pub mod monitor; -pub mod rpc; diff --git a/relayer/src/event/bus.rs b/relayer/src/event/bus.rs deleted file mode 100644 index c7b750048a..0000000000 --- a/relayer/src/event/bus.rs +++ /dev/null @@ -1,120 +0,0 @@ -use alloc::collections::VecDeque; - -use crossbeam_channel as channel; - -pub struct EventBus { - txs: VecDeque>, -} - -impl Default for EventBus { - fn default() -> Self { - Self::new() - } -} - -impl EventBus { - pub fn new() -> Self { - Self { - txs: VecDeque::new(), - } - } - - pub fn subscribe(&mut self) -> channel::Receiver { - let (tx, rx) = channel::unbounded(); - self.txs.push_back(tx); - rx - } - - pub fn broadcast(&mut self, value: T) - where - T: Clone, - { - let mut disconnected = Vec::new(); - - for (idx, tx) in self.txs.iter().enumerate() { - // TODO: Avoid 
cloning when sending to last subscriber - if let Err(channel::SendError(_)) = tx.send(value.clone()) { - disconnected.push(idx); - } - } - - // Remove all disconnected subscribers - for idx in disconnected { - self.txs.remove(idx); - } - } -} - -#[cfg(test)] -mod tests { - use super::EventBus; - - use core::sync::atomic::{AtomicUsize, Ordering}; - use serial_test::serial; - use test_log::test; - - static COUNTER: AtomicUsize = AtomicUsize::new(0); - - fn counter() -> usize { - COUNTER.load(Ordering::SeqCst) - } - - fn reset_counter() { - COUNTER.store(0, Ordering::SeqCst); - } - - fn incr_counter() { - COUNTER.fetch_add(1, Ordering::SeqCst); - } - - #[derive(Debug, PartialEq, Eq)] - pub struct Value(u32); - - impl Clone for Value { - fn clone(&self) -> Self { - incr_counter(); - Self(self.0) - } - } - - #[test] - #[serial] - fn single_subscribers() { - reset_counter(); - - let mut bus = EventBus::new(); - let rx = bus.subscribe(); - - bus.broadcast(Value(42)); - bus.broadcast(Value(113)); - - assert_eq!(rx.recv(), Ok(Value(42))); - assert_eq!(rx.recv(), Ok(Value(113))); - assert_eq!(counter(), 2); - } - - #[test] - #[serial] - fn multi_subscribers() { - reset_counter(); - - let mut bus = EventBus::new(); - - let n = 10; - let mut rxs = vec![]; - - for _i in 0..n { - rxs.push(bus.subscribe()); - } - - bus.broadcast(Value(42)); - bus.broadcast(Value(113)); - - for rx in rxs { - assert_eq!(rx.recv(), Ok(Value(42))); - assert_eq!(rx.recv(), Ok(Value(113))); - } - - assert_eq!(counter(), 20); - } -} diff --git a/relayer/src/event/monitor.rs b/relayer/src/event/monitor.rs deleted file mode 100644 index 70f12cfcf1..0000000000 --- a/relayer/src/event/monitor.rs +++ /dev/null @@ -1,487 +0,0 @@ -use alloc::sync::Arc; -use core::cmp::Ordering; - -use crossbeam_channel as channel; -use futures::{ - pin_mut, - stream::{self, select_all, StreamExt}, - Stream, TryStreamExt, -}; -use tokio::task::JoinHandle; -use tokio::{runtime::Runtime as TokioRuntime, sync::mpsc}; -use tracing::{debug, error, info, trace}; - -use tendermint_rpc::{ - event::Event as RpcEvent, query::Query, Error as RpcError, SubscriptionClient, Url, - WebSocketClient, WebSocketClientDriver, -}; - -use ibc::{ - core::ics02_client::height::Height, core::ics24_host::identifier::ChainId, events::IbcEvent, -}; - -use crate::{ - chain::tracking::TrackingId, - telemetry, - util::{ - retry::{retry_count, retry_with_index, RetryResult}, - stream::try_group_while, - }, -}; - -mod error; -pub use error::*; - -pub type Result = core::result::Result; - -mod retry_strategy { - use crate::util::retry::clamp_total; - use core::time::Duration; - use retry::delay::Fibonacci; - - // Default parameters for the retrying mechanism - const MAX_DELAY: Duration = Duration::from_secs(60); // 1 minute - const MAX_TOTAL_DELAY: Duration = Duration::from_secs(10 * 60); // 10 minutes - const INITIAL_DELAY: Duration = Duration::from_secs(1); // 1 second - - pub fn default() -> impl Iterator { - clamp_total(Fibonacci::from(INITIAL_DELAY), MAX_DELAY, MAX_TOTAL_DELAY) - } -} - -/// A batch of events from a chain at a specific height -#[derive(Clone, Debug)] -pub struct EventBatch { - pub chain_id: ChainId, - pub tracking_id: TrackingId, - pub height: Height, - pub events: Vec, -} - -type SubscriptionResult = core::result::Result; -type SubscriptionStream = dyn Stream + Send + Sync + Unpin; - -pub type EventSender = channel::Sender>; -pub type EventReceiver = channel::Receiver>; -pub type TxMonitorCmd = channel::Sender; - -#[derive(Debug)] -pub enum MonitorCmd { - 
Shutdown, -} - -/// Connect to a Tendermint node, subscribe to a set of queries, -/// receive push events over a websocket, and filter them for the -/// event handler. -/// -/// The default events that are queried are: -/// - [`EventType::NewBlock`](tendermint_rpc::query::EventType::NewBlock) -/// - [`EventType::Tx`](tendermint_rpc::query::EventType::Tx) -pub struct EventMonitor { - chain_id: ChainId, - /// WebSocket to collect events from - client: WebSocketClient, - /// Async task handle for the WebSocket client's driver - driver_handle: JoinHandle<()>, - /// Channel to handler where the monitor for this chain sends the events - tx_batch: channel::Sender>, - /// Channel where to receive client driver errors - rx_err: mpsc::UnboundedReceiver, - /// Channel where to send client driver errors - tx_err: mpsc::UnboundedSender, - /// Channel where to receive commands - rx_cmd: channel::Receiver, - /// Node Address - node_addr: Url, - /// Queries - event_queries: Vec, - /// All subscriptions combined in a single stream - subscriptions: Box, - /// Tokio runtime - rt: Arc, -} - -// TODO: These are SDK specific, should be eventually moved. -pub mod queries { - use tendermint_rpc::query::{EventType, Query}; - - pub fn all() -> Vec { - // Note: Tendermint-go supports max 5 query specifiers! - vec![ - new_block(), - ibc_client(), - ibc_connection(), - ibc_channel(), - // This will be needed when we send misbehavior evidence to full node - // Query::eq("message.module", "evidence"), - ] - } - - pub fn new_block() -> Query { - Query::from(EventType::NewBlock) - } - - pub fn ibc_client() -> Query { - Query::eq("message.module", "ibc_client") - } - - pub fn ibc_connection() -> Query { - Query::eq("message.module", "ibc_connection") - } - - pub fn ibc_channel() -> Query { - Query::eq("message.module", "ibc_channel") - } -} - -impl EventMonitor { - /// Create an event monitor, and connect to a node - pub fn new( - chain_id: ChainId, - node_addr: Url, - rt: Arc, - ) -> Result<(Self, EventReceiver, TxMonitorCmd)> { - let (tx_batch, rx_batch) = channel::unbounded(); - let (tx_cmd, rx_cmd) = channel::unbounded(); - - let ws_addr = node_addr.clone(); - let (client, driver) = rt - .block_on(async move { WebSocketClient::new(ws_addr).await }) - .map_err(|_| Error::client_creation_failed(chain_id.clone(), node_addr.clone()))?; - - let (tx_err, rx_err) = mpsc::unbounded_channel(); - let websocket_driver_handle = rt.spawn(run_driver(driver, tx_err.clone())); - - // TODO: move them to config file(?) - let event_queries = queries::all(); - - let monitor = Self { - rt, - chain_id, - client, - driver_handle: websocket_driver_handle, - event_queries, - tx_batch, - rx_err, - tx_err, - rx_cmd, - node_addr, - subscriptions: Box::new(futures::stream::empty()), - }; - - Ok((monitor, rx_batch, tx_cmd)) - } - - /// The list of [`Query`] that this event monitor is subscribing for. - pub fn queries(&self) -> &[Query] { - &self.event_queries - } - - /// Clear the current subscriptions, and subscribe again to all queries. 
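// --- Illustrative sketch (not from the original sources) ----------------------
// The query set assembled by `queries::all()` above, built with the same
// `tendermint_rpc` constructors the monitor uses. Printing the queries makes it
// easy to check that the list stays within the "max 5 query specifiers" limit
// mentioned in the note above.
use tendermint_rpc::query::{EventType, Query};

fn example_queries() -> Vec<Query> {
    vec![
        // New blocks, plus one query per IBC module whose events the relayer follows.
        Query::from(EventType::NewBlock),
        Query::eq("message.module", "ibc_client"),
        Query::eq("message.module", "ibc_connection"),
        Query::eq("message.module", "ibc_channel"),
    ]
}

fn main() {
    for q in example_queries() {
        // Each query becomes its own WebSocket subscription in `EventMonitor::subscribe`.
        println!("{}", q);
    }
}
// -------------------------------------------------------------------------------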
- pub fn subscribe(&mut self) -> Result<()> { - let mut subscriptions = vec![]; - - for query in &self.event_queries { - trace!("[{}] subscribing to query: {}", self.chain_id, query); - - let subscription = self - .rt - .block_on(self.client.subscribe(query.clone())) - .map_err(Error::client_subscription_failed)?; - - subscriptions.push(subscription); - } - - self.subscriptions = Box::new(select_all(subscriptions)); - - trace!("[{}] subscribed to all queries", self.chain_id); - - Ok(()) - } - - fn try_reconnect(&mut self) -> Result<()> { - trace!( - "[{}] trying to reconnect to WebSocket endpoint {}", - self.chain_id, - self.node_addr - ); - - // Try to reconnect - let (mut client, driver) = self - .rt - .block_on(WebSocketClient::new(self.node_addr.clone())) - .map_err(|_| { - Error::client_creation_failed(self.chain_id.clone(), self.node_addr.clone()) - })?; - - let mut driver_handle = self.rt.spawn(run_driver(driver, self.tx_err.clone())); - - // Swap the new client with the previous one which failed, - // so that we can shut the latter down gracefully. - core::mem::swap(&mut self.client, &mut client); - core::mem::swap(&mut self.driver_handle, &mut driver_handle); - - trace!( - "[{}] reconnected to WebSocket endpoint {}", - self.chain_id, - self.node_addr - ); - - // Shut down previous client - trace!( - "[{}] gracefully shutting down previous client", - self.chain_id - ); - - let _ = client.close(); - - self.rt - .block_on(driver_handle) - .map_err(Error::client_termination_failed)?; - - trace!("[{}] previous client successfully shutdown", self.chain_id); - - Ok(()) - } - - /// Try to resubscribe to events - fn try_resubscribe(&mut self) -> Result<()> { - trace!("[{}] trying to resubscribe to events", self.chain_id); - self.subscribe() - } - - /// Attempt to reconnect the WebSocket client using the given retry strategy. - /// - /// See the [`retry`](https://docs.rs/retry) crate and the - /// [`crate::util::retry`] module for more information. - fn reconnect(&mut self) { - let result = retry_with_index(retry_strategy::default(), |_| { - // Try to reconnect - if let Err(e) = self.try_reconnect() { - trace!("[{}] error when reconnecting: {}", self.chain_id, e); - return RetryResult::Retry(()); - } - - // Try to resubscribe - if let Err(e) = self.try_resubscribe() { - trace!("[{}] error when resubscribing: {}", self.chain_id, e); - return RetryResult::Retry(()); - } - - RetryResult::Ok(()) - }); - - match result { - Ok(()) => info!( - "[{}] successfully reconnected to WebSocket endpoint {}", - self.chain_id, self.node_addr - ), - Err(retries) => error!( - "[{}] failed to reconnect to {} after {} retries", - self.chain_id, - self.node_addr, - retry_count(&retries) - ), - } - } - - /// Event monitor loop - #[allow(clippy::while_let_loop)] - pub fn run(mut self) { - debug!(chain = %self.chain_id, "starting event monitor"); - - // Continuously run the event loop, so that when it aborts - // because of WebSocket client restart, we pick up the work again. 
- loop { - match self.run_loop() { - Next::Continue => continue, - Next::Abort => break, - } - } - - debug!("[{}] event monitor is shutting down", self.chain_id); - - // Close the WebSocket connection - let _ = self.client.close(); - - // Wait for the WebSocket driver to finish - let _ = self.rt.block_on(self.driver_handle); - - trace!( - "[{}] event monitor has successfully shut down", - self.chain_id - ); - } - - fn run_loop(&mut self) -> Next { - // Take ownership of the subscriptions - let subscriptions = - core::mem::replace(&mut self.subscriptions, Box::new(futures::stream::empty())); - - // Convert the stream of RPC events into a stream of event batches. - let batches = stream_batches(subscriptions, self.chain_id.clone()); - - // Needed to be able to poll the stream - pin_mut!(batches); - - // Work around double borrow - let rt = self.rt.clone(); - - loop { - if let Ok(MonitorCmd::Shutdown) = self.rx_cmd.try_recv() { - return Next::Abort; - } - - let result = rt.block_on(async { - tokio::select! { - Some(batch) = batches.next() => batch, - Some(e) = self.rx_err.recv() => Err(Error::web_socket_driver(e)), - } - }); - - // Repeat check of shutdown command here, as previous recv() - // may block for a long time. - if let Ok(MonitorCmd::Shutdown) = self.rx_cmd.try_recv() { - return Next::Abort; - } - - match result { - Ok(batch) => self.process_batch(batch).unwrap_or_else(|e| { - error!("[{}] {}", self.chain_id, e); - }), - Err(e) => { - if let ErrorDetail::SubscriptionCancelled(reason) = e.detail() { - error!( - "[{}] subscription cancelled, reason: {}", - self.chain_id, reason - ); - - self.propagate_error(e).unwrap_or_else(|e| { - error!("[{}] {}", self.chain_id, e); - }); - - telemetry!(ws_reconnect, &self.chain_id); - - // Reconnect to the WebSocket endpoint, and subscribe again to the queries. - self.reconnect(); - - // Abort this event loop, the `run` method will start a new one. - // We can't just write `return self.run()` here because Rust - // does not perform tail call optimization, and we would - // thus potentially blow up the stack after many restarts. - return Next::Continue; - } else { - error!("[{}] failed to collect events: {}", self.chain_id, e); - - telemetry!(ws_reconnect, &self.chain_id); - - // Reconnect to the WebSocket endpoint, and subscribe again to the queries. - self.reconnect(); - - // Abort this event loop, the `run` method will start a new one. - // We can't just write `return self.run()` here because Rust - // does not perform tail call optimization, and we would - // thus potentially blow up the stack after many restarts. - return Next::Continue; - }; - } - } - } - } - - /// Propagate error to subscribers. - /// - /// The main use case for propagating RPC errors is for the [`Supervisor`] - /// to notice that the WebSocket connection or subscription has been closed, - /// and to trigger a clearing of packets, as this typically means that we have - /// missed a bunch of events which were emitted after the subscription was closed. - /// In that case, this error will be handled in [`Supervisor::handle_batch`]. 
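// --- Illustrative sketch (not from the original sources) ----------------------
// An approximation, in plain std code, of the backoff schedule produced by
// `retry_strategy::default()` and consumed by `reconnect` above: Fibonacci
// delays starting at 1s, each step capped at MAX_DELAY, and the schedule cut
// off once the cumulative delay would exceed MAX_TOTAL_DELAY. The real code
// delegates the clamping to `crate::util::retry::clamp_total`; this sketch only
// mimics that behaviour and is not the actual implementation.
use core::time::Duration;

fn fibonacci_schedule(
    initial: Duration,
    max_delay: Duration,
    max_total: Duration,
) -> impl Iterator<Item = Duration> {
    let mut cur = initial;
    let mut next = initial;
    let mut total = Duration::ZERO;
    core::iter::from_fn(move || {
        // Cap each individual delay, then stop once the total budget is spent.
        let step = cur.min(max_delay);
        if total + step > max_total {
            return None;
        }
        total += step;
        let after = cur + next;
        cur = next;
        next = after;
        Some(step)
    })
}

fn main() {
    // Prints 1s, 1s, 2s, 3s, 5s, ... capped at 60s per step, ~10 minutes overall.
    for (i, delay) in fibonacci_schedule(
        Duration::from_secs(1),
        Duration::from_secs(60),
        Duration::from_secs(10 * 60),
    )
    .enumerate()
    {
        println!("retry {:>2}: wait {:?}", i + 1, delay);
    }
}
// -------------------------------------------------------------------------------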
- fn propagate_error(&self, error: Error) -> Result<()> { - self.tx_batch - .send(Err(error)) - .map_err(|_| Error::channel_send_failed())?; - - Ok(()) - } - - /// Collect the IBC events from the subscriptions - fn process_batch(&self, batch: EventBatch) -> Result<()> { - telemetry!(ws_events, &batch.chain_id, batch.events.len() as u64); - - self.tx_batch - .send(Ok(batch)) - .map_err(|_| Error::channel_send_failed())?; - - Ok(()) - } -} - -/// Collect the IBC events from an RPC event -fn collect_events( - chain_id: &ChainId, - event: RpcEvent, -) -> impl Stream> { - let events = crate::event::rpc::get_all_events(chain_id, event).unwrap_or_default(); - stream::iter(events).map(Ok) -} - -/// Convert a stream of RPC event into a stream of event batches -fn stream_batches( - subscriptions: Box, - chain_id: ChainId, -) -> impl Stream> { - let id = chain_id.clone(); - - // Collect IBC events from each RPC event - let events = subscriptions - .map_ok(move |rpc_event| collect_events(&id, rpc_event)) - .map_err(Error::canceled_or_generic) - .try_flatten(); - - // Group events by height - let grouped = try_group_while(events, |(h0, _), (h1, _)| h0 == h1); - - // Convert each group to a batch - grouped.map_ok(move |events| { - let height = events - .first() - .map(|(h, _)| h) - .copied() - .expect("internal error: found empty group"); // SAFETY: upheld by `group_while` - - let mut events = events.into_iter().map(|(_, e)| e).collect::>(); - sort_events(&mut events); - - EventBatch { - height, - events, - chain_id: chain_id.clone(), - tracking_id: TrackingId::new_uuid(), - } - }) -} - -/// Sort the given events by putting the NewBlock event first, -/// and leaving the other events as is. -fn sort_events(events: &mut [IbcEvent]) { - events.sort_by(|a, b| match (a, b) { - (IbcEvent::NewBlock(_), _) => Ordering::Less, - _ => Ordering::Equal, - }) -} - -async fn run_driver( - driver: WebSocketClientDriver, - tx: mpsc::UnboundedSender, -) { - if let Err(e) = driver.run().await { - if tx.send(e).is_err() { - error!("failed to relay driver error to event monitor"); - } - } -} - -pub enum Next { - Abort, - Continue, -} diff --git a/relayer/src/event/monitor/error.rs b/relayer/src/event/monitor/error.rs deleted file mode 100644 index 4a5e262b17..0000000000 --- a/relayer/src/event/monitor/error.rs +++ /dev/null @@ -1,62 +0,0 @@ -use flex_error::{define_error, TraceError}; - -use tendermint_rpc::{Error as RpcError, Url}; - -use ibc::core::ics24_host::identifier::ChainId; - -define_error! 
{ - #[derive(Debug, Clone)] - Error { - WebSocketDriver - [ TraceError ] - |_| { "WebSocket driver failed" }, - - ClientCreationFailed - { chain_id: ChainId, address: Url } - |e| { format!("failed to create WebSocket driver for chain {0} with address {1}", e.chain_id, e.address) }, - - ClientTerminationFailed - [ TraceError ] - |_| { "failed to terminate previous WebSocket driver" }, - - ClientCompletionFailed - [ TraceError ] - |_| { "failed to run previous WebSocket driver to completion" }, - - ClientSubscriptionFailed - [ TraceError ] - |_| { "failed to run previous WebSocket driver to completion" }, - - NextEventBatchFailed - [ TraceError ] - |_| { "failed to collect events over WebSocket subscription" }, - - CollectEventsFailed - { reason: String } - |e| { format!("failed to extract IBC events: {0}", e.reason) }, - - ChannelSendFailed - |_| { "internal message-passing failure: monitor could not reach chain handler" }, - - SubscriptionCancelled - [ TraceError ] - |_| { "subscription cancelled" }, - - Rpc - [ TraceError ] - |_| { "RPC error" }, - } -} - -impl Error { - pub fn canceled_or_generic(e: RpcError) -> Self { - use tendermint_rpc::error::ErrorDetail; - - match e.detail() { - ErrorDetail::Server(detail) if detail.reason.contains("subscription was cancelled") => { - Self::subscription_cancelled(e) - } - _ => Self::rpc(e), - } - } -} diff --git a/relayer/src/event/rpc.rs b/relayer/src/event/rpc.rs deleted file mode 100644 index 561e078a44..0000000000 --- a/relayer/src/event/rpc.rs +++ /dev/null @@ -1,255 +0,0 @@ -use alloc::collections::BTreeMap as HashMap; -use core::convert::TryFrom; - -use tendermint_rpc::{event::Event as RpcEvent, event::EventData as RpcEventData}; - -use ibc::core::ics02_client::{events as ClientEvents, height::Height}; -use ibc::core::ics03_connection::events as ConnectionEvents; -use ibc::core::ics04_channel::events as ChannelEvents; -use ibc::core::ics24_host::identifier::ChainId; -use ibc::events::{IbcEvent, RawObject}; - -use crate::event::monitor::queries; - -/// Extract IBC events from Tendermint RPC events -/// -/// Events originate from the following ABCI methods -> -/// 1. `DeliverTx` - these events are generated during the execution of transaction messages. -/// 2. `BeginBlock` -/// 3. `EndBlock` -/// -/// Events originating from `DeliverTx` are currently extracted via the `RpcEvent::data` field as -/// the `EventData::Tx` variant. -/// Here's an example of what these events look like (i.e. `TxInfo::TxResult::events`) - -/// ```ron -/// [ -/// Event { -/// type_str: "channel_open_init", -/// attributes: [ -/// Tag { -/// key: Key( -/// "port_id", -/// ), -/// value: Value( -/// "transfer", -/// ), -/// }, -/// Tag { -/// key: Key( -/// "channel_id", -/// ), -/// value: Value( -/// "channel-1", -/// ), -/// }, -/// Tag { -/// key: Key( -/// "counterparty_port_id", -/// ), -/// value: Value( -/// "transfer", -/// ), -/// }, -/// Tag { -/// key: Key( -/// "counterparty_channel_id", -/// ), -/// value: Value( -/// "", -/// ), -/// }, -/// Tag { -/// key: Key( -/// "connection_id", -/// ), -/// value: Value( -/// "connection-1", -/// ), -/// }, -/// ], -/// }, -/// // ... -/// ] -/// ``` -/// -/// Events originating from `BeginBlock` and `EndBlock` methods are extracted via the -/// `RpcEvent::events` field. 
Here's an example of what these events look like -> -/// ```json -/// { -/// "channel_open_init.channel_id": [ -/// "channel-0", -/// ], -/// "channel_open_init.connection_id": [ -/// "connection-0", -/// ], -/// "channel_open_init.counterparty_channel_id": [ -/// "channel-0", -/// ], -/// "channel_open_init.counterparty_port_id": [ -/// "transfer", -/// ], -/// "channel_open_init.port_id": [ -/// "transfer", -/// ], -/// // ... -/// } -/// ``` -/// -/// Note: Historically, all events were extracted from the `RpcEvent::events` field. This was -/// possible because these events had a `message.action` field that allowed us to infer the order in -/// which these events were triggered -> -/// ```json -/// "message.action": [ -/// "update_client", -/// "channel_open_ack", -/// ], -/// "message.module": [ -/// "ibc_client", -/// "ibc_channel", -/// ], -/// ``` -/// {Begin,End}Block events however do not have any such `message.action` associated with them, so -/// this doesn't work. For this reason, we extract block events in the following order -> -/// OpenInit -> OpenTry -> OpenAck -> OpenConfirm -> SendPacket -> CloseInit -> CloseConfirm. -pub fn get_all_events( - chain_id: &ChainId, - result: RpcEvent, -) -> Result, String> { - let mut vals: Vec<(Height, IbcEvent)> = vec![]; - let RpcEvent { - data, - events, - query, - } = result; - let events = events.ok_or("missing events")?; - - match data { - RpcEventData::NewBlock { block, .. } if query == queries::new_block().to_string() => { - let height = Height::new( - ChainId::chain_version(chain_id.to_string().as_str()), - u64::from(block.as_ref().ok_or("tx.height")?.header.height), - ); - - vals.push((height, ClientEvents::NewBlock::new(height).into())); - vals.append(&mut extract_block_events(height, &events)); - } - RpcEventData::Tx { tx_result } => { - let height = Height::new( - ChainId::chain_version(chain_id.to_string().as_str()), - tx_result.height as u64, - ); - - for abci_event in &tx_result.result.events { - if query == queries::ibc_client().to_string() { - if let Some(mut client_event) = ClientEvents::try_from_tx(abci_event) { - client_event.set_height(height); - tracing::trace!("extracted ibc_client event {}", client_event); - vals.push((height, client_event)); - } - } - if query == queries::ibc_connection().to_string() { - if let Some(mut conn_event) = ConnectionEvents::try_from_tx(abci_event) { - conn_event.set_height(height); - tracing::trace!("extracted ibc_connection event {}", conn_event); - vals.push((height, conn_event)); - } - } - if query == queries::ibc_channel().to_string() { - if let Some(mut chan_event) = ChannelEvents::try_from_tx(abci_event) { - chan_event.set_height(height); - let _span = tracing::trace_span!("ibc_channel event").entered(); - tracing::trace!("extracted {}", chan_event); - if matches!(chan_event, IbcEvent::SendPacket(_)) { - // Should be the same as the hash of tx_result.tx? 
- if let Some(hash) = - events.get("tx.hash").and_then(|values| values.get(0)) - { - tracing::trace!(event = "SendPacket", "tx hash: {}", hash); - } - } - vals.push((height, chan_event)); - } - } - } - } - _ => {} - } - - Ok(vals) -} - -fn extract_block_events( - height: Height, - block_events: &HashMap>, -) -> Vec<(Height, IbcEvent)> { - #[inline] - fn extract_events<'a, T: TryFrom>>( - height: Height, - block_events: &'a HashMap>, - event_type: &str, - event_field: &str, - ) -> Vec { - block_events - .get(&format!("{}.{}", event_type, event_field)) - .unwrap_or(&vec![]) - .iter() - .enumerate() - .filter_map(|(i, _)| { - let raw_obj = RawObject::new(height, event_type.to_owned(), i, block_events); - T::try_from(raw_obj).ok() - }) - .collect() - } - - #[inline] - fn append_events>( - events: &mut Vec<(Height, IbcEvent)>, - chan_events: Vec, - height: Height, - ) { - events.append( - &mut chan_events - .into_iter() - .map(|ev| (height, ev.into())) - .collect(), - ); - } - - let mut events: Vec<(Height, IbcEvent)> = vec![]; - append_events::( - &mut events, - extract_events(height, block_events, "channel_open_init", "channel_id"), - height, - ); - append_events::( - &mut events, - extract_events(height, block_events, "channel_open_try", "channel_id"), - height, - ); - append_events::( - &mut events, - extract_events(height, block_events, "channel_open_ack", "channel_id"), - height, - ); - append_events::( - &mut events, - extract_events(height, block_events, "channel_open_confirm", "channel_id"), - height, - ); - append_events::( - &mut events, - extract_events(height, block_events, "send_packet", "packet_data"), - height, - ); - append_events::( - &mut events, - extract_events(height, block_events, "channel_close_init", "channel_id"), - height, - ); - append_events::( - &mut events, - extract_events(height, block_events, "channel_close_confirm", "channel_id"), - height, - ); - events -} diff --git a/relayer/src/foreign_client.rs b/relayer/src/foreign_client.rs deleted file mode 100644 index ecb39ad700..0000000000 --- a/relayer/src/foreign_client.rs +++ /dev/null @@ -1,1865 +0,0 @@ -//! Queries and methods for interfacing with foreign clients. -//! -//! The term "foreign client" refers to IBC light clients that are running on-chain, -//! i.e. they are *foreign* to the relayer. In contrast, the term "local client" -//! refers to light clients running *locally* as part of the relayer. 
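// --- Illustrative sketch (not from the original sources) ----------------------
// Refers back to `extract_block_events` in relayer/src/event/rpc.rs above: block
// events arrive as a flat map keyed "event_type.field", with one value per
// occurrence of that event in the block. This std-only sketch only shows the
// keying scheme and how the per-type occurrence count is recovered; the real
// code turns each index into a `RawObject` and then into a typed IBC event.
use std::collections::BTreeMap;

fn occurrences(
    block_events: &BTreeMap<String, Vec<String>>,
    event_type: &str,
    field: &str,
) -> usize {
    block_events
        .get(&format!("{}.{}", event_type, field))
        .map(|values| values.len())
        .unwrap_or(0)
}

fn main() {
    let mut block_events = BTreeMap::new();
    block_events.insert(
        "channel_open_init.channel_id".to_string(),
        vec!["channel-0".to_string(), "channel-1".to_string()],
    );
    block_events.insert(
        "channel_open_init.port_id".to_string(),
        vec!["transfer".to_string(), "transfer".to_string()],
    );

    // Two `channel_open_init` events were emitted in this block.
    assert_eq!(occurrences(&block_events, "channel_open_init", "channel_id"), 2);
}
// -------------------------------------------------------------------------------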
- -use core::{fmt, time::Duration}; -use std::thread; -use std::time::Instant; - -use ibc_proto::google::protobuf::Any; -use itertools::Itertools; -use tracing::{debug, error, info, span, trace, warn, Level}; - -use flex_error::define_error; -use ibc::core::ics02_client::client_consensus::{ - AnyConsensusState, AnyConsensusStateWithHeight, ConsensusState, QueryClientEventRequest, -}; -use ibc::core::ics02_client::client_state::AnyClientState; -use ibc::core::ics02_client::client_state::ClientState; -use ibc::core::ics02_client::error::Error as ClientError; -use ibc::core::ics02_client::events::UpdateClient; -use ibc::core::ics02_client::header::{AnyHeader, Header}; -use ibc::core::ics02_client::misbehaviour::MisbehaviourEvidence; -use ibc::core::ics02_client::msgs::create_client::MsgCreateAnyClient; -use ibc::core::ics02_client::msgs::misbehavior::MsgSubmitAnyMisbehaviour; -use ibc::core::ics02_client::msgs::update_client::MsgUpdateAnyClient; -use ibc::core::ics02_client::msgs::upgrade_client::MsgUpgradeAnyClient; -use ibc::core::ics02_client::trust_threshold::TrustThreshold; -use ibc::core::ics24_host::identifier::{ChainId, ClientId}; -use ibc::downcast; -use ibc::events::{IbcEvent, WithBlockDataType}; -use ibc::query::QueryTxRequest; -use ibc::timestamp::{Timestamp, TimestampOverflowError}; -use ibc::tx_msg::Msg; -use ibc::Height; - -use crate::chain::client::ClientSettings; -use crate::chain::handle::ChainHandle; -use crate::chain::requests::{ - IncludeProof, PageRequest, QueryClientStateRequest, QueryConsensusStateRequest, - QueryConsensusStatesRequest, QueryUpgradedClientStateRequest, - QueryUpgradedConsensusStateRequest, -}; -use crate::chain::tracking::TrackedMsgs; -use crate::error::Error as RelayerError; - -const MAX_MISBEHAVIOUR_CHECK_DURATION: Duration = Duration::from_secs(120); - -const MAX_RETRIES: usize = 5; - -define_error! 
{ - ForeignClientError { - ClientCreate - { - chain_id: ChainId, - description: String - } - [ RelayerError ] - |e| { - format_args!("error raised while creating client for chain {0}: {1}", - e.chain_id, e.description) - }, - - Client - [ ClientError ] - |_| { "ICS02 client error" }, - - HeaderInTheFuture - { - src_chain_id: ChainId, - src_header_height: Height, - src_header_time: Timestamp, - dst_chain_id: ChainId, - dst_latest_header_height: Height, - dst_latest_header_time: Timestamp, - max_drift: Duration - } - |e| { - format_args!("update header from {} with height {} and time {} is in the future compared with latest header on {} with height {} and time {}, adjusted with drift {:?}", - e.src_chain_id, e.src_header_height, e.src_header_time, e.dst_chain_id, e.dst_latest_header_height, e.dst_latest_header_time, e.max_drift) - }, - - ClientUpdate - { - chain_id: ChainId, - description: String - } - [ RelayerError ] - |e| { - format_args!("error raised while updating client on chain {0}: {1}", e.chain_id, e.description) - }, - - ClientUpdateTiming - { - chain_id: ChainId, - clock_drift: Duration, - description: String - } - [ TimestampOverflowError ] - |e| { - format_args!("error raised while updating client on chain {0}: {1}", e.chain_id, e.description) - }, - - ClientAlreadyUpToDate - { - client_id: ClientId, - chain_id: ChainId, - height: Height, - } - |e| { - format_args!("Client {} is already up-to-date with chain {}@{}", - e.client_id, e.chain_id, e.height) - }, - - MissingSmallerTrustedHeight - { - chain_id: ChainId, - target_height: Height, - } - |e| { - format_args!("chain {} is missing trusted state smaller than target height {}", - e.chain_id, e.target_height) - }, - - MissingTrustedHeight - { - chain_id: ChainId, - target_height: Height, - } - |e| { - format_args!("chain {} is missing trusted state at target height {}", - e.chain_id, e.target_height) - }, - - ClientRefresh - { - client_id: ClientId, - reason: String - } - [ RelayerError ] - |e| { - format_args!("error raised while trying to refresh client {0}: {1}", - e.client_id, e.reason) - }, - - ClientQuery - { - client_id: ClientId, - chain_id: ChainId, - } - [ RelayerError ] - |e| { - format_args!("failed while querying for client {0} on chain id {1}", - e.client_id, e.chain_id) - }, - - ClientConsensusQuery - { - client_id: ClientId, - chain_id: ChainId, - height: Height - } - [ RelayerError ] - |e| { - format_args!("failed while querying for client consensus state {0} on chain id {1} for height {2}", - e.client_id, e.chain_id, e.height) - }, - - ClientUpgrade - { - client_id: ClientId, - chain_id: ChainId, - description: String, - } - [ RelayerError ] - |e| { - format_args!("failed while trying to upgrade client id {0} for chain {1}: {2}: {3}", - e.client_id, e.chain_id, e.description, e.source) - }, - - ClientEventQuery - { - client_id: ClientId, - chain_id: ChainId, - consensus_height: Height - } - [ RelayerError ] - |e| { - format_args!("failed while querying Tx for client {0} on chain id {1} at consensus height {2}", - e.client_id, e.chain_id, e.consensus_height) - }, - - UnexpectedEvent - { - client_id: ClientId, - chain_id: ChainId, - event: String, - } - |e| { - format_args!("failed while querying Tx for client {0} on chain id {1}: query Tx-es returned unexpected event: {2}", - e.client_id, e.chain_id, e.event) - }, - - MismatchChainId - { - client_id: ClientId, - expected_chain_id: ChainId, - actual_chain_id: ChainId, - } - |e| { - format_args!("failed while finding client {0}: expected chain_id in client 
state: {1}; actual chain_id: {2}", - e.client_id, e.expected_chain_id, e.actual_chain_id) - }, - - ExpiredOrFrozen - { - client_id: ClientId, - chain_id: ChainId, - description: String, - } - |e| { - format_args!("client {0} on chain id {1} is expired or frozen: {2}", - e.client_id, e.chain_id, e.description) - }, - - ConsensusStateNotTrusted - { - height: Height, - elapsed: Duration, - } - |e| { - format_args!("the consensus state at height {} is outside of trusting period: elapsed {:?}", - e.height, e.elapsed) - }, - - Misbehaviour - { - description: String, - } - [ RelayerError ] - |e| { - format_args!("error raised while checking for misbehaviour evidence: {0}", e.description) - }, - - MisbehaviourExit - { reason: String } - |e| { - format_args!("cannot run misbehaviour: {0}", e.reason) - }, - - SameChainId - { - chain_id: ChainId - } - |e| { - format_args!("the chain ID ({}) at the source and destination chains must be different", e.chain_id) - }, - - MissingClientIdFromEvent - { event: IbcEvent } - |e| { - format_args!("cannot extract client_id from result: {:?}", - e.event) - }, - - ChainErrorEvent - { - chain_id: ChainId, - event: IbcEvent - } - |e| { - format_args!("failed to update client on destination {} because of error event: {}", - e.chain_id, e.event) - }, - } -} - -pub trait HasExpiredOrFrozenError { - fn is_expired_or_frozen_error(&self) -> bool; -} - -impl HasExpiredOrFrozenError for ForeignClientErrorDetail { - fn is_expired_or_frozen_error(&self) -> bool { - matches!(self, Self::ExpiredOrFrozen(_)) - } -} - -impl HasExpiredOrFrozenError for ForeignClientError { - fn is_expired_or_frozen_error(&self) -> bool { - self.detail().is_expired_or_frozen_error() - } -} - -/// User-supplied options for the [`ForeignClient::build_create_client`] operation. -/// -/// Currently, the parameters are specific to the Tendermint-based chains. -/// A future revision will bring differentiated options for other chain types. -#[derive(Debug, Default)] -pub struct CreateOptions { - pub max_clock_drift: Option, - pub trusting_period: Option, - pub trust_threshold: Option, -} - -/// Captures the diagnostic of verifying whether a certain -/// consensus state is within the trusting period (i.e., trusted) -/// or it's not within the trusting period (not trusted). -pub enum ConsensusStateTrusted { - NotTrusted { - elapsed: Duration, - network_timestamp: Timestamp, - consensus_state_timestmap: Timestamp, - }, - Trusted { - elapsed: Duration, - }, -} - -#[derive(Clone, Debug)] -pub struct ForeignClient { - /// The identifier of this client. The host chain determines this id upon client creation, - /// so we may be using the default value temporarily. - pub id: ClientId, - - /// A handle to the chain hosting this client, i.e., destination chain. - pub dst_chain: DstChain, - - /// A handle to the chain whose headers this client is verifying, aka the source chain. - pub src_chain: SrcChain, -} - -/// Used in Output messages. -/// Provides a concise description of a [`ForeignClient`], -/// using the format: -/// {CHAIN-ID} -> {CHAIN-ID}:{CLIENT} -/// where the first chain identifier is for the source -/// chain, and the second chain identifier is the -/// destination (which hosts the client) chain. -impl fmt::Display - for ForeignClient -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!( - f, - "{} -> {}:{}", - self.src_chain.id(), - self.dst_chain.id(), - self.id - ) - } -} - -impl ForeignClient { - /// Creates a new foreign client on `dst_chain`. 
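// --- Illustrative sketch (not from the original sources) ----------------------
// A usage fragment for the `CreateOptions` struct defined above (assumed to be
// in scope, with `trusting_period` holding an `Option` of `Duration` as the
// field list above suggests). Every field is optional, so a caller overrides
// only what it needs; the remaining settings are derived from the two chain
// configurations later, via `ClientSettings::for_create_command` in
// `build_create_client` below. The concrete duration is a made-up example, not
// a recommended value.
use core::time::Duration;

fn example_create_options() -> CreateOptions {
    CreateOptions {
        // Override only the trusting period; max clock drift and trust threshold
        // fall back to the defaults computed from the chain configurations.
        trusting_period: Some(Duration::from_secs(14 * 24 * 60 * 60)),
        ..Default::default()
    }
}
// -------------------------------------------------------------------------------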
Blocks until the client is created, or - /// an error occurs. - /// Post-condition: `dst_chain` hosts an IBC client for `src_chain`. - pub fn new( - dst_chain: DstChain, - src_chain: SrcChain, - ) -> Result, ForeignClientError> { - // Sanity check - if src_chain.id().eq(&dst_chain.id()) { - return Err(ForeignClientError::same_chain_id(src_chain.id())); - } - - let mut client = ForeignClient { - id: ClientId::default(), - dst_chain, - src_chain, - }; - - client.create()?; - - Ok(client) - } - - pub fn restore( - id: ClientId, - dst_chain: DstChain, - src_chain: SrcChain, - ) -> ForeignClient { - ForeignClient { - id, - dst_chain, - src_chain, - } - } - - /// Queries `host_chain` to verify that a client with identifier `client_id` exists. - /// If the client does not exist, returns an error. If the client exists, cross-checks that the - /// identifier for the target chain of this client (i.e., the chain whose headers this client is - /// verifying) is consistent with `expected_target_chain`, and if so, return a new - /// `ForeignClient` representing this client. - pub fn find( - expected_target_chain: SrcChain, - host_chain: DstChain, - client_id: &ClientId, - ) -> Result, ForeignClientError> { - let height = Height::new(expected_target_chain.id().version(), 0); - - match host_chain.query_client_state( - QueryClientStateRequest { - client_id: client_id.clone(), - height, - }, - IncludeProof::No, - ) { - Ok((cs, _)) => { - if cs.chain_id() != expected_target_chain.id() { - Err(ForeignClientError::mismatch_chain_id( - client_id.clone(), - expected_target_chain.id(), - cs.chain_id(), - )) - } else { - // TODO: Any additional checks? - Ok(ForeignClient::restore( - client_id.clone(), - host_chain, - expected_target_chain, - )) - } - } - Err(e) => Err(ForeignClientError::client_query( - client_id.clone(), - host_chain.id(), - e, - )), - } - } - - pub fn upgrade(&self) -> Result, ForeignClientError> { - // Fetch the latest height of the source chain. - let src_height = self.src_chain.query_latest_height().map_err(|e| { - ForeignClientError::client_upgrade( - self.id.clone(), - self.src_chain.id(), - "failed while querying src chain for latest height".to_string(), - e, - ) - })?; - - info!("[{}] upgrade Height: {}", self, src_height); - - let mut msgs = self.build_update_client(src_height)?; - - // Query the host chain for the upgraded client state, consensus state & their proofs. 
- let (client_state, proof_upgrade_client) = self - .src_chain - .query_upgraded_client_state(QueryUpgradedClientStateRequest { height: src_height }) - .map_err(|e| { - ForeignClientError::client_upgrade( - self.id.clone(), - self.src_chain.id(), - "failed while fetching from chain the upgraded client state".to_string(), - e, - ) - })?; - - debug!("[{}] upgraded client state {:?}", self, client_state); - - let (consensus_state, proof_upgrade_consensus_state) = self - .src_chain - .query_upgraded_consensus_state(QueryUpgradedConsensusStateRequest { - height: src_height, - }) - .map_err(|e| { - ForeignClientError::client_upgrade( - self.id.clone(), - self.src_chain.id(), - "failed while fetching from chain the upgraded client consensus state" - .to_string(), - e, - ) - })?; - - debug!( - "[{}] upgraded client consensus state {:?}", - self, consensus_state - ); - - // Get signer - let signer = self.dst_chain.get_signer().map_err(|e| { - ForeignClientError::client_upgrade( - self.id.clone(), - self.dst_chain.id(), - "failed while fetching the destination chain signer".to_string(), - e, - ) - })?; - - let msg_upgrade = MsgUpgradeAnyClient { - client_id: self.id.clone(), - client_state, - consensus_state, - proof_upgrade_client: proof_upgrade_client.into(), - proof_upgrade_consensus_state: proof_upgrade_consensus_state.into(), - signer, - } - .to_any(); - - msgs.push(msg_upgrade); - - let tm = TrackedMsgs::new_static(msgs, "upgrade client"); - - let res = self - .dst_chain - .send_messages_and_wait_commit(tm) - .map_err(|e| { - ForeignClientError::client_upgrade( - self.id.clone(), - self.dst_chain.id(), - "failed while sending message to destination chain".to_string(), - e, - ) - })?; - - Ok(res) - } - - /// Returns a handle to the chain hosting this client. - pub fn dst_chain(&self) -> DstChain { - self.dst_chain.clone() - } - - /// Returns a handle to the chain whose headers this client is sourcing (the source chain). - pub fn src_chain(&self) -> SrcChain { - self.src_chain.clone() - } - - pub fn id(&self) -> &ClientId { - &self.id - } - - /// Lower-level interface for preparing a message to create a client. - pub fn build_create_client( - &self, - options: CreateOptions, - ) -> Result { - // Get signer - let signer = self.dst_chain.get_signer().map_err(|e| { - ForeignClientError::client_create( - self.src_chain.id(), - format!( - "failed while fetching the dst chain ({}) signer", - self.dst_chain.id() - ), - e, - ) - })?; - - // Build client create message with the data from source chain at latest height. - let latest_height = self.src_chain.query_latest_height().map_err(|e| { - ForeignClientError::client_create( - self.src_chain.id(), - "failed while querying src chain for latest height".to_string(), - e, - ) - })?; - - // Calculate client state settings from the chain configurations and - // optional user overrides. 
- let src_config = self.src_chain.config().map_err(|e| { - ForeignClientError::client_create( - self.src_chain.id(), - "failed while querying the source chain for configuration".to_string(), - e, - ) - })?; - let dst_config = self.dst_chain.config().map_err(|e| { - ForeignClientError::client_create( - self.dst_chain.id(), - "failed while querying the destination chain for configuration".to_string(), - e, - ) - })?; - let settings = ClientSettings::for_create_command(options, &src_config, &dst_config); - - let client_state = self - .src_chain - .build_client_state(latest_height, settings) - .map_err(|e| { - ForeignClientError::client_create( - self.src_chain.id(), - "failed when building client state".to_string(), - e, - ) - })? - .wrap_any(); - - let consensus_state = self - .src_chain - .build_consensus_state( - client_state.latest_height(), - latest_height, - client_state.clone(), - ) - .map_err(|e| { - ForeignClientError::client_create( - self.src_chain.id(), - "failed while building client consensus state from src chain".to_string(), - e, - ) - })? - .wrap_any(); - - //TODO Get acct_prefix - let msg = MsgCreateAnyClient::new(client_state, consensus_state, signer) - .map_err(ForeignClientError::client)?; - - Ok(msg) - } - - /// Returns the identifier of the newly created client. - pub fn build_create_client_and_send( - &self, - options: CreateOptions, - ) -> Result { - let new_msg = self.build_create_client(options)?; - - let res = self - .dst_chain - .send_messages_and_wait_commit(TrackedMsgs::new_single( - new_msg.to_any(), - "create client", - )) - .map_err(|e| { - ForeignClientError::client_create( - self.dst_chain.id(), - "failed sending message to dst chain ".to_string(), - e, - ) - })?; - - assert!(!res.is_empty()); - Ok(res[0].clone()) - } - - /// Sends the client creation transaction & subsequently sets the id of this ForeignClient - fn create(&mut self) -> Result<(), ForeignClientError> { - let event = self - .build_create_client_and_send(CreateOptions::default()) - .map_err(|e| { - error!("[{}] failed CreateClient: {}", self, e); - e - })?; - - self.id = extract_client_id(&event)?.clone(); - info!("🍭 [{}] => {:#?}\n", self, event); - - Ok(()) - } - - pub fn validated_client_state( - &self, - ) -> Result<(AnyClientState, Option), ForeignClientError> { - let (client_state, _) = { - self.dst_chain - .query_client_state( - QueryClientStateRequest { - client_id: self.id().clone(), - height: Height::zero(), - }, - IncludeProof::No, - ) - .map_err(|e| { - ForeignClientError::client_refresh( - self.id().clone(), - "failed querying client state on dst chain".to_string(), - e, - ) - })? - }; - - if client_state.is_frozen() { - return Err(ForeignClientError::expired_or_frozen( - self.id().clone(), - self.dst_chain.id(), - "client state reports that client is frozen".into(), - )); - } - - match self - .check_consensus_state_trusting_period(&client_state, &client_state.latest_height())? 
- { - ConsensusStateTrusted::NotTrusted { - elapsed, - network_timestamp, - consensus_state_timestmap, - } => { - error!( - latest_height = %client_state.latest_height(), - network_timestmap = %network_timestamp, - consensus_state_timestamp = %consensus_state_timestmap, - elapsed = ?elapsed, - "[{}] client state is not valid: latest height is outside of trusting period!", - self - ); - return Err(ForeignClientError::expired_or_frozen( - self.id().clone(), - self.dst_chain.id(), - format!( - "expired: time elapsed since last client update: {:?}", - elapsed - ), - )); - } - ConsensusStateTrusted::Trusted { elapsed } => Ok((client_state, Some(elapsed))), - } - } - - /// Verifies if the consensus state at given [`Height`] - /// is within or outside of the client's trusting period. - fn check_consensus_state_trusting_period( - &self, - client_state: &AnyClientState, - height: &Height, - ) -> Result { - let _span = span!(Level::DEBUG, "check_consensus_state_trusting_period", height = %height) - .entered(); - - // Safety check - if client_state.chain_id() != self.src_chain.id() { - warn!("the chain id in the client state ('{}') is inconsistent with the client's source chain id ('{}')", - client_state.chain_id(), self.src_chain.id()); - } - - let consensus_state_timestamp = self.consensus_state(*height)?.timestamp(); - - let current_src_network_time = self - .src_chain - .query_application_status() - .map_err(|e| { - ForeignClientError::client_refresh( - self.id().clone(), - "failed querying the application status of source chain".to_string(), - e, - ) - })? - .timestamp; - - // Compute the duration of time elapsed since this consensus state was installed - let elapsed = current_src_network_time - .duration_since(&consensus_state_timestamp) - .unwrap_or_default(); - - if client_state.expired(elapsed) { - Ok(ConsensusStateTrusted::NotTrusted { - elapsed, - network_timestamp: current_src_network_time, - consensus_state_timestmap: consensus_state_timestamp, - }) - } else { - Ok(ConsensusStateTrusted::Trusted { elapsed }) - } - } - - pub fn is_expired_or_frozen(&self) -> bool { - match self.validated_client_state() { - Ok(_) => false, - Err(e) => e.is_expired_or_frozen_error(), - } - } - - pub fn refresh(&mut self) -> Result>, ForeignClientError> { - let (client_state, elapsed) = self.validated_client_state()?; - - // The refresh_window is the maximum duration - // we can backoff between subsequent client updates. - let refresh_window = client_state.refresh_period(); - - match (elapsed, refresh_window) { - (None, _) | (_, None) => Ok(None), - (Some(elapsed), Some(refresh_window)) => { - if elapsed > refresh_window { - info!("[{}] client requires refresh", self); - self.build_latest_update_client_and_send() - .map_or_else(Err, |ev| Ok(Some(ev))) - } else { - Ok(None) - } - } - } - } - - /// Wrapper for build_update_client_with_trusted. - pub fn build_update_client( - &self, - target_height: Height, - ) -> Result, ForeignClientError> { - self.build_update_client_with_trusted(target_height, Height::zero()) - } - - /// Returns a trusted height that is lower than the target height, so - /// that the relayer can update the client to the target height based - /// on the returned trusted height. 
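// --- Illustrative sketch (not from the original sources) ----------------------
// The height-selection rule implemented by `solve_trusted_height` below, reduced
// to plain u64 heights: prefer the client's latest height when it is already
// below the target, otherwise fall back to a stored consensus height below the
// target. The iteration order of `cs_heights` here is an assumption made for the
// example; the real code scans the heights returned by the consensus-state query.
fn pick_trusted_height(client_latest: u64, target: u64, cs_heights: &[u64]) -> Option<u64> {
    if client_latest < target {
        Some(client_latest)
    } else {
        cs_heights.iter().copied().find(|h| *h < target)
    }
}

fn main() {
    // Common case: the on-chain client lags behind the target height.
    assert_eq!(pick_trusted_height(90, 100, &[10, 50, 90]), Some(90));
    // Backdated update: the client is already ahead of the target, so an older
    // consensus height below the target is used instead.
    assert_eq!(pick_trusted_height(120, 100, &[10, 50, 90, 120]), Some(10));
    // No usable trusted height at all.
    assert_eq!(pick_trusted_height(120, 5, &[10, 50]), None);
}
// -------------------------------------------------------------------------------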
- fn solve_trusted_height( - &self, - target_height: Height, - client_state: &AnyClientState, - ) -> Result { - let client_latest_height = client_state.latest_height(); - - if client_latest_height < target_height { - // If the latest height of the client is already lower than the - // target height, we can simply use it. - Ok(client_latest_height) - } else { - // The only time when the above is false is when for some reason, - // the command line user wants to submit a client update at an - // older height even when the relayer already have an up-to-date - // client at a newer height. - - // In production, this should rarely happen unless there is another - // relayer racing to update the client state, and that we so happen - // to get the the latest client state that was updated between - // the time the target height was determined, and the time - // the client state was fetched. - - warn!("[{}] resolving trusted height from the full list of consensus state heights for target height {}; this may take a while", - self, target_height); - - // Potential optimization: cache the list of consensus heights - // so that subsequent fetches can be fast. - let cs_heights = self.consensus_state_heights()?; - - // Iterate through the available consesnsus heights and find one - // that is lower than the target height. - cs_heights - .into_iter() - .find(|h| h < &target_height) - .ok_or_else(|| { - ForeignClientError::missing_smaller_trusted_height( - self.dst_chain().id(), - target_height, - ) - }) - } - } - - /// Validate a non-zero trusted height to make sure that there is a corresponding - /// consensus state at the given trusted height on the destination chain's client. - fn validate_trusted_height( - &self, - trusted_height: Height, - client_state: &AnyClientState, - ) -> Result<(), ForeignClientError> { - if client_state.latest_height() != trusted_height { - // There should be no need to validate a trusted height in production, - // Since it is always fetched from some client state. The only use is - // from the command line when the trusted height is manually specified. - // We should consider skipping the validation entirely and only validate - // it from the command line itself. - self.consensus_state(trusted_height)?; - } - - Ok(()) - } - - /// Given a client state and header it adds, if required, a delay such that the header will - /// not be considered in the future when submitted in an update client: - /// - determine the `dst_timestamp` as the time of the latest block on destination chain - /// - return if `header.timestamp <= dst_timestamp + client_state.max_clock_drift` - /// - wait for the destination chain to reach `dst_timestamp + 1` - /// Note: This is mostly to help with existing clients where the `max_clock_drift` did - /// not take into account the block time. - /// - return error if header.timestamp < dst_timestamp + client_state.max_clock_drift - /// - /// Ref: https://github.com/informalsystems/ibc-rs/issues/1445. 
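`solve_trusted_height` above prefers the client's latest height when it is below the target, and otherwise scans the descending list of consensus heights for the first one strictly below the target. A std-only sketch, with `u64` heights standing in for `ibc::Height`:

/// Heights are modeled as plain u64 here; the real code uses `ibc::Height`.
fn solve_trusted_height(
    target_height: u64,
    client_latest_height: u64,
    // Consensus state heights stored on the destination client, sorted descending.
    consensus_heights_desc: &[u64],
) -> Option<u64> {
    if client_latest_height < target_height {
        // Common case: the client's latest height is already a valid trusted height.
        Some(client_latest_height)
    } else {
        // Rare case (e.g. a manual update to an older height): walk the descending
        // list and take the first consensus height strictly below the target.
        consensus_heights_desc
            .iter()
            .copied()
            .find(|h| *h < target_height)
    }
}

fn main() {
    assert_eq!(solve_trusted_height(100, 90, &[90, 80, 10]), Some(90));
    assert_eq!(solve_trusted_height(50, 90, &[90, 80, 10]), Some(10));
    assert_eq!(solve_trusted_height(5, 90, &[90, 80, 10]), None); // no smaller trusted height exists
}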
- fn wait_for_header_validation_delay( - &self, - client_state: &AnyClientState, - header: &AnyHeader, - ) -> Result<(), ForeignClientError> { - // Get latest height and time on destination chain - let mut status = self.dst_chain().query_application_status().map_err(|e| { - ForeignClientError::client_update( - self.dst_chain.id(), - "failed querying latest status of the destination chain".to_string(), - e, - ) - })?; - - let ts_adjusted = (status.timestamp + client_state.max_clock_drift()).map_err(|e| { - ForeignClientError::client_update_timing( - self.dst_chain.id(), - client_state.max_clock_drift(), - "failed to adjust timestamp of destination chain with clock drift".to_string(), - e, - ) - })?; - - if header.timestamp().after(&ts_adjusted) { - // Header would be considered in the future, wait for destination chain to - // advance to the next height. - warn!("[{}] src header {} is after dst latest header {} + client state drift {:?}, wait for next height on {}", - self, header.timestamp(), status.timestamp, client_state.max_clock_drift(), self.dst_chain().id()); - - let target_dst_height = status.height.increment(); - loop { - thread::sleep(Duration::from_millis(300)); - status = self.dst_chain().query_application_status().map_err(|e| { - ForeignClientError::client_update( - self.dst_chain.id(), - "failed querying latest status of the destination chain".to_string(), - e, - ) - })?; - - if status.height >= target_dst_height { - break; - } - } - } - - let next_ts_adjusted = - (status.timestamp + client_state.max_clock_drift()).map_err(|e| { - ForeignClientError::client_update_timing( - self.dst_chain.id(), - client_state.max_clock_drift(), - "failed to adjust timestamp of destination chain with clock drift".to_string(), - e, - ) - })?; - - if header.timestamp().after(&next_ts_adjusted) { - // The header is still in the future - Err(ForeignClientError::header_in_the_future( - self.src_chain.id(), - header.height(), - header.timestamp(), - self.dst_chain.id(), - status.height, - status.timestamp, - client_state.max_clock_drift(), - )) - } else { - Ok(()) - } - } - - /// Returns a vector with a message for updating the client to height `target_height`. - /// If the client already stores a consensus state for this height, returns an empty vector. - pub fn build_update_client_with_trusted( - &self, - target_height: Height, - trusted_height: Height, - ) -> Result, ForeignClientError> { - let src_network_latest_height = || { - self.src_chain().query_latest_height().map_err(|e| { - ForeignClientError::client_create( - self.src_chain.id(), - "failed fetching src network latest height with error".to_string(), - e, - ) - }) - }; - - // Wait for the source network to produce block(s) & reach `target_height`. - while src_network_latest_height()? < target_height { - thread::sleep(Duration::from_millis(100)) - } - - // Get the latest client state on destination. - let (client_state, _) = self.validated_client_state()?; - - let trusted_height = if trusted_height == Height::zero() { - self.solve_trusted_height(target_height, &client_state)? 
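The clock-drift guard above can be read as: if the header would still look like it comes from the future relative to the destination's latest timestamp plus `max_clock_drift`, wait for one more destination block and re-check. A self-contained sketch with a stubbed status query; the 200 ms block time and all the types here are made up for illustration:

use std::thread;
use std::time::{Duration, SystemTime};

/// Hypothetical destination-chain status; the real code queries the application status.
struct Status {
    height: u64,
    timestamp: SystemTime,
}

fn query_status(start: SystemTime, start_height: u64) -> Status {
    // Stub: pretend the chain produces one block per 200 ms of wall-clock time.
    let blocks = start.elapsed().unwrap_or_default().as_millis() as u64 / 200;
    Status { height: start_height + blocks, timestamp: SystemTime::now() }
}

fn wait_for_header_validation_delay(
    header_time: SystemTime,
    max_clock_drift: Duration,
    start: SystemTime,
    start_height: u64,
) -> Result<(), String> {
    let mut status = query_status(start, start_height);
    if header_time > status.timestamp + max_clock_drift {
        // Header would be rejected as "from the future": wait for the next dst height.
        let target = status.height + 1;
        while status.height < target {
            thread::sleep(Duration::from_millis(50));
            status = query_status(start, start_height);
        }
    }
    if header_time > status.timestamp + max_clock_drift {
        Err("header is still in the future".to_string())
    } else {
        Ok(())
    }
}

fn main() {
    let now = SystemTime::now();
    let header_time = now + Duration::from_millis(100);
    let res = wait_for_header_validation_delay(header_time, Duration::from_secs(1), now, 10);
    println!("{:?}", res);
}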
- } else { - self.validate_trusted_height(trusted_height, &client_state)?; - trusted_height - }; - - if trusted_height != client_state.latest_height() { - // If we're using a trusted height that is different from the client latest height, - // then check if the consensus state at `trusted_height` is within trusting period - if let ConsensusStateTrusted::NotTrusted { - elapsed, - consensus_state_timestmap, - network_timestamp, - } = self.check_consensus_state_trusting_period(&client_state, &trusted_height)? - { - error!( - trusted_height = %trusted_height, - network_timestmap = %network_timestamp, - consensus_state_timestamp = %consensus_state_timestmap, - elapsed = ?elapsed, - "[{}] cannot build client update message because the provided trusted height is outside of trusting period!", - self - ); - return Err(ForeignClientError::consensus_state_not_trusted( - trusted_height, - elapsed, - )); - } - } - - if trusted_height >= target_height { - warn!( - "[{}] skipping update: trusted height ({}) >= chain target height ({})", - self, trusted_height, target_height - ); - return Ok(vec![]); - } - - let (header, support) = self - .src_chain() - .build_header(trusted_height, target_height, client_state.clone()) - .map_err(|e| { - ForeignClientError::client_update( - self.src_chain.id(), - "failed building header with error".to_string(), - e, - ) - })?; - - let signer = self.dst_chain().get_signer().map_err(|e| { - ForeignClientError::client_update( - self.dst_chain.id(), - "failed getting signer for dst chain".to_string(), - e, - ) - })?; - - self.wait_for_header_validation_delay(&client_state, &header)?; - - let mut msgs = vec![]; - - for header in support { - debug!( - "[{}] MsgUpdateAnyClient for intermediate height {}", - self, - header.height(), - ); - - msgs.push( - MsgUpdateAnyClient { - header, - client_id: self.id.clone(), - signer: signer.clone(), - } - .to_any(), - ); - } - - debug!( - "[{}] MsgUpdateAnyClient from trusted height {} to target height {}", - self, - trusted_height, - header.height(), - ); - - msgs.push( - MsgUpdateAnyClient { - header, - signer, - client_id: self.id.clone(), - } - .to_any(), - ); - - Ok(msgs) - } - - pub fn build_latest_update_client_and_send(&self) -> Result, ForeignClientError> { - self.build_update_client_and_send(Height::zero(), Height::zero()) - } - - pub fn build_update_client_and_send( - &self, - height: Height, - trusted_height: Height, - ) -> Result, ForeignClientError> { - let h = if height == Height::zero() { - self.src_chain.query_latest_height().map_err(|e| { - ForeignClientError::client_update( - self.src_chain.id(), - "failed while querying src chain ({}) for latest height".to_string(), - e, - ) - })? - } else { - height - }; - - let new_msgs = self.build_update_client_with_trusted(h, trusted_height)?; - if new_msgs.is_empty() { - return Err(ForeignClientError::client_already_up_to_date( - self.id.clone(), - self.src_chain.id(), - h, - )); - } - - let tm = TrackedMsgs::new_static(new_msgs, "update client"); - - let events = self - .dst_chain() - .send_messages_and_wait_commit(tm) - .map_err(|e| { - ForeignClientError::client_update( - self.dst_chain.id(), - "failed sending message to dst chain".to_string(), - e, - ) - })?; - - Ok(events) - } - - /// Attempts to update a client using header from the latest height of its source chain. 
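The message assembly at the end of `build_update_client_with_trusted` pushes one `MsgUpdateClient` per supporting header, in the order the light client returned them, and the verified target header last. A trivial sketch of that ordering, with strings standing in for protobuf `Any` messages:

/// Simplified stand-ins: headers carry only a height, messages are strings.
#[derive(Clone, Debug)]
struct Header {
    height: u64,
}

fn build_update_msgs(supporting: Vec<Header>, target: Header) -> Vec<String> {
    let mut msgs = Vec::with_capacity(supporting.len() + 1);
    // Intermediate headers first, in ascending height order,
    // so the on-chain client walks forward one step at a time...
    for h in supporting {
        msgs.push(format!("MsgUpdateClient(height = {})", h.height));
    }
    // ...and the verified target header goes last.
    msgs.push(format!("MsgUpdateClient(height = {})", target.height));
    msgs
}

fn main() {
    let msgs = build_update_msgs(
        vec![Header { height: 11 }, Header { height: 14 }],
        Header { height: 20 },
    );
    for m in &msgs {
        println!("{m}");
    }
}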
- pub fn update(&self) -> Result<(), ForeignClientError> { - let res = self.build_latest_update_client_and_send()?; - - debug!("[{}] client updated with return message {:?}\n", self, res); - - Ok(()) - } - - /// Retrieves the client update event that was emitted when a consensus state at the - /// specified height was created on chain. - /// It is possible that the event cannot be retrieved if the information is not yet available - /// on the full node. To handle this the query is retried a few times. - pub fn update_client_event( - &self, - consensus_height: Height, - ) -> Result, ForeignClientError> { - let mut events = vec![]; - for i in 0..MAX_RETRIES { - thread::sleep(Duration::from_millis(100)); - let result = self - .dst_chain - .query_txs(QueryTxRequest::Client(QueryClientEventRequest { - height: Height::zero(), - event_id: WithBlockDataType::UpdateClient, - client_id: self.id.clone(), - consensus_height, - })) - .map_err(|e| { - ForeignClientError::client_event_query( - self.id().clone(), - self.dst_chain.id(), - consensus_height, - e, - ) - }); - match result { - Err(e) => { - error!( - "[{}] query_tx with error {}, retry {}/{}", - self, - e, - i + 1, - MAX_RETRIES - ); - continue; - } - Ok(result) => { - events = result; - // Should break to prevent retrying uselessly. - break; - } - } - } - - if events.is_empty() { - return Ok(None); - } - - // It is possible in theory that `query_txs` returns multiple client update events for the - // same consensus height. This could happen when multiple client updates with same header - // were submitted to chain. However this is not what it's observed during testing. - // Regardless, just take the event from the first update. - let event = events[0].clone(); - let update = downcast!(event.clone() => IbcEvent::UpdateClient).ok_or_else(|| { - ForeignClientError::unexpected_event( - self.id().clone(), - self.dst_chain.id(), - event.to_json(), - ) - })?; - Ok(Some(update)) - } - - /// Retrieves all consensus states for this client and sorts them in descending height - /// order. If consensus states are not pruned on chain, then last consensus state is the one - /// installed by the `CreateClient` operation. - fn consensus_states(&self) -> Result, ForeignClientError> { - let mut consensus_states = self - .dst_chain - .query_consensus_states(QueryConsensusStatesRequest { - client_id: self.id.clone(), - pagination: Some(PageRequest::all()), - }) - .map_err(|e| { - ForeignClientError::client_query(self.id().clone(), self.src_chain.id(), e) - })?; - consensus_states.sort_by_key(|a| core::cmp::Reverse(a.height)); - Ok(consensus_states) - } - - /// Returns the consensus state at `height` or error if not found. - fn consensus_state(&self, height: Height) -> Result { - let (consensus_state, _) = self - .dst_chain - .query_consensus_state( - QueryConsensusStateRequest { - client_id: self.id.clone(), - consensus_height: height, - query_height: Height::zero(), - }, - IncludeProof::No, - ) - .map_err(|e| { - ForeignClientError::client_consensus_query( - self.id.clone(), - self.dst_chain.id(), - height, - e, - ) - })?; - - Ok(consensus_state) - } - - /// Retrieves all consensus heights for this client sorted in descending - /// order. - fn consensus_state_heights(&self) -> Result, ForeignClientError> { - // [TODO] Utilize query that only fetches consensus state heights - // https://github.com/cosmos/ibc-go/issues/798 - let consensus_state_heights: Vec = self - .consensus_states()? 
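`update_client_event` above is a bounded retry loop: sleep, query, and keep the first result that comes back without an error, even if it is empty. A generic sketch of that pattern; `MAX_RETRIES` here is a local assumption, not the crate's constant:

use std::thread;
use std::time::Duration;

const MAX_RETRIES: usize = 5; // assumed; the real constant lives elsewhere in the crate

/// Retry a fallible query a bounded number of times, sleeping between attempts,
/// and stop at the first successful (possibly empty) result.
fn query_with_retries<T>(mut query: impl FnMut() -> Result<Vec<T>, String>) -> Vec<T> {
    let mut events = Vec::new();
    for i in 0..MAX_RETRIES {
        thread::sleep(Duration::from_millis(100));
        match query() {
            Err(e) => {
                eprintln!("query failed ({e}), retry {}/{}", i + 1, MAX_RETRIES);
                continue;
            }
            Ok(result) => {
                events = result;
                break; // got an answer; retrying further would be useless
            }
        }
    }
    events
}

fn main() {
    let mut attempts = 0;
    let events = query_with_retries(|| {
        attempts += 1;
        if attempts < 3 {
            Err("not indexed yet".to_string())
        } else {
            Ok(vec!["UpdateClient"])
        }
    });
    println!("got {:?} after {attempts} attempts", events);
}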
- .iter() - .map(|cs| cs.height) - .collect(); - - Ok(consensus_state_heights) - } - - /// Checks for evidence of misbehaviour. - /// The check starts with and `update_event` emitted by chain B (`dst_chain`) for a client update - /// with a header from chain A (`src_chain`). The algorithm goes backwards through the headers - /// until it gets to the first misbehaviour. - /// - /// The following cases are covered: - /// 1 - fork: - /// Assumes at least one consensus state before the fork point exists. - /// Let existing consensus states on chain B be: [Sn,.., Sf, Sf-1, S0] with `Sf-1` being - /// the most recent state before fork. - /// Chain A is queried for a header `Hf'` at `Sf.height` and if it is different than the `Hf` - /// in the event for the client update (the one that has generated `Sf` on chain), then the two - /// headers are included in the evidence and submitted. - /// Note that in this case the headers are different but have the same height. - /// - /// 2 - BFT time violation for unavailable header (a.k.a. Future Lunatic Attack or FLA): - /// Some header with a height that is higher than the latest - /// height on A has been accepted and a consensus state was created on B. Note that this implies - /// that the timestamp of this header must be within the `clock_drift` of the client. - /// Assume the client on B has been updated with `h2`(not present on/ produced by chain A) - /// and it has a timestamp of `t2` that is at most `clock_drift` in the future. - /// Then the latest header from A is fetched, let it be `h1`, with a timestamp of `t1`. - /// If `t1 >= t2` then evidence of misbehavior is submitted to A. - /// - /// 3 - BFT time violation for existing headers (TODO): - /// Ensure that consensus state times are monotonically increasing with height. - /// - /// Other notes: - /// - the algorithm builds misbehavior at each consensus height, starting with the - /// highest height assuming the previous one is trusted. It submits the first constructed - /// evidence (the one with the highest height) - /// - a lot of the logic here is derived from the behavior of the only implemented client - /// (ics07-tendermint) and might not be general enough. - /// - pub fn detect_misbehaviour( - &self, - mut update: Option, - ) -> Result, ForeignClientError> { - thread::sleep(Duration::from_millis(100)); - let span_guard = update.as_ref().map(|ev| ev.consensus_height()); - let _span = span!( - tracing::Level::DEBUG, - "detect_misbehaviour", - update_height = ?span_guard, - ) - .entered(); - - // Get the latest client state on destination. - let (client_state, _) = { - self.dst_chain() - .query_client_state( - QueryClientStateRequest { - client_id: self.id().clone(), - height: Height::zero(), - }, - IncludeProof::No, - ) - .map_err(|e| { - ForeignClientError::misbehaviour( - format!("failed querying client state on dst chain {}", self.id), - e, - ) - })? - }; - - let consensus_state_heights = if let Some(ref event) = update { - vec![event.consensus_height()] - } else { - // Get the list of consensus state heights in descending order. - // Note: If chain does not prune consensus states then the last consensus state is - // the one installed by the `CreateClient` which does not include a header. - // For chains that do support pruning, it is possible that the last consensus state - // was installed by an `UpdateClient` and an event and header will be found. - self.consensus_state_heights()? 
- }; - - trace!( - "checking misbehaviour for consensus state heights (first 50 shown here): {}, total: {}", - consensus_state_heights.iter().take(50).join(", "), - consensus_state_heights.len() - ); - - let start_time = Instant::now(); - for target_height in consensus_state_heights { - // Start with specified update event or the one for latest consensus height - let update_event = if let Some(ref event) = update { - // we are here only on the first iteration when called with `Some` update event - event.clone() - } else if let Some(event) = self.update_client_event(target_height)? { - // we are here either on the first iteration with `None` initial update event or - // subsequent iterations - event - } else { - // we are here if the consensus state was installed on-chain when client was - // created, therefore there will be no update client event - break; - }; - - // Skip over heights higher than the update event one. - // This can happen if a client update happened with a lower height than latest. - if target_height > update_event.consensus_height() { - continue; - } - - // Ensure consensus height of the event is same as target height. This should be the - // case as we either - // - got the `update_event` from the `target_height` above, or - // - an `update_event` was specified and we should eventually find a consensus state - // at that height - // We break here in case we got a bogus event. - if target_height < update_event.consensus_height() { - break; - } - - // No header in events, cannot run misbehavior. - // May happen on chains running older SDKs (e.g., Akash) - if update_event.header.is_none() { - return Err(ForeignClientError::misbehaviour_exit(format!( - "could not extract header from update client event {:?} emitted by chain {:?}", - update_event, - self.dst_chain.id() - ))); - } - - // Check for misbehaviour according to the specific source chain type. - // In case of Tendermint client, this will also check the BFT time violation if - // a header for the event height cannot be retrieved from the witness. - let misbehavior = match self - .src_chain - .check_misbehaviour(update_event.clone(), client_state.clone()) - { - // Misbehavior check passed. - Ok(evidence_opt) => evidence_opt, - - // Predictable error occurred which we'll wrap. - // This error means we cannot check for misbehavior with the provided `target_height` - Err(e) if e.is_trusted_state_outside_trusting_period_error() => { - debug!(target = %target_height, - "exhausted checking trusted consensus states for this client; no evidence found"); - // It's safe to stop checking for misbehavior past this `target_height`. - break; - } - - // Unknown error occurred in the light client `check_misbehaviour` method. - // Propagate. - Err(e) => { - return Err(ForeignClientError::misbehaviour( - format!( - "failed to check misbehaviour for {} at consensus height {}", - update_event.client_id(), - update_event.consensus_height(), - ), - e, - )) - } - }; - - if misbehavior.is_some() { - return Ok(misbehavior); - } - - // Exit the loop if more than MAX_MISBEHAVIOUR_CHECK_TIME was spent here. 
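The detection loop above walks consensus heights from newest to oldest and stops early on the first evidence found, when trusted states run out, or when the time budget is spent. A compressed sketch of that control flow; the per-height check is abstracted into a closure and the 10-second budget is an assumption, not the crate's `MAX_MISBEHAVIOUR_CHECK_DURATION`:

use std::time::{Duration, Instant};

const MAX_CHECK_DURATION: Duration = Duration::from_secs(10); // assumed budget

#[derive(Debug)]
enum CheckOutcome {
    Evidence(String),      // misbehaviour found at this height
    Clean,                 // nothing wrong at this height
    OutsideTrustingPeriod, // no trusted state left to check against
}

/// Walk consensus heights from newest to oldest and stop at the first evidence,
/// when trusted states run out, or when the time budget is exceeded.
fn detect_misbehaviour(
    heights_desc: &[u64],
    check: impl Fn(u64) -> CheckOutcome,
) -> Option<String> {
    let start = Instant::now();
    for &h in heights_desc {
        match check(h) {
            CheckOutcome::Evidence(e) => return Some(e),
            CheckOutcome::OutsideTrustingPeriod => break,
            CheckOutcome::Clean => {}
        }
        if start.elapsed() > MAX_CHECK_DURATION {
            break;
        }
    }
    None
}

fn main() {
    let verdict = detect_misbehaviour(&[100, 90, 80], |h| {
        if h == 90 {
            CheckOutcome::Evidence(format!("conflicting header at {h}"))
        } else {
            CheckOutcome::Clean
        }
    });
    println!("{:?}", verdict);
}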
- if start_time.elapsed() > MAX_MISBEHAVIOUR_CHECK_DURATION { - trace!( - "finished misbehaviour verification after {:?}", - start_time.elapsed() - ); - - return Ok(None); - } - - // Clear the update - update = None; - - // slight backoff - thread::sleep(Duration::from_millis(100)); - } - - trace!( - "finished misbehaviour verification after {:?}", - start_time.elapsed() - ); - - Ok(None) - } - - fn submit_evidence( - &self, - evidence: MisbehaviourEvidence, - ) -> Result, ForeignClientError> { - let signer = self.dst_chain().get_signer().map_err(|e| { - ForeignClientError::misbehaviour( - format!( - "failed getting signer for destination chain ({})", - self.dst_chain.id() - ), - e, - ) - })?; - - let mut msgs = vec![]; - - for header in evidence.supporting_headers { - msgs.push( - MsgUpdateAnyClient { - header, - client_id: self.id.clone(), - signer: signer.clone(), - } - .to_any(), - ); - } - - msgs.push( - MsgSubmitAnyMisbehaviour { - misbehaviour: evidence.misbehaviour, - client_id: self.id.clone(), - signer, - } - .to_any(), - ); - - let tm = TrackedMsgs::new_static(msgs, "evidence"); - - let events = self - .dst_chain() - .send_messages_and_wait_commit(tm) - .map_err(|e| { - ForeignClientError::misbehaviour( - format!( - "failed sending evidence to destination chain ({})", - self.dst_chain.id(), - ), - e, - ) - })?; - - Ok(events) - } - - pub fn detect_misbehaviour_and_submit_evidence( - &self, - update_event: Option, - ) -> MisbehaviourResults { - // check evidence of misbehaviour for all updates or one - let result = match self.detect_misbehaviour(update_event.clone()) { - Err(e) => Err(e), - Ok(None) => Ok(vec![]), // no evidence found - Ok(Some(detected)) => { - error!( - "[{}] MISBEHAVIOUR DETECTED {}, sending evidence", - self, detected.misbehaviour - ); - - self.submit_evidence(detected) - } - }; - - // Filter the errors if the detection was run for all consensus states. - // Even if some states may have failed to verify, e.g. if they were expired, just - // warn the user and continue. 
- match result { - Err(ForeignClientError(ForeignClientErrorDetail::MisbehaviourExit(s), _)) => { - warn!( - "[{}] misbehaviour checking is being disabled: {:?}", - self, s - ); - MisbehaviourResults::CannotExecute - } - Ok(misbehaviour_detection_result) => { - if !misbehaviour_detection_result.is_empty() { - info!( - "[{}] evidence submission result {:?}", - self, misbehaviour_detection_result - ); - MisbehaviourResults::EvidenceSubmitted(misbehaviour_detection_result) - } else { - MisbehaviourResults::ValidClient - } - } - Err(e) => match e.detail() { - ForeignClientErrorDetail::MisbehaviourExit(s) => { - error!( - "[{}] misbehaviour checking is being disabled: {:?}", - self, s - ); - MisbehaviourResults::CannotExecute - } - ForeignClientErrorDetail::ExpiredOrFrozen(_) => { - error!( - "[{}] cannot check misbehavior on frozen or expired client", - self - ); - MisbehaviourResults::CannotExecute - } - _ => { - if update_event.is_some() { - MisbehaviourResults::CannotExecute - } else { - warn!("[{}] misbehaviour checking result: {:?}", self, e); - MisbehaviourResults::ValidClient - } - } - }, - } - } - - pub fn map_chain( - self, - map_dst: impl Fn(DstChain) -> DstChain2, - map_src: impl Fn(SrcChain) -> SrcChain2, - ) -> ForeignClient { - ForeignClient { - id: self.id, - dst_chain: map_dst(self.dst_chain), - src_chain: map_src(self.src_chain), - } - } -} - -#[derive(Clone, Debug)] -pub enum MisbehaviourResults { - CannotExecute, - EvidenceSubmitted(Vec), - ValidClient, - VerificationError, -} - -pub fn extract_client_id(event: &IbcEvent) -> Result<&ClientId, ForeignClientError> { - match event { - IbcEvent::CreateClient(ev) => Ok(ev.client_id()), - IbcEvent::UpdateClient(ev) => Ok(ev.client_id()), - _ => Err(ForeignClientError::missing_client_id_from_event( - event.clone(), - )), - } -} - -/// Tests the integration of crates `relayer` plus `relayer-cli` against crate `ibc`. These tests -/// exercise various client methods (create, update, ForeignClient::new) using locally-running -/// instances of chains built using `MockChain`. -#[cfg(test)] -mod test { - use alloc::sync::Arc; - use core::str::FromStr; - - use test_log::test; - use tokio::runtime::Runtime as TokioRuntime; - - use ibc::core::ics24_host::identifier::ClientId; - use ibc::events::IbcEvent; - use ibc::Height; - - use crate::chain::handle::{BaseChainHandle, ChainHandle}; - use crate::chain::mock::test_utils::get_basic_chain_config; - use crate::chain::mock::MockChain; - use crate::chain::requests::{IncludeProof, QueryClientStateRequest}; - use crate::chain::runtime::ChainRuntime; - use crate::foreign_client::ForeignClient; - - /// Basic test for the `build_create_client_and_send` method. 
- #[test] - fn create_client_and_send_method() { - let a_cfg = get_basic_chain_config("chain_a"); - let b_cfg = get_basic_chain_config("chain_b"); - - let rt = Arc::new(TokioRuntime::new().unwrap()); - let a_chain = - ChainRuntime::::spawn::(a_cfg, rt.clone()).unwrap(); - let b_chain = ChainRuntime::::spawn::(b_cfg, rt).unwrap(); - let a_client = - ForeignClient::restore(ClientId::default(), a_chain.clone(), b_chain.clone()); - - let b_client = ForeignClient::restore(ClientId::default(), b_chain, a_chain); - - // Create the client on chain a - let res = a_client.build_create_client_and_send(Default::default()); - assert!( - res.is_ok(), - "build_create_client_and_send failed (chain a) with error {:?}", - res - ); - assert!(matches!(res.unwrap(), IbcEvent::CreateClient(_))); - - // Create the client on chain b - let res = b_client.build_create_client_and_send(Default::default()); - assert!( - res.is_ok(), - "build_create_client_and_send failed (chain b) with error {:?}", - res - ); - assert!(matches!(res.unwrap(), IbcEvent::CreateClient(_))); - } - - /// Basic test for the `build_update_client_and_send` & `build_create_client_and_send` methods. - #[test] - fn update_client_and_send_method() { - let a_cfg = get_basic_chain_config("chain_a"); - let b_cfg = get_basic_chain_config("chain_b"); - let a_client_id = ClientId::from_str("client_on_a_forb").unwrap(); - - // The number of ping-pong iterations - let num_iterations = 3; - - let rt = Arc::new(TokioRuntime::new().unwrap()); - let a_chain = - ChainRuntime::::spawn::(a_cfg, rt.clone()).unwrap(); - let b_chain = ChainRuntime::::spawn::(b_cfg, rt).unwrap(); - let mut a_client = ForeignClient::restore(a_client_id, a_chain.clone(), b_chain.clone()); - - let mut b_client = - ForeignClient::restore(ClientId::default(), b_chain.clone(), a_chain.clone()); - - // This action should fail because no client exists (yet) - let res = a_client.build_latest_update_client_and_send(); - assert!( - res.is_err(), - "build_update_client_and_send was supposed to fail (no client existed)" - ); - - // Remember b's height. - let b_height_start = b_chain.query_latest_height().unwrap(); - - // Create a client on chain a - let res = a_client.create(); - assert!( - res.is_ok(), - "build_create_client_and_send failed (chain a) with error {:?}", - res - ); - - // TODO: optionally add return events from `create` and assert on the event type, e.g.: - // assert!(matches!(res.as_ref().unwrap(), IBCEvent::CreateClient(_))); - // let a_client_id = extract_client_id(&res.unwrap()).unwrap().clone(); - - // This should fail because the client on chain a already has the latest headers. Chain b, - // the source chain for the client on a, is at the same height where it was when the client - // was created, so an update should fail here. - let res = a_client.build_latest_update_client_and_send(); - assert!( - res.is_err(), - "build_update_client_and_send was supposed to fail", - ); - - // Remember b's height. 
- let b_height_last = b_chain.query_latest_height().unwrap(); - assert_eq!(b_height_last, b_height_start); - - // Create a client on chain b - let res = b_client.create(); - assert!( - res.is_ok(), - "build_create_client_and_send failed (chain b) with error {:?}", - res - ); - // TODO: assert return events - // assert!(matches!(res.as_ref().unwrap(), IBCEvent::CreateClient(_))); - - // Chain b should have advanced - let mut b_height_last = b_chain.query_latest_height().unwrap(); - assert_eq!(b_height_last, b_height_start.increment()); - - // Remember the current height of chain a - let mut a_height_last = a_chain.query_latest_height().unwrap(); - - // Now we can update both clients -- a ping pong, similar to ICS18 `client_update_ping_pong` - for _i in 1..num_iterations { - let res = a_client.build_latest_update_client_and_send(); - - assert!( - res.is_ok(), - "build_update_client_and_send failed (chain a) with error: {:?}", - res - ); - - let res = res.unwrap(); - assert!(matches!(res.last(), Some(IbcEvent::UpdateClient(_)))); - - let a_height_current = a_chain.query_latest_height().unwrap(); - a_height_last = a_height_last.increment(); - assert_eq!( - a_height_last, a_height_current, - "after client update, chain a did not advance" - ); - - // And also update the client on chain b. - let res = b_client.build_latest_update_client_and_send(); - assert!( - res.is_ok(), - "build_update_client_and_send failed (chain b) with error: {:?}", - res - ); - - let res = res.unwrap(); - assert!(matches!(res.last(), Some(IbcEvent::UpdateClient(_)))); - - let b_height_current = b_chain.query_latest_height().unwrap(); - b_height_last = b_height_last.increment(); - assert_eq!( - b_height_last, b_height_current, - "after client update, chain b did not advance" - ); - } - } - - /// Tests for `ForeignClient::new()`. - #[test] - fn foreign_client_create() { - let a_cfg = get_basic_chain_config("chain_a"); - let b_cfg = get_basic_chain_config("chain_b"); - - let rt = Arc::new(TokioRuntime::new().unwrap()); - let a_chain = - ChainRuntime::::spawn::(a_cfg, rt.clone()).unwrap(); - let b_chain = ChainRuntime::::spawn::(b_cfg, rt).unwrap(); - - // Instantiate the foreign clients on the two chains. - let res_client_on_a = ForeignClient::new(a_chain.clone(), b_chain.clone()); - assert!( - res_client_on_a.is_ok(), - "Client creation (on chain a) failed with error: {:?}", - res_client_on_a - ); - - let client_on_a = res_client_on_a.unwrap(); - let a_client = client_on_a.id; - - let res_client_on_b = ForeignClient::new(b_chain.clone(), a_chain.clone()); - assert!( - res_client_on_b.is_ok(), - "Client creation (on chain a) failed with error: {:?}", - res_client_on_b - ); - let client_on_b = res_client_on_b.unwrap(); - let b_client = client_on_b.id; - - // Now that the clients exists, we should be able to query its state - let b_client_state_res = b_chain.query_client_state( - QueryClientStateRequest { - client_id: b_client, - height: Height::default(), - }, - IncludeProof::No, - ); - assert!( - b_client_state_res.is_ok(), - "Client query (on chain b) failed with error: {:?}", - b_client_state_res - ); - - let a_client_state_res = a_chain.query_client_state( - QueryClientStateRequest { - client_id: a_client, - height: Height::default(), - }, - IncludeProof::No, - ); - assert!( - a_client_state_res.is_ok(), - "Client query (on chain a) failed with error: {:?}", - a_client_state_res - ); - } - - /// Tests for `ForeignClient::update()`. 
- #[test] - fn foreign_client_update() { - let a_cfg = get_basic_chain_config("chain_a"); - let b_cfg = get_basic_chain_config("chain_b"); - let mut _a_client_id = ClientId::from_str("client_on_a_forb").unwrap(); - let mut _b_client_id = ClientId::from_str("client_on_b_fora").unwrap(); - - let rt = Arc::new(TokioRuntime::new().unwrap()); - let a_chain = - ChainRuntime::::spawn::(a_cfg, rt.clone()).unwrap(); - let b_chain = ChainRuntime::::spawn::(b_cfg, rt).unwrap(); - - // Instantiate the foreign clients on the two chains. - let client_on_a_res = ForeignClient::new(a_chain.clone(), b_chain.clone()); - assert!( - client_on_a_res.is_ok(), - "Client creation (on chain a) failed with error: {:?}", - client_on_a_res - ); - let client_on_a = client_on_a_res.unwrap(); - - let client_on_b_res = ForeignClient::new(b_chain.clone(), a_chain.clone()); - assert!( - client_on_b_res.is_ok(), - "Client creation (on chain a) failed with error: {:?}", - client_on_b_res - ); - let client_on_b = client_on_b_res.unwrap(); - - let num_iterations = 5; - - let mut b_height_start = b_chain.query_latest_height().unwrap(); - let mut a_height_start = a_chain.query_latest_height().unwrap(); - - // Update each client - for _i in 1..num_iterations { - let res = client_on_a.update(); - assert!(res.is_ok(), "Client update for chain a failed {:?}", res); - - // Basic check that the height of the chain advanced - let a_height_current = a_chain.query_latest_height().unwrap(); - a_height_start = a_height_start.increment(); - assert_eq!( - a_height_start, a_height_current, - "after client update, chain a did not advance" - ); - - let res = client_on_b.update(); - assert!(res.is_ok(), "Client update for chain b failed {:?}", res); - - // Basic check that the height of the chain advanced - let b_height_current = b_chain.query_latest_height().unwrap(); - b_height_start = b_height_start.increment(); - assert_eq!( - b_height_start, b_height_current, - "after client update, chain b did not advance" - ); - } - } -} diff --git a/relayer/src/keyring.rs b/relayer/src/keyring.rs deleted file mode 100644 index a8361ac6b9..0000000000 --- a/relayer/src/keyring.rs +++ /dev/null @@ -1,499 +0,0 @@ -use alloc::collections::btree_map::BTreeMap as HashMap; -use std::ffi::OsStr; -use std::fs::{self, File}; -use std::path::{Path, PathBuf}; - -use crate::config::AddressType; -use bech32::{ToBase32, Variant}; -use bip39::{Language, Mnemonic, Seed}; -use bitcoin::{ - network::constants::Network, - secp256k1::{Message, Secp256k1, SecretKey}, - util::bip32::{DerivationPath, ExtendedPrivKey, ExtendedPubKey}, -}; -use hdpath::StandardHDPath; -use ibc::core::ics24_host::identifier::ChainId; -use k256::ecdsa::{signature::Signer, Signature, SigningKey}; -use ripemd160::Ripemd160; -use serde::{Deserialize, Serialize}; -use sha2::{Digest, Sha256}; -use tiny_keccak::{Hasher, Keccak}; - -use errors::Error; -pub use pub_key::EncodedPubKey; - -pub mod errors; -mod pub_key; - -pub type HDPath = StandardHDPath; - -pub const KEYSTORE_DEFAULT_FOLDER: &str = ".hermes/keys/"; -pub const KEYSTORE_DISK_BACKEND: &str = "keyring-test"; -pub const KEYSTORE_FILE_EXTENSION: &str = "json"; - -// /!\ /!\ /!\ /!\ /!\ /!\ /!\ /!\ /!\ /!\ /!\ /!\ /!\ /!\ /!\ -// WARNING: Changing this struct in backward incompatible way -// will force users to re-import their keys. 
-// /!\ /!\ /!\ /!\ /!\ /!\ /!\ /!\ /!\ /!\ /!\ /!\ /!\ /!\ /!\
-/// Key entry stores the Private Key and Public Key, as well as the address.
-#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
-pub struct KeyEntry {
-    /// Public key
-    pub public_key: ExtendedPubKey,
-
-    /// Private key
-    pub private_key: ExtendedPrivKey,
-
-    /// Account Bech32 format - TODO allow hrp
-    pub account: String,
-
-    /// Address
-    pub address: Vec<u8>,
-}
-
-/// JSON key seed file
-#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
-pub struct KeyFile {
-    pub name: String,
-    pub r#type: String,
-    pub address: String,
-    pub pubkey: String,
-    pub mnemonic: String,
-}
-
-impl KeyEntry {
-    pub fn from_key_file(key_file: KeyFile, hd_path: &HDPath) -> Result<Self, Error> {
-        // Decode the Bech32-encoded address from the key file
-        let keyfile_address_bytes = decode_bech32(&key_file.address)?;
-
-        let encoded_key: EncodedPubKey = key_file.pubkey.parse()?;
-        let mut keyfile_pubkey_bytes = encoded_key.into_bytes();
-
-        // Decode the private key from the mnemonic
-        let private_key = private_key_from_mnemonic(&key_file.mnemonic, hd_path)?;
-        let derived_pubkey = ExtendedPubKey::from_priv(&Secp256k1::new(), &private_key);
-        let derived_pubkey_bytes = derived_pubkey.to_pub().to_bytes();
-        assert!(derived_pubkey_bytes.len() <= keyfile_pubkey_bytes.len());
-
-        // FIXME: For some reason that is currently unclear, the public key decoded from
-        //        the keyfile contains a few extraneous leading bytes. To compare both
-        //        public keys, we therefore strip those leading bytes off and keep the
-        //        common parts.
-        let keyfile_pubkey_bytes =
-            keyfile_pubkey_bytes.split_off(keyfile_pubkey_bytes.len() - derived_pubkey_bytes.len());
-
-        // Ensure that the public key in the key file and the one extracted from the mnemonic match.
- if keyfile_pubkey_bytes != derived_pubkey_bytes { - Err(Error::public_key_mismatch( - keyfile_pubkey_bytes, - derived_pubkey_bytes, - )) - } else { - Ok(Self { - public_key: derived_pubkey, - private_key, - account: key_file.address, - address: keyfile_address_bytes, - }) - } - } -} - -pub trait KeyStore { - fn get_key(&self, key_name: &str) -> Result; - fn add_key(&mut self, key_name: &str, key_entry: KeyEntry) -> Result<(), Error>; - fn remove_key(&mut self, key_name: &str) -> Result<(), Error>; - fn keys(&self) -> Result, Error>; -} - -#[derive(Clone, Debug, Serialize, Deserialize)] -pub struct Memory { - account_prefix: String, - keys: HashMap, -} - -impl Memory { - pub fn new(account_prefix: String) -> Self { - Self { - account_prefix, - keys: HashMap::new(), - } - } -} - -impl KeyStore for Memory { - fn get_key(&self, key_name: &str) -> Result { - self.keys - .get(key_name) - .cloned() - .ok_or_else(Error::key_not_found) - } - - fn add_key(&mut self, key_name: &str, key_entry: KeyEntry) -> Result<(), Error> { - if self.keys.contains_key(key_name) { - Err(Error::key_already_exist()) - } else { - self.keys.insert(key_name.to_string(), key_entry); - - Ok(()) - } - } - - fn remove_key(&mut self, key_name: &str) -> Result<(), Error> { - self.keys - .remove(key_name) - .ok_or_else(Error::key_not_found)?; - - Ok(()) - } - - fn keys(&self) -> Result, Error> { - Ok(self - .keys - .iter() - .map(|(n, k)| (n.to_string(), k.clone())) - .collect()) - } -} - -#[derive(Clone, Debug, Serialize, Deserialize)] -pub struct Test { - account_prefix: String, - store: PathBuf, -} - -impl Test { - pub fn new(account_prefix: String, store: PathBuf) -> Self { - Self { - account_prefix, - store, - } - } -} - -impl KeyStore for Test { - fn get_key(&self, key_name: &str) -> Result { - let mut key_file = self.store.join(key_name); - key_file.set_extension(KEYSTORE_FILE_EXTENSION); - - if !key_file.as_path().exists() { - return Err(Error::key_file_not_found(format!("{}", key_file.display()))); - } - - let file = File::open(&key_file).map_err(|e| { - Error::key_file_io( - key_file.display().to_string(), - "failed to open file".to_string(), - e, - ) - })?; - - let key_entry = serde_json::from_reader(file) - .map_err(|e| Error::key_file_decode(format!("{}", key_file.display()), e))?; - - Ok(key_entry) - } - - fn add_key(&mut self, key_name: &str, key_entry: KeyEntry) -> Result<(), Error> { - let mut filename = self.store.join(key_name); - filename.set_extension(KEYSTORE_FILE_EXTENSION); - let file_path = filename.display().to_string(); - - let file = File::create(filename).map_err(|e| { - Error::key_file_io(file_path.clone(), "failed to create file".to_string(), e) - })?; - - serde_json::to_writer_pretty(file, &key_entry) - .map_err(|e| Error::key_file_encode(file_path, e))?; - - Ok(()) - } - - fn remove_key(&mut self, key_name: &str) -> Result<(), Error> { - let mut filename = self.store.join(key_name); - filename.set_extension(KEYSTORE_FILE_EXTENSION); - - fs::remove_file(filename.clone()) - .map_err(|e| Error::remove_io_fail(filename.display().to_string(), e))?; - - Ok(()) - } - - fn keys(&self) -> Result, Error> { - let dir = fs::read_dir(&self.store).map_err(|e| { - Error::key_file_io( - self.store.display().to_string(), - "failed to list keys".to_string(), - e, - ) - })?; - - let ext = OsStr::new(KEYSTORE_FILE_EXTENSION); - - dir.into_iter() - .flatten() - .map(|entry| entry.path()) - .filter(|path| path.extension() == Some(ext)) - .flat_map(|path| path.file_stem().map(OsStr::to_owned)) - .flat_map(|stem| 
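The `KeyStore` trait and its `Memory` backend above form a small map-backed store. A self-contained sketch of the same shape, with `KeyEntry` reduced to a `String` so the example compiles without the bitcoin/bip32 types:

use std::collections::BTreeMap;

type KeyEntry = String; // stand-in for the real struct (keys + address)

trait KeyStore {
    fn get_key(&self, name: &str) -> Result<KeyEntry, String>;
    fn add_key(&mut self, name: &str, entry: KeyEntry) -> Result<(), String>;
    fn remove_key(&mut self, name: &str) -> Result<(), String>;
    fn keys(&self) -> Result<Vec<(String, KeyEntry)>, String>;
}

struct Memory {
    keys: BTreeMap<String, KeyEntry>,
}

impl KeyStore for Memory {
    fn get_key(&self, name: &str) -> Result<KeyEntry, String> {
        self.keys.get(name).cloned().ok_or_else(|| "key not found".into())
    }
    fn add_key(&mut self, name: &str, entry: KeyEntry) -> Result<(), String> {
        if self.keys.contains_key(name) {
            return Err("key already exists".into());
        }
        self.keys.insert(name.to_string(), entry);
        Ok(())
    }
    fn remove_key(&mut self, name: &str) -> Result<(), String> {
        self.keys.remove(name).map(|_| ()).ok_or_else(|| "key not found".into())
    }
    fn keys(&self) -> Result<Vec<(String, KeyEntry)>, String> {
        Ok(self.keys.iter().map(|(n, k)| (n.clone(), k.clone())).collect())
    }
}

fn main() {
    let mut store = Memory { keys: BTreeMap::new() };
    store.add_key("relayer", "entry".to_string()).unwrap();
    println!("{:?}", store.keys().unwrap());
}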
stem.to_str().map(ToString::to_string)) - .map(|name| self.get_key(&name).map(|key| (name, key))) - .collect() - } -} - -#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)] -pub enum Store { - Memory, - Test, -} - -impl Default for Store { - fn default() -> Self { - Store::Test - } -} - -#[derive(Clone, Debug, Serialize, Deserialize)] -pub enum KeyRing { - Memory(Memory), - Test(Test), -} - -impl KeyRing { - pub fn new(store: Store, account_prefix: &str, chain_id: &ChainId) -> Result { - match store { - Store::Memory => Ok(Self::Memory(Memory::new(account_prefix.to_string()))), - - Store::Test => { - let keys_folder = disk_store_path(chain_id.as_str())?; - - // Create keys folder if it does not exist - fs::create_dir_all(&keys_folder).map_err(|e| { - Error::key_file_io( - keys_folder.display().to_string(), - "failed to create keys folder".to_string(), - e, - ) - })?; - - Ok(Self::Test(Test::new( - account_prefix.to_string(), - keys_folder, - ))) - } - } - } - - pub fn get_key(&self, key_name: &str) -> Result { - match self { - KeyRing::Memory(m) => m.get_key(key_name), - KeyRing::Test(d) => d.get_key(key_name), - } - } - - pub fn add_key(&mut self, key_name: &str, key_entry: KeyEntry) -> Result<(), Error> { - match self { - KeyRing::Memory(m) => m.add_key(key_name, key_entry), - KeyRing::Test(d) => d.add_key(key_name, key_entry), - } - } - - pub fn remove_key(&mut self, key_name: &str) -> Result<(), Error> { - match self { - KeyRing::Memory(m) => m.remove_key(key_name), - KeyRing::Test(d) => d.remove_key(key_name), - } - } - - pub fn keys(&self) -> Result, Error> { - match self { - KeyRing::Memory(m) => m.keys(), - KeyRing::Test(d) => d.keys(), - } - } - - /// Get key from seed file - pub fn key_from_seed_file( - &self, - key_file_content: &str, - hd_path: &HDPath, - ) -> Result { - let key_file: KeyFile = serde_json::from_str(key_file_content).map_err(Error::encode)?; - - KeyEntry::from_key_file(key_file, hd_path) - } - - /// Add a key entry in the store using a mnemonic. 
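`Test::keys` above enumerates the on-disk keystore by filtering directory entries on the `.json` extension and recovering the key name from the file stem. A std-only sketch of that iterator chain, without the JSON decoding step:

use std::ffi::OsStr;
use std::fs;
use std::path::Path;

const KEY_EXTENSION: &str = "json";

/// List the names of all `*.json` key files in a keystore folder,
/// mirroring the iterator chain in the deleted `Test::keys` above.
fn list_key_names(store: &Path) -> std::io::Result<Vec<String>> {
    let ext = OsStr::new(KEY_EXTENSION);
    let names = fs::read_dir(store)?
        .flatten()
        .map(|entry| entry.path())
        .filter(|path| path.extension() == Some(ext))
        .filter_map(|path| path.file_stem().and_then(OsStr::to_str).map(str::to_string))
        .collect();
    Ok(names)
}

fn main() -> std::io::Result<()> {
    let dir = std::env::temp_dir();
    println!("keys in {}: {:?}", dir.display(), list_key_names(&dir)?);
    Ok(())
}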
- pub fn key_from_mnemonic( - &self, - mnemonic_words: &str, - hd_path: &HDPath, - at: &AddressType, - ) -> Result { - // Get the private key from the mnemonic - let private_key = private_key_from_mnemonic(mnemonic_words, hd_path)?; - - // Get the public Key from the private key - let public_key = ExtendedPubKey::from_priv(&Secp256k1::new(), &private_key); - - // Get address from the public Key - let address = get_address(public_key, at); - - // Compute Bech32 account - let account = bech32::encode(self.account_prefix(), address.to_base32(), Variant::Bech32) - .map_err(Error::bech32)?; - - Ok(KeyEntry { - public_key, - private_key, - account, - address, - }) - } - - /// Sign a message - pub fn sign_msg( - &self, - key_name: &str, - msg: Vec, - address_type: &AddressType, - ) -> Result, Error> { - let key = self.get_key(key_name)?; - - sign_message(&key, msg, address_type) - } - - pub fn account_prefix(&self) -> &str { - match self { - KeyRing::Memory(m) => &m.account_prefix, - KeyRing::Test(d) => &d.account_prefix, - } - } -} - -/// Sign a message -pub fn sign_message( - key: &KeyEntry, - msg: Vec, - address_type: &AddressType, -) -> Result, Error> { - let private_key_bytes = key.private_key.to_priv().to_bytes(); - match address_type { - AddressType::Ethermint { ref pk_type } if pk_type.ends_with(".ethsecp256k1.PubKey") => { - let hash = keccak256_hash(msg.as_slice()); - let s = Secp256k1::signing_only(); - // SAFETY: hash is 32 bytes, as expected in `Message::from_slice` -- see `keccak256_hash`, hence `unwrap` - let sign_msg = Message::from_slice(hash.as_slice()).unwrap(); - let key = SecretKey::from_slice(private_key_bytes.as_slice()) - .map_err(Error::invalid_key_raw)?; - let (_, sig_bytes) = s - .sign_ecdsa_recoverable(&sign_msg, &key) - .serialize_compact(); - Ok(sig_bytes.to_vec()) - } - AddressType::Cosmos | AddressType::Ethermint { .. } => { - let signing_key = - SigningKey::from_bytes(private_key_bytes.as_slice()).map_err(Error::invalid_key)?; - let signature: Signature = signing_key.sign(&msg); - Ok(signature.as_ref().to_vec()) - } - } -} - -/// Decode an extended private key from a mnemonic -fn private_key_from_mnemonic( - mnemonic_words: &str, - hd_path: &StandardHDPath, -) -> Result { - let mnemonic = Mnemonic::from_phrase(mnemonic_words, Language::English) - .map_err(Error::invalid_mnemonic)?; - - let seed = Seed::new(&mnemonic, ""); - - let base_key = ExtendedPrivKey::new_master(Network::Bitcoin, seed.as_bytes()) - .map_err(Error::private_key)?; - - let private_key = base_key - .derive_priv( - &Secp256k1::new(), - &standard_path_to_derivation_path(hd_path), - ) - .map_err(Error::private_key)?; - - Ok(private_key) -} - -/// Return an address from a Public Key -fn get_address(pk: ExtendedPubKey, at: &AddressType) -> Vec { - match at { - AddressType::Ethermint { ref pk_type } if pk_type.ends_with(".ethsecp256k1.PubKey") => { - let public_key = pk.public_key.serialize_uncompressed(); - // 0x04 is [SECP256K1_TAG_PUBKEY_UNCOMPRESSED](https://github.com/bitcoin-core/secp256k1/blob/d7ec49a6893751f068275cc8ddf4993ef7f31756/include/secp256k1.h#L196) - debug_assert_eq!(public_key[0], 0x04); - - let output = keccak256_hash(&public_key[1..]); - // right-most 20-bytes from the 32-byte keccak hash - // (see https://kobl.one/blog/create-full-ethereum-keypair-and-address/) - output[12..].to_vec() - } - AddressType::Cosmos | AddressType::Ethermint { .. 
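The Ethermint branch of `get_address` above keccak-hashes the uncompressed public key minus its `0x04` tag and keeps the right-most 20 bytes. The sketch below reproduces only that slicing logic; `keccak256_stub` is deliberately not a real hash, and the Cosmos branch is left unimplemented here:

/// Stub standing in for keccak256; the real code uses `tiny_keccak`.
fn keccak256_stub(bytes: &[u8]) -> Vec<u8> {
    // NOT a real hash: just pads/truncates to 32 bytes for illustration.
    let mut out = bytes.to_vec();
    out.resize(32, 0);
    out
}

enum AddressType {
    Cosmos,
    Ethermint,
}

/// Derive an account address from an uncompressed secp256k1 public key (65 bytes, 0x04-prefixed).
fn get_address(uncompressed_pubkey: &[u8], at: &AddressType) -> Vec<u8> {
    match at {
        AddressType::Ethermint => {
            // Drop the 0x04 tag, hash the remaining 64 bytes, keep the right-most 20 bytes.
            debug_assert_eq!(uncompressed_pubkey[0], 0x04);
            let digest = keccak256_stub(&uncompressed_pubkey[1..]);
            digest[12..].to_vec()
        }
        AddressType::Cosmos => {
            // The real code hashes the *compressed* key with SHA-256 and then RIPEMD-160.
            unimplemented!("see the sha256 + ripemd160 path in the code above")
        }
    }
}

fn main() {
    let mut pk = vec![0x04];
    pk.extend(std::iter::repeat(0xab).take(64));
    println!("{} address bytes", get_address(&pk, &AddressType::Ethermint).len());
}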
} => { - let mut hasher = Sha256::new(); - hasher.update(pk.to_pub().to_bytes().as_slice()); - - // Read hash digest over the public key bytes & consume hasher - let pk_hash = hasher.finalize(); - - // Plug the hash result into the next crypto hash function. - use ripemd160::Digest; - let mut rip_hasher = Ripemd160::new(); - rip_hasher.update(pk_hash); - let rip_result = rip_hasher.finalize(); - - rip_result.to_vec() - } - } -} - -fn decode_bech32(input: &str) -> Result, Error> { - use bech32::FromBase32; - - let bytes = bech32::decode(input) - .and_then(|(_, data, _)| Vec::from_base32(&data)) - .map_err(Error::bech32_account)?; - - Ok(bytes) -} - -fn disk_store_path(folder_name: &str) -> Result { - let home = dirs_next::home_dir().ok_or_else(Error::home_location_unavailable)?; - - let folder = Path::new(home.as_path()) - .join(KEYSTORE_DEFAULT_FOLDER) - .join(folder_name) - .join(KEYSTORE_DISK_BACKEND); - - Ok(folder) -} - -fn keccak256_hash(bytes: &[u8]) -> Vec { - let mut hasher = Keccak::v256(); - hasher.update(bytes); - let mut resp = vec![0u8; 32]; - hasher.finalize(&mut resp); - resp -} - -fn standard_path_to_derivation_path(path: &StandardHDPath) -> DerivationPath { - use bitcoin::util::bip32::ChildNumber; - - let child_numbers = vec![ - ChildNumber::from_hardened_idx(path.purpose().as_value().as_number()) - .expect("Purpose is not Hardened"), - ChildNumber::from_hardened_idx(path.coin_type()).expect("Coin Type is not Hardened"), - ChildNumber::from_hardened_idx(path.account()).expect("Account is not Hardened"), - ChildNumber::from_normal_idx(path.change()).expect("Change is Hardened"), - ChildNumber::from_normal_idx(path.index()).expect("Index is Hardened"), - ]; - - DerivationPath::from(child_numbers) -} diff --git a/relayer/src/keyring/errors.rs b/relayer/src/keyring/errors.rs deleted file mode 100644 index 4e06f876d4..0000000000 --- a/relayer/src/keyring/errors.rs +++ /dev/null @@ -1,116 +0,0 @@ -use flex_error::{define_error, DisplayOnly, TraceError}; -use std::io::Error as IoError; - -define_error! { - Error { - InvalidKey - [ TraceError ] - |_| { "invalid key: could not build signing key from private key bytes" }, - - InvalidKeyRaw - [ TraceError ] - |_| { "invalid key: could not build signing key from private key bytes" }, - - KeyNotFound - |_| { "key not found" }, - - KeyAlreadyExist - |_| { "key already exist" }, - - InvalidMnemonic - [ DisplayOnly ] - |_| { "invalid mnemonic" }, - - PrivateKey - [ TraceError ] - |_| { "cannot generate private key" }, - - UnsupportedPublicKey - { key_type: String } - |e| { - format!("unsupported public key: {}. 
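`disk_store_path` above simply concatenates the home directory, the default keystore folder, the chain id and the backend name. A sketch using the `HOME` environment variable instead of the `dirs_next` crate used in the deleted code:

use std::path::PathBuf;

const KEYSTORE_DEFAULT_FOLDER: &str = ".hermes/keys/";
const KEYSTORE_DISK_BACKEND: &str = "keyring-test";

/// Build `$HOME/.hermes/keys/<chain_id>/keyring-test`.
fn disk_store_path(chain_id: &str) -> Result<PathBuf, String> {
    let home = std::env::var_os("HOME").ok_or_else(|| "home location unavailable".to_string())?;
    Ok(PathBuf::from(home)
        .join(KEYSTORE_DEFAULT_FOLDER)
        .join(chain_id)
        .join(KEYSTORE_DISK_BACKEND))
}

fn main() {
    println!("{:?}", disk_store_path("ibc-0"));
}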
only secp256k1 pub keys are currently supported", - e.key_type) - }, - - EncodedPublicKey - { - key: String, - } - [ TraceError ] - |e| { - format!("cannot deserialize the encoded public key {0}", - e.key) - }, - - Bech32Account - [ TraceError ] - |_| { "cannot generate bech32 account" }, - - Bech32 - [ TraceError ] - |_| { "bech32 error" }, - - PublicKeyMismatch - { keyfile: Vec, mnemonic: Vec } - |_| { "mismatch between the public key in the key file and the public key in the mnemonic" }, - - KeyFileEncode - { file_path: String } - [ TraceError ] - |e| { - format!("error encoding key file at '{}'", - e.file_path) - }, - - Encode - [ TraceError ] - |_| { "error encoding key" }, - - KeyFileDecode - { file_path: String } - [ TraceError ] - |e| { - format!("error decoding key file at '{}'", - e.file_path) - }, - - KeyFileIo - { - file_path: String, - description: String, - } - [ TraceError ] - |e| { - format!("I/O error on key file at '{}': {}", - e.file_path, e.description) - }, - - KeyFileNotFound - { file_path: String } - |e| { - format!("cannot find key file at '{}'", - e.file_path) - }, - - HomeLocationUnavailable - |_| { "home location is unavailable" }, - - RemoveIoFail - { - file_path: String, - } - [ TraceError ] - |e| { - format!("I/O error while removing key file at location '{}'", - e.file_path) - }, - - InvalidHdPath - { - path: String, - } - |e| { - format!("invalid HD path: {0}", e.path) - }, - } -} diff --git a/relayer/src/keyring/pub_key.rs b/relayer/src/keyring/pub_key.rs deleted file mode 100644 index 730e4a0f08..0000000000 --- a/relayer/src/keyring/pub_key.rs +++ /dev/null @@ -1,96 +0,0 @@ -use core::str::FromStr; - -use serde::{Deserialize, Deserializer}; -use subtle_encoding::base64; -use tracing::{error, trace}; - -use super::decode_bech32; -use super::errors::Error; - -#[derive(Debug)] -pub enum EncodedPubKey { - Bech32(Vec), - Proto(ProtoAny), -} - -impl EncodedPubKey { - pub fn into_bytes(self) -> Vec { - match self { - EncodedPubKey::Bech32(vec) => vec, - EncodedPubKey::Proto(proto) => proto.key, - } - } -} - -/// A variant of [`EncodedPubKey`]. -/// A Protobuf `Any`, having support for deserialization from -/// JSON + base64 (see `deserialize_key`). -#[derive(Debug, Deserialize)] -pub struct ProtoAny { - #[serde(alias = "@type")] - tpe: String, - - #[serde(deserialize_with = "deserialize_key")] - key: Vec, -} - -/// This method is the workhorse for deserializing -/// the `key` field from a public key. -fn deserialize_key<'de, D>(deser: D) -> Result, D::Error> -where - D: Deserializer<'de>, -{ - // The key is a byte array that is base64-encoded - // and then marshalled into a JSON String. - let based64_encoded: Result = Deserialize::deserialize(deser); - let value = base64::decode(based64_encoded?) - .map_err(|e| serde::de::Error::custom(format!("error in decoding: {}", e)))?; - - Ok(value) -} - -impl FromStr for EncodedPubKey { - type Err = Error; - - fn from_str(s: &str) -> Result { - // Try to deserialize into a JSON Value. - let maybe_json: Result = serde_json::from_str(s); - - match maybe_json { - Ok(proto) => { - trace!( - "deserialized the encoded pub key into a ProtoAny of type '{}'", - proto.tpe - ); - - // Ethermint pubkey types: - // e.g. 
"/ethermint.crypto.v1alpha1.ethsecp256k1.PubKey", "/injective.crypto.v1beta1.ethsecp256k1.PubKey" - // "/ethermint.crypto.v1beta1.ethsecp256k1.PubKey", "/ethermint.crypto.v1.ethsecp256k1.PubKey", - // "/cosmos.crypto.ethsecp256k1.PubKey" - // TODO: to be restricted after the Cosmos SDK release with ethsecp256k1 - // https://github.com/cosmos/cosmos-sdk/pull/9981 - if proto.tpe != "/cosmos.crypto.secp256k1.PubKey" - && !proto.tpe.ends_with(".ethsecp256k1.PubKey") - { - Err(Error::unsupported_public_key(proto.tpe)) - } else { - Ok(EncodedPubKey::Proto(proto)) - } - } - Err(e) if e.classify() == serde_json::error::Category::Syntax => { - // Input is not syntactically-correct JSON. - // Attempt to decode via Bech32, for backwards compatibility with the old format. - trace!("using Bech32 to interpret the encoded pub key '{}'", s); - Ok(EncodedPubKey::Bech32(decode_bech32(s)?)) - } - Err(e) => { - error!( - "the encoded pub key is not in a valid format: '{}', error: {}", - s, e - ); - - Err(Error::encoded_public_key(s.to_string(), e)) - } - } - } -} diff --git a/relayer/src/lib.rs b/relayer/src/lib.rs deleted file mode 100644 index 800986d9c4..0000000000 --- a/relayer/src/lib.rs +++ /dev/null @@ -1,47 +0,0 @@ -#![forbid(unsafe_code)] -#![deny( - warnings, - trivial_casts, - trivial_numeric_casts, - unused_import_braces, - unused_qualifications, - rust_2018_idioms -)] -#![allow(clippy::too_many_arguments)] -// TODO: disable unwraps: -// https://github.com/informalsystems/ibc-rs/issues/987 -// #![cfg_attr(not(test), deny(clippy::unwrap_used))] - -//! IBC Relayer implementation as a library. -//! -//! For the IBC relayer binary, please see [Hermes] (`ibc-relayer-cli` crate). -//! -//! [Hermes]: https://docs.rs/ibc-relayer-cli/0.2.0/ - -extern crate alloc; - -pub mod account; -pub mod cache; -pub mod chain; -pub mod channel; -pub mod config; -pub mod connection; -pub mod error; -pub mod event; -pub mod foreign_client; -pub mod keyring; -pub mod light_client; -pub mod link; -pub mod macros; -pub mod object; -pub mod path; -pub mod registry; -pub mod rest; -pub mod sdk_error; -pub mod spawn; -pub mod supervisor; -pub mod telemetry; -pub mod transfer; -pub mod upgrade_chain; -pub mod util; -pub mod worker; diff --git a/relayer/src/light_client.rs b/relayer/src/light_client.rs deleted file mode 100644 index 5e87959f4f..0000000000 --- a/relayer/src/light_client.rs +++ /dev/null @@ -1,54 +0,0 @@ -use ibc::core::ics02_client::client_state::AnyClientState; -use ibc::core::ics02_client::misbehaviour::MisbehaviourEvidence; - -use crate::chain::endpoint::ChainEndpoint; -use crate::error; -use ibc::core::ics02_client::events::UpdateClient; - -pub mod tendermint; - -#[cfg(test)] -pub mod mock; - -/// Defines a light block from the point of view of the relayer. -pub trait LightBlock: Send + Sync { - fn signed_header(&self) -> &C::Header; -} - -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct Verified { - /// Verified target - pub target: H, - /// Supporting headers needed to verify `target` - pub supporting: Vec, -} - -/// Defines a client from the point of view of the relayer. -pub trait LightClient: Send + Sync { - /// Fetch and verify a header, and return its minimal supporting set. - fn header_and_minimal_set( - &mut self, - trusted: ibc::Height, - target: ibc::Height, - client_state: &AnyClientState, - ) -> Result, error::Error>; - - /// Fetch a header from the chain at the given height and verify it. 
- fn verify( - &mut self, - trusted: ibc::Height, - target: ibc::Height, - client_state: &AnyClientState, - ) -> Result, error::Error>; - - /// Given a client update event that includes the header used in a client update, - /// look for misbehaviour by fetching a header at same or latest height. - fn check_misbehaviour( - &mut self, - update: UpdateClient, - client_state: &AnyClientState, - ) -> Result, error::Error>; - - /// Fetch a header from the chain at the given height, without verifying it - fn fetch(&mut self, height: ibc::Height) -> Result; -} diff --git a/relayer/src/light_client/mock.rs b/relayer/src/light_client/mock.rs deleted file mode 100644 index de056e340e..0000000000 --- a/relayer/src/light_client/mock.rs +++ /dev/null @@ -1,86 +0,0 @@ -use ibc::core::ics02_client::misbehaviour::MisbehaviourEvidence; -use tendermint_testgen::light_block::TmLightBlock; - -use ibc::clients::ics07_tendermint::header::Header as TmHeader; -use ibc::core::ics02_client::client_state::AnyClientState; -use ibc::core::ics02_client::events::UpdateClient; -use ibc::core::ics24_host::identifier::ChainId; -use ibc::mock::host::HostBlock; -use ibc::Height; - -use crate::chain::endpoint::ChainEndpoint; -use crate::chain::mock::MockChain; -use crate::error::Error; - -use super::Verified; -use ibc::timestamp::Timestamp; - -/// A light client serving a mock chain. -pub struct LightClient { - chain_id: ChainId, -} - -impl LightClient { - pub fn new(chain: &MockChain) -> LightClient { - LightClient { - chain_id: chain.id().clone(), - } - } - - /// Returns a LightBlock at the requested height `h`. - fn light_block(&self, h: Height) -> TmLightBlock { - HostBlock::generate_tm_block(self.chain_id.clone(), h.revision_height, Timestamp::now()) - } -} - -impl super::LightClient for LightClient { - fn verify( - &mut self, - _trusted: Height, - target: Height, - _client_state: &AnyClientState, - ) -> Result, Error> { - Ok(Verified { - target: self.light_block(target), - supporting: Vec::new(), - }) - } - - fn fetch(&mut self, height: Height) -> Result { - Ok(self.light_block(height)) - } - - fn check_misbehaviour( - &mut self, - _update: UpdateClient, - _client_state: &AnyClientState, - ) -> Result, Error> { - unimplemented!() - } - - fn header_and_minimal_set( - &mut self, - trusted_height: Height, - target_height: Height, - client_state: &AnyClientState, - ) -> Result, Error> { - let Verified { target, supporting } = - self.verify(trusted_height, target_height, client_state)?; - - assert!(supporting.is_empty()); - - let succ_trusted = self.fetch(trusted_height.increment())?; - - let target = TmHeader { - signed_header: target.signed_header, - validator_set: target.validators, - trusted_height, - trusted_validator_set: succ_trusted.validators, - }; - - Ok(Verified { - target, - supporting: Vec::new(), - }) - } -} diff --git a/relayer/src/light_client/tendermint.rs b/relayer/src/light_client/tendermint.rs deleted file mode 100644 index 2e6ca90377..0000000000 --- a/relayer/src/light_client/tendermint.rs +++ /dev/null @@ -1,296 +0,0 @@ -use itertools::Itertools; - -use tendermint_light_client::{ - components::{self, io::AtHeight}, - light_client::LightClient as TmLightClient, - state::State as LightClientState, - store::{memory::MemoryStore, LightStore}, -}; -use tendermint_light_client_verifier::operations; -use tendermint_light_client_verifier::options::Options as TmOptions; -use tendermint_light_client_verifier::types::{Height as TMHeight, LightBlock, PeerId, Status}; -use 
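The mock light client above fabricates blocks and always returns an empty supporting set. A trimmed-down, hypothetical version of the `Verified` wrapper and the light-client interface, with `u64` heights standing in for blocks:

/// Verified target plus the supporting headers needed to verify it.
#[derive(Clone, Debug, PartialEq, Eq)]
struct Verified<H> {
    target: H,
    supporting: Vec<H>,
}

/// Hypothetical, trimmed-down version of the relayer's light client interface.
trait LightClient {
    type Block;
    fn verify(&mut self, trusted: u64, target: u64) -> Result<Verified<Self::Block>, String>;
    fn fetch(&mut self, height: u64) -> Result<Self::Block, String>;
}

/// A mock client that simply fabricates a block for any requested height.
struct MockLightClient;

impl LightClient for MockLightClient {
    type Block = u64; // a "block" is just its height here

    fn verify(&mut self, _trusted: u64, target: u64) -> Result<Verified<u64>, String> {
        Ok(Verified { target, supporting: Vec::new() })
    }

    fn fetch(&mut self, height: u64) -> Result<u64, String> {
        Ok(height)
    }
}

fn main() {
    let mut lc = MockLightClient;
    let verified = lc.verify(10, 20).unwrap();
    assert!(verified.supporting.is_empty());
    println!("{:?}", verified);
}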
tendermint_light_client_verifier::ProdVerifier; -use tendermint_rpc as rpc; - -use ibc::{ - clients::ics07_tendermint::{ - header::{headers_compatible, Header as TmHeader}, - misbehaviour::Misbehaviour as TmMisbehaviour, - }, - core::{ - ics02_client::{ - client_state::AnyClientState, - client_type::ClientType, - events::UpdateClient, - header::{AnyHeader, Header}, - misbehaviour::{Misbehaviour, MisbehaviourEvidence}, - }, - ics24_host::identifier::ChainId, - }, - downcast, -}; -use tracing::trace; - -use crate::{chain::cosmos::CosmosSdkChain, config::ChainConfig, error::Error}; - -use super::Verified; - -pub struct LightClient { - chain_id: ChainId, - peer_id: PeerId, - io: components::io::ProdIo, -} - -impl super::LightClient for LightClient { - fn header_and_minimal_set( - &mut self, - trusted: ibc::Height, - target: ibc::Height, - client_state: &AnyClientState, - ) -> Result, Error> { - let Verified { target, supporting } = self.verify(trusted, target, client_state)?; - let (target, supporting) = self.adjust_headers(trusted, target, supporting)?; - Ok(Verified { target, supporting }) - } - - fn verify( - &mut self, - trusted: ibc::Height, - target: ibc::Height, - client_state: &AnyClientState, - ) -> Result, Error> { - trace!(%trusted, %target, "light client verification"); - - let target_height = - TMHeight::try_from(target.revision_height).map_err(Error::invalid_height)?; - - let client = self.prepare_client(client_state)?; - let mut state = self.prepare_state(trusted)?; - - // Verify the target header - let target = client - .verify_to_target(target_height, &mut state) - .map_err(|e| Error::light_client_verification(self.chain_id.to_string(), e))?; - - // Collect the verification trace for the target block - let target_trace = state.get_trace(target.height()); - - // Compute the minimal supporting set, sorted by ascending height - let supporting = target_trace - .into_iter() - .filter(|lb| lb.height() != target.height()) - .unique_by(LightBlock::height) - .sorted_by_key(LightBlock::height) - .collect_vec(); - - Ok(Verified { target, supporting }) - } - - fn fetch(&mut self, height: ibc::Height) -> Result { - trace!(%height, "fetching header"); - - let height = TMHeight::try_from(height.revision_height).map_err(Error::invalid_height)?; - - self.fetch_light_block(AtHeight::At(height)) - } - - /// Given a client update event that includes the header used in a client update, - /// look for misbehaviour by fetching a header at same or latest height. 
- /// - /// ## TODO - /// - [ ] Return intermediate headers as well - fn check_misbehaviour( - &mut self, - update: UpdateClient, - client_state: &AnyClientState, - ) -> Result, Error> { - crate::time!("light client check_misbehaviour"); - - let update_header = update.header.clone().ok_or_else(|| { - Error::misbehaviour(format!( - "missing header in update client event {}", - self.chain_id - )) - })?; - - let update_header = downcast!(update_header => AnyHeader::Tendermint).ok_or_else(|| { - Error::misbehaviour(format!( - "header type incompatible for chain {}", - self.chain_id - )) - })?; - - let latest_chain_block = self.fetch_light_block(AtHeight::Highest)?; - let latest_chain_height = - ibc::Height::new(self.chain_id.version(), latest_chain_block.height().into()); - - // set the target height to the minimum between the update height and latest chain height - let target_height = core::cmp::min(update.consensus_height(), latest_chain_height); - let trusted_height = update_header.trusted_height; - - // TODO - check that a consensus state at trusted_height still exists on-chain, - // currently we don't have access to Cosmos chain query from here - - if trusted_height >= latest_chain_height { - // Can happen with multiple FLA attacks, we return no evidence and hope to catch this in - // the next iteration. e.g: - // existing consensus states: 1000, 900, 300, 200 (only known by the caller) - // latest_chain_height = 300 - // target_height = 1000 - // trusted_height = 900 - return Ok(None); - } - - let Verified { target, supporting } = - self.verify(trusted_height, target_height, client_state)?; - - if !headers_compatible(&target.signed_header, &update_header.signed_header) { - let (witness, supporting) = self.adjust_headers(trusted_height, target, supporting)?; - - let misbehaviour = TmMisbehaviour { - client_id: update.client_id().clone(), - header1: update_header, - header2: witness, - } - .wrap_any(); - - Ok(Some(MisbehaviourEvidence { - misbehaviour, - supporting_headers: supporting.into_iter().map(TmHeader::wrap_any).collect(), - })) - } else { - Ok(None) - } - } -} - -impl LightClient { - pub fn from_config(config: &ChainConfig, peer_id: PeerId) -> Result { - let rpc_client = rpc::HttpClient::new(config.rpc_addr.clone()) - .map_err(|e| Error::rpc(config.rpc_addr.clone(), e))?; - - let io = components::io::ProdIo::new(peer_id, rpc_client, Some(config.rpc_timeout)); - - Ok(Self { - chain_id: config.id.clone(), - peer_id, - io, - }) - } - - fn prepare_client(&self, client_state: &AnyClientState) -> Result { - let clock = components::clock::SystemClock; - let hasher = operations::hasher::ProdHasher; - let verifier = ProdVerifier::default(); - let scheduler = components::scheduler::basic_bisecting_schedule; - - let client_state = - downcast!(client_state => AnyClientState::Tendermint).ok_or_else(|| { - Error::client_type_mismatch(ClientType::Tendermint, client_state.client_type()) - })?; - - let params = TmOptions { - trust_threshold: client_state - .trust_level - .try_into() - .map_err(Error::light_client_state)?, - trusting_period: client_state.trusting_period, - clock_drift: client_state.max_clock_drift, - }; - - Ok(TmLightClient::new( - self.peer_id, - params, - clock, - scheduler, - verifier, - hasher, - self.io.clone(), - )) - } - - fn prepare_state(&self, trusted: ibc::Height) -> Result { - let trusted_height = - TMHeight::try_from(trusted.revision_height).map_err(Error::invalid_height)?; - - let trusted_block = self.fetch_light_block(AtHeight::At(trusted_height))?; - - let mut store 
= MemoryStore::new(); - store.insert(trusted_block, Status::Trusted); - - Ok(LightClientState::new(store)) - } - - fn fetch_light_block(&self, height: AtHeight) -> Result { - use tendermint_light_client::components::io::Io; - - self.io - .fetch_light_block(height) - .map_err(|e| Error::light_client_io(self.chain_id.to_string(), e)) - } - - fn adjust_headers( - &mut self, - trusted_height: ibc::Height, - target: LightBlock, - supporting: Vec, - ) -> Result<(TmHeader, Vec), Error> { - use super::LightClient; - - trace!( - trusted = %trusted_height, target = %target.height(), - "adjusting headers with {} supporting headers", supporting.len() - ); - - // Get the light block at trusted_height + 1 from chain. - // - // NOTE: This is needed to get the next validator set. While there is a next validator set - // in the light block at trusted height, the proposer is not known/set in this set. - let trusted_validator_set = self.fetch(trusted_height.increment())?.validators; - - let mut supporting_headers = Vec::with_capacity(supporting.len()); - - let mut current_trusted_height = trusted_height; - let mut current_trusted_validators = trusted_validator_set.clone(); - - for support in supporting { - let header = TmHeader { - signed_header: support.signed_header.clone(), - validator_set: support.validators, - trusted_height: current_trusted_height, - trusted_validator_set: current_trusted_validators, - }; - - // This header is now considered to be the currently trusted header - current_trusted_height = header.height(); - - // Therefore we can now trust the next validator set, see NOTE above. - current_trusted_validators = self.fetch(header.height().increment())?.validators; - - supporting_headers.push(header); - } - - // a) Set the trusted height of the target header to the height of the previous - // supporting header if any, or to the initial trusting height otherwise. - // - // b) Set the trusted validators of the target header to the validators of the successor to - // the last supporting header if any, or to the initial trusted validators otherwise. 
- let (latest_trusted_height, latest_trusted_validator_set) = match supporting_headers.last() - { - Some(prev_header) => { - let prev_succ = self.fetch(prev_header.height().increment())?; - (prev_header.height(), prev_succ.validators) - } - None => (trusted_height, trusted_validator_set), - }; - - let target_header = TmHeader { - signed_header: target.signed_header, - validator_set: target.validators, - trusted_height: latest_trusted_height, - trusted_validator_set: latest_trusted_validator_set, - }; - - Ok((target_header, supporting_headers)) - } -} diff --git a/relayer/src/link.rs b/relayer/src/link.rs deleted file mode 100644 index f5854116f3..0000000000 --- a/relayer/src/link.rs +++ /dev/null @@ -1,162 +0,0 @@ -use ibc::{ - core::{ - ics03_connection::connection::State as ConnectionState, - ics04_channel::channel::State as ChannelState, - ics24_host::identifier::{ChannelId, PortChannelId, PortId}, - }, - Height, -}; - -use crate::chain::requests::QueryChannelRequest; -use crate::chain::{counterparty::check_channel_counterparty, requests::QueryConnectionRequest}; -use crate::chain::{handle::ChainHandle, requests::IncludeProof}; -use crate::channel::{Channel, ChannelSide}; -use crate::link::error::LinkError; - -pub mod cli; -pub mod error; -pub mod operational_data; - -mod packet_events; -mod pending; -mod relay_path; -mod relay_sender; -mod relay_summary; -mod tx_hashes; - -use tx_hashes::TxHashes; - -// Re-export the telemetries summary -pub use relay_summary::RelaySummary; - -pub use relay_path::{RelayPath, Resubmit}; - -#[derive(Clone, Debug)] -pub struct LinkParameters { - pub src_port_id: PortId, - pub src_channel_id: ChannelId, -} - -pub struct Link { - pub a_to_b: RelayPath, -} - -impl Link { - pub fn new( - channel: Channel, - with_tx_confirmation: bool, - ) -> Result { - Ok(Self { - a_to_b: RelayPath::new(channel, with_tx_confirmation)?, - }) - } - - pub fn new_from_opts( - a_chain: ChainA, - b_chain: ChainB, - opts: LinkParameters, - with_tx_confirmation: bool, - ) -> Result, LinkError> { - // Check that the packet's channel on source chain is Open - let a_channel_id = &opts.src_channel_id; - let a_port_id = &opts.src_port_id; - let (a_channel, _) = a_chain - .query_channel( - QueryChannelRequest { - port_id: opts.src_port_id.clone(), - channel_id: opts.src_channel_id, - height: Height::default(), - }, - IncludeProof::No, - ) - .map_err(|e| { - LinkError::channel_not_found(a_port_id.clone(), *a_channel_id, a_chain.id(), e) - })?; - - if !a_channel.state_matches(&ChannelState::Open) - && !a_channel.state_matches(&ChannelState::Closed) - { - return Err(LinkError::invalid_channel_state( - *a_channel_id, - a_chain.id(), - )); - } - - let b_channel_id = a_channel - .counterparty() - .channel_id - .ok_or_else(|| LinkError::counterparty_channel_not_found(*a_channel_id))?; - - if a_channel.connection_hops().is_empty() { - return Err(LinkError::no_connection_hop(*a_channel_id, a_chain.id())); - } - - // Check that the counterparty details on the destination chain matches the source chain - check_channel_counterparty( - b_chain.clone(), - &PortChannelId { - channel_id: b_channel_id, - port_id: a_channel.counterparty().port_id.clone(), - }, - &PortChannelId { - channel_id: *a_channel_id, - port_id: opts.src_port_id.clone(), - }, - ) - .map_err(LinkError::initialization)?; - - // Check the underlying connection - let a_connection_id = a_channel.connection_hops()[0].clone(); - let (a_connection, _) = a_chain - .query_connection( - QueryConnectionRequest { - connection_id: 
a_connection_id.clone(), - height: Height::zero(), - }, - IncludeProof::No, - ) - .map_err(LinkError::relayer)?; - - if !a_connection.state_matches(&ConnectionState::Open) { - return Err(LinkError::channel_not_opened(*a_channel_id, a_chain.id())); - } - - let channel = Channel { - ordering: a_channel.ordering, - a_side: ChannelSide::new( - a_chain, - a_connection.client_id().clone(), - a_connection_id, - opts.src_port_id.clone(), - Some(opts.src_channel_id), - None, - ), - b_side: ChannelSide::new( - b_chain, - a_connection.counterparty().client_id().clone(), - a_connection.counterparty().connection_id().unwrap().clone(), - a_channel.counterparty().port_id.clone(), - Some(b_channel_id), - None, - ), - connection_delay: a_connection.delay_period(), - }; - - Link::new(channel, with_tx_confirmation) - } - - /// Constructs a link around the channel that is reverse to the channel - /// in this link. - pub fn reverse(&self, with_tx_confirmation: bool) -> Result, LinkError> { - let opts = LinkParameters { - src_port_id: self.a_to_b.dst_port_id().clone(), - src_channel_id: *self.a_to_b.dst_channel_id(), - }; - let chain_b = self.a_to_b.dst_chain().clone(); - let chain_a = self.a_to_b.src_chain().clone(); - - // Some of the checks and initializations may be redundant; - // going slowly, but reliably. - Link::new_from_opts(chain_b, chain_a, opts, with_tx_confirmation) - } -} diff --git a/relayer/src/link/cli.rs b/relayer/src/link/cli.rs deleted file mode 100644 index f41e5a1b1e..0000000000 --- a/relayer/src/link/cli.rs +++ /dev/null @@ -1,235 +0,0 @@ -use std::convert::TryInto; -use std::thread; -use std::time::{Duration, Instant}; - -use ibc::core::ics04_channel::packet::Sequence; -use tracing::{error_span, info}; - -use ibc::events::IbcEvent; -use ibc::Height; - -use crate::chain::counterparty::{unreceived_acknowledgements, unreceived_packets}; -use crate::chain::handle::ChainHandle; -use crate::chain::tracking::TrackingId; -use crate::link::error::LinkError; -use crate::link::operational_data::{OperationalData, TrackedEvents}; -use crate::link::packet_events::{ - query_packet_events_with, query_send_packet_events, query_write_ack_events, -}; -use crate::link::relay_path::RelayPath; -use crate::link::relay_sender::SyncSender; -use crate::link::Link; -use crate::path::PathIdentifiers; - -impl RelayPath { - /// Fetches an operational data that has fulfilled its predefined delay period. May _block_ - /// waiting for the delay period to pass. - /// Returns `Ok(None)` if there is no operational data scheduled. - pub(crate) fn fetch_scheduled_operational_data( - &self, - ) -> Result, LinkError> { - if let Some(odata) = self.src_operational_data.pop_front() { - Ok(Some(wait_for_conn_delay( - odata, - &|| self.src_time_latest(), - &|| self.src_max_block_time(), - &|| self.src_latest_height(), - )?)) - } else if let Some(odata) = self.dst_operational_data.pop_front() { - Ok(Some(wait_for_conn_delay( - odata, - &|| self.dst_time_latest(), - &|| self.dst_max_block_time(), - &|| self.dst_latest_height(), - )?)) - } else { - Ok(None) - } - } - - /// Given a vector of [`OperationalData`], this method proceeds to relaying - /// all the messages therein. It accumulates all events generated in the - /// mutable vector of [`IbcEvent`]s. 
- pub fn relay_and_accumulate_results( - &self, - from: Vec, - results: &mut Vec, - ) -> Result<(), LinkError> { - for od in from { - let mut last_res = self.relay_from_operational_data::(od)?; - results.append(&mut last_res.events); - } - - Ok(()) - } -} - -impl Link { - /// Implements the `packet-recv` CLI - pub fn relay_recv_packet_and_timeout_messages(&self) -> Result, LinkError> { - let _span = error_span!( - "PacketRecvCmd", - src_chain = %self.a_to_b.src_chain().id(), - src_port = %self.a_to_b.src_port_id(), - src_channel = %self.a_to_b.src_channel_id(), - dst_chain = %self.a_to_b.dst_chain().id(), - ) - .entered(); - - // Find the sequence numbers of unreceived packets - let (sequences, src_response_height) = unreceived_packets( - self.a_to_b.dst_chain(), - self.a_to_b.src_chain(), - &self.a_to_b.path_id, - ) - .map_err(LinkError::supervisor)?; - - if sequences.is_empty() { - return Ok(vec![]); - } - - info!("unreceived packets found: {} ", sequences.len()); - - self.relay_packet_messages( - sequences, - src_response_height, - query_send_packet_events, - TrackingId::new_static("packet-recv"), - ) - } - - /// Implements the `packet-ack` CLI - pub fn relay_ack_packet_messages(&self) -> Result, LinkError> { - let _span = error_span!( - "PacketAckCmd", - src_chain = %self.a_to_b.src_chain().id(), - src_port = %self.a_to_b.src_port_id(), - src_channel = %self.a_to_b.src_channel_id(), - dst_chain = %self.a_to_b.dst_chain().id(), - ) - .entered(); - - // Find the sequence numbers of unreceived acknowledgements - let (sequences, src_response_height) = unreceived_acknowledgements( - self.a_to_b.dst_chain(), - self.a_to_b.src_chain(), - &self.a_to_b.path_id, - ) - .map_err(LinkError::supervisor)?; - - if sequences.is_empty() { - return Ok(vec![]); - } - - info!("unreceived acknowledgements found: {} ", sequences.len()); - - self.relay_packet_messages( - sequences, - src_response_height, - query_write_ack_events, - TrackingId::new_static("packet-ack"), - ) - } - - fn relay_packet_messages( - &self, - sequences: Vec, - src_response_height: Height, - query_fn: impl Fn( - &ChainA, - &PathIdentifiers, - Vec, - Height, - ) -> Result, LinkError>, - tracking_id: TrackingId, - ) -> Result, LinkError> { - dbg!(src_response_height); - let mut results = vec![]; - for events_chunk in query_packet_events_with( - &sequences, - src_response_height, - self.a_to_b.src_chain(), - &self.a_to_b.path_id, - query_fn, - ) { - let tracked_events = TrackedEvents::new(events_chunk, tracking_id); - self.a_to_b.events_to_operational_data(tracked_events)?; - - // In case of zero connection delay, the op. data will already be ready - let (src_ods, dst_ods) = self.a_to_b.try_fetch_scheduled_operational_data()?; - self.a_to_b - .relay_and_accumulate_results(Vec::from(src_ods), &mut results)?; - self.a_to_b - .relay_and_accumulate_results(Vec::from(dst_ods), &mut results)?; - } - - // In case of non-zero connection delay, we block here waiting for all op.data - // until the connection delay elapses - while let Some(odata) = self.a_to_b.fetch_scheduled_operational_data()? 
{ - self.a_to_b - .relay_and_accumulate_results(vec![odata], &mut results)?; - } - - Ok(results) - } -} - -fn wait_for_conn_delay( - odata: OperationalData, - chain_time: &ChainTime, - max_expected_time_per_block: &MaxBlockTime, - latest_height: &LatestHeight, -) -> Result -where - ChainTime: Fn() -> Result, - MaxBlockTime: Fn() -> Result, - LatestHeight: Fn() -> Result, -{ - let (time_left, blocks_left) = - odata.conn_delay_remaining(chain_time, max_expected_time_per_block, latest_height)?; - - match (time_left, blocks_left) { - (Duration::ZERO, 0) => { - info!( - "ready to fetch a scheduled op. data with batch of size {} targeting {}", - odata.batch.len(), - odata.target, - ); - Ok(odata) - } - (Duration::ZERO, blocks_left) => { - info!( - "waiting ({:?} blocks left) for a scheduled op. data with batch of size {} targeting {}", - blocks_left, - odata.batch.len(), - odata.target, - ); - - let blocks_left: u32 = blocks_left.try_into().expect("blocks_left > u32::MAX"); - - // Wait until the delay period passes - thread::sleep(blocks_left * max_expected_time_per_block()?); - - Ok(odata) - } - (time_left, _) => { - info!( - "waiting ({:?} left) for a scheduled op. data with batch of size {} targeting {}", - time_left, - odata.batch.len(), - odata.target, - ); - - // Wait until the delay period passes - thread::sleep(time_left); - - // `blocks_left` maybe non-zero, so recurse to recheck that all delays are handled. - wait_for_conn_delay( - odata, - chain_time, - max_expected_time_per_block, - latest_height, - ) - } - } -} diff --git a/relayer/src/link/error.rs b/relayer/src/link/error.rs deleted file mode 100644 index bf2f0803b2..0000000000 --- a/relayer/src/link/error.rs +++ /dev/null @@ -1,164 +0,0 @@ -use flex_error::define_error; -use ibc::core::ics02_client::error::Error as Ics02Error; -use ibc::core::ics24_host::identifier::{ChainId, ChannelId, PortId}; -use ibc::events::IbcEvent; -use ibc::Height; - -use crate::channel::ChannelError; -use crate::connection::ConnectionError; -use crate::error::Error; -use crate::foreign_client::{ForeignClientError, HasExpiredOrFrozenError}; -use crate::supervisor::Error as SupervisorError; -use crate::transfer::TransferError; - -define_error! 
{ - LinkError { - Relayer - [ Error ] - |_| { "failed with underlying error" }, - - Supervisor - [ SupervisorError ] - |_| { "error originating from the supervisor" }, - - Initialization - [ ChannelError ] - |_| { "link initialization failed during channel counterparty verification" }, - - PacketProofsConstructor - { chain_id: ChainId } - [ Error ] - |e| { - format!("failed to construct packet proofs for chain {0}", e.chain_id) - }, - - Query - { chain_id: ChainId } - [ Error ] - |e| { - format!("failed during query to chain id {0}", e.chain_id) - }, - - Channel - [ ChannelError ] - |_| { "channel error" }, - - ChannelNotFound - { - port_id: PortId, - channel_id: ChannelId, - chain_id: ChainId, - } - [ Error ] - |e| { - format!("channel {}/{} does not exist on chain {}", - e.port_id, e.channel_id, e.chain_id) - }, - - Connection - [ ConnectionError ] - |_| { "connection error" }, - - Client - [ ForeignClientError ] - |_| { "failed during a client operation" }, - - Packet - [ TransferError ] - |_| { "packet error" }, - - OldPacketClearingFailed - |_| { "clearing of old packets failed" }, - - Send - { event: IbcEvent } - |e| { - format!("chain error when sending messages: {0}", e.event) - }, - - MissingChannelId - { chain_id: ChainId } - |e| { - format!("missing channel_id on chain {}", e.chain_id) - }, - - Signer - { - chain_id: ChainId - } - [ Error ] - |e| { - format!("could not retrieve signer from src chain {}", e.chain_id) - }, - - DecrementHeight - { height: Height } - [ Ics02Error ] - |e| { - format!("Cannot clear packets @height {}, because this height cannot be decremented", e.height) - }, - - UnexpectedEvent - { event: IbcEvent } - |e| { - format!("unexpected query tx response: {}", e.event) - }, - - InvalidChannelState - { - channel_id: ChannelId, - chain_id: ChainId, - } - |e| { - format!("channel {} on chain {} not in open or close state when packets and timeouts can be relayed", - e.channel_id, e.chain_id) - }, - - ChannelNotOpened - { - channel_id: ChannelId, - chain_id: ChainId, - } - |e| { - format!("connection for channel {} on chain {} is not in open state", - e.channel_id, e.chain_id) - }, - - CounterpartyChannelNotFound - { - channel_id: ChannelId, - } - |e| { - format!("counterparty channel id not found for {}", - e.channel_id) - }, - - NoConnectionHop - { - channel_id: ChannelId, - chain_id: ChainId, - } - |e| { - format!("channel {} on chain {} has no connection hops", - e.channel_id, e.chain_id) - }, - - UpdateClientFailed - |_| { "failed to update client" }, - } -} - -impl HasExpiredOrFrozenError for LinkErrorDetail { - fn is_expired_or_frozen_error(&self) -> bool { - match self { - Self::Client(e) => e.source.is_expired_or_frozen_error(), - _ => false, - } - } -} - -impl HasExpiredOrFrozenError for LinkError { - fn is_expired_or_frozen_error(&self) -> bool { - self.detail().is_expired_or_frozen_error() - } -} diff --git a/relayer/src/link/operational_data.rs b/relayer/src/link/operational_data.rs deleted file mode 100644 index c1797a6fd5..0000000000 --- a/relayer/src/link/operational_data.rs +++ /dev/null @@ -1,401 +0,0 @@ -use core::fmt; -use core::iter; -use std::time::{Duration, Instant}; - -use ibc_proto::google::protobuf::Any; -use tracing::{debug, info}; - -use ibc::core::ics02_client::client_state::ClientState; -use ibc::core::ics04_channel::context::calculate_block_delay; -use ibc::events::IbcEvent; -use ibc::Height; - -use crate::chain::handle::ChainHandle; -use crate::chain::requests::IncludeProof; -use crate::chain::requests::QueryClientStateRequest; 
-use crate::chain::tracking::TrackedMsgs; -use crate::chain::tracking::TrackingId; -use crate::link::error::LinkError; -use crate::link::RelayPath; - -/// The chain that the events associated with a piece of [`OperationalData`] are bound for. -#[derive(Clone, Copy, PartialEq)] -pub enum OperationalDataTarget { - /// The chain which generated the events associated with the `OperationalData`. - Source, - /// The chain receiving the events associated with the `OperationalData``. - Destination, -} - -impl fmt::Display for OperationalDataTarget { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - OperationalDataTarget::Source => write!(f, "Source"), - OperationalDataTarget::Destination => write!(f, "Destination"), - } - } -} - -/// A set of [`IbcEvent`]s that have an associated -/// tracking number to ensure better observability. -pub struct TrackedEvents { - events: Vec, - tracking_id: TrackingId, -} - -impl TrackedEvents { - pub fn new(events: Vec, tracking_id: TrackingId) -> Self { - Self { - events, - tracking_id, - } - } - - pub fn is_empty(&self) -> bool { - self.events.is_empty() - } - - pub fn events(&self) -> &[IbcEvent] { - &self.events - } - - pub fn tracking_id(&self) -> TrackingId { - self.tracking_id - } - - pub fn len(&self) -> usize { - self.events.len() - } -} - -/// A packet message that is prepared for sending -/// to a chain, but has not been sent yet. -/// -/// Comprises the proto-encoded packet message, -/// alongside the event which generated it. -#[derive(Clone)] -pub struct TransitMessage { - pub event: IbcEvent, - pub msg: Any, -} - -/// Holds all the necessary information for handling a batch of in-transit messages. This includes -/// an event received from a chain along with any other packets related to the event (i.e. -/// 'receive' or 'timeout' packets) that the relayer has to submit in response to the event. -#[derive(Clone)] -pub struct OperationalData { - /// Represents the height for the proofs in all the messages. Note that this is the height - /// at which the proofs are queried. For example, for Tendermint chains, a client consensus - /// state at `proofs_height + 1` must exist on-chain in order to verify the proofs. - pub proofs_height: Height, - /// The batch of messages associated with this piece of operational data. - pub batch: Vec, - /// Represents the target of the packet messages, either the source or the destination - /// chain. - pub target: OperationalDataTarget, - /// A unique ID for tracking this batch of events starting from when they were received - /// until the transactions corresponding to those events is submitted. - pub tracking_id: TrackingId, - /// Stores `Some(ConnectionDelay)` if the delay is non-zero and `None` otherwise - connection_delay: Option, -} - -impl OperationalData { - pub fn new( - proofs_height: Height, - target: OperationalDataTarget, - tracking_id: TrackingId, - connection_delay: Duration, - ) -> Self { - let connection_delay = if !connection_delay.is_zero() { - Some(ConnectionDelay::new(connection_delay)) - } else { - None - }; - - OperationalData { - proofs_height, - batch: vec![], - target, - connection_delay, - tracking_id, - } - } - - pub fn push(&mut self, msg: TransitMessage) { - self.batch.push(msg) - } - - /// Returns displayable information on the operation's data. 
- pub fn info(&self) -> OperationalInfo { - OperationalInfo { - tracking_id: self.tracking_id, - target: self.target, - proofs_height: self.proofs_height, - batch_len: self.batch.len(), - } - } - - /// Transforms `self` into the list of events accompanied with the tracking ID. - pub fn into_events(self) -> TrackedEvents { - let events = self.batch.into_iter().map(|gm| gm.event).collect(); - - TrackedEvents { - events, - tracking_id: self.tracking_id, - } - } - - /// Returns all the messages in this operational - /// data, plus prepending the client update message - /// if necessary. - pub fn assemble_msgs( - &self, - relay_path: &RelayPath, - ) -> Result { - // For zero delay we prepend the client update msgs. - let client_update_msg = if !self.conn_delay_needed() { - let update_height = self.proofs_height.increment(); - - debug!( - "prepending {} client update at height {}", - self.target, update_height - ); - - // Fetch the client update message. Vector may be empty if the client already has the header - // for the requested height. - let mut client_update_opt = match self.target { - OperationalDataTarget::Source => { - relay_path.build_update_client_on_src(update_height)? - } - OperationalDataTarget::Destination => { - relay_path.build_update_client_on_dst(update_height)? - } - }; - - client_update_opt.pop() - } else { - let (client_state, _) = match self.target { - OperationalDataTarget::Source => relay_path - .src_chain() - .query_client_state( - QueryClientStateRequest { - client_id: relay_path.src_client_id().clone(), - height: Height::zero(), - }, - IncludeProof::No, - ) - .map_err(|e| LinkError::query(relay_path.src_chain().id(), e))?, - - OperationalDataTarget::Destination => relay_path - .dst_chain() - .query_client_state( - QueryClientStateRequest { - client_id: relay_path.dst_client_id().clone(), - height: Height::zero(), - }, - IncludeProof::No, - ) - .map_err(|e| LinkError::query(relay_path.dst_chain().id(), e))?, - }; - - if client_state.is_frozen() { - return Ok(TrackedMsgs::new(vec![], self.tracking_id)); - } else { - None - } - }; - - let msgs: Vec = match client_update_msg { - Some(client_update) => iter::once(client_update) - .chain(self.batch.iter().map(|gm| gm.msg.clone())) - .collect(), - None => self.batch.iter().map(|gm| gm.msg.clone()).collect(), - }; - - let tm = TrackedMsgs::new(msgs, self.tracking_id); - - info!("assembled batch of {} message(s)", tm.messages().len()); - - Ok(tm) - } - - /// Returns true iff the batch contains a packet event - fn has_packet_msgs(&self) -> bool { - self.batch.iter().any(|msg| msg.event.packet().is_some()) - } - - /// Returns the `connection_delay` iff the connection delay for this relaying path is non-zero - /// and the `batch` contains packet messages. - fn get_delay_if_needed(&self) -> Option<&ConnectionDelay> { - self.connection_delay - .as_ref() - .filter(|_| self.has_packet_msgs()) - } - - /// Returns `true` iff the connection delay for this relaying path is non-zero and `op_data` - /// contains packet messages. 
- pub fn conn_delay_needed(&self) -> bool { - self.get_delay_if_needed().is_some() - } - - /// Sets the scheduled time that is used for connection-delay calculations - pub fn set_scheduled_time(&mut self, scheduled_time: Instant) { - if let Some(mut delay) = self.connection_delay.as_mut() { - delay.scheduled_time = scheduled_time; - } - } - - /// Sets the update height that is used for connection-delay calculations - pub fn set_update_height(&mut self, update_height: Height) { - if let Some(mut delay) = self.connection_delay.as_mut() { - delay.update_height = Some(update_height); - } - } - - /// Returns `Ok(remaining-delay)` on success or `LinkError` if the input closure fails. - fn conn_time_delay_remaining( - &self, - chain_time: &ChainTime, - ) -> Result - where - ChainTime: Fn() -> Result, - { - if let Some(delay) = self.get_delay_if_needed() { - Ok(delay.conn_time_delay_remaining(chain_time()?)) - } else { - Ok(Duration::ZERO) - } - } - - /// Returns `Ok(remaining-delay)` on success or `LinkError` if an input closure fails. - fn conn_block_delay_remaining( - &self, - max_expected_time_per_block: &MaxBlockTime, - latest_height: &LatestHeight, - ) -> Result - where - MaxBlockTime: Fn() -> Result, - LatestHeight: Fn() -> Result, - { - if let Some(delay) = self.get_delay_if_needed() { - let block_delay = delay.conn_block_delay(max_expected_time_per_block()?); - Ok(delay.conn_block_delay_remaining(block_delay, latest_height()?)) - } else { - Ok(0) - } - } - - pub fn has_conn_delay_elapsed( - &self, - chain_time: &ChainTime, - max_expected_time_per_block: &MaxBlockTime, - latest_height: &LatestHeight, - ) -> Result - where - ChainTime: Fn() -> Result, - MaxBlockTime: Fn() -> Result, - LatestHeight: Fn() -> Result, - { - Ok(self.conn_time_delay_remaining(chain_time)?.is_zero() - && self.conn_block_delay_remaining(max_expected_time_per_block, latest_height)? == 0) - } - - pub fn conn_delay_remaining( - &self, - chain_time: &ChainTime, - max_expected_time_per_block: &MaxBlockTime, - latest_height: &LatestHeight, - ) -> Result<(Duration, u64), LinkError> - where - ChainTime: Fn() -> Result, - MaxBlockTime: Fn() -> Result, - LatestHeight: Fn() -> Result, - { - Ok(( - self.conn_time_delay_remaining(chain_time)?, - self.conn_block_delay_remaining(max_expected_time_per_block, latest_height)?, - )) - } -} - -/// A struct that holds everything that is required to calculate and deal with the connection-delay -/// feature. -#[derive(Clone)] -struct ConnectionDelay { - delay: Duration, - scheduled_time: Instant, - update_height: Option, -} - -impl ConnectionDelay { - fn new(delay: Duration) -> Self { - Self { - delay, - scheduled_time: Instant::now(), - update_height: None, - } - } - - /// Returns `remaining-delay` if connection-delay hasn't elapsed and `Duration::ZERO` otherwise. - fn conn_time_delay_remaining(&self, chain_time: Instant) -> Duration { - // since chain time instant is relative to relayer's current time, it is possible that - // `scheduled_time` is later (by nano secs) than `chain_time`, hence the call to - // `saturating_duration_since()`. - let elapsed = chain_time.saturating_duration_since(self.scheduled_time); - if elapsed >= self.delay { - Duration::ZERO - } else { - self.delay - elapsed - } - } - - /// Returns `remaining-delay` if connection-delay hasn't elapsed and `0` otherwise. 
- fn conn_block_delay_remaining(&self, block_delay: u64, latest_height: Height) -> u64 { - let acceptable_height = self - .update_height - .expect("processed height not set") - .add(block_delay); - if latest_height >= acceptable_height { - 0 - } else { - debug_assert!(acceptable_height.revision_number == latest_height.revision_number); - acceptable_height.revision_height - latest_height.revision_height - } - } - - /// Calculates and returns the block-delay based on the `max_expected_time_per_block` - fn conn_block_delay(&self, max_expected_time_per_block: Duration) -> u64 { - calculate_block_delay(self.delay, max_expected_time_per_block) - } -} - -/// A lightweight informational data structure that can be extracted -/// out of [`OperationalData`] for e.g. logging purposes. -pub struct OperationalInfo { - tracking_id: TrackingId, - target: OperationalDataTarget, - proofs_height: Height, - batch_len: usize, -} - -impl OperationalInfo { - pub fn target(&self) -> OperationalDataTarget { - self.target - } - - /// Returns the length of the assembled batch of in-transit messages. - pub fn batch_len(&self) -> usize { - self.batch_len - } -} - -impl fmt::Display for OperationalInfo { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!( - f, - "{} ->{} @{}; len={}", - self.tracking_id, self.target, self.proofs_height, self.batch_len, - ) - } -} diff --git a/relayer/src/link/packet_events.rs b/relayer/src/link/packet_events.rs deleted file mode 100644 index ba8ed36833..0000000000 --- a/relayer/src/link/packet_events.rs +++ /dev/null @@ -1,135 +0,0 @@ -//! Utility methods for querying packet event data. - -use tracing::{info, span, trace, warn, Level}; - -use ibc::core::ics04_channel::channel::QueryPacketEventDataRequest; -use ibc::core::ics04_channel::packet::Sequence; -use ibc::events::{IbcEvent, WithBlockDataType}; -use ibc::query::{QueryBlockRequest, QueryTxRequest}; -use ibc::Height; - -use crate::chain::handle::ChainHandle; -use crate::link::error::LinkError; -use crate::path::PathIdentifiers; - -/// Limit on how many query results should be expected. -pub const QUERY_RESULT_LIMIT: usize = 50; - -/// Returns an iterator on batches of packet events. -pub fn query_packet_events_with<'a, ChainA>( - sequence_nrs: &'a [Sequence], - query_height: Height, - src_chain: &'a ChainA, - path: &'a PathIdentifiers, - query_fn: impl Fn(&ChainA, &PathIdentifiers, Vec, Height) -> Result, LinkError> - + 'a, -) -> impl Iterator> + 'a -where - ChainA: ChainHandle, -{ - let events_total_count = sequence_nrs.len(); - let mut events_left_count = events_total_count; - - sequence_nrs - .chunks(QUERY_RESULT_LIMIT) - .map_while(move |c| { - let sequences_nrs_chunk = c.to_vec(); - match query_fn(src_chain, path, sequences_nrs_chunk, query_height) { - Ok(mut events) => { - events_left_count -= c.len(); - info!(events_total = %events_total_count, events_left = %events_left_count, "pulled packet data for {} events;", events.len()); - - for event in events.iter_mut() { - event.set_height(query_height); - } - - Some(events) - }, - Err(e) => { - warn!("encountered query failure while pulling packet data: {}", e); - None - } - } - }) -} - -/// Returns relevant packet events for building RecvPacket and timeout messages -/// for the given vector of packet [`Sequence`] numbers. 
-pub fn query_send_packet_events( - src_chain: &ChainA, - path: &PathIdentifiers, - sequences: Vec, - src_query_height: Height, -) -> Result, LinkError> { - let mut events_result = vec![]; - let _span = span!(Level::DEBUG, "query_send_packet_events", h = %src_query_height).entered(); - - let mut query = QueryPacketEventDataRequest { - event_id: WithBlockDataType::SendPacket, - source_port_id: path.counterparty_port_id.clone(), - source_channel_id: path.counterparty_channel_id, - destination_port_id: path.port_id.clone(), - destination_channel_id: path.channel_id, - sequences, - height: src_query_height, - }; - - let tx_events = src_chain - .query_txs(QueryTxRequest::Packet(query.clone())) - .map_err(LinkError::relayer)?; - - let recvd_sequences: Vec = tx_events - .iter() - .filter_map(|ev| match ev { - IbcEvent::SendPacket(ref send_ev) => Some(send_ev.packet.sequence), - IbcEvent::WriteAcknowledgement(ref ack_ev) => Some(ack_ev.packet.sequence), - _ => None, - }) - .collect(); - - query.sequences.retain(|seq| !recvd_sequences.contains(seq)); - - let (start_block_events, end_block_events) = if !query.sequences.is_empty() { - src_chain - .query_blocks(QueryBlockRequest::Packet(query)) - .map_err(LinkError::relayer)? - } else { - Default::default() - }; - - trace!("start_block_events {:?}", start_block_events); - trace!("tx_events {:?}", tx_events); - trace!("end_block_events {:?}", end_block_events); - - // events must be ordered in the following fashion - - // start-block events followed by tx-events followed by end-block events - events_result.extend(start_block_events); - events_result.extend(tx_events); - events_result.extend(end_block_events); - - Ok(events_result) -} - -/// Returns packet event data for building ack messages for the -/// given list of [`Sequence`] numbers. -pub fn query_write_ack_events( - src_chain: &ChainA, - path: &PathIdentifiers, - sequences: Vec, - src_query_height: Height, -) -> Result, LinkError> { - // TODO(Adi): Would be good to make use of generics. - let events_result = src_chain - .query_txs(QueryTxRequest::Packet(QueryPacketEventDataRequest { - event_id: WithBlockDataType::WriteAck, - source_port_id: path.port_id.clone(), - source_channel_id: path.channel_id, - destination_port_id: path.counterparty_port_id.clone(), - destination_channel_id: path.counterparty_channel_id, - sequences, - height: src_query_height, - })) - .map_err(|e| LinkError::query(src_chain.id(), e))?; - - Ok(events_result) -} diff --git a/relayer/src/link/pending.rs b/relayer/src/link/pending.rs deleted file mode 100644 index 9086877614..0000000000 --- a/relayer/src/link/pending.rs +++ /dev/null @@ -1,290 +0,0 @@ -use core::iter::Iterator; -use core::time::Duration; -use std::time::Instant; - -use tracing::{debug, error, trace, trace_span}; - -use ibc::core::ics24_host::identifier::{ChainId, ChannelId, PortId}; -use ibc::events::IbcEvent; -use ibc::query::{QueryTxHash, QueryTxRequest}; - -use crate::chain::tracking::TrackingId; -use crate::error::Error as RelayerError; -use crate::link::{error::LinkError, RelayPath}; -use crate::telemetry; -use crate::util::queue::Queue; -use crate::{ - chain::handle::ChainHandle, - link::{operational_data::OperationalData, relay_sender::AsyncReply, RelaySummary, TxHashes}, -}; - -pub const TIMEOUT: Duration = Duration::from_secs(300); - -/// A wrapper over an [`OperationalData`] that is pending. -/// Additionally holds all the necessary information -/// to query for confirmations: -/// - hashes for all transactions in that op. 
data, -/// - the target chain to query for confirmations, -/// - timestamp to track time-outs and declare an -/// operational data as pending. -#[derive(Clone)] -pub struct PendingData { - pub original_od: OperationalData, - pub tx_hashes: TxHashes, - pub submit_time: Instant, - pub error_events: Vec, -} - -impl PendingData { - pub fn tracking_id(&self) -> TrackingId { - self.original_od.tracking_id - } -} - -/// Stores all pending data -/// and tries to confirm them asynchronously. -pub struct PendingTxs { - pub chain: Chain, - pub channel_id: ChannelId, - pub port_id: PortId, - pub counterparty_chain_id: ChainId, - pub pending_queue: Queue, -} - -impl PendingTxs { - pub fn new( - chain: Chain, - channel_id: ChannelId, - port_id: PortId, - counterparty_chain_id: ChainId, - ) -> Self { - Self { - chain, - channel_id, - port_id, - counterparty_chain_id, - pending_queue: Queue::new(), - } - } -} - -impl PendingTxs { - pub fn chain_id(&self) -> ChainId { - self.chain.id() - } - - /// Insert a new pending transaction to the back of the queue. - pub fn insert_new_pending_tx(&self, r: AsyncReply, od: OperationalData) { - let mut tx_hashes = Vec::new(); - let mut error_events = Vec::new(); - - for response in r.responses.into_iter() { - if response.code.is_err() { - // If the response is an error, we do not want to check for the - // transaction confirmation status because it is never going to - // be committed. Instead we convert it into an error event and - // store it to be returned in the RelaySummary after all other - // transactions have been confirmed. - // This is not ideal, but is just to follow the previous synchronous - // behavior of handling the OperationalData. - - let span = trace_span!( - "inserting new pending txs", - chain = %self.chain_id(), - counterparty_chain = %self.counterparty_chain_id, - port = %self.port_id, - channel = %self.channel_id, - ); - - let _guard = span.enter(); - - trace!( - "putting error response in error event queue: {:?} ", - response - ); - - let error_event = IbcEvent::ChainError(format!( - "deliver_tx on chain {} for Tx hash {} reports error: code={:?}, log={:?}", - self.chain_id(), - response.hash, - response.code, - response.log - )); - error_events.push(error_event); - } else { - tx_hashes.push(response.hash); - } - } - - let u = PendingData { - original_od: od, - tx_hashes: TxHashes(tx_hashes), - submit_time: Instant::now(), - error_events, - }; - - self.pending_queue.push_back(u); - } - - fn check_tx_events(&self, tx_hashes: &TxHashes) -> Result>, RelayerError> { - let mut all_events = Vec::new(); - for hash in &tx_hashes.0 { - let mut events = self - .chain - .query_txs(QueryTxRequest::Transaction(QueryTxHash(*hash)))?; - - if events.is_empty() { - return Ok(None); - } else { - all_events.append(&mut events) - } - } - Ok(Some(all_events)) - } - - /// Try and process one pending transaction within the given timeout duration if one - /// is available. - /// - /// A `resubmit` closure is provided when the clear interval for packets is 0. If this closure - /// is provided, the pending transactions that fail to process within the given timeout duration - /// are resubmitted following the logic specified by the closure. - pub fn process_pending( - &self, - timeout: Duration, - relay_path: &RelayPath, - resubmit: Option Result>, - ) -> Result, LinkError> { - // We process pending transactions in a FIFO manner, so take from - // the front of the queue. 
- if let Some(pending) = self.pending_queue.pop_front() { - let tx_hashes = &pending.tx_hashes; - let submit_time = &pending.submit_time; - - if tx_hashes.0.is_empty() { - return Ok(Some(RelaySummary::from_events(pending.error_events))); - } - - let span = trace_span!( - "processing pending tx", - chain = %self.chain_id(), - counterparty_chain = %self.counterparty_chain_id, - port = %self.port_id, - channel = %self.channel_id, - ); - - let _guard = span.enter(); - - // Process the given pending transaction. - trace!("trying to confirm {} ", tx_hashes); - - // Check for TX events for the given pending transaction hashes. - let events_result = self.check_tx_events(tx_hashes); - let res = match events_result { - Ok(None) => { - // There is no events for the associated transactions. - // This means the transaction has not yet been committed. - - trace!("transaction is not yet committed: {} ", tx_hashes); - - if submit_time.elapsed() > timeout { - // The submission time for the transaction has exceeded the - // timeout threshold. Returning Outcome::Timeout for the - // relayer to resubmit the transaction to the chain again. - error!("timed out while confirming {}", tx_hashes); - - match resubmit { - Some(f) => { - // The pending tx needs to be resubmitted. This involves replacing the tx's - // stale operational data with a fresh copy and then applying the `resubmit` - // closure to it. - let new_od = relay_path - .regenerate_operational_data(pending.original_od.clone()); - - trace!("regenerated operational data for {}", tx_hashes); - - match new_od.map(f) { - Some(Ok(reply)) => { - self.insert_new_pending_tx(reply, pending.original_od); - Ok(None) - } - Some(Err(e)) => { - self.pending_queue.push_back(pending); - Err(e) - } - None => { - // No operational data was regenerated; nothing to resubmit - Ok(None) - } - } - } - None => { - // `clear_interval != 0` such that resubmission has been disabled - Ok(None) - } - } - } else { - // Reinsert the pending transaction, this time - // to the back of the queue so that we process other - // pending transactions first in the meanwhile. - self.pending_queue.push_back(pending); - Ok(None) - } - } - Ok(Some(events)) => { - // We get a list of events for the transaction hashes, - // Meaning the transaction has been committed successfully - // to the chain. - - debug!( - tracking_id = %pending.tracking_id(), - elapsed = ?pending.submit_time.elapsed(), - tx_hashes = %tx_hashes, - "transactions confirmed", - ); - - telemetry!( - tx_confirmed, - pending.tracking_id(), - &self.chain.id(), - &self.channel_id, - &self.port_id, - &self.counterparty_chain_id - ); - - // Convert the events to RelaySummary and return them. - let mut summary = RelaySummary::from_events(events); - summary.extend(RelaySummary::from_events(pending.error_events)); - - Ok(Some(summary)) - } - Err(e) => { - // There are errors querying for the transaction hashes. - // This may be temporary errors when the relayer is communicating - // with the chain endpoint. - - error!( - "error querying for tx hashes {}: {}. will retry again later", - tx_hashes, e - ); - - // Push it to the back of the pending queue to process it - // again at a later time. 
- self.pending_queue.push_back(pending); - - Err(LinkError::relayer(e)) - } - }; - - if !self.pending_queue.is_empty() { - trace!( - "total pending transactions left: {}", - self.pending_queue.len() - ); - } - - res - } else { - Ok(None) - } - } -} diff --git a/relayer/src/link/relay_path.rs b/relayer/src/link/relay_path.rs deleted file mode 100644 index 5a2ea75b89..0000000000 --- a/relayer/src/link/relay_path.rs +++ /dev/null @@ -1,1717 +0,0 @@ -use alloc::collections::BTreeMap as HashMap; -use alloc::collections::VecDeque; -use std::ops::Sub; -use std::time::{Duration, Instant}; - -use ibc_proto::google::protobuf::Any; -use itertools::Itertools; -use tracing::{debug, error, info, span, trace, warn, Level}; - -use crate::chain::counterparty::unreceived_acknowledgements; -use crate::chain::counterparty::unreceived_packets; -use crate::chain::endpoint::ChainStatus; -use crate::chain::handle::ChainHandle; -use crate::chain::requests::IncludeProof; -use crate::chain::requests::QueryChannelRequest; -use crate::chain::requests::QueryHostConsensusStateRequest; -use crate::chain::requests::QueryNextSequenceReceiveRequest; -use crate::chain::requests::QueryPacketCommitmentRequest; -use crate::chain::requests::QueryUnreceivedAcksRequest; -use crate::chain::requests::QueryUnreceivedPacketsRequest; -use crate::chain::tracking::TrackedMsgs; -use crate::chain::tracking::TrackingId; -use crate::channel::error::ChannelError; -use crate::channel::Channel; -use crate::event::monitor::EventBatch; -use crate::foreign_client::{ForeignClient, ForeignClientError}; -use crate::link::error::{self, LinkError}; -use crate::link::operational_data::{ - OperationalData, OperationalDataTarget, TrackedEvents, TransitMessage, -}; -use crate::link::packet_events::query_packet_events_with; -use crate::link::packet_events::query_send_packet_events; -use crate::link::packet_events::query_write_ack_events; -use crate::link::pending::PendingTxs; -use crate::link::relay_sender::{AsyncReply, SubmitReply}; -use crate::link::relay_summary::RelaySummary; -use crate::link::{pending, relay_sender}; -use crate::path::PathIdentifiers; -use crate::telemetry; -use crate::util::queue::Queue; -use ibc::{ - core::{ - ics02_client::{ - client_consensus::QueryClientEventRequest, - events::ClientMisbehaviour as ClientMisbehaviourEvent, - events::UpdateClient as UpdateClientEvent, - }, - ics04_channel::{ - channel::{ChannelEnd, Order, State as ChannelState}, - events::{SendPacket, WriteAcknowledgement}, - msgs::{ - acknowledgement::MsgAcknowledgement, chan_close_confirm::MsgChannelCloseConfirm, - recv_packet::MsgRecvPacket, timeout::MsgTimeout, - timeout_on_close::MsgTimeoutOnClose, - }, - packet::{Packet, PacketMsgType}, - }, - ics24_host::identifier::{ChannelId, ClientId, ConnectionId, PortId}, - }, - events::{IbcEvent, PrettyEvents, WithBlockDataType}, - query::QueryTxRequest, - signer::Signer, - timestamp::Timestamp, - tx_msg::Msg, - Height, -}; - -const MAX_RETRIES: usize = 5; - -/// Whether or not to resubmit packets when pending transactions -/// fail to process within the given timeout duration. -#[derive(Copy, Clone, Debug, PartialEq, Eq)] -pub enum Resubmit { - Yes, - No, -} - -impl Resubmit { - /// Packet resubmission is enabled when the clear interval for packets is 0. Otherwise, - /// when the packet clear interval is > 0, the relayer will periodically clear unsent packets - /// such that resubmitting packets is not necessary. 
- pub fn from_clear_interval(clear_interval: u64) -> Self { - if clear_interval == 0 { - Self::Yes - } else { - Self::No - } - } -} - -pub struct RelayPath { - channel: Channel, - - pub(crate) path_id: PathIdentifiers, - - // Operational data, targeting both the source and destination chain. - // These vectors of operational data are ordered decreasingly by - // their age, with element at position `0` being the oldest. - // The operational data targeting the source chain comprises - // mostly timeout packet messages. - // The operational data targeting the destination chain - // comprises mostly RecvPacket and Ack msgs. - pub src_operational_data: Queue, - pub dst_operational_data: Queue, - - // Toggle for the transaction confirmation mechanism. - confirm_txes: bool, - - // Stores pending (i.e., unconfirmed) operational data. - // The relaying path periodically tries to confirm these pending - // transactions if [`confirm_txes`] is true. - pending_txs_src: PendingTxs, - pending_txs_dst: PendingTxs, -} - -impl RelayPath { - pub fn new( - channel: Channel, - with_tx_confirmation: bool, - ) -> Result { - let src_chain = channel.src_chain().clone(); - let dst_chain = channel.dst_chain().clone(); - - let src_chain_id = src_chain.id(); - let dst_chain_id = dst_chain.id(); - - let src_channel_id = *channel - .src_channel_id() - .ok_or_else(|| LinkError::missing_channel_id(src_chain.id()))?; - - let dst_channel_id = *channel - .dst_channel_id() - .ok_or_else(|| LinkError::missing_channel_id(dst_chain.id()))?; - - let src_port_id = channel.src_port_id().clone(); - let dst_port_id = channel.dst_port_id().clone(); - - let path = PathIdentifiers { - port_id: dst_port_id.clone(), - channel_id: dst_channel_id, - counterparty_port_id: src_port_id.clone(), - counterparty_channel_id: src_channel_id, - }; - - Ok(Self { - channel, - - path_id: path, - - src_operational_data: Queue::new(), - dst_operational_data: Queue::new(), - - confirm_txes: with_tx_confirmation, - pending_txs_src: PendingTxs::new(src_chain, src_channel_id, src_port_id, dst_chain_id), - pending_txs_dst: PendingTxs::new(dst_chain, dst_channel_id, dst_port_id, src_chain_id), - }) - } - - pub fn src_chain(&self) -> &ChainA { - self.channel.src_chain() - } - - pub fn dst_chain(&self) -> &ChainB { - self.channel.dst_chain() - } - - pub fn src_client_id(&self) -> &ClientId { - self.channel.src_client_id() - } - - pub fn dst_client_id(&self) -> &ClientId { - self.channel.dst_client_id() - } - - pub fn src_connection_id(&self) -> &ConnectionId { - self.channel.src_connection_id() - } - - pub fn dst_connection_id(&self) -> &ConnectionId { - self.channel.dst_connection_id() - } - - pub fn src_port_id(&self) -> &PortId { - &self.path_id.counterparty_port_id - } - - pub fn dst_port_id(&self) -> &PortId { - &self.path_id.port_id - } - - pub fn src_channel_id(&self) -> &ChannelId { - &self.path_id.counterparty_channel_id - } - - pub fn dst_channel_id(&self) -> &ChannelId { - &self.path_id.channel_id - } - - pub fn channel(&self) -> &Channel { - &self.channel - } - - fn src_channel(&self, height: Height) -> Result { - self.src_chain() - .query_channel( - QueryChannelRequest { - port_id: self.src_port_id().clone(), - channel_id: *self.src_channel_id(), - height, - }, - IncludeProof::No, - ) - .map(|(channel_end, _)| channel_end) - .map_err(|e| LinkError::channel(ChannelError::query(self.src_chain().id(), e))) - } - - fn dst_channel(&self, height: Height) -> Result { - self.dst_chain() - .query_channel( - QueryChannelRequest { - port_id: 
self.dst_port_id().clone(), - channel_id: *self.dst_channel_id(), - height, - }, - IncludeProof::No, - ) - .map(|(channel_end, _)| channel_end) - .map_err(|e| LinkError::channel(ChannelError::query(self.dst_chain().id(), e))) - } - - fn src_signer(&self) -> Result { - self.src_chain() - .get_signer() - .map_err(|e| LinkError::signer(self.src_chain().id(), e)) - } - - fn dst_signer(&self) -> Result { - self.dst_chain() - .get_signer() - .map_err(|e| LinkError::signer(self.dst_chain().id(), e)) - } - - pub(crate) fn src_latest_height(&self) -> Result { - self.src_chain() - .query_latest_height() - .map_err(|e| LinkError::query(self.src_chain().id(), e)) - } - - pub(crate) fn dst_latest_height(&self) -> Result { - self.dst_chain() - .query_latest_height() - .map_err(|e| LinkError::query(self.dst_chain().id(), e)) - } - - fn src_time_at_height(&self, height: Height) -> Result { - Self::chain_time_at_height(self.src_chain(), height) - } - - fn dst_time_at_height(&self, height: Height) -> Result { - Self::chain_time_at_height(self.dst_chain(), height) - } - - pub(crate) fn src_time_latest(&self) -> Result { - let elapsed = Timestamp::now() - .duration_since( - &self - .src_chain() - .query_application_status() - .unwrap() - .timestamp, - ) - .unwrap_or_default(); - - Ok(Instant::now().sub(elapsed)) - } - - pub(crate) fn dst_time_latest(&self) -> Result { - let elapsed = Timestamp::now() - .duration_since( - &self - .dst_chain() - .query_application_status() - .unwrap() - .timestamp, - ) - .unwrap_or_default(); - - Ok(Instant::now().sub(elapsed)) - } - - pub(crate) fn src_max_block_time(&self) -> Result { - // TODO(hu55a1n1): Ideally, we should get the `max_expected_time_per_block` using the - // `/genesis` endpoint once it is working in tendermint-rs. - Ok(self - .src_chain() - .config() - .map_err(LinkError::relayer)? - .max_block_time) - } - - pub(crate) fn dst_max_block_time(&self) -> Result { - Ok(self - .dst_chain() - .config() - .map_err(LinkError::relayer)? - .max_block_time) - } - - fn unordered_channel(&self) -> bool { - self.channel.ordering == Order::Unordered - } - - fn ordered_channel(&self) -> bool { - self.channel.ordering == Order::Ordered - } - - pub fn build_update_client_on_dst(&self, height: Height) -> Result, LinkError> { - let client = self.restore_dst_client(); - client - .build_update_client(height) - .map_err(LinkError::client) - } - - pub fn build_update_client_on_src(&self, height: Height) -> Result, LinkError> { - let client = self.restore_src_client(); - client - .build_update_client(height) - .map_err(LinkError::client) - } - - fn build_chan_close_confirm_from_event(&self, event: &IbcEvent) -> Result { - let src_channel_id = self.src_channel_id(); - let proofs = self - .src_chain() - .build_channel_proofs(self.src_port_id(), src_channel_id, event.height()) - .map_err(|e| LinkError::channel(ChannelError::channel_proof(e)))?; - - // Build the domain type message - let new_msg = MsgChannelCloseConfirm { - port_id: self.dst_port_id().clone(), - channel_id: *self.dst_channel_id(), - proofs, - signer: self.dst_signer()?, - }; - - Ok(new_msg.to_any()) - } - - /// Determines if the events received are relevant and should be processed. - /// Only events for a port/channel matching one of the channel ends should be processed. 
- fn filter_relaying_events( - &self, - events: Vec, - tracking_id: TrackingId, - ) -> TrackedEvents { - let src_channel_id = self.src_channel_id(); - - let mut result = vec![]; - - for event in events.into_iter() { - match &event { - IbcEvent::SendPacket(send_packet_ev) => { - if src_channel_id == send_packet_ev.src_channel_id() - && self.src_port_id() == send_packet_ev.src_port_id() - { - result.push(event); - } - } - IbcEvent::WriteAcknowledgement(write_ack_ev) => { - if src_channel_id == write_ack_ev.dst_channel_id() - && self.src_port_id() == write_ack_ev.dst_port_id() - { - result.push(event); - } - } - IbcEvent::CloseInitChannel(chan_close_ev) => { - if src_channel_id == chan_close_ev.channel_id() - && self.src_port_id() == chan_close_ev.port_id() - { - result.push(event); - } - } - IbcEvent::TimeoutPacket(timeout_ev) => { - if src_channel_id == timeout_ev.src_channel_id() - && self.channel.src_port_id() == timeout_ev.src_port_id() - { - result.push(event); - } - } - _ => {} - } - } - - // Transform into `TrackedEvents` - TrackedEvents::new(result, tracking_id) - } - - fn relay_pending_packets(&self, height: Option) -> Result<(), LinkError> { - let tracking_id = TrackingId::new_static("relay pending packets"); - - for i in 1..=MAX_RETRIES { - let cleared = self - .schedule_recv_packet_and_timeout_msgs(height, tracking_id) - .and_then(|_| self.schedule_packet_ack_msgs(height, tracking_id)); - - match cleared { - Ok(()) => return Ok(()), - Err(e) => error!( - "failed to clear packets, retry {}/{}: {}", - i, MAX_RETRIES, e - ), - } - } - - Err(LinkError::old_packet_clearing_failed()) - } - - /// Clears any packets that were sent before `height`. - /// If no height is passed in, then the latest height of the source chain is used. - pub fn schedule_packet_clearing(&self, height: Option) -> Result<(), LinkError> { - let span = span!(Level::DEBUG, "clear"); - let _enter = span.enter(); - - let clear_height = height - .map(|h| h.decrement().map_err(|e| LinkError::decrement_height(h, e))) - .transpose()?; - - self.relay_pending_packets(clear_height)?; - - debug!(height = ?clear_height, "done scheduling"); - Ok(()) - } - - /// Generate & schedule operational data from the input `batch` of IBC events. - pub fn update_schedule(&self, batch: EventBatch) -> Result<(), LinkError> { - // Collect relevant events from the incoming batch & adjust their height. - let events = self.filter_relaying_events(batch.events, batch.tracking_id); - - // Transform the events into operational data items - self.events_to_operational_data(events) - } - - /// Produces and schedules operational data for this relaying path based on the input events. - pub(crate) fn events_to_operational_data( - &self, - events: TrackedEvents, - ) -> Result<(), LinkError> { - // Obtain the operational data for the source chain (mostly timeout packets) and for the - // destination chain (e.g., receive packet messages). - let (src_opt, dst_opt) = self.generate_operational_data(events)?; - - if let Some(src_od) = src_opt { - self.schedule_operational_data(src_od)?; - } - if let Some(dst_od) = dst_opt { - self.schedule_operational_data(dst_od)?; - } - - Ok(()) - } - - /// Generates operational data out of a set of events. - /// Handles building operational data targeting both the destination and source chains. - /// - /// For the destination chain, the op. data will contain `RecvPacket` messages, - /// as well as channel close handshake (`ChanCloseConfirm`), `WriteAck` messages. - /// - /// For the source chain, the op. 
data will contain timeout packet messages (`MsgTimeoutOnClose` - /// or `MsgTimeout`). - fn generate_operational_data( - &self, - events: TrackedEvents, - ) -> Result<(Option, Option), LinkError> { - let span = span!(Level::DEBUG, "generate", id = %events.tracking_id()); - let _enter = span.enter(); - - let input = events.events(); - let src_height = match input.get(0) { - None => return Ok((None, None)), - Some(ev) => ev.height(), - }; - - let dst_latest_info = self - .dst_chain() - .query_application_status() - .map_err(|e| LinkError::query(self.src_chain().id(), e))?; - - let dst_latest_height = dst_latest_info.height; - - // Operational data targeting the source chain (e.g., Timeout packets) - let mut src_od = OperationalData::new( - dst_latest_height, - OperationalDataTarget::Source, - events.tracking_id(), - self.channel.connection_delay, - ); - - // Operational data targeting the destination chain (e.g., SendPacket messages) - let mut dst_od = OperationalData::new( - src_height, - OperationalDataTarget::Destination, - events.tracking_id(), - self.channel.connection_delay, - ); - - for event in input { - trace!("processing event: {}", event); - let (dst_msg, src_msg) = match event { - IbcEvent::CloseInitChannel(_) => { - (Some(self.build_chan_close_confirm_from_event(event)?), None) - } - IbcEvent::TimeoutPacket(ref timeout_ev) => { - // When a timeout packet for an ordered channel is processed on-chain (src here) - // the chain closes the channel but no close init event is emitted, instead - // we get a timeout packet event (this happens for both unordered and ordered channels) - // Here we check that the channel is closed on src and send a channel close confirm - // to the counterparty. - if self.ordered_channel() - && self - .src_channel(timeout_ev.height)? - .state_matches(&ChannelState::Closed) - { - (Some(self.build_chan_close_confirm_from_event(event)?), None) - } else { - (None, None) - } - } - IbcEvent::SendPacket(ref send_packet_ev) => { - if self.send_packet_event_handled(send_packet_ev)? { - debug!("{} already handled", send_packet_ev); - (None, None) - } else { - self.build_recv_or_timeout_from_send_packet_event( - send_packet_ev, - &dst_latest_info, - )? - } - } - IbcEvent::WriteAcknowledgement(ref write_ack_ev) => { - if self - .dst_channel(Height::zero())? - .state_matches(&ChannelState::Closed) - { - (None, None) - } else if self.write_ack_event_handled(write_ack_ev)? { - debug!("{} already handled", write_ack_ev); - (None, None) - } else { - (self.build_ack_from_recv_event(write_ack_ev)?, None) - } - } - _ => (None, None), - }; - - // Collect messages to be sent to the destination chain (e.g., RecvPacket) - if let Some(msg) = dst_msg { - debug!("{} from {}", msg.type_url, event); - dst_od.batch.push(TransitMessage { - event: event.clone(), - msg, - }); - } - - // Collect timeout messages, to be sent to the source chain - if let Some(msg) = src_msg { - // For Ordered channels a single timeout event should be sent as this closes the channel. - // Otherwise a multi message transaction will fail. 
- if self.unordered_channel() || src_od.batch.is_empty() { - debug!("{} from {}", msg.type_url, event); - src_od.batch.push(TransitMessage { - event: event.clone(), - msg, - }); - } - } - } - - let src_od_res = if src_od.batch.is_empty() { - None - } else { - Some(src_od) - }; - - let dst_od_res = if dst_od.batch.is_empty() { - None - } else { - Some(dst_od) - }; - - Ok((src_od_res, dst_od_res)) - } - - /// Relays an [`OperationalData`] using a specific - /// sender, which implements [`relay_sender::Submit`]. - pub(crate) fn relay_from_operational_data( - &self, - initial_od: OperationalData, - ) -> Result { - // We will operate on potentially different operational data if the initial one fails. - let _span = span!(Level::INFO, "relay", odata = %initial_od.info()).entered(); - - let mut odata = initial_od; - - for i in 0..MAX_RETRIES { - debug!("[try {}/{}]", i + 1, MAX_RETRIES); - - // Consume the operational data by attempting to send its messages - match self.send_from_operational_data::(&odata) { - Ok(reply) => { - // Done with this op. data - info!("success"); - - return Ok(reply); - } - Err(LinkError(error::LinkErrorDetail::Send(e), _)) => { - // This error means we could retry - error!("error {}", e.event); - if i + 1 == MAX_RETRIES { - error!("{}/{} retries exhausted. giving up", i + 1, MAX_RETRIES) - } else { - // If we haven't exhausted all retries, regenerate the op. data & retry - match self.regenerate_operational_data(odata.clone()) { - None => return Ok(S::Reply::empty()), // Nothing to retry - Some(new_od) => odata = new_od, - } - } - } - Err(e) => { - // Unrecoverable error, propagate up the stack - return Err(e); - } - } - } - - Ok(S::Reply::empty()) - } - - /// Generates fresh operational data for a tx given the initial operational data - /// that failed to send. - /// - /// Return value: - /// - `Some(..)`: a new operational data from which to retry sending, - /// - `None`: all the events in the initial operational data were exhausted (i.e., turned - /// into timeouts), so there is nothing to retry. - /// - /// Side effects: may schedule a new operational data targeting the source chain, comprising - /// new timeout messages. - pub(crate) fn regenerate_operational_data( - &self, - initial_odata: OperationalData, - ) -> Option { - let op_info = initial_odata.info(); - - warn!( - "failed. Regenerate operational data from {} events", - op_info.batch_len() - ); - - // Retry by re-generating the operational data using the initial events - let (src_opt, dst_opt) = match self.generate_operational_data(initial_odata.into_events()) { - Ok(new_operational_data) => new_operational_data, - Err(e) => { - error!( - "failed to regenerate operational data from initial data: {} \ - with error {}, discarding this op. data", - op_info, e - ); - return None; - } // Cannot retry, contain the error by reporting a None - }; - - if let Some(src_od) = src_opt { - if src_od.target == op_info.target() { - // Our target is the _source_ chain, retry these messages - info!(odata = %src_od.info(), "will retry"); - return Some(src_od); - } else { - // Our target is the _destination_ chain, the data in `src_od` contains - // potentially new timeout messages that have to be handled separately. - if let Err(e) = self.schedule_operational_data(src_od) { - error!( - "failed to schedule newly-generated operational data from \ - initial data: {} with error {}, discarding this op. 
data", - op_info, e - ); - return None; - } - } - } - - if let Some(dst_od) = dst_opt { - if dst_od.target == op_info.target() { - // Our target is the _destination_ chain, retry these messages - info!(odata = %dst_od.info(), "will retry"); - return Some(dst_od); - } else { - // Our target is the _source_ chain, but `dst_od` has new messages - // intended for the destination chain, this should never be the case - error!( - "generated new messages for destination chain while handling \ - failed events targeting the source chain!", - ); - } - } else { - // There is no message intended for the destination chain - if op_info.target() == OperationalDataTarget::Destination { - info!("exhausted all events from this operational data"); - return None; - } - } - - None - } - - /// Sends a transaction based on the [`OperationalData`] to - /// the corresponding target chain. - /// - /// Returns the appropriate reply associated with the given - /// [`relay_sender::Submit`]. The reply consists of either the tx - /// hashes generated by the target chain, if [`Async`] sender, - /// or the ibc events, if the sender is [`Sync`]. - /// - /// Propagates any encountered errors. - fn send_from_operational_data( - &self, - odata: &OperationalData, - ) -> Result { - if odata.batch.is_empty() { - error!("ignoring empty operational data!"); - return Ok(S::Reply::empty()); - } - - let msgs = odata.assemble_msgs(self)?; - - telemetry!({ - let (chain, counterparty, channel_id, port_id) = self.target_info(odata.target); - - ibc_telemetry::global().tx_submitted( - msgs.tracking_id, - &chain, - channel_id, - port_id, - &counterparty, - ); - }); - - match odata.target { - OperationalDataTarget::Source => S::submit(self.src_chain(), msgs), - OperationalDataTarget::Destination => S::submit(self.dst_chain(), msgs), - } - } - - fn enqueue_pending_tx(&self, reply: AsyncReply, odata: OperationalData) { - if !self.confirm_txes { - return; - } - - match odata.target { - OperationalDataTarget::Source => { - self.pending_txs_src.insert_new_pending_tx(reply, odata); - } - OperationalDataTarget::Destination => { - self.pending_txs_dst.insert_new_pending_tx(reply, odata); - } - } - } - - /// Checks if a sent packet has been received on destination. - fn send_packet_received_on_dst(&self, packet: &Packet) -> Result { - let unreceived_packet = self - .dst_chain() - .query_unreceived_packets(QueryUnreceivedPacketsRequest { - port_id: self.dst_port_id().clone(), - channel_id: *self.dst_channel_id(), - packet_commitment_sequences: vec![packet.sequence], - }) - .map_err(LinkError::relayer)?; - - Ok(unreceived_packet.is_empty()) - } - - /// Checks if a packet commitment has been cleared on source. - /// The packet commitment is cleared when either an acknowledgment or a timeout is received on source. - fn send_packet_commitment_cleared_on_src(&self, packet: &Packet) -> Result { - let (bytes, _) = self - .src_chain() - .query_packet_commitment( - QueryPacketCommitmentRequest { - port_id: self.src_port_id().clone(), - channel_id: *self.src_channel_id(), - sequence: packet.sequence, - height: Height::zero(), - }, - IncludeProof::No, - ) - .map_err(LinkError::relayer)?; - - Ok(bytes.is_empty()) - } - - /// Checks if a send packet event has already been handled (e.g. by another relayer). - fn send_packet_event_handled(&self, sp: &SendPacket) -> Result { - Ok(self.send_packet_received_on_dst(&sp.packet)? - || self.send_packet_commitment_cleared_on_src(&sp.packet)?) 
- } - - /// Checks if an acknowledgement for the given packet has been received on - /// source chain of the packet, ie. the destination chain of the relay path - /// that sends the acknowledgment. - fn recv_packet_acknowledged_on_src(&self, packet: &Packet) -> Result { - let unreceived_ack = self - .dst_chain() - .query_unreceived_acknowledgements(QueryUnreceivedAcksRequest { - port_id: self.dst_port_id().clone(), - channel_id: *self.dst_channel_id(), - packet_ack_sequences: vec![packet.sequence], - }) - .map_err(LinkError::relayer)?; - - Ok(unreceived_ack.is_empty()) - } - - /// Checks if a receive packet event has already been handled (e.g. by another relayer). - fn write_ack_event_handled(&self, rp: &WriteAcknowledgement) -> Result { - self.recv_packet_acknowledged_on_src(&rp.packet) - } - - /// Returns the `processed_height` for the consensus state at specified height - fn update_height( - chain: &impl ChainHandle, - client_id: ClientId, - consensus_height: Height, - ) -> Result { - let events = chain - .query_txs(QueryTxRequest::Client(QueryClientEventRequest { - height: Height::zero(), - event_id: WithBlockDataType::UpdateClient, - client_id, - consensus_height, - })) - .map_err(|e| LinkError::query(chain.id(), e))?; - - // The handler may treat redundant updates as no-ops and emit `UpdateClient` events for them - // but the `processed_height` is the height at which the first `UpdateClient` event for this - // consensus state/height was emitted. We expect that these events are received in the exact - // same order in which they were emitted. - match events.first() { - Some(IbcEvent::UpdateClient(event)) => Ok(event.height()), - Some(event) => Err(LinkError::unexpected_event(event.clone())), - None => Err(LinkError::unexpected_event(IbcEvent::default())), - } - } - - /// Loops over `tx_events` and returns a tuple of optional events where the first element is a - /// `ChainError` variant, the second one is an `UpdateClient` variant and the third one is a - /// `ClientMisbehaviour` variant. This function is essentially just an `Iterator::find()` for - /// multiple variants with a single pass. - #[inline] - fn event_per_type( - mut tx_events: Vec, - ) -> ( - Option, - Option, - Option, - ) { - let mut error = None; - let mut update = None; - let mut misbehaviour = None; - - while let Some(event) = tx_events.pop() { - match event { - IbcEvent::ChainError(_) => error = Some(event), - IbcEvent::UpdateClient(event) => update = Some(event), - IbcEvent::ClientMisbehaviour(event) => misbehaviour = Some(event), - _ => {} - } - } - - (error, update, misbehaviour) - } - - /// Returns an instant (in the past) that corresponds to the block timestamp of the chain at - /// specified height (relative to the relayer's current time). If the timestamp is in the future - /// wrt the relayer's current time, we simply return the current relayer time. - fn chain_time_at_height( - chain: &impl ChainHandle, - height: Height, - ) -> Result { - let chain_time = chain - .query_host_consensus_state(QueryHostConsensusStateRequest { height }) - .map_err(LinkError::relayer)? 
- .timestamp(); - let duration = Timestamp::now() - .duration_since(&chain_time) - .unwrap_or_default(); - Ok(Instant::now().sub(duration)) - } - - /// Handles updating the client on the destination chain - /// Returns the height at which the client update was processed - fn update_client_dst( - &self, - src_chain_height: Height, - tracking_id: TrackingId, - ) -> Result { - self.do_update_client_dst(src_chain_height, tracking_id, MAX_RETRIES) - } - - /// Perform actual update_client_dst with retries. - /// - /// Note that the retry is only performed in the case when there - /// is a ChainError event. It would return error immediately if - /// there are other errors returned from calls such as - /// build_update_client_on_dst. - fn do_update_client_dst( - &self, - src_chain_height: Height, - tracking_id: TrackingId, - retries_left: usize, - ) -> Result { - info!( "sending update_client to client hosted on destination chain for height {} (retries left: {})", src_chain_height, retries_left ); - - let dst_update = self.build_update_client_on_dst(src_chain_height)?; - let tm = TrackedMsgs::new(dst_update, tracking_id); - let dst_tx_events = self - .dst_chain() - .send_messages_and_wait_commit(tm) - .map_err(LinkError::relayer)?; - - info!("result: {}", PrettyEvents(&dst_tx_events)); - - let (error, update, misbehaviour) = Self::event_per_type(dst_tx_events); - match (error, update, misbehaviour) { - // All updates were successful, no errors and no misbehaviour. - (None, Some(update_event), None) => Ok(update_event.height()), - (Some(chain_error), _, _) => { - // At least one chain-error so retry if possible. - if retries_left == 0 { - Err(LinkError::client(ForeignClientError::chain_error_event( - self.dst_chain().id(), - chain_error, - ))) - } else { - self.do_update_client_dst(src_chain_height, tracking_id, retries_left - 1) - } - } - (None, None, None) => { - // `tm` was empty and update wasn't required - match Self::update_height( - self.dst_chain(), - self.dst_client_id().clone(), - src_chain_height, - ) { - Ok(update_height) => Ok(update_height), - Err(_) if retries_left > 0 => { - self.do_update_client_dst(src_chain_height, tracking_id, retries_left - 1) - } - _ => Err(LinkError::update_client_failed()), - } - } - // At least one misbehaviour event, so don't retry. - (_, _, Some(_misbehaviour)) => Err(LinkError::update_client_failed()), - } - } - - /// Handles updating the client on the source chain - /// Returns the height at which the client update was processed - fn update_client_src( - &self, - dst_chain_height: Height, - tracking_id: TrackingId, - ) -> Result { - self.do_update_client_src(dst_chain_height, tracking_id, MAX_RETRIES) - } - - /// Perform actual update_client_src with retries. - /// - /// Note that the retry is only performed in the case when there - /// is a ChainError event. It would return error immediately if - /// there are other errors returned from calls such as - /// build_update_client_on_src.
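// (editor's note) The doc comment above (and the matching one for
// `do_update_client_dst`) describes a bounded retry that only a `ChainError`
// event can trigger; any other failure is returned to the caller immediately.
// A minimal, self-contained sketch of that shape (the names below are
// illustrative stand-ins, not part of the relayer API):

enum Attempt<T, E> {
    /// The operation succeeded.
    Done(T),
    /// A retryable failure (the analogue of a `ChainError` event).
    Retry(E),
    /// A non-retryable failure; returned immediately.
    Fatal(E),
}

/// Runs `op` up to `1 + max_retries` times, retrying only retryable failures.
fn retry_bounded<T, E>(
    max_retries: usize,
    mut op: impl FnMut() -> Attempt<T, E>,
) -> Result<T, E> {
    let mut last_err = None;
    for _ in 0..=max_retries {
        match op() {
            Attempt::Done(value) => return Ok(value),
            Attempt::Retry(err) => last_err = Some(err),
            Attempt::Fatal(err) => return Err(err),
        }
    }
    Err(last_err.expect("at least one attempt was made"))
}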
- fn do_update_client_src( - &self, - dst_chain_height: Height, - tracking_id: TrackingId, - retries_left: usize, - ) -> Result { - info!( "sending update_client to client hosted on source chain for height {} (retries left: {})", dst_chain_height, retries_left ); - - let src_update = self.build_update_client_on_src(dst_chain_height)?; - let tm = TrackedMsgs::new(src_update, tracking_id); - let src_tx_events = self - .src_chain() - .send_messages_and_wait_commit(tm) - .map_err(LinkError::relayer)?; - - info!("result: {}", PrettyEvents(&src_tx_events)); - - let (error, update, misbehaviour) = Self::event_per_type(src_tx_events); - match (error, update, misbehaviour) { - // All updates were successful, no errors and no misbehaviour. - (None, Some(update_event), None) => Ok(update_event.height()), - (Some(chain_error), _, _) => { - // At least one chain-error so retry if possible. - if retries_left == 0 { - Err(LinkError::client(ForeignClientError::chain_error_event( - self.src_chain().id(), - chain_error, - ))) - } else { - self.do_update_client_src(dst_chain_height, tracking_id, retries_left - 1) - } - } - (None, None, None) => { - // `tm` was empty and update wasn't required - match Self::update_height( - self.src_chain(), - self.src_client_id().clone(), - dst_chain_height, - ) { - Ok(update_height) => Ok(update_height), - Err(_) if retries_left > 0 => { - self.do_update_client_src(dst_chain_height, tracking_id, retries_left - 1) - } - _ => Err(LinkError::update_client_failed()), - } - } - // At least one misbehaviour event, so don't retry. - (_, _, Some(_misbehaviour)) => Err(LinkError::update_client_failed()), - } - } - - /// Schedules the relaying of [`MsgRecvPacket`] and [`MsgTimeout`] messages. - /// - /// The optional [`Height`] parameter allows specifying a height on the source - /// chain where to query for packet data. If `None`, the latest available - /// height on the source chain is used. - /// - /// Blocks until _all_ outstanding messages have been scheduled. - pub fn schedule_recv_packet_and_timeout_msgs( - &self, - opt_query_height: Option, - tracking_id: TrackingId, - ) -> Result<(), LinkError> { - let _span = - span!(Level::DEBUG, "schedule_recv_packet_and_timeout_msgs", query_height = ?opt_query_height) - .entered(); - - // Pull the sequence numbers of all packets that the destination chain has not yet received. - let (sequences, src_response_height) = - unreceived_packets(self.dst_chain(), self.src_chain(), &self.path_id) - .map_err(LinkError::supervisor)?; - - let query_height = opt_query_height.unwrap_or(src_response_height); - - // Skip: no relevant events found. - if sequences.is_empty() { - return Ok(()); - } - - debug!( - "sequences of unreceived packets to send out to {} of the ones with commitments on {}: {} (first 10 shown here; total={})", - self.dst_chain().id(), - self.src_chain().id(), - sequences.iter().take(10).format(", "), sequences.len() - ); - - // Chunk up the list of sequence numbers into smaller parts, - // and schedule operational data incrementally across each chunk. - for events_chunk in query_packet_events_with( - &sequences, - query_height, - self.src_chain(), - &self.path_id, - query_send_packet_events, - ) { - self.events_to_operational_data(TrackedEvents::new(events_chunk, tracking_id))?; - } - - Ok(()) - } - - /// Schedules the relaying of [`MsgAcknowledgement`] messages. - /// - /// The `opt_query_height` parameter allows optionally using a specific height on the source - /// chain where to query for packet data.
If `None`, the latest available height on the source - /// chain is used. - pub fn schedule_packet_ack_msgs( - &self, - opt_query_height: Option, - tracking_id: TrackingId, - ) -> Result<(), LinkError> { - let _span = span!(Level::DEBUG, "build_packet_ack_msgs", h = ?opt_query_height).entered(); - - let (sequences, src_response_height) = - unreceived_acknowledgements(self.dst_chain(), self.src_chain(), &self.path_id) - .map_err(LinkError::supervisor)?; - - let query_height = opt_query_height.unwrap_or(src_response_height); - - // Skip: no relevant events found. - if sequences.is_empty() { - return Ok(()); - } - - debug!( - "seq. nrs. of ack packets to send out to {} of the ones with acknowledgments on {}: {} (first 10 shown here; total={})", - self.dst_chain().id(), - self.src_chain().id(), - sequences.iter().take(10).format(", "), sequences.len() - ); - - // Incrementally process all the available sequence numbers in chunks - for events_chunk in query_packet_events_with( - &sequences, - query_height, - self.src_chain(), - &self.path_id, - query_write_ack_events, - ) { - self.events_to_operational_data(TrackedEvents::new(events_chunk, tracking_id))?; - } - - Ok(()) - } - - fn build_recv_packet(&self, packet: &Packet, height: Height) -> Result, LinkError> { - let proofs = self - .src_chain() - .build_packet_proofs( - PacketMsgType::Recv, - &packet.source_port, - &packet.source_channel, - packet.sequence, - height, - ) - .map_err(|e| LinkError::packet_proofs_constructor(self.src_chain().id(), e))?; - - let msg = MsgRecvPacket::new(packet.clone(), proofs.clone(), self.dst_signer()?); - - trace!( - "built recv_packet msg {}, proofs at height {}", - msg.packet, - proofs.height() - ); - - Ok(Some(msg.to_any())) - } - - fn build_ack_from_recv_event( - &self, - event: &WriteAcknowledgement, - ) -> Result, LinkError> { - let packet = event.packet.clone(); - - let proofs = self - .src_chain() - .build_packet_proofs( - PacketMsgType::Ack, - &packet.destination_port, - &packet.destination_channel, - packet.sequence, - event.height, - ) - .map_err(|e| LinkError::packet_proofs_constructor(self.src_chain().id(), e))?; - - let msg = MsgAcknowledgement::new( - packet, - event.ack.clone().into(), - proofs.clone(), - self.dst_signer()?, - ); - - trace!( - "built acknowledgment msg {}, proofs at height {}", - msg.packet, - proofs.height() - ); - - Ok(Some(msg.to_any())) - } - - fn build_timeout_packet( - &self, - packet: &Packet, - height: Height, - ) -> Result, LinkError> { - let dst_channel_id = self.dst_channel_id(); - - debug!("build timeout for channel"); - let (packet_type, next_sequence_received) = if self.ordered_channel() { - let (next_seq, _) = self - .dst_chain() - .query_next_sequence_receive( - QueryNextSequenceReceiveRequest { - port_id: self.dst_port_id().clone(), - channel_id: *dst_channel_id, - height, - }, - IncludeProof::No, - ) - .map_err(|e| LinkError::query(self.dst_chain().id(), e))?; - - (PacketMsgType::TimeoutOrdered, next_seq) - } else { - (PacketMsgType::TimeoutUnordered, packet.sequence) - }; - - let proofs = self - .dst_chain() - .build_packet_proofs( - packet_type, - &packet.destination_port, - &packet.destination_channel, - next_sequence_received, - height, - ) - .map_err(|e| LinkError::packet_proofs_constructor(self.dst_chain().id(), e))?; - - let msg = MsgTimeout::new( - packet.clone(), - next_sequence_received, - proofs.clone(), - self.src_signer()?, - ); - - trace!( - "built timeout msg {}, proofs at height {}", - msg.packet, - proofs.height() - ); - - Ok(Some(msg.to_any())) 
- } - - fn build_timeout_on_close_packet( - &self, - packet: &Packet, - height: Height, - ) -> Result, LinkError> { - let proofs = self - .dst_chain() - .build_packet_proofs( - PacketMsgType::TimeoutOnClose, - &packet.destination_port, - &packet.destination_channel, - packet.sequence, - height, - ) - .map_err(|e| LinkError::packet_proofs_constructor(self.dst_chain().id(), e))?; - - let msg = MsgTimeoutOnClose::new( - packet.clone(), - packet.sequence, - proofs.clone(), - self.src_signer()?, - ); - - trace!( - "built timeout on close msg {}, proofs at height {}", - msg.packet, - proofs.height() - ); - - Ok(Some(msg.to_any())) - } - - fn build_timeout_from_send_packet_event( - &self, - event: &SendPacket, - dst_info: &ChainStatus, - ) -> Result, LinkError> { - let packet = event.packet.clone(); - if self - .dst_channel(dst_info.height)? - .state_matches(&ChannelState::Closed) - { - Ok(self.build_timeout_on_close_packet(&event.packet, dst_info.height)?) - } else if packet.timed_out(&dst_info.timestamp, dst_info.height) { - Ok(self.build_timeout_packet(&event.packet, dst_info.height)?) - } else { - Ok(None) - } - } - - fn build_recv_or_timeout_from_send_packet_event( - &self, - event: &SendPacket, - dst_info: &ChainStatus, - ) -> Result<(Option, Option), LinkError> { - let timeout = self.build_timeout_from_send_packet_event(event, dst_info)?; - if timeout.is_some() { - Ok((None, timeout)) - } else { - Ok((self.build_recv_packet(&event.packet, event.height)?, None)) - } - } - - /// Drives the relaying of elapsed operational data items meant for - /// a specified target chain forward. - /// - /// Given an iterator of `OperationalData` elements, this function - /// first determines whether the current piece of operational data - /// has elapsed. - /// - /// A piece of operational data is considered 'elapsed' if it has been waiting - /// for an amount of time that surpasses both of the following: - /// 1. The time duration specified in the connection delay - /// 2. The number of blocks specified in the connection delay - /// - /// If the current piece of operational data has elapsed, then relaying - /// is performed using the asynchronous sender. Operational data is - /// retained as pending and is associated with one or more transaction - /// hash(es). - /// - /// Should an error occur when attempting to relay a piece of operational - /// data, this function returns all subsequent unprocessed pieces of - /// operational data back to the caller so that they can be re-queued - /// for processing; the operational data that failed to send is dropped. - /// - /// Note that pieces of operational data that have not elapsed yet are - /// also placed in the 'unprocessed' bucket. - fn execute_schedule_for_target_chain>( - &mut self, - mut operations: I, - target_chain: OperationalDataTarget, - ) -> Result, (VecDeque, LinkError)> { - let mut unprocessed = VecDeque::new(); - - while let Some(od) = operations.next() { - let elapsed_result = match target_chain { - OperationalDataTarget::Source => od.has_conn_delay_elapsed( - &|| self.src_time_latest(), - &|| self.src_max_block_time(), - &|| self.src_latest_height(), - ), - OperationalDataTarget::Destination => od.has_conn_delay_elapsed( - &|| self.dst_time_latest(), - &|| self.dst_max_block_time(), - &|| self.dst_latest_height(), - ), - }; - - match elapsed_result { - Ok(elapsed) => { - if elapsed { - // The current piece of operational data has elapsed; we can go ahead and - // attempt to relay it. 
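// (editor's note) "Elapsed" here is the predicate documented above: the
// connection delay must have passed along both axes, wall-clock time since the
// client update was processed and blocks produced since then. A standalone
// sketch of that check, with illustrative parameters (the real code derives the
// block threshold from the chain's configured max block time):
use std::time::{Duration, Instant};

fn conn_delay_elapsed(
    processed_time: Instant,  // when the client update was processed
    processed_height: u64,    // height at which it was processed
    delay_duration: Duration, // connection delay, time dimension
    delay_blocks: u64,        // connection delay, block dimension
    now: Instant,
    latest_height: u64,
) -> bool {
    let time_elapsed = now.duration_since(processed_time) >= delay_duration;
    let blocks_elapsed = latest_height.saturating_sub(processed_height) >= delay_blocks;
    time_elapsed && blocks_elapsed
}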
- match self - .relay_from_operational_data::(od.clone()) - { - // The operational data was successfully relayed; enqueue the associated tx. - Ok(reply) => self.enqueue_pending_tx(reply, od), - // The relaying process failed; return all of the subsequent pieces of operational - // data along with the underlying error that occurred. - Err(e) => { - unprocessed.extend(operations); - - return Err((unprocessed, e)); - } - } - } else { - // The current piece of operational data has not elapsed; add it to the bucket - // of unprocessed operational data and continue processing subsequent pieces - // of operational data. - unprocessed.push_back(od); - } - } - Err(e) => { - // An error occurred when attempting to determine whether the current piece of - // operational data has elapsed or not. Add the current piece of data, along with - // all of the subsequent pieces of data, to the unprocessed bucket and return it - // along with the error that resulted. - unprocessed.push_back(od); - unprocessed.extend(operations); - - return Err((unprocessed, e)); - } - } - } - - Ok(unprocessed) - } - - /// While there are pending operational data items, this function - /// performs the relaying of packets corresponding to those - /// operational data items to both the source and destination chains. - /// - /// Any operational data items that do not get successfully relayed are - /// dropped. Subsequent pending operational data items that went unprocessed - /// are queued up again for re-submission. - pub fn execute_schedule(&mut self) -> Result<(), LinkError> { - let src_od_iter = self.src_operational_data.take().into_iter(); - - match self.execute_schedule_for_target_chain(src_od_iter, OperationalDataTarget::Source) { - Ok(unprocessed_src_data) => self.src_operational_data = unprocessed_src_data.into(), - Err((unprocessed_src_data, e)) => { - self.src_operational_data = unprocessed_src_data.into(); - return Err(e); - } - } - - let dst_od_iter = self.dst_operational_data.take().into_iter(); - - match self - .execute_schedule_for_target_chain(dst_od_iter, OperationalDataTarget::Destination) - { - Ok(unprocessed_dst_data) => self.dst_operational_data = unprocessed_dst_data.into(), - Err((unprocessed_dst_data, e)) => { - self.dst_operational_data = unprocessed_dst_data.into(); - return Err(e); - } - } - - Ok(()) - } - - /// Kicks off the process of relaying pending txs to the source and destination chains. - /// - /// See [`Resubmit::from_clear_interval`] for more info about the `resubmit` parameter. - pub fn process_pending_txs(&self, resubmit: Resubmit) -> RelaySummary { - if !self.confirm_txes { - return RelaySummary::empty(); - } - - let mut summary_src = self.process_pending_txs_src(resubmit).unwrap_or_else(|e| { - error!("error processing pending events in source chain: {}", e); - RelaySummary::empty() - }); - - let summary_dst = self.process_pending_txs_dst(resubmit).unwrap_or_else(|e| { - error!( - "error processing pending events in destination chain: {}", - e - ); - RelaySummary::empty() - }); - - summary_src.extend(summary_dst); - summary_src - } - - fn process_pending_txs_src(&self, resubmit: Resubmit) -> Result { - let do_resubmit = match resubmit { - Resubmit::Yes => { - Some(|odata| self.relay_from_operational_data::(odata)) - } - Resubmit::No => None, - }; - - let res = self - .pending_txs_src - .process_pending(pending::TIMEOUT, self, do_resubmit)? 
- .unwrap_or_else(RelaySummary::empty); - - Ok(res) - } - - fn process_pending_txs_dst(&self, resubmit: Resubmit) -> Result { - let do_resubmit = match resubmit { - Resubmit::Yes => { - Some(|odata| self.relay_from_operational_data::(odata)) - } - Resubmit::No => None, - }; - - let res = self - .pending_txs_dst - .process_pending(pending::TIMEOUT, self, do_resubmit)? - .unwrap_or_else(RelaySummary::empty); - - Ok(res) - } - - /// Refreshes the scheduled batches. - /// Verifies if any sendPacket messages timed-out. If so, moves them from destination op. data - /// to source operational data, and adjusts the events and messages accordingly. - pub fn refresh_schedule(&self) -> Result<(), LinkError> { - // Bail fast if no op. data to refresh - if self.dst_operational_data.is_empty() { - return Ok(()); - } - - let span = span!(Level::INFO, "refresh"); - let _enter = span.enter(); - - let dst_status = self - .dst_chain() - .query_application_status() - .map_err(|e| LinkError::query(self.src_chain().id(), e))?; - - let dst_current_height = dst_status.height; - - // Intermediary data struct to help better manage the transfer from dst. operational data - // to source operational data. - let mut all_dst_odata = self.dst_operational_data.clone_vec(); - - let mut timed_out: HashMap = HashMap::default(); - - // For each operational data targeting the destination chain... - for (odata_pos, odata) in all_dst_odata.iter_mut().enumerate() { - // ... check each `SendPacket` event, whether it should generate a timeout message - let mut retain_batch = vec![]; - - for gm in odata.batch.iter() { - let TransitMessage { event, .. } = gm; - - match event { - IbcEvent::SendPacket(e) => { - // Catch any SendPacket event that timed-out - if self.send_packet_event_handled(e)? { - debug!("already handled send packet {}", e); - } else if let Some(new_msg) = - self.build_timeout_from_send_packet_event(e, &dst_status)? - { - debug!("found a timed-out msg in the op data {}", odata.info(),); - timed_out - .entry(odata_pos) - .or_insert_with(|| { - OperationalData::new( - dst_current_height, - OperationalDataTarget::Source, - odata.tracking_id, - self.channel.connection_delay, - ) - }) - .push(TransitMessage { - event: event.clone(), - msg: new_msg, - }); - } else { - // A SendPacket event, but did not time-out yet, retain - retain_batch.push(gm.clone()); - } - } - IbcEvent::WriteAcknowledgement(e) => { - if self.write_ack_event_handled(e)? { - debug!("already handled {} write ack ", e); - } else { - retain_batch.push(gm.clone()); - } - } - _ => retain_batch.push(gm.clone()), - } - } - - // Update the whole batch, keeping only the relevant ones - odata.batch = retain_batch; - } - - // Possibly some op. data became empty (if no events were kept). - // Retain only the non-empty ones. - all_dst_odata.retain(|o| !o.batch.is_empty()); - - // Replace the original operational data with the updated one - self.dst_operational_data.replace(all_dst_odata); - - // Handle timed-out events - if timed_out.is_empty() { - // Nothing timed out in the meantime - return Ok(()); - } - - // Schedule new operational data targeting the source chain - for (_, new_od) in timed_out.into_iter() { - info!( - "re-scheduling from new timed-out batch of size {}", - new_od.batch.len() - ); - - self.schedule_operational_data(new_od)?; - } - - Ok(()) - } - - /// Adds a new operational data item for this relaying path to process later. 
- /// If the relaying path has non-zero packet delays, this method also updates the client on the - /// target chain with the appropriate headers. - fn schedule_operational_data(&self, mut od: OperationalData) -> Result<(), LinkError> { - let _span = span!(Level::INFO, "schedule", odata = %od.info()).entered(); - - if od.batch.is_empty() { - info!( - "ignoring operational data for {} because it has no messages", - od.target - ); - return Ok(()); - } - - // Update clients ahead of scheduling the operational data, if the delays are non-zero. - // If the connection-delay must be taken into account, set the `scheduled_time` to an - // instant in the past, i.e. when this client update was first processed (`processed_time`) - let scheduled_time = if od.conn_delay_needed() { - debug!("connection delay must be taken into account: updating client"); - let target_height = od.proofs_height.increment(); - match od.target { - OperationalDataTarget::Source => { - let update_height = self.update_client_src(target_height, od.tracking_id)?; - od.set_update_height(update_height); - self.src_time_at_height(update_height)? - } - OperationalDataTarget::Destination => { - let update_height = self.update_client_dst(target_height, od.tracking_id)?; - od.set_update_height(update_height); - self.dst_time_at_height(update_height)? - } - } - } else { - debug!( - "connection delay need not be taken into account: client update message will be \ - prepended later" - ); - Instant::now() - }; - - od.set_scheduled_time(scheduled_time); - - match od.target { - OperationalDataTarget::Source => self.src_operational_data.push_back(od), - OperationalDataTarget::Destination => self.dst_operational_data.push_back(od), - }; - - Ok(()) - } - - /// Pulls out the operational elements with elapsed delay period and that can - /// now be processed. - pub(crate) fn try_fetch_scheduled_operational_data( - &self, - ) -> Result<(VecDeque, VecDeque), LinkError> { - // Extracts elements from a Vec when the predicate returns true. - // The mutable vector is then updated to the remaining unextracted elements. - fn partition( - queue: VecDeque, - pred: impl Fn(&T) -> Result, - ) -> Result<(VecDeque, VecDeque), LinkError> { - let mut true_res = VecDeque::new(); - let mut false_res = VecDeque::new(); - - for e in queue.into_iter() { - if pred(&e)? 
{ - true_res.push_back(e); - } else { - false_res.push_back(e); - } - } - - Ok((true_res, false_res)) - } - - let (elapsed_src_ods, unelapsed_src_ods) = - partition(self.src_operational_data.take(), |op| { - op.has_conn_delay_elapsed( - &|| self.src_time_latest(), - &|| self.src_max_block_time(), - &|| self.src_latest_height(), - ) - })?; - - let (elapsed_dst_ods, unelapsed_dst_ods) = - partition(self.dst_operational_data.take(), |op| { - op.has_conn_delay_elapsed( - &|| self.dst_time_latest(), - &|| self.dst_max_block_time(), - &|| self.dst_latest_height(), - ) - })?; - - self.src_operational_data.replace(unelapsed_src_ods); - self.dst_operational_data.replace(unelapsed_dst_ods); - Ok((elapsed_src_ods, elapsed_dst_ods)) - } - - fn restore_src_client(&self) -> ForeignClient { - ForeignClient::restore( - self.src_client_id().clone(), - self.src_chain().clone(), - self.dst_chain().clone(), - ) - } - - fn restore_dst_client(&self) -> ForeignClient { - ForeignClient::restore( - self.dst_client_id().clone(), - self.dst_chain().clone(), - self.src_chain().clone(), - ) - } - - // we need fully qualified ChainId to avoid unneeded imports warnings - #[cfg(feature = "telemetry")] - fn target_info( - &self, - target: OperationalDataTarget, - ) -> ( - ibc::core::ics24_host::identifier::ChainId, // source chain - ibc::core::ics24_host::identifier::ChainId, // destination chain - &ChannelId, - &PortId, - ) { - match target { - OperationalDataTarget::Source => ( - self.src_chain().id(), - self.dst_chain().id(), - self.src_channel_id(), - self.src_port_id(), - ), - OperationalDataTarget::Destination => ( - self.dst_chain().id(), - self.src_chain().id(), - self.dst_channel_id(), - self.dst_port_id(), - ), - } - } -} diff --git a/relayer/src/link/relay_sender.rs b/relayer/src/link/relay_sender.rs deleted file mode 100644 index 0174bd11c0..0000000000 --- a/relayer/src/link/relay_sender.rs +++ /dev/null @@ -1,97 +0,0 @@ -use core::fmt; - -use tendermint_rpc::endpoint::broadcast::tx_sync; -use tracing::info; - -use ibc::events::{IbcEvent, PrettyEvents}; - -use crate::chain::handle::ChainHandle; -use crate::chain::tracking::TrackedMsgs; -use crate::link::error::LinkError; -use crate::link::RelaySummary; - -pub trait SubmitReply { - fn empty() -> Self; -} - -impl SubmitReply for RelaySummary { - fn empty() -> Self { - RelaySummary::empty() - } -} - -/// Captures the ability to submit messages to a chain. -pub trait Submit { - type Reply: SubmitReply; - - fn submit(target: &impl ChainHandle, msgs: TrackedMsgs) -> Result; -} - -/// Synchronous sender -pub struct SyncSender; - -impl Submit for SyncSender { - type Reply = RelaySummary; - - // TODO: Switch from the `Chain::send_msgs` interface in this method - // to use `Chain::submit_msgs` instead; implement waiting for block - // commits directly here (instead of blocking in the chain runtime). 
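// (editor's note) The `Submit` abstraction defined in this deleted module (and
// used by `relay_from_operational_data::<S>` earlier in the patch) lets callers
// stay agnostic about how a batch reaches the chain: the synchronous sender
// waits for the tx to be committed and yields IBC events, while the async one
// only waits for CheckTx and yields tx hashes. A minimal, self-contained sketch
// of the same shape; every type here is an illustrative stand-in, not the
// relayer's own:

trait ReplyLike {
    fn empty() -> Self;
}

trait SubmitLike {
    type Reply: ReplyLike;
    fn submit(msgs: Vec<String>) -> Result<Self::Reply, String>;
}

/// Stand-in for `RelaySummary`: the events produced once a tx is committed.
struct EventSummary(Vec<String>);

impl ReplyLike for EventSummary {
    fn empty() -> Self {
        EventSummary(Vec::new())
    }
}

/// Stand-in for the synchronous sender.
struct SyncLike;

impl SubmitLike for SyncLike {
    type Reply = EventSummary;
    fn submit(msgs: Vec<String>) -> Result<Self::Reply, String> {
        // The real sender calls `send_messages_and_wait_commit` and turns any
        // `ChainError` event in the result into a send error.
        Ok(EventSummary(msgs))
    }
}

/// Callers stay generic over the sender; the sender chooses the reply type.
fn relay_batch<S: SubmitLike>(msgs: Vec<String>) -> Result<S::Reply, String> {
    if msgs.is_empty() {
        return Ok(S::Reply::empty());
    }
    S::submit(msgs)
}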
- fn submit(target: &impl ChainHandle, msgs: TrackedMsgs) -> Result { - let tx_events = target - .send_messages_and_wait_commit(msgs) - .map_err(LinkError::relayer)?; - - info!( - "[Sync->{}] result {}\n", - target.id(), - PrettyEvents(&tx_events) - ); - - let ev = tx_events - .clone() - .into_iter() - .find(|event| matches!(event, IbcEvent::ChainError(_))); - - match ev { - Some(ev) => Err(LinkError::send(ev)), - None => Ok(RelaySummary::from_events(tx_events)), - } - } -} - -pub struct AsyncReply { - pub responses: Vec, -} - -impl SubmitReply for AsyncReply { - fn empty() -> Self { - Self { responses: vec![] } - } -} - -// TODO(Adi): Consider removing the senders and keep only a generic -// send/submit method. -pub struct AsyncSender; - -impl Submit for AsyncSender { - type Reply = AsyncReply; - - fn submit(target: &impl ChainHandle, msgs: TrackedMsgs) -> Result { - let a = target - .send_messages_and_wait_check_tx(msgs) - .map_err(LinkError::relayer)?; - let reply = AsyncReply { responses: a }; - info!("[Async~>{}] {}\n", target.id(), reply); - - Ok(reply) - } -} - -impl fmt::Display for AsyncReply { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "response(s): {}", self.responses.len())?; - self.responses - .iter() - .try_for_each(|r| write!(f, "; {:?}:{}", r.code, r.hash)) - } -} diff --git a/relayer/src/link/relay_summary.rs b/relayer/src/link/relay_summary.rs deleted file mode 100644 index 2dbcdd9d83..0000000000 --- a/relayer/src/link/relay_summary.rs +++ /dev/null @@ -1,38 +0,0 @@ -use core::fmt; - -use ibc::events::IbcEvent; - -#[derive(Clone, Debug)] -pub struct RelaySummary { - pub events: Vec, - // errors: todo!(), - // timings: todo!(), -} - -impl RelaySummary { - pub fn empty() -> Self { - Self { events: vec![] } - } - - pub fn is_empty(&self) -> bool { - self.events.is_empty() - } - - pub fn from_events(events: Vec) -> Self { - Self { events } - } - - pub fn extend(&mut self, other: RelaySummary) { - self.events.extend(other.events) - } -} - -impl fmt::Display for RelaySummary { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "RelaySummary events: ")?; - for e in &self.events { - write!(f, "{}; ", e)? - } - write!(f, "total events = {}", self.events.len()) - } -} diff --git a/relayer/src/link/tx_hashes.rs b/relayer/src/link/tx_hashes.rs deleted file mode 100644 index 0ac2bd0a4c..0000000000 --- a/relayer/src/link/tx_hashes.rs +++ /dev/null @@ -1,28 +0,0 @@ -use core::fmt; - -use tendermint::abci::transaction; - -use crate::link::relay_sender::AsyncReply; - -/// A collection of transaction hashes. -#[derive(Clone)] -pub struct TxHashes(pub Vec); - -impl From for TxHashes { - fn from(r: AsyncReply) -> Self { - Self(r.responses.into_iter().map(|e| e.hash).collect()) - } -} - -impl From for Vec { - fn from(hs: TxHashes) -> Self { - hs.0 - } -} - -impl fmt::Display for TxHashes { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "TxHashes: count={}", self.0.len())?; - self.0.iter().try_for_each(|r| write!(f, "; {}", r)) - } -} diff --git a/relayer/src/macros.rs b/relayer/src/macros.rs deleted file mode 100644 index aaf969ddad..0000000000 --- a/relayer/src/macros.rs +++ /dev/null @@ -1,69 +0,0 @@ -#[cfg(feature = "profiling")] -pub mod profiling { - - use core::sync::atomic::AtomicUsize; - use core::sync::atomic::Ordering::Relaxed; - - std::thread_local! { - pub static DEPTH: AtomicUsize = AtomicUsize::new(0); - } - - /// Measure the time between when this value is allocated - /// and when it is dropped. 
- pub struct Timer { - name: String, - start: std::time::Instant, - } - - impl Timer { - pub fn new(name: String) -> Self { - let depth = DEPTH.with(|d| d.fetch_add(1, Relaxed)); - let pad = " ".repeat(depth); - - tracing::info!("{}⏳ {} - start", pad, name); - - Self { - name, - start: std::time::Instant::now(), - } - } - } - - impl Drop for Timer { - fn drop(&mut self) { - let elapsed = self.start.elapsed().as_millis(); - - let depth = DEPTH.with(|d| d.fetch_sub(1, Relaxed)); - let pad = " ".repeat(depth - 1); - - tracing::info!("{}⏳ {} - elapsed: {}ms", pad, self.name, elapsed); - } - } -} - -/// Measure the time until the current scope ends. -/// -/// Only enabled when the "profiling" feature is enabled. -/// -/// ## Example -/// -/// ```rust -/// use ibc_relayer::time; -/// -/// time!("full scope"); -/// -/// let x = { -/// time!("inner {}", "scope"); -/// -/// 42 -/// // "inner scope" timer ends here -/// }; -/// // "full scope" timer ends here -/// ``` -#[macro_export] -macro_rules! time { - ($($arg:tt)*) => { - #[cfg(feature = "profiling")] - let _timer = $crate::macros::profiling::Timer::new(format!($($arg)*)); - }; -} diff --git a/relayer/src/object.rs b/relayer/src/object.rs deleted file mode 100644 index ff74819a24..0000000000 --- a/relayer/src/object.rs +++ /dev/null @@ -1,485 +0,0 @@ -use flex_error::define_error; -use serde::{Deserialize, Serialize}; - -use ibc::{ - core::{ - ics02_client::{client_state::ClientState, events::UpdateClient}, - ics03_connection::events::Attributes as ConnectionAttributes, - ics04_channel::events::{ - Attributes, CloseInit, SendPacket, TimeoutPacket, WriteAcknowledgement, - }, - ics24_host::identifier::{ChainId, ChannelId, ClientId, ConnectionId, PortId}, - }, - Height, -}; - -use crate::chain::{ - counterparty::{ - channel_connection_client, counterparty_chain_from_channel, - counterparty_chain_from_connection, - }, - handle::ChainHandle, - requests::{IncludeProof, QueryClientStateRequest}, -}; -use crate::error::Error as RelayerError; -use crate::supervisor::Error as SupervisorError; - -/// Client -#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] -pub struct Client { - /// Destination chain identifier. - /// This is the chain hosting the client. - pub dst_chain_id: ChainId, - - /// Client identifier (allocated on the destination chain `dst_chain_id`). - pub dst_client_id: ClientId, - - /// Source chain identifier. - /// This is the chain whose headers the client worker is verifying. - pub src_chain_id: ChainId, -} - -impl Client { - pub fn short_name(&self) -> String { - format!( - "client::{}->{}:{}", - self.src_chain_id, self.dst_chain_id, self.dst_client_id - ) - } -} - -/// Connection -#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] -pub struct Connection { - /// Destination chain identifier. - pub dst_chain_id: ChainId, - - /// Source chain identifier. - pub src_chain_id: ChainId, - - /// Source connection identifier. - pub src_connection_id: ConnectionId, -} - -impl Connection { - pub fn short_name(&self) -> String { - format!( - "connection::{}:{}->{}", - self.src_connection_id, self.src_chain_id, self.dst_chain_id, - ) - } -} - -/// Channel -#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] -pub struct Channel { - /// Destination chain identifier. - pub dst_chain_id: ChainId, - - /// Source chain identifier. - pub src_chain_id: ChainId, - - /// Source channel identifier. 
- pub src_channel_id: ChannelId, - - /// Source port identifier. - pub src_port_id: PortId, -} - -impl Channel { - pub fn short_name(&self) -> String { - format!( - "channel::{}/{}:{}->{}", - self.src_channel_id, self.src_port_id, self.src_chain_id, self.dst_chain_id, - ) - } -} - -/// A packet worker between a source and destination chain, and a specific channel and port. -#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] -pub struct Packet { - /// Destination chain identifier. - pub dst_chain_id: ChainId, - - /// Source chain identifier. - pub src_chain_id: ChainId, - - /// Source channel identifier. - pub src_channel_id: ChannelId, - - /// Source port identifier. - pub src_port_id: PortId, -} - -impl Packet { - pub fn short_name(&self) -> String { - format!( - "packet::{}/{}:{}->{}", - self.src_channel_id, self.src_port_id, self.src_chain_id, self.dst_chain_id, - ) - } -} - -/// A wallet worker which monitors the balance of the wallet in use by Hermes -#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] -pub struct Wallet { - /// Chain identifier - pub chain_id: ChainId, -} - -impl Wallet { - pub fn short_name(&self) -> String { - format!("wallet::{}", self.chain_id) - } -} - -/// An object determines the amount of parallelism that can -/// be exercised when processing [`IbcEvent`](ibc::events::IbcEvent) -/// between two chains. For each [`Object`], a corresponding -/// [`WorkerHandle`](crate::worker::WorkerHandle) is spawned and all [`IbcEvent`](ibc::events::IbcEvent)s mapped -/// to an [`Object`] are sent to the associated [`WorkerHandle`](crate::worker::WorkerHandle) -/// for processing. -#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] -#[serde(tag = "type")] -pub enum Object { - /// See [`Client`]. - Client(Client), - /// See [`Connection`]. - Connection(Connection), - /// See [`Channel`]. - Channel(Channel), - /// See [`Packet`]. - Packet(Packet), - /// See [`Wallet`] - Wallet(Wallet), -} - -define_error! { - ObjectError { - Relayer - [ RelayerError ] - | _ | { "relayer error" }, - - Supervisor - [ SupervisorError ] - | _ | { "supervisor error" }, - - RefreshNotRequired - { - client_id: ClientId, - chain_id: ChainId, - } - | e | { - format!("client '{}' on chain {} does not require refresh", - e.client_id, e.chain_id) - }, - - MissingChannelId - { event: Attributes } - | e | { - format!("channel_id missing in channel open event '{:?}'", - e.event) - }, - - MissingConnectionId - { event: ConnectionAttributes } - | e | { - format!("connection_id missing from connection handshake event '{:?}'", - e.event) - }, - } -} - -impl Object { - /// Returns `true` if this [`Object`] is for a [`WorkerHandle`](crate::worker::WorkerHandle) - /// which is interested in new block events originating from the chain with - /// the given [`ChainId`]. Returns `false` otherwise. - pub fn notify_new_block(&self, src_chain_id: &ChainId) -> bool { - match self { - Object::Client(_) => false, - Object::Connection(c) => &c.src_chain_id == src_chain_id, - Object::Channel(c) => &c.src_chain_id == src_chain_id, - Object::Packet(p) => &p.src_chain_id == src_chain_id, - Object::Wallet(_) => false, - } - } - - /// Returns whether or not this object pertains to the given chain. 
- pub fn for_chain(&self, chain_id: &ChainId) -> bool { - match self { - Object::Client(c) => &c.src_chain_id == chain_id || &c.dst_chain_id == chain_id, - Object::Connection(c) => &c.src_chain_id == chain_id || &c.dst_chain_id == chain_id, - Object::Channel(c) => &c.src_chain_id == chain_id || &c.dst_chain_id == chain_id, - Object::Packet(p) => &p.src_chain_id == chain_id || &p.dst_chain_id == chain_id, - Object::Wallet(w) => &w.chain_id == chain_id, - } - } - - /// Return the type of object - pub fn object_type(&self) -> ObjectType { - match self { - Object::Client(_) => ObjectType::Client, - Object::Channel(_) => ObjectType::Channel, - Object::Connection(_) => ObjectType::Connection, - Object::Packet(_) => ObjectType::Packet, - Object::Wallet(_) => ObjectType::Wallet, - } - } -} - -/// The type of [`Object`]. -#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)] -pub enum ObjectType { - Client, - Channel, - Connection, - Packet, - Wallet, -} - -impl From for Object { - fn from(c: Client) -> Self { - Self::Client(c) - } -} - -impl From for Object { - fn from(c: Connection) -> Self { - Self::Connection(c) - } -} - -impl From for Object { - fn from(c: Channel) -> Self { - Self::Channel(c) - } -} - -impl From for Object { - fn from(p: Packet) -> Self { - Self::Packet(p) - } -} - -impl From for Object { - fn from(w: Wallet) -> Self { - Self::Wallet(w) - } -} - -impl Object { - pub fn src_chain_id(&self) -> &ChainId { - match self { - Self::Client(ref client) => &client.src_chain_id, - Self::Connection(ref connection) => &connection.src_chain_id, - Self::Channel(ref channel) => &channel.src_chain_id, - Self::Packet(ref path) => &path.src_chain_id, - Self::Wallet(ref wallet) => &wallet.chain_id, - } - } - - pub fn dst_chain_id(&self) -> &ChainId { - match self { - Self::Client(ref client) => &client.dst_chain_id, - Self::Connection(ref connection) => &connection.dst_chain_id, - Self::Channel(ref channel) => &channel.dst_chain_id, - Self::Packet(ref path) => &path.dst_chain_id, - Self::Wallet(ref wallet) => &wallet.chain_id, - } - } - - pub fn short_name(&self) -> String { - match self { - Self::Client(ref client) => client.short_name(), - Self::Connection(ref connection) => connection.short_name(), - Self::Channel(ref channel) => channel.short_name(), - Self::Packet(ref path) => path.short_name(), - Self::Wallet(ref wallet) => wallet.short_name(), - } - } - - /// Build the object associated with the given [`UpdateClient`] event. - pub fn for_update_client( - e: &UpdateClient, - dst_chain: &impl ChainHandle, - ) -> Result { - let (client_state, _) = dst_chain - .query_client_state( - QueryClientStateRequest { - client_id: e.client_id().clone(), - height: Height::zero(), - }, - IncludeProof::No, - ) - .map_err(ObjectError::relayer)?; - - if client_state.refresh_period().is_none() { - return Err(ObjectError::refresh_not_required( - e.client_id().clone(), - dst_chain.id(), - )); - } - let src_chain_id = client_state.chain_id(); - - Ok(Client { - dst_client_id: e.client_id().clone(), - dst_chain_id: dst_chain.id(), - src_chain_id, - } - .into()) - } - - /// Build the client object associated with the given channel event attributes. 
- pub fn client_from_chan_open_events( - e: &Attributes, // The attributes of the emitted event - chain: &impl ChainHandle, // The chain which emitted the event - ) -> Result { - let channel_id = e - .channel_id() - .ok_or_else(|| ObjectError::missing_channel_id(e.clone()))?; - - let client = channel_connection_client(chain, e.port_id(), channel_id) - .map_err(ObjectError::supervisor)? - .client; - - if client.client_state.refresh_period().is_none() { - return Err(ObjectError::refresh_not_required( - client.client_id, - chain.id(), - )); - } - - Ok(Client { - dst_client_id: client.client_id.clone(), - dst_chain_id: chain.id(), // The object's destination is the chain hosting the client - src_chain_id: client.client_state.chain_id(), - } - .into()) - } - - /// Build the Connection object associated with the given - /// [`Open`](ibc::core::ics03_connection::connection::State::Open) - /// connection event. - pub fn connection_from_conn_open_events( - e: &ConnectionAttributes, - src_chain: &impl ChainHandle, - ) -> Result { - let connection_id = e - .connection_id - .as_ref() - .ok_or_else(|| ObjectError::missing_connection_id(e.clone()))?; - - let dst_chain_id = counterparty_chain_from_connection(src_chain, connection_id) - .map_err(ObjectError::supervisor)?; - - Ok(Connection { - dst_chain_id, - src_chain_id: src_chain.id(), - src_connection_id: connection_id.clone(), - } - .into()) - } - - /// Build the Channel object associated with the given - /// [`Open`](ibc::core::ics04_channel::channel::State::Open) channel event. - pub fn channel_from_chan_open_events( - attributes: &Attributes, - src_chain: &impl ChainHandle, - ) -> Result { - let channel_id = attributes - .channel_id() - .ok_or_else(|| ObjectError::missing_channel_id(attributes.clone()))?; - - let dst_chain_id = - counterparty_chain_from_channel(src_chain, channel_id, attributes.port_id()) - .map_err(ObjectError::supervisor)?; - - Ok(Channel { - dst_chain_id, - src_chain_id: src_chain.id(), - src_channel_id: *channel_id, - src_port_id: attributes.port_id().clone(), - } - .into()) - } - - /// Build the object associated with the given [`SendPacket`] event. - pub fn for_send_packet( - e: &SendPacket, - src_chain: &impl ChainHandle, - ) -> Result { - let dst_chain_id = counterparty_chain_from_channel( - src_chain, - &e.packet.source_channel, - &e.packet.source_port, - ) - .map_err(ObjectError::supervisor)?; - - Ok(Packet { - dst_chain_id, - src_chain_id: src_chain.id(), - src_channel_id: e.packet.source_channel, - src_port_id: e.packet.source_port.clone(), - } - .into()) - } - - /// Build the object associated with the given [`WriteAcknowledgement`] event. - pub fn for_write_ack( - e: &WriteAcknowledgement, - src_chain: &impl ChainHandle, - ) -> Result { - let dst_chain_id = counterparty_chain_from_channel( - src_chain, - &e.packet.destination_channel, - &e.packet.destination_port, - ) - .map_err(ObjectError::supervisor)?; - - Ok(Packet { - dst_chain_id, - src_chain_id: src_chain.id(), - src_channel_id: e.packet.destination_channel, - src_port_id: e.packet.destination_port.clone(), - } - .into()) - } - - /// Build the object associated with the given [`TimeoutPacket`] event. 
- pub fn for_timeout_packet( - e: &TimeoutPacket, - src_chain: &impl ChainHandle, - ) -> Result { - let dst_chain_id = counterparty_chain_from_channel( - src_chain, - &e.packet.source_channel, - &e.packet.source_port, - ) - .map_err(ObjectError::supervisor)?; - - Ok(Packet { - dst_chain_id, - src_chain_id: src_chain.id(), - src_channel_id: *e.src_channel_id(), - src_port_id: e.src_port_id().clone(), - } - .into()) - } - - /// Build the object associated with the given [`CloseInit`] event. - pub fn for_close_init_channel( - e: &CloseInit, - src_chain: &impl ChainHandle, - ) -> Result { - let dst_chain_id = counterparty_chain_from_channel(src_chain, e.channel_id(), e.port_id()) - .map_err(ObjectError::supervisor)?; - - Ok(Packet { - dst_chain_id, - src_chain_id: src_chain.id(), - src_channel_id: *e.channel_id(), - src_port_id: e.port_id().clone(), - } - .into()) - } -} diff --git a/relayer/src/path.rs b/relayer/src/path.rs deleted file mode 100644 index e15d7648ab..0000000000 --- a/relayer/src/path.rs +++ /dev/null @@ -1,32 +0,0 @@ -use ibc::core::ics04_channel::channel::IdentifiedChannelEnd; -use ibc::core::ics24_host::identifier::{ChannelId, PortId}; - -/// Defines the channel & port identifiers which comprise -/// the two ends of a relayer path. -pub struct PathIdentifiers { - /// Channel & port ids on the target network, usually called the __destination__. - pub port_id: PortId, - pub channel_id: ChannelId, - - /// Channel & port ids on the counterparty network, often called the __source__. - pub counterparty_port_id: PortId, - pub counterparty_channel_id: ChannelId, -} - -// TODO: This should probably be a `TryFrom` instead of `From` so we can get rid of `expect`. -// TODO: Clarify if we should keep `From<&..>` or `From<..>. -impl From<&IdentifiedChannelEnd> for PathIdentifiers { - fn from(ice: &IdentifiedChannelEnd) -> Self { - let counterparty = ice.channel_end.counterparty(); - let counterparty_channel_id = counterparty - .channel_id - .expect("no channel identifier in counterparty channel end"); - - Self { - port_id: ice.port_id.clone(), - channel_id: ice.channel_id, - counterparty_port_id: counterparty.port_id.clone(), - counterparty_channel_id, - } - } -} diff --git a/relayer/src/registry.rs b/relayer/src/registry.rs deleted file mode 100644 index 6f93c22e04..0000000000 --- a/relayer/src/registry.rs +++ /dev/null @@ -1,122 +0,0 @@ -//! Registry for keeping track of [`ChainHandle`]s indexed by a `ChainId`. - -use alloc::collections::btree_map::BTreeMap as HashMap; -use alloc::sync::Arc; -use std::sync::{RwLock, RwLockReadGuard, RwLockWriteGuard}; - -use tokio::runtime::Runtime as TokioRuntime; -use tracing::{trace, warn}; - -use ibc::core::ics24_host::identifier::ChainId; - -use crate::{ - chain::handle::ChainHandle, - config::Config, - spawn::{spawn_chain_runtime, SpawnError}, - util::lock::RwArc, -}; - -/// Registry for keeping track of [`ChainHandle`]s indexed by a `ChainId`. -/// -/// The purpose of this type is to avoid spawning multiple runtimes for a single `ChainId`. -#[derive(Debug)] -pub struct Registry { - config: Config, - handles: HashMap, - rt: Arc, -} - -#[derive(Clone)] -pub struct SharedRegistry { - pub registry: RwArc>, -} - -impl Registry { - /// Construct a new [`Registry`] using the provided [`Config`] - pub fn new(config: Config) -> Self { - Self { - config, - handles: HashMap::new(), - rt: Arc::new(TokioRuntime::new().unwrap()), - } - } - - /// Return the size of the registry, i.e., the number of distinct chain runtimes. 
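On the `TODO` in `relayer/src/path.rs` above: a possible `TryFrom` variant that removes the `expect`, sketched against the same `PathIdentifiers` fields and imports as the deleted file; the `&'static str` error is a placeholder, and a real implementation would likely use a dedicated error type.

impl TryFrom<&IdentifiedChannelEnd> for PathIdentifiers {
    // Placeholder error type, for illustration only.
    type Error = &'static str;

    fn try_from(ice: &IdentifiedChannelEnd) -> Result<Self, Self::Error> {
        let counterparty = ice.channel_end.counterparty();

        // Surface a missing counterparty channel id as an error instead of panicking.
        let counterparty_channel_id = counterparty
            .channel_id
            .ok_or("no channel identifier in counterparty channel end")?;

        Ok(Self {
            port_id: ice.port_id.clone(),
            channel_id: ice.channel_id,
            counterparty_port_id: counterparty.port_id.clone(),
            counterparty_channel_id,
        })
    }
}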
- pub fn size(&self) -> usize { - self.handles.len() - } - - /// Return an iterator overall the chain handles managed by the registry. - pub fn chains(&self) -> impl Iterator { - self.handles.values() - } - - /// Get the [`ChainHandle`] associated with the given [`ChainId`]. - /// - /// If there is no handle yet, this will first spawn the runtime and then - /// return its handle. - pub fn get_or_spawn(&mut self, chain_id: &ChainId) -> Result { - self.spawn(chain_id)?; - - let handle = self - .handles - .get(chain_id) - .expect("runtime was just spawned"); - - Ok(handle.clone()) - } - - /// Spawn a chain runtime for the chain with the given [`ChainId`], - /// only if the registry does not contain a handle for that runtime already. - /// - /// Returns whether or not the runtime was actually spawned. - pub fn spawn(&mut self, chain_id: &ChainId) -> Result { - if !self.handles.contains_key(chain_id) { - let handle = spawn_chain_runtime(&self.config, chain_id, self.rt.clone())?; - self.handles.insert(chain_id.clone(), handle); - trace!(chain = %chain_id, "spawned chain runtime"); - Ok(true) - } else { - Ok(false) - } - } - - /// Shutdown the runtime associated with the given chain identifier. - pub fn shutdown(&mut self, chain_id: &ChainId) { - if let Some(handle) = self.handles.remove(chain_id) { - if let Err(e) = handle.shutdown() { - warn!(chain = %chain_id, "chain runtime might have failed to shutdown properly: {}", e); - } - } - } -} - -impl SharedRegistry { - pub fn new(config: Config) -> Self { - let registry = Registry::new(config); - - Self { - registry: Arc::new(RwLock::new(registry)), - } - } - - pub fn get_or_spawn(&self, chain_id: &ChainId) -> Result { - self.registry.write().unwrap().get_or_spawn(chain_id) - } - - pub fn spawn(&self, chain_id: &ChainId) -> Result { - self.write().spawn(chain_id) - } - - pub fn shutdown(&self, chain_id: &ChainId) { - self.write().shutdown(chain_id) - } - - pub fn write(&self) -> RwLockWriteGuard<'_, Registry> { - self.registry.write().unwrap() - } - - pub fn read(&self) -> RwLockReadGuard<'_, Registry> { - self.registry.read().unwrap() - } -} diff --git a/relayer/src/rest.rs b/relayer/src/rest.rs deleted file mode 100644 index 4f6cb31fce..0000000000 --- a/relayer/src/rest.rs +++ /dev/null @@ -1,94 +0,0 @@ -use crossbeam_channel::TryRecvError; -use tracing::{error, trace}; - -use crate::{ - config::Config, - rest::request::ReplySender, - rest::request::{Request, VersionInfo}, - supervisor::dump_state::SupervisorState, -}; - -pub mod request; - -mod error; -pub use error::RestApiError; - -pub const NAME: &str = env!( - "CARGO_PKG_NAME", - "the env. variable CARGO_PKG_NAME in ibc-relayer is not set!" -); -pub const VER: &str = env!( - "CARGO_PKG_VERSION", - "the env. variable CARGO_PKG_VERSION in ibc-relayer is not set!" -); - -pub type Receiver = crossbeam_channel::Receiver; - -// TODO: Unify this enum with `SupervisorCmd` -// We won't unify yet as it is possible we will never implement -// REST API `/chain` adding endpoint; instead of `/chain` we might -// implement `/reload` for supporting a broader range of functionality -// e.g., adjusting chain config, removing chains, etc. -pub enum Command { - DumpState(ReplySender), -} - -/// Process incoming REST requests. -/// -/// Non-blocking receiving of requests from -/// the REST server, and tries to handle them locally. -/// -/// Any request that cannot be handled locally here is propagated -/// as a [`Command`] to the caller, which the supervisor itself should handle. 
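A standalone, simplified sketch of the memoization idea behind `Registry::get_or_spawn` above: spawn at most one runtime per chain identifier and hand out clones of its handle on subsequent calls. `MiniRegistry` and `FakeHandle` are illustrative stand-ins, not relayer types.

use std::collections::BTreeMap;

#[derive(Clone, Debug)]
struct FakeHandle(String); // stands in for a chain handle

struct MiniRegistry {
    handles: BTreeMap<String, FakeHandle>, // keyed by chain id
}

impl MiniRegistry {
    fn get_or_spawn(&mut self, chain_id: &str) -> FakeHandle {
        self.handles
            .entry(chain_id.to_string())
            // The expensive spawn happens only on the first request for this id.
            .or_insert_with(|| FakeHandle(format!("runtime for {}", chain_id)))
            .clone()
    }
}

fn main() {
    let mut registry = MiniRegistry { handles: BTreeMap::new() };
    let a1 = registry.get_or_spawn("ibc-0");
    let a2 = registry.get_or_spawn("ibc-0"); // reuses the existing handle
    assert_eq!(a1.0, a2.0);
    assert_eq!(registry.handles.len(), 1);
}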
-pub fn process_incoming_requests(config: &Config, channel: &Receiver) -> Option { - match channel.try_recv() { - Ok(request) => match request { - Request::Version { reply_to } => { - trace!("Version"); - - let v = VersionInfo { - name: NAME.to_string(), - version: VER.to_string(), - }; - - reply_to - .send(Ok(v)) - .unwrap_or_else(|e| error!("error replying to a REST request {}", e)); - } - - Request::GetChains { reply_to } => { - trace!("GetChains"); - - reply_to - .send(Ok(config.chains.iter().map(|c| c.id.clone()).collect())) - .unwrap_or_else(|e| error!("error replying to a REST request {}", e)); - } - - Request::GetChain { chain_id, reply_to } => { - trace!("GetChain {}", chain_id); - - let result = config - .find_chain(&chain_id) - .cloned() - .ok_or(RestApiError::ChainConfigNotFound(chain_id)); - - reply_to - .send(result) - .unwrap_or_else(|e| error!("error replying to a REST request {}", e)); - } - - Request::State { reply_to } => { - trace!("State"); - - return Some(Command::DumpState(reply_to)); - } - }, - Err(e) => { - if !matches!(e, TryRecvError::Empty) { - error!("error while waiting for requests: {}", e); - } - } - } - - None -} diff --git a/relayer/src/rest/error.rs b/relayer/src/rest/error.rs deleted file mode 100644 index f77bc69691..0000000000 --- a/relayer/src/rest/error.rs +++ /dev/null @@ -1,54 +0,0 @@ -use serde::ser::{Serialize, SerializeMap, Serializer}; -use thiserror::Error; - -use ibc::core::ics24_host::{error::ValidationErrorDetail, identifier::ChainId}; - -#[derive(Error, Debug)] -pub enum RestApiError { - #[error("failed to send a request through crossbeam channel: {0}")] - ChannelSend(String), - - #[error("failed to receive a reply from crossbeam channel: {0}")] - ChannelRecv(String), - - #[error("failed while serializing reply into json value: {0}")] - Serialization(String), - - #[error("could not find configuration for chain: {0}")] - ChainConfigNotFound(ChainId), - - #[error("failed to parse the string {0} into a valid chain identifier: {1}")] - InvalidChainId(String, ValidationErrorDetail), - - #[error("failed while parsing the request body into a chain configuration: {0}")] - InvalidChainConfig(String), - - #[error("not implemented")] - Unimplemented, -} - -impl RestApiError { - pub fn name(&self) -> &'static str { - match self { - RestApiError::ChannelSend(_) => "ChannelSend", - RestApiError::ChannelRecv(_) => "ChannelRecv", - RestApiError::Serialization(_) => "Serialization", - RestApiError::ChainConfigNotFound(_) => "ChainConfigNotFound", - RestApiError::InvalidChainId(_, _) => "InvalidChainId", - RestApiError::InvalidChainConfig(_) => "InvalidChainConfig", - RestApiError::Unimplemented => "Unimplemented", - } - } -} - -impl Serialize for RestApiError { - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - let mut map = serializer.serialize_map(Some(3))?; - map.serialize_entry("name", self.name())?; - map.serialize_entry("msg", &self.to_string())?; - map.end() - } -} diff --git a/relayer/src/rest/request.rs b/relayer/src/rest/request.rs deleted file mode 100644 index 0c98a411c5..0000000000 --- a/relayer/src/rest/request.rs +++ /dev/null @@ -1,39 +0,0 @@ -use serde::Serialize; - -use ibc::core::ics24_host::identifier::ChainId; - -use crate::{config::ChainConfig, rest::RestApiError, supervisor::dump_state::SupervisorState}; - -pub type ReplySender = crossbeam_channel::Sender>; -pub type ReplyReceiver = crossbeam_channel::Receiver>; - -pub fn reply_channel() -> (ReplySender, ReplyReceiver) { - crossbeam_channel::bounded(1) -} - 
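A minimal, std-only sketch of the request/reply pattern behind `reply_channel` above: each request carries its own bounded(1) reply sender, and the handler answers on that sender. `std::sync::mpsc` stands in here for the `crossbeam_channel` types used by the relayer, and `MiniRequest` is an illustrative stand-in for `Request`.

use std::sync::mpsc;

enum MiniRequest {
    Version { reply_to: mpsc::SyncSender<String> },
}

fn handle(request: MiniRequest) {
    match request {
        MiniRequest::Version { reply_to } => {
            // Ignore a closed reply channel instead of panicking, mirroring the
            // `unwrap_or_else(|e| error!(..))` pattern in `process_incoming_requests`.
            let _ = reply_to.send("0.0.0 (sketch)".to_string());
        }
    }
}

fn main() {
    // A capacity-1 channel is enough: each request expects exactly one reply.
    let (reply_to, reply_rx) = mpsc::sync_channel(1);
    handle(MiniRequest::Version { reply_to });
    assert_eq!(reply_rx.recv().unwrap(), "0.0.0 (sketch)");
}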
-#[derive(Clone, Debug, PartialEq, Eq, Serialize)] -pub struct VersionInfo { - pub name: String, - pub version: String, -} - -/// REST API request variants -#[derive(Clone, Debug)] -pub enum Request { - Version { - reply_to: ReplySender, - }, - - State { - reply_to: ReplySender, - }, - - GetChains { - reply_to: ReplySender>, - }, - - GetChain { - chain_id: ChainId, - reply_to: ReplySender, - }, -} diff --git a/relayer/src/sdk_error.rs b/relayer/src/sdk_error.rs deleted file mode 100644 index 499b8e897f..0000000000 --- a/relayer/src/sdk_error.rs +++ /dev/null @@ -1,200 +0,0 @@ -use flex_error::define_error; -use tendermint::abci::Code; -use tendermint_rpc::endpoint::broadcast::tx_commit::TxResult; - -// Provides mapping for errors returned from ibc-go and cosmos-sdk -define_error! { - SdkError { - Client - [ ClientError ] - |_| { "ICS02 Client Error" }, - - UnexpectedOk - |_| { "Expected error code, instead got Ok" }, - - UnknownSdk - { - codespace: String, - code: u32, - } - | e | { - format_args!("unknown SDK error with code space: {}, code: {}", e.codespace, e.code) - }, - - UnknownTxSync - { code: u32 } - | e | { format_args!("unknown TX sync response error: {}", e.code) }, - - OutOfGas - { code: u32 } - |_| { "the gas requirement is higher than the configured maximum gas! please check the Hermes config.toml".to_string() }, - - InsufficientFee - { code: u32 } - |_| { "the price configuration for this chain may be too low! please check the `gas_price.price` Hermes config.toml".to_string() }, - } -} - -define_error! { - ClientError { - LightClientAlreadyExists - |_| { "light client already exists" }, - - InvalidLightClient - |_| { "light client is invalid" }, - - LightClientNotFound - |_| { "light client not found" }, - - FrozenLightClient - |_| { "light client is frozen due to misbehaviour" }, - - InvalidClientMetadata - |_| { "invalid client metadata" }, - - ConsensusStateNotFound - |_| { "consensus state not found" }, - - InvalidConsensusState - |_| { "invalid consensus state" }, - - ClientTypeNotFound - |_| { "client type not found" }, - - InvalidClientType - |_| { "invalid client type" }, - - CommitmentRootNotFound - |_| { "commitment root not found" }, - - InvalidClientHeader - |_| { "invalid client header" }, - - InvalidLightClientMisbehavior - |_| { "invalid light client misbehaviour" }, - - ClientStateVerificationFailed - |_| { "client state verification failed" }, - - ClientConsensusStateVerificationFailed - |_| { "client consensus state verification failed" }, - - ConnectionStateVerificationFailed - |_| { "connection state verification failed" }, - - ChannelStateVerificationFailed - |_| { "channel state verification failed" }, - - PacketCommitmentVerificationFailed - |_| { "packet commitment verification failed" }, - - PacketAcknowledgementVerificationFailed - |_| { "packet acknowledgement verification failed" }, - - PacketReceiptVerificationFailed - |_| { "packet receipt verification failed" }, - - NextSequenceReceiveVerificationFailed - |_| { "next sequence receive verification failed" }, - - SelfConsensusStateNotFound - |_| { "self consensus state not found" }, - - UpdateLightClientFailed - |_| { "unable to update light client" }, - - InvalidUpdateClientProposal - |_| { "invalid update client proposal" }, - - InvalidClientUpgrade - |_| { "invalid client upgrade" }, - - InvalidHeight - |_| { "invalid height" }, - - InvalidClientStateSubstitute - |_| { "invalid client state substitute" }, - - InvalidUpgradeProposal - |_| { "invalid upgrade proposal" }, - - InactiveClient - |_| 
{ "client is not active" }, - - UnknownClient - { code: u32 } - |e| { format!("unknown client error: {}", e.code) }, - } -} - -// The error code mapping follows the Go code at -// ibc-go/modules/core/02-client/types/errors.go -fn client_error_from_code(code: u32) -> ClientError { - match code { - 2 => ClientError::light_client_already_exists(), - 3 => ClientError::invalid_light_client(), - 4 => ClientError::light_client_not_found(), - 5 => ClientError::frozen_light_client(), - 6 => ClientError::invalid_client_metadata(), - 7 => ClientError::consensus_state_not_found(), - 8 => ClientError::invalid_consensus_state(), - 9 => ClientError::client_type_not_found(), - 10 => ClientError::invalid_client_type(), - 11 => ClientError::commitment_root_not_found(), - 12 => ClientError::invalid_client_header(), - 13 => ClientError::invalid_light_client_misbehavior(), - 14 => ClientError::client_state_verification_failed(), - 15 => ClientError::client_consensus_state_verification_failed(), - 16 => ClientError::connection_state_verification_failed(), - 17 => ClientError::client_state_verification_failed(), - 18 => ClientError::packet_commitment_verification_failed(), - 19 => ClientError::packet_acknowledgement_verification_failed(), - 20 => ClientError::packet_receipt_verification_failed(), - 21 => ClientError::next_sequence_receive_verification_failed(), - 22 => ClientError::self_consensus_state_not_found(), - 23 => ClientError::update_light_client_failed(), - 24 => ClientError::invalid_update_client_proposal(), - 25 => ClientError::invalid_client_upgrade(), - 26 => ClientError::invalid_height(), - 27 => ClientError::invalid_client_state_substitute(), - 28 => ClientError::invalid_upgrade_proposal(), - 29 => ClientError::inactive_client(), - _ => ClientError::unknown_client(code), - } -} - -// Converts the error in a TxResult into SdkError with the same -// mapping as defined in ibc-go and cosmos-sdk. This assumes the -// target chain we are interacting with are using cosmos-sdk and ibc-go. -// -// TODO: investigate ways to automatically generate the mapping by parsing -// the errors.go source code directly -pub fn sdk_error_from_tx_result(result: &TxResult) -> SdkError { - match result.code { - Code::Ok => SdkError::unexpected_ok(), - Code::Err(code) => { - let codespace = result.codespace.to_string(); - if codespace == "client" { - SdkError::client(client_error_from_code(code)) - } else { - // TODO: Implement mapping for other codespaces in ibc-go - SdkError::unknown_sdk(codespace, code) - } - } - } -} - -/// Converts error codes originating from `broadcast_tx_sync` responses -/// into IBC relayer domain-type errors. -/// See [`tendermint_rpc::endpoint::broadcast::tx_sync::Response`]. -/// Cf: -pub fn sdk_error_from_tx_sync_error_code(code: u32) -> SdkError { - match code { - // The primary reason (we know of) causing broadcast_tx_sync to fail - // is due to "out of gas" errors. These are unrecoverable at the moment - // on the Hermes side. We'll inform the user to check for misconfig. 
- 11 => SdkError::out_of_gas(code), - 13 => SdkError::insufficient_fee(code), - _ => SdkError::unknown_tx_sync(code), - } -} diff --git a/relayer/src/spawn.rs b/relayer/src/spawn.rs deleted file mode 100644 index 4e81bbee88..0000000000 --- a/relayer/src/spawn.rs +++ /dev/null @@ -1,69 +0,0 @@ -use alloc::sync::Arc; - -use flex_error::define_error; -use tokio::runtime::Runtime as TokioRuntime; - -use ibc::core::ics24_host::identifier::ChainId; - -use crate::{ - chain::{cosmos::CosmosSdkChain, handle::ChainHandle, runtime::ChainRuntime, ChainType}, - config::Config, - error::Error as RelayerError, -}; - -#[cfg(test)] -use crate::chain::mock::MockChain; - -define_error! { - SpawnError { - Relayer - [ RelayerError ] - | _ | { "relayer error" }, - - RuntimeNotFound - | _ | { "expected runtime to be found in registry" }, - - MissingChainConfig - { chain_id: ChainId } - | e | { - format_args!("missing chain config for '{}' in configuration file", e.chain_id) - } - } -} - -impl SpawnError { - pub fn log_as_debug(&self) -> bool { - self.detail().log_as_debug() - } -} - -impl SpawnErrorDetail { - pub fn log_as_debug(&self) -> bool { - matches!(self, SpawnErrorDetail::MissingChainConfig(_)) - } -} - -/// Spawns a chain runtime from the configuration and given a chain identifier. -/// Returns the corresponding handle if successful. -pub fn spawn_chain_runtime( - config: &Config, - chain_id: &ChainId, - rt: Arc, -) -> Result { - let chain_config = config - .find_chain(chain_id) - .cloned() - .ok_or_else(|| SpawnError::missing_chain_config(chain_id.clone()))?; - - dbg!(chain_config.r#type); - - let handle = match chain_config.r#type { - ChainType::CosmosSdk => ChainRuntime::::spawn::(chain_config, rt), - - #[cfg(test)] - ChainType::Mock => ChainRuntime::::spawn::(chain_config, rt), - } - .map_err(SpawnError::relayer)?; - - Ok(handle) -} diff --git a/relayer/src/supervisor.rs b/relayer/src/supervisor.rs deleted file mode 100644 index 9e027e414e..0000000000 --- a/relayer/src/supervisor.rs +++ /dev/null @@ -1,732 +0,0 @@ -use alloc::collections::btree_map::BTreeMap as HashMap; -use alloc::sync::Arc; -use core::convert::Infallible; -use core::ops::Deref; -use core::time::Duration; -use std::sync::RwLock; - -use crossbeam_channel::{unbounded, Receiver, Sender}; -use itertools::Itertools; -use tracing::{debug, error, error_span, info, trace, warn}; - -use ibc::{ - core::ics24_host::identifier::{ChainId, ChannelId, PortId}, - events::IbcEvent, - Height, -}; - -use crate::{ - chain::{endpoint::HealthCheck, handle::ChainHandle, tracking::TrackingId}, - config::Config, - event::monitor::{self, Error as EventError, ErrorDetail as EventErrorDetail, EventBatch}, - object::Object, - registry::{Registry, SharedRegistry}, - rest, - supervisor::scan::ScanMode, - telemetry, - util::{ - lock::LockExt, - task::{spawn_background_task, Next, TaskError, TaskHandle}, - }, - worker::WorkerMap, -}; - -pub mod client_state_filter; -use client_state_filter::{FilterPolicy, Permission}; - -pub mod error; -pub use error::{Error, ErrorDetail}; - -pub mod dump_state; -use dump_state::SupervisorState; - -pub mod scan; -pub mod spawn; - -pub mod cmd; -use cmd::SupervisorCmd; - -use self::{scan::ChainScanner, spawn::SpawnContext}; - -type ArcBatch = Arc>; -type Subscription = Receiver; - -/** - A wrapper around the SupervisorCmd sender so that we can - send stop signal to the supervisor before stopping the - chain drivers to prevent the supervisor from raising - errors caused by closed connections. 
-*/ -pub struct SupervisorHandle { - pub sender: Sender, - tasks: Vec, -} - -/// Options for the supervisor -#[derive(Debug)] -pub struct SupervisorOptions { - /// Perform a health check of all chains we connect to - pub health_check: bool, - - /// Force a full scan of the chains for clients, connections, and channels, - /// even when an allow list is configured for a chain and the full scan could - /// be omitted. - pub force_full_scan: bool, -} - -/** - Spawn a supervisor for testing purpose using the provided - [`Config`] and [`SharedRegistry`]. Returns a - [`SupervisorHandle`] that stops the supervisor when the - value is dropped. -*/ -pub fn spawn_supervisor( - config: Config, - registry: SharedRegistry, - rest_rx: Option, - options: SupervisorOptions, -) -> Result { - let (sender, receiver) = unbounded(); - - let tasks = spawn_supervisor_tasks(config, registry, rest_rx, receiver, options)?; - - Ok(SupervisorHandle { sender, tasks }) -} - -impl SupervisorHandle { - /** - Explicitly stop the running supervisor. This is useful in tests where - the supervisor has to be stopped and restarted explicitly. - - Note that after stopping the supervisor, the only way to restart it - is by respawning a new supervisor using [`spawn_supervisor`]. - */ - pub fn shutdown(self) { - for task in self.tasks { - // Send the shutdown signals in parallel - task.shutdown(); - } - // Dropping the tasks will cause this to block until all tasks - // are terminated. - } - - pub fn wait(self) { - for task in self.tasks { - task.join(); - } - } -} - -pub fn spawn_supervisor_tasks( - config: Config, - registry: SharedRegistry, - rest_rx: Option, - cmd_rx: Receiver, - options: SupervisorOptions, -) -> Result, Error> { - if options.health_check { - health_check(&config, &mut registry.write()); - } - - let workers = Arc::new(RwLock::new(WorkerMap::new())); - let client_state_filter = Arc::new(RwLock::new(FilterPolicy::default())); - - let scan = chain_scanner( - &config, - &mut registry.write(), - &mut client_state_filter.acquire_write(), - if options.force_full_scan { - ScanMode::Full - } else { - ScanMode::Auto - }, - ) - .scan_chains(); - - info!("Scanned chains:"); - info!("{}", scan); - - spawn_context(&config, &mut registry.write(), &mut workers.acquire_write()).spawn_workers(scan); - - let subscriptions = init_subscriptions(&config, &mut registry.write())?; - - let batch_tasks = spawn_batch_workers( - &config, - registry.clone(), - client_state_filter, - workers.clone(), - subscriptions, - ); - - let cmd_task = spawn_cmd_worker(registry.clone(), workers.clone(), cmd_rx); - - let mut tasks = vec![cmd_task]; - tasks.extend(batch_tasks); - - if let Some(rest_rx) = rest_rx { - let rest_task = spawn_rest_worker(config, registry, workers, rest_rx); - tasks.push(rest_task); - } - - Ok(tasks) -} - -fn spawn_batch_workers( - config: &Config, - registry: SharedRegistry, - client_state_filter: Arc>, - workers: Arc>, - subscriptions: Vec<(Chain, Subscription)>, -) -> Vec { - let mut handles = Vec::with_capacity(subscriptions.len()); - - for (chain, subscription) in subscriptions { - let config = config.clone(); - let registry = registry.clone(); - let client_state_filter = client_state_filter.clone(); - let workers = workers.clone(); - - let handle = spawn_background_task( - tracing::Span::none(), - Some(Duration::from_millis(5)), - move || -> Result> { - if let Ok(batch) = subscription.try_recv() { - handle_batch( - &config, - &mut registry.write(), - &mut client_state_filter.acquire_write(), - &mut 
workers.acquire_write(), - chain.clone(), - batch, - ); - } - - Ok(Next::Continue) - }, - ); - - handles.push(handle); - } - - handles -} - -pub fn spawn_cmd_worker( - registry: SharedRegistry, - workers: Arc>, - cmd_rx: Receiver, -) -> TaskHandle { - spawn_background_task( - error_span!("cmd"), - Some(Duration::from_millis(500)), - move || -> Result> { - if let Ok(cmd) = cmd_rx.try_recv() { - match cmd { - SupervisorCmd::DumpState(reply_to) => { - dump_state(®istry.read(), &workers.acquire_read(), reply_to); - } - } - } - - Ok(Next::Continue) - }, - ) -} - -pub fn spawn_rest_worker( - config: Config, - registry: SharedRegistry, - workers: Arc>, - rest_rx: rest::Receiver, -) -> TaskHandle { - spawn_background_task( - error_span!("rest"), - Some(Duration::from_millis(500)), - move || -> Result> { - handle_rest_requests(&config, ®istry.read(), &workers.acquire_read(), &rest_rx); - - Ok(Next::Continue) - }, - ) -} - -/// Returns `true` if the relayer should filter based on -/// client state attributes, e.g., trust threshold. -/// Returns `false` otherwise. -fn client_filter_enabled(_config: &Config) -> bool { - // we currently always enable the client filter - true -} - -/// Returns `true` if the relayer should filter based on -/// channel identifiers. -/// Returns `false` otherwise. -fn channel_filter_enabled(_config: &Config) -> bool { - // we currently always enable the channel filter - true -} - -/// Whether or not the given channel is allowed by the filter policy, if any. -fn is_channel_allowed( - config: &Config, - chain_id: &ChainId, - port_id: &PortId, - channel_id: &ChannelId, -) -> bool { - // If filtering is disabled, then relay all channels - if !channel_filter_enabled(config) { - return true; - } - - config.packets_on_channel_allowed(chain_id, port_id, channel_id) -} - -/// Whether or not the relayer should relay packets -/// or complete handshakes for the given [`Object`]. -fn relay_on_object( - config: &Config, - registry: &mut Registry, - client_state_filter: &mut FilterPolicy, - chain_id: &ChainId, - object: &Object, -) -> bool { - // No filter is enabled, bail fast. 
- if !channel_filter_enabled(config) && !client_filter_enabled(config) { - return true; - } - - // First, apply the channel filter on packets and channel workers - match object { - Object::Packet(p) => { - if !is_channel_allowed(config, chain_id, &p.src_port_id, &p.src_channel_id) { - // Forbid relaying packets on that channel - return false; - } - } - Object::Channel(c) => { - if !is_channel_allowed(config, chain_id, &c.src_port_id, &c.src_channel_id) { - // Forbid completing handshake for that channel - return false; - } - } - _ => (), - }; - - // Then, apply the client filter - let client_filter_outcome = match object { - Object::Client(client) => client_state_filter.control_client_object(registry, client), - Object::Connection(conn) => client_state_filter.control_conn_object(registry, conn), - Object::Channel(chan) => client_state_filter.control_chan_object(registry, chan), - Object::Packet(packet) => client_state_filter.control_packet_object(registry, packet), - Object::Wallet(_wallet) => Ok(Permission::Allow), - }; - - match client_filter_outcome { - Ok(Permission::Allow) => true, - Ok(Permission::Deny) => { - warn!( - "client filter denies relaying on object {}", - object.short_name() - ); - - false - } - Err(e) if e.log_as_debug() => { - debug!( - "denying relaying on object {}, caused by: {}", - object.short_name(), - e - ); - - false - } - Err(e) => { - warn!( - "denying relaying on object {}, caused by: {}", - object.short_name(), - e - ); - - false - } - } -} - -/// If `enabled`, build an `Object` using the provided `object_ctor` -/// and add the given `event` to the `collected` events for this `object`. -fn collect_event( - collected: &mut CollectedEvents, - event: &IbcEvent, - enabled: bool, - object_ctor: F, -) where - F: FnOnce() -> Option, -{ - if enabled { - if let Some(object) = object_ctor() { - collected - .per_object - .entry(object) - .or_default() - .push(event.clone()); - } - } -} - -pub fn collect_events( - config: &Config, - workers: &WorkerMap, - src_chain: &impl ChainHandle, - batch: &EventBatch, -) -> CollectedEvents { - let mut collected = - CollectedEvents::new(batch.height, batch.chain_id.clone(), batch.tracking_id); - - let mode = config.mode; - - for event in &batch.events { - match event { - IbcEvent::NewBlock(_) => { - collected.new_block = Some(event.clone()); - } - IbcEvent::UpdateClient(ref update) => { - collect_event(&mut collected, event, mode.clients.enabled, || { - // Collect update client events only if the worker exists - if let Ok(object) = Object::for_update_client(update, src_chain) { - workers.contains(&object).then(|| object) - } else { - None - } - }); - } - IbcEvent::OpenInitConnection(..) - | IbcEvent::OpenTryConnection(..) - | IbcEvent::OpenAckConnection(..) => { - collect_event(&mut collected, event, mode.connections.enabled, || { - event.connection_attributes().and_then(|attr| { - Object::connection_from_conn_open_events(attr, src_chain).ok() - }) - }); - } - IbcEvent::OpenInitChannel(..) | IbcEvent::OpenTryChannel(..) 
=> { - collect_event(&mut collected, event, mode.channels.enabled, || { - event.clone().channel_attributes().and_then(|attr| { - Object::channel_from_chan_open_events(&attr, src_chain).ok() - }) - }); - } - IbcEvent::OpenAckChannel(open_ack) => { - // Create client and packet workers here as channel end must be opened - let attributes = open_ack.clone().into(); - collect_event(&mut collected, event, mode.clients.enabled, || { - Object::client_from_chan_open_events(&attributes, src_chain).ok() - }); - - // If handshake message relaying is enabled create worker to send the MsgChannelOpenConfirm message - collect_event(&mut collected, event, mode.channels.enabled, || { - Object::channel_from_chan_open_events(&attributes, src_chain).ok() - }); - } - IbcEvent::OpenConfirmChannel(open_confirm) => { - let attributes = open_confirm.clone().into(); - // Create client worker here as channel end must be opened - collect_event(&mut collected, event, mode.clients.enabled, || { - Object::client_from_chan_open_events(&attributes, src_chain).ok() - }); - } - IbcEvent::SendPacket(ref packet) => { - collect_event(&mut collected, event, mode.packets.enabled, || { - Object::for_send_packet(packet, src_chain).ok() - }); - } - IbcEvent::TimeoutPacket(ref packet) => { - collect_event(&mut collected, event, mode.packets.enabled, || { - Object::for_timeout_packet(packet, src_chain).ok() - }); - } - IbcEvent::WriteAcknowledgement(ref packet) => { - collect_event(&mut collected, event, mode.packets.enabled, || { - Object::for_write_ack(packet, src_chain).ok() - }); - } - IbcEvent::CloseInitChannel(ref packet) => { - collect_event(&mut collected, event, mode.packets.enabled, || { - Object::for_close_init_channel(packet, src_chain).ok() - }); - } - _ => (), - } - } - - collected -} - -/// Create a new `SpawnContext` for spawning workers. -fn spawn_context<'a, Chain: ChainHandle>( - config: &'a Config, - registry: &'a mut Registry, - workers: &'a mut WorkerMap, -) -> SpawnContext<'a, Chain> { - SpawnContext::new(config, registry, workers) -} - -fn chain_scanner<'a, Chain: ChainHandle>( - config: &'a Config, - registry: &'a mut Registry, - client_state_filter: &'a mut FilterPolicy, - full_scan: ScanMode, -) -> ChainScanner<'a, Chain> { - ChainScanner::new(config, registry, client_state_filter, full_scan) -} - -/// Perform a health check on all connected chains -fn health_check(config: &Config, registry: &mut Registry) { - use HealthCheck::*; - - let chains = &config.chains; - - for config in chains { - let id = &config.id; - let chain = registry.get_or_spawn(id); - - match chain { - Ok(chain) => match chain.health_check() { - Ok(Healthy) => info!(chain = %id, "chain is healthy"), - Ok(Unhealthy(e)) => warn!(chain = %id, "chain is unhealthy: {}", e), - Err(e) => error!(chain = %id, "failed to perform health check: {}", e), - }, - Err(e) => { - error!( - chain = %id, - "skipping health check, reason: failed to spawn chain runtime with error: {}", - e - ); - } - } - } -} - -/// Subscribe to the events emitted by the chains the supervisor is connected to. 
-fn init_subscriptions( - config: &Config, - registry: &mut Registry, -) -> Result, Error> { - let chains = &config.chains; - - let mut subscriptions = Vec::with_capacity(chains.len()); - - for chain_config in chains { - let chain = match registry.get_or_spawn(&chain_config.id) { - Ok(chain) => chain, - Err(e) => { - error!( - "failed to spawn chain runtime for {}: {}", - chain_config.id, e - ); - - continue; - } - }; - - match chain.subscribe() { - Ok(subscription) => subscriptions.push((chain, subscription)), - Err(e) => error!( - "failed to subscribe to events of {}: {}", - chain_config.id, e - ), - } - } - - // At least one chain runtime should be available, otherwise the supervisor - // cannot do anything and will hang indefinitely. - if registry.size() == 0 { - return Err(Error::no_chains_available()); - } - - Ok(subscriptions) -} - -/// Dump the state of the supervisor into a [`SupervisorState`] value, -/// and send it back through the given channel. -fn dump_state( - registry: &Registry, - workers: &WorkerMap, - reply_to: Sender, -) { - let state = state(registry, workers); - let _ = reply_to.try_send(state); -} - -/// Returns a representation of the supervisor's internal state -/// as a [`SupervisorState`]. -fn state(registry: &Registry, workers: &WorkerMap) -> SupervisorState { - let chains = registry.chains().map(|c| c.id()).collect_vec(); - SupervisorState::new(chains, workers.handles()) -} - -fn handle_rest_requests( - config: &Config, - registry: &Registry, - workers: &WorkerMap, - rest_rx: &rest::Receiver, -) { - if let Some(cmd) = rest::process_incoming_requests(config, rest_rx) { - handle_rest_cmd(registry, workers, cmd); - } -} - -fn handle_rest_cmd( - registry: &Registry, - workers: &WorkerMap, - m: rest::Command, -) { - match m { - rest::Command::DumpState(reply) => { - let state = state(registry, workers); - reply - .send(Ok(state)) - .unwrap_or_else(|e| error!("error replying to a REST request {}", e)); - } - } -} - -fn clear_pending_packets(workers: &mut WorkerMap, chain_id: &ChainId) -> Result<(), Error> { - for worker in workers.workers_for_chain(chain_id) { - worker.clear_pending_packets(); - } - - Ok(()) -} - -/// Process a batch of events received from a chain. -fn process_batch( - config: &Config, - registry: &mut Registry, - client_state_filter: &mut FilterPolicy, - workers: &mut WorkerMap, - src_chain: Chain, - batch: &EventBatch, -) -> Result<(), Error> { - assert_eq!(src_chain.id(), batch.chain_id); - - telemetry!(received_event_batch, batch.tracking_id); - - let collected = collect_events(config, workers, &src_chain, batch); - - // If there is a NewBlock event, forward this event first to any workers affected by it. - if let Some(IbcEvent::NewBlock(new_block)) = collected.new_block { - workers.notify_new_block(&src_chain.id(), batch.height, new_block); - } - - // Forward the IBC events. - for (object, events) in collected.per_object.into_iter() { - if !relay_on_object( - config, - registry, - client_state_filter, - &src_chain.id(), - &object, - ) { - trace!( - "skipping events for '{}'. 
\ - reason: filtering is enabled and channel does not match any allowed channels", - object.short_name() - ); - - continue; - } - - if events.is_empty() { - continue; - } - - let src = registry - .get_or_spawn(object.src_chain_id()) - .map_err(Error::spawn)?; - - let dst = registry - .get_or_spawn(object.dst_chain_id()) - .map_err(Error::spawn)?; - - let worker = workers.get_or_spawn(object, src, dst, config); - - worker.send_events( - batch.height, - events, - batch.chain_id.clone(), - batch.tracking_id, - ); - } - - Ok(()) -} - -/// Process the given batch if it does not contain any errors, -/// output the errors on the console otherwise. -fn handle_batch( - config: &Config, - registry: &mut Registry, - client_state_filter: &mut FilterPolicy, - workers: &mut WorkerMap, - chain: Chain, - batch: ArcBatch, -) { - let chain_id = chain.id(); - - match batch.deref() { - Ok(batch) => { - if let Err(e) = - process_batch(config, registry, client_state_filter, workers, chain, batch) - { - error!("[{}] error during batch processing: {}", chain_id, e); - } - } - Err(EventError(EventErrorDetail::SubscriptionCancelled(_), _)) => { - warn!(chain.id = %chain_id, "event subscription was cancelled, clearing pending packets"); - - let _ = clear_pending_packets(workers, &chain_id).map_err(|e| { - error!( - "[{}] error during clearing pending packets: {}", - chain_id, e - ) - }); - } - Err(e) => { - error!("[{}] error in receiving event batch: {}", chain_id, e) - } - } -} - -/// Describes the result of [`collect_events`]. -#[derive(Clone, Debug)] -pub struct CollectedEvents { - /// The height at which these events were emitted from the chain. - pub height: Height, - /// The chain from which the events were emitted. - pub chain_id: ChainId, - /// [`NewBlock`](ibc::events::IbcEventType::NewBlock) event - /// collected from the [`EventBatch`]. - pub new_block: Option, - /// Mapping between [`Object`]s and their associated [`IbcEvent`]s. - pub per_object: HashMap>, - /// Unique identifier for tracking this event batch - pub tracking_id: TrackingId, -} - -impl CollectedEvents { - pub fn new(height: Height, chain_id: ChainId, tracking_id: TrackingId) -> Self { - Self { - height, - chain_id, - tracking_id, - new_block: Default::default(), - per_object: Default::default(), - } - } - - /// Whether the collected events include a - /// [`NewBlock`](ibc::events::IbcEventType::NewBlock) event. 
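A tiny, self-contained sketch of the bucketing performed by `collect_event` and `CollectedEvents::per_object` above: each event is grouped under the worker object that should handle it. Strings stand in for `Object` and `IbcEvent` purely for illustration.

use std::collections::BTreeMap;

fn main() {
    let mut per_object: BTreeMap<String, Vec<String>> = BTreeMap::new();

    let events = [
        ("channel:transfer/channel-0", "SendPacket"),
        ("client:07-tendermint-3", "UpdateClient"),
        ("channel:transfer/channel-0", "WriteAcknowledgement"),
    ];

    for (object, event) in events {
        // Same shape as `collected.per_object.entry(object).or_default().push(event)`.
        per_object
            .entry(object.to_string())
            .or_default()
            .push(event.to_string());
    }

    assert_eq!(per_object["channel:transfer/channel-0"].len(), 2);
    assert_eq!(per_object.len(), 2);
}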
- pub fn has_new_block(&self) -> bool { - self.new_block.is_some() - } -} diff --git a/relayer/src/supervisor/client_state_filter.rs b/relayer/src/supervisor/client_state_filter.rs deleted file mode 100644 index dfaa6419e8..0000000000 --- a/relayer/src/supervisor/client_state_filter.rs +++ /dev/null @@ -1,410 +0,0 @@ -use alloc::collections::BTreeMap as HashMap; - -use flex_error::define_error; -use tracing::{debug, trace}; - -use ibc::core::ics02_client::client_state::{AnyClientState, ClientState}; -use ibc::core::ics02_client::trust_threshold::TrustThreshold; -use ibc::core::ics03_connection::connection::ConnectionEnd; -use ibc::core::ics04_channel::error::Error as ChannelError; -use ibc::core::ics24_host::identifier::{ChainId, ChannelId, ClientId, ConnectionId, PortId}; -use ibc::Height; - -use crate::chain::handle::ChainHandle; -use crate::chain::requests::{ - IncludeProof, QueryChannelRequest, QueryClientStateRequest, QueryConnectionRequest, -}; -use crate::error::Error as RelayerError; -use crate::object; -use crate::registry::Registry; -use crate::spawn::SpawnError; - -#[derive(Copy, Clone, Debug, PartialEq, Eq)] -pub enum Permission { - Allow, - Deny, -} - -impl Permission { - fn and(self, other: &Self) -> Self { - if matches!(self, Self::Allow) && matches!(other, Self::Allow) { - Self::Allow - } else { - Self::Deny - } - } -} - -#[derive(Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)] -enum CacheKey { - Client(ChainId, ClientId), - Channel(ChainId, PortId, ChannelId), - Connection(ChainId, ConnectionId), -} - -define_error! { - FilterError { - Spawn - [ SpawnError ] - | _ | { "spawn error" }, - - Relayer - [ RelayerError ] - | _ | { "relayer error" }, - - Channel - [ ChannelError ] - | _ | { "channel error" }, - - } -} - -impl FilterError { - pub fn log_as_debug(&self) -> bool { - matches!(self.detail(), FilterErrorDetail::Spawn(e) if e.source.log_as_debug()) - } -} - -/// A cache storing filtering status (allow or deny) for -/// arbitrary identifiers. -#[derive(Default, Debug)] -pub struct FilterPolicy { - /// A cache associating a generic identifying key, such as - /// client id, channel id, or connection id, with an - /// [`Allow`](Permission::Allow) status. - permission_cache: HashMap, -} - -impl FilterPolicy { - /// Given a connection end and the underlying client for that - /// connection, controls both the client as well as the - /// client on the counterparty chain. - /// Returns `true` if both clients are allowed, `false` otherwise. - /// Caches the result for both clients as well as the connection. - /// - /// May encounter errors caused by failed queries. Any such error - /// is propagated and nothing is cached. - pub fn control_connection_end_and_client( - &mut self, - registry: &mut Registry, - chain_id: &ChainId, // Chain hosting the client & connection - client_state: &AnyClientState, - connection: &ConnectionEnd, - connection_id: &ConnectionId, - ) -> Result { - let identifier = CacheKey::Connection(chain_id.clone(), connection_id.clone()); - - trace!( - "[client filter] controlling permissions for {:?}", - identifier - ); - - // Return if cache hit - if let Some(p) = self.permission_cache.get(&identifier) { - trace!("[client filter] cache hit {:?} for {:?}", p, identifier); - return Ok(*p); - } - - // Fetch the details of the client on counterparty chain. 
- let counterparty_chain_id = client_state.chain_id(); - let counterparty_chain = registry - .get_or_spawn(&counterparty_chain_id) - .map_err(FilterError::spawn)?; - let counterparty_client_id = connection.counterparty().client_id(); - let (counterparty_client_state, _) = { - counterparty_chain - .query_client_state( - QueryClientStateRequest { - client_id: counterparty_client_id.clone(), - height: Height::zero(), - }, - IncludeProof::No, - ) - .map_err(FilterError::relayer)? - }; - - // Control both clients, cache their results. - let client_permission = self.control_client(chain_id, connection.client_id(), client_state); - let counterparty_client_permission = self.control_client( - &counterparty_chain_id, - counterparty_client_id, - &counterparty_client_state, - ); - let permission = client_permission.and(&counterparty_client_permission); - - debug!( - "[client filter] {:?}: relay for conn {:?}", - permission, identifier, - ); - // Save the connection id in the cache - self.permission_cache - .entry(identifier) - .or_insert(permission); - - Ok(permission) - } - - /// Given a client identifier and its corresponding client state, - /// controls the client state and decides if the client should - /// be allowed or not. - /// Returns `true` if client is allowed, `false` otherwise. - /// Caches the result. - pub fn control_client( - &mut self, - host_chain: &ChainId, - client_id: &ClientId, - state: &AnyClientState, - ) -> Permission { - let identifier = CacheKey::Client(host_chain.clone(), client_id.clone()); - - trace!( - "[client filter] controlling permissions for {:?}", - identifier - ); - - // Return if cache hit - if let Some(p) = self.permission_cache.get(&identifier) { - trace!("[client filter] cache hit {:?} for {:?}", p, identifier); - return *p; - } - - let permission = match state.trust_threshold() { - Some(trust) if trust == TrustThreshold::ONE_THIRD => Permission::Allow, - Some(_) => { - trace!( - "[client filter] client {} on chain {} has a trust threshold different than 1/3", - client_id, host_chain - ); - - Permission::Deny - } - None => { - trace!( - "[client filter] client {} on chain {} does not have a trust threshold set", - client_id, - host_chain - ); - - Permission::Deny - } - }; - - debug!( - "[client filter] {:?}: relay for client {:?}", - permission, identifier - ); - - self.permission_cache - .entry(identifier) - .or_insert(permission); - - permission - } - - pub fn control_client_object( - &mut self, - registry: &mut Registry, - obj: &object::Client, - ) -> Result { - let identifier = CacheKey::Client(obj.dst_chain_id.clone(), obj.dst_client_id.clone()); - - trace!( - "[client filter] controlling permissions for {:?}", - identifier - ); - - // Return if cache hit - if let Some(p) = self.permission_cache.get(&identifier) { - trace!("[client filter] cache hit {:?} for {:?}", p, identifier); - return Ok(*p); - } - - let chain = registry - .get_or_spawn(&obj.dst_chain_id) - .map_err(FilterError::spawn)?; - - trace!( - "[client filter] deciding if to relay on {:?} hosted chain {}", - obj.dst_client_id, - obj.dst_chain_id - ); - - let (client_state, _) = chain - .query_client_state( - QueryClientStateRequest { - client_id: obj.dst_client_id.clone(), - height: Height::zero(), - }, - IncludeProof::No, - ) - .map_err(FilterError::relayer)?; - - Ok(self.control_client(&obj.dst_chain_id, &obj.dst_client_id, &client_state)) - } - - pub fn control_conn_object( - &mut self, - registry: &mut Registry, - obj: &object::Connection, - ) -> Result { - let identifier = - 
CacheKey::Connection(obj.src_chain_id.clone(), obj.src_connection_id.clone()); - - trace!( - "[client filter] controlling permissions for {:?}", - identifier - ); - - // Return if cache hit - if let Some(p) = self.permission_cache.get(&identifier) { - trace!("[client filter] cache hit {:?} for {:?}", p, identifier); - return Ok(*p); - } - - let src_chain = registry - .get_or_spawn(&obj.src_chain_id) - .map_err(FilterError::spawn)?; - - trace!( - "[client filter] deciding if to relay on {:?} hosted on chain {}", - obj, - obj.src_chain_id - ); - - let (connection_end, _) = src_chain - .query_connection( - QueryConnectionRequest { - connection_id: obj.src_connection_id.clone(), - height: Height::zero(), - }, - IncludeProof::No, - ) - .map_err(FilterError::relayer)?; - - let (client_state, _) = src_chain - .query_client_state( - QueryClientStateRequest { - client_id: connection_end.client_id().clone(), - height: Height::zero(), - }, - IncludeProof::No, - ) - .map_err(FilterError::relayer)?; - - self.control_connection_end_and_client( - registry, - &obj.src_chain_id, - &client_state, - &connection_end, - &obj.src_connection_id, - ) - } - - fn control_channel( - &mut self, - registry: &mut Registry, - chain_id: &ChainId, - port_id: &PortId, - channel_id: &ChannelId, - ) -> Result { - let identifier = CacheKey::Channel(chain_id.clone(), port_id.clone(), *channel_id); - - trace!( - "[client filter] controlling permissions for {:?}", - identifier - ); - - // Return if cache hit - if let Some(p) = self.permission_cache.get(&identifier) { - trace!("[client filter] cache hit {:?} for {:?}", p, identifier); - return Ok(*p); - } - - let src_chain = registry - .get_or_spawn(chain_id) - .map_err(FilterError::spawn)?; - - let (channel_end, _) = src_chain - .query_channel( - QueryChannelRequest { - port_id: port_id.clone(), - channel_id: *channel_id, - height: Height::zero(), - }, - IncludeProof::No, - ) - .map_err(FilterError::relayer)?; - - let conn_id = channel_end.connection_hops.first().ok_or_else(|| { - FilterError::channel(ChannelError::invalid_connection_hops_length( - 1, - channel_end.connection_hops().len(), - )) - })?; - - let (connection_end, _) = src_chain - .query_connection( - QueryConnectionRequest { - connection_id: conn_id.clone(), - height: Height::zero(), - }, - IncludeProof::No, - ) - .map_err(FilterError::relayer)?; - - let (client_state, _) = src_chain - .query_client_state( - QueryClientStateRequest { - client_id: connection_end.client_id().clone(), - height: Height::zero(), - }, - IncludeProof::No, - ) - .map_err(FilterError::relayer)?; - - let permission = self.control_connection_end_and_client( - registry, - chain_id, - &client_state, - &connection_end, - conn_id, - )?; - - let key = CacheKey::Channel(chain_id.clone(), port_id.clone(), *channel_id); - - debug!( - "[client filter] {:?}: relay for channel {:?}: ", - permission, key - ); - - self.permission_cache.entry(key).or_insert(permission); - - Ok(permission) - } - - pub fn control_chan_object( - &mut self, - registry: &mut Registry, - obj: &object::Channel, - ) -> Result { - self.control_channel( - registry, - &obj.src_chain_id, - &obj.src_port_id, - &obj.src_channel_id, - ) - } - - pub fn control_packet_object( - &mut self, - registry: &mut Registry, - obj: &object::Packet, - ) -> Result { - self.control_channel( - registry, - &obj.src_chain_id, - &obj.src_port_id, - &obj.src_channel_id, - ) - } -} diff --git a/relayer/src/supervisor/cmd.rs b/relayer/src/supervisor/cmd.rs deleted file mode 100644 index 00cae44f6d..0000000000 
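A simplified, std-only sketch of the two ideas in `FilterPolicy` above: verdicts are cached per identifier so repeated checks skip the chain queries, and permissions combine conservatively (relaying is allowed only when both underlying clients are allowed). `MiniPermission`, `MiniFilter`, and the `verdict_for` closure are illustrative stand-ins for the real `Permission`, cache keys, and queries.

use std::collections::BTreeMap;

#[derive(Copy, Clone, Debug, PartialEq, Eq)]
enum MiniPermission { Allow, Deny }

impl MiniPermission {
    // Mirrors `Permission::and`: Allow only if both sides allow.
    fn and(self, other: MiniPermission) -> MiniPermission {
        if self == MiniPermission::Allow && other == MiniPermission::Allow {
            MiniPermission::Allow
        } else {
            MiniPermission::Deny
        }
    }
}

struct MiniFilter {
    cache: BTreeMap<String, MiniPermission>,
}

impl MiniFilter {
    fn control(&mut self, key: &str, verdict_for: impl Fn(&str) -> MiniPermission) -> MiniPermission {
        if let Some(p) = self.cache.get(key) {
            return *p; // cache hit: skip the expensive check
        }
        let p = verdict_for(key);
        self.cache.insert(key.to_string(), p);
        p
    }
}

fn main() {
    let mut filter = MiniFilter { cache: BTreeMap::new() };
    let client = filter.control("client:ibc-0/07-tendermint-0", |_| MiniPermission::Allow);
    let counterparty = filter.control("client:ibc-1/07-tendermint-9", |_| MiniPermission::Deny);
    // The connection is relayed only if both underlying clients are allowed.
    assert_eq!(client.and(counterparty), MiniPermission::Deny);
}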
--- a/relayer/src/supervisor/cmd.rs +++ /dev/null @@ -1,8 +0,0 @@ -use crossbeam_channel::Sender; - -use super::dump_state::SupervisorState; - -#[derive(Clone, Debug)] -pub enum SupervisorCmd { - DumpState(Sender), -} diff --git a/relayer/src/supervisor/dump_state.rs b/relayer/src/supervisor/dump_state.rs deleted file mode 100644 index 2c9e7168d4..0000000000 --- a/relayer/src/supervisor/dump_state.rs +++ /dev/null @@ -1,77 +0,0 @@ -use alloc::collections::BTreeMap; -use alloc::fmt; - -use ibc::core::ics24_host::identifier::ChainId; -use itertools::Itertools; -use serde::{Deserialize, Serialize}; -use tracing::info; - -use crate::{ - object::{Object, ObjectType}, - worker::{WorkerData, WorkerHandle, WorkerId}, -}; - -#[derive(Clone, Debug, Serialize, Deserialize)] -pub struct WorkerDesc { - pub id: WorkerId, - pub object: Object, - pub data: Option, -} - -impl WorkerDesc { - pub fn new(id: WorkerId, object: Object, data: Option) -> Self { - Self { id, object, data } - } -} - -#[derive(Clone, Debug, Default, Serialize, Deserialize)] -pub struct SupervisorState { - pub chains: Vec, - pub workers: BTreeMap>, -} - -impl SupervisorState { - pub fn new<'a>( - mut chains: Vec, - workers: impl Iterator, - ) -> Self { - chains.sort(); - - let workers = workers - .map(|h| WorkerDesc::new(h.id(), h.object().clone(), h.data().cloned())) - .into_group_map_by(|desc| desc.object.object_type()) - .into_iter() - .update(|(_, os)| os.sort_by_key(|desc| desc.object.short_name())) - .collect::>(); - - Self { chains, workers } - } - - pub fn print_info(&self) { - self.to_string() - .split('\n') - .for_each(|line| info!("{}", line)); - } -} - -impl fmt::Display for SupervisorState { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - writeln!(f)?; - writeln!(f, "* Chains: {}", self.chains.iter().join(", "))?; - for (tpe, objects) in &self.workers { - writeln!(f, "* {tpe:?} workers:")?; - for desc in objects { - writeln!(f, " - {} (id: {})", desc.object.short_name(), desc.id)?; - if let Some(WorkerData::Client { - misbehaviour, - refresh, - }) = desc.data - { - writeln!(f, " | misbehaviour: {misbehaviour}, refresh: {refresh}")?; - } - } - } - - Ok(()) - } -} diff --git a/relayer/src/supervisor/error.rs b/relayer/src/supervisor/error.rs deleted file mode 100644 index b517aaef80..0000000000 --- a/relayer/src/supervisor/error.rs +++ /dev/null @@ -1,79 +0,0 @@ -use flex_error::define_error; - -use ibc::core::ics03_connection::connection::Counterparty; -use ibc::core::ics24_host::identifier::{ChainId, ChannelId, ConnectionId, PortId}; - -use crate::error::Error as RelayerError; -use crate::spawn::SpawnError; -use crate::supervisor::scan::Error as ScanError; - -define_error! 
{ - Error { - ChannelUninitialized - { - port_id: PortId, - channel_id: ChannelId, - chain_id: ChainId, - } - |e| { - format_args!("channel {0}/{1} on chain {2} is not open", - e.port_id, e.channel_id, e.chain_id) - }, - - ChannelConnectionUninitialized - { - channel_id: ChannelId, - chain_id: ChainId, - counterparty: Counterparty - } - |e| { - format_args!("channel {} on chain {} has a connection with uninitialized counterparty {:?}", - e.channel_id, e.chain_id, e.counterparty) - }, - - ConnectionNotOpen - { - connection_id: ConnectionId, - channel_id: ChannelId, - chain_id: ChainId, - } - |e| { - format_args!("connection {0} (underlying channel {1}) on chain {2} is not open", - e.connection_id, e.channel_id, e.chain_id) - }, - - MissingConnectionHops - { - channel_id: ChannelId, - chain_id: ChainId, - } - |e| { - format_args!("channel {0} on chain {1} has no connection hops specified", - e.channel_id, e.chain_id) - }, - - MissingCounterpartyChannelId - |_| { "failed due to missing counterparty channel id" }, - - Relayer - [ RelayerError ] - |_| { "relayer error" }, - - NoChainsAvailable - |_| { "supervisor was not able to connect to any chains" }, - - Spawn - [ SpawnError ] - |_| { "supervisor was not able to spawn chain runtime" }, - - Scan - [ ScanError ] - |_| { "supervisor encountered an error when scanning chains" }, - } -} - -impl Error { - pub fn log_as_debug(&self) -> bool { - matches!(self.detail(), ErrorDetail::Spawn(e) if e.source.log_as_debug()) - } -} diff --git a/relayer/src/supervisor/scan.rs b/relayer/src/supervisor/scan.rs deleted file mode 100644 index 4e5301f0c6..0000000000 --- a/relayer/src/supervisor/scan.rs +++ /dev/null @@ -1,816 +0,0 @@ -use core::fmt; -use std::collections::BTreeMap; - -use itertools::Itertools; -use tracing::{debug, error, info, info_span, warn}; - -use ibc::{ - core::{ - ics02_client::client_state::{ClientState, IdentifiedAnyClientState}, - ics03_connection::connection::{IdentifiedConnectionEnd, State as ConnectionState}, - ics04_channel::{ - channel::{IdentifiedChannelEnd, State as ChannelState}, - packet::Sequence, - }, - ics24_host::identifier::{ChainId, ChannelId, ClientId, ConnectionId, PortId}, - }, - Height, -}; - -use crate::{ - chain::{ - counterparty::{channel_on_destination, connection_state_on_destination}, - handle::ChainHandle, - requests::{ - IncludeProof, PageRequest, QueryChannelRequest, QueryClientConnectionsRequest, - QueryClientStateRequest, QueryClientStatesRequest, QueryConnectionChannelsRequest, - QueryConnectionRequest, - }, - }, - config::{filter::ChannelFilters, ChainConfig, Config, PacketFilter}, - registry::Registry, - supervisor::client_state_filter::{FilterPolicy, Permission}, -}; - -use crate::chain::counterparty::{unreceived_acknowledgements, unreceived_packets}; - -use crate::error::Error as RelayerError; -use crate::spawn::SpawnError; - -flex_error::define_error! 
{ - Error { - Spawn - [ SpawnError ] - |_| { "spawn" }, - - Query - [ RelayerError ] - |_| { "query" }, - - MissingConnectionHop - { - port_id: PortId, - channel_id: ChannelId, - chain_id: ChainId, - } - |e| { - format_args!( - "could not retrieve the connection hop underlying port/channel {}/{} on chain '{}'", - e.port_id, e.channel_id, e.chain_id - ) - }, - - UninitializedChannel - { - port_id: PortId, - channel_id: ChannelId, - chain_id: ChainId, - } - |e| { - format_args!( - "channel '{}/{}' on chain '{}' is uninitialized", - e.port_id, e.channel_id, e.chain_id - ) - }, - - CounterpartyConnectionState - { - connection_id: ConnectionId, - counterparty_chain_id: ChainId, - reason: String, - } - |e| { - format_args!( - "failed to query counterparty connection state of connection '{}' on counterparty chain '{}', reason: {}", - e.connection_id, e.counterparty_chain_id, e.reason - ) - } - } -} - -#[derive(Debug)] -pub struct ChainsScan { - pub chains: Vec>, -} - -impl fmt::Display for ChainsScan { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - for scan in self.chains.iter().flatten() { - writeln!(f, "# Chain: {}", scan.chain_id)?; - - for client in scan.clients.values() { - writeln!(f, " - Client: {}", client.client.client_id)?; - - for conn in client.connections.values() { - let counterparty = conn - .counterparty_state - .as_ref() - .map(|s| s.to_string()) - .unwrap_or_else(|| "".to_string()); - - writeln!(f, " * Connection: {}", conn.connection.connection_id)?; - writeln!(f, " | State: {}", conn.state())?; - writeln!(f, " | Counterparty state: {}", counterparty)?; - - for chan in conn.channels.values() { - let counterparty = chan - .counterparty - .as_ref() - .map(|c| c.channel_id.to_string()) - .unwrap_or_else(|| "".to_string()); - - writeln!(f, " + Channel: {}", chan.channel.channel_id)?; - writeln!(f, " | Port: {}", chan.channel.port_id)?; - writeln!(f, " | State: {}", chan.channel.channel_end.state())?; - writeln!(f, " | Counterparty: {}", counterparty)?; - } - } - } - } - - Ok(()) - } -} - -#[derive(Clone, Debug)] -pub struct ChainScan { - pub chain_id: ChainId, - pub clients: BTreeMap, -} - -impl ChainScan { - fn new(chain_id: ChainId) -> ChainScan { - Self { - chain_id, - clients: BTreeMap::new(), - } - } -} - -#[derive(Clone, Debug)] -pub struct ClientScan { - pub client: IdentifiedAnyClientState, - pub connections: BTreeMap, -} - -impl ClientScan { - fn new(client: IdentifiedAnyClientState) -> ClientScan { - Self { - client, - connections: BTreeMap::new(), - } - } - - pub fn id(&self) -> &ClientId { - &self.client.client_id - } - - pub fn counterparty_chain_id(&self) -> ChainId { - self.client.client_state.chain_id() - } -} - -#[derive(Clone, Debug)] -pub struct ConnectionScan { - pub connection: IdentifiedConnectionEnd, - pub counterparty_state: Option, - pub channels: BTreeMap, -} - -impl ConnectionScan { - pub fn new( - connection: IdentifiedConnectionEnd, - counterparty_state: Option, - ) -> Self { - Self { - connection, - counterparty_state, - channels: BTreeMap::new(), - } - } - - pub fn id(&self) -> &ConnectionId { - &self.connection.connection_id - } - - pub fn state(&self) -> ConnectionState { - self.connection.connection_end.state - } - - pub fn is_open(&self) -> bool { - self.connection.connection_end.is_open() - } -} - -#[derive(Clone, Debug)] -pub struct ChannelScan { - pub channel: IdentifiedChannelEnd, - pub counterparty: Option, -} - -impl ChannelScan { - pub fn new(channel: IdentifiedChannelEnd, counterparty: Option) -> Self { - Self { - channel, 
- counterparty, - } - } - - pub fn id(&self) -> &ChannelId { - &self.channel.channel_id - } - - pub fn unreceived_packets_on_counterparty( - &self, - chain: &impl ChainHandle, - counterparty_chain: &impl ChainHandle, - ) -> Option> { - self.counterparty.as_ref().map(|counterparty| { - unreceived_packets(counterparty_chain, chain, &counterparty.into()) - .map(|(seq, _)| seq) - .unwrap_or_default() - }) - } - - pub fn unreceived_acknowledgements_on_counterparty( - &self, - chain: &impl ChainHandle, - counterparty_chain: &impl ChainHandle, - ) -> Option> { - self.counterparty.as_ref().map(|counterparty| { - unreceived_acknowledgements(counterparty_chain, chain, &counterparty.into()) - .map(|(sns, _)| sns) - .unwrap_or_default() - }) - } -} - -#[derive(Copy, Clone, Debug, PartialEq, Eq)] -pub enum ScanMode { - Auto, - Full, -} - -pub struct ChainScanner<'a, Chain: ChainHandle> { - config: &'a Config, - registry: &'a mut Registry, - client_state_filter: &'a mut FilterPolicy, - scan_mode: ScanMode, -} - -impl<'a, Chain: ChainHandle> ChainScanner<'a, Chain> { - pub fn new( - config: &'a Config, - registry: &'a mut Registry, - client_state_filter: &'a mut FilterPolicy, - scan_mode: ScanMode, - ) -> Self { - Self { - config, - registry, - client_state_filter, - scan_mode, - } - } - - pub fn scan_chains(mut self) -> ChainsScan { - let mut scans = ChainsScan { - chains: Vec::with_capacity(self.config.chains.len()), - }; - - for chain in self.config.chains.clone() { - scans.chains.push(self.scan_chain(&chain)); - } - - scans - } - - pub fn scan_chain(&mut self, chain_config: &ChainConfig) -> Result { - let span = info_span!("scan.chain", chain = %chain_config.id); - let _guard = span.enter(); - - info!("scanning chain..."); - - let chain = match self.registry.get_or_spawn(&chain_config.id) { - Ok(chain_handle) => chain_handle, - Err(e) => { - error!( - "aborting scan, reason: failed to spawn chain runtime with error: {}", - e - ); - - return Err(Error::spawn(e)); - } - }; - - let mut scan = ChainScan::new(chain_config.id.clone()); - - match self.use_allow_list(chain_config) { - Some(spec) if self.scan_mode == ScanMode::Auto => { - info!( - "chain uses an allow list (without wildcards), skipping scan for fast startup" - ); - info!("allowed ports/channels: {}", spec); - - self.query_allowed_channels(&chain, spec, &mut scan)?; - } - _ => { - info!("scanning chain for all clients, connections and channels"); - self.scan_all_clients(&chain, &mut scan)?; - } - }; - - Ok(scan) - } - - pub fn query_allowed_channels( - &mut self, - chain: &Chain, - filters: &ChannelFilters, - scan: &mut ChainScan, - ) -> Result<(), Error> { - info!("querying allowed channels..."); - - for (port_id, channel_id) in filters.iter_exact() { - let result = scan_allowed_channel(self.registry, chain, port_id, channel_id); - - match result { - Ok(ScannedChannel { - channel, - counterparty_channel, - connection, - counterparty_connection_state, - client, - }) => { - let client_scan = scan - .clients - .entry(client.client_id.clone()) - .or_insert_with(|| ClientScan::new(client)); - - let connection_scan = client_scan - .connections - .entry(connection.connection_id.clone()) - .or_insert_with(|| { - ConnectionScan::new(connection, counterparty_connection_state) - }); - - connection_scan - .channels - .entry(channel.channel_id) - .or_insert_with(|| ChannelScan::new(channel, counterparty_channel)); - } - Err(e) => error!(channel = %channel_id, "failed to scan channel, reason: {}", e), - } - } - - Ok(()) - } - - pub fn scan_all_clients(&mut 
self, chain: &Chain, scan: &mut ChainScan) -> Result<(), Error> { - info!("scanning all clients..."); - - let clients = query_all_clients(chain)?; - - for client in clients { - if let Some(client_scan) = self.scan_client(chain, client)? { - scan.clients.insert(client_scan.id().clone(), client_scan); - } - } - - Ok(()) - } - - fn scan_client( - &mut self, - chain: &Chain, - client: IdentifiedAnyClientState, - ) -> Result, Error> { - let span = info_span!("scan.client", client = %client.client_id); - let _guard = span.enter(); - - info!("scanning client..."); - - if !self.client_allowed(chain, &client) { - warn!( - trust_threshold = ?client.client_state.trust_threshold(), - "skipping client, reason: client is not allowed", - ); - - return Ok(None); - } - - let counterparty_chain_id = client.client_state.chain_id(); - let has_counterparty = self.config.has_chain(&counterparty_chain_id); - - if !has_counterparty { - debug!( - chain = %chain.id(), - counterparty_chain = %counterparty_chain_id, - "skipping client because its counterparty is not present in the config", - ); - - return Ok(None); - } - - let client_connections_ids = query_client_connections(chain, &client.client_id)?; - - let mut scan = ClientScan::new(client); - - for connection_end in client_connections_ids { - if let Some(connection_scan) = - self.scan_connection(chain, &scan.client, connection_end)? - { - scan.connections - .insert(connection_scan.id().clone(), connection_scan); - } - } - - Ok(Some(scan)) - } - - fn scan_connection( - &mut self, - chain: &Chain, - client: &IdentifiedAnyClientState, - connection: IdentifiedConnectionEnd, - ) -> Result, Error> { - let span = info_span!("scan.connection", connection = %connection.connection_id); - let _guard = span.enter(); - - info!("scanning connection..."); - - if !self.connection_allowed(chain, client, &connection) { - warn!("skipping connection, reason: connection is not allowed",); - return Ok(None); - } - - let mut scan = ConnectionScan::new(connection, None); - - if !scan.is_open() { - warn!("connection is not open, skipping scan of channels over this connection"); - return Ok(Some(scan)); - } - - let counterparty_state = match self.counterparty_connection_state(client, &scan.connection) - { - Ok(state) if !state.eq(&ConnectionState::Open) => { - warn!("counterparty connection is not open, skipping scan of channels over this connection"); - return Ok(Some(scan)); - } - Err(e) => { - error!("error fetching counterparty connection state: {}", e); - return Ok(None); - } - Ok(state) => state, - }; - - scan.counterparty_state = Some(counterparty_state); - - let channels = match query_connection_channels(chain, scan.connection.id()) { - Ok(channels) => channels, - Err(e) => { - error!("failed to fetch connection channels: {}", e); - Vec::new() - } - }; - - let counterparty_chain = self - .registry - .get_or_spawn(&client.client_state.chain_id()) - .map_err(Error::spawn)?; - - let channels = channels - .into_iter() - .filter(|channel| self.channel_allowed(chain, channel)) - .map(|channel| { - let counterparty = - channel_on_destination(&channel, &scan.connection, &counterparty_chain) - .unwrap_or_default(); - - let scan = ChannelScan { - channel, - counterparty, - }; - - (*scan.id(), scan) - }) - .collect(); - - scan.channels = channels; - - Ok(Some(scan)) - } - - fn counterparty_connection_state( - &mut self, - client: &IdentifiedAnyClientState, - connection: &IdentifiedConnectionEnd, - ) -> Result { - let counterparty_chain = self - .registry - 
.get_or_spawn(&client.client_state.chain_id()) - .map_err(Error::spawn)?; - - let counterparty_state = connection_state_on_destination(connection, &counterparty_chain) - .map_err(|e| { - Error::counterparty_connection_state( - connection.connection_id.clone(), - client.client_state.chain_id(), - e.to_string(), - ) - })?; - - Ok(counterparty_state) - } - - fn filtering_enabled(&self) -> bool { - // filtering is always enabled - true - } - - fn use_allow_list<'b>(&self, chain_config: &'b ChainConfig) -> Option<&'b ChannelFilters> { - if !self.filtering_enabled() { - return None; - } - - match chain_config.packet_filter { - PacketFilter::Allow(ref filters) if filters.is_exact() => Some(filters), - _ => None, - } - } - - fn client_allowed(&mut self, chain: &Chain, client: &IdentifiedAnyClientState) -> bool { - if !self.filtering_enabled() { - return true; - }; - - let permission = self.client_state_filter.control_client( - &chain.id(), - &client.client_id, - &client.client_state, - ); - - permission == Permission::Allow - } - - fn connection_allowed( - &mut self, - chain: &Chain, - client: &IdentifiedAnyClientState, - connection: &IdentifiedConnectionEnd, - ) -> bool { - if !self.filtering_enabled() { - return true; - } - - let permission = self.client_state_filter.control_connection_end_and_client( - self.registry, - &chain.id(), - &client.client_state, - &connection.connection_end, - &connection.connection_id, - ); - - match permission { - Ok(Permission::Deny) => { - warn!( - "skipping workers for chain {}, client {} & conn {}, \ - reason: client or counterparty client is not allowed", - chain.id(), - client.client_id, - connection.connection_id - ); - - false - } - Err(e) => { - error!( - "skipping workers for chain {}, client {} & conn {}, reason: {}", - chain.id(), - client.client_id, - connection.connection_id, - e - ); - - false - } - _ => true, - } - } - - fn channel_allowed(&mut self, chain: &Chain, channel: &IdentifiedChannelEnd) -> bool { - self.config - .packets_on_channel_allowed(&chain.id(), &channel.port_id, &channel.channel_id) - } -} - -struct ScannedChannel { - channel: IdentifiedChannelEnd, - counterparty_channel: Option, - connection: IdentifiedConnectionEnd, - counterparty_connection_state: Option, - client: IdentifiedAnyClientState, -} - -fn scan_allowed_channel( - registry: &'_ mut Registry, - chain: &Chain, - port_id: &PortId, - channel_id: &ChannelId, -) -> Result { - let span = info_span!("scan.channel", port = %port_id, channel = %channel_id); - let _guard = span.enter(); - - info!("querying channel..."); - let channel = query_channel(chain, port_id, channel_id)?; - - if channel - .channel_end - .state_matches(&ChannelState::Uninitialized) - { - return Err(Error::uninitialized_channel( - port_id.clone(), - *channel_id, - chain.id(), - )); - } - - let connection = query_connection_for_channel(chain, &channel)?; - let client_id = connection.connection_end.client_id(); - - info!( - connection = %connection.connection_id, client = %client_id, - "found connection and client", - ); - - info!(client = %client_id, "querying client..."); - let client = query_client(chain, client_id)?; - - info!( - client = %client_id, - counterparty_chain = %client.client_state.chain_id(), - "found counterparty chain for client", - ); - - let counterparty_chain = registry - .get_or_spawn(&client.client_state.chain_id()) - .map_err(Error::spawn)?; - - let counterparty_channel = - channel_on_destination(&channel, &connection, &counterparty_chain).unwrap_or_default(); - - let 
counterparty_channel_name = counterparty_channel - .as_ref() - .map(|c| c.channel_id.to_string()) - .unwrap_or_else(|| "".to_string()); - - info!( - counterparty_channel = %counterparty_channel_name, - "found counterparty channel" - ); - - let counterparty_connection_state = - connection_state_on_destination(&connection, &counterparty_chain) - .map(Some) - .unwrap_or_default(); - - let counterparty_connection_name = counterparty_connection_state - .as_ref() - .map(|s| s.to_string()) - .unwrap_or_else(|| "".to_string()); - - info!( - counterparty_connection_state = %counterparty_connection_name, - "found counterparty connection state" - ); - - Ok(ScannedChannel { - channel, - counterparty_channel, - connection, - counterparty_connection_state, - client, - }) -} - -fn query_client( - chain: &Chain, - client_id: &ClientId, -) -> Result { - let (client, _) = chain - .query_client_state( - QueryClientStateRequest { - client_id: client_id.clone(), - height: Height::zero(), - }, - IncludeProof::No, - ) - .map_err(Error::query)?; - - Ok(IdentifiedAnyClientState::new(client_id.clone(), client)) -} - -fn query_channel( - chain: &Chain, - port_id: &PortId, - channel_id: &ChannelId, -) -> Result { - let (channel_end, _) = chain - .query_channel( - QueryChannelRequest { - port_id: port_id.clone(), - channel_id: *channel_id, - height: Height::zero(), - }, - IncludeProof::No, - ) - .map_err(Error::query)?; - - Ok(IdentifiedChannelEnd::new( - port_id.clone(), - *channel_id, - channel_end, - )) -} - -fn query_connection_for_channel( - chain: &Chain, - channel: &IdentifiedChannelEnd, -) -> Result { - let connection_id = channel - .channel_end - .connection_hops() - .first() - .cloned() - .ok_or_else(|| { - Error::missing_connection_hop(channel.port_id.clone(), channel.channel_id, chain.id()) - })?; - - query_connection(chain, &connection_id) -} - -fn query_all_clients( - chain: &Chain, -) -> Result, Error> { - let clients_req = QueryClientStatesRequest { - pagination: Some(PageRequest::all()), - }; - - chain.query_clients(clients_req).map_err(Error::query) -} - -fn query_client_connections( - chain: &Chain, - client_id: &ClientId, -) -> Result, Error> { - let ids = chain - .query_client_connections(QueryClientConnectionsRequest { - client_id: client_id.clone(), - }) - .map_err(Error::query)?; - - let connections = ids - .into_iter() - .filter_map(|id| match query_connection(chain, &id) { - Ok(connection) => Some(connection), - Err(e) => { - error!("failed to query connection: {}", e); - None - } - }) - .collect_vec(); - - Ok(connections) -} - -fn query_connection( - chain: &Chain, - connection_id: &ConnectionId, -) -> Result { - let (connection_end, _) = chain - .query_connection( - QueryConnectionRequest { - connection_id: connection_id.clone(), - height: Height::zero(), - }, - IncludeProof::No, - ) - .map_err(Error::query)?; - - Ok(IdentifiedConnectionEnd { - connection_id: connection_id.clone(), - connection_end, - }) -} - -fn query_connection_channels( - chain: &Chain, - connection_id: &ConnectionId, -) -> Result, Error> { - chain - .query_connection_channels(QueryConnectionChannelsRequest { - connection_id: connection_id.clone(), - pagination: Some(PageRequest::all()), - }) - .map_err(Error::query) -} diff --git a/relayer/src/supervisor/spawn.rs b/relayer/src/supervisor/spawn.rs deleted file mode 100644 index 469dac4b7b..0000000000 --- a/relayer/src/supervisor/spawn.rs +++ /dev/null @@ -1,325 +0,0 @@ -use tracing::{error, info}; - -use ibc::core::{ - ics02_client::client_state::{ClientState, 
IdentifiedAnyClientState}, - ics03_connection::connection::IdentifiedConnectionEnd, - ics04_channel::channel::State as ChannelState, - ics24_host::identifier::ChainId, -}; - -use crate::{ - chain::{counterparty::connection_state_on_destination, handle::ChainHandle}, - config::Config, - object::{Channel, Client, Connection, Object, Packet, Wallet}, - registry::Registry, - supervisor::error::Error as SupervisorError, - telemetry, - worker::WorkerMap, -}; - -use super::{ - scan::{ChainScan, ChainsScan, ChannelScan, ClientScan, ConnectionScan}, - Error, -}; - -/// A context for spawning workers within the supervisor. -pub struct SpawnContext<'a, Chain: ChainHandle> { - config: &'a Config, - registry: &'a mut Registry, - workers: &'a mut WorkerMap, -} - -impl<'a, Chain: ChainHandle> SpawnContext<'a, Chain> { - pub fn new( - config: &'a Config, - registry: &'a mut Registry, - workers: &'a mut WorkerMap, - ) -> Self { - Self { - config, - registry, - workers, - } - } - - pub fn spawn_workers(&mut self, scan: ChainsScan) { - for chain_scan in scan.chains { - match chain_scan { - Ok(chain_scan) => self.spawn_workers_for_chain(chain_scan), - Err(e) => error!("failed to spawn worker for a chain, reason: {}", e), // TODO: Show chain id - } - } - } - - pub fn spawn_workers_for_chain(&mut self, scan: ChainScan) { - let chain = match self.registry.get_or_spawn(&scan.chain_id) { - Ok(chain_handle) => chain_handle, - Err(e) => { - error!( - chain = %scan.chain_id, - "skipping workers , reason: failed to spawn chain runtime with error: {}", - e - ); - - return; - } - }; - - for (_, client_scan) in scan.clients { - self.spawn_workers_for_client(chain.clone(), client_scan); - } - - // Let's only spawn the wallet worker if telemetry is enabled, - // otherwise the worker just ends up issuing queries to the node - // without making anything of the result - telemetry!(self.spawn_wallet_worker(chain)); - } - - pub fn spawn_wallet_worker(&mut self, chain: Chain) { - let wallet_object = Object::Wallet(Wallet { - chain_id: chain.id(), - }); - - self.workers - .spawn(chain.clone(), chain, &wallet_object, self.config) - .then(|| { - info!("spawning Wallet worker: {}", wallet_object.short_name()); - }); - } - - pub fn spawn_workers_for_client(&mut self, chain: Chain, client_scan: ClientScan) { - for (_, connection_scan) in client_scan.connections { - self.spawn_workers_for_connection(chain.clone(), &client_scan.client, connection_scan); - } - } - - pub fn spawn_workers_for_connection( - &mut self, - chain: Chain, - client: &IdentifiedAnyClientState, - connection_scan: ConnectionScan, - ) { - let connection_id = connection_scan.id().clone(); - - match self.spawn_connection_workers( - chain.clone(), - client.clone(), - connection_scan.connection, - ) { - Ok(true) => info!( - chain = %chain.id(), - connection = %connection_id, - "done spawning connection workers", - ), - Ok(false) => info!( - chain = %chain.id(), - connection = %connection_id, - "no connection workers were spawn", - ), - Err(e) => error!( - chain = %chain.id(), - connection = %connection_id, - "skipped connection workers, reason: {}", - e - ), - } - - for (channel_id, channel_scan) in connection_scan.channels { - match self.spawn_workers_for_channel(chain.clone(), client, channel_scan) { - Ok(true) => info!( - chain = %chain.id(), - channel = %channel_id, - "done spawning channel workers", - ), - Ok(false) => info!( - chain = %chain.id(), - channel = %channel_id, - "no channel workers were spawned", - ), - Err(e) => error!( - chain = %chain.id(), - channel 
= %channel_id, - "skipped channel workers, reason: {}", - e - ), - } - } - } - - fn spawn_connection_workers( - &mut self, - chain: Chain, - client: IdentifiedAnyClientState, - connection: IdentifiedConnectionEnd, - ) -> Result { - let config_conn_enabled = self.config.mode.connections.enabled; - - let counterparty_chain = self - .registry - .get_or_spawn(&client.client_state.chain_id()) - .map_err(Error::spawn)?; - - let conn_state_src = connection.connection_end.state; - let conn_state_dst = connection_state_on_destination(&connection, &counterparty_chain)?; - - info!( - chain = %chain.id(), - connection = %connection.connection_id, - counterparty_chain = %counterparty_chain.id(), - "connection is {:?}, state on destination chain is {:?}", - conn_state_src, - conn_state_dst - ); - - if conn_state_src.is_open() && conn_state_dst.is_open() { - info!( - chain = %chain.id(), - connection = %connection.connection_id, - "connection is already open, not spawning Connection worker", - ); - - Ok(false) - } else if config_conn_enabled - && !conn_state_dst.is_open() - && conn_state_dst.less_or_equal_progress(conn_state_src) - { - // create worker for connection handshake that will advance the remote state - let connection_object = Object::Connection(Connection { - dst_chain_id: client.client_state.chain_id(), - src_chain_id: chain.id(), - src_connection_id: connection.connection_id, - }); - - self.workers - .spawn(chain, counterparty_chain, &connection_object, self.config) - .then(|| { - info!( - "spawning Connection worker: {}", - connection_object.short_name() - ); - }); - - Ok(true) - } else { - Ok(false) - } - } - - /// Spawns all the [`WorkerHandle`](crate::worker::WorkerHandle)s that will - /// handle a given channel for a given source chain. - pub fn spawn_workers_for_channel( - &mut self, - chain: Chain, - client: &IdentifiedAnyClientState, - channel_scan: ChannelScan, - ) -> Result { - let mode = &self.config.mode; - - let counterparty_chain = self - .registry - .get_or_spawn(&client.client_state.chain_id()) - .map_err(SupervisorError::spawn)?; - - let chan_state_src = channel_scan.channel.channel_end.state; - let chan_state_dst = channel_scan - .counterparty - .as_ref() - .map_or(ChannelState::Uninitialized, |c| c.channel_end.state); - - info!( - chain = %chain.id(), - counterparty_chain = %counterparty_chain.id(), - channel = %channel_scan.id(), - "channel is {}, state on destination chain is {}", - chan_state_src, - chan_state_dst - ); - - if (mode.clients.enabled || mode.packets.enabled) - && chan_state_src.is_open() - && chan_state_dst.is_open() - { - if mode.clients.enabled { - // Spawn the client worker - let client_object = Object::Client(Client { - dst_client_id: client.client_id.clone(), - dst_chain_id: chain.id(), - src_chain_id: client.client_state.chain_id(), - }); - - self.workers - .spawn( - counterparty_chain.clone(), - chain.clone(), - &client_object, - self.config, - ) - .then(|| info!("spawned client worker: {}", client_object.short_name())); - } - - if mode.packets.enabled { - let has_packets = || { - !channel_scan - .unreceived_packets_on_counterparty(&chain, &counterparty_chain) - .unwrap_or_default() - .is_empty() - }; - - let has_acks = || { - !channel_scan - .unreceived_acknowledgements_on_counterparty(&chain, &counterparty_chain) - .unwrap_or_default() - .is_empty() - }; - - // If there are any outstanding packets or acks to send, spawn the worker - if has_packets() || has_acks() { - // Create the Packet object and spawn worker - let path_object = 
Object::Packet(Packet { - dst_chain_id: counterparty_chain.id(), - src_chain_id: chain.id(), - src_channel_id: *channel_scan.id(), - src_port_id: channel_scan.channel.port_id.clone(), - }); - - self.workers - .spawn( - chain.clone(), - counterparty_chain.clone(), - &path_object, - self.config, - ) - .then(|| info!("spawned packet worker: {}", path_object.short_name())); - } - } - - Ok(mode.clients.enabled) - } else if mode.channels.enabled - && !chan_state_dst.is_open() - && chan_state_dst.less_or_equal_progress(chan_state_src) - { - // create worker for channel handshake that will advance the remote state - let channel_object = Object::Channel(Channel { - dst_chain_id: counterparty_chain.id(), - src_chain_id: chain.id(), - src_channel_id: *channel_scan.id(), - src_port_id: channel_scan.channel.port_id, - }); - - self.workers - .spawn(chain, counterparty_chain, &channel_object, self.config) - .then(|| info!("spawned channel worker: {}", channel_object.short_name())); - - Ok(true) - } else { - Ok(false) - } - } - - pub fn shutdown_workers_for_chain(&mut self, chain_id: &ChainId) { - let affected_workers = self.workers.objects_for_chain(chain_id); - for object in affected_workers { - self.workers.shutdown_worker(&object); - } - } -} diff --git a/relayer/src/telemetry.rs b/relayer/src/telemetry.rs deleted file mode 100644 index 79c8656e9d..0000000000 --- a/relayer/src/telemetry.rs +++ /dev/null @@ -1,47 +0,0 @@ -// If the `telemetry` feature is enabled, re-export the `ibc-telemetry` state. -#[cfg(feature = "telemetry")] -pub type Telemetry = alloc::sync::Arc; - -// Otherwise, define and export a dummy type. -#[cfg(not(feature = "telemetry"))] -#[derive(Clone, Debug)] -pub struct TelemetryDisabled; - -#[cfg(not(feature = "telemetry"))] -pub type Telemetry = TelemetryDisabled; - -/// A macro to send metric updates via a telemetry handle, -/// only if the `telemetry` feature is enabled. -/// Otherwise, it compiles to a no-op. -/// -/// The macro can be used in two ways: -/// -/// a) By passing it an arbitrary expression as its single argument. -/// -/// ```rust,ignore -/// telemetry!(ibc_telemetry::global().tx_count(chain.id(), 1)); -/// ``` -/// -/// b) In the common case where one wants to update a metric, -/// the macro accepts the metric's name, followed by its arguments. -/// -/// ```rust,ignore -/// telemetry!(tx_count, chain.id(), 1); -/// ``` -/// -#[macro_export] -macro_rules! telemetry { - ($id:ident, $($args:expr),* $(,)*) => { - #[cfg(feature = "telemetry")] - { - ::ibc_telemetry::global().$id($($args),*); - } - }; - - ($e:expr) => { - #[cfg(feature = "telemetry")] - { - $e; - } - }; -} diff --git a/relayer/src/transfer.rs b/relayer/src/transfer.rs deleted file mode 100644 index e5f2ff6c29..0000000000 --- a/relayer/src/transfer.rs +++ /dev/null @@ -1,203 +0,0 @@ -use core::time::Duration; - -use flex_error::{define_error, DetailOnly}; -use ibc::applications::transfer::error::Error as Ics20Error; -use ibc::applications::transfer::msgs::transfer::MsgTransfer; -use ibc::applications::transfer::Amount; -use ibc::core::ics24_host::identifier::{ChainId, ChannelId, PortId}; -use ibc::events::IbcEvent; -use ibc::signer::Signer; -use ibc::timestamp::{Timestamp, TimestampOverflowError}; -use ibc::tx_msg::Msg; -use ibc::Height; -use ibc_proto::cosmos::base::v1beta1::Coin; -use ibc_proto::google::protobuf::Any; - -use crate::chain::endpoint::ChainStatus; -use crate::chain::handle::ChainHandle; -use crate::chain::tracking::TrackedMsgs; -use crate::error::Error; - -define_error! 
{ - TransferError { - Relayer - [ Error ] - |_| { "relayer error" }, - - Key - [ Error ] - |_| { "key error" }, - - Submit - { chain_id: ChainId } - [ Error ] - |e| { - format!("failed while submitting the Transfer message to chain {0}", - e.chain_id) - }, - - TimestampOverflow - [ DetailOnly ] - |_| { "timestamp overflow" }, - - TxResponse - { event: String } - |e| { - format!("tx response event consists of an error: {}", - e.event) - }, - - UnexpectedEvent - { event: IbcEvent } - |e| { - format!("internal error, expected IBCEvent::ChainError, got {:?}", - e.event) - }, - - TokenTransfer - [ Ics20Error ] - |_| { "Token transfer error" }, - - ZeroTimeout - | _ | { "packet timeout height and packet timeout timestamp cannot both be 0" }, - } -} - -#[derive(Copy, Clone)] -pub struct TransferTimeout { - pub timeout_height: Height, - pub timeout_timestamp: Timestamp, -} - -impl TransferTimeout { - /** - Construct the transfer timeout parameters from the given timeout - height offset, timeout duration, and the latest chain status - containing the latest time of the destination chain. - - The height offset and duration are optional, with zero indicating - that the packet do not get expired at the given height or time. - If both height offset and duration are zero, then the packet will - never expire. - */ - pub fn new( - timeout_height_offset: u64, - timeout_duration: Duration, - destination_chain_status: &ChainStatus, - ) -> Result { - let timeout_height = if timeout_height_offset == 0 { - Height::zero() - } else { - destination_chain_status.height.add(timeout_height_offset) - }; - - let timeout_timestamp = if timeout_duration == Duration::ZERO { - Timestamp::none() - } else { - (destination_chain_status.timestamp + timeout_duration) - .map_err(TransferError::timestamp_overflow)? 
- }; - - Ok(TransferTimeout { - timeout_height, - timeout_timestamp, - }) - } -} - -#[derive(Clone, Debug)] -pub struct TransferOptions { - pub packet_src_port_id: PortId, - pub packet_src_channel_id: ChannelId, - pub amount: Amount, - pub denom: String, - pub receiver: Option, - pub timeout_height_offset: u64, - pub timeout_duration: Duration, - pub number_msgs: usize, -} - -pub fn build_transfer_message( - packet_src_port_id: PortId, - packet_src_channel_id: ChannelId, - amount: Amount, - denom: String, - sender: Signer, - receiver: Signer, - timeout_height: Height, - timeout_timestamp: Timestamp, -) -> Any { - let msg = MsgTransfer { - source_port: packet_src_port_id, - source_channel: packet_src_channel_id, - token: Coin { - denom, - amount: amount.to_string(), - }, - sender, - receiver, - timeout_height, - timeout_timestamp, - }; - - msg.to_any() -} - -pub fn build_and_send_transfer_messages( - packet_src_chain: &SrcChain, // the chain whose account is debited - packet_dst_chain: &DstChain, // the chain whose account eventually gets credited - opts: &TransferOptions, -) -> Result, TransferError> { - let receiver = packet_dst_chain.get_signer().map_err(TransferError::key)?; - - let sender = packet_src_chain.get_signer().map_err(TransferError::key)?; - - let destination_chain_status = packet_dst_chain - .query_application_status() - .map_err(TransferError::relayer)?; - - let timeout = TransferTimeout::new( - opts.timeout_height_offset, - opts.timeout_duration, - &destination_chain_status, - )?; - - let msg = MsgTransfer { - source_port: opts.packet_src_port_id.clone(), - source_channel: opts.packet_src_channel_id, - token: Coin { - denom: opts.denom.clone(), - amount: opts.amount.to_string(), - }, - sender, - receiver, - timeout_height: timeout.timeout_height, - timeout_timestamp: timeout.timeout_timestamp, - }; - - let raw_msg = msg.to_any(); - let msgs = vec![raw_msg; opts.number_msgs]; - - let events = packet_src_chain - .send_messages_and_wait_commit(TrackedMsgs::new_static(msgs, "ft-transfer")) - .map_err(|e| TransferError::submit(packet_src_chain.id(), e))?; - - // Check if the chain rejected the transaction - let result = events - .iter() - .find(|event| matches!(event, IbcEvent::ChainError(_))); - - match result { - None => Ok(events), - Some(err) => { - if let IbcEvent::ChainError(err) = err { - Err(TransferError::tx_response(err.clone())) - } else { - panic!( - "internal error, expected IBCEvent::ChainError, got {:?}", - err - ) - } - } - } -} diff --git a/relayer/src/upgrade_chain.rs b/relayer/src/upgrade_chain.rs deleted file mode 100644 index 3ba4f191db..0000000000 --- a/relayer/src/upgrade_chain.rs +++ /dev/null @@ -1,176 +0,0 @@ -//! Chain upgrade plans for triggering IBC-breaking upgrades. 
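//!
//! A rough usage sketch, assuming the chain handles, configs and client id are
//! supplied by the caller (field values below are placeholders):
//!
//! ```rust,ignore
//! let opts = UpgradePlanOptions {
//!     src_chain_config,
//!     dst_chain_config,
//!     src_client_id,
//!     amount: 10_000_000,
//!     denom: "stake".to_string(),
//!     // the halt height is scheduled this many blocks above the current height
//!     height_offset: 60,
//!     upgraded_chain_id: ChainId::new("ibc-1".to_string(), 1),
//!     // `None` retains the unbonding period of the existing client state
//!     upgraded_unbonding_period: None,
//!     upgrade_plan_name: "plan".to_string(),
//! };
//!
//! // submits a MsgSubmitProposal wrapping an UpgradeProposal; returns the tx hash
//! let tx_hash = build_and_send_ibc_upgrade_proposal(dst_chain, src_chain, &opts)?;
//! ```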
-#![allow(deprecated)] // TODO(hu55a1n1): remove this when we don't need legacy upgrade support - -use core::time::Duration; - -use bytes::BufMut; -use flex_error::define_error; - -use tendermint::abci::transaction::Hash as TxHash; - -use ibc::clients::ics07_tendermint::client_state::UpgradeOptions; -use ibc::core::ics02_client::client_state::{AnyClientState, ClientState}; -use ibc::core::ics02_client::height::Height; -use ibc::core::ics24_host::identifier::{ChainId, ClientId}; -use ibc::downcast; -use ibc_proto::cosmos::gov::v1beta1::MsgSubmitProposal; -use ibc_proto::cosmos::upgrade::v1beta1::Plan; -use ibc_proto::google::protobuf::Any; -use ibc_proto::ibc::core::client::v1::UpgradeProposal; - -use crate::chain::handle::ChainHandle; -use crate::chain::requests::{IncludeProof, QueryClientStateRequest}; -use crate::chain::tracking::TrackedMsgs; -use crate::config::ChainConfig; -use crate::error::Error; - -define_error! { - UpgradeChainError { - Query - [ Error ] - |_| { "error during a query" }, - - Key - [ Error ] - |_| { "key error" }, - - Submit - { chain_id: ChainId } - [ Error ] - |e| { - format!("failed while submitting the Transfer message to chain {0}", e.chain_id) - }, - - TxResponse - { event: String } - |e| { - format!("tx response event consists of an error: {}", e.event) - }, - - TendermintOnly - |_| { "only Tendermint clients can be upgraded" } - } -} - -#[derive(Clone, Debug)] -pub struct UpgradePlanOptions { - pub src_chain_config: ChainConfig, - pub dst_chain_config: ChainConfig, - pub src_client_id: ClientId, - pub amount: u64, - pub denom: String, - pub height_offset: u64, - pub upgraded_chain_id: ChainId, - pub upgraded_unbonding_period: Option, - pub upgrade_plan_name: String, -} - -pub fn build_and_send_ibc_upgrade_proposal( - dst_chain: impl ChainHandle, // the chain which will undergo an upgrade - src_chain: impl ChainHandle, // the source chain; supplies a client state for building the upgrade plan - opts: &UpgradePlanOptions, -) -> Result { - let upgrade_height = dst_chain - .query_latest_height() // FIXME(romac): Use query_chain_latest_height once added to ChainHandle - .map_err(UpgradeChainError::query)? 
- .add(opts.height_offset); - - let (client_state, _) = src_chain - .query_client_state( - QueryClientStateRequest { - client_id: opts.src_client_id.clone(), - height: Height::zero(), - }, - IncludeProof::No, - ) - .map_err(UpgradeChainError::query)?; - - let client_state = downcast!(client_state => AnyClientState::Tendermint) - .ok_or_else(UpgradeChainError::tendermint_only)?; - - // Retain the old unbonding period in case the user did not specify a new one - let upgrade_options = UpgradeOptions { - unbonding_period: opts - .upgraded_unbonding_period - .unwrap_or(client_state.unbonding_period), - }; - - let upgraded_client_state = client_state.upgrade( - upgrade_height.increment(), - upgrade_options, - opts.upgraded_chain_id.clone(), - ); - - let proposal = UpgradeProposal { - title: "proposal 0".to_string(), - description: "upgrade the chain software and unbonding period".to_string(), - upgraded_client_state: Some(Any::from(upgraded_client_state.wrap_any())), - plan: Some(Plan { - name: opts.upgrade_plan_name.clone(), - height: upgrade_height.revision_height as i64, - info: "".to_string(), - ..Default::default() // deprecated fields - time & upgraded_client_state - }), - }; - - let proposal = Proposal::Default(proposal); - - let mut buf_proposal = Vec::new(); - proposal.encode(&mut buf_proposal); - let any_proposal = Any { - type_url: proposal.type_url(), - value: buf_proposal, - }; - - // build the msg submit proposal - let proposer = dst_chain.get_signer().map_err(UpgradeChainError::key)?; - - let coins = ibc_proto::cosmos::base::v1beta1::Coin { - denom: opts.denom.clone(), - amount: opts.amount.to_string(), - }; - - let msg = MsgSubmitProposal { - content: Some(any_proposal), - initial_deposit: vec![coins], - proposer: proposer.to_string(), - }; - - let mut buf_msg = Vec::new(); - prost::Message::encode(&msg, &mut buf_msg).unwrap(); - let any_msg = Any { - type_url: "/cosmos.gov.v1beta1.MsgSubmitProposal".to_string(), - value: buf_msg, - }; - - // Can't use send_messages_and_wait_commit because no IBC events - // corresponding to the transaction can be recognized to confirm the - // upgrade. - // https://github.com/informalsystems/ibc-rs/issues/1288#issuecomment-1066884163 - - let responses = dst_chain - .send_messages_and_wait_check_tx(TrackedMsgs::new_single(any_msg, "upgrade")) - .map_err(|e| UpgradeChainError::submit(dst_chain.id(), e))?; - - Ok(responses[0].hash) -} - -enum Proposal { - Default(UpgradeProposal), -} - -impl Proposal { - fn encode(&self, buf: &mut impl BufMut) { - match self { - Proposal::Default(p) => prost::Message::encode(p, buf), - } - .unwrap() - } - - fn type_url(&self) -> String { - match self { - Proposal::Default(_) => "/ibc.core.client.v1.UpgradeProposal", - } - .to_owned() - } -} diff --git a/relayer/src/util.rs b/relayer/src/util.rs deleted file mode 100644 index ba0b17a771..0000000000 --- a/relayer/src/util.rs +++ /dev/null @@ -1,10 +0,0 @@ -mod block_on; -pub use block_on::block_on; - -pub mod diff; -pub mod iter; -pub mod lock; -pub mod queue; -pub mod retry; -pub mod stream; -pub mod task; diff --git a/relayer/src/util/block_on.rs b/relayer/src/util/block_on.rs deleted file mode 100644 index 93459ece34..0000000000 --- a/relayer/src/util/block_on.rs +++ /dev/null @@ -1,12 +0,0 @@ -//! Utility function to execute a future synchronously - -use futures::Future; - -/// Spawns a new tokio runtime and use it to block on the given future. 
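///
/// A minimal sketch, assuming some async call such as an RPC status query is in
/// scope:
///
/// ```rust,ignore
/// let status = block_on(async { rpc_client.status().await })?;
/// ```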
-pub fn block_on(future: F) -> F::Output { - tokio::runtime::Builder::new_current_thread() - .enable_all() - .build() - .unwrap() - .block_on(future) -} diff --git a/relayer/src/util/diff.rs b/relayer/src/util/diff.rs deleted file mode 100644 index baa8ec627b..0000000000 --- a/relayer/src/util/diff.rs +++ /dev/null @@ -1,98 +0,0 @@ -use alloc::collections::BTreeMap as HashMap; -use core::cmp::Ord; -use core::hash::Hash; - -/// A change between two dictionaries. -#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)] -pub enum Change { - /// The entry with this key was added. - Added(K), - /// The entry with this key was updated. - Updated(K), - /// The entry with this key was removed. - Removed(K), -} - -impl Change { - /// Return the key affected by this change. - pub fn key(&self) -> &K { - match self { - Self::Added(ref k) => k, - Self::Updated(ref k) => k, - Self::Removed(ref k) => k, - } - } - - /// Return the key affected by this change. - pub fn into_key(self) -> K { - match self { - Self::Added(k) => k, - Self::Updated(k) => k, - Self::Removed(k) => k, - } - } -} - -/// Computes the set of changes between the `prev` and `next` dictionaries. -/// An entry is deemed to have been updated when it is not equal to the original -/// value according to its `Eq` instance. -/// -/// Returns a list of changes holding on to the key affected by the change. -pub fn diff<'a, K, V>(prev: &'a HashMap, next: &'a HashMap) -> Vec> -where - K: Hash + Eq + Ord, - V: Eq, -{ - gdiff(prev, next, |a, b| a == b) -} - -/// Computes the set of changes between the `prev` and `next` dictionaries. -/// An entry is deemed to have been updated when `!eq(prev_value, next_value)`. -/// -/// Returns a list of changes holding on to the key affected by the change. -pub fn gdiff<'a, K, V, F>( - prev: &'a HashMap, - next: &'a HashMap, - eq: F, -) -> Vec> -where - K: Hash + Eq + Ord, - F: Fn(&'a V, &'a V) -> bool, -{ - let mut changes = Vec::new(); - - for (key, value) in prev { - if let Some(next_value) = next.get(key) { - if !eq(value, next_value) { - changes.push(Change::Updated(key)); - } - } else { - changes.push(Change::Removed(key)); - } - } - - for key in next.keys() { - if !prev.contains_key(key) { - changes.push(Change::Added(key)); - } - } - - changes -} - -#[cfg(test)] -mod tests { - use super::*; - use Change::*; - - #[test] - fn it_works() { - let prev = vec![(1, 1), (2, 2), (3, 3), (4, 4)].into_iter().collect(); - let next = vec![(1, 11), (2, 2), (4, 44), (5, 5)].into_iter().collect(); - - let mut diff = diff(&prev, &next); - diff.sort(); - - assert_eq!(diff, vec![Added(&5), Updated(&1), Updated(&4), Removed(&3)]); - } -} diff --git a/relayer/src/util/iter.rs b/relayer/src/util/iter.rs deleted file mode 100644 index c5b28f9faa..0000000000 --- a/relayer/src/util/iter.rs +++ /dev/null @@ -1,20 +0,0 @@ -pub trait SplitResults: Iterator { - fn split_results(self) -> (Vec, Vec) - where - Self: Iterator> + Sized, - { - let mut oks = vec![]; - let mut errs = vec![]; - - for item in self { - match item { - Ok(ok) => oks.push(ok), - Err(err) => errs.push(err), - } - } - - (oks, errs) - } -} - -impl SplitResults for T where T: Iterator {} diff --git a/relayer/src/util/lock.rs b/relayer/src/util/lock.rs deleted file mode 100644 index 5fa139b9fd..0000000000 --- a/relayer/src/util/lock.rs +++ /dev/null @@ -1,36 +0,0 @@ -use std::sync::{Arc, RwLock, RwLockReadGuard, RwLockWriteGuard}; - -pub type RwArc = Arc>; - -/** - Utility methods for acquiring an `Arc>` lock without having - to assert the success acquire every time. 
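
    A minimal sketch, assuming `LockExt` and `RwArc` from this module are in scope:

    ```rust,ignore
    let shared: RwArc<Vec<u64>> = <RwArc<Vec<u64>>>::new_lock(Vec::new());
    shared.acquire_write().push(42);
    assert_eq!(shared.acquire_read().len(), 1);
    ```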
- - The current code base panics if the lock acquire fails due to - [poisoned lock](https://doc.rust-lang.org/std/sync/struct.PoisonError.html), - as the error is non-recoverable anyway. - - Using the `LockExt` methods, we can avoid having to write `.unwrap()` - or `.expect("poisoned lock")` everywhere in the code base. -*/ -pub trait LockExt { - fn new_lock(val: T) -> Self; - - fn acquire_read(&self) -> RwLockReadGuard<'_, T>; - - fn acquire_write(&self) -> RwLockWriteGuard<'_, T>; -} - -impl LockExt for Arc> { - fn new_lock(val: T) -> Self { - Arc::new(RwLock::new(val)) - } - - fn acquire_read(&self) -> RwLockReadGuard<'_, T> { - self.read().expect("poisoned lock") - } - - fn acquire_write(&self) -> RwLockWriteGuard<'_, T> { - self.write().expect("poisoned lock") - } -} diff --git a/relayer/src/util/queue.rs b/relayer/src/util/queue.rs deleted file mode 100644 index cf273dc837..0000000000 --- a/relayer/src/util/queue.rs +++ /dev/null @@ -1,71 +0,0 @@ -use alloc::collections::VecDeque; -use std::sync::{Arc, RwLock}; - -use crate::util::lock::LockExt; - -/// A lightweight wrapper type to RefCell> so that -/// we can safely mutate it with regular reference instead of -/// mutable reference. We only expose subset of VecDeque methods -/// that does not return any inner reference, so that the RefCell -/// can never panic caused by simultaneous borrow and borrow_mut. -pub struct Queue(Arc>>); - -impl Queue { - pub fn new() -> Self { - Queue(Arc::new(RwLock::new(VecDeque::new()))) - } - - pub fn pop_front(&self) -> Option { - self.0.acquire_write().pop_front() - } - - pub fn pop_back(&self) -> Option { - self.0.acquire_write().pop_back() - } - - pub fn push_back(&self, val: T) { - self.0.acquire_write().push_back(val) - } - - pub fn push_front(&self, val: T) { - self.0.acquire_write().push_front(val) - } - - pub fn len(&self) -> usize { - self.0.acquire_read().len() - } - - pub fn is_empty(&self) -> bool { - self.0.acquire_read().is_empty() - } - - pub fn into_vec(self) -> VecDeque { - self.0.acquire_write().drain(..).collect() - } - - pub fn replace(&self, queue: VecDeque) { - *self.0.acquire_write() = queue; - } - - pub fn take(&self) -> VecDeque { - self.0.acquire_write().drain(..).collect() - } -} - -impl Queue { - pub fn clone_vec(&self) -> VecDeque { - self.0.acquire_read().clone() - } -} - -impl Default for Queue { - fn default() -> Self { - Self::new() - } -} - -impl From> for Queue { - fn from(deque: VecDeque) -> Self { - Queue(Arc::new(RwLock::new(deque))) - } -} diff --git a/relayer/src/util/retry.rs b/relayer/src/util/retry.rs deleted file mode 100644 index 86cf66c224..0000000000 --- a/relayer/src/util/retry.rs +++ /dev/null @@ -1,179 +0,0 @@ -use core::time::Duration; - -pub use retry::{ - delay::{Fibonacci, Fixed}, - retry_with_index, Error as RetryError, OperationResult as RetryResult, -}; - -pub fn retry_count(err: &RetryError) -> u64 { - match err { - RetryError::Operation { - tries, - total_delay: _, - error: _, - } => *tries, - _ => 0, - } -} - -#[derive(Copy, Clone, Debug)] -pub struct ConstantGrowth { - delay: Duration, - incr: Duration, -} - -impl ConstantGrowth { - pub const fn new(delay: Duration, incr: Duration) -> Self { - Self { delay, incr } - } - - pub fn clamp(self, max_delay: Duration, max_retries: usize) -> impl Iterator { - clamp(self, max_delay, max_retries) - } -} - -impl From for ConstantGrowth { - fn from(delay: Duration) -> Self { - Self::new(delay, Duration::from_secs(1)) - } -} - -impl Iterator for ConstantGrowth { - type Item = Duration; - - fn next(&mut 
self) -> Option { - let delay = self.delay; - - if let Some(next) = self.delay.checked_add(self.incr) { - self.delay = next; - } - - Some(delay) - } -} - -pub fn clamp( - strategy: impl Iterator, - max_delay: Duration, - max_retries: usize, -) -> impl Iterator { - strategy - .take(max_retries) - .map(move |delay| delay.min(max_delay)) -} - -pub fn clamp_total( - strategy: impl Iterator, - max_delay: Duration, - max_total_delay: Duration, -) -> impl Iterator { - strategy.map(move |delay| delay.min(max_delay)).scan( - Duration::from_millis(0), - move |elapsed, delay| { - let next = if *elapsed >= max_total_delay { - None - } else if (*elapsed + delay) > max_total_delay { - Some(max_total_delay - *elapsed) - } else { - Some(delay) - }; - - *elapsed += delay; - next - }, - ) -} - -#[cfg(test)] -mod tests { - use super::*; - use test_log::test; - - const CONST_STRATEGY: ConstantGrowth = - ConstantGrowth::new(Duration::from_secs(1), Duration::from_millis(500)); - - #[test] - fn const_growth_no_clamp() { - let delays = CONST_STRATEGY.take(10).collect::>(); - assert_eq!( - delays, - vec![ - Duration::from_millis(1000), - Duration::from_millis(1500), - Duration::from_millis(2000), - Duration::from_millis(2500), - Duration::from_millis(3000), - Duration::from_millis(3500), - Duration::from_millis(4000), - Duration::from_millis(4500), - Duration::from_millis(5000), - Duration::from_millis(5500) - ] - ); - } - - #[test] - fn clamped_const_growth_max_delay() { - let strategy = CONST_STRATEGY.clamp(Duration::from_secs(10), 10); - let delays = strategy.collect::>(); - assert_eq!( - delays, - vec![ - Duration::from_millis(1000), - Duration::from_millis(1500), - Duration::from_millis(2000), - Duration::from_millis(2500), - Duration::from_millis(3000), - Duration::from_millis(3500), - Duration::from_millis(4000), - Duration::from_millis(4500), - Duration::from_millis(5000), - Duration::from_millis(5500) - ] - ); - } - - #[test] - fn clamped_const_growth_max_retries() { - let strategy = CONST_STRATEGY.clamp(Duration::from_secs(10000), 5); - let delays = strategy.collect::>(); - assert_eq!( - delays, - vec![ - Duration::from_millis(1000), - Duration::from_millis(1500), - Duration::from_millis(2000), - Duration::from_millis(2500), - Duration::from_millis(3000) - ] - ); - } - - #[test] - fn clamped_total_const_growth_max_retries() { - const MAX_DELAY: Duration = Duration::from_millis(500); - const DELAY_INCR: Duration = Duration::from_millis(100); - const INITIAL_DELAY: Duration = Duration::from_millis(200); - const MAX_RETRY_DURATION: Duration = Duration::from_secs(2); - - let strategy = clamp_total( - ConstantGrowth::new(INITIAL_DELAY, DELAY_INCR), - MAX_DELAY, - MAX_RETRY_DURATION, - ); - - let delays = strategy.collect::>(); - - assert_eq!( - delays, - vec![ - Duration::from_millis(200), - Duration::from_millis(300), - Duration::from_millis(400), - Duration::from_millis(500), - Duration::from_millis(500), - Duration::from_millis(100) - ] - ); - } -} diff --git a/relayer/src/util/serde.rs b/relayer/src/util/serde.rs deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/relayer/src/util/stream.rs b/relayer/src/util/stream.rs deleted file mode 100644 index c003067260..0000000000 --- a/relayer/src/util/stream.rs +++ /dev/null @@ -1,177 +0,0 @@ -use async_stream::stream; -use core::mem; -use futures::stream::Stream; - -/// ## Example -/// -/// ```rust -/// use ibc_relayer::util::stream::try_group_while; -/// use futures::{stream, StreamExt, executor::block_on}; -/// -/// let input = 
stream::iter(vec![ -/// Ok(1), -/// Ok(1), -/// Ok(2), -/// Err(()), -/// Ok(2), -/// Ok(2), -/// Ok(3), -/// Ok(3), -/// Err(()), -/// ]); -/// -/// let output = try_group_while(input, |a, b| a == b).collect::>(); -/// -/// assert_eq!( -/// block_on(output), -/// vec![ -/// Ok(vec![1, 1]), -/// Ok(vec![2]), -/// Err(()), -/// Ok(vec![2, 2]), -/// Ok(vec![3]), -/// Ok(vec![3]), -/// Err(()), -/// ] -/// ); -/// ``` -pub fn try_group_while( - input: S, - group_these: F, -) -> impl Stream, E>> -where - S: Stream>, - F: Fn(&A, &A) -> bool + 'static, -{ - struct State { - cur: A, - group: Vec, - } - - stream! { - let mut state = None; - - for await x in input { - match x { - Ok(x) => { - match &mut state { - None => { - state = Some(State { cur: x, group: vec![] }); - }, - Some(state) if group_these(&state.cur, &x) => { - let prev = mem::replace(&mut state.cur, x); - state.group.push(prev); - }, - Some(state) => { - let cur = mem::replace(&mut state.cur, x); - state.group.push(cur); - let group = mem::take(&mut state.group); - yield Ok(group); - } - } - } - Err(e) => { - if let Some(cur_state) = mem::take(&mut state) { - if !cur_state.group.is_empty() { - yield Ok(cur_state.group); - } - yield Ok(vec![cur_state.cur]); - } - - yield Err(e); - } - } - } - - if let Some(State { cur, mut group }) = state { - group.push(cur); - yield Ok(group); - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - - use futures::{executor::block_on, stream, StreamExt}; - use test_log::test; - - fn ok(a: A) -> Result { - Ok(a) - } - - #[test] - fn try_group_while_non_empty() { - let input = stream::iter(vec![ - ok((1, 1)), - Ok((1, 2)), - Ok((2, 1)), - Ok((3, 1)), - Ok((3, 2)), - Ok((3, 3)), - Ok((4, 1)), - Ok((5, 1)), - Ok((5, 2)), - ]); - - let output = try_group_while(input, |a, b| a.0 == b.0).collect::>(); - let result = block_on(output); - - assert_eq!( - result, - vec![ - Ok(vec![(1, 1), (1, 2)]), - Ok(vec![(2, 1)]), - Ok(vec![(3, 1), (3, 2), (3, 3)]), - Ok(vec![(4, 1)]), - Ok(vec![(5, 1), (5, 2)]) - ] - ); - } - - #[test] - fn try_group_while_err() { - let input = stream::iter(vec![ - ok((1, 1)), - Ok((1, 2)), - Ok((2, 1)), - Ok((3, 1)), - Ok((3, 2)), - Err(()), - Ok((3, 3)), - Ok((4, 1)), - Ok((5, 1)), - Ok((5, 2)), - Err(()), - ]); - - let output = try_group_while(input, |a, b| a.0 == b.0).collect::>(); - let result = block_on(output); - - assert_eq!( - result, - vec![ - Ok(vec![(1, 1), (1, 2)]), - Ok(vec![(2, 1)]), - Ok(vec![(3, 1)]), - Ok(vec![(3, 2)]), - Err(()), - Ok(vec![(3, 3)]), - Ok(vec![(4, 1)]), - Ok(vec![(5, 1)]), - Ok(vec![(5, 2)]), - Err(()), - ] - ); - } - - #[test] - fn try_group_while_empty() { - let input = stream::iter(Vec::>::new()); - let output = try_group_while(input, |a, b| a == b).collect::>(); - let result = block_on(output); - - assert_eq!(result, Vec::, ()>>::new()); - } -} diff --git a/relayer/src/util/task.rs b/relayer/src/util/task.rs deleted file mode 100644 index 3a3638b864..0000000000 --- a/relayer/src/util/task.rs +++ /dev/null @@ -1,196 +0,0 @@ -use core::fmt::Display; -use core::mem; -use core::time::Duration; -use crossbeam_channel::{bounded, Sender}; -use std::sync::{Arc, RwLock}; -use std::thread; -use tracing::{debug, error, warn}; - -use crate::util::lock::LockExt; - -/** - A task handle holds the endpoints for stopping or waiting for a - background task to terminate. - - A holder of `TaskHandle` can explicitly stop the background task by - calling [`shutdown`](TaskHandle::shutdown) or - [`shutdown_and_wait`](TaskHandle::shutdown_and_wait). 
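
    A minimal usage sketch, assuming `spawn_background_task`, `Next` and
    `TaskError` from this module are in scope:

    ```rust,ignore
    let handle = spawn_background_task(
        tracing::error_span!("example.task"),
        Some(core::time::Duration::from_millis(200)),
        move || -> Result<Next, TaskError<core::convert::Infallible>> {
            // perform one unit of work per invocation
            Ok(Next::Continue)
        },
    );

    // request shutdown and block until the step runner has wound down
    handle.shutdown_and_wait();
    ```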
- - Otherwise, when the `TaskHandle` is dropped, it will stop the background - task and wait for the background task to terminate before returning. -*/ -pub struct TaskHandle { - shutdown_sender: Sender<()>, - stopped: Arc>, - join_handle: DropJoinHandle, -} - -/** - A wrapper to [`std::thread::JoinHandle`] so that the handle is joined - when it is dropped. -*/ -struct DropJoinHandle(Option>); - -/** - A wrapper around the error type returned by a background task step - function to indicate whether the background task should be terminated - because of the error. -*/ -pub enum TaskError { - /** - Inform the background task runner that an ignorable error has occured, - and the background task runner should log the error and then continue - execution. - */ - Ignore(E), - - /** - Inform the background task runner that a fatal error has occured, - and the background task runner should log the error and then abort - execution. - */ - Fatal(E), -} - -pub enum Next { - Continue, - Abort, -} - -/** - Spawn a long-running background task with the given step runner. - - The step runner is a `FnMut` closure that is called repeatedly and - returns a `Result<(), TaskError>`. If the step is executed successfuly, - the step runner should return `Ok(())` so that it will be called again. - - Otherwise if errors occurred or of the task needs to be aborted, - the step runner should return a [`TaskError`] that instructs the - task runner of whether the background task should be aborted. - - The function is also given a task name string, which is used for logging - information about the execution of the task. An optional [`Duration`] - argument is also given for the task runner to sleep for the given - duration before calling the step runner again. - - The function returns a [`TaskHandle`] that can be used to shutdown the - background task. If the [`TaskHandle`] is dropped or if explicit shutdown - instruction is sent, the task runner will stop calling the step runner - and abort the background task. - - If the step runner is receiving commands from other - [channels](crossbeam_channel::Receiver), it should use the - [`try_recv`](crossbeam_channel::Receiver::try_recv) function - so that the step runner do not get stuck indefinitely even - when shutdown instruction has been sent through the - [`TaskHandle`]. -*/ -pub fn spawn_background_task( - span: tracing::Span, - interval_pause: Option, - mut step_runner: impl FnMut() -> Result> + Send + Sync + 'static, -) -> TaskHandle { - debug!(parent: &span, "spawning task"); - - let stopped = Arc::new(RwLock::new(false)); - let write_stopped = stopped.clone(); - - let (shutdown_sender, receiver) = bounded(1); - - let join_handle = thread::spawn(move || { - let _entered = span.enter(); - loop { - match receiver.try_recv() { - Ok(()) => { - break; - } - _ => match step_runner() { - Ok(Next::Continue) => {} - Ok(Next::Abort) => { - debug!("aborting task"); - break; - } - Err(TaskError::Ignore(e)) => { - warn!("task encountered ignorable error: {}", e); - } - Err(TaskError::Fatal(e)) => { - error!("task aborting after encountering fatal error: {}", e); - break; - } - }, - } - if let Some(interval) = interval_pause { - thread::sleep(interval); - } - } - - *write_stopped.acquire_write() = true; - - debug!("task terminated"); - }); - - TaskHandle { - shutdown_sender, - stopped, - join_handle: DropJoinHandle(Some(join_handle)), - } -} - -impl TaskHandle { - /** - Wait for the background task to terminate. 
- - Note that because the background tasks are meant to run forever, - this would likely never return unless errors occurred or if - the step runner returns [`Next::Abort`] to abort prematurely. - */ - pub fn join(mut self) { - if let Some(handle) = mem::take(&mut self.join_handle.0) { - let _ = handle.join(); - } - } - - /** - Send the shutdown signal to the background task without waiting - for it to terminate. - - Note that the waiting will still happen when the [`TaskHandle`] is - dropped. - - This can be used to shutdown multiple tasks in parallel, and then - wait for them to all terminate concurrently. - */ - pub fn shutdown(&self) { - let _ = self.shutdown_sender.send(()); - } - - /** - Send the shutdown signal and wait for the task to terminate. - - This is done implicitly by the [`TaskHandle`] when it is dropped. - */ - pub fn shutdown_and_wait(self) { - let _ = self.shutdown_sender.send(()); - } - - /** - Check whether a background task has been stopped prematurely. - */ - pub fn is_stopped(&self) -> bool { - *self.stopped.acquire_read() - } -} - -impl Drop for DropJoinHandle { - fn drop(&mut self) { - if let Some(handle) = mem::take(&mut self.0) { - let _ = handle.join(); - } - } -} - -impl Drop for TaskHandle { - fn drop(&mut self) { - let _ = self.shutdown_sender.send(()); - } -} diff --git a/relayer/src/worker.rs b/relayer/src/worker.rs deleted file mode 100644 index 8496761ef4..0000000000 --- a/relayer/src/worker.rs +++ /dev/null @@ -1,165 +0,0 @@ -use alloc::sync::Arc; -use core::fmt; -use ibc::core::ics04_channel::channel::Order; -use serde::{Deserialize, Serialize}; -use std::sync::Mutex; -use tracing::error; - -use crate::foreign_client::ForeignClient; -use crate::link::{Link, LinkParameters, Resubmit}; -use crate::{ - chain::handle::{ChainHandle, ChainHandlePair}, - config::Config, - object::Object, -}; - -pub mod retry_strategy; - -mod error; -pub use error::RunError; - -mod handle; -pub use handle::{WorkerData, WorkerHandle}; - -mod cmd; -pub use cmd::WorkerCmd; - -mod map; -pub use map::WorkerMap; - -pub mod channel; -pub mod client; -pub mod connection; -pub mod packet; -pub mod wallet; - -#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] -#[serde(transparent)] -pub struct WorkerId(u64); - -impl WorkerId { - pub fn new(id: u64) -> Self { - Self(id) - } - - pub fn next(self) -> Self { - Self(self.0 + 1) - } -} - -impl fmt::Display for WorkerId { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}", self.0) - } -} - -pub fn spawn_worker_tasks( - chains: ChainHandlePair, - id: WorkerId, - object: Object, - config: &Config, -) -> WorkerHandle { - let mut task_handles = Vec::new(); - - let (cmd_tx, data) = match &object { - Object::Client(client) => { - let client = ForeignClient::restore(client.dst_client_id.clone(), chains.b, chains.a); - - let (mut refresh, mut misbehaviour) = (false, false); - - let refresh_task = client::spawn_refresh_client(client.clone()); - if let Some(refresh_task) = refresh_task { - task_handles.push(refresh_task); - refresh = true; - } - - let cmd_tx = if config.mode.clients.misbehaviour { - let (cmd_tx, cmd_rx) = crossbeam_channel::unbounded(); - let misbehavior_task = client::detect_misbehavior_task(cmd_rx, client); - if let Some(task) = misbehavior_task { - task_handles.push(task); - misbehaviour = true; - } - - Some(cmd_tx) - } else { - None - }; - - let data = WorkerData::Client { - misbehaviour, - refresh, - }; - - (cmd_tx, Some(data)) - } - Object::Connection(connection) 
=> { - let (cmd_tx, cmd_rx) = crossbeam_channel::unbounded(); - let connection_task = - connection::spawn_connection_worker(connection.clone(), chains, cmd_rx); - task_handles.push(connection_task); - - (Some(cmd_tx), None) - } - Object::Channel(channel) => { - let (cmd_tx, cmd_rx) = crossbeam_channel::unbounded(); - let channel_task = channel::spawn_channel_worker(channel.clone(), chains, cmd_rx); - task_handles.push(channel_task); - - (Some(cmd_tx), None) - } - Object::Packet(path) => { - let packets_config = config.mode.packets; - let link_res = Link::new_from_opts( - chains.a.clone(), - chains.b, - LinkParameters { - src_port_id: path.src_port_id.clone(), - src_channel_id: path.src_channel_id, - }, - packets_config.tx_confirmation, - ); - - match link_res { - Ok(link) => { - let channel_ordering = link.a_to_b.channel().ordering; - let should_clear_on_start = - packets_config.clear_on_start || channel_ordering == Order::Ordered; - - let (cmd_tx, cmd_rx) = crossbeam_channel::unbounded(); - let link = Arc::new(Mutex::new(link)); - let resubmit = Resubmit::from_clear_interval(packets_config.clear_interval); - - let packet_task = packet::spawn_packet_cmd_worker( - cmd_rx, - link.clone(), - should_clear_on_start, - packets_config.clear_interval, - path.clone(), - ); - task_handles.push(packet_task); - - let link_task = packet::spawn_packet_worker(path.clone(), link, resubmit); - task_handles.push(link_task); - - (Some(cmd_tx), None) - } - Err(e) => { - error!("error initializing link object for packet worker: {}", e); - (None, None) - } - } - } - - Object::Wallet(wallet) => { - assert_eq!(wallet.chain_id, chains.a.id()); - - let wallet_task = wallet::spawn_wallet_worker(chains.a); - task_handles.push(wallet_task); - - (None, None) - } - }; - - WorkerHandle::new(id, object, data, cmd_tx, task_handles) -} diff --git a/relayer/src/worker/channel.rs b/relayer/src/worker/channel.rs deleted file mode 100644 index aed739e1aa..0000000000 --- a/relayer/src/worker/channel.rs +++ /dev/null @@ -1,82 +0,0 @@ -use core::time::Duration; -use crossbeam_channel::Receiver; -use tracing::{debug, error_span}; - -use crate::channel::Channel as RelayChannel; -use crate::util::task::{spawn_background_task, Next, TaskError, TaskHandle}; -use crate::{ - chain::handle::{ChainHandle, ChainHandlePair}, - object::Channel, - util::retry::retry_with_index, - worker::retry_strategy, -}; - -use super::error::RunError; -use super::WorkerCmd; - -pub fn spawn_channel_worker( - channel: Channel, - chains: ChainHandlePair, - cmd_rx: Receiver, -) -> TaskHandle { - spawn_background_task( - error_span!("worker.channel", channel = %channel.short_name()), - Some(Duration::from_millis(200)), - move || { - if let Ok(cmd) = cmd_rx.try_recv() { - match cmd { - WorkerCmd::IbcEvents { batch } => { - // there can be up to two event for this channel, e.g. init and try. - // process the last event, the one with highest "rank". 
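 // i.e. if both an Init and a Try event are present, the Try event supersedes
 // the Init, so only the newest event needs to be replayed to make progress.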
- let last_event = batch.events.last(); - debug!("starts processing {:#?}", last_event); - - if let Some(event) = last_event { - let mut handshake_channel = RelayChannel::restore_from_event( - chains.a.clone(), - chains.b.clone(), - event.clone(), - ) - .map_err(|e| TaskError::Fatal(RunError::channel(e)))?; - - retry_with_index(retry_strategy::worker_default_strategy(), |index| { - handshake_channel.step_event(event.clone(), index) - }) - .map_err(|e| TaskError::Fatal(RunError::retry(e))) - } else { - Ok(Next::Continue) - } - } - WorkerCmd::NewBlock { - height: current_height, - new_block: _, - } => { - debug!("starts processing block event at {:#?}", current_height); - - let height = current_height - .decrement() - .map_err(|e| TaskError::Fatal(RunError::ics02(e)))?; - - let (mut handshake_channel, state) = RelayChannel::restore_from_state( - chains.a.clone(), - chains.b.clone(), - channel.clone(), - height, - ) - .map_err(|e| TaskError::Fatal(RunError::channel(e)))?; - - retry_with_index(retry_strategy::worker_default_strategy(), |index| { - handshake_channel.step_state(state, index) - }) - .map_err(|e| TaskError::Fatal(RunError::retry(e))) - } - - // nothing to do - WorkerCmd::ClearPendingPackets => Ok(Next::Continue), - } - } else { - Ok(Next::Continue) - } - }, - ) -} diff --git a/relayer/src/worker/client.rs b/relayer/src/worker/client.rs deleted file mode 100644 index a36608cabb..0000000000 --- a/relayer/src/worker/client.rs +++ /dev/null @@ -1,135 +0,0 @@ -use core::convert::Infallible; -use core::time::Duration; -use crossbeam_channel::Receiver; -use tracing::{debug, span, trace, warn}; - -use ibc::events::IbcEvent; - -use crate::util::task::{spawn_background_task, Next, TaskError, TaskHandle}; -use crate::{ - chain::handle::ChainHandle, - foreign_client::{ForeignClient, HasExpiredOrFrozenError, MisbehaviourResults}, - telemetry, -}; - -use super::WorkerCmd; - -pub fn spawn_refresh_client( - mut client: ForeignClient, -) -> Option { - if client.is_expired_or_frozen() { - warn!( - client = %client.id, - "skipping refresh client task on frozen client", - ); - None - } else { - Some(spawn_background_task( - span!( - tracing::Level::ERROR, - "refresh", - client = %client.id, - src_chain = %client.src_chain.id(), - dst_chain = %client.dst_chain.id(), - ), - Some(Duration::from_secs(1)), - move || { - let res = client.refresh().map_err(|e| { - if e.is_expired_or_frozen_error() { - TaskError::Fatal(e) - } else { - TaskError::Ignore(e) - } - })?; - - if res.is_some() { - telemetry!(ibc_client_updates, &client.dst_chain.id(), &client.id, 1); - } - - Ok(Next::Continue) - }, - )) - } -} - -pub fn detect_misbehavior_task( - receiver: Receiver, - client: ForeignClient, -) -> Option { - if client.is_expired_or_frozen() { - warn!( - client = %client.id(), - "skipping detect misbehavior task on frozen client", - ); - return None; - } - - let mut first_check_done = false; - - let handle = spawn_background_task( - span!( - tracing::Level::ERROR, - "DetectMisbehaviorWorker", - client = %client.id, - src_chain = %client.src_chain.id(), - dst_chain = %client.dst_chain.id(), - ), - Some(Duration::from_millis(600)), - move || -> Result> { - if !first_check_done { - first_check_done = true; - let _span = span!( - tracing::Level::DEBUG, - "DetectMisbehaviorFirstCheck", - client = %client.id, - src_chain = %client.src_chain.id(), - dst_chain = %client.dst_chain.id(), - ) - .entered(); - debug!("doing first check"); - let misbehavior_result = client.detect_misbehaviour_and_submit_evidence(None); - 
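
The refresh task above encodes a simple error-classification contract: errors from an expired or frozen client are fatal and end the task, while anything else is ignored and retried on the next tick. A stand-alone sketch of that contract, with hypothetical types in place of the crate's `TaskError` and foreign-client error:

```rust
// Illustrative sketch of the Fatal/Ignore contract used by the background tasks
// in this file. The real TaskError and step runner live in relayer/src/util/task.rs;
// this stand-alone version only mirrors the idea.
#[derive(Debug)]
enum TaskError<E> {
    // Log and retry on the next tick.
    Ignore(E),
    // Terminate the background task.
    Fatal(E),
}

#[derive(Debug)]
struct ClientError {
    expired_or_frozen: bool,
}

fn refresh_step(fail_fatally: bool) -> Result<(), TaskError<ClientError>> {
    let err = ClientError {
        expired_or_frozen: fail_fatally,
    };
    if err.expired_or_frozen {
        Err(TaskError::Fatal(err))
    } else {
        Err(TaskError::Ignore(err))
    }
}

fn main() {
    // An ignorable error: the runner logs it and calls the step again later.
    assert!(matches!(refresh_step(false), Err(TaskError::Ignore(_))));
    // A fatal error: the runner stops the task, as with an expired or frozen client.
    assert!(matches!(refresh_step(true), Err(TaskError::Fatal(_))));
}
```
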
trace!("detect misbehavior result: {:?}", misbehavior_result); - } - - if let Ok(cmd) = receiver.try_recv() { - match cmd { - WorkerCmd::IbcEvents { batch } => { - trace!("received batch: {:?}", batch); - - for event in batch.events { - if let IbcEvent::UpdateClient(update) = event { - debug!("checking misbehavior for updated client"); - let misbehavior_result = - client.detect_misbehaviour_and_submit_evidence(Some(update)); - trace!("detect misbehavior result: {:?}", misbehavior_result); - - match misbehavior_result { - MisbehaviourResults::ValidClient => {} - MisbehaviourResults::VerificationError => { - // can retry in next call - } - MisbehaviourResults::EvidenceSubmitted(_) => { - // if evidence was submitted successfully then exit - return Ok(Next::Abort); - } - MisbehaviourResults::CannotExecute => { - // skip misbehaviour checking if chain does not have support for it (i.e. client - // update event does not include the header) - return Ok(Next::Abort); - } - } - } - } - } - - WorkerCmd::NewBlock { .. } => {} - WorkerCmd::ClearPendingPackets => {} - } - } - - Ok(Next::Continue) - }, - ); - - Some(handle) -} diff --git a/relayer/src/worker/cmd.rs b/relayer/src/worker/cmd.rs deleted file mode 100644 index c809daa108..0000000000 --- a/relayer/src/worker/cmd.rs +++ /dev/null @@ -1,36 +0,0 @@ -use core::fmt; - -use ibc::{core::ics02_client::events::NewBlock, Height}; - -use crate::event::monitor::EventBatch; - -/// A command for a [`WorkerHandle`](crate::worker::WorkerHandle). -#[derive(Debug, Clone)] -pub enum WorkerCmd { - /// A batch of packet events need to be relayed - IbcEvents { batch: EventBatch }, - - /// A new block has been committed - NewBlock { height: Height, new_block: NewBlock }, - - /// Trigger a pending packets clear - ClearPendingPackets, -} - -impl fmt::Display for WorkerCmd { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - WorkerCmd::IbcEvents { batch } => { - write!(f, "IbcEvents batch from {}: ", batch.chain_id)?; - for e in &batch.events { - write!(f, "{}; ", e)?; - } - write!(f, "batch Height: {}", batch.height) - } - WorkerCmd::NewBlock { height, new_block } => { - write!(f, "NewBlock({}, {:?})", height, new_block) - } - WorkerCmd::ClearPendingPackets => write!(f, "CleaPendingPackets"), - } - } -} diff --git a/relayer/src/worker/connection.rs b/relayer/src/worker/connection.rs deleted file mode 100644 index 1ddf8912f7..0000000000 --- a/relayer/src/worker/connection.rs +++ /dev/null @@ -1,85 +0,0 @@ -use core::time::Duration; -use crossbeam_channel::Receiver; -use tracing::{debug, error_span}; - -use crate::connection::Connection as RelayConnection; -use crate::util::task::{spawn_background_task, Next, TaskError, TaskHandle}; -use crate::{ - chain::handle::{ChainHandle, ChainHandlePair}, - object::Connection, - util::retry::retry_with_index, - worker::retry_strategy, -}; - -use super::error::RunError; -use super::WorkerCmd; - -pub fn spawn_connection_worker( - connection: Connection, - chains: ChainHandlePair, - cmd_rx: Receiver, -) -> TaskHandle { - spawn_background_task( - error_span!("connection", connection = %connection.short_name()), - Some(Duration::from_millis(200)), - move || { - if let Ok(cmd) = cmd_rx.try_recv() { - match cmd { - WorkerCmd::IbcEvents { batch } => { - // there can be up to two event for this connection, e.g. init and try. - // process the last event, the one with highest "rank". 
- let last_event = batch.events.last(); - - debug!("starts processing {:#?}", last_event); - - if let Some(event) = last_event { - let mut handshake_connection = RelayConnection::restore_from_event( - chains.a.clone(), - chains.b.clone(), - event.clone(), - ) - .map_err(|e| TaskError::Fatal(RunError::connection(e)))?; - - retry_with_index(retry_strategy::worker_default_strategy(), |index| { - handshake_connection.step_event(event.clone(), index) - }) - .map_err(|e| TaskError::Fatal(RunError::retry(e))) - } else { - Ok(Next::Continue) - } - } - - WorkerCmd::NewBlock { - height: current_height, - new_block: _, - } => { - debug!("starts processing block event at {}", current_height); - - let height = current_height - .decrement() - .map_err(|e| TaskError::Fatal(RunError::ics02(e)))?; - - let (mut handshake_connection, state) = - RelayConnection::restore_from_state( - chains.a.clone(), - chains.b.clone(), - connection.clone(), - height, - ) - .map_err(|e| TaskError::Fatal(RunError::connection(e)))?; - - retry_with_index(retry_strategy::worker_default_strategy(), |index| { - handshake_connection.step_state(state, index) - }) - .map_err(|e| TaskError::Fatal(RunError::retry(e))) - } - - // nothing to do - WorkerCmd::ClearPendingPackets => Ok(Next::Continue), - } - } else { - Ok(Next::Continue) - } - }, - ) -} diff --git a/relayer/src/worker/error.rs b/relayer/src/worker/error.rs deleted file mode 100644 index 4ea5d7b4b7..0000000000 --- a/relayer/src/worker/error.rs +++ /dev/null @@ -1,35 +0,0 @@ -use crossbeam_channel::RecvError; -use flex_error::{define_error, DisplayOnly}; -use ibc::core::ics02_client::error::Error as Ics02Error; - -use crate::channel::ChannelError; -use crate::connection::ConnectionError; -use crate::link::error::LinkError; - -define_error! 
{ - RunError { - Ics02 - [ Ics02Error ] - | _ | { "client error" }, - - Connection - [ ConnectionError ] - | _ | { "connection error" }, - - Channel - [ ChannelError ] - | _ | { "channel error" }, - - Link - [ LinkError ] - | _ | { "link error" }, - - Retry - { retries: retry::Error } - | e | { format_args!("worker failed after {} retries", e.retries) }, - - Recv - [ DisplayOnly ] - | _ | { "error receiving from channel: sender end has been closed" }, - } -} diff --git a/relayer/src/worker/handle.rs b/relayer/src/worker/handle.rs deleted file mode 100644 index 2ef8565bf5..0000000000 --- a/relayer/src/worker/handle.rs +++ /dev/null @@ -1,160 +0,0 @@ -use core::fmt; -use core::mem; - -use crossbeam_channel::Sender; -use serde::Deserialize; -use serde::Serialize; -use tracing::{debug, trace}; - -use ibc::{ - core::{ics02_client::events::NewBlock, ics24_host::identifier::ChainId}, - events::IbcEvent, - Height, -}; - -use crate::chain::tracking::TrackingId; -use crate::util::lock::{LockExt, RwArc}; -use crate::util::task::TaskHandle; -use crate::{event::monitor::EventBatch, object::Object}; - -use super::{WorkerCmd, WorkerId}; - -#[derive(Clone, Debug, Serialize, Deserialize)] -#[serde(tag = "type")] -pub enum WorkerData { - Client { misbehaviour: bool, refresh: bool }, -} - -pub struct WorkerHandle { - id: WorkerId, - object: Object, - data: Option, - tx: RwArc>>, - task_handles: Vec, -} - -impl WorkerHandle { - pub fn new( - id: WorkerId, - object: Object, - data: Option, - tx: Option>, - task_handles: Vec, - ) -> Self { - Self { - id, - object, - data, - tx: >::new_lock(tx), - task_handles, - } - } - - pub fn try_send_command(&self, cmd: WorkerCmd) { - let res = if let Some(tx) = self.tx.acquire_read().as_ref() { - tx.send(cmd) - } else { - Ok(()) - }; - - if res.is_err() { - debug!("dropping sender end for worker {} as the receiver was dropped when the worker task terminated", self.id); - *self.tx.acquire_write() = None; - } - } - - /// Send a batch of events to the worker. - pub fn send_events( - &self, - height: Height, - events: Vec, - chain_id: ChainId, - tracking_id: TrackingId, - ) { - let batch = EventBatch { - chain_id, - height, - events, - tracking_id, - }; - - self.try_send_command(WorkerCmd::IbcEvents { batch }); - } - - /// Send a batch of [`NewBlock`] event to the worker. - pub fn send_new_block(&self, height: Height, new_block: NewBlock) { - self.try_send_command(WorkerCmd::NewBlock { height, new_block }); - } - - /// Instruct the worker to clear pending packets. - pub fn clear_pending_packets(&self) { - self.try_send_command(WorkerCmd::ClearPendingPackets); - } - - /// Shutdown all worker tasks without waiting for them to terminate. - pub fn shutdown(&self) { - for task in self.task_handles.iter() { - task.shutdown() - } - } - - /// Shutdown all worker tasks and wait for them to terminate - pub fn shutdown_and_wait(self) { - for task in self.task_handles.iter() { - // Send shutdown signal to all tasks in parallel. - task.shutdown() - } - // Drop handle automatically handles the waiting for tasks to terminate. - } - - pub fn is_stopped(&self) -> bool { - for task in self.task_handles.iter() { - if !task.is_stopped() { - return false; - } - } - true - } - - /// Wait for the worker thread to finish. 
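
`try_send_command` above treats a failed send as proof that the worker's receiver has gone away, and discards the sender so later commands become no-ops instead of repeated failures. A minimal sketch of the idea, using std's `mpsc` and a plain `Option` in place of the crate's `RwArc` wrapper (all names are stand-ins):

```rust
// Illustrative only: a simplified handle that drops its sender once the
// worker task has terminated and its receiver is gone.
use std::sync::mpsc::{channel, Sender};

struct Handle {
    tx: Option<Sender<&'static str>>,
}

impl Handle {
    fn try_send(&mut self, cmd: &'static str) {
        let failed = match &self.tx {
            // `send` only fails when the receiver end has been dropped.
            Some(tx) => tx.send(cmd).is_err(),
            // Sender was already discarded: silently ignore further commands.
            None => false,
        };
        if failed {
            println!("worker is gone, dropping sender end");
            self.tx = None;
        }
    }
}

fn main() {
    let (tx, rx) = channel();
    let mut handle = Handle { tx: Some(tx) };

    handle.try_send("ClearPendingPackets");
    assert_eq!(rx.try_recv(), Ok("ClearPendingPackets"));

    drop(rx); // simulate the worker task terminating
    handle.try_send("NewBlock"); // first failure: the sender is discarded
    handle.try_send("NewBlock"); // subsequent calls are no-ops
    assert!(handle.tx.is_none());
}
```
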
- pub fn join(mut self) { - let task_handles = mem::take(&mut self.task_handles); - trace!(worker = %self.object.short_name(), "worker::handle: waiting for worker loop to end"); - for task in task_handles.into_iter() { - task.join() - } - trace!(worker = %self.object.short_name(), "worker::handle: waiting for worker loop to end: done"); - } - - /// Get the worker's id. - pub fn id(&self) -> WorkerId { - self.id - } - - /// Get a reference to the worker's object. - pub fn object(&self) -> &Object { - &self.object - } - - /// Get a reference to the worker handle's data. - pub fn data(&self) -> Option<&WorkerData> { - self.data.as_ref() - } -} - -// Drop handle to send shutdown signals to background tasks in parallel -// before waiting for all of them to terminate. -impl Drop for WorkerHandle { - fn drop(&mut self) { - self.shutdown() - } -} - -impl fmt::Debug for WorkerHandle { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("WorkerHandle") - .field("id", &self.id) - .field("object", &self.object) - .finish_non_exhaustive() - } -} diff --git a/relayer/src/worker/map.rs b/relayer/src/worker/map.rs deleted file mode 100644 index ad426f5b80..0000000000 --- a/relayer/src/worker/map.rs +++ /dev/null @@ -1,241 +0,0 @@ -use alloc::collections::btree_map::BTreeMap as HashMap; -use core::mem; - -use ibc::core::ics02_client::events::NewBlock; -use ibc::core::ics24_host::identifier::ChainId; -use ibc::Height; -use tracing::{debug, trace}; - -use crate::{ - chain::handle::{ChainHandle, ChainHandlePair}, - config::Config, - object::Object, - telemetry, -}; - -use super::{spawn_worker_tasks, WorkerHandle, WorkerId}; - -/// Manage the lifecycle of [`WorkerHandle`]s associated with [`Object`]s. -#[derive(Debug)] -pub struct WorkerMap { - workers: HashMap, - latest_worker_id: WorkerId, -} - -impl Default for WorkerMap { - fn default() -> Self { - Self { - workers: HashMap::new(), - latest_worker_id: WorkerId::new(0), - } - } -} - -impl WorkerMap { - /// Create a new worker map, which will spawn workers with - /// the given channel for sending messages back to the - /// [supervisor](crate::supervisor::SupervisorHandle). - pub fn new() -> Self { - Self::default() - } - - /// Returns `true` if there is a spawned [`WorkerHandle`] associated with the given [`Object`]. - pub fn contains(&self, object: &Object) -> bool { - self.workers.contains_key(object) - } - - /// Remove the [`WorkerHandle`] associated with the given [`Object`] from - /// the map and wait for its thread to terminate. - pub fn remove_stopped(&mut self, id: WorkerId, object: Object) -> bool { - match self.workers.remove(&object) { - Some(handle) if handle.id() == id => { - telemetry!(worker, metric_type(&object), -1); - - let id = handle.id(); - - trace!( - worker.id = %id, worker.object = %object.short_name(), - "waiting for worker loop to end" - ); - - let _ = handle.join(); - - trace!( - worker.id = %id, worker.object = %object.short_name(), - "worker loop has ended" - ); - - true - } - Some(handle) => { - debug!( - worker.object = %object.short_name(), - "ignoring attempt to remove worker with outdated id {} (current: {})", - id, handle.id() - ); - - self.workers.insert(object, handle); - - false - } - None => { - debug!( - worker.object = %object.short_name(), - "ignoring attempt to remove unknown worker", - ); - - false - } - } - } - - /// Returns all the [`WorkerHandle`] which are interested in new block events originating - /// from the chain with the given [`ChainId`]. 
- /// See: [`Object::notify_new_block`] - pub fn to_notify<'a>( - &'a self, - src_chain_id: &'a ChainId, - ) -> impl Iterator { - self.workers.iter().filter_map(move |(o, w)| { - if !w.is_stopped() && o.notify_new_block(src_chain_id) { - Some(w) - } else { - None - } - }) - } - - pub fn notify_new_block(&self, src_chain_id: &ChainId, height: Height, new_block: NewBlock) { - for worker in self.to_notify(src_chain_id) { - // Ignore send error if the worker task handling - // NewBlock cmd has been terminated. - let _ = worker.send_new_block(height, new_block); - } - } - - /// Get a handle to the worker in charge of handling events associated - /// with the given [`Object`]. - /// - /// This function will spawn a new [`WorkerHandle`] if one does not exists already. - pub fn get_or_spawn( - &mut self, - object: Object, - src: Chain, - dst: Chain, - config: &Config, - ) -> &WorkerHandle { - if self.workers.contains_key(&object) { - &self.workers[&object] - } else { - let worker = self.spawn_worker(src, dst, &object, config); - self.workers.entry(object).or_insert(worker) - } - } - - /// Spawn a new [`WorkerHandle`], only if one does not exists already. - /// - /// Returns whether or not the worker was actually spawned. - pub fn spawn( - &mut self, - src: Chain, - dst: Chain, - object: &Object, - config: &Config, - ) -> bool { - if !self.workers.contains_key(object) { - let worker = self.spawn_worker(src, dst, object, config); - self.workers.entry(object.clone()).or_insert(worker); - true - } else { - false - } - } - - /// Force spawn a worker for the given [`Object`]. - fn spawn_worker( - &mut self, - src: Chain, - dst: Chain, - object: &Object, - config: &Config, - ) -> WorkerHandle { - telemetry!(worker, metric_type(object), 1); - - spawn_worker_tasks( - ChainHandlePair { a: src, b: dst }, - self.next_worker_id(), - object.clone(), - config, - ) - } - - /// Compute the next worker id - fn next_worker_id(&mut self) -> WorkerId { - let id = self.latest_worker_id.next(); - self.latest_worker_id = id; - id - } - - /// List the [`Object`]s for which there is an associated worker - /// for the given chain. - pub fn objects_for_chain(&self, chain_id: &ChainId) -> Vec { - self.workers - .keys() - .filter(|o| o.for_chain(chain_id)) - .cloned() - .collect() - } - - /// List the [`WorkerHandle`]s associated with the given chain. - pub fn workers_for_chain(&self, chain_id: &ChainId) -> Vec<&WorkerHandle> { - self.workers - .iter() - .filter_map(|(o, h)| o.for_chain(chain_id).then(|| h)) - .collect() - } - - /// Return all the handles to the workers tracked in this map. - pub fn handles(&self) -> impl Iterator { - self.workers.values() - } - - /// Shutdown the worker associated with the given [`Object`], synchronously. - pub fn shutdown_worker(&mut self, object: &Object) { - if let Some(handle) = self.workers.remove(object) { - telemetry!(worker, metric_type(object), -1); - - handle.shutdown_and_wait(); - } - // Drop handle automatically handles the waiting for tasks to terminate. - } - - /// Shut down all the workers, asynchronously. - pub fn shutdown(&mut self) { - let workers = mem::take(&mut self.workers); - for worker in workers.values() { - // Send shutdown signal to all tasks in parallel. - worker.shutdown(); - } - } -} - -// Drop handle to send shutdown signals to background tasks in parallel -// before waiting for all of them to terminate. 
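
`get_or_spawn` above spawns a worker only the first time an `Object` is seen; later lookups for the same object reuse the stored handle. A simplified, self-contained sketch of that keyed get-or-spawn pattern (the `Object`/`WorkerHandle` types here are stand-ins, not the crate's):

```rust
// Illustrative stand-ins for the crate's Object / WorkerHandle / WorkerMap.
use std::collections::BTreeMap;

#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
struct Object(&'static str);

#[derive(Debug)]
struct WorkerHandle {
    id: u64,
}

#[derive(Default)]
struct WorkerMap {
    workers: BTreeMap<Object, WorkerHandle>,
    latest_worker_id: u64,
}

impl WorkerMap {
    fn get_or_spawn(&mut self, object: Object) -> &WorkerHandle {
        if !self.workers.contains_key(&object) {
            // Spawn only when no worker exists for this object yet.
            self.latest_worker_id += 1;
            let handle = WorkerHandle {
                id: self.latest_worker_id,
            };
            self.workers.insert(object.clone(), handle);
        }
        &self.workers[&object]
    }
}

fn main() {
    let mut map = WorkerMap::default();
    let first = map.get_or_spawn(Object("packet::channel-0/transfer")).id;
    let second = map.get_or_spawn(Object("packet::channel-0/transfer")).id;
    // The same object never gets a second worker.
    assert_eq!(first, second);
    assert_eq!(map.workers.len(), 1);
}
```
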
-impl Drop for WorkerMap { - fn drop(&mut self) { - self.shutdown() - } -} - -#[cfg(feature = "telemetry")] -fn metric_type(o: &Object) -> ibc_telemetry::state::WorkerType { - use ibc_telemetry::state::WorkerType; - - match o { - Object::Client(_) => WorkerType::Client, - Object::Connection(_) => WorkerType::Connection, - Object::Channel(_) => WorkerType::Channel, - Object::Packet(_) => WorkerType::Packet, - Object::Wallet(_) => WorkerType::Wallet, - } -} diff --git a/relayer/src/worker/packet.rs b/relayer/src/worker/packet.rs deleted file mode 100644 index eedf1d5f34..0000000000 --- a/relayer/src/worker/packet.rs +++ /dev/null @@ -1,268 +0,0 @@ -use core::time::Duration; -use std::sync::{Arc, Mutex}; - -use crossbeam_channel::Receiver; -use tracing::{error, error_span, trace}; - -use ibc::Height; - -use crate::chain::handle::ChainHandle; -use crate::event::monitor::EventBatch; -use crate::foreign_client::HasExpiredOrFrozenError; -use crate::link::Resubmit; -use crate::link::{error::LinkError, Link}; -use crate::object::Packet; -use crate::telemetry; -use crate::util::task::{spawn_background_task, Next, TaskError, TaskHandle}; - -use super::error::RunError; -use super::WorkerCmd; - -fn handle_link_error_in_task(e: LinkError) -> TaskError { - if e.is_expired_or_frozen_error() { - // If the client is expired or frozen, terminate the packet worker - // as there is no point of relaying further packets. - TaskError::Fatal(RunError::link(e)) - } else { - TaskError::Ignore(RunError::link(e)) - } -} - -/// Spawns a packet worker task in the background that handles the work of -/// processing pending txs between `ChainA` and `ChainB`. -pub fn spawn_packet_worker( - path: Packet, - // Mutex is used to prevent race condition between the packet workers - link: Arc>>, - resubmit: Resubmit, -) -> TaskHandle { - let span = { - let relay_path = &link.lock().unwrap().a_to_b; - error_span!( - "packet", - src_chain = %relay_path.src_chain().id(), - src_port = %relay_path.src_port_id(), - src_channel = %relay_path.src_channel_id(), - dst_chain = %relay_path.dst_chain().id(), - ) - }; - - spawn_background_task(span, Some(Duration::from_millis(1000)), move || { - handle_execute_schedule(&mut link.lock().unwrap(), &path, resubmit)?; - Ok(Next::Continue) - }) -} - -pub fn spawn_packet_cmd_worker( - cmd_rx: Receiver, - // Mutex is used to prevent race condition between the packet workers - link: Arc>>, - mut should_clear_on_start: bool, - clear_interval: u64, - path: Packet, -) -> TaskHandle { - let span = { - let relay_path = &link.lock().unwrap().a_to_b; - error_span!( - "packet_cmd", - src_chain = %relay_path.src_chain().id(), - src_port = %relay_path.src_port_id(), - src_channel = %relay_path.src_channel_id(), - dst_chain = %relay_path.dst_chain().id(), - ) - }; - - let mut current_command = None; - - spawn_background_task(span, Some(Duration::from_millis(200)), move || { - if current_command.is_none() { - // Only try to receive the next command if the - // previous command was processed successfully. - current_command = cmd_rx.try_recv().ok(); - } - - if let Some(cmd) = ¤t_command { - handle_packet_cmd( - &mut link.lock().unwrap(), - &mut should_clear_on_start, - clear_interval, - &path, - cmd.clone(), - )?; - - // Only reset current_command if handle_packet_cmd succeeds. - // Otherwise the same command will be retried in the next step. 
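
`spawn_packet_cmd_worker` above keeps the in-flight command in `current_command` and only clears it once `handle_packet_cmd` succeeds, so an ignorable failure retries the same command on the next tick. A stand-alone sketch of that retry-by-retention pattern (all names hypothetical):

```rust
// Illustrative only: hold the current command across ticks until it is handled.
fn handle(cmd: &str, attempts_left: &mut u32) -> Result<(), String> {
    if *attempts_left > 0 {
        *attempts_left -= 1;
        Err(format!("transient failure while handling {cmd}"))
    } else {
        Ok(())
    }
}

fn main() {
    let mut pending = vec!["ClearPendingPackets"];
    let mut current_command: Option<&str> = None;
    let mut attempts_left = 2; // fail twice, then succeed

    for tick in 1..=4 {
        if current_command.is_none() {
            // Only fetch a new command once the previous one completed.
            current_command = pending.pop();
        }
        if let Some(cmd) = current_command {
            match handle(cmd, &mut attempts_left) {
                // Success: clear the slot so the next tick can pick up a new command.
                Ok(()) => {
                    println!("tick {tick}: {cmd} handled");
                    current_command = None;
                }
                // Failure: leave `current_command` untouched so it is retried.
                Err(e) => println!("tick {tick}: {e}, will retry"),
            }
        }
    }
}
```
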
- current_command = None; - } - - Ok(Next::Continue) - }) -} - -/// Receives worker commands, which may be: -/// - IbcEvent => then it updates schedule -/// - NewBlock => schedules packet clearing -/// - Shutdown => exits -/// -/// Regardless of the incoming command, this method -/// also refreshes and executes any scheduled operational -/// data that is ready. -fn handle_packet_cmd( - link: &mut Link, - should_clear_on_start: &mut bool, - clear_interval: u64, - path: &Packet, - cmd: WorkerCmd, -) -> Result<(), TaskError> { - match cmd { - WorkerCmd::IbcEvents { batch } => handle_update_schedule(link, clear_interval, path, batch), - - // Handle the arrival of an event signaling that the - // source chain has advanced to a new block. - WorkerCmd::NewBlock { - height, - new_block: _, - } => { - if *should_clear_on_start { - handle_clear_packet(link, clear_interval, path, Some(height))?; - - // Clear the flag only if handle_clear_packet succeeds - *should_clear_on_start = false; - Ok(()) - } else if should_clear_packets(clear_interval, height) { - handle_clear_packet(link, clear_interval, path, Some(height)) - } else { - Ok(()) - } - } - - WorkerCmd::ClearPendingPackets => handle_clear_packet(link, clear_interval, path, None), - } -} - -/// Whether or not to clear pending packets at this `step` for the given height. -/// Packets are cleared if `clear_interval` is not `0` and if we have reached the interval. -fn should_clear_packets(clear_interval: u64, height: Height) -> bool { - clear_interval != 0 && height.revision_height % clear_interval == 0 -} - -fn handle_update_schedule( - link: &mut Link, - clear_interval: u64, - path: &Packet, - batch: EventBatch, -) -> Result<(), TaskError> { - link.a_to_b - .update_schedule(batch) - .map_err(handle_link_error_in_task)?; - - handle_execute_schedule(link, path, Resubmit::from_clear_interval(clear_interval)) -} - -fn handle_clear_packet( - link: &mut Link, - clear_interval: u64, - path: &Packet, - height: Option, -) -> Result<(), TaskError> { - link.a_to_b - .schedule_packet_clearing(height) - .map_err(handle_link_error_in_task)?; - - handle_execute_schedule(link, path, Resubmit::from_clear_interval(clear_interval)) -} - -fn handle_execute_schedule( - link: &mut Link, - _path: &Packet, - resubmit: Resubmit, -) -> Result<(), TaskError> { - link.a_to_b - .refresh_schedule() - .map_err(handle_link_error_in_task)?; - - link.a_to_b.execute_schedule().map_err(|e| { - if e.is_expired_or_frozen_error() { - TaskError::Fatal(RunError::link(e)) - } else { - error!("will retry: schedule execution encountered error: {}", e,); - TaskError::Ignore(RunError::link(e)) - } - })?; - - let summary = link.a_to_b.process_pending_txs(resubmit); - - if !summary.is_empty() { - trace!("produced relay summary: {:?}", summary); - } - - telemetry!(packet_metrics(_path, &summary)); - - Ok(()) -} - -#[cfg(feature = "telemetry")] -use crate::link::RelaySummary; - -#[cfg(feature = "telemetry")] -fn packet_metrics(path: &Packet, summary: &RelaySummary) { - receive_packet_metrics(path, summary); - acknowledgment_metrics(path, summary); - timeout_metrics(path, summary); -} - -#[cfg(feature = "telemetry")] -fn receive_packet_metrics(path: &Packet, summary: &RelaySummary) { - use ibc::events::IbcEvent::WriteAcknowledgement; - - let count = summary - .events - .iter() - .filter(|e| matches!(e, WriteAcknowledgement(_))) - .count(); - - telemetry!( - ibc_receive_packets, - &path.src_chain_id, - &path.src_channel_id, - &path.src_port_id, - count as u64, - ); -} - -#[cfg(feature = 
"telemetry")] -fn acknowledgment_metrics(path: &Packet, summary: &RelaySummary) { - use ibc::events::IbcEvent::AcknowledgePacket; - - let count = summary - .events - .iter() - .filter(|e| matches!(e, AcknowledgePacket(_))) - .count(); - - telemetry!( - ibc_acknowledgment_packets, - &path.src_chain_id, - &path.src_channel_id, - &path.src_port_id, - count as u64, - ); -} - -#[cfg(feature = "telemetry")] -fn timeout_metrics(path: &Packet, summary: &RelaySummary) { - use ibc::events::IbcEvent::TimeoutPacket; - let count = summary - .events - .iter() - .filter(|e| matches!(e, TimeoutPacket(_))) - .count(); - - telemetry!( - ibc_timeout_packets, - &path.src_chain_id, - &path.src_channel_id, - &path.src_port_id, - count as u64, - ); -} diff --git a/relayer/src/worker/retry_strategy.rs b/relayer/src/worker/retry_strategy.rs deleted file mode 100644 index 87122eb253..0000000000 --- a/relayer/src/worker/retry_strategy.rs +++ /dev/null @@ -1,40 +0,0 @@ -use crate::util::retry::{clamp_total, ConstantGrowth}; -use core::time::Duration; - -/// A basic worker retry strategy. -/// -/// The backoff delay is initially 200ms and grows -/// by 100ms at each step. The backoff delay is -/// capped at 500ms. -/// The overall amount of time spent backing off -/// is capped to 2 seconds. -/// See the `default_strategy` test below. -pub fn worker_default_strategy() -> impl Iterator { - let strategy = ConstantGrowth::new(Duration::from_millis(200), Duration::from_millis(100)); - clamp_total(strategy, Duration::from_millis(500), Duration::from_secs(2)) -} - -#[cfg(test)] -mod tests { - use std::time::Duration; - - use crate::worker::retry_strategy::worker_default_strategy; - - #[test] - fn default_strategy() { - let strategy = worker_default_strategy(); - let delays = strategy.take(10).collect::>(); - // This strategy has exactly 6 retry steps - assert_eq!( - delays, - vec![ - Duration::from_millis(200), - Duration::from_millis(300), - Duration::from_millis(400), - Duration::from_millis(500), - Duration::from_millis(500), - Duration::from_millis(100), - ] - ); - } -} diff --git a/relayer/src/worker/wallet.rs b/relayer/src/worker/wallet.rs deleted file mode 100644 index 6bf068adef..0000000000 --- a/relayer/src/worker/wallet.rs +++ /dev/null @@ -1,42 +0,0 @@ -use std::time::Duration; - -use tracing::{error_span, trace}; - -use crate::{ - chain::handle::ChainHandle, - telemetry, - util::task::{spawn_background_task, Next, TaskError, TaskHandle}, -}; - -pub fn spawn_wallet_worker(chain: Chain) -> TaskHandle { - let span = error_span!("wallet", chain = %chain.id()); - - spawn_background_task(span, Some(Duration::from_secs(5)), move || { - let key = chain.get_key().map_err(|e| { - TaskError::Fatal(format!("failed to get key in use by the relayer: {e}")) - })?; - - let balance = chain.query_balance(None).map_err(|e| { - TaskError::Ignore(format!("failed to query balance for the account: {e}")) - })?; - - let amount: u64 = balance.amount.parse().map_err(|_| { - TaskError::Ignore(format!( - "failed to parse amount into u64: {}", - balance.amount - )) - })?; - - trace!(%amount, denom = %balance.denom, account = %key.account, "wallet balance"); - - telemetry!( - wallet_balance, - &chain.id(), - &key.account, - amount, - &balance.denom, - ); - - Ok(Next::Continue) - }) -} diff --git a/relayer/tests/config/fixtures/relayer_conf_example.toml b/relayer/tests/config/fixtures/relayer_conf_example.toml deleted file mode 100644 index 8d27b1357d..0000000000 --- a/relayer/tests/config/fixtures/relayer_conf_example.toml +++ /dev/null @@ 
-1,61 +0,0 @@ -[global] -log_level = 'error' - -[mode] - -[mode.clients] -enabled = true -refresh = true -misbehaviour = true - -[mode.connections] -enabled = false - -[mode.channels] -enabled = false - -[mode.packets] -enabled = true -clear_interval = 100 -clear_on_start = true -tx_confirmation = true - -[[chains]] -id = 'chain_A' -rpc_addr = 'http://127.0.0.1:26657' -grpc_addr = 'http://127.0.0.1:9090' -websocket_addr = 'ws://localhost:26657/websocket' -rpc_timeout = '10s' -account_prefix = 'cosmos' -key_name = 'testkey' -store_prefix = 'ibc' -max_gas = 200000 -gas_price = { price = 0.001, denom = 'stake' } -max_msg_num = 4 -max_tx_size = 1048576 -clock_drift = '5s' -trusting_period = '14days' -trust_threshold = { numerator = '1', denominator = '3' } -address_type = { derivation = 'cosmos' } - -[chains.packet_filter] -policy = 'allow' -list = [ - ['ica*', '*'], - ['transfer', 'channel-0'], -] - -[[chains]] -id = 'chain_B' -rpc_addr = 'http://127.0.0.1:26557' -grpc_addr = 'http://127.0.0.1:9090' -websocket_addr = 'ws://localhost:26557/websocket' -rpc_timeout = '10s' -account_prefix = 'cosmos' -key_name = 'testkey' -store_prefix = 'ibc' -gas_price = { price = 0.001, denom = 'stake' } -clock_drift = '5s' -trusting_period = '14days' -trust_threshold = { numerator = '1', denominator = '3' } -address_type = { derivation = 'ethermint', proto_type = { pk_type = '/injective.crypto.v1beta1.ethsecp256k1.PubKey' } } \ No newline at end of file diff --git a/scripts/dev-env b/scripts/dev-env deleted file mode 100755 index eeb3dd87ba..0000000000 --- a/scripts/dev-env +++ /dev/null @@ -1,41 +0,0 @@ -#!/bin/bash -e - -usage() { - echo "Usage: $0 CONFIG_FILE CHAIN_0_ID CHAIN_1_ID [CHAIN_2_ID]" - echo "Example: $0 ./config.toml ibc-0 ibc-1 ibc-2" - exit 1 -} - -missing() { - echo "Missing $1 parameter. Please check if all parameters were specified." - usage -} - -if [ ! -r "$1" ]; then - missing "CONFIG_FILE ($1)" -fi - -if [ -z "$2" ]; then - missing "CHAIN_0_ID" -fi - -if [ -z "$3" ]; then - missing "CHAIN_1_ID" -fi - - -if [ "$#" -gt 4 ]; then - echo "Incorrect number of parameters." - usage -fi - -CONFIG_FILE="$1" -CHAIN_0_ID="$2" -CHAIN_1_ID="$3" -CHAIN_2_ID="$4" - -SETUP_CHAINS="$(dirname "$0")/setup-chains" -INIT_CLIENTS="$(dirname "$0")/init-hermes" - -"$SETUP_CHAINS" "$CHAIN_0_ID" "$CHAIN_1_ID" "$CHAIN_2_ID" -"$INIT_CLIENTS" "$CONFIG_FILE" "$CHAIN_0_ID" "$CHAIN_1_ID" "$CHAIN_2_ID" diff --git a/scripts/gm/CHANGELOG.md b/scripts/gm/CHANGELOG.md deleted file mode 100644 index d67eafe5be..0000000000 --- a/scripts/gm/CHANGELOG.md +++ /dev/null @@ -1,133 +0,0 @@ -# Gaiad Manager Change Log - -## v0.1.3 - -### BUGFIXES - -- Fixed variable TM to reference the GAIAD_BINARY ([#2210]). - -[#2210]: https://github.com/informalsystems/ibc-rs/issues/2210 - -## v0.1.2 - -### BUGFIXES - -- Fixed `gm hermes keys` which was broken following a breaking change in Hermes CLI ([#2262]). - -### IMPROVEMENTS - -- Adjusted the `max_gas` and `gas_price.price` parameters used for `gm hermes config`. - -[#2262]: https://github.com/informalsystems/ibc-rs/issues/2262 - - -## v0.1.1 - -### BUGFIXES - -- Added `unsafe-reset-all` support for SDK 0.44.8+ and 0.45.3+ ([#2162]) - - -[#2162]: https://github.com/informalsystems/ibc-rs/issues/2162 - -## v0.1.0 - -### FEATURES -- Implemented JSON output if `OUTPUT=json` is set in the environment. -- Added the `extra_wallets` parameter to generate extra wallets for networks. - -### BUGFIXES -- Increased default Hermes config constants `rpc_timeout` and `max_gas`. 
-- Fixed default for `$OUPUT` in `lib-gm`. -- Updated Hermes config for Hermes 0.9.0 compatibility. - -## v0.0.9 - -### FEATURES -- Binaries in the config can be defined as URLs now. -- Add the option to set gm-lib path via the $GM_LIB environment variable ([#1365]) - -### BUGFIXES -- Fixed debug messages not printing to stdout properly. -- Minor cosmetic fixes. - -## v0.0.8 - -### BUGFIXES -- Fixed gaiad 0.43 keys add printing key to stderr instead of stdout issue ([#1312]) -- Bumped default rpc_timeout in Hermes config to 5s ([#1312]) - -[#1312]: https://github.com/informalsystems/ibc-rs/issues/1312 -[#1365]: https://github.com/informalsystems/ibc-rs/issues/1365 - -## v0.0.7 - -### BUGFIXES -- Fixed gm not reporting missing dependencies ([#1261]) - -[#1261]: https://github.com/informalsystems/ibc-rs/issues/1261 - -## v0.0.6 - -### FEATURES -- Compatibility of hermes updated to 0.4.1 and above. ([#1049]) -- Enabled swagger page on the gaiad APP port. - -### BUGFIXES -- Re-enable APP port in configuration ([comment](https://github.com/informalsystems/ibc-rs/pull/1051#issuecomment-856024919)) - -[#1049]: https://github.com/informalsystems/ibc-rs/issues/1049 - -## v0.0.5 (unreleased) - -### FEATURES -- Reorganized the documentation and moved the configuration file documentation into the example configuration. -- Added auto-configuration of the `denom` and `prefix` options in hermes config. - -### BUGFIXES -- Fixed a small bug with the DEBUG features (used `==` instead of `=` when testing for the `DEBUG` flag.) -- Fixed the 5-network mesh example configuration in the documentation. -- Removed `--x-crisis-skip-assert-invariants` as not all networks support it. -- Only add node to the hermes config if there is no node already for that network - -### REFACTORS -- Reorganized the `lib-gm` file to make it slightly easier to add configuration options. -- Simplified the empty "default" config that gets created if no config exists. -- Moved away from the `testnet` command as not all networks support it. -- Renamed `wallet_hdpath` configuration item to `hdpath` to reflect that the validator address is also impacted during - creation. - -## v0.0.4 - -### FEATURES -- Updated hermes configuration with the hermes 0.4.0 configuration parameters. - -## v0.0.3 - -### BUGFIXES -- Apply defaults to missing configuration options ([#993]) - -### FEATURES -- Separated hermes configuration into the `global.hermes` section in the configuration - -### Dependencies -- Requires stoml 0.7.0 or above - -[#993]: https://github.com/informalsystems/ibc-rs/issues/993 - -## v0.0.2 - -### BUGFIXES -- Import hermes keys properly even if hdpath is set ([#975]) - -### FEATURES -- Introduced [CHANGELOG](https://github.com/informalsystems/ibc-rs/blob/master/scripts/gm/CHANGELOG.md) file. - -[#975]: https://github.com/informalsystems/ibc-rs/issues/975 - -## v0.0.1 - -### FEATURES -- Initial release ([#902]) - -[#902]: https://github.com/informalsystems/ibc-rs/issues/902 diff --git a/scripts/gm/README.md b/scripts/gm/README.md deleted file mode 100644 index c6628f3030..0000000000 --- a/scripts/gm/README.md +++ /dev/null @@ -1,326 +0,0 @@ -# Gaiad Manager `gm` - -## TL;DR -* Tool to manage local gaiad instances - no Docker needed. -* `scripts/gm/bin/gm install` to install it. Follow the instructions there for dependencies. -* `gm start` to start the nodes specified in the configuration. -* Config file is in `$HOME/.gm/gm.toml` play around and add more nodes. -* Tab completion is pretty good, use it! Or run `gm` by itself for help. 
-* Pre-1.0 warning: Got a shell error? [Raise an issue!](https://github.com/informalsystems/ibc-rs/issues/) - -## Overview -Gaiad Manager (`gm` from now on) is an easily configurable command-line tool (CLI) that helps manage local `gaiad` -networks. - -Typical problems running multiple `gaiad` instances involve: -* Identifying binaries and configurations for startup and nodes on the system for shutdown. -* Managing port allocations on the local machine. -* Copying and setting up configurations among nodes on the same network. -* Managing `hermes` configuration for IBC. - -`gm` solves this by using a unified configuration file that describes the nodes and their relationship and automating -configuration updates. - -## Requirements -* Bourne shell (`sh`) -* [`sconfig`](https://github.com/freshautomations/sconfig/releases) and - [`stoml`](https://github.com/freshautomations/stoml/releases) installed in your PATH (put them in `/usr/local/bin`) -* `sed`, `tr` (trying to remove these in the future) -* For shell-completion Bourne Again Shell (`bash`) for the local user - -## How to run -1. Install the dependencies. - -On MacOS: -```bash -curl -Lo /usr/local/bin/sconfig https://github.com/freshautomations/sconfig/releases/download/v0.1.0/sconfig_darwin_amd64 -curl -Lo /usr/local/bin/stoml https://github.com/freshautomations/stoml/releases/download/v0.7.0/stoml_darwin_amd64 -chmod 755 /usr/local/bin/sconfig -chmod 755 /usr/local/bin/stoml -``` -On Linux: -```bash -curl -Lo /usr/local/bin/sconfig https://github.com/freshautomations/sconfig/releases/download/v0.1.0/sconfig_linux_amd64 -curl -Lo /usr/local/bin/stoml https://github.com/freshautomations/stoml/releases/download/v0.7.0/stoml_linux_amd64 -chmod 755 /usr/local/bin/sconfig -chmod 755 /usr/local/bin/stoml -``` -2. Install `gm` -```bash -git clone https://github.com/informal/ibc-rs -ibc-rs/scripts/gm/bin/gm install -``` - -Alternatively, you can create the folder `$HOME/.gm/bin` and copy the files from `scripts/gm/bin` in there. -The rest is just fluff. - -3. Activate `gm` -* Add `source $HOME/.gm/bin/shell-support` to a file that executes when a new terminal window comes up - (`$HOME/.bash_profile` or `$HOME/.bashrc`) -* (Optional) Enable auto-completion -On MacOS: -```bash -# Note: zsh is the default shell on MacOS, so no need to run this unless you explicitly use bash -brew install bash-completion -``` -On Linux: -``` -apt install bash-completion || yum install bash-completion -``` -* Restart your terminal - -Note: The `shell-support` script allows bash-completion as well as creating a `gm` alias, so you don't need to add more -entries to your PATH environment variable. If you don't want to use this, you can always just add `$HOME/.gm/bin` to -your path. - -## Folders and files -### The HOME folder -**Where**: `$HOME/.gm` - -**Description**: The hard-wired home folder for `gm` is `$HOME/.gm`. It contains the binaries required to run `gm` and -the `gm.toml` file for node configuration. By default, newly created node configuration also resides here under the -`$HOME/.gm/` folder but that can be configured in `gm.toml`. - -### The configuration: `gm.toml` -**Where**: `$HOME/.gm/gm.toml`. - -**Description**: This file contains all the high-level node configuration that `gm` is aware of. Note that all entries under `[global]` are also valid entries under any `[node]` header, and can be used to override the global entries for specific nodes/validators. 
- -**Entries**: All entries are defined and documented in the [gm.toml](gm.toml) example configuration file. - -### The network configuration -**Where**: Default is the folder `$HOME/.gm/`, but it can be configured in `gm.toml` using the `home_dir` -entry. - -**Description**: The configuration and data folder for a node. Partially resembles a gaiad home folder (`.gaia`) but -it has additional files to store the wallet mnemonics. - -**Entries**: -* `config` - The node configuration folder. If the node is a full-node, the genesis file was copied from a validator -config. The persistent_peers section is automatically managed if the node has the `auto_maintain_config` parameter - enabled in `gm.toml`. -* `data` - The data folder. -* `keyring-test` - the keyring folder as defined by `gaiad testnet` with the "test" keyring-backend. -* `validator_seed.json` - the validator node's signing and wallet key. -* `wallet_seed.json` - an extra wallet mnemonic defined on validator nodes with some tokens for developer use. -* `pid` - the file that contains the process ID of the running node. (a la `/var/run`) Use `gm status` to see. -* `log` - the log file that contains the output of the running node. Use `gm log ` to see. - -This setup allows developers to run a node outside of `gm` just by pointing the `gaiad --home-dir` to the folder. - -### Ports -Ports are defined by the `ports_start_at` parameter which will be the first port assigned. -Port assignment is as follows: -``` -| name | port redirection | -|=================|====================| -| RPC (26657) | ports_start_at + 0 | -| App (1317) | ports_start_at + 1 | -| GRPC (9090 | ports_start_at + 2 | -| P2P (26656) | ports_start_at + 3 | -| PPROF (6060) | ports_start_at + 4 | -| GRPC-WEB (9091) | ports_start_at + 5 | -``` - -Example output of `gm ports` command when `node4.ports_start_at=27050`: -``` -node4 RPC : http://localhost:27050 -node4 APP : http://localhost:27051 -node4 GRPC : http://localhost:27052 -node4 P2P : http://localhost:27053 -node4 PPROF: http://localhost:27054 -node4 GRPCW: http://localhost:27055 -``` - -Note: The GRPC-Web port was recently introduced (after gaiad v4.2.1). It will be ignored in earlier versions. - -## Minimal configuration example -The following configuration is all you need to specify 2 `gaiad` chains. `hermes` will know about these chains. -```toml -[global] -gaiad_binary="path/to/your/gaiad" -add_to_hermes=true - -[global.hermes] -binary="path/to/your/hermes" - -[ibc-0] -[ibc-1] -``` - -This configuration specifies 2 networks (chains), `ibc-0` and `ibc-1`. A typical workflow might look like: - -```bash -# Generate the config for `hermes`. -# Notably, this will create the appropriate `[[chains]]` entries for `ibc-0` and `ibc-1`. -$ gm hermes config - -# Generate the keys so that `hermes` can sign transactions on both chains -$ gm hermes keys - -# Start the two chains -$ gm start - -# Create a connection -$ hermes create connection ibc-0 ibc-1 -``` - -## Tribal knowledge (things they don't tell you) -* the user is welcome to create additional nodes outside the scope of `gm` on the local machine but `gm` will only - manage nodes that are added to the configuration file. -* one quirk of the underlying tools is that if global variables are set, you can't unset them locally: empty values - in a node configuration will revert back to the global setting. -* The shortest node definition is a named subsection like this `[mynode1]`. 
Ths inherently defines the following things: - * this node is a validator - * the chain_id of the network is `mynode1` - * a wallet is generated with extra tokens that can be used by the developer (or by hermes) -* If you want to create a full-node, you have to define the `network="myvalidator"` option in the node configuration. If - you do, the following things are inherently defined: - * the node is connected to a validator called `myvalidator` - * the `genesis.json` file is copied from that validator and the `persistent_peers` section is updated to point to the - validator - * if Hermes is pointed to this node in the configuration (by adding the `add_to_hermes=true` option), Hermes will get - the wallet details from the validator node and use that wallet for transactions - -## Execution manual -### `gm help` -**Description**: shows the help screen - -### `gm hermes cc` -**Description**: create and print the `hermes create channel` commands to obtain a fully interconnected IBC mesh -on the screen. - -Tip: Pick and choose the ones you want to create. - -### `gm hermes config` -**Description**: generate the hermes `config.toml` config file and write it to the defined config path. - -Tip: Do not run this command, if you value your current hermes config. It will be overwritten. - -### `gm hermes keys` -**Description**: add network keys to a hermes configuration. - -Tip: Make sure you set the `global.hermes_binary` entry in the config to a valid binary path. - -### `gm install` -**Description**: Install the `gm` command under `$HOME/.gm`, create a default configuration and warn about missing -dependencies. - -Tip: You can run this command multiple times to check if the dependencies are met. - -### `gm keys [ ...]` -**Description**: List the keys installed on a node. (It gets them from the `keyring-test` folder.) -If no node is specified then it lists all keys. - -Tip: it will show you the seed phrase if it can find it in the folder. - -### `gm log [ ...] [-f|-r]` -**Description**: Show the log of the specified node(s). At least one node has to be specified. - -Tip: You can put `-f` and `-r` anywhere after `log` to get `tail -f` or `tail -r`-like functionality. - -### `gm ports [ ...]` -**Description**: List the ports assigned to a node. -If no node is specified then it lists all nodes' ports. - -Tip: When automation doesn't get you all the way, this helps in identifying your nodes on your local machine. - -### `gm start [ ...]` -**Description**: Start the node(s). This will use the defined `gaiad` binary and configuration. -If no node is specified then it will start all nodes. - -Tip: You can freely start nodes over-and-over. If they are proven running, the command will not do anything, if they -were killed for any reason, the `pid` configuration will be updated, and a fresh node started. - -### `gm status` -**Description**: List all nodes and their basic properties, such as: their PID if they are running, their home folder, -and the most common ports used. - -Home folders in brackets mean the configuration was not created yet. Configuration is automatically created during -startup of a node. - -Tip: PIDs in brackets mean that the node is not running when `gm` started them. This could be because of a configuration -error or maybe the user simply killed the PID. No worries, `gm` will clean up when `start` or `stop` is invoked. - -### `gm stop [ ...]` -**Description**: Stop the node(s). This will use the defined `gaiad` binary and configuration. 
-If no node is specified then it will stop all nodes. - -Tip: If a node was killed, you can use `gm stop` to clean up the PID file. - -### `gm reset [ ...]` -**Description**: Run `unsafe-reset-all` on the node(s) and reset the node database. This will use the defined -`gaiad` binary and configuration. If no node is specified then it will run for all nodes. - -Tip: It will stop nodes that are running and restart them after the database reset. - -### `gm rm [ ...]` -**Description**: Delete the node configuration. At least one node has to be specified. - -Tip: It will stop nodes that are running. - -### `gm version` -**Description**: Display the version of `gm`. - -Tip: Congratulations in reaching the bottom of the command references. For your endurance, you are rewarded -with an unsupported yet useful command: `gm ss` will list the open ports between 27000-28000 (the default port set -used by `gm`) on your local machine. Use it when you get port-clashes because of rogue processes on your machine that -`gm` can't account for. - -## Example: 5 networks with 5 full nodes attached -This is an example that recreates the "full-mesh" tool's default network setup with the added twist that all networks -have a full node and the hermes config is going through the full nodes instead of the validator nodes. - -You can get the hermes configuration automatically. - -You might need to replace the value of the `gaiad_binary` entry, if you don't set `$GOPATH` in your regular executions. - -The same is true for `hermes_binary`. - -`gm.toml`: -```toml -[global] -gaiad_binary="$GOPATH/bin/gaiad" - -[global.hermes] -binary="./hermes" - -[network1] -[network2] -[network3] -[network4] -[network5] - -[node1] -network="network1" -add_to_hermes=true -[node2] -network="network2" -add_to_hermes=true -[node3] -network="network3" -add_to_hermes=true -[node4] -network="network4" -add_to_hermes=true -[node5] -network="network5" -add_to_hermes=true -``` -(Ports will be auto-assigned and written in the configuration file on the first start.) - -Run the below: -```bash -gm start -gm hermes config -gm hermes keys -gm hermes cc -``` - -This will -* create the node configuration and start all nodes -* generate the keys for hermes -* generate the config for hermes -* print the `create client` commands for a full-mesh connection among the IBC node networks. - -Pick and choose the connections from the list that you want to create. diff --git a/scripts/gm/bin/gm b/scripts/gm/bin/gm deleted file mode 100755 index 930383db32..0000000000 --- a/scripts/gm/bin/gm +++ /dev/null @@ -1,272 +0,0 @@ -#!/usr/bin/env sh - -set -eu - -# Load lib-gm either from the local folder or from the global $HOME/.gm/bin/lib-gm -SCRIPT_0="${0:-$HOME/.gm/bin/gm}" -export SCRIPT_DIR="${SCRIPT_0%%gm}" -export LOCAL_LIB_GM="${SCRIPT_DIR}lib-gm" -if [ -f "$LOCAL_LIB_GM" ]; then - # shellcheck source=lib-gm - . "$LOCAL_LIB_GM" -elif [ -f "$HOME/.gm/bin/lib-gm" ]; then - . "$HOME/.gm/bin/lib-gm" -elif [ -f "${LIB_GM:-}" ]; then - # shellcheck source=lib-gm - . "$LIB_GM" -else - echo "ERROR: could not find lib-gm, exiting..." - exit 1 -fi - -enforce_requirements - -usage() { - cat < [[]...] 
- -COMMANDS DESCRIPTION - -help print this help and exit -hermes sub-command for hermes-related configuration -install install the script for the local user -keys print the keys of validator nodes -log print the log of a node -ports print the ports of a (running) node -start start one or more nodes (starts all nodes if no parameter is given) -status print the status of nodes -stop stop one or more nodes (stops all nodes if no parameter is given) -reset reset one or more nodes' database (resets all nodes if no parameter is given) -rm delete the configuration of a node -version print the application version - -EOF -} - -hermes_usage() { - cat < [[]...] - -SUBCOMMANDS DESCRIPTION - -cc print a list of hermes commands to create IBC connections -config update hermes config.toml with the current gm network details -help print this help and exit -keys add keys to $HOME/.hermes - -EOF -} - -case "${1:-help}" in - help) - disable_when_json - usage - ;; - hermes) - disable_when_json - shift - case "${1:-help}" in - help) - hermes_usage - ;; - config) - parse_config_file - hermes_config - ;; - keys) - parse_config_file - if [ $# -eq 1 ]; then - NODES="$ALL_HERMES_NODES" - else - shift - NODES="$*" - fi - for i in $NODES - do - warn_unknown_hermes_node "$i" || continue - hermes_keys "$i" - done - ;; - cc) - parse_config_file - hermes_cc - ;; - *) - hermes_usage - exit_with_error "could not understand the command hermes '$1'" - ;; - esac - ;; - install) - disable_when_json - install - ;; - keys) - disable_when_json # Todo: implement it for JSON - parse_config_file - if [ $# -eq 1 ]; then - NODES="$ALL_NODES" - else - shift - NODES="$*" - fi - for i in $NODES - do - warn_unknown_node "$i" || continue - list_keys "$i" - done - ;; - log) - disable_when_json - parse_config_file - shift - F_FLAG="" - R_FLAG="" - LIST="" - while [ "$#" -gt 0 ]; - do - case "$1" in - "-f") - F_FLAG="-f" ;; - "-r") - R_FLAG="-r" ;; - *) - LIST="$LIST $(get_home_dir "$1")/log" - esac - shift - done - if [ -z "$LIST" ]; then - exit_with_error "no node name was given" - fi - if [ -n "$F_FLAG" ] && [ -n "$R_FLAG" ]; then - exit_with_error "-f and -r is too much even for the tail command" - fi - if [ -z "$F_FLAG" ] && [ -z "$R_FLAG" ]; then - echo "less -R +G $LIST" - # Todo: spaces in folder names and file names are not supported yet. - # shellcheck disable=SC2086 - less -R +G $LIST - else - if [ -n "$F_FLAG" ]; then - echo "tail -f $LIST" - # Todo: spaces in folder names and file names are not supported yet. - # shellcheck disable=SC2086 - tail -f $LIST - elif [ -n "$R_FLAG" ]; then - echo "tail -r $LIST" - # Todo: spaces in folder names and file names are not supported yet. - # shellcheck disable=SC2086 - tail -r $LIST - fi - fi - ;; - ports) - disable_when_json - parse_config_file - if [ $# -eq 1 ]; then - NODES="$ALL_NODES" - else - shift - NODES="$*" - fi - for i in $NODES - do - warn_unknown_node "$i" || continue - ports "$i" - done - ;; - ss) - disable_when_json - # For debug purposes - if [ -n "$(which lsof)" ]; then - lsof -i:27000-28000 -P - elif [ -n "$(which ss)" ]; then - ss -tulpn sport '>=' :27000 '&&' sport '<=' :28000 - else - echo "ERROR: no app found to list open ports" - fi - ;; - start) - parse_config_file - if [ $# -eq 1 ]; then - NODES_TO_START="$ALL_NODES" - else - shift - NODES_TO_START="$*" - fi - for i in $NODES_TO_START - do - warn_unknown_node "$i" || continue - if [ ! 
-d "$(get_home_dir "$i")" ]; then - create "$i" || continue - fi - if get_auto_maintain_config "$i"; then - configure "$i" - fi - start "$i" - done - ;; - status) - parse_config_file - status - ;; - stop) - parse_config_file - if [ $# -eq 1 ]; then - NODES_TO_STOP="$ALL_NODES" - else - shift - NODES_TO_STOP="$*" - fi - for i in $NODES_TO_STOP - do - warn_unknown_node "$i" || continue - stop "$i" - done - ;; - reset) - parse_config_file - if [ $# -eq 1 ]; then - NODES_TO_STOP="$ALL_NODES" - else - shift - NODES_TO_STOP="$*" - fi - for i in $NODES_TO_STOP - do - warn_unknown_node "$i" || continue - reset "$i" - done - ;; - rm) - parse_config_file - shift - if [ "$#" -eq 0 ]; then - exit_with_error "no node was given" - fi - while [ "$#" -gt 0 ]; - do - if is_running "$1"; then - stop "$1" - sleep 1 - fi - safer_rm "$1" - shift - done - ;; - version) - version - exit 0 - ;; - *) - disable_when_json - usage - exit_with_error "could not understand the command '$1'" - ;; -esac diff --git a/scripts/gm/bin/lib-gm b/scripts/gm/bin/lib-gm deleted file mode 100644 index cb4e8877d4..0000000000 --- a/scripts/gm/bin/lib-gm +++ /dev/null @@ -1,1115 +0,0 @@ -#!/usr/bin/env sh -set -eu -if [ "${DEBUG:-}" = "2" ]; then - set -x -fi - -version() { - VERSION="v0.1.3" - if is_json_output; then - echo '{"status": "success", "message": "'"${VERSION}"'"}' - else - echo "$VERSION" - fi -} - -# Configuration Management -### - -# Get the operating system in lowercase letters. Works for linux and darwin. -get_operating_system() { - # uname is optional, we will assume linux if it's missing in some streamlined docker images. - OP_SYS="$(uname -s || echo)" - case "${OP_SYS}" in - "Linux" ) - echo linux - ;; - "Darwin" ) - echo darwin - ;; - * ) - echo linux - ;; - esac -} - -set_config_defaults() { - OP_SYS="$(get_operating_system)" - GLOBAL_GAIAD_BINARY="$(which gaiad || echo "https://github.com/cosmos/gaia/releases/download/v5.0.6/gaiad-v5.0.6-${OP_SYS}-amd64")" - #GLOBAL_PORTS_START_AT is deliberately not exported because it is updated sometimes during execution - DEFAULT_PORTS_START_AT=27000 - GLOBAL_HOME_DIR="${HOME}/.gm" - GLOBAL_AUTO_MAINTAIN_CONFIG="true" - GLOBAL_ADD_TO_HERMES="false" - GLOBAL_VALIDATOR_MNEMONIC="" - GLOBAL_WALLET_MNEMONIC="" - GLOBAL_EXTRA_WALLETS=0 - GLOBAL_HDPATH="" - GLOBAL_HERMES_BINARY="$(which hermes || echo "./hermes")" - GLOBAL_HERMES_CONFIG="${HOME}/.hermes/config.toml" - GLOBAL_HERMES_LOG_LEVEL="info" - GLOBAL_HERMES_TELEMETRY_ENABLED="true" - GLOBAL_HERMES_TELEMETRY_HOST="127.0.0.1" - GLOBAL_HERMES_TELEMETRY_PORT="3001" -} - -parse_config_file() { - set_config_defaults - find_config_file - if [ ! -f "$CONFIG_FILE" ]; then - write_default_config_file - fi - # File integrity check - stoml -s "$CONFIG_FILE" global 1> /dev/null || exit_with_error invalid config file. Make sure all strings are quoted and the global section exits - # Note: - # Shellcheck SC2155: Declare and assign separately to avoid masking return values. - # This is irrelevant here, because the return code is taken into account. 
- # shellcheck disable=SC2155 - export GLOBAL_GAIAD_BINARY="$(eval echo "$(stoml -sq "$CONFIG_FILE" global.gaiad_binary || echo "$GLOBAL_GAIAD_BINARY")")" - # shellcheck disable=SC2155 - export GLOBAL_HOME_DIR="$(eval echo "$(stoml -sq "$CONFIG_FILE" global.home_dir || echo "$GLOBAL_HOME_DIR")")" - # shellcheck disable=SC2155 - export GLOBAL_AUTO_MAINTAIN_CONFIG="$(stoml -sq "$CONFIG_FILE" global.auto_maintain_config || echo "$GLOBAL_AUTO_MAINTAIN_CONFIG")" - # shellcheck disable=SC2155 - export GLOBAL_ADD_TO_HERMES="$(stoml -sq "$CONFIG_FILE" global.add_to_hermes || echo "$GLOBAL_ADD_TO_HERMES")" - # shellcheck disable=SC2155 - export GLOBAL_VALIDATOR_MNEMONIC="$(stoml -sq "$CONFIG_FILE" global.validator_mnemonic || echo "$GLOBAL_VALIDATOR_MNEMONIC")" - # shellcheck disable=SC2155 - export GLOBAL_WALLET_MNEMONIC="$(stoml -sq "$CONFIG_FILE" global.wallet_mnemonic || echo "$GLOBAL_WALLET_MNEMONIC")" - # shellcheck disable=SC2155 - export GLOBAL_EXTRA_WALLETS="$(stoml -sq "$CONFIG_FILE" global.extra_wallets || echo "$GLOBAL_EXTRA_WALLETS")" - # shellcheck disable=SC2155 - export GLOBAL_HDPATH="$(stoml -sq "$CONFIG_FILE" global.hdpath || echo "$GLOBAL_HDPATH")" - # shellcheck disable=SC2155 - export GLOBAL_HERMES_BINARY="$(eval echo "$(stoml -sq "$CONFIG_FILE" global.hermes.binary || echo "$GLOBAL_HERMES_BINARY")")" - # shellcheck disable=SC2155 - export GLOBAL_HERMES_CONFIG="$(eval echo "$(stoml -sq "$CONFIG_FILE" global.hermes.config || echo "$GLOBAL_HERMES_CONFIG")")" - # shellcheck disable=SC2155 - export GLOBAL_HERMES_LOG_LEVEL="$(stoml -sq "$CONFIG_FILE" global.hermes.log_level || echo "$GLOBAL_HERMES_LOG_LEVEL")" - # shellcheck disable=SC2155 - export GLOBAL_HERMES_TELEMETRY_ENABLED="$(stoml -sq "$CONFIG_FILE" global.hermes.telemetry_enabled || echo "$GLOBAL_HERMES_TELEMETRY_ENABLED")" - # shellcheck disable=SC2155 - export GLOBAL_HERMES_TELEMETRY_HOST="$(stoml -sq "$CONFIG_FILE" global.hermes.telemetry_host || echo "$GLOBAL_HERMES_TELEMETRY_HOST")" - # shellcheck disable=SC2155 - export GLOBAL_HERMES_TELEMETRY_PORT="$(stoml -sq "$CONFIG_FILE" global.hermes.telemetry_port || echo "$GLOBAL_HERMES_TELEMETRY_PORT")" - - RAW_SECTIONS="$(load_all_sections || echo "")" - VALIDATORS="" - RAW_NODES="" - ALL_HERMES_NODES="" - for i in $RAW_SECTIONS - do - if [ "$i" = "global" ] || [ "$i" = "global.hermes" ]; then - continue - fi - if [ -z "$(stoml "$CONFIG_FILE" "${i}.network")" ]; then - VALIDATORS="$VALIDATORS $i" - if get_add_to_hermes "$i"; then - ALL_HERMES_NODES="$ALL_HERMES_NODES $i" - fi - else - RAW_NODES="$RAW_NODES $i" - fi - done - FULL_NODES="" - for i in $RAW_NODES - do - NODE_NETWORK="$(stoml "$CONFIG_FILE" "${i}.network")" - if ! a_in_b "${NODE_NETWORK}" "$VALIDATORS"; then - warn "invalid full node: $i, invalid network entry: ${NODE_NETWORK}, skipping..." - continue - fi - FULL_NODES="${FULL_NODES} $i" - if get_add_to_hermes "$i"; then - ADD="yes" - for j in $ALL_HERMES_NODES - do - if a_in_b "$j" "$VALIDATORS"; then - if [ "$NODE_NETWORK" = "$j" ]; then - warn "$i is a node on network $NODE_NETWORK and there is already a node for that network in the hermes config, skipping..." - ADD="no" - break - fi - else - J_NODE_NETWORK="$(get_network "$j")" - if [ "$NODE_NETWORK" = "$J_NODE_NETWORK" ]; then - warn "$i is a node on network $NODE_NETWORK and there is already a node for that network in the hermes config, skipping..." 
- ADD="no" - break - fi - fi - done - if [ "$ADD" = "yes" ]; then - ALL_HERMES_NODES="$ALL_HERMES_NODES $i" - fi - fi - done - VALIDATORS="${VALIDATORS## }" - FULL_NODES="${FULL_NODES## }" - ALL_HERMES_NODES="${ALL_HERMES_NODES## }" - export VALIDATORS - export FULL_NODES - export ALL_HERMES_NODES - export ALL_NODES="$VALIDATORS $FULL_NODES" - # Workaround for issue: https://github.com/spf13/viper/issues/1131 - # Fix "stoml" so it reads empty sections too. - for v in $VALIDATORS - do - # This will fill in the ports_start_at entry in all sections so empty sections have at least one entry. - get_ports_start_at "$v" 1> /dev/null - done - # End of workaround -} - -write_default_config_file() { - set +e - cat < "$CONFIG_FILE" -[global] -gaiad_binary="$GLOBAL_GAIAD_BINARY" - -[global.hermes] -binary="$GLOBAL_HERMES_BINARY" - -EOF - set -e -} - -find_config_file() { - if [ -n "${GM_TOML:-}" ] && [ -f "${GM_TOML:-}" ]; then - # shellcheck disable=SC2155 - export CONFIG_DIR="$(dirname "${GM_TOML}")" - export CONFIG_FILE="${GM_TOML}" - else - if [ -f "${SCRIPT_DIR}gm.toml" ]; then - export CONFIG_DIR="${SCRIPT_DIR}" - elif [ -f "${HOME}/.gm/gm.toml" ]; then - export CONFIG_DIR="${HOME}/.gm/" - elif [ -d "${HOME}/.gm" ]; then - export CONFIG_DIR="${HOME}/.gm/" - else - export CONFIG_DIR="${SCRIPT_DIR}" - fi - export CONFIG_FILE="${CONFIG_DIR}gm.toml" - fi -} - -# End Configuration Management -### - -install() { - mkdir -p "${HOME}/.gm/bin" - cp "$0" "${HOME}/.gm/bin/gm" - chmod 755 "${HOME}/.gm/bin/gm" - cp "${0%%gm}lib-gm" "${HOME}/.gm/bin/lib-gm" - chmod 644 "${HOME}/.gm/bin/lib-gm" - cp "${0%%gm}shell-support" "${HOME}/.gm/bin/shell-support" - chmod 644 "${HOME}/.gm/bin/shell-support" - CONFIG_FILE="${HOME}/.gm/gm.toml" - if [ -f "$CONFIG_FILE" ]; then - warn "Config file already exists. Please double-check the documentation to see if all necessary configuration items are set." - else - set_config_defaults - write_default_config_file - fi - if [ -z "$(which stoml)" ]; then - warn "missing mandatory stoml, install it from https://github.com/freshautomations/stoml/releases" - fi - if [ -z "$(which sconfig)" ]; then - warn "missing mandatory sconfig, install it from https://github.com/freshautomations/sconfig/releases" - fi - OS="$(uname -s)" - if [ "$OS" = "Darwin" ]; then - if [ ! -d /usr/local/etc/bash_completion.d ]; then - warn "run \"brew install bash-completion\" to install optional bash completion" - fi - else - if [ ! -d /etc/bash_completion.d ]; then - warn "run \"apt-get install bash-completion || yum install bash-completion\" to install optional bash completion" - fi - fi - echo "Please add \"source $HOME/.gm/bin/shell-support\" to your .profile, .bash_profile or other startup script and restart your shell." 
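As a rough sketch of the first run after installation (the path of the not-yet-installed script is illustrative):

./gm install                                                          # copies gm, lib-gm and shell-support into ~/.gm/bin
echo 'source "$HOME/.gm/bin/shell-support"' >> "$HOME/.bash_profile"  # as instructed by the message above
gm start                                                              # creates, configures and starts every node in gm.toml
gm status                                                             # lists PIDs, ports and home directories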
-} - -enforce_requirements() { - if [ -z "$(which sconfig || echo)" ]; then - exit_with_error "missing sconfig, install it from https://github.com/freshautomations/sconfig/releases" - fi - SED="$(which sed || echo)" - if [ -z "$SED" ]; then - exit_with_error "missing sed, please install it" - fi - if [ -z "$(which tr || echo)" ]; then - exit_with_error "missing tr, please install it" - fi - if [ -z "$(which basename || echo)" ]; then - exit_with_error "missing basename, please install it" - fi - if [ -z "$(which dirname || echo)" ]; then - exit_with_error "missing dirname, please install it" - fi - STOML="$(which stoml || echo)" - if [ -z "$STOML" ]; then - exit_with_error "missing stoml, install it from https://github.com/freshautomations/stoml/releases" - fi - STOML_VERSION="$("$STOML" --version | "$SED" 's/^stoml version //')" - MAJOR="$(echo "${STOML_VERSION}" | "$SED" "s/^\([0-9]*\)\.\([0-9]*\)\.\([0-9]*\)$/\1/")" - MINOR="$(echo "${STOML_VERSION}" | "$SED" "s/^\([0-9]*\)\.\([0-9]*\)\.\([0-9]*\)$/\2/")" - #PATCH="$(echo "${STOML_VERSION}" | "$SED" "s/^\([0-9]*\)\.\([0-9]*\)\.\([0-9]*\)$/\3/")" - if [ $((MAJOR)) -eq 0 ] && [ $((MINOR)) -lt 7 ]; then - exit_with_error "stoml too old, install 0.7.0 or newer from https://github.com/freshautomations/stoml/releases" - fi -} - -is_json_output() { - test "${OUTPUT:-}" = "json" -} - -disable_when_json() { - if is_json_output; then - exit_with_error "not implemented" - exit 0 - fi -} - -info() { - if ! is_json_output; then - echo "$*" - fi -} - -debug() { - if [ -n "${DEBUG:-}" ]; then - echo "DEBUG: $*" 1>&2 - fi -} - -warn() { - if ! is_json_output; then - echo "WARNING: $*" - fi -} - -warn_unknown_node() { - if ! is_json_output; then - if ! a_in_b "$1" "$ALL_NODES"; then - warn "unknown node $1, skipping..." - return 1 - fi - fi -} - -warn_unknown_hermes_node() { - if ! a_in_b "$1" "$ALL_HERMES_NODES"; then - warn "not a hermes node $1, skipping..." - return 1 - fi -} - -exit_with_error() { - if is_json_output; then - echo '{"status": "error", "message": "'"$*"'"}' - else - echo "ERROR: $*, exiting..." - return 1 - fi -} - -# Functions -### - -# Is string A in space-separated list B? -a_in_b() { - test -n "$(echo "$2" | grep '\(^\| \+\)'"${1}"'\( \+\|$\)')" -} - -# Is string A in comma-separated list B? -a_in_b_comma_separated() { - test -n "$(echo "$2" | grep '\(^\|,\) *'"${1}"' *\(,\|$\)')" -} - -# Return the n-th value from a comma-separated list. -# Starts with 1. -n_from_a() { - i=1 - N=$(($1)) - shift - LIST="$*" - for j in $LIST - do - if [ $((N-i)) -eq 0 ]; then - echo "$j" - return 0 - fi - i=$((i+1)) - done - return 1 -} - -# End Functions -### - -# Function to work around https://github.com/spf13/viper/issues/1131 -# This function can be replaced with `stoml -q "$CONFIG_FILE" .` after the issue is fixed. -load_all_sections() { - grep '^ *\[.\+\] *$' "$CONFIG_FILE" | sed 's/^ *\[\([^]]*\)\] *$/\1/' -} - -get_url_id() { - SUMMARIZER="$(which md5sum || which shasum || which sha256sum)" - if [ -z "${SUMMARIZER}" ]; then - echo "${1}" | tr -d ":/?=.&" - else - SUMMARY="$(echo "${1}" | "${SUMMARIZER}")" - SUMMARY="${SUMMARY%% -}" - echo "${SUMMARY}" - fi -} - -eval_binary() { - RAW_BINARY="$(eval echo "${1}")" - if [ "${RAW_BINARY##http://}" != "$RAW_BINARY" ] || [ "${RAW_BINARY##https://}" != "$RAW_BINARY" ] || [ "${RAW_BINARY##ftp://}" != "$RAW_BINARY" ]; then - FILE_URL_PATH="$(dirname "${RAW_BINARY}")" - FILE_URL_PATH_ID="$(get_url_id "${FILE_URL_PATH}")" - if [ ! 
-d "${CONFIG_DIR}${FILE_URL_PATH_ID}" ]; then - mkdir "${CONFIG_DIR}${FILE_URL_PATH_ID}" || exit_with_error "could not create temporary folder ${CONFIG_DIR}${FILE_URL_PATH_ID} to download binary ${RAW_BINARY}" - fi - FILE_NAME="$(basename "${RAW_BINARY}" | tr -d "?=&")" - BINARY="${CONFIG_DIR}${FILE_URL_PATH_ID}/${FILE_NAME}" - if [ -x "${BINARY}" ]; then - echo "${BINARY}" - else - DOWNLOADER="$(which wget)" - if [ -n "${DOWNLOADER}" ]; then - debug "downloading ${RAW_BINARY} using wget..." - # shellcheck disable=SC2086 - "${DOWNLOADER}" -q -nd ${WGET_DOWNLOAD_OPTIONS:-} "${RAW_BINARY}" -O "${BINARY}" - chmod +x "${BINARY}" - echo "${BINARY}" - else - DOWNLOADER="$(which curl)" - if [ -n "${DOWNLOADER}" ]; then - debug "downloading ${RAW_BINARY} using curl..." - # shellcheck disable=SC2086 - "${DOWNLOADER}" -s -f -L ${CURL_DOWNLOAD_OPTIONS:-} "${RAW_BINARY}" -o "${BINARY}" - chmod +x "${BINARY}" - echo "${BINARY}" - else - exit_with_error "found URL ${RAW_BINARY} but no downloader wget or curl" - fi - fi - fi - else - if [ -x "${RAW_BINARY}" ]; then - echo "${RAW_BINARY}" - else - exit_with_error "binary \"${RAW_BINARY}\" not found or not executable, check your gm.toml config" - fi - fi -} - -get_gaiad_binary() { - RESULT="$(stoml -q "$CONFIG_FILE" "${1}.gaiad_binary")" - if [ -z "$RESULT" ]; then - eval_binary "$GLOBAL_GAIAD_BINARY" - else - eval_binary "$RESULT" - fi -} - -get_hermes_binary() { - eval_binary "$GLOBAL_HERMES_BINARY" -} - -get_ports_start_at() { - RESULT="$(stoml -q "$CONFIG_FILE" "${1}.ports_start_at")" - if [ -z "$RESULT" ]; then - THIS_PORTS_START_AT="$(stoml -sq "$CONFIG_FILE" global.ports_start_at || echo "$DEFAULT_PORTS_START_AT")" - sconfig "$CONFIG_FILE" -t int "global.ports_start_at=$((THIS_PORTS_START_AT+10))" 1> /dev/null - sconfig "$CONFIG_FILE" -t int "${1}.ports_start_at=$THIS_PORTS_START_AT" 1> /dev/null - echo "$THIS_PORTS_START_AT" - else - echo "$RESULT" - fi -} - -get_rpc_port() { - get_ports_start_at "$1" -} - -get_app_port() { - echo "$(($(get_ports_start_at "$1")+1))" -} - -get_grpc_port() { - echo "$(($(get_ports_start_at "$1")+2))" -} - -get_p2p_port() { - echo "$(($(get_ports_start_at "$1")+3))" -} - -get_pprof_port() { - echo "$(($(get_ports_start_at "$1")+4))" -} - -get_grpcw_port() { - echo "$(($(get_ports_start_at "$1")+5))" -} - -get_home_dir() { - RESULT="$(stoml -q "$CONFIG_FILE" "${1}.home_dir")" - if [ -z "$RESULT" ]; then - echo "$GLOBAL_HOME_DIR/$1" - else - eval echo "$RESULT" - fi -} - -get_auto_maintain_config() { - RESULT="$(stoml -q "$CONFIG_FILE" "${1}.auto_maintain_config")" - if [ -z "$RESULT" ]; then - test "$GLOBAL_AUTO_MAINTAIN_CONFIG" = "true" - else - test "$RESULT" = "true" - fi -} - -get_network() { - RESULT="$(stoml -q "$CONFIG_FILE" "${1}.network")" - if [ -z "$RESULT" ]; then - exit_with_error "network not found for node ${1}" - else - if ! a_in_b "$RESULT" "$VALIDATORS"; then - return 1 - fi - fi - echo "$RESULT" -} - -# Note: this depends on one-validator nodes. -# We might want to change it to `stoml -q "${HOME_DIR}/config/genesis.json" "chain_id"` later. -get_chain_id() { - if a_in_b "$1" "$VALIDATORS"; then - echo "$1" - else - get_network "$1" - fi -} - -get_add_to_hermes() { - RESULT="$(stoml -q "$CONFIG_FILE" "${1}.add_to_hermes")" - if [ -z "$RESULT" ]; then - test "$GLOBAL_ADD_TO_HERMES" = "true" - else - test "$RESULT" = "true" - fi -} - -get_node_id() { - GAIAD_BINARY="$(get_gaiad_binary "$1")" - HOME_DIR="$(get_home_dir "$1")" - if [ ! 
-f "${HOME_DIR}/config/node_key.json" ]; then - warn "Invalid configuration: no node key found for $1" - echo "DEADBEEFDEADBEEF" - else - "$GAIAD_BINARY" tendermint show-node-id --home "$HOME_DIR" - fi -} - -get_validator_mnemonic() { - RESULT="$(stoml -q "$CONFIG_FILE" "${1}.validator_mnemonic")" - if [ -z "$RESULT" ]; then - echo "$GLOBAL_VALIDATOR_MNEMONIC" - else - echo "$RESULT" - fi -} - -get_wallet_mnemonic() { - RESULT="$(stoml -q "$CONFIG_FILE" "${1}.wallet_mnemonic")" - if [ -z "$RESULT" ]; then - echo "$GLOBAL_WALLET_MNEMONIC" - else - echo "$RESULT" - fi -} - -get_extra_wallets() { - RESULT="$(stoml -q "$CONFIG_FILE" "${1}.extra_wallets")" - if [ -z "$RESULT" ]; then - echo "$GLOBAL_EXTRA_WALLETS" - else - echo "$RESULT" - fi -} - -get_hdpath() { - RESULT="$(stoml -q "$CONFIG_FILE" "${1}.hdpath")" - if [ -z "$RESULT" ]; then - echo "$GLOBAL_HDPATH" - else - echo "$RESULT" - fi -} - -get_staking_denom() { - NETWORK="$(get_chain_id "$1")" - NETWORK_HOME_DIR="$(get_home_dir "$NETWORK")" - stoml "${NETWORK_HOME_DIR}/config/genesis.json" app_state.staking.params.bond_denom -} - -get_wallet_account_prefix() { - NETWORK="$(get_chain_id "$1")" - NETWORK_HOME_DIR="$(get_home_dir "$NETWORK")" - WALLET_ADDRESS="$(stoml "${NETWORK_HOME_DIR}/wallet_seed.json" address)" - IFS=1 read -r account_prefix _ <&1)" - echo "$EXEC_RESULT" > "${HOME_DIR}/init.json" - if [ "$(stoml "${HOME_DIR}/init.json" moniker)" != "$1" ]; then - warn "could not create config for ${1}: \"$EXEC_RESULT\", skipping..." - return 1 - fi - if a_in_b "$1" "$VALIDATORS"; then - HDPATH="$(get_hdpath "$1")" - # Create validator key - VALIDATOR_MNEMONIC="$(get_validator_mnemonic "$1")" - if [ -z "$VALIDATOR_MNEMONIC" ] && [ -z "$HDPATH" ]; then - "$GAIAD_BINARY" keys add "validator" --keyring-backend test --keyring-dir "${HOME_DIR}" --output json 1> "${HOME_DIR}/validator_seed.json" 2>&1 - elif [ -z "$VALIDATOR_MNEMONIC" ] && [ -n "$HDPATH" ]; then - "$GAIAD_BINARY" keys add "validator" --hd-path "$HDPATH" --keyring-backend test --keyring-dir "${HOME_DIR}" --output json 1> "${HOME_DIR}/validator_seed.json" 2>&1 - elif [ -n "$VALIDATOR_MNEMONIC" ] && [ -z "$HDPATH" ]; then - echo "$VALIDATOR_MNEMONIC" | "$GAIAD_BINARY" keys add "validator" --recover --keyring-backend test --keyring-dir "${HOME_DIR}" --output json 1> "${HOME_DIR}/validator_seed.json" 2>&1 - sconfig "${HOME_DIR}/validator_seed.json" -t string "mnemonic=${VALIDATOR_MNEMONIC}" 1> /dev/null - elif [ -n "$VALIDATOR_MNEMONIC" ] && [ -n "$HDPATH" ]; then - echo "$VALIDATOR_MNEMONIC" | "$GAIAD_BINARY" keys add "validator" --hd-path "$HDPATH" --recover --keyring-backend test --keyring-dir "${HOME_DIR}" --output json 1> "${HOME_DIR}/validator_seed.json" 2>&1 - sconfig "${HOME_DIR}/validator_seed.json" -t string "mnemonic=${VALIDATOR_MNEMONIC}" 1> /dev/null - fi - # Create wallet key - WALLET_MNEMONIC="$(get_wallet_mnemonic "$1")" - if [ -z "$WALLET_MNEMONIC" ] && [ -z "$HDPATH" ]; then - "$GAIAD_BINARY" keys add "wallet" --keyring-backend test --keyring-dir "${HOME_DIR}" --output json 1> "${HOME_DIR}/wallet_seed.json" 2>&1 - elif [ -z "$WALLET_MNEMONIC" ] && [ -n "$HDPATH" ]; then - "$GAIAD_BINARY" keys add "wallet" --hd-path "$HDPATH" --keyring-backend test --keyring-dir "${HOME_DIR}" --output json 1> "${HOME_DIR}/wallet_seed.json" 2>&1 - elif [ -n "$WALLET_MNEMONIC" ] && [ -z "$HDPATH" ]; then - echo "$WALLET_MNEMONIC" | "$GAIAD_BINARY" keys add "wallet" --recover --keyring-backend test --keyring-dir "${HOME_DIR}" --output json 1> "${HOME_DIR}/wallet_seed.json" 2>&1 - 
sconfig "${HOME_DIR}/wallet_seed.json" -t string "mnemonic=${WALLET_MNEMONIC}" 1> /dev/null - elif [ -n "$WALLET_MNEMONIC" ] && [ -n "$HDPATH" ]; then - echo "$WALLET_MNEMONIC" | "$GAIAD_BINARY" keys add "wallet" --hd-path "$HDPATH" --recover --keyring-backend test --keyring-dir "${HOME_DIR}" --output json 1> "${HOME_DIR}/wallet_seed.json" 2>&1 - sconfig "${HOME_DIR}/wallet_seed.json" -t string "mnemonic=${WALLET_MNEMONIC}" 1> /dev/null - fi - # Create extra wallets - EXTRA_WALLETS_COUNTER="$(get_extra_wallets "$1")" - while [ "$EXTRA_WALLETS_COUNTER" -gt 0 ]; - do - if [ -z "$HDPATH" ]; then - "$GAIAD_BINARY" keys add "wallet${EXTRA_WALLETS_COUNTER}" --keyring-backend test --keyring-dir "${HOME_DIR}" --output json 1> "${HOME_DIR}/wallet${EXTRA_WALLETS_COUNTER}_seed.json" 2>&1 - else #if [ -n "$HDPATH" ]; then - "$GAIAD_BINARY" keys add "wallet${EXTRA_WALLETS_COUNTER}" --hd-path "$HDPATH" --keyring-backend test --keyring-dir "${HOME_DIR}" --output json 1> "${HOME_DIR}/wallet_seed${EXTRA_WALLETS_COUNTER}.json" 2>&1 - fi - EXTRA_WALLETS_COUNTER="$((EXTRA_WALLETS_COUNTER - 1))" - done - # Add accounts to genesis - DENOM="$(get_staking_denom "$1")" - "$GAIAD_BINARY" add-genesis-account validator "100000000${DENOM},100000000samoleans" --keyring-backend test --home "${HOME_DIR}" - "$GAIAD_BINARY" add-genesis-account wallet "100000000${DENOM},100000000samoleans" --keyring-backend test --home "${HOME_DIR}" - EXTRA_WALLETS_COUNTER="$(get_extra_wallets "$1")" - while [ "$EXTRA_WALLETS_COUNTER" -gt 0 ]; - do - "$GAIAD_BINARY" add-genesis-account "wallet${EXTRA_WALLETS_COUNTER}" "100000000${DENOM},100000000samoleans" --keyring-backend test --home "${HOME_DIR}" - EXTRA_WALLETS_COUNTER="$((EXTRA_WALLETS_COUNTER - 1))" - done - # Create gentx - "$GAIAD_BINARY" gentx validator "10000000${DENOM}" --keyring-backend test --keyring-dir "${HOME_DIR}" --home "${HOME_DIR}" --chain-id "$1" 2> /dev/null - # Collect gentxs - "$GAIAD_BINARY" collect-gentxs --home "${HOME_DIR}" 2> /dev/null - # Validate genesis - "$GAIAD_BINARY" validate-genesis --home "${HOME_DIR}" > /dev/null - else - NETWORK_HOME_DIR="$(get_home_dir "$NETWORK")" - if [ -f "$NETWORK_HOME_DIR/config/genesis.json" ]; then - cp "$NETWORK_HOME_DIR/config/genesis.json" "$HOME_DIR/config/genesis.json" - else - warn "${NETWORK} does not have a valid genesis.json yet" - rm -f "${HOME_DIR}/config/genesis.json" - fi - fi - configure "$1" - sconfig "$HOME_DIR/config/config.toml" p2p.addr_book_strict=false 1> /dev/null - sconfig "$HOME_DIR/config/config.toml" p2p.allow_duplicate_ip=true 1> /dev/null -} - -configure() { - HOME_DIR="$(get_home_dir "$1")" - P2P="$(get_p2p_port "$1")" - RPC="$(get_rpc_port "$1")" - APP="$(get_app_port "$1")" - GRPC="$(get_grpc_port "$1")" - PPROF="$(get_pprof_port "$1")" - GRPCW="$(get_grpcw_port "$1")" - sconfig "$HOME_DIR/config/config.toml" "p2p.laddr=tcp://0.0.0.0:${P2P}" 1> /dev/null - sconfig "$HOME_DIR/config/config.toml" "rpc.laddr=tcp://0.0.0.0:${RPC}" 1> /dev/null - sconfig "$HOME_DIR/config/config.toml" "rpc.pprof_laddr=0.0.0.0:${PPROF}" 1> /dev/null - # Workaround for https://github.com/spf13/viper/issues/1132 - # Viper does not support writing array of arrays so we clean out the variable - sconfig "$HOME_DIR/config/app.toml" -t stringSlice "telemetry.global-labels=panic gaiad" 1> /dev/null - sed -i.bak 's/ global-labels = \["panic","gaiad"\]/ global-labels = []/' "$HOME_DIR/config/app.toml" - # End of workaround - sconfig "$HOME_DIR/config/app.toml" "api.address=tcp://0.0.0.0:${APP}" 1> /dev/null - sconfig 
"$HOME_DIR/config/app.toml" "api.enable=true" 1> /dev/null - sconfig "$HOME_DIR/config/app.toml" "api.swagger=true" 1> /dev/null - sconfig "$HOME_DIR/config/app.toml" "grpc.address=0.0.0.0:${GRPC}" 1> /dev/null - sconfig "$HOME_DIR/config/app.toml" "grpc-web.address=0.0.0.0:${GRPCW}" 1> /dev/null - if ! a_in_b "$i" "$VALIDATORS"; then - NETWORK="$(get_network "$1")" - NETWORK_HOME_DIR="$(get_home_dir "$NETWORK")" - if [ ! -f "$NETWORK_HOME_DIR/config/genesis.json" ]; then - warn "${NETWORK} does not have a genesis.json. Start ${NETWORK} first." - return 1 - fi - cp "$NETWORK_HOME_DIR/config/genesis.json" "$HOME_DIR/config/genesis.json" - NETWORK_NODE="$(get_node_id "$NETWORK")@localhost:$(get_p2p_port "$NETWORK")" - sconfig "$HOME_DIR/config/config.toml" "p2p.persistent_peers=$NETWORK_NODE" 1> /dev/null - if get_auto_maintain_config "$NETWORK"; then - EXISTING_PEERS="$(stoml -q "$NETWORK_HOME_DIR/config/config.toml" "p2p.unconditional_peer_ids")" - NODE_ID="$(get_node_id "$1")" - if ! a_in_b_comma_separated "$NODE_ID" "$EXISTING_PEERS"; then - if [ -z "$EXISTING_PEERS" ]; then - EXISTING_PEERS="$NODE_ID" - else - EXISTING_PEERS="$EXISTING_PEERS,$NODE_ID" - fi - sconfig "$NETWORK_HOME_DIR/config/config.toml" "p2p.unconditional_peer_ids=$EXISTING_PEERS" 1> /dev/null - fi - fi - fi -} - -is_running() { - HOME_DIR="$(get_home_dir "$1")" - GAIAD_PID_FILE="${HOME_DIR}/pid" - if [ -f "$GAIAD_PID_FILE" ]; then - GAIAD_PID="$(cat "$GAIAD_PID_FILE")" - if [ -n "$GAIAD_PID" ]; then - test "$(ps -p "$GAIAD_PID" -o pid | wc -l)" -eq 2 - else - return 1 - fi - else - return 1 - fi -} - -start() { - if is_running "$1"; then - warn "$1 is already running, skipping" - else - GAIAD_BINARY="$(get_gaiad_binary "$1")" - HOME_DIR="$(get_home_dir "$1")" - GAIAD_LOG="${HOME_DIR}/log" - VALIDATION="$("$GAIAD_BINARY" validate-genesis --home "$HOME_DIR" > "$GAIAD_LOG" 2>&1 || echo "ERR")" - if [ "$VALIDATION" = "ERR" ]; then - warn "invalid genesis.json for ${1}. Please check the log and fix manually. Skipping..." - return 0 - fi - nohup "$GAIAD_BINARY" start --home "$HOME_DIR" > "$GAIAD_LOG" 2>&1 & - GAIAD_PID=$! - echo "$GAIAD_PID" > "$HOME_DIR/pid" - info "$i started, PID: $GAIAD_PID, LOG: $GAIAD_LOG" - sleep 1 - if ! is_running "$1"; then - warn "$1 failed. Check the logs with \`gm log $1\`." - fi - fi -} - -stop() { - if is_running "$1"; then - HOME_DIR="$(get_home_dir "$1")" - GAIAD_PID="$(cat "${HOME_DIR}/pid")" - info "Stopping $1 with PID $GAIAD_PID..." - kill -TERM "$GAIAD_PID" 2> /dev/null && rm -f "$HOME_DIR/pid" - RESULT="$?" - if [ "$RESULT" != "0" ]; then - warn "SIGTERM failed on PID ${GAIAD_PID} for $1. Trying KILL..." - kill -KILL "$GAIAD_PID" && rm -f "$HOME_DIR/pid" - RESULT="$?" - if [ "$RESULT" != "0" ]; then - warn "SIGKILL failed on PID ${GAIAD_PID} for $1. Giving up..." - fi - fi - else - HOME_DIR="$(get_home_dir "$1")" - if [ -f "${HOME_DIR}/pid" ]; then - GAIAD_PID="$(cat "${HOME_DIR}/pid")" - if [ -n "$GAIAD_PID" ]; then - warn "no process with PID $GAIAD_PID found for $1. Removing stale data." - else - warn "no process ID found for $1. Removing stale data." 
- fi - rm -f "$HOME_DIR/pid" - fi - fi -} - -print_header_line() { - if is_json_output; then - echo "{" - echo ' "status": "success",' - echo ' "message": [' - JSON_COMMA=0 - else - echo "NODE PID RPC APP GRPC HOME_DIR" - fi -} - -print_footer_line() { - if is_json_output; then - echo " ]" - echo "}" - fi -} - -print_status_line() { - if is_json_output; then - HOME_DIR="$(get_home_dir "$1")" - GAIAD_PID_FILE="${HOME_DIR}/pid" - GAIAD_PID="" - if [ -f "$GAIAD_PID_FILE" ]; then - GAIAD_PID="$(cat "$GAIAD_PID_FILE")" - fi - if [ "$JSON_COMMA" = "0" ]; then - JSON_COMMA=1 - else - echo " ," - fi - CHAIN_ID="$1" - if [ "${2:-}" = " " ]; then - CHAIN_ID="$(get_network "$1")" - fi - if [ "${GAIAD_PID}" = "" ]; then - cat << EOF - { - "name": "$1", - "chain-id": "${CHAIN_ID}", - "config-dir": "${HOME_DIR}" - } -EOF - else - P2P="$(get_p2p_port "$1")" - RPC="$(get_rpc_port "$1")" - APP="$(get_app_port "$1")" - GRPC="$(get_grpc_port "$1")" - PPROF="$(get_pprof_port "$1")" - GRPCW="$(get_grpcw_port "$1")" - cat << EOF - { - "name": "$1", - "chain-id": "${CHAIN_ID}", - "pid": ${GAIAD_PID}, - "config-dir": "${HOME_DIR}", - "ports": { - "rpc": ${RPC}, - "app": ${APP}, - "grpc": ${GRPC}, - "p2p": ${P2P}, - "pprof": ${PPROF}, - "grpc-web": ${GRPC} - } - } -EOF - fi - else - NAME="${2:-}$1" - NAME_LENGTH="${#NAME}" - NAME_PAD="" - if [ "$NAME_LENGTH" -lt 15 ]; then - for _ in $(seq "$NAME_LENGTH" 15); - do - NAME_PAD="$NAME_PAD " - done - fi - HOME_DIR="$(get_home_dir "$1")" - HOME_DIR_PRINTED=" $HOME_DIR " - if [ ! -d "$HOME_DIR" ]; then - HOME_DIR_PRINTED="(${HOME_DIR})" - fi - GAIAD_PID_FILE="${HOME_DIR}/pid" - if [ -f "$GAIAD_PID_FILE" ]; then - GAIAD_PID="$(cat "$GAIAD_PID_FILE")" - if [ -z "$GAIAD_PID" ]; then - GAIAD_PID=" N/A " - fi - if [ "$(ps -p "$GAIAD_PID" -o pid | wc -l)" -eq 2 ]; then - echo "${NAME}${NAME_PAD} $GAIAD_PID $(get_rpc_port "$1") $(get_app_port "$1") $(get_grpc_port "$1") $HOME_DIR_PRINTED" - else - echo "${NAME}${NAME_PAD}($GAIAD_PID) - - - $HOME_DIR_PRINTED" - fi - else - echo "${NAME}${NAME_PAD} - - - - $HOME_DIR_PRINTED" - fi - fi -} - -status() { - print_header_line - for i in $VALIDATORS - do - print_status_line "$i" - for j in $FULL_NODES - do - NETWORK="$(get_network "$j")" - if [ "$i" = "$NETWORK" ]; then - print_status_line "$j" " " - else - continue - fi - done - done - print_footer_line -} - -ports() { - P2P="$(get_p2p_port "$1")" - RPC="$(get_rpc_port "$1")" - APP="$(get_app_port "$1")" - GRPC="$(get_grpc_port "$1")" - PPROF="$(get_pprof_port "$1")" - GRPCW="$(get_grpcw_port "$1")" - echo "${1} RPC : http://localhost:${RPC}" - echo "${1} APP : http://localhost:${APP}" - echo "${1} GRPC : http://localhost:${GRPC}" - echo "${1} P2P : http://localhost:${P2P}" - echo "${1} PPROF: http://localhost:${PPROF}" - echo "${1} GRPCW: http://localhost:${GRPCW}" -} - -list_keys() { - HOME_DIR="$(get_home_dir "$1")" - if [ ! -d "$HOME_DIR" ]; then - warn "No configuration at ${HOME_DIR}. Skipping..." 
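A worked example of the port layout produced by the get_*_port helpers earlier in this file, assuming a node whose ports_start_at resolved to the default of 27000:

START=27000
echo "rpc=$START app=$((START+1)) grpc=$((START+2)) p2p=$((START+3)) pprof=$((START+4)) grpc-web=$((START+5))"
# prints: rpc=27000 app=27001 grpc=27002 p2p=27003 pprof=27004 grpc-web=27005
# global.ports_start_at is then bumped by 10, so the next node allocates from 27010.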
- return 0 - fi - GAIAD_BINARY="$(get_gaiad_binary "$1")" - echo "\"$GAIAD_BINARY\" keys list --keyring-backend test --keyring-dir \"$HOME_DIR\"" - KEY_NAME="" - "$GAIAD_BINARY" keys list --keyring-backend test --keyring-dir "$HOME_DIR" | while read -r line - do - NAME="${line##'- name: '}" - TYPE="${line##'type: '}" - MNEMONIC="${line##'mnemonic:'}" - THRESHOLD="${line##'threshold: '}" - PUBKEYS="${line##'pubkeys: '}" - if [ "$NAME" != "$line" ]; then - KEY_NAME="$NAME" - echo - echo "$line" - elif [ "$TYPE" != "$line" ]; then - if [ "$line" != "type: local" ]; then - echo "$line" - fi - elif [ "$MNEMONIC" != "$line" ]; then - if a_in_b "${KEY_NAME%%v0}" "$VALIDATORS"; then - echo "mnemonic: \"$(stoml "${HOME_DIR}/key_seed.json" secret)\"" - elif a_in_b "${KEY_NAME%%n0}" "$FULL_NODES"; then - echo "mnemonic: \"$(stoml "${HOME_DIR}/key_seed.json" secret)\"" - elif [ -f "${HOME_DIR}/${KEY_NAME}_seed.json" ]; then - echo "mnemonic: \"$(stoml "${HOME_DIR}/${KEY_NAME}_seed.json" mnemonic)\"" - else - echo "mnemonic: \"\"" - fi - elif [ "$THRESHOLD" != "$line" ]; then - if [ "$line" != "threshold: 0" ]; then - echo "$line" - fi - elif [ "$PUBKEYS" != "$line" ]; then - if [ "$line" != "pubkeys: []" ]; then - echo "$line" - fi - else - echo "$line" - fi - done -} - -hermes_config() { - HERMES_DIR="$(dirname "$GLOBAL_HERMES_CONFIG")" - if [ ! -d "$HERMES_DIR" ]; then - mkdir -p "$HERMES_DIR" - fi - cat < "$GLOBAL_HERMES_CONFIG" -[global] -log_level = '${GLOBAL_HERMES_LOG_LEVEL}' - -[mode] - -[mode.clients] -enabled = true -refresh = true -misbehaviour = true - -[mode.connections] -enabled = true - -[mode.channels] -enabled = true - -[mode.packets] -enabled = true -clear_interval = 100 -clear_on_start = true -tx_confirmation = true - -[telemetry] -enabled = ${GLOBAL_HERMES_TELEMETRY_ENABLED} -host = '${GLOBAL_HERMES_TELEMETRY_HOST}' -port = ${GLOBAL_HERMES_TELEMETRY_PORT} - -EOF - for i in $ALL_NODES - do - if ! 
get_add_to_hermes "$i"; then - continue - fi - RPC="$(get_rpc_port "$i")" - GRPC="$(get_grpc_port "$i")" - ID="$(get_chain_id "$i")" - ACCOUNT_PREFIX="$(get_wallet_account_prefix "$i")" - DENOM="$(get_staking_denom "$i")" - cat <> "$GLOBAL_HERMES_CONFIG" -[[chains]] -id = '${ID}' -rpc_addr = 'http://localhost:${RPC}' -grpc_addr = 'http://localhost:${GRPC}' -websocket_addr = 'ws://localhost:${RPC}/websocket' -rpc_timeout = '15s' -account_prefix = '${ACCOUNT_PREFIX}' -key_name = 'wallet' -store_prefix = 'ibc' -gas_price = { price = 0.01, denom = '${DENOM}' } -max_gas = 10000000 -clock_drift = '5s' -trusting_period = '14days' -trust_threshold = { numerator = '1', denominator = '3' } - -EOF - done -} - -hermes_keys() { - ID="$(get_chain_id "$1")" - NETWORK_HOME_DIR="$(get_home_dir "$ID")" - HERMES_BINARY="$(get_hermes_binary)" - HDPATH="$(get_hdpath "$1")" - if [ -z "$GLOBAL_HERMES_CONFIG" ] && [ -z "$HDPATH" ]; then - "$HERMES_BINARY" keys add "$ID" -f "${NETWORK_HOME_DIR}/wallet_seed.json" - elif [ -n "$GLOBAL_HERMES_CONFIG" ] && [ -z "$HDPATH" ]; then - "$HERMES_BINARY" -c "$GLOBAL_HERMES_CONFIG" keys add "$ID" -f "${NETWORK_HOME_DIR}/wallet_seed.json" - elif [ -z "$GLOBAL_HERMES_CONFIG" ] && [ -n "$HDPATH" ]; then - "$HERMES_BINARY" keys add "$ID" --hd-path "$HDPATH" -f "${NETWORK_HOME_DIR}/wallet_seed.json" - elif [ -n "$GLOBAL_HERMES_CONFIG" ] && [ -n "$HDPATH" ]; then - "$HERMES_BINARY" -c "$GLOBAL_HERMES_CONFIG" keys add "$ID" --hd-path "$HDPATH" -f "${NETWORK_HOME_DIR}/wallet_seed.json" - fi - EXTRA_WALLETS_COUNTER="$(get_extra_wallets "$1")" - while [ "$EXTRA_WALLETS_COUNTER" -gt 0 ]; - do - if [ -z "$GLOBAL_HERMES_CONFIG" ] && [ -z "$HDPATH" ]; then - "$HERMES_BINARY" keys add "$ID" --key-name "wallet${EXTRA_WALLETS_COUNTER}" -f "${NETWORK_HOME_DIR}/wallet${EXTRA_WALLETS_COUNTER}_seed.json" - elif [ -n "$GLOBAL_HERMES_CONFIG" ] && [ -z "$HDPATH" ]; then - "$HERMES_BINARY" -c "$GLOBAL_HERMES_CONFIG" keys add "$ID" --key-name "wallet${EXTRA_WALLETS_COUNTER}" -f "${NETWORK_HOME_DIR}/wallet${EXTRA_WALLETS_COUNTER}_seed.json" - elif [ -z "$GLOBAL_HERMES_CONFIG" ] && [ -n "$HDPATH" ]; then - "$HERMES_BINARY" keys add "$ID" --hd-path "$HDPATH" --key-name "wallet${EXTRA_WALLETS_COUNTER}" -f "${NETWORK_HOME_DIR}/wallet${EXTRA_WALLETS_COUNTER}_seed.json" - elif [ -n "$GLOBAL_HERMES_CONFIG" ] && [ -n "$HDPATH" ]; then - "$HERMES_BINARY" -c "$GLOBAL_HERMES_CONFIG" keys add "$ID" --hd-path "$HDPATH" --key-name "wallet${EXTRA_WALLETS_COUNTER}" -f "${NETWORK_HOME_DIR}/wallet${EXTRA_WALLETS_COUNTER}_seed.json" - fi - EXTRA_WALLETS_COUNTER="$((EXTRA_WALLETS_COUNTER - 1))" - done -} - -hermes_cc() { - CHAINS="" - for i in $ALL_HERMES_NODES - do - if ! a_in_b "$i" "$CHAINS"; then - ID="$(get_chain_id "$i")" - CHAINS="$CHAINS $ID" - fi - done - CHAINS="${CHAINS## }" - N="$(($(echo "$CHAINS" | wc -w)))" - HERMES_BINARY="$(get_hermes_binary)" - for i in $(seq 1 $((N-1))) - do - for j in $(seq $((i+1)) $N) - do - echo "\"${HERMES_BINARY}\" create channel $(n_from_a "$i" "$CHAINS") $(n_from_a "$j" "$CHAINS") --port-a transfer --port-b transfer" - done - done -} - -reset() { - WAS_RUNNING="$(is_running "$1" || echo "no")" - if [ -z "$WAS_RUNNING" ]; then - if a_in_b "$1" "$VALIDATORS"; then - warn "After reset all full nodes will need to be reset too." - fi - stop "$1" - fi - GAIAD_BINARY="$(get_gaiad_binary "$1")" - HOME_DIR="$(get_home_dir "$1")" - info "Resetting $1..." - if [ ! -d "$HOME_DIR" ]; then - warn "No configuration at ${HOME_DIR}. Skipping..." 
- return 0 - fi - # `unsafe-reset-all` was moved to `gaiad tendermint` sub-command - TM="$($GAIAD_BINARY | grep -q 'unsafe' || echo "tendermint")" - if is_json_output; then - "$GAIAD_BINARY" "$TM" unsafe-reset-all --home "$HOME_DIR" 1> /dev/null 2> /dev/null - else - "$GAIAD_BINARY" "$TM" unsafe-reset-all --home "$HOME_DIR" - fi - if [ -z "$WAS_RUNNING" ]; then - start "$1" - fi -} - -# Guard against removing arbitrary folders/files, only remove folders that have at least a half-baked node configuration. -safer_rm() { - if [ $# -eq 1 ]; then - if a_in_b "$1" "$ALL_NODES"; then - HOME_DIR="$(get_home_dir "$1")" - if [ -d "$HOME_DIR" ]; then - if [ -d "$HOME_DIR/config" ]; then - if [ -f "$HOME_DIR/config/config.toml" ]; then - if [ -d "$HOME_DIR/data" ]; then - info "Executing \"rm -r $HOME_DIR\"..." - rm -r "$HOME_DIR" - else - warn "Anxious to run \"rm -r ${HOME_DIR}\" automatically, folder does not contain data folder, skipping..." - fi - else - warn "Anxious to run \"rm -r ${HOME_DIR}\" automatically, folder does not contain config file, skipping..." - fi - else - warn "Anxious to run \"rm -r ${HOME_DIR}\" automatically, folder does not contain config folder, skipping..." - fi - else - warn "Anxious to run \"rm -r ${HOME_DIR}\" automatically, folder does not exist, skipping..." - fi - else - warn "Anxious to delete \"${1}\" automatically, node not in configuration, skipping..." - fi - else - warn "Anxious to delete \"${*}\" automatically, looks like multiple paths, skipping..." - fi -} diff --git a/scripts/gm/bin/shell-support b/scripts/gm/bin/shell-support deleted file mode 100644 index 7e53f43626..0000000000 --- a/scripts/gm/bin/shell-support +++ /dev/null @@ -1,113 +0,0 @@ -# shell support for gm - -alias gm="$HOME/.gm/bin/gm" - -__gm_get_nodes() { - if [ -f "$HOME/.gm/gm.toml" ]; then - #Simplified grepping of sections - local NODES="" - for i in $(grep '^\[.\+\]$' "$HOME/.gm/gm.toml") - do - if [ "$i" = "[global]" ] || [ "$i" = "[global.hermes]" ]; then - continue - fi - o="${i##[}" - NODES="$NODES ${o%%]}" - done - echo "$NODES" - fi -} - -__gm_hermes_get_nodes() { - if [ -f "$HOME/.gm/gm.toml" ] && [ -n "$(which stoml)" ]; then - local NODES="" - local GLOBAL_ADD_TO_HERMES="$(stoml "$HOME/.gm/gm.toml" global.add_to_hermes || echo "false")" - if [ "$GLOBAL_ADD_TO_HERMES" != "true" ]; then - GLOBAL_ADD_TO_HERMES="false" - fi - for node in $(__gm_get_nodes) - do - local ADD_TO_HERMES="$(stoml "$HOME/.gm/gm.toml" "${node}.add_to_hermes" || echo "${GLOBAL_ADD_TO_HERMES}")" - if [ "$ADD_TO_HERMES" == "true" ]; then - NODES="$NODES $node" - fi - done - echo "$NODES" - else - __gm_get_nodes - fi -} - -_gm_nodes() { - local cur="${COMP_WORDS[COMP_CWORD]}" - local cmds="$(__gm_get_nodes)" - while read -r line; do COMPREPLY+=("$line"); done < <(compgen -W "$cmds" -- "$cur") -} - -_gm_nodes_log() { - local cur="${COMP_WORDS[COMP_CWORD]}" - local cmds="$(__gm_get_nodes) -f -r" - while read -r line; do COMPREPLY+=("$line"); done < <(compgen -W "$cmds" -- "$cur") -} - -_gm_hermes_nodes() { - local cur="${COMP_WORDS[COMP_CWORD]}" - local cmds="$(__gm_hermes_get_nodes)" - while read -r line; do COMPREPLY+=("$line"); done < <(compgen -W "$cmds" -- "$cur") -} - -_gm_complete_commands() { - local cur="${COMP_WORDS[COMP_CWORD]}" - local cmds="help hermes install keys log ports start status stop reset rm version" - while read -r line; do COMPREPLY+=("$line"); done < <(compgen -W "$cmds" -- "$cur") -} - -_gm_hermes_complete_commands() { - local cur="${COMP_WORDS[COMP_CWORD]}" - local cmds="help config 
keys cc" - while read -r line; do COMPREPLY+=("$line"); done < <(compgen -W "$cmds" -- "$cur") -} - -_gm() { - local i=1 cmd - - # find the subcommand - while [[ "$i" -lt "$COMP_CWORD" ]] - do - local s="${COMP_WORDS[i]}" - case "$s" in - --*) - cmd="$s" - break - ;; - -*) - ;; - *) - cmd="$s" - break - ;; - esac - (( i++ )) - done - - if [[ "$i" -eq "$COMP_CWORD" ]] - then - _gm_complete_commands - return - fi - - # subcommands have their own completion functions - case "$cmd" in - hermes) _gm_hermes_complete_commands ;; - keys) _gm_nodes ;; - log) _gm_nodes_log ;; - ports) _gm_nodes ;; - start) _gm_nodes ;; - stop) _gm_nodes ;; - *) ;; - esac -} -# Todo: hermes subcommands can use some shell-completion love - -complete -o bashdefault -o default -F _gm gm - diff --git a/scripts/gm/gm.toml b/scripts/gm/gm.toml deleted file mode 100644 index 04e31d2c2f..0000000000 --- a/scripts/gm/gm.toml +++ /dev/null @@ -1,92 +0,0 @@ -# Example configuration with one validator and one full node - -## -## Global section defines application-wide defaults. -## - -# All the entries here can be overridden on a per-node basis, except if indicated otherwise. -[global] - -# Path to the `gaiad` (or other SDK-based) binary to use. -# You can add a local path here or a ftp/http/https URL. -# In the case of a URL, a local copy will be downloaded into a temporary folder within the GM folder. -gaiad_binary="https://github.com/cosmos/gaia/releases/download/v5.0.5/gaiad-v5.0.5-linux-amd64" - -# The first free port to use for newly created nodes. -# The value will be incremented (by 10) when a new node requires ports. -## -# In the per-node configuration this is the first port to allocate for the node. -# A total of 10 ports will be allocated per node. -ports_start_at=27000 - -# The default folder where all node folders will be created. -## -# In the per-node configuration this is the folder for the node configuration. -home_dir="$HOME/.gm" - -# (Optional) The validator node's operator address will be created from this 24-word seed phrase instead of a random address. -validator_mnemonic="" - -# (Optional) The first wallet address (named "wallet") will be created from this 24-word seed phrase instead of a random address. -wallet_mnemonic="" - -# (Optional) The number of extra wallets with random addresses to create. -# These wallets will be funded and ready for developer use. -extra_wallets=0 - -# (Optional) The HD derivation path for network addresses. -# This is automatically set on a chain but Hermes needs to be informed of it, if it is not the default. -# The default is "m/44'/118'/0'/0/0" which defines the Cosmos Hub ATOM. -hdpath="" - -# Automatically update the `persistent_peers` and `unconditional_peer_ids` sections of the node configuration. -auto_maintain_config=true - -# This node should be part of the hermes config. Only one node per network should have this set. -add_to_hermes=false - - -## -## Global Hermes section defines hermes configuration items. -## These global-only parameters are used in setting up the hermes configuration. -## They are only used in the `hermes` sub-command. -## All of these variables are optional. (Default values shown.) -## - -[global.hermes] - -# Path to the `hermes` binary to use. -# You can add a local path here or a ftp/http/https URL. -# In the case of a URL, a local copy will be downloaded into a temporary folder within the GM folder. -binary="./hermes" - -# Hermes configuration file path. -config="$HOME/.hermes/config.toml" - -# Hermes configuration log_level parameter. 
-log_level="info" - -# Hermes configuration telemetry.enabled parameter. -telemetry_enabled=true - -# Hermes configuration telemetry.host parameter. -telemetry_host='127.0.0.1' - -# Hermes configuration telemetry.port parameter. -telemetry_port=3001 - - -## -## Sub-section (node) definitions and parameter redefinitons below. -## - -# A validator node that uses the default configuration. -[validator1] - -# A full node with some configuration -# A full node is defined by adding the `network` property to the section. -[node1] -# `network` is a sub-sections-only variable. It will be ignored in the `global` section. -# It defines the network (the validator connection) for the full node. -# Mandatory for full nodes, does not exist for validator nodes. -network="validator1" diff --git a/scripts/init-hermes b/scripts/init-hermes deleted file mode 100755 index ebf006e514..0000000000 --- a/scripts/init-hermes +++ /dev/null @@ -1,76 +0,0 @@ -#!/bin/bash -e - -# Initialize the light clients in the relayer configuration. - -usage() { - echo "Usage: $0 CONFIG_FILE CHAIN_0_ID CHAIN_1_ID [CHAIN_2_ID]" - echo "Example: $0 ./config.toml ibc-0 ibc-1 ibc-2" - exit 1 -} - -missing() { - echo "Missing $1 parameter. Please check if all parameters were specified." - usage -} - -if [ -z "$1" ]; then - missing "CONFIG_FILE" -fi - -if [ -z "$2" ]; then - missing "CHAIN_0_ID" -fi - -if [ -z "$3" ]; then - missing "CHAIN_1_ID" -fi - - -if [ "$#" -gt 4 ]; then - echo "Incorrect number of parameters." - usage -fi - -CONFIG_FILE="$1" -CHAIN_0_ID="$2" -CHAIN_1_ID="$3" -CHAIN_2_ID="$4" - -if ! [ -f "$CONFIG_FILE" ]; then - echo "[CONFIG_FILE] ($1) does not exist or is not a file." - usage -fi - -if ! grep -q -s "$CHAIN_0_ID" "$CONFIG_FILE"; then - echo "error: configuration for chain [$CHAIN_0_ID] does not exist in file $CONFIG_FILE." - usage -fi - -if ! grep -q -s "$CHAIN_1_ID" "$CONFIG_FILE"; then - echo "error: configuration for chain [$CHAIN_1_ID] does not exist in file $CONFIG_FILE." - usage -fi - -if [ -n "$CHAIN_2_ID" ] && ! grep -q -s "$CHAIN_2_ID" "$CONFIG_FILE"; then - echo "error: configuration for chain [$CHAIN_2_ID] does not exist in file $CONFIG_FILE." - usage -fi - -GAIA_DATA="$(pwd)/data" - -echo "Building the Rust relayer..." -cargo build -q --locked - -# add the key seeds to the keyring of each chain -echo "Importing keys..." -cargo run -q --bin hermes -- -c "$CONFIG_FILE" keys add "$CHAIN_0_ID" -f "$GAIA_DATA/$CHAIN_0_ID/user_seed.json" -cargo run -q --bin hermes -- -c "$CONFIG_FILE" keys add "$CHAIN_0_ID" -f "$GAIA_DATA/$CHAIN_0_ID/user2_seed.json" -k user2 -cargo run -q --bin hermes -- -c "$CONFIG_FILE" keys add "$CHAIN_1_ID" -f "$GAIA_DATA/$CHAIN_1_ID/user_seed.json" -cargo run -q --bin hermes -- -c "$CONFIG_FILE" keys add "$CHAIN_1_ID" -f "$GAIA_DATA/$CHAIN_1_ID/user2_seed.json" -k user2 - -if [ -n "$CHAIN_2_ID" ]; then - cargo run -q --bin hermes -- -c "$CONFIG_FILE" keys add "$CHAIN_2_ID" -f "$GAIA_DATA/$CHAIN_2_ID/user_seed.json" - cargo run -q --bin hermes -- -c "$CONFIG_FILE" keys add "$CHAIN_2_ID" -f "$GAIA_DATA/$CHAIN_2_ID/user2_seed.json" -k user2 -fi - -echo "Done!" 
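Taken together with the setup scripts removed further below, the intended flow for these helpers was roughly the following; the chain IDs and the Hermes config path are illustrative.

./scripts/setup-chains ibc-0 ibc-1                              # spin up two local gaiad chains via one-chain
./scripts/init-hermes "$HOME/.hermes/config.toml" ibc-0 ibc-1   # import the generated user keys into Hermes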
diff --git a/scripts/one-chain b/scripts/one-chain deleted file mode 100755 index 30a1af787b..0000000000 --- a/scripts/one-chain +++ /dev/null @@ -1,159 +0,0 @@ -#!/bin/bash -# Copied from https://github.com/cosmos/relayer/tree/master/scripts - -usage() { - echo "Usage: $0 BINARY CHAIN_ID CHAIN_DIR RPC_PORT P2P_PORT PROFILING_PORT GRPC_PORT SAMOLEANS" - echo "Example: $0 gaiad ibc-0 ./data 26657 26656 6060 9090 100" - exit 1 -} - -missing() { - echo "Missing $1 parameter. Please check if all parameters were specified." - usage -} - -if [ -z "$1" ]; then - missing "[BINARY] (gaiad|akash)" -fi - -if [ -z "$2" ]; then - missing "[CHAIN_ID]" -fi - -if [ -z "$3" ]; then - missing "[CHAIN_DIR]" -fi - -if [ -z "$4" ]; then - missing "[RPC_PORT]" -fi - -if [ -z "$5" ]; then - missing "[P2P_PORT]" -fi - -if [ -z "$6" ]; then - missing "[PROFILING_PORT]" -fi - -if [ -z "$7" ]; then - missing "[GRPC_PORT]" -fi - -if [ -z "$8" ]; then - missing "[SAMOLEANS]" -fi - -if [ "$#" -gt 8 ]; then - echo "Incorrect number of parameters." - usage -fi - -BINARY=$1 -CHAIN_ID=$2 -CHAIN_DIR=$3 -RPC_PORT=$4 -P2P_PORT=$5 -PROF_PORT=$6 -GRPC_PORT=$7 -SAMOLEANS=$8 - -echo "Creating $BINARY instance: home=$CHAIN_DIR | chain-id=$CHAIN_ID | p2p=:$P2P_PORT | rpc=:$RPC_PORT | profiling=:$PROF_PORT | grpc=:$GRPC_PORT | samoleans=:$SAMOLEANS" - -# Add dir for chain, exit if error -if ! mkdir -p $CHAIN_DIR/$CHAIN_ID 2>/dev/null; then - echo "Failed to create chain folder. Aborting..." - exit 1 -fi - -# Build genesis file incl account for passed address -STAKE="100000000000stake" - -# The user also needs stake to perform actions -USER_COINS="${STAKE},${SAMOLEANS}samoleans" - -# Hermes also needs stake to perform actions -HERMES_COINS="${STAKE},${SAMOLEANS}samoleans" - -$BINARY --home $CHAIN_DIR/$CHAIN_ID --chain-id $CHAIN_ID init $CHAIN_ID &> /dev/null -sleep 1 -echo "Creating validator key" -$BINARY --home $CHAIN_DIR/$CHAIN_ID keys add validator --keyring-backend="test" --output json > $CHAIN_DIR/$CHAIN_ID/validator_seed.json 2>&1 -sleep 1 -echo "Creating user key" -$BINARY --home $CHAIN_DIR/$CHAIN_ID keys add user --keyring-backend="test" --output json > $CHAIN_DIR/$CHAIN_ID/user_seed.json 2>&1 -sleep 1 -echo "Creating user2 key" -$BINARY --home $CHAIN_DIR/$CHAIN_ID keys add user2 --keyring-backend="test" --output json > $CHAIN_DIR/$CHAIN_ID/user2_seed.json 2>&1 -sleep 1 - -# Add samoleans to user -USER=$($BINARY --home $CHAIN_DIR/$CHAIN_ID keys --keyring-backend="test" show user -a) -$BINARY --home $CHAIN_DIR/$CHAIN_ID add-genesis-account $USER $USER_COINS &> /dev/null -sleep 1 - -# Add samoleans to user2 -USER2=$($BINARY --home $CHAIN_DIR/$CHAIN_ID keys --keyring-backend="test" show user2 -a) -$BINARY --home $CHAIN_DIR/$CHAIN_ID add-genesis-account $USER2 $USER_COINS &> /dev/null -sleep 1 - - -# Add stake to validator -VALIDATOR=$($BINARY --home $CHAIN_DIR/$CHAIN_ID keys --keyring-backend="test" show validator -a) -$BINARY --home $CHAIN_DIR/$CHAIN_ID add-genesis-account $VALIDATOR $STAKE &> /dev/null -sleep 1 - -# Stake everything -$BINARY --home $CHAIN_DIR/$CHAIN_ID gentx validator --keyring-backend="test" --chain-id $CHAIN_ID $STAKE &> /dev/null -sleep 1 - -$BINARY --home $CHAIN_DIR/$CHAIN_ID collect-gentxs &> /dev/null -sleep 1 - -# Check platform -platform='unknown' -unamestr=`uname` -if [ "$unamestr" = 'Linux' ]; then - platform='linux' -fi - -# Set proper defaults and change ports (use a different sed for Mac or Linux) -echo "Change settings in config.toml file..." 
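The platform split below exists because BSD sed on macOS requires an explicit (empty) backup suffix for in-place edits, while GNU sed does not. A single-branch sketch of the same idea, shown only for contrast with the duplicated blocks the script actually uses:

case "$(uname)" in
    Darwin) SED_I() { sed -i '' "$@"; } ;;   # BSD sed: -i takes a separate suffix argument
    *)      SED_I() { sed -i "$@"; } ;;      # GNU sed: suffix is optional and attached to -i
esac
SED_I 's/timeout_commit = "5s"/timeout_commit = "1s"/g' "$CHAIN_DIR/$CHAIN_ID/config/config.toml"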
-if [ $platform = 'linux' ]; then - sed -i 's#"172800s"#"200s"#g' $CHAIN_DIR/$CHAIN_ID/config/genesis.json - sed -i 's#"tcp://127.0.0.1:26657"#"tcp://0.0.0.0:'"$RPC_PORT"'"#g' $CHAIN_DIR/$CHAIN_ID/config/config.toml - sed -i 's#"tcp://0.0.0.0:26656"#"tcp://0.0.0.0:'"$P2P_PORT"'"#g' $CHAIN_DIR/$CHAIN_ID/config/config.toml - sed -i 's#"localhost:6060"#"localhost:'"$PROF_PORT"'"#g' $CHAIN_DIR/$CHAIN_ID/config/config.toml - sed -i 's/timeout_commit = "5s"/timeout_commit = "1s"/g' $CHAIN_DIR/$CHAIN_ID/config/config.toml - sed -i 's/timeout_propose = "3s"/timeout_propose = "1s"/g' $CHAIN_DIR/$CHAIN_ID/config/config.toml - sed -i 's/index_all_keys = false/index_all_keys = true/g' $CHAIN_DIR/$CHAIN_ID/config/config.toml - sed -i '/^\[grpc-web\]/,/^\[/s/^enable = true/enable = false/' $CHAIN_DIR/$CHAIN_ID/config/app.toml - # sed -i '' 's#index-events = \[\]#index-events = \["message.action","send_packet.packet_src_channel","send_packet.packet_sequence"\]#g' $CHAIN_DIR/$CHAIN_ID/config/app.toml -else - sed -i '' 's#"172800s"#"200s"#g' $CHAIN_DIR/$CHAIN_ID/config/genesis.json - sed -i '' 's#"tcp://127.0.0.1:26657"#"tcp://0.0.0.0:'"$RPC_PORT"'"#g' $CHAIN_DIR/$CHAIN_ID/config/config.toml - sed -i '' 's#"tcp://0.0.0.0:26656"#"tcp://0.0.0.0:'"$P2P_PORT"'"#g' $CHAIN_DIR/$CHAIN_ID/config/config.toml - sed -i '' 's#"localhost:6060"#"localhost:'"$PROF_PORT"'"#g' $CHAIN_DIR/$CHAIN_ID/config/config.toml - sed -i '' 's/timeout_commit = "5s"/timeout_commit = "1s"/g' $CHAIN_DIR/$CHAIN_ID/config/config.toml - sed -i '' 's/timeout_propose = "3s"/timeout_propose = "1s"/g' $CHAIN_DIR/$CHAIN_ID/config/config.toml - sed -i '' 's/index_all_keys = false/index_all_keys = true/g' $CHAIN_DIR/$CHAIN_ID/config/config.toml - sed -i '' '/^\[grpc-web\]/,/^\[/s/^enable = true/enable = false/' $CHAIN_DIR/$CHAIN_ID/config/app.toml - # sed -i '' 's/min-retain-blocks = 0/min-retain-blocks = 100/g' $CHAIN_DIR/$CHAIN_ID/config/app.toml - # sed -i '' 's#index-events = \[\]#index-events = \["message.action","send_packet.packet_src_channel","send_packet.packet_sequence"\]#g' $CHAIN_DIR/$CHAIN_ID/config/app.toml - # sed -i '' 's/error/debug/g' $CHAIN_DIR/$CHAIN_ID/config/config.toml - # sed -i '' 's/info/debug/g' $CHAIN_DIR/$CHAIN_ID/config/config.toml -fi - -# Start gaia -echo "Start gaia on grpc port: $GRPC_PORT..." -$BINARY --home $CHAIN_DIR/$CHAIN_ID start --pruning=nothing --grpc.address="0.0.0.0:$GRPC_PORT" --log_level error > $CHAIN_DIR/$CHAIN_ID.log 2>&1 & - -# Show validator's and user's balance -sleep 3 -RPC_ADDR="tcp://localhost:$RPC_PORT" -echo "Balances for validator '$VALIDATOR' @ '$RPC_ADDR'" -$BINARY --node "$RPC_ADDR" query bank balances $VALIDATOR --log_level error -echo "Balances for user '$USER' @ '$RPC_ADDR'" -$BINARY --node "$RPC_ADDR" query bank balances $USER --log_level error -echo "Balances for user '$USER2' @ '$RPC_ADDR'" -$BINARY --node "$RPC_ADDR" query bank balances $USER2 --log_level error diff --git a/scripts/release.sh b/scripts/release.sh deleted file mode 100755 index d400c3fef5..0000000000 --- a/scripts/release.sh +++ /dev/null @@ -1,78 +0,0 @@ -#!/bin/bash - -# release.sh will hopefully allow us to publish all of the necessary crates in -# this repo in the right order. It is assumed that only one person will be -# releasing all crates at the same time. 
-# -# It has a default set of crates it will publish, which can be overridden by -# way of command line arguments: -# -# # Release all packages, prompting for each package as to whether to publish -# ./scripts/release.sh -# -# # Just release the ibc-proto and ibc crates, but nothing else -# ./scripts/release.sh ibc-proto ibc - -set -e - -# A space-separated list of all the crates we want to publish, in the order in -# which they must be published. It's important to respect this order, since -# each subsequent crate depends on one or more of the preceding ones. -DEFAULT_CRATES="ibc-proto ibc ibc-telemetry ibc-relayer ibc-relayer-rest ibc-relayer-cli ibc-test-framework" - -# Allows us to override the crates we want to publish. -CRATES=${*:-${DEFAULT_CRATES}} - -get_manifest_path() { - cargo metadata --format-version 1 | jq -r '.packages[]|select(.name == "'"${1}"'")|.manifest_path' -} - -get_local_version() { - cargo metadata --format-version 1 | jq -r '.packages[]|select(.name == "'"${1}"'")|.version' -} - -check_version_online() { - curl -s "https://crates.io/api/v1/crates/${1}" | jq -r '.versions[]|select(.num == "'"${2}"'").updated_at' -} - -publish() { - echo "Publishing crate $1..." - cargo publish --manifest-path "$(get_manifest_path "${1}")" - echo "" -} - -wait_until_available() { - echo "Waiting for crate ${1} to become available via crates.io..." - for retry in {1..5}; do - sleep 5 - ONLINE_DATE="$(check_version_online "${1}" "${2}")" - if [ -n "${ONLINE_DATE}" ]; then - echo "Crate ${crate} is now available online" - break - else - if [ "${retry}" == 5 ]; then - echo "ERROR: Crate should have become available by now" - exit 1 - else - echo "Not available just yet. Waiting a few seconds..." - fi - fi - done - echo "Waiting an additional 10 seconds for crate to propagate through CDN..." - sleep 10 -} - -echo "Attempting to publish crate(s): ${CRATES}" - -for crate in ${CRATES}; do - VERSION="$(get_local_version "${crate}")" - ONLINE_DATE="$(check_version_online "${crate}" "${VERSION}")" - echo "${crate} version number: ${VERSION}" - if [ -n "${ONLINE_DATE}" ]; then - echo "${crate} ${VERSION} has already been published at ${ONLINE_DATE}, skipping" - continue - fi - - publish "${crate}" - wait_until_available "${crate}" "${VERSION}" -done diff --git a/scripts/setup-chains b/scripts/setup-chains deleted file mode 100755 index a95e84db3d..0000000000 --- a/scripts/setup-chains +++ /dev/null @@ -1,92 +0,0 @@ -#!/bin/bash -e - -# Copied from https://github.com/cosmos/relayer and modified to initialize Gaia chains. - -usage() { - echo "Usage: $0 CHAIN_0_ID CHAIN_1_ID [CHAIN_2_ID]" - echo "Example: $0 ibc-0 ibc-1 ibc-2" - exit 1 -} - -missing() { - echo "Missing $1 parameter. Please check if all parameters were specified." - usage -} - -if [ -z "$1" ]; then - missing "CHAIN_0_ID" -fi - -if [ -z "$2" ]; then - missing "CHAIN_1_ID" -fi - - -if [ "$#" -gt 3 ]; then - echo "Incorrect number of parameters." - usage -fi - -CHAIN_0_ID="$1" -CHAIN_1_ID="$2" -CHAIN_2_ID="$3" - -GAIA_DATA="$(pwd)/data" - -# Ensure user understands what will be deleted -if [[ -d $GAIA_DATA ]] && [[ ! "$3" == "skip" ]]; then - echo "WARNING: $0 will DELETE the '$(pwd)/data' folder." - read -p "> Do you wish to continue? (y/n): " -n 1 -r - echo - if [[ ! $REPLY =~ ^[Yy]$ ]]; then - exit 1 - fi -fi - -# Ensure gaiad is installed -if ! [ -x "$(which gaiad)" ]; then - echo "Error: gaiad is not installed. 
Try running 'make build-gaia'" >&2 - exit 1 -fi - -# Display software version -echo "GAIA VERSION INFO: $(gaiad version --log_level info)" - -# Delete data from old runs -echo "Deleting $GAIA_DATA folder..." -rm -rf "$GAIA_DATA" - -# Stop existing gaiad processes -killall gaiad &> /dev/null || true -killall akash &> /dev/null || true - -echo "Generating gaia configurations..." -mkdir -p "$GAIA_DATA" && cd "$GAIA_DATA" && cd ../ - -ONE_CHAIN="$(dirname "$0")/one-chain" - -CHAIN_0_RPC_PORT=26657 -CHAIN_1_RPC_PORT=26557 -CHAIN_2_RPC_PORT=26457 - -CHAIN_0_GRPC_PORT=9090 -CHAIN_1_GRPC_PORT=9091 -CHAIN_2_GRPC_PORT=9092 - -CHAIN_0_SAMOLEANS=100000000000 -CHAIN_1_SAMOLEANS=100000000000 -CHAIN_2_SAMOLEANS=100000000000 - -"$ONE_CHAIN" gaiad "$CHAIN_0_ID" ./data $CHAIN_0_RPC_PORT 26656 6060 $CHAIN_0_GRPC_PORT $CHAIN_0_SAMOLEANS -"$ONE_CHAIN" gaiad "$CHAIN_1_ID" ./data $CHAIN_1_RPC_PORT 26556 6061 $CHAIN_1_GRPC_PORT $CHAIN_1_SAMOLEANS - -if [ -n "$CHAIN_2_ID" ]; then - "$ONE_CHAIN" gaiad "$CHAIN_2_ID" ./data $CHAIN_2_RPC_PORT 26456 6062 $CHAIN_2_GRPC_PORT $CHAIN_2_SAMOLEANS -fi - -[ -f "$GAIA_DATA/$CHAIN_0_ID.log" ] && echo "$CHAIN_0_ID initialized. Watch file $GAIA_DATA/$CHAIN_0_ID.log to see its execution." -[ -f "$GAIA_DATA/$CHAIN_1_ID.log" ] && echo "$CHAIN_1_ID initialized. Watch file $GAIA_DATA/$CHAIN_1_ID.log to see its execution." - -if [ -n "$CHAIN_2_ID" ]; then - [ -f "$GAIA_DATA/$CHAIN_2_ID.log" ] && echo "$CHAIN_2_ID initialized. Watch file $GAIA_DATA/$CHAIN_2_ID.log to see its execution." -fi diff --git a/scripts/sync-protobuf.sh b/scripts/sync-protobuf.sh deleted file mode 100755 index d2143b642b..0000000000 --- a/scripts/sync-protobuf.sh +++ /dev/null @@ -1,121 +0,0 @@ -#!/usr/bin/env bash - -set -eou pipefail - -# syn-protobuf.sh is a bash script to sync the protobuf -# files using the proto-compiler project. It will check -# out the protobuf files from the git versions specified in -# proto/src/prost/COSMOS_SDK_COMMIT and -# proto/src/prost/IBC_GO_COMMIT. If you want to sync -# the protobuf files to a newer version, modify the -# relevant files with the new commit IDs. - -# This script should be run from the root directory of ibc-rs - -# We can specify where to clone the git repositories -# for cosmos-sdk and ibc-go. By default they are cloned -# to /tmp/cosmos-sdk.git and /tmp/ibc-go.git. -# We can override this to existing directories -# that already have a clone of the repositories, -# so that there is no need to clone the entire -# repositories over and over again every time -# the script is called. - -CACHE_PATH="${XDG_CACHE_HOME:-$HOME/.cache}" -COSMOS_SDK_GIT="${COSMOS_SDK_GIT:-$CACHE_PATH/cosmos/cosmos-sdk.git}" -IBC_GO_GIT="${IBC_GO_GIT:-$CACHE_PATH/ibc-go.git}" - - -COSMOS_SDK_COMMIT="$(cat proto/src/COSMOS_SDK_COMMIT)" -IBC_GO_COMMIT="$(cat proto/src/IBC_GO_COMMIT)" - -echo "COSMOS_SDK_COMMIT: $COSMOS_SDK_COMMIT" -echo "IBC_GO_COMMIT: $IBC_GO_COMMIT" - -# Use either --sdk-commit flag for commit ID, -# or --sdk-tag for git tag. Because we can't modify -# proto-compiler to have smart detection on that. - -if [[ "$COSMOS_SDK_COMMIT" =~ ^[a-zA-Z0-9]{40}$ ]] -then - SDK_COMMIT_OPTION="--sdk-commit" -else - SDK_COMMIT_OPTION="--sdk-tag" -fi - -# If the git directories does not exist, clone them as -# bare git repositories so that no local modification -# can be done there. - -if [[ ! 
-e "$COSMOS_SDK_GIT" ]] -then - echo "Cloning cosmos-sdk source code to as bare git repository to $COSMOS_SDK_GIT" - git clone --mirror https://github.com/cosmos/cosmos-sdk.git "$COSMOS_SDK_GIT" -else - echo "Using existing cosmos-sdk bare git repository at $COSMOS_SDK_GIT" -fi - -if [[ ! -e "$IBC_GO_GIT" ]] -then - echo "Cloning ibc-go source code to as bare git repository to $IBC_GO_GIT" - git clone --mirror https://github.com/ComposableFi/ibc-go.git "$IBC_GO_GIT" -else - echo "Using existing ibc-go bare git repository at $IBC_GO_GIT" -fi - -# Update the repositories using git fetch. This is so that -# we keep local copies of the repositories up to sync first. -pushd "$COSMOS_SDK_GIT" -git fetch -popd - -pushd "$IBC_GO_GIT" -git fetch -popd - -# Create a new temporary directory to check out the -# actual source files from the bare git repositories. -# This is so that we do not accidentally use an unclean -# local copy of the source files to generate the protobuf. -COSMOS_SDK_DIR=$(mktemp -d /tmp/cosmos-sdk-XXXXXXXX) - -pushd "$COSMOS_SDK_DIR" -git clone "$COSMOS_SDK_GIT" . -git checkout "$COSMOS_SDK_COMMIT" - -# We have to name the commit as a branch because -# proto-compiler uses the branch name as the commit -# output. Otherwise it will just output HEAD -git checkout -b "$COSMOS_SDK_COMMIT" -popd - -IBC_GO_DIR=$(mktemp -d /tmp/ibc-go-XXXXXXXX) - -pushd "$IBC_GO_DIR" -git clone "$IBC_GO_GIT" . -git checkout "$IBC_GO_COMMIT" -git checkout -b "$IBC_GO_COMMIT" -popd - -# Remove the existing generated protobuf files -# so that the newly generated code does not -# contain removed files. - -rm -rf proto/src/prost -mkdir -p proto/src/prost - -cd proto-compiler - -cargo build --locked - -# Run the proto-compiler twice, -# once for std version with --build-tonic set to true -# and once for no-std version with --build-tonic set to false - -cargo run --locked -- compile \ - --sdk "$COSMOS_SDK_DIR" --ibc "$IBC_GO_DIR" --out ../proto/src/prost - -# Remove the temporary checkouts of the repositories - -rm -rf "$COSMOS_SDK_DIR" -rm -rf "$IBC_GO_DIR" diff --git a/telemetry/Cargo.toml b/telemetry/Cargo.toml deleted file mode 100644 index 0a7599f2f4..0000000000 --- a/telemetry/Cargo.toml +++ /dev/null @@ -1,26 +0,0 @@ -[package] -name = "ibc-telemetry" -version = "0.15.0" -edition = "2021" -license = "Apache-2.0" -readme = "README.md" -keywords = ["cosmos", "ibc", "relayer", "telemetry"] -repository = "https://github.com/informalsystems/ibc-rs" -authors = ["Informal Systems "] -rust-version = "1.60" -description = """ - Telemetry service for the Hermes IBC relayer -""" - -[dependencies] -ibc = { version = "0.15.0", path = "../modules" } - -crossbeam-channel = "0.5.4" -once_cell = "1.12.0" -opentelemetry = "0.17.0" -opentelemetry-prometheus = "0.10.0" -prometheus = "0.13.0" -rouille = "3.5.0" - -moka = "0.8.5" -uuid = { version = "1.1.2", features = ["v4"] } diff --git a/telemetry/README.md b/telemetry/README.md deleted file mode 100644 index b1bea92d85..0000000000 --- a/telemetry/README.md +++ /dev/null @@ -1,42 +0,0 @@ -# IBC Telemetry - -[![Crate][crate-image]][crate-link] -[![Docs][docs-image]][docs-link] -[![Build Status][build-image]][build-link] -[![End to End testing][e2e-image]][e2e-link] -[![Apache 2.0 Licensed][license-image]][license-link] -![Rust Stable][rustc-image] -![Rust 1.51+][rustc-version] - -This is the repository for the IBC telemetry service for use in the Hermes IBC relayer. - -See the [telemetry documentation][telemetry-doc] in the Hermes guide for more information. 
-
-## License
-
-Copyright © 2021 Informal Systems Inc. and ibc-rs authors.
-
-Licensed under the Apache License, Version 2.0 (the "License"); you may not use the files in this repository except in compliance with the License. You may obtain a copy of the License at
-
-    https://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
-
-
-[//]: # (badges)
-
-[crate-image]: https://img.shields.io/crates/v/ibc-telemetry.svg
-[crate-link]: https://crates.io/crates/ibc-telemetry
-[docs-image]: https://docs.rs/ibc-telemetry/badge.svg
-[docs-link]: https://docs.rs/ibc-telemetry/
-
-[build-image]: https://github.com/informalsystems/ibc-rs/workflows/Rust/badge.svg
-[build-link]: https://github.com/informalsystems/ibc-rs/actions?query=workflow%3ARust
-[e2e-image]: https://github.com/informalsystems/ibc-rs/workflows/End%20to%20End%20testing/badge.svg
-[e2e-link]: https://github.com/informalsystems/ibc-rs/actions?query=workflow%3A%22End+to+End+testing%22
-[telemetry-doc]: https://hermes.informal.systems/telemetry.html
-
-[license-image]: https://img.shields.io/badge/license-Apache2.0-blue.svg
-[license-link]: https://github.com/informalsystems/ibc-rs/blob/master/LICENSE
-[rustc-image]: https://img.shields.io/badge/rustc-stable-blue.svg
-[rustc-version]: https://img.shields.io/badge/rustc-1.51+-blue.svg
diff --git a/telemetry/src/lib.rs b/telemetry/src/lib.rs
deleted file mode 100644
index 12a1a0bf23..0000000000
--- a/telemetry/src/lib.rs
+++ /dev/null
@@ -1,44 +0,0 @@
-extern crate alloc;
-
-pub mod server;
-pub mod state;
-
-use alloc::sync::Arc;
-use once_cell::sync::Lazy;
-use std::{
-    error::Error,
-    net::{SocketAddr, ToSocketAddrs},
-    thread::JoinHandle,
-};
-
-pub use crate::state::TelemetryState;
-
-pub fn new_state() -> Arc<TelemetryState> {
-    Arc::new(TelemetryState::default())
-}
-
-static GLOBAL_STATE: Lazy<Arc<TelemetryState>> = Lazy::new(new_state);
-
-pub fn global() -> &'static Arc<TelemetryState> {
-    &GLOBAL_STATE
-}
-
-pub fn spawn<A>(
-    address: A,
-    state: Arc<TelemetryState>,
-) -> Result<(SocketAddr, JoinHandle<()>), Box<dyn Error + Send + Sync>>
-where
-    A: ToSocketAddrs + Send + 'static,
-{
-    let server = server::listen(address, state);
-
-    match server {
-        Ok(server) => {
-            let address = server.server_addr();
-            let handle = std::thread::spawn(move || server.run());
-
-            Ok((address, handle))
-        }
-        Err(e) => Err(e),
-    }
-}
diff --git a/telemetry/src/server.rs b/telemetry/src/server.rs
deleted file mode 100644
index ab25b3ebfa..0000000000
--- a/telemetry/src/server.rs
+++ /dev/null
@@ -1,47 +0,0 @@
-use alloc::sync::Arc;
-use std::{error::Error, net::ToSocketAddrs};
-
-use prometheus::{Encoder, TextEncoder};
-use rouille::{Request, Response, Server};
-
-use crate::state::TelemetryState;
-
-enum Route {
-    Metrics,
-    Other,
-}
-
-impl Route {
-    fn from_request(request: &Request) -> Route {
-        if request.url() == "/metrics" {
-            Route::Metrics
-        } else {
-            Route::Other
-        }
-    }
-}
-
-pub fn listen(
-    address: impl ToSocketAddrs,
-    telemetry_state: Arc<TelemetryState>,
-) -> Result<Server<impl Fn(&Request) -> Response>, Box<dyn Error + Send + Sync>> {
-    let server = Server::new(address, move |request| {
-        match Route::from_request(request) {
-            // The prometheus endpoint
-            Route::Metrics => {
-                let mut buffer = vec![];
-                let encoder = TextEncoder::new();
-                let metric_families = telemetry_state.gather();
-                encoder.encode(&metric_families, &mut buffer).unwrap();
-
-                rouille::Response::from_data(encoder.format_type().to_string(), buffer)
-            }
-
-            // Any other route
-            // Return an empty response with a 404 status code.
-            Route::Other => rouille::Response::empty_404(),
-        }
-    })?;
-
-    Ok(server)
-}
diff --git a/telemetry/src/state.rs b/telemetry/src/state.rs
deleted file mode 100644
index 9fbf09bbca..0000000000
--- a/telemetry/src/state.rs
+++ /dev/null
@@ -1,393 +0,0 @@
-use core::fmt;
-use std::time::{Duration, Instant};
-
-use opentelemetry::{
-    global,
-    metrics::{Counter, UpDownCounter, ValueRecorder},
-    KeyValue,
-};
-use opentelemetry_prometheus::PrometheusExporter;
-use prometheus::proto::MetricFamily;
-
-use ibc::core::ics24_host::identifier::{ChainId, ChannelId, ClientId, PortId};
-
-#[derive(Copy, Clone, Debug)]
-pub enum WorkerType {
-    Client,
-    Connection,
-    Channel,
-    Packet,
-    Wallet,
-}
-
-impl fmt::Display for WorkerType {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        match self {
-            Self::Client => write!(f, "client"),
-            Self::Connection => write!(f, "connection"),
-            Self::Channel => write!(f, "channel"),
-            Self::Packet => write!(f, "packet"),
-            Self::Wallet => write!(f, "wallet"),
-        }
-    }
-}
-
-pub struct TelemetryState {
-    exporter: PrometheusExporter,
-
-    /// Number of workers per object
-    workers: UpDownCounter<i64>,
-
-    /// Number of client updates per client
-    ibc_client_updates: Counter<u64>,
-
-    /// Number of client misbehaviours per client
-    ibc_client_misbehaviours: Counter<u64>,
-
-    /// Number of receive packets relayed, per channel
-    receive_packets: Counter<u64>,
-
-    /// Number of acknowledgment packets relayed, per channel
-    acknowledgment_packets: Counter<u64>,
-
-    /// Number of timeout packets relayed, per channel
-    timeout_packets: Counter<u64>,
-
-    /// Number of queries emitted by the relayer, per chain and query type
-    queries: Counter<u64>,
-
-    /// Number of cache hits for queries emitted by the relayer, per chain and query type
-    query_cache_hits: Counter<u64>,
-
-    /// Number of times the relayer had to reconnect to the WebSocket endpoint, per chain
-    ws_reconnect: Counter<u64>,
-
-    /// How many IBC events Hermes received via the WebSocket subscription, per chain
-    ws_events: Counter<u64>,
-
-    /// How many messages Hermes submitted to the chain, per chain
-    msg_num: Counter<u64>,
-
-    /// The balance in each wallet that Hermes is using, per wallet, denom and chain
-    wallet_balance: ValueRecorder<u64>,
-
-    /// Indicates the latency for all transactions submitted to a specific chain,
-    /// i.e. the difference between the moment when Hermes received a batch of events
-    /// and the moment when the corresponding transaction(s) were submitted. Milliseconds.
-    tx_latency_submitted: ValueRecorder<u64>,
-
-    /// Indicates the latency for all transactions submitted to a specific chain,
-    /// i.e. the difference between the moment when Hermes received a batch of events
-    /// and the moment when the corresponding transaction(s) were confirmed. Milliseconds.
-    tx_latency_confirmed: ValueRecorder<u64>,
-
-    /// Records the time at which we started processing an event batch.
-    /// Used for computing the `tx_latency` metric.
- in_flight_events: moka::sync::Cache, -} - -impl TelemetryState { - /// Gather the metrics for export - pub fn gather(&self) -> Vec { - self.exporter.registry().gather() - } - - /// Update the number of workers per object - pub fn worker(&self, worker_type: WorkerType, count: i64) { - let labels = &[KeyValue::new("type", worker_type.to_string())]; - self.workers.add(count, labels); - } - - /// Update the number of client updates per client - pub fn ibc_client_updates(&self, chain: &ChainId, client: &ClientId, count: u64) { - let labels = &[ - KeyValue::new("chain", chain.to_string()), - KeyValue::new("client", client.to_string()), - ]; - - self.ibc_client_updates.add(count, labels); - } - - /// Number of client misbehaviours per client - pub fn ibc_client_misbehaviour(&self, chain: &ChainId, client: &ClientId, count: u64) { - let labels = &[ - KeyValue::new("chain", chain.to_string()), - KeyValue::new("client", client.to_string()), - ]; - - self.ibc_client_misbehaviours.add(count, labels); - } - - /// Number of receive packets relayed, per channel - pub fn ibc_receive_packets( - &self, - src_chain: &ChainId, - src_channel: &ChannelId, - src_port: &PortId, - count: u64, - ) { - let labels = &[ - KeyValue::new("src_chain", src_chain.to_string()), - KeyValue::new("src_channel", src_channel.to_string()), - KeyValue::new("src_port", src_port.to_string()), - ]; - - self.receive_packets.add(count, labels); - } - - /// Number of acknowledgment packets relayed, per channel - pub fn ibc_acknowledgment_packets( - &self, - src_chain: &ChainId, - src_channel: &ChannelId, - src_port: &PortId, - count: u64, - ) { - let labels = &[ - KeyValue::new("src_chain", src_chain.to_string()), - KeyValue::new("src_channel", src_channel.to_string()), - KeyValue::new("src_port", src_port.to_string()), - ]; - - self.acknowledgment_packets.add(count, labels); - } - - /// Number of timeout packets relayed, per channel - pub fn ibc_timeout_packets( - &self, - src_chain: &ChainId, - src_channel: &ChannelId, - src_port: &PortId, - count: u64, - ) { - let labels = &[ - KeyValue::new("src_chain", src_chain.to_string()), - KeyValue::new("src_channel", src_channel.to_string()), - KeyValue::new("src_port", src_port.to_string()), - ]; - - self.timeout_packets.add(count, labels); - } - - /// Number of queries emitted by the relayer, per chain and query type - pub fn query(&self, chain_id: &ChainId, query_type: &'static str) { - let labels = &[ - KeyValue::new("chain", chain_id.to_string()), - KeyValue::new("query_type", query_type), - ]; - - self.queries.add(1, labels); - } - - /// Number of cache hits for queries emitted by the relayer, per chain and query type - pub fn query_cache_hit(&self, chain_id: &ChainId, query_type: &'static str) { - let labels = &[ - KeyValue::new("chain", chain_id.to_string()), - KeyValue::new("query_type", query_type), - ]; - - self.query_cache_hits.add(1, labels); - } - - /// Number of time the relayer had to reconnect to the WebSocket endpoint, per chain - pub fn ws_reconnect(&self, chain_id: &ChainId) { - let labels = &[KeyValue::new("chain", chain_id.to_string())]; - - self.ws_reconnect.add(1, labels); - } - - /// How many IBC events did Hermes receive via the WebSocket subscription, per chain - pub fn ws_events(&self, chain_id: &ChainId, count: u64) { - let labels = &[KeyValue::new("chain", chain_id.to_string())]; - - self.ws_events.add(count, labels); - } - - /// How many messages Hermes submitted to the chain, per chain - pub fn msg_num(&self, chain_id: &ChainId, count: u64) { - let labels = 
&[KeyValue::new("chain", chain_id.to_string())]; - - self.msg_num.add(count, labels); - } - - /// The balance in each wallet that Hermes is using, per account, denom and chain - pub fn wallet_balance(&self, chain_id: &ChainId, account: &str, amount: u64, denom: &str) { - let labels = &[ - KeyValue::new("chain", chain_id.to_string()), - KeyValue::new("account", account.to_string()), - KeyValue::new("denom", denom.to_string()), - ]; - - self.wallet_balance.record(amount, labels); - } - - pub fn received_event_batch(&self, tracking_id: impl ToString) { - self.in_flight_events - .insert(tracking_id.to_string(), Instant::now()); - } - - pub fn tx_submitted( - &self, - - tracking_id: impl ToString, - chain_id: &ChainId, - channel_id: &ChannelId, - port_id: &PortId, - counterparty_chain_id: &ChainId, - ) { - let tracking_id = tracking_id.to_string(); - - if let Some(start) = self.in_flight_events.get(&tracking_id) { - let latency = start.elapsed().as_millis() as u64; - - let labels = &[ - // KeyValue::new("tracking_id", tracking_id), - KeyValue::new("chain", chain_id.to_string()), - KeyValue::new("counterparty", counterparty_chain_id.to_string()), - KeyValue::new("channel", channel_id.to_string()), - KeyValue::new("port", port_id.to_string()), - ]; - - self.tx_latency_submitted.record(latency, labels); - } - } - - pub fn tx_confirmed( - &self, - tracking_id: impl ToString, - chain_id: &ChainId, - channel_id: &ChannelId, - port_id: &PortId, - counterparty_chain_id: &ChainId, - ) { - let tracking_id = tracking_id.to_string(); - - if let Some(start) = self.in_flight_events.get(&tracking_id) { - let latency = start.elapsed().as_millis() as u64; - - let labels = &[ - // KeyValue::new("tracking_id", tracking_id), - KeyValue::new("chain", chain_id.to_string()), - KeyValue::new("counterparty", counterparty_chain_id.to_string()), - KeyValue::new("channel", channel_id.to_string()), - KeyValue::new("port", port_id.to_string()), - ]; - - self.tx_latency_confirmed.record(latency, labels); - } - } -} - -use std::sync::Arc; - -use opentelemetry::metrics::Descriptor; -use opentelemetry::sdk::export::metrics::{Aggregator, AggregatorSelector}; -use opentelemetry::sdk::metrics::aggregators::{histogram, last_value, sum}; - -#[derive(Debug)] -struct CustomAggregatorSelector; -impl AggregatorSelector for CustomAggregatorSelector { - fn aggregator_for(&self, descriptor: &Descriptor) -> Option> { - match descriptor.name() { - "wallet_balance" => Some(Arc::new(last_value())), - "tx_latency_submitted" => Some(Arc::new(histogram(descriptor, &[0.5, 0.9, 0.99]))), - "tx_latency_confirmed" => Some(Arc::new(histogram(descriptor, &[0.5, 0.9, 0.99]))), - _ => Some(Arc::new(sum())), - } - } -} - -impl Default for TelemetryState { - fn default() -> Self { - let exporter = opentelemetry_prometheus::ExporterBuilder::default() - .with_aggregator_selector(CustomAggregatorSelector) - .init(); - - let meter = global::meter("hermes"); - - Self { - exporter, - - workers: meter - .i64_up_down_counter("workers") - .with_description("Number of workers per object") - .init(), - - ibc_client_updates: meter - .u64_counter("ibc_client_updates") - .with_description("Number of client updates performed per client") - .init(), - - ibc_client_misbehaviours: meter - .u64_counter("ibc_client_misbehaviours") - .with_description("Number of misbehaviours detected per client") - .init(), - - receive_packets: meter - .u64_counter("ibc_receive_packets") - .with_description("Number of receive packets relayed per channel") - .init(), - - 
acknowledgment_packets: meter - .u64_counter("ibc_acknowledgment_packets") - .with_description("Number of acknowledgment packets relayed per channel") - .init(), - - timeout_packets: meter - .u64_counter("ibc_timeout_packets") - .with_description("Number of timeout packets relayed per channel") - .init(), - - queries: meter - .u64_counter("queries") - .with_description( - "Number of queries emitted by the relayer, per chain and query type", - ) - .init(), - - query_cache_hits: meter - .u64_counter("cache_hits") - .with_description("Number of cache hits for queries emitted by the relayer, per chain and query type") - .init(), - - ws_reconnect: meter - .u64_counter("ws_reconnect") - .with_description("Number of time the relayer had to reconnect to the WebSocket endpoint, per chain") - .init(), - - ws_events: meter - .u64_counter("ws_events") - .with_description("How many IBC events did Hermes receive via the WebSocket subscription, per chain") - .init(), - - msg_num: meter - .u64_counter("msg_num") - .with_description("How many messages Hermes submitted to the chain, per chain") - .init(), - - wallet_balance: meter - .u64_value_recorder("wallet_balance") - .with_description("The balance in each wallet that Hermes is using, per wallet, denom and chain") - .init(), - - tx_latency_submitted: meter - .u64_value_recorder("tx_latency_submitted") - .with_description("The latency for all transactions submitted to a specific chain, \ - i.e. the difference between the moment when Hermes received a batch of events \ - and when it submitted the corresponding transaction(s). Milliseconds.") - .init(), - - tx_latency_confirmed: meter - .u64_value_recorder("tx_latency_confirmed") - .with_description("The latency for all transactions submitted to a specific chain, \ - i.e. the difference between the moment when Hermes received a batch of events \ - until the corresponding transaction(s) were confirmed. 
Milliseconds.") - .init(), - - in_flight_events: moka::sync::Cache::builder() - .time_to_live(Duration::from_secs(60 * 60)) // Remove entries after 1 hour - .time_to_idle(Duration::from_secs(30 * 60)) // Remove entries if they have been idle for 30 minutes - .build(), - } - } -} diff --git a/tools/integration-test/Cargo.toml b/tools/integration-test/Cargo.toml deleted file mode 100644 index bf6a5a4f25..0000000000 --- a/tools/integration-test/Cargo.toml +++ /dev/null @@ -1,44 +0,0 @@ -[package] -name = "ibc-integration-test" -version = "0.15.0" -edition = "2021" -license = "Apache-2.0" -readme = "README.md" -keywords = ["blockchain", "consensus", "cosmos", "ibc", "tendermint"] -homepage = "https://hermes.informal.systems/" -repository = "https://github.com/informalsystems/ibc-rs" -authors = ["Informal Systems "] - -description = """ - Integration tests for IBC Relayer -""" - -[dependencies] -ibc = { path = "../../modules" } -ibc-relayer = { path = "../../relayer" } -ibc-relayer-cli = { path = "../../relayer-cli" } -ibc-proto = { path = "../../proto" } -ibc-test-framework = { path = "../test-framework" } -tendermint = { git = "https://github.com/composableFi/tendermint-rs", rev = "5a74e0f8da4d3dab83cc04b5f1363b018cf3d9e8" } -tendermint-rpc = { git = "https://github.com/composableFi/tendermint-rs", rev = "5a74e0f8da4d3dab83cc04b5f1363b018cf3d9e8", features = ["http-client", "websocket-client"] } - -serde_json = "1" -modelator = { version = "0.4.2", optional = true } -time = "0.3" -serde = "1.0.136" - -[features] -default = [] -example = [] -manual = [] -ordered = [] -ica = [] -experimental = [] -mbt = ["modelator"] - -[[bin]] -name = "test_setup_with_binary_channel" -doc = true - -[dev-dependencies] -tempfile = "3.3.0" diff --git a/tools/integration-test/spec/.gitignore b/tools/integration-test/spec/.gitignore deleted file mode 100644 index 86ee001101..0000000000 --- a/tools/integration-test/spec/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -states/ -_apalache-out/ -run/ diff --git a/tools/integration-test/spec/MC_Transfer.cfg b/tools/integration-test/spec/MC_Transfer.cfg deleted file mode 100644 index e3e8c3d250..0000000000 --- a/tools/integration-test/spec/MC_Transfer.cfg +++ /dev/null @@ -1,2 +0,0 @@ -INIT Init -NEXT Next diff --git a/tools/integration-test/spec/MC_Transfer.tla b/tools/integration-test/spec/MC_Transfer.tla deleted file mode 100644 index 3af3909d55..0000000000 --- a/tools/integration-test/spec/MC_Transfer.tla +++ /dev/null @@ -1,49 +0,0 @@ ----- MODULE MC_Transfer ---- -EXTENDS Transfer_typedefs - -CHAIN_IDS == {1, 2} -N_INITIAL_ACCOUNTS == 2 -GENESIS_AMOUNT == 3 - -VARIABLES - \* Interchain state - \* @type: CHAIN_ID -> CHAIN; - chains, - \* @type: Bool; - relayerRunning, - \* Action performed at current step - \* @type: [ name: Str ]; - action, - \* Outcome after action performed at current step - \* @type: [ name: Str ]; - outcome - -INSTANCE Transfer - -\* Trace with a LocalTransfer action -LocalTransferTest == action.name = LocalTransferAction - -\* Trace with a RestoreRelay action -RestoreRelayTest == action.name = RestoreRelayAction -\* Trace with an InterruptRelay action -InterruptRelayTest == action.name = InterruptRelayAction - -\* Trace with an IBCTransferSendPacket action -IBCTransferSendPacketTest == action.name = IBCTransferSendPacketAction -\* Trace with an IBCTransferReceivePacket action -IBCTransferReceivePacketTest == action.name = IBCTransferReceivePacketAction -\* Trace with an IBCTransferAcknowledgePacket action -IBCTransferAcknowledgePacketTest == 
action.name = IBCTransferAcknowledgePacketAction -\* Trace with an IBCTransferTimeoutPacket action -IBCTransferTimeoutPacketTest == action.name = IBCTransferTimeoutPacketAction - -\* Negate the trace predicate to find counter-example -LocalTransferInv == ~LocalTransferTest -RestoreRelayInv == ~RestoreRelayTest -InterruptRelayInv == ~InterruptRelayTest -IBCTransferSendPacketInv == ~IBCTransferSendPacketTest -IBCTransferReceivePacketInv == ~IBCTransferReceivePacketTest -IBCTransferAcknowledgePacketInv == ~IBCTransferAcknowledgePacketTest -IBCTransferTimeoutPacketInv == ~IBCTransferTimeoutPacketTest - -==== diff --git a/tools/integration-test/spec/README.md b/tools/integration-test/spec/README.md deleted file mode 100644 index 8d94669151..0000000000 --- a/tools/integration-test/spec/README.md +++ /dev/null @@ -1,23 +0,0 @@ -# ICS20 Specification - -Add desired `Invariant` predicate in `MC_Transfer.tla`. Then execute, - -```sh -apalache check --inv=Invariant --run-dir=run MC_Transfer.tla -``` - -Provided invariants to pass, - -``` -LocalTransferInv -RestoreRelayInv -InterruptRelayInv -IBCTransferSendPacketInv -IBCTransferReceivePacketInv -IBCTransferAcknowledgePacketInv -IBCTransferTimeoutPacketInv -``` - -```sh -apalache check --inv=IBCTransferAcknowledgePacketInv --run-dir=run MC_Transfer.tla -``` diff --git a/tools/integration-test/spec/Transfer.tla b/tools/integration-test/spec/Transfer.tla deleted file mode 100644 index bb9067868d..0000000000 --- a/tools/integration-test/spec/Transfer.tla +++ /dev/null @@ -1,398 +0,0 @@ ----- MODULE Transfer ---- -EXTENDS Apalache, Sequences, Integers, Transfer_typedefs - -CONSTANTS - \* Set of blockchain names - \* @type: Set(CHAIN_ID); - CHAIN_IDS, - \* Number of accounts per blockchain - \* @type: ACCOUNT_ID; - N_INITIAL_ACCOUNTS, - \* Genesis balance per account - \* @type: Int; - GENESIS_AMOUNT - -VARIABLES - \* Interchain state - \* @type: CHAIN_ID -> CHAIN; - chains, - \* @type: Bool; - relayerRunning, - \* Action performed at current step - \* @type: [ name: Str ]; - action, - \* Outcome after action performed at current step - \* @type: [ name: Str ]; - outcome - -\* Account IDs starts from 1 -\* @type: () => Set(ACCOUNT_ID); -ACCOUNT_IDS == 1..N_INITIAL_ACCOUNTS - -\* Actions -NullAction == "Null" -LocalTransferAction == "LocalTransfer" -RestoreRelayAction == "RestoreRelay" -InterruptRelayAction == "InterruptRelay" -IBCTransferSendPacketAction == "IBCTransferSendPacket" -IBCTransferReceivePacketAction == "IBCTransferReceivePacket" -IBCTransferAcknowledgePacketAction == "IBCTransferAcknowledgePacket" -IBCTransferTimeoutPacketAction == "IBCTransferTimeoutPacket" - -\* Outcomes -SuccessOutcome == "Success" -ErrorOutcome == "Error" - -\* @type: (CHAIN_ID) => CHAIN; -Genesis(chainId) == - LET nativeDenom == chainId IN [ - \* Name of the chain - id |-> chainId, - - \* Bank data for this chain - \* To support different cross-chain(ibc) denoms, it is a 2D map. - \* `accountId` has `bank[accountId][denomId]` many `denomId`. 
- bank |-> [accountId \in ACCOUNT_IDS |-> [denom \in {nativeDenom} |-> GENESIS_AMOUNT]], - \* Record of circulating native and cross-chain(ibc) token sourced at this chain - supply |-> [denom \in {nativeDenom} |-> GENESIS_AMOUNT * N_INITIAL_ACCOUNTS ], - - \* Record of packets originated from this chain - localPackets |-> [ - \* A table of packets with packetId - list |-> SetAsFun({}), - \* Set of packetIds of packets which are not yet acknowledged by destination chain - pending |-> {}, - \* Set of packetIds of packets which are not delivered to destrination chain within timeout block height - expired |-> {}, - \* Set of packetIds of packets which are acknowledged by destination chain - success |-> {} - ], - - \* Escrow balance per chain - escrow |-> [cId \in CHAIN_IDS \ {chainId} |-> SetAsFun({})], - - \* Record of packets receiveed from other chains - \* Packets are maintained using the channelId, it was received at. - \* Note: A pair of chain may have multiple channels in the past. - remotePackets |-> SetAsFun({}), - - nextPacketId |-> 0 -] - -\* Get balance of denom in a bank -\* @type: (BANK, DENOM_ID) => Int; -GetDenomFromBank(bank, denom) == - IF denom \in DOMAIN bank THEN bank[denom] - ELSE 0 - -\* Add an entry to a map if its key does not exists -\* Else update the existing entry -\* @type: (k -> v, k, v) => (k -> v); -AddOrUpdateEntry(func, key, value) == - IF key \in DOMAIN func THEN [func EXCEPT ![key] = value] - ELSE [x \in {key} \union DOMAIN func |-> IF x = key THEN value ELSE func[x]] - - -(* -We will model TokenTransfer using following actions. - -LocalTransfer : on-chain transfer - -InterruptRelay : Interrupt relaying -RestoreRelay : Restore relaying - -IBCTransferSendPacket : account in source chain tries to send denom to an account in target chain -IBCTransferReceivePacket : account in target chain receives the denom sent from account in source chain -IBCTransferAcknowledgePacket : the transaction is acknowledged. source chain completes the transaction. -IBCTransferTimeoutPacket : the transfer is timed-out. balance is refunded to source account. 
-*) - -\* Checks if the source account has enough balance -\* @type: (CHAIN, ACCOUNT_ID, DENOM_ID, Int) => Bool; -HasEnoughBalance(chain, source, denom, amount) == - /\ source \in DOMAIN chain.bank - /\ denom \in DOMAIN chain.bank[source] - /\ chain.bank[source][denom] >= amount - -\* Updated bank after local transfer -\* @type: (CHAIN, ACCOUNT_ID, ACCOUNT_ID, DENOM_ID, Int) => CHAIN; -LocalTransfer(chain, source, target, denom, amount) == - [ - chain EXCEPT - !.bank = [ - @ EXCEPT - ![source] = AddOrUpdateEntry(@, denom, GetDenomFromBank(@, denom) - amount), - ![target] = AddOrUpdateEntry(@, denom, GetDenomFromBank(@, denom) + amount) - ] - ] - -\* Next operator for LocalTransfer -\* @type: () => Bool; -LocalTransferNext == - \E chainId \in CHAIN_IDS: - \E source, target \in ACCOUNT_IDS: - source /= target /\ - \E amount \in 1..10: - LET - chain == chains[chainId] - denom == chain.id IN - /\ HasEnoughBalance(chain, source, denom, amount) - /\ chains' = [ - chains EXCEPT - ![chainId] = LocalTransfer(@, source, target, chain.id, amount) - ] - /\ action' = [ - name |-> LocalTransferAction, - chainId |-> chainId, - source |-> source, - target |-> target, - denom |-> denom, - amount |-> amount - ] - /\ outcome' = [ name |-> SuccessOutcome ] - /\ UNCHANGED relayerRunning - -\* Next operator for RestoreRelay -\* @type: () => Bool; -RestoreRelayNext == - /\ relayerRunning = FALSE - /\ relayerRunning' = TRUE - /\ UNCHANGED chains - /\ action' = [name |-> RestoreRelayAction] - /\ outcome' = [name |-> SuccessOutcome] - -\* Next operator for InterruptRelay -\* @type: () => Bool; -InterruptRelayNext == - /\ relayerRunning = TRUE - /\ relayerRunning' = FALSE - /\ UNCHANGED chains - /\ action' = [name |-> InterruptRelayAction] - /\ outcome' = [name |-> SuccessOutcome] - -\* Checks if there exists a channel between two chains -\* @type: () => Bool; -IBCTransferSendPacketCondition == - relayerRunning - -\* Creates an IBCPacket with the necessary information and adds it to pending packets -\* @type: (CHAIN, ACCOUNT_ID, CHAIN, ACCOUNT_ID, DENOM_ID, Int) => CHAIN; -IBCTransferSendPacket(sourceChain, source, targetChain, target, denom, amount) == - [ - sourceChain EXCEPT - !.bank = [@ EXCEPT - ![source] = AddOrUpdateEntry(@, denom, GetDenomFromBank(@, denom) - amount) - ], - !.escrow = [@ EXCEPT - ![targetChain.id] = AddOrUpdateEntry(@, denom, GetDenomFromBank(@, denom) + amount) - ], - !.localPackets = [@ EXCEPT - !.list = AddOrUpdateEntry(@, - sourceChain.nextPacketId, - [ - id |-> sourceChain.nextPacketId, - from |-> source, - sourceChainId |-> sourceChain.id, - to |-> target, - targetChainId |-> targetChain.id, - denom |-> denom, - amount |-> amount - ] - ), - !.pending = @ \union {sourceChain.nextPacketId} - ], - !.nextPacketId = @ + 1 - ] - -\* Next operator for IBCTransferSendPacket -IBCTransferSendPacketNext == - \E chainId1, chainId2 \in CHAIN_IDS: - chainId1 /= chainId2 /\ - \E acc1, acc2 \in ACCOUNT_IDS: - \E denom \in DOMAIN chains[chainId1].supply: - \E amount \in 1..10: - /\ IBCTransferSendPacketCondition - /\ HasEnoughBalance(chains[chainId1], acc1, denom, amount) - /\ chains' = [chains EXCEPT - ![chainId1] = IBCTransferSendPacket(chains[chainId1], acc1, chains[chainId2], acc2, denom, amount) - ] - /\ action' = [ - name |-> IBCTransferSendPacketAction, - packet |-> chains'[chainId1].localPackets.list[chains[chainId1].nextPacketId] - ] - /\ outcome' = [name |-> SuccessOutcome] - /\ UNCHANGED relayerRunning - -\* TODO: -\* append CHANNEL_ID/PORT_ID for source zone -\* trim CHANNEL_ID/PORT_ID for 
sink zone -\* @type: (DENOM_ID, CHAIN_ID) => DENOM_ID; -TransformDenom(denom, targetChainId) == - denom - -\* Process an IBC packet at targetChain -\* @type: (PACKET) => CHAIN; -IBCTransferReceivePacket(packet) == - LET - targetChainId == packet.targetChainId - sourceChainId == packet.sourceChainId - destination == packet.to - denom == TransformDenom(packet.denom, targetChainId) - amount == packet.amount - targetChain == chains[targetChainId] - IN - [ - targetChain EXCEPT - !.bank = [@ EXCEPT - ![destination] = AddOrUpdateEntry(@, denom, GetDenomFromBank(@, denom) + amount) - ], - !.supply = AddOrUpdateEntry(@, denom, GetDenomFromBank(@, denom) + amount), - !.remotePackets = AddOrUpdateEntry( - @, - sourceChainId, - AddOrUpdateEntry( - IF sourceChainId \in DOMAIN @ THEN @[sourceChainId] ELSE SetAsFun({}), - packet.id, - packet - ) - ) - ] - -\* Checks if the packet is not processed by the targetChain -\* @type: (PACKET, CHAIN) => Bool; -IBCTransferReceivePacketCondition(packet, targetChain) == - /\ relayerRunning - /\ \/ packet.sourceChainId \notin DOMAIN targetChain.remotePackets - \/ packet.id \notin DOMAIN targetChain.remotePackets[packet.sourceChainId] - -\* Next operator for IBCTransferReceivePacket -IBCTransferReceivePacketNext == - \E chainId \in CHAIN_IDS: - \E packetId \in chains[chainId].localPackets.pending: - LET - packet == chains[chainId].localPackets.list[packetId] - targetChain == chains[packet.targetChainId] - IN - /\ IBCTransferReceivePacketCondition(packet, targetChain) - /\ chains' = [chains EXCEPT - ![packet.targetChainId] = IBCTransferReceivePacket(packet) - ] - /\ action' = [ - name |-> IBCTransferReceivePacketAction, - packet |-> packet - ] - /\ outcome' = [name |-> SuccessOutcome] - /\ UNCHANGED relayerRunning - - -\* Picks an IBCPacket from sourceChain to timeout -\* Refunds balance to source account -\* Moves the packet from pending to expired -\* @type: (PACKET) => CHAIN; -IBCTransferTimeoutPacket(packet) == - LET - from == packet.from - denom == packet.denom - amount == packet.amount - sourceChain == chains[packet.sourceChainId] - targetChain == chains[packet.targetChainId] - escrowAccount == sourceChain.escrow[packet.targetChainId] - IN - [ - sourceChain EXCEPT - !.bank = [@ EXCEPT - ![from] = AddOrUpdateEntry(@, denom, GetDenomFromBank(@, denom) + amount) - ], - !.escrow = [@ EXCEPT - ![packet.targetChainId] = AddOrUpdateEntry(@, denom, GetDenomFromBank(@, denom) - amount) - ], - !.localPackets = [@ EXCEPT - !.pending = @ \ {packet.id}, - !.expired = @ \union {packet.id} - ] - ] - -\* Checks if the packet is not processed by the targetChain -\* @type: (PACKET, CHAIN) => Bool; -IBCTransferTimeoutPacketCondition(packet, targetChain) == - /\ ~relayerRunning - /\ packet.id \notin DOMAIN targetChain.remotePackets[packet.sourceChainId] - -\* Next operator for IBCTransferTimeoutPacket -IBCTransferTimeoutPacketNext == - \E chainId \in CHAIN_IDS: - \E packetId \in chains[chainId].localPackets.pending: - LET - packet == chains[chainId].localPackets.list[packetId] - sourceChain == chains[packet.sourceChainId] - targetChain == chains[packet.targetChainId] - IN - /\ IBCTransferTimeoutPacketCondition(packet, targetChain) - /\ chains' = [chains EXCEPT - ![sourceChain.id] = IBCTransferTimeoutPacket(packet) - ] - /\ action' = [ - name |-> IBCTransferTimeoutPacketAction, - packet |-> packet - ] - /\ outcome' = [name |-> SuccessOutcome] - /\ UNCHANGED relayerRunning - - -\* Mark an IBC packet at sourceChain as success which is processed at targetChain -\* @type: (PACKET) => 
CHAIN; -IBCTransferAcknowledgePacket(packet) == - LET sourceChain == chains[packet.sourceChainId] IN - [ - sourceChain EXCEPT - !.localPackets = [@ EXCEPT - !.pending = @ \ {packet.id}, - !.success = @ \union {packet.id} - ] - ] - -\* Checks if the packet is already processed by the targetChain -\* @type: (PACKET, CHAIN) => Bool; -IBCTransferAcknowledgePacketCondition(packet, targetChain) == - /\ relayerRunning - /\ packet.sourceChainId \in DOMAIN targetChain.remotePackets - /\ packet.id \in DOMAIN targetChain.remotePackets[packet.sourceChainId] - -\* Next operator for IBCTransferAcknowledgePacket -IBCTransferAcknowledgePacketNext == - \E chainId \in CHAIN_IDS: - \E packetId \in chains[chainId].localPackets.pending: - LET - packet == chains[chainId].localPackets.list[packetId] - sourceChain == chains[packet.sourceChainId] - targetChain == chains[packet.targetChainId] - IN - /\ IBCTransferAcknowledgePacketCondition(packet, targetChain) - /\ chains' = [chains EXCEPT - ![sourceChain.id] = IBCTransferAcknowledgePacket(packet) - ] - /\ action' = [ - name |-> IBCTransferAcknowledgePacketAction, - packet |-> packet - ] - /\ outcome' = [name |-> SuccessOutcome] - /\ UNCHANGED relayerRunning - -\* Init predicate -Init == - /\ chains = [chainId \in CHAIN_IDS |-> Genesis(chainId)] - /\ relayerRunning = TRUE - /\ action = [ name |-> NullAction ] - /\ outcome = [ name |-> SuccessOutcome ] - -\* Complete Next predicate -Next == - \/ LocalTransferNext - \/ InterruptRelayNext - \/ RestoreRelayNext - \/ IBCTransferSendPacketNext - \/ IBCTransferReceivePacketNext - \/ IBCTransferTimeoutPacketNext - \/ IBCTransferAcknowledgePacketNext - -==== diff --git a/tools/integration-test/spec/Transfer_typedefs.tla b/tools/integration-test/spec/Transfer_typedefs.tla deleted file mode 100644 index f352ccfb76..0000000000 --- a/tools/integration-test/spec/Transfer_typedefs.tla +++ /dev/null @@ -1,45 +0,0 @@ ----- MODULE Transfer_typedefs ---- - -(* - @typeAlias: ACCOUNT_ID = Int; - @typeAlias: CHAIN_ID = Int; - @typeAlias: PACKET_ID = Int; - - TODO: Fix when to transfer back money to sink zone - @typeAlias: DENOM_ID = CHAIN_ID; - - @typeAlias: PACKET = [ - id: PACKET_ID, - from: ACCOUNT_ID, - sourceChainId: CHAIN_ID, - to: ACCOUNT_ID, - targetChainId: CHAIN_ID, - denom: DENOM_ID, - amount: Int - ]; - - @typeAlias: BANK = DENOM_ID -> Int; - - @typeAlias: CHAIN = [ - id: CHAIN_ID, - - bank: ACCOUNT_ID -> BANK, - supply: BANK, - - localPackets: [ - list: PACKET_ID -> PACKET, - pending: Set(PACKET_ID), - expired: Set(PACKET_ID), - success: Set(PACKET_ID) - ], - - remotePackets: CHAIN_ID -> PACKET_ID -> PACKET, - - escrow: CHAIN_ID -> BANK, - - nextPacketId: PACKET_ID - ]; -*) -typedefs == TRUE - -==== diff --git a/tools/integration-test/src/bin/test_setup_with_binary_channel.rs b/tools/integration-test/src/bin/test_setup_with_binary_channel.rs deleted file mode 100644 index be1c374f54..0000000000 --- a/tools/integration-test/src/bin/test_setup_with_binary_channel.rs +++ /dev/null @@ -1,82 +0,0 @@ -/*! - This is a simple wrapper around [`BinaryChannelTest`] and turn it into - an executable that can be used for manual testing with two test chains - with connected channel being setup. - - When the command is executed, you should see log messages such as - following near the end: - - ```bash - $ cargo run --bin test_setup_with_binary_channel - ... 
- INFO ibc_integration_test::framework::binary::channel: written channel environment to /path/to/ibc-rs/data/test-3742758098/binary-channels.env - WARN ibc_integration_test::util::suspend: suspending the test indefinitely. you can still interact with any spawned chains and relayers - ``` - - The `binary-channels.env` file generated contains the environment variables - that are essential for accessing the test chains. You can source it and - run the relayer commands in a separate terminal such as: - - ```bash - $ source /path/to/ibc-rs/data/test-1790156739/binary-channels.env - $ cargo run --bin hermes -- -c $RELAYER_CONFIG tx raw ft-transfer \ - $CHAIN_ID_B $CHAIN_ID_A $PORT_A $CHANNEL_ID_A 9999 -o 1000 \ - -k $NODE_A_WALLETS_USER1_KEY_ID -d $NODE_A_DENOM - ``` -*/ - -use ibc_relayer::keyring::Store; -use ibc_test_framework::prelude::*; -use std::env; -use std::path::PathBuf; - -struct Test { - store_dir: PathBuf, -} - -impl TestOverrides for Test { - fn modify_test_config(&self, config: &mut TestConfig) { - config.bootstrap_with_random_ids = false; - config.chain_store_dir = self.store_dir.clone(); - } - - fn modify_relayer_config(&self, config: &mut Config) { - for mut chain in config.chains.iter_mut() { - // Modify the key store type to `Store::Test` so that the wallet - // keys are stored to ~/.hermes/keys so that we can use them - // with external relayer commands. - chain.key_store_type = Store::Test; - } - } - - fn should_spawn_supervisor(&self) -> bool { - false - } -} - -impl BinaryChannelTest for Test { - fn run( - &self, - _config: &TestConfig, - _relayer: RelayerDriver, - _chains: ConnectedChains, - _channel: ConnectedChannel, - ) -> Result<(), Error> { - suspend() - } -} - -fn main() -> Result<(), Error> { - let store_dir = env::var("TEST_STORE_DIR").unwrap_or_else(|_| "data/test".to_string()); - - println!( - "Setting up binary channel test environment at {}. (Overridable with $TEST_STORE_DIR)", - store_dir - ); - - println!("Make sure the directory is clean for the setup to succeed"); - - run_binary_channel_test(&Test { - store_dir: store_dir.into(), - }) -} diff --git a/tools/integration-test/src/bin/test_setup_with_ternary_channel.rs b/tools/integration-test/src/bin/test_setup_with_ternary_channel.rs deleted file mode 100644 index 8d153a7c06..0000000000 --- a/tools/integration-test/src/bin/test_setup_with_ternary_channel.rs +++ /dev/null @@ -1,84 +0,0 @@ -/*! - This is a simple wrapper around [`BinaryChannelTest`] and turn it into - an executable that can be used for manual testing with two test chains - with connected channel being setup. - - When the command is executed, you should see log messages such as - following near the end: - - ```bash - $ cargo run --bin test_setup_with_binary_channel - ... - INFO ibc_integration_test::framework::binary::channel: written channel environment to /path/to/ibc-rs/data/test-3742758098/binary-channels.env - WARN ibc_integration_test::util::suspend: suspending the test indefinitely. you can still interact with any spawned chains and relayers - ``` - - The `binary-channels.env` file generated contains the environment variables - that are essential for accessing the test chains. 
You can source it and - run the relayer commands in a separate terminal such as: - - ```bash - $ source /path/to/ibc-rs/data/test-1790156739/binary-channels.env - $ cargo run --bin hermes -- -c $RELAYER_CONFIG tx raw ft-transfer \ - $CHAIN_ID_B $CHAIN_ID_A $PORT_A $CHANNEL_ID_A 9999 -o 1000 \ - -k $NODE_A_WALLETS_USER1_KEY_ID -d $NODE_A_DENOM - ``` -*/ - -use ibc_relayer::keyring::Store; -use ibc_test_framework::prelude::*; -use std::env; -use std::path::PathBuf; - -struct Test { - store_dir: PathBuf, -} - -impl TestOverrides for Test { - fn modify_test_config(&self, config: &mut TestConfig) { - config.bootstrap_with_random_ids = false; - config.chain_store_dir = self.store_dir.clone(); - } - - fn modify_relayer_config(&self, config: &mut Config) { - for mut chain in config.chains.iter_mut() { - // Modify the key store type to `Store::Test` so that the wallet - // keys are stored to ~/.hermes/keys so that we can use them - // with external relayer commands. - chain.key_store_type = Store::Test; - } - } - - fn should_spawn_supervisor(&self) -> bool { - false - } -} - -impl NaryChannelTest<3> for Test { - fn run( - &self, - _config: &TestConfig, - _relayer: RelayerDriver, - _chains: NaryConnectedChains, - _channels: NaryConnectedChannels, - ) -> Result<(), Error> { - suspend() - } -} - -impl PortsOverride<3> for Test {} - -fn main() -> Result<(), Error> { - let store_dir = env::var("TEST_STORE_DIR").unwrap_or_else(|_| "data/test".to_string()); - - println!( - "Setting up binary channel test environment at {}. (Overridable with $TEST_STORE_DIR)", - store_dir - ); - - println!("Make sure the directory is clean for the setup to succeed"); - - run_nary_channel_test(&Test { - store_dir: store_dir.into(), - }) -} diff --git a/tools/integration-test/src/lib.rs b/tools/integration-test/src/lib.rs deleted file mode 100644 index f4b83a25c4..0000000000 --- a/tools/integration-test/src/lib.rs +++ /dev/null @@ -1,7 +0,0 @@ -#[allow(clippy::too_many_arguments)] -#[cfg(test)] -pub mod tests; - -#[cfg(any(all(test, feature = "mbt"), doc))] -#[macro_use] -pub mod mbt; diff --git a/tools/integration-test/src/mbt/README.md b/tools/integration-test/src/mbt/README.md deleted file mode 100644 index f2c0311b28..0000000000 --- a/tools/integration-test/src/mbt/README.md +++ /dev/null @@ -1,7 +0,0 @@ -# MBT for Hermes Integration Test - -Make sure [`apalache-mc`](https://github.com/informalsystems/apalache) is installed and setup properly. Check `apalache-mc version`. 
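The harness in `src/mbt/transfer.rs` invokes Apalache itself and then replays the ITF counterexample traces it produces. As a hand-inspection aid, a minimal sketch using the `InformalTrace` and `State` types from `src/mbt/itf.rs` and `src/mbt/state.rs`; the trace path is a placeholder and the snippet assumes it lives inside this crate with the `mbt` feature enabled:

```rust
// Sketch: deserialize an Apalache counterexample (ITF JSON) and print the
// action and outcome of each state. The path below is a placeholder.
#[cfg(all(test, feature = "mbt"))]
#[test]
fn inspect_counterexample_trace() {
    use crate::mbt::{itf::InformalTrace, state::State};

    let itf_json = std::fs::read_to_string("spec/run/counterexample.itf.json")
        .expect("counterexample trace not found; run apalache-mc first");

    let trace: InformalTrace<State> =
        serde_json::from_str(&itf_json).expect("invalid ITF JSON");

    for state in trace.states {
        println!("action: {:?}, outcome: {:?}", state.action, state.outcome);
    }
}
```

To run the full model-based test suite: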
- -```bash -cargo test -p ibc-integration-test --features mbt mbt::transfer -``` diff --git a/tools/integration-test/src/mbt/handlers.rs b/tools/integration-test/src/mbt/handlers.rs deleted file mode 100644 index 1ad10c628b..0000000000 --- a/tools/integration-test/src/mbt/handlers.rs +++ /dev/null @@ -1,296 +0,0 @@ -use ibc_relayer::util::task::TaskHandle; -use ibc_relayer::worker::client::spawn_refresh_client; - -use ibc_test_framework::bootstrap::binary::chain::bootstrap_foreign_client_pair; -use ibc_test_framework::bootstrap::binary::connection::bootstrap_connection; -use ibc_test_framework::chain::tagged::TaggedChainDriverExt; -use ibc_test_framework::ibc::denom::derive_ibc_denom; -use ibc_test_framework::prelude::*; -use ibc_test_framework::relayer::channel::{assert_eventually_channel_established, init_channel}; -use ibc_test_framework::relayer::connection::{ - assert_eventually_connection_established, init_connection, -}; -use ibc_test_framework::types::binary::client::ClientIdPair; -use ibc_test_framework::types::binary::connection::ConnectedConnection; -use ibc_test_framework::types::tagged::mono::Tagged; - -use super::state::Packet; - -use super::utils::{get_denom, get_wallet, wait_for_client}; - -pub fn setup_chains( - chains: &ConnectedChains, -) -> Result<(), Error> { - { - let _refresh_task_a = spawn_refresh_client(chains.foreign_clients.client_b_to_a.clone()) - .ok_or_else(|| eyre!("expect refresh task spawned"))?; - - let _refresh_task_b = spawn_refresh_client(chains.foreign_clients.client_a_to_b.clone()) - .ok_or_else(|| eyre!("expect refresh task spawned"))?; - - bootstrap_connection(&chains.foreign_clients, Default::default())?; - }; - - wait_for_client(); - - Ok(()) -} - -pub fn local_transfer_handler( - node: Tagged, - source: u64, - target: u64, - denom: u64, - amount: u64, -) -> Result<(), Error> { - let wallets = node.wallets(); - - let source_wallet = get_wallet(&wallets, source); - let target_wallet = get_wallet(&wallets, target); - let denom = get_denom(&node, denom); - - node.chain_driver().local_transfer_token( - &source_wallet, - &target_wallet.address(), - amount, - &denom, - )?; - - Ok(()) -} - -pub fn create_channel( - chain_handle_a: &ChainA, - chain_handle_b: &ChainB, - channel: &mut Option>, - refresh_task_a: &mut Option, - refresh_task_b: &mut Option, -) -> Result<(), Error> { - let port_a = tagged_transfer_port(); - let port_b = tagged_transfer_port(); - - let clients2 = - bootstrap_foreign_client_pair(chain_handle_a, chain_handle_b, Default::default())?; - - *refresh_task_a = Some( - spawn_refresh_client(clients2.client_b_to_a.clone()) - .ok_or_else(|| eyre!("expect refresh task spawned"))?, - ); - - *refresh_task_b = Some( - spawn_refresh_client(clients2.client_a_to_b.clone()) - .ok_or_else(|| eyre!("expect refresh task spawned"))?, - ); - - let (connection_id_b, new_connection_b) = init_connection( - chain_handle_a, - chain_handle_b, - &clients2.client_b_to_a.tagged_client_id(), - &clients2.client_a_to_b.tagged_client_id(), - )?; - - let connection_id_a = assert_eventually_connection_established( - chain_handle_b, - chain_handle_a, - &connection_id_b.as_ref(), - )?; - - let (channel_id_b_2, channel_b_2) = init_channel( - chain_handle_a, - chain_handle_b, - &clients2.client_b_to_a.tagged_client_id(), - &clients2.client_a_to_b.tagged_client_id(), - &connection_id_a.as_ref(), - &connection_id_b.as_ref(), - &port_a.as_ref(), - &port_b.as_ref(), - )?; - - let channel_id_a_2 = assert_eventually_channel_established( - chain_handle_b, - chain_handle_a, - 
&channel_id_b_2.as_ref(), - &port_b.as_ref(), - )?; - - let client_ids = ClientIdPair::new( - clients2.client_b_to_a.tagged_client_id().cloned(), - clients2.client_a_to_b.tagged_client_id().cloned(), - ); - - let new_connected_connection = ConnectedConnection::new( - client_ids, - new_connection_b.flipped(), - connection_id_a, - connection_id_b, - ); - - let connected_channel = ConnectedChannel { - connection: new_connected_connection, - channel: channel_b_2.flipped(), - channel_id_a: channel_id_a_2, - channel_id_b: channel_id_b_2, - port_a, - port_b, - }; - - *channel = Some(connected_channel); - - info!("Channel is created"); - - Ok(()) -} - -pub fn expire_channel( - channel: &mut Option>, - refresh_task_a: &mut Option, - refresh_task_b: &mut Option, -) -> Result<(), Error> { - // dropping the client handler to expire the clients - super::utils::drop(refresh_task_a.take()); - super::utils::drop(refresh_task_b.take()); - - wait_for_client(); - - super::utils::drop(channel.take()); - - info!("Channel expired"); - - Ok(()) -} - -pub fn ibc_transfer_send_packet( - node_source: Tagged, - node_target: Tagged, - channels: &ConnectedChannel, - packet: &Packet, -) -> Result<(), Error> { - let wallets_source = node_source.wallets(); - let wallets_target = node_target.wallets(); - - let wallet_source = get_wallet(&wallets_source, packet.from); - let wallet_target = get_wallet(&wallets_target, packet.to); - let denom_source = get_denom(&node_source, packet.denom); - let amount_source_to_target = packet.amount; - - let (port_source, channel_id_source) = ( - DualTagged::new(channels.port_a.value()), - DualTagged::new(channels.channel_id_a.value()), - ); - - let balance_source = node_source - .chain_driver() - .query_balance(&wallet_source.address(), &denom_source)?; - - info!( - "Sending IBC transfer from chain {} to chain {} with amount of {} {}", - node_source.chain_id(), - node_target.chain_id(), - amount_source_to_target, - denom_source, - ); - - node_source.chain_driver().ibc_transfer_token( - &port_source, - &channel_id_source, - &wallet_source, - &wallet_target.address(), - &denom_source, - amount_source_to_target, - )?; - - node_source.chain_driver().assert_eventual_wallet_amount( - &wallet_source.address(), - balance_source - amount_source_to_target, - &denom_source, - )?; - - Ok(()) -} - -pub fn ibc_transfer_receive_packet( - node_source: Tagged, - node_target: Tagged, - channels: &ConnectedChannel, - packet: &Packet, -) -> Result<(), Error> { - let wallets_target = node_target.wallets(); - - let wallet_target = get_wallet(&wallets_target, packet.to); - let denom_source = get_denom(&node_source, packet.denom); - let amount_source_to_target = packet.amount; - - let (port_target, channel_id_target) = ( - DualTagged::new(channels.port_b.value()), - DualTagged::new(channels.channel_id_b.value()), - ); - - let denom_target = derive_ibc_denom(&port_target, &channel_id_target, &denom_source)?; - - info!( - "Waiting for user on chain {} to receive IBC transferred amount of {} {} (chain {}/{})", - node_target.chain_id(), - amount_source_to_target, - denom_target, - node_source.chain_id(), - denom_source - ); - - node_target.chain_driver().assert_eventual_wallet_amount( - &wallet_target.address(), - amount_source_to_target, - &denom_target.as_ref(), - )?; - - Ok(()) -} - -pub fn ibc_transfer_acknowledge_packet( - node_source: Tagged, - node_target: Tagged, - _channels: &Option>, - packet: &Packet, -) -> Result<(), Error> { - let denom_source = get_denom(&node_source, packet.denom); - let 
amount_source_to_target = packet.amount; - - info!( - "Waiting for user on chain {} to confirm IBC transferred amount of {} {}", - node_source.chain_id(), - amount_source_to_target, - denom_source - ); - - info!( - "Successfully performed IBC transfer from chain {} to chain {}", - node_source.chain_id(), - node_target.chain_id(), - ); - - Ok(()) -} - -pub fn ibc_transfer_expire_packet( - node_source: Tagged, - node_target: Tagged, - _channels: &Option>, - packet: &Packet, -) -> Result<(), Error> { - let denom_source = get_denom(&node_source, packet.denom); - let amount_source_to_target = packet.amount; - - info!( - "Waiting for user on chain {} to get refund of previously IBC transferred amount of {} {}", - node_source.chain_id(), - amount_source_to_target, - denom_source - ); - - info!( - "Successfully performed IBC packet expiry intended from chain {} to chain {}", - node_source.chain_id(), - node_target.chain_id(), - ); - - Ok(()) -} diff --git a/tools/integration-test/src/mbt/itf.rs b/tools/integration-test/src/mbt/itf.rs deleted file mode 100644 index 44bb354ed9..0000000000 --- a/tools/integration-test/src/mbt/itf.rs +++ /dev/null @@ -1,132 +0,0 @@ -use serde::{Deserialize, Deserializer, Serialize}; - -#[derive(Debug, Serialize, Deserialize)] -pub struct Meta { - pub format: String, - #[serde(rename = "format-description")] - pub format_description: String, - pub description: String, -} - -#[derive(Debug, Serialize, Deserialize)] -pub struct InformalTrace { - #[serde(rename = "#meta")] - pub meta: Meta, - pub vars: Vec, - pub states: Vec, -} - -#[derive(Debug, Serialize)] -pub struct Map(pub Vec<(K, V)>); - -impl<'de, K, V> Deserialize<'de> for Map -where - K: Deserialize<'de>, - V: Deserialize<'de>, -{ - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - #[derive(Debug, Deserialize)] - struct Meta { - #[serde(rename = "#map")] - map: Vec<(K, V)>, - } - let s: Meta<_, _> = Deserialize::deserialize(deserializer)?; - Ok(Self(s.map)) - } -} - -#[derive(Debug, Serialize)] -pub struct Set(pub Vec); - -impl<'de, E> Deserialize<'de> for Set -where - E: Deserialize<'de>, -{ - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - #[derive(Debug, Deserialize)] - pub struct Meta { - #[serde(rename = "#set")] - set: Vec, - } - let s: Meta<_> = Deserialize::deserialize(deserializer)?; - Ok(Self(s.set)) - } -} - -mod test { - use super::{Map, Set}; - - #[test] - fn test_empty_set() { - let itf = r##"{ "#set": [] }"##; - let s: Set = serde_json::from_str(itf).unwrap(); - assert!(s.0.is_empty()); - } - - #[test] - fn test_set() { - let itf = r##"{ "#set": [1,2,3] }"##; - let s: Set = serde_json::from_str(itf).unwrap(); - assert_eq!(s.0, vec![1, 2, 3]); - } - - #[test] - fn test_empty_map() { - let itf = r##"{ "#map": [ ] }"##; - let m: Map = serde_json::from_str(itf).unwrap(); - assert!(m.0.is_empty()); - } - - #[test] - #[should_panic] - fn test_singleton_map() { - let itf = r##"{ "#map": [1, 11] }"##; - let m: Map = serde_json::from_str(itf).unwrap(); - assert_eq!(m.0, vec![(1, 11)]); - } - - #[test] - fn test_normal_map() { - let itf = r##"{ "#map": [[1, 11], [2, 22]] }"##; - let m: Map = serde_json::from_str(itf).unwrap(); - assert_eq!(m.0, vec![(1, 11), (2, 22)]); - } - - #[test] - #[cfg(feature = "manual")] - fn parse_itf() { - use super::super::itf::InformalTrace; - use super::super::state::State; - - let itf_path = concat!( - env!("CARGO_MANIFEST_DIR"), - "/spec/example/counterexample.itf.json" - ); - - let itf_json = 
std::fs::read_to_string(itf_path).expect("itf file does not exist"); - - let t: InformalTrace = - serde_json::from_str(&itf_json).expect("deserialization error"); - - for state in t.states { - println!( - "action: {}", - serde_json::to_string_pretty(&state.action).unwrap() - ); - println!( - "outcome: {}", - serde_json::to_string_pretty(&state.outcome).unwrap() - ); - println!( - "chains: {}", - serde_json::to_string_pretty(&state.chains).unwrap() - ); - } - } -} diff --git a/tools/integration-test/src/mbt/mod.rs b/tools/integration-test/src/mbt/mod.rs deleted file mode 100644 index 173f132abb..0000000000 --- a/tools/integration-test/src/mbt/mod.rs +++ /dev/null @@ -1,8 +0,0 @@ -pub mod utils; - -pub mod itf; -pub mod state; - -pub mod handlers; - -pub mod transfer; diff --git a/tools/integration-test/src/mbt/state.rs b/tools/integration-test/src/mbt/state.rs deleted file mode 100644 index 129983c67d..0000000000 --- a/tools/integration-test/src/mbt/state.rs +++ /dev/null @@ -1,85 +0,0 @@ -use serde::{Deserialize, Serialize}; - -use super::itf::{Map, Set}; - -pub type ChainId = u64; -pub type DenomId = ChainId; -pub type AccountId = u64; -pub type PacketId = u64; -pub type Balance = u64; - -#[derive(Debug, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct Packet { - pub id: PacketId, - pub from: AccountId, - pub source_chain_id: ChainId, - pub to: AccountId, - pub target_chain_id: ChainId, - pub denom: DenomId, - pub amount: Balance, -} - -#[derive(Debug, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct LocalPackets { - pub list: Map, - pub pending: Set, - pub expired: Set, - pub success: Set, -} - -#[derive(Debug, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct Chain { - pub id: ChainId, - pub bank: Map>, - pub supply: Map, - pub local_packets: LocalPackets, - pub remote_packets: Map>, - pub escrow: Map>, - pub next_packet_id: PacketId, -} - -#[derive(Debug, Serialize, Deserialize)] -#[serde(tag = "name")] -pub enum Action { - Null, - #[serde(rename_all = "camelCase")] - LocalTransfer { - chain_id: ChainId, - source: AccountId, - target: AccountId, - denom: DenomId, - amount: Balance, - }, - RestoreRelay, - InterruptRelay, - IBCTransferSendPacket { - packet: Packet, - }, - IBCTransferReceivePacket { - packet: Packet, - }, - IBCTransferAcknowledgePacket { - packet: Packet, - }, - IBCTransferTimeoutPacket { - packet: Packet, - }, -} - -#[derive(Debug, Serialize, Deserialize)] -#[serde(tag = "name")] -pub enum Outcome { - Success, - Error, -} - -#[derive(Debug, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct State { - pub chains: Map, - pub action: Action, - pub outcome: Outcome, -} diff --git a/tools/integration-test/src/mbt/transfer.rs b/tools/integration-test/src/mbt/transfer.rs deleted file mode 100644 index 10b62fdee5..0000000000 --- a/tools/integration-test/src/mbt/transfer.rs +++ /dev/null @@ -1,365 +0,0 @@ -use std::io::Write; -use std::panic::{RefUnwindSafe, UnwindSafe}; - -use ibc_relayer::config::{ - Channels as ConfigChannels, Clients as ConfigClients, Connections as ConfigConnections, - ModeConfig, Packets as ConfigPackets, -}; - -use ibc_test_framework::prelude::*; -use ibc_test_framework::types::tagged::mono::Tagged; - -use super::state::{Action, State}; - -use super::itf::InformalTrace; -use super::utils::{get_chain, CLIENT_EXPIRY}; - -const TEST_NAMES: &[&str] = &[ - "LocalTransferInv", - "IBCTransferAcknowledgePacketInv", - "IBCTransferTimeoutPacketInv", -]; -const NUM_TRACES: 
Option<&str> = option_env!("MBT_TRACES"); -const APALACHE: Option<&str> = option_env!("APALACHE"); - -const ITF_TRACE_DIRECTORY: &str = concat!(env!("CARGO_MANIFEST_DIR"), "/data/mbt"); - -fn generate_mbt_traces( - apalache_path: &str, - test_name: &str, - num_traces: usize, -) -> Result, Error> { - let temp_dir = tempfile::TempDir::new()?; - let run_dir = temp_dir.path().join("run"); - let tla_path = concat!(env!("CARGO_MANIFEST_DIR"), "/spec/MC_Transfer.tla"); - let mut cmd = std::process::Command::new(apalache_path); - cmd.arg("check") - .arg("--init=Init") - .arg("--next=Next") - .arg(&format!("--inv={test_name}")) - .arg(&format!("--max-error={num_traces}")) - .arg(&format!( - "--run-dir={}", - run_dir.to_str().expect("no panic") - )) - .arg(&format!( - "--out-dir={}", - temp_dir.path().to_str().expect("no panic") - )) - .arg(tla_path); - let _ = cmd.status().expect("failed to execute process"); - - std::fs::read_dir(run_dir)? - .flatten() - .map(|entry| entry.path()) - .filter(|file_path| file_path.is_file()) - .flat_map(|file_path| { - file_path - .file_name() - .and_then(|file_name| file_name.to_str()) - .and_then(|file_name| { - (file_name != "counterexample.itf.json" - && file_name.starts_with("counterexample") - && file_name.ends_with(".itf.json")) - .then(|| { - let name = format!("{test_name}_{file_name}"); - Ok(( - name, - std::fs::read_to_string(file_path.to_str().expect("should not panic")) - .expect("error while reading counterexample.itf.json"), - )) - }) - }) - }) - .collect() -} - -fn execute_mbt(f: F) -> Result<(), Error> -where - F: FnOnce(Vec) -> Result<(), Error> + UnwindSafe + RefUnwindSafe + Copy, -{ - let apalache = APALACHE.unwrap_or("apalache-mc"); - let num_traces = NUM_TRACES - .unwrap_or("2") - .parse() - .expect("an number for number of traces per test"); - - let success_traces = &format!("{ITF_TRACE_DIRECTORY}/success"); - let failure_traces = &format!("{ITF_TRACE_DIRECTORY}/failure"); - - std::fs::create_dir_all(success_traces)?; - std::fs::create_dir_all(failure_traces)?; - - for test_name in TEST_NAMES { - for (itf_name, itf_json) in generate_mbt_traces(apalache, test_name, num_traces)? { - let itf: InformalTrace = - serde_json::from_str(&itf_json).expect("deserialization error"); - - let result = std::panic::catch_unwind(|| f(itf.states).expect("to fail")); - - let unique_itf_trace_path = if result.is_ok() { - format!("{success_traces}/{itf_name}") - } else { - format!("{failure_traces}/{itf_name}") - }; - - let mut file = std::fs::File::create(unique_itf_trace_path)?; - file.write_all(itf_json.as_bytes())?; - - if let Err(err) = result { - std::panic::resume_unwind(err); - } - } - } - Ok(()) -} - -#[test] -fn test_ibc_transfer() -> Result<(), Error> { - execute_mbt(|trace| run_binary_channel_test(&IbcTransferMBT(trace))) -} - -/** - Test that IBC token transfer can still work with a single - chain that is connected to itself. 
-*/ -#[test] -#[cfg(feature = "manual")] -fn test_self_connected_ibc_transfer() -> Result<(), Error> { - use ibc_test_framework::framework::binary::chain::run_self_connected_binary_chain_test; - use ibc_test_framework::framework::binary::channel::RunBinaryChannelTest; - - execute_mbt(|trace| { - run_self_connected_binary_chain_test(&RunBinaryConnectionTest::new( - &RunBinaryChannelTest::new(&IbcTransferMBT(trace)), - )) - }) -} - -pub struct IbcTransferMBT(Vec); - -impl TestOverrides for IbcTransferMBT { - fn modify_test_config(&self, config: &mut TestConfig) { - config.bootstrap_with_random_ids = false; - } - - fn modify_relayer_config(&self, config: &mut Config) { - config.mode = ModeConfig { - clients: ConfigClients { - enabled: true, - refresh: true, - misbehaviour: true, - }, - connections: ConfigConnections { enabled: true }, - channels: ConfigChannels { enabled: true }, - packets: ConfigPackets { - enabled: true, - clear_interval: 10, - clear_on_start: true, - tx_confirmation: true, - }, - }; - - for mut chain_config in config.chains.iter_mut() { - chain_config.trusting_period = Some(CLIENT_EXPIRY); - } - } - - fn should_spawn_supervisor(&self) -> bool { - false - } -} - -impl BinaryChannelTest for IbcTransferMBT { - fn run( - &self, - _config: &TestConfig, - relayer: RelayerDriver, - chains: ConnectedChains, - channels: ConnectedChannel, - ) -> Result<(), Error> { - // relayer is spawned - let mut supervisor = Some(relayer.spawn_supervisor()?); - - for state in &self.0 { - match &state.action { - Action::Null => { - info!("[Init] Done"); - } - Action::LocalTransfer { - chain_id, - source, - target, - denom, - amount, - } => { - info!("[LocalTransfer] Init"); - let node: Tagged = get_chain(&chains, *chain_id); - super::handlers::local_transfer_handler( - node, *source, *target, *denom, *amount, - )?; - info!("[LocalTransfer] Done"); - } - Action::RestoreRelay => { - if supervisor.is_none() { - supervisor = Some(relayer.spawn_supervisor()?); - } - - info!("[RestoreRelay] Done"); - } - Action::InterruptRelay => { - supervisor.take().expect("one").shutdown(); - - info!("[InterruptRelay] Done"); - } - Action::IBCTransferSendPacket { packet } => { - info!("[IBCTransferSendPacket] {:?}", packet); - - match (packet.source_chain_id, packet.target_chain_id) { - (1, 2) => { - assert!( - super::utils::get_committed_packets_at_src( - &chains.handle_a, - &channels - )? - .is_empty(), - "no packets present" - ); - - super::handlers::ibc_transfer_send_packet( - chains.node_a.as_ref(), - chains.node_b.as_ref(), - &channels, - packet, - )?; - - assert_eq!( - super::utils::get_committed_packets_at_src( - &chains.handle_a, - &channels, - )? - .len(), - 1, - "one packet is sent" - ); - } - (2, 1) => { - assert!( - super::utils::get_committed_packets_at_src( - &chains.handle_b, - &channels.clone().flip() - )? - .is_empty(), - "no packets present" - ); - - super::handlers::ibc_transfer_send_packet( - chains.node_b.as_ref(), - chains.node_a.as_ref(), - &channels.clone().flip(), - packet, - )?; - - assert_eq!( - super::utils::get_committed_packets_at_src( - &chains.handle_b, - &channels.clone().flip() - )? 
- .len(), - 1, - "one packet is present" - ); - } - _ => unreachable!(), - } - - info!("[IBCTransferSendPacket] Done"); - } - Action::IBCTransferReceivePacket { packet } => { - info!("[IBCTransferReceivePacket] {:?}", packet); - match (packet.source_chain_id, packet.target_chain_id) { - (1, 2) => { - super::handlers::ibc_transfer_receive_packet( - chains.node_a.as_ref(), - chains.node_b.as_ref(), - &channels, - packet, - )?; - assert_eq!( - super::utils::get_acknowledged_packets_at_dst( - &chains.handle_b, - &channels.clone().flip() - )? - .len(), - 1, - "one packet is received and sent acknowledgement" - ); - } - (2, 1) => { - super::handlers::ibc_transfer_receive_packet( - chains.node_b.as_ref(), - chains.node_a.as_ref(), - &channels.clone().flip(), - packet, - )?; - assert_eq!( - super::utils::get_acknowledged_packets_at_dst( - &chains.handle_a, - &channels - )? - .len(), - 1, - "one packet is received and sent acknowledgement" - ); - } - _ => unreachable!(), - } - - info!("[IBCTransferReceivePacket] Done"); - } - Action::IBCTransferAcknowledgePacket { packet } => { - info!("[IBCTransferAcknowledgePacket] {:?}", packet); - super::utils::wait_for_client(); - match (packet.source_chain_id, packet.target_chain_id) { - (1, 2) => { - assert!( - super::utils::get_committed_packets_at_src( - &chains.handle_a, - &channels - )? - .is_empty(), - "commitment is completed" - ); - } - (2, 1) => { - assert!( - super::utils::get_committed_packets_at_src( - &chains.handle_b, - &channels.clone().flip() - )? - .is_empty(), - "commitment is completed" - ); - } - _ => unreachable!(), - } - - info!("[IBCTransferAcknowledgePacket] Done"); - } - Action::IBCTransferTimeoutPacket { packet } => { - info!("[IBCTransferTimeoutPacket] {:?}", packet); - - match (packet.source_chain_id, packet.target_chain_id) { - (1, 2) => {} - (2, 1) => {} - _ => unreachable!(), - } - - info!("[IBCTransferTimeoutPacket] Done") - } - } - } - - Ok(()) - } -} diff --git a/tools/integration-test/src/mbt/utils.rs b/tools/integration-test/src/mbt/utils.rs deleted file mode 100644 index 1245f3d6b2..0000000000 --- a/tools/integration-test/src/mbt/utils.rs +++ /dev/null @@ -1,136 +0,0 @@ -use std::thread::sleep; -use std::time::Duration; - -use ibc::core::ics04_channel::packet::Sequence; -use ibc_relayer::chain::requests::{ - QueryPacketAcknowledgementsRequest, QueryPacketCommitmentsRequest, QueryUnreceivedAcksRequest, - QueryUnreceivedPacketsRequest, -}; -use ibc_test_framework::ibc::denom::Denom; -use ibc_test_framework::prelude::*; -use ibc_test_framework::types::tagged::mono::Tagged; - -use super::{ - itf::InformalTrace, - state::{DenomId, State}, -}; - -pub const CLIENT_EXPIRY: Duration = Duration::from_secs(15); - -pub fn get_chain( - chains: &ConnectedChains, - chain_id: u64, -) -> Tagged -where - ChainA: ChainHandle, - ChainB: ChainHandle, - ChainX: ChainHandle, -{ - Tagged::new(match chain_id { - 1 => chains.node_a.value(), - 2 => chains.node_b.value(), - _ => unreachable!(), - }) -} - -pub fn get_wallet<'a, ChainX>( - wallets: &'a Tagged, - user: u64, -) -> Tagged { - match user { - 1 => wallets.user1(), - 2 => wallets.user2(), - _ => unreachable!(), - } -} - -pub fn get_denom<'a, ChainX>( - chain: &'a Tagged, - denom: DenomId, -) -> Tagged { - match denom { - 1 => chain.denom(), - 2 => chain.denom(), - _ => unreachable!(), - } -} - -pub fn wait_for_client() { - let sleep_time = CLIENT_EXPIRY + Duration::from_secs(5); - - info!( - "Sleeping for {} seconds to wait for IBC client to expire", - sleep_time.as_secs() - ); - - 
sleep(sleep_time); -} - -pub fn parse_itf_from_json(itf_path: &str) -> Vec { - let itf_json = std::fs::read_to_string(itf_path).expect("itf file does not exist. did you run `apalache check --inv=Invariant --run-dir=run main.tla` first?"); - - let trace: InformalTrace = - serde_json::from_str(&itf_json).expect("deserialization error"); - - trace.states -} - -pub fn get_unreceived_packets_at_dst( - chain: &ChainA, - channel: &ConnectedChannel, -) -> Result, Error> { - let port_id_a = channel.port_a.value(); - let channel_id_a = channel.channel_id_a.value(); - let request = QueryUnreceivedPacketsRequest { - port_id: port_id_a.clone(), - channel_id: *channel_id_a, - packet_commitment_sequences: Vec::new(), - }; - Ok(chain.query_unreceived_packets(request)?) -} - -pub fn get_committed_packets_at_src( - chain: &ChainA, - channel: &ConnectedChannel, -) -> Result, Error> { - let port_id_a = channel.port_a.value(); - let channel_id_a = channel.channel_id_a.value(); - let request = QueryPacketCommitmentsRequest { - port_id: port_id_a.clone(), - channel_id: *channel_id_a, - pagination: None, - }; - let (sequences, _) = chain.query_packet_commitments(request)?; - Ok(sequences) -} - -pub fn get_unacknowledged_packets_at_src( - chain: &ChainA, - channel: &ConnectedChannel, -) -> Result, Error> { - let port_id_a = channel.port_a.value(); - let channel_id_a = channel.channel_id_a.value(); - let request = QueryUnreceivedAcksRequest { - port_id: port_id_a.clone(), - channel_id: *channel_id_a, - packet_ack_sequences: Vec::new(), - }; - Ok(chain.query_unreceived_acknowledgements(request)?) -} - -pub fn get_acknowledged_packets_at_dst( - chain: &ChainA, - channel: &ConnectedChannel, -) -> Result, Error> { - let port_id_a = channel.port_a.value(); - let channel_id_a = channel.channel_id_a.value(); - let request = QueryPacketAcknowledgementsRequest { - port_id: port_id_a.clone(), - channel_id: *channel_id_a, - pagination: None, - packet_commitment_sequences: Vec::new(), - }; - Ok(chain.query_packet_acknowledgements(request)?.0) -} - -pub fn drop(_: X) {} diff --git a/tools/integration-test/src/tests/clear_packet.rs b/tools/integration-test/src/tests/clear_packet.rs deleted file mode 100644 index 5ffef15349..0000000000 --- a/tools/integration-test/src/tests/clear_packet.rs +++ /dev/null @@ -1,181 +0,0 @@ -use ibc_test_framework::ibc::denom::derive_ibc_denom; -use ibc_test_framework::prelude::*; -use ibc_test_framework::util::random::random_u64_range; - -#[test] -fn test_clear_packet() -> Result<(), Error> { - run_binary_channel_test(&ClearPacketTest) -} - -#[test] -fn test_clear_packet_recovery() -> Result<(), Error> { - run_binary_channel_test(&ClearPacketRecoveryTest) -} - -pub struct ClearPacketTest; -pub struct ClearPacketRecoveryTest; - -impl TestOverrides for ClearPacketTest { - fn modify_relayer_config(&self, config: &mut Config) { - // Disabling clear_on_start should make the relayer not - // relay any packet it missed before starting. 
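        // (Editor's note, not part of the original diff: clear_interval = 0 below
        // additionally disables the periodic clearing pass, so only packets whose
        // send events are observed live by the running supervisor get relayed.)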
- config.mode.packets.clear_on_start = false; - config.mode.packets.clear_interval = 0; - } - - fn should_spawn_supervisor(&self) -> bool { - false - } - - // Unordered channel: will permit gaps in the sequence of relayed packets - fn channel_order(&self) -> Order { - Order::Unordered - } -} - -impl TestOverrides for ClearPacketRecoveryTest { - fn modify_relayer_config(&self, config: &mut Config) { - config.mode.packets.enabled = true; - config.mode.packets.clear_on_start = true; - } - - fn should_spawn_supervisor(&self) -> bool { - false - } -} - -impl BinaryChannelTest for ClearPacketTest { - fn run( - &self, - _config: &TestConfig, - relayer: RelayerDriver, - chains: ConnectedChains, - channel: ConnectedChannel, - ) -> Result<(), Error> { - let denom_a = chains.node_a.denom(); - - let wallet_a = chains.node_a.wallets().user1().cloned(); - let wallet_b = chains.node_b.wallets().user1().cloned(); - - let balance_a = chains - .node_a - .chain_driver() - .query_balance(&wallet_a.address(), &denom_a)?; - - let amount1 = random_u64_range(1000, 5000); - - info!( - "Performing IBC transfer with amount {}, which should *not* be relayed", - amount1 - ); - - chains.node_a.chain_driver().ibc_transfer_token( - &channel.port_a.as_ref(), - &channel.channel_id_a.as_ref(), - &wallet_a.as_ref(), - &wallet_b.address(), - &denom_a, - amount1, - )?; - - sleep(Duration::from_secs(1)); - - // Spawn the supervisor only after the first IBC trasnfer - relayer.with_supervisor(|| { - sleep(Duration::from_secs(1)); - - let amount2 = random_u64_range(1000, 5000); - - info!( - "Performing IBC transfer with amount {}, which should be relayed", - amount2 - ); - - chains.node_a.chain_driver().ibc_transfer_token( - &channel.port_a.as_ref(), - &channel.channel_id_a.as_ref(), - &wallet_a.as_ref(), - &wallet_b.address(), - &denom_a, - amount2, - )?; - - sleep(Duration::from_secs(1)); - - let denom_b = derive_ibc_denom( - &channel.port_b.as_ref(), - &channel.channel_id_b.as_ref(), - &denom_a, - )?; - - // Wallet on chain A should have both amount deducted. 
- chains.node_a.chain_driver().assert_eventual_wallet_amount( - &wallet_a.address(), - balance_a - amount1 - amount2, - &denom_a, - )?; - - // Wallet on chain B should only receive the second IBC transfer - chains.node_b.chain_driver().assert_eventual_wallet_amount( - &wallet_b.address(), - amount2, - &denom_b.as_ref(), - )?; - - Ok(()) - }) - } -} - -impl BinaryChannelTest for ClearPacketRecoveryTest { - fn run( - &self, - _config: &TestConfig, - relayer: RelayerDriver, - chains: ConnectedChains, - channel: ConnectedChannel, - ) -> Result<(), Error> { - let denom_a = chains.node_a.denom(); - let denom_b1 = chains.node_b.denom(); - - let wallet_a = chains.node_a.wallets().user1().cloned(); - let wallet_b = chains.node_b.wallets().user1().cloned(); - - let relayer_wallet_b = chains.node_b.wallets().relayer().cloned(); - - // mess up the cached account sequence in ChainHandle of chain B - chains.node_b.chain_driver().local_transfer_token( - &relayer_wallet_b.as_ref(), - &wallet_b.address(), - 100, - &denom_b1, - )?; - - let amount1 = random_u64_range(1000, 5000); - - chains.node_a.chain_driver().ibc_transfer_token( - &channel.port_a.as_ref(), - &channel.channel_id_a.as_ref(), - &wallet_a.as_ref(), - &wallet_b.address(), - &denom_a, - amount1, - )?; - - let denom_b2 = derive_ibc_denom( - &channel.port_b.as_ref(), - &channel.channel_id_b.as_ref(), - &denom_a, - )?; - - relayer.with_supervisor(|| { - chains.node_b.chain_driver().assert_eventual_wallet_amount( - &wallet_b.address(), - amount1, - &denom_b2.as_ref(), - )?; - - Ok(()) - }) - } -} diff --git a/tools/integration-test/src/tests/client_expiration.rs b/tools/integration-test/src/tests/client_expiration.rs deleted file mode 100644 index bfd08b83c6..0000000000 --- a/tools/integration-test/src/tests/client_expiration.rs +++ /dev/null @@ -1,482 +0,0 @@ -use core::time::Duration; -use std::thread::sleep; - -use ibc::core::ics03_connection::connection::State as ConnectionState; -use ibc::core::ics04_channel::channel::State as ChannelState; -use ibc_relayer::config::{self, Config, ModeConfig}; - -use ibc_test_framework::bootstrap::binary::chain::bootstrap_foreign_client_pair; -use ibc_test_framework::bootstrap::binary::channel::{ - bootstrap_channel_with_chains, bootstrap_channel_with_connection, -}; -use ibc_test_framework::bootstrap::binary::connection::bootstrap_connection; -use ibc_test_framework::ibc::denom::derive_ibc_denom; -use ibc_test_framework::prelude::*; -use ibc_test_framework::relayer::channel::{ - assert_eventually_channel_established, init_channel, query_channel_end, -}; -use ibc_test_framework::relayer::connection::{ - assert_eventually_connection_established, init_connection, query_connection_end, -}; -use ibc_test_framework::relayer::refresh::spawn_refresh_client_tasks; - -// The cosmos ChainHandle handles requests in serial, and a refresh client -// request may get blocked by other operations and cause the refresh to fail -// if the expiry time is too short. -const CLIENT_EXPIRY: Duration = Duration::from_secs(15); - -/** - A test to verify that the connection and channel workers are properly - terminated instead of looping indefinitely when it is not possible to - perform handshake due to the client being expired or frozen. - - Since the test involves long-running background tasks, it has to - be verified manually by inspecting the logs. 
Run the test with - the following command: - - ```bash - RUST_BACKTRACE=0 RUST_LOG=info cargo test \ - -p ibc-integration-test -- test_channel_expiration - ``` - - And you should see error logs such as: - - ```log - ERROR ibc_relayer::connection: failed to establish connection handshake on frozen client: - 0: failed during an operation on client (07-tendermint-0) hosted by chain (ibc-beta-6fe01a9b) - 1: client 07-tendermint-0 on chain id ibc-beta-6fe01a9b is expired or frozen - ERROR ibc_relayer::util::task: aborting task ConnectionWorker(connection::connection-1:ibc-beta-6fe01a9b -> ibc-alpha-43544e24) after encountering fatal error: - 0: Worker failed after 1 retries - INFO ibc_relayer::util::task: task ConnectionWorker(connection::connection-1:ibc-beta-6fe01a9b -> ibc-alpha-43544e24) has terminated - ``` - - The error messages should not repeat more than once. In the original code, - the connection worker would keep retrying and indefinitely flooding - the log with errors. -*/ -#[test] -fn test_channel_expiration() -> Result<(), Error> { - run_binary_chain_test(&ChannelExpirationTest) -} - -#[test] -fn test_packet_expiration() -> Result<(), Error> { - run_binary_chain_test(&PacketExpirationTest) -} - -#[test] -fn test_create_on_expired_client() -> Result<(), Error> { - run_binary_chain_test(&CreateOnExpiredClientTest) -} - -#[cfg(feature = "manual")] -#[test] -fn test_misbehavior_expiration() -> Result<(), Error> { - run_binary_chain_test(&MisbehaviorExpirationTest) -} - -fn wait_for_client_expiry() { - let sleep_time = CLIENT_EXPIRY + Duration::from_secs(5); - - info!( - "Sleeping for {} seconds to wait for IBC client to expire", - sleep_time.as_secs() - ); - - sleep(sleep_time); -} - -pub struct ExpirationTestOverrides; - -pub struct ChannelExpirationTest; - -pub struct PacketExpirationTest; - -pub struct CreateOnExpiredClientTest; - -pub struct MisbehaviorExpirationTest; - -impl TestOverrides for ExpirationTestOverrides { - fn modify_test_config(&self, config: &mut TestConfig) { - config.bootstrap_with_random_ids = false; - } - - fn modify_relayer_config(&self, config: &mut Config) { - config.mode = ModeConfig { - clients: config::Clients { - enabled: true, - refresh: true, - misbehaviour: true, - }, - connections: config::Connections { enabled: true }, - channels: config::Channels { enabled: true }, - packets: config::Packets { - enabled: true, - clear_interval: 10, - clear_on_start: true, - tx_confirmation: true, - }, - }; - - for mut chain_config in config.chains.iter_mut() { - chain_config.trusting_period = Some(CLIENT_EXPIRY); - } - } - - fn should_spawn_supervisor(&self) -> bool { - false - } -} - -impl BinaryChainTest for ChannelExpirationTest { - fn run( - &self, - _config: &TestConfig, - relayer: RelayerDriver, - chains: ConnectedChains, - ) -> Result<(), Error> { - let connection = { - let _refresh_tasks = spawn_refresh_client_tasks(&chains.foreign_clients)?; - - bootstrap_connection(&chains.foreign_clients, Default::default())? 
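            // (Editor's note: `_refresh_tasks` keeps the IBC clients refreshed only
            // while this block is in scope; once it is dropped, refreshing stops so
            // that the clients can expire before the steps that follow.)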
- }; - - wait_for_client_expiry(); - - relayer.with_supervisor(|| { - let port_a = tagged_transfer_port(); - let port_b = tagged_transfer_port(); - - { - info!("Trying to create connection and channel after client is expired"); - - let (connection_id_b, _) = init_connection( - &chains.handle_a, - &chains.handle_b, - &chains.client_id_a(), - &chains.client_id_b(), - )?; - - let (channel_id_b, _) = init_channel( - &chains.handle_a, - &chains.handle_b, - &chains.client_id_a(), - &chains.client_id_b(), - &connection.connection_id_a.as_ref(), - &connection.connection_id_b.as_ref(), - &port_a.as_ref(), - &port_b.as_ref(), - )?; - - info!("Sleeping for 10 seconds to make sure that connection and channel fails to establish"); - - sleep(Duration::from_secs(10)); - - { - let connection_end_b = - query_connection_end(&chains.handle_b, &connection_id_b.as_ref())?; - - assert_eq( - "connection end status should remain init", - connection_end_b.value().state(), - &ConnectionState::Init, - )?; - - assert_eq( - "connection end should not have counterparty", - &connection_end_b.tagged_counterparty_connection_id(), - &None, - )?; - } - - { - let channel_end_b = - query_channel_end(&chains.handle_b, &channel_id_b.as_ref(), &port_b.as_ref())?; - - assert_eq( - "channel end status should remain init", - channel_end_b.value().state(), - &ChannelState::Init, - )?; - - assert_eq( - "channel end should not have counterparty", - &channel_end_b.tagged_counterparty_channel_id(), - &None, - )?; - } - } - - { - info!( - "Trying to create new channel and worker after previous connection worker failed" - ); - - let foreign_clients_2 = bootstrap_foreign_client_pair( - &chains.handle_a, - &chains.handle_b, - Default::default(), - )?; - - // Need to spawn refresh client for new clients to make sure they don't expire - - let _refresh_tasks = spawn_refresh_client_tasks(&foreign_clients_2)?; - - let (connection_id_b, _) = init_connection( - &chains.handle_a, - &chains.handle_b, - &foreign_clients_2.client_b_to_a.tagged_client_id(), - &foreign_clients_2.client_a_to_b.tagged_client_id(), - )?; - - let connection_id_a = assert_eventually_connection_established( - &chains.handle_b, - &chains.handle_a, - &connection_id_b.as_ref(), - )?; - - let (channel_id_b_2, _) = init_channel( - &chains.handle_a, - &chains.handle_b, - &foreign_clients_2.client_b_to_a.tagged_client_id(), - &foreign_clients_2.client_a_to_b.tagged_client_id(), - &connection_id_a.as_ref(), - &connection_id_b.as_ref(), - &port_a.as_ref(), - &port_b.as_ref(), - )?; - - // At this point the misbehavior task may raise error, because it - // try to check on a client update event that is already expired. - // This happens because the misbehavior task is only started when - // there is at least one channel in it, _not_ when the client - // is created. 
- // - // Source of error: - // https://github.com/informalsystems/tendermint-rs/blob/c45ea8c82773de1946f7ae2eece13150f07ca5fe/light-client/src/light_client.rs#L216-L222 - - assert_eventually_channel_established( - &chains.handle_b, - &chains.handle_a, - &channel_id_b_2.as_ref(), - &port_b.as_ref(), - )?; - } - - Ok(()) - }) - } -} - -impl BinaryChainTest for PacketExpirationTest { - fn run( - &self, - _config: &TestConfig, - relayer: RelayerDriver, - chains: ConnectedChains, - ) -> Result<(), Error> { - let channels = { - let _refresh_tasks = spawn_refresh_client_tasks(&chains.foreign_clients)?; - - bootstrap_channel_with_chains( - &chains, - &PortId::transfer(), - &PortId::transfer(), - Default::default(), - Default::default(), - )? - }; - - chains.node_a.chain_driver().ibc_transfer_token( - &channels.port_a.as_ref(), - &channels.channel_id_a.as_ref(), - &chains.node_a.wallets().user1(), - &chains.node_b.wallets().user1().address(), - &chains.node_a.denom(), - 100, - )?; - - wait_for_client_expiry(); - - info!("Packet worker should fail after client expires"); - - relayer.with_supervisor(|| { - let denom_a = chains.node_a.denom(); - - let denom_b = derive_ibc_denom( - &channels.port_b.as_ref(), - &channels.channel_id_b.as_ref(), - &denom_a, - )?; - - sleep(Duration::from_secs(10)); - - let balance_b = chains.node_b.chain_driver().query_balance( - &chains.node_b.wallets().user1().address(), - &denom_b.as_ref(), - )?; - - assert_eq("balance on wallet B should remain zero", &balance_b, &0)?; - - Ok(()) - }) - } -} - -impl BinaryChainTest for CreateOnExpiredClientTest { - fn run( - &self, - _config: &TestConfig, - _relayer: RelayerDriver, - chains: ConnectedChains, - ) -> Result<(), Error> { - // Create a connection before the IBC client expires, so that we can try create - // new channel with the connection after the client expired. - let connection = { - let _refresh_tasks = spawn_refresh_client_tasks(&chains.foreign_clients)?; - - bootstrap_connection(&chains.foreign_clients, Default::default())? - }; - - wait_for_client_expiry(); - - info!("trying to bootstrap connection after IBC client is expired"); - - let res = bootstrap_connection(&chains.foreign_clients, Default::default()); - - match res { - Ok(_) => { - return Err(Error::generic(eyre!( - "expected bootstrap_connection to fail" - ))) - } - Err(e) => { - info!("bootstrap_connection failed with expected error {}", e); - } - } - - sleep(Duration::from_secs(5)); - - info!("trying to bootstrap channel after IBC client is expired"); - - let res = bootstrap_channel_with_connection( - &chains.handle_a, - &chains.handle_b, - connection, - &DualTagged::new(&PortId::transfer()), - &DualTagged::new(&PortId::transfer()), - Default::default(), - ); - - match res { - Ok(_) => { - return Err(Error::generic(eyre!( - "expected bootstrap_channel_with_connection to fail" - ))) - } - Err(e) => { - info!( - "bootstrap_channel_with_connection failed with expected error {}", - e - ); - } - } - - Ok(()) - } -} - -impl BinaryChainTest for MisbehaviorExpirationTest { - fn run( - &self, - _config: &TestConfig, - _relayer: RelayerDriver, - chains: ConnectedChains, - ) -> Result<(), Error> { - /* - This test reproduce the error log when a misbehavior task is - first started. The error arise when `detect_misbehaviour_and_submit_evidence` - is called with `None`, and the initial headers are already expired. 
- - Run this test with the `manual` feature and log level `trace`: - - ```text - $ RUST_BACKTRACE=0 RUST_LOG=trace cargo test --features manual -p ibc-integration-test -- test_misbehavior_expiration - ``` - - and logs such as follow will be shown: - - ```log - TRACE ibc_relayer::foreign_client: [ibc-beta-96682bb3 -> ibc-alpha-4095d39d:07-tendermint-0] checking misbehaviour for consensus state heights (first 50 shown here): 0-14, 0-9, 0-5, 0-3, total: 4 - TRACE ibc_relayer::light_client::tendermint: light client verification trusted=0-9 target=0-14 - TRACE ibc_relayer::light_client::tendermint: light client verification trusted=0-5 target=0-9 - TRACE ibc_relayer::light_client::tendermint: light client verification trusted=0-3 target=0-5 - WARN ibc_relayer::foreign_client: [ibc-beta-96682bb3 -> ibc-alpha-4095d39d:07-tendermint-0] misbehaviour checking result: - 0: error raised while checking for misbehaviour evidence: failed to check misbehaviour for 07-tendermint-0 at consensus height 0-5 - 1: Light client error for RPC address ibc-beta-96682bb3 - 2: - 2: 0: trusted state outside of trusting period - 2021-12-21T21:00:23.796731Z INFO ibc_integration_test::tests::client_expiration: misbehavior result: ValidClient - ``` - */ - - { - let _refresh_tasks = spawn_refresh_client_tasks(&chains.foreign_clients)?; - - // build a client header that will be expired - chains - .foreign_clients - .client_b_to_a - .build_latest_update_client_and_send() - .map_err(handle_generic_error)?; - - info!("waiting for the initial client header to expire, while keeping the IBC client refreshed"); - - wait_for_client_expiry(); - } - - // Calling detect_misbehaviour_and_submit_evidence(None) will always produce error logs - for _ in 0..3 { - let misbehavior_result = chains - .foreign_clients - .client_b_to_a - .detect_misbehaviour_and_submit_evidence(None); - - info!("misbehavior result: {:?}", misbehavior_result); - } - - suspend() - } -} - -impl HasOverrides for CreateOnExpiredClientTest { - type Overrides = ExpirationTestOverrides; - - fn get_overrides(&self) -> &ExpirationTestOverrides { - &ExpirationTestOverrides - } -} - -impl HasOverrides for ChannelExpirationTest { - type Overrides = ExpirationTestOverrides; - - fn get_overrides(&self) -> &ExpirationTestOverrides { - &ExpirationTestOverrides - } -} - -impl HasOverrides for PacketExpirationTest { - type Overrides = ExpirationTestOverrides; - - fn get_overrides(&self) -> &ExpirationTestOverrides { - &ExpirationTestOverrides - } -} - -impl HasOverrides for MisbehaviorExpirationTest { - type Overrides = ExpirationTestOverrides; - - fn get_overrides(&self) -> &ExpirationTestOverrides { - &ExpirationTestOverrides - } -} diff --git a/tools/integration-test/src/tests/client_settings.rs b/tools/integration-test/src/tests/client_settings.rs deleted file mode 100644 index 6aa2074552..0000000000 --- a/tools/integration-test/src/tests/client_settings.rs +++ /dev/null @@ -1,121 +0,0 @@ -use std::time::Duration; - -use ibc::core::ics02_client::trust_threshold::TrustThreshold; - -use ibc::clients::ics07_tendermint::client_state::ClientState as TendermintClientState; -use ibc::core::ics02_client::client_state::AnyClientState; -use ibc::Height; -use ibc_relayer::chain::requests::{IncludeProof, QueryClientStateRequest}; -use ibc_relayer::foreign_client::CreateOptions; - -use ibc_test_framework::prelude::*; - -/// A test to exercise default foreign client settings. 
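/// (Editor's note, inferred from the assertions in `ClientDefaultsTest` below rather
/// than from the relayer source: the expected `max_clock_drift` of a client appears
/// to be the sum of both chains' `clock_drift` plus the host chain's `max_block_time`,
/// i.e. 3s + 6s + 15s = 24s for the client hosted on chain B, and 6s + 3s + 5s = 14s
/// for the client hosted on chain A.)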
-#[test] -fn test_client_defaults() -> Result<(), Error> { - run_binary_chain_test(&ClientDefaultsTest) -} - -/// A test to exercise customization of foreign client settings. -#[test] -fn test_client_options() -> Result<(), Error> { - run_binary_chain_test(&ClientOptionsTest) -} - -struct ClientDefaultsTest; - -struct ClientOptionsTest; - -impl TestOverrides for ClientDefaultsTest { - fn modify_relayer_config(&self, config: &mut Config) { - config.chains[0].clock_drift = Duration::from_secs(3); - config.chains[0].max_block_time = Duration::from_secs(5); - config.chains[0].trusting_period = Some(Duration::from_secs(120_000)); - config.chains[0].trust_threshold = TrustThreshold::new(13, 23).unwrap().try_into().unwrap(); - - config.chains[1].clock_drift = Duration::from_secs(6); - config.chains[1].max_block_time = Duration::from_secs(15); - config.chains[1].trusting_period = Some(Duration::from_secs(340_000)); - config.chains[1].trust_threshold = TrustThreshold::TWO_THIRDS.try_into().unwrap(); - } -} - -impl BinaryChainTest for ClientDefaultsTest { - fn run( - &self, - _config: &TestConfig, - _relayer: RelayerDriver, - chains: ConnectedChains, - ) -> Result<(), Error> { - let client_id = chains.foreign_clients.client_a_to_b.id(); - let state = query_client_state(chains.handle_b, client_id)?; - assert_eq!(state.max_clock_drift, Duration::from_secs(24)); - assert_eq!(state.trusting_period, Duration::from_secs(120_000)); - assert_eq!(state.trust_level, TrustThreshold::new(13, 23).unwrap()); - - let client_id = chains.foreign_clients.client_b_to_a.id(); - let state = query_client_state(chains.handle_a, client_id)?; - assert_eq!(state.max_clock_drift, Duration::from_secs(14)); - assert_eq!(state.trusting_period, Duration::from_secs(340_000)); - assert_eq!(state.trust_level, TrustThreshold::TWO_THIRDS); - Ok(()) - } -} - -impl TestOverrides for ClientOptionsTest { - fn client_options_a_to_b(&self) -> CreateOptions { - CreateOptions { - max_clock_drift: Some(Duration::from_secs(3)), - trusting_period: Some(Duration::from_secs(120_000)), - trust_threshold: Some(TrustThreshold::new(13, 23).unwrap()), - } - } - - fn client_options_b_to_a(&self) -> CreateOptions { - CreateOptions { - max_clock_drift: Some(Duration::from_secs(6)), - trusting_period: Some(Duration::from_secs(340_000)), - trust_threshold: Some(TrustThreshold::TWO_THIRDS), - } - } -} - -impl BinaryChainTest for ClientOptionsTest { - fn run( - &self, - _config: &TestConfig, - _relayer: RelayerDriver, - chains: ConnectedChains, - ) -> Result<(), Error> { - let client_id = chains.foreign_clients.client_a_to_b.id(); - let state = query_client_state(chains.handle_b, client_id)?; - assert_eq!(state.max_clock_drift, Duration::from_secs(3)); - assert_eq!(state.trusting_period, Duration::from_secs(120_000)); - assert_eq!(state.trust_level, TrustThreshold::new(13, 23).unwrap()); - - let client_id = chains.foreign_clients.client_b_to_a.id(); - let state = query_client_state(chains.handle_a, client_id)?; - assert_eq!(state.max_clock_drift, Duration::from_secs(6)); - assert_eq!(state.trusting_period, Duration::from_secs(340_000)); - assert_eq!(state.trust_level, TrustThreshold::TWO_THIRDS); - Ok(()) - } -} - -fn query_client_state( - handle: Chain, - id: &ClientId, -) -> Result { - let (state, _) = handle.query_client_state( - QueryClientStateRequest { - client_id: id.clone(), - height: Height::zero(), - }, - IncludeProof::No, - )?; - #[allow(unreachable_patterns)] - match state { - AnyClientState::Tendermint(state) => Ok(state), - _ => 
unreachable!("unexpected client state type"), - } -} diff --git a/tools/integration-test/src/tests/connection_delay.rs b/tools/integration-test/src/tests/connection_delay.rs deleted file mode 100644 index ea8be39ac8..0000000000 --- a/tools/integration-test/src/tests/connection_delay.rs +++ /dev/null @@ -1,106 +0,0 @@ -use core::time::Duration; -use time::OffsetDateTime; - -use ibc_test_framework::ibc::denom::derive_ibc_denom; -use ibc_test_framework::prelude::*; -use ibc_test_framework::util::random::random_u64_range; - -const CONNECTION_DELAY: Duration = Duration::from_secs(10); - -#[test] -fn test_connection_delay() -> Result<(), Error> { - run_binary_channel_test(&ConnectionDelayTest) -} - -pub struct ConnectionDelayTest; - -impl TestOverrides for ConnectionDelayTest { - fn connection_delay(&self) -> Duration { - CONNECTION_DELAY - } -} - -impl BinaryChannelTest for ConnectionDelayTest { - fn run( - &self, - _config: &TestConfig, - relayer: RelayerDriver, - chains: ConnectedChains, - channel: ConnectedChannel, - ) -> Result<(), Error> { - relayer.with_supervisor(|| { - let denom_a = chains.node_a.denom(); - - let wallet_a = chains.node_a.wallets().user1().cloned(); - let wallet_b = chains.node_b.wallets().user1().cloned(); - - let balance_a = chains - .node_a - .chain_driver() - .query_balance(&wallet_a.address(), &denom_a)?; - - let a_to_b_amount = random_u64_range(1000, 5000); - - info!( - "Sending IBC transfer from chain {} to chain {} with amount of {} {}", - chains.chain_id_a(), - chains.chain_id_b(), - a_to_b_amount, - denom_a - ); - - chains.node_a.chain_driver().ibc_transfer_token( - &channel.port_a.as_ref(), - &channel.channel_id_a.as_ref(), - &wallet_a.as_ref(), - &wallet_b.address(), - &denom_a, - a_to_b_amount, - )?; - - let time1 = OffsetDateTime::now_utc(); - - let denom_b = derive_ibc_denom( - &channel.port_b.as_ref(), - &channel.channel_id_b.as_ref(), - &denom_a, - )?; - - info!( - "Waiting for user on chain B to receive IBC transferred amount of {} {}", - a_to_b_amount, denom_b - ); - - chains.node_a.chain_driver().assert_eventual_wallet_amount( - &wallet_a.address(), - balance_a - a_to_b_amount, - &denom_a, - )?; - - chains.node_b.chain_driver().assert_eventual_wallet_amount( - &wallet_b.address(), - a_to_b_amount, - &denom_b.as_ref(), - )?; - - info!( - "successfully performed IBC transfer from chain {} to chain {}", - chains.chain_id_a(), - chains.chain_id_b(), - ); - - let time2 = OffsetDateTime::now_utc(); - - assert_gt( - &format!( - "Expect IBC transfer to only be successfull after {}s", - CONNECTION_DELAY.as_secs() - ), - &(time2 - time1).try_into().unwrap(), - &CONNECTION_DELAY, - )?; - - Ok(()) - }) - } -} diff --git a/tools/integration-test/src/tests/example.rs b/tools/integration-test/src/tests/example.rs deleted file mode 100644 index e8eb71f9eb..0000000000 --- a/tools/integration-test/src/tests/example.rs +++ /dev/null @@ -1,136 +0,0 @@ -/*! - A quick demo of how a test with full setup can be written. - - ```rust - # use ibc_integration_test::prelude::*; - - #[test] - pub fn example_test() -> Result<(), Error> { - run_binary_channel_test(&ExampleTest) - } - - pub struct ExampleTest; - - impl TestOverrides for ExampleTest {} - - impl BinaryChannelTest for ExampleTest { - fn run( - &self, - _config: &TestConfig, - _relayer: RelayerDriver, - _chains: ConnectedChains, - _channel: ConnectedChannel, - ) -> Result<(), Error> { - suspend() - } - } - ``` - - We first define an empty struct [`ExampleTest`] to represent our test case. 
-    We then implement the
-    [`BinaryChannelTest`](ibc_test_framework::framework::binary::channel::BinaryChannelTest)
-    trait so that the test framework sets up the relayer with two chains
-    running together with connected channels.
-
-    Inside our test, we simply call the [`suspend`] function to
-    suspend the test indefinitely. While this means that the test would never
-    pass, we can use this as a starting point to do _manual testing_ with the
-    chains that have been set up by the test, and figure out how to continue
-    writing our test.
-
-    We also implement [`TestOverrides`] for [`ExampleTest`] with an empty body.
-    The [`TestOverrides`] trait allows us to override some behavior of the
-    test case by implementing methods that override the default behavior.
-
-    Finally, we define the `example_test` function with the `#[test]` pragma
-    as the entry point for Rust to execute the test. We call the runner function
-    [`run_binary_channel_test`](ibc_test_framework::framework::binary::channel::run_binary_channel_test),
-    which accepts a reference to any struct implementing
-    [`BinaryChannelTest`](ibc_test_framework::framework::binary::channel::BinaryChannelTest)
-    and runs the test for us.
-
-    By convention, the tests written are placed in the [`tests`](ibc_test_framework::tests)
-    module. We can then run the test on the command line as follows:
-
-    ```bash
-    RUST_LOG=info RUST_BACKTRACE=1 \
-        cargo test -p ibc-integration-test --features example -- --test-threads=1 \
-        example_test
-    ```
-
-    We use the environment variables `RUST_LOG` to control the log level,
-    and `RUST_BACKTRACE` to display backtraces when errors occur.
-    The test flag `--test-threads=1` is set so that Rust does not run multiple
-    tests in parallel, as that can make it difficult to follow the logs.
-    See [TestConfig](ibc_test_framework::types::config::TestConfig) for more information
-    about configuring how the tests should be run.
-
-    For this example, we disable the test from running by default, since
-    it uses [`suspend`] and is never going to pass. Here we explicitly pass
-    `--features example` so that the `example` feature is activated and this
-    test will run. Finally, we specify the name of the test, which in our case
-    is `example_test`, so that only that test is run.
-
-    After starting the test, we should see logs such as the following:
-
-    ```text
-    $ cargo test -p ibc-integration-test --features example -- --nocapture --test-threads=1 example_test
-    ...
-    INFO created new chain/client/connection/channel from ibc-alpha-c4aed8f9/07-tendermint-4/connection-6/channel-1 to ibc-beta-fcb867bb/07-tendermint-6/connection-1/channel-6
-    INFO written channel environment to /path/to/ibc-rs/tools/integration-test/data/test-1094235493/binary-channels.env
-    WARN suspending the test indefinitely. you can still interact with any spawned chains and relayers
-    ...
-    ```
-
-    Near the end of the logs, you can find the path to the environment
-    variables exported by the test. You can also find a warning stating that
-    the test has been suspended indefinitely.
-    We can also notice that the chains are created with random IDs and
-    listen on random ports.
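    (Editor's sketch, not part of the original example; the overrides shown here are
    the same ones already used by other tests in this crate.) If the randomly
    generated identifiers get in the way of manual testing, a test can opt out of
    them, and of the automatically spawned supervisor, by overriding methods of
    [`TestOverrides`], for example:

    ```rust
    # use ibc_integration_test::prelude::*;
    # pub struct ExampleTest;
    impl TestOverrides for ExampleTest {
        // Use deterministic client/connection/channel identifiers
        // instead of randomly generated ones.
        fn modify_test_config(&self, config: &mut TestConfig) {
            config.bootstrap_with_random_ids = false;
        }

        // Do not spawn the relayer supervisor automatically; the test
        // decides when (or whether) relaying should start.
        fn should_spawn_supervisor(&self) -> bool {
            false
        }
    }
    ```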
-
-    Using the log information, we can, for example, use `gaiad` to query
-    the balance of the accounts created by the test by running something like:
-
-    ```bash
-    $ source /path/to/ibc-rs/tools/integration-test/data/test-1094235493/binary-channels.env
-    $ gaiad --home "$NODE_A_HOME" --node $NODE_A_RPC_ADDR query bank balances $NODE_A_WALLETS_USER1_ADDRESS
-    balances:
-    - amount: "6902395390297"
-      denom: coin95143d31
-    - amount: "6902395390297"
-      denom: stake
-    pagination:
-      next_key: null
-      total: "0"
-    ```
-
-    The test data and configuration files are stored at the absolute path shown
-    in the log, which looks something like
-    `/path/to/ibc-rs/tools/integration-test/data/test-1094235493`.
-    The sub-directory `test-1094235493` is randomly generated at the beginning
-    of a test case, so that all data related to that test case belongs to the
-    same directory.
-*/
-
-use ibc_test_framework::prelude::*;
-
-#[test]
-pub fn example_test() -> Result<(), Error> {
-    run_binary_channel_test(&ExampleTest)
-}
-
-pub struct ExampleTest;
-
-impl TestOverrides for ExampleTest {}
-
-impl BinaryChannelTest for ExampleTest {
-    fn run(
-        &self,
-        _config: &TestConfig,
-        _relayer: RelayerDriver,
-        _chains: ConnectedChains,
-        _channel: ConnectedChannel,
-    ) -> Result<(), Error> {
-        suspend()
-    }
-}
diff --git a/tools/integration-test/src/tests/execute_schedule.rs b/tools/integration-test/src/tests/execute_schedule.rs
deleted file mode 100644
index 96f374e609..0000000000
--- a/tools/integration-test/src/tests/execute_schedule.rs
+++ /dev/null
@@ -1,92 +0,0 @@
-//! This test ensures that the `RelayPath::execute_schedule` method does not
-//! drop any scheduled `OperationalData` when events associated with a prior
-//! piece of operational data fail to send. Subsequent pieces of operational
-//! data that were scheduled should be re-queued and not dropped.
-//!
-//! In order to test this behavior, the test manually relays a batch (i.e. at least
-//! 2) of IBC transfers from chain A to chain B. Chain B is then shut down in order to
-//! force the batch of messages (in the form of their associated pieces of operational
-//! data) to be queued up again for re-submission.
-//!
-//! It is expected that the first message of the batch gets dropped (i.e. it is not
-//! later found in the pending queue), but all of the subsequent messages should
-//! exist in the pending queue.
-
-use ibc_test_framework::prelude::*;
-use ibc_test_framework::util::random::random_u64_range;
-
-use ibc_relayer::link::{Link, LinkParameters};
-
-/// The number of messages to be sent in a batch contained in a piece of operational data.
-const BATCH_SIZE: usize = 10; - -#[test] -fn test_execute_schedule() -> Result<(), Error> { - run_binary_channel_test(&ExecuteScheduleTest) -} - -pub struct ExecuteScheduleTest; - -impl TestOverrides for ExecuteScheduleTest { - fn should_spawn_supervisor(&self) -> bool { - false - } -} - -impl BinaryChannelTest for ExecuteScheduleTest { - fn run( - &self, - _config: &TestConfig, - _relayer: RelayerDriver, - chains: ConnectedChains, - channel: ConnectedChannel, - ) -> Result<(), Error> { - let amount1 = random_u64_range(1000, 5000); - - let chain_a_link_opts = LinkParameters { - src_port_id: channel.port_a.clone().into_value(), - src_channel_id: channel.channel_id_a.into_value(), - }; - - let chain_a_link = Link::new_from_opts( - chains.handle_a().clone(), - chains.handle_b().clone(), - chain_a_link_opts, - true, - )?; - - let mut relay_path_a_to_b = chain_a_link.a_to_b; - - // Construct `BATCH_SIZE` pieces of operational data and queue them up to be sent to chain B. - for i in 0..BATCH_SIZE { - chains.node_a.chain_driver().ibc_transfer_token( - &channel.port_a.as_ref(), - &channel.channel_id_a.as_ref(), - &chains.node_a.wallets().user1(), - &chains.node_b.wallets().user1().address(), - &chains.node_a.denom(), - amount1, - )?; - - relay_path_a_to_b.schedule_packet_clearing(None)?; - - info!("Performing IBC send packet with a token transfer #{} from chain A to be received by chain B", i); - } - - // We should see that all of the events in the batch are queued up to be sent to chain B. - assert_eq!(relay_path_a_to_b.dst_operational_data.len(), BATCH_SIZE); - - chains.node_b.value().kill()?; - - // With chain B inactive, if we attempt to send the batch of messages, we expect to see - // `BATCH_SIZE` - 1 messages in the batch since the initial event should have failed to - // be relayed and was thus dropped. The subsequent messages in the batch should have all - // been re-added to the pending queue. 
- match relay_path_a_to_b.execute_schedule() { - Ok(_) => panic!("Expected an error when relaying tx from A to B"), - Err(_) => assert_eq!(relay_path_a_to_b.dst_operational_data.len(), BATCH_SIZE - 1), - } - - Ok(()) - } -} diff --git a/tools/integration-test/src/tests/ica.rs b/tools/integration-test/src/tests/ica.rs deleted file mode 100644 index 0e2db7aec5..0000000000 --- a/tools/integration-test/src/tests/ica.rs +++ /dev/null @@ -1,266 +0,0 @@ -use std::str::FromStr; - -use serde::Serialize; - -use ibc::core::ics04_channel::channel::State; -use ibc_relayer::config::{ - filter::{ChannelFilters, FilterPattern}, - PacketFilter, -}; - -use ibc_test_framework::{ - ibc::denom::Denom, - prelude::*, - relayer::channel::{assert_eventually_channel_established, query_channel_end}, -}; - -#[test] -fn test_ica_filter_default() -> Result<(), Error> { - run_binary_connection_test(&IcaFilterTestAllow::new(PacketFilter::default())) -} - -#[test] -fn test_ica_filter_allow() -> Result<(), Error> { - run_binary_connection_test(&IcaFilterTestAllow::new(PacketFilter::Allow( - ChannelFilters::new(vec![( - FilterPattern::Wildcard("ica*".parse().unwrap()), - FilterPattern::Wildcard("*".parse().unwrap()), - )]), - ))) -} - -pub struct IcaFilterTestAllow { - packet_filter: PacketFilter, -} - -impl IcaFilterTestAllow { - pub fn new(packet_filter: PacketFilter) -> Self { - Self { packet_filter } - } -} - -impl TestOverrides for IcaFilterTestAllow { - // Use `icad` binary and deterministic identifiers for clients, connections, and channels - fn modify_test_config(&self, config: &mut TestConfig) { - config.bootstrap_with_random_ids = false; - } - - // Enable channel workers and allow relaying on ICA channels - fn modify_relayer_config(&self, config: &mut Config) { - config.mode.channels.enabled = true; - - for chain in &mut config.chains { - chain.packet_filter = self.packet_filter.clone(); - } - } - - // Allow MsgSend messages over ICA - fn modify_genesis_file(&self, genesis: &mut serde_json::Value) -> Result<(), Error> { - use serde_json::Value; - - let allow_messages = genesis - .get_mut("app_state") - .and_then(|app_state| app_state.get_mut("interchainaccounts")) - .and_then(|ica| ica.get_mut("host_genesis_state")) - .and_then(|state| state.get_mut("params")) - .and_then(|params| params.get_mut("allow_messages")) - .and_then(|allow_messages| allow_messages.as_array_mut()); - - if let Some(allow_messages) = allow_messages { - allow_messages.push(Value::String("/cosmos.bank.v1beta1.MsgSend".to_string())); - Ok(()) - } else { - Err(Error::generic(eyre!("failed to update genesis file"))) - } - } -} - -impl BinaryConnectionTest for IcaFilterTestAllow { - fn run( - &self, - _config: &TestConfig, - relayer: RelayerDriver, - chains: ConnectedChains, - connection: ConnectedConnection, - ) -> Result<(), Error> { - // Register an interchain account on behalf of - // controller wallet `user1` where the counterparty chain is the interchain accounts host. - // Then spawn the supervisor. - let (_handle, wallet, channel_id, port_id) = - register_interchain_account(&relayer, &chains, &connection)?; - - // Check that the corresponding ICA channel is eventually established. - let _counterparty_channel_id = assert_eventually_channel_established( - chains.handle_a(), - chains.handle_b(), - &channel_id.as_ref(), - &port_id.as_ref(), - )?; - - // Query the controller chain for the address of the ICA wallet on the host chain. 
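        // (Editor's note: `query_interchain_account` is assumed to wrap the
        // interchain-accounts query of the `icad` binary, which resolves the ICA
        // address that the host chain derives from the controller's connection
        // and owner address.)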
- let ica_address = chains - .node_a - .chain_driver() - .query_interchain_account(&wallet.address(), &connection.connection_id_a.as_ref())?; - - let stake_denom: MonoTagged = MonoTagged::new(Denom::base("stake")); - - // Query the interchain account balance on the host chain. It should be empty. - let ica_balance = chains - .node_b - .chain_driver() - .query_balance(&ica_address.as_ref(), &stake_denom.as_ref())?; - - assert_eq("balance of ICA account should be 0", &ica_balance, &0)?; - - // Send funds to the interchain account. - let ica_fund = 42000; - - chains.node_b.chain_driver().local_transfer_token( - &chains.node_b.wallets().user1(), - &ica_address.as_ref(), - ica_fund, - &stake_denom.as_ref(), - )?; - - // Check that the balance has been updated. - chains.node_b.chain_driver().assert_eventual_wallet_amount( - &ica_address.as_ref(), - ica_fund, - &stake_denom.as_ref(), - )?; - - #[derive(Serialize)] - struct MsgSend { - #[serde(rename = "@type")] - tpe: String, - from_address: String, - to_address: String, - amount: Vec, - } - - #[derive(Serialize)] - struct Amount { - denom: String, - amount: String, - } - - let amount = 12345; - - let msg = MsgSend { - tpe: "/cosmos.bank.v1beta1.MsgSend".to_string(), - from_address: ica_address.to_string(), - to_address: chains.node_a.wallets().user2().address().to_string(), - amount: vec![Amount { - denom: stake_denom.to_string(), - amount: amount.to_string(), - }], - }; - - // Send funds from the ICA account to the `user2` account on the host chain on behalf - // of the `user1` account on the controller chain. - chains.node_a.chain_driver().interchain_submit( - &wallet.address(), - &connection.connection_id_a.as_ref(), - &msg, - )?; - - // Check that the ICA account's balance has been debited the sent amount. - chains.node_b.chain_driver().assert_eventual_wallet_amount( - &ica_address.as_ref(), - ica_fund - amount, - &stake_denom.as_ref(), - )?; - - Ok(()) - } -} - -#[test] -fn test_ica_filter_deny() -> Result<(), Error> { - run_binary_connection_test(&IcaFilterTestDeny) -} - -pub struct IcaFilterTestDeny; - -impl TestOverrides for IcaFilterTestDeny { - // Use deterministic identifiers for clients, connections, and channels - fn modify_test_config(&self, config: &mut TestConfig) { - config.bootstrap_with_random_ids = false; - } - - // Enable channel workers and deny ICA ports - fn modify_relayer_config(&self, config: &mut Config) { - config.mode.channels.enabled = true; - - for chain in &mut config.chains { - chain.packet_filter = PacketFilter::Deny(ChannelFilters::new(vec![( - FilterPattern::Wildcard("ica*".parse().unwrap()), - FilterPattern::Wildcard("*".parse().unwrap()), - )])); - } - } -} - -impl BinaryConnectionTest for IcaFilterTestDeny { - fn run( - &self, - _config: &TestConfig, - relayer: RelayerDriver, - chains: ConnectedChains, - connection: ConnectedConnection, - ) -> Result<(), Error> { - // Register an interchain account on behalf of controller wallet `user1` - // where the counterparty chain is the interchain accounts host. - // Then spawn the supervisor. - let (_handle, _, channel_id, port_id) = - register_interchain_account(&relayer, &chains, &connection)?; - - // Wait a bit, the relayer will refuse to complete the channel handshake - // because the port is explicitly disallowed by the filter. 
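        // (Editor's note: in user-facing relayer configuration, the filter set above
        // in modify_relayer_config is assumed to correspond to something like
        //
        //     [chains.packet_filter]
        //     policy = 'deny'
        //     list = [['ica*', '*']]
        //
        // so the handshake on the `icacontroller-*` port is never completed.)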
- std::thread::sleep(Duration::from_secs(30)); - - let channel_end = - query_channel_end(chains.handle_a(), &channel_id.as_ref(), &port_id.as_ref())?; - - // Check that the channel is left in state Init - assert_eq( - "channel end should still be in state Init", - channel_end.value().state(), - &State::Init, - ) - } -} - -#[allow(clippy::type_complexity)] -fn register_interchain_account( - relayer: &RelayerDriver, - chains: &ConnectedChains, - connection: &ConnectedConnection, -) -> Result< - ( - SupervisorHandle, - MonoTagged, - TaggedChannelId, - TaggedPortId, - ), - Error, -> { - let wallet = chains.node_a.wallets().user1().cloned(); - let handle = relayer.spawn_supervisor()?; - - chains - .node_a - .chain_driver() - .register_interchain_account(&wallet.address(), &connection.connection_id_a.as_ref())?; - - let channel_id: TaggedChannelId = - TaggedChannelId::new("channel-0".parse().unwrap()); - - let icacontroller = - PortId::from_str(&format!("icacontroller-{}", wallet.address().value())).unwrap(); - - let port_id: TaggedPortId = TaggedPortId::new(icacontroller); - - Ok((handle, wallet, channel_id, port_id)) -} diff --git a/tools/integration-test/src/tests/manual/mod.rs b/tools/integration-test/src/tests/manual/mod.rs deleted file mode 100644 index fbf4121f9b..0000000000 --- a/tools/integration-test/src/tests/manual/mod.rs +++ /dev/null @@ -1,14 +0,0 @@ -/*! - Tests that require manual human verification. - - Currently there are tests that require manual human observation of the - relayer's behavior through the log messages to decide whether the test - is working as expected. The test cases behind the [`manual`](self) module - are only enabled when the `"manual"` feature flag is enabled manually. - - Any tests that require manual verification should be placed here. - It is also fine to use [`suspend`](ibc_test_framework::util::suspend::suspend) - inside the manual test, as the CI is not going to run the test. -*/ - -pub mod simulation; diff --git a/tools/integration-test/src/tests/manual/simulation.rs b/tools/integration-test/src/tests/manual/simulation.rs deleted file mode 100644 index 6419ab37a4..0000000000 --- a/tools/integration-test/src/tests/manual/simulation.rs +++ /dev/null @@ -1,98 +0,0 @@ -/*! 
- Test for verifying the solution in - [#1542](https://github.com/informalsystems/ibc-rs/pull/1542) - - On running the test, the log should show messages like: - - ```text - 2021-11-05T14:12:09.633184Z WARN ThreadId(30) [ibc-1] estimate_gas: failed to simulate tx, falling back on default gas because the error is potentially recoverable: gRPC call failed with status: status: InvalidArgument, message: "failed to execute message; message index: 0: acknowledge packet verification failed: packet acknowledgement verification failed: failed packet acknowledgement verification for client (07-tendermint-0): client state height < proof height ({0 243} < {0 554}): invalid height: invalid request", details: [], metadata: MetadataMap { headers: {"content-type": "application/grpc"} } - 2021-11-05T14:12:09.633290Z DEBUG ThreadId(30) [ibc-1] send_tx: using 900000000 gas, fee Fee { amount: "900000stake", gas_limit: 900000000 } - 2021-11-05T14:12:09.639044Z DEBUG ThreadId(30) [ibc-1] send_tx: broadcast_tx_sync: Response { code: Ok, data: Data([]), log: Log("[]"), hash: transaction::Hash(BA94AE4CA198F56E27B4A44DA5E508A2E2207E306F475E5285D873296D892170) } - ``` -*/ - -use core::time::Duration; -use ibc::events::IbcEvent; -use ibc_relayer::config::{types::MaxMsgNum, Config}; -use ibc_relayer::transfer::{build_and_send_transfer_messages, TransferOptions}; -use ibc_test_framework::prelude::*; - -#[test] -fn test_simulation() -> Result<(), Error> { - run_binary_channel_test(&SimulationTest) -} - -const MAX_MSGS: usize = 5; - -pub struct SimulationTest; - -impl TestOverrides for SimulationTest { - fn modify_relayer_config(&self, config: &mut Config) { - for mut chain in config.chains.iter_mut() { - chain.max_msg_num = MaxMsgNum::new(MAX_MSGS).unwrap(); - } - } -} - -impl BinaryChannelTest for SimulationTest { - fn run( - &self, - _config: &TestConfig, - _relayer: RelayerDriver, - chains: ConnectedChains, - channel: ConnectedChannel, - ) -> Result<(), Error> { - tx_raw_ft_transfer( - chains.handle_a(), - chains.handle_b(), - &channel, - &chains.node_b.wallets().user1().address(), - &chains.node_a.denom(), - 9999, - 1000, - Duration::from_secs(0), - MAX_MSGS, - )?; - - suspend() - } -} - -/** - Perform the same operation as `hermes tx raw ft-transfer`. - - The function call skips the checks done in the CLI, as we already - have the necessary information given to us by the test framework. - - Note that we cannot change the sender's wallet in this case, - as the current `send_tx` implementation in - [`CosmosSdkChain`](ibc_relayer::chain::cosmos::CosmosSdkChain) - always use the signer wallet configured in the - [`ChainConfig`](ibc_relayer::config::ChainConfig). 
-*/ -fn tx_raw_ft_transfer( - src_handle: &SrcChain, - dst_handle: &DstChain, - channel: &ConnectedChannel, - recipient: &MonoTagged, - denom: &MonoTagged, - amount: u64, - timeout_height_offset: u64, - timeout_duration: Duration, - number_messages: usize, -) -> Result, Error> { - let transfer_options = TransferOptions { - packet_src_port_id: channel.port_a.value().clone(), - packet_src_channel_id: *channel.channel_id_a.value(), - amount: amount.into(), - denom: denom.value().to_string(), - receiver: Some(recipient.value().0.clone()), - timeout_height_offset, - timeout_duration, - number_msgs: number_messages, - }; - - let events = build_and_send_transfer_messages(src_handle, dst_handle, &transfer_options)?; - - Ok(events) -} diff --git a/tools/integration-test/src/tests/memo.rs b/tools/integration-test/src/tests/memo.rs deleted file mode 100644 index b77f501e50..0000000000 --- a/tools/integration-test/src/tests/memo.rs +++ /dev/null @@ -1,90 +0,0 @@ -use ibc_relayer::config::{types::Memo, Config}; -use serde_json as json; - -use ibc_test_framework::ibc::denom::derive_ibc_denom; -use ibc_test_framework::prelude::*; -use ibc_test_framework::util::random::{random_string, random_u64_range}; - -#[test] -fn test_memo() -> Result<(), Error> { - let memo = Memo::new(random_string()).unwrap(); - let test = MemoTest { memo }; - run_binary_channel_test(&test) -} - -pub struct MemoTest { - memo: Memo, -} - -impl TestOverrides for MemoTest { - fn modify_relayer_config(&self, config: &mut Config) { - for mut chain in config.chains.iter_mut() { - chain.memo_prefix = self.memo.clone(); - } - } -} - -impl BinaryChannelTest for MemoTest { - fn run( - &self, - _config: &TestConfig, - _relayer: RelayerDriver, - chains: ConnectedChains, - channel: ConnectedChannel, - ) -> Result<(), Error> { - info!( - "testing IBC transfer with memo configured: \"{}\"", - self.memo - ); - - let denom_a = chains.node_a.denom(); - - let a_to_b_amount = random_u64_range(1000, 5000); - - chains.node_a.chain_driver().ibc_transfer_token( - &channel.port_a.as_ref(), - &channel.channel_id_a.as_ref(), - &chains.node_a.wallets().user1(), - &chains.node_b.wallets().user1().address(), - &denom_a, - a_to_b_amount, - )?; - - let denom_b = derive_ibc_denom( - &channel.port_b.as_ref(), - &channel.channel_id_b.as_ref(), - &denom_a, - )?; - - chains.node_b.chain_driver().assert_eventual_wallet_amount( - &chains.node_b.wallets().user1().address(), - a_to_b_amount, - &denom_b.as_ref(), - )?; - - let tx_info = chains - .node_b - .chain_driver() - .query_recipient_transactions(&chains.node_b.wallets().user1().address())?; - - assert_tx_memo_equals(&tx_info, self.memo.as_str())?; - - Ok(()) - } -} - -fn assert_tx_memo_equals(tx_info: &json::Value, expected_memo: &str) -> Result<(), Error> { - debug!("comparing memo field from json value {}", tx_info); - - let memo_field = &tx_info["txs"][0]["tx"]["body"]["memo"]; - - info!("memo field value: {}", memo_field); - - let memo_str = memo_field - .as_str() - .ok_or_else(|| eyre!("expect memo string field to be present in JSON"))?; - - assert_eq!(memo_str, expected_memo); - - Ok(()) -} diff --git a/tools/integration-test/src/tests/mod.rs b/tools/integration-test/src/tests/mod.rs deleted file mode 100644 index 09e21d5252..0000000000 --- a/tools/integration-test/src/tests/mod.rs +++ /dev/null @@ -1,30 +0,0 @@ -/*! - All test cases are placed within this module. - - We expose the modules as public so that cargo doc - will pick up the definition by default. 
-*/ - -pub mod clear_packet; -pub mod client_expiration; -mod client_settings; -pub mod connection_delay; -pub mod execute_schedule; -pub mod memo; -pub mod python; -mod query_packet; -pub mod supervisor; -pub mod ternary_transfer; -pub mod transfer; - -#[cfg(any(doc, feature = "ordered"))] -pub mod ordered_channel; - -#[cfg(any(doc, feature = "ica"))] -pub mod ica; - -#[cfg(any(doc, feature = "manual"))] -pub mod manual; - -#[cfg(any(doc, feature = "example"))] -pub mod example; diff --git a/tools/integration-test/src/tests/ordered_channel.rs b/tools/integration-test/src/tests/ordered_channel.rs deleted file mode 100644 index ea0467dd33..0000000000 --- a/tools/integration-test/src/tests/ordered_channel.rs +++ /dev/null @@ -1,109 +0,0 @@ -use ibc_test_framework::ibc::denom::derive_ibc_denom; -use ibc_test_framework::prelude::*; -use ibc_test_framework::util::random::random_u64_range; - -#[test] -fn test_ordered_channel() -> Result<(), Error> { - run_binary_channel_test(&OrderedChannelTest) -} - -pub struct OrderedChannelTest; - -impl TestOverrides for OrderedChannelTest { - fn modify_relayer_config(&self, config: &mut Config) { - // Disabling clear_on_start should make the relayer not - // relay any packet it missed before starting. - config.mode.packets.clear_on_start = false; - config.mode.packets.clear_interval = 0; - } - - fn should_spawn_supervisor(&self) -> bool { - false - } - - fn channel_order(&self) -> Order { - Order::Ordered - } -} - -impl BinaryChannelTest for OrderedChannelTest { - fn run( - &self, - _config: &TestConfig, - relayer: RelayerDriver, - chains: ConnectedChains, - channel: ConnectedChannel, - ) -> Result<(), Error> { - let denom_a = chains.node_a.denom(); - - let wallet_a = chains.node_a.wallets().user1().cloned(); - let wallet_b = chains.node_b.wallets().user1().cloned(); - - let balance_a = chains - .node_a - .chain_driver() - .query_balance(&wallet_a.address(), &denom_a)?; - - let amount1 = random_u64_range(1000, 5000); - - info!( - "Performing IBC transfer with amount {}, which should be relayed because its an ordered channel", - amount1 - ); - - chains.node_a.chain_driver().ibc_transfer_token( - &channel.port_a.as_ref(), - &channel.channel_id_a.as_ref(), - &wallet_a.as_ref(), - &wallet_b.address(), - &denom_a, - amount1, - )?; - - sleep(Duration::from_secs(1)); - - relayer.with_supervisor(|| { - sleep(Duration::from_secs(1)); - - let amount2 = random_u64_range(1000, 5000); - - info!( - "Performing IBC transfer with amount {}, which should be relayed", - amount2 - ); - - chains.node_a.chain_driver().ibc_transfer_token( - &channel.port_a.as_ref(), - &channel.channel_id_a.as_ref(), - &wallet_a.as_ref(), - &wallet_b.address(), - &denom_a, - amount2, - )?; - - sleep(Duration::from_secs(1)); - - let denom_b = derive_ibc_denom( - &channel.port_b.as_ref(), - &channel.channel_id_b.as_ref(), - &denom_a, - )?; - - // Wallet on chain A should have both amount deducted. 
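            // (Editor's note: even though packet clearing is disabled for this test,
            // both transfers are expected to arrive on chain B below, because an
            // ordered channel cannot deliver the second packet before the first,
            // which forces the relayer to pick up the transfer sent before the
            // supervisor was started.)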
- chains.node_a.chain_driver().assert_eventual_wallet_amount( - &wallet_a.address(), - balance_a - amount1 - amount2, - &denom_a, - )?; - - // Wallet on chain B should receive both IBC transfers - chains.node_b.chain_driver().assert_eventual_wallet_amount( - &wallet_b.address(), - amount1 + amount2, - &denom_b.as_ref(), - )?; - - Ok(()) - }) - } -} diff --git a/tools/integration-test/src/tests/python.rs b/tools/integration-test/src/tests/python.rs deleted file mode 100644 index 89cdfb588c..0000000000 --- a/tools/integration-test/src/tests/python.rs +++ /dev/null @@ -1,71 +0,0 @@ -use ibc_relayer::keyring::Store; -use ibc_test_framework::prelude::*; -use std::env; -use std::process::{Command, Stdio}; - -struct PythonTest; - -impl TestOverrides for PythonTest { - fn modify_test_config(&self, config: &mut TestConfig) { - config.bootstrap_with_random_ids = false; - } - - fn modify_relayer_config(&self, config: &mut Config) { - for mut chain in config.chains.iter_mut() { - // Modify the key store type to `Store::Test` so that the wallet - // keys are stored to ~/.hermes/keys so that we can use them - // with external relayer commands. - chain.key_store_type = Store::Test; - } - } - - fn should_spawn_supervisor(&self) -> bool { - false - } -} - -impl BinaryChainTest for PythonTest { - fn run( - &self, - _config: &TestConfig, - relayer: RelayerDriver, - _chains: ConnectedChains, - ) -> Result<(), Error> { - let config_path = relayer - .config_path - .to_str() - .ok_or_else(|| eyre!("failed to format relayer config path"))?; - - let current_dir = env::current_dir()? - .to_str() - .ok_or_else(|| eyre!("failed to format current directory"))? - .to_string(); - - // Use the directory where `cargo run` is called, instead of the - // package subdirectory automatically set by cargo - let base_dir = env::var("PWD").unwrap_or(current_dir); - - let command_args = [&format!("{}/e2e/run.py", base_dir), "-c", config_path]; - - let output = Command::new("python3") - .args(&command_args) - .current_dir(base_dir) - .stdout(Stdio::inherit()) - .stderr(Stdio::inherit()) - .output()?; - - if output.status.success() { - Ok(()) - } else { - Err(Error::generic(eyre!( - "Python E2E test exited with error code {:?}", - output.status.code(), - ))) - } - } -} - -#[test] -fn python_end_to_end_tests() -> Result<(), Error> { - run_binary_chain_test(&PythonTest) -} diff --git a/tools/integration-test/src/tests/query_packet.rs b/tools/integration-test/src/tests/query_packet.rs deleted file mode 100644 index 86480cfdce..0000000000 --- a/tools/integration-test/src/tests/query_packet.rs +++ /dev/null @@ -1,144 +0,0 @@ -use ibc_relayer::chain::counterparty::{channel_on_destination, pending_packet_summary}; -use ibc_relayer::link::{Link, LinkParameters}; - -use ibc_test_framework::prelude::*; -use ibc_test_framework::relayer::channel::query_identified_channel_end; -use ibc_test_framework::relayer::connection::query_identified_connection_end; -use ibc_test_framework::util::random::random_u64_range; - -#[test] -fn test_query_packet_pending() -> Result<(), Error> { - run_binary_channel_test(&QueryPacketPendingTest) -} - -pub struct QueryPacketPendingTest; - -impl TestOverrides for QueryPacketPendingTest { - fn modify_relayer_config(&self, config: &mut Config) { - // Disabling clear_on_start should make the relayer not - // relay any packet it missed before starting. 
- config.mode.packets.clear_on_start = false; - config.mode.packets.clear_interval = 0; - } - - fn should_spawn_supervisor(&self) -> bool { - false - } -} - -impl BinaryChannelTest for QueryPacketPendingTest { - fn run( - &self, - _config: &TestConfig, - _relayer: RelayerDriver, - chains: ConnectedChains, - channel: ConnectedChannel, - ) -> Result<(), Error> { - let denom_a = chains.node_a.denom(); - - let wallet_a = chains.node_a.wallets().user1().cloned(); - let wallet_b = chains.node_b.wallets().user1().cloned(); - - let amount1 = random_u64_range(1000, 5000); - - info!( - "Performing IBC transfer with amount {}, which should *not* be relayed", - amount1 - ); - - chains.node_a.chain_driver().ibc_transfer_token( - &channel.port_a.as_ref(), - &channel.channel_id_a.as_ref(), - &wallet_a.as_ref(), - &wallet_b.address(), - &denom_a, - amount1, - )?; - - sleep(Duration::from_secs(2)); - - let opts = LinkParameters { - src_port_id: channel.port_a.clone().into_value(), - src_channel_id: channel.channel_id_a.into_value(), - }; - let link = Link::new_from_opts( - chains.handle_a().clone(), - chains.handle_b().clone(), - opts, - false, - )?; - - let channel_end = query_identified_channel_end( - chains.handle_a(), - channel.channel_id_a.as_ref(), - channel.port_a.as_ref(), - )?; - - let summary = - pending_packet_summary(chains.handle_a(), chains.handle_b(), channel_end.value())?; - - assert_eq!(summary.unreceived_packets, [1.into()]); - assert!(summary.unreceived_acks.is_empty()); - - // Receive the packet on the destination chain - link.relay_recv_packet_and_timeout_messages()?; - - let summary = - pending_packet_summary(chains.handle_a(), chains.handle_b(), channel_end.value())?; - - assert!(summary.unreceived_packets.is_empty()); - assert_eq!(summary.unreceived_acks, [1.into()]); - - // Acknowledge the packet on the source chain - let link = link.reverse(false)?; - link.relay_ack_packet_messages()?; - - let summary = - pending_packet_summary(chains.handle_a(), chains.handle_b(), channel_end.value())?; - - assert!(summary.unreceived_packets.is_empty()); - assert!(summary.unreceived_acks.is_empty()); - - let denom_b = chains.node_b.denom(); - let amount2 = random_u64_range(1000, 5000); - - chains.node_b.chain_driver().ibc_transfer_token( - &channel.port_b.as_ref(), - &channel.channel_id_b.as_ref(), - &wallet_b.as_ref(), - &wallet_a.address(), - &denom_b, - amount2, - )?; - - info!( - "Performing IBC transfer with amount {}, which should *not* be relayed", - amount2 - ); - - sleep(Duration::from_secs(2)); - - // Get the reverse channel end, like the CLI command does - let connection_end = query_identified_connection_end( - chains.handle_a(), - channel.connection.connection_id_a.as_ref(), - )?; - let counterparty_channel_end = channel_on_destination( - channel_end.value(), - connection_end.value(), - chains.handle_b(), - )? 
- .unwrap(); - - let summary = pending_packet_summary( - chains.handle_b(), - chains.handle_a(), - &counterparty_channel_end, - )?; - - assert_eq!(summary.unreceived_packets, [1.into()]); - assert!(summary.unreceived_acks.is_empty()); - - Ok(()) - } -} diff --git a/tools/integration-test/src/tests/supervisor.rs b/tools/integration-test/src/tests/supervisor.rs deleted file mode 100644 index 4e739edc58..0000000000 --- a/tools/integration-test/src/tests/supervisor.rs +++ /dev/null @@ -1,149 +0,0 @@ -use ibc_relayer::config::{self, Config, ModeConfig}; -use ibc_test_framework::ibc::denom::derive_ibc_denom; - -use ibc_test_framework::prelude::*; -use ibc_test_framework::relayer::channel::{assert_eventually_channel_established, init_channel}; -use ibc_test_framework::relayer::connection::{ - assert_eventually_connection_established, init_connection, -}; - -#[test] -fn test_supervisor() -> Result<(), Error> { - run_binary_chain_test(&SupervisorTest) -} - -struct SupervisorTest; - -impl TestOverrides for SupervisorTest { - fn modify_relayer_config(&self, config: &mut Config) { - config.mode = ModeConfig { - clients: config::Clients { - enabled: true, - refresh: true, - misbehaviour: true, - }, - connections: config::Connections { enabled: true }, - channels: config::Channels { enabled: true }, - packets: config::Packets { - enabled: true, - clear_interval: 10, - clear_on_start: true, - tx_confirmation: true, - }, - }; - } -} - -impl BinaryChainTest for SupervisorTest { - fn run( - &self, - _config: &TestConfig, - _relayer: RelayerDriver, - chains: ConnectedChains, - ) -> Result<(), Error> { - let (connection_id_b, _) = init_connection( - &chains.handle_a, - &chains.handle_b, - &chains.foreign_clients.client_id_a(), - &chains.foreign_clients.client_id_b(), - )?; - - let connection_id_a = assert_eventually_connection_established( - &chains.handle_b, - &chains.handle_a, - &connection_id_b.as_ref(), - )?; - - let port_a = tagged_transfer_port(); - let port_b = tagged_transfer_port(); - - let (channel_id_b, _) = init_channel( - &chains.handle_a, - &chains.handle_b, - &chains.client_id_a(), - &chains.client_id_b(), - &connection_id_a.as_ref(), - &connection_id_b.as_ref(), - &port_a.as_ref(), - &port_b.as_ref(), - )?; - - let channel_id_a = assert_eventually_channel_established( - &chains.handle_b, - &chains.handle_a, - &channel_id_b.as_ref(), - &port_b.as_ref(), - )?; - - let denom_a = chains.node_a.denom(); - - let denom_b = derive_ibc_denom(&port_b.as_ref(), &channel_id_b.as_ref(), &denom_a)?; - - // Use the same wallet as the relayer to perform token transfer. - // This will cause an account sequence mismatch error. - let wallet_a = chains.node_a.wallets().user1().cloned(); - let wallet_b = chains.node_b.wallets().user1().cloned(); - - let transfer_amount = 1000; - - let balance_a = chains - .node_a - .chain_driver() - .query_balance(&wallet_a.address(), &denom_a)?; - - // Perform local transfers for both chains A and B using the relayer's - // wallet to mess up the account sequence number on both sides. 
- - chains.node_a.chain_driver().local_transfer_token( - &chains.node_a.wallets().relayer(), - &chains.node_a.wallets().user2().address(), - 1000, - &denom_a, - )?; - - chains.node_b.chain_driver().local_transfer_token( - &chains.node_b.wallets().relayer(), - &chains.node_b.wallets().user2().address(), - 1000, - &chains.node_b.denom(), - )?; - - info!( - "Sending IBC transfer from chain {} to chain {} with amount of {} {}", - chains.chain_id_a(), - chains.chain_id_b(), - transfer_amount, - denom_a - ); - - chains.node_a.chain_driver().ibc_transfer_token( - &port_a.as_ref(), - &channel_id_a.as_ref(), - &wallet_a.as_ref(), - &wallet_b.address(), - &denom_a, - transfer_amount, - )?; - - // During the test, you should see error logs showing "account sequence mismatch". - info!( - "Packet worker should still succeed and recover from account sequence mismatch error", - ); - - chains.node_a.chain_driver().assert_eventual_wallet_amount( - &wallet_a.address(), - balance_a - transfer_amount, - &denom_a, - )?; - - chains.node_b.chain_driver().assert_eventual_wallet_amount( - &wallet_b.address(), - transfer_amount, - &denom_b.as_ref(), - )?; - - std::thread::sleep(core::time::Duration::from_secs(10)); - - Ok(()) - } -} diff --git a/tools/integration-test/src/tests/ternary_transfer.rs b/tools/integration-test/src/tests/ternary_transfer.rs deleted file mode 100644 index 4c23a9eb4b..0000000000 --- a/tools/integration-test/src/tests/ternary_transfer.rs +++ /dev/null @@ -1,208 +0,0 @@ -use ibc_test_framework::ibc::denom::derive_ibc_denom; -use ibc_test_framework::prelude::*; - -#[test] -fn test_ternary_ibc_transfer() -> Result<(), Error> { - run_nary_channel_test(&TernaryIbcTransferTest) -} - -pub struct TernaryIbcTransferTest; - -impl TestOverrides for TernaryIbcTransferTest { - fn modify_test_config(&self, config: &mut TestConfig) { - config.bootstrap_with_random_ids = false; - } - - fn modify_relayer_config(&self, config: &mut Config) { - config.mode.clients.misbehaviour = false; - } -} - -impl PortsOverride<3> for TernaryIbcTransferTest {} - -impl NaryChannelTest<3> for TernaryIbcTransferTest { - fn run( - &self, - _config: &TestConfig, - _relayer: RelayerDriver, - chains: NaryConnectedChains, - channels: NaryConnectedChannels, - ) -> Result<(), Error> { - let node_a = chains.full_node_at::<0>()?; - let node_b = chains.full_node_at::<1>()?; - let node_c = chains.full_node_at::<2>()?; - - let denom_a = node_a.denom(); - - let wallet_a1 = node_a.wallets().user1().cloned(); - - let wallet_b1 = node_b.wallets().user1().cloned(); - let wallet_b2 = node_b.wallets().user2().cloned(); - - let wallet_c1 = node_c.wallets().user1().cloned(); - - let balance_a = node_a - .chain_driver() - .query_balance(&wallet_a1.address(), &denom_a)?; - - let a_to_b_amount = 5000; - - let channel_a_to_b = channels.channel_at::<0, 1>()?; - - info!( - "Sending IBC transfer from chain {} to chain {} with amount of {} {}", - node_a.chain_id(), - node_b.chain_id(), - a_to_b_amount, - denom_a - ); - - node_a.chain_driver().ibc_transfer_token( - &channel_a_to_b.port_a.as_ref(), - &channel_a_to_b.channel_id_a.as_ref(), - &wallet_a1.as_ref(), - &wallet_b1.address(), - &denom_a, - a_to_b_amount, - )?; - - let denom_a_to_b = derive_ibc_denom( - &channel_a_to_b.port_b.as_ref(), - &channel_a_to_b.channel_id_b.as_ref(), - &denom_a, - )?; - - // Chain B will receive ibc/port-b/channel-b/denom - - info!( - "Waiting for user on chain B to receive IBC transferred amount of {} {}", - a_to_b_amount, denom_a_to_b - ); - - 
node_a.chain_driver().assert_eventual_wallet_amount( - &wallet_a1.address(), - balance_a - a_to_b_amount, - &denom_a, - )?; - - node_b.chain_driver().assert_eventual_wallet_amount( - &wallet_b1.address(), - a_to_b_amount, - &denom_a_to_b.as_ref(), - )?; - - info!( - "successfully performed IBC transfer from chain {} to chain {}", - node_a.chain_id(), - node_b.chain_id(), - ); - - let channel_b_to_c = channels.channel_at::<1, 2>()?; - - let denom_a_to_c = derive_ibc_denom( - &channel_b_to_c.port_b.as_ref(), - &channel_b_to_c.channel_id_b.as_ref(), - &denom_a_to_b.as_ref(), - )?; - - let b_to_c_amount = 2500; - - node_b.chain_driver().ibc_transfer_token( - &channel_b_to_c.port_a.as_ref(), - &channel_b_to_c.channel_id_a.as_ref(), - &wallet_b1.as_ref(), - &wallet_c1.address(), - &denom_a_to_b.as_ref(), - b_to_c_amount, - )?; - - // Chain C will receive ibc/port-c/channel-c/port-b/channel-b/denom - - info!( - "Waiting for user on chain C to receive IBC transferred amount of {} {}", - b_to_c_amount, denom_a_to_c - ); - - node_b.chain_driver().assert_eventual_wallet_amount( - &wallet_b1.address(), - a_to_b_amount - b_to_c_amount, - &denom_a_to_b.as_ref(), - )?; - - node_c.chain_driver().assert_eventual_wallet_amount( - &wallet_c1.address(), - b_to_c_amount, - &denom_a_to_c.as_ref(), - )?; - - let channel_c_to_a = channels.channel_at::<2, 0>()?; - - let denom_a_to_c_to_a = derive_ibc_denom( - &channel_c_to_a.port_b.as_ref(), - &channel_c_to_a.channel_id_b.as_ref(), - &denom_a_to_c.as_ref(), - )?; - - let c_to_a_amount = 800; - - node_c.chain_driver().ibc_transfer_token( - &channel_c_to_a.port_a.as_ref(), - &channel_c_to_a.channel_id_a.as_ref(), - &wallet_c1.as_ref(), - &wallet_a1.address(), - &denom_a_to_c.as_ref(), - c_to_a_amount, - )?; - - // Chain A will receive ibc/port-a/channel-a/port-c/channel-c/port-b/channel-b/denom - - info!( - "Waiting for user on chain A to receive IBC transferred amount of {} {}", - c_to_a_amount, denom_a_to_c_to_a - ); - - node_c.chain_driver().assert_eventual_wallet_amount( - &wallet_c1.address(), - b_to_c_amount - c_to_a_amount, - &denom_a_to_c.as_ref(), - )?; - - node_a.chain_driver().assert_eventual_wallet_amount( - &wallet_a1.address(), - c_to_a_amount, - &denom_a_to_c_to_a.as_ref(), - )?; - - let c_to_b_amount = 500; - - node_c.chain_driver().ibc_transfer_token( - &channel_b_to_c.port_b.as_ref(), - &channel_b_to_c.channel_id_b.as_ref(), - &wallet_c1.as_ref(), - &wallet_b2.address(), - &denom_a_to_c.as_ref(), - c_to_b_amount, - )?; - - // Chain B will receive ibc/port-b/channel-b/denom - - info!( - "Waiting for user on chain B to receive IBC transferred amount of {} {}", - c_to_b_amount, denom_a_to_b - ); - - node_c.chain_driver().assert_eventual_wallet_amount( - &wallet_c1.address(), - b_to_c_amount - c_to_a_amount - c_to_b_amount, - &denom_a_to_c.as_ref(), - )?; - - node_b.chain_driver().assert_eventual_wallet_amount( - &wallet_b2.address(), - c_to_b_amount, - &denom_a_to_b.as_ref(), - )?; - - Ok(()) - } -} diff --git a/tools/integration-test/src/tests/transfer.rs b/tools/integration-test/src/tests/transfer.rs deleted file mode 100644 index a5a76750c8..0000000000 --- a/tools/integration-test/src/tests/transfer.rs +++ /dev/null @@ -1,155 +0,0 @@ -use ibc_test_framework::ibc::denom::derive_ibc_denom; -use ibc_test_framework::prelude::*; -use ibc_test_framework::util::random::random_u64_range; - -#[test] -fn test_ibc_transfer() -> Result<(), Error> { - run_binary_channel_test(&IbcTransferTest) -} - -/** - Test that IBC token transfer can still work with a single 
- chain that is connected to itself. -*/ -#[test] -fn test_self_connected_ibc_transfer() -> Result<(), Error> { - run_self_connected_binary_chain_test(&RunBinaryConnectionTest::new(&RunBinaryChannelTest::new( - &RunWithSupervisor::new(&IbcTransferTest), - ))) -} - -/** - Run the IBC transfer test as an N-ary chain test case with SIZE=2. - - The work on N-ary chain is currently still work in progress, so we put - this behind the "experimental" feature flag so that normal developers - are not obligated to understand how this test works yet. -*/ -#[test] -fn test_nary_ibc_transfer() -> Result<(), Error> { - run_binary_as_nary_channel_test(&IbcTransferTest) -} - -#[test] -fn test_self_connected_nary_ibc_transfer() -> Result<(), Error> { - run_self_connected_nary_chain_test(&RunNaryConnectionTest::new(&RunNaryChannelTest::new( - &RunBinaryAsNaryChannelTest::new(&IbcTransferTest), - ))) -} - -pub struct IbcTransferTest; - -impl TestOverrides for IbcTransferTest {} - -impl BinaryChannelTest for IbcTransferTest { - fn run( - &self, - _config: &TestConfig, - _relayer: RelayerDriver, - chains: ConnectedChains, - channel: ConnectedChannel, - ) -> Result<(), Error> { - let denom_a = chains.node_a.denom(); - - let wallet_a = chains.node_a.wallets().user1().cloned(); - let wallet_b = chains.node_b.wallets().user1().cloned(); - let wallet_c = chains.node_a.wallets().user2().cloned(); - - let balance_a = chains - .node_a - .chain_driver() - .query_balance(&wallet_a.address(), &denom_a)?; - - let a_to_b_amount = random_u64_range(1000, 5000); - - info!( - "Sending IBC transfer from chain {} to chain {} with amount of {} {}", - chains.chain_id_a(), - chains.chain_id_b(), - a_to_b_amount, - denom_a - ); - - chains.node_a.chain_driver().ibc_transfer_token( - &channel.port_a.as_ref(), - &channel.channel_id_a.as_ref(), - &wallet_a.as_ref(), - &wallet_b.address(), - &denom_a, - a_to_b_amount, - )?; - - let denom_b = derive_ibc_denom( - &channel.port_b.as_ref(), - &channel.channel_id_b.as_ref(), - &denom_a, - )?; - - info!( - "Waiting for user on chain B to receive IBC transferred amount of {} {}", - a_to_b_amount, denom_b - ); - - chains.node_a.chain_driver().assert_eventual_wallet_amount( - &wallet_a.address(), - balance_a - a_to_b_amount, - &denom_a, - )?; - - chains.node_b.chain_driver().assert_eventual_wallet_amount( - &wallet_b.address(), - a_to_b_amount, - &denom_b.as_ref(), - )?; - - info!( - "successfully performed IBC transfer from chain {} to chain {}", - chains.chain_id_a(), - chains.chain_id_b(), - ); - - let balance_c = chains - .node_a - .chain_driver() - .query_balance(&wallet_c.address(), &denom_a)?; - - let b_to_a_amount = random_u64_range(500, a_to_b_amount); - - info!( - "Sending IBC transfer from chain {} to chain {} with amount of {} {}", - chains.chain_id_b(), - chains.chain_id_a(), - b_to_a_amount, - denom_b - ); - - chains.node_b.chain_driver().ibc_transfer_token( - &channel.port_b.as_ref(), - &channel.channel_id_b.as_ref(), - &wallet_b.as_ref(), - &wallet_c.address(), - &denom_b.as_ref(), - b_to_a_amount, - )?; - - chains.node_b.chain_driver().assert_eventual_wallet_amount( - &wallet_b.address(), - a_to_b_amount - b_to_a_amount, - &denom_b.as_ref(), - )?; - - chains.node_a.chain_driver().assert_eventual_wallet_amount( - &wallet_c.address(), - balance_c + b_to_a_amount, - &denom_a, - )?; - - info!( - "successfully performed reverse IBC transfer from chain {} back to chain {}", - chains.chain_id_b(), - chains.chain_id_a(), - ); - - Ok(()) - } -} diff --git a/tools/test-framework/.gitignore 
b/tools/test-framework/.gitignore deleted file mode 100644 index 8fce603003..0000000000 --- a/tools/test-framework/.gitignore +++ /dev/null @@ -1 +0,0 @@ -data/ diff --git a/tools/test-framework/Cargo.toml b/tools/test-framework/Cargo.toml deleted file mode 100644 index 46d256f484..0000000000 --- a/tools/test-framework/Cargo.toml +++ /dev/null @@ -1,43 +0,0 @@ -[package] -name = "ibc-test-framework" -version = "0.15.0" -edition = "2021" -license = "Apache-2.0" -readme = "README.md" -keywords = ["blockchain", "consensus", "cosmos", "ibc", "tendermint"] -homepage = "https://hermes.informal.systems/" -repository = "https://github.com/informalsystems/ibc-rs" -authors = ["Informal Systems "] - -description = """ - Framework for writing integration tests for IBC relayers -""" - -[dependencies] -ibc = { version = "=0.15.0", path = "../../modules" } -ibc-relayer = { version = "=0.15.0", path = "../../relayer" } -ibc-relayer-cli = { version = "=0.15.0", path = "../../relayer-cli" } -ibc-proto = { version = "=0.18.0", path = "../../proto" } -tendermint = { git = "https://github.com/composableFi/tendermint-rs", rev = "5a74e0f8da4d3dab83cc04b5f1363b018cf3d9e8" } -tendermint-rpc = { git = "https://github.com/composableFi/tendermint-rs", rev = "5a74e0f8da4d3dab83cc04b5f1363b018cf3d9e8", features = ["http-client", "websocket-client"] } - -async-trait = "0.1.56" -http = "0.2.8" -tokio = { version = "1.0", features = ["full"] } -tracing = "0.1.34" -tracing-subscriber = "0.3.11" -eyre = "0.6.8" -color-eyre = "0.6" -rand = "0.8.5" -env_logger = "0.9.0" -hex = "0.4.3" -serde = "1.0" -serde_json = "1" -serde_yaml = "0.8.24" -itertools = "0.10" -toml = "0.5" -subtle-encoding = "0.5.1" -sha2 = "0.10.2" -crossbeam-channel = "0.5.4" -semver = "1.0.10" -flex-error = "0.4.4" diff --git a/tools/test-framework/README.md b/tools/test-framework/README.md deleted file mode 100644 index 8856e9d34f..0000000000 --- a/tools/test-framework/README.md +++ /dev/null @@ -1,27 +0,0 @@ -# IBC Relayer Integration Test Framework - -## Overview - -The `ibc-test-framework` crate provides the infrastructure and framework for writing end-to-end (E2E) tests that include the spawning of the relayer together with Cosmos full nodes running as child processes inside the tests. - -## Installation - -Other than Rust, the test suite assumes the `gaiad` binary is present in `$PATH`. You can install Gaia by either [building from source](https://github.com/cosmos/gaia), or load it using [Cosmos.nix](https://github.com/informalsystems/cosmos.nix/): - -```text -nix shell github:informalsystems/cosmos.nix#gaia7 -``` - -Alternatively, you can use `$CHAIN_COMMAND_PATH` to override with a different executable that is compatible with `gaiad`. - -## Examples - -Example tests written using `ibc-test-framework` can be found in the [`ibc-rs` project repository](https://github.com/informalsystems/ibc-rs/tree/master/tools/integration-test) - -## Diagrams - -Some diagrams have been prepared to ease the understanding of the test framework: - -- [Tagged Identifiers and Data Structures](https://app.excalidraw.com/l/4XqkU6POmGI/7za2eSTChuT) -- [Test Data Structures](https://app.excalidraw.com/l/4XqkU6POmGI/5y6i0NKqiEv) -- [Test Framework Traits](https://app.excalidraw.com/l/4XqkU6POmGI/80KAnVZ6cu4) diff --git a/tools/test-framework/src/bootstrap/binary/chain.rs b/tools/test-framework/src/bootstrap/binary/chain.rs deleted file mode 100644 index fed3347c3e..0000000000 --- a/tools/test-framework/src/bootstrap/binary/chain.rs +++ /dev/null @@ -1,301 +0,0 @@ -/*! 
- Helper functions for bootstrapping two relayer chain handles - with connected foreign clients. -*/ - -use eyre::Report as Error; -use ibc::core::ics24_host::identifier::ClientId; -use ibc_relayer::chain::handle::{ChainHandle, CountingAndCachingChainHandle}; -use ibc_relayer::config::Config; -use ibc_relayer::error::ErrorDetail as RelayerErrorDetail; -use ibc_relayer::foreign_client::{ - extract_client_id, CreateOptions as ClientOptions, ForeignClient, -}; -use ibc_relayer::keyring::errors::ErrorDetail as KeyringErrorDetail; -use ibc_relayer::registry::SharedRegistry; -use std::fs; -use std::path::Path; -use tracing::{debug, info}; - -use crate::relayer::driver::RelayerDriver; -use crate::types::binary::chains::ConnectedChains; -use crate::types::binary::foreign_client::ForeignClientPair; -use crate::types::config::TestConfig; -use crate::types::single::node::FullNode; -use crate::types::tagged::*; -use crate::types::wallet::{TestWallets, Wallet}; -use crate::util::random::random_u64_range; - -#[derive(Default)] -pub struct BootstrapClientOptions { - pub client_options_a_to_b: ClientOptions, - pub client_options_b_to_a: ClientOptions, - pub pad_client_id_a_to_b: u64, - pub pad_client_id_b_to_a: u64, -} - -/// Bootstraps two relayer chain handles with connected foreign clients. -/// -/// Returns a tuple consisting of the [`RelayerDriver`] and a -/// [`ConnectedChains`] object that contains the given -/// full nodes together with the corresponding two [`ChainHandle`]s and -/// [`ForeignClient`]s. -pub fn bootstrap_chains_with_full_nodes( - test_config: &TestConfig, - node_a: FullNode, - node_b: FullNode, - options: BootstrapClientOptions, - config_modifier: impl FnOnce(&mut Config), -) -> Result< - ( - RelayerDriver, - ConnectedChains, - ), - Error, -> { - let mut config = Config::default(); - - add_chain_config(&mut config, &node_a)?; - add_chain_config(&mut config, &node_b)?; - - config_modifier(&mut config); - - let config_path = test_config.chain_store_dir.join("relayer-config.toml"); - - save_relayer_config(&config, &config_path)?; - - let registry = new_registry(config.clone()); - - // Pass in unique closure expressions `||{}` as the first argument so that - // the returned chains are considered different types by Rust. - // See [`spawn_chain_handle`] for more details. - let handle_a = spawn_chain_handle(|| {}, ®istry, &node_a)?; - let handle_b = spawn_chain_handle(|| {}, ®istry, &node_b)?; - - pad_client_ids(&handle_a, &handle_b, options.pad_client_id_a_to_b)?; - pad_client_ids(&handle_b, &handle_a, options.pad_client_id_b_to_a)?; - - let foreign_clients = bootstrap_foreign_client_pair(&handle_a, &handle_b, options)?; - - let relayer = RelayerDriver { - config_path, - config, - registry, - hang_on_fail: test_config.hang_on_fail, - }; - - let chains = ConnectedChains::new( - handle_a, - handle_b, - MonoTagged::new(node_a), - MonoTagged::new(node_b), - foreign_clients, - ); - - Ok((relayer, chains)) -} - -/// Bootstraps two relayer chain handles with connected foreign clients. -/// -/// Returns a tuple consisting of the [`RelayerDriver`] and a -/// [`ConnectedChains`] object that contains the given -/// full nodes together with the corresponding two [`ChainHandle`]s and -/// [`ForeignClient`]s. -/// -/// This method gives the caller a way to modify the relayer configuration -/// that is pre-generated from the configurations of the full nodes. 
-pub fn bootstrap_foreign_client_pair( - chain_a: &ChainA, - chain_b: &ChainB, - options: BootstrapClientOptions, -) -> Result, Error> { - let client_a_to_b = bootstrap_foreign_client(chain_a, chain_b, options.client_options_a_to_b)?; - let client_b_to_a = bootstrap_foreign_client(chain_b, chain_a, options.client_options_b_to_a)?; - Ok(ForeignClientPair::new(client_a_to_b, client_b_to_a)) -} - -pub fn bootstrap_foreign_client( - chain_a: &ChainA, - chain_b: &ChainB, - client_options: ClientOptions, -) -> Result, Error> { - let foreign_client = - ForeignClient::restore(ClientId::default(), chain_b.clone(), chain_a.clone()); - - let event = foreign_client.build_create_client_and_send(client_options)?; - let client_id = extract_client_id(&event)?.clone(); - - info!( - "created foreign client from chain {} to chain {} with client id {} on chain {}", - chain_a.id(), - chain_b.id(), - client_id, - chain_b.id() - ); - - Ok(ForeignClient::restore( - client_id, - chain_b.clone(), - chain_a.clone(), - )) -} - -pub fn pad_client_ids( - chain_a: &ChainA, - chain_b: &ChainB, - pad_count: u64, -) -> Result<(), Error> { - let foreign_client = - ForeignClient::restore(ClientId::default(), chain_b.clone(), chain_a.clone()); - - for i in 0..pad_count { - debug!("creating new client id {} on chain {}", i + 1, chain_b.id()); - foreign_client.build_create_client_and_send(Default::default())?; - } - - Ok(()) -} - -/** - Spawn a new chain handle using the given [`SharedRegistry`] and - [`FullNode`]. - - The function accepts a proxy type `Seed` that should be unique - across multiple calls so that the returned [`ChainHandle`] - has a unique type. - - For example, the following test should fail to compile: - - ```rust,compile_fail - # use ibc_test_framework::bootstrap::binary::chain::spawn_chain_handle; - fn same(_: T, _: T) {} - - let chain_a = spawn_chain_handle(|| {}, todo!(), todo!()).unwrap(); - let chain_b = spawn_chain_handle(|| {}, todo!(), todo!()).unwrap(); - same(chain_a, chain_b); // error: chain_a and chain_b have different types - ``` - - The reason is that Rust would give each closure expression `||{}` a - [unique anonymous type](https://doc.rust-lang.org/reference/types/closure.html). - When we instantiate two chains with different closure types, - the resulting values would be considered by Rust to have different types. - - With this we can treat `chain_a` and `chain_b` as having different types - so that we do not accidentally mix them up later in the code. -*/ -pub fn spawn_chain_handle( - _: Seed, - registry: &SharedRegistry, - node: &FullNode, -) -> Result { - let chain_id = &node.chain_driver.chain_id; - let handle = registry.get_or_spawn(chain_id)?; - - add_keys_to_chain_handle(&handle, &node.wallets)?; - - Ok(handle) -} - -/** - Add a wallet key to a [`ChainHandle`]'s key store. - - Note that if the [`ChainConfig`](ibc_relayer::config::ChainConfig) is - configured to use in-memory store only, the added key would not be - accessible through external CLI.
-*/ -pub fn add_key_to_chain_handle( - chain: &Chain, - wallet: &Wallet, -) -> Result<(), Error> { - let res = chain.add_key(wallet.id.0.clone(), wallet.key.clone()); - - // Ignore error if chain handle already have the given key - match res { - Err(e) => match e.detail() { - RelayerErrorDetail::KeyBase(e2) => match e2.source { - KeyringErrorDetail::KeyAlreadyExist(_) => Ok(()), - _ => Err(e.into()), - }, - _ => Err(e.into()), - }, - Ok(()) => Ok(()), - } -} - -/** - Add multiple wallets provided in [`TestWallets`] into the - [`ChainHandle`]'s key store. -*/ -pub fn add_keys_to_chain_handle( - chain: &Chain, - wallets: &TestWallets, -) -> Result<(), Error> { - add_key_to_chain_handle(chain, &wallets.relayer)?; - add_key_to_chain_handle(chain, &wallets.user1)?; - add_key_to_chain_handle(chain, &wallets.user2)?; - - Ok(()) -} - -/** - Create a new [`SharedRegistry`] that uses [`CountingAndCachingChainHandle`] - as the [`ChainHandle`] implementation. -*/ -pub fn new_registry(config: Config) -> SharedRegistry { - >::new(config) -} - -/** - Generate [`ChainConfig`](ibc_relayer::config::ChainConfig) from a running - [`FullNode`] and add it to the relayer's [`Config`]. -*/ -pub fn add_chain_config(config: &mut Config, running_node: &FullNode) -> Result<(), Error> { - let chain_config = running_node.generate_chain_config()?; - - config.chains.push(chain_config); - Ok(()) -} - -/** - Save a relayer's [`Config`] to the filesystem to make it accessible - through external CLI. -*/ -pub fn save_relayer_config(config: &Config, config_path: &Path) -> Result<(), Error> { - let config_str = toml::to_string_pretty(&config)?; - - fs::write(&config_path, &config_str)?; - - info!( - "written hermes config.toml to {}:\n{}", - config_path.display(), - config_str - ); - - Ok(()) -} - -impl BootstrapClientOptions { - /// Overrides options for the foreign client connecting chain A to chain B. - pub fn client_options_a_to_b(mut self, options: ClientOptions) -> Self { - self.client_options_a_to_b = options; - self - } - - /// Overrides options for the foreign client connecting chain B to chain A. - pub fn client_options_b_to_a(mut self, options: ClientOptions) -> Self { - self.client_options_b_to_a = options; - self - } - - pub fn bootstrap_with_random_ids(mut self, bootstrap_with_random_ids: bool) -> Self { - if bootstrap_with_random_ids { - self.pad_client_id_b_to_a = random_u64_range(1, 6); - self.pad_client_id_a_to_b = random_u64_range(1, 6); - } else { - self.pad_client_id_b_to_a = 0; - self.pad_client_id_a_to_b = 1; - } - - self - } -} diff --git a/tools/test-framework/src/bootstrap/binary/channel.rs b/tools/test-framework/src/bootstrap/binary/channel.rs deleted file mode 100644 index 04a2698fd8..0000000000 --- a/tools/test-framework/src/bootstrap/binary/channel.rs +++ /dev/null @@ -1,231 +0,0 @@ -/*! - Helper functions for bootstrapping a channel between two chains. 
-*/ - -use eyre::{eyre, Report as Error}; -use ibc::core::ics04_channel::channel::Order; -use ibc::core::ics04_channel::Version; -use ibc::core::ics24_host::identifier::PortId; -use ibc_relayer::chain::handle::ChainHandle; -use ibc_relayer::channel::{Channel, ChannelSide}; -use tracing::{debug, info}; - -use super::connection::{bootstrap_connection, BootstrapConnectionOptions}; -use crate::types::binary::chains::ConnectedChains; -use crate::types::binary::channel::ConnectedChannel; -use crate::types::binary::connection::ConnectedConnection; -use crate::types::binary::foreign_client::ForeignClientPair; -use crate::types::id::TaggedPortIdRef; -use crate::types::tagged::*; -use crate::util::random::random_u64_range; - -pub struct BootstrapChannelOptions { - pub order: Order, - pub version: Version, - pub pad_channel_id_a: u64, - pub pad_channel_id_b: u64, -} - -/** - Create a new [`ConnectedChannel`] based on the provided [`ConnectedChains`]. - - Also accepts the [`PortId`] that should be used for the two sides of the - channel. -*/ -pub fn bootstrap_channel_with_chains( - chains: &ConnectedChains, - port_a: &PortId, - port_b: &PortId, - connection_options: BootstrapConnectionOptions, - channel_options: BootstrapChannelOptions, -) -> Result, Error> { - let channel = bootstrap_channel( - &chains.foreign_clients, - &DualTagged::new(port_a), - &DualTagged::new(port_b), - connection_options, - channel_options, - )?; - - Ok(channel) -} - -/** - Create a new [`ConnectedChannel`] between two chains using foreign clients - with initialized client IDs. -*/ -pub fn bootstrap_channel( - foreign_clients: &ForeignClientPair, - port_a: &TaggedPortIdRef, - port_b: &TaggedPortIdRef, - connection_options: BootstrapConnectionOptions, - channel_options: BootstrapChannelOptions, -) -> Result, Error> { - let connection = bootstrap_connection(foreign_clients, connection_options)?; - - bootstrap_channel_with_connection( - &foreign_clients.handle_a(), - &foreign_clients.handle_b(), - connection, - port_a, - port_b, - channel_options, - ) -} - -/** - Create a new [`ConnectedChannel`] using existing [`ConnectedConnection`]. 
-*/ -pub fn bootstrap_channel_with_connection( - chain_a: &ChainA, - chain_b: &ChainB, - connection: ConnectedConnection, - port_a: &TaggedPortIdRef, - port_b: &TaggedPortIdRef, - options: BootstrapChannelOptions, -) -> Result, Error> { - pad_channel_id( - chain_a, - chain_b, - &connection, - port_a, - options.pad_channel_id_a, - )?; - pad_channel_id( - chain_b, - chain_a, - &connection.clone().flip(), - port_b, - options.pad_channel_id_b, - )?; - - let channel = Channel::new( - connection.connection.clone(), - options.order, - port_a.0.clone(), - port_b.0.clone(), - Some(options.version), - )?; - - let channel_id_a = *channel - .a_side - .channel_id() - .ok_or_else(|| eyre!("expect channel id"))?; - - let channel_id_b = *channel - .b_side - .channel_id() - .ok_or_else(|| eyre!("expect channel id"))?; - - info!( - "created new chain/client/connection/channel from {}/{}/{}/{} to {}/{}/{}/{}", - chain_a.id(), - connection.client_ids.client_id_a, - connection.connection_id_a, - channel_id_a, - chain_b.id(), - connection.client_ids.client_id_b, - connection.connection_id_b, - channel_id_b, - ); - - let res = ConnectedChannel { - connection, - channel, - channel_id_a: DualTagged::new(channel_id_a), - channel_id_b: DualTagged::new(channel_id_b), - port_a: port_a.cloned(), - port_b: port_b.cloned(), - }; - - Ok(res) -} - -/** - Create a random number of dummy channel IDs so that the bootstrapped - channel ID is random instead of being always `channel-0`. - - This would help us catch bugs where the channel IDs are used at - the wrong side of the chain, but still got accepted because the - channel IDs on both sides are the same. -*/ -pub fn pad_channel_id( - chain_a: &ChainA, - chain_b: &ChainB, - connection: &ConnectedConnection, - port_id: &TaggedPortIdRef, - pad_count: u64, -) -> Result<(), Error> { - let client_id_a = &connection.client_ids.client_id_a; - let client_id_b = &connection.client_ids.client_id_b; - - for i in 0..pad_count { - debug!( - "creating new channel id {} on chain/connection/client {}/{}/{}", - i + 1, - chain_a.id(), - connection.connection_id_a, - client_id_a, - ); - - let channel: Channel = Channel { - ordering: Order::Unordered, - a_side: ChannelSide::new( - chain_b.clone(), - client_id_b.value().clone(), - connection.connection_id_b.value().clone(), - port_id.cloned().into_value(), - None, - None, - ), - b_side: ChannelSide::new( - chain_a.clone(), - client_id_a.value().clone(), - connection.connection_id_a.value().clone(), - port_id.cloned().into_value(), - None, - None, - ), - connection_delay: connection.connection.delay_period, - }; - - channel.build_chan_open_init_and_send()?; - } - - Ok(()) -} - -impl Default for BootstrapChannelOptions { - fn default() -> Self { - Self { - order: Order::Unordered, - version: Version::ics20(), - pad_channel_id_a: 0, - pad_channel_id_b: 1, - } - } -} - -impl BootstrapChannelOptions { - pub fn order(mut self, order: Order) -> Self { - self.order = order; - self - } - - pub fn version(mut self, version: Version) -> Self { - self.version = version; - self - } - - pub fn bootstrap_with_random_ids(mut self, bootstrap_with_random_ids: bool) -> Self { - if bootstrap_with_random_ids { - self.pad_channel_id_a = random_u64_range(0, 6); - self.pad_channel_id_b = random_u64_range(0, 6); - } else { - self.pad_channel_id_a = 0; - self.pad_channel_id_b = 1; - } - - self - } -} diff --git a/tools/test-framework/src/bootstrap/binary/connection.rs b/tools/test-framework/src/bootstrap/binary/connection.rs deleted file mode 100644 index 
511e225010..0000000000 --- a/tools/test-framework/src/bootstrap/binary/connection.rs +++ /dev/null @@ -1,152 +0,0 @@ -/*! - Helper functions for bootstrapping a connection between two chains. -*/ - -use core::time::Duration; -use eyre::{eyre, Report as Error}; -use ibc::timestamp::ZERO_DURATION; -use ibc_relayer::chain::handle::ChainHandle; -use ibc_relayer::config::default::connection_delay as default_connection_delay; -use ibc_relayer::connection::{Connection, ConnectionSide}; -use tracing::{debug, info}; - -use crate::relayer::connection::TaggedConnectionExt; -use crate::types::binary::client::ClientIdPair; -use crate::types::binary::connection::ConnectedConnection; -use crate::types::binary::foreign_client::ForeignClientPair; -use crate::types::id::TaggedClientIdRef; -use crate::util::random::random_u64_range; - -pub struct BootstrapConnectionOptions { - pub connection_delay: Duration, - pub pad_connection_id_a: u64, - pub pad_connection_id_b: u64, -} - -/** - Create a new [`ConnectedConnection`] using the foreign clients with - initialized client IDs. -*/ -pub fn bootstrap_connection( - foreign_clients: &ForeignClientPair, - options: BootstrapConnectionOptions, -) -> Result, Error> { - let chain_a = foreign_clients.handle_a(); - let chain_b = foreign_clients.handle_b(); - - let client_id_a = foreign_clients.client_id_a(); - let client_id_b = foreign_clients.client_id_b(); - - pad_connection_id( - &chain_a, - &chain_b, - &client_id_a, - &client_id_b, - options.pad_connection_id_a, - )?; - pad_connection_id( - &chain_b, - &chain_a, - &client_id_b, - &client_id_a, - options.pad_connection_id_b, - )?; - - let connection = Connection::new( - foreign_clients.client_b_to_a.clone(), - foreign_clients.client_a_to_b.clone(), - options.connection_delay, - )?; - - let connection_id_a = connection - .tagged_connection_id_a() - .ok_or_else(|| eyre!("expected connection id to present"))? - .cloned(); - - let connection_id_b = connection - .tagged_connection_id_b() - .ok_or_else(|| eyre!("expected connection id to present"))? - .cloned(); - - info!( - "created new chain/client/connection from {}/{}/{} to {}/{}/{}", - chain_a.id(), - client_id_a, - connection_id_a, - chain_b.id(), - client_id_b, - connection_id_b, - ); - - let connected_connection = ConnectedConnection::new( - ClientIdPair::new(client_id_a.cloned(), client_id_b.cloned()), - connection, - connection_id_a, - connection_id_b, - ); - - Ok(connected_connection) -} - -/** - Create a random number of dummy connection IDs so that the bootstrapped - connection ID is random instead of being always `connection-0`. - - This would help us catch bugs where the connection IDs are used at - the wrong side of the chain, but still got accepted because the - connection IDs on both sides are the same. 
-*/ -pub fn pad_connection_id( - chain_a: &ChainA, - chain_b: &ChainB, - client_id_a: &TaggedClientIdRef, - client_id_b: &TaggedClientIdRef, - pad_count: u64, -) -> Result<(), Error> { - for i in 0..pad_count { - debug!( - "creating new connection id {} on chain {}", - i + 1, - chain_a.id() - ); - - let connection: Connection = Connection { - delay_period: ZERO_DURATION, - a_side: ConnectionSide::new(chain_b.clone(), client_id_b.cloned().into_value(), None), - b_side: ConnectionSide::new(chain_a.clone(), client_id_a.cloned().into_value(), None), - }; - - connection.build_conn_init_and_send()?; - } - - Ok(()) -} - -impl Default for BootstrapConnectionOptions { - fn default() -> Self { - Self { - connection_delay: default_connection_delay(), - pad_connection_id_a: 0, - pad_connection_id_b: 0, - } - } -} - -impl BootstrapConnectionOptions { - pub fn connection_delay(mut self, connection_delay: Duration) -> Self { - self.connection_delay = connection_delay; - self - } - - pub fn bootstrap_with_random_ids(mut self, bootstrap_with_random_ids: bool) -> Self { - if bootstrap_with_random_ids { - self.pad_connection_id_a = random_u64_range(0, 6); - self.pad_connection_id_b = random_u64_range(0, 6); - } else { - self.pad_connection_id_a = 0; - self.pad_connection_id_b = 1; - } - - self - } -} diff --git a/tools/test-framework/src/bootstrap/binary/mod.rs b/tools/test-framework/src/bootstrap/binary/mod.rs deleted file mode 100644 index 5cf48e941e..0000000000 --- a/tools/test-framework/src/bootstrap/binary/mod.rs +++ /dev/null @@ -1,7 +0,0 @@ -/*! - Helper functions for bootstrapping constructs that involve two chains. -*/ - -pub mod chain; -pub mod channel; -pub mod connection; diff --git a/tools/test-framework/src/bootstrap/init.rs b/tools/test-framework/src/bootstrap/init.rs deleted file mode 100644 index ca5719b324..0000000000 --- a/tools/test-framework/src/bootstrap/init.rs +++ /dev/null @@ -1,84 +0,0 @@ -/*! - Functions for initializing each test at the beginning of a Rust test - session. -*/ - -use eyre::Report as Error; -use ibc_relayer_cli::components::enable_ansi; -use std::env; -use std::fs; -use std::sync::Once; -use tracing_subscriber::{ - self as ts, - filter::EnvFilter, - layer::{Layer, SubscriberExt}, - util::SubscriberInitExt, -}; - -use crate::types::config::TestConfig; -use crate::util::random::random_u32; - -static INIT: Once = Once::new(); - -/** - Initialize the test with a global logger and error handlers, - read the environment variables and return a [`TestConfig`]. 
-*/ -pub fn init_test() -> Result { - let no_color_log = env::var("NO_COLOR_LOG") - .ok() - .map(|val| val == "1") - .unwrap_or(false); - - INIT.call_once(|| { - if enable_ansi() && !no_color_log { - color_eyre::install().unwrap(); - } - install_logger(!no_color_log); - }); - - let chain_command_path = env::var("CHAIN_COMMAND_PATH").unwrap_or_else(|_| "gaiad".to_string()); - - let base_chain_store_dir = env::var("CHAIN_STORE_DIR").unwrap_or_else(|_| "data".to_string()); - - let account_prefix = env::var("ACCOUNT_PREFIX").unwrap_or_else(|_| "cosmos".to_string()); - - let chain_store_dir = format!("{}/test-{}", base_chain_store_dir, random_u32()); - - fs::create_dir_all(&chain_store_dir)?; - - let chain_store_dir = fs::canonicalize(chain_store_dir)?; - - let hang_on_fail = env::var("HANG_ON_FAIL") - .ok() - .map(|val| val == "1") - .unwrap_or(false); - - Ok(TestConfig { - chain_command_path, - chain_store_dir, - account_prefix, - hang_on_fail, - bootstrap_with_random_ids: true, - }) -} - -/** - Install the [`tracing_subscriber`] logger handlers so that logs will - be displayed during test. -*/ -pub fn install_logger(with_color: bool) { - // Use log level INFO by default if RUST_LOG is not set. - let env_filter = EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new("info")); - - let module_filter_fn = ts::filter::filter_fn(|metadata| match metadata.module_path() { - Some(path) => path.starts_with("ibc"), - None => false, - }); - - let layer = ts::fmt::layer() - .with_ansi(with_color) - .with_filter(module_filter_fn); - - ts::registry().with(env_filter).with(layer).init(); -} diff --git a/tools/test-framework/src/bootstrap/mod.rs b/tools/test-framework/src/bootstrap/mod.rs deleted file mode 100644 index 96a1e6de5f..0000000000 --- a/tools/test-framework/src/bootstrap/mod.rs +++ /dev/null @@ -1,17 +0,0 @@ -/*! - Helper functions for setting up test cases in an imperative way. - - Normal test authors should have no need to call functions provided - by the `bootstrap` module, as they are implicitly called by the - [`framework`](crate::framework) constructs. - - Advanced test authors with needs for more flexibility can call - functions in the `bootstrap` module directly, so that they have - more control of when exactly new chains and relayers should - be spawned. -*/ - -pub mod binary; -pub mod init; -pub mod nary; -pub mod single; diff --git a/tools/test-framework/src/bootstrap/nary/chain.rs b/tools/test-framework/src/bootstrap/nary/chain.rs deleted file mode 100644 index 30b2188266..0000000000 --- a/tools/test-framework/src/bootstrap/nary/chain.rs +++ /dev/null @@ -1,119 +0,0 @@ -/*! - Functions for bootstrapping N-ary number of chains. -*/ - -use core::convert::TryInto; -use ibc_relayer::chain::handle::ChainHandle; -use ibc_relayer::config::Config; -use ibc_relayer::foreign_client::ForeignClient; -use ibc_relayer::registry::SharedRegistry; - -use crate::bootstrap::binary::chain::{ - add_chain_config, add_keys_to_chain_handle, bootstrap_foreign_client, new_registry, - save_relayer_config, -}; -use crate::error::{handle_generic_error, Error}; -use crate::relayer::driver::RelayerDriver; -use crate::types::config::TestConfig; -use crate::types::nary::chains::{DynamicConnectedChains, NaryConnectedChains}; -use crate::types::single::node::FullNode; - -/** - Bootstrap a fixed number of chains specified by `SIZE`. 
-*/ -pub fn boostrap_chains_with_nodes( - test_config: &TestConfig, - full_nodes: [FullNode; SIZE], - config_modifier: impl FnOnce(&mut Config), -) -> Result<(RelayerDriver, NaryConnectedChains), Error> { - let (relayer, chains) = - boostrap_chains_with_any_nodes(test_config, full_nodes.into(), config_modifier)?; - - Ok((relayer, chains.try_into()?)) -} - -/** - Bootstrap a fixed number of chains that are actually - backed by the same underlying full node. -*/ -pub fn boostrap_chains_with_self_connected_node( - test_config: &TestConfig, - full_node: FullNode, - config_modifier: impl FnOnce(&mut Config), -) -> Result<(RelayerDriver, NaryConnectedChains), Error> { - let full_nodes = vec![full_node; SIZE]; - let (relayer, chains) = - boostrap_chains_with_any_nodes(test_config, full_nodes, config_modifier)?; - - Ok((relayer, chains.try_into()?)) -} - -/** - Bootstrap a dynamic number of chains, according to the number of full nodes - in the `Vec`. -*/ -pub fn boostrap_chains_with_any_nodes( - test_config: &TestConfig, - full_nodes: Vec, - config_modifier: impl FnOnce(&mut Config), -) -> Result<(RelayerDriver, DynamicConnectedChains), Error> { - let mut config = Config::default(); - - for node in full_nodes.iter() { - add_chain_config(&mut config, node)?; - } - - config_modifier(&mut config); - - let config_path = test_config.chain_store_dir.join("relayer-config.toml"); - - save_relayer_config(&config, &config_path)?; - - let registry = new_registry(config.clone()); - - let mut chain_handles = Vec::new(); - - for node in full_nodes.iter() { - let handle = spawn_chain_handle(®istry, node)?; - chain_handles.push(handle); - } - - let mut foreign_clients: Vec>> = Vec::new(); - - for handle_a in chain_handles.iter() { - let mut foreign_clients_b = Vec::new(); - - for handle_b in chain_handles.iter() { - let foreign_client = bootstrap_foreign_client(handle_a, handle_b, Default::default())?; - - foreign_clients_b.push(foreign_client); - } - - foreign_clients.push(foreign_clients_b); - } - - let relayer = RelayerDriver { - config_path, - config, - registry, - hang_on_fail: test_config.hang_on_fail, - }; - - let connected_chains = DynamicConnectedChains::new(chain_handles, full_nodes, foreign_clients); - - Ok((relayer, connected_chains)) -} - -fn spawn_chain_handle( - registry: &SharedRegistry, - node: &FullNode, -) -> Result { - let chain_id = &node.chain_driver.chain_id; - let handle = registry - .get_or_spawn(chain_id) - .map_err(handle_generic_error)?; - - add_keys_to_chain_handle(&handle, &node.wallets)?; - - Ok(handle) -} diff --git a/tools/test-framework/src/bootstrap/nary/channel.rs b/tools/test-framework/src/bootstrap/nary/channel.rs deleted file mode 100644 index ff04920983..0000000000 --- a/tools/test-framework/src/bootstrap/nary/channel.rs +++ /dev/null @@ -1,149 +0,0 @@ -/*! - Functions for bootstrapping N-ary number of chanels. 
-*/ - -use core::convert::TryInto; -use core::time::Duration; -use ibc::core::ics04_channel::channel::Order; -use ibc::core::ics24_host::identifier::PortId; -use ibc_relayer::chain::handle::ChainHandle; - -use crate::bootstrap::binary::channel::{ - bootstrap_channel_with_connection, BootstrapChannelOptions, -}; -use crate::bootstrap::nary::connection::bootstrap_connections_dynamic; -use crate::error::{handle_generic_error, Error}; -use crate::types::binary::channel::ConnectedChannel; -use crate::types::nary::chains::{DynamicConnectedChains, NaryConnectedChains}; -use crate::types::nary::channel::{ConnectedChannels, DynamicConnectedChannels}; -use crate::types::nary::connection::{ConnectedConnections, DynamicConnectedConnections}; -use crate::types::tagged::*; -use crate::util::array::{assert_same_dimension, into_nested_vec}; - -/** - Bootstrap a dynamic number of channels based on the number of - connections in `DynamicConnectedConnections`. -*/ -pub fn bootstrap_channels_with_connections_dynamic( - connections: DynamicConnectedConnections, - chains: &Vec, - ports: &Vec>, - order: Order, - bootstrap_with_random_ids: bool, -) -> Result, Error> { - let size = chains.len(); - - assert_same_dimension(size, connections.connections())?; - assert_same_dimension(size, ports)?; - - let mut channels: Vec>> = Vec::new(); - - for (i, connections_b) in connections.connections().iter().enumerate() { - let mut channels_b: Vec> = Vec::new(); - - for (j, connection) in connections_b.iter().enumerate() { - if i <= j { - let chain_a = &chains[i]; - let chain_b = &chains[j]; - - let port_a = &ports[i][j]; - let port_b = &ports[j][i]; - - let bootstrap_options = BootstrapChannelOptions::default() - .order(order) - .bootstrap_with_random_ids(bootstrap_with_random_ids); - - let channel = bootstrap_channel_with_connection( - chain_a, - chain_b, - connection.clone(), - &DualTagged::new(port_a), - &DualTagged::new(port_b), - bootstrap_options, - )?; - - channels_b.push(channel); - } else { - let counter_channel = &channels[j][i]; - let channel = counter_channel.clone().flip(); - - channels_b.push(channel); - } - } - - channels.push(channels_b); - } - - Ok(DynamicConnectedChannels::new(channels)) -} - -/** - Bootstrap a fixed number of connections with the same `SIZE` - as in `ConnectedConnections`. -*/ -pub fn bootstrap_channels_with_connections( - connections: ConnectedConnections, - chains: [Handle; SIZE], - ports: [[PortId; SIZE]; SIZE], - order: Order, - bootstrap_with_random_ids: bool, -) -> Result, Error> { - let channels = bootstrap_channels_with_connections_dynamic( - connections.into(), - &chains.into(), - &into_nested_vec(ports), - order, - bootstrap_with_random_ids, - )?; - - channels.try_into().map_err(handle_generic_error) -} - -/** - Boostrap a dynamic number of channels together with the - underlying connections based on the number of chain handles - in `DynamicConnectedChains`. -*/ -pub fn bootstrap_channels_and_connections_dynamic( - chains: &DynamicConnectedChains, - ports: &Vec>, - connection_delay: Duration, - order: Order, - bootstrap_with_random_ids: bool, -) -> Result, Error> { - let connections = bootstrap_connections_dynamic( - chains.foreign_clients(), - connection_delay, - bootstrap_with_random_ids, - )?; - - bootstrap_channels_with_connections_dynamic( - connections, - chains.chain_handles(), - ports, - order, - bootstrap_with_random_ids, - ) -} - -/** - Bootstrap a fixed number of channels as specified by `SIZE`, - together with bootstrapping the underlying connections. 
-*/ -pub fn bootstrap_channels_and_connections( - chains: &NaryConnectedChains, - ports: [[PortId; SIZE]; SIZE], - connection_delay: Duration, - order: Order, - bootstrap_with_random_ids: bool, -) -> Result, Error> { - let channels = bootstrap_channels_and_connections_dynamic( - &chains.clone().into(), - &into_nested_vec(ports), - connection_delay, - order, - bootstrap_with_random_ids, - )?; - - channels.try_into() -} diff --git a/tools/test-framework/src/bootstrap/nary/connection.rs b/tools/test-framework/src/bootstrap/nary/connection.rs deleted file mode 100644 index 932f9fc9be..0000000000 --- a/tools/test-framework/src/bootstrap/nary/connection.rs +++ /dev/null @@ -1,75 +0,0 @@ -/*! - Functions for bootstrapping N-ary number of connections. -*/ - -use core::convert::TryInto; -use core::time::Duration; -use ibc_relayer::chain::handle::ChainHandle; -use ibc_relayer::foreign_client::ForeignClient; - -use crate::bootstrap::binary::connection::{bootstrap_connection, BootstrapConnectionOptions}; -use crate::error::Error; -use crate::types::binary::connection::ConnectedConnection; -use crate::types::binary::foreign_client::ForeignClientPair; -use crate::types::nary::connection::{ConnectedConnections, DynamicConnectedConnections}; -use crate::types::nary::foreign_client::ForeignClientPairs; -use crate::util::array::assert_same_dimension; - -/** - Bootstrap a dynamic number of connections based on the - given foreign client NxN matrix. -*/ -pub fn bootstrap_connections_dynamic( - foreign_clients: &Vec>>, - connection_delay: Duration, - bootstrap_with_random_ids: bool, -) -> Result, Error> { - let size = foreign_clients.len(); - - assert_same_dimension(size, foreign_clients)?; - - let mut connections: Vec>> = Vec::new(); - - for (i, foreign_clients_b) in foreign_clients.iter().enumerate() { - let mut connections_b: Vec> = Vec::new(); - - for (j, foreign_client) in foreign_clients_b.iter().enumerate() { - if i <= j { - let counter_foreign_client = &foreign_clients[j][i]; - let foreign_clients = - ForeignClientPair::new(foreign_client.clone(), counter_foreign_client.clone()); - - let bootstrap_options = BootstrapConnectionOptions::default() - .connection_delay(connection_delay) - .bootstrap_with_random_ids(bootstrap_with_random_ids); - - let connection = bootstrap_connection(&foreign_clients, bootstrap_options)?; - - connections_b.push(connection); - } else { - let counter_connection = &connections[j][i]; - let connection = counter_connection.clone().flip(); - - connections_b.push(connection); - } - } - - connections.push(connections_b); - } - - Ok(DynamicConnectedConnections::new(connections)) -} - -pub fn bootstrap_connections( - foreign_clients: ForeignClientPairs, - connection_delay: Duration, - bootstrap_with_random_ids: bool, -) -> Result, Error> { - let connections = bootstrap_connections_dynamic( - &foreign_clients.into_nested_vec(), - connection_delay, - bootstrap_with_random_ids, - )?; - - connections.try_into() -} diff --git a/tools/test-framework/src/bootstrap/nary/mod.rs b/tools/test-framework/src/bootstrap/nary/mod.rs deleted file mode 100644 index d7d0b00768..0000000000 --- a/tools/test-framework/src/bootstrap/nary/mod.rs +++ /dev/null @@ -1,13 +0,0 @@ -/*! - Experimental work to bootstrap N-ary chains for testing. - - This is a work in progress and although it works, is currently - lack of documentation. 
We put this behind the "experimental"
- feature flag so that developers who do not need this feature
- are not obligated to go through the code and understand what
- is happening under the hood.
-*/
-
-pub mod chain;
-pub mod channel;
-pub mod connection;
diff --git a/tools/test-framework/src/bootstrap/single.rs b/tools/test-framework/src/bootstrap/single.rs
deleted file mode 100644
index f64e1a0f9d..0000000000
--- a/tools/test-framework/src/bootstrap/single.rs
+++ /dev/null
@@ -1,155 +0,0 @@
-/*!
- Helper functions for bootstrapping a single full node.
-*/
-use core::time::Duration;
-use std::sync::{Arc, RwLock};
-use toml;
-use tracing::info;
-
-use crate::chain::builder::ChainBuilder;
-use crate::chain::config;
-use crate::chain::driver::ChainDriver;
-use crate::error::Error;
-use crate::ibc::denom::Denom;
-use crate::types::single::node::FullNode;
-use crate::types::wallet::{TestWallets, Wallet};
-use crate::util::random::{random_u32, random_u64_range};
-
-/**
- Bootstrap a single full node with the provided [`ChainBuilder`] and
- a prefix for the chain ID.
-
- The function generates a random postfix attached to the end of
- a chain ID. So, for example, having a prefix `"alpha"` may generate
- a chain with an ID like `"ibc-alpha-f5a2a988"`.
-
- The bootstrap function also tries to use as many random parameters
- as possible when initializing the chain, such as using a random
- denomination and random wallets. This helps ensure that tests are
- not written to work only with specific hardcoded parameters.
-
- TODO: Due to the limitation of the `gaiad` command, currently
- parameters such as the stake denomination (`stake`) and the wallet
- address prefix (`cosmos`) cannot be overridden. It would be
- great to be able to randomize these parameters in the future
- as well.
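A standalone sketch of the randomized naming scheme described above, e.g. turning the prefix "alpha" into a chain ID like "ibc-alpha-1f3a9c2b" and a denomination like "coin5e0d". The pseudo-random helper below is only a stand-in for the crate's `random_u32` and exists to illustrate the format strings:

use std::time::{SystemTime, UNIX_EPOCH};

// Stand-in for a proper RNG; good enough to illustrate the naming scheme.
fn pseudo_random_u32() -> u32 {
    SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .expect("clock before unix epoch")
        .subsec_nanos()
}

fn random_chain_id(prefix: &str) -> String {
    format!("ibc-{}-{:x}", prefix, pseudo_random_u32())
}

fn random_denom() -> String {
    format!("coin{:x}", pseudo_random_u32())
}

fn main() {
    println!("{} {}", random_chain_id("alpha"), random_denom());
}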
-*/ -pub fn bootstrap_single_node( - builder: &ChainBuilder, - prefix: &str, - use_random_id: bool, - config_modifier: impl FnOnce(&mut toml::Value) -> Result<(), Error>, - genesis_modifier: impl FnOnce(&mut serde_json::Value) -> Result<(), Error>, -) -> Result { - let stake_denom = Denom::base("stake"); - - let denom = if use_random_id { - Denom::base(&format!("coin{:x}", random_u32())) - } else { - Denom::base("samoleans") - }; - - let initial_amount = random_u64_range(1_000_000_000_000, 9_000_000_000_000); - - let chain_driver = builder.new_chain(prefix, use_random_id)?; - - chain_driver.initialize()?; - - chain_driver.update_genesis_file("genesis.json", genesis_modifier)?; - - let validator = add_wallet(&chain_driver, "validator", use_random_id)?; - let relayer = add_wallet(&chain_driver, "relayer", use_random_id)?; - let user1 = add_wallet(&chain_driver, "user1", use_random_id)?; - let user2 = add_wallet(&chain_driver, "user2", use_random_id)?; - - chain_driver.add_genesis_account(&validator.address, &[(&stake_denom, initial_amount)])?; - - chain_driver.add_genesis_validator(&validator.id, &stake_denom, initial_amount)?; - - chain_driver.add_genesis_account( - &user1.address, - &[(&stake_denom, initial_amount), (&denom, initial_amount)], - )?; - - chain_driver.add_genesis_account( - &user2.address, - &[(&stake_denom, initial_amount), (&denom, initial_amount)], - )?; - - chain_driver.add_genesis_account( - &relayer.address, - &[(&stake_denom, initial_amount), (&denom, initial_amount)], - )?; - - chain_driver.collect_gen_txs()?; - - let log_level = std::env::var("RUST_LOG").unwrap_or_else(|_| "info".to_string()); - - chain_driver.update_chain_config("config.toml", |config| { - config::set_log_level(config, &log_level)?; - config::set_rpc_port(config, chain_driver.rpc_port)?; - config::set_p2p_port(config, chain_driver.p2p_port)?; - config::set_timeout_commit(config, Duration::from_secs(1))?; - config::set_timeout_propose(config, Duration::from_secs(1))?; - - config_modifier(config)?; - - Ok(()) - })?; - - chain_driver.update_chain_config("app.toml", |config| { - config::set_grpc_port(config, chain_driver.grpc_port)?; - config::disable_grpc_web(config)?; - - Ok(()) - })?; - - let process = chain_driver.start()?; - - chain_driver.assert_eventual_wallet_amount(&relayer.address, initial_amount, &denom)?; - - info!( - "started new chain {} at with home path {} and RPC address {}.", - chain_driver.chain_id, - chain_driver.home_path, - chain_driver.rpc_address(), - ); - - info!( - "user wallet for chain {} - id: {}, address: {}", - chain_driver.chain_id, user1.id.0, user1.address.0, - ); - - info!( - "you can manually interact with the chain using commands starting with:\n{} --home '{}' --node {}", - chain_driver.command_path, - chain_driver.home_path, - chain_driver.rpc_address(), - ); - - let wallets = TestWallets { - validator, - relayer, - user1, - user2, - }; - - let node = FullNode { - chain_driver, - denom, - wallets, - process: Arc::new(RwLock::new(process)), - }; - - Ok(node) -} - -fn add_wallet(driver: &ChainDriver, prefix: &str, use_random_id: bool) -> Result { - if use_random_id { - let num = random_u32(); - let wallet_id = format!("{}-{:x}", prefix, num); - driver.add_wallet(&wallet_id) - } else { - driver.add_wallet(prefix) - } -} diff --git a/tools/test-framework/src/chain/builder.rs b/tools/test-framework/src/chain/builder.rs deleted file mode 100644 index ebbeacc42a..0000000000 --- a/tools/test-framework/src/chain/builder.rs +++ /dev/null @@ -1,114 +0,0 @@ -/*! 
- Builder construct that spawn new chains with some common parameters. -*/ - -use alloc::sync::Arc; -use ibc::core::ics24_host::identifier::ChainId; -use tokio::runtime::Runtime; - -use crate::chain::driver::ChainDriver; -use crate::error::Error; -use crate::types::config::TestConfig; -use crate::util::random::{random_u32, random_unused_tcp_port}; - -/** - Used for holding common configuration needed to create new `ChainDriver`s. - - Currently this is hardcoded to support only a single version of Gaia chain. - We may want to turn this into a trait in the future to support different - chain implementations. -*/ -#[derive(Debug)] -pub struct ChainBuilder { - /** - The CLI executable used for the chain commands. Defaults to `gaiad`. - - TODO: Have a mutable list of command paths so that the `ChainBuilder` - may return [`ChainDriver`]s bound to different chain commands - for testing with multiple chain implementations. - */ - pub command_path: String, - - /** - The filesystem path to store the data files used by the chain. - */ - pub base_store_dir: String, - - pub account_prefix: String, - - pub runtime: Arc, -} - -impl ChainBuilder { - /** - Create a new `ChainBuilder`. - */ - pub fn new( - command_path: &str, - base_store_dir: &str, - account_prefix: &str, - runtime: Arc, - ) -> Self { - Self { - command_path: command_path.to_string(), - base_store_dir: base_store_dir.to_string(), - account_prefix: account_prefix.to_string(), - runtime, - } - } - - /** - Create a `ChainBuilder` based on the provided [`TestConfig`]. - */ - pub fn new_with_config(config: &TestConfig, runtime: Arc) -> Self { - Self::new( - &config.chain_command_path, - &format!("{}", config.chain_store_dir.display()), - &config.account_prefix, - runtime, - ) - } - - /** - Create a new [`ChainDriver`] with the chain ID containing the - given prefix. - - Note that this only configures the [`ChainDriver`] without - the actual chain being intitialized or spawned. - - The `ChainBuilder` will configure the [`ChainDriver`] with random - unused ports, and add a random suffix to the chain ID. - - For example, calling this with a prefix `"alpha"` will return - a [`ChainDriver`] configured with a chain ID like - `"ibc-alpha-f5a2a988"`. - */ - pub fn new_chain(&self, prefix: &str, use_random_id: bool) -> Result { - let chain_id = if use_random_id { - ChainId::from_string(&format!("ibc-{}-{:x}", prefix, random_u32())) - } else { - ChainId::from_string(&format!("ibc-{}", prefix)) - }; - - let rpc_port = random_unused_tcp_port(); - let grpc_port = random_unused_tcp_port(); - let grpc_web_port = random_unused_tcp_port(); - let p2p_port = random_unused_tcp_port(); - - let home_path = format!("{}/{}", self.base_store_dir, chain_id); - - let driver = ChainDriver::create( - self.command_path.clone(), - chain_id, - home_path, - self.account_prefix.clone(), - rpc_port, - grpc_port, - grpc_web_port, - p2p_port, - self.runtime.clone(), - )?; - - Ok(driver) - } -} diff --git a/tools/test-framework/src/chain/config.rs b/tools/test-framework/src/chain/config.rs deleted file mode 100644 index bec1529485..0000000000 --- a/tools/test-framework/src/chain/config.rs +++ /dev/null @@ -1,103 +0,0 @@ -/*! - Helper functions for modifying the Gaia chain config in TOML. - - Since we do not need to understand the full structure of the - CosmosSDK config, we are updating the config as dynamic TOML - values instead of serializing them into proper types. 
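A hedged sketch of that dynamic-TOML approach, assuming only the `toml` crate; the section and field names mirror `set_rpc_port` above, but the input document in `main` is made up:

use toml::Value;

// Set rpc.laddr without defining typed structs for the whole config.
fn set_rpc_laddr(config: &mut Value, port: u16) -> Result<(), String> {
    config
        .get_mut("rpc")
        .and_then(Value::as_table_mut)
        .ok_or_else(|| "expected [rpc] table".to_string())?
        .insert(
            "laddr".to_string(),
            Value::String(format!("tcp://0.0.0.0:{}", port)),
        );
    Ok(())
}

fn main() -> Result<(), String> {
    let mut config: Value = toml::from_str("[rpc]\nladdr = \"tcp://127.0.0.1:26657\"\n")
        .map_err(|e| e.to_string())?;
    set_rpc_laddr(&mut config, 26000)?;
    println!("{}", toml::to_string_pretty(&config).map_err(|e| e.to_string())?);
    Ok(())
}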
-*/ - -use core::time::Duration; -use eyre::{eyre, Report as Error}; -use toml::Value; - -/// Set the `rpc` field in the full node config. -pub fn set_rpc_port(config: &mut Value, port: u16) -> Result<(), Error> { - config - .get_mut("rpc") - .ok_or_else(|| eyre!("expect rpc section"))? - .as_table_mut() - .ok_or_else(|| eyre!("expect object"))? - .insert( - "laddr".to_string(), - format!("tcp://0.0.0.0:{}", port).into(), - ); - - Ok(()) -} - -pub fn set_grpc_port(config: &mut Value, port: u16) -> Result<(), Error> { - config - .get_mut("grpc") - .ok_or_else(|| eyre!("expect grpc section"))? - .as_table_mut() - .ok_or_else(|| eyre!("expect object"))? - .insert("address".to_string(), format!("0.0.0.0:{}", port).into()); - - Ok(()) -} - -pub fn disable_grpc_web(config: &mut Value) -> Result<(), Error> { - if let Some(field) = config.get_mut("grpc-web") { - field - .as_table_mut() - .ok_or_else(|| eyre!("expect object"))? - .insert("enable".to_string(), false.into()); - } - - Ok(()) -} - -/// Set the `p2p` field in the full node config. -pub fn set_p2p_port(config: &mut Value, port: u16) -> Result<(), Error> { - config - .get_mut("p2p") - .ok_or_else(|| eyre!("expect p2p section"))? - .as_table_mut() - .ok_or_else(|| eyre!("expect object"))? - .insert( - "laddr".to_string(), - format!("tcp://0.0.0.0:{}", port).into(), - ); - - Ok(()) -} - -/// Set the `consensus.timeout_commit` field in the full node config. -pub fn set_timeout_commit(config: &mut Value, duration: Duration) -> Result<(), Error> { - config - .get_mut("consensus") - .ok_or_else(|| eyre!("expect consensus section"))? - .as_table_mut() - .ok_or_else(|| eyre!("expect object"))? - .insert( - "timeout_commit".to_string(), - format!("{}ms", duration.as_millis()).into(), - ); - - Ok(()) -} - -/// Set the `consensus.timeout_propose` field in the full node config. -pub fn set_timeout_propose(config: &mut Value, duration: Duration) -> Result<(), Error> { - config - .get_mut("consensus") - .ok_or_else(|| eyre!("expect consensus section"))? - .as_table_mut() - .ok_or_else(|| eyre!("expect object"))? - .insert( - "timeout_propose".to_string(), - format!("{}ms", duration.as_millis()).into(), - ); - - Ok(()) -} - -/// Set the `log_level` field in the full node config. -pub fn set_log_level(config: &mut Value, log_level: &str) -> Result<(), Error> { - config - .as_table_mut() - .ok_or_else(|| eyre!("expect object"))? - .insert("log_level".to_string(), log_level.into()); - - Ok(()) -} diff --git a/tools/test-framework/src/chain/driver.rs b/tools/test-framework/src/chain/driver.rs deleted file mode 100644 index 4734967e70..0000000000 --- a/tools/test-framework/src/chain/driver.rs +++ /dev/null @@ -1,514 +0,0 @@ -/*! - Implementation of [`ChainDriver`]. 
-*/ - -use core::str::FromStr; -use core::time::Duration; - -use alloc::sync::Arc; -use eyre::eyre; -use serde_json as json; -use std::fs; -use std::path::PathBuf; -use std::process::{Command, Stdio}; -use std::str; -use tokio::runtime::Runtime; -use toml; -use tracing::debug; - -use ibc::core::ics24_host::identifier::ChainId; -use ibc_proto::google::protobuf::Any; -use ibc_relayer::chain::cosmos::types::config::TxConfig; -use ibc_relayer::keyring::{HDPath, KeyEntry, KeyFile}; - -use crate::chain::exec::{simple_exec, ExecOutput}; -use crate::error::{handle_generic_error, Error}; -use crate::ibc::denom::Denom; -use crate::relayer::tx::{new_tx_config_for_test, simple_send_tx}; -use crate::types::env::{EnvWriter, ExportEnv}; -use crate::types::process::ChildProcess; -use crate::types::wallet::{Wallet, WalletAddress, WalletId}; -use crate::util::file::pipe_to_file; -use crate::util::retry::assert_eventually_succeed; - -pub mod interchain; -pub mod query_txs; -pub mod transfer; - -/** - Number of times (seconds) to try and query a wallet to reach the - target amount, as used by [`assert_eventual_wallet_amount`]. - - We set this to around 60 seconds to make sure that the tests still - pass in slower environments like the CI. - - If you encounter retry error, try increasing this constant. If the - test is taking much longer to reach eventual consistency, it might - be indication of some underlying performance issues. -*/ -const WAIT_WALLET_AMOUNT_ATTEMPTS: u16 = 60; - -const COSMOS_HD_PATH: &str = "m/44'/118'/0'/0/0"; - -/** - A driver for interacting with a chain full nodes through command line. - - The name `ChainDriver` is inspired by - [WebDriver](https://developer.mozilla.org/en-US/docs/Web/WebDriver), - which is the term used to describe programs that control spawning of the - web browsers. In our case, the ChainDriver is used to spawn and manage - chain full nodes. - - Currently the `ChainDriver` is hardcoded to support only a single version - of Gaia chain. In the future, we will want to turn this into one or more - `ChainDriver` traits so that they can be used to spawn multiple chain - implementations other than a single version of Gaia. -*/ - -#[derive(Debug, Clone)] -pub struct ChainDriver { - /** - The filesystem path to the Gaia CLI. Defaults to `gaiad`. - */ - pub command_path: String, - - /** - The ID of the chain. - */ - pub chain_id: ChainId, - - /** - The home directory for the full node to store data files. - */ - pub home_path: String, - - pub account_prefix: String, - - /** - The port used for RPC. - */ - pub rpc_port: u16, - - /** - The port used for GRPC. - */ - pub grpc_port: u16, - - pub grpc_web_port: u16, - - /** - The port used for P2P. 
(Currently unused other than for setup) - */ - pub p2p_port: u16, - - pub tx_config: TxConfig, - - pub runtime: Arc, -} - -impl ExportEnv for ChainDriver { - fn export_env(&self, writer: &mut impl EnvWriter) { - writer.write_env("CMD", &self.command_path); - writer.write_env("HOME", &self.home_path); - writer.write_env("RPC_ADDR", &self.rpc_address()); - writer.write_env("GRPC_ADDR", &self.grpc_address()); - } -} - -impl ChainDriver { - /// Create a new [`ChainDriver`] - pub fn create( - command_path: String, - chain_id: ChainId, - home_path: String, - account_prefix: String, - rpc_port: u16, - grpc_port: u16, - grpc_web_port: u16, - p2p_port: u16, - runtime: Arc, - ) -> Result { - let tx_config = new_tx_config_for_test( - chain_id.clone(), - format!("http://localhost:{}", rpc_port), - format!("http://localhost:{}", grpc_port), - )?; - - Ok(Self { - command_path, - chain_id, - home_path, - account_prefix, - rpc_port, - grpc_port, - grpc_web_port, - p2p_port, - tx_config, - runtime, - }) - } - - /// Returns the full URL for the RPC address. - pub fn rpc_address(&self) -> String { - format!("http://localhost:{}", self.rpc_port) - } - - /// Returns the full URL for the WebSocket address. - pub fn websocket_address(&self) -> String { - format!("ws://localhost:{}/websocket", self.rpc_port) - } - - /// Returns the full URL for the GRPC address. - pub fn grpc_address(&self) -> String { - format!("http://localhost:{}", self.grpc_port) - } - - /** - Returns the full URL for the RPC address to listen to when starting - the full node. - - This is somehow different from [`rpc_address`](ChainDriver::rpc_address) - as it requires the `"tcp://"` scheme. - */ - pub fn rpc_listen_address(&self) -> String { - format!("tcp://localhost:{}", self.rpc_port) - } - - /** - Returns the full URL for the GRPC address to listen to when starting - the full node. - - This is somehow different from [`grpc_address`](ChainDriver::grpc_address) - as it requires no scheme to be specified. - */ - pub fn grpc_listen_address(&self) -> String { - format!("localhost:{}", self.grpc_port) - } - - /** - Execute the gaiad command with the given command line arguments, and - returns the STDOUT result as String. - - This is not the most efficient way of interacting with the CLI, but - is sufficient for testing purposes of interacting with the `gaiad` - commmand. - - The function also output debug logs that show what command is being - executed, so that users can manually re-run the commands by - copying from the logs. - */ - pub fn exec(&self, args: &[&str]) -> Result { - simple_exec(self.chain_id.as_str(), &self.command_path, args) - } - - /** - Initialized the chain data stores. - - This is used by - [`bootstrap_single_node`](crate::bootstrap::single::bootstrap_single_node). - */ - pub fn initialize(&self) -> Result<(), Error> { - self.exec(&[ - "--home", - &self.home_path, - "--chain-id", - self.chain_id.as_str(), - "init", - self.chain_id.as_str(), - ])?; - - Ok(()) - } - - /** - Modify the Gaia genesis file. 
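The address helpers above differ mainly in scheme: query addresses use `http://`, the RPC listen address needs `tcp://`, and the gRPC listen address takes no scheme at all. A minimal illustration with assumed port numbers:

// Toy holder for the two ports used below; not the crate's ChainDriver.
struct Ports {
    rpc: u16,
    grpc: u16,
}

impl Ports {
    fn rpc_address(&self) -> String {
        format!("http://localhost:{}", self.rpc)
    }
    fn rpc_listen_address(&self) -> String {
        format!("tcp://localhost:{}", self.rpc)
    }
    fn websocket_address(&self) -> String {
        format!("ws://localhost:{}/websocket", self.rpc)
    }
    fn grpc_address(&self) -> String {
        format!("http://localhost:{}", self.grpc)
    }
    fn grpc_listen_address(&self) -> String {
        format!("localhost:{}", self.grpc)
    }
}

fn main() {
    let ports = Ports { rpc: 26657, grpc: 9090 };
    println!("query RPC at   {}", ports.rpc_address());
    println!("listen RPC on  {}", ports.rpc_listen_address());
    println!("websocket at   {}", ports.websocket_address());
    println!("query gRPC at  {}", ports.grpc_address());
    println!("listen gRPC on {}", ports.grpc_listen_address());
}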
- */ - pub fn update_genesis_file( - &self, - file: &str, - cont: impl FnOnce(&mut serde_json::Value) -> Result<(), Error>, - ) -> Result<(), Error> { - let config1 = self.read_file(&format!("config/{}", file))?; - - let mut config2 = serde_json::from_str(&config1).map_err(handle_generic_error)?; - - cont(&mut config2)?; - - let config3 = serde_json::to_string_pretty(&config2).map_err(handle_generic_error)?; - - self.write_file("config/genesis.json", &config3)?; - - Ok(()) - } - - /** - Write the string content to a file path relative to the chain home - directory. - - This is not efficient but is sufficient for testing purposes. - */ - pub fn write_file(&self, file_path: &str, content: &str) -> Result<(), Error> { - let full_path = PathBuf::from(&self.home_path).join(file_path); - let full_path_str = format!("{}", full_path.display()); - fs::write(full_path, content)?; - debug!("created new file {:?}", full_path_str); - Ok(()) - } - - /** - Read the content at a file path relative to the chain home - directory, and return the result as a string. - - This is not efficient but is sufficient for testing purposes. - */ - pub fn read_file(&self, file_path: &str) -> Result { - let full_path = PathBuf::from(&self.home_path).join(file_path); - let res = fs::read_to_string(full_path)?; - Ok(res) - } - - /** - Add a wallet with the given ID to the full node's keyring. - */ - pub fn add_wallet(&self, wallet_id: &str) -> Result { - let output = self.exec(&[ - "--home", - self.home_path.as_str(), - "keys", - "add", - wallet_id, - "--keyring-backend", - "test", - "--output", - "json", - ])?; - - // gaia6 somehow displays result in stderr instead of stdout - let seed_content = if output.stdout.is_empty() { - output.stderr - } else { - output.stdout - }; - - let json_val: json::Value = json::from_str(&seed_content).map_err(handle_generic_error)?; - - let wallet_address = json_val - .get("address") - .ok_or_else(|| eyre!("expect address string field to be present in json result"))? - .as_str() - .ok_or_else(|| eyre!("expect address string field to be present in json result"))? - .to_string(); - - let seed_path = format!("{}-seed.json", wallet_id); - self.write_file(&seed_path, &seed_content)?; - - let hd_path = HDPath::from_str(COSMOS_HD_PATH) - .map_err(|e| eyre!("failed to create HDPath: {:?}", e))?; - - let key_file: KeyFile = json::from_str(&seed_content).map_err(handle_generic_error)?; - - let key = KeyEntry::from_key_file(key_file, &hd_path).map_err(handle_generic_error)?; - - Ok(Wallet::new(wallet_id.to_string(), wallet_address, key)) - } - - /** - Add a wallet address to the genesis account list for an uninitialized - full node. - */ - pub fn add_genesis_account( - &self, - wallet: &WalletAddress, - amounts: &[(&Denom, u64)], - ) -> Result<(), Error> { - let amounts_str = itertools::join( - amounts - .iter() - .map(|(denom, amount)| format!("{}{}", amount, denom)), - ",", - ); - - self.exec(&[ - "--home", - &self.home_path, - "add-genesis-account", - &wallet.0, - &amounts_str, - ])?; - - Ok(()) - } - - /** - Add a wallet ID with the given stake amount to be the genesis validator - for an uninitialized chain. 
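A reduced sketch of the read/modify/write pattern used by `update_genesis_file`: parse the file as dynamic JSON, let a caller-supplied closure edit it, then write it back pretty-printed. It assumes `serde_json`; the path and the fields touched in `main` are purely illustrative:

use serde_json::Value;
use std::fs;

fn update_json_file(
    path: &str,
    modify: impl FnOnce(&mut Value) -> Result<(), String>,
) -> Result<(), String> {
    // Read and parse the file as a dynamic JSON value.
    let raw = fs::read_to_string(path).map_err(|e| e.to_string())?;
    let mut value: Value = serde_json::from_str(&raw).map_err(|e| e.to_string())?;

    // Let the caller mutate it, then write it back pretty-printed.
    modify(&mut value)?;
    let pretty = serde_json::to_string_pretty(&value).map_err(|e| e.to_string())?;
    fs::write(path, pretty).map_err(|e| e.to_string())
}

fn main() -> Result<(), String> {
    fs::write("genesis.json", r#"{"app_state":{"staking":{}}}"#).map_err(|e| e.to_string())?;
    update_json_file("genesis.json", |genesis| {
        genesis["app_state"]["staking"]["params"] = serde_json::json!({ "unbonding_time": "20s" });
        Ok(())
    })
}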
- */ - pub fn add_genesis_validator( - &self, - wallet_id: &WalletId, - denom: &Denom, - amount: u64, - ) -> Result<(), Error> { - let amount_str = format!("{}{}", amount, denom); - - self.exec(&[ - "--home", - &self.home_path, - "gentx", - &wallet_id.0, - "--keyring-backend", - "test", - "--chain-id", - self.chain_id.as_str(), - &amount_str, - ])?; - - Ok(()) - } - - /** - Call `gaiad collect-gentxs` to generate the genesis transactions. - */ - pub fn collect_gen_txs(&self) -> Result<(), Error> { - self.exec(&["--home", &self.home_path, "collect-gentxs"])?; - - Ok(()) - } - - /** - Modify the Gaia chain config which is saved in toml format. - */ - pub fn update_chain_config( - &self, - file: &str, - cont: impl FnOnce(&mut toml::Value) -> Result<(), Error>, - ) -> Result<(), Error> { - let config_path = format!("config/{}", file); - - let config1 = self.read_file(&config_path)?; - - let mut config2 = toml::from_str(&config1).map_err(handle_generic_error)?; - - cont(&mut config2)?; - - let config3 = toml::to_string_pretty(&config2).map_err(handle_generic_error)?; - - self.write_file(&config_path, &config3)?; - - Ok(()) - } - - /** - Start a full node by running in the background `gaiad start`. - - Returns a [`ChildProcess`] that stops the full node process when the - value is dropped. - */ - pub fn start(&self) -> Result { - let base_args = [ - "--home", - &self.home_path, - "start", - "--pruning", - "nothing", - "--grpc.address", - &self.grpc_listen_address(), - "--rpc.laddr", - &self.rpc_listen_address(), - ]; - - let args: Vec<&str> = base_args.to_vec(); - - let mut child = Command::new(&self.command_path) - .args(&args) - .stdin(Stdio::null()) - .stdout(Stdio::piped()) - .stderr(Stdio::piped()) - .spawn()?; - - let stdout = child - .stdout - .take() - .ok_or_else(|| eyre!("expected stdout to be present in child process"))?; - - let stderr = child - .stderr - .take() - .ok_or_else(|| eyre!("expected stderr to be present in child process"))?; - - pipe_to_file(stdout, &format!("{}/stdout.log", self.home_path))?; - pipe_to_file(stderr, &format!("{}/stderr.log", self.home_path))?; - - Ok(ChildProcess::new(child)) - } - - /** - Query for the balances for a given wallet address and denomination - */ - pub fn query_balance(&self, wallet_id: &WalletAddress, denom: &Denom) -> Result { - let res = self - .exec(&[ - "--node", - &self.rpc_listen_address(), - "query", - "bank", - "balances", - &wallet_id.0, - "--denom", - denom.as_str(), - "--output", - "json", - ])? - .stdout; - - let amount_str = json::from_str::(&res) - .map_err(handle_generic_error)? - .get("amount") - .ok_or_else(|| eyre!("expected amount field"))? - .as_str() - .ok_or_else(|| eyre!("expected string field"))? - .to_string(); - - let amount = u64::from_str(&amount_str).map_err(handle_generic_error)?; - - Ok(amount) - } - - pub fn send_tx(&self, wallet: &Wallet, messages: Vec) -> Result<(), Error> { - self.runtime - .block_on(simple_send_tx(&self.tx_config, &wallet.key, messages)) - } - - /** - Assert that a wallet should eventually have the expected amount in the - given denomination. 
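A self-contained sketch of the child-process pattern used by `start`: spawn a command with piped stdout/stderr so the output can be streamed to log files. It runs `echo` instead of `gaiad start`, purely for illustration:

use std::io::Read;
use std::process::{Command, Stdio};

fn main() -> std::io::Result<()> {
    // Spawn the child with both output streams piped, as `start` does.
    let mut child = Command::new("echo")
        .arg("hello from child")
        .stdin(Stdio::null())
        .stdout(Stdio::piped())
        .stderr(Stdio::piped())
        .spawn()?;

    // Take ownership of the piped stdout; the real code forwards it to a log file.
    let mut stdout = child.stdout.take().expect("stdout was requested as piped");
    let mut buffer = String::new();
    stdout.read_to_string(&mut buffer)?;

    let status = child.wait()?;
    println!("child exited with {status}, output: {buffer}");
    Ok(())
}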
- */ - pub fn assert_eventual_wallet_amount( - &self, - wallet: &WalletAddress, - target_amount: u64, - denom: &Denom, - ) -> Result<(), Error> { - assert_eventually_succeed( - &format!("wallet reach {} amount {} {}", wallet, target_amount, denom), - WAIT_WALLET_AMOUNT_ATTEMPTS, - Duration::from_secs(1), - || { - let amount = self.query_balance(wallet, denom)?; - - if amount == target_amount { - Ok(()) - } else { - Err(Error::generic(eyre!( - "current balance of account {} with amount {} does not match the target amount {}", - wallet, - amount, - target_amount - ))) - } - }, - )?; - - Ok(()) - } -} diff --git a/tools/test-framework/src/chain/driver/interchain.rs b/tools/test-framework/src/chain/driver/interchain.rs deleted file mode 100644 index 80480e36d4..0000000000 --- a/tools/test-framework/src/chain/driver/interchain.rs +++ /dev/null @@ -1,140 +0,0 @@ -use eyre::eyre; -use serde::Serialize; -use serde_json as json; - -use ibc::core::ics24_host::identifier::ConnectionId; - -use crate::error::{handle_generic_error, Error}; -use crate::prelude::WalletAddress; - -use super::ChainDriver; - -/// Register a new interchain account controlled by the given account -/// over the given connection. -pub fn register_interchain_account( - driver: &ChainDriver, - from: &WalletAddress, - connection_id: &ConnectionId, -) -> Result<(), Error> { - let args = &[ - "--home", - &driver.home_path, - "--node", - &driver.rpc_listen_address(), - "--output", - "json", - "tx", - "intertx", - "register", - "--from", - &from.0, - "--connection-id", - connection_id.as_str(), - "--chain-id", - driver.chain_id.as_str(), - "--keyring-backend", - "test", - "-y", - ]; - - let res = driver.exec(args)?.stdout; - check_result_code(&res)?; - - Ok(()) -} - -/// Query the address of the interchain account -/// corresponding to the given controller account. -pub fn query_interchain_account( - driver: &ChainDriver, - account: &WalletAddress, - connection_id: &ConnectionId, -) -> Result { - let args = &[ - "--home", - &driver.home_path, - "--node", - &driver.rpc_listen_address(), - "--output", - "json", - "query", - "intertx", - "interchainaccounts", - connection_id.as_str(), - &account.0, - ]; - - let res = driver.exec(args)?.stdout; - let json_res = json::from_str::(&res).map_err(handle_generic_error)?; - - let address = json_res - .get("interchain_account_address") - .ok_or_else(|| eyre!("expected `interchain_account_address` field"))? - .as_str() - .ok_or_else(|| eyre!("expected string field"))?; - - Ok(WalletAddress(address.to_string())) -} - -/// Submit a msg from a controller account over an ICA channel -/// using the given connection. -pub fn interchain_submit( - driver: &ChainDriver, - from: &WalletAddress, - connection_id: &ConnectionId, - msg: &T, -) -> Result<(), Error> { - let msg_json = serde_json::to_string_pretty(msg).unwrap(); - println!("{}", msg_json); - - let args = &[ - "--home", - &driver.home_path, - "--node", - &driver.rpc_listen_address(), - "--output", - "json", - "tx", - "intertx", - "submit", - &msg_json, - "--connection-id", - connection_id.as_str(), - "--from", - &from.0, - "--chain-id", - driver.chain_id.as_str(), - "--keyring-backend", - "test", - "-y", - ]; - - let res = driver.exec(args)?.stdout; - check_result_code(&res)?; - - Ok(()) -} - -/// Check that a command succeeded, by ensuring that the JSON emitted -/// contains a `code` integer field set to 0. 
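A minimal stand-in for the retry logic behind `assert_eventual_wallet_amount`: run a fallible check up to a fixed number of attempts, sleeping between tries, and give up with the last error. The real helper is `assert_eventually_succeed` in the crate's retry utilities; everything below is a simplified sketch:

use std::thread::sleep;
use std::time::Duration;

fn assert_eventually<E: std::fmt::Debug>(
    attempts: u16,
    interval: Duration,
    mut check: impl FnMut() -> Result<(), E>,
) -> Result<(), String> {
    let mut last_err = None;
    for _ in 0..attempts {
        match check() {
            Ok(()) => return Ok(()),
            Err(e) => last_err = Some(e),
        }
        sleep(interval);
    }
    Err(format!("condition not met after {attempts} attempts: {:?}", last_err))
}

fn main() {
    let mut balance = 0u64;
    let result = assert_eventually(10, Duration::from_millis(10), || {
        balance += 25; // stand-in for querying the wallet balance
        if balance >= 100 {
            Ok(())
        } else {
            Err(format!("balance {balance} < 100"))
        }
    });
    println!("{result:?}");
}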
-fn check_result_code(res: &str) -> Result<(), Error> { - let json_res = json::from_str::(res).map_err(handle_generic_error)?; - - let code = json_res - .get("code") - .ok_or_else(|| eyre!("expected `code` field"))? - .as_i64() - .ok_or_else(|| eyre!("expected integer field"))?; - - if code == 0 { - Ok(()) - } else { - let raw_log = json_res - .get("raw_log") - .ok_or_else(|| eyre!("expected `raw_log` field"))? - .as_str() - .ok_or_else(|| eyre!("expected string field"))?; - - Err(Error::generic(eyre!("{}", raw_log))) - } -} diff --git a/tools/test-framework/src/chain/driver/query_txs.rs b/tools/test-framework/src/chain/driver/query_txs.rs deleted file mode 100644 index d824e9bc9a..0000000000 --- a/tools/test-framework/src/chain/driver/query_txs.rs +++ /dev/null @@ -1,58 +0,0 @@ -/*! - Methods for querying transactions on a chain. -*/ - -use serde_json as json; -use serde_yaml as yaml; - -use crate::error::{handle_generic_error, Error}; -use crate::types::wallet::WalletAddress; - -use super::ChainDriver; - -/** - Query for the transactions related to a wallet on `Chain` - receiving token transfer from others. -*/ -pub fn query_recipient_transactions( - driver: &ChainDriver, - recipient_address: &WalletAddress, -) -> Result { - let res = driver - .exec(&[ - "--node", - &driver.rpc_listen_address(), - "query", - "txs", - "--events", - &format!("transfer.recipient={}", recipient_address), - ])? - .stdout; - - tracing::debug!("parsing tx result: {}", res); - - match json::from_str(&res) { - Ok(res) => Ok(res), - _ => { - let value: yaml::Value = yaml::from_str(&res).map_err(handle_generic_error)?; - Ok(yaml_to_json_value(value)?) - } - } -} - -// Hack to convert yaml::Value to json::Value. Unfortunately there is -// no builtin conversion provided even though both Value types are -// essentially the same. We just convert the two types to and from -// strings as a shortcut. -// -// TODO: properly implement a common trait that is implemented by -// dynamic types like json::Value, yaml::Value, and toml::Value. -// That way we can write generic functions that work with any of -// the dynamic value types for testing purposes. -fn yaml_to_json_value(value: yaml::Value) -> Result { - let json_str = json::to_string(&value).map_err(handle_generic_error)?; - - let parsed = json::from_str(&json_str).map_err(handle_generic_error)?; - - Ok(parsed) -} diff --git a/tools/test-framework/src/chain/driver/transfer.rs b/tools/test-framework/src/chain/driver/transfer.rs deleted file mode 100644 index 0f6f5fa6c5..0000000000 --- a/tools/test-framework/src/chain/driver/transfer.rs +++ /dev/null @@ -1,37 +0,0 @@ -/*! - Methods for performing IBC token transfer on a chain. 
-*/ - -use crate::error::Error; -use crate::ibc::denom::Denom; -use crate::types::wallet::{Wallet, WalletAddress}; - -use super::ChainDriver; - -pub fn local_transfer_token( - driver: &ChainDriver, - sender: &Wallet, - recipient: &WalletAddress, - amount: u64, - denom: &Denom, -) -> Result<(), Error> { - driver.exec(&[ - "--node", - &driver.rpc_listen_address(), - "tx", - "bank", - "send", - &sender.address.0, - &recipient.0, - &format!("{}{}", amount, denom), - "--chain-id", - driver.chain_id.as_str(), - "--home", - &driver.home_path, - "--keyring-backend", - "test", - "--yes", - ])?; - - Ok(()) -} diff --git a/tools/test-framework/src/chain/exec.rs b/tools/test-framework/src/chain/exec.rs deleted file mode 100644 index 44e01cf046..0000000000 --- a/tools/test-framework/src/chain/exec.rs +++ /dev/null @@ -1,51 +0,0 @@ -use eyre::eyre; -use std::process::Command; -use std::str; -use tracing::{debug, trace}; - -use crate::error::{handle_exec_error, handle_generic_error, Error}; - -pub struct ExecOutput { - pub stdout: String, - pub stderr: String, -} - -pub fn simple_exec(desc: &str, command_path: &str, args: &[&str]) -> Result { - debug!( - "Executing command for {}: {} {}", - desc, - command_path, - itertools::join(args, " ") - ); - - let output = Command::new(&command_path) - .args(args) - .output() - .map_err(handle_exec_error(command_path))?; - - if output.status.success() { - let stdout = str::from_utf8(&output.stdout) - .map_err(handle_generic_error)? - .to_string(); - - let stderr = str::from_utf8(&output.stderr) - .map_err(handle_generic_error)? - .to_string(); - - trace!( - "command executed successfully with stdout: {}, stderr: {}", - stdout, - stderr - ); - - Ok(ExecOutput { stdout, stderr }) - } else { - let message = str::from_utf8(&output.stderr).map_err(handle_generic_error)?; - - Err(Error::generic(eyre!( - "command exited with error status {:?} and message: {}", - output.status.code(), - message - ))) - } -} diff --git a/tools/test-framework/src/chain/mod.rs b/tools/test-framework/src/chain/mod.rs deleted file mode 100644 index 7180f61a41..0000000000 --- a/tools/test-framework/src/chain/mod.rs +++ /dev/null @@ -1,22 +0,0 @@ -/*! - Constructs for spawning and managing full nodes, e.g. the - Gaia chains. - - Note that this is different from the "chains" being referred on - the relayer side in [`ibc_relayer`]. In testing we also need - to refer to the actual chains running rather than the chain client that - communicate with the chain. - - There is not yet good terminology to differentiate the two sides. - For now we will refer to the running chains as full nodes or - chain servers when qualification is needed, while keeping the original - chain terminology in the relayer unchanged to avoid having to - rename existing constructs in the relayer code. -*/ - -pub mod builder; -pub mod config; -pub mod driver; -pub mod exec; -pub mod tagged; -pub mod version; diff --git a/tools/test-framework/src/chain/tagged.rs b/tools/test-framework/src/chain/tagged.rs deleted file mode 100644 index da85694af5..0000000000 --- a/tools/test-framework/src/chain/tagged.rs +++ /dev/null @@ -1,239 +0,0 @@ -/*! - Methods for tagged version of the chain driver. 
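A condensed sketch of the `simple_exec` pattern above: run a command to completion, capture stdout and stderr as UTF-8, and turn a non-zero exit status into an error carrying the stderr text. The `ls -l` invocation in `main` is just an example:

use std::process::Command;

fn simple_exec(command: &str, args: &[&str]) -> Result<(String, String), String> {
    // Run the command to completion and collect its output.
    let output = Command::new(command)
        .args(args)
        .output()
        .map_err(|e| format!("failed to run {command}: {e}"))?;

    let stdout = String::from_utf8(output.stdout).map_err(|e| e.to_string())?;
    let stderr = String::from_utf8(output.stderr).map_err(|e| e.to_string())?;

    if output.status.success() {
        Ok((stdout, stderr))
    } else {
        Err(format!(
            "command exited with status {:?}: {}",
            output.status.code(),
            stderr
        ))
    }
}

fn main() {
    match simple_exec("ls", &["-l"]) {
        Ok((stdout, _)) => println!("{stdout}"),
        Err(e) => eprintln!("{e}"),
    }
}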
-*/ - -use ibc_proto::google::protobuf::Any; -use ibc_relayer::chain::cosmos::types::config::TxConfig; -use serde::Serialize; -use serde_json as json; - -use crate::chain::driver::interchain::{ - interchain_submit, query_interchain_account, register_interchain_account, -}; -use crate::chain::driver::query_txs::query_recipient_transactions; -use crate::chain::driver::transfer::local_transfer_token; -use crate::chain::driver::ChainDriver; -use crate::error::Error; -use crate::ibc::denom::Denom; -use crate::prelude::TaggedConnectionIdRef; -use crate::relayer::transfer::ibc_token_transfer; -use crate::types::id::{TaggedChainIdRef, TaggedChannelIdRef, TaggedPortIdRef}; -use crate::types::tagged::*; -use crate::types::wallet::{Wallet, WalletAddress}; - -/** - A [`ChainDriver`] may be tagged with a `Chain` tag in the form - [`MonoTagged`]. - - It would implement the [`TaggedChainDriverExt`] trait to provide tagged - version of the chain methods. - - The tagged chain driver methods help ensure that the `ChainDriver` - methods are used with the values associated to the correct chain. -*/ -pub trait TaggedChainDriverExt { - fn chain_id(&self) -> TaggedChainIdRef; - - fn tx_config(&self) -> MonoTagged; - - fn send_tx(&self, wallet: &MonoTagged, messages: Vec) - -> Result<(), Error>; - - /** - Tagged version of [`ChainDriver::query_balance`]. - - Query for the balance of a wallet that belongs to `Chain` - in the denomination that belongs to `Chain`. - */ - fn query_balance( - &self, - wallet_id: &MonoTagged, - denom: &MonoTagged, - ) -> Result; - - /** - Tagged version of [`ChainDriver::assert_eventual_wallet_amount`]. - - Assert that a wallet belongs to `Chain` would reach the target - amount in the denomination that belongs to `Chain`. - */ - fn assert_eventual_wallet_amount( - &self, - user: &MonoTagged, - target_amount: u64, - denom: &MonoTagged, - ) -> Result<(), Error>; - - /** - Submits an IBC token transfer transaction to `Chain` to any other - `Counterparty` chain. - - The following parameters are accepted: - - - A `PortId` on `Chain` that corresponds to a channel connected to - `Counterparty`. - - - A `ChannelId` on `Chain` that corresponds to a channel connected to - `Counterparty`. - - - The [`Wallet`] of the sender on `Chain`. - - - The [`WalletAddress`] address of the recipient on `Counterparty`. - - - The denomination of the amount on `Chain`. - - - The transfer amount. - */ - fn ibc_transfer_token( - &self, - port_id: &TaggedPortIdRef, - channel_id: &TaggedChannelIdRef, - sender: &MonoTagged, - recipient: &MonoTagged, - denom: &MonoTagged, - amount: u64, - ) -> Result<(), Error>; - - fn local_transfer_token( - &self, - sender: &MonoTagged, - recipient: &MonoTagged, - amount: u64, - denom: &MonoTagged, - ) -> Result<(), Error>; - - /** - Taggged version of [`query_recipient_transactions`]. - - Query for the transactions related to a wallet on `Chain` - receiving token transfer from others. 
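A toy version of the tagging idea described above (not the crate's `MonoTagged`): a zero-cost wrapper with a phantom `Chain` type parameter, so values belonging to different chains cannot be mixed up at compile time:

use std::marker::PhantomData;

struct Tagged<Chain, Value> {
    value: Value,
    _phantom: PhantomData<Chain>,
}

impl<Chain, Value> Tagged<Chain, Value> {
    fn new(value: Value) -> Self {
        Tagged { value, _phantom: PhantomData }
    }
    fn value(&self) -> &Value {
        &self.value
    }
}

// Marker types standing in for the two chains under test.
struct ChainA;
struct ChainB;

fn print_wallet_for_a(wallet: &Tagged<ChainA, String>) {
    println!("chain A wallet: {}", wallet.value());
}

fn main() {
    let wallet_a: Tagged<ChainA, String> = Tagged::new("cosmos1aaa".to_string());
    let _wallet_b: Tagged<ChainB, String> = Tagged::new("cosmos1bbb".to_string());

    print_wallet_for_a(&wallet_a);
    // print_wallet_for_a(&_wallet_b); // would not compile: wrong chain tag
}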
- */ - fn query_recipient_transactions( - &self, - recipient_address: &MonoTagged, - ) -> Result; - - fn register_interchain_account( - &self, - from: &MonoTagged, - connection_id: &TaggedConnectionIdRef, - ) -> Result<(), Error>; - - fn query_interchain_account( - &self, - from: &MonoTagged, - connection_id: &TaggedConnectionIdRef, - ) -> Result, Error>; - - fn interchain_submit( - &self, - from: &MonoTagged, - connection_id: &TaggedConnectionIdRef, - msg: &T, - ) -> Result<(), Error>; -} - -impl<'a, Chain: Send> TaggedChainDriverExt for MonoTagged { - fn chain_id(&self) -> TaggedChainIdRef { - self.map_ref(|val| &val.chain_id) - } - - fn tx_config(&self) -> MonoTagged { - self.map_ref(|val| &val.tx_config) - } - - fn send_tx( - &self, - wallet: &MonoTagged, - messages: Vec, - ) -> Result<(), Error> { - self.value().send_tx(wallet.value(), messages) - } - - fn query_balance( - &self, - wallet_id: &MonoTagged, - denom: &MonoTagged, - ) -> Result { - self.value().query_balance(wallet_id.value(), denom.value()) - } - - fn assert_eventual_wallet_amount( - &self, - user: &MonoTagged, - target_amount: u64, - denom: &MonoTagged, - ) -> Result<(), Error> { - self.value() - .assert_eventual_wallet_amount(user.value(), target_amount, denom.value()) - } - - fn ibc_transfer_token( - &self, - port_id: &TaggedPortIdRef, - channel_id: &TaggedChannelIdRef, - sender: &MonoTagged, - recipient: &MonoTagged, - denom: &MonoTagged, - amount: u64, - ) -> Result<(), Error> { - self.value().runtime.block_on(ibc_token_transfer( - &self.tx_config(), - port_id, - channel_id, - sender, - recipient, - denom, - amount, - )) - } - - fn local_transfer_token( - &self, - sender: &MonoTagged, - recipient: &MonoTagged, - amount: u64, - denom: &MonoTagged, - ) -> Result<(), Error> { - local_transfer_token( - self.value(), - sender.value(), - recipient.value(), - amount, - denom.value(), - ) - } - - fn query_recipient_transactions( - &self, - recipient_address: &MonoTagged, - ) -> Result { - query_recipient_transactions(self.value(), recipient_address.value()) - } - - fn register_interchain_account( - &self, - from: &MonoTagged, - connection_id: &TaggedConnectionIdRef, - ) -> Result<(), Error> { - register_interchain_account(self.value(), from.value(), connection_id.value()) - } - - fn query_interchain_account( - &self, - from: &MonoTagged, - connection_id: &TaggedConnectionIdRef, - ) -> Result, Error> { - query_interchain_account(self.value(), from.value(), connection_id.value()) - .map(MonoTagged::new) - } - - fn interchain_submit( - &self, - from: &MonoTagged, - connection_id: &TaggedConnectionIdRef, - msg: &T, - ) -> Result<(), Error> { - interchain_submit(self.value(), from.value(), connection_id.value(), msg) - } -} diff --git a/tools/test-framework/src/chain/version.rs b/tools/test-framework/src/chain/version.rs deleted file mode 100644 index db27cd0948..0000000000 --- a/tools/test-framework/src/chain/version.rs +++ /dev/null @@ -1,29 +0,0 @@ -use semver::Version; -use tracing::debug; - -use crate::chain::exec::simple_exec; -use crate::error::{handle_generic_error, Error}; - -pub fn get_chain_command_version(command: &str) -> Result, Error> { - let output = simple_exec("version-command", command, &["version"])?; - - // gaia6 somehow outputs version string result in STDERR - let raw_version_str = if output.stdout.is_empty() { - output.stderr - } else { - output.stdout - }; - - let version_str = match raw_version_str.trim().strip_prefix('v') { - Some(str) => str.trim(), - None => raw_version_str.trim(), - }; - - 
debug!("parsing version string: {}", version_str); - - let version = Version::parse(version_str) - .map_err(handle_generic_error) - .ok(); - - Ok(version) -} diff --git a/tools/test-framework/src/error.rs b/tools/test-framework/src/error.rs deleted file mode 100644 index 388f80b1dc..0000000000 --- a/tools/test-framework/src/error.rs +++ /dev/null @@ -1,129 +0,0 @@ -//! Error type used for the tests. - -use core::convert::{From, Into}; -use eyre::Report; -use flex_error::{define_error, TraceError}; -use ibc_relayer::channel::error::ChannelError; -use ibc_relayer::connection::ConnectionError; -use ibc_relayer::error::Error as RelayerError; -use ibc_relayer::link::error::LinkError; -use ibc_relayer::supervisor::error::Error as SupervisorError; -use ibc_relayer::transfer::TransferError; -use std::io::{Error as IoError, ErrorKind as IoErrorKind}; - -define_error! { - Error { - Generic - [ TraceError ] - | _ | { "generic error" }, - - Assertion - { message: String } - | e | { format_args!("assertion failure: {}", e.message) }, - - Io - [ TraceError ] - | _ | { "io error"}, - - CommandNotFound - { command: String } - [ TraceError ] - | e | { format_args!("failed to execute command: {}. make sure it is available in $PATH", e.command) }, - - Relayer - [ RelayerError ] - | _ | { "relayer error"}, - - Supervisor - [ SupervisorError ] - | _ | { "supervisor error"}, - - Channel - [ ChannelError ] - | _ | { "channel error"}, - - Connection - [ ConnectionError ] - | _ | { "connection error"}, - - Transfer - [ TransferError ] - | _ | { "transfer error"}, - - Link - [ LinkError ] - | _ | { "link error" }, - - Retry - { - task_name: String, - attempts: u16, - } - | e | { - format_args!( - "Expected task to eventually succeeed, but failed after {} attempts: {}", - e.attempts, - e.task_name - ) - }, - } -} - -pub fn handle_generic_error(e: impl Into) -> Error { - Error::generic(e.into()) -} - -pub fn handle_exec_error(command: &str) -> impl FnOnce(IoError) -> Error + '_ { - |e| match e.kind() { - IoErrorKind::NotFound => Error::command_not_found(command.to_string(), e), - _ => Error::io(e), - } -} - -impl From for Error { - fn from(e: Report) -> Self { - Error::generic(e) - } -} - -impl From for Error { - fn from(e: IoError) -> Self { - Error::io(e) - } -} - -impl From for Error { - fn from(e: RelayerError) -> Self { - Error::relayer(e) - } -} - -impl From for Error { - fn from(e: SupervisorError) -> Self { - Error::supervisor(e) - } -} - -impl From for Error { - fn from(e: ChannelError) -> Self { - Error::channel(e) - } -} - -impl From for Error { - fn from(e: ConnectionError) -> Self { - Error::connection(e) - } -} - -impl From for Error { - fn from(e: TransferError) -> Self { - Error::transfer(e) - } -} - -impl From for Error { - fn from(e: LinkError) -> Self { - Error::link(e) - } -} diff --git a/tools/test-framework/src/framework/base.rs b/tools/test-framework/src/framework/base.rs deleted file mode 100644 index 0ba59ea9fb..0000000000 --- a/tools/test-framework/src/framework/base.rs +++ /dev/null @@ -1,108 +0,0 @@ -/*! - Base infrastructure for the test framework. Includes basic setup for - initializing the logger and loading the test configuration. -*/ - -use alloc::sync::Arc; -use tokio::runtime::Runtime; -use tracing::info; - -use crate::bootstrap::init::init_test; -use crate::chain::builder::ChainBuilder; -use crate::error::Error; -use crate::types::config::TestConfig; - -/** - Runs a primitive test case implementing [`PrimitiveTest`]. 
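The version detection above trims the CLI output, strips an optional leading `v`, and treats an unparsable string as an unknown version rather than a hard error. A standalone sketch assuming the `semver` crate:

use semver::Version;

fn parse_chain_version(raw: &str) -> Option<Version> {
    let trimmed = raw.trim();
    // Accept both "6.0.0" and "v6.0.0"; fall back to None if parsing fails.
    let version_str = trimmed.strip_prefix('v').unwrap_or(trimmed).trim();
    Version::parse(version_str).ok()
}

fn main() {
    assert_eq!(parse_chain_version("v6.0.0\n"), Version::parse("6.0.0").ok());
    assert_eq!(parse_chain_version("not-a-version"), None);
    println!("version parsing sketch ok");
}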
-*/ -pub fn run_test(test: &Test) -> Result<(), Error> { - test.run() -} - -/** - Runs a basic test case implementing [`BasicTest`]. -*/ -pub fn run_basic_test(test: &Test) -> Result<(), Error> -where - Test: BasicTest, - Test: HasOverrides, - Overrides: TestConfigOverride, -{ - run_test(&RunBasicTest { test }) -} - -/** - Used for test case wrappers to indicate that the inner test case - implements override traits for overriding certain behavior of the test. - - Test writers do not need to be aware of this trait, as this is - automatically handled by - [TestOverrides](crate::framework::overrides::TestOverrides). -*/ -pub trait HasOverrides { - /** - The inner type that implements the override traits. - */ - type Overrides; - - /** - Get the reference to the inner override type. - */ - fn get_overrides(&self) -> &Self::Overrides; -} - -/** - A primitive test case provides no additional logic. -*/ -pub trait PrimitiveTest { - /// Test runner - fn run(&self) -> Result<(), Error>; -} - -/** - A basic test has the minimal test setup that is essential for almost all - tests. - - The test runner is given a [`TestConfig`] and [`ChainBuilder`], which - provides the essential customization for how the tests should be run. -*/ -pub trait BasicTest { - /// Test runner - fn run(&self, config: &TestConfig, builder: &ChainBuilder) -> Result<(), Error>; -} - -pub trait TestConfigOverride { - fn modify_test_config(&self, config: &mut TestConfig); -} - -/** - A wrapper type that lifts a test case that implements [`BasicTest`] - into a test case that implements [`PrimitiveTest`]. -*/ -pub struct RunBasicTest<'a, Test> { - /// Inner test - pub test: &'a Test, -} - -impl<'a, Test, Overrides> PrimitiveTest for RunBasicTest<'a, Test> -where - Test: BasicTest, - Test: HasOverrides, - Overrides: TestConfigOverride, -{ - fn run(&self) -> Result<(), Error> { - let mut config = init_test()?; - - let runtime = Arc::new(Runtime::new()?); - - self.test.get_overrides().modify_test_config(&mut config); - - info!("starting test with test config: {:?}", config); - - let builder = ChainBuilder::new_with_config(&config, runtime); - - self.test.run(&config, &builder)?; - - Ok(()) - } -} diff --git a/tools/test-framework/src/framework/binary/chain.rs b/tools/test-framework/src/framework/binary/chain.rs deleted file mode 100644 index fb7d0649d9..0000000000 --- a/tools/test-framework/src/framework/binary/chain.rs +++ /dev/null @@ -1,310 +0,0 @@ -/*! - Constructs for running test cases with two chains, - together with the relayer setup with chain handles and foreign clients. 
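A boiled-down illustration of the wrapper pattern used by `RunBasicTest` and the other `Run*` types in this module: a test written against a richer trait is lifted into a plainer one by a wrapper that performs the setup. All types below are simplified stand-ins for the real framework types:

struct TestConfig {
    chain_command: String,
}

trait PrimitiveTest {
    fn run(&self) -> Result<(), String>;
}

trait BasicTest {
    fn run(&self, config: &TestConfig) -> Result<(), String>;
}

struct RunBasicTest<'a, Test> {
    test: &'a Test,
}

impl<'a, Test: BasicTest> PrimitiveTest for RunBasicTest<'a, Test> {
    fn run(&self) -> Result<(), String> {
        // The real framework loads the config, initializes logging, builds the
        // ChainBuilder, etc.; here we just fabricate a config.
        let config = TestConfig { chain_command: "gaiad".to_string() };
        self.test.run(&config)
    }
}

struct MyTest;

impl BasicTest for MyTest {
    fn run(&self, config: &TestConfig) -> Result<(), String> {
        println!("running with {}", config.chain_command);
        Ok(())
    }
}

fn main() -> Result<(), String> {
    RunBasicTest { test: &MyTest }.run()
}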
-*/ - -use ibc_relayer::chain::handle::ChainHandle; -use ibc_relayer::config::Config; -use ibc_relayer::foreign_client::CreateOptions as ClientOptions; -use tracing::info; - -use crate::bootstrap::binary::chain::{bootstrap_chains_with_full_nodes, BootstrapClientOptions}; -use crate::error::Error; -use crate::framework::base::{HasOverrides, TestConfigOverride}; -use crate::framework::binary::node::{ - run_binary_node_test, run_single_node_test, BinaryNodeTest, NodeConfigOverride, - NodeGenesisOverride, -}; -use crate::framework::supervisor::{RunWithSupervisor, SupervisorOverride}; -use crate::relayer::driver::RelayerDriver; -use crate::types::binary::chains::{ConnectedChains, DropChainHandle}; -use crate::types::config::TestConfig; -use crate::types::env::write_env; -use crate::types::single::node::FullNode; -use crate::util::suspend::hang_on_error; - -/** - Runs a test case that implements [`BinaryChainTest`], with - the test case being executed twice, with the second time having the - position of the two chains flipped. -*/ -pub fn run_two_way_binary_chain_test(test: &Test) -> Result<(), Error> -where - Test: BinaryChainTest, - Test: HasOverrides, - Overrides: NodeConfigOverride - + NodeGenesisOverride - + RelayerConfigOverride - + ClientOptionsOverride - + SupervisorOverride - + TestConfigOverride, -{ - run_binary_chain_test(&RunTwoWayBinaryChainTest::new(test)) -} - -/** - Runs a test case that implements [`BinaryChainTest`]. -*/ -pub fn run_binary_chain_test(test: &Test) -> Result<(), Error> -where - Test: BinaryChainTest, - Test: HasOverrides, - Overrides: NodeConfigOverride - + NodeGenesisOverride - + RelayerConfigOverride - + ClientOptionsOverride - + SupervisorOverride - + TestConfigOverride, -{ - run_binary_node_test(&RunBinaryChainTest::new(&RunWithSupervisor::new(test))) -} - -/** - Runs a test case that implements [`BinaryChainTest`], with - the test case being executed with a single chain that is connected - to itself. -*/ -pub fn run_self_connected_binary_chain_test(test: &Test) -> Result<(), Error> -where - Test: BinaryChainTest, - Test: HasOverrides, - Overrides: NodeConfigOverride - + NodeGenesisOverride - + RelayerConfigOverride - + ClientOptionsOverride - + TestConfigOverride, -{ - run_single_node_test(&RunBinaryChainTest::new(test)) -} - -/** - This trait is implemented for test cases that need to have two - full nodes running together with the relayer setup with chain - handles and foreign clients. - - Test writers can use this to implement test cases that only - need the chains and relayers setup without the connection or - channel handshake. -*/ -pub trait BinaryChainTest { - /// Test runner - fn run( - &self, - config: &TestConfig, - relayer: RelayerDriver, - chains: ConnectedChains, - ) -> Result<(), Error>; -} - -/** - An internal trait that can be implemented by test cases to override the - relayer config before the relayer gets initialized. - - This is called by [`RunBinaryChainTest`] after the - full nodes are running and before the relayer is initialized. - - Test writers should implement - [`TestOverrides`](crate::framework::overrides::TestOverrides) - for their test cases instead of implementing this trait directly. -*/ -pub trait RelayerConfigOverride { - /// Modify the relayer config - fn modify_relayer_config(&self, config: &mut Config); -} - -/// An internal trait that can be implemented by test cases to override the -/// settings for the foreign clients bootstrapped for the test. 
-/// -/// The default implementation returns the settings for a client -/// connecting two Cosmos chains with no customizations. -/// Test writers should implement [`TestOverrides`] -/// for their test cases instead of implementing this trait directly. -/// -/// [`TestOverrides`]: crate::framework::overrides::TestOverrides -/// -pub trait ClientOptionsOverride { - fn client_options_a_to_b(&self) -> ClientOptions { - Default::default() - } - - fn client_options_b_to_a(&self) -> ClientOptions { - Default::default() - } -} - -/** - A wrapper type that lifts a test case that implements [`BinaryChainTest`] - into a test case the implements [`BinaryNodeTest`]. -*/ -pub struct RunBinaryChainTest<'a, Test> { - /// Inner test - pub test: &'a Test, -} - -/** - A wrapper type that lifts a test case that implements [`BinaryChainTest`] - into a test case the implements [`BinaryChainTest`]. - - During execution, the underlying [`BinaryChainTest`] is run twice, with - the second time having the position of the two chains flipped. -*/ -pub struct RunTwoWayBinaryChainTest<'a, Test> { - /// Inner test - pub test: &'a Test, -} - -/** - A wrapper type that lifts a test case that implements [`BinaryChainTest`] - into a test case that implements [`BinaryNodeTest`]. - - During execution, the test case is given a [`ConnectedChains`] with a - single underlying chain that is connected to itself. -*/ -pub struct RunSelfConnectedBinaryChainTest<'a, Test> { - /// Inner test - pub test: &'a Test, -} - -impl<'a, Test> RunBinaryChainTest<'a, Test> -where - Test: BinaryChainTest, -{ - /// Create a new [`RunBinaryChainTest`] - pub fn new(test: &'a Test) -> Self { - Self { test } - } -} - -impl<'a, Test> RunTwoWayBinaryChainTest<'a, Test> -where - Test: BinaryChainTest, -{ - /// Create a new [`RunTwoWayBinaryChainTest`] - pub fn new(test: &'a Test) -> Self { - Self { test } - } -} - -impl<'a, Test> RunSelfConnectedBinaryChainTest<'a, Test> -where - Test: BinaryChainTest, -{ - /// Create a new [`RunSelfConnectedBinaryChainTest`] - pub fn new(test: &'a Test) -> Self { - Self { test } - } -} - -impl<'a, Test, Overrides> BinaryNodeTest for RunBinaryChainTest<'a, Test> -where - Test: BinaryChainTest, - Test: HasOverrides, - Overrides: RelayerConfigOverride + ClientOptionsOverride, -{ - fn run(&self, config: &TestConfig, node_a: FullNode, node_b: FullNode) -> Result<(), Error> { - let overrides = self.test.get_overrides(); - - let bootstrap_options = BootstrapClientOptions::default() - .client_options_a_to_b(overrides.client_options_a_to_b()) - .client_options_b_to_a(overrides.client_options_b_to_a()) - .bootstrap_with_random_ids(config.bootstrap_with_random_ids); - - let (relayer, chains) = bootstrap_chains_with_full_nodes( - config, - node_a, - node_b, - bootstrap_options, - |config| { - overrides.modify_relayer_config(config); - }, - )?; - - let env_path = config.chain_store_dir.join("binary-chains.env"); - - write_env(&env_path, &(&relayer, &chains))?; - - info!("written chains environment to {}", env_path.display()); - - let _drop_handle_a = DropChainHandle(chains.handle_a.clone()); - let _drop_handle_b = DropChainHandle(chains.handle_b.clone()); - - self.test.run(config, relayer, chains)?; - - Ok(()) - } -} - -impl<'a, Test: BinaryChainTest> BinaryChainTest for RunTwoWayBinaryChainTest<'a, Test> { - fn run( - &self, - config: &TestConfig, - relayer: RelayerDriver, - chains: ConnectedChains, - ) -> Result<(), Error> { - info!( - "running two-way chain test, from {} to {}", - chains.chain_id_a(), - chains.chain_id_b(), - ); - 
- self.test.run(config, relayer.clone(), chains.clone())?; - - info!( - "running two-way chain test in the opposite direction, from {} to {}", - chains.chain_id_b(), - chains.chain_id_a(), - ); - - let chains = chains.flip(); - - self.test.run(config, relayer, chains)?; - - Ok(()) - } -} - -impl<'a, Test, Overrides> BinaryChainTest for RunWithSupervisor<'a, Test> -where - Test: BinaryChainTest, - Test: HasOverrides, - Overrides: SupervisorOverride, -{ - fn run( - &self, - config: &TestConfig, - relayer: RelayerDriver, - chains: ConnectedChains, - ) -> Result<(), Error> { - if self.get_overrides().should_spawn_supervisor() { - relayer - .clone() - .with_supervisor(|| self.test.run(config, relayer, chains)) - } else { - hang_on_error(config.hang_on_fail, || { - self.test.run(config, relayer, chains) - }) - } - } -} - -impl<'a, Test, Overrides> HasOverrides for RunBinaryChainTest<'a, Test> -where - Test: HasOverrides, -{ - type Overrides = Overrides; - - fn get_overrides(&self) -> &Self::Overrides { - self.test.get_overrides() - } -} - -impl<'a, Test, Overrides> HasOverrides for RunTwoWayBinaryChainTest<'a, Test> -where - Test: HasOverrides, -{ - type Overrides = Overrides; - - fn get_overrides(&self) -> &Self::Overrides { - self.test.get_overrides() - } -} diff --git a/tools/test-framework/src/framework/binary/channel.rs b/tools/test-framework/src/framework/binary/channel.rs deleted file mode 100644 index e25822c785..0000000000 --- a/tools/test-framework/src/framework/binary/channel.rs +++ /dev/null @@ -1,309 +0,0 @@ -/*! - Constructs for running test cases with two full nodes together with the - relayer setup with chain handles and foreign clients, as well as - connected IBC channels with completed handshakes. -*/ - -use ibc::core::ics04_channel::channel::Order; -use ibc::core::ics04_channel::Version; -use ibc::core::ics24_host::identifier::PortId; -use ibc_relayer::chain::handle::ChainHandle; -use tracing::info; - -use crate::bootstrap::binary::channel::{ - bootstrap_channel_with_connection, BootstrapChannelOptions, -}; -use crate::error::Error; -use crate::framework::base::{HasOverrides, TestConfigOverride}; -use crate::framework::binary::chain::{ - ClientOptionsOverride, RelayerConfigOverride, RunBinaryChainTest, -}; -use crate::framework::binary::connection::{ - BinaryConnectionTest, ConnectionDelayOverride, RunBinaryConnectionTest, -}; -use crate::framework::binary::node::{ - run_binary_node_test, NodeConfigOverride, NodeGenesisOverride, -}; -use crate::framework::supervisor::{RunWithSupervisor, SupervisorOverride}; -use crate::relayer::driver::RelayerDriver; -use crate::types::binary::chains::ConnectedChains; -use crate::types::binary::channel::ConnectedChannel; -use crate::types::binary::connection::ConnectedConnection; -use crate::types::config::TestConfig; -use crate::types::env::write_env; -use crate::types::tagged::*; -use crate::util::suspend::hang_on_error; - -/** - Runs a test case that implements [`BinaryChannelTest`], with - the test case being executed twice, with the second time having the position - of the two chains flipped. 
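A stripped-down picture of the two-way pattern implemented by `RunTwoWayBinaryChainTest` above: run the inner test once, then flip the two chains and run it again in the opposite direction. The `Chains` type here is a placeholder, not the crate's `ConnectedChains`:

struct Chains {
    a: String,
    b: String,
}

impl Chains {
    // Swap the two sides, like ConnectedChains::flip.
    fn flip(self) -> Chains {
        Chains { a: self.b, b: self.a }
    }
}

fn run_two_way(
    test: impl Fn(&Chains) -> Result<(), String>,
    chains: Chains,
) -> Result<(), String> {
    println!("running test from {} to {}", chains.a, chains.b);
    test(&chains)?;

    let flipped = chains.flip();
    println!("running test in the opposite direction, from {} to {}", flipped.a, flipped.b);
    test(&flipped)
}

fn main() -> Result<(), String> {
    run_two_way(
        |chains| {
            println!("  transfer {} -> {}", chains.a, chains.b);
            Ok(())
        },
        Chains { a: "ibc-alpha".to_string(), b: "ibc-beta".to_string() },
    )
}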
-*/ -pub fn run_two_way_binary_channel_test(test: &Test) -> Result<(), Error> -where - Test: BinaryChannelTest, - Test: HasOverrides, - Overrides: TestConfigOverride - + NodeConfigOverride - + NodeGenesisOverride - + RelayerConfigOverride - + ClientOptionsOverride - + SupervisorOverride - + ConnectionDelayOverride - + PortsOverride - + ChannelOrderOverride - + ChannelVersionOverride, -{ - run_binary_channel_test(&RunTwoWayBinaryChannelTest::new(test)) -} - -/** - Runs a test case that implements [`BinaryChannelTest`]. -*/ -pub fn run_binary_channel_test(test: &Test) -> Result<(), Error> -where - Test: BinaryChannelTest, - Test: HasOverrides, - Overrides: TestConfigOverride - + NodeConfigOverride - + NodeGenesisOverride - + RelayerConfigOverride - + ClientOptionsOverride - + SupervisorOverride - + ConnectionDelayOverride - + PortsOverride - + ChannelOrderOverride - + ChannelVersionOverride, -{ - run_binary_node_test(&RunBinaryChainTest::new(&RunBinaryConnectionTest::new( - &RunBinaryChannelTest::new(&RunWithSupervisor::new(test)), - ))) -} - -/** - This trait is implemented for test cases that need to have two - full nodes running together with the relayer setup with chain - handles and foreign clients, together with connected IBC channels - with completed handshakes. -*/ -pub trait BinaryChannelTest { - /// Test runner - fn run( - &self, - config: &TestConfig, - relayer: RelayerDriver, - chains: ConnectedChains, - channels: ConnectedChannel, - ) -> Result<(), Error>; -} - -/** - An internal trait that can be implemented by test cases to override - the port IDs used when creating the channels. - - This is called by [`RunBinaryChannelTest`] before creating - the IBC channels. - - Test writers should implement - [`TestOverrides`](crate::framework::overrides::TestOverrides) - for their test cases instead of implementing this trait directly. -*/ -pub trait PortsOverride { - /** - Return the port ID for chain A. - */ - fn channel_port_a(&self) -> PortId; - - /** - Return the port ID for chain B. - */ - fn channel_port_b(&self) -> PortId; -} - -/** - An internal trait for test cases to override the channel ordering - when creating channels. - - This is called by [`RunBinaryChannelTest`] before creating - the IBC channels. - - Test writers should implement - [`TestOverrides`](crate::framework::overrides::TestOverrides) - for their test cases instead of implementing this trait directly. -*/ -pub trait ChannelOrderOverride { - /** - Return the channel ordering as [`Order`]. - */ - fn channel_order(&self) -> Order; -} - -/** Facility for overriding the channel version */ -pub trait ChannelVersionOverride { - fn channel_version(&self) -> Version; -} - -/** - A wrapper type that lifts a test case that implements [`BinaryChannelTest`] - into a test case the implements [`BinaryConnectionTest`]. -*/ -pub struct RunBinaryChannelTest<'a, Test> { - /// Inner test - pub test: &'a Test, -} - -/** - A wrapper type that lifts a test case that implements [`BinaryChannelTest`] - into a test case the implements [`BinaryChannelTest`]. 
-*/ -pub struct RunTwoWayBinaryChannelTest<'a, Test> { - /// Inner test - pub test: &'a Test, -} - -impl<'a, Test> RunBinaryChannelTest<'a, Test> -where - Test: BinaryChannelTest, -{ - /// Create a new [`RunBinaryChannelTest`] - pub fn new(test: &'a Test) -> Self { - Self { test } - } -} - -impl<'a, Test> RunTwoWayBinaryChannelTest<'a, Test> -where - Test: BinaryChannelTest, -{ - /// Create a new [`BinaryChannelTest`] - pub fn new(test: &'a Test) -> Self { - Self { test } - } -} - -impl<'a, Test, Overrides> BinaryConnectionTest for RunBinaryChannelTest<'a, Test> -where - Test: BinaryChannelTest, - Test: HasOverrides, - Overrides: PortsOverride + ChannelOrderOverride + ChannelVersionOverride, -{ - fn run( - &self, - config: &TestConfig, - relayer: RelayerDriver, - chains: ConnectedChains, - connection: ConnectedConnection, - ) -> Result<(), Error> { - let overrides = self.test.get_overrides(); - - let port_a = overrides.channel_port_a(); - let port_b = overrides.channel_port_b(); - - let bootstrap_options = BootstrapChannelOptions::default() - .order(overrides.channel_order()) - .version(overrides.channel_version()) - .bootstrap_with_random_ids(config.bootstrap_with_random_ids); - - let channels = bootstrap_channel_with_connection( - &chains.handle_a, - &chains.handle_b, - connection, - &DualTagged::new(port_a).as_ref(), - &DualTagged::new(port_b).as_ref(), - bootstrap_options, - )?; - - let env_path = config.chain_store_dir.join("binary-channels.env"); - - write_env(&env_path, &(&chains, &(&relayer, &channels)))?; - - info!("written channel environment to {}", env_path.display()); - - self.test.run(config, relayer, chains, channels)?; - - Ok(()) - } -} - -impl<'a, Test: BinaryChannelTest> BinaryChannelTest for RunTwoWayBinaryChannelTest<'a, Test> { - fn run( - &self, - config: &TestConfig, - relayer: RelayerDriver, - chains: ConnectedChains, - channels: ConnectedChannel, - ) -> Result<(), Error> { - info!( - "running two-way channel test, from {}/{} to {}/{}", - chains.chain_id_a(), - channels.channel_id_a, - chains.chain_id_b(), - channels.channel_id_b, - ); - - self.test - .run(config, relayer.clone(), chains.clone(), channels.clone())?; - - info!( - "running two-way channel test in the opposite direction, from {}/{} to {}/{}", - chains.chain_id_b(), - channels.channel_id_b, - chains.chain_id_a(), - channels.channel_id_a, - ); - - let chains = chains.flip(); - let channels = channels.flip(); - - self.test.run(config, relayer, chains, channels)?; - - Ok(()) - } -} - -impl<'a, Test, Overrides> BinaryChannelTest for RunWithSupervisor<'a, Test> -where - Test: BinaryChannelTest, - Test: HasOverrides, - Overrides: SupervisorOverride, -{ - fn run( - &self, - config: &TestConfig, - relayer: RelayerDriver, - chains: ConnectedChains, - channels: ConnectedChannel, - ) -> Result<(), Error> { - if self.get_overrides().should_spawn_supervisor() { - relayer - .clone() - .with_supervisor(|| self.test.run(config, relayer, chains, channels)) - } else { - hang_on_error(config.hang_on_fail, || { - self.test.run(config, relayer, chains, channels) - }) - } - } -} - -impl<'a, Test, Overrides> HasOverrides for RunBinaryChannelTest<'a, Test> -where - Test: HasOverrides, -{ - type Overrides = Overrides; - - fn get_overrides(&self) -> &Self::Overrides { - self.test.get_overrides() - } -} - -impl<'a, Test, Overrides> HasOverrides for RunTwoWayBinaryChannelTest<'a, Test> -where - Test: HasOverrides, -{ - type Overrides = Overrides; - - fn get_overrides(&self) -> &Self::Overrides { - self.test.get_overrides() - } -} 
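For reference, a channel-level test written against the API deleted above might look like the following sketch. `ExampleChannelTest` is a hypothetical name, the crate prelude path is assumed, and the generic chain-handle parameters on `run` are filled in as assumptions:

use ibc_test_framework::prelude::*; // crate name assumed

pub struct ExampleChannelTest;

impl TestOverrides for ExampleChannelTest {
    // Use an ordered channel instead of the unordered default.
    fn channel_order(&self) -> Order {
        Order::Ordered
    }
}

impl BinaryChannelTest for ExampleChannelTest {
    fn run<ChainA: ChainHandle, ChainB: ChainHandle>(
        &self,
        _config: &TestConfig,
        _relayer: RelayerDriver,
        chains: ConnectedChains<ChainA, ChainB>,
        channels: ConnectedChannel<ChainA, ChainB>,
    ) -> Result<(), Error> {
        // The channel handshake is already completed at this point.
        info!(
            "channel {} on {} is connected to {} on {}",
            channels.channel_id_a,
            chains.chain_id_a(),
            channels.channel_id_b,
            chains.chain_id_b()
        );
        Ok(())
    }
}

#[test]
fn example_channel_test() -> Result<(), Error> {
    run_binary_channel_test(&ExampleChannelTest)
}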
diff --git a/tools/test-framework/src/framework/binary/connection.rs b/tools/test-framework/src/framework/binary/connection.rs deleted file mode 100644 index 9e96a7665e..0000000000 --- a/tools/test-framework/src/framework/binary/connection.rs +++ /dev/null @@ -1,258 +0,0 @@ -/*! - Constructs for running test cases with two full nodes together with the - relayer setup with chain handles and foreign clients, as well as - connected IBC connections with completed handshakes. -*/ - -use core::time::Duration; -use ibc_relayer::chain::handle::ChainHandle; -use tracing::info; - -use crate::bootstrap::binary::connection::{bootstrap_connection, BootstrapConnectionOptions}; -use crate::error::Error; -use crate::framework::base::HasOverrides; -use crate::framework::base::TestConfigOverride; -use crate::framework::binary::chain::{ - BinaryChainTest, ClientOptionsOverride, RelayerConfigOverride, RunBinaryChainTest, -}; -use crate::framework::binary::node::{ - run_binary_node_test, NodeConfigOverride, NodeGenesisOverride, -}; -use crate::framework::supervisor::{RunWithSupervisor, SupervisorOverride}; -use crate::relayer::driver::RelayerDriver; -use crate::types::binary::chains::ConnectedChains; -use crate::types::binary::connection::ConnectedConnection; -use crate::types::config::TestConfig; -use crate::types::env::write_env; -use crate::util::suspend::hang_on_error; - -/** - Runs a test case that implements [`BinaryConnectionTest`], with - the test case being executed twice, with the second time having the position - of the two chains flipped. -*/ -pub fn run_two_way_binary_connection_test(test: &Test) -> Result<(), Error> -where - Test: BinaryConnectionTest, - Test: HasOverrides, - Overrides: TestConfigOverride - + NodeConfigOverride - + NodeGenesisOverride - + RelayerConfigOverride - + ClientOptionsOverride - + SupervisorOverride - + ConnectionDelayOverride, -{ - run_binary_connection_test(&RunTwoWayBinaryConnectionTest::new(test)) -} - -/** - Runs a test case that implements [`BinaryConnectionTest`]. -*/ -pub fn run_binary_connection_test(test: &Test) -> Result<(), Error> -where - Test: BinaryConnectionTest, - Test: HasOverrides, - Overrides: TestConfigOverride - + NodeConfigOverride - + NodeGenesisOverride - + RelayerConfigOverride - + ClientOptionsOverride - + SupervisorOverride - + ConnectionDelayOverride, -{ - run_binary_node_test(&RunBinaryChainTest::new(&RunBinaryConnectionTest::new( - &RunWithSupervisor::new(test), - ))) -} - -/** - This trait is implemented for test cases that need to have two - full nodes running together with the relayer setup with chain - handles and foreign clients, together with connected IBC connections - with completed handshakes. - - Test writers can use this to implement test cases that only - need the connection setup without the channel handshake. -*/ -pub trait BinaryConnectionTest { - /// Test runner - fn run( - &self, - config: &TestConfig, - relayer: RelayerDriver, - chains: ConnectedChains, - connection: ConnectedConnection, - ) -> Result<(), Error>; -} - -/** - An internal trait that can be implemented by test cases to override - the connection delay parameter when creating connections. - - This is called by [`RunBinaryConnectionTest`] before creating - the IBC connections. - - Test writers should implement - [`TestOverrides`](crate::framework::overrides::TestOverrides) - for their test cases instead of implementing this trait directly. -*/ -pub trait ConnectionDelayOverride { - /** - Return the connection delay as [`Duration`]. 
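In practice a test case supplies this delay by overriding the corresponding `TestOverrides` method rather than implementing `ConnectionDelayOverride` directly. A minimal sketch, assuming the crate prelude is in scope, with a hypothetical test type and an arbitrary ten-second delay:

pub struct ExampleDelayedConnectionTest;

impl TestOverrides for ExampleDelayedConnectionTest {
    fn connection_delay(&self) -> Duration {
        Duration::from_secs(10)
    }
}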
- */ - fn connection_delay(&self) -> Duration; -} - -/** - A wrapper type that lifts a test case that implements [`BinaryConnectionTest`] - into a test case the implements [`BinaryChainTest`]. -*/ -pub struct RunBinaryConnectionTest<'a, Test> { - /// Inner test - pub test: &'a Test, -} - -/** - A wrapper type that lifts a test case that implements [`BinaryConnectionTest`] - into a test case the implements [`BinaryConnectionTest`]. -*/ -pub struct RunTwoWayBinaryConnectionTest<'a, Test> { - /// Inner test - pub test: &'a Test, -} - -impl<'a, Test> RunBinaryConnectionTest<'a, Test> -where - Test: BinaryConnectionTest, -{ - /// Create a new [`RunBinaryConnectionTest`] - pub fn new(test: &'a Test) -> Self { - Self { test } - } -} - -impl<'a, Test> RunTwoWayBinaryConnectionTest<'a, Test> -where - Test: BinaryConnectionTest, -{ - /// Create a new [`BinaryConnectionTest`] - pub fn new(test: &'a Test) -> Self { - Self { test } - } -} - -impl<'a, Test, Overrides> BinaryChainTest for RunBinaryConnectionTest<'a, Test> -where - Test: BinaryConnectionTest, - Test: HasOverrides, - Overrides: ConnectionDelayOverride, -{ - fn run( - &self, - config: &TestConfig, - relayer: RelayerDriver, - chains: ConnectedChains, - ) -> Result<(), Error> { - let bootstrap_options = BootstrapConnectionOptions::default() - .connection_delay(self.get_overrides().connection_delay()) - .bootstrap_with_random_ids(config.bootstrap_with_random_ids); - - let connection = bootstrap_connection(&chains.foreign_clients, bootstrap_options)?; - - let env_path = config.chain_store_dir.join("binary-connections.env"); - - write_env(&env_path, &(&chains, &connection))?; - - info!("written connection environment to {}", env_path.display()); - - self.test.run(config, relayer, chains, connection)?; - - Ok(()) - } -} - -impl<'a, Test: BinaryConnectionTest> BinaryConnectionTest - for RunTwoWayBinaryConnectionTest<'a, Test> -{ - fn run( - &self, - config: &TestConfig, - relayer: RelayerDriver, - chains: ConnectedChains, - connection: ConnectedConnection, - ) -> Result<(), Error> { - info!( - "running two-way connection test, from {}/{} to {}/{}", - chains.chain_id_a(), - connection.connection_id_a, - chains.chain_id_b(), - connection.connection_id_b, - ); - - self.test - .run(config, relayer.clone(), chains.clone(), connection.clone())?; - - info!( - "running two-way connection test in the opposite direction, from {}/{} to {}/{}", - chains.chain_id_b(), - connection.connection_id_b, - chains.chain_id_a(), - connection.connection_id_a, - ); - - let chains = chains.flip(); - let connection = connection.flip(); - - self.test.run(config, relayer, chains, connection)?; - - Ok(()) - } -} - -impl<'a, Test, Overrides> BinaryConnectionTest for RunWithSupervisor<'a, Test> -where - Test: BinaryConnectionTest, - Test: HasOverrides, - Overrides: SupervisorOverride, -{ - fn run( - &self, - config: &TestConfig, - relayer: RelayerDriver, - chains: ConnectedChains, - connection: ConnectedConnection, - ) -> Result<(), Error> { - if self.get_overrides().should_spawn_supervisor() { - relayer - .clone() - .with_supervisor(|| self.test.run(config, relayer, chains, connection)) - } else { - hang_on_error(config.hang_on_fail, || { - self.test.run(config, relayer, chains, connection) - }) - } - } -} - -impl<'a, Test, Overrides> HasOverrides for RunBinaryConnectionTest<'a, Test> -where - Test: HasOverrides, -{ - type Overrides = Overrides; - - fn get_overrides(&self) -> &Self::Overrides { - self.test.get_overrides() - } -} - -impl<'a, Test, Overrides> HasOverrides for 
RunTwoWayBinaryConnectionTest<'a, Test> -where - Test: HasOverrides, -{ - type Overrides = Overrides; - - fn get_overrides(&self) -> &Self::Overrides { - self.test.get_overrides() - } -} diff --git a/tools/test-framework/src/framework/binary/mod.rs b/tools/test-framework/src/framework/binary/mod.rs deleted file mode 100644 index b0b9ecdf16..0000000000 --- a/tools/test-framework/src/framework/binary/mod.rs +++ /dev/null @@ -1,8 +0,0 @@ -/*! - Constructs for test cases that involve interaction between two chains. -*/ - -pub mod chain; -pub mod channel; -pub mod connection; -pub mod node; diff --git a/tools/test-framework/src/framework/binary/node.rs b/tools/test-framework/src/framework/binary/node.rs deleted file mode 100644 index 9a5c0e3b87..0000000000 --- a/tools/test-framework/src/framework/binary/node.rs +++ /dev/null @@ -1,181 +0,0 @@ -/*! - Constructs for running test cases with two full nodes - running without setting up the relayer. -*/ - -use toml; - -use crate::bootstrap::single::bootstrap_single_node; -use crate::chain::builder::ChainBuilder; -use crate::error::Error; -use crate::framework::base::HasOverrides; -use crate::framework::base::{run_basic_test, BasicTest, TestConfigOverride}; -use crate::types::config::TestConfig; -use crate::types::single::node::FullNode; - -/** - Runs a test case that implements [`BinaryNodeTest`]. -*/ -pub fn run_binary_node_test(test: &Test) -> Result<(), Error> -where - Test: BinaryNodeTest, - Test: HasOverrides, - Overrides: NodeConfigOverride + NodeGenesisOverride + TestConfigOverride, -{ - run_basic_test(&RunBinaryNodeTest { test }) -} - -pub fn run_single_node_test(test: &Test) -> Result<(), Error> -where - Test: BinaryNodeTest, - Test: HasOverrides, - Overrides: NodeConfigOverride + NodeGenesisOverride + TestConfigOverride, -{ - run_basic_test(&RunSingleNodeTest { test }) -} - -/** - This trait is implemented for test cases that need to have two full nodes - running without the relayer being setup. - - The test case is given two [`FullNode`] which represents the two running full nodes. - - Test writers can use this to implement more advanced test cases which - require manual setup of the relayer, so that the relayer can be started - and stopped at a suitable time within the test. -*/ -pub trait BinaryNodeTest { - /// Test runner - fn run(&self, config: &TestConfig, node_a: FullNode, node_b: FullNode) -> Result<(), Error>; -} - -/** - An internal trait that can be implemented by test cases to override the - full node config before the chain gets initialized. - - The config is in the dynamic-typed [`toml::Value`] format, as we do not - want to model the full format of the node config in Rust. Test authors - can use the helper methods in [`chain::config`](crate::chain::config) - to modify common config fields. - - This is called by [`RunBinaryNodeTest`] before the full nodes are - initialized and started. - - Test writers should implement - [`TestOverrides`](crate::framework::overrides::TestOverrides) - for their test cases instead of implementing this trait directly. -*/ -pub trait NodeConfigOverride { - /// Modify the full node config - fn modify_node_config(&self, config: &mut toml::Value) -> Result<(), Error>; -} - -/** - An internal trait that can be implemented by test cases to override the - genesis file before the chain gets initialized. - - The config is in the dynamic-typed [`serde_json::Value`] format, as we do not - want to model the full format of the genesis file in Rust. 
- - This is called by [`RunBinaryNodeTest`] before the full nodes are - initialized and started. - - Test writers should implement - [`TestOverrides`](crate::framework::overrides::TestOverrides) - for their test cases instead of implementing this trait directly. -*/ -pub trait NodeGenesisOverride { - /// Modify the genesis file - fn modify_genesis_file(&self, genesis: &mut serde_json::Value) -> Result<(), Error>; -} - -/** - A wrapper type that lifts a test case that implements [`BinaryNodeTest`] - into a test case that implements [`BasicTest`]. -*/ -pub struct RunBinaryNodeTest<'a, Test> { - /// Inner test - pub test: &'a Test, -} - -pub struct RunSingleNodeTest<'a, Test> { - /// Inner test - pub test: &'a Test, -} - -impl<'a, Test, Overrides> BasicTest for RunBinaryNodeTest<'a, Test> -where - Test: BinaryNodeTest, - Test: HasOverrides, - Overrides: NodeConfigOverride + NodeGenesisOverride, -{ - fn run(&self, config: &TestConfig, builder: &ChainBuilder) -> Result<(), Error> { - let node_a = bootstrap_single_node( - builder, - "0", - config.bootstrap_with_random_ids, - |config| self.test.get_overrides().modify_node_config(config), - |genesis| self.test.get_overrides().modify_genesis_file(genesis), - )?; - - let node_b = bootstrap_single_node( - builder, - "1", - config.bootstrap_with_random_ids, - |config| self.test.get_overrides().modify_node_config(config), - |genesis| self.test.get_overrides().modify_genesis_file(genesis), - )?; - - let _node_process_a = node_a.process.clone(); - let _node_process_b = node_b.process.clone(); - - self.test.run(config, node_a, node_b)?; - - Ok(()) - } -} - -impl<'a, Test, Overrides> BasicTest for RunSingleNodeTest<'a, Test> -where - Test: BinaryNodeTest, - Test: HasOverrides, - Overrides: NodeConfigOverride + NodeGenesisOverride, -{ - fn run(&self, config: &TestConfig, builder: &ChainBuilder) -> Result<(), Error> { - let node = bootstrap_single_node( - builder, - "alpha", - config.bootstrap_with_random_ids, - |config| self.test.get_overrides().modify_node_config(config), - |genesis| self.test.get_overrides().modify_genesis_file(genesis), - )?; - - let _node_process = node.process.clone(); - - self.test.run(config, node.clone(), node)?; - - Ok(()) - } -} - -impl<'a, Test, Overrides> HasOverrides for RunBinaryNodeTest<'a, Test> -where - Test: HasOverrides, -{ - type Overrides = Overrides; - - fn get_overrides(&self) -> &Self::Overrides { - self.test.get_overrides() - } -} - -impl<'a, Test, Overrides> HasOverrides for RunSingleNodeTest<'a, Test> -where - Test: HasOverrides, -{ - type Overrides = Overrides; - - fn get_overrides(&self) -> &Self::Overrides { - self.test.get_overrides() - } -} diff --git a/tools/test-framework/src/framework/mod.rs b/tools/test-framework/src/framework/mod.rs deleted file mode 100644 index ecc6d402e2..0000000000 --- a/tools/test-framework/src/framework/mod.rs +++ /dev/null @@ -1,39 +0,0 @@ -/*! - Framework code for making it easier to write test cases. - - If you want to create a common test setup that is shared - by multiple test cases, the best way is to define them as - new traits within the [`framework`](crate::framework) module. - - The actual operations for bootstrapping the test setup - should *not* be implemented in this module. Instead, they - should be implemented as functions in the - [`bootstrap`](crate::bootstrap) module. This is so that - test writers can still have the option to manually - bootstrap the test setup without getting locked-in to - using the test framework. 
- - We can think of the test framework as being a DSL for - making it easier to write _declarative_ tests. On the - other hand, the [`bootstrap`](crate::bootstrap) module - allows the same test setup to be done in an _imperative_ way. - - ## Common Test Cases - - Here is a short list of common traits that are used for - defining simple test scenarios: - - - [`BinaryNodeTest`](binary::node::BinaryNodeTest) - - Test with two full nodes running without setting up the relayer. - - [`BinaryChainTest`](binary::chain::BinaryChainTest) - - Test with two full nodes running with the relayer setup with chain handles. - - [`BinaryChannelTest`](binary::channel::BinaryChannelTest) - - Test with two full nodes running with the relayer setup with chain handles - together with channels that are already connected. -*/ - -pub mod base; -pub mod binary; -pub mod nary; -pub mod overrides; -pub mod supervisor; diff --git a/tools/test-framework/src/framework/nary/chain.rs b/tools/test-framework/src/framework/nary/chain.rs deleted file mode 100644 index 540e71d12d..0000000000 --- a/tools/test-framework/src/framework/nary/chain.rs +++ /dev/null @@ -1,247 +0,0 @@ -/*! - Constructs for running test cases with more than two chains, - together with the relayer setup with chain handles and foreign clients. -*/ - -use ibc_relayer::chain::handle::ChainHandle; -use tracing::info; - -use crate::bootstrap::nary::chain::{ - boostrap_chains_with_nodes, boostrap_chains_with_self_connected_node, -}; -use crate::error::Error; -use crate::framework::base::{HasOverrides, TestConfigOverride}; -use crate::framework::binary::chain::RelayerConfigOverride; -use crate::framework::binary::node::{NodeConfigOverride, NodeGenesisOverride}; -use crate::framework::nary::node::{run_nary_node_test, NaryNodeTest}; -use crate::framework::supervisor::{RunWithSupervisor, SupervisorOverride}; -use crate::relayer::driver::RelayerDriver; -use crate::types::binary::chains::DropChainHandle; -use crate::types::config::TestConfig; -use crate::types::env::write_env; -use crate::types::nary::chains::NaryConnectedChains; -use crate::types::single::node::FullNode; -use crate::util::suspend::hang_on_error; - -/** - Runs a test case that implements [`NaryChainTest`] with a `SIZE` number of - chains bootstrapped. - - Note that the test may take more time as the number of chains increase, - as the required connections would increase exponentially. For each - new chain added, a self-connected foreign client is also created. - - Following shows a quick idea of how many connections are needed for each - new chain added: - - 1. 0-0 - 2. 0-0, 0-1, 1-1 - 3. 0-0, 0-1, 0-2, 1-1, 1-2, 2-2 - 4. 0-0, 0-1, 0-2, 0-3, 1-1, 1-2, 1-3, 2-2, 2-3, 3-3 - 5. ... -*/ -pub fn run_nary_chain_test(test: &Test) -> Result<(), Error> -where - Test: NaryChainTest, - Test: HasOverrides, - Overrides: TestConfigOverride - + NodeConfigOverride - + NodeGenesisOverride - + RelayerConfigOverride - + SupervisorOverride, -{ - run_nary_node_test(&RunNaryChainTest::new(&RunWithSupervisor::new(test))) -} - -/** - Runs a test case that implements [`NaryChainTest`], with one self-connected chain used - to emulate many connnections. - - This works because IBC allows a chain to connect back to itself without the chain - knowing it. Using this, we can emulate N-ary chain tests using only one chain - and save the performance overhead of spawning many chains. 
- - Note that with this, there is still performance overhead of establishing - new connections and channels for each position, as otherwise the transferred - IBC denoms will get mixed up. Some test cases also may not able to make - use of self connected chains, e.g. if they need to start and stop individual - chains. -*/ -pub fn run_self_connected_nary_chain_test( - test: &Test, -) -> Result<(), Error> -where - Test: NaryChainTest, - Test: HasOverrides, - Overrides: TestConfigOverride - + NodeConfigOverride - + NodeGenesisOverride - + RelayerConfigOverride - + SupervisorOverride, -{ - run_nary_node_test(&RunSelfConnectedNaryChainTest::new( - &RunWithSupervisor::new(test), - )) -} - -/** - This trait is implemented for test cases that need to have more than - two chains running. - - Test writers can use this to implement test cases that only - need the chains and relayers setup without the connection or - channel handshake. -*/ -pub trait NaryChainTest { - /// Test runner - fn run( - &self, - config: &TestConfig, - relayer: RelayerDriver, - chains: NaryConnectedChains, - ) -> Result<(), Error>; -} - -/** - A wrapper type that lifts a test case that implements [`RunNaryChainTest`] - into a test case the implements [`NaryNodeTest`]. -*/ -pub struct RunNaryChainTest<'a, Test, const SIZE: usize> { - /// Inner test - pub test: &'a Test, -} - -/** - A wrapper type that lifts a test case that implements [`RunNaryChainTest`] - into a test case the implements [`NaryNodeTest<1>`]. i.e. only one underlying - full node is spawned to emulate all chains. -*/ -pub struct RunSelfConnectedNaryChainTest<'a, Test, const SIZE: usize> { - /// Inner test - pub test: &'a Test, -} - -impl<'a, Test, Overrides, const SIZE: usize> NaryNodeTest for RunNaryChainTest<'a, Test, SIZE> -where - Test: NaryChainTest, - Test: HasOverrides, - Overrides: RelayerConfigOverride, -{ - fn run(&self, config: &TestConfig, nodes: [FullNode; SIZE]) -> Result<(), Error> { - let (relayer, chains) = boostrap_chains_with_nodes(config, nodes, |config| { - self.test.get_overrides().modify_relayer_config(config); - })?; - - let env_path = config.chain_store_dir.join("nary-chains.env"); - - write_env(&env_path, &(&relayer, &chains))?; - - info!("written chains environment to {}", env_path.display()); - - let _drop_handles = chains - .chain_handles() - .iter() - .map(|handle| DropChainHandle(handle.clone())) - .collect::>(); - - self.test.run(config, relayer, chains)?; - - Ok(()) - } -} - -impl<'a, Test, Overrides, const SIZE: usize> NaryNodeTest<1> - for RunSelfConnectedNaryChainTest<'a, Test, SIZE> -where - Test: NaryChainTest, - Test: HasOverrides, - Overrides: RelayerConfigOverride, -{ - fn run(&self, config: &TestConfig, nodes: [FullNode; 1]) -> Result<(), Error> { - let (relayer, chains) = - boostrap_chains_with_self_connected_node(config, nodes[0].clone(), |config| { - self.test.get_overrides().modify_relayer_config(config); - })?; - - let env_path = config.chain_store_dir.join("nary-chains.env"); - - write_env(&env_path, &(&relayer, &chains))?; - - info!("written chains environment to {}", env_path.display()); - - let _drop_handles = chains - .chain_handles() - .iter() - .map(|handle| DropChainHandle(handle.clone())) - .collect::>(); - - self.test.run(config, relayer, chains)?; - - Ok(()) - } -} - -impl<'a, Test, Overrides, const SIZE: usize> NaryChainTest for RunWithSupervisor<'a, Test> -where - Test: NaryChainTest, - Test: HasOverrides, - Overrides: SupervisorOverride, -{ - fn run( - &self, - config: &TestConfig, - relayer: 
RelayerDriver, - chains: NaryConnectedChains, - ) -> Result<(), Error> { - if self.get_overrides().should_spawn_supervisor() { - relayer - .clone() - .with_supervisor(|| self.test.run(config, relayer, chains)) - } else { - hang_on_error(config.hang_on_fail, || { - self.test.run(config, relayer, chains) - }) - } - } -} - -impl<'a, Test, const SIZE: usize> RunNaryChainTest<'a, Test, SIZE> -where - Test: NaryChainTest, -{ - pub fn new(test: &'a Test) -> Self { - Self { test } - } -} - -impl<'a, Test, const SIZE: usize> RunSelfConnectedNaryChainTest<'a, Test, SIZE> -where - Test: NaryChainTest, -{ - pub fn new(test: &'a Test) -> Self { - Self { test } - } -} - -impl<'a, Test, Overrides, const SIZE: usize> HasOverrides for RunNaryChainTest<'a, Test, SIZE> -where - Test: HasOverrides, -{ - type Overrides = Overrides; - - fn get_overrides(&self) -> &Self::Overrides { - self.test.get_overrides() - } -} - -impl<'a, Test, Overrides, const SIZE: usize> HasOverrides - for RunSelfConnectedNaryChainTest<'a, Test, SIZE> -where - Test: HasOverrides, -{ - type Overrides = Overrides; - - fn get_overrides(&self) -> &Self::Overrides { - self.test.get_overrides() - } -} diff --git a/tools/test-framework/src/framework/nary/channel.rs b/tools/test-framework/src/framework/nary/channel.rs deleted file mode 100644 index 85d451e984..0000000000 --- a/tools/test-framework/src/framework/nary/channel.rs +++ /dev/null @@ -1,257 +0,0 @@ -/*! - Constructs for running test cases with more than two chains, - together with the relayer setup with chain handles and foreign clients, - as well as connected IBC channels with completed handshakes. -*/ - -use ibc::core::ics24_host::identifier::PortId; -use ibc_relayer::chain::handle::ChainHandle; -use tracing::info; - -use crate::bootstrap::nary::channel::bootstrap_channels_with_connections; -use crate::error::Error; -use crate::framework::base::{HasOverrides, TestConfigOverride}; -use crate::framework::binary::chain::RelayerConfigOverride; -use crate::framework::binary::channel::{BinaryChannelTest, ChannelOrderOverride}; -use crate::framework::binary::connection::ConnectionDelayOverride; -use crate::framework::binary::node::{NodeConfigOverride, NodeGenesisOverride}; -use crate::framework::nary::chain::RunNaryChainTest; -use crate::framework::nary::connection::{NaryConnectionTest, RunNaryConnectionTest}; -use crate::framework::nary::node::run_nary_node_test; -use crate::framework::supervisor::{RunWithSupervisor, SupervisorOverride}; -use crate::relayer::driver::RelayerDriver; -use crate::types::config::TestConfig; -use crate::types::env::write_env; -use crate::types::nary::chains::NaryConnectedChains; -use crate::types::nary::channel::ConnectedChannels; -use crate::types::nary::connection::ConnectedConnections; -use crate::util::suspend::hang_on_error; - -pub fn run_nary_channel_test(test: &Test) -> Result<(), Error> -where - Test: NaryChannelTest, - Test: HasOverrides, - Overrides: TestConfigOverride - + NodeConfigOverride - + NodeGenesisOverride - + RelayerConfigOverride - + SupervisorOverride - + ConnectionDelayOverride - + PortsOverride - + ChannelOrderOverride, -{ - run_nary_node_test(&RunNaryChainTest::new(&RunNaryConnectionTest::new( - &RunNaryChannelTest::new(&RunWithSupervisor::new(test)), - ))) -} - -pub fn run_binary_as_nary_channel_test(test: &Test) -> Result<(), Error> -where - Test: BinaryChannelTest, - Test: HasOverrides, - Overrides: TestConfigOverride - + NodeConfigOverride - + NodeGenesisOverride - + RelayerConfigOverride - + SupervisorOverride - + 
ConnectionDelayOverride - + PortsOverride<2> - + ChannelOrderOverride, -{ - run_nary_channel_test(&RunBinaryAsNaryChannelTest::new(test)) -} - -/** - Returns a `SIZE`x`SIZE` number of transfer ports. - - This can be used by N-ary channel test cases to have a default - implementation of `PortsOverride`, with `"transfer"` used for - all port IDs. -*/ -pub fn transfer_port_overrides() -> [[PortId; SIZE]; SIZE] { - let port = PortId::transfer(); - let ports_ref = [[&port; SIZE]; SIZE]; - ports_ref.map(|inner_ports| inner_ports.map(Clone::clone)) -} - -/** - This trait is implemented for test cases that need to have more than - two chains running with connected channels. -*/ -pub trait NaryChannelTest { - /// Test runner - fn run( - &self, - config: &TestConfig, - relayer: RelayerDriver, - chains: NaryConnectedChains, - channels: ConnectedChannels, - ) -> Result<(), Error>; -} - -/** - An internal trait that can be implemented by test cases to override - the port IDs used when creating N-ary channels. - - When called, the implementer returns a `SIZE`x`SIZE` matrix - of [`PortId`]s to indicate which port ID for the chain at - the first position when connected to the chain at the second - position. - - This is called by [`RunNaryChannelTest`] before creating - the IBC channels. - - Note that this trait is not automatically implemented - for test cases via - [`TestOverrides`](crate::framework::overrides::TestOverrides), - except for the binary case `PortsOverride<2>`. - So each N-ary channel test must also implement this trait manually. - - It is possible to implement this with an empty body, in which case - the port ID matrix will all be populated with `"transfer"` ports. -*/ -pub trait PortsOverride { - fn channel_ports(&self) -> [[PortId; SIZE]; SIZE] { - transfer_port_overrides() - } -} - -/** - A wrapper type that lifts a test case that implements [`NaryChannelTest`] - into a test case the implements [`NaryConnectionTest`]. -*/ -pub struct RunNaryChannelTest<'a, Test, const SIZE: usize> { - /// Inner test - pub test: &'a Test, -} - -/** - A wrapper type that lifts a test case that implements [`BinaryChannelTest`] - into a test case the implements [`NaryChannelTest`]. - - This can be used to test the implementation of the N-ary test framework, - by running binary channel tests as N-ary channel tests. 
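Tying this together, an existing binary channel test can be replayed through the N-ary machinery. A sketch, reusing the hypothetical `ExampleChannelTest` from the earlier sketch:

#[test]
fn example_binary_as_nary_channel_test() -> Result<(), Error> {
    // Runs the binary test as a 2-ary channel test; the blanket
    // `PortsOverride<2>` impl for `TestOverrides` supplies the port matrix.
    run_binary_as_nary_channel_test(&ExampleChannelTest)
}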
-*/ -pub struct RunBinaryAsNaryChannelTest<'a, Test> { - /// Inner test - pub test: &'a Test, -} - -impl<'a, Test, const SIZE: usize> RunNaryChannelTest<'a, Test, SIZE> -where - Test: NaryChannelTest, -{ - pub fn new(test: &'a Test) -> Self { - Self { test } - } -} - -impl<'a, Test> RunBinaryAsNaryChannelTest<'a, Test> -where - Test: BinaryChannelTest, -{ - pub fn new(test: &'a Test) -> Self { - Self { test } - } -} - -impl<'a, Test, Overrides, const SIZE: usize> NaryConnectionTest - for RunNaryChannelTest<'a, Test, SIZE> -where - Test: NaryChannelTest, - Test: HasOverrides, - Overrides: PortsOverride + ChannelOrderOverride, -{ - fn run( - &self, - config: &TestConfig, - relayer: RelayerDriver, - chains: NaryConnectedChains, - connections: ConnectedConnections, - ) -> Result<(), Error> { - let overrides = self.test.get_overrides(); - let port_ids = overrides.channel_ports(); - let order = overrides.channel_order(); - - let channels = bootstrap_channels_with_connections( - connections, - chains.chain_handles().clone(), - port_ids, - order, - config.bootstrap_with_random_ids, - )?; - - let env_path = config.chain_store_dir.join("nary-channels.env"); - - write_env(&env_path, &(&chains, &(&relayer, &channels)))?; - - info!("written channel environment to {}", env_path.display()); - - self.test.run(config, relayer, chains, channels)?; - - Ok(()) - } -} - -impl<'a, Test> NaryChannelTest<2> for RunBinaryAsNaryChannelTest<'a, Test> -where - Test: BinaryChannelTest, -{ - fn run( - &self, - config: &TestConfig, - relayer: RelayerDriver, - chains: NaryConnectedChains, - channels: ConnectedChannels, - ) -> Result<(), Error> { - self.test - .run(config, relayer, chains.into(), channels.into()) - } -} - -impl<'a, Test, Overrides, const SIZE: usize> NaryChannelTest for RunWithSupervisor<'a, Test> -where - Test: NaryChannelTest, - Test: HasOverrides, - Overrides: SupervisorOverride, -{ - fn run( - &self, - config: &TestConfig, - relayer: RelayerDriver, - chains: NaryConnectedChains, - channels: ConnectedChannels, - ) -> Result<(), Error> { - if self.get_overrides().should_spawn_supervisor() { - relayer - .clone() - .with_supervisor(|| self.test.run(config, relayer, chains, channels)) - } else { - hang_on_error(config.hang_on_fail, || { - self.test.run(config, relayer, chains, channels) - }) - } - } -} - -impl<'a, Test, Overrides, const SIZE: usize> HasOverrides for RunNaryChannelTest<'a, Test, SIZE> -where - Test: HasOverrides, -{ - type Overrides = Overrides; - - fn get_overrides(&self) -> &Self::Overrides { - self.test.get_overrides() - } -} - -impl<'a, Test, Overrides> HasOverrides for RunBinaryAsNaryChannelTest<'a, Test> -where - Test: HasOverrides, -{ - type Overrides = Overrides; - - fn get_overrides(&self) -> &Self::Overrides { - self.test.get_overrides() - } -} diff --git a/tools/test-framework/src/framework/nary/connection.rs b/tools/test-framework/src/framework/nary/connection.rs deleted file mode 100644 index 485b2ecd1b..0000000000 --- a/tools/test-framework/src/framework/nary/connection.rs +++ /dev/null @@ -1,189 +0,0 @@ -/*! - Constructs for running test cases with more than two chains, - together with the relayer setup with chain handles and foreign clients, - as well as connected IBC connections with completed handshakes. 
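Unlike the binary overrides, the port matrix used by the N-ary channel runner above must be provided manually on each N-ary test; an empty `PortsOverride` impl falls back to the all-`transfer` matrix built by `transfer_port_overrides`. A sketch with a hypothetical test type:

pub struct ExampleNaryChannelTest;

impl PortsOverride<3> for ExampleNaryChannelTest {}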
-*/ - -use ibc_relayer::chain::handle::ChainHandle; -use tracing::info; - -use crate::bootstrap::nary::connection::bootstrap_connections; -use crate::error::Error; -use crate::framework::base::{HasOverrides, TestConfigOverride}; -use crate::framework::binary::chain::RelayerConfigOverride; -use crate::framework::binary::connection::{BinaryConnectionTest, ConnectionDelayOverride}; -use crate::framework::binary::node::{NodeConfigOverride, NodeGenesisOverride}; -use crate::framework::nary::chain::{NaryChainTest, RunNaryChainTest}; -use crate::framework::nary::node::run_nary_node_test; -use crate::framework::supervisor::{RunWithSupervisor, SupervisorOverride}; -use crate::relayer::driver::RelayerDriver; -use crate::types::config::TestConfig; -use crate::types::env::write_env; -use crate::types::nary::chains::NaryConnectedChains; -use crate::types::nary::connection::ConnectedConnections; -use crate::util::suspend::hang_on_error; - -pub fn run_nary_connection_test( - test: &Test, -) -> Result<(), Error> -where - Test: NaryConnectionTest, - Test: HasOverrides, - Overrides: TestConfigOverride - + NodeConfigOverride - + NodeGenesisOverride - + RelayerConfigOverride - + SupervisorOverride - + ConnectionDelayOverride, -{ - run_nary_node_test(&RunNaryChainTest::new(&RunNaryConnectionTest::new( - &RunWithSupervisor::new(test), - ))) -} - -/** - This trait is implemented for test cases that need to have more than - two chains running with connected connections. - - Test writers can use this to implement test cases that only - need the connections without channel handshake. -*/ -pub trait NaryConnectionTest { - /// Test runner - fn run( - &self, - config: &TestConfig, - relayer: RelayerDriver, - chains: NaryConnectedChains, - connections: ConnectedConnections, - ) -> Result<(), Error>; -} - -/** - A wrapper type that lifts a test case that implements [`NaryConnectionTest`] - into a test case the implements [`NaryChainTest`]. 
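A sketch of a connection-level N-ary test, with a hypothetical type name and with the generic handle and `SIZE` parameters written out as assumptions; only the handshaked connections (and no channels) are handed to the test body:

pub struct ExampleNaryConnectionTest;

impl TestOverrides for ExampleNaryConnectionTest {}

impl NaryConnectionTest<3> for ExampleNaryConnectionTest {
    fn run<Handle: ChainHandle>(
        &self,
        _config: &TestConfig,
        _relayer: RelayerDriver,
        chains: NaryConnectedChains<Handle, 3>,
        connections: NaryConnectedConnections<Handle, 3>,
    ) -> Result<(), Error> {
        // All pairwise connection handshakes are completed before this point.
        let _ = (chains, connections);
        Ok(())
    }
}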
-*/ -pub struct RunNaryConnectionTest<'a, Test, const SIZE: usize> { - /// Inner test - pub test: &'a Test, -} - -pub struct RunBinaryAsNaryConnectionTest<'a, Test> { - /// Inner test - pub test: &'a Test, -} - -impl<'a, Test, const SIZE: usize> RunNaryConnectionTest<'a, Test, SIZE> -where - Test: NaryConnectionTest, -{ - pub fn new(test: &'a Test) -> Self { - Self { test } - } -} - -impl<'a, Test> RunBinaryAsNaryConnectionTest<'a, Test> -where - Test: BinaryConnectionTest, -{ - pub fn new(test: &'a Test) -> Self { - Self { test } - } -} - -impl<'a, Test, Overrides, const SIZE: usize> NaryChainTest - for RunNaryConnectionTest<'a, Test, SIZE> -where - Test: NaryConnectionTest, - Test: HasOverrides, - Overrides: ConnectionDelayOverride, -{ - fn run( - &self, - config: &TestConfig, - relayer: RelayerDriver, - chains: NaryConnectedChains, - ) -> Result<(), Error> { - let connection_delay = self.get_overrides().connection_delay(); - - let connections = bootstrap_connections( - chains.foreign_clients().clone(), - connection_delay, - config.bootstrap_with_random_ids, - )?; - - let env_path = config.chain_store_dir.join("nary-connections.env"); - - write_env(&env_path, &(&chains, &(&relayer, &connections)))?; - - info!("written channel environment to {}", env_path.display()); - - self.test.run(config, relayer, chains, connections)?; - - Ok(()) - } -} - -impl<'a, Test> NaryConnectionTest<2> for RunBinaryAsNaryConnectionTest<'a, Test> -where - Test: BinaryConnectionTest, -{ - fn run( - &self, - config: &TestConfig, - relayer: RelayerDriver, - chains: NaryConnectedChains, - connections: ConnectedConnections, - ) -> Result<(), Error> { - self.test - .run(config, relayer, chains.into(), connections.into()) - } -} - -impl<'a, Test, Overrides, const SIZE: usize> NaryConnectionTest - for RunWithSupervisor<'a, Test> -where - Test: NaryConnectionTest, - Test: HasOverrides, - Overrides: SupervisorOverride, -{ - fn run( - &self, - config: &TestConfig, - relayer: RelayerDriver, - chains: NaryConnectedChains, - connections: ConnectedConnections, - ) -> Result<(), Error> { - if self.get_overrides().should_spawn_supervisor() { - relayer - .clone() - .with_supervisor(|| self.test.run(config, relayer, chains, connections)) - } else { - hang_on_error(config.hang_on_fail, || { - self.test.run(config, relayer, chains, connections) - }) - } - } -} - -impl<'a, Test, Overrides, const SIZE: usize> HasOverrides for RunNaryConnectionTest<'a, Test, SIZE> -where - Test: HasOverrides, -{ - type Overrides = Overrides; - - fn get_overrides(&self) -> &Self::Overrides { - self.test.get_overrides() - } -} - -impl<'a, Test, Overrides> HasOverrides for RunBinaryAsNaryConnectionTest<'a, Test> -where - Test: HasOverrides, -{ - type Overrides = Overrides; - - fn get_overrides(&self) -> &Self::Overrides { - self.test.get_overrides() - } -} diff --git a/tools/test-framework/src/framework/nary/mod.rs b/tools/test-framework/src/framework/nary/mod.rs deleted file mode 100644 index f2e5bacb84..0000000000 --- a/tools/test-framework/src/framework/nary/mod.rs +++ /dev/null @@ -1,8 +0,0 @@ -/*! - Run N-ary test cases that involve more than 2 chains. -*/ - -pub mod chain; -pub mod channel; -pub mod connection; -pub mod node; diff --git a/tools/test-framework/src/framework/nary/node.rs b/tools/test-framework/src/framework/nary/node.rs deleted file mode 100644 index f397c361c9..0000000000 --- a/tools/test-framework/src/framework/nary/node.rs +++ /dev/null @@ -1,86 +0,0 @@ -/*! 
- Constructs for running test cases with more than two full nodes, - running without setting up the relayer. -*/ - -use crate::bootstrap::single::bootstrap_single_node; -use crate::chain::builder::ChainBuilder; -use crate::error::Error; -use crate::framework::base::HasOverrides; -use crate::framework::base::{run_basic_test, BasicTest, TestConfigOverride}; -use crate::framework::binary::node::{NodeConfigOverride, NodeGenesisOverride}; -use crate::types::config::TestConfig; -use crate::types::single::node::FullNode; -use crate::util::array::try_into_array; - -pub fn run_nary_node_test(test: &Test) -> Result<(), Error> -where - Test: NaryNodeTest, - Test: HasOverrides, - Overrides: NodeConfigOverride + NodeGenesisOverride + TestConfigOverride, -{ - run_basic_test(&RunNaryNodeTest { test }) -} - -/** - This trait is implemented for test cases that need to have more than two - full nodes running without the relayer being setup. - - The test case is given `SIZE` number of [`FullNode`]s which represents - the running full nodes. - - Test writers can use this to implement more advanced test cases which - require manual setup of the relayer, so that the relayer can be started - and stopped at a suitable time within the test. -*/ -pub trait NaryNodeTest { - fn run(&self, config: &TestConfig, nodes: [FullNode; SIZE]) -> Result<(), Error>; -} - -/** - A wrapper type that lifts a test case that implements [`NaryNodeTest`] - into a test case the implements [`BasicTest`]. -*/ -pub struct RunNaryNodeTest<'a, Test, const SIZE: usize> { - pub test: &'a Test, -} - -impl<'a, Test, Overrides, const SIZE: usize> BasicTest for RunNaryNodeTest<'a, Test, SIZE> -where - Test: NaryNodeTest, - Test: HasOverrides, - Overrides: NodeConfigOverride + NodeGenesisOverride, -{ - fn run(&self, config: &TestConfig, builder: &ChainBuilder) -> Result<(), Error> { - let mut nodes = Vec::new(); - let mut node_processes = Vec::new(); - - for i in 0..SIZE { - let node = bootstrap_single_node( - builder, - &format!("{}", i), - config.bootstrap_with_random_ids, - |config| self.test.get_overrides().modify_node_config(config), - |genesis| self.test.get_overrides().modify_genesis_file(genesis), - )?; - - node_processes.push(node.process.clone()); - nodes.push(node); - } - - self.test.run(config, try_into_array(nodes)?)?; - - Ok(()) - } -} - -impl<'a, Test, Overrides, const SIZE: usize> HasOverrides for RunNaryNodeTest<'a, Test, SIZE> -where - Test: HasOverrides, -{ - type Overrides = Overrides; - - fn get_overrides(&self) -> &Self::Overrides { - self.test.get_overrides() - } -} diff --git a/tools/test-framework/src/framework/overrides.rs b/tools/test-framework/src/framework/overrides.rs deleted file mode 100644 index 38ac5dab9d..0000000000 --- a/tools/test-framework/src/framework/overrides.rs +++ /dev/null @@ -1,233 +0,0 @@ -/*! - Constructs for implementing overrides for test cases. 
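As an illustration of the override style this module provides, a single `TestOverrides` impl can adjust the node configuration hook described below. The specific `rpc.max_body_bytes` tweak and the test type name are only assumed examples:

pub struct ExampleOverridesTest;

impl TestOverrides for ExampleOverridesTest {
    fn modify_node_config(&self, config: &mut toml::Value) -> Result<(), Error> {
        // Adjust an arbitrary field in the full node's config.toml.
        if let Some(rpc) = config.get_mut("rpc").and_then(|v| v.as_table_mut()) {
            rpc.insert(
                "max_body_bytes".to_string(),
                toml::Value::Integer(2_000_000),
            );
        }
        Ok(())
    }
}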
-*/ - -use core::time::Duration; -use ibc::core::ics04_channel::channel::Order; -use ibc::core::ics04_channel::Version; -use ibc::core::ics24_host::identifier::PortId; -use ibc_relayer::config::default::connection_delay as default_connection_delay; -use ibc_relayer::config::Config; -use ibc_relayer::foreign_client::CreateOptions as ClientOptions; - -use crate::error::Error; -use crate::framework::base::HasOverrides; -use crate::framework::base::TestConfigOverride; -use crate::framework::binary::chain::{ClientOptionsOverride, RelayerConfigOverride}; -use crate::framework::binary::channel::{ - ChannelOrderOverride, ChannelVersionOverride, PortsOverride, -}; -use crate::framework::binary::connection::ConnectionDelayOverride; -use crate::framework::binary::node::{NodeConfigOverride, NodeGenesisOverride}; -use crate::framework::nary::channel::PortsOverride as NaryPortsOverride; -use crate::framework::supervisor::SupervisorOverride; -use crate::types::config::TestConfig; - -/** - This trait should be implemented for all test cases to allow overriding - some parts of the behavior during the test setup. - - Since all methods in this trait have default implementation, test cases - that do not need any override can have an empty implementation body for - this trait. - - The trait provides generic implementation of the specialized traits such as - [`RelayerConfigOverride`]. As a result, it is sufficient for test - writers to only implement this trait instead of implementing the - numerous override traits. - - When a new override trait is defined, the same trait method should - also be defined inside this trait with a default method body. -*/ -pub trait TestOverrides { - fn modify_test_config(&self, _config: &mut TestConfig) {} - - /** - Modify the full node config before the chain gets initialized. - - The config is in the dynamic-typed [`toml::Value`] format, as we do not - want to model the full format of the node config in Rust. Test authors - can use the helper methods in [`chain::config`](crate::chain::config) - to modify common config fields. - - Implemented for [`NodeConfigOverride`]. - */ - fn modify_node_config(&self, _config: &mut toml::Value) -> Result<(), Error> { - Ok(()) - } - - /** - Modify the genesis file before the chain gets initialized. - - The config is in the dynamic-typed [`serde_json::Value`] format, as we do not - want to model the full format of the genesis file in Rust. - - Implemented for [`NodeGenesisOverride`]. - */ - fn modify_genesis_file(&self, _genesis: &mut serde_json::Value) -> Result<(), Error> { - Ok(()) - } - - /** - Modify the relayer config before initializing the relayer. Does no - modification by default. - - Implemented for [`RelayerConfigOverride`]. - */ - fn modify_relayer_config(&self, _config: &mut Config) { - // No modification by default - } - - /// Returns the settings for the foreign client on the first chain for the - /// second chain. The defaults are for a client connecting two Cosmos chains - /// with no custom settings. - fn client_options_a_to_b(&self) -> ClientOptions { - Default::default() - } - - /// Returns the settings for the foreign client on the second chain for the - /// first chain. The defaults are for a client connecting two Cosmos chains - /// with no custom settings. - fn client_options_b_to_a(&self) -> ClientOptions { - Default::default() - } - - fn should_spawn_supervisor(&self) -> bool { - true - } - - /** - Return the connection delay used for creating connections as [`Duration`]. - Defaults to zero. 
- - Implemented for [`ConnectionDelayOverride`]. - */ - fn connection_delay(&self) -> Duration { - default_connection_delay() - } - - /** - Return the port ID used for creating the channel for the first chain. - Returns the "transfer" port by default. - - Implemented for [`PortsOverride`]. - */ - fn channel_port_a(&self) -> PortId { - PortId::transfer() - } - - /** - Return the port ID used for creating the channel for the second chain. - Returns the "transfer" port by default. - - Implemented for [`PortsOverride`]. - */ - fn channel_port_b(&self) -> PortId { - PortId::transfer() - } - - /** - Return the channel ordering used for creating channels as [`Order`]. - Defaults to [`Order::Unordered`]. - - Implemented for [`ChannelOrderOverride`]. - */ - fn channel_order(&self) -> Order { - Order::Unordered - } - - /** - Return the channel version used for creating channels as [`Version`]. - Defaults to [`Version::ics20()`]. - - Implemented for [`ChannelVersionOverride`]. - */ - fn channel_version(&self) -> Version { - Version::ics20() - } -} - -impl HasOverrides for Test { - type Overrides = Self; - - fn get_overrides(&self) -> &Self { - self - } -} - -impl TestConfigOverride for Test { - fn modify_test_config(&self, config: &mut TestConfig) { - TestOverrides::modify_test_config(self, config) - } -} - -impl NodeConfigOverride for Test { - fn modify_node_config(&self, config: &mut toml::Value) -> Result<(), Error> { - TestOverrides::modify_node_config(self, config) - } -} - -impl NodeGenesisOverride for Test { - fn modify_genesis_file(&self, genesis: &mut serde_json::Value) -> Result<(), Error> { - TestOverrides::modify_genesis_file(self, genesis) - } -} - -impl RelayerConfigOverride for Test { - fn modify_relayer_config(&self, config: &mut Config) { - TestOverrides::modify_relayer_config(self, config) - } -} - -impl ClientOptionsOverride for Test { - fn client_options_a_to_b(&self) -> ClientOptions { - TestOverrides::client_options_a_to_b(self) - } - - fn client_options_b_to_a(&self) -> ClientOptions { - TestOverrides::client_options_b_to_a(self) - } -} - -impl SupervisorOverride for Test { - fn should_spawn_supervisor(&self) -> bool { - TestOverrides::should_spawn_supervisor(self) - } -} - -impl ConnectionDelayOverride for Test { - fn connection_delay(&self) -> Duration { - TestOverrides::connection_delay(self) - } -} - -impl PortsOverride for Test { - fn channel_port_a(&self) -> PortId { - TestOverrides::channel_port_a(self) - } - - fn channel_port_b(&self) -> PortId { - TestOverrides::channel_port_b(self) - } -} - -impl ChannelOrderOverride for Test { - fn channel_order(&self) -> Order { - TestOverrides::channel_order(self) - } -} - -impl ChannelVersionOverride for Test { - fn channel_version(&self) -> Version { - TestOverrides::channel_version(self) - } -} - -impl NaryPortsOverride<2> for Test { - fn channel_ports(&self) -> [[PortId; 2]; 2] { - let port_a = self.channel_port_a(); - let port_b = self.channel_port_b(); - - [[port_a.clone(), port_b.clone()], [port_b, port_a]] - } -} diff --git a/tools/test-framework/src/framework/supervisor.rs b/tools/test-framework/src/framework/supervisor.rs deleted file mode 100644 index f43ea89536..0000000000 --- a/tools/test-framework/src/framework/supervisor.rs +++ /dev/null @@ -1,58 +0,0 @@ -/** - Constructs for wrapping test cases to spawn the relayer supervisor - before the inner test is executed. 
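A test that drives the relayer manually can opt out of the automatic supervisor through `TestOverrides`; the blanket `SupervisorOverride` impl above then picks up the value. A sketch with a hypothetical test type:

pub struct ExampleManualRelayerTest;

impl TestOverrides for ExampleManualRelayerTest {
    fn should_spawn_supervisor(&self) -> bool {
        false
    }
}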
-*/ -use crate::framework::base::HasOverrides; - -/** - An internal trait that can be implemented by test cases to override - whether to automatically spawn the relayer supervisor before the - test starts. - - This is used by [`RunWithSupervisor`] to determine whether to - spawn the relayer. - - Test writers should implement - [`TestOverrides`](crate::framework::overrides::TestOverrides) - for their test cases instead of implementing this trait directly. -*/ -pub trait SupervisorOverride { - fn should_spawn_supervisor(&self) -> bool; -} - -/** - A wrapper type that implements the same test traits as the wrapped - `Test` type, and spawns the relayer supervisor before the inner test - is called. - - For example, if `Test` implements - [`BinaryChannelTest`](crate::framework::binary::channel::BinaryChannelTest), - then `RunWithSupervisor` also implements `BinaryChannelTest`. - - The automatic spawning of supervisor can be disabled by implementing - [`SupervisorOverride`] and returning false. - - When composing the test runners with `RunWithSupervisor`, it is important - to ensure that `RunWithSupervisor` do not appear more than once in the - nesting. Otherwise the supervisor may spawn more than once during tests. -*/ -pub struct RunWithSupervisor<'a, Test> { - pub test: &'a Test, -} - -impl<'a, Test> RunWithSupervisor<'a, Test> { - pub fn new(test: &'a Test) -> Self { - Self { test } - } -} - -impl<'a, Test, Overrides> HasOverrides for RunWithSupervisor<'a, Test> -where - Test: HasOverrides, -{ - type Overrides = Overrides; - - fn get_overrides(&self) -> &Self::Overrides { - self.test.get_overrides() - } -} diff --git a/tools/test-framework/src/ibc/denom.rs b/tools/test-framework/src/ibc/denom.rs deleted file mode 100644 index ec4bcabc6c..0000000000 --- a/tools/test-framework/src/ibc/denom.rs +++ /dev/null @@ -1,129 +0,0 @@ -/*! - Helper functions for deriving IBC denom. -*/ - -use core::fmt::{self, Display}; -use eyre::Report as Error; -use ibc::core::ics24_host::identifier::{ChannelId, PortId}; -use sha2::{Digest, Sha256}; -use subtle_encoding::hex; - -use crate::types::id::{TaggedChannelIdRef, TaggedPortIdRef}; -use crate::types::tagged::*; - -/** - A newtype wrapper to represent a denomination string. -*/ -#[derive(Debug, Clone)] -pub enum Denom { - Base(String), - Ibc { - path: String, - denom: String, - hashed: String, - }, -} - -/** - Type alias for [`Denom`] tagged with the chain it belongs to. -*/ -pub type TaggedDenom = MonoTagged; - -/** - Type alias for [`&Denom`](Denom) tagged with the chain it belongs to. -*/ -pub type TaggedDenomRef<'a, Chain> = MonoTagged; - -/** - A tagged version of [`derive_ibc_denom`](token_transfer::derive_ibc_denom) - from the [`ibc`] module. - - Derives the denom on `ChainB` based on a denom on `ChainA` that has been - transferred to `ChainB` via IBC. - - Accepts the following arguments: - - - A `PortId` on `ChainB` that corresponds to a channel connected - to `ChainA`. - - - A `ChannelId` on `ChainB` that corresponds to a channel connected - to `ChainA`. - - - The original denomination on `ChainA`. - - Returns the derived denomination on `ChainB`. 
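The derivation follows the ICS-20 hashing rule: the derived denom is `ibc/` followed by the upper-case hex SHA-256 of `{port}/{channel}/{denom}`. A standalone sketch of that rule, with assumed example values and using the same `sha2` and `subtle_encoding` crates as the code below:

use sha2::{Digest, Sha256};
use subtle_encoding::hex;

fn example_hashed_denom() -> Result<String, eyre::Report> {
    let transfer_path = "transfer/channel-0/samoleans";
    let digest = Sha256::digest(transfer_path.as_bytes());
    let denom_hex = String::from_utf8(hex::encode_upper(digest))?;
    // e.g. "ibc/<64 upper-case hex characters>"
    Ok(format!("ibc/{}", denom_hex))
}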
-*/ -pub fn derive_ibc_denom( - port_id: &TaggedPortIdRef, - channel_id: &TaggedChannelIdRef, - denom: &TaggedDenomRef, -) -> Result, Error> { - fn derive_denom( - port_id: &PortId, - channel_id: &ChannelId, - denom: &str, - ) -> Result { - let transfer_path = format!("{}/{}/{}", port_id, channel_id, denom); - derive_denom_with_path(&transfer_path) - } - - /// Derive the transferred token denomination using - /// - fn derive_denom_with_path(transfer_path: &str) -> Result { - let mut hasher = Sha256::new(); - hasher.update(transfer_path.as_bytes()); - - let denom_bytes = hasher.finalize(); - let denom_hex = String::from_utf8(hex::encode_upper(denom_bytes))?; - - Ok(format!("ibc/{}", denom_hex)) - } - - match denom.value() { - Denom::Base(denom) => { - let hashed = derive_denom(port_id.value(), channel_id.value(), denom)?; - - Ok(MonoTagged::new(Denom::Ibc { - path: format!("{}/{}", port_id, channel_id), - denom: denom.clone(), - hashed, - })) - } - Denom::Ibc { path, denom, .. } => { - let new_path = format!("{}/{}/{}", port_id, channel_id, path); - let hashed = derive_denom_with_path(&format!("{}/{}", new_path, denom))?; - - Ok(MonoTagged::new(Denom::Ibc { - path: new_path, - denom: denom.clone(), - hashed, - })) - } - } -} - -impl Denom { - pub fn base(denom: &str) -> Self { - Denom::Base(denom.to_string()) - } - - pub fn as_str(&self) -> &str { - match self { - Denom::Base(denom) => denom, - Denom::Ibc { hashed, .. } => hashed, - } - } -} - -impl Display for Denom { - fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { - match self { - Denom::Base(denom) => { - write!(f, "{}", denom) - } - Denom::Ibc { hashed, .. } => { - write!(f, "{}", hashed) - } - } - } -} diff --git a/tools/test-framework/src/ibc/mod.rs b/tools/test-framework/src/ibc/mod.rs deleted file mode 100644 index e389251e1c..0000000000 --- a/tools/test-framework/src/ibc/mod.rs +++ /dev/null @@ -1,6 +0,0 @@ -/*! - Code that may belong to the [`ibc`] module, but are currently - in this crate for easier review or maintenance. -*/ - -pub mod denom; diff --git a/tools/test-framework/src/lib.rs b/tools/test-framework/src/lib.rs deleted file mode 100644 index aaeca1e376..0000000000 --- a/tools/test-framework/src/lib.rs +++ /dev/null @@ -1,17 +0,0 @@ -// #![deny(warnings)] -#![allow(clippy::too_many_arguments)] -#![allow(clippy::type_complexity)] -#![allow(clippy::ptr_arg)] -#![doc = include_str!("../README.md")] - -extern crate alloc; - -pub mod bootstrap; -pub mod chain; -pub mod error; -pub mod framework; -pub mod ibc; -pub mod prelude; -pub mod relayer; -pub mod types; -pub mod util; diff --git a/tools/test-framework/src/prelude.rs b/tools/test-framework/src/prelude.rs deleted file mode 100644 index 624ed806a4..0000000000 --- a/tools/test-framework/src/prelude.rs +++ /dev/null @@ -1,69 +0,0 @@ -/*! - Re-export of common constructs that are used by test cases. 
-*/ - -pub use core::time::Duration; -pub use eyre::eyre; -pub use ibc::core::ics04_channel::channel::Order; -pub use ibc::core::ics24_host::identifier::{ChainId, ChannelId, ClientId, ConnectionId, PortId}; -pub use ibc_relayer::chain::handle::ChainHandle; -pub use ibc_relayer::config::Config; -pub use ibc_relayer::foreign_client::ForeignClient; -pub use ibc_relayer::registry::SharedRegistry; -pub use ibc_relayer::supervisor::SupervisorHandle; -pub use std::thread::sleep; -pub use tracing::{debug, error, info, warn}; - -pub use crate::chain::driver::ChainDriver; -pub use crate::chain::tagged::TaggedChainDriverExt; -pub use crate::error::{handle_generic_error, Error}; -pub use crate::framework::base::HasOverrides; -pub use crate::framework::binary::chain::{ - run_binary_chain_test, run_self_connected_binary_chain_test, run_two_way_binary_chain_test, - BinaryChainTest, RunBinaryChainTest, RunSelfConnectedBinaryChainTest, -}; -pub use crate::framework::binary::channel::{ - run_binary_channel_test, run_two_way_binary_channel_test, BinaryChannelTest, - RunBinaryChannelTest, -}; -pub use crate::framework::binary::connection::{ - run_binary_connection_test, run_two_way_binary_connection_test, BinaryConnectionTest, - RunBinaryConnectionTest, -}; -pub use crate::framework::binary::node::{run_binary_node_test, BinaryNodeTest, RunBinaryNodeTest}; -pub use crate::framework::nary::chain::{ - run_nary_chain_test, run_self_connected_nary_chain_test, NaryChainTest, RunNaryChainTest, - RunSelfConnectedNaryChainTest, -}; -pub use crate::framework::nary::channel::{ - run_binary_as_nary_channel_test, run_nary_channel_test, NaryChannelTest, PortsOverride, - RunBinaryAsNaryChannelTest, RunNaryChannelTest, -}; -pub use crate::framework::nary::connection::{ - run_nary_connection_test, NaryConnectionTest, RunNaryConnectionTest, -}; -pub use crate::framework::nary::node::{run_nary_node_test, NaryNodeTest, RunNaryNodeTest}; -pub use crate::framework::overrides::TestOverrides; -pub use crate::framework::supervisor::RunWithSupervisor; -pub use crate::ibc::denom::Denom; -pub use crate::relayer::channel::TaggedChannelEndExt; -pub use crate::relayer::connection::{TaggedConnectionEndExt, TaggedConnectionExt}; -pub use crate::relayer::driver::RelayerDriver; -pub use crate::relayer::foreign_client::TaggedForeignClientExt; -pub use crate::types::binary::chains::ConnectedChains; -pub use crate::types::binary::channel::ConnectedChannel; -pub use crate::types::binary::connection::ConnectedConnection; -pub use crate::types::binary::foreign_client::ForeignClientPair; -pub use crate::types::config::TestConfig; -pub use crate::types::id::*; -pub use crate::types::nary::chains::NaryConnectedChains; -pub use crate::types::nary::channel::ConnectedChannels as NaryConnectedChannels; -pub use crate::types::nary::connection::ConnectedConnections as NaryConnectedConnections; -pub use crate::types::single::node::{FullNode, TaggedFullNodeExt}; -pub use crate::types::tagged::{DualTagged, MonoTagged}; -pub use crate::types::wallet::{ - TaggedTestWalletsExt, TaggedWallet, TestWallets, Wallet, WalletAddress, WalletId, -}; -pub use crate::util::assert::*; -pub use crate::util::retry::assert_eventually_succeed; -pub use crate::util::suspend::suspend; diff --git a/tools/test-framework/src/relayer/chain.rs b/tools/test-framework/src/relayer/chain.rs deleted file mode 100644 index cf80f7f4cb..0000000000 --- a/tools/test-framework/src/relayer/chain.rs +++ /dev/null @@ -1,404 +0,0 @@ -/*! 
- Definition for a proxy [`ChainHandle`] implementation for tagged
- chain handles.
-
- Since we use the chain handle type to distinguish the chain tags, we will
- run into problems if we have the same concrete `ChainHandle` implementations
- for two chains that are not encapsulated behind an `impl ChainHandle`.
-
- This is the case for creating N-ary chains, because we cannot rely on the
- existential type encapsulation of `impl ChainHandle` to turn the
- [`CountingAndCachingChainHandle`](ibc_relayer::chain::handle::CountingAndCachingChainHandle)
- into unique types.
-
- A workaround for this is to add a unique tag to `CountingAndCachingChainHandle` itself,
- so that the type `MonoTagged` becomes a unique chain
- handle type.
-
- We implement [`ChainHandle`] for a `MonoTagged`, since if the
- underlying `Handle` type implements [`ChainHandle`], then a tagged handle
- is still a [`ChainHandle`].
-*/
-
-use crossbeam_channel as channel;
-use ibc::core::ics02_client::client_consensus::{AnyConsensusState, AnyConsensusStateWithHeight};
-use ibc::core::ics02_client::client_state::{AnyClientState, IdentifiedAnyClientState};
-use ibc::core::ics02_client::events::UpdateClient;
-use ibc::core::ics02_client::misbehaviour::MisbehaviourEvidence;
-use ibc::core::ics03_connection::connection::IdentifiedConnectionEnd;
-use ibc::core::ics04_channel::channel::IdentifiedChannelEnd;
-use ibc::core::ics04_channel::packet::{PacketMsgType, Sequence};
-use ibc::core::ics23_commitment::merkle::MerkleProof;
-use ibc::query::QueryTxRequest;
-use ibc::{
-    core::ics02_client::header::AnyHeader,
-    core::ics03_connection::connection::ConnectionEnd,
-    core::ics03_connection::version::Version,
-    core::ics04_channel::channel::ChannelEnd,
-    core::ics23_commitment::commitment::CommitmentPrefix,
-    core::ics24_host::identifier::ChainId,
-    core::ics24_host::identifier::ChannelId,
-    core::ics24_host::identifier::{ClientId, ConnectionId, PortId},
-    events::IbcEvent,
-    proofs::Proofs,
-    query::QueryBlockRequest,
-    signer::Signer,
-    Height,
-};
-use ibc_relayer::account::Balance;
-use ibc_relayer::chain::client::ClientSettings;
-use ibc_relayer::chain::endpoint::{ChainStatus, HealthCheck};
-use ibc_relayer::chain::handle::{ChainHandle, ChainRequest, Subscription};
-use ibc_relayer::chain::requests::{
-    IncludeProof, QueryChannelClientStateRequest, QueryChannelRequest, QueryChannelsRequest,
-    QueryClientConnectionsRequest, QueryClientStateRequest, QueryClientStatesRequest,
-    QueryConnectionChannelsRequest, QueryConnectionRequest, QueryConnectionsRequest,
-    QueryConsensusStateRequest, QueryConsensusStatesRequest, QueryHostConsensusStateRequest,
-    QueryNextSequenceReceiveRequest, QueryPacketAcknowledgementRequest,
-    QueryPacketAcknowledgementsRequest, QueryPacketCommitmentRequest,
-    QueryPacketCommitmentsRequest, QueryPacketReceiptRequest, QueryUnreceivedAcksRequest,
-    QueryUnreceivedPacketsRequest, QueryUpgradedClientStateRequest,
-    QueryUpgradedConsensusStateRequest,
-};
-use ibc_relayer::chain::tracking::TrackedMsgs;
-use ibc_relayer::config::ChainConfig;
-use ibc_relayer::connection::ConnectionMsgType;
-use ibc_relayer::error::Error;
-use ibc_relayer::keyring::KeyEntry;
-
-use crate::types::tagged::*;
-
-/**
- Implement `ChainHandle` for any existential type `Handle: ChainHandle`.
- This allows us to tag values for chains that are tagged by position
- in [N-ary chains](crate::types::nary).
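The proxy pattern described in the module documentation above boils down to wrapping a handle together with a phantom tag and forwarding the trait. A standalone sketch of the shape, using stand-in names rather than the crate's own types:

use core::marker::PhantomData;

// Stand-in for the real `ChainHandle` trait.
trait Handle {
    fn id(&self) -> String;
}

// `Tagged<TagA, H>` and `Tagged<TagB, H>` are distinct types even though
// they wrap the same underlying handle type `H`.
struct Tagged<Tag, H>(H, PhantomData<Tag>);

impl<Tag, H: Handle> Handle for Tagged<Tag, H> {
    fn id(&self) -> String {
        // Every method simply forwards to the wrapped handle.
        self.0.id()
    }
}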
-*/ -impl ChainHandle for MonoTagged -where - Tag: Send + Sync + 'static, - Handle: ChainHandle, -{ - fn new(chain_id: ChainId, sender: channel::Sender) -> Self { - Self::new(Handle::new(chain_id, sender)) - } - - fn id(&self) -> ChainId { - self.value().id() - } - - fn shutdown(&self) -> Result<(), Error> { - self.value().shutdown() - } - - fn health_check(&self) -> Result { - self.value().health_check() - } - - fn subscribe(&self) -> Result { - self.value().subscribe() - } - - fn send_messages_and_wait_commit( - &self, - tracked_msgs: TrackedMsgs, - ) -> Result, Error> { - self.value().send_messages_and_wait_commit(tracked_msgs) - } - - fn send_messages_and_wait_check_tx( - &self, - tracked_msgs: TrackedMsgs, - ) -> Result, Error> { - self.value().send_messages_and_wait_check_tx(tracked_msgs) - } - - fn get_signer(&self) -> Result { - self.value().get_signer() - } - - fn config(&self) -> Result { - self.value().config() - } - - fn get_key(&self) -> Result { - self.value().get_key() - } - - fn add_key(&self, key_name: String, key: KeyEntry) -> Result<(), Error> { - self.value().add_key(key_name, key) - } - - fn ibc_version(&self) -> Result, Error> { - self.value().ibc_version() - } - - fn query_application_status(&self) -> Result { - self.value().query_application_status() - } - - fn query_latest_height(&self) -> Result { - self.value().query_latest_height() - } - - fn query_clients( - &self, - request: QueryClientStatesRequest, - ) -> Result, Error> { - self.value().query_clients(request) - } - - fn query_client_state( - &self, - request: QueryClientStateRequest, - include_proof: IncludeProof, - ) -> Result<(AnyClientState, Option), Error> { - self.value().query_client_state(request, include_proof) - } - - fn query_client_connections( - &self, - request: QueryClientConnectionsRequest, - ) -> Result, Error> { - self.value().query_client_connections(request) - } - - fn query_consensus_states( - &self, - request: QueryConsensusStatesRequest, - ) -> Result, Error> { - self.value().query_consensus_states(request) - } - - fn query_consensus_state( - &self, - request: QueryConsensusStateRequest, - include_proof: IncludeProof, - ) -> Result<(AnyConsensusState, Option), Error> { - self.value().query_consensus_state(request, include_proof) - } - - fn query_upgraded_client_state( - &self, - request: QueryUpgradedClientStateRequest, - ) -> Result<(AnyClientState, MerkleProof), Error> { - self.value().query_upgraded_client_state(request) - } - - fn query_upgraded_consensus_state( - &self, - request: QueryUpgradedConsensusStateRequest, - ) -> Result<(AnyConsensusState, MerkleProof), Error> { - self.value().query_upgraded_consensus_state(request) - } - - fn query_commitment_prefix(&self) -> Result { - self.value().query_commitment_prefix() - } - - fn query_compatible_versions(&self) -> Result, Error> { - self.value().query_compatible_versions() - } - - fn query_connection( - &self, - request: QueryConnectionRequest, - include_proof: IncludeProof, - ) -> Result<(ConnectionEnd, Option), Error> { - self.value().query_connection(request, include_proof) - } - - fn query_connections( - &self, - request: QueryConnectionsRequest, - ) -> Result, Error> { - self.value().query_connections(request) - } - - fn query_connection_channels( - &self, - request: QueryConnectionChannelsRequest, - ) -> Result, Error> { - self.value().query_connection_channels(request) - } - - fn query_next_sequence_receive( - &self, - request: QueryNextSequenceReceiveRequest, - include_proof: IncludeProof, - ) -> Result<(Sequence, Option), 
Error> { - self.value() - .query_next_sequence_receive(request, include_proof) - } - - fn query_channels( - &self, - request: QueryChannelsRequest, - ) -> Result, Error> { - self.value().query_channels(request) - } - - fn query_channel( - &self, - request: QueryChannelRequest, - include_proof: IncludeProof, - ) -> Result<(ChannelEnd, Option), Error> { - self.value().query_channel(request, include_proof) - } - - fn query_channel_client_state( - &self, - request: QueryChannelClientStateRequest, - ) -> Result, Error> { - self.value().query_channel_client_state(request) - } - - fn build_header( - &self, - trusted_height: Height, - target_height: Height, - client_state: AnyClientState, - ) -> Result<(AnyHeader, Vec), Error> { - self.value() - .build_header(trusted_height, target_height, client_state) - } - - /// Constructs a client state at the given height - fn build_client_state( - &self, - height: Height, - settings: ClientSettings, - ) -> Result { - self.value().build_client_state(height, settings) - } - - /// Constructs a consensus state at the given height - fn build_consensus_state( - &self, - trusted: Height, - target: Height, - client_state: AnyClientState, - ) -> Result { - self.value() - .build_consensus_state(trusted, target, client_state) - } - - fn check_misbehaviour( - &self, - update: UpdateClient, - client_state: AnyClientState, - ) -> Result, Error> { - self.value().check_misbehaviour(update, client_state) - } - - fn build_connection_proofs_and_client_state( - &self, - message_type: ConnectionMsgType, - connection_id: &ConnectionId, - client_id: &ClientId, - height: Height, - ) -> Result<(Option, Proofs), Error> { - self.value().build_connection_proofs_and_client_state( - message_type, - connection_id, - client_id, - height, - ) - } - - fn build_channel_proofs( - &self, - port_id: &PortId, - channel_id: &ChannelId, - height: Height, - ) -> Result { - self.value() - .build_channel_proofs(port_id, channel_id, height) - } - - fn build_packet_proofs( - &self, - packet_type: PacketMsgType, - port_id: &PortId, - channel_id: &ChannelId, - sequence: Sequence, - height: Height, - ) -> Result { - self.value() - .build_packet_proofs(packet_type, port_id, channel_id, sequence, height) - } - - fn query_packet_commitment( - &self, - request: QueryPacketCommitmentRequest, - include_proof: IncludeProof, - ) -> Result<(Vec, Option), Error> { - self.value().query_packet_commitment(request, include_proof) - } - - fn query_packet_commitments( - &self, - request: QueryPacketCommitmentsRequest, - ) -> Result<(Vec, Height), Error> { - self.value().query_packet_commitments(request) - } - - fn query_packet_receipt( - &self, - request: QueryPacketReceiptRequest, - include_proof: IncludeProof, - ) -> Result<(Vec, Option), Error> { - self.value().query_packet_receipt(request, include_proof) - } - - fn query_unreceived_packets( - &self, - request: QueryUnreceivedPacketsRequest, - ) -> Result, Error> { - self.value().query_unreceived_packets(request) - } - - fn query_packet_acknowledgement( - &self, - request: QueryPacketAcknowledgementRequest, - include_proof: IncludeProof, - ) -> Result<(Vec, Option), Error> { - self.value() - .query_packet_acknowledgement(request, include_proof) - } - - fn query_packet_acknowledgements( - &self, - request: QueryPacketAcknowledgementsRequest, - ) -> Result<(Vec, Height), Error> { - self.value().query_packet_acknowledgements(request) - } - - fn query_unreceived_acknowledgements( - &self, - request: QueryUnreceivedAcksRequest, - ) -> Result, Error> { - 
self.value().query_unreceived_acknowledgements(request) - } - - fn query_txs(&self, request: QueryTxRequest) -> Result, Error> { - self.value().query_txs(request) - } - - fn query_blocks( - &self, - request: QueryBlockRequest, - ) -> Result<(Vec, Vec), Error> { - self.value().query_blocks(request) - } - - fn query_host_consensus_state( - &self, - request: QueryHostConsensusStateRequest, - ) -> Result { - self.value().query_host_consensus_state(request) - } - - fn query_balance(&self, key_name: Option) -> Result { - self.value().query_balance(key_name) - } -} diff --git a/tools/test-framework/src/relayer/channel.rs b/tools/test-framework/src/relayer/channel.rs deleted file mode 100644 index 560c6d369b..0000000000 --- a/tools/test-framework/src/relayer/channel.rs +++ /dev/null @@ -1,151 +0,0 @@ -use core::time::Duration; -use eyre::eyre; -use ibc::core::ics04_channel::channel::State as ChannelState; -use ibc::core::ics04_channel::channel::{ChannelEnd, IdentifiedChannelEnd, Order}; -use ibc::Height; -use ibc_relayer::chain::handle::ChainHandle; -use ibc_relayer::chain::requests::{IncludeProof, QueryChannelRequest}; -use ibc_relayer::channel::{extract_channel_id, Channel, ChannelSide}; - -use crate::error::Error; -use crate::types::id::{ - TaggedChannelId, TaggedChannelIdRef, TaggedClientIdRef, TaggedConnectionIdRef, TaggedPortId, - TaggedPortIdRef, -}; -use crate::types::tagged::DualTagged; -use crate::util::retry::assert_eventually_succeed; - -pub trait TaggedChannelEndExt { - fn tagged_counterparty_channel_id(&self) -> Option>; - fn tagged_counterparty_port_id(&self) -> TaggedPortId; -} - -impl TaggedChannelEndExt - for DualTagged -{ - fn tagged_counterparty_channel_id(&self) -> Option> { - self.contra_map(|c| c.counterparty().channel_id).transpose() - } - - fn tagged_counterparty_port_id(&self) -> TaggedPortId { - self.contra_map(|c| c.counterparty().port_id.clone()) - } -} - -pub fn init_channel( - handle_a: &ChainA, - handle_b: &ChainB, - client_id_a: &TaggedClientIdRef, - client_id_b: &TaggedClientIdRef, - connection_id_a: &TaggedConnectionIdRef, - connection_id_b: &TaggedConnectionIdRef, - src_port_id: &TaggedPortIdRef, - dst_port_id: &TaggedPortIdRef, -) -> Result<(TaggedChannelId, Channel), Error> { - let channel = Channel { - connection_delay: Default::default(), - ordering: Order::Unordered, - a_side: ChannelSide::new( - handle_a.clone(), - client_id_a.cloned_value(), - connection_id_a.cloned_value(), - src_port_id.cloned_value(), - None, - None, - ), - b_side: ChannelSide::new( - handle_b.clone(), - client_id_b.cloned_value(), - connection_id_b.cloned_value(), - dst_port_id.cloned_value(), - None, - None, - ), - }; - - let event = channel.build_chan_open_init_and_send()?; - - let channel_id = *extract_channel_id(&event)?; - - let channel2 = Channel::restore_from_event(handle_b.clone(), handle_a.clone(), event)?; - - Ok((DualTagged::new(channel_id), channel2)) -} - -pub fn query_channel_end( - handle: &ChainA, - channel_id: &TaggedChannelIdRef, - port_id: &TaggedPortIdRef, -) -> Result, Error> { - let (channel_end, _) = handle.query_channel( - QueryChannelRequest { - port_id: port_id.into_value().clone(), - channel_id: *channel_id.into_value(), - height: Height::zero(), - }, - IncludeProof::No, - )?; - - Ok(DualTagged::new(channel_end)) -} - -pub fn query_identified_channel_end( - handle: &ChainA, - channel_id: TaggedChannelIdRef, - port_id: TaggedPortIdRef, -) -> Result, Error> { - let (channel_end, _) = handle.query_channel( - QueryChannelRequest { - port_id: 
port_id.into_value().clone(), - channel_id: *channel_id.into_value(), - height: Height::zero(), - }, - IncludeProof::No, - )?; - Ok(DualTagged::new(IdentifiedChannelEnd::new( - port_id.into_value().clone(), - *channel_id.into_value(), - channel_end, - ))) -} - -pub fn assert_eventually_channel_established( - handle_a: &ChainA, - handle_b: &ChainB, - channel_id_a: &TaggedChannelIdRef, - port_id_a: &TaggedPortIdRef, -) -> Result, Error> { - assert_eventually_succeed( - "channel should eventually established", - 20, - Duration::from_secs(1), - || { - let channel_end_a = query_channel_end(handle_a, channel_id_a, port_id_a)?; - - if !channel_end_a.value().state_matches(&ChannelState::Open) { - return Err(Error::generic(eyre!( - "expected channel end A to be in open state" - ))); - } - - let channel_id_b = channel_end_a - .tagged_counterparty_channel_id() - .ok_or_else(|| { - eyre!("expected counterparty channel id to present on open channel") - })?; - - let port_id_b = channel_end_a.tagged_counterparty_port_id(); - - let channel_end_b = - query_channel_end(handle_b, &channel_id_b.as_ref(), &port_id_b.as_ref())?; - - if !channel_end_b.value().state_matches(&ChannelState::Open) { - return Err(Error::generic(eyre!( - "expected channel end B to be in open state" - ))); - } - - Ok(channel_id_b) - }, - ) -} diff --git a/tools/test-framework/src/relayer/connection.rs b/tools/test-framework/src/relayer/connection.rs deleted file mode 100644 index 757e1d8091..0000000000 --- a/tools/test-framework/src/relayer/connection.rs +++ /dev/null @@ -1,161 +0,0 @@ -/*! - Definition for extension trait methods for [`Connection`] -*/ - -use core::time::Duration; -use eyre::eyre; -use ibc::core::ics03_connection::connection::State as ConnectionState; -use ibc::core::ics03_connection::connection::{ConnectionEnd, IdentifiedConnectionEnd}; -use ibc::timestamp::ZERO_DURATION; -use ibc::Height; -use ibc_relayer::chain::handle::ChainHandle; -use ibc_relayer::chain::requests::{IncludeProof, QueryConnectionRequest}; -use ibc_relayer::connection::{extract_connection_id, Connection, ConnectionSide}; - -use crate::error::Error; -use crate::types::id::{TaggedClientIdRef, TaggedConnectionId, TaggedConnectionIdRef}; -use crate::types::tagged::DualTagged; -use crate::util::retry::assert_eventually_succeed; - -/** - An extension trait that provide helper methods to get tagged identifiers - out of a [`Connection`]. -*/ -pub trait TaggedConnectionExt { - /** - Get the connection ID from side A of the chain. - */ - fn tagged_connection_id_a(&self) -> Option>; - - /** - Get the connection ID from side B of the chain. 
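The `assert_eventually_channel_established` helper above relies on `assert_eventually_succeed`, which retries a closure a fixed number of times with a fixed interval before giving up. A minimal standalone sketch of that retry shape, using a plain `String` error for brevity (this is not the crate's implementation):

use core::time::Duration;
use std::thread::sleep;

fn retry_until_ok<R>(
    task_name: &str,
    attempts: u32,
    interval: Duration,
    task: impl Fn() -> Result<R, String>,
) -> Result<R, String> {
    let mut last_error = String::new();
    for _ in 0..attempts {
        match task() {
            Ok(value) => return Ok(value),
            Err(e) => {
                // Remember the failure and try again after the interval.
                last_error = e;
                sleep(interval);
            }
        }
    }
    Err(format!(
        "`{}` did not succeed after {} attempts: {}",
        task_name, attempts, last_error
    ))
}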
- */ - fn tagged_connection_id_b(&self) -> Option>; -} - -pub trait TaggedConnectionEndExt { - fn tagged_counterparty_connection_id(&self) -> Option>; -} - -impl TaggedConnectionExt - for Connection -{ - fn tagged_connection_id_a(&self) -> Option> { - self.a_side.connection_id().map(DualTagged::new) - } - - fn tagged_connection_id_b(&self) -> Option> { - self.b_side.connection_id().map(DualTagged::new) - } -} - -impl TaggedConnectionEndExt - for DualTagged -{ - fn tagged_counterparty_connection_id(&self) -> Option> { - self.contra_map(|c| c.counterparty().connection_id.clone()) - .transpose() - } -} - -pub fn init_connection( - handle_a: &ChainA, - handle_b: &ChainB, - client_id_a: &TaggedClientIdRef, - client_id_b: &TaggedClientIdRef, -) -> Result< - ( - TaggedConnectionId, - Connection, - ), - Error, -> { - let connection = Connection { - delay_period: ZERO_DURATION, - a_side: ConnectionSide::new(handle_a.clone(), (*client_id_a.value()).clone(), None), - b_side: ConnectionSide::new(handle_b.clone(), (*client_id_b.value()).clone(), None), - }; - - let event = connection.build_conn_init_and_send()?; - - let connection_id = extract_connection_id(&event)?.clone(); - - let connection2 = Connection::restore_from_event(handle_b.clone(), handle_a.clone(), event)?; - - Ok((DualTagged::new(connection_id), connection2)) -} - -pub fn query_connection_end( - handle: &ChainA, - connection_id: &TaggedConnectionIdRef, -) -> Result, Error> { - let (connection_end, _) = handle.query_connection( - QueryConnectionRequest { - connection_id: connection_id.into_value().clone(), - height: Height::zero(), - }, - IncludeProof::No, - )?; - - Ok(DualTagged::new(connection_end)) -} - -pub fn query_identified_connection_end( - handle: &ChainA, - connection_id: TaggedConnectionIdRef, -) -> Result, Error> { - let (connection_end, _) = handle.query_connection( - QueryConnectionRequest { - connection_id: connection_id.into_value().clone(), - height: Height::zero(), - }, - IncludeProof::No, - )?; - Ok(DualTagged::new(IdentifiedConnectionEnd::new( - connection_id.into_value().clone(), - connection_end, - ))) -} - -pub fn assert_eventually_connection_established( - handle_a: &ChainA, - handle_b: &ChainB, - connection_id_a: &TaggedConnectionIdRef, -) -> Result, Error> { - assert_eventually_succeed( - "connection should eventually established", - 20, - Duration::from_secs(1), - || { - let connection_end_a = query_connection_end(handle_a, connection_id_a)?; - - if !connection_end_a - .value() - .state_matches(&ConnectionState::Open) - { - return Err(Error::generic(eyre!( - "expected connection end A to be in open state" - ))); - } - - let connection_id_b = connection_end_a - .tagged_counterparty_connection_id() - .ok_or_else(|| { - eyre!("expected counterparty connection id to present on open connection") - })?; - - let connection_end_b = query_connection_end(handle_b, &connection_id_b.as_ref())?; - - if !connection_end_b - .value() - .state_matches(&ConnectionState::Open) - { - return Err(Error::generic(eyre!( - "expected connection end B to be in open state" - ))); - } - - Ok(connection_id_b) - }, - ) -} diff --git a/tools/test-framework/src/relayer/driver.rs b/tools/test-framework/src/relayer/driver.rs deleted file mode 100644 index 4e16c3483d..0000000000 --- a/tools/test-framework/src/relayer/driver.rs +++ /dev/null @@ -1,89 +0,0 @@ -/*! - Driver for spawning the relayer. 
-*/ - -use ibc_relayer::chain::handle::CountingAndCachingChainHandle; -use ibc_relayer::config::Config; -use ibc_relayer::registry::SharedRegistry; -use ibc_relayer::supervisor::{spawn_supervisor, SupervisorHandle, SupervisorOptions}; -use std::path::PathBuf; - -use crate::error::Error; -use crate::types::env::{EnvWriter, ExportEnv}; -use crate::util::suspend::hang_on_error; - -/** - Encapsulates the parameters needed to spawn the relayer supervisor. - - In the future, other methods that correspond to the relayer CLI can - also be added here. -*/ -#[derive(Clone)] -pub struct RelayerDriver { - /** - The path to the relayer config saved on the filesystem. - - This allows users to test the relayer manually with the config file - while the test is suspended. - */ - pub config_path: PathBuf, - - /** - The relayer [`Config`]. Use this config when spawning new supervisor - using [`spawn_supervisor`](ibc_relayer::supervisor::spawn_supervisor). - */ - pub config: Config, - - /** - The relayer chain [`Registry`](ibc_relayer::registry::Registry) - that is shared with any running supervisor. - - Use this shared registry when spawning new supervisor using - [`spawn_supervisor`](ibc_relayer::supervisor::spawn_supervisor). - */ - pub registry: SharedRegistry, - - /** - Whether the driver should hang the test when the continuation - closure in [`with_supervisor`](Self::with_supervisor) fails. - */ - pub hang_on_fail: bool, -} - -impl RelayerDriver { - /** - Spawns the relayer supervisor and return the [`SupervisorHandle`]. - */ - pub fn spawn_supervisor(&self) -> Result { - spawn_supervisor( - self.config.clone(), - self.registry.clone(), - None, - SupervisorOptions { - health_check: false, - force_full_scan: false, - }, - ) - .map_err(Error::supervisor) - } - - /** - Spawns the relayer supervisor and then executes the provided continuation - with the supervisor running. - - The supervisor is stopped after the continuation returned. If - `hang_on_fail` is set to true, the call will suspend if the continuation - returns error. - */ - pub fn with_supervisor(&self, cont: impl FnOnce() -> Result) -> Result { - let _handle = self.spawn_supervisor()?; - - hang_on_error(self.hang_on_fail, cont) - } -} - -impl ExportEnv for RelayerDriver { - fn export_env(&self, writer: &mut impl EnvWriter) { - writer.write_env("RELAYER_CONFIG", &format!("{}", self.config_path.display())); - } -} diff --git a/tools/test-framework/src/relayer/foreign_client.rs b/tools/test-framework/src/relayer/foreign_client.rs deleted file mode 100644 index beb94641cc..0000000000 --- a/tools/test-framework/src/relayer/foreign_client.rs +++ /dev/null @@ -1,47 +0,0 @@ -/*! - Definition for extension trait methods for [`ForeignClient`] -*/ - -use ibc_relayer::chain::handle::ChainHandle; -use ibc_relayer::foreign_client::ForeignClient; - -use crate::types::id::{TaggedChainId, TaggedClientIdRef}; -use crate::types::tagged::*; - -/** - An extension trait for providing methods for getting tagged identifiers - out of a [`ForeignClient`]. -*/ -pub trait TaggedForeignClientExt { - /** - Get the source chain ID. - */ - fn tagged_src_chain_id(&self) -> TaggedChainId; - - /** - Get the destination chain ID. - */ - fn tagged_dst_chain_id(&self) -> TaggedChainId; - - /** - Get the client ID of the destination chain that corresponds - to the source chain. 
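The `with_supervisor` helper shown above runs a closure while a freshly spawned supervisor is relaying packets, and suspends the test on failure when `hang_on_fail` is enabled. A hedged usage fragment from inside a test body (`relayer` is assumed to be the `RelayerDriver` handed to the test; the closure body is illustrative):

relayer.with_supervisor(|| {
    // Everything in this closure executes with packet relaying active.
    // Returning an Err here suspends the test when `hang_on_fail` is set.
    info!("supervisor is running; performing assertions");
    Ok(())
})?;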
- */ - fn tagged_client_id(&self) -> TaggedClientIdRef; -} - -impl TaggedForeignClientExt - for ForeignClient -{ - fn tagged_src_chain_id(&self) -> TaggedChainId { - MonoTagged::new(self.src_chain().id()) - } - - fn tagged_dst_chain_id(&self) -> TaggedChainId { - MonoTagged::new(self.dst_chain().id()) - } - - fn tagged_client_id(&self) -> TaggedClientIdRef { - DualTagged::new(self.id()) - } -} diff --git a/tools/test-framework/src/relayer/mod.rs b/tools/test-framework/src/relayer/mod.rs deleted file mode 100644 index 44a848d41e..0000000000 --- a/tools/test-framework/src/relayer/mod.rs +++ /dev/null @@ -1,28 +0,0 @@ -/*! - Code that may belong to the [`ibc_relayer`] module, but are currently - in this crate for easier review or maintenance. - - Sometimes new constructs are implemented just for testing, and it is - unclear whether the constructs have more general use that are worth - provided in the main library, so we put them here so that test authors - do not get blocked on writing tests. - - There may also be cases where the original code in the main library - are difficult to be used in a test setting, and we may want to temporarily - make a copy of the code and modify them in this crate to make it - easier to be used for testing. We would first do a forked modification - here so that there are less worry about the behavior of the original - code being changed due to subtle modifications. The changes can be - merged back to the main library at a later time once the tests have - sufficiently proven that the modified code preserve the semantics of - the original code. -*/ - -pub mod chain; -pub mod channel; -pub mod connection; -pub mod driver; -pub mod foreign_client; -pub mod refresh; -pub mod transfer; -pub mod tx; diff --git a/tools/test-framework/src/relayer/refresh.rs b/tools/test-framework/src/relayer/refresh.rs deleted file mode 100644 index 04f373548e..0000000000 --- a/tools/test-framework/src/relayer/refresh.rs +++ /dev/null @@ -1,19 +0,0 @@ -use eyre::eyre; -use ibc_relayer::chain::handle::ChainHandle; -use ibc_relayer::util::task::TaskHandle; -use ibc_relayer::worker::client::spawn_refresh_client; - -use crate::error::Error; -use crate::types::binary::foreign_client::ForeignClientPair; - -pub fn spawn_refresh_client_tasks( - foreign_clients: &ForeignClientPair, -) -> Result<[TaskHandle; 2], Error> { - let refresh_task_a = spawn_refresh_client(foreign_clients.client_b_to_a.clone()) - .ok_or_else(|| eyre!("expect refresh task spawned"))?; - - let refresh_task_b = spawn_refresh_client(foreign_clients.client_a_to_b.clone()) - .ok_or_else(|| eyre!("expect refresh task spawned"))?; - - Ok([refresh_task_a, refresh_task_b]) -} diff --git a/tools/test-framework/src/relayer/transfer.rs b/tools/test-framework/src/relayer/transfer.rs deleted file mode 100644 index 3af20851a5..0000000000 --- a/tools/test-framework/src/relayer/transfer.rs +++ /dev/null @@ -1,92 +0,0 @@ -/*! - Functions for performing IBC transfer that works similar to - `hermes tx raw ft-transfer`. 
-*/ - -use core::ops::Add; -use core::time::Duration; - -use ibc::applications::transfer::error::Error as Ics20Error; -use ibc::timestamp::Timestamp; -use ibc::Height; -use ibc_proto::google::protobuf::Any; -use ibc_relayer::chain::cosmos::types::config::TxConfig; -use ibc_relayer::transfer::build_transfer_message as raw_build_transfer_message; -use ibc_relayer::transfer::TransferError; - -use crate::error::{handle_generic_error, Error}; -use crate::ibc::denom::Denom; -use crate::relayer::tx::simple_send_tx; -use crate::types::id::{TaggedChannelIdRef, TaggedPortIdRef}; -use crate::types::tagged::*; -use crate::types::wallet::{Wallet, WalletAddress}; - -pub fn build_transfer_message( - port_id: &TaggedPortIdRef<'_, SrcChain, DstChain>, - channel_id: &TaggedChannelIdRef<'_, SrcChain, DstChain>, - sender: &MonoTagged, - recipient: &MonoTagged, - denom: &MonoTagged, - amount: u64, -) -> Result { - let timeout_timestamp = Timestamp::now() - .add(Duration::from_secs(60)) - .map_err(handle_generic_error)?; - - let sender = sender - .value() - .address - .0 - .parse() - .map_err(|e| TransferError::token_transfer(Ics20Error::signer(e)))?; - - let receiver = recipient - .value() - .0 - .parse() - .map_err(|e| TransferError::token_transfer(Ics20Error::signer(e)))?; - - Ok(raw_build_transfer_message( - (*port_id.value()).clone(), - **channel_id.value(), - amount.into(), - denom.to_string(), - sender, - receiver, - Height::zero(), - timeout_timestamp, - )) -} - -/** - Perform a simplified version of IBC token transfer for testing purpose. - - It makes use of the local time to construct a 60 seconds IBC timeout - for testing. During test, all chains should have the same local clock. - We are also not really interested in setting a timeout for most tests, - so we just put an approximate 1 minute timeout as the timeout - field is compulsary, and we want to avoid IBC timeout on CI. - - The other reason we do not allow precise timeout to be specified is - because it requires accessing the counterparty chain to query for - the parameters. This will complicate the API which is unnecessary - in most cases. - - If tests require explicit timeout, they should explicitly construct the - transfer message and pass it to send_tx. 
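As the note above says, tests that need a precise timeout should build the transfer message themselves instead of going through `ibc_token_transfer`. A hedged fragment showing that path with the same call shape used in `build_transfer_message`; bindings such as `port_id`, `sender`, `receiver`, `sender_wallet` and `tx_config` are assumed to come from the surrounding test:

// Choose an explicit timeout instead of the default 60 seconds.
let timeout_timestamp = Timestamp::now()
    .add(Duration::from_secs(5 * 60))
    .map_err(handle_generic_error)?;

let message = raw_build_transfer_message(
    (*port_id.value()).clone(),
    **channel_id.value(),
    amount.into(),
    denom.to_string(),
    sender,
    receiver,
    Height::zero(),
    timeout_timestamp,
);

// Submit directly, bypassing the fixed-timeout convenience wrapper.
simple_send_tx(tx_config.value(), &sender_wallet.key, vec![message]).await?;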
-*/ -pub async fn ibc_token_transfer( - tx_config: &MonoTagged, - port_id: &TaggedPortIdRef<'_, SrcChain, DstChain>, - channel_id: &TaggedChannelIdRef<'_, SrcChain, DstChain>, - sender: &MonoTagged, - recipient: &MonoTagged, - denom: &MonoTagged, - amount: u64, -) -> Result<(), Error> { - let message = build_transfer_message(port_id, channel_id, sender, recipient, denom, amount)?; - - simple_send_tx(tx_config.value(), &sender.value().key, vec![message]).await?; - - Ok(()) -} diff --git a/tools/test-framework/src/relayer/tx.rs b/tools/test-framework/src/relayer/tx.rs deleted file mode 100644 index 18be12730d..0000000000 --- a/tools/test-framework/src/relayer/tx.rs +++ /dev/null @@ -1,129 +0,0 @@ -use core::str::FromStr; -use core::time::Duration; -use eyre::eyre; -use http::uri::Uri; -use ibc::core::ics24_host::identifier::ChainId; -use ibc::events::IbcEvent; -use ibc_proto::cosmos::tx::v1beta1::Fee; -use ibc_proto::google::protobuf::Any; -use ibc_relayer::chain::cosmos::gas::calculate_fee; -use ibc_relayer::chain::cosmos::query::account::query_account; -use ibc_relayer::chain::cosmos::tx::estimate_fee_and_send_tx; -use ibc_relayer::chain::cosmos::types::config::TxConfig; -use ibc_relayer::chain::cosmos::types::gas::GasConfig; -use ibc_relayer::chain::cosmos::types::tx::TxSyncResult; -use ibc_relayer::chain::cosmos::wait::wait_for_block_commits; -use ibc_relayer::config::GasPrice; -use ibc_relayer::keyring::KeyEntry; -use tendermint_rpc::{HttpClient, Url}; - -use crate::error::{handle_generic_error, Error}; - -pub fn gas_config_for_test() -> GasConfig { - let max_gas = 3000000; - let gas_adjustment = 0.1; - let gas_price = GasPrice::new(0.001, "stake".to_string()); - - let default_gas = max_gas; - let fee_granter = "".to_string(); - - let max_fee = Fee { - amount: vec![calculate_fee(max_gas, &gas_price)], - gas_limit: max_gas, - payer: "".to_string(), - granter: fee_granter.clone(), - }; - - GasConfig { - default_gas, - max_gas, - gas_adjustment, - gas_price, - max_fee, - fee_granter, - } -} - -pub fn new_tx_config_for_test( - chain_id: ChainId, - raw_rpc_address: String, - raw_grpc_address: String, -) -> Result { - let rpc_address = Url::from_str(&raw_rpc_address).map_err(handle_generic_error)?; - - let rpc_client = HttpClient::new(rpc_address.clone()).map_err(handle_generic_error)?; - - let grpc_address = Uri::from_str(&raw_grpc_address).map_err(handle_generic_error)?; - - let gas_config = gas_config_for_test(); - - let rpc_timeout = Duration::from_secs(30); - - let address_type = Default::default(); - - Ok(TxConfig { - chain_id, - gas_config, - rpc_client, - rpc_address, - grpc_address, - rpc_timeout, - address_type, - }) -} - -/** - A simplified version of send_tx that does not depend on `ChainHandle`. - - This allows different wallet ([`KeyEntry`]) to be used for submitting - transactions. The simple behavior as follows: - - - Query the account information on the fly. This may introduce more - overhead in production, but does not matter in testing. - - Do not split the provided messages into smaller batches. - - Wait for TX sync result, and error if any result contains - error event. -*/ -pub async fn simple_send_tx( - config: &TxConfig, - key_entry: &KeyEntry, - messages: Vec, -) -> Result<(), Error> { - let account = query_account(&config.grpc_address, &key_entry.account) - .await? 
- .into(); - - let message_count = messages.len(); - - let response = - estimate_fee_and_send_tx(config, key_entry, &account, &Default::default(), messages) - .await?; - - let events_per_tx = vec![IbcEvent::default(); message_count]; - - let tx_sync_result = TxSyncResult { - response, - events: events_per_tx, - }; - - let mut tx_sync_results = vec![tx_sync_result]; - - wait_for_block_commits( - &config.chain_id, - &config.rpc_client, - &config.rpc_address, - &config.rpc_timeout, - &mut tx_sync_results, - ) - .await?; - - for result in tx_sync_results.iter() { - for event in result.events.iter() { - if let IbcEvent::ChainError(e) = event { - return Err(Error::generic(eyre!("send_tx result in error: {}", e))); - } - } - } - - Ok(()) -} diff --git a/tools/test-framework/src/types/binary/chains.rs b/tools/test-framework/src/types/binary/chains.rs deleted file mode 100644 index 1f00577603..0000000000 --- a/tools/test-framework/src/types/binary/chains.rs +++ /dev/null @@ -1,175 +0,0 @@ -/*! - Type definition for two connected chains. -*/ - -use ibc_relayer::chain::handle::ChainHandle; -use tracing::info; - -use super::foreign_client::ForeignClientPair; -use crate::types::env::{prefix_writer, EnvWriter, ExportEnv}; -use crate::types::id::{TaggedChainIdRef, TaggedClientIdRef}; -use crate::types::single::node::{FullNode, TaggedFullNodeExt}; -use crate::types::tagged::*; - -/** - Two connected chains including the full node, chain handles, and - the corresponding foreign clients. -*/ -#[derive(Clone)] -pub struct ConnectedChains { - /** - The [`ChainHandle`] for chain A. - - The handle is wrapped in [`DropChainHandle`] to stop the chain - handle when this is dropped. - */ - pub handle_a: ChainA, - - /** - The [`ChainHandle`] for chain B. - - The handle is wrapped in [`DropChainHandle`] to stop the chain - handle when this is dropped. - */ - pub handle_b: ChainB, - - /** - The tagged [`FullNode`] for chain A. - */ - pub node_a: MonoTagged, - - /** - The tagged [`FullNode`] for chain B. - */ - pub node_b: MonoTagged, - - pub foreign_clients: ForeignClientPair, -} - -impl ConnectedChains { - /** - Create a new [`ConnectedChains`] - */ - pub fn new( - handle_a: ChainA, - handle_b: ChainB, - node_a: MonoTagged, - node_b: MonoTagged, - foreign_clients: ForeignClientPair, - ) -> Self { - Self { - handle_a, - handle_b, - node_a, - node_b, - foreign_clients, - } - } - - /** - Get a reference to the chain handle for chain A. - */ - pub fn handle_a(&self) -> &ChainA { - &self.handle_a - } - - /** - Get a reference to the chain handle for chain B. - */ - pub fn handle_b(&self) -> &ChainB { - &self.handle_b - } - - /** - The chain ID of chain A. - */ - pub fn chain_id_a(&self) -> TaggedChainIdRef { - self.node_a.chain_id() - } - - pub fn client_id_a(&self) -> TaggedClientIdRef { - self.foreign_clients.client_id_a() - } - - pub fn client_id_b(&self) -> TaggedClientIdRef { - self.foreign_clients.client_id_b() - } - - /** - The chain ID of chain B. - */ - pub fn chain_id_b(&self) -> TaggedChainIdRef { - self.node_b.chain_id() - } - - /** - Switch the position between chain A and chain B. - - The original chain B become the new chain A, and the original chain A - become the new chain B. 
- */ - pub fn flip(self) -> ConnectedChains { - ConnectedChains { - handle_a: self.handle_b, - handle_b: self.handle_a, - node_a: self.node_b, - node_b: self.node_a, - foreign_clients: self.foreign_clients.flip(), - } - } - - pub fn map_chain( - self, - map_a: &impl Fn(ChainA) -> ChainC, - map_b: &impl Fn(ChainB) -> ChainD, - ) -> ConnectedChains { - ConnectedChains { - handle_a: map_a(self.handle_a), - handle_b: map_b(self.handle_b), - node_a: self.node_a.retag(), - node_b: self.node_b.retag(), - foreign_clients: self.foreign_clients.map_chain(map_a, map_b), - } - } -} - -impl ExportEnv for ConnectedChains { - fn export_env(&self, writer: &mut impl EnvWriter) { - writer.write_env("CHAIN_ID_A", &format!("{}", self.node_a.chain_id())); - writer.write_env("CHAIN_ID_B", &format!("{}", self.node_b.chain_id())); - - writer.write_env( - "CLIENT_ID_B", - &format!("{}", self.foreign_clients.client_a_to_b.id()), - ); - writer.write_env( - "CLIENT_ID_A", - &format!("{}", self.foreign_clients.client_b_to_a.id()), - ); - - self.node_a.export_env(&mut prefix_writer("NODE_A", writer)); - self.node_b.export_env(&mut prefix_writer("NODE_B", writer)); - } -} - -/** - Newtype wrapper for [`ChainHandle`] to stop the chain handle when - this value is dropped. - - Note that we cannot stop the chain on drop for - [`CountingAndCachingChainHandle`](ibc_relayer::chain::handle::CountingAndCachingChainHandle) - itself, as the chain handles can be cloned. But for testing purposes, - we alway stop the chain handle when this "canonical" chain handle - is dropped. - - This is necessary as otherwise the chain handle will display error - logs when the full node is terminated at the end of tests. -*/ -pub struct DropChainHandle(pub Chain); - -impl Drop for DropChainHandle { - fn drop(&mut self) { - info!("stopping chain handle {}", self.0.id()); - let _ = self.0.shutdown(); - } -} diff --git a/tools/test-framework/src/types/binary/channel.rs b/tools/test-framework/src/types/binary/channel.rs deleted file mode 100644 index 99f5bb2e84..0000000000 --- a/tools/test-framework/src/types/binary/channel.rs +++ /dev/null @@ -1,98 +0,0 @@ -/*! - Type definitions for channel connected between two chains. -*/ - -use ibc_relayer::chain::handle::ChainHandle; -use ibc_relayer::channel::Channel; - -use super::connection::ConnectedConnection; -use crate::types::env::{EnvWriter, ExportEnv}; -use crate::types::id::{TaggedChannelId, TaggedPortId}; - -/** - A channel that is connected between two chains with the full handshake - completed. - - This is a wrapper around [`Channel`] with infallible retrieval - of the channel IDs, as the channel handshake has been completed. -*/ -#[derive(Debug, Clone)] -pub struct ConnectedChannel { - /** - The underlying [`ConnectedConnection`] that the channel operates on. - */ - pub connection: ConnectedConnection, - - /** - The underlying relayer [`Channel`]. - */ - pub channel: Channel, - - /** - The channel ID on chain A, corresponding to the channel connected - to chain B. - */ - pub channel_id_a: TaggedChannelId, - - /** - The channel ID on chain B, corresponding to the channel connected - to chain A. - */ - pub channel_id_b: TaggedChannelId, - - /** - The port ID on chain A, corresponding to the channel connected - to chain B. - */ - pub port_a: TaggedPortId, - - /** - The port ID on chain B, corresponding to the channel connected - to chain A. - */ - pub port_b: TaggedPortId, -} - -impl ConnectedChannel { - /** - Flip the position between chain A and chain B. 
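`DropChainHandle` above is a plain drop guard: it shuts the wrapped handle down exactly once, when the "canonical" wrapper goes out of scope, while clones of the inner handle remain usable until then. The same shape as a standalone sketch (the trait here is a stand-in, not the real `ChainHandle`):

trait Shutdown {
    fn shutdown(&self) -> Result<(), String>;
}

struct DropGuard<T: Shutdown>(pub T);

impl<T: Shutdown> Drop for DropGuard<T> {
    fn drop(&mut self) {
        // Best-effort cleanup when the guard is dropped; the error is ignored,
        // mirroring the `let _ = self.0.shutdown();` in the removed code.
        let _ = self.0.shutdown();
    }
}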
- - The original chain A become the new chain B, and the original chain B - become the new chain A. - */ - pub fn flip(self) -> ConnectedChannel { - ConnectedChannel { - connection: self.connection.flip(), - channel: self.channel.flipped(), - channel_id_a: self.channel_id_b, - channel_id_b: self.channel_id_a, - port_a: self.port_b, - port_b: self.port_a, - } - } - - pub fn map_chain( - self, - map_a: impl Fn(ChainA) -> ChainC, - map_b: impl Fn(ChainB) -> ChainD, - ) -> ConnectedChannel { - ConnectedChannel { - connection: self.connection.map_chain(&map_a, &map_b), - channel: self.channel.map_chain(&map_a, &map_b), - channel_id_a: self.channel_id_a.retag(), - channel_id_b: self.channel_id_b.retag(), - port_a: self.port_a.retag(), - port_b: self.port_b.retag(), - } - } -} - -impl ExportEnv for ConnectedChannel { - fn export_env(&self, writer: &mut impl EnvWriter) { - self.connection.export_env(writer); - writer.write_env("CHANNEL_ID_A", &format!("{}", self.channel_id_a)); - writer.write_env("PORT_A", &format!("{}", self.port_a)); - writer.write_env("CHANNEL_ID_B", &format!("{}", self.channel_id_b)); - writer.write_env("PORT_B", &format!("{}", self.port_b)); - } -} diff --git a/tools/test-framework/src/types/binary/client.rs b/tools/test-framework/src/types/binary/client.rs deleted file mode 100644 index 87d5032436..0000000000 --- a/tools/test-framework/src/types/binary/client.rs +++ /dev/null @@ -1,51 +0,0 @@ -/*! - Type definitions for IBC clients connected between two chains. -*/ - -use crate::types::env::{EnvWriter, ExportEnv}; -use crate::types::id::TaggedClientId; - -/** - Data type to store the client IDs of two chains that are connected. -*/ -#[derive(Debug, Clone)] -pub struct ClientIdPair { - /** - The client ID on chain A. - */ - pub client_id_a: TaggedClientId, - - /** - The client ID on chain B. - */ - pub client_id_b: TaggedClientId, -} - -impl ClientIdPair { - pub fn new( - client_id_a: TaggedClientId, - client_id_b: TaggedClientId, - ) -> Self { - Self { - client_id_a, - client_id_b, - } - } - - /** - Flip the position of chain A and B of the client. - */ - pub fn flip(self) -> ClientIdPair { - ClientIdPair { - client_id_a: self.client_id_b, - client_id_b: self.client_id_a, - } - } -} - -impl ExportEnv for ClientIdPair { - fn export_env(&self, writer: &mut impl EnvWriter) { - writer.write_env("CLIENT_ID_A", &format!("{}", self.client_id_a)); - writer.write_env("CLIENT_ID_B", &format!("{}", self.client_id_b)); - } -} diff --git a/tools/test-framework/src/types/binary/connection.rs b/tools/test-framework/src/types/binary/connection.rs deleted file mode 100644 index 34455c3e83..0000000000 --- a/tools/test-framework/src/types/binary/connection.rs +++ /dev/null @@ -1,95 +0,0 @@ -/*! - Type definitions for connection that is connected between two chains. -*/ - -use ibc_relayer::chain::handle::ChainHandle; -use ibc_relayer::connection::Connection; - -use super::client::ClientIdPair; -use crate::types::env::{EnvWriter, ExportEnv}; -use crate::types::id::TaggedConnectionId; - -/** - A connection that is connected between two chains with the full - handshake completed. - - This is a wrapper around [`Connection`] with infallible retrieval - of the connection IDs, as the connection handshake has been completed. -*/ -#[derive(Debug, Clone)] -pub struct ConnectedConnection { - /** - The underlying connected clients - */ - pub client_ids: ClientIdPair, - - /** - The underlying [`Connection`] data - */ - pub connection: Connection, - - /** - The connection ID on chain A. 
- */ - pub connection_id_a: TaggedConnectionId, - - /** - The connection ID on chain B. - */ - pub connection_id_b: TaggedConnectionId, -} - -impl ConnectedConnection { - pub fn new( - client_ids: ClientIdPair, - connection: Connection, - connection_id_a: TaggedConnectionId, - connection_id_b: TaggedConnectionId, - ) -> Self { - Self { - client_ids, - connection, - connection_id_a, - connection_id_b, - } - } - - /** - Flip the position of chain A and B of the connection. - */ - pub fn flip(self) -> ConnectedConnection { - ConnectedConnection { - client_ids: self.client_ids.flip(), - - connection: self.connection.flipped(), - - connection_id_a: self.connection_id_b, - - connection_id_b: self.connection_id_a, - } - } - - pub fn map_chain( - self, - map_a: impl Fn(ChainA) -> ChainC, - map_b: impl Fn(ChainB) -> ChainD, - ) -> ConnectedConnection { - ConnectedConnection::new( - ClientIdPair::new( - self.client_ids.client_id_a.retag(), - self.client_ids.client_id_b.retag(), - ), - self.connection.map_chain(map_a, map_b), - self.connection_id_a.retag(), - self.connection_id_b.retag(), - ) - } -} - -impl ExportEnv for ConnectedConnection { - fn export_env(&self, writer: &mut impl EnvWriter) { - self.client_ids.export_env(writer); - writer.write_env("CONNECTION_ID_A", &format!("{}", self.connection_id_a)); - writer.write_env("CONNECTION_ID_B", &format!("{}", self.connection_id_b)); - } -} diff --git a/tools/test-framework/src/types/binary/foreign_client.rs b/tools/test-framework/src/types/binary/foreign_client.rs deleted file mode 100644 index e399fd1bfd..0000000000 --- a/tools/test-framework/src/types/binary/foreign_client.rs +++ /dev/null @@ -1,62 +0,0 @@ -use crate::relayer::foreign_client::TaggedForeignClientExt; -use crate::types::id::TaggedClientIdRef; -use ibc_relayer::chain::handle::ChainHandle; -use ibc_relayer::foreign_client::ForeignClient; - -#[derive(Clone)] -pub struct ForeignClientPair { - pub client_a_to_b: ForeignClient, - pub client_b_to_a: ForeignClient, -} - -impl ForeignClientPair { - pub fn new( - client_a_to_b: ForeignClient, - client_b_to_a: ForeignClient, - ) -> Self { - Self { - client_a_to_b, - client_b_to_a, - } - } - - pub fn client_id_a(&self) -> TaggedClientIdRef { - self.client_b_to_a.tagged_client_id() - } - - pub fn client_id_b(&self) -> TaggedClientIdRef { - self.client_a_to_b.tagged_client_id() - } - - pub fn handle_a(&self) -> ChainA { - self.client_b_to_a.dst_chain() - } - - pub fn handle_b(&self) -> ChainB { - self.client_a_to_b.dst_chain() - } - - /** - Switch the position between chain A and chain B. - - The original chain B become the new chain A, and the original chain A - become the new chain B. - */ - pub fn flip(self) -> ForeignClientPair { - ForeignClientPair { - client_a_to_b: self.client_b_to_a, - client_b_to_a: self.client_a_to_b, - } - } - - pub fn map_chain( - self, - map_a: &impl Fn(ChainA) -> ChainC, - map_b: &impl Fn(ChainB) -> ChainD, - ) -> ForeignClientPair { - ForeignClientPair { - client_a_to_b: self.client_a_to_b.map_chain(map_b, map_a), - client_b_to_a: self.client_b_to_a.map_chain(map_a, map_b), - } - } -} diff --git a/tools/test-framework/src/types/binary/mod.rs b/tools/test-framework/src/types/binary/mod.rs deleted file mode 100644 index 24fc47b30f..0000000000 --- a/tools/test-framework/src/types/binary/mod.rs +++ /dev/null @@ -1,9 +0,0 @@ -/*! - Definitions for data structures involving two chains. 
-*/ - -pub mod chains; -pub mod channel; -pub mod client; -pub mod connection; -pub mod foreign_client; diff --git a/tools/test-framework/src/types/config.rs b/tools/test-framework/src/types/config.rs deleted file mode 100644 index bc971f999c..0000000000 --- a/tools/test-framework/src/types/config.rs +++ /dev/null @@ -1,61 +0,0 @@ -/*! - Definition for the test configuration. -*/ - -use core::fmt::Debug; -use std::path::PathBuf; - -/** - The test config to be passed to each test case. Currently this is loaded - from the [`init_test`](crate::bootstrap::init::init_test) function - based on the test environment variables. -*/ -#[derive(Debug)] -pub struct TestConfig { - /** - The command that the [`ChainDriver`](crate::chain::driver::ChainDriver) - should use to execute chain commands. Defaults to `gaiad`. This can be - overridden with the `$CHAIN_COMMAND_PATH` environment variable. - - TODO: We might want to add a new field - `extra_chain_command_paths: Vec` - for additional chain command paths that the `ChainDriver` can use for different - implementations of chains to be spawned. - - For example one can list `"gaiad4"` as the main chain command and then - `["gaiad5"]` in `extra_chain_command_paths`, so that binary chain tests - will use `gaiad5` for the second chain being spawned. - */ - pub chain_command_path: String, - - pub account_prefix: String, - - /** - The directory path for storing the chain and relayer files. - Defaults to `"data"`. This can be overridden with the `$CHAIN_STORE_DIR` - environment variable. - - Note that this will resolve to `"relayer-test/data"` relative to the - root project directory, as `cargo test` will automatically csuspende the - working directory to the sub crate's directory. - */ - pub chain_store_dir: PathBuf, - - /** - Whether to suspend a test case when it fails whenever possible. - Defaults to `false`. This can be overrideen by setting `HANG_ON_FAIL=1`. - - Note that even when this is enabled, not all test case will necessary - suspend on failure. The suspend-on-failure hook is handled by individual - test runners such as - [`RunBinaryChainTest`](crate::framework::binary::chain::RunBinaryChainTest), - which will suspend the test case only if the test has been setup - successfully and only for the case when the runner holds the required - reference for the underlying resources. Because otherwise there is - no point suspending the test if the underlying chains or relayers are - no longer running. - */ - pub hang_on_fail: bool, - - pub bootstrap_with_random_ids: bool, -} diff --git a/tools/test-framework/src/types/env.rs b/tools/test-framework/src/types/env.rs deleted file mode 100644 index 1863f77d5d..0000000000 --- a/tools/test-framework/src/types/env.rs +++ /dev/null @@ -1,118 +0,0 @@ -/*! - Types for exporting test setup information into environment variables. -*/ - -use core::convert::AsRef; -use itertools::Itertools; -use std::collections::BTreeMap; -use std::fs::write; -use std::path::Path; - -use crate::error::Error; -use crate::types::tagged::*; - -/** - This trait is implemented by data types that can export the contained - information as environment variables. - - Using this, test framework can export them as `.env` files, which users - can then manually `source` them in the terminal to interact with the - test chains and relayer when the tests are suspended. -*/ -pub trait ExportEnv { - /** - Export the environment variables using the given [`EnvWriter`]. 
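The `TestConfig` fields above are documented as being driven by environment variables (`$CHAIN_COMMAND_PATH`, `$CHAIN_STORE_DIR`, `HANG_ON_FAIL=1`). A standalone sketch of how such values could be read with the defaults the comments mention; this is not the crate's `init_test` implementation:

use std::env;
use std::path::PathBuf;

fn chain_command_path() -> String {
    // Defaults to `gaiad` unless overridden.
    env::var("CHAIN_COMMAND_PATH").unwrap_or_else(|_| "gaiad".to_string())
}

fn chain_store_dir() -> PathBuf {
    // Defaults to `data` unless overridden.
    PathBuf::from(env::var("CHAIN_STORE_DIR").unwrap_or_else(|_| "data".to_string()))
}

fn hang_on_fail() -> bool {
    // `HANG_ON_FAIL=1` enables suspend-on-failure.
    env::var("HANG_ON_FAIL").map(|v| v == "1").unwrap_or(false)
}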
- */ - fn export_env(&self, writer: &mut impl EnvWriter); -} - -/** - The exported environment variables are stored in a data type that - implements this trait. -*/ -pub trait EnvWriter { - /** - Write an environment variable with the given key and value. - - Note that overlapping keys will be overridden with the new value. - */ - fn write_env(&mut self, key: &str, value: &str); -} - -/** - Create an [`EnvWriter`] that adds a prefix to the keys of the exported envs. -*/ -pub fn prefix_writer<'a, Writer: EnvWriter>( - prefix: &str, - writer: &'a mut Writer, -) -> impl EnvWriter + 'a { - PrefixEnvWriter { - prefix: prefix.to_string(), - writer, - } -} - -/** - A wrapper that implements [`EnvWriter`] by adding a prefix to the key - before writing to the underlying [`EnvWriter`]. -*/ -pub struct PrefixEnvWriter<'a, Writer> { - prefix: String, - writer: &'a mut Writer, -} - -impl EnvWriter for BTreeMap { - fn write_env(&mut self, key: &str, value: &str) { - self.insert(key.to_string(), value.to_string()); - } -} - -impl<'a, Writer: EnvWriter> EnvWriter for PrefixEnvWriter<'a, Writer> { - fn write_env(&mut self, key: &str, value: &str) { - self.writer - .write_env(&format!("{}_{}", self.prefix, key), value); - } -} - -impl ExportEnv for MonoTagged { - fn export_env(&self, writer: &mut impl EnvWriter) { - self.value().export_env(writer); - } -} - -impl ExportEnv for DualTagged { - fn export_env(&self, writer: &mut impl EnvWriter) { - self.value().export_env(writer); - } -} - -impl<'a, T1: ExportEnv, T2: ExportEnv> ExportEnv for (&'a T1, &'a T2) { - fn export_env(&self, writer: &mut impl EnvWriter) { - self.0.export_env(writer); - self.1.export_env(writer); - } -} - -/** - Retrieve the environment variables exported by a type implementing - `ExportEnv`, and export them as a string containing the variables - in the form of `KEY=VALUE` on each line. -*/ -pub fn format_env(exporter: &impl ExportEnv) -> String { - let mut envs = BTreeMap::new(); - exporter.export_env(&mut envs); - - envs.iter() - .map(|(key, value)| format!("{}={}", key, value)) - .join("\n") -} - -/** - Retrieve the environment variables exported by a type implementing - `ExportEnv`, and save them as a `.env` file to the given file path. -*/ -pub fn write_env(path: impl AsRef, exporter: &impl ExportEnv) -> Result<(), Error> { - write(path, format_env(exporter))?; - - Ok(()) -} diff --git a/tools/test-framework/src/types/id.rs b/tools/test-framework/src/types/id.rs deleted file mode 100644 index 07481784aa..0000000000 --- a/tools/test-framework/src/types/id.rs +++ /dev/null @@ -1,76 +0,0 @@ -/*! - This module contains the [tagged version](crate::types::tagged) of the - identifier types defined in [`ibc::core::ics24_host::identifier`]. -*/ - -use crate::types::tagged::*; -use ibc::core::ics24_host::identifier::*; - -/** - A [`ChainId`] tagged with the chain it belongs to. -*/ -pub type TaggedChainId = MonoTagged; - -/** - A reference to [`ChainId`] tagged with the chain it - belongs to. -*/ -pub type TaggedChainIdRef<'a, Chain> = MonoTagged; - -/** - A [`ClientId`] tagged with first, the chain it belongs to, and second, - the counterparty chain that the client ID corresponds to. -*/ -pub type TaggedClientId = DualTagged; - -/** - A reference to [`ClientId`] tagged with first, the chain it belongs to, - and second, the counterparty chain that the client ID corresponds to. 
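The `ExportEnv`/`EnvWriter` pair above is what turns test resources into `.env` files. A small sketch of implementing `ExportEnv` for a hypothetical type and rendering it with `format_env` (the struct and its field are illustrative, not part of the crate):

struct ExampleResource {
    rpc_port: u16,
}

impl ExportEnv for ExampleResource {
    fn export_env(&self, writer: &mut impl EnvWriter) {
        writer.write_env("RPC_PORT", &self.rpc_port.to_string());
    }
}

// `format_env(&ExampleResource { rpc_port: 26657 })` yields the single line
// `RPC_PORT=26657`, and `write_env(path, &resource)` would persist the same
// content as a `.env` file.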
-*/ -pub type TaggedClientIdRef<'a, ChainA, ChainB> = DualTagged; - -/** - A [`PortId`] tagged with first, the host chain that has - the port ID, and second, the counterparty chain that the port ID - corresponds to. -*/ -pub type TaggedPortId = DualTagged; - -/** - A reference to [`PortId`](PortId) tagged with first, the host chain - that has the port ID, and second, the counterparty chain that the port ID - corresponds to. -*/ -pub type TaggedPortIdRef<'a, ChainA, ChainB> = DualTagged; - -/** - A [`ChannelId`] tagged with first, the host chain that - has the channel ID, and second, the counterparty chain that the channel - ID corresponds to. -*/ -pub type TaggedChannelId = DualTagged; - -/** - A reference to [`ChannelId`] tagged with first, the host - chain that has the channel ID, and second, the counterparty chain that the - channel ID corresponds to. -*/ -pub type TaggedChannelIdRef<'a, ChainA, ChainB> = DualTagged; - -/** - A [`ConnectionId`] tagged with first, the host chain - that has the connection ID, and second, the counterparty chain that the - connection ID corresponds to. -*/ -pub type TaggedConnectionId = DualTagged; - -/** - A reference to [`ConnectionId`] tagged with first, - the host chain that has the connection ID, and second, the counterparty - chain that the connection ID corresponds to. -*/ -pub type TaggedConnectionIdRef<'a, ChainA, ChainB> = DualTagged; - -pub fn tagged_transfer_port() -> TaggedPortId { - DualTagged::new(PortId::transfer()) -} diff --git a/tools/test-framework/src/types/mod.rs b/tools/test-framework/src/types/mod.rs deleted file mode 100644 index a1c1db153d..0000000000 --- a/tools/test-framework/src/types/mod.rs +++ /dev/null @@ -1,19 +0,0 @@ -/*! - This module contains definitions of core data structures that are used - in the test suite. - - A data type belongs to this module if it only comes with the struct - definition with few methods associated to that struct. If a data - type has many complicated methods or trait implementations, it - probably does not belong to here. -*/ - -pub mod binary; -pub mod config; -pub mod env; -pub mod id; -pub mod nary; -pub mod process; -pub mod single; -pub mod tagged; -pub mod wallet; diff --git a/tools/test-framework/src/types/nary/aliases.rs b/tools/test-framework/src/types/nary/aliases.rs deleted file mode 100644 index 514ae8c95b..0000000000 --- a/tools/test-framework/src/types/nary/aliases.rs +++ /dev/null @@ -1,34 +0,0 @@ -use crate::types::tagged::*; - -/** - Lifts a const generic `usize` into a type. - - This allows us to use `usize` as a tag, for example, - `MonoTagged, String>` is a `String` that is - tagged by the const generic `1`. -*/ -pub enum Size {} - -/** - Tag a `Handle: ChainHandle` type with a const generic `TAG: usize`. - - In an N-ary chain implementation, we have to use the same - [`Handle: ChainHandle`](ibc_relayer::chain::handle::ChainHandle) - type for all elements in the N-ary data structures. However since the - [`ChainHandle`](ibc_relayer::chain::handle::ChainHandle) type is - also being used to tag other values, we want to be able to differentiate - between tagged values coming from chains at different positions - in the N-ary setup. - - The solution is to tag each `Handle` with the const generic - positions. With that a position-tagged type like - `MonoTagged, Handle>` would have a different type - from the type tagged at a different position like - `MonoTagged, Handle>`. 
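The `Size` marker above lifts a const-generic position into a type so that it can serve as a tag. A standalone sketch of the idea, with a stand-in for the crate's `MonoTagged` wrapper:

use core::marker::PhantomData;

// Lift a `usize` position into a type; there are no values of `Size<N>`.
enum Size<const TAG: usize> {}

// Stand-in for the crate's `MonoTagged` wrapper.
struct MonoTagged<Tag, Value>(Value, PhantomData<Tag>);

fn example() {
    // These two values wrap the same inner type, but have different types
    // because their position tags differ.
    let _first: MonoTagged<Size<0>, String> = MonoTagged("a".to_string(), PhantomData);
    let _second: MonoTagged<Size<1>, String> = MonoTagged("b".to_string(), PhantomData);
}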
- - To reduce the boilerplate, we define the type alias - `TaggedHandle` so that less typing is needed to refer - to `ChainHandle`s that are tagged by position. - -*/ -pub type NthChainHandle = MonoTagged, Handle>; diff --git a/tools/test-framework/src/types/nary/chains.rs b/tools/test-framework/src/types/nary/chains.rs deleted file mode 100644 index c16dc5c20b..0000000000 --- a/tools/test-framework/src/types/nary/chains.rs +++ /dev/null @@ -1,255 +0,0 @@ -/*! - Constructs for N-ary connected chains. -*/ - -use core::convert::{From, TryFrom}; -use eyre::eyre; -use ibc_relayer::chain::handle::ChainHandle; -use ibc_relayer::foreign_client::ForeignClient; - -use crate::error::Error; -use crate::types::binary::chains::ConnectedChains as BinaryConnectedChains; -use crate::types::env::{prefix_writer, EnvWriter, ExportEnv}; -use crate::types::nary::aliases::*; -use crate::types::nary::foreign_client::*; -use crate::types::single::node::FullNode; -use crate::types::tagged::*; -use crate::util::array::try_into_array; - -/** - A fixed-size N-ary connected chains as specified by `SIZE`. - - Contains `SIZE` number of [`ChainHandle`]s, `SIZE` number of - [`FullNode`]s, and `SIZE`x`SIZE` numbers of [`ForeignClient`] pairs. - - A `ConnectedChains` can be constructed by first constructing - a [`DynamicConnectedChains`], and then calling - [`try_into()`](core::convert::TryInto::try_into). -*/ -#[derive(Clone)] -pub struct NaryConnectedChains { - chain_handles: [Handle; SIZE], - full_nodes: [FullNode; SIZE], - foreign_clients: ForeignClientPairs, -} - -/** - A dynamic-sized N-ary connected chains, based on the - length of the underlying [`Vec`]. Each list must have the - same length. - - The main use of [`DynamicConnectedChains`] is to convert it into - a [`NaryConnectedChains`]. -*/ -#[derive(Clone)] -pub struct DynamicConnectedChains { - chain_handles: Vec, - full_nodes: Vec, - pub foreign_clients: Vec>>, -} - -/** - A pair of binary [`ConnectedChains`](BinaryConnectedChains) that are - tagged by a `Handle: CHainHandle` and the const generics - `CHAIN_A: usize` and `CHAIN_B: usize`. - - Recall that binary [`ConnectedChains`](BinaryConnectedChains) is tagged - by two generic types `ChainA: ChainHandle` and `ChainB: ChainHandle`. - For the case of N-ary chains, all elements must have the same type - `Handle: ChainHandle`. But we want to still able to differentiate - them when used as type parameter to `ConnectedChains`. - - The solution is to tag each `Handle` with the const generic - positions. So the first chain is `MonoTagged, Handle>`, - which has a different type from the second chain - `MonoTagged, Handle>`. - - Since writing the fully qualified chain types are rather cumbersome, - we use the type alias `TaggedConnectedChains` to refer to - connected chains that are parameterized by const generics rather - than the usual abstract type tags. -*/ -pub type NthConnectedChains = - BinaryConnectedChains, NthChainHandle>; - -/** - A [`FullNode`] that is tagged by a `Handle: ChainHandle` and - the const generics `TAG: usize`. -*/ -pub type NthFullNode = MonoTagged, FullNode>; - -impl NaryConnectedChains { - /** - Get a connected chain pair at position `CHAIN_A` and `CHAIN_B`, which - must be less than `SIZE`. - - Returns a binary [`ConnectedChains`](BinaryConnectedChains) with the - first chain tagged by `CHAIN_A`, and second chain tagged by `CHAIN_B`. 
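The `connected_chains_at` accessor described above (and implemented just below) projects an ordinary binary pair out of the N-ary setup using const-generic positions. A hedged usage fragment from an N-ary test body, with illustrative positions; `chains` is assumed to be a `NaryConnectedChains` of size at least 3:

let chains_ab = chains.connected_chains_at::<0, 1>()?;
let chains_bc = chains.connected_chains_at::<1, 2>()?;

// Each projection is a plain binary `ConnectedChains`, so the binary
// helpers and assertions can be reused on it.
let _handle_a = chains_ab.handle_a();
let _client_b_to_c = &chains_bc.foreign_clients.client_a_to_b;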
- */ - pub fn connected_chains_at( - &self, - ) -> Result, Error> { - if CHAIN_A >= SIZE || CHAIN_B >= SIZE { - Err(Error::generic(eyre!( - "cannot get chains beyond position {}/{}", - CHAIN_A, - CHAIN_B - ))) - } else { - let node_a = self.full_nodes[CHAIN_A].clone(); - let node_b = self.full_nodes[CHAIN_B].clone(); - - let handle_a = self.chain_handles[CHAIN_A].clone(); - let handle_b = self.chain_handles[CHAIN_B].clone(); - - let foreign_clients = self.foreign_client_pair_at::()?; - - Ok(BinaryConnectedChains::new( - MonoTagged::new(handle_a), - MonoTagged::new(handle_b), - MonoTagged::new(node_a), - MonoTagged::new(node_b), - foreign_clients, - )) - } - } - - /** - Get the [`FullNode`] at position `POS`, which must be less than `SIZE`. - - Returns a [`FullNode`] tagged with `POS`. - */ - pub fn full_node_at(&self) -> Result, Error> { - if POS >= SIZE { - Err(Error::generic(eyre!( - "cannot get full_node beyond position {}", - POS - ))) - } else { - let full_node: FullNode = self.full_nodes[POS].clone(); - Ok(MonoTagged::new(full_node)) - } - } - - /** - Get the [`ChainHandle`] at position `POS`, which must be less than `SIZE`. - - Returns a [`ChainHandle`] tagged by `POS`. - */ - pub fn chain_handle_at(&self) -> Result, Error> { - if POS >= SIZE { - Err(Error::generic(eyre!( - "cannot get full_node beyond position {}", - POS - ))) - } else { - let handle = self.chain_handles[POS].clone(); - Ok(MonoTagged::new(handle)) - } - } - - /** - Get the [`ForeignClient`] with the source chain at position - `SRC: usize` and destination chain at position `DEST: usize`, - which must be less than `SIZE`. - */ - pub fn foreign_client_at( - &self, - ) -> Result, Error> { - self.foreign_clients.foreign_client_at::() - } - - pub fn foreign_client_pair_at( - &self, - ) -> Result, Error> { - self.foreign_clients - .foreign_client_pair_at::() - } - - pub fn chain_handles(&self) -> &[Handle; SIZE] { - &self.chain_handles - } - - pub fn full_nodes(&self) -> &[FullNode; SIZE] { - &self.full_nodes - } - - pub fn foreign_clients(&self) -> &ForeignClientPairs { - &self.foreign_clients - } -} - -impl DynamicConnectedChains { - pub fn new( - chain_handles: Vec, - full_nodes: Vec, - foreign_clients: Vec>>, - ) -> Self { - Self { - chain_handles, - full_nodes, - foreign_clients, - } - } - - pub fn chain_handles(&self) -> &Vec { - &self.chain_handles - } - - pub fn full_nodes(&self) -> &Vec { - &self.full_nodes - } - - pub fn foreign_clients(&self) -> &Vec>> { - &self.foreign_clients - } -} - -impl From> - for DynamicConnectedChains -{ - fn from(chains: NaryConnectedChains) -> Self { - Self { - chain_handles: chains.chain_handles.into(), - full_nodes: chains.full_nodes.into(), - foreign_clients: chains.foreign_clients.into_nested_vec(), - } - } -} - -impl TryFrom> - for NaryConnectedChains -{ - type Error = Error; - - fn try_from(chains: DynamicConnectedChains) -> Result { - Ok(NaryConnectedChains { - chain_handles: try_into_array(chains.chain_handles)?, - full_nodes: try_into_array(chains.full_nodes)?, - foreign_clients: chains.foreign_clients.try_into()?, - }) - } -} - -impl From> - for NthConnectedChains<0, 1, Handle> -{ - fn from(chains: NaryConnectedChains) -> Self { - chains.connected_chains_at::<0, 1>().unwrap() - } -} - -impl ExportEnv for NaryConnectedChains { - fn export_env(&self, writer: &mut impl EnvWriter) { - for (i, node) in self.full_nodes.iter().enumerate() { - writer.write_env( - &format!("CHAIN_ID_{}", i), - &format!("{}", node.chain_driver.chain_id), - ); - - 
self.foreign_clients.export_env(writer); - - node.export_env(&mut prefix_writer(&format!("NODE_{}", i), writer)); - } - } -} diff --git a/tools/test-framework/src/types/nary/channel.rs b/tools/test-framework/src/types/nary/channel.rs deleted file mode 100644 index d61e80de24..0000000000 --- a/tools/test-framework/src/types/nary/channel.rs +++ /dev/null @@ -1,158 +0,0 @@ -/*! - Constructs for N-ary connected channels. -*/ - -use core::convert::TryFrom; -use eyre::eyre; -use ibc::core::ics24_host::identifier::{ChannelId, PortId}; -use ibc_relayer::chain::handle::ChainHandle; -use ibc_relayer::channel::Channel; - -use super::aliases::NthChainHandle; -use crate::error::Error; -use crate::types::binary::channel::ConnectedChannel; -use crate::types::env::{EnvWriter, ExportEnv}; -use crate::types::tagged::*; -use crate::util::array::try_into_nested_array; - -/** - A fixed-size N-ary connected channels as specified by `SIZE`. - - Contains `SIZE`x`SIZE` number of binary [`ConnectedChannel`]s. -*/ -#[derive(Debug, Clone)] -pub struct ConnectedChannels { - channels: [[ConnectedChannel; SIZE]; SIZE], -} - -/** - A dynamic-sized N-ary connected channels, consist of a nested - vector of binary [`ConnectedChannel`]s which must be of the - same length. -*/ -#[derive(Debug, Clone)] -pub struct DynamicConnectedChannels { - channels: Vec>>, -} - -/** - A tagged [`ConnectedChannel`] that is connected between the chains - at position `CHAIN_A` and `CHAIN_B`. -*/ -pub type NthConnectedChannel = - ConnectedChannel, NthChainHandle>; - -/** - A tagged [`Channel`] with the A side at `CHAIN_A` position and B side at - the `CHAIN_B` position. -*/ -pub type NthChannel = - Channel, NthChainHandle>; - -/** - A tagged [`ChannelId`] for the chain at position `CHAIN_A` that is correspond - to the counterparty chain at position `CHAIN_B` -*/ -pub type NthChannelId = - DualTagged, NthChainHandle, ChannelId>; - -/** - A tagged [`PortId`] for the chain at position `CHAIN_A` that is correspond - to the counterparty chain at position `CHAIN_B` -*/ -pub type NthPortId = - DualTagged, NthChainHandle, PortId>; - -impl ConnectedChannels { - /** - Get the binary [`ConnectedChannel`] at position `CHAIN_A` and `CHAIN_B`, - which must be less than `SIZE`. 
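The position checks in these accessors happen at runtime, since stable Rust cannot yet express `CHAIN_A < SIZE` as a compile-time bound on const generics. A condensed, standalone sketch of the same accessor pattern, with made-up `Grid`/`at` names and `String` errors in place of the framework's `Error` type:

```rust
/// Made-up container mirroring the `SIZE` x `SIZE` layout used above.
struct Grid<T, const SIZE: usize> {
    cells: [[T; SIZE]; SIZE],
}

impl<T: Clone, const SIZE: usize> Grid<T, SIZE> {
    /// Fetch the element at (`A`, `B`); both positions must be less than `SIZE`,
    /// and the bound is only checked at runtime, as in `channel_at` above.
    fn at<const A: usize, const B: usize>(&self) -> Result<T, String> {
        self.cells
            .get(A)
            .and_then(|row| row.get(B))
            .cloned()
            .ok_or_else(|| format!("cannot get element beyond position {}/{}", A, B))
    }
}

fn main() {
    let grid = Grid { cells: [[1, 2], [3, 4]] };
    assert_eq!(grid.at::<0, 1>(), Ok(2));
    assert!(grid.at::<2, 0>().is_err());
}
```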
- */ - pub fn channel_at( - &self, - ) -> Result, Error> { - if CHAIN_A >= SIZE || CHAIN_B >= SIZE { - Err(Error::generic(eyre!( - "cannot get channel beyond position {}/{}", - CHAIN_A, - CHAIN_B - ))) - } else { - let raw_channel = self.channels[CHAIN_A][CHAIN_B].clone(); - - let channel = raw_channel.map_chain(MonoTagged::new, MonoTagged::new); - - Ok(channel) - } - } - - pub fn channels(&self) -> &[[ConnectedChannel; SIZE]; SIZE] { - &self.channels - } -} - -impl DynamicConnectedChannels { - pub fn new(channels: Vec>>) -> Self { - Self { channels } - } - - pub fn channels(&self) -> &Vec>> { - &self.channels - } -} - -impl TryFrom> - for ConnectedChannels -{ - type Error = Error; - - fn try_from(channels: DynamicConnectedChannels) -> Result { - Ok(ConnectedChannels { - channels: try_into_nested_array(channels.channels)?, - }) - } -} - -impl From> for NthConnectedChannel<0, 1, Handle> { - fn from(channels: ConnectedChannels) -> Self { - channels.channel_at::<0, 1>().unwrap() - } -} - -impl ExportEnv for ConnectedChannels { - fn export_env(&self, writer: &mut impl EnvWriter) { - for (i, inner_channels) in self.channels.iter().enumerate() { - for (j, channel_i_to_j) in inner_channels.iter().enumerate() { - writer.write_env( - &format!("CONNECTION_ID_{}_to_{}", j, i), - &format!("{}", channel_i_to_j.connection.connection_id_a), - ); - - writer.write_env( - &format!("CONNECTION_ID_{}_to_{}", i, j), - &format!("{}", channel_i_to_j.connection.connection_id_b), - ); - - writer.write_env( - &format!("CHANNEL_ID_{}_to_{}", j, i), - &format!("{}", channel_i_to_j.channel_id_a), - ); - - writer.write_env( - &format!("PORT_{}_to_{}", j, i), - &format!("{}", channel_i_to_j.port_a), - ); - - writer.write_env( - &format!("CHANNEL_ID_{}_to_{}", i, j), - &format!("{}", channel_i_to_j.channel_id_b), - ); - - writer.write_env( - &format!("PORT_{}_to_{}", i, j), - &format!("{}", channel_i_to_j.port_b), - ); - } - } - } -} diff --git a/tools/test-framework/src/types/nary/connection.rs b/tools/test-framework/src/types/nary/connection.rs deleted file mode 100644 index 0ae1c61a0f..0000000000 --- a/tools/test-framework/src/types/nary/connection.rs +++ /dev/null @@ -1,135 +0,0 @@ -/*! - Constructs for N-ary connected connections. -*/ - -use core::convert::TryFrom; -use eyre::eyre; -use ibc::core::ics24_host::identifier::ConnectionId; -use ibc_relayer::chain::handle::ChainHandle; - -use super::aliases::NthChainHandle; -use crate::error::Error; -use crate::types::binary::connection::ConnectedConnection; -use crate::types::env::{EnvWriter, ExportEnv}; -use crate::types::tagged::*; -use crate::util::array::{into_nested_vec, try_into_nested_array}; - -/** - A fixed-size N-ary connected connections as specified by `SIZE`. - - Contains `SIZE`x`SIZE` number of binary [`ConnectedConnection`]s. -*/ -#[derive(Debug, Clone)] -pub struct ConnectedConnections { - connections: [[ConnectedConnection; SIZE]; SIZE], -} - -/** - A dynamic-sized N-ary connected connections, made of a - nested vector of binary [`ConnectedConnection`] which must be - in the same dimension. -*/ -#[derive(Debug, Clone)] -pub struct DynamicConnectedConnections { - connections: Vec>>, -} - -/** - A tagged binary [`ConnectedConnection`] that is connected between the chains at - position `CHAIN_A` and `CHAIN_B`. -*/ -pub type NthConnectedConnection = - ConnectedConnection, NthChainHandle>; - -/** - The connection ID on the chain at position `CHAIN_A` that corresponds to - the counterparty chain at position `CHAIN_B`. 
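Both the channel and connection `ExportEnv` implementations above key every exported value by the two positions involved (`CHANNEL_ID_1_to_0`, `CONNECTION_ID_0_to_1`, and so on). One way a test could capture that output is with a map-backed writer; in this sketch only `write_env` mirrors the deleted `EnvWriter` trait, while the `BTreeMap` impl and the `export_channel_ids` helper are hypothetical:

```rust
use std::collections::BTreeMap;

/// Minimal stand-in for the deleted `EnvWriter` trait.
trait EnvWriter {
    fn write_env(&mut self, key: &str, value: &str);
}

/// Hypothetical map-backed writer, convenient for asserting on exported values.
impl EnvWriter for BTreeMap<String, String> {
    fn write_env(&mut self, key: &str, value: &str) {
        self.insert(key.to_string(), value.to_string());
    }
}

/// Mirrors the nested loops above: each entry is keyed by both positions.
fn export_channel_ids(writer: &mut impl EnvWriter, channel_ids: &[[&str; 2]; 2]) {
    for (i, row) in channel_ids.iter().enumerate() {
        for (j, id) in row.iter().enumerate() {
            writer.write_env(&format!("CHANNEL_ID_{}_to_{}", j, i), id);
        }
    }
}

fn main() {
    let mut env: BTreeMap<String, String> = BTreeMap::new();
    export_channel_ids(
        &mut env,
        &[["channel-0", "channel-1"], ["channel-2", "channel-3"]],
    );
    assert_eq!(
        env.get("CHANNEL_ID_1_to_0").map(String::as_str),
        Some("channel-1")
    );
}
```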
-*/ -pub type NthConnectionId = - DualTagged, NthChainHandle, ConnectionId>; - -impl ConnectedConnections { - /** - Get the connection pair for chains at position `CHAIN_A` and `CHAIN_B`, - which must be less then `SIZE`. - */ - pub fn connection_at( - &self, - ) -> Result, Error> { - if CHAIN_A >= SIZE || CHAIN_B >= SIZE { - Err(Error::generic(eyre!( - "cannot get connection beyond position {}/{}", - CHAIN_A, - CHAIN_B - ))) - } else { - let raw_connection = self.connections[CHAIN_A][CHAIN_B].clone(); - - let channel = raw_connection.map_chain(MonoTagged::new, MonoTagged::new); - - Ok(channel) - } - } - - pub fn connections(&self) -> &[[ConnectedConnection; SIZE]; SIZE] { - &self.connections - } -} - -impl DynamicConnectedConnections { - pub fn new(connections: Vec>>) -> Self { - Self { connections } - } - - pub fn connections(&self) -> &Vec>> { - &self.connections - } -} - -impl From> - for DynamicConnectedConnections -{ - fn from(connections: ConnectedConnections) -> Self { - DynamicConnectedConnections { - connections: into_nested_vec(connections.connections), - } - } -} - -impl TryFrom> - for ConnectedConnections -{ - type Error = Error; - - fn try_from(connections: DynamicConnectedConnections) -> Result { - Ok(ConnectedConnections { - connections: try_into_nested_array(connections.connections)?, - }) - } -} - -impl From> - for NthConnectedConnection<0, 1, Handle> -{ - fn from(channels: ConnectedConnections) -> Self { - channels.connection_at::<0, 1>().unwrap() - } -} - -impl ExportEnv for ConnectedConnections { - fn export_env(&self, writer: &mut impl EnvWriter) { - for (i, inner_connections) in self.connections.iter().enumerate() { - for (j, connection_i_to_j) in inner_connections.iter().enumerate() { - writer.write_env( - &format!("CONNECTION_ID_{}_to_{}", j, i), - &format!("{}", connection_i_to_j.connection_id_a), - ); - - writer.write_env( - &format!("CONNECTION_ID_{}_to_{}", i, j), - &format!("{}", connection_i_to_j.connection_id_b), - ); - } - } - } -} diff --git a/tools/test-framework/src/types/nary/foreign_client.rs b/tools/test-framework/src/types/nary/foreign_client.rs deleted file mode 100644 index 3be01964bb..0000000000 --- a/tools/test-framework/src/types/nary/foreign_client.rs +++ /dev/null @@ -1,88 +0,0 @@ -use core::convert::TryFrom; -use eyre::eyre; -use ibc_relayer::chain::handle::ChainHandle; -use ibc_relayer::foreign_client::ForeignClient; - -use super::aliases::NthChainHandle; -use crate::error::Error; -use crate::types::binary::foreign_client::ForeignClientPair; -use crate::types::env::{EnvWriter, ExportEnv}; -use crate::types::tagged::*; -use crate::util::array::{into_nested_vec, try_into_nested_array}; - -/** - A [`ForeignClient`] that is tagged by a `Handle: ChainHandle` and - the const generics `DEST: usize` and `SRC: usize`. -*/ -pub type NthForeignClient = - ForeignClient, NthChainHandle>; - -pub type NthForeignClientPair = - ForeignClientPair, NthChainHandle>; - -#[derive(Clone)] -pub struct ForeignClientPairs { - foreign_clients: [[ForeignClient; SIZE]; SIZE], -} - -impl ForeignClientPairs { - /** - Get the [`ForeignClient`] with the source chain at position - `SRC: usize` and destination chain at position `DEST: usize`, - which must be less than `SIZE`. 
- */ - pub fn foreign_client_at( - &self, - ) -> Result, Error> { - if SRC >= SIZE || DEST >= SIZE { - Err(Error::generic(eyre!( - "cannot get foreign client beyond position {}/{}", - SRC, - DEST - ))) - } else { - let client = self.foreign_clients[SRC][DEST] - .clone() - .map_chain(MonoTagged::new, MonoTagged::new); - - Ok(client) - } - } - - pub fn foreign_client_pair_at( - &self, - ) -> Result, Error> { - let client_a_to_b = self.foreign_client_at::()?; - let client_b_to_a = self.foreign_client_at::()?; - - Ok(ForeignClientPair::new(client_a_to_b, client_b_to_a)) - } - - pub fn into_nested_vec(self) -> Vec>> { - into_nested_vec(self.foreign_clients) - } -} - -impl TryFrom>>> - for ForeignClientPairs -{ - type Error = Error; - - fn try_from(clients: Vec>>) -> Result { - let foreign_clients = try_into_nested_array(clients)?; - Ok(Self { foreign_clients }) - } -} - -impl ExportEnv for ForeignClientPairs { - fn export_env(&self, writer: &mut impl EnvWriter) { - for (source, inner_clients) in self.foreign_clients.iter().enumerate() { - for (destination, client) in inner_clients.iter().enumerate() { - writer.write_env( - &format!("CLIENT_ID_{}_to_{}", source, destination), - &format!("{}", client.id()), - ); - } - } - } -} diff --git a/tools/test-framework/src/types/nary/mod.rs b/tools/test-framework/src/types/nary/mod.rs deleted file mode 100644 index 04bc360fd2..0000000000 --- a/tools/test-framework/src/types/nary/mod.rs +++ /dev/null @@ -1,66 +0,0 @@ -/*! - Definitions for tagged data structures involving N-ary chains. - - In the binary version of the tagged data structures, we use the - existential types `ChainA: ChainHandle` and `ChainB: ChainHandle` - to differentiate between two chains. Since Rust treat each type - differently, we can use `ChainA` and `ChainB` as type tags - to differentiate values coming from different chains. - For example, `DualTagged` - can be used to refer to a `ChainId` on `ChainA` with the - counterparty chain being `ChainB`. - - When extending to the N-ary case, we can no longer use - existential types to refer to each chain, because we cannot - know before hand how many types are needed. Instead, - we can use _const generics_ to identify chains by _position_. - - The first construct we need is the [`Size`](aliases::Size) struct, - which lifts a const generic `usize` into a type: - - ```rust - enum Size {} - ``` - - Using `Size`, we can for example use a `usize` as a tag. - For example, `MonoTagged, String>` is a `String` - that is tagged by the `usize` value `42`. - - Aside from the position, we still need to be able to differentiate - values coming from different _collections_ of chains. For example, - given a first collection `[ChainA, ChainB, ChainC]`, and a second - collection `[ChainD, ChainE]`, a naively position-tagged value like - `MonoTagged, Denom>` could be used to refer to a denomination - that come from either `ChainB` or `ChainE`, which defeats the purpose - of tagging values with type tags. - - Due to the initial design of using the `ChainHandle` existential type as - the type tag, it is also required that any type that is used to tag - values for chains to also implement `ChainHandle`. Since `Size` does - not implement `ChainHandle`, it is also not possible to use it directly - as tags in structures such as `ForeignClient`. - - Instead, we also require an existential `Collection: ChainHandle` type - to identify all chains within an N-ary collection. We then tag - the handle with the position, before tagging it again with the - values. 
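The example in the next sentence, spelled out as compilable code, nests the tags roughly as in the sketch below; `Collection1`, `Denom`, and the sample value are stand-ins rather than real framework types:

```rust
use core::marker::PhantomData;

/// Lifts a position into a type.
enum Size<const TAG: usize> {}

/// Minimal single-tag wrapper standing in for `MonoTagged`.
struct MonoTagged<Tag, Value>(Value, PhantomData<Tag>);

impl<Tag, Value> MonoTagged<Tag, Value> {
    fn new(value: Value) -> Self {
        MonoTagged(value, PhantomData)
    }
}

/// Stand-in for the existential `Collection: ChainHandle` type of the first collection.
struct Collection1;

/// Stand-in for a denomination.
struct Denom(String);

/// "The third chain (position 2) in the first collection", used purely as a tag.
type ThirdChain = MonoTagged<Size<2>, Collection1>;

fn main() {
    // A `Denom` tagged with that chain:
    // MonoTagged<MonoTagged<Size<2>, Collection1>, Denom>.
    let denom: MonoTagged<ThirdChain, Denom> =
        MonoTagged::new(Denom("samoleans".to_string()));
    let _ = denom;
}
```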
For example, a `Denom` that is tagged with the third chain - in the first collection would be written as - `MonoTagged, Collection1>, Denom>`. - The tagging also works because we have defined a `ChainHandle` - implementation for `MonoTagged` for any `Chain: ChainHandle`. - - The current approach for tagging N-ary chain values is a bit cumbersome. - To save the effort of typing the fully qualified type of N-ary tagged - values, we also define type aliases such as - [`NthChainHandle`](aliases::NthChainHandle) and - [`NthForeignClient`](foreign_client::NthForeignClient). - This would still result in overly verbose messages in type errors involving - these types. If necessary, we will refactor these defintions as newtypes - so that they can be used and shown in a cleaner form. -*/ - -pub mod aliases; -pub mod chains; -pub mod channel; -pub mod connection; -pub mod foreign_client; diff --git a/tools/test-framework/src/types/process.rs b/tools/test-framework/src/types/process.rs deleted file mode 100644 index 1f32e53950..0000000000 --- a/tools/test-framework/src/types/process.rs +++ /dev/null @@ -1,52 +0,0 @@ -/*! - Define wrapper type around [`std::process::Child`] to kill the - child process when the value is dropped. -*/ - -use eyre::Report as Error; -use std::process::Child; - -/** - A lightweight wrapper around std::process::Child to ensure that the - process is killed when the handle is dropped. -*/ -pub struct ChildProcess { - child: Child, - waited: bool, -} - -impl ChildProcess { - /// Create a new [`ChildProcess`] from the primitive [`Child`] type. - pub fn new(child: Child) -> Self { - Self { - child, - waited: false, - } - } - - /// Wait for the child process to terminate. - pub fn wait(&mut self) -> Result<(), Error> { - if !self.waited { - self.waited = true; - self.child.wait()?; - } - - Ok(()) - } - - /// Kill the underlying child process. - pub fn kill(&mut self) -> Result<(), Error> { - self.child.kill()?; - self.wait()?; - - Ok(()) - } -} - -impl Drop for ChildProcess { - fn drop(&mut self) { - if !self.waited { - let _ = self.kill(); - } - } -} diff --git a/tools/test-framework/src/types/single/mod.rs b/tools/test-framework/src/types/single/mod.rs deleted file mode 100644 index 2ccc0ddc20..0000000000 --- a/tools/test-framework/src/types/single/mod.rs +++ /dev/null @@ -1,5 +0,0 @@ -/*! - Definitions for data types that involve a single chain. -*/ - -pub mod node; diff --git a/tools/test-framework/src/types/single/node.rs b/tools/test-framework/src/types/single/node.rs deleted file mode 100644 index d3f6fb8b6b..0000000000 --- a/tools/test-framework/src/types/single/node.rs +++ /dev/null @@ -1,178 +0,0 @@ -/*! - Type definition for a single running full node. -*/ - -use core::str::FromStr; -use core::time::Duration; -use eyre::eyre; -use eyre::Report as Error; -use ibc::core::ics24_host::identifier::ChainId; -use ibc_relayer::chain::ChainType; -use ibc_relayer::config; -use ibc_relayer::keyring::Store; -use std::sync::{Arc, RwLock}; -use tendermint_rpc::Url; - -use crate::chain::driver::ChainDriver; -use crate::ibc::denom::Denom; -use crate::types::env::{prefix_writer, EnvWriter, ExportEnv}; -use crate::types::process::ChildProcess; -use crate::types::tagged::*; -use crate::types::wallet::TestWallets; - -pub type TaggedFullNode = MonoTagged; - -pub type TaggedFullNodeRef<'a, Chain> = MonoTagged; - -/** - Represents a full node running as a child process managed by the test. 
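The child process mentioned here is held in the `ChildProcess` wrapper defined above, which kills the process when the value is dropped. Condensed to plain `std`, the guard pattern is roughly the following; the `sleep` command is only a placeholder for a long-running node:

```rust
use std::process::{Child, Command};

/// Kill-on-drop guard around a spawned process, mirroring `ChildProcess`.
struct ChildGuard {
    child: Child,
    waited: bool,
}

impl ChildGuard {
    fn new(child: Child) -> Self {
        Self { child, waited: false }
    }

    fn kill(&mut self) -> std::io::Result<()> {
        self.child.kill()?;
        self.waited = true;
        // Reap the process so it does not linger as a zombie.
        self.child.wait()?;
        Ok(())
    }
}

impl Drop for ChildGuard {
    fn drop(&mut self) {
        if !self.waited {
            // Best effort: the process may already have exited.
            let _ = self.kill();
        }
    }
}

fn main() -> std::io::Result<()> {
    let child = Command::new("sleep").arg("60").spawn()?;
    let _guard = ChildGuard::new(child);
    // When `_guard` goes out of scope here, the process is killed and reaped.
    Ok(())
}
```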
-*/ -#[derive(Clone)] -pub struct FullNode { - /** - The [`ChainDriver`] used to communicate with the full node. - */ - pub chain_driver: ChainDriver, - - /** - The currency denomination which the wallets have been loaded - with initial balance during the chain setup. - */ - pub denom: Denom, - - /** - The test wallets with more than sufficient account balance that - can be used for testing. - */ - pub wallets: TestWallets, - - /** - The child process that is running the full node. - - The full node is killed when the `Arc` shared pointer is dropped. - - Test authors can acquire the child process and kill the full node - in the middle of tests using [`kill`](FullNode::kill). - */ - pub process: Arc>, -} - -/** - Extra methods for [`FullNode`] that is [tagged](crate::types::tagged). - - This trait is auto implemented for `MonoTagged` so - that we can call methods on it directly. -*/ -pub trait TaggedFullNodeExt { - /// Get the [`ChainId`] tagged with the given `Chain`. - fn chain_id(&self) -> MonoTagged; - - /// Get the [`ChainDriver`] tagged with the given `Chain`. - fn chain_driver(&self) -> MonoTagged; - - /// Get the [`TestWallets`] tagged with the given `Chain`. - fn wallets(&self) -> MonoTagged; - - /// Get the [`Denom`] tagged with the given `Chain`. - fn denom(&self) -> MonoTagged; -} - -impl TaggedFullNodeExt for MonoTagged { - fn chain_id(&self) -> MonoTagged { - self.map_ref(|c| &c.chain_driver.chain_id) - } - - fn chain_driver(&self) -> MonoTagged { - self.map_ref(|c| &c.chain_driver) - } - - fn wallets(&self) -> MonoTagged { - self.map_ref(|c| &c.wallets) - } - - fn denom(&self) -> MonoTagged { - self.map_ref(|c| &c.denom) - } -} - -impl<'a, Chain> TaggedFullNodeExt for MonoTagged { - fn chain_id(&self) -> MonoTagged { - self.map_ref(|c| &c.chain_driver.chain_id) - } - - fn chain_driver(&self) -> MonoTagged { - self.map_ref(|c| &c.chain_driver) - } - - fn wallets(&self) -> MonoTagged { - self.map_ref(|c| &c.wallets) - } - - fn denom(&self) -> MonoTagged { - self.map_ref(|c| &c.denom) - } -} - -impl FullNode { - /** - Generate the relayer's chain config based on the configuration of - the full node. - */ - pub fn generate_chain_config(&self) -> Result { - Ok(config::ChainConfig { - id: self.chain_driver.chain_id.clone(), - r#type: ChainType::CosmosSdk, - rpc_addr: Url::from_str(&self.chain_driver.rpc_address())?, - websocket_addr: Url::from_str(&self.chain_driver.websocket_address())?, - grpc_addr: Url::from_str(&self.chain_driver.grpc_address())?, - rpc_timeout: Duration::from_secs(10), - account_prefix: self.chain_driver.account_prefix.clone(), - key_name: self.wallets.relayer.id.0.clone(), - - // By default we use in-memory key store to avoid polluting - // ~/.hermes/keys. See - // https://github.com/informalsystems/ibc-rs/issues/1541 - key_store_type: Store::Memory, - - store_prefix: "ibc".to_string(), - default_gas: None, - max_gas: Some(3000000), - gas_adjustment: Some(0.1), - fee_granter: None, - max_msg_num: Default::default(), - max_tx_size: Default::default(), - max_block_time: Duration::from_secs(30), - clock_drift: Duration::from_secs(5), - trusting_period: Some(Duration::from_secs(14 * 24 * 3600)), - trust_threshold: Default::default(), - gas_price: config::GasPrice::new(0.001, "stake".to_string()), - packet_filter: Default::default(), - address_type: Default::default(), - memo_prefix: Default::default(), - proof_specs: Default::default(), - }) - } - - /** - Kill the underlying child process of the full node, thereby terminating it. 
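Because the process handle is shared as an `Arc<RwLock<...>>`, any clone of it can terminate the node, which is what `kill` relies on. A rough std-only sketch of that shape (again with `sleep` as a placeholder process):

```rust
use std::process::{Child, Command};
use std::sync::{Arc, RwLock};

/// Terminate a shared child process, mirroring the shape of `FullNode::kill`.
fn kill(process: &Arc<RwLock<Child>>) -> std::io::Result<()> {
    let mut child = process.write().expect("child process lock was poisoned");
    child.kill()?;
    child.wait()?;
    Ok(())
}

fn main() -> std::io::Result<()> {
    let process = Arc::new(RwLock::new(Command::new("sleep").arg("60").spawn()?));

    // Clones of the `Arc` can be handed to different parts of a test;
    // any one of them can stop the node early.
    let handle = process.clone();
    kill(&handle)?;
    Ok(())
}
```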
- - Test writers can use this to kill the full node in the middle of tests, and - then restart it using - [`ChainDriver::start`](crate::chain::driver::ChainDriver::start). - */ - pub fn kill(&self) -> Result<(), Error> { - self.process - .write() - .map_err(|_| eyre!("poisoned mutex"))? - .kill() - } -} - -impl ExportEnv for FullNode { - fn export_env(&self, writer: &mut impl EnvWriter) { - self.chain_driver.export_env(writer); - writer.write_env("DENOM", self.denom.as_str()); - self.wallets - .export_env(&mut prefix_writer("WALLETS", writer)); - } -} diff --git a/tools/test-framework/src/types/tagged/dual.rs b/tools/test-framework/src/types/tagged/dual.rs deleted file mode 100644 index 851e2a9428..0000000000 --- a/tools/test-framework/src/types/tagged/dual.rs +++ /dev/null @@ -1,470 +0,0 @@ -/*! - Tagged data types with two type tags. - - This is mainly used to tag data types that are associated - to a single chain and also uniquely correspond to some - resource on a counterparty chain. - - Example: - - - [`Tagged`](crate::types::id::TaggedChannelId) - - A channel ID belongs to a chain `ChainA`, and it is also uniquely - corresponds to a channel connected to a counterparty chain `ChainB`. -*/ - -use core::cmp::{Eq, Ord, Ordering, PartialEq, PartialOrd}; -use core::fmt::{self, Debug, Display}; -use core::iter::{IntoIterator, Iterator}; -use core::marker::PhantomData; - -/** - Tag a `Value` type with a two type tags `TagA` and `TagB`. -*/ -pub struct Tagged(pub Value, PhantomData<(TagA, TagB)>); - -impl Tagged { - /** - Create a new tagged value with any type tag. - - Example: - - ```rust - # use ibc_test_framework::types::tagged::dual::Tagged; - struct Foo; - struct Bar; - - let val: Tagged = Tagged::new(42); - ``` - */ - pub fn new(value: Value) -> Self { - Tagged(value, PhantomData) - } - - /** - Get a reference to the underlying value. - - Example: - - ```rust - # use ibc_test_framework::types::tagged::dual::Tagged; - struct Foo; - struct Bar; - - let val1: Tagged = Tagged::new(42); - let val2: &i64 = val1.value(); - ``` - */ - pub fn value(&self) -> &Value { - &self.0 - } - - /** - Get a mutable reference to the underlying value. - - Example: - - ```rust - # use ibc_test_framework::types::tagged::dual::Tagged; - struct Foo; - struct Bar; - - let mut val1: Tagged = Tagged::new(42); - let val2: i64 = val1.into_value(); - ``` - */ - pub fn into_value(self) -> Value { - self.0 - } - - /** - Convert a tagged value into a tagged reference. - - Example: - - ```rust - # use ibc_test_framework::types::tagged::dual::Tagged; - struct Foo; - struct Bar; - - let val1: Tagged = Tagged::new(42); - let val2: Tagged = val1.as_ref(); - ``` - */ - pub fn as_ref(&self) -> Tagged { - Tagged::new(&self.0) - } - - /** - Flips the ordering of the two tags. - - Example: - - ```rust - # use ibc_test_framework::types::tagged::dual::Tagged; - struct Foo; - struct Bar; - - let val1: Tagged = Tagged::new(42); - let val2: Tagged = val1.flip(); - ``` - */ - pub fn flip(self) -> Tagged { - Tagged::new(self.0) - } - - /** - Retag a tagged value with a different tag. - - Example: - - ```rust - # use ibc_test_framework::types::tagged::dual::Tagged; - struct Foo; - struct Bar; - struct Baz; - struct Quux; - - let val1: Tagged = Tagged::new(42); - let val2: Tagged = val1.retag(); - ``` - */ - pub fn retag(self) -> Tagged { - Tagged::new(self.0) - } - - /** - Perform operation with the reference to the underlying reference, - and have result that preserve the tag. 
- - Example: - - ```rust - # use ibc_test_framework::types::tagged::dual::Tagged; - struct Foo; - struct Bar; - - let val1: Tagged = Tagged::new(42); - let val2: Tagged = val1.map(|x| format!("{}", x)); - ``` - */ - pub fn map(&self, mapper: impl FnOnce(&Value) -> T) -> Tagged { - Tagged::new(mapper(&self.0)) - } - - /** - Perform operation with the reference to the underlying reference, - and have result reference with the same lifetime that preserve the tags. - - Example: - - ```rust - # use ibc_test_framework::types::tagged::dual::Tagged; - struct Person { name: String, age: u8 } - struct Alice; - struct Wonderland; - - let person: Tagged = Tagged::new(Person { - name: "Alice".to_string(), - age: 30, - }); - - let name: Tagged = person - .map_ref(|person| person.name.as_str()); - ``` - */ - pub fn map_ref<'a, T: ?Sized>( - &'a self, - mapper: impl FnOnce(&'a Value) -> &'a T, - ) -> Tagged { - Tagged::new(mapper(self.value())) - } - - /** - Perform an operation consuming the original tagged value, and return - a result value preserving the original tag. - - Example: - - ```rust - # use ibc_test_framework::types::tagged::dual::Tagged; - struct Person { name: String, age: u8 } - struct Alice; - struct Wonderland; - - let person: Tagged = Tagged::new(Person { - name: "Alice".to_string(), - age: 30, - }); - - let name: Tagged = person.map_into(|person| person.name); - ``` - */ - pub fn map_into(self, mapper: impl FnOnce(Value) -> T) -> Tagged { - Tagged::new(mapper(self.0)) - } - - /** - Perform operation with the reference to the underlying reference, - and have two tags flipped in the result. - - Example: - - ```rust - # use ibc_test_framework::types::tagged::dual::Tagged; - struct Foo; - struct Bar; - - let val1: Tagged = Tagged::new(42); - let val2: Tagged = val1.contra_map(|x| format!("{}", x)); - ``` - - This is mainly useful for accessing IBC data structures that may contain - information about the counterparty chain. For example, consider - a tagged and simplified version of - [`ConnectionEnd`](ibc::core::ics03_connection::connection::ConnectionEnd): - - ```rust - # use ibc::core::ics24_host::identifier::ConnectionId; - # use ibc_test_framework::types::tagged::dual::Tagged; - struct ConnectionEnd { - connection_id: ConnectionId, - counterparty_connection_id: ConnectionId, - } - - fn process_connection_end( - connection_end: Tagged) - { - let connection_id: Tagged = - connection_end.map(|c| c.connection_id.clone()); - - let counterparty_connection_id: Tagged = - connection_end.contra_map(|c| c.connection_id.clone()); - - // do something - } - ``` - - The `ConnectionEnd` data type above is a _bidirectional_ data type that - contains fields that are specific to both chains: the connection ID - and the counterparty connection ID. But when we tag the `ConnectionEnd` - type, we have to choose one dominant chain to appear at the first position. - - When we extract the `connection_id` field, we use `map` to preserve the - tag ordering to say that the connection ID _belongs_ to the `ChainA`, - and corresponds to a connection to the counterparty `ChainB`. - - When we extract the `counterparty_connection_id` field, we use - `contra_map` to flip the tag ordering to say that the connection ID - _belongs_ to the counterparty `ChainB`, and corresponds to a connection - to `ChainA`. 
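Restating that example with its type parameters spelled out (and reading the counterparty ID from a separate `counterparty_connection_id` field), using a minimal stand-in for the dual-tagged type:

```rust
use core::marker::PhantomData;

/// Minimal stand-in for the dual-tagged wrapper defined in this file.
struct Tagged<TagA, TagB, Value>(Value, PhantomData<(TagA, TagB)>);

impl<TagA, TagB, Value> Tagged<TagA, TagB, Value> {
    fn new(value: Value) -> Self {
        Tagged(value, PhantomData)
    }

    /// Keep the (TagA, TagB) ordering: the result still "belongs" to TagA.
    fn map<T>(&self, mapper: impl FnOnce(&Value) -> T) -> Tagged<TagA, TagB, T> {
        Tagged::new(mapper(&self.0))
    }

    /// Flip the ordering: the result "belongs" to the counterparty TagB.
    fn contra_map<T>(&self, mapper: impl FnOnce(&Value) -> T) -> Tagged<TagB, TagA, T> {
        Tagged::new(mapper(&self.0))
    }
}

struct ChainA;
struct ChainB;

struct ConnectionEnd {
    connection_id: String,
    counterparty_connection_id: String,
}

fn main() {
    let end: Tagged<ChainA, ChainB, ConnectionEnd> = Tagged::new(ConnectionEnd {
        connection_id: "connection-0".to_string(),
        counterparty_connection_id: "connection-5".to_string(),
    });

    // Belongs to ChainA, corresponds to a connection to ChainB.
    let _id: Tagged<ChainA, ChainB, String> = end.map(|c| c.connection_id.clone());

    // Belongs to the counterparty ChainB, corresponds to a connection to ChainA.
    let _counterparty_id: Tagged<ChainB, ChainA, String> =
        end.contra_map(|c| c.counterparty_connection_id.clone());
}
```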
- */ - pub fn contra_map(&self, mapper: impl FnOnce(&Value) -> T) -> Tagged { - Tagged::new(mapper(&self.0)) - } - - /** - Perform operation with the reference to the underlying reference, - and have the result reference with the same lifetime and have the - two tags flipped in the result. - - Example: - - ```rust - # use ibc_test_framework::types::tagged::dual::Tagged; - struct Person { name: String, age: u8 } - struct Alice; - struct Wonderland; - - let person: Tagged = Tagged::new(Person { - name: "Alice".to_string(), - age: 30, - }); - - let name: Tagged = person - .contra_map_ref(|person| person.name.as_str()); - ``` - */ - pub fn contra_map_ref<'a, T: ?Sized>( - &'a self, - mapper: impl FnOnce(&'a Value) -> &'a T, - ) -> Tagged { - Tagged::new(mapper(self.value())) - } - - /** - Perform operation consuming the underlying reference, - and have two tags switched in the result. - - Example: - - ```rust - # use ibc_test_framework::types::tagged::dual::Tagged; - struct Foo; - struct Bar; - - let val1: Tagged = Tagged::new(42); - let val2: Tagged = val1.contra_map_into(|x| format!("{}", x)); - ``` - */ - pub fn contra_map_into(self, mapper: impl FnOnce(Value) -> T) -> Tagged { - Tagged::new(mapper(self.0)) - } -} - -impl<'a, TagA, TagB, Value: Clone> Tagged { - /** - Convert a [`Clone`]eable tagged reference into a tagged value. - - Example: - - ```rust - # use ibc_test_framework::types::tagged::dual::Tagged; - struct Foo; - struct Bar; - - let val1: String = "foo".to_string(); - let val2: Tagged = Tagged::new(&val1); - let val3: Tagged = val2.cloned(); - ``` - */ - pub fn cloned(&self) -> Tagged { - Tagged::new(self.0.clone()) - } - - pub fn cloned_value(&self) -> Value { - self.0.clone() - } -} - -impl Tagged> { - /** - Convert a tagged [`Option`] value into an optional tagged value. - - Example: - - ```rust - # use ibc_test_framework::types::tagged::dual::Tagged; - struct Foo; - struct Bar; - - let val1: Tagged> = Tagged::new(Some(8)); - let val2: Option> = val1.transpose(); - ``` - */ - pub fn transpose(self) -> Option> { - self.0.map(Tagged::new) - } -} - -impl Tagged> { - /** - Convert a tagged [`Result`] value into an result tagged value. 
- - Example: - - ```rust - # use ibc_test_framework::types::tagged::dual::Tagged; - struct Foo; - struct Bar; - struct Error; - - let val1: Tagged> = Tagged::new(Ok(8)); - let val2: Result, Error> = val1.transpose(); - ``` - */ - pub fn transpose(self) -> Result, E> { - self.0.map(Tagged::new) - } -} - -impl<'a, TagA, TagB, Value> AsRef for Tagged { - fn as_ref(&self) -> &Value { - self.value() - } -} - -impl AsRef for Tagged { - fn as_ref(&self) -> &Value { - self.value() - } -} - -impl Copy for Tagged {} - -unsafe impl Send for Tagged {} -unsafe impl Sync for Tagged {} - -impl Clone for Tagged { - fn clone(&self) -> Self { - Self::new(self.0.clone()) - } -} - -impl Debug for Tagged { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - Debug::fmt(self.value(), f) - } -} - -impl Display for Tagged { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - Display::fmt(self.value(), f) - } -} - -impl PartialEq for Tagged { - fn eq(&self, other: &Self) -> bool { - self.value().eq(other.value()) - } -} - -impl Eq for Tagged {} - -impl PartialOrd for Tagged { - fn partial_cmp(&self, other: &Self) -> Option { - self.value().partial_cmp(other.value()) - } -} - -impl Ord for Tagged { - fn cmp(&self, other: &Self) -> Ordering { - self.value().cmp(other.value()) - } -} - -/** - Create a tagged iterator, if the underlying value supports iteration. - - Example: - - ```rust - # use ibc_test_framework::types::tagged::dual::Tagged; - struct Foo; - struct Bar; - - let values: Tagged> = Tagged::new(vec![1, 2, 3]); - for value in values.into_iter() { - let value: Tagged = value; - // do something - } - ``` -*/ -pub struct TaggedIterator(Tagged); - -impl Iterator for TaggedIterator { - type Item = Tagged; - - fn next(&mut self) -> Option { - self.0 .0.next().map(Tagged::new) - } -} - -impl IntoIterator for Tagged { - type Item = Tagged; - - type IntoIter = TaggedIterator; - - fn into_iter(self) -> Self::IntoIter { - TaggedIterator(self.map_into(|v| v.into_iter())) - } -} diff --git a/tools/test-framework/src/types/tagged/mod.rs b/tools/test-framework/src/types/tagged/mod.rs deleted file mode 100644 index 50f75dc6ca..0000000000 --- a/tools/test-framework/src/types/tagged/mod.rs +++ /dev/null @@ -1,130 +0,0 @@ -/*! - A small library for adding one or two type tags to data types. - - This module introduces two data types, [`MonoTagged`] and - [`DualTagged`], for adding one or two type tags to any data - type, respectively. - - The main idea is that we add any type as a tag to a type, - so that two values with different tags are considered - different types. - - ```rust,compile_fail - # use ibc_test_framework::types::tagged::*; - struct Foo; - struct Bar; - - // Helper to test whether two values have the same type. - fn same(_: T, _: T) {} - - let val1: i64 = 42; // A raw `i64` value. - - // An `i64` value tagged with the `Foo` type. - let val2: MonoTagged = MonoTagged::new(42); - - // An `i64` value tagged with the `Bar` type. - let val3: MonoTagged = MonoTagged::new(42); - - // error, because the tags `Foo` and `Bar` are different. - same(val2, val3); - ``` - - The `tagged` library does not enforce how the type tags should be used - correctly. Therefore we can freely add or remove tags for a value at - any time. It is up to the user of this library to ensure that values - are tagged with the proper type tag as intended. 
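For instance, nothing stops code from wrapping a value that really came from one chain with another chain's tag; the following compiles without complaint (stand-in types only):

```rust
use core::marker::PhantomData;

struct ChainA;

/// Minimal stand-in for the single-tag wrapper.
struct MonoTagged<Tag, Value>(Value, PhantomData<Tag>);

impl<Tag, Value> MonoTagged<Tag, Value> {
    fn new(value: Value) -> Self {
        MonoTagged(value, PhantomData)
    }
}

/// Pretend this value was queried from chain B.
fn channel_id_from_chain_b() -> String {
    "channel-3".to_string()
}

fn main() {
    // Mislabelled, yet accepted by the compiler: choosing the right tag
    // is entirely the caller's responsibility.
    let _mislabelled: MonoTagged<ChainA, String> =
        MonoTagged::new(channel_id_from_chain_b());
}
```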
- - For example, it is entirely fine to do something like: - - ```rust - # use ibc_test_framework::types::tagged::*; - struct Foo; - struct Bar; - struct Baz; - - let val1: i64 = 42; - - // Add a new tag `Foo` to `val1`. - let val2: MonoTagged = MonoTagged::new(val1); - - // Remove the tag `Foo` from `val2`. - let val3: i64 = val2.into_value(); - - // Retag `val3` with a new tag `Bar`. - let val4: MonoTagged = MonoTagged::new(val3); - - // Directly retag `val4` from `Bar` tag to `Baz` tag. - let val5: MonoTagged = val4.retag(); - ``` - - As a result, user is free to switch to work with the untagged version - of the values, if they find the tagged values to have too complicated - types to deal with. The retagging approach also works well for - interoperability between functions that use tagged and untagged values, - so that there is no need to convert an entire code base to use - tagged values. - - Currently the main use of the `tagged` library is to tag data types and - identifiers associated with different chains. For example, a tagged - type `DualTagged` is used to represent - a `ChannelId` value that is used on `ChainA` to identify a channel - that is connected to `ChainB`. With the tagged identifier, it is - more unlikely for us to accidentally use the `ChannelId` coming from - counterparty chain, as it would have the the type - `DualTagged` and thus result in - type error. - - Currently the type tags for the chain data types are derived from - the spawned chain handles, which has the existential type - [`impl ChainHandle`](ibc_relayer::chain::handle::ChainHandle). - Note that even though there is only one implementation of - `ChainHandle`, - [`CountingAndCachingChainHandle`](ibc_relayer::chain::handle::CountingAndCachingChainHandle), - when they are returned as `impl ChainHandle` they would be - considered by Rust as an - [abstract type](https://doc.rust-lang.org/reference/types/impl-trait.html#abstract-return-types) - that is different from the original type. Inside generic functions, - we can also treat the same type as different types by specifying - them as separate generic parameters. - - By using `impl ChainHandle` as the type tag, it also encourage - us to treat different `ChainHandle` values as having different - types. This will help us in the future to have easier transition - into implementing relayer code that support relaying between different - implementations of `ChainHandle`s that corresponding to different - chain implementations. - - - The use of tagged identifiers are especially useful for avoiding confusion - when using data types that have tags in _contravariant_ ordering, - such as - [`ForeignClient`](ibc_relayer::foreign_client::ForeignClient). - Whereas most relayer constructs such as - `Connection` would mean - "a connection from chain A to chain B", a - `ForeignClient` actually means "a foreign client from - chain B to chain A". As a result, if we want to always refer to - "from chain A to chain B", then we would have to instead write - `ForeignClient`. - - The use of contravariant ordering can be very confusing for developers - who are new to the code base, and we cannot expect developers to always - remember which construct requires contravariant ordering. We also cannot - easily refactor legacy constructs such as `ForeignClient` to use covariant - ordering, as we would have to search for the entire code base to - replace the ordering, and there is no guarantee to do the refactoring - correctly. 
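As a rough illustration with dummy types, the ordering pitfall (and one possible alias-based workaround, which is hypothetical and not part of the crate) looks like this:

```rust
use core::marker::PhantomData;

struct ChainA;
struct ChainB;

/// Dummy type with the same parameter ordering as the relayer's
/// `ForeignClient<DstChain, SrcChain>`.
struct ForeignClient<DstChain, SrcChain>(PhantomData<(DstChain, SrcChain)>);

impl<DstChain, SrcChain> ForeignClient<DstChain, SrcChain> {
    fn new() -> Self {
        ForeignClient(PhantomData)
    }
}

/// Hypothetical alias that reads in "from -> to" order.
type ClientFromTo<Src, Dst> = ForeignClient<Dst, Src>;

fn main() {
    // "A foreign client from chain A to chain B" has to be written with
    // chain B first when using the raw type ...
    let _raw: ForeignClient<ChainB, ChainA> = ForeignClient::new();

    // ... but reads in the natural order through the alias.
    let _aliased: ClientFromTo<ChainA, ChainB> = ForeignClient::new();
}
```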
- - With tagged identifiers, we can alleviate some of the confusion by - leaving it to the type system to track which identifier belong to - which chain. This way if a developer ever think that - `ForeignClient` means "foreign client from chain A - to chain B", the compiler will correct them of the mistake with a - type error. -*/ - -pub mod dual; -pub mod mono; - -pub use dual::Tagged as DualTagged; -pub use mono::Tagged as MonoTagged; diff --git a/tools/test-framework/src/types/tagged/mono.rs b/tools/test-framework/src/types/tagged/mono.rs deleted file mode 100644 index ef4a228b93..0000000000 --- a/tools/test-framework/src/types/tagged/mono.rs +++ /dev/null @@ -1,410 +0,0 @@ -/*! - Tagged data types with a single type tag. - - This is mainly used to tag data types that are associated - to a single chain and do not uniquely correspond to any - resource on a counterparty chain. - - Example: - - - [`Tagged`](crate::types::id::TaggedChainId) - - A chain ID belongs to a chain and do not uniquely - correspond to a counterparty chain. - - - [`Tagged`](crate::types::wallet::Wallet) - - A wallet belongs to a chain and do not uniquely - correspond to a counterparty chain - -*/ - -use core::cmp::{Eq, Ord, Ordering, PartialEq, PartialOrd}; -use core::fmt::{self, Debug, Display}; -use core::iter::{IntoIterator, Iterator}; -use core::marker::PhantomData; -use serde::{Serialize, Serializer}; - -use super::dual::Tagged as DualTagged; - -/** - Tag a `Value` type with a single `Tag` type tag. -*/ -pub struct Tagged(pub Value, pub PhantomData); - -impl Tagged { - /** - Create a new tagged value with any type tag. - - Example: - - ```rust - # use ibc_test_framework::types::tagged::mono::Tagged; - struct Foo; - - let val: Tagged = Tagged::new(42); - ``` - */ - pub fn new(value: Value) -> Self { - Tagged(value, PhantomData) - } - - /** - Get a reference to the underlying value. - - Example: - - ```rust - # use ibc_test_framework::types::tagged::mono::Tagged; - struct Foo; - - let val1: Tagged = Tagged::new(42); - let val2: &i64 = val1.value(); - ``` - */ - pub fn value(&self) -> &Value { - &self.0 - } - - /** - Get a mutable reference to the underlying value. - - Example: - - ```rust - # use ibc_test_framework::types::tagged::mono::Tagged; - struct Foo; - - let mut val1: Tagged = Tagged::new(42); - let val2: &mut i64 = val1.mut_value(); - ``` - */ - pub fn mut_value(&mut self) -> &mut Value { - &mut self.0 - } - - /** - Convert the tagged value into an untagged value. - - Example: - - ```rust - # use ibc_test_framework::types::tagged::mono::Tagged; - struct Foo; - - let val1: Tagged = Tagged::new(42); - let val2: i64 = val1.into_value(); - ``` - */ - pub fn into_value(self) -> Value { - self.0 - } - - /** - Convert a tagged value into a tagged reference. - - Example: - - ```rust - # use ibc_test_framework::types::tagged::mono::Tagged; - struct Foo; - - let val1: Tagged = Tagged::new(42); - let val2: Tagged = val1.as_ref(); - ``` - */ - pub fn as_ref(&self) -> Tagged { - Tagged::new(&self.0) - } - - /** - Retag a tagged value with a different tag. - - Example: - - ```rust - # use ibc_test_framework::types::tagged::mono::Tagged; - struct Foo; - struct Bar; - - let val1: Tagged = Tagged::new(42); - let val2: Tagged = val1.retag(); - ``` - */ - pub fn retag(self) -> Tagged { - Tagged::new(self.0) - } - - /** - Add an additional tag to a mono-tagged value, turning - it into a [`DualTagged`] value. 
- - Example: - - ```rust - # use ibc_test_framework::types::tagged::mono::Tagged; - # use ibc_test_framework::types::tagged::dual::Tagged as DualTagged; - struct Foo; - struct Bar; - - let val1: Tagged = Tagged::new(42); - let val2: DualTagged = val1.add_tag(); - ``` - */ - pub fn add_tag(self) -> DualTagged { - DualTagged::new(self.into_value()) - } - - /** - Perform operation with the reference to the underlying reference, - and have result that preserve the tag. - - Example: - - ```rust - # use ibc_test_framework::types::tagged::mono::Tagged; - struct Foo; - - let val1: Tagged = Tagged::new(42); - let val2: Tagged = val1.map(|x| format!("{}", x)); - ``` - */ - pub fn map(&self, mapper: impl FnOnce(&Value) -> T) -> Tagged { - Tagged::new(mapper(self.value())) - } - - /** - Perform operation with the reference to the underlying reference, - and have result reference with the same lifetime that preserve the tag. - - Example: - - ```rust - # use ibc_test_framework::types::tagged::mono::Tagged; - struct Person { name: String, age: u8 } - struct Alice; - - let person: Tagged = Tagged::new(Person { - name: "Alice".to_string(), - age: 30, - }); - - let name: Tagged = person.map_ref(|person| person.name.as_str()); - ``` - */ - pub fn map_ref<'a, T: ?Sized>( - &'a self, - mapper: impl FnOnce(&'a Value) -> &'a T, - ) -> Tagged { - Tagged::new(mapper(self.value())) - } - - /** - Perform an operation consuming the original tagged value, and return - a result value preserving the original tag. - - Example: - - ```rust - # use ibc_test_framework::types::tagged::mono::Tagged; - struct Person { name: String, age: u8 } - struct Alice; - - let person: Tagged = Tagged::new(Person { - name: "Alice".to_string(), - age: 30, - }); - - let name: Tagged = person.map_into(|person| person.name); - ``` - */ - pub fn map_into(self, mapper: impl FnOnce(Value) -> T) -> Tagged { - Tagged::new(mapper(self.0)) - } -} - -impl<'a, Tag, Value: Clone> Tagged { - /** - Convert a [`Clone`]eable tagged reference into a tagged value. - - Example: - - ```rust - # use ibc_test_framework::types::tagged::mono::Tagged; - struct Foo; - - let val1: String = "foo".to_string(); - let val2: Tagged = Tagged::new(&val1); - let val3: Tagged = val2.cloned(); - ``` - */ - pub fn cloned(&self) -> Tagged { - Tagged::new(self.0.clone()) - } - - pub fn cloned_value(&self) -> Value { - self.0.clone() - } -} - -impl Tagged> { - /** - Convert a tagged [`Option`] value into an optional tagged value. - - Example: - - ```rust - # use ibc_test_framework::types::tagged::mono::Tagged; - struct Foo; - - let val1: Tagged> = Tagged::new(Some(8)); - let val2: Option> = val1.transpose(); - ``` - */ - pub fn transpose(self) -> Option> { - self.0.map(Tagged::new) - } -} - -impl Tagged> { - /** - Convert a tagged [`Result`] value into an result tagged value. - - Example: - - ```rust - # use ibc_test_framework::types::tagged::mono::Tagged; - struct Foo; - struct Error; - - let val1: Tagged> = Tagged::new(Ok(8)); - let val2: Result, Error> = val1.transpose(); - ``` - */ - pub fn transpose(self) -> Result, E> { - self.0.map(Tagged::new) - } -} - -impl Tagged> { - /** - Convert a tagged [`Vec`] value into a list of tagged value. 
- - Example: - - ```rust - # use ibc_test_framework::types::tagged::mono::Tagged; - struct Foo; - - let val1: Tagged> = Tagged::new(vec![1, 2, 3]); - let val2: Vec> = val1.transpose(); - ``` - */ - pub fn transpose(self) -> Vec> { - self.into_iter().collect() - } -} - -impl<'a, Tag, Value> AsRef for Tagged { - fn as_ref(&self) -> &Value { - self.value() - } -} - -impl AsRef for Tagged { - fn as_ref(&self) -> &Value { - self.value() - } -} - -impl Serialize for Tagged { - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - self.0.serialize(serializer) - } -} - -/** - Create a tagged iterator, if the underlying value supports iteration. - - Example: - - ```rust - # use ibc_test_framework::types::tagged::mono::Tagged; - struct Foo; - - let values: Tagged> = Tagged::new(vec![1, 2, 3]); - for value in values.into_iter() { - let value: Tagged = value; - // do something - } - ``` -*/ -pub struct TaggedIterator(Tagged); - -impl Iterator for TaggedIterator { - type Item = Tagged; - - fn next(&mut self) -> Option { - self.0 .0.next().map(Tagged::new) - } -} - -impl IntoIterator for Tagged { - type Item = Tagged; - - type IntoIter = TaggedIterator; - - fn into_iter(self) -> Self::IntoIter { - TaggedIterator(self.map_into(|v| v.into_iter())) - } -} - -impl Copy for Tagged {} - -unsafe impl Send for Tagged {} -unsafe impl Sync for Tagged {} - -impl Clone for Tagged { - fn clone(&self) -> Self { - Self::new(self.0.clone()) - } -} - -impl Default for Tagged { - fn default() -> Self { - Self::new(Value::default()) - } -} - -impl Debug for Tagged { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - Debug::fmt(self.value(), f) - } -} - -impl Display for Tagged { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - Display::fmt(self.value(), f) - } -} - -impl PartialEq for Tagged { - fn eq(&self, other: &Self) -> bool { - self.value().eq(other.value()) - } -} - -impl Eq for Tagged {} - -impl PartialOrd for Tagged { - fn partial_cmp(&self, other: &Self) -> Option { - self.value().partial_cmp(other.value()) - } -} - -impl Ord for Tagged { - fn cmp(&self, other: &Self) -> Ordering { - self.value().cmp(other.value()) - } -} diff --git a/tools/test-framework/src/types/wallet.rs b/tools/test-framework/src/types/wallet.rs deleted file mode 100644 index c9a6c950f5..0000000000 --- a/tools/test-framework/src/types/wallet.rs +++ /dev/null @@ -1,210 +0,0 @@ -/*! - Types for information about a chain wallet. -*/ - -use crate::types::env::{prefix_writer, EnvWriter, ExportEnv}; -use core::fmt::{self, Display}; -use ibc_relayer::keyring::KeyEntry; - -use crate::types::tagged::*; - -/** - Newtype wrapper for a wallet ID as identified by the chain and relayer. -*/ -#[derive(Debug, Clone)] -pub struct WalletId(pub String); - -/** - Newtype wrapper for the address a wallet corresponds to. -*/ -#[derive(Debug, Clone)] -pub struct WalletAddress(pub String); - -/** - A wallet containing the information about the ID, address, - and also the public/private key information in the form of - [KeyEntry](ibc_relayer::keyring::KeyEntry). -*/ -#[derive(Debug, Clone)] -pub struct Wallet { - /// The ID of the wallet for accessing it from the key store. - pub id: WalletId, - - /// The address for receiving tokens for this wallet. - pub address: WalletAddress, - - /// The wallet key information in the form of [`KeyEntry`] that - /// is used by the relayer. - pub key: KeyEntry, -} - -/** - A collection of wallets used for testing. 
We use an explicit - struct instead of a generic HashMap so that the retrieval - of a specific wallet can always succeed. We shouldn't need - more than the wallets listed here for testing purposes. - - In case we do need more wallets for testing, there shouldn't - be much overhead for adding a few more wallets here globally. - Alternatively the particular test that needs more wallets - can add new wallets in the test itself, or we can add - a HashMap here together with a - [`TestOverrides`](crate::framework::overrides::TestOverrides) - trait to generate additional wallets during test setup. -*/ -#[derive(Debug, Clone)] -pub struct TestWallets { - /// The validator wallet. - pub validator: Wallet, - - /// The relayer wallet. This is used by the relayer by default. - pub relayer: Wallet, - - /// The first user wallet that can be used for testing. - pub user1: Wallet, - - /// The second user wallet that can be used for testing. - pub user2: Wallet, -} - -/** - Extra methods for [`Wallet`] that is [tagged](crate::types::tagged). - - This trait is auto implemented for `MonoTagged` so - that we can call methods on it directly. -*/ -pub trait TaggedWallet { - /// Get the [`WalletId`] tagged with the given `Chain`. - fn id(&self) -> MonoTagged; - - /// Get the [`WalletAddress`] tagged with the given `Chain`. - fn address(&self) -> MonoTagged; - - /// Get the [`KeyEntry`] tagged with the given `Chain`. - fn key(&self) -> MonoTagged; -} - -/** - Extra methods for [`TestWallets`] that is [tagged](crate::types::tagged). - - This trait is auto implemented for `MonoTagged` so - that we can call methods on it directly. -*/ -pub trait TaggedTestWalletsExt { - /// Get the validator [`Wallet`] tagged with the given `Chain`. - fn validator(&self) -> MonoTagged; - - /// Get the relayer [`Wallet`] tagged with the given `Chain`. - fn relayer(&self) -> MonoTagged; - - /// Get the first user [`Wallet`] tagged with the given `Chain`. - fn user1(&self) -> MonoTagged; - - /// Get the second user [`Wallet`] tagged with the given `Chain`. 
- fn user2(&self) -> MonoTagged; -} - -impl Wallet { - /// Create a new [`Wallet`] - pub fn new(id: String, address: String, key: KeyEntry) -> Self { - Self { - id: WalletId(id), - address: WalletAddress(address), - key, - } - } -} - -impl TaggedWallet for MonoTagged { - fn id(&self) -> MonoTagged { - self.map_ref(|w| &w.id) - } - - fn address(&self) -> MonoTagged { - self.map_ref(|w| &w.address) - } - - fn key(&self) -> MonoTagged { - self.map_ref(|w| &w.key) - } -} - -impl<'a, Chain> TaggedWallet for MonoTagged { - fn id(&self) -> MonoTagged { - self.map_ref(|w| &w.id) - } - - fn address(&self) -> MonoTagged { - self.map_ref(|w| &w.address) - } - - fn key(&self) -> MonoTagged { - self.map_ref(|w| &w.key) - } -} - -impl TaggedTestWalletsExt for MonoTagged { - fn validator(&self) -> MonoTagged { - self.map_ref(|w| &w.validator) - } - - fn relayer(&self) -> MonoTagged { - self.map_ref(|w| &w.relayer) - } - - fn user1(&self) -> MonoTagged { - self.map_ref(|w| &w.user1) - } - - fn user2(&self) -> MonoTagged { - self.map_ref(|w| &w.user2) - } -} - -impl<'a, Chain> TaggedTestWalletsExt for MonoTagged { - fn validator(&self) -> MonoTagged { - self.map_ref(|w| &w.validator) - } - - fn relayer(&self) -> MonoTagged { - self.map_ref(|w| &w.relayer) - } - - fn user1(&self) -> MonoTagged { - self.map_ref(|w| &w.user1) - } - - fn user2(&self) -> MonoTagged { - self.map_ref(|w| &w.user2) - } -} - -impl ExportEnv for TestWallets { - fn export_env(&self, writer: &mut impl EnvWriter) { - self.validator - .export_env(&mut prefix_writer("VALIDATOR", writer)); - self.relayer - .export_env(&mut prefix_writer("RELAYER", writer)); - self.user1.export_env(&mut prefix_writer("USER1", writer)); - self.user2.export_env(&mut prefix_writer("USER2", writer)); - } -} - -impl ExportEnv for Wallet { - fn export_env(&self, writer: &mut impl EnvWriter) { - writer.write_env("KEY_ID", &self.id.0); - writer.write_env("ADDRESS", &self.address.0); - } -} - -impl Display for WalletId { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.0.fmt(f) - } -} - -impl Display for WalletAddress { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.0.fmt(f) - } -} diff --git a/tools/test-framework/src/util/array.rs b/tools/test-framework/src/util/array.rs deleted file mode 100644 index fc2a2ef88f..0000000000 --- a/tools/test-framework/src/util/array.rs +++ /dev/null @@ -1,87 +0,0 @@ -/*! - Helpers for manipulating fixed-sized arrays. -*/ - -use core::convert::TryInto; -use eyre::eyre; - -use crate::error::Error; - -/** - Converts a dynamic-sized vector `Vec` into a fixed-sized array - `[T; SIZE]`. Fails if the vector is not the same length as `SIZE`. -*/ -pub fn try_into_array(list: Vec) -> Result<[T; SIZE], Error> { - list.try_into() - .map_err(|_| Error::generic(eyre!("vector is not of length {}", SIZE))) -} - -/** - Converts a dynamic-sized nested vector `Vec>` into a fixed-sized - nested array `[[T; SIZE]; SIZE]`. Fails if the nested vector is not of - `SIZE`x`SIZE` length. -*/ -pub fn try_into_nested_array( - list: Vec>, -) -> Result<[[T; SIZE]; SIZE], Error> { - let list_a = list - .into_iter() - .map(try_into_array) - .collect::, _>>()?; - - try_into_array(list_a) -} - -/** - Converts a fixed-sized nested array `[[T; SIZE]; SIZE]` into a nested - vector `Vec>`. -*/ -pub fn into_nested_vec(array: [[T; SIZE]; SIZE]) -> Vec> { - array.map(|array_b| array_b.into()).into() -} - -/** - Map the elements in the fixed-sized array `[[T; SIZE]; SIZE]`. 
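These helpers lean on the standard library's `TryFrom<Vec<T>>` implementation for `[T; N]`; a condensed, standalone version with `String` errors in place of the framework's `Error` type:

```rust
use core::convert::TryInto;

fn try_into_array<T, const SIZE: usize>(list: Vec<T>) -> Result<[T; SIZE], String> {
    list.try_into()
        .map_err(|_| format!("vector is not of length {}", SIZE))
}

fn try_into_nested_array<T, const SIZE: usize>(
    list: Vec<Vec<T>>,
) -> Result<[[T; SIZE]; SIZE], String> {
    // Convert each inner vector first, then the outer one.
    let rows = list
        .into_iter()
        .map(try_into_array)
        .collect::<Result<Vec<_>, _>>()?;
    try_into_array(rows)
}

fn main() {
    let ok: Result<[[u32; 2]; 2], _> =
        try_into_nested_array(vec![vec![1, 2], vec![3, 4]]);
    assert!(ok.is_ok());

    // A 2 x 3 vector cannot become a 2 x 2 array.
    let err: Result<[[u32; 2]; 2], _> =
        try_into_nested_array(vec![vec![1, 2, 3], vec![4, 5, 6]]);
    assert!(err.is_err());
}
```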
-*/ -pub fn map_nested_array( - array: [[T; SIZE]; SIZE], - mapper: impl Fn(T) -> Result, -) -> Result<[[R; SIZE]; SIZE], Error> { - let mapped = into_nested_vec(array) - .into_iter() - .map(|inner| { - inner - .into_iter() - .map(&mapper) - .collect::, _>>() - }) - .collect::, _>>()?; - - try_into_nested_array(mapped) -} - -/** - Asserts that a nested vector `Vec>` has the same dimension - in its inner vectors. -*/ -pub fn assert_same_dimension(size: usize, list: &Vec>) -> Result<(), Error> { - if list.len() != size { - return Err(Error::generic(eyre!( - "expect nested vector to have the dimension {} x {}", - size, - size - ))); - } - - for list_b in list.iter() { - if list_b.len() != size { - return Err(Error::generic(eyre!( - "expect nested vector to have the dimension {} x {}", - size, - size - ))); - } - } - - Ok(()) -} diff --git a/tools/test-framework/src/util/assert.rs b/tools/test-framework/src/util/assert.rs deleted file mode 100644 index 17dc85c1df..0000000000 --- a/tools/test-framework/src/util/assert.rs +++ /dev/null @@ -1,36 +0,0 @@ -use core::fmt::Debug; - -use crate::error::Error; - -pub fn assert_eq(message: &str, left: &T, right: &T) -> Result<(), Error> { - if left == right { - Ok(()) - } else { - Err(Error::assertion(format!( - "expect left ({:?}) to be equal to right ({:?}): {}", - left, right, message - ))) - } -} - -pub fn assert_not_eq(message: &str, left: &T, right: &T) -> Result<(), Error> { - if left != right { - Ok(()) - } else { - Err(Error::assertion(format!( - "expect left ({:?}) to be not equal to right ({:?}): {}", - left, right, message - ))) - } -} - -pub fn assert_gt(message: &str, left: &T, right: &T) -> Result<(), Error> { - if left > right { - Ok(()) - } else { - Err(Error::assertion(format!( - "expect left ({:?}) to be greater than right ({:?}): {}", - left, right, message - ))) - } -} diff --git a/tools/test-framework/src/util/file.rs b/tools/test-framework/src/util/file.rs deleted file mode 100644 index 4473c2093b..0000000000 --- a/tools/test-framework/src/util/file.rs +++ /dev/null @@ -1,32 +0,0 @@ -/*! - Filesystem utilities. -*/ - -use std::fs; -use std::io; -use std::thread; - -use crate::error::Error; - -/** - Pipe a streaming source implementing [`std::io::Read`] to a file in - append mode. - - This is used to pipe log output from a full node's child process - to log files. -*/ -pub fn pipe_to_file( - mut source: impl io::Read + Send + 'static, - file_path: &str, -) -> Result<(), Error> { - let mut file = fs::OpenOptions::new() - .append(true) - .create(true) - .open(file_path)?; - - thread::spawn(move || { - std::io::copy(&mut source, &mut file).unwrap(); - }); - - Ok(()) -} diff --git a/tools/test-framework/src/util/mod.rs b/tools/test-framework/src/util/mod.rs deleted file mode 100644 index 1832a2f3c6..0000000000 --- a/tools/test-framework/src/util/mod.rs +++ /dev/null @@ -1,10 +0,0 @@ -/*! - Utility and helper functions used in the tests. -*/ - -pub mod array; -pub mod assert; -pub mod file; -pub mod random; -pub mod retry; -pub mod suspend; diff --git a/tools/test-framework/src/util/random.rs b/tools/test-framework/src/util/random.rs deleted file mode 100644 index 4bb47295cf..0000000000 --- a/tools/test-framework/src/util/random.rs +++ /dev/null @@ -1,48 +0,0 @@ -/*! - Utilities for random value generation. -*/ - -use rand::Rng; -use std::net::{Ipv4Addr, SocketAddrV4, TcpListener}; - -/// Generates a random `u32` value. -pub fn random_u32() -> u32 { - let mut rng = rand::thread_rng(); - rng.gen() -} - -/// Generates a random `u64` value. 
-pub fn random_u64() -> u64 {
-    let mut rng = rand::thread_rng();
-    rng.gen()
-}
-
-/// Generates a random `u64` value between the given min and max.
-pub fn random_u64_range(min: u64, max: u64) -> u64 {
-    let mut rng = rand::thread_rng();
-    rng.gen_range(min..max)
-}
-
-/// Generates a random string value, in the form of `u64` hex for simplicity.
-pub fn random_string() -> String {
-    format!("{:x}", random_u64())
-}
-
-/// Generates a random non-privileged port that is greater than 1024.
-fn random_port() -> u16 {
-    let mut rng = rand::thread_rng();
-    rng.gen::<u16>()
-        .checked_add(1024)
-        .unwrap_or_else(random_port)
-}
-
-/// Find a random unused non-privileged TCP port.
-pub fn random_unused_tcp_port() -> u16 {
-    let port = random_port();
-    let loopback = Ipv4Addr::new(127, 0, 0, 1);
-    let address = SocketAddrV4::new(loopback, port);
-    match TcpListener::bind(address) {
-        Ok(_) => port,
-        Err(_) => random_unused_tcp_port(),
-    }
-}
diff --git a/tools/test-framework/src/util/retry.rs b/tools/test-framework/src/util/retry.rs
deleted file mode 100644
index 2d88feb2b7..0000000000
--- a/tools/test-framework/src/util/retry.rs
+++ /dev/null
@@ -1,38 +0,0 @@
-/*!
-   Utilities for retrying test operations.
-*/
-
-use core::time::Duration;
-use std::thread::sleep;
-use tracing::{info, trace};
-
-use crate::error::Error;
-
-/**
-   A simplified version of retry logic used for testing.
-   We do not need complicated retry logic as we need this
-   only to test eventual consistency which should reach
-   within a few seconds.
-*/
-pub fn assert_eventually_succeed<R>(
-    task_name: &str,
-    attempts: u16,
-    interval: Duration,
-    task: impl Fn() -> Result<R, Error>,
-) -> Result<R, Error> {
-    sleep(interval);
-    for i in 0..attempts {
-        match task() {
-            Ok(res) => {
-                info!("task {} succeed after {} tries", task_name, i);
-                return Ok(res);
-            }
-            Err(e) => {
-                trace!("retrying task {} that failed with error: {}", task_name, e);
-                sleep(interval)
-            }
-        }
-    }
-
-    Err(Error::retry(task_name.to_string(), attempts))
-}
diff --git a/tools/test-framework/src/util/suspend.rs b/tools/test-framework/src/util/suspend.rs
deleted file mode 100644
index d5d0001aa2..0000000000
--- a/tools/test-framework/src/util/suspend.rs
+++ /dev/null
@@ -1,49 +0,0 @@
-/*!
-   Utilities for suspending the test.
-*/
-
-use core::fmt::{Debug, Display};
-use core::time::Duration;
-use std::thread::sleep;
-use tracing::{error, warn};
-
-/**
-   Call this function in the middle of a test code of interest,
-   so that we can suspend the test and still interact with the
-   spawned Gaia chains and chain supervisor for debugging.
-*/
-pub fn suspend<R>() -> R {
-    warn!("suspending the test indefinitely. you can still interact with any spawned chains and relayers");
-
-    loop {
-        sleep(Duration::from_secs(999_999_999))
-    }
-}
-
-/**
-   Suspends the test using [`suspend`] if `hang_on_fail` is `true` and if
-   the continuation returns `Err`.
-
-   The parameter `hang_on_fail` should be obtained from
-   [`TestConfig`](crate::types::config::TestConfig),
-   which in turns is set from the `HANG_ON_FAIL` environment variable.
-*/
-pub fn hang_on_error<E: Debug + Display, R>(
-    hang_on_fail: bool,
-    cont: impl FnOnce() -> Result<R, E>,
-) -> Result<R, E> {
-    if hang_on_fail {
-        cont().map_err(|e| {
-            error!("test failure occured with HANG_ON_FAIL=1, suspending the test to allow debugging: {:?}",
-            e);
-
-            suspend()
-        })
-    } else {
-        cont().map_err(|e| {
-            error!("test failure occured. 
set HANG_ON_FAIL=1 to suspend the test on failure for debugging: {}", - e); - e - }) - } -} From 6da3fa79f6475637a2ca0c27c3c91a53452cea36 Mon Sep 17 00:00:00 2001 From: Web3 Smith <31099392+Wizdave97@users.noreply.github.com> Date: Thu, 11 Aug 2022 17:40:51 +0100 Subject: [PATCH 73/96] Beefy client update (#48) --- Cargo.lock | 1 + modules/Cargo.toml | 1 + modules/src/clients/ics11_beefy/client_def.rs | 68 +- .../src/clients/ics11_beefy/client_state.rs | 129 +- modules/src/clients/ics11_beefy/error.rs | 3 + modules/src/clients/ics11_beefy/header.rs | 4 - modules/src/core/ics02_client/client_state.rs | 2 +- modules/src/core/ics02_client/context.rs | 15 +- .../ics02_client/handler/create_client.rs | 42 +- .../ics02_client/handler/update_client.rs | 165 +- .../core/ics02_client/msgs/create_client.rs | 26 +- .../core/ics04_channel/handler/send_packet.rs | 34 +- modules/src/core/ics26_routing/handler.rs | 4 +- modules/src/mock/context.rs | 16 +- modules/src/mock/host.rs | 5 +- modules/tests/runner/mod.rs | 2 +- proto-compiler/Cargo.lock | 1281 ------------- proto/src/IBC_GO_COMMIT | 2 +- proto/src/prost/ibc.applications.fee.v1.rs | 1697 +++++++++++++++++ .../src/prost/ibc.applications.transfer.v1.rs | 88 +- proto/src/prost/ibc.lightclients.beefy.v1.rs | 37 +- proto/src/prost/ics23.rs | 1 + scripts/sync-protobuf.sh | 121 ++ 23 files changed, 2276 insertions(+), 1468 deletions(-) delete mode 100644 proto-compiler/Cargo.lock create mode 100644 proto/src/prost/ibc.applications.fee.v1.rs create mode 100755 scripts/sync-protobuf.sh diff --git a/Cargo.lock b/Cargo.lock index e5d5a59b31..0f263bcda1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1822,6 +1822,7 @@ dependencies = [ "derive_more", "env_logger", "flex-error", + "frame-support", "ibc-proto", "ics23", "modelator", diff --git a/modules/Cargo.toml b/modules/Cargo.toml index 21ac94fab5..4903150e1e 100644 --- a/modules/Cargo.toml +++ b/modules/Cargo.toml @@ -128,6 +128,7 @@ sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkad sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.24" } sp-std = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.24" } sp-trie = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.24" } +frame-support = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.24" } [[test]] name = "mbt" diff --git a/modules/src/clients/ics11_beefy/client_def.rs b/modules/src/clients/ics11_beefy/client_def.rs index cc61f63ec2..4a79fbb7c9 100644 --- a/modules/src/clients/ics11_beefy/client_def.rs +++ b/modules/src/clients/ics11_beefy/client_def.rs @@ -92,7 +92,7 @@ impl ClientDef for BeefyClient ClientDef for BeefyClient Result<(Self::ClientState, ConsensusUpdateResult), Error> { let mut parachain_cs_states = vec![]; // Extract the new client state from the verified header - let client_state = client_state + let mut client_state = client_state .from_header(header.clone()) .map_err(Error::beefy)?; + let mut latest_para_height = client_state.latest_para_height; for header in header.parachain_headers { // Skip genesis block of parachains since it has no timestamp or ibc root if header.parachain_header.number == 0 { continue; } - let height = Height::new(header.para_id as u64, header.parachain_header.number as u64); + if latest_para_height < header.parachain_header.number { + latest_para_height = header.parachain_header.number; + } + let height = Height::new( + client_state.para_id as u64, + 
header.parachain_header.number as u64, + ); // Skip duplicate consensus states if ctx.consensus_state(&client_id, height).is_ok() { continue; @@ -156,6 +163,8 @@ impl ClientDef for BeefyClient ClientDef for BeefyClient Result { - let height = if let Some(mmr_update) = header.mmr_update_proof { - Height::new( - 0, - mmr_update.signed_commitment.commitment.block_number as u64, - ) - } else { - Height::new(0, client_state.latest_beefy_height as u64) - }; + let latest_para_height = header + .parachain_headers + .into_iter() + .map(|header| header.parachain_header.number) + .max(); + let frozen_height = latest_para_height + .map(|height| Height::new(client_state.para_id.into(), height.into())) + .unwrap_or(Height::new( + client_state.para_id.into(), + client_state.latest_para_height.into(), + )); client_state - .with_frozen_height(height) + .with_frozen_height(frozen_height) .map_err(|e| Error::beefy(BeefyError::implementation_specific(e.to_string()))) } @@ -206,8 +218,8 @@ impl ClientDef for BeefyClient ClientDef for BeefyClient Result<(), Error> { + client_state.verify_height(height)?; let path = ClientConsensusStatePath { client_id: client_id.clone(), epoch: consensus_height.revision_number, @@ -229,14 +242,15 @@ impl ClientDef for BeefyClient Result<(), Error> { + client_state.verify_height(height)?; let path = ConnectionsPath(connection_id.clone()); let value = expected_connection_end.encode_vec(); verify_membership::(prefix, proof, root, path, value) @@ -246,8 +260,8 @@ impl ClientDef for BeefyClient ClientDef for BeefyClient Result<(), Error> { + client_state.verify_height(height)?; let path = ChannelEndsPath(port_id.clone(), *channel_id); let value = expected_channel_end.encode_vec(); verify_membership::(prefix, proof, root, path, value) @@ -263,14 +278,15 @@ impl ClientDef for BeefyClient Result<(), Error> { + client_state.verify_height(height)?; let path = ClientStatePath(client_id.clone()); let value = expected_client_state.encode_vec(); verify_membership::(prefix, proof, root, path, value) @@ -280,7 +296,7 @@ impl ClientDef for BeefyClient ClientDef for BeefyClient Result<(), Error> { + client_state.verify_height(height)?; verify_delay_passed(ctx, height, connection_end)?; let commitment_path = CommitmentsPath { @@ -311,7 +328,7 @@ impl ClientDef for BeefyClient ClientDef for BeefyClient Result<(), Error> { + client_state.verify_height(height)?; verify_delay_passed(ctx, height, connection_end)?; let ack_path = AcksPath { @@ -341,7 +359,7 @@ impl ClientDef for BeefyClient ClientDef for BeefyClient Result<(), Error> { + client_state.verify_height(height)?; verify_delay_passed(ctx, height, connection_end)?; let seq_bytes = codec::Encode::encode(&u64::from(sequence)); @@ -368,7 +387,7 @@ impl ClientDef for BeefyClient ClientDef for BeefyClient Result<(), Error> { + client_state.verify_height(height)?; verify_delay_passed(ctx, height, connection_end)?; let receipt_path = ReceiptsPath { diff --git a/modules/src/clients/ics11_beefy/client_state.rs b/modules/src/clients/ics11_beefy/client_state.rs index c3cc35f398..1ec3366ef2 100644 --- a/modules/src/clients/ics11_beefy/client_state.rs +++ b/modules/src/clients/ics11_beefy/client_state.rs @@ -1,9 +1,10 @@ use crate::prelude::*; - use beefy_primitives::known_payload_ids::MMR_ROOT_ID; use beefy_primitives::mmr::BeefyNextAuthoritySet; use codec::{Decode, Encode}; use core::convert::TryFrom; +use core::fmt; +use core::str::FromStr; use core::time::Duration; use serde::{Deserialize, Serialize}; use sp_core::H256; @@ -24,6 +25,8 @@ use 
crate::Height; pub struct ClientState { /// The chain id pub chain_id: ChainId, + /// Relay chain + pub relay_chain: RelayChain, /// Latest mmr root hash pub mmr_root_hash: H256, /// block number for the latest mmr_root_hash @@ -33,6 +36,10 @@ pub struct ClientState { /// Block number that the beefy protocol was activated on the relay chain. /// This should be the first block in the merkle-mountain-range tree. pub beefy_activation_block: u32, + /// latest parachain height + pub latest_para_height: u32, + /// ParaId of associated parachain + pub para_id: u32, /// authorities for the current round pub authority: BeefyNextAuthoritySet, /// authorities for the next round @@ -44,19 +51,15 @@ impl Protobuf for ClientState {} impl ClientState { #[allow(clippy::too_many_arguments)] pub fn new( - chain_id: ChainId, + relay_chain: RelayChain, + para_id: u32, + latest_para_height: u32, mmr_root_hash: H256, beefy_activation_block: u32, latest_beefy_height: u32, authority_set: BeefyNextAuthoritySet, next_authority_set: BeefyNextAuthoritySet, ) -> Result { - if chain_id.version() == 0 { - return Err(Error::validation( - "ClientState Chain id cannot be equal to zero ".to_string(), - )); - } - if beefy_activation_block > latest_beefy_height { return Err(Error::validation( "ClientState beefy activation block cannot be greater than latest_beefy_height" @@ -70,6 +73,7 @@ impl ClientState { .to_string(), )); } + let chain_id = ChainId::new(relay_chain.to_string(), para_id.into()); Ok(Self { chain_id, @@ -79,6 +83,9 @@ impl ClientState { beefy_activation_block, authority: authority_set, next_authority_set, + relay_chain, + latest_para_height, + para_id, }) } @@ -168,11 +175,9 @@ impl ClientState { /// Verify that the client is at a sufficient height and unfrozen at the given height pub fn verify_height(&self, height: Height) -> Result<(), Error> { - if (self.latest_beefy_height as u64) < height.revision_height { - return Err(Error::insufficient_height( - Height::new(0, self.latest_beefy_height.into()), - height, - )); + let latest_para_height = Height::new(self.para_id.into(), self.latest_para_height.into()); + if latest_para_height < height { + return Err(Error::insufficient_height(latest_para_height, height)); } match self.frozen_height { @@ -182,6 +187,12 @@ impl ClientState { _ => Ok(()), } } + + /// Check if the state is expired when `elapsed` time has passed since the latest consensus + /// state timestamp + pub fn expired(&self, elapsed: Duration) -> bool { + elapsed > self.relay_chain.trusting_period() + } } #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] @@ -199,7 +210,7 @@ impl crate::core::ics02_client::client_state::ClientState for ClientState { } fn latest_height(&self) -> Height { - Height::new(0, self.latest_beefy_height.into()) + Height::new(self.para_id.into(), self.latest_para_height.into()) } fn frozen_height(&self) -> Option { @@ -260,15 +271,20 @@ impl TryFrom for ClientState { .ok_or_else(Error::missing_beefy_authority_set)?; let mmr_root_hash = H256::decode(&mut &*raw.mmr_root_hash).map_err(Error::scale_decode)?; + let relay_chain = RelayChain::from_i32(raw.relay_chain)?; + let chain_id = ChainId::new(relay_chain.to_string(), raw.para_id.into()); Ok(Self { - chain_id: ChainId::default(), + chain_id, mmr_root_hash, latest_beefy_height: raw.latest_beefy_height, frozen_height, beefy_activation_block: raw.beefy_activation_block, authority: authority_set, next_authority_set, + relay_chain, + latest_para_height: raw.latest_para_height, + para_id: raw.para_id, }) } } @@ -293,6 
+309,85 @@ impl From for RawClientState { len: client_state.next_authority_set.len, authority_root: client_state.next_authority_set.root.encode(), }), + relay_chain: client_state.relay_chain as i32, + para_id: client_state.para_id, + latest_para_height: client_state.latest_para_height, + } + } +} + +#[derive(Clone, Copy, Debug, PartialEq, Eq, Deserialize, Serialize)] +pub enum RelayChain { + Polkadot = 0, + Kusama = 1, + Rococo = 2, +} + +impl Default for RelayChain { + fn default() -> Self { + RelayChain::Rococo + } +} + +impl fmt::Display for RelayChain { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.as_str()) + } +} + +// Unbonding period for relay chains in days +const POLKADOT_UNBONDING_PERIOD: u64 = 28; +const KUSAMA_UNBONDING_PERIOD: u64 = 7; + +impl RelayChain { + /// Yields the Order as a string + pub fn as_str(&self) -> &'static str { + match self { + Self::Polkadot => "Polkadot", + Self::Kusama => "Kusama", + Self::Rococo => "Rococo", + } + } + + // Parses the Order out from a i32. + pub fn from_i32(nr: i32) -> Result { + match nr { + 0 => Ok(Self::Polkadot), + 1 => Ok(Self::Kusama), + 2 => Ok(Self::Rococo), + id => Err(Error::unknown_relay_chain(id.to_string())), + } + } + + pub fn unbonding_period(&self) -> Duration { + match self { + Self::Polkadot => { + let secs = POLKADOT_UNBONDING_PERIOD * 24 * 60 * 60; + Duration::from_secs(secs) + } + Self::Kusama | Self::Rococo => { + let secs = KUSAMA_UNBONDING_PERIOD * 24 * 60 * 60; + Duration::from_secs(secs) + } + } + } + + pub fn trusting_period(&self) -> Duration { + let unbonding_period = self.unbonding_period(); + // Trusting period is 1/3 of unbonding period + unbonding_period.checked_div(3).unwrap() + } +} + +impl FromStr for RelayChain { + type Err = Error; + + fn from_str(s: &str) -> Result { + match s.to_lowercase().trim_start_matches("order_") { + "polkadot" => Ok(Self::Polkadot), + "kusama" => Ok(Self::Kusama), + "rococo" => Ok(Self::Rococo), + _ => Err(Error::unknown_relay_chain(s.to_string())), } } } @@ -305,7 +400,9 @@ pub mod test_util { pub fn get_dummy_beefy_state() -> AnyClientState { AnyClientState::Beefy( ClientState::new( - ChainId::new("polkadot".to_string(), 1), + RelayChain::Rococo, + 2000, + 0, Default::default(), 0, 0, diff --git a/modules/src/clients/ics11_beefy/error.rs b/modules/src/clients/ics11_beefy/error.rs index 6d6c3181e2..ee8ef393c2 100644 --- a/modules/src/clients/ics11_beefy/error.rs +++ b/modules/src/clients/ics11_beefy/error.rs @@ -200,5 +200,8 @@ define_error! 
{ | e | { format_args!("the client is frozen: frozen_height={0} target_height={1}", e.frozen_height, e.target_height) }, + UnknownRelayChain + { type_id: String } + | e | { format_args!("Relaychain type not known: {}", e.type_id) }, } } diff --git a/modules/src/clients/ics11_beefy/header.rs b/modules/src/clients/ics11_beefy/header.rs index fee50b50e3..8315d92b25 100644 --- a/modules/src/clients/ics11_beefy/header.rs +++ b/modules/src/clients/ics11_beefy/header.rs @@ -51,8 +51,6 @@ pub struct ParachainHeader { pub parachain_header: SubstrateHeader, /// Reconstructed mmr leaf pub partial_mmr_leaf: PartialMmrLeaf, - /// parachain id - pub para_id: u32, /// Proof for our parachain header inclusion in the parachain headers root pub parachain_heads_proof: Vec, /// leaf index for parachain heads proof @@ -111,7 +109,6 @@ impl TryFrom for BeefyHeader { parent_number_and_hash: (mmr_partial_leaf.parent_number, parent_hash), beefy_next_authority_set, }, - para_id: raw_para_header.para_id, parachain_heads_proof: raw_para_header .parachain_heads_proof .into_iter() @@ -294,7 +291,6 @@ impl From for RawBeefyHeader { .encode(), }), }), - para_id: para_header.para_id, parachain_heads_proof: para_header .parachain_heads_proof .into_iter() diff --git a/modules/src/core/ics02_client/client_state.rs b/modules/src/core/ics02_client/client_state.rs index af360ece07..25bb3bd8c0 100644 --- a/modules/src/core/ics02_client/client_state.rs +++ b/modules/src/core/ics02_client/client_state.rs @@ -192,7 +192,7 @@ impl AnyClientState { match self { AnyClientState::Tendermint(tm_state) => tm_state.expired(elapsed_since_latest), #[cfg(any(test, feature = "ics11_beefy"))] - AnyClientState::Beefy(_) => false, + AnyClientState::Beefy(bf_state) => bf_state.expired(elapsed_since_latest), #[cfg(any(test, feature = "ics11_beefy"))] Self::Near(_) => false, #[cfg(any(test, feature = "mocks"))] diff --git a/modules/src/core/ics02_client/context.rs b/modules/src/core/ics02_client/context.rs index c6aa7831df..c100b14c4d 100644 --- a/modules/src/core/ics02_client/context.rs +++ b/modules/src/core/ics02_client/context.rs @@ -80,16 +80,11 @@ pub trait ClientKeeper { self.store_client_type(client_id.clone(), res.client_type)?; self.store_client_state(client_id.clone(), res.client_state.clone())?; - match res.consensus_state { - None => {} - Some(consensus_state) => { - self.store_consensus_state( - client_id, - res.client_state.latest_height(), - consensus_state, - )?; - } - } + self.store_consensus_state( + client_id, + res.client_state.latest_height(), + res.consensus_state, + )?; self.increase_client_counter(); self.store_update_time( res.client_id.clone(), diff --git a/modules/src/core/ics02_client/handler/create_client.rs b/modules/src/core/ics02_client/handler/create_client.rs index 0f3725a391..a2c39654d3 100644 --- a/modules/src/core/ics02_client/handler/create_client.rs +++ b/modules/src/core/ics02_client/handler/create_client.rs @@ -24,7 +24,7 @@ pub struct Result { pub client_id: ClientId, pub client_type: ClientType, pub client_state: AnyClientState, - pub consensus_state: Option, + pub consensus_state: AnyConsensusState, pub processed_time: Timestamp, pub processed_height: Height, } @@ -103,7 +103,7 @@ mod tests { let msg = MsgCreateAnyClient::new( MockClientState::new(MockHeader::new(height)).into(), - Some(MockConsensusState::new(MockHeader::new(height)).into()), + MockConsensusState::new(MockHeader::new(height)).into(), signer, ) .unwrap(); @@ -154,13 +154,11 @@ mod tests { ..height })) .into(), - Some( - 
MockConsensusState::new(MockHeader::new(Height { - revision_height: 42, - ..height - })) - .into(), - ), + MockConsensusState::new(MockHeader::new(Height { + revision_height: 42, + ..height + })) + .into(), signer.clone(), ) .unwrap(), @@ -170,13 +168,11 @@ mod tests { ..height })) .into(), - Some( - MockConsensusState::new(MockHeader::new(Height { - revision_height: 42, - ..height - })) - .into(), - ), + MockConsensusState::new(MockHeader::new(Height { + revision_height: 42, + ..height + })) + .into(), signer.clone(), ) .unwrap(), @@ -186,13 +182,11 @@ mod tests { ..height })) .into(), - Some( - MockConsensusState::new(MockHeader::new(Height { - revision_height: 50, - ..height - })) - .into(), - ), + MockConsensusState::new(MockHeader::new(Height { + revision_height: 50, + ..height + })) + .into(), signer, ) .unwrap(), @@ -264,7 +258,7 @@ mod tests { let msg = MsgCreateAnyClient::new( tm_client_state, - Some(AnyConsensusState::Tendermint(tm_header.try_into().unwrap())), + AnyConsensusState::Tendermint(tm_header.try_into().unwrap()), signer, ) .unwrap(); diff --git a/modules/src/core/ics02_client/handler/update_client.rs b/modules/src/core/ics02_client/handler/update_client.rs index 610f88a796..3ee503a322 100644 --- a/modules/src/core/ics02_client/handler/update_client.rs +++ b/modules/src/core/ics02_client/handler/update_client.rs @@ -4,7 +4,6 @@ use core::fmt::Debug; use crate::clients::host_functions::HostFunctionsProvider; use crate::core::ics02_client::client_def::{AnyClient, ClientDef, ConsensusUpdateResult}; use crate::core::ics02_client::client_state::{AnyClientState, ClientState}; -use crate::core::ics02_client::client_type::ClientType; use crate::core::ics02_client::error::Error; use crate::core::ics02_client::events::Attributes; use crate::core::ics02_client::handler::ClientResult; @@ -52,29 +51,27 @@ pub fn process( return Err(Error::client_frozen(client_id)); } - if client_type == ClientType::Tendermint { - // Read consensus state from the host chain store. - let latest_consensus_state = ctx - .consensus_state(&client_id, client_state.latest_height()) - .map_err(|_| { - Error::consensus_state_not_found(client_id.clone(), client_state.latest_height()) - })?; - - tracing::debug!("latest consensus state: {:?}", latest_consensus_state); - - let now = ctx.host_timestamp(); - let duration = now - .duration_since(&latest_consensus_state.timestamp()) - .ok_or_else(|| { - Error::invalid_consensus_state_timestamp(latest_consensus_state.timestamp(), now) - })?; - - if client_state.expired(duration) { - return Err(Error::header_not_within_trust_period( - latest_consensus_state.timestamp(), - header.timestamp(), - )); - } + // Read consensus state from the host chain store. 
+ let latest_consensus_state = ctx + .consensus_state(&client_id, client_state.latest_height()) + .map_err(|_| { + Error::consensus_state_not_found(client_id.clone(), client_state.latest_height()) + })?; + + tracing::debug!("latest consensus state: {:?}", latest_consensus_state); + + let now = ctx.host_timestamp(); + let duration = now + .duration_since(&latest_consensus_state.timestamp()) + .ok_or_else(|| { + Error::invalid_consensus_state_timestamp(latest_consensus_state.timestamp(), now) + })?; + + if client_state.expired(duration) { + return Err(Error::header_not_within_trust_period( + latest_consensus_state.timestamp(), + now, + )); } client_def @@ -129,10 +126,7 @@ mod tests { use core::str::FromStr; use test_log::test; - use crate::clients::ics11_beefy::client_state::ClientState as BeefyClientState; - use crate::clients::ics11_beefy::header::BeefyHeader; - use crate::clients::ics11_beefy::header::ParachainHeader as BeefyParachainHeader; - use crate::core::ics02_client::client_consensus::AnyConsensusState; + use crate::core::ics02_client::client_consensus::{AnyConsensusState, ConsensusState}; use crate::core::ics02_client::client_state::{AnyClientState, ClientState}; use crate::core::ics02_client::client_type::ClientType; use crate::core::ics02_client::context::{ClientKeeper, ClientReader}; @@ -154,10 +148,6 @@ mod tests { use crate::test_utils::{get_dummy_account_id, Crypto}; use crate::timestamp::Timestamp; use crate::Height; - use beefy_client_primitives::NodesUtils; - use beefy_queries::ClientWrapper; - use codec::{Decode, Encode}; - use subxt::rpc::{rpc_params, JsonValue, Subscription, SubscriptionClientT}; #[test] fn test_update_client_ok() { @@ -575,13 +565,28 @@ mod tests { #[cfg(feature = "ics11_beefy")] #[tokio::test] async fn test_continuous_update_of_beefy_client() { + use crate::clients::ics11_beefy::client_state::ClientState as BeefyClientState; + use crate::clients::ics11_beefy::client_state::RelayChain; + use crate::clients::ics11_beefy::consensus_state::ConsensusState; + use crate::clients::ics11_beefy::header::BeefyHeader; + use crate::clients::ics11_beefy::header::ParachainHeader as BeefyParachainHeader; + use beefy_client_primitives::NodesUtils; + use beefy_client_primitives::PartialMmrLeaf; + use beefy_queries::runtime; + use beefy_queries::{ + helpers::{fetch_timestamp_extrinsic_with_proof, TimeStampExtWithProof}, + ClientWrapper, + }; + use codec::{Decode, Encode}; + use subxt::rpc::{rpc_params, JsonValue, Subscription, SubscriptionClientT}; + let client_id = ClientId::new(ClientType::Beefy, 0).unwrap(); let chain_start_height = Height::new(1, 11); let mut ctx = MockContext::new( ChainId::new("mockgaiaA".to_string(), 1), - HostType::Mock, + HostType::Beefy, 5, chain_start_height, ); @@ -605,25 +610,90 @@ mod tests { relay_client: client.clone(), para_client: para_client.clone(), beefy_activation_block: 0, - para_id: 2000, + para_id: 2001, }; let mut count = 0; - let client_state = - ClientWrapper::::get_initial_client_state(Some(&client)).await; - let beefy_client_state = BeefyClientState { - chain_id: Default::default(), - frozen_height: None, - latest_beefy_height: 0, - mmr_root_hash: Default::default(), - authority: client_state.current_authorities, - next_authority_set: client_state.next_authorities, - beefy_activation_block: 0, + let client_state = client_wrapper + .construct_beefy_client_state(0) + .await + .unwrap(); + let beefy_client_state = BeefyClientState::new( + RelayChain::Rococo, + client_wrapper.para_id, + 0, + client_state.mmr_root_hash, + 
client_state.beefy_activation_block, + client_state.latest_beefy_height, + client_state.current_authorities, + client_state.next_authorities, + ) + .unwrap(); + + let api = client_wrapper + .relay_client + .clone() + .to_runtime_api::>>(); + let subxt_block_number: subxt::BlockNumber = beefy_client_state.latest_beefy_height.into(); + let block_hash = client_wrapper + .relay_client + .rpc() + .block_hash(Some(subxt_block_number)) + .await + .unwrap(); + let head_data = api + .storage() + .paras() + .heads( + &runtime::api::runtime_types::polkadot_parachain::primitives::Id( + client_wrapper.para_id, + ), + block_hash, + ) + .await + .unwrap() + .unwrap(); + let decoded_para_head = + sp_runtime::generic::Header::::decode( + &mut &*head_data.0, + ) + .unwrap(); + let block_number = decoded_para_head.number; + let subxt_block_number: subxt::BlockNumber = block_number.into(); + let block_hash = client_wrapper + .para_client + .rpc() + .block_hash(Some(subxt_block_number)) + .await + .unwrap(); + + let TimeStampExtWithProof { + ext: timestamp_extrinsic, + proof: extrinsic_proof, + } = fetch_timestamp_extrinsic_with_proof(&client_wrapper.para_client, block_hash) + .await + .unwrap(); + let parachain_header = BeefyParachainHeader { + parachain_header: decoded_para_head, + partial_mmr_leaf: PartialMmrLeaf { + version: Default::default(), + parent_number_and_hash: Default::default(), + beefy_next_authority_set: Default::default(), + }, + parachain_heads_proof: vec![], + heads_leaf_index: 0, + heads_total_count: 0, + extrinsic_proof, + timestamp_extrinsic, }; + let consensus_state = ConsensusState::from_header(parachain_header) + .unwrap() + .wrap_any(); + let create_client = MsgCreateAnyClient { client_state: AnyClientState::Beefy(beefy_client_state), - consensus_state: None, + consensus_state, signer: signer.clone(), }; @@ -696,7 +766,6 @@ mod tests { parachain_header: Decode::decode(&mut &*header.parachain_header.as_slice()) .unwrap(), partial_mmr_leaf: header.partial_mmr_leaf, - para_id: header.para_id, parachain_heads_proof: header.parachain_heads_proof, heads_leaf_index: header.heads_leaf_index, heads_total_count: header.heads_total_count, @@ -743,10 +812,6 @@ mod tests { upd_res.client_state, ctx.latest_client_states(&client_id).clone() ); - assert_eq!( - upd_res.client_state.latest_height(), - Height::new(0, signed_commitment.commitment.block_number as u64), - ) } _ => panic!("update handler result has incorrect type"), } diff --git a/modules/src/core/ics02_client/msgs/create_client.rs b/modules/src/core/ics02_client/msgs/create_client.rs index c3018f4e47..a006b1b8c8 100644 --- a/modules/src/core/ics02_client/msgs/create_client.rs +++ b/modules/src/core/ics02_client/msgs/create_client.rs @@ -18,26 +18,21 @@ pub const TYPE_URL: &str = "/ibc.core.client.v1.MsgCreateClient"; #[derive(Clone, Debug, PartialEq, Eq)] pub struct MsgCreateAnyClient { pub client_state: AnyClientState, - pub consensus_state: Option, + pub consensus_state: AnyConsensusState, pub signer: Signer, } impl MsgCreateAnyClient { pub fn new( client_state: AnyClientState, - consensus_state: Option, + consensus_state: AnyConsensusState, signer: Signer, ) -> Result { - match consensus_state.as_ref() { - Some(consensus_state) - if client_state.client_type() != consensus_state.client_type() => - { - return Err(Error::raw_client_and_consensus_state_types_mismatch( - client_state.client_type(), - consensus_state.client_type(), - )) - } - _ => {} + if client_state.client_type() != consensus_state.client_type() { + return 
Err(Error::raw_client_and_consensus_state_types_mismatch( + client_state.client_type(), + consensus_state.client_type(), + )); } Ok(MsgCreateAnyClient { @@ -73,7 +68,8 @@ impl TryFrom for MsgCreateAnyClient { let consensus_state = raw .consensus_state - .and_then(|cs| AnyConsensusState::try_from(cs).ok()); + .and_then(|cs| AnyConsensusState::try_from(cs).ok()) + .ok_or_else(Error::missing_raw_consensus_state)?; MsgCreateAnyClient::new( AnyClientState::try_from(raw_client_state)?, @@ -87,7 +83,7 @@ impl From for RawMsgCreateClient { fn from(ics_msg: MsgCreateAnyClient) -> Self { RawMsgCreateClient { client_state: Some(ics_msg.client_state.into()), - consensus_state: ics_msg.consensus_state.map(|cs| cs.into()), + consensus_state: Some(ics_msg.consensus_state.into()), signer: ics_msg.signer.to_string(), } } @@ -115,7 +111,7 @@ mod tests { let msg = MsgCreateAnyClient::new( tm_client_state, - Some(AnyConsensusState::Tendermint(tm_header.try_into().unwrap())), + AnyConsensusState::Tendermint(tm_header.try_into().unwrap()), signer, ) .unwrap(); diff --git a/modules/src/core/ics04_channel/handler/send_packet.rs b/modules/src/core/ics04_channel/handler/send_packet.rs index e20fee5fda..ca484371f8 100644 --- a/modules/src/core/ics04_channel/handler/send_packet.rs +++ b/modules/src/core/ics04_channel/handler/send_packet.rs @@ -59,23 +59,23 @@ pub fn send_packet(ctx: &dyn ReaderContext, packet: Packet) -> HandlerResult Timestamp { - self.history - .last() - .expect("history cannot be empty") - .timestamp() - .add(self.block_time) - .unwrap() + if self.host_chain_type == HostType::Beefy { + (Timestamp::now() + Duration::from_secs(86400)).unwrap() + } else { + self.history + .last() + .expect("history cannot be empty") + .timestamp() + .add(self.block_time) + .unwrap() + } } fn host_consensus_state(&self, height: Height) -> Result { diff --git a/modules/src/mock/host.rs b/modules/src/mock/host.rs index d94b90f893..7a3e974af4 100644 --- a/modules/src/mock/host.rs +++ b/modules/src/mock/host.rs @@ -18,10 +18,11 @@ use crate::Height; /// - `Mock` defines that the context history consists of `MockHeader` blocks. /// - `SyntheticTendermint`: the context has synthetically-generated Tendermint (light) blocks. /// See also the `HostBlock` enum to get more insights into the underlying block type. 
-#[derive(Clone, Debug, Copy)] +#[derive(Clone, Debug, PartialEq, Eq, Copy)] pub enum HostType { Mock, SyntheticTendermint, + Beefy, } /// Depending on `HostType` (the type of host chain underlying a context mock), this enum defines @@ -62,7 +63,7 @@ impl HostBlock { timestamp: Timestamp, ) -> HostBlock { match chain_type { - HostType::Mock => HostBlock::Mock(MockHeader { + HostType::Mock | HostType::Beefy => HostBlock::Mock(MockHeader { height: Height::new(chain_id.version(), height), timestamp, }), diff --git a/modules/tests/runner/mod.rs b/modules/tests/runner/mod.rs index 96a4e0ce11..a63642601b 100644 --- a/modules/tests/runner/mod.rs +++ b/modules/tests/runner/mod.rs @@ -311,7 +311,7 @@ impl IbcTestRunner { // create ICS26 message and deliver it let msg = Ics26Envelope::Ics2Msg(ClientMsg::CreateClient(MsgCreateAnyClient { client_state: Self::client_state(client_state), - consensus_state: Some(Self::consensus_state(consensus_state)), + consensus_state: Self::consensus_state(consensus_state), signer: Self::signer(), })); ctx.deliver(msg) diff --git a/proto-compiler/Cargo.lock b/proto-compiler/Cargo.lock deleted file mode 100644 index 3b69bae13d..0000000000 --- a/proto-compiler/Cargo.lock +++ /dev/null @@ -1,1281 +0,0 @@ -# This file is automatically @generated by Cargo. -# It is not intended for manual editing. -version = 3 - -[[package]] -name = "anyhow" -version = "1.0.57" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08f9b8508dccb7687a1d6c4ce66b2b0ecef467c94667de27d8d7fe1f8d2a9cdc" - -[[package]] -name = "argh" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbb41d85d92dfab96cb95ab023c265c5e4261bb956c0fb49ca06d90c570f1958" -dependencies = [ - "argh_derive", - "argh_shared", -] - -[[package]] -name = "argh_derive" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be69f70ef5497dd6ab331a50bd95c6ac6b8f7f17a7967838332743fbd58dc3b5" -dependencies = [ - "argh_shared", - "heck 0.3.3", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "argh_shared" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6f8c380fa28aa1b36107cd97f0196474bb7241bb95a453c5c01a15ac74b2eac" - -[[package]] -name = "async-stream" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dad5c83079eae9969be7fadefe640a1c566901f05ff91ab221de4b6f68d9507e" -dependencies = [ - "async-stream-impl", - "futures-core", -] - -[[package]] -name = "async-stream-impl" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10f203db73a71dfa2fb6dd22763990fa26f3d2625a6da2da900d23b87d26be27" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "async-trait" -version = "0.1.56" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96cf8829f67d2eab0b2dfa42c5d0ef737e0724e4a82b01b3e292456202b19716" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "autocfg" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" - -[[package]] -name = "axum" -version = "0.5.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab2504b827a8bef941ba3dd64bdffe9cf56ca182908a147edd6189c95fbcae7d" -dependencies = [ - "async-trait", - "axum-core", - "bitflags", - "bytes", - 
"futures-util", - "http", - "http-body", - "hyper", - "itoa", - "matchit", - "memchr", - "mime", - "percent-encoding", - "pin-project-lite", - "serde", - "sync_wrapper", - "tokio", - "tower", - "tower-http", - "tower-layer", - "tower-service", -] - -[[package]] -name = "axum-core" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da31c0ed7b4690e2c78fe4b880d21cd7db04a346ebc658b4270251b695437f17" -dependencies = [ - "async-trait", - "bytes", - "futures-util", - "http", - "http-body", - "mime", -] - -[[package]] -name = "base64" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" - -[[package]] -name = "bitflags" -version = "1.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" - -[[package]] -name = "bytes" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4872d67bab6358e59559027aa3b9157c53d9358c51423c17554809a8858e0f8" - -[[package]] -name = "cc" -version = "1.0.73" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fff2a6927b3bb87f9595d67196a70493f627687a71d87a0d692242c33f58c11" -dependencies = [ - "jobserver", -] - -[[package]] -name = "cfg-if" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" - -[[package]] -name = "cmake" -version = "0.1.48" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8ad8cef104ac57b68b89df3208164d228503abbdce70f6880ffa3d970e7443a" -dependencies = [ - "cc", -] - -[[package]] -name = "either" -version = "1.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" - -[[package]] -name = "fastrand" -version = "1.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3fcf0cee53519c866c09b5de1f6c56ff9d647101f81c1964fa632e148896cdf" -dependencies = [ - "instant", -] - -[[package]] -name = "fixedbitset" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "279fb028e20b3c4c320317955b77c5e0c9701f05a1d309905d6fc702cdc5053e" - -[[package]] -name = "fnv" -version = "1.0.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" - -[[package]] -name = "form_urlencoded" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fc25a87fa4fd2094bffb06925852034d90a17f0d1e05197d4956d3555752191" -dependencies = [ - "matches", - "percent-encoding", -] - -[[package]] -name = "fuchsia-cprng" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba" - -[[package]] -name = "futures-channel" -version = "0.3.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3083ce4b914124575708913bca19bfe887522d6e2e6d0952943f5eac4a74010" -dependencies = [ - "futures-core", -] - -[[package]] -name = "futures-core" -version = "0.3.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c09fd04b7e4073ac7156a9539b57a484a8ea920f79c7c675d05d289ab6110d3" - -[[package]] -name = 
"futures-sink" -version = "0.3.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21163e139fa306126e6eedaf49ecdb4588f939600f0b1e770f4205ee4b7fa868" - -[[package]] -name = "futures-task" -version = "0.3.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57c66a976bf5909d801bbef33416c41372779507e7a6b3a5e25e4749c58f776a" - -[[package]] -name = "futures-util" -version = "0.3.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8b7abd5d659d9b90c8cba917f6ec750a74e2dc23902ef9cd4cc8c8b22e6036a" -dependencies = [ - "futures-core", - "futures-task", - "pin-project-lite", - "pin-utils", -] - -[[package]] -name = "getrandom" -version = "0.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9be70c98951c83b8d2f8f60d7065fa6d5146873094452a1008da8c2f1e4205ad" -dependencies = [ - "cfg-if", - "libc", - "wasi 0.10.2+wasi-snapshot-preview1", -] - -[[package]] -name = "git2" -version = "0.13.25" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f29229cc1b24c0e6062f6e742aa3e256492a5323365e5ed3413599f8a5eff7d6" -dependencies = [ - "bitflags", - "libc", - "libgit2-sys", - "log", - "openssl-probe", - "openssl-sys", - "url", -] - -[[package]] -name = "h2" -version = "0.3.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37a82c6d637fc9515a4694bbf1cb2457b79d81ce52b3108bdeea58b07dd34a57" -dependencies = [ - "bytes", - "fnv", - "futures-core", - "futures-sink", - "futures-util", - "http", - "indexmap", - "slab", - "tokio", - "tokio-util", - "tracing", -] - -[[package]] -name = "hashbrown" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e" - -[[package]] -name = "heck" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d621efb26863f0e9924c6ac577e8275e5e6b77455db64ffa6c65c904e9e132c" -dependencies = [ - "unicode-segmentation", -] - -[[package]] -name = "heck" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2540771e65fc8cb83cd6e8a237f70c319bd5c29f78ed1084ba5d50eeac86f7f9" - -[[package]] -name = "http" -version = "0.2.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75f43d41e26995c17e71ee126451dd3941010b0514a81a9d11f3b341debc2399" -dependencies = [ - "bytes", - "fnv", - "itoa", -] - -[[package]] -name = "http-body" -version = "0.4.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" -dependencies = [ - "bytes", - "http", - "pin-project-lite", -] - -[[package]] -name = "http-range-header" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bfe8eed0a9285ef776bb792479ea3834e8b94e13d615c2f66d03dd50a435a29" - -[[package]] -name = "httparse" -version = "1.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "496ce29bb5a52785b44e0f7ca2847ae0bb839c9bd28f69acac9b99d461c0c04c" - -[[package]] -name = "httpdate" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" - -[[package]] -name = "hyper" -version = "0.15.04" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"42dc3c131584288d375f2d07f822b0cb012d8c6fb899a5b9fdb3cb7eb9b6004f" -dependencies = [ - "bytes", - "futures-channel", - "futures-core", - "futures-util", - "h2", - "http", - "http-body", - "httparse", - "httpdate", - "itoa", - "pin-project-lite", - "socket2", - "tokio", - "tower-service", - "tracing", - "want", -] - -[[package]] -name = "hyper-timeout" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbb958482e8c7be4bc3cf272a766a2b0bf1a6755e7a6ae777f017a31d11b13b1" -dependencies = [ - "hyper", - "pin-project-lite", - "tokio", - "tokio-io-timeout", -] - -[[package]] -name = "ibc-proto-compiler" -version = "0.2.0" -dependencies = [ - "argh", - "git2", - "prost-build", - "tempdir", - "tonic", - "tonic-build", - "walkdir", -] - -[[package]] -name = "idna" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "418a0a6fab821475f634efe3ccc45c013f742efe03d853e8d3355d5cb850ecf8" -dependencies = [ - "matches", - "unicode-bidi", - "unicode-normalization", -] - -[[package]] -name = "indexmap" -version = "1.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6012d540c5baa3589337a98ce73408de9b5a25ec9fc2c6fd6be8f0d39e0ca5a" -dependencies = [ - "autocfg", - "hashbrown", -] - -[[package]] -name = "instant" -version = "0.1.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" -dependencies = [ - "cfg-if", -] - -[[package]] -name = "itertools" -version = "0.10.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9a9d19fa1e79b6215ff29b9d6880b706147f16e9b1dbb1e4e5947b5b02bc5e3" -dependencies = [ - "either", -] - -[[package]] -name = "itoa" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "112c678d4050afce233f4f2852bb2eb519230b3cf12f33585275537d7e41578d" - -[[package]] -name = "jobserver" -version = "0.1.24" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af25a77299a7f711a01975c35a6a424eb6862092cc2d6c72c4ed6cbc56dfc1fa" -dependencies = [ - "libc", -] - -[[package]] -name = "lazy_static" -version = "1.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" - -[[package]] -name = "libc" -version = "0.2.126" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "349d5a591cd28b49e1d1037471617a32ddcda5731b99419008085f72d5a53836" - -[[package]] -name = "libgit2-sys" -version = "0.12.26+1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19e1c899248e606fbfe68dcb31d8b0176ebab833b103824af31bddf4b7457494" -dependencies = [ - "cc", - "libc", - "libssh2-sys", - "libz-sys", - "openssl-sys", - "pkg-config", -] - -[[package]] -name = "libssh2-sys" -version = "0.2.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b094a36eb4b8b8c8a7b4b8ae43b2944502be3e59cd87687595cf6b0a71b3f4ca" -dependencies = [ - "cc", - "libc", - "libz-sys", - "openssl-sys", - "pkg-config", - "vcpkg", -] - -[[package]] -name = "libz-sys" -version = "1.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9702761c3935f8cc2f101793272e202c72b99da8f4224a19ddcf1279a6450bbf" -dependencies = [ - "cc", - "libc", - "pkg-config", - "vcpkg", -] - -[[package]] -name = "log" -version = "0.4.17" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e" -dependencies = [ - "cfg-if", -] - -[[package]] -name = "matches" -version = "0.1.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3e378b66a060d48947b590737b30a1be76706c8dd7b8ba0f2fe3989c68a853f" - -[[package]] -name = "matchit" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73cbba799671b762df5a175adf59ce145165747bb891505c43d09aefbbf38beb" - -[[package]] -name = "memchr" -version = "2.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" - -[[package]] -name = "mime" -version = "0.3.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" - -[[package]] -name = "mio" -version = "0.8.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "713d550d9b44d89174e066b7a6217ae06234c10cb47819a88290d2b353c31799" -dependencies = [ - "libc", - "log", - "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys", -] - -[[package]] -name = "multimap" -version = "0.8.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a" - -[[package]] -name = "once_cell" -version = "1.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7709cef83f0c1f58f666e746a08b21e0085f7440fa6a29cc194d68aac97a4225" - -[[package]] -name = "openssl-probe" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" - -[[package]] -name = "openssl-sys" -version = "0.9.74" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "835363342df5fba8354c5b453325b110ffd54044e588c539cf2f20a8014e4cb1" -dependencies = [ - "autocfg", - "cc", - "libc", - "pkg-config", - "vcpkg", -] - -[[package]] -name = "percent-encoding" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" - -[[package]] -name = "petgraph" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6d5014253a1331579ce62aa67443b4a658c5e7dd03d4bc6d302b94474888143" -dependencies = [ - "fixedbitset", - "indexmap", -] - -[[package]] -name = "pin-project" -version = "1.0.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58ad3879ad3baf4e44784bc6a718a8698867bb991f8ce24d1bcbe2cfb4c3a75e" -dependencies = [ - "pin-project-internal", -] - -[[package]] -name = "pin-project-internal" -version = "1.0.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "744b6f092ba29c3650faf274db506afd39944f48420f6c86b17cfe0ee1cb36bb" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "pin-project-lite" -version = "0.2.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0a7ae3ac2f1173085d398531c705756c94a4c56843785df85a60c1a0afac116" - -[[package]] -name = "pin-utils" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" - -[[package]] -name = "pkg-config" -version = 
"0.3.25" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1df8c4ec4b0627e53bdf214615ad287367e482558cf84b109250b37464dc03ae" - -[[package]] -name = "ppv-lite86" -version = "0.2.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb9f9e6e233e5c4a35559a617bf40a4ec447db2e84c20b55a6f83167b7e57872" - -[[package]] -name = "prettyplease" -version = "0.1.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f28f53e8b192565862cf99343194579a022eb9c7dd3a8d03134734803c7b3125" -dependencies = [ - "proc-macro2", - "syn", -] - -[[package]] -name = "proc-macro2" -version = "1.0.39" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c54b25569025b7fc9651de43004ae593a75ad88543b17178aa5e1b9c4f15f56f" -dependencies = [ - "unicode-ident", -] - -[[package]] -name = "prost" -version = "0.10.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71adf41db68aa0daaefc69bb30bcd68ded9b9abaad5d1fbb6304c4fb390e083e" -dependencies = [ - "bytes", - "prost-derive", -] - -[[package]] -name = "prost-build" -version = "0.10.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ae5a4388762d5815a9fc0dea33c56b021cdc8dde0c55e0c9ca57197254b0cab" -dependencies = [ - "bytes", - "cfg-if", - "cmake", - "heck 0.4.0", - "itertools", - "lazy_static", - "log", - "multimap", - "petgraph", - "prost", - "prost-types", - "regex", - "tempfile", - "which", -] - -[[package]] -name = "prost-derive" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b670f45da57fb8542ebdbb6105a925fe571b67f9e7ed9f47a06a84e72b4e7cc" -dependencies = [ - "anyhow", - "itertools", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "prost-types" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d0a014229361011dc8e69c8a1ec6c2e8d0f2af7c91e3ea3f5b2170298461e68" -dependencies = [ - "bytes", - "prost", -] - -[[package]] -name = "quote" -version = "1.0.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1feb54ed693b93a84e14094943b84b7c4eae204c512b7ccb95ab0c66d278ad1" -dependencies = [ - "proc-macro2", -] - -[[package]] -name = "rand" -version = "0.4.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "552840b97013b1a26992c11eac34bdd778e464601a4c2054b5f0bff7c6761293" -dependencies = [ - "fuchsia-cprng", - "libc", - "rand_core 0.3.1", - "rdrand", - "winapi", -] - -[[package]] -name = "rand" -version = "0.8.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" -dependencies = [ - "libc", - "rand_chacha", - "rand_core 0.6.3", -] - -[[package]] -name = "rand_chacha" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" -dependencies = [ - "ppv-lite86", - "rand_core 0.6.3", -] - -[[package]] -name = "rand_core" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a6fdeb83b075e8266dcc8762c22776f6877a63111121f5f8c7411e5be7eed4b" -dependencies = [ - "rand_core 0.4.2", -] - -[[package]] -name = "rand_core" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c33a3c44ca05fa6f1807d8e6743f3824e8509beca625669633be0acbdf509dc" - -[[package]] -name = "rand_core" 
-version = "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7" -dependencies = [ - "getrandom", -] - -[[package]] -name = "rdrand" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2" -dependencies = [ - "rand_core 0.3.1", -] - -[[package]] -name = "redox_syscall" -version = "0.2.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62f25bc4c7e55e0b0b7a1d43fb893f4fa1361d0abe38b9ce4f323c2adfe6ef42" -dependencies = [ - "bitflags", -] - -[[package]] -name = "regex" -version = "1.5.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d83f127d94bdbcda4c8cc2e50f6f84f4b611f69c902699ca385a39c3a75f9ff1" -dependencies = [ - "regex-syntax", -] - -[[package]] -name = "regex-syntax" -version = "0.6.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49b3de9ec5dc0a3417da371aab17d729997c15010e7fd24ff707773a33bddb64" - -[[package]] -name = "remove_dir_all" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" -dependencies = [ - "winapi", -] - -[[package]] -name = "same-file" -version = "1.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" -dependencies = [ - "winapi-util", -] - -[[package]] -name = "serde" -version = "1.0.137" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61ea8d54c77f8315140a05f4c7237403bf38b72704d031543aa1d16abbf517d1" - -[[package]] -name = "slab" -version = "0.4.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb703cfe953bccee95685111adeedb76fabe4e97549a58d16f03ea7b9367bb32" - -[[package]] -name = "socket2" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66d72b759436ae32898a2af0a14218dbf55efde3feeb170eb623637db85ee1e0" -dependencies = [ - "libc", - "winapi", -] - -[[package]] -name = "syn" -version = "1.0.96" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0748dd251e24453cb8717f0354206b91557e4ec8703673a4b30208f2abaf1ebf" -dependencies = [ - "proc-macro2", - "quote", - "unicode-ident", -] - -[[package]] -name = "sync_wrapper" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20518fe4a4c9acf048008599e464deb21beeae3d3578418951a189c235a7a9a8" - -[[package]] -name = "tempdir" -version = "0.3.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15f2b5fb00ccdf689e0149d1b1b3c03fead81c2b37735d812fa8bddbbf41b6d8" -dependencies = [ - "rand 0.4.6", - "remove_dir_all", -] - -[[package]] -name = "tempfile" -version = "3.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cdb1ef4eaeeaddc8fbd371e5017057064af0911902ef36b39801f67cc6d79e4" -dependencies = [ - "cfg-if", - "fastrand", - "libc", - "redox_syscall", - "remove_dir_all", - "winapi", -] - -[[package]] -name = "tinyvec" -version = "1.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" -dependencies = [ - "tinyvec_macros", -] - -[[package]] -name = "tinyvec_macros" -version = "0.1.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" - -[[package]] -name = "tokio" -version = "1.19.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c51a52ed6686dd62c320f9b89299e9dfb46f730c7a48e635c19f21d116cb1439" -dependencies = [ - "bytes", - "libc", - "memchr", - "mio", - "once_cell", - "pin-project-lite", - "socket2", - "tokio-macros", - "winapi", -] - -[[package]] -name = "tokio-io-timeout" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30b74022ada614a1b4834de765f9bb43877f910cc8ce4be40e89042c9223a8bf" -dependencies = [ - "pin-project-lite", - "tokio", -] - -[[package]] -name = "tokio-macros" -version = "1.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9724f9a975fb987ef7a3cd9be0350edcbe130698af5b8f7a631e23d42d052484" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "tokio-stream" -version = "0.1.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df54d54117d6fdc4e4fea40fe1e4e566b3505700e148a6827e59b34b0d2600d9" -dependencies = [ - "futures-core", - "pin-project-lite", - "tokio", -] - -[[package]] -name = "tokio-util" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc463cd8deddc3770d20f9852143d50bf6094e640b485cb2e189a2099085ff45" -dependencies = [ - "bytes", - "futures-core", - "futures-sink", - "pin-project-lite", - "tokio", - "tracing", -] - -[[package]] -name = "tonic" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5be9d60db39854b30b835107500cf0aca0b0d14d6e1c3de124217c23a29c2ddb" -dependencies = [ - "async-stream", - "async-trait", - "axum", - "base64", - "bytes", - "futures-core", - "futures-util", - "h2", - "http", - "http-body", - "hyper", - "hyper-timeout", - "percent-encoding", - "pin-project", - "prost", - "prost-derive", - "tokio", - "tokio-stream", - "tokio-util", - "tower", - "tower-layer", - "tower-service", - "tracing", - "tracing-futures", -] - -[[package]] -name = "tonic-build" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9263bf4c9bfaae7317c1c2faf7f18491d2fe476f70c414b73bf5d445b00ffa1" -dependencies = [ - "prettyplease", - "proc-macro2", - "prost-build", - "quote", - "syn", -] - -[[package]] -name = "tower" -version = "0.4.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a89fd63ad6adf737582df5db40d286574513c69a11dac5214dc3b5603d6713e" -dependencies = [ - "futures-core", - "futures-util", - "indexmap", - "pin-project", - "pin-project-lite", - "rand 0.8.5", - "slab", - "tokio", - "tokio-util", - "tower-layer", - "tower-service", - "tracing", -] - -[[package]] -name = "tower-http" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c530c8675c1dbf98facee631536fa116b5fb6382d7dd6dc1b118d970eafe3ba" -dependencies = [ - "bitflags", - "bytes", - "futures-core", - "futures-util", - "http", - "http-body", - "http-range-header", - "pin-project-lite", - "tower", - "tower-layer", - "tower-service", -] - -[[package]] -name = "tower-layer" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "343bc9466d3fe6b0f960ef45960509f84480bf4fd96f92901afe7ff3df9d3a62" - -[[package]] -name = "tower-service" -version = "0.3.1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6" - -[[package]] -name = "tracing" -version = "0.1.34" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d0ecdcb44a79f0fe9844f0c4f33a342cbcbb5117de8001e6ba0dc2351327d09" -dependencies = [ - "cfg-if", - "log", - "pin-project-lite", - "tracing-attributes", - "tracing-core", -] - -[[package]] -name = "tracing-attributes" -version = "0.1.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc6b8ad3567499f98a1db7a752b07a7c8c7c7c34c332ec00effb2b0027974b7c" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "tracing-core" -version = "0.1.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7709595b8878a4965ce5e87ebf880a7d39c9afc6837721b21a5a816a8117d921" -dependencies = [ - "once_cell", -] - -[[package]] -name = "tracing-futures" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97d095ae15e245a057c8e8451bab9b3ee1e1f68e9ba2b4fbc18d0ac5237835f2" -dependencies = [ - "pin-project", - "tracing", -] - -[[package]] -name = "try-lock" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" - -[[package]] -name = "unicode-bidi" -version = "0.3.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "099b7128301d285f79ddd55b9a83d5e6b9e97c92e0ea0daebee7263e932de992" - -[[package]] -name = "unicode-ident" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d22af068fba1eb5edcb4aea19d382b2a3deb4c8f9d475c589b6ada9e0fd493ee" - -[[package]] -name = "unicode-normalization" -version = "0.1.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d54590932941a9e9266f0832deed84ebe1bf2e4c9e4a3554d393d18f5e854bf9" -dependencies = [ - "tinyvec", -] - -[[package]] -name = "unicode-segmentation" -version = "1.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e8820f5d777f6224dc4be3632222971ac30164d4a258d595640799554ebfd99" - -[[package]] -name = "url" -version = "2.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a507c383b2d33b5fc35d1861e77e6b383d158b2da5e14fe51b83dfedf6fd578c" -dependencies = [ - "form_urlencoded", - "idna", - "matches", - "percent-encoding", -] - -[[package]] -name = "vcpkg" -version = "0.2.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" - -[[package]] -name = "walkdir" -version = "2.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "808cf2735cd4b6866113f648b791c6adc5714537bc222d9347bb203386ffda56" -dependencies = [ - "same-file", - "winapi", - "winapi-util", -] - -[[package]] -name = "want" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ce8a968cb1cd110d136ff8b819a556d6fb6d919363c61534f6860c7eb172ba0" -dependencies = [ - "log", - "try-lock", -] - -[[package]] -name = "wasi" -version = "0.10.2+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" - -[[package]] -name = "wasi" -version = "0.11.0+wasi-snapshot-preview1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" - -[[package]] -name = "which" -version = "4.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c4fb54e6113b6a8772ee41c3404fb0301ac79604489467e0a9ce1f3e97c24ae" -dependencies = [ - "either", - "lazy_static", - "libc", -] - -[[package]] -name = "winapi" -version = "0.3.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" -dependencies = [ - "winapi-i686-pc-windows-gnu", - "winapi-x86_64-pc-windows-gnu", -] - -[[package]] -name = "winapi-i686-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" - -[[package]] -name = "winapi-util" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" -dependencies = [ - "winapi", -] - -[[package]] -name = "winapi-x86_64-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" - -[[package]] -name = "windows-sys" -version = "0.36.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea04155a16a59f9eab786fe12a4a450e75cdb175f9e0d80da1e17db09f55b8d2" -dependencies = [ - "windows_aarch64_msvc", - "windows_i686_gnu", - "windows_i686_msvc", - "windows_x86_64_gnu", - "windows_x86_64_msvc", -] - -[[package]] -name = "windows_aarch64_msvc" -version = "0.36.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9bb8c3fd39ade2d67e9874ac4f3db21f0d710bee00fe7cab16949ec184eeaa47" - -[[package]] -name = "windows_i686_gnu" -version = "0.36.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "180e6ccf01daf4c426b846dfc66db1fc518f074baa793aa7d9b9aaeffad6a3b6" - -[[package]] -name = "windows_i686_msvc" -version = "0.36.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2e7917148b2812d1eeafaeb22a97e4813dfa60a3f8f78ebe204bcc88f12f024" - -[[package]] -name = "windows_x86_64_gnu" -version = "0.36.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4dcd171b8776c41b97521e5da127a2d86ad280114807d0b2ab1e462bc764d9e1" - -[[package]] -name = "windows_x86_64_msvc" -version = "0.36.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c811ca4a8c853ef420abd8592ba53ddbbac90410fab6903b3e79972a631f7680" diff --git a/proto/src/IBC_GO_COMMIT b/proto/src/IBC_GO_COMMIT index 1748fb8fa1..da922be27c 100644 --- a/proto/src/IBC_GO_COMMIT +++ b/proto/src/IBC_GO_COMMIT @@ -1 +1 @@ -c5d058f389c690d6846c36cb90ceed09c51adc9f +c46fd062166bbc478f169c6c57108c5615383729 diff --git a/proto/src/prost/ibc.applications.fee.v1.rs b/proto/src/prost/ibc.applications.fee.v1.rs new file mode 100644 index 0000000000..bdefcf0e4e --- /dev/null +++ b/proto/src/prost/ibc.applications.fee.v1.rs @@ -0,0 +1,1697 @@ +/// Fee defines the ICS29 receive, acknowledgement and timeout fees +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Fee { + /// the packet receive fee + #[prost(message, repeated, tag="1")] + pub recv_fee: ::prost::alloc::vec::Vec, + /// the packet acknowledgement fee + #[prost(message, repeated, tag="2")] + pub ack_fee: 
::prost::alloc::vec::Vec, + /// the packet timeout fee + #[prost(message, repeated, tag="3")] + pub timeout_fee: ::prost::alloc::vec::Vec, +} +/// PacketFee contains ICS29 relayer fees, refund address and optional list of permitted relayers +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct PacketFee { + /// fee encapsulates the recv, ack and timeout fees associated with an IBC packet + #[prost(message, optional, tag="1")] + pub fee: ::core::option::Option, + /// the refund address for unspent fees + #[prost(string, tag="2")] + pub refund_address: ::prost::alloc::string::String, + /// optional list of relayers permitted to receive fees + #[prost(string, repeated, tag="3")] + pub relayers: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, +} +/// PacketFees contains a list of type PacketFee +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct PacketFees { + /// list of packet fees + #[prost(message, repeated, tag="1")] + pub packet_fees: ::prost::alloc::vec::Vec, +} +/// IdentifiedPacketFees contains a list of type PacketFee and associated PacketId +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct IdentifiedPacketFees { + /// unique packet identifier comprised of the channel ID, port ID and sequence + #[prost(message, optional, tag="1")] + pub packet_id: ::core::option::Option, + /// list of packet fees + #[prost(message, repeated, tag="2")] + pub packet_fees: ::prost::alloc::vec::Vec, +} +/// MsgRegisterPayee defines the request type for the RegisterPayee rpc +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MsgRegisterPayee { + /// unique port identifier + #[prost(string, tag="1")] + pub port_id: ::prost::alloc::string::String, + /// unique channel identifier + #[prost(string, tag="2")] + pub channel_id: ::prost::alloc::string::String, + /// the relayer address + #[prost(string, tag="3")] + pub relayer: ::prost::alloc::string::String, + /// the payee address + #[prost(string, tag="4")] + pub payee: ::prost::alloc::string::String, +} +/// MsgRegisterPayeeResponse defines the response type for the RegisterPayee rpc +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MsgRegisterPayeeResponse { +} +/// MsgRegisterCounterpartyPayee defines the request type for the RegisterCounterpartyPayee rpc +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MsgRegisterCounterpartyPayee { + /// unique port identifier + #[prost(string, tag="1")] + pub port_id: ::prost::alloc::string::String, + /// unique channel identifier + #[prost(string, tag="2")] + pub channel_id: ::prost::alloc::string::String, + /// the relayer address + #[prost(string, tag="3")] + pub relayer: ::prost::alloc::string::String, + /// the counterparty payee address + #[prost(string, tag="4")] + pub counterparty_payee: ::prost::alloc::string::String, +} +/// MsgRegisterCounterpartyPayeeResponse defines the response type for the RegisterCounterpartyPayee rpc +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MsgRegisterCounterpartyPayeeResponse { +} +/// MsgPayPacketFee defines the request type for the PayPacketFee rpc +/// This Msg can be used to pay for a packet at the next sequence send & should be combined with the Msg that will be +/// paid for +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MsgPayPacketFee { + /// fee encapsulates the recv, ack and timeout fees associated with an IBC packet + #[prost(message, optional, tag="1")] + pub fee: ::core::option::Option, + /// the source port unique identifier + #[prost(string, tag="2")] + pub source_port_id: 
::prost::alloc::string::String, + /// the source channel unique identifer + #[prost(string, tag="3")] + pub source_channel_id: ::prost::alloc::string::String, + /// account address to refund fee if necessary + #[prost(string, tag="4")] + pub signer: ::prost::alloc::string::String, + /// optional list of relayers permitted to the receive packet fees + #[prost(string, repeated, tag="5")] + pub relayers: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, +} +/// MsgPayPacketFeeResponse defines the response type for the PayPacketFee rpc +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MsgPayPacketFeeResponse { +} +/// MsgPayPacketFeeAsync defines the request type for the PayPacketFeeAsync rpc +/// This Msg can be used to pay for a packet at a specified sequence (instead of the next sequence send) +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MsgPayPacketFeeAsync { + /// unique packet identifier comprised of the channel ID, port ID and sequence + #[prost(message, optional, tag="1")] + pub packet_id: ::core::option::Option, + /// the packet fee associated with a particular IBC packet + #[prost(message, optional, tag="2")] + pub packet_fee: ::core::option::Option, +} +/// MsgPayPacketFeeAsyncResponse defines the response type for the PayPacketFeeAsync rpc +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MsgPayPacketFeeAsyncResponse { +} +/// Generated client implementations. +#[cfg(feature = "client")] +pub mod msg_client { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + /// Msg defines the ICS29 Msg service. + #[derive(Debug, Clone)] + pub struct MsgClient { + inner: tonic::client::Grpc, + } + impl MsgClient { + /// Attempt to create a new client by connecting to a given endpoint. + pub async fn connect(dst: D) -> Result + where + D: std::convert::TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl MsgClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + Send + 'static, + ::Error: Into + Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> MsgClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + , + >>::Error: Into + Send + Sync, + { + MsgClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with `gzip`. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. + #[must_use] + pub fn send_gzip(mut self) -> Self { + self.inner = self.inner.send_gzip(); + self + } + /// Enable decompressing responses with `gzip`. + #[must_use] + pub fn accept_gzip(mut self) -> Self { + self.inner = self.inner.accept_gzip(); + self + } + /// RegisterPayee defines a rpc handler method for MsgRegisterPayee + /// RegisterPayee is called by the relayer on each channelEnd and allows them to set an optional + /// payee to which reverse and timeout relayer packet fees will be paid out. The payee should be registered on + /// the source chain from which packets originate as this is where fee distribution takes place. This function may be + /// called more than once by a relayer, in which case, the latest payee is always used. 
+ pub async fn register_payee( + &mut self, + request: impl tonic::IntoRequest, + ) -> Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/ibc.applications.fee.v1.Msg/RegisterPayee", + ); + self.inner.unary(request.into_request(), path, codec).await + } + /// RegisterCounterpartyPayee defines a rpc handler method for MsgRegisterCounterpartyPayee + /// RegisterCounterpartyPayee is called by the relayer on each channelEnd and allows them to specify the counterparty + /// payee address before relaying. This ensures they will be properly compensated for forward relaying since + /// the destination chain must include the registered counterparty payee address in the acknowledgement. This function + /// may be called more than once by a relayer, in which case, the latest counterparty payee address is always used. + pub async fn register_counterparty_payee( + &mut self, + request: impl tonic::IntoRequest, + ) -> Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/ibc.applications.fee.v1.Msg/RegisterCounterpartyPayee", + ); + self.inner.unary(request.into_request(), path, codec).await + } + /// PayPacketFee defines a rpc handler method for MsgPayPacketFee + /// PayPacketFee is an open callback that may be called by any module/user that wishes to escrow funds in order to + /// incentivize the relaying of the packet at the next sequence + /// NOTE: This method is intended to be used within a multi msg transaction, where the subsequent msg that follows + /// initiates the lifecycle of the incentivized packet + pub async fn pay_packet_fee( + &mut self, + request: impl tonic::IntoRequest, + ) -> Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/ibc.applications.fee.v1.Msg/PayPacketFee", + ); + self.inner.unary(request.into_request(), path, codec).await + } + /// PayPacketFeeAsync defines a rpc handler method for MsgPayPacketFeeAsync + /// PayPacketFeeAsync is an open callback that may be called by any module/user that wishes to escrow funds in order to + /// incentivize the relaying of a known packet (i.e. at a particular sequence) + pub async fn pay_packet_fee_async( + &mut self, + request: impl tonic::IntoRequest, + ) -> Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/ibc.applications.fee.v1.Msg/PayPacketFeeAsync", + ); + self.inner.unary(request.into_request(), path, codec).await + } + } +} +/// Generated server implementations. 
+#[cfg(feature = "server")] +pub mod msg_server { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + ///Generated trait containing gRPC methods that should be implemented for use with MsgServer. + #[async_trait] + pub trait Msg: Send + Sync + 'static { + /// RegisterPayee defines a rpc handler method for MsgRegisterPayee + /// RegisterPayee is called by the relayer on each channelEnd and allows them to set an optional + /// payee to which reverse and timeout relayer packet fees will be paid out. The payee should be registered on + /// the source chain from which packets originate as this is where fee distribution takes place. This function may be + /// called more than once by a relayer, in which case, the latest payee is always used. + async fn register_payee( + &self, + request: tonic::Request, + ) -> Result, tonic::Status>; + /// RegisterCounterpartyPayee defines a rpc handler method for MsgRegisterCounterpartyPayee + /// RegisterCounterpartyPayee is called by the relayer on each channelEnd and allows them to specify the counterparty + /// payee address before relaying. This ensures they will be properly compensated for forward relaying since + /// the destination chain must include the registered counterparty payee address in the acknowledgement. This function + /// may be called more than once by a relayer, in which case, the latest counterparty payee address is always used. + async fn register_counterparty_payee( + &self, + request: tonic::Request, + ) -> Result< + tonic::Response, + tonic::Status, + >; + /// PayPacketFee defines a rpc handler method for MsgPayPacketFee + /// PayPacketFee is an open callback that may be called by any module/user that wishes to escrow funds in order to + /// incentivize the relaying of the packet at the next sequence + /// NOTE: This method is intended to be used within a multi msg transaction, where the subsequent msg that follows + /// initiates the lifecycle of the incentivized packet + async fn pay_packet_fee( + &self, + request: tonic::Request, + ) -> Result, tonic::Status>; + /// PayPacketFeeAsync defines a rpc handler method for MsgPayPacketFeeAsync + /// PayPacketFeeAsync is an open callback that may be called by any module/user that wishes to escrow funds in order to + /// incentivize the relaying of a known packet (i.e. at a particular sequence) + async fn pay_packet_fee_async( + &self, + request: tonic::Request, + ) -> Result, tonic::Status>; + } + /// Msg defines the ICS29 Msg service. 
+ #[derive(Debug)] + pub struct MsgServer { + inner: _Inner, + accept_compression_encodings: (), + send_compression_encodings: (), + } + struct _Inner(Arc); + impl MsgServer { + pub fn new(inner: T) -> Self { + Self::from_arc(Arc::new(inner)) + } + pub fn from_arc(inner: Arc) -> Self { + let inner = _Inner(inner); + Self { + inner, + accept_compression_encodings: Default::default(), + send_compression_encodings: Default::default(), + } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> InterceptedService + where + F: tonic::service::Interceptor, + { + InterceptedService::new(Self::new(inner), interceptor) + } + } + impl tonic::codegen::Service> for MsgServer + where + T: Msg, + B: Body + Send + 'static, + B::Error: Into + Send + 'static, + { + type Response = http::Response; + type Error = std::convert::Infallible; + type Future = BoxFuture; + fn poll_ready( + &mut self, + _cx: &mut Context<'_>, + ) -> Poll> { + Poll::Ready(Ok(())) + } + fn call(&mut self, req: http::Request) -> Self::Future { + let inner = self.inner.clone(); + match req.uri().path() { + "/ibc.applications.fee.v1.Msg/RegisterPayee" => { + #[allow(non_camel_case_types)] + struct RegisterPayeeSvc(pub Arc); + impl tonic::server::UnaryService + for RegisterPayeeSvc { + type Response = super::MsgRegisterPayeeResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = self.0.clone(); + let fut = async move { + (*inner).register_payee(request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = RegisterPayeeSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/ibc.applications.fee.v1.Msg/RegisterCounterpartyPayee" => { + #[allow(non_camel_case_types)] + struct RegisterCounterpartyPayeeSvc(pub Arc); + impl< + T: Msg, + > tonic::server::UnaryService + for RegisterCounterpartyPayeeSvc { + type Response = super::MsgRegisterCounterpartyPayeeResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = self.0.clone(); + let fut = async move { + (*inner).register_counterparty_payee(request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = RegisterCounterpartyPayeeSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/ibc.applications.fee.v1.Msg/PayPacketFee" => { + #[allow(non_camel_case_types)] + struct PayPacketFeeSvc(pub Arc); + impl tonic::server::UnaryService + for PayPacketFeeSvc { + type Response = super::MsgPayPacketFeeResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + 
&mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = self.0.clone(); + let fut = async move { + (*inner).pay_packet_fee(request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = PayPacketFeeSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/ibc.applications.fee.v1.Msg/PayPacketFeeAsync" => { + #[allow(non_camel_case_types)] + struct PayPacketFeeAsyncSvc(pub Arc); + impl tonic::server::UnaryService + for PayPacketFeeAsyncSvc { + type Response = super::MsgPayPacketFeeAsyncResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = self.0.clone(); + let fut = async move { + (*inner).pay_packet_fee_async(request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = PayPacketFeeAsyncSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + _ => { + Box::pin(async move { + Ok( + http::Response::builder() + .status(200) + .header("grpc-status", "12") + .header("content-type", "application/grpc") + .body(empty_body()) + .unwrap(), + ) + }) + } + } + } + } + impl Clone for MsgServer { + fn clone(&self) -> Self { + let inner = self.inner.clone(); + Self { + inner, + accept_compression_encodings: self.accept_compression_encodings, + send_compression_encodings: self.send_compression_encodings, + } + } + } + impl Clone for _Inner { + fn clone(&self) -> Self { + Self(self.0.clone()) + } + } + impl std::fmt::Debug for _Inner { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{:?}", self.0) + } + } + impl tonic::transport::NamedService for MsgServer { + const NAME: &'static str = "ibc.applications.fee.v1.Msg"; + } +} +/// IncentivizedAcknowledgement is the acknowledgement format to be used by applications wrapped in the fee middleware +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct IncentivizedAcknowledgement { + /// the underlying app acknowledgement bytes + #[prost(bytes="vec", tag="1")] + pub app_acknowledgement: ::prost::alloc::vec::Vec, + /// the relayer address which submits the recv packet message + #[prost(string, tag="2")] + pub forward_relayer_address: ::prost::alloc::string::String, + /// success flag of the base application callback + #[prost(bool, tag="3")] + pub underlying_app_success: bool, +} +/// GenesisState defines the ICS29 fee middleware genesis state +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GenesisState { + /// list of identified packet fees + #[prost(message, repeated, tag="1")] + pub identified_fees: ::prost::alloc::vec::Vec, + /// list of fee enabled channels + #[prost(message, repeated, tag="2")] + 
pub fee_enabled_channels: ::prost::alloc::vec::Vec, + /// list of registered payees + #[prost(message, repeated, tag="3")] + pub registered_payees: ::prost::alloc::vec::Vec, + /// list of registered counterparty payees + #[prost(message, repeated, tag="4")] + pub registered_counterparty_payees: ::prost::alloc::vec::Vec, + /// list of forward relayer addresses + #[prost(message, repeated, tag="5")] + pub forward_relayers: ::prost::alloc::vec::Vec, +} +/// FeeEnabledChannel contains the PortID & ChannelID for a fee enabled channel +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct FeeEnabledChannel { + /// unique port identifier + #[prost(string, tag="1")] + pub port_id: ::prost::alloc::string::String, + /// unique channel identifier + #[prost(string, tag="2")] + pub channel_id: ::prost::alloc::string::String, +} +/// RegisteredPayee contains the relayer address and payee address for a specific channel +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct RegisteredPayee { + /// unique channel identifier + #[prost(string, tag="1")] + pub channel_id: ::prost::alloc::string::String, + /// the relayer address + #[prost(string, tag="2")] + pub relayer: ::prost::alloc::string::String, + /// the payee address + #[prost(string, tag="3")] + pub payee: ::prost::alloc::string::String, +} +/// RegisteredCounterpartyPayee contains the relayer address and counterparty payee address for a specific channel (used +/// for recv fee distribution) +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct RegisteredCounterpartyPayee { + /// unique channel identifier + #[prost(string, tag="1")] + pub channel_id: ::prost::alloc::string::String, + /// the relayer address + #[prost(string, tag="2")] + pub relayer: ::prost::alloc::string::String, + /// the counterparty payee address + #[prost(string, tag="3")] + pub counterparty_payee: ::prost::alloc::string::String, +} +/// ForwardRelayerAddress contains the forward relayer address and PacketId used for async acknowledgements +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ForwardRelayerAddress { + /// the forward relayer address + #[prost(string, tag="1")] + pub address: ::prost::alloc::string::String, + /// unique packet identifer comprised of the channel ID, port ID and sequence + #[prost(message, optional, tag="2")] + pub packet_id: ::core::option::Option, +} +/// QueryIncentivizedPacketsRequest defines the request type for the IncentivizedPackets rpc +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct QueryIncentivizedPacketsRequest { + /// pagination defines an optional pagination for the request. 
+ #[prost(message, optional, tag="1")] + pub pagination: ::core::option::Option, + /// block height at which to query + #[prost(uint64, tag="2")] + pub query_height: u64, +} +/// QueryIncentivizedPacketsResponse defines the response type for the IncentivizedPackets rpc +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct QueryIncentivizedPacketsResponse { + /// list of identified fees for incentivized packets + #[prost(message, repeated, tag="1")] + pub incentivized_packets: ::prost::alloc::vec::Vec, +} +/// QueryIncentivizedPacketRequest defines the request type for the IncentivizedPacket rpc +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct QueryIncentivizedPacketRequest { + /// unique packet identifier comprised of channel ID, port ID and sequence + #[prost(message, optional, tag="1")] + pub packet_id: ::core::option::Option, + /// block height at which to query + #[prost(uint64, tag="2")] + pub query_height: u64, +} +/// QueryIncentivizedPacketsResponse defines the response type for the IncentivizedPacket rpc +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct QueryIncentivizedPacketResponse { + /// the identified fees for the incentivized packet + #[prost(message, optional, tag="1")] + pub incentivized_packet: ::core::option::Option, +} +/// QueryIncentivizedPacketsForChannelRequest defines the request type for querying for all incentivized packets +/// for a specific channel +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct QueryIncentivizedPacketsForChannelRequest { + /// pagination defines an optional pagination for the request. + #[prost(message, optional, tag="1")] + pub pagination: ::core::option::Option, + #[prost(string, tag="2")] + pub port_id: ::prost::alloc::string::String, + #[prost(string, tag="3")] + pub channel_id: ::prost::alloc::string::String, + /// Height to query at + #[prost(uint64, tag="4")] + pub query_height: u64, +} +/// QueryIncentivizedPacketsResponse defines the response type for the incentivized packets RPC +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct QueryIncentivizedPacketsForChannelResponse { + /// Map of all incentivized_packets + #[prost(message, repeated, tag="1")] + pub incentivized_packets: ::prost::alloc::vec::Vec, +} +/// QueryTotalRecvFeesRequest defines the request type for the TotalRecvFees rpc +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct QueryTotalRecvFeesRequest { + /// the packet identifier for the associated fees + #[prost(message, optional, tag="1")] + pub packet_id: ::core::option::Option, +} +/// QueryTotalRecvFeesResponse defines the response type for the TotalRecvFees rpc +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct QueryTotalRecvFeesResponse { + /// the total packet receive fees + #[prost(message, repeated, tag="1")] + pub recv_fees: ::prost::alloc::vec::Vec, +} +/// QueryTotalAckFeesRequest defines the request type for the TotalAckFees rpc +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct QueryTotalAckFeesRequest { + /// the packet identifier for the associated fees + #[prost(message, optional, tag="1")] + pub packet_id: ::core::option::Option, +} +/// QueryTotalAckFeesResponse defines the response type for the TotalAckFees rpc +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct QueryTotalAckFeesResponse { + /// the total packet acknowledgement fees + #[prost(message, repeated, tag="1")] + pub ack_fees: ::prost::alloc::vec::Vec, +} +/// QueryTotalTimeoutFeesRequest defines the request type for the TotalTimeoutFees rpc +#[derive(Clone, PartialEq, 
::prost::Message)] +pub struct QueryTotalTimeoutFeesRequest { + /// the packet identifier for the associated fees + #[prost(message, optional, tag="1")] + pub packet_id: ::core::option::Option, +} +/// QueryTotalTimeoutFeesResponse defines the response type for the TotalTimeoutFees rpc +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct QueryTotalTimeoutFeesResponse { + /// the total packet timeout fees + #[prost(message, repeated, tag="1")] + pub timeout_fees: ::prost::alloc::vec::Vec, +} +/// QueryPayeeRequest defines the request type for the Payee rpc +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct QueryPayeeRequest { + /// unique channel identifier + #[prost(string, tag="1")] + pub channel_id: ::prost::alloc::string::String, + /// the relayer address to which the distribution address is registered + #[prost(string, tag="2")] + pub relayer: ::prost::alloc::string::String, +} +/// QueryPayeeResponse defines the response type for the Payee rpc +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct QueryPayeeResponse { + /// the payee address to which packet fees are paid out + #[prost(string, tag="1")] + pub payee_address: ::prost::alloc::string::String, +} +/// QueryCounterpartyPayeeRequest defines the request type for the CounterpartyPayee rpc +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct QueryCounterpartyPayeeRequest { + /// unique channel identifier + #[prost(string, tag="1")] + pub channel_id: ::prost::alloc::string::String, + /// the relayer address to which the counterparty is registered + #[prost(string, tag="2")] + pub relayer: ::prost::alloc::string::String, +} +/// QueryCounterpartyPayeeResponse defines the response type for the CounterpartyPayee rpc +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct QueryCounterpartyPayeeResponse { + /// the counterparty payee address used to compensate forward relaying + #[prost(string, tag="1")] + pub counterparty_payee: ::prost::alloc::string::String, +} +/// QueryFeeEnabledChannelsRequest defines the request type for the FeeEnabledChannels rpc +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct QueryFeeEnabledChannelsRequest { + /// pagination defines an optional pagination for the request. + #[prost(message, optional, tag="1")] + pub pagination: ::core::option::Option, + /// block height at which to query + #[prost(uint64, tag="2")] + pub query_height: u64, +} +/// QueryFeeEnabledChannelsResponse defines the response type for the FeeEnabledChannels rpc +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct QueryFeeEnabledChannelsResponse { + /// list of fee enabled channels + #[prost(message, repeated, tag="1")] + pub fee_enabled_channels: ::prost::alloc::vec::Vec, +} +/// QueryFeeEnabledChannelRequest defines the request type for the FeeEnabledChannel rpc +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct QueryFeeEnabledChannelRequest { + /// unique port identifier + #[prost(string, tag="1")] + pub port_id: ::prost::alloc::string::String, + /// unique channel identifier + #[prost(string, tag="2")] + pub channel_id: ::prost::alloc::string::String, +} +/// QueryFeeEnabledChannelResponse defines the response type for the FeeEnabledChannel rpc +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct QueryFeeEnabledChannelResponse { + /// boolean flag representing the fee enabled channel status + #[prost(bool, tag="1")] + pub fee_enabled: bool, +} +/// Generated client implementations. 
+#[cfg(feature = "client")] +pub mod query_client { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + /// Query defines the ICS29 gRPC querier service. + #[derive(Debug, Clone)] + pub struct QueryClient { + inner: tonic::client::Grpc, + } + impl QueryClient { + /// Attempt to create a new client by connecting to a given endpoint. + pub async fn connect(dst: D) -> Result + where + D: std::convert::TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl QueryClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + Send + 'static, + ::Error: Into + Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> QueryClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + , + >>::Error: Into + Send + Sync, + { + QueryClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with `gzip`. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. + #[must_use] + pub fn send_gzip(mut self) -> Self { + self.inner = self.inner.send_gzip(); + self + } + /// Enable decompressing responses with `gzip`. + #[must_use] + pub fn accept_gzip(mut self) -> Self { + self.inner = self.inner.accept_gzip(); + self + } + /// IncentivizedPackets returns all incentivized packets and their associated fees + pub async fn incentivized_packets( + &mut self, + request: impl tonic::IntoRequest, + ) -> Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/ibc.applications.fee.v1.Query/IncentivizedPackets", + ); + self.inner.unary(request.into_request(), path, codec).await + } + /// IncentivizedPacket returns all packet fees for a packet given its identifier + pub async fn incentivized_packet( + &mut self, + request: impl tonic::IntoRequest, + ) -> Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/ibc.applications.fee.v1.Query/IncentivizedPacket", + ); + self.inner.unary(request.into_request(), path, codec).await + } + /// Gets all incentivized packets for a specific channel + pub async fn incentivized_packets_for_channel( + &mut self, + request: impl tonic::IntoRequest< + super::QueryIncentivizedPacketsForChannelRequest, + >, + ) -> Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/ibc.applications.fee.v1.Query/IncentivizedPacketsForChannel", + ); + self.inner.unary(request.into_request(), path, codec).await + } + /// TotalRecvFees returns the total receive fees for a packet given 
its identifier + pub async fn total_recv_fees( + &mut self, + request: impl tonic::IntoRequest, + ) -> Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/ibc.applications.fee.v1.Query/TotalRecvFees", + ); + self.inner.unary(request.into_request(), path, codec).await + } + /// TotalAckFees returns the total acknowledgement fees for a packet given its identifier + pub async fn total_ack_fees( + &mut self, + request: impl tonic::IntoRequest, + ) -> Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/ibc.applications.fee.v1.Query/TotalAckFees", + ); + self.inner.unary(request.into_request(), path, codec).await + } + /// TotalTimeoutFees returns the total timeout fees for a packet given its identifier + pub async fn total_timeout_fees( + &mut self, + request: impl tonic::IntoRequest, + ) -> Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/ibc.applications.fee.v1.Query/TotalTimeoutFees", + ); + self.inner.unary(request.into_request(), path, codec).await + } + /// Payee returns the registered payee address for a specific channel given the relayer address + pub async fn payee( + &mut self, + request: impl tonic::IntoRequest, + ) -> Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/ibc.applications.fee.v1.Query/Payee", + ); + self.inner.unary(request.into_request(), path, codec).await + } + /// CounterpartyPayee returns the registered counterparty payee for forward relaying + pub async fn counterparty_payee( + &mut self, + request: impl tonic::IntoRequest, + ) -> Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/ibc.applications.fee.v1.Query/CounterpartyPayee", + ); + self.inner.unary(request.into_request(), path, codec).await + } + /// FeeEnabledChannels returns a list of all fee enabled channels + pub async fn fee_enabled_channels( + &mut self, + request: impl tonic::IntoRequest, + ) -> Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/ibc.applications.fee.v1.Query/FeeEnabledChannels", + ); + self.inner.unary(request.into_request(), path, codec).await + } + /// FeeEnabledChannel returns true if the provided port and channel 
identifiers belong to a fee enabled channel + pub async fn fee_enabled_channel( + &mut self, + request: impl tonic::IntoRequest, + ) -> Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/ibc.applications.fee.v1.Query/FeeEnabledChannel", + ); + self.inner.unary(request.into_request(), path, codec).await + } + } +} +/// Generated server implementations. +#[cfg(feature = "server")] +pub mod query_server { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + ///Generated trait containing gRPC methods that should be implemented for use with QueryServer. + #[async_trait] + pub trait Query: Send + Sync + 'static { + /// IncentivizedPackets returns all incentivized packets and their associated fees + async fn incentivized_packets( + &self, + request: tonic::Request, + ) -> Result< + tonic::Response, + tonic::Status, + >; + /// IncentivizedPacket returns all packet fees for a packet given its identifier + async fn incentivized_packet( + &self, + request: tonic::Request, + ) -> Result< + tonic::Response, + tonic::Status, + >; + /// Gets all incentivized packets for a specific channel + async fn incentivized_packets_for_channel( + &self, + request: tonic::Request, + ) -> Result< + tonic::Response, + tonic::Status, + >; + /// TotalRecvFees returns the total receive fees for a packet given its identifier + async fn total_recv_fees( + &self, + request: tonic::Request, + ) -> Result, tonic::Status>; + /// TotalAckFees returns the total acknowledgement fees for a packet given its identifier + async fn total_ack_fees( + &self, + request: tonic::Request, + ) -> Result, tonic::Status>; + /// TotalTimeoutFees returns the total timeout fees for a packet given its identifier + async fn total_timeout_fees( + &self, + request: tonic::Request, + ) -> Result< + tonic::Response, + tonic::Status, + >; + /// Payee returns the registered payee address for a specific channel given the relayer address + async fn payee( + &self, + request: tonic::Request, + ) -> Result, tonic::Status>; + /// CounterpartyPayee returns the registered counterparty payee for forward relaying + async fn counterparty_payee( + &self, + request: tonic::Request, + ) -> Result< + tonic::Response, + tonic::Status, + >; + /// FeeEnabledChannels returns a list of all fee enabled channels + async fn fee_enabled_channels( + &self, + request: tonic::Request, + ) -> Result< + tonic::Response, + tonic::Status, + >; + /// FeeEnabledChannel returns true if the provided port and channel identifiers belong to a fee enabled channel + async fn fee_enabled_channel( + &self, + request: tonic::Request, + ) -> Result< + tonic::Response, + tonic::Status, + >; + } + /// Query defines the ICS29 gRPC querier service. 
+ #[derive(Debug)] + pub struct QueryServer { + inner: _Inner, + accept_compression_encodings: (), + send_compression_encodings: (), + } + struct _Inner(Arc); + impl QueryServer { + pub fn new(inner: T) -> Self { + Self::from_arc(Arc::new(inner)) + } + pub fn from_arc(inner: Arc) -> Self { + let inner = _Inner(inner); + Self { + inner, + accept_compression_encodings: Default::default(), + send_compression_encodings: Default::default(), + } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> InterceptedService + where + F: tonic::service::Interceptor, + { + InterceptedService::new(Self::new(inner), interceptor) + } + } + impl tonic::codegen::Service> for QueryServer + where + T: Query, + B: Body + Send + 'static, + B::Error: Into + Send + 'static, + { + type Response = http::Response; + type Error = std::convert::Infallible; + type Future = BoxFuture; + fn poll_ready( + &mut self, + _cx: &mut Context<'_>, + ) -> Poll> { + Poll::Ready(Ok(())) + } + fn call(&mut self, req: http::Request) -> Self::Future { + let inner = self.inner.clone(); + match req.uri().path() { + "/ibc.applications.fee.v1.Query/IncentivizedPackets" => { + #[allow(non_camel_case_types)] + struct IncentivizedPacketsSvc(pub Arc); + impl< + T: Query, + > tonic::server::UnaryService + for IncentivizedPacketsSvc { + type Response = super::QueryIncentivizedPacketsResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::QueryIncentivizedPacketsRequest, + >, + ) -> Self::Future { + let inner = self.0.clone(); + let fut = async move { + (*inner).incentivized_packets(request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = IncentivizedPacketsSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/ibc.applications.fee.v1.Query/IncentivizedPacket" => { + #[allow(non_camel_case_types)] + struct IncentivizedPacketSvc(pub Arc); + impl< + T: Query, + > tonic::server::UnaryService + for IncentivizedPacketSvc { + type Response = super::QueryIncentivizedPacketResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::QueryIncentivizedPacketRequest, + >, + ) -> Self::Future { + let inner = self.0.clone(); + let fut = async move { + (*inner).incentivized_packet(request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = IncentivizedPacketSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/ibc.applications.fee.v1.Query/IncentivizedPacketsForChannel" => { + #[allow(non_camel_case_types)] + struct IncentivizedPacketsForChannelSvc(pub Arc); + impl< + T: Query, + > 
tonic::server::UnaryService< + super::QueryIncentivizedPacketsForChannelRequest, + > for IncentivizedPacketsForChannelSvc { + type Response = super::QueryIncentivizedPacketsForChannelResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::QueryIncentivizedPacketsForChannelRequest, + >, + ) -> Self::Future { + let inner = self.0.clone(); + let fut = async move { + (*inner).incentivized_packets_for_channel(request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = IncentivizedPacketsForChannelSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/ibc.applications.fee.v1.Query/TotalRecvFees" => { + #[allow(non_camel_case_types)] + struct TotalRecvFeesSvc(pub Arc); + impl< + T: Query, + > tonic::server::UnaryService + for TotalRecvFeesSvc { + type Response = super::QueryTotalRecvFeesResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = self.0.clone(); + let fut = async move { + (*inner).total_recv_fees(request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = TotalRecvFeesSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/ibc.applications.fee.v1.Query/TotalAckFees" => { + #[allow(non_camel_case_types)] + struct TotalAckFeesSvc(pub Arc); + impl< + T: Query, + > tonic::server::UnaryService + for TotalAckFeesSvc { + type Response = super::QueryTotalAckFeesResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = self.0.clone(); + let fut = async move { + (*inner).total_ack_fees(request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = TotalAckFeesSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/ibc.applications.fee.v1.Query/TotalTimeoutFees" => { + #[allow(non_camel_case_types)] + struct TotalTimeoutFeesSvc(pub Arc); + impl< + T: Query, + > tonic::server::UnaryService + for TotalTimeoutFeesSvc { + type Response = super::QueryTotalTimeoutFeesResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + 
request: tonic::Request, + ) -> Self::Future { + let inner = self.0.clone(); + let fut = async move { + (*inner).total_timeout_fees(request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = TotalTimeoutFeesSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/ibc.applications.fee.v1.Query/Payee" => { + #[allow(non_camel_case_types)] + struct PayeeSvc(pub Arc); + impl tonic::server::UnaryService + for PayeeSvc { + type Response = super::QueryPayeeResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = self.0.clone(); + let fut = async move { (*inner).payee(request).await }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = PayeeSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/ibc.applications.fee.v1.Query/CounterpartyPayee" => { + #[allow(non_camel_case_types)] + struct CounterpartyPayeeSvc(pub Arc); + impl< + T: Query, + > tonic::server::UnaryService + for CounterpartyPayeeSvc { + type Response = super::QueryCounterpartyPayeeResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = self.0.clone(); + let fut = async move { + (*inner).counterparty_payee(request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = CounterpartyPayeeSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/ibc.applications.fee.v1.Query/FeeEnabledChannels" => { + #[allow(non_camel_case_types)] + struct FeeEnabledChannelsSvc(pub Arc); + impl< + T: Query, + > tonic::server::UnaryService + for FeeEnabledChannelsSvc { + type Response = super::QueryFeeEnabledChannelsResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::QueryFeeEnabledChannelsRequest, + >, + ) -> Self::Future { + let inner = self.0.clone(); + let fut = async move { + (*inner).fee_enabled_channels(request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let inner = self.inner.clone(); + let fut 
= async move { + let inner = inner.0; + let method = FeeEnabledChannelsSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/ibc.applications.fee.v1.Query/FeeEnabledChannel" => { + #[allow(non_camel_case_types)] + struct FeeEnabledChannelSvc(pub Arc); + impl< + T: Query, + > tonic::server::UnaryService + for FeeEnabledChannelSvc { + type Response = super::QueryFeeEnabledChannelResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = self.0.clone(); + let fut = async move { + (*inner).fee_enabled_channel(request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = FeeEnabledChannelSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + _ => { + Box::pin(async move { + Ok( + http::Response::builder() + .status(200) + .header("grpc-status", "12") + .header("content-type", "application/grpc") + .body(empty_body()) + .unwrap(), + ) + }) + } + } + } + } + impl Clone for QueryServer { + fn clone(&self) -> Self { + let inner = self.inner.clone(); + Self { + inner, + accept_compression_encodings: self.accept_compression_encodings, + send_compression_encodings: self.send_compression_encodings, + } + } + } + impl Clone for _Inner { + fn clone(&self) -> Self { + Self(self.0.clone()) + } + } + impl std::fmt::Debug for _Inner { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{:?}", self.0) + } + } + impl tonic::transport::NamedService for QueryServer { + const NAME: &'static str = "ibc.applications.fee.v1.Query"; + } +} +/// Metadata defines the ICS29 channel specific metadata encoded into the channel version bytestring +/// See ICS004: +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Metadata { + /// fee_version defines the ICS29 fee version + #[prost(string, tag="1")] + pub fee_version: ::prost::alloc::string::String, + /// app_version defines the underlying application version, which may or may not be a JSON encoded bytestring + #[prost(string, tag="2")] + pub app_version: ::prost::alloc::string::String, +} diff --git a/proto/src/prost/ibc.applications.transfer.v1.rs b/proto/src/prost/ibc.applications.transfer.v1.rs index a69c10dc6f..88037983e1 100644 --- a/proto/src/prost/ibc.applications.transfer.v1.rs +++ b/proto/src/prost/ibc.applications.transfer.v1.rs @@ -292,7 +292,7 @@ pub struct Params { #[derive(::serde::Serialize, ::serde::Deserialize)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct QueryDenomTraceRequest { - /// hash (in hex format) of the denomination trace information. + /// hash (in hex format) or denom (full denom with ibc prefix) of the denomination trace information. 
#[prost(string, tag="1")] pub hash: ::prost::alloc::string::String, } @@ -344,7 +344,7 @@ pub struct QueryParamsResponse { #[derive(::serde::Serialize, ::serde::Deserialize)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct QueryDenomHashRequest { - /// The denomination trace (\[port_id\]/\[channel_id\])+/\[denom\] + /// The denomination trace (\[port_id]/[channel_id])+/[denom\] #[prost(string, tag="1")] pub trace: ::prost::alloc::string::String, } @@ -357,6 +357,25 @@ pub struct QueryDenomHashResponse { #[prost(string, tag="1")] pub hash: ::prost::alloc::string::String, } +/// QueryEscrowAddressRequest is the request type for the EscrowAddress RPC method. +#[derive(::serde::Serialize, ::serde::Deserialize)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct QueryEscrowAddressRequest { + /// unique port identifier + #[prost(string, tag="1")] + pub port_id: ::prost::alloc::string::String, + /// unique channel identifier + #[prost(string, tag="2")] + pub channel_id: ::prost::alloc::string::String, +} +/// QueryEscrowAddressResponse is the response type of the EscrowAddress RPC method. +#[derive(::serde::Serialize, ::serde::Deserialize)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct QueryEscrowAddressResponse { + /// the escrow account address + #[prost(string, tag="1")] + pub escrow_address: ::prost::alloc::string::String, +} /// Generated client implementations. #[cfg(feature = "client")] pub mod query_client { @@ -503,6 +522,26 @@ pub mod query_client { ); self.inner.unary(request.into_request(), path, codec).await } + /// EscrowAddress returns the escrow address for a particular port and channel id. + pub async fn escrow_address( + &mut self, + request: impl tonic::IntoRequest, + ) -> Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/ibc.applications.transfer.v1.Query/EscrowAddress", + ); + self.inner.unary(request.into_request(), path, codec).await + } } } /// Generated server implementations. @@ -533,6 +572,11 @@ pub mod query_server { &self, request: tonic::Request, ) -> Result, tonic::Status>; + /// EscrowAddress returns the escrow address for a particular port and channel id. + async fn escrow_address( + &self, + request: tonic::Request, + ) -> Result, tonic::Status>; } /// Query provides defines the gRPC querier service. 
#[derive(Debug)] @@ -734,6 +778,46 @@ pub mod query_server { }; Box::pin(fut) } + "/ibc.applications.transfer.v1.Query/EscrowAddress" => { + #[allow(non_camel_case_types)] + struct EscrowAddressSvc(pub Arc); + impl< + T: Query, + > tonic::server::UnaryService + for EscrowAddressSvc { + type Response = super::QueryEscrowAddressResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = self.0.clone(); + let fut = async move { + (*inner).escrow_address(request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = EscrowAddressSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } _ => { Box::pin(async move { Ok( diff --git a/proto/src/prost/ibc.lightclients.beefy.v1.rs b/proto/src/prost/ibc.lightclients.beefy.v1.rs index 8fae30ad66..7824ca14b2 100644 --- a/proto/src/prost/ibc.lightclients.beefy.v1.rs +++ b/proto/src/prost/ibc.lightclients.beefy.v1.rs @@ -11,15 +11,24 @@ pub struct ClientState { /// Block height when the client was frozen due to a misbehaviour #[prost(uint64, tag="3")] pub frozen_height: u64, + //// Known relay chains + #[prost(enumeration="RelayChain", tag="4")] + pub relay_chain: i32, + //// ParaId of associated parachain + #[prost(uint32, tag="5")] + pub para_id: u32, + //// latest parachain height + #[prost(uint32, tag="6")] + pub latest_para_height: u32, /// block number that the beefy protocol was activated on the relay chain. /// This should be the first block in the merkle-mountain-range tree. - #[prost(uint32, tag="4")] + #[prost(uint32, tag="7")] pub beefy_activation_block: u32, /// authorities for the current round - #[prost(message, optional, tag="5")] + #[prost(message, optional, tag="8")] pub authority: ::core::option::Option, /// authorities for the next round - #[prost(message, optional, tag="6")] + #[prost(message, optional, tag="9")] pub next_authority_set: ::core::option::Option, } /// Actual payload items @@ -126,26 +135,23 @@ pub struct ParachainHeader { /// scale-encoded parachain header bytes #[prost(bytes="vec", tag="1")] pub parachain_header: ::prost::alloc::vec::Vec, - /// reconstructed MmrLeaf, see beefy-go spec + /// see beefy-go spec #[prost(message, optional, tag="2")] pub mmr_leaf_partial: ::core::option::Option, - /// para_id of the header. 
-    #[prost(uint32, tag="3")]
-    pub para_id: u32,
     /// proofs for our header in the parachain heads root
-    #[prost(bytes="vec", repeated, tag="4")]
+    #[prost(bytes="vec", repeated, tag="3")]
     pub parachain_heads_proof: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec>,
     /// leaf index for parachain heads proof
-    #[prost(uint32, tag="5")]
+    #[prost(uint32, tag="4")]
     pub heads_leaf_index: u32,
     /// total number of para heads in parachain_heads_root
-    #[prost(uint32, tag="6")]
+    #[prost(uint32, tag="5")]
     pub heads_total_count: u32,
     /// trie merkle proof of inclusion in header.extrinsic_root
-    #[prost(bytes="vec", repeated, tag="7")]
+    #[prost(bytes="vec", repeated, tag="6")]
     pub extrinsic_proof: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec>,
     /// the actual timestamp extrinsic
-    #[prost(bytes="vec", tag="8")]
+    #[prost(bytes="vec", tag="7")]
     pub timestamp_extrinsic: ::prost::alloc::vec::Vec,
 }
 /// Partial data for MmrLeaf
@@ -196,3 +202,10 @@ pub struct BeefyMmrLeaf {
     #[prost(bytes="vec", tag="5")]
     pub parachain_heads: ::prost::alloc::vec::Vec,
 }
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
+#[repr(i32)]
+pub enum RelayChain {
+    Polkadot = 0,
+    Kusama = 1,
+    Rococo = 2,
+}
diff --git a/proto/src/prost/ics23.rs b/proto/src/prost/ics23.rs
index 2c25a3c12a..c5780d970f 100644
--- a/proto/src/prost/ics23.rs
+++ b/proto/src/prost/ics23.rs
@@ -292,6 +292,7 @@ pub enum HashOp {
     Ripemd160 = 4,
     /// ripemd160(sha256(x))
     Bitcoin = 5,
+    Sha512256 = 6,
 }
 ///*
 ///LengthOp defines how to process the key and value of the LeafOp
diff --git a/scripts/sync-protobuf.sh b/scripts/sync-protobuf.sh
new file mode 100755
index 0000000000..25c551522d
--- /dev/null
+++ b/scripts/sync-protobuf.sh
@@ -0,0 +1,121 @@
+#!/usr/bin/env bash
+
+set -eou pipefail
+
+# sync-protobuf.sh is a bash script to sync the protobuf
+# files using the proto-compiler project. It will check
+# out the protobuf files from the git versions specified in
+# proto/src/COSMOS_SDK_COMMIT and
+# proto/src/IBC_GO_COMMIT. If you want to sync
+# the protobuf files to a newer version, modify the
+# relevant files with the new commit IDs.
+
+# This script should be run from the root directory of ibc-rs
+
+# We can specify where to clone the git repositories
+# for cosmos-sdk and ibc-go. By default they are cloned
+# under ${XDG_CACHE_HOME:-$HOME/.cache}.
+# We can override this to existing directories
+# that already have a clone of the repositories,
+# so that there is no need to clone the entire
+# repositories over and over again every time
+# the script is called.
+
+CACHE_PATH="${XDG_CACHE_HOME:-$HOME/.cache}"
+COSMOS_SDK_GIT="${COSMOS_SDK_GIT:-$CACHE_PATH/cosmos/cosmos-sdk.git}"
+IBC_GO_GIT="${IBC_GO_GIT:-$CACHE_PATH/ibc-go.git}"
+
+
+COSMOS_SDK_COMMIT="$(cat proto/src/COSMOS_SDK_COMMIT)"
+IBC_GO_COMMIT="$(cat proto/src/IBC_GO_COMMIT)"
+
+echo "COSMOS_SDK_COMMIT: $COSMOS_SDK_COMMIT"
+echo "IBC_GO_COMMIT: $IBC_GO_COMMIT"
+
+# Use either the --sdk-commit flag for a commit ID,
+# or --sdk-tag for a git tag, because proto-compiler
+# cannot detect on its own which kind of reference it was given.
+
+if [[ "$COSMOS_SDK_COMMIT" =~ ^[a-zA-Z0-9]{40}$ ]]
+then
+    SDK_COMMIT_OPTION="--sdk-commit"
+else
+    SDK_COMMIT_OPTION="--sdk-tag"
+fi
+
+# If the git directories do not exist, clone them as
+# bare git repositories so that no local modification
+# can be done there.
+
+if [[ ! -e "$COSMOS_SDK_GIT" ]]
+then
+    echo "Cloning cosmos-sdk source code as a bare git repository to $COSMOS_SDK_GIT"
+    git clone --mirror https://github.com/cosmos/cosmos-sdk.git "$COSMOS_SDK_GIT"
+else
+    echo "Using existing cosmos-sdk bare git repository at $COSMOS_SDK_GIT"
+fi
+
+if [[ ! -e "$IBC_GO_GIT" ]]
+then
+    echo "Cloning ibc-go source code as a bare git repository to $IBC_GO_GIT"
+    git clone --mirror https://github.com/ComposableFi/ibc-go.git "$IBC_GO_GIT"
+else
+    echo "Using existing ibc-go bare git repository at $IBC_GO_GIT"
+fi
+
+# Update the repositories using git fetch. This is so that
+# we keep the local copies of the repositories up to date first.
+pushd "$COSMOS_SDK_GIT"
+git fetch
+popd
+
+pushd "$IBC_GO_GIT"
+git fetch
+popd
+
+# Create a new temporary directory to check out the
+# actual source files from the bare git repositories.
+# This is so that we do not accidentally use an unclean
+# local copy of the source files to generate the protobuf.
+COSMOS_SDK_DIR=$(mktemp -d /tmp/cosmos-sdk-XXXXXXXX)
+
+pushd "$COSMOS_SDK_DIR"
+git clone "$COSMOS_SDK_GIT" .
+git checkout "$COSMOS_SDK_COMMIT"
+
+# We have to name the commit as a branch because
+# proto-compiler uses the branch name as the commit
+# output. Otherwise it will just output HEAD.
+git checkout -b "$COSMOS_SDK_COMMIT"
+popd
+
+IBC_GO_DIR=$(mktemp -d /tmp/ibc-go-XXXXXXXX)
+
+pushd "$IBC_GO_DIR"
+git clone "$IBC_GO_GIT" .
+git checkout "$IBC_GO_COMMIT"
+git checkout -b "$IBC_GO_COMMIT"
+popd
+
+# Remove the existing generated protobuf files
+# so that the newly generated code does not
+# contain removed files.
+
+rm -rf proto/src/prost
+mkdir -p proto/src/prost
+
+cd proto-compiler
+
+cargo build
+
+# Run the proto-compiler against the checked out
+# cosmos-sdk and ibc-go sources to regenerate
+# the Rust protobuf definitions.
+
+cargo run -- compile \
+    --sdk "$COSMOS_SDK_DIR" --ibc "$IBC_GO_DIR" --out ../proto/src/prost
+
+# Remove the temporary checkouts of the repositories
+
+rm -rf "$COSMOS_SDK_DIR"
+rm -rf "$IBC_GO_DIR"
From 67325316e3875759a482dca2ca380037e56594ad Mon Sep 17 00:00:00 2001
From: Seun Lanlege
Date: Fri, 12 Aug 2022 13:21:33 +0100
Subject: [PATCH 74/96] bump beefy-rs

---
 Cargo.lock         | 15 +++++++--------
 modules/Cargo.toml | 11 +++++------
 2 files changed, 12 insertions(+), 14 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 0f263bcda1..bc17c20947 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -287,9 +287,9 @@ dependencies = [
 ]

 [[package]]
-name = "beefy-generic-client"
+name = "beefy-light-client"
 version = "0.1.0"
-source = "git+https://github.com/ComposableFi/beefy-rs?rev=6b99ac5459caaa1baf94cafa208fffe8e9dd0b54#6b99ac5459caaa1baf94cafa208fffe8e9dd0b54"
+source = "git+https://github.com/ComposableFi/beefy-rs?rev=50d53a6e85b0d7e2c1203ae3711f2abd04addd18#50d53a6e85b0d7e2c1203ae3711f2abd04addd18"
 dependencies = [
  "beefy-primitives",
  "ckb-merkle-mountain-range",
@@ -334,9 +334,9 @@ dependencies = [
 ]

 [[package]]
-name = "beefy-queries"
+name = "beefy-prover"
 version = "0.1.0"
-source = "git+https://github.com/ComposableFi/beefy-rs?rev=6b99ac5459caaa1baf94cafa208fffe8e9dd0b54#6b99ac5459caaa1baf94cafa208fffe8e9dd0b54"
+source = "git+https://github.com/ComposableFi/beefy-rs?rev=50d53a6e85b0d7e2c1203ae3711f2abd04addd18#50d53a6e85b0d7e2c1203ae3711f2abd04addd18"
 dependencies = [
  "beefy-primitives",
  "color-eyre",
@@ -1814,15 +1814,14 @@ dependencies = [
 name = "ibc"
 version = "0.15.0"
 dependencies = [
- "beefy-generic-client",
+ "beefy-light-client",
  "beefy-primitives",
-
"beefy-queries", + "beefy-prover", "borsh", "bytes", "derive_more", "env_logger", "flex-error", - "frame-support", "ibc-proto", "ics23", "modelator", @@ -3148,7 +3147,7 @@ dependencies = [ [[package]] name = "primitives" version = "0.1.0" -source = "git+https://github.com/ComposableFi/beefy-rs?rev=6b99ac5459caaa1baf94cafa208fffe8e9dd0b54#6b99ac5459caaa1baf94cafa208fffe8e9dd0b54" +source = "git+https://github.com/ComposableFi/beefy-rs?rev=50d53a6e85b0d7e2c1203ae3711f2abd04addd18#50d53a6e85b0d7e2c1203ae3711f2abd04addd18" dependencies = [ "beefy-primitives", "ckb-merkle-mountain-range", diff --git a/modules/Cargo.toml b/modules/Cargo.toml index 4903150e1e..78149d151d 100644 --- a/modules/Cargo.toml +++ b/modules/Cargo.toml @@ -61,8 +61,8 @@ flex-error = { version = "0.4.4", default-features = false } num-traits = { version = "0.2.15", default-features = false } derive_more = { version = "0.99.17", default-features = false, features = ["from", "into", "display"] } uint = { version = "0.9", default-features = false } -beefy-client = { package = "beefy-generic-client", git = "https://github.com/ComposableFi/beefy-rs", rev = "6b99ac5459caaa1baf94cafa208fffe8e9dd0b54", default-features = false, optional = true } -beefy-client-primitives = { package = "primitives", git = "https://github.com/ComposableFi/beefy-rs", rev = "6b99ac5459caaa1baf94cafa208fffe8e9dd0b54", default-features = false, optional = true } +beefy-client = { package = "beefy-light-client", git = "https://github.com/ComposableFi/beefy-rs", rev = "50d53a6e85b0d7e2c1203ae3711f2abd04addd18", default-features = false, optional = true } +beefy-client-primitives = { package = "primitives", git = "https://github.com/ComposableFi/beefy-rs", rev = "50d53a6e85b0d7e2c1203ae3711f2abd04addd18", default-features = false, optional = true } pallet-mmr-primitives = { package = "sp-mmr-primitives", git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.24", default-features = false, optional = true } beefy-primitives = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.24", default-features = false, optional = true } codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false } @@ -116,11 +116,11 @@ sp-io = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0 subxt = "0.22.0" tokio = { version = "1.17.0", features = ["full"] } serde_json = "1.0.74" -beefy-client = { package = "beefy-generic-client", git = "https://github.com/ComposableFi/beefy-rs", rev = "6b99ac5459caaa1baf94cafa208fffe8e9dd0b54" } -beefy-queries = { git = "https://github.com/ComposableFi/beefy-rs", rev = "6b99ac5459caaa1baf94cafa208fffe8e9dd0b54" } +beefy-client = { package = "beefy-light-client", git = "https://github.com/ComposableFi/beefy-rs", rev = "50d53a6e85b0d7e2c1203ae3711f2abd04addd18" } +beefy-queries = { package = "beefy-prover", git = "https://github.com/ComposableFi/beefy-rs", rev = "50d53a6e85b0d7e2c1203ae3711f2abd04addd18" } sha3 = { version = "0.10.1" } ripemd = { version = "0.1.1" } -beefy-client-primitives = { package = "primitives", git = "https://github.com/ComposableFi/beefy-rs", rev = "6b99ac5459caaa1baf94cafa208fffe8e9dd0b54" } +beefy-client-primitives = { package = "primitives", git = "https://github.com/ComposableFi/beefy-rs", rev = "50d53a6e85b0d7e2c1203ae3711f2abd04addd18" } beefy-primitives = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.24" } pallet-mmr-primitives = { package = "sp-mmr-primitives", git = 
"https://github.com/paritytech/substrate", branch = "polkadot-v0.9.24" } codec = { package = "parity-scale-codec", version = "3.0.0"} @@ -128,7 +128,6 @@ sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkad sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.24" } sp-std = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.24" } sp-trie = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.24" } -frame-support = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.24" } [[test]] name = "mbt" From ca6f2f4ca3b9c2c90d59f49344437ccf32d15475 Mon Sep 17 00:00:00 2001 From: Seun Lanlege Date: Fri, 12 Aug 2022 13:31:20 +0100 Subject: [PATCH 75/96] bump beefy-rs --- modules/src/clients/host_functions.rs | 4 +--- modules/src/clients/ics11_beefy/client_def.rs | 18 ++++++++++-------- .../core/ics02_client/handler/update_client.rs | 5 ++--- modules/src/test_utils.rs | 5 +++-- 4 files changed, 16 insertions(+), 16 deletions(-) diff --git a/modules/src/clients/host_functions.rs b/modules/src/clients/host_functions.rs index cccb2d02d6..e4499031fc 100644 --- a/modules/src/clients/host_functions.rs +++ b/modules/src/clients/host_functions.rs @@ -37,7 +37,6 @@ pub trait HostFunctionsProvider: Clone + Send + Sync + Default { fn verify_timestamp_extrinsic( root: &[u8; 32], proof: &[Vec], - key: &[u8], value: &[u8], ) -> Result<(), Error>; @@ -86,10 +85,9 @@ where fn verify_timestamp_extrinsic( root: sp_core::H256, proof: &[Vec], - key: &[u8], value: &[u8], ) -> Result<(), beefy_client_primitives::error::BeefyClientError> { - T::verify_timestamp_extrinsic(root.as_fixed_bytes(), proof, key, value) + T::verify_timestamp_extrinsic(root.as_fixed_bytes(), proof, value) .map_err(|_| From::from("Timestamp verification failed".to_string())) } } diff --git a/modules/src/clients/ics11_beefy/client_def.rs b/modules/src/clients/ics11_beefy/client_def.rs index 4a79fbb7c9..dd15818248 100644 --- a/modules/src/clients/ics11_beefy/client_def.rs +++ b/modules/src/clients/ics11_beefy/client_def.rs @@ -1,4 +1,3 @@ -use beefy_client::BeefyLightClient; use beefy_client_primitives::ClientState as LightClientState; use beefy_client_primitives::{ParachainHeader, ParachainsUpdateProof}; use codec::{Decode, Encode}; @@ -68,13 +67,14 @@ impl ClientDef for BeefyClient>::new(); // If mmr update exists verify it and return the new light client state // or else return existing light client state let light_client_state = if let Some(mmr_update) = header.mmr_update_proof { - light_client - .verify_mmr_root_with_proof(light_client_state, mmr_update) - .map_err(|e| Error::beefy(BeefyError::invalid_mmr_update(format!("{:?}", e))))? + beefy_client::verify_mmr_root_with_proof::>( + light_client_state, + mmr_update, + ) + .map_err(|e| Error::beefy(BeefyError::invalid_mmr_update(format!("{:?}", e))))? 
} else { light_client_state }; @@ -123,9 +123,11 @@ impl ClientDef for BeefyClient>( + light_client_state, + parachain_update_proof, + ) + .map_err(|e| Error::beefy(BeefyError::invalid_mmr_update(format!("{:?}", e)))) } fn update_state( diff --git a/modules/src/core/ics02_client/handler/update_client.rs b/modules/src/core/ics02_client/handler/update_client.rs index 3ee503a322..fe5541bbde 100644 --- a/modules/src/core/ics02_client/handler/update_client.rs +++ b/modules/src/core/ics02_client/handler/update_client.rs @@ -126,15 +126,14 @@ mod tests { use core::str::FromStr; use test_log::test; - use crate::core::ics02_client::client_consensus::{AnyConsensusState, ConsensusState}; + use crate::core::ics02_client::client_consensus::AnyConsensusState; use crate::core::ics02_client::client_state::{AnyClientState, ClientState}; use crate::core::ics02_client::client_type::ClientType; - use crate::core::ics02_client::context::{ClientKeeper, ClientReader}; + use crate::core::ics02_client::context::ClientReader; use crate::core::ics02_client::error::{Error, ErrorDetail}; use crate::core::ics02_client::handler::dispatch; use crate::core::ics02_client::handler::ClientResult::Update; use crate::core::ics02_client::header::AnyHeader; - use crate::core::ics02_client::msgs::create_client::MsgCreateAnyClient; use crate::core::ics02_client::msgs::update_client::MsgUpdateAnyClient; use crate::core::ics02_client::msgs::ClientMsg; use crate::core::ics24_host::identifier::{ChainId, ClientId}; diff --git a/modules/src/test_utils.rs b/modules/src/test_utils.rs index 3281a0045c..d1a53a3d0c 100644 --- a/modules/src/test_utils.rs +++ b/modules/src/test_utils.rs @@ -1,5 +1,6 @@ use std::sync::{Arc, Mutex}; use std::time::Duration; +use codec::Encode; use crate::clients::host_functions::HostFunctionsProvider; use crate::core::ics02_client::context::ClientReader; @@ -195,14 +196,14 @@ impl HostFunctionsProvider for Crypto { fn verify_timestamp_extrinsic( root: &[u8; 32], proof: &[Vec], - key: &[u8], value: &[u8], ) -> Result<(), Ics02Error> { let root = sp_core::H256::from_slice(root); + let key = codec::Compact(0u32).encode(); sp_io::trie::blake2_256_verify_proof( root, proof, - key, + &key, value, sp_core::storage::StateVersion::V0, ) From 288a2717031fc0b21c526f05377f000ea5da97c5 Mon Sep 17 00:00:00 2001 From: Seun Lanlege Date: Fri, 12 Aug 2022 14:01:29 +0100 Subject: [PATCH 76/96] bump beefy-rs --- Cargo.lock | 85 +++++-------------- modules/Cargo.toml | 12 +-- .../ics02_client/handler/update_client.rs | 5 +- 3 files changed, 28 insertions(+), 74 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index bc17c20947..0fe291f050 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -289,7 +289,7 @@ dependencies = [ [[package]] name = "beefy-light-client" version = "0.1.0" -source = "git+https://github.com/ComposableFi/beefy-rs?rev=50d53a6e85b0d7e2c1203ae3711f2abd04addd18#50d53a6e85b0d7e2c1203ae3711f2abd04addd18" +source = "git+https://github.com/ComposableFi/beefy-rs?rev=ea3c74c7c1f959ba1b90e2508b1b6c1fd9afc7ee#ea3c74c7c1f959ba1b90e2508b1b6c1fd9afc7ee" dependencies = [ "beefy-primitives", "ckb-merkle-mountain-range", @@ -336,7 +336,7 @@ dependencies = [ [[package]] name = "beefy-prover" version = "0.1.0" -source = "git+https://github.com/ComposableFi/beefy-rs?rev=50d53a6e85b0d7e2c1203ae3711f2abd04addd18#50d53a6e85b0d7e2c1203ae3711f2abd04addd18" +source = "git+https://github.com/ComposableFi/beefy-rs?rev=ea3c74c7c1f959ba1b90e2508b1b6c1fd9afc7ee#ea3c74c7c1f959ba1b90e2508b1b6c1fd9afc7ee" dependencies = [ "beefy-primitives", 
"color-eyre", @@ -2035,6 +2035,7 @@ version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1f2ab5a60e558e74ea93bcf5164ebc47939a7fff8938fa9b5233bbc63e16061" dependencies = [ + "jsonrpsee-client-transport 0.13.1", "jsonrpsee-core 0.13.1", "jsonrpsee-http-server", "jsonrpsee-proc-macros", @@ -2043,16 +2044,6 @@ dependencies = [ "tracing", ] -[[package]] -name = "jsonrpsee" -version = "0.14.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11e017217fcd18da0a25296d3693153dd19c8a6aadab330b3595285d075385d1" -dependencies = [ - "jsonrpsee-client-transport 0.14.0", - "jsonrpsee-core 0.14.0", -] - [[package]] name = "jsonrpsee" version = "0.15.1" @@ -2067,14 +2058,14 @@ dependencies = [ [[package]] name = "jsonrpsee-client-transport" -version = "0.14.0" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce395539a14d3ad4ec1256fde105abd36a2da25d578a291cabe98f45adfdb111" +checksum = "26d682f4a55081a2be3e639280c640523070e4aeb8ee2fd8dd9168fdae57a9db" dependencies = [ "futures-util", "http", - "jsonrpsee-core 0.14.0", - "jsonrpsee-types 0.14.0", + "jsonrpsee-core 0.13.1", + "jsonrpsee-types 0.13.1", "pin-project", "rustls-native-certs 0.6.2", "soketto", @@ -2115,9 +2106,11 @@ checksum = "6e27462b21279edf9a6a91f46ffbe125e9cdc58b901d2e08bf59b31a47d7d0ab" dependencies = [ "anyhow", "arrayvec 0.7.2", + "async-lock", "async-trait", "beef", "futures-channel", + "futures-timer", "futures-util", "hyper", "jsonrpsee-types 0.13.1", @@ -2132,28 +2125,6 @@ dependencies = [ "tracing", ] -[[package]] -name = "jsonrpsee-core" -version = "0.14.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16efcd4477de857d4a2195a45769b2fe9ebb54f3ef5a4221d3b014a4fe33ec0b" -dependencies = [ - "anyhow", - "async-lock", - "async-trait", - "beef", - "futures-channel", - "futures-timer", - "futures-util", - "jsonrpsee-types 0.14.0", - "rustc-hash", - "serde", - "serde_json", - "thiserror", - "tokio", - "tracing", -] - [[package]] name = "jsonrpsee-core" version = "0.15.1" @@ -2243,20 +2214,6 @@ dependencies = [ "tracing", ] -[[package]] -name = "jsonrpsee-types" -version = "0.14.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3bcf76cd316f5d3ad48138085af1f45e2c58c98e02f0779783dbb034d43f7c86" -dependencies = [ - "anyhow", - "beef", - "serde", - "serde_json", - "thiserror", - "tracing", -] - [[package]] name = "jsonrpsee-types" version = "0.15.1" @@ -3147,7 +3104,7 @@ dependencies = [ [[package]] name = "primitives" version = "0.1.0" -source = "git+https://github.com/ComposableFi/beefy-rs?rev=50d53a6e85b0d7e2c1203ae3711f2abd04addd18#50d53a6e85b0d7e2c1203ae3711f2abd04addd18" +source = "git+https://github.com/ComposableFi/beefy-rs?rev=ea3c74c7c1f959ba1b90e2508b1b6c1fd9afc7ee#ea3c74c7c1f959ba1b90e2508b1b6c1fd9afc7ee" dependencies = [ "beefy-primitives", "ckb-merkle-mountain-range", @@ -4985,16 +4942,15 @@ checksum = "734676eb262c623cec13c3155096e08d1f8f29adce39ba17948b18dad1e54142" [[package]] name = "subxt" -version = "0.22.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e731c0245979a80f9090a89b43635e23f367f13a225695f286f307978db36f11" +version = "0.21.0" +source = "git+https://github.com/paritytech/subxt?rev=ec23283d75e4b3b894294e351fd7ffa2b4431201#ec23283d75e4b3b894294e351fd7ffa2b4431201" dependencies = [ "bitvec", "derivative", "frame-metadata", "futures", "hex", - "jsonrpsee 0.14.0", + "jsonrpsee 0.13.1", 
"parity-scale-codec", "parking_lot", "scale-info", @@ -5010,9 +4966,8 @@ dependencies = [ [[package]] name = "subxt-codegen" -version = "0.22.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c9462b52d539cde2e0dbbd1c89d28079459ed790f42218c5bfc9d61c9575e32" +version = "0.21.0" +source = "git+https://github.com/paritytech/subxt?rev=ec23283d75e4b3b894294e351fd7ffa2b4431201#ec23283d75e4b3b894294e351fd7ffa2b4431201" dependencies = [ "darling", "frame-metadata", @@ -5028,9 +4983,8 @@ dependencies = [ [[package]] name = "subxt-macro" -version = "0.22.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38521809516f4c244b6f38ed13fc67ef6ada29a846fa26123a4206ff743f3461" +version = "0.21.0" +source = "git+https://github.com/paritytech/subxt?rev=ec23283d75e4b3b894294e351fd7ffa2b4431201#ec23283d75e4b3b894294e351fd7ffa2b4431201" dependencies = [ "darling", "proc-macro-error", @@ -5040,9 +4994,8 @@ dependencies = [ [[package]] name = "subxt-metadata" -version = "0.22.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37b01bac35f2524ce590fa1438fb6c81a63df1b4c94f686be391afd8d02615b3" +version = "0.21.0" +source = "git+https://github.com/paritytech/subxt?rev=ec23283d75e4b3b894294e351fd7ffa2b4431201#ec23283d75e4b3b894294e351fd7ffa2b4431201" dependencies = [ "frame-metadata", "parity-scale-codec", diff --git a/modules/Cargo.toml b/modules/Cargo.toml index 78149d151d..64b553f7ec 100644 --- a/modules/Cargo.toml +++ b/modules/Cargo.toml @@ -61,8 +61,8 @@ flex-error = { version = "0.4.4", default-features = false } num-traits = { version = "0.2.15", default-features = false } derive_more = { version = "0.99.17", default-features = false, features = ["from", "into", "display"] } uint = { version = "0.9", default-features = false } -beefy-client = { package = "beefy-light-client", git = "https://github.com/ComposableFi/beefy-rs", rev = "50d53a6e85b0d7e2c1203ae3711f2abd04addd18", default-features = false, optional = true } -beefy-client-primitives = { package = "primitives", git = "https://github.com/ComposableFi/beefy-rs", rev = "50d53a6e85b0d7e2c1203ae3711f2abd04addd18", default-features = false, optional = true } +beefy-client = { package = "beefy-light-client", git = "https://github.com/ComposableFi/beefy-rs", rev = "ea3c74c7c1f959ba1b90e2508b1b6c1fd9afc7ee", default-features = false, optional = true } +beefy-client-primitives = { package = "primitives", git = "https://github.com/ComposableFi/beefy-rs", rev = "ea3c74c7c1f959ba1b90e2508b1b6c1fd9afc7ee", default-features = false, optional = true } pallet-mmr-primitives = { package = "sp-mmr-primitives", git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.24", default-features = false, optional = true } beefy-primitives = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.24", default-features = false, optional = true } codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false } @@ -113,14 +113,14 @@ tendermint-rpc = { git = "https://github.com/composableFi/tendermint-rs", rev = tendermint-testgen = { git = "https://github.com/composableFi/tendermint-rs", rev = "5a74e0f8da4d3dab83cc04b5f1363b018cf3d9e8" } # Needed for generating (synthetic) light blocks. 
# Beefy Light Client testing dependencies sp-io = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.24"} -subxt = "0.22.0" +subxt = { git = "https://github.com/paritytech/subxt", rev = "ec23283d75e4b3b894294e351fd7ffa2b4431201" } tokio = { version = "1.17.0", features = ["full"] } serde_json = "1.0.74" -beefy-client = { package = "beefy-light-client", git = "https://github.com/ComposableFi/beefy-rs", rev = "50d53a6e85b0d7e2c1203ae3711f2abd04addd18" } -beefy-queries = { package = "beefy-prover", git = "https://github.com/ComposableFi/beefy-rs", rev = "50d53a6e85b0d7e2c1203ae3711f2abd04addd18" } +beefy-client = { package = "beefy-light-client", git = "https://github.com/ComposableFi/beefy-rs", rev = "ea3c74c7c1f959ba1b90e2508b1b6c1fd9afc7ee" } +beefy-queries = { package = "beefy-prover", git = "https://github.com/ComposableFi/beefy-rs", rev = "ea3c74c7c1f959ba1b90e2508b1b6c1fd9afc7ee" } sha3 = { version = "0.10.1" } ripemd = { version = "0.1.1" } -beefy-client-primitives = { package = "primitives", git = "https://github.com/ComposableFi/beefy-rs", rev = "50d53a6e85b0d7e2c1203ae3711f2abd04addd18" } +beefy-client-primitives = { package = "primitives", git = "https://github.com/ComposableFi/beefy-rs", rev = "ea3c74c7c1f959ba1b90e2508b1b6c1fd9afc7ee" } beefy-primitives = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.24" } pallet-mmr-primitives = { package = "sp-mmr-primitives", git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.24" } codec = { package = "parity-scale-codec", version = "3.0.0"} diff --git a/modules/src/core/ics02_client/handler/update_client.rs b/modules/src/core/ics02_client/handler/update_client.rs index fe5541bbde..4c3cfb01a2 100644 --- a/modules/src/core/ics02_client/handler/update_client.rs +++ b/modules/src/core/ics02_client/handler/update_client.rs @@ -126,16 +126,17 @@ mod tests { use core::str::FromStr; use test_log::test; - use crate::core::ics02_client::client_consensus::AnyConsensusState; + use crate::core::ics02_client::client_consensus::{AnyConsensusState, ConsensusState}; use crate::core::ics02_client::client_state::{AnyClientState, ClientState}; use crate::core::ics02_client::client_type::ClientType; - use crate::core::ics02_client::context::ClientReader; + use crate::core::ics02_client::context::{ClientKeeper, ClientReader}; use crate::core::ics02_client::error::{Error, ErrorDetail}; use crate::core::ics02_client::handler::dispatch; use crate::core::ics02_client::handler::ClientResult::Update; use crate::core::ics02_client::header::AnyHeader; use crate::core::ics02_client::msgs::update_client::MsgUpdateAnyClient; use crate::core::ics02_client::msgs::ClientMsg; + use crate::core::ics02_client::msgs::create_client::MsgCreateAnyClient; use crate::core::ics24_host::identifier::{ChainId, ClientId}; use crate::events::IbcEvent; use crate::handler::HandlerOutput; From a189a3b796cbb84807061180a4ad67c4a1ded8d0 Mon Sep 17 00:00:00 2001 From: Seun Lanlege Date: Fri, 12 Aug 2022 14:23:15 +0100 Subject: [PATCH 77/96] cargo fmt --- modules/src/core/ics02_client/handler/update_client.rs | 2 +- modules/src/test_utils.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/src/core/ics02_client/handler/update_client.rs b/modules/src/core/ics02_client/handler/update_client.rs index 4c3cfb01a2..3ee503a322 100644 --- a/modules/src/core/ics02_client/handler/update_client.rs +++ b/modules/src/core/ics02_client/handler/update_client.rs @@ -134,9 +134,9 @@ mod tests { use 
crate::core::ics02_client::handler::dispatch; use crate::core::ics02_client::handler::ClientResult::Update; use crate::core::ics02_client::header::AnyHeader; + use crate::core::ics02_client::msgs::create_client::MsgCreateAnyClient; use crate::core::ics02_client::msgs::update_client::MsgUpdateAnyClient; use crate::core::ics02_client::msgs::ClientMsg; - use crate::core::ics02_client::msgs::create_client::MsgCreateAnyClient; use crate::core::ics24_host::identifier::{ChainId, ClientId}; use crate::events::IbcEvent; use crate::handler::HandlerOutput; diff --git a/modules/src/test_utils.rs b/modules/src/test_utils.rs index d1a53a3d0c..6590c59346 100644 --- a/modules/src/test_utils.rs +++ b/modules/src/test_utils.rs @@ -1,6 +1,6 @@ +use codec::Encode; use std::sync::{Arc, Mutex}; use std::time::Duration; -use codec::Encode; use crate::clients::host_functions::HostFunctionsProvider; use crate::core::ics02_client::context::ClientReader; From 13de07663749a59a424d67bb259a714182df28eb Mon Sep 17 00:00:00 2001 From: Web3 Smith <31099392+Wizdave97@users.noreply.github.com> Date: Mon, 15 Aug 2022 13:40:43 +0100 Subject: [PATCH 78/96] Fix client update time and update height for batch consensus updates (#50) fix client update time storage --- modules/Cargo.toml | 3 +- modules/src/core/ics02_client/context.rs | 32 ++++++++++------ .../core/ics03_connection/handler/verify.rs | 38 +++++++++++++++++++ 3 files changed, 61 insertions(+), 12 deletions(-) diff --git a/modules/Cargo.toml b/modules/Cargo.toml index 64b553f7ec..4565aa3121 100644 --- a/modules/Cargo.toml +++ b/modules/Cargo.toml @@ -39,7 +39,8 @@ clock = ["tendermint/clock", "time/std"] # This feature grants access to development-time mocking libraries, such as `MockContext` or `MockHeader`. # Depends on the `testgen` suite for generating Tendermint light blocks. mocks = ["tendermint-testgen", "clock", "std", "sha3", "ripemd", "ics11_beefy"] -ics11_beefy = ["sp-io", "sp-core", "sp-std", "beefy-primitives", "sp-runtime", "sp-trie", "pallet-mmr-primitives", "beefy-client", "beefy-client-primitives"] +ics11_beefy = ["sp-io", "sp-core", "sp-std", "beefy-primitives", "sp-runtime", "sp-trie", "pallet-mmr-primitives", "beefy-client", "beefy-client-primitives", "skip_host_consensus_verification"] +skip_host_consensus_verification = [] [dependencies] # Proto definitions for all IBC-related interfaces, e.g., connections or channels. 
diff --git a/modules/src/core/ics02_client/context.rs b/modules/src/core/ics02_client/context.rs index c100b14c4d..1658cab2b9 100644 --- a/modules/src/core/ics02_client/context.rs +++ b/modules/src/core/ics02_client/context.rs @@ -109,6 +109,17 @@ pub trait ClientKeeper { res.client_state.latest_height(), cs_state, )?; + + self.store_update_time( + res.client_id.clone(), + res.client_state.latest_height(), + res.processed_time, + )?; + self.store_update_height( + res.client_id, + res.client_state.latest_height(), + res.processed_height, + )?; } ConsensusUpdateResult::Batch(cs_states) => { for (height, cs_state) in cs_states { @@ -117,21 +128,20 @@ pub trait ClientKeeper { height, cs_state, )?; + self.store_update_time( + res.client_id.clone(), + height, + res.processed_time, + )?; + self.store_update_height( + res.client_id.clone(), + height, + res.processed_height, + )?; } } }, } - - self.store_update_time( - res.client_id.clone(), - res.client_state.latest_height(), - res.processed_time, - )?; - self.store_update_height( - res.client_id, - res.client_state.latest_height(), - res.processed_height, - )?; Ok(()) } Upgrade(res) => { diff --git a/modules/src/core/ics03_connection/handler/verify.rs b/modules/src/core/ics03_connection/handler/verify.rs index 66f9404f70..d1784c96c9 100644 --- a/modules/src/core/ics03_connection/handler/verify.rs +++ b/modules/src/core/ics03_connection/handler/verify.rs @@ -10,6 +10,7 @@ use crate::core::ics26_routing::context::ReaderContext; use crate::proofs::{ConsensusProof, Proofs}; use crate::Height; +#[cfg(not(feature = "skip_host_consensus_verification"))] /// Entry point for verifying all proofs bundled in any ICS3 message. pub fn verify_proofs( ctx: &dyn ReaderContext, @@ -56,6 +57,43 @@ pub fn verify_proofs( } } +#[cfg(feature = "skip_host_consensus_verification")] +/// Entry point for verifying all proofs bundled in any ICS3 message. +pub fn verify_proofs( + ctx: &dyn ReaderContext, + client_state: Option, + height: Height, + connection_end: &ConnectionEnd, + expected_conn: &ConnectionEnd, + proofs: &Proofs, +) -> Result<(), Error> { + verify_connection_proof::( + ctx, + height, + connection_end, + expected_conn, + proofs.height(), + proofs.object_proof(), + )?; + + // If the message includes a client state, then verify the proof for that state. + if let Some(expected_client_state) = client_state { + verify_client_proof::( + ctx, + height, + connection_end, + expected_client_state, + proofs.height(), + proofs + .client_proof() + .as_ref() + .ok_or_else(Error::null_client_proof)?, + )?; + } + + Ok(()) +} + /// Verifies the authenticity and semantic correctness of a commitment `proof`. The commitment /// claims to prove that an object of type connection exists on the source chain (i.e., the chain /// which created this proof). This object must match the state of `expected_conn`. 
From 3cf99c8de88ba0d3674d9efc8243bbde7bf9fd36 Mon Sep 17 00:00:00 2001 From: Web3 Smith <31099392+Wizdave97@users.noreply.github.com> Date: Thu, 25 Aug 2022 12:15:49 +0100 Subject: [PATCH 79/96] Allow optional storage of receive packets (#52) optionally store receive packets --- modules/src/core/ics04_channel/context.rs | 26 +++++++++++++++---- .../core/ics04_channel/handler/recv_packet.rs | 6 ++++- modules/src/mock/context.rs | 10 ++++++- modules/src/test_utils.rs | 10 ++++++- 4 files changed, 44 insertions(+), 8 deletions(-) diff --git a/modules/src/core/ics04_channel/context.rs b/modules/src/core/ics04_channel/context.rs index fd0764fd81..a177868b2f 100644 --- a/modules/src/core/ics04_channel/context.rs +++ b/modules/src/core/ics04_channel/context.rs @@ -138,20 +138,29 @@ pub trait ChannelKeeper { res.commitment, )?; - self.store_packet((res.port_id.clone(), res.channel_id, res.seq), res.packet)?; + self.store_send_packet((res.port_id.clone(), res.channel_id, res.seq), res.packet)?; } PacketResult::Recv(res) => match res { RecvPacketResult::Ordered { port_id, channel_id, next_seq_recv, - } => self.store_next_sequence_recv((port_id, channel_id), next_seq_recv)?, + packet, + } => { + self.store_next_sequence_recv((port_id.clone(), channel_id), next_seq_recv)?; + self.store_recv_packet((port_id, channel_id, packet.sequence), packet)? + } RecvPacketResult::Unordered { port_id, channel_id, sequence, receipt, - } => self.store_packet_receipt((port_id, channel_id, sequence), receipt)?, + packet, + } => { + self.store_packet_receipt((port_id.clone(), channel_id, sequence), receipt)?; + self.store_recv_packet((port_id, channel_id, packet.sequence), packet)? + } + RecvPacketResult::NoOp => unreachable!(), }, PacketResult::WriteAck(res) => { @@ -193,8 +202,15 @@ pub trait ChannelKeeper { commitment: PacketCommitment, ) -> Result<(), Error>; - /// Allow implementers to optionally store packet in storage - fn store_packet( + /// Allow implementers to optionally store send packets in storage + fn store_send_packet( + &mut self, + key: (PortId, ChannelId, Sequence), + packet: Packet, + ) -> Result<(), Error>; + + /// Allow implementers to optionally store received packets in storage + fn store_recv_packet( &mut self, key: (PortId, ChannelId, Sequence), packet: Packet, diff --git a/modules/src/core/ics04_channel/handler/recv_packet.rs b/modules/src/core/ics04_channel/handler/recv_packet.rs index 11b2b5edde..b486fad1df 100644 --- a/modules/src/core/ics04_channel/handler/recv_packet.rs +++ b/modules/src/core/ics04_channel/handler/recv_packet.rs @@ -5,7 +5,7 @@ use crate::core::ics04_channel::error::Error; use crate::core::ics04_channel::events::ReceivePacket; use crate::core::ics04_channel::handler::verify::verify_packet_recv_proofs; use crate::core::ics04_channel::msgs::recv_packet::MsgRecvPacket; -use crate::core::ics04_channel::packet::{PacketResult, Receipt, Sequence}; +use crate::core::ics04_channel::packet::{Packet, PacketResult, Receipt, Sequence}; use crate::core::ics24_host::identifier::{ChannelId, PortId}; use crate::core::ics26_routing::context::ReaderContext; use crate::events::IbcEvent; @@ -23,11 +23,13 @@ pub enum RecvPacketResult { channel_id: ChannelId, sequence: Sequence, receipt: Receipt, + packet: Packet, }, Ordered { port_id: PortId, channel_id: ChannelId, next_seq_recv: Sequence, + packet: Packet, }, } @@ -112,6 +114,7 @@ pub fn process( port_id: packet.destination_port.clone(), channel_id: packet.destination_channel, next_seq_recv: next_seq_recv.increment(), + packet: 
packet.clone(), }) } else { let packet_rec = ctx.get_packet_receipt(&( @@ -135,6 +138,7 @@ pub fn process( channel_id: packet.destination_channel, sequence: packet.sequence, receipt: Receipt::Ok, + packet: packet.clone(), }) } Err(e) => return Err(Error::implementation_specific(e.to_string())), diff --git a/modules/src/mock/context.rs b/modules/src/mock/context.rs index 2c55a7e094..3923ba240f 100644 --- a/modules/src/mock/context.rs +++ b/modules/src/mock/context.rs @@ -961,7 +961,15 @@ impl ChannelKeeper for MockContext { Ok(()) } - fn store_packet( + fn store_send_packet( + &mut self, + _key: (PortId, ChannelId, Sequence), + _packet: crate::core::ics04_channel::packet::Packet, + ) -> Result<(), Ics04Error> { + Ok(()) + } + + fn store_recv_packet( &mut self, _key: (PortId, ChannelId, Sequence), _packet: crate::core::ics04_channel::packet::Packet, diff --git a/modules/src/test_utils.rs b/modules/src/test_utils.rs index 6590c59346..da2c91bcd3 100644 --- a/modules/src/test_utils.rs +++ b/modules/src/test_utils.rs @@ -311,7 +311,15 @@ impl ChannelKeeper for DummyTransferModule { unimplemented!() } - fn store_packet( + fn store_send_packet( + &mut self, + _key: (PortId, ChannelId, Sequence), + _packet: crate::core::ics04_channel::packet::Packet, + ) -> Result<(), Error> { + Ok(()) + } + + fn store_recv_packet( &mut self, _key: (PortId, ChannelId, Sequence), _packet: crate::core::ics04_channel::packet::Packet, From 90adc2b279a70d029d8bf33661d274f06bfca64e Mon Sep 17 00:00:00 2001 From: Web3 Smith <31099392+Wizdave97@users.noreply.github.com> Date: Fri, 26 Aug 2022 13:03:48 +0100 Subject: [PATCH 80/96] Delete all acknowledged packet commitments (#54) delete acknowledged packet commitments --- modules/src/core/ics04_channel/context.rs | 19 ++++++------------- 1 file changed, 6 insertions(+), 13 deletions(-) diff --git a/modules/src/core/ics04_channel/context.rs b/modules/src/core/ics04_channel/context.rs index a177868b2f..8767aca0e5 100644 --- a/modules/src/core/ics04_channel/context.rs +++ b/modules/src/core/ics04_channel/context.rs @@ -170,20 +170,13 @@ pub trait ChannelKeeper { )?; } PacketResult::Ack(res) => { - match res.seq_number { - Some(s) => { - //Ordered Channel - self.store_next_sequence_ack((res.port_id.clone(), res.channel_id), s)?; - } - None => { - //Unordered Channel - self.delete_packet_commitment(( - res.port_id.clone(), - res.channel_id, - res.seq, - ))?; - } + if let Some(s) = res.seq_number { + //Ordered Channel + self.store_next_sequence_ack((res.port_id.clone(), res.channel_id), s)?; } + + // Delete packet commitment since packet has been aknowledged + self.delete_packet_commitment((res.port_id.clone(), res.channel_id, res.seq))?; } PacketResult::Timeout(res) => { if let Some(c) = res.channel { From 979eef2e8cf47597ba51d59cbf8db84d54077abd Mon Sep 17 00:00:00 2001 From: Web3 Philosopher Date: Sat, 27 Aug 2022 08:45:24 +0100 Subject: [PATCH 81/96] Allow 02-client get host consensus state from offchain data (#55) * allow chains fetch host consensus state from proof * add docs --- modules/src/core/ics02_client/context.rs | 4 +++- modules/src/core/ics03_connection/handler/verify.rs | 2 +- modules/src/mock/context.rs | 4 ++-- modules/src/test_utils.rs | 3 ++- 4 files changed, 8 insertions(+), 5 deletions(-) diff --git a/modules/src/core/ics02_client/context.rs b/modules/src/core/ics02_client/context.rs index 1658cab2b9..8acf920a7d 100644 --- a/modules/src/core/ics02_client/context.rs +++ b/modules/src/core/ics02_client/context.rs @@ -8,6 +8,7 @@ use 
crate::core::ics02_client::client_state::AnyClientState; use crate::core::ics02_client::client_type::ClientType; use crate::core::ics02_client::error::{Error, ErrorDetail}; use crate::core::ics02_client::handler::ClientResult::{self, Create, Update, Upgrade}; +use crate::core::ics23_commitment::commitment::CommitmentProofBytes; use crate::core::ics24_host::identifier::ClientId; use crate::timestamp::Timestamp; use crate::Height; @@ -64,7 +65,8 @@ pub trait ClientReader { fn host_timestamp(&self) -> Timestamp; /// Returns the `ConsensusState` of the host (local) chain at a specific height. - fn host_consensus_state(&self, height: Height) -> Result; + /// If this is fetched from a proof whose origin is off-chain, it should ideally be verified first. + fn host_consensus_state(&self, height: Height, proof: &CommitmentProofBytes) -> Result; /// Returns a natural number, counting how many clients have been created thus far. /// The value of this counter should increase only via method `ClientKeeper::increase_client_counter`. diff --git a/modules/src/core/ics03_connection/handler/verify.rs b/modules/src/core/ics03_connection/handler/verify.rs index d1784c96c9..6b2e80b445 100644 --- a/modules/src/core/ics03_connection/handler/verify.rs +++ b/modules/src/core/ics03_connection/handler/verify.rs @@ -208,7 +208,7 @@ pub fn verify_consensus_proof( // Fetch the expected consensus state from the historical (local) header data. let expected_consensus = ctx - .host_consensus_state(proof.height()) + .host_consensus_state(proof.height(), proof.proof()) .map_err(|e| Error::consensus_state_verification_failure(proof.height(), e))?; let consensus_state = ctx diff --git a/modules/src/mock/context.rs b/modules/src/mock/context.rs index 3923ba240f..68075ded78 100644 --- a/modules/src/mock/context.rs +++ b/modules/src/mock/context.rs @@ -34,7 +34,7 @@ use crate::core::ics04_channel::packet::{Receipt, Sequence}; use crate::core::ics05_port::context::PortReader; use crate::core::ics05_port::error::Error as Ics05Error; use crate::core::ics05_port::error::Error; -use crate::core::ics23_commitment::commitment::CommitmentPrefix; +use crate::core::ics23_commitment::commitment::{CommitmentPrefix, CommitmentProofBytes}; use crate::core::ics24_host::identifier::{ChainId, ChannelId, ClientId, ConnectionId, PortId}; use crate::core::ics26_routing::context::{ Ics26Context, Module, ModuleId, ReaderContext, Router, RouterBuilder, @@ -1143,7 +1143,7 @@ impl ClientReader for MockContext { } } - fn host_consensus_state(&self, height: Height) -> Result { + fn host_consensus_state(&self, height: Height, _proof: &CommitmentProofBytes) -> Result { match self.host_block(height) { Some(block_ref) => Ok(block_ref.clone().into()), None => Err(Ics02Error::missing_local_consensus_state(height)), diff --git a/modules/src/test_utils.rs b/modules/src/test_utils.rs index da2c91bcd3..7035922ed6 100644 --- a/modules/src/test_utils.rs +++ b/modules/src/test_utils.rs @@ -27,6 +27,7 @@ use crate::core::ics04_channel::packet::{Receipt, Sequence}; use crate::core::ics04_channel::Version; use crate::core::ics05_port::context::PortReader; use crate::core::ics05_port::error::Error as PortError; +use crate::core::ics23_commitment::commitment::CommitmentProofBytes; use crate::core::ics24_host::identifier::{ChannelId, ClientId, ConnectionId, PortId}; use crate::core::ics26_routing::context::{Module, ModuleId, ModuleOutputBuilder, ReaderContext}; use crate::mock::context::MockIbcStore; @@ -415,7 +416,7 @@ impl ClientReader for DummyTransferModule { 
Height::zero() } - fn host_consensus_state(&self, _height: Height) -> Result { + fn host_consensus_state(&self, _height: Height, _proof: &CommitmentProofBytes) -> Result { unimplemented!() } From 2a092e43d6af5242e661685ac324ae6ba9ad1f7d Mon Sep 17 00:00:00 2001 From: Web3 Philosopher Date: Sun, 28 Aug 2022 08:45:12 +0100 Subject: [PATCH 82/96] chain agnostic connection handshakes (#57) * allow chains fetch host consensus state from proof * add docs * remove skip_host_consensus_verification * rename to host_proof * require client_state in conn_open_{try/ack} --- modules/Cargo.toml | 3 +- modules/src/core/ics02_client/context.rs | 13 +- .../ics03_connection/handler/conn_open_ack.rs | 36 ++++- .../handler/conn_open_confirm.rs | 8 +- .../ics03_connection/handler/conn_open_try.rs | 41 +++++- .../core/ics03_connection/handler/verify.rs | 129 ++++++------------ .../src/core/ics23_commitment/commitment.rs | 7 + modules/src/core/ics26_routing/context.rs | 2 +- modules/src/mock/context.rs | 14 +- modules/src/test_utils.rs | 71 +++++++++- 10 files changed, 209 insertions(+), 115 deletions(-) diff --git a/modules/Cargo.toml b/modules/Cargo.toml index 4565aa3121..64b553f7ec 100644 --- a/modules/Cargo.toml +++ b/modules/Cargo.toml @@ -39,8 +39,7 @@ clock = ["tendermint/clock", "time/std"] # This feature grants access to development-time mocking libraries, such as `MockContext` or `MockHeader`. # Depends on the `testgen` suite for generating Tendermint light blocks. mocks = ["tendermint-testgen", "clock", "std", "sha3", "ripemd", "ics11_beefy"] -ics11_beefy = ["sp-io", "sp-core", "sp-std", "beefy-primitives", "sp-runtime", "sp-trie", "pallet-mmr-primitives", "beefy-client", "beefy-client-primitives", "skip_host_consensus_verification"] -skip_host_consensus_verification = [] +ics11_beefy = ["sp-io", "sp-core", "sp-std", "beefy-primitives", "sp-runtime", "sp-trie", "pallet-mmr-primitives", "beefy-client", "beefy-client-primitives"] [dependencies] # Proto definitions for all IBC-related interfaces, e.g., connections or channels. diff --git a/modules/src/core/ics02_client/context.rs b/modules/src/core/ics02_client/context.rs index 8acf920a7d..6f59f41a5e 100644 --- a/modules/src/core/ics02_client/context.rs +++ b/modules/src/core/ics02_client/context.rs @@ -28,6 +28,9 @@ pub trait ClientReader { height: Height, ) -> Result; + /// This should return the host type. + fn host_client_type(&self) -> ClientType; + /// Similar to `consensus_state`, attempt to retrieve the consensus state, /// but return `None` if no state exists at the given height. fn maybe_consensus_state( @@ -66,7 +69,11 @@ pub trait ClientReader { /// Returns the `ConsensusState` of the host (local) chain at a specific height. /// If this is fetched from a proof whose origin is off-chain, it should ideally be verified first. - fn host_consensus_state(&self, height: Height, proof: &CommitmentProofBytes) -> Result; + fn host_consensus_state( + &self, + height: Height, + proof: Option>, + ) -> Result; /// Returns a natural number, counting how many clients have been created thus far. /// The value of this counter should increase only via method `ClientKeeper::increase_client_counter`. 
@@ -220,4 +227,8 @@ pub trait ClientKeeper { height: Height, host_height: Height, ) -> Result<(), Error>; + + /// validates the client parameters for a client of the running chain + /// This function is only used to validate the client state the counterparty stores for this chain + fn validate_self_client(&self, client_state: &AnyClientState) -> Result<(), Error>; } diff --git a/modules/src/core/ics03_connection/handler/conn_open_ack.rs b/modules/src/core/ics03_connection/handler/conn_open_ack.rs index a157ca212f..4e6218047c 100644 --- a/modules/src/core/ics03_connection/handler/conn_open_ack.rs +++ b/modules/src/core/ics03_connection/handler/conn_open_ack.rs @@ -5,7 +5,8 @@ use crate::core::ics03_connection::connection::{ConnectionEnd, Counterparty, Sta use crate::core::ics03_connection::error::Error; use crate::core::ics03_connection::events::Attributes; use crate::core::ics03_connection::handler::verify::{ - check_client_consensus_height, verify_proofs, + check_client_consensus_height, verify_client_proof, verify_connection_proof, + verify_consensus_proof, }; use crate::core::ics03_connection::handler::{ConnectionIdState, ConnectionResult}; use crate::core::ics03_connection::msgs::conn_open_ack::MsgConnectionOpenAck; @@ -67,16 +68,41 @@ pub(crate) fn process( ) }; - // 2. Pass the details to the verification function. - verify_proofs::( + let client_state = msg.client_state.ok_or_else(|| { + Error::implementation_specific("client state is required in connOpenTry".into()) + })?; + + let client_proof = msg.proofs.client_proof().as_ref().ok_or_else(|| { + Error::implementation_specific("client proof is required in connOpenTry".into()) + })?; + + let consensus_proof = msg.proofs.consensus_proof().ok_or_else(|| { + Error::implementation_specific("consensus proof is required in connOpenTry".into()) + })?; + + ctx.validate_self_client(&client_state) + .map_err(Error::ics02_client)?; + + verify_connection_proof::( ctx, - msg.client_state.clone(), msg.proofs.height(), &conn_end, &expected_conn, - &msg.proofs, + msg.proofs.height(), + msg.proofs.object_proof(), + )?; + + verify_client_proof::( + ctx, + msg.proofs.height(), + &conn_end, + client_state, + msg.proofs.height(), + client_proof, )?; + verify_consensus_proof::(ctx, msg.proofs.height(), &conn_end, &consensus_proof)?; + output.log("success: connection verification passed"); let event_attributes = Attributes { diff --git a/modules/src/core/ics03_connection/handler/conn_open_confirm.rs b/modules/src/core/ics03_connection/handler/conn_open_confirm.rs index 307c4ae5fd..3c098334e8 100644 --- a/modules/src/core/ics03_connection/handler/conn_open_confirm.rs +++ b/modules/src/core/ics03_connection/handler/conn_open_confirm.rs @@ -4,7 +4,7 @@ use crate::clients::host_functions::HostFunctionsProvider; use crate::core::ics03_connection::connection::{ConnectionEnd, Counterparty, State}; use crate::core::ics03_connection::error::Error; use crate::core::ics03_connection::events::Attributes; -use crate::core::ics03_connection::handler::verify::verify_proofs; +use crate::core::ics03_connection::handler::verify::verify_connection_proof; use crate::core::ics03_connection::handler::{ConnectionIdState, ConnectionResult}; use crate::core::ics03_connection::msgs::conn_open_confirm::MsgConnectionOpenConfirm; use crate::core::ics26_routing::context::ReaderContext; @@ -41,13 +41,13 @@ pub(crate) fn process( ); // 2. Pass the details to the verification function. 
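// Illustrative sketch, not part of this patch: one possible shape for the new
// `ClientKeeper::validate_self_client` hook added above, for a Tendermint-based host.
// The helper below is free-standing and returns a plain String error; a real
// implementation would map these checks onto the ics02 `Error` type the trait expects.
fn validate_self_client_sketch(
    host_chain_id: &ChainId,
    client_state: &AnyClientState,
) -> Result<(), String> {
    match client_state {
        AnyClientState::Tendermint(cs) => {
            // The counterparty must track this chain under the correct chain id.
            if &cs.chain_id != host_chain_id {
                return Err("counterparty client state tracks a different chain id".into());
            }
            // A client the counterparty has frozen cannot back a new handshake.
            if cs.frozen_height.is_some() {
                return Err("counterparty stores a frozen client state for this chain".into());
            }
            Ok(())
        }
        // A Tendermint host rejects client states of any other type.
        _ => Err("unexpected client type for host chain".into()),
    }
}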
- verify_proofs::( + verify_connection_proof::( ctx, - None, msg.proofs.height(), &conn_end, &expected_conn, - &msg.proofs, + msg.proofs.height(), + msg.proofs.object_proof(), )?; output.log("success: connection verification passed"); diff --git a/modules/src/core/ics03_connection/handler/conn_open_try.rs b/modules/src/core/ics03_connection/handler/conn_open_try.rs index cd138a2e05..40fd6e9cc3 100644 --- a/modules/src/core/ics03_connection/handler/conn_open_try.rs +++ b/modules/src/core/ics03_connection/handler/conn_open_try.rs @@ -5,7 +5,8 @@ use crate::core::ics03_connection::connection::{ConnectionEnd, Counterparty, Sta use crate::core::ics03_connection::error::Error; use crate::core::ics03_connection::events::Attributes; use crate::core::ics03_connection::handler::verify::{ - check_client_consensus_height, verify_proofs, + check_client_consensus_height, verify_client_proof, verify_connection_proof, + verify_consensus_proof, }; use crate::core::ics03_connection::handler::{ConnectionIdState, ConnectionResult}; use crate::core::ics03_connection::msgs::conn_open_try::MsgConnectionOpenTry; @@ -80,14 +81,44 @@ pub(crate) fn process( msg.delay_period, ); - // 2. Pass the details to the verification function. - verify_proofs::( + let client_state = msg.client_state.ok_or_else(|| { + Error::implementation_specific("client state is required in connOpenTry".into()) + })?; + + let client_proof = msg.proofs.client_proof().as_ref().ok_or_else(|| { + Error::implementation_specific("client proof is required in connOpenTry".into()) + })?; + + let consensus_proof = msg.proofs.consensus_proof().ok_or_else(|| { + Error::implementation_specific("consensus proof is required in connOpenTry".into()) + })?; + + ctx.validate_self_client(&client_state) + .map_err(Error::ics02_client)?; + + verify_connection_proof::( ctx, - msg.client_state.clone(), msg.proofs.height(), &new_connection_end, &expected_conn, - &msg.proofs, + msg.proofs.height(), + msg.proofs.object_proof(), + )?; + + verify_client_proof::( + ctx, + msg.proofs.height(), + &new_connection_end, + client_state, + msg.proofs.height(), + client_proof, + )?; + + verify_consensus_proof::( + ctx, + msg.proofs.height(), + &new_connection_end, + &consensus_proof, )?; // Transition the connection end to the new state & pick a version. diff --git a/modules/src/core/ics03_connection/handler/verify.rs b/modules/src/core/ics03_connection/handler/verify.rs index 6b2e80b445..71947d484b 100644 --- a/modules/src/core/ics03_connection/handler/verify.rs +++ b/modules/src/core/ics03_connection/handler/verify.rs @@ -1,97 +1,23 @@ //! ICS3 verification functions, common across all four handlers of ICS3. use crate::clients::host_functions::HostFunctionsProvider; +#[cfg(feature = "ics11_beefy")] use crate::core::ics02_client::client_consensus::ConsensusState; use crate::core::ics02_client::client_state::{AnyClientState, ClientState}; +use crate::core::ics02_client::client_type::ClientType; use crate::core::ics02_client::{client_def::AnyClient, client_def::ClientDef}; use crate::core::ics03_connection::connection::ConnectionEnd; use crate::core::ics03_connection::error::Error; use crate::core::ics23_commitment::commitment::CommitmentProofBytes; use crate::core::ics26_routing::context::ReaderContext; -use crate::proofs::{ConsensusProof, Proofs}; +use crate::proofs::ConsensusProof; use crate::Height; +use codec::{Decode, Encode}; -#[cfg(not(feature = "skip_host_consensus_verification"))] -/// Entry point for verifying all proofs bundled in any ICS3 message. 
-pub fn verify_proofs( - ctx: &dyn ReaderContext, - client_state: Option, - height: Height, - connection_end: &ConnectionEnd, - expected_conn: &ConnectionEnd, - proofs: &Proofs, -) -> Result<(), Error> { - verify_connection_proof::( - ctx, - height, - connection_end, - expected_conn, - proofs.height(), - proofs.object_proof(), - )?; - - // If the message includes a client state, then verify the proof for that state. - if let Some(expected_client_state) = client_state { - verify_client_proof::( - ctx, - height, - connection_end, - expected_client_state, - proofs.height(), - proofs - .client_proof() - .as_ref() - .ok_or_else(Error::null_client_proof)?, - )?; - } - - // If a consensus proof is attached to the message, then verify it. - if let Some(proof) = proofs.consensus_proof() { - Ok(verify_consensus_proof::( - ctx, - height, - connection_end, - &proof, - )?) - } else { - Ok(()) - } -} - -#[cfg(feature = "skip_host_consensus_verification")] -/// Entry point for verifying all proofs bundled in any ICS3 message. -pub fn verify_proofs( - ctx: &dyn ReaderContext, - client_state: Option, - height: Height, - connection_end: &ConnectionEnd, - expected_conn: &ConnectionEnd, - proofs: &Proofs, -) -> Result<(), Error> { - verify_connection_proof::( - ctx, - height, - connection_end, - expected_conn, - proofs.height(), - proofs.object_proof(), - )?; - - // If the message includes a client state, then verify the proof for that state. - if let Some(expected_client_state) = client_state { - verify_client_proof::( - ctx, - height, - connection_end, - expected_client_state, - proofs.height(), - proofs - .client_proof() - .as_ref() - .ok_or_else(Error::null_client_proof)?, - )?; - } - - Ok(()) +/// Connection proof type, used in relayer +#[derive(Encode, Decode)] +pub struct ConnectionProof { + pub host_proof: Vec, + pub connection_proof: Vec, } /// Verifies the authenticity and semantic correctness of a commitment `proof`. The commitment @@ -206,30 +132,53 @@ pub fn verify_consensus_proof( return Err(Error::frozen_client(connection_end.client_id().clone())); } - // Fetch the expected consensus state from the historical (local) header data. - let expected_consensus = ctx - .host_consensus_state(proof.height(), proof.proof()) - .map_err(|e| Error::consensus_state_verification_failure(proof.height(), e))?; - let consensus_state = ctx .consensus_state(connection_end.client_id(), height) .map_err(|e| Error::consensus_state_verification_failure(height, e))?; let client = AnyClient::::from_client_type(client_state.client_type()); + let (consensus_proof, expected_consensus) = match ctx.host_client_type() { + #[cfg(feature = "ics11_beefy")] + ClientType::Beefy => { + // if the host is beefy or near, we need to decode the proof before passing it on. + let connection_proof: ConnectionProof = + codec::Decode::decode(&mut proof.proof().as_bytes()).map_err(|e| { + Error::implementation_specific(format!("failed to decode: {:?}", e)) + })?; + // Fetch the expected consensus state from the historical (local) header data. 
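// Illustrative aside, not part of the patch: the relayer side would build the
// SCALE-encoded `ConnectionProof` decoded just above roughly as follows, where the
// two byte vectors are placeholders for proofs produced elsewhere:
//
//     use codec::Encode;
//     let wrapped = ConnectionProof {
//         host_proof: host_consensus_proof_bytes,  // proves the host consensus state
//         connection_proof: inner_ics23_proof,     // the ordinary ICS-23 proof being verified
//     };
//     let proof_bytes: Vec<u8> = wrapped.encode();
//
// For BEEFY (and similar) hosts `proof_bytes` is what arrives in `proof.proof()`; all
// other hosts keep sending the plain ICS-23 proof, handled by the fallback arm below.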
+ let expected_consensus = ctx + .host_consensus_state(proof.height(), Some(connection_proof.host_proof)) + .map_err(|e| Error::consensus_state_verification_failure(proof.height(), e))?; + ( + CommitmentProofBytes::try_from(connection_proof.connection_proof).map_err(|e| { + Error::implementation_specific(format!("empty proof bytes: {:?}", e)) + })?, + expected_consensus, + ) + } + _ => ( + proof.proof().clone(), + ctx.host_consensus_state(proof.height(), None) + .map_err(|e| Error::consensus_state_verification_failure(proof.height(), e))?, + ), + }; + client .verify_client_consensus_state( ctx, &client_state, height, connection_end.counterparty().prefix(), - proof.proof(), + &consensus_proof, consensus_state.root(), connection_end.counterparty().client_id(), proof.height(), &expected_consensus, ) - .map_err(|e| Error::consensus_state_verification_failure(proof.height(), e)) + .map_err(|e| Error::consensus_state_verification_failure(proof.height(), e))?; + + Ok(()) } /// Checks that `claimed_height` is within normal bounds, i.e., fresh enough so that the chain has diff --git a/modules/src/core/ics23_commitment/commitment.rs b/modules/src/core/ics23_commitment/commitment.rs index 8f25479f4e..01f898eeb3 100644 --- a/modules/src/core/ics23_commitment/commitment.rs +++ b/modules/src/core/ics23_commitment/commitment.rs @@ -55,6 +55,13 @@ pub struct CommitmentProofBytes { bytes: Vec, } +impl CommitmentProofBytes { + /// Get proof bytes + pub fn as_bytes(&self) -> &[u8] { + &self.bytes + } +} + impl fmt::Debug for CommitmentProofBytes { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let hex = Hex::upper_case().encode_to_string(&self.bytes).unwrap(); diff --git a/modules/src/core/ics26_routing/context.rs b/modules/src/core/ics26_routing/context.rs index 66555ce008..6b03ba248d 100644 --- a/modules/src/core/ics26_routing/context.rs +++ b/modules/src/core/ics26_routing/context.rs @@ -22,7 +22,7 @@ use crate::handler::HandlerOutputBuilder; use crate::signer::Signer; /// This trait captures all the functional dependencies of needed in light client implementations -pub trait ReaderContext: ClientReader + ConnectionReader + ChannelReader {} +pub trait ReaderContext: ClientKeeper + ClientReader + ConnectionReader + ChannelReader {} /// This trait captures all the functional dependencies (i.e., context) which the ICS26 module /// requires to be able to dispatch and process IBC messages. In other words, this is the diff --git a/modules/src/mock/context.rs b/modules/src/mock/context.rs index 68075ded78..6f7deeecdc 100644 --- a/modules/src/mock/context.rs +++ b/modules/src/mock/context.rs @@ -1070,6 +1070,10 @@ impl ClientReader for MockContext { } } + fn host_client_type(&self) -> ClientType { + ClientType::Tendermint + } + /// Search for the lowest consensus state higher than `height`. 
fn next_consensus_state( &self, @@ -1143,7 +1147,11 @@ impl ClientReader for MockContext { } } - fn host_consensus_state(&self, height: Height, _proof: &CommitmentProofBytes) -> Result { + fn host_consensus_state( + &self, + height: Height, + _proof: Option>, + ) -> Result { match self.host_block(height) { Some(block_ref) => Ok(block_ref.clone().into()), None => Err(Ics02Error::missing_local_consensus_state(height)), @@ -1249,6 +1257,10 @@ impl ClientKeeper for MockContext { .insert((client_id, height), host_height); Ok(()) } + + fn validate_self_client(&self, _client_state: &AnyClientState) -> Result<(), Ics02Error> { + Ok(()) + } } impl Ics18Context for MockContext { diff --git a/modules/src/test_utils.rs b/modules/src/test_utils.rs index 7035922ed6..37bb8e2e33 100644 --- a/modules/src/test_utils.rs +++ b/modules/src/test_utils.rs @@ -3,7 +3,7 @@ use std::sync::{Arc, Mutex}; use std::time::Duration; use crate::clients::host_functions::HostFunctionsProvider; -use crate::core::ics02_client::context::ClientReader; +use crate::core::ics02_client::context::{ClientKeeper, ClientReader}; use crate::core::ics03_connection::context::ConnectionReader; use crate::prelude::*; use sp_core::keccak_256; @@ -17,6 +17,7 @@ use crate::applications::transfer::context::{BankKeeper, Ics20Context, Ics20Keep use crate::applications::transfer::{error::Error as Ics20Error, PrefixedCoin}; use crate::core::ics02_client::client_consensus::AnyConsensusState; use crate::core::ics02_client::client_state::AnyClientState; +use crate::core::ics02_client::client_type::ClientType; use crate::core::ics03_connection::connection::ConnectionEnd; use crate::core::ics03_connection::error::Error as Ics03Error; use crate::core::ics04_channel::channel::{ChannelEnd, Counterparty, Order}; @@ -416,7 +417,11 @@ impl ClientReader for DummyTransferModule { Height::zero() } - fn host_consensus_state(&self, _height: Height, _proof: &CommitmentProofBytes) -> Result { + fn host_consensus_state( + &self, + _height: Height, + _proof: Option>, + ) -> Result { unimplemented!() } @@ -440,10 +445,7 @@ impl ClientReader for DummyTransferModule { } } - fn client_type( - &self, - _client_id: &ClientId, - ) -> Result { + fn client_type(&self, _client_id: &ClientId) -> Result { todo!() } @@ -470,6 +472,10 @@ impl ClientReader for DummyTransferModule { fn client_counter(&self) -> Result { todo!() } + + fn host_client_type(&self) -> ClientType { + ClientType::Tendermint + } } impl ChannelReader for DummyTransferModule { @@ -563,6 +569,59 @@ impl ChannelReader for DummyTransferModule { } } +impl ClientKeeper for DummyTransferModule { + fn store_client_type( + &mut self, + _client_id: ClientId, + _client_type: ClientType, + ) -> Result<(), Ics02Error> { + todo!() + } + + fn store_client_state( + &mut self, + _client_id: ClientId, + _client_state: AnyClientState, + ) -> Result<(), Ics02Error> { + todo!() + } + + fn store_consensus_state( + &mut self, + _client_id: ClientId, + _height: Height, + _consensus_state: AnyConsensusState, + ) -> Result<(), Ics02Error> { + todo!() + } + + fn increase_client_counter(&mut self) { + todo!() + } + + fn store_update_time( + &mut self, + _client_id: ClientId, + _height: Height, + _timestamp: Timestamp, + ) -> Result<(), Ics02Error> { + todo!() + } + + fn store_update_height( + &mut self, + _client_id: ClientId, + _height: Height, + _host_height: Height, + ) -> Result<(), Ics02Error> { + Ok(()) + } + + fn validate_self_client(&self, _client_state: &AnyClientState) -> Result<(), Ics02Error> { + Ok(()) + } +} + impl 
Ics20Context for DummyTransferModule { type AccountId = Signer; } From 5b4c2726ccda7e5e7b3c6944418b9cdd8988cddf Mon Sep 17 00:00:00 2001 From: Seun Lanlege Date: Sun, 28 Aug 2022 15:38:33 +0100 Subject: [PATCH 83/96] fix no_std --- Cargo.lock | 180 ++++++++++-------- modules/src/core/ics02_client/context.rs | 3 +- .../core/ics03_connection/handler/verify.rs | 2 + 3 files changed, 104 insertions(+), 81 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0fe291f050..7da617a98c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -47,6 +47,15 @@ dependencies = [ "memchr", ] +[[package]] +name = "android_system_properties" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d7ed72e1635e121ca3e79420540282af22da58be50de153d36f81ddc6b83aa9e" +dependencies = [ + "libc", +] + [[package]] name = "ansi_term" version = "0.12.1" @@ -58,9 +67,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.59" +version = "1.0.62" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c91f1f46651137be86f3a2b9a8359f9ab421d04d941c62b5982e1ca21113adf9" +checksum = "1485d4d2cc45e7b201ee3767015c96faa5904387c9d87c6efdd0fb511f12d305" [[package]] name = "approx" @@ -203,9 +212,9 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "axum" -version = "0.5.13" +version = "0.5.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b9496f0c1d1afb7a2af4338bbe1d969cddfead41d87a9fb3aaa6d0bbc7af648" +checksum = "9de18bc5f2e9df8f52da03856bf40e29b747de5a84e43aefff90e3dc4a21529b" dependencies = [ "async-trait", "axum-core", @@ -505,9 +514,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.10.0" +version = "3.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37ccbd214614c6783386c1af30caf03192f17891059cecc394b4fb119e363de3" +checksum = "c1ad822118d20d2c234f427000d5acc36eabe1e29a348c89b63dd60b13f28e5d" [[package]] name = "byte-slice-cast" @@ -559,14 +568,13 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "chrono" -version = "0.4.20" +version = "0.4.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6127248204b9aba09a362f6c930ef6a78f2c1b2215f8a7b398c06e1083f17af0" +checksum = "bfd4d1b31faaa3a89d7934dbded3111da0d2ef28e3ebccdb4f0179f5929d1ef1" dependencies = [ - "js-sys", + "iana-time-zone", "num-integer", "num-traits", - "wasm-bindgen", "winapi", ] @@ -587,9 +595,9 @@ dependencies = [ [[package]] name = "clap" -version = "3.2.16" +version = "3.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3dbbb6653e7c55cc8595ad3e1f7be8f32aba4eb7ff7f0fd1163d4f3d137c0a9" +checksum = "29e724a68d9319343bb3328c9cc2dfde263f4b3142ee1059a9980580171c954b" dependencies = [ "atty", "bitflags", @@ -604,18 +612,18 @@ dependencies = [ [[package]] name = "clap_complete" -version = "3.2.3" +version = "3.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ead064480dfc4880a10764488415a97fdd36a4cf1bb022d372f02e8faf8386e1" +checksum = "e4179da71abd56c26b54dd0c248cc081c1f43b0a1a7e8448e28e57a29baa993d" dependencies = [ "clap", ] [[package]] name = "clap_derive" -version = "3.2.15" +version = "3.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ba52acd3b0a5c33aeada5cdaa3267cdc7c594a98731d4268cdc1532f4264cb4" +checksum = "13547f7012c01ab4a0e8f8967730ada8f9fdf419e8b6c792788f39cf4e46eefa" dependencies 
= [ "heck 0.4.0", "proc-macro-error", @@ -705,9 +713,9 @@ checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc" [[package]] name = "cpufeatures" -version = "0.2.2" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59a6001667ab124aebae2a495118e11d30984c3a653e99d86d58971708cf5e4b" +checksum = "dc948ebb96241bb40ab73effeb80d9f93afaad49359d159a5e61be51619fe813" dependencies = [ "libc", ] @@ -1062,9 +1070,9 @@ dependencies = [ [[package]] name = "either" -version = "1.7.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f107b87b6afc2a64fd13cac55fe06d6c8859f12d4b14cbcdd2c67d0976781be" +checksum = "90e5c1c8368803113bf0c9584fc495a58b86dc8a29edbf8fe877d21d9507e797" [[package]] name = "elliptic-curve" @@ -1280,7 +1288,7 @@ version = "4.0.0-dev" source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" dependencies = [ "frame-support-procedural-tools-derive", - "proc-macro-crate 1.2.0", + "proc-macro-crate 1.2.1", "proc-macro2", "quote", "syn", @@ -1327,9 +1335,9 @@ checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" [[package]] name = "futures" -version = "0.3.21" +version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f73fe65f54d1e12b726f517d3e2135ca3125a437b6d998caf1962961f7172d9e" +checksum = "ab30e97ab6aacfe635fad58f22c2bb06c8b685f7421eb1e064a729e2a5f481fa" dependencies = [ "futures-channel", "futures-core", @@ -1342,9 +1350,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.21" +version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3083ce4b914124575708913bca19bfe887522d6e2e6d0952943f5eac4a74010" +checksum = "2bfc52cbddcfd745bf1740338492bb0bd83d76c67b445f91c5fb29fae29ecaa1" dependencies = [ "futures-core", "futures-sink", @@ -1352,15 +1360,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.21" +version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c09fd04b7e4073ac7156a9539b57a484a8ea920f79c7c675d05d289ab6110d3" +checksum = "d2acedae88d38235936c3922476b10fced7b2b68136f5e3c03c2d5be348a1115" [[package]] name = "futures-executor" -version = "0.3.21" +version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9420b90cfa29e327d0429f19be13e7ddb68fa1cccb09d65e5706b8c7a749b8a6" +checksum = "1d11aa21b5b587a64682c0094c2bdd4df0076c5324961a40cc3abd7f37930528" dependencies = [ "futures-core", "futures-task", @@ -1370,15 +1378,15 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.21" +version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc4045962a5a5e935ee2fdedaa4e08284547402885ab326734432bed5d12966b" +checksum = "93a66fc6d035a26a3ae255a6d2bca35eda63ae4c5512bef54449113f7a1228e5" [[package]] name = "futures-macro" -version = "0.3.21" +version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33c1e13800337f4d4d7a316bf45a567dbcb6ffe087f16424852d97e97a91f512" +checksum = "0db9cce532b0eae2ccf2766ab246f114b56b9cf6d445e00c2549fbc100ca045d" dependencies = [ "proc-macro2", "quote", @@ -1387,15 +1395,15 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.21" +version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"21163e139fa306126e6eedaf49ecdb4588f939600f0b1e770f4205ee4b7fa868" +checksum = "ca0bae1fe9752cf7fd9b0064c674ae63f97b37bc714d745cbde0afb7ec4e6765" [[package]] name = "futures-task" -version = "0.3.21" +version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57c66a976bf5909d801bbef33416c41372779507e7a6b3a5e25e4749c58f776a" +checksum = "842fc63b931f4056a24d59de13fb1272134ce261816e063e634ad0c15cdc5306" [[package]] name = "futures-timer" @@ -1405,9 +1413,9 @@ checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c" [[package]] name = "futures-util" -version = "0.3.21" +version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8b7abd5d659d9b90c8cba917f6ec750a74e2dc23902ef9cd4cc8c8b22e6036a" +checksum = "f0828a5471e340229c11c77ca80017937ce3c58cb788a17e5f1c2d5c485a9577" dependencies = [ "futures-channel", "futures-core", @@ -1533,9 +1541,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.13" +version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37a82c6d637fc9515a4694bbf1cb2457b79d81ce52b3108bdeea58b07dd34a57" +checksum = "5ca32592cf21ac7ccab1825cd87f6c9b3d9022c44d086172ed0966bec8af30be" dependencies = [ "bytes", "fnv", @@ -1810,6 +1818,19 @@ dependencies = [ "tokio-io-timeout", ] +[[package]] +name = "iana-time-zone" +version = "0.1.46" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad2bfd338099682614d3ee3fe0cd72e0b6a41ca6a87f6a74a3bd593c91650501" +dependencies = [ + "android_system_properties", + "core-foundation-sys", + "js-sys", + "wasm-bindgen", + "winapi", +] + [[package]] name = "ibc" version = "0.15.0" @@ -2194,7 +2215,7 @@ version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b8d7f449cab3b747f12c3efc27f5cad537f3b597c6a3838b0fac628f4bf730a" dependencies = [ - "proc-macro-crate 1.2.0", + "proc-macro-crate 1.2.1", "proc-macro2", "quote", "syn", @@ -2281,9 +2302,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.127" +version = "0.2.132" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "505e71a4706fa491e9b1b55f51b95d4037d0821ee40131190475f692b35b009b" +checksum = "8371e4e5341c3a96db127eb2465ac681ced4c433e01dd0e938adbef26ba93ba5" [[package]] name = "libgit2-sys" @@ -2301,9 +2322,9 @@ dependencies = [ [[package]] name = "libm" -version = "0.2.3" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da83a57f3f5ba3680950aa3cbc806fc297bc0b289d42e8942ed528ace71b8145" +checksum = "292a948cd991e376cf75541fe5b97a1081d713c618b4f1b9500f8844e49eb565" [[package]] name = "libsecp256k1" @@ -2391,9 +2412,9 @@ dependencies = [ [[package]] name = "lock_api" -version = "0.4.7" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "327fa5b6a6940e4699ec49a9beae1ea4845c6bab9314e4f84ac68742139d8c53" +checksum = "9f80bf5aacaf25cbfc8210d1cfb718f2bf3b11c4c54e5afe36c236853a8ec390" dependencies = [ "autocfg", "scopeguard", @@ -2729,9 +2750,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.13.0" +version = "1.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18a6dbe30758c9f83eb00cbea4ac95966305f5a7772f3f42ebfc7fc7eddbd8e1" +checksum = "074864da206b4973b84eb91683020dbefd6a8c3f0f38e054d93954e891935e4e" [[package]] name = "opaque-debug" @@ -2766,15 +2787,15 @@ 
dependencies = [ [[package]] name = "os_str_bytes" -version = "6.2.0" +version = "6.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "648001efe5d5c0102d8cea768e348da85d90af8ba91f0bea908f157951493cd4" +checksum = "9ff7415e9ae3fff1225851df9e0d9e4e5479f947619774677a63572e55e80eff" [[package]] name = "owo-colors" -version = "3.4.0" +version = "3.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "decf7381921fea4dcb2549c5667eda59b3ec297ab7e2b5fc33eac69d2e7da87b" +checksum = "c1b04fb49957986fdce4d6ee7a65027d55d4b6d2265e5848bbb507b58ccfdb6f" [[package]] name = "pallet-beefy" @@ -2906,7 +2927,7 @@ version = "3.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9299338969a3d2f491d65f140b00ddec470858402f888af98e8642fb5e8965cd" dependencies = [ - "proc-macro-crate 1.2.0", + "proc-macro-crate 1.2.1", "proc-macro2", "quote", "syn", @@ -3036,18 +3057,18 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.0.11" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78203e83c48cffbe01e4a2d35d566ca4de445d79a85372fc64e378bfc812a260" +checksum = "ad29a609b6bcd67fee905812e544992d216af9d755757c05ed2d0e15a74c6ecc" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.0.11" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "710faf75e1b33345361201d36d04e98ac1ed8909151a017ed384700836104c74" +checksum = "069bdb1e05adc7a8990dce9cc75370895fbe4e3d58b9b73bf1aee56359344a55" dependencies = [ "proc-macro2", "quote", @@ -3127,9 +3148,9 @@ dependencies = [ [[package]] name = "proc-macro-crate" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26d50bfb8c23f23915855a00d98b5a35ef2e0b871bb52937bacadb798fbb66c8" +checksum = "eda0fc3b0fb7c975631757e14d9049da17374063edb6ebbcbc54d880d4fe94e9" dependencies = [ "once_cell", "thiserror", @@ -3695,7 +3716,7 @@ version = "2.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "50e334bb10a245e28e5fd755cabcafd96cfcd167c99ae63a46924ca8d8703a3c" dependencies = [ - "proc-macro-crate 1.2.0", + "proc-macro-crate 1.2.1", "proc-macro2", "quote", "syn", @@ -3820,9 +3841,9 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.6.1" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dc14f172faf8a0194a3aded622712b0de276821addc574fa54fc0a1167e10dc" +checksum = "2bc1bb97804af6631813c55739f771071e0f2ed33ee20b68c86ec505d906356c" dependencies = [ "bitflags", "core-foundation", @@ -3849,9 +3870,9 @@ checksum = "93f6841e709003d68bb2deee8c343572bf446003ec20a583e76f7b15cebf3711" [[package]] name = "serde" -version = "1.0.142" +version = "1.0.144" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e590c437916fb6b221e1d00df6e3294f3fccd70ca7e92541c475d6ed6ef5fee2" +checksum = "0f747710de3dcd43b88c9168773254e809d8ddbdf9653b84e2554ab219f17860" dependencies = [ "serde_derive", ] @@ -3867,9 +3888,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.142" +version = "1.0.144" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34b5b8d809babe02f538c2cfec6f2c1ed10804c0e5a6a041a049a4f5588ccc2e" +checksum = "94ed3a816fb1d101812f83e789f888322c34e291f894f19590dc310963e87a00" dependencies = [ "proc-macro2", "quote", @@ -3889,9 +3910,9 @@ dependencies = [ 
[[package]] name = "serde_json" -version = "1.0.83" +version = "1.0.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38dd04e3c8279e75b31ef29dbdceebfe5ad89f4d0937213c53f7d49d01b3d5a7" +checksum = "e55a28e3aaef9d5ce0506d0a14dbba8054ddc7e499ef522dd8b26859ec9d4a44" dependencies = [ "itoa 1.0.3", "ryu", @@ -4054,9 +4075,9 @@ checksum = "2fd0db749597d91ff862fd1d55ea87f7855a744a8425a64695b6fca237d1dad1" [[package]] name = "socket2" -version = "0.4.4" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66d72b759436ae32898a2af0a14218dbf55efde3feeb170eb623637db85ee1e0" +checksum = "10c98bba371b9b22a71a9414e420f92ddeb2369239af08200816169d5e2dd7aa" dependencies = [ "libc", "winapi", @@ -4100,7 +4121,7 @@ version = "4.0.0-dev" source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" dependencies = [ "blake2", - "proc-macro-crate 1.2.0", + "proc-macro-crate 1.2.1", "proc-macro2", "quote", "syn", @@ -4607,7 +4628,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "22ecb916b9664ed9f90abef0ff5a3e61454c1efea5861b2997e03f39b59b955f" dependencies = [ "Inflector", - "proc-macro-crate 1.2.0", + "proc-macro-crate 1.2.1", "proc-macro2", "quote", "syn", @@ -4619,7 +4640,7 @@ version = "5.0.0" source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" dependencies = [ "Inflector", - "proc-macro-crate 1.2.0", + "proc-macro-crate 1.2.1", "proc-macro2", "quote", "syn", @@ -4868,9 +4889,9 @@ checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" [[package]] name = "ss58-registry" -version = "1.25.0" +version = "1.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a039906277e0d8db996cd9d1ef19278c10209d994ecfc1025ced16342873a17c" +checksum = "1c8a1e645fa0bd3e81a90e592a677f7ada3182ac338c4a71cd9ec0ba911f6abb" dependencies = [ "Inflector", "num-format", @@ -5238,11 +5259,10 @@ dependencies = [ [[package]] name = "time" -version = "0.3.12" +version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74b7cc93fc23ba97fde84f7eea56c55d1ba183f495c6715defdfc7b9cb8c870f" +checksum = "3c3f9a28b618c3a6b9251b6908e9c99e04b9e5c02e6581ccbb67d59c34ef7f9b" dependencies = [ - "js-sys", "libc", "num_threads", "time-macros", diff --git a/modules/src/core/ics02_client/context.rs b/modules/src/core/ics02_client/context.rs index 6f59f41a5e..33b58316fb 100644 --- a/modules/src/core/ics02_client/context.rs +++ b/modules/src/core/ics02_client/context.rs @@ -8,10 +8,11 @@ use crate::core::ics02_client::client_state::AnyClientState; use crate::core::ics02_client::client_type::ClientType; use crate::core::ics02_client::error::{Error, ErrorDetail}; use crate::core::ics02_client::handler::ClientResult::{self, Create, Update, Upgrade}; -use crate::core::ics23_commitment::commitment::CommitmentProofBytes; use crate::core::ics24_host::identifier::ClientId; use crate::timestamp::Timestamp; use crate::Height; +use sp_std::vec::Vec; + /// Defines the read-only part of ICS2 (client functions) context. 
pub trait ClientReader { diff --git a/modules/src/core/ics03_connection/handler/verify.rs b/modules/src/core/ics03_connection/handler/verify.rs index 71947d484b..946b50902a 100644 --- a/modules/src/core/ics03_connection/handler/verify.rs +++ b/modules/src/core/ics03_connection/handler/verify.rs @@ -12,6 +12,8 @@ use crate::core::ics26_routing::context::ReaderContext; use crate::proofs::ConsensusProof; use crate::Height; use codec::{Decode, Encode}; +use alloc::format; +use sp_std::vec::Vec; /// Connection proof type, used in relayer #[derive(Encode, Decode)] From 049e0c6dfb3bdd836a6a1dd7ebff1cf4ef8ae34f Mon Sep 17 00:00:00 2001 From: Seun Lanlege Date: Mon, 29 Aug 2022 12:57:06 +0100 Subject: [PATCH 84/96] bump beefy-rs --- Cargo.lock | 6 +++--- modules/Cargo.toml | 10 +++++----- modules/src/core/ics02_client/handler/update_client.rs | 8 +++++++- 3 files changed, 15 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7da617a98c..769dd0cdd3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -298,7 +298,7 @@ dependencies = [ [[package]] name = "beefy-light-client" version = "0.1.0" -source = "git+https://github.com/ComposableFi/beefy-rs?rev=ea3c74c7c1f959ba1b90e2508b1b6c1fd9afc7ee#ea3c74c7c1f959ba1b90e2508b1b6c1fd9afc7ee" +source = "git+https://github.com/ComposableFi/beefy-rs?rev=78ee13ceca9df7f2776a894850454648abd67e6a#78ee13ceca9df7f2776a894850454648abd67e6a" dependencies = [ "beefy-primitives", "ckb-merkle-mountain-range", @@ -345,7 +345,7 @@ dependencies = [ [[package]] name = "beefy-prover" version = "0.1.0" -source = "git+https://github.com/ComposableFi/beefy-rs?rev=ea3c74c7c1f959ba1b90e2508b1b6c1fd9afc7ee#ea3c74c7c1f959ba1b90e2508b1b6c1fd9afc7ee" +source = "git+https://github.com/ComposableFi/beefy-rs?rev=78ee13ceca9df7f2776a894850454648abd67e6a#78ee13ceca9df7f2776a894850454648abd67e6a" dependencies = [ "beefy-primitives", "color-eyre", @@ -3125,7 +3125,7 @@ dependencies = [ [[package]] name = "primitives" version = "0.1.0" -source = "git+https://github.com/ComposableFi/beefy-rs?rev=ea3c74c7c1f959ba1b90e2508b1b6c1fd9afc7ee#ea3c74c7c1f959ba1b90e2508b1b6c1fd9afc7ee" +source = "git+https://github.com/ComposableFi/beefy-rs?rev=78ee13ceca9df7f2776a894850454648abd67e6a#78ee13ceca9df7f2776a894850454648abd67e6a" dependencies = [ "beefy-primitives", "ckb-merkle-mountain-range", diff --git a/modules/Cargo.toml b/modules/Cargo.toml index 64b553f7ec..5575cb5a67 100644 --- a/modules/Cargo.toml +++ b/modules/Cargo.toml @@ -61,8 +61,8 @@ flex-error = { version = "0.4.4", default-features = false } num-traits = { version = "0.2.15", default-features = false } derive_more = { version = "0.99.17", default-features = false, features = ["from", "into", "display"] } uint = { version = "0.9", default-features = false } -beefy-client = { package = "beefy-light-client", git = "https://github.com/ComposableFi/beefy-rs", rev = "ea3c74c7c1f959ba1b90e2508b1b6c1fd9afc7ee", default-features = false, optional = true } -beefy-client-primitives = { package = "primitives", git = "https://github.com/ComposableFi/beefy-rs", rev = "ea3c74c7c1f959ba1b90e2508b1b6c1fd9afc7ee", default-features = false, optional = true } +beefy-client = { package = "beefy-light-client", git = "https://github.com/ComposableFi/beefy-rs", rev = "78ee13ceca9df7f2776a894850454648abd67e6a", default-features = false, optional = true } +beefy-client-primitives = { package = "primitives", git = "https://github.com/ComposableFi/beefy-rs", rev = "78ee13ceca9df7f2776a894850454648abd67e6a", default-features = false, optional = true } 
pallet-mmr-primitives = { package = "sp-mmr-primitives", git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.24", default-features = false, optional = true } beefy-primitives = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.24", default-features = false, optional = true } codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false } @@ -116,11 +116,11 @@ sp-io = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0 subxt = { git = "https://github.com/paritytech/subxt", rev = "ec23283d75e4b3b894294e351fd7ffa2b4431201" } tokio = { version = "1.17.0", features = ["full"] } serde_json = "1.0.74" -beefy-client = { package = "beefy-light-client", git = "https://github.com/ComposableFi/beefy-rs", rev = "ea3c74c7c1f959ba1b90e2508b1b6c1fd9afc7ee" } -beefy-queries = { package = "beefy-prover", git = "https://github.com/ComposableFi/beefy-rs", rev = "ea3c74c7c1f959ba1b90e2508b1b6c1fd9afc7ee" } +beefy-client = { package = "beefy-light-client", git = "https://github.com/ComposableFi/beefy-rs", rev = "78ee13ceca9df7f2776a894850454648abd67e6a" } +beefy-queries = { package = "beefy-prover", git = "https://github.com/ComposableFi/beefy-rs", rev = "78ee13ceca9df7f2776a894850454648abd67e6a" } sha3 = { version = "0.10.1" } ripemd = { version = "0.1.1" } -beefy-client-primitives = { package = "primitives", git = "https://github.com/ComposableFi/beefy-rs", rev = "ea3c74c7c1f959ba1b90e2508b1b6c1fd9afc7ee" } +beefy-client-primitives = { package = "primitives", git = "https://github.com/ComposableFi/beefy-rs", rev = "78ee13ceca9df7f2776a894850454648abd67e6a" } beefy-primitives = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.24" } pallet-mmr-primitives = { package = "sp-mmr-primitives", git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.24" } codec = { package = "parity-scale-codec", version = "3.0.0"} diff --git a/modules/src/core/ics02_client/handler/update_client.rs b/modules/src/core/ics02_client/handler/update_client.rs index 3ee503a322..795e2e16d7 100644 --- a/modules/src/core/ics02_client/handler/update_client.rs +++ b/modules/src/core/ics02_client/handler/update_client.rs @@ -124,6 +124,7 @@ pub fn process( #[cfg(test)] mod tests { use core::str::FromStr; + use subxt::sp_runtime::traits::Header; use test_log::test; use crate::core::ics02_client::client_consensus::{AnyConsensusState, ConsensusState}; @@ -744,10 +745,15 @@ mod tests { ); let block_number = signed_commitment.commitment.block_number; + let headers = client_wrapper + .query_finalized_parachain_headers_at(block_number, client_state.latest_beefy_height) + .await + .unwrap(); let (parachain_headers, batch_proof) = client_wrapper - .fetch_finalized_parachain_headers_at( + .query_finalized_parachain_headers_with_proof( block_number, client_state.latest_beefy_height, + headers.iter().map(|h| *h.number()).collect(), ) .await .unwrap(); From 6e5daff0bc37553557751ef3781a1a89c8c781ba Mon Sep 17 00:00:00 2001 From: Web3 Philosopher Date: Tue, 30 Aug 2022 17:58:37 +0100 Subject: [PATCH 85/96] update beefy proto (#58) * wip * bump ibc-rs * update ibc proto * update proto files to ibc-go v4.0.0 * update beefy go proto Co-authored-by: David Salami --- Cargo.lock | 7 +- help | 0 modules/Cargo.toml | 11 +- .../clients/ics07_tendermint/client_state.rs | 37 +-- modules/src/clients/ics11_beefy/client_def.rs | 149 ++++++------ modules/src/clients/ics11_beefy/header.rs | 217 ++++++++++-------- 
modules/src/core/ics02_client/context.rs | 3 +- .../ics02_client/handler/create_client.rs | 8 +- .../ics02_client/handler/update_client.rs | 208 +++++++++-------- .../ics03_connection/handler/conn_open_try.rs | 78 ++----- .../core/ics03_connection/handler/verify.rs | 8 +- .../ics03_connection/msgs/conn_open_ack.rs | 7 +- .../ics03_connection/msgs/conn_open_try.rs | 37 +-- .../ics04_channel/handler/chan_open_try.rs | 154 ++----------- .../core/ics04_channel/msgs/chan_open_try.rs | 39 +--- modules/src/mock/context.rs | 2 +- modules/src/test_utils.rs | 1 - modules/tests/runner/mod.rs | 3 +- proto-compiler/src/cmd/clone.rs | 50 +++- proto-compiler/src/cmd/compile.rs | 69 ++++++ proto/src/COMPOSABLE_IBC_GO_COMMIT | 1 + proto/src/COSMOS_SDK_COMMIT | 2 +- proto/src/IBC_GO_COMMIT | 2 +- proto/src/prost/cosmos.bank.v1beta1.rs | 97 ++++++++ .../prost/cosmos.base.snapshots.v1beta1.rs | 56 +++++ proto/src/prost/cosmos.base.store.v1beta1.rs | 36 --- proto/src/prost/cosmos.staking.v1beta1.rs | 6 +- proto/src/prost/cosmos.tx.signing.v1beta1.rs | 11 + proto/src/prost/cosmos.tx.v1beta1.rs | 102 +++++++- proto/src/prost/ibc.core.channel.v1.rs | 24 +- proto/src/prost/ibc.core.client.v1.rs | 98 ++++++++ proto/src/prost/ibc.core.connection.v1.rs | 4 +- proto/src/prost/ibc.lightclients.beefy.v1.rs | 19 +- .../prost/ibc.lightclients.tendermint.v1.rs | 8 +- proto/src/prost/ics23.rs | 1 - scripts/sync-protobuf.sh | 31 ++- 36 files changed, 954 insertions(+), 632 deletions(-) create mode 100644 help create mode 100644 proto/src/COMPOSABLE_IBC_GO_COMMIT diff --git a/Cargo.lock b/Cargo.lock index 769dd0cdd3..2d571e8287 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -298,7 +298,7 @@ dependencies = [ [[package]] name = "beefy-light-client" version = "0.1.0" -source = "git+https://github.com/ComposableFi/beefy-rs?rev=78ee13ceca9df7f2776a894850454648abd67e6a#78ee13ceca9df7f2776a894850454648abd67e6a" +source = "git+https://github.com/ComposableFi/beefy-rs?rev=cb8cadc8d45bc444367002c77cbd395eff8a741c#cb8cadc8d45bc444367002c77cbd395eff8a741c" dependencies = [ "beefy-primitives", "ckb-merkle-mountain-range", @@ -345,7 +345,7 @@ dependencies = [ [[package]] name = "beefy-prover" version = "0.1.0" -source = "git+https://github.com/ComposableFi/beefy-rs?rev=78ee13ceca9df7f2776a894850454648abd67e6a#78ee13ceca9df7f2776a894850454648abd67e6a" +source = "git+https://github.com/ComposableFi/beefy-rs?rev=cb8cadc8d45bc444367002c77cbd395eff8a741c#cb8cadc8d45bc444367002c77cbd395eff8a741c" dependencies = [ "beefy-primitives", "color-eyre", @@ -1843,6 +1843,7 @@ dependencies = [ "derive_more", "env_logger", "flex-error", + "frame-support", "ibc-proto", "ics23", "modelator", @@ -3125,7 +3126,7 @@ dependencies = [ [[package]] name = "primitives" version = "0.1.0" -source = "git+https://github.com/ComposableFi/beefy-rs?rev=78ee13ceca9df7f2776a894850454648abd67e6a#78ee13ceca9df7f2776a894850454648abd67e6a" +source = "git+https://github.com/ComposableFi/beefy-rs?rev=cb8cadc8d45bc444367002c77cbd395eff8a741c#cb8cadc8d45bc444367002c77cbd395eff8a741c" dependencies = [ "beefy-primitives", "ckb-merkle-mountain-range", diff --git a/help b/help new file mode 100644 index 0000000000..e69de29bb2 diff --git a/modules/Cargo.toml b/modules/Cargo.toml index 5575cb5a67..ed18de365d 100644 --- a/modules/Cargo.toml +++ b/modules/Cargo.toml @@ -61,8 +61,8 @@ flex-error = { version = "0.4.4", default-features = false } num-traits = { version = "0.2.15", default-features = false } derive_more = { version = "0.99.17", default-features = false, features = ["from", 
"into", "display"] } uint = { version = "0.9", default-features = false } -beefy-client = { package = "beefy-light-client", git = "https://github.com/ComposableFi/beefy-rs", rev = "78ee13ceca9df7f2776a894850454648abd67e6a", default-features = false, optional = true } -beefy-client-primitives = { package = "primitives", git = "https://github.com/ComposableFi/beefy-rs", rev = "78ee13ceca9df7f2776a894850454648abd67e6a", default-features = false, optional = true } +beefy-client = { package = "beefy-light-client", git = "https://github.com/ComposableFi/beefy-rs", rev = "cb8cadc8d45bc444367002c77cbd395eff8a741c", default-features = false, optional = true } +beefy-client-primitives = { package = "primitives", git = "https://github.com/ComposableFi/beefy-rs", rev = "cb8cadc8d45bc444367002c77cbd395eff8a741c", default-features = false, optional = true } pallet-mmr-primitives = { package = "sp-mmr-primitives", git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.24", default-features = false, optional = true } beefy-primitives = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.24", default-features = false, optional = true } codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false } @@ -116,11 +116,11 @@ sp-io = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0 subxt = { git = "https://github.com/paritytech/subxt", rev = "ec23283d75e4b3b894294e351fd7ffa2b4431201" } tokio = { version = "1.17.0", features = ["full"] } serde_json = "1.0.74" -beefy-client = { package = "beefy-light-client", git = "https://github.com/ComposableFi/beefy-rs", rev = "78ee13ceca9df7f2776a894850454648abd67e6a" } -beefy-queries = { package = "beefy-prover", git = "https://github.com/ComposableFi/beefy-rs", rev = "78ee13ceca9df7f2776a894850454648abd67e6a" } +beefy-client = { package = "beefy-light-client", git = "https://github.com/ComposableFi/beefy-rs", rev = "cb8cadc8d45bc444367002c77cbd395eff8a741c" } +beefy-queries = { package = "beefy-prover", git = "https://github.com/ComposableFi/beefy-rs", rev = "cb8cadc8d45bc444367002c77cbd395eff8a741c" } sha3 = { version = "0.10.1" } ripemd = { version = "0.1.1" } -beefy-client-primitives = { package = "primitives", git = "https://github.com/ComposableFi/beefy-rs", rev = "78ee13ceca9df7f2776a894850454648abd67e6a" } +beefy-client-primitives = { package = "primitives", git = "https://github.com/ComposableFi/beefy-rs", rev = "cb8cadc8d45bc444367002c77cbd395eff8a741c" } beefy-primitives = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.24" } pallet-mmr-primitives = { package = "sp-mmr-primitives", git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.24" } codec = { package = "parity-scale-codec", version = "3.0.0"} @@ -128,6 +128,7 @@ sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkad sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.24" } sp-std = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.24" } sp-trie = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.24" } +frame-support = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.24" } [[test]] name = "mbt" diff --git a/modules/src/clients/ics07_tendermint/client_state.rs b/modules/src/clients/ics07_tendermint/client_state.rs index 5205b13687..81502fffbd 100644 --- a/modules/src/clients/ics07_tendermint/client_state.rs +++ 
b/modules/src/clients/ics07_tendermint/client_state.rs @@ -30,16 +30,9 @@ pub struct ClientState { pub latest_height: Height, pub proof_specs: ProofSpecs, pub upgrade_path: Vec, - pub allow_update: AllowUpdate, pub frozen_height: Option, } -#[derive(Copy, Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct AllowUpdate { - pub after_expiry: bool, - pub after_misbehaviour: bool, -} - impl Protobuf for ClientState {} impl ClientState { @@ -53,7 +46,6 @@ impl ClientState { latest_height: Height, proof_specs: ProofSpecs, upgrade_path: Vec, - allow_update: AllowUpdate, ) -> Result { // Basic validation of trusting period and unbonding period: each should be non-zero. if trusting_period <= Duration::new(0, 0) { @@ -108,7 +100,6 @@ impl ClientState { latest_height, proof_specs, upgrade_path, - allow_update, frozen_height: None, }) } @@ -237,8 +228,6 @@ impl crate::core::ics02_client::client_state::ClientState for ClientState { // Reset custom fields to zero values self.trusting_period = ZERO_DURATION; self.trust_level = TrustThreshold::ZERO; - self.allow_update.after_expiry = false; - self.allow_update.after_misbehaviour = false; self.frozen_height = None; self.max_clock_drift = ZERO_DURATION; @@ -299,10 +288,6 @@ impl TryFrom for ClientState { .into(), frozen_height, upgrade_path: raw.upgrade_path, - allow_update: AllowUpdate { - after_expiry: raw.allow_update_after_expiry, - after_misbehaviour: raw.allow_update_after_misbehaviour, - }, proof_specs: raw.proof_specs.into(), }) } @@ -319,9 +304,8 @@ impl From for RawClientState { frozen_height: Some(value.frozen_height.unwrap_or_else(Height::zero).into()), latest_height: Some(value.latest_height.into()), proof_specs: value.proof_specs.into(), - allow_update_after_expiry: value.allow_update.after_expiry, - allow_update_after_misbehaviour: value.allow_update.after_misbehaviour, upgrade_path: value.upgrade_path, + ..Default::default() } } } @@ -336,7 +320,7 @@ mod tests { use ibc_proto::ics23::ProofSpec as Ics23ProofSpec; use tendermint_rpc::endpoint::abci_query::AbciQuery; - use crate::clients::ics07_tendermint::client_state::{AllowUpdate, ClientState}; + use crate::clients::ics07_tendermint::client_state::ClientState; use crate::core::ics02_client::trust_threshold::TrustThreshold; use crate::core::ics23_commitment::specs::ProofSpecs; use crate::core::ics24_host::identifier::ChainId; @@ -353,7 +337,6 @@ mod tests { latest_height: Height, proof_specs: ProofSpecs, upgrade_path: Vec, - allow_update: AllowUpdate, } #[test] @@ -382,10 +365,6 @@ mod tests { latest_height: Height::new(0, 10), proof_specs: ProofSpecs::default(), upgrade_path: vec!["".to_string()], - allow_update: AllowUpdate { - after_expiry: false, - after_misbehaviour: false, - }, }; struct Test { @@ -465,7 +444,6 @@ mod tests { p.latest_height, p.proof_specs, p.upgrade_path, - p.allow_update, ); assert_eq!( @@ -569,10 +547,6 @@ mod tests { latest_height: Height::new(1, 10), proof_specs: ProofSpecs::default(), upgrade_path: vec!["".to_string()], - allow_update: AllowUpdate { - after_expiry: false, - after_misbehaviour: false, - }, }; struct Test { @@ -616,7 +590,6 @@ mod tests { p.latest_height, p.proof_specs, p.upgrade_path, - p.allow_update, ) .unwrap(); let client_state = match test.setup { @@ -644,7 +617,7 @@ pub mod test_util { use tendermint::block::Header; - use crate::clients::ics07_tendermint::client_state::{AllowUpdate, ClientState}; + use crate::clients::ics07_tendermint::client_state::ClientState; use crate::core::ics02_client::client_state::AnyClientState; use 
crate::core::ics02_client::height::Height; use crate::core::ics24_host::identifier::ChainId; @@ -663,10 +636,6 @@ pub mod test_util { ), Default::default(), vec!["".to_string()], - AllowUpdate { - after_expiry: false, - after_misbehaviour: false, - }, ) .unwrap(), ) diff --git a/modules/src/clients/ics11_beefy/client_def.rs b/modules/src/clients/ics11_beefy/client_def.rs index dd15818248..9d3fe1f103 100644 --- a/modules/src/clients/ics11_beefy/client_def.rs +++ b/modules/src/clients/ics11_beefy/client_def.rs @@ -80,54 +80,57 @@ impl ClientDef for BeefyClient>(); - - let leaf_count = - (client_state.to_leaf_index(light_client_state.latest_beefy_height) + 1) as u64; - - let parachain_update_proof = ParachainsUpdateProof { - parachain_headers, - mmr_proof: BatchProof { - leaf_indices, - leaf_count, - items: header - .mmr_proofs - .into_iter() - .map(|item| { - H256::decode(&mut &*item).map_err(|e| { - Error::beefy(BeefyError::invalid_mmr_update(format!("{:?}", e))) + if let Some(headers_with_proof) = header.headers_with_proof { + let mut leaf_indices = vec![]; + let parachain_headers = headers_with_proof + .headers + .into_iter() + .map(|header| { + let leaf_index = client_state + .to_leaf_index(header.partial_mmr_leaf.parent_number_and_hash.0 + 1); + leaf_indices.push(leaf_index as u64); + ParachainHeader { + parachain_header: header.parachain_header.encode(), + partial_mmr_leaf: header.partial_mmr_leaf, + para_id: client_state.para_id, + parachain_heads_proof: header.parachain_heads_proof, + heads_leaf_index: header.heads_leaf_index, + heads_total_count: header.heads_total_count, + extrinsic_proof: header.extrinsic_proof, + timestamp_extrinsic: header.timestamp_extrinsic, + } + }) + .collect::>(); + + let leaf_count = + (client_state.to_leaf_index(light_client_state.latest_beefy_height) + 1) as u64; + + let parachain_update_proof = ParachainsUpdateProof { + parachain_headers, + mmr_proof: BatchProof { + leaf_indices, + leaf_count, + items: headers_with_proof + .mmr_proofs + .into_iter() + .map(|item| { + H256::decode(&mut &*item).map_err(|e| { + Error::beefy(BeefyError::invalid_mmr_update(format!("{:?}", e))) + }) }) - }) - .collect::, _>>()?, - }, - }; + .collect::, _>>()?, + }, + }; - // Perform the parachain header verification - beefy_client::verify_parachain_headers::>( - light_client_state, - parachain_update_proof, - ) - .map_err(|e| Error::beefy(BeefyError::invalid_mmr_update(format!("{:?}", e)))) + // Perform the parachain header verification + beefy_client::verify_parachain_headers::>( + light_client_state, + parachain_update_proof, + ) + .map_err(|e| Error::beefy(BeefyError::invalid_mmr_update(format!("{:?}", e))))? 
+ } + + Ok(()) } fn update_state( @@ -143,26 +146,29 @@ impl ClientDef for BeefyClient ClientDef for BeefyClient Result { let latest_para_height = header - .parachain_headers - .into_iter() - .map(|header| header.parachain_header.number) - .max(); + .headers_with_proof + .map(|headers| { + headers + .headers + .into_iter() + .map(|header| header.parachain_header.number) + .max() + }) + .flatten(); let frozen_height = latest_para_height .map(|height| Height::new(client_state.para_id.into(), height.into())) .unwrap_or(Height::new( diff --git a/modules/src/clients/ics11_beefy/header.rs b/modules/src/clients/ics11_beefy/header.rs index 8315d92b25..56ad47a947 100644 --- a/modules/src/clients/ics11_beefy/header.rs +++ b/modules/src/clients/ics11_beefy/header.rs @@ -18,9 +18,9 @@ use bytes::Buf; use codec::{Compact, Decode, Encode}; use ibc_proto::ibc::lightclients::beefy::v1::{ BeefyAuthoritySet as RawBeefyAuthoritySet, BeefyMmrLeaf as RawBeefyMmrLeaf, - BeefyMmrLeafPartial as RawBeefyMmrLeafPartial, Commitment as RawCommitment, - CommitmentSignature, Header as RawBeefyHeader, MmrUpdateProof as RawMmrUpdateProof, - PayloadItem, SignedCommitment as RawSignedCommitment, + BeefyMmrLeafPartial as RawBeefyMmrLeafPartial, ClientStateUpdateProof as RawMmrUpdateProof, + Commitment as RawCommitment, CommitmentSignature, ConsensusStateUpdateProof, + Header as RawBeefyHeader, PayloadItem, SignedCommitment as RawSignedCommitment, }; use pallet_mmr_primitives::Proof; use sp_core::H256; @@ -30,12 +30,17 @@ use sp_runtime::traits::{BlakeTwo256, SaturatedConversion}; /// Beefy consensus header #[derive(Clone, PartialEq, Eq, Debug)] pub struct BeefyHeader { - pub parachain_headers: Vec, // contains the parachain headers - pub mmr_proofs: Vec>, // mmr proofs for these headers - pub mmr_size: u64, // The latest mmr size + pub headers_with_proof: Option, pub mmr_update_proof: Option, // Proof for updating the latest mmr root hash } +#[derive(Clone, PartialEq, Eq, Debug)] +pub struct ParachainHeadersWithProof { + pub headers: Vec, // contains the parachain headers + pub mmr_proofs: Vec>, // mmr proofs for these headers + pub mmr_size: u64, // The latest mmr size +} + impl crate::core::ics02_client::header::Header for BeefyHeader { fn client_type(&self) -> ClientType { ClientType::Beefy @@ -77,59 +82,70 @@ impl TryFrom for BeefyHeader { type Error = Error; fn try_from(raw_header: RawBeefyHeader) -> Result { - let parachain_headers = raw_header - .parachain_headers - .into_iter() - .map(|raw_para_header| { - let mmr_partial_leaf = raw_para_header - .mmr_leaf_partial - .ok_or_else(Error::invalid_raw_header)?; - let parent_hash = - H256::decode(&mut mmr_partial_leaf.parent_hash.as_slice()).unwrap(); - let beefy_next_authority_set = - if let Some(next_set) = mmr_partial_leaf.beefy_next_authority_set { - BeefyNextAuthoritySet { - id: next_set.id, - len: next_set.len, - root: H256::decode(&mut next_set.authority_root.as_slice()) - .map_err(|e| Error::invalid_mmr_update(e.to_string()))?, - } - } else { - Default::default() - }; - Ok(ParachainHeader { - parachain_header: decode_parachain_header(raw_para_header.parachain_header) - .map_err(|_| Error::invalid_raw_header())?, - partial_mmr_leaf: PartialMmrLeaf { - version: { - let (major, minor) = - split_leaf_version(mmr_partial_leaf.version.saturated_into::()); - MmrLeafVersion::new(major, minor) - }, - parent_number_and_hash: (mmr_partial_leaf.parent_number, parent_hash), - beefy_next_authority_set, - }, - parachain_heads_proof: raw_para_header - .parachain_heads_proof - 
.into_iter() - .map(|item| { - let mut dest = [0u8; 32]; - if item.len() != 32 { - return Err(Error::invalid_raw_header()); + let headers_with_proof = raw_header.consensus_state.map(|consensus_update| { + let parachain_headers = consensus_update + .parachain_headers + .into_iter() + .map(|raw_para_header| { + let mmr_partial_leaf = raw_para_header + .mmr_leaf_partial + .ok_or_else(Error::invalid_raw_header)?; + let parent_hash = + H256::decode(&mut mmr_partial_leaf.parent_hash.as_slice()).unwrap(); + let beefy_next_authority_set = + if let Some(next_set) = mmr_partial_leaf.beefy_next_authority_set { + BeefyNextAuthoritySet { + id: next_set.id, + len: next_set.len, + root: H256::decode(&mut next_set.authority_root.as_slice()) + .map_err(|e| Error::invalid_mmr_update(e.to_string()))?, } - dest.copy_from_slice(&*item); - Ok(dest) - }) - .collect::, Error>>()?, - heads_leaf_index: raw_para_header.heads_leaf_index, - heads_total_count: raw_para_header.heads_total_count, - extrinsic_proof: raw_para_header.extrinsic_proof, - timestamp_extrinsic: raw_para_header.timestamp_extrinsic, + } else { + Default::default() + }; + Ok(ParachainHeader { + parachain_header: decode_parachain_header(raw_para_header.parachain_header) + .map_err(|_| Error::invalid_raw_header())?, + partial_mmr_leaf: PartialMmrLeaf { + version: { + let (major, minor) = split_leaf_version( + mmr_partial_leaf.version.saturated_into::(), + ); + MmrLeafVersion::new(major, minor) + }, + parent_number_and_hash: (mmr_partial_leaf.parent_number, parent_hash), + beefy_next_authority_set, + }, + parachain_heads_proof: raw_para_header + .parachain_heads_proof + .into_iter() + .map(|item| { + let mut dest = [0u8; 32]; + if item.len() != 32 { + return Err(Error::invalid_raw_header()); + } + dest.copy_from_slice(&*item); + Ok(dest) + }) + .collect::, Error>>()?, + heads_leaf_index: raw_para_header.heads_leaf_index, + heads_total_count: raw_para_header.heads_total_count, + extrinsic_proof: raw_para_header.extrinsic_proof, + timestamp_extrinsic: raw_para_header.timestamp_extrinsic, + }) }) + .collect::, Error>>() + .ok(); + parachain_headers.map(|parachain_headers| { + ParachainHeadersWithProof { + headers: parachain_headers, + mmr_proofs: consensus_update.mmr_proofs, + mmr_size: consensus_update.mmr_size, + } }) - .collect::, Error>>()?; + }).flatten(); - let mmr_update_proof = if let Some(mmr_update) = raw_header.mmr_update_proof { + let mmr_update_proof = if let Some(mmr_update) = raw_header.client_state { let commitment = mmr_update .signed_commitment .as_ref() @@ -253,9 +269,7 @@ impl TryFrom for BeefyHeader { }; Ok(Self { - parachain_headers, - mmr_proofs: raw_header.mmr_proofs, - mmr_size: raw_header.mmr_size, + headers_with_proof, mmr_update_proof, }) } @@ -264,48 +278,57 @@ impl TryFrom for BeefyHeader { impl From for RawBeefyHeader { fn from(beefy_header: BeefyHeader) -> Self { Self { - parachain_headers: beefy_header - .parachain_headers - .into_iter() - .map( - |para_header| ibc_proto::ibc::lightclients::beefy::v1::ParachainHeader { - parachain_header: para_header.parachain_header.encode(), - mmr_leaf_partial: Some(RawBeefyMmrLeafPartial { - version: { - let (major, minor) = para_header.partial_mmr_leaf.version.split(); - merge_leaf_version(major, minor) as u32 - }, - parent_number: para_header.partial_mmr_leaf.parent_number_and_hash.0, - parent_hash: para_header - .partial_mmr_leaf - .parent_number_and_hash - .1 - .encode(), - beefy_next_authority_set: Some(RawBeefyAuthoritySet { - id: 
para_header.partial_mmr_leaf.beefy_next_authority_set.id, - len: para_header.partial_mmr_leaf.beefy_next_authority_set.len, - authority_root: para_header + consensus_state: beefy_header.headers_with_proof.map(|headers| { + let parachain_headers = headers + .headers + .into_iter() + .map( + |para_header| ibc_proto::ibc::lightclients::beefy::v1::ParachainHeader { + parachain_header: para_header.parachain_header.encode(), + mmr_leaf_partial: Some(RawBeefyMmrLeafPartial { + version: { + let (major, minor) = + para_header.partial_mmr_leaf.version.split(); + merge_leaf_version(major, minor) as u32 + }, + parent_number: para_header + .partial_mmr_leaf + .parent_number_and_hash + .0, + parent_hash: para_header .partial_mmr_leaf - .beefy_next_authority_set - .root + .parent_number_and_hash + .1 .encode(), + beefy_next_authority_set: Some(RawBeefyAuthoritySet { + id: para_header.partial_mmr_leaf.beefy_next_authority_set.id, + len: para_header.partial_mmr_leaf.beefy_next_authority_set.len, + authority_root: para_header + .partial_mmr_leaf + .beefy_next_authority_set + .root + .encode(), + }), }), - }), - parachain_heads_proof: para_header - .parachain_heads_proof - .into_iter() - .map(|item| item.to_vec()) - .collect(), - heads_leaf_index: para_header.heads_leaf_index, - heads_total_count: para_header.heads_total_count, - extrinsic_proof: para_header.extrinsic_proof, - timestamp_extrinsic: para_header.timestamp_extrinsic, - }, - ) - .collect(), - mmr_proofs: beefy_header.mmr_proofs, - mmr_size: beefy_header.mmr_size, - mmr_update_proof: if let Some(mmr_update) = beefy_header.mmr_update_proof { + parachain_heads_proof: para_header + .parachain_heads_proof + .into_iter() + .map(|item| item.to_vec()) + .collect(), + heads_leaf_index: para_header.heads_leaf_index, + heads_total_count: para_header.heads_total_count, + extrinsic_proof: para_header.extrinsic_proof, + timestamp_extrinsic: para_header.timestamp_extrinsic, + }, + ) + .collect(); + ConsensusStateUpdateProof { + parachain_headers, + mmr_proofs: headers.mmr_proofs, + mmr_size: headers.mmr_size, + } + }), + client_state: if let Some(mmr_update) = beefy_header.mmr_update_proof { Some(RawMmrUpdateProof { mmr_leaf: Some(RawBeefyMmrLeaf { version: { diff --git a/modules/src/core/ics02_client/context.rs b/modules/src/core/ics02_client/context.rs index 33b58316fb..92b753e95d 100644 --- a/modules/src/core/ics02_client/context.rs +++ b/modules/src/core/ics02_client/context.rs @@ -11,8 +11,7 @@ use crate::core::ics02_client::handler::ClientResult::{self, Create, Update, Upg use crate::core::ics24_host::identifier::ClientId; use crate::timestamp::Timestamp; use crate::Height; -use sp_std::vec::Vec; - +use alloc::vec::Vec; /// Defines the read-only part of ICS2 (client functions) context. 
pub trait ClientReader { diff --git a/modules/src/core/ics02_client/handler/create_client.rs b/modules/src/core/ics02_client/handler/create_client.rs index a2c39654d3..94e5d79b2c 100644 --- a/modules/src/core/ics02_client/handler/create_client.rs +++ b/modules/src/core/ics02_client/handler/create_client.rs @@ -73,9 +73,7 @@ mod tests { use core::time::Duration; use test_log::test; - use crate::clients::ics07_tendermint::client_state::{ - AllowUpdate, ClientState as TendermintClientState, - }; + use crate::clients::ics07_tendermint::client_state::ClientState as TendermintClientState; use crate::clients::ics07_tendermint::header::test_util::get_dummy_tendermint_header; use crate::core::ics02_client::client_consensus::AnyConsensusState; use crate::core::ics02_client::client_state::ClientState; @@ -248,10 +246,6 @@ mod tests { Height::new(0, u64::from(tm_header.height)), ProofSpecs::default(), vec!["".to_string()], - AllowUpdate { - after_expiry: false, - after_misbehaviour: false, - }, ) .unwrap() .wrap_any(); diff --git a/modules/src/core/ics02_client/handler/update_client.rs b/modules/src/core/ics02_client/handler/update_client.rs index 795e2e16d7..9612148b16 100644 --- a/modules/src/core/ics02_client/handler/update_client.rs +++ b/modules/src/core/ics02_client/handler/update_client.rs @@ -123,6 +123,7 @@ pub fn process( #[cfg(test)] mod tests { + use crate::clients::ics11_beefy::header::ParachainHeadersWithProof; use core::str::FromStr; use subxt::sp_runtime::traits::Header; use test_log::test; @@ -567,7 +568,6 @@ mod tests { #[tokio::test] async fn test_continuous_update_of_beefy_client() { use crate::clients::ics11_beefy::client_state::ClientState as BeefyClientState; - use crate::clients::ics11_beefy::client_state::RelayChain; use crate::clients::ics11_beefy::consensus_state::ConsensusState; use crate::clients::ics11_beefy::header::BeefyHeader; use crate::clients::ics11_beefy::header::ParachainHeader as BeefyParachainHeader; @@ -615,85 +615,98 @@ mod tests { }; let mut count = 0; - let client_state = client_wrapper - .construct_beefy_client_state(0) - .await - .unwrap(); - let beefy_client_state = BeefyClientState::new( - RelayChain::Rococo, - client_wrapper.para_id, - 0, - client_state.mmr_root_hash, - client_state.beefy_activation_block, - client_state.latest_beefy_height, - client_state.current_authorities, - client_state.next_authorities, - ) - .unwrap(); - - let api = client_wrapper - .relay_client - .clone() - .to_runtime_api::>>(); - let subxt_block_number: subxt::BlockNumber = beefy_client_state.latest_beefy_height.into(); - let block_hash = client_wrapper - .relay_client - .rpc() - .block_hash(Some(subxt_block_number)) - .await - .unwrap(); - let head_data = api - .storage() - .paras() - .heads( - &runtime::api::runtime_types::polkadot_parachain::primitives::Id( - client_wrapper.para_id, - ), - block_hash, - ) - .await - .unwrap() - .unwrap(); - let decoded_para_head = - sp_runtime::generic::Header::::decode( - &mut &*head_data.0, - ) - .unwrap(); - let block_number = decoded_para_head.number; - let subxt_block_number: subxt::BlockNumber = block_number.into(); - let block_hash = client_wrapper - .para_client - .rpc() - .block_hash(Some(subxt_block_number)) - .await - .unwrap(); + let (client_state, consensus_state) = loop { + let beefy_state = client_wrapper + .construct_beefy_client_state(0) + .await + .unwrap(); - let TimeStampExtWithProof { - ext: timestamp_extrinsic, - proof: extrinsic_proof, - } = fetch_timestamp_extrinsic_with_proof(&client_wrapper.para_client, 
block_hash) - .await + let api = + client_wrapper + .relay_client + .clone() + .to_runtime_api::, + >>(); + let subxt_block_number: subxt::BlockNumber = beefy_state.latest_beefy_height.into(); + let block_hash = client_wrapper + .relay_client + .rpc() + .block_hash(Some(subxt_block_number)) + .await + .unwrap(); + let head_data = api + .storage() + .paras() + .heads( + &runtime::api::runtime_types::polkadot_parachain::primitives::Id( + client_wrapper.para_id, + ), + block_hash, + ) + .await + .unwrap() + .unwrap(); + let decoded_para_head = frame_support::sp_runtime::generic::Header::< + u32, + frame_support::sp_runtime::traits::BlakeTwo256, + >::decode(&mut &*head_data.0) .unwrap(); - let parachain_header = BeefyParachainHeader { - parachain_header: decoded_para_head, - partial_mmr_leaf: PartialMmrLeaf { - version: Default::default(), - parent_number_and_hash: Default::default(), - beefy_next_authority_set: Default::default(), - }, - parachain_heads_proof: vec![], - heads_leaf_index: 0, - heads_total_count: 0, - extrinsic_proof, - timestamp_extrinsic, - }; + let block_number = decoded_para_head.number; + let client_state = BeefyClientState { + chain_id: ChainId::new("relay-chain".to_string(), 0), + relay_chain: Default::default(), + mmr_root_hash: beefy_state.mmr_root_hash, + latest_beefy_height: beefy_state.latest_beefy_height, + frozen_height: None, + beefy_activation_block: beefy_state.beefy_activation_block, + latest_para_height: block_number, + para_id: client_wrapper.para_id, + authority: beefy_state.current_authorities, + next_authority_set: beefy_state.next_authorities, + }; + // we can't use the genesis block to construct the initial state. + if block_number == 0 { + continue; + } + let subxt_block_number: subxt::BlockNumber = block_number.into(); + let block_hash = client_wrapper + .para_client + .rpc() + .block_hash(Some(subxt_block_number)) + .await + .unwrap(); - let consensus_state = ConsensusState::from_header(parachain_header) - .unwrap() - .wrap_any(); + let TimeStampExtWithProof { + ext: timestamp_extrinsic, + proof: extrinsic_proof, + } = fetch_timestamp_extrinsic_with_proof(&client_wrapper.para_client, block_hash) + .await + .unwrap(); + let parachain_header = BeefyParachainHeader { + parachain_header: decoded_para_head, + partial_mmr_leaf: PartialMmrLeaf { + version: Default::default(), + parent_number_and_hash: Default::default(), + beefy_next_authority_set: Default::default(), + }, + parachain_heads_proof: vec![], + heads_leaf_index: 0, + heads_total_count: 0, + extrinsic_proof, + timestamp_extrinsic, + }; + + let consensus_state = ConsensusState::from_header(parachain_header) + .unwrap() + .wrap_any(); + + break (client_state.wrap_any(), consensus_state); + }; let create_client = MsgCreateAnyClient { - client_state: AnyClientState::Beefy(beefy_client_state), + client_state, consensus_state, signer: signer.clone(), }; @@ -746,7 +759,10 @@ mod tests { let block_number = signed_commitment.commitment.block_number; let headers = client_wrapper - .query_finalized_parachain_headers_at(block_number, client_state.latest_beefy_height) + .query_finalized_parachain_headers_at( + block_number, + client_state.latest_beefy_height, + ) .await .unwrap(); let (parachain_headers, batch_proof) = client_wrapper @@ -766,25 +782,29 @@ mod tests { let mmr_size = NodesUtils::new(batch_proof.leaf_count).size(); let header = BeefyHeader { - parachain_headers: parachain_headers - .into_iter() - .map(|header| BeefyParachainHeader { - parachain_header: Decode::decode(&mut 
&*header.parachain_header.as_slice()) + headers_with_proof: Some(ParachainHeadersWithProof { + headers: parachain_headers + .into_iter() + .map(|header| BeefyParachainHeader { + parachain_header: Decode::decode( + &mut &*header.parachain_header.as_slice(), + ) .unwrap(), - partial_mmr_leaf: header.partial_mmr_leaf, - parachain_heads_proof: header.parachain_heads_proof, - heads_leaf_index: header.heads_leaf_index, - heads_total_count: header.heads_total_count, - extrinsic_proof: header.extrinsic_proof, - timestamp_extrinsic: header.timestamp_extrinsic, - }) - .collect(), - mmr_proofs: batch_proof - .items - .into_iter() - .map(|item| item.encode()) - .collect(), - mmr_size, + partial_mmr_leaf: header.partial_mmr_leaf, + parachain_heads_proof: header.parachain_heads_proof, + heads_leaf_index: header.heads_leaf_index, + heads_total_count: header.heads_total_count, + extrinsic_proof: header.extrinsic_proof, + timestamp_extrinsic: header.timestamp_extrinsic, + }) + .collect(), + mmr_proofs: batch_proof + .items + .into_iter() + .map(|item| item.encode()) + .collect(), + mmr_size, + }), mmr_update_proof: Some(mmr_update), }; diff --git a/modules/src/core/ics03_connection/handler/conn_open_try.rs b/modules/src/core/ics03_connection/handler/conn_open_try.rs index 40fd6e9cc3..422a23b4a1 100644 --- a/modules/src/core/ics03_connection/handler/conn_open_try.rs +++ b/modules/src/core/ics03_connection/handler/conn_open_try.rs @@ -28,48 +28,24 @@ pub(crate) fn process( } // Unwrap the old connection end (if any) and its identifier. - let (mut new_connection_end, conn_id) = match &msg.previous_connection_id { - // A connection with this id should already exist. Search & validate. - Some(prev_id) => { - let old_connection_end = ctx.connection_end(prev_id)?; - - // Validate that existing connection end matches with the one we're trying to establish. - if old_connection_end.state_matches(&State::Init) - && old_connection_end.counterparty_matches(&msg.counterparty) - && old_connection_end.client_id_matches(&msg.client_id) - && old_connection_end.delay_period() == msg.delay_period - { - // A ConnectionEnd already exists and all validation passed. - output.log(format!( - "success: `previous_connection_id` {} validation passed", - prev_id - )); - Ok((old_connection_end, prev_id.clone())) - } else { - // A ConnectionEnd already exists and validation failed. - Err(Error::connection_mismatch(prev_id.clone())) - } - } - // No prev. connection id was supplied, create a new connection end and conn id. - None => { - // Build a new connection end as well as an identifier. - let conn_end = ConnectionEnd::new( - State::Init, - msg.client_id.clone(), - msg.counterparty.clone(), - msg.counterparty_versions.clone(), - msg.delay_period, - ); - let id_counter = ctx.connection_counter()?; - let conn_id = ConnectionId::new(id_counter); - - output.log(format!( - "success: new connection end and identifier {} generated", - conn_id - )); - Ok((conn_end, conn_id)) - } - }?; + let (mut new_connection_end, conn_id) = { + // Build a new connection end as well as an identifier. + let conn_end = ConnectionEnd::new( + State::Init, + msg.client_id.clone(), + msg.counterparty.clone(), + msg.counterparty_versions.clone(), + msg.delay_period, + ); + let id_counter = ctx.connection_counter()?; + let conn_id = ConnectionId::new(id_counter); + + output.log(format!( + "success: new connection end and identifier {} generated", + conn_id + )); + (conn_end, conn_id) + }; // Proof verification in two steps: // 1. 
Setup: build the ConnectionEnd as we expect to find it on the other party. @@ -144,11 +120,7 @@ pub(crate) fn process( let result = ConnectionResult { connection_id: conn_id, - connection_id_state: if matches!(msg.previous_connection_id, None) { - ConnectionIdState::Generated - } else { - ConnectionIdState::Reused - }, + connection_id_state: ConnectionIdState::Generated, connection_end: new_connection_end, }; @@ -251,18 +223,6 @@ mod tests { msg: ConnectionMsg::ConnectionOpenTry(Box::new(msg_proof_height_missing)), want_pass: false, }, - Test { - name: "Good parameters but has previous_connection_id".to_string(), - ctx: context.clone().with_client(&msg_conn_try.client_id, Height::new(0, client_consensus_state_height)), - msg: ConnectionMsg::ConnectionOpenTry(Box::new(msg_conn_try.clone())), - want_pass: false, - }, - Test { - name: "Good parameters".to_string(), - ctx: context.with_client(&msg_conn_try.client_id, Height::new(0, client_consensus_state_height)), - msg: ConnectionMsg::ConnectionOpenTry(Box::new(msg_conn_try.with_previous_connection_id(None))), - want_pass: true, - }, ] .into_iter() .collect(); diff --git a/modules/src/core/ics03_connection/handler/verify.rs b/modules/src/core/ics03_connection/handler/verify.rs index 946b50902a..4a83860e2f 100644 --- a/modules/src/core/ics03_connection/handler/verify.rs +++ b/modules/src/core/ics03_connection/handler/verify.rs @@ -1,8 +1,8 @@ //! ICS3 verification functions, common across all four handlers of ICS3. use crate::clients::host_functions::HostFunctionsProvider; -#[cfg(feature = "ics11_beefy")] use crate::core::ics02_client::client_consensus::ConsensusState; use crate::core::ics02_client::client_state::{AnyClientState, ClientState}; +#[cfg(feature = "ics11_beefy")] use crate::core::ics02_client::client_type::ClientType; use crate::core::ics02_client::{client_def::AnyClient, client_def::ClientDef}; use crate::core::ics03_connection::connection::ConnectionEnd; @@ -11,10 +11,14 @@ use crate::core::ics23_commitment::commitment::CommitmentProofBytes; use crate::core::ics26_routing::context::ReaderContext; use crate::proofs::ConsensusProof; use crate::Height; -use codec::{Decode, Encode}; +#[cfg(feature = "ics11_beefy")] use alloc::format; +#[cfg(feature = "ics11_beefy")] +use codec::{Decode, Encode}; +#[cfg(feature = "ics11_beefy")] use sp_std::vec::Vec; +#[cfg(feature = "ics11_beefy")] /// Connection proof type, used in relayer #[derive(Encode, Decode)] pub struct ConnectionProof { diff --git a/modules/src/core/ics03_connection/msgs/conn_open_ack.rs b/modules/src/core/ics03_connection/msgs/conn_open_ack.rs index f95f7c3317..64d4bcbe5d 100644 --- a/modules/src/core/ics03_connection/msgs/conn_open_ack.rs +++ b/modules/src/core/ics03_connection/msgs/conn_open_ack.rs @@ -138,12 +138,15 @@ impl From for RawMsgConnectionOpenAck { #[cfg(test)] pub mod test_util { + use crate::core::ics02_client::client_state::AnyClientState; use crate::prelude::*; use ibc_proto::ibc::core::client::v1::Height; use ibc_proto::ibc::core::connection::v1::MsgConnectionOpenAck as RawMsgConnectionOpenAck; use crate::core::ics03_connection::version::Version; use crate::core::ics24_host::identifier::ConnectionId; + use crate::mock::client_state::MockClientState; + use crate::mock::header::MockHeader; use crate::test_utils::{get_dummy_bech32_account, get_dummy_proof}; pub fn get_dummy_raw_msg_conn_open_ack( @@ -163,7 +166,9 @@ pub mod test_util { revision_number: 0, revision_height: consensus_height, }), - client_state: None, + client_state: Some( + 
AnyClientState::Mock(MockClientState::new(MockHeader::default())).into(), + ), proof_client: get_dummy_proof(), version: Some(Version::default().into()), signer: get_dummy_bech32_account(), diff --git a/modules/src/core/ics03_connection/msgs/conn_open_try.rs b/modules/src/core/ics03_connection/msgs/conn_open_try.rs index f3593b3810..6c8c5b49f4 100644 --- a/modules/src/core/ics03_connection/msgs/conn_open_try.rs +++ b/modules/src/core/ics03_connection/msgs/conn_open_try.rs @@ -1,7 +1,6 @@ use crate::prelude::*; use core::{ convert::{TryFrom, TryInto}, - str::FromStr, time::Duration, }; @@ -14,7 +13,7 @@ use crate::core::ics03_connection::connection::Counterparty; use crate::core::ics03_connection::error::Error; use crate::core::ics03_connection::version::Version; use crate::core::ics23_commitment::commitment::CommitmentProofBytes; -use crate::core::ics24_host::identifier::{ClientId, ConnectionId}; +use crate::core::ics24_host::identifier::ClientId; use crate::proofs::{ConsensusProof, Proofs}; use crate::signer::Signer; use crate::tx_msg::Msg; @@ -27,7 +26,6 @@ pub const TYPE_URL: &str = "/ibc.core.connection.v1.MsgConnectionOpenTry"; /// #[derive(Clone, Debug, PartialEq, Eq)] pub struct MsgConnectionOpenTry { - pub previous_connection_id: Option, pub client_id: ClientId, pub client_state: Option, pub counterparty: Counterparty, @@ -67,12 +65,6 @@ impl TryFrom for MsgConnectionOpenTry { type Error = Error; fn try_from(msg: RawMsgConnectionOpenTry) -> Result { - let previous_connection_id = Some(msg.previous_connection_id) - .filter(|x| !x.is_empty()) - .map(|v| FromStr::from_str(v.as_str())) - .transpose() - .map_err(Error::invalid_identifier)?; - let consensus_proof_obj = { let proof_bytes: Option = msg.proof_consensus.try_into().ok(); let consensus_height = msg @@ -107,7 +99,6 @@ impl TryFrom for MsgConnectionOpenTry { } Ok(Self { - previous_connection_id, client_id: msg.client_id.parse().map_err(Error::invalid_identifier)?, client_state: msg .client_state @@ -137,9 +128,6 @@ impl From for RawMsgConnectionOpenTry { fn from(ics_msg: MsgConnectionOpenTry) -> Self { RawMsgConnectionOpenTry { client_id: ics_msg.client_id.as_str().to_string(), - previous_connection_id: ics_msg - .previous_connection_id - .map_or_else(|| "".to_string(), |v| v.as_str().to_string()), client_state: ics_msg .client_state .map_or_else(|| None, |v| Some(v.into())), @@ -166,12 +154,14 @@ impl From for RawMsgConnectionOpenTry { .consensus_proof() .map_or_else(|| None, |h| Some(h.height().into())), signer: ics_msg.signer.to_string(), + ..Default::default() } } } #[cfg(test)] pub mod test_util { + use crate::core::ics02_client::client_state::AnyClientState; use crate::prelude::*; use ibc_proto::ibc::core::client::v1::Height; use ibc_proto::ibc::core::connection::v1::MsgConnectionOpenTry as RawMsgConnectionOpenTry; @@ -179,22 +169,13 @@ pub mod test_util { use crate::core::ics03_connection::msgs::conn_open_try::MsgConnectionOpenTry; use crate::core::ics03_connection::msgs::test_util::get_dummy_raw_counterparty; use crate::core::ics03_connection::version::get_compatible_versions; - use crate::core::ics24_host::identifier::{ClientId, ConnectionId}; + use crate::core::ics24_host::identifier::ClientId; + use crate::mock::client_state::MockClientState; + use crate::mock::header::MockHeader; use crate::test_utils::{get_dummy_bech32_account, get_dummy_proof}; /// Testing-specific helper methods. impl MsgConnectionOpenTry { - /// Moves the given message into another one, and updates the `previous_connection_id` field. 
- pub fn with_previous_connection_id( - self, - previous_connection_id: Option, - ) -> MsgConnectionOpenTry { - MsgConnectionOpenTry { - previous_connection_id, - ..self - } - } - /// Setter for `client_id`. pub fn with_client_id(self, client_id: ClientId) -> MsgConnectionOpenTry { MsgConnectionOpenTry { client_id, ..self } @@ -211,8 +192,9 @@ pub mod test_util { ) -> RawMsgConnectionOpenTry { RawMsgConnectionOpenTry { client_id: ClientId::default().to_string(), - previous_connection_id: ConnectionId::default().to_string(), - client_state: None, + client_state: Some( + AnyClientState::Mock(MockClientState::new(MockHeader::default())).into(), + ), counterparty: Some(get_dummy_raw_counterparty()), delay_period: 0, counterparty_versions: get_compatible_versions() @@ -231,6 +213,7 @@ pub mod test_util { }), proof_client: get_dummy_proof(), signer: get_dummy_bech32_account(), + ..Default::default() } } } diff --git a/modules/src/core/ics04_channel/handler/chan_open_try.rs b/modules/src/core/ics04_channel/handler/chan_open_try.rs index 6900b91c4e..71f2351d8b 100644 --- a/modules/src/core/ics04_channel/handler/chan_open_try.rs +++ b/modules/src/core/ics04_channel/handler/chan_open_try.rs @@ -21,46 +21,26 @@ pub(crate) fn process( let mut output = HandlerOutput::builder(); // Unwrap the old channel end (if any) and validate it against the message. - let (mut new_channel_end, channel_id) = match &msg.previous_channel_id { - Some(prev_id) => { - let old_channel_end = ctx.channel_end(&(msg.port_id.clone(), *prev_id))?; - - // Validate that existing channel end matches with the one we're trying to establish. - if old_channel_end.state_matches(&State::Init) - && old_channel_end.order_matches(msg.channel.ordering()) - && old_channel_end.connection_hops_matches(msg.channel.connection_hops()) - && old_channel_end.counterparty_matches(msg.channel.counterparty()) - && old_channel_end.version_matches(msg.channel.version()) - { - // A ChannelEnd already exists and all validation passed. - Ok((old_channel_end, *prev_id)) - } else { - // A ConnectionEnd already exists and validation failed. - Err(Error::channel_mismatch(*prev_id)) - } - } - // No previous channel id was supplied. Create a new channel end & an identifier. - None => { - let channel_end = ChannelEnd::new( - State::Init, - *msg.channel.ordering(), - msg.channel.counterparty().clone(), - msg.channel.connection_hops().clone(), - msg.counterparty_version.clone(), - ); - - // Channel identifier construction. - let id_counter = ctx.channel_counter()?; - let chan_id = ChannelId::new(id_counter); - - output.log(format!( - "success: generated new channel identifier: {}", - chan_id - )); - - Ok((channel_end, chan_id)) - } - }?; + let (mut new_channel_end, channel_id) = { + let channel_end = ChannelEnd::new( + State::Init, + *msg.channel.ordering(), + msg.channel.counterparty().clone(), + msg.channel.connection_hops().clone(), + msg.counterparty_version.clone(), + ); + + // Channel identifier construction. + let id_counter = ctx.channel_counter()?; + let chan_id = ChannelId::new(id_counter); + + output.log(format!( + "success: generated new channel identifier: {}", + chan_id + )); + + (channel_end, chan_id) + }; // An IBC connection running on the local (host) chain should exist. 
if msg.channel.connection_hops().len() != 1 { @@ -136,11 +116,7 @@ pub(crate) fn process( let result = ChannelResult { port_id: msg.port_id.clone(), - channel_id_state: if matches!(msg.previous_channel_id, None) { - ChannelIdState::Generated - } else { - ChannelIdState::Reused - }, + channel_id_state: ChannelIdState::Generated, channel_id, channel_end: new_channel_end, }; @@ -170,11 +146,11 @@ mod tests { use crate::core::ics03_connection::msgs::test_util::get_dummy_raw_counterparty; use crate::core::ics03_connection::version::get_compatible_versions; use crate::core::ics04_channel::channel::{ChannelEnd, State}; + use crate::core::ics04_channel::error; use crate::core::ics04_channel::handler::channel_dispatch; use crate::core::ics04_channel::msgs::chan_open_try::test_util::get_dummy_raw_msg_chan_open_try; use crate::core::ics04_channel::msgs::chan_open_try::MsgChannelOpenTry; use crate::core::ics04_channel::msgs::ChannelMsg; - use crate::core::ics04_channel::{error, Version}; use crate::core::ics24_host::identifier::{ChannelId, ClientId, ConnectionId}; use crate::events::IbcEvent; use crate::mock::context::MockContext; @@ -217,12 +193,10 @@ mod tests { // this channel should depend on connection `conn_id`. let chan_id = ChannelId::new(24); let hops = vec![conn_id.clone()]; - msg.previous_channel_id = Some(chan_id); msg.channel.connection_hops = hops; // This message does not assume a channel should already be initialized. - let mut msg_vanilla = msg.clone(); - msg_vanilla.previous_channel_id = None; + let msg_vanilla = msg.clone(); // A preloaded channel end that resides in the context. This is constructed so as to be // consistent with the incoming ChanOpenTry message `msg`. @@ -234,48 +208,7 @@ mod tests { msg.channel.version().clone(), ); - // A preloaded channel end that resides in the context. This is constructed so as to be - // __inconsistent__ with the incoming ChanOpenTry message `msg` due to its version field. - let version = Version::from(format!("{}-", msg.channel.version())); - let incorrect_chan_end_ver = ChannelEnd::new( - State::Init, - *msg.channel.ordering(), - msg.channel.counterparty().clone(), - msg.channel.connection_hops().clone(), - version, - ); - - // A preloaded channel end residing in the context, which will be __inconsistent__ with - // the incoming ChanOpenTry message `msg` due to its connection hops field. 
- let hops = vec![ConnectionId::new(9890)]; - let incorrect_chan_end_hops = ChannelEnd::new( - State::Init, - *msg.channel.ordering(), - msg.channel.counterparty().clone(), - hops, - msg.channel.version().clone(), - ); - let tests: Vec = vec![ - Test { - name: "Processing fails because no channel is preloaded in the context".to_string(), - ctx: context.clone(), - msg: ChannelMsg::ChannelOpenTry(msg.clone()), - want_pass: false, - match_error: { - let port_id = msg.port_id.clone(); - let channel_id = chan_id; - Box::new(move |e| match e { - error::ErrorDetail::ChannelNotFound(e) => { - assert_eq!(e.port_id, port_id); - assert_eq!(e.channel_id, channel_id); - } - _ => { - panic!("Expected ChannelNotFound, instead got {}", e) - } - }) - }, - }, Test { name: "Processing fails because no connection exists in the context".to_string(), ctx: context.clone(), @@ -298,47 +231,6 @@ mod tests { }) }, }, - Test { - name: "Processing fails because of inconsistent version with preexisting channel" - .to_string(), - ctx: context - .clone() - .with_connection(conn_id.clone(), conn_end.clone()) - .with_channel(msg.port_id.clone(), chan_id, incorrect_chan_end_ver), - msg: ChannelMsg::ChannelOpenTry(msg.clone()), - want_pass: false, - match_error: { - let channel_id = chan_id; - Box::new(move |e| match e { - error::ErrorDetail::ChannelMismatch(e) => { - assert_eq!(e.channel_id, channel_id); - } - _ => { - panic!("Expected ChannelMismatch, instead got {}", e) - } - }) - }, - }, - Test { - name: "Processing fails because of inconsistent connection hops".to_string(), - ctx: context - .clone() - .with_connection(conn_id.clone(), conn_end.clone()) - .with_channel(msg.port_id.clone(), chan_id, incorrect_chan_end_hops), - msg: ChannelMsg::ChannelOpenTry(msg.clone()), - want_pass: false, - match_error: { - let channel_id = chan_id; - Box::new(move |e| match e { - error::ErrorDetail::ChannelMismatch(e) => { - assert_eq!(e.channel_id, channel_id); - } - _ => { - panic!("Expected ChannelMismatch, instead got {}", e) - } - }) - }, - }, Test { name: "Processing fails b/c the context has no client state".to_string(), ctx: context diff --git a/modules/src/core/ics04_channel/msgs/chan_open_try.rs b/modules/src/core/ics04_channel/msgs/chan_open_try.rs index 5af004d529..bdd1e5c46b 100644 --- a/modules/src/core/ics04_channel/msgs/chan_open_try.rs +++ b/modules/src/core/ics04_channel/msgs/chan_open_try.rs @@ -2,7 +2,7 @@ use crate::core::ics04_channel::channel::ChannelEnd; use crate::core::ics04_channel::error::Error as ChannelError; use crate::core::ics04_channel::Version; use crate::core::ics24_host::error::ValidationError; -use crate::core::ics24_host::identifier::{ChannelId, PortId}; +use crate::core::ics24_host::identifier::PortId; use crate::prelude::*; use crate::proofs::Proofs; use crate::signer::Signer; @@ -11,8 +11,6 @@ use crate::tx_msg::Msg; use ibc_proto::ibc::core::channel::v1::MsgChannelOpenTry as RawMsgChannelOpenTry; use tendermint_proto::Protobuf; -use core::str::FromStr; - pub const TYPE_URL: &str = "/ibc.core.channel.v1.MsgChannelOpenTry"; /// @@ -21,7 +19,6 @@ pub const TYPE_URL: &str = "/ibc.core.channel.v1.MsgChannelOpenTry"; #[derive(Clone, Debug, PartialEq)] pub struct MsgChannelOpenTry { pub port_id: PortId, - pub previous_channel_id: Option, pub channel: ChannelEnd, pub counterparty_version: Version, pub proofs: Proofs, @@ -31,7 +28,6 @@ pub struct MsgChannelOpenTry { impl MsgChannelOpenTry { pub fn new( port_id: PortId, - previous_channel_id: Option, channel: ChannelEnd, counterparty_version: Version, 
proofs: Proofs, @@ -39,7 +35,6 @@ impl MsgChannelOpenTry { ) -> Self { Self { port_id, - previous_channel_id, channel, counterparty_version, proofs, @@ -89,15 +84,8 @@ impl TryFrom for MsgChannelOpenTry { ) .map_err(ChannelError::invalid_proof)?; - let previous_channel_id = Some(raw_msg.previous_channel_id) - .filter(|x| !x.is_empty()) - .map(|v| FromStr::from_str(v.as_str())) - .transpose() - .map_err(ChannelError::identifier)?; - let msg = MsgChannelOpenTry { port_id: raw_msg.port_id.parse().map_err(ChannelError::identifier)?, - previous_channel_id, channel: raw_msg .channel .ok_or_else(ChannelError::missing_channel)? @@ -118,14 +106,12 @@ impl From for RawMsgChannelOpenTry { fn from(domain_msg: MsgChannelOpenTry) -> Self { RawMsgChannelOpenTry { port_id: domain_msg.port_id.to_string(), - previous_channel_id: domain_msg - .previous_channel_id - .map_or_else(|| "".to_string(), |v| v.to_string()), channel: Some(domain_msg.channel.into()), counterparty_version: domain_msg.counterparty_version.to_string(), proof_init: domain_msg.proofs.object_proof().clone().into(), proof_height: Some(domain_msg.proofs.height().into()), signer: domain_msg.signer.to_string(), + ..Default::default() } } } @@ -136,7 +122,7 @@ pub mod test_util { use ibc_proto::ibc::core::channel::v1::MsgChannelOpenTry as RawMsgChannelOpenTry; use crate::core::ics04_channel::channel::test_util::get_dummy_raw_channel_end; - use crate::core::ics24_host::identifier::{ChannelId, PortId}; + use crate::core::ics24_host::identifier::PortId; use crate::test_utils::{get_dummy_bech32_account, get_dummy_proof}; use ibc_proto::ibc::core::client::v1::Height; @@ -144,7 +130,6 @@ pub mod test_util { pub fn get_dummy_raw_msg_chan_open_try(proof_height: u64) -> RawMsgChannelOpenTry { RawMsgChannelOpenTry { port_id: PortId::default().to_string(), - previous_channel_id: ChannelId::default().to_string(), channel: Some(get_dummy_raw_channel_end()), counterparty_version: "".to_string(), proof_init: get_dummy_proof(), @@ -153,6 +138,7 @@ pub mod test_util { revision_height: proof_height, }), signer: get_dummy_bech32_account(), + ..Default::default() } } } @@ -211,27 +197,10 @@ mod tests { Test { name: "Correct channel identifier".to_string(), raw: RawMsgChannelOpenTry { - previous_channel_id: "channel-34".to_string(), ..default_raw_msg.clone() }, want_pass: true, }, - Test { - name: "Bad channel, name too short".to_string(), - raw: RawMsgChannelOpenTry { - previous_channel_id: "chshort".to_string(), - ..default_raw_msg.clone() - }, - want_pass: false, - }, - Test { - name: "Bad channel, name too long".to_string(), - raw: RawMsgChannelOpenTry { - previous_channel_id: "channel-12839128379182739812739879".to_string(), - ..default_raw_msg.clone() - }, - want_pass: false, - }, Test { name: "Empty counterparty version (valid choice)".to_string(), raw: RawMsgChannelOpenTry { diff --git a/modules/src/mock/context.rs b/modules/src/mock/context.rs index 6f7deeecdc..68038b1fc6 100644 --- a/modules/src/mock/context.rs +++ b/modules/src/mock/context.rs @@ -34,7 +34,7 @@ use crate::core::ics04_channel::packet::{Receipt, Sequence}; use crate::core::ics05_port::context::PortReader; use crate::core::ics05_port::error::Error as Ics05Error; use crate::core::ics05_port::error::Error; -use crate::core::ics23_commitment::commitment::{CommitmentPrefix, CommitmentProofBytes}; +use crate::core::ics23_commitment::commitment::CommitmentPrefix; use crate::core::ics24_host::identifier::{ChainId, ChannelId, ClientId, ConnectionId, PortId}; use 
crate::core::ics26_routing::context::{
     Ics26Context, Module, ModuleId, ReaderContext, Router, RouterBuilder,
diff --git a/modules/src/test_utils.rs b/modules/src/test_utils.rs
index 37bb8e2e33..1ab996cc3d 100644
--- a/modules/src/test_utils.rs
+++ b/modules/src/test_utils.rs
@@ -28,7 +28,6 @@ use crate::core::ics04_channel::packet::{Receipt, Sequence};
 use crate::core::ics04_channel::Version;
 use crate::core::ics05_port::context::PortReader;
 use crate::core::ics05_port::error::Error as PortError;
-use crate::core::ics23_commitment::commitment::CommitmentProofBytes;
 use crate::core::ics24_host::identifier::{ChannelId, ClientId, ConnectionId, PortId};
 use crate::core::ics26_routing::context::{Module, ModuleId, ModuleOutputBuilder, ReaderContext};
 use crate::mock::context::MockIbcStore;
diff --git a/modules/tests/runner/mod.rs b/modules/tests/runner/mod.rs
index a63642601b..02f1a406be 100644
--- a/modules/tests/runner/mod.rs
+++ b/modules/tests/runner/mod.rs
@@ -374,7 +374,7 @@ impl IbcTestRunner {
             }
             Action::Ics03ConnectionOpenTry {
                 chain_id,
-                previous_connection_id,
+                previous_connection_id: _,
                 client_id,
                 client_state,
                 counterparty_chain_id: _,
@@ -387,7 +387,6 @@ impl IbcTestRunner {
                 // create ICS26 message and deliver it
                 let msg = Ics26Envelope::Ics3Msg(ConnectionMsg::ConnectionOpenTry(Box::new(
                     MsgConnectionOpenTry {
-                        previous_connection_id: previous_connection_id.map(Self::connection_id),
                         client_id: Self::client_id(client_id),
                         // TODO: is this ever needed?
                         client_state: None,
diff --git a/proto-compiler/src/cmd/clone.rs b/proto-compiler/src/cmd/clone.rs
index ce8ff132d6..dac534c127 100644
--- a/proto-compiler/src/cmd/clone.rs
+++ b/proto-compiler/src/cmd/clone.rs
@@ -20,13 +20,18 @@ pub struct CloneCmd {
     #[argh(option, short = 'i')]
     ibc_go_commit: Option<String>,
 
+    /// composable ibc-go commit to checkout
+    #[argh(option, short = 'l')]
+    composable_ibc_go_commit: Option<String>,
+
     /// where to checkout the repository
     #[argh(option, short = 'o')]
     out: PathBuf,
 }
 
 pub const COSMOS_SDK_URL: &str = "https://github.com/cosmos/cosmos-sdk";
-pub const IBC_GO_URL: &str = "https://github.com/ComposableFi/ibc-go.git";
+pub const IBC_GO_URL: &str = "https://github.com/cosmos/ibc-go";
+pub const COMPOSABLE_IBC_GO_URL: &str = "https://github.com/ComposableFi/ibc-go";
 
 impl CloneCmd {
     pub fn validate(&self) {
@@ -48,6 +53,12 @@ impl CloneCmd {
         ibc_path
     }
 
+    pub fn composable_ibc_subdir(&self) -> PathBuf {
+        let mut ibc_path = self.out.clone();
+        ibc_path.push("composable/ibc/");
+        ibc_path
+    }
+
     pub fn run(&self) {
         self.validate();
 
@@ -117,7 +128,42 @@ impl CloneCmd {
             }
             None => {
                 println!(
-                    "[info ] No `-i`/`--ibc_go_commit` option passed. Skipping the IBC Go repo."
+                    "[info ] No `-i`/`--ibc_go_commit` option passed. Skipping the Cosmos IBC Go repo."
+                )
+            }
+        }
+
+        println!("[info ] Cloning ComposableFi/ibc-go repository...");
+
+        match &self.composable_ibc_go_commit {
+            Some(ibc_go_commit) => {
+                let ibc_path = self.composable_ibc_subdir();
+                let ibc_repo = if ibc_path.exists() {
+                    println!("[info ] Found IBC Go source at '{}'", ibc_path.display());
+
+                    Repository::open(&ibc_path).unwrap_or_else(|e| {
+                        println!("[error] Failed to open repository: {}", e);
+                        process::exit(1)
+                    })
+                } else {
+                    Repository::clone(COMPOSABLE_IBC_GO_URL, &ibc_path).unwrap_or_else(|e| {
+                        println!("[error] Failed to clone the IBC Go repository: {}", e);
+                        process::exit(1)
+                    })
+                };
+
+                println!("[info ] Cloned at '{}'", ibc_path.display());
+                checkout_commit(&ibc_repo, ibc_go_commit).unwrap_or_else(|e| {
+                    println!(
+                        "[error] Failed to checkout IBC Go commit {}: {}",
+                        ibc_go_commit, e
+                    );
+                    process::exit(1)
+                });
+            }
+            None => {
+                println!(
+                    "[info ] No `-l`/`--composable_ibc_go_commit` option passed. Skipping the ComposableFi IBC Go repo."
                 )
             }
         }
diff --git a/proto-compiler/src/cmd/compile.rs b/proto-compiler/src/cmd/compile.rs
index 4f73ecc469..90fd9b89b8 100644
--- a/proto-compiler/src/cmd/compile.rs
+++ b/proto-compiler/src/cmd/compile.rs
@@ -19,6 +19,10 @@ pub struct CompileCmd {
     /// path to the Cosmos IBC proto files
     ibc: Option<PathBuf>,
 
+    #[argh(option, short = 'b')]
+    /// path to the Composable IBC proto files
+    beefy: Option<PathBuf>,
+
     #[argh(option, short = 'o')]
     /// path to output the generated Rust sources into
     out: PathBuf,
@@ -36,6 +40,14 @@ impl CompileCmd {
             }
             Some(ibc_path) => {
                 let tmp_ibc = TempDir::new("ibc-proto-ibc-go").unwrap();
+
+                match &self.beefy {
+                    Some(ibc_path) => {
+                        Self::compile_beefy_proto(ibc_path, tmp_ibc.as_ref());
+                    }
+                    _ => {}
+                }
+
                 Self::compile_ibc_protos(ibc_path, tmp_ibc.as_ref());
 
                 // Merge the generated files into a single directory, taking care not to overwrite anything
@@ -180,6 +192,63 @@ impl CompileCmd {
         }
     }
 
+    fn compile_beefy_proto(ibc_dir: &Path, out_dir: &Path) {
+        println!(
+            "[info ] Compiling Beefy client .proto files to Rust into '{}'...",
+            out_dir.display()
+        );
+
+        // Paths
+        let proto_paths = [
+            // ibc-go proto files
+            format!("{}/proto/ibc", ibc_dir.display()),
+        ];
+
+        let proto_includes_paths = [
+            format!("{}/proto", ibc_dir.display()),
+            format!("{}/third_party/proto", ibc_dir.display()),
+        ];
+
+        // List available proto files
+        let mut protos: Vec<PathBuf> = vec![];
+        for proto_path in &proto_paths {
+            println!("Looking for beefy proto files in {:?}", proto_path);
+            protos.append(
+                &mut WalkDir::new(proto_path)
+                    .into_iter()
+                    .filter_map(|e| e.ok())
+                    .filter(|e| {
+                        e.file_type().is_file()
+                            && e.file_name().to_str().unwrap().starts_with("beefy")
+                            && e.path().extension().is_some()
+                            && e.path().extension().unwrap() == "proto"
+                    })
+                    .map(|e| e.into_path())
+                    .collect(),
+            );
+        }
+
+        println!("[info ] Compiling...");
+
+        // List available paths for dependencies
+        let includes: Vec<PathBuf> = proto_includes_paths.iter().map(PathBuf::from).collect();
+
+        let compilation = tonic_build::configure()
+            .out_dir(out_dir)
+            .compile_well_known_types(true)
+            .compile(&protos, &includes);
+
+        match compilation {
+            Ok(_) => {
+                println!("Successfully compiled beefy proto files");
+            }
+            Err(e) => {
+                println!("Failed to compile: {:?}", e.to_string());
+                process::exit(1);
+            }
+        }
+    }
+
     fn compile_sdk_protos(sdk_dir: &Path, out_dir: &Path, ibc_dep: Option<PathBuf>) {
         println!(
             "[info ] Compiling Cosmos-SDK .proto files to Rust into '{}'...",
diff --git a/proto/src/COMPOSABLE_IBC_GO_COMMIT b/proto/src/COMPOSABLE_IBC_GO_COMMIT
new file mode 100644
index
0000000000..563c1bef06 --- /dev/null +++ b/proto/src/COMPOSABLE_IBC_GO_COMMIT @@ -0,0 +1 @@ +e871fd96b657c1364c7d70294d30ba1c18c14f54 \ No newline at end of file diff --git a/proto/src/COSMOS_SDK_COMMIT b/proto/src/COSMOS_SDK_COMMIT index c11b353538..bdff2322fd 100644 --- a/proto/src/COSMOS_SDK_COMMIT +++ b/proto/src/COSMOS_SDK_COMMIT @@ -1 +1 @@ -2646b474c7beb0c93d4fafd395ef345f41afc251 +35ae2c4c72d4aeb33447d5a7af23ca47f786606e diff --git a/proto/src/IBC_GO_COMMIT b/proto/src/IBC_GO_COMMIT index da922be27c..8f825ea129 100644 --- a/proto/src/IBC_GO_COMMIT +++ b/proto/src/IBC_GO_COMMIT @@ -1 +1 @@ -c46fd062166bbc478f169c6c57108c5615383729 +2b7c969066fbcb18f90c7f5bd256439ca12535c7 diff --git a/proto/src/prost/cosmos.bank.v1beta1.rs b/proto/src/prost/cosmos.bank.v1beta1.rs index 8f8f7e7ff0..f099b257e9 100644 --- a/proto/src/prost/cosmos.bank.v1beta1.rs +++ b/proto/src/prost/cosmos.bank.v1beta1.rs @@ -450,6 +450,30 @@ pub struct QueryAllBalancesResponse { #[prost(message, optional, tag="2")] pub pagination: ::core::option::Option, } +/// QuerySpendableBalancesRequest defines the gRPC request structure for querying +/// an account's spendable balances. +#[derive(::serde::Serialize, ::serde::Deserialize)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct QuerySpendableBalancesRequest { + /// address is the address to query spendable balances for. + #[prost(string, tag="1")] + pub address: ::prost::alloc::string::String, + /// pagination defines an optional pagination for the request. + #[prost(message, optional, tag="2")] + pub pagination: ::core::option::Option, +} +/// QuerySpendableBalancesResponse defines the gRPC response structure for querying +/// an account's spendable balances. +#[derive(::serde::Serialize, ::serde::Deserialize)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct QuerySpendableBalancesResponse { + /// balances is the spendable balances of all the coins. + #[prost(message, repeated, tag="1")] + pub balances: ::prost::alloc::vec::Vec, + /// pagination defines the pagination in the response. + #[prost(message, optional, tag="2")] + pub pagination: ::core::option::Option, +} /// QueryTotalSupplyRequest is the request type for the Query/TotalSupply RPC /// method. #[derive(::serde::Serialize, ::serde::Deserialize)] @@ -646,6 +670,30 @@ pub mod query_client { ); self.inner.unary(request.into_request(), path, codec).await } + /// SpendableBalances queries the spenable balance of all coins for a single + /// account. + pub async fn spendable_balances( + &mut self, + request: impl tonic::IntoRequest, + ) -> Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/cosmos.bank.v1beta1.Query/SpendableBalances", + ); + self.inner.unary(request.into_request(), path, codec).await + } /// TotalSupply queries the total supply of all coins. pub async fn total_supply( &mut self, @@ -766,6 +814,15 @@ pub mod query_server { &self, request: tonic::Request, ) -> Result, tonic::Status>; + /// SpendableBalances queries the spenable balance of all coins for a single + /// account. + async fn spendable_balances( + &self, + request: tonic::Request, + ) -> Result< + tonic::Response, + tonic::Status, + >; /// TotalSupply queries the total supply of all coins. 
async fn total_supply( &self, @@ -918,6 +975,46 @@ pub mod query_server { }; Box::pin(fut) } + "/cosmos.bank.v1beta1.Query/SpendableBalances" => { + #[allow(non_camel_case_types)] + struct SpendableBalancesSvc(pub Arc); + impl< + T: Query, + > tonic::server::UnaryService + for SpendableBalancesSvc { + type Response = super::QuerySpendableBalancesResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = self.0.clone(); + let fut = async move { + (*inner).spendable_balances(request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = SpendableBalancesSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } "/cosmos.bank.v1beta1.Query/TotalSupply" => { #[allow(non_camel_case_types)] struct TotalSupplySvc(pub Arc); diff --git a/proto/src/prost/cosmos.base.snapshots.v1beta1.rs b/proto/src/prost/cosmos.base.snapshots.v1beta1.rs index 39aae96577..3550faf02e 100644 --- a/proto/src/prost/cosmos.base.snapshots.v1beta1.rs +++ b/proto/src/prost/cosmos.base.snapshots.v1beta1.rs @@ -19,3 +19,59 @@ pub struct Metadata { #[prost(bytes="vec", repeated, tag="1")] pub chunk_hashes: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec>, } +/// SnapshotItem is an item contained in a rootmulti.Store snapshot. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SnapshotItem { + /// item is the specific type of snapshot item. + #[prost(oneof="snapshot_item::Item", tags="1, 2, 3, 4")] + pub item: ::core::option::Option, +} +/// Nested message and enum types in `SnapshotItem`. +pub mod snapshot_item { + /// item is the specific type of snapshot item. + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Item { + #[prost(message, tag="1")] + Store(super::SnapshotStoreItem), + #[prost(message, tag="2")] + Iavl(super::SnapshotIavlItem), + #[prost(message, tag="3")] + Extension(super::SnapshotExtensionMeta), + #[prost(message, tag="4")] + ExtensionPayload(super::SnapshotExtensionPayload), + } +} +/// SnapshotStoreItem contains metadata about a snapshotted store. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SnapshotStoreItem { + #[prost(string, tag="1")] + pub name: ::prost::alloc::string::String, +} +/// SnapshotIAVLItem is an exported IAVL node. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SnapshotIavlItem { + #[prost(bytes="vec", tag="1")] + pub key: ::prost::alloc::vec::Vec, + #[prost(bytes="vec", tag="2")] + pub value: ::prost::alloc::vec::Vec, + /// version is block height + #[prost(int64, tag="3")] + pub version: i64, + /// height is depth of the tree. + #[prost(int32, tag="4")] + pub height: i32, +} +/// SnapshotExtensionMeta contains metadata about an external snapshotter. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SnapshotExtensionMeta { + #[prost(string, tag="1")] + pub name: ::prost::alloc::string::String, + #[prost(uint32, tag="2")] + pub format: u32, +} +/// SnapshotExtensionPayload contains payloads of an external snapshotter. 
+#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SnapshotExtensionPayload { + #[prost(bytes="vec", tag="1")] + pub payload: ::prost::alloc::vec::Vec, +} diff --git a/proto/src/prost/cosmos.base.store.v1beta1.rs b/proto/src/prost/cosmos.base.store.v1beta1.rs index 7bc9739142..6600689491 100644 --- a/proto/src/prost/cosmos.base.store.v1beta1.rs +++ b/proto/src/prost/cosmos.base.store.v1beta1.rs @@ -25,42 +25,6 @@ pub struct CommitId { #[prost(bytes="vec", tag="2")] pub hash: ::prost::alloc::vec::Vec, } -/// SnapshotItem is an item contained in a rootmulti.Store snapshot. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SnapshotItem { - /// item is the specific type of snapshot item. - #[prost(oneof="snapshot_item::Item", tags="1, 2")] - pub item: ::core::option::Option, -} -/// Nested message and enum types in `SnapshotItem`. -pub mod snapshot_item { - /// item is the specific type of snapshot item. - #[derive(Clone, PartialEq, ::prost::Oneof)] - pub enum Item { - #[prost(message, tag="1")] - Store(super::SnapshotStoreItem), - #[prost(message, tag="2")] - Iavl(super::SnapshotIavlItem), - } -} -/// SnapshotStoreItem contains metadata about a snapshotted store. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SnapshotStoreItem { - #[prost(string, tag="1")] - pub name: ::prost::alloc::string::String, -} -/// SnapshotIAVLItem is an exported IAVL node. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SnapshotIavlItem { - #[prost(bytes="vec", tag="1")] - pub key: ::prost::alloc::vec::Vec, - #[prost(bytes="vec", tag="2")] - pub value: ::prost::alloc::vec::Vec, - #[prost(int64, tag="3")] - pub version: i64, - #[prost(int32, tag="4")] - pub height: i32, -} /// StoreKVPair is a KVStore KVPair used for listening to state changes (Sets and Deletes) /// It optionally includes the StoreKey for the originating KVStore and a Boolean flag to distinguish between Sets and /// Deletes diff --git a/proto/src/prost/cosmos.staking.v1beta1.rs b/proto/src/prost/cosmos.staking.v1beta1.rs index b73ed6c9a3..1f26860897 100644 --- a/proto/src/prost/cosmos.staking.v1beta1.rs +++ b/proto/src/prost/cosmos.staking.v1beta1.rs @@ -2276,7 +2276,7 @@ pub struct StakeAuthorization { pub mod stake_authorization { /// Validators defines list of validator addresses. #[derive(Clone, PartialEq, ::prost::Message)] - pub struct ValidatorsVec { + pub struct ValidatorVec { #[prost(string, repeated, tag="1")] pub address: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, } @@ -2286,10 +2286,10 @@ pub mod stake_authorization { /// allow_list specifies list of validator addresses to whom grantee can delegate tokens on behalf of granter's /// account. #[prost(message, tag="2")] - AllowList(ValidatorsVec), + AllowList(ValidatorVec), /// deny_list specifies list of validator addresses to whom grantee can not delegate tokens. #[prost(message, tag="3")] - DenyList(ValidatorsVec), + DenyList(ValidatorVec), } } /// AuthorizationType defines the type of staking module authorization type diff --git a/proto/src/prost/cosmos.tx.signing.v1beta1.rs b/proto/src/prost/cosmos.tx.signing.v1beta1.rs index afa7835191..3dae83ec6f 100644 --- a/proto/src/prost/cosmos.tx.signing.v1beta1.rs +++ b/proto/src/prost/cosmos.tx.signing.v1beta1.rs @@ -82,4 +82,15 @@ pub enum SignMode { /// SIGN_MODE_LEGACY_AMINO_JSON is a backwards compatibility mode which uses /// Amino JSON and will be removed in the future LegacyAminoJson = 127, + /// SIGN_MODE_EIP_191 specifies the sign mode for EIP 191 signing on the Cosmos + /// SDK. 
Ref: + /// + /// Currently, SIGN_MODE_EIP_191 is registered as a SignMode enum variant, + /// but is not implemented on the SDK by default. To enable EIP-191, you need + /// to pass a custom `TxConfig` that has an implementation of + /// `SignModeHandler` for EIP-191. The SDK may decide to fully support + /// EIP-191 in the future. + /// + /// Since: cosmos-sdk 0.45.2 + Eip191 = 191, } diff --git a/proto/src/prost/cosmos.tx.v1beta1.rs b/proto/src/prost/cosmos.tx.v1beta1.rs index db864f78f8..7f2c669007 100644 --- a/proto/src/prost/cosmos.tx.v1beta1.rs +++ b/proto/src/prost/cosmos.tx.v1beta1.rs @@ -195,7 +195,7 @@ pub struct GetTxsEventRequest { /// events is the list of transaction event type. #[prost(string, repeated, tag="1")] pub events: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, - /// pagination defines an pagination for the request. + /// pagination defines a pagination for the request. #[prost(message, optional, tag="2")] pub pagination: ::core::option::Option, #[prost(enumeration="OrderBy", tag="3")] @@ -211,7 +211,7 @@ pub struct GetTxsEventResponse { /// tx_responses is the list of queried TxResponses. #[prost(message, repeated, tag="2")] pub tx_responses: ::prost::alloc::vec::Vec, - /// pagination defines an pagination for the response. + /// pagination defines a pagination for the response. #[prost(message, optional, tag="3")] pub pagination: ::core::option::Option, } @@ -277,6 +277,35 @@ pub struct GetTxResponse { #[prost(message, optional, tag="2")] pub tx_response: ::core::option::Option, } +/// GetBlockWithTxsRequest is the request type for the Service.GetBlockWithTxs +/// RPC method. +/// +/// Since: cosmos-sdk 0.45.2 +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetBlockWithTxsRequest { + /// height is the height of the block to query. + #[prost(int64, tag="1")] + pub height: i64, + /// pagination defines a pagination for the request. + #[prost(message, optional, tag="2")] + pub pagination: ::core::option::Option, +} +/// GetBlockWithTxsResponse is the response type for the Service.GetBlockWithTxs method. +/// +/// Since: cosmos-sdk 0.45.2 +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetBlockWithTxsResponse { + /// txs are the transactions in the block. + #[prost(message, repeated, tag="1")] + pub txs: ::prost::alloc::vec::Vec, + #[prost(message, optional, tag="2")] + pub block_id: ::core::option::Option<::tendermint_proto::types::BlockId>, + #[prost(message, optional, tag="3")] + pub block: ::core::option::Option<::tendermint_proto::types::Block>, + /// pagination defines a pagination for the response. + #[prost(message, optional, tag="4")] + pub pagination: ::core::option::Option, +} /// OrderBy defines the sorting order #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] @@ -450,6 +479,28 @@ pub mod service_client { ); self.inner.unary(request.into_request(), path, codec).await } + /// GetBlockWithTxs fetches a block with decoded txs. 
+ /// + /// Since: cosmos-sdk 0.45.2 + pub async fn get_block_with_txs( + &mut self, + request: impl tonic::IntoRequest, + ) -> Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/cosmos.tx.v1beta1.Service/GetBlockWithTxs", + ); + self.inner.unary(request.into_request(), path, codec).await + } } } /// Generated server implementations. @@ -480,6 +531,13 @@ pub mod service_server { &self, request: tonic::Request, ) -> Result, tonic::Status>; + /// GetBlockWithTxs fetches a block with decoded txs. + /// + /// Since: cosmos-sdk 0.45.2 + async fn get_block_with_txs( + &self, + request: tonic::Request, + ) -> Result, tonic::Status>; } /// Service defines a gRPC service for interacting with transactions. #[derive(Debug)] @@ -681,6 +739,46 @@ pub mod service_server { }; Box::pin(fut) } + "/cosmos.tx.v1beta1.Service/GetBlockWithTxs" => { + #[allow(non_camel_case_types)] + struct GetBlockWithTxsSvc(pub Arc); + impl< + T: Service, + > tonic::server::UnaryService + for GetBlockWithTxsSvc { + type Response = super::GetBlockWithTxsResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = self.0.clone(); + let fut = async move { + (*inner).get_block_with_txs(request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = GetBlockWithTxsSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } _ => { Box::pin(async move { Ok( diff --git a/proto/src/prost/ibc.core.channel.v1.rs b/proto/src/prost/ibc.core.channel.v1.rs index 0d47ed7846..a1f1543d7f 100644 --- a/proto/src/prost/ibc.core.channel.v1.rs +++ b/proto/src/prost/ibc.core.channel.v1.rs @@ -113,6 +113,22 @@ pub struct PacketState { #[prost(bytes="vec", tag="4")] pub data: ::prost::alloc::vec::Vec, } +/// PacketId is an identifer for a unique Packet +/// Source chains refer to packets by source port/channel +/// Destination chains refer to packets by destination port/channel +#[derive(::serde::Serialize, ::serde::Deserialize)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct PacketId { + /// channel port identifier + #[prost(string, tag="1")] + pub port_id: ::prost::alloc::string::String, + /// channel unique identifier + #[prost(string, tag="2")] + pub channel_id: ::prost::alloc::string::String, + /// packet sequence + #[prost(uint64, tag="3")] + pub sequence: u64, +} /// Acknowledgement is the recommended acknowledgement format to be used by /// app-specific protocols. 
/// NOTE: The field numbers 21 and 22 were explicitly chosen to avoid accidental @@ -223,6 +239,8 @@ pub struct MsgChannelOpenInit { pub struct MsgChannelOpenInitResponse { #[prost(string, tag="1")] pub channel_id: ::prost::alloc::string::String, + #[prost(string, tag="2")] + pub version: ::prost::alloc::string::String, } /// MsgChannelOpenInit defines a msg sent by a Relayer to try to open a channel /// on Chain B. The version field within the Channel field has been deprecated. Its @@ -232,8 +250,8 @@ pub struct MsgChannelOpenInitResponse { pub struct MsgChannelOpenTry { #[prost(string, tag="1")] pub port_id: ::prost::alloc::string::String, - /// in the case of crossing hello's, when both chains call OpenInit, we need - /// the channel identifier of the previous channel in state INIT + /// Deprecated: this field is unused. Crossing hello's are no longer supported in core IBC. + #[deprecated] #[prost(string, tag="2")] pub previous_channel_id: ::prost::alloc::string::String, /// NOTE: the version field within the channel has been deprecated. Its value will be ignored by core IBC. @@ -252,6 +270,8 @@ pub struct MsgChannelOpenTry { #[derive(::serde::Serialize, ::serde::Deserialize)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct MsgChannelOpenTryResponse { + #[prost(string, tag="1")] + pub version: ::prost::alloc::string::String, } /// MsgChannelOpenAck defines a msg sent by a Relayer to Chain A to acknowledge /// the change of channel state to TRYOPEN on Chain B. diff --git a/proto/src/prost/ibc.core.client.v1.rs b/proto/src/prost/ibc.core.client.v1.rs index 1e323c54a0..b501dfb31c 100644 --- a/proto/src/prost/ibc.core.client.v1.rs +++ b/proto/src/prost/ibc.core.client.v1.rs @@ -771,6 +771,30 @@ pub struct QueryConsensusStatesResponse { #[prost(message, optional, tag="2")] pub pagination: ::core::option::Option, } +/// QueryConsensusStateHeightsRequest is the request type for Query/ConsensusStateHeights +/// RPC method. +#[derive(::serde::Serialize, ::serde::Deserialize)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct QueryConsensusStateHeightsRequest { + /// client identifier + #[prost(string, tag="1")] + pub client_id: ::prost::alloc::string::String, + /// pagination request + #[prost(message, optional, tag="2")] + pub pagination: ::core::option::Option, +} +/// QueryConsensusStateHeightsResponse is the response type for the +/// Query/ConsensusStateHeights RPC method +#[derive(::serde::Serialize, ::serde::Deserialize)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct QueryConsensusStateHeightsResponse { + /// consensus state heights + #[prost(message, repeated, tag="1")] + pub consensus_state_heights: ::prost::alloc::vec::Vec, + /// pagination response + #[prost(message, optional, tag="2")] + pub pagination: ::core::option::Option, +} /// QueryClientStatusRequest is the request type for the Query/ClientStatus RPC /// method #[derive(::serde::Serialize, ::serde::Deserialize)] @@ -984,6 +1008,29 @@ pub mod query_client { ); self.inner.unary(request.into_request(), path, codec).await } + /// ConsensusStateHeights queries the height of every consensus states associated with a given client. 
+ pub async fn consensus_state_heights( + &mut self, + request: impl tonic::IntoRequest, + ) -> Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/ibc.core.client.v1.Query/ConsensusStateHeights", + ); + self.inner.unary(request.into_request(), path, codec).await + } /// Status queries the status of an IBC client. pub async fn client_status( &mut self, @@ -1102,6 +1149,14 @@ pub mod query_server { &self, request: tonic::Request, ) -> Result, tonic::Status>; + /// ConsensusStateHeights queries the height of every consensus states associated with a given client. + async fn consensus_state_heights( + &self, + request: tonic::Request, + ) -> Result< + tonic::Response, + tonic::Status, + >; /// Status queries the status of an IBC client. async fn client_status( &self, @@ -1337,6 +1392,49 @@ pub mod query_server { }; Box::pin(fut) } + "/ibc.core.client.v1.Query/ConsensusStateHeights" => { + #[allow(non_camel_case_types)] + struct ConsensusStateHeightsSvc(pub Arc); + impl< + T: Query, + > tonic::server::UnaryService< + super::QueryConsensusStateHeightsRequest, + > for ConsensusStateHeightsSvc { + type Response = super::QueryConsensusStateHeightsResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::QueryConsensusStateHeightsRequest, + >, + ) -> Self::Future { + let inner = self.0.clone(); + let fut = async move { + (*inner).consensus_state_heights(request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = ConsensusStateHeightsSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } "/ibc.core.client.v1.Query/ClientStatus" => { #[allow(non_camel_case_types)] struct ClientStatusSvc(pub Arc); diff --git a/proto/src/prost/ibc.core.connection.v1.rs b/proto/src/prost/ibc.core.connection.v1.rs index 2501975ab4..bba1a9602c 100644 --- a/proto/src/prost/ibc.core.connection.v1.rs +++ b/proto/src/prost/ibc.core.connection.v1.rs @@ -171,8 +171,8 @@ pub struct MsgConnectionOpenInitResponse { pub struct MsgConnectionOpenTry { #[prost(string, tag="1")] pub client_id: ::prost::alloc::string::String, - /// in the case of crossing hello's, when both chains call OpenInit, we need - /// the connection identifier of the previous connection in state INIT + /// Deprecated: this field is unused. Crossing hellos are no longer supported in core IBC. 
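Likewise illustration only: a hedged sketch of calling the new ConsensusStateHeights query through the generated client. The module paths and endpoint address are assumptions, and the response is assumed to carry the ibc.core.client.v1 Height values declared above.

use ibc_proto::ibc::core::client::v1::{query_client::QueryClient, QueryConsensusStateHeightsRequest};

// List every height at which a consensus state is stored for a given client.
async fn consensus_state_heights(client_id: &str) -> Result<(), Box<dyn std::error::Error>> {
    let mut client = QueryClient::connect("http://localhost:9090").await?;
    let request = QueryConsensusStateHeightsRequest {
        client_id: client_id.to_owned(),
        pagination: None, // first page with the server's default limit
    };
    let response = client.consensus_state_heights(request).await?.into_inner();
    for height in response.consensus_state_heights {
        println!("revision {} / height {}", height.revision_number, height.revision_height);
    }
    Ok(())
}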
+ #[deprecated] #[prost(string, tag="2")] pub previous_connection_id: ::prost::alloc::string::String, #[prost(message, optional, tag="3")] diff --git a/proto/src/prost/ibc.lightclients.beefy.v1.rs b/proto/src/prost/ibc.lightclients.beefy.v1.rs index 7824ca14b2..aa546370e7 100644 --- a/proto/src/prost/ibc.lightclients.beefy.v1.rs +++ b/proto/src/prost/ibc.lightclients.beefy.v1.rs @@ -76,7 +76,7 @@ pub struct SignedCommitment { } /// data needed to update the client #[derive(Clone, PartialEq, ::prost::Message)] -pub struct MmrUpdateProof { +pub struct ClientStateUpdateProof { /// the new mmr leaf SCALE encoded. #[prost(message, optional, tag="1")] pub mmr_leaf: ::core::option::Option, @@ -113,9 +113,19 @@ pub struct Misbehaviour { #[prost(message, optional, tag="3")] pub header_2: ::core::option::Option
, } -/// Header contains the neccessary data to proove finality about IBC commitments +/// Header contains the neccessary data to prove finality about IBC commitments #[derive(Clone, PartialEq, ::prost::Message)] pub struct Header { + /// optional payload to update ConsensusState + #[prost(message, optional, tag="1")] + pub consensus_state: ::core::option::Option, + /// optional payload to update the ClientState. + #[prost(message, optional, tag="2")] + pub client_state: ::core::option::Option, +} +//// Parachain headers and their mmr proofs. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ConsensusStateUpdateProof { /// parachain headers needed for proofs and ConsensusState #[prost(message, repeated, tag="1")] pub parachain_headers: ::prost::alloc::vec::Vec, @@ -125,9 +135,6 @@ pub struct Header { /// size of the mmr for the given proof #[prost(uint64, tag="3")] pub mmr_size: u64, - /// optional payload to update the mmr root hash. - #[prost(message, optional, tag="4")] - pub mmr_update_proof: ::core::option::Option, } /// data needed to prove parachain header inclusion in mmr. #[derive(Clone, PartialEq, ::prost::Message)] @@ -135,7 +142,7 @@ pub struct ParachainHeader { /// scale-encoded parachain header bytes #[prost(bytes="vec", tag="1")] pub parachain_header: ::prost::alloc::vec::Vec, - /// see beefy-go spec + /// see beefy spec #[prost(message, optional, tag="2")] pub mmr_leaf_partial: ::core::option::Option, /// proofs for our header in the parachain heads root diff --git a/proto/src/prost/ibc.lightclients.tendermint.v1.rs b/proto/src/prost/ibc.lightclients.tendermint.v1.rs index 23bd610879..3ce35376c8 100644 --- a/proto/src/prost/ibc.lightclients.tendermint.v1.rs +++ b/proto/src/prost/ibc.lightclients.tendermint.v1.rs @@ -34,12 +34,12 @@ pub struct ClientState { /// "upgradedIBCState"}` #[prost(string, repeated, tag="9")] pub upgrade_path: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, - /// This flag, when set to true, will allow governance to recover a client - /// which has expired + /// allow_update_after_expiry is deprecated + #[deprecated] #[prost(bool, tag="10")] pub allow_update_after_expiry: bool, - /// This flag, when set to true, will allow governance to unfreeze a client - /// whose chain has experienced a misbehaviour event + /// allow_update_after_misbehaviour is deprecated + #[deprecated] #[prost(bool, tag="11")] pub allow_update_after_misbehaviour: bool, } diff --git a/proto/src/prost/ics23.rs b/proto/src/prost/ics23.rs index c5780d970f..2c25a3c12a 100644 --- a/proto/src/prost/ics23.rs +++ b/proto/src/prost/ics23.rs @@ -292,7 +292,6 @@ pub enum HashOp { Ripemd160 = 4, /// ripemd160(sha256(x)) Bitcoin = 5, - Sha512256 = 6, } ///* ///LengthOp defines how to process the key and value of the LeafOp diff --git a/scripts/sync-protobuf.sh b/scripts/sync-protobuf.sh index 25c551522d..74a056e2cb 100755 --- a/scripts/sync-protobuf.sh +++ b/scripts/sync-protobuf.sh @@ -24,13 +24,16 @@ set -eou pipefail CACHE_PATH="${XDG_CACHE_HOME:-$HOME/.cache}" COSMOS_SDK_GIT="${COSMOS_SDK_GIT:-$CACHE_PATH/cosmos/cosmos-sdk.git}" IBC_GO_GIT="${IBC_GO_GIT:-$CACHE_PATH/ibc-go.git}" +COMPOSABLE_IBC_GO_GIT="${COMPOSABLE_IBC_GO_GIT:-$CACHE_PATH/composable/ibc-go.git}" COSMOS_SDK_COMMIT="$(cat proto/src/COSMOS_SDK_COMMIT)" IBC_GO_COMMIT="$(cat proto/src/IBC_GO_COMMIT)" +COMPOSABLE_IBC_GO_COMMIT="$(cat proto/src/COMPOSABLE_IBC_GO_COMMIT)" echo "COSMOS_SDK_COMMIT: $COSMOS_SDK_COMMIT" echo "IBC_GO_COMMIT: $IBC_GO_COMMIT" +echo "COMPOSABLE_IBC_GO_COMMIT: 
$COMPOSABLE_IBC_GO_COMMIT" # Use either --sdk-commit flag for commit ID, # or --sdk-tag for git tag. Because we can't modify @@ -58,11 +61,19 @@ fi if [[ ! -e "$IBC_GO_GIT" ]] then echo "Cloning ibc-go source code to as bare git repository to $IBC_GO_GIT" - git clone --mirror https://github.com/ComposableFi/ibc-go.git "$IBC_GO_GIT" + git clone --mirror https://github.com/cosmos/ibc-go.git "$IBC_GO_GIT" else echo "Using existing ibc-go bare git repository at $IBC_GO_GIT" fi +if [[ ! -e "$COMPOSABLE_IBC_GO_GIT" ]] +then + echo "Cloning composable ibc-go source code to as bare git repository to $COMPOSABLE_IBC_GO_GIT" + git clone --mirror https://github.com/ComposableFi/ibc-go.git "$COMPOSABLE_IBC_GO_GIT" +else + echo "Using existing composable ibc-go bare git repository at $COMPOSABLE_IBC_GO_GIT" +fi + # Update the repositories using git fetch. This is so that # we keep local copies of the repositories up to sync first. pushd "$COSMOS_SDK_GIT" @@ -73,6 +84,10 @@ pushd "$IBC_GO_GIT" git fetch popd +pushd "$COMPOSABLE_IBC_GO_GIT" +git fetch +popd + # Create a new temporary directory to check out the # actual source files from the bare git repositories. # This is so that we do not accidentally use an unclean @@ -97,6 +112,17 @@ git checkout "$IBC_GO_COMMIT" git checkout -b "$IBC_GO_COMMIT" popd +# Make the composable temp repo first +rm -rf /tmp/composable +mktemp -d /tmp/composable +COMPOSABLE_IBC_GO_DIR=$(mktemp -d /tmp/composable/ibc-go-XXXXXXXX) + +pushd "$COMPOSABLE_IBC_GO_DIR" +git clone "$COMPOSABLE_IBC_GO_GIT" . +git checkout "$COMPOSABLE_IBC_GO_COMMIT" +git checkout -b "$COMPOSABLE_IBC_GO_COMMIT" +popd + # Remove the existing generated protobuf files # so that the newly generated code does not # contain removed files. @@ -113,9 +139,10 @@ cargo build # and once for no-std version with --build-tonic set to false cargo run -- compile \ - --sdk "$COSMOS_SDK_DIR" --ibc "$IBC_GO_DIR" --out ../proto/src/prost + --sdk "$COSMOS_SDK_DIR" --ibc "$IBC_GO_DIR" --beefy "$COMPOSABLE_IBC_GO_DIR" --out ../proto/src/prost # Remove the temporary checkouts of the repositories rm -rf "$COSMOS_SDK_DIR" rm -rf "$IBC_GO_DIR" +rm -rf "$COMPOSABLE_IBC_GO_DIR" From 9925f24c5061832ec5d5a0f4b16a1802dbb75128 Mon Sep 17 00:00:00 2001 From: Web3 Smith <31099392+Wizdave97@users.noreply.github.com> Date: Mon, 5 Sep 2022 20:44:25 +0100 Subject: [PATCH 86/96] Fix timeout processing (#59) --- modules/src/clients/ics11_beefy/header.rs | 116 +++++++++--------- modules/src/core/ics04_channel/error.rs | 13 ++ .../handler/chan_close_confirm.rs | 2 +- .../ics04_channel/handler/chan_open_ack.rs | 2 +- .../handler/chan_open_confirm.rs | 2 +- .../ics04_channel/handler/chan_open_try.rs | 2 +- .../src/core/ics04_channel/handler/timeout.rs | 18 +-- .../ics04_channel/handler/timeout_on_close.rs | 5 +- .../src/core/ics04_channel/handler/verify.rs | 7 +- .../ics04_channel/msgs/timeout_on_close.rs | 7 +- modules/src/core/ics04_channel/packet.rs | 28 ++++- modules/src/timestamp.rs | 6 +- 12 files changed, 127 insertions(+), 81 deletions(-) diff --git a/modules/src/clients/ics11_beefy/header.rs b/modules/src/clients/ics11_beefy/header.rs index 56ad47a947..9e8280fb60 100644 --- a/modules/src/clients/ics11_beefy/header.rs +++ b/modules/src/clients/ics11_beefy/header.rs @@ -82,68 +82,74 @@ impl TryFrom for BeefyHeader { type Error = Error; fn try_from(raw_header: RawBeefyHeader) -> Result { - let headers_with_proof = raw_header.consensus_state.map(|consensus_update| { - let parachain_headers = consensus_update - .parachain_headers - 
.into_iter() - .map(|raw_para_header| { - let mmr_partial_leaf = raw_para_header - .mmr_leaf_partial - .ok_or_else(Error::invalid_raw_header)?; - let parent_hash = - H256::decode(&mut mmr_partial_leaf.parent_hash.as_slice()).unwrap(); - let beefy_next_authority_set = - if let Some(next_set) = mmr_partial_leaf.beefy_next_authority_set { - BeefyNextAuthoritySet { - id: next_set.id, - len: next_set.len, - root: H256::decode(&mut next_set.authority_root.as_slice()) - .map_err(|e| Error::invalid_mmr_update(e.to_string()))?, - } - } else { - Default::default() - }; - Ok(ParachainHeader { - parachain_header: decode_parachain_header(raw_para_header.parachain_header) + let headers_with_proof = raw_header + .consensus_state + .map(|consensus_update| { + let parachain_headers = consensus_update + .parachain_headers + .into_iter() + .map(|raw_para_header| { + let mmr_partial_leaf = raw_para_header + .mmr_leaf_partial + .ok_or_else(Error::invalid_raw_header)?; + let parent_hash = + H256::decode(&mut mmr_partial_leaf.parent_hash.as_slice()).unwrap(); + let beefy_next_authority_set = + if let Some(next_set) = mmr_partial_leaf.beefy_next_authority_set { + BeefyNextAuthoritySet { + id: next_set.id, + len: next_set.len, + root: H256::decode(&mut next_set.authority_root.as_slice()) + .map_err(|e| Error::invalid_mmr_update(e.to_string()))?, + } + } else { + Default::default() + }; + Ok(ParachainHeader { + parachain_header: decode_parachain_header( + raw_para_header.parachain_header, + ) .map_err(|_| Error::invalid_raw_header())?, - partial_mmr_leaf: PartialMmrLeaf { - version: { - let (major, minor) = split_leaf_version( - mmr_partial_leaf.version.saturated_into::(), - ); - MmrLeafVersion::new(major, minor) + partial_mmr_leaf: PartialMmrLeaf { + version: { + let (major, minor) = split_leaf_version( + mmr_partial_leaf.version.saturated_into::(), + ); + MmrLeafVersion::new(major, minor) + }, + parent_number_and_hash: ( + mmr_partial_leaf.parent_number, + parent_hash, + ), + beefy_next_authority_set, }, - parent_number_and_hash: (mmr_partial_leaf.parent_number, parent_hash), - beefy_next_authority_set, - }, - parachain_heads_proof: raw_para_header - .parachain_heads_proof - .into_iter() - .map(|item| { - let mut dest = [0u8; 32]; - if item.len() != 32 { - return Err(Error::invalid_raw_header()); - } - dest.copy_from_slice(&*item); - Ok(dest) - }) - .collect::, Error>>()?, - heads_leaf_index: raw_para_header.heads_leaf_index, - heads_total_count: raw_para_header.heads_total_count, - extrinsic_proof: raw_para_header.extrinsic_proof, - timestamp_extrinsic: raw_para_header.timestamp_extrinsic, + parachain_heads_proof: raw_para_header + .parachain_heads_proof + .into_iter() + .map(|item| { + let mut dest = [0u8; 32]; + if item.len() != 32 { + return Err(Error::invalid_raw_header()); + } + dest.copy_from_slice(&*item); + Ok(dest) + }) + .collect::, Error>>()?, + heads_leaf_index: raw_para_header.heads_leaf_index, + heads_total_count: raw_para_header.heads_total_count, + extrinsic_proof: raw_para_header.extrinsic_proof, + timestamp_extrinsic: raw_para_header.timestamp_extrinsic, + }) }) - }) - .collect::, Error>>() - .ok(); - parachain_headers.map(|parachain_headers| { - ParachainHeadersWithProof { + .collect::, Error>>() + .ok(); + parachain_headers.map(|parachain_headers| ParachainHeadersWithProof { headers: parachain_headers, mmr_proofs: consensus_update.mmr_proofs, mmr_size: consensus_update.mmr_size, - } + }) }) - }).flatten(); + .flatten(); let mmr_update_proof = if let Some(mmr_update) = raw_header.client_state 
{ let commitment = mmr_update diff --git a/modules/src/core/ics04_channel/error.rs b/modules/src/core/ics04_channel/error.rs index b4473966e3..9a73862d1b 100644 --- a/modules/src/core/ics04_channel/error.rs +++ b/modules/src/core/ics04_channel/error.rs @@ -72,6 +72,9 @@ define_error! { MissingHeight | _ | { "invalid proof: missing height" }, + MissingChannelProof + | _ | { "invalid proof: missing channel proof" }, + MissingNextRecvSeq { port_channel_id: (PortId, ChannelId) } | e | { @@ -212,6 +215,16 @@ define_error! { "Receiving chain block height {0} >= packet timeout height {1}", e.chain_height, e.timeout_height) }, + PacketTimeoutNotReached + { + timeout_height: Height, + chain_height: Height, + timeout_timestamp: Timestamp, + chain_timestamp: Timestamp, + } + | e | { format_args!( + "Packet timeout not satisified for either packet height or timestamp, Packet timeout height {0}, chain height {1}, Packet timeout timestamp {2}, chain timestamp {3}", + e.timeout_height, e.chain_height, e.timeout_timestamp, e.chain_timestamp) }, PacketTimeoutHeightNotReached { diff --git a/modules/src/core/ics04_channel/handler/chan_close_confirm.rs b/modules/src/core/ics04_channel/handler/chan_close_confirm.rs index 4c54788afe..0a54903a29 100644 --- a/modules/src/core/ics04_channel/handler/chan_close_confirm.rs +++ b/modules/src/core/ics04_channel/handler/chan_close_confirm.rs @@ -71,7 +71,7 @@ pub(crate) fn process( &channel_end, &conn, &expected_channel_end, - &msg.proofs, + &msg.proofs.object_proof(), )?; output.log("success: channel close confirm "); diff --git a/modules/src/core/ics04_channel/handler/chan_open_ack.rs b/modules/src/core/ics04_channel/handler/chan_open_ack.rs index 34f50d9f40..041d1f00c3 100644 --- a/modules/src/core/ics04_channel/handler/chan_open_ack.rs +++ b/modules/src/core/ics04_channel/handler/chan_open_ack.rs @@ -78,7 +78,7 @@ pub(crate) fn process( &channel_end, &conn, &expected_channel_end, - &msg.proofs, + &msg.proofs.object_proof(), )?; output.log("success: channel open ack "); diff --git a/modules/src/core/ics04_channel/handler/chan_open_confirm.rs b/modules/src/core/ics04_channel/handler/chan_open_confirm.rs index e2c9c58873..1e362caef5 100644 --- a/modules/src/core/ics04_channel/handler/chan_open_confirm.rs +++ b/modules/src/core/ics04_channel/handler/chan_open_confirm.rs @@ -73,7 +73,7 @@ pub(crate) fn process( &channel_end, &conn, &expected_channel_end, - &msg.proofs, + &msg.proofs.object_proof(), ) .map_err(Error::chan_open_confirm_proof_verification)?; diff --git a/modules/src/core/ics04_channel/handler/chan_open_try.rs b/modules/src/core/ics04_channel/handler/chan_open_try.rs index 71f2351d8b..081b1ec0f9 100644 --- a/modules/src/core/ics04_channel/handler/chan_open_try.rs +++ b/modules/src/core/ics04_channel/handler/chan_open_try.rs @@ -97,7 +97,7 @@ pub(crate) fn process( &new_channel_end, &conn, &expected_channel_end, - &msg.proofs, + &msg.proofs.object_proof(), )?; output.log("success: channel open try "); diff --git a/modules/src/core/ics04_channel/handler/timeout.rs b/modules/src/core/ics04_channel/handler/timeout.rs index 5f2e7b4480..ea771b0902 100644 --- a/modules/src/core/ics04_channel/handler/timeout.rs +++ b/modules/src/core/ics04_channel/handler/timeout.rs @@ -13,7 +13,6 @@ use crate::core::ics26_routing::context::ReaderContext; use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; -use crate::timestamp::Expiry; use core::fmt::Debug; #[derive(Clone, Debug)] @@ -59,14 +58,6 @@ pub fn process( // check that 
timeout height or timeout timestamp has passed on the other end let proof_height = msg.proofs.height(); - let packet_height = packet.timeout_height; - - if (!packet.timeout_height.is_zero()) && packet_height > proof_height { - return Err(Error::packet_timeout_height_not_reached( - packet.timeout_height, - proof_height, - )); - } let consensus_state = ctx .consensus_state(&client_id, proof_height) @@ -74,10 +65,11 @@ pub fn process( let proof_timestamp = consensus_state.timestamp(); - let packet_timestamp = packet.timeout_timestamp; - if let Expiry::Expired = packet_timestamp.check_expiry(&proof_timestamp) { - return Err(Error::packet_timeout_timestamp_not_reached( - packet_timestamp, + if !packet.timed_out(&proof_timestamp, proof_height) { + return Err(Error::packet_timeout_not_reached( + packet.timeout_height, + proof_height, + packet.timeout_timestamp, proof_timestamp, )); } diff --git a/modules/src/core/ics04_channel/handler/timeout_on_close.rs b/modules/src/core/ics04_channel/handler/timeout_on_close.rs index 3b194a395f..6f46deb180 100644 --- a/modules/src/core/ics04_channel/handler/timeout_on_close.rs +++ b/modules/src/core/ics04_channel/handler/timeout_on_close.rs @@ -81,7 +81,10 @@ pub fn process( &source_channel_end, &connection_end, &expected_channel_end, - &msg.proofs, + msg.proofs + .other_proof() + .as_ref() + .ok_or_else(|| Error::missing_channel_proof())?, )?; let result = if source_channel_end.order_matches(&Order::Ordered) { diff --git a/modules/src/core/ics04_channel/handler/verify.rs b/modules/src/core/ics04_channel/handler/verify.rs index 7e2f2e9ea9..c9be117dce 100644 --- a/modules/src/core/ics04_channel/handler/verify.rs +++ b/modules/src/core/ics04_channel/handler/verify.rs @@ -7,6 +7,7 @@ use crate::core::ics04_channel::channel::ChannelEnd; use crate::core::ics04_channel::error::Error; use crate::core::ics04_channel::msgs::acknowledgement::Acknowledgement; use crate::core::ics04_channel::packet::{Packet, Sequence}; +use crate::core::ics23_commitment::commitment::CommitmentProofBytes; use crate::core::ics26_routing::context::ReaderContext; use crate::prelude::*; use crate::proofs::Proofs; @@ -19,7 +20,7 @@ pub fn verify_channel_proofs( channel_end: &ChannelEnd, connection_end: &ConnectionEnd, expected_chan: &ChannelEnd, - proofs: &Proofs, + proof: &CommitmentProofBytes, ) -> Result<(), Error> { // This is the client which will perform proof verification. 
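As an aside, a self-contained sketch of the refactor applied to verify_channel_proofs above, using hypothetical local types rather than the crate's API: the verifier now receives exactly the proof it is asked to check, and each handler decides whether that is the primary object proof or the dedicated closure proof.

struct ProofBytes(Vec<u8>);

struct ProofBundle {
    object_proof: ProofBytes,
    other_proof: Option<ProofBytes>,
}

impl ProofBundle {
    fn object_proof(&self) -> &ProofBytes {
        &self.object_proof
    }
    fn other_proof(&self) -> Option<&ProofBytes> {
        self.other_proof.as_ref()
    }
}

// The verifier only sees the single proof it must check, instead of the
// whole bundle plus a convention about which member to use.
fn verify_channel(proof: &ProofBytes) -> Result<(), String> {
    if proof.0.is_empty() {
        return Err("empty proof".to_string());
    }
    Ok(())
}

fn main() -> Result<(), String> {
    let proofs = ProofBundle {
        object_proof: ProofBytes(vec![1, 2, 3]),
        other_proof: Some(ProofBytes(vec![4, 5, 6])),
    };
    // Handshake handlers keep verifying the primary object proof ...
    verify_channel(proofs.object_proof())?;
    // ... while timeout-on-close verifies the dedicated closure proof and
    // surfaces a missing-proof error when it is absent.
    let proof_close = proofs.other_proof().ok_or("missing channel proof")?;
    verify_channel(proof_close)
}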
let client_id = connection_end.client_id().clone(); @@ -32,7 +33,7 @@ pub fn verify_channel_proofs( } let consensus_state = ctx - .consensus_state(&client_id, proofs.height()) + .consensus_state(&client_id, height) .map_err(|_| Error::error_invalid_consensus_state())?; let client_def = AnyClient::::from_client_type(client_state.client_type()); @@ -46,7 +47,7 @@ pub fn verify_channel_proofs( &client_state, height, connection_end.counterparty().prefix(), - proofs.object_proof(), + &proof, consensus_state.root(), channel_end.counterparty().port_id(), channel_end.counterparty().channel_id().unwrap(), diff --git a/modules/src/core/ics04_channel/msgs/timeout_on_close.rs b/modules/src/core/ics04_channel/msgs/timeout_on_close.rs index 05fda53a54..f13a0104db 100644 --- a/modules/src/core/ics04_channel/msgs/timeout_on_close.rs +++ b/modules/src/core/ics04_channel/msgs/timeout_on_close.rs @@ -64,7 +64,12 @@ impl TryFrom for MsgTimeoutOnClose { .map_err(Error::invalid_proof)?, None, None, - None, + Some( + raw_msg + .proof_close + .try_into() + .map_err(Error::invalid_proof)?, + ), raw_msg .proof_height .ok_or_else(Error::missing_height)? diff --git a/modules/src/core/ics04_channel/packet.rs b/modules/src/core/ics04_channel/packet.rs index b8745e8f8e..6e9b985235 100644 --- a/modules/src/core/ics04_channel/packet.rs +++ b/modules/src/core/ics04_channel/packet.rs @@ -153,6 +153,12 @@ impl core::fmt::Debug for Packet { } } +pub enum TimeoutVariant { + Height, + Timestamp, + Both, +} + impl Packet { /// Checks whether a packet from a /// [`SendPacket`](crate::core::ics04_channel::events::SendPacket) @@ -168,10 +174,30 @@ impl Packet { /// instead of the common-case where it results in /// [`MsgRecvPacket`](crate::core::ics04_channel::msgs::recv_packet::MsgRecvPacket). 
pub fn timed_out(&self, dst_chain_ts: &Timestamp, dst_chain_height: Height) -> bool { - (self.timeout_height != Height::zero() && self.timeout_height < dst_chain_height) + (self.timeout_height != Height::zero() && self.timeout_height <= dst_chain_height) || (self.timeout_timestamp != Timestamp::none() && dst_chain_ts.check_expiry(&self.timeout_timestamp) == Expired) } + + pub fn timeout_variant( + &self, + dst_chain_ts: &Timestamp, + dst_chain_height: Height, + ) -> Option { + let height_timeout = + self.timeout_height != Height::zero() && self.timeout_height < dst_chain_height; + let timestamp_timeout = self.timeout_timestamp != Timestamp::none() + && (dst_chain_ts.check_expiry(&self.timeout_timestamp) == Expired); + if height_timeout && !timestamp_timeout { + Some(TimeoutVariant::Height) + } else if timestamp_timeout && !height_timeout { + Some(TimeoutVariant::Timestamp) + } else if timestamp_timeout && height_timeout { + Some(TimeoutVariant::Both) + } else { + None + } + } } /// Custom debug output to omit the packet data diff --git a/modules/src/timestamp.rs b/modules/src/timestamp.rs index 477afe43fa..99a1dd511a 100644 --- a/modules/src/timestamp.rs +++ b/modules/src/timestamp.rs @@ -144,7 +144,7 @@ impl Timestamp { pub fn check_expiry(&self, other: &Timestamp) -> Expiry { match (self.time, other.time) { (Some(time1), Some(time2)) => { - if time1 > time2 { + if time1 >= time2 { Expiry::Expired } else { Expiry::NotExpired @@ -271,8 +271,8 @@ mod tests { assert!(Timestamp::from_nanoseconds(i64::MAX.try_into().unwrap()).is_ok()); assert_eq!(timestamp1.check_expiry(×tamp2), Expiry::NotExpired); - assert_eq!(timestamp1.check_expiry(×tamp1), Expiry::NotExpired); - assert_eq!(timestamp2.check_expiry(×tamp2), Expiry::NotExpired); + assert_eq!(timestamp1.check_expiry(×tamp1), Expiry::Expired); + assert_eq!(timestamp2.check_expiry(×tamp2), Expiry::Expired); assert_eq!(timestamp2.check_expiry(×tamp1), Expiry::Expired); assert_eq!( timestamp1.check_expiry(&nil_timestamp), From af5bb83fc4ba3585a04489ac3dde1f7ed79e2c65 Mon Sep 17 00:00:00 2001 From: Vladislav Date: Fri, 16 Sep 2022 15:37:27 +0400 Subject: [PATCH 87/96] Refactor: Decouple `*Client` types from `Any*` types (#60) * Decouple `*Client` types from `Any*` types * Remove unused bounds and imports * Fix tests * Remove commented code * Fix warnings * Fix more warnings * ClientTypes should only exist on ClientKeeper * Finish "ClientTypes should only exist on ClientKeeper" * Get rid of `GlobalDefs` * fix tests * remove unused functions * Proc-macros for `Any*` types + Protobuf generation * Move client types to a separate crate * fix tests * remove ics18-relayer * make `downcast` and `wrap` return Option * HostFunctions is now the responsibility of Clients * Make `ClientType` - `str` * Finish 'HostFunctions is now the responsibility of Clients' * cargo fix & fmt * Clean up dependencies * re-introduce host consensus state proofs * fix compilation * remove clients * cargo fmt Co-authored-by: Seun Lanlege --- .changelog/config.toml | 79 - .changelog/epilogue.md | 1081 ------- .changelog/unreleased/.gitkeep | 0 .../1075-change-key-name-flag.md | 1 - .../2143-config-validate-cli.md | 2 - ...153-fix-execute-schedule-leaky-pipeline.md | 2 - .../ibc/2293-fix-recv-packet-dest-portchan.md | 3 - .../relayer-cli/2168-conn-handshake-retry.md | 3 - .../912-balance-subcommand-cli.md | 2 - .../999-channel-client-subcommand-cli.md | 2 - .../features/ibc-relayer/2240-chain-types.md | 3 - .../features/proto/2277-proto-server.md | 2 - 
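To make the revised timeout semantics above concrete, a self-contained illustration with plain integers standing in for the crate's Height and Timestamp types (an assumption for brevity): a packet now counts as timed out once the destination chain reaches the timeout height (<= rather than <) or the timeout timestamp (equality now counts as expired after the check_expiry change), while timeout_variant reports which limit was hit and keeps the strict height comparison.

#[derive(Debug, PartialEq)]
enum TimeoutVariant {
    Height,
    Timestamp,
    Both,
}

// Zero stands in for Height::zero() / Timestamp::none(), i.e. "no limit".
fn timed_out(timeout_height: u64, timeout_ts: u64, chain_height: u64, chain_ts: u64) -> bool {
    (timeout_height != 0 && timeout_height <= chain_height)
        || (timeout_ts != 0 && chain_ts >= timeout_ts)
}

fn timeout_variant(
    timeout_height: u64,
    timeout_ts: u64,
    chain_height: u64,
    chain_ts: u64,
) -> Option<TimeoutVariant> {
    let height_timeout = timeout_height != 0 && timeout_height < chain_height;
    let timestamp_timeout = timeout_ts != 0 && chain_ts >= timeout_ts;
    match (height_timeout, timestamp_timeout) {
        (true, false) => Some(TimeoutVariant::Height),
        (false, true) => Some(TimeoutVariant::Timestamp),
        (true, true) => Some(TimeoutVariant::Both),
        (false, false) => None,
    }
}

fn main() {
    // Equal heights now count as timed out (timed_out uses <=) ...
    assert!(timed_out(100, 0, 100, 0));
    // ... but timeout_variant keeps the strict `<` comparison for heights.
    assert_eq!(timeout_variant(100, 0, 100, 0), None);
    // Equal timestamps count as expired after the check_expiry change.
    assert_eq!(timeout_variant(0, 1_000, 0, 1_000), Some(TimeoutVariant::Timestamp));
}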
.../2301-tendermint-version-support.md | 2 - .../1400-fee-related-error-message.md | 2 - .../improvements/ibc/1759-complete-ics20.md | 1 - .../ibc/2279-u256-serde-derive.md | 1 - .../ibc/2280-ics20-api-improvements.md | 2 - .../2223-consolidate-chain-query-proven.md | 2 - .../relayer/2249-ignore-nonce-mismatch.md | 2 - .../breaking-changes/1660-msrv-1.57.md | 2 - .../1665-tendermint-0.23.2.md | 2 - .../ibc-relayer/1656-supervisor-spawn.md | 3 - .../ibc/1618-get-frozen-height.md | 3 - .../ibc/1665-remove-chrono.md | 4 - .../bug-fixes/1264-recover-acct-seq.md | 2 - .../1634-update-unclog-instructions.md | 2 - .../ibc-relayer/1664-handle-expired-client.md | 1 - .../1715-execute-schedule-after-packet-cmd.md | 1 - .../ibc-relayer/1750-misbehavior-config.md | 1 - ...-delete-commitment-in-acknowledgePacket.md | 2 - .../ibc/1649-fix-chan-open-ack-verify.md | 2 - .../ibc/1697-assert-non-zero-trust-level.md | 2 - .../ibc/1710-fix-frozen-height-proto-conv.md | 2 - .../v0.10.0/features/1410-dynamic-version.md | 2 - .changelog/v0.10.0/features/1550-ci-gaiav6.md | 1 - .changelog/v0.10.0/features/1606.md | 2 - .../features/1633-allow-fee-granters.md | 2 - .../ibc-relayer/1561-config-proof-specs.md | 2 - .../ibc/1583-module-verification-ICS07.md | 3 - .../1063-event-monitor-on-demand.md | 2 - .../improvements/ibc-relayer-cli/1636.md | 2 - .../ibc-relayer/1576-update-abscissa.md | 6 - .../improvements/ibc/1665-remove-chrono.md | 4 - .changelog/v0.10.0/summary.md | 16 - .changelog/v0.11.0/1749-build-aarch64.md | 2 - .../breaking-changes/1612-ibc-clock.md | 1 - .../breaking-changes/1765-msrv-1.58.md | 1 - .../1767-tendermint-rs-0.23.5.md | 2 - .../1817-remove-filter-option.md | 2 - .../1662-configurable-upgrade-denom.md | 2 - .../1807-foreign-client-create-params.md | 4 - .../1745-fix-consensus-proof-verification.md | 1 - ...63-init-consensus-meta-on-client-create.md | 2 - .../v0.11.0/improvements/1536-fast-start.md | 3 - .../improvements/1641-tendermint-0.23.4.md | 3 - .../improvements/1687-remove-mock-sleep.md | 1 - .../1662-configurable-upgrade-denom.md | 2 - .../1777-update-abscissa-and-clap.md | 2 - .../ibc-relayer-cli/1789-cli-completions.md | 2 - .../836-create-client-options.md | 2 - .../1481-chainendpoint-any-consensus-state.md | 2 - .../ibc-relayer/1491-structured-logs.md | 2 - .../1785-clarify-ethermint-keys.md | 2 - .../ibc/1760-path-variants-as-types.md | 2 - ...allow-empty-commitment-prefix-and-proof.md | 2 - .changelog/v0.11.0/summary.md | 36 - .../1822-skip-config-for-completions.md | 2 - ...connection-handshake-verification-logic.md | 1 - .../ibc-relayer/1663-pending-timeout.md | 2 - .../1793-begin-end-block-chan-events.md | 2 - .changelog/v0.11.1/summary.md | 1 - .../1885-disable-config-reload.md | 1 - .../ibc-relayer/1837-non-standard-ports.md | 2 - .../1844-duplicate-send-packet-events.md | 2 - .../ibc-relayer/1861-non-standard-ports.md | 3 - .../ibc-relayer/1872-clear-packets.md | 2 - ...x-formatting-for-some-tendermint-errors.md | 3 - .../ibc/1770-deterministic-host-timestamp.md | 2 - .changelog/v0.12.0/features/1797-ibc-v3.md | 2 - .../features/ibc-relayer-cli/1895-rust-log.md | 2 - .../ibc-relayer-cli/1834-clear-packets-cmd.md | 3 - .../ibc-relayer/1388-more-health-checks.md | 2 - .../1880-nonallocating-verions-method.md | 1 - .../ibc/1706-add-client-state-tests.md | 2 - .../ibc/1769-cap-reader-keeper.md | 2 - .changelog/v0.12.0/summary.md | 13 - .../ibc-relayer/1835-ordered-channels.md | 2 - .../1991-packet-worker-chan-open.md | 2 - .../ibc-relayer/2008-slow-relayer.md | 3 - 
.../ibc-proto/1913-cosmwasm-support.md | 5 - .../1988-serde-serialize-deserialize.md | 3 - .../ibc-relayer/1908-caching-layer.md | 2 - .../1927-packet-filtering-wildcards.md | 3 - .../1961-test-framework.md | 1 - .../ibc-relayer-cli/1559-cli-output.md | 1 - .../1908-caching-layer-documentation.md | 1 - .../ibc/718-rework-ics04_channel-events.md | 2 - .changelog/v0.13.0/summary.md | 59 - .../breaking-changes/2081-msrv-1.60.md | 2 - .../1288-upgrade-chain-confirmation.md | 2 - .../1921-create-client-options.md | 2 - .../ibc-relayer/1772-fix-conn-delay-check.md | 2 - ...x-hermes-retrying-not-regenerating-msgs.md | 1 - .../1998-default-max-block-time.md | 2 - .../2075-wildcard-filter-middle.md | 2 - .../ibc-relayer/2097-misbehavior-height.md | 2 - .../ibc/2035-handler-event-height.md | 2 - .../ibc/2062-conn-open-init-version.md | 2 - .../ibc-relayer/2036-caching-metrics.md | 2 - .../improvements/1936-missing-chain-warn.md | 2 - .../improvements/2045-tendermint-0.23.6.md | 2 - .../1421-create-channel-cli.md | 2 - .../2096-query-packet-pending.md | 4 - .../improvements/ibc/1758-complete-ics26.md | 1 - .../improvements/ibc/2068-chan-id-u64.md | 1 - .changelog/v0.14.0/summary.md | 17 - .../ibc-relayer/1970-app-latest-height.md | 2 - .changelog/v0.14.1/summary.md | 2 - .changelog/v0.15.0/2181-update-codeowners.md | 2 - .../ibc-relayer/1971-non-batch-fix.md | 2 - .../ibc-relayer/2180-client-expiry-time.md | 2 - .../ibc/2104-fix-commitment-computation.md | 2 - .../ibc/2114-fix-ack-verification.md | 2 - .../bug-fixes/ibc/2178-conn-ack-bug-fix.md | 2 - .../v0.15.0/features/1986-gaia-e2e-tests.md | 2 - .../features/ibc-relayer/2112-new-metrics.md | 3 - .../ibc-relayer/1971-max-msg-num-min-bound.md | 2 - .../2031-misleading-misbehavior-error.md | 2 - .../2087-incremental-packet-clearing.md | 2 - .../ibc-relayer/2192-adr009-impl.md | 2 - .../improvements/ibc/2159-remove-ocaps.md | 2 - .changelog/v0.15.0/summary.md | 4 - .../1247-add-missing-protobuf-impl.md | 3 - .../features/1020-augment-error-type.md | 3 - .../1021-cli-indicate-config-file-error.md | 3 - .../v0.6.2/features/1229-upgrade-clis.md | 3 - .changelog/v0.6.2/features/988-flex-error.md | 4 - .../1245-max-params-validation.md | 3 - .changelog/v0.6.2/summary.md | 5 - .../v0.7.0/bug-fixes/1261-gm-req-detect.md | 5 - .../v0.7.0/bug-fixes/1285-fix-typeok-bug.md | 4 - .../ibc/1257-set-capability-index.md | 3 - .../v0.7.0/features/1065-keys-delete.md | 3 - .changelog/v0.7.0/features/1175-update-ci.md | 4 - .../v0.7.0/features/1287-upgrade-legacy.md | 3 - .changelog/v0.7.0/features/843-rest-api.md | 3 - .../901-conditionally-spawn-worker.md | 3 - .../948-upgrade-to-cosmos-sdk-v0.43.md | 3 - .../1132-query-channels-filter.md | 4 - .../v0.7.0/improvements/1191-ica-compat.md | 4 - .../improvements/1249-update-modelator.md | 3 - .../1265-async-tx-confirmation.md | 5 - .../ibc/1297-impl-consensus-state.md | 3 - .changelog/v0.7.0/summary.md | 3 - .../v0.7.1/bug-fixes/1312-fix-gm-stderr.md | 7 - .../1343-fix-header-decoding-error.md | 1 - .../v0.7.1/features/1267-ethermint-support.md | 4 - .../1281-derive-traits-module-errors.md | 4 - .../1311-mbt-test-client-upgrade.md | 4 - .../improvements/1319-u256-amount-transfer.md | 4 - .../improvements/ibc/1268-reader-result.md | 3 - .../improvements/ibc/1333-modules-error.md | 9 - .changelog/v0.7.1/summary.md | 2 - .../features/1155-secp256k1-signatures.md | 1 - .../v0.7.2/features/1290-stubborn-workers.md | 2 - .../features/1362-skip-consensus-states.md | 1 - .../v0.7.2/features/1371-gm-features.md | 4 - 
.../features/1380-toggle-confirmations.md | 1 - .../improvements/1156-use-core-alloc.md | 1 - .../improvements/1336-better-health-check.md | 2 - .../improvements/1337-semver-pre-compat.md | 1 - .../improvements/1344-bump-compat-0.44.md | 1 - .../1376-consensus-params-explicit-height.md | 1 - .changelog/v0.7.2/summary.md | 4 - .../1345-fix-tx-simulation-0.42.md | 4 - .../1402-fix-account-seq-error-case.md | 3 - .../1392-trusting-period-default.md | 2 - .changelog/v0.7.3/summary.md | 4 - .../breaking-changes/ibc/1214-ics07.md | 3 - .../v0.8.0-pre.1/features/1433-memo-field.md | 3 - .../features/ibc-relayer/1457-default-gas.md | 2 - .../features/ibc-relayer/1464-ibc-go-check.md | 2 - .../v0.8.0-pre.1/features/ibc/1214-ics07.md | 2 - .../1231-begin-end-block-events.md | 2 - .../1440-improve-error-msg-create-client.md | 3 - .changelog/v0.8.0-pre.1/summary.md | 10 - .../v0.8.0/breaking-changes/1519-msrv-1.56.md | 2 - .../v0.8.0/bug-fixes/1445-clock-drift.md | 9 - .../v0.8.0/bug-fixes/1504-timeout_check.md | 2 - .../1417-update-client-misbehavior-perf.md | 2 - .../improvements/1502-update-prost-09.md | 2 - .../ibc/1436-restructure-to-match-ibc-go.md | 4 - .../improvements/ibc/1460-path-fromstr.md | 1 - .../838-converting-IbcEvent-into-AbciEvent.md | 1 - .changelog/v0.8.0/summary.md | 10 - ...onnOpenAck-counterparty-conn-id-not-set.md | 5 - .../v0.9.0/features/1408-vega-protos.md | 2 - .../v0.9.0/features/1534-ibc-queries.md | 2 - .../features/ibc-relayer/1518-config-modes.md | 2 - .../improvements/1544-typed-tla-mbt-specs.md | 2 - .../v0.9.0/improvements/1556-arch-doc.md | 5 - .../1515-single-line-errors.md | 2 - .../1555-fee-amount-overflow.md | 3 - .../1479-abort-failed-simulated-txs.md | 3 - .../ibc/1546-add-partialeq-ibcevent.md | 2 - .changelog/v0.9.0/summary.md | 52 - .dockerignore | 1 - .rustfmt.toml | 28 +- Cargo.lock | 2322 +------------- Cargo.toml | 3 +- codecov.yml | 23 - config.toml | 252 -- derive/Cargo.toml | 14 + derive/src/client_def.rs | 643 ++++ derive/src/client_state.rs | 187 ++ derive/src/coercion.rs | 52 + derive/src/consensus_state.rs | 67 + derive/src/header.rs | 44 + derive/src/lib.rs | 169 + derive/src/misbehaviour.rs | 46 + derive/src/protobuf.rs | 93 + docs/architecture/README.md | 38 - docs/architecture/adr-001-repo.md | 175 -- docs/architecture/adr-002-ibc-relayer.md | 798 ----- .../adr-003-handler-implementation.md | 635 ---- .../adr-004-relayer-domain-decomposition.md | 303 -- .../adr-005-relayer-v0-implementation.md | 234 -- .../adr-006-hermes-v0.2-usecases.md | 294 -- docs/architecture/adr-007-error.md | 228 -- .../adr-008-ics20-implementation.md | 236 -- ...9-chain-endpoint-handle-standardization.md | 51 - docs/architecture/adr-template.md | 36 - docs/architecture/architecture.md | 130 - .../assets/IBC_client_heights.jpeg | Bin 143106 -> 0 bytes .../assets/IBC_conn_handshake_relay.jpeg | Bin 336221 -> 0 bytes docs/architecture/assets/IBC_relayer.jpeg | Bin 121564 -> 0 bytes .../assets/IBC_relayer_threads.jpeg | Bin 93841 -> 0 bytes docs/architecture/assets/ibc-rs-layout.png | Bin 58389 -> 0 bytes docs/architecture/assets/relayer-v0-arch.jpg | Bin 558757 -> 0 bytes docs/architecture/assets/relayer-v0-link.jpeg | Bin 51722 -> 0 bytes docs/disclosure-log.md | 358 --- flake.lock | 801 ----- flake.nix | 47 - help | 0 modules/Cargo.toml | 62 +- .../applications/transfer/acknowledgement.rs | 65 +- modules/src/applications/transfer/context.rs | 553 ++-- modules/src/applications/transfer/denom.rs | 652 ++-- modules/src/applications/transfer/error.rs | 202 +- 
modules/src/applications/transfer/events.rs | 237 +- .../applications/transfer/msgs/transfer.rs | 237 +- modules/src/applications/transfer/packet.rs | 50 +- modules/src/applications/transfer/relay.rs | 55 +- .../transfer/relay/on_ack_packet.rs | 29 +- .../transfer/relay/on_recv_packet.rs | 118 +- .../transfer/relay/on_timeout_packet.rs | 20 +- .../transfer/relay/send_transfer.rs | 158 +- modules/src/clients/host_functions.rs | 140 - .../clients/ics07_tendermint/client_def.rs | 553 ---- .../clients/ics07_tendermint/client_state.rs | 643 ---- .../ics07_tendermint/consensus_state.rs | 136 - modules/src/clients/ics07_tendermint/error.rs | 294 -- .../src/clients/ics07_tendermint/header.rs | 202 -- .../clients/ics07_tendermint/misbehaviour.rs | 76 - modules/src/clients/ics07_tendermint/mod.rs | 9 - modules/src/clients/ics11_beefy/client_def.rs | 511 --- .../src/clients/ics11_beefy/client_state.rs | 415 --- .../clients/ics11_beefy/consensus_state.rs | 120 - modules/src/clients/ics11_beefy/error.rs | 207 -- modules/src/clients/ics11_beefy/header.rs | 430 --- .../src/clients/ics11_beefy/misbehaviour.rs | 1 - modules/src/clients/ics11_beefy/mod.rs | 8 - modules/src/clients/ics13_near/client_def.rs | 376 --- .../src/clients/ics13_near/client_state.rs | 77 - .../src/clients/ics13_near/consensus_state.rs | 25 - modules/src/clients/ics13_near/error.rs | 31 - modules/src/clients/ics13_near/header.rs | 27 - modules/src/clients/ics13_near/mod.rs | 6 - modules/src/clients/ics13_near/types.rs | 311 -- modules/src/clients/mod.rs | 8 - .../src/core/ics02_client/client_consensus.rs | 212 +- modules/src/core/ics02_client/client_def.rs | 1330 ++------ modules/src/core/ics02_client/client_state.rs | 447 +-- modules/src/core/ics02_client/client_type.rs | 121 - modules/src/core/ics02_client/context.rs | 477 +-- modules/src/core/ics02_client/error.rs | 837 ++--- modules/src/core/ics02_client/events.rs | 444 +-- modules/src/core/ics02_client/handler.rs | 48 +- .../ics02_client/handler/create_client.rs | 469 ++- .../ics02_client/handler/update_client.rs | 1090 ++----- .../ics02_client/handler/upgrade_client.rs | 394 +-- modules/src/core/ics02_client/header.rs | 171 +- modules/src/core/ics02_client/height.rs | 239 +- modules/src/core/ics02_client/misbehaviour.rs | 136 +- modules/src/core/ics02_client/msgs.rs | 21 +- .../core/ics02_client/msgs/create_client.rs | 189 +- .../src/core/ics02_client/msgs/misbehavior.rs | 107 +- .../core/ics02_client/msgs/update_client.rs | 183 +- .../core/ics02_client/msgs/upgrade_client.rs | 355 ++- .../src/core/ics02_client/trust_threshold.rs | 134 +- .../src/core/ics03_connection/connection.rs | 612 ++-- modules/src/core/ics03_connection/context.rs | 130 +- modules/src/core/ics03_connection/error.rs | 322 +- modules/src/core/ics03_connection/events.rs | 458 ++- modules/src/core/ics03_connection/handler.rs | 68 +- .../ics03_connection/handler/conn_open_ack.rs | 593 ++-- .../handler/conn_open_confirm.rs | 372 +-- .../handler/conn_open_init.rs | 367 +-- .../ics03_connection/handler/conn_open_try.rs | 453 +-- .../core/ics03_connection/handler/verify.rs | 363 ++- modules/src/core/ics03_connection/msgs.rs | 52 +- .../ics03_connection/msgs/conn_open_ack.rs | 468 +-- .../msgs/conn_open_confirm.rs | 267 +- .../ics03_connection/msgs/conn_open_init.rs | 302 +- .../ics03_connection/msgs/conn_open_try.rs | 671 ++-- modules/src/core/ics03_connection/version.rs | 465 ++- modules/src/core/ics04_channel/channel.rs | 946 +++--- modules/src/core/ics04_channel/commitment.rs | 24 +- 
modules/src/core/ics04_channel/context.rs | 509 ++- modules/src/core/ics04_channel/error.rs | 734 ++--- modules/src/core/ics04_channel/events.rs | 1758 +++++------ modules/src/core/ics04_channel/handler.rs | 317 +- .../ics04_channel/handler/acknowledgement.rs | 436 +-- .../handler/chan_close_confirm.rs | 367 +-- .../ics04_channel/handler/chan_close_init.rs | 305 +- .../ics04_channel/handler/chan_open_ack.rs | 633 ++-- .../handler/chan_open_confirm.rs | 439 +-- .../ics04_channel/handler/chan_open_init.rs | 366 +-- .../ics04_channel/handler/chan_open_try.rs | 605 ++-- .../core/ics04_channel/handler/recv_packet.rs | 583 ++-- .../core/ics04_channel/handler/send_packet.rs | 464 +-- .../src/core/ics04_channel/handler/timeout.rs | 469 ++- .../ics04_channel/handler/timeout_on_close.rs | 552 ++-- .../src/core/ics04_channel/handler/verify.rs | 443 +-- .../handler/write_acknowledgement.rs | 382 +-- modules/src/core/ics04_channel/msgs.rs | 85 +- .../ics04_channel/msgs/acknowledgement.rs | 334 +- .../ics04_channel/msgs/chan_close_confirm.rs | 214 +- .../ics04_channel/msgs/chan_close_init.rs | 182 +- .../core/ics04_channel/msgs/chan_open_ack.rs | 256 +- .../ics04_channel/msgs/chan_open_confirm.rs | 229 +- .../core/ics04_channel/msgs/chan_open_init.rs | 249 +- .../core/ics04_channel/msgs/chan_open_try.rs | 275 +- .../core/ics04_channel/msgs/recv_packet.rs | 299 +- .../src/core/ics04_channel/msgs/timeout.rs | 332 +- .../ics04_channel/msgs/timeout_on_close.rs | 192 +- modules/src/core/ics04_channel/packet.rs | 599 ++-- modules/src/core/ics04_channel/version.rs | 52 +- modules/src/core/ics05_port/context.rs | 17 +- modules/src/core/ics05_port/error.rs | 33 +- .../src/core/ics23_commitment/commitment.rs | 195 +- modules/src/core/ics23_commitment/error.rs | 50 +- modules/src/core/ics23_commitment/merkle.rs | 378 +-- modules/src/core/ics23_commitment/specs.rs | 173 +- modules/src/core/ics24_host/error.rs | 72 +- modules/src/core/ics24_host/identifier.rs | 541 ++-- modules/src/core/ics24_host/path.rs | 1627 +++++----- modules/src/core/ics24_host/validate.rs | 207 +- modules/src/core/ics26_routing/context.rs | 319 +- modules/src/core/ics26_routing/error.rs | 50 +- modules/src/core/ics26_routing/handler.rs | 1072 +++---- modules/src/core/ics26_routing/msgs.rs | 293 +- modules/src/events.rs | 899 +++--- modules/src/handler.rs | 95 +- modules/src/lib.rs | 26 +- modules/src/mock/client_def.rs | 462 +-- modules/src/mock/client_state.rs | 340 +- modules/src/mock/context.rs | 2772 ++++++++--------- modules/src/mock/header.rs | 143 +- modules/src/mock/host.rs | 171 +- modules/src/mock/misbehaviour.rs | 97 +- modules/src/prelude.rs | 19 +- modules/src/proofs.rs | 148 +- modules/src/query.rs | 20 - modules/src/relayer/ics18_relayer/context.rs | 34 - modules/src/relayer/ics18_relayer/error.rs | 38 - modules/src/relayer/ics18_relayer/mod.rs | 5 - modules/src/relayer/ics18_relayer/utils.rs | 211 -- modules/src/relayer/mod.rs | 5 - modules/src/serializers.rs | 63 +- modules/src/signer.rs | 32 +- modules/src/test.rs | 22 +- modules/src/test_utils.rs | 1168 +++---- modules/src/timestamp.rs | 493 ++- modules/src/tx_msg.rs | 56 +- modules/tests/README.md | 61 - modules/tests/mbt.rs | 30 - modules/tests/runner/mod.rs | 537 ---- modules/tests/runner/step.rs | 192 -- modules/tests/support/model_based/.gitignore | 13 - modules/tests/support/model_based/IBC.cfg | 13 - modules/tests/support/model_based/IBC.tla | 590 ---- .../support/model_based/IBCDefinitions.tla | 89 - .../tests/support/model_based/IBCTests.cfg | 9 - 
.../tests/support/model_based/IBCTests.tla | 60 - modules/tests/support/model_based/ICS02.tla | 147 - modules/tests/support/model_based/ICS03.tla | 438 --- .../query/serialization/client_state.json | 11 - .../serialization/client_state_proof.json | 24 - .../query/serialization/consensus_state.json | 11 - .../serialization/consensus_state_proof.json | 24 - modules/tests/support/signed_header.json | 64 - proto-compiler/src/cmd/clone.rs | 363 ++- proto-compiler/src/cmd/compile.rs | 720 +++-- proto-compiler/src/main.rs | 21 +- proto/src/google.rs | 440 ++- proto/src/lib.rs | 370 +-- 407 files changed, 21400 insertions(+), 39369 deletions(-) delete mode 100644 .changelog/config.toml delete mode 100644 .changelog/epilogue.md delete mode 100644 .changelog/unreleased/.gitkeep delete mode 100644 .changelog/unreleased/breaking-changes/ibc-relayer-cli/1075-change-key-name-flag.md delete mode 100644 .changelog/unreleased/bug-fixes/ibc-relayer-cli/2143-config-validate-cli.md delete mode 100644 .changelog/unreleased/bug-fixes/ibc-relayer/1153-fix-execute-schedule-leaky-pipeline.md delete mode 100644 .changelog/unreleased/bug-fixes/ibc/2293-fix-recv-packet-dest-portchan.md delete mode 100644 .changelog/unreleased/bug-fixes/relayer-cli/2168-conn-handshake-retry.md delete mode 100644 .changelog/unreleased/features/ibc-relayer-cli/912-balance-subcommand-cli.md delete mode 100644 .changelog/unreleased/features/ibc-relayer-cli/999-channel-client-subcommand-cli.md delete mode 100644 .changelog/unreleased/features/ibc-relayer/2240-chain-types.md delete mode 100644 .changelog/unreleased/features/proto/2277-proto-server.md delete mode 100644 .changelog/unreleased/features/relayer/2301-tendermint-version-support.md delete mode 100644 .changelog/unreleased/improvements/ibc-relayer/1400-fee-related-error-message.md delete mode 100644 .changelog/unreleased/improvements/ibc/1759-complete-ics20.md delete mode 100644 .changelog/unreleased/improvements/ibc/2279-u256-serde-derive.md delete mode 100644 .changelog/unreleased/improvements/ibc/2280-ics20-api-improvements.md delete mode 100644 .changelog/unreleased/improvements/relayer/2223-consolidate-chain-query-proven.md delete mode 100644 .changelog/unreleased/improvements/relayer/2249-ignore-nonce-mismatch.md delete mode 100644 .changelog/v0.10.0/breaking-changes/1660-msrv-1.57.md delete mode 100644 .changelog/v0.10.0/breaking-changes/1665-tendermint-0.23.2.md delete mode 100644 .changelog/v0.10.0/breaking-changes/ibc-relayer/1656-supervisor-spawn.md delete mode 100644 .changelog/v0.10.0/breaking-changes/ibc/1618-get-frozen-height.md delete mode 100644 .changelog/v0.10.0/breaking-changes/ibc/1665-remove-chrono.md delete mode 100644 .changelog/v0.10.0/bug-fixes/1264-recover-acct-seq.md delete mode 100644 .changelog/v0.10.0/bug-fixes/1634-update-unclog-instructions.md delete mode 100644 .changelog/v0.10.0/bug-fixes/ibc-relayer/1664-handle-expired-client.md delete mode 100644 .changelog/v0.10.0/bug-fixes/ibc-relayer/1715-execute-schedule-after-packet-cmd.md delete mode 100644 .changelog/v0.10.0/bug-fixes/ibc-relayer/1750-misbehavior-config.md delete mode 100644 .changelog/v0.10.0/bug-fixes/ibc/1573-delete-commitment-in-acknowledgePacket.md delete mode 100644 .changelog/v0.10.0/bug-fixes/ibc/1649-fix-chan-open-ack-verify.md delete mode 100644 .changelog/v0.10.0/bug-fixes/ibc/1697-assert-non-zero-trust-level.md delete mode 100644 .changelog/v0.10.0/bug-fixes/ibc/1710-fix-frozen-height-proto-conv.md delete mode 100644 .changelog/v0.10.0/features/1410-dynamic-version.md delete 
mode 100644 .changelog/v0.10.0/features/1550-ci-gaiav6.md delete mode 100644 .changelog/v0.10.0/features/1606.md delete mode 100644 .changelog/v0.10.0/features/1633-allow-fee-granters.md delete mode 100644 .changelog/v0.10.0/features/ibc-relayer/1561-config-proof-specs.md delete mode 100644 .changelog/v0.10.0/features/ibc/1583-module-verification-ICS07.md delete mode 100644 .changelog/v0.10.0/improvements/ibc-relayer-cli/1063-event-monitor-on-demand.md delete mode 100644 .changelog/v0.10.0/improvements/ibc-relayer-cli/1636.md delete mode 100644 .changelog/v0.10.0/improvements/ibc-relayer/1576-update-abscissa.md delete mode 100644 .changelog/v0.10.0/improvements/ibc/1665-remove-chrono.md delete mode 100644 .changelog/v0.10.0/summary.md delete mode 100644 .changelog/v0.11.0/1749-build-aarch64.md delete mode 100644 .changelog/v0.11.0/breaking-changes/1612-ibc-clock.md delete mode 100644 .changelog/v0.11.0/breaking-changes/1765-msrv-1.58.md delete mode 100644 .changelog/v0.11.0/breaking-changes/1767-tendermint-rs-0.23.5.md delete mode 100644 .changelog/v0.11.0/breaking-changes/1817-remove-filter-option.md delete mode 100644 .changelog/v0.11.0/breaking-changes/ibc-relayer/1662-configurable-upgrade-denom.md delete mode 100644 .changelog/v0.11.0/breaking-changes/ibc-relayer/1807-foreign-client-create-params.md delete mode 100644 .changelog/v0.11.0/bug-fixes/ibc/1745-fix-consensus-proof-verification.md delete mode 100644 .changelog/v0.11.0/bug-fixes/ibc/1763-init-consensus-meta-on-client-create.md delete mode 100644 .changelog/v0.11.0/improvements/1536-fast-start.md delete mode 100644 .changelog/v0.11.0/improvements/1641-tendermint-0.23.4.md delete mode 100644 .changelog/v0.11.0/improvements/1687-remove-mock-sleep.md delete mode 100644 .changelog/v0.11.0/improvements/ibc-relayer-cli/1662-configurable-upgrade-denom.md delete mode 100644 .changelog/v0.11.0/improvements/ibc-relayer-cli/1777-update-abscissa-and-clap.md delete mode 100644 .changelog/v0.11.0/improvements/ibc-relayer-cli/1789-cli-completions.md delete mode 100644 .changelog/v0.11.0/improvements/ibc-relayer-cli/836-create-client-options.md delete mode 100644 .changelog/v0.11.0/improvements/ibc-relayer/1481-chainendpoint-any-consensus-state.md delete mode 100644 .changelog/v0.11.0/improvements/ibc-relayer/1491-structured-logs.md delete mode 100644 .changelog/v0.11.0/improvements/ibc-relayer/1785-clarify-ethermint-keys.md delete mode 100644 .changelog/v0.11.0/improvements/ibc/1760-path-variants-as-types.md delete mode 100644 .changelog/v0.11.0/improvements/ibc/1761-disallow-empty-commitment-prefix-and-proof.md delete mode 100644 .changelog/v0.11.0/summary.md delete mode 100644 .changelog/v0.11.1/bug-fixes/ibc-relayer-cli/1822-skip-config-for-completions.md delete mode 100644 .changelog/v0.11.1/improvements/ibc-relayer/1389-add-connection-handshake-verification-logic.md delete mode 100644 .changelog/v0.11.1/improvements/ibc-relayer/1663-pending-timeout.md delete mode 100644 .changelog/v0.11.1/improvements/ibc-relayer/1793-begin-end-block-chan-events.md delete mode 100644 .changelog/v0.11.1/summary.md delete mode 100644 .changelog/v0.12.0/bug-fixes/ibc-relayer-cli/1885-disable-config-reload.md delete mode 100644 .changelog/v0.12.0/bug-fixes/ibc-relayer/1837-non-standard-ports.md delete mode 100644 .changelog/v0.12.0/bug-fixes/ibc-relayer/1844-duplicate-send-packet-events.md delete mode 100644 .changelog/v0.12.0/bug-fixes/ibc-relayer/1861-non-standard-ports.md delete mode 100644 .changelog/v0.12.0/bug-fixes/ibc-relayer/1872-clear-packets.md 
delete mode 100644 .changelog/v0.12.0/bug-fixes/ibc/1706-fix-formatting-for-some-tendermint-errors.md delete mode 100644 .changelog/v0.12.0/bug-fixes/ibc/1770-deterministic-host-timestamp.md delete mode 100644 .changelog/v0.12.0/features/1797-ibc-v3.md delete mode 100644 .changelog/v0.12.0/features/ibc-relayer-cli/1895-rust-log.md delete mode 100644 .changelog/v0.12.0/improvements/ibc-relayer-cli/1834-clear-packets-cmd.md delete mode 100644 .changelog/v0.12.0/improvements/ibc-relayer/1388-more-health-checks.md delete mode 100644 .changelog/v0.12.0/improvements/ibc-relayer/1880-nonallocating-verions-method.md delete mode 100644 .changelog/v0.12.0/improvements/ibc/1706-add-client-state-tests.md delete mode 100644 .changelog/v0.12.0/improvements/ibc/1769-cap-reader-keeper.md delete mode 100644 .changelog/v0.12.0/summary.md delete mode 100644 .changelog/v0.13.0/bug-fixes/ibc-relayer/1835-ordered-channels.md delete mode 100644 .changelog/v0.13.0/bug-fixes/ibc-relayer/1991-packet-worker-chan-open.md delete mode 100644 .changelog/v0.13.0/bug-fixes/ibc-relayer/2008-slow-relayer.md delete mode 100644 .changelog/v0.13.0/features/ibc-proto/1913-cosmwasm-support.md delete mode 100644 .changelog/v0.13.0/features/ibc-proto/1988-serde-serialize-deserialize.md delete mode 100644 .changelog/v0.13.0/features/ibc-relayer/1908-caching-layer.md delete mode 100644 .changelog/v0.13.0/features/ibc-relayer/1927-packet-filtering-wildcards.md delete mode 100644 .changelog/v0.13.0/improvements/ibc-integration-test/1961-test-framework.md delete mode 100644 .changelog/v0.13.0/improvements/ibc-relayer-cli/1559-cli-output.md delete mode 100644 .changelog/v0.13.0/improvements/ibc-relayer/1908-caching-layer-documentation.md delete mode 100644 .changelog/v0.13.0/improvements/ibc/718-rework-ics04_channel-events.md delete mode 100644 .changelog/v0.13.0/summary.md delete mode 100644 .changelog/v0.14.0/breaking-changes/2081-msrv-1.60.md delete mode 100644 .changelog/v0.14.0/bug-fixes/ibc-relayer-cli/1288-upgrade-chain-confirmation.md delete mode 100644 .changelog/v0.14.0/bug-fixes/ibc-relayer-cli/1921-create-client-options.md delete mode 100644 .changelog/v0.14.0/bug-fixes/ibc-relayer/1772-fix-conn-delay-check.md delete mode 100644 .changelog/v0.14.0/bug-fixes/ibc-relayer/1792-fix-hermes-retrying-not-regenerating-msgs.md delete mode 100644 .changelog/v0.14.0/bug-fixes/ibc-relayer/1998-default-max-block-time.md delete mode 100644 .changelog/v0.14.0/bug-fixes/ibc-relayer/2075-wildcard-filter-middle.md delete mode 100644 .changelog/v0.14.0/bug-fixes/ibc-relayer/2097-misbehavior-height.md delete mode 100644 .changelog/v0.14.0/bug-fixes/ibc/2035-handler-event-height.md delete mode 100644 .changelog/v0.14.0/bug-fixes/ibc/2062-conn-open-init-version.md delete mode 100644 .changelog/v0.14.0/features/ibc-relayer/2036-caching-metrics.md delete mode 100644 .changelog/v0.14.0/improvements/1936-missing-chain-warn.md delete mode 100644 .changelog/v0.14.0/improvements/2045-tendermint-0.23.6.md delete mode 100644 .changelog/v0.14.0/improvements/ibc-relayer-cli/1421-create-channel-cli.md delete mode 100644 .changelog/v0.14.0/improvements/ibc-relayer-cli/2096-query-packet-pending.md delete mode 100644 .changelog/v0.14.0/improvements/ibc/1758-complete-ics26.md delete mode 100644 .changelog/v0.14.0/improvements/ibc/2068-chan-id-u64.md delete mode 100644 .changelog/v0.14.0/summary.md delete mode 100644 .changelog/v0.14.1/bug-fixes/ibc-relayer/1970-app-latest-height.md delete mode 100644 .changelog/v0.14.1/summary.md delete mode 100644 
.changelog/v0.15.0/2181-update-codeowners.md delete mode 100644 .changelog/v0.15.0/bug-fixes/ibc-relayer/1971-non-batch-fix.md delete mode 100644 .changelog/v0.15.0/bug-fixes/ibc-relayer/2180-client-expiry-time.md delete mode 100644 .changelog/v0.15.0/bug-fixes/ibc/2104-fix-commitment-computation.md delete mode 100644 .changelog/v0.15.0/bug-fixes/ibc/2114-fix-ack-verification.md delete mode 100644 .changelog/v0.15.0/bug-fixes/ibc/2178-conn-ack-bug-fix.md delete mode 100644 .changelog/v0.15.0/features/1986-gaia-e2e-tests.md delete mode 100644 .changelog/v0.15.0/features/ibc-relayer/2112-new-metrics.md delete mode 100644 .changelog/v0.15.0/improvements/ibc-relayer/1971-max-msg-num-min-bound.md delete mode 100644 .changelog/v0.15.0/improvements/ibc-relayer/2031-misleading-misbehavior-error.md delete mode 100644 .changelog/v0.15.0/improvements/ibc-relayer/2087-incremental-packet-clearing.md delete mode 100644 .changelog/v0.15.0/improvements/ibc-relayer/2192-adr009-impl.md delete mode 100644 .changelog/v0.15.0/improvements/ibc/2159-remove-ocaps.md delete mode 100644 .changelog/v0.15.0/summary.md delete mode 100644 .changelog/v0.6.2/bug-fixes/1247-add-missing-protobuf-impl.md delete mode 100644 .changelog/v0.6.2/features/1020-augment-error-type.md delete mode 100644 .changelog/v0.6.2/features/1021-cli-indicate-config-file-error.md delete mode 100644 .changelog/v0.6.2/features/1229-upgrade-clis.md delete mode 100644 .changelog/v0.6.2/features/988-flex-error.md delete mode 100644 .changelog/v0.6.2/improvements/1245-max-params-validation.md delete mode 100644 .changelog/v0.6.2/summary.md delete mode 100644 .changelog/v0.7.0/bug-fixes/1261-gm-req-detect.md delete mode 100644 .changelog/v0.7.0/bug-fixes/1285-fix-typeok-bug.md delete mode 100644 .changelog/v0.7.0/bug-fixes/ibc/1257-set-capability-index.md delete mode 100644 .changelog/v0.7.0/features/1065-keys-delete.md delete mode 100644 .changelog/v0.7.0/features/1175-update-ci.md delete mode 100644 .changelog/v0.7.0/features/1287-upgrade-legacy.md delete mode 100644 .changelog/v0.7.0/features/843-rest-api.md delete mode 100644 .changelog/v0.7.0/features/901-conditionally-spawn-worker.md delete mode 100644 .changelog/v0.7.0/features/948-upgrade-to-cosmos-sdk-v0.43.md delete mode 100644 .changelog/v0.7.0/improvements/1132-query-channels-filter.md delete mode 100644 .changelog/v0.7.0/improvements/1191-ica-compat.md delete mode 100644 .changelog/v0.7.0/improvements/1249-update-modelator.md delete mode 100644 .changelog/v0.7.0/improvements/1265-async-tx-confirmation.md delete mode 100644 .changelog/v0.7.0/improvements/ibc/1297-impl-consensus-state.md delete mode 100644 .changelog/v0.7.0/summary.md delete mode 100644 .changelog/v0.7.1/bug-fixes/1312-fix-gm-stderr.md delete mode 100644 .changelog/v0.7.1/bug-fixes/1343-fix-header-decoding-error.md delete mode 100644 .changelog/v0.7.1/features/1267-ethermint-support.md delete mode 100644 .changelog/v0.7.1/improvements/1281-derive-traits-module-errors.md delete mode 100644 .changelog/v0.7.1/improvements/1311-mbt-test-client-upgrade.md delete mode 100644 .changelog/v0.7.1/improvements/1319-u256-amount-transfer.md delete mode 100644 .changelog/v0.7.1/improvements/ibc/1268-reader-result.md delete mode 100644 .changelog/v0.7.1/improvements/ibc/1333-modules-error.md delete mode 100644 .changelog/v0.7.1/summary.md delete mode 100644 .changelog/v0.7.2/features/1155-secp256k1-signatures.md delete mode 100644 .changelog/v0.7.2/features/1290-stubborn-workers.md delete mode 100644 
.changelog/v0.7.2/features/1362-skip-consensus-states.md delete mode 100644 .changelog/v0.7.2/features/1371-gm-features.md delete mode 100644 .changelog/v0.7.2/features/1380-toggle-confirmations.md delete mode 100644 .changelog/v0.7.2/improvements/1156-use-core-alloc.md delete mode 100644 .changelog/v0.7.2/improvements/1336-better-health-check.md delete mode 100644 .changelog/v0.7.2/improvements/1337-semver-pre-compat.md delete mode 100644 .changelog/v0.7.2/improvements/1344-bump-compat-0.44.md delete mode 100644 .changelog/v0.7.2/improvements/1376-consensus-params-explicit-height.md delete mode 100644 .changelog/v0.7.2/summary.md delete mode 100644 .changelog/v0.7.3/bug-fixes/ibc-relayer/1345-fix-tx-simulation-0.42.md delete mode 100644 .changelog/v0.7.3/bug-fixes/ibc-relayer/1402-fix-account-seq-error-case.md delete mode 100644 .changelog/v0.7.3/improvements/ibc-relayer/1392-trusting-period-default.md delete mode 100644 .changelog/v0.7.3/summary.md delete mode 100644 .changelog/v0.8.0-pre.1/breaking-changes/ibc/1214-ics07.md delete mode 100644 .changelog/v0.8.0-pre.1/features/1433-memo-field.md delete mode 100644 .changelog/v0.8.0-pre.1/features/ibc-relayer/1457-default-gas.md delete mode 100644 .changelog/v0.8.0-pre.1/features/ibc-relayer/1464-ibc-go-check.md delete mode 100644 .changelog/v0.8.0-pre.1/features/ibc/1214-ics07.md delete mode 100644 .changelog/v0.8.0-pre.1/improvements/ibc-relayer/1231-begin-end-block-events.md delete mode 100644 .changelog/v0.8.0-pre.1/improvements/ibc-relayer/1440-improve-error-msg-create-client.md delete mode 100644 .changelog/v0.8.0-pre.1/summary.md delete mode 100644 .changelog/v0.8.0/breaking-changes/1519-msrv-1.56.md delete mode 100644 .changelog/v0.8.0/bug-fixes/1445-clock-drift.md delete mode 100644 .changelog/v0.8.0/bug-fixes/1504-timeout_check.md delete mode 100644 .changelog/v0.8.0/improvements/1417-update-client-misbehavior-perf.md delete mode 100644 .changelog/v0.8.0/improvements/1502-update-prost-09.md delete mode 100644 .changelog/v0.8.0/improvements/ibc/1436-restructure-to-match-ibc-go.md delete mode 100644 .changelog/v0.8.0/improvements/ibc/1460-path-fromstr.md delete mode 100644 .changelog/v0.8.0/improvements/ibc/838-converting-IbcEvent-into-AbciEvent.md delete mode 100644 .changelog/v0.8.0/summary.md delete mode 100644 .changelog/v0.9.0/bug-fixes/ibc/1532-connOpenAck-counterparty-conn-id-not-set.md delete mode 100644 .changelog/v0.9.0/features/1408-vega-protos.md delete mode 100644 .changelog/v0.9.0/features/1534-ibc-queries.md delete mode 100644 .changelog/v0.9.0/features/ibc-relayer/1518-config-modes.md delete mode 100644 .changelog/v0.9.0/improvements/1544-typed-tla-mbt-specs.md delete mode 100644 .changelog/v0.9.0/improvements/1556-arch-doc.md delete mode 100644 .changelog/v0.9.0/improvements/ibc-relayer-cli/1515-single-line-errors.md delete mode 100644 .changelog/v0.9.0/improvements/ibc-relayer-cli/1555-fee-amount-overflow.md delete mode 100644 .changelog/v0.9.0/improvements/ibc-relayer/1479-abort-failed-simulated-txs.md delete mode 100644 .changelog/v0.9.0/improvements/ibc/1546-add-partialeq-ibcevent.md delete mode 100644 .changelog/v0.9.0/summary.md delete mode 100644 .dockerignore delete mode 100644 codecov.yml delete mode 100644 config.toml create mode 100644 derive/Cargo.toml create mode 100644 derive/src/client_def.rs create mode 100644 derive/src/client_state.rs create mode 100644 derive/src/coercion.rs create mode 100644 derive/src/consensus_state.rs create mode 100644 derive/src/header.rs create mode 100644 
derive/src/lib.rs create mode 100644 derive/src/misbehaviour.rs create mode 100644 derive/src/protobuf.rs delete mode 100644 docs/architecture/README.md delete mode 100644 docs/architecture/adr-001-repo.md delete mode 100644 docs/architecture/adr-002-ibc-relayer.md delete mode 100644 docs/architecture/adr-003-handler-implementation.md delete mode 100644 docs/architecture/adr-004-relayer-domain-decomposition.md delete mode 100644 docs/architecture/adr-005-relayer-v0-implementation.md delete mode 100644 docs/architecture/adr-006-hermes-v0.2-usecases.md delete mode 100644 docs/architecture/adr-007-error.md delete mode 100644 docs/architecture/adr-008-ics20-implementation.md delete mode 100644 docs/architecture/adr-009-chain-endpoint-handle-standardization.md delete mode 100644 docs/architecture/adr-template.md delete mode 100644 docs/architecture/architecture.md delete mode 100644 docs/architecture/assets/IBC_client_heights.jpeg delete mode 100644 docs/architecture/assets/IBC_conn_handshake_relay.jpeg delete mode 100644 docs/architecture/assets/IBC_relayer.jpeg delete mode 100644 docs/architecture/assets/IBC_relayer_threads.jpeg delete mode 100644 docs/architecture/assets/ibc-rs-layout.png delete mode 100644 docs/architecture/assets/relayer-v0-arch.jpg delete mode 100644 docs/architecture/assets/relayer-v0-link.jpeg delete mode 100644 docs/disclosure-log.md delete mode 100644 flake.lock delete mode 100644 flake.nix delete mode 100644 help delete mode 100644 modules/src/clients/host_functions.rs delete mode 100644 modules/src/clients/ics07_tendermint/client_def.rs delete mode 100644 modules/src/clients/ics07_tendermint/client_state.rs delete mode 100644 modules/src/clients/ics07_tendermint/consensus_state.rs delete mode 100644 modules/src/clients/ics07_tendermint/error.rs delete mode 100644 modules/src/clients/ics07_tendermint/header.rs delete mode 100644 modules/src/clients/ics07_tendermint/misbehaviour.rs delete mode 100644 modules/src/clients/ics07_tendermint/mod.rs delete mode 100644 modules/src/clients/ics11_beefy/client_def.rs delete mode 100644 modules/src/clients/ics11_beefy/client_state.rs delete mode 100644 modules/src/clients/ics11_beefy/consensus_state.rs delete mode 100644 modules/src/clients/ics11_beefy/error.rs delete mode 100644 modules/src/clients/ics11_beefy/header.rs delete mode 100644 modules/src/clients/ics11_beefy/misbehaviour.rs delete mode 100644 modules/src/clients/ics11_beefy/mod.rs delete mode 100644 modules/src/clients/ics13_near/client_def.rs delete mode 100644 modules/src/clients/ics13_near/client_state.rs delete mode 100644 modules/src/clients/ics13_near/consensus_state.rs delete mode 100644 modules/src/clients/ics13_near/error.rs delete mode 100644 modules/src/clients/ics13_near/header.rs delete mode 100644 modules/src/clients/ics13_near/mod.rs delete mode 100644 modules/src/clients/ics13_near/types.rs delete mode 100644 modules/src/clients/mod.rs delete mode 100644 modules/src/query.rs delete mode 100644 modules/src/relayer/ics18_relayer/context.rs delete mode 100644 modules/src/relayer/ics18_relayer/error.rs delete mode 100644 modules/src/relayer/ics18_relayer/mod.rs delete mode 100644 modules/src/relayer/ics18_relayer/utils.rs delete mode 100644 modules/src/relayer/mod.rs delete mode 100644 modules/tests/README.md delete mode 100644 modules/tests/mbt.rs delete mode 100644 modules/tests/runner/mod.rs delete mode 100644 modules/tests/runner/step.rs delete mode 100644 modules/tests/support/model_based/.gitignore delete mode 100644 
modules/tests/support/model_based/IBC.cfg delete mode 100644 modules/tests/support/model_based/IBC.tla delete mode 100644 modules/tests/support/model_based/IBCDefinitions.tla delete mode 100644 modules/tests/support/model_based/IBCTests.cfg delete mode 100644 modules/tests/support/model_based/IBCTests.tla delete mode 100644 modules/tests/support/model_based/ICS02.tla delete mode 100644 modules/tests/support/model_based/ICS03.tla delete mode 100644 modules/tests/support/query/serialization/client_state.json delete mode 100644 modules/tests/support/query/serialization/client_state_proof.json delete mode 100644 modules/tests/support/query/serialization/consensus_state.json delete mode 100644 modules/tests/support/query/serialization/consensus_state_proof.json delete mode 100644 modules/tests/support/signed_header.json diff --git a/.changelog/config.toml b/.changelog/config.toml deleted file mode 100644 index 598fd03941..0000000000 --- a/.changelog/config.toml +++ /dev/null @@ -1,79 +0,0 @@ -# The GitHub URL for your project. -# -# This is mainly necessary if you need to automatically generate changelog -# entries directly from the CLI. Right now we only support GitHub, but if -# anyone wants GitLab support please let us know and we'll try implement it -# too. -project_url = "https://github.com/informalsystems/ibc-rs" - -# The file to use as a Handlebars template for changes added directly through -# the CLI. -# -# Assumes that relative paths are relative to the `.changelog` folder. If this -# file does not exist, a default template will be used. -change_template = "change-template.md" - -# The number of characters at which to wrap entries automatically added from -# the CLI. -wrap = 80 - -# The heading right at the beginning of the changelog. -heading = "# CHANGELOG" - -# What style of bullet to use for the instances where unclog has to generate -# bullets for you. Can be "-" or "*". -bullet_style = "-" - -# The message to output when your changelog has no entries yet. -empty_msg = "Nothing to see here! Add some entries to get started." - -# The name of the file (relative to the `.changelog` directory) to use as an -# epilogue for your changelog (will be appended as-is to the end of your -# generated changelog). -epilogue_filename = "epilogue.md" - - -# Settings relating to unreleased changelog entries. -[unreleased] - -# The name of the folder containing unreleased entries, relative to the -# `.changelog` folder. -folder = "unreleased" - -# The heading to use for the unreleased entries section. -heading = "## Unreleased" - - -# Settings relating to sets (groups) of changes in the changelog. For example, -# the "BREAKING CHANGES" section would be considered a change set. -[change_sets] - -# The filename containing a summary of the intended changes. Relative to the -# change set folder (e.g. `.changelog/unreleased/breaking-changes/summary.md`). -summary_filename = "summary.md" - -# The extension of files in a change set. -entry_ext = "md" - - -# Settings related to components/sub-modules. Only relevant if you make use of -# components/sub-modules. -[components] - -# The title to use for the section of entries not relating to a specific -# component. -general_entries_title = "General" - -# The number of spaces to inject before each component-related entry. -entry_indent = 2 - - # The components themselves. Each component has a name (used when rendered - # to Markdown) and a path relative to the project folder (i.e. relative to - # the parent of the `.changelog` folder). 
- [components.all] - ibc = { name = "IBC Modules", path = "modules" } - ibc-proto = { name = "IBC Proto", path = "proto" } - ibc-relayer = { name = "Relayer Library", path = "relayer" } - ibc-relayer-cli = { name = "Relayer CLI", path = "relayer-cli" } - ibc-integration-test = { name = "Integration Test Framework", path = "tools/test-framework" } - guide = { name = "Guide", path = "guide" } diff --git a/.changelog/epilogue.md b/.changelog/epilogue.md deleted file mode 100644 index 9c812992ba..0000000000 --- a/.changelog/epilogue.md +++ /dev/null @@ -1,1081 +0,0 @@ -## v0.6.1 -*July 22nd, 2021* - -This minor release mainly improves the reliability of the relayer -by ensuring that pending packets are cleared on start, -and that Hermes can recover from the WebSocket subscriptions -being closed under its feet by Tendermint. - -Upgrading from version `0.6.0` to `0.6.1` requires no explicit steps. - -> **WARNING:** Due to a regression ([#1229]), the `upgrade client`, -> `tx raw upgrade-clients`, and `tx raw upgrade-chain` commands have -> been temporarily disabled in this version. -> These commands will be re-enabled in the next version. - -### FEATURES - -- [ibc] - - Enable `pub` access to verification methods of ICS 03 & 04 ([#1198]) - - Add `ics26_routing::handler::decode` function ([#1194]) - - Add a pseudo root to `MockConsensusState` ([#1215]) - -### IMPROVEMENTS - -- [ibc-relayer-cli] - - Add CLI git hash ([#1094]) - - Fix unwraps in `packet query` CLIs ([#1114]) - -### BUG FIXES - -- [ibc] - - Fix stack overflow in `MockHeader` implementation ([#1192]) - - Align `as_str` and `from_str` behavior in `ClientType` ([#1192]) - -- [ibc-relayer] - - Ensure pending packets are cleared on start ([#1200]) - - Recover from missed RPC events after WebSocket subscription is closed by Tendermint ([#1196]) - - -[#1094]: https://github.com/informalsystems/ibc-rs/issues/1094 -[#1114]: https://github.com/informalsystems/ibc-rs/issues/1114 -[#1192]: https://github.com/informalsystems/ibc-rs/issues/1192 -[#1194]: https://github.com/informalsystems/ibc-rs/issues/1194 -[#1196]: https://github.com/informalsystems/ibc-rs/issues/1196 -[#1198]: https://github.com/informalsystems/ibc-rs/issues/1198 -[#1200]: https://github.com/informalsystems/ibc-rs/issues/1200 -[#1215]: https://github.com/informalsystems/ibc-rs/issues/1215 -[#1229]: https://github.com/informalsystems/ibc-rs/issues/1229 - - -## v0.6.0 -*July 12th, 2021* - - -Many thanks to Fraccaroli Gianmarco (@Fraccaman) for helping us improve the -reliability of Hermes ([#697]). - -This release includes two major features to Hermes: (1) support for reloading -the chains from the configuration file at runtime, and (2) a filtering mechanism -to restrict Hermes activity based on predefined parameters (e.g., packet relaying -on certain ports and channels exclusively, and ignoring activity for clients -that have non-standard trust threshold). - -In addition to these two, we have also added a health checkup mechanism, plus new -`config validate` and `query channel ends` CLIs. - -### Upgrading from 0.5.0 to 0.6.0 - -When upgrading from Hermes v0.5.0 to v0.6.0, the most important -point to watch out for is the configuration file. -The Hermes config.toml configuration file has gone through a few revisions, -with the changes described below. - -#### Added inline documentation for all options. - -Please have a look around the [config.toml](https://github.com/informalsystems/ibc-rs/blob/v0.6.0/config.toml) directly. 
- -#### Added a packet filtering mechanism based on channel/port identifiers - -This feature will restrict the channels on which Hermes relays packets. -There are two new options in the configuration file: - -1. A global `filter` parameter to enable or disable filtering globally. -2. A per-chain `.filters` option that expects a `policy` (either `allow` or - `deny`) plus a list of channel and - port identifiers. If policy is `allow`, then packet relaying will be restricted to this - list for the corresponding chain. If the policy is `deny`, then any packets - from this list will be ignored. - -#### Added filtering based on client state - -The global `filter` option additionally enables filtering of all activities -based on client state trust threshold. If enabled, Hermes will ignore all -activity for clients that have a trust threshold different than `1/3`. - -#### Added a packet clearing configuration option - -This will enable the parametrization of the frequency -at which Hermes will clear pending packets. This is a global option, called -`clear_packets_interval`, which applies to all chains in the configuration. - - -The full list of changes is described below. - -### FEATURES - -- [ibc-relayer] - - The chains configuration can be reloaded by sending the Hermes process a `SIGHUP` signal ([#1117]) - - Added support for filtering based on client state trust threshold ([#1165]) - -- [ibc-relayer-cli] - - Added `config validate` CLI to Hermes ([#600]) - - Added filtering capability to deny or allow for specific channels ([#1140], [#1141], [#69]) - - Added basic channel filter ([#1140]) - - Added `query channel ends` CLI command ([#1062]) - - Added a health checkup mechanism for Hermes ([#697, #1057]) - -### IMPROVEMENTS - -- Update to `tendermint-rs` v0.20.0 ([#1125]) -- Add inline documentation to config.toml ([#1127]) - -- [ibc-relayer] - - Hermes will now clear pending packets at a configurable interval ([#1124]) - -### BUG FIXES - -- [ibc-relayer] - - Fix for schedule refreshing bug ([#1143]) - - -[#69]: https://github.com/informalsystems/ibc-rs/issues/69 -[#600]: https://github.com/informalsystems/ibc-rs/issues/600 -[#697]: https://github.com/informalsystems/ibc-rs/issues/697 -[#1062]: https://github.com/informalsystems/ibc-rs/issues/1062 -[#1117]: https://github.com/informalsystems/ibc-rs/issues/1117 -[#1057]: https://github.com/informalsystems/ibc-rs/issues/1057 -[#1125]: https://github.com/informalsystems/ibc-rs/issues/1125 -[#1124]: https://github.com/informalsystems/ibc-rs/issues/1124 -[#1127]: https://github.com/informalsystems/ibc-rs/issues/1127 -[#1140]: https://github.com/informalsystems/ibc-rs/issues/1140 -[#1141]: https://github.com/informalsystems/ibc-rs/issues/1141 -[#1143]: https://github.com/informalsystems/ibc-rs/issues/1143 -[#1165]: https://github.com/informalsystems/ibc-rs/issues/1165 - - -## v0.5.0 -*June 22nd, 2021* - -This release brings a few features, and several improvements and bug fixes to the Hermes -relayer, notably the capability for Hermes to complete IBC connection handshakes when -it detects that one has been initialized, as well as the ability to detect chain -impersonation attacks and to dynamically estimate the gas needed to submit -a transaction. - -Moreover, the overall reliability and availability of the relayer has also been improved -substantially by switching over to `tx_broadcast_sync` for submitting transactions. 
- -### FEATURES - -- [ibc-relayer-cli] - - Add `--hd-path` option to `keys restore` and `keys add` commands to specify - derivation path when importing keys ([#1049]) - -- [ibc-relayer] - - Event-based handshake completion for IBC connections ([#821]) - - Enable TLS support for gRPC client ([#877]) - -### IMPROVEMENTS - -- [ibc-relayer-cli] - - Minor log output improvements: color enabled, reduced redundant information ([#1100]) - -- [ibc-relayer] - - Update the on-chain IBC client with supporting headers when light client verification - performs bisection when verifying a header for a client update or a misbehaviour detection ([#673]) - - Add mitigation for chain impersonation attacks ([#1038]) - - Determine gas fee dynamically per transaction ([#930]) - - Submit transactions with `broadcast_tx_sync` and keep track of account sequences ([#986]) - -### BUG FIXES - -- [gaiad-manager] - - Removed the testnet command as not all networks support it ([#1050]) - - Update for compatibility with Hermes's new `--hd-path` option - -- [ibc-relayer] - - Fix bug where channels were left partially open after `channel create` ([#1064]) - - Prevent account sequence mismatch errors in many cases ([#919], [#978]) - - Prevent timeouts when submitting transactions ([#977]) - -### BREAKING CHANGES - -- [ibc-relayer-cli] - - Removed `--coin-type` option from `keys restore` command. Use `--hd-path` instead ([#1049]) - -[#673]: https://github.com/informalsystems/ibc-rs/issues/673 -[#821]: https://github.com/informalsystems/ibc-rs/issues/821 -[#877]: https://github.com/informalsystems/ibc-rs/issues/877 -[#919]: https://github.com/informalsystems/ibc-rs/issues/919 -[#930]: https://github.com/informalsystems/ibc-rs/issues/930 -[#977]: https://github.com/informalsystems/ibc-rs/issues/977 -[#978]: https://github.com/informalsystems/ibc-rs/issues/978 -[#986]: https://github.com/informalsystems/ibc-rs/issues/986 -[#1038]: https://github.com/informalsystems/ibc-rs/issues/1038 -[#1049]: https://github.com/informalsystems/ibc-rs/issues/1049 -[#1050]: https://github.com/informalsystems/ibc-rs/issues/1050 -[#1064]: https://github.com/informalsystems/ibc-rs/issues/1064 -[#1100]: https://github.com/informalsystems/ibc-rs/issues/1100 - -## v0.4.0 -*June 3rd, 2021* - -- This release of Hermes features an internal [telemetry service][telemetry] - which can export metrics about the relayer to Prometheus. -- A new [relaying strategy][strategy] is now available, which enables Hermes to - complete channel handshakes in an event-based fashion. -- Hermes now checks if another relayer may have already processed a packet event, - and will not attempt to process it itself, which improves performance. -- The startup time of the relayer has been substantially improved. -- The `start-multi` command has been promoted to `start`, which means - that the worker-based relayer is not experimental anymore. -- A regression where Hermes would not recover after a node went down and up again was fixed. - -[telemetry]: https://hermes.informal.systems/telemetry.html -[strategy]: http://hermes.informal.systems/config.html?highlight=strategy#global - -> Special thanks to Colin Axnér (@colin-axner) and Jongwhan Lee (@leejw51crypto) -> for raising multiple issues that helped us improve the reliability of Hermes. 
- -### FEATURES - -- [ibc-relayer] - - Add telemetry and Prometheus endpoint ([#868], [#1032]) - - Add support for event based channel relaying ([#822]) - - Graceful handling of packet events in the presence of multiple relayers ([#983]) - -### IMPROVEMENTS - -- [ibc] - - Started `unwrap` cleanup ([#871]) - -- [ibc-relayer-cli] - - Include chain-id in `query clients` command, and sort output by client counter ([#992]) - - Improve config loading message ([#996]) - - Improve Hermes worker spawn time for `start` command ([#998]) - - Better Hermes help message when command is unrecognized ([#1003]) - -### BUG FIXES - -- [ibc-relayer] - - Fix client worker initialization error ([#972]) - - Fix `hermes start` panic when all chains are unreachable ([#972]) - - Ensure expired or frozen client worker logs message and terminates ([#1022]) - - Fix regression where Hermes would not recover after a node went down and up again ([#1026]) - -- [gaiad-manager] - - Import hermes keys properly even if wallet HD derivation path is set ([#975]) - - Apply default values to missing configuration parameters ([#993]) - - `gm hermes config` now creates hermes 0.4.0 compatible configuration ([#1039]) - -### BREAKING CHANGES - -- [ibc-relayer-cli] - - Promote `start-multi` command to `start` ([#911]) - -[#822]: https://github.com/informalsystems/ibc-rs/issues/822 -[#868]: https://github.com/informalsystems/ibc-rs/issues/868 -[#871]: https://github.com/informalsystems/ibc-rs/issues/871 -[#911]: https://github.com/informalsystems/ibc-rs/issues/911 -[#972]: https://github.com/informalsystems/ibc-rs/issues/972 -[#975]: https://github.com/informalsystems/ibc-rs/issues/975 -[#983]: https://github.com/informalsystems/ibc-rs/issues/983 -[#992]: https://github.com/informalsystems/ibc-rs/issues/992 -[#996]: https://github.com/informalsystems/ibc-rs/issues/996 -[#993]: https://github.com/informalsystems/ibc-rs/issues/993 -[#998]: https://github.com/informalsystems/ibc-rs/issues/998 -[#1003]: https://github.com/informalsystems/ibc-rs/issues/1003 -[#1022]: https://github.com/informalsystems/ibc-rs/issues/1022 -[#1026]: https://github.com/informalsystems/ibc-rs/issues/1026 -[#1032]: https://github.com/informalsystems/ibc-rs/issues/1032 -[gaiad-manager]: https://github.com/informalsystems/ibc-rs/blob/master/scripts/gm/README.md -[#1039]: https://github.com/informalsystems/ibc-rs/issues/1039 - -## v0.3.2 -*May 21st, 2021* - -This is a minor release which brings substantial performance improvements -to the relayer (relaying 1000 packets now takes 2-5min instead of 1h+), -better UX for the `ft-transfer` command, and automatic deployment of -Docker images to Docker Hub. 
- -### FEATURES - -- [ibc-relayer-cli] - - Add a `--key` option to the tx raw ft-transfer command to override the account used for sending messages ([#963]) - -- [ibc-relayer] - - Add support for multiple keys to the keyring ([#963]) - -- [release] - - Released the official [Hermes image][hermes-docker] on Docker Hub ([#894]) - - Automatically deploy Docker Hub image during release ([#967]) - -### IMPROVEMENTS - -- [ibc-relayer] - - Batch together all events from all transactions included in a block ([#957]) - -### BUG FIXES - -- [ibc-relayer-cli] - - Prevent sending `ft-transfer` MsgTransfer on a non-Open channel ([#960]) - -### BREAKING CHANGES - -> Nothing - -[#868]: https://github.com/informalsystems/ibc-rs/issues/868 -[#894]: https://github.com/informalsystems/ibc-rs/pull/894 -[#957]: https://github.com/informalsystems/ibc-rs/issues/957 -[#960]: https://github.com/informalsystems/ibc-rs/issues/960 -[#963]: https://github.com/informalsystems/ibc-rs/issues/963 -[#967]: https://github.com/informalsystems/ibc-rs/issues/967 - -[hermes-docker]: https://hub.docker.com/r/informalsystems/hermes - -## v0.3.1 -*May 14th, 2021* - -This release improves the UX of a couple commands, fixes a bug related -to delay periods, and adds support for packet timeouts based on timestamps, -as well as support for Protobuf-encoded keys. - -### FEATURES - -- [scripts] - - Created the Gaiad Manager `gm` CLI tool for managing gaiad instances on the local machine ([#902]) - -- [ibc-relayer] - - Add support for packet timeout based on timeout timestamp ([#937]) - - Added support for Protobuf-based Keyring ([#925]) - -### IMPROVEMENTS - -- [ibc-relayer-cli] - - Improve UX when querying non-existing connections and channels ([#875], [#920]) - - More details in error messages to increase debuggability ([#921], [#934]) - - Disallow creating a client with same source and destination chains ([#932]) - - Make packet worker more resilient to nodes being unreachable for a short amount of time ([#943]) - -### BUG FIXES - -- [ibc] - - Process raw `delay_period` field as nanoseconds instead of seconds. ([#927]) - -### BREAKING CHANGES - -> Nothing - - -[#875]: https://github.com/informalsystems/ibc-rs/issues/875 -[#920]: https://github.com/informalsystems/ibc-rs/issues/920 -[#902]: https://github.com/informalsystems/ibc-rs/issues/902 -[#921]: https://github.com/informalsystems/ibc-rs/issues/921 -[#925]: https://github.com/informalsystems/ibc-rs/issues/925 -[#927]: https://github.com/informalsystems/ibc-rs/issues/927 -[#932]: https://github.com/informalsystems/ibc-rs/issues/932 -[#934]: https://github.com/informalsystems/ibc-rs/issues/934 -[#937]: https://github.com/informalsystems/ibc-rs/issues/937 -[#943]: https://github.com/informalsystems/ibc-rs/issues/943 - - -## v0.3.0 -*May 7th, 2021* - -Special thanks to Jongwhan Lee (@leejw51crypto) for his contributions ([#878]). - -This release mostly focuses on improving the UX and the experimental multi-paths relayer (`start-multi` command), -which has been made more resilient against nodes going down, and is now able to clear pending packets -and periodically refresh IBC clients. The relayer now also supports [ICS 027 (Interchain Accounts)][ics27]. 
- -[ics27]: https://github.com/cosmos/ibc/blob/master/spec/app/ics-027-interchain-accounts/README.md - -### FEATURES - -- [ibc-relayer] - - Support for ICS27 ([#794]) - -- [ibc-relayer-cli] - - Added packet clearing and client refresh capabilities for the `start-multi` command ([#784], [#786]) - -### IMPROVEMENTS - -- [ibc] - - Reinstated `ics23` dependency ([#854]) - - Use proper Timestamp type to track time ([#758]) - -- [ibc-relayer] - - Change the default for client creation to allow governance recovery in case of expiration or misbehaviour ([#785]) - - Use a single supervisor in `start-multi` to subscribe to all configured chains ([#862]) - - The `start-multi` command is now more resilient to a node not being up or going down, and will attempt to reconnect ([#871]) - -### BUG FIXES - -- [ibc] - - Fix parsing in `chain_version` when chain identifier has multiple dashes ([#878]) - -- [ibc-relayer] - - Fix pagination in gRPC query for clients ([#811]) - - Fix relayer crash when hermes starts in the same time as packets are being sent ([#851]) - - Fix missing port information in `hermes query channels` ([#840]) - - Fix crash during initialization of event monitor when node is down ([#863]) - - Spawn a single Tokio runtime for the whole supervisor instead of one per chain ([#909]) - -- [ibc-relayer-cli] - - Fix for `ft-transfer` mismatching arguments ([#869]) - - Fix channel destination chain mismatch on unreceived-packets or unreceived-acks ([#873]) - -### BREAKING CHANGES - -- [ibc-relayer] - - `hermes -j query channels` command now returns `result` array with the format - `[{"channel_id":"channel-0","port_id":"transfer"}, ...]` instead of `["channel-0", ...]` ([#840]) - - -[#758]: https://github.com/informalsystems/ibc-rs/issues/758 -[#784]: https://github.com/informalsystems/ibc-rs/issues/784 -[#785]: https://github.com/informalsystems/ibc-rs/issues/785 -[#786]: https://github.com/informalsystems/ibc-rs/issues/786 -[#794]: https://github.com/informalsystems/ibc-rs/issues/794 -[#811]: https://github.com/informalsystems/ibc-rs/issues/811 -[#840]: https://github.com/informalsystems/ibc-rs/issues/840 -[#851]: https://github.com/informalsystems/ibc-rs/issues/851 -[#854]: https://github.com/informalsystems/ibc-rs/issues/854 -[#862]: https://github.com/informalsystems/ibc-rs/issues/862 -[#863]: https://github.com/informalsystems/ibc-rs/issues/863 -[#869]: https://github.com/informalsystems/ibc-rs/issues/869 -[#871]: https://github.com/informalsystems/ibc-rs/issues/871 -[#873]: https://github.com/informalsystems/ibc-rs/issues/873 -[#878]: https://github.com/informalsystems/ibc-rs/issues/878 -[#909]: https://github.com/informalsystems/ibc-rs/issues/909 - -## v0.2.0 -*April 14th, 2021* - -This release includes initial support for relaying over multiple paths from a single `hermes` instance. -Adds support for relayer restart, where pending packets are cleared. -Includes support for ordered channels, packet delay, misbehaviour detection and evidence submission, client upgrade after counterparty chain upgrades. - -This release brings improvements to the relayer UX by providing new and updated commands for keys, client, connection and channel management. -In addition, it simplifies the configuration of and integration with the light client. - -This release also finalizes the initial implementation of all the ICS 004 handlers. - -### FEATURES - -- Update to `tendermint-rs` v0.19.0 ([#798]) - -- [ibc] - - Added handler(s) for sending packets ([#695]), recv. and ack. 
packets ([#736]), and timeouts ([#362]) - -- [ibc-relayer] - - Support for relayer restart ([#561]) - - Add support for ordered channels ([#599]) - - Misbehaviour detection and evidence submission ([#632]) - - Use a stateless light client without a runtime ([#673]) - -- [ibc-relayer-cli] - - Added `create connection` and `create channel` CLIs ([#630], [#715]) - - Proposed ADR 006 to describe Hermes v0.2.0 use-cases ([#637]) - - Added `client-upgrade` CLI ([#357]) - - Added delay feature for packet relaying ([#640]) - - Update gaia to version 4.2.0 for e2e tests on CI ([#809]) - - Add `start-multi` command to relay on all paths defined in the configuration ([#748]) - - Add option to specify which events to listen for in `listen` command ([#550]) - - Add option to customise receiver address for `ft-transfer` command ([#806]) - - Add `keys restore` command to import a signing key from its mnemonic ([#813]) - -### IMPROVEMENTS - -- [ibc] - - Follow Rust guidelines naming conventions ([#689]) - - Per client structure modules ([#740]) - - MBT: use modelator crate ([#761]) - -- [ibc-relayer] - - Consistent identifier handling across ICS 02, 03 and 04 ([#622]) - -- [ibc-relayer-cli] - - Clarified success path for updating a client that is already up-to-date ([#734]) - - Added `create` and `update` wrappers for client raw commands ([#772]) - - Output by default is human-readable, and JSON is optional ([#805]) - -### BUG FIXES - -- [ibc] - - Fix overflow bug in ICS03 client consensus height verification method ([#685]) - - Allow a conn open ack to succeed in the happy case ([#699]) - -- [ibc-relayer] - - Replaced `rust-crypto` & `bitcoin-wallet` deprecated dependencies ([#352]) - - Fix for hard-coded account number ([#752]) - - Fix for chains that don't have `cosmos` account prefix ([#416]) - - Fix for building the `trusted_validator_set` for the header used in client updates ([#770]) - - Don't send `MsgAcknowledgment` if channel is closed ([#675]) - - Fix a bug where the keys addresses had their account prefix overriden by the prefix in the configuration ([#751]) - -- [ibc-relayer-cli] - - Hermes guide: improved installation guideline ([#672]) - - Make fee denom and amount configurable ([#754]) - -- [ibc-proto] - - Fix for proto files re-compilation bug ([#801]) - -### BREAKING CHANGES - -- [ibc] - - `MsgConnectionOpenAck.counterparty_connection_id` is now a `ConnectionId` instead of an `Option`([#700]) - -- [ibc-relayer] - - Remove the light client configuration from the global configuration ([#793]) - -- [ibc-relayer-cli] - - Remove the light add and light rm commands ([#793]) - - -[#352]: https://github.com/informalsystems/ibc-rs/issues/352 -[#362]: https://github.com/informalsystems/ibc-rs/issues/362 -[#357]: https://github.com/informalsystems/ibc-rs/issues/357 -[#416]: https://github.com/informalsystems/ibc-rs/issues/416 -[#561]: https://github.com/informalsystems/ibc-rs/issues/561 -[#550]: https://github.com/informalsystems/ibc-rs/issues/550 -[#599]: https://github.com/informalsystems/ibc-rs/issues/599 -[#630]: https://github.com/informalsystems/ibc-rs/issues/630 -[#632]: https://github.com/informalsystems/ibc-rs/issues/632 -[#640]: https://github.com/informalsystems/ibc-rs/issues/640 -[#672]: https://github.com/informalsystems/ibc-rs/issues/672 -[#673]: https://github.com/informalsystems/ibc-rs/issues/673 -[#675]: https://github.com/informalsystems/ibc-rs/issues/675 -[#685]: https://github.com/informalsystems/ibc-rs/issues/685 -[#689]: https://github.com/informalsystems/ibc-rs/issues/689 
-[#695]: https://github.com/informalsystems/ibc-rs/issues/695 -[#699]: https://github.com/informalsystems/ibc-rs/issues/699 -[#700]: https://github.com/informalsystems/ibc-rs/pull/700 -[#715]: https://github.com/informalsystems/ibc-rs/issues/715 -[#734]: https://github.com/informalsystems/ibc-rs/issues/734 -[#736]: https://github.com/informalsystems/ibc-rs/issues/736 -[#740]: https://github.com/informalsystems/ibc-rs/issues/740 -[#748]: https://github.com/informalsystems/ibc-rs/issues/748 -[#751]: https://github.com/informalsystems/ibc-rs/issues/751 -[#752]: https://github.com/informalsystems/ibc-rs/issues/752 -[#754]: https://github.com/informalsystems/ibc-rs/issues/754 -[#761]: https://github.com/informalsystems/ibc-rs/issues/761 -[#772]: https://github.com/informalsystems/ibc-rs/issues/772 -[#770]: https://github.com/informalsystems/ibc-rs/issues/770 -[#793]: https://github.com/informalsystems/ibc-rs/pull/793 -[#798]: https://github.com/informalsystems/ibc-rs/issues/798 -[#801]: https://github.com/informalsystems/ibc-rs/issues/801 -[#805]: https://github.com/informalsystems/ibc-rs/issues/805 -[#806]: https://github.com/informalsystems/ibc-rs/issues/806 -[#809]: https://github.com/informalsystems/ibc-rs/issues/809 - - -## v0.1.1 -*February 17, 2021* - -This release brings a quick fix for a problem with a dependency of crate -`ibc-relayer`, which causes build & installation issues. Many thanks to -@Fraccaman for bringing this problem to our attention! ([#672]) - - -Additionally, this release also introduces initial implementation for most of -ICS 004 handlers, and several bug fixes and improvements, e.g., refactored -some CLI code, refactored the Height type in the IBC Events, and a bug fix -involving packet acks in a 3-chain setup. More details below. 
- -### FEATURES -- [ibc-relayer] - - Listen to channel close initialization event and perform the close handshake ([#560]) - - Updated to tendermint-rs `v0.18.1` ([#682], [#671]) - -### IMPROVEMENTS - -- [ibc] - - Change event height to ICS height ([#549]) - -- [ibc-relayer-cli] - - Cleanup CLI code ([#572]) - -### BUG FIXES - -- [ibc] - - Fix panic in conn open try when no connection id is provided ([#626]) - - Disable MBT tests if the "mocks" feature is not enabled ([#643]) - -- [ibc-relayer] - - Quick fix for `funty` breaking change bug ([#665]) - -- [ibc-relayer-cli] - - Fix wrong acks sent with `tx raw packet-ack` in a 3-chain setup ([#614]) - -### BREAKING CHANGES - -- [ibc] - - Implementation of the `ChanOpenAck`, `ChanOpenConfirm`, `ChanCloseInit`, and `ChanCloseConfirm` handlers ([#316]) - - Remove dependency on `tendermint-rpc` ([#624]) - -- [ibc-relayer-cli] - - Remove the `proof` option from CLI ([#572]) - -[#316]: https://github.com/informalsystems/ibc-rs/issues/316 -[#549]: https://github.com/informalsystems/ibc-rs/issues/549 -[#560]: https://github.com/informalsystems/ibc-rs/issues/560 -[#572]: https://github.com/informalsystems/ibc-rs/issues/572 -[#614]: https://github.com/informalsystems/ibc-rs/issues/614 -[#622]: https://github.com/informalsystems/ibc-rs/issues/622 -[#624]: https://github.com/informalsystems/ibc-rs/issues/624 -[#626]: https://github.com/informalsystems/ibc-rs/issues/626 -[#637]: https://github.com/informalsystems/ibc-rs/issues/637 -[#643]: https://github.com/informalsystems/ibc-rs/issues/643 -[#665]: https://github.com/informalsystems/ibc-rs/issues/665 -[#671]: https://github.com/informalsystems/ibc-rs/pull/671 -[#682]: https://github.com/informalsystems/ibc-rs/issues/682 - -[ibc]: https://github.com/informalsystems/ibc-rs/tree/master/modules -[ibc-relayer-cli]: https://github.com/informalsystems/ibc-rs/tree/master/relayer-cli - -## v0.1.0 -*February 4, 2021* - -🎉 This release brings the first publication of `ibc-relayer` and -`ibc-relayer-cli` to [crates.io](https://crates.io). - -Noteworthy changes in this release include: - -- The binary in the `ibc-relayer-cli` crate was given the name Hermes. -- We published a comprehensive guide for Hermes at [hermes.informal.systems](https://hermes.informal.systems). -- Major improvements to user experience, in particular at CLI level: JSON output, - configurable log output level, dedicated channel handshake command, as well as - overall improvements to error display and output. 
- -### FEATURES - -- Continous Integration (CI) end-to-end (e2e) testing with gaia v4 ([#32], [#582], [#602]) -- Add support for streamlining releases ([#507]) - -- [ibc-relayer-cli] - - Implement command to query the channels associated with a connection ([#505]) - - JSON output for queries and txs ([#500]) - - Added 'required' annotation for CLIs queries & txs; better error display ([#555]) - - Implement commands for channel close init and confirm ([#538]) - - Implement command to perform the handshake for a new channel ([#557]) - - Query all clients command ([#552]) - - Query all connections command ([#553]) - - Query all channels command ([#568]) - - Added a relayer binary guide ([#542]) - - Split the dev-env script in `setup_chains` and `init_clients` ([#577]) - -- [ibc-relayer] - - Added retry mechanism, restructured relayer ([#519]) - - Relay `MsgTimeoutOnClose` if counterparty channel state is `State::Closed` - -- [ibc] - - Add `MsgTimeoutOnClose` message type ([#563]) - - Implement `MsgChannelOpenTry` message handler ([#543]) - -### IMPROVEMENTS - -- Update to `tendermint-rs` v0.18.0 ([#517], [#583]) -- Update to `tokio` 1.0, `prost` 0.7 and `tonic` 0.4 ([#527]) - -- [ibc-relayer-cli] - - Replace `ChannelConfig` in `Channel::new` ([#511]) - - Add `packet-send` CLI ([#470]) - - UX improvements for relayer txs ([#536], [#540], [#554]) - - Allow running standalone commands concurrently to the main relayer loop ([#501]) - - Remove the simd-based integration tests ([#593]) - -- [ibc-relayer] - - Performance improvements ([#514], [#537]) - - Fix for mismatching `bitcoin` dep ([#525]) - -- [ibc] - - Clean the `validate_basic` method ([#94]) - - `MsgConnectionOpenAck` testing improvements ([#306]) - -### BUG FIXES: -- [ibc-relayer-cli] - - Help and usage commands show 'hermes' for executable name ([#590]) - -- [ibc] - - Fix for storing `ClientType` upon 'create-client' ([#513]) - -### BREAKING CHANGES: - -- [ibc] - - The `ibc::handler::Event` is removed and handlers now produce `ibc::events::IBCEvent`s ([#535]) - -[#32]: https://github.com/informalsystems/ibc-rs/issues/32 -[#94]: https://github.com/informalsystems/ibc-rs/issues/94 -[#306]: https://github.com/informalsystems/ibc-rs/issues/306 -[#470]: https://github.com/informalsystems/ibc-rs/issues/470 -[#500]: https://github.com/informalsystems/ibc-rs/issues/500 -[#501]: https://github.com/informalsystems/ibc-rs/issues/501 -[#505]: https://github.com/informalsystems/ibc-rs/issues/505 -[#507]: https://github.com/informalsystems/ibc-rs/issues/507 -[#511]: https://github.com/informalsystems/ibc-rs/pull/511 -[#513]: https://github.com/informalsystems/ibc-rs/issues/513 -[#514]: https://github.com/informalsystems/ibc-rs/issues/514 -[#517]: https://github.com/informalsystems/ibc-rs/issues/517 -[#519]: https://github.com/informalsystems/ibc-rs/issues/519 -[#525]: https://github.com/informalsystems/ibc-rs/issues/525 -[#527]: https://github.com/informalsystems/ibc-rs/issues/527 -[#535]: https://github.com/informalsystems/ibc-rs/issues/535 -[#536]: https://github.com/informalsystems/ibc-rs/issues/536 -[#537]: https://github.com/informalsystems/ibc-rs/issues/537 -[#538]: https://github.com/informalsystems/ibc-rs/issues/538 -[#540]: https://github.com/informalsystems/ibc-rs/issues/540 -[#542]: https://github.com/informalsystems/ibc-rs/issues/542 -[#543]: https://github.com/informalsystems/ibc-rs/issues/543 -[#552]: https://github.com/informalsystems/ibc-rs/issues/553 -[#553]: https://github.com/informalsystems/ibc-rs/issues/553 -[#554]: 
https://github.com/informalsystems/ibc-rs/issues/554 -[#555]: https://github.com/informalsystems/ibc-rs/issues/555 -[#557]: https://github.com/informalsystems/ibc-rs/issues/557 -[#563]: https://github.com/informalsystems/ibc-rs/issues/563 -[#568]: https://github.com/informalsystems/ibc-rs/issues/568 -[#577]: https://github.com/informalsystems/ibc-rs/issues/577 -[#582]: https://github.com/informalsystems/ibc-rs/issues/582 -[#583]: https://github.com/informalsystems/ibc-rs/issues/583 -[#590]: https://github.com/informalsystems/ibc-rs/issues/590 -[#593]: https://github.com/informalsystems/ibc-rs/issues/593 -[#602]: https://github.com/informalsystems/ibc-rs/issues/602 - -## v0.0.6 -*December 23, 2020* - -This release focuses on upgrading the relayer and ibc modules to the latest interfaces from the ecosystem: -tendermint-rs `v0.17`, which brings the protobuf changes from tendermint `v0.34.0`, plus alignment with -the latest cosmos proto versions from `v0.40.0-rc5` (sometimes called 'stargate-5'). - -### FEATURES -- Update to tendermint-rs version `0.17` ([#451]) -- Update to cosmos-sdk IBC proto version `v0.40.0-rc5` ([#451]) - -- [ibc-relayer] - -- [ibc-relayer-cli] - - Packet CLIs for recv_packet ([#443]) - - Packet CLIs for acknowledging packets ([#468]) - -### IMPROVEMENTS -- [ibc-relayer] - - Mock chain (implementing IBC handlers) and integration against CLI ([#158]) - - Relayer tests for client update (ping pong) against MockChain ([#381]) - - Relayer refactor to improve testing and add semantic dependencies ([#447]) - -[#158]: https://github.com/informalsystems/ibc-rs/issues/158 -[#379]: https://github.com/informalsystems/ibc-rs/issues/379 -[#381]: https://github.com/informalsystems/ibc-rs/issues/381 -[#443]: https://github.com/informalsystems/ibc-rs/issues/443 -[#447]: https://github.com/informalsystems/ibc-rs/issues/447 -[#451]: https://github.com/informalsystems/ibc-rs/issues/451 -[#468]: https://github.com/informalsystems/ibc-rs/issues/468 - - -## v0.0.5 -*December 2, 2020* - -This release focuses on implementing relayer and relayer-cli functionality towards a full v0 implementation. -We now have the full-stack implementation for supporting client creation & updates, as well as connection- and channel handshakes. -We also consolidated our TLA+ specs into an "IBC Core TLA+ specification," and added ICS 020 spec. - -Special thanks to external contributors for this release: @CharlyCst ([#347], [#419]). 
- -- [ibc-relayer-cli] - - Add `--all` option to `light rm` command to remove all peers for a given chain ([#431]) - -[#431]: https://github.com/informalsystems/ibc-rs/issues/431 - -### FEATURES - -- Update to tendermint-rs version `0.17-RC3` ([#403]) -- [changelog] Added "unreleased" section in `CHANGELOG.MD` to help streamline releases ([#274]) -- [ibc] - - Implement flexible connection id selection ([#332]) - - ICS 4 Domain Types for channel handshakes and packets ([#315], [#95]) - - Introduce LightBlock support for MockContext ([#389]) -- [ibc-relayer] - - Retrieve account sequence information from a chain using a GRPC client (#337) - - Implementation of chain runtime for v0 ([#330]) - - Integrate relayer spike into ibc-relayer crate ([#335]) - - Implement `query_header_at_height` via plain RPC queries (no light client verification) ([#336]) - - Implement the relayer logic for connection handshake messages ([#358], [#359], [#360]) - - Implement the relayer logic for channel handshake messages ([#371], [#372], [#373], [#374]) -- [ibc-relayer-cli] - - Merge light clients config in relayer config and add commands to add/remove light clients ([#348]) - - CLI for client update message ([#277]) - - Implement the relayer CLI for connection handshake messages ([#358], [#359], [#360]) - - Implement the relayer CLI for channel handshake messages ([#371], [#372], [#373], [#374]) - - Added basic client, connection, and channel lifecyle in relayer v0 ([#376], [#377], [#378]) - - Implement commands to add and list keys for a chain ([#363]) - - Allow overriding of peer_id, height and hash in light add command ([#428]) -- [proto-compiler] - - Refactor and allow specifying a commit at which the Cosmos SDK should be checked out ([#366]) - - Add a `--tag` option to the `clone-sdk` command to check out a tag instead of a commit ([#369]) - - Fix `--out` command line parameter (instead of `--path`) ([#419]) -- [ibc/relayer-spec] - - ICS 020 spec in TLA+ ([#386]) - - Prepare IBC Core TLA+ specs ([#404]) - -### IMPROVEMENTS - -- [ibc-relayer] - - Pin chain runtime against Tokio 0.2 by downgrading for 0.3 to avoid dependency hell ([#415], follow up to [#402]) -- [ibc-relayer-cli] - - Split tasks spawned by CLI commands into their own modules ([#331]) - - V0 command implementation ([#346]) -- [ibc] - - Split `msgs.rs` of ICS002 in separate modules ([#367]) - - Fixed inconsistent versioning for ICS003 and ICS004 ([#97]) - - Fixed `get_sign_bytes` method for messages ([#98]) - - Homogenize ConnectionReader trait so that all functions return owned objects ([#347]) - - Align with tendermint-rs in the domain type definition of `block::Id` ([#338]) - - -[#95]: https://github.com/informalsystems/ibc-rs/issues/95 -[#97]: https://github.com/informalsystems/ibc-rs/issues/97 -[#98]: https://github.com/informalsystems/ibc-rs/issues/98 -[#274]: https://github.com/informalsystems/ibc-rs/issues/274 -[#277]: https://github.com/informalsystems/ibc-rs/issues/277 -[#315]: https://github.com/informalsystems/ibc-rs/issues/315 -[#330]: https://github.com/informalsystems/ibc-rs/issues/330 -[#332]: https://github.com/informalsystems/ibc-rs/issues/332 -[#335]: https://github.com/informalsystems/ibc-rs/pull/335 -[#336]: https://github.com/informalsystems/ibc-rs/issues/336 -[#337]: https://github.com/informalsystems/ibc-rs/issues/337 -[#338]: https://github.com/informalsystems/ibc-rs/issues/338 -[#346]: https://github.com/informalsystems/ibc-rs/issues/346 -[#347]: https://github.com/informalsystems/ibc-rs/issues/347 -[#348]: 
https://github.com/informalsystems/ibc-rs/pull/348 -[#358]: https://github.com/informalsystems/ibc-rs/issues/358 -[#359]: https://github.com/informalsystems/ibc-rs/issues/359 -[#360]: https://github.com/informalsystems/ibc-rs/issues/360 -[#363]: https://github.com/informalsystems/ibc-rs/issues/363 -[#366]: https://github.com/informalsystems/ibc-rs/issues/366 -[#367]: https://github.com/informalsystems/ibc-rs/issues/367 -[#368]: https://github.com/informalsystems/ibc-rs/issues/368 -[#369]: https://github.com/informalsystems/ibc-rs/pull/369 -[#371]: https://github.com/informalsystems/ibc-rs/issues/371 -[#372]: https://github.com/informalsystems/ibc-rs/issues/372 -[#373]: https://github.com/informalsystems/ibc-rs/issues/373 -[#374]: https://github.com/informalsystems/ibc-rs/issues/374 -[#376]: https://github.com/informalsystems/ibc-rs/issues/376 -[#377]: https://github.com/informalsystems/ibc-rs/issues/377 -[#378]: https://github.com/informalsystems/ibc-rs/issues/378 -[#386]: https://github.com/informalsystems/ibc-rs/issues/386 -[#389]: https://github.com/informalsystems/ibc-rs/issues/389 -[#402]: https://github.com/informalsystems/ibc-rs/issues/402 -[#403]: https://github.com/informalsystems/ibc-rs/issues/403 -[#404]: https://github.com/informalsystems/ibc-rs/issues/404 -[#419]: https://github.com/informalsystems/ibc-rs/issues/419 -[#415]: https://github.com/informalsystems/ibc-rs/issues/415 -[#428]: https://github.com/informalsystems/ibc-rs/issues/428 -[changelog]: https://github.com/informalsystems/ibc-rs/tree/master/CHANGELOG.md -[proto-compiler]: https://github.com/informalsystems/ibc-rs/tree/master/proto-compiler - -## v0.0.4 -*October 19, 2020* - -This release focuses on alignment with the Cosmos ecosystem: adaptations to Tendermint-rs 0.16 and subsequently to 0.17 (`0.17.0-rc1`), and numerous protobuf updates following latest stargate releases. - -Additional highlights: -- Adding DomainTypes and (de)serialization capability to ICS02 and ICS03 messages and structures. -- Improvements of the IBC message processor framework (handlers, contexts and mocks). -- Added initial implementations for the ICS26 (routing module) and ICS18 (basic relayer algorithms module) for use in testing. -- Also added support for packet handling in the relayer algorithm specifications. - -### BREAKING CHANGES: -- [ibc-relayer] & [ibc] Alignment with ecosystem updates: - - Compatibility with the latest protobuf (Gaia stargate-3 and stargate-4) ([#191], [#272], [#273], [#278]) - - Adaptations to tendermint 0.17 ([#286], [#293], [#300], [#302], [#308]) -- [ibc-relayer] UX improvement: Remove proof option from client connections command ([#205]) - -### FEATURES: -- [ibc/ics03] ICS03 Ack and Confirm message processors ([#223]) -- [ibc-relayer-cli] - - Relayer CLIs for client messages ([#207]) - - Relayer CLIs for connection-open-init ([#206]) - - Queries for consensus state and client state ([#149], [#150]) -- [ibc] Routing module minimal implementation for MVP ([#159], [#232]) -- [ibc/relayer-spec] Relayer specification for packet handling ([#229], [#234], [#237]) -- [ibc/relayer-spec] Basic packet handling in TLA+([#124]) -- [ibc] Basic relayer functionality: a test with ClientUpdate ping-pong between two mocked chains ([#276]) - -### IMPROVEMENTS: -- [ibc] Implemented the `DomainType` trait for IBC proto structures ([#245], [#249]). 
-- [ibc] & [ibc-proto] Several improvements to message processors, among which ([#218]): - - ICS03 connection handshake protocol initial implementation and tests ([#160]) - - Add capability to decode from protobuf Any* type into Tendermint and Mock client states - - Cleanup Any* client wrappers related code - - Migrate handlers to newer protobuf definitions ([#226]) - - Extend client context mock ([#221]) - - Context mock simplifications and cleanup ([#269], [#295], [#296], [#297]) -- [ibc/ics03] Split `msgs.rs` in multiple files, implement `From` for all messages ([#253]) -- [ibc-proto] - - Move ibc-proto source code into ibc-rs ([#142]) and fixed code deduplication ([#282], [#284]) - - Consolidate proto-compiler logic [#241] -- [ibc/relayer-spec] Add support for APALACHE to the Relayer TLA+ spec ([#165]) -- [ibc-relayer] Update to tendermint v.0.16 and integrate with the new light client implementation ([#90], [#243]) - -### BUG FIXES: -- [ibc] Removed "Uninitialized" state from connection ([#217]) -- [ibc-relayer-cli] Fix for client query subcommands ([#231]) -- [disclosure-log] & [spec/connection-handshake] Disclosed bugs in ICS3 version negotiation and proposed a fix ([#209], [#213]) - -[#90]: https://github.com/informalsystems/ibc-rs/issues/90 -[#124]: https://github.com/informalsystems/ibc-rs/issues/124 -[#142]: https://github.com/informalsystems/ibc-rs/issues/142 -[#149]: https://github.com/informalsystems/ibc-rs/issues/149 -[#150]: https://github.com/informalsystems/ibc-rs/issues/150 -[#159]: https://github.com/informalsystems/ibc-rs/issues/159 -[#160]: https://github.com/informalsystems/ibc-rs/issues/160 -[#165]: https://github.com/informalsystems/ibc-rs/issues/165 -[#191]: https://github.com/informalsystems/ibc-rs/issues/191 -[#205]: https://github.com/informalsystems/ibc-rs/issues/205 -[#206]: https://github.com/informalsystems/ibc-rs/issues/206 -[#207]: https://github.com/informalsystems/ibc-rs/issues/207 -[#209]: https://github.com/informalsystems/ibc-rs/issues/209 -[#213]: https://github.com/informalsystems/ibc-rs/issues/213 -[#217]: https://github.com/informalsystems/ibc-rs/issues/217 -[#218]: https://github.com/informalsystems/ibc-rs/issues/218 -[#221]: https://github.com/informalsystems/ibc-rs/issues/221 -[#223]: https://github.com/informalsystems/ibc-rs/issues/223 -[#226]: https://github.com/informalsystems/ibc-rs/issues/226 -[#229]: https://github.com/informalsystems/ibc-rs/issues/229 -[#231]: https://github.com/informalsystems/ibc-rs/issues/231 -[#232]: https://github.com/informalsystems/ibc-rs/issues/232 -[#234]: https://github.com/informalsystems/ibc-rs/issues/234 -[#237]: https://github.com/informalsystems/ibc-rs/issues/237 -[#241]: https://github.com/informalsystems/ibc-rs/issues/241 -[#243]: https://github.com/informalsystems/ibc-rs/issues/243 -[#245]: https://github.com/informalsystems/ibc-rs/issues/245 -[#249]: https://github.com/informalsystems/ibc-rs/issues/249 -[#253]: https://github.com/informalsystems/ibc-rs/issues/253 -[#269]: https://github.com/informalsystems/ibc-rs/issues/269 -[#272]: https://github.com/informalsystems/ibc-rs/issues/272 -[#273]: https://github.com/informalsystems/ibc-rs/issues/273 -[#276]: https://github.com/informalsystems/ibc-rs/issues/276 -[#278]: https://github.com/informalsystems/ibc-rs/issues/278 -[#282]: https://github.com/informalsystems/ibc-rs/issues/282 -[#284]: https://github.com/informalsystems/ibc-rs/issues/284 -[#286]: https://github.com/informalsystems/ibc-rs/issues/286 -[#293]: 
https://github.com/informalsystems/ibc-rs/issues/293 -[#295]: https://github.com/informalsystems/ibc-rs/issues/295 -[#296]: https://github.com/informalsystems/ibc-rs/issues/296 -[#297]: https://github.com/informalsystems/ibc-rs/issues/297 -[#300]: https://github.com/informalsystems/ibc-rs/issues/300 -[#302]: https://github.com/informalsystems/ibc-rs/issues/302 -[#308]: https://github.com/informalsystems/ibc-rs/issues/308 -[ibc-proto]: https://github.com/informalsystems/ibc-rs/tree/master/proto -[disclosure-log]: https://github.com/informalsystems/ibc-rs/blob/master/docs/disclosure-log.md -[spec/connection-handshake]: https://github.com/informalsystems/ibc-rs/tree/master/docs/spec/connection-handshake -[ibc-relayer]: https://github.com/informalsystems/ibc-rs/tree/master/relayer - -## v0.0.3 -*September 1, 2020* - -This release focuses on the IBC message processor framework and initial -implementations in ICS02 and ICS07. It also introduces an initial specification for the relayer algorithm. - -Other highlights: -- The ibc crate is published as [ibc](https://crates.io/crates/ibc) in crates.io -- ADR-001 and ADR-003 are complete. 🎉 - -### BREAKING CHANGES: -- [ibc] Renamed `modules` crate to `ibc` crate. Version number for the new crate is not reset. ([#198]) -- [ibc/ics02] `ConnectionId`s are now decoded to `Vec<ConnectionId>` and validated instead of `Vec<String>` ([#185]) -- [ibc/ics03] Removed `Connection` and `ConnectionCounterparty` traits ([#193]) -- [ibc/ics04] Removed `Channel` and `ChannelCounterparty` traits ([#192]) - -### FEATURES: -- [ibc/ics02] partial implementation of message handler ([#119], [#194]) -- [ibc/ics07] partial implementation of message handler ([#119], [#194]) -- [architecture/ADR-003] Proposal for IBC handler (message processor) architecture ([#119], [#194]) -- [ibc/relayer-spec] Detailed technical specification of the relayer algorithm with focus on client update ([#84]) -- [architecture/ADR-001] Documentation for the repository structure ([#1]) -- [architecture/FSM-1] Connection Handshake FSM English description ([#122]) - -### IMPROVEMENTS: -- [contributing] Updated CONTRIBUTING.md.
Please read before opening PRs ([#195]) -- [ibc-relayer-cli] Refactor ConnectionId decoding in `query client` ([#185]) - -### BUG FIXES: -- [ibc/ics24] Identifiers limit update according to ICS specs ([#168]) - -[ibc/relayer-spec]: https://github.com/informalsystems/ibc-rs/blob/master/docs/spec/relayer/Relayer.md -[#84]: https://github.com/informalsystems/ibc-rs/issues/84 -[architecture/ADR-001]: https://github.com/informalsystems/ibc-rs/blob/master/docs/architecture/adr-001-repo.md -[#1]: https://github.com/informalsystems/ibc-rs/issues/1 -[contributing]: https://github.com/informalsystems/ibc-rs/blob/master/CONTRIBUTING.md -[#195]: https://github.com/informalsystems/ibc-rs/pull/195 -[ibc]: https://github.com/informalsystems/ibc-rs/tree/master/modules -[#198]: https://github.com/informalsystems/ibc-rs/issues/198 -[ibc/ics02]: https://github.com/informalsystems/ibc-rs/tree/master/modules/src/core/ics02_client -[#185]: https://github.com/informalsystems/ibc-rs/issues/185 -[ibc/ics03]: https://github.com/informalsystems/ibc-rs/tree/master/modules/src/core/ics03_connection -[#193]: https://github.com/informalsystems/ibc-rs/issues/193 -[ibc/ics04]: https://github.com/informalsystems/ibc-rs/tree/master/modules/src/core/ics04_channel -[#192]: https://github.com/informalsystems/ibc-rs/issues/192 -[ibc-relayer-cli]: https://github.com/informalsystems/ibc-rs/tree/master/relayer-cli -[architecture/FSM-1]: https://github.com/informalsystems/ibc-rs/blob/v0.1.0/docs/architecture/fsm-async-connection.md -[#122]: https://github.com/informalsystems/ibc-rs/issues/122 -[architecture/ADR-003]: https://github.com/informalsystems/ibc-rs/blob/master/docs/architecture/adr-003-handler-implementation.md -[#119]: https://github.com/informalsystems/ibc-rs/issues/119 -[#194]: https://github.com/informalsystems/ibc-rs/issues/194 -[ibc/ics24]: https://github.com/informalsystems/ibc-rs/tree/master/modules/src/core/ics24_host -[#168]: https://github.com/informalsystems/ibc-rs/issues/168 -[ibc/ics07]: https://github.com/informalsystems/ibc-rs/tree/master/modules/src/clients/ics07_tendermint - -## v0.0.2 - -*August 1, 2020* - -This release is focused on updating the query system from amino to protobuf, -implementing a few queries from the CLI, and establishing an initial testing framework -that will support multiple chain types. - -It does not target a stable release of Cosmos-SDK chains, but is tracking -the latest state of development towards the Cosmos-SDK Stargate release. - -### BREAKING CHANGES: - -- [ibc|ibc-relayer] Refactor queries, paths, and Chain trait to reduce code and use - protobuf instead of Amino. - [\#152](https://github.com/informalsystems/ibc-rs/pull/152), - [\#174](https://github.com/informalsystems/ibc-rs/pull/174), - [\#155](https://github.com/informalsystems/ibc-rs/pull/155) -- [repo] Moved relayer/cli to relayer-cli, relayer/relay to relayer. [\#183](https://github.com/informalsystems/ibc-rs/pull/183) - -### FEATURES: - -- [ibc-relayer] Query connections given client id. [\#169](https://github.com/informalsystems/ibc-rs/pull/169) -- [ibc-relayer] Query connection given connection id. 
[\#136](https://github.com/informalsystems/ibc-rs/pull/136) -- [ibc-relayer] Query channel given channel id and port [\#163](https://github.com/informalsystems/ibc-rs/pull/163) -- [spec] Channel closing datagrams in TLA+ [\#141](https://github.com/informalsystems/ibc-rs/pull/141) - -### IMPROVEMENTS: - -- [ci] Framework (scripts and Github Actions) for integration testing the relayer queries against - the Cosmos-SDK's `simd` binary with prepopulated IBC state in the genesis - [\#140](https://github.com/informalsystems/ibc-rs/pull/140), - [\#184](https://github.com/informalsystems/ibc-rs/pull/184) -- [ibc-relayer|ibc] Implemented better Raw type handling. [\#156](https://github.com/informalsystems/ibc-rs/pull/156) -- [repo] Add rust-toolchain file. [\#154](https://github.com/informalsystems/ibc-rs/pull/154) - -### BUG FIXES: - -- [ibc] Fixed the identifiers limits according to updated ics spec. [\#189](https://github.com/informalsystems/ibc-rs/pull/189) -- [ibc/relayer] Remove some warnings triggered during compilation due to dependency specification. [\#132](https://github.com/informalsystems/ibc-rs/pull/132) -- [ibc] Fix nightly runs. [\#161](https://github.com/informalsystems/ibc-rs/pull/161) -- [repo] Fix for incomplete licence terms. [\#153](https://github.com/informalsystems/ibc-rs/pull/153) - -## 0.0.1 - -*July 1st, 2020* - -This is the initial prototype release of an IBC relayer and TLA+ specifications. -There are no compatibility guarantees until v0.1.0. - -Includes: - -- Configuration file definition and validation -- Client state, consensus state, connection, channel queries. - - Note: deserialization is unimplemented as it has dependency on migration to protobuf for ABCI queries -- Per chain light clients threads are created and headers are periodically retrieved and verified. -- Per chain IBC event monitor threads are spawned and main event handler that receives them. - - Note: the event handler just displays the events. -- IBC Modules partial implementation for datastructures, messages and queries. -- Some English and TLA+ specifications for Connection & Channel Handshake as well as naive relayer algorithm. diff --git a/.changelog/unreleased/.gitkeep b/.changelog/unreleased/.gitkeep deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/.changelog/unreleased/breaking-changes/ibc-relayer-cli/1075-change-key-name-flag.md b/.changelog/unreleased/breaking-changes/ibc-relayer-cli/1075-change-key-name-flag.md deleted file mode 100644 index 28e9d3a4d8..0000000000 --- a/.changelog/unreleased/breaking-changes/ibc-relayer-cli/1075-change-key-name-flag.md +++ /dev/null @@ -1 +0,0 @@ -- Merged commands `keys add` and `keys restore` into single command `keys add`. The flag to specify the key name for the CLI command `keys add` has been changed from `-n` to `-k`. Restoring a key now takes a file containing the mnemonic as input instead of directly taking the mnemonic. ([#1075](https://github.com/informalsystems/ibc-rs/issues/1075)) \ No newline at end of file diff --git a/.changelog/unreleased/bug-fixes/ibc-relayer-cli/2143-config-validate-cli.md b/.changelog/unreleased/bug-fixes/ibc-relayer-cli/2143-config-validate-cli.md deleted file mode 100644 index 6ea31939c6..0000000000 --- a/.changelog/unreleased/bug-fixes/ibc-relayer-cli/2143-config-validate-cli.md +++ /dev/null @@ -1,2 +0,0 @@ -- CLI command `config validate` now correctly outputs an error if the configuration file - does not exist or is empty. 
([#2143](https://github.com/informalsystems/ibc-rs/issues/2143)) \ No newline at end of file diff --git a/.changelog/unreleased/bug-fixes/ibc-relayer/1153-fix-execute-schedule-leaky-pipeline.md b/.changelog/unreleased/bug-fixes/ibc-relayer/1153-fix-execute-schedule-leaky-pipeline.md deleted file mode 100644 index 41a113d0e8..0000000000 --- a/.changelog/unreleased/bug-fixes/ibc-relayer/1153-fix-execute-schedule-leaky-pipeline.md +++ /dev/null @@ -1,2 +0,0 @@ -- Fix `execute_schedule` method dropping operational data due to improper - handling of errors. ([#2118](https://github.com/informalsystems/ibc-rs/issues/1153)) diff --git a/.changelog/unreleased/bug-fixes/ibc/2293-fix-recv-packet-dest-portchan.md b/.changelog/unreleased/bug-fixes/ibc/2293-fix-recv-packet-dest-portchan.md deleted file mode 100644 index fc812b924b..0000000000 --- a/.changelog/unreleased/bug-fixes/ibc/2293-fix-recv-packet-dest-portchan.md +++ /dev/null @@ -1,3 +0,0 @@ -- Fix `recv_packet` handler incorrectly querying `packet_receipt` and `next_sequence_recv` using - packet's `source_{port, channel}`. - ([#2293](https://github.com/informalsystems/ibc-rs/issues/2293)) diff --git a/.changelog/unreleased/bug-fixes/relayer-cli/2168-conn-handshake-retry.md b/.changelog/unreleased/bug-fixes/relayer-cli/2168-conn-handshake-retry.md deleted file mode 100644 index a3dc964f4c..0000000000 --- a/.changelog/unreleased/bug-fixes/relayer-cli/2168-conn-handshake-retry.md +++ /dev/null @@ -1,3 +0,0 @@ -- Fix the flow for create connection to ensure success - despite concurrent relayers racing to finish the handshake. - ([#2168](https://github.com/informalsystems/ibc-rs/issues/2168)) \ No newline at end of file diff --git a/.changelog/unreleased/features/ibc-relayer-cli/912-balance-subcommand-cli.md b/.changelog/unreleased/features/ibc-relayer-cli/912-balance-subcommand-cli.md deleted file mode 100644 index 29b56f825b..0000000000 --- a/.changelog/unreleased/features/ibc-relayer-cli/912-balance-subcommand-cli.md +++ /dev/null @@ -1,2 +0,0 @@ -- Added CLI command `keys balance` which outputs the balance of an account associated with a - key. ([#912](https://github.com/informalsystems/ibc-rs/issues/912)) \ No newline at end of file diff --git a/.changelog/unreleased/features/ibc-relayer-cli/999-channel-client-subcommand-cli.md b/.changelog/unreleased/features/ibc-relayer-cli/999-channel-client-subcommand-cli.md deleted file mode 100644 index 6daf2f0af0..0000000000 --- a/.changelog/unreleased/features/ibc-relayer-cli/999-channel-client-subcommand-cli.md +++ /dev/null @@ -1,2 +0,0 @@ -- Added CLI command `query channel client` which outputs the channel's client state. - ([#999](https://github.com/informalsystems/ibc-rs/issues/999)) \ No newline at end of file diff --git a/.changelog/unreleased/features/ibc-relayer/2240-chain-types.md b/.changelog/unreleased/features/ibc-relayer/2240-chain-types.md deleted file mode 100644 index 682f0a6588..0000000000 --- a/.changelog/unreleased/features/ibc-relayer/2240-chain-types.md +++ /dev/null @@ -1,3 +0,0 @@ -- Add preliminary support for multiple chain types, which can be specified in - the chain configuration. At the moment only the `CosmosSdk` chain type is - supported.
([#2240](https://github.com/informalsystems/ibc-rs/issues/2240)) \ No newline at end of file diff --git a/.changelog/unreleased/features/proto/2277-proto-server.md b/.changelog/unreleased/features/proto/2277-proto-server.md deleted file mode 100644 index 723bb9ebb9..0000000000 --- a/.changelog/unreleased/features/proto/2277-proto-server.md +++ /dev/null @@ -1,2 +0,0 @@ -- Generate gRPC server code under feature 'server' - ([#2277](https://github.com/informalsystems/ibc-rs/issues/2277)) \ No newline at end of file diff --git a/.changelog/unreleased/features/relayer/2301-tendermint-version-support.md b/.changelog/unreleased/features/relayer/2301-tendermint-version-support.md deleted file mode 100644 index 40e5abb490..0000000000 --- a/.changelog/unreleased/features/relayer/2301-tendermint-version-support.md +++ /dev/null @@ -1,2 +0,0 @@ -- Add support for fetching & parsing the Tendermint version of a network that - Hermes is connected to. ([#2301](https://github.com/informalsystems/ibc-rs/issues/2301)) diff --git a/.changelog/unreleased/improvements/ibc-relayer/1400-fee-related-error-message.md b/.changelog/unreleased/improvements/ibc-relayer/1400-fee-related-error-message.md deleted file mode 100644 index ed6d1c495e..0000000000 --- a/.changelog/unreleased/improvements/ibc-relayer/1400-fee-related-error-message.md +++ /dev/null @@ -1,2 +0,0 @@ -- Added handler for SDK Err(13) in order to output an understandable error - message. ([#1400](https://github.com/informalsystems/ibc-rs/issues/1400)) \ No newline at end of file diff --git a/.changelog/unreleased/improvements/ibc/1759-complete-ics20.md b/.changelog/unreleased/improvements/ibc/1759-complete-ics20.md deleted file mode 100644 index 8d7cbbbbad..0000000000 --- a/.changelog/unreleased/improvements/ibc/1759-complete-ics20.md +++ /dev/null @@ -1 +0,0 @@ -- Complete ICS20 implementation ([#1759](https://github.com/informalsystems/ibc-rs/issues/1759)) diff --git a/.changelog/unreleased/improvements/ibc/2279-u256-serde-derive.md b/.changelog/unreleased/improvements/ibc/2279-u256-serde-derive.md deleted file mode 100644 index 8b95125a33..0000000000 --- a/.changelog/unreleased/improvements/ibc/2279-u256-serde-derive.md +++ /dev/null @@ -1 +0,0 @@ -- Derive `serde::{Serialize, Deserialize}` for `U256`. ([#2279](https://github.com/informalsystems/ibc-rs/issues/2279)) diff --git a/.changelog/unreleased/improvements/ibc/2280-ics20-api-improvements.md b/.changelog/unreleased/improvements/ibc/2280-ics20-api-improvements.md deleted file mode 100644 index 0a7c3ef6f2..0000000000 --- a/.changelog/unreleased/improvements/ibc/2280-ics20-api-improvements.md +++ /dev/null @@ -1,2 +0,0 @@ -- Remove unnecessary supertrait requirements from ICS20 traits.
-([#2280](https://github.com/informalsystems/ibc-rs/pull/2280)) diff --git a/.changelog/unreleased/improvements/relayer/2223-consolidate-chain-query-proven.md b/.changelog/unreleased/improvements/relayer/2223-consolidate-chain-query-proven.md deleted file mode 100644 index df7817429a..0000000000 --- a/.changelog/unreleased/improvements/relayer/2223-consolidate-chain-query-proven.md +++ /dev/null @@ -1,2 +0,0 @@ -- Consolidate ChainEndpoint::proven_* methods with their corresponding query_*() - form ([#2223](https://github.com/informalsystems/ibc-rs/issues/2223)) \ No newline at end of file diff --git a/.changelog/unreleased/improvements/relayer/2249-ignore-nonce-mismatch.md b/.changelog/unreleased/improvements/relayer/2249-ignore-nonce-mismatch.md deleted file mode 100644 index 57937fa2ca..0000000000 --- a/.changelog/unreleased/improvements/relayer/2249-ignore-nonce-mismatch.md +++ /dev/null @@ -1,2 +0,0 @@ -- Reduce relaying delay when some account mismatch errors occur during Tx - simulation ([#2249](https://github.com/informalsystems/ibc-rs/issues/2249)) \ No newline at end of file diff --git a/.changelog/v0.10.0/breaking-changes/1660-msrv-1.57.md b/.changelog/v0.10.0/breaking-changes/1660-msrv-1.57.md deleted file mode 100644 index 398ea568f3..0000000000 --- a/.changelog/v0.10.0/breaking-changes/1660-msrv-1.57.md +++ /dev/null @@ -1,2 +0,0 @@ -- Update MSRV to Rust 1.57 - ([#1660](https://github.com/informalsystems/ibc-rs/issues/1660)) diff --git a/.changelog/v0.10.0/breaking-changes/1665-tendermint-0.23.2.md b/.changelog/v0.10.0/breaking-changes/1665-tendermint-0.23.2.md deleted file mode 100644 index 9ffadf79d6..0000000000 --- a/.changelog/v0.10.0/breaking-changes/1665-tendermint-0.23.2.md +++ /dev/null @@ -1,2 +0,0 @@ -- Pin tendermint-rs dependencies to =0.23.2 - ([#1665](https://github.com/informalsystems/ibc-rs/pull/1665)) diff --git a/.changelog/v0.10.0/breaking-changes/ibc-relayer/1656-supervisor-spawn.md b/.changelog/v0.10.0/breaking-changes/ibc-relayer/1656-supervisor-spawn.md deleted file mode 100644 index aab7c64a3e..0000000000 --- a/.changelog/v0.10.0/breaking-changes/ibc-relayer/1656-supervisor-spawn.md +++ /dev/null @@ -1,3 +0,0 @@ -- Improve spawning of supervisor worker tasks ([#1656](https://github.com/informalsystems/ibc-rs/issues/1656)) - - The `Supervisor` struct is removed. - - Supervisor is now spawned using the `spawn_supervisor` function. diff --git a/.changelog/v0.10.0/breaking-changes/ibc/1618-get-frozen-height.md b/.changelog/v0.10.0/breaking-changes/ibc/1618-get-frozen-height.md deleted file mode 100644 index 544d3c083a..0000000000 --- a/.changelog/v0.10.0/breaking-changes/ibc/1618-get-frozen-height.md +++ /dev/null @@ -1,3 +0,0 @@ -- Add the `frozen_height()` method to the `ClientState` trait. (Includes breaking changes to the Tendermint - `ClientState` API.) 
- ([#1618](https://github.com/informalsystems/ibc-rs/issues/1618)) diff --git a/.changelog/v0.10.0/breaking-changes/ibc/1665-remove-chrono.md b/.changelog/v0.10.0/breaking-changes/ibc/1665-remove-chrono.md deleted file mode 100644 index c58260ee02..0000000000 --- a/.changelog/v0.10.0/breaking-changes/ibc/1665-remove-chrono.md +++ /dev/null @@ -1,4 +0,0 @@ -- Remove `Timestamp` API that depended on the `chrono` crate: - ([#1665](https://github.com/informalsystems/ibc-rs/pull/1665)): - - `Timestamp::from_datetime`; use `From` - - `Timestamp::as_datetime`, superseded by `Timestamp::into_datetime` diff --git a/.changelog/v0.10.0/bug-fixes/1264-recover-acct-seq.md b/.changelog/v0.10.0/bug-fixes/1264-recover-acct-seq.md deleted file mode 100644 index 6a7bf3d093..0000000000 --- a/.changelog/v0.10.0/bug-fixes/1264-recover-acct-seq.md +++ /dev/null @@ -1,2 +0,0 @@ -- Added a recovery mechanism to automatically retry or drop tx upon account - sequence mismatch errors ([#1264](https://github.com/informalsystems/ibc-rs/issues/1264)) diff --git a/.changelog/v0.10.0/bug-fixes/1634-update-unclog-instructions.md b/.changelog/v0.10.0/bug-fixes/1634-update-unclog-instructions.md deleted file mode 100644 index e259131b17..0000000000 --- a/.changelog/v0.10.0/bug-fixes/1634-update-unclog-instructions.md +++ /dev/null @@ -1,2 +0,0 @@ -- Update `CONTRIBUTING.md` for latest version of unclog - ([#1634](https://github.com/informalsystems/ibc-rs/issues/1634)) \ No newline at end of file diff --git a/.changelog/v0.10.0/bug-fixes/ibc-relayer/1664-handle-expired-client.md b/.changelog/v0.10.0/bug-fixes/ibc-relayer/1664-handle-expired-client.md deleted file mode 100644 index 8af52818cd..0000000000 --- a/.changelog/v0.10.0/bug-fixes/ibc-relayer/1664-handle-expired-client.md +++ /dev/null @@ -1 +0,0 @@ -- Handle expired client errors in workers ([#1543](https://github.com/informalsystems/ibc-rs/issues/1543)) diff --git a/.changelog/v0.10.0/bug-fixes/ibc-relayer/1715-execute-schedule-after-packet-cmd.md b/.changelog/v0.10.0/bug-fixes/ibc-relayer/1715-execute-schedule-after-packet-cmd.md deleted file mode 100644 index 77aabfddad..0000000000 --- a/.changelog/v0.10.0/bug-fixes/ibc-relayer/1715-execute-schedule-after-packet-cmd.md +++ /dev/null @@ -1 +0,0 @@ -- Perform `execute_schedule` after handling packet commands in packet worker ([#1715](https://github.com/informalsystems/ibc-rs/issues/1715)) diff --git a/.changelog/v0.10.0/bug-fixes/ibc-relayer/1750-misbehavior-config.md b/.changelog/v0.10.0/bug-fixes/ibc-relayer/1750-misbehavior-config.md deleted file mode 100644 index 8c6ad062d1..0000000000 --- a/.changelog/v0.10.0/bug-fixes/ibc-relayer/1750-misbehavior-config.md +++ /dev/null @@ -1 +0,0 @@ -- Do not spawn detect misbehavior task if it is disabled in config [#1750](https://github.com/informalsystems/ibc-rs/issues/1750) diff --git a/.changelog/v0.10.0/bug-fixes/ibc/1573-delete-commitment-in-acknowledgePacket.md b/.changelog/v0.10.0/bug-fixes/ibc/1573-delete-commitment-in-acknowledgePacket.md deleted file mode 100644 index d8d3dc8a39..0000000000 --- a/.changelog/v0.10.0/bug-fixes/ibc/1573-delete-commitment-in-acknowledgePacket.md +++ /dev/null @@ -1,2 +0,0 @@ -- Delete packet commitment instead of acknowledgement in acknowledgePacket - [#1573](https://github.com/informalsystems/ibc-rs/issues/1573) \ No newline at end of file diff --git a/.changelog/v0.10.0/bug-fixes/ibc/1649-fix-chan-open-ack-verify.md b/.changelog/v0.10.0/bug-fixes/ibc/1649-fix-chan-open-ack-verify.md deleted file mode 100644 index 
62eb667104..0000000000 --- a/.changelog/v0.10.0/bug-fixes/ibc/1649-fix-chan-open-ack-verify.md +++ /dev/null @@ -1,2 +0,0 @@ -- Set the `counterparty_channel_id` correctly to fix ICS04 [`chanOpenAck` handler verification](https://github.com/informalsystems/ibc-rs/blob/master/modules/src/core/ics04_channel/handler/chan_open_ack.rs) - ([#1649](https://github.com/informalsystems/ibc-rs/issues/1649)) diff --git a/.changelog/v0.10.0/bug-fixes/ibc/1697-assert-non-zero-trust-level.md b/.changelog/v0.10.0/bug-fixes/ibc/1697-assert-non-zero-trust-level.md deleted file mode 100644 index efb81f0339..0000000000 --- a/.changelog/v0.10.0/bug-fixes/ibc/1697-assert-non-zero-trust-level.md +++ /dev/null @@ -1,2 +0,0 @@ -- Add missing assertion for non-zero trust-level in Tendermint client initialization. - ([#1697](https://github.com/informalsystems/ibc-rs/issues/1697)) diff --git a/.changelog/v0.10.0/bug-fixes/ibc/1710-fix-frozen-height-proto-conv.md b/.changelog/v0.10.0/bug-fixes/ibc/1710-fix-frozen-height-proto-conv.md deleted file mode 100644 index a8938f9d36..0000000000 --- a/.changelog/v0.10.0/bug-fixes/ibc/1710-fix-frozen-height-proto-conv.md +++ /dev/null @@ -1,2 +0,0 @@ -- Fix conversion to Protocol Buffers of `ClientState`'s `frozen_height` field. - ([#1710](https://github.com/informalsystems/ibc-rs/issues/1710)) \ No newline at end of file diff --git a/.changelog/v0.10.0/features/1410-dynamic-version.md b/.changelog/v0.10.0/features/1410-dynamic-version.md deleted file mode 100644 index 1ed737d8e3..0000000000 --- a/.changelog/v0.10.0/features/1410-dynamic-version.md +++ /dev/null @@ -1,2 +0,0 @@ -- Support dynamic versions in channel open handshake & enable full support for - ibc-go v2 ([#1410](https://github.com/informalsystems/ibc-rs/issues/1410)) diff --git a/.changelog/v0.10.0/features/1550-ci-gaiav6.md b/.changelog/v0.10.0/features/1550-ci-gaiav6.md deleted file mode 100644 index 6215a94ea8..0000000000 --- a/.changelog/v0.10.0/features/1550-ci-gaiav6.md +++ /dev/null @@ -1 +0,0 @@ -- Extend CI test suite to include E2E tests using Gaia v6.0.0 [#1550](https://github.com/informalsystems/ibc-rs/issues/1550) diff --git a/.changelog/v0.10.0/features/1606.md b/.changelog/v0.10.0/features/1606.md deleted file mode 100644 index 88dc7212b3..0000000000 --- a/.changelog/v0.10.0/features/1606.md +++ /dev/null @@ -1,2 +0,0 @@ -- Added the `extra_wallets` parameter to `gm` to create additional funded wallets. -- Added the possibility of JSON output to `gm` by setting the environment variable `OUTPUT=json`. 
diff --git a/.changelog/v0.10.0/features/1633-allow-fee-granters.md b/.changelog/v0.10.0/features/1633-allow-fee-granters.md deleted file mode 100644 index 9c8344f7cf..0000000000 --- a/.changelog/v0.10.0/features/1633-allow-fee-granters.md +++ /dev/null @@ -1,2 +0,0 @@ -- Added support for fee granters through config file - ([#1633](https://github.com/informalsystems/ibc-rs/issues/1633)) \ No newline at end of file diff --git a/.changelog/v0.10.0/features/ibc-relayer/1561-config-proof-specs.md b/.changelog/v0.10.0/features/ibc-relayer/1561-config-proof-specs.md deleted file mode 100644 index a9d4447b5e..0000000000 --- a/.changelog/v0.10.0/features/ibc-relayer/1561-config-proof-specs.md +++ /dev/null @@ -1,2 +0,0 @@ -- Allow custom proof-specs in chain config - ([#1561](https://github.com/informalsystems/ibc-rs/issues/1561)) \ No newline at end of file diff --git a/.changelog/v0.10.0/features/ibc/1583-module-verification-ICS07.md b/.changelog/v0.10.0/features/ibc/1583-module-verification-ICS07.md deleted file mode 100644 index 5f11ff8e2f..0000000000 --- a/.changelog/v0.10.0/features/ibc/1583-module-verification-ICS07.md +++ /dev/null @@ -1,3 +0,0 @@ -- Implement proof verification for Tendermint client (ICS07). - ([#1583](https://github.com/informalsystems/ibc-rs/pull/1583)) - diff --git a/.changelog/v0.10.0/improvements/ibc-relayer-cli/1063-event-monitor-on-demand.md b/.changelog/v0.10.0/improvements/ibc-relayer-cli/1063-event-monitor-on-demand.md deleted file mode 100644 index eba09eba33..0000000000 --- a/.changelog/v0.10.0/improvements/ibc-relayer-cli/1063-event-monitor-on-demand.md +++ /dev/null @@ -1,2 +0,0 @@ -- Improve performance of standalone commands by starting the event monitor on-demand - ([#1063](https://github.com/informalsystems/ibc-rs/issues/1063)) diff --git a/.changelog/v0.10.0/improvements/ibc-relayer-cli/1636.md b/.changelog/v0.10.0/improvements/ibc-relayer-cli/1636.md deleted file mode 100644 index 5c333db4f0..0000000000 --- a/.changelog/v0.10.0/improvements/ibc-relayer-cli/1636.md +++ /dev/null @@ -1,2 +0,0 @@ -- Increase the default for `max_gas` from `300_000` to `400_000` ([#1636](https://github.com/informalsystems/ibc-rs/pull/1636)) - diff --git a/.changelog/v0.10.0/improvements/ibc-relayer/1576-update-abscissa.md b/.changelog/v0.10.0/improvements/ibc-relayer/1576-update-abscissa.md deleted file mode 100644 index 31d107373e..0000000000 --- a/.changelog/v0.10.0/improvements/ibc-relayer/1576-update-abscissa.md +++ /dev/null @@ -1,6 +0,0 @@ -- Update to abscissa framework version 0.6.0-beta.1, adding support for - `--help` flags in subcommands and improving help and usage printouts. - The `--version` option of the `create channel` subcommand has been renamed - to `--channel-version`, with the old name still supported as an alias. 
- ([#1576](https://github.com/informalsystems/ibc-rs/pull/1576), - [#1743](https://github.com/informalsystems/ibc-rs/pull/1743)) diff --git a/.changelog/v0.10.0/improvements/ibc/1665-remove-chrono.md b/.changelog/v0.10.0/improvements/ibc/1665-remove-chrono.md deleted file mode 100644 index 477fa06f2d..0000000000 --- a/.changelog/v0.10.0/improvements/ibc/1665-remove-chrono.md +++ /dev/null @@ -1,4 +0,0 @@ -- More conventional ad-hoc conversion methods on `Timestamp` - ([#1665](https://github.com/informalsystems/ibc-rs/pull/1665)): - - `Timestamp::nanoseconds` replaces `Timestamp::as_nanoseconds` - - `Timestamp::into_datetime` substitutes `Timestamp::as_datetime` diff --git a/.changelog/v0.10.0/summary.md b/.changelog/v0.10.0/summary.md deleted file mode 100644 index 7d8ccc26c6..0000000000 --- a/.changelog/v0.10.0/summary.md +++ /dev/null @@ -1,16 +0,0 @@ -*January 13th, 2021* - -This release notably updates the underlying CLI framework (`abscissa`) to version 0.6.0-beta.1, -which now uses `clap` for parsing command line arguments. This substantially improves the UX of the CLI, -by adding support for `--help` flags in subcommands and improving help and usage printouts. - -The `--version` option of the `create channel` subcommand has been renamed -to `--channel-version`, with the old name still supported as an alias. -Additionally, the `-h` short flag on many commands is now `-H` to avoid -clashes with the clap-provided short flag for help. - -This release also improves the handling of account sequence mismatch errors, -with a recovery mechanism to automatically retry or drop tx upon such errors. - -The relayer now also supports dynamic versions in channel open handshake (which is needed by Interchain Accounts), and enables full support for IBC v2. - diff --git a/.changelog/v0.11.0/1749-build-aarch64.md b/.changelog/v0.11.0/1749-build-aarch64.md deleted file mode 100644 index 5039c18a3f..0000000000 --- a/.changelog/v0.11.0/1749-build-aarch64.md +++ /dev/null @@ -1,2 +0,0 @@ -- Hermes builds for Linux on AArch64 are now released. 
- ([#1749](https://github.com/informalsystems/ibc-rs/pull/1749)) diff --git a/.changelog/v0.11.0/breaking-changes/1612-ibc-clock.md b/.changelog/v0.11.0/breaking-changes/1612-ibc-clock.md deleted file mode 100644 index 40954b86a6..0000000000 --- a/.changelog/v0.11.0/breaking-changes/1612-ibc-clock.md +++ /dev/null @@ -1 +0,0 @@ -- Hide `ibc::Timestamp::now()` behind `clock` feature flag ([#1612](https://github.com/informalsystems/ibc-rs/issues/1612)) diff --git a/.changelog/v0.11.0/breaking-changes/1765-msrv-1.58.md b/.changelog/v0.11.0/breaking-changes/1765-msrv-1.58.md deleted file mode 100644 index eafdc89f88..0000000000 --- a/.changelog/v0.11.0/breaking-changes/1765-msrv-1.58.md +++ /dev/null @@ -1 +0,0 @@ -- Update MSRV to Rust 1.58 ([#1765](https://github.com/informalsystems/ibc-rs/issues/1765)) diff --git a/.changelog/v0.11.0/breaking-changes/1767-tendermint-rs-0.23.5.md b/.changelog/v0.11.0/breaking-changes/1767-tendermint-rs-0.23.5.md deleted file mode 100644 index 4dff907825..0000000000 --- a/.changelog/v0.11.0/breaking-changes/1767-tendermint-rs-0.23.5.md +++ /dev/null @@ -1,2 +0,0 @@ -- Update tendermint-rs dependencies to 0.23.5 - ([#1767](https://github.com/informalsystems/ibc-rs/issues/1767)) \ No newline at end of file diff --git a/.changelog/v0.11.0/breaking-changes/1817-remove-filter-option.md b/.changelog/v0.11.0/breaking-changes/1817-remove-filter-option.md deleted file mode 100644 index a659b122f7..0000000000 --- a/.changelog/v0.11.0/breaking-changes/1817-remove-filter-option.md +++ /dev/null @@ -1,2 +0,0 @@ -- Remove `mode.packets.filter` config option and enable filtering by default - ([#1817](https://github.com/informalsystems/ibc-rs/issues/1817)) \ No newline at end of file diff --git a/.changelog/v0.11.0/breaking-changes/ibc-relayer/1662-configurable-upgrade-denom.md b/.changelog/v0.11.0/breaking-changes/ibc-relayer/1662-configurable-upgrade-denom.md deleted file mode 100644 index a4ce9eedcb..0000000000 --- a/.changelog/v0.11.0/breaking-changes/ibc-relayer/1662-configurable-upgrade-denom.md +++ /dev/null @@ -1,2 +0,0 @@ -- Added a `denom` member to `upgrade_chain::UpgradePlanOptions`. - ([#1662](https://github.com/informalsystems/ibc-rs/issues/1662)) diff --git a/.changelog/v0.11.0/breaking-changes/ibc-relayer/1807-foreign-client-create-params.md b/.changelog/v0.11.0/breaking-changes/ibc-relayer/1807-foreign-client-create-params.md deleted file mode 100644 index a57a9ca5b8..0000000000 --- a/.changelog/v0.11.0/breaking-changes/ibc-relayer/1807-foreign-client-create-params.md +++ /dev/null @@ -1,4 +0,0 @@ -- `foreign_client::CreateParams` struct added, passed as the parameter to - `ForeignClient::build_create_client` and - `ForeignClient::build_create_client_and_send`.
- ([#1807](https://github.com/informalsystems/ibc-rs/pull/1807)) diff --git a/.changelog/v0.11.0/bug-fixes/ibc/1745-fix-consensus-proof-verification.md b/.changelog/v0.11.0/bug-fixes/ibc/1745-fix-consensus-proof-verification.md deleted file mode 100644 index 0c8103df5f..0000000000 --- a/.changelog/v0.11.0/bug-fixes/ibc/1745-fix-consensus-proof-verification.md +++ /dev/null @@ -1 +0,0 @@ -- Verify the client consensus proof against the client's consensus state root and not the host's state root diff --git a/.changelog/v0.11.0/bug-fixes/ibc/1763-init-consensus-meta-on-client-create.md b/.changelog/v0.11.0/bug-fixes/ibc/1763-init-consensus-meta-on-client-create.md deleted file mode 100644 index c32fbad959..0000000000 --- a/.changelog/v0.11.0/bug-fixes/ibc/1763-init-consensus-meta-on-client-create.md +++ /dev/null @@ -1,2 +0,0 @@ -- Initialize consensus metadata on client creation - ([#1763](https://github.com/informalsystems/ibc-rs/issues/1763)) \ No newline at end of file diff --git a/.changelog/v0.11.0/improvements/1536-fast-start.md b/.changelog/v0.11.0/improvements/1536-fast-start.md deleted file mode 100644 index 4141495364..0000000000 --- a/.changelog/v0.11.0/improvements/1536-fast-start.md +++ /dev/null @@ -1,3 +0,0 @@ -- Improve startup time of the relayer - - When scanning a chain with filtering enabled and an allow list, skip scanning all the clients and query the allowed channels directly. This results in much fewer queries and a faster start. - - Add a `--full-scan` option to `hermes start` to opt out of the fast start mechanism and do a full scan. diff --git a/.changelog/v0.11.0/improvements/1641-tendermint-0.23.4.md b/.changelog/v0.11.0/improvements/1641-tendermint-0.23.4.md deleted file mode 100644 index df22ac3c78..0000000000 --- a/.changelog/v0.11.0/improvements/1641-tendermint-0.23.4.md +++ /dev/null @@ -1,3 +0,0 @@ -- Update `tendermint-rs` to v0.23.4 and harmonize the dependencies to use a single TLS stack. - A system installation of OpenSSL is no longer required to build Hermes. - ([#1641](https://github.com/informalsystems/ibc-rs/issues/1641)) \ No newline at end of file diff --git a/.changelog/v0.11.0/improvements/1687-remove-mock-sleep.md b/.changelog/v0.11.0/improvements/1687-remove-mock-sleep.md deleted file mode 100644 index beb9510f9c..0000000000 --- a/.changelog/v0.11.0/improvements/1687-remove-mock-sleep.md +++ /dev/null @@ -1 +0,0 @@ -- Remove 1 second sleep in `generate_tm_block` during testing with mock context. [#1687](https://github.com/informalsystems/ibc-rs/issues/1687) diff --git a/.changelog/v0.11.0/improvements/ibc-relayer-cli/1662-configurable-upgrade-denom.md b/.changelog/v0.11.0/improvements/ibc-relayer-cli/1662-configurable-upgrade-denom.md deleted file mode 100644 index 324bb40250..0000000000 --- a/.changelog/v0.11.0/improvements/ibc-relayer-cli/1662-configurable-upgrade-denom.md +++ /dev/null @@ -1,2 +0,0 @@ -- Make the deposit denomination configurable in `tx raw upgrade-chain` via a new `--denom` flag. 
- ([#1662](https://github.com/informalsystems/ibc-rs/issues/1662)) diff --git a/.changelog/v0.11.0/improvements/ibc-relayer-cli/1777-update-abscissa-and-clap.md b/.changelog/v0.11.0/improvements/ibc-relayer-cli/1777-update-abscissa-and-clap.md deleted file mode 100644 index 75f584a3e8..0000000000 --- a/.changelog/v0.11.0/improvements/ibc-relayer-cli/1777-update-abscissa-and-clap.md +++ /dev/null @@ -1,2 +0,0 @@ -- Update to abscissa_core 0.6.0-rc.0 and clap 3.x - ([#1777](https://github.com/informalsystems/ibc-rs/pull/1777)) diff --git a/.changelog/v0.11.0/improvements/ibc-relayer-cli/1789-cli-completions.md b/.changelog/v0.11.0/improvements/ibc-relayer-cli/1789-cli-completions.md deleted file mode 100644 index 7d46b207e3..0000000000 --- a/.changelog/v0.11.0/improvements/ibc-relayer-cli/1789-cli-completions.md +++ /dev/null @@ -1,2 +0,0 @@ -- Add `completions` CLI command to generate shell auto-completion scripts. - ([#1789](https://github.com/informalsystems/ibc-rs/pull/1789)) diff --git a/.changelog/v0.11.0/improvements/ibc-relayer-cli/836-create-client-options.md b/.changelog/v0.11.0/improvements/ibc-relayer-cli/836-create-client-options.md deleted file mode 100644 index 635234e9aa..0000000000 --- a/.changelog/v0.11.0/improvements/ibc-relayer-cli/836-create-client-options.md +++ /dev/null @@ -1,2 +0,0 @@ -- Add custom options to the `create client` command. - ([#836](https://github.com/informalsystems/ibc-rs/issues/836)) diff --git a/.changelog/v0.11.0/improvements/ibc-relayer/1481-chainendpoint-any-consensus-state.md b/.changelog/v0.11.0/improvements/ibc-relayer/1481-chainendpoint-any-consensus-state.md deleted file mode 100644 index 743dc14e31..0000000000 --- a/.changelog/v0.11.0/improvements/ibc-relayer/1481-chainendpoint-any-consensus-state.md +++ /dev/null @@ -1,2 +0,0 @@ -- Allow `ChainEndpoint` implementations to fetch any types of clients - and consensus states ([#1481](https://github.com/informalsystems/ibc-rs/issues/1481)) \ No newline at end of file diff --git a/.changelog/v0.11.0/improvements/ibc-relayer/1491-structured-logs.md b/.changelog/v0.11.0/improvements/ibc-relayer/1491-structured-logs.md deleted file mode 100644 index 1f1ae711e9..0000000000 --- a/.changelog/v0.11.0/improvements/ibc-relayer/1491-structured-logs.md +++ /dev/null @@ -1,2 +0,0 @@ -- More structured logging in relayer, using tracing spans and key-value pairs. - ([#1491](https://github.com/informalsystems/ibc-rs/pull/1491)) diff --git a/.changelog/v0.11.0/improvements/ibc-relayer/1785-clarify-ethermint-keys.md b/.changelog/v0.11.0/improvements/ibc-relayer/1785-clarify-ethermint-keys.md deleted file mode 100644 index 94e4e72be4..0000000000 --- a/.changelog/v0.11.0/improvements/ibc-relayer/1785-clarify-ethermint-keys.md +++ /dev/null @@ -1,2 +0,0 @@ -- Improved documentation w.r.t.
keys for Ethermint-based chains - ([#1785](https://github.com/informalsystems/ibc-rs/issues/1785)) \ No newline at end of file diff --git a/.changelog/v0.11.0/improvements/ibc/1760-path-variants-as-types.md b/.changelog/v0.11.0/improvements/ibc/1760-path-variants-as-types.md deleted file mode 100644 index 3045efb67e..0000000000 --- a/.changelog/v0.11.0/improvements/ibc/1760-path-variants-as-types.md +++ /dev/null @@ -1,2 +0,0 @@ -- Extract all `ics24_host::Path` variants into their separate types - ([#1760](https://github.com/informalsystems/ibc-rs/issues/1760)) \ No newline at end of file diff --git a/.changelog/v0.11.0/improvements/ibc/1761-disallow-empty-commitment-prefix-and-proof.md b/.changelog/v0.11.0/improvements/ibc/1761-disallow-empty-commitment-prefix-and-proof.md deleted file mode 100644 index d59818fd55..0000000000 --- a/.changelog/v0.11.0/improvements/ibc/1761-disallow-empty-commitment-prefix-and-proof.md +++ /dev/null @@ -1,2 +0,0 @@ -- Disallow empty `CommitmentPrefix` and `CommitmentProofBytes` - ([#1761](https://github.com/informalsystems/ibc-rs/issues/1761)) \ No newline at end of file diff --git a/.changelog/v0.11.0/summary.md b/.changelog/v0.11.0/summary.md deleted file mode 100644 index 8a19f4003f..0000000000 --- a/.changelog/v0.11.0/summary.md +++ /dev/null @@ -1,36 +0,0 @@ -This release notably speeds up the startup time of Hermes, -adds options to the `create client` command to customize the client parameters, -makes the deposit denomination configurable in `tx raw upgrade-chain` via a new `--denom` flag, -and adds a `completions` CLI command to generate shell auto-completion scripts. - -### Note for operators - -This release includes a breaking change, which requires the configuration file to be edited. -The `mode.packets.filter` configuration option has been removed and is now enabled by default. - -Before running Hermes v0.11.0, make sure you remove the `mode.packets.filter` option from the configuration file. - -```diff ---- a/config.toml -+++ b/config.toml -@@ -50,18 +50,6 @@ clear_interval = 100 - # Whether or not to clear packets on start. [Default: false] - clear_on_start = true - --# Enable or disable the filtering mechanism. --# Valid options are 'true', 'false'. --# Currently Hermes supports two filters: --# 1. Packet filtering on a per-chain basis; see the chain-specific --# filter specification below in [chains.packet_filter]. --# 2. Filter for all activities based on client state trust threshold; this filter --# is parametrized with (numerator = 1, denominator = 3), so that clients with --# thresholds different than this will be ignored. --# If set to 'true', both of the above filters will be enabled. --# [Default: false] --filter = false -- - # Toggle the transaction confirmation mechanism. - # The tx confirmation mechanism periodically queries the `/tx_search` RPC - # endpoint to check that previously-submitted transactions -``` - diff --git a/.changelog/v0.11.1/bug-fixes/ibc-relayer-cli/1822-skip-config-for-completions.md b/.changelog/v0.11.1/bug-fixes/ibc-relayer-cli/1822-skip-config-for-completions.md deleted file mode 100644 index 36eed16cc4..0000000000 --- a/.changelog/v0.11.1/bug-fixes/ibc-relayer-cli/1822-skip-config-for-completions.md +++ /dev/null @@ -1,2 +0,0 @@ -- Do not require a config file to be present for the `completions` command. 
- ([#1822](https://github.com/informalsystems/ibc-rs/pull/1822)) diff --git a/.changelog/v0.11.1/improvements/ibc-relayer/1389-add-connection-handshake-verification-logic.md b/.changelog/v0.11.1/improvements/ibc-relayer/1389-add-connection-handshake-verification-logic.md deleted file mode 100644 index be5a15dae4..0000000000 --- a/.changelog/v0.11.1/improvements/ibc-relayer/1389-add-connection-handshake-verification-logic.md +++ /dev/null @@ -1 +0,0 @@ -- Add missing checks for `ConnectionEnd::version` and `Counterparty::prefix` fields in the `check_destination_connection_state` method. ([#1389](https://github.com/informalsystems/ibc-rs/issues/1389)) diff --git a/.changelog/v0.11.1/improvements/ibc-relayer/1663-pending-timeout.md b/.changelog/v0.11.1/improvements/ibc-relayer/1663-pending-timeout.md deleted file mode 100644 index d9da56b8c7..0000000000 --- a/.changelog/v0.11.1/improvements/ibc-relayer/1663-pending-timeout.md +++ /dev/null @@ -1,2 +0,0 @@ -- Increased tx confirmation timeout to 300s to prevent aggressive tx - resubmission ([#1663](https://github.com/informalsystems/ibc-rs/issues/1663)) \ No newline at end of file diff --git a/.changelog/v0.11.1/improvements/ibc-relayer/1793-begin-end-block-chan-events.md b/.changelog/v0.11.1/improvements/ibc-relayer/1793-begin-end-block-chan-events.md deleted file mode 100644 index 369f3c51da..0000000000 --- a/.changelog/v0.11.1/improvements/ibc-relayer/1793-begin-end-block-chan-events.md +++ /dev/null @@ -1,2 +0,0 @@ -- Handle channel events originating from Tendermint ABCI's BeginBlock and EndBlock methods - ([#1793](https://github.com/informalsystems/ibc-rs/issues/1793)) \ No newline at end of file diff --git a/.changelog/v0.11.1/summary.md b/.changelog/v0.11.1/summary.md deleted file mode 100644 index 5a5f1350b1..0000000000 --- a/.changelog/v0.11.1/summary.md +++ /dev/null @@ -1 +0,0 @@ -This release adds support for channel events originating from Tendermint ABCI's `BeginBlock` and `EndBlock` methods. diff --git a/.changelog/v0.12.0/bug-fixes/ibc-relayer-cli/1885-disable-config-reload.md b/.changelog/v0.12.0/bug-fixes/ibc-relayer-cli/1885-disable-config-reload.md deleted file mode 100644 index 47df3b67a2..0000000000 --- a/.changelog/v0.12.0/bug-fixes/ibc-relayer-cli/1885-disable-config-reload.md +++ /dev/null @@ -1 +0,0 @@ -- Disable reloading of configuration upon receiving a SIGHUP signal. 
([#1885](https://github.com/informalsystems/ibc-rs/issues/1885)) diff --git a/.changelog/v0.12.0/bug-fixes/ibc-relayer/1837-non-standard-ports.md b/.changelog/v0.12.0/bug-fixes/ibc-relayer/1837-non-standard-ports.md deleted file mode 100644 index 758a217a66..0000000000 --- a/.changelog/v0.12.0/bug-fixes/ibc-relayer/1837-non-standard-ports.md +++ /dev/null @@ -1,2 +0,0 @@ -- Handle non-standard ports in channel handshake - ([#1837](https://github.com/informalsystems/ibc-rs/issues/1837)) diff --git a/.changelog/v0.12.0/bug-fixes/ibc-relayer/1844-duplicate-send-packet-events.md b/.changelog/v0.12.0/bug-fixes/ibc-relayer/1844-duplicate-send-packet-events.md deleted file mode 100644 index 88cafb5b79..0000000000 --- a/.changelog/v0.12.0/bug-fixes/ibc-relayer/1844-duplicate-send-packet-events.md +++ /dev/null @@ -1,2 +0,0 @@ -- Fix duplicate SendPacket events emitted by EndBlock - ([#1844](https://github.com/informalsystems/ibc-rs/issues/1844)) \ No newline at end of file diff --git a/.changelog/v0.12.0/bug-fixes/ibc-relayer/1861-non-standard-ports.md b/.changelog/v0.12.0/bug-fixes/ibc-relayer/1861-non-standard-ports.md deleted file mode 100644 index 96ab22af07..0000000000 --- a/.changelog/v0.12.0/bug-fixes/ibc-relayer/1861-non-standard-ports.md +++ /dev/null @@ -1,3 +0,0 @@ -- Fix support for non-standard ports in channel handshake - ([#1861](https://github.com/informalsystems/ibc-rs/issues/1861), - [#1837](https://github.com/informalsystems/ibc-rs/issues/1837)) diff --git a/.changelog/v0.12.0/bug-fixes/ibc-relayer/1872-clear-packets.md b/.changelog/v0.12.0/bug-fixes/ibc-relayer/1872-clear-packets.md deleted file mode 100644 index 4a72814e75..0000000000 --- a/.changelog/v0.12.0/bug-fixes/ibc-relayer/1872-clear-packets.md +++ /dev/null @@ -1,2 +0,0 @@ -- Fixed bug where Hermes cleared packets at startup, despite - `clear_on_start = false` ([#1872](https://github.com/informalsystems/ibc-rs/issues/1872)) diff --git a/.changelog/v0.12.0/bug-fixes/ibc/1706-fix-formatting-for-some-tendermint-errors.md b/.changelog/v0.12.0/bug-fixes/ibc/1706-fix-formatting-for-some-tendermint-errors.md deleted file mode 100644 index ad064f0e4c..0000000000 --- a/.changelog/v0.12.0/bug-fixes/ibc/1706-fix-formatting-for-some-tendermint-errors.md +++ /dev/null @@ -1,3 +0,0 @@ -- Fixed the formatting of NotEnoughTimeElapsed and NotEnoughBlocksElapsed - in tendermint errors - ([#1706](https://github.com/informalsystems/ibc-rs/issues/1706)) diff --git a/.changelog/v0.12.0/bug-fixes/ibc/1770-deterministic-host-timestamp.md b/.changelog/v0.12.0/bug-fixes/ibc/1770-deterministic-host-timestamp.md deleted file mode 100644 index 821801926b..0000000000 --- a/.changelog/v0.12.0/bug-fixes/ibc/1770-deterministic-host-timestamp.md +++ /dev/null @@ -1,2 +0,0 @@ -- IBC handlers now retrieve the host timestamp from the latest host consensus - state ([#1770](https://github.com/informalsystems/ibc-rs/issues/1770)) \ No newline at end of file diff --git a/.changelog/v0.12.0/features/1797-ibc-v3.md b/.changelog/v0.12.0/features/1797-ibc-v3.md deleted file mode 100644 index fbc07d90ff..0000000000 --- a/.changelog/v0.12.0/features/1797-ibc-v3.md +++ /dev/null @@ -1,2 +0,0 @@ -- Upgrade protos and compatibility to IBC v3.0.0-rc.0 and Cosmos SDK v0.45.1 - ([#1797](https://github.com/informalsystems/ibc-rs/issues/1797)) diff --git a/.changelog/v0.12.0/features/ibc-relayer-cli/1895-rust-log.md b/.changelog/v0.12.0/features/ibc-relayer-cli/1895-rust-log.md deleted file mode 100644 index 337b05d53a..0000000000 --- 
a/.changelog/v0.12.0/features/ibc-relayer-cli/1895-rust-log.md +++ /dev/null @@ -1,2 +0,0 @@ -- Allow overriding the tracing filter with `RUST_LOG` environment variable - ([#1895](https://github.com/informalsystems/ibc-rs/issues/1895)) \ No newline at end of file diff --git a/.changelog/v0.12.0/improvements/ibc-relayer-cli/1834-clear-packets-cmd.md b/.changelog/v0.12.0/improvements/ibc-relayer-cli/1834-clear-packets-cmd.md deleted file mode 100644 index d9bb22fb4d..0000000000 --- a/.changelog/v0.12.0/improvements/ibc-relayer-cli/1834-clear-packets-cmd.md +++ /dev/null @@ -1,3 +0,0 @@ -- Added `clear packets` command, combining the effects of - `tx raw packet-recv` and `tx raw packet-ack`. - ([#1834](https://github.com/informalsystems/ibc-rs/pull/1834)) diff --git a/.changelog/v0.12.0/improvements/ibc-relayer/1388-more-health-checks.md b/.changelog/v0.12.0/improvements/ibc-relayer/1388-more-health-checks.md deleted file mode 100644 index acd8d55307..0000000000 --- a/.changelog/v0.12.0/improvements/ibc-relayer/1388-more-health-checks.md +++ /dev/null @@ -1,2 +0,0 @@ -- Add two more health checks: tx indexing enabled and historical entries > 0 - ([#1388](https://github.com/informalsystems/ibc-rs/issues/1388)) diff --git a/.changelog/v0.12.0/improvements/ibc-relayer/1880-nonallocating-verions-method.md b/.changelog/v0.12.0/improvements/ibc-relayer/1880-nonallocating-verions-method.md deleted file mode 100644 index ecba32327b..0000000000 --- a/.changelog/v0.12.0/improvements/ibc-relayer/1880-nonallocating-verions-method.md +++ /dev/null @@ -1 +0,0 @@ -- Changed `ConnectionEnd::versions` method to be non-allocating by having it return a `&[Version]` instead of `Vec<Version>`. ([#1880](https://github.com/informalsystems/ibc-rs/pull/1880)) diff --git a/.changelog/v0.12.0/improvements/ibc/1706-add-client-state-tests.md b/.changelog/v0.12.0/improvements/ibc/1706-add-client-state-tests.md deleted file mode 100644 index 4fb330d8ab..0000000000 --- a/.changelog/v0.12.0/improvements/ibc/1706-add-client-state-tests.md +++ /dev/null @@ -1,2 +0,0 @@ -- Added more unit tests to verify Tendermint ClientState - ([#1706](https://github.com/informalsystems/ibc-rs/issues/1706)) \ No newline at end of file diff --git a/.changelog/v0.12.0/improvements/ibc/1769-cap-reader-keeper.md b/.changelog/v0.12.0/improvements/ibc/1769-cap-reader-keeper.md deleted file mode 100644 index 6e8f46352f..0000000000 --- a/.changelog/v0.12.0/improvements/ibc/1769-cap-reader-keeper.md +++ /dev/null @@ -1,2 +0,0 @@ -- Define CapabilityReader and CapabilityKeeper traits - ([#1769](https://github.com/informalsystems/ibc-rs/issues/1769)) \ No newline at end of file diff --git a/.changelog/v0.12.0/summary.md b/.changelog/v0.12.0/summary.md deleted file mode 100644 index 2801cc0b94..0000000000 --- a/.changelog/v0.12.0/summary.md +++ /dev/null @@ -1,13 +0,0 @@ -This release notably brings compatibility with Cosmos SDK 0.45 and IBC v3.0.0, -as well as support for non-standard ports in the channel handshake. -It also contains a fix for a bug where `SendPacket` events were duplicated when emitted at EndBlock, -and fixes another bug where Hermes would clear packets at startup even when `clear_on_start = false`. -Additionally, a new CLI command `clear packets` has been added for clearing packets in both directions on a given channel. -The relayer will now also honor the `tracing` filter specified in the `RUST_LOG` environment variable, if any.
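The `RUST_LOG` override mentioned above follows the usual `tracing-subscriber` `EnvFilter` pattern. The sketch below is not Hermes' actual initialization code, only a minimal illustration of the mechanism, assuming the `tracing` and `tracing-subscriber` crates (the latter with its `env-filter` feature) and an `"info"` fallback directive chosen for the example:

```rust
use tracing_subscriber::EnvFilter;

fn main() {
    // Honor RUST_LOG when it is set (e.g. RUST_LOG=ibc_relayer=debug);
    // otherwise fall back to a default "info" directive.
    let filter = EnvFilter::try_from_default_env()
        .unwrap_or_else(|_| EnvFilter::new("info"));

    tracing_subscriber::fmt()
        .with_env_filter(filter)
        .init();

    tracing::info!("logging initialized");
}
```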
- -### Note for operators - -As of this release, the relayer will not respond to the `SIGHUP` signal and will therefore -not reload the configuration anymore. This feature has been deemed unnecessary given the -recent performance improvements, and it is now recommended to just restart the relayer -when the configuration is updated. diff --git a/.changelog/v0.13.0/bug-fixes/ibc-relayer/1835-ordered-channels.md b/.changelog/v0.13.0/bug-fixes/ibc-relayer/1835-ordered-channels.md deleted file mode 100644 index a326c7f02e..0000000000 --- a/.changelog/v0.13.0/bug-fixes/ibc-relayer/1835-ordered-channels.md +++ /dev/null @@ -1,2 +0,0 @@ -- Fixed relayer behavior on ordered channels - ([#1835](https://github.com/informalsystems/ibc-rs/issues/1835)) \ No newline at end of file diff --git a/.changelog/v0.13.0/bug-fixes/ibc-relayer/1991-packet-worker-chan-open.md b/.changelog/v0.13.0/bug-fixes/ibc-relayer/1991-packet-worker-chan-open.md deleted file mode 100644 index 6f8c9a4418..0000000000 --- a/.changelog/v0.13.0/bug-fixes/ibc-relayer/1991-packet-worker-chan-open.md +++ /dev/null @@ -1,2 +0,0 @@ -- Do not spawn packet worker on chan open ack/confirm events - ([#1991](https://github.com/informalsystems/ibc-rs/issues/1991)) \ No newline at end of file diff --git a/.changelog/v0.13.0/bug-fixes/ibc-relayer/2008-slow-relayer.md b/.changelog/v0.13.0/bug-fixes/ibc-relayer/2008-slow-relayer.md deleted file mode 100644 index b0ca5dfd76..0000000000 --- a/.changelog/v0.13.0/bug-fixes/ibc-relayer/2008-slow-relayer.md +++ /dev/null @@ -1,3 +0,0 @@ -- Fix a bug which would cause the relayer to slow down exponentially when either - the average block time was low or when it was relaying on too many chains at - once ([#2008](https://github.com/informalsystems/ibc-rs/issues/2008)) diff --git a/.changelog/v0.13.0/features/ibc-proto/1913-cosmwasm-support.md b/.changelog/v0.13.0/features/ibc-proto/1913-cosmwasm-support.md deleted file mode 100644 index f5c247276b..0000000000 --- a/.changelog/v0.13.0/features/ibc-proto/1913-cosmwasm-support.md +++ /dev/null @@ -1,5 +0,0 @@ -- Add CosmWasm support to the generated Protobuf code ([#1913](https://github.com/informalsystems/ibc-rs/issues/1913)) - * Add a new `client` feature to gate the tonic client code, implies the `std` feature. - * Add a new `json-schema` feature to derive `schemars::JsonSchema` on some proto types, implies the `std` feature. - * Add `#[serde(default)]` to fields that might be omitted by Golang `omitempty` directive. - * Change serialization of byte arrays to Base64 for compatibility with Go. diff --git a/.changelog/v0.13.0/features/ibc-proto/1988-serde-serialize-deserialize.md b/.changelog/v0.13.0/features/ibc-proto/1988-serde-serialize-deserialize.md deleted file mode 100644 index ed2b6ad8a3..0000000000 --- a/.changelog/v0.13.0/features/ibc-proto/1988-serde-serialize-deserialize.md +++ /dev/null @@ -1,3 +0,0 @@ -- Derive `Serialize` and `Deserialize` for `ibc-proto::ibc::core` and `ibc_proto::ibc::applications` structs, - and switch to Google's Protobuf standard types instead of Prost's types. 
- ([#1988](https://github.com/informalsystems/ibc-rs/issues/1988)) diff --git a/.changelog/v0.13.0/features/ibc-relayer/1908-caching-layer.md b/.changelog/v0.13.0/features/ibc-relayer/1908-caching-layer.md deleted file mode 100644 index a27e275b40..0000000000 --- a/.changelog/v0.13.0/features/ibc-relayer/1908-caching-layer.md +++ /dev/null @@ -1,2 +0,0 @@ -- Added caching layer for hermes start command - ([#1908](https://github.com/informalsystems/ibc-rs/issues/1908)) \ No newline at end of file diff --git a/.changelog/v0.13.0/features/ibc-relayer/1927-packet-filtering-wildcards.md b/.changelog/v0.13.0/features/ibc-relayer/1927-packet-filtering-wildcards.md deleted file mode 100644 index 0b4f8be40f..0000000000 --- a/.changelog/v0.13.0/features/ibc-relayer/1927-packet-filtering-wildcards.md +++ /dev/null @@ -1,3 +0,0 @@ -- Add support for wildcards in port and channel identifiers in the packet filter configuration, - which enable operators to filter ICA channels based on the port prefix - ([#1927](https://github.com/informalsystems/ibc-rs/issues/1927)) diff --git a/.changelog/v0.13.0/improvements/ibc-integration-test/1961-test-framework.md b/.changelog/v0.13.0/improvements/ibc-integration-test/1961-test-framework.md deleted file mode 100644 index 8a998cdd9e..0000000000 --- a/.changelog/v0.13.0/improvements/ibc-integration-test/1961-test-framework.md +++ /dev/null @@ -1 +0,0 @@ -- Split out test framework as new crate `ibc-test-framework` from `ibc-integration-test`. ([#1961](https://github.com/informalsystems/ibc-rs/pull/1961)) diff --git a/.changelog/v0.13.0/improvements/ibc-relayer-cli/1559-cli-output.md b/.changelog/v0.13.0/improvements/ibc-relayer-cli/1559-cli-output.md deleted file mode 100644 index b436c92549..0000000000 --- a/.changelog/v0.13.0/improvements/ibc-relayer-cli/1559-cli-output.md +++ /dev/null @@ -1 +0,0 @@ -- Print packet data on one line ([#1559](https://github.com/informalsystems/ibc-rs/issues/1559)) diff --git a/.changelog/v0.13.0/improvements/ibc-relayer/1908-caching-layer-documentation.md b/.changelog/v0.13.0/improvements/ibc-relayer/1908-caching-layer-documentation.md deleted file mode 100644 index c215b025ce..0000000000 --- a/.changelog/v0.13.0/improvements/ibc-relayer/1908-caching-layer-documentation.md +++ /dev/null @@ -1 +0,0 @@ -- Add documentation for the caching layer implemented in ([#1908](https://github.com/informalsystems/ibc-rs/issues/1908)) diff --git a/.changelog/v0.13.0/improvements/ibc/718-rework-ics04_channel-events.md b/.changelog/v0.13.0/improvements/ibc/718-rework-ics04_channel-events.md deleted file mode 100644 index f3272acf64..0000000000 --- a/.changelog/v0.13.0/improvements/ibc/718-rework-ics04_channel-events.md +++ /dev/null @@ -1,2 +0,0 @@ -- Refactored ics04_channel events - ([#718](https://github.com/informalsystems/ibc-rs/issues/718)) \ No newline at end of file diff --git a/.changelog/v0.13.0/summary.md b/.changelog/v0.13.0/summary.md deleted file mode 100644 index 041f9fce19..0000000000 --- a/.changelog/v0.13.0/summary.md +++ /dev/null @@ -1,59 +0,0 @@ -*March 28th, 2022* - -Hermes v0.13.0 improves performance by lowering the pressure -on the full nodes by adding a caching layer for some queries. -It also fixes a bug which could cause an exponential slowdown -when relaying between many chains with a low average block time. - -This release also add support for wildcards in port and channel identifiers -in the packet filter configuration, which enable operators to filter -ICA channels based on the port prefix. 
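The TOML examples further below show the configuration side of these wildcards; the sketch here only illustrates the anchored-glob semantics behind them (and the kind of fix described later for #2075, where a pattern could otherwise match in the middle of an identifier). It is a hypothetical illustration assuming the `regex` crate, not the relayer's actual filter code, and the helper name `wildcard_to_regex` is invented for the example:

```rust
use regex::Regex;

/// Turn a glob-style pattern such as "ica*" into an anchored regex,
/// so it must match the whole identifier rather than a substring.
fn wildcard_to_regex(pattern: &str) -> Regex {
    let escaped = regex::escape(pattern).replace(r"\*", ".*");
    Regex::new(&format!("^{escaped}$")).expect("valid regex")
}

fn main() {
    let re = wildcard_to_regex("ica*");
    assert!(re.is_match("icahost"));      // port with the "ica" prefix matches
    assert!(!re.is_match("someicaport")); // "ica" in the middle does not
}
```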
- -Additionally, the IBC Protocol Buffers definitions can now be used from CosmWasm. - -## Note for operators - -As of version 0.13.0, Hermes supports relaying on [Interchain Accounts][ica] channels. - -If the `packet_filter` option in the chain configuration is disabled, then -Hermes will relay on all existing and future channels, including ICA channels. - -There are two kinds of ICA channels: - -1. The host channels, whose port is `icahost` -2. The controller channels, whose port starts with `icacontroller-` followed - by the owner account address. [See the spec for more details][ica]. - -If you wish to only relay on a few specific standard channels (here `channel-0` and `channel-1`), -but also relay on all ICA channels, you can specify the following packet filter: - -> Note the use of wildcards in the port and channel identifiers (`['ica*', '*']`) -> to match over all the possible ICA ports. - -```toml -[chains.packet_filter] -policy = 'allow' -list = [ - ['ica*', '*'], # allow relaying on all channels whose port starts with `ica` - ['transfer', 'channel-0'], - ['transfer', 'channel-1'], - # Add any other port/channel pairs you wish to relay on -] -``` - -If you wish to relay on all channels but not on ICA channels, you can use -the following packet filter configuration: - -```toml -[chains.packet_filter] -policy = 'deny' -list = [ - ['ica*', '*'], # deny relaying on all channels whose port starts with `ica` -] -``` - -This information can also be found in the [Hermes guide][guide-ica]. - -[ica]: https://github.com/cosmos/ibc/blob/master/spec/app/ics-027-interchain-accounts/README.md -[guide-ica]: https://hermes.informal.systems/config.html#support-for-interchain-accounts - diff --git a/.changelog/v0.14.0/breaking-changes/2081-msrv-1.60.md b/.changelog/v0.14.0/breaking-changes/2081-msrv-1.60.md deleted file mode 100644 index 5b1b8897d6..0000000000 --- a/.changelog/v0.14.0/breaking-changes/2081-msrv-1.60.md +++ /dev/null @@ -1,2 +0,0 @@ -- Update MSRV to Rust 1.60 - ([#2081](https://github.com/informalsystems/ibc-rs/issues/2081)) diff --git a/.changelog/v0.14.0/bug-fixes/ibc-relayer-cli/1288-upgrade-chain-confirmation.md b/.changelog/v0.14.0/bug-fixes/ibc-relayer-cli/1288-upgrade-chain-confirmation.md deleted file mode 100644 index 656ed75bde..0000000000 --- a/.changelog/v0.14.0/bug-fixes/ibc-relayer-cli/1288-upgrade-chain-confirmation.md +++ /dev/null @@ -1,2 +0,0 @@ -- Skip waiting for confirmation events on tx raw upgrade-chain - ([#1288](https://github.com/informalsystems/ibc-rs/issues/1288)) \ No newline at end of file diff --git a/.changelog/v0.14.0/bug-fixes/ibc-relayer-cli/1921-create-client-options.md b/.changelog/v0.14.0/bug-fixes/ibc-relayer-cli/1921-create-client-options.md deleted file mode 100644 index 2c2135d998..0000000000 --- a/.changelog/v0.14.0/bug-fixes/ibc-relayer-cli/1921-create-client-options.md +++ /dev/null @@ -1,2 +0,0 @@ -- Apply client options specified with the `create client` command. - ([#1921](https://github.com/informalsystems/ibc-rs/issues/1921)) diff --git a/.changelog/v0.14.0/bug-fixes/ibc-relayer/1772-fix-conn-delay-check.md b/.changelog/v0.14.0/bug-fixes/ibc-relayer/1772-fix-conn-delay-check.md deleted file mode 100644 index 3bbf8ce62f..0000000000 --- a/.changelog/v0.14.0/bug-fixes/ibc-relayer/1772-fix-conn-delay-check.md +++ /dev/null @@ -1,2 +0,0 @@ -- Fix the connection delay logic to use the timestamp of the host block when the client update header was installed. 
- ([#1772](https://github.com/informalsystems/ibc-rs/issues/1772)) \ No newline at end of file diff --git a/.changelog/v0.14.0/bug-fixes/ibc-relayer/1792-fix-hermes-retrying-not-regenerating-msgs.md b/.changelog/v0.14.0/bug-fixes/ibc-relayer/1792-fix-hermes-retrying-not-regenerating-msgs.md deleted file mode 100644 index 7e01a0fb21..0000000000 --- a/.changelog/v0.14.0/bug-fixes/ibc-relayer/1792-fix-hermes-retrying-not-regenerating-msgs.md +++ /dev/null @@ -1 +0,0 @@ -- Fixed Hermes retrying mechanism not regenerating operational data for messages ([#1792](https://github.com/informalsystems/ibc-rs/pull/1951)) diff --git a/.changelog/v0.14.0/bug-fixes/ibc-relayer/1998-default-max-block-time.md b/.changelog/v0.14.0/bug-fixes/ibc-relayer/1998-default-max-block-time.md deleted file mode 100644 index c6d688f593..0000000000 --- a/.changelog/v0.14.0/bug-fixes/ibc-relayer/1998-default-max-block-time.md +++ /dev/null @@ -1,2 +0,0 @@ -- Adjusted max_block_time default value to 30s - ([#1998](https://github.com/informalsystems/ibc-rs/issues/1998)) \ No newline at end of file diff --git a/.changelog/v0.14.0/bug-fixes/ibc-relayer/2075-wildcard-filter-middle.md b/.changelog/v0.14.0/bug-fixes/ibc-relayer/2075-wildcard-filter-middle.md deleted file mode 100644 index f8e3a59605..0000000000 --- a/.changelog/v0.14.0/bug-fixes/ibc-relayer/2075-wildcard-filter-middle.md +++ /dev/null @@ -1,2 +0,0 @@ -- Fix a bug in the wildcard filter where pattern would match in the middle of a - string ([#2075](https://github.com/informalsystems/ibc-rs/issues/2075)) \ No newline at end of file diff --git a/.changelog/v0.14.0/bug-fixes/ibc-relayer/2097-misbehavior-height.md b/.changelog/v0.14.0/bug-fixes/ibc-relayer/2097-misbehavior-height.md deleted file mode 100644 index ef2ed9d7ea..0000000000 --- a/.changelog/v0.14.0/bug-fixes/ibc-relayer/2097-misbehavior-height.md +++ /dev/null @@ -1,2 +0,0 @@ -- Fixed target height used in misbehavior detection. - ([#2097](https://github.com/informalsystems/ibc-rs/issues/2097)) \ No newline at end of file diff --git a/.changelog/v0.14.0/bug-fixes/ibc/2035-handler-event-height.md b/.changelog/v0.14.0/bug-fixes/ibc/2035-handler-event-height.md deleted file mode 100644 index 97c3a54a48..0000000000 --- a/.changelog/v0.14.0/bug-fixes/ibc/2035-handler-event-height.md +++ /dev/null @@ -1,2 +0,0 @@ -- Make all handlers emit an IbcEvent with current host chain height as height parameter value. 
- ([#2035](https://github.com/informalsystems/ibc-rs/issues/2035)) \ No newline at end of file diff --git a/.changelog/v0.14.0/bug-fixes/ibc/2062-conn-open-init-version.md b/.changelog/v0.14.0/bug-fixes/ibc/2062-conn-open-init-version.md deleted file mode 100644 index 0d3b661368..0000000000 --- a/.changelog/v0.14.0/bug-fixes/ibc/2062-conn-open-init-version.md +++ /dev/null @@ -1,2 +0,0 @@ -- Use the version in the message when handling a MsgConnOpenInit - ([#2062](https://github.com/informalsystems/ibc-rs/issues/2062)) \ No newline at end of file diff --git a/.changelog/v0.14.0/features/ibc-relayer/2036-caching-metrics.md b/.changelog/v0.14.0/features/ibc-relayer/2036-caching-metrics.md deleted file mode 100644 index 4ba6cb3841..0000000000 --- a/.changelog/v0.14.0/features/ibc-relayer/2036-caching-metrics.md +++ /dev/null @@ -1,2 +0,0 @@ -- Add a metric for query cache hits - ([#2036](https://github.com/informalsystems/ibc-rs/issues/2036)) \ No newline at end of file diff --git a/.changelog/v0.14.0/improvements/1936-missing-chain-warn.md b/.changelog/v0.14.0/improvements/1936-missing-chain-warn.md deleted file mode 100644 index eb510e28df..0000000000 --- a/.changelog/v0.14.0/improvements/1936-missing-chain-warn.md +++ /dev/null @@ -1,2 +0,0 @@ -- Log `missing chain in configuration` errors emitted during event processing at - debug level ([#1936](https://github.com/informalsystems/ibc-rs/issues/1936)) diff --git a/.changelog/v0.14.0/improvements/2045-tendermint-0.23.6.md b/.changelog/v0.14.0/improvements/2045-tendermint-0.23.6.md deleted file mode 100644 index 23784ad668..0000000000 --- a/.changelog/v0.14.0/improvements/2045-tendermint-0.23.6.md +++ /dev/null @@ -1,2 +0,0 @@ -- Update tendermint-rs dependencies to v0.23.6 - ([#2045](https://github.com/informalsystems/ibc-rs/issues/2045)) \ No newline at end of file diff --git a/.changelog/v0.14.0/improvements/ibc-relayer-cli/1421-create-channel-cli.md b/.changelog/v0.14.0/improvements/ibc-relayer-cli/1421-create-channel-cli.md deleted file mode 100644 index a64398d712..0000000000 --- a/.changelog/v0.14.0/improvements/ibc-relayer-cli/1421-create-channel-cli.md +++ /dev/null @@ -1,2 +0,0 @@ -- Change `create channel` CLI command such that it is more difficult to create - clients / connections using it ([#1421](https://github.com/informalsystems/ibc-rs/issues/1421)) diff --git a/.changelog/v0.14.0/improvements/ibc-relayer-cli/2096-query-packet-pending.md b/.changelog/v0.14.0/improvements/ibc-relayer-cli/2096-query-packet-pending.md deleted file mode 100644 index 0ec1253df3..0000000000 --- a/.changelog/v0.14.0/improvements/ibc-relayer-cli/2096-query-packet-pending.md +++ /dev/null @@ -1,4 +0,0 @@ -- Added `query packet pending` command to list outstanding packet - commitments that are either unreceived or pending acknowledgement - at both ends of a channel. 
- ([#1862](https://github.com/informalsystems/ibc-rs/issues/1862)) diff --git a/.changelog/v0.14.0/improvements/ibc/1758-complete-ics26.md b/.changelog/v0.14.0/improvements/ibc/1758-complete-ics26.md deleted file mode 100644 index ebad8cf544..0000000000 --- a/.changelog/v0.14.0/improvements/ibc/1758-complete-ics26.md +++ /dev/null @@ -1 +0,0 @@ -- Complete ICS26 implementation ([#1758](https://github.com/informalsystems/ibc-rs/issues/1758)) \ No newline at end of file diff --git a/.changelog/v0.14.0/improvements/ibc/2068-chan-id-u64.md b/.changelog/v0.14.0/improvements/ibc/2068-chan-id-u64.md deleted file mode 100644 index f9ffd9d8cb..0000000000 --- a/.changelog/v0.14.0/improvements/ibc/2068-chan-id-u64.md +++ /dev/null @@ -1 +0,0 @@ -- Improve `ChannelId` validation. ([#2068](https://github.com/informalsystems/ibc-rs/issues/2068)) diff --git a/.changelog/v0.14.0/summary.md b/.changelog/v0.14.0/summary.md deleted file mode 100644 index 0f5e1996c2..0000000000 --- a/.changelog/v0.14.0/summary.md +++ /dev/null @@ -1,17 +0,0 @@ -This release notably features a new [`query packet pending`][pending] command to -list outstanding packet commitments that are either unreceived or pending -acknowledgement at both ends of a channel. - -The `ibc` crate now also come with a complete [ICS 026][ics-26] implementation. - -### Note for operators - -There is a new `query packet pending` command, see above for more information. - -The `create channel` command now requires an existing client and connection, -unless the `--new-client-connection` flag is provided. -Please [refer to the guide][create-channel] for more information. - -[ics-26]: https://github.com/cosmos/ibc/blob/master/spec/core/ics-026-routing-module/README.md -[pending]: https://hermes.informal.systems/commands/queries/packet.html#pending-packets -[create-channel]: http://hermes.informal.systems/commands/path-setup/channels.html#establish-channel diff --git a/.changelog/v0.14.1/bug-fixes/ibc-relayer/1970-app-latest-height.md b/.changelog/v0.14.1/bug-fixes/ibc-relayer/1970-app-latest-height.md deleted file mode 100644 index 167173977e..0000000000 --- a/.changelog/v0.14.1/bug-fixes/ibc-relayer/1970-app-latest-height.md +++ /dev/null @@ -1,2 +0,0 @@ -- Fixed query for application status when application lags blockchain state. - ([#1970](https://github.com/informalsystems/ibc-rs/issues/1970)) \ No newline at end of file diff --git a/.changelog/v0.14.1/summary.md b/.changelog/v0.14.1/summary.md deleted file mode 100644 index 49c6ac749c..0000000000 --- a/.changelog/v0.14.1/summary.md +++ /dev/null @@ -1,2 +0,0 @@ -This release improves the reliability of the relayer by fixing an edge case where -some queries would fail if they reach a full node after a new block is committed but before the application state updates to reflect the changes in that block. diff --git a/.changelog/v0.15.0/2181-update-codeowners.md b/.changelog/v0.15.0/2181-update-codeowners.md deleted file mode 100644 index 5452abf60e..0000000000 --- a/.changelog/v0.15.0/2181-update-codeowners.md +++ /dev/null @@ -1,2 +0,0 @@ -- Update `.github/CODEOWNERS` file so that each root-level directory is assigned - a default reviewer. 
([#2181](https://github.com/informalsystems/ibc-rs/pull/2181)) diff --git a/.changelog/v0.15.0/bug-fixes/ibc-relayer/1971-non-batch-fix.md b/.changelog/v0.15.0/bug-fixes/ibc-relayer/1971-non-batch-fix.md deleted file mode 100644 index acf9f7a16f..0000000000 --- a/.changelog/v0.15.0/bug-fixes/ibc-relayer/1971-non-batch-fix.md +++ /dev/null @@ -1,2 +0,0 @@ -- Fix a bug where connection and channel handshakes would fail with non-batching transactions - ([#1971](https://github.com/informalsystems/ibc-rs/issues/1971)) diff --git a/.changelog/v0.15.0/bug-fixes/ibc-relayer/2180-client-expiry-time.md b/.changelog/v0.15.0/bug-fixes/ibc-relayer/2180-client-expiry-time.md deleted file mode 100644 index 5bbb0a47eb..0000000000 --- a/.changelog/v0.15.0/bug-fixes/ibc-relayer/2180-client-expiry-time.md +++ /dev/null @@ -1,2 +0,0 @@ -- Fixed client expiry computation to avoid using local time. - ([#2180](https://github.com/informalsystems/ibc-rs/issues/2180)) \ No newline at end of file diff --git a/.changelog/v0.15.0/bug-fixes/ibc/2104-fix-commitment-computation.md b/.changelog/v0.15.0/bug-fixes/ibc/2104-fix-commitment-computation.md deleted file mode 100644 index 570c1b3338..0000000000 --- a/.changelog/v0.15.0/bug-fixes/ibc/2104-fix-commitment-computation.md +++ /dev/null @@ -1,2 +0,0 @@ -- Fix packet commitment calculation to match IBC-Go - ([#2104](https://github.com/informalsystems/ibc-rs/issues/2104)) diff --git a/.changelog/v0.15.0/bug-fixes/ibc/2114-fix-ack-verification.md b/.changelog/v0.15.0/bug-fixes/ibc/2114-fix-ack-verification.md deleted file mode 100644 index 0987d40b6f..0000000000 --- a/.changelog/v0.15.0/bug-fixes/ibc/2114-fix-ack-verification.md +++ /dev/null @@ -1,2 +0,0 @@ -- Fix incorrect acknowledgement verification - ([#2114](https://github.com/informalsystems/ibc-rs/issues/2114)) diff --git a/.changelog/v0.15.0/bug-fixes/ibc/2178-conn-ack-bug-fix.md b/.changelog/v0.15.0/bug-fixes/ibc/2178-conn-ack-bug-fix.md deleted file mode 100644 index af72298e4b..0000000000 --- a/.changelog/v0.15.0/bug-fixes/ibc/2178-conn-ack-bug-fix.md +++ /dev/null @@ -1,2 +0,0 @@ -- Fix connection identifier mix-up in connection acknowledgement processing - ([#2178](https://github.com/informalsystems/ibc-rs/issues/2178)) diff --git a/.changelog/v0.15.0/features/1986-gaia-e2e-tests.md b/.changelog/v0.15.0/features/1986-gaia-e2e-tests.md deleted file mode 100644 index 4b3fef7acf..0000000000 --- a/.changelog/v0.15.0/features/1986-gaia-e2e-tests.md +++ /dev/null @@ -1,2 +0,0 @@ -- Replaced gaia v5 with v7 in E2E tests. 
- ([#1986](https://github.com/informalsystems/ibc-rs/issues/1986)) \ No newline at end of file diff --git a/.changelog/v0.15.0/features/ibc-relayer/2112-new-metrics.md b/.changelog/v0.15.0/features/ibc-relayer/2112-new-metrics.md deleted file mode 100644 index 42e041ed48..0000000000 --- a/.changelog/v0.15.0/features/ibc-relayer/2112-new-metrics.md +++ /dev/null @@ -1,3 +0,0 @@ -- Add six new metrics: `wallet_balance`, `ws_events`, `ws_reconnect`, - `tx_latency_submitted`, `tx_latency_confirmed`, `msg_num` - ([#2112](https://github.com/informalsystems/ibc-rs/issues/2112)) \ No newline at end of file diff --git a/.changelog/v0.15.0/improvements/ibc-relayer/1971-max-msg-num-min-bound.md b/.changelog/v0.15.0/improvements/ibc-relayer/1971-max-msg-num-min-bound.md deleted file mode 100644 index b416ad6d92..0000000000 --- a/.changelog/v0.15.0/improvements/ibc-relayer/1971-max-msg-num-min-bound.md +++ /dev/null @@ -1,2 +0,0 @@ -- Ensure `max_msg_num` is between 1 and 100 with a default of 30 - ([#1971](https://github.com/informalsystems/ibc-rs/issues/1971)) diff --git a/.changelog/v0.15.0/improvements/ibc-relayer/2031-misleading-misbehavior-error.md b/.changelog/v0.15.0/improvements/ibc-relayer/2031-misleading-misbehavior-error.md deleted file mode 100644 index 66941ed5c6..0000000000 --- a/.changelog/v0.15.0/improvements/ibc-relayer/2031-misleading-misbehavior-error.md +++ /dev/null @@ -1,2 +0,0 @@ -- Fixed misleading error message leaking from the misbehavior detection task. - ([#2031](https://github.com/informalsystems/ibc-rs/issues/2031)) \ No newline at end of file diff --git a/.changelog/v0.15.0/improvements/ibc-relayer/2087-incremental-packet-clearing.md b/.changelog/v0.15.0/improvements/ibc-relayer/2087-incremental-packet-clearing.md deleted file mode 100644 index 024797fe2e..0000000000 --- a/.changelog/v0.15.0/improvements/ibc-relayer/2087-incremental-packet-clearing.md +++ /dev/null @@ -1,2 +0,0 @@ -- Added support for incremental processing of packet clearing commands. - ([#2087](https://github.com/informalsystems/ibc-rs/issues/2087)) \ No newline at end of file diff --git a/.changelog/v0.15.0/improvements/ibc-relayer/2192-adr009-impl.md b/.changelog/v0.15.0/improvements/ibc-relayer/2192-adr009-impl.md deleted file mode 100644 index ab1172cbcf..0000000000 --- a/.changelog/v0.15.0/improvements/ibc-relayer/2192-adr009-impl.md +++ /dev/null @@ -1,2 +0,0 @@ -- Implement ADR 9: add domain type for request messages that are passed to query - functions ([#2192](https://github.com/informalsystems/ibc-rs/issues/2192)) \ No newline at end of file diff --git a/.changelog/v0.15.0/improvements/ibc/2159-remove-ocaps.md b/.changelog/v0.15.0/improvements/ibc/2159-remove-ocaps.md deleted file mode 100644 index 54c3531742..0000000000 --- a/.changelog/v0.15.0/improvements/ibc/2159-remove-ocaps.md +++ /dev/null @@ -1,2 +0,0 @@ -- Remove object capabilities from the modules - ([#2159](https://github.com/informalsystems/ibc-rs/issues/2159)) \ No newline at end of file diff --git a/.changelog/v0.15.0/summary.md b/.changelog/v0.15.0/summary.md deleted file mode 100644 index 0252036cb8..0000000000 --- a/.changelog/v0.15.0/summary.md +++ /dev/null @@ -1,4 +0,0 @@ -This release brings a number of bug fixes, some performance improvements, -notably when [clearing packets](https://github.com/informalsystems/ibc-rs/issues/2087), -as well as [new metrics](https://github.com/informalsystems/ibc-rs/issues/2112) -for better observability of the relayer's operations. 
diff --git a/.changelog/v0.6.2/bug-fixes/1247-add-missing-protobuf-impl.md b/.changelog/v0.6.2/bug-fixes/1247-add-missing-protobuf-impl.md deleted file mode 100644 index b4722bd865..0000000000 --- a/.changelog/v0.6.2/bug-fixes/1247-add-missing-protobuf-impl.md +++ /dev/null @@ -1,3 +0,0 @@ -- Add missing `Protobuf` impl for `ics03_connection::connection::Counterparty` ([#1247]) - -[#1247]: https://github.com/informalsystems/ibc-rs/issues/1247 diff --git a/.changelog/v0.6.2/features/1020-augment-error-type.md b/.changelog/v0.6.2/features/1020-augment-error-type.md deleted file mode 100644 index 4fbba672cc..0000000000 --- a/.changelog/v0.6.2/features/1020-augment-error-type.md +++ /dev/null @@ -1,3 +0,0 @@ -- Augment ClientCreationFailed error with chain id and WS address ([#1020]) - -[#1020]: https://github.com/informalsystems/ibc-rs/issues/1020 diff --git a/.changelog/v0.6.2/features/1021-cli-indicate-config-file-error.md b/.changelog/v0.6.2/features/1021-cli-indicate-config-file-error.md deleted file mode 100644 index ff5de3ccb2..0000000000 --- a/.changelog/v0.6.2/features/1021-cli-indicate-config-file-error.md +++ /dev/null @@ -1,3 +0,0 @@ -- Improve the error message for config file parse errors ([#1021]) - -[#1021]: https://github.com/informalsystems/ibc-rs/issues/1021 \ No newline at end of file diff --git a/.changelog/v0.6.2/features/1229-upgrade-clis.md b/.changelog/v0.6.2/features/1229-upgrade-clis.md deleted file mode 100644 index 980128a25f..0000000000 --- a/.changelog/v0.6.2/features/1229-upgrade-clis.md +++ /dev/null @@ -1,3 +0,0 @@ -- Fix for upgrade CLI regression using new type ics02::TrustThreshold ([#1229]) - -[#1229]: https://github.com/informalsystems/ibc-rs/issues/1229 diff --git a/.changelog/v0.6.2/features/988-flex-error.md b/.changelog/v0.6.2/features/988-flex-error.md deleted file mode 100644 index d89a1b6525..0000000000 --- a/.changelog/v0.6.2/features/988-flex-error.md +++ /dev/null @@ -1,4 +0,0 @@ -- Use the [`flex-error`](https://docs.rs/flex-error/) crate to define and -handle errors ([#1158]) - -[#1158]: https://github.com/informalsystems/ibc-rs/issues/1158 diff --git a/.changelog/v0.6.2/improvements/1245-max-params-validation.md b/.changelog/v0.6.2/improvements/1245-max-params-validation.md deleted file mode 100644 index f0205f6b39..0000000000 --- a/.changelog/v0.6.2/improvements/1245-max-params-validation.md +++ /dev/null @@ -1,3 +0,0 @@ -- Add semantic validation of of `max_tx_size` and `max_num_msg` config options ([#1245]) - -[#1245]: https://github.com/informalsystems/ibc-rs/issues/1245 diff --git a/.changelog/v0.6.2/summary.md b/.changelog/v0.6.2/summary.md deleted file mode 100644 index dd59d511d1..0000000000 --- a/.changelog/v0.6.2/summary.md +++ /dev/null @@ -1,5 +0,0 @@ -This minor release of Hermes re-enables the `upgrade client`, `upgrade clients`, -`tx raw upgrade-clients`, and `tx raw upgrade-chain`, and otherwise -contains a few bug fixes and internal improvements. - -Upgrading from version `0.6.1` to `0.6.2` requires no explicit steps. 
diff --git a/.changelog/v0.7.0/bug-fixes/1261-gm-req-detect.md b/.changelog/v0.7.0/bug-fixes/1261-gm-req-detect.md deleted file mode 100644 index 2361683873..0000000000 --- a/.changelog/v0.7.0/bug-fixes/1261-gm-req-detect.md +++ /dev/null @@ -1,5 +0,0 @@ -- [gm] Fix silent exit when requirements are missing - -[#1261]: https://github.com/informalsystems/ibc-rs/issues/1261 - - diff --git a/.changelog/v0.7.0/bug-fixes/1285-fix-typeok-bug.md b/.changelog/v0.7.0/bug-fixes/1285-fix-typeok-bug.md deleted file mode 100644 index 44dee3d0de..0000000000 --- a/.changelog/v0.7.0/bug-fixes/1285-fix-typeok-bug.md +++ /dev/null @@ -1,4 +0,0 @@ -- Fix small typo in IBC.tla specification - -[#1285]: https://github.com/informalsystems/ibc-rs/pull/1285 - diff --git a/.changelog/v0.7.0/bug-fixes/ibc/1257-set-capability-index.md b/.changelog/v0.7.0/bug-fixes/ibc/1257-set-capability-index.md deleted file mode 100644 index 2c78951c52..0000000000 --- a/.changelog/v0.7.0/bug-fixes/ibc/1257-set-capability-index.md +++ /dev/null @@ -1,3 +0,0 @@ -- Set the index of `ibc::ics05_port::capabilities::Capability` ([#1257]) - -[#1257]: https://github.com/informalsystems/ibc-rs/issues/1257 diff --git a/.changelog/v0.7.0/features/1065-keys-delete.md b/.changelog/v0.7.0/features/1065-keys-delete.md deleted file mode 100644 index 52a132c11d..0000000000 --- a/.changelog/v0.7.0/features/1065-keys-delete.md +++ /dev/null @@ -1,3 +0,0 @@ -- Added `keys delete` CLI command ([#1065]) - -[#1065]: https://github.com/informalsystems/ibc-rs/issues/1065 diff --git a/.changelog/v0.7.0/features/1175-update-ci.md b/.changelog/v0.7.0/features/1175-update-ci.md deleted file mode 100644 index 52b9f44b74..0000000000 --- a/.changelog/v0.7.0/features/1175-update-ci.md +++ /dev/null @@ -1,4 +0,0 @@ -- Update CI to test with gaiad v5.0.5 ([#1175]) - - -[#1175]: https://github.com/informalsystems/ibc-rs/issues/1175 diff --git a/.changelog/v0.7.0/features/1287-upgrade-legacy.md b/.changelog/v0.7.0/features/1287-upgrade-legacy.md deleted file mode 100644 index 2ff89373cf..0000000000 --- a/.changelog/v0.7.0/features/1287-upgrade-legacy.md +++ /dev/null @@ -1,3 +0,0 @@ -- Add `--legacy | -l` flag to support upgrades for chains built with Cosmos SDK < v0.43.0 ([#1287]) - -[#1287]: https://github.com/informalsystems/ibc-rs/issues/1287 diff --git a/.changelog/v0.7.0/features/843-rest-api.md b/.changelog/v0.7.0/features/843-rest-api.md deleted file mode 100644 index 6a3866ece1..0000000000 --- a/.changelog/v0.7.0/features/843-rest-api.md +++ /dev/null @@ -1,3 +0,0 @@ -- Expose the Hermes config and internal state over a REST API ([#843]) - -[#843]: https://github.com/informalsystems/ibc-rs/issues/843 diff --git a/.changelog/v0.7.0/features/901-conditionally-spawn-worker.md b/.changelog/v0.7.0/features/901-conditionally-spawn-worker.md deleted file mode 100644 index f953aeb8c1..0000000000 --- a/.changelog/v0.7.0/features/901-conditionally-spawn-worker.md +++ /dev/null @@ -1,3 +0,0 @@ -- Spawn packet workers only when there are outstanding packets or acknowledgements to relay ([#901]) - -[#901]: https://github.com/informalsystems/ibc-rs/issues/901 diff --git a/.changelog/v0.7.0/features/948-upgrade-to-cosmos-sdk-v0.43.md b/.changelog/v0.7.0/features/948-upgrade-to-cosmos-sdk-v0.43.md deleted file mode 100644 index 8b558e883e..0000000000 --- a/.changelog/v0.7.0/features/948-upgrade-to-cosmos-sdk-v0.43.md +++ /dev/null @@ -1,3 +0,0 @@ -- Upgrade to Cosmos SDK proto (v0.43.0) & ibc-go proto (v1.0.0) ([#948]) - -- [#948]: 
https://github.com/informalsystems/ibc-rs/pull/948 diff --git a/.changelog/v0.7.0/improvements/1132-query-channels-filter.md b/.changelog/v0.7.0/improvements/1132-query-channels-filter.md deleted file mode 100644 index fa73b5d663..0000000000 --- a/.changelog/v0.7.0/improvements/1132-query-channels-filter.md +++ /dev/null @@ -1,4 +0,0 @@ -- Add optional destination chain and `--verbose` options for `query channels` CLI ([#1132]) - -[#1132]: https://github.com/informalsystems/ibc-rs/issues/1132 - diff --git a/.changelog/v0.7.0/improvements/1191-ica-compat.md b/.changelog/v0.7.0/improvements/1191-ica-compat.md deleted file mode 100644 index b189173405..0000000000 --- a/.changelog/v0.7.0/improvements/1191-ica-compat.md +++ /dev/null @@ -1,4 +0,0 @@ -- Improve support for Interchain Accounts (ICS 027) ([#1191]) - -[#1191]: https://github.com/informalsystems/ibc-rs/issues/1191 - diff --git a/.changelog/v0.7.0/improvements/1249-update-modelator.md b/.changelog/v0.7.0/improvements/1249-update-modelator.md deleted file mode 100644 index 7912ece5fd..0000000000 --- a/.changelog/v0.7.0/improvements/1249-update-modelator.md +++ /dev/null @@ -1,3 +0,0 @@ -- Update Modelator to 0.2.0 ([#1249]) - -[#1249]: https://github.com/informalsystems/ibc-rs/pull/1249 diff --git a/.changelog/v0.7.0/improvements/1265-async-tx-confirmation.md b/.changelog/v0.7.0/improvements/1265-async-tx-confirmation.md deleted file mode 100644 index 59cacc2c1b..0000000000 --- a/.changelog/v0.7.0/improvements/1265-async-tx-confirmation.md +++ /dev/null @@ -1,5 +0,0 @@ -- Improve performance and reliability of the relayer by asynchronously waiting for tx confirmations ([#1124], [#1265]) - -[#1124]: https://github.com/informalsystems/ibc-rs/issues/1124 -[#1265]: https://github.com/informalsystems/ibc-rs/issues/1265 - diff --git a/.changelog/v0.7.0/improvements/ibc/1297-impl-consensus-state.md b/.changelog/v0.7.0/improvements/ibc/1297-impl-consensus-state.md deleted file mode 100644 index 4719175311..0000000000 --- a/.changelog/v0.7.0/improvements/ibc/1297-impl-consensus-state.md +++ /dev/null @@ -1,3 +0,0 @@ -- Implement `ics02_client::client_consensus::ConsensusState` for `AnyConsensusState` ([#1297]) - -[#1297]: https://github.com/informalsystems/ibc-rs/issues/1297 diff --git a/.changelog/v0.7.0/summary.md b/.changelog/v0.7.0/summary.md deleted file mode 100644 index e098cdbc39..0000000000 --- a/.changelog/v0.7.0/summary.md +++ /dev/null @@ -1,3 +0,0 @@ -This release of Hermes is the first to be compatible with the development version of Cosmos SDK 0.43. -Hermes 0.7.0 also improves the performance and reliability of the relayer, notably by waiting asynchronously for transactions to be confirmed. -Additionnally, Hermes now includes a REST server which exposes the relayer's internal state over HTTP. 
diff --git a/.changelog/v0.7.1/bug-fixes/1312-fix-gm-stderr.md b/.changelog/v0.7.1/bug-fixes/1312-fix-gm-stderr.md deleted file mode 100644 index 34ab0696fa..0000000000 --- a/.changelog/v0.7.1/bug-fixes/1312-fix-gm-stderr.md +++ /dev/null @@ -1,7 +0,0 @@ - -- [gm](scripts/gm) - - Fix gaiad keys add prints to stderr instead of stdout in SDK 0.43 ([#1312]) - - Bumped default rpc_timeout in Hermes config to 5s ([#1312]) - -[#1312]: https://github.com/informalsystems/ibc-rs/issues/1312 - diff --git a/.changelog/v0.7.1/bug-fixes/1343-fix-header-decoding-error.md b/.changelog/v0.7.1/bug-fixes/1343-fix-header-decoding-error.md deleted file mode 100644 index 30aaea171a..0000000000 --- a/.changelog/v0.7.1/bug-fixes/1343-fix-header-decoding-error.md +++ /dev/null @@ -1 +0,0 @@ -- Fix header decoding error which resulted in killing the chain runtime ([#1342](https://github.com/informalsystems/ibc-rs/issues/1342)) diff --git a/.changelog/v0.7.1/features/1267-ethermint-support.md b/.changelog/v0.7.1/features/1267-ethermint-support.md deleted file mode 100644 index 602c516bee..0000000000 --- a/.changelog/v0.7.1/features/1267-ethermint-support.md +++ /dev/null @@ -1,4 +0,0 @@ -- Added post-Stargate (v0.5+) Ethermint support ([#1267] [#1071]) - -[#1267]: https://github.com/informalsystems/ibc-rs/issues/1267 -[#1071]: https://github.com/informalsystems/ibc-rs/issues/1071 diff --git a/.changelog/v0.7.1/improvements/1281-derive-traits-module-errors.md b/.changelog/v0.7.1/improvements/1281-derive-traits-module-errors.md deleted file mode 100644 index 5349faf775..0000000000 --- a/.changelog/v0.7.1/improvements/1281-derive-traits-module-errors.md +++ /dev/null @@ -1,4 +0,0 @@ -- Derive `Debug`, `PartialEq` and `Eq` traits for module errors ([#1281]) - -[#1281]: https://github.com/informalsystems/ibc-rs/issues/1281 - diff --git a/.changelog/v0.7.1/improvements/1311-mbt-test-client-upgrade.md b/.changelog/v0.7.1/improvements/1311-mbt-test-client-upgrade.md deleted file mode 100644 index d93be65047..0000000000 --- a/.changelog/v0.7.1/improvements/1311-mbt-test-client-upgrade.md +++ /dev/null @@ -1,4 +0,0 @@ -- Add MBT tests for ICS 07 Client Upgrade ([#1311]) - -[#1311]: https://github.com/informalsystems/ibc-rs/issues/1311 - diff --git a/.changelog/v0.7.1/improvements/1319-u256-amount-transfer.md b/.changelog/v0.7.1/improvements/1319-u256-amount-transfer.md deleted file mode 100644 index 85f8bdfd08..0000000000 --- a/.changelog/v0.7.1/improvements/1319-u256-amount-transfer.md +++ /dev/null @@ -1,4 +0,0 @@ -- Add support for uint256 transfer amounts ([#1319]) - -[#1319]: https://github.com/informalsystems/ibc-rs/issues/1319 - diff --git a/.changelog/v0.7.1/improvements/ibc/1268-reader-result.md b/.changelog/v0.7.1/improvements/ibc/1268-reader-result.md deleted file mode 100644 index c477be4df4..0000000000 --- a/.changelog/v0.7.1/improvements/ibc/1268-reader-result.md +++ /dev/null @@ -1,3 +0,0 @@ -- Change all `*Reader` traits to return `Result` instead of `Option` ([#1268]) - -[#1268]: https://github.com/informalsystems/ibc-rs/issues/1268 diff --git a/.changelog/v0.7.1/improvements/ibc/1333-modules-error.md b/.changelog/v0.7.1/improvements/ibc/1333-modules-error.md deleted file mode 100644 index 103394ce6f..0000000000 --- a/.changelog/v0.7.1/improvements/ibc/1333-modules-error.md +++ /dev/null @@ -1,9 +0,0 @@ - -- Clean up modules' errors ([#1333]) - -[#1333]: https://github.com/informalsystems/ibc-rs/issues/1333 diff --git a/.changelog/v0.7.1/summary.md b/.changelog/v0.7.1/summary.md deleted file mode 100644 
index d5a6a6d523..0000000000 --- a/.changelog/v0.7.1/summary.md +++ /dev/null @@ -1,2 +0,0 @@ -This minor release of Hermes notably features support for Ethermint chains and transfer amounts expressed as a 256-bit unsigned integer. -This release also fixes a bug where the chain runtime within the relayer would crash when failing to decode a invalid header included in a `ClientUpdate` IBC event. diff --git a/.changelog/v0.7.2/features/1155-secp256k1-signatures.md b/.changelog/v0.7.2/features/1155-secp256k1-signatures.md deleted file mode 100644 index ba8f34df47..0000000000 --- a/.changelog/v0.7.2/features/1155-secp256k1-signatures.md +++ /dev/null @@ -1 +0,0 @@ -- Support for chains which use Secp256k1 signatures in consensus votes ([#1155](https://github.com/informalsystems/ibc-rs/issues/1155)) diff --git a/.changelog/v0.7.2/features/1290-stubborn-workers.md b/.changelog/v0.7.2/features/1290-stubborn-workers.md deleted file mode 100644 index d89e80f338..0000000000 --- a/.changelog/v0.7.2/features/1290-stubborn-workers.md +++ /dev/null @@ -1,2 +0,0 @@ -- Modified packet worker to use stubborn strategy ([#1290](https://github.com/informalsystems/ibc-rs/issues/1290)) - diff --git a/.changelog/v0.7.2/features/1362-skip-consensus-states.md b/.changelog/v0.7.2/features/1362-skip-consensus-states.md deleted file mode 100644 index 44b0e49cfa..0000000000 --- a/.changelog/v0.7.2/features/1362-skip-consensus-states.md +++ /dev/null @@ -1 +0,0 @@ -- Skip `consensus_heights` query in `update_client` when possible ([#1362](https://github.com/informalsystems/ibc-rs/issues/1362)) diff --git a/.changelog/v0.7.2/features/1371-gm-features.md b/.changelog/v0.7.2/features/1371-gm-features.md deleted file mode 100644 index d82b27464f..0000000000 --- a/.changelog/v0.7.2/features/1371-gm-features.md +++ /dev/null @@ -1,4 +0,0 @@ - -- [gm](scripts/gm) - - Binaries in the config can be defined as URLs now. - - Add the option to set gm-lib path via the $GM_LIB environment variable ([#1365](https://github.com/informalsystems/ibc-rs/issues/1365)) diff --git a/.changelog/v0.7.2/features/1380-toggle-confirmations.md b/.changelog/v0.7.2/features/1380-toggle-confirmations.md deleted file mode 100644 index 195ef85191..0000000000 --- a/.changelog/v0.7.2/features/1380-toggle-confirmations.md +++ /dev/null @@ -1 +0,0 @@ -- Support for disabling tx confirmation mechanism ([#1380](https://github.com/informalsystems/ibc-rs/issues/1380)) diff --git a/.changelog/v0.7.2/improvements/1156-use-core-alloc.md b/.changelog/v0.7.2/improvements/1156-use-core-alloc.md deleted file mode 100644 index 672c0731dc..0000000000 --- a/.changelog/v0.7.2/improvements/1156-use-core-alloc.md +++ /dev/null @@ -1 +0,0 @@ -- Use `core` and `alloc` crates for `no_std` compatibility ([#1156](https://github.com/informalsystems/ibc-rs/issues/1156)) diff --git a/.changelog/v0.7.2/improvements/1336-better-health-check.md b/.changelog/v0.7.2/improvements/1336-better-health-check.md deleted file mode 100644 index 4d27362957..0000000000 --- a/.changelog/v0.7.2/improvements/1336-better-health-check.md +++ /dev/null @@ -1,2 +0,0 @@ -- Improve performance of health check, and only perform it on `hermes start`. - Add a `hermes health-check` command. 
([#1336](https://github.com/informalsystems/ibc-rs/issues/1336)) diff --git a/.changelog/v0.7.2/improvements/1337-semver-pre-compat.md b/.changelog/v0.7.2/improvements/1337-semver-pre-compat.md deleted file mode 100644 index 8ecc3d68cf..0000000000 --- a/.changelog/v0.7.2/improvements/1337-semver-pre-compat.md +++ /dev/null @@ -1 +0,0 @@ -- Treat pre-releases of the Cosmos SDK as their standard version in compatibility check ([#1337](https://github.com/informalsystems/ibc-rs/issues/1337)) diff --git a/.changelog/v0.7.2/improvements/1344-bump-compat-0.44.md b/.changelog/v0.7.2/improvements/1344-bump-compat-0.44.md deleted file mode 100644 index 2448d2ad84..0000000000 --- a/.changelog/v0.7.2/improvements/1344-bump-compat-0.44.md +++ /dev/null @@ -1 +0,0 @@ -- Bump Cosmos SDK compatibility to v0.44.0 ([#1344](https://github.com/informalsystems/ibc-rs/issues/1344)) diff --git a/.changelog/v0.7.2/improvements/1376-consensus-params-explicit-height.md b/.changelog/v0.7.2/improvements/1376-consensus-params-explicit-height.md deleted file mode 100644 index dce8e9f170..0000000000 --- a/.changelog/v0.7.2/improvements/1376-consensus-params-explicit-height.md +++ /dev/null @@ -1 +0,0 @@ -- Improve reliability of health check ([#1382](https://github.com/informalsystems/ibc-rs/issues/1376)) diff --git a/.changelog/v0.7.2/summary.md b/.changelog/v0.7.2/summary.md deleted file mode 100644 index 944ef18229..0000000000 --- a/.changelog/v0.7.2/summary.md +++ /dev/null @@ -1,4 +0,0 @@ -This minor release brings substantial performance improvements as well as -support for chains using Secp256k1 signatures in consensus votes. - -It also bumps the compatibility to Cosmos SDK 0.44. diff --git a/.changelog/v0.7.3/bug-fixes/ibc-relayer/1345-fix-tx-simulation-0.42.md b/.changelog/v0.7.3/bug-fixes/ibc-relayer/1345-fix-tx-simulation-0.42.md deleted file mode 100644 index 430dc7a65b..0000000000 --- a/.changelog/v0.7.3/bug-fixes/ibc-relayer/1345-fix-tx-simulation-0.42.md +++ /dev/null @@ -1,4 +0,0 @@ -- Fix a bug introduced in Hermes v0.7.0 where tx simulations would fail on - chains based on Cosmos SDK 0.42. This would cause Hermes to use the max - gas specified in the config when submitted the tx, leading to high fees. - ([#1345](https://github.com/informalsystems/ibc-rs/issues/1345)) \ No newline at end of file diff --git a/.changelog/v0.7.3/bug-fixes/ibc-relayer/1402-fix-account-seq-error-case.md b/.changelog/v0.7.3/bug-fixes/ibc-relayer/1402-fix-account-seq-error-case.md deleted file mode 100644 index 509f3d3067..0000000000 --- a/.changelog/v0.7.3/bug-fixes/ibc-relayer/1402-fix-account-seq-error-case.md +++ /dev/null @@ -1,3 +0,0 @@ -- Only increase cached account sequence number when `broadcast_tx_sync` fails, - therefore ensuring that the cached sequence number stays in sync with the - node. 
([#1402](https://github.com/informalsystems/ibc-rs/issues/1402)) \ No newline at end of file diff --git a/.changelog/v0.7.3/improvements/ibc-relayer/1392-trusting-period-default.md b/.changelog/v0.7.3/improvements/ibc-relayer/1392-trusting-period-default.md deleted file mode 100644 index 42d997a229..0000000000 --- a/.changelog/v0.7.3/improvements/ibc-relayer/1392-trusting-period-default.md +++ /dev/null @@ -1,2 +0,0 @@ -- Set default trusting period to be 2/3 of unbonding period for Cosmos chains - ([#1392](https://github.com/informalsystems/ibc-rs/issues/1392)) \ No newline at end of file diff --git a/.changelog/v0.7.3/summary.md b/.changelog/v0.7.3/summary.md deleted file mode 100644 index 0f3643fc99..0000000000 --- a/.changelog/v0.7.3/summary.md +++ /dev/null @@ -1,4 +0,0 @@ -This minor release most notably includes a fix for a bug introduced in v0.7.0 -where Hermes would always use the max gas when submitting transactions to -chains based on Cosmos SDK <= 0.42. -It also improves the handling of account sequence numbers diff --git a/.changelog/v0.8.0-pre.1/breaking-changes/ibc/1214-ics07.md b/.changelog/v0.8.0-pre.1/breaking-changes/ibc/1214-ics07.md deleted file mode 100644 index 42080cdefc..0000000000 --- a/.changelog/v0.8.0-pre.1/breaking-changes/ibc/1214-ics07.md +++ /dev/null @@ -1,3 +0,0 @@ -- The `check_header_and_update_state` method of the `ClientDef` - trait (ICS02) has been expanded to facilitate ICS07 - ([#1214](https://github.com/informalsystems/ibc-rs/issues/1214)) \ No newline at end of file diff --git a/.changelog/v0.8.0-pre.1/features/1433-memo-field.md b/.changelog/v0.8.0-pre.1/features/1433-memo-field.md deleted file mode 100644 index c25e653426..0000000000 --- a/.changelog/v0.8.0-pre.1/features/1433-memo-field.md +++ /dev/null @@ -1,3 +0,0 @@ -- Add support for the `tx.memo` field ([#1433]) - -[#1433]: https://github.com/informalsystems/ibc-rs/issues/1433 diff --git a/.changelog/v0.8.0-pre.1/features/ibc-relayer/1457-default-gas.md b/.changelog/v0.8.0-pre.1/features/ibc-relayer/1457-default-gas.md deleted file mode 100644 index 94b731220e..0000000000 --- a/.changelog/v0.8.0-pre.1/features/ibc-relayer/1457-default-gas.md +++ /dev/null @@ -1,2 +0,0 @@ -- Add a `default_gas` setting to be used for submitting a tx when tx simulation - fails ([#1457](https://github.com/informalsystems/ibc-rs/issues/1457)) \ No newline at end of file diff --git a/.changelog/v0.8.0-pre.1/features/ibc-relayer/1464-ibc-go-check.md b/.changelog/v0.8.0-pre.1/features/ibc-relayer/1464-ibc-go-check.md deleted file mode 100644 index 69a75ed2ad..0000000000 --- a/.changelog/v0.8.0-pre.1/features/ibc-relayer/1464-ibc-go-check.md +++ /dev/null @@ -1,2 +0,0 @@ -- Update compatibility check for IBC-Go dependency - ([#1464](https://github.com/informalsystems/ibc-rs/issues/1464)) \ No newline at end of file diff --git a/.changelog/v0.8.0-pre.1/features/ibc/1214-ics07.md b/.changelog/v0.8.0-pre.1/features/ibc/1214-ics07.md deleted file mode 100644 index 460b04b45a..0000000000 --- a/.changelog/v0.8.0-pre.1/features/ibc/1214-ics07.md +++ /dev/null @@ -1,2 +0,0 @@ -- Add ICS07 verification functionality by using `tendermint-light-client` - ([#1214](https://github.com/informalsystems/ibc-rs/issues/1214)) diff --git a/.changelog/v0.8.0-pre.1/improvements/ibc-relayer/1231-begin-end-block-events.md b/.changelog/v0.8.0-pre.1/improvements/ibc-relayer/1231-begin-end-block-events.md deleted file mode 100644 index 4f8f5af6a5..0000000000 --- 
a/.changelog/v0.8.0-pre.1/improvements/ibc-relayer/1231-begin-end-block-events.md +++ /dev/null @@ -1,2 +0,0 @@ -- Handle SendPacket events originating from Tendermint ABCI's BeginBlock - and EndBlock methods ([#1231](https://github.com/informalsystems/ibc-rs/issues/1231)) diff --git a/.changelog/v0.8.0-pre.1/improvements/ibc-relayer/1440-improve-error-msg-create-client.md b/.changelog/v0.8.0-pre.1/improvements/ibc-relayer/1440-improve-error-msg-create-client.md deleted file mode 100644 index eb6c3d8170..0000000000 --- a/.changelog/v0.8.0-pre.1/improvements/ibc-relayer/1440-improve-error-msg-create-client.md +++ /dev/null @@ -1,3 +0,0 @@ -- Improve error message when `create client` fails and add a health - check for the trusting period being smaller than the unbonding period - ([#1440](https://github.com/informalsystems/ibc-rs/issues/1440)) diff --git a/.changelog/v0.8.0-pre.1/summary.md b/.changelog/v0.8.0-pre.1/summary.md deleted file mode 100644 index 8ba6721527..0000000000 --- a/.changelog/v0.8.0-pre.1/summary.md +++ /dev/null @@ -1,10 +0,0 @@ -This is a pre-release which depends on forks of various Rust libraries. -As such, it is advised to avoid depending on the `ibc` and `ibc-relayer` crates -at version 0.8.0-pre.1. - -However, Hermes v0.8.0-pre.1 is considered stable and it is recommended for all -users to update to this version. - -This release notably includes a new [`memo_prefix`][memo] configuration option -for specifying a prefix to be include in the memo of each transaction submitted -by Hermes. diff --git a/.changelog/v0.8.0/breaking-changes/1519-msrv-1.56.md b/.changelog/v0.8.0/breaking-changes/1519-msrv-1.56.md deleted file mode 100644 index 3fec5b405c..0000000000 --- a/.changelog/v0.8.0/breaking-changes/1519-msrv-1.56.md +++ /dev/null @@ -1,2 +0,0 @@ -- Update MSRV to Rust 1.56 and use the 2021 edition - ([#1519](https://github.com/informalsystems/ibc-rs/issues/1519)) \ No newline at end of file diff --git a/.changelog/v0.8.0/bug-fixes/1445-clock-drift.md b/.changelog/v0.8.0/bug-fixes/1445-clock-drift.md deleted file mode 100644 index a92f3b9798..0000000000 --- a/.changelog/v0.8.0/bug-fixes/1445-clock-drift.md +++ /dev/null @@ -1,9 +0,0 @@ -- Fix for client state clock drift [#1445]: - * Added new config param `max_clock_drift` to prevent - the problem for appearing in newly-created clients. - * Added a synchronos waiting in client update logic - to allow destination chain to reach a new height - before submitting a client update message. 
- - -[#1445]: https://github.com/informalsystems/ibc-rs/issues/1445 diff --git a/.changelog/v0.8.0/bug-fixes/1504-timeout_check.md b/.changelog/v0.8.0/bug-fixes/1504-timeout_check.md deleted file mode 100644 index 66bfdde2c1..0000000000 --- a/.changelog/v0.8.0/bug-fixes/1504-timeout_check.md +++ /dev/null @@ -1,2 +0,0 @@ -- Fix for packet timeout computation - ([#1504](https://github.com/informalsystems/ibc-rs/issues/1504)) \ No newline at end of file diff --git a/.changelog/v0.8.0/improvements/1417-update-client-misbehavior-perf.md b/.changelog/v0.8.0/improvements/1417-update-client-misbehavior-perf.md deleted file mode 100644 index 40e88bed72..0000000000 --- a/.changelog/v0.8.0/improvements/1417-update-client-misbehavior-perf.md +++ /dev/null @@ -1,2 +0,0 @@ -- Improve performance of misbehaviour checks triggered by an `UpdateClient` - event ([#1417](https://github.com/informalsystems/ibc-rs/issues/1417)) \ No newline at end of file diff --git a/.changelog/v0.8.0/improvements/1502-update-prost-09.md b/.changelog/v0.8.0/improvements/1502-update-prost-09.md deleted file mode 100644 index 8ec716b587..0000000000 --- a/.changelog/v0.8.0/improvements/1502-update-prost-09.md +++ /dev/null @@ -1,2 +0,0 @@ -- Update to official releases of `prost` 0.9 and `tonic` 0.6 - ([#1502](https://github.com/informalsystems/ibc-rs/issues/1502)) \ No newline at end of file diff --git a/.changelog/v0.8.0/improvements/ibc/1436-restructure-to-match-ibc-go.md b/.changelog/v0.8.0/improvements/ibc/1436-restructure-to-match-ibc-go.md deleted file mode 100644 index 74e62cc066..0000000000 --- a/.changelog/v0.8.0/improvements/ibc/1436-restructure-to-match-ibc-go.md +++ /dev/null @@ -1,4 +0,0 @@ -- Restructure the layout of the `ibc` crate to match `ibc-go`'s [layout][ibc-go-layout] ([#1436][issue-1436]). - -[issue-1436]: https://github.com/informalsystems/ibc-rs/issues/1436 -[ibc-go-layout]: https://github.com/cosmos/ibc-go#contents diff --git a/.changelog/v0.8.0/improvements/ibc/1460-path-fromstr.md b/.changelog/v0.8.0/improvements/ibc/1460-path-fromstr.md deleted file mode 100644 index df0743ee62..0000000000 --- a/.changelog/v0.8.0/improvements/ibc/1460-path-fromstr.md +++ /dev/null @@ -1 +0,0 @@ -- Implement `FromStr` to enable string-encoded paths to be converted into Path identifiers ([#1460](https://github.com/informalsystems/ibc-rs/issues/1460)) diff --git a/.changelog/v0.8.0/improvements/ibc/838-converting-IbcEvent-into-AbciEvent.md b/.changelog/v0.8.0/improvements/ibc/838-converting-IbcEvent-into-AbciEvent.md deleted file mode 100644 index 502e199cf1..0000000000 --- a/.changelog/v0.8.0/improvements/ibc/838-converting-IbcEvent-into-AbciEvent.md +++ /dev/null @@ -1 +0,0 @@ -- Support for converting `ibc::events::IbcEvent` into `tendermint::abci::Event` ([#838](https://github.com/informalsystems/ibc-rs/issues/838)) diff --git a/.changelog/v0.8.0/summary.md b/.changelog/v0.8.0/summary.md deleted file mode 100644 index e90a86f670..0000000000 --- a/.changelog/v0.8.0/summary.md +++ /dev/null @@ -1,10 +0,0 @@ -This is the final release of version 0.8.0, which now depends on the official releases of the `prost` and `tonic` crates. -In addition to everything that's included in v0.8.0-pre.1, this release updates the minimum supported Rust version to 1.56, -and contains various bug fixes and performance improvements which make the relayer more reliable. - -#### Notice for operators -A new setting was added to the Hermes configuration: `max_block_time`. -This setting specifies the maximum time per block for this chain. 
-The block time together with the clock drift are added to the source drift to estimate -the maximum clock drift when creating a client on this chain. -For Cosmos-SDK chains a good approximation is `timeout_propose` + `timeout_commit` diff --git a/.changelog/v0.9.0/bug-fixes/ibc/1532-connOpenAck-counterparty-conn-id-not-set.md b/.changelog/v0.9.0/bug-fixes/ibc/1532-connOpenAck-counterparty-conn-id-not-set.md deleted file mode 100644 index a0136fcd10..0000000000 --- a/.changelog/v0.9.0/bug-fixes/ibc/1532-connOpenAck-counterparty-conn-id-not-set.md +++ /dev/null @@ -1,5 +0,0 @@ -- Set the connection counterparty in the ICS03 [`connOpenAck` handler][conn-open-ack-handler] - ([#1532](https://github.com/informalsystems/ibc-rs/issues/1532)) - -[conn-open-ack-handler]: https://github.com/informalsystems/ibc-rs/blob/master/modules/src/core/ics03_connection/handler/conn_open_ack.rs - diff --git a/.changelog/v0.9.0/features/1408-vega-protos.md b/.changelog/v0.9.0/features/1408-vega-protos.md deleted file mode 100644 index d0e97aee0f..0000000000 --- a/.changelog/v0.9.0/features/1408-vega-protos.md +++ /dev/null @@ -1,2 +0,0 @@ -- Support for compatibility with gaia Vega upgrade (protos matching ibc-go v1.2.2 and SDK v0.44.3) - ([#1408](https://github.com/informalsystems/ibc-rs/issues/1408)) diff --git a/.changelog/v0.9.0/features/1534-ibc-queries.md b/.changelog/v0.9.0/features/1534-ibc-queries.md deleted file mode 100644 index 81868ac372..0000000000 --- a/.changelog/v0.9.0/features/1534-ibc-queries.md +++ /dev/null @@ -1,2 +0,0 @@ -- Optimize the WS client to subscribe to IBC events only (instead of all Tx - events) ([#1534](https://github.com/informalsystems/ibc-rs/issues/1534)) \ No newline at end of file diff --git a/.changelog/v0.9.0/features/ibc-relayer/1518-config-modes.md b/.changelog/v0.9.0/features/ibc-relayer/1518-config-modes.md deleted file mode 100644 index c42e7de9b0..0000000000 --- a/.changelog/v0.9.0/features/ibc-relayer/1518-config-modes.md +++ /dev/null @@ -1,2 +0,0 @@ -- Allow for more granular control of relaying modes. The `mode` configuration section replaces the `strategy` option. - ([#1518](https://github.com/informalsystems/ibc-rs/issues/1518)) diff --git a/.changelog/v0.9.0/improvements/1544-typed-tla-mbt-specs.md b/.changelog/v0.9.0/improvements/1544-typed-tla-mbt-specs.md deleted file mode 100644 index 12679a61bf..0000000000 --- a/.changelog/v0.9.0/improvements/1544-typed-tla-mbt-specs.md +++ /dev/null @@ -1,2 +0,0 @@ -- Upgrade IBC-rs TLA+ MBT models to modern Apalache type annotations - ([#1544](https://github.com/informalsystems/ibc-rs/issues/1544)) \ No newline at end of file diff --git a/.changelog/v0.9.0/improvements/1556-arch-doc.md b/.changelog/v0.9.0/improvements/1556-arch-doc.md deleted file mode 100644 index 1f019e1274..0000000000 --- a/.changelog/v0.9.0/improvements/1556-arch-doc.md +++ /dev/null @@ -1,5 +0,0 @@ -- Add architecture.md doc that gives a high-level overview of the structure of the codebase. 
-- Add some module-level documentation - ([#1556][1556]) - -[1556]: https://github.com/informalsystems/ibc-rs/pulls/1556 diff --git a/.changelog/v0.9.0/improvements/ibc-relayer-cli/1515-single-line-errors.md b/.changelog/v0.9.0/improvements/ibc-relayer-cli/1515-single-line-errors.md deleted file mode 100644 index 340a87363b..0000000000 --- a/.changelog/v0.9.0/improvements/ibc-relayer-cli/1515-single-line-errors.md +++ /dev/null @@ -1,2 +0,0 @@ -- Output errors on a single line if ANSI output is disabled - ([#1515](https://github.com/informalsystems/ibc-rs/issues/1515)) \ No newline at end of file diff --git a/.changelog/v0.9.0/improvements/ibc-relayer-cli/1555-fee-amount-overflow.md b/.changelog/v0.9.0/improvements/ibc-relayer-cli/1555-fee-amount-overflow.md deleted file mode 100644 index 9acc77b50e..0000000000 --- a/.changelog/v0.9.0/improvements/ibc-relayer-cli/1555-fee-amount-overflow.md +++ /dev/null @@ -1,3 +0,0 @@ -- Compute fee amount using big integers to prevent overflow - when using denominations with high decimal places - ([#1555](https://github.com/informalsystems/ibc-rs/issues/1555)) \ No newline at end of file diff --git a/.changelog/v0.9.0/improvements/ibc-relayer/1479-abort-failed-simulated-txs.md b/.changelog/v0.9.0/improvements/ibc-relayer/1479-abort-failed-simulated-txs.md deleted file mode 100644 index fdf23c78d0..0000000000 --- a/.changelog/v0.9.0/improvements/ibc-relayer/1479-abort-failed-simulated-txs.md +++ /dev/null @@ -1,3 +0,0 @@ -- The relayer will now avoid submitting a tx after the simulation failed - (in all but one special case) to avoid wasting fees unnecessarily - ([#1479](https://github.com/informalsystems/ibc-rs/issues/1479)) \ No newline at end of file diff --git a/.changelog/v0.9.0/improvements/ibc/1546-add-partialeq-ibcevent.md b/.changelog/v0.9.0/improvements/ibc/1546-add-partialeq-ibcevent.md deleted file mode 100644 index af9048c549..0000000000 --- a/.changelog/v0.9.0/improvements/ibc/1546-add-partialeq-ibcevent.md +++ /dev/null @@ -1,2 +0,0 @@ -- Derive `PartialEq` and `Eq` on `IbcEvent` and inner types - ([#1546](https://github.com/informalsystems/ibc-rs/issues/1546)) diff --git a/.changelog/v0.9.0/summary.md b/.changelog/v0.9.0/summary.md deleted file mode 100644 index 27fb85dc71..0000000000 --- a/.changelog/v0.9.0/summary.md +++ /dev/null @@ -1,52 +0,0 @@ -*November 23rd, 2021* - -> This release honors Anca Zamfir, who has lead ibc-rs from its inception and through its first two years of life. -> The whole team is grateful for her dedication and the nurturing environment she created. -> To many more achievements, Anca!! 🥂 - -#### Notice for operators - -This release requires operators to update their Hermes configuration. -The `mode` configuration section now replaces the `strategy` option. - -##### `strategy = 'packets'` - -If Hermes was configured with `strategy = 'packets'`, then the configuration needs to be changed in the following way: - -```diff - [global] --strategy = 'packets' - log_level = 'trace' --clear_packets_interval = 100 --tx_confirmation = true -+ -+[mode] -+ -+[mode.clients] -+enabled = true -+refresh = true -+misbehaviour = true -+ -+[mode.connections] -+enabled = false -+ -+[mode.channels] -+enabled = false -+ -+[mode.packets] -+enabled = true -+clear_interval = 100 -+clear_on_start = true -+filter = false -+tx_confirmation = true -``` - -##### `strategy = 'all'` - -If Hermes was configured to complete connection and channel handshakes as well, ie. 
with `strategy = 'all'`, -then on top of the changes above, `mode.connections.enabled` and `mode.chhanels.enabled` must be set to `true`. - -[See the relevant section][config-mode-toml] of the documented `config.toml` file in the repository for more details. - -[config-mode-toml]: https://github.com/informalsystems/ibc-rs/blob/v0.9.0/config.toml#L9-L59 - diff --git a/.dockerignore b/.dockerignore deleted file mode 100644 index 9f970225ad..0000000000 --- a/.dockerignore +++ /dev/null @@ -1 +0,0 @@ -target/ \ No newline at end of file diff --git a/.rustfmt.toml b/.rustfmt.toml index 8c83f08221..8b3be5e7d9 100644 --- a/.rustfmt.toml +++ b/.rustfmt.toml @@ -1,9 +1,23 @@ +# Basic +hard_tabs = true max_width = 100 +use_small_heuristics = "Max" +# Imports +imports_granularity = "Crate" reorder_imports = true - -# nightly only - -# unstable_features = true -# format_strings = false -# comment_width = 100 -# wrap_comments = true +# Consistency +newline_style = "Unix" +# Format comments +comment_width = 100 +wrap_comments = true +# Misc +binop_separator = "Back" +chain_width = 80 +match_arm_blocks = false +match_arm_leading_pipes = "Preserve" +match_block_trailing_comma = true +reorder_impl_items = false +spaces_around_ranges = false +trailing_comma = "Vertical" +trailing_semicolon = false +use_field_init_shorthand = true diff --git a/Cargo.lock b/Cargo.lock index 2d571e8287..c90a423975 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -12,15 +12,6 @@ dependencies = [ "regex", ] -[[package]] -name = "addr2line" -version = "0.17.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9ecd88a8c8378ca913a680cd98f0f13ac67383d35993f86c90a70e3f137816b" -dependencies = [ - "gimli", -] - [[package]] name = "adler" version = "1.0.2" @@ -71,15 +62,6 @@ version = "1.0.62" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1485d4d2cc45e7b201ee3767015c96faa5904387c9d87c6efdd0fb511f12d305" -[[package]] -name = "approx" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cab112f0a86d568ea0e627cc1d6be74a1e9cd55214684db5561995f6dad897c6" -dependencies = [ - "num-traits", -] - [[package]] name = "argh" version = "0.1.8" @@ -136,15 +118,6 @@ version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6" -[[package]] -name = "async-lock" -version = "2.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e97a171d191782fba31bb902b14ad94e24a68145032b7eedf871ab0bc0d077b6" -dependencies = [ - "event-listener", -] - [[package]] name = "async-stream" version = "0.3.3" @@ -177,22 +150,6 @@ dependencies = [ "syn", ] -[[package]] -name = "async-tungstenite" -version = "0.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e00550829ef8e2c4115250d0ee43305649b0fa95f78a32ce5b07da0b73d95c5c" -dependencies = [ - "futures-io", - "futures-util", - "log", - "pin-project-lite", - "tokio", - "tokio-rustls 0.22.0", - "tungstenite", - "webpki-roots 0.21.1", -] - [[package]] name = "atty" version = "0.2.14" @@ -253,27 +210,6 @@ dependencies = [ "mime", ] -[[package]] -name = "backtrace" -version = "0.3.66" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cab84319d616cfb654d03394f38ab7e6f0919e181b1b57e1fd15e7fb4077d9a7" -dependencies = [ - "addr2line", - "cc", - "cfg-if 1.0.0", - "libc", - "miniz_oxide", - "object", - "rustc-demangle", -] - -[[package]] -name = 
"base16ct" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "349a06037c7bf932dd7e7d1f653678b2038b9ad46a74102f1fc7bd7872678cce" - [[package]] name = "base58" version = "0.2.0" @@ -286,95 +222,6 @@ version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" -[[package]] -name = "beef" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a8241f3ebb85c056b509d4327ad0358fbbba6ffb340bf388f26350aeda225b1" -dependencies = [ - "serde", -] - -[[package]] -name = "beefy-light-client" -version = "0.1.0" -source = "git+https://github.com/ComposableFi/beefy-rs?rev=cb8cadc8d45bc444367002c77cbd395eff8a741c#cb8cadc8d45bc444367002c77cbd395eff8a741c" -dependencies = [ - "beefy-primitives", - "ckb-merkle-mountain-range", - "color-eyre", - "derive_more", - "frame-metadata", - "frame-support", - "hex", - "jsonrpsee 0.15.1", - "pallet-beefy-mmr", - "pallet-mmr", - "parity-scale-codec", - "primitives", - "rs_merkle", - "sp-core 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "sp-core-hashing 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "sp-mmr-primitives", - "sp-runtime 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "subxt-codegen", - "syn", - "tokio", -] - -[[package]] -name = "beefy-merkle-tree" -version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" - -[[package]] -name = "beefy-primitives" -version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" -dependencies = [ - "parity-scale-codec", - "scale-info", - "sp-api", - "sp-application-crypto 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "sp-core 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "sp-runtime 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", -] - -[[package]] -name = "beefy-prover" -version = "0.1.0" -source = "git+https://github.com/ComposableFi/beefy-rs?rev=cb8cadc8d45bc444367002c77cbd395eff8a741c#cb8cadc8d45bc444367002c77cbd395eff8a741c" -dependencies = [ - "beefy-primitives", - "color-eyre", - "derive_more", - "frame-metadata", - "frame-support", - "hex", - "hex-literal", - "jsonrpsee 0.15.1", - "pallet-beefy-mmr", - "pallet-mmr", - "pallet-mmr-rpc", - "parity-scale-codec", - "primitives", - "rs_merkle", - "serde_json", - "sp-core 6.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sp-core-hashing 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "sp-io 6.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sp-mmr-primitives", - "sp-runtime 6.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "sp-trie 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "subxt", - "subxt-codegen", - "syn", - "tokio", -] - [[package]] name = "bitflags" version = "1.3.2" @@ -503,15 +350,6 @@ dependencies = [ "syn", ] -[[package]] -name = "bstr" -version = 
"0.2.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba3569f383e8f1598449f1a423e72e99569137b47740b1da11ef19af3d5c3223" -dependencies = [ - "memchr", -] - [[package]] name = "bumpalo" version = "3.11.0" @@ -554,12 +392,6 @@ dependencies = [ "jobserver", ] -[[package]] -name = "cfg-if" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" - [[package]] name = "cfg-if" version = "1.0.0" @@ -584,15 +416,6 @@ version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fff857943da45f546682664a79488be82e69e43c1a7a2307679ab9afb3a66d2e" -[[package]] -name = "ckb-merkle-mountain-range" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f061f97d64fd1822664bdfb722f7ae5469a97b77567390f7442be5b5dc82a5b" -dependencies = [ - "cfg-if 0.1.10", -] - [[package]] name = "clap" version = "3.2.17" @@ -650,39 +473,6 @@ dependencies = [ "cc", ] -[[package]] -name = "color-eyre" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a667583cca8c4f8436db8de46ea8233c42a7d9ae424a82d338f2e4675229204" -dependencies = [ - "backtrace", - "color-spantrace", - "eyre", - "indenter", - "once_cell", - "owo-colors", - "tracing-error", -] - -[[package]] -name = "color-spantrace" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ba75b3d9449ecdccb27ecbc479fdc0b87fa2dd43d2f8298f9bf0e59aacc8dce" -dependencies = [ - "once_cell", - "owo-colors", - "tracing-core", - "tracing-error", -] - -[[package]] -name = "const-oid" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4c78c047431fee22c1a7bb92e00ad095a02a983affe4d8a72e2a2c62c1b94f3" - [[package]] name = "constant_time_eq" version = "0.1.5" @@ -691,18 +481,11 @@ checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" [[package]] name = "convert_case" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" - -[[package]] -name = "core-foundation" -version = "0.9.3" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "194a7a9e6de53fa55116934067c844d9d749312f75c6f6d0980e8c252f8c2146" +checksum = "ec182b0ca2f35d8fc196cf3404988fd8b8c739a4d270ff118a398feb0cbec1ca" dependencies = [ - "core-foundation-sys", - "libc", + "unicode-segmentation", ] [[package]] @@ -726,7 +509,7 @@ version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", ] [[package]] @@ -735,7 +518,7 @@ version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c2dd04ddaf88237dc3b8d8f9a3c1004b506b54b3313403944054d23c0870c521" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "crossbeam-utils", ] @@ -745,7 +528,7 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "715e8152b692bba2d374b53d4875445368fdf21a94751410af607a5ac677d1fc" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "crossbeam-epoch", "crossbeam-utils", ] @@ -757,7 +540,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "045ebe27666471bb549370b4b0b3e51b07f56325befa4284db65fc89c02511b1" 
dependencies = [ "autocfg", - "cfg-if 1.0.0", + "cfg-if", "crossbeam-utils", "memoffset", "once_cell", @@ -770,7 +553,7 @@ version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "51887d4adc7b564537b15adcfb307936f8075dfcd5f00dde9a9f1d29383682bc" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "once_cell", ] @@ -780,18 +563,6 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" -[[package]] -name = "crypto-bigint" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03c6a1d5fa1de37e071642dfa44ec552ca5b299adb128fab16138e24b548fd21" -dependencies = [ - "generic-array 0.14.6", - "rand_core 0.6.3", - "subtle", - "zeroize", -] - [[package]] name = "crypto-common" version = "0.1.6" @@ -822,15 +593,6 @@ dependencies = [ "subtle", ] -[[package]] -name = "ct-logs" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1a816186fa68d9e426e3cb4ae4dff1fcd8e4a2c34b781bf7a822574a0d0aac8" -dependencies = [ - "sct 0.6.1", -] - [[package]] name = "curve25519-dalek" version = "2.1.3" @@ -870,71 +632,14 @@ dependencies = [ "zeroize", ] -[[package]] -name = "darling" -version = "0.14.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4529658bdda7fd6769b8614be250cdcfc3aeb0ee72fe66f9e41e5e5eb73eac02" -dependencies = [ - "darling_core", - "darling_macro", -] - -[[package]] -name = "darling_core" -version = "0.14.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "649c91bc01e8b1eac09fb91e8dbc7d517684ca6be8ebc75bb9cafc894f9fdb6f" -dependencies = [ - "fnv", - "ident_case", - "proc-macro2", - "quote", - "strsim", - "syn", -] - -[[package]] -name = "darling_macro" -version = "0.14.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddfc69c5bfcbd2fc09a0f38451d2daf0e372e367986a83906d1b0dbc88134fb5" -dependencies = [ - "darling_core", - "quote", - "syn", -] - -[[package]] -name = "der" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6919815d73839e7ad218de758883aae3a257ba6759ce7a9992501efbb53d705c" -dependencies = [ - "const-oid", -] - -[[package]] -name = "derivative" -version = "2.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "derive_more" version = "0.99.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" dependencies = [ - "convert_case", "proc-macro2", "quote", - "rustc_version", "syn", ] @@ -1020,18 +725,6 @@ version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4f94fa09c2aeea5b8839e414b7b841bf429fd25b9c522116ac97ee87856d88b2" -[[package]] -name = "ecdsa" -version = "0.13.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0d69ae62e0ce582d56380743515fefaf1a8c70cec685d9677636d7e30ae9dc9" -dependencies = [ - "der", - "elliptic-curve", - "rfc6979", - "signature", -] - [[package]] name = "ed25519" version = "1.5.2" @@ -1074,24 +767,6 @@ version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "90e5c1c8368803113bf0c9584fc495a58b86dc8a29edbf8fe877d21d9507e797" 
-[[package]] -name = "elliptic-curve" -version = "0.11.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25b477563c2bfed38a3b7a60964c49e058b2510ad3f12ba3483fd8f62c2306d6" -dependencies = [ - "base16ct", - "crypto-bigint", - "der", - "ff", - "generic-array 0.14.6", - "group", - "rand_core 0.6.3", - "sec1", - "subtle", - "zeroize", -] - [[package]] name = "env_logger" version = "0.9.0" @@ -1111,12 +786,6 @@ version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "68b91989ae21441195d7d9b9993a2f9295c7e1a8c96255d8b729accddc124797" -[[package]] -name = "event-listener" -version = "2.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" - [[package]] name = "eyre" version = "0.6.8" @@ -1142,16 +811,6 @@ dependencies = [ "instant", ] -[[package]] -name = "ff" -version = "0.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "131655483be284720a17d74ff97592b8e76576dc25563148601df2d7c9080924" -dependencies = [ - "rand_core 0.6.3", - "subtle", -] - [[package]] name = "fixed-hash" version = "0.7.0" @@ -1207,155 +866,40 @@ dependencies = [ ] [[package]] -name = "frame-benchmarking" -version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" +name = "fuchsia-cprng" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba" + +[[package]] +name = "funty" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" + +[[package]] +name = "futures" +version = "0.3.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab30e97ab6aacfe635fad58f22c2bb06c8b685f7421eb1e064a729e2a5f481fa" dependencies = [ - "frame-support", - "frame-system", - "linregress", - "log", - "parity-scale-codec", - "paste", - "scale-info", - "serde", - "sp-api", - "sp-application-crypto 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "sp-io 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "sp-runtime 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "sp-runtime-interface 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "sp-storage 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "futures-channel", + "futures-core", + "futures-executor", + "futures-io", + "futures-sink", + "futures-task", + "futures-util", ] [[package]] -name = "frame-metadata" -version = "15.0.0" +name = "futures-channel" +version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df6bb8542ef006ef0de09a5c4420787d79823c0ed7924225822362fd2bf2ff2d" +checksum = "2bfc52cbddcfd745bf1740338492bb0bd83d76c67b445f91c5fb29fae29ecaa1" dependencies = [ - "cfg-if 1.0.0", - "parity-scale-codec", - "scale-info", - "serde", -] - -[[package]] -name = "frame-support" -version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" -dependencies = [ - "bitflags", - "frame-metadata", - "frame-support-procedural", - 
"impl-trait-for-tuples", - "k256", - "log", - "once_cell", - "parity-scale-codec", - "paste", - "scale-info", - "serde", - "smallvec", - "sp-arithmetic 5.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "sp-core 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "sp-core-hashing-proc-macro", - "sp-inherents", - "sp-io 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "sp-runtime 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "sp-staking", - "sp-state-machine 0.12.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "sp-tracing 5.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "tt-call", -] - -[[package]] -name = "frame-support-procedural" -version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" -dependencies = [ - "Inflector", - "frame-support-procedural-tools", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "frame-support-procedural-tools" -version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" -dependencies = [ - "frame-support-procedural-tools-derive", - "proc-macro-crate 1.2.1", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "frame-support-procedural-tools-derive" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "frame-system" -version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" -dependencies = [ - "frame-support", - "log", - "parity-scale-codec", - "scale-info", - "serde", - "sp-core 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "sp-io 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "sp-runtime 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "sp-version", -] - -[[package]] -name = "fuchsia-cprng" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba" - -[[package]] -name = "funty" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" - -[[package]] -name = "futures" -version = "0.3.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab30e97ab6aacfe635fad58f22c2bb06c8b685f7421eb1e064a729e2a5f481fa" -dependencies = [ - "futures-channel", - "futures-core", - "futures-executor", - "futures-io", - "futures-sink", - "futures-task", - "futures-util", -] - -[[package]] -name = "futures-channel" -version = "0.3.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bfc52cbddcfd745bf1740338492bb0bd83d76c67b445f91c5fb29fae29ecaa1" -dependencies = [ - "futures-core", - "futures-sink", + "futures-core", + "futures-sink", ] [[package]] @@ -1405,12 +949,6 @@ version = "0.3.23" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "842fc63b931f4056a24d59de13fb1272134ce261816e063e634ad0c15cdc5306" -[[package]] -name = "futures-timer" -version = "3.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c" - [[package]] name = "futures-util" version = "0.3.23" @@ -1454,7 +992,7 @@ version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "js-sys", "libc", "wasi 0.9.0+wasi-snapshot-preview1", @@ -1467,19 +1005,13 @@ version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4eb1a864a501629691edf6c15a593b7a51eebaa1e8468e9ddc623de7c9b58ec6" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "js-sys", "libc", "wasi 0.11.0+wasi-snapshot-preview1", "wasm-bindgen", ] -[[package]] -name = "gimli" -version = "0.26.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22030e2c5a68ec659fde1e949a745124b48e6fa8b045b7ed5bd1fe4ccc5c4e5d" - [[package]] name = "git2" version = "0.13.25" @@ -1495,50 +1027,6 @@ dependencies = [ "url", ] -[[package]] -name = "globset" -version = "0.4.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a1e17342619edbc21a964c2afbeb6c820c6a2560032872f397bb97ea127bd0a" -dependencies = [ - "aho-corasick", - "bstr", - "fnv", - "log", - "regex", -] - -[[package]] -name = "group" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc5ac374b108929de78460075f3dc439fa66df9d8fc77e8f12caa5165fcf0c89" -dependencies = [ - "ff", - "rand_core 0.6.3", - "subtle", -] - -[[package]] -name = "gumdrop" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5bc700f989d2f6f0248546222d9b4258f5b02a171a431f8285a81c08142629e3" -dependencies = [ - "gumdrop_derive", -] - -[[package]] -name = "gumdrop_derive" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "729f9bd3449d77e7831a18abfb7ba2f99ee813dfd15b8c2167c9a54ba20aa99d" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "h2" version = "0.3.14" @@ -1587,34 +1075,6 @@ name = "hashbrown" version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" -dependencies = [ - "ahash", -] - -[[package]] -name = "headers" -version = "0.3.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4cff78e5788be1e0ab65b04d306b2ed5092c815ec97ec70f4ebd5aee158aa55d" -dependencies = [ - "base64", - "bitflags", - "bytes", - "headers-core", - "http", - "httpdate", - "mime", - "sha-1 0.10.0", -] - -[[package]] -name = "headers-core" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7f66481bfee273957b1f20485a4ff3362987f85b2c236580d81b4eb7a326429" -dependencies = [ - "http", -] [[package]] name = "heck" @@ -1646,12 +1106,6 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" -[[package]] -name = "hex-literal" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ebdb29d2ea9ed0083cd8cece49bbd968021bd99b0849edb4a9a7ee0fdf6a4e0" - 
[[package]] name = "hmac" version = "0.8.1" @@ -1672,17 +1126,6 @@ dependencies = [ "digest 0.9.0", ] -[[package]] -name = "hmac-drbg" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17ea0a1394df5b6574da6e0c1ade9e78868c9fb0a4e5ef4428e32da4676b85b1" -dependencies = [ - "digest 0.9.0", - "generic-array 0.14.6", - "hmac 0.8.1", -] - [[package]] name = "http" version = "0.2.8" @@ -1753,59 +1196,6 @@ dependencies = [ "want", ] -[[package]] -name = "hyper-proxy" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca815a891b24fdfb243fa3239c86154392b0953ee584aa1a2a1f66d20cbe75cc" -dependencies = [ - "bytes", - "futures", - "headers", - "http", - "hyper", - "hyper-rustls 0.22.1", - "rustls-native-certs 0.5.0", - "tokio", - "tokio-rustls 0.22.0", - "tower-service", - "webpki 0.21.4", -] - -[[package]] -name = "hyper-rustls" -version = "0.22.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f9f7a97316d44c0af9b0301e65010573a853a9fc97046d7331d7f6bc0fd5a64" -dependencies = [ - "ct-logs", - "futures-util", - "hyper", - "log", - "rustls 0.19.1", - "rustls-native-certs 0.5.0", - "tokio", - "tokio-rustls 0.22.0", - "webpki 0.21.4", - "webpki-roots 0.21.1", -] - -[[package]] -name = "hyper-rustls" -version = "0.23.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d87c48c02e0dc5e3b849a2041db3029fd066650f8f717c07bf8ed78ccb895cac" -dependencies = [ - "http", - "hyper", - "log", - "rustls 0.20.6", - "rustls-native-certs 0.6.2", - "tokio", - "tokio-rustls 0.23.4", - "webpki-roots 0.22.4", -] - [[package]] name = "hyper-timeout" version = "0.4.1" @@ -1835,25 +1225,19 @@ dependencies = [ name = "ibc" version = "0.15.0" dependencies = [ - "beefy-light-client", - "beefy-primitives", - "beefy-prover", "borsh", - "bytes", "derive_more", "env_logger", "flex-error", - "frame-support", + "ibc-derive", "ibc-proto", "ics23", "modelator", "num-traits", "parity-scale-codec", "primitive-types", - "primitives", "prost", "prost-types", - "ripemd", "safe-regex", "serde", "serde_derive", @@ -1861,18 +1245,11 @@ dependencies = [ "sha2 0.10.2", "sha3 0.10.2", "sp-core 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "sp-io 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "sp-mmr-primitives", - "sp-runtime 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "sp-trie 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", "subtle-encoding", - "subxt", "tendermint", - "tendermint-light-client-verifier", "tendermint-proto", "tendermint-rpc", - "tendermint-testgen", "test-log", "time", "tokio", @@ -1881,6 +1258,16 @@ dependencies = [ "uint", ] +[[package]] +name = "ibc-derive" +version = "0.1.0" +dependencies = [ + "convert_case", + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "ibc-proto" version = "0.18.0" @@ -1923,12 +1310,6 @@ dependencies = [ "sp-core 6.0.0 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "ident_case" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" - [[package]] name = "idna" version = "0.2.3" @@ -1985,31 +1366,13 @@ dependencies = [ "hashbrown 0.12.3", ] -[[package]] -name = "input_buffer" -version = 
"0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f97967975f448f1a7ddb12b0bc41069d09ed6a1c161a92687e057325db35d413" -dependencies = [ - "bytes", -] - [[package]] name = "instant" version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" dependencies = [ - "cfg-if 1.0.0", -] - -[[package]] -name = "integer-sqrt" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "276ec31bcb4a9ee45f58bec6f9ec700ae4cf4f4f8f2fa7e06cb406bd5ffdd770" -dependencies = [ - "num-traits", + "cfg-if", ] [[package]] @@ -2052,298 +1415,52 @@ dependencies = [ ] [[package]] -name = "jsonrpsee" -version = "0.13.1" +name = "keccak" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1f2ab5a60e558e74ea93bcf5164ebc47939a7fff8938fa9b5233bbc63e16061" -dependencies = [ - "jsonrpsee-client-transport 0.13.1", - "jsonrpsee-core 0.13.1", - "jsonrpsee-http-server", - "jsonrpsee-proc-macros", - "jsonrpsee-types 0.13.1", - "jsonrpsee-ws-server", - "tracing", -] +checksum = "f9b7d56ba4a8344d6be9729995e6b06f928af29998cdf79fe390cbf6b1fee838" [[package]] -name = "jsonrpsee" -version = "0.15.1" +name = "lazy_static" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bd0d559d5e679b1ab2f869b486a11182923863b1b3ee8b421763cdd707b783a" -dependencies = [ - "jsonrpsee-client-transport 0.15.1", - "jsonrpsee-core 0.15.1", - "jsonrpsee-http-client", - "jsonrpsee-types 0.15.1", -] +checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" + +[[package]] +name = "libc" +version = "0.2.132" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8371e4e5341c3a96db127eb2465ac681ced4c433e01dd0e938adbef26ba93ba5" [[package]] -name = "jsonrpsee-client-transport" -version = "0.13.1" +name = "libgit2-sys" +version = "0.12.26+1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26d682f4a55081a2be3e639280c640523070e4aeb8ee2fd8dd9168fdae57a9db" +checksum = "19e1c899248e606fbfe68dcb31d8b0176ebab833b103824af31bddf4b7457494" dependencies = [ - "futures-util", - "http", - "jsonrpsee-core 0.13.1", - "jsonrpsee-types 0.13.1", - "pin-project", - "rustls-native-certs 0.6.2", - "soketto", - "thiserror", - "tokio", - "tokio-rustls 0.23.4", - "tokio-util", - "tracing", - "webpki-roots 0.22.4", + "cc", + "libc", + "libssh2-sys", + "libz-sys", + "openssl-sys", + "pkg-config", ] [[package]] -name = "jsonrpsee-client-transport" -version = "0.15.1" +name = "libsecp256k1" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8752740ecd374bcbf8b69f3e80b0327942df76f793f8d4e60d3355650c31fb74" -dependencies = [ - "futures-util", - "http", - "jsonrpsee-core 0.15.1", - "jsonrpsee-types 0.15.1", - "pin-project", - "rustls-native-certs 0.6.2", - "soketto", - "thiserror", - "tokio", - "tokio-rustls 0.23.4", - "tokio-util", - "tracing", - "webpki-roots 0.22.4", -] - -[[package]] -name = "jsonrpsee-core" -version = "0.13.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e27462b21279edf9a6a91f46ffbe125e9cdc58b901d2e08bf59b31a47d7d0ab" -dependencies = [ - "anyhow", - "arrayvec 0.7.2", - "async-lock", - "async-trait", - "beef", - "futures-channel", - "futures-timer", - "futures-util", - "hyper", - "jsonrpsee-types 0.13.1", - "parking_lot", - "rand 0.8.5", - "rustc-hash", 
- "serde", - "serde_json", - "soketto", - "thiserror", - "tokio", - "tracing", -] - -[[package]] -name = "jsonrpsee-core" -version = "0.15.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3dc3e9cf2ba50b7b1d7d76a667619f82846caa39e8e8daa8a4962d74acaddca" -dependencies = [ - "anyhow", - "async-lock", - "async-trait", - "beef", - "futures-channel", - "futures-timer", - "futures-util", - "hyper", - "jsonrpsee-types 0.15.1", - "rustc-hash", - "serde", - "serde_json", - "thiserror", - "tokio", - "tracing", - "tracing-futures", -] - -[[package]] -name = "jsonrpsee-http-client" -version = "0.15.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52f7c0e2333ab2115c302eeb4f137c8a4af5ab609762df68bbda8f06496677c9" -dependencies = [ - "async-trait", - "hyper", - "hyper-rustls 0.23.0", - "jsonrpsee-core 0.15.1", - "jsonrpsee-types 0.15.1", - "rustc-hash", - "serde", - "serde_json", - "thiserror", - "tokio", - "tracing", - "tracing-futures", -] - -[[package]] -name = "jsonrpsee-http-server" -version = "0.13.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7178f16eabd7154c094e24d295b9ee355ec1e5f24c328759c56255ff7bbd4548" -dependencies = [ - "futures-channel", - "futures-util", - "globset", - "hyper", - "jsonrpsee-core 0.13.1", - "jsonrpsee-types 0.13.1", - "lazy_static", - "serde_json", - "tokio", - "tracing", - "unicase", -] - -[[package]] -name = "jsonrpsee-proc-macros" -version = "0.13.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b8d7f449cab3b747f12c3efc27f5cad537f3b597c6a3838b0fac628f4bf730a" -dependencies = [ - "proc-macro-crate 1.2.1", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "jsonrpsee-types" -version = "0.13.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fd11763134104122ddeb0f97e4bbe393058017dfb077db63fbf44b4dd0dd86e" -dependencies = [ - "anyhow", - "beef", - "serde", - "serde_json", - "thiserror", - "tracing", -] - -[[package]] -name = "jsonrpsee-types" -version = "0.15.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e290bba767401b646812f608c099b922d8142603c9e73a50fb192d3ac86f4a0d" -dependencies = [ - "anyhow", - "beef", - "serde", - "serde_json", - "thiserror", - "tracing", -] - -[[package]] -name = "jsonrpsee-ws-server" -version = "0.13.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfb6c21556c551582b56e4e8e6e6249b0bbdb69bb7fa39efe9b9a6b54af9f206" -dependencies = [ - "futures-channel", - "futures-util", - "jsonrpsee-core 0.13.1", - "jsonrpsee-types 0.13.1", - "serde_json", - "soketto", - "tokio", - "tokio-util", - "tracing", -] - -[[package]] -name = "k256" -version = "0.10.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19c3a5e0a0b8450278feda242592512e09f61c72e018b8cd5c859482802daf2d" -dependencies = [ - "cfg-if 1.0.0", - "ecdsa", - "elliptic-curve", - "sec1", -] - -[[package]] -name = "keccak" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9b7d56ba4a8344d6be9729995e6b06f928af29998cdf79fe390cbf6b1fee838" - -[[package]] -name = "kvdb" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a301d8ecb7989d4a6e2c57a49baca77d353bdbf879909debe3f375fe25d61f86" -dependencies = [ - "parity-util-mem", - "smallvec", -] - -[[package]] -name = "lazy_static" -version = "1.4.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" - -[[package]] -name = "libc" -version = "0.2.132" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8371e4e5341c3a96db127eb2465ac681ced4c433e01dd0e938adbef26ba93ba5" - -[[package]] -name = "libgit2-sys" -version = "0.12.26+1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19e1c899248e606fbfe68dcb31d8b0176ebab833b103824af31bddf4b7457494" -dependencies = [ - "cc", - "libc", - "libssh2-sys", - "libz-sys", - "openssl-sys", - "pkg-config", -] - -[[package]] -name = "libm" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "292a948cd991e376cf75541fe5b97a1081d713c618b4f1b9500f8844e49eb565" - -[[package]] -name = "libsecp256k1" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95b09eff1b35ed3b33b877ced3a691fc7a481919c7e29c53c906226fcf55e2a1" +checksum = "95b09eff1b35ed3b33b877ced3a691fc7a481919c7e29c53c906226fcf55e2a1" dependencies = [ "arrayref", "base64", "digest 0.9.0", - "hmac-drbg", "libsecp256k1-core", "libsecp256k1-gen-ecmult", "libsecp256k1-gen-genmult", "rand 0.8.5", "serde", "sha2 0.9.9", - "typenum", ] [[package]] @@ -2401,16 +1518,6 @@ dependencies = [ "vcpkg", ] -[[package]] -name = "linregress" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6c601a85f5ecd1aba625247bca0031585fb1c446461b142878a16f8245ddeb8" -dependencies = [ - "nalgebra", - "statrs", -] - [[package]] name = "lock_api" version = "0.4.8" @@ -2427,16 +1534,7 @@ version = "0.4.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e" dependencies = [ - "cfg-if 1.0.0", -] - -[[package]] -name = "lru" -version = "0.7.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e999beba7b6e8345721bd280141ed958096a2e4abdf74f67ff4ce49b4b54e47a" -dependencies = [ - "hashbrown 0.12.3", + "cfg-if", ] [[package]] @@ -2469,15 +1567,6 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "73cbba799671b762df5a175adf59ce145165747bb891505c43d09aefbbf38beb" -[[package]] -name = "matrixmultiply" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "add85d4dd35074e6fedc608f8c8f513a3548619a9024b751949ef0e8e45a4d84" -dependencies = [ - "rawpointer", -] - [[package]] name = "memchr" version = "2.5.0" @@ -2493,17 +1582,6 @@ dependencies = [ "autocfg", ] -[[package]] -name = "memory-db" -version = "0.29.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6566c70c1016f525ced45d7b7f97730a2bafb037c788211d0c186ef5b2189f0a" -dependencies = [ - "hash-db", - "hashbrown 0.12.3", - "parity-util-mem", -] - [[package]] name = "memory_units" version = "0.3.0" @@ -2522,12 +1600,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "micromath" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39617bc909d64b068dcffd0e3e31679195b5576d0c83fadc52690268cc2b2b55" - [[package]] name = "mime" version = "0.3.16" @@ -2592,35 +1664,6 @@ version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a" -[[package]] -name = "nalgebra" -version = "0.27.1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "462fffe4002f4f2e1f6a9dcf12cc1a6fc0e15989014efc02a941d3e0f5dc2120" -dependencies = [ - "approx", - "matrixmultiply", - "nalgebra-macros", - "num-complex", - "num-rational 0.4.1", - "num-traits", - "rand 0.8.5", - "rand_distr", - "simba", - "typenum", -] - -[[package]] -name = "nalgebra-macros" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01fcc0b8149b4632adc89ac3b7b31a12fb6099a0317a4eb2ebff574ef7de7218" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "nodrop" version = "0.1.14" @@ -2648,15 +1691,6 @@ dependencies = [ "num-traits", ] -[[package]] -name = "num-complex" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ae39348c8bc5fbd7f40c727a9925f03517afd2ab27d46702108b6a7e5414c19" -dependencies = [ - "num-traits", -] - [[package]] name = "num-derive" version = "0.3.3" @@ -2700,17 +1734,6 @@ dependencies = [ "num-traits", ] -[[package]] -name = "num-rational" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0638a1c9d0a3c0914158145bc76cff373a75a627e6ecbfb71cbe6f453a5a19b0" -dependencies = [ - "autocfg", - "num-integer", - "num-traits", -] - [[package]] name = "num-traits" version = "0.2.15" @@ -2718,7 +1741,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd" dependencies = [ "autocfg", - "libm", ] [[package]] @@ -2740,15 +1762,6 @@ dependencies = [ "libc", ] -[[package]] -name = "object" -version = "0.29.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21158b2c33aa6d4561f1c0a6ea283ca92bc54802a93b263e910746d679a7eb53" -dependencies = [ - "memchr", -] - [[package]] name = "once_cell" version = "1.13.1" @@ -2793,140 +1806,24 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ff7415e9ae3fff1225851df9e0d9e4e5479f947619774677a63572e55e80eff" [[package]] -name = "owo-colors" -version = "3.5.0" +name = "parity-scale-codec" +version = "3.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1b04fb49957986fdce4d6ee7a65027d55d4b6d2265e5848bbb507b58ccfdb6f" - -[[package]] -name = "pallet-beefy" -version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" +checksum = "9182e4a71cae089267ab03e67c99368db7cd877baf50f931e5d6d4b71e195ac0" dependencies = [ - "beefy-primitives", - "frame-support", - "frame-system", - "pallet-session", - "parity-scale-codec", - "scale-info", + "arrayvec 0.7.2", + "bitvec", + "byte-slice-cast", + "impl-trait-for-tuples", + "parity-scale-codec-derive", "serde", - "sp-runtime 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", ] [[package]] -name = "pallet-beefy-mmr" -version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" -dependencies = [ - "beefy-merkle-tree", - "beefy-primitives", - "frame-support", - "frame-system", - "hex", - "log", - "pallet-beefy", - "pallet-mmr", - "pallet-session", - "parity-scale-codec", - "scale-info", - "serde", - "sp-core 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "sp-io 6.0.0 
(git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "sp-runtime 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", -] - -[[package]] -name = "pallet-mmr" -version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" -dependencies = [ - "ckb-merkle-mountain-range", - "frame-benchmarking", - "frame-support", - "frame-system", - "parity-scale-codec", - "scale-info", - "sp-core 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "sp-io 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "sp-mmr-primitives", - "sp-runtime 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", -] - -[[package]] -name = "pallet-mmr-rpc" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" -dependencies = [ - "jsonrpsee 0.13.1", - "parity-scale-codec", - "serde", - "sp-api", - "sp-blockchain", - "sp-core 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "sp-mmr-primitives", - "sp-runtime 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", -] - -[[package]] -name = "pallet-session" -version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" -dependencies = [ - "frame-support", - "frame-system", - "impl-trait-for-tuples", - "log", - "pallet-timestamp", - "parity-scale-codec", - "scale-info", - "sp-core 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "sp-io 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "sp-runtime 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "sp-session", - "sp-staking", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "sp-trie 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", -] - -[[package]] -name = "pallet-timestamp" -version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" -dependencies = [ - "frame-benchmarking", - "frame-support", - "frame-system", - "log", - "parity-scale-codec", - "scale-info", - "sp-inherents", - "sp-runtime 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "sp-timestamp", -] - -[[package]] -name = "parity-scale-codec" -version = "3.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9182e4a71cae089267ab03e67c99368db7cd877baf50f931e5d6d4b71e195ac0" -dependencies = [ - "arrayvec 0.7.2", - "bitvec", - "byte-slice-cast", - "impl-trait-for-tuples", - "parity-scale-codec-derive", - "serde", -] - -[[package]] -name = "parity-scale-codec-derive" -version = "3.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9299338969a3d2f491d65f140b00ddec470858402f888af98e8642fb5e8965cd" +name = "parity-scale-codec-derive" +version = "3.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"9299338969a3d2f491d65f140b00ddec470858402f888af98e8642fb5e8965cd" dependencies = [ "proc-macro-crate 1.2.1", "proc-macro2", @@ -2940,11 +1837,9 @@ version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c32561d248d352148124f036cac253a644685a21dc9fea383eb4907d7bd35a8f" dependencies = [ - "cfg-if 1.0.0", - "hashbrown 0.12.3", + "cfg-if", "impl-trait-for-tuples", "parity-util-mem-derive", - "parking_lot", "primitive-types", "winapi", ] @@ -2982,7 +1877,7 @@ version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09a279cbf25cb0757810394fbc1e359949b59e348145c643a939a525692e6929" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "libc", "redox_syscall", "smallvec", @@ -3123,21 +2018,6 @@ dependencies = [ "uint", ] -[[package]] -name = "primitives" -version = "0.1.0" -source = "git+https://github.com/ComposableFi/beefy-rs?rev=cb8cadc8d45bc444367002c77cbd395eff8a741c#cb8cadc8d45bc444367002c77cbd395eff8a741c" -dependencies = [ - "beefy-primitives", - "ckb-merkle-mountain-range", - "derive_more", - "parity-scale-codec", - "rs_merkle", - "sp-core 6.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sp-mmr-primitives", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", -] - [[package]] name = "proc-macro-crate" version = "0.1.5" @@ -3208,7 +2088,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ae5a4388762d5815a9fc0dea33c56b021cdc8dde0c55e0c9ca57197254b0cab" dependencies = [ "bytes", - "cfg-if 1.0.0", + "cfg-if", "cmake", "heck 0.4.0", "itertools", @@ -3352,16 +2232,6 @@ dependencies = [ "getrandom 0.2.7", ] -[[package]] -name = "rand_distr" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32cb0b9bc82b0a0876c2dd994a7e7a2683d3e7390ca40e6886785ef0c7e3ee31" -dependencies = [ - "num-traits", - "rand 0.8.5", -] - [[package]] name = "rand_hc" version = "0.2.0" @@ -3380,12 +2250,6 @@ dependencies = [ "rand_core 0.5.1", ] -[[package]] -name = "rawpointer" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60a357793950651c4ed0f3f52338f53b2f809f32d83a07f72909fa13e4c6c1e3" - [[package]] name = "rayon" version = "1.5.3" @@ -3494,17 +2358,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "rfc6979" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96ef608575f6392792f9ecf7890c00086591d29a83910939d430753f7c050525" -dependencies = [ - "crypto-bigint", - "hmac 0.11.0", - "zeroize", -] - [[package]] name = "ring" version = "0.16.20" @@ -3520,15 +2373,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "ripemd" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1facec54cb5e0dc08553501fa740091086d0259ad0067e0d4103448e4cb22ed3" -dependencies = [ - "digest 0.10.3", -] - [[package]] name = "ripemd160" version = "0.9.1" @@ -3540,22 +2384,6 @@ dependencies = [ "opaque-debug 0.3.0", ] -[[package]] -name = "rs_merkle" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a632a43487c1332be8e183588079f89b6820fab24e04db49521eacd536837372" -dependencies = [ - "micromath", - "sha2 0.10.2", -] - -[[package]] -name = "rustc-demangle" -version = "0.1.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ef03e0a2b150c7a90d01faf6254c9c48a41e95fb2a8c2ac1c6f0d2b9aefc342" - [[package]] name = "rustc-hash" version = 
"1.1.0" @@ -3568,28 +2396,6 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6" -[[package]] -name = "rustc_version" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" -dependencies = [ - "semver", -] - -[[package]] -name = "rustls" -version = "0.19.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35edb675feee39aec9c99fa5ff985081995a06d594114ae14cbe797ad7b7a6d7" -dependencies = [ - "base64", - "log", - "ring", - "sct 0.6.1", - "webpki 0.21.4", -] - [[package]] name = "rustls" version = "0.20.6" @@ -3598,41 +2404,8 @@ checksum = "5aab8ee6c7097ed6057f43c187a62418d0c05a4bd5f18b3571db50ee0f9ce033" dependencies = [ "log", "ring", - "sct 0.7.0", - "webpki 0.22.0", -] - -[[package]] -name = "rustls-native-certs" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a07b7c1885bd8ed3831c289b7870b13ef46fe0e856d288c30d9cc17d75a2092" -dependencies = [ - "openssl-probe", - "rustls 0.19.1", - "schannel", - "security-framework", -] - -[[package]] -name = "rustls-native-certs" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0167bac7a9f490495f3c33013e7722b53cb087ecbe082fb0c6387c96f634ea50" -dependencies = [ - "openssl-probe", - "rustls-pemfile", - "schannel", - "security-framework", -] - -[[package]] -name = "rustls-pemfile" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0864aeff53f8c05aa08d86e5ef839d3dfcf07aeba2db32f12db0ef716e87bd55" -dependencies = [ - "base64", + "sct", + "webpki", ] [[package]] @@ -3704,11 +2477,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c46be926081c9f4dd5dd9b6f1d3e3229f2360bc6502dd8836f84a93b7c75e99a" dependencies = [ "bitvec", - "cfg-if 1.0.0", + "cfg-if", "derive_more", "parity-scale-codec", "scale-info-derive", - "serde", ] [[package]] @@ -3723,16 +2495,6 @@ dependencies = [ "syn", ] -[[package]] -name = "schannel" -version = "0.1.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88d6731146462ea25d9244b2ed5fd1d716d25c52e4d54aa4fb0f3c4e9854dbe2" -dependencies = [ - "lazy_static", - "windows-sys", -] - [[package]] name = "schemars" version = "0.8.10" @@ -3781,16 +2543,6 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" -[[package]] -name = "sct" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b362b83898e0e69f38515b82ee15aa80636befe47c3b6d3d89a911e78fc228ce" -dependencies = [ - "ring", - "untrusted", -] - [[package]] name = "sct" version = "0.7.0" @@ -3801,18 +2553,6 @@ dependencies = [ "untrusted", ] -[[package]] -name = "sec1" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08da66b8b0965a5555b6bd6639e68ccba85e1e2506f5fbb089e93f8a04e1a2d1" -dependencies = [ - "der", - "generic-array 0.14.6", - "subtle", - "zeroize", -] - [[package]] name = "secp256k1" version = "0.21.3" @@ -3840,35 +2580,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "security-framework" -version = "2.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"2bc1bb97804af6631813c55739f771071e0f2ed33ee20b68c86ec505d906356c" -dependencies = [ - "bitflags", - "core-foundation", - "core-foundation-sys", - "libc", - "security-framework-sys", -] - -[[package]] -name = "security-framework-sys" -version = "2.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0160a13a177a45bfb43ce71c01580998474f556ad854dcbca936dd2841a5c556" -dependencies = [ - "core-foundation-sys", - "libc", -] - -[[package]] -name = "semver" -version = "1.0.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93f6841e709003d68bb2deee8c343572bf446003ec20a583e76f7b15cebf3711" - [[package]] name = "serde" version = "1.0.144" @@ -3931,30 +2642,6 @@ dependencies = [ "syn", ] -[[package]] -name = "sha-1" -version = "0.9.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99cd6713db3cf16b6c84e06321e049a9b9f699826e16096d23bbcc44d15d51a6" -dependencies = [ - "block-buffer 0.9.0", - "cfg-if 1.0.0", - "cpufeatures", - "digest 0.9.0", - "opaque-debug 0.3.0", -] - -[[package]] -name = "sha-1" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "028f48d513f9678cda28f6e4064755b3fbb2af6acd672f2c209b62323f7aea0f" -dependencies = [ - "cfg-if 1.0.0", - "cpufeatures", - "digest 0.10.3", -] - [[package]] name = "sha2" version = "0.8.2" @@ -3974,7 +2661,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4d58a1e1bf39749807d89cf2d98ac2dfa0ff1cb3faa38fbb64dd88ac8013d800" dependencies = [ "block-buffer 0.9.0", - "cfg-if 1.0.0", + "cfg-if", "cpufeatures", "digest 0.9.0", "opaque-debug 0.3.0", @@ -3986,7 +2673,7 @@ version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "55deaec60f81eefe3cce0dc50bda92d6d8e88f2a27df7c5033b42afeb1ed2676" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "cpufeatures", "digest 0.10.3", ] @@ -4036,28 +2723,6 @@ name = "signature" version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "02658e48d89f2bec991f9a78e69cfa4c316f8d6a6c4ec12fae1aeb263d486788" -dependencies = [ - "digest 0.9.0", - "rand_core 0.6.3", -] - -[[package]] -name = "simba" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e82063457853d00243beda9952e910b82593e4b07ae9f721b9278a99a0d3d5c" -dependencies = [ - "approx", - "num-complex", - "num-traits", - "paste", -] - -[[package]] -name = "simple-error" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc47a29ce97772ca5c927f75bac34866b16d64e07f330c3248e2d7226623901b" [[package]] name = "slab" @@ -4085,166 +2750,27 @@ dependencies = [ ] [[package]] -name = "soketto" -version = "0.7.1" +name = "sp-core" +version = "6.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41d1c5305e39e09653383c2c7244f2f78b3bcae37cf50c64cb4789c9f5096ec2" +checksum = "77963e2aa8fadb589118c3aede2e78b6c4bcf1c01d588fbf33e915b390825fbd" dependencies = [ - "base64", - "bytes", + "base58", + "bitflags", + "blake2-rfc", + "byteorder", + "dyn-clonable", + "ed25519-dalek", "futures", - "httparse", - "log", - "rand 0.8.5", - "sha-1 0.9.8", -] - -[[package]] -name = "sp-api" -version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" -dependencies = [ "hash-db", + "hash256-std-hasher", + "hex", + "impl-serde", + "lazy_static", + "libsecp256k1", "log", - 
"parity-scale-codec", - "sp-api-proc-macro", - "sp-core 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "sp-runtime 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "sp-state-machine 0.12.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "sp-version", - "thiserror", -] - -[[package]] -name = "sp-api-proc-macro" -version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" -dependencies = [ - "blake2", - "proc-macro-crate 1.2.1", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "sp-application-crypto" -version = "6.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acb4490364cb3b097a6755343e552495b0013778152300714be4647d107e9a2e" -dependencies = [ - "parity-scale-codec", - "scale-info", - "serde", - "sp-core 6.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sp-io 6.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sp-std 4.0.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "sp-application-crypto" -version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" -dependencies = [ - "parity-scale-codec", - "scale-info", - "serde", - "sp-core 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "sp-io 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", -] - -[[package]] -name = "sp-arithmetic" -version = "5.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31ef21f82cc10f75ed046b65e2f8048080ee76e59f1b8aed55c7150daebfd35b" -dependencies = [ - "integer-sqrt", - "num-traits", - "parity-scale-codec", - "scale-info", - "serde", - "sp-debug-derive 4.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sp-std 4.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "static_assertions", -] - -[[package]] -name = "sp-arithmetic" -version = "5.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" -dependencies = [ - "integer-sqrt", - "num-traits", - "parity-scale-codec", - "scale-info", - "serde", - "sp-debug-derive 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "static_assertions", -] - -[[package]] -name = "sp-blockchain" -version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" -dependencies = [ - "futures", - "log", - "lru", - "parity-scale-codec", - "parking_lot", - "sp-api", - "sp-consensus", - "sp-database", - "sp-runtime 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "sp-state-machine 0.12.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "thiserror", -] - -[[package]] -name = "sp-consensus" -version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" -dependencies = [ - "async-trait", - "futures", - "futures-timer", - "log", - "parity-scale-codec", 
- "sp-core 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "sp-inherents", - "sp-runtime 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "sp-state-machine 0.12.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "sp-version", - "thiserror", -] - -[[package]] -name = "sp-core" -version = "6.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77963e2aa8fadb589118c3aede2e78b6c4bcf1c01d588fbf33e915b390825fbd" -dependencies = [ - "base58", - "bitflags", - "blake2-rfc", - "byteorder", - "dyn-clonable", - "ed25519-dalek", - "futures", - "hash-db", - "hash256-std-hasher", - "hex", - "impl-serde", - "lazy_static", - "libsecp256k1", - "log", - "merlin", - "num-traits", + "merlin", + "num-traits", "parity-scale-codec", "parity-util-mem", "parking_lot", @@ -4344,26 +2870,6 @@ dependencies = [ "twox-hash", ] -[[package]] -name = "sp-core-hashing-proc-macro" -version = "5.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" -dependencies = [ - "proc-macro2", - "quote", - "sp-core-hashing 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "syn", -] - -[[package]] -name = "sp-database" -version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" -dependencies = [ - "kvdb", - "parking_lot", -] - [[package]] name = "sp-debug-derive" version = "4.0.0" @@ -4408,185 +2914,6 @@ dependencies = [ "sp-storage 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", ] -[[package]] -name = "sp-inherents" -version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" -dependencies = [ - "async-trait", - "impl-trait-for-tuples", - "parity-scale-codec", - "sp-core 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "sp-runtime 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "thiserror", -] - -[[package]] -name = "sp-io" -version = "6.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "935fd3c71bad6811a7984cabb74d323b8ca3107024024c3eabb610e0182ba8d3" -dependencies = [ - "futures", - "hash-db", - "libsecp256k1", - "log", - "parity-scale-codec", - "parking_lot", - "secp256k1", - "sp-core 6.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sp-externalities 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sp-keystore 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sp-runtime-interface 6.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sp-state-machine 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sp-std 4.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sp-tracing 5.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sp-trie 6.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sp-wasm-interface 6.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "tracing", - "tracing-core", -] - -[[package]] -name = "sp-io" -version = "6.0.0" -source = 
"git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" -dependencies = [ - "futures", - "hash-db", - "libsecp256k1", - "log", - "parity-scale-codec", - "parking_lot", - "secp256k1", - "sp-core 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "sp-externalities 0.12.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "sp-keystore 0.12.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "sp-runtime-interface 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "sp-state-machine 0.12.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "sp-tracing 5.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "sp-trie 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "sp-wasm-interface 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "tracing", - "tracing-core", -] - -[[package]] -name = "sp-keystore" -version = "0.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3261eddca8c8926e3e1de136a7980cb3afc3455247d9d6f3119d9b292f73aaee" -dependencies = [ - "async-trait", - "futures", - "merlin", - "parity-scale-codec", - "parking_lot", - "schnorrkel", - "sp-core 6.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sp-externalities 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)", - "thiserror", -] - -[[package]] -name = "sp-keystore" -version = "0.12.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" -dependencies = [ - "async-trait", - "futures", - "merlin", - "parity-scale-codec", - "parking_lot", - "schnorrkel", - "sp-core 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "sp-externalities 0.12.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "thiserror", -] - -[[package]] -name = "sp-mmr-primitives" -version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" -dependencies = [ - "log", - "parity-scale-codec", - "serde", - "sp-api", - "sp-core 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "sp-debug-derive 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "sp-runtime 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", -] - -[[package]] -name = "sp-panic-handler" -version = "4.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2101f3c555fceafcfcfb0e61c55ea9ed80dc60bd77d54d9f25b369edb029e9a4" -dependencies = [ - "backtrace", - "lazy_static", - "regex", -] - -[[package]] -name = "sp-panic-handler" -version = "4.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" -dependencies = [ - "backtrace", - "lazy_static", - "regex", -] - -[[package]] -name = "sp-runtime" -version = "6.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb7d8a8d5ab5d349c6cf9300af1721b7b6446ba63401dbb11c10a1d65197aa5f" -dependencies = [ - "either", - "hash256-std-hasher", - "impl-trait-for-tuples", - 
"log", - "parity-scale-codec", - "parity-util-mem", - "paste", - "rand 0.7.3", - "scale-info", - "serde", - "sp-application-crypto 6.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sp-arithmetic 5.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sp-core 6.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sp-io 6.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sp-std 4.0.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "sp-runtime" -version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" -dependencies = [ - "either", - "hash256-std-hasher", - "impl-trait-for-tuples", - "log", - "parity-scale-codec", - "parity-util-mem", - "paste", - "rand 0.7.3", - "scale-info", - "serde", - "sp-application-crypto 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "sp-arithmetic 5.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "sp-core 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "sp-io 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", -] - [[package]] name = "sp-runtime-interface" version = "6.0.0" @@ -4617,105 +2944,34 @@ dependencies = [ "sp-runtime-interface-proc-macro 5.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", "sp-storage 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "sp-tracing 5.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "sp-wasm-interface 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "static_assertions", -] - -[[package]] -name = "sp-runtime-interface-proc-macro" -version = "5.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22ecb916b9664ed9f90abef0ff5a3e61454c1efea5861b2997e03f39b59b955f" -dependencies = [ - "Inflector", - "proc-macro-crate 1.2.1", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "sp-runtime-interface-proc-macro" -version = "5.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" -dependencies = [ - "Inflector", - "proc-macro-crate 1.2.1", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "sp-session" -version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" -dependencies = [ - "parity-scale-codec", - "scale-info", - "sp-api", - "sp-core 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "sp-runtime 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "sp-staking", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", -] - -[[package]] -name = "sp-staking" -version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" -dependencies = [ - "parity-scale-codec", - "scale-info", - "sp-runtime 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-tracing 5.0.0 
(git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-wasm-interface 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "static_assertions", ] [[package]] -name = "sp-state-machine" -version = "0.12.0" +name = "sp-runtime-interface-proc-macro" +version = "5.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ecee3b33eb78c99997676a571656bcc35db6886abecfddd13e76a73b5871c6c1" +checksum = "22ecb916b9664ed9f90abef0ff5a3e61454c1efea5861b2997e03f39b59b955f" dependencies = [ - "hash-db", - "log", - "num-traits", - "parity-scale-codec", - "parking_lot", - "rand 0.7.3", - "smallvec", - "sp-core 6.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sp-externalities 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sp-panic-handler 4.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sp-std 4.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sp-trie 6.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "thiserror", - "tracing", - "trie-db", - "trie-root", + "Inflector", + "proc-macro-crate 1.2.1", + "proc-macro2", + "quote", + "syn", ] [[package]] -name = "sp-state-machine" -version = "0.12.0" +name = "sp-runtime-interface-proc-macro" +version = "5.0.0" source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" dependencies = [ - "hash-db", - "log", - "num-traits", - "parity-scale-codec", - "parking_lot", - "rand 0.7.3", - "smallvec", - "sp-core 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "sp-externalities 0.12.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "sp-panic-handler 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "sp-trie 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "thiserror", - "tracing", - "trie-root", + "Inflector", + "proc-macro-crate 1.2.1", + "proc-macro2", + "quote", + "syn", ] [[package]] @@ -4756,22 +3012,6 @@ dependencies = [ "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", ] -[[package]] -name = "sp-timestamp" -version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" -dependencies = [ - "async-trait", - "futures-timer", - "log", - "parity-scale-codec", - "sp-api", - "sp-inherents", - "sp-runtime 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "thiserror", -] - [[package]] name = "sp-tracing" version = "5.0.0" @@ -4797,66 +3037,6 @@ dependencies = [ "tracing-subscriber 0.2.25", ] -[[package]] -name = "sp-trie" -version = "6.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6fc34f4f291886914733e083b62708d829f3e6b8d7a7ca7fa8a55a3d7640b0b" -dependencies = [ - "hash-db", - "memory-db", - "parity-scale-codec", - "scale-info", - "sp-core 6.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sp-std 4.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "trie-db", - "trie-root", -] - -[[package]] -name = "sp-trie" -version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" 
-dependencies = [ - "hash-db", - "memory-db", - "parity-scale-codec", - "scale-info", - "sp-core 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "thiserror", - "trie-db", - "trie-root", -] - -[[package]] -name = "sp-version" -version = "5.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" -dependencies = [ - "impl-serde", - "parity-scale-codec", - "parity-wasm", - "scale-info", - "serde", - "sp-core-hashing-proc-macro", - "sp-runtime 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", - "sp-version-proc-macro", - "thiserror", -] - -[[package]] -name = "sp-version-proc-macro" -version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" -dependencies = [ - "parity-scale-codec", - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "sp-wasm-interface" version = "6.0.0" @@ -4909,19 +3089,6 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" -[[package]] -name = "statrs" -version = "0.15.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05bdbb8e4e78216a85785a85d3ec3183144f98d0097b9281802c019bb07a6f05" -dependencies = [ - "approx", - "lazy_static", - "nalgebra", - "num-traits", - "rand 0.8.5", -] - [[package]] name = "strsim" version = "0.10.0" @@ -4962,69 +3129,6 @@ version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "734676eb262c623cec13c3155096e08d1f8f29adce39ba17948b18dad1e54142" -[[package]] -name = "subxt" -version = "0.21.0" -source = "git+https://github.com/paritytech/subxt?rev=ec23283d75e4b3b894294e351fd7ffa2b4431201#ec23283d75e4b3b894294e351fd7ffa2b4431201" -dependencies = [ - "bitvec", - "derivative", - "frame-metadata", - "futures", - "hex", - "jsonrpsee 0.13.1", - "parity-scale-codec", - "parking_lot", - "scale-info", - "serde", - "serde_json", - "sp-core 6.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sp-runtime 6.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "subxt-macro", - "subxt-metadata", - "thiserror", - "tracing", -] - -[[package]] -name = "subxt-codegen" -version = "0.21.0" -source = "git+https://github.com/paritytech/subxt?rev=ec23283d75e4b3b894294e351fd7ffa2b4431201#ec23283d75e4b3b894294e351fd7ffa2b4431201" -dependencies = [ - "darling", - "frame-metadata", - "heck 0.4.0", - "parity-scale-codec", - "proc-macro-error", - "proc-macro2", - "quote", - "scale-info", - "subxt-metadata", - "syn", -] - -[[package]] -name = "subxt-macro" -version = "0.21.0" -source = "git+https://github.com/paritytech/subxt?rev=ec23283d75e4b3b894294e351fd7ffa2b4431201#ec23283d75e4b3b894294e351fd7ffa2b4431201" -dependencies = [ - "darling", - "proc-macro-error", - "subxt-codegen", - "syn", -] - -[[package]] -name = "subxt-metadata" -version = "0.21.0" -source = "git+https://github.com/paritytech/subxt?rev=ec23283d75e4b3b894294e351fd7ffa2b4431201#ec23283d75e4b3b894294e351fd7ffa2b4431201" -dependencies = [ - "frame-metadata", - "parity-scale-codec", - "scale-info", - "sp-core 6.0.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "syn" version = "1.0.99" @@ 
-5076,7 +3180,7 @@ version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5cdb1ef4eaeeaddc8fbd371e5017057064af0911902ef36b39801f67cc6d79e4" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "fastrand", "libc", "redox_syscall", @@ -5125,18 +3229,6 @@ dependencies = [ "url", ] -[[package]] -name = "tendermint-light-client-verifier" -version = "0.24.0-pre.2" -source = "git+https://github.com/composableFi/tendermint-rs?rev=5a74e0f8da4d3dab83cc04b5f1363b018cf3d9e8#5a74e0f8da4d3dab83cc04b5f1363b018cf3d9e8" -dependencies = [ - "derive_more", - "flex-error", - "serde", - "tendermint", - "time", -] - [[package]] name = "tendermint-proto" version = "0.24.0-pre.2" @@ -5159,16 +3251,9 @@ name = "tendermint-rpc" version = "0.24.0-pre.2" source = "git+https://github.com/composableFi/tendermint-rs?rev=5a74e0f8da4d3dab83cc04b5f1363b018cf3d9e8#5a74e0f8da4d3dab83cc04b5f1363b018cf3d9e8" dependencies = [ - "async-trait", - "async-tungstenite", "bytes", "flex-error", - "futures", "getrandom 0.2.7", - "http", - "hyper", - "hyper-proxy", - "hyper-rustls 0.22.1", "peg", "pin-project", "serde", @@ -5181,28 +3266,11 @@ dependencies = [ "tendermint-proto", "thiserror", "time", - "tokio", - "tracing", "url", "uuid", "walkdir", ] -[[package]] -name = "tendermint-testgen" -version = "0.24.0-pre.2" -source = "git+https://github.com/composableFi/tendermint-rs?rev=5a74e0f8da4d3dab83cc04b5f1363b018cf3d9e8#5a74e0f8da4d3dab83cc04b5f1363b018cf3d9e8" -dependencies = [ - "ed25519-consensus", - "gumdrop", - "serde", - "serde_json", - "simple-error", - "tempfile", - "tendermint", - "time", -] - [[package]] name = "termcolor" version = "1.1.3" @@ -5360,28 +3428,6 @@ dependencies = [ "syn", ] -[[package]] -name = "tokio-rustls" -version = "0.22.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc6844de72e57df1980054b38be3a9f4702aba4858be64dd700181a8a6d0e1b6" -dependencies = [ - "rustls 0.19.1", - "tokio", - "webpki 0.21.4", -] - -[[package]] -name = "tokio-rustls" -version = "0.23.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c43ee83903113e03984cb9e5cebe6c04a5116269e900e3ddba8f068a62adda59" -dependencies = [ - "rustls 0.20.6", - "tokio", - "webpki 0.22.0", -] - [[package]] name = "tokio-stream" version = "0.1.9" @@ -5401,7 +3447,6 @@ checksum = "cc463cd8deddc3770d20f9852143d50bf6094e640b485cb2e189a2099085ff45" dependencies = [ "bytes", "futures-core", - "futures-io", "futures-sink", "pin-project-lite", "tokio", @@ -5519,7 +3564,7 @@ version = "0.1.36" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2fce9567bd60a67d08a16488756721ba392f24f29006402881e43b19aac64307" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "log", "pin-project-lite", "tracing-attributes", @@ -5547,16 +3592,6 @@ dependencies = [ "valuable", ] -[[package]] -name = "tracing-error" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d686ec1c0f384b1277f097b2f279a2ecc11afe8c133c1aabf036a27cb4cd206e" -dependencies = [ - "tracing", - "tracing-subscriber 0.3.15", -] - [[package]] name = "tracing-futures" version = "0.2.5" @@ -5631,66 +3666,19 @@ dependencies = [ "tracing-serde", ] -[[package]] -name = "trie-db" -version = "0.23.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d32d034c0d3db64b43c31de38e945f15b40cd4ca6d2dcfc26d4798ce8de4ab83" -dependencies = [ - "hash-db", - "hashbrown 0.12.3", - "log", - "rustc-hex", - "smallvec", -] - -[[package]] -name = 
"trie-root" -version = "0.17.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a36c5ca3911ed3c9a5416ee6c679042064b93fc637ded67e25f92e68d783891" -dependencies = [ - "hash-db", -] - [[package]] name = "try-lock" version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" -[[package]] -name = "tt-call" -version = "1.0.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e66dcbec4290c69dd03c57e76c2469ea5c7ce109c6dd4351c13055cf71ea055" - -[[package]] -name = "tungstenite" -version = "0.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ada8297e8d70872fa9a551d93250a9f407beb9f37ef86494eb20012a2ff7c24" -dependencies = [ - "base64", - "byteorder", - "bytes", - "http", - "httparse", - "input_buffer", - "log", - "rand 0.8.5", - "sha-1 0.9.8", - "url", - "utf-8", -] - [[package]] name = "twox-hash" version = "1.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "digest 0.10.3", "rand 0.8.5", "static_assertions", @@ -5714,15 +3702,6 @@ dependencies = [ "static_assertions", ] -[[package]] -name = "unicase" -version = "2.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50f37be617794602aabbeee0be4f259dc1778fabe05e2d67ee8f79326d5cb4f6" -dependencies = [ - "version_check", -] - [[package]] name = "unicode-bidi" version = "0.3.8" @@ -5773,10 +3752,10 @@ dependencies = [ "flate2", "log", "once_cell", - "rustls 0.20.6", + "rustls", "url", - "webpki 0.22.0", - "webpki-roots 0.22.4", + "webpki", + "webpki-roots", ] [[package]] @@ -5791,12 +3770,6 @@ dependencies = [ "percent-encoding", ] -[[package]] -name = "utf-8" -version = "0.7.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" - [[package]] name = "uuid" version = "0.8.2" @@ -5860,7 +3833,7 @@ version = "0.2.82" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc7652e3f6c4706c8d9cd54832c4a4ccb9b5336e2c3bd154d5cccfbf1c1f5f7d" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "wasm-bindgen-macro", ] @@ -5917,7 +3890,7 @@ dependencies = [ "downcast-rs", "libc", "memory_units", - "num-rational 0.2.4", + "num-rational", "num-traits", "parity-wasm", "wasmi-validation", @@ -5942,16 +3915,6 @@ dependencies = [ "wasm-bindgen", ] -[[package]] -name = "webpki" -version = "0.21.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8e38c0608262c46d4a56202ebabdeb094cef7e560ca7a226c6bf055188aa4ea" -dependencies = [ - "ring", - "untrusted", -] - [[package]] name = "webpki" version = "0.22.0" @@ -5962,22 +3925,13 @@ dependencies = [ "untrusted", ] -[[package]] -name = "webpki-roots" -version = "0.21.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aabe153544e473b775453675851ecc86863d2a81d786d741f6b76778f2a48940" -dependencies = [ - "webpki 0.21.4", -] - [[package]] name = "webpki-roots" version = "0.22.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f1c760f0d366a6c24a02ed7816e23e691f5d92291f94d15e836006fd11b04daf" dependencies = [ - "webpki 0.22.0", + "webpki", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index e6336aa79b..3eda14e8e9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -5,5 
+5,6 @@ resolver = "2" members = [ "modules", "proto", - "proto-compiler" + "proto-compiler", + "derive", ] \ No newline at end of file diff --git a/codecov.yml b/codecov.yml deleted file mode 100644 index a48cba05d6..0000000000 --- a/codecov.yml +++ /dev/null @@ -1,23 +0,0 @@ -codecov: - require_ci_to_pass: yes - allow_coverage_offsets: true - -ignore: - -coverage: - precision: 1 - round: down - range: "70...100" - - status: - project: true - patch: true - changes: true - -parsers: - gcov: - branch_detection: - conditional: yes - loop: yes - method: yes - macro: no diff --git a/config.toml b/config.toml deleted file mode 100644 index 9e3749cdf8..0000000000 --- a/config.toml +++ /dev/null @@ -1,252 +0,0 @@ -# The global section has parameters that apply globally to the relayer operation. -[global] - -# Specify the verbosity for the relayer logging output. Default: 'info' -# Valid options are 'error', 'warn', 'info', 'debug', 'trace'. -log_level = 'info' - - -# Specify the mode to be used by the relayer. [Required] -[mode] - -# Specify the client mode. -[mode.clients] - -# Whether or not to enable the client workers. [Required] -enabled = true - -# Whether or not to enable periodic refresh of clients. [Default: true] -# This feature only applies to clients that underlie an open channel. -# For Tendermint clients, the frequency at which Hermes refreshes them is 2/3 of their -# trusting period (e.g., refresh every ~9 days if the trusting period is 14 days). -# Note: Even if this is disabled, clients will be refreshed automatically if -# there is activity on a connection or channel they are involved with. -refresh = true - -# Whether or not to enable misbehaviour detection for clients. [Default: false] -misbehaviour = true - -# Specify the connections mode. -[mode.connections] - -# Whether or not to enable the connection workers for handshake completion. [Required] -enabled = false - -# Specify the channels mode. -[mode.channels] - -# Whether or not to enable the channel workers for handshake completion. [Required] -enabled = false - -# Specify the packets mode. -[mode.packets] - -# Whether or not to enable the packet workers. [Required] -enabled = true - -# Parametrize the periodic packet clearing feature. -# Interval (in number of blocks) at which pending packets -# should be periodically cleared. A value of '0' will disable -# periodic packet clearing. [Default: 100] -clear_interval = 100 - -# Whether or not to clear packets on start. [Default: false] -clear_on_start = true - -# Toggle the transaction confirmation mechanism. -# The tx confirmation mechanism periodically queries the `/tx_search` RPC -# endpoint to check that previously-submitted transactions -# (to any chain in this config file) have been successfully delivered. -# If they have not been, and `clear_interval = 0`, then those packets are -# queued up for re-submission. -# Experimental feature. Affects telemetry if set to false. -# [Default: true] -tx_confirmation = true - -# The REST section defines parameters for Hermes' built-in RESTful API. -# https://hermes.informal.systems/rest.html -[rest] - -# Whether or not to enable the REST service. Default: false -enabled = true - -# Specify the IPv4/6 host over which the built-in HTTP server will serve the RESTful -# API requests. Default: 127.0.0.1 -host = '127.0.0.1' - -# Specify the port over which the built-in HTTP server will serve the restful API -# requests. 
Default: 3000 -port = 3000 - - -# The telemetry section defines parameters for Hermes' built-in telemetry capabilities. -# https://hermes.informal.systems/telemetry.html -[telemetry] - -# Whether or not to enable the telemetry service. Default: false -enabled = true - -# Specify the IPv4/6 host over which the built-in HTTP server will serve the metrics -# gathered by the telemetry service. Default: 127.0.0.1 -host = '127.0.0.1' - -# Specify the port over which the built-in HTTP server will serve the metrics gathered -# by the telemetry service. Default: 3001 -port = 3001 - - -# A chains section includes parameters related to a chain and the full node to which -# the relayer can send transactions and queries. -[[chains]] - -# Specify the chain ID. Required -id = 'ibc-0' - -# Specify the RPC address and port where the chain RPC server listens on. Required -rpc_addr = 'http://127.0.0.1:26657' - -# Specify the GRPC address and port where the chain GRPC server listens on. Required -grpc_addr = 'http://127.0.0.1:9090' - -# Specify the WebSocket address and port where the chain WebSocket server -# listens on. Required -websocket_addr = 'ws://127.0.0.1:26657/websocket' - -# Specify the maximum amount of time (duration) that the RPC requests should -# take before timing out. Default: 10s (10 seconds) -# Note: Hermes uses this parameter _only_ in `start` mode; for all other CLIs, -# Hermes uses a large preconfigured timeout (on the order of minutes). -rpc_timeout = '10s' - -# Specify the prefix used by the chain. Required -account_prefix = 'cosmos' - -# Specify the name of the private key to use for signing transactions. Required -# See the Adding Keys chapter for more information about managing signing keys: -# https://hermes.informal.systems/commands/keys/index.html#adding-keys -key_name = 'testkey' - -# Specify the address type which determines: -# 1) address derivation; -# 2) how to retrieve and decode accounts and pubkeys; -# 3) the message signing method. -# The current configuration options are for Cosmos SDK and Ethermint. -# -# Example configuration for chains based on Ethermint library: -# -# address_type = { derivation = 'ethermint', proto_type = { pk_type = '/ethermint.crypto.v1.ethsecp256k1.PubKey' } } -# -# Default: { derivation = 'cosmos' }, i.e. address derivation as in Cosmos SDK. -# Warning: This is an advanced feature! Modify with caution. -address_type = { derivation = 'cosmos' } - -# Specify the store prefix used by the on-chain IBC modules. Required -# Recommended value for Cosmos SDK: 'ibc' -store_prefix = 'ibc' - -# Specify the default amount of gas to be used in case the tx simulation fails, -# and Hermes cannot estimate the amount of gas needed. -# Default: 100 000 -default_gas = 100000 - -# Specify the maximum amount of gas to be used as the gas limit for a transaction. -# Default: 400 000 -max_gas = 400000 - -# Specify the price per gas used of the fee to submit a transaction and -# the denomination of the fee. Required -gas_price = { price = 0.001, denom = 'stake' } - -# Specify the ratio by which to increase the gas estimate used to compute the fee, -# to account for potential estimation error. Default: 0.1, ie. 10%. -# Valid range: 0.0 to 1.0 (inclusive) -gas_adjustment = 1.0 - -# Specify how many IBC messages at most to include in a single transaction. -# Default: 30 -max_msg_num = 30 - -# Specify the maximum size, in bytes, of each transaction that Hermes will submit. 
-# Default: 2097152 (2 MiB) -max_tx_size = 2097152 - -# Specify the maximum amount of time to tolerate a clock drift. -# The clock drift parameter defines how much new (untrusted) header's time -# can drift into the future. Default: 5s -clock_drift = '5s' - -# Specify the maximum time per block for this chain. -# The block time together with the clock drift are added to the source drift to estimate -# the maximum clock drift when creating a client on this chain. Default: 30s -# For cosmos-SDK chains a good approximation is `timeout_propose` + `timeout_commit` -# Note: This MUST be the same as the `max_expected_time_per_block` genesis parameter for Tendermint chains. -max_block_time = '30s' - -# Specify the amount of time to be used as the light client trusting period. -# It should be significantly less than the unbonding period -# (e.g. unbonding period = 3 weeks, trusting period = 2 weeks). -# Default: 2/3 of the `unbonding period` for Cosmos SDK chains -trusting_period = '14days' - -# Specify the trust threshold for the light client, ie. the maximum fraction of validators -# which have changed between two blocks. -# Default: { numerator = '1', denominator = '3' }, ie. 1/3. -# Warning: This is an advanced feature! Modify with caution. -trust_threshold = { numerator = '1', denominator = '3' } - -# Specify a string that Hermes will use as a memo for each transaction it submits -# to this chain. The string is limited to 50 characters. Default: '' (empty). -# Note: Hermes will append to the string defined here additional -# operational debugging information, e.g., relayer build version. -memo_prefix = '' - -# This section specifies the filters for policy based relaying. -# -# Default: no policy / filters, allow all packets on all channels. -# -# Only packet filtering based on channel identifier can be specified. -# A channel filter has two fields: -# 1. `policy` - one of two types are supported: -# - 'allow': permit relaying _only on_ the port/channel id in the list below, -# - 'deny': permit relaying on any channel _except for_ the list below. -# 2. `list` - the list of channels specified by the port and channel identifiers. -# Optionally, each element may also contains wildcards, for eg. 'ica*' -# to match all identifiers starting with 'ica' or '*' to match all identifiers. -# -# Example configuration of a channel filter, only allowing packet relaying on -# channel with port ID 'transfer' and channel ID 'channel-0', as well as on -# all ICA channels. -# -# [chains.packet_filter] -# policy = 'allow' -# list = [ -# ['ica*', '*'], -# ['transfer', 'channel-0'], -# ] - -# Specify that the transaction fees should be payed from this fee granter's account. -# Optional. If unspecified (the default behavior), then no fee granter is used, and -# the account specified in `key_name` will pay the tx fees for all transactions -# submitted to this chain. 
-# fee_granter = '' - -[[chains]] -id = 'ibc-1' -rpc_addr = 'http://127.0.0.1:26557' -grpc_addr = 'http://127.0.0.1:9091' -websocket_addr = 'ws://127.0.0.1:26557/websocket' -rpc_timeout = '10s' -account_prefix = 'cosmos' -key_name = 'testkey' -store_prefix = 'ibc' -default_gas = 100000 -max_gas = 400000 -gas_price = { price = 0.001, denom = 'stake' } -gas_adjustment = 0.1 -max_msg_num = 30 -max_tx_size = 2097152 -clock_drift = '5s' -max_block_time = '30s' -trusting_period = '14days' -trust_threshold = { numerator = '1', denominator = '3' } -address_type = { derivation = 'cosmos' } diff --git a/derive/Cargo.toml b/derive/Cargo.toml new file mode 100644 index 0000000000..9c2da5343c --- /dev/null +++ b/derive/Cargo.toml @@ -0,0 +1,14 @@ +[package] +name = "ibc-derive" +version = "0.1.0" +edition = "2021" +license = "Apache-2.0" + +[dependencies] +syn = { version = "1.0.99", features = ["extra-traits", "full", "derive", "parsing"] } +quote = "1.0.21" +proc-macro2 = "1.0.43" +convert_case = "0.6.0" + +[lib] +proc-macro = true diff --git a/derive/src/client_def.rs b/derive/src/client_def.rs new file mode 100644 index 0000000000..5422da0829 --- /dev/null +++ b/derive/src/client_def.rs @@ -0,0 +1,643 @@ +use crate::State; +use quote::quote; + +impl State { + fn impl_fn_verify_header(&self) -> proc_macro2::TokenStream { + let any_client_state = &self.any_data.client_state_ident; + let any_header = &self.any_data.header_ident; + let gen_params = &self.generics.params; + + let cases = self.clients.iter().map(|client| { + let variant_ident = &client.variant_ident; + let attrs = &client.attrs; + quote! { + #(#attrs)* + Self::#variant_ident(client) => { + let client_type = client_state.client_type().to_owned(); + let (client_state, header) = downcast!( + client_state => #any_client_state::<#gen_params>::#variant_ident, + header => #any_header::#variant_ident, + ) + .ok_or_else(|| Error::client_args_type_mismatch(client_type))?; + + client.verify_header::(ctx, client_id, client_state, header) + } + } + }); + + quote! { + #[doc = "Validate an incoming header"] + fn verify_header( + &self, + ctx: &Ctx, + client_id: ClientId, + client_state: Self::ClientState, + header: Self::Header, + ) -> Result<(), Error> + where + Ctx: ReaderContext, + { + match self { + #(#cases)* + } + } + } + } + + fn impl_fn_update_state(&self) -> proc_macro2::TokenStream { + let any_client_state = &self.any_data.client_state_ident; + let any_header = &self.any_data.header_ident; + let gen_params = &self.generics.params; + let cases = self.clients.iter().map(|client| { + let variant_ident = &client.variant_ident; + let attrs = &client.attrs; + quote! { + #(#attrs)* + Self::#variant_ident(client) => { + let client_type = client_state.client_type().to_owned(); + let (client_state, header) = downcast!( + client_state => #any_client_state::<#gen_params>::#variant_ident, + header => #any_header::#variant_ident, + ) + .ok_or_else(|| Error::client_args_type_mismatch(client_type))?; + + let (new_state, new_consensus) = + client.update_state(ctx, client_id, client_state, header)?; + + Ok((Self::ClientState::#variant_ident(new_state), new_consensus)) + } + } + }); + + quote! 
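+        // Generated `update_state`: each arm downcasts the `Any*` client state and header
+        // to that variant's concrete types, delegates to the inner `ClientDef`, and re-wraps
+        // the returned client state in the same enum variant. For an (assumed) `Tendermint`
+        // variant an arm reads roughly:
+        //   Self::Tendermint(client) => { /* downcast */ client.update_state(ctx, client_id, client_state, header) /* re-wrap */ }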
{ + #[doc = "Validates an incoming `header` against the latest consensus state of this client."] + fn update_state( + &self, + ctx: &Ctx, + client_id: ClientId, + client_state: Self::ClientState, + header: Self::Header, + ) -> Result<(Self::ClientState, ConsensusUpdateResult), Error> + where + Ctx: ReaderContext, + { + match self { + #(#cases)* + } + } + } + } + + fn impl_fn_update_state_on_misbehaviour(&self) -> proc_macro2::TokenStream { + let any_client_state = &self.any_data.client_state_ident; + let any_header = &self.any_data.header_ident; + let gen_params = &self.generics.params; + let cases = self.clients.iter().map(|client| { + let variant_ident = &client.variant_ident; + let attrs = &client.attrs; + quote! { + #(#attrs)* + Self::#variant_ident(client) => { + let client_type = client_state.client_type().to_owned(); + let (client_state, header) = downcast!( + client_state => #any_client_state::<#gen_params>::#variant_ident, + header => #any_header::#variant_ident, + ) + .ok_or_else(|| Error::client_args_type_mismatch(client_type))?; + + let client_state = client.update_state_on_misbehaviour(client_state, header)?; + Ok(Self::ClientState::#variant_ident(client_state)) + } + } + }); + + quote! { + fn update_state_on_misbehaviour( + &self, + client_state: Self::ClientState, + header: Self::Header, + ) -> Result { + match self { + #(#cases)* + } + } + } + } + + fn impl_fn_check_for_misbehaviour(&self) -> proc_macro2::TokenStream { + let cases = self.clients.iter().map(|client| { + let variant_ident = &client.variant_ident; + let attrs = &client.attrs; + quote! { + #(#attrs)* + Self::#variant_ident(client) => { + let client_type = client_state.client_type().to_owned(); + let (client_state, header) = downcast!( + client_state => Self::ClientState::#variant_ident, + header => Self::Header::#variant_ident, + ) + .ok_or_else(|| Error::client_args_type_mismatch(client_type))?; + client.check_for_misbehaviour(ctx, client_id, client_state, header) + } + } + }); + + quote! { + #[doc = "Checks for misbehaviour in an incoming header"] + fn check_for_misbehaviour( + &self, + ctx: &Ctx, + client_id: ClientId, + client_state: Self::ClientState, + header: Self::Header, + ) -> Result + where + Ctx: ReaderContext, + { + match self { + #(#cases)* + } + } + } + } + + fn impl_fn_verify_upgrade_and_update_state(&self) -> proc_macro2::TokenStream { + let cases = self.clients.iter().map(|client| { + let variant_ident = &client.variant_ident; + let attrs = &client.attrs; + quote! { + #(#attrs)* + Self::#variant_ident(client) => { + let client_type = client_state.client_type().to_owned(); + let (client_state, consensus_state) = downcast!( + client_state => Self::ClientState::#variant_ident, + consensus_state => Self::ConsensusState::#variant_ident, + ) + .ok_or_else(|| Error::client_args_type_mismatch(client_type))?; + + let (new_state, new_consensus) = client.verify_upgrade_and_update_state::( + client_state, + consensus_state, + proof_upgrade_client, + proof_upgrade_consensus_state, + )?; + + Ok((Self::ClientState::#variant_ident(new_state), new_consensus)) + } + } + }); + + quote! 
{ + fn verify_upgrade_and_update_state( + &self, + client_state: &Self::ClientState, + consensus_state: &Self::ConsensusState, + proof_upgrade_client: Vec, + proof_upgrade_consensus_state: Vec, + ) -> Result<(Self::ClientState, ConsensusUpdateResult), Error> { + match self { + #(#cases)* + } + } + } + } + + fn impl_fn_verify_client_consensus_state(&self) -> proc_macro2::TokenStream { + let cases = self.clients.iter().map(|client| { + let variant_ident = &client.variant_ident; + let attrs = &client.attrs; + quote! { + #(#attrs)* + Self::#variant_ident(client) => { + let client_type = client_state.client_type().to_owned(); + let client_state = downcast!( + client_state => Self::ClientState::#variant_ident + ) + .ok_or_else(|| Error::client_args_type_mismatch(client_type))?; + + client.verify_client_consensus_state( + ctx, + client_state, + height, + prefix, + proof, + root, + client_id, + consensus_height, + expected_consensus_state, + ) + } + } + }); + + quote! { + fn verify_client_consensus_state( + &self, + ctx: &Ctx, + client_state: &Self::ClientState, + height: Height, + prefix: &CommitmentPrefix, + proof: &CommitmentProofBytes, + root: &CommitmentRoot, + client_id: &ClientId, + consensus_height: Height, + expected_consensus_state: &Ctx::AnyConsensusState, + ) -> Result<(), Error> { + match self { + #(#cases)* + } + } + } + } + + fn impl_fn_verify_connection_state(&self) -> proc_macro2::TokenStream { + let cases = self.clients.iter().map(|client| { + let variant_ident = &client.variant_ident; + let attrs = &client.attrs; + quote! { + #(#attrs)* + Self::#variant_ident(client) => { + let client_type = client_state.client_type().to_owned(); + let client_state = downcast!(client_state => Self::ClientState::#variant_ident) + .ok_or_else(|| Error::client_args_type_mismatch(client_type))?; + + client.verify_connection_state( + ctx, + client_id, + client_state, + height, + prefix, + proof, + root, + connection_id, + expected_connection_end, + ) + } + } + }); + + quote! { + fn verify_connection_state( + &self, + ctx: &Ctx, + client_id: &ClientId, + client_state: &Self::ClientState, + height: Height, + prefix: &CommitmentPrefix, + proof: &CommitmentProofBytes, + root: &CommitmentRoot, + connection_id: &ConnectionId, + expected_connection_end: &ConnectionEnd, + ) -> Result<(), Error> { + match self { + #(#cases)* + } + } + } + } + + fn impl_fn_verify_channel_state(&self) -> proc_macro2::TokenStream { + let cases = self.clients.iter().map(|client| { + let variant_ident = &client.variant_ident; + let attrs = &client.attrs; + quote! { + #(#attrs)* + Self::#variant_ident(client) => { + let client_type = client_state.client_type().to_owned(); + let client_state = downcast!(client_state => Self::ClientState::#variant_ident) + .ok_or_else(|| Error::client_args_type_mismatch(client_type))?; + + client.verify_channel_state( + ctx, + client_id, + client_state, + height, + prefix, + proof, + root, + port_id, + channel_id, + expected_channel_end, + ) + } + } + }); + + quote! 
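+        // Generated `verify_channel_state`: downcasts the wrapped client state and passes the
+        // proof, commitment root, and port/channel identifiers straight through to the inner client.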
{ + fn verify_channel_state( + &self, + ctx: &Ctx, + client_id: &ClientId, + client_state: &Self::ClientState, + height: Height, + prefix: &CommitmentPrefix, + proof: &CommitmentProofBytes, + root: &CommitmentRoot, + port_id: &PortId, + channel_id: &ChannelId, + expected_channel_end: &ChannelEnd, + ) -> Result<(), Error> { + match self { + #(#cases)* + } + } + } + } + + fn impl_fn_verify_client_full_state(&self) -> proc_macro2::TokenStream { + let cases = self.clients.iter().map(|client| { + let variant_ident = &client.variant_ident; + let attrs = &client.attrs; + quote! { + #(#attrs)* + Self::#variant_ident(client) => { + let client_type = client_state.client_type().to_owned(); + let client_state = downcast!( + client_state => Self::ClientState::#variant_ident + ) + .ok_or_else(|| Error::client_args_type_mismatch(client_type))?; + + client.verify_client_full_state( + ctx, + client_state, + height, + prefix, + proof, + root, + client_id, + client_state_on_counterparty, + ) + } + } + }); + + quote! { + fn verify_client_full_state( + &self, + ctx: &Ctx, + client_state: &Self::ClientState, + height: Height, + prefix: &CommitmentPrefix, + proof: &CommitmentProofBytes, + root: &CommitmentRoot, + client_id: &ClientId, + client_state_on_counterparty: &Ctx::AnyClientState, + ) -> Result<(), Error> { + match self { + #(#cases)* + } + } + } + } + + fn impl_fn_verify_packet_data(&self) -> proc_macro2::TokenStream { + let cases = self.clients.iter().map(|client| { + let variant_ident = &client.variant_ident; + let attrs = &client.attrs; + quote! { + #(#attrs)* + Self::#variant_ident(client) => { + let client_type = client_state.client_type().to_owned(); + let client_state = downcast!( + client_state => Self::ClientState::#variant_ident + ) + .ok_or_else(|| Error::client_args_type_mismatch(client_type))?; + + client.verify_packet_data( + ctx, + client_id, + client_state, + height, + connection_end, + proof, + root, + port_id, + channel_id, + sequence, + commitment, + ) + } + } + }); + + quote! { + fn verify_packet_data( + &self, + ctx: &Ctx, + client_id: &ClientId, + client_state: &Self::ClientState, + height: Height, + connection_end: &ConnectionEnd, + proof: &CommitmentProofBytes, + root: &CommitmentRoot, + port_id: &PortId, + channel_id: &ChannelId, + sequence: Sequence, + commitment: PacketCommitment, + ) -> Result<(), Error> { + match self { + #(#cases)* + } + } + } + } + + fn impl_fn_verify_packet_acknowledgement(&self) -> proc_macro2::TokenStream { + let cases = self.clients.iter().map(|client| { + let variant_ident = &client.variant_ident; + let attrs = &client.attrs; + quote! { + #(#attrs)* + Self::#variant_ident(client) => { + let client_type = client_state.client_type().to_owned(); + let client_state = downcast!( + client_state => Self::ClientState::#variant_ident + ) + .ok_or_else(|| Error::client_args_type_mismatch(client_type))?; + + client.verify_packet_acknowledgement( + ctx, + client_id, + client_state, + height, + connection_end, + proof, + root, + port_id, + channel_id, + sequence, + ack_commitment, + ) + } + } + }); + + quote! 
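+        // Generated `verify_packet_acknowledgement`: pure delegation; the arm downcasts the
+        // client state and hands the ack commitment plus proof parameters to the inner client.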
{ + fn verify_packet_acknowledgement( + &self, + ctx: &Ctx, + client_id: &ClientId, + client_state: &Self::ClientState, + height: Height, + connection_end: &ConnectionEnd, + proof: &CommitmentProofBytes, + root: &CommitmentRoot, + port_id: &PortId, + channel_id: &ChannelId, + sequence: Sequence, + ack_commitment: AcknowledgementCommitment, + ) -> Result<(), Error> { + match self { + #(#cases)* + } + } + } + } + + fn impl_fn_verify_next_sequence_recv(&self) -> proc_macro2::TokenStream { + let cases = self.clients.iter().map(|client| { + let variant_ident = &client.variant_ident; + let attrs = &client.attrs; + quote! { + #(#attrs)* + Self::#variant_ident(client) => { + let client_type = client_state.client_type().to_owned(); + let client_state = downcast!( + client_state => Self::ClientState::#variant_ident + ) + .ok_or_else(|| Error::client_args_type_mismatch(client_type))?; + + client.verify_next_sequence_recv( + ctx, + client_id, + client_state, + height, + connection_end, + proof, + root, + port_id, + channel_id, + sequence, + ) + } + } + }); + + quote! { + fn verify_next_sequence_recv( + &self, + ctx: &Ctx, + client_id: &ClientId, + client_state: &Self::ClientState, + height: Height, + connection_end: &ConnectionEnd, + proof: &CommitmentProofBytes, + root: &CommitmentRoot, + port_id: &PortId, + channel_id: &ChannelId, + sequence: Sequence, + ) -> Result<(), Error> { + match self { + #(#cases)* + } + } + } + } + + fn impl_fn_verify_packet_receipt_absence(&self) -> proc_macro2::TokenStream { + let cases = self.clients.iter().map(|client| { + let variant_ident = &client.variant_ident; + let attrs = &client.attrs; + let _client_state_path = &client.client_state_path; + quote! { + #(#attrs)* + Self::#variant_ident(client) => { + let client_type = client_state.client_type().to_owned(); + let client_state = downcast!( + client_state => Self::ClientState::#variant_ident + ) + .ok_or_else(|| Error::client_args_type_mismatch(client_type))?; + + client.verify_packet_receipt_absence( + ctx, + client_id, + client_state, + height, + connection_end, + proof, + root, + port_id, + channel_id, + sequence, + ) + } + } + }); + + quote! 
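+        // Generated `verify_packet_receipt_absence`: same delegation pattern, used to prove
+        // that no receipt is stored for the given sequence on the counterparty.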
{ + fn verify_packet_receipt_absence( + &self, + ctx: &Ctx, + client_id: &ClientId, + client_state: &Self::ClientState, + height: Height, + connection_end: &ConnectionEnd, + proof: &CommitmentProofBytes, + root: &CommitmentRoot, + port_id: &PortId, + channel_id: &ChannelId, + sequence: Sequence, + ) -> Result<(), Error> { + match self { + #(#cases)* + } + } + } + } + + pub fn impl_client_def(&self) -> proc_macro2::TokenStream { + let this = &self.self_ident; + let any_header = &self.any_data.header_ident; + let any_client_state = &self.any_data.client_state_ident; + let any_consensus_state = &self.any_data.consensus_state_ident; + let gens = &self.generics; + let gens_where = &self.generics.where_clause; + let gen_params = &self.generics.params; + + let fn_verify_header = self.impl_fn_verify_header(); + let fn_update_state = self.impl_fn_update_state(); + let fn_update_state_on_misbehaviour = self.impl_fn_update_state_on_misbehaviour(); + let fn_check_for_misbehaviour = self.impl_fn_check_for_misbehaviour(); + let fn_verify_upgrade_and_update_state = self.impl_fn_verify_upgrade_and_update_state(); + let fn_verify_client_consensus_state = self.impl_fn_verify_client_consensus_state(); + let fn_verify_connection_state = self.impl_fn_verify_connection_state(); + let fn_verify_channel_state = self.impl_fn_verify_channel_state(); + let fn_verify_client_full_state = self.impl_fn_verify_client_full_state(); + let fn_verify_packet_data = self.impl_fn_verify_packet_data(); + let fn_verify_packet_acknowledgement = self.impl_fn_verify_packet_acknowledgement(); + let fn_verify_next_sequence_recv = self.impl_fn_verify_next_sequence_recv(); + let fn_verify_packet_receipt_absence = self.impl_fn_verify_packet_receipt_absence(); + + quote! { + impl #gens ClientDef for #this #gens #gens_where { + type Header = #any_header; + type ClientState = #any_client_state::<#gen_params>; + type ConsensusState = #any_consensus_state; + + #fn_verify_header + #fn_update_state + #fn_update_state_on_misbehaviour + #fn_check_for_misbehaviour + #fn_verify_upgrade_and_update_state + #fn_verify_client_consensus_state + #fn_verify_connection_state + #fn_verify_channel_state + #fn_verify_client_full_state + #fn_verify_packet_data + #fn_verify_packet_acknowledgement + #fn_verify_next_sequence_recv + #fn_verify_packet_receipt_absence + } + } + } +} diff --git a/derive/src/client_state.rs b/derive/src/client_state.rs new file mode 100644 index 0000000000..6cdf150dad --- /dev/null +++ b/derive/src/client_state.rs @@ -0,0 +1,187 @@ +use crate::State; + +use quote::quote; + +impl State { + fn impl_fn_chain_id(&self) -> proc_macro2::TokenStream { + let cases = self.clients.iter().map(|client| { + let variant_ident = &client.variant_ident; + let attrs = &client.attrs; + quote! { + #(#attrs)* + Self::#variant_ident(state) => state.chain_id(), + } + }); + + quote! { + fn chain_id(&self) -> ChainId { + match self { + #(#cases)* + } + } + } + } + + pub fn impl_fn_client_def(&self) -> proc_macro2::TokenStream { + let cases = self.clients.iter().map(|client| { + let variant_ident = &client.variant_ident; + let attrs = &client.attrs; + quote! { + #(#attrs)* + Self::#variant_ident(state) => AnyClient::#variant_ident(state.client_def()), + } + }); + + quote! { + fn client_def(&self) -> Self::ClientDef { + match self { + #(#cases)* + } + } + } + } + + pub fn impl_fn_client_type(&self) -> proc_macro2::TokenStream { + let cases = self.clients.iter().map(|client| { + let variant_ident = &client.variant_ident; + let attrs = &client.attrs; + quote! 
{ + #(#attrs)* + Self::#variant_ident(state) => state.client_type(), + } + }); + + quote! { + fn client_type(&self) -> ClientType { + match self { + #(#cases)* + } + } + } + } + + fn impl_fn_latest_height(&self) -> proc_macro2::TokenStream { + let cases = self.clients.iter().map(|client| { + let variant_ident = &client.variant_ident; + let attrs = &client.attrs; + quote! { + #(#attrs)* + Self::#variant_ident(state) => state.latest_height(), + } + }); + + quote! { + fn latest_height(&self) -> Height { + match self { + #(#cases)* + } + } + } + } + + fn impl_fn_frozen_height(&self) -> proc_macro2::TokenStream { + let cases = self.clients.iter().map(|client| { + let variant_ident = &client.variant_ident; + let attrs = &client.attrs; + quote! { + #(#attrs)* + Self::#variant_ident(state) => state.frozen_height(), + } + }); + + quote! { + fn frozen_height(&self) -> Option { + match self { + #(#cases)* + } + } + } + } + + fn impl_fn_upgrade(&self) -> proc_macro2::TokenStream { + let cases = self.clients.iter().map(|client| { + let variant_ident = &client.variant_ident; + let attrs = &client.attrs; + quote! { + #(#attrs)* + Self::#variant_ident(state) => { + let upgrade_options = downcast!(upgrade_options => Self::UpgradeOptions::#variant_ident) + .expect(&format!("upgrade options should be {}", stringify!(#variant_ident))); + + Self::#variant_ident(state.upgrade( + upgrade_height, + upgrade_options, + chain_id, + )) + } + } + }); + + quote! { + fn upgrade( + self, + upgrade_height: Height, + upgrade_options: Self::UpgradeOptions, + chain_id: ChainId, + ) -> Self { + match self { + #(#cases)* + } + } + } + } + + fn impl_fn_expired(&self) -> proc_macro2::TokenStream { + let cases = self.clients.iter().map(|client| { + let variant_ident = &client.variant_ident; + let attrs = &client.attrs; + quote! { + #(#attrs)* + Self::#variant_ident(state) => state.expired(elapsed), + } + }); + + quote! { + fn expired(&self, elapsed: Duration) -> bool { + match self { + #(#cases)* + } + } + } + } + + pub fn impl_client_state(&self) -> proc_macro2::TokenStream { + let this = &self.self_ident; + let gens = &self.generics; + let gens_where = &self.generics.where_clause; + + let fn_chain_id = self.impl_fn_chain_id(); + let fn_client_type = self.impl_fn_client_type(); + let fn_client_def = self.impl_fn_client_def(); + let fn_latest_height = self.impl_fn_latest_height(); + let fn_frozen_height = self.impl_fn_frozen_height(); + let fn_upgrade = self.impl_fn_upgrade(); + let fn_expired = self.impl_fn_expired(); + let fn_downcast = self.impl_fn_downcast(); + let fn_wrap = self.impl_fn_wrap(); + let fn_encode_to_vec = self.impl_fn_encode_to_vec(); + + quote! { + impl #gens ClientState for #this #gens #gens_where { + type UpgradeOptions = AnyUpgradeOptions; // TODO: make variable? + type ClientDef = AnyClient #gens; + + #fn_chain_id + #fn_client_type + #fn_client_def + #fn_latest_height + #fn_frozen_height + #fn_upgrade + #fn_expired + #fn_downcast + #fn_wrap + #fn_encode_to_vec + } + } + } +} diff --git a/derive/src/coercion.rs b/derive/src/coercion.rs new file mode 100644 index 0000000000..c173f7ac35 --- /dev/null +++ b/derive/src/coercion.rs @@ -0,0 +1,52 @@ +use crate::State; +use quote::quote; + +impl State { + pub(crate) fn impl_fn_downcast(&self) -> proc_macro2::TokenStream { + let cases = self.clients.iter().map(|client| { + let variant_ident = &client.variant_ident; + let attrs = &client.attrs; + quote! { + #(#attrs)* + Self::#variant_ident(state) => state.downcast::(), + } + }); + + quote! 
{ + fn downcast(self) -> Option { + match self { + #(#cases)* + } + } + } + } + + pub(crate) fn impl_fn_wrap(&self) -> proc_macro2::TokenStream { + let cases = self.clients.iter().map(|client| { + let variant_ident = &client.variant_ident; + let attrs = &client.attrs; + let client_state_type = &client.inner_ty_path; + quote! { + #(#attrs)* + if let Some(state) = sub_state.downcast_ref::<#client_state_type>() { + return Some(Self::#variant_ident(state.clone())); + } + } + }); + + quote! { + fn wrap(sub_state: &dyn core::any::Any) -> Option { + #(#cases)* + None + } + } + } + + pub(crate) fn impl_fn_encode_to_vec(&self) -> proc_macro2::TokenStream { + quote! { + fn encode_to_vec(&self) -> Vec { + Protobuf::encode_vec(self) + } + } + } +} diff --git a/derive/src/consensus_state.rs b/derive/src/consensus_state.rs new file mode 100644 index 0000000000..251924cf9d --- /dev/null +++ b/derive/src/consensus_state.rs @@ -0,0 +1,67 @@ +use crate::State; + +use quote::quote; + +impl State { + fn impl_fn_root(&self) -> proc_macro2::TokenStream { + let cases = self.clients.iter().map(|client| { + let variant_ident = &client.variant_ident; + let attrs = &client.attrs; + quote! { + #(#attrs)* + Self::#variant_ident(state) => state.root(), + } + }); + + quote! { + fn root(&self) -> &CommitmentRoot { + match self { + #(#cases)* + } + } + } + } + + fn impl_fn_timestamp(&self) -> proc_macro2::TokenStream { + let cases = self.clients.iter().map(|client| { + let variant_ident = &client.variant_ident; + let attrs = &client.attrs; + quote! { + #(#attrs)* + Self::#variant_ident(state) => state.timestamp(), + } + }); + + quote! { + fn timestamp(&self) -> Timestamp { + match self { + #(#cases)* + } + } + } + } + + pub fn impl_consensus_state(&self) -> proc_macro2::TokenStream { + let this = &self.self_ident; + let gens = &self.generics; + let gens_where = &self.generics.where_clause; + + let fn_root = self.impl_fn_root(); + let fn_timestamp = self.impl_fn_timestamp(); + let fn_downcast = self.impl_fn_downcast(); + let fn_wrap = self.impl_fn_wrap(); + let fn_encode_to_vec = self.impl_fn_encode_to_vec(); + + quote! { + impl #gens ConsensusState for #this #gens #gens_where { + type Error = Infallible; + + #fn_root + #fn_timestamp + #fn_downcast + #fn_wrap + #fn_encode_to_vec + } + } + } +} diff --git a/derive/src/header.rs b/derive/src/header.rs new file mode 100644 index 0000000000..dc6caa7a21 --- /dev/null +++ b/derive/src/header.rs @@ -0,0 +1,44 @@ +use crate::State; + +use quote::quote; + +impl State { + pub fn impl_fn_height(&self) -> proc_macro2::TokenStream { + let cases = self.clients.iter().map(|client| { + let variant_ident = &client.variant_ident; + let attrs = &client.attrs; + quote! { + #(#attrs)* + Self::#variant_ident(state) => state.height(), + } + }); + + quote! { + fn height(&self) -> Height { + match self { + #(#cases)* + } + } + } + } + + pub fn impl_header(&self) -> proc_macro2::TokenStream { + let this = &self.self_ident; + let gens = &self.generics; + let gens_where = &self.generics.where_clause; + + let fn_height = self.impl_fn_height(); + let fn_downcast = self.impl_fn_downcast(); + let fn_wrap = self.impl_fn_wrap(); + let fn_encode_to_vec = self.impl_fn_encode_to_vec(); + + quote! 
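+        // Assembles the `Header` impl (e.g. for an `AnyHeader` enum) out of the generated
+        // `height` accessor, the shared `downcast`/`wrap` coercions, and `encode_to_vec`.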
{ + impl #gens Header for #this #gens #gens_where { + #fn_height + #fn_downcast + #fn_wrap + #fn_encode_to_vec + } + } + } +} diff --git a/derive/src/lib.rs b/derive/src/lib.rs new file mode 100644 index 0000000000..ffb1d4afd6 --- /dev/null +++ b/derive/src/lib.rs @@ -0,0 +1,169 @@ +mod client_def; +mod client_state; +mod coercion; +mod consensus_state; +mod header; +mod misbehaviour; +mod protobuf; + +use proc_macro::TokenStream; +use proc_macro2::Ident; + +use syn::{parse_macro_input, Data, DeriveInput, Generics, Type, TypePath}; + +struct AnyData { + pub header_ident: Ident, + pub client_state_ident: Ident, + pub consensus_state_ident: Ident, +} + +struct ClientData { + pub variant_ident: Ident, + pub inner_ty_path: TypePath, + pub client_state_path: TypePath, + pub attrs: Vec, + pub proto_ty_url: Option, + pub proto_decode_error: Option, +} + +impl ClientData { + pub fn new( + variant_ident: Ident, + inner_ty_path: TypePath, + attrs: Vec, + proto_ty_url: Option, + proto_decode_error: Option, + ) -> Self { + let client_state_path = + ident_path(Ident::new(&format!("{}ClientState", variant_ident), variant_ident.span())); + Self { + variant_ident, + inner_ty_path, + client_state_path, + attrs, + proto_ty_url, + proto_decode_error, + } + } +} + +struct State { + pub any_data: AnyData, + pub clients: Vec, + pub self_ident: Ident, + pub generics: Generics, +} + +#[proc_macro_derive(ClientDef, attributes(ibc))] +pub fn derive_client_def(input: TokenStream) -> TokenStream { + let input = parse_macro_input!(input as DeriveInput); + let state = State::from_input(input, client_data_with_proto_attrs); + state.impl_client_def().into() +} + +#[proc_macro_derive(ClientState, attributes(ibc))] +pub fn derive_client_state(input: TokenStream) -> TokenStream { + let input = parse_macro_input!(input as DeriveInput); + let state = State::from_input(input, client_data_with_proto_attrs); + state.impl_client_state().into() +} + +#[proc_macro_derive(ConsensusState, attributes(ibc))] +pub fn derive_consensus_state(input: TokenStream) -> TokenStream { + let input = parse_macro_input!(input as DeriveInput); + let state = State::from_input(input, client_data_with_proto_attrs); + state.impl_consensus_state().into() +} + +#[proc_macro_derive(Header, attributes(ibc))] +pub fn derive_header(input: TokenStream) -> TokenStream { + let input = parse_macro_input!(input as DeriveInput); + let state = State::from_input(input, client_data_with_proto_attrs); + state.impl_header().into() +} + +#[proc_macro_derive(Misbehaviour, attributes(ibc))] +pub fn derive_misbehaviour(input: TokenStream) -> TokenStream { + let input = parse_macro_input!(input as DeriveInput); + let state = State::from_input(input, client_data_with_proto_attrs); + state.impl_misbehaviour().into() +} + +#[proc_macro_derive(Protobuf, attributes(ibc))] +pub fn derive_protobuf(input: TokenStream) -> TokenStream { + let input = parse_macro_input!(input as DeriveInput); + let state = State::from_input(input, client_data_with_proto_attrs); + state.impl_protobuf().into() +} + +fn client_data_with_proto_attrs(variant: &syn::Variant) -> ClientData { + assert_eq!(variant.fields.len(), 1, "Only single field variants are supported"); + let field = variant.fields.iter().next().unwrap(); + let client_def_path = match &field.ty { + Type::Path(p) => p.clone(), + _ => panic!("Only path types are supported"), + }; + let mut proto_url = None; + let mut proto_decode_error = None; + let attrs = variant + .attrs + .iter() + .filter(|attr| { + let string = format!("{}", 
attr.path.segments.first().unwrap().ident); + if string == "ibc" { + let meta = attr.parse_meta().unwrap(); + if let syn::Meta::List(list) = meta { + for nested in list.nested { + if let syn::NestedMeta::Meta(syn::Meta::NameValue(nv)) = nested { + let ident = &nv.path.segments.first().unwrap().ident; + if let syn::Lit::Str(lit) = nv.lit { + if ident == "proto_url" { + assert!( + proto_url.is_none(), + "Only one proto type url is allowed" + ); + proto_url = Some(Ident::new(&lit.value(), lit.span())); + } else if ident == "proto_decode_err" { + assert!( + proto_decode_error.is_none(), + "Only one proto decode error is allowed" + ); + proto_decode_error = Some(Ident::new(&lit.value(), lit.span())); + } + } + } + } + } + } + string == "cfg" + }) + .cloned() + .collect(); + + ClientData::new(variant.ident.clone(), client_def_path, attrs, proto_url, proto_decode_error) +} + +impl State { + fn from_input(input: DeriveInput, client_fn: impl Fn(&syn::Variant) -> ClientData) -> Self { + let data = match &input.data { + Data::Enum(data) => data, + _ => panic!("Only enums are supported"), + }; + let span = input.ident.span(); + State { + self_ident: input.ident, + any_data: AnyData { + header_ident: Ident::new("AnyHeader", span), + client_state_ident: Ident::new("AnyClientState", span), + consensus_state_ident: Ident::new("AnyConsensusState", span), + }, + clients: data.variants.iter().map(client_fn).collect(), + generics: input.generics.clone(), + } + } +} + +fn ident_path(ident: Ident) -> TypePath { + let client_def_path = TypePath { qself: None, path: syn::Path::from(ident) }; + client_def_path +} diff --git a/derive/src/misbehaviour.rs b/derive/src/misbehaviour.rs new file mode 100644 index 0000000000..ef54d57777 --- /dev/null +++ b/derive/src/misbehaviour.rs @@ -0,0 +1,46 @@ +use crate::State; + +use quote::quote; + +impl State { + pub fn impl_fn_client_id(&self) -> proc_macro2::TokenStream { + let cases = self.clients.iter().map(|client| { + let variant_ident = &client.variant_ident; + let attrs = &client.attrs; + quote! { + #(#attrs)* + Self::#variant_ident(misbehaviour) => misbehaviour.client_id(), + } + }); + + quote! { + fn client_id(&self) -> &ClientId { + match self { + #(#cases)* + } + } + } + } + + pub fn impl_misbehaviour(&self) -> proc_macro2::TokenStream { + let this = &self.self_ident; + let gens = &self.generics; + let gens_where = &self.generics.where_clause; + + let fn_client_id = self.impl_fn_client_id(); + let fn_height = self.impl_fn_height(); + let fn_downcast = self.impl_fn_downcast(); + let fn_wrap = self.impl_fn_wrap(); + let fn_encode_to_vec = self.impl_fn_encode_to_vec(); + + quote! 
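+        // Assembles the `Misbehaviour` impl from the generated `client_id` and `height`
+        // accessors plus the shared `downcast`/`wrap`/`encode_to_vec` helpers.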
{ + impl #gens Misbehaviour for #this #gens #gens_where { + #fn_client_id + #fn_height + #fn_downcast + #fn_wrap + #fn_encode_to_vec + } + } + } +} diff --git a/derive/src/protobuf.rs b/derive/src/protobuf.rs new file mode 100644 index 0000000000..0c919f7660 --- /dev/null +++ b/derive/src/protobuf.rs @@ -0,0 +1,93 @@ +use crate::State; +use convert_case::{Case, Casing}; + +use quote::quote; + +impl State { + pub fn impl_try_from_any(&self) -> proc_macro2::TokenStream { + let this = &self.self_ident; + let gens = &self.generics; + let gens_where = &self.generics.where_clause; + + let cases = self.clients.iter().filter_map(|client| { + let type_url = client.proto_ty_url.as_ref()?; + let decode_err = client.proto_decode_error.clone().unwrap_or_else(|| { + let string_without_any = &this.to_string()[3..]; + syn::parse_str(&format!("decode_raw_{}", string_without_any.to_case(Case::Snake))) + .unwrap() + }); + let variant_ident = &client.variant_ident; + let attrs = &client.attrs; + let inner_ty = &client.inner_ty_path; + Some(quote! { + #(#attrs)* + #type_url => Ok(Self::#variant_ident( + <#inner_ty>::decode_vec(&value.value) + .map_err(Error::#decode_err)?, + )), + }) + }); + + // TODO: fix up error variants used in decoding + quote! { + impl #gens TryFrom for #this #gens #gens_where { + type Error = Error; + + fn try_from(value: Any) -> Result { + match value.type_url.as_str() { + "" => Err(Error::empty_consensus_state_response()), + #(#cases)* + _ => Err(Error::unknown_consensus_state_type(value.type_url)), + } + } + } + } + } + + pub fn impl_from_self_for_any(&self) -> proc_macro2::TokenStream { + let this = &self.self_ident; + let gens = &self.generics; + let gens_where = &self.generics.where_clause; + let gen_params = &self.generics.params; + + let cases = self.clients.iter().filter_map(|client| { + let variant_ident = &client.variant_ident; + let attrs = &client.attrs; + let type_url = client.proto_ty_url.as_ref()?; + Some(quote! { + #(#attrs)* + #this ::<#gen_params> ::#variant_ident(value) => Any { + type_url: #type_url.to_string(), + value: value.encode_to_vec(), + }, + }) + }); + + quote! { + impl #gens From<#this #gens> for Any #gens_where { + fn from(value: #this #gens) -> Self { + match value { + #(#cases)* + } + } + } + } + } + + pub fn impl_protobuf(&self) -> proc_macro2::TokenStream { + let this = &self.self_ident; + let gens = &self.generics; + let gens_where = &self.generics.where_clause; + + let impl_try_from_any = self.impl_try_from_any(); + let impl_from_self_for_any = self.impl_from_self_for_any(); + + quote! { + impl #gens Protobuf for #this #gens #gens_where {} + + #impl_try_from_any + + #impl_from_self_for_any + } + } +} diff --git a/docs/architecture/README.md b/docs/architecture/README.md deleted file mode 100644 index b50a2fd3d0..0000000000 --- a/docs/architecture/README.md +++ /dev/null @@ -1,38 +0,0 @@ -# Architecture Decision Records (ADR) - -This is a location to record all high-level architecture decisions in the IBC-RS project. - -You can read more about the ADR concept in this [blog post](https://product.reverb.com/documenting-architecture-decisions-the-reverb-way-a3563bb24bd0#.78xhdix6t). - -An ADR should provide: - -- Context on the relevant goals and the current state -- Proposed changes to achieve the goals -- Summary of pros and cons -- References -- Changelog - -Note the distinction between an ADR and a spec. The ADR provides the context, intuition, reasoning, and -justification for a change in architecture, or for the architecture of something -new. 
The spec is much more compressed and streamlined summary of everything as -it is or should be. - -If recorded decisions turned out to be lacking, convene a discussion, record the new decisions here, and then modify the code to match. - -Note the context/background should be written in the present tense. - -To suggest an ADR, please make use of the [ADR template](./adr-template.md) provided. - -## Table of Contents - -| ADR \# | Description | Status | -| ------ | ----------- | ------ | -| [001](./adr-001-repo.md) | Repository structure for `ibc-rs` | Accepted | -| [002](./adr-002-ibc-relayer.md) | IBC Relayer in Rust | Accepted | -| [003](./adr-003-handler-implementation.md) | IBC handlers implementation | Accepted | -| [004](./adr-004-relayer-domain-decomposition.md) | Relayer domain decomposition | Accepted | -| [005](./adr-005-relayer-v0-implementation.md) | Relayer v0 implementation | Accepted | -| [006](./adr-006-hermes-v0.2-usecases.md) | Hermes v0.2.0 Use-Cases | Proposed | -| [007](./adr-007-error.md) | Error Management | Accepted | -| [008](./adr-008-ics20-implementation.md) | ICS20 implementation | Accepted | -| [009](./adr-009-chain-endpoint-handle-standardization.md) | ChainEndpoint and ChainHandle methods standardization | Accepted | diff --git a/docs/architecture/adr-001-repo.md b/docs/architecture/adr-001-repo.md deleted file mode 100644 index a160313c0f..0000000000 --- a/docs/architecture/adr-001-repo.md +++ /dev/null @@ -1,175 +0,0 @@ -# ADR 001: Repository Structure - -## Changelog - -* 2020-07-22: First draft. - -## Context - -This document provides a basic rundown of the structure of this repository, plus some plans for its evolution. - -This repository comprises a Rust implementation of the [IBC](https://github.com/cosmos/ibc) suite of protocols. -To complement this implementation, this repository also comprises specifications, primarily written in TLA+, and -sometimes in English. - -At the moment we are invested mostly in the development of a relayer and several important modules (client, connection, -channel, and packets). -Eventually, we hope to cover the full IBC suite. - -## Decision - -The `ibc-rs` repository comprises three broad parts: - -1. The codebase for the IBC relayer implementation in Rust is in `relayer/`, which consists of crate **`relayer-cli`** (the -frontend application of the relayer) as well as crate **`relayer`** (the core relayer functionality). -2. The codebase for IBC modules is in `modules/`, making up the crate called **`relayer-modules`**. -3. English and TLA+ specs reside under `docs/spec`, classified by the component they target, e.g., relayer or connection -handshake. - -Following the work in [#142](https://github.com/informalsystems/ibc-rs/issues/142), the crate -**`ibc-proto`**(originally in a [separate repo](https://github.com/informalsystems/ibc-proto) and [documented here](https://docs.rs/ibc-proto/)) -shall also become absorbed into the present repo. - -In the following, we discuss the current state and proposed evolution of each of the Rust crates. - -#### Crate `relayer-cli` - -The basic concern of this crate is to provide user-facing functionality for the IBC relayer. This means -implementing a CLI application that dispatches a _command_ to a specific part of the relayer, and then outputs the -result of executing that command. This crate builds on -[Abscissa](https://docs.rs/abscissa_core/0.5.2/abscissa_core/) to simplify command line parsing, application process -lifecycle, and error handling. 
- -This crate can accept various sub-commands, e.g. `query` a chain for some specific part of their store, `start` the -relayer, or start the `light` client for a given chain. Note that most commands can be further refined with parameters -(for instance, the `query` command can be issued for a `connection` or `channel` or `client`). The bulk of data types -and logic resides in `relayer/cli/commands`, grouped by each specific command. - -#### Crate `relayer` - -This crate implements the core responsibilities of an IBC relayer. Briefly speaking, there are 3 high-level -requirements on a IBC relayer, in no particular order: - -- __R1.__ ability to interface with IBC-enabled chains, with the purpose of reading their state and submitting transactions to -these chains; -- __R2.__ ability to run a light client for IBC-enabled chains, with the purpose of verifying headers and state of these chains; -- __R3.__ implement the IBC relayer algorithms (ICS 018). - -Some functionality described above overlaps with functionality of IBC Modules. For instance, some logic -that the relayer implements for handling connection handshakes (in ICS18) overlaps with logic in the IBC module specific -for connections (ICS3). Given this overlap, the `relayer-modules` crate serves as the "ground truth" implementing the -said logic, while the `relayer` crate has a natural dependency on `relayer-modules`. - -In addition to the dependency on the IBC Modules, the relayer also depends on the `tendermint-rs` crate. This is -useful in particular for interfacing with the light client implementation from this crate, as well as core data types -such as `SignedHeader`, `Validator`, or `ValidatorSet`. - -[ADR 002](./adr-002-ibc-relayer.md) captures more specific details on the relayer architecture. - -#### Crate `relayer-modules` - -The [canonical IBC specification](https://github.com/cosmos/ibc/tree/master/spec/) is modular in the sense of grouping -different components of the specification in modules; for instance, specification _ICS03_ pertains to the abstraction of -IBC connections and the IBC connection handshake protocol, while _ICS04_ pertains to IBC channels and packets. -We group the code in this crate to reflect the modular separation in the canonical IBC specification. - -A few common patterns we employ in this crate are as follows. - -###### `msgs.rs` - -Many IBC protocols involve the receiving and processing of messages. -The protocols for establishing a connection (ICS03) or a channel (ICS04), for example, comprise -the processing of four different types of messages each. -In particular, the data structures representing these messages for connection handshake are `MsgConnectionOpenInit`, -`MsgConnectionOpenTry`, `MsgConnectionOpenAck`, and `MsgConnectionOpenConfirm`. - -The creation and validation of protocol messages for each protocol resides in `msgs.rs` within the respective ICS. -Each of these messages should implement the trait `pub trait Msg`, ensuring that all messages implement a basic -interface allowing them to be routed correctly (via the IBC routing module and with the help of the `route()` method) -or support basic validation. - -###### Error handling - -Each ICS enumerates specific errors that may occur within `icsX_NAME/error.rs`. -The error-handling pattern here build on [thiserror](https://lib.rs/crates/thiserror) and -[anomaly](https://lib.rs/crates/anomaly) for capturing the context of errors plus backtraces (optional). 
-Generally speaking, an IBC module constructs and propagates errors to the caller by two patterns: - -```Rust -return Err(Kind::MissingCounterparty.into()) -``` - -or if a context can be supplied this is preferable: - -```rust -return Err(Kind::InvalidConnectionHopsLength - .context("validate channel") - .into()); -``` -where the ICS itself defines `Kind::InvalidConnectionHopsLength` and `Kind::MissingCounterparty`. - -###### Deserialization - -See the details for the crate `ibc-proto` [below](#crate-ibc-proto). - -#### Crate `ibc_proto` - -The `ibc-proto` library gives a developer access to the Cosmos SDK IBC proto-defined structs directly in Rust. -The canonical IBC structs reside presently in [cosmos-sdk](https://github.com/cosmos/ibc-go/tree/main/proto/ibc), -defined in a proto3 syntax. -We compile these structs via prost directly to .rs files and import them into the other crates typically under the same -name prefixed with "Raw", for example: - -```Rust -use ibc_proto::channel::Channel as RawChannel; -``` - -For any Raw data type that is defined in `ibc-proto` we implement the `DomainType` trait, which serves as a translation -& validation layer between the proto ("Raw") types and the domain types. For example, for a `Channel` we do as follows: - -```Rust -impl DomainType for ChannelEnd {} - -impl TryFrom for ChannelEnd { - type Error = anomaly::Error; - - fn try_from(value: RawChannel) -> Result { - // Translate, validate each field from RawChannel into a Channel. - } -} - -impl From for RawChannel { - fn from(value: ChannelEnd) -> Self { - // Translate Channel into a RawChannel - } -} -``` - -This issue [#130](https://github.com/informalsystems/ibc-rs/issues/130) is a good starting place for more context -on `ibc-proto`. - -### References - -The following resources serve as reference implementations or specifications that we use to guide the development of -the present crates: - -For the IBC relayer: - -- A first implementation of the IBC relayer in Golang is under active development at -[iqlusioninc/relayer](https://github.com/iqlusioninc/relayer). -- The English specification of the relayer algorithm is captured in the -[ICS018](https://github.com/cosmos/ibc/tree/master/spec/relayer/ics-018-relayer-algorithms) spec. - -For IBC modules: - -- A Golang implementation of IBC modules is under active development -at [cosmos/ibc-go](https://github.com/cosmos/ibc-go/tree/main/modules). -- The English specifications for IBC modules reside in [cosmos/ibc](https://github.com/cosmos/ibc/tree/master/spec). - -## Status - -Proposed - -## Consequences - -Not applicable. diff --git a/docs/architecture/adr-002-ibc-relayer.md b/docs/architecture/adr-002-ibc-relayer.md deleted file mode 100644 index 3947251401..0000000000 --- a/docs/architecture/adr-002-ibc-relayer.md +++ /dev/null @@ -1,798 +0,0 @@ -# ADR 002: IBC Relayer in Rust - -## Changelog -* 2020-05-19: First draft. Accepted -* 2020-04-06: Configuration updates - -## Definitions -These definitions are specific for this document and they may not be consistent with the IBC Specification. - -IBC transaction - a transaction that includes IBC datagrams (including packets). This is constructed by the relayer and sent over the physical network to a chain according to the chain rules. For example, for tendermint chains a `broadcast_tx_commit` request is sent to a tendermint RPC server. - -IBC datagram - is an element of the transaction payload sent by the relayer; it includes client, connection, channel and IBC packet data. 
Multiple IBC datagrams may be included in an IBC transaction. - -IBC packet - a particular type of IBC datagram that includes the application packet and its commitment proof. - -On-chain IBC Client (or IBC Client) - client code running on chain, typically only the light client verification related functionality. - -Relayer Light Client - full light client functionality, including connecting to at least one provider (full node), storing and verifying headers, etc. - -Source chain - the chain from which the relayer reads data to fill an IBC datagram. - -Destination chain - the chain where the relayer submits transactions that include the IBC datagram. - -A and B chains - for connection protocol, A is the "initiating" chain where `MsgConnectionOpenInit` is initially processed and eventually `MsgConnectionOpenAck`. B is the chain where `MsgConnectionOpenTry` and `MsgConnectionOpenConfirm` are processed. -Similar for channel handshake protocol. - -## Context -A relayer is an off-chain process responsible for relaying IBC datagrams between two or more chains by scanning their states and submitting transactions. This is because in the IBC architecture, modules are not directly sending messages to each other over networking infrastructure, but instead they create and store the data to be retrieved and used by a relayer to build the IBC datagrams. - -This document provides an initial Rust implementation specification of a relayer that interconnects Cosmos-SDK/ Tendermint chains. - -The diagram below shows a high level view of the relayer and its interactions with the source and destination chains. The next sections go in more details of the different interactions. - -![IBC Relayer Architecture Diagram](assets/IBC_relayer.jpeg). - -## Assumptions and Dependencies -This section covers assumptions and dependencies about the chains and their IBC implementation. The first implementation focuses on and will only be tested with Cosmos-SDK/ Tendermint chains. In addition, functionality required by the relayer that is outside the scope of this document, and the availability of their implementations is considered. - -#### Data Availability -The relayer monitors the chain state to determine when packet forwarding is required. The relayer must be able to retrieve the data within some time bound. This is referred to as **data availability**. - -#### Data Legibility -IBC protocol defines the minimal data set that must be made available to relayers for correct operation of the protocol. The relayer expects the data to be legible, i.e. **data should be serialized** according to the IBC specification format; this includes consensus state, client, connection, channel, and packet information, and any auxiliary state structure necessary to construct proofs of inclusion or exclusion of particular key/value pairs in state. - -#### Query Functionality -IBC host state machines MUST expose an interface for inspecting their state. For Cosmos/Tendermint chains this means: -- the IBC modules on chain correctly implement and respond to queries - - [IBC-Modules-Rust] an implementation for some queries currently exist in Cosmos-SDK and same and more need to be implemented in Rust. The full requirements are detailed in section Relayer Queries. 
-- the relayer needs the ability to send rpc/http ABCI queries to and receive replies from Tendermint/Cosmos-SDK - - [[ABCI Rust](https://github.com/tendermint/rust-abci)] - ABCI Rust implementation - - [IBC-Modules-Rust] identifier validation is required (ICS-024) - - [IBC-Modules-Rust] requires Rust types for all query responses - - [[Merkle-Proofs-Rust](https://github.com/confio/ics23/tree/master/rust)] (candidate implementation) - some query responses include proofs and included in IBC transactions by the relayer (some may be validated, TBD) - -#### IBC Messages -The relayer creates transactions that include IBC messages to manage clients, connections and channels, and send application packets to destination chains. These messages must be defined in the IBC Rust implementation [IBC-Modules-Rust]. - -#### IBC Logging System -IBC packet data & timeouts are not stored directly in the chain state (as this storage is presumed to be expensive) but are instead committed to with a succinct cryptographic commitment (only the commitment is stored). -As a consequence, IBC requires that a **host state machine MUST provide an event logging system** that logs data in the course of transaction execution. **Logs must be queryable** by relayers to read IBC packet data & timeouts. - -The logging system must provide the following functions: - - [IBC-Modules-Go] emitLogEntry for emitting log entries called by the state machine during transaction execution: - - type emitLogEntry = (topic: string , data: []byte) => void - - example: emitLogEntry("sendPacket", {sequence: packet.sequence , data: packet.data, timeout: packet.timeout}) - - [IBC-Modules-Go] queryByTopic for querying past logs matching a given topic: - - type queryByTopic = (height: uint64 , topic: string) => Array < []byte > - -#### Keyring -The relay process must have access to its accounts with tokens on all destination chains, with sufficient balance to pay for transaction fees. Account key information must be stored and managed securely. A keyring implementation is required for CRUD key operations. -[Keyring-Rust] Investigation in existing Rust implementations is needed. (ex: [hwchen-keyring](https://github.com/hwchen/keyring-rs)) - -### Chain Transactions and Signing -The relayer must create chain specific signed transactions. -[Cosmos-Tx-Rust] For the first release Cosmos-SDK transaction signing is required. One possible implementation is [iqlusion's sdtx crate](https://github.com/iqlusioninc/crates/tree/main/stdtx) - -#### Implementation of IBC "routing module" -The default IBC handler uses a receiver call pattern, where modules must individually call the IBC handler in order to bind to -ports, start handshakes, accept handshakes, send and receive packets, etc. While this provides flexibility for modules, it imposes extra work on the part of the relayer processes that now needs to track the state of multiple modules. The IBC specification describes an IBC “routing module” to route packets, and simplify the task of relayers. This routing module accepts external datagrams and calls into the IBC handler to deal with handshakes and packet relay. The routing module keeps a lookup table of modules, which it can use to look up and call a module when a packet is received, so that external relayers need only ever relay packets to the routing module. 
-[IBC-Routing-Module-Go] Initial version of the relayer assumes that chains implement the "routing module" - -#### Batching -The relayer may batch IBC datagrams in a single transaction if supported by destination chain and allowed by configuration. In this case the relayer can amortise any overhead costs (e.g. signature checks for fee payment). -Initial version of the relayer assumes batching is supported by all chains. An option may be later included in the configuration file. - -## Relayer Requirements - -A correct relayer MUST: - -- **[R-config-start]** Read, parse, validate a configuration file upon start and configure itself for the specifed chains and paths -- **[R-transport]** Have access to the networking protocols (e.g. TCP/IP, UDP/IP, or QUIC/IP) and physical transport, required to read the state of one blockchain/ machine and submit data to another -- **[R-provider]** Maintain transport connections to at least one full node per chain -- **[R-query]** Query IBC data on source and destination chains -- **[R-light-client]** Run light clients for source chains and -- **[R-IBC-client]** create and update IBC clients on destination chains -- **[R-accounts]** Own accounts on destination chains with sufficient balance to pay for transaction fees -- **[R-transact]** Create, sign and forward IBC datagram transactions -- **[R-relay]** Perform correct relaying of all required messages, according to the IBC sub-protocol constraints -- **[R-restart]** Resume correct functionality after restarts -- **[R-upgrade]** Resume correct functionality after upgrades -- **[R-proofs]** Perform proof verification (as it will be done on the destination chain) and not forward messages where proof verification fails - -The relayer MAY: -- **[R-config-cli]** Provide ways to change configuration at runtime -- **[R-bisection]** Perform bisection to optimize transaction costs and computation on destination chains -- **[R-relay-prio]** Filter or order transactions based on some criteria (e.g. in accordance with the fee payment model) - -## Implementation -The initial implementation will heavily borrow from the Go relayer implementation that uses a "naive" algorithm for relaying messages. The structure of the configuration file is similar with the one in Go (see [Go-Relayer](https://github.com/cosmos/relayer)) - -### Configuration - -> WIP - -Upon start the relayer reads a configuration file that includes global and per chain parameters. The file format is .toml -Below is an example of a configuration file. 
-
-```toml
-[global]
-log_level = "error"
-
-[mode]
-
-[mode.clients]
-enabled = true
-refresh = true
-misbehaviour = true
-
-[mode.connections]
-enabled = false
-
-[mode.channels]
-enabled = false
-
-[mode.packets]
-enabled = true
-clear_interval = 100
-clear_on_start = true
-tx_confirmation = true
-
-[[chains]]
-  id = "chain_A"
-  rpc_addr = "http://localhost:26657"
-  grpc_addr = "http://localhost:9090"
-  websocket_addr = "ws://localhost:26657/websocket"
-  rpc_timeout = "10s"
-  account_prefix = "cosmos"
-  key_name = "testkey"
-  store_prefix = "ibc"
-  client_ids = ["clA1", "clA2"]
-  gas = 200000
-  gas_adjustment = 1.3
-  gas_price = "0.025stake"
-  trusting_period = "336h"
-
-[[chains]]
-  id = "chain_B"
-  rpc_addr = "http://localhost:26557"
-  grpc_addr = "http://localhost:9091"
-  websocket_addr = "ws://localhost:26557/websocket"
-  rpc_timeout = "10s"
-  account_prefix = "cosmos"
-  key_name = "testkey"
-  store_prefix = "ibc"
-  client_ids = ["clB1"]
-  gas = 200000
-  gas_adjustment = 1.3
-  gas_price = "0.025stake"
-  trusting_period = "336h"
-
-```
-The main sections of the configuration file are:
-- `global`:
-Relaying is done periodically and the frequency is dictated by the `timeout` parameter. The `strategy` parameter configures the relayer to run a particular relaying algorithm.
-- `chains`:
-Chain-level information including account and key name, gas information, trusting period, etc. All source and destination chains must be listed here.
-- paths (`connections`, `connections.paths`):
-The relayer may be configured to relay between some application ports, over a number of connections and channels, in unidirectional or bidirectional mode.
-
-### Initialization
-
-The relayer performs initialization based on the content of the configuration file:
-- the file is parsed and semantically validated
-- the chains, connections, ports, channels for which relaying is enabled are stored in the Config structure
-
-```rust
-pub struct Config {
-    pub global: GlobalConfig,
-    pub chains: Vec<ChainConfig>,
-    pub connections: Option<Vec<Connection>>,
-}
-
-pub struct GlobalConfig {
-    /// All valid log levels, as defined in tracing:
-    /// https://docs.rs/tracing-core/0.1.17/tracing_core/struct.Level.html
-    pub log_level: String,
-}
-
-pub struct ChainConfig {
-    pub id: ChainId,
-    pub rpc_addr: tendermint_rpc::Url,
-    pub websocket_addr: tendermint_rpc::Url,
-    pub grpc_addr: tendermint_rpc::Url,
-    pub rpc_timeout: Duration,
-    pub account_prefix: String,
-    pub key_name: String,
-    pub client_ids: Vec<String>,
-    pub gas: u64,
-    pub trusting_period: Duration,
-}
-
-pub struct Connection {
-    pub src: Option<ConnectionEnd>,    // use any source
-    pub dest: Option<ConnectionEnd>,   // use any destination
-    pub paths: Option<Vec<RelayPath>>, // use any port, direction bidirectional
-}
-
-pub struct ConnectionEnd {
-    pub client_id: String,
-    pub connection_id: Option<String>, // use all connections to this client
-}
-
-pub enum Direction {
-    Unidirectional,
-    Bidirectional,
-}
-
-pub struct RelayPath {
-    pub src_port: Option<String>,     // default from any source port
-    pub dest_port: Option<String>,    // default from any dest port
-    pub src_channel: Option<String>,  // default from any source channel
-    pub dest_channel: Option<String>, // default from any dest channel
-    pub direction: Direction,         // default bidirectional
-}
-```
-All `Option` fields with `None` values mean "any" values. For `direction`, the default is bidirectional.
-All non-`Option` fields are mandatory and must appear in the configuration file.
-If the relayer is started with an invalid configuration file, an error is displayed and the relayer process exits.
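-
-As an illustration of the parse-and-validate step above (not part of this ADR's design), a minimal sketch using the `serde` and `toml` crates with deliberately simplified field types; the `load_config` helper and the duplicate-id check are hypothetical:
-
-```rust
-use std::{collections::HashSet, fs};
-
-use serde::Deserialize;
-
-#[derive(Debug, Deserialize)]
-struct Config {
-    global: GlobalConfig,
-    chains: Vec<ChainConfig>,
-}
-
-#[derive(Debug, Deserialize)]
-struct GlobalConfig {
-    log_level: String,
-}
-
-#[derive(Debug, Deserialize)]
-struct ChainConfig {
-    id: String,
-    rpc_addr: String,
-    account_prefix: String,
-    key_name: String,
-    gas: u64,
-    trusting_period: String, // e.g. "336h"; a real config would parse this into a Duration
-}
-
-fn load_config(path: &str) -> Result<Config, Box<dyn std::error::Error>> {
-    // Syntactic validation: the TOML file must deserialize into the typed Config.
-    let config: Config = toml::from_str(&fs::read_to_string(path)?)?;
-
-    // Semantic validation: for example, chain identifiers must be unique.
-    let mut seen = HashSet::new();
-    for chain in &config.chains {
-        if !seen.insert(chain.id.clone()) {
-            return Err(format!("duplicate chain id: {}", chain.id).into());
-        }
-    }
-
-    Ok(config)
-}
-```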
- -### Relayer Commands - -#### Validate -To validate a configuration file: - -`relayer -c config validate ` - -The command verifies that the specified configuration file parses and it is semantically correct. - -#### Start -To start the relayer: - -`relayer -c start` - -The command performs the validation as described above and then starts the relayer. - -#### Query -Most of the queries performed while relaying are also available from the CLI. - -`relayer -c query client state [-h ] [-p ]` - -The command queries the full client state of `` on `` at ``, with or without proof depending on the `` flag. Default `` is latest state and `` is `true`. - -`relayer -c query client consensus [-h ] [-p ]` - -The command queries the consensus state of `` at height `` on `` at ``, with or without proof depending on the `` flag. Default `` is latest state and `` is `true`. - -### Relayer Queries -The relayer queries chain state in order to build the IBC messages. It is expected that each chain type provides implementations of these queries. Initial Rust relayer implementation will be tested with Cosmos-SDK/Tendermint chains, and while some of the IBC-Modules functionality in Rust is not required (e.g. handler functions), a "query" crate should be available for the relayer. -For tendermint, the queries use the `abci.RequestQuery` over rpc/http to retrieve the data. - -The format of the public/ provable state query parameters and responses is chain independent and should also be defined in this crate. - -The following queries are required: - -- `query_store_prefix(chain)` - returns the commitment prefix of the chain (returns chain specific []byte, e.g. `ibc` for tendermint) -- `query_all_client_states(chain)` - returns the IBC light clients instantiated on the chain -- `query_client_consensus_state(chain, clientID, height)` - returns the consensus state proof for a light client at a given height if height > 0, else it returns the latest height -- `query_connections(chain)` - returns all connections created on the chain -- `query_client_connections(chain, clientID)` - returns all connections associated with a light client -- ...more to be added - -### Relayer Concurrency Architecture -The following threads are spawned and execute within the relayer process: -- one Tendermint full light client thread, per configured configured source chain. For example if A->C and B->C paths are enabled then there will be two light client threads, one for A and one for B. These threads download light client headers (block header and commits), verify them and store them as trusted headers in the per chain stores. -- one thread for the main relaying functionality, aka relay thread. -- one thread to relay notifications from source chain and to generate IBC events to the relay thread. - -The figure below shows the interactions for the last two threads. -![IBC relayer threads](assets/IBC_relayer_threads.jpeg) - -On start: -1. Communication (channel ?) between the relay and the notification threads is established. -2. The notification thread registers for IBC events. -3. The relay thread creates the IBC datagrams for A, for all configuration triggered events, for clients `MsgCreateClient`, `MsgUpdateClient` and -4. for connections and channels, i.e. `MsgConnOpenInit` and `MsgChannOpenInit` are sent to chains to initiate connection and channel handshake if required. It then waits for events from the notification thread. -5. The notification thread queries the source chain A at latest height and -6. 
sends IBC events to the relay thread. Then it waits for notifications from A.
-7. For each event related to X (connection, channel or packets), the relay thread queries the client and X state on destination B, and
-8. the X state on source chain A.
-9. With the information collected in previous steps, the relay thread creates a buffer of messages destined to destination B.
-10. When the notification thread receives an IBC notification for X it sends it to the relay thread.
-11. Steps 11-14 are the same as 6-9 above.
-
-The initial version will have a single relay thread for all configured paths. Temporary threads may be created for the source and destination queries required.
-Future versions may create multiple relay threads. One possibility is to create one for each destination chain Z, responsible for relaying over *->Z paths. Another option is a thread pool, selecting an available thread for relaying to a given destination. The notification thread will route the IBC events to the proper thread. Multiple notification threads, e.g. per source, should also be considered.
-
-### Relayer Algorithm
-
-A relayer algorithm is described in the [IBC Specification](https://github.com/cosmos/ibc/blame/master/spec/relayer/ics-018-relayer-algorithms/README.md#L47) and in the [Go relayer implementation](https://github.com/cosmos/relayer/blob/f3a302df9e6e0c28883f5480199d3190821bcc06/relayer/strategies.go#L49).
-
-This section describes some of the details of the relay thread algorithm in the Rust implementation. Inputs are the IBC events; the events of interest are described in Appendix A.
-
-At a high level, for each event from a source chain, the relayer:
-- queries client, connection, channels and/or packet related state on source and destination chains,
-- creates new datagrams if needed,
-- batches multiple datagrams in a single transaction,
-- signs and submits these transactions to the destination.
-
-#### Proofs
-The relayer must include proofs in some datagrams as required by the IBC handlers. There are two types of proofs:
-- proof of some local state on source chain (A). For example, a proof of correct connection state (`ProofInit`, `ProofTry`, `ProofAck`) is included in some of the connection handshake datagrams. The `ConnOpenTry` message includes the `ProofInit` that is obtained from chain A where the connection should be in `INIT` state and have certain local and counterparty identifiers. The message-specific sections below go into more detail.
-- proof that the chain A's IBC client `clB` is updated with a consensus state and height that have been stored on chain B.
-- these proofs are verified on chain B against the consensus state stored by the A client at `proof_height`.
-
-Notes:
-The proof checks require the handlers on B to recreate the state as expected on chain A and to verify the proof against this. For this to work the store prefix of A needs to be added as prefix to the proof path (standardized in ICS 24). There is currently no query endpoint for this in Cosmos-SDK/Tendermint, and the initial relayer version includes a per-chain store prefix in the configuration.
-The verification on B requires the presence of a consensus state for client A at the same height as `proof_height`.
-
-#### Light Client Messages
-After initialization, relayer light clients are created on the destination chains if not already present.
-For a successful A->B relay of IBC packets, IBC clients must be instantiated on both source and destination chains, potentially by different relayers.
The client creation is permissionless and a relayer may create a client if not already present. -```rust -let msg = MsgCreateClient::new(client_id, header, trusting_period, bonding_period, signer); -``` - -The relayer runs its own light client thread for A that periodically retrieves and verifies headers. The relay thread uses the stored headers to update the A-client on chain B with new headers as required. -```rust -let msg = MsgUpdateClient::new(client_id, header, signer); -``` -It is possible that the relay thread needs a more recent trusted header and in this case it would need a mechanism to signal the client thread to retrieve this header. -Since the relayer must pay for all transactions, including `MsgClientCreate` and `MsgClientUpdate`, there are incentives for optimizations. -For example, light client implementation of Tendermint supports bisection and the relayer may choose to send skipping headers to A-client on B, periodically or when required by new IBC datagrams. - -#### IBC Client Consensus State vs Relayer Light Client States vs Chain states -A number of IBC datagrams contain proofs obtained from chain A at a some height `h`. A proof needs to be verified on B against the commitment root for `h` which, for Tendermint clients, is included in the client consensus state at `h+1`. This is because for Tendermint chains the application Hash after applying all transactions in block `n` is included in block at height `n+1`. - -The relayer therefore needs to ensure that the consensus state at `proof_height+1` exists on chain B. - -One proposal is shown below and described in the rest of this section. -![IBC_client_heights](assets/IBC_client_heights.jpeg) - -The relayer creates a light client on B with `hi` and then updates it as required by processing different IBC events. Let `ha'` be the last consensus state for client on B. -When some IBC event for X (connection, channel or packet) is received, it includes the height, let it be `hx-1` at which the event occured on A. -According to the proposal here, the relayer should: -- get the latest consensus state height of client on B, `ha` -- let `h = max(hx, ha)` -- query for item X at height `h-1` and get a proof `p` at this height -- wait for the block at height `hx` to be received, i.e. `Ev{block, hx}` -- get the minimal set of headers from the light client such that `h` verifies against `ha` -- send zero or more `MsgUpdateClient` datagrams and the `MsgX{X, p, h}` in a transaction to B -- if the transaction is successful or `MsgX..` failed, then "consume" the `Ev{X,..}` - - if `MsgX` fails there is nothing that can be done, another relayer must have submitted first -- else raise again the event at `hA-1` if one not already there -- the effect of this is that a new query is made at `hA-1` and since the consensus state at `hA` exists on B, only `MsgX` needs to be sent out - -#### Connection Messages -The relayer queries the source and destination chains of the relaying paths in order to determine if connection handshake datagrams should be sent to destination chains. - -##### Connection Query -The following structures pertain to connection queries and should be detailed in [IBC-Modules-Rust-ADR]. -The structures are shown here for reference. 
- -```rust -pub struct Counterparty { - pub client_id: ClientId, - pub connection_id: ConnectionId, - pub prefix: CommitmentRoot, -} - -pub struct ConnectionEnd { - pub state: ConnectionState, - pub Id: ConnectionId, - pub client_id: ClientId, - pub counterparty: Counterparty, - pub versions: Vec -} - -pub enum ConnectionState { - "UNINIT", - "INIT", - "TRYOPEN", - "OPEN", -} - -// ConnectionResponse defines the query response for a connection. -// It includes the proof and the height at which the proof was retrieved. -pub struct ConnectionResponse { - pub connection: ConnectionEnd, - pub proof: Option, - pub proof_path: CommitmentPath, - pub proof_height: Height, -} -``` - -#### Connection Relaying - -The figure below shows the four connection handshake message types that can be created during a relay cycle (see the Relayer box and the four actions). For each message the queries (light grey arrows) and expected states on `A` and `B` are shown. For example, if the connection on A is in `OPEN` state and on B in `TRYOPEN`, the relayer will send a transaction to B including the `ConnOpenConfirm` datagram. Once processed on B, the state of connection changes from `TRYOPEN` to `OPEN`. - -![IBC connection handshake relay](assets/IBC_conn_handshake_relay.jpeg) - -##### MsgConnectionOpenInit -The `MsgConnectionOpenInit` message is used to initialize a connection. This is done when the relay thread starts, after loading the configuration that includes the connection information and before entering its event loop. In this section it is assumed the message is relayed to A. -```rust -pub struct MsgConnectionOpenInit { - pub connection_id: ConnectionId, // connAtoB - pub client_id: ClientId, // clB - pub counterparty: Counterparty, // {ClientID: clA, ConnectionID: connBtoA, Prefix: "B_store"> - pub signer: AccAddress -} -``` -The comments show the values of the fields for the diagram above. - -The relayer creates and forwards this message only if it has been explicitly configured with the connection information (see `connections.src` and `connections.dest`sections of the configuration file). - -In order to create a `MsgConnectionOpenInit` the relayer recreates the `ConnectionEnd` from the configuration, as it will be stored on A. The steps are: -- create the `ConnectionEnd` for the B->A path -```rust - let connection_a = getConfiguredConnection(A, B, ..); -``` -- query connection state on chain A and if it already exist then continue with next event -```rust -let existing_a = ibc_query_connection(chainA, connection_a); -if existing_a.state != "UNINIT" { - continue; -} -``` -- create the message -```rust -let init_msg = MsgConnectionOpenInit { - connection_id: connection_a.connection_id, - client_id: connection_a.client_id, - counterparty: Counterparty { - ClientID: connection_a.counterparty.client_id, - connection_id: connection_a.counterparty.connection_id, - prefix: config(B).store_prefix, - } - Signer: config(A).signer, -} -``` -- send `init_msg` in a transaction to B - -##### MsgConnectionOpenTry -The `MsgConnectionOpenTry` defines the message sent by the relayer to try to open a connection. In this section it is assumed to be relayed to B. 
- -```rust -pub struct MsgConnectionOpenTry { - pub connection_id: ConnectionId, // connBtoA - pub client_id: ClientId, // clA - pub counterparty: Counterparty, // {ClientID: clB, ConnectionID: connAtoB, Prefix: "A_store"> - pub counterparty_versions: Vec, - pub proof_init: CommitmentProof, // proof that connAtoB connection end is stored on Chain A - pub proof_consensus: CommitmentProof, // proof that on A at proof_height (hA), the B client has - // stored B's consensus state at consensus_height (hB) - pub proof_height: Height, // hA, height of A at which relayer retrieved proof_init - pub consensus_height: Height, // hB - pub signer: AccAddress, -} -``` -The comments show the values of the fields for the diagram above. -Note: -- `proof_height` is the height of chain A when relayer created the `proof_init`, hA in the diagram. -- `consensus_height` is the latest height of chain B that chain A has stored in its client `clB` at the time the relayer queried that client, `hB` in the diagram - -The relayer creates a `MsgConnectionOpenTry` for the A->B relay path when an IBC event notification is received. -The steps are: -- let `connAtoB` be the connection identifier on A,`hx` the height when the event occurred and `clA` the client ID of A on B -- query last client state height on B -```rust -let ha_prime = ibc_query_client_state(chainB, 0).height; -``` -- create `UpdateClientMsg`(s) for `clA` on chain B if required (i.e. if `hx` is higher than latest height of `clA` on B) -```rust - let h = max(hx, ha_prime); - let headers = get_minimal_set(h, ha_prime); - let client_msgs = updateClientMsgs(clA, headers, signer); -``` -- send `client_msgs` to B -- query latest height `ha` of A and wait for `ha > h` (Rust TODO) -- query connection with proof at `h` on chain A and if it is not in proper state then continue with the next event -```rust - let query_response = ibc_query_connection_with_proof(chainA, connAtoB, h); - if query_response.connection.state != "INIT" { - continue; - } - let connection_a = query_response.connection; - let proof_init = query_response.proof; - let proof_height := query_response.proof_height; - assert(proof_height = h); -``` -- query the consensus state stored by client `clB` on A -```rust - let consensus_response = ibc_query_consensus_with_proof(chainA, connection_a.client_id); - let proof_consensus = consensus_response.proof; - let consensus_height = consensus_response.proof_height; -``` -- create the `MsgConnectionOpenTry` message with the information collected above. -```rust -let try_msg = MsgConnectionOpenTry { - connection_id: connBtoA, - client_id: clA, - counterparty: Counterparty{ - client_id: connection_a.client_id, - connection_id: connAtoB, - prefix: config(A).store_prefix, - } - proof_init, - proof_consensus, - proof_height, - consensus_height, - signer: config.B.Signer(), -} -``` -- send `try_msg` to B - -When `MsgConnectionOpenTry` is processed on B, the message handler: -- checks that `consensus_height` is valid (smaller or equal than chain B's current height) and within trusting period, -- client `clA` verifies `proof_consensus` for B's consensus state at `consensus_height` and -- client `clA` verifies `proof_init` for the `ConnectionEnd`object that B expects to be present on A at `proof_height`. -The relayer may also perform these verifications before submitting the transaction. 
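-
-As an illustration only (not part of the ADR), the cheap relayer-side pre-check hinted at above can mirror the handler's height and trusting-period validation before any proof verification is attempted; the helper name and the plain integer/`Duration` types below are hypothetical:
-
-```rust
-use std::time::{Duration, SystemTime};
-
-// Hypothetical pre-check mirroring the first validation performed by the handler
-// on chain B: the consensus height proven from A must not exceed B's current
-// height, and the corresponding consensus state must still be within the
-// trusting period.
-fn precheck_conn_open_try(
-    chain_b_height: u64,
-    consensus_height: u64,
-    consensus_time: SystemTime,
-    trusting_period: Duration,
-) -> bool {
-    let within_trusting_period = SystemTime::now()
-        .duration_since(consensus_time)
-        .map(|age| age < trusting_period)
-        .unwrap_or(false);
-
-    consensus_height <= chain_b_height && within_trusting_period
-}
-```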
- -##### MsgConnectionOpenAck -(WIP) - needs to be updated with correct query sequence - -`MsgConnectionOpenAck` defines the message sent by the relayer to chain A to acknowledge the change of connection state to `TRYOPEN` on Chain B. - -```rust -pub struct MsgConnectionOpenAck { - pub connection_id: ConnectionId, // connAtoB - pub proof_try: CommitmentProof, // proof that connBtoA on Chain B is in TRYOPEN state - pub proof_consensus: CommitmentProof, // proof that on B at proof_height (hB), the A client has - // stored A's consensus state at consensus_height (hA) - pub proof_height: Height, // hB, height of B at which relayer retrieved proof_try - pub consensus_height: Height, // hA - pub versions: , - pub signer: AccAddress, -} -``` -The comments show the values of the fields for the diagram above. -Note: -- `proof_height` is the height of chain B when relayer created the `proof_try`, hB in the diagram. -- `consensus_height` is the latest height of chain A that chain B has stored in its client `clA` at the time the relayer queried that client, `hA` in the diagram - -The relayer creates a `MsgConnectionOpenAck` for the B->A relay path when an IBC event notification is received or when chain B is scanned. The steps are: -- let `connBtoA` be the connection identifier on B -- query connection with proof on chain B and if it is not in proper state then continue with next event -```rust - let query_response = ibc_query_connection_with_proof(chainB, connBtoA); - if query_response.connection.state != "TRYOPEN" { - continue; - } - let connection_b = query_response.connection; - let proof_try = query_response.proof; - let proof_height := query_response.proof_height; -``` -- query connection on chain A and validate its state: -```rust - let connAtoB = connection_b.counterparty.connection_id; - let connection_a = ibc_query_connection(chainA, connAtoB); - if connection_a.state != "INIT" && connection_a.state != "TRYOPEN" { - continue; - } -``` -- create `UpdateClientMsg` for `clB` on chain A if required (i.e. if `proof_height` is higher than latest height of `clB` on A) -```rust - let client_msg = MsgUpdateClient::new(connection_a.client_id, header, signer); -``` -- query the consensus state stored by client `clA` on B: -```rust - let consensus_response = ibc_query_consensus_with_proof(chainB, connection_b.client_id); - let proof_consensus = consensus_response.proof; - let consensus_height = consensus_response.proof_height; -``` -- create the `MsgConnectionOpenAck` message with the information collected above -```rust -let ack_msg = MsgConnectionOpenAck { - connection_id: connAtoB, - proof_try, - proof_consensus, - proof_height, - consensus_height, - signer: config.A.Signer(), -} -``` -- send `client_msg` and `ack_msg` in a transaction to A - -##### MsgConnectionOpenConfirm -(WIP) - needs to be updated with correct query sequence - -`MsgConnectionOpenConfirm` defines the message sent by the relayer to chain B to confirm the opening of a connection on chain A. - -```rust -pub struct MsgConnectionOpenConfirm { - pub connection_id: ConnectionId, // connBtoA - pub proof_confirm: CommitmentProof,// proof that connAtoB on chain A is in OPEN state - pub proof_height: Height, // hA, height of A at which relayer retrieved the proof_confirm - pub signer: AccAddress, -} -``` - -The relayer creates a `MsgConnectionOpenConfirm` for the A->B relay path when an IBC event notification is received or when chain A is scanned. 
The steps are:
-- let `connAtoB` be the connection identifier on A
-- query connection with proof on chain A and if it is not in proper state then continue with next event
-```rust
-    let query_response = ibc_query_connection_with_proof(chainA, connAtoB);
-    if query_response.connection.state != "OPEN" {
-        continue;
-    }
-    let connection_a = query_response.connection;
-    let proof_confirm = query_response.proof;
-    let proof_height = query_response.proof_height;
-```
-- query connection on chain B and validate its state:
-```rust
-    let connBtoA = connection_a.counterparty.connection_id;
-    let connection_b = ibc_query_connection(chainB, connBtoA);
-    if connection_b.state != "INIT" && connection_b.state != "TRYOPEN" {
-        continue;
-    }
-```
-- create `UpdateClientMsg` for `clA` on chain B if required (i.e. if `proof_height` is higher than latest height of `clA` on B)
-```rust
-    let client_msg = MsgUpdateClient::new(connection_b.client_id, header, config.B.Signer());
-```
-- create the `MsgConnectionOpenConfirm` message with the information collected above
-```rust
-let confirm_msg = MsgConnectionOpenConfirm {
-    connection_id: connBtoA,
-    proof_confirm,
-    proof_height,
-    signer: config.B.Signer(),
-}
-```
-- send `client_msg` and `confirm_msg` in a transaction to B
-
-#### Channels
-(WIP)
-The channel handshake messages are relayed in a similar way to the connection ones. In addition, checks on the state of the underlying connection are performed.
-
-#### Packet, Timeouts and Acknowledgments
-(WIP)
-Application packets are not stored in the chain state, only a cryptographic commitment is stored.
-The relayer has to query the chain's logging system to get the packet data for a given source port and channel.
-The result of the query includes, among others:
- - the source port and channel identifiers
- - the sequence number
-These are used to create the packet's commitment path, which is then used in a state query to get the packet commitment.
-
-## Inter-relayer Coordination
-Multiple relayers may run in parallel and, while it is expected that they relay over disjoint paths, they may submit the same transactions to a chain. In this case only the first transaction succeeds, while subsequent ones fail, causing loss of fees. Ideally some coordination would be in place to avoid this, but it is out of the scope of this document.
-
-## Relayer Restarts and Upgrades
-
-## Decision
-
-> This section explains all of the details of the proposed solution, including implementation details.
-It should also describe affects / corollary items that may need to be changed as a part of this.
-If the proposed change will be large, please also indicate a way to do the change to maximize ease of review.
-(e.g. the optimal split of things to do between separate PR's)
-
-## Status
-
-> A decision may be "proposed" if it hasn't been agreed upon yet, or "accepted" once it is agreed upon. If a later ADR changes or reverses a decision, it may be marked as "deprecated" or "superseded" with a reference to its replacement.
-
-{Deprecated|Proposed|Accepted}
-
-## Consequences
-
-> This section describes the consequences, after applying the decision. All consequences should be summarized here, not just the "positive" ones.
-
-### Positive
-
-### Negative
-
-### Neutral
-
-### Appendix A
-The IBC events, input to the relay thread, are described here.
- -``` -{"create_client": { - "client_id": , - "client_type": , - } -} - -{"update_client": { - "client_id": , - "client_type": , - } -} - -{"connection_open_init": { - "connection_id": , - "client_id": , - "counterparty_connection_id": , - "counterparty_client_id": , - } -} - -{}"connection_open_try": { - "connection_id": , - "client_id": , - "counterparty_connection_id": , - "counterparty_client_id": , - } -} - -{"connection_open_ack": { - "connection_id": , - } -} - -{"connection_open_confirm": { - "connection_id": , - } -} - -{"channel_open_init": { - "port_id": , - "channel_id": , - "counterparty_port_id": , - "counterparty_channel_id": , - "connection_id": , - } -} - -{"channel_open_try": { - "port_id": , - "channel_id": , - "counterparty_port_id": , - "counterparty_channel_id": , - "connection_id": , - } -} - -{"channel_open_ack": { - "port_id": , - "channel_id": , - } -} - -{"channel_open_confirm": { - "port_id": , - "channel_id": , - } -} - -{"channel_close_init": { - "port_id": , - "channel_id": , - } -} - -{"channel_close_confirm": { - "port_id": , - "channel_id": , - } -} - -{"send_packet": { - "packet_data": String, - "packet_timeout_height": String, - "packet_timeout_timestamp": String, - "packet_sequence": String, - "packet_src_port": , - "packet_src_channel": , - "packet_dst_port": , - "packet_dst_channel": , -} - -{"recv_packet": { - "packet_data": String, - "packet_ack": String, - "packet_timeout_height": String, - "packet_timeout_timestamp": String, - "packet_sequence": String, - "packet_src_port": , - "packet_src_channel": , - "packet_dst_port": , - "packet_dst_channel": , -} -``` - -## References - -> Are there any relevant PR comments, issues that led up to this, or articles referrenced for why we made the given design choice? If so link them here! - -* {reference link} diff --git a/docs/architecture/adr-003-handler-implementation.md b/docs/architecture/adr-003-handler-implementation.md deleted file mode 100644 index 5876c81fc0..0000000000 --- a/docs/architecture/adr-003-handler-implementation.md +++ /dev/null @@ -1,635 +0,0 @@ -# ADR 003: IBC handlers implementation - -## Changelog -* 2020-08-06: Initial proposal -* 2020-08-10: Rename Handler to Message Processor -* 2020-08-14: Revamp definition of chain-specific messages, readers and keepers -* 2021-12-29: Consolidate ADR with the implementation. - -## Context - -In this ADR, we provide recommendations for implementing the IBC -handlers within the `ibc` (modules) crate. - -## Decision - -Concepts are introduced in the order given by a topological sort of their dependencies on each other. - -### Events - -IBC handlers must be able to emit events which will then be broadcasted via the node's pub/sub mechanism, -and eventually picked up by the IBC relayer. - -An event has an arbitrary structure, depending on the handler that produces it. -Here is the [list of all IBC-related events][events], as seen by the relayer. -Note that the consumer of these events in production would not be the relayer directly -(instead the consumer is the node/SDK where the IBC module executes), -but nevertheless handlers will reuse these event definitions. 
-
-[events]: https://github.com/informalsystems/ibc-rs/blob/bf84a73ef7b3d5e9a434c9af96165997382dcc9d/modules/src/events.rs#L15-L43
-
-```rust
-pub enum IBCEvent {
-    NewBlock(NewBlock),
-
-    CreateClient(ClientEvents::CreateClient),
-    UpdateClient(ClientEvents::UpdateClient),
-    ClientMisbehavior(ClientEvents::ClientMisbehavior),
-
-    OpenInitConnection(ConnectionEvents::OpenInit),
-    OpenTryConnection(ConnectionEvents::OpenTry),
-    // ...
-}
-```
-
-### Logging
-
-IBC handlers must be able to log information for introspectability and ease of debugging.
-A handler can output multiple log records, which are expressed as a pair of a status and a
-log line. The interface for emitting log records is described in the next section.
-
-```rust
-pub enum LogStatus {
-    Success,
-    Info,
-    Warning,
-    Error,
-}
-
-pub struct Log {
-    status: LogStatus,
-    body: String,
-}
-
-impl Log {
-    fn success(msg: impl Display) -> Self;
-    fn info(msg: impl Display) -> Self;
-    fn warning(msg: impl Display) -> Self;
-    fn error(msg: impl Display) -> Self;
-}
-```
-
-### Handler output
-
-IBC handlers must be able to return arbitrary data, together with events and log records, as described above.
-As a handler may fail, it is necessary to keep track of errors.
-
-To this end, we introduce a type for the return value of a handler:
-
-```rust
-pub type HandlerResult<T, E> = Result<HandlerOutput<T>, E>;
-
-pub struct HandlerOutput<T> {
-    pub result: T,
-    pub log: Vec<Log>,
-    pub events: Vec<IBCEvent>,
-}
-```
-
-We introduce a builder interface to be used within the handler implementation to incrementally build a `HandlerOutput` value.
-
-```rust
-impl<T> HandlerOutput<T> {
-    pub fn builder() -> HandlerOutputBuilder<T> {
-        HandlerOutputBuilder::new()
-    }
-}
-
-pub struct HandlerOutputBuilder<T> {
-    log: Vec<Log>,
-    events: Vec<IBCEvent>,
-    marker: PhantomData<T>,
-}
-
-impl<T> HandlerOutputBuilder<T> {
-    pub fn log(&mut self, log: impl Into<Log>);
-    pub fn emit(&mut self, event: impl Into<IBCEvent>);
-    pub fn with_result(self, result: T) -> HandlerOutput<T>;
-}
-```
-
-We provide below an example usage of the builder API:
-
-```rust
-fn some_ibc_handler() -> HandlerResult<u64, Error> {
-    let mut output = HandlerOutput::builder();
-
-    // ...
-
-    output.log(Log::info("did something"));
-
-    // ...
-
-    output.log(Log::success("all good"));
-    output.emit(SomeEvent::AllGood);
-
-    Ok(output.with_result(42))
-}
-```
-
-### IBC Submodule
-
-The various IBC messages and their processing logic, as described in the IBC specification,
-are split into a collection of submodules, each pertaining to a specific aspect of
-the IBC protocol, eg. client lifecycle management, connection lifecycle management,
-packet relay, etc.
-
-In this section we propose a general approach to implement the handlers for a submodule.
-As a running example we will use a dummy submodule that deals with connections, which should not
-be mistaken for the actual ICS 003 Connection submodule.
-
-#### Reader
-
-A typical handler will need to read data from the chain state at the current height,
-via the private and provable stores.
-
-To avoid coupling between the handler interface and the store API, we introduce an interface
-for accessing this data. This interface, called a `Reader`, is shared between all handlers
-in a submodule, as those typically access the same data.
-
-Having a high-level interface for this purpose helps avoid coupling and makes
-writing unit tests for the handlers easier, as one does not need to provide a concrete
-store, or to mock one.
- -```rust -pub trait ConnectionReader -{ - fn connection_end(&self, connection_id: &ConnectionId) -> Option; -} -``` - -A production implementation of this `Reader` would hold references to both the private and provable -store at the current height where the handler executes, but we omit the actual implementation as -the store interfaces are yet to be defined, as is the general IBC top-level module machinery. - -A mock implementation of the `ConnectionReader` trait could looks as follows: - -```rust -struct MockConnectionReader { - connection_id: ConnectionId, - connection_end: Option, - client_reader: MockClientReader, -} - -impl ConnectionReader for MockConnectionReader { - fn connection_end(&self, connection_id: &ConnectionId) -> Option { - if connection_id == &self.connection_id { - self.connection_end.clone() - } else { - None - } - } -} -``` - -#### Keeper - -Once a handler executes successfully, some data will typically need to be persisted in the chain state -via the private/provable store interfaces. In the same vein as for the reader defined in the previous section, -a submodule should define a trait which provides operations to persist such data. -The same considerations w.r.t. to coupling and unit-testing apply here as well. - -```rust -pub trait ConnectionKeeper { - fn store_connection( - &mut self, - client_id: ConnectionId, - client_type: ConnectionType, - ) -> Result<(), Error>; - - fn add_connection_to_client( - &mut self, - client_id: ClientId, - connection_id: ConnectionId, - ) -> Result<(), Error>; -} -``` - -#### Submodule implementation - -We now come to the actual definition of a handler for a submodule. - -We recommend each handler to be defined within its own Rust module, named -after the handler itself. For example, the "Create Client" handler of ICS 002 would -be defined in `modules::ics02_client::handler::create_client`. - -##### Message type - -Each handler must define a datatype which represent the message it can process. - -```rust -pub struct MsgConnectionOpenInit { - connection_id: ConnectionId, - client_id: ClientId, - counterparty: Counterparty, -} -``` - -##### Handler implementation - -In this section we provide guidelines for implementing an actual handler. - -We divide the handler in two parts: processing and persistence. - -###### Processing - -The actual logic of the handler is expressed as a pure function, typically named -`process`, which takes as arguments a `Reader` and the corresponding message, and returns -a `HandlerOutput`, where `T` is a concrete datatype and `E` is an error type which defines -all potential errors yielded by the handlers of the current submodule. - -```rust -pub struct ConnectionMsgProcessingResult { - connection_id: ConnectionId, - connection_end: ConnectionEnd, -} -``` - -The `process` function will typically read data via the `Reader`, perform checks and validation, construct new -datatypes, emit log records and events, and eventually return some data together with objects to be persisted. - -To this end, this `process` function will create and manipulate a `HandlerOutput` value like described in -the corresponding section. 
- -```rust -pub fn process( - reader: &dyn ConnectionReader, - msg: MsgConnectionOpenInit, -) -> HandlerResult -{ - let mut output = HandlerOutput::builder(); - - let MsgConnectionOpenInit { connection_id, client_id, counterparty, } = msg; - - if reader.connection_end(&connection_id).is_some() { - return Err(Kind::ConnectionAlreadyExists(connection_id).into()); - } - - output.log("success: no connection state found"); - - if reader.client_reader.client_state(&client_id).is_none() { - return Err(Kind::ClientForConnectionMissing(client_id).into()); - } - - output.log("success: client found"); - - output.emit(IBCEvent::ConnectionOpenInit(connection_id.clone())); - - Ok(output.with_result(ConnectionMsgProcessingResult { - connection_id, - client_id, - counterparty, - })) -} -``` - -###### Persistence - -If the `process` function specified above succeeds, the result value it yielded is then -passed to a function named `keep`, which is responsible for persisting the objects constructed -by the processing function. This `keep` function takes the submodule's `Keeper` and the result -type defined above, and performs side-effecting calls to the keeper's methods to persist the result. - -Below is given an implementation of the `keep` function for the "Create Connection" handlers: - -```rust -pub fn keep( - keeper: &mut dyn ConnectionKeeper, - result: ConnectionMsgProcessingResult, -) -> Result<(), Error> -{ - keeper.store_connection(result.connection_id.clone(), result.connection_end)?; - keeper.add_connection_to_client(result.client_id, result.connection_id)?; - - Ok(()) -} -``` - -##### Submodule dispatcher - -> This section is very much a work in progress, as further investigation into what -> a production-ready implementation of the `ctx` parameter of the top-level dispatcher -> is required. As such, implementors should feel free to disregard the recommendations -> below, and are encouraged to come up with amendments to this ADR to better capture -> the actual requirements. - -Each submodule is responsible for dispatching the messages it is given to the appropriate -message processing function and, if successful, pass the resulting data to the persistance -function defined in the previous section. - -To this end, the submodule should define an enumeration of all messages, in order -for the top-level submodule dispatcher to forward them to the appropriate processor. -Such a definition for the ICS 003 Connection submodule is given below. - -```rust -pub enum ConnectionMsg { - ConnectionOpenInit(MsgConnectionOpenInit), - ConnectionOpenTry(MsgConnectionOpenTry), - ... -} -``` -The actual implementation of a submodule dispatcher is quite straightforward and unlikely to vary -much in substance between submodules. We give an implementation for the ICS 003 Connection module below. - -```rust -pub fn dispatch(ctx: &mut Ctx, msg: Msg) -> Result, Error> -where - Ctx: ConnectionReader + ConnectionKeeper, -{ - match msg { - Msg::ConnectionOpenInit(msg) => { - let HandlerOutput { - result, - log, - events, - } = connection_open_init::process(ctx, msg)?; - - connection::keep(ctx, result)?; - - Ok(HandlerOutput::builder() - .with_log(log) - .with_events(events) - .with_result(())) - } - - Msg::ConnectionOpenTry(msg) => // omitted - } -} -``` - -In essence, a top-level dispatcher is a function of a message wrapped in the enumeration introduced above, -and a "context" which implements both the `Reader` and `Keeper` interfaces. 
- -### Dealing with chain-specific datatypes - -The ICS 002 Client submodule stands out from the other submodules as it needs -to deal with chain-specific datatypes, such as `Header`, `ClientState`, and -`ConsensusState`. - -To abstract over chain-specific datatypes, we introduce a trait which specifies -both which types we need to abstract over, and their interface. - -For the ICS 002 Client submodule, this trait looks as follow: - -```rust -pub trait ClientDef { - type Header: Header; - type ClientState: ClientState; - type ConsensusState: ConsensusState; -} -``` - -The `ClientDef` trait specifies three datatypes, and their corresponding interface, which is provided -via a trait defined in the same submodule. - -A production implementation of this interface would instantiate these types with the concrete -types used by the chain, eg. Tendermint datatypes. Each concrete datatype must be provided -with a `From` instance to lift it into its corresponding `Any...` enumeration. - -For the purpose of unit-testing, a mock implementation of the `ClientDef` trait could look as follows: - -```rust -struct MockHeader(u32); - -impl Header for MockHeader { - // omitted -} - -impl From for AnyHeader { - fn from(mh: MockHeader) -> Self { - Self::Mock(mh) - } -} - -struct MockClientState(u32); - -impl ClientState for MockClientState { - // omitted -} - -impl From for AnyClientState { - fn from(mcs: MockClientState) -> Self { - Self::Mock(mcs) - } -} - -struct MockConsensusState(u32); - -impl ConsensusState for MockConsensusState { - // omitted -} - -impl From for AnyConsensusState { - fn from(mcs: MockConsensusState) -> Self { - Self::Mock(mcs) - } -} - -struct MockClient; - -impl ClientDef for MockClient { - type Header = MockHeader; - type ClientState = MockClientState; - type ConsensusState = MockConsensusState; -} -``` - -Since the actual type of client can only be determined at runtime, we cannot encode -the type of client within the message itself. - -Because of some limitations of the Rust type system, namely the lack of proper support -for existential types, it is currently impossible to define `Reader` and `Keeper` traits -which are agnostic to the actual type of client being used. - -We could alternatively model all chain-specific datatypes as boxed trait objects (`Box`), -but this approach runs into a lot of limitations of trait objects, such as the inability to easily -require such trait objects to be Clonable, or Serializable, or to define an equality relation on them. -Some support for such functionality can be found in third-party libraries, but the overall experience -for the developer is too subpar. - -We thus settle on a different strategy: lifting chain-specific data into an `enum` over all -possible chain types. 
- -For example, to model a chain-specific `Header` type, we would define an enumeration in the following -way: - -```rust -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] // TODO: Add Eq -pub enum AnyHeader { - Mock(mocks::MockHeader), - Tendermint(tendermint::header::Header), -} - -impl Header for AnyHeader { - fn height(&self) -> Height { - match self { - Self::Mock(header) => header.height(), - Self::Tendermint(header) => header.height(), - } - } - - fn client_type(&self) -> ClientType { - match self { - Self::Mock(header) => header.client_type(), - Self::Tendermint(header) => header.client_type(), - } - } -} -``` - -This enumeration dispatches method calls to the underlying datatype at runtime, while -hiding the latter, and is thus akin to a proper existential type without running -into any limitations of the Rust type system (`impl Header` bounds not being allowed -everywhere, `Header` not being able to be treated as a trait objects because of `Clone`, -`PartialEq` and `Serialize`, `Deserialize` bounds, etc.) - -Other chain-specific datatypes, such as `ClientState` and `ConsensusState` require their own -enumeration over all possible implementations. - -On top of that, we also need to lift the specific client definitions (`ClientDef` instances), -into their own enumeration, as follows: - -```rust -#[derive(Clone, Debug, PartialEq, Eq)] -pub enum AnyClient { - Mock(mocks::MockClient), - Tendermint(tendermint::TendermintClient), -} - -impl ClientDef for AnyClient { - type Header = AnyHeader; - type ClientState = AnyClientState; - type ConsensusState = AnyConsensusState; -} -``` - -Messages can now be defined generically over the `ClientDef` instance: - - -```rust -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] -pub struct MsgCreateClient { - pub client_id: ClientId, - pub client_type: ClientType, - pub consensus_state: CD::ConsensusState, -} - -pub struct MsgUpdateClient { - pub client_id: ClientId, - pub header: CD::Header, -} -``` - -The `Keeper` and `Reader` traits are defined for any client: - -```rust -pub trait ClientReader { - fn client_type(&self, client_id: &ClientId) -> Option; - fn client_state(&self, client_id: &ClientId) -> Option; - fn consensus_state(&self, client_id: &ClientId, height: Height) -> Option; -} - -pub trait ClientKeeper { - fn store_client_type( - &mut self, - client_id: ClientId, - client_type: ClientType, - ) -> Result<(), Error>; - - fn store_client_state( - &mut self, - client_id: ClientId, - client_state: AnyClientState, - ) -> Result<(), Error>; - - fn store_consensus_state( - &mut self, - client_id: ClientId, - consensus_state: AnyConsensusState, - ) -> Result<(), Error>; -} -``` - -This way, only one implementation of the `ClientReader` and `ClientKeeper` trait is required, -as it can delegate eg. the serialization of the underlying datatypes to the `Serialize` bound -of the `Any...` wrappper. 
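-
-As an illustration of that last point, a single in-memory store can serve as the
-`ClientReader`/`ClientKeeper` implementation for every client type, because it only ever handles the
-`Any...` wrappers. The sketch below is hypothetical; it assumes the `ClientId`, `ClientType`,
-`Height` and `Error` types from the surrounding context, and, since `store_consensus_state` above
-does not receive a height, it simply keeps the most recently stored consensus state per client.
-
-```rust
-use std::collections::HashMap;
-
-/// Hypothetical single implementation of `ClientReader`/`ClientKeeper`,
-/// storing only the `Any...` wrappers and thus usable with every client type.
-#[derive(Default)]
-struct InMemoryClientStore {
-    client_types: HashMap<ClientId, ClientType>,
-    client_states: HashMap<ClientId, AnyClientState>,
-    consensus_states: HashMap<ClientId, AnyConsensusState>,
-}
-
-impl ClientReader for InMemoryClientStore {
-    fn client_type(&self, client_id: &ClientId) -> Option<ClientType> {
-        self.client_types.get(client_id).cloned()
-    }
-
-    fn client_state(&self, client_id: &ClientId) -> Option<AnyClientState> {
-        self.client_states.get(client_id).cloned()
-    }
-
-    fn consensus_state(&self, client_id: &ClientId, _height: Height) -> Option<AnyConsensusState> {
-        // Simplification: the keeper trait above does not receive a height,
-        // so this mock only tracks the latest consensus state per client.
-        self.consensus_states.get(client_id).cloned()
-    }
-}
-
-impl ClientKeeper for InMemoryClientStore {
-    fn store_client_type(&mut self, client_id: ClientId, client_type: ClientType) -> Result<(), Error> {
-        self.client_types.insert(client_id, client_type);
-        Ok(())
-    }
-
-    fn store_client_state(&mut self, client_id: ClientId, client_state: AnyClientState) -> Result<(), Error> {
-        self.client_states.insert(client_id, client_state);
-        Ok(())
-    }
-
-    fn store_consensus_state(&mut self, client_id: ClientId, consensus_state: AnyConsensusState) -> Result<(), Error> {
-        self.consensus_states.insert(client_id, consensus_state);
-        Ok(())
-    }
-}
-```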
- -Both the `process` and `keep` function are defined to take a message generic over -the actual client type: - -```rust -pub fn process( - ctx: &dyn ClientReader, - msg: MsgCreateClient, -) -> HandlerResult, Error>; - -pub fn keep( - keeper: &mut dyn ClientKeeper, - result: CreateClientResult, -) -> Result<(), Error>; -``` - -Same for the top-level dispatcher: - -```rust -pub fn dispatch(ctx: &mut Ctx, msg: ClientMsg) -> Result, Error> -where - Ctx: ClientReader + ClientKeeper; -``` - -With this boilerplate out of way, one can write tests using a mock client, and associated mock datatypes -in a fairly straightforward way, taking advantage of the `From` instance to lift concerete mock datatypes -into the `Any...` enumeration: - -```rust - #[test] - fn test_create_client_ok() { - let client_id: ClientId = "mockclient".parse().unwrap(); - - let reader = MockClientReader { - client_id: client_id.clone(), - client_type: None, - client_state: None, - consensus_state: None, - }; - - let msg = MsgCreateClient { - client_id, - client_type: ClientType::Tendermint, - consensus_state: MockConsensusState(42).into(), // lift into `AnyConsensusState` - }; - - let output = process(&reader, msg.clone()); - - match output { - Ok(HandlerOutput { - result, - events, - log, - }) => { - // snip - } - Err(err) => { - panic!("unexpected error: {}", err); - } - } - } -``` - -## Status - -Proposed - -## Consequences - -### Positive -- clear separation of message handlers logic (processing and persistence logic) from the store -- provide support to mock the context of a handler and test the handler functionality in isolation - - -### Negative -- data type system around submodule ICS02 is relatively complex - -### Neutral - -## References \ No newline at end of file diff --git a/docs/architecture/adr-004-relayer-domain-decomposition.md b/docs/architecture/adr-004-relayer-domain-decomposition.md deleted file mode 100644 index 23a7d8d39c..0000000000 --- a/docs/architecture/adr-004-relayer-domain-decomposition.md +++ /dev/null @@ -1,303 +0,0 @@ -# ADR 004: Relayer Domain Decomposition - -## Changelog -* 21.7.2020: Initial sketch -* 27.7.2020: Dependencies outline -* 5.10.2020: Update based on sketch -* 2.11.2020: Reviewed & accepted - -## Context - -The IBC handlers queries and relayer are defined loosely in the [specs]. -The goal of this ADR is to provide clarity around the basic domain objects -from the perspective of the relayer, -their interfaces as well as dependencies between them in order to -guide the implementation. The success criteria for the decomposition is -how well it can be tested. It's expected that any decomposition will -lend itself to tight unit tests allowing more collaborators make change -in the code base with confidence. - -## Decision -The decomposition should be motivated by what we want to test and what -we need to mock out to exercise the core logic. 
- -We want to be able to test the following high-level functions: - -* Client create and update - * With different chain states -* Connection handshake - * With different chain states -* Channel Setup - * With different chain states -* Datagram Construction - * With different chain states -* Datagram to Transaction - * Batching - * Signing -* Datagram Submission - * With different chain states - * Missing Client Updates - * With Missing Proofs -* Handlers (datagrams, chain state) -> events - * Handling the batch of datagrams - * With different chain states - * Specifically, the key value store - * Produce events - -## Dependencies - -In this section, we map the operations which need to be performed at different -stages of both the relayer and the IBC handlers. This gives an outline -of what low-level operations and dependencies need to be mocked out to test each -stage in isolation, and will inform the design of the various traits needed -to mock those out. - -Not all stages are listed here because the operations and dependencies -outlined below cover all the possible dependencies at each stage. - -### Initializing a connection from the relayer - -- Need a relayer configuration (relayer.toml) -- Query chain B for its commitment prefix (ABCI query) -- Send `MsgConnectionOpenInit` message to chain A (transaction) - -### `ConnOpenInit` (Handler) - -- Provable store -- Private store - -### `updateIBCClient` (Relayer) - -- get the latest height from chain A (Query) -- get client consensus state from chain B (Query) -- get latest header + minimal set from chain A (Light Client) -- verify client state proof (Prover) -- create and submit datagrams to update B's view of A (Message Builder, Transaction) -- replace full node for B with other full node (PeerList) -- create and submit proof of fork (Fork Evidence Reporter) -- wait for UpdateClient event (Event Subscription) - -### `pendingDatagrams` (Relayer) -Builds the datagrams required by the given on-chain states. -For connection datagrams: -- get connection objects from chain A (Query) -- get connection objects from chain B (Query) -- get proof\* of connection state (e.g. `Init`) from chain A (Query, Prover, Light Client) -- get proof\* of client state and consensus state from chain A (Query, Prover, Light Client) - - \* involves querying the chain + get header/minimal set + verify proof -- build the next message in the connection handshake, e.g. `ConnOpenTry` (Message Builder) - -Channel datagrams are built similarly. Packet datagrams are triggered by events, and they are detailed in the Link section below. - -### IBC Module - -For every a transaction in a block of height H: - -- call appropriate handler (this is realized by ICS26 routing sub-module), -- If handler succeeds (transaction does not abort), then - apply the updates to the key-value store (provable & private), and also - get the current height H and emit appropriate events. - -## Objects - -The main domain objects in the relayer (`ForeignClient`, `Connection`, `Channel`) -will be created as concrete types which contain their configuration. -These objects are the relayer's representation of different parts of state from the two chains. -Dependencies between types indicate runtime dependencies of the chain -state. For instance, objects parameterized by a `ForeignClient` type, such as a `Connection`, -have the pre-condition that the runtime completed client creation before operating with -those objects. 
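-
-To make that pre-condition concrete, the following hedged sketch shows one possible shape for the
-`ForeignClient` constructor (this ADR only shows its use in the example `main` further down); the
-field layout and the `ForeignClientError` type are hypothetical.
-
-```rust
-/// Hypothetical sketch: a client hosted on `host_chain` that verifies headers
-/// of `target_chain`. Successfully constructing a value of this type implies
-/// that the client-creation transaction was submitted and accepted.
-struct ForeignClient {
-    client_id: ClientId,
-}
-
-impl ForeignClient {
-    fn new(
-        host_chain: &dyn ChainHandle,
-        target_chain: &dyn ChainHandle,
-    ) -> Result<ForeignClient, ForeignClientError> {
-        // Build a `MsgCreateClient` from the latest consensus state of
-        // `target_chain` and submit it to `host_chain`; like the other
-        // constructors in this ADR, this can be a blocking call.
-        unimplemented!()
-    }
-}
-```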
-
-### ChainHandle
-
-The ChainHandle trait is a local representation of a chain state on the relayer.
-The API of a ChainHandle provides reliable access to chain state whether
-crashed, constructed or queried. We envision a mock version of a chain
-will be used to test both handler and relayer logic ([#158]).
-
-The method set of the ChainHandle trait will reflect specific needs and not
-intermediate representations. Query and light client verification
-concerns will be internal to the chain handle implementation and not exposed
-via this API. Users of a ChainHandle implementation (i.e., relayer methods)
-can assume verified data or receive an Error and act appropriately.
-
-```rust
-trait ChainHandle {
-    // Generate a packet
-    fn create_packet(&self, event: IBCEvent) -> Result<Packet, ChainError>;
-
-    // Fetch the height of an IBC client hosted by a chain
-    // - query the consensus_state of src on dst
-    // - query the highest consensus_state
-    // - verify it with the light client
-    // - return the height
-    fn get_height(&self, client: &ForeignClient) -> Result<Height, ChainError>;
-
-    // Submit a transaction to a chain's underlying full node
-    fn submit(&self, transaction: EncodedTransaction) -> Result<(), ChainError>;
-
-    // ...
-}
-```
-
-### Connection
-
-```rust
-impl Connection {
-    fn new(
-        chain_a: &ChainHandle,
-        chain_b: &ChainHandle,
-        foreign_client_a: &ForeignClient,
-        foreign_client_b: &ForeignClient,
-        config: ConnectionConfig)
-    -> Result<Connection, ConnectionError> {
-        // Establish a connection between ChainA and ChainB via ICS 3 handshake.
-        // For a first version this can be completely synchronous (a blocking call).
-    }
-}
-```
-
-### Channel
-```rust
-impl Channel {
-    fn new(
-        chain_a: &ChainHandle,
-        chain_b: &ChainHandle,
-        connection: &Connection,
-        config: ChannelConfig)
-    -> Result<Channel, ChannelError> {
-        // Establish a channel between two modules (i.e., ICS4 handshake).
-    }
-}
-```
-
-## Link
-
-A link is the object that connects two specific modules on separate chains.
-Links are responsible for relaying packets from `chain_a`
-to `chain_b` and are therefore uni-directional. A single relayer process
-should be able to support multiple link instances and each link should
-run in its own thread. Links depend on `ForeignClient`s,
-`Connection` and `Channel`.
-
-```rust
-struct Link {
-    src_chain: &ChainHandle,
-    dst_chain: &ChainHandle,
-    channel: &Channel,
-}
-
-impl Link {
-    fn new(channel: &Channel, config: LinkConfig)
-    -> Link {
-        // ...
-    }
-
-    /// Relay Link specific packets from src_chain to dst_chain
-    /// Expect this to run in a thread
-    fn run(self) -> Error {
-        let subscription = self.src_chain.subscribe(&self.channel);
-
-        for (target_height, events) in subscription.iter() {
-            // ...
-
-            let datagrams = events.map(|event| {
-                Datagram::Packet(self.src_chain.build_packet(target_height, event))
-            });
-
-            for attempt in self.config_submission_attempts {
-                let current_height = self.dst_chain.get_height(&self.connection.channel.foreign_client)?;
-                let signed_headers = self.src_chain.get_minimal_set(current_height, target_height)?;
-
-                let mut attempt_datagrams = datagrams.clone();
-                attempt_datagrams.push(Datagram::ClientUpdate(ClientUpdate::new(signed_headers)));
-
-                let transaction = Transaction::new(attempt_datagrams);
-                self.dst_chain.submit(transaction.sign().encode())?;
-            }
-
-        }
-    }
-}
-```
-
-### Example Main
-
-Example of initializing a single link between two chains. Each
-chain has its own runtime and exposes a `handle` to communicate with
-that runtime from different threads.
There are dependencies between -ForeignClients, Connections, Channels and Links which are encoded in the -type system. The construction of them reflects that their corresponding -handshake protocol has completed successfully. - -```rust -fn main() -> Result<(), Box> { - let src_chain = ChainRuntime::new(); - let dst_chain = ChainRuntime::new(); - - /// chains expose handlers for commuicating with the chain related runtime - /// which move into their own threads - let src_chain_handle = src_chain.handle(); - thread::spawn(move || { - src_chain.run().unwrap(); - }); - - let dst_chain_handle = dst_chain.handle(); - thread::spawn(move || { - // What should we do on return here? - dst_chain.run().unwrap(); - }); - - let src_foreign_client_on_dst = ForeignClient::new( - &src_chain_handle, - &dst_chain_handle)?; - - let dst_foreign_client_on_src = ForeignClient::new( - &src_chain_handle, - &dst_chain_handle)?; - - let connection = Connection::new( - &src_chain_handle, - &dst_chain_handle, - dst_foreign_client_on_src, - src_foreign_client_on_dst, - ConnectionConfig::default()).unwrap(); - - let channel = Channel::new( - &src_chain_handle, - &dst_chain_handle, - connection, - ChannelConfig::default()).unwrap(); - - let link = Link::new( - src_chain_handle, - dst_chain_handle, - channel, - LinkConfig::default())?; - - link.run()?; - - Ok(()) -} -``` - -## Status - -- Accepted (first implementation in [#335](https://github.com/informalsystems/ibc-rs/pull/335)). - -## Consequences - -### Positive -* Clean abstractions an isolation from IO -* Handshakes are correct by construction -* Sane error handling - -### Negative - -### Neutral - -## References - -[specs]: https://github.com/cosmos/ibc/tree/master/spec -[#158]: https://github.com/informalsystems/ibc-rs/issues/158 diff --git a/docs/architecture/adr-005-relayer-v0-implementation.md b/docs/architecture/adr-005-relayer-v0-implementation.md deleted file mode 100644 index e15ad3783e..0000000000 --- a/docs/architecture/adr-005-relayer-v0-implementation.md +++ /dev/null @@ -1,234 +0,0 @@ -# ADR 005: Relayer v0.1 implementation - -## Changelog - -* 04.01.2020: First draft proposed. -* 09.02.2020: Revised, fixed todos, reviewed. - -## Context - -This ADR documents the implementation of the `v0.1` [relayer lib crate] -[ibc-relayer]. -This library is instantiated in the [Hermes][hermes] binary of the -[ibc-relayer-cli crate][ibc-relayer-cli] (which is not the focus of this discussion). - -As a main design goal, `v0.1` is meant to lay a foundation upon which we can -add more features and enhancements incrementally with later relayer versions. -This is to say that `v0.1` may be deficient in terms of features or -robustness, and rather aims to be simple, adaptable, and extensible. -For this reason, we primarily discuss aspects of concurrency and architecture. - - -### Relayer versioning scheme - -On the mid-term, the relayer architecture is set out to evolve across three -versions. - -The first of these, `v0.1`, makes several simplifying assumptions -about the environment of the relayer and its features. These assumptions -are important towards limiting the scope that `v0.1` aims to -cover, and allowing a focus on the architecture and concurrency model to -provide for growth in the future. - -These assumptions are documented below in the [decision](#decision) section. 
- -## Decision - - -### Configuration - -For the most part, the relayer configuration will be -static: the configuration for chains and their respective objects (clients, -connections, or channels) will be fully specified in the relayer -configuration file and will not change throughout execution. -Light clients are also statically defined in the config file, and cannot be -switched dynamically at runtime. - -Recent changes to the ICS protocol specifies identifier -selection for clients, connections, and channels to be [deterministic][ids]. -For this reason, we will not need to specify any identifiers in the -configuration file. -We only specify which pairs of chains should communicate with one -another, and the port identifier to use for that purpose. -This pair of chains plus their corresponding port identifiers is called a -__relaying path__. -Any relaying path is unidirectional. - -An example with the relevant section of the configuration file follows. - -```toml -[[connections]] -a_chain = 'ibc-0' -b_chain = 'ibc-1' - -[[connections.paths]] -a_port = 'transfer' -b_port = 'transfer' -``` - -Here there are two chains, ith one connection between them, and a path for -relaying on the port called `transfer` on both chains, from chain `ibc-0` -to `ibc-1`. - -### Links - -A [link][link] is a relayer-level protocol that implements packet relay across -one relaying path. -The relayer at `v0.1` will focus on a single link. -This limitation will be lifted in subsequent versions. - -### Chain State - -Each chain is assumed to start with an empty IBC state. -This means that the relayer will take care of creating the client, -connection, and channel objects respectively on each side of a link. - -### Proof Verification - -The `v0.1` relayer will _not_ do proof verification. - -### Feature set - -The [complete list of features is documented elsewhere][features] in detail. - -## Relayer Concurrency Model - -Relayer `v0.1` works under the assumption that there are no competing relayers -running concurrently (which may interfere with each other). -Furthermore, as stated above, the relayer will handle a single link (one -packet relaying direction from a source chain to a destination chain). -The following diagram sketches the relayer domain decomposition at a -high-level, with a focus on one link. - -relayer v0 domain 
-decomposition - - -The relayer supports a single stack made of a connection, a channel, and a link. - -The application thread that runs upon starting creates a link associated -with the relaying path. -It also triggers messages for creating all objects (clients, a connection, -and a channel) underlying this link. -These will cause the relayer to build and send all messages associated with -the handshakes for these objects, plus a retry mechanism. -It should work even these events are received by the link in the same time -with the live chain IBC events. -In other words, no synchronization with starts of other threads should be -required. - -Beside the application thread, the relayer maintains one or more threads -for each chain. -The number of threads per chain is chain-specific: -- For the production chain [Gaia][gaia] (see also the [References] - (#references) below), there are three separate - threads, described in more detail in the [architecture](#architecture) - section. -- For the mock chain ([Mock](#references)), there is one thread. - -The link runs in the main application thread. This consumes events -from the chains, performs queries and sends transactions synchronously. - - -#### Architecture - -The following diagram provides more detail into how the relayer is -structured. -Here the focus is on the interfaces within the relayer, as well as the -interface between the relayer and a single chain. - -relayer v0 architecture - -##### Legend - -Some of the notation from this figure has the following meaning. - -| Notation | Description | Examples | -| ------ | ----------- | ----------- | -| `E` | Enum: typically messages between threads | `ChainRequest`; `IBCEvent` | -| `S` | Struct: a processing element | `ForeignClient`; `Connection` | -| `T` | Trait: typically interface between threads | `Chain`; `LightClient` | - -##### Levels of abstraction - -At the top of this diagram, there is a chain consisting of multiple full nodes. -The deeper (i.e., lower) we go into this sketch, the closer we get to the user, or -Hermes (the relayer CLI). -To understand the relayer architecture intuitively, we can break down the -levels of abstraction as follows: - -###### 1. The actual chain, comprising a number of full nodes -- This is the lowest level of abstraction, the farthest away from relayer - users -- The relayer communicates with a chain via three interfaces: - - (i) the `LightClient` trait (handled via the supervisor for the - production chain), - - (ii) the `Chain` trait (where the communication happens over the - ABCI/gRPC interface primarily), and - - (iii) an `EventMonitor` which subscribes to a full node, and carries batches - of events from that node to the chain runtime in the relayer. Currently, - the relayer registers for `Tx` and `Block` notifications. It then extracts - the IBC events from the `Tx` and generates a `NewBlock` event also for the - block. Note that a notification may include multiple IBC Events. - -###### 2. 
The chain runtime - -- This is an intermediary layer, sitting between the relayer application and - any chain(s); -- The runtime is universal for all possible chains, i.e., does _not_ contain any - chain-specific code; -- Accepts as input requests from the application (Hermes, the CLI), in the form of - [`ChainRequest`][chain-req] via a crossbeam channel -- Responds to the application via a crossbeam channel -- Has objects which implement the three interfaces named above - (`LightClient`, `Chain`, and `EventMonitor`) and orchestrates access to - these objects as required by application requests - -###### 3. The relayer application - -- Communicates with the runtime via a `ChainHandle`, which contains the - appropriate crossbeam sender and receiver channels to/from the runtime -- Upon start-up, instantiates relayer-level objects in the following order: - two `ForeignClient`s (one per chain), a `Connection` (which contains the - two clients), a `Channel` (containing the connection), and on top of that - a `Link`. -- The code here is part of the Hermes (relayer CLI) binary. - -##### Threads - -Each thread in this diagram is a separate box shaded in gray. -There are four threads running: the `EventMonitor`, the `Supervisor`, the -`Runtime`, and the main application thread, called `V0Cmd`. - -## Status - -Accepted - -## Consequences - -### Positive -- prepares the relayer crate for incremental growth - -### Negative - -### Neutral - -## References: - -- __Gaia__: the correct Gaia instance for working with `v0.1` can be obtained - from https://github.com/cosmos/relayer, with `git checkout v4.0.0` by - executing `make build-gaia`. This - [comment](https://github.com/informalsystems/ibc-rs/pull/449#issuecomment-750248113) - provides additional insights into development-time relayer `v0.1` environment. - -- __Mock__: https://github.com/informalsystems/ibc-rs/blob/master/relayer/src/chain/mock.rs - - - -[ids]: https://github.com/cosmos/cosmos-sdk/pull/7993 -[link]: https://github.com/informalsystems/ibc-rs/blob/master/docs/architecture/adr-004-relayer-domain-decomposition.md#link -[chain-req]: https://github.com/informalsystems/ibc-rs/blob/379dd9812f6e7a42b9428f64eb52fe292d417476/relayer/src/chain/handle.rs#L51 -[ibc-relayer]: https://github.com/informalsystems/ibc-rs/tree/master/relayer/ -[ibc-relayer-cli]: https://github.com/informalsystems/ibc-rs/tree/master/relayer-cli/ -[hermes]: https://hermes.informal.systems -[features]: https://github.com/informalsystems/ibc-rs/blob/v0.1.0/guide/src/feature_matrix.md diff --git a/docs/architecture/adr-006-hermes-v0.2-usecases.md b/docs/architecture/adr-006-hermes-v0.2-usecases.md deleted file mode 100644 index bbbabc5596..0000000000 --- a/docs/architecture/adr-006-hermes-v0.2-usecases.md +++ /dev/null @@ -1,294 +0,0 @@ -# ADR 006: Hermes v0.2.0 Use-Cases - -## Changelog -* 16.02.2021: Proposed. - -## Context - -One major problem with planning for the evolution of Hermes is that presently -there is insufficient clarity regarding its requirements. -It is not known who are the typical Hermes users (is it human operators or -automated pipelines?), and what are their primary use-cases. - -This ADR proposes a few use-cases that seem interesting from the point -of view of a general target base of users, and which will -hopefully be a subset of the requirements of (any) future users. - -Three elements that provide further context for this discussion are: - -1. Hermes is still at an early stage of implementation, so these use-cases are - not set in stone. - -2. 
Some concrete use-cases are starting to emerge ([#628][#628]), which Hermes - v0.1.0 either does not cover altogether, or covers poorly (e.g., because of - inconsistent UX), thus informing this proposal. - -3. Hermes is one of _three_ relayer binaries that are being developed roughly in -parallel. The other two are being developed in Go and Typescript, -respectively (see the [references](#references) section). -In this context, it is plausible that Hermes will focus on performance, -robustness, and richness of features on a longer term. - -## Decision - -This is a summary of the use-cases (commands) discussed in the rest of this ADR. -Note that the commands below omit the binary name `hermes` , to keep the command -length to a minimum. - -To create and update a client: -- `create client ` - - Optional params: `[--clock-drift ] [--trusting-period ] [--trust-threshold ]` -- `update client ` - -To create a connection: -- `create connection ` - - Optional: `[--delay ]` -- `create connection --client-a --client-b ` - - Optional: `[--delay ]` - -To create a channel: -- `create channel --port-a --port-b ` - - Optional: `[--order ] [--version ]` -- `create channel --connection-a --port-a --port-b ` - - Optional: `[--order ] [--version ]` - -To start packet relaying: -- `start --port-a --port-b ` - - Optional: `[--order ] [--version ]` -- `start --connection-a --port-a --port-b ` - - Optional: `[--order ] [--version ]` -- `start --channel-a --port-a ` - -For finishing pre-initialized, but unfinished object handshakes, for connection and channel: -- `establish connection --connection-a ` -- `establish channel --channel-a --port-a ` - -### Rationale - -The primary goal for the uses-cases we decided to cover is to prevent situations -where users could get stuck. For example, the output of a command may be -unclear, or there may be an error and thereby some CLI command -finishes partially, or two relayers concurrently try to perform some -operation(s) and interfere with each other, resulting in a chain state that is -obscure to the user, and then the user could consequently be stuck. - -The first of the patterns below seeks to help "unblock" a user. -The second pattern is a variation on the first; this permits more efficiency -because it allows the reuse of previously-created objects in the -creation of new objects on a chain (e.g., reuse a client in the creation of a -connection, or reuse a connection in the creation of a new channel). - -#### Patterns - -We propose two basic patterns that Hermes should be able to fulfil. - -1. Simple invocations to perform basic actions. - - By _action_ here we mean doing the complete handshake for an object from - scratch (specifically _connection_ or _channel_) on two chains, or - relaying packets between two chains. - - The focus here is for the command to include retrying mechanisms - (perform it _robustly_) and have the simplest interface. - -2. Allow reusing of pre-existing state for basic commands. - - The pre-existing state could be a client with some specific trust options, - for instance, and in this case Hermes would provide support for creating - a connection that uses this specific client. - - This pattern should also include a retrying mechanism. - -#### Details of Use-Cases - -Applying the above patterns to a few cases, we get the following concrete -commands that Hermes v0.2.0 should fulfil. 
- -##### Create & Update a Client - -- Minimal invocation: this will create the client from scratch: - -``` -create client [--clock-drift ] [--trusting-period ] [--trust-threshold ] -``` - -**Details:** -Submits a transaction of type [client create][client-create] to chain -`` (sometimes called the _destination_ chain of this -transaction). The new client will be verifying headers for -chain `` (often called the _source_ chain). - -See also the [limitations](#limitations) section discussing the optional -security parameters for this command. - -- Update a client: - -``` -update client -``` - -**Details:** -Submits a transaction to chain id `` to update the client having -identifier `` with new consensus state from up-to-date headers. -Hermes will automatically infer the target chain of this client from -the [client state][client-state]. - -- Upgrade a client: - -``` -upgrade client -``` - -**Details:** -Submits a transaction to chain id `` to upgrade the client having -identifier ``. -Hermes will automatically infer the target chain of this client from -the [client state][client-state]. - -- Upgrade all clients that target a specific chain: - -``` -upgrade clients -``` - -**Details:** -Submits a transaction to upgrade clients of all chains in the config that target -chain id ``. - -##### Create New Connection - -- Minimal invocation: this will create the connection from scratch, using - _new_ clients: - -``` -create connection [--delay ] -``` - -**Details:** -Starts a transaction to perform the connection open handshake protocol between -two chains. -The chains are called symbolically `a` and `b`, hence the option names -`` and ``. In all handshakes, Hermes submits the first -step (typically called _init_, e.g., `ConnOpenInit`), to side `a`, then the -second step (e.g., `ConnOpenTry`) to side `b`, and so on. - -The optional parameter `--delay` is the delay period that the new connection -should have. Note also the [limitations](#limitations) around the -`delay_period` feature. - -- Reusing pre-existing state, concretely, with _existing_ clients: - -``` -create connection --client-a --client-b [--delay ] -``` - -**Details:** -Similar to the previous command, this command will perform the connection -open handshake protocol, but will reuse the client with identifier from -option `--client-a`. This client is expected to exist on chain ``. -The target chain of this client is identified in the -[client state][client-state] (concretely, the target chain is represented under -`chain_id` field of the client state), which provides the identifier for the -side `b` of the new connection. On the side `b` chain, this command will -establish the connection using the client with identifier from the option -`--client-b`, which must be verifying headers for chain ``. 
- -##### Create New Channel - -- With _new_ connection and clients: - -``` -create channel --port-a --port-b [--order ] [--version ] -``` - -- With _existing_ specific connection: - -``` -create channel --connection-a --port-a --port-b [--order ] [--version ] -``` - -##### Packet Relaying - -- relay packets over a _new_ channel, _new_ connection, and _new_ clients: - -``` -start --port-a --port-b [--order ] [--version ] -``` - -- relay packets over a _new_ channel that re-uses an _existing_ connection: - -``` -start --connection-a --port-a --port-b [--order ] [--version ] -``` - -- relay packets over an _existing_ channel: - -``` -start --channel-a --port-a -``` - -##### Finishing partially complete handshakes: - -These commands serve the purpose of covering certain corner-cases where a -handshake may be partially started. - -- Finalize handshake for _partially established_ connection: - -``` -establish connection --connection-a -``` - -- Finalize handshake for _partially established_ channel: - -``` -establish channel --channel-a --port-a -``` - - -### Command Output - -By default, the command will provide human-readable output, i.e., pretty -printing. -In practice, the final result of a Hermes command is captured in an -[Output][output] structure that has support for JSON serialization. To -enable JSON, we add a configuration parameter `log_json`. The global section -of the config file will look as follows: - -```toml -[global] -log_level = 'error' -log_json = 'false' -``` - -By default, this parameter is `false`. When set to `true`, all the Hermes output -will be in JSON. - -## Status - -Partially implemented. - -## Consequences -### Positive - -- Simpler, more accurate CLI invocation: "create" is more precise than "tx" or - "handshake" -- Improved output for human operators. - -### Negative - -- Some commands will possibly turn out to be useless. -- Requires some rethinking of the Relayer architecture (mainly because of the - [limitations](#limitations) surrounding light clients.) - -### Neutral - - -## References - -- Relayer in Go: https://github.com/cosmos/relayer -- Relayer in Typescript: https://github.com/confio/ts-relayer - - - -[#628]: https://github.com/informalsystems/ibc-rs/issues/628 -[#673]: https://github.com/informalsystems/ibc-rs/issues/673 -[#640]: https://github.com/informalsystems/ibc-rs/issues/640 -[client-state]: https://hermes.informal.systems/commands/queries/client.html#query-the-client-state -[client-create]: https://docs.rs/ibc/0.1.1/ibc/ics02_client/msgs/create_client/index.html -[output]: https://github.com/informalsystems/ibc-rs/blob/1f2e72dbcafee5a8bbdab381ff4927d5870b4b59/relayer-cli/src/conclude.rs#L80 diff --git a/docs/architecture/adr-007-error.md b/docs/architecture/adr-007-error.md deleted file mode 100644 index 2faf45cd23..0000000000 --- a/docs/architecture/adr-007-error.md +++ /dev/null @@ -1,228 +0,0 @@ -# ADR 007: Error Management - -## Changelog - -* 2020-07-26: Initial Proposal - -## Context - -This document describes the reason behind the switch from using -`anomaly` for error handling to -the [`flex-error`](https://docs.rs/flex-error/) crate that is developed in-house. - -## Decision - -### Problem Statement - -To keep things brief, we will look at the issue of error handling from a specific example -in `relayer/src/error.rs`: - -```rust -pub type Error = anomaly::Error; - -#[derive(thiserror::Error)] -pub enum Kind { - #[error("GRPC error")] - Grpc, - ... 
-}
-
-impl Kind {
-    pub fn context(self, source: impl Into<BoxError>) -> anomaly::Context<Kind> {
-        Context::new(self, Some(source.into()))
-    }
-}
-```
-
-The design above is meant to separate two concerns:
-
- - The metadata about an error, as captured in `Kind`.
- - The trace of how the error occurred, as captured in `anomaly::Context`.
-
-The type `Error` is defined to be `anomaly::Error<Kind>`, which is a newtype wrapper around `Box<Context<Kind>>`.
-
-There are a few issues with the original design using `anomaly`:
-
- - The error source type is erased and turned into a `Box<dyn std::error::Error + Send + Sync>`, making it difficult to recover metadata
-   information about the original error.
- - The `Kind::context` method allows any error type to be used as an error source, making it difficult to statically
-   analyze which sub-error has what kind of error source.
-
-We can demonstrate the design issue with a specific use case:
-
-```rust
-pub fn unbonding_period(&self) -> Result<Duration, Error> {
-    let mut client = self
-        .block_on(QueryClient::connect(self.grpc_addr.clone()))
-        .map_err(|e| Kind::Grpc.context(e))?;
-
-    let request = Request::new(QueryParamsRequest {});
-
-    let response = self
-        .block_on(client.params(request))
-        .map_err(|e| Kind::Grpc.context(e))?;
-    ...
-}
-```
-
-Without the help of an IDE, it would be challenging to figure out that
-the first use of `Kind::Grpc.context` has `tonic::TransportError` as the error source
-type, while the second use has the error source type
-`tonic::Status`.
-
-The mixing up of `tonic::Status` and `tonic::TransportError` as error sources
-is not too critical in this specific case. However, this would not be the
-case if we wanted to use the error source information to determine whether
-an error is _recoverable_ or not. For instance, say we want to
-implement custom retry logic only when the error source is
-`std::io::Error`: there is no easy way to determine whether an error
-variant `Kind::Grpc` is caused by `std::io::Error`.
-
-### Proposed Design
-
-A better design is to define error construction functions with _explicit_
-error sources. The proposed design is as follows:
-
-```rust
-pub struct Error(pub ErrorDetail, pub eyre::Report);
-
-pub enum ErrorDetail {
-    GrpcStatus {
-        status: tonic::Status
-    },
-    GrpcTransport,
-    ...
-}
-
-impl Error {
-    pub fn grpc_status(status: tonic::Status) -> Error {
-        let detail = ErrorDetail::GrpcStatus { status };
-        Error(detail, Eyre::msg(detail))
-    }
-
-    pub fn grpc_transport(source: tonic::TransportError) -> Error {
-        let detail = ErrorDetail::GrpcTransport;
-        let trace = Eyre::new(source).wrap_err(detail);
-        Error(detail, trace)
-    }
-}
-```
-
-There are a few things addressed by the design above:
- - We use the `eyre::Report` type as an _error tracer_ to trace
-   the error sources, together with additional information such as a backtrace.
- - Depending on the error source type, we want to have different strategies
-   to trace the error.
-   - For example, we may not care about the metadata
-     inside `tonic::TransportError`, so we just discard the data
-     after tracing it using `eyre`.
- - We define _error constructor functions_ that handle the error source using
-   different strategies. The function constructs the `ErrorDetail` and
-   `eyre::Report` values, and then wraps them as the `Error` tuple.
-
-In general, when the error sources are defined by external libraries,
-we have little control of how the types are defined, and need to have
-different ways to handle them.
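-
-To make the benefit concrete, here is a hedged sketch (not part of the proposal itself) of the kind
-of recovery logic the new design enables: because the error source is preserved in `ErrorDetail`,
-callers can decide whether an error is worth retrying. The `is_retryable` helper and the choice of
-retryable cases are purely illustrative.
-
-```rust
-/// Illustrative only: decide whether an error is transient enough to retry.
-fn is_retryable(e: &Error) -> bool {
-    match &e.0 {
-        // A failure in the underlying transport (e.g. connection refused) is
-        // typically transient.
-        ErrorDetail::GrpcTransport => true,
-        // A gRPC status carries the server's verdict; only retry when the
-        // server reports itself as unavailable.
-        ErrorDetail::GrpcStatus { status } => status.code() == tonic::Code::Unavailable,
-        // Anything else is treated as a permanent failure.
-        _ => false,
-    }
-}
-```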
-But when we have multiple error types that are defined in the same crate, -we want to have special way to handle the propagation of error. - -For example, consider the `LinkError` type, which has the error -we defined earlier as the error source: - -```rust -use crate::error::{Error as RelayerError, ErrorDetail as RelayerErrorDetail}; - -pub struct LinkError(LinkErrorDetail, eyre::Report); - -pub enum LinkErrorDetail { - Relayer { - source: RelayerErrorDetail - }, - ... -} - -impl LinkError { - pub fn relayer_error((source_detail, trace): RelayerError) -> LinkError { - let detail = LinkErrorDetail::Relayer(source_detail); - LinkError(detail, trace.wrap_err(detail)) - } -} -``` - -We propagate the error detail to LinkErrorDetail so that we can recover -additional detail later on. Furthermore, we extract the `eyre::Report` -from the error source and use it to add additional information -when we construct `LinkError`. - -### `flex-error` - -The proposed design has a lot of boilerplate required to properly define -the error types. To reduce boilerplate, we have developed -[`flex-error`](https://docs.rs/flex-error/) with the `define_error!` -macro which makes it straightforward to implement the error types -using a DSL syntax. With that, the error types can instead be defined as: - -```rust -use flex_error::{define_error, TraceError}; - -define_error! { - Error { - GrpcStatus - { status: GrpcStatus } - | e | { format!("GRPC call return error status {0}", e.status) }, - GrpcTransport - [ TraceError ] - | _ | { "error in underlying transport when making GRPC call" }, - ... - } -} -``` - -Aside from the syntactic sugar provided by the `define_error!` macro, `flex-error` -also allows error tracer implementation to be switched based on the Cargo feature -flags set on the `flex-error` crate. For example, we can switch from the -[`eyre`](https://docs.rs/eyre/) tracer to the [`anyhow`](https://docs.rs/anyhow/) -tracer by disabling `"flex-error/eyre_tracer"` and enabling `"flex-error/anyhow_tracer"` features. - -If all error tracer features and the `"flex-error/std"` feature are disabled, -a simple `flex_error::StringTracer` is used for tracing errors. The `StringTracer` -do not provide additional information such as back trace, but it is useful -for supporting `no_std`, where standard constructs such as `std::error::Error` and -error backtrace are not available. - -The full documentation for `flex-error` is available at [Docs.rs](https://docs.rs/flex-error/). - -## Status - -Accepted - The PR has been merged in [#988](https://github.com/informalsystems/ibc-rs/pull/988) - -## Consequences - -All error definitions in the `ibc-rs` project will be defined using the -`flex-error` crate. - -### Positive - -- Fine grained error handling. -- Flexible error tracing. -- `no_std` support. - -### Negative - -- It takes time to learn about the DSL and how to manage different error sources. -- Compile errors arise inside the macros may be difficult to debug. -- IDE provides limited integration for code inside macros. - -### Neutral - -- The error variants are defined in the `ErrorDetail::ErrorVariant{...}` convention, - but the error constructor functions are defined in the `Error::error_variant(...)` - convention. 
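-
-As a small illustration of that naming convention, the snippet below uses the hand-rolled `Error`
-and `ErrorDetail` types from the "Proposed Design" section above (the macro-generated code follows
-the same convention): values are built through the snake_case constructor functions and inspected
-through the CamelCase detail variants.
-
-```rust
-/// Construction goes through the constructor function...
-fn report_failure(status: tonic::Status) -> Error {
-    Error::grpc_status(status)
-}
-
-/// ...while inspection matches on the corresponding `ErrorDetail` variant.
-fn describe(err: &Error) -> String {
-    match &err.0 {
-        ErrorDetail::GrpcStatus { status } => format!("GRPC call failed with status {}", status),
-        ErrorDetail::GrpcTransport => "error in the underlying GRPC transport".to_string(),
-        _ => "other relayer error".to_string(),
-    }
-}
-```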
- -## References - -- [PR #988](https://github.com/informalsystems/ibc-rs/pull/988): - Use flex-error to define errors -- [Issue #712](https://github.com/informalsystems/ibc-rs/issues/712): - Relayer error handling specification -- [Issue #11588](https://github.com/informalsystems/ibc-rs/issues/1158): - Tracking issue for no-std support diff --git a/docs/architecture/adr-008-ics20-implementation.md b/docs/architecture/adr-008-ics20-implementation.md deleted file mode 100644 index 3839a2315a..0000000000 --- a/docs/architecture/adr-008-ics20-implementation.md +++ /dev/null @@ -1,236 +0,0 @@ -# ADR 008: ICS20 Implementation Proposal - -## Status - -Accepted - -## Changelog - -* 21.04.2022: Draft Proposed - -## Context - -The goal of this ADR is to provide recommendations and a guide for implementing the ICS20 application. - -## Decision - -The proposal is broken down into traits that should be implemented by the ICS20 module. It also defines some primitives -that would help in building a module compliant with the ICS20 spec. - -#### Types - -The implementation must provide a base denom type that is serializable to string. Additionally, the following denom -types must also be provided: - -* `HashedDenom`: A denom type that can be serialized to a string of the form `'ibc/{Hash(trace_path/base_denom)}'`. -* `PrefixedDenom`: A denom type with a base denom which is prefixed with a trace. The trace itself consists - of `'{PortId}/{ChannelId}'` pairs and enables coin source tracing[^1]. - -```rust -/// Base denomination type -pub struct Denom(String); -``` - -A `Coin` defines a token with a denomination and an amount where the denomination may be any one of the denom types -described above. - -```rust -#[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Serialize, Deserialize)] -pub struct Coin { - /// Denomination - pub denom: Denom, - /// Amount - pub amount: U256, -} -``` - -The ICS20 acknowledgement type and packet data type are defined in the spec[^2] and maybe modelled as follows. Note that -these types must be (de)serializable from/to JSON. - -```rust -pub enum ICS20Acknowledgement { - /// Equivalent to b"AQ==" - Success, - /// Error Acknowledgement - Error(String) -} - -pub struct FungibleTokenPacketData { - denomination: Denom, - amount: U256, - sender: String, - receiver: String, -} -``` - -#### Keepers and readers - -```rust -pub trait ICS20Keeper: ChannelKeeper -+ PortKeeper -+ BankKeeper::AccountId> -+ AccountKeeper::AccountId> -{ - /// The account identifier type. - type AccountId: Into; - - /// Set channel escrow address - fn set_channel_escrow_address(&mut self, port_id: &PortId, channel_id: &ChannelId) -> Result<(), ICS20Error>; - /// Sets a new {trace hash -> denom trace} pair to the store. - fn set_denom_trace(&mut self, denom_trace: DenomTrace) -> Result<(), Ics20Error>; -} - -pub trait ICS20Reader: ChannelReader -+ PortReader -+ AccountReader::AccountId> -+ BankReader::AccountId> -{ - /// The account identifier type. - type AccountId: Into + FromStr; - - /// Returns true iff sending is allowed in the module params - fn is_send_enabled(&self) -> bool; - /// Returns true iff receiving is allowed in the module params - fn is_receive_enabled(&self) -> bool; - /// get_transfer_account returns the ICS20 - transfers AccountId. 
- fn get_transfer_account(&self) -> AccountId; - /// Sets and returns the escrow account id for a port and channel combination - fn get_channel_escrow_address(&self, port_id: &PortId, channel_id: &ChannelId) -> Result; - /// Returns true iff the store contains a `DenomTrace` entry for the specified `HashedDenom`. - fn has_denom_trace(&self, hashed_denom: HashedDenom) -> bool; - /// Gets the denom trace associated with the specified hash in the store. - fn get_denom_trace(&self, denom_hash: HashedDenom) -> Option; -} - -pub trait BankKeeper { - /// The account identifier type. - type AccountId: Into; - - /// This function should enable sending ibc fungible tokens from one account to another - fn send_coins(&mut self, from: &Self::AccountId, to: &Self::AccountId, amt: Coin) -> Result<(), ICS20Error>; - /// This function to enable minting tokens(vouchers) in a module - fn mint_coins(&mut self, amt: Coin) -> Result<(), ICS20Error>; - /// This function should enable burning of minted tokens or vouchers - fn burn_coins(&mut self, module: &Self::AccountId, amt: Coin) -> Result<(), ICS20Error>; - /// This function should enable transfer of tokens from the ibc module to an account - fn send_coins_from_module_to_account( - &mut self, - module: Self::AccountId, - to: Self::AccountId, - amt: Coin, - ) -> Result<(), Ics20Error>; - /// This function should enable transfer of tokens from an account to the ibc module - fn send_coins_from_account_to_module( - &mut self, - from: Self::AccountId, - module: Self::AccountId, - amt: Coin, - ) -> Result<(), Ics20Error>; -} - -pub trait BankReader { - /// The account identifier type. - type AccountId: Into + FromStr; - - /// Returns true if the specified account is not allowed to receive funds and false otherwise. - fn is_blocked_account(&self, account: &Self::AccountId) -> bool; -} - -pub trait AccountReader { - /// The account identifier type. - type AccountId: Into + FromStr; - - /// This function should return the account of the ibc module - fn get_module_account(&self) -> Self::AccountId; -} - -pub trait Ics20Context: -Ics20Keeper::AccountId> -+ Ics20Reader::AccountId> -{ - type AccountId: Into + FromStr; -} -``` - -## Handling ICS20 Packets - -ICS20 messages are still a subset of channel packets, so they should be handled as such. - -The following handlers are recommended to be implemented in the `ics20_fungible_token_transfer` application in the `ibc` -crate. These handlers will be executed in the module callbacks of any third-party IBC module that is implementing an -ICS20 application on-chain. - -```rust -/// Should be used in the transaction that initiates the ICS20 token transfer -/// Performs all logic related to token transfer and returns a SendTransferPacket type -/// for the calling module to create the actual packet and register it in the ibc module. 
-pub fn send_transfer<Ctx>(ctx: &Ctx, _msg: MsgTransfer) -> Result<SendTransferPacket, ICS20Error>
-    where Ctx: ICS20Context
-{
-    if !ctx.is_send_enabled() {
-        return Err(ICS20Error::send_disabled());
-    }
-
-    // implementation details, see ICS 20 for reference
-}
-
-/// Handles incoming packets with ICS20 data
-/// To be called inside the on_recv_packet callback
-pub fn on_recv_packet<Ctx>(ctx: &Ctx, _packet: &Packet, _data: &FungibleTokenPacketData) -> ICS20Acknowledgement
-    where Ctx: ICS20Context
-{
-    if !ctx.is_receive_enabled() {
-        return ICS20Acknowledgement::Error("receiving transfers is disabled".to_string());
-    }
-
-    // implementation details, see ICS 20 for reference
-}
-
-/// on_timeout_packet refunds the sender since the original packet sent was
-/// never received and has been timed out.
-/// To be called inside the on_timeout_packet callback
-pub fn on_timeout_packet<Ctx>(ctx: &Ctx, data: &FungibleTokenPacketData) -> Result<(), ICS20Error>
-    where Ctx: ICS20Context
-{
-    refund_packet_token(ctx, data)
-}
-
-/// Responds to the success or failure of a packet
-/// acknowledgement written on the receiving chain. If the acknowledgement
-/// was a success then nothing occurs. If the acknowledgement failed, then
-/// the sender is refunded their tokens.
-/// To be called inside the on_acknowledgement_packet callback
-pub fn on_acknowledgement_packet<Ctx>(ctx: &Ctx, ack: ICS20Acknowledgement, data: &FungibleTokenPacketData) -> Result<(), ICS20Error>
-    where Ctx: ICS20Context
-{
-    match ack {
-        ICS20Acknowledgement::Success => Ok(()),
-        _ => refund_packet_token(ctx, data)
-    }
-}
-
-/// Implements logic for refunding a sender on packet timeout or acknowledgement error
-pub fn refund_packet_token<Ctx>(_ctx: &Ctx, _data: &FungibleTokenPacketData) -> Result<(), ICS20Error>
-    where Ctx: ICS20Context
-{
-    //...
-}
-```
-
-## Consequences
-
-### Positive
-
-- Provides more clarity on the details of implementing the ICS20 application in the `ibc` crate.
-- Helps align closer with the ibc-go implementation[^3].
-
-### Negative
-
-### Neutral
-
-## References
-
-[^1]: [ibc-go ADR 001: Coin Source Tracing](https://github.com/cosmos/ibc-go/blob/4271027a5ab1e6faaa2edbc2b9840209c315afab/docs/architecture/adr-001-coin-source-tracing.md)
-[^2]: [ICS20 spec](https://github.com/cosmos/ibc/tree/master/spec/app/ics-020-fungible-token-transfer)
-[^3]: [ibc-go's transfer module implementation](https://github.com/cosmos/ibc-go/tree/d31f92d9bf709f5550b75db5c70a3b44314d9781/modules/apps/transfer)
diff --git a/docs/architecture/adr-009-chain-endpoint-handle-standardization.md b/docs/architecture/adr-009-chain-endpoint-handle-standardization.md
deleted file mode 100644
index 9073842ea4..0000000000
--- a/docs/architecture/adr-009-chain-endpoint-handle-standardization.md
+++ /dev/null
@@ -1,51 +0,0 @@
-# ADR 009: ChainEndpoint and ChainHandle methods standardization
-
-## Status
-
-Accepted - The PR has been merged in [#2108](https://github.com/informalsystems/ibc-rs/pull/2108)
-
-## Changelog
-* 2022-04-19: Initial Proposal
-
-## Context
-There are many common methods in the `ChainHandle` and `ChainEndpoint` traits, sometimes with minute differences between one another. This document provides a way to remove the duplication of methods for increased maintainability of the codebase, along with a few suggestions to standardize the method signatures.
-
-## Decision
-
-### Query methods parameters
-There are currently discrepancies between how methods take their arguments.
Some take a `request` object, and others take fine-grained arguments that will be used to build a `request` object in the implementation of the method. For example, `query_consensus_state()` takes arguments that will be used to build a request object, whereas `query_consensus_states()` takes a request object directly. -```rust -fn query_consensus_state( - &self, - client_id: ClientId, - consensus_height: Height, - query_height: Height, -) -> ...; - -fn query_consensus_states( - &self, - request: QueryConsensusStatesRequest, -) -> ...; -``` - -All methods will be refactored to take a request object as argument. - -### Query request objects -Currently, the type for the request objects is the "raw type", coming from the compiled protobuf files. For each such type, we will create a corresponding domain type, following a similar pattern as elsewhere in the codebase. - -This will allow us to modify the domain type as we wish, without requiring a change in the protobuf file (and thus, requiring a change in the communication protocol). A first such change of the domain type we foresee would alter the type to specify a height in queries; however this is out of scope for this particular ADR. - - -## Consequences - -### Positive -+ The protobuf types are not exposed directly, which allows `hermes` to work with future non-tendermint chains -+ Increased readability of the codebase; similar methods have a similar format - -### Negative - - -## References - -* [Option type should be used with non-zero Height #1009](https://github.com/informalsystems/ibc-rs/issues/1009) - + The new domain types proposed here, as well as the reduced deduplication of methods, will make fixing this issue easier diff --git a/docs/architecture/adr-template.md b/docs/architecture/adr-template.md deleted file mode 100644 index 28a5ecfbbc..0000000000 --- a/docs/architecture/adr-template.md +++ /dev/null @@ -1,36 +0,0 @@ -# ADR {ADR-NUMBER}: {TITLE} - -## Changelog -* {date}: {changelog} - -## Context - -> This section contains all the context one needs to understand the current state, and why there is a problem. It should be as succinct as possible and introduce the high level idea behind the solution. -## Decision - -> This section explains all of the details of the proposed solution, including implementation details. -It should also describe affects / corollary items that may need to be changed as a part of this. -If the proposed change will be large, please also indicate a way to do the change to maximize ease of review. -(e.g. the optimal split of things to do between separate PR's) - -## Status - -> A decision may be "proposed" if it hasn't been agreed upon yet, or "accepted" once it is agreed upon. If a later ADR changes or reverses a decision, it may be marked as "deprecated" or "superseded" with a reference to its replacement. - -{Deprecated|Proposed|Accepted} - -## Consequences - -> This section describes the consequences, after applying the decision. All consequences should be summarized here, not just the "positive" ones. - -### Positive - -### Negative - -### Neutral - -## References - -> Are there any relevant PR comments, issues that led up to this, or articles referrenced for why we made the given design choice? If so link them here! 
-
-* {reference link}
diff --git a/docs/architecture/architecture.md b/docs/architecture/architecture.md
deleted file mode 100644
index ea549cfd8f..0000000000
--- a/docs/architecture/architecture.md
+++ /dev/null
@@ -1,130 +0,0 @@
-# Architecture
-
-This document describes the architecture of `ibc-rs`. If you're looking for a high-level overview of the code base, you've come to the right place!
-
-## Terms
-
-Some important terms and acronyms that are commonly used include:
-
- * **IBC**: Refers to the **I**nter**B**lockchain **C**ommunication protocol, a distributed protocol that allows different sovereign blockchains to communicate with one another. The protocol has both on-chain and off-chain components.
- * **ICS**: Refers to **I**nter**C**hain **S**tandards, which are standardization documents that capture the specification of the IBC protocol across multiple documents. For example, ICS02 captures the client abstraction of the IBC protocol.
- * **IBC module**: Refers to a piece of on-chain logic on an IBC-enabled chain.
- * **Relayer**: Refers to an off-chain process that is responsible for relaying packets between chains.
- * **Hermes**: Refers to the relayer implementation provided by `ibc-rs`.
-
-## Bird's Eye View
-
-![][layout-image]
-
-At its highest level, `ibc-rs` implements the InterBlockchain Communication protocol, which is captured in [specifications in a separate repository][ibc-specs]. `ibc-rs` exposes modules that implement the specified protocol logic. The IBC protocol can be understood as having two separate components: on-chain and off-chain logic. The relayer, which is the main off-chain component, is a standalone process, of which Hermes is an implementation. On-chain components can be thought of as modules or smart contracts that run as part of a chain. The main on-chain components deal with the abstractions of clients, connections, and channels.
-
-## Code Map
-
-This section talks briefly about the various directories and modules in `ibc-rs`.
-
-### `modules`/`ibc`
-
-> Note: While the name of the directory is `modules`, the name of the crate is `ibc`.
-
-This crate contains the main data structures and on-chain logic of the IBC protocol; the fundamental pieces. There is the conceptual notion of 'handlers', which are pieces of code that each handle a particular type of message. The most notable handlers are the [client][ibc-client], [connection][ibc-connection], and [channel][ibc-channel] handlers.
-
-> Note: The naming of directories in the `ibc` crate follows a slightly different convention compared to the other crates in `ibc-rs`. This is because this crate implements the [ICS standards][ics-standards]. Modules in the `ibc` crate that implement a piece of the ICS standard are prefixed with the standard's designation. For example, `modules/src/ics02_client` implements [ICS 02][ics02], which specifies the Client abstraction. These prefixes may be removed in the future.
-
-#### Core
-
-Consists of the designs and logic pertaining to the transport, authentication, and ordering layers of the IBC protocol, the fundamental pieces.
-
-##### ICS 02 - Client
-
-Clients encapsulate all of the verification methods of another IBC-enabled chain in order to ensure that the other chain adheres to the IBC protocol and does not exhibit misbehaviour. Clients "track" the metadata of the other chain's blocks, and each chain has a client for every other chain that it communicates with.
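-
-The following deliberately simplified sketch illustrates this "track and verify" idea; the type and method names are illustrative only and do not correspond to the actual traits in `modules/src/core/ics02_client`.
-
-```rust
-use std::collections::BTreeMap;
-
-// Illustrative stand-ins for the real ICS 02 types.
-#[derive(Debug, Clone)]
-struct ConsensusState {
-    root_hash: [u8; 32], // commitment root the client trusts at a given height
-}
-
-#[derive(Debug)]
-struct Header {
-    height: u64,
-    root_hash: [u8; 32],
-    valid_signature: bool, // stand-in for the real signature/validator-set checks
-}
-
-#[derive(Default)]
-struct Client {
-    // Consensus states tracked per counterparty height.
-    consensus_states: BTreeMap<u64, ConsensusState>,
-}
-
-impl Client {
-    /// Verify an incoming header and, if it is valid, start tracking the
-    /// consensus state it commits to at that height.
-    fn update(&mut self, header: Header) -> Result<(), String> {
-        if !header.valid_signature {
-            return Err("header failed verification".to_string());
-        }
-        let consensus_state = ConsensusState { root_hash: header.root_hash };
-        self.consensus_states.insert(header.height, consensus_state);
-        Ok(())
-    }
-}
-
-fn main() {
-    let mut client = Client::default();
-    let header = Header { height: 10, root_hash: [0u8; 32], valid_signature: true };
-    assert!(client.update(header).is_ok());
-    println!("tracked heights: {:?}", client.consensus_states.keys().collect::<Vec<_>>());
-}
-```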
-
-##### ICS 03 - Connection
-
-Connections associate a chain with another chain by connecting a client on the local chain with a client on the remote chain. This association is pair-wise unique and is established between two chains following a 4-step handshake process.
-
-##### ICS 04 - Channel
-
-Channels are an abstraction layer that facilitates communication between applications and the chains those applications are built upon. One important function that channels can fulfill is guaranteeing that data packets sent between an application and its chain are well-ordered.
-
-##### ICS 05 - Port
-
-The port standard specifies an allocation scheme by which modules can bind to uniquely-named ports allocated by the IBC handler in order to facilitate module-to-module traffic. These ports are used to open channels and can be transferred or released by the module which originally bound them.
-
-##### ICS 23 - Commitment
-
-Commitments (sometimes called _vector commitments_) define an efficient cryptographic construction to prove inclusion or non-inclusion of values at particular paths in state. This scheme provides a guarantee that a particular state transition occurred on one chain, which can be verified on another chain.
-
-#### Applications
-
-Consists of various packet encoding and processing semantics which underpin the various types of transactions that users can perform on any IBC-compliant chain.
-
-##### ICS 20 - Fungible Token Transfer
-
-Specifies the packet data structure, state machine handling logic, and encoding details used for transferring fungible tokens between IBC chains. This process preserves asset fungibility and ownership while limiting the impact of Byzantine faults.
-
-#### Clients
-
-Consists of implementations of client verification algorithms (following the base client interface that is defined in `Core`) for specific types of chains. A chain uses these verification algorithms to verify the state of a remote chain.
-
-##### ICS 07 - Tendermint
-
-The Tendermint client implements a client verification algorithm for blockchains which use the Tendermint consensus algorithm. This enables state machines of various sorts replicated using the Tendermint consensus algorithm to interface with other replicated state machines or solo machines over IBC.
-
-#### Relayer
-
-Contains utilities for testing the `ibc` crate against the Hermes IBC relayer. It acts as scaffolding for gluing the `ibc` crate with Hermes for testing purposes.
-
-##### ICS 18 - Relayer
-
-Relayer algorithms are the "physical" connection layer of IBC — off-chain processes responsible for relaying data between two chains running the IBC protocol by scanning the state of each chain, constructing appropriate datagrams, and executing them on the opposite chain as allowed by the protocol.
-
-### `relayer`
-
-This crate provides the logic for relaying datagrams between chains. Relaying packets is an off-chain process that is kicked off by submitting transactions that read from or write to an IBC-enabled chain's state. More broadly, a relayer enables a chain to ascertain another chain's state by accessing its clients, connections, channels, or anything else that is IBC-related.
-
-### `relayer-cli`
-
-A CLI wrapper around the `relayer` crate for running and issuing commands to a chain via a relayer. This crate exposes the Hermes binary.
-
-### `relayer-rest`
-
-An add-on to the CLI mainly for exposing some internal runtime details of Hermes for debugging and observability purposes.
-
-### `proto`
-
-Depends on the `proto-compiler` crate's generated proto files.
-
-Consists of protobuf-generated Rust types which are necessary for interacting with the Cosmos SDK. Also contains the client and server methods that the relayer library uses to access a chain's gRPC services.
-
-### `proto-compiler`
-
-CLI tool to automate the compilation of protobuf files, which allows Hermes developers to go from a type specified in proto files to generated client or server gRPC code.
-
-### `telemetry`
-
-Used by Hermes to gather telemetry data and expose it via a Prometheus endpoint.
-
-## Cross-Cutting Concerns
-
-### Testing
-
-Most of the components in the `ibc` crate (i.e. the `modules` directory) have basic unit test coverage. These unit tests make use of mocked-up chain components in order to ensure that message payloads are being sent and received as expected.
-
-We also run end-to-end tests to exercise the IBC modules more thoroughly, in a more heterogeneous fashion.
-
-### Error Handling
-
-Most errors occur within the relayer as a result of either I/O operations or user misconfiguration. I/O-related errors can be sub-categorized into web socket errors and chain RPC errors. The latter occur when full nodes are out of sync with the rest of the network, which results in transactions that are based on conflicting chain states. Such errors are usually resolved by retrying the transaction, or they may require operator intervention to flush the transaction from the mempool in conjunction with restarting the full node.
-
-The [flex-error][flex-error] library is the main tool used to handle errors in the code. This [demo][flex-error-demo] showcases some of the main patterns of how `flex-error` is used. For a more real-world example, [this][relayer-errors] file defines all of the possible errors for the relayer.
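-
-As a rough illustration of the "retry the transaction" recovery path described above (the function and error types below are hypothetical stand-ins, not the relayer's actual API):
-
-```rust
-use std::{thread, time::Duration};
-
-// Hypothetical stand-ins for illustration only; the real relayer submits
-// transactions through its chain runtime and uses flex-error-based types.
-#[derive(Debug)]
-struct RpcError(String);
-
-fn submit_tx(attempt: u32) -> Result<(), RpcError> {
-    // Pretend the full node is out of sync for the first two attempts.
-    if attempt < 2 {
-        Err(RpcError("full node out of sync".into()))
-    } else {
-        Ok(())
-    }
-}
-
-/// Minimal sketch of the retry idea: back off briefly between attempts and
-/// give up after a bounded number of submissions.
-fn submit_with_retry(max_attempts: u32) -> Result<(), RpcError> {
-    let mut last_err = RpcError("no attempts made".into());
-    for attempt in 0..max_attempts {
-        match submit_tx(attempt) {
-            Ok(()) => return Ok(()),
-            Err(err) => {
-                last_err = err;
-                thread::sleep(Duration::from_millis(200 * u64::from(attempt + 1)));
-            }
-        }
-    }
-    Err(last_err)
-}
-
-fn main() {
-    println!("{:?}", submit_with_retry(3));
-}
-```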
-
-[flex-error]: https://github.com/informalsystems/flex-error
-[flex-error-demo]: https://github.com/informalsystems/flex-error/blob/master/flex-error-demo-full/src/main.rs
-[ibc-specs]: https://github.com/cosmos/ibc#interchain-standards
-[ics-standards]: https://github.com/cosmos/ibc#standardisation
-[ibc-client]: https://github.com/informalsystems/ibc-rs/tree/master/modules/src/core/ics02_client
-[ibc-connection]: https://github.com/informalsystems/ibc-rs/tree/master/modules/src/core/ics03_connection
-[ibc-channel]: https://github.com/informalsystems/ibc-rs/tree/master/modules/src/core/ics04_channel
-[ics02]: https://github.com/cosmos/ibc/blob/master/spec/core/ics-002-client-semantics/README.md
-[layout-image]: assets/ibc-rs-layout.png
-[relayer-errors]: https://github.com/informalsystems/ibc-rs/blob/master/relayer/src/error.rs
diff --git a/docs/architecture/assets/IBC_client_heights.jpeg b/docs/architecture/assets/IBC_client_heights.jpeg
deleted file mode 100644
index b24e4d997d5c45d21bbeaaad03f8ebb2466e3edd..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 143106
z#0^t6#3Iv5=yt}!8(6w!k%@88%uZrgnw5;4R19%0k`FRmN{0_;j+0}rzr@H`#k?G8 z<{?=(TqQS0)cz2Gy|Rkl)Yme`nc2H5ZQBGe1JLJbbqvSeJ1(;t>Sy!|uDG$Bhh{XO zKyj#PJR&!zhAma1Wo}I(b*e=vpe_*LszrI<-4f33JPp~;ZIIHQ461t5*8q6pX4r$3 z@25Pv`JHY{-cPpZB8v+(2#(E_6)=V0ahUAR3Y&iV(5w5~qJN(&j<`lv)ouEw>0x}| z_9|+|olRl6Gy|GB@&06VLm1KkTz2c^_wK_p)xNu{r-c>2KmAzr;bLvu!Nr~f_p$8V zo{8Vp;yyuLKU@Ay^X#{%W*qZ=%Ir09r$0m-P5!1W`ZH+5jaKxoe(*K2mGks}H6Pp- zjU7|-`#{^?1l`$Hmd4-pEi2C?-mCofH(iIUA5+{1*=9F^K{QLWv46iKXSoDU`=?g|$CFv z+9B^i9>?!Mf*f+hmD(<%Y)q0<;%~-3xI5OwbWgY+6xa$d*+ERf$3D0DQGJW@HNUb_ z+4kQaGUt*_Z<~p1v4lx z9*HBy)|A#e{{}-6@6_$!V^_TE2fA_xpD(G0n3?U9rAQx5iaF>cwb`=D$ewY5W4de* z#WGCVV4#i8b*FE4DlP;C^q*hZ?XYfjfq(|%Y^1(g!gS(U2u#;B`BCiE!Hml6|A(~q z4r?-9*S?t<9mj%-5K)RHfJkU61f)7Dp%+6)B1wQzh)Ad=6sgWQf)oKk0@8wnmI6Ua zfB=CJDFOll2_=At5Fk{QCe8VtS?hcEUh8<*+IxTd%O5#-!jt4ajwJVU-`91X=kHV~ zlvz-`FmBfnzv%Ck8>_tdDJkBRD?QGU>ZEC+=Hl*9+1DNA$h-lOBIQSxlPs>y^~$ID zF2i@RjD=brn-4|Bv6q_Jw&)&lMn$Al0#KKO!=}Hk2Qa(4^hzMBAS!jF)abPNSF&q?m7$hjKZ)au?Boc8H{&lY zn%&}s=hw;+45s#9ee5@E{fyE2EfnsTiQ!5R>%w?*`hhJrggCCX$fx6Hv?;Ejl(Uje?92BXtl$FuqG(bUf%m zM9li;RKPVhma`N}7DdjqcT8`8er9%4G>?2%2GCN)_Zn!Vyv<{H6HVSB(NEs4} zI1f0snq#^wF3Pa6T^M=}UhB<)4G*oZs{wt3R$(^9wf$(L$|@n!lach9=^VWP(It>N zzSy1KkUSkEZP%PYosYKCOg=6+JW1q(ADP;hQk zA=U_ICL>!?!xaEu3t6054`h#&I)elSC8JD=^+#`$4cAj2)P63hv+BZm1y3zU;dXmK)N{XFE0Fim?!<5W0A-5 zwdFi4B#(EKnK{1#zz0dBdGp&X;_mmG>ZHcn7$g~?=_^NBK2q}X)Y1#P)6%)G zE88|q-tkn=@Tvyw&1+zc{LaZue%vC|7=Le2O%T;XqiypkLOaLfl7`Aurq+k)HNrOF zz@E%_u*&xQQ%vjeX0(Mw?LQox)}t>gPp_2lnZ7mlV|RxE?05SQU*d^u#AN{GHNzLr zqj;DvvwFK|c_mR9GXCUVlb3k|VGza=(%B-bNHdEwtZ~r4ybr~p4cZ*jn~iCg@4dIw zZRL4)>CWw7lL=jxzT)A;+=z`W-%n1O(;1}S^JZPR-!gZktu_m@$BX={dmDPjei1vB znJ*@$?dkb37ZU+Cy1$}xe9>fT#=ocLJ#_k^N^kw$j8)g9#nJSpBA8vUW49g{EB1Z+ zKhKE&eDrszoH2W&@vkgdpMT7f{rR8I{I8D=tS9X5q3@^?{}c;<{*t^XzqKFn6(#(( zec|}gzdiGNF6-MyZQ3W`E0J~6a=mW*{hZ4yq)Ue1|NH;HPWr)u|711uZVjn8P8#H9 zp8eXS`(mSPkmdp@>s;8AV`!Ko8h!U6hDI3vamMzIe3g286w`Tn0j-Q9iMqsVZ=SZX zIAoh)F)l09l57oxiMbLd_mk&d+~7IXalCI8-Lf8$p2ru zlKBURX!jEVAmVgA|5Kj z)~R_hZXFNgNBkPw5mZq_n}1;%J)tU{+&|FF!JF`HJu^Su;k9+g`fJVnDtFrMTWZOJ zXmG5xOcUE6LtkPb)pdRn3g!ULtjBR}LkQ&?$6KpZ^`y zxtT*~CSuw|_yO7d4?mocD}Vh~dMh?=J83z0`w`}+Zd%_8t&CaO_G9e2_gtU5oEZ&u zksNpQiQnA(>Up|t>)-_=UCWBGwR{&=AS7zSFU~gu)bm(7ef+6D)3FsOQBl?65zOQTPv}`jM~UbD5E^KkAag={NZuVciJ(iHLd;V8F(RL@X^QmWqwy+oGEi zPe{b)J{u(KfSx}}*O~6cRqKcd$K^rUC$`=OSb6NNb#%R17buC1j0jL%+nDYCbw}&; ziqzrg8`A^Q%&w(LkKXlK%hZwgFL|S$)Bb*!{O4t$C1@i=3*vMV>MJSXO`)s0bS7>t zhLl&wQ@X9$LCRXl;~SFoz~5H)V|5C-g?T@~jXohJkUgFV z4QD6GQk?t2gjnL(5m&$q$A5-db69^aOJmB+g7ab>NacJqUPOtFNyz%O5m>H78^Ch= zy&gh~Xw~f~5!#@)E0AaM=~dCy2=3UnX|3SJQp&og?*0nSTT%f%ENRJvAe5lJcbsZ; zffOI#?#ir!D6;B0!x&h_j_^}!u%tw@TOrlM4xi@i4UGFyF?DX)@!VB!2g#} zzhmV=fAz?oN;WT{8n;wAu+dkWHsb2s(4TCxv!x;FtCrTE;%Qk&_GVUWWwNV#`CkYG zSStCP#eGlCETxY){Tz4S{nS46KhAysRV>oq&-5kqOu-Gl`J5MESeuX4TF0etlbCn-{ zVK_q(vjI*GJ$uBHDpc)*<#Sw#ES~lKFUXmtb92k6k+ArL@`jA;CF?vqxddq!`|P>B ze~EtA!>t-p<-A zts$ZV`-c@ignc^etsCqk?a5V`bajNOo@mq6BdU_#Ief|zN^1nMg$H8q^c?DKKlqH~ z=9+||PshC@qCD@;ADZc-hw|aqs;1cdKAzg(PxQi})rf(*zz|KPDO>*7UG*-)0II)# zdDGBQg0oqkccD@w04vL5|A?&_KVLEstYJ%0ZW>U1UXpYXJb{$q&oXM74j2dzq0q*ty#26a$^z_y2si*QsPfF^I@2q78pZR*@Lm{QT}QgNI~G zJ1!1j&d<61b`fgQ#{|c8_SPpMJ7*Gpzwo@j@@=cW+N2U8VUvXKQ}q^%4!Nj;dL0nl zKGQW@T744sv$lut#(_GUX*?q@KQ;No*4;vyO&L2LH5p{gB-`cuaDgJl`lYXg1IN00 zk=cHTvd%~T3y!PMF+qWqkw{TzGg;UD8*=a)*sIr z9$55#Pv3f&(eFFVp|=$$b#<7X{vEN=?UJkafnPrNyddGjJR7gB`{RVVHe@3|G-K42 zvEuA{q58lG2m9T34>)}6`CN`Yw4$$RD!}?~I*IDcB5W49gveyX&bwH6MlJd#0GlRSlC6S&>rz-du z2YhwSFUc>B!RIj^QniU}mqXF3Fk5=cXGQHqQPak^Td>WJyzn%v{a{N$xT?TA#yW)S 
zzdJmBh1dJQ{Zy&bmB65$HEoySPB9~8%f`92{tfpqfHMT!&H9J))Wi+_WV`L@ybd1zQ_qcm{o4orzr6dA3p3H?_3`Xf!4J==P9-gwvg1tvseLdQ zOnOs91IWFQXeEH$+Z;x`_y!>NrkjC_0CI0IbZKA~z&Fq1mj1#wH&Dz>X#l>N!FNEy z0DN-_M!nZ@fv59EUST4yD1oADP6LYOG#;`E?7n~M*S~Ae{)Z#}n94O`-1XM>PZos; zlMe)%2)%7IKo}mZZKRf{vwMX8fH-($J=Sg>a1S^Rtrn6lbohH}T%8TK+D@%`P-*`F z7rv@fu-l2dB2c-)()_mx*^d%$_kK#)EBE`mt+WkwV9k3s|69#$1}cF%lDEKvdQBh^ z@2(ztI~vDcjHgX?0B)+d{v@4nC;*I04GJyAz(T}+!2cok!^ailUoTvjx?!61T2|q< zzo$%e%%|&mvfhRC0!+haO$kMsf6}&P-OZXNAoj+*Bm^w>o!hTs=d)k-JD>v5!4FgS zyQlAnmo>ML(j;2}PD!|gO>Cx9UZnwS@3k)fLT{;2bGI19d~&kFy_8X2P@;pzF9))# zueW&Dyt+Adoenv!?DK(-n@UwCw#xCRXkiVr2Vq~Pw)60;DyLJ;uJa{i)jg#I*Pn#8 zwylzk`^w43-|fZP+zmBNXJnM8^8}yP|? z_kInlB)Qf{p*psX@VIwI+k*yA+2UK_e-?cmq!bxJy~_%$~>0mnl0C^`2UIpwi! zu9_jeO9Rx$-Z|K#+74#h#=Z&Ulm?mB4GCvDi_})d_Uz5E#1_}u@oQ~$^S%<>*4u<% zlW?q=L|qk&fy#q>qdH%xrEZh2)e`WIna66+pK(inn@scxj}$I#R`7q8dtFHw7$m4G zdlVKGr!rE3$_(OCc?K*#Nl0A$I@^WsDvBfE@tXgz?|%P`f?u zO1A?o_g`bKDDMYcw<6<$5&P!xYEhJVgG1W!E3@y1Iss%-28GLvjXxV6W7uD3p0jxl zxs@1KJnM?ISR5RR2sN_mHCA(d3Q&+dS(PIvYeO?tu$|dByWdY6uNNL#6|`CN>+m|c zm2a1v-v*R%g13__`cc$}$tAIZjGN~OLXcv-TpWe_bGx{SWc7tExtQrT@VDz1opVe) z^wXHr{iq;L2-CRz$ZZA$7XKz4U!X{=7OMFW zXQrDpS1A#1qz86NWSv&ISGL@iUoC`Wmu)1j&*IuHJ$&CgcOsfCLzp}kL5DMHTw;}D zgu6^^o>K+ZGcO3IitYb6r4Z2oU%Ri z^EOS7Z;bk+q)4TiGlp8*_mrBNw*WH%Blfw&r_K5}sR4ZpYISqnB)-A4^PsDPNhcgG z5$xZz9dFsCZUe_cos66U4pmIXkKGX@ySBjL2;>s`Q4tuMFm)d6EKU$DL?o@~2U!6*r zihbgW9Qur7fP(|Tx3TtXlzUpyJ;IAckdvn>c$J9`7RSvCOjcm-e~RUaj0yTtRwdee zSNY` zOp7dHrB3T%uZwt(_h;!CyX@3fsW2VcG|9kAiZ1baW>_ZkcJ**BgIN?=)`LLn=X=Af^Dv_ZU15@KliX8LxfUw< zl&T4Yl6G;ja*y%a9ey`Y_z2eI-j;?SR9M0Xa0MmaHs0D53ikem**Bko&q)GF&>Yvc zE{o=hNqaE(j=RK_G21F;HkIvm?2=BoOiYz&w^w4C>6clG+|yy6II-8mQU`He-LTut zLUODu+33U=O=$ADwRYP>nN^HM{n7*{k!=b}CBs zUZf2@>tv->@`0}FW{{5sX)21L$P0`?SI-7~Q!;8-)9R|Dyc-+JW=lY^Qsig%j%zzx zRVM@K1&N5oL8}{Mgzb+F)Z`M`>iWKpGA^PW-JCs~;8jwBHC^aYdytc18X7OJ?j}`X z+v?!JOg^p&B~tOW?wH1%BMg9kd}Z@<8WTm%Vs5hwD z?~xycF~CAt73z zKyhb1xX{}loxqpLK!@|)?=EyY!<|L%xeTRVWhGYYcO%j z`kE#^I?L{6VpC(jf?KWlYTq^-+7DYKtp^z-9ZyVlenT6FK=e*aB2+0B`rvT$N8FCc zbtj`)td)C4i0HKJsiiQZImQg&BL69xxpo4o2**~kp-~h)A-R4}qMHU00$uE2P;W^m z(;c5&$6(a&K3~?`(s%=WpTC|qxA1<}=l~zg%X$j4{5qaSL_-Qkl_|RTh}*uScPA=M z{L@~DIRH!G@20j$V=e-vz)b$dn>6j20;TX{DAnziUj+Qn^e1{uinrbAQ(4hDq0ZMCawU)k zi1Id)EV78ZgMNpun|`@VE&HaIwynbcrPd~p9mI&eFDmjjS^WZ&1N;ZQ)?dl+jsXVH zdHRB&zn``GRjkxwxtHts6?J6;0{_oG335&&B(*;Ta6~f!k^G3w&EcX1Oj_HG4V~C* zWbtdc_tDuQj1Q>|&Sx6euLRZE3(qjid6qoqlD8V2==X6cU%w;$ZTHloRve|@u+-Y~b<-sx2GGvJ$YY-Cs+?EhthC~i}Y~!86uLPB3B&!h44i!(lnFe#}&>6Ro zh=V};CpOxHUO=OuV0*05?Fysuo-#3HBIApR$xMtTw1!)?1X;*io^AivRw-|Hkn+vo-jyl$*eGu(E&Z z0>wOBELbqEC@Z@yWh4f^&BZb_B{5VQjP)CB6FfwJB86ZaSI50S zU#^N&fPHb8{+QYS*u4Vf?M#YEc(A?D@Jc)2sF}9yXuC`TVUhvCA7apMum<((!o`2f zUJ6n5dZ~P$@lZQ7gEp`TL&5}cGm*OBIy;}2PAvF)fKA@i#p{EBIjpw!h*9uLTXO1q zyn#`fq~u`H_`$^Ab+Ph2$87I7+W|)p};yMe19YL_Oulb6R>a zS;Q~TYn*5$2}RxxBl3nZQZ}Z{tPM?|BUswJLagh@^zF$EEufLXQ99>v8=7f#T_I=f zd-b$Wa+M^X=CL$A$&jt^2R@uKgtdUzMO}8BahimM4C>;>;&eZE91cb8G z_?=d?Ymo)F`j+RLDf{DA+~4g5ALzBURmmSpfp{2iLfPynwfdCj#L5~@CCK(svrxBx zw9T-&QX|bW)cT?oqRy+l>iDAdwk=Nik8v~bDnve4tsP&0@h5bR-4n$yyKNoZWz&Oy0V2Sa5u^jp>5xDo^RoUElzMxJ>Tr98`)Mozkk@MEMZK( ziBoDD$H{7l%3WUY`U>#U*Th>Dof`&Sa&P!zUppUv?D5-o@53c|APDS7%;^*Jkp1^TFIv45P)rVrY6fgua%u;|sgn+K+E#qcy7ZBc~JpFxt z5rh3mPP-B?%Sy{)rd6qJUpUIIZWII*23s4f=k$uq-xqvdvGh!ARJ*d%Fuw8llzC{} zvVCBF+c-4=Qc#XBPCVgxc|IYw!Jyva_qQXQxtR}};f1=I&~8lss0w{vL@P+b!V%ot zK76MUglZ3!xT0g3P-w~KX9QP5S{)#^@%}NX)oRuNfg&9z8#p&YK9!F(YAPc?NzEpJ zv(K83#wIke;d-HO-LJ31sG<1OWwoYO%e&(Z;#WQ}O1t|d+zR8jKlFb)0BEKzhR2PY 
z4Q?x&4IVBOGaB?mR|d^0*-c>6wij9+F=yv$@h=$pmeHxX8t9Y9?p?M9uW7Rc+Z2Za)dZFs3l3STUri*M+mrMGM!;WEn8zXC-*xKI_ z#^a`Q&nJ^3-%(pG704Y}?8D3PhC4Ee zUqdo(iYCJPxMi)az;vY{LCfMJ!;bd5JwrK|8EW-)5mIwh*mc>eS@g#W71ejfi8DqB zxm^XwG=rJI(npWHZ!JF#R?XU>E{{`g;;3M-rXmtK^XTahvL}Xy1}@SD*cajfD^Bpq zpXR$d4Ac`E=2hjs&WiGBB$*ntf(*;S`YY?O>!Gn+-fYmko#R9-vJm^bx=nT2yW;cw z2S9(QLy%nRT2z9^Ig48sSebn9z^QH!k}@E9KJKH% zum&coR{$1y^7$I8d>vs)5hFnBEYNmbB!N!0BTnKbTaF%nbYQ8f7FS%WGf%8U=uoRN zd4wh#8o`{gpC&MU>o#-&{>t0v*%w$g~W zfFFv!PtVgttCq|lUpv8DVjs@Uh1^*H7LR%Gd+m}&AuNaD*Mhz8<17fFj}j7UCw`=6 z40;Pf>AO1EaT1Qjw0k((&Pu=PRh1Oj z?KKMy-Ml`+l4-+5;~$9fK5Nr{#X?#P z7$B$I_CZ~F+z!ViI3TURY<$Q2tyNtg5ZTjCV-kU2E*qG*)kk~+Bja2%pRlcl)&niW zY*_^1rJ8HLm^sgq(i=BR$Z0oM7aPj=8B>K)Ht9_LfJk}pt&-29B{M0aw}+n}Xh;m7 zSS24W<-T({pK5|pjfAk}6)r9+#^3gW!DdU4JqbVUKA>p;5$CB^a%G4yO{mpDh+Dj( zxdH*dYzpKp!))tof4jdAY86%3yVcj`Q#XEm!>gXNDpE`-YufV2;5j&;!t#lwu-TZU zna375c0@jJuv=+PKh%c0X{2wO@`N;};Q8|DQj@nS6}&Vpe9v+I*&j0}2n{&2#wO-F z@IKc%x;e*tA|UnLR#h88)TVdTgf|^;BR4$J@pfVe37cPrUDK>Q!X3!pISJpa1{uF4 z*M=}kZ8@X*j}ge6xma|URn^~(B%-mR6e745(cxp`N4bw1{1ZjTp|{d7x=nN7Paw)!~A9c zvDSpLPw%ir1u+T$y8bYl3v#JzBYc^Q={kD&&Eb2r<&XmQ7S^f{Wq-rj=Sk8U`(0aG z!tvI(99Pd?Ee`2Pk%q-Gl&-N=J}BUpT8(Keu&TSv)GcjOievhf4HZ5;H-logvcOb8 z%)D5*=F{*>alNC;!Ay!3WkaJCCvh=TI0yu<2RW^>oGPemK~gCt$!L3MDdilvR9wEF zIQbJ(n3eUq(&`B}-6hR_*q|;u*!3g8%S0Zt!0=H#w&eO1#!64dl2z5O{k#Ers8kaF zQZ4HP_Y3R8pwF_sk*;b|7`7*^E(dwz+|mX?Rh&T;-lKUN$%&6CvS3d1EPq=9gApB_ z#dmNl)lQqo>B@kTJPKh$<;}BW?MUfiB6#gu%t#98*gD=x8<`k;B1ii89#`Z!7#Uci z(o>pNrs&Ix0g8ih^5uNn-CJh9zqy5=oK%MoDid!Zi(`%GGNli{bb#G+E@;{kp$y?{ zOmbz{J$wMG|DL0}w~?T%(joH$2`9qMKqWBr)Iv->k9jxV)95wQX>}pvq3q5LdnLSj z(|N4SVz*q|;yMzJdkayfgvE|5qs3t_LbYqhAjhaxd5Bld&s-hry!zOB0P^!0TuZ^@R3f5Ysd$v>52?9$dyPkZUaTBp0?{`I`qd$ms64!x*i3Rr=F=Gm00 zS-C7_EfiNAK%!K~;!l>|g&IKvu;&?~j5ji0$@7oW<4$W6G$i1}tlgd8|69^~VjY%R zKg=v4)tX*+wA~7pmjMD()|p5+5~LOR1@2J|yHjyP@WDSZDj_@Nb1e)8!1YY24C-H9 zxhRAGZmXt-ga!yAhS4szm1{uGqq+$el%*%vjMgQrSVI`Mf?pi_^{+MB|M>P_n7RJr zH57{SDcEc!Ym#@YVu-~lw4j5yCG}L>2eeSVfhWl=87@X%{J{FW75_2mNw>s_a53qB z-(39fwItt{j~Q<=+519GH#`$sD+nu1W^D8*XV&zcDDDZye%=nLNE~B6FAhteqQ#qD z1Vy;@v{;p`{WKQUTj{J9)_8!&+o**Q_T(mHFfM!a>Cy8eT1ZKf2L>m?cY`G^h+>@V z9?)2LXqCaEpWYv`8+{W<-S)wO-3>Qy>F)^Vn>Drn$Pj`CGgki;8yhX(P?{LaDKK(K zUCWP8Dy!&!Q?70*$GZ)^_-HnknR7TA!#a)`>9X0YWI7-?QMWRpZ6Sbl;_e_oy&DKiDj&AVmwlivqyR7r`S!8aOy>gn=4lWZZG1Q-d|Ez?s|FH z_plvIgo0s!VxDpT!TPJxBfF9Uc{E6;E%3L5`1xM9_zT5$5%2L_8`;S6CVl%b&Vy`}@W&J~BK`U~ui!1=CN4e{{`_WQ{%{tc85rj3|9@*0%zQ zfhw6)Vo`j@bfp-l$XH)#s9{>mm5GO4k4~I0lj-lQy71^Fj<6(N6GZz+xQbPjP&2O~ z$v%My(W+V0?RSxb_j=UUb^16@avIsLXeu)Z3<0o!C+Bd2*0KwL%V8!ho$lyWP@0;G zi6$=;+O zlRT~YP!b`S)2Q@M>h~T4;SdJI$zg&pLb-E|Q8?k`o-Cf}0qOGy(lE+`5LfK)9|G5= z7MqxPeM26|w<^8se<%{6>3bk5 ze~l`b!dL|u+S+1AWPG+ll50$C&5z_a--;~twDs1z*L810_!#Kuj(qzx`Dz&#zM1={ zm~j11vHHuO?vFgE_w(F(nkE}=W6CHWXgfYfaL87Qn6jx$epbQ9a|Dp0y?EYTNV4mK@r9|t=!iy7+ zviNHj2oQHeUc%^0kKsYZcsKE6MkuV2;k@1uIZCAd?JNO2}_@R||{L@i2yat)n-;ze*@8tAtclB0|rL(tcvW zaCqdsWqT~o=InM#**A^H-w@N3U0Hrkr0btz!~(T5Vi!4!cXah+0dm7V@|Pdp_O%_< z?NqoqO2yAb1068s7Ua>sKLPmLDg8W6_rQQ3d`~?km^@`7>uThG6hy^D}1oD1PzISP0(O`<*~olY+$A`O7b%fmAIsD+dA4n&I^&9JxL zL+M0=Wc=4A$o5~BOy9kYD35q_L~55?VeQB=RGgk*HmGxW5nxq(mcTHkkkgr@cFCR; zC1wTpIza1+;@=DNbH;Pq@8#3q^W73{W!T?37;L!`;;rDq5cwzHU9aNL&vyv~lP0DYzhX{s-zt0HD)L2dn-F!%_w$o3l_=VI$k2EMZ=@5){92&Ef)C-=9;|pfBe#)j_ohlo2bsciFr0490#%FGK^13T{sm zvItbo@*3P%`h79dw;pY7K*D4M#{oN0(T|&se}_3sR`k|~G7^Z)@Dwy;MtmyUu#8=* zE$kC1WvsZGpAM`*s(pHGP*rtD@4~vzC!~p85O_gs`sE1Jlr^A(Wq<1gC6D19l@ihk z!*C^&ZUR^XyO4*Ww}9n%;=@w^?PI=qsVpB)~YPm?oABx|L2jQ2x z4vJ6-tOD?IeCP#0wf=-jiW2JILKjNE-5*-A*fHoBSXfj@OX$~*xxS|!871E_z|3g} 
z0ro7tTM`m_^xoxE**p0!EM-d|cKak0eJe{yr}$IT97ZgEgZhG65mn6DlB+G8c{tj3i0Z|GiKI^ zg~6Ip$Ri<_+JpOouu(ZTQQ_LL?Bg9Q)gZ(Yu67?3aIr&#(g!G>J^Mq&?CTI^qXEk9 z)v`!NWPF2Umabbne_&{p^ND@zQVfzo0d2x!B1 zZ^hJ}{;MJU?Xmx=Hv}4YOk0SYDF=6`;@eXpg;%TJ5} zwZ$?z2S{)M1kRVpV{RQW+S#vvyYZCq^IC*s9Z83U9ZU_|`6<20!)YGoUKhd-e!;z} zxGwto*$mt|YaSAhf1CyE9+$AlkL+z zv>BhgBDPIX9;Xc6JDqnuDlP80( zB07f&9)BHF28MtOFOsL}U3!vwu|{UMFO1@( z2urh5JmlAvt$4wyZvRD@i%#ZqkE_=ZZ&*FuS<}DBz@bq;avBDG?#Z>ms5&AW5~FjS=Pv=R7gb$i_!3mXH#%{#k3zRH0rGO6(Vwr zY;lPRv$}@ui`a(P=5VbmWSO+`c`K!lgARPun?}{p^}Mu+o_RGXeN>yzP~|{JAg57j zuB+QlsDYk8mzdw;DStM5JsZ1Kq8ZHtD#@pQRr^0*yYJqtMKTth1m*k@?`LQ;@#OOb z=J=S{mas3(h}m_abktJ%#jVqb2qEkAqEybH^wyOI(99>~z8OO2cRln9(Hu6Uw=27H z-s3H2U+$Xj5@zCCuf?w4s_}n_cJ;riEMSa5AK-6{{!{K76RzOSeZ?Gha=JsK7A8Er zU)`mx(K)UNiH>fU<0=H<&L+C>2?9)w=Pxxaz7?wD{;S&u}`!1 z(r%t_hL1mQZ6lx-H%VpYG2B9NgX|y+87QsTbV7Q6XCeB>2tFYIDIe8P+fR%g_NEqk zv68jN;D)g~Ua;9#@GT&!JnecxUVe&`0fLtIqS#xUH3)?`0i4$ip`smf`I)<#`Kg$m zD-37croYBZMzI8gg~Y@*Ufq}1cop*Cz-Z(6o$tn>j~79SpWikPn}rRWu0&mFyY#0R zUhT|J;zn1A1sTgo|JdY{Os0KqS>IbxMr*!=Ni>Ueg29PF7sX0`7)wFYyKfPT<;`408I#J}CIAgRxU7j*Ir5K*9tqh)7J5R-jNw9ck_bwj}0UOCIoC|RUB0+NY+W);016BQy3aIiaGU+P$sk#>{ zTBBL<$O{S9UpfM&VMA)@gN7Bx)i(ym>`kdqwoUvo!~2B;MK>-GWDMA_4ci`{_G+skvJxg?@uaS>l z#|;$8TmgAz%$!3|FwC6NqpU@YipLxawW}3cT5iv2S8Dw!v+PY=BuRz^BOwlwKeZVi zQ!OqQq)r&;l<_vUV(u!E`TmT-7TefNOhRCUJ)@{RnmLVFO|9y$I^2%x_oo--bFdSX z4C;u6Ju(Tm1J*hjxIkqT!gV#!$-fcn1LNE);X_Q6KBE@fp&1aMZMmTrn3utG??CGz zWn!l5KjF(g14r3MUN9Fn4%)nWr*3_bp9fNk2T7hUA#UR~i=w1vQkhP$^54rbs*h|f zqj)_E>8jy{z*dMxDpH39Rt3Rkc74 zf%HM7RAK@~;2(Dw6;|MioSmTtQ-hzWGS6qz)YhL<)0gV(8wyX}W6KOsHRR&DK&1ZD zdT*JY{)>#LmC(M7zHlQyuL>(@M%e;NKadloTgL@;p&_p7Kt`ePA!;TxT%&kl!+v~3 zz81pR|B)UR$9`)0RWN1dK1D56?6EubtBVtzKElEl)Eo*R}MB2Z4#3lvJorQ?L+Sex~6nn75AO}P(LJ0$#F zRUmjF$vzKK?3`eNbm+ieikmW>UA6mD3?(BpX)4(p!ZJkr|B97n9{^=U`I=*fhAjz53$4gqK2lULH1PVAU+e}KUed8@ z+h}2c{ibsV)`falPwF%Q@ca4w363JLx5yI9MW0h|Wa@6kqgi9L`rZ)3Bbe`7j0+d0v0SHa(cZbQ!^bU$=JGu!MpXNZfLKtT ziLC`+TKdAc%em(nr_MFFiLT0>AHRci=p>N(@APFN2D$)hvM~}(zVbIAv{)x^$H~^fe)oUoZryWThOg+F=wxPSA zUf$>~jvw~Snk^tMPH<#mX*6M5TB0qUcKCV`)CpGYFmDj9UN{q6Y{e9s)01>wpW>N} zZa|#i8uCTmJ}}bP>FgkoyPRZsJ-$2Bo+?j7{0vNjm%F^QuOHn0E~I?U_40g>lsjH zV(}`s$b;^qn*~Sl`7QEQ&q!y@GbPD!;F*z-FUTH6=cpNi6t|=2a6tiuN}2Jc7kLpc z1+f!d3kd*X)`pA0gEPQ&G zk25IkOM^dUS_S4(bjppMDU0_wVwwZjOXpmAN>P*N_wF3sig?&Ch|kaC;HEgpX1*8M z4lv7+a6~)BreM~k{6GGAIsPw>j-=aNP|##U{q-Ye&{u8Qpm%*%>V{a*j)e>WS0#`D zJ^L*^TA2g?@4`6!{y`fHDYk?c%5RD!fg59XU0~27r=!LIDZyXXE$w?(|F3{h1n)J{ z4rC%}rhSZkTJ8x^4?txDg8gJ_k>PLU!24^>{)=(_`-lGh2I=o15Wq7}yk9IvnsiHE zhI$A5KeWAfKvQ|2H|mTsDvAywq7(~Nf`F8OR7XkZMMy%Vk8~1>bm^lb2mykWP=W*q zkP1OcfPi47g$@ZwmlAqWdJ*Sl_TA^*XYcOaeg3)m=d_&Uobx-$`Ib*vW!Ozqk%9@* zQ5gFfUh>f|i(EZWkvP1#%rn7Hob$i`Ptq0tns7zfeyhaC+2mxn&MJaS;C#=+E!A(I zANPH!Vd-BZHLdu={>$I>Zix8~=V8u6i`bu|S+Y{$9})Dy!LBdyk*mb--<)1e_2xah@T0V>YD}KBVIbvNT`6(hfWYBW zqRUMwfxDO(E4oxysAI5XeBuV4*v+>1PTm%%j{uFTa!e>*PTB(uZWLS&=5`(*$Hymp z`BOzFRWm$1a(X!bc}#}c;Cm;v{DIV_WcIB>@2I7OtE|a5ajq%Xlx5hH*-wWK<0IIi zCvKSQL;}2ZTp}_Ogv8EHyy<(h8w9p@nFzY|Q^P6RfM=3R52qUu#2GP@BHbJ{q*}rQ zg1%NoMAgWBW^_M(l^VV&?R#-ZE}CZ2D0XJ~_smpT4w{>+x}*++cQDGhYeqirK&Arq zDW26E*_@uul&u2Dh+nWWq8?Q*{SnxbS{A3q9OAZ=yGHX>nd0T9TeC*ofVh0+Ku(qX zo?`rP^o61{*WHRM49Us(K)LjOTK&(TbE0$!3DghhB?AM*SbH&`39|qX=G~ z2$$BNljR#WIlbUf(^sh>Teisp+P5QMZBzl#cv6bf%diGNYoi|f#!=jwwK37_z3 zx8Y-^_79Yq+(`mqD3rPRoAIxPba~=dX)D&_a*tUTx3`9PZ6!gWfq7|eOQd9XTS`2I zlF!pFVtsV}F(nG-n}H?Z!5wp1#;nFQCt$#PjE=I;Mdr!gS`(I(Xo}G_bF5>DL|}Rm z=FM5a;XBj-JW`t>EoW%I(a`qS;`f~^Ed0p>F_Hbwl^SfgXm|_ zAA*dSrcMJpg>dVR<}FxU{h!HbnV^ASOTQNX2Jh3a-2C%_>RaJ=doLJC3Vdv-gPXQk 
zoWV!8vW<5r6>Vr!dfS^mAzq%O?ZpsDsr2pR_w z1m1b`7*(eydGXhKk4ZEar&hAQ8aRj+APy8d#e#i=U}^kZ!*goq#J}G|KV4p4&8xjw ztGRq1Gp<$1Qc2`&sN|DQtB8CazC5Qxnq%B9gE9eOm)%MUar({jAk&DEc_1y7-#&TY zIKI(RJ2K^>5J2RWoIH<@j=fnxMGp~EDXyW3rvNsOOUX+C&uRl~pA@738pNG}3`gp7 zZQW5lDLce0o)BdcZn}Uix5)Y2o*5qR^?MLoz~HQ@X}yss7tb<5Z9lIv2!TL~H(B6| zCeMwoZXscCB9PO0-VdeTNQ?9d%w`}l{4na;hHj~4y?$FMqLHD`fM7jn7l%lUUWleO z)S8Qq%}z5+!IW78W1tL2qjjAJLss@=JBS{P2d4GI^tE9M;yd%zAA5(vXX&PtOmyD% z+5HRk($vvS1?4zf)DuKu5Rl(M)xuO$Ll})rpGahtB6S=DUzj=~RxwbZkXDdc_<$}b zpjH79;e$+V%XaeD9?kf5q<~hA?$aO;*Xk) z)^<-2Vl$SH-$;H<0o?Lr=}(1kEV!UKVEOaW=*qmUh|thDCOhS7L{$>ME!^6U9+i`& z6&ar#&?qK=V@L2`ox?Nu!#sQ;y1XX_$c2$+vAq?}zPFnZKBhslk(8nm+KX7286PMr z#Q0Nq6fdIRaTU(ZFc)YUFcmJ#{25mrEIJ6x48Cu8))FkLRC6bLP>K8;2|1OprYB35 z|A593G`%Qa&|zT)0<#D%BT^Z_F+MUcDt%Fn6}$7fz*aLc2;&=0e-gj*mRqIwPl{-| zZsYz;IL;dEKG6T*=(xTAdDFB|-nL zv5kbG$#g{AHnP4nxz^ruS?t2J{y_JzVB-1+^$)d7oZz3&UbnoS5!E5rUVXR_?jj;0 z+yWFHB=D26K^79m_;21taW)8=>PzsM8smKrq$RrLF$XqV_Z4Ddxs(`!9jKZFaFB~L z?hAb>rk&Kj()n1}&q5$CJ3>wY_Tyu_D2FthLH$S>GSj^XHxDpGKzFnLv_jQj zr;M2>n`xZTR-}aiu`6TNN}+z55n)i&zY|va=FjHpSW`|dYrox`Af?&(fJbQ8LJ|(4SG=9d24@!-nN(-T{?_;SL zpdUt^7d{&Ce5b9BUeRhfIWb3_wqNspxD5DFB9!$&>dHlb>Z=wd%612al)#7_>ijk? zTZuLlXWorZtk8w8G{uBHtXLvb{SF9ejjZae5L7lE?dZK@-$`k-uUqCk>#(#jGclzL z3M%QHl(KOR2fjH7bo|XV?J~z&ZpMX;Hc$_=3@OKk$!7OqIbbu18B!S$rEkz2A<6v( zRoxjQQQK8Ibk1XjkUo{a^f>t!7spOY8z{bkWv|wz2?4FxZ^*%j5s_*!%AmL}#jbdAV4-3b36KkEvNvS%9)< z-$G(jj!LZgo1w+)QMRvt^xMS@OZ;{0smI`7#|RBV`^kQriztPvaucLC{al*iv`ONf zSK1UtgG0yZu4tDOyKukwh={XRs_JE8{n2F)RSr)3{B_JV<;QV0_57+>?NQp@Jzusc+H__7K+Z)V{ z9tPJc^p$CYdysQO-<1O??%{21N&VYLUI@QGe9w1it3yxE%;XcD?e_&V==-VqbGHCa z`K=LOi30V>0BidK2auh}XX3DtD;K6EJB6#;&C~(zzAO4J1n2h84Bd+Jc%m!&bJfa@ zfEZ4r_Ic{Mz3B2_6f*ee>;T!c8?}@)KMxM5y?Z~!8d%1TS=0e#lpZCk1#SY=eWNiF zF`ra-%qGJ;Q)nJv8)0K4lEfq&e&Xy=Mw$d5jlMfV zYAhUAkgI7Y0;G{mru`n*E`nNrJQ_a)Hw32PGe?CttZT3A4~S$f>lGI9bGdpa*)ky> zrMLm(16lsfx%uRkzHJ2hjT2H%Py;VMb4`fr)|6M+Ya0-&ocVb!G?55PzstkfzT}+` zvU**%(}--59%~YPTXo`MFQg^)<5~X-O<*zzw0GmxH4}|F_wYad%MkwO6(`(i`@W6x zeve5rp~B>wOFA#qT{~Om4h?f!AXqn@*_t5VQ zE5KBy?(}Uw*`+-ow7T6wJ5hJm=5lAHB&gewIw1EP#6@%uoTB_uN=-|>KGny9mR6}r z8HOB{2~o!}7H-;H)B{-8vbPm<@Irr)tBqyt@l7g_E5T$c4GW?33-Fm6Mim$NT(GFL zO>j`{&an=&|1S4N1ZRej<`>R3cSKMc3jT2}_YusI#E|4BxRsV0787VcbF>jSWEnWiCmCNotgfUwZYeSluhZ^6y13P((d&AkmD>$w0{j|zw zCG4B?ila3jS|c1iSGi&>V5?RWBv*8}4sy?{%$GK)KsBUtAVwE z@>}!MC)ANn;f?ox+`EhGSFC1Cr&MA?8dnWWz>}vt2A#tKL zMZpA|a&KhyV(3E;Y~{SpxY`hF$fU_=lx|0dDaGLdc$c1X$#;Rt@S>n`q2K+-$evy< zWoQ&OWJoFBJ9XohCkHMAH1LIfpx`~mED+%x!DhRrwsu4}7Cf7jMiADJ_71AhpIfz& z97Ca+dT!x2K`nw1t?rfZQ62IirQzrY;qxH%_oB?QJCEltLqUEzbvu?}GO6mJy{sX* zSUx6p{&CHi=VC(!rP$3BPLq?xI?Grq+~Df z&{9L3PF|JRAlK5gQDZ)C<%d{V!~)qRjxclg=NkM)?*J2FXj^tfpECLRXb)aNGeDDF zRMh~>Jmi~0RZe5PetvIN*_{^3g2Bd>8G`mJ6a`BNUdG&jd9`b0E96XOe&jaB8qo@< zL3z}=so9qw+TtbBN1%>~w^BO$ygDIiO^t($(mdR#H>_qUdk$ThIWNqxoiKO#+CuGX zZx2=E*zaTkin!(!q&#h=*?A9tNYA~=ZJxDdGrt_^5&|_3)xzzXIhG+bmU1|l*0|e_ z$7T*rntrCg@UWUF-}>4UP_J9KjoFxTOR(JqQd%3db;>|38aDtV?vRj=L~;>1lX9o} zdy@C2(Wl7x6wugwG#TC;X*PoEl;42b+gXG)J{a}22*QLcA>xY92R2!AQ9P_rZM>rP z4B^*TFO2NGw)uD_@?2wC7v=8pXbIjix&#rectzmXeA?`REsT(?w3|xsh?^qkTUh>V z_QB|LW#bdXU&mNQ7=e+m?-AS1-&c(umD4>#`7-$2EV#RX~-4>P8qx$XV=pjFR zjol2fwr`*xg7-Bf&v<)-9T9aP(46I?*EV^hQ7-vj1u!2xpc+Q-kR1%2bIe!FVlJ;M z4BEM@?6i40Il-Mi%%;Pr)?kYhFmAG->z$oTTpXoJvSe>jVNxQVnLz00BqV#GQRP*6 zw&*jrS_I!+^4ehXd9F*V2h>7pdKo&%oR`ocf>$)!24`enh>LSH6f9LbY;yd9lRz~n0yI$YdX7MOu9F|K&9f;xkrUrp{d*GD?QNqd$C-MQ;i zXJgg%_^lI#bOWJ`mPOdrs!t@Ntv+;GcCjf3ICqm~D^IS7i)HIld??7Kg1VOEoYN$!QpN$({}2n9g6Z zNMMZ;JV(pK$o*c2Go+}i6`VT}>g(M~dBYakF7MEd3$}K7+qEMic`bcGQ^f&2_jmB- 
ztc5gS8)@Z9q~_G^U+V=-#>hHu1r0WZwGU|u$by-0*Uu0VGLK>U%|3hj1wkNBY; z*uuB)87fT@g~h2gU-arLLp&15uX|pzuw>_eV zVSwtgeU60*sBTY-XWs5d?yZf3`v}UZCo7OaRX=h9g)hK2)zJ|nj zv^K+8cZrQs-w|)Ss?pVY!A+C2aMZJUUQhe&B7%+AUIX5lSj}cW5)9khR9GiIv$-Pq zvSqh54;^R;V=TvhZQr>mPE4q{Cm^2Oc$o{T(LDu8jGtY~%UA2{xi$YSR`a76?9YdT zc5(;_OqRa9tFp4Lh&9y7hcs3WG)MNlKeK~9{9^gz43*3A!|&bTifE@@o|Fvx%r%g; z!PPk(k1;#?%Eg25uz!SPH(_e&;`WCQ>S_`2TEB*(KhDkf{PB+v<$s-WN!vs&X`8lJ zw*@bXs6D^uD1EfQ=W_LXrg+1$^3lsi=vqvfW7@jC#nh7sHJ6!JhZWNg|L@1|MSk$m zmFd?dJawR6uakv%Vy#tW zTBaoW)JC#1fTl6uIxlN>%R=3E=%5%ec*n9qX~TLULVch*0=Csdj);M2v7?JWM1j-L z!+L)1OyfJ({WAQEVs6y%Iai3?;7ruW8 z%Tah475S#rn>X<#MoThN+nIQ9WD3llLMd+ym*yrF;^H^bGIKb;gG0-Jr4>_#EwynKI#Yx5wH`nSsi_S+x7`6H7@qFVl5aP`25{GgFU+$^rn7#xR0#$_uO87TT(>brA_JC~0Yjf2WWx z1Q-F!VBB`PL`Z#M|CZWPR-e^C1+ zx-+s>NXbkso3bu)b-$X}!{^WyT9rfzQ#iK%-0V_8K^<2***5Gdz2;K_!nPEu|4oEZ z=a|^m<4vFx)i(3F19s|cEz$j16qy^H|B%mJN>_-TC zGb)guUjukj$&+4UYJkSR%DBYqGzOA75JK zD{^_*R&0%@U)#Ve;utM(f~ml_e z!v?@mFBcAe?2k6CC39!C0!izRwSJ^YdJdhnp2>EHRIg7)P@V>=HT_ z@_PW6JF&dDjZwgaloeOLHvqLS(=hU}Ql!){3E?5>779VM&VriO<|%Ij^Jj%d=|Cb% znkv)OpjKmK?5Amc&IkHJk4GTv}|vr4CV1;AS#16>@s0l^0<#BDFZ4zJ&O!5Y#`uLig{loH1?Dz?`*l z0cAPk>FU64QQ1adV>ig#GleO0OTmABO?<&GJPp4{3p(1V5zZWyH~JWPThb@h zK_n{y{kLVNDMcLrrUo8X3m1kDTTZfG^+Hx5Hu2nF~cS&e) zvH`quQ2%TjX292-Ng!ZEPMF(z4-{c6McHFC@Q1Z-?b|1v#jCdu{)7x)Zy1@4%m}2W zq9X(;4fbV@ChK4+bN+(g*^8gQ3u(C>$OY@104iJct0knB=nr4r^}o92g&AXwCW(7s zpN_Y2y+gg~pgt|O<38)d8 zz|Vb!<23?PBmq0fsB0dYxkj42+1CWS)B8eoMcZa=OB^L6IbASEnlHTQx0Nsd^vL|@ z*_)c9Tccbuk5bupj?$GLshxS}>WNH6zHgDA@2LUqMrNc-o>$TW&u~b<4x-7feA1qqc3DM*euvRx3bm5On{K{1DzaF zL85TwkYJDF8#QuS)`x;4!zOk{2IkcmC1p?hwz`PySp%T|bLcF&-BiAE^oR1qB-M@% z9dP-l!1OhH!H&;6Lb@327Y7fy+uu(0>xEu2e!;T21h_K6b}4bY6@mHNO_da1$CObF z5FnO>+l9N_QcGd^lo3_OTcYLuxZY5Mf2up=U;tUiN_lKrB??r93! zioaY$pJdLzz4AeVADVb5wKyqPJB=z|dRO}=6QDsN_l4o4FfTvsyM_!ZQRWrXWF9C# z%P|`PW=8Ot9XwHhhCC_u_AVn8hby~=N=EQALOQ3^0W!e*%?_ajwKjA0Y?watl=F;c zB&R_s1KMi8=R$)VGyGG3^f9sCj=Xk}vP~pA8(nBbHCvaf+H+CmkbIRV$F6V96_v19 zFraFxnTYICNpXO2Jc{N#1-nh{XnS>xcf0E0!*cb5LqWdgbbFG@#0`Ch+EhpbFFE!D zMSKd*`|!7)#g9m3n(iY@hs{Aj8H??L`>=q0Ai1k!4Rj_d&Ovi@oJR*1rTtRvi8CQ( zwX&j(i~Ho5Ke3;Be4_5g*7lD?h)ofr*WXHr(P?ekRqlf2AhC0M0D!GJMg>Bf-&ri(YQ_e0eDu)JK|ELe?1AZKQlTtm6KG;uBjAaxeeO}rH9bo8s0 zIa}KK1AJc{K$4ioS+fqW@2m?q1JEix|NZoNf`_@W;U2X z)5P~{S0DOy$8!a2>`Zq4?A~5JzUG8aKe+oNKL7jq;i<^@x5*ogfyn(_H(`zj45*vQX2czRo- zrDb^Gv$O%!bdyzU$WWjX@f+TAxyh!98{S4n(plIlnIG7uHiK~mI@(WCME~)40H2rl9 zfg24haWj)#q2-9km@dc}99K6Y$%h)Z!j*a`=qp~cF-&AFQEi@}Ou`|2m*nMRSb3PSGT?(zueEJJ zx-zfYYO-2mxi^C2=nM4~97~s*&2dB>ORlzfOs;7L_Qz(x5R{as>5`-|0A67hwDh-& zgD6wXH}TC_c^;l>o6+mIu!t`tO{-tfc)1z?khW*_R5Hv_b?pOxh+0n-1-tE54KCgZ zI^sd|-m#_1gnM_`NNC`JA{+c`JD6_!dCHSa;X_}FZ(#+3Vj_8g-K+-`b7Qc}d_TcD z?1%khnfNCG7S!TDG`vIj^or^7OLp!##jHymWhT_iH1gl47|KMfo5}?aT=@{i6L*sy zrG~K~FM3x@WEP>S{VTc>9`%gX&U03e9F=enzbRZl=fG9x9g2$aU@0%!MyZcog2T+j zOaqJ_(%f0T5~ct*_(-UB*o6`O_V9uFEK5j--o;ERrUjVL#Hem5m4bp^H~O77yfjLtCkVl{h`26vI$VrJcwf^kD; zB70k1By`Dk06<(F3k*?uGn9rePpMxzHj{B6W2vI=)e%wlnNq}Bt>a2V-gQ0^i7M}2 zQIwI9WNt1P4=yh6$px{HUN~D!$sDOu=LAW?}w~50mUE zpW<`baBPuNyAU^0xFzS_G#X(=dDGX|YJ;qY=<$IPHT4hRN-*VQ`*qX6R_vZj-pWrc z)jLV-OHt}h#n&r)-(k=gjBqU5a62`H>1&uiI+1KI4#0~O0=vvopma#X-2tew-D3YD z(Kln8{~hLLDHRQL^jv2Jgr=;(yMA3RIQJ;{HG{;)n*O02W+DF78&>r(*}Os< zn)))Z=8VtXtA@TA>gwmGMp%U2(G{ z#dC2JxEK0Oi(2L-wqXjl4XgSY4m=>`rYp`cQ-vYA<>MxYvm+6vBh`(f88uk0V`Ez- zbH1@BvfPmt_=-X)uwLBS9@K3XBaWLS;zzETy_;5FLp6hZS5}uA1OL3gOjyrI_p~b` zj0!q{Q8{1DKud8SvLjD*7VQ&+%UUHJ$y4csW7>2sJ%$|Hs=A@EgBqxfuy9HEy}bC# z*r*vy_}(WMMq|GmBmhv0oBgJF`FQGpXJXeNpMXoFop7cWOuWajmD#Fh`y4?}of4%q 
zgwNxPFv=!N(af!F_ndN6hg&heUeaz+hKSJLVNQCH&|jGZm02SAn7)m4Ij#dNKbYHK z?{GY#QwuxmoxStA3})(aXTCT6|La+Oj1RDMS;2Y<%iC6$!M4=AQe*(|bxE~s&-g&F zgcDU(o8oYzz9j`EDKE@S1a2hcVz>cqJd3$$FD?V&+3eRXA3NedkQsMdi;-Cy74l&2 zL|bcaY+kx+c<|PHt0_umTzviSEdnv^+%4e&8$X+J59RpD7y7NW=MWm&80Cl)zw39n z)^z1Nb(8s(GF&%nU`^5_qv0_R0&om)UK~^)D5V4zaKKb2eVn~S?Gc1ET^8sO) zZ!A!BN?l$|^3(o(X$rk4HLXMP{83VchR}wH|3q1pi(9ZJWec3jA04Z1t;V=L$Bk$K z!U4mp18%N(BFvHc5#&_t{B7d=*tmCcxEl>@p$Zv{niXC|IGKf6&kR>sTtr=KIvA z)`#i3k2Ft7HL*W32_3fVA3PVF+5}*Bxy)c}-eg6lkob{}sO@fgxIjTNYTr9Yz%A;o zZhWP3aMy+EeQf1Hu((t?pl#dV$|C|d)rq}&r^5fRy!D8AxoNNv}Z zEYl^k1K42E`k!f+aa%}^T@r5+*b({Mo~DVT&*e8d7FWO?9;UJFt zmhex_6Izx><^B1qDt{ego@64Eqsr+_s+pU8JhA?r_e$>uxx_iC=em3L_6* zC`-4UT&5C&d0aPay)(#+&NTg&xiy5$pW{8cBG4M7k4nT_J0D;s*IFJZSnqf%ps{<3 zup{S&P!N2~usDB0r@ff$lb7B>$TMeV-t$RCCA)`qRKV<6OQ>=f^c(Z36u~>0*n6o# zsvCnI^+V$*d3%Ad5OEzU+N9^E|0pQcemnWMMzRiI%`iyE?dQmEh z!&TnXwn^M(_=RUxib$#9827{+%tSA41JevC3${t1RJ-Re8x0pM5oVA7lFrzP@}~B_ z^uk`W1r;Hk^sMp~u8ek7amDA(MjY8xYg;g zAPQOXr;**3!w&Z79z2kk)5)!1Wa=78kUNUu5@8K-9L}Z~>aeVOE(dp0QT?HpRL(2I zk*$C=Uw@n-F|i5tdc2ff^Vcy@@4bmfkw&+IIt}zG*#_eHVDS7`okfvg=4A0|h7g;8 zoigFQzb_upNYmdyaN1B*x`6KYr(}}ns!$maxc?dq-CGT8*R$H0cacod%Eh5b2|YC3 zU^T6~hvQyv-*d@Exn5<|#celk4fN8fTWiH6LZpgrjm_m9YAR${A5Xa5BwZ7aXItCy znmFvpsrax|&EjOq(y9F#o%(pW)GiQc1VvqcCoF~=e19*Tg>^5#?QkWCOmjVlAYQ;3 zh}Yfv9lSF+JH@)n`iHAZrZ7&`zgRMXJ5OkZ4!#+r-I`S3*S{}Lg1*GoHg z?r%dFKj`5mjsACIr&fkj(1B~M3C{oRf5U&j@;t!53wQvliphTey(KT%T!Zk|xW;eu z^4;u#T+hYxkgc~4O=csSBZ$2xTj`aawQ`{q0Xg6gg%#6*QBpP}9mm{0RhBld*Bo54 z#Hk)O-f&ibYqpNLmq`N3Yu9T4&x|B_{Nz|Veo)Nx0sl~bbPnm&X(2;4LyRzR_US&E z>Wo$7$h>+wHW6d-Y9}yJUf%o_!^W4M5`N$#CWt7P^zI300=DMsG_A1@7stY^*x2GO*0wR)}*42({DfV{)b7Z|0B7P_4NpH z3=b^YtNHoAUyA>qD5G+$5ft$^FRTY54sIb28$mqDN}P~19GkBPE>XyI64$01D}4?f z*u>_)6m=o7O9;}((N#D;aA+BaTgYsPd(~uRFOw{VFMgQ44@iYy1|c!R$E|&T2d_G_D>^7=U{sog8>)_Orv)fpcMmy0Sk4Z0D{v~1S1ctRTp#me8+!X`0{ zTjXrf_7W2<4EsUFTM8N+CANw!y4fLSI=qNz%#~Ea z?l#^?t|-EhXi!iK{Hk=Ds5RR!3yL&z5^J?ZoCyk2KPRTG8VZh$XZOg{z>6toFLn4} z`6T#Sc7G{h+5XvJrPGb4Fr@Tg0_R}J$GFK#QobfJ0bCuPxV%=o{kHFwpHWQ2?wE(4 z_G-`DlfU(2qVk#_r=^>ic=|Y)+}>LhR5u*taQg%@x22+dYY!5aeXos0`ogZFe%6=t>nC7r9}?pJH9Uo-WHRAQDDA>MM2IzOJox zOtUoj1#N3s=3kTHVbrWEoA+n@iNRGhqhe;zM8^)gjw#f#BV!ARPo{;pTK4i?kT!s2XYZ!YElwdTU*0M|hu7&KTrSj=Gtz4# z@vmbx2gP3z9yr0%qah^v*x@aPweCoZ@;F#WC#nSQar#>eX%F5mC~&ID7^m@@ye*>1 zqy|$pMg$pA79NqpWz2e9?BCj^j=X@?bOhq*R-N1>i7_90 zJH&sJ38a32GD7YscA+(~NlMzFK^PRH;Dz+<>h@$QCnBNLBHti}Cx45rA7OE+!`yY?m7 zdyQG%aK-5l;l@Ji$@1`oK`|wNN)fj2o>Vt^tH=7A>tDwxPhoUcINRbS}S z^^`p(6B?8UlbCiH9!Ez6Ce7f^#o#X;nvJLx)lB%MkBy7TtB!576>)peNhx;_Pr_&# znRdaFSo64=nqBe|m27#Lli7HA@LfE&R$m5MW@zr;09P&N$)7|DXU@oPBvc1q3@VKa zP9KiICNlV=-i7=c1~w1}C|HIS{!1)LZZ@|dU$cZgXnzvKs?hW*)~BqxSi(A@_-w&q&!52km@kQiaJ@JZ|O`gUf zy&r=#f)Bt&{8{H@L)(;yt3)U50yUZcAAJ~ zB%h5>kP6MM`04Uo1wkJu@x~~@TiKK2Ya?Ng^RdTm;9PHE9s75EjUCWmA5P58EWe*; z4;gfyDfw_$y0ihC|5M|WvAa`Ga(Cyak|H)9qW+aA@c0yA)JFuFNxzPOQXaXQa?E-`pDZkicE(Hf(E@fR_&rwpD)c%78^e=S$j1IbYETU$qa03 zcd}i&Yy~aB6)EUbANkJ9I6m1>|LxPK;M?yl?q0=dgy`z58u+B%p6z)?%t4=tl>%+t z69oBTP^7)SFoMvTj(N4O$jVHd#<a&VHx1t{a1 z+#}yX1_VEY4|aTBQQ~lquSL35xsjhX#&%&-5>rCEJ>o9LUD8Jf6bUDT132)j$Z3sP z_Y+Z)g5O}q2iU=$GGAJC+jiFPLFrg24wGX7A~eS)_xnspJJloFja{oY~~ z@UpMo?&kWX#j@?MYT-tsHHNRKFQfyfHbvg07W&Y8hgW-hA6T_o81XvaDKeD^<3u-w zslWR_gL-_*krj&SaIN;YY!w*SWGIO%LR*w#EtEe%>7fgl{O{Daa%a^N&4T70nHgo% z9yySsnCUPA1$S!Ly4n$~Kd-bh?XU9Oeys=XpK_ciqk2;_WlQt!26_MDTlLvOZcWbC zU0wrX>>ij^@YCnd|G1O>m+4QOf6f3Z%JtA*<^5&3-&@Ylsm_f=-QJEd*&dxu>Vslb zu{OP|_qUY4%{ing*{;ZCiDq?{_1e;a^>A}9#sa1AnNe{jqdTE%Sj06j6;(G&`?@FF z|NZ2WxhD%3ur=?I5TCTgIm#IA{u7_s_aGw>E^qH`BfHCLY)kP|+kmw;=w7*%X-lSh 
zvx0%Qjd)CE%vA%F9+q*t;3i!HH|utPpVYk}5SIO^To_33aWp~49!@U4eIN7|>!n-# z%YQ8V4jG6n-9GleX9LH6iv8NE&@Jg5-71mGAT_`=0bw_BPsme|1I#wOvv1(UEYcQ- zp!W7YJMy5oX)Ug?RFq4WMbDuN(MFC5d1X*1FRn%VPfZp2i*|q(b}ET91dz5AqFt1< ztbrS+(j^1LHQEv|phb*!vn7w-tRVpnP8hwK6rkmj{@%YYY)e-UZUGaSJp0YKMZO;a!RdpLm}nO$PHf-SDA-6J{$6wo21ySyXehL9hXcM?v{61T;&9}JW2`6o&&V1 zIHHr`q8OMqpB(T*sEwy&!;sxI2Tg$7Ldl^int9aa&=n@bD)6%cEfgY)<1Q~rA~d6U zBLLq6=ab5{H?3-Ex3U_>Q4tPLeX83X(Wv_H;&83!efFx3U^fL;qh~)t;Xfl+8YR2#V0J z(bZi#boHH6Xc50{>&xbtcu*Lu_di2f67Uyi`Q>WxO<=Go1wTbXQDYZ)`VDhbebM^B z6~{8J&B+(!0bu;c4U-=wZ@(&ASxk6-$@gVvs=f6MOd>osSkhXR&UpH5>hAr3wSOW7ErEg>K2$Zn@C}&Db&9n z@sunk)r^r5(vh7GOO#KhJ^>0!^X*m0221LO4a{bA7Wuv&Y%Bra~o2zKjCg3XE5i0rpbAV(*GuBbIO zBJB=M$QYm@+0w#-Dg#41QDj`=)=!drUWkHcmpX2JQ&tB^IPT(suyAq3x9^&Hx+ftv z@?tu{s`Lj(gO@o-T+a+pNcP+DC&I8$fL!RArojP-zeA<2cA5Kbm^2)F_>yEmF3%FN zrUZ-bpllo5*q)1S<#6Q_&X3vFU@%vY^;JSr;5(srUPh0Lo?2_7m`^x{dp*b!5Y*kj zf%mP9c2(~#*uliV#K%k!DtKFKw&O(nPI)J}jjzCiafYFKJGh z?WxSysJd9ey5H^`;hL~&09>&zN)5x~iO}Q!-WdNc53*Ao^Tkn4j01@+i~5eS9~R>^ z3O$&W@1?<SXWxdl&vFz#$1mb$f}8}2+W zv}cA6@_VJMW`uW|VzlR;LN;7`&f)2%Ic|}%--Ac@9~=AV z*&?H)493ZY>3iSsNsd~xpDMvH9=*V`ovGTNXCVrZ&KK#XBhvQu$;nO=fZ7e&N2AH$G+m zbu1}o^Yq8Xduu`u8}4mWB7$ zM55#Og>6QMQNc2Z$Vkx{ zs)Qy;KuWMB^qz#!8Kox_r3wg^vCtWM2MrKXNRSdBgfh~L)DV!+q=XU(C{;l8U9sEyy8IQ-5Q)PnLe$JN?hn3_5 zFopI3UJkjRqqXSrjGsDMlXyd1etPDTei3<(>EMy^r1<7NJ9bKtxT#j<@%mt&$pU5F z7oXo=6-*+*?-v!OU*pKSnx)JaW?WFOxRad^OhG(PcERmn=awN2fDhWGPV-9rmt(pv z@72gV?-k3nuAMifILxf0To$@!DSUgN!sKk~FVVHrXIm2{`wOn0zI`?Bop|yOGF_s% z3sf!=J<|zH9F|>|obTRg-qjlpA0(0is`W!bSrtkxvzUV=P=a(0KF(t*pPSiq2bt-K zj6*%=RG6lwrX2`S7=6pA_bYNb?L~2!9gX;YO=Zn2*OO5I!11>(ZVPz}G1I-gioN4( z9eYWP98KlqEDQz?HwtmHp8^6F;g(-t>VHLEm&jQ7lKS6zov+5U#+TwRR&4QT=RE$4 z596;k=_n_)RsoP|6;Ei*WM0f=cwjK`B{ky3roI$9CV$I8ZoK>JvxBMSvbPAJYnQh* z`zhM*b(Qlicg!Sgz3;HJ;j1j^YiY=0io{c8@qUG;3@~>|!p1N4a z%!oCqdug^zwCfS`hm@<@0dY8R5#lH$9?BZ-R+dIOIsDG=OFx+6I0J0=?E81*P^4ZZ z?CR4`4+rW;D0|Z?@=;f>^aO;+7tE(=SHp`&GjHt%TIl}1>cGP#h+9(&y!jQFHT1X*jT}72Z z-zg%S#Q}C0Eb;Ji{xwOkdVZx19X^m)H3%PQ#D)&pcon|yDEukMfox`L+GjE+UvU2s z)6V*(H7o13!`|$S$=FPO-htE$y~We$y-nw$#a&0K6{W_(AU~R;(zX@SpbI2lJl*1Q zcOFQZ*2AIXXGzQ(6~SQ(;qS@hP@hcpfShC3z@`#}fbNE;Ky z*|C*NGo)A9Y`kBHCwJq%*RSL}Vta$&@kJlB{6v-ms4QB=m!VcYX&TxDQ&=a|70F$_ zNGQ*h@%EjN2`zCTKIvaBQ&^oM*|UnQRgV|iw-JCD@nq*aF3^wA42&g&J$`d}*Nc3? zuf~K(^|~ZNSaTrZ!qy^D9ocib2G4ajVWzAzo;%XG8s@I(xnyHod$2avm7))}6%3K^ z+Zj*$*nWSQ^TB`866sgBlO$f9VN7{VAGDlBkn&24id>jtF#y;`qfB{tkuUFlcZq8q zidD6aP7O>Qv`>C+FNLV`UvpDS2Ct^Le9Tjq*nl>dNR~_mN<)VBq-T@Y1FJ^*kHCi; zpITSOwJzuNI%w*M!FrN9+4D4Lf3o0yHf0nQ!B7ugUJg37 zpe03kk!+i+?ufFyxVpiht&yz-i`hT;=VgAC32)DBtg>TW}KsID(5qmtQpLM|~BwD^{% z*t04<(RO7#oV+zsEiHAn@o9qN$JXumP%4l+j}#KRmxr3TrTRiU!y_WYqI*|eZp~0E zM+{H7OWM`A{3m|@p;?OG>J-DPPMvLNAK+$8C@@7V_9ZK#wLz3D2PW#ed@2I56wOG= zFQ`0l16P_2sN%?X-waE`uBP=)>;}w#`Qv$%dxa{AN!C)B}_n{aV8 zQx`WENlZ>EbOBGw-JXAWqj40@S{3Hor?~=ohm`yt;ZqLDN;P_$=-qG zt@qmUJ|m-8FNLl9hUd0Wnw)ITL@5~&sqPjMKFQ!8{IQWEYFg1$Z3^PheWX?(Lbn%Y z{}{b5HiBaY!?SBzAUY?`l$%zBqwRe|!h73aToyMsHn+5xOznyT_v%pvoQCH|wXbbx ztOW8xT-7}kIVUWN9EM6IN7d`yOdkw*FWntwK{j+Bg+-7!hwXa zNuLqim?kv>0$GF!vqY*3fcA%cEV$FVy^r0YJ}U0~~IDz~r3SIQ2E$7+evpm|kahzF%S4 zT=0&eI|9`pk_iJa+Ncb$yj{r+TxrB@^tKQuKlydMYbt`F@(aQwve>Qxd3Bf{M%t2y z{qpQl@2x~u;<)T!RfW=fnPEh|!0X#aPj`}pRvS;oX0mX**1QTxFxz!SG~O} znTSu$+8fh}OH|aC@ON<0*$Xm1t7gd_qtW>1o>5??INHDc|+AJ zf>JJvrFoay^kuFPoCJSeL9-4gyx&n$ANr z5nw?{CiQVvOJay+V8wu)M_xGwZMbnohrs*q#zve#TDDU9>nP1WSU0H-@OK+YtJ_ zP4V?9x?X_eG#ZwF9Dg9kRZlpk>9xEbA0rjOe2?*nCocf8ud)L{q%x1d8u8ok(AN!) 
ztJ}y@@1Xnyyq?t3a5D&to$1W2$kPo%UeTx^oeOi?EcuXMrzKyynvFCRS!^c*dSN1- zw$R#5m+uXIaxA#Q(wPoSzB$}AT*fluvxYYUT!|?(fs0cB6IlOFs2kpksW&N2-2QU@ zT9!Wl)&{EL@=x*JR#XaF6Q7&X78y_^lTl>)Yp+R4|MUgFEg)NM=Hu?)QqXtjCLTnl zy^buTG&H!Tqr1<&PS4}yYUsrPCO*o&`ajsreGRjg_uq$HBmDfLX|SA04wD@R_}eiH zqafa1=xCor+dqnP-~G< zhk|PryVp~8ePS!w;n(HGLqlUP0b~5?FlXJAwWN?H;4jfl^9R@EA)2Az$^46o{PpG| zf+HYv2Z+s#`E>ZpA`PzDr8Chj?E!gXAKO)CP4DQ$o6Q+cA4*RCx;~+FEOe$s@MYe( zM$mZvgTAZpCXU*Lf$UNbBGjzD@rXw?r4AIB+PUu`EK5EK+3%gWXXRxd^vndheUp;? zJo=Dp?`Rf1Sl(B=-ye&nw>Eh;G@&DLHX`e#`Tsed*Gj(h=>)5w9h0$ttl0Nc*?4K9 zbqNQXN7;{Ne3gxbwM<_oRbyxrz9+ z2g-?P3R2e|4iSF+?m(6)Y#-tg%rSD53@Uq`kmF;jJ*#(JRTCwe8{37tbYzTBCK6Lw zIiALPI&JSl8=vfO1@m<%lvkdP3?^c!b`k#0U_e)!{OpT8g zhr9G$OX%pjs}x$6yeH?x)WYFr&_i3U2j8BPE!v@CSSYOqu2fk?V3iM-y{S1a^c6rBtQiRe4 zCxQYez}%pztXxB{9f*3(Dd)6!Vr@XE#Nra1J&mxiSxZ$z4l?!9%DgQRQl|sO^tDg9 z=U3xkiqaWIW8k?pgcaxfL^|8~TmTiws(!B5sOB4x>fYx}*hxrZrJB_S`=B6F zr%Mf{%@M=T>A9uer2)Cq^Xy3PC*eOlr!2$5YFh5qAUfdss-O-9Ogfzp-*gKLxc{dA z(P|v+Q+*~FSX>shEDfXq^`StGu$7N*b@=SkYKW@QrRLVp)MF#GlkOHiA?Asfb3 z#OZWhJY4I%8OP#&Ifxm-~bS2 zO<1mQ#?bLT!4_w;nR-`QN^kM4z;R5Xt6gwiHIOwkDd&Z8on_iDqdx~SnkWwMLO18} z-~W&tWAKQUUMvMe*D>z-1G!+JZo*c{_2|*=MoQt+>9P3zgmjrUbKY$1wL-MNi#>mm zfG+Q=|G4OqWg#-Qr6~TQINJ;QrqZ`Fna#AHn8AWmwQt|u&2S!Gf{kE^g$b{{^6n>$ zbRQq#eJ)Zq!;YXL(Xu-1tBDW9ABtgDNZY4aGq3NyLB73J_cT3J_CHkC{xpGm#L}HN zS-;)9Ga>D1 z3B8fB3pvR1;BF;>0OpnYM*ob6;yO#OwLX^@^u|%cdry8%#;F(HojffP6E{vPU0JB* z9K#&bET#>vpig?OR*zvgwicfYtqXI$S4vw+gPMTx#0Ddo z68b0{Em8{*3h8D;s^>$lNg5RA>w}bB)5?4;RdchM%_T7pTA0C4LJ6ztbPM=MwRVH_ zEw~ot@_2=b$;dheDRiaO|3Oe659d^a<#mfL7kzqDhY71j13mYGVvSwI`NwvqYOiE+5Q8w8*$QWsQ3*n7c79fXF-?vZMn4k1N`uZpk zJg!8_%gbTdQM_X)hc~uK$p$tHq|X90w|1?X@ceia38IAZFN7f2NbjRcFHnO$FRjaP zT1lATl{wm*L9!R~cAye#olj?OAS_m1Eoh0_3|p^gO{yOp>b@>ELXdH_`l2>F5}H3P z6;z%=nqaYWI(yqtCX}8NZmnT?+M3D= zR1J1Xp_j7h7icP65Dn>=W=e>2JzWIlh#oeX*m}?>r5*Qk)ucxq%Y}A>g?iFHKY4$%RcbU~dCv#zJ-U@0 zFO5$Sea?4NFd}0kFiBoONF5MXJ8a5of2B0-6cehpdAEH=>SW*sDeCE(mo6@46We{w znew^xjBDdPcyY}Q^~)2xG?8{JDVP1Hr3-lykm-(=cP?lO*ey_iY*LV1hi{1ec8CXy zGZ;{(Wpp4pEl84l61Q%lnWaaoA5@K@wd+6@Zw?G25q5eL-K5m*zT|wlpO$UfOMPs{ zU&subVDE%dPz4qWf4rJpt66DpFjaP~tsXVkH(+&h7XIjRMFyma^4o1$*T`|*EKy5~ zHT>JpD@`V6Ye(#g$;Ieh(cHBU9g->=3uuW#WCKbpBf|{M`%cU)n}zLD}h@o=%+w{?`y_CCz|7Lp1BqK9sAsUZNOMR6Xvs{3||i*{I9;B0T2;J>8K8 zsC<3qk69ek917v;u?|rsN@=Mz{Ap>D5fJar-&L)L;bok{=0KBlh>xM?13b)5C zE&2L!`KH4O-h;ab-aj@kO>}^K4f0#{!{zcZqr|=MMjIeESFV%j)61t9r>ngk>lF{K z(s3aVM5*9YI9z&2VOqILzrPBRjezz`nUU5sonFUmqsFV$s*QH+7c?Luv*_?VE8>PtqhZrxtdwtHqtt&@B0*qV2QSQ$P>{T>I! 
ziuNBV(RldGEH^DIC!wqstu)|w-bNl*QfbfSJ|#Vk8r8+QB2CK>NYm2A9_;p!+p9^1 z)(h3^HcQ&wN-AwFSJ_STi?fFNVNdGP6wag#PKcXNxwx?K$@;k<>ZFimSc&t+=kmxw zo2sudrl&A9(mw5tNduOas_J$6+CR;8)+ga(x$>i80PrMTJmRKh30SO0hjho~UP*_3LbRT+zzZ0U|33Q?NLTt-tDJ0mIn9^8oiD!nIxtZIh=qB#Hbx z^}!8wJeHC9n(~zGiz_S4h&DwEy1#&-aev-bwGJRRA+1fJ;VTYgrkYUFePH!C@Ty+I zpR31(DRRK-ahaLNcmyw`dfNE5@ukz*=`t;X8XzE=Q-F}R0LX%0`TkX1{`EFW80gc^ z&8?yJ>2ckAup~#RvIE>WcCsqnx^S0PZYZBm35KWaaQ&1| zJN>|JNZo0iP*MwgXY^oY^&RPK$Pw#^AtJ^iWJyJo)|L`pmAfyajF zX8WwU6#9ddrP0`JD0_epmi?9zNS?Mr+Ix|~Ril{PZI#=Q+jD{TdFckkcm$&Pz;tC5 zC!EDT%$>Y5bZ+Z}r8%Szv!X1tt}pL0LFpOHT6_jI@hH1We5}cBeoIiWo0=i0O+v(* z`HL*8t7C|6&zC-rQxjZ165@hjyL%fWuUB^(m%W+(9NXpOWjw3BlZ8OROpcvF_>jG=vrJ2=x(bmZW~H@!ToVrt`R1RCO)pd?jnnKRN2mA*atu1R$21x z*pZZ}S5JmR(i!T>o*yf3m6u!g!8$7FNu>GOg}~pBX%XsH6qR2Q(y8hD^j74X!A}mm z5YTM+V7xyNeGo-YNk?JJXj*bJcCm3-C1JUg_d*cKU&|^SgTOGmj^^5lNu*g^aKv-8 ztlCE>*v-R&X`)*9@|bmADzau-9y2jUt4jhx2xXO1iP4)rTJMjPA;-QZ5sTCFSnh)& zMs4n&@mS1wYP;RMuOpy4D3X?+48w*QKe%YwNk~P!U8?&7xDC!cH%CSO$AGO` z2;#Ue?f#7iwBFs4Na07Z}+D=OHT+%?imL124T~LR7Yxzgofa%+$OD#!FatR$k;P>VFf#Xee2M#{XzIJOPQS<0^Bci`g4f zk#7`23M+JCqKV-jORl)ltBfK<>ZH>_CtU1;Uc1dTNxFFR#k?zhr0frVsGE3{@>rAO zN$fsl`l>)LE`9nA4*wMEp((;f-HJnVKlP-vyp~1XDa)a12W^*AJ%6n>HrZV_DV+F% zbtyAHJH_)GvpUp|Wwhe;q}P=h*dL>DeZDYF(_=C@J6+r~@bw_U5e+E16`Q5=$VBg` z&$eeZqH@wxHCr!&qh-K2U(`>3V8sCAk$3M>C0$;VfaxG8VlppCC` z^BIcy&BS7wOifEh4)t;HGY>^StwJs`yJfU$t~&{m9!scg35#G9wS9ZBZf`9ewLJ^1 zp$W((z}*6%Iv?J<d{=z30F4j7I&63I*QXRik|^BJzC$nM znJutTfc}zZN~|C4xOl3#)d&O{AM+~Ggk(~Qo9|mml3pe zi36Wlcy-luPuswCLbbcL|EhPmWhc9Df_^-!|LU8^nq;Nl>1~cA;hBU3Q!psMiJ7%OpBD?L4`{ewmWoVZZ^AB21z$orR2NFKBk+6S~WsL+Y1)<)dV~ z?b~sPJ>=|huVQO()z%wL={#ZGp+>!#+T~TOa(_>Lla~m(#@g$I&#=5`iQ#O{Z}QBz zERj``*!)5iVH+E$bmKHEoWbe?C@RU-qNf^}LJ1Xo*%)m!YY`zYmh~H-kQp-?BI z#(6~%bVoqp=st1R_UIiPts?euw1j4e{`pZXpc37xNm>*JOOHAj`Hb*pV4zV+6qkgt+a?7#WWJB?irz1tps}B6 zU*QSh6Lo(xn8Dx1o6utzK%D^1mlorb$-mg?cg0b;2xNdI{Bpd>p-$a{qxq&d@JUwe z&Cv;vtuvTF#rP9b08Xq#t*AIZX*pvg<5k&33KyE61)kvB10qjxwc; z^v}ND_(CHs&auAVA>)qeoul-zZds+sOS*7s(4ta zrnWE)W!)hL*K{Jrj6BShg&xhXyoDBJr`TFgp63r*v{aQ*&A$**@k47S#^H-nh|B5r z(7=^v@%5gQjs&%%YQQ=xKQH*Ck99i>_m0zbVNi8KQiO(b>n71Je1cPXG=1DkY?VC` zb&U!AMln6N;O4ibT_S2eA(8Q6vPwn?`XHC|Q!(NykLo7DKNK}sF$PCbc9TBX*9nml zEkN@Y^^GT)f^wPP-~_%pcBRI%hpC8i-!T3@+#9j2GI|&481p%MWNyb{FzhFp)vsJf zEOio#aJ@RYwmCAWH17|gzu%Gg+5s%*#ezgOS_Rb}S6RGFPt z2Y!VhuQY6ol$J~ypl&os!eHH?av_oR7-c2cGrleBJxECY{DH|Szx&=NYd@vAhSa3R zdi(~bJVa1-s@Dy?vV9bfQSBp2U*yYxRmm8qu}d=7?+&9q7PT%2EQ#?(JA0KelJH z((}&dP?B?4IrNPO8!@7^w-L!L*Z5N4!S*OomIVL5_uh{N{^RNPZ~gqC;C|8i*QB*K zA#1`%>pwE0Tl%;;b=-a(j|&UFLj5o&0ECvYzOrNv+x2C)Kl-dM93tqh{w2Y_vf%Xd zxPM*5cbmr}{#1V_(Vg0908+!#*rnPuZ|mHbeWG4elhIlIp-Q`c8)&Q12{kG$I`eFp zhcXA7)!Af+-x7C_JLHCu0rL7uf-+my{P{H`_xR+qOTohyvaA&X=ur=}6d+Y(KRwr) z-W>)2H6oh&WO*h!3{x&j|4_3$%eM3^yZia$`(%lS7W>)Qkp#V-tFvKWaW=Uz1(x97 z<}ycR#D)S;Rv?beKti^~#ry+MGgr+OpqYoM5k4Aqfs5;XyQuO75ab>cnms)&w z)?ooBtOR!ZdyFM-R#^hlfR&TYonoK>%fMxEv$5b;4FlEHV<;g<`83Ol)>SG}m@iAC z&o7KAG_vB9HH<(|tw}l=r`P>ok&6I=wFb>586phorI_G^vmvSmX#T^n;NM){*stUm zAN@I<9TY~5jR`(bG@%I}r*wmGLW21>vK?*Rw)3)ms08V;cc{92bAtmz$Yopm9L!?00xe6@AGE8ZzcJ~9xGpv(?rs7JTBpVwd?X$hAiPF$} zt?@lNjLY*dogGs&jyUuqT5D;JR6m&~#Y84!A!|Dm!)VYJ8Z4k*qV&pUSAd&4p z_q%{np(9z=$q6KG^`bF(5hm_<3V~D`!r3aA>w~-25bE8L&rM~)yg|N6zTIKjHCjZq zS?~pQLH~Qx8^V0KNdj7-zuY@=m6869$ELw~uB?b&@^HpG_?q^P?_}IVicO#4)s*Sk zdPMM|I3$4r(uxjbsT&b~F?^u1n*k!({HfiGH+$Gnno@V`Q$sDMd%|(GYh*Sbrx>Pf1oc#k!b#y2gfc+$ma-CVKmB5(OII`eZ^M zLOV?P7&ZS9c^k{3{J!GUm*prbz^xsCBtxg7w$aqp&)WXq-6_GppyY$1q`ZIcZhh#7 zW{WX_;B=bBd(B0UjL(x}+v$7;uB8NAiBVW)=KQp|BqXmwViUc=F8E#{Hi%B2x5+0g 
zaMG*#Mq~Pfa-gzLzE8{8(>1NgR{#J2=JZQ9CJA}HqTc9GP}61ov)a&{u7R7oKDV}Z zNCe5RQbtXwdrNfI7t;P*$GOs*t@+G?w+*_2G+mFWTZ%Ch^SUXL@bz7E+?SWF3{Mhw zQpwN!T#&x>{)@wJJVoz5o4K7iovr?`gxaL_;iaRz&!b6uwZ`k-Sv~#z1a7unUtg-J zbKaVBccI3oy>C2)>b_cB-^7#^Le6IQ@YfW<^$}c9*^@^xU%uX6@wToGtYLg{fqlW* za8oTK`VxK!=&ux!-8!@I@U)LGhomjck4oFsV}Jc^79AVT;40#CAlL5E{7uwlR3PbK zUDvlO7iZ{%1>NbLyHghV6%t=^Ob5Fb0Qkq@J1x5&V~>*BUK3WeV&4q7*^`q-&ytGC zf;P*DuHppz8#h{NCQwz>p-%x@{l$3glAt<6`u5XOmT$38`3k}3fYof9=%*^K8)<*^$P)3-<9M6nPPzR)M zV8P?V(D|t7o*(jdvMF@)iRH_n^Z9jX!@DIJVU;f>Gx4LoW?s(`4kc>mLFhX1>)u6W zc)OS-FNqkeBj>AB{9VaBS||l6Q1~;{Ohy=>K-WOMLm1!?Tr-(n8pGy#FhX z{iQl%tvJ5~XPN!aIgmGpAH4?H?`h_p#R)S6?#Pz){RcxO9LdG4GN|SbW+cU{YOWBX z8TGFFp4jXR$yX>Ri?3M0_(R)H6VRD>aUxu3%|P+TTd6`!<(|g(of}Lb#izT!o|WU2 z$EEYPQ{C_LKmPep+5gc*X+T-VS?f2SGeqZ@0t)dS4NOz#PM@~24zB$At-v2K>V=Ow zOJ2o(UJq#c<NN$H)pmUed;axF!=oQOn*IDk zGp8+2K2oyQNDrm`7`<&Z%zpQKm&Dh7)T7p&UKMu_Bbl;Kx$G~*tAttIZ#-M>y)~2E zcMiIBS)}=jny)gl(`1De=Oc4F%Kh^Y^$rZ$|3B_2J$p~=L7v#j#R7LV^QBAgK8vxG znZfRq^W{PjgS1DlHnMS3-*{?2E%cj-DLJGl#>bhvk^SDiEd6=1so-tw&xfwCz4N3T zMz7qS@<@a+7ygy)R_d@d71iHuIyZ4eL-|low!8R|aoetuv*Np|Qmf$sbFoW)ch|$S z@gtkfG675!Z*W^Em#?RUn_gDlAxg7?nYL|BYKN@e)d;>)+jljN!03mq2nzK1XTsf! z>M#*!f~qTPkPMLk|C?C{QU} z+V9BaLD~)yKJHILo{r*|zOnlCGG589qQi{H0*PkqHs~Vl`K971)>Iu)njRmSK1UQ-z5XfURrxt1}~X4>ZKKm5=asz=cx`tR+R!FU45%6A*Rx*cr^@&tlt zK%G2Evbsf8%8~@+c{yxDec8yCJVu9fjj5ws<1)#OYDp$?7PpmD*&83^A z^1NDJ>kcgbA^%lztqmSTy8DD+sgqQgw&B8Z*z>ry`|BuUg#O}1B5J4@)b?w%v$IcD zg-WomyR(RfCKL?>;3m}f#ogpUH2s*?#nle<<{{GHn(I|9$Y+>|rd>Uw0LD%=|M{12 zr5gCqlD3b_-Cnomb3_t8%-wK4jFi^zzZufaE;zD^u^fUASiaRa7Q9>XR4w81``k=G zjVbuS`|&?FMv{slJweNu@aOuXwM;{O%T-OfjJt*g3)%-Pwacwbb@m_=Cf$MgWb?P{ zOeI+&J1S}((1}73FhrkluW1b6W%Vj9$(J$yzSP=K zl$C&C5uT&58wdpZiMjbHuo~hcwDVPTROcL!A?#v#B_5cnjmmxV!yo@{_5QXW-#STt z=jL`<1Hh=#px^t(^L*BCy5NP%djFHV@{N_sWBg3B#eL?o(st`t;g^-}x|;p;i_%g5 zP8t7trWXA_W@;T|s|Jv4kHY6`P84gI$o%5#|D3>dWeg*z9-Vk{3oGO1SfIyRILHt= zGkaZbdp@a>HN=2T4k6PtNJ&VQxX)*Aa3~3RJ`Fal)iKtxDra6Db$k5pRP}HB9HABa zX2cfYA{1{l; z?SOt+$nxs5&6{4eZK0JGlRtaMy{O1b6R3tl{lQ#-cp^dc;9q$4uZ=fjnsWZB6-!IOiUS|L18l7cH#Iw6LpA3@}Rt!g~#^keasNVp4$rCU$b5_XP zmKX0gUJ&5+r(2wQ1;}0M!mcF)cJEj54)OX%TWGg18C2`3-#terFZ(o1HzHxjLbKl7 zs?a{ZC%$19yU=*t4+!L1{Nq{U`^g7ESH}=268qvGTz-fPQQoFZ zf8aafAAfx{|8nbhCw^t5W(WC33>i)@%Rj3*QQHR_m&%-o|IdE0H^s}ocy@8Qz}OKR4APnqN6rE2!AUm@l-}qGCp393S@$U1vDij{NI*D;7An4u9gws^AbvSvJHVf zRXWqT00NJvWZyvw%k}ccWK&+6kITt(&SUFD46yi&u%>89o2W%!Crld5p$vS);(t|V z-jETugI~f36Mudv(o?ks%^!EXJ32IXU0VN>kAKw3sN2TA&M3-eDl1^{$5W5>jPVL@ zv|UHTj!(wTY%9Pz+DwnFG)TN#W2!>|(ZKX<|6ncukMdq+frWc8yduM;?1#XVOS%&u z_Ul;9c#50DkjC4)g;`5Ys?^wm$LFtDH$12+>%=v5Hky`?cx>w9IQzDODlES~KNB+1 zJ;DsFJj@$cveE#z#rahER5Lo@!L$*57)-32AY$;1hpGLOBVqG%N&S?gxdlG^nw)@oaSLAT}<;z2I(piHf+SP2Vh6i$g1O0V%{$TT*^_JL)1HE1-iKK{fSsj+9yEjZ8BgD z-yu`vWs2|#*~L|2C#FS{lDRKWv)Cru{9X44N_R4RYvGv7KUYFZ9i2^=0DgfRU2LK^ zfzoOgj_T6oiKs*8jFb$hKk!JFN}ulnHVWkYX)g2pXVa6E;O(1 zYYI6&yYjcB(zC|D@mMWjB~(y5r=iV@x_tQBw}Pxw3g?dpx-}mkl8?w={9Y9l_~Mwx zOWMOL`P~mKejoLBS}Q9Wsy0C1F-{NP?)@d?bb7(IK_Wm{=}Lw6MS5djA*&AioZc%V zQcyQK6-FS*h{1Kt5RPhje-Nco!ymWZpMVmNTM#BID@B!6b0EL?CVjGM@77MM&Wc%N z!WWNkK^K-q>NlAWKgQ-@sPYk>uPUlv6beUQs8Va9n>bg-G86~Dd~7mg zw_&*HDX~dyF3*;#R8b!6h#3|bC!1GV4BlcEg%_<`wGQhkU-qga5u#@XnlLp6=G&MI&yNLN)Xj015Gnts`n##%hO38)}D-nG! 
zw7NCu$%;@PJ(K8#MG48#t{)kZIiwM-^fqtW;DlUhwYG_s78Elxi)GCIayHsR^O1IHttKU2}dFVRP&9pK;mllNmB86A~6frJ(n+V1IQ5eRvoY{w+7zg9{-cu80LyMBIMJfVE#6l-9AxFC zL0#HvrXp$cglFu;BLyt-TE!d?rGu}kE+l4);HB3s?3r8ZdbBdfGFUg@`6ONzKL)q) zqUz?6tB?_WSg>A$R{Pe%sBD8+RMU|%>^A#k-Pu7~f^StSvfuFMBy@>4Msn7_yw9Wz zya%fNbYI(@`Nq+EyGAP($r|JpkuNePYkb!|?929-LCGbam8{?k{jfSUt+r~dOEDxS z#eNv=Wcv+eI!B#drl^g7EO&gJqr>_gJS3meD}bk&haJO&gFuB5va*fxrikdRYVfU2 z9&(MXr2)6x-mgC-d5mMb-4OMX$y6l;oX&0RIk>bQd^Kd>&sX=F!moTmHMafE2=98kF~UWhdeM2of6mPNO^n4urVS45EBXQ6YS;1%?T~^-VS_Hp(@fHjL;hs z5a=AGibdgL2)WAHIYQ??4l|6+bxhJv*p&XM7#XF)`IxYIJfY`Ee!d9Wlkg!r>87AS zQhR$Lpv<@<5(P~7YW{laF?z&$$e!Q~w}qclJGrpzp7c}u7f$`}yC(+$kjU$#3ca1e z%pE1WKV>BZCQskr&*3{DzeZ5WUt`hXyH%0I+JSa%g*Cjq>%+2L?4Id~Aotv-;AHzl za-10@!qv0D>_#@P&;EhFMly$_+}sHMl5m0&*phT!yG;k*B>OyL?W@wT!xx^4Bh_3# zSHG^HX4uAyQMin5L^&}d~d>B*(0b4EetrTeUS_uV@qJtqwhq-uL*nSc1P zk96{_fq&autAHI$R&;&pv)}r)V-j3QQ(S9pbw1B>N1Eea@n4bmsX^Cx#1bw;CPa;D zmM5<^z1pv~KVZ?f5vxc}x#g|Qy&CYe(~a_{RL!4T)w6K34@E{VY<6 zfhe18x$dX;3#O^YM+BNWgl^vdZIt^nh1F~76<+yF4_V$#T7x=0I^wSdvautGt0}-1 z@b3ptE=5H}ML#o(;(avgqea@GCkpre^1@JMlZ&34eGptzsbop6Yq;(vo=@gFXIV+Z z6khSMFd*3jm2u_uzqb*zr~=!Skqm7e#p@ACO%-eSzDHMcW4F)$%QxpOQp|`lyzXl6JMA^ z4s5lPZ4}~m>pp=>m?pM2ghqNx9kPN>#VAh|yADM>l=*t293zaCu(rYWbLd4wE;FsZ zk*Nwd_%0E(vREx6LBs0RsE{-It8PSC;>*@q4}QzMu}^&}A54nN;55N#f>#oXt)SlO zdp)*J-(@c4Un2;Q)K-hmXHJq<=AhqrXhk@fb3vf5PFQ7Nh{lTXM%lVcAv!|r$U7d& zK4sTG!(!TL`u#4has^`bsS?TEzh+cprTWPlqS(UFj=OKj<{ioxGs@A7Z zXI1Kb&B?lXR(U91_OZagm?F^bw*cT;A`RiDEpYYYZJ*z7UsQ*-&*4O(f4lPcCgRBH z^pH!4m}`x~^|{JjuM06f*6j0LGq=QH|`g0{d&)<%=-|BL5s5 z{_X4MAf?<`guSf~iIA-&ot2ExKZ^}{0S;*K-_|3}U0duSTov<^^r2e5Q^-_dPt7S zr@n~ypA>HVz0`mAJKsWDos&{z))w_x;GZ#T1l^&}pNF~Q73N;3VE@J;e+PdB_dLZQ zY5)F7&KPG}+oq}m8A6hjgMp(#ZqH0e@;WdtDz4qduZQb~~BL8XJ#6gr6X z-n+q@ne*J=%=0_v%$alUy?@;E+`qE3_LsH4{q40^*80l(vJ*?H21w=Ws+$Aj0QMHf zJFP&IU)7JU+t&?fWr8dTVWjX>@#GVRjZ*E&p|eNq?b+~?gvbIR;pjZH3O5u0yJ`bE zvKpL4pfh?;&M6ONn9)76q@wp`;#gXvn%X5J)%?DL_Whv{^iLwj(?V&T6AELIr8Q}>3yZ19~ zW<&&W(x<^XW{cQ}Ud8D3y-09Z@(tJpQs3^b#b;yIYZq|##t`?ybK`+5N2}bdNS+xj z<$l*UkekRPbyTMH`h5cEm<%3+a8-b}AXMwjVs0yZftMr*xxpQnI%4cGGz7bV+{ zN1+-I=qm;$vYJxgB0pco=u^Tj|%-y$c7agCrbcMNi_c=@%$B z-~6p{N<|Kj{G2MlX3P~mu_qvfoIl)neAI*KI{T>L+R(f@aTjPz`N>=!>GPohC}(iH z{lc})v<$+Dvr^^ElanPaa?7QVi*;xHXt8x9pDfSFVl#W=?_^rUx)Xln{SnE#jd4aG^_VI=@8jckZBpd4 z_PW;RMTb=?!Peb1YK~cmy{n(hZ&Kwv%K|DN4=L z)*&wB1*~b8jGZBUQ9h)3w5*sj!EXV(>K;ApTabgTHA#W?cDItE5$`sQzip0S{pfn6 z&bkR-FjYBanf~rC6U;y1lyrvEZ|NDz7_1dYT(YgvyD;HwMmJ!yLbG>(-!=o|Ft42| z+FX!l=Z6938JLc9(;=$`=JIjcd$%+m2qp@&bZj+Tr{*yra=9kYSnoGEJ$;x1nbR3D%lZi$ZY>%)R{gE}dMzH!C(i!K ztNl4Ku7H0nyy7(KM4D$iGh&{;ahk_vCjkBZ$X7j8u#s%wqgM!UtJe)%9Qh4xt5tt5 zgg_0AksyhVXVz!wHV1#EcXs~9zaW)VoVhaa884OCprgd2tJNesWt$?iK`#^TIM>(%e~_rI;3 z5&fou4NJHr)t(x)BAq#&Kp|} zq#zFH%gTe>*!lewqieOPDr!tvpJOm;g;6%pM;L%W>^f)Mp^l!+?Th!VNu?&1HeWfw z0XkLfT)^?gDvhwl52G0>Hqs9hM2qIcFrBch*d6D<^R-;09`=e+rWPq?q5*a`-SKce zy_Vbl6)^Suf5u1gJ2HA3Xx3u zhKSv~xO>yX>HOo<9}+HeI!S>t#Y|glJ^huIuK-z!P1$90%=K6!3qKsFEiCJYc$6Y~QPsm1b+%S!B z29_x#RE%r@a6^N-CX<6J$QE-bLoQ4?j6S$vqPr{NoT%&*K4=H-6xH7JWLH98AO_bM z6Xhy^-NyJ=M9{#|%JwE~y}oWLlboIeN2)*(JZS$sS#1F}64oj>N2X}cK`p1JZfnr8 zs$e>p^3qzDD6TPb&N5H>wcsIc?#s`6(qM#T_}Z-5oQ!l%@~rB6i<2=*I@c#6JFQ4f zKovf_^7RjPSFOSIr)09`_H~%am)s2)9#Dh&=%Pi}Z;Y1Nr0A(w;xl79j=s`5-SpOqu;*?6bjGZmY z0a;G2j%u7}m5N30d@ApT^=>)8DJo7+Pz@;0cn}&x>G0(~<5+Gz<&6Zgv83DZAOoRK zs*(*G8l@&fIL2a;&EWlaZ#D&u67px!1E;uZAXN-M zoz-#pC(zuM!qG{@2q{e=M62Tjrn7)@(aM>e^R(d>M6s*l>U7i(I$Dzk!m%R}ACK+h zaQWsAQBsu8p;$eE>K8n%HWDBQAP2G&+-M&AZ#v}7B~B+?PH0+s;7o&XY5bPy=lF|Jzawmy6Zf;DFq(IQL0rOH| 
z0M_Y8uYL=_71(4YDy&z$=#|Y$?{@-%Nvws9<=q#Le*S4;;30v!_n}2?kjv8sO%~1x z7@Id@rCgw|qIdBI_@s=&=X0bJR$QUmIn@^-VBHt4evhzj!UKe`RHbzTcGS6$au-;)RNtqT^oy~!X0w_q*^9KFrSDK=jEdZ#GN$Kv-jBp2CG9-myQsJ7ycx@8 zYr5yFZs8xB(Vl!q`W=k2;zxt5H2gWT4{r77O7LVw)q_fw998{+z1%(E5}qc z@nsnA_27q#wJ_^XR2t)Qmt`(H^E2n1)O3Vb_mK3MZFFBFeLjkC%PD;ng!m>GSJqSK zAK4aKdR`^5g}WUL-YG@|y;~mnyW#vNG}TZ;9T)wk{w32=p)p(P^U%rd(hS9Is;2G3 z-~C;20yhV^L}QBtOk;^T!SgQ+ql*K(`I}Taol%q~_42T^NASm1H%=4Xi}oCidaZ$| zyr^W2=6Xx%C`279ebX~D$Pgn)6lUz9qV@r5#3V$0y|Jf?Me&j&{6IGNN3%ndRA-uu zr;2*Fak;^`WX6x0Pm4*LH=Tg0&LM^;3kOZ_H6fuq@LDIl3H#~99VulQbHIh% zQl|=(xw9L<&>W|&!arcf6Vtr8ohM=pSZlrak>g0m1kz42ZskbVH%f%6T4N{Y_>rp? z3NxR)TLjXc`Kn*}rdiPyLG~!>HdP@s!D$5!wz3}O1UB>#UnNB&s?ExWi1=i3b2dGk z0VSr2R|kk!UytUr0i$<(;xkc969`xxa0;KC?9i6wDI^!;C_$(_cw$HI83J()y!XJl zinfNdh`8!F6e}+6o+;Hl&`({dD&TzmyJz^_J7}ezs{wOgLlo3nF*+ivCX$TxVHd{U zP*^QjzrtMG{vpGZTkk#9xiFXuE+6xP7p zawGLC#}gwB_C$WFcV%%@g{=1nDH7-185PUJILa6F^3FwRlzGXpu?s$G{}Be%Sb-ov z@42EKtKl>mP~CQ+S87{(i0VI{4nd+yQg3M(K1UiAI(t9&;0&Y>I_@ymR~Owk>wY_I zq=KITmSL_L>G5sexj#D!6}HntN|`a(6k#N>2kG}^l+WfV zp#`$JqJL(>3Nn?N!=kjS2@_x~guz%4z3*R37UgjH=k#QAmejI2OZG?Sky(MIr+2Ky zSi3uR7ryN93RTWuVf|bUUt*_n#4KAHk39@jvKvp`&;R`Yr*fPX$76BnDF1PGax=rO zZ=zL&^a^hCB=yiSyy9@4{^011`&ct4UT}22repI9gjTCEJHYALp}uDL_}lN^pDff- z_-vOGAtf+GM=D5$eAsa~`RNc3K9`pz@9?zSIdZk|52*%9QNF>lxD6Ka)@>QWPmZpPZzYlA5r|gOmx;H3`R!pM$(t*}oZe*^i1i=T>dp30LfBc=y7{?& zILJ{2K8*3jfb3}94;Fco^%dbCD$YhzJp+ds!9W8J$!!@f0L`Kl3b%9?wsUQ{9=8z- zF@(S{;PC0lvL^6~vPHIJQXFH)XH&pOO6%Mt_gKw`e~~+V z?_-cxhOav0d%!4SQmZ}yyHmenw9@pe6QW7{rKy{%WvTgS^k_{~By8pr?`=-k5`iJ- zg}MC*7R|(js_B}qyk~5UrMp-vz~tN46NVc6dU`>cne`mGd)LfCn6z7tHX=UBMvqi; zZY)Y}rT_qLzUN?Dl5WY@@}icT&E5f)4=y`~M7ak$yu8@aNw@PcDsmTLyBoAvCPP#A zg3og{^|0O5*=6VrE#&N=2|lW=k}>)X%G;P7j&{}RtZ;hf`RK?4yTtKwezV7bVme=m zZ14T~6xJ%jg!E`Cp1gT}8Hoadl$`+Z)jAoO2JZ;8)2nIaAkay&PQEe-p-|3FO=9zu z8+HBa@Z_Ul)_-w$_WsV{$$^KD3NLP_ls<+a}Blc?&gn^KjH>!Jsswu8!HMzdC4DS^UX zm*WWma%IE6A?@3DdIb5n*+tW zte3*zIxxhn4!oKgvRa{a?{b;85a`OQ5BQI-Y6ps3ok1detXqK=4L;IxPoo8N2Of^; zjC)11BfOIaIInKRWiKGUPuetv7473QFzOc4Q|vvl3X0X}n2IjY8k!8Bo7mNWA`lNq zudVm5#f)CGRq7p15G@rwnKq40azE`=DujP26y4ueNi8T+W#i*{NjT!-6*GHP0K(zc z<>+1AT+=b+=2fi@lm&(VFlqX=Vov(Jg{TIn@HwcB+gKn%wqvVt+ieTQuyzXRPzP|E zFvvv|SrTpDHEkN*q`9vUm7BU1Pn@23Ht$`Jv~n~ik*JbZhMcNO{RVxj=LMnxJxfECL(7~W z?#lNbeQg>j;r(;(DAE0kv1N=m=KZ<~Y1a0`14fGfgKm|(s=)nj9wI<(E82pZS!}bo zSrb(;I9yD7@Gu;Dc-q_8ZKBm{qI@!bb#{ryIb1OhZG%DK`>-sW{?!Qs?WP55A%zUZ z2?k=ay)~WVrtkQ6cI8)&3#U6T=Q>j^xpb=r{DkP=Z^F%>$71x0la^x&1K(~0BL{}# zcN|nD9T%IH_52RhFQ(s#EyT{vwFN1=BFrD;4^F$#w{|7_dQ~je&gj8Xx;17y=Z|Cp zjA)u$0TEvsjhh2qltUI}*0;l}j}Ds1+j`n+t`tlHYTWuFGDP6>=_dTitrZc5mM+8N zmD|mNr}^%&l2!-fI(r^_8iI2qyhrUfl1n0941@K!1a-1`{m4VVuq?3|>_4g<9SI9F z@+}O+#cv6M)b1tl4}>CyZ1LeewH1vMR+Sc=r~E{0Mu=#70LbS%M{@T5SB^uxM=vc~ z3XXbt38tmQQad%xSFfNplDnMMaM=XPvI2wFVE9dtVD*3)UMX|Z&&^Xf5r0{d*AQ`o z&&AVELh8w9BW4;Yce-La7g~93p03(4ZVq8rd5NOH^=8?&lYDM4^@6mZ#COyOqZBp6<+wsU!CJ#Q zkQA^l5`k*5DBf2VD)v=A*9vrN61}dfE2W2DHh{yKen{X$J%}#FCgb}|mSdh4`rGxF zaFw>tvA65lsLgG~UaM^1?I2}}#gA(Dr?A93rYUD%K^7wqwg*IwTTIv? 
z(#E=6vsXT@SesElm84A%t6gK=#XG5GrYhGk_3S-?t0e}=`3#UvViNZ{@A|4^q^>xQ z4ewq&Ge+))+TdNyRtFyjP&Jy1W|DSZ^WH(4&JzXN_t4794MJu4Rx3Ino_S5x{XpBy zN?l~N`WrG1VZ4!z#@sP`zgt+!1@SSwVdw3gC?IMjeWtm=N6SfwO;{4=eMRlqf?e{P zoNP5TbEQ-8d{O$_V)?C_n5Q1%g|^2R2yZMrS@?tY-N-NJ9<_1t%M@*+%)}P?4EUMUW>${Uck5{;R zIVD9*S*}h56yr>JpBR`t+v0q;N(I%sT{gadQz)NOmnhk${>A?sGnyy8quMag&_mE` z@KY%oQpKdmLw(-p;(}c5#&t5;j7DBC^?1Ha(x#Y1K}DefxnjFt;h;qY+2s>|I=`YH z#c=Q`0ORAM;0SHJlANqrll0D_jN&8QFz5gyx1V!8+s@>Y0JyXy(B`AJ&-aH$cDpz@ zgA13R-}?fv*&Vp~4mq#6(3gDZkEC*i|yFW8~Uvkh`$B;{r%RuR> zoi@9y%}BfJaN+=83#~SUkNwV(HT(7VtB^XN;VB7I-b&ALG!+@`w0L5Ijgd`0n7@4^ zRn6e@R}P)ZJxi@wqoB)FkqlVbJGwm}GuOX0lMSaZl8wV+qxkiGT|#+9 zJW5sAgeoWN-gH(gw9JB~cwR-ho~6Klwj#+wLG1*BojGu6%c?Y!K8~Cpbg-@9ts`=r2Cx6fah&Bida8W~Si4>n@;Hx) zSy>o;O8&~Brfc>$p4q?F^sg4rRgXlW_dz}Y4R~A({e}pX-jvo=-VWEn0r(`y=|mdo z1o%9i7tIgE5`mNWr&qIyv1&LkR9K!RiHd+}AoCE1um4jT|CxN@f6(E7Ll?hXf%6*b zwoFf!A(R5&Xp`w4b$fEIx-6S$Z80#l-l&qfW)uE$A!u|MG|%EUo;xOUYfm{x<)EVF zU>a`rD--Ajqgoh{6r#Kkz!qBmMd-V}&Dsj*(LKcsI-l&F$krW;0Z=U|c+S0##cuEY zrTwV4k6>m+`l>lW_DYkR*R&SdM@`7hE)dXRTF>|ZT3UVYX{2Si)Zj+H45JN8_K?!g z_1K5ZRbF6ke23D`r!yZ^6@(d>;3)qHx3gv>dv#W{=15X)**U6dPHe4>3Ti@2`Cd%lr_w$ZTF?hwvkHi@(Wq0aA z4DB9;9J+XOYdLT%u*|)l#7aL^hfpHPsse!zWlLlf|3H;j2Mg{o~(>^+M4nZ?2{ zkFxXQIMQ?X$NFnIkq5p%zXj}d)(EDoARbLSo%ja>=6~)qVAK@3@YB3aAPc?W?mPCC z<3#;v?X{D3qZxd`sWn-AMN}X6#$FuL7n}T<9N%#B-~6_Z4p;8G4E0YcxwXh zLQo)r8do}+?nw%nlA#OMF{EjY+YLjT&leY8t}50JJbwGM`nPY-y_gqEORm+kr$hAN z4>#wIw+(Yi8yBE1m4BT6tUF28<&JE!B+2^koK;JI%c{;l5KG*LQuJ9mR!vI*8AkM9 z6Fyx%uRRwIh6!S+Mg};j|LgDh&u}@|JG+*0DMJKB7_o2&ye+~qKRXIJb~bYBEjbEz zA65kUhT;8y2AwuEM2W8E3GQ*u_!6SWa&L#+p@Y{u6YyN(YKRyXaUrK&@g3>b!Hu>44 z-S9lT==4uZFT-1@NP{oq&)7M5Y4H5Ve{M|t?~%GB&F>j&MCq*1Q)7ufNElN-*Ia2I zD-d0#RfmcDAd4I^jBl6nuCin)%%V@t4!gzDOfWgCtHi-l`}7{_!XzepqHKcm4?so! z_n6|ZHKpZHv#I9$IhKI)$E>~?I>-G*MPTjKg14u`m56q(4`<%vB^Bp}_OoMadyvQ6 zcsx=gO7!H*1)dow_ZtXJVm=u@{_8&f4O#&Ictn`B{~;b6*SmiMGyL@}9R5cmVxr%& zrx()DE~lCJdMS-Lt05ykU~`c_t;GE*y)E$QYyW=$ DIx4Og diff --git a/docs/architecture/assets/IBC_conn_handshake_relay.jpeg b/docs/architecture/assets/IBC_conn_handshake_relay.jpeg deleted file mode 100644 index 8c6964c4c546e04993abae014c8c0094a406e42f..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 336221 zcmeFZcUV)~)-E1__Jv*&2fpQEBWcl8p@MOtQtYb?wROiVws z3vm6+%E!jU#4XOl_luy2h{!cANsxq)jDWC+&{rcz&YU@Oj_TZ%^XIP!U1z#3^xys* zz6VgBK5cRydh7@*;3)NxW7J0uYXMAvBY>mFC};tHeomY`cKpuZH@#aGZ{S`K0 zF>xJ;u9NH2@W}jvPU7fom*=U~HSD4i3Z~Cc==8$zQgUAl zS@X4*rsh2P3#L)<)aBvJ%xR2Du*%;}u|yDeC4I8KoV!(5?Ek{%WGCH{^2~n1JIMB_ zw6%ja6QIwtpB(K*AN1e);phi-KQ4y(@2XuYl+D*@UU?i&}#oXfCR+ zUeCYdkx*~^_~^~Q&oG>LlR0S&Jx!8zVbHuneiu5CJYr+k4G@hPSP@;o2&yR&@OU`v zV((>=FZU(#%tHu4H2gdiGn-9X?aGhBnAv@*q;ATe@G|4hs>}KWTl3>|v!ij3nnK`8 znq=PcONXO%dEKC`PCHSoQ~VmQrL0*cXC5^k0-CikIWEiv#%}Z(B+-tIYm%)SGQnyK z^Hb95?r$}&9DDOLG<7ZUa!_H5EujnAbi5$vPSl$Pm>sV^X`MUhm8cZeMbM4DYuhTv zOC)8}Cy*bo4(l-N&Oj;<>n1jEE-zKbBFEH>5Bj-lMxYxDF4VWqDO^>Xw*DNFB7vg`W>cRJ&En2 z`^rN6A1u&V1pm&$vFxMe@HAEV4CH{ONLxV2>6vj@xqxn7W>?!UvDL1OK5JFxeq`=T zpGOY?(g%?n)vHS#kLKrO{;1A^8?ZBEe1KZ!CZ$Ibq#5n~r$g1F2{W(In9G=35Taq& zJrR6LdgWc+!4ar>@!P)i;Ctn2&h!$dH%{G9V!lt!^s6Nvo$j);efV7}qJG@;?!|=3 zT^5B5w+-jSjPhQtb1qLleP|caeOhF#(Yl7sDAV{jx!u+nh6@hPHnYgtKs10(6eLB@ zhwh~xHDpTG{vv;$r2iDVZerkU0i~RV2T>xXwt3lV7RU;QM2=i_y`MX~&l-pxU%!!_ zG0^MMF7LGfYBwCEmMHZC6g_48&61>e+aVww^tNJm&7}NT$W1Qh z_7i~%o;fab1x6e68H9gurOz<%-ZBv3vfP2{7;#xT`%IZDk?QNa1@pUiMaPpKOI`Uh zV8XH`g$fYfg)r$vi+ahiCp`U;Uj!NRL!7H3>kP>)ou@QTsQ#-VpZPP09a}K0`u0n% zIVD{%qj0aLiOjI8BGz5bgLp$rQ9}!_w%9+gpIzt<7T7=zSb&LQXB)3+vc)XUFl^vp zSDUSX*7xSLKtjt9^Br`uzTMk 
z_GPY4j`v~)p~}=@;zlz_G-sE_@A@O1U$`c}m?ooM5cNm$#J|v!S>n3)gQ{Lv@x)w= zZ$duOz4l*>-IsOXU;j%ihuaBJodu{ zh3X$f{apWoW(LafRfYDV?LCxlxKSCn(kKk!3acB1#RxjMkHvT=n zxc>+fwfC08YC2!u?))CCseeSG&?ei%?O!2O{+CdDA}1a}yO%BdUyb?vV`IWc{t2?y zl7Dtqj3?~bCcoHW$fkcl{T9*3_x_0+`ya9XHLd=L^?!C%{ZNm8EwitSp|aRhvS2t+ znz{Xntd4rGP@k{uA;85%==u2;njv{*4*k39i(52oYo={ zoz)Hjr(Ztx<3AHKF-h!cOV5)1s6CXktXmYKUlOZLMi^WwFM}Gh=(tS68uB%?tp^)K zo7#y>^39Gm&RtUB(VFk;!b0Lag&tX>1S;9Sl(f zK5$2`Pd&MUI$({;Zna7iTM&mMTCZ7XFRXg23@`jrddK4#u^8#KOEb62AjNIwToh9` z7}(W}pSXQ$(qB{_fleuCx?4}yqMj?7&`4kvkfjb%tnDNO6|NfxBI+}Wd#6Ge+T86G z7C38!%O&Qn8|029O)0BtD&(3kni)=2S=^fvpc4$ zYe_Z?X|@n&opPFbaRDvXqQ8SRh*a@pH%rP^Bp4=W(R*D}K(r!AT!W#71VSve*O(nG8mmq^Cm3#ym}GSa+JX>*?*P#FrC3J{*+ z2=Q&UBHFz>BbU#6HPS0snMjO>W#3q746kjXrlW3E{hA|fAnB|wuZK+IgyR+06)ZMOIoK24Q9e2;IDOg68@Jw{- z2N1P@SVb8%m(tHeIqjPC*RBg1tGW6@dUR|Rxo2lnj^aQ62SN3hE}<1(9ZAzk7VM{Y z8*;8L22E<6TN>a&Uc9HX8}C!!lyhtDte>+(OdqR}=)#K>z}vrc1ptmp70)c}js&YK zR_4`DRcd8Z?!#eke##Zr-|*wX+6QA+rwVKC!%S>vE*4MW!Wvn#sBpuAeCvRa3 z2CwY^Pk0&Yh!{%rcR>)z_pwVtJjJ*0g*k1y^2&b2@AT-X!?GKDBw8G%HChwI=503Zp_3+OSB_iL_WW9@Q~my|7LjxfpeIrmuC=<^wjk zTA*{Zyu(-bv(#Pgy=WuL}$H5de z9~R$S2x&@s^L91SLtf3Q0*R}UXamgD(qVKB9K`3+lOmGlafq;jg@W^KP;mkGWi}Vf z@@LQ{*d47qqD4!Jb4Z>Z0k{>TQi&So+@hL=bhECc%+CB=m7Bwi#D(k&?v7;EQJt(g z5z1OnTLq*8n6B2I$ZE#)(jVNm&{7V}s-!<%q1NvlsgE7HJ0zT$t)sUXE@*O;ccTx; z3dGYhE6oFe=y|M_^y8|dskd^H<+xq@Rf?YQqy-3g#ib_ps{||>H9fB$g{AwdmIW`^ zU|Bo*P0=gtJkqJodQ4~cLmb;6YF1PosL>C!Q~;jh;k!Q*rEWi6az(qilJW?q#r1pT zb_R(kC-egv9#lQq`0Opnmrxu(M6GOSjp>sMXLv5a`gh=+7HvuPsm(mERv_B(j6#xqNvLxJ~NK za3+bxks#{nW>7lEo>yE-E)V~9oqb<8su9usab>%)`l3AIQU47uByq;wbVpG;a&Jkl zxSjk(T%u1sIeoc#W$B39_kE{XmyC<7%D7^cHJ%Er&sln-JtNb1^{qE+uH9q?w03{r z{=ZwkpNg1UZ|>lyvW8!#4PzguVJNL{lR&R$c}f-otI1ZZV$uLcHwt+rt2Wn@C zcdDqfg>^w`D{ktBK9*C(KC{Y*ThyP9Sya10lg$iP@Bo)kspXEfdP?n5maO|h_1wdL z!|AyCROa4%0WBK0HEouncjJyE5gNmpsH8X7XtZ83#ttql8VVc$)9vgvDlaMvW(nJ> znHt5m+iKy4ClFdfSlshFDvU!|&u|HQk#N~plkqIym*XHJREvjw2))J*3i4loJm;Fp}D( zf%BmRb4;~T`izmqM#E=)AWeg4WpLPt)<05D!@OHcOZBRR!^Dr$1USQNdI?k|O+v(Mt|# zi9cuJGmy}OJ7oBcFvx~M&T$&WbEK}07FO20P*f&feh?XmtKt&uDDz@ghC!i@KKVl8 zoZi{aA#Tp7@$zxe#SKEi90LPqEBjl)6+W3UvOejNBe`)zqK$qFB+EXtI7J;h@zyCC zrlT%eoHxty(uTcVY~9{hQ_C)3O3|UXQ>d)ywE4Zb*eD1bJ1SJZ71;)nO+@VW$>n?N zq#D37MqDzJtOkuv$(GiGG_+!7>zC*QUwu>?wILFV0+MDLp!@4xZ#y;&W*^vzQG51) zsAG(&k{G}Et2{Hz$7z$a)%p3vru!G+Flh_J%*>=L*x8qrI%nR${FIl!dcRa}wxd16 ztalCUlp%yIk)Iy^xYhN%GfH2zC^Zv}HA|#o^NtH7%Qs6>i*METdp@9DD{~!_-y!vS zcH5?+7Qm2Z0gq&xN^yZP49=x+b0WopZ>2V}an0D7H_|BK;cjOQJk|9CHVU<SKL#N;**vgZy6U`^8HGd3{qQ2bmxqAo z2jm0!@i7jL0G=@y?Y!LJrWc}3k&sykQ2deR0-e&f&EPE0FIa3{iwux7ItJl%o=@Z^ z7MiXK%EFl2hNT<0-n=uijn3p|Ew3@rn9;8r&KISI(jb@Qmsmf~PFL2;Tk#^iysbZ4 zI@5G0kbxQKxbP?UlkTC~-8_qN&ZSa8bI|VaXbl5)d^VCxHc7NU4lM74qsehcz0=vZ zvENkOe*Hle9qUBYg0*T1<4n|g6f4E_yBwSb%>sVW5iT=w#Hf0C*(W$h-wr`bA%J%Zc`bDS5@8BPZY@`MAWaF2aagtowmMT`C*6 z^~T9wzHPmERQz%B}ubpBj>xMh|7Bgc3Fd84Ls-tsf$XVbbLSw2FCE$`;s8%lQg zQVgJ~`1IdRa^JUavRKC*?E5sD3F-O54*^vsLNvycUX|gI07l90+kPw4?JL*Eue@XP z=;%tYsbU-xf9Wmhcj-OA|NXZ%|G^PcPh|26;abU!8J>5;>RE%eCTi)!u{+v68QpnA zaVoN_OqWl0t)s29-PQ<7`*SU2J-zbG(}6td7l|h7Rr;$Q6BGIR1{U3Gv}z~|1pK~R zX9m4@n0lpCemSf*Fc`+}m}Y%IRq3%=5dLT`!=5g#EoqKkw6~*$CNX-n7oOqn5uBr! 
zuDjDa%admA>L_kizJ9~r1Gr}97g^+4b9Jq!58e&xfT=K>xxaDcfP$`xkmVN*12Z$< z(G2*XH!6;hw4P%KjuOw9aC7Gi%&MyQdDoMJGqs?T*J1S!k)P@WD@rrNhi4ugeY9$x zgi(~pY@(FTvo(HX9H zW-aa1sQB=PxFjseMAD{P%5u~PkBJM7>kZ+5VOPcY1*@qX;AmhR9T?)NYHV}eG6aWF z;1Z={oyX|5xO={SHbI&#nw>rbgohKF#x+0d<6ZKheefC925G(v!KH?r&=OV=*KmBp z9j)_L;>GOc>b-96W+$ARdh8^}x=ke?=uHgl+378w*-Pg}a;A_3CIOhr&Z<+J=pw1NFuEA80DDRa!J0sQ^*DVwgh2&mCOm|2Je8M@0 zB!R9SSyW6b-cVHmprLtUw5v%A^YQ+*MmDN1M7LO5$c@CCagXj=yzeL zjUA*iNUft3S*fSNK6yRT%g&&?JGS08tW29cla!c_YWo<55#`+9RUbkvXVBNH)2TC> zm)(oXU}&?sAAP~TX7vRpd3r`u5Wx#F8cwp4ax4@484&QllVXls^H@&>b;vX;QE!Io z3lGtJL|+{#(xGK}i`f+04vx@&)=hgbIU<-3E<+4lym{pQ-;&sgGjY6cB{j<6OKOts zYCHy~Ojji)m%}Iz)Qb0c187L@#UBR`0d>%%&xe2xZH?dVu#|Rs6G7*}eD#B2l37Uv zwIem9{6wkng*0hSQnqCw(M@%r_hYJURwTG3AFly28rR*#%-L+;EVkKq$Pz#m!E5C+ zvJ`Pww#7M_Nc{?%2GPjTRztMFVscYJOo6gP8WukZ(riNy@?XP@j^xm5JJ8XB`wbGuFq@8A8?vS@0=sST?|x280hiZ z1Xj3bK|6$;f;Ch59sBHd96A@R;B<+dc32=5TYjM)3Wcf;AxSRy4k`;$Hd6n^?OB^- z+H%8rc#NXeRYkKQA;+0`;A_E-^#3 zr{T;dnXd91ldTsm3vVfFa*2Yf%X!)*=^SUpb5$_REmc=PoG^?Ft{~WeyK)IqxCgp& zszn=Gi6hv7k%WoJw>G}+DF|H7##w}2JrOF@M2iyUy-Zh6P09Kd)YMwk&d=+`V{#Lc z5j;1<7W6b9rsZ_~)LJY9B%7ay18o&qwCF8pZ0Lc=(srgy#z7ph#jJd0-%+IwHh{f! zHYaj;+7P$w;O)nJQM9Zfd;4bRjxjxZ7d2OgYsqkf;$Ec5Sys$Wt$@_i;Amsbxvce5 zf@=A9aoB`9m=Jxc8>*=@vxigXy_VGHh8ggPoC-z!F~T0~XiZV&b~6Euq}$)t4kEarOOoouy8f_vFMy@?9dd@k4h^-V!2TLu{?A5`&owk|dh#Svz8C95itZ z`eB~0MpxkAwYZn=2gl^(FL>w3=JJntUz}rd=@ty5Jj~AQCj1f@^c*}ZiXnW0Lh{k* zkR-kM;Mi-IB%A!3A3%jTg|BFYR9)0|5G3Cuvy$!-M0#`ujm|2BDP_j?R%6k*p3aZ= z)zb!SZS#6((r|+>kEI}dcnSGgA#(3$1Q5n`W^p0#3_3a?WWaMbUWiWG%7Bd?P(W+J zKQpt^x`Y)W`B5|%n>|w}%IfJ-J8D!yz%`K#gNqvMr&6abCy7wa&4~$L#}C zT_3$F>m)SxsI$L<=ibIK1e#1e1;!X1UlF>O5aZ0!1u;3x6Ox2X3OLmuuX#cWh$}J+ zW{7Q|1J`?<8EEGl?=sV{q3(7fd+BI%qB3#mxf$^mv++t@i!l?;540Q`h=mUeerai&=Lt**$c=4@wdR> zz4oe#93UnQ(*+Ry>2CbPm*qfQ5|fbC!VAphz>3-aysIIl2aGbA`?7tT`G)}W642~E zE8dB$8w#unv?mR4BQ`GQhoAMgbo1=(7?2!)l`$?SOfUR#)poMT&Fz<%dcjw&^aE`c zlLp3|D7>`cYWN`l>bfkypU+U^^il(Hwz!8au2zrLiBMX-U?nj-w4A6PJ|yn|+hAs= z-Sz07OjTGg3wRj6l@)^RaLCTGC5ecvDg_3HhO;}3S7(Z?*{y_$RrE{h%RN^}AuZn< zL&xWh2m|AC?U@UngXL#-$DD@lp*CjWMTC11wSCms(t0iWYiCxVh?HzwyR0-!Y#!&8 z@Y)asj5kmDQ_u4;wF-nBH8w2?R<7Ihn~wdYxLRFF0zRMg!7ei#DH8rB z8ybv^Z7CC{b(;Yx`Z+24l=5j=(8t14Q8LN_m=~TMI&3LHuW~TCb`smB!ekSI^&BW& ze`}`GCf;A!axpQfa9Z=yaSXA4@lZEX65b@B9k%lCnER>>X&)rkIKA zY%qt^T!eOh)YT4LR9aqY-sdLN>pG@0RBQ-Sgqv1i7Z_YG7?MpL;%O(TtyVfwzE04BZgGMsn@|?9N@9f} zA#MN|tE1)U&C>{@Pjm(*+>>m6x0d7+t~7zT*QJKV;wQiw;n#|dcsA87 z_YQ9KgS!3hdc9n>(40+hYSS#eN{lPGW8i=yMJvt1J%@^J@mbgQq2|?C>v12lh_wVn zlvH$FS+sqb&SG5a_MS)$P9di@ShE3F4^jLXV0sQLjqw<;II|03iqN z{MQ#yt0QCkpZ?;_oXKXn$ZalNb|!lyxo-E)H+Vol>8RHm&D%>WkEVUTdCR{bvSsjX zv~Rw_qmxT)QSvtP+1#pfk^SbZ?-YX%f8w8G{qzkTGWi=j*S6LplUJdS@;e%lD4pH2J1A{cb``wdC20-)&#`%>=+U+VRwd z-)%GfW&-fvf!D(k*8m$@UvF#3LTr;#Golk-#0Hhq2PJ*yKq8`8-v^*i72oR`dRJ%tMDs(X0aEe(^eq=)6eN2K-S4T#M zxHoWvpOFq6Tcao_RoSDWBf?g00N$huS6Jo~Nh%*?w}*8Sy5i$B_i3B+yGd;;A2o6RGwGwc5K z#ZgqNndn>VTE9ZKEOiOwiiLrJ&1P_*<@uH!2M!B<6E(GgI{>=hm{0m8#Ho%S}6 zTYo%vP(Cg{K4$n#$Ef(@Kg_Iv!|`P zuXV~UOEOL3$ZD0)W@L%!L+}j$Nyd{g1--Ml#moH~rr4<)AU!UrP3K%iN#_?pw1anv zS>WaGU0nF{VNB$oPiRz32pOS1f@?=Uog2FUeTVvhyy09(IXPXXo$m0L!|~}QBr&m8f#FUCWbojos2~dZ`MFin<fo#KsWYm2n-}l9J=BC<_uWD)T;X7*GOAFn&s97b&;y? 
zu?c#e1JZ$-JBPVNgq~=Vk_wEi?n)bWq`EI7Kj@{Ba_Z%Kz;#=lVXLRE6`eJjK(&b= zRtUt!Lk+m8S|z!CCMEWj2FsI3%3tAKY@ zK9YfA> zfL$V-Loj~gKwi3Xfst}AhtDZP)Dp^%kG=<71h7S|fCtL$RE0LoV%ylgsb;-aU%8yH z>|oZu33y9;Wq6~3F^OEGkHl6^9GhnlYf0oE4kkdH_`)Qi)C0hwnPD9dR!&Y~%Bs-M z6eeZjef`!k0p$laFI*%{I7dwOZAS5jfVc-k$9UHP$FYCEQje`ZiX-ji!q>2C$l?NN zMzh2nI(Bbq0|VKdtc3aU7g}<^>j3@N#Lf6|=QmwR{zF$9|7~3vw(2-S(UoJve-j7a zpI9j3#ng^IM$wn!e-)OClD|1QBc@tc&j|ER>a#8UX{bN!>T zD{~58AM4+d-e@-cV=WeO*Q+{ta9~nQDO%n8!8CyK|D%8XgyigaKiVAExi{rDmso8A zW=St3&tU&TjkGEXbF4&v_38deD4LW-ioT-<6k~q`16z1QJooxEfhNs`g z8FC6E`#5yqNbN3T^dW!}$iEu->cTt$UVdL9`744Axq!!Rzr*~im9M_$?M)6EmOq^j zcf>9{{tbBV0mG5r-+ceocphb<{2lJWs$EJ7p_o3X8U1&FP^kD9mUpk!+!Rx;#}yq5 zqLBL4K#X#O%U5~yam)zcIDjjxG4=6@h!U*D`)HF`44_Ohv5L{V_f%@-H=zj~+pVxD{ zSkQY{b#^ceK1g5dY`;#qNp^A3e^e`iQ4}kN?xi_JpSZJDL7?uIl`z#!d(EbEiM&t` zLz)G-%>Wb9e8IZ8J6Z>MX(L6tYX!kx`=fNq@-X!#0)#loo*95JmEumQ1^Y`93|TQO z-0?esL)UNlR@hjxrI$$!C||04)f>dLoq&h&qMVR{$dYPT9#iMIipWL7L%?$6zE*TZ zwp*}615?XR16_l}u2c?AFx{pBG%On)+6jA^0NV{oEbDTE6t)f^?Gk{+?ktsU@^j<0 zy+oLPf2Bz#50J|xa%~BW6b0Al@V@_4v`7wL8BTYYuJukE?nWf)unh)wSJFXYPc6V| zUv{I@E%|L-vo+@=5c&xFowo6ZKJInJx6Z{2>eMhe$4eUW=9g-OvUptPJ+2>xB!if{ z&GBz~74PT;!PO`s$MYfrcQNi@XmJ{+4h{_fwOGdL9|*2aXU`QtZ%aJml@}H zwGhhFO{{9CNgb-s`@!I5U@r4C68SQ5Yb1pGqnZW%fY-o_5UozL1hd+*Ye&O8-xnoz7dPkZ!nxB*G+V`B^TOw0K)mn=YDhMmPBYFeiE>WG7Uf>) zcCwvCUzFEOxAqG%q{vQBf>%LcK9QujoMa-Xk%cAp_LcbUJbjs?1RX5i+0)+*=Oe!b&m@MHQC|y6>)G*1e&6EYtilo+8U6B;tim)`c0@(?>lduMg=|`KoVpdZiav+Uya0~Ncq}X0kG#)^ zbPNySc;KJ2gNvdRgHoW23G>okobA4qpc8InhVm#?eY6Fm3bt}_61?@PYM~fmWg|=q zCxyJN>C)gV%65)hH3!f;R)M+>0jQUICSSO0B@#8a?^V?u0$AN@q}N@hHy$4ie#Q~q zox8ifdi~WO_gL!Qv9lX0cLBB&-}1@#NB?wVzLuoLPM2|2*Q0mB&S5ul&WG+O&P|AH z|AWfwIZ`*sjFATE>ioHH$1!1VKW4XT%bk9SGD>i4cMe}2=3&b8)##+Wgz7LYL-5L5 z*BjMf?;??0%g%n@!TA(wz-t5hSVCZ&1MZ8m{AFk_UywX=Q3v$lyqCuSZ{J`ZQ?MFZ zfDWpYPJIUCx_;5%^UL*MaglVHhc>E+D~T;Bh@R?+eiMI4vX$&WTM_FB@jxAvnYFYQ z-ooU7v^FZ>(;TbYme{7P!7R}h^Wgb0sdhp{wd^n*=KiJO5{;GfY4IOt_f;8TrFw38 zBmGW$a1pS2lAS!&zUr(YX$ANC)onzMx4Nm<>OpJ9obsFzurC`$_eo(PS`Dw%k0S=R zBn1z;hj;VeQTMT(H`gY1>6BYqbE-*J)gV2Pb?2)fHncvF!sWq*U6nP2N&dzbwR?tF zgmnV3EF@r4N183wmEXr^&0_*?Cm=3x!0TV_&~P&*)S{5!P{z6%v=TaMQGfSw*(}oA z&c}sXr(I8ftzGb#Q;r58!Pv>riU7&0}(l{pz=r;TqbNZQt6c$W3&<(3Wjs3 z;8hciijeu}BskJzBI-ncP@DR^A(|{>?XQ48JxMl3nI8gb(g)yXk`P6o(iNI3X1NSu z)HIS)=Yt3pYod-tWD9I+T3BOn;$oX{cTB@eNj?~&QWQPF;j6NLCa9Dl76x1fFTXZK z*EiTm8ct>y@X}S1+hm$Ct<5uv4jn}^_(HXuaHVmDn;WTN=Dp)p5;G%Jf%Q7I1b%qG zmx5Du+3@E^M*V$pNTe(-QCX;5#x1X)hI!Aug@dj$YRcNKI$pqk9%hSu;O+>=7|+vf z<3ugw%M`^(4jDoBDr-A=6M`GG?LV7-@>t-s%{|pJc}uX#2!yCB`Zy(+FQV&Us;m(4O5i_qS*HOy1*c*;o{o6qm>A_Vsm9*jvAL4>$bM&YINn0 zxyE&TaIV*ClA~PtJOJ9Yx7t+Pa|qD)KvYe}xZm;rVz+(>=uTo-+RkA9Bvf@5ppx>p z*uTIrZE*;Y@GhlYqvz`N#;4w4II(oF&+JyVdLRIe=IN z;+~G!5;&^incJ~6(*p1X_Lu$;Fl0P|TIvQw(YNRhojSy!%(n7NUQRWdjYREX_O!TJ zT;XA%^@#ZhQ)+mB;&K`X5vphVWfe+TX#kq>!FdysXyL#&1iMOuTx!WRx-bZvueP9j zadpC7OJRR2jTIvzO(+k`llQ?%oAwzwzDy*o1{p`bcC%vJJh@0*%~6tfv9XPLR!XMw zgW%Z)ISc*3%s^~>hZNmO-{7dgHc9phtN7hIH+4r0Th_e0(Sre_J}}gQ%xX5ubw3H1 zc~#`1Z5W8U)f>j0)s0kBQ?`@V6}FBshoK+PY0-I@?{F6<43(-D-sN>ezX>L3j(ljT zY0PZrLF85##HtX9rN~`{gL_MoyHJ@9YGLA;j8_A0o~A<6N|RVDD;n!!zQe1lk6;(y z%`T+75MH{d(QJyv3OZFTR>T+cD_a*lC_Xzq=9QuKGFqD1LoG}4OZLowTYqi)bj)!m ze%H2A)aaHT?Ep7&0zTR_DnI;Ua1(jfJI!qdey_jjUcL}V8#F6%dvp)=g(hY;yK_Y% zW-{!vINju&oz!e?x@sB~>#`xff$Ww1e2Y{T&UiivmbPGL9#)>$pOuKQS6jDl=DGUH z0zM5_N0%ftHZon7NRXsbO@K`D9OeHYVpuL1!yd2S_@)e!}TS@L8Abawptz zJ!k+)BiG5yZJ_E|Tosb@p^m0Y8-=aPr(&Z%Mg6FpoSdhpB!#*g*W@8{9~O#M9m*A^ zK0)iG#7VZ&z>%|#w~{T~46ZhCegT!MbQ#UmU4&}Ur=-s0;xAJW!X|m_qIP|{q20{} 
zt%h==G`5pyFhUD&)EHQ)r0ej)2t+$pS`k?k*jUkcq{9)K6QE+p^eFpnJq!lVPO9;ESe_*Qe|R;ZE_oNT-#Bf`Bzp=ba4)30hiS zS~u4KfN!rZ@vFNR8~mIeQkKu)-lbkZW$s2d7jlNI$?bpRYIb(vf7zGEOhF9UQ zAZ|hQ7nX-6O6qQ@G|!|es6u-6!z;^+CAXR}PD9O=XS#gjMXNL&hWFrzuQ0#+9Dz~` zQBoi35!pcbp5azniJRRWj~Z~o>=X{c-DUg*f3xh_OXU`VMfICc_e8SS1rAi+$$Ljv z*Ngg0ijuFPV5LTvpGU+SpDrrH_;61gD|!}G9679=<+QU;#U&EIX)tR#M67JiAXP>0 zTyQn5=!MU)kgBsUvBxZ@hGCOWGY8V`o+J?CVyTPbvuRYTrY}Iv<4ZO#RAb$Vj`zO< z`?Y96=+Eh*0?E^z;1?lj4rE4M(Qif)lxNV#imXBz)KV)N_m9 z7oj*6jQ4+Cn2Ib$s0(bFO&?3STK4|C=Kre5RDimOzsOnT?^lVD)=q$-u6-*li{&b^ z60!Iq)%AdO3@=UHzG zd7W+v{xSgXQ|WI^aV`;U8?M}QZ1AD6&bG8*_mUsTjcUPbhvf4lbv5VFUjUCL2<*8x zo~^V~KAHJ6I)B`bhRsiQUE^Xin#*|JhkZ9D-99-%$D{z5znX;W9v`78a;a-AT`V(fS%7L77@_1Ybos$Ivxh492 zP^IQe`;;PMq5iXtWG_x=d`DxVH?RK{iO8X1nNDQMo%&LySb6zn>f)UxRa+>*`<{y< zFb~%E(4jh0ak>m#JOcy~6taW$hATk2Ywa&tZaX&Vb!~6>3gsp7hGn4)xqJc^uyt)k zH`Hz15=!K!XjqJoyK}3AbkdA_@s5vdH43&~?#|Y=(`&n&?o4^(DDXvxb{SD0=t3Yw zXLU1B{xJm)A207U8l+FU}5j2n*O-&APu0kCnbkY4bD<@Q`n$(qErYbl?C5tH_b}B;oebkV;xKyKA zJlN2JvCb!;^~BhSbA~=rB}1JNq>Z_}rRAPd&88h85u}tv8|Uk)u&)fICFF_qH^#Th zS$Ja>+xDz5EUfJ&?kNO?o^Ddz$(}65a<45KWJHi|S-*QwOj7VF$&i|s0ZDX@j!(-C zP!=BzEO8nSp?1#=ti#Uj3lQmuXsld(FC0S)bLW1?H&y#f;cSm>KbPQ{LWq;aFPUCx zm*djxFsfR0LaW|Ct%mdVFaj$($=-ci7mH@&87GzEUD9Ea50k-aRyFS*P-iik+O_bY zAaDo6pWDU~n>D+txem%@g@))m2R`2lk#`mL%ze?H8|eOt%#gqCoYO@T>27WsM}~{D zg6tdxJRsBoEm5Ilt`ZxNwsLlbNVF7_Z>7{AxA~v4gqc= zD+{RifIAQWRm1~M$&w4^?)V&p?rb$KD!wD=nowr3xv#^%)gFASP2UmiSTw$Q(mv%+ zo5xuQJI}gIo&o(JqChZ86WzjBu`g5)wR}XrNiup&B2t#g-Y4P;V>i z562tPUR;z>E^q(LZmWMgqzT3kZ`IN>06rkmUF>(0ifHtIO-<*Ylbau?|47zy;w zsxs{35XxCFawM^DnL%d;$&e^Y7h+e+GiEB93so<|K7<%LuF%AMSfnzg!>7+tQ-dTA zu&yjmGW^*>@G}o{GJ{cVB&XLKzJB|t7I~=Z@d6M^s7#R$87uF^D)38Cm+#EJoF>L@ z=!Gg@zG7i+KS~S68A&pR3GJ!3cj8!olXnPE+V=@a{ztXX zMKSXGQfIr1^1F=ZdCmjvG-fnsPMX5>e6By$i!FPPA~lx(3HpHmyCZ&Bdb`~*_BHs zJb)25u+9APkFc4~Oc}Bm-d5S*3WEqqRFPu|>b~OUrCLHeePK2xP~k*PAg^IoVz%^Y zTn$gD>f_Jiyevt1p&5Q<(aHw6oVy$R>gfXBz|7_V2phDiBVlRlr(L2O?<-jI0J$lQ z*se`7vl)jNMM5=z@dbmJ7ilFJfKcGtN=$5Qg3DREW}<7+tY~H5{SaI}!>g=x=Z@HV z=`^UJ3)f1MQ`<@{xUehDShgAL5*M=Vla4L9Gh;_5T-3=u*^>6T#86;=I<`%mF|@Ep zI+EKy=+qXCP0?kejGb(Oc`Fb|KF5RQq1wZ%+9JG5!omr-vbF)3E3vb)L%xl&aG+JL zG}KtWiA7WQztn&Y^0_r=p*dVHw1kUG{z9&{5U!B~FF6XNfJenY-ktL5*rv8nDDT|ef$LZ z?m66YM@(VAgGJdMS zk=*(v=M2( zqI3bVe$@grqI7Z{ly~i`=RIlDOBIab7<|-~u+q1O?80`;+{*@SSaa~PP(Aimc?7k` z0~mKKbcOY*2(&tU2)7so*AGlbXZgvgOb;Ww3@!9ymIUN}Dj-@{%AM!alI>Gx%?l)X z_adk4$11-RObmnq zk-;+%uo_<6o|m54F3o-;=&F8?(x%{}$6NGvxRBHH59q5Rq)oXb-FNJWm}1+HR@s^o zN>KfrLZ4*a8u=R*oeioYua()t)dZTC;ApcD57(?5v~JQ!)0(@iC3~ENrFUANdb&bo zOQpxSljnJ64x17T7fgvGKtVP=f?q~+EHiIZ+{MCqY6kSl9eeiWaeaGxM?y?&NU(l+ zKNb&dS7dTsq?Q@=);5jG)^N0LABexfr5opkgA~U#j%Q|LV;A+z;Xt(2Q>y?NZ+ULT zgNn{u8Krj$bngnjZ7{~2%c~z0ZN>&A{D0iNXIPqP+wbd4$|R;tj4jqotP$JTyJ^@( zML-nWsIj5gOT;$G#6(ds_Sj<=5F86CDmt-?T~Q;pB#OO6V~IvHo99_;o_XGPz3bR} ze|V2$?fnU^dfyzteVx~No#*c_xu6mum0!z~MZdB71up~t7LYeWTj`KgUQyX}NNWd> z_F==(LKpR+&4>*&J59Z0A3u{Rv*W|Dv-o^C+P&@hFqk6&7+&|f4#}9@>O8B)n#WI^ zlWW0`g9q?+JEeMASc#39t?}9LhAq=jJyFXE7 zIeLS?&o>YaAIF_*9F>9>aM-@?jUmVPqyy?lCea{WZBCJxRsraG^*jU z#;UtMA(=Ff&uQ4l2XC9TB)IUwLyqZ0&5t6wBQ+99MM;)#!%EO&nyPr+yVi-1vxZ<1 z8Vm0qULPs=w(HQAEtxHru%ef>vJm>C&Nfe|yZ z;v~2sG6seu9|(~H(rsO#5-zXSySiCtjxR&Hq05(5yYn{7VBs^hOiR_>f*kJ5d&sJ6 zOE(QE{#4$CZV71}cg5)`L=1N zUUYfI>LbGQm%vZkX?oq-G<}z}&h+^1bw;g1fe*_J84EQXL7)!TtVW@=;NF8|T}+Y} zZz%^_Q%xTH;~OQ!#>DZA4G{-T^1SzI{&r~+hgFSk(Qq+YsH+-uf!k^z2o>H;T7xwk zy*E&cHXDAjdSW%V=9clM#A=gTEt(%cvJz_c1!fdC(4m9;MDPh4=#0*S)^Uqzz9?fx zH+C~HSpd~rDH3DA*w{v&sAJAQVlV{;qa*8&TSln#=_IH9Q5F84?QXcRcn{)nCBp~2 
zYBJb86O{OA1Zr9fF`JamiCLX%dqMO0fIXJ=nt(B0LRGcw98Y;v<+Z?KNX&{;s7eH2 zZq^S|RC_BnpWp?z^7XRxw~KoH>C3nm)*@K#q)lOXZ6PGd0p8Tx8R`3YxCT4@p^kE3 z92;~^Ig5D?B8k4I@TiEYSAK979I>lwJq^}^Rd$C`ASFX{qT5^h({qv4Su=tydnWTU zH16OUYr@{(>5Da9@urH>h%g|2oqT>NyI5j@;H+|n{sHg(AmD7x%j%F;(nAUyJ12^3rTa7KIeCG}+&`_T!nA0~4I-_6Bs=XKIFp8p-(p8Zggsq@VO$6rMqRs8Aip8_tMMZ#SK{bk+ z+}TpraltDL198pQRu%%8=X9;$?%xfUf7TD_b3R<3J08~s&xps*5JFc>kMF0RclXYa_aQFiDKUj=tf@^6 zQ9;9L|BaoMdZ^Xa!{Nz9BrCOd8@M4F{@2Y)T_(ya2cxfm#p_!$%B*IV;bNRB>yrc+ zcc|PJ?NW?L!MKl&&p}NM6XHF(pCWS&n-}sJXXguBeP7YM7mJCDvBPP}-(g7Y%RNn- z@TcLPSD;MNSE)}NR^F=}|MW~0r^xAX`6 zX%t9*#{~gMAH;P@z|?w;!^*4n^$J-B!Z_`(DM(nB{LLR(v))X6HVX1?*mHgIBKx8! zB6ew>D(A3*#_zx`w;z-p=&@K-TDs$qp-${{xL}Ta;+&aTlEOJPK$BZGGo-L0qeu!> zQ}W}0N6S72pQAwLwqw%dLM7P$WD}qKT6(d3BGh<82UN6xSRNKsx%?ctr32%9oBm<) z#Y>Tp*zNQwswbQWiAOGBF9e_yOJ~sKks--S^UyVmgoi6mOwG4ZRM|ozv9OUvD7)Y- z$Y41nhYnU(?c|@}_Znyy^r)PsjI6-2s%(#X!X4)Z89NP2Ns%JTv{lifAWW&nOU&e@ ze761zF{F|CkGjVSR517Uv?J%ec}3*&{hdsz0?KOZtxp-E;ZL?V-`09ttqhpwfl7N% z2`-cX4Zo^q8BH&1@)?GUq(nZEBTq7gJ5(Cn)pma-2RWju33O{O74!{9=kL#iejT#R z?0;@)hYK1^j(=W7AvfFK-Sn2+;99SAadENkWS!D*aA@A(9DTsde_yAaHOoIG?FbX| z6Dg1E#pkP%El!fawiK16eshz+XQjK9KmSoZu0je`U!B;t)39y6y92mk*Klv#U@cFU z=a|HM9*y^Rmh_81bN*)h`q;i4wF6t7lZlT-0tAnt9^d;b9a4tz7Vz!z;sLFxiS-eCbzD*S(^Q-Grs6e z`S|CN&m4Nbu1dw$$rTCS;Q-9*RZ~Gpb31&{O5w*Hi=;rYMz}6J4EAxV_||5yoPcEF z_X?T%mCWDF-rDNuaO(`M(p(*)ik=nZVwBAQB$#P=q*}0)*+k|>NIg?PPtQ9~vJ|Uk zg_3-@C7n((i!pOY%iE0CiDRzaDdG-JM|*c# z$^@7Llk?I%Y8E~9^qTY-?{#a9hp8shE43pXjzYAWi=S0SYu)Z94?zP&N5Ph$4ARdz zHSsAO#bkRG2dpuzcU$?!OwnT%ZnYO;zR8I0Wj$B`euel1XUTL_%Lgk9=Q$TM*2(ya zxF^yagA2#9uO^w3^`x2xkY_eJ4F9n*Z-SdE?N2t-tXmbabjR=8x?`!|J)WIyTx(<; zzi)6@uejA183c`f;InTg7a(sO6qMP_eK?1cyA_{q2f~l`T1?KkV6}FRr0W7Ts@dZZRv3AOeS@14N?k z3R(QyV)+A4eRSF+;gs+8Dnv+o`ma#vf9vtjx8HH%RK%N&ZAvM#rB>}j%t~k1J&h+S zxG};h++W7e!P&<~!zLwttdDU}rD64Hd6bm{yk@wx*yFx)|G~_ZEVr2;I z$6pp&w0K3jc_#byH>0urDUs}l9h|oz`k9D2vvmZW;f}cnNn!^a^`8j|skCDFH4Sa9 zDPJ}Gm>BnhT=5$C$m1FsiA1a0UQkx(;DlVmsum;S=7Fm0s?;e~Qty6UkaE0#>y(x= zHYN+^kXY^#HH22jZ*M@KmR1Kehy2NQxoRTgt0q-7#|UKD7e?;E1rVa^-fL2X0;(B1 z7N6lJ!Q$SL*D7MFdVZ5?WKPbx5S3pvz8TjX&Zdmb=GB_`mBqVG!`gk2A^?f4l5Q3a ztz7W5aLUSRk65^Wz-{+QQk@Kw!5XNDC3&(AjO@P_C954&@HZ8|lGbyUwR(+9nLtc1 z9-D_3uv64ne4(tiQf_}bTqd^&sdG8ZEpLkcS(B^X0I|Kc<$FG4EK(tl1lyTS$%-e)=cY}33Y5OdJkPPbQ^3Vp#UAh#36>Qh#gBD)M=?+dEh^eC zJoJw;Br6|kvI3dtob`G8*8>ZKhJoJUavjrkR6c0H6L?Fo!@JF>*_k#?ciMTD+2KU?xZEIoQ{MY?UiJIWN zrOY~Q=g7R#EU-$?)Q9i=ICN^Qbz17bh2ExVx&0m(zBpu7~M?7gf|No+6r?GfXBaBGHroE#a`GLx6IRP^L5 z1@Gkdyg^2@=u+6vRkrp)N+EUw#X)I%Dyy-Zglv3#vn;awsS)+wd?3KL+r#C4~Sy=Xl;5B$Zh|8;vXo_FF=Hr)VlE0UM zk|nJMtG>du*6a2J3ph?&P^OnBhz$EPS?GNHhjm$8N+t~kwbC{9l>!O+kw2AciMoJs z`v#)Cm-1EsX+yRHpfDz9pJ_u*m1H`Ze1ynC65O+Xs;EdG3p7mBXPM30K-OyiTK2?p z(8i@&5!@mC+V4S6!{^?@aQr*ld^0);=1g zD<%`hJt1#92mIlyt;RgGb%%Eq8C08%1BwKr4I2_t@1LLGw>Grrn40Kl1MU;2G3|~e zGRCIgO$%=}>DMTSgW*+8J#iN&icWXsil%QZ6Osp^W`OuVc9R|_8dUc#SN$Nuye`mZ z_UlWqr*L1Jk@jRcz10}OW6ZLqf%;8wX+J!5h~_&Nvmq7)LYWOV&DIK zZXF)RNvkI|uG#B6Qpq+PtH%>Xe8xGB-TKr0;uog(((sW&PIN)ag6wAt)C}&8&9b|W z*8vrUC+uf>}0`#H)9DY07c?#v4yoKtnCyF?0Do2fj328uwyi%uV4Yj)- znNe2zpsbt)bCC(E!f9Sf!08Q~^*VLs#yZjdMRko1_m^&X$XJ1`H{;!m&y-bYZm7cp z$BJXO-)&G6@uus>4#DH(ij@cH0hGS{yo_3*gGaiJOY?osW5JpIO6zOBTA&Fk?7cQ zs6NC%l!If&BrR}MT?8*_Z~+at$#oju?o)D6i65NU0;m7j| zOsz7su8wk}9VKGFo_)PyRW?l^a9q({%u>UaVmJE%zFN8P4HGOR?D?V0XLhfLQzwlD z$4EER>P-u75STA%wHFIIXnR;{lb1k&XxV8EI*|j$HQnrO0~)=EHjf=!uPRa2`RE;rxBVJ zESFikpb=JF-#zgGyxURcnz}T~!DE=2mD);3AV*w^we=ow&0PnYiJ$QP^u|WR zW%MZTg$d$o(6(J*aOJdlzbTosy~1H_{g=w8wd3Z3pQwt{B4`bg*f zg)a>B;dpq$wE*VkOeO_FqIq4>n%XbatAea#qtO|ZZ5J`17mNt^?SQ{YB{10+#1tQ) 
zo|TbGx1fo{_z?f`@g$Do?!+7vr)S*2o|$L%3HyfN^#+mpe6i(Z?13rN?8?IXRz)v? z25!&=cO2Q;j?pa>FbL=$tl}x-;FKC^u3eIiU>}AT7OLi{=NSCFOy1s)x3!ujO|H;K zhvEjICE-TGOTZreP?stK_FEcMd{U-i4)0MFlvM1j5f+!sy_HN2Pkr0b8lS+)j(%O) z>65EduLiQ;v6w}rU&Yoa+ttVy62{9tF*#p$k8g|X>hr$_)mqc35{4Et%NuWTK-5Zz{8&kvobA}(bIvkto3yW({`u%;2W9@-k%+%2vK z!$IdER=u6k_e8-%NQvblz^4u{XDGN>1m7%NLU~DapLTj?4G1vvfeMJmo^+X zN3^>qmzw9R8R+0I2+a>|*#tE@raumMwSp+Pz{&g=997*1=*x2EDU%%I?0BKADZ} zH(XzW4UFMsP;t~S=%?eRNBom+%_xt!VQi#z@GPmKr~x5@F5c#rMSf74Eh>Z4h!7rX zhQ94qJwEia!o)zbmixOhPT^~7ONttiN=y~vHih7-3zup}_3dfg6Jt#A(6Ya3D~*ut zVFpU>E{H6+h(FLQKW6s^V^o#6b=!Bk0bEK{;VFHC5(L*eA$sbS8P1CnBD=%UB~K?* z7mPJ@HiaWU*GpD&?D?}!+TNRCcQbszvi+!;aw8#!<<0sm7g)y$2mDzXx*_`{#VgT3 zQd(y;(baazCAYi5-BqeU6{AQ^lEu{mVsC< z(`;BM&8qwl%+9Z$?am$|bjIW-)L%{3ua}>|LyBYEQl0_BKO%ht1L8llvz)FSvW9hC z)gL(~;KUyxD1@A6rCWEuAS2j71ZRp}I6I4QFMu{mI6@Fd7?9b57*p*pKROKo+V?97fcY$NXv=z^ef76JUe^}2Zl6(=J?jMC$p-mYNo->Mi9u5 zVQgEk@9n3s2tD+lY&y*r^Q&66aYI!`swMgA6R~tXQ|V-|BCbbQ_Kb&J(C*l<5(hQV z^C|b1vHKL>c1rzWLIE|Z%p7vb4X@tldz^tE^jHz3p&8G;XQX4G45Y{ttx`w!J4${m z6Kcb@!q$XKt6E?&kz>H2ZqIi^S*5F~%)`Sc&9BLG>vKs9!{3U@9U8QQd{W~D+l#sX z)dz)>nYs)X5ENuENe-eRn`5G!&rM^CrZTdoHGC$6lP5W>+qM0QMvtGXbmB zGg7jZ0b6D*OR@2%8K3k%;a7Hzy8TM4Iu<;3Zvj`0n}c;=!R>3d=OofK>PxEHo24@W z<b(p9nRh;mZ@>_*(MBx zw+=d0-hvK%E&>FTIpD)J)^!JJPHP*HTB+B)uQJT{tJA*(SRCX%4xOPs%Mf?)DNI1i zqYXr&A2%Vrau4mbbb_T_f~vhm#A5Li{mGi#nzVs{*bvWo^#*!=^5qd23h|88pa*pj z!*b!UQVxk;a2FZ9?$DLu%BfLAegjyyxmF}5w@0;dIUAQptXu(54fTaMs%)}*(tEj3 zvILSE@d5Y&gj>lOt48Hfr3j@3NE%n!gmK<-G`DO@&Ze^682Qh3#lOnm4O6^NMi%ZB ze`{5|m%KxSWu8TEEQx9S7nM>`i9z9+ufEHv_(~jl<{*?hHlfH<^CAch>2{dw_u4pAmZjiRzh1s$O)% z48T@D-ul=LZzYCBqBJ-vCg##Nc5YrJQrA#6zJ-l;3x0)FAsw*Ib~3Hetd*U(*3E^P z$yC#CD2;3Baue~-jlw3pY{t}AgcM~MYf1}*RV;;m(L-!jspc&?@_$-?>p-@qzrfG_O1g%s+vtzS1T7hI|3^t-S`;j4Zm$BiR;!+Sb!+_ghT55%1;~e-cKN z*i}$x-qWjngL6Fit@5ZB_4AB)Wkt4FGESg6Z+q@-BnF3C91D~MJA%BJW>Ypcl4|DM z5)NnSAxc9+r+@t!{_~sf z+i>dRQEHvO{F0K$gzVx$*Y0EjLD-q=`L=^g&SiCFrJz0-SZAZ6qLERNC4BXrC6z=AO>D z4EfRdG&kb;N9%5~g#XQHvV%vJpDy~cmEr5Nmm1c@h35`wJB4BaXV=2_oBC8#bi@wG zDNv2ItB!p>{SZ0WY+PPfQ5PX)%`do#^a8<7M^Z%61MA}G_*lx?hd;7Nd~4`zMs-sd zZG@e91nqLeVG`47VN+%~;J3WSL9P)dZ+V~I7H&w%u2+{=MaKuaLcPS{TeZ@YjiC9f z?-NW+yvA73<1aSC3Tq&W=wnJn?%DGI1^YJk#@Rf>lBAnd{PzB&tv8GtiXtSAi*jJQ zne8N&I+1)@e-z!q+7X!AO>#ADWOVeuvh2E+t+?d{ZJjj{l?#J1 zhno1=vKfKcapDH0js5tN!z5-Br8`(52UUUHggmf0pW2hxI4z}AvrcKPcfx=ZiLAEoJKZ|RJViybyKi6z}aJO|DUJ#iu2zWh^N-G-$4@A_4Two(OB zc|W5~AEx?*Rf%3yj!Q&SZ_xvGp`dlw^Mniyk;jQ0@1`Unsgf0)NGA>st)7vKuFL%) z_4N>)C%V6$V}Hvziw*LIOJ1-lvp9(`O~&+8laRZtz_i2f1LZk5I4`|^)T2;ks!>tV z0=D@29iPi%??hGB&z7^0^M`*xZbjUyet37$@zP`wzDNYHYbUI$%7Uzs1t&gYvJ`*$ z%r?-g8G zxGbC$#PUL)mLdUJf0(T~r`%m~>wh<;hU-gHyt8`ACBi!#R)y5y#Q+{3BzSRNL#k*d z{~XEYhSmiq>cN1fRovRv)`WDIh!>^_&zG%dI0fD&ntv~B#3`OQN=*zDd~HQh#N_QO zzW@9kUj{qcTIyEivs)jLBv>_Tn?rr_2vD74Yb%xCa{E4CepPxR=>Vkn>5VmG2O8VT zrMsudne`+#_BF6w_}3f%`H}MP<*BI|<}8Xj&vKaJ`4e-D%zXQgZxx3E&S{jYARXRb)E{KUI1OOZLCH?Z=CPmxVcL4?Dq&O$clRU1`8pGLt!fK&A zMJamBQ-%LG z^UtJ|6eJ9mAqJxrCv*5u;-0fert%N6nYiG?`k2EUo-BMdWN>!SuL`ceT>WH|$th$% zG`2H`m9+Z~H3Y8=>T<|iatfL#%@iO4ld$5}#Sv8wYr;&H@G;-);$@0r9F?Ug6Axrah|1W1!{puRw@M8 z2&yM5)MU-ZQwS5y7vqnO>Ek1c8L9UjEI%D?%HO+^W!AGefdcsYpm~Kk%YSt>CHa(; zEa6+Vawb1SMP5yC=zIyNLbE)qM9_K`X!Z4MWPpuZeqY}lDTDOGTj)|mO^+xy2}xP6 zz{FE|-CeK5UJTQS$ruf&WX>Oj6W-u3&+^mnlxAD!PU_6a`1&zTHPj57GfU!|N{>tf z=4_*r%x2~4VD!poyq48=3EM7uv?72SdykKuk6m8EIe@to+PDp3gOjY)NocVJ+{C9l zROA@vA~4O#FfcxAE~CsZr}vhI>BK9`L)lQtOiwu!QVq#-FDrSbW|Nh@>5hZ92j9O+ zd4U~Mo5?4o+HPE$xfU|i#eGF6?P())p+_LyFY6-{)}EedJ=3+4Y2;cB>>~u72|~o= zqlR@S33kgO`+yQ!KBSpTh_lqEe$W$N)$pX)y^OQSQu?uhyfQXmr{~&?@cq{XK`Ni2 
z$T=tPOmjp~aQB>wiw35t>2Oo;oIPa*fSZ$0VkyWylUodj?V-e@#e)!TDg0+SmG6H8#j9kk3Eg;uoq7<(F#Y zy2*uL1~xx&BrqtMvOUv@i)oh|9_N6BoGq10@dzBC+;S&1(J|w8D6^UDxOX!VJ{{@Q z15*urTwBxJAM3w}_icm$CmQf>GyC%|TfnpBpcp*m!C;4JYd_$12&cCHdxtCjRUlh~ zveg_PlVIB3lnPnCg7HF9g4c6U|C8NjnP!d6AA(=Ug<;g<@#s859R;Q>Lr3kH{_+7^ z;ErjS)nO9oTkuc&J?Xw4`wJ_ULg&95rvBCEw|@-_y77bVzxw?6*RVSy_y6BbO%tc1 zPr~c!C&xSu;2r$B2rE0m8inR`?tzbSb848Na+}mz3g|G$JSzC7;&C|I$FAz+1rn44 zvp*;DqBz%iiz4tnz`JNm;!q0{^yEq}m(LJWje*iRrvLWWxI1BN;AX9~1<$&p70c^? zHODM{r@y%ki7Xpm$TQUXtC{!J1%bcD{q(P9hx`9s)575a>A!yq7-d1O;nG4T6MfCa z;^8Bc3}55m?;EB}w53F%3v0FtUV3l1|3HSHc2leg<#;23a*}5Tg$UEHhRd)v#?`Pv z)_O!zdvm+w>9fnl6)~wDqK`59#jg)3tjACp7!1Mi{qd3UP>deYip=SgfCV!7}kkDJ?;>uTUqXt=vT%aiX9o)4kT?s@CAqQ!mEuRCi>t58J}Pebg8NSBs2 zU}|s4lq5RS3VCw!bs|y>iU_bOvi%Sd4GiI(5N(18!XWI8)9*{vL~PY;u{HEn{IZ=7 zvUg(cnqJ`UQgSQs-0y4;kW_P@G1bf4M^YTs1{BtoTn=7!QAHCLb)L10uuxB2#$cOA zb;L#AyP9%}T9)x4ur6(H11fNDA%XB{BYHS+%rVfafVD1T(DENQj^GS&^qag<#HFIe)7Ii65zW{$m0Do$ddF^)KgYP4=Sw zZQhsGdNOcp_SFzUi-$+;n|P#5~V7!!#H4DbB zSf`j$I$$t0ofiMjD+U8!_0MS2ZHh@tx0SX8j-=gAY4={${3`AEcPV1H&UcoW^ZD+# zhcq_g-=znibw}F*>U{@_kFVVQp8R*|hrcJ4dBZ;Scj>?B64wK1KT7VyPV?-D=#VKB z6N@Ai)NdU4ZVXG7{}x2NorA44HjxtV%Q`XN0@bNGYUx3_k{=wl&@-s#vXcm*Q*5 z#4SC{dhyxjbuOg`prHBzS7(-$s9Kmo6wlBtQ$b{%<_GqA2BexlFrf|QP!`@luIKa2 zGndrNa@OH7hvqSp<3H{QS$WCU7wwn_XJdG0r5Kej3J3dN$LIA@g>ez(bKTRALQ8_h zyRqy|p#VUS62~CT&raSvLN8f)$Zq>2H{F_embMX@Ut{2sT~_=q!ziE;l&1G#!M-Q= zP5n%Hm@v_mG`9wklZ(4q;B;##a00&+AW{|l`-I-Yh%E|z&Ul(20Dthf6jK;q*C&WM za6V}8gkO0t?o-cFqe)MjLfl2ZcNx>~Q3^>GjKvjP=Z+|rnC`3#LbZ(%d0mUFL~nBo z+ATr~*Xi)+7QDr=400}blHArN=e16WeZ1i_o+&Km?QRVOnu<7G6M$t*yOc-jkTCp) z`QQ9E;~EXLE@e4<J5gNTauGWY;>>Kpr6ZGaceQ1(b z?^Ki4ZA-(f$4_cnB{k`FLnSFz3U<|6&8EjC5fF>iKaM!o%Fd792`gNNh*q;&Wq z&}^A6#o*hcfSq5<_QK6=2?t$0j&MjTzl!ttOqPi6^m}~%c|AP`_Nowdo>dLC(Ygb< zyzG!T2STn6{;>Qh9v`GxWnjNAKZAY77Do7u^VU3ih1N!X&b zLZ2nhSbghD4sl^EAz;wZ}#p^~K3F5_CvC zOtIw#eF;4X3LoP|ZggE$F$!#KHBfaE&0a!E@Lr*>Xs@cGB+K{r3}PD9BbcY>0w(AIM!wu$l}MfL^HhQI9*45L(~Dqg013mO{r<&7s@(x z?5_=Fv{pL>;@*^|*Jup2e}Hj>s<1>1TR3?Cumx1C5!S>?E1zj%!fb24lp?MyfMe?BUP3nWWRD+@TB9TIR?{bW)oj-qNs!I zcs*Cvy&|#;%BUVvQE*z5%e6nYZ~lpDV+Jx69G6Nm5=Xp&f-ktsWUjB_7Rl64mCALN%ybR^gA#EFHjRgGuB=LJH8@u}lH&7Oc1F(ah}SNpC|pLwrI&ujM_!!ZXi zuV*F+##5?=VDPk$D$K7DW-7rQn85K`)#SCKr}utKGyNW{M zo6UK*zp$BFCDygG?C2h;%wdB%(qzO52nJ;wDtf|o0I`kc{5z|fLX1AI)^Tx_v1j=; z5+p>bf=~_BqXissF{VJzlXwq2txC{Q(a}FOi3yy^R}V~wX+M&hzS}e>8tzgQ4?;qJ zw-rsg`UX2ENc2jF4cV7Eq&r-!LLydJu6I@@flJp{y?u`K!}b_P2IWt-fv|h;cB4kN zOju0BW$l8^Cv6_BPgA|`+T*+mPJ2?%l2{THbn@^wm%eX3@rUuNe5(Jy8F)`)ui2Bz zIFmP-EDmNqe&OJ0mG~;qI>P1vKpBF({Vk;cfBazS5EKkU{mJ$bRQxQxM%A$}5F0gC z4dz6Ppz|f9B}LJ?!!^e&^3A~9*WfCKd9SGoq#ffRd&>o-krNQ*SwFq47B_hXbpiAk zmr#Ex_rUaE@mVN+?c`AkDimbFD6^yfgv3jy*AhygtNphc9nYozIHFvaMu%>emy}~J zS-PeLC-g$Vt9&av{r^qrvw1dp@p zoQQ!?Ge?dtLC~}d|Kx$n&rdg9W?qs7^Nh1}d%oxGbXunQ&Hcs{NhHBl67?JaERe_V zSMOI~UPEc;fxvd3M&EliQG z}-Fit= zY3$|{vcA;a@J!^%jT6=f(O2QUOV``Ev;V&a`TUz#?_5E^^g&^3w82@#<7m+Z1VNtV z9G>bm{4Z-Q&-ho5?{!DV+^i)M03>`o>9ItD2Z|9GtSzUI^Rx{Co zE|^wi&6o%H_k?MmY2T=HQ@BpF^_X`%UeR5EUjH>f%VIsF7~(0dNZQbg$Z7rY`SH}+ zpKR-ccXRKxH-6K0&F?Q;^A4V3ZxqkkgBuMQCP6#ka`x|lhbEnSyJc%taOvR3d!~JN z#a2WOCw^x3!<%T)MY)$|wEP-`TcRZE70!OIT-V%)>9-m4Y(gG;c+T-^$Rvl#2Yi5E zqFMt%bEhlkeHzE>cu@C-nwX9{Ci#>43``7KgY`5v4Hn7x`YoxXmEK`Z<{X;@zJB~k z>w1rV1fRn#mFYs%&a*=Dgi89}@NKnWXkXoz@g)D|kh3%0hZ40Kp5^PxYcCAD#4-VBdapdNm(Oumu~E+f9o{{@%oijJ_|+W&EsdowaYrv^Y@qHbG zVWqIc*K15^m?s(9xl~b9l8k?C-^gfgiMf5h|J@1c$hRoGz{@3lU}3hpcT+-F=E2*H+-m{0P;MM}e2DcO{2HLUs~PR%{Qe;yG*3EJB0QvIB? 
znXuH9uR6Wq4m=TESLF^LL;|8x;z=${hmlIqhI!jbr{k}$MnW)ORf>x8Td%vp>C%k3 z`exZ_qWxky(_xRpRA4-pTw)!ttMshQua$<7{%t*_ipw#?;63?06tb5d7e{5P_ci*g}3ii*gpf;PULKIS* zTc@6ok;lFnP5M)MVBwCB@M6Ka!J&h%bi183(_6lWI*)K3LMofYUHfGRfx=l8Pf+q1K%v7iwX zuVsQW4oT2Blp69l<0gc@OM(Z;m6s>4zU0q{xoLh&&WeN+;1rtqwOWb8r}G8)*H05KmZ5L~Bs{zbx_% zo20(9VE&91F3T@LxN9smvavVcXS|!xBgnfbg+zj4F74JG`I(Y9KR{J$ zoN&(om{0>{R5B)qu|$)Pd+3$Wjh4#xvbUTf26}B+PaT@Zn-0e4S%VdGIiog@j(D0Q zyRjRF3_M?swvAQs%w(=b(H>k6q=O;Z+)3Q4$APO@0Ci5M7T)eMlt2y-zQwcWNI|+; zB4-CKY)@P0gJ?4R`Hn)-iPv{n%L$2)8+-j(7)Ize{KjWkmUZ7HUL zQ3XnY4tpdXWSWxB;buS@YL`<;x1W<@0tS#sIY@!XQBcT}QO;5x*q1%R@g+UW87a~% zVeTiqyT&4b+vyg)c<69f1%+LMysHbnRZ^g? zkC%qQsk!c*#&mjz$+_ixD?Lt5&;J20Gv>WJi9p!Oz>U>y%wZrU1u+mSl=_*(c4>RL z)ipT_xdeQY`8d6IwPIaC$5=&e^XT5q=ANf&uZO_qCiXU!WA2;wB?TvQ?B_ANW^1dG-n(R4S76|C z7EMX*i^Z!xKKB|6Yn&s<=eO**ofW~k1}(XEi2x?-eXLQ&SBh+dQbX~`cX$88FX{hr zAu8fuP|3KCau!F);0cSP^pS6Ybw+>x|9+Ogp^smCh1Dd|Cyp?YwIQiLlLSK%* z*=BfDpo{a_m*xL3@*f56Gp7<+#!&CqNVK%@1mjsDZtmf?x(&0sHS?RLzkc_2ixF?L zrvO`v97~^))voF^W7t0*ipn_qdS$FK)N&C!Iu$epKpQ~Gi6u=2An2x~^CUn|0-Y-T zQhpwv^7-vG(;uHb$&yhNA@BCm1MTvR<0%#^ne)K?(&^z0ma6oMUDX$*TK=j?EKMXd zyW$6%in4NIX-ay?_Wx5SITa0b$>W7VDUCoNH>AV4WsH)0`E*=S2 z_17Pk^mjw@H50)Eaa?#L#JVbrmS=d(8@g1%D+KS)$gEcRG$U@?c+{|FZ$qOlOV3xOAhaH-b(qFh_QQ)*<#Bp*nWTfcp5o%QdDFW3tU!sFp~2NEZD0fB7yiYnt0C^{j|q z+LEdyGBTK9CN%a+}E>+`c&FVnRNvdX4>5X)?_39`k#geu^@l8|B(GIDSyC= zKB)bN6u9LXQ91lW(wpnQx^4W`{YOE1fHX_u85g6sro$GY$K}_Z+$G@R?8o}p`J$VD zb^G7GYPkPbx*$qW;G`URHp~E^VvWiv)7Z$p9bsZIk;<E>ZNmWSl{mwltoxkdV;jJ1Nf z85mVXlc3&B6%JetKJW6k3~jt4N8ek`>~u15@=gd`6oGHKfdQF#Abx$=tU1 z_wjm!wQIz#%4Z`{j(*W}MG9=8EA)n@<>T7Pb%Zi(6R9Rs-JmfIWPC|(eGn^ZYguc* zdn+Bkh?C{X;?ZQReoW@zw$2Jn)mhSmxGjXf`MFPctX(%EB28D276b&O z7gtF@O2808q${1!q)PyGRgjjrlu!d`Xeoq93mp`Z5+sz+tCRqtDpf)7joZ%dp0npZ z&vW1J^Sqyze{#*tHNVVUnd`^QuYA9iAS#}O@$(|}TeHWFBz&!91DR_SVt$Q!29Cgk^Sy3Tu37wK*u~FvXr)Qe*WnTX7=kWpLUe3})Rk+Fn^ z20YIxco>BYFT&r?X{>0iJhq}lYP)LbolDKg^dx1njfZ^P*WQEcKeh-L3@*K8&2q{7 z?Lzps4Ku^EP#3Sv&M#cHeCL`sdr7Ukb?Yl91^-p;sXCoX&DLhFw(ft3eAHWaBlQy#}E7aAN)he?^jp;Kg?mEcJQyPPrnTNzYDB> zpON_su=@DQ7p@z%duP9)f6}am^-mLDAhISXp*Ckm!c2Wg!gtI4)}M<~v*P-!J-bT^ z7Y+1xkq#S%0p1w}z|5Gi5B7HII2~m#LGS9Br2W-yBoc~u2uMjJWY37l`I*%k-T{G| z?o-rImz?FI#%Ez>ii+js4Z*7rmm_`&srQS405j6Ka5tSbQDvodY*!J;wiHdn9;xwt z9+I$Nrxd7@W`xG*0M!NI0H^{cCS5Pa-Mcgfq=Oe07TQft;X1uTXlaYY_-z8?nK}vTDDb+eb*Hr}!%~GFomUOj|SvZ0oiPiam@fkq+ee zZQ|4h+u_DS8EmhxRcQ*lj9aB5vZzK#h{0Q3GqKX2{AS6bmxCte-O&1&E$-=?=skx| zvx|+0*{v|SzydauSWO+dPzoHKNK5fiweCo1R9Ocm3cy26H_Xfs(145dH^I+!9ieIp z35GIiYKKleybzC@9w=L&!L(qqcR|M0#WifXsHV-U>~!64bDzZ(9CU~*q!&s#H2OqD zcaDo2hZpiEgFx@kfp!Kg?3|oLIA)}i|7soozM%j1;=oMH7cP3|C?}@19H;ghX7>5? 
ztp|JlyvhET5)S^leKGmrw;j!QjGdz7-{?Y9yMG0(juU0t^rN_r{8@-ohPM=q( z9v=h!62)%!aEZZ9Uy9EKy(BS)+?w(o_(X=3cd_N##?EqM?w>b7f0p!5r9IS^4RkrX zMwcvuE$V97KT#;2=Pv70f!3v$qokk!Z{a&|w)DQg!G_-ZJFUIwKe+Cb7G1N(j)rpi z?N$CW|LH${`JXEGCi?urCi~}1xh&+G_uQsI$2UOL-8x^QjF~`Gw4HC;FUPgi+*y$I9UpsLos*RiAXP#h z#_=iegpBbOX6O43NzRa|0X>ZspQpxVKfJ!j^M$KKqt9=(haGe~H(W0VQ3#DKv?7oA zpLbui=U|kD75CoX;5X%O3?D!~#4HrS`mPGxob6Q%=mlJwG93vsl;u8)d6rSe3{H12 zicAuxDm97iVCL^XsstH-+R9H+JunxrPJ#&2YnBA*(0$vhkau{{<%9r6QiMl5B6Av;3WnwAS* z_7^-kL7SY|Du*o&Gdstmn|>YC0uev}xanHBjxr(e{>n{lw-9+Xi)#0p^4EcrK}vTi zU@MX^)tr57o;&4j?SpUb{OMQxPYZv<1^xp9@K66cxWERnSAM0l5u++C8(l;QIIFh> zI9^WpJk98B9D-Lg!c-XE2`V#;xlI2n@4s+u<;=?oS#6c1WEgjT|9O73GH=BA^Oaq~ zrxDmv-tIXL^a5?xxxVbl?6Z6R|ELRDB5uOIaGm(VMN+={|IgOv@hhgdII98r5x+52 zlM)$7cBLqHMYV6Re)(V%X(<$O?)>@H!*u~Y?E9KuxL)m)4m|Dm2$jMOu3NXC<)97v z9vtvG(f#Z%kCgoLBSm*)@Kh`OPI=^}YJ-__yqrkH*P=oPtkGp_XJ7XZ4%(;8(fZB| zwjX~)N+_Ah3YAE0yuihS{C_ifVCUE5flWF*b)2AjrDAaLj_-Q$s?~6?hZ84N#An%u zLnjNLxKYx8qp~&C^&FmeC00nf(l*X%EFcP)!|U|y*{_`B2VM1I)H_sWJOs-^oBDUw z>vpal@L|~|41qV|T5R(c?hKQa)Xxg44h4IfDOc)YiAln_>p#$LZ*d8pz6T3}<#Z=h zqO8^JJqAgliNFxjq9UL*Jv-A;S#AjHTUGnG>>q-EkYBhpnO@>EvU$#alY{fPzL$ov ztesrAw>7r=jPHs40l~$r#O3nsbF~~iC4$f&ZsdBae&&xu{Nax`etWzBRg=04t7=KB zX_>{JzxCrEZ#??C z@DV1Uvo&xy^y!%87q0H(|G4pilTeJH?;l40$NTnP`ghX|3y$}q?iO#)eWk>CJNY?< zEsgMCon9sQA0w&h41b@_xH*kpoN>TDHNz_|nL-$oJq-)rq*p%J1OKy{@|OMG7KYo~ z(YovRg$uO%ad}Gl>uq;~5>y$veIrj$_-qKV?76y7xm`QW1co5m^uKJ^$bj!~tl-IBWij8TQyuuOxNL!p^h1sr&nAXVv z{s^S&kcNEGb*quwn&<@6dGIwmJCp(qI}cEb48L;?hK!bWhv^x6huQxKaP%Q@Lf^{` zN$);A>q%5kx9IA$5{~nui(I0#Ka#9#P`-9o4YzB~S~KFLfDJ&l(I^rf#qVqkQ$HV> zL0t9`7DwxU-YRGbp!LC^Rw9Xh4?#L!$f^n(K2UpvVZ*7enokWzrvsY%wvt2&yYmL< zpT6G;9a_GYVIpi^T4tY%4@B#-F&y?c9&F4X7Q*y{^P{!>`2dk`i2ZHP< z``uwuFc~+*aj!6P&56_zFtV&qR~KQ{Jm=8ZfQ-h{$JNw2X!hjeJewX3ESZBY{z9bO z8R<&?Mm_jRV676qK(6VYTmsuL^VAlp2_e|^+WSY&C<4Peux=1N*sH4+xm81T!lmz+o%{geyPF}MJso1R5)vNJl9Ey zVLyxWjit0=0qWyCf*fuUSoIIcEjNFsj6|t)OQI`$0gfeN!HlVQxzh59X*!9zod{|0 zK1(n2v5pk_n%~jY`pz8gyjlg^WZoz9qJuX}A#oPsy+y18_Hx_DV;KXGd}o1=1t-Wh zIVHAdH-^aei8{IPWV0%vyQbimBZnE7Y|z_Y?{lE~M;UHtzj>=z6Z1I{i`?d^ba+ZX z$Vh;hgLo&S6y3%%m*RvcxHrGiso}uOTR96fM}ru=b-CTSIHZLthXT$#;?GO?Z%cmy zMijd8$wK`r%UmF5`^f=u{@Y%^H~hwPFZsRhsHV$TR$2}>%z)uG4DY^u8^ z*Zv2;_xyTsO3clsou$Uk4waqWkIEjB+NgK-9{*uOw)uE6L zTM=J$!oU`1shs_wqU#C~@MI#S+Pb1pdB#DRnFMzRvJ|7Md8dH`LE{9TAb0HlE6eWx z`JFBomzMGMw2`H_CJ>wC;ATVdX<#H(aH|+Upt_7$vvE?!zNDwcnesF-5v2eQVp&J> zWSqB3>k&70?}t1E2I7v+Fg0La8{kHoeBvq}@e50cVZg{$w<=Z%Pb_^3l4Iv6gsGM} z6?1t47BIZ3zr+@39zt&nxQ4h>$qvV3c`PvC(a4EQR*IS#13+zrwV;pZ5=_R&Ih=DY zm=r%5*^}d&^0=Mrb`HmElOE61Z@Gg&ks8D&a3;lf zv>ar#m6&XwGbduQL1k^aduvF2m{;G2)oAtf7O5Qm)l}|;BDiwcyi!397d@F7)c`g6 zc&Wdn`-J(c!BRNlKS{Z8{efCm-Vu6<(=mHJU6h2yZWuBk9w-cYZ6??|b&^x)Wwh?Iva*`D2= zgN#A{QtsgLyyJzBqK)NRMC03XXATlG@>7+zDLjLj8F-?@hg_D~2k@M!HG2}+O&dC* zNbhd)3trNgA1>)1DWO>99e><@y}qocQLH>Ulk{HWu?J*hK+jg_=(?e!VN|!n&ze(< z+l*>hZCkcn#x&N zC|nM+!XqtCNxwrOMlHew4KBoZTyz~>W@u5XUL{IA}y zU)u$@6Wb}rR=2LqPl|jxyVYZUzi+qpn)bh{Oq%V}tq9`ct9EH*x33h9NyI2p-`2k1j0Br-EPKMC)mR05U9yRHFB|v zL-4W$6^}xT^8x98!6s%TnJyVRRVW{oFD4h?FT-Cm{HzTuU6fwwn*Y{c4_#{g*w%TW zC?oIT-4>`j*LUS#$M!!=>+p{28(7V&EL#ePi+1}Q?UdKYZ-;OgQ!pITGF)^kA^glQL+ij3H#L+YRQ35~m?lW|(;cU7HKv~slXRh{S6P+Jy*L9=A67Q%1SD4Ug zOa>b|hL9q(SFpz*$H`fhG*Lf^MkyQO@M0;w3RrtoQYsYdzl#pFE;tltp|=Uz$Ys7C znN7CIbFPayV_zvk9vkE4KV9;7-ic|HJ$vVL@&iJdswb*YHAxRBAZE5_WT$yipm2v9gcjZb*-;Og#VN5!D_+g z__gNfkRFW;q-$4YoF35u*@yvwbju_N(=$E=Y6@Lu3bf({b}3nHt59%J?##?n&%25F zh_p~GT6X!(@k9<%q}+ul5XgvqXAK0lrbkcMpqsC+JeyHQFx5m&;|p(6%sw!NK0Iu7 z>V>C9LzRwr5F@=jPwV9xszx$#z+A*b@ty1glD3G;XUBQx&l)zhYV@;_#tlq=Z#nbL 
z_NA(U%N~_2=N#pziB+k-j)dT0(z^A0mCA&#^Z_T#HYzcZ=wP7(X0!;{Q1p0s0EDHD zbz8vj$XeJoGj_#bSA(*-Z|8zEvT^Qp8q;mm&)-9#J?Rt3#(g(_zTH5&SkI{C0!9Vb zN2)+`9103eIk1;{^NXTh%#AJn)NeH-3Z@+fQB|(q3D!I6N#RQ$)=64bm+vzq>-2J- zU^9txvNL*H$E9d;&Zekn@5}a@_L+T4uLz_Iq$y0Ixm7`I=c^Ky6{!FgG@ZHC-WQD4 zZ49yH{=zjdDEftq`H1mk^ylg@-A2Q}dog){=MkT0WsLns6nE9ihEM8A-trA9kDcq4 z>QN?5-h%>aVS8ue4OX5TKtKiVl2C5*l7)iTod77JkvMVDDpt&8>E3L+e5_jDd@1qH6x~Tk3!6WBy+xE&Pk-+oSbiUMCnIlnI?zs}0w~d!PRt3aLIf-;TZMF6ek$!!&vk&&X{8Rsfb7OgB%l8x&qZH5PRQ33< z?lYM^i-SZ7zKfSX4{_h7_wyiJxm^RBm!z1Z*H-@2_rEH{XRjA)-1E7V>*GwM^0px; zE;2m09qEFK^I1<*?SpbJ#*4{!gRS3s3BKQ(``|Cq{!=&QU%LIvq7TFLUb#l3#&xBD?h;+*yLr!l|hCj$H9IDk#Gu#1B!i*ySDt5c86M&y4#2| z;h~KW{<#tlA-xrQiLg+xbm)B&rZ0tJ(br?fTs}8)h)vmVlusBaNk&-5(hliNs z*Jk{;qA9z#zHs#r`B=vzx}uSDo-rK1(c$~zo4O|KZ3aS8e>4-S(6n|R)3Kz2<^$pwlCw*md2Tnx-nUNXd6MFDYBVWNj~lD>)bN9JSAocDA<^V-SIRH zShLB$``ruIEw+^Er$jEu1pJ>SDVJ8@4%iOw6L6h!YppX$KN<-T@d05^zj*+wwFk+5b-k4ht zNhn!@L_j&f^TTOIV2e#i~Ki#>)xLa42>w&ElSdw;aXQ=A)EUMX@VD^6)yyM^;C9i9H;YRl-ra zZPJjouD7uy=F5>B5Skc9zuOc1CdoY1D{s5`J|z6CkyZ|3WNi9shQWKO4*)BlLCVUV zViP2CPF%g>y46$Uok6!)z&)Oo49ndr%&xEOtV$bRi&~Hwq&&}mt86Dw=<|eahxbue z?o-+KKWQm4#geRGMd|c`;pRAH;c+OPRsTs(+B}P5R-+*5c+p4cE>A^p2C2wHl@rr4 z!t8uxV7_yQaKC#u#jB4{lfy9)l6UO&+I4UeWv4y(5c$=$D16QLX8Oh*+GQaO0Xs5EMRrDvlKzGd@MC6 zwlSjaOU%m8&GaOGc=Onk2Lt8+!9I0-e*)M5veoZpKe^4tSM%&&>X?77{O|e%{#hay z*M1`;+5z|c^-ctg@iNdWx|S59eqzWC8wD+k=Jq>5@gItY<{_1e_ShXx0X zc6$Sgn*uHcDfnl$ojh=D>t0U<6|WeeM57^UIK2X2gkA;b3($}Dm<+r28It9M%8;KuMe5sCaMop{9LaJucZX%vY@bv5951oF9zh% zOjcq&m@9*0qI7&pdsJB}GTY;IlA}OCiK zfdt}wMG)Vix3xQXbo<|FNzLQ}! zPA8;|E}fZhy1>?SZ$F1S`tC1sg!Wng%0VpI0UoXSg@m}y4RZE>kA(Oiw8PLhLC~#t zD{dbV+k?KP0*I`qh)SCWhi-6vGrszwA)rf9p$|Pnnlf~RD!i16GXIK`TXQ#`gOEcY_dWQUK*T{an*H{y z2$c931X@0)F{HFXv&^XW(t0U(;)id(|H~8pyW;WCR_EUEPdn$<}KStT3^GXPpw88DicO0c5tZCT()f5yK;VLs*;7D*=!ZHzsU zq303Twa7;@)Suq!>T{{MmUct^hN;$wh!O_@>)va`ZGi>aUDmJ)HKtfY`q>`J*KzC> zP3om?udkkuX>nX9@9u;&LxNN0HaNz0_o=@hKVtnCtvspwtD2TxV0|{=+QZAmbvS1k zF(j?l)n-R3Glf`MMZGYUKjq6o%|;by3Ve8ViDW@9oxj=S(i`BRK_ii#3|o)}lZS?z zYG6$DItqPS0xhHA5-Z8|I^^WVn3xp+KeolZ{F1Pm49#MY3vp>W^Pi!8e$53AIT zaG*XC;fnpyEEl1w)Mcn%SBlH>e%#38J6ins`rnWK_m;oY$yF~8ZojdoY#`?v6$L+j zA3_1YBlVI#UzOulh`3GdHVbwi?s06);Dah*F(?n=yDDj?c!BEa%2&WJ!4CzCx#}u5 zIde8E2@KiqFd6>5+`W6e?n>kgacW&#;#!rFQc7$Ft56SDh%mqtJlLGo@{JP0%(lYKM~uwm03jr^;)o|kw3yM}hV6!X z^EIz;-lX{E^B~ToDFs+H;%h8wb`q@aw(uWG7wXE)#1k-gSJA&r1D5D2)fii9asOj< z&>$Ob1`jzRrd$9}_P1|zcO|FS%0SK3`73B!{aB&X^im2+xXMR%l*$5rk2ai~LmPv6 zqCud~wX0p$$2PYufyi5-eukky;kQq%(;wTET94SkEXK(B2!6)v?DD1yp8@4&i+6dT zfDo{JRL`*?tR)$FGY)nQH<`S+==-7@WXhf(Pssbkdt{fs@~XPQd}9n3GJqkKL;_x< z7*1l8Zg}Aw0c^i=Ec5NO{gnc4RNYOmV`^ezMh=>25%Fn*3=gogB2ka}3j}(=Am7H3 zt1TTI^>xGLU@Aj!o#pqAryjD7XBvl1!xJMt3c_M? 
zQ_6kqL7uxMO-6TW&#xN3l~U}?xx6T+F_Xy#IwnD^{ds6oIej;*&TNG*76F5lEV^w3 zmG8`gw4E?d;I7+?8i4NhfYP@M*V$Mlyx}+kSTWK>3|x{kab3KfJMh**=2U3{1a78% zBPuFoZtiowmv!TtvovaUC38qoi2A}Y9h#pKov3;@aFfG_ir_O`k3AFPoKRXly6SmW z+9sr0cZjS+>h-peDLHj{T`oJ)v?0IMx9v>@LhF>((%i)t)&vqIBSjk~KzL7PkQPb- zyoX8T*Jn(gQYGJ6o9DvMJ2{a+u>r6uNICPq%~H{85~VlAZ-QyoQ~5CLsV?an#j|VK z8h6{b?jArPL7LmWRe;TgT%Hj2)x00b^q)@k*Og;uyL_GmHi_JqI`dJ_V(87WV|B$d zM0?3QQE}ej_M`R~eNiw|&vZZtY~V`BIITl*FNDhal$#C?HbMivl`m^aRb26dA9Igw zVDtCZD8(PD;Y7tnl*}Bl?CLEYi;Q5XmR$}g3SVQ`734%(Uu>U#4ys=ec2o(B4SCFB zLI*F-p{WH?2S^?wyg-&wO~NR=LOv&N(Gi(=@YD3$&-$E1OG9zvvqjY0W!eeD`GUbm z^1a5ue2?JMI)&t(&e3IIp6ItA=aFEP#hA&qowu#a++1SoV!`{Avfejpc*o|RrTF^j znxoh<)DaKxWgdU&D2pWbYdzqlK8a*wV33ieHL56J#HHNz3)l1O#qOqx!e)wXaM1Yc zhgI*?{Oeo1s!UBxdts}^CRaQUcvMs!Nw6B{QK%s{5o$beFM2x)aJ|kd$vY-5_xzh} z^6#4L_ly6l5qifeXYM3*9w2%Y_4Q~y6bSK(!ZKCu>}jr>r788Yev7l|QoVlKc2(@f z?J+due<@~a|ADx)oW?o}w=EQXiFBku(?Ls;fu<5CeN>rrzDG$SnuW<)49eLe}C{ix-s&`%-0+pB@IT9F834Y^d zqUc$Idl9T>d52XlE#016jmfjAcb>fDe6joH<%23Wif zRIaeM_z3sF;XN7Hr!R$`#g&+rm z)4^~}u;ppzo|=-vg>f?(>Dl+GYJ0xT5gi?oS8_Q|tXE_w*kF!q5`tQ=mroRT8-29* zYa^PvM|C5_eMS!(iKMB`BW=2qSvVGpQOOSNmA~miw$(N;Y9rd7ovHHiVz4HrAgyDg z?r_&5If7Q}q@6n!iDF&yuJV0U1R_c7&8A*=&xpwf7s`S<1|U?Lo-}zdWfI*IHcY50 z<6p5_$KthoiiQkq_i|T-s=rteH~zvE{kw`v5h_OEV|f>#HU&KB*W! z*IjNnsc9W_c`cC=zRW8JztKR6L#%p>+^IU4Vsr>|uvQxtwX97n`P_3c`Xb2J_7s}I z!Mb^8BSFa46+hLXZoE2wG-Tchbsm)O052SFo;Eck!+c}adU%6OaZeo3Rpb`6Pr2yzBvscl;B%0M%Oh8<+@2C=A@6mg(b6!*Z=XHK zM-?d5_tlg}<-_~y#ppr4FN<`&vr|V0H@f9q;iO%Egk!l8j=CrPe$J1>_<__HAPSa5 z1=|H0jCk1;^Gd)0KMa8j8PgIDSX)j6)NKMGEmlLu%Kx1`US+dj+cMi&cA-e&CVP|d zBB$$=k6)F<7~Qm~-bf_YvNXMa!ncKL!SCjL+gIA%WhB7|22>^YmB!4}>ihVbi`u04 z$yd34pVFy(ZwDv*87ZbJn>Q!c7xlJbS~JnWzbk5fZyr9q7}V!3m6qQ6*eXQA3qT`l z9!pJG+xVzTz^%u^JQ$qF8wD)f=HTO41P1~?6Y|^LA1jf>nn{1uQiXZck2q$heBnJRyPQr!s?vTkzMn zk2qHtDhf2IGxN?&kPPOI86dm50lg!0W*HfJCZ{Vzd{@@NriO|!)LG%Q3vvZMcSh&0wlfsU08cR*l^j({^7DAOD$S7 zZriiltbEC=GO-(Wos@PxDk=&Gg$Cj`!%s(p)~-J|$cf7SpEgZ>fCG5ocOp=T^?;On zWZG*Fe>y%Y)=3vGsy5DSDw>*J?T8{pRkR=H3bpXhO|P>-N-I_2{QY0ZImE^m&UGV! z!lWw#GNV)P$PF1P(@`ZI{hva+PY<~t%gf0tEi7#8fj7?iThDV)IN@+aju9n@M0{R2 z44ftQ=8_J(iiYn)RyYJ(kb1|J3*dlDX`7GV%4;R+_PsH|VvpJLQ!D}&e7)NBSykra zuV7D}sEaxqj1_D`&dyUNdguLpP2d);BEgr?4Bq2ob^FzgRx*i-9u&$8_8=-PUMZ2d zqF=~l<_13=tDzI3%E2SkXOa*H)H|)7Sr?%2R0CwNq0)8(V3l&+I#Kgd6!Fc_djf%z z{GT$8-uX`0{$DJ-zf>I;{RbHF>Vtn+%fgq{AgD?wco$?}WTO5X`}k^Nn9>iz-l|XJ zm=;k&=k{$#t!N#Robr9nSt4m7Jib+Y?Dn1Kin;5@+HZ_XiDUL*ntOM?S$=vvb|@Yb z_}*u;?7U~<8J3|YIxtKg)ys+b4ityp%x}6_=|weg8VYLPU4gEh4ocU4TE-GyLy zf)@PQ2insGELFlIL6*26!vh?9y zVaripu?uL7(Y8-$7Ju(JA@ZG=lw7L@w!1fHRb}RijlN8#Qxj0_N0psJ(JUR}Q;tuS zer3jW>xAsKM4wbFw^Y~NX^|Zh4v*)zqc;svIAkA8NN(r(RhDSnD_z%ed1XBzQ4`hK z$kz{Z5zm$FHP@8M%l=7ia?(x-gh0)GRkGmdN*`fivIWLSo!LwXdo={)ps<{~=w^>p zOxK=oC&=Xs-#p%KqH04uZbNTQaF?1 zY75QFQ)gRQF1LR49fkJlt0!huREk#~Ht*?@f`tn<**BtX&yx$>QbLL>mP#OTq`qU? ziO&gd3?~CPkOa!A3S-*2suYxa4aYonO{c+G2@T1}_vAL5$uxD{BIKKSGwK?M3UhM? 
zH8s6E>PJfz;~EP~Pm0=qPq%mFu@E?iR`tsCFm9AhlpM(;4P8*2zp<0t z6J1w9pCQ7IG*#Od_7z_pCVMDHOhrYxnpq$d>?#V3gakgizkgTDoxAEjG@~R?+J_$R zr0LvV1cN#F)WVW-m{4vnxTOVCk>y#v^SM3;oS#)aI-XtBsYXiYM*fMb-88NSsUIvF zN{hQG8oPNYh$bZ$USN@8wX?$Q-PoKKuE-zR5&5Xc%{t5%>it@@P-U*ZFMCi-Bq7u` z^uV)|&3}0<|6TF_st+0%om1;poj6No+gcxW`Q&P;U}mNqu&JO5T1u;d<*2n6rw>t* z4%U2!N@Pjzhe?Q)H8zvdkHm>rIc56V#sD{EhNC-bbV2`ag8$U!SUTd-GtVKD4V5k!9UQtIKVN4CZnJ1WpA)l>6{z+Ay( zA1|Yl`MrOQt>pSYeMA9^n7vY@?Ra+oRL{=J0b`!?)mXe?eBlyFe}AUHVhI>wpUYeb zg@-UeZf4@QiOd8m=Qd{C8X+pJJ^YOT9G;Klps3qlm}yNNRDQci6E;o18NZ>ExPV2r z4|uOeb=ON34Num@&%?cXE4`Nc7-0iYUOtf3eh?gN3O0RbG-b)0(b$^YmI$0S$Xj<; z3zHmCJTf^=f}`ohSy!^AOy`u5{;Y~c2eXJVTXT`v{IGmq4ORHHQ38(VM7Z`TB&m6@ zhl3Rr6N5Wq=IlNpK1*=hXca>bVL5m*Edm*hRjz=$K;dzPCPJx+_{XD1zHsGc*mGc* zIQHZ8)h_w2%A3_=n)Ziv9lrv`^$+AeqM49m%v8^si; zuzafmSW3W8S4)(zt)Ndpq*XHJRAft%pCckm6a2;kGfa#ZkQ{#+EbNA$@JA>dIAWpz z@YD~8DC+IJIX8mfedZ(n1?3}ZRIT1yW!T8X z=MqcD=4PjJX^XzzQbSMNbA4vSbpo(Z71CvPRHJJ1rW|SNUHRyfu5nIMd@TU0b$TdZ ziQS&JGP8xJsaapmTb_*;J(9N8fLsEPWr`F44kbVpsJEuuP_dgL*V_v#g)R^2|1 z-c8YJl|D8$y{c6*NFc|3eD3;JH2o26srI~-5pIpVPp}a>588z`;YTHyu}VHOW9dte z8FEd5Cs9mu1i~GIFG5R-qC9-B@8p1H8B_I}b)N-3|Kcy0Z(KdkEiGx#Fzvj3zGUgy z6X*>+#6EdYuYMu<*91thqL5Mw(vy=xHuqg`q1>62>8-zpX#dk|{)dYH=@8E|Gwaov zs9O6ZpKsO=S${M#?g1Ggk3(-24bLrNn49#xh6vn_B2Z1)kx-SS{&BJ)Piv|bp@%OS z0n^5Fmx~RgE%S7*>X5pTh3Lk0$R}iC#1!2_;Q3I2Qkwrndda35KdRYcE!dubKHKdh zoO`E61-_DS&!Uq%UabUFf1zVHj=Qo74tFK*S7cyXp}uiJDf~=1dY_zNpStUvzB>b; zac094*1c4GRXi%0O=P7y8ywE|F0@)UO&LyW5c7^SoNYa}zwFwOgn;qB*f#y9ZpR3s z!f-byl%WY&qj~09Z1cknX`TF%rqcd3|F)Ye6%$miBtQup6^eNZ8SLL6+YG93*g@;@ z3t}vZkT@v3%SsX1&1^et;Em8J(xD6))}frEPPpo*zRu%1T*}3D^jFt$xWaGl3dJR7 z@_^(I-xUA!ho9=tm*3cS<6P?mCX~@adW~rxYDMmr6(@bT$aT<)i!xWf9^4yWs^{T7 z=~Wc0DOA_Ju&{vVc*HZmJJovNlRh%@mHQ6AJsF2g((y^r&6n~wgj)nN`TI{E3P=|& zb_7ILCNTQ&P2z=7?qP>BjXrIkUzt82l@i1bJU`2;{as$iKgRPzKl|C= zOv>G_!&f%F_4>Ab$mYlUep-RjzYeEENzFwyi8Cb4U#56YJ>#cepNTkUPGKm)`wXwy zy3zD*teBh%*Ee66-`^f$-fGZG~3Rr{ZN%Y56#4Cp@HDg||$7YU=M zxFI;k>QxvD8#y-wyVjHXCg^R0zUg-~3f!r>xv&$Vdm+=_)J!$x;1t;8T4inGh|yeB z%VfCNhY(Et{ZHa@;l2h9i^SH> zFDmuDHZbY6%82nZG+PV=F3$J764aS(MC69&P);?eTK4VrHm$T}7g(sxz}@l({ccp8 zzK*rX4^XHG1WJ}=iKpgWAIc?QSLh#mtUd`A_$=E1tGb;O9u6u|l-qF^HMz+1gDHeH z55KjpmZOxLokcnvR-fR360uh2vwSL{7LrxMm3S3<2Ee8oLFCQ}*^e+msza7_hKD>U4(-=t z=?$$`ktJyxh$BzGONp>F*4a#M@JVSzJT~=lRDQm6-CAO5QNOza(jCjX&vPtDu@M)Q zm+apwR6QvT_PsC3eeU2}`&1RPG>99w%FS2QW2TCx3#o?$E*Nv>V};MNkUY9$Z-@EO zEpd{cnT;Fm5+;!gal*omfk3~b^@Rq86X$QUwUma+3KItBw7$n;ztg?!Irx>erN*VI zQ(wrT@D+W^guYRvbc2lBlmQST7D&`<0N2+LA-?W3-H4Q(^nIJ6_cCwx1Kmt8QaG6u zIRD6e5o=EOj;*B1Cq6;X;9;~q49Kz)KJSv82s1z=G@c87%B!hycx66a32W7q`(kl@ zJm6=~PLke!2zT}j|Li&bf+2(i#dIS2ors(!w}(1H0Dt>MpGds4sp;}r?2h>UjMVnF z0cXFp>d7PzI;qOCxFdaX&cpgv9>qdWreraN2MTlZg{$DH`)m)#G*vS5JeWL@xKQpU zx+;m(ZA$kxO#N+OBaZN|H>Oou<}6 zWDe#`ieONjt_ZZkXN`J)zgU0rQyKI*h*~A0J94pUKIzusY@sqUbD2^%JzZOY8N=~p zvqgK1d}JoTgk{9C{$U$oXJ!f$`LtNz1b6CHlEcaDO0mQ|0DL-6b8B<+U5zC(WnvRd zH5rrf9c*jMJEhD%Fi{+oAx3Z1$~9fT9u-jw$rmmq+s=ft;bq#8nxfRa@Q=2y3If)bCou;j=eo9( z3i>fYG1(n!5wtmV@JOrBu@L#@6u6!G@p^A7na_bOo)7K0JF#RWL;6f8*P$Ociu#wo z|0HXWRh>0Vo?SmB$29AH`6dTRw!^l8t;?3{WrL1AnO(H$kGoD?4lcw6m{M%_@Og_X z0EEt_-)GLhF34F1RAuGt#knk+`-g6KSr#WEfVG!{W0fX@p*|e*nX;_S$|8H6%2cFn zID{rg5jgnU35HZ1mD7|7dzsMtU~*1qa(kWZq3?7pWqVuS#y5G9^hUl_eXH0UiJL^_ z@;-d8xWr%7OO#7j%Hay18WWX;XFW`NV4bj96fx!K#dYK07ZD0Va9aAjM=^Ki(PvZzE7zvBHqn-drBd|l z7%=w`Dh(|px~I?hMq?LVc$HA$$#LNa-n{|Fqx6~VbvL=w`Hhw*Vq+&g7CjM%u#Ki$ zU=DFt0}|+3`YkoXfbyoZQ%yI^(I{Lj07?vN($zKh?M9KLRUOKP7EfL%s!fSLMKxS_ zQyg9;1T3FxWDPs=$Es#JKPT@dAqNxkr+c@2p7sPExRleCN}D&rmMTYnAE&ceq$+l= 
z%iodPH^fGJdLX3R!uQ4-*UCzZW-HDab}E2QCA$@tl+P^#J;fuZdCImz?Cg>vljo1) zk1E3b)=ZZPq?DiP+m?by9$({0oix(&bd_-jk7IwH@iDPZA;;I&$1HW+%#&|^^S;GC z?n0t+>4yrX*tVZ_xB!Hh-xCYDz_)B@v@*9fxRGqD2 z`n-*$O?AVqge%2MYvG3CF)wsQ#Lz_z8@0-KRVtFj!}|~)QDUrT#QAToK7VOzpXh6| zev1E*{nqtL^q$wrjTLia$LTVMj)XFz=J&K|bD~!8oBm@ibYYIlafn@v0Ecj1R+Y2tUG+Mcr+=JtzngmA2q@d6cP#aziU#XUaGb@$? z{1&6<{Jb1GOzc7dlV>6WJ>&-J*JVvM6P{4(EapWm4x<|NOgUKk{K71r3h=C$7$4FV z=}}@ASv~(kyY23?Vhf@umFRFZn7@}?Tod4nZd`+RcXiMziWLge&#A?c z#;a5qKGod&oa9GKX#F@Jk3Cdz34SNZL~(0?RT|ckdw2{e#@k?Qg5X5A4rvO0Xy-21 z#DK2}cWUz1CgzAJnd zftw7?pyIhj+3QK7wUCP!eMD%^s0G%YC`tg!a9qQ26=W;k%^!+YJeVFU&?_kz`Pe6y zSLHqM?uzgy+^wFqc!-v?2e;L@3m31?SLG){zLzmp^Rc`4vO_m5wFVB$8z&D{n$*X& z8MoCa@mB$(ONd2P8KXy-qp-Wbs>}cA;~R0kd8fOdr*4P8A}(ywN!y_`Y9@(h@U~aC zO|zQW!!UGgj;QKi;(Pa}Eb0S{8xB*ciIuZE{+jz0>ABywUWCd~Qkr|oson3Qdvh1f zl6y~itKFrVFk(D=>p5V_<#XWST9YBGb9@R)QC-f*al9{~C1M=Pf=ZOYqd2_@XO$KJ zfTxYYE0I=|Vk~*1eIsm zKgABEu7Vw^(nu3eYesp%?MYbSsEM{xE3qM~23vvH?}jR@h>L!1nx<`$LWzZN9-jpu zK4jlwMwCu1uGiyqWQ0e>g02XB5o~jiT)nL-6q|rh&n_Nyn?irmRrIY{g1?t^?oq=R z2t2gl7!}|;wIIVP*Z$X*oy?|M8^0NqC&x3lupWBdC3XrCt5U>i)0|=tG;o|}{!Q?RgTE4~utz`kjknWa0k=nioh-UJ!bD1oQN>B(4jAk_ znKFv9BV+(Ag2Yp1o3kK!s;KWe4c3X>M)~@)72$QaZO9l-HnTIT;9D^c-ME>zYlNN* zYL9a^rp(r(4569;_rNY1k}pvL)ZP=KA8GnGVulWAn%`(P?<}e6dx=9d7>TQhAdK8< z#pGDDpz5%^o&YssoW#;mUD@N_zYGTwvd_+*Xlnuar1dq%E4X(Q`J6-RZF6KWJH@J+(DCJsh767O`?@LNsSYvo+czkSsa_(S6iYz71$gJdAV8nzS2G8f(g_ktH08G2SAp?cghnF53 z9rN)W(lu?tMnp{^Wfnf_hhJLyuGEX%da&HSSqWsi6y_f~(4TmdKz075S z4Nv`~hKx)(1)12aI{4_Mw`6;!QU0?ZonE|X0m_x_19Q+Yvryt#*g;5{Z^_Vp-)HN|h$ zdD@J1w$G*&TZfGaxiF;wh8=N?;@GH}(!qjJQRyxO9o~W*54!mOL)?2uHI@DSzc`k$ zJBWxHAZjvW%m!BgD6yW(gG!w2rPO0EbAno~r$HLpK-jFRHb1Z|hMeJIU@L5fA5Q@){1PPlFL@k*0#T1uvU@C6w~KHI=Fd6(4E zKbs~IavX*kz^TyROHMat`C2rVfQo4mJK2w<_}3#|7Eve{k|KA2>7uZS5Ps+>=e&Ux zLRfl5$f$Pg$YdU)0zTx~Bkt>0{6Tc~^jN>V`U(;zcFJ3ZuiPcY{@iIW_(spW`PtZ5 zXj6N}jy+$?ExkgO&C(4S99|^Nsl&%=*D&d;?kTK!X|(A@1>T8Ch%g&zO6?ZTz3x%Wtf*#4~ZY>?q2_{UU zs&hq7Z*Fd(PX7q!YwUNawkFwYQMh7f#LP>DNP%iGM8H6qBH~qNMfXz%CUfPkzqUe3 zqMvn{Po0`nVdS&%??-w!U#0oL7@{(&ghyuCz>M%DG4l>)Y0q|GXs>Mo*INuv}-w?#D%@A`Vp@HdnAmI@HOW_s`(?RkHAO7_ZGP~T(P|pZu zCnSLdQ`zllsqq|pfh)>EM$Jc5$BnO|t~%HYWWd@3=zK529yWH<40gOSK9gNNtj>&K zB0$*W!E@9g1gtdNGL{#S{eJqo;^>1sX1gaLi%yHI_KJRtNv-%U!@ZOQ1G@$_lY%N@ zh&VM42q0)=zh0*-E(VOHg90kSlw5^kqF@HKjDv@~K=bhXJ>k(h z7GAdu^EI#_WR+}~0?q~?Su_Q1=BcKki9C~0<^NtNy@cQ&K}|qNE(1x~yGlBAd}Q4?basMOyqDOzZ^PxbHFB%qZ>x5WGrmBKgViMP|4% za9|~B#Ebs($t*K=V{oY!>+WULQHc^6sx#qIst|66DHvH@XwWJlhMrC7Dh-a@{OvYv zN%>=J$^1`{M;=s$7VVi7Jb?E(vDRZ463X*mQhon7^B#T{<;IPl8Ar@nIo-~2=O>mR z+U7XCZT)q}s3a=LGKII#QQ(zU|qx%94o$inr$ zs$wylzE6C@fM0^00HL@Z5Q??0EfY7!C-*-MWZdlTIn;6RpI`pV)$RWe-t3p@s+d=y ziY`4n!Rgwe6TR?m6S&a*6fl(5k#&SO7FR z9-g1B?PauI$#%{R4RQ4gRvk8Z;e@z6_0r`;nv1&1wf>9ATQx%_1yrM##zSZtU#czl z>ND$Eb>y7X$(f7y4*iYiJoZl;``ee}+85FLd%Levc9c0w32%4?|A#n&q6Jfxar~N4 z3n6F9HpKWM<{m6se-b{~h)?&%R9;GENe>tF^fP5Ey&N}138YI*1c zg67Un$xAiqK$rSbV$uk5FF+uBba3%AlrDW~GoV-b-j9h0E(|KgK67pY!1|(Oh6u8A z&-VVv`Yv;IAy*TA#_iKT767jt?u-DL;xWwsK&;|DWv=Mjg60@U|

#{P@0d%&rI;&F1p~6>mwW~2#y-fX`GFs zsGC=D%38GhZX7!Egy*XE&-tt`UB3%p>p$7%Q3+9~Sl2XwK24SUK6R?M!N9`9GAS|x z_nPOIx4>N=-l)%d|2+2~dVhYDz(4nJf)WNSN(BX&-?bw1oO}Z8q5eAS_4oOM$o;kP z&O`r+ms%J4Q3&~d(^ey9CUVA=yR5<}wa}UJr6%UpE56Nzn~46?qzN_EN+$o^V3=sWU8tX5tm>il#Ti1#Fq^) zfMPD6D3E8UseQ6_oy*_Mi^@`z9WE(hq6DO)YlWBqVhCAg4K8wk{cdNXyO1iOYiC2P+1nVax9mVY}_zX?Hg07d6<*yoSnrMJijO*ImSj- z#sa3W?L=m4*pc#QKs+zZO1lFP3j8V*PUrRJXDMQ50)qF6XKW;k{FA9hW&TasE0E?t z-P|}8msne$hnW2zZhtr}H)>@?P^N&Vz878zpg8v(^(zMRYS+Atuqj*QnGbhhlx@R>)kW~l*I z9QFI{AGnRgl*DDPH?b=fZE4@n;6U#7Z~+34dnp>QedkrF+pvj99>}ev!^qCGDavyE z6cL3<>YN_UI5;t?7DBD>WuvOb%F2cf6S{({DOUY2JxxVqznt7?1^X2$UhHt;TUGv! zvlvy4FTr9KBB_#)X#5Lm8SoAeHjHoLQL%^SjOA^CL`j zSWHlb&S@cQ(p|DpZhm3Z@yU$jyKlb^;jLw3shrjejbB!IcNWGC!G5JE-BtrPTE3fj zEPgy3)<{)2!{Qikfv#Zp(MDmtM5M)Y1M`onhLMS2M+Zmi=aKUpPvnZRO%ai-GWkde&rWgQ1lBAB z9YvjrNqsY1SlkgZT{6tCjN<$P6Yeg)NqmMxu7kdQ@w|h>yErjlRu&|1x+@$kFA9K` zCAlqp!axFR^voo|B0GfxI3S#=mL1tW)sk9`8k@_Zkt^hc<%0@@%-VCjFnMae zzN_QiFW=!XO;H@&ALMGqJ|-_ZpNS2DHu*fa;H0ON@~4J+4bXI| zy`zNwU}nhmmB-op3lm!ZWY3DM8(uLl5P1@Z)Uc$&Ye#P?ad8m!yhnk^*NhQ2&ng_Y zigmUT%~)vTEYK~ca8!H6zj&}>X{}1zDSd9>q>rtTw1bg^>Z3l(Bk|T)hoBN@?2=o2 zlq|836|j@2&7WWRW!?~NWN^9!1&z}$(T$=HjYZaH@d-tkNO^>g$DiDj1oN*ONU26Q zSSKN;#-Ymdco!p&(gs)Nd-$2gWJpfr6xRo%zCy9oht{H% zt+}}rGFO+YjWjylgOKe5Pqq}P7Q$AuQM|#s;+w-9h#?z3abu?pf7k1iin)W)!-OmLvx` zQNkCE{gRocTj|-miyItY+QMoNzK46)3ysu?RhrfPyuXl^4H(4AR~%of{lxJ3 zY&Yrhs3U81ZdGM-uT&&Z)GwTRMv6)iuPFW);}l;^{XWsgWr?n}Oi%Tht=Im=vuSsv zeXsO3F`YgjqZC;@FkH1!#{7yuKtHNrrj$#;OC_raS=eH6GeE zC6y0KlWP^oafXjBpQrc2`7vzR#xH1r(ZJv98o+YQPwz`&#VTl)BUCDLz_jkl@5;i( z5F$mOW6B~H(;3(*b70fJ52TL&vFQ+{LQqvy2a_{y)fRejFhQ+@W%$VT4ZO1i4A}_8 z%aX^{U$(z1l-!9z7ZfljAJ{A}RSjXu_eTb&EL<)RqD|iw9ATVX;D7^Q&kRokTVoA~Ur4e9e$Nn%*{ui=a&a#vazTo1?jXp^8S( z^p71e3byL80Y#0KU#=N$B*mP&ZN*=(ptm&z5HD6}<)LEs_2{7Um8HQJ1znz3}H$Q0v2aqD7=yRYSKI zI#lXh+AQQoL}UVN9%frO+re7Z(wOY0$X~$q45vk3u<}^h<`quHNgZt`db1H)q{sA> z-ir)KSG(3spqau06YIK~zRVfgo>J@%H?Ey^q$0Yo#IW6QfK5Qgcocg=JNE}+u#wup z+gQ3Ya=rR~hQ!eQ1ndl5F~P!+Y`|GmDI0eo)xk9qV?({n*>{CR1HPnNjwClumrZ53 zj6TD3gQHp=TN?*&ZHL^sslOn^kqfQMde#4qi*ygW<;9+lp=l(!OYzWQgJb#D2B zh$Hs3@ozgO!X2T;YfZ()0Bx;37OqATU7a6E-ILsME@KWbB*1lvAoP22cb`NHhz$`( znmTJrC%HX{NGs{v>~$?bLt4DG?LS>`^fu^EDXBNE`a{|wKkR0w$3_RLrL$8p-0Xh- z=qeIzXt9=GDQ=Ty>|$Wa8r}=Dk>J zyuVW6R8{ze#aep9cA!?og>vnlk)^QvE(t!x`>`43Qj7teE~(g`-QI>~n2DaJi?OWI z0e8H0g8$7DWfpY1`scyv^?!vmpO3I)+Igb_)SbJ$Lt>mo$bH(O)CR> z?Em3V=Bms>G{u!85MW=iIi@cKLTIQ&34wg5sd62+s>A*es%Poqi05yUr6*x1-8;o( z+L%u#gHJHF7*38ZN&Vu}gz-{O(_Xgm5V>iRTUTB6ZR)H?w!47gsP5uci@WcE}KJ0GnO+4K)xk%{S?70K!ZjAWg zk3#ulsN@-L02ROr*0Us#oV^r6&!E49n`P3RUadEAaWv*dK_8xhS-h)vS44R#E3(5a zV|t`d?J@r&5BdGkgKYy-xI2gPFux97+~sbp`{C_K0hYsp1P>ql6fc}FBEqN@vofuU z-&oEZA5;kVw2gdg%nCvh9$oXvPNr64r;T@P0+G8nZsWXUJuQau84P>|G3vu*pcMTNi3iOPacPa#an3kWXj5LWsP1thQEn8c}12!VLP} z!r-^a>cb>zumVBS|K@cFKS$R~!>|Fejo6?Ecz7f}= z1aIatJ|a$5h=|89^%R1`8pd_xg>zw6_9-bKne4(0Fg}1(%rpj(*6fa*-i3C~hB}%L zVeHOnWfzSvbbP=!-hCjia{i{`?MPABfoY$Ad;?RUCD-Tn-KpBqnLO(3MSQGJiEcj) zz5;_{<<&N+uSE{oQy3WP0!_@JUSpyhzF@-{aoieokR`?cd3y14aNg=-HWjqI;}$aD z6FY#F?6r9b3_nRL;~9jw@jO2Xe-hJpZaMb7M~r&<;1FugG}pG~;kvWT<<`WyB6Bx^ zH0M$gpZ3jgwL6FpXx9C*ZvC&LDr>q$j&EleAF&|PSAGK%uN;qZ+6BMzujzb~u?6wf zs3GiZ&}_c?#5maPp!(y1kHDNt8Sd4VXBNm0v7T4c!J!vqW!p{5!gPx9CZ-q5 z%BLku0Vp6sJHMp7W07C0MJ`{EXnt&1Sst%%&_#Ajq+{KG3I^ zAIuI(ZGd~`d6+=mM01QX<{_;vmnJLcM3V~(UJH8QZ$E(1JmIVX-Vc*3?geAtOh4ac zMGr%mk4Y2lB!S@D$F}+b=m(2Z3bcotX1T=veK_E^n}hZc0BtziGI2>&IQk&go#gw29!s z*6dE@G3>d zC`9CAhijb<27k9q@OnCGT+`$gT*2DHg6JC6ncwDW5hL>cW2S`+{~^^r;{h=k-iKK^ z8Ijtb?G1@6D;@NZJ0s1llIzy(4Mr|C2I;#8-^G{jTRA=D#PVTU%sS&hi*(>wpOaI&bIZ!L 
zEZ4+PZ1mxCiS&&s7%I5$VI;}m*pelAt~`*=4ms|%5^ zZI~`mAAhNrQT=`owZK_Z{z{a)Ou5$!-s0zbFJ71^_e_5+!J4#5+@{<3CXW_fC@@~< zYM&2^lk-B|Dyr>mE4z-9pD{VIQSqmj*Cj6Nhy;Sf$=httDyl>8SqG$iH+mrvf)SSGz-}(Br>_Jva zdLaY#NlBUtnUNokeJh&BlJD`tk07$^!WYwAO1%=g$-S>`i-M(M16&@TwvD~L?W)5g zzS|P4x~r$&q^9aMzU8(OLm%aiv1 zbE)?K<*u}yiFuAG)8yB<*{6p(woqpFxEIN=q(6R&owg+QKIynFe%vID1o- z=yZFC+%HZ8(s%*V86_)toJG5tf!Iy*D|5u8rbJGvBu7L93JPLg*X7AYu>N?X!f|43 zjL=AycGp8jysLfQQlqHTbDxvuCsDrKTm?*C5ai1CJL-<&mO`q%oEu7L5XKveY9$>fHMAA4V0B=rKHRZh%n;TpxeynWh1d&hHZ?bkIVVDs2K zt0`E^Upyx^05j)>D|a-{2TnV}gHgd@QjQ4}WY z2bz4o)$Y+(_60^zFe-OJ`l6vreH!Z_D^D13L$f)6XM|ZR4SSxpx+=eP5Gi7px17)i zadUMYKJwOm58pP87PUvTHhsuTlBN=Fz+c{DJ^N+M3=lUb5cGpGy{SpHU9~-@_J*t5&kIT{ zx<`jY%8OSTv!@4)X~OC1^Dt8#t;DX58;w=US29wFgu#yoi)$P85`yyZ!2w`tp+Ykg zdH_vl72%aKPs}7ctt^N54Ut?w=iruQapXZhp%k_aE3$trc`4~-d+*l;TMj+V+2xzq zBBK1qQ_f<&RYFAyroQbHrRL+|AvtwLz5YXP!bEo2@>5Ixx@;VTzby&5wkGDa`R_*@ z@S#(5(+P1FTXc*57mv)=W6sCAVIhw<*_r9-9m9H}3P)k@y<39<@Q@kB?io{7zj;;h z#>dW^EkcP^01#K5?oLN*9Midg+Ln%>e394EeI8Yeuz5pHtfePj%D@w}8M@1$0HbHC zVg^Yb<`{@h!PEOqJ<#3zW)9TI>b3~S>BaA;yDRg1($FrM_ZU|bMj4HAdH0S!adfmEG0vi#a-2*nHamP8RyNb854h=Wi4HK6akqT`L! z%#V#9$bLsd6RTAG4og&M3Ykx!P#Oq|08Pu_-3$D!-a7q z5^c&6AZf~!MKDeGNaw4l_sbu&$xuL7ubb2C-KouY8RhM6 zgP5H>{^D#ch;ETD)wC<022wereQJny_os~xJo+(iW%hl^stjL56U%pILm*dF(!1|w zxPLdU*S9?P>6B*%DGz3ggF2#5q_=T~U?$@y#6B!Lrs4{+u+(>7cl(xoK_IEKdjM7F zr>hswSRG9qp7-;+&@O%!r{=^5L7|X1dBxgbZnlZ_;>zYKm6{!u!9RMw0umM$rWcZG zW~;xM5Xo?sO4BnAQ_8`}EmvXpQVgu8^L^tA+WPIS+5X<#K~->JR9v^5I9sv6__eF> zXf6_F?09*+r|!0D{2sI5FCLpc-EoNS>n-m0UPJ0j#LJ|AbyvGf`hOykosQGBRLMXzZ>B`jMrFjIOUst%2pa zg|`gmcn~k{=^{Z0Xq+gC&T^L&I8l4&8I3*UKl|H`1!ja1^%2B~h-MI8kY+D0uJhU^ zxfbzODI|?n{obp`yXpWR!i zDHZdJE{!+Xg(y4g1H3;$*IHq@Y}2Xl4z->YEv$`U(*(+Dk(NcMdB*@ejb(XC@?%wcC_ zxi+KAQ*atzi1Y`X(*=UA$>x#@O6T_WC=LUU#WSR*VRO!0|@>AJ&;;o=?rYNaB;}O);~gF^>g+&dW*b1aVEU>ZVb*bs^5e z`kHSl&b=uWJ0j|B)81d*1a(kWD$nP`sFYFqL?v;Y+7VY7Fj0HxDG5L?y*B8e2kgC; z!q@rasOoQHmdlrzO3@k8=6`Wx z?lB?NsRxYPgr^^O)R`;yTzcFg^-lZRi>02B!@ScSHR8KJ4$S`h9J)b&ANO<4-zRa< zO0Ia83AqZ4XnkV8sqX4`>Dzx($>iC5eznx#%W1uM)i3e0V%r|;1l-NPN42N_9@V~j z4~%yYMXWoIU64oe3nZL1K>>>;;JF=)4?|cXK{Lcx>kZE5?JUI3&v-c?_fM{7R&Eev zOOpvWb-&^HskEr>U*VEf2smmzRF6LtG4FaYQ6f&4DOd@GwQSmE$`t>5uUs#9FA|CK za(&f`{@+!>k9u^9ZO~7(dEvm$a z(Ts6r+{0aN4+C)7AlzSdH>}=P>MM!|(WX?zINN(Em*(9c8fZHvy(FX;gH+{)iR}%7 z>0D7??FJ9z&MDa}JL5sI>8PKb&ck%`mZnZDJSwu>cDftEE|^V}%0`%>;jU#F<77F{ zr|4RA$<>QJ+Ug($%p;}fOu4!SuVUFnP$8d^CH8GxW4~z2=!mNK$mWV|iB78ZkV|f; z_O?Ve1xvf1A19zK@Y(KJOtWNOry$F+4{)pX-!2doM3#)kaPNw8TmqBFO^228!XR$?8{sR*!6Q{hmLPUdAzt8&{m2 z;Z!i8g*!B7JSEvuk6$V*MH?N?dy_}B@MkDR*3xP<=c0inRu?8#S|O3S+;@DMn(-j5 zwLZn?E}wTjh|($)%a;pP;gmab(tOuCG3)@Dd8QvC2J&MCPV592bE6SYT z=EM_|?K(80F*Rc>g)o=PtC@G$K!ge1l#ZYGsx;_S%DoJ<9-Eg^;}8wNYUxmdo^%q| zruQ$N9x?dXk{LOhX&GCLZcu^@D_(y9@CU!ZK1Y5wce86^1f*-8vYi_hXEg<*Pjb+T zO6lt+RV_mJ&XbxVVl+;B@_n3!s_qyAWft8YkFc68x4B-cO?tDJ55G0-i>rABJXk7EcH`pLmE;lt=#Vv4 z%IsMro9I#b;PS#iKRCX(1d3e)s3Ni=xFvQc+>BB3?nKK>zb8;P4v*87mR5S zi%Z?(RjlmoQIx2#v|C>D{DWWI4e@8d3Up2LifC)DPOeW_2P=`-!aN9hv| zw$2vDc(R+Q1%P}?7gqO>imA~bS{NpIZY2ol>S&zc&bk7tBD9cGgl#oZPvnBRXFR&q zzq&!sCC3jbj0K4#4GRX8B%`L7aOLXy$ixeDxAMM-w6C+7qq&c**Q4$2_Q40;PxUW{ zaCNXjE#e8ADsyvFI45c)b``c-|M|AV_soMSOT_-sgYF{#I1^>M#m;r555M0%uXp{W zXCR|dY8zAgov!{%?cT>OlkUvH9TtCLayhsX(bAS1*`Y_{bMc6(Y@8zO1Kr|K>Y}6D zjz-ug9Y6f;>)M-Q9>r~Y`Xl`vnga#c)kZMWn97BR9KX+=E1YKa5nwP~1eaeu;7DsZ zs&0@mQ+uO$c$0g?!jgg|e@@X;6ww4wjkOEuy+mMCORIT0y;YFd<(-u-qvk-XV51jk`zd)2ly1&1Af=P}`&!SNmyJiol?A`lptXa!Ol2 z4dKdXUn3A>xNCIZ0(;4G!$$0dbGe1Pfv9$S!f1C|n1qJLN!>rI8y*-e5kH9g#n4s+ z3v3q1k%Vp4$LB{)$iGt93{{Kwq9$fn)#n9-0L!uI`2%xacdkx~>-z#TyY&>v3s6Q~ 
zMR4l(Ip2*W+>dZ$W_{<=TEt@gU}krU%8$P zxj_zI1}ChbESi-dfXk@K{Fe}k#IMVR*B%B*QAx33Bj(8;My-7>S~XkOkwjis*FUFz z&UibUqMK1GS;O?fx?$(&RV^u_*C`?rO68C%TqrONlI__ff; zoQ%~3v0+2lsT$^bR-auYqa>x{EQZAojV>m;V`zZ*FvxDHW2f-(r71@-}P_tH5Kd#EF9=PpiPv;8Le)@N(C`!V5cF0}Dxrp(H11`&TcO&fZ!-hs% zt3IsJJn3vz1EB}`DHshu0~fL-r_f(ONDgL9S0*k%d6&8@hDqHlsTRoo@X$;E@uv{)c{R4eZqugI|eI9bT6 zAWGwMap~KyJ>Wp=)| zjsfm$y!A4?BiJT`VJfl`?{5^|WrPd6Et}Gc0%Mxp=V&Dy6w+8UZ_`*R!dRu3Gw9kW zDXP)9u7n~p&N%5li`(43MU*1|Y3;P)L?&F#MO(yCZvoW4Rh}1Aeyg=vvOO@5DyiyI z56XQwhY$ML6!^06x9qtNtA}j#c@!!q|a<&Cq!A?W(j?sVn`}{w@NCsh6;eGSf9x9vgcRTI! z@8c|}BuV}Ndj$D|#LS}V^QX%&6XE_7qWJlfiYO|Tk|yc%URxqS_u-JX_DpP;Ynq&v znZBH&m0u+$aIsm|m0He$lru1?nr|yn=02M3dg@Nt4i9jq%O{>|6LUArw0yr9sL*v= zg-|QFFHk6ptWo-_sC&SI(7x=#gDbO5A1;E?U`(}sM6EXW7V3Cp7qr8kC(Mzw@K^@`c+!0rmgrKR81cqk<21G~V)j z=;PtZ>rjn0HP5Fb*+o^jTvWfVi1B8AQ_SuxBmh;Znak<~-r_>wE4U-70I_d~P6ATAqY@zxKZIS*wLcET(7e zVwZdj9;}iul8o#+RW3n*jI>JK%6phU_N3NgMB0s8$*w|%cf3!F9LeKHzj)GcZ;xZ~ z4e;>On>%iY$X|U6tqgfNh|R|ow>|5s*47-7%jey-Z3ty+)Gfl#&?(Xnvd7cx;mRZ2 z0D8}sYdT$p!DkjgM%9SdqNg-Hf|YiC$`LiHykmCGK~GXvB@?cm_fXw)&{lm99!cRak zzB{Z|lXAU~&2#iOU=LxV|5&-t@@y*o#gl%s`(~Zy?%x)-SB2z;h8kUper}({*#Uk!N+Af#a}Fr{7$TKmG`>`voAot-#gEL#AbiXYH%4ERDB5yQ{gmoye%U zdhc=w4X`1CB3Cmecop%jZ%)e|G2D!GaHv&>V3OC9RS}o8c|J1o?nj%5$&3teEvSma z5M8>OyoRBiYBuL0!9QcsoU-vetUx!>6D}S0;`pwcBXx7Z_ti zak=wC+RYWR;p@wX?-djvb)nHjd?Z0OBTaH;%?DT~p^AzY@i#`eNJtWX5NjqLbpniLiphnf-y)m-#k+<*|g+t-Fa1T>hYFr zL>8a-W<*4iNtO}1Mqm#d8hQ%a){fW5Y!S*E*w?S2gSKLY@9a^G+d^30eXa#0HrM?<36{QpI0WQO6$7=X< z=H;HojZvfKl1Hfz0W#IVsuMC%TUftE>}a8C*-%h>v!l%daKYPiKiz41+cKNY!K0;) zA^~JzOTQ%l4{x7!CBEbkcQwc$@JiO!lyoP6sAbBB_$Az%NfxAhi*q$EpUv1wvw>i7Ud`4|e^a)Aip}%?!XIkQ zvR@tr+=n*L<>Yg!Rt!c-s?8&9p3cLQ`VK^1aeNGPEY$?2^Um0~)c~kSdF?=z%{uqn ziI2rOzm%JlH#G$TD;a`7HQ_lj zGBT+O8VPIS86B5^>pZz6*{kieCJtvs#I>}Nyr4dCvpj|%TbCp%b>#LZzB=!hH3Ob~ z{s8Fl4Z)l}8UfvrK5$RJG63R9pBG-AfKh*Ls!H~*_!b!ctleVINZ&F8YY3vAz zXFF)){l!gXGQ!0pzO@!!UWuH6|I%gN7m>`uMB@GGlKIJ$4z;L&SA)7FNnXBVwSW(3 z?T5Pisu8{bWD}fz+eAE4Y<|+Gch}Sbv}^LxzB{ zFWl~QzIku_TKvyR$L2%CjlGV6wT_2oWZdV|v3>I6o%7^vPkGV%iF@Yi*tojq>80&r zDPyh}a`St%8)Q%H5$U0waD%mT)Y4f5J*B>Mx64?@>SqTZ^_efRdH&lK`?AmZH~|#{ z@BTg)@Q1g*$nvqqd2zqRzN>^M9`K-+b3R^4o&WPaLJbhNg66{1Q(@fu3Fj+v^DQ%eA>f>d(PX`5zB80}1P_`)xc>3$YxLONfbB zy(@v010!pL(A!I2gd75Ek+6~ch=@oguh!3Rq<-0OqB4K9a*!yAL;@*XKz!mYzm%Q- zR7y7cxhmul4HJ=v_;@HzmbZKTt3xc@PDq&2fFf7wygOncmfhs`!^5kNr%=xuYIkDp zm{``krZV=Tl=@p}(Y9ezR|4x8T%};^$*!-vPi&_ns^c>?_0!%VWy=gRe)5Vm@CEvG zD8j+;!Ff~fU-4I$Vk0wUwzrUu#KIwKd7%vLsow4cci{6Xr}ltcA>jw}QX2&O6cF_L zr6cQ4a8Xi^uFAgKlXOu|aCPlBEyipHmW<2-oySqA4J#u5O?WZz>3Hd$r}38eAB~(Z zLVZO`H~W#O)RWD_JUrt6d^d;w>okLrhWO7kLle#jQr`;Bo#P?$@LUD`v*A8WYEirL zM6pCwSyRpp$F!C_SC{};m}wun^jxapbmd)9M~=D&_(B^VYAk9G82(-RQe51rCySw` z@gst+TFL#eJJUX-{vyAV<)RKe`MpC5M6ALEox2yj)lNHN(oKx7-V@{HOp-ue##!rq zig?}PLafvgXyav`8@q#06cYMjy|~zxU{N8{;9c{Lr%7wj82Go}7Q!YY>iFgkpLUOo zt#EqjhM-XX=5G1-GdX@(;tov4tAm6M9T5Itq7ZARV>ORA175IAt9^FGAX_dAbVh(2KCp)Pu*PFFXgC>R5P=}|2kTo#$^7>wtgUoR%(FDIzj_ zs&Wq~jQG(B@x2S>Ilrq5t`P)G5;@vD76lg#S+a~CuJv0Vj+f_q2NcqgPu-X*6`cL1 z zbH^@OnVk}*-@oow)lH7NZ$Zif5+>?`4n+i(B(?B~SRQAa9tW=f>GbU;N2`V(1Ar$d z{`HaYS+UY7Q|p@hi>K&i%yjS~$_a3*+uJ>H_P_o4q+Pbn749^T^TOG?teF0vEbu?24yWU4tzQ}HnD>PU@NlcVbOG`a?6UKL>UUG3Db zgc=Agr|;|2p=%-+0fSbL6u&P@jQc+MFpF|YAbDQ{9d>3$u}fL`MKOgp|3a?HWVd2w z!9hI?O+1-E?e%9pXLPvD=w;g`;hdr>k#h(f0KgmmAR&hmrd?QiD)PGGvN4lf6U=hO zrCPG~6|uBPy1S2I9QaC-wJbkE+NpZq>GRH|ncE!`Cjc&2#ISsWLMB3(k>8`nEV!NOy0pKC+LxO}P+ zF;yvkpUQJ*WRr$7l4jO-efl*XV}lkJW|j=e5wY2*h#G(X8>wr(U*a_y>*jsBRVso& zi4D46!x|@GWBUv@IHmeAzjbu&8G;2|(<%r0&7=O!hG7=L7CMlOmVYnsLQgCD%Qa;_ 
zKE=%Y=A2p;hJTaklJB-F&dDe+&Vt>`X&FZP)JpwM=P_j+wsYIjRw=m*>cp7ZOkWI6f}mZ8h-GTC%a)C9Sh3 zK)Uvl(tFD4`Sm|kGaqM=oyt``RAjJu77Jo` zz>z{XBHtFZ^!Wowl4Sdug@E;NxLaa}kncA!Jw2OnJBClk^`= z^z!vOM%Z{!@@eOPCePn;8|gxqm_Iq}^6QWC|91PSo}%9SSgpxRP$M3%HWi|~=H;R= zY02{o;2&$DAn44+akf)Cbly;NvPs59w*}>!hB*PNE#FD<@BmTAqiF@UzUi=j%e$+^ zV+-@5Z(w?Gy#>iJT7Na9amAV>#gi0xm*VL=wD+_@?Z=@o{G>W2h8%l9U+QDwpV7^gpgR8my_`W;ev5kp>9Yr}AB{r&82IIg>mV+jx`vUv zcT=(EU|rm(V>zCf?WX(r(Y7x7%8uoD00w!hH^-_Iq2rw5;uC99?NR&QLYg)CKtHNU zyDv0xNwW_eoPh9<#FdPK)>ABKZYz&dQ(2*HjUuZuNUH0D6ha9VY zcKf+{Rv8t7$QG-4^ESKdSJis&Az4`|!lUq4!V_ukluNlN`{|8}kVsofUD0f$@;!^f zqp1&T;MF#_vj`*@OgdYF6*;RTgz3F@U-`Z#BEza&ct)V#zS(=#vHG1@6;@Eyq~U7k z{E6D8>()F}`g@<;8R?9)taWZ*r_E{O(Ea0QpFQiV7KNw2TjL10*Zk~GZ;sQxs2&hf zM3Gf(nk!5^)}+<=JS3eeN_ySBU@9ieZ6|OA^&yknSJgnQgrb**U6vUSzxijOp;-U4 zI9Pedb<1)!h<4+*Q|6yVJ`Oi$F#$>{95~^>dD(KZZOLlKYAgLq(Dyf#KMa2s{5aqC zVCv1`*u|RNkBVVV-pTv&Gg7cf5S`ri1Juat2M&$B8}0O|!=wEecg2mNWW_J{Z9v@0 zjnH3;UkBt|xvgq`4XjGPngM}ZUO0mGdS#g+>Qdu~L zPmw=ZLb8d`JSe1hd%C6ORrn)>I{bkuDxB^f?QUpG9P$u6{^IxK>ifZ3a!<;fEf~qa*T)fZBfHVtZY;GjbUw6VH8)*vhmaSQy?{knt zqkesho^t0hyo}Smxk9L=N(5#vtkvr1hFEm=yJDP0qUzYlS>9<(Wx< zB$tN{JKKN6`ZXOflAC;dp5Dj0BaJ0b#X}6+B|pF0&pe4y;#j#>fsa7Ut>G2qyP9H+ zbnzudbC-))>fTESuOlsr@|sg>iHRjsp>z~OaWgC~fi8m+)IIH+PsV$v8xAB+G`T45 zVgl~WBE?U-{E?$&u{L+Pxi;&BQPVIz^k(gGkla{aAxhInqac}*4yxqd*mATUwOM7v za{V`-IKIj7*anj~uuLvOk3Tt?QTuDmORGVFmHG|v%Q;y!9(TQdA}l^(!=?{5Y4-se zG}1zKYl`k(fbORqOthS8r4|&B%WHb!8~F}RyU z6Cf4&OG3G}`@{0{vi?agbi6w#&0gYj*TUit-dh-3*Kal(QCX@ZO^zQ-^0yZX{r%3o zs&>{(Ef8{Rp{0dqcH_;8EJJRwZ56_(3gCm+_0~0os?zlz{P1Gt3~iU9id4o5^915l zxn%vo0IY=-g2XQg2t^WU;g-n@-4hxTYcSy_=D>#};5 zT*wiEPt%4AlArC9*G0^jI}?Uk1!O}sDDrLovdf9Kq@4}H$XcX1j2MubAn%MV#|t^& z*Jnd!+d*=s>*ICSD@)+fkA|w&3Yip`*`@>!N_!EPpO_v{=rG#&nUqB1F6VH7#z6r9 zC-f81E=6^G`>`Ct^bcYxskUCXvT|MC%CWEm*#mTu4!me_IkN<${DAiE9vFNl-l1FL zwqJ@M5c&HV3wOaUTMQ4wsF{A~p4YF;4cWP#AQcCWCyxoVG$TkVYE1+F`UZzB>}C z9`xNckLKo$Gvu;b)l6w5_ihe3skm%XTRU1`ukGiQePQ8!6^R-`}qIr9=JpNG zA*?E3F1GAKw0SspymnW+5B$2zfDvs&t>zvCXzbC;=g4?ah9|PK{BHJ!T43uq3Zw*$ zSrMluT;^;V)ips7h7b#&!!a$-QWUr#QKQ~Qq|+uC6Eu@hU86_z83$gYtwxv^IY5YN zb%vm%y96Gg=NbFfKP+p4KO3{XJU#+lq?6ZWgHmQxe3%|JXwpiID#$R)RQL<11JxuD zleD0+mQ(qGK$jOfQ||noDkW`TDqZ)mYc4Hiep%jvSw7cRfR2@tBO zQbg~I9M6!i?&z2JLRhG4+9TYNEilL2EnDJ(Ep@YKNj&nB$~AR&l|5zqhWNdW z`j7sdw}uvHAK-E+)P7Q`$^UEQxh zUbHKOiY_z%O=(1B$2iFFO|fp!pP%874sY%IuIq;BEtnVEuTGJb?VfM?zdzr1)}`jx z$%)1P8v*x^{Um9v4Jlh+B;m=e2jj|Z`~S$!Sb@I1v=h^{LyY|4;LrEwGabT=6<^an zhxlMHf|M|=r|1kc4_Tzu4NmR}~{dY}bJdHBgMVV6m>2M{7OpZ(c z&dNsZQWB}gg(bBs0rgfyiktXT$7%ltZDZ*U2weSvK@|6P zu!`zZQ&8f41#l-m>yA&OtP~b_BI*z1f_{nPY==Vztn6iSO)teH)+yx0j>8E_putjzi@yWwFo<-&^iNn|IF|5xfkp zE0rSbD_7hq3tH9FJvWT+BSNyZm|#IIP(c9IAsxC|g`F@30)#zd(>q7>sILE*d1af7 zT**aPokaIo(;)Vu=`RGB?0_$6%qLrEPi*xeTPi~7HQ3^bsE1F7n|)(#Jl&C}+d&}Z zSP}k&mM-hDjWZ!(oWU+hU`_;F7T%V^5PBZ&R_(dd*2-tkcBc~UV4uci1@+sBf-Uu} zHpV5{(A5d7|MY}3d$^C9PO6%C4HVx*;sW3hOWlNN?-E#K6|_rlE=D=GcdB_yv^jyQ z-CzSs%0bJ;ieOG)7iYYaq^TsoV@+caCZ&d*)MDbOkuz6A6D%9PjKEt-~5zOyn8Km?#!pEp} zA!oz-Vi<2bAntk3zCLYDS2tNZj+`aqYpx8K%rlikH%kjwQ_35-C&9wwI{5)VFh^D4 z6=Rcle$pKL?HPx6rV3YVqeLrvJaK@qtsTsqZaX(l>u^vAjEc{yL%#M#*&g4#VhTf0 zjp(c~_*`;jaw4=t&*oVVA(NK0Qa=htC*z|of$DkH>Ys8H%ahK%#@BA#M@lus+Q#Nt zn(bzr6_&__vEi#v*KFoX~GlUYv)t12(J0-Tfq8yg56p)K{e^o zcjYTdZ?n(x>gk~*8!KIzdoYBwdu_g@-s$Lyt=tKjCPmE&_ew6b6fe!c$i&SxO}Fuf zg$buvNEBxcI4Ty0X5~6JQu_)m+~zTop#hzn?~dR1@g1LBbl=!IjyaJgGgMf6dC_ms zUm?7($m4s7;4+V%+$Bpk83+dy{zQq`+CNK0`}35)0Fg5S+ejXV@Wrh)A~mFa(r3&u zD&NX!You34a+FgmqOGZkX^Q?RkL|kqU2gvk6r;%=;?Uy?AcP&(!)9S;E1a9wWE+dO zf*MWj1G6+rZ@X)JsP4wiGzkfbP|yqck1B^n);hpp($yaALI_{S9Zg2~r;2xtUI`JP 
zRo^9A7733EbU3rJ?DMP?)b%cv2kVY>smE{Ij_1)O&TVQu!;ys21U!iIT7#v6N1PCaF(C9*)MXB*8>H+hDoDgO+I@N_-nP9qtDRvg52> zEYlR8Y}M1kna=%XFO0V(Yj!~MG26{~72azpMf9@WI!lgt!OSA^7d~>z+9L*bw|Hh} z5Zl0um4}Z=?IaEEX05#r`(Qb6aodkuTH3Ml@+b~2LHl` zrWk)}!^@ne(!ip)Natc~#nf-uZp%D7QXuxMpxIbFFoYmnnD$Pp|m( zK9fiav$`0ZD-t8(jDSNW0Yl@%G>`UpF5Sr6r6hez;S92@sHmTqmo;^!dV<4=hhczY z>d&;VN9?!|O5U0oD%EWduKjMSc!PJDPX^EY1Cu-pDN0HE+iZ?%9&u$110#G$pa?&qo(?Jw}A zOnlI_*2NFuU4^2HgA|`8N*e zj}GjsK7H?Bz}g46qeV4~cR`>8j4yRadgU_L!H2SH`80A=^h04_r|pyZtgv}c|JY&e z5LuH|?6I%b#ov?Pjog!)mU2?ZB1K* zl-Vr-8069;nO?0EU2YRqUXr%GuXi-z)X$mlQ~6oU;WQ{g$bezm%eesU-)=_ z-mF_ExW4);W@pTBM8YV~+YDXV5j#Y^ z4CGGddPr*>p!Jnz@>D>rGUmPWM-PYb1%9jk7tMngNBtPtBFTfNAzTxejJNPbSuOEW2GtuzFd`QPU?97XrkD~A2|9?~PA5D;$ z75pDtY?)rxp=}wyurs4;eWf@J@tvwrI+vK269#Y)xg)C2tES6u)jV5i?u*M`rHEhB zof1)h`yk|&;Dm8*gB^lpn|Y>i#d3%gRgNnPgV$kVZ|eNib5ZS*S;H9M))tb<+TjM? zZa*Nemg()JLEroqqesk({N8JX^@O(eDy%xO06 z^Jh}}z&SaYk_3{;4JT`n>&eupG6W_g=BCy1*sCI~jHvpsE%_69WT*8Zx1AHmgUv6` z&^u?1Qi~ep+94#rHAx|p-iN-J3|950G(Vzom`_i@i;71=o6l#bA+MeuqR6e4kNRm4KjDsZ zwh!ea(;zCbr?p4*Z%nj#*BA7zMYl*7k;#<9OvO3-yTQrCcn--YDykV~CesCAh_0D% zwz9v?wnN$5j^hzmRx{6Z;u(vLvH`C&Rv*{!{NZcl*dgyPZ|r9~rJy2RR^rs<_=ixSR?O$%zwb-0t-QIK*yS(8Cj@n}nB$d+5wOO1Uv{dl&-%s>WR z$~AWY7bTT5`b?oK93O>YS}FXQI;QW%6)-~;mn%fIo6L`df_+F6PQy21Sd3c-?EACz zo?yy2pr|-BDfr;gPUL+@Jpdd z+sbRPa;jJ>TbbwUn*h6gDwvGhN~#IX+D%T|y*73~&-hxCNxac1tKM=Es=mJ4`dRft zgk<46R@!lzr^eHO5xP+fDZjs9oNe#5M5-I{8d*i{OAuCki-^C=@H5ooFaF>w%m|y# zbaG>791>#8X9BEr$iPwtZi7Rrmy$m*Y}{|3g=DQ@ljPY8NX-sx1Sb5@E5Nn238GDn z_2tmWl=6@9wq#dhN_DC83t6GYTd{8GVwXZbYfz{1<`#?&3o%=*&4oT|ii^b=QmhLB zDv<%QbipU>@A!=VWs&+fmtPa11T8-f;n8pMkX*m2mS0l{k>?6p>>lP(K^r%I{}o5d zbwv)3`0=NIQ`T2sl*}4wR7*>uLBlfbr3_BVvHlGUtZWcIs43errQX-E!sU z2e8c}SjD!@5At|o*^0vawL{VICG);5Xft`=#CS`>#nt;rSv)IeT@O-jurbr)?9p$g zsbw7_%RAs`zii8@!}tA^`!+2Q-5cz)E!0^QEp4MHSf_73`&!2YnC@?Qjql3Kugv-; zxqTMCv$grSkyAXff;At650u>;Ma)Rl zY}IM(m<=wsYFEC(C_)4u9lOfuUq#QhhfYrxnyWlk@=$&=ObHJf;jC#ijwkS_vB)*t zOwe$Bb@GhmX>0-jWiFXLOyB8USqPmQt$Nb9Fk89z&3`~8cjl_r4m4bUcVoUUl%kM6 ztPte3<{Gc1Ud#&$)?$6ojf`(632qjxxBfU`EYMqwbgqqH#~Us-hNO*uXfw5tRP3@W z63Hqzo;jb`yB_?mu$6L~t^^&NN-v~2j*6G&c&Phv7gut*gQW_KDO!vzL*Q#!b*eQ# z+t?u$Itx}bJ#2NkgB~A<7y<%)q1;#2eL@#g#qx&E<`+fgDiHcyWhAXHjjNAo8C6-x zW`~J-4_eWX(Qm_*q#D+!ZS(k=atAAV;0u zYA3W)aBNAa1DdzXvg7S)Eb>;q$KI)|%Qpq@QSJ<&MH*ut=_%t-n3^wA=6DrO9pFWu z?y~02@#vDpxqQ=ySvJm%P(zejzLLS}+}xD-rDJ)C5NnpVw_j?!ovZuzX(tyZ&@4wv zNQPWO-l?NyeO>Wx_DBhewdiIS=OQXyWJm95*$R88NB%>Wfs`d&LZUSx?;B;6(yEpD z)7_d0DLGA^u}*NOX;Ep?YiV;6$#hStH~k+*YRxJ79GID+V97Akp4wMeR~+PYXqtRX z-P<*wvMRe~QNM*3FJY4X)-jX$`e^)p0~zs46*{xMg!w$xYwz&!B@+o{*OhAwV&=cV zk8B=`%665NtIkawwYH$lzir-05~>-T%X!9TskL>&RWh=I&bU)Ft36_|^*rqn%$Cy! 
zYbPokHUok-u37nhxz$yytLp>Ln{1nmT6fv8@m87&AB;A97pO>NK2E}Pe2;j~E-s<6c z#KD(|FC16xT8;E1Ga+y^$?H4KshISGvYGSl5Q^ivrhim49cC8W1EXGr5Fee# zguRMUR0Y+j_7syGa}(y*bUI*4tdYk9xqPSZ4F2x0vlgLKSb4Dm7B|>v3q{u`w0JquSZy9cz;2D zO`GJCstw=UBW;*~O%vk8mGU;%&a7mzhI;@}GXnnEfRvx&akH;=Ova^6fxkRh5ugyP z&eM;Ksz1_fkx|#{jdI|8?2JKpS$4>0ibzb{(w3v&!b4y;o1RuJRhhMjv7gjO7RsTP zQD7?zT9ozNXq`^$h8zBtwt2qqkhW z`{2zyY?WTYquAN8j%|9oD!#t0c;XCk)@#|Q;qt*fx=pJ(KD*w+(`SFMa*oV!54*?r z>$&td`^{HP^?ttd#^PAuD711z`0c~@Jh1GKh9f#Rx&kd#lJ|y<_r|U3FQ1FV%%{Ho zV{hI2bTvYcTJzQ?Nil2fw`j$$4gF>x1X|-e>N`YQ;YKMWAO{eMBsJ*o}vm)`28{aCB*}hNhxaSCR-gwg8Vg2Wgy;?lpC?%*F6+JORNA|k@ z&W&;S{jc^Cex(@^k@zme>)I6iO=e6oR$z&)8aI9}DhER<+SdZV3B#OcU!Wj>fv59h zepV^I_FPdwFlJHTa{b;cAKBa7+r2kZk}X{o`2- zh)TU9U*B+Z~B;Ozc>}1;0)UxR3Y8e7h$J$ruSp^-4~2Uc6_=T zELBn)e5$$^BX#e6ll>1U^zXf**$2E!V!zMc-VgpWxzCR?`;Q&IG`(b(5LmOEH{arv z^0Z)9k0h?}Wor9ohWoTYuN`;In&&g5^PTwV_rm_>Vqlau=4#n)FH zx>yLkr;s&Jre-=1>wGN+I@YVmVpV8+1lLXg;H9uT$lA^70YA6`_{ii||JYoxPm{S| zt>Vrs-MXkOWc=BoBEvhk+dW!8t#$@z%qDvHj@~FP>aB4|;jr+zYHI3|NidiIb-4SL znZ0Mb9je|1fw1j;4c8qqNdPlYI+!AZsk4Zo?`-1vo0~T>^M(x~+wU+zt4bADk2vjJ z7@YHJxc=6;05&|*1-~~^ZH>?xED2ioK4oYw2rJG1d2+5p?*~KeaqEQ8wZji$CsRtM5ugIG5`)14B!+uynHw_ zIhIxn8boZuiswK8&QVcIG>7m$T2P+mUCZ!lCWOuOy6BuskC`s@+W)EJE{H1B#ffgH8Q7EtGWqf|E{}RC++w= z>Ft*?3*|?$cHNkdtH*q8!gM_41yn^t&Vg)DE(F0RwJav2n`)htAkHP^!d&*y_SLbG zMU{g%T%q5)aSO!}vU8gA<_{snq%>l_edj{u>Xm^Vt-0Am{GCy^fQI+kCp%l5TE*vc zr8<}2UA6BMh$C##jG#{PZgut_jb`oQe-+^c`lFSu6ty+&)v>^`ISED*G~ILFV=k~N zcGq55(=GBsPT(gkj3B%5*^SEdk%aWBLDf@SJU3n$0>hs}1;6PYj+Yny9<8omz>^v4 zOWUx_$_5R;$a{4IAB{~X#HDFA>l^R{yy==lxgf`3GXC<{MpN?<>;iE(8Vpkc3g4k?2)OhlAL){IkY3 zn(EZD#zlq?3Ev6Vcy*y<8#vz$i#&sS7P0l_$_SD{>f7yQhA)?{-Czpkw^JEA;Ih9XT z^Dm9_mkYqo@7~z%-`%sZp_<{xU-Ct~{3_d?qPsmd+tpR&BV5HwCi|UNW}!QQm2n8$ z)>?G5Rj5(UM{etZrFV8hb-CjL25^7Tbk@YT4XL@4fs*y7^&QGR*|o2W?}<-u{~|?X zY4lc6y{_n`W9I_~nHovMyb3r<%q<1D{i;lTWkZgc98Jh2D*$#jNOFPeo zEVeGC#KJ;TN1IOk^dC`WpSfx%4%rdh9C9Aq9>@RdmG?=T*w*Q#IAK#)HH-&6S6sUyc;|o46gi4=& zOG&0ChUU=DP0dr_%~4M*_Xt09e$bDkJUm4t@c|`ED3d6rWHW@i?DT zrmt^PbEu)wkE^3Rvl^58o$=dV=3M>3u-K#v#MIGe;G&0l1%fIs?}<-}JzoZEA7Yz%V8#F; zrV_G{g$ojd>KC2Lwuu$={L&yBl#=OH+h+IA#q2kI68Lozeq~Xv2#`R@$v3@aJJ@fD zH|pN@#+DABmKcn?uht@QRA=3OWUVtCu24IKpnVlIL z6|vx4iLtizg*)`-mPD$_-UyQawge!0Bj0eX`I_td-CS1NSlhx&^t_QZ@S2MKfCwN`kS42R|= zdeU*L06Jb_x&HGFUVq4;LhZYe0j)Ka6WI<&IIFg!W1!jTXVhz5#@fNpGGkh0Z(keS zTN(W%rzzEX2IW!6m5ph;1QTK^s-_o>%ky+6@`H_0r*Mc;xbW~oPqj%O%161FQbAs| zK(A0S_yx-6z{7{#o)&mFTR4T}(yUgq-WuvwCZMM-YFRAqKR0{(&Uq8v5X1b<=1#Fz z3E%P8*Vt^%l^rzOk2;8pw`IlKt|k!(lej>ou%pl7jwUt5{Zv-QE*-ZPcoKIv!K>d9 zZ;~@Oi*tj=)<1mmIUnuW^Cge%urN;>0S8jphF!`M=@<{b%`rqmbPqo@W4CvvS%HQC2#iv17>1 zFD`qd6QA!`I_PDAw+v1rN?FY-Fs&W2#T(BH5ne`B;0GjN7buf=$^o`@TaCqv!n@f!2%l>Cl?%Jc>na;GB#A3{AV|tz|Q7VH*UPoQ%F?oN?ANeHn02p*=MoC;g6d>4E*(Oc`uir;zrgbsA*bq}sSp&)HGk`C((a2Ov9xTDZ& z-2Y<haj-trDPigWfuTx5P?yL z*-u-+>6_3-(C7^4&6Ac%(p1P<rS>#-jJHNy^V!%Lj3$j^YuCI^Whxf#~g!c1D} z&5=<${9Np-TkqsRlqrLy;_wxUlN10RZG}%N@k9iBgn(!kh++)YTCUe+Gc?qrrPh3`jb;;wWpeUc-p=g^I?9w#ACkT)uz>0+=f|e(Yxlz98E9SabE+fe5Z= z0CR9GA7Ln}4$OBXkiG=b4#h1_%gj>fLE&eY5VL{tg|%8qXR6}BNNJy_)-f`}&09Qf zD)^Anxib!>#v3a6tG=2Zr8M1$Q)%I8hZ%V}RNWt>eE@f!p&t~=DW+;g=C_l)(2WzMd6L}X&b2r>P?7}y>-iwS?zoW z=mzk($myGe$>vNL*zbzGeVjgY7WRRv9ota*6M7tCueB#%N1UqCw(@&-;MmM+2>Jw_!KOzf~iJ%h{jcWTkE4a+giN z9UKTuzE&YaA<^XG{Gbe+2SE5P25)LnJaPKa8JVg&QkGCMV&lV{7KSvQ6B_({E)XQZ zc%fPuhc~ack&Vnwp530*x2eOPjH++zL^^yTi9K-a?)d&}h=?5)c%|)R#dYG7L)Y_#rgOChI_;(mwOa-G|-1SjE8! 
zr;1I5;r0Ep?u<=4iGsMnPRG#+Abp_o%g-k7MDkLPUYlcDGe>@U8gxE(Kv+}GcAf4N z|71W!0(2p20h&gF4r-%kdwCBL3h(CMe6M09A3<;__PG$n>lIpmq-?+Lx zjkf%S&yRd4@fSW)+ZG{CD}v@6bL2wwd_cwe$mT~a?`!UtR z#=h0nCv@h)t|gYC>=(SFL_ag_{QIb_AMRF%GjYorRC=jrPIWmiDxusUiYJk3e!qG? zJb&z(L5T zMN?!2qbH5J=_iRCWQf;~*B**Us@2;5vpbIy z8Q_unS5JL0rtV)}yNfYeW!jzM7%k)}3ETqA@>J(t?>nTh=I};RgU^Frkx>`tKniN# zqUqX^I%mIJ!rZ|R{Y{+Gh1?1K_9Ve@J7wn^)dg-TciY-`?r0=zBJLBZ`gTOea;jjN z2Rf!+4STwc(V0M>qbNpSRl(mTeVi=c7%AErU%EZxU7{TCaCKb_t#Tt<$`aZCvE7l) z;Dw@0<@^e`xbHW228$9&SpF$fisA$$3TvD67eJ92^&^MIui}{8QZPxOYRSd5{cM*T zEa1=?y*wEGLZ91_)pJiemlaGtRIG$EDe56Ht)m-3=3c8;3mbBHh;o%VEDzVMWjL~u zG7+1Vgix78Nmj#sbp|Y?XSDcR?JUYXbeUh;-+klx&$o{ix{ySL}~zh^gK*okO|KSvie;hKr*t?0+ zO&;iZ&&Z08DF{u=X}^%Y{3RF5jcuy8-Qu6bm9XBmBUbJYn_p}5e2swVh<2zR?1`zJ zVc@hM?P)o{xA*+lW}Z41LLuFAKW!ZU(q#+k(G;80gwayaV*@QAw@&mt3(o_$ zmioler4EHV2JfLJ!px`;8k4$eH^7)~%_Cne;Zn?hZ62P5qp5}N|E|dKo@amOT!=pXce@_v(=LR?h5`S7?rH?q{^@;4UJ0a0&hhPe&#Tb^EkBao zmqU|R{2$=A3iDppyUXtJ;oI}40@CCE5v~syTUd{Lkv?5KBUKyggtR7}%tC8xy10ONecs+<#+Y224bIj*3LermwAOB}9{$CR^ zwpsO!s+iSMYYK?y$kHodH4_FTd2A0wx6!#e6W|xAE=jRw3lOD4SEv@(1*T~lEuDRu zac=o17sZXfap}oY>B_%>HAntDh`#PgNDD5=5L2l&Z(~J8>2r@-Jgsx#9b0(Uu?PH) z`o2vQ8fJQ!NysqKneVaZV&iu_`o95F&^~XQbXte!#YK%wW!I!xA_sz3le+*Jd%-V ziMcv0dBx}TTp1f(JH0#(}rJ6+T&jv5_XPNtua1^gQUy6&xo?i;-~wHo!J&g!TQio z;%~_fhJ#YQ<=obQ-OSeg7e*Fq2O5irt>Uvi`*e$wwTe5;1h_gLfpwgxAv&p6pi^mP zTQ#_Khcs&qa3dLTXn54pelhse`5;z&SiJ!#rw3T$AsU|p^oKUOy)?b2Y37(RGEEpn z)u12UDy#yjS!HK|LmgNH0&Nw!dtU|ja4ZH20=Lie6d9k_05@IYsm?EW%r$0Zdq434 zMStRhYU*_h+>k%KMnLA-3?Q&p6}lH)amBVn*8yRV8gDy2dsaFD94<+x&Synp-$|<& z#Ps2+^%*oxOT3cdWq&??&$xH~HwLk5bt0QUn?+TR02dLEBs8G!V8rN3*FEYzAZO7C zOuo2Su+*!Yl(XzPb+Q>fK^=~O&Z;k5D+H0V_6A!iqJmdWrVbE$;T>X0NDM)pC%+@~w;Jzgp! z<(-~9jPO53@t)ZB?|VtByZNi1Gsr0u9mtfU2u+pP2(v|ty{*!yQ*n9{{hOEA_R%6+ zqL*gAO#;YbHSxe898J{uJXWS{`VP#X?e2^Gkg;c;;iKqq_XRs089$&woFUAnt*tYa zxqURh^5te0)tD=Y4Nz&SJ6vFh}3bePs4{fxVZ-iZe=_78bLFep!2QCMFT(-P?6IVovK&6ge z&Ak{BCP({J=(_pFHk6`QE-gi77*@@ih z^bosjx?3*DEv1>(-rQPwDmsC@`9@lyE$ePI$sS)fC`CJFjaxTM(y^}f1%v%8FZ*`) zI$cK=)zv^oUCWY#5Z%<6;toaeG)IbZP7T^aaG&X8(;#>IyVW1Irs|S!->#hjz&+K~ zW0`kVsQN?LeKm&O4r_TubrthGTKux&fypyK`t1n)V8?6=XKiDuPPFl?!c5N*kVmhO zi0iE3EqW6y5GyqYOeayFlPZtE+N~edjPu`iuR!vf3%dAGoyT%Ffo=Wl8xsMpV1y(D zW*<`vMSpUZtqU-5L1{V9`joLK01Cxh;+KX9HK-Zb6`kol(49S z=BlRIxOVfJ`A@qRu9S*ReuUI&(4a{?8bumMdg&G&%<;N8r)9O(X(|0&+lr+yusw+8 zgwToA2?L$Iy%%DJrtWV%Fp)*E3ZijU7?ZbGkXjND2gzo zC)87n7+EIl*K*9cs&H)#hgw6Jn?lTQ;scSO5Y6eC?ou=mm|tDPli+i)0-Z>$AFiVF zr0t1_bH(1rFXomfggRX-^<C@dvo9QCu`%es@{Rf=d=i)f|<|AeBVsj@RR0C9rv zo~Y3j{l?kxphK*&2u1xI-rl1n3u?h z%XRmJ!M$m08yc?ua(J$&`+0*lUqd}taN}ovr?gJ%kfJNc(%CTDwYE~!`T@nyYOlpC zQQ-aRk2D$V#(6)^!t)o_2Zje2dFC}-9+{ZqY>8!gsY>~|WStB(GyIoD&dg})cwAtX zQ;GBL4f^x7*LsE88Y^W={aZFI%9Khj?ZtRv>e>VI5n|8;YAN&!(?Q0yS{qH>79b1W z2hQOy(7LstM@aI)&tmOu`q;xCa@!PCRC1%s=j!CtQo=1TlC+XV&ikmSFnbi|egyXZtHDy?BJ=e$0|-o7xeBQ_*K zg{VOQ#D_D}x>eQLbUQzF1P!~3Ow)19zIItMm&lI-E^uJ_Lxd+51qUK~^J)-4(b7rN z*CSe6_R+RnnF^f*4?=8J%t(TQg*-OrGfX6SkTCrNwPG0)S?5g>+cI`Iaa;}P3-+Az zn`Uen?T8opaqb=}!JtebL1E5a7dBzDZ-J5(A35mH9P_q0gB;#y+sZ+yht8i3t=J(2m_6*1DqO7qMgibk~}0`aXu5-wTf^r z4KrSRL?2!nIMH?+FCm6k_d9 z42(SznzI^sV3Nl>0ULBkXIh+;&hn!C? 
z$4q^b`-tQ3%&R+YKdRmv-C@6zaFYrL*2Ksz`45dZMCg)GAz4oR?SaBip4As>+i$Tw znT{*w8;&xaw@|$CGA@(KvD(_Tz%l1pl!HN@>yFjA320PtbpVAD2UOR5pL;fx*CdhA zH3LMD0vObi*~lgm-;GTBP&I`W2egR3JK zM)MAJs(4_{@)jO9Rx6%Cz8f)GmuT%aTFj0VydW=c@- z5CyOfpgtvgYTCAJAlrjGH@PLG+36gw^zLb^qHPbR(R(pI<0oOd*&cN=RSu1#USSmPUa4wo zEsu|}^>@@@)viRyV*sYFp(HTU34{`Q zpJ7C4i9-p!NPvVSB$1L}2xUYB3B80~M0#kV6am2#pDE8g^?cv&J?~#9|LnEa-m7FM zYp=cT`@XIVuR6ClS^v{RK$fkZF>X~g;Zapq{Ostp5w6IHndk~8cv%(_srYC#=DC66 z8=k8_{PlI}e={DTIHGm|F5>F^th!fEoD*ej@La54rnn)`9dmP+Bt9m> zt*({)tkp+F@vqIHS(Nq8;G>~qm*Et)e|4Jf{d5g0%j5SK1xk#wL|_htTB56hs5FJioUX1hiK%xGq01mWMJ4Ws}98}@MdfAs^#(- zh!e4o23Eb84bbxq1ixRF;By8CrV9tpMa~!xicfFCK>H@yaP+ny{2GdE^BJJbRm;iL z=_ITKmSLt)b5%@P5~*vQzKHmsCYMCNmQ`kElT4HhsO1GSxH6}o9Rp3J)@wPkCS&+9 z0st0ZHQL8mh0wseRzUD@lV;AG+9c{t9(%!~?{i*-X)^&}?dOSrk8Tx?mMjqg*yV>7 zj1E7v`b6?6V;8pa4tzq-rLNh~KW2?=Y>{1_BC2{C#1+4T)n917vqdW1!7iL4S4ZxO zRNt4-18cYKc=%*@^9racft-jjnmV2V#rxMY zT4iC-w$!J3{RI>tI>YB0>64JU`UI1P(wD6*VDyfQpsIgztFI1N%2d4 zxfGadFcu}#J&-YAs*!tvbyBwH_R-8qv^sQ7bL}U;-t^FRglA3f1V?JQFG*UWs!Eej zlRMwvGg!XHBU0$C6!+GeoM*ignI)3W6g5lL5kAe+fh?SFjxVfqN?pg4C(OH&qo&Me z%|L>j`qc|`dmmtB`@C0z-Z%IJu7f-J_Yb^w@2j-IZdm)*G($WHk#2+}ftxtLn6XC< zUZbQu&6xAKObZXz;!@R492xW7HQy14CNJ|8_eCg59D)?xGMr#_;${1nVS!9G{CF1Y zc;n9DL$S=8-oUZ?V~@D%s0s=7fFu1Gl^o50jt%9N71l633srqRDJNBqV7<&}H&?}Mi9hJy(4p2&j?;YdXZ6!?vT%!Z>Z9Gh1r31_{1eX`v%ok6| z&%xu<)$m@>7e+*^7szCW_h#RF$t+Xj@f-U7hIv?G^(J)0=LVtf(8MbMb3lrrnL1>2 z<-&Ssq_WY-To{?^&vR0Dqv3AYV_>3a|#$QdAhQC_p)br4^38x%^2xW zOx($v^SHRsWSWgh2lu%hMWm*`nX0EH>Putz$w_8mMJ!RG*FqCS>tm-vcMDW|w5{b)>A{~A?0Y0}DD{E)*DW8^Zattz75Ga&wzW>wE!XyYFfmW;P zs@^QT`X-!XC`+LBobx{)w&Y(Gnr@&m`+YW3MaN*raL9?iY*7H4eh!WJ+@R12Nj z)Hq(^;z4xryO~Vm3&o1cne64AcnNBb=ov^zLt!hrIR-=nj(?mu)vM=LMB3!gtMW{{ z#i!@SNi|y_?hX0S>seIAu_1Y`m&^BpbmP&vyf=wV?yRjBR&0KDvYOaUkOlW!d#d7W zCvhbsWYn^Kp`z5Qql^Zz6lb3VTC;Cw?RS+`N30PTL@C_X)}sAZx=_Ef2Zq{dibR`i zhApChCMwMgZ`r%#jYSdY&A{90X2RBAc#eA6e4UiVwK;+CD>^usfH z{}{kL>v3`@Aw5*@5PU1-yZJzqdjZb94hFNm!;|2N&U&~}K%s5EDDk@<7oosgb{pdL z&glklj~xBa#U?Jm&n~#mT|B7gGj*ZX)PkSyqP0Fubn?0Wfm-3(IYlY-&RbL&lDC-r z<|*SAZc#zhH3#|nQfN0B`U%7GH$-4ZT<}X_&WDwSwZOmV^ovq$rs5y_-JIHgZ2uAe zRr)DP?MOlT=Y{>ay|sx~5B|Ee|6iw%T>h#b=63sd^A5of9k=sSZ+`SbJ*X#l9To%5I-8Rgbzu9KoGS};1lRX+ zna6(3=HdDAX-#PNS3vi*qr1)Ii(EXn*~!%rsrWNi7Q5NuO)ca*i^rqb_>1ci~cnG z4>kW?=KmVSE>?*vuY%iRW(R*@MCh9;FJ>VErt8oihD z7oz5Fk0dgyj;QwyWCt{@KFmv_NQo6?d$}SUomDbw1kkh8)7T9!<^z~x*gRZF(Td4s zdswl*C@-H0u{YW1r8q=<;YoYXQ<3qP0sdq1AJNjn5X@!D8HxIss9L6zkc^=Fk zRSv$otkL1;h+qbHbOddmEa1JubM5zY-%Ku`lFN+>FAaE71Wx2!JaC2Q(2u{L`%i}> ze-+Tf+p8b>$|C9gS^6@k_`m?Ru0Uz^Ez6s&lyl`T(t3~2E>pkndtocH1rNZ#FtcDc z*1zG}1%rKXgD1p1=|V2K1p)jxj1(bPtk|l%11Xk{Rv@8;)y{r@$e-t%EB|RI|5EgP+~1Yl-(LFjVRiLSujx1Mw`eGc`kQ{-1{f;(&E}cM+U3f^ z&0<^%T7m2M!9Rv{>C(^VQuL0y=nFQ2-_7V8vHU-M#Qp!)i+I}Gv~WcU`rM8NP3|F| z3J|&)uTnd@2#q`I+)|MkacY!n6Nw-F@7Se3?*7+feBI8lT>Ymw^FM+U518C8R{Zrz z_s7dUnQ!mS|8ea%km+6BQUCEmcDlikyHoU}-sq!_4d-h)iLz`F!cc(oASmE+!KZ@efSHQt%GZ){fQPCGA7qHdEk*77 zGwG!L6jDM*&MtAzp$jAugS#4*{iKiK67&=qRUL~2xLpQ=PiaL`AcaDc;0BTX-mAz9?21mM8t{dP#iTgmm?Jk?y{FG&+gAB2;$*N9LO>oTDYr^*V zR|h6Zl<3{k=QqEe`I?pIG>KWoxPC}LK0ePj{2?h7-PAg|B`Wt5Yg9^r1t{Eb7P|($ zH@LKKxgO}LARp>QMl z1(q6ER%WI-1pR#1L%7@2f)M7u%cUcroh}b}ykY8Xa4tU=MIEsY^i7uzei)iKOa)Ui zm$b8#N|sP{3Z9-zOIzt$kq0m=Q0%hV=~qmgd6|I7iOQMnW4Ay90fjGBH;=_Piw9_% z6zSq5t1cj*>N@`N%iXrHuVIfV%K+U0qUaJ9>&@v{{YDPXCNl>Civ+jv9il&sXvqYS zx?^;>ga8`nI^BwX8ord4+pZIDb8W;ZOu{*w1KO4e1W=4RW}M)4;z2CktK6+8rP!gr z&E~cf@Y`oEB+7{l`jE1*MR!|(qGDyEk%X!!Wl7XWZmS(~`0<#jHc)d6tf`V|k`M!I za0S%xDxxp0@@q?Nv@#<0^cJ#4vC;#OUhLAtD^Fd$`*!fYtFG7DbXt$YIqq)5YwDRf 
z66y9_nwhMu-ILFIj0hE?+7`$S=bI$d-o+c~#&`I?%?g;!yVa85jCMeHE{kFKNI{pd z41b!3^y_K^!BqN3xdsZ02zZFhu;=~gOYgT)mmw682j|4Xe4j7j&RbHOiX4OnAeLpG za~C>Xw40IW1$X!Uxvx@ti!=ZGIIAOBBJbQHV)>*b&eGja(E>r&VC<170_J!vYJPrf= z`01n#`U2nmfu`9qF@!YTqJW=|(Tw^IHy=9a=3hGtAKYf$AB~@9^O|jY@vE*tS4=N7 z8Y!V*#@bfo<&2EZp}Lg^i_!oI1g=%aNZuPV1NQANiDR;i7ErKn3;mP)P8fsFA~ci( zd!+pmu`8>tK?eM-{QX&aD2Iko zxW8NxZ0wzj*vSQsw;*P_;`~n331B2MevBYNXZMI*iGjc){iIQ-q7~m>hT3|TJ7vYs z762b|OcJ`(!79utNdzI6iA(Q|xlo&s6MjX`L4ZY5fLO-D6ok9;(N*?+FT(eLM($NL zp7_FZOm1FExbTGeaCP=vpr{0N@vfb;JHY;s$^vHytmS@VdAQLU3Sb_sg)`Y|=b4u( zB03)!x)Tc!H`+#Y?1DUG;j3^jlSFE4;&erS`Wb)MOk-{P6QzPZkGBK?K*glap+zs= zmMJ*C!RVdqarX%e)4S<$GkH^tbPBBTo?b^+h0r6o)+YP zp8vXc99gNdvDH3=p(T6X6ZEzNkh)@X;l5|?>1}uAx8JUlgOW@0lDn|3*-PlfC==FH zP--!mE&&_QHc$>A;kCs}YjBXhYiuQzW6p&!VT?ewPY!ZMcTzRj$F{NqU({E5I`5Fx zG%CZUXRXY$yQFucHISM+G&7 z!DqxJs>SP8y&_I_AQchN7kYB{lDJ;7SCJp2lO^QY0uYi7)x$xTxYn>d*Y>1YI;jz} zIV9LO2NRCnFi`^^Oem`E^fWc5CzY?e^uU!W^erC|(27?Y`BPk@Q0-^VMUgIzz2fT~Bk-;3qwM zIN};;aMVUdJp&034Tb2LbPLbANN&O1mK~NYhL|9I)4?~d$7IL*`v*egIgw}2o(2^L z7(>D5SG+Sl7?n;VCYbqYxH6x!+MN2TpjMKsI1_K{usTzY^jyNC+1BQok%;TndmwZAQ5@0$9_7h0Rgks39m>r`mo3j?-d-^Tn;~#VlH*c$rqOCJh+kNedVjeRoan6s4vai!`eSCc&TC|+L6$%d z*P^{S#hY6EG&QfSMqvc`Y)SuGl*)iuIwR5t&d9iK= zH}*wR_*3uG@L2B4iqX2wcvc!ccfrImE;I4dPMMsPs>U(VfZ>Y*eL8q7@vo$I|ZZzoeH{Cn7tUp0n>UygNG$_7W{;+Y}{YQjMfB+ z%GsN#+RSMtsW|(^PMKb77nReRny2H7dop@!bo}U&d>Ij%sSEfboXrXIPOH7*wDYwc zv%?9GVC@t6KaxF`@7V_sjT^X4x#X7+hP*Xp z?qVsbOEkV?GvE)uXqA59d3wD*o4O!%eoU#Y)%a)^wRF-lSF!zJ$pFMalg}$aBh<Axl`n;9*VnwGdo(j2=q{&5`6n+Uu%0bWxOOa z?S?WWT3wYEZV>AyBVXcFIX|)?jQ02IU5z7gvZyNp$HoH%n(SOWwB}OKSxN&vnMDrW z^1B9O4+rGx;p*hn^-~7Ho1?U2DqJ2L(WRwzB5r9to80eDW$#gol}0y*D{{sv7)-mf zV#!Pc^C9sU+AosHpSYn~&qlG~n)cuL7y@as+Igv<1?!xTpNA217p9sYzhjTdpY60$ zk|M!@Av%s5ACh#^f_~p^|CsW0!RYMrc?%zx|kyS?oO^*L$cng*}(#uJ& zEgDGg1^fd;4SM8H=i8GS`!mtM>>QNpSIBi9{r;j!*`H(ir|4_QhoNC!GJg&8+oBD%E(wGZ-_2#eVk#d*NbusS)gjcmXf=~6>_bd$4 zs7C(CMcj`y?3QzT7!e>8)ozr0zl5vdgc^IvckIJ?5nr5#+*%j;Y%`7Pfk@Uo_QdmZ zOydt;sd|OI@xygysprgl7lz?dDk1X=wr3}U5MpL(pA1%>^eXQ}hBjLk-PZNsB?*0e z3DFi=mnET^i=2z*%@MX?ud)UfgqBa^(-vHlCH<1Cbs*}zwzM29b_FzEzpoF|D}pN@ zu0y-F3d#P`fJz!%xd!z%!XN4YtNL%tX0$A57T;e_Z5$Ye4sGTJzz$vq0J6`W4BYO~ z0U{+&lER9=@YJMfwm%Nd6_LT_xpv;V5GSq^Pn^-?TF4>VhO;;F9_Ia&I?!ubCaIsFi!9sle|_;Km}#n_H%K zEV|BMp$9+Ofk+W1HEXX~&t*Du{3ZMyO_F+#+M=`j`FiR7n5B|uK#<4iI(%teY|A-Z z=yT(;EDC+DZ*JVI6Sqru zy|9L*=0Brv#{)<1vh8G(O)Ww*-9i@Vrl+-Z8fTGneO+1ZCb?qj`|X#E z$mk&=(;%M6U3wZxec7XRP{}w$UBWX46rt5l33!a>M&$PZU8dh<-ZdDzbC@14XyD=r ziDl+{x8Cb{0JH{>$%g$-(m*N4_`!p0({8lGJ!)$ zH)9zri5_Q02Sh|1uQxxtz>-SC>Y7@Z$0ko|KGNw#X1>cFknZiXZBoU=>lor8{T#TZ zhg#xBW56!MzgqAM&qHR5HI8(#T^*Uk7y-oPo0l+i;`;Gpw;tEpt@@4zXhTgL4BVOr;Ima<5ME{f?&Z*8_4be6jk2~Zz z#no)mCO0XvV?ZtNi=!%fw~1vOny57Ip7uH8iw=F3bVKjzx+6Bps>;922s6MF-8r&c z_P(MxV}63tOovUby0y@=s1KKx0Jvav{+llrJHn5c&A=wm@pxV_cIkHmE}0f>bwe)JY;%|9NHsbuoq$+@+5P~YBpwNI8x*>z%@{_ z`Hr@vljCtcQeQ%Qq43mYtiqmNLE{>i z?vqK`I`s8%Y_(m!dELjvyG-}4bYlBGkpVN1{O%b&Qs8FJ25>Yx<>>fU!nfm<-9bVa zj&yCo@|+E7*fF=fWR{CBrL0h|cKiO^ksR!sD805bdcR)QEHUh18_8c6TW?n9@w!7@ z+EK^AD(eM`*{f*<_Xe3f;!JdTzAmN-RRW#ki8iWS!vx4N9eFL=L{C=hf(GDX9n=zH zfV16RIwSk~=o-`+3=aL+Q|zsu10Gn2F!rC_=~c^I;5}h{SlDvsc<6mTUZ9YpS7_2r zu(e~K%s^fS*&~fjC$(+fD?`QTtf=|QmSnsYsIzkre+jd-L}>fXtn-NfXD90)Pm}*4 zx!*U(BH1d72UT1{Tg(Y*FE4EcLkL>8ZeZLNH_}SMw*Jz0bXLwv`eZDQz!pCh$Xlo1 zgNRuGO5!Hu17WHoypcBN=tvkCM37-^QZW&hZQRYvR!%){)kjJCt|MgH`_0{v^Leef zT?e_=J45~xp1^H4aGmq{2~WUh)b^1!?8tJAeNz1ulh^q2g}T9n4_&`E%kP~DtQt99 zFBW;{<8}J(!>>)9N-%ZqjVKeCdA3yTO&9%K37Akw@TT)e>VE!t)(o-PK;?>Y2>qXQ zry`@BZoJuOpZ4?by^_LNGIfLCZJ`fm{^<96Pn$5=?B&>cnIpgTT41@IY8&yK^g(s{ 
z@*n;FIQW_+{J!-EyeG13amW~|{HDGoq<|=a+BwkH^rl(6rpX2k1nuylw5_oVN;Bnv z>QKT=*zA2wx#I;8ay|HzZBrVKwh$IE4v4dtfv8qCJ32FsAhKF+?q{U!>b3VfQvSYc zQh$9qq5ic;@e6Cpr#YPTQ_A4=e7B;v7uMW#Rh{!T}IdfdS?H| zx%2|O{?KLDJKvJ)ru6-b5yt11(T|DB@kKDh}pJ5mce@12;d(Lz8k1!1L z@BfI*_^xBGJmU|>#jL(d&+%I-+o)u5t}aF>&xt?&kiQ+pNRfFHwV(R>ulI$ft%6&* zE+*no%1F-(z z{PIWhDMcF z@5euPSw;9iw_fIZWTmjlr_WCR=(5Pe6Y}yvP^Z92^owgj3@enr5 z(XXJz>~1}*yTP~Pml7`>szO{skx4MlfHx}Ptvz_s{%VL50#(wl%*%&-sCKj%PZg(f z{FNB)1OD52`4wrh=>zN~J!3y70CO{B!vR07dL$RS=kMiW5n`ely>%}o?F()NdJDA>QXM0_Z9KhZOFpzz0|-sa&4{mTt(!YWj@=m(1}i$ zy%f?bKp7e;;%nF1CLOhMV9nnI|BC z?co3)wE)w-dyvM+uIUf>>z2^E`%H!NyXWaT_Iy4c9Y$D6C*&N*2u?*90p|`Kh_XOk zj>V)#sDwr2hw~daM9>>BrgGVD?J+AmZ{ombsAi){PP4wdy)8b{rt)bGGUTw1@&md} zi1iI`Bia%d_Y;IU`9eL{`S4r{YdIP&e>&_~ z#eHL<8UO+?ahn*L61`tezuk3W-}WufKTnST5?^DU4JmvIskRF~bG*12!bo?u@!Xt) z0p1z4p)g`j5i>~aYR4_;%6XLIP|y<_ue1fkz+s@J?i_-)khl`aM+h594<5V74cRdX#y}M3ku_N$72aGYtLw${Ss>ufAfI2D+vkseS0*@bZ=vUDB37iczbIM)lnSsJ$9gpOx1J(w zNnK$XioQvr5fivR+OIMMo5l&cWdY8U2mwP+I%k8`)U1gp7nOjVZU~SI3%c}EokHmj z%!;xU5&2?QrVs$~4(*qgD2yLa7MlJlDR9lJ71K(4T{n^Gu_OSjDUw_r8QCy0 zG`Ib_IoeGm63+d$x?JP<@%WaH?RNyd_B_Sxcf*m&Av;l!b~09gcExwx&-)aDH43#bf`k zVNZVzR@2yiqs?1yI}-I9=Jk9n|Hd(V)VCIqgrdnKe~DwenT`nM`KMsWe_E*iHTx$Y zzkKHJd>Y57e%f{|20W*7e*D+Ve=j+{Uo>U>X@a-LjboxTfm#!KU7J%M_XE$}xBs!q zKc6@)KJRKz{M){F{jJ^Oh_TAwDDn++m9fzVtg5A^#`$+M^ExK9tCek*irPZ6sNp$N zw<<#zxT@_niu{PR3ep*y1WB8+w=BB@qdfdJzg5|0ml%_uI&(RimiJ(5L|M=l+Jr-L zhf?BAypcG5QitkwLjR-5m|9@qut&d`RP`9}-W{jtMLEsE-a`ItnbzmiEN87lrQZ5t zPN=uDg(<%ln;6a&37#LY9c(dMKYH;wb?9B=M2?)9TtI!#5c$*#P$3ui=GiNik=A}|Yd&(E@4h%0*2_-t<%$g6%N#0P29xMWYbv@SawXkfy zSnF(j7{)ih_{_LCuA`&JYvQH+YpoLb>rc(_ioASmXBNUyDzDHIH>Izq^&O~B?_3Jg zMS|f&g<>CS5oXkvm1j33YKXaXFJxINfsdehmC#-#16}rkaq{feB*{UXh#MqEnB=Z+ z%42(eK7E>F(bO)vN{D=lc)r#4!5gcYAvm)*6m?+(@fguw-77i4bnT30ve4R+*sZ7~ zf>Vm&;Ovu!{X`w0G3SjLX~yV!qhnTZR$48|2-#qAtbh=AE!xuH_1jr>eu1XPj+tX_ zE?wycVVG*jj;&T~3xUll$yJ+ZM#>7r|GFLYSuB073a`)+#B!21WI zVF#k)4R+F)58pIDRH;rmN+i8I9}&n}Otz&cnLZ;R3$31lK@hn^U&-(7q61sc#P^I< zY4ubgAl~Jp*aFgA$bt>p>y<(V7-W)HX&!Qukb*(RY`ET8;oUE5wtyE}zrE`s{~b!k zy>``l;pooD3}sB?4#f75!qT#5?VAL2{f}eOSU`qYmH=&yt9PwVN(I>+M;_^EEv21$ zM-dM#zn=Fg&;s2tt6H1m%Cg;7VQj+U)=i-;_diRHYXb65_XPi9?0d2@0y<&g*(}v@ z8#u}X9{=ar_3!ilBeKZ@WYPwtsEi&Tr?R3b>BrO>mOI>RK)o9^+zDObwOOX|S5SRV z@DFYpcI7EI1hXQeD_VnJ^ufcj{GM71WB`p>`{|M)YLf3ySSpP4>>LwZuZDlS34|ul ze`y&L1Bco#<&MA2-ADfzi_+nKOpZS3y84g&N(WG^r+v$Ee`A&>TW>I)e`S_8jN`Hm z;N7FXGF}4Vo#gf>dwLwGyWKAzYPs~5T{Z6YgarGqg!8AeMk2&nnd=+BD7fXmrW$?f zC4|j;{CWDANs2n>Z=5hQ0=6=*?n1SdY4^9uROGe9z`Zqy4g z&5Y!{e%I_z((D9>Qv}5P=8HvWLNn@wr{?t9ncgK*I)`+F&PZswt>l>^aQxG2{hVeG?n#F5Z z-=te0e`XasRaV;_8D5$vHus6qdHI4!=@&b9ZcM~Xq>fuJMLr=ZYZPtmV!*5di2G8) zycv-yOxcp%&%ytAk~Ye4;{Y4aP{|XCy}gsSCoGdDv`%MjmM?Gf?hkd(hbVPheo9vJ ze@&Z@OYHkTg^JlWx2I(vsSKQG7u)8U@vo^_P#;gsI#H|^=7ltxu-%L$q39))vU^zW>xKJGjHruzw~rasB99cw`ZD^b ziM00m6DLVxiW43mUaX2f6ZqAK*eW2Ul_Kdh!r4M`8XB;063pRKQ~HM-tbRmo@`+7M|JNDef~ZyBFbg_Z6UDL zQD^u)W#iM$`@H902f(s;U0$*|%KX1S-t-aCN**b-MJ!IE5Jn0IJvj7V6BRkoT8lWtt2*c^WYm$M1goBy~~TLOjC2)F2V011W=5lJbPqInpYz zIYx9mj$IqnUvZNejsK|Md)b@$RVnkMK37oVw`5(N#rtY5&?m~d^1@V7dDD;}gDW&H zP=mYM#=WUWzQ26oL*9FUk_`u41(JzNt)PR_?lWFDby%UncIp%5wq-AL8rSVDOm_Tu z-I_vj?iBbl&8v9$mGl((^qTNoNe(z;^ynyk{zv4z3oek2>%?0)O zu`R+8u=z&LqTLsspN_v?xox6uL?6jpE-4s<$OmDkitE~|GB_Rmca zLMxi*@4v@JtzO)@uB@2RxZysJz!PkNgv&QvH*JJ85=JSv zQ)|_vlNR^Dwmxae(OG#R7v>(8#(@Wc?{j@43_hLaD_-@dpRzTxweIi>PReaXG9Xdk zgb(myY9-p;`s&`a={VdDh>vh3le=$hq@`Vxs_lr-?B>SsB`}+;$TsD8kUM7x;j1E4 ztZ5dKsfl9*;{<|)0YT}LPwJ4F9z+_r$E?TnT8hqmcTZVKBg-*z1Kbq+Lu*Swe08)S 
zYxqt62>>rD?x_&~>kRho%I>|y1z-h=`h82mi1l_*ibd&hi4jq{n~(ebCW=e246%#W zm;D>zo`u;jEkxYvBG)!38;R9n8O6`6#;hd+r^5o15+`42frEq3Q9L}HhV3nSInsWC zj%hGF)=MX)SfFOQ^VRq*|A4v(TasiS|8m=REtUPm85QoIwQp7VW}OL5GJvVw`K0>` z&y3@Avnln-_k}OO4;FBv-m6Z23d+e&jy{PTFAYd^uz@CRzo2$N|9n1O*}=Qqp|+)u z0xh)Awm+VnQ)=>QedERrux_h)ww2v=6u3G4#5?hj+t|lX^}6d+s^I?CPDp21YJBv8jmknJ5O>X=fS>x>k zpM|QUf9k35C@>R1;%xvA!@o^qV52MULyFpsUJJ5b+g+bsH43S5)=XI#UA`RoQ)`;; zfO_qWrEK^cQ5X7%0SpHJtP>v=~UN@k$_=)>zqGk(8j+s-C-s4H}LQNQiQ->-a)Q~zXg zFKFhJE9x5UZW1X;n0bt$Sih~iS#XVUu z^dZOa4`RHFq5z4|U|ub%WcRXk|4`I(qYf#>v#qA4L4;yuciw^2Fmjf(1*NEx@zGPI zTEMfd3C9<8L^J>pe!a+!z)qLGMSVu^Dbjwb=?Gb_kX^|mLuEsgj*rgo9VEXap@?b2 zmBW@;uR$n6`PPOHVZ7No9r~t)ai^k1D5=fgM;e-t3bKYue5_hLMlJ50dQG}mjCHv^ zwH{zVwDKU>A6lc|clK%>oE|N~OJemaRp2?>YqdA{d9%>Vv5n1(FacsV`rp1M^z=WcI~KRb=Gh?Spp8TB8U=K$MS9|`}(Gh0hRp&l6x9cG}VmQ<*fOb7$q#g#E z2}Ev;6M6?~%{0D?|B(1_fXrWuH<#h%yG%972y?do!sEYj=0)?)4W&wRMs@H3YtLL3DvAN*PxMo+oU#Rf$x zoYiO=u?X&Arz#=zKnA6QKyJjKHj(-HXmOcZ8q{V@39nUnagU9-m!GkBoVYsczArkZ zm&d-c?>5nObw$vN!c3X>9D4UE+<>6=aaRp3sy)%l(?;RWw zRd;RxIQx*$Kk>?ouiCnQ$e3}@AH%mqyXrB=@@MZ^B(H+Q_L2D)6$_Tw{n}cU0i_-i zbI>K_H`5JMLS1C*rJYVweH;1?KtKTtqgm%;7MhPpTJvTM_aF?{93<4+9P`;dE|}*H z+t>VTny@4c%yDZ}UCh_BSbmklp? zjKL0{&*R7rHTAojhDcumoa&EJ!XO;# zFc&Mt1W|g*ehf2JQJjXx?(4PVb?Ba?g;4#Zo{wzs@dUB%o3U{@qLSv$LF?0%LOl}F z7lZRC!-4&k3!8O69?p6D_ov;zO?~rMv!B#J$DOah?Gdg&hzscco;FV{G1mN~kiB6n zc~tu^7QshMkFI|Xd;GV}|E2JtczQo|cjM#L#6JjRjKeZHn+6YfJg5J1**_&A8LU%7j?x4XY;#}p!yvn_`YMfF&cOZGmQ zoM?WXk`sEdzTqe+{rdC?kh=Or`p)3dq8W-c@(1clpvO?HPe?8spg9In|N7f z##IyS^0O}4bRs6%Y%w*jbNwHXry=t1B-G_;xb=+qT?20Ph9|;q&UEKU6Wu!ZRwi2H z38QnxTC&8i?g@&r`Gx0ETJ50pqCfC(`M2`$0;lVnq;6h*a>MHT?5aKWg|WQ6L+Kf% zKiSMX>vXgxsp9PfYWotpOspn!H{)WTy1|zhW4ts4+n=7;w=fuSCYQ~JE%Ddd?BHAg>uzdp+`SIZC9=aYY0GSYCrv_-Wkq=&cP)6FgEu!Lg>sA>?z``rugH;-mh z=zYVLGs+?sW)-^BVI3sEo6EOu@*yk9dQpgqV@eARqVe~;NAz?5v|F_Eu2k*ddnP*St1eVqQ~N^c9_%!YkX=zF$05bC3Pwq-6q5lQ#jRk= z2S%yq1v4BS=*dpui(SExGJV$xn-!GVg42_To}j zVR7{1+|>0lh3mGtY4Tj&R@P?#&IVQ2oRYl}CS5W4@*<8vDp^wMf)@F4Myfj0J4d)) zaEmCDnY+*@z>pqwN3IRg@gx$q=n{%$4VnpICSqWV?{^4V#!;bH(EEm6lq06i@Y|N1tdOOVm+OB@Bj`IQq^JN(mL6xikZ79S13TB#6A*)xM@=R_0y+XMlUekQ;WC z50VXqsgsH@17C@<4&NA41CGAZ^0j4jx3~pIIKI3nzOO-Ix{0+;+%~Pf>~A3A!hEo) z#Enw3`wLJ1=cn(C zf6>ST8XSG*O-utB1z;sxf2>%ghehpIO-J#;ajS-g#;0_cVSEx+v16Z_j0X$vKXgKD z+<+@<9epG+@LAGqWTE=vI`G5X?iJ_dyq_udZ}uhj#G8^C{8uuCr^m^l27H_`tx5I0 z^vPL!Qbwg)_9=L2e5XY&rt^$I;<2XDD-kc(VZAPJ*HACes?eiTI|98erhYM+FN9dmThXs$*yp1f&xX9F^W8gb)aAXhISK(jl}NM?|`m(3=4Q zNl1_qAXG()bRh}7NC~}27ZAOfGiT2Eo_oLdcc0&L|GfM`Hn8_%?e*-Pto^R{{d_(> zf}am?OX(1ZtSoV-PrS3mXQxC06WpOM6Ty;L%j`wQ#?QP#&aXS2EAnA~Ff&u9MSm@h z!e8&Y(7)<`>e13FYM2~k(!++s;7xSZOhpB^YJX3kf0L-F%Kv_^kwck8*{M3P5?DSi zQbuqRvfC;YrD(A8OMKbLzE^K^r&Zr|^@Fc(hV^sNW{gQ8|sm?3q)4E4D9WRgePKMq(Kbev-bA9x$ za*q#aAFPFBTMZ+#d-Si36TRo# z(Njk^SHg~T{`vaL|80{A?zC~yod?TSZq!|H-is;znL?jli?p?jkhLdIqbJlb;`+PU zKSkx5diUw+;}CmriERq|HpU)#_50sB#@Hjzk$zIS=sU-e$IAWR{(rY8KV&_n zW1g^$4&pAZe_C&L^kPF#nzPqph_JMop}-8u3@J!(99~bT>BSgG8Kpcc0)F9)vt#Lu zmwq^1VLeohpy()KOuR7K%;DT$qNUV>96ik9hV4#cjBa>9Ii*FlJ0t1uf{RZwp=Q?< zyrGN_!+Fn?^@Jzhk&({OVBMz;YRFPnsWsTImbQsl3EH#z(VDyt0}hWic07&R_hbXo zquYViuu{=|7oCg!C0Pt4%YF4uZ11;1!=jk%xhE5s>JnFU6k^5*It=}Jyi(ResnDS! 
z-&E^~l}C>Z(D_k`aXG3OSE~vE6GgtkB2xhWFB~9+uN69HZjB(1Ae*@8`Sb+jkz*~?K4bC8t&Eq{@35j2ZE&=7*@ zAq8E}u8yH{6-n|rvUMtF0#1RG8jQwlUsTzMU{qf_&J!^QgYI6X=AP6~Uf18AIm5C_ z8X6Wi@-0ZX?NWd-fL)Y$uGI@_Q+}bmZ}f@KB;Y-H2}5+1M2Hm%BuUgW52PgLz1f`2pSOR^^XI=*Dm#psz27v(mZ;2aogds%NuO%2uO zN*?ulksCC;lsyDk-}?Ed4vZyIIPP`=-vSOu^Ph$lrwS_ywJJ6zII`2!WV!hm&F-EN z|GHiiPw(=znsuZRvu2^(kPl5K(nJ1vjb1`_<)Z*7L2yNca7Fc3j+ClL-5<51GAAZv z;=lYle#`VYw!?sZ8>s((Ucgsk9d|`N_Z5ysfU2ik-D<~(g_J_4KBo_ZB&@$!r?14X z446>wK&s|^m6C(ju4SK3_Dnp!q`E6xWB~529?K;%5)`=Kj%^oEyw-fY*(KxA-xebH z4PAfut!EImjuQDUy+5bDEBCo;x^* zP4LVhkYiwN7oV2x6krI-uck+{^8&`OcN#e7CgW|vvRO>C`4Blk6Y?*H zz!xfsxAOUA~BX-;i&qm9MzLUUy+ZQSI^pAtDfmF%UjTJ^0z+B z>2{ZAQg2?l*=D>%#;cN4Rc-UNtE1Mkhvzb$)H7hUv^u*=?ZbfTQq?F*yEY8L$&IPb ztVJcCL#>mkwh~tp2`lAHO>_KEnRy*X#{657jA-x>>Z+t{uarU6;r5HK7Qn+XXOMp3 zDmo_!d2x1R&AnG`Ojzk8K9$5H0fiyf``hh|1Qg0N%nE`a!LjRJ zqy|jVYn7A`NhF~Go?pmNdU-W5|VKW%6`aCH@`l~R8D^jp?Pib}XTvMH@Xo9Tc z>u>IT{SQ%gIbQh2swthKD%pGEMah|>Rb5iFs%7(E5(_eefFx>cmZ$Au%F*dK`YtAP z0-|0e-Xo>p5yXTAVT+0Nxs}0 zyE+}LIpm8A%*p>f9|Lpu=9C+xE60O z!0WDdDfVh=`daN-kXHHY`lxdc0dz!6XfbnV6a!_{w>hemuJnEJdDOZq9_wwh4CQ%S zM0#hGV-Ri%Ye%t!YH6!MgM376oS2xo4v|*O_)JWqS{EQM__juRR<@o}`#cxk<84dc z0Ks@SIR~$o8^c6UW67(_Snzo?F);dyq-RT2#yM_{sL^nwOdy=LeXEZ*s=MW=@efFN z2wGij2vp+z`4#Il9yoDgxmb{TZJZI~4+r9k-ac^s;FGkoDqTk|ljQCjU5zNa8MDqt za-I$vC2rlBadJdvm&zZ)1k6F#*HK?i%_T7T(pGZ{ir8BJDxAG_|*c?PP$^xVhOb_HXI{#)PcK^_@wg0s?b1dX4h9GW?$T z-h`A{+1ur?BAp*Oi%a$_jFWA1N`JVYw<}Y$=M?9wMaI7uUGeg{-@c$F1&&S9W`i3* zafTpbx|T6cdJONj7gd<^BKDO&c?6~i!^r3eeIAQ`a&A|a@cKPtL3ZHrSNyTISx`Y( z!HdfVp#>9Pr@wP>qH4y9*OTK4$-h@TWNDeJsX&bdu1HnYN@~eCKBiZl^}~)`g0{h; zHuzoq=7J4_mCp!lCtq(UHwdPi%4KsiudVCNI*38Pu-X5Yxjm;efpB5#{!w%e*5=@Y z=@}Vi>1Ntp5*CT`(9(h~PJfzWe&EkZEStbvlF*xt(8*FHNuZG%i4S&yS7bAnt0D;W zY2&y`L$?0MH`0`v(@;Gwb=6Aha-wr_vz~Nw8Q=J4HZl%s?fA9;pYq1&$wz7*5m}BS z@^sFF?v}W>@i8}c_A8#fcNBS9fkdP%0bq@EXs2ig#POs#8`;%mj`0o!YoXclGH> znGkj^tb())!=x}}+#R2wlP|1nqDGu8%_np_eN*{^fOVT$o}UR~5=L+On1^*Jq!CG; zJJ*fyweX)~A0DQK;up*{YSpnbEy*s$kOGU;yeJTi*yYHlRRcD+Ck(7EJo&vds>J_d z(dCouoOje{aXsZgH=}p$BVC8F_*q3YLEXE9n3a$4F>CcsI=q)#4y9Qoh=$~7OT*{& ziyK(O(jmAdwumkYw^hedc87-!`RVf8Xm=!@G>a7=jpJtT@zg&Bf~v52wIDWjrtOU@j=cTBDTQ{ucQWailpQUP2)O2bd zel4JKrxbL2{aBu_6~ESPX1%Az?LDPw3RTIKEfbC{LE4iI0lqh#g%kI)2h+DWPW;!H z3I~Tk#ec;6N>S)bq-ZYe<$M#8OR_5ebrY zMPkQ`Iig!P++n1U{7m>V{!y=sBT8+v1uM4uvb#?+{^d@11a!E_cxFXEHw>WM_sk|y zPFpx_&KrgSN}K3PV#W$sul~{?|8-IUKaebKj68mKA*R1-iHfY+ZFYVr<{&ILdOG~n z=!^MsKW=NtcEH&DM0VEaz67JsS~$nMLTTxDJiIg7sh!OC2|OyF(;xisNs$Enl!y1# zG9*V`cSNjP1Dv1cp3nQUrD5J%HY7JGByj@qCL7$5lX8rtBnabc=z4(9j!s5}pT)0w z0<|ewy?~Ecj-NjMd#wMTNE&|fokR6LE-cu8`?r5HDPMg0_b9Maf_evlL}$6IUv_@_ z%+4Q(eFSlG0gd=<7Q}Z2WdW6oHeaQE5ED~v;NZA-`h3Pq0)jmoU;Kb`Xjr$sgcO4Y z{}$&qR`>ok;;}VB+6PU<6}!CwK{?lYl|5%P_xAC})adsS!GGLs@F_*4MRiX*ZJv;O zafGDZcZh4uahX-SlfeBU=d#EvbChBh{O%#g$$$4v1NIi92}AN55sS)JNUxtcJLtw> zrlB;mKI}F6lNx%wm!k9=7G^7wWhU$sh_Jb2rH!&oJG$fY68U(%cwP2RvhU;G7Hdfy z2E>V}=AFy94UfaFIRK}*yPlb4bQ_C-Dt>tN69>nE`p-6`RnH+QE()jUL%kkJ<#j*L znAwu7ndh1Zg@PZuHlb_DZXrVVjPlv69q^Z4k=Yz?~If9y`|)~ zdo-6cclG9=cAT`l4TL}aF?GF13m_sV#^!YUOU{DBuf3q=7?}Lv%VwQ1%$MC?gm4Da zzIAA5SB-BNPdyDTCJ&D|$~GvAZ*GMrYFQ$9eHl>K0}(e*<+Ei(01nS3aI5x@KPbyODv-wW>p{qC}?z&Q7wKp*>H2*TRod@9m|SlEQvz2_UHIK4%6p zAz|!D+!@pXg`n!AV*(#1)j%}Lqu_q(Hx_VW19*Rzz+ElFqC|{Y{`C=j|{L49RdVkmRVlpYc zm^R|Q&u^Y_#lUrt8>0?gW|~dABN2T#A*)nPwrBfV^|0%Z8^7gXSs~(fn zjQGw4bVr?9jf{zucYcU3*U|_;9svOovOmxi097{jkqJ89D2tZ&kPM>_r>v5#Y(%+V zFmk@proypp=WcNAEX6~HF z$iz(TL9qd0yt(H14w1$kdeZcx_oXl^(e1|{mui)rEp~i}nf*d@OGksOU=)J~yh!wi zEb*PD-Cdv#1Z1(yzHp!07WhfW2z-rGtlLxe24h*%@Ybu8e;QZ+PV+7J{qM}*TLycb 
z4u7ZlqWUR{jFOLYUUlxKy>TvoGAP~-3p zu89icG@aS=4PDiM9T}7UzwXt4eh;rP^OKrCx93Nzq7L`UHyCcvY*G7~Ek&^y6WO}v zy06dUmDr20=E@#7M%%vL{6(|8d&#aB%Ba;T6_AE?;&&7j7)P^_#SN&2P_AEpup8Xt zS+?QzuGB0}0$+FNof|{uXGAsdn&SdGbAQeto{v=*!xFt7*`b3buDKhfYKViZ*SZFV zXd@dy|K2n&jmHNXH3O5GC6(?BWfi+V@JWdY^$8ir03gu?(bT!jP8GYndRYe-__T6z z*7-EfTZ%_>-9{w4`>M>8OtI~Dwg1rX%n@O+*mCW;Ac1sL22IPj^07+0q@+^y;GPD! z76Ri`9(==BVr#A|lM{sNA4X~$- zGAYe!)jg7E@PKdvxzinKe>b}e^7f_WZ!`B&bWHMS59?j|mJ2f^hDU29u$kv169|=L zcNnHRZuihMyM}DTW{hqS*k;4fP)Tf)kKB8)Ub+{ld~J*#lq6_u+Q~setyzKd6|gqF zun)@_Ga{A~T;M=yrAeD=#jD7T4HF8!SfK;h24=SxfE3|r!cnxFZCu(?vH z5IR#gdKRYE^0GnPBqfKJezk2V01~+Rla>Kk@E)h!Z3m=mLfPFxD%aVU*=JumosdR} zNLx4WYd2U!SQk_FA1sS5%o>NUe?9v8JnzJ5;;d*hd#S=+HSq*j(?Y$OKa61mqL9f?h&xWWP-qlBb_sB)0W-B>Rl!0v1Nlig2bfe56P z{w8MkIr`Z%_H#gDl&?b<7dORR8WKgBxnQ)X$+srF8!=iWY>Jcr^pt|z`lne7r{PC!B1N^4eDaR!d z?~J+hmqY@angHnqD7A}BnfsRVF9-_ncp0H}F4@+lWQ^9&Q7=2%GCn0rv%qqtA+X}5 zebm(sTtP*@Bp=~N2gd?yu~iQ!_=lT#g-hJeNELR0#){IJ$V{MvsuAHp)Zm^8lX$pdY`$y|qNvXG8a11W9$&$7-7s6!T9N9i2VN?Re37)gE4Itn zFLAgw_9Bf!`gqqO?F&DYUl>X2wTWY~ZBb&OC#X8OU20>t#FF&KN9Tb=IndfQSx$dU zZ;QMJjCJ=Xu3HoLnO==@@B81zG+;(@Lvx$~DDKEK4RMG$le~}|6m)mn^XT-!mB5e0I&sA1E04cqzi?XQL!~^Kzy;n41&b*y~Xd z9vfd&IhQ^cy`u-D0T-7@OSxd^1<~-O2*r+MQ!(}9F$A*J0TIySQlgCZkK$(vBVAqG z$|$V%>!qee4-V ze07-H@V*+_f!^I($ki*o*?+%$C?Ofxq+lbpmUz^XTdv_+Yc(diA9>%&y)-#kx@j(V z)Vw6GliMysI%TgTWClEP^I!Wa!#e&dML@vImH$tBRA0pYpW682HZ=5K|30|ir@G2u z<7()K2|MxTm2Y~-g~-lJt+4rMwm!}-AZbshhsPkusc}<$HI}*ccTb;@wzUT)M`Y%= zLOVXMn<~6Ffl5xUZ#@uW(>Jsa4}=F$e>UU>X>At!daF+|(4ty9YV`d1^r;qZ9lZK1 z0jsL6)vGcP&(_JyLPETierX1RKn4?_TIFLzo$V(5t4S9TMg@comuauUKj0VAX932f^FbAqN_-(klm6etq7#vcm`T{iMoK)qMP-kNh zCuGN2l;1DH98Ix=$SZ}U5I42g)cpVO4t|N=iWhJnATAu@yIWl%Fo9UT7MJtJh4V=y zF8bK&D+VSuBZMI^cYAT!dDxrzWuEQ_d$thCV@WFg2|{**`$6+GNA*O~IXkDE$eK`O zfR`)EVvv|>tn)b6D1n?HKN#ELT`!xHFi3?*F^e#c4dn%1fA|%yxfugwzbZO5y@wP| z1r^0wx4mUoo?1&=WmRijGwZlwR3f?VV(QVQA!uXHdQ~*^$CVWmS_0?Mr6;Q!c0%6-AfdV|D3kgU zn^8uyA5ePzP?M;+v@msjKO}?#qccFt#F zWhY)&O_IGH>uK;`%M5Z0O3lBWa~V6^bh2>!F~ou}8%z$a=w5sIYpmRo;N*q}7|4y9 zNHw3z3&+Y@30o8_-1uTUPvf~ULt`fcJW^+62IEKgRi6miLyJjuXr&?lu-j9=Iq=LZ zn^)YlXz628HQv;WrGO~4p>Ta2BA|O@z$j}MC!TvUw62!frKR18F$=j^-YN{ds@W3V zU{ZGv^4iN5i9jq*TOjS@EV5(qx(ss)k3_QVfyvOO&MDl;HIpipF)M!fAfA&@&Q9rg z`?3vEYNT5*Kc2|#e|Qv{NuUjl(jx5>$Zi)@)zCz;5xKx<9OfS|S<(ctVi#C3*|?Kf zincO_NOb1Qe)6L6Ft+4#`yu*gs#Rc@M%oK)9KBI@!0_|#?X)f6%NX2YRYAAr&PzRc zvt?d{X_fk27Tz3<^V3HYeQ}QWe^7L7f=o(Xc#78=K}ypFTPbWn{#PMWOHQR=tqVHh z$`InM*qS1zUfq5bPmKE};Ie6b&(4}fdau_QZjcItYpagAhG^ODrii3ECy4nwFTac; zF(;KuREbvEgF(*d3&kV{_rd16+l0F5$>^y^;3Qp(9_2f+nq!HtN9;}j#E_Z{l#T_r3q#VZvh<%mxl-hq<#e1oc*%zH?N66(=f?qwqrLy;}1Hyh?vG%l#Um zM3GO}%=8iXULqVdH!^Qc8-|)`Z8`JYgDU&?#-wR~Ie(d!KusMJEax6gcNxVf`j-*e zci|TB%ge|v4-@^tGR#(`y*-<8lWVMXGU=TN0Z0Vs#14P{qNjhpYW%ju@-P$S5?KyV zivr-Jc{+n(ql@ltjwpmfHg5w#*gIV5)(h1D-ivp%b@)w6kWV#@KD^YN?zd=MO3yJQ z7QZAV##(g>7}!>(WcpN}5rZJ22w6ebG@)i2>vuBZ(Y46AvIVq+Qyb6C4%5sIACql= z-n-G#bpB69>`#IjF@S8k_hGEnrevXITL>K<1b-h$hM&)H=^wpvt9KC_(P~_}j(AwR#ZG4Wp4|(AV48 zdnH7uqOw`t*``IRAI~R*ET>!_1O_usuuMx&Z!=wIU8lt1l*U)(A=wy#Kf%H%U}W4@!q+@4zNPa;k|x&e+9l9Mn@XceQPVO+FlCN1 zmxxKxov(P8Qkre%ASPF`yo{J+R4$~LKHZAJUK5xzukNxa2NA&6qE=-&dC)MI+nGc8 zuZ?=-Dfx}#YNs)_ZRtJGSd$~8P-WuN@tUa_h(Y3EzyRIKq&#bOVHmn0Yy#Oyj>zWM z>zole_Z*($vYD1?>nqOL%R6KR2V|sFOJvz28dkJNNpT|>(l{0wY^q9CjLvn$(t+U~L3+i4z~v z94M?dWtq#j0iYrVYW-sYJg6*rFjog*6MSS<;6-PMD-Egnd1V^f9xl)}34x4MBuSkL zM7Z9xV1;&NbnjnkrVLE;c-h}8l47JTQTESUMWeSK_nj9uS>E-7@<@Htw}%8W%xl%~ zNr_eW9N;=?Yy;dX1=2!$AUgmyBJg_@SuXYW`H0R`r7V~ZuWg&}IrWQdE8l;(o;{&< 
zwy^^is=}kLN0um&tIALjgqY81S^RnV$y}uHgaw^EBWg=r`xJ5HO%i!yJSr>UAaxw1~*fax+oJ;;W5hc?p3I^E2A{X&QIfx3jlmax;^FZ<4@Ft>>b zoC1Y-Jibuu&^&7^J?M2M3%7|V&Y3*}k`y;r3>;EESI+ztl?^O-Lal_CP#J5~&fupg}RY0tz-!(C(wfykt6OQqp_ZfSQzZrq*|_Gf6rPESaZXNas@ma7>by>^LVhk z#f?a8&ku0si3;%N!h7xQkgpdQ`d~P2<^4Um^17nwF}OGN#r!@~-*@{JMmV&(>N`hs z**D^2DU9SYm+`lY?69t0^A)X-hlr_n$x}pxioNvc4#)pgW&U@2|J&EUYX}_WEJN4~ zxF$O}F57ubLt-)qKj?pZ5C`|MUR&m7qi{e!BMZ^QKcgHS1PZ(QmLvS_|NfZ&=`R90 zAN#)f`~xFW9n>I|I3eY0g$2Z_i0yjq0L0;01|R!rzB)VLjkw=C6BP&R9Q1auWaNFx z=ev^at%)gF+BS{3#)^s#DNA<}Q3iC^5V{ z;<8*IlrEiFZz>rVHjWHL*@8*OyO_~(?Kj=BwrzvM)_+G^GkDDOmyypAeW3|d~^M>oh_B} zoKrSYaK3anCDzoaz*cf@yske0e@Vl5+66xv=k`R4H1b)Do;s_cNa9u;Ob^zZDCyBM zn~kkn_ym>AbP$&!7Z$cwCBw!*$8xESC~b>fZgmP=Sx?}Ncm0Y0adlre7WEf?>Cou> zF*>-?+gJIL+?U%~b6OZP;qmd^>ATUB)&WKaJDiNIY9}W`tQ~1 zN%=vs6hlB{tya7PKz*yhmM}nT`_6IomaM!=DS(uU=)Z?hqf92+mG6p7Eki9y4jE2{DGQ9Nd+<=>MUeeN3+ zTUjr+6#h|;y;~v7g@)W{8xlID{L5@rw$0bI`^F}+pg31LRmDH8v@+~&W8Xn^+jQ#4!4gbnOKg<~D{=d4 ze^dUs4e=gSDPM}tv^cy~XZNt^YoM_Fy_9S|bFppSRtL}BlnLo_GQM_TjC$%d+d{AOFhmzx zJ#Dz1dHLX{_oWs|%Tf~8ilc@YZVe5trLotH@bVYf#)f4h8QGUv^BVoDW2J-sqqo)6 z4;{3}=ohFWOV#0@hzu?BFCSBB*01Xp`3@K5dwL#8Ch$c0Ur4q+l%Dl4D5eDa0?FUtx+Yp;Fd3?N+TY!%cX_`6Tw_tF15zsU+Wd1}L?hrEjwQ8e2 z*GdvGmoviUDmfC-@}+!`r2@dQ2>~A9-@5W-u*xWfk3(y!lsIF=x<8MYGI0M3?u+~Y zoahpjlA*T{WGR4#f!M3a03U+pT&H5Thq5LrT@Y$~BYLv)JBM3(4E%0eRW=yxE`HDP z=I4y^6;-RAJv^OTp}*^dy7m67v!8YBMEFKZqDD$TI-eKBuJV@(E{SJ6VuM zb}%evHZ#=}p>!bp(=!G*aVzQpWsZ)fSdJKRSwuCpQzIvKUHjJl(`4GVkgx|T_~1HQaM0d&o(K=p(EL-qv=IJj@61}r5M>j?MW0aMejzUgMto7Fh+W9k&tt~(c2qK?eQgt z?c^>zypOXlahNPmNsEk(gByq!u5DBn&tf z`eBv7BwUhLt7@rVrt8<54^h4Q*o=mK{gqLhM~^GL-VIX&pl-N05`sx@T8S2j07F3TG<&Z%yPO$w#Ng7-KgAf2hwBp4exZbL6FTrbnC?K zs5hk3uSd#nXTiANoaQZ@ULihJ?cI0{k_uT)C+uVlUwsE?c~h%wP(_1DLv3PnK-P(0 zlP3H`$d$Up^uv3ugiolZiqFaPG%XvS({`A{_%-8;}EF?cb z)RO0!Wl&cs<5ZyT_>qnN^k&gT>0OsVNOaiOY>{_XTHvPzi76DZu8WZoieVdG`^^qd z$EwB&luFq_lg`9P0B3F9oGx)mJl(!|xuJm!c|7WF-{!Rq;Ph)cUEPHX<>i(Z=)B4yWquv)|g=7yh z>BZ?i5j#T+p!8`LR{?nvq^IY!s;&kWHQZRYl25B)W0qz})9t?-E|AzoKF`-nm`@S4 zr%=|3uukT@!}50yy@|Om!NgapLhlp=*4F3Ze9`96%uoUu+DQvqng5)&s;Y#;^cn4P z4PuHY0}ppcn|CBrpB-;pkqr=`Hso=f`s<`lXl$-%|LG`xCA=Y2`KO~;Q@+U!e_P!i zypG=HR~Mt6DUmqtX>@do^ckuW`t8Kq|1uaH9A}rOimEq46%}K76{YZ{P_v~k$i4OC zHw6_Cc51^5N+)g&=tfznezvop{NLcrPP`@_DAF-B8%1ZpJ8Qy9t+Tdz=+WzORTG%N zUGF_Fbf-|9cW!|%pwD=m2-(gW94NK+=tef|LaKjL>GX=|I)yEXnl2o%iH8DA)R&uL zFH}K3V7}Q>rS7d37Wdb28^pgRei#B0XT71cnWzY7l?aKewE9}ab%6YZ;b4oXMp~FY z{+F>ZV12PQ+r2a@bqjTDkgKIu#;DUTo&xd>Aii1nzOW3eKeWk(LnDzoGTj*^YC7kAX5Fvw>&L03V^>{$t4g@xw{1 zI5~ZGI|0vsx~O6bNLt$)Z44!j$jDpXRM^`F!$7jB0KkT0jmvNUi$_6-Gqvd-@|S{i z-hT>V*bg!ab^oFgb8tMg4r@=&jXcyhFggjLTHbLB;)%9qnAu%TP`aO$?gxF% ztyFtN&l#$lnmfjQI6Ef9waM3%6XxqtsSa8TYWM~o6@0zahnm>f3hj6L(Jeqj7`ENF zPNqDgyXMa9?jPKDEnFEBtopEmv06IF3CbEiVDk@QWx?FZFNgIMRB> zRdo;a?XQmR{r%fV|2m;NzoFyLv>?9)9y1$1?o5qd{`2Ur>oN3xdw%y=&!$@M>;=={ zi#mVAlMagZT?gP&JCal*E%5oB63=|tCuL6t=A4UC`ohl~KmRXR!GB_YCH^CYQsOU< zwth*ut=oQWS9bW%8LGX@NQ7F>lB$VUHjvb+Qe16O$|I$7Y35sk<{Rao*zp)mo1>v9 zpM^ND$dBRl{Kmj{YOJl|=5q!|dT;gj)8O+pGHPfsc3GEQ* zL76NPQeF5v)WtoDsR$w*Se5S*?}6a-icF0)T0i-Tn0o$sDtBVJVA#_- zb@D1GPj`5&ppzoJ_!X*%T;)O89O72&eXc4MdMn=PdJbLD`pm+}<|Hl#$dxR@ZXB|} zMMX>=t3rkSQ|PL=Gw70QTh3cyIF^=~*+GC0e!lpPk)N|7o0a}zTKk(94^3I-QrDbu z)ARC^yx`MyPpYz5k7`U%mmXXxWWF)!r-M3ryW7!dfV4k5k3_5 zveBJaRlv+5_?X5L@B9};-#q1oO|QtZ9XD9Hst6NXpKxcv5Jfdffjw`d*x@dd9w^kB z=;D>gW22HvmOWMLBI3)b-RelkC@!Y46U833orb(Ey|>+MZ>XnO3-P<@XrgM9om{e@ z{B*WrwccWbe@{?tNi-2|>zzf&L=1Gimo{tTs?PB{}umf9Ub$638r0K9HZ}y~6 zd>w&9YL~o#_MQU~ty(!0n?NI9-O*CA3d|BdurvQ5g>u;)Pv16QBx&SS 
zSeB-DChxC|rVT1YO&H{U)uzCvVd8Jmh>6=PXw-T&Fq}j)$YvKIil)!T9LU%CtzIwX zT#mu+>_%(hpLnv&2bGSP&F9T3dPF=>S>=*q^IK^X^!hBSmdPo69Iy~(CU;s13PS^F zd@t(`{TBl3YIa-_cj+(d$GWb?xRscz*BP+`-9A8xUcrLh(^)hllcEKuHyyMa6isU% zKz_Bf>h_$fg$Y-1z5G6y2Gd^xAzcj?d647Hnb>P{#=8Z_)-=|g}Iz`w@vv<);L zh+3Db9Tn3h_QOk&G%Kv}WvxY9p@&=bD$je*O@JWNK6`H8IS%KU=ISoR4pE6qcka@) zcVHmDppr1jOS7NGEhT-H-{Dy0WBmf((0oy>wz2XkVxu4{DdIPm;wSnTI1H4Yo@0r% zdo)6F^NU|g6b zH$}mN9!3xpT03V+1Zjo-GR9^>5VBO$Y{ZMW{`vYPm^-0%iI@S@&M!)Gbmkt`dOu2P zdK+c@>yESR5`n+zYi+R~>C(&?&i(p4y=`=9&P(~K7&k>5N5Sy$g0J$A-Zbj7$7Q1| z7=#?nPQ@|ZSj~ygYy@MOzsLCk7WWQ=jGF{Kl*FI`Wxbvx;=E!pqup>YT&I$u6`0xNOz#K4F7 zg3?a=ZmngT#Tw`ry-P;<$Q5aF-6)X%k}b=d8!9*V%~Zx#wx;2BdrUSx#bj}bd!+AX z`$!M5MD@K%6`xhfn+IV2Y6`s0hTp=2d&_h2jf~`3K8ILJzl;*I#bo6gtqAs^R4E zO^l@j@PCsdA{8<_`3&%CauUBSfD_IM418SdICMQqckLB@YWH5?ZFbdNXK= zq-;UGgA}2XB0-mXS4(e7u)e*s^kQB&2*0n~ZK`f=Q)C}dbaL~0V^a6D_HY~LCeM`e zevaMPd2Bn_oayxkxqp*Wew0KC#f3D< z@*-K~&Q;&zY6&*oT*Df@q!`xy6*a0f$aEdMxaNsweMh%BTs zSe+!=U7BH?fWO2?2(P;BNpy_VCak-=#li8$UUT82q7{4A<>{cL%vHTlx+4+(mMy`M zO|tn94ojN*JWs#sGIx^Nfeh)wO24ciI>8G~S0WhLCz2 zYeo3icz=~q-Bh&gdXdbPh!?9K9k&QO$Ig?-_GJA0L9N6=N7Y1l8=c4BcAntAIk*3# zoA``JS{mzRTKfb)CO@UI)kg0!Xk=-MUn%t*yP(RY^WiA_cyaMWOXv9`S;lr-nx9oF zsjR6j{LMujb6+MERi)|zoR6!3jwp=#CI}Hz`+ZbE9hqBbdz?spqqdX z^($GVEP#8P>Z_$3j>vFfH!qYBex$NnbA5RM3imKy1|{+{n=^8Lw~qRPNCLHeD^;cN zgkGRlje!x>kF*T<Nid zu1sUZSnK^{z5R|}P)Qg7I z*BBjbKFilgAA;^&tYDgH4I8%mEb$f38J}fiB#I*}KixG)aDjYW4=0RNTLAzE)hDVU z*l71ST)}{;1-?5bo%>1B4JhV2hs3?}k?|+Efl^W>JM~-TeI^%o2GlT;37r0p2>AMi z_s>c%ClQ}MV!>*sYtjphIg3^lC9`H60=vOF8gx~m+MF-db*x7&Bpu1owtX8mAXWm_ z&~!Gk&fok>gF%6Gs*R%od&U;TCyhcOzSy!xY=%|O==N@@__lFv*4FJGvDH~;U~r;` zsx5I!udI)4e6Ff7>O-;tMkge$I1MlveAgTM0LVHvmX!`Uf}emVGR8KO{?L0 zd%)s@)5C2%UmA=>sp@MtQb9wi(rd*s3Fanu1n7t{zU}yCmH=yc<8`c5aDM((gje^11r73if37a< z1jG&jBx?zZR9=qHEz$v$t;ANc!=RPz%{GTBIgb7({9rd=v%W5-it@dx0T8m1LF;>C zD129bc)o$`1nic~hB2L)~ibKyn zH@c;`TU|1i>=07c@yo!dP*vjI%EVS-(jbwP(MA~%PCd}CtKMt|*k4o@6!fh};jLFd zxdtPWepb{}H)V{N>_B}m(8p%{^eWqocBXB4BlD=+aqXr~vtqr{++JpN~nN-#PwV;QZrrNfg50fC~*pIpw#{0k%%{=OKb59$5 zzTl2ML%jsqH;`Gl8plusPuywB(zp9f`G1i2-eFB;Yx^*cI`)EqC`D%g=|xJY0xC^P zKuD87U;t@BT0)V~#)fpI_aK2JBs3`j0t7}xs?-pAQF;r~K?MBbIWwL)^Pclv?{&Su z_xru?_3b~h*WPwfOr;K@)3=?gw-JxmYDLtIX@?zkrN8!!GAQwzM` z)AqR``wyd}4`+H_ZDo;z=I!?;O#d)4T={YSuK!(zrdP>_?^REx$3#d%qsI> z<#?apjm-8ARap|v?iE3+z0j70pYoseWM5D+*tQvfbnsU{mWl7H8gx^@_=JCipu9HL z(axMFXT@c?!bwdi8xG!xV9~E^AQodn$MEhs!A)4-R@*`Lrm+l5dCD>HpfgQLBpw0= zGbrA1>LO9G>z>E|5lS5F`5 zV^q;Yuk()V}LMeZMWfB@P))bsCLHVv6AXBch+fE$}Fy zo``A_I$vMd@*|m?h8mhW_UJcjw(_HHRas zMQ-W$1%p*BK6h?SO1E?g=9e;-h(p0C7&gh@zIpnW%Ri4W@WK8Rv1zuVSfqEQ$m`ea zQ9y4D;(j^YwKG50HqUa2EZpO%t!-4O3iCa8sbZ0IahAVo)h+h1GZ#;jJwz_=X{)%K zj#b9|tQ3p?h>KY51~V7B?>gk4+Pes;zk*B>ztHK0J2OY@K>Wjh<*i}p~3$?!YG&-Z7bDO;U2Ak4LG0aHJD~}nNNU67ZxGea9 zP+Mfs7v8XGdC0Vvr>)0uTAj8L6%$(=?bRmTQ7@6e?V~D-6=6Jwv;putHjyEFg0B0LWC!pR9q^ zJ|-k=K%zPXF|#gwAdCQxcz0N|h(xE9H{e|;*RX?T3&^S&9eGnOcrI1jitqfpi-oV> zSGxA3aH*%22keOQuoReUO$7)ETy_a7vU3qpZBNY@u$IwqxTBD4oAG`|=Y#MinMZ?C zq1))D+&&7EGxQy_nGlW>ZsUxKi8i9Nqr_z%88wMb^_1Y$-a)vP3m@5-b5UDe1!Lr} z*Q}r9CR>?K+S_|^+|5?Id?jY$L4J{*plRjcX!WK$dMV$2Dg1+7bvijVnrCot(C2#;T`B<@a zixuYG=$Mzq)mB(qI=GOSh;yuR*d^$`Uz#m=cvS&YP$8uTXbp+gCq_`c#T{K-e5SxU zt?nHjXdVNSb)Im3Abt~--_yKs4M{bHz)*zN70AMz6xUi)FNPL_z%Hj)6uK%dH_cKv zmqK~(wkNNTs3~_Gs70tsgKnJl4`7sSwTRm0(z}{JI}0Y53uHqj$=jEp8+No0!kfBy z61_ddVb~0Z1-6N(m6j~yfe91o-1XY^8=LIBguV`q2#xC}r-KT!a_BK*g1Zt9+IO-$ zxH{%_78>_qtoxc8C)$F;eW0|Af#&-d>IQ*)~;9JAD>qU?>l zyg(3x%IJ#{BeW@_bhw18&hoHdt){azXXu{DF>!KUO=vD2C(meGpy`wqb4GUV_;P~5 
zt?oMN5*YFMc|r=oKS}Pl^!FKl?&nwp-}aIDRb`*JbKvUlA3fGs_kP^wnpmFgY<@*8 zaTQfKrG0{eE0$Nk@Y8H$e^SZ^Z&!^>^jumxjcTLLVEiyf7YQB~#$v$KW}-C|3HV)7$+)X5u0d1%@VU9Bj%_tAQF1Ojbef~*RI z&s1fk3`NO}QC8Xvpe%R@*q>DCciRo}c&Axo!t)F@43II<^%U9NW;GI}=CIz^vF z@P-)s9GFTgv`1soj>ORQqn=Zjkur~3N2Di)b%Js#2d1b!_Dgnr({>dqEZ`~ylQb>h zA|O_3b_9C*vS@bMjd z8r8sOw;;cexu@(7xfBm$IKzc)F_Le?I%}O^7eOpxpTPOs%NNiQ(aMvhkEfAUEXeFC zw1)~+;dl~vk^yWD&v3%o+PJC*s_VaVOx0%7^7!j}ck0Tw50kpTeXlIe{F$pGEeY?3lPNU6d7;uh-Q+OLOB{rc~1{&Gb5 z#rGb3{0EJY;PvcXk?p4Yy}mmNOcH}~2IhOGQI?$IpY+&$MSSOceuqkxi#Y{HSrjXL z^cEBE>JBE|5vS>?r3RV+;nZc(|6qiWi){=$rBl(G|sdZ*9aBSU?t*NhXu zK;~$l%iH|I?UZXlnXe0gLhggJGYNvji}lhs7T?J~s**jjYw?L<$46VgWL4sqo)D-` z?)!uQJ;=kwhEJQ&x>Sbm6fEkj62nYE!Wm7Lwc%e7&LJZduIo#RKgt;|II6HiWTTox z$z;{%lJt;U2qCmAw$dO8ZBK5CZH5Fgm_h;i-CCqi1@ zK6tnS)h9pic7Yy2oWq^}VWZ~`ZB_AUKiz6%@``HHmKDR47h4!xQ?&^6LfEXSO({wF zrn+p#NRYVvDk#mlQ)3KcQM^c;ntI9PFQmls$B6`OGTOAKp`R@LmisdJ6N$t5y$#Z- zPs|1BIGr3R%>s6`qhgACMo7~!BZ7K3l>oz{izO}hC~9@%

Y%=stj95?m)*RkiR1BU|&XE@Fx2TR{74 zCbSR4kbVtsg+mv^=FG2aCXS43WWbe%Cq`&~jTnZyt0ahxf91ONK46cKJGG@gJnG(X zEpw6otF-@VPL0i5Ul}_CH-xu+_W^yEhqBG)4SNTum|iLE8N=q=^CsveAup=C>x<2< zpf{$!tAhWo{EJDNun^__FLX;_y7ihZ)ka#Cp-F^*^Oi1$KLH)tZk zrTX5KwH-nk2n)IL?exg}8hl6HkT7vhU-xcuJs=AzCIIv*J12=Ek zIdF}?c=H8Vh$~GQzDd#=OZqf3#uS4+T0xl_^4i5L1o1^qQ;BSEp*{HeD~BtcX+{Cr za)XE-;bLX$&03T^97KY_Jlg{a1wOl)7W|befgEWsdCy7>KGb*&fkaLd?zd^d3>lwb zT5ksc_kA`JNO?6~eM7VJ^f-%G$siSHEvcCFmBp2KWgr(v)k|$m0K=<>C-%OpGs2bF z&$6*ij~TSqtxmhoYzPDeYXX6!O}Gz)wNFe~Z_N>3<&h>; zNXeiGY--6nS`mLs^U&BrhirmG1lviOwo${j{`eTvCYI>z*J=!t(sg=q59}@9+6u)? z_%4O7j}p)Y@dpW*YWMf|Uc>dubMi!eh|E0J2{=a6H9O@NuS0o(kjR*k;^b6jn55Xq zOS*LuX1Xgu#{DN>F8WnJ6zi{EeqUGXg*}DUQ9LtnXgCKmmtcj+g!9PLk55~O*?~L6 z`YURht7!}`G1VWj~k`0UzeQK{iM=~XT5P2QB232^<( zHlYH<0{2nJHpdO9)klO(d{O&**z&48q;+QxwM~u{J937UoK+idZeAgjm1Rn_i#E+o zTVtPyKH@xQeT-6ah|#13WAl%=sOC>o(Qz@}Kotl?NjwK&zcgr%m|t7FkfyKiG?r?I zJmM;3V<-b6REb3;R|UYDCuiFsUcm(_&^RnrKNS0lK%{o?F}`_su2A)yLQ&B*+@g=$ zxD<>lmQVDMIbgX3tS@P=Zdtlqi^=5si8d`*E9cAE%$PA4lO27#np0{~m|&u>dDB?` zia|zR`%~cuv_18bqV{Td^k#>;FYdMZaFs_5)5&V9gmLZp8YshQBNr{6`HW=_v?PQY zvew4-Xkz@#!tjoes5q;_#H6-|e&^5^>b%u<2>5rC-}Jl6ZBQjW20x9LH%G_j+nsiC z1JATVzNgBBr27VqosBaIEE*_xsQqROs6#x2805Ll!r)!y#N|G?c-34??{N06WLPH} zkhmJLPlquDzJXN4LRmDAN|aY@a#rRG?>Nsynuup2xGA{(W|{gSNM@VnIkyS5JTBQ! zBMF|Ldg1txi4MWi*kOpxtTZ$b5pFFwd?y)W>5&+^+{jduy_=rwzdjW z$-fXXJU#WoO(rt3wnwd{9r5cfa-fWQ59=?(;Ax(0&9+2c%>P0lwkaKbvKWM^1?8_A zvOUwc{M37FfOoerX}`oSfA{(R`;o{Ke+h%03n`MCRM6;F`x;~DD;t&hzaUxY%f))IeU6Sr7?0_%{Lu%i%Y zU`-s%>B!np8a`3kz_0W5l>^FgK-jJO zi#H$L#DtYN13vWDX<+&Zy-=5hJYtPc$+W{&UX5lf3ju+32h#?v(mz#dqjmBZjEwgr zW$kS82h>sw6CH!9w)A4u4SDwgDyde-SP#(Bv}X}a;a5@KZuD);5Pcez_GuCkB1YVP z(+{29k(cl^c6=4_eN^P^%aIcwGQQc!S7S%LpUP={jEvDdnyyjz_DSC0m390JX%qPb zDHSC`*mS5$Ti?)!L7%D`-rjpK&(BMq2Ol+Semx(?(EL*RSs8fYB7-{|O*wVeEdAzY zXBhpbJ)x~$x7z8@p8Vn2;o|z|vCo2jcvL2~w#xa477TwcXt`dHgP-Wh50mv z*ggJy`_0sFSwN0lk7=OmO?ZC(jIQ|Na~^82<2uam&9k$5u6hfnH!52=dYx5nqo{nKj4?Bj)!etBc(HwIh@7<9` zT^!(Z?f&9?k*=l1N}Sd@)HT4f+*Q)1yPK{q+JL;?Mbd9!`pB!-aHp|Dna*k1r(E9E zhQ9!$k=QJiw2)xQ(nZV`qby*Q3mh(+8A~o}Up(-mifb3PRjv4Ze9jj^q1rF)?iBI# zevJ%#^RKtWp;(;U6xPJT>ZV^V3IiNxc-`+J&&Urg*zoZGc(_|#kzAU#=)_#(u0nW2 zMpkJ>s%=Tt`v56U)ux=ZC0O@Hih9N{&z?({Nz)z1_O#k7dK`^n%bvb#SXV$&e5)KdT%Ov4-x)}salsy$ zkq`1*)b-m3xSdsdcQdFn%>tsnV&!52{jd*km67!Vf3Y4?U4Jy0Gw?LB{2K3>)bcuh zdCryAwK`Emy0Mwq09D`N)uRfn;po&D15p_d&C68+B~Zc@^eV1k!RX>S8*cfpa8_YJ~^Ajw68oG6-!--rj*+-#PR5NG)S6`2AvU{kJm2Hk6xv`%0`G zk5rZqE~|ehLdSr@IGYL|16wh+&xCA*qXsC2ncbatZ|WO0TKYW*m$QYg@E^|rY)Eyv ztNHl`;eo@)i;B23&KN&mvM{WXw=J7ZM@GwkOyX9lHS1r>6*4;cnXU5xKe+73KT5g1 zq>?oQ#gUemW7cm+9X^>ct(5RMP|eH$EtvlC52m~HXG*H0!JpC2139W$v*c)eaF5}M zf5@Zy-M;*HrTW3YD3oj$ed_7hwOEbsZvKJGzgPJ!m-^1{rsSJf|I_jR(K#DU>;sgd z>d^aufLoD&`QwJzj{v}BiA086?;!ruY%hkOWcld_9iadnwA7qeHJ8MbW-ex9aTU~A z*Ljx3lV;jycBJg%a4$&34C0qJYAqH={ftKY(Xk1<4_lQ(E|+pUGscpT%LR;Oox2(F z?Rx`rl~EB3)xjPva?!&ndd$?2M-M8&+*j_Tf7;ztq>R!$7UDQZ#rQu04*Cgfy*J$D z-l3hpW#x%4%w-muY;2z)6Wo$p#kU?d)Yjb1P|SP_z-8$XXb3Up6)Uapud#E4x@#^* zPkoGxu8|ng=unwM1nIv@{cMQv&5E$9>{B=cv3i$!#n*f(0{5goe8G##YIU_o!VK&2 zAhc=0szk=rsDWa@z#D-@fWIpem}FXsh~B=ELyVX6DQyMmwy5S;wY&}GibGVhI0|(v zG(CFGyeg|N_ToN5vWvPt7stCLXHXtM^#hK*IFNtrB-Bb;fp8I1e9tNX=Y#8eljMpW zas&F6XGA}5;Jh}Wa-Pe?#A5a#5o2zi`kn1Gc2sA@*aU3qE_&>~SE^C)frX=i4Zd}j zQu3dzd?pHgI#tiFpS;&nnInVG>_toX3rb(!`bzI}Uci9-?v*mh?s2sEB8f}JErNpg zy(TSZ_5hBLR!96qYpR&Gs?^z~OS{tkbZ&cs#bOFFA%1Am+meQ|P-TuVxcn}iH_1f#vtXWcK55dX1X&*+hdyD$wdJ%-;{yU|HOtEBE?lt3mV7r|}W( z6_nP;A+iGe>5a9=5g50W^ukjm*7H&&CJHd;Z_Uf-?&Zg;m~C2G)BLDUVcZmq!c7&KeFaaq9;sjLhpl#JkWMdkUcl#nh(+jR0I^D$Jy5yH>Uh 
z$z~!e9k8Z*&lz&rhoY6u!z4SWqpL~$Tk`AxWQE_6i3~OqMNg<3N|s>)Rlr07N>l}8 z7k7yr7JclO5^nErZ+tnk!iwbEZoO)60fvheve?NZy9C=aY-)H;ynBDl?ouVQ@fDW7 z*1PTcJ&S0PUR~HC@$&t1<&m#xn3aiV#(bF)4KRCt0vbK+ZV_SU`oRP*1E!InPq#lC z@rB+{xiRUqy_4D4O-rxIafP{g$I*#*2FfAa;Y;$I+~V&)clgseSrvtyv(NM$eH9mL zEE{uWQf=7O_NZ2V)BQLlsIbCfewATf_}r&3#-gPwZ}zNsilDUHq=|tb;p6nYgwWNzrI4=%-DFq_6Nomg*aFB+f0O#9%^`x?6&fb zFiU5aS1-@4yEcwGCKDLW7WuZos>CSO`=hOK+~r;%5l$Ez{a4F(3?UHr{4`Rk6T&Vc zHZ40uphX=5G^@_wkD%ErfA(D9&eSx+Ig62a?$a-Pf_#k#?2|FcKC9(E*(EbPO{j|g z+z9A?9DygLq{B`@&8|vS(^X8IKkAkBEGlwTG26l5@yM6UnH(nwAj!27SdXNn!B0i2 z(52ihQGy^*G2!uRssZ*oV6PA<3SZ|idCPEoANRwk5fll7@K<%bKrb(g&ug?WUC319 zQmy2&p*fe=&@z=^6XEAqUE>W0AQmx0d+sVfEIYa4oS7j8CccB)L=2pghl@eL_Jnu$ z)NbRB%(Qph4N4-L^E5tn2tG*T>HY#Mk>HDehV>B^IHaSM$E7nn=O1%!&Kq#bm+-6bi zAQHH82#jp@ot7J4g@I~p_tO*`CQ&La@Hp-hzvUO=EJ474fw&r1Y)Wlmf)rD^!|_>Z ztros?U!La={KPSqKF!1@2AP z9Xmg5GRp&cXND(5k&$Y;3@BzD6G0=no)W0>yUe?2q}&u6PU1B@=sz6WFKozSMr>EQ z+<%C#sIjio{${JCq;)r2*2&5hiPD;&m&5Z-&g=u=(e_ajS}?JXCx*$6NYlAeQ$wV? zeO9eOzPc@cjSehgcdlCHs3Cx28A`LkXM;ZT{Y+5>rDQ|%c+E5Hrt}MVDxpmirzMMqts5_9Soe*duY&{)La-VQN6D9(c77= z3X^-+M2iOlEi^>upIxEB8Z;zcDouF8FV=gn&b&Y3!ue7mDzswIo_=B0Sv1Z*0bj`3 zKOKDT{Qwa6T-BUN1Bv*>F>%#EY#0=wmsRobRW98dq6-ufvFAU6UoTmb=KGM59v4$z zPHBZ>#S;&r{6CWO@CAgtaI`W=9EgYT!lhy?ho@(15-k&GhBm?U-d$e(n-E|nX5CO8 zY;S%mUPbO1lAZ6b;`su7!1nN97k)|I8^TPsNa}t{63>(e!;;PA^Wq9e-1rKeVW@X$kvI~#$%U%i zYcWz$IPlD}<{oQs@MZ2K-U)Ya=KcEwJkk_Aw*c)Q!v|DU_ysnXL6FB-r zAkfCApt0gi0UM9-G5-8Sc@Mo7i!0|=bd#w-nfD9uXwR|iOVod0Hv2VSe1eIlf3n`g zxAP&rffsTuNu#&BD!ew6{HQTEC-P7zDGOZgqsUpy*?{wihsL84MLUZwGg0&ZJE%ka zp#x2lKdZE{6AvI4@OC?TFn*aL#SOy(t{t;ft*Fa5e5 zkAP`D+DXbHvoyqoX_}rq-$cJ#5Q{1n3y`1Wee#YWoBhrCPpntTXoUYv%&KM;t+5YX zsXj?_7ge!WE)Kk%Btj$=S^4JT<|OsOR;Ns-^px#fnwtvTY!x#a>d^!W{Nx7=sul0< zgm*1vbjyJ>OqJOJ$O3a zZ;#HSDAS$J(PB*V^@>z|oD<4M>dB`(HH}hE>jX0TXcoxEtfweiQ4^mrR#%0f(-;ej zgEbc-9FE7-FSq;ua605j$d%rwAl8Wih`!82Wm`*1ONKr7r1}#bBUW^)(UUF(gz^-| zB0^p(!Q|boZTtgU{&!vqXf)f`%>Z(8Mf3BbsPxh_y?ww{Nu1dNL(UaV#+R$45GIlq zbmTw8U8{ax%`io?6SO%2q=Xu?;dF=5dCTCpTeW=%E44Cz;vyyfPR+xR`dP4x%X8iY zj>fBwsG->B2|*myT+!YcNHVg+F=w72h~7bGv36vkWTo$pc%bxNu3 z#damt+e0PZx5==xd32&A6?je_;YyH9{=9kJPc%fxq11*eVd`M~iSgOO{5@9Mc0!Ow zMG)uZnf02Ym5@0n^B@^3pWVt`ZF*~7sotk)gx{9pj{UMcAn(vMjC{j$Q{flo^a5m2 z=o0wuh>?E^HsE;&$^`>$bb9koC-B#oVL%8M=-65-!YTyoliSUk`IdO^s~hAEBiRA8 zDl+cvkOM!Cc~}RvrVO{EThLgRs^!^9qWc$`L;g>p-W5CUNkApgY-!_@*Ru)*iElfu``8;vUm^C+Sag#Zs z;O>n&JcFzysWkcTXrsddzyN!>-cGp@e4Nt)V~5hVu?Ca8he2Rf+{o8e{JbI&A8K)( z8|oJ4B$2YY1rL>Zwvj_w+@dm+*^wnF=#*@7h;cyIW?G1R;X_v~`7=XhBVFGQ^r4l* z#OGXUZnlK9;LFIyhW(a+-P6G=V#*ku%Ofn*lzfLE+B&?6L^Va-;Sgk7h-W{s$@{w_ z;{R|=d*|2NYIkn@w!$Z@bEfI$9TH(KkH4WW2f#U0m`WPEQkHbxMeRP64Bqg%~Ond7V4Pzl36 zJ)T4BR^i*MD4~#Sf})@{IxnMy^Lf&zMj0)C?-%1isV4C6{Ql|1Oy+O++>Fmie z-J9p8hwyvL^BHIG$WteHVSF>hp4-+ zz;}EvN28kY&x$;In$lwrUr%Hzv)O%DvVGRfW5e}9@Txjb%OxSs@282-6A@Y_2KT>@ zdcT59EfokkwyS$~TqE=0+| zb3r_r$^}|FbzO9go@v{N&amcExfCc^zLt37EiFDx25c65M;O?$JHCRx^#g zgRnBxy+RB0?au32@o950^rG_g3Qoab{saQ~aHBFx=H4LI>ED`WgAR+zCTNx|}s+l^NdZugY67%86Y(S-H@+ zf$^B*Imjcufk%ZAH6^yg^`yWc+kcXZa+KRSnO0Ty!Hu5y&Go<+I)d8oTQg$fNWNV8 z9?=zQgVJ}LERsf+e9J~Z#-mrhg&c#1 zn&r0&#ZrgvKSH5+oH*lTPrj3J6ZAr9&DuP>q@-W8Bd>xj;?4v+AiQOsj!&1fZ>Ipm zY;JBFX4~N7&c!;WOqZu`=*b}2d}$OWa9coRKI=%4hH>4Up_+)8Ims&$keGYcq!BP2 z?XGb$Nz0lq(BtK72O5+-ChsbePtHrL6hl{3Dm}0!n9B3S8kDcNn|_A5H%C2wlP2wW zdHY-ln+^FNx*i+7xevIj`Q^uryDl4`lc(dOljoquHuk#b;%mhQYM#vmkZj|crwJf? 
z)djGDfO*YX$tDBFpp@b^YkJY^^=;JtuKSAnF`aQu?#)78NTMQu^X-`>SFL!=ys}*C9d&tER88cHasQC0+ zM?@p-4VrBTXhbfZgJU~9FOH$y)H07o@V9VOgw-Vzr#ndCd3 zn992w#f|v34BLR(E6ZbdW}GDNd%7|P_2FeIA(~u%ud=RxGbCg@+E}gXb~5?%v^87W zy}D%Op7$-I*C(nNDc_znL^$Jg;m^&k)k@nqR?E!Su~m z7oxuBWAVY}j1Gg)^?sUbgC`Cj~Ek9N`zs*t>qBJEZh_^27L^4`?5v zdN9dsj$ldk>8X02L8ykQaJ|p``lzpT5Lb{)3qn?=madh~51|x953^HrV%VaQ4201# ze?G|Zx$0mGw=qpnC+aOSHuGABZ&y`L8#{euAJBNF`5>1cqfgcJ5#3y59n^}$-=l%d ztlm1$PI+CsJhGh&gM=E8yr0V_Gx^h6Zn`{w;p4OXhOv}u9+geEr;wgJgO18AjH@>? zEF^M+33WUDoXu12F%Sq)O>c4QR|D$5k7#^-kuldW$SYZW78d#Z`+W&n6MY{of8_z=XMPC0T;;|=R(i1 zie{Gg0dh{qZqDQqpH{|$()^YhBp@Grfil?#m({qfbP&ZKnw?pumxSF`p$Xo_8NpY+~_WW4HI4a8Lzru@H(-c+p8cEVds-bVXoe;unpkGz{gD9Fdlk)GVzID2f0>b zzk$6?Pq3P8p7iFo#!Zue$|scB*E0>}ll>>Y!j1B2tL~+Zu_PkuupdQ73O0IB6ev5G zw&_|mzY0a3w)B>8h3OR<*Sxx|6=H-LVgvYL?Y^#+7au3z!Ns2UB!%g) zQ(Did1ey=7p`C5xE#%!zC{a&LChaUq%6-__fYVOX+b;@-ddtZT6@Ol9e8l}orew(O zu|35-nxL(cIh1H*%eWv1ejs1iBqInC6!a0Od|ud1uu!iQcTS48$i=1Lt?9`+An&EB zj}xQ(62aCo`ec|yxPN$pMd7Fr-|2k`s$+1~VsoU8F)&zb8}&-+r8zC1T??TxZ&{ETh}<*Vd4)e_z$o1gfCTM0Ohmm((rOvucKo?Pt}KLdrv0?=}Y@5%Qi@ z8Q%MVH}5(bO;+U(O&SMynw*gJw;OCLU=ykSnWs0_cT~qhSJk6hFQeL_;t}B4FS>MK z`7vizdjljz0;Zl>O7^yXF451TrW~K~mUg6rJN%ulV=*IS*L^_jM(re51Fp7fwu*_! zHskZWM%0z-FoJQZK`a^L)kC3N+pMY>zRHI+Zr&Y?p$zDab;A$JT+xC)n1DWstgJ`f zeGKE+n%PPeA`J zhqz*rlgXw=wCyH+opZUcm6=4Ca~E)T-N0js|40_PFV|;R@^I_HK7Bs-R9tkS&XiR{ zKuRnN(%pU?G_v8lv=0b$BM`>Xd1)B?_Z~&o>ssxcTp(|I#)U|6CxU2!3xr-0D!D7#g*G9oDTF)v^U_Ri%{wCYV&Y&Oq^lwES*Uni&G8vQ zVr|A=j;&?zmc3o;jA6i6*^)U|4Sk5D8e^yU{kko)xw*ni3MgBJ@?P!RNZb3~W?uNK zFvG8U$eU8K>U8s68H=bsLdH81mCedw%<2T@TjehVLH?AQqQ)7j2V1EJYxVfgprBrL zKm2Q%b$00{t#ovuyxpRI8)XsKB)+Anc&S~vOO+NV#m(Vt#Wg1(JUPpjJLRx`8d~%) z*cy4uiJGN&86Twn>R*FCFkWssD@Bijfu+SyCPDZs3&%6Q0y&K{dpB>|Ma1`YG#4!n zBNmkhEFE&rr@2l2B4Tg+YnTDCGdDly6QbGqs*zST=L>X-`b}D4GS4H%y`|E!x*if- z<&kh9iBy_UfMOK|X3`D;DtGB(Nd7HVL8^Zx#lxx|FZBY~cT!$xL9cV{ZDvwgur(Cc zrlF9y7yNAVRYxCU=Sp#zo=PR#$?+NGF1+3d6>s3;qdhjX&r;f?&A!rj>i&n?GNxQ0 z6%EbVvabA)7ppiUCch<1vgt?o*2q03?I|shskB+$zbj+FbuG7s`Ya(8+a{9jbvo^ zJo98I1cBjDxan1>TW)!PXjS;CrZKjT4YO{I%rbLZ8$`dPHL& z?hXf@ge!$`izm>bFn!iqYT;POw;&>Fw{wAilgYV%=rC-Nv>aQ^N0d(Cj z#x-XO*}Nn8EAObjn9Ld)1Hz-xqB3G^t$p7uKRdlP>}&+g#3Zqp{$ZgX{h(&Cp2QQC zWGTw^W95!v*u^k<%NK2;nU`&BVUO`=fh-YM`MtDX@e;ku^v|j8)`hsGB*&mkL5s>Y z#e!1b<1*KHbX%X=yt5A=t{Wx~-zi;Z|5GZE6cC>x0dIO@8I0A7FV3pXq@3@OxtG0Q z-9QtM?mS(<7uC_0D{rN{^f5(K4paFpId5+Z$& zCZQ*UUK|TmU?`zC0|b&lkP>=P6p$bwkbrb4p%>{Lzo_FlZ@c%t-*@l*{XYK4Ift{) zK07;mueJ8tYdw#OCxa9m{h(X!XaV6!{-<-$j&{cJmf5zrXW9csFz6vfvbS(SA8t`#bElm zHT=!fknqMr_-ZvL_x<4jK@Mk9{ehD%v;q0d?&B?}J)&w(Qoq>@rgA2ZDaKZX50G)< zj-0f1uBu)eYLcJ_nPrs0Per$f0P{~8 zI_WGdD+hwW0}B;dQ7jV@B=ItJ7YKs!jXxn3fty zz7&nDyEn@WxHT^b0)XV8rf%9;(cA`m!F&5)yO?p9IAm=f#8DVqt}>)XKt{nU8*(LO zilV!QmcipoCeCnY3Z$&p)H*Gk?iNEtm`y;;Pd!6Utbrk-_+)_(k*ezSU96U;4V&GW z$t~K^GX)ZwGjRS#Wo7*8eOz=G^wB}xRj(;7`c7ng*-}1lNwXRFuHG?G?cUqf_)VJh zR8DQVRERrPk#EFkd8d@rtEuG}myh!yQNg?-mR~kk_(M#I6v;dzA@_wxQq?H@{IaQC z=G$Qd)mPXQp;zdo$SHQi=OLlB&AE&4~vK-QM8PTN#X3YMH%)FpaI<A0Z@>EK`U6S{65{CTg_NOcjvmM>K>k3kl3^Q%mC&erEl)rMJkr- zC~9w1E&AgWzuOH1}rQI_fh6eRc3f3ZJBk zPP}F(SZSYWaG7dFq%MzM8CIElh6RW5 z$)L|g{kcquFm#bTKl`D}{n9yiemW9VvWaGgDl`snK#Qo&xNkzH+%NqE2Qfh?_>c&9Xe@RMz`{DK4 z1CB_2vFyHT4}ODQ&&(RItgqi-sg6&PS;ge&+mMO|j60A8Dl*Y5nD0o8*Bd2Q&7O9O zzQ3W0vou0hZcE8YH+e_3eoJ}1y} z9As2n_%=53@{;IgpzQN*JU%QY-Z(U^@`5(3_5<`aNR;qoOh-bjsjand^SEXIX9gXN zYaz16^hw6D{VKM%EP3FWI3yn*uaQyF%ljZCU4o1me{Tj;${QQ5$}(o)srVab&EZ2O zRc#j88-Wh>of0lS9#AygDZ-gQTN%9oqo)h$vXj##KBk(+r!XB1;hcUNTtG49<)G;^ znB-$TF^zD49`_y8=5A7+)9QTx)`FSU)5Ri)dZb9;B!>NIFmb724gq_Y(1KOcJw3@BRFbCWXF5ac|)*ib< 
[... base85-encoded GIT binary patch data omitted (continuation of the preceding deleted image asset) ...]
diff --git a/docs/architecture/assets/IBC_relayer_threads.jpeg b/docs/architecture/assets/IBC_relayer_threads.jpeg
deleted file mode 100644
index 543a29e5d64186d55ba6a8b03eaa7e1363e3de4e..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 93841
[... base85-encoded GIT binary patch data omitted (contents of the deleted IBC_relayer_threads.jpeg) ...]
zgOO9Oci^$_{Vbc1K7`<+Wk(1?-9_l)d1=cZK%%&zG8~aym;U^Bd*`q1zsxxRz8Hr3 za-D}|RoUK)-UGn+d4mt$;Nx?N;Hzhq3|^I;((%c;3)L z-i~QbH69n88!k#7mS7oU-uyaw?RDPLId)^TrC!+k=ct<{>k9ie2LN2=%fk{OG!^>E z)cxh4sepu{w$S&O4OF1tGLrA&IU@}K$c4@n4nr*7EZhxX#p?_4+k*=mMmLq$Rwb! zpi*9C2#;>QAz)Rh7oCo(Z`=cM1me{tI33fm+^{KR&3?L;-&@!Q!q1VQgwL&NMmhLy zZhh*s&;lo#3D14Sb$-AK4djZC1%KNp|lhwfb2fN?C=yH0nCGsWr}R zr01~45y3$7Z7NGb&D{p|zIvt@Sobbf;K)!Y$uo3l;REDFvIhIQnID^O1?xC;IczQA z{5o^!z5P)PEi0^OVuzFFd|hHL<&)8JxWpc0j}Q!A`)L)}qA_ui?%vPlsOnWb{!`sB zo0A_*gT(g+cV5`7h9N>r*}@{tST=$~{z+j~V81-_7QDY1HE5^w^4<00+H1Cr_~SGe zDWGV!G+bNwu(nj0^SC>@do(gdf__IGaVPk?zeA{(djLdZt=&|Vre%g`ZT1XuTvW)4 z-^f5`ctDaNdX0=KanbygekG!wNtao3()caf1$brPp$a&uFx45c_jt?!?IKH=uI+aJ zB-6N-FfelG=Ctz`)n~jyp61vr;^Em@_gTfT?D;)RMA1Dby9*w(_9ec>FIfsoYi$ei zuEELLaJYU<5=pXL1jmSEd=B|zh3c0xVZTql-<^&BH&t$5hZIC{Pe$#3YCiycV|E#E zQ3L|iP=i`Ys$IjEg=M^=7_nhdzb(99t`1KhfJD4>x?M(R(p-*OoSbeKt1Fyu94{^k z2LO)Ht2_p}?nx*ub~A^+*J3ZA>XV}{@C)-8(>$K6U)D5+1!5F(ildwJ>yl1VK45nF zg6i_k=YJ0FHiujLnTe%Jj8PNs-!aj}={}a-?qoPI`AaAp{*(MT=Ybmr>Y@1yY2E%a zZUNIy{uEK@@=OD7M^YuYv1qOFFaaxs7`$1P2dMK}`?zIn)9Dh>P;{PN5Ykc9keL)2 zeKhF)%Ca_N2=Z*W3|W0$Bg5trv;O2n&Z}q-yFBHSnI)(WrFO%>hm5qoWyQ;L0VY3B zPuOF9@Qv(vGv~h4B%jf{aY_(d-ZmFMMT>M6!S@tfyVZt5E%NX3pGWQBUlD1>d zq~1Pju^(=5xx?Ovr&TqjxlXNxZdR(Py!(yne_el0E3~~*bYro)wI!Kjq(lFOwf6pM z7x}cus{xRbJ-GI=-z{ulo&L-tao%N~IAKwl8-!rJYz0Y*Ct9oR>3W_|bIgA$_T${R z0=Hd=x-fHVvp7|K)p^ft>(n-7az8uVf(m&wr5(pT<&K$92*J$=jgL31CvAcP{Mqds zZEbYjHkD20g4J(-H%=0`3J3rlFbk?MPK*wY)av3@75d||^Xk_b% ziHx_sUyu%SXd%}21TMwIPLhg5eCC~>Dk$8>rY?`6B~`*I{OY4917(I;f04nP1>Ezd zXBrA0M0#$R4k+L!I$EaMY9<^sN+vqP*nA`Bg$S#;2$xYm(X!$sZ{x3md*|M)J}Ee~ z6~Zjl{mmBBtiseK_XNM2U7H#Y!j=+L%bXd$cY@U&N;Z1WQQ8-(2Clz$1Uqw%zUW$N zN0T#cGz}UOG?I3S&TBZ`sMKLuwxDG`=1Kkl+O{>g0xiZ=#972vC7^dMwkA`1Z_EHy zemO-dYVZ#({WxyQDLXed95Ue=pD}ImR#kW0UM6P&gHC4K281*+ttN(;`KmgWvpGrw z^=Z5J${B?`C}beW>RmUgXjspcE>}2Uv2y|q3@bkKSiMrFn(A@${XYL%zsYKBSkGnd zzO54>SXC!!R4oNUvy$joq}n_L(mb>SGl+78KYhEPi@!&=Tt%!*w(tnG)>@Em149rg z*J%YemP3H3FUeHFc+VZgd%Ds`Yty9McHU2!UhI%rzLrPdf&`-9z2%tP?SN=9^|0o$ zeGN8;X;s3_*@^Pq`|?Zc7p!eQ3(J$jhBnQ5B&KsnEAib}B>(bAq*0flNW)CBJuky^ zM2V?&){zU{+e14TisbXWC)v8~F0FB5b$&BOY%PsF!68yaP>VufC|aZUWqKc??w7kk zx@wN7S!jIXvRk}#)a8u&$%Kg+P&?gFq(ma`iUte1!d4_0Zp7Kn{~pXae9qAF;)nQrP;ha|Yup<_sYn(gKW&0EkirM4(8 z_%KR(LD6r4ktPy6Hn9G0k@>gC{99!HztA$hYintky5R=>VJ^5cm~czoS2mgZY@GV! 
z<(NeC6?o`~haIO_#{N&f+?i%BoDrAQUYC>8=Q2YWGDD48L_ybchVi8xZBjX}D{hTR zV`-gd`b`uvwo83)hCMU^nXkPXwaiuDo$$PdP4y;KcGtox+k6~pefoaODRkH6KGuen z>xJCk4V?ehe$mnRa)Gi4Md1K&H|TKxYP8j7QR^S2b%sgzhlKDS?xlb4Q9U--$qp>C zThu11+2zI6Mro1);;Csaa$uMnP^@A@we~t06_R(Ud7YScb=DIr5XKwsj5nM`Q=>Db z;h;NiStBw*X+U9MTQLO;9qFJadE?8lnA48v+Vp#UZt$SR&DFu!w7J7HKCrGEmiO$cA&k7=srE%5O1`l zxRDW=`_t@-gjLVNhK__vk59R79B-(WcZOULD`T`Q317wCb>8%4C}iOdcj2JqaMTf@ zt3%(BTL{zVWnY+>g73ZN@J`zwf%*m-wGPPF!ZmAQ9dQ>W>Zps26F;GZh9Ty=X#<@; z+#P+LJ)Jc!q$;2*4on4uphGNDXxiDmT-FcEjCmC3RrBu3|zDBG5=;&W#RmCkO zC&lo%-mJQE(eYb(X&nrb0lNMa=PR9|!>CMX=l&fk%ycf-aXxIDGzyQV&6?1PLI5S4_kx z8C56!lc!#yPU;i26cR34s}sc6hi}f**2%V&m&X3oqRdGITb_`lvcm=WL8Y4tn%=$x zSpr9jJ67d6-|O^RZohqw!^IBc9Dv_+xU@ok82F{ zMz<_Vc-UU}oIh%E-2irtZ2iQCC8=%Q9U5%)jne>Hi#l|e^RS@>NjD8br@dzeF*pGl zSmJ1PHZ1Jdglk#siPpHZ9+-2?WJHVpn2oF=t`Z^HdPc^-(skkT4`$SVj`DP zD;@8N?T8~P*p95r!7#Oj2!ce@>30eqo#zrhA_Mb^J&p#EfN_3dm&fKB&c&3-X%Z_v zpsa{(f{b&GC$=m4a0c~9$fhU|TjY^9Tz<}3kvI|vSJg`XR(J_}k1~(r_G!5YrBR&^ zw_JI5MfT;dhmx%lnUHU&OO<2jHH##u=7lpa)RerS+3$-lH; zV_&AUpzk9QNe(_UbvJZ(Ho$NbJ;i0pIjR8QGAZ8}2YQ(BWrxnxQmUaEQ`G zFwRT4{SCP}j>HW!+c6&7P#d^#7_iRij_bFDb>DA*BBg>g)P79B9s}78cj4Z; zrOgi-lhYdRk+R|ZCp|9l$_0hdPED7r&*b$8U>+&d$DZA{okmTh(2=#fxsH+gkVU0I z&SJ-@#?7bxr z2&+JkFgbjAFFA=)=>FZ%W;o1VL|fNm-)W93&>!()T*$#RMKIvL(3I7zgaQLgVXOYm*5z2x2E zU!CpqHpKAkyU-{8!oDGzkNf)Gy#l_HkdmNz7zuMk~B)L-DIHXqL-XJLF;b3T5-+HIm_oZ-aA18${M41 z1QXJFTpS(tU?Kg`Fz#bpe?#4{~L?pg| z0kVCfcJw_C0Qb-bfRz$30B}MM6}21hWU$@AJOH%*X0_2jkTM!_s!XW)LuQr32*ljC zq&OD4s)N5T+K~w(b5=q`4i{mp^LGsxEdj8ErtYZboPVXG`1awJls-dKldow&dkLrF zw#;_xVumD03u;wxD`7f=qTad`g5@6TR9jNf4*5yH#!V`-9psoZ7#{epq0Dm^JJRj^ zZ4AM`y5#^6(GdmGfAOJrLw-W3u{Xt4gU_t7eI;XYntrTn`1;WxVVKxJs~@90GP`^` zL{F!H>duWg*T}XlLXNW2Gobzs1y8&(94*ylE_{FV z0a4eZr&{|`NzI_*_lU-8W4T?2r6}Tz5Gx?t8gF^_03f;4c>s7f9JSRjTbb}~fuA)~ z+o)Pvi*b1ZADjkeI675fb+#SMADym;mBnA#Lv%LI^Rnnre`j6US7~m~^qYPxrfLfM zZK0}>Yvv)NcB|)s8)^+3szzHA?L4T@dp19iS4<*hW3_Sy3|84Pd(-xZ+c=k1pQVlV z;xPEZdN-xCD{?(AD!V-rj1>sT)6o7_6`&CmlHIO}nUHCbGovYYk954=lNi#VR8+Ux z-(u;19JJ{)4bD&ZYHOwSNqug@>SD?s_!4-ySnxo%ODQv}kP;vsh`3#q8V=t?o=#n+ zkb^P?i`-I%;zi@y5gzP_Y^~h8%36q2zOoalr<@WpIUHK>NW9*0;(BPbs(9mD*wpZQ z+}(4J28>um8Z}h6dxHfX#q)VIC)IDMTBnQg?q0_YCA6dy8G&-f>nM=*S^sY^%vj{( zY0;1v=Rn`b$ND+u*O{mKx)r=4EFi`e2}I&av$r4Lko4PT=IF?rXAmj;s{~9Y;ZMYB z7s|axNP<$+MM^ydT2=#@IlZ$zK+gA)Q?pbGrDO77!rFzO1(d4uE-&Rqy(8PorRcsz zdEpy7-m}NaZe`07t?r{na0b(u%9)15|3Z_<-Z&XO5kG*WDJYcH4T7z%U(fLQwZc|k zJS+Rw$oTtb@5i#8=FkJd6CThFqHJ4i9d*KV`|s)!@a0ha??7KJrT^mI$FFqyoA{3n z4yUmD0pOQ{(}#bh^2@_{Zk|xPPmtMC`T-!tWzoAorRQv2y2W;AaA2+Lqf$3VF(4Zz ztYTPfqQBirHm%Ldchra(iu zS>T$Xp!F)I8zsNp!C(~b*@isVriA*=5)UhhY)2y>t~d;%+l4X6fhgD9=NA)voN&XE z@+!D1+uwJmL^{gui#;-O#>F%atY0KZ%9AAh8s+AH=h$D`f80IrkK=#u|DUf^0NG)` zciXmJ^>BNV3R6U>0%JRdT0n1z^u23aK7xPo#BOAmxXlIv&!RJY3j<2+j^*|{aLO_s zTkdd(4hxh|b<|L9AMlK)yO<&lr_iJUi>H5oJ%mM%-C!na3FuaFjO zJzN~*h$`i_g$qrF4wg)vYdhgu_mqcq;==H)OxVCE$|L%h3#9nf$K|*r61T4b$vlfr z9SSo+QCbq5RDf?)v?Y$52S(_Fkv;9`@KtmFASY6Xw=gLo$y#{Kk&z?}w8fE>H~u_* zZ3*G?RscU8k~~@p$K&Qz+S!^3z@i|6Zmnv)M-nBdaNC;wk?mdsGq>69^Kc#X-A zJsW?bBr|VypjwS_W$veHV}=`quUpQOm&QmAJSd&uAu;}l7Xk_v9(+Bg9#d-Sj>&1&2Cp)Ncfnkp*YL_egCzQg_6XIrq^Lv6z4EcA$MHybh4~wFAU@mXRL?HMC*;! zNwkoc*q$S@^gatfsgrRWRCRb35R?Xru2`f-Mix6XkR%&N1W!h&hF)<*otR=27$H-? 
zY#vKc?I_4{3y$wFkckg|_+DGlJ<+wGOZXfqq|JG`z48k7hKN+~*S z{wZ^E5dUg<+j^6gPIUAnc#4VHq+6Iu1uG^E3Dsu^6RStJ3L>`j|SKQH65tRYq!+U(5ybnv%-_;z0vcocYlzD34(6Eoc zn(s1)>M#cQR8g7GP=@ytee50wn4U{Mn2{?sel3K;>BHr`JgVv-#B$IZ>0j(BlA(cp z{iUx;Hf>XyID6o;J>2|@TJUpRYeNg;;QztgQdLom?)MX zms)$XB^KEjgDmpN1srt%9xr$rKLg?e3Y@k$;6D_i&%b+-$y zQM3om7aUr9b{oAwAJf3O7=h<}OR^_Y6<>xYCWt2p-pW8>^ z#K%JHuPg)Pwq~?E@?Pn+dR#tHV`Mq5G1W7!SEeMZ)>$zvhUksB>N^x+{NB6AYy{2= z$geS7FPcnoj9s^)w2{kILHM73@I zR5JKM#feU9?4K8;8=mMZrr}OHOy4T1Vk_*dxvC9$*O-cyZ{)PcoOD42yQ}%MtWrCt zgArhwl|CYTOP{T>gpQAT9|Xs$j*wl5q;Y!{w?k6YAm=(sr9H+B^y3iA>zziSWFtHo ze7nX@7-w?r{2p_b*mgwrIUad~5fj^Q=Sj1-x3_>xSFXRxIhL6(UqYIMg*I0{FpNLf zhmTX3`L&kd70O(#>tpu1iNO={2``V>(wYy7ge;f_-QSMva7zB|Pk-vx(5-$?YfP2u z^7UI0XPZkajXp>x5EQK)7HA%29~;FYYYntEMQu#9(k6x<`HxXj#Tf&)?VG=egM&2G zZ?%8Q<6a3huWdYsEk#jPQgi1GuZzHS3KIe4o;?oA7b8=14)jaWlH|if)QyI|tKulF zaO3>i;~pHu!ijtZQWn?Ev7ST2UFkEgUX@FWXa~W~9EgTlG^mfNO-udi?RiW^TM?*o zhb|r9B?cE;kp}fBBX{_6pNXwcTJx#qbv1C_K3&_mu%7*Wr@B6ER`F&37>N{W&8?-0 zX)8=sl!Y?WQzWz;`}j-51}xtb)e~fRa0ouXaW97s>*QrIz6noCe%~|3768nHo5QA( z$bvb$H9fr4N$(4zoqc{;%J#g4@J)K^pVcC3#ZNp9BMdj4m`Y2J=uaYShP~cf9r2RbfKo?%$s_=`=OfJ57(~y zk)*hU;LuX?6*5t67?9hsE#+YqBstQRy3lm#(e2kaJt)6k46iONcegkyMh1CA)}HfT z_ZV9#w%eAK_Qu`m$7&a?!82}i_~or$Y;q?WYeG;-*E7*5hfO85mY8!ubMJVs5%>KQ z@VPLc#7zm{1A`sX+V??-6H%qpvalj+>77PWpYu3C9})qYr1Nc-a`t4tsXtiA|GwVd z`2~5vPcyWy-)#794#j{0%$VSqm(typ_hS(ADtp&{5v(8l#e=Zn^OtsBX1ON~(N(yb z7QT5p$D9QpFYHb04#R?r59FLzh@?7{c6Tyu@7pq|>N)hh@|V&e&iw6p0?Eztgoc7E zTMsXP4^@!LW!{&t+&G3UIyI?o`bJfIF90hwJa@-O zQWfp>Jhc5WI^5P(jV@3+x2H1bTpxeI_?fyRXY5DN<&?S)4!u=W4iu!e1S^2krhCkup4?!*e*y#U6CheCS zDA^yg_2W{-W@V1lVky_bDtYeNCayE7ponX$6=Ak81vk~m`8a$(D#)u+yX~^(Mkc!< z-Jl9`z@ziBC0a<-WfnknE%}b5!?(d%+(#bb`(T}g)N86Q%&UpXR5kv@CU2Bj2|O;5 zf?V^ttGjqi);QsFkIiCiNXd`2RurcTZF8ZG`@bC-gl;~=YRHJA9UQB8;znd==vkf7 zG-OMfVe`ULi^Fe6LfV(Ge6VwPS#dyC??|l~umJ z9~lrRZ{E38$~W(n-v+QMNvMV3F7xqUvpHWRt~7F{dFmq>Mzb(n0k!kwK5`mtUS&Phe@x=z+`#Z0uE0F z6Z50VwuwD+%@4{j^eU#~B#uK7M%c(DY* zXP5Y+)m7}Dx{;SX>QSo1Ea?P=5_fGyPcmA#zI!enkwWQWnDL55`nr@=Gd1s`o=jZ; z?y8C>O6^a(+wMh*L0sDLgS5wdh#o9xai|6^V5hoEZataQV>mBp!q3~0$j$HwvgM3L z$DXCMw1>M|Hg^-5L~%P!1nUfmeQg{{ooR%9$~`G|%=joa9~$&D)ei1-L+&cKI%xA* z^2IqkX&v9x0y3*a>*jnKEf1_@OvA3StS-csB+~KlL}I#YQG>l62~iThkV*|IJQrxd z8!43toJm~G^|?Uz=6R;!(=#rycfsnRz=(iUDeZ=W!ut+$>`joa#!r0>5x0Z3mhy&T z_z!=eX}edjN6=r3$}hp;j;_QZ;MxkY0d7oMf(st~<-=EtGj)Px4tJZBhJ}1XSYf2% zf}bK_G{>2WRoFxUWQFIw{HbZiRz&M1bRIn~t)_X>$Ju)6V6XkX>WWK?J3IHZ(UzN> zVepd4rT*&ZQ`usjPEH}nhMz{1DiTr^44Ic=q*}rWZ!y>&Te3sN;oJHHL#idAn(G61 z*c~-gd1fycupX?kIvC^9v<3#U>z1(4?pB_3zMr!g-M*SDSQ>Z>ybWBi z?UBUDU2^`pC41WTC1(gaGyt(gottE^REw`#*fqG4WM=oqm_kV^qUHj#VB4&AX;QwT zn?ODF=t8^S^L0D@VNz_7({nyt9w##dTn542M7|mY3^UzMsawk%u#LYkd(-jCKedFe5Yx=e1i`_72a5b79%gzNlVh%X zu{ z7Q#Pv{KwVzAG3V_0|L3^-~PI*qq8o3i)l{%K(qMiW8aFoiR@BZKjoNaaoidBSWRkT z_N5%b?Jh7VJSYn<)soin&9r}lEMbI&dB&fwAs@&2gw2g^Z4{*PCz9+53V z6eJ!-{a8z5?-+|AUi{Q`sROO;++!l)WNP`OE4a$5qJ*Vq2^Mj0o%(s5)>Kz&d7YGL zK$KrXBUJ*1oLzxV$D5~y%UAXmW=*nULsO7wtbIg)nFkNBQZtjKIX~bP{K0#@(B{n* z(sZhH$7Yaa+~3N=Z^xohA`+wWcR!6*EGY z25ILoaJLtK`Sm0IVG#T%>AQt@caG5*vG3(y)Y}^$@Ge4C#4-NOK zF9$W6KK9O-M*zP|BKC}NEW%+6{DG~!(&J0iWM=+73}Y;Zyd}^tw})zFDaK8-z4rRN zst4YQT;_B*ujF*q+uQlS8gQ^U-w$oyi^^AUQj;Oah#`dvX;U!R8VCeZzAh`g;L0RV z2@&(|m*@i_qpHY*!P6LM3H_N;NU|1mZhl<3vhKgN7Ftqis!hi7@3GwL>-UajO$^#3 zFpis4lq!_wROE5u#*(&xwOWdqo{n&dx8Ce|Q{T7GI|;Rh#wKB?;Oh307eD7BQ)6w@ z@#uQ@h?-=|CcfqVLQ*3{S7qn_KZO6)vk=Gy7+cr8)(pqrh^!r@&Knl%!N=zX#^+;f zT5=ma?^9RL-b!XX#F0KeYQ;g&3C+i!mknBTQF8EtD#OpI@fD-P>`yw|jt8+P0i+`%zod4i8gds*-9)Q5mOnv0a?B#?z)z_*VSMtuRMO*q2|Fw 
zDL9BI{#EKhwv(2F8r`uCJk7-#`2A3tNm9$vJe55NF8!F=oSF&adevd^sBr6g{hU*H z%MSJ^DyaCqQEaAEbESYS6zmrM?lUuck@-Oc$O7vA_hDsy`z`6*T+6Xt7G>LZcdm5Q zTfK2oVKX-}%M9EgLv}lAqC-rDY3u-xDN|jm5dpR7`CY`N@cx??3JwT$mkTP@`4+Dw z;w9t>#4hh;9oAf~ZXFy5{?;)Bb$ z((P1Pl4dsZ3kJ1P_@&RHzS}9L)nZX|O))7yQXkp$U92Bmyvq4A`UKds(UCIZA*+`Q zAD%CJkZxX0aeQ^v&Pg}pf##tm2>N5^lI%+W@aJ6hYbFCJU>&Ok2^J59CS)ztrv+<`-nENxT;A7QD7!I_@%g&awohNCmD9SY^K#nH+?y;-L^q*S?S3hpRT@y~KKT@h{3D(&` zd5I{SYBasb5iZ8^m2`lq(plej4(|1)DNaz#7OGNf3BHAOsK7|Br<8DM-#wSTNGjN~ zTn4A8zj^8g$&rSm~|OS89W4SPuVW{xMKQ zu2r_dajojd;RQIdD;uDayfEHj0QpM5PL+J^vCjRpvncb}Bb4|{CC#j5v7;WIG#!T6 z`|xIZadP=aDbg53J}+Bw=^?6S2gg$a>)3`)!p;pq6^v#IC7Lq36quo|w}uA-8_kSO z{+WXO>);O?0)L#6C`pp{a=+uaWJq_G#xZx5dbnFe0(J#}tV;Ws@!4XP0oo#}q*QF) zJ90!eCZY8K*HS62mwv*^7aFBpWac4NOhc5Mo5v8-=Z&5|JFXJl>Dn{UT$ymE%pxA{ z9UXpnXc#A*(G};h>t7Q1X_7H5azmq8NNPJc#*Hl*9(OW%;`y|yNRvTQ8x=~KFw$E+ zWVwwuIx5x;WD+IS^zR|9-XWuy@o=dFrq)u>%*6MDjNgvb4Z3Y73Vu5x_4M`;qsh^* z@3U`X&VAqNC)WMS(yN){T;{)HU@%0eNw~Ihhy!3^OUp!>N0e z1^D-aJa3Vx8}qCUkZE}TQL>-V&*=16RHE-5|Eff9~(Mv_A~s6Z)!#_h|wQ4BOST*jyF(Do5M5 z&HWvDOG+}}!`m_4gdfmvs(C~;xGwgPi`s_6Q5_~Fjrc!R3)!s740``Ap~Hau2qvH#5~~63Zn$Z zhbCno9hSetGh}TKqzP@ncdusmy^ecXPXDgzV_DVx&Q1(?YFdgZnT{Z`OLI%q5RIO+ z4?>1uom6#+xI^7HLSek6r?l@f(j_>p*_O8l0f9ZT1Gy)qa|~ zpPM_wE}Y>&tI8g*`zD6tD3};`xlS3s+&oscvRK7x%;eVx%d}i=U*bCTURmhE+2rgj zNOjofWwF9NvRgziw&qtNUIfgQJ}ALE%8(ST z=K7?jKBk8fOX4kpjHAE-$bpz1ZKg}{hC9dK z4dplVVcoI<5oCYP<1UFlc5Cq%VHv*Lg93@P$le-x?mt|}p(DnSM7hB8vPlOgS=)P=l=ueICwpE1MZzVEqKD^1w*$&*`qK7?_B7uFXfktDDu$=)erSSo5H3NVohPgo2$h-rRPgMg-YZgMGPRC*)h)#)BJ^})HdnS z4?m1313kOgjwY`&Z_}wi@7=?lJXtVPVwDR--%8XsjU&c;#Hu7E{3#fn4P%1nxdJVNcsG-DKq7h0U_yAl#OIJ9dErEfo7tJKUYR-vtZJ||K97>EkM^Mszp;wB0`<6I_5A6Zx}=X%CI9kU0)?VKRQllH4o$HIvA6_BaH_MS*`Ytblx>KFu7EH2vNg!3T#|~~VWMhYVp62SmP3Rwc1jQ&^V)k3evgJt( zm~(kRPiT9leOahH&4A&f$D3^a$vOVR!A~{);5+XKWKJ*B`$EA`K2)5LG@^<^*a7r= z1>Rbc<7_S01O?PfmsG<6$J**pyeetLEx5|TQq3dyy9)mzAv-ZC^P5X^?Y?1E9A`?fA3a32Dr}8JX?{i2&?s*gT5@*%hYD3o2BMtE0`DA&Q zC@*kBg(C45DrOEf>Y8Vg{hP8T?FM9GzHbdRIH3nq5)!uI;p_Kj^c7=x(^u4FwwaXp zhhpbEdGUVq>1W6NVmdY|*7TmhJ5mSjATdRSXTM{b6sNE*j}h5w+ar2#-n-?Q9ZgW| zAYZ3vd85ySn`RLa_{PF&keLbr^IKX-TpqhE_#5X(R&~lAQX~GD^Ky`U2SksL-Q}Qu zIx^_g6nn3T(eD{b*6XA+=6>qmGq{C{_tp!hSF2rv6n?cykSqAA3-Vy?sP>=-x{GpM zHT0)AwMs?V-=x1)Pre54aKu^5Gcr;tFV06^j6UwOcY6nlm${F=zR~*aNb)bN#dJ~dRo>bz zVBI;ZVyUbi$`Zo%W|G?izhdFAy;Gr+$b5ovnAJv0wSDBMM*ljoF!kU<#b>jF3G5&;gy(Wzr+cwgXJI>#p`stnDA73r&BB}gB$EC zHX@MlPmpi2r#8X39?)S*4Gjx}k3F4rGQVpnmj4asWT?rl#{f$rQgzG?bPeeyLrg?_Ybj?dt9>~iINz0N3OV?zXUfAwYYQp;l zr;m7i`u)wh%r=1htpd?Vc3cBoBKYp#xOu;u2LD%P!I3|4DKz9tz{-llD3UHzE{JrN zk6#5{5sD3^86Cc!Y@WUszo+cV_oF#|Tz3Xhvl7n>%`@g-1b~GqfmKP^U;q0#msO+T zhhi%Nl|~Z*Ri(IWZN;Fkis+RgS}X(-R~7N&?|#bfhJQG7|GCY7Y|qL17j`E01d`Ze z3(j^HrbltWb2p~i&Ph?ia<}B=31@O?W=|$C$OO#{ugr<05%H9TmBq&kNeyCarMNZo z?GUtOT|PUTz;?o94Mno=(A}ow@dasHLah zRAODgt{J;KuiQ0#=qU*oTwsln&yK5>)`X)o(z2_q<(A7Tsv^`l?nCtw#i}o z0n?#@)Ft-)x4;{|Qu~E(HuyLC3sTkVJ!og`T{NGDzCIt6b0cTe`_@$4PbDk)+QN_> z*oP#Iwi{a9q1&7h=Oo>!si96Iz7wL}iZrHw?w##%yO=@}*P|i%T(~Z{*ZMTm8j+8@ z63ey?J>@`BO!uqK8e)A*CDdLkNQ=jAtJ^EYKWEp#gg{DA%iIdlC$BXkZ*=t;>P;3e zh*ho_oJC3Np|K3Gp+C<{@3zkx%dCKBIJ5M*q`#X+{BfE8w;B2$XXigB?LP;9NbdgU z7XLAO{szj;2i`X=AHk$>&vrTAqdE)>vXy2465MUwZzIBrLbzwRwGl+4|Jwtj6=0ML`?f>kbbji zvzd1>1-;-oVp{;I*0`T}FTe-v)wX%g_1a^=Zn_5C_`OUe)ASn;*Uc5P%H$9Fo-;!V zjR-?bXw8Ecje~cGXEYEav(mK#d^dFBva^%5?DAu))`(29Y0MyvY~ENCZ#E*<0KH!o z99KGrRu7jPq#{3tEruU1bawud)=;cO4J@fnJGWKa&fXWLT#1hV8NOhWJ{u()9U&3t zi0qt`HN+yL;r^wJ)@eKO+N7b%rRZWLd{p6R+e`c@Js9P;BUyR28Wysw>6>9`OEH7u z3eM{2*vi$fYeH!0 
z){=vI7YG9+xCCWd4Vihvkad-DZVnUd5#Mq)ey?`{0OuKQzJ-QQ;6A694oK1cr;|L~7V{4dWYpfTv}4ZzEQ z%8S&24^WrYEh1hVX4NZ#=#a*9GkehN6gAcc{NRztv6iy7`bC+yT93;}Ib82j23!H! zkZAGtq~vt4BFKSAgX@{0=yQpfr?8ODk;$?I>Q$5ZngHK)e?38pR;}Jvsjx5U(LzvHMi#d`e$Z#P*;m={D$m4N@lxGQY z(4e}zm=Eu|n!}E!hF6V(oaEDOQ9Ok((KJ3%m>904)|x(lQA+zT;HgY|pH|exT$00JKH%36Z6zpG0hN6GPWL z+PT>HPIfhZtghknY>R&UXz1TB@BiF`=a2Es|9*A<<>LP1n*Z+gUmoH^@z+OVakRnd z-r7|)8r4$m`IiFOX*J1|nsD+oTeK>LNZ|#t3U{tP_O2-5TY@v4m5NsFrm;JD2oRD8txK7kkUdacjb~3u;U9+W~TTY+~!PCh%r%$cpDyQ2TSGK3$=90Ps8S~7v zR}13Dd>ZuR*fU!v)Jk{OUJ#h3vOlfw8k)~4u=WiwSM{V1Qr<%>EwPQaA$G2_)Eodl z$9#8ea6QyK9fkm?2pfyp1a6(TW1?6 zYW;hF$AMIRRZR&tv9BY{1CE?EJ?RlzORQoW2tDD3Q(4M=<@i#Q=TQ>_u`OyL1*vjG zD`y<_#laUx3R*uS-J5nnL59m0GuYAOsli7&U4c4aMz>Q>d=z1*hbfwz-I8a3HgGq z-Vs{u99g4tMc3`tH2o(4$lCG?c-7GI`G`u<6SoYG0 zoFDYqb)HSN2%9!BO>=<1-g9)icD+kljIya1w66u?8$rO(-)VB|70NDWmm17IyKnlj z7$+CD?&N>tbGk7CbS6TICc9_pm?jtmXUB0UUU|T}lP$seX{*g&Few5^7->BcrPJ6g z%Ch*JRz-<cOpzz8;HOL9K{>zSbi}(o4P%qs!^>pnZ$<#}Ygyx=TSQogH8c<3|(aW9(7~aelaO zJuU~YdF0G#x@CtCnK9`{t;(x$nM^_XZj65K3RiEk1S%Qa zhL@j}C?;gnVL_;-Uv#CX{XZ^aj6r8=YpT1@)>l0<>pV9Z=HvKkmFNY~`R%Gm$|o!vyY#TuYg?tc-^L#V&Si2r$FFhSn3~!ab;}Xr%1@eDieff)Yur z*;M(aNBy$XF{9#&GS9i>4(tN=qc9p_Y*PnbT<6GM>L*?FvNLw779Un44;ok($}U_M zr3Dim-c98;+POu^GQeA#*`9Ox_ZZU8IV=6wjhHRx;oV<{?NMr%7bJ(?La%ae6?l1% zxpHC)wqU$$>46t7odW0urmmZsEsuEwa?WD9G&rOvD@VLV;wjM6U1*nO7Hpx92o?*i zNx>(cE59y_@UTv_3~8NH3x-bSN@tA`Re4I*N(tHOmpf!qW)!4aX`(_js8)Z)4Rw~6 zK4$i2px85-E^1nm9sE6-LJ3q9;}UGRPKt1lKQ1j;2VGLKE-cW9(kmjrETBkE9`8rW z24i)HjEhQA<}Ll%rbZa5@@m4OwA4HQU7B= zY$%VngiAC=15J=Cjj#`!R?A^^enY;|Y`EDIYh_bYt6fPp0Hmg>kl8#oW8C95Jbf+h zMLh%jS@y_%@Gq0}E;vsnuBn2HDNDEj9VgppAgT$A#x6rbYFH!I?JlnPcNx|%&r)5B#(i@0(*eMC%xxKebSeY0vu!bik>u!}sANE9UV0NS%x zQ+fj8EHFUu)s@=UC*csWUOpE5WaszMqq8Uxn?G>+!}|E$ftnlZwYuNJcl)~_Kjz!i znBQB)3-u!K*!{?GL5oQK|A&OM|CaduAFF$z*$BgnTj(LBswz(54=-Ib#SGPRts))L z8#{|m(5|Q{VR@1IokcAm5X|U(a=aqt`|{}J#KqINg}8|mr@S{s`eT%4@^H4ew0KQX zSR0=tdM?~7MvgQP5rYu?_}61SE!An@oS?WN##wt0hDyJ{t4gyyJ+gg((9x8Xs;Kw3 z_peP}r{ksr>C;YyZ-#vX;`WMw(G4SeXI}i0hPi}>G@$Czuj8|$1VRxf8PXRK z>p8^@%MKE{F%vi8P`hQNh^+18!FjRlGi*T(Q^QE3e5I;v-M|)Fq8mkoT(@}bfhY-X zS`L~^Vn0RQcsqCHlyX-BFOzhvB;6qBR(?c=+2)kn=q#zAnbv8yV0d-%JdMk}ZXT9@ z7sgPP)c~_zV^=P7S9sYY($7{-qq0eGQBhqpv$?=$K~L@s+p67r7q~U=uc~D{!LN8_ zRrfw5JZ(~;GWu(|FeT}r3N1PGpq7uQMN%*OM}ng0!Qri!D_5VlscFtFM#}@#JRC|z zSJK!j;v`n!1e>y9F~6BSntHraVZjMoS!eKWcyy-hq)nY7n-sY|l`im_rgt_PSDxJH zZBY)4c<(@I?v=`_yrfwL`+)xck;mh&Aphx9dNF>`JZfcLoHi z8D(jsWAgADF-!%~f=oqJZsNG?`IadI(MO(+Mb}chP*Ektrz;ZiU6C(Hw={Lg>AL2` zEMP-Kj3Xz4MBNuP2;{%nNbgIUIyb$UC$g{DYFw+|i}ng-39g>hlt2DT3SE!9CaS%?vieqkuJLs6GREdF6Ou zpNofft{ka?OkO^|KAU(QOmWWAX9tJSF9z$YV1b-E>qT^Lqb zy?P7+8|vXGBx+(OQrPm!T`WM!YQR93PIR6!QJ2E*rHU(z}~R!&dY?e0k32Yuw*TK|%aBehE+}pV5|GIP^C78^8a8{P=&U zT>tcZI(obG6}T^esrwvB@aL&ulnhfOjtBUdNTurNalpP#;q3 zJU>Gy64KiWtJ?9DXcdv|Q2E&6b82@QdF%dS42Amk`O87|PllkzJr1N2T+)_MU>h_s zH5c*g5y?}zdTncfeOdY@L_KT@xJ>_`Ade`Wg~83>c3JuIo6x_aj{XxXvR2}2-&_UeS|kr#qAs8(B9Rp!+y8Se!kqZDMy9X(7rMox6PbE{ZthV=IU!* zUR<;Hlxq(;cWgkS`n?+4YBc2J*lHDiXXVF*p=>%Xc~&2s&sw{`_(qRz3DM(2s#2RDtmx>aSqz{M zZ`W)Vjq+9Mys)DzMN*dF7~tyixj}FCW4{I92cr4lI*Kc1qp&zyYe&Vpp<^6c!?xBUYoq|Nt*B~ZieY^nL)plr z{PTR*F3qRe9ykpyN%Nb>4zHC!7@L~pA**r+l?Yj*(AF6RXqKAL3x^mEW-a1Xsu3;~ zjN2h@U05&9Fkc6r6SMDk!FG2dO9DVO2Ud+k8XKZQHtBG;yA($){C?&fU@-tssTad8 zx(rEbgp8^di`axW&PLCV5juKt3UFH^NPa~}GgR-{0=B|4moN2VpzEu+9$KV=N~~2W z^)d^ICt9dPweaD63Pm$CnWR}@)$&02wP$qG;yclO%J>vJ0h9oUO9bZp zXW@PLU%W>8r^N2sSJ!0BHrqrw3efTtC95`4C1)g~CUqVN?hdSk3 zgHQr8fc%sGr7)aS4Ln$d)K|q9P$Pppt^^=Z^^c@cSK3~l3R8Ms8fWFSx+wvx3{cOq 
zXcqu}cC3jmX?|mpugu#AH>>{|<~(j!j>zj5z?HR~Akb)ho>7q^;&t0o2(y!za$CZXZ=s14T|LxM~&wh+$Yrj{!eELF? zUv{aqQK)?BD;2Yk>@~AB!|nmHbL0HX3vi)Z3ArOo+H>Qn1KO%`mp(fFsXP7p?uG>u zwb=X`eomM>t#~1Wai|!rx8)n*PgsZDL4rU>2k%nDoxBsq`a1zBZJ5u{V>x~~QNG-O zFo%#OU0!aJAg#sY3kKd%pYgoPLO-@bk4f>zr^SpCxZ`Y`XmsG0RKJQEN7^;z9<6!@ z>+EYh=-pSWbaI~(F>)eCJ}oU%%*RLkUCaGt)R+*o7{L6B0I!n}Uc2>8)GTpJ=Q`f>okt)N@(eOHDi)fRSXjTKkw{G3AQ(oZL}@G~h!PHrr$7tE z;{n^%XLmz}NG73AYfaj$)8NPjMJHtXYFTcU-XkGC!LQ`btDIN9{1nu+wks!(!_gHv z<%9lX+uqO!(GGNsrYhkNudG z$&{OWHVubOvE?Ic(mOulg#GUS>YM9`_V&J5ol5P{WG)d?BnYd>$|5SpJgZ4wb`S~< zR1sK!P0-k0?Fe~zI!ln@^wH5*BfJ)3-&%uEyOfsHs+Wxu_Gut`aJb>GQt6;_$p98%tt3EX~$J#ALXF`vtKEE5(tme#GlS4zr(TOf+r!d~UwY zHozGt!tMa57E04cBl|6YwKWp*hLn{sX#9CYz1T^}<~Jsdeuq!FylRK9UP9%(OEslP zMwCn1)Ue;(sX->PSi`kR?PI=*NlU`s{v)r!7-cBJR7hh|%#uoNls$^wW5*^Xk+U54 zdzaU0V4DUz`w*#xa+v*lu+9}&Q<0P84AmP*2@itJO>MR?r#>n|+os?YQ-7 z5ZdP%=`4%TS<_ZXU;b%3@EFL@Ac%s{1)?L3kttQgs{F*Xp#_a?2`sc~QC0)K-~<0K zns?G^WZm zMPR4R;2?7fuVO!t6%==Pb5SU*Ai`L2xv~8)Oz1i$od^#3btEi&XkQj+wAp8nx2BM% z77_3mZmWgfk0h?SrgA2+Wp^{D5YiVgsyJV@5pT|)d zp4;2@WBo-^&*q)1b^3cpJi|li%B@@{If{Ry!%9^Wu_`IQP!h9fyCMl%)vI$^K+iA9 zmB(H4Rk0%a<(4`r0(t`%CzfB8DCEXv!u2;yp*-TwF}8BivBZyaj<_W+nGZjWaP?rx zUV?m+N5EdY(cq7NpV6{XC#Q23p$LnB28aX9d3psf@!&<4Ns-;ctwAD5c3;BF@SV?W z=BCdM;s42d`foe=!~6W-Z<_z1toY~qM~+0z(#sbww_WKzv7d0aw{x}LAi)d&pnY^_ zE~m+Jm7c7UDOS6B;05|=^$wZo65z0yu<(ISN`Na~gMUz8&ht|a6LmWi$uW!n7Qo0- z?xN4x*5^YuE3b83P1P;mip%{|9a71`iL%;3pjbxvL~nh$w!V z+0SV!>?ZYH=k-;CNY%}n=zNMn*o;qJEyKoD{jPQF zBp)Nl5_8>(S6pq#-{1>0XYsLQ#$@xO3~-0T19Z@<1-;^Uql1F-{u;eqt^AnoikvUA znqL}Mu`80}i|kJlor2t$fgk{|7>emS+B~_%S6!OcEag#L8s_hLL#@%)uJ6&xYu#bq*%3HAMsA)l-eqtfl;##HoRso6<<&*$1o8 z>cZmb{!YAViI<2`o(6ET>)8y`_3*Yf;i8xsI5%$h{@WVjXgY^Dcu3}6kv4XPQ3^E+zv_M(O>#L#^1$O)?kc{$-74-FjbYehNr4l?|LBl zW3zP+Dh?|SIYOOwqSRt4y2vo;4l2K*yH+~iPka%_0F=K7BzO{H!{qyKwPXb?C{x|w z9bLueM$4vi+w){({6ktzQCrl!b6z^BR-O-OI&Bb7Mmb3TCD_b}mV z3CLlOxh&V{d_@DrAp<)|9wkiInto4uC5I`A<8U=Vljm@8m^37LqS%x!@Cm%tj2^t` z(H-#WB*2R)s0kA`f}MB3O6q}V<^V@Uab*(RfFN#;NOcLhg(OLxuJe7s zc?I}VG{z%4u5Y}>45~GEkowz^-oes?m1pw{oO*l|X-B!X!w9z3^}ssW4IqX{xv9I} z-v~DVlJiI%T}zXPXS);oVjHop96ln@*fa?LbBNanG7W||w1J0EC3Azt>Brlr-Aeq z<}cMTQ#O#;-XYd>HY!xxHGh>#)bE)5IDO^my>?dfsIZ>NB;BQ#yyMJmlWTce+rm}+ zeJd~RQv=5D3{~BolvUqKbo*GZ ze!lBG1!EU?&1EPgM#1MOc&Jejhp*r!@Tn)aE_pS2J!99hxp7CuBu*Llo~k5v&@W$(2*XwfFj)T07a@JjJ<_X|OF(z=arT4sg-n?4b+OBn?v`ib z&a&K`y544p@eyGfl&G2N`7Lb_w(meR8@j?3Tdu64)&20`L;c30^cYQcsXGi+hyQQv zy?HpB>)JQmz1QkmtEpn(R zsggg!EKYDjkL!CoS>(Niw~T{?E;Ivl-bny?iZHO7#s_jxJ4dTwEL}_D*`=Thps@=t z2KcrMtg+;(`xTT^J!Y&S*p74bskVBAc9kFhD4uAxXVgtbfhuoxEk>+6`5Qg2pdu>v zi5?kNx5&V9_rXEQZ(=vUIP7?=jL+h0b5~o#7IXJ3)Z!Nt>K0Xonf^N73suw?CyCA> z(_N)O(huNexJj8`+qPXN&c;x8!f^&7yTc9OVJY3uVzVBVFX||BrNxl&cuGt=09KuT|bQ6)3q#d z3tGRUy`2MAo$Ga-iyjw@X*3Lnzfs3DscjR+p#IkhOIgR_+68rX+%8x1YM0u`fe*m* zg`L%q&jwHBq&ik3YRAdMPTnYVMU!Layom7i-KP;QyM6K7d+QUkfuJ~cM(Loq08@0| z=cdt02*Ye*bV8?Zx{&2Z^HL4C%Ild?_UCA^ax8+?_bYGDVK~s4fFb})l#a) zSbII{-KRWlehCcMl~}A5Mvlz@hVRnN$2@>QBmH-y*}13&z)O1 zVE~_D70_pOA8AZT$ON?twj&le_M)WZ@Ybd@F+y+Pfetrnx!}fNb7(R+w8!(|>8J0V z2v{w&#Yk#KcqNfcZ*bWR6qtU)y7HqowJjOs_?-ZN0P2_3`K7{GLlc+0@pT>fUIYLu z^fHCc6QvSbeA-ktULG0+CM58U&lp=<9}f#aTD!*@-2$>7FxVnWWyD)O%}s51NiJVp zX=ynHJ9X*S^sOXubmF$oLxVcnVj`MF+six8EVx&ff=uYob+}vCKiG@WFxAXU56Sa!nQ-sExOBTNCA8{!LPLx(;PTVNrMjOrd>OTww zdk?ZqL2bIuTDF%Evn?`e^IxWdDYC(8UFab^p>yLRLr1vX8KbX*q+M#%d)$-k%&MiT zM~;U_2QkO`#abQ&y{+K69tvK(cfuzu#om;7P-+@nS9sCqryUvEI3+67MDLn+-re1Z zu8~h=fm!dnpCy4T6?bIarYF3(Fo-rkO1>lDkAHL_>EPP0M-;ZbRdV6#yTiDTS7&n0 z-f+Os+at)KFKl#W1AK?KI4R5n7Lzn&ACn%V~1M%WW8cK 
zswkg@&pJO>604PAHryyyOP4ATpR>Hf!mjlqB3Uj7SZ(X4Q!s!6m! zsfmvF<;p4o^>!X}hA`t{Z&Qg_8tm(lF?8dbNV`!hfAFx_s~J5=rbFO)*MZddBn*Gl zq{8Y@Sv{gPSau4QmZxUcpi6vTA}rB6AM8@D_;!IrM90|TvH5s7`F@V~6FEd>AWKHA zyQyk;9@KZHK)Po=w-~0)3p*Y`sf1goi>VB}rUtIz)!hQ4o!G7z803WNkG@Y*Ee*q5 zOSo2`yG=bJB4X&FH(#6 z<53X(>`i*Rg5el>PDZ2bLmq$+j)v%rjc9$g^#%RaB_ToC3&w0ZU)PDCmXz94c0fYT z$`R!b#VX3%iA$!{30|&e-obLs;ui02bLTzFqly zPr3I7cqesr*N5y2T_!0mn3Qr4->g7GBX)nDKFu{^wdt^;Y%y>2@r^X!*6Xp3#ydB5 zURT+T$uYVPXA5<2=4FRfQ__W8_%*AId~FlHN@Qb!V`UGV8Y_on5+%@lyXSB=w9!G{ zeemX&p>sRxr&P>kik#3h&^0?@F~64|s`|Ynf5`F*3%L66((Qx;+Yy%zt}Lc3%hzEu z;~IchKfs7(AR7e}hS())(aFArs&NCi?Wp=ORh2p$V`LIN@>X5ADPM+?ex9zeuDc3# z*GL|YO^+I)qOfZL-F_biSu@w|o|=ERv@CjNkZGLhE^&cGR{&S$~{x7D!m=)|S z>bL>qrC?9x-_W!uHq?6DveF^B(_t1ARW1S$n5Hns1lts(+~go!3U)F^h&^a6TgduG%r~&xRS!~TB$^Szt0ULp1 z$t4NQH3WAM$}mx*`oT{rA^isyoq;1^M@dY5!AfgiFH{7kc!^zqX@%cY`myo=c0dfb zMfroZqI#^KFQy{>+Ef`RMbo3OwotULg1zp7xY8F^vNS99q@CftM(%pD6idFHcEv>% z9(B{7g!mACc~No3{krbC#-Uo8sp+l=yFx0ge{37X^6NcM+R|^ccDLXMGi+h9i;A}A zollUr6rfYGqpiE5kW+`ni$hr7O>DtMj^yazGiSu2E&6B5vjvf0tFn**E!bN4S;Meh zaY9r}Y~{>Z{V;Hv%iS6$+Q#!?8_w$$2-~jfZA`F6EH%UhoxGt~>b@%|s%4^3gDTdL z zq->t@7$50}@7@}jc5O^3#kSLr%RAnyp8mq=&7bjO*_k^VJ6at=pf_Y8@6wY6{#X*J zsfwc!3pmWvt6RuY034>l-JKG~s3MG;yI5E}6zv&pVE22fS^0aiCrTDpHDSJd-*dDO z>r{~o13UZPqtx`Ko9#Z;SPk%{9LNO(t4$a~dpx*DGgV&k)?*NJ*1=W$(jsbM88PlM z`ImIk?9J9KU@HlFXB0664(Z>|w(;K}w2+v3`)@eoJIJh`U!b5;19*$he8pCm+^ zU$9kgzt$?auuoy<)$vjt_qOaJk#9WFYMXm&K5|>-qP)FlSsIyBI|QSkt~JXB&`=-h z5km`B)i)=yra#?4+rndeGF>*}{K5~H zQu+Ccm7-DyZ9%$y_j@$k5S50%cKWKE)e}u&fHnt;@4uxRmbvS8e`Jjf5e3leub$1F zW|z4ogI?2>Bn0TG@kqHhXlTZHR;`r8ms*3cXlc#?)$T}7SU(q}|5;tSXoQcGp~Sq# zSj^-WOWTz4fu-@)WQ7AaLiqntg9l;#Bb$Ih`JxdXPN^v7H3KkMCT)}ScY~$h494m7 z-ai1fE`_*k%M?~PQ27&>F=eltAD`BzS5#(Zw}dyOSdUI$zaOxDg8j}rTXS$f3xH^N z&>SVecZ`)RPEaJq+KXKB^$lr=L$=-b|JN8mQTRMNDBMuu$u z-0Z^BB$|+&!u9O6t)vZLT}e#?C3^~;Yy0XKYP=#{$FUqA#;=Rm(b4Qr=$~@e^XH<4 zP6XyJdjExT4k)5@%k>%>OYNIz%gzElxj#>>=IVCa4uzV540b46c<{&?>P-UHk^ z|75ssU0G%G@@VNE2bz{_0MB5A9Zq?7uE)e3Z(lj@1`j7N90fLd1SYd&UnX9DpIAHo z)>w$JOC{_!dlbkI><@4g^5{92opS^9>a=&xk8E42G^dn?2>*$61 z?!FxGxB_HIRArr$$9L?;!>2`t3Y_@ za6rP;(MFDMF}ALb7XBbLKVM#XI(=!`--!co_?Pui{JPG4J1&xo^7J zYP_>9f!OeZhI!Z3J!+=8C)tZFuM3JthMzd(0V0<=Fc(DUAy&l@H4DDJzhn>yk*<|8Nhxr{l{@aT5LjHz zkk+t+R+EE(5uT#jOFeHW`uzgf&X8Fs=V?oUj7gS$0P3gr!FTR}3d9GOy|+_rXR1OY z82}yXN8jq=q?w=;QkhdCS74^L0{diT81oaY#Fz2(^l~eVbe6(dRLQ3g8{0+DG6b5u z5U1?lxJ5as!4*9RGM^S2=f}ycRfj5A7w(UWTOy@#VRgYfXQlnyvoXIVt{7RHE*;G3 zeSTr@A-+ku;`rx}l7p{)tu5tDn3;BMR z$i(i_@DE+tN_BF{h%DlLju~N02u5Nd2a%tzrGef7)aT5Atf`O+ez{1tZ&1>ix?_3N zl1ED7q}=-wfHSI81wL_cK?I z?)}_%sYN+z*KJuQFZRXi;p!FFR@v)!e7KHkq^T=&tMz)VqKyo#+CVDe>aD;7UM(-~ z^TN*>5{;eB67)Eori$tGbh8D|V9+zu5KIi9`3$~r8`XCcRMdM1n_^lq^lb8nZKB?{Jk-a4s*YlU2Mz_RC8!kr!Jjuopsb-cEUMU^3hC)5i zPtZF@wOp?P!)GW`O~cQsXk3W(HjwI)xUQmHPUl?2IbiKe zBG{NXWwIUu-#?i@98x__t;$XYyP%+;Pv&-D11o;U+pVIPTjdNg?~7UO{9*BQ zb%6d~fzW1a+kI|M>1U-E83?yC;eoU4PuxTD;b3K{Q*+6#F0!~Dk%jVYyG)sLLinw~ zjtR?Q#AzQmcSI-AO&h$T^>(8JHxZ$N-+ z3UaJ_BWLj=tJN$qBV8K^v^dlDQzk21o0!nI3E#r;Er2Pje+hKmn5@eno?~*@B_g8m z^8=TsaSn6EzLd!ol_%v7^A?49n5s8RN{4(vn=QLX@ zDmHIt`xeDB4;dR05U+U`jjWzGOj1B~aV(%|q>`In0aqy_IJ;+Q{$lem6aC#xij!Z_ z5F+L&G{oOZ14${pby;e z;osGoL#&mB);hjL-oZ-h?~5gb*#;N%1x3+CEp!TUGRFUCR$n+eBQ$*jRN>0ZI0n&U z%mWDUgX0_0iQks7e>)s{00uvu%&}W20I|g0A+w{gp!yoy3b$cHh#swLi)g(hew~R} z`HrMZd+ziqnPa$c@0|d~5!LF6qUV%jN+pAW@v?4TkGMvtm*ECh5(BnQ8n=e_xiH+a zZC16y&aDIjOV2@AcPryQGxPCY!{AT|jFc#J*CWn8?E(e`WFHBO z@RIl#qJT1rw7!aOBc+<&9L^R6AGk8dM$|&1E|O{l1T=LpydhAiM{6>P}k?k zepj;^@0!+=CKGwN5@pX)afxK!vc7gIJZOV%ijav|gXxc^7gq5PzLW 
zGY=Y!%M{F1+gWgKX8>|1Q+28to-fN@3S1u+Imsa#9rrqAlkO;lm2Wz?_6By9@xoT= zn|xuHcMn)?j!^z0y^&F1jUSQ>V5BTs|q z_U0aS^VEda8zIE38IMQrT}b^469YFZEnDS$l659$z=Hr`vE%E1Buf7C^pek;$C zkOq$qM9W)v{lRi3SGD}UfnvYQKju7V{iDV_HTYrcZ+?`CPDic;bULe%lQeucThXN) zWu{^K{I{7cnZcXGuZIphMp_Suc~^|4(R~|_t9}s58?B59y7q<&Cn>2_RsU>m3#00& z=5rqLr!{rZo8u;M6gR(R7Xt=^O~Im4QU>?!UbnT1Ks3ZVM%Y}cza3r%I4&mAKh#)( zf+_VLFHxzSJ9nCZB%kxgc)#s%xmD@5A{-0IdN)Mn<;Y)1@`m<9X1*}+JJ#Rrjo@o- zT5UTfm}6M3I4PM<4otAZVzFqflAd0~2jZg?^kuv|D7~Vz(rJij^6fAd@@l@HhnRs! zv|m}0A~!b00Tj#CE#D8WtXPg4rkLx8qg**ro4RxZOVM|}0MX+}4`IjhM5)$&7gAAa zN`{P*Y(HHFukWK2G(|5T2%q=$MTTl^b$dyQya-Ld3h>Uz-keO3mjMAHU37qIKQR#} z^tZR8_isx#N0K|19gt{#Sh;x#ELN`W974goLHaNlM4Af@mdyt23c*YJ(kC5g9_CU_ zYHdiJE(b4*qWBq8soMeRLJ(;HrYOYI{l>umFtf?n*x0|2$aKE^p|}XBlD8Xd7B?5s z&=8_}i1X8_-&~zfOpxcs-`%pDvA6ba34n^BXVMWg5$3#mDb6$4aDT=hvNAnwuC~Y` z2R2x>nWNpgPmZhR1cDF^{jEw?UC}9$C=G3>DZ)?!@H$;`dZOJ&eIOjUwR;?V(vZe6 zwl+7n5WYtw5|b)gPawhAHo)JZZ|y1H;ZA)if7-JV=D8K1-FhQ}=6II;(K0YWDQC!B zcB0pwA-!T{61+mGBP_9(JtGLJFKy`DtSNg6U(&XCnOM``ZYS(zVqf&_R_hA&g7?FM z4-~>Cg8ry_KR7CYYHfsurfZ8wMMdOF+O)B`Wgbq6DTr?!DIh^D`RE&D_0BpgOvd>4FC71kr=)Hb<7H&-LnLIqwf*Jhpf}r?s*>X(H)t(;gk}m^3;U zkXRB3^tHQPncYG0gFe-EHZ}tlGVxgK*;bL;);};(Ow91HKP71Yzc3`2crX^HSbe=O zbA-Abbxd-&F{=@S`CDy|>nOP$&wz=MLv@kb(cpOg(fn5bV&;3>>fc4Go@<|T58PWG z0tr#U95_DucFZ|7Vas%jmv;Att_2MSjl0TLzZtDw8CS0F824vIMSKa>4<8G^(1q-) zoZeE79gjB$t(KIRc$uh$z{AKD^HiAWBJPwkIDhbN^$nj2rv=q25$eaIs+UeCoXc(8 z1ErYw+o53d`GXgJIR5*_jOSmMIwe?dcJHLp zxxtpIM@J|07+%FRg6F3WYOrqo%ME*4S7K>hDAd#_AWK;u6p+xF>NtaiTtE3-BAeeEOzQ;7x<*4K;_nJNn^YwdMlvt*;k})KPrJh%_8137(s8ovwH-r) zQ=9*14NAq|T>%VAC5&WWdVhR{=&l7kI zhr0gXHhAgv6$`q_JHneX^$ww)C|6hH6MtP$;#;@9G{gSH6syK#)sHe*jsl&%(5+|z zEo0^ebX<6V^O{?TVXaoaoVKskF>xzhzELDbKrf{O9EIzr&Q)JSvl(v+G+$rR->MVkua00mXc7B-pLGO*VPxGF%Fcxxg!jP~Q zNGLBPySEyGZmVV~Iy9<=NTO3scSTPzjh;ljKf2Au-`!@-s98t01i-e@qA7lX zqW1G%rH?#<8}j5rV1mgRxO`Wx(hBZ2_07bMutE}p@9Vd8d2uonB(f=_eUp&VS^09S z7Qn8WLZMKuRhBN=eI)j{;AIvFL5FlJ_3lQaBt!f4U?zwLXK@*2j5vFu-JAFxwK5`q zI<9_TI($sK^#RL9_DohIjemmkH(BX&pJS4okce7XC!NqmE@gMaZQk=QBUH2Xu6>gi zh!GX}+?HXgWo_#Ja*Guj0uzNmqLdWhw`-`tPZecMBu;7PN~sK#4ctKDQ?WW7q{MRj zwnVf5o79xrQE;O0J0Os$DZ_0EmaS{$)QKP&7&A#If zGtojBhU8hiZr3z7N{RfXG^dn6pc1lD>G`?~a~^o0yQX?R3l5mArSwA}Fj0{20csb_;zRN zl(TvCqMzP(y^Gds^l`NJ-p{X;t7LLR12#@Psx*|M*HS%d=+_s@+%>)Kz+f(9^1bCz z;nEQdI_CJvp#{~E+#$1ptCpIcHSd4hR>n!fRMQ(^Zh~U_mGL}?w$Aab7j$P_J#8mubw7RKg9-mx{8?0d zDs;UAk>UB#$f|;IemrW4j%>Pvu-E#0@{hqW>CPw45XwoQ2o@D>7q|tk&M>tuS%vGy z`M-Fx2P7Y*@&E;V@b^+)baxTMm7YCwA=3K5bWRYUbpwFKD9QOrty?dYHn2}r8Hc#1znNZ1pg!A&?V-h3rSm2;})6;Ct? z(O+uA><-nUTm|qW|8>bacZv@>U%4n~tp{~W>b1wNUw;Um(~kkU_8HcGS{1JkpX6_? zP3C^NnCtJeoFpnjybzFLyP;;Vy{uoFQExX;VvM%GI<90hY@ zeJS0{_~;PkBl0)za~SNNlycse^TB_hODlAvlo*!~YcQ@3Z~p3&{i9c6V<(X+vv=@ zQhZVOn!o9K{*T)gron~ilJq8IErO1`A|g{k=bAG9yylM;u_+6X=V zPB?(CN=WF0Y4*`K4=5PdDan?yF!JufKADW3I3aA!s#TBJ0!a$^oN(9@d$wau1+Gz) zI+5+6ojap)wYa}I7@tbezL`i$GwVPn3i3*uQoEWgCJGA+0eUQ+>Wllf07qPh;=+d% zS@gM=i9;1BfgOJjze7!oh5dkO_UJ4{w&EUB88zAI);jHogJyJ zoN%bA8}Yh%l6!56WY|9IeAR^FJ&b(naf}E`gKlDK5~XbdH4m5pCQ#U})xpV=Cus@! 
z+<>C^jMNPI8`jp}`$^ioeQU;e<{xIp06Z1O%lvK0N;B0#-xogQPyrMOMALApu$U^K zigUL#Yne>HMKyspz+jR3sKq9Wh;J*`3+f)1Jc`m&fM@`atEmH2gO!BAHUsE9>WD$a z3+?29b3?8-p2ON^%TzOdf^}`A$hzY@>g!~dlLgSrm@pnH=DDTo4Y19w&-QaY2kd6j z9j{--HNH~P&ogo2LxMs|?s0nsh1$G5OFi;3o#TSN3#RVscR zx^4<2AA{wW4i)rl2ek`1NMe#g2qYDAE{{{X3|r%Eo!P~KdMgcYR!{GyMbcSPtxL1i zSEH)wmR%O%_RWTN({6N)T(x)L^e&cK=MwC8bj99r+OaTH#yF$1U&%fPw+g^|)RFrT zvA2RgOR#MP6>MO;t!^PZ`!@VX;^WM?qt?admf#ZufG)MLTtVQLS%uI*K(2z^nrlNY zE+T*#6}6xix`n+)kp~x(JWm9audM;sJJ~paz@^C%)(ceEBkBv!^SA4T3r=oNb+;pT zmmf0oag}u?ugo$HNy9f&tgK8vncfjKp3IL=$guD#E{YfHtuG0EP-Jhq#Ocac=EOY2 z=)V5XbizO0{&iB}pFu-^`m5~!#(;KB+1HmAO1Bv!eV`C>u63SRD}ELgu}pPC2Uk2x zNlDcPl_6xEh6o*r`9CGb9zhMnKT$sW;f0NyU2e0_%3!_yfh2Rn<6CLle;$7-mzX4# z5{rl#WLm_B__LJh40BnBHX;D0n@=pT_`R5|@=yWQG;>i-nPi(20?UkTF5E zD9JMR4Yt(J8)b0tjv9wjerR`rzDu)4s6#cf^5%{;xp-DNe3p=EYZMv3yo=$pBYS7=_gi zCxfEGu^wz)QNmgG$nS=p_E{+5`{CuEAH3r;Z;s=H_BLF%xx%RZcOWw+kM!~|``(r> zZJ3g4&zxSkWZ&Xn4GMFdw#To`shjB#0Af~Mov2^_>z4oL`~Ozt|KGkp^1mo*|NA%p zqUP72y;QvU$pRc{;Qm+z`vG%4O!Q<1S4!%8hSFk^T1O&X1qn^nf(5RIb>uzY(00sp zzEW@h`Q`ZD*CRR()J-wHCWR-1mSfEl#c@B4qUTu2aPM!C)5TKX{JIZOUyr2Ndqu_0 z#r^UJ4b>Q=%abq6Y`n>pS?+W~Q>SEOJr^1mmNlG`MuTo^HQ7}h$aVn)v~&A8RjGyR z`l_a2ZI0+^Vp=^e#om1PCcjAzoayC;wxPena-U3A=e2E5?9{aek!m?R25MNh*`N5b zU(DEW!!aioNEl$;IB99JIsD!5KB7{2a8-Zj=+O}B=g3|z=mc)vtW(VU78@3HUSdfB zyls6C&9)khx>vG`OIwh(oBMjiXu78ubvm7kQ1WAvNU;X7)5{3D(SVs?M=2mzX~XtO zmgmqrbe*s)V-;SoAl<{&oD_yX@5UDR-mIml{KD?DH-_;=U zW{VNbnY@;uVW(Ndn2yBJhC@x_zR$pP%jUI0xey%>*E*$`Oxt0q&fva|cK=eAVj8cE zkujUqW!_?X=C{#AXJt{zt!mHcmn<3GsO3%Y?u(NOqy#Wx$l<(<%CnkL$=N-~SwPu4 zs1OFQaJ_os3GYs!{Kicf>F#Y-MsRlnmuDcCNO!OeL(0Sg?w@d_e3jeX}(4`m4=XYM#P#&*bMeLWHpo!h!XA-e^dD^&CZ{h%&D6i5}R zk5Gx0w6HSlKy}j9?~!D#U)s*%B3I1U`2s_f#@)|sNdpgiI!<`sy2aP+rb}tFhsblo z=Spc0`0}Nphp0kNRS%C;a}?~6(=0in98jeu0}>5?Q5v=y81>>#yer7NV-&akxGN^D zCk%lGD6RErp9_LHANN0NPqfp zJvjs0eW@105tE2oi8is_uG}Dcw9V+1W%3}-JuGIRbw(Q5_`dmm@%l@t6Akam%q#N( zO+uvT03L}ihR@tT4Rk()HpNpMlmc|V^<;6Bhkre}8)9wQpdyHOTM1{#W)25Gn51;- zebb(cntJSeO4;(|;*Yp`zTWuLbs z3{*_2U;d2TN;v*R;l=BDuN5zM!@Q|r*_Q&04uUY>AX4xlmax$RVL1QNUl!B{2!1&X zQY2hw?48h2t_wZ^XSPEcN+t7InQ5YK7973vDgMKf62Crha%s0Q5X>%iKUPhTq;o-` z4WFknOvbNaX=ZCr4(SBmaz0!8nZ8ohoCB^vt0K}oH>m20i=qMHM`QI6WufIciFC>K zfKnDr$8;W`+Wk(TLl93PbAL$jco?^jZ^Jo^t||fYz5}b4`ggNgIKj>7&9S@)^14;& z`*;3sH6KR5Ll4aAJ(>&Bv3EHikld5iNhw(^kj#)@d&>#dd8WRJ(Vpk1Bj1xwEGQF-vfodQx(sd5*{Y|OZelDB8SQn4eh&*Ui(fVXF*J6)kNGFd zeEjV7JG0S$XIwOwbNPCtBYpQ9Nu2YT+Aw&(ld&8*%Z@Q|9V7oRKGxRU0hdb?m*?;lRWnn(caDJz&!;MevFWH ziW299v*Q)SN#hoSkF&{{gaGL~rgE3|T9xtPmy1n8V!ic|gXDn5L0C300E}r#T>fF) zJ`E0PP+=T5t{afEjiWwTX~Nzi&O|jDo~(v4)X(<=8gQiAwCtVy{;?hF=lz{s~(&z|Hy+WFW9A$@-Cw3Z7_^X~etqN2jMqN{MNsf=vyk_Imj zXXmwrEo&fbypamE>RtWT;K8-j1uv45)KI!o=7E12YJcFsNSU)XkE|;xjp$ALkZoxC zE~e?-R_a_=OhW3d+OJ0}H81*{D>4mfhy*YZ{W^EPx0|}fFg`nUTl}%jr@m{E`00h* zAG7-{EUvHJn#Xl13Q0~lrt(4;bJeTfUu_%_*H7^OPV4&S4tVGG?xI9gKz=mr$amZ8MglaKmGr`r~enDjJV5UekXo(-Td+5K#E(3#4{KA5*5`r z7?svlnqw{(P>#MtAe*b|kfu%jujv}-DM@1ojAoeBB=^E$SLxKSB6xl6Bt!B-T|^ui zS_=tZs@q=JlNctJhlT*utoH@V;JiugF*G`!csui1I7<#(=Q=c<$}iOvqF4JUEXagR z!T3U5Bvw?+e~(W8=lQ>}uKAz;`S#&YyawMnu?>yd8Tz8+x52)1>$flg&$9BZRuGX6 zEV=`qzC2qF<%=4k0Y3t~xiZ}pO=l|)jQh4SRbMeMwPINNhhej}HTMQ!?gl~_^J*`X31DY0 zAmm2X*CSG`S6!s4P@+Qyc>1lbu3NR3eU~kbdWXZb1wNx=Cb%iv&G}`rwU54`_~?Q< z_9=GSK3VQID%74rZPQiEz2sl-uW)X3M<}`aZ0(pGlF2Kqv-R5~q(LZ~SM}-`HDjvj zPQ{OUgYJ(M#c2gANV`-YupYkAr<_gpZd&wKeB!+3{DOTBGlVDaMU?%K^|N=2c+sVz zqyf;Gz)gV(EO4O`(%Gp4-GC777oKD-m1^nbMwFM+%BlvgXAPTba;IM?9k4o!+(BXu z!SY>mx>3l%qOg6olp1~UBn-V|cfXwBT2aTQRl87EJbaSExNGDFnz`9QW+qJAHC>D_ 
z&q81L<4;X>#HfJL)On1kJ5=FaS5DE(jjn0Ncn&LXX!~mk_0!KSBBpaHEcN%=EfyPu zU@0l#6JA^SB?H$x4Rbd%+zW13B{ch&KOql|%{)RZ^^3J1c%x!~J!mDouJBf<-h@FK z#i-Tt%AZaC)7hVT%8?VG35~Wg9m;k6aF!`JxT(=WP_<&NOm-CqxutNm!Rb9~ythMF zaWMlYl=keN&FJ=sHZGXAG!fD>lyiDgqQfUSR@4x9`>4QPPdF$$! zQ9sC?vwMP`M$RQ$k<6Eem^amCRM*B_Uly+(!}cs-SQyDGA+AG#9a2>F@THB0L?HRVDecFi_> zRWsKPw&qZkPJs{DLtJ@Jv7ek$O^NA(=^@%eHFRk(L4-L0sbM@)Hmic|R7zWE<`vROo#-@h9!{+MJ)!F#P4#aP z%ZML_|r(*iK){wxB>p3%KGOowx z+t)VQJ_C`HFcmiGI>DaMrsjOZu2cP(c%?t8QqDV_MEAw($L zH51jn<-_dv40;E0UpXhyasPyzrB)e z>%mktJj}x0G6isG{g>8^TID@p5^CKXU72N;dIcbzP5F96HgaySGW6?_kDeE?zs>pD ze#iy|w?AI=${HFifgMOOcdOC`G6yF~CW}o&U=Bn)cV>QlF-5aJEa6=dF*PdL)m8Mi zP!r0dI94Y7rhw^Y0E{uHYqj2vu>P+f^GWUKlMi ziH0R^2X46X+jp-k$n_Td(zSQ{-mrfdD)f({+R5^IyI)76!XvfRLSq&(GsVI>TSa?a z$C5vga*?U*<0~GyeuNsEVK0V_-IN$P(XR-dFI8ORDM3}nI5vsGW!5JO$ha{6*+IOv ze#+z9Wg^)pQVA;=Hnu%hmCYyIEPozsOCh?O4B;zSYF#W=Im|6#>mPj};xW^DJ(;in z%aS15Dp1_tGSdU@;+^qX<^51yR^72;3gyW@%<0QC;wldP%$^Yr4?MtV^Fot%Y@&Zz ztZOlc6j(fQDKUi=cl~ioUY-|pNKZvwRdCDgLN}SGvai3dRz^)ewNMMg?jlC@aJMQf zPMT^_GZe)ewtD@s01T2LQvDzQxRQT<{S()coK#8Nh(yp{kJr~DQyyAsoR=P($04Cj zLO?6S;I^Kz)V zQW;;yzi#I&ZVnmd(iI7QeAJ%BhQ7n}pnovi-Xn)Ukd>fgKE7>ZR@Gl;Wi;^dj4m&3 zadV-zVZq+?xo35*YPYE1IHDJ6YeH+_DKGdf_{$*L>b;&kueg-baZv4<#N_Sr3I|(* zoO;R}fAD(lUY7}e=2+3qH$`&-Ki>vHI2P8;cT`rx%UkEC!7odh2iL8s^f33{&nHa= z7ndJj!vw+ZHD_xLIvuSr_b&w( z;e*Jy!$m4IMP~xJHSc_7zik4`o2_-GR--R}%(-jts#*!TNlr24mtjcc2}eWEg*v6^ z6QBOMVZTv;{R^J0BR^(>hASVYg=GpHk?p8tv5akFzd;VQCn+WHI(J`rgY%|}jj9O{R!@8haxhGR34xei)E)}W~$O9ycf`gOtG zGVJ-;5-%5a*WBBUXt6vi|3M!ab<-t*!Pg^)=-!rs&!+J03}E_JSv~bo0pBMVhp&7!x8DytmXZ)u-Oj#G<-7ylN~oKRPkrvWfr@{lVC9C_hXeb*{JaUX&nj z@$5z7)^fsdW^+)2X#S>@M9!Flzsk5)L}b71eQxn>H(3kiPSLe1klQ=v>RgR%|1ZMO_j7 zP+Pd6aDAwzu4Jn}M~B;y3+!1=)y-b%R%3XbZ_q}T!nD4$l83D?L9cPh4TEwf^w3a7 zhK#J|WbOk*s44is4WKu*q`O>2UhAspzKzVNTk@n85+;dkDdzWP*cHc6aat zqqiUbh7^b74uV8$1foo;Y_Q>768Z<7T&WM_&U2}zpEm7Az@e+ZZHlu_v)_5x`a zv)aKH*1-wT?8fy7eG*wRk+r&LHe((@8IHwRC$}k&!ZioyLv1%xriMn%Z&(M_ctQC3 z)1;CVh^Pl)GR;DAWH4JD7D?;iZwK-Sxbo89O`Qpck5c4qvcClX1%XLzJh{L$;!jv~! zk0mp@y=gU_sC?Y!AV-;eM+Bm~KN~%kOFuX0>Du7B#fL5}mHSBjB9L>h_K0eIW)WQt zR7lok$ivyTWg*I<=oK47bRz|Tlt5+v+*2R@dAJyQoq#kS|0PM=6KMw0nea5>p4MN? zUXR^^3%*>{HdJJT8OcPv*$8H!^aRCx981GNni)imCQisM6N zkKQ%?aiPq9s%!4WYUn6UYeBil(T|waf9q|us3d=6TV4Q#PJ}}3-WmOAxcxUL)b-R0 zcm25AC-(niAo=^O=nI%`*Xh18Y)SNj5NSIkXC>TCb)k`e;)jMU*!z`1H~0O|lUFB< zm740ajrN*H0L@#uVRwI6 zEU5RX1-YoSC@1p`P|l{Gu@)_vide2(Zh#nA+vofC#bsvG&l0@Dp8?sZK_%?c%prA? 
zv#HwTB4gnlwK8|#vt#J%kzkpN`v3(rZa-HSs<-3QqW{bMlaF86FBu>t>NaLwRahAZ zhjt1fmb^Zs-OTxG+0BD~QL!{?od9dNpHyoY-27k|=}^RYLr%>Ye)fi;Sxe484%2k% z7%QMF3UB4-mM%ZSp!zGt1PX4{VkXmzTsrp~x^)= zaiy%QWaqx%JrHz$90L2Tt+p%0Oh#5b1!lHd7ex1pmb8JsUy!mSP}WBl)wj=IEaQ1n z+;2WxO*c%O)A%3ky=hoeS-SUab(K}sB37vk0)kbHG7Azg4BD2AVn7HXOaWz-gn%-I zLGYBe$V>qOk}zn15E2q3kU$awtul`S34uUB<}o0%3|jBDuhXZx`n>&~>%7l-&hvaY zpRzaEVeP&4T6?c`ulxV|kE^y-UIJ*%e6h<%?UJrNwpWDtMo@^BeQ-JH$o^KYr*(#t zo!#OfIw#V}Gq*G_ZuOC3TZEC7h<=WyHd9X&8&X;|)#^L0*Y~W-anZhaR`>d= zS62BE3n}9vO)G}a|KT3~(=g88j{E1MhSZ(hkFfYntp}}IfBD@ogjR?qS`~53*ukuzvUS8uI5OSx3eOLhF%MhBX`Kx{M+7t&z<|Nyx+F|latopet6`_zfXGj`)_|A9Q@rk z|GM)}tMlIu{--DRAFQkYE?45;{@ZUG|8uKhT&R>X$?Dr`jC*2+--q~;2d%jg_-v`f(&e%}6qPEev`6Oo#RWA~5Pdte| zFtuAvYx#2rb?m?lZa1bVX5RGlCL?g@F)swO5Sx2LbT9!UQIVPsL?TY}>9BUBZ2a5( zlzdwpj|aQ`0o@ZMwX7_8>grJor#mSQAp4^hISykEL~_gN_#zEaQP0SJ&%-%JH~IS7 z>8Y(7;b{%7gPe*|rn!hQl97GKGr91r@MEE&P^t`WW9wxL5|1#Akl>wUspCJ>Jla%y zQJFCs8XEW)b0!G^Svwx2`ar*)D4kDc_G4OO#0 zEoW2dx;@ZFd?vI6`!eRws2D(R^yy}9U2M_P)PTrkUA{wXhFNMvy|)MrW(UL?d2Grx z{xZ!!{`$J>3^rMMfZGPwHs3y}8Sk;I1PwG=H;1c{r_TdL+Y{kzhmR+M&g-<*K%)`g^ zmwb0kB~p3zr^-wc{lP1nC;$6icAI@@TOEhY$!C%&$GQ>8UMTvpF7czH+LJ?+uev7C z8dnudd`I_5#K+^WEJEvCC_@K4VQ}3sH|yKvDi^t9yzU{z%fTWNu{YQPBZ0_{`=v0@ zumEKzlj|p4Xe&CZ6X(zFtg3N4mpbU~*xkFm`Yzji*Z6@$xBsg7p4^9ugDTlDifq!= zJB0qmx2G2UAQA(OEW9ZayFS|;=9U^DGtSLO##0v?51Ve=)?I}`LI?zEA{=)}?nR7i+eUBZMGGItdL9 z_md5Wy?R$g8pCOKxU8KglpCOLV=-Rj*&ctVkqnQw} z=+VuoQ&=V(K1E&5N9AH3w5_^91%`-Ab?(v81LNIpxl4JFaUNtQU*r;r1UHg&J12&n zRxiMuMwbGKDAO#0i)TC1r1nARHnPO4CC0>WB+^ShiSTH#_cQ8p9Or8CC)>}cOY?o% zBt6n!QJ0}+1-D`W)FqKk1yGmeUa;COkD9ad^6lXhwL$d?S5T`BqTBx3w)bqSWSC7c zNuK;SomcwziR}ATr}7`*0k|_)sm*au6-LfnR5n|&^YtXdGfvpBNG}FN7lW8Uu*)zW zUB~6^8OEZD;-}PXgy!S-i$vKxWGpP^jMm@}rLWk~E86W^Mp&}>hl|=PMF~8R&;1uD zW(k~)TxoCt2#z(2myzz!3c>83XI*p(y|kQ$xjFitRlAzkM-TBbKTZk!8Fsq3xEwMY z{zQ8aPb$oHnvwmYVh{1U{5y{4%Tz(N&N^?TLW<;$4z1f-G=OZ^o5clt%F8CPHb!sJ zmOkAk?iEsbgAQ#aM=}RS&=HuJ z@!gJv7xy=m9avUmHyjC+W&zi=1LR6o*ZOuK@+3YR2>ChCgA-!W@fP+o^*yKiV@Hpk zmhY5tFh-kMAEVAMp1ZjNZO#I(zH0_-;tp5o($Tr zw7Tv(R!u$=h7>W1^8UadyWVoIzAqi7@48S@9*gv+^UYIpI74IX)-~A@M4jXenbBIy z0zI8Ed#dZKTxQ>Oyw#!;g^$rfX$R4d6IXN2Hj>#4eKSDB45*~IL)tFmNxo9C=v=d} zxW~q_Dz>)hJISOJj;RxgYM$llf1iBX8&W_70NjciMpT}goLPhsDlquu=u*~Lj|o~y z!=sI*v#u%NOhj=c$DnaJn9+))TJPPIblS4OA}Bv4sz$J|prMl1ogBa#uXIB344tg* zS&YVU%I(E!kan>njU(l(SnPyB@der|8(YwLll1kn6n4yJ&|R*kQhpUGawgsL{LIdl zwu@w*YTJb_vy_Y7@;P4T17*bH7P(p_ZhM(#f$RhghS{%2NXf*6C&_`iOjmmDdZBM& ziC@z#-nhfe8OJGZZO`*Lic1q8yPaz%I@!Ui=*si@QkQy3hqQD;&BlK~9GQJX_rjPD zsc5wefMyrvE9OuMtNvkb`jEYcF zcb>f2s%UfbWi_liEL5`|1Unt~OVKS^FKYcbWyY0&5?BV+9A~oVXqQLTZf>fAEVqP? 
zKdAVIxT>ytTUjiFz#B7cjUYdI-Q`ZFD4!XpLO9WQxx{6hdt8u_>B?xzmpA*b$_&lmJ4pirN$Wdd%e$y)hAi z5v(!wz#kK+oLfPf@QW^qdu#@&1k98ibBKiO{1i|k>qm7_`Sr*TOk~}ikq8B0==2;r zb{*;N=(ZD^6PYE=^cRwb_S=iq9s~~a)4)S%=Fe-PcP8%qF`Pz74)j7eD1abQ8*^Za z<;7GWQ>?`S9zGH8^^*A0h>VzwR^haOYjp+9)Dgl%ZmYaS=<*T9*qx95A|*qTKBsmu z*!LMlySS&bCA-1NMPYTfVWtmwC#;nx7rX5e+@^`+-iO_hX?fQhCT2ZDSAo;&D`597 zvnHBiW2tL@Zc>X!tRd=Oxa89=OViX%>X@Xo_|bI)OJ{t*{yEkUK-bSt^j!h23qaS~ z&W&$M{S{rm7As^wVFT#;md~%d@QJLnfI)45r2OGy*Y#M-uFoW8U1Rr=6vxjbWkE$l zwi!TDwld;b?71HtD{MVR!o@*A;sIGsU1B5RR*@O2o)jn4hbpTewocb{^{1!9m~+xM(}7*p&r}9HsPC;*ln0tg$-75$LFKYW7AyYSK;yk5uRJeA6y9gSEjtP2dtZo(QXBj#Mm zYR=8A_bh~qrzI;`8}<^Ps^9W&3eN|wMs~J;mWTRoZGUh5SsrR0=GkWSSsp4WX{@16 znG)2L<+rR(rrmbSOZJscvDdxiF2Z1uEUcaGyVafZFJm?}LTpjGb95;D#|Q5ZToBfIBE+@Kb|b6F*p=I^?%mdx6x$Ykv6k|@0_`1k4+sHKlYWnhl?Z`b z0X$3qSiZGx0%qVBcd+ia8wcRzz<;ZJ1t?Q^`pTC>%9IU#~UNhD;`>wl;nbCN&M;_r%*7Ukaf7puNJxw6f~*T z-@a5tn8Gw94`fkZ_Xl&JPD2CAj>@5xFDXtduf8-w*MaH{=-T(^<^%(`UG2c57JzXWx2PqOKE9lACfo z>1>78@tBTt-;ngiumv7KM$?u_KJ~>dV-zy^fH=~6C3!S}h*pN>u?q|?*qC7F4OKfX zVtFz2j8Kqt9Cmg`{b|iOT1JwP{xL&%XEm}b^*E`7Qc_rR6)Gl3_swi?apt63SXe23 z4n&}K6RMwl~4_h&o>Bj?F$sIP^-~jGA7oS+u%5v<9^Vm zSv@cwSXK|+MJ9gQD6DdoX@MCDD;SH+@$Rq&CA$?M9ozWle3x|`JZ9LGwSF%lxtJ4q z=sJFVt}bhZt$IN1#8kqU6&wOe+wb9e#K(DelWGRwH@W)wB0T6lfG~wsArB-aZ@v<2 z2iWT7KlUsd0qIKA`#FEw>|6=2!v+RRd=%iXG?b%;EbqY}yGX!t$CoxXc;YWwG+*Yp ztACsJ_ngd*yKH`o)r~fAB+A*<14yvjGKVddUpC3|VLr~yR%x!<85QC=DJt5pg)n)D z0l+GUTWKLFp~1;Ge3cVa+=guI>V@qgaE5arnd}Itk%#mavQEt$HSRtn(xz?l_6?i3WN`^KO>;K zQeAo;b1k+{CtbD4KhTCYH&AI*hB3?agIV{3Kl;G+#ys0Vk)G*Dz%*1(o5xX)kJzj7 zCrkx*XTq+iId0#mGaJy^gLXvkHuc13&m3mBzp2RSq#fFSRn7I?Yjn8NR>@6nCe{m4 z-lc>j&l#VS8=Z624Fz*_BTo`BiDy6NuGJETlhlDe_GMH)>~inxpc=A zqcrUbb)FZZOUdD%Wbci;{GPVrLkY@m&^gI)ja_4=<|Un%k)^ zn9X!+-G~)%g)z+bB;L)#me50clj^~l50(yh+Jkt=t@3&S*_%wrahdh$&uX668}r}; z_aHerg?HR^|5fSJk=KDab`{=w{u(=$&odzj=x+3Q1l8ciIbk5i1A0>#Pe1OcPi zp*ME93!XJwcYx07|5I}oY4RQ^9gF>%gjXufwXmccIRPfWq!+J}{GUZW@XJtnrQc;} zbtiu<-Q+R8QKK_}Nv^>H<%3|-c)YHK9zCX}?5RwpF&a@3Rc45QwlQDYMicxx?I3gG z2A!X_F-Xm9|82X*uR{;wtEYBi8lI>&ycK9We<(CVL_}Nzpy<=~(j7`X+17G8%;7}3 zeBq>QQo!Mh!NbnBY&SOl>g-T#Dya73a^y_!h8nmbJ6vcaH2O(f5O(wZvHR@n;h*l$ zpUXHOi62G|v>z1w(eovEfKJ;pW;eW}I+F&#>*XmknM-LtPyvbf#Og%v`!w^KPd9;8d!G&pV)^;TvRSLFAby)28W4TDVBCfk zj9bOM=w~5&b6qx%LCiQyp6Vf;RQZW4m!&F$eh&?k7N-D=-F#`r43QngoA4X%^$Hm> z_3VTHGQ-EpS&$}9my@YfqMwO-(#h}my!?J-xU^b>^#&RgYDouUt__7(MoB`Rz!^^h9)ctyoB zX(&+YQr3Bett`6`3!f-@G+6R<#6pmCN>7P=?T{Buj3Q4P3I2H-mP5~GoKb(_1MADO zQ6LWK5tB)?PUhyq8=jIxmlI`fWsQry_cCUkhc)DFwEeQ?#luhb4$*s)27a0cDWt`o zrNAZvKyk$I4Q=4nb4{&o5)AbI$Q#Kknc9v0_c&0fGq04Ubx-T2HE=4SpzaU}#2C+9 z2=%$mMo^r^V17FXVWoBMXzm7V$Qjl>e2V}8NlSBW&I0E`u?S@XM%}EN5+$_g^vE0p-w3277`6AsH$dliJA zpGQx^u{%mgmW^*_XPS>J7p@7+*H6N3Se_=b;wmCXs*J-5r$=Co*oZPLwF8Lro|&_x zcgYot6>uL&gQ7@lHJL!Q(Bf_l+)z`*zu7c%Yz~rM&Q{7w=7)0z9eG@ajr`f*bQy(= z7_cZzmh$;r?1+SZ1Oh-&5t(DlKYT_}Z31|7#m^`z$==v85wON!@mYkMAoeYn0_JvV zq!i)iKa9hzY;^BAIQX(&A@26p>V#qXIuFEsYZvZc_YNlqckqx z4IMgUF-yZM($&%nyLv74gn*MQcZ$6=mb7%DXr|uFe=kBuNMaV`t5{IeNehc>!MRv+ zCqm56TkoUX8|-dyaNi)O#d}l=$ zdIzS*PaR)OSN(bbbAYQTBOxlY{gPBTjGV#WOEq8$f{YfPS;(IN^F5_)ZNCXauHQIL4m&SVh-l4*2fXqUKG#=lB&^fjt;Pg17ED=u@}-ZPjg&&J)WXfAdfCWK-t1T&JG=hzEZW1V|nit5^Te5BdW40 z-U`*5tTW&yCC=9T^!B65zjYg^O z6)cTEU3|1K&E-Iv*VQ~6ElSWk+mJue&-a=RUEUMTx8aONE+IZ{He(-ZP~S&7`5^yg5)fMgL0P(QIaHedxQ7b))f`bq|Y~LzRL4 zd3)c@>pHG`*!IS)Wk&C8LaxptCKNVVYenPtV_QR~8^=bQlI3g_I?ezFVqmJdGgCU; zY5ykgXz9<7*$<#-rN1o(7PNCvHqTh4g@qGjM<||Ngg;iVL@Fx+zsOHg4h62u zlLzTJ0ufP^1RJG##rDqxumi*ADH`mm1bq76$*k?`Cy4Np&G#0>qV6htF8stq(LXA8(Y5jEV)9D zSzm5#?N}JDkIG}^0;O+S~QU^G=*fS4jm6TS2xLzF^Cg9 
z0W~;`ENG~o<#`*I$Dd+H*uxB$imtpI(Z-O|;&#>pD_WO{ffVu|pz!WFEzrklm`Ak| z%%ec^BoI0v5k~Xx#%thJmk1{E?UhWw+&sm$;FElJ)H52CYnhyuNhIM23n^16EBb%> z`=HT(oQ!hBbQ8C@`b|X~xiB56D&yNh2_AIHZeQh|gqdg9mRpqoEEup6qM#5uT#ADK zdL(nRMkl5QzBr56v|H5=3v<6_%)hBQ#@>i9U8*v-_wmU!#OsVkq36$jx2Fm!wmG;u zX*5dm7CbJ0>-EFtYkE)8r$4bv8y##MmY6_vy3%J>DE+4SQ~juD%Mg1E1*4|RUN^qv zY7clvoy>f5S~d+O%UwwO41IiW4!Un;6<+*BlLxKx9Hg3jbZ)b+7qm0#VX!TEzqm)Z zl8OW$G!1=*T$;%27b>PY8$)O25i05BGt zXd4`WHglVNNTsOe&^og^TT6<8W#?5P90QV!c`?=(-eIuMZ57 zI0BNc&k;A^gJwJ4i=XJ5|}T^Lv&!_=bpHLS@fJpGNhpIeWjjegffw~PnilL{gp zCRDKs$|(wD6fNc8&pvRxo1y>>Dz8BMI$>hIB~7MB(z}BwzJKNa?BGAF%&XCF(9_%e zTZ6clR!vZ43^&o!^#KP5NAuwq<&V=WX_tn%=BUr0CVtMq6WRBfVG(iwca!$PW(dM;QG=x@EM$dE* zE+~={zyiJ=4M<|fo5?8&Ql@JPig@}%Ch|C$nw!)0B5LjkiUDo2v9Pc??VV601(I!_ z_Hj5#0YFrrNmQUw%k)LFM}`zGP66lRB;(iBu+b$1QO_)sOj<0m3wlC~LY*MHp6nlg z>Df9s%4Ms~4854#3CbOu&@X8Zu65vD9j}YmomA8ah4B4G1?3OU0hxz|St+uuT3lxw zgdsK9|F-{6!?((l8jt(PCJxar|M}V%clw2DFa3F}Y@fj(h+|?WxlU|ev#PYCG7+c_scEXV96p3&E}5I(@gFlX9XdcP5aEvg!D%)l>3}% zS#QvIW0m49B`c;~G~SY)_3&ZgOXy!(V4?KP>c3(-eeETo-&mEf?1Ovfd9vb$treTc ziV)lTnjYF!+1Zy&o-T}|*bcJnswdaf^WW8b;)nfOYE~QvGhOFnm2)31+Q>sp##@9` zz+{T&1i%M^lOy#~z2sh38bmREy#1UuB3f}-Jbzw&^r-1^7+lj;?xZYN2#Yjdhg}Gd zF)7_yS&ege4qR^Wg^Jk;FFgIVU=Mup5z{Vhj!i#FW_<|PA4!l-M4q?;O`3OUsR9Bc zn(_`I%mEsq2|13)pI~(5l+kHI}uxW>*haY9M;Kb0pGQrv&}Yy!?>DB)s#|ZgGz;W_lwg zN+$J#3NsJ~u07Qih;_>R+iIc-cUt~CX zyIeL3dg*%Jq)(;JJU~;bT8J8_7m-b3!UeazO5AK!pMQ$cWcl0MScv8R_A&|N2TeW&f5{8!fSr{BDjL7sF z;pto8Q0%icTqKt2wtk>3%i-=E_-U$FOu;#x+d)C*$l!T^eF5;v zF029lEBodH6WzfZej_9ela0sdyL)ZnPwFi;(#_^KZG;~Vi;BKR=>S#v>k$QVb4_aS z$O9UDHy#$ubfn?2@!tid&dc??t?K%Gu63mD%e^=68?M<`?+4i*ZPkwYn49Lg6dOm& zdCFba9IuLD7%GB{!U|?bj7g+tu3K0-?OCeqPD#$( zqMSn_dkdg!%YAL1G967xsFCVgF_0#ky-Jct9E~#xC@)MT&Y-3Nm?93Km=&FW;&uGh zU5@d~mXuqU%F5+y{<7O4d6b%EjEpo}W)}c$oJb3bFGil8>J^_huEPfLoC#f%Z~JD>Y(LSWIZwNqVUtCH%%7HGI%S9D3?xsQv}E{X zy3JL+C5B%}yZkDApkohI{wxkBTf5sw+>nDwguld4N3%azCKlxR3IV~DqzFu4#X_+F z{@>}g6j-0Iq>_+USANr+UhySD6L&7I?(+E=3-bx|+lK$TLNnM4tJxEqNNt$Z?kgkS#TC zMa{@ytC!OnvZ%|62CQ&^PXDl8k!)N+2Ze`lAaW47M_Z0#GTF;EL|k|7g$};;NGWBbp`h zLM6no%oE_YrvT+A6(EJq*ZDyTW}TJ@J<&CSP)&=-EAfm$4|JY!;GN|=OZcFd+*eAl zEDI%h7C8O02PPg*I^!XBV{6S;J8XE1|8=$Z&=Tn}8JpX8RZ`jU1_kA8We89^@A*I@ zmTMf$p4h7b-KyoDk#I4lL#sm!%|{-1T;vTX>$6<4)IE7&m#pW_fOYVBjdGYF zs;6i$(@nGbV4H!R4q^g~PIoI9P#c6l%2sBnpAq$Uhr!&493bfa`kh-qd5kNnsEtq$ zN|CCAIca-v88P2hC}#Pg+-P8c)5=bf#Z@7&Nh`)+|K!E5qqE=oGa1jkcb7j>GOezK z*9)E#Nd#Xd!!s{Tl)Q1Ef;hE$k$&$)^4Gr~`oevdCSkLq0!9i$%UYdxjsy>k$ zS0Z>oY=j{mw4$pTZ38Q6pq__WMsSD0UcS<(1gQXvW5Ux9G(NnT7v35yM7@lBz@Cmy zcy}h43g@HERJKPTX^hSfw|{#uhTA}`*_8r8~k`?hEn;2EH@ zWF)~R0VZf5G;-QESeUgKH@-`h4a_q<9GQAGau@Bhg!@$OMweJgQYes3wE_$rSs>#r z``g5wTJ|vF=c_*WhH7R5=K8U2|Kwa}788}_i#m5Uc4IW9)b2ViVu#vZE7E(d9zK1f zdeVuCI^oE^I(|JO$n3*>coye5c$fmcqg|f^+K}@%GK2cIp9Hu>8kGbj zzRUm#nCXyWdMDx->yZAaZK(B~i zZNl)7+Pvp-S3^%wLPlpSHEtqWNC)3rQyzVAB6zgLzVJ^#2jD$<+flI}m^z!zP)@_U z_fTb92sW2Ctq)qO6g*@a$4#LNMqle?R#&%jQErJtcvXO*_r`wkqwD}eEiN2;oA9hx zbs1|Z;MKTImbg95$ondrjSQXcIS85Th|gZW8IG~p4rspu-bbhT6(QH|Kj|}UN15U{ zljE4UX?TXts@cx^JK^E?qt99`TifIuC;+H05vQmaD}Kd(3uO~txIGYM zoO@6SX-LP*9;fU?6QdXX5r>KXLo%WZJEIPr1>b)?AakBU23azW!2W zK%{;2?A^;Onk{3k$j@7`Ln#La>lMNy%)b5{Ku|U#=%=Q)JQ-fOL=m81@wYSIPB(gq z)pyxS9P&2w#@m8=O$eQU^@jsAG`1qj^J;0n;WHyS-q``#NOU(P+(_28aFQp#nPvYp zrN5nDQo44gs3b38vHw_nzA!j~MJG#0=E+tp-qDd_I9}804G$MZoCe}NB(@%49*?f` zEPYtzeDS#Mbx@s32?u5$`n+!yXs2{XmTyVgrg^h<|3QfxpzInte2M^gesKa(?YM(kP;)5a<)$s@_cH{DrNX**gI#&JJ@2dHj8b*05!70F{>DRGZy zy~((i);~s)38*(N3TV&br1?nuS%SuRUbNs<@oReJdl0+Ac~;KPb9sW!WI2?yF5QXG zt*o;3P5pRLmWmj-HU+eZ!WT3a)_1A=sNiZ!%;%>cL=@I*zZdFm%r8yBrrxR 
zwiVw3tk;?pS?c}d+0jK93<~ur)>WPiRHDXHq{rSbU1TI*TZ#8D6oy(|t zCz7X6dEs(w(%sL+_{MeV=#-`*ap$&AA($mGRgj$}zu$`fozO{Esy47dP?l&BP=^@c zVHh6jt2@})j0UusuSUaX-l9XVeehV_NaVH5@i>#HxWDLYk zXP%v%AR}lIo+O{jIP4e}Rk+yenoW6iQLMSnf6VoqjO)cLHAAavLp#KZM-H$m+JM=~ zN2%%xi~~j~GKSB+xeRuMxh`Opb5)Zn{cZmA*(9R{{OmgtH}n7+Ok^Kr+XqX;3}U!^ z{va!P+WR55)7J_^$pXWcW)~Tq#dV>!g3EmYT!hu@3*A=A<{g@TB)aF+^Zcs&&Q?kY z#~itkH^G-n5RBC@#?oV)!`2&lZh?Y_>ZFH&lHf+|+aJ?8aICnqhAqqyfBZvU^O#K4|cns<%B56J$0A? z(LrmmvlN%{-EQ{w$XXcB!SwWBR+@ft$FzgIOBCx{RLG7$)V(^bzH3u)zv!FRD#T!h zax8Z4LphnhU@Ud~yV{7xu4|8YNJ~9P0Au%is~G(A9DZOR-Wjjws`FlNsdXJry3$6{ zAS~0H>~!q-cOrax4VJGdXwDraIAQM7_8fc0qwY=9L|dzIEd-j06HRM_MpbhhUoqHm zHg5^Fl?C;=O2*t9i8cz6T!=8Z_+_s_m(*cm$C@(=0DSJ77;x{}$X{=!S?@+$sk|dE z>&d-3jn0d}Bw7E~;y!QvQ!&@y4*TaSxc`J2^ovxEkMr>)?stuXKyo3fGJVp#=3?GG zsKTL2$lDoVBvkdtg`EPEQ!mGsLn3c@4qm;-G#hXpf;zOF-PQ?XGJP|N>G7*DPZB34 z|6l%gW`FzqUB~&`AAk4xU!o8k06l{KI8bR(Hzz0#uXi507R0z|Xr6{7&B5OFqF5k9 zR1SOrnN1#NET1?X^LqVo%}%Q}be1pB3`3o#>JT5EFMA;46BcP@7#W)8fC-adUf~>b z{;|l(ccc-WWE-6R?Yc3^IBG5^dEPD?T1q0}V%Qp@!7zJ*NZi8a3DPhib7Y_>@2hr{ zmP6ZDr1>E&dmly5iY4Y+W`)Rn-59q2hI|R6U8l)|n9$tsA@>&Fk+Bb#7LgSBjS{#0 z1WqC!Z`Pjvj$`~`1-HBL(Z72EWJk7npL(q5Al#23RuBsDeA;g%RxsDO=RrYal7)(Z z%_ZQIB#e~TAG?#g0f_N}+MeU;O2F_x#9ry6!+SS_%!nWM@?d_78+5dR3EWBN8GS`&qI!gtEf- zjh0TWRY}#PYM2z)a50Z0l{l2i@_Wu9&moK|z^PenP!bk0;aQ54NADpC8^<@vZ+O#)b^CT%mcHt?O zmEvH@VECF0JNvzm46b@s;6L%lhrPMj_NTm|docbQU^(C>SO=N|OvdMN9nm zp?H(@PPTgag8w&Uz;8C)F2~;fWIQ41n|kM@AW%r<>H1T=rv3iQnot1(4e))vu6g*!YB7F*W7G zE$DI=5DQ|@c7n6;MGJ@VVm|?d`XD59_a>(&vKKYc1ApKZ;(eyjdf;P3XN0|c*L$6T zi?{33s%w)XTI7-k_=Zc>+(w%m5Yzx1NI zL|)09W?*q;mOo~@Z~XC2#WR|%>G$M*kxlW>N$ zJy1j#gO!=&xEG1ODEZNIr^FAwF081D^An1|%%nqBb0=;l@~x8>R9*Tp z&djI{Bp?Z88cqlzDZ~7Lu|_$++zGtQY}^!sa7I&8CO1s`0du~)Y zXlLDq9SHL#d3{1rK5gW#Lg(S$CF5$*SuTf z1ClR{kV@WtJVNp+&v$%tHHy*ydhb5ZW$0dVH6?iU-5mXt#XbfYvwKUw`$Cz|(dWxs zZc5Mj)CRF&b3im^GY^yEIDUV29D^A8crYr&+*jE%*;tDawH#~K&iti1t7F`ZVji|L znkC{z?ws=Y5>Tqi@fuPLdSb16a&PAuIaA2rM{3%dk2>1k9Qnb#5?E9hc3UrS{L3$U z0O2Z<)IbB*4 z_%+>zApI;@k7tkZdnm^0yODXF55-&rQ$o}ljW#WWCKSd^DDR&*$rzXmr zn>Vl8-)%6?d`iAc^f{?aba5dKGXW)yEGen6-Ts0&@SqEE-q0EZ0S`LY1P`Gbqw?x7 zMf)_`M*EuY&!=ccmDw0QCHywFU@sJrV|dYc`r#7w`BZo=`iG$kgV&qlt;Q9rD=)8N zTo<#{-Ijvq0zj0R-Bv(bR35_Vc`l|F3NBx{&$>ZM2 zIbvdx?$Snz>_;#Mqme((eF3OF)0W09u>PD<|E>#NII=S@(L}NgkFHQ52K}Pw45Y*d z&nbs{)2 za4Yf<jpj#Gr|Nf7vkZoo=T|xlPqm zr2qJA?+R}=%vJ4@;KM!%JsxUDeA1dVH8Lmj^XR(JmRmPJB6*SEMm`YfxjR?LYZl5z zo7#oRyh=wI>w3yn!9eQaO$e{5Kw|$+W#D=7%&dF&rVKq z|93Wr!dU@WV7Xmc*;`#Sls*07B5FZH{;{gGl_MYedN>PT+*Pc2y>K&MlesP|1pEyp zG?{U|D z%dMJd<5!dcWSjuc;YsXTO}}@ODk}8xaV%y%UPUp&JU=1X`vw1{tn2U>!SL(9UtLQaGC(0sAQ}Z&Ww0!EV>TRUgz= z1=?A{xg(OX( zgME1Xm7^~L7l#Z%>IX#6Id%YOZI0YjRabr7t`vCA&Kj4e&wh{&lzBO3IV><|O{KL2 zqHqx|OJ2&I^iVV9C!D126drPYvcm}GbbjPNRYAWV0lO8Ku_(EbS={@wJ=gAYJ*X)< z4bU-7?Z^Nv{;>q-&Jkp8(O02AWtpAbM(Rp!jK=-4ZRB4yGa%wwg+Dr9HQ32$Db&-zQ2U5cIW0|_t`D1pEahF z!SZsdMBfmnpfqRF%Ix~JpN51muGz6{dy#~++U#*^Tqj`{>S}We=H%#D3AL`Y{-BpU z?4=Q$_TF1#mA&TIy@|3r_Ye^#NOIMdWR367X?i4P@8epBNL#`s3NoJmqtVJo@}3l| zJX!%t=0&n0mL~BPbY2$?`sPdTWJ#wOW+!elv!n52%#V!8{pHd<(d4sF+Rp}g9)8{( z;s!4vc6-B#Bx;h1Pi*rD6*1Ce4tsxXSDBAcYSzEfVVZ4HcT)%r-uQ;h`$2dT%87NW zObHKz^PNAG4+IU*!P95AawI?S1Y(at+D^5OUB^iD8cVhr`ykQ_8NM$u5Rq-vUE9|K z?rBWdOjofn3dGc$|+SaKw5_QO!Yq9me`u^5$> z=bqNmy^$OnQSR%x7e!KRgp9s28o@wztUz{jKmV|l73j6|_2{Qf*9RYlyA5fhcdCqcOCn1i)qI!Xrsi)D$<5MTtqkkj9W)y2<xV}{<* zUZrFkvS^}Nt&qd~lQ=^QOsX|!b&PetWUX5NCLse>R^1mN>nk~KS7q<&y?trzAaeNm zvfT@JV5V?9XJAyR0iheR3BmWyg>9xeb+lA###Jr@G?ncb;HI~R8_82wx^U22GF|8W z9$k6($MYd~+gg@WH%H*~CZp2f(Q3rmP|TT|Pzo zz#ajuG!isZx+`7~Y|h`o%{UsX=cojn$-_~62+ z7GEmcX~0vILoLN;G~}}AU 
[... base85 binary data omitted ...]

diff --git a/docs/architecture/assets/ibc-rs-layout.png b/docs/architecture/assets/ibc-rs-layout.png
deleted file mode 100644
index 9e68bae5f81c14fd470835d97b7e82216fb00bbc..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 58389
[... base85 binary data omitted ...]

diff --git a/docs/architecture/assets/relayer-v0-arch.jpg b/docs/architecture/assets/relayer-v0-arch.jpg
deleted file mode 100644
index 3b574551e57dce4f6af47eced791d69be4eaf114..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 558757
[... base85 binary data omitted ...]
z;^OZ$AW>LVlOp{yhyaaIW3cIV($Xx@IqQX1ZkBkrLs}ruVFxk7zkkWQu?9S4q(f;& zb3%qG=_@XS`E{u?zEbJVfnyV2U4DbCpXCBx|MVo1r9xP~&UOZ(rB&B3ZG-T8Ofo12 z)H5}V>KeIt*nQXLZrYS&z_$GeN9WB>uJJ2Xu1EYoj@ zcbbWcT-8;lOZ7-2gjw1sp~;0xmb@7wrML0j-FaarhKxJN}1{4a3-@S&gw=)2x1zqxSo7v;5@ZliQ2(b`HlA=TTEuV$Q2~_i_fS34blFBl656 z<;5Pmq$N-T?6I>STCEN?1v#+C>X?b+;_aa_d{a}sGm?7cj@tGmxY9n$+je_?gniCgH*2mt>(WHxz9rq4>aPFflW&$@<^^+gd+JrYKg^8)nE)b)VZ$6hrV}07jymkfB9U`+ao2~@ zS7$s8jToi5Ok@~ZpH2{TFF;1{PBJVvB5#hON|(MhrXf-WS4QY+e7sZG={ynxHOCQ` z+N$LOs@_8=6@HByT`){kV1|NKfTyqig#3a52vCJd3JA-NsGzOiQaKvJHVatkl1(Y2 z-xzX*k)@L(yS=?D6K^gc+&mR^5A5M3PgP)BVz40TvFB#NTo&)rqM;$spD;q4Gwt;T zQD0d>(^vRglX_(nlrDYg!yEZ1cYM>h*tK5S_-F42=r=PqIQLtKB!&Z$8oAZQErPj* zaij86$v|I2km*bJO7ih%8)N<)2u>q6t1(yVOIc6GN@rOt)zamvrk~ak zZRg%=&arqt>|4VYXoMvA5ehxfEWpCLY(ZL8Bc%7R?!5^#n$_pNoQ589(-Wbf>sV`) zCEHs%!qC&SfOfv8!T+Y+^*@&~{Hy0S22}hE!A6rbRi4HXh0=-`hRf;<`avQxZ+_GY z2sdl6ezp0IlxUXtVt-uDiZL49lRY}VVbJih3`G>D2buV{P_w=|eEIHV5z=i|4OWC6 ziQ2fO4g#l0YzqmaSwm@-A9LhUK=O~^V%wl&7y)3NPz-My+>mRo^YzGg7aP5_AdciG z2FbcCD@`{&-31y`j^}X8AE#H3O)VTOS+F%6^PW2Hw08VD#5z+db;@3CHn87>J=khe zy)?NzkhFElzoJk}J2D~t{>FumL*Z}fPc7jSnRbdrsDiA;DO+TQ!mqn_WS$%s2lGUQ z`a@i}p-c^0XVg8hjE=asJUU2HYZX& z?8dMU^N{j%bafs3#FIk}v&F{l6uesVp+5%khc;FNx|B_VsuhenZf7;Cc(>2iW$HZ@ zop0=byQwfy6GaCf^3FDDepE6t{7M?@+)Ch9!=|SYan?vNtjjXok~PCV9I)PY*&I~! zPSQQE&9P6u$+`J(aPD@HtBMQ*)+XSg9*JO(+Is{L>?1|W&DpuEas8oldhNN6Jk@sl zlTm--&pC?Ygi}myEvpx3_P9obP)gGXHC74#QQo!Z+M|%PG3UVcbC2%4qoyh-J5{-l z2U?IED?*2r&D}Jh8&SZi8buf*@DAy)VF7<5t8zTVqtHsl2S6cHIKbmJ`$|H5^YMUI zC0CbWU4Eu8x%|t7(Q8&3eEns_*h*D43Xw{2WK0&7u&`znKRZ&?mAvJD!~#K3^3yZe z{}+y=N{*9b^ELiYRZT-VH z*KRIE1B;Dks;qOJoD;&#psh((3?0If*Wd`rBZsCLf|My1==)rDrZtXdpFv2>n|zT+Gx+DW#FBx@ABF z=TZt=jHASTsjHwon`g@krJvl&vg^PJSHCz_UFQ+dZ|^X_dm14dLn8@{S3Pi39;B72 z#+pqmYK$6C2X!D*te}XCyc6Vl!_=CK-k?FEbIADOtm8(m@vhnHQhyOjcJjMkuk&A7 z=g5}`uZAj#XNxc^VSd(Q#@IfysURCqQK@EBHqnD^jmC1{^DfQ*wN(i7(|9CiBz-Qr zu#VyfdEt1XG9*~X@O=oN9tFExAF37xrk5~UoNS-O2;i{RtRomBDN<2tMb1IlO7_S& zdcskWlehL?S64Vgv%5IZe&tSHsEP5VV7&@T^kl3{z?}p_>V3hr(atlei4jr{*xD1; z+#uTx+}-Zo&tPx4hHWXwR?aQKgWiC$Y>o3u4BLS>mZotm;3a`*gz}+7LP-ZX@Jlft zRX9?40jU!!9PEsp&Nq()md&Jgx%h3zcDJ`cbwOHWXxVw#5Ax^2ogypWVk_Sw^Zcy2 zzy4?$O>~(jJxb2>_iSvMNGOeUI`H-{_NVb*T@=cR@Ed72NH?$+!+ZaikNJP47quWz z4veANj@lQ118mxT@7p=jO#cwhy*xMGDMn}1<15!>1Xof@T^sVnwJ~{4 zzQ@~Ax6j`U3-{jp4lto}zyVRrJ0QXHb(Y=lrbTbvZ9AeoO9L8jZdRZdp)ct( z(I19=o+=JG`(Hn10;7$D507vvt+J$=wDcRZ>OP4c_LDazdew#vC(n29i%V<Qi-7O~ueXV0R7tPaf&ARp|DI@kWNYq_G)q(Rvp}`-Q>SJlQ;}aD9w% zs!vw%*jdwy&l#Fuj>ssNB?HY8bx8*CPQ9Ork%#2VLFS~Y<>MIzbkX#b1doDbT!&KK zA>(#iiW%!|xXQj4WlL&=1n8bk5AHS8p@m72%-fSZaptu`>dL9X+`5JwWF+huJ!H{= zg(cX>NUgt}w~gaQM*rYboRJnxHB7H#j zDV#QRf2Xw_AJJBQ!r1X^jpZQn(Yr)q-hF&=+|m$s5Vy6L+XQo2Wt|1gPRcZn>bvE~ z6yL&y6XD`p{!v8Lw?>}OSUm~WO2;G>aSSv(Os1pMW^OUhgv)(WIWBDivl~>|APKo@ z*hWTfW~-Vta5i@LE-U_faunpB?;W?_NY=YkmU8o@zK~jCw~V6U30HiM(a9HpvSpM0 z!t%D!FF(2N<$#1@x*o@{#YO|J93fWk7qDZ1mMeAiZ6>epvbe@mjb(ZE)$baWEqIPi zj7g&%;^=o$nAwMwv-|Hw?Ayqv?oPoT?j z)3#zTRXWOpEfOvWMwDn z1+$JZq^75q>=>AB1VyE4S>AN8YKDL|WF^tXs^2$_mX#<^tZZg`ZN9>%rr$5U=q9P_ zd!-=d;kUCGO0Zv0P!Lxjc~%PrY~Tt_Zx_Uf&a!MW#*B3p*jWKiR>(T0UxQT1 zzl3rr;;3_ziE@DtKofGa9xKumNYrQ*vYrNhIg>{(2!Y#W8?V~HJzg@lr@v+U@2{qJ zzqmi5YjHX0jT^NrcN$o2)y65!T`YRuJ*$1(9(fL5qBv8!^{#X#a`BL0p|ZS5GPnqxIc{&km#Q%C4jM$2N);0C;%xM%|ZsIpwyt7#|bI3h-ar1I!wG*wR?~Ee?R5zEPh+{`=Lx zjl)@YEW=!tC69?IoLFe6U7lr2y6t+#Dqopho?6ynm`eo(SOd$cgc)y<`{5_YURZWt zjOGJz{*93hl+bso;R}vd@q7j-JXUirlI{BamW6numIKvIQkK=xZ20g-oOy>`%t`pI zp%RynwN_ql;4(_tC7M2IbL!>EXKLU7gEXjr?xDP=Eb6q{BgF>o3T^EOLj#~*{;*i- zr|Sw5GND=h{ERCAqyGxln_vKC%Y<3@0z3T<$uM5tL)0i>c~(OJGXyA(g*_e#mbpH7 
zvU!|Nc!51aRQ^p!sV`#mrJ>w*8!007r7T7$PL7!nn%a4-qM?eBfU!$=x|ro~&TD@$ z-Z5!2{3a|q<*tJQCBtz8nG{!$r+F`mY}P)Ia)|jYjKnzGoj=X{G&3+iHgP#2><6Fo z6xE4FSWaWa&so-#Qz8ntpe)1Yvz*Jr=r}n0^s0GPsz5P3)8kc_`=r?DYjKnO1)*^+ z&a9Gc5;t^v;j(Q?Heo~Wdd_bFX)1m)2ZBryP6;h{s<>H&V))|*)|O|jj2(MDAN6X) zCT6)D-~}gq80q#oB-owG*-+|Welv3`$QV)oh5F5CQl-a1vkR%i>j9dc9Wa&#W$Vje zvmRQdhAAHpyN8IUmPSVCX`KRFRL1f9l#UIS#6bt@tgdxCeEReOs$ z)jAcOBQmJ#HjNdX7rJD#uq#&iuvTnP&HSAIW$~?&`{lz`nJmxEE8>>bO@n@R!1J(c zVNw<3MMHnfgZ}Na2>Ium=|E_p)CxFxA%6xewoRUI1pm_Feb{a4C<0A#LY*`cwU(|C zp~}gldGcF<`Ca|cBV$|T%gy!=dUMkc5!dkbQ71p>FYK8PbOtoeJIoy1>&@x&npxfA z*Y|}MEYL3q-ww0tE-BQga-&517HK$3oY_@BS3Fu6f!7|+&o#{;W^#laoaOV)hihK$ zpx75MJ%IKCbAp8?x!sv4&2&hVTSTBoqv;#&|s}{AqR0%69EBR&x0Z!LKmyPn~ z*Nkg+{St8g-fIgQ4Jpf=8xQmoe+%$XemwNLAa{BFuqXUF)%2K`Z}P13ArWhh%Fx-^ zQ-Q|o$#bfG7d}(RQsqpFY`T*m`X1fKbqo|do+dq+OI>?nYjEBW$QErc?XYiRTUwyi zKu{mYx9XP=fh>o%Iv{S1U^LMljT9AZj{EuD zFL{db`wvy$$46%jue@#NzJPthdLg%#?$dD^yAHfgSUG8l&EAjR!?0c&W2*&7_5oZ^ zkSOkv1yNaSb5v#ue!5*&JCkr?@y!7hG<*hjtnVj#_uZbscVY3Pw#A75!rl;{l0Cf26`bVJuUa2}u#!eSu*8uV7-+ujAVgGn1AOg0`SdJ<~?SR;}&}z*;hMiad%u#4`$V-Gcnw zT+}y9b8s$i7uIauYPE9+p83>|_h{c&9#*kq7Sju=8iz>o?bDXVceDETgtwV}oTuLl z&$AFUzXZ62v9l8O`qwVKoiVjkG{|y3qm%3>zs6)WS;R*Q-Z zdiB&wOsutZ+fYHzdLD0t#4r`q>ox|QM<45zeXVU|K>xJ1#S#P7A*?8NKo_1;pYC8Ugq&yb0px1 zF&9t_o2I~Ak)hghRjE8ctEXed+%E{c`t5y1T<)MAgkwA`d4aa}hk7I#7A54itCvwj zHK?8LIYb%#cqz-#FaaOm+D+S58t`Q6>hGsyOU9Ti;SZt?y^I~E-u&7zJ@53^INY-N zJ&syvruxKZV~kKORapPw^CbEtw;Jn$JAXPLiB{N-I8r5U)q5|&asV~JR3^S!AB8im z!!{ee2fCUcQQd2fIoMVA6h*@mz6W$G)6p5b)*oiRQ{npN$-C_w$G&5c#V4hS)xYG| zr{rVMBQnYzSUL0+c;VLl2EDAI1w;I1Y{^oPAx8=YP80zqNZd$UWG0j&1lcNJ-l;%~ zvAXGn!!T#0#Nx#&ZA1j`Y(OgANG%Rxochf?0~?OfdCflY^p?1|Rfc>=kVI9Zj_+Gm zce^AWt1}m9AS3&!{5a*}iOB zbWn^9Lw;$FHfBZL^89IDIX9*qdtBY&BJDwRFocHB!Mo^XqWnGmJW5$}cNhgReolUQ zUp$gN^I=7i3`D}1#a$MhR_Zj9)}c@hI|{H)seC}Nj;-z!w*Rt#kr{Z1)<;t}J2HzF zY(x9reqrUcNyg5!No$cBvnK5qRgd5vIVTwja9H6Bn{g$dBYC4v4Im>=1ttp(` zJsEKldAHXp?v_ChA~lm#af_w9cqyQ3vLl%^=Gr~s;a)8kUzlZ8UdSEqUOw9y!t97+ z0z=w9_U{33-oYfcq-p4{X_i5&2{4h%Ul26pk?+#So2&Jdnfy_xbLlX8I4fATrr*f5 zvRxyN_sbX8X0gFqhmU!BMNCdmJ3`#5z$k6_2jBfX9jD@}W4|i_Jfg9H&|?srI5(`A zk~bnUqwsW0%&O>`CY8xu8vcG2oVIo990O9$8_Wvl{b-;%bfbhD0y2bWV9&vhWqv>c z36G^s+9pL;o+f1$NQVkdws-<2uuB{1B+_o25Y;RbFn7B)T6GyF%!;Q|PtOTB=eb!? 
zkr7oo!7wuQOj4mO@Au#fTMm38dv*X@dY>z|OxKTn0IYXytUHcMqQxh?UL+mm9SiuW zmEH10?`-t9Ir0F|;gwcpj6Q!{YGGYbl`}G9R?%NZG?T7ZNjo|5dKgw2fs>mk@mKYJ z2K|~%1Mbt~Q<6OG1#B&(ZR$Bshx;CLnsaSf1#JmQ;K{Ol+neS3RX0DY^rSRb7TSIn za?7x=W4VH&?B_|*{^ggtsx&3IfZa)f8}ChIZi+$8Uj9}Yt8S}RAQk!U!TZ|FVwUee zN7p%Pek6GA({ELmKd9fK+v^jSl#)Cn4po98`K^Ro?|gC!OIfzOhS#*5^_cc@H&ry6 z7*F}=<|j6=UCX)L#}TRJ#ex9k0&rJ=@?1SuNh3L0Klt8&B;p*1t2pm>R+ma*coC!> z+sGe$N)VPOj-`O&9vA010Rcf6r+P3<@*{NS5?&P{oV?H>j2ybb<$4@9zn zzP3;K2VdT45R_q=Ags!eg#E);z$z+KcO}95-JzTk7_A1dv`HKHkvj=IfMu>oPZ(g!B&NKWbXAABj1|Na&LgTpppju(aERCEG|#}B-&KS|#~B{$_Q zcI`dT2LZDklKO*h;3f7OjFtC;uSXENh&TaW09ToM7ld|qp<_1UTtVzwHH2pcWsJiZ z=2N>DRXO0diC}kf^SEv^xLrBYS~nCtr+e8-;LdUhJgeLvd|hOolnd_=7&`-Sjjw+2 z(ZQIgsbNcXfyUU4TAmFUvz_0t&i9cUpmN$Z1lzfbB?I;F7C83%pQLpt2Ly=UA`&(W zH>zwC9XV_=FL(OC*Sj~%DoP`4h;`iyfd69 z@lKOqHSYN@m8RsZ{Bhl0>0_MB@eyq&{xLDud%Cf*uC^iKc>i8bJrT>` z??f_g06k1x0`?yAMMGI$G3FD1sMVzqlDt0XQT5OYz}g~D$`czYoP%W!UtUaCCOB3T z-{WZ3ZEev4XYVsrA`4mZXP0#~)#g>PzfMb^3ViTe#g`k)&p){vTzG_8KJ2vZcE=&l zZX(YD&rSYHe?2xmSC1V5wNje`0umxdwHtaUm zOFP+Hwq`x$vvQ;I*1NmZ^UWIPViygjMD3LcFYs@?OfNW6a~d>_kC_a=oc?-uE}~uW z61Kjru4T$~n3}~_s{N>}Zf-LE5<02DIoiiNq{Yt>nd2RtV`R?cG+~`JItctbQ~W^K zC|j@p`oU28RLHrF&1UA&ipcvtoCL0@I=$I%dbIJykN3tiLRZFRn>R3h0!`F3d1_QR+FWCMn-|yvAUCkAE zrWyR;?shu;UQbAOp}6Tq>o~M$eG$1b{-nCC2jDxonZvek{(jyO&Y2kVVE}6S)gcbCRN;&badF=1!Hpi< zHDw`BYvcFtZTt=rua_bx02;S%eB3~#%&$16c=-yxzJGEA+cXm|!|V7EY$2d$-w8c< zEh~?Cj{#0mdxQB7uay-3LQ;>}0Y=^6a7 z#LJ91vYiJz#^hR%)?5u!JdU6Xr%RL-46Zy@GSOh{T}4d(4b6m*r&wB0-tCv{AnfNz z#@r7+YeH)mAMX-rIt#nzj0E4jw2!XJHAPz>2maC)lWn&6!RJ`~gKxp{7Y+qn(#x_- z9I)spclv4;7Z7LvA&mbQl%He7jUvs;P1US&`+o54Ry{t%sSPK6t)t`E*+2M(!-W?r zkvqqnk$-iD+`6?15SDWYPKFl%UYxNl-~+!i!=F0J{^0v~1;OjMi{f62)~a*i8C?9q z_uief>mb8bf=%Z@*M5V(ALXXc{pBb8&Kh`vXl>7Ze*EwCiue7W@~{4thV`HR3ws&I zc&y41ju_qgtE%vrMWEM)Y^XJGUt^sPr~{Q!2%%JY+d~^DGQ$w@*f`9d2-iV)`spE# z4F39z=f#c@1%e+$EMPCW!4j9;fFJXQ%w}#KW3gavs$+y!y|W{8pew4KD&or$pbME) zJZNAZ3H9_8UeItQKa8WB2ra@#%kH$j7sxhUpsFkE5#Y+T29mZW4I|nt-2nA1bd_XjeByHs#G8Y+NJkZp)ojsfk8C4ocui zB``Z?cn5dD(n)6xv-f*wBz{JVajSH7{DK&RgKimDE_QZe1Sx)fFF?cCTm(GwLPsg> zV6zOKreAG@$a9$f3h$TPaQVBsye>druQ@J zBqNA-Fp~i{&q2i^YpkmAi&&wJ<ZAUh_WkNmsbi_O+UweEjJ5+sB*?9zQM?Zdt z?k2g*UN$MDUPz@YsZ%yyX~YCQCd|h+@`urgso4ij&*wfT8`mf{l`bouZFrTjT0Dol z6z1oCB6+6j4)$`JRq?ACXLYeT$2Z}M*lww}l#mJ6QvXWy;FG2V5Q~R8!+sZZ;vG81 z_Tv1tTh;RVbD$YBH2)8(;y^P|j(v&cayJZtpFrD%v}$eiTpQ8)IW_GvZZGJUb<2o0 z5xHRK5BiTKVbN9WSDUEA?q(Y6&Hfo9y_M%o@7XSnn-00v`WZqXPvS%=UqXl(wdzKu zD4UPn8z;oXBy`o!>FTv^&vG`UD{;ZjL5^Dq=bp+zH}nW+7yf&P`%iCC|A!y2Zmg;% zRo9m{*VHyuG}UGduJx`+tSEaed&#(ZDVKSEvyKi5r29v^Ivd8{op?E%^5QjQt&|nS z2tlyqM>(RSTP*Ig%SKMq0lLm50eLjb*g>5gy<=x6bZBdx%(~H(zaPVcE}cWI{$waT z;d-<1(X|}w`JCh5j>J31cIW@#^N%IJ8=?eRw>BlNYOA|zsgo+=C>1_C&9dZm+FQEN z_S>PaX)MLksr(s{Dz?KB0p1~Xx-!rP<-UgfjuJ7&uSSOw9;CD7CRel4+*lribr(`cHwt9SM{YZaSx1MNcVy|xY%3kwA|ssB=w&*0P$>cc5#xijkA#={t=G-%stm_jTMJ+qy5$9iuV2G|j(+ zQD>sI-qh##<$!QQvkOz3VfAUDW8=}n6TDwuJ6$+dasuX}Z<)li=}Jz?xV1BRx;^Ne z+WCPeIj60&bKa#~LGt)DL;8Zw3wd^Bb<2~R zjSghr=G><6#94gvBWV?%ksb}-no8kn+VT;)>iAc}w@1}&lymFq&&#zBr{X*gA0?*Q zrf}mbB~GWp-|ChuYPQ}FI6hO3UJJ=mFvpw7TQvo)KQb@KjP)9F(b6(jJRgQHHkNl3 z*k=cIxSe-@X-=#ArbU>4R)z_6Szk7LR<>z;U3!&S3D#k1vlt7~E{O}ndL|sP`or}3jd#X5 zZin>)Y!ubL8!6PPWxoruxi%2Nl zEScNGr$XB%FGT7Op$+w(<30WBnnSM-zQNrdnv)VctL!-=hktoE?u6nB@pIR!3{Bh3 z^RN73+FAws$=dH<7vGr|w|Ww%-uNh3EX`BK_84BfYQ{D%&!~&?_RTk%bg6+t&8}?P zMA0sErtdRu%cq8$%9F!Ta0g&+SRu}>e%-w#^OGXcz1uX?!s$Q#kYWqkX%Xi-{z#zf zbf3JMNCiEMl5y>`IobYR?@I=v4!^fQhddQde@M4k5s>|K(O6TmuEC;B%kX@J9EIE} zieHw*o8BhkuX&p#&nS&8jcQcdDSar}hkha@WlmHuvM|0QXJVX}qTao_)+9plR8p=U 
z;xrQKbcHt>w~*YISZ6F%p;?-x0CNZ3W3UstKpa6_19wDiHPlgI=v-CIgbLYWgs@~7 zL$F3LxUFNZ84qhMCe=5r5={x79_I$t>u~h--cx;#{9@aP5plOF1vSskxTFt+y)*DF zG!@z<-dQMnT4`ox^ifI6#`h8X)!|0f4z?r`S#-kkX3h6bh=xR6krBal=w80%xM%Tq zLZJp3cbq7k$5CfhGv#TM&&v>WPx%iymzk=e%eq&6TtvefJ}@SW{qlO=1}93z)aAy1 zqxZV~a(Cr{Ragi8dYv_8wl3q<7t+4iVx!wd(PXnWU=hb* zANk)FqWZV2m;ZpG-hb#Vu--=A5hKP!=zefWkVxg$9VewlsuE@hEs%%?1CrXWh5%BH znqlJFA)!yhYJV0Lddu+2oz9eXk=wG}2{LY%UVTGTTor6fzjA~6t>ao~tz~rV`<^RR z)ks$GvAG9B!?zm6#Rrp$*7Jt}cnbmpYds+lnwAGVQuf`t#Ed9B9f9Q!X#AijQ!DFo zzt&Nvh7off)_O}?A^p<#cI=DMCFx)KEdyMH%~m7Yep~GP*hTc7NR78FBK6MQzif;^fe1b!B zVg%zN@8n$&@b;3mM5w>SzNwQ@vB4ghCP-O!6ehgeC6&l6p*P3|@vBd^U{`v}p?5zi z+0pXc!RXfc&#@P@Nbo0m0E`cPg4y2*K#La7W?kMG=5#ra%0jS@1-O5MMXsCT+y~R> zkf{Bo3^Gl}@bI%%nK}k94fM5nCZ#2MYP4->TW(yCoxH=qD7V|BZIuNjv=u%*|BmrF z*sCc*YWsGwf5qDw#mQ=^!7aURC33L!-pGRC0@|{o{~QpH-x^VP`&1uQ&x_w&?|l8Q zAu|ThQ(FE6HHxr=@o`=oEE2Zcgb};V&jbj^&of{lE3zOz_ahDw7ZHp>S{vIANtB@UDS38h`%DfG-s1^ zRs8+QdulKJ&xu7^casTMa^%=YzM!= z3i+_lHQQL85Q80$#K(rq?YmZ7tmu_zF*p?H+=YMoa>RTpO&=A+T%Vb&1|gB2)W9@1 z6D`H;BTp(gcsh>~niUtN#hM52Gj=~ao~ojFRtd1jNLBZT?pM6(^OlL$@d$KsSk?Hr zd&UADb;>rcw_MA?UAk*1?^&Uvx~O9So8srz^60FBl2WaLe4&ya>iD_xR;9fN@k3do znQg|I334^jVfiZ)*^?jr8fG_E!E&yQB;&$yENDAqf2y#V2!6Le ziTvKy=waNRb#om}&p-NYk~0MaUuf3ff{U!_Bt zyn_yGC5|=_oHRkrpxO{e(N-h3(GfrR^eMfSzP}rL+rTO}r}&KRBX*XZl)@ep`YH&l z+cQM=ai)3q`N!TO;Fz?+e1y;S8A>#`$z#@b&mq}*SD)oom0K3RNXxRmgbaI&d+E1( zaJ_I*l_R79uNcr%M!T@)O0UWGH}B&dWMZQpq&HjE4iSh&G7pm89Msjy3n;tPRWA6) zlj6}2tWYh>?zJ!PlB(@T)ye(vom{EHLObi?$Y88IG?(`IE!{`6NYz&$tmyJz`e0Op z+#QQ|3;qq0MMWOk$vr-A7q+)Evbp5o`Bl`4Y(qnO181;LEk1BzVsT}@RgHGY+x~sw zf!1Tl2j`71AY~JE4;pouCs#j8N(vtL$HttsOcdF}N@5FRyWk|xDK_p0-=^z#(H)ck z_M=ZbY#RWp#fRjFACT5gcac^gyjSUqpc(TnZbxlGmDdHfKVY%lK67Y;X!L_G@hOfQ z`+jQIMa0*;j}z+*c9~)9MuXw5)&H{WrUX-ls)6DHyV zv*Xv5KbK@B<}3Z(xufy6_R1MkJKizhn5;X^i(gpiBzua$q8)=gj6seDCoFgK1Q)7v z`gjLGeD*UpVm99mI^8odn11lwFU`!=Sd`}-7yfRz-3#+m{Wsq3H&o|*!$e@Z|IR^g z&p)-D5*7|)>8{et@@9e!c?Zd$OZCYXq8e?=%BJ^4Ps&tS(WPc_WYPhQgyW8ul9G{p zT=Ut^=3kp{*0eddt%c3E(D1SCH19uH%)8MN0b|I?QggMD45W=eyUfM*O`dV78(OWX zH*X`%9`_Y}jBOY&NSTH$k(q|=+Q&`g&;pEZGYVR^Jp1Km_r8L{2+qmF!3S>o#WbCf zwIm9rO4C?BVs*iv0$+UsIKA{81nH>AtoB>nZ zB#KU-{FUnHE2i9V+BOCVtjbI4W_DBhmkuNr-EY{$9F*J6Qx#mj#iFBC>#(Jx;fp(z($RN7z z00{>k({axAjysr*i|BVE_ml^VY}Ll2m%ytR4$c$rR}Jkd;(!H)feu_M2B7sb{L22= zbvn*-Boq45@^5=$AxHENjSzLjnBf-og=ht^<<<@e7X7siiyEFo3QWG|aR&7nD_w5+ zJGE7>E5#?p*H`vnZ3}VV`skm0GbV}2Gt0C!#Btf`dCK+gtjKMIYD=k@6hE}ts(_1J z5uL)N0|y)x_H->rNSKazp)MgyQBR=S&4@+iyIwL*K>7O8JTGq>yZ&K#x|pcZA0XZ_ zcwPQNU%`Ts?~(6cjU<(NwbEYPRXmU0Si99wYFwn#Zk^>W|24}|RWQiC@QC$0r^4g+ z0v=U;t#^)Z>p1_No8iM5maJJl?%{(7+{mMQuSXQ`gov;l;bJw`_s|a9lM5Mx_TYI>bXov{no0F=8yb1+VX9u z>IYv;`nSymxW`(HV)SZ5(TmdYwMVrdo9jZ(lcg^Wm}J#myXjs9b#ob35krp-za} z{?mLZ0+`?|62YhM$Zw8S3<>wKu$sZ*t;+bcsmz0s7^6w5{jRP!Tkm?hb#2p&;=|hO z(O+YNpIQ}rEk8D$`>Kx#LI0&w_TLHU-}OGidMbGW!A$9?tvBE>O+?E8OE1^eDpi2e z&5@_}E{b0+Q#nT;u7Vw570<+N)EflE%@SfT=UtmchhB*$jni(XczkE$5t(m&4t?Ma(nSmzp4rrx(|c1+}1j-?$BE9UoU8uHCL66$T_k-3bW3 zO=^k(BuYDW)eut%6Mg43E-UXFdh`gz_mkE#UQt~RtM!NiA1;E`{ouRV$2ybHDzN$t zkP@MvgQiCa_a84~X;Vvu3@I{3p;Ga7%ZvarBIg8F;-@er8CcZpaK7`sCiuOz52+jx zcHT1e?}zN4+9?X_RpJRO($n?S{z{`rd-bI4yXWSye0qD(>Vv#bM6wgs`85Z@+7$g% zKIcARKgsI59#t^j^d6-)oO2{Hs+>6HneuD_^>dA=LqX8{<+R>n+6s8jhy+9t?Sr=8^*)%BC zU)XWOSVB>)0JkxOvE)`;SbBTFJ!$^nyGIB!8!sFPp+)blOdCHEZLkMu^+yg(JJ zoRK1_mONm$KaH<^YF4YA8FDh6tb^GfJhtumKkL>1ms8BEzd{`_{m5zrM+Jz9Vk@z& zf;t~?jL`3(Nk6mW|H&3E<9XVmM1BP({RHBGB>_2J`|enOT`;!4+ey~^##RK561 z=XXb-`y%30oe!VX5MY~PJ>(_iFE?drVfHT*+BQ=N(3c+HA)4HW%u$X=5RJbBb%1qo zab{`>OT)&%j_V4)p?-g&r#__XSL|vFNC{&jHiEpod^gGa{@a%?O?Jz1`@&T=r6v?s 
zRtR!6#0ud9lTw)kpr|fei%jUU62qD% z-@>*2(BK~~POB*mEOoanhi7A`NR4UcDUeV8i7P36_D;ZCsXxW!;Hw{(=t0)`V zUvMiJnJD{|y7tB4X}!FF*+*Yp@`b|aoKrJy9Y9b$tEu$tx`Cgw^)ba^=NH9}-f_lB z-i3kddX|{GJ~NeWGwXTw`*z!?Qb2U zg|O7EiWb@R39oYk4TWAa$D9wF+|@UvzAH;|L&Af!pex#&SP<#m01s_R4Lbe`|NEUy|y+D@!bk(Uvn zDa`)V0b^6I3$gaIOp|umugmzan@<+{a7m`-Zf6U|&Fu6wf>hZK8Ejof6uXIO4n)!LpL{Omq* z)cvX7Y&IG3jZ+iYI?z*I_mCp+>i0(_-GYeEWZ_7A4hRARozE%^q+PUhzuKk#JKoew zIF_E6Q2MOL@ksbBWqb8=j*=zy6N(9`gWD)KTQ|ZJWqCnaf>`xS8t`*Qv?qi^#n4w@ zyCVISQAyR~^M-TLo4w~>L)vhkGkN9?Y_A`DU;YBNiw%Dz zbO+e+;PK#?tDoZDHPb5wVD3vX%O8B+*p0S60CYU7y(5yFEaxB)k&4TzxW3qeh@YY%nQKHpV`9}#E#`)K27c7(Za}3940vOABb=N z=iHt>9B^?h$3aR7VX4%1s*WdY)i59&k>ah#M3x;}odv(jyfXJpz3DpbB#GR#7ld=i zN;oK*iB4{nkZc9J-c_K=I?;xe~ZHJ-Pr;|{sG(4imQZig!*jCS*KpJ z5LVoRztp*e+(t%i4L=a}dWdBNM}niN#%dp3V5{GhINlvC+8gL=jhNO z5*{@$m&UsH{f~oPZj7pY&;umskcXTg5JaWIJRns6X>5lob$c4sf*aG=I!eN|?XD(Z zP8!k@4hLA%t)q1{f~S$9SErk_bd_h2!W=kjg4Sd(+A%6EJfuo-fC{dbom(oy2f!)| zwf*|}H{t)!{01g;bi<-{BU^AWsnDPOOf;^u`htOnL=E;pb(6b}LRB!79^4o1cNe|) zo%?{h?UV9Cc)>hRDfrCFt`1k`PDRjNyj`jG)=yi5&#>Z?=fQ>dg7I&GGK{z$Wa=D8 zG-gYZ0qFq8NzNMY3e<`<#jrXlY&7&+T@GCe?)fWgn^p+Cj2YeJ+RCcLk9d{_9Y#)w zq^8A`oo1^f70(}S_X#}8zZuNlkC`G>0#ak-7PcSeN*cp>}N(^a;<6 zDFN1_{uPhqpu3ckJgdaV24Iz{5?Jyl9CcZ2CTt5l1|iQmj+Qu3KCY^5ha7jmynWSq zt&$EEa}1z#Cb}6sc$xGo>e$4*9(15|Am!ZejFk51*AWuts-HO*yRx+m4zbE$d{R ztdspN$xcShB4o>Vis_pfbH7*5?|FakbG*NQe*Zjwyl;OvjvNOwuK8Zq_4%CV`8iMI ztCAJ5;;v2FgF4!QFZrr)FE*g^FdcVUuI&i_Y7$h|VrT6OVcSjX8owqah9f0See2GA zc#9`Wepkhu=Vm>J5*|sBV7aj&-B!M%QQ;nBB z@lJ5ida}}qX~thDvk8~qA~oA~^*N8h&yD{$QFc!~nPLo9Nkj81mqPI@$?nwM5@>jo zo=3y`rvUO`yHPo`Ualo(F}!YVAWa>qyJysE`HFh>w{t!vQ>OlbySGKl96gk>e_Qna zZI7I1MLy=}{ipVF8+;OY6zaeO?E*lO#OM&{O^5^&XI3l1g9Ao0k&A6oL*XeARwMdY zsU-sd=?LUK2fSFTM%*Q_{s69`@!hZU^&?(Q`I>&Jd9yAB5mykiG=XTl%56b6@;3!_ zi~W9WYL9Auzoio2+oqXxRn=nKY^K6#*)pROulC64tP{7_!I)#Uw67d7n`QX~>+rEN z$ehJMWR5TKn|=>56`7*oJ2keUp7CGRPfEqlfra(XbY?4pjVfhQ_AfFJeCvB5#kkNO zYLiS+AS)=st}gpZ+*0qQi5m^phIY-=T+2~*zL@)yKa;Fc9Hk5Hm!xv!-m6}=zNTd2 zD9Zj~YSSR2@1?Q+TvbGVw2*9aMAd%pX_cI(7&^(Iroc__x!ct;vlO#Yjn@_qx!2yl z{P3y9zkB9`V7X~{LDoa<+^@6n{|3+)K}H7vNc|+ZI&0Kdra0x?d=4$H#hVSlJ^XI? 
z9$@YW$Dy_81&f#n2k@Mvgyf#$QVeQ;bHE25x zHDt4=wY5+`y2%PA@|ABxf=ZUgp#ZUvXYS@oY|E_SPj5;?j*7H^TT_F`Y@yYV`O;Ht z;L`&Zt#+alL6>^tHqoK8^NYZcvv;8fE6z6@3TxidaU1^tWXJ_pOk$EJx;aN+Ts=Ra zw2@B^1~9H(#EDDzGNG;JXNbpbC<23AEwVzHRWfF+tsdjK2?o&H_~8|};DH`D=ZC1{ zd^gBBns5FRwzV7GNRjF6x)X^`*oYEaPLcWU3~`%_-2?2-kJn=S6;kSM%U3Ko3JD}> zOfYIPWzIE&#lYuC0MEy3ybCF#?Vw&@+R~=;39rWiX_UxBf)qr4XcD!vl4lzr zP#IB!vS^h7cXDpDDnDoad@)|ZuzvkoR^iXYD2c#3;%|^s@%L%Wax`>#Epg@~MCl~Q zW2m$h(k#eFTj)}-jWv;HGQ{vva8MO4yBno8LGc<1^AJ8aR0q5Rtchg-n+?xHc4U#g zeD_cdBOFZk=$brw`9>ZCIZ(h{Ub`)NITwSj|=6)>ZnYfBqDuHTH=L4Bk?M0t@Q49fG5#scw4V2Ke3SihP5KtG_y zSAk1-8gMQEh{G7W`TCrQ)D7ZZn97>3WuLh{cbtFv8C>Eq)RS3;)3oPkO@GMqDe&<4 zqL3Z8WAzKkiDU5i=BpWxXrj~THvSTL>P%_OdBjIU?oPP+CE-Ipau{`lFpjD7Of20C zunKX)Z}df3x?xN`r1BlF%cdPziWu?-v{}}aWqA;hMH2PT*BM*48ZyXZTLsD5QPVy| zo2+paLG4&5qON<+~*++{H>J_4#~2()$IQB^I{(A?Lo6v zQF$jFAs|rEf9vbAZVO+=%d;v)>>C}2SrcaU_lo=@{}NfBK8t(^Ui&H*K4ry~22Y(U z4vf$bbU6SmKnk+X`a^cXas+xu+ulVt&*x=0hfAE_>gcLJNG*13CHITaM4al_N&)Xq zE(_MtC(HGFg%<{V#|2ECo6u|ZWD0+a!N4ZK2mf z4f54q!Qq_db~kJ)U)6i|j8X3;j$?j$=@Wa+peXAReUDI^+Rz zzY_yjjaeS=kKOg>?7Ro?Qu?y3My*BEo(Pm275e;FWqDDDiKlLssC32?_M^m?!kaE& z*~7t4o+SX8RQqM1Jw<}!kGySNCxIXj$tLj6v58{Zv78I$gJHI^i}ZzuIz73S=kEHm zC{j3@S=#8ezs3#kM9%%wNcBm(R9VE>epdM|I7C9XXnHhQEq-M$XMkG~rMWk-Ctq>6 z!bBB{?^MXh4KlIfMD#ij!BOo7*&(8b($+2weys{N@nuJ8-etBlLfLWpUsj5(I)#sl zd09%^x3duTiJ|$ORaK7hh7+kw5ctOPC48`@Fu}!}Q%L?(NbvEfuvAx@8np1h#JCEk^(Y8Z zSeV^7%%xHMIW|TLM{=kc0{79X$fY_!qdAS&w|d*>OxX_CRi5Y(UDMxtev>U`W$fbf z`gPW+UnSRP-Yf`;`_7Ytz1hLd&@9(sE0^YvoI20wsJR9#oOmoQDUgNU>EY_~$x6AX z9Orq)e#D)VMJb@4+f&5o)xsvba^e$AcCeP8s6GD6 zze8e2_ZyKK0;qSOFv<#p_lIB<11=-)HF?5Ya@~)W@0{FO*#xOhl&GyV9e2SKdJ)j? z^>S$k-a0Cz0%zVGsbBGUG5hx^4_8>n%|d&>>YJLgYiE(8)MxCW(u3tIf3?Terqu8aoB}wteCi}QxVL{nwg(* zJ)C7=?(+=soyJ*<3n#()%>uDZaJ+Rf5LMwgA~P4+z`x=WKu{bF&uw1TLEw#AIpVp+ zxDoTlrcs|XcW?y>-skI%*U=PUXV*5KXJr&b>vAH?HIdlX*oRs@2Ir>D%1-R>~Vc#(S3Q!O8XaFB5D z(>gL9O57v+G(YcV)lA}RK1jF@V1UEJ8#CD!^$$}aA{w<9*3h&*@y23*L{s;4jcQfy zA8PdK0SZjTyiPh@m3OvrY{M0+ZCU<)V@uJgSR}5-S*7v!yEmlG_BGj;9xXpFJ7n}R zr(a#C-l-tHbqa0rFmV1<7p31_%IRCwONJ{dEB4fMVZ)=Fls`#BYp@=gi{ua!6uID# zk9o}x9ggR#GU~J|+347Rg|_8$>86^@DWX)0Eqg5`Ea5BubMmDz-~;}JD2~@U)AHEU zf1Du1$AbrHHO+~9aP=NN z+0ItQx1v5KVgD6UeM!SCeSy<~oz^YA6m_mQNAI2Iz&SXba_w4KjMa_?X#UxLZ<9K= zYs@`=azE_7hfdPPie^~7=wiRv^07?EJd~e@I=8-RMU+@<4d;M@lC;rkZXuzPk~#a> zacDHo*-SArGk%B5zHqjlR&`!+3)eE7MXts}eOEcffBVE?<7m4m+9*B#S(07+>EUn>G1gOM)#MJh z2dk>)srm@9DUB8X;1{mffxj=BCP8Gbp%w7q9Md&$zQC#^zLfcF@2ckPUE9p!Ku^?2 zeWlIU>B*da%J6A6ZTR_%Cq2t)r0QEhuj^a5@g?90*-U2bx)DlmPHvtM@M zfb^@KxbxqTGJ&3_CkX1eA3a%jB8iFhnz@>}_d?qp`bt&v`nz8>Sf!x82rpn^wXvdB z@`^2_rS-xk%R{p};x^a`S=q7#G{>jAtHMI1F72QGx2NPxcPAy#hCWG1E6cu1b80oY zNY2eVxo>K@nK&0j>JROi2~2LLwOZpd75uexbrxq9K80_Xrv-e8)GlCKW6hzjq~PF< zdr^)fARv9n)v+pE$^NUNqT;Rhj#v9bP3+=sIPR#qB$Sg>f52heHS^VwO8C1cBncay8wtkiswD`;I+C1>(BrW5ILCA%0f34o?3}08m-8fuDTW#qs z+sV;~?8UJnW3>W%$+1w=5_xSutE)x0w+CF_iyslTv+p`*`Zn}jkEPvHY+yG6(A$NU{X#%;5Yb0o z$Bh3g)T8_xN1B?re(!Kpc|yhN&I(CQYqK7p;{Nkh(x#QnNcZ<{U)4_$h6ay6h0I-Z zO<$19e$Y8qX)fz~lvN$(bH3uS;LACBM^sZA73KdKn!n zTpA+RJ#xQuZDvI!b8G4webaLldFYG6PAa<+YY8#p>hjDzIXfGPK}BEa+Ufph1SO?Q zhG`WB4NeA5Z&iL6o_x1_AmrksmEP#PQ|tNx!_O^?0>%zmW*)UHvdmO@zU#ckw~Qjq zqur`^rl`;7J0wewc7SJ8tw65Z@R(tZMZgqFFhOHxp&Ml5TaY?QReU5XxApv#dE-PC zU$_W2U^d!z@LaL$`5R3+O5&3<6OQ?+_A$K|MmMZ}el?)GyW8Fw>ehK{H`}IEnRxX` zVx^pAAqXsX*lRCme%jd7sAeuxZwg)-c_{g#a5Q*!#rEL)=eBgCE4S@#hk4tdtawo$ zWU5!V=_Z+ry_j2bh9uankphiyZ*Rr?E95)o;2#B=%!d=oC{FOxHwEStIJNIQDeB^# zOuza@lOTEj{l-r$4#^o&^V$cN8+)baL0uAfLrgS2XL9JdMy2agebI(*!Ej$x-sy?2 
zzq{IqkKgL#1tjynUvVdr=o=B=>gT@cyup6-vdCxd9i&tk0p@Co`xIQZ`SKckzm5gHl9l=yNHD+vq3Kj&<$To^Vu%Mkk??#VS- zxKGWp(IK1NdIjj1LX{F5r!R#KcP6r~Rne<_e33bU^i$`f(UQdL*q;^=tIc3W1(@kn zD*Z;&Vqt%3pDGb75I4LoZdObPF1k8zwFA;l>I_jJCIwi!+#&#GVIZR5?-no5>_qf8 zTL(}RXMsNZGCMUGFl&z=(N+{Fg6vBGu^xcc@M#laVZnBCWznr$yNHF zs)zpy%Kop~jT?Bf zJX9F5yZS2jb*B!B+kh}Gwv=EmYpV&v1&rkbJds%<(4~#$+H&Go5@*SeJT;+<&e}nr zNBHMB4$L1hVFM4FE>`#P`g}(T`X2-Cyyrz!Go6`LY+91 zU!X0@Ggh7G)_F*^^KoohH5f~-u}O$Vju(wTc+R*yVc2AldG{9U!a#V5>!-`@vH?4@ z{PN9qKhuc0RrdQSkpA(20qb|@B@4uU;h=>{Dwk$SAV9Jp_0;HLvqYX9uH8;z|BkDU z+Z?Y7JrfcV-oI^Yc~E2y1ik`=>>r?N4f=)LpftXq6^S=>CfV3zj1*oM^cR+}#|vC4 zI+<~RFVsNTIb;H&g`^jA4h^-*^R1xZ2e*Uj*A#I%Jf7w9WwO)X0f)!g0(WC$v8b~M z7D=$n7ySAX5Q1`(1Z!aV(jwg8tpb8bA4iNiu(CkGI{?iiYdf6BA$C}^G_iXHLvAI} z>wZ&RC=t$&IDoPss$r)YrIVo2$n*KS`(D#x@u+LnM`4}{44%cEpNZ|@fd#LTjM+;# zjaS`zT!q){X()>@)F_x@H%_}HL{4|DZ8$WfWE3d2a^Z;YFu%Tmye)%8tA zg!-ZYBM(OQXN>DL_vS7pGM@7Ny+WTdO5vCIVc6uHDkD)Z zm$qwhbNp?X#o=4xRDpQp7LCUI3kh8zO`5ZMQ;0r*d{BHtlce1wKEOzu7?`NPgrnXd zrTDtDTIMXZ#=xp6c%b8lrz!{C$q=V|_(k4mG>mWYC~=tyw9?KlU7Se&lDI#ofg%ly zZ~*}h{3M^@?S6#@gD2-YmP9x`j_D#817)cR-lUncmf*8neWE4e#i9i4-IFsmX!{te zkCFP)RJpa(BdOo?`iXa{-tMGrd=20yE>+Kf53lhLlz?as`c@-26@gz>0IbMK@1aSy zU?A^au8~@9Dewd(BB#ED;v#y{B9xX(dcavD5G~Or_T>w0&jxMJ%yJ~K7k9a~ zhfPGjqVQ#DOkjw#JiD;68pSgg^xQx`a7HPPtO1)8{Jxy*Xg=~DDpC=MN4aDk! z9q9e~8`QdZCZ=cO0n4^qcJ%AxJ6{wGve}9h*sLY0bcLA1V3j1c_=|UEpCD=2?GpjW z^FzJ_b6}hQ4ru!!4&vrkC9+VuBo88f%7DZ?2Y%=^xAV zp^d60eqB?!@=j;y z*MxdINn$v{*Ew9Od$}g&5$Z_NNS2>>Ym53HPYk1pzUZ2@j!Fa*vogj*@nxX?*sAPj zcy(A4qOr!)y?pAX+$V9y?QUxKclRFDTF91B{xkddutm6LYgbL*amo>)sCpry90t0} zSS^^vJV4i=frs`XSjv?)IKiz48w}iLX5TMAJ!en^(U^EoM3b^8?N$eFeLw~`+e?rc zuR?v;Khw=LPSIX+D8XP{XQCr%e902Q?)%`-`~i*{ec%uN5L3GrO}t5rjkHZY1QFx) z$s}d{8rYUoxBRb=q~HX`Kg1I^(PboP73tXCI)3wJ_*B2uB`Bz9wSUy}02Eu5{@j0! z8)*G4ngFoni^O2sM|XhU#{47zo{YN2mqN_S#)iLcah^rT8PMvbf7~}wWv}hEP=g=(^V*S~3s#p6-)#A!PP?U7bedaU7^Dm&tnVk?ItYF9IIKZFQLz?WCoO|WC zT9)%_5bGY&dTfray^24=Il>}$HQP%2%M|`)89)cVizdRv+sUsVKf5@0=edv27Wk`U zF5>OS1bW1FZQ38mMCC(mtz#ypRf4yS?Ui`*@&Fx`?9E<)(lR}e9dtqM-Q&+pCXCuB zFJ=N9Y;ZYN1sam6vC(MLm%=}FzzW6@nXR*2v*8WIM%&kuFH94rS%4cVBQikeeMK*f;p&ZriS!e4PWh`&yr|d;B-&>-@&L#!Jq}o*u7s z3n{V8)SEl~A$aJKeRB|P(Y40+l*7Zat7#7pJ}o+Ce6%}Yif~DA3#fF_AHR-pF_8Y( z=2_7{CfZKn&z>s<*3l;EdFQRqsAaGAv8)a4Gg@ltZr2){$2az^&;^b2p8dzUcOuWb z5zpTu?HOUMNagv6&bz!Uy1%m?Fz>05Kw7`;PQ+28q7C$hZU23u~8 z=3afVK7TKuAH&WXvW|K@Up0LyBiH-XLO;;(}fLBT#?*1A;tW1G;mcX)BELwsw+fLdWq9 zOU@PhXSLl-QSF|78#8@_Hi}iv|5wPNEo ztAjc@4Qk2hUFl{KCC;x%|Eaiz+m>Qi zg>m`gD;1eDin;~IP@_wB6pO75)k{BT6ifS3pCb7!o&7o-v#>cZ%U+6{0$o9S7hq2Y zCI`5vttW(oc~`8i&NaYu*ZWi8Y*tC!#-1T2i!Yxo#6>wzLvLb9xx8iAq9AKq=&>JEU5Pg-D+R~7?LbTGW? 
zCJu89Noxlkj!%SOlolhmZBFx4z*_@6Aasc_8P7ZE!ing>?k9}3oqF}H7Z_l<>4T+u8UnViQisOwq3m<5`GpLPlIlw z`OlAVxuE4jum%JaOl-zBzAy2Y%K!7bHeo=9i*;c(pTfno0$5XEfiDrXIs;3VmUBOg zBfz-YHD4IKN^vPUXks+q4#;4o7;k9ME1<`uMH?OITyS4(GmP)aK9?P4h3MpottlQO z!}wBdJvlAVH>spu{_E+_`|b;;nx!{Tiv49XA8@W-1v7Btg}o&?j_xB@GdGpUkXB{YxbAHUs@Hf4#i;Hxr z@UYVHw=cp)T^e~>NpRE6DUriu8KhfShGvwX>amqhrQD@jrdZe)D7HB&U^CxprRS1P zX4Uu?uDp1>f#6GH9Y=5CPxB;!3`6TvQ+x_vmS^%YpzZi*@IUv=cvm=kNwHz_Uv-KO zTBxKg&FmPzp=NV=_(NB=uNLc;^eIcK)U2QB1+l~r>b8hoe2G~3iL#7He_L4#KW@Wg zi_i$p_98i-Fttx*!|kP4)8VJZrR`KIy+Ehu$EUF^T79b(*9{iq840@Ders(Syh#x# zn{sq!^{4O47@`a(!dGqBAKjPi&pUm!Ho=iBI9grfvi1Y}v+lGhRQoF{Ngk~32xH~X zH}*j@-5W4{T$}>xi(z)+k2~s;Uhl53j9ILhb zmOj`=p27AF$Ty<`i*^ui4}%@DeR)*7aO*LRSpZOQ?O^RTB$#g%K}}){Kb=k+MoKMt z?t$~mK_Dj?8#^BzN`&$Kg@MxbmY#^UO-DtVUIR4h%svs}hgT>v@sh|Jhjd;MvwA zw-LI{$q1Uc-%D)0h3B|=FRgZ=3z0ZuxK-1(2RFaa?KV@&L3Wc5@WlW$f9z{<)YPOt zL)O0`vMvH;ks@WDYu(&j(=u}XQrJIrX4$sxxUh|H&R=TrYGPuYceA9u7B7gzzvEc$ zzv_J4@qo#kb@}q@{#kjm;P2d0o3FtOxeqEmt9)b2mTVlCt}ljKQzHiKHJoeVYwk8^ zgz8wMBZ)=}!t5c@se;{kEYkfHqKO+S4;hV8S~6@1&c8>AU(qurCwiU?I#cMBsI#)N z(N8#fIpIrA>}yGtgl6~8uLEdYk>8y9PfL{=Cik2_VKZEARNd~?xLW?O$TIm(?dM4S z#^w3H1cL*MtkXGBi0*}RUfj(}j_YDfURg3msi$(8j zL-^CN_wf>tGX@^fbdRK7yQ+6@mEhce4WOF=oeX)PcD>jVg z5X9x)&t-Y}CRcXi%}MWTM=CbSJEfZ&q8fN7RSAITiX=dNaWa`8M$_Sf)5pJ>N z+2DoYx!ItTY-*f|GgM*$R5ivIIeBaG>&bcU9Su1kUSjc^wM_MPnXI+hp!4C?uM8Ug zT6R7@VXR#oqrD=1*$JKU``w~F=?3qhkvGnrvrFEu^luG;Y2@X1k{J{Fr%zjW{qeN8 zNZrCsdFk3 z(H&zM17$dKN00z`uB6Z~E3JMeZ(r(?h^B@RL5|o?BSRKMP`V35F)ST=g!xuE+J9Pj z_HTG-7AqQ~Xa7_>IK*25dvUYe?#Fy%RJ5(-`4xLKh1^+k1Jfv-ZOjF6y> z##|csc%gF@{F+c8#K*+uT+ZqT|C#3)c4(r-to@Pv%VD&b$kgNl{O+;3KK%wU6)|i% zx`VvW^OO6&EGJLNoVl@su?tk8pV@-FRq#+H=rjRhP(g$25b!aPMDN#R_hc@}@h0pE zJUM;VJGHmF?HGjUwASn~;99;pY#PEQO_I7~?fQRv)|51F^+p=|^pwbtV`qWq8eajN z*~gMVR|L?Ok3*azz{WnkM{nEK(l4RIn_j*mwbCCWMr5)m@NsfGNe(mhCI2MHHi;M@ z!SXP?N?hSlB+;Ov3RE9mV-I;XO5~kq6U76?*valu4k{BUrP{XfpxIoTnx@8RVczE~ z7iOwO@#8~rD*mcj&Mvd5dWs-n^Pm4r*c4WYK6w`TwwJH$0ci0o$ayQ@w|#BP8L9YE z_=m7S^bq)N1$eK4#K~B}Ka9<1eBramejUE@0zmUac=E)__qg1Dg&c#aXOR^c$cZ=^ z)q|{G;45-CvMlw^R_z6M_(6%F5BVGFI1X+0s{U80KVFZBdCWJ0%gb;?vys&Y*OrXd z67jmMGsMmu>DZbx-80qYuIIKpST0Hl5zLA0mH!IuLNhZlR1v<^h&3F_@#dOrrM4Xt zjQuiPNp1-Q1ygbkK%Va-LC`s{8_3_sAwAkyQvlvx4@#HX4ndV%YK5OgLEtI`G}BSd z#nrDNVV6&eeuX1WOyN!ECh!-Uff7>vW>A$5mdJdHd37%@@2QlB$MiIzk0@NZS{YP1 z`ybwhm>tKjv~RyO+^<(r-K$XBykpN*VSrpj&nUvuEW|_4UC`P}e*ykDD8}AY8e6|c ziieS6?&Ql;~?9s$l1R16e1g;RCts9u~dxb-@Ozs%8h7wnRV^-4UF{>?56q5U%$cG~HIam0suyWY!H& zu&Y97pz9mQP7%3rTd!%%Csfp-ts=Z>B@9Y^v~Zg-u9Io-m}AwI))xgNo}POL@lw!i z&*olGVAdxmDlBv<9~~4S1>-e|lureo0h_@o1v6lpl#(zvp|2Q?V2FZe1c|KENnnUq zRS9JAe$Y&Mc>qS7poJ6vYP*|%ekg1@ctJKgS z{noCmly2fuu<4An@F=)(Zw7$x3VlfYjD4vsh0=nm&1g+^0)pUhey~PS~OpRH945~jRBJ_k7XoT3JKD{arHN`t~Z``^Ow?M zC7yQ?cjv2SCj0<8`KF{R#Pk*wGwF$jK0HP|M5*|luxWX#eSdaZxGU1w;&Auzj(==b zey_`Si@pc6s+}c#6sN|Ea2T>p=y}ds^&Ag3 zG6?OZtA%cs(BoK8$KvEZHI3qhHg zlmu+OoK_1u$iFfmUuI;LJ+2e(Tu&WhCe2~B!$o2gRiolA{(%_OQNUjWXL_==49NG8 zO=(eQx`Ua!bPFi>{=HemzD1N`%{bnJQaAJkB{!nIk=$$?#n=~kd#ZmYsbK1#ADej{ z_wwI~E@(X32Cc3By3?L*MWdN7+-^P=j1g<&WALHux@z9eY>u;e(n330PVdqSl267R zq$K|c>M=XFdGG0tet*{1?SF+_pvK1`5zjq3zF%Y_-w^rx3%G_bXa{yS6Q+J4uo%!(PU}N3g7p@9)=EZLp z)uXGEybh!te5~kQ!}|NTej%a+8H&ta6}e9I9r!rGRfXP%24W=Je#hfp!&7Rpg)&X! 
zN5Em&N!blQU{T}h7VG|rxMpdBWiyhU#hz_HKanliIr=H}g68sG&Pi?>r*1Qq!lo$&{Gdjz8ZH!J@(HhJ2|tGa3+e z1+od%IuH3lY>N2hw?&Hzx99NYC6ojfBJc(5eQu_->nz~$xQFOg^J$Dd$Lew>)8 zr1gi;se1+X#rnz@x#$B3EPnKJ^gaB04mHknPKi_ISS zs8w9V(d->^{aaFO$78}oF;2Lh^T}fo^&9jlO`>Gd_T{o+2UZeC>M}|JK#Te~xxDUw z7XpaB7h?dLnb(@+Y^aEzZad zYnKS<9M9_N%z`2O$&vti%j9p0@)q4f#J_v_2hIT~EMN0((1>)W@LWNwN}3|+JG{GpF`?`-PSxizAlH9A~3|!EWqr| zI(hffjNvdr;-klu8&a|%w%C@v7kQZq4Lr;eC_V`;sZa3#4lj2(yH(}gRjud?7?Avl zDf0f7Z!iFzdg|n#Sn{yL{<(v+t@gu=GqjqAHqRqzts0GjRX^N}C+)An0UjNFe?DcAJI#NJIr};~>R8Lz8 zQeQRYkeF)eoHcGbnB>JM;`KS)Y|aChW==#W8F9@&xe!<0A@zm4>rWZNZrbn$c01^m z`$8gSQ;v14GK6 z10R9#9Q69w$})%Ci|9 zsoO1OJXf_Xmo;_Io;yD4I_fhV)_=Xr#XscJj7d&rU7);wfjOLN)af1alOUqq&f@3` zs83kqhr^DJmMiETEPlFt$_!+q*oQ3)*!5^WZWzA&;8e#>Htz69UOlDIOQF6&WsKXqY0^cHk z83mhaw!02|sXouwVN{J;Q&ZDWlmCs&WzYMuFQuJ#3Bfp|W+Bmmxb!S9O6*TJC z;izF#WORb7x7Z>2&LIOF&HJb6Lo>bU=gg|qlFC)(Mk%)R4?$)cCvp}t3P0If7)&); z&EE?Id+j9L{+#c=^$HAOAa=kca}F5J(kZ~5>}ZqWAmi(;l(43wTI>Dd6V9V*BjH*g z{TB6Y1}V90H2RyBe{t-LtV)cB<{ReIwkk~L>j0M={iD}4N4q8{X7#7-%SCM}O51HD z?VkSm#i4LjC8xFN$k2f9?_D<>)h!DH97+_fE`E@-Po9g(cT7kWcC6R#dEORA{(^jLft_87#(IeKnWxYDj#<>T{xm-Q zHXuVSEx_;REH@r5hupd0TiFv_ZD1CDFA&uH%6c4W8t8ESkjZYsK%16R*lT$Szb1TRs2Xwr;p?D%ZU{ciOzmsO64lk6rZp?~lH3{rTbdv^PT?Ls`h9 zeakwG^CuSC4!0;hy-{F9R(LdLACh4otAOXb(BW&!+kF1k=#$W>b=3VkLFcx$+b9zl zyU4B3?NV~npFZ{$h%SRh6rF$)6O3SLY`YQ2FSOlX2?qWK)R|ozN}{$_McDR^yWTdw>eQRYWaJ1E#bp$o86{=j=lQc>0Pt5Q4o)NIg{yH_x!_)5&QHI z%YM_sxi!sh*Sexr+J7WMpAx(y)}iBC962N05?GW?Jg_)Eb2z&_5*@=2FTRG;?8vq# z@|e1YOYfc0;VAkU-N`M@6>IlmQTEhf=n)ESg-u1zny5p?&D8IwNQNIyuH?7wp&agi z>E)}TDdiyb@M*}Ygr>x@QK5DzKY7dFZgwahPS0q}{E5-&R94t}dD8?)k9O>A+bUVRxJixc?O9V; z&PGmtt-VIQkGG1bfq4VN4OudeZKZe6AST|LfH{w>ajZ}H=0Z>OAB0#0{YuLal-Vcb=6 zK5bHGsVn9{z!VJ^&FZ9Lej^4lCX0de$A+`BXm-`v29BL=fM^LV$I5Uoz6QBpt0h|} z+tM0crkom^n(FH&f?9N6y0qWU2_GwOebU+t)l{kV%*pLBaL$3g-r?Xhu5>u$BScrf zu#XKUl~~&4mRcLG%*wsHTI3{^ac#uXQsjB^yD#3FW8GdS59}`T46ZA1qt@mb8ul9( z$)gh+GDcM>H}YE7259sP%(V`3%#RsKh|$SNol$~U*Af6M5EhSN;bj zb@L(pu$Ivyob9Q}FF$z(+?rN7p5h}{T^piK-e@%NCcS^2P?`L$+~=zIDRZ$smud7s zv*FNEr|jb%(iQvv7|r|H`qO=y!+CuZB|Q>*;F@k|061!Nrq4~^?m01^jmi#o_K?-6 zz5Vk=Yr#Vh7p~4#1NTfeVT$$x4I-L`1~tFUfX*guHi`HZ@J>$qZOi(JpG`A~pO9O+ zCnrGvSdISX--Ox91QY|X9Z&9&Uc{vXifIGQllP*ne>ghNBL9@@6Z|6H7Q;yEzhEL9 z@ohAr6T|f-*BS~+G217Y3keZ1IyeC$xuSW39@#q>zP&7-4 zX4GPO1|BKz9pStB5?{ieJRpM;Cmev2yPKp-5-Rj69WIQng;}N8S~Mg()Q@cBX?%+^ z%+U-o`*6eRp^)GT0kCZC6Ky(JSUn<6pvC_KKFuVj0P2#GLPQb{@egk0qkTw|z2`Wo z$;Hb#vT^H__`U)hqo>KUfK5)E(W@QNHnCv$o0|?-hMm?4Cnx00{RJ^%!*W%ITO8Qd zLc(+8R1D-!gHI=Og+@&fJR8C^x*F4o5Et~#pgE{qZva`TwCx(w>ld@Gr{&Cl7f9&9 z)_4hyVh@<_B}lkg9T+z*cy|S{S^%1#SE&7Y_dQ)-koI8mPPjn1YSxtjpR!l;(fd+*FuGAyAP++tw5+`^*dHhS87*<&E6~N z3n}q%yJoqKzfKqg_y-2yF0@(%U$)#6d`txXpXgc!h@Jp*LVS~P!XW^?QmSy?J+o{s zsptz}E)8frv)6T;{b&gyOSZMGjU&hft_zB$wJ;b-{I#*$cASdWmmVC_tvGcQrJ#6RXyM|v%^*<80i?9ukr;r5HApKzc;|5p zZYc;>$A+-U4?vS(a*|0x1ke!pL!r#svPGkThq4|`d z+llr^Bw*^Ou>|05;|`m0&>fz8p!dj~c->j#9)D`2Q!~pdCHPxV074F|{)5nPe0GS2 zd!O&!lCxQ#3$unBC4rVV^enPB8x(lV&jU#eAvBf(Yec+3q@och84wF?xDv(kaJamc z-69XocINAj2u}0E;Drjl1^jCUnMLf>+`7)|RaP6SkDDmse5frA>}QdLT$dv6Ja`eH zu$4+C==s;>T@qeE0QH}^Bv^(F{;#j8Ghk0bZirarov<)8#TMy6B#;$v*1;xwgP7Ca z%q?bxDV;#kSo92hj9cX%v%9&DY!dtqnQ>Z@~|M(WSVUK{)> zWD>QG^uzVRs?-t&sHfxRXZJe&@poYvRPB&o5AW^t*m7w z=bjOm!?&Tg<#Cj!I%W>na7eL*9y*=*LY(b}2E+bt=E*Im_x2_P3!$k4^jz+9Se+-w z-wu;_R)j%;ZUbc4fr+Ib9XGM4wAxOkruT(LV2l0e;;}_jH`d%Z>WTX0Q+vfaH_yCy zvZ3TA82=tM|eh5URz#IJnM|nYFokmRPGf@Kd`*EZ3 zil!LsYY|uY@hl<s1T4E=}Qq&ssts4g-8h`lrSa9^n2WUkMG^@KJPci`Nr7i{5a$M!V+fYoKL&& z>%Q*m`Zi#`94_ZTx0qhcC}w$8oa6!*qd^W3GF!$Q%#{eI`tpQbEyO$bk7oO*GFOw0 
zo)d=mzUnEtR=1;ojh8pO(uCJe-c8b7gDo6CX+I*Zy!dG~MU#Fd#r z<#u~P`9+w}nx!PrXph&Qr>Rdmm~Rq1G~Xq_x1jG-VbRVJmJWS}uZfr(K|$2&2l$-@ z#9ZI7jrW{opd;#wfQTRx3-OXQ2j8FtcFbGuK?wBtV>fAOZ7D)~Z zQISE`Qfovt*IY#~9VOH`BHWo?;*+(GQ6nsSJVq+MXq_jR&VJTj&}shCOaGQ!E!apD znVdp!^vHTj?^Mu|HT7NysIe3?Sgn;c(~PG!tP-3zPNl{f;x~N$%ZR8nnWf&n(CO_C z;s?wKN_N6|XW`|g*y87Rf~VGU2eB@prIlK<8);3DAZW z>r{Y-1n@5^gGSgV1?f?SAUcN=&;RKavLp6Ef!mi?zipD#7taB$v^O~T;25ItKzt3{ z;>CF!i#jsaPK?6>3KFF=OAe+I1VbDpdNWW^Ck^3*mCUk-H9~B>e;uBOUrRnIG*1F6 zt7$EVkLOBbM*Cy9rR&(v13(YMwaM^%P=1f=@+G7HT#7^dCH7!vogkrFnF8lp46NIh zC$<2cy~#a1$LIzFs2M|z{8ZsT(j_a|*=?$l<2NphmYkOkv+smyW(u88U=_9ReaUUu zVl`rrwe{BW#r>k^QW{xT?1*&YHKD7}=1vh-Q4#{%jDSYLviwS`=Uv%4Nv4ON``jGX ze)%dT{KD&AiBWinKi&6KK^Y?e1zq_q-7le11}pDAqb9pxh40^~qBI)!`6xUueX@mb*`CZtcS|lwV$A3J-|4 zj~87EFTREP0zh%SV9+#MJdzj3UoHe?#5YVBwARxG+a2Hqp-y{)$v_Z*Oh8F#@h+f! zHy4n4+G^U9^d-BCN$E}KO1bC&GR`a8bz=C;LA5m1d#+t_LnA$UxeuGy$jyT{x1vlM zjqHUQ&44@UdWj1BRqQRXme!Cj@=$j*wcvD5VK^^8m6wEWaaO%uMmqk+UA+PhClCM- zir%=O^JzZfc;iyR9sh#FR@Iv}p}gYx#N&(%lSKa6{&V%qr3V;ArjwL~9o% z`_|!&Kq$cGvXb#TUkOIJ`bP02>y}(6TM6QP{iQ6@N<@>5`yfs>*!G<9NlbM_1u$>u zQKm$>M%u~={JLPiN?Qi?HlstIV4*$_f*t?#m}}Dx5$0dZjFOIXXYK}vo*1yw=&PF% zjmby>N*&n&#?k;BN7L9>?(t#+sA(3kL?e)PxDy~)i8Mhb)Km^euT(^ZPe=}qKqxSg zL5RAlOQMI09ma{`Bs=j0q#uk;B>oj(y-R80ePjHri05)&(621t5MeLcpo@qgeFSm$ zfDcU~o5nbpCQKw+@EGy{*8=q)c}TK{Df3+u1hUaIWG$0VT>~{?rM1#HX`wVxT3-re zSEa>McovI#UsQbrYslnOVFi{%m>k#G6@`_DxX+l}Crs`+@|j))lxUg&MyEhURA(gG zSeChJgJ3{}1yn6Pb}V|(95-Y>B+ZuQ6^3Sk*6^ms$j7H>I({`7v^Ob?ajTlyaZDB- zxQ*Y2(M^=q$bc9v3K7vI+n1DLnBDJ(ak%*{lNOY9vnL)H`^m7 zK#g>D3`nhg&0Iua!;SPn`Wpaf6d5eJDO4?dPM~OitW!hZ;R<9wl$U;3%-Yr8XKLLvv^O4rD)CK_YByBz(_qM8fz{tcz zX;1f}{cF8Bw=Um&Us0*gmWC(!;N*iZ>#ZUZ40v8sk(YHK=qQNF;;@?_gx5myvW^!0GM%r)OXq;VUM+F`_>#pt z*FkAnbL%fr?ijS2)z(;ohIdPYhA>@Z^aJTm9?;Q6f9*2)|ndlTuw`Nw?9c42#RaIv+8O_ zq=^~&L{AV6H6CVr|I#uELis`yT7o}fL;MHQh8CRbVrhx?NuQeUhqFdnk%Pwj zCe+mRYVtlvNtF?BYA+E&^sGLh@A5%bEmw5;?w=^M_hY|ZI=of{c4KcYZN5Jdvo zEbPr7GbW>@Iap2|ZC&t8r22cE6T+P_9y)D63lU~EEuPq!mrtUf?rYdeGDojz3=QaR zHF(6suAN*8{`k4-@~Jl93JfH${-lA1ioq&G-bQSWzQru>Y^W`&O8T-R7;-LGd0I!7L7ebwwg<|01 zGYpWiNBGl1<@@=5=d`T+p54Mmf=0W8e44NeQA(83k?m09L5P#HbbuKwUQc!ursa)v zMKd)G&9pA|&*M>yW{Vsb@@2t+%>4F3Hv;81VJyw#*`UN3j>xkJ1yfOml=V*IbjO+M z-jgp2NUxWRVR)}_fYZ)b!D5T*J$GMo3>?Z!(rR_y@!nCs{&LFP zshVmRlkcZUM8l!c%HC@R&$7Cg`dJh5*+&$yn;MkJ7-@FH26&D`ZKKQjFxOHxkZlB` zV3RZr zZ&lNHl7lf2uhkIEW}k!|A-PB>ZXwY!;1Yu$)3HLyk}9 z(W5nu1T!r_byC3%-;zez^bznGGOsq{UnPvSVAo473a!sQXdX~cig{i4$rQ~60@vkM z{;LwqE~q-3kdcB%jQkU7G(pybtDa?g5!WE^0CCu3WM&!c;v~ezn)-V2sIiNh=(rYR zV!Y`#v%U`rv#mTg#TNCe**SGpcKO8x5oz7i(_f~_`+^3WzC4$kb9n|XR*e@pL4zDi zE^}}*qSzr!JPG?3pkI$EnsmgVDiatVl%NATy>a5~hf`b@TL1M&#WLt38cgW6gXwmmDb|hJcH)-QTQa3x^ z?6}U)99=1_EF76xPnv6?{4U6kDe08bb6_>A#5c%B7qQcTbHzY~3m9N^0knf&=$|(Z~by2Si^7kRYuio8Dur&J;QS0viWPpBoM0OAy{wmPDNKNZ3S6} zoWY*Li815(cvbQ-_!AF`9q1lsD%Kqocycq}S7!8A2@*MNFJ%|+nA!-5Zn#yTYJzF^ z8hSnny5^JTzZx#L7J-8~vI=Sh83(7PCYdQH;3Q{0r?jyJxQ^nEBpufZMsENRE<$$t z2+v$j!^CIpd#S1(*P{6N&orx@up5Uy1qdzVl*W9x>#R_=FF%v#uyiddX8#$)@4UwX zd0lFeXR$@mY4&=bb}Pr{r2$UiMGhBqY^?vB3;dD4S*G+LU)N~*W0dRC)HhLq1|zMM zAggPop*AHI|1MFKCQ0OJu{O!x>Gki}dT72Qh>s|xw^|!Nn1+gz41g+oNXMW8cyLQ)}R4r7PUEzhe)kTq)ge4C`V|Z1 zOKwDo02L?Fi)86EaI%7qZlNXFDSlmkc{IO+|Vf0Y@nq)plYabAF_MuT^LNChya(m~-&vk1M$ z?k{&=z%`W{B`uEJtkI6^dgVlx&2g3HG^d3<3`M$xC@Kcf`AyGoBhE< z!v-@FRT`o&TITmFAij%!xwBf}!Zoex`Cb#pHxsJhK1z<4_`ZnSiwh1FE^qsEkt_q> zDBSVnfpsW%^QwkDq){a%i>zO4HleMzY#LkcEm3z26neMIi;sal$P>CoPklVpTR^aysfDbf3+LWoUk*$S3VOK8bnzi;E}IqAe1N7b+2Q5IL#;(e-}6h$!d453 z3FKnhCKB5&k<%4#zP(d+ES`~c!@h=D(W8~Zx$|tV40Hf1z%lQxk@!g;Qa1T59<&ZE 
zgY7|p6hq{-+?>1W=F<4`c)p7jsio@Cjbc*J_pmQh>Z3`L2~C$zvH z$CpJ<$$~xay7U>7L(|;X>u3qvi}Akk#gc7=f!Pu@o=#+TpM)TI_n|v9fi;vF&h4)q z=~{`mmQS!Dw1uH!=1=j`#{IYAnqEK94K)u=Zrt^{JIT>eX^>k?nN7gawY{ct2Zo~4 zk56BpavbaO2y^x`IZvQjFZ?n9-^lVXQd_b;a3kITVDSc!PP>B~e8PRJq{jp(8IP%`oKT{*y|zaShiwL zwKDzaQag8}r>M~oWHtEx$S|@>HEbfpA__-GlnmNiLBk@%^(EzSuuV6LeP4WWk!TCX z;!O3qN%pOXmY}!1;u%&AhT+dXYOI!Zh^pppMUcvLnwW{3&W&1yStbLb9Y< zWECHL`Kq3}aDUGmIbf!j7bKdV%U2YWrnREl8k94dTd^pEOBbTFs@Ne?(i! z4y0}$3+R`TJO|;dVK8P59%|enArMLm)dP~mUJ{QU@=oi$?5Ea(F-{u&`muYzE#qZ6 zwiHL}hT#`TbTE)0P^;l-gTRkP-3F^^6>6&FReq5U_TKpVNVWKk-YaM zjO9SeTpngSR-P#M4WL5-$k`H6JY@6?uzq!AMUq!ekIb zaB}~J|6#-)q9Pq*%~Yvl`om??H)rJpO5zpBJ2ndIGXtAEKosOg@OS#}`Smkj(eE_u zq@Dol z=L3K!KJ`)V2Mub>!wYcWx!DXXze{``>G>(M$*=>f)*)6QXv*{=8%Zdq9{&Xts!NC+ zk-O8N3%t(ACwfO}_PKl+CWDsii$$)mUsL$u6!dz+5;nnqT5kcZ-~@W$hQ}o9i6UkA z23Qb#qz_CZ#M>ke!nmY5c@FK4anjh&Qj=|*T(~b{~G7ebm$t;-wZ~_H7u)q=~GFHvKXjy^OS$ca&3BpevK7aqdwY};ExuGRv@Nx0!6`J&L$(WRnec<|ueg(TIPXUo5MG*+lu;l`k#!J{^tyrmw&^v%0-6KbSMB7n~q%t`LO}?0uOMkoOy)J zDAn=kiDC6#0}s_Xg1dsyJyV~WAW`D2tU`MuY(XsJ*(BOq{~A%Lj=EWfvIA)cyU#T| zFce)0*rp25wXVSjBQ(A~S4@pl2mS^#4Jdf1g|bE%a$90u0Mi@IcMC#9ihg1EXtEZM z6jZMbr|A2=4dz?7rlX)-)}wFUURg4zapQlOQp}-~Xkho{21?Z7D#0BfKPm`?3xR(F zQ>i^M=mNV0c0p9AfL*o`WdBqvocX}bf@eUr+V?Yt<-h3W++qsf__j;#$1gDF*I*9Q zV`zdis3=-_RSRm+UEGeYm}!TSnP_kLXbCA%^r%{}v!07l@=M1mOMHa}19$=R_Op*< zBTjMVv9U9th&9lc4cD2%rv`oV%8MZiwXDNVe^uQ`?qPv#b#9j_= zFui6U9LeUT^8-hcEJqew9&PKX`W~_rTNutMH8Sk54%gprTVLC!5lmCP=+l3*vQpiF zX6mdHTQdJys~h5OseKVCLwk9}UE|V=fQ@%~pyV3ktEXr1;yl~;>&g5b1(J#NSuay} z=c!@twFfHs4HQJ6;WsV9^3Xv?&MKzT_*qR?rg|8#51ME z`8>b8=jesI>$20i58b@im#!#0`6>JKyLZ7Fhb|h)?ySb`mrK7qCtqcYx&OB2&xG`| zHuWwK`foH$n9er5atNVDBX4Lg+W8QuAfY6u`?UKT3`i&-+H==jgOo6YsFqVb*KUL52@KROz-44Z0W|vn21$sJwp1|(zm{|KW|8?@nsRi^P| zH=$PBy5p)jJn9N)J<&|h(5KkJJtvwf&^6{*&x{#c@RhT~KxCouwdq}sc??l{lpSZAmB z7Uc=p9tFPaslDRE6O}Yo>@~L=?$QQ|`B(Q$=wcn5r_}aX;LdId*&p$0R!C7lkW-8& zArsbxWtV%24Z85-)}@nm3Vs-_lBV#{n&nGW!AzW~8COZsO2CY+(AeCBZ2G9@KVPrd z+@R4t6=b7m(W9@m*4dY3dq2OCP%z$q;M1<;;qDK^n@OH$@3$4nJXOk&>FTQ}-S5`D zppSCynOgAQe6eT7u++u6SD#n=qPL>+B+JEGYQEGTObWO*qkr|6NbVI`^{>r= zpjRPhvbJj(DdXD#sDgZ4aO=}kjNWS9d7|=-)TsAGqrnA!n^J74`(GvROCiSxq*oPm z+DwfeI+qYIjy7p=!ukWdEQPsd6WgJh{eCmZm(;w<`#1dCIx>LXW)(LiImiSAB$ern zO2-zPnd(?C`g#)7D$3gW={{7cR{^2mgszqvxp;l2+$^v5X140k(J~9gw*I$y6VB36 z?(eKUj=gZ={kp3wv;5;?gqy+wTP}w8G+KS_=$csAoSD*{dAd7fH2&KkqJy6Y($?iL zOpn)_@YUb?|C9;bUQHg|xT6;Pi2!_`K|rxW0F-=sL^|+su{~_ycA%q0UGy+(6Wq+H zH|KzghO)UKDWJa_g(eyMlwex}ijl6N+4!Z2eT8*Tj32)YV6uU6N zMDlp3&i%|E>Lb{8l+@^qIG`bSHu4__(?7Dc|KDC#op?}8Yr(@l`b2K~)Hcva0xBwP zXqIBzFEJ@YP_H&C?3?BXSH9}2;G>(4DSF+k7OLj5-m%?XW@3jxLQs;v)Yn;pWBe{e zn>LbbgT>2^FNkkBWXO#>TVS_0?_ht0l)gU_-cH~7Bqi;M(Ji`qO1sT4FtG$juTD+7BOZ}h1(wApiBV;@N!fWM!lKfKZ1#Y%G(?9ahm#7 zFKZK!Hk1kp4LUXI_R^1gM<{#LGZHU!Pp1q|YmK@#6&i+GI!kVewZ4b_>%c@Z0VaI| z^lmqklgk!eW-}a-LX7%Hxw*Zd+L8q}jU$i;b8rBWFXSHGE%9M6!d;CHvu_f&4?NU-tYsPuc#tgY=!$j$J{}4N*53E-$8FU|Bvd%=brV@Nd04V7& zz1f4u!b|sG02NV*86dkPYHsa5D@}BVUygJ-&$VV9&gveGw0`cJVNg&6HSOT?Px5vd z7)bVVJJ(s2`s0J^j-cN`cfd>yCH@9|rh^c>?3FM^1`2y9b5<_(UnFPpAYAy!94ZhQ zNmH2K_#wGwcEuw9sNf(+vli$&n=kJtV{*xWe8QzJp8z*=jjR;x8UB1+wFb%dHd@Fm z9nr5?RGkpEo2(j)dU957u~h&R)0!Zb9JedZ979$H!ay^LSVi)snjF}Lhq`4XHEGpc zD^&2w=+EJHdQ>EZSY?gjnCe%VITJNT<5ihRXQA&vT8e*${JA68 zue^k604Y(M5+2dONY>L32T&-9GzFTE^g(FisF-k84yrF2Q4`2$0}sgY)5*2_pIzQr54`3-bwUp$$uV-tJ*2oe@S=)hSZC0LGqyd z&)*>Sr1Y_W**53^)3*nr8i5h8G>M^AP{AG(l>rc;bU}xG?{uD~g7mTZ8Zt%Tm7gWF zi17B(zDe2E8|-G;dIL_GP*f9^6%26*%&9&>%);)>v4(D-_=Vw0b6FwHZUl#vQz^rk&5~2_Km>1wT65p} z6#u=sT#aC&Q$H)U!1O74eD8k}0RNZp(*M*y%5f|uTT%@&xu9`TiP>I%x#Mj8Cs6js 
zu?Qj(%q%HOB`Wn7D94w;HPCI=m&!G`K;P9>mZ#vi5yUOm-CWvrvOH7HTi&F*Y;SMY z?VOcFTsUa0bm#6wk?-Len?vW%*|Z+MZZ&w=!tAkQbw_fxp2NEORTX)5kI!B2HhtHf z#HzJ^q&pZfCX@mW9inhhtU*5h`*Ck!;weEx_TIBBh*}biTe-{b z|3cxX!1Ul1>e4Z}wkJk+N6)=4{Pc}Gdnt(R+7_UeA;U4KG79LH$6BslcF*ZH!C`t?HX)cP+MREq{Nqs0Xw z)m_#bk{TOM2N!<~4C@Ivz57RZ$~k5U{u_Ds2pZNdX#Tn%&k%M^v4tT#{2R;`Uo|Z8 zBPK}dyy(pMMBJ|QcSB;a^Mo-fslxZi-b_nmUFm6`^KqxNFTKUIebZF;3jUFo&U?90 zexlDft@Z4zm33MjvHW3i@2il})KUx{)$(W2O{awOlk{)Y>7ua8^zxfig~f}GWG2{H z!QM~~H8%dL=$AXe#VyWw$$CW&hcVN;=;t*fuTz-CEdfb*&VBoz3R8Id#EK5S#=CI zjgeN{JSd*pV|G}#3|B+`kX4T{(veho&lANfz%BtKX>EzDq6n=w%nJKS=8J(glj>(Z zWkEBERre?HE6WyQfiNY@yDf8HJMQJj>{yG`vT0;Du-mDm`Bu2(XNT|Ij9BD9wlVDS z%`fS3Htgv$T*S?r1K}63~?&oS$LpwR^m(9CP1n^2t__M({az8{a6R+ z`S@e$s?N#jg`aH#y`DR{yZrTlHSCN7EzAZfty(!Nb64j5SIOIDKYRb1^{@c)F$+fI z3DOFbI%oh1ZMq?LlK6tg?NG=oFocVWt+B_%n;S@ukrZ3D3UJN!2P_ASbiHdWUcMG; z%=%JLpQiJoNl`6frkJr^a z;Y?H_B}}&*uB!D9A4z4z%z$>05+DBnFJM1&$k%-vOnqwghy8gWg`RD@cus~=1T`9i zBe9SAnJ7BfJ7sQw^ev{-s%ZReW`K-iTvjh6+F}Ly^R#teJ`m}c3VKY)K+QW+x0Jq< z#s|4il4qN5k?l*ufh-Nd@YBvMz$#-yQc_yFmv{bn<;@yOW`Xn2=kcY8q`CuXKsx(g zUVUk1W@(M3ETgnyZM#x^-L8Ah3fh6so)vwRE&BHm2>TEN8v`5C+hvqKa0U*_mTjwJ-Jn?Jq~9=uCgHspq0OQPdm5`xqmGX|P6FQed#!>V^ryAPbk-$~2x zD*aYYA3Qc-n>&6!#`*fgJ$6t0?4P*h$v2%o`_Q@E?rw^2L5`d3uC~C3+7ENIM|Rcc zZPGQAWYl=ib&0o9K2g-AWev-nNGXVtF|_u$w@y@pF3G_sJB_6gGPPalR0D16ZUAqd z^TLSIGB=l0e(RncIh}GbpknfG+_?Fc56=Tt?G@(E20x^f{^`N8-BY!r$X$6c#~qU; zKWCX{oB1W%2>oIImMyaq*TRAwisCb7Cl>w&a)`R-VgA=UT!u~Gy*p<*hsbw{{YX`a zk{HaU$gjS`1`))I;E%zS{;<4c+DM|0XG0(Ewrt*)``;1c?$R8wMQ;`cK5FpNsEq05 zmmUJ-QN5mj@MJUyjVhxvP`oED&5V7k+fHXZd?m5ZJhQ$_F2ry zisNAuKxvYUEgfGnU}_-LQh26m$)V6oJIhe0C(IkBK}APHuFW1bwBO#jZhx!Z^wSIs3yN6<cupU{xlY9gZM!TfP4!Wx8-M_$t;Iwfeq$6x!<_o`pkea-0{CgS=@TK)ih_W*d9 zrm2HJW%N*mm%%60Q~)x#5QV6#aL`hK=SQRA>k@FCjtp$&b!bU_lnJ}Vf~foUf*xM_ z8xIm>y?~mJkpUL>j4g-)Stma7r_7HVmZA^1pEAc?s{rZf1zYmMJTLL5%u`n!qSB9o zeQ>v+c^702#O?UiPGR7K41z4`J0T4`*v-wrXF{&`B{Z)X5AH0i5e&Dy0EHw%O(@A> ziX!;xPV|3%<8S_tc;VDB9c!2&e2kM;L%vuAF+udh*;8U;Dy@3vOZh?i;j6RunRC-?$1O1X$zmzxt8v*$r>PvVnQ z9Zf5?Q|@S}{?}5{@YoXY3zEfhqBIr2YV;s&M%236@8?Qz0ASkfyRLJDxy5E5VTssS zk8%HP@;vC6opGq1b6<3f$7r zW??(=C=LQn=#vP~VG6x0=`KCJ^7e!IzhyjpgayZC|u2Yi>tbgys>{z23D#&j?FqeX+Gq zC(=YUWr@!}`#Dq&{2@cp{gnwsU1D;QXv@v_92a_9N;87dniI}fbAVYHT~6W-Cyij^ zvC5`Kp^7bnjbt^RlC9gp)-}gIJ>n|?G;5fzWq5P;&9WtVu`dYd^&t-wyvu*A z$;MJt53fbHn*5ZxZOuD{s0@SuUPr-545|fWie7rt+~oZsoB(P^WFDML_O08*t-j{j z|JdF})mkoML(n&7Z_;Yi0iT-wTJo-7;5{f;qbns2nFv$588>#HI%_ zOheeLq2Tp()*43sGUf|Umz&itwD-_Q@Ba`3&}{ryMFeVeplkpSWzO_qgE&HYp11*a zbd?tW>J^eDFy#>xdAG_qYq-9^i8N{ZS}>D9*-d)fY%zW1rGOr7dKfDF`n_i|j9lX# zU*|2o&3#D-_X;kty0pw2QK|-0!_N;A*n+~hcZ`0+4pQhmnuQTP==Ern&dz*~@`zyiu!aI3GNCPft6Zv(mvAl%(uJEsr)Bh~^wtRUj z1<07+Hr>lZtAnbcvraS_&DGy*CqyTZ?Rjo?9VY~fai-XuiyhDXSi{vNe)cZoOYW)p zPD5Rx5ELG$RGL?@&&IS+eLr9)4E~bU|H9OeSc^}9a`KF1-K6NafXP<_o$~MUABPAX zc{*{FT@ztd$-%Odd2EX^N+mHUC0JGS;l%826N|32eWTxeXW8K=s(=WBfG$4jr%czrTEOPX+6H1M11}JqFql6sc1a6C z;PCNR;IOg<%iB$b_2Z;kx*RsLnZ`o@l*#f!PjsyTpGe;aR=YD>Qi5gdV=pN;L!8fX z5{tdy+;{;Lf4#`y0?jwX$A8Mqz0Z+eFt2Nn}Col>N5IYJ_dEc`wsNo8Z=ty z6A=eA>k&~Z=fM3f z5Kc`2_V693#U3qiK`othY-CCfVk@veWlY~;|M?v`L4ID$c?}S^HNFHT3Qt7QrKV`-qSuT~gVCQ$dXA06l%%+RtpQ%EkUa;W<;9ajh12ta5j&H)R1O@Cb z$j6^DlDS==(U}y7L|3OH>i_~c>hXWlDBvHpNsj3^F$M1JfQ0PkRMlpEprRq|P=3$1&`tS0`JtOihk0ufNBVOdQ4V?P zA5-^KJ&2YRabsf?b#*Q~vw9b2L5MtxOMXj8;iC>+2>D1o#LRjToRbqfv{LxEq(9zHq zEa!cv<(sOSxVwN+2O7!oci5{LaEDbcY&jD9=#Wd*in=v^STzcm{Y{u~FM_?R|++uFE2sfI5N`>XOGU!pN?nD}msbfz?CUZ)Kj)D`(k z3xfiS;o2r+<-+HF{k{iMg$ElQRl|r0>S|}4hgm@A&WbZonsdyiw4qA1Iyb4cyC-mE 
z{gbY^>!wSziWY96ZLn4tX-YSGuc~-oQkZX1At0l&dImUs&38iPeC-t=*3KJAx#(97?*jfLafq zoOoxI^xY&on(4!iQ$;{H}KQa8MSw}7CnFHkeKD4 zBL@7uNNcdD+p#aJB~sm8M1uyGc&>r`^U?Y_Oor5O6~iMdXYHae}?9l z{p|a1L>E!!aG}tf8~3J5hV+<6Z^PZuTm!oa1{!O~8T175jS=P=->i*-h8T}ct9JGp zB{_fV|Dc-Tn9S;4zWa53ZQ5p&Qw22{yPTH8e@V+g(DnB(qunvMJmQyq{W8$i+`*$K zFB~_gl^}h_i&36FK@qzP#9NTv0!)mlw-P-DL6hv{-9Z2wQ!JbjA86F%S+621dQg^5 zRv(gMP#T#6KV~$oohbn29Vcv(=2mG9)|aZ{J_J8-g(Dyyef=a>AxP@I_U&qlsW7%y zpP5YA_jSBfqwehZfLs$E#phVfD>4<~@)IsDu`)<4S%-^OASpIGqa!6sTpEgmX&m0v z;~zv+f^kh@2Oe|TN+jsitGgCnc|KvP_X&Wv=p2Jt>4O$bBlFXab?CBI?Y7X zR};Q9=uZYauIF_B(#o9YlkDTo7o`;Rdyj-TLqVUu_2xMI>{1f4t6*3A&^iAPcOQS6 zJIYWfHaKapXg6k?U)!*av4sA~+lUSvCcJ(+THvhcfcXYo)cbcNk;Sy1i0 z-b-QK!@!NEZbl9kF98ik9?6T9~(4@lNlA&Oc;JhF-@^afy0Bn4_b!q9L5f|P3U3_>j}3cpEz zYC*$QqT=sq7kah7!s)V0y`>#&1N;>I!BqW?mDqWMJ6#_V%)=@><6YPj2h6pR@UDHP z0mqZlRMa-N4m*)g*o&Mptj6t7xvAZSHpAyfbLw1mFw90zpONQ9y>1z=3T-4oW0F_1 z&{Y>JKZY#`=PXh3aCy_PnV>zc&YuIV{SLZ?h>1bK^W0rkh>taL(Fkd7(E4bUc#2+8 zN;uvh^maN(>Ug@hq#$;>4SB~4cPS3_%-H09zyZh~83Ur=@@6oH2Rpb2`N0A;Ui#F4 zu&a5Nl*6@bLn(ZPt=o)&hQNnZvdF$ub~G(JZzAnlx^Y1HGr#^YO4T!VYu(@kD>${o zV&4hDYq_~A5T_H+T9R>+3+T#?V^86Bjw{N87Ii{6ZMC3NDx)JgS`HA4`yj+E5H*9) zqn(38Zk(5aS~g<^R_G;l=o(gHN&!o7%;<>R|LzYjgFzy-;u{!k(Dk<-$Y`+I5ryO8 zec#QsrG+JQ2tbI*X$`AkC?a3tC$Pr}Rk*?VkJN5UtaxLg4JvL&i*52G0fLa0T_1{b zEIe}yA|>Yp-CtNXl*O>NvSng`G_Lv8x33L-_mzUMTp)x2Ao-+E1}$6(q);r8SlSTC z15oJO*dLu|m;LvAngU*S;~^|3k}1I9;T>Zwm~|32X;vM2gtlJdBwTrKSo0-J73Q=o zc_))?IQ74cUJQ=VeUnrGoVk`9W8?0e&o!Pd^v`=s$A4~@rUd)Uht33vpP~jQ{x!1$ zkYP|8fsfuwlBFn1N<$7HA2hK{p=v6JNX09VAwhVXF)f;C$j7!iL$T&su*-QtT6H+L zmhWjbQ1RI8`hZPe+12hchCa&aMdZTPl$kU2x7~-&qW*+988m?n^EL!{vxCE8kvDh> zoY1kg;kRBOs*ui$W)-yQhctn1*~@TLtFvS5e8kCaU644zXs+m9%?XDF)t=!GUn_L# zl0yMiL>!&6iX$VH{RPnLK|X9)zd*8%DYAy?Jk)J=g&hBpVBigNgJh?^W2^h>s%}Zh zh?!;c!GrhodyT#C7yxf})Oz2V0~PwYvEN3H^%6@L5)T>Z=hwcBH2G8W50oOvrNFD_ z#AD|v!^?kVU&5ZBGMQdru-9QFYjqIn&>N6qQH-IvlBdg&?UrELX#+S)OafCtsjD4_ z1V04p;uM%BCumDpBA5=`>R%J|!>d~;I3A6xdH{9D4v@06+1%u=$)LTVsT+2fB;9CH`{Y}5 z-Z+*~Br$yT$#C-~x=yTa62ZMU=&K6OQN_~idQcM?Z{iXMw zdKJqA*eNg`D;eO$nldj_6Em><GmKqzE~ z1PdDp04of;fwBi~J_36F*j!L^S-@-n(i2&`drRkODBzdJvZ(#)r=DN<-1U`(Y~7md zHyzfdr5g2h*S}bAv0a8-WeOZIus#)#&GaqX!kL3;Q5c+9fOGe_hAyPRntT$ z8y6Yem7HVQyP>qscO(gWzIVR3wp7-DW8D?Yl-vGrZ(Z_0n{J5cVJ~?#mkK)7yQm=pUzzP`+|;~LrG=9Y1D97XH&+Lk+(^$QrpC$!V7e>!tOlz z({|~P?|Q#Qe|zAp|1j?gn3BKIv7TLn16<^hV3J$V3PrJQrg|qPS)Y3ir*lFX%yp9} z^&P{tJdLA9PAsu~?dRfq_nik_v=68UGqPy@M1tDP;2D^(qp_#)(v8)XnTIA1cWA@} z6MA3Sl{))Tq*fPi-nBcMpR=lMtz=QPT+iRvksjCb4#!Q%c1d{>c1+RQB7*<5wsLTa z_CMB>2J-%ELFq8%{}z4zZjgU(_ zj~U{k7Mt?Wr_Fk}Ag<`i)1qC&7hakUcPS~dPQSX5H{o(s|yP zf1aK4>?rY+eR-Tg{( zaGb}8{z|nJtCjbzT%K4axry{K1^z$}y#tYN4x~#QzyWbZaaj90!CSNRN&NzMq35 zKoOY(GTR-GxoFV1GJ!5v>GF67!o5VcY^ zneOKeg&*cmpK_&ZxsflGSs zHoFp*eg`VgjoXU?(JQ?M zr@Y-Wk`?#)O>VF?9}V4+8yGYae9o-C#NXNYz}SYV_t&RJ*4_>*(Cu#y&eJJT3$yc| z9iljr%pyKl-$3UT}ap`HU`T?b*X^MBW zlYOp`zPi_qJgxmt^u}dB`~TYjlH~$Cq5uwlx#dVZ2!S;)_JY;vACZq}2BY)N|HIyU zM>V;=+oHHo5fM>o3P_Y9oruy~mbw4|0YM;0i-<^xh=2%4$fqJ8U8xE}M0$-k#PQz_O?C>)3RgrMTE?ldwXK1XcGjobC(dXX1Gu!axD zFxA&hPjQ^N9AdSieJOUC%7)yXa$1nC!T2+J-SN{Iqqi1oQU^_Ki?EIZ_IIAUX@?%E zwA+W#A$whYQNi0RG{$0<_>Qdgs7Sa@=qQAro=e)n+67pVy%SDLhrF@+VC<|Bv5Mkj z6d7fzkK9%#n*OhJuZ^JDz+r8#G2fxNyn z)~hmFMqNfWGViifX%905-$TVAa+i|l$n{B;Us{s;T(sIVHgn_#2z(S(!4KNlvgbn` zS7eoJx-Lb`BPR|&Qey6HK9u?#6!EI_CyEY-x42}IX|yVz?Ay)Thwd&ml{D+j&2-b4 z_8&oS3jgUGXRjXf7}R>cMc4`B0PG1=yk@j&9?bMMxy?Fl9Us6dU``KLew;I6#LVi& zJ=1^$rg`Vy)YXg}^uws(vz+1xeCX|OnI#yK^4;jEf5-&bK^0{Q!|5$G>DzLHT0SDnF><-K|n&+cR6|4jZC+F{aM@qVzm=`XW;y)`8ubYsV9Ypav 
z;M#vY*~w~G$P#=H>%#ED;~sx~tl*p9CC#H=2+-~R)~)ZagD8L7PW_+9DmQ}!4nCmW zY;A8SY9$>)19<|8al-P2p%mwnt87C9)vCQeUh*JQv$hb!+m!85H_SRZYyQ|{NCOpr z$Dc0=IIV9@H|bHa@;6(i)j8@l!$96?EXwz&lcL~utHu*`(9uk2@e(q%U`xIr)Xb*F zU1||6~m`tsoQJKlQH9iO<;19tyHQ9^%LiK$7ngH zNXPA~$$93hz!LV>x`HfJeYjbK)yr(5f!_INb}Zp*#8(ux(-Q8T_X_;a@ITihFRWacP{#y3@HLMwm(+jAf7U3yQA4#<3m{*uiV84IC_)KN3grjCgm_}!5W_YQKk4=nR z67)qU-87+x>Y(3s#7rBts#`mMZcmhSy#Gt1{5t2RZJ0cn=!*KMo4VV5Gi zF18~pqVlwmORow`?R_;A-oIDR9JB~v4w|Wbxf6S56PikII^Fbtx@-O0=EYJ0=S}+U zMt&#Go@i!pYw;QSqY8{1HP80G{`kvrD4+8Y#T#_gF#MybxYqcZ%8RV-fR8=ZJvlm3 zp2lF;H9(Aua(J_ty1=gJIp;9TmIg*>+KZbkDD2b7rV;l6eXD$jMYKvrOi=TQgWlf$ zQ5hyerZp9tRk8fkkk2IxcE zdd`6D;#4z%5jAD-lM8Vg&Qi$x$@L*xV7vDjqzYskyTBHb5zW#e(E$>AQ3SRhmU0b@ z;|{irN4`YT`uZ>BM<#;xCu~{MgZ0J_oKzfF7!r_F6&{NED<@5g%)_Us|d3{x!7tUZOzaj=yOgHm}yhi;ymk+z?=p#*GHgF8Ys5v z6HXeAY9+9BQs5Js^I@HS9SEhAW;0n5%=H!Be?SEw!~n$X;=rG&4Q!KU%Opjbn?W;C zoq=Ya9rspopT$q0$)nls9yG!8$rHb2+Gou<*&G{ck!NTQS|0)t8@6AzJ3H{lsL-P( zSXj7!3DA&P)BIY&$dw%Hz|fq13{!(MJ;A@$ZZ%%to`D32c{jq#YM*KzJ|x(Nx|;gB ziw@`>Ewu_(#MuXEH!#4;ku?V~t7g0^-&neEeLc-6&IvYO-Q0hL&;ZF@uW$hc1DT=khM_CsO+` zBW80AW!5=d3>-4Tl>O9D8{P1*BA=z&GPYJ2L2Qv|7_I#cL3z)Zv{i8{=na(u!Ca#K zs<_hdj5efhD(4{QZeIZAx@ls`{Z8Y-q-L{nDEiUM$%_@HqnyVy&~e~lA1a?UPXF?q{^hXL znT*H&zr)%yR>4uQYnfexm@P(01c8y!de#B)cR7l~#~Pfn5ftQV#tEEOH2g-B_IkFS zEe#%_;foay6L>(_zdBG5i-?s}juLn@bF3w2qKn)aL0&u9@p3BUA@>H8!wUn??NiXqaqBv7p^}UpA6G}#cN@Z;l7fXxg%UZA*h65v@u=cXRu(FU+1Dnfo zDU^kT(S^?7`X{P*G8^$Oz;#I%GYcE+_{RaWUxE-xIP)y0?-#)LAw|q+GI*G0G5S=H zEA^0cV53AmNr-W-J-t_>;Dgh{y38nCFdwn_YbGZt%9S@j(o(CoBo?7U{BmHv_^o0G_FEH@ARt$-4|C9L?C5l*1p*V4 z5-d0N6PB-~o;os`9Uf-*ls==uK+f@n@dh>WIprU4JyZNO!!SID@WSd#sI={Cr&UF= z?0MB*kYUv+FkJ;)(-bHd)YEv+2HdJs+7T0PP%f<^Q)K2mP-eQ?zuzaoa?n3R-JQy4 z+6`ANeLI|N)2wWyIecxj1hBymx5ryN8X62Q(Sld_Xz*Eh2S0+I3cjaXtm`xsn9jrz zQ+mUTzHY-~HGBmupI%thpg+rHL`{FYsl1VG`-jR8*zSYs}ad*4O%qlHtIjV6VPZY45v?{Zh|W%#VuN=z$RT z3HbIFEXb;;DKpuTfL8j>`N-*nQSXd(`rUKvOrpX$eEdvJMo!P=TCW-g&>({+L*mtt zhJodtSf{$T6jEf=Qcs(teBJu zYVB4^+XgaGK50gGgWTBL0jVgc8djDYlND%lC z#LXl=0a5IGLX9U;Ai-X3oCuYCoe`z&ycjY^ylC~x2yjkG#0qb9f+Rx|>3z^1CKTd# zK5aZYJ^qM=zc|eGZ@Y8Q9uhT-XElu-Nu8u4*%A60rCWESlWM1}=&6h{cR#JPTRB41 zd&8j6FYKfV*E=t|O3@7SXaaW0ymnVDtjL^{0Ii-whwmZ!LAQB4n6rnU83YmEA!aiO z)m1@q$dK#^)5Um$vdO@`g<|)D*;oTXp5|y#dMs^eCSMI*_vH zmZ;%iCeO1n{jb*JKXns#uvVegkGMLXuKrK`99|EK)e%k~R;z{KJ4c8`9LDfa5jM2Z zM;3%wmfmc?n3tr^sJ?|(M6HJuYCj{%x{g||t3r6R(zZ0Akyuph>V}9-aTW=h~10oeVn@#6`_~;5tm7Xyg z3rLgiH9PfO`p?f>;YFt(+G71+Pp>^K{Cah16x*5qaAyD-)#*H3eRzJq4h&B zS{yXA#%dH`OrzN630o1M&Q8ev$#s#i6(hjtAS`ZjT%uVw@tk}Vt2Gts1oTreiq#&? zx=Pw$0WBK~juOFvj0VnlQJ;AHrIG)HMnj%rjH1MHvdX!~CW zN+5PtUaKG6a1u{2znHBuYbuvbsj15}iV_O-@Hp5RzfyJ?SwXojnQ+4h8JVCcRBB%) zQ&kc$RKTOt;xJ=55O>FH%_A&h&#eyAw&%j0x+(Ee_*i7>M7-T}awFP)b!HTxp4n_v zH49NI`l#s~(+fLbcnn7gj#Fv!3(gDi`+Zb}9DZWR>ElXG+5A{7pGKmAp}DVuhMp5@ zFC(CPkYqHFklV;qAFO!bV71$W&#=NNraH497L%xGTWnLBQ61@cvPi@6qJ6ZAhK(BW zV{C$$b5plUvG!?$H=L= z?VE8g7t=Qz`Wnk!K7HL*iKLf=dwXj0Y=KY~GsJ8?s=l5j65uz%L-%DrY7qz4cJv*? 
[GIT binary patch payload: base85-encoded delta data omitted — not human-readable and left out of the edited text]
zMK8;uu)g(*G|dGYO&?L%kjUc0!W-N)^o?_rx#)~0mn6bML0d7lXLatB#7$6!ut>va zHCc!e6;^dEZ)tRjx{!`1TeXI9DKWEr-$)edI%Y&cl2U-9`|f<~od#jfTCAyw?4>4_gARl$W8~6ntOG+IwGuLv9-7^1h3Tqq)kdy!zSv zr{#{0#a-fO)Yt=s`RldFuJ7cY$37rn1Q0n=b8YgmEhJc(*krXcZ~V+83if56@kwbd zedoT8mc;KpbCoZWq@=27;--AczP=FZ*gXK>xzR=#`l#-V-VCwf)rXtx0jKfOgBA)j z0Gb1<=|B0isKV=wH#uIT5_9N| zk!9|pN-Qp!1?sJ+RNir=++QKkIu0cyIgGvz7u7@L0&zNpatL+&B3}Z3e5Phl@zG=tf99RNWTXpv<Pib zY5K9pr*Gc>jYKkT9xdyUe0nJs_qq8~!3Wzy`?SW0)YZXhB{)Y7#+@|!6lhCWc5EhW z+H4Hd0}=3n>E`Vy1-e!rUYTS1B|Lod5DYag=Ih4HqtvDE3)GV{H_2(fw7F6nSAFxU zc(tZ&m8qt_M|-3;QcjQ`agKM{%e^T`YVVPr&!1O0c`-@BzUJ-hr6h$fx9+@2E1F8H zF6ld&A@1&a&i~?zLVt}wNnPt^-uls}ps6=rpt;thg_q)W=jheJ4m&>9)9kV;K65%mwY3HrV)_=(IFPdpDlPjo2ddWbGA|ny-8uiVEf#j=fuX(#+q|j*yS^t`5_GuQI#l z`~G$SjJ{uSG!@~tyX!WZ+~QtjcDTRt@{jYNdp-)BB|c1XUp!LJAJLn&`E-I~q;qd? zxfZwc5u>5%ythWfo2qk!{@U}K!)#&)1+v2qFEyF#d*>Wy0*ccQ-EHNN?!78A<8%5) zLkr@-lau{CeW_`sG*In+|%U$D4wgTXrXI5cUgGoNt9Y_mPrEmSKc z;LYCl(%Ql^@ew{>;%iE-^k}cduZC)Np+woY@uQTgh}BH=JUCmnvSA7{y1T`>gYJSB zgxBdt551LFWK&&Z--SvMpTR*gh~L7p9Xias#C1jr68I>c9^n^@miAKA?&(%t_H?qP z`1U?O613rv(K)$ZY#MVgx79t5-TpuVKhu*5PG{s2rGEWaT@CN+;OtdlU(=E%D&m#H zQQZ4Rin9V&-}bJF%6mh&x8A`r<8|0*vOqzCcEX2j`Sm*8R?+)8a@p>Ci~~N`>MV2c z=m&2Tj(tzO(Ed^p=%!~lD!nc@uQAkc?z1Y&_6`DcACj?37sUG3(w>*zuto`8o%8lT zjfOh<)*A>{2%wR#iDB)8{Rp6{8XaQjw)cz)wnf_30ST*66(U4vMzIT@OwByyy6AjkrZ zvltNIBP(DwT{+=pMC`k%D$1W6HL!zCd~rdJXBj&@-a^PWA%QHPIBoMD8Jr2DL%(Rx z0)nFwgng^Td~uhc`Gubi0LE>jymtg>5;(;n?&**Z_oA!tyYStmh=u*kumv4nPzJ$| zs&L5)8%q{#*i%?hkCEn>*FlYC`MH*DRcJv+n0$_wqbBs|q9{w?Z*wjG?vDTG-|cU| zxdjdZ5x)UVr{VjiptI<^w+MNNy__I8i<|GU-Y^B5p(5DFU`=E0FniLQ!rX;Q!p}}4 zPa(#&|MB|nUl7m`)3&9%Q(Hg-6e+A5WrX7F1^H)j^U>VgdetJpCdFCPeG=lqv8weqv&@8N!};7LtjwssjZj_ zQmdZFIrDm>?o@2`EnH+&~hfCkyJ1U*!L(7u)gP{C`xQDEG+07SG zj^Rj*wdd^2045V?VDKTv5iY|8S&XQQeK9%^euUZf>`~4^Vki6uK-eDdCx2?h-M|}c zTZ(FTe@!AI#O#x&=BI@?{Tt(*r8^}nOHI-+n;Vv(Htc(`fSnyQI>sb%#E?7@S8n#C zBTBTGBf~8TFt_+bf#Sn0U~I@y$n^cgZ}40$M~3;J|8eY+x?h$26W4X6-@adxAL_k= zn~r;3jTsdc6JXu8y*@}Obr5j*$#*1VrZRlCR@udVX_nVbt}M`t=>7f`1B*`7F83q^ zVqp#RyMss$61YG+v`u(j=wD%R!)Rb{BjPj34<&AMH?ZL@R5S8n`24tz$k<)TMVC`y z(nJaqr3d*69ks;#S(d(-QPc8av(~Ouf~z|A0jnYRR5LmQDwmw0EG^rbf6WzK|FGFL z4+9Pf|kt($@vNI)Y*$&uYPA{zxneDJy{f1o;y4)@$h2dhEt5c z+W(bdgM&GI^d{lZq zqxha&c1;^*XT>d0eBed;%NWK}-=U#6?e)N(BhC9-?|58FBiSe4=NQoHE7pJeJhX;xu_JLc#5ftB<}wBYh+r0O-mGW=17D_$ROq1nivg!y zI|tObplqg_6BIKObM5x!-V{zwxi`C)^ZYGSH=goGve-DT zfaM4@55$4~{lP*XH60REw@z2NSLJ$BZ~CEF#wF|CP<94IpA}3kKe5tOz z3tF!PN;{?(8}VYLM@>%WeA^(~*#sA7bjijRIw|uerL=Q(4V)r;w?87P&)Gy9n7GXr zSm{I-JpSXh%mrJI>vT!Ehfy^?W$G@$^K)uy?8FbmVq!(d0+X%A`>cQ9L&>b-ZrFhl zhV$_A0uNe@gfh(EqDe*NC@>ZCj%rcFoho96_vzGqy!{-c!e`&y?@*#VyE^XrmzP9s z(U%ra_*MSGt>F>t0+NGC<+%4y66@QyqLiBf&z?F_87H3N_t-!1aaYw{{PLmz$r0J% ze|nu#-)re`V-v%ZLd<^v`LJfsy9AJFf9ZxcFHiHPvPg{0?~OfWUjfMexSW3Z*A-wCE=sjXpW4jABV7DCxRkQvyxV=rq_4O7 z=W1<32@N!9-Xx`r^)_tR4a&Dqs;fK&uV3?RA9y;V@OA7+s>KF{781VW^^Q+VuoLJG z1&Lk;b0{yz@rY=|*hvNSM&`GRfUQ2SL|8vE^KE_Hs30+e(@ZCF-3rJsD~3e`BI zWp&Tt^=cvUm0TP2rmcI%TJ_U$w|s!bZsjHhacYrO7Qw?Rh+yD;zi`ll&~ zZ-n(@yq_T=SHQdmd+|hl!PPAKvN+oFrAyv>b5EoXpQo_aj}lf3O*a>?Vclz3_(<%$ z^~*&zvUS4avgiA;JdPoA_G;2|7e|QoP;B~cvrWM(r90!=dhBQ##Fz!lei9Dl#rv9T zfj{9&XluB$w|nW#aiBNggJ%1gp_}kVOOJdhkdx?$uf?u9s~FxfT#{ROO$F2wv&uF( z$P2Y7#QJqNET)Q!n&}i zb1D{0T)*sR4^SFw+h+)neIrn{w7LShu#DhxrqMbfO_H2ZIO*ydl9>2>zCjhtcYeft z7yRI~Md#EW4eHvrL8iF~X)Qdb%JNgShD3zM^Yaw9jEz#Kei4~h3k%2pO2(hJA%1WR z_=SpGUMC3oP@v1CL>`8tiiHm$+7;xQ*$DSR^1g6cxITBrrbA^sv!|7;lSPY-DU-N* zZTe&8z&RuL2a*_NO58@t(%KCQ|5?Lq4A#wjTJFn%sS@?CpX$OjqcN?m4q3eEikHL> 
z&DcePwr|%AatHWGK;xlKW0Hg=oO|x^_TanqO`b=wh*IIku=tJFr&Y`jS@D}QQ`xV+Q&51_0bS!3{vn!Mje$c;Sp!)UX4fW6yuZEn{ ze9(<1x064-tarIY125Z@BVP;F(67!h0pBw;RpZc3N<@h83@@uBR3-{BT&J44P3Z!> zDyllNb+8vl_??+fq9Y}7R!u{4HPMSz7yz1Yoee&Iopom>ICCi0Bg~*whs9!8Z1}(* z?N!7sNn^z9Un-NBYPD51_}SRTL&fWR5_6swjVV4o+*bh1Ni2`&BVwc)>}z@rb%%!B zI{LuiDw;YmHR44N=7@QtsV_mbLrEs?B^z=s_Jt#^FtP1!zcGt&B2(uppyYcray7gL z%_6qrFfGQd`j|G+=%F}5snzXZMubm`TVJ^RtFi0)poz^nq=*>Edi>?r(CRp4q@K4! zfbGa@BJ4n47mdZdrp)&XE^MJ9khJ8E6$W~{F*B^qNFMH{3CSZCL?wP~65Hl_6y`#= zWdk3S+CC0}wxN|Z{71iEgK;3OIIK`3K*2oZDd8{5z* z<&>IR!N+r5CT9p+D$Qg_JzhmKy1f1Ha}TYX;bX`+`@cRFABum$N3dS~AIx6<_rF2_ z3mbAxhU+lU9S6?t9_n+D9z-=e7rev952a%wgwr|}%;|>MOHP-bB~4qx$Rn3UcGO%* z-{K7n)DHZ=Q4xQo`X2^Yg@szH_skd_>k=*S9d10L`mO8|-~BI3wxogjd2h6Ux^U%U&Msc?Bo4Zo=ro+0KRayjX`?D8VOv zx%!;5?PZ`}{N^|4@6Je|2kY|budPD^S)qhBtf|7W>=t{z)y~}QT+EekF z3k`<2q{Ar!Kgu;ksG|D@3DnO^@WE?^lzF2a0D`AYjC8PcJN!ZkNHj>r?!{gh@=t2( zVn=22Z5aF-3ye~y{6UcnHyHYZ`-Eoz=C!u-)H|?Blad2ur8%A`oDYrqR&G1n-(W>6 z7X1y@Z9`veI8t3$1mpxF8Rzy?aWbv$qtg1l_;_eM=s`EB2U6fe$qNEk0A&O|DmdT5 z{(X*8;c&moIQKfD5;5=3lyBa|Cl~2H?%`HYr^In=T2DfQEWLQ(O{6!tUprhM;+~PA zqZKQAVj?kRtRs}4d!7eWGj0ZR>+c@J57#YZ-X68!T6(DZjIli3$d01DbkIfNo%ghOP9R(*qvRxT_8M0o^gKj zKb-Wcn}xh z)O!I?my?MvW~MXDP-{X3uTC~?RVd8N&PLDq5MMJfxColh}XcwOcu-q6Cry*e?&tT#8J~$hfyF5 zo5kuMDh1lDNV->onQ%4oT+jokgd&UWdbUv*6_ zHxqD`k|MTT47pa9(@#aT3Pt#=FJyjO%L}3+e(WkKD|+9QcGuH$U$Ls}uDq+ePru(s zO)~L6CG$ zO(yDn09tI$S+Wmj+}adG*))v~cTh~Ao1d{cRTFx7m-}C;rhGtl0ShZn&2M24e{pV* zkU0xD07rNj6CL5=z&y@Pe#I$k2-z|fGvtt2)9#z~B<4pY7X!E=f0lX6nT{$Y$tO!) z{;_G(#Y&jF^$xKROY0H&;kFxMMbXegGxqwTF%8IEZ@dDu82j>6dfm;$=hv%`dDmH{ z6^yoT4I0!BjIL_T&J(-)*I3Km>%YB{;IH^!d7gq0lYJHes!S-3nZ3k>^2iu>KBF3GnrK&I$hdMDHF+jKNaSNz>GMHe=T(A0&lHk}t_s-5Ge zf)LgS+RcPy6hHm?_SgMrZz4ZVzq51B?R8bU!O3vDQdsv?7EiLL4JlgMn!kot4F7G& zy`nOn-@^l1cOH1aK81drJW|6d;yxt+bLmY&ThB-W1AW&>2Z-PQejat2Y8X0Xr|&xc z834;~Q095x)%}~@_Qxm-htMbzGj%4EKk()gIulL1_*eh^eiCG0|H)~@jJgD?iO&1d z5z(((@&MPmQn%%J5suBe#^gQQGZA2g=x{F#qc-|li&eEM3c%T(-dJ$q!`|wvbHf}1 zhPcP8DV(WQ<)bYdXSmrYvob;xC_Sle_Dk~pK!Pxw#CpGF{Sr@@ z0IoyyA%aJnNMI|62T>?*xMhSZuS$$$aUJH&Q*SGoUI{#L?@Q*iDSo)drs7LOsU=nn zcg8{on2YQl#7$Ew2ek~hxH1L$X5Dn-1^9S5{|90F{~nG+|D&cm&fobtYy4#J2rU3+~2)IeL2+}FJQ_#VQZ0ANhEh8KX>JLJlUSBjk4 zR2z(>py@Uj$D7ZG8qlgq1}+#r&MjyUAABxS zgU!YH68^;NT~FOHkFtg`XS7{y41kcQqH5j1JG2a0i#KRb+)BAl0p*j3FZUiwEij*> z%zcImybbw+vyhFxo_eeTl3qBVm3U&BQPAS*9`V}A>_vDmbajv-PFBPv@g_kBbz4?0 z=j@P2#ZbfP{2%tu5Hl(e;LF{`8*!9jUnYg-)+idslMi=+bfdxCgLLcegRPmQ=mt#g zsBfM6c=~d5&Zlql*elZM(y9s*(WBj-!4eL~%;oNeTKr$b{{JcW;0LPBkN?(&`0tC# zfABZaPWZiq4=4!1jmrTv%YKDKzMz2rvbaOE)NthOk{^Jj?UW27r58?0Z`wU2N^!0{ z^R8)`J{ORjK&Y%~OP2UiZQYgnIOZYr@{^v4C0t@1XK?zN=%Vt!EsuZjK0ya+%Ze@w ztelia`V#vtVfOfo&w>yBULfNCMv=j94@YpHHAoVmIR>Ex5cO;!qrBj8*tbIw(cf_T zWqxOFo^x~xrsZhnwPGKIOfcE#Hgu`{5pW8;b-w5ae-$)DE4Ygn!HI${$1Wo&*!*{p zNG%C?U;IIqf{3v<`dUnT1BRW=1RC?}uBNAhytA(MNnTxJLk^v_>{K?L>A<)6Z(ng| z&7h8Re%kk}LZ{JL0Al;z_ITyJJ_t>WV4Pf=S?j<6wPuGBFhD(Krlc#ia4O=lC5@~&oKQTefXjjK%chSP|57!CWR*R9&-#%AujyA zC;n@_Ab=Rt-%Jn@h_9m?HFF?UZU!#E(DKEz9IVQB&zw`cJFpM2d_ ze1}0C0Rt;dhGu+tP-K@sSEBN>FNW_{N*xx3E!wRZ8y5vfPDS!>-F<3b)L*B+?l#>j z_Bna(zs~n@UTwtDDAsj&N1?Yqy2EPpAl0iW0^{~3>(b{+QUY3v!w*f{3|5YX@Vail z^42jrHnbX6YSJn6oaJ3MPtV*w&Fy;sM!1sSmxfq4#5M>gq`DQ3W3}Wuiaf*r2LYpZ*fQehQ zlEWh};5YIaisBcsQVshyL7*rG{;5P0K|Oj)&M~KbmTsZjNtep}#gjb~DNo$ckM!ca z?CRhRV}d5sLtJO_x<$zUV~K*+gF@eMgFQPo&zW1 zFL3b3V1NW%mFmnRU_9Ho@Ff=LzvST5Nm%%1PzhW+#bzPwD0sRj(d1As6S@ zhp?^4&+jQ);H~C%=k52|6Ravm?rC9^?SsceFBR$O|8-s<_k}}%qV10+io}SB`$y?` zxP|iYe|nz(5c@XLsP*0@G7N-Dx4Tig+Je3q#&XE!eH}aZTY$Njuk+cR0`qID)uOiV 
z=3H@#zuF*#;t~3nNEk;d0r^;%_3yMAc%Xinm3|zTLkiu{pzgY}9A{wNfg5NULqPfyN6!clxI?1Ek|Hcq-!(^n?5A{`T9X0pV-$u_o{yT-y{Oc^I-1v>F+h^4gy)%DR2fOll)aCvX%b*QUAkN z{Cx_7DeHfWv#bA7Hi%JrV;rM2d2CuKAMxD-{y~S+B(LSQWD`6YFw(}r+vr#>JzkrcyXL5g0491Hk1StT#9V=hLy26s# zpg~LcOW1z>0tll}zb~(2Aks0*!g#43wCP_cwwb1o5)(#AzU>dXk#+~3PJV~Aoy9Kq zql80@G8>BWa5LqpQ~rS`j+cDxM+>c8o!9Oj^=zBn9YZ1gdxD`njy48Ec7Dw^eP~@V zt+gu=4Wv9cw+}Ck6H@eH*Zl(}nW1Z%H`@4=mU3~&47zvTF*vGr^p>8=_O;4it0Jl* z)1L!OYLq#5zYlN8{QS4r+vDRQUbvh2rW>2HtrXhn!=4p;ZTFjk9lnv3Mk4T8N)y0V znwKU+@yk~KvB{=AElh60&OVuY`;*&=N6)>7xyz4-yxBzU96O~Kd*5Y;aui!(0L`aa z)J0!!;Y!kk-fN&;i~&2$Gq))l?+<4ZtrikA*M2hP)np z)OuN4gB2R?$EdUT13{YY6p%OvQn|W9S}Ms?IJ@R6PK~^5qEV}wF(7kzK3N-GlG3=H z__k&0IBPY|yGyCwTTL*S`ka5QE?&Dj>e`XdN{Y{4Rb*iMj^olW&oluo=l12k4=W9vgo{j*MXsa?2(KPGd7{&>Dew{m+kh>Gu# zFv@QxDGIb-$1oZetYto(rLs-(%;&*q4dMwWX9mHl{eT)2xo@@ z-y~lD>L%yR{?L7434NRouSqo*tsaR;Ten?GP?|d%dV9^$(aRZ!T^*0C%*e1dDKI~G z|6c6#Rm=8m==_%hi?IHZ*jO;gel=YT-xSR*A!_OUab{^Q>K5tDBjQEdU-us$Nqp-Z zxbP;p2w}19QcT^_Ov6nZ*2LK4+Mep2i_-PR9%EJZ{T%svTG(;2w>-b0qQ3gCEBj`G zXY!n)lD)3o;e5NbZ}EpEZL0kG&(rCbH%=?^ogMgar>qD969WnLGU04ZL zuEu`#h@;(wx9@AL%N-rsv1MDwo;?@Non7Rf(zNmKi!pYk$KW53HEs~s!qsl0BM$|s9Cd}y5=Q`U#zldn@Y{U4#WSG#6|VT8*qHx z0XbEyoeDS<(}2(?o`3Gkbi1gFYurxLCMjPR>+G$!7Nw(_xZ&xKF)4GH<^BWVJzWCo z`al~=^|QcQEyk6KxZ@S6Z3AO%hiV%#f6dJ7#~8ANduR)-+Q6#Qj-uG|6+q9)?2Ff|DU{*|Nkn#*UBCyn4NZIFD%|U2InZ2M*|J>g)^fLK}H2nNrab`2_D{^pIf*| z%~5Ir<+o0C8d!CPEzfy-VSGzsn3AHGtS-ZTAX}3*!{@{WEd$(iS|YHlv4*U{{PimL zDGi6EW%ztp%GSZ9{!9xxtrqYtNYHn_cMeaaKaL$2uK#xWZ0e0u=Y~{z{>z+8)+x^i z;{bGGoq~^CxaH4K7Fqm1WrFHETvf!3t$;E#DDiO5rbHeJT#YJ80X59)Z61(ctkAsONx#WKHZq(qb}}BU>q$$ zhVQjIP+WZrRgx@zSO^po=>EvH%JVpxZa>&X!=O2J5l00)EOe#RNoLz;hbRje(*WuA zY*Y3->pv$ItKgdjaGa0WI0ZoRL=49x>P_*KmW!DHwd|qj($b2;M!H^RW$}dTj6LA@ zp8NUV-<-0`f}-8&WnCZBw!_=@#Ea_eZwrbAp{|pOt!L=&uImwDe~Oz4gq49k&KB=0 z+ETrTwi$u&xr}rWLU43Fb9l-`QM1DOWo2$z`8U~SVURYbs;YY1J1XIN$YY*Q>*B9p zUw8lUcJl4pv+K_1>m?|;)HhW2mYEag)avkRYTFEJ>j79};_2k%uThvS93S_~SidRJ zGQcp~)lGExO8AQ9`3M_`)y9iM@9p>dKw2lhE}*8(b+~>n4<)F<^DVWCQi757jq4NK z6ZD81nj+Q9f0bU!b6lZ3(f9iiH_oTXyam|B6NGYN7e}EFWu&c5TU*^0XRss3#xcd> z^Xk*4+#Wo~CJ z0FYTgka~IUqX3WS?Disqr$GVhvjb>f@)68!i*gl@E);nk9L1@YTDm@i~D#|_2C3YU$JD0oAWgr*+z|D7^vJhg*UogC_4g?3!b)L`+Ux9 ztfkj2Z^HQETdl3jZl`Tqv3#xa)LNyJst6Gwg{Mx|@jP%ttSi*MDO2_f8`t%u>k=#X zLuJg@Nrb=8)4cmkx8%J2q51Cnu&iOKPB$iBouW>xYss_dy);156H*Yw%A;BOF2K_i z?>rbfY^~n$nHLaY?Lx01U#L2B`}OPQ{8fp+AvKjw*)cIyfQSow0)!v`QA(w$%S?nO zcXdYmfpmR)qJ@2+p!k;4n&zU($Rg&?;-kr#JOA269I|=pkg4?Z=#^CAwvt%4@mOMy z|CNM+qs6I*5gRLx#uwgxeM?X8n2XEGLcry?^FP2lDWF-N1vu!9=K%fx@&t&wQrQgU zpPl~;z*+g9p8sAjqEE={$!3(6RCyXEkd}7)qx|kk$hrxeYSR{H1tffFYiY|SGkw;? 
z92>Bz6ZnbhTA%r9VU)1VAhh&4d(WPUI=_xR!cLTZ1@)ph5FZx>>F_$tz) zVBI`r@$)c0k5R7TWY)N)l!Z-CROU<+%O^l2+y#TxwPXjx*+{TOviYA%;XkwJhBE{`T?dcl!2i56W znQpX@4j-lP?^BjDsCy`~O*9R(1<^J5`X_m%P`^S%L5*Obhc{k{$Yw}(lC`@WV&D}ELkrvA+Mj{>CWlWa6;bQBf1Ax*rva+a+PADaMaw3f$1q4{Or;xU;?EUCJfHtay#vTCHa7 zKN$CBu!6JKHC9AN3vxPgHtfgr@D2}H%&z{raDr9nRWyFLJRno2E$aqyp{~sK3^P^o zcHHjA%cub1{p&K*Z7Q;d6apemRm7Spm7bM(Bz_tjf~Q1ZTGd_I-%D@QK(Bxs#U;Ud zjW$wqkf7AtsDHzejfYk|S-c~LoI?f^D_CF-!pcA|c_WP?_t~gt=0epiXkDnaF%BZ* zbZ;~o!Aqc2;g=)O;LV?Awu1a>Pvk0pox??v1pKpMcit%Rt+saFX0al7>67fapGtb| zGTl?s!?F`_6nNKaP)U>XR*nsawnBj>o)J3J(quaGuA1;Q`zT<{J;zJdQN4As*W&`- zSEzObih^&1`CpJ8;`&sFm0-sYNoInT<$#Od?1%BC0uAIYdCDARSR@IqhL~Nc^#Nkz zChbNOtTud`gen4}N2e!2_s(@?R%bwzB&S)}iELSwJA2H5~pz_1D~Q4S(na zi@Q~^TqHy$DYWE|Rb%o8(FR|+11!N5YB9_p772S8wo_&{!l|=cPrT4FxgJ5h_+tFb zwX3s@UxwQtVC|Mp;orhlwa*Mgp|Vg|yO6XGt&BDqw9_M87hyIUCx)4fo?(BOcsY57 z>!t?~#o~6O!pxA-hpzAHnGH#A^^ZuD6f_Dyo(4H~aj_C5ct%VUp=JyNAjQ}$2e-mh zp-a>t=?eSBXFK&Pfzl(V(IoSyx)sOGlmaCauWdtKZ)ke;>M*xr@1KbA$~KDlZ}3=N zxhFal2Ng)TT1xVOLoRo57xPxgc8Sp=>^I>@Hh7zn*Td(%EgW_&j^QBRDt}d}h7F|` zyujsT$phZk<(~MLfqwJWx&3R4x-BjxqfFskiEl6~D=Oi>I_^cc$wpVP0Zjs|dkPI0F3beUFR~ za&>bJEE4AkDV$RwJ7zMSQ3q;z(VD|Yykjl|tm|S&|1my_eXS!AhQ9v0Qi);@DC7j# zjM6&GtrWJWf#6RVrnGy^$Fh8REAZBNh#-fi+Jx~Oy5fUJL+*!)#zAJZr}Ou)BanJ0 z5Y>Y*EmmiW^6H8o0FN&>Iz3F!_89b4H6A6?Z3ZiPCqTiLhls~0Nn@&vJcRli;Q(wF zCfavbl=f9ToO|pZ(LK}}`V=`UiG0|VU~YFwHMO-}c8qt(IZ9y|syvB<4$Q(kux+T} zN=Xjv2z+(t+wu&=5z`&w>egCpO9x^J-ulbgN*E_%>EX2^v2k|p{PsTg5=e4qYq~!Z0vo$)|ky(LM(09Ved(9 znefpBRKuAAw~{t1F$O+D77CNit-e&S0q>6Wa<;{5E9rG3k|Q zRb^%MkPe8eibPRn7f0Bb3l&C+nfi0($Ftq57#MucjKXWB(d9rTIcRmoW>cE&0soq{ zoS}vqIU||wyJ|;>5T9w)2~QH*hx3F>N^X%3@F^Pjfyk<|$M6}S?rF2Kp9FRP51s)L z5}U)c*y28!#v_JHq+U_~-O+%mEG6VK3=~M2(H7fKZsCv=-&`k-O0jbONtrxev6K8Bh&0#h>)#b3TXpG+0ua_Ib92zxL%bjz+3uq!#-*qQI^&6U9WxmA zOo2RSdfG4U%->6o6HX$2yp`921MeY{F(}h4Ms2I!ip>weTF9k*FyXr@^gyonD=!~0 zixp(v1RETTih4k*2{P>SfJQwCIW2&+TNaSON95X!{@nr?CopD9b@lLGwf8UWR}nut0LI|q zO|G)FC<$Jk7e?n|+Vj@KpoGD-=x7pYLe+S?b)qUUl7mnpMshb>l|7$Y@b5MSc5Xvo zHMD46Sn*ijFN!Gd?mHxTpIQzquU}M z@r=UHWI>L+kpcJqx9&LNV=mWT~$DKhn^=(~KBhWpn> zoqo_;T{(74emmp|{7lN85BaKZ!tD7xc-3@P=WUPJEZF0tinR3q99M)uk7@~czWX^C zeiCw?z*^{It=^_0jsMpr{!7f{zx<;D%i@2h{5nKUU`;~`^?#HmRmB-G7z!u=kl)WE zUku70e)(;o@XWQ@LRq5sUllaShb#tCc7Ilz@)1Csr_!}z?EVAiNCtqvSTRXgJlh7C zN2)Uj0V_$zr+^OMWji#1Hq~cHa$oZ!ZVIgjsiyx#X%Ju9M)~&_)!qFA>MwsPG+^BH z7@zogE|$BmTz{{xl49+@-v8;}{N_ChOdxKZ1#U?IYYkYzH1(2Lv=Pzh+q_iwY#+&{ zwk(g1e@eu04h~4ANREg7Tz+;x!IaL(Juwh9YO^q~q4;G#;{{??Dgk|;*jEbR-SedK zg90IQ*4Kke>`S=ijcO$4>LhR`uGz9(BlELG)V|gP1w-lLPn3y0{kz4@$=jF{|pFYZ*;r?=XVRAihjkiA#>aN$bx@}qi7Cra0 zzj6E5L0>neK`X2GB|Bf7U!|!~w}hSLG$>0_3s3wGmeoRI_H2UAxsj5cbT&_YGS7Ul zY=&1pQVc&744Y@|HG2C&%=n<%ih;sPT0ubs^n8)SBmB;WE?NLo%?~aRnLN4wJ=0^p zm9hahJjSi{`&1Kz5L71;aJMWQK0p1)S+^fCMu)Ap0zAP42^+Sz$iwltmw`wfyJ)ea z@ckoQqc?8OLRUDVz{{!WBg)PanmyD|ecsb5%SyiLuo7h_3%ZU3KkWvn;BwdIogaTy z(NxeYfwOWrbmUi}8P=QGr!d7i$NUlf%n6sg;LGf|h3?QL8jOG3p>$EvL84Y7T3M~oSFOff;@tV0BolFo@Jgu&vl^zi=54EsWgqXiL?NfL zj}(-l;(z8kp6cBMImSO{9JR2wJ(b3J0oQ*2sq(`_Fwa48;}pwFq;6f${IfjfTd-7& zie12ULDzaXl&9kNRNdrc{M4A5*e)`xZ^1Pc$W6JK3Ef4<=s%+`Ip=k_^^~tqw?4Ta z@pDW}2iA*e0{ZkZyfh5{9+j&mI5*ggTuKUvZE{*NP!XldL9Y6m-5w`H7F%6lHi(hQ zOq_=Q*!rH`AKbf21FdH52CA-jCdXj9=pyQG#_^IBV9|Y4^oCRXV9j zvXcK@uUBq$8)7x-ukQtO*f>|Qb3Z5BCH)z;!2M7bc{7|Yx-NN0D8hSh4lT7UL$bs9 z`bE-{l;#FX_26_CfLL*i6U=A86DISaWOeCnfOd*LM6kgyUC7ddvzoS}IRx1UYH6EmXL^_$u`2%;~!WS#CG zXhUfREaW|rgp>6d)k=zruo=Q}=zUTd@vcXWlbJ4i;Y2~%liFO-<^w;Y zN-qDDdSU4y7dtV+469_&yuD>3=sI=J?iYpUO1Hf?y%)37))`=IZ5+uP?Bqc zF9^u8&ATaA5kp-P1zhr+4CeLJNsbUW1G$K4ajAn~2Jbu&6xNlLw@<8{YUe93ao%TI 
z?Ncu>(OA>_yl0$?k3n?O;C&!Tfl|QrkK;I7kY;3K zk`jDQbmX4-;_nmHWHb00=Y^m!1EqXGcTt2XUhB{2_mt(?;NK1H&VfLaCDU{O!w0f1 z>uE|Qi_Kf{?R^Lseloh3x9q$0AEl>YOeN}Vd(Z2?nsX?wSjnf1Q1k8C_P>gg@>9N3)hq;Vst+#|q8DOdGvC*LqnuVrR52J-?7 zZ$(bquwKxot)Z!A^yVH{EZi)%+zQu-pN_`^ctC_)H{@PL6R16I>R1mU5;t|%n_c&> zbn=t4e1+<#y9kncX$;1g#eao?j^Re=iWTdmZbU~RuGtomh=nSfSW8LPkn}Kio$LS> zF?j}`A|zLAvH0WR2-;{)JRT{ldzpbjYIFu zI$9&9wy^YwG|-UGVGn}2$thnF2q#k!j-~J)!LGQg|dWxM#T0-rfp`gSG)xE z*7K}sD)fM)D$k_&VPeNWdU^p8%~pur#Jv@DuyRU&BNR+6o{HF`q$G9zMkhqy2yIdy z-1BzmH-w4d5=7Nt=<5na&iR6LL zkF#w(>8k_N{0cn1oiv++?0;T=zj)Vce-+t;-9CUXHXo$$8 zY1)ah!}P$hjqo}M+f;jQCW997#J>^I^SGajspzEplh-rqnLU;u@Wsxb>ZJ%a4Fq4Z zUTAotI>rOzrTw@O6uWla`=<~OIF%q%Y;tqsvz!HG2eCRt%`<)F!dooKm6WKce0=1_ zYS&eZT`H$)=}A{N3|Wqj-n)>yAM+RiZ=y{1OC1#NFcr9DlC~5Z&}a+Iayp$v)C_uA z{bo_wHIc{N7V_$D22G8KJ=7z!W1sD35M{PePyBVhBwLKo5Tj0ofg$)UiXP$Q@{5@< zKp0pJ@P3xyfyX!vr!KRH()iKubB$*l#aTx{i^ci$!q~{ED%c77Wp2)RNZx`i9CNVG zd@59Tb**abu2(AwT-{ZuJ7eNK5SC0ZV{BvC6IFhx$nODvEedp!3Y*Z9?QVcNi~CgU zTvlFbP80H|7>0Jn9BdKf*@|5gxEm&52TnW`c4^|dxw&PZtgT(LjZ#2GM&^ERF*_*w zLnkpu{e$W~n9ZhCV`Un3l8U5=ZtHv{p8UeN8U`_diGT=W2!Ah zc-KM9yBU^NX7z0>T9;F6SK;t1AS*mF@Ajp#fNN}jWyNjU3)Bp*h&;l3iEKu33C=yL zN#$q*IPfdTnmRV$F;iF2r;`mxCi1v?GvO#!5--muY2cxE#LA}CyTsLTiVf7& z0pL36loxYZ-%ah*R#iR=AZs{Z5~2&9PMn0Dj`tShqaB=CeLgj{~gZ9ECx06Co1TCmr?jx z3R~!%E9OcAJSxTr{95WRQmY~od0tt4{mH5r=#|xRXb5DKYtoPa?&# zYJdnaAV1k!kr2_Yrz>>KAcgP|OGrqFkYH)uFH>EbU|s?s4IO-BdU;auC@a}w^mM1G zZx6{@n3n05@09h55mWfs;NeDBr{%NCpl%0aip( zOBZa^)l6F}(}CTEENxiGZbNF!VNz@Wj(aV8%T!OGO;VEGF2NH9L($wDT4&kAkfK%| zu>adxfl)GvHIv9|UQ;@!!es`h}tG)3+K@*=9>ksry+8;xW~8o!fFq-atwd{>NcN%uYa zE!e`78V6$doVu+;Z+yws>?#(0!Y1*4!Tx>q{Y+b@qoNz$pl;sBsvtBYl4+1DXtOa3 zDH=YrC#&ER@m3B^599Cb)A>z{cM!&3C^M=N=GwFgyo-CvP?@twfuV;;A7-R=MzB9T zKlr{sem`P_;6B*Q= zT2_CmEMTFQm@!SvI&S)7YQUzbZ%t!EkVQU^>l|MBPRlh?N&XFdy=1VS=*S;ae57nA zD9+lys(jxgDTjxpj*4NHuI!ZbASnoH{XEMdtapd6vE6H+!59L`w(t!>KtKdEuJ0k` z?&gl2J$B}gp0cfZu9?S-ZaW2^U9O`<93u*iZFM_TQl}G53Xlh$ctblDOK_pT%_7G3o?U} zwy2b?87R=MQB@eo97oN2%8X-Wde4ce>tn4B(e1dQf3_}L+1s(@VCs8;n~F^SZ|$gr zN)l*1#ugHdTj#}Zp0q=F0T}j3i@Y^xTY|U|^|&2++cr6VZ24|4$LBI<1}otsgNCop z;QKzlauQL(3Y>*ECC$jiKNKqr#l7tw+AXu``J&L|DlsJuaE=%awaxoN_#M3}; z_L{2Y-NMG@wDF*aSgX4wc%8(;TELNHrqz@sJa;iOeezuS6Ob0js1sn-$^ER#`RN*H zyhjfy=$fWWO@a9Lk(+{%@OL)q1I*}Buhxrka@wK^4C3bPf0Uwaw>p_veX>xNn-b@k z)FD5G`p>)(hdp1(J0aZk9zod(r&3p|tZXC)dn*swW8oa(^74Loy*1Y{y>ZK=P0_vj zW0F$;LfZ7RP&K#5&WS_;j!cL9NHr4ECGbVDW z<$LI>*)-`!h#>lp%b}q)PN<3(i$2vVQHsz0XvQxdbUg!xwS(5){pf|=;;?x2!Ng-% zvfWp-uRV3-`S+vKPm+rsBrFEIeFteZo|=rSwr(+oEHvFmi987%TA7Ea>OGgxTBtC9 z8w#s#s>lBI&agVF<51ONt(D1$EissGyh1HRj90AvB3tuCQakv#<~wkJfEES?Ry3W) zH*`en>>7XCDoan|4evJ0Do`mkxG%JK=%g(!Ue^pGe53v*fQ$HA>7CVBO1o)_57ss)gj z=7g{tr!^`rT~Sip!SHzq&{ulK;%DNNgUf@+Z%Sq7`q7&~aiV#p7bPCeQb&AT9%|NK>UbaO&~!(jT>nw!1jDS{uc#-44eVkdBNjgbsMmjrD4B?rOy@v$;; z|0T#tKyO8*4kl03F23Wu1igM@kJDfKqr3^i6FE`%*S@vP#KWL=>{C`yLzQ7L>7c4D z^BO>}1Ca-X-X4&x*C)|tt+xw-M1qZjRh8?_w{*`11!J{|iT&qP)8PKq2vrWS6|G{usTCk!_VCwUAx!W3D&< zXrLWl#amgA-8xib7{OF5`TNxNAI6!XiJN+krZyqJfNWy1bB|>%nC_!VHw=G0^3|tV zyysN^H}!lY+;os?lL9mh4}h+ua@ypvXyq>HW%5T>WqY$a zWyYo)p=L-qWEs12#S5VR?O!7%d3C;6I+RfLro4&oIw6dCc1B&dbEp$|n(X zQQa2aTiKCTw*|l`B~!=fWGv$(hn+Q>zLb0j8jPn5CS+S?(AEkv$75S^)|BnpvUsx*!gYief zMwsa==?U^6s}ecWEcR<5Z|O(dliyIj|G{*&ZjH>mE0`d+myUwWo(A3b*ZS04MrqkB zi~=RP29feM2isN@VH(*kdH?t3UcQoC<$uC=5mXF6AHxJPkC^k6p$;%CORsk?Xcs!$ z@+s_*SeEr+J?hFjbH; z*QEl#nNxRY_F2?W5p+fBAWqB+9q9H;wDs0+KS@2EB|S4O;Uf=GnsH#Hc#sD#pI@YN9_^D?j>f09B66$oNni;XKabapMtQvO&Z&Y(vFM>EH2@v4VH zdnDV&u_*}aK44b<)Aj*@UzzcKKp?!Y)}i+Fh>{#+A+dqKa{!DV65ioAJwuQ>kl$DP zZ0eo5W1(FeHkVnEkI!}~xKWI^ 
zIrr(f0i!?2G8*Zm1Xlv<6%Zy`ppxn9()~nZ$tL*&(6B`z6=Sq&Rz}c_N zBNEJH7YZgavsQf=^7fhnia23H9IcW&p>6%W{6P$o3>v}W=Dcd$Sl#lPWT_pGY?;^YLdD81$jj|~jq2#_!Ot?07tq=dGHax# zla(+k3P3$-eg~{kQCwRgvKDjXU7`Vx;7%*okN?qQ~3qlb-s;t!AUvmDdQV z>Q+fYw{Pgh&16^MQ=rO0q)-s%3Jnq% zk8hb(BDS{!0Bm+{a~Y1bQ(T=QGcKGW?|^SaJvYnyyOzm|WsXb%n9gg`sbnba7o)va zEBrBTvy%Oh|FnS-sX%F-HiVFbf%kitq*No%p#+Q~IoZ|h8WK)G*Bf>gAT&P0o}6s! zRML*Yiiz6dtY^*Z`u^w%SSYV381oz(;(0ok1PECTf}{CZHIf;m9gE&T z)OGUKY{sl2Z5GfM5iy6I(sGzRc4h)~oes7F*)uRH!RK_`f6)^vbvh6d5n}(Y1-1J| z(f!;Z75e`a1pcL4@V}eIC9ffWznnBYVbVQ=X+(PvdmI3imU+q zbJz$sV;lsPwsSwTWCmJVk=7h)0w|nRC{HHSO_R|ESWtvykD=Ai z^R|e!W0(O9BFQ;Iuigqe{0z(}09sb|x5|xLkf(XY6I{;Gi_D#X*t&xSh0eCJU?)~E z66Dp(XUywoYyt4rWN-9M!XuO42)mLM+r>*OpBXi`+-TJ5mZ_sfNzw4ui(7Z9{si2f zr9Wk0RzATU9CI(>bx|RBiLl0($)%hR>>xko-M`k=b7i`+_EP7ydLUQ;TKo8OoPL|LI;>*N>u`7e!?Hg)9A| zWJQ6^tHB@$D6X2_Fjwu*9JVU+3lQ=a5wC<--Zn^{98-fa&}=gox_$fM;U0XpR)O9$ z#iW&Zw{=I#oj;W1fF>6^O$37~OF+jD2^|B?^&M(CBYS}^Qe6}G%Ppbiv5i@GNy}jq zXpk)qsLl5EaXMCiK)l=A#TF!{N1WuVdd?>Jw0z_!!yBZk-C8TsJc|_eFw}X=EGcB-&p_9%VjSM(tM}~Xp3q|kL#CpS z%JYL93XHM_Zr;KYc9*_vr#L7nwgjRB#}{HjlaEni7k^h(T@G8Z^TTYt{qt7*wXWSG zAK#P&T5n%GzWvbY`+v1b!%r?vlIbQvQ|-oz6)Dh^XVyXSs=Gx*S3y#nnV(nQkK=t; z&5U}#$@D@5Y4{tT^OilneS#^}Gx8UN@5^3Rs1o{zzl!t_cs z!dtVFqwnNAS|c}f7l8Jj{(E!aH>J%d8~|nc94Q}>i^0=lv~Wzv=~lqz32*FQn#cn7k3?E? zxQB=>dGOO7f4cWG8aYr>g|zH!yj|W3)|giJy-cB6z~)r9!qEFln9e8;SQ-?5Auv%# zWw3=S;7h`YCaMncgy>W^gK!J2(@g_RJdg)`{Mf#BKtkU3swWJi1iAo(M*#6t=n$LkK zvl~Pi)z;2SB5m0^goNHVXY%y(OUOFItWg7we0%Ix)xrrXRClPCwAH>W_0FNyIQ&9* zjZiJwl{#Y~19kfp4(Hi~CasQruOoT46 z!#SK+ zqA)sMcn$7@yQhP6*)uo3G~V@<{E3*5{OA0M_5Vt%qVL}^^mH1CBEo?V)X*3Hx!eJg z6gX~Z)gSg{>5|`QCz6o-I=X4t5A=v=sQQI2siEQ<&*%f%8F zF2JNX3zI%rAU0Aiqml=@S`_AkBwb7SN5G4I=H^IvGO3SjsO_!tuIngy6LjtE*xuFaUr`sCw;cn zK-GYhx2@O~^m~#s-k5?MVZ>3Ui<7ET*!JYt^DIVtdMM?k?4-QZhwa_nIJmT9Mvb4D zIKQ3mbhN(p3W%xk-0#s&?GbB6{s5zux*)JHIiLaNoR6%{c?{KN|I^wv zFcWeWQrS_vLbkA1@Q5SLHoPQL-y6Koac}DSE<6lgWtVth=carMWyNLW3&$DVGD@=r zg`bZ^kj()hpCVlTt#LWbZr5K7S3|RJnJ>lRG{4fXK3ide2(P=%a(U2;oghGrgd$`!~lfAl^vfrGh4TN7+nZ+pY{G4V`GcEaa-J{pkLeIt5^MInW;96iVN1*{v>EIRWjWnLw^L@2TElR! 
zchxOQ2et}!Xd2sEc^~s-?8e{GZdh zk6g_|!@Gw7bes7|`;{IcroDdD?a@5MCpfN3pjGs6dGl@ygvRAi+X z{__P^iX{u)PMB%9A$)Qr?o5&-4NfogDEwkRrVA;U=5hStd!H2JUs`m6lR|m)ntV*l>} zR<(_3At5D`;V$Y|f}YO`cIMRiKhB*o9XnFJA3*}~fluzyF{8;edWBRDcDWah>!Ijw zTn7Sw*J5>-9&eMgS%=}ua^fXUWI}OK@jG(2FU6Wov^$!W9U9+(|I;9mlw7z{`Pc9F z@&G`XK*XFBR$tXVJcHII8A3DT?`EZ5V@BenTQl|Sc@nFFxXL^A#H{u1pkaMJVjA#t z-&IP+U*$-6;9fqGPC{YNUj}ONG%;7H%M|aqqz`p=&W~Uxj$%EGSTa+e9HN&b@k@MP zB)Mm@zB&N4AqHirL;(%KGq@O!*%@#bi)HesXjP)_p&-9jvJve3q=QdUA4-0d3TUnBn4^p}#GF5%FR7ao_c5Jqoon;n1QY7_3C&Q4`w5{>$M(@Jr0NG- zh8&+bU;U~qHvk{&422Jl(BY(3*2~!p#R>&SVI;VHO~=F6rgwY+S$_s8!hX*gUf#gsY=&0|IFPnF2Sv4CF@5UbvOZ?F~IBOf5Z)G zwqh8FnScUvf8LG{=I4PN)2={NdB8&JTs<^IWQpuD*ObJOFpuBAzRf$vFR8`q<14wx z!3}A<=&yarO6w3C=*D0!gNsehf_CjK1wagx(W^$herfa8HQIIv)1KUi<{Nckcm9lf znA#O8EWoaYf3#R<@R}`)UMcqM*IxoY|3i|I6cf+7ipj_duVYo&9fmptRyQNh=woGD zvkA%njlDOIYARdvhJ&Dhh(Qog5Tc@>B18p5nc{?q5phOANKuqbF`y+#fS7|IlZYv` zgc1rOB7-tSWKsx(nR37|BOqizWhUn+h9en#xA*PaU*GQA{oU%m>s#wvtN$sBs4>}R z?`J>5Z(w@^f(4Dn3jpCWuYTLhsAT3(3BS;qHz|CMml|9Jmv*rDx-3PmTz{z+7Y`{* zPa?)*m3P05aL^**7kJkB!sE;7zZKH8_F4v*h}KkaG*_QsudgHca`*$m*BS*}sg_Y= z?bgh}KdJzG01~D)RivC8w0#?(7aQLYsnHx=HRvPAX&!p4X=EC{zWa>FgDHh-P}nMi z_MX`k%Y153h;Nd_l|9ewFE(?~Ky!timy}q`(t1M`d@Fy=MSYLx9OyrbQ31a6cHo&F z{)PzRTMl(byu+Xmp z_^YSJMRFFQgahsPpr)&CZ^*A+sm{FrJwGzhH=Joc<*ztzj)spUYYD@m?9>sH#l4R*&5b3O!l1>oKN)Ye-UHHwuvgw@GbR{Ohs1g)`c3(27 zDGelm2Yb1D;{rms8uSim@hoGdos(*?;8_UCklb5(c1d!3Wzwwc;=%l#zc>`N3%M-oR1gY2^2e()(2q|tSI?4)E%7`m1Z4ZQ&Aq&5>+)#K9jwFX(m$~syOYLSEr*^Hsu8rHZ*&@ZW zXaKAxIXD~{0A?IySM5{=`wAezYPme{kFd1%TGaJr!ws=sZVroD{AODvT|d=dc_v<} z%K{^+O$2m(>h#u&!I#8We1yhK0rpy&ji_JDLS3bPS>u`OD&Fkg30u-le|r%3S!w-> zO%^p6XfqEciyBDiVbFl71v*Qi5A*lstGnfz@;1(5>3Zd4nm0T!2)1@TPQw#572J%? zW@up(j}nCiNd%?9rvcDFZ`uGFIOEoyM-&9Aei~Dp6ronZ;$Q-naE>&97w2`;tpLy^ zPskVy9{B(e2LNpD#J5g_1nXdY{qxz_o>7wGME~iZ+S#;Vn)joj$=X$sv;c=yq=gNY zRdHUSn6Ex^b?Rv-fH|luR|D9Cg-ejvU1)*Ot%)?SjVEAUV|zTZak=zBY76kc$#&l1 zf-4)Y9jD|#KaKMiTNXs_TRYt43?IIFE@0J`L7QjF;!9wf1nKkOAgnZpDPMXN&~*ku z+o1QZtQ@p>*8IIT?|*#Oi!uMT8p5ZG5)ioS8*9yF$zWA;m}RbY&`RmfQnW^bM~&QJ zx7d*PaCfgE4$a_a(7AQ=70;0mGl`I$&weB{n_3|`TLF03DHNL#Zdn`=r!yulFi+#- z{KvuM_-B?>^W$72k#!dB6UuKrwp85;du73(fkI8$xl%2Ce^qCYm8-IT!dsP$JI%@D znybWnMoT6Xq*U;c+&TQLRFKC27Ml~A$Y(Af4{O7^*Fd|Twh?vWPw)2W1|EL>71fU? 
zwI>tJDwrFv=DU1~X75H(<25N!W3hYms zbP4K+EPCmJ7oN%rD5f1i6CVYbLF|)&OhH*0OMSX6&q5GPW_IJMeR}tn%T6 zS+HG78;}|h*vi$OKSpCtfp=ob`<_yh&t0E+1whlCg{yBRo@NtrrEAbDWI=B%_SLGe zv`uFM*B`-r8YWu!I(z=6IKS}wn1?TL?bj01!8=f-%h7=QV}?5r-Dx76&SzeeFB^jv zP16h>7bmk&Q9~xK6b8x^M9zwq{h%;Z$*qBcgIN zzDFS-H)Y4sGw)0>&~4W!T#H{zL|CGofp=sPnqCk3(9W+fw9&+3`GV+Ry}Oz>uoup`)I%hgR6dkH>imR<0`K zULAK`hRG9EhNg5Ebv1mqFWPeGOxl@pES797U)MeR+b?_XkI&8FweKr`0_y`>o~iOR zU=tBRh@~$BoI$_H5p^v$^j0Xs6Q?2e@54)j7dZ;^rAJ2|g+|_3esivMzIhDJyr-x7 z(-n`V64&?Xro{f~Izlc1waFG!5x1KVpivHrqhL;aaj1(f-z3@2Prx$t8b?fl67x== zHF0WqDFD$XA6(aHoD9I+D<6*SnN&B(%odue95_mE+l9*{z`I@1&CDsDM1!^l5lyv8 zju8@R27(M9p>Gc2R8Lx7hF1^t!gox4=mV6aA!QQWdVSV)$-prBxbKhjhBSK4{fUVE ziNZAz^DU439(}|3)aQ(MlA`F-QjosJ+yc$;;e}nnLDG!~UWjk8-=pSiQr;oQN!KE8 zZrM-&kwn{01k{Z~Xn1a2=g!l^;X&-*etsQxw|^Hd0ICN;E65&_FJ39JpX8KX&VsA3 z*D_ah(>GHa!9}#x&KjgEO0^q$La~dehZ`F!IPfj<{PQl$NQ;6A!wt_6v%(ZT-``Vg zC>0r$z{<2=U)fLH$EAKpkc5Fj%N{@jBQ20_DSQ6u3+~1@jG@%jCz8ww9-x`6Irv_OYBgnfEw~EZR&uo`=WVSX>IbLpuGz@6 zm1|OtHyLh4LLWOz3Fkl}P5hongT@v4+V9~%MiI*0f!@PcSMVNB?9^o0lFa~uta;6L zumOh)Wh&i^e&6^Vf>_o29fJ7qDa|rzoq+i82Whh+23noDRaVG8CuF|u<5x}vUmZz;B@!moL4K8w07m z5>Xb{I<*s2s>-{N?YCsqb|tm(zY`I0Dy7THq*g%rhn0yPv_1E#bVi>#*{9SZ3M5U{f^$_y4rfaCip z&?+(Y>YXag>YN1*yK$~l9Cfnw9oMD{t-fKuKQ+bB7k($aUx=T=A%0>DSxzkwQ$XUI zl8M5+W&rFrM}q}KPgg-+0*!Ev)0!5E9(Q||eujOnva2oSYC&M-lh?0Vql3+>Ot@5W z6tsl^m~I2QVo3HY6eNlxl@X?m3drtGQSz1kI1Scpza?IshdvgMHV?0bgIN3`J*}Ma ziOvLmC9*x|QPJS^&E8Sh$_ouX>KG`D_q0-~Qy|unJ(XLbWIiBEYti=i%GaWgTbpcB z(#eH13uMvNhs{2MD~kXm*{0}CRv`&`(7(UJJ><#YJ;I1^KGI{K>_fwmg)DT#oK!oH zKMFVwE#T5@jdw6dbz2O#waa%f5_I)yi}=L0O;;l8^hnV;aARU3;@}jsYe|V z>%*bUxLWLlo=IF;|A%%v!{LPhO3j9XDt(&hTj9#+r5O)B$83ViD)l_G2Z#Nu4rdjR zZRmNdcjM#ROKOi`X93tKIsh1UR@M{V>WUAbp2*r>kt@=7tyURyJevj_?iU$m38ujf z@8ao2T0&|Xi_;e^G0wupi&i&owzfHcYu)V!n{O(}ArkCKh%(%Ss)NL3t1LUGchJ z3cRA0I?H0t1A}q}z!VGR;y`1L8(nMYucw^ku-u=AwIi~;(8^}|st!t7^$@^?Y^=B{ zRX1b^O{m>oJAD0mY_#pO@qL9WrL8utDK|x-BQbj0Z!t8 zeyh7*ymTSoLoKO5W?k>Fw6kzJ1g}0mH=j26^>PF@#%_JC<72d|S>5kkUR(u&y*^X4 z`SwR4pf7cqh!4p!Y1R@Gc^kA4a5okoG@oT$%fz%xHyY;}ZOMrAh}H@dGBjT(_+L4;g{UUvi<)i1_*bFz0#Pzym3%$D!^;5Cjg{j9Na`UJEdi9|pzoilUD;k%1U1#J zBbIs#S=Z#2-Gisu-R$G}S86{?yuH(g)g4w$d+TKVt|(xkvB%}A08Zh^5WfbbmRNZ2 zH%!(?AT{^*UnkHZKU`F;vMiOpP-+ISYWmKF@2EHd{#po6nHurADQr8?+UTZOez}}_ zqOW+i$ilJ9JhbT4j7NskiDzB2$=lwLCm+49C@GJTFJN*dkcvL6P{ z$K73eCwKRKRNp!7GtlO)eiRhKPf1cb>jFpo6x)k8P0UIYD3JsdfUME==pr+zR7vPS2Q4Sq_m;dRRWKcye!j zZN|gQpG@^ImjHhO%uKjb2zXv^I_aANND6F6gw$+?#2kfFvV=9qg)P97;YG;IoN z(0{w$iD4{ka+PxbNfgRF^X#_mE8^Dy;q*Q8wXQ=?loi040HtcQoxBa?GRGK(z@kC9 zH8=qbG~)2JtI++T`eB0(2W@JeU^reNdPCm|vwrhr;~ zve)itM5SBlM5SC0?zAo95X!x6d9_`0@cmcasl5@P5`? zSMdz;D^sg>iM?8B%uY1uFG$=g8{yj76l%O42TD@<$trL=F#z{-~F?0cIWk~Vw+J7j|a z&R^vmK9Bj*`E_I^(ND9e4G${7lS>;$s+4{ z_XU}Ikqcz5jk<=q&wE+s#b4CvE*{5zA6~8e2S){v@K|p@HyExf>y?|+&ZAvOl5@#3 zk61YNcYs_cKL~t$XOWKkml?*==fZ%(hcR0ZTN*?*y3LHX#T>0reE9=*Twg$HV8rnh zr3wO8EK`N8CE&$FSUa4LEJMIiK|Kz~AL^K`_Su}ORd!*|<1WwPA+EhRCx6ItKJHQe zyP-gv}c&hbP^#WX4!gYX(g1wc#Wib+G2TLXZ1EAHt$ zORfFv#O||FKa5*J+lKUX&nLG^jpiMnzVZwA2{J2u;8Y&2kP>Cj6)%y+02iNn+geo; zO&K*Ppw4;0#d#w&VM6nqLaB2PZD%3%F-ZFW;?h!GZ@f5|W+osl8aE4|@usyArZ9EdBAeRk-cGX+lsuq00G{WNNrT?u zI|9l!`|T~f15XfqJy(akMfQ02rN^Z7qKGf&y1%IP7xnh`3(BJuXXE*KQ_XyQ6s}bF z8gz~jZ&R5x1@N-Wcxy<#jIeBoH`a-MvbCKgj+wKa9rDQc&x~P(&skRbHRo7A!mn*e z;(x-8Rt{n z_s8Un&u_o$2NbegtchjUTUrJjnCo<&IBFtuM?&r|H5tLy^Wq3NxgJ^xn!GKpi~M;S zbUk#`#m4ozok?-4#oNGB(f-9hgrxO)kY$7;PuP_e-IhfpVl@y!NIuxxcf3j(|&g%NN?gCcJvw! zXk6akH(x$2IAS;X{P)x!u+?A#fhHg9jQ`#r&jQy*U+qF5euzu@5}f27d~G)Q@@Jta z!EPCg_%L8tv#O6pDPr%S>BFDvNI$up8ui>Zx%ZeHFiL^>!&`add(SInieDD^dq0o! 
zh)l@NR2{j!lAmUkvNG%LLE`d~9UIsEY_!c;Kf3r~!2%#UC>6=O2`~jewFa4NiQqEi z`T*R!jNp64$J5MS6iZ*pRG%O$9_fAD<9;61Kx8tvekF_(&8h2O;13{B&18n2> zKI){4(Z2Rsiur)8y|r_vWwi=rs%+Y(p>qYLaH?o^-0;Bg20@&qquMUqN;!}7M7O~e z-N3B}WxI44+6890;D!s1r>_ZsG=ID!Fs~GtDWC5CGNiu?!N;FBvx;QtqDHOTQ}yh& z*KX<#O3o;c{hgcN?DKQqIm#(r%qJ#l5-imW6H~xF=;uXq>!vHA>EM1#D#zW>8CHLa zXwNnCG^nPe=bck*Eqr2L*RS*FH>9+bewBMTbrbsOjl(fIW|2CtQ;Rcjhh zMV!#H+_Eb2%bm{v=NI2rXGiXRfI6=QnQc2PR$HmFo)Na8ssO(Ps??X-n?9DcH=j&WHNU@l@zZ{(A;i5~2 zWYOS%9s!yq7G2-h5}9FLCnq4xmL$?mTxnZePX6Gyn7NF$j1oR#_TvV&iGis9W>LWe zb1|C&PqtLp98MP(IZ3n*At9+cB_^?G_}hAt7|mW274 zBJYRBB|^`ip;G@^!_M?0&ni9lp6xYv>YuGKZf}F~T_x$n@Q3=#6)#5-l0~Ssviukq zOujaNA@B4MG-(Aw^zVdeHlN;Of8|DI5w%^hngZw{!4~hs! zTniRc6RLOMoCnk-rqKGjow`sI%hRztSKu`%-+s_`-63#-tWS`wgt>bOP-i^=ag0eRL*K4+G9;FXF&X5 zr9hh!{Wwf7Wu~lP%bs)2Hd^Nr&SSQ3QJx5%0vEHAgX|i|o2fa@T*lT15Oko@U6cKc zxev`kvX6JsjuukAreg77d=YlsX8Tn>Zz;)X?H!-udch<^Cje$wv{QfCYya|%0^oV< z!rcJ+wBU2B#r>r)1a5N~NLUly2w{T2lB^S9mKtw`H$XM*jgFKcmlJbx8vNVQR}BNr zoc?l8Cr4-bi#&3~<(%xm)`K^B2fBmG^}(In}DpTUz>jG9coN7XSu^> zfX}atq6NTnE-0h$g}_s1L=eloNwb0kl;h1LHTE`3gW$cLv@7TG0#iHv=^Kz^=!-Jj z#^CUWr{50k`Fs|089-P7Ziy#ZE(d)d;8w^SI4z6yt|QB$KJI^) zFjj|qohQ}DNgSw`fW$)#!fgVQ!M*o{90jU{*bj-14b9MXFNr_WX=Texn(s~If3?gw z*7?D3_+#T~*bb**DXBiQ-<&df+v2y^kLz$xylg-_^8{dSlQd|%Y7yla2YYESh_w2W zuPo)px6^he!pNevs{JGheh-pT>R;LM*1hDmsY%}~YuQ9V@cSHbcK%tX^*wdUGkH_z zB#Y^7%qOmm&JHW&Z|I8*4NAIae(1LI6xRGGl3Z%dwCkD9+OqpGwXvzoaY-MSvg(ey z?ZJ}DE}CujmB+Au+V(Vju+9Z+F8dsxr%KxggXW>y2v^;oz76(EvP>URZjx&w+wb`m z`RmD(@*8^R2E2-LNR1c$pmU*$WtTkO?Ln$CcdgdL$bXjGK=sAEC)_567MZ>QJ&q`k z5bBN=zO(Q4TG|nc#iAp1daF8rM#j>@u9q>Owez-a~jJqfG@KJ9RuV)DD6UFPU}PW32(>L~r>Ev-f>g8Nflk zZD_buXz%3KXJy+)^ympy-y8Uxg8*t>9m9J7zZzstdZ;FwE-A7!JW z5z$RUEx?=s{q$_q#}#!Fx-Rh7ahr1hPxhK;Xe3X=zAwX8p;o#dIH4HBmtUCI&ZO^8<4*6Do21?gvky0YA>l{?-yYD!ilruE`L$r ziAFZo>bq>$=c%HEdc7b0FY9=%iWatX%!UUEYr8|wf?>7PtX zE`J~Vfc?qQe9iNy_fM2Rkid|8yN6u$3!Ey18iCUC_Te3}Jldr&_;a%yhhzY74>Ur$ zHo%Og?S=$32YIKS0By}&e@IDc|7ap&RbR>z$R9v_d!jg%Ze`Ez7UgD|VB`t;il67? 
zi)fu|#w7Hnp|R z%QjAEKydB0T3$lbzhZNGSU%QfIrhgb3?1}H|Ka>woH_-XxJtINTi}^ya`b0v9i#>b ztChC`?S&L4hZQT!&=Mgt-hDKFwvEsfHotx*3*Zkkm!WR_D+ldE93Pn9TljjuOuR97 zBI4n(@y};nC>Q+|CO}4f1c$C;efJkyi0ii7DkR*zEp=&UQ~0c8V%0G- zTVptS^jXhv|ExMh1PV1GwFzrY;>9q0eCo;P@h{7-9N10vt>EKs>Oycp>(uQ0@5kQC zd!dy@Wk0;cgtO%Z!`JL{x z5^0QEcdUPNt1w^&^hg8q6@{WfunWFnBzQS*7*h%3rdRTdZjr8_&Iad#1`6Y7?JNzkO9W=AQJn|0@O;oyvm)MyEPV5_Con z$sRk{xKmG9bF)cQzdAie!pgAdJ!r}>_h8puHamAjV6E`Soq+EZ@Sy8kAGvCbWDU^i z3`yPtbS9Om?zB`*|I$TsK`ynX!qjXbln^4a(T~aD7Uwc^1LlpB)`@#7wZ2`X1TKU=-C+_WIwe!FR;Pozcj=riS6ZEtVC zoVWb}X`$>>5%i1zinmX|f~h#rxLaOXQ5m~)aJk0*Tl=4XJ-Tajxx&BEB>}9`KWPBi z`!C&Jpo#vkY4v~K<@Rsi<TZoii@6oUm+wM=d=W*LUbSY~9F;-+rpfX3}sc;`@XmDzqZ@a(ZXp zQw}s4#jVEXK>Yo>GDC0HI(fJMz%FlL$RLx7IlU05Vn zJwC#?4l<9-ELcszkgh!fnGG7 z-<_IERxVAQb!EojoMN~sYV1bysm=o~s%Q%iQS z=~M|BGUU2+PESw$i1`Yf%dNrF@sojX%FomFT7m?($h1Jhg#`#28>(9!SO8KTY1udr z>tOBg9$I$i-N4v3^nnOCb9IFdEp9!UQSP$#Rg=3;fpvWOqUz^Bmo;I&xKwsu0jd11 zCb{S%Z7FI5bXL~O^5l5>0I8BO4S`ld6x|s3IM-(wiLaqOx&URspY|axGSiuN+2J2t z$a@!Xs$hx6;2<@|xqas3O7mIg=^K}yV6JdLEHww|xvO*q_;2Bh_`d z#g;ftbnfHBp zWWQIs4aCJ}5re}W%0~>SF9NjL`QFjJe)sO(T8ojv|0o{n?=Hiio}Bf`OE?0i10HJ6 z20E)iz7_iPeXQljP2$ahBJ%^(h?71CE=wyv3+H5u310z{;~U1@A0&ZsG_>X$W)DN| zCI!jMt27MZf8Xc-ll2*KFp}DUJ~b;I&>ONwa7(xW0$iMY3F5?{9S*7v7(%?C@h@#2sg*+aPkc-P_> zftiWW^*UWAaKMRSE?tvLK1DZ%Ycm8xKSLYNpf^ADerlQAgZRaZe5#^54Cw zWS;G?3IFBAI@ijOpg&IjrkMMWgsjD312>sD4d$VyllBhp{wPru6=#)Qds%2qgJ|D?V8y~YJ6qn*n!6BvFu>pF9ZowbmNT?8h-gs3u{?!;qO z4(jHP#K)I$Gy@MzB{00Rc!662!>)@q2NqGt8+>mq|KaHjM)Z$ePYa?P^w4HFOh~w1 zYlJ3wwbN|HGGkCo7)_-@1M}Lfbg`!zv6MNdo0d`Y^8EA4G{W$n^on4Yj;FSm@KmYA zXtQX3IRuax4Efo1aKkP6ibTp|aZ(oQ1G)fU?$YQ$^8&C0r*9ZjImi({v|(^Cm#NDc zJNgat^1FD-*Gs#U|Mz|V&HDIEl9XwXpc>%!mIYKf#E>KN-v;P}+4S9@>E;X$O$-#h zJTso=S=Kkart;<$r=!&=rXFe=GKz!Ee>z|$ynX7#59*Bny(|AW2l%@=lHw$2(t`%n zuo3JO6KZeTG#@5DCu>b)S*iRW@r<33>dKXM z_WxdH>0hf-V0SP7yQ$ybeGw(d7pDrbs3XL`2h?D}cu@J%hmLO;Z_&V0aZhP<@Qi}mg<%;_BM$>MAb;^V$|7e7Fs5?z(SgMLo~t+bvd`UyOodlC{(_baKfMV_wMiYK1@Soh$<6ulXA^Xk2 zY67Q{jb;}QpPh6tLRzEocGeHj)JAJVbHwiRJFGTZu&=fTSsFt~IK;j3m>SX8Xr0zn zIp5Sb(2}o!f{3Uj2CME0fP3%>4r&UNr&^tX%#rm7Y1tX6j;svG4;n5cDN{wK;k^TA zQ4t{YJv3wQ8Mv0V-{a_+2XO99v)dOdx|_E$93tD5i|Bp0Qbr;uD+h3cT+lZ~h+;$8 zeefxvl5d!4KO{`9O=`vV2LorYAhwU7qP)wBvCw~h`b}!y8Tav1X@$2@pVq!KPIPEe zvfgB~mnsr>HPNcgZ#QBzwpc*|*r3W_It&VQLY=EBm@z&Ho^lpai1nNb;Qi|={J?Br zs8L7vlT1#*%8k#o&ApT)^o6&(ECY~$`vPf%eSEb1L$DUYe%}M#3-gR z9O2A6#ciO!aj3ke$0Y5uYZuK?z*{kVrrsc@BCm*?Y8sgzh{3TrC5$O{o zYi}zwxj{D#fRybB;Q~+nR*r`6&C!;Kk8xJhq~jf0P1aj%J>K!g^$utGWtF!TUQP*H z7I4cYAb0#SS0_DCVfM>k=~KXb@cVS{&;Oz@%G0bhlWsyS1TWa$NQ^KTI58M%R`A4( z<3@Cq^udocWtFe~ApC&z)KoCw+mx9FP8{JXFW-Ghl3zO8mHA=Q;Ls@7 z_#;<1hh(=n-ff2^VfSSBfJTG0(5g7~gU?!{D=1JTkiVL)?Yg? 
z^hNRx9vs4y>JY`Tk}EfvlD+=tJMtDh?nU**SOXxAjzn|5G-t5!BgDlRTL zJ@87X{`|*%x4+(gcs>f(&XVeV2AlsK%99YO4P3dJBIvRxA+61XWWC(GVTA#tlD527 zAP~+H8kasIZ!~MU1?02mbPcV#%Okl@$L5v|Q;!RhoGk#B&ubk*JJs9ePgm^?!VuVj z|J<e$ceJK+h=Lu?zd>Iop5=oak9pinF>3*wf@0r(p06@H%}f6;X>Jtyz_uMgk}iirqVPCq)n z3)iv!zp>~3i@gdNKqboYmZSlmd}#*w0%_78&<=0q8&=r2=>pdn2YvdRDwFGG6ueoj zel2eiH4>=OE-*zVEgYY5JZz=E_E}UqPGX<$601g#2#7Vc#$(MJ;I<^tiYbnUfk=!(rm(uKMU5 zk}RzdKo1Q}_%m0-5L?h!EXj?@o8*xbOLivR$e77UjPKfEbGOjlE0!JXjcDy}|M0BSZfh}h`()!5>NenCH#oy`Pd9NFdJES=h6ePTtut3tz%5Ka zi6gzEh#$b-)XQA%pqKvQ3pt&sZTy5~Uh>{4FCfR~>E|=WQG_#6utG{;Y8_n&)q@il z6x`1dH-I0xw0*KnJ2m)@*cs7g0Fzkx3ufuQdGVu%Uesdlu}=1WpYGstwIN>vEtejt ztAME-XlT{Tl7=ISo`88GcqTQ-9YD#%Ox~%dn9n?P^8_~+s((R(_n48J)jv}8w4$EzhJCC+P8tQR9M^dC7nojiAL zQ=Nt*#%UL>90%{=q3gIwKzM>s;gAF_pc556Wz7BpfyKC?tt^)v>OJaSo*LsamF+BFW@YpUOHUSX=$c*;7R4lXwXkDdqpJcoBH4Ni5pfKIU1|K z%jTaQ`#S$r>;ql$m=xM>072tQmnn;p?yxCqFHMvvfGGjoqv{TP zSE~hF&9O8n-fRuR0Gr;Y$?Plq|YvlRX zCNmGg?B$LZeBe()fahY$xT^j-5$VO*kI0!0y~Uc7M`I29c41^=VDU?87tlgv*#!P? zUfQto)Ew*~W)Grf_n(osI-Yo^yC zI@VPbY~}RSL*FAwvcfN8eO09sMt;pfgG$9k@zkUng*DjK(zUQ_Jlk2o(Fmy74F}!J zozyd|%%0dX2hsAPB}nBskmF3T%Wz0a(j*vJA){e~kdt4bUBmLVPpxDru#IuG9g2B{>!7uom4t z!bxB*p@Av*BIzkuB5%i)iJzv48iTda)IFWEuB*mG*^~1+kE@umqAwoCU7T)yt%m3E8ZKII$Es={u~3hCFJt&-QIVy)8E|ehJ(o9;I$(IKcw+uTlyZv$*y%Ah zIOO03sL6gHQ@7`i0Hgy4d^nDQ+~RxU>#$MYMN+;ahR~4_ zCU$so*cRPVdiEU=o)J!xdiwUl(^GS)*4CE-FHnE9*P)yuGM*lq|9y^PoSMDW#^o2~ zuD{rv|97(8zh)kNp~+{4?k-ziUH!}B>)D%Mw%!^h_1u3{njzauVP7JP; zdwo=Pm%#r$Q!7b4E-Rz$eau`quF5C7c(jm|rCKBUkJNK&e=&y1FN&o8NTJ)gHH>@Fxm*PeGJl3MjP)EEv=HW%4%h zY582L^8n}JnhDYQLr@AI~PS4f$ zR3oC_+m$E(RlMTQmtDXs{u7uKE+oq;If1-r5_|zGUDgMv>L(FSi(DC9iMYwNdD6|I z*a5=_W;7gvmFu{*@U*422tjI`wkxqnN@S?{i**2NHUAm7pS(r3M8b~5pxh8?I5B*lfI^Py&1XVH5jDqlRE z8iBF(w8w6E`gNE4M5=hs-99c)U~kb-p$a-D8t6UHWcsfzTS7VuwSmMBWm&+;8#PvE zHsSjVguG;j#Z(EfiDLm(*FZyS^h%+tfT!ampzPg)#t2>Z61(hnpDR7yse7bbYMQM! 
zKKm-kBfZAKv}jW}>2No`2+%;!gQyBz5`~PZNXZ7;A>>W|JTIBPR^CQhzz!Qx58zjn z1F>m9%k>llYfxiOM3Q&hFRQ$=_f)s}%z)bIH5CajeWEuAPdH-cPyMMQfEtNGDjt20 z4)g?EaB=F0g}4YU$)z47O32TF+2^3+r%-@ehqQtzy6SmU<+=-=yYnZmo9pQlc^ZM( zhp6rDw22-zQRt#G9FkE-xCt%$p7y}*uISFmCim7xR)0*Rd+L-LczQMIC#&1-d=R`F zyQi>EXqXgyp|>iNDP*k}f00WDSIU?1qLgv-S`FzIJ*P%9^N*1fphuKwZeYn>eo#Sn z5c-RbNg}fZ2hi#bzgX9k76KgUMda17&e(Xc83J4rdMor;%{UFv0F!qD#N{TBBy7Cl z(&wq0GPPjZseo{@dIEClk+AL93#)jHY?&+@^5e!5%7g2;*9oP}R_u~Nlp}C!26@{A zeF<_i-k~XlsJ>gf=!@B=>Fisx9E~x(8>igo;>xQ-B|-5*b?ZaDo4ssb>4z^@{0ef8 zXf>#wtp~vLtwK(#XaIl#YXqF@oRuQB=Lj~AQ<|D^ezj&jpV$8tpJoc_bIF!Oop|63 z*&J0F(^RqD6~u0p__(Qu@JxE~*+wB#s~#M(jsNbD{ZDSB|J%M%_}2ZWq8fNM{He*m zgf(sY&idb>0NODb^L&X+gcr8PhpvJ?5W=8pG+%jaPP#1al>`tT&A{^(#OTwwz{!tZ z(<-}GyUO3@X5a954GFNXBfawSxpB-!O5!+L(>c%m-`}N-WJ>sV*{-&Z%b#vO_;K&5V>^!JH}A3( zfaHfOcot`x1flB*t20#=?#-4n@~TwstjTBTMGOTR^2zuav#JuM>xsD?Y&&9+s+QS4>Uc1@Cx9++gbb^ zRKAktx#Y=E@qHn_q>OOxUa__i^9{3)^VYSoYVI20N8DRp1ASx17aW$#1PzU+tf|ie z-m)>h=7+s{74a;8rV7AKOw2nx^7;AJyEB0{zFCxu=dJ|Utn2ODLR^wxe4|X^%gKMZ zbG9OLJpx1@R|;h0=GraDqC|n~Vl?;=yO-@K4Az{e(9?~dHPAnNb*5b1!@W0@nB-|o z?AlCukRtpfpZ}=laoQ+6@2=6YmkQF$avlU<5_1q;p%1+o2k*mPbLUNmOAbN4n{{==K8-QbC>g)+48;8hEl=JfN*<2Lv6@G=d@O9zL@g z!w@ThljF^H1(x$a%Lke8SyEEk=rl!g0DbVWtI2}4n2m&r5^0J8$#0B-vT17};|Aqp zyj<(o_M}!nKbTv`dCOFx5wsry6rrIlT>kpSX{~?tu&Ja(1Vszi(&e>OL{v(y8Pf9AHdwW$z;#9JePi|2B zTV&}!A@%&1eDqt-pNiE#5cne7zl3Ui_PIqsYFDyLs`|8bj$K1O^>5Qp76dyd;itgh zYd!Eb_3aDlb$x3gcD(I4UO{F?d10Rae)&DoVs!p{tXBP>#OnWB3_(k9@Xzv`NJ%7A ze}cfjOMnk>RL;Kh45(rxS#p6c^V%-sFv-#kKv9PMQ^P7>Xz9iuMK1vZE^ZS^Z_|Fw z;3>w6=Ix)16Ms>Z+$|}2+BtQ8cF3l8xYf0*h%$A4GiB`ov-49$Ylx8^9{xrrZ|X-< zjc%^^G5MrXvi7n)`HK|-|5RS|cZ-4l@FKREg>G__>PCtI3?kZXtST-R!Gr7pF8_w{ ze{LxV6>$2Q0(y4dgo8s0F&(l#RD)i9WZ{x6-30e^qzw2xrsfQu<#e3`1$6^`IpX8a z4ZM$gJsJn}+*qg?BwYoj2>L>|>8rxF<6@BQMEku9yi8vW2RBe~hkMGN)50I@webpY zXT;VT2HrVMjTferT=F;bEd^e&bTW3q+qQ6H4qV!(+e?82kAYGoh(E5tVq%H{uB}e= zC(!ABNl*>psuJ_v2OLL&6W#mn2(cjD!<#Mo&}|nvGMMBaYUZ-AK5gbz;{eUwDW+`W zOi+BB)H-{&-7im+d6s{zb^n`#q5tBqL*IJ;)XsO~@L^%2Tvfria~Gi;SHFcHa!%fc zjMs~Ym(@PD-=HX#$W;@7kg*j=XELv1PT-!5PNiu(DV(4w!Jtz5K^Y)tM*b0q(SLQg zmDX+8+_Wiw{cpcxl)u;dinpCd2BHYD1awUT0X_!K+YYRJu_wq{4s~DcbI_A&+;K-Q z!lrJ&=u|o)>F;O;sO#f^W6!n}CQ0mo-^!W3^HT~MUg(6{qftVT8wVqw^#5m0!T++S z?X<*qWcgsuz2{a6$siN}C|I1ake4~)zJMS$}Y4qbz<6B*Eei|Z;7 z;z6nP4Rao=GdPHbHAncyM||>dE_N2a$_^Lc`sa4llzU7Tt;w!NaA209G3enK#!n}y zqc;aecHPHz?$0@unA>Vetwj% z8-@LHANxH2f4DmUJ5#PUF7X4C=Os|RDIt(>J?(IT>@K3)>Yy1M#Zh^99V(qr6yjP= zJcoa^DQ1|zS3C(+4^0qK6Y*?{64hpxPpa&A6zvs2raz~*m`Q*HB z4c%)qsg$g=k01Surl*CsBNIn&t&RIX?7eqXlWmtS9$r)wgebj7MM0#*LQzU$LsZ0o z3P_ELND~ni2@nW^bO8l{R}c_s(h&$KAQ3`Q5s)rH2nj0E-^T5KXk1?NO_+7-uK?uzV>x(ymD-3mAOXpNoKsRYoe*kaTCp!-}V#g6zDt( zHo%U{%yO7d!1c$&YOZ0x;_?l}lXv?(W=oI6+nb)syJ0Oxg3On9US2w{mQh0fbwD(r zTS99%dOPrXDYQRbclT9Q1AH%cdhmd|cnDFQqX#gOMr>^@SLy&WKa%4D9tL5sgsD42 z-gD^n!S5O+hf0>_{D54qOUx?JxGSK#G)Mp!_0)Z>9%%*lG0Jv$?yH*t)KNM0&Nmf$x|nPow>O(da48G zXAFMk_Uv5kjv^Ol9CI9;*Qa~X;^w|4Ivf;w`S(ux{#QV$-y8zcU*i|?@gT49#8!jr zq-bQXcYv3{Im#7o=w9)grVVCgaWh9o%E!U1C&+3#D78S389SbCbNr_}_EP!q#*;zU zVgU>sBsoS|7}TF%23_c-zr9`mHYoT{F%uh}03VWRl?S38KmsraoTqfKC$)(C`sZ)x z)Z^vc=#p4Bi5#=<8Nw6opU9nE*+JfmvSiYIps-dO$@OE2BS-g`=Mhb7Ej0`!!RU58 zG#Ocq-3SLBfwCg*)SF9CgJpZFDM*l-_A$3ybeC6-b>5JkJ9XFML6n!KrdH{k)^nk) zRPk+H>k-%gJ8zf3FH3Mdfd+JF2cL?9%<#fyew<^((+L;hQc}KNHoz|?7#*Jp=@aPK z<5Az+rtwH#&0u#^qe9)Chib76zAs?EBd_cO>w|X8TMt||-*XfBP1zC07XhtyAtOAw zCTSf|9!P+9Ws(U!-FN59?P*+dhp^x;Qt^B#Q$tL7NnIL zUH$b{J%V2Z_+@m+r2O}6nhS#3bLZZqH)-&k!JN0s#djr`-ky0ZU|zZ3XtgbKa~Wqb~( zYLLdkxRAmPD?d>cCS{iQ;w0%+BGK~G(ldJOv|AR0&O-zzfz2RCmK?y2C!wITB9FZ~ 
zWM=UkBlnhN>J(}-*D&`T1=^(f#JFaHj0jHshY~cBE}+0n&gHPG*xSLn@Fg0<(HsNf zc^0l2^*~vJSBjA!m{J?Hp}WsDePe^LXMu@Rd6Zr2n~&m}g9f#c^FwdhY1VI^&q$}P zDwn&yh&y0Y(NNV8=zkci@Vfy3`9~!9y5|xPfEUl=9!Nja|E$)`j6`!Dw+iAEPm{Ah$M3U^)x(9HM4P^GCC4R=AR8 zd=YO})tPI94I~z-C2J#J)Q*0z5$M#yx zUO7Mv2J1|Mw}C0Zi5wRdk}I%$t7Z8`&X$ln6^^_~u~#RW<(V?g$JWCV49~|AQ`Hn% z?#VZete;hqkD3?>!UWSc>%p661=s*hlCbBCdca#D{Z^mgJ_>XWD^HnYaU5r8X>h$` zSi63}Pz->({aS0qQf2Itc?A#dXR5%HOx;(DM)MNfbJ02q6)ZC(wL|N48{Ouy;A%aY zT=TM~wz9G|w5zLYqnDSK*5&vy+8}LAH%26ou;W6E?6zp>-It@q4^CJex3YL1qV|6` z6Z_}Tj$Qjpy?%ZWX?)5{g>#<2(H*qrC5Uyp^RpAhE&CGng8M1I4q-HG8q!gpWLhKtK?~cUx3#=~wBjvRK z7e#6UgbWExE7ZZ?v)*GEERcfgDBt7==7EE5wq=#-J+k?Pz)8HkM?iT}GgJPq{M%!pCW8DP?M* z+cWn5iE14=T0t;n7@o?bA8@dB;2!NE2OiRSe;6ag z%K{}RAZJ;kVGXqwo*`Upx)iIVujGnBA;ht4FpD5OGW|Q)`x3=?+a=W}fff zv1iJx|Ca0hchE(J{#uWf3szQGEZ2Ytl9Y89codya;rvdhzIklNTe-JTWGk3f(?rvG z7kkd!XNzR+O9P{Y<22+}qy(u6@Te~<9SdexLG5iQF-j3inBS8nYozmn`eKLHF5yXq zhsB$V)t@0p1O5@l4RUcm_=H0kRgyQU=?Jjq2)A(XAIuWNUVVdYmDyVCqNPiT#!B&b zjEs#``r3}1fYc(jpH7I14%BR-9StD?=}KM{*gXQ+@!@%CK%9gP*-gCqCDFjQ_)60! zE=&}Fhc^DSTcPp|c)6uLiQn6;`kk;<9*v?fn)rvo%-;8|OmEcZeHh6gwIgpTS>bPm zRxblBi!li}kbx5;NW2Y}`$Uu*FBkez=ND9S;wf2+jzCU;bx*|duOP3tA5F^%#MzCo|yPp)yKUxhZ z$NhfM(J5Ult)JGS$IjMWoWIinmsN9qTxzJvltger6Eif8)CB76%YXorALi%EqA+&= zWiaBZ+Z(8Tb_8nl!nTm4eQ)1JKFJ>=#C#mHpCXA7b^Twf;&fsPPpDXv%SZMk;do1;JsW^FuNA5Yv4%{#I#-8}UD z!1GN4bG1g!IzR~k;0}Kh9(ra$Cir`47ayJ^DzI?uP-xgbg00al=nz@ z{oEzWJsN_|lmCbI`+^2o%K3a1a~9xDd2!B8NuyP>J)w~!unbgddQtG~yjE%xWgQ{G zx-xX<*!Po0w#?Ch?)N5|D_dU++X54T(Q;Uy1I#emx!dtIwSYMGmI(TclvXT2e6J#< zK7%aJCNPIE!j9}In~=j!dsmaf{NC;3L}OHslKP0Ehf(xuOOth$N7h+V*X_%-po(ky zm`HkvAABB%I5`m~xnoC2(X%}v8+QNkx7ZDTN6jmY;9nt)Pt3#GdVm?Wt{dJ#ubO;| z-12!e_zOyjsOt*?)5n@#`%%hUbfjJM>pnCF?P=ZleH+W1TQN3860Qz>_fcZTx-K$S zclXabGy8tBsH<&1njGY8(^%iX>}Y?!{uQO4YJBfR-2 zsUx_m0W+^Lh1qnfe=On{oP%>f1s=WuX|VMul1aMrLqSgKdvV3 z^9Y*^lfL8sc1B&it(Rka4FCD&$*xsKF$wI6B+tk& z<=mGZ)=m<=bAzkq`xtNfm$f#~Eqe3%v0DJm{$oyvXG7Nda6T~oaurU#Swwl+^zBoz z)E*ql@qD4aQ|>PLYUqx0_nx!5dj_Z{O69EhKCFZ(sfY08X#WlPPzRIN;15!seY$%Qq{EBQr=Xh6(UNpjAHP{>>A2pI+Io z*ZP?%)gH#z{L-!H|LdH66ko^A^uUX^?&7rd&hMfFY{n~BaXuqSH$NOay^m5acd6>h zd+q4=t~bvGdu*k+WOLiG`8Lrv%=_LQl-}aC(B|*Atgf zLgGi`e`_n;L7e*f+kWV%wjcnE@!%Ir7AV%% z`8kl$9xrEK08dn5POc%a!dRv&VTB)V1n`6Vazo%J$M|Y$E%U2eZ~m59ja)-K4*?FT z2_&yI#Of|mWdVOHN(TE223ekjVLN3>oq^!SQxMX?TH)JUIV!Ly;E7A#K^6~J!; zCJmHGV17Gt9QNbb5se{wa~0EY#_PZt_~p3SoDF}A@d3+*hey1Cc8f7nq14_6`X0CQ zGyRBHIbHitJ;U4@E=% zR`LTobIu-$N6MUQR1u1Noc7@5%QmKZ@2zt!=W-(B&Kw8q*I^<4Ml8_Zq(c_nv;@9z zEXNi^s%tO{oL%v7G7AajoX$V@1SP-*qA_MqV-0N2ocXhCoP&)C-q$%^j7EBFnHFze zMNvK9A6J%@PmQ`WgOhIJolSGs=`(lKEpngjn@+=$&!0PO>7JitZkV50m|vFWd7+Q< zlXUz}+rI}Z{=P>--6XQw+&-wAZ33$^=i~8G^JUv0dc^h!5o7e(lLbRWE2h?wG$mwn zqcS<|qROj*<=8xRxwMO=pk3QA;Hy4&=M9Gall~p9jj1f#{O;vzzRx+!W`>GvbMnr- zs8*&&qXR}lbko5?>r!JGogCqt=S!_Ea!%jIn8p)^c0!zpDzV+&(_ONfo1NwF?0(T9 zmC$BIeC4n~O#JZ$mTdHtvipgQJwC-w)l;AH&gGKgv*Pd&zI>#f|3<`xOrWCyv0SSeDTK z*8ajI=3WQG(C@)ik%+hBi^3nWhF9A(4m2~ZJSd$82Z@@f)2D}0e+{%y%+QjH*M03? 
zi)m;x&QLPVM8le0UI)87o};{8F1h9t)>br7%IbggnX zm*r()fA!mh1B%Z?tM@8U(^wRuW^un`Jo&1gph32(MOn0Gp!kttd=%I^>p_W{1NcxM z_i-?!#TPkzmG&6=lGPgGJ2Ddc>m!PFz)61%(F2~DM&AfFs}Ty~c*E+GJjEgY<%HqI zHAIF9iluC14fbRC8e%W;&>Eua5)f2M18t#LZ|>ml8P04l!v#1|>wnYRV+e*V)^@eA zKxzQX9E{z}(p?E}aE7RF<)z~0DlSj9O6Ngz>e$A?*`LbB%_8?mMi!lwwq7@KY-&?` zIxr6#RWJGI%sAgZBYpemey2X6xDS=H#s&{54ap8wA|d5x&G9M3Ma$&$0$oC84Q{E3 z6{*#BuH~9smt%{p+k1moitRf;+^jBY<}X!V(98ndn za~)70dl5#rjn-a=5p1{HQMpInCyj6AzWQ zdwQhB72&*ZdpZ|S(aO><^U@iUsBcc&znbZFFf_H*kd~LL&eW6)oPM<#uVxVQB4<5b z)DFW}5YJw3Eo_-#U{z?HW5(M66}N6qYIRbG_fKvn7+sSuYIZ$j;t`WatWtR2l`|(E z+H!f{mNyQ{>PLmEC9@BnYq2apnhJtj(u)~2-nGNv<{D^a$NMOb1OmxShq#DfpnOU-gB+E;iS5micukNcJIX>c*OCTNX zyHxdwtN(0@)s*ihFP4%2W7VZWT3Qc!Ar$jTD4!?`hz3Y@5b!9bPb4(s!IYvY)3w=K zo)FkRCd5*T=CPBRxXm7SSIeFlJ<|VKt~nOy8)n&8mSV4k!==pN8bbpOpN!u`vOzfr zrsl@*fHw^+)?Y?8QE2Uqm0?X)R%3)tchs0z?Q6~%pw1kuMex1S$KOtnp8x24XGgDZ z#Y!fs?#;;?zLbp~PKqqqa4mAW&tX)+KU$^!Ck4!ZmR||{HMT-tp*uURCWDlRZ&7>M zKMM-aceqof=u3$-rpi$C_T*12Ixs#ICqXXOIerXqWP;PqvO6?&At?yzV28o3FUCf) z9=G~2QKB%t*-!k>?%mhY8L`>#zK+aIx*$m2I{%u@{Rdw};7QR;fL_>*)U@yA`EG-W zXKH#zJ!;u?z745ji62s?biY)z^y%E}+|g3vn5gcce&JRKXxwu(Ns(Dg7yKFE(Yt=YNp68GxbrU;ALlMLD<_sDR>S8dV8v2xeBSw`;$V84KB_ zyqy(K_Vv;DFc-uWJvVtSWv8oF<=DvFAGF?eBckMzy(5PoQN?$FOCZy;%cOrz;Gb5>*Kc_L z$y`)4KUJ$b&7A3~PJC0k@%F9rF{k(5ci#A-O)cCmi&?%rdA)rEah z3r~yX67r5#g}x}XDvZcG*!N=Uz4=jGdUCmgsx)Y4e?a>2wbjzJ&^ouEil?g zf6w4KwOgC1YfGP~HA|_?^bz^^@^p`8xLMSfvb$-}V!TrKtKbXUGrtaJ;*2hMn2B!g zSRd7Ny21TmQDUB=$w7k{v76_8>S7bm4q-g8jI(MCTSg%ZjhvJdJ#*Cqw`nt7yXD(L zOLOc4j}P~KbjGLK9v23MR`AW&fW#SbGvr*R1Zqy!)m3m{(A`iV7!lA8!K z9V;}GDb~QJv82<*iUzS_4ZciDByC;oOduiE+ebgLVPD?q#V>-ogREQgDyOfKmdcwN zx+T(K*Ji>Oms3_{6&IgM6zY9D)c0Xg@rKQjF7sM-16c!yUat>0>k+T4nUxT^t^*h- z@ZRF)pi|Z@CG|3?r%Og2PnzxOtlFF|s{VdPoo6p#ns#>Kak(n^gKYf&vAfFqaG%5e zU)^B;&+h#H`C$JhhztG#Xk%C+U)A2DQG%CfB*iF~oU&`XS44=NP>a!>Q9@Z)^JzMexaWl$H$eWCUt=hNLqm(2ai=s@(Q+-Xrk9e4RV^<4w@%C2 zCE6c4UyycXLVBRLSi^3Xe{Kf;!52J4k%4QEKxz?Wk_;h}Rm~GRRwEOT!L%E7A~%TY zSO`hiSbT9jLmbX=eAqnKbyIR*`T1S%tCY8Q?zPoBf8@$-#pd#t%@)aaI#Q=&Bnm%F z4`mqaUAe3lkQCZGWS&D~j45{#$_+Z;Kox4cO!~FtOw{_wfuh2$;@I-&;uGQ6 z(Z2NWh1Mcwj@&HtY`a}q2c^;U?TIX?>AGC$gm8%n7GX! 
z$R1_;%oolT9O`h(=cDpw7OGeWRucFdWmsVT9iHKsIZW@H37|wIu^m~c_(=#KF;NS+ zd2vLmaUf+cPj6mDM(A{mjAA-0Sth*lSgvXQkBU+HMVhH^%R@W&e>mt*uDV%}(9(*D$t|tNR$w9sTyqSDNvzb}S@CBh zvN)mqG=G+L=&o-P10u^0f5_16laZ}=D|)Ztn3YVgMRUc=`65Z69?9kv6HkFTJ1~s@ z7C+qH?B~*O2y$l3x0cgV8D2w?jYzI~O+%U&lB!pSYvy?}@lo;R`_dep+tNl?BJRxD zv;?cw_oLfV3*5s__EghE;|Qd`l6 zvWr@!zViwM>Nu$4Yj26Oy~P}_GvdO&J`V{lBZ9ms>?mpck<%v5uF2Lcn*Bt1+uT{L zEYFH8=1?;|(n+jtBy>b4YZHv)i;s}@ezW&Z_}Y9)wSB`01kd5O-8G#4``{$qn*%^gPafE=}C~A_P^8 z64mJ*&KYly=xd?~-C0B2BW>btSLzrm2*=3V5d_~hDww_BURx7b{fUcKPR`fr7_p;` zG%$xc-MmVfuIiglKT`$={Qu1~?0f)Z`94i@uFYLZt^J^E`s)EzPma|07QN}IB6Q~1iR8&-ocW4{aqpf=PF|lOz%TATWTFAN{mw!O1# zyX94!^DakH#Ug#n?T@uhvnXZ8k8izQFXir<#AfVy6|m4T;Lsr#mK|l8t>~cf;)HB; z-6>}JgU1eeI!|4A%PnK-EM?rdGM;%?2Kl%n%EuPBd7{XoG@_+@k5$!!U_sFs3+gdB zoOoed=AN7!nFo)vb%hR@tH}j75T#&&@vM-+%xzVWBR#5&$=cPHbvu}NB*>XKrJAnP zZ_WJe7~mFndbHzv^t#rlqj`D{>8%ZZOiU0{>3U6Lps8k#QCW&=ik!?yH`$FPu;hBMyfb2=k1BFrs0pZrniLQsc(jLH;g6qNu?_ z-}#2>>d^6>_;OSO$|D${s^oUARP6;`E?1v}1<>VT$`$-Nt|62fYh=hAwK0Vkp?JWE zuz`Pta5b6udFKM0y3 zIGP#g{7A>Lb$HPYQe}i|L*wYStjOR^AQ@c>oELTvh>X%+Hox79m%o#7)EwKAlrSf1 zpjHK)2zg1par8L!+{n-n*4sR&^BNdC_R-U<$6p7}%E5iF-+ z4YAS>K#O0S`N+LS=NUmlz=mePNQ)U{?CU6GTPt0cnYlP>uhsv@#qV36%ofztJ=k<5 zuJ)Lb*TTduGmU^lC%pdrYMUaV|G@{n>CbVG!0W22IzI;PcpN;f_#q>}_eIC()8wM_ zX@hJpZ9IN^_w5^UozNk+X>5w1UWZ2=tm)uxQNBmY@lfyW3omtwEwX-2pVccq?K8+W zH_1xG)}otmpwCgA6={P-5;c&Ks)Ng@txZKnb}3{M`$dUYbtL(Od|6)b%U`(Lhc-yX zr3WikSc&k`mq-&`S`(Vy66cr>%zSs8KzPTV%PL0Ng2-7{r?UOJX#^uxsg6vY!r}|J z%~D6d)q|Gfk>J=&RaSSDT%>Fdle?OK+f1|R>%h%@h4(u4W;W;VX}m1FfI~Mi^04mm zI!RUzkNt!C%hk};{7u1~q;(}kb*O;3zWkoob5;NW-;}B5c-0fL(|<#~UTnd%IGy)s zIif;)CDCZ?2wCrKP1D8)>&lx6z}&~U{Haf*=m1l=>bd(b+pj zJGhbQCBG#O>psV;N%h^OI?U~1`o%e^U8&+5GvvAF?Fz&?jG1TRzEOssXkJ<f4NTj-v6wUUnmWr22J+jeb{K>B()_QcMJW;dT|BvuzQ_b0awLQ#3dwYX~!BIQ9x1-aRx+;RKBm zZK`-ri6+dT08;3XlSCEkGYb{AoFJ_An6I1E`qFJj6ZzsN53>WX8eW3@^78dy#vPQCS19a^?YI8Gg8w*{@&m&AceS$8(wKg$)Y9_vR>dlgO!HHKiM4Zc{WsP zbkK{aQP9Ddy9uwfU12Y1L>prE2%RtOSJ06NHt{>ydC=Ph>U2A+Bai#Sl0uphXOM4zKYYRLZLr3 z$3|6b(>#B;h>5FNBRKVM8`Z1Lcgs;u*WKmqA|n?gl543hdwezwiji&kNkCcUL|rJokX3lhbI26uGqF zoViZ4`~jeAiE2Z_hmNrkkkEr=q#$?yr$(dT-c;Jn#dQk^z6LQc@)#ACbVMwTXHh#qG%P2SH?KTWI^lLny2rD)Fg)2M2^TGWB}7?{ z17L-H=YN z+uK$GZj_eLV0{q!Syo*M&uBpu#{n;{aqgJMqI;cIJj@>{#+zOntn)2+f}(lvicp%N z+zJWClbUu@#)|?>xl+hxioDU00wR)yYVzkJ2?f-&ojub|d&1i4)S~aq%h=DXr0KS0 zlpkYIL^k@)VMY!EF^Fd*SBLi8vYh8%juhjmMSDg=6x^oqe1^a*ebIK{Wi2NrhvN!yeEC|OlCjvvl_O$M3jQX35%v=nTxToh5E3ySR zne&JpW^>mJ-{1~I1>rfbLfZY@J((0Kx3u^?7af~A$K>Ul=of+vN_ZA@t9@2?q8mjB z1omNkIeuT(hVlmc!YRNgU8X2;jiJ$>VLm*v|N4{!T_(Y-ZFMDDYjD-&G>SOB)?oCb>tj3MFZ!<_b1x<~^$ zkY&hDW{sr>`saG+k*Ze0mlC$Zw{b()^9k2_@vZz_EL8ZNj9V70SNcvu%6lv0dKihm z89QUPA$085XW-;jxf?69!zrN4c>)KFLqclq>rtD78hQ7JgpT*vcIn_m*bn~9;e(h|K zQ5fox`$~=8@%#KW_s%;!igz zYuPv3^`0ERAz|k@o~->*fY-~Fn zBQ|Vx4Y9o&B7)x`+vmfx79P%T1*6FmEHm&|m>uPF!CIhU<<^0nJ=Kg~t?a-p;Q&hh z4O!{UQ9CHHhOqv1h$Dz^75epAQoxxnT+ZrltHlC`YFN#5))HF%&Y84Z2K{s3+x0 zpK3qRkg4+A^Db+IK9yon7F*=Dn)1Y7>03#_wc3)9fJm;S8eiCwV*mn-K3{&SQ6KIj z70v{!v#t-N;KeYKgu@pd;K_FV8j(%?+NGB$^;MOav;@>UaAF?O8QH&$RkkTJposLSe+% z`^1}A#@VNa6Rw~(}b=DOQ%vnLSv*`27hR$j-g|1L5ro~eA#<< zu~aCbW%Z6aBg60F09WJgC(qhvqi*{9&DpkTW`;MBc<{+if-7h4iw69mW?*X~?u!mf z!EeZgy;&RD-?FBpF$P)f?!|eNk(#~}g~m&K2`I_d$cx|={H`YTNT@&EU7;Zl!E=3v zoE&Nb#4P;%L97(kyIYw5iGtL^AYnoFE>=NwgJ>S1_#~@jOc!jYgstu<39c11a}1h~ zFxrX^=jzk|-I~47$GU3yfSJ|g_|)q|D`{=T&d+|(QeNK>V5P=m$Hx;!x|cG` zmXS3$S}M39Ut5|+>?h>ZdA1cVuTe4hGq!4;ilQB3=nO3o6qpg=s-^j~X^84o@`DzU zQHXcLs6gMz>l=pjdZzgbEal{)A9jzbU)NOv>PupvNPWG!*H0J3s@T8OrvE-l{qrNN z5Gd7aNQ>9$E4GxC$2JS7-eSHRmQ#ma83k^C>G;L#YlxqVYY4gp7ghfC0Bp2da1k6% 
zR%?iD==&75G?2l6^h1}2Ujht^M+tJI&(h{=h=N5yPS_9d92NW)>?|Ivg4Lw7EPpO& zSR|T2Rs8qu_tp^0a(x=VCgQPQt3fit*>f-qM83~ezsy=lzYY>X+sh?Ok~F=j7+_3% z&5Ha-OOXjGUlbf$k0SZPrfUdihWHqP11#s43m}E-8#0dA@*>u zY9Cq2hQVxlK8C~O)|g(9(Yvxyd)H!FPVN3=1g{v>@K^(^Clb~Wnw8*4!Ep^p;G;Vh zn@>C*v-|sN*q0%y(t_GTbj`~h>~XS3ZgqDO?Ol%I?%Fk%%U-#=l~6NtERo;KRH)zi zs^*c41@h0|bQvL_z@TO1RrsS$DYdO!4%pa5=>{1d>#fefkGHq#W=>cR++e(X8C_OQ z5t**Lx=E{_b)wIlr0DaLzWw4r#$n8H0C5%r+Dua;Ao~r2=@_8g+}`ei;3@wm?XE}j z;z^ghhiBNt1a=Huupy2K=4z#3&ldzgMs4^~2VI7EeDDr~p|yj~u{1iAYr4Hx=UR%w zpU(!l=0Mtcs8-tVuC@M(%qQcz5I@RGA+D4e`*)>wZ*(RNIm=^-Sa>S~`kBFC71Ysg zC)Iu5$ZtcaT4)cMBZ^_#zd6t%Z~SJo^G#W{ALEVlMR}c-yZdG|F8ONd&mBhj5W<)^ zDrFO1JC#|EBA}@T=_BoT)BK{0&VzUEcGu`ov@vrp#M?XQHdG$d@##|8XV|)@SlU!r zfSXQgDjmZ@YGHh(VA>->ZCj%_)KI&IkmjRs&ZsCbRp*xIL{$!F4kUUW@1NI7`@Va) z$LZv;=LcR~5fOch8nlTFR^b}6f@afP?{odfyCpp=Xm6Hiq)4J=_KeL-TNBN_W`4W! z(}Qjs*tHwCHlT05+i}V@Lht6n^G`wxec;Y1@q4k7m~F(Pygz}sDc~LI9&l%3X$Ksj z2ntI~!E7Yfb?AI+qrSHQaTjD=Zv2d-LQTc&t{UW?XL_ERc=V0m29>hn4G-=zKPgGv+4be;D;Xfvb=Q z*jhs`Y~H|>>emZpbPAM4)%K32Elw(wc&$nY(>(Xutrtx+{R|%0+%1?B7Wh>Q z-R6sd1TNH>=*;?=4zZCjd=1#A&@-S~UN5|iXai{%FwjCCHnqpKs&LE~`9Moubl6;d z3s8+Bw2{Ovwr)qcWv;(^7s0EExmXZ$Xgu*ycA8F#+;iVV^(&8#weP&OU47?``}++N zOLF%o0*4G{LO2St#0?8gn6`M+Pz=|5e9e(8>EI8y;S^MYIg)A9^eZ6Nb|{EoEF9V^Z?25&7ogGK7nk#>YyQ&wXZu%1zt>L>0aT}_M2q=qCjWViRcOZCPJc?3X|UIzU;1p z6nCSw1UMYlD{WY360^(}`#dj`jD-H*zp3Gl2*%`8}B4PioJ`0*8y z03ctWg%~S0^V=^}R`oA+bU2!t_iJ5htmxO&-d+MHowDxz+TlSaD-Qi)HX@MOxoRO$f&DoNVjMo4k zVrAROC%wC$pGY*lu(@9VLeC{|2t)&TD@Ty%&Z`EcV|^1zLI*1X+dI63oy&jNb!J5o z6_{C3i-bB{c&1V{xxYF++KW88hsr!2I%LcL@cNPJ%U-f|=+yS;@-2ca1-|GwFLVhV zg%M#Pf2)tD_w_M{+HrACSXR(VGbl;fvJIx#$fY@n78^V77>w$ON#WZb>I7YqFppzx z$Z{>y{2kThB~ehqm*G~X)>%~q|5@;Mt|sixx_vBxik0LFjMJkUQ4y-IT+*iqQHRbNUO7W!yUrd#{vI=P%(X%(4ke5Wn_{@OB)m13(dU)Ql6pgi7N0*Xbmw@PDKj>D9(y$h?nLXzzK{_{np?uEUBk9 zdMu9sy~2%YW2ZFdvuh}Q^V_}HbrDa1+S#;$3s=Sn(RB_*m$jjNc> zxlX8J`|utEa=Z{D{^ji)5XDP$O9QC}Wd(T-IO*2~L{%ucF&Q1Nb*HWRvX7@PlV{qZ zEDt?R((mwN%z)w4&m$^d>b}?_f`&l0{S16-FXCSCe^78Ie^akBloE}eLr$PKPX*B1 zanTZ=X`^uFN~^(}y!`A!f{OXj)%#8~0;*}qUdK`4M75{A$Z++q2Gc+{+F)^@AtE@& z10{ly_ZTwtgR;5OYIEH%#P$mb2mt? 
[base85-encoded binary patch data omitted: this span contains only unreadable encoded patch content with no recoverable text]
zY)qdh?>J%(%`*V0U&+GT*zK#%QQP@qz}*h~{6^GH`jm~M7H4g0qJNuQ2t5?P_=ABaxkN)zV7d56h^@~DDBp!uy(yOwjS_r&8d zL-JtbN`)T-At?R8+m^H(roBC%Wcug%PR0?!3HeadrRNFOo7f}>6&@v~)9S@xJ8&RHID^r(cD@6s9iVz$uMA_Co0C3I+qL_0 zll;S4UR1X?_l($mv{&#g#-Bdiw*QwsNACuNw@~TfTyH@#l?fUaVfTPBL-sh8zaJVw zvm>F#4$fdW2@Fwe(BSW(4&z1`LzIz~&~R=77tNiI7G!cUy0Z|yLW$g2ezI2V3*w#- z7zYGn`3{0CzoG}!(#|NV(?SHLJT|gcmq=$kL~ZwANb*fNK9|^EQV1SbUrlR{y6M^k z^zI(23rozOIgsn`m(>a^#h=b{bq3-&BIE~M;!$uiFy>o3-uXZ%152|Q4;h>I+MM)O z9i1df2uj9-tl8zuK*HG;)HnV4vxG9lMhAvb{&Yp9KX!9Dfnb>OoYSWBG{^X1rw8ap z#DJ*?_65dJ0@s?OSQ%1}7WPmjggc2X)B1M+jde?(cA_>xW_ZLQBB^89nmrh<<`+#k z{kk^hay%#+*%lXUtsm4aaE}TEB)t zMdsQ<7BDSP_JN9th5~-ST$YWc$0fU+$t(U`^g> zlDOdIVi2wgDT+GG*MZ-q5uuB*#)H}*iX7>xbuMTuuphC1+P{qxUOe@yb3CHIGF>-449cc zi3dgQqz#WmP{LrM*VV8ubhkpa`Q0&rcg7#>3nMz8+-qfYNY-@sV;!5A(=H~)n=*nW zMZD2lQ3v>1@NuRYuw(`Onvn6LFw};g4<0#iWvlQ&H)$8M?fCw^CnZ}-i^hiVoX(qX zGixQizuKGfs+=DEffW%F(c@(lwk3MVb)u3ltZ|tT50sLugsXYNZrslL zouu8B{U8WCVd2)M+tzwxRmEHP$DLw-BA_F*s|tqCeQe9b`+JY*^ruXy-n)AQawUv- zaL)@0&}B}7=k;j5He8zH#y#R5P=Ak^LPo;p%A$|rL(8!7c}&GKss@;B{KBoa8yW=P z2(|jIDbXkACm)I~?fd7>IQhDej{J7X7s=b=OBDb`0^GECSGLgH5mc~U#lZd>@Cijk zvyjVl99IG%(6|2~OMQ)4^P>&qXOi9^LF+_rMTujHNgCp##4nvEV+NaSa?X#Fw(-Q}1CyRvjey%Fm!esFry-;pz( zjyasVgT*|RC9c`va`P*#r9#On6#v`P4d=A2GUe<0PvwP|P^=}Sm(H838iki#`dB2R zZukAvg|VLUH$@c{6$!P_w%vmJ)bXNJ*`DjLA2XI-@&0QAVtuct#&2-jL=7}6*f07S zYlj_mM@bX5O26@kYm4`Clb5;t>VG zp+x5&x7TSqMdSrHUTQkz$9deu!gm+ocHj_UPo8evHpR@nbvXsn*J?*m-&s6Y)Z6yy z)03oI&kLOY2YGJ-)x^53iw02<5h8-1AjA$)5n=4fM~BuGes z2ndKQMPVrmBGM{^NRKp$^fn7o>4T646{L|Wl~9sO`F_v7dyISDxc9ts-`o4{d*2<3 zLlUS|{muO6{Jw9_xnV6wcW?QFT;l=WLsCJ7d~KMLko0&4>jM1XE&fap7Z8P?fTh}A zi%-$z->3y*{&&D!KB3EBQKW-EjSUOX8sHdFbswdbYhO{zl~5Nph327Dz}kg4i-qx- z^UV{gw)Oi9u+3frUdJ@fRSDtiXq5w^6lc?UuIBeQd&liA@<+Z;D}7Ct?ZTh)f8vX) zLL);c0$b@pc`2E-9e>6qRzRw4!=x)D?I3P&5N7#+k=!hBi!HBpx6%lYC}jK=v2n_y zyF9}XU{6iPG!T+X0%h*vx2HN+@@gvx@)SAO>U=RSB*DLs5`;@Zi*owmH)c~ww9*)M z%P}jx6-l859@MOsT*gQXYw6?k=Qo^aOfS8jZ$TUVCLg4qxZ>!(vNFN5D`po(moG|Y z!;X~xP|#^DmbU^sN)wbxdtoT4yq?}Vq=u%$GO>AEyHX=>5e(>L{?dS|%2W@x*tKYQ z(K|nQd`Pnk+bQ`{HfC-dO1WlYay{z(t@WURH@*TyR01#xo z%u{BJG!9w7E=(u1O~lvsrwMG9^%hMJSd$Lj7!g}n<_XmD!vj)hi`jVrv&Uyrdo|kT z4RV^}&)xqwDA`wZWdta>!CG{(c9VI6NMm)EiaW4ephWhDx}AeQDrR=7tbe8bU&~o1j=) zNpCY%)5Amc%S*8*dCsW*@-AP$qaOA_7i;;Np9I~=ZamQ^?;Y*H^PoV>*GgtGKv^}c zfKGP+myjaQ;O%6%NSo!nNsiU|pY?cjT0y@60i~%1imfvk*4t)Z`KFF8;;eC?#=7P2 zFM&f7FUq}}Qt!D89+YmcIlK9L=jx(Y3bPtAGPtZd{|vd`PWlJX%bgIDud^$Bn~4cH zu@ke;K4y<3O5&n1-<+`;D4uD+VeA`w>Ask`^^KXV@2qM2sgGYBV};FH@%{xK?yttJ z%eQ$PRQO86R)7;^eb-~)@h79e0mvg>(^QaMkZl_n5U^g6%BDF*iv@xKraJ0E4+^5| z*wX2AVwCAJ@s$PMM{le@mE^i(IO(YR(xXS#E9|mNj`v#L%k+mwz&Qx>6Q_72P+v7u z8F7mRed#7m`lmw3a|3HW$3*0Q(kFKo4fEcaPbjqaS}wSqxfb0yIn$bCi%30-%LvwZ`VHvsy8tpwJ`T z*qQ<_d)eNAn}TC#8nvSOP4Q>ifv&>~$fxfV(U+>$Mlzp+#S9+!N)Y}I(&PWikEAW- zfsm)cleZmjrxC5(0}(Z<8ny&rz(F)G5Pn8=?gvtmK}DBDYBz^x!PuixQao;PFk6y% zD^stNncf~OdB=IVr?-kgdRqeS0$(gT5Ew*#=8D4#qYZYXRj$f*(RnL|g->Ft=V8dsp z>+2}+0pmIQe~sN>f}1msSJ&KH|JM9TwdbHtw{Nx2QU5$o&%i*RTYEkmPl~Q?zOi;M z0gozBM9aaGrm|iQsUi8;isi6l6TjTBnG_F53493-obg52nq1US64xlyK8ETx&r;%7 z!qfzw{wPcTt(AL$-|S#s`nn2)xVQ70s@LCayNWb%6xq6;X)W8u zaFi5zZK!`^X76fEqfwP@^5L1bN@Doc(a0 z_#qTmdx(Z}18HUSiF#yxrsnGIYeaHCm(2}li z>7L)a1iK#wZr|QH|CV&foG@!71`OolDybcsEut%7_AqIZ4_3AoLb_5HF6L;o;F`T) zq6FbhW&Hs-2~o?SKl)@5Qx^|Eyq(*Ar)z7_tM_O8-hr0sgoqfK8q}}~I9ZfF?P0qc zJPYPVG)-oUPPOruSgA&cXqwLvb{kIvNh;3DKY=DTvDTys*dGXIc>BdjpTG}-P+)AT5Qna*>7GrYL$QOUE=djvA)1T(% zimoXuW%%pxjgkk9G3yP(o{9UhaPhVDeO>N32}dfM7>+J)gXkkJ{_}H-`UVc@I1Vu0 zDUNdEfDf`q*iVuqN7a!K!>7ZpzG3|3N>4V@CtLr 
zw7wWN<{eJk?sn*TmF3O5+2@Lj$`z#A6FY%ziIe~NgYYXYN#6qf1Q$wZlf+@ZKDHk= zn~7q&7`8(CD7AHaO>9P}gsh6*a5|a39}N?qI5X^3RS$niDz}LF%nh64?j^J}&MBkP z&j%}fj{-xzwW{g~*vc5xV37Ufite@ECV_Qho0b1YNOzBHTqdF%>X>)KVHAui{tD6krm+nM^S0Uz*@(X5Kg_pr&+pHt)_O0t}d zJr;G9)a@CD@$ZqjH9mvV31zN%Q#LgTd7K*dbKz>kr4# z(6j~oSSSWkG2_Sal`4eShHR?3%fFwjeto9Xl%&dI>lX$K1y{#}{k-Mjkk*(DskVwB zKP@kUL{aQAVj~GPG;bxUu-a(~(|mCI72W(;%)l_#W?kStUg0C=0fY)DF0loig_vaJ z_|rN4sBM$<3mS&)F5BN}+IKR}!PRqs6wzmDuxLfcb;> zllrQ5foCpfjDWHtn%Ve5^?Ab9rbn1_H&X%TidnUsaiym8U;-n-vAKRwg zM(YRsfE_Z~#a|F|!m(l2wN`wjXVXQw2j1lLxLOO>L1b;>m zk37c8PJCM8|10Scza$OQblH!B%{#XY7EU5a@cQy`_B`e)WagU9W{+dm<@0A+p!o*K z{1{N*F)?J0ihErMBtFk!T-ZxBnwdA`c>6<@$X~X&~SY&fR|2iV}r|+ zJX={N_)t%sU2*+~%<4=-LN_?cV*#WEvQQ0yoMJh1JLt1+fhoYkMlMOndgG;phK6%d z$P@YVX5V;^%fN;H!TJhhDm|=1NQG<&?;XlVC?x z^@Lb&!xz}X>s~xP5X9W;FI(j&=#PK#;M4C6E%M@Df;AVfjg$HAdArm(TH)Ml`^aZN zGEzi??IsoX!9q+!TWZ)@u32@XbwuPAGyT1%d+zxR-xhk9Bw4+7y`uRNM%=OPwY99$ z!Sc6D9G5&LY1 zxU6^M4ziV+Kp}>JngO;ph2de3y!@OQ`Qnc^5fxC}WLj^JwPCZ$sY;uJ8F`;-LJE{7 zxj*St9CSTYku4w7UxoTdsK&JsuV=2N#y|_F*J*w25bxxg*2>*lXp6H-Y7+d&K5$;# z>7eo~)87U>8HJz&T>E6j~!Y@ z&j2lRLZc-7rAgXP%~_k`r#mJr$Bju|B`K|5TT-nZcGg`eMqi9Kt2;^*&eO9!`db3e z7mSHWif#bk!s3Q_(3#uHzY2wQTs&E zg2aHRZFB+-EJd1EKdh%AQ^Yk$s2yKy@P#4`5BBw~q1=hzBFM&++!;37%cX z`H@IyEA27VK#t*;rC}>FWP34f)JUvLo5S1!+chM42^)trN_gGVMkiM5d5ftHRP_M+ zrJrjH0&2=7;~qU`Mjy@7W)>x1eA9e*?hO!ims$|;wR}-c=2~=EOht{k6LE!jyN<&+ zjg$kd+Okndd6U-*>%DGm`Ww(JATgxv?Na45<;LS})jjiz4ZH2%1gT!D?RoR<8rk_N zTw6x)bhb$~ZV9ZFn;c33{bB(KOYbG9+9wb-nKV@aPsv{YA5$%FKEA%ifDX zwWJdM>4XHi#OuYk^#u0>9sW4uP*F!E`in?vz#7#2K2B2L2MW=Rh7vg6gGeSg z59jG4$!#4!9j5c4(b!2D88dyu+l(>ZSm}zE^Yk%uMD?3TaFQfZmK^0FPc00Jh_K;{ z8JxErO;r}H4>fG+^Mf;6*EioPGp-Yn^7-%pA3pCUU)>}M#aE~rf1}UDccqNbg1xK=F0aG5G z%o90wc_)UbB*Et*<`&=ken#isd-(NKj%%tDSPZBkU-%`EQlTVIlLLkI(ViIGRN=ci zY%UT`I>}t1hQnZV@+hU$iK$`7ju@Ny=!nc7V^9U*lAiQ=BU1KNT!~>Py@GUq}X6+ z5@F~3`d41vlc363C&HES7@{Cavr`L*fr2v;c;wPP9vZ4BG$H_J^dH(o+p&h$cpZeL*JZ41vr z9rtxzthdIjA#o(Jlp-pnfVwMyji2}c>|qxwLqbLQ6P;o<0dqjTec);axmCwE!DjVP zfZ#l$)XI;u(d^NYS0pbBIK;T&*sj>DQvJdU_WVSP#LX)#QKB zqLd`dZ^=63PF60vNSncn&q-eriSdxsg{KOQ1^Iwd#!v%z{lg0WW4`z2 zS)hH@^?SsLDR|AWb{nM8IO9QVB*ub5rilf8&G_-zKfN?wlPmuCmhL8y1yB1=uI>y{ z-Z$stnDNs{KSgv5*mW074^WY!Ea76_PN=~a=zoRUtfh?cR@PcHzJTE(PHW&N@HNY+ zix?LiI=_t@oj5#m%tpPt&e&v|jY&9$63v8l)lP$7zaypgEnhl!1KbH+3 zBgFznXhL_G+fSFyf)Dd&2t_kdtaYgEut8ibMu$5^Rn1OHu5b_ktYBZEq@H}V4vveh zvttSj+NOw#Orz#G(OpG+A(%Op3x~~S^1WGwA4&PjY#)v3$QJLfQmOOO0 z(MA`E%}~F6z`F!7N%G-%>^pNL$!7yST<3e%V*8{9ZA(AJ;=g6CD1J4PzE;AuVk6I; zW6sMXgr8=_wJaw&-^>iZahLlI^nE$hi|u)}(yjYE?k5=cZK~Ut^1ZtDvQ1knLX=IC zW%J_jjACXytKS2T87ew$^0 zok(jkx1qo>#W#l5qUX6(F>O90{ON44Y5SUHmgeroomSiO1}#vwBT^L4n)%KBrN2r8 zosJPAb^}x$6cV~lt71=j&W{s817GolcYq_404V~P@8yvmmNpQ?1!begn5R5#hDgX7H6TJxQk#accR~h;ev!C2=%VJklgoVs3zyqHVO9l-C@qogzxy z(9+j+8e*cFVEO6tH`q3D{ni1yH{YJUe{w54F{Yw(a%D_@&Asr#RM&fB{`t4;S17`E z5;CT)B1vu}Ck<^!!EMg1mzP;lWlO!7jLmLAp_qpfSEb-Sln^yWs zdU-GBL-+KnJK{m8k+m_psWc$;HN)!%Vb2@9-7-IPbex z9a&LBh*IedizWfwQSK~s8f%r6tPcn>zqz9Q3vGqeKmxF>GI*uW?>l-=$K+H z(kAtWDH?beX+@p4s6z|$l9%w-qkD!WzSrw^D_yOyEhH`U<1OW(QCWk2tQFm4SHWj_ zKGIWCG-fWnWUliuzsS>HpY-@mB;_}pglWPvF?`=-Mf?@dFpX{aLZe}Ff328wqar;CI837aWNaf}ooOj9eA&8xgl${jZBmi6?QrJM z^ItnB%&hGiU3tbjmooANf%@d057%icm3k>9GuV=G$xOo!LNZTZ4s3RSaNy-|@?#@Ouzt+7jwpw-8_jdi~yG&=D zNNa$5M#$!XX?{jI^9RKjLOn@vUR`p${!%m=tfV+>B^xd=hZ?t?l&Fv~#BrSipoP~)R2L8e z@F##<>j0I4hi$YikQYvnf(L`7+>(LF@TpcxoDi<$sWXqDN99~_1%SP6>WI_dfkw#d zrNl|v1Qk7iZF$7FAfw61lQndpngL$}HVyLZlhO`DaOt3mugq_R?;tkLmZ^Xntc85! 
zWGUElG1(x#ik6wTW|9#IsBz${U{JsE#YQ!iPw-b@2t#q8kkI`O{VKERr~os;7gT+g z8CMP#Da zg49#tH`Tnt`{T|6_a=}QN|W@uI2x(c1}1Iud|Mvj>qNkw0y85@Wy5X**6Vu3*TDdJ zP?z;HH@^-Oj5U^A=;8x+&~VasTtpj9I!ZT(JpTptl-qe zpB~Q$+6`u#&<76j0Ok2 zP?{4OqMsFZ;e42Nf|qHUHi48ydmM^sKrK*dTr+J2GYsvOd0|0vu8j@$3w&08ZZD8{ z>xVpsW8=jI6@{e5s^$&2W?fgE%cIe(5LKHV${6Cx5s#<>+yT~jM7<+(Qv>$S8l zr9yr_uA4o7`9tBzA*F8(q7y1^F#n2`o|wIukvi3S87yI@k$6(&I1(eT^`J4hZf{K! zttp2Q;}6+h0jhza#X|!fkB)m*$iLc}b+*z?9PYkT{0iKQo8S=YhXKIaE7~3fCwn3t z*haP%gWTp3dc#T`*-lb2yb)}=TEqs(P(LA^R! zX{xN%qbYv+8({f(`+_GGVW(;;YRc*=Zs`tgbA21466oZ)w|l~{s>?LV>&45%CDpN? zdzNb|eg{i66jT2RDatmt6kyxs*T6b1)ZdH-%B*Ms+Eo^mNY)yr#be|~i73!|Pgrp= z(36H(&88+)Z5?!&33cOXce3>C3cQZL^P-+gOEPGAsx<000gmRmLWDMvg=n{t^jT(j zFJBi5>`3OTTj?rAnE`pcUB+3fRSy^e#sO{3icT;TLpCF8j@7I`VN0z@o?#j3*jFZM z)y#{2*0O7@JeM!JCxqXSs%9qAWEY^uQDDp{`0@(}syZRD8`Ss&d>@T#u*=4I20e9C zs1sjo^@bu|E(BIzyT6W){=&4anqY;5+i-#UIaOfVq21Nd;8GsqSmiStvOn*EtXPyCxcH zj{jEm9iqfrN0Fx!QPp%$dh*yM&?l`wYw72r?P2l8lrA4{07G zx=Sh_N7*jr6sntHIlEip7l7&qpmerjtv) zj9GW?)jS-@+QnRnE=3f@QdDsT+YQTeN;Gn^`2LjCf`5$>AkcTjv=kA zK0&qZIgm5Jg-xlv^%bWD>|c8iOHZqP-xJdhrD2^oY5)mr2iy;H~*;XV7+q=`OYpdPm&ocygKRR=ceQN+0YnJtf&i zY!d>KG!6f>mpc5GWz6(J=BJzsF6YNxZoMleEdz}zAfn0VpdQDa%%epD z#!HW0vzJ=cHz1>;%-HX>6mxusl%H!&)g|{RJu7 ztj4U~7VVSCJzMyybsGRkuRSoT!(3tTDoc_?KG4lHJ~=n$=#*KtL)~v1_ebi37;^pq z-j}I^4#*aNVwbTvMGe|5Q~iO?Jxp^*EF1qL#l4%gY^F|eC4et{_THX~I5_QO3NP3`vjfR{Jy}!a- z3~t>P?geQO?OKr(kdwZFh7|#HdM*#VL4$z{>t;T=bk~YxOz&oXfRR|(XWra7P(Ky@ z4O-3&>8y~gT)Y2sg`R7hcz#x{=ks$p3xz@8+@sC4?L<^G8*rLtrN-@qMTX{6#_eq=ncj&L>ETr;nD7>DD?Ux^{5p9ps=R zdquxsX!qR8S=Ta&aPhd{(v=wIr}89oYBew6rAwyFshoh1jw*iNEN4?b_yYpI7k~DR zJ)4ay98#KQ?;<^q!!)&VS2ti2gQ0$X)Eku98kscSMe2$88>hOD_}$4I)AGb$06bnE zKv{A~N%4P3g*@L7TOvIEmPLXZT7ZzXVq3ve!_3~AgjBPq^tLt4cpbT(vKh2uOu-l0%PumFA0#N#uVpVZ-iR z*(S=;AB5>V-s;y%-@y4X53H-(5DubTsV`LCi$A349{M*61zz9Zg8?{X?s|0cKI&JE z=vR>@FXS+oZMu#td#1?>>DKGpmYjTlKEmwDv98aT8jF?ht9jz8dqCm%V!+E%1)kzb z&R`n0Nl!@{U5RAYpIY0R4Z?@eMAiz1?Jsb~K;t=&XXAjw&)H(XEGQ-3H7H#)q%cZ> ze@mY1gv)@PoM21XeJOTe^V3^9Zcs%*p3MTa^mD;$QBQcF(Z&*F<~%^?39J`o#z`Z9 z1X(nS>%&7gnLVXK_W+B`>J!G_Wa~o?5#LhL)oIuRV6}g4mN2kF@)P_NKa$X$Yj9_f z5RbnIZQ#nCiV75`KxGUPmYpUAV^c;&L9p_u7vy+VfsxWcDH3Wj`*GDI4UTK5{OW}^ zwF(vMB2J2}E#}`m(_QD}``}#wsGqa^x|R^I$`N_D=P&o1Oh4_qXM7jwB@J7A0apeL z_FTTamf|y|WrvK2g7`Nm{`9MSb+Zg@$pW@yq*b_JY(}5h%jCE`^;!vh1m6Zo!^$jc zz&|+O6Ml4jU{pG@HNa~r-y-&VZmdM4IHN^J&Bt+N#h^u1+N9F6=R6x$q)@GdB)ewVOPQ59v^$Xz~oH^W8!MOj1vq_SCq{~cKHo05##Nf~%)ikAAiT1bB z;z}Uote`gPxwTmwyeknd^00@B(Eyc1&%v#Op@U-{o4C^J{jCcApT7+xTO6BQ9G13u z@ZB|6&uikS9|SHh!DjOZE2Z?0%rz~`Y8Z>+;tR#du}!b)Zg0LD9O^)FtK6O)V7mEk zDyi?y!$kJy$K8*secd~ox;S2O(HWwRr%o>Xc538s!ui`;r5Wbj8cyhXOWu(w1H(Ym zC;Y8BNs`cnBaHV+f}Lw_I#>l?EzHg)ycm00Xhb-2Wx?E2{La~s4L$h_%fa7AjbpYtXEyyCkE$I~v`!*q60;ZH$@vHP*1F}F)nosisi`SiO& zRZhmAOO6j7KY*=0w0j|_uC`_L+V!@_J1oP__hDM@SDVi)PbI5HJ{50ps5DxBol8}X zX?k;|?olt-!#Ve$y621EuDiKa8SmTr=k1fr9S&c*e(KMhfzeytd;{EWoILRXF8(Tx zyK>seUyHH?y)kC(;uhsjlG*GKe<_z6=j=IbAG9Vc@5$*iduwVlm%UG2pA8~{XHz-J z34IhzhUCvL_OJ36!9!YkZYjNg8c=0>avg?xH@B}jc(%XU{@dSA&*Z#)yMeu{_v@Rnn7=E{|1s$Yn``F{s3V9G?-5jlL7W2%fswC0m|eI zH5TDd!Ct50T1tsbl7wxEYXCgv*le$KN=n66vrm2jKc@VKCj`J#`BbTl-2a(CIavVw z_UFl0A=r_N9tC-eq#G3YFa`CZ0m|-4on8S-#Wg!HBk}|v-G>WvnU+624vH8Y2aOF~ zP^A?Y-Ngj<5NcG(rQ6;$Z06q>lLWf^ zTnYMZxMeFfNG#vY`8DFgJ~-_vQ#YdMx&J9LbC>j-^=%R9&7~|cNhN@{ga>P7izoG` z8a&eCf|+X~#P%0u-eMVB#mk?YeH^{)w87hRTXuWSv)RB;5`#*ahVIE_iSI!$pMD6q zDd&0HnXAx;!%lWmKJ#`-FMc7NR~FM2Mqt7M3NRWqUI;A_bWx9)J!QcoYaL1 zs6~MY+;Q^K^|hRN6J%udQc5rw1QhT*C`AbKeZ;4KG9L>$-F;nvIaxM?HLzH+jk`@?t#neEzt`o&*sohXNl3*|>(7l~AO8#YPEg90vOa2;~;g 
zTUB)>e9V9Za{(y#ZwTyZ0Cunz;|(|nU9OHirFhHiuysNKNx&A&c)3aG{gkp?&{sQ% zK-t40F~YLe%I50EDLs4^0+ogs|ADFKBNs2PQjKv3^kU5YlAqS z0?NkuBHArNFj-e;a4ofX@EKDB;Ad>`4jBbob$O~EDrNPKyo;!v=!qm91;XbNS|e#- zT*^eq6EnyyYlq%_-Q=@Ywqma4sBs`E9;Bq6SQ_lM7x9WRn6Bu~s;PRw={G63N zHnf4J!ziQnVD=CFa1)TTZyf+>1S|9Z@^qQ&dwZKRNpH*9a~QS_ZnL}47%^X<^i(G`|0IW`y!+7!&m@vl z zSN@aS1uLPBFV+IwO*mK%R)&DVipH0wNk;(w4&MkAYMAkXU)czjV9ZY({MU#wT+6VZ z1rVME`zoZ+1Z4S>TJj5X-9;+;rtqCC13u}`756&;)F}gGDIfeIz)V$CaGl{6{T+xQ zX4HYY(qOQ(8bMTewL@|v7crWk!KptjK4Oj;K>d6HsgLgm-t=vvo&5rpGZRmMbnIP= zh%3Og4Iwf3K!CUs@r877;jc1td~t@QbN*?5aHcFd7vdf2y0{(=ZHTI zbvo3rxSTZLJFE@f58y_Tbf%O0Q>59iAH2@)vhvC83()qkOEiT4oFi9cX5roJ%C<=oFggQi z(%bMKl~CMlM{~GPJ-~*DUI_6qE&Vi`dY71p*-0Won+30uHkA%Cg2!EI-vTAP*YWMy zc?@T>0qbqY%Ly|;fD(!=>7k+POiAThiTO+rhQu_8eh`vuHo#T!--yKLa4o(R1D!5& zVF}~W+i>4*V2^c)Kw}f&f4L|A^w)GEG2#xkQ+oRfebVmAvpH=Ol)o*ySW|g1|Ey4u zk86W?3qk|A%g{0qt6uq={h{DX0p~-~VD3e9CrI#ZsId{DVg~G!xDaoZxnXN~Ix=cM zrQD{SJ|d4@NqkKctPz`IWIou_!|bPgu%yww6!Ar-9!P#l$Ct1txRXM3A7AXvF!F>U zV0EO1($r*miTvkRw)~Eqn+y3xuqW68SDR45DZ+O;u6$Rf41tuIGe*?@He8F1nUG7- z@fMyLSc@<5#ej6>`CKjKLV)tAdnZpFII}|Wi|+6UrrR+b zVJ^Le_)daA7F0qhtveyp8^re2_n6eViJerf+_S5|Vi2$I%IUVE`iR(Cm^ES-Td@Pv ziwKv7*AnAqIagCXoZ9P7b1Y3nOi zp~mwo4Vxhqr(v!}OOTkGpuYgkf^yLC26L|+ox#SYboDXT!l;nAdSr6u+O*T2kHRB8 z*xo|HsNi5?fWqu2&||Hg2DkA`+ZvMlvvCb7dW;Ph)RE199uYsE{{pT~LiaNV1CMm$ z;7C{?l+J)XTzUye+QA?S2WC^5Y|9RP-`kP;CxLZY`xEgNiz%gT@qExE+?@{>@WrbU zO0<<7*gITo6$G;*l5ustU_I{%2u=bU$z8-Phy225XJmF#jC6W%Busn9mp>H0Sc-+% z;%un|_3;7K>KLB5N|cN`iqC^LdCX7GVyNta>CKZWjXnCy;hFxEC2(b0b|2pJgV2BZ zV7yvJz&#&MQt_W>Ihz`rmS)AwJ)j`pdPwQ3Smp|h$-=k<$S=rSS}VUo@ne;9~m7vbNbF!G()Qzs5HF4O#fFIgI~K4EZ1Z$Uil{eEYABFaLeFf&W#% zQ~XW_M=EeDUoa`6ThPp206=D00eM}|e+oZ{*e2TuJ-6+FCJuB#-@c0l1R4IbXSd@2 ztgdth!1VmLIII6v$NS%>`~1^J|Nn>mI{xu9`cE7Ee{o-efBZ=O`&Ijo`R<=K`v2lu zZ~upn{=Yb<@&EAAfBnCs&od`iKAoB79sV#tdyD(0*^u0p>FIZ$*Iem&^;Wz3$&69da*EKeF+-SnwwLp=-|jZ`W5XcyXA1x)@pyRg6rIAXMo!B(^FK zz0iD57|kQP5503o0~sS&@17RI+{e^>@S4DRI}7hJ=e0aEI0Dy1)4zFmT^cC?)=efLr?x^KhPJE{-o+IPns-wQ=O-{iPu(_i*y4A-VSU;F1R`}eoPX1HyhJg)0> z&@@FcsB$2vs`hf##~BPq)-OIZ_xE?#Z~fga z_>S}KEQ1{mhi;!3USG9*FXvuByV)S6c2CyM&RZEfUuYyA+;;U(AFT=}2V>LD{`?SL z{urQum1Hc?5(GksjY4I76C>XRfj}LEu}e0(kNJqf#B3@@WbJu4!`L)&T+eIrV_iy9 za+s->NAaETbJBZ3*-6v`fvbE&0}G(NjGe>!At}D@klK0uleDd3>_z=1Rh_=EGH(2f zEoL7diXXjg?y7hH^@RJL)HOuc;5!Qs&$cMp^&iQl%{DzHk*ee3(726an@#Oo+}l(} zmOWY?Oj5t=_AtKn^p3xr_P=T9$xbX9adi*m>~L;58F=K*CbJEj4(j%W>y#s;x0BE% z(x-EOZ_)A}MJ)fH-c!KRK=ADjeE>BW>?yJbKM1q0Gq>I+j_>*DGygvwt2TG#cPQ_( z#wpTu0hC!LJV}W7LHHSlDg5{sQ)&P2jPB2{HDGyemHZyXFJ5%>!98LWr1dfO$PUJb zB`8sBk=vRQ9+K~lMojo-zKi_gQQvVbDBte}%f+~kI(CI0CKhIoXq`8)yED?a zNf_rRzHr>Y%4gxRNBy_YYkuk$m>u0z$uTnvIg=?>?C`*G=*akyWPhLxvI-Rw<;wge6j-T=gmt-9 z05G+oRrt+POvl!@^S^^4*O-0&HR#*nbH4aeUp6MO=|BUA2gx9t52FmXYjqwfd5hTv zS$F5=6{I4<#?+ZASCgs@{ue@ni+w6WotHLezdXG**1!#ZwL$!jrabaJO>KDmWXZC+ zQjW#Z@skFw$NXR9>8Gx;ILvWKa=mS|kDPg%wDpox0Az-6m4~Z!6SlE5|3a1>aJ5{= zb%`Q}PWhHfT!j4FlQ~BzHVEQ>l7G#D*?;5Qo=cagu){|%_fC6U{FrFLI@Ng%R8 z(W@C;%@K7!EmY?Lrl`T1?@PPpwIG2!X&wHQuQI(q1k(THx?!e#l6Vl#pHv!VFAZlYE>0#r%lgBtrn)bZR6!~?Zv!F#8E#FX5KIj$hxeoD58F?|9|M&yE~jhbTCG`< zh(PyhTD%T5>6@q9C0Hw69KY#X(y`(js;MQl&Eo+IGUxxIE|DaI(h>MV?l7f~D}5yg zxRei{h^>a(Zj{pnzAo_ISgvHyySb+L=0o%-(;aag5WjotUlWv?0Ze~IM$yRO_~2sW z=BkA&e=E5K8pEI1MN04oSgI=mD1_7R0Y2dcFtG>lQsoL^#3t6XdBitHk1L|e7 z455Zrs4T6Cex0I+QHP=ab!4EhRJqup&S*vbYsj(FG<7?%>ei3;#w|RyVQdiIU%*M+ zrK4Tz^kwR={N?wFy%cmUY0?_;mbS|HRqx?|+1H3y=mzWGuf4S2p}X zxJdg!@X%;Iq9FJC`&#*rj=BG+Pw4-an44k)IEbL{+sOiR{28TXO71qaiu*gFgzq;o z2T;FlJT&c;WJ7GBeBW-f2H@*TE#TnpIAe&=X&*{(;}`?b+)xdt<-sbT)8Iq5N68vHJI|u{6Hu)dop`)$Ydr 
z5dGC1{yyWD&mX^3q?-S=D*BlJnPm(|wZ|pR8y8;9GF*Q6y)@Xkr6u2cnoMN$UN1OL zo#$h^qbk1OgR{hj)X=smmwo2;7gTI`PrRXH;p1~Hc8eZ&=5N?@WGVTy*ic(5{V-~T zD}NC2l^B&xl3LK7kfnocJ2u2HJDc9Dqm@=vaqpe2#QR)jzH@oacFBcB>rSfg-jJ|W zR%y~P%Fp42XT0ybq2|8t{Jjs3>ULHkJ)VCIZ{Ap&YnAH~lB&fqaPD+ruVI@TT73`t z>pQ>5=8qMQ%}h#yXxaM}yEES3Y&nVElw{vNQ$0~}>V(c+od<}D(zW}A77m=FPF>*H zQa)5!C3k0#9o~MkR0%TL+j-VB!ld2Z*mTh3m5}N3A&7>xi{bYB&=0k zQ~SGnJa>4)Yn%2TrviqzphXAGwNE^BZ8E@$1jMjh8Ouuvb;q zA)9KBa$fET*>7<3uit|DgyRi3OnNng=_<+0JOe1IHdgo$INz7hHJPW-cEr4yrTE&4 zCYe){T1AR2Psv9U+lE$Eb&Uu^8EzT^o#Zd%_+RiGz4!5n#QJ2g=?{YHs0CLV z{NzFoAwpd&M0aRQ@1q(BS1gPM`J|M`2Ww?pnMPXcTgC2 z=o4>CNorfGRDXHO;m-F+Uit%2MWNYiMJLyrXJI}4_1NgW`%2SJ#vgLDZqeF9=u59} zp9(XcC~MKnalP4mCj6m^d-I0i-=mFQy;^-N>i6nh9+8xE`gIz7?rP>n^o@iVDTEJF z`hduRI@uwiEUbTTF>|ACi%nIKsBu&AgOYR)cqm;=UC7|O_cFEhu<_GF+j@iR)E~S% zzR@w~<)crbH7w3Kg%4G1m_{GiCMLI%t_bHt0d#cT!4jD2hIR~3siEe(dYO)hN(*aw z=hS2fO~X%rMUNR>)pLx$C`6a60X13-t$JL|k9I;-);fiF&-<%~9~A3BkT=Q!;qOmP z=9l(1OsO(=$ulTECqs-%R_eF|IfW9_zqbhv3Fut6;0DU`I zF5l(~OXKf5YTi4p)jHDLQ#6A(-1HnIupF3k!PJi6Z@hNOEB`O{-US@WweK4qsZ?S@ zC}Jv=N>bUC%}k}pk|xPMrcyDcF>T1mm=Q4~%_=Katc(&8lI+T6#@;GR_J$F&u}U_R zYl?AYHhpLJ^Ss~jeee4{o8!IjXMOK;9Ct@Y9odZOy3Xr7|Nr0rx1lakw1RXLnN}xG z{(#~ke!O6=)_DiL368~R-~$X__sU!^2MS$q%L$m}6l<%({81c?bL%~wDn0gAH)C|_ z;yX!+7J}ydi_*7nu%ym|SH2wrJa{{Y=IeRuEO<97X>0Puj8=OsfeRceZA~TXnW9Z` z4QPAu-A~mfa6bwA6C?v)D~w2^ zfGm+r`b?(_7)>*%ybLCiDAU`LdYr`;zenqKPh0RbLo%LM6ttWn<>5ItcM3`Q>s9Kh zV9FT_Y{a-(JGyGI;u(oI_;(QbYYV%S6PwrfJREhKxypiP*uJw)P^{_46fre($8UxK z3T3THJ_vYxv!%+uZk#bYquNHieEIq0&o_2h#h|LehzN@uZW92jRCfufn;ndz>pA8M zd{R&KQMAWq77?s3h1jIuphQ3RnaD^e%(f~fEGcGH<+tZe`JyWx6E6D9J!7B%bR^j-fF(oYS27`Yc2czgyBUm) z*^ndYmSR2Z@N~GLAlN1~gsEw1@7}y%`XxHYwO_U`ngEp?^}pUI>AhHr17yAHqrbKE zIYjRH1;5{}Q=w*eEnT1@$~0;{8;LfNSw+p{8?fS1pd~>*6YO#rQH@VudD*)qbB8=JXaD*O*kdN+{v+WX4ql?Wj&f zT1HAPuy7pl<9Xx!4n&@h3bKo^N>5AQy<>pmS+%RI=`!iE>XR2(t=cQiQ=2aSUf;DO z<3C1W{>qHHJ8*K)oRakPhTXjq3gYAZ;RLsk zluhiREDcV%nGWInePxY{)mF(=&K<`=(c&-rPCA0qF#tIq^G#WyRw(vN5rT*lPHb58 zPCvYvOjUe!OeJ3C__Ni8IPW^~x&X^6mu{K0S~i(|=)SGKvnk_LmVzgfBQc`|)f z>1xWB9+naTQ>m!SsFJQT0*Hx!s5=2!-Ub@eVTjtbgh>|SpWh!N(Ord|>o%P*Qu!ICH)O38nM_#kulGb0aVe0hs6F z;~=zmxpXJ3!44eJt1MkhtgvO6kZ}k##p8>)V)fuS0a9^%tpd(1B$tdW_`b(t1JbEtohvZDu z`5j?m0Yc(v(+o0LKF8SQDF~X+?7!t{N6n)(Y-gWk+~k!yO~-?Da5SQqNCJ#tEuc1s zlYnuUg(NX+BOEt|)A*FeZT`v|vF*656DJTCwCBKB2cMMZ_wx>M)VF|Lj&!0g2D2LQ zOD_$>B!xPlug4MzdMCF9MGDR0ajK4`mM^}XXTJhIAkxFIBO$XuOU;}WXOQ))^lSCo zcP3YkhG_4%Gg7EqK-%;c9LRc76 zcI4o@OM1|L%~A)8CCLbM4yuKB-=0 zdA8?jV>H#Xq^k9r7L;KCE4rn_KQS zJbYl?rN(5vXI&~^0E2U}At>S=0|*H-T|x$n!dG8dhoCS)vL%>VNC}{R?7-+#kutA) zLQ+D;Ig~ne6PeRg*=c9o%EjIQ7cI{BK0Eevs3^bc)ytl_kpR~{rlG=-0q9d{h>k>a zXi|%d@Kp?RiT4Ma8$c@cGv}p9D>hV%uN4Uu4=z!VCyu| zA~CGq0=AHnjVcm4JV*DYZ9aMYhe@&;n#EMC{VJ*I)^j5GTaN|B%{)5T)($+;mf!37 zc!;st?E}NHLUY%LfxW-nGx9}<%LH<3#R@>sRoMWKH?Vb!8Mjy~sjYg3Wjo^`=?U)& zjQXht=#0!f$3Ckx|NWEd=ywl_DI{?roElJ`_0D@P#YPpQI$0Qg_U2O+$p)LH zB?8ssIh9uCf~z)guK?oP8QR;;v)~wJs6N12oLy<1-)Oi> zY<6o&Mq!QrLuJRhe%2p4c1K!iFCz~G+PYtYkiJjp_5UvAQ5n=wn&IPg z&6};LnjDI$kf2|8P+^ZA7O2IKb1kvUT!oC4n+DP=zc?Aznf==1ThUU>M|E||G#_J< z2AI$>-jInS{6>H=AnOaA;#pb~qszspc=YCAz4>?y&3T|1*Q3zm+8ar3M85E$KvmcTIAdT;c%gaAu=o z6U5=Nnm~`FexoRuM*M>Am0(0fn~zN*+=Z`1MYn`j9?5cIs$`b6e+?X|i(bb>$?s>L zkIa9$&?X5DjBS%5@zZrxePWAJ!*=eMlM-t0(3dD|kb7!o<~`33>BT=^U=Bupc6k_u zET)~iFmw$+MG9b=wpG z;6NL2vq_=(W_Di{SY2SVUk0FXOlhizYYnZcf?W}vFU9Wbs7n1aQ1jbsY)|b;v5R8k z`BV4uCBPh3Vqw0_Pl}bgOko1j)VcBFa9o)w`y{6*bWZ zN^0#R^T~l@DhU?B&+QHiMtqx^<|UQ7)#vZuo2p0ks4!^!mjK?N(X-T3TdVU$_n{zM zsg7wh=`pO@jDcR*1(K=LR&KCu12kp0v4WN5U%JC4FEc-vHG^L8^E;IGalLwzL@Z7K zjjN2PrfEKDqchVHFXWGxWO&>n 
zijz)ly@miYA~i6__~jIUW#7)XqU1-;WFyxC#ajXGgW-?p_GvNNfHs*4%l8`~W!mEs zg1AIv1kSZ1@KH1>xD4d5u$5)HAe}K%(eyHb02m4o&@G@>j{rb==E-o=YR1*@CuFbI z40d8r#kYWHj8om6p`bde%HA0A%(MzPJ5+#nQ0<4?`c1U9Je%S#>{OMJfYk@P3Adtz zr5DUn5(gz&k`iCYY`RP-)&l>ChSBwP&zrMG4Lm`QOM8!Pi)+J z!cF%pi!l-fTqgCFQRKO_Gc)j3n#f$B%DjnMLN%3z#JR{bs9ICzxQ1vxHV(JUjs(>- zn+Q9u>9RcfJQ8OL@~6Lo z_pkaxP^Jg`D;`h`piTks4`u@uNV4qcg6He!I~jRzgl1e6)+O}`OmH2r9G)Hx;5K~# zqo?;{9iwM83aevi&a{^_*=h&LKKZK(Gd(;o)M-6J-8P9q?g;aDYPAxB9UONPQ(9De z7@b4R`J)GxHYjuxq@vUbo116ttz6y!_p7O2;x_|-X>syD#3ceL(`6`5h`@aYHEDo*x>I7zi$gz#rY*PlN+UmTJl$}7r9c%&K7X;_H27v{FK znzr{AfAVjtC)UUbEX+g@Px_!xTC$#ALzI22~Nwna6{4_1htGO+Tp`< z@NNh)My7XFBaS~WN>{SIjnQW59r9kebT^D^e)F?!(AqZmxXNc0nKn=1c3(=Q4Jm=m zIrZ>G=oh_WLixU572sC1EyaFAjl}G5<<3^Ee~O9f2KGH@aQ$NAhFRz7n~~O zrPR7Sem!8f0s0!ywUavhID{Jw`PrZNz}H0uRj(R3Y*`B?4b}-V2LVXRL3B~PvQM=L zu}YMmAZohMK=X@>%hX=WnPud*!*M5iL(jeFE1s?}@vd;ycT4arXIGT7ZW!+x@oTWD z4OWs}qPEcVXo7kSNF7#{ZlVrwBa&umL)K%hbp((v#am|@W)SG#O(cdnmk>crlh5fM z(N0S>-*?g`y$)tP)z~k%k~>f(z^wt+?qAB^l-mxcoIQ>lVcfz%{vf%%q;W>=6da4= zGMYxwH7=-1{HQkU4>kpDa&L2W^UVwP=pj=JLNwBam6`?O<;m9vG7V3{33?Y}@ z9w_tuSFCRJqUP5*SzQ@@Pfq1hi3!%)suE|RFoOKS@b1lDD6Z&Rn@igPJg3XDO z^X%BdnFh0)C{(R}6f{3ZYQiiA(XSrgz8XWEJ2LBBIMv1FzT=S+(S|PAlOrqc%0w_(_qY2R)ijJPZpfI4t3c9y$8S=07s=55wfnB}MPBvQ z&*O4K(CU3MU?@?U5+MNd65X`2;Pk9@)!lzaZy$Gf{WA3^)Ad?uT7R+6#}bZ&iuPow$C1o`>h|OwRZI zv|z;u{9&ncEFDkEpU4P!kU!jpKa-Ci(7D?}2n;+=Sm`;Cme%b=B9c~LYSME|e5ND@ zH^0G}eL5V@`i@xZCD{*-D#5uo1BI>ZXxDNk}=>UswXk7=~<^&m=wJr`p)MhihVxC2PX1)iIsY4 zqCa>0jrxBY$lBFCmmIxwa&mOd>DO5;>(j_*Z*4Q^);_5`aNk4QP1{Z5;Nz_;o;}+I zerumZAe0f8cDEoXa=%!m!6s^N8-8<^g{rg1@V$Vo`d?=Htk-*+@fFSzEo5 z_>zo7)ZIVk; zO9WBo0Uv-0$MR$6P;|^%mI2vJm>x5SHy!V@+dS5(jt|({6}q#C*YUDW@vdJKS}g3_ zu>~ET@Mw`!R3Lgw`KtsE^%z4uq$07Gr-9|17{2$1Az2K2C0fI^I6K+5-%eiuVkCvXwys{yDXXg$Ie{ymz8oG52r>5=cBwvMI(9AMw0dt7t!$6Hfsf;S{m?S z0rs|`MJ20DOm88n$tsOSewNI={de|j(=qzX*Om1!h)K+d-bg^ zUyt^F4%PnnvMLNfz4c}{d`T@t_c7K($V*^KZ&M5H2on*;Ow*efD!ww${@J(Yt55;dq>=c zf)`b_&J2z7gG9!HO^{g|@8+lyTqD>^!jxZ}#Tmkcqhd8ydcikMZi<&<+QRak<*q)& z{H!5M|1DAW2i9kKd-9!33K2n+&_l`a`d29{yEawdAE%mzO2IAl-DyoUk{MXf)b2A9Ny@-==f4jBP|BAh`NYN@C0jTg={mt z`;5@9o#@c)iCYfbJZuI1d2CwH>KDKn#B+2Nh_wj!YxfSyG9hICnH9V-!+R{;dEG7* z5?HYyq(F#Fuw*ZW=Nm`YiP>=!*Zj^WaB5?KS=Vf*CLGwT=ka?BK>R-0bQFKQUx=Sa zDA_-h7fE_SN{eFf%Ir#@wDNf8w5AA!c{7AfZY zA<2zA@8Arb_{ZM96TVi-Ie5=f?}_V$$ zR%)bZ74OhhU-zRdC}0d3dlqc*_KH=6O4e35T52Fu?xr}Mxkj4pxHcnDq<|<6=Q`8Z zq2++xqSa9JenP9+d6i4vn~;IYYX?gAY*qdYe4zdXzeK1b5T_9;2&HdlV|4z{0Mz~> zjgkg;AQ`}&$K^L@{AZ{tnn+)|Ptl0p15e~ou8Pwc(_yZm(t1N%aYSr5YB5~X=Ih0n zeMmSAs;J58D-T`MkCj%|lvlGNdPin1mRx*PM_;Bihm_f8DpWyPeIyl_^_xi#en)%_ z+OP7@kN)#I{I^&SYi)qZtG0#T}j=!o97h4NjbfbP^m%h~AYTl>!A~F2ejeFBn3sq91i|*S0;aP-P;9nf39#`|jP#eI<*MbRf z`h13myh8MWdCRgB>AUGTK6!P3t2`yGfK=qKdb6sWaQw@p4z84ZK=3@Mwf4Y`Hm^pH z+@^gOcBYx^Zu7|B+SO8PdHBS>3p!Umcubu&UV_z_exIg`+Eq^V!J*Ct9M6xALKvg`VXC zPw$ZMAI|^uGFk%#GoeZQ2H=`toZ{k?Tn96->;K~upFoH9zkKySKlndgzJFeje_oLP z`&s(uV)zFy2IUVT-NT?5eoE!xAP?lAgCs?sgE@m*OHu7+EhV?t>)4D|1@e&wuv3Fg zL@dQ&Epo5N3ybu15#D)MmQ=VWB zxhsA$1n1+~3pw8r`JL)rD!kbrf*REKzeeW$cUYG$J@z^~IKa3!)PkfLT#Zc(i+Z6m z^%KWp*xt|e^@=Nxg{_LUq6>&Gi%WN@+|4?C^(;g_8ogX_dGM5#wVL}+zf$~Hc;C|2 zoUpNcWOKD@{xL8%OErk8zd#Q8bw~S>n}6&Izvpr3*J;aj0ea7Tb5F#RPv%-=z3-yg zqJHKLp#KH7q$KE@?X~&%^}z1CelU0iL=Tb`m`F5}3ut_a zz~)F~!-LN=hLRO2?R<e-LLo<*`d9l zIeYlzXGJ@&>@W}?lccsX)!5))?c5!p)Ow9}{Sv1;hamXe>ByB&GzM>Lc^D%2N1TtX z({B8m4jCGC{h>rAEXp8M)2_ukGy)aEwl~L^bc*Womi)C%H(ycrB>U#&u6zAMlAO1Y z)f;#wFn4Ge&m}(0dze4eoF+-LvVyoi+=eyHmxlfPo?JShsjj*AuA37ADO|`n354Va zQ&yb(JhNb9X|$H<6LPVr z_8(7{DG3BZWODVb;%8<4ljHs`p!^2x%0ZWv_nd@JbEjlhE6IkXo8s^Re=E$ 
zgKs~O1&T&xU7eJas8AV&)A^`G=hVn5ta1Rz`YSyCBt6BHx|W<=Kk&Bbh^cCMRNyu} z1hu#8-;>1s^`Ao>RlH?>IX=gfYFVP>X!5lf^!I<+)Bhqd_}_7xRmRyeZGA~sFCZSw z41#2Vwv*KrSVOA}J5}iFv1$!xd9gUCb(Oz2e6qNRSHr4=-d_$uTMt}(@bd_n*2pd2 zUG<}_cPwYPTEchDw9Z#&Qc!opS3}2`1ujwJ zE|*64`Q!ZI_-Ch*N{c2&w?os{Zxh3x6mC@kk-Mb(@|o!hX(#}lj?nnOWv#FN?J%FW zCd6Od8YgL`+te7Yh!N{C`Cq@`Zo z`jY6`F10=hH-y{w7c#cYIQ8^4F_+ZZUbWccnoT(S1 zMo@J%aq6DlF}uSi&4`r>6pyio$T=iR`XVo`RI=GHSQMk(6bo-vqv z*x{>+U37)Isr)Vme}pac;hS+ehapC+j7X++m$#>oaHZAqEXu+3W+G#H)UYB|;C09B z`JQb5`0CeQ+l)>jX#OYv`z(*YMuzaey*!jsOSn_I)jAbHfZa8GK)hr$wZQk2?}yL# zfpO~RNK%8P)KE;Xs5^W;I9#eGP(k+*UGczSO5sE`18h?0Z7%OnP zBl8hENhsiKcuW{w1E-AnfnWtNr|kg&zUKVSTXr6oi$d~G0r)^*#IaKryEmtEfALLt zaV$4Vt=E6}RKRz{le*GNc`*Q6f+IjWHXZ7mA6*1q?;csDtx^II_8FBV>J~sm&?_2_ zG>k3*Y?N3VN!swMHJN@)W81-)t;&K`w`ii{_wg$ z0WZmJt*~>5UZ1tTD-@7!ryUBNiBWm%#`dXCpEp4R@nIWlDqp?!d$?zSeDajasFM7Z zZ2bU*D8$?ZF8AT!6s77j8`v6iY_N{|sq4v(9|@{SGP5zjg+}7%(I${AUC#rA(qG}C z>r53)Ny;9{SL|YsBRo^^ERp8<)86AReXUnh&ApBKBcBn`50)KT)oV(a$HO4Ic{4Pr2 zUEriy_YX+o_>bxVYPbRb1nx6GA}xTVZ$-XB&#+hM-zYl=SSDi%4HY;_;A-3adkQ2~ zXdp!&`a6Iyq#XI845=U7TJIz&saR5H`X>+%cd186fcPi2jQSSv3fxFKemWDh=+$@qo;ki1I&FFuL(M#{ z@)0HCfm8gl{4S0EkYxb@yIe*UZ54@v8{@4EAh#XuW*NYKSh3~12ZDB@r>hRfodfWW z#95_4_0j_0kO8g}unZ4d*i!s4a?!Gm`lX!dA376f`EMXr)Hl7EfEpSHz_adHIqU-> zCD=MsqiE>DeL?XGRmSShf~H3kKnO_P&N*MxKXbObBC7jh<(qX@*C{=nb{8ex?|>XY z*r@~K>Zxlv6#N)TBbcQL(c@=pMy@tz8H=~%2jv9Ys59~pa;&*|#m^=+8_wqsau4XY zUk<-&alHAK%U%#4)alg5g!ge2%jHl>^Y!$2mfk1ST3YbF*@4|CZP=&@n1N7H{_l-y zulfxV0&F;8Sgn9>dmNpHiz(VxjEJ?)vm?hODo&@tWtW@)jY`Vj@*c3Q*D*~@ZA>$A zsJ(@?p)T%d*9>|WeqkjIIp=!pv$x=7FP&j$`J7FM{b zD|aeMKQBAI_Q?6uD_-ndY1{>4IW)!W$@ZjyJWr*Yupq_^Pok3`ThT@VEa zo`-MH(z2_MWmXWo4RS^*gJ6s`t!KwL2Dnq$(Xs8t=(z(xw1yan;S)`qCbxmO)N(R7>6XD+?>KIyl>=eP z^m}`s3(NYaXXVF+_c-hzE}2Uy{)42yD+#}}6Puw? zY@i)@*QeedWG}aY#kwE<@#kJ}mbbBWWKOUX9~-YQgx!l^m8LBH*HCD_(7;u8xR`ve zy+x0i$Y&8lJ@1T!nHbnMUD;pxwsSXUllf}zK+PEH`@i|K{M)9xf6Z9=f9#}6y<`(? 
zSwHZmF*P|(7wPCRlgbUy3&=g*wNl#=&jId7l$FYzVA=2I!@h2~x6l3Br;$@8TU9;* z)xvlqYGF0)3tLE2oazGj(#mfTT@jCxmB7sJh;$1CtqD!p@E!5U2}t=A#@`ViIuIzR zlevuc2>?!DV*rsHd%_8Uu1xdK2b0N{O|R5(JYehtTUO z8v(gy1Z1da$yQP}lyG79mbg~5QkPxiQxH{X+^E>bE`>OW2^CMg0h~NFpH^*^D4LpIJ=SmoQ-WLNenZOdHEIGlZk0 z$To#F)V~m7B`F&W`BCgXEKVBl3YS|6UDQGvj=H<(`Mn<;^$xL0i>TzI0zp_XM78kl zZ4dx@-)h3Bv66?Q_I||>9|3&#I$#c;)Hq_)z%uO*1KLVCAR``y-qk(z?vz&vw^5SY zO}7JI3DWjx-y`R~d`v>MXYAV;U31RyAPAz)3i~f^22pMEGJ}%bZ=elGv@CwD=mc() zL^CRSM$(!LRnCM)krYkuY^NpJNO1Z1y~7WgHjPTTLc>1~UMIPc-HqZ#3fpxwvFw2dT{ z>5@VDYpSj6N*Vj!e1kRGIJJSc;&{y(Q-uz^8l?^&6b|UTsQ|^voq>0>wIMo>Mgj*Y zE(Q0mc>>K45I;1ieMbb6;6M;D1W-GX39g4-#@Z?iIShXg@5dtVKlKG^1-;Yfppt~= z0|z9@K6WO2Kax5=V&USQ?jbl;p}B4B$ml|JJ*vG^who8_mh2IGiIz(bk{Ob{aG=mu zWX;!|mX}}!GX}#GHoue($=ufu-{q9u*%P`hn|6~kj{zj% zoYcMZ(Zj9Ef6(}V4hHQ5QPn;pQ`RFLhJjx@?97xa*a?GZ>>4O zWh9!0ZL`DjYt&E;l7%8KoA9)f%SEHSDKD;PzFH-Nke_TC@=5v2nBt$>FD8Z*P?{3HU?fziVao0p}DVK_k2EImF9}^?m73m z&ebHKcg__(?A|{uDxULuft&%_(y#JovOuXjt-;oABXvEf#|Xf&!#Kr~IV73#%Zl96 zH#Y*z&kUfnI)0st2j2Of-xO{+x#|e?C+A(yn`bV9ZXPMiZj`R>mBWr*4G7sR5?7h?O(rc>eC-OS+s=W8& z`m8ULqq^W}-i3j;fk15Ln6+&6)iG-R7;Ww3^*eMZwaGWHkN~)YtG#S3FpJ;-Lc*Pa z`QsH;_lv`EhJ*$57$r;7wY61wU!uFKc>_@%=NjaUcHBQn5;=@tFY~5Ot z5T#1(z^KVQ0!h!v_A=U&Dc|vC)au~*TlWUxg!X!y$X@ja0{Owg%!gJp%~>0|=G0CM zi7eG@ovXWNio>1vZ==4)K>!dGrAtvV3PY_Gc9wbm1nR~Ci(+NR(!=-WJqP9h40g0ED>Jo-GaP!4`xV(I7o5)r2 zYocz4+HRM>s_0xsW*iWECSCgDXM&;rfQ9_3%Y%Nm2WR^%+LJS#*LrmSy6H)L*d_#E z6km_o2b`_osz#P-@1}v;Kp_bUV;ye_K|E;nyUa)UyQ^{&?|ny{d|gp-JAMfZ@AUS` zZ%P>moEo{Lan!s=$gx8S{gqoXc1)iz2yF6w_wf0|l@}+kl!oZ}cl1BGt$FB5;kFI_ zYyRA$2Ud|K4mRVpl)!!%=;RF7V%R@A-w6((fDjC)Z}WinyQ*_Za9nd`Ev4`SdCEsH)O@kjvYYrb|@*T~gK6dH&MfQw4o7Z18}p)QBk z5XxF6Gleas7bz&Pu48>B@5#(it7|=i1RzkUDuEcthDHl93^h)3u|s1j%yX5Zs|>SJ zGYtYzrM5e)Sf@_dyil2&lJ2CwXTG!+m7E_lb&(v0!2vfl*1~8SF4k;s<=~JNLH%c; zeqPdszVWW?e$@jisap!vQ=YvEMv!)23|-#Zupj05gB)O`=%rPnrfZ~Y;GOM4U6Gr3 zSWs18vTU;I3*L8UzF4LC+I73p4gF{1MCSM`y`HO96+(Z<(LVjcWCK-f-_BwYyJyR- zy;r7{1Z*JA)W#{64@)n@88h`j?8g=nAf}LWr)c_lxD}**C#QfM)Ryk+pLn6q($%CP zz`93fE__Hh+IxJtrK!`A9@53~Pak4kCipIiddHD}j!WLiv(X!2eTDiLSXE%)0&GZX zH$V=@da2q|R8y#Kra3z50PJ)AF_-$za0@3~2Va(=cm*H=3RO- zmh^=)R^}C)&p$9`uaC9X`S=~-4N(>(4=$9o&Re^gd9b zH5Xv(lTlikVgUhb2hVORhmD7&mv^a#4z#@4h<&oWS{=jh!jBq@hy|u3U0%e#Ff+)j_YG=6av5XBu~Zcq zdZ55>C!zT!#%^>e5}bQQ%417e`V!F0AJOBz76zXHDazmOYi}R05dyb zqxtD}03Qf@G5iutC0kS>-Arq6tyPx#Qb3G3#Z2l#mNv7?*f%MggYw&uI@62q*3M`P6|;R~?L6-~OR=Ek?Nwo%Nd{Zj(a!Yc}!^ zGkA-aOuOW7CTY)M+?Z1m=0YJYNpBNx5p{zDAEOTk2}|e}OjY=aL*W5hxWH_!5B|DS zlHJh)0cp+QoT*a0vgfOwS_dLcAP;Tx?y?v%-9X4jc9kT8UJ7d$2^zJaTbS9l=>4eF zlmFGt%3Euh0X_E{ugrM5w3yf_p|#O8@KNGwAg(Ek}6^}(2C3S<@#@f%ztjz33#R|+fy5H+PN=Ak}ad`Er?#rd+Ja}l<@sI*Cl z4-GW>J!n62HdbDaw)a)EGhYUYRK*HBQSb`(fHGSRH+LDmI=Raqt~S^bg;|W>S@P@e z19rHGM-$tN)>;Q_-Ci#`jrUa$kTbP(tG&4hkp{^@ph>!T%q=Zi@}~A2M^nId2+3qdU@r1 zAs@Ra7`K9|OxfJ?*|dB{gW|%m+aMS=*wWZDqP@E0>$=TGE))<&WeJ75JL<2=7v6t-&5`nWD4ZEjXGaX4Z%@O zES28pJ_EfC+;zuK;TKLP5b~>ECn&xn+}V9|!*w{fYJ{R4!|!vo`e-(!EV(eoH2HDtrd$s@hCo<17~E)HBkm_IqbbkoKqh>sZlB$C^99Ktr3#R923 zzz9f&qhn*CuOr0fLbuq{4C5QrbvPd*L?6X4JsES zZ|ywYHy3U7Wi2tBu>vMEb5A`(_Y`HLCPJS*eU|vhW50(7V*9h(OOEgQn}ZJGbl{?x zMM{fTEi(d8tjbfPR~uvO%Et1W3(ewCixsusNw3mRP43Xh=nI8ODQlopX0+g{NL^a^Jm-_d>uLQup6EQ}0&I0M zS@v*x1G|EHU&vOcMB#VX;Y)aMddq;JIcM&HrI52gyTxhK+fpMmv8TFx+j#GAe{sd| z#8_=r)~L|Cq}VtCfqaGDJpxXflK@W0#g#?M^JS`x8U$5{H}>Ap8YCeymTZ3iCHQ)M4?tG)Te9@T%X_lUoYAQh zbgm*_J$6s+GI97sRadB`eB#3IQGE?tRr0Da7WV-YH z7)(d!m~I<`JFHltD}LMQ0n>$;BovBe3Uhp_OChh=uqa|+Gv;*ai3gwQ@zk|4On1)f zimtFk&pqN#qx!w;liRDJhI!@hRWk<=Qa?$k*jgk9Z_HQzgtZFj2J<1#7_tFF&)O~C znRIHNp|~+$R+!`!c9`J2Ldgs!Fg|4@`4wgwlzc8 
zGp+nJP?88gx-4do&9{C>+~#0SI>mv)@n%wz`i1wKaIkAFu85qnar^t$4X(~-kG#^@ zxZ|fqPd&i8b4S9)UX3`|jrM{ueHVJ^f-9XwuYs39{j~>(WkL6a9T82}(3`|D!!eFC ztEcpuWKEGH5B7X`sysdULlBS5qn*co+j_lIo|M5*FzsyjqiBDF-f4d^E$!cHm6U;D z|FFD@%`X5)s15J{0lrq9@Fv$8(Orl-+(cd1zk_PoAoZ1*!x#faUBR%+&es9~pS>Es z>fWD?M!h;Bx|2QcsQMjY{ztsVNnm1y$M6%GRc12nApT4Pb6Cq+p26lbs(2OFbczqa zq_pI@lqj)kbC-gHTZz_XB(Qbu9P%huj8@!I6tI6yDEVyXY?I~zIN>WNgpUkvhv zzgpt+y-JE`9ZBg$tfNjSs_PrRZUM%j32K^*c$|z9AL2`>{TEG;O;TAWz-h- z%c=%fl@qMqS`#sIjF%Yx9bw=QY`E959eZ1t_F{>Yeh`23*7)IRKN4{dV(q|AqBhjge+>dd`@E_oo zp!ZO_c@Q3xKwU!Ztfot7t+-pbGMXq{fJBr?ihS1&gh#tb?!nV9;iCjE&0#Lybj*Jr ziL$;rWv(NSd4W5~JYHM=(k1!V7)%;!&{}d=9*5!oW~es6K1YLhRLL~Vo2eVb9pkqv zraBzPIJasSi)BGXLj}QmeYc3RL43k&X=b!h(_N!CxiC{eYcAGn_wE_5sHPvT>(g_M z@Bt6cLBPh$6fk94V3Y~=oYOLEc%O84@Bn5thM&b;B~uY%mXoj6SAQ%Sv!!EKjKJ-F zY_VC>jFvy4`nDmuwBOE5Sk-Ugda%={ALm<V_7^cPevQk z8ahGmp)&2n?~Do1>7tz{>A$C_Vznvy6@9fH#`KE&VyDm~&bn`l(b$Th( zE8!Gi3^<_2PPC*A0p}G%6DaF0^iB=nS506yu16`$mJX#1 zC|z;@cf3ApyO<<1qHgK3J1-cbSU(*E#`g-N0gA(ukq!dMY`7>qWxIiX0MYqSMq0sB zuHC?i7@9r`@-!RbHKHo$GJ{7DNkWF{lGjuN+3|D! z;+QA9LyN)*`dB>}_ftP*GwpJIfbU4|or+6mUd1O~M}SCV=?<9&xcwJt4ZtG~%cJv3 zJ8$aq$D4Qt0F#aoYjff(TqBwRBAja7YD#DWn&6Fi?xBT`1KUC;eRneE}-#zW+#v*ASTKH4Vv@&zYe$-^7^nl_GSh)aC zvE`FuYBfOYwSE`cOtv@m1jKBS8#QEYVsoa8%9(4CD+qd_nT2ha1s#EhSy6SE!{YSB zux97bKP$);+eVKVo{T*bgGt1~)+hldmIXHOhIS0(F>{ltM!^sS7&=CkvRQhaVgsqf z7j3S|Xln{s0bxbMkY5v88!qSK3gQ3|x>Nq(PU;*IcO^tzjGH9FC5I2yOM);g?7{F9d|q*wW&dt16f&okEf> zwwQ1o)j~=dP^gtbhB;6Cr&8f_a9|@7X^ByVqa0pTb(;ckt1+MtN3 zgi1&ZWiw+lSS68dM$E=4+u@#K+?h@9Wj*inzR&M{-sk&!j_>h2-{W|X~HL(IB1*+nq1;7-RKBP!1bD{n*yh-}R=6HT&b}K|Tcy)^$?DFLD&^SlQ zMk)LW0lQA)dEWa9Q1U8j))y)yZ%FwkiIDau>~0MehR)!$@#o>wc5Si0qWCD8O1Chchn|`dpOzR% zA5g8>fx4wpGG#wNMkSFW9AX5zoJ2_o52H*3Gb@;=arqB|;*vR3;y6#Julq4bMmv$Qy;Q+U2O;wjlvL0BYP&Jk)g;=tL*eT1v{ z+}bo`;?#-NM-*BvqnFw!Js@B18gzq)Q+QAmV@)SR-IGne)be)M^J0tL=U~@%S&rI$ zBJoc@rK%;ydmo?&jU{?9@FRR24R$GwTbOIl{x~f0M^c0?eEPeiEvIum@hnw%)T#ES zr^zZtCsm)|QF&?I^GY4<^7Wg?JQcoQ0t@L5W5Wa$Fk{h=kjcjRD!;JIMszGr*+wC6 zF#7Vqz{Ie&TT-S*`WyUCUl+dp)D+jm!fpBegsv!DD3ja6xh^t5OK&m*a5vglxDj=Nob`r!>|5YnU^jsD5&$fF%v)xOWlm1W60Gsp8Jc6`%u|54h>aO>Rdp0@dT}8d;>Uv(^XyWR-n*4+J}&hS zlaqPjxcxIRK%TP_$9*uo5urU9*e1d)0eYS$miO^N)<$iRA~5nENw8fXT$XIO1uZA$ zT8E}P{T_MmCeLHJttW-Ug@uRo6oW5}9{C!(OZ!dx^3ay#8FWM{^6O zosMjRmqCWhj03pV8~N&^F(3IP7b+=T*7(o)b> z73ifFZErThq^UA)j#Z<^V4Nut$HJk##&fNN!$qW9a;FMTBF2sV&Mz+1!s&0+CxBK9 zG{(S0XbS%T@#ivGxWS0#VS|d-LME$2GmyDLrWx4(V%xm6PE(q)%j&8dbH17mib3au zP)aN(er`*Y*xQG!5yZXzV>^FMoV5U>_bR{gr3^$D*Ns%<#bfB^vtMr7FN` zZUQ?0Nnfxy!X!JS`BXGu__Qo&N#>HP(hBM>S#O4QZW^zqV}(7h&YUlZkKQJ*(F+fX zC<>f;!WoNi-0|k(b34o8y0N;HGi5PC?Rs^1LYU{QExLsZrrpF#eBj{`Z?GK`{gT7^IXh%s;(fjJGsOHR zP}B^_@MaW00$aVf$U6$MRfL5&^;^I@n9r3>nClN_N4drjCIaSLEC|E=slZ z26kn@54m-q0IyH1r>=+RuTggMfPp2_H$Ot>G<~Gl!Zr2b)uU;J!l19@4d``&42@3B z?ISa*CQORPAD}{_^+7v35QqhoCx`M5ZP z9i5@Lgy9XyywIhUs#MimYgZUJY>u3I>N#n!PN;q}WRE`n@QVspCw8C~VIhV7)?1E> zk$t2AB@K5BH|Qoh4bZwe1yre`_fy|{T%w%lWb)L6I?Xv~77bNe(T@G-LedaIvHAUg zl}BqJT1|dClAL55kOzq!>_3>Di?p-Lfph_W^7>W~``+8$=o9YlP(!AT^CE|~=#c|wqBDP9EEGlv=u)XIbZ zFash%iMF0h`MXtot&6Zg7G!uB$$`T?fqitSzOTgV98ZOcMsA<%I)x-Y134qZ1PK?a z^5%*Ql1E5NpO8!!;qijHi%>DyGf>C+ofi49f&jE67!|fa$gVa~lC&83Ta%E@;Zviq zhsaI(yF`nJ(*qI~cV4NSJuhVs9?2ewv}+0>_@Rg?xFblFu#~S{^}+I~p65+dOsy8L z&`aAVro@eKCMa0`o04FWI;)b_ELIDYtR*VoVd4;XuD_^sDuUaGoCNMtkW8D@Fo>Sw zAdb}KlYW|g5I?GivEEB^q*jbh2o}u#kefv<9g56*C-yC+oi4rfKuNO7U#4Gn|3Nu*BbFA+`4l>W-V_d};bRupQXvH`$(Fc2%2W~$$Oz6@gGZbb@ju>o2V)7)8 zeYmpOAp9W!@DfqiE!_mDib$G^IOvo6zLiS1z!wE^{1)&_HFc3KzXmJFmV<4$bOH(I 
z(b!teICUe$wgfnisnd&*h(o;k_nwx*uFC4GJr^QQ)Bv6EL8nRi?fJypv`EM-IE4a!i^GMhYw$cUgFy=+4BT1v9IZx4N`FE3`MORp444N;}Q&o zs5r_VTOc$}tj9fpkS$6q5@L$4{~;$Qd!wr((M%S3AUa~ZKoqxhH346d&TIjuu$Y5b z#h%*Tk0fU02q}p7x^|z~L9)DxsrzXjr+M2W^Wg1fxKyN_(dCl2QB({Vej`w=m_mb( zx=+@^tfC1_!0OaQJiuELAbLfVOVVff5j%-PXYEB{xro%u&?OX(l(wKylnbCrzQI~O zMSPYV!nztzE+oZ0H?_e@i}-?&Deo1TCo+G?u`|2upYD|xr-O?EXFMji7!1C_BkS=^ z=vd}z=sac#ICbD7wjLwXX(PBK<`S7MWk2G^qbVX0jSQ5K(_C!QlvR+%2YTAygjUxI ztRVFgiemA^&dZ$Db3^-G-M#8k_w3nHJUyGTZ3&>b!EN_R=?o*N;U(~=2L~L@KpDYE ztHgM+JFjD^;{gaV%3vv-mKb$Y8;ZbZxy`GCnng4?cS5%31#&7(dh;=1KAakvnG$ST zDxn$vk|p{)a;+Zc1um9IE01r126Ps!pbe?-p$e-Qh=-?~Lr(EfYsRU&;c&@Dzg0rK zNMBYY^k~i9_##H8iB_0oY=TP)N5`uB?nQhVTln&D1)dJBN8E$QOp!e3tKxm80STo&^Jl z|NCl#i;C-nFb!w$dhJKa`thdQ7Ocq zNo4G)N&ceg_+TuCm1|OaLxEwUa>oAr*@J-g^MK-IiZHYBhujjfIj{8E>=O&zG71(3 z$YM3mR(DY9kk0E+9Y2v}lw8={d8eZLc`4(7U(_zNYJ`%M9B-5;W5X(}2UZ*uY!6yH9H8+1I5=qz%9;(-r3_{eFzHI*`X7@eqnbAy0(m>r#^$j zcyG4XCpjn&FHjU>xBzlgi8dqGfX%ZVJQIaoE9<~7$C%21DZ5KmT$8hPaeA7_4XnIn zgv%}V-%2Si{Gwt*hm_HrX%cB}3x=*&nBh`X0N#fQY6%(B$~2IflHKaDI#ocpz+wV^ zv&+W(QIko{5D}y;Ep^Lif%75Pe}7urOJ8p(7~JA?q~LX?a_TV!X)x1&=DIErZNlKL z>NzW>f>e1rO?X9$b$6jt?d?dN>%g^hEbZyL`?tfNwfmdpT`|eXAl5uBNY8S-{444k zQ0K3pT6; zXS7XFf%)e1m9jTgUiVIX>Gf1th&rOTXitI`Jwhg_4ViL|Tb~e5Hwd`cO(C3iY z+3gnL!cQkQ38J4UeJvAqc=kX+07{gyvWGM$Tx!zTcu{p<1*GDl@@>F^lZamZxYK5B zP|)v`p0X}W(+%%CR}S){e+oYjI&skeDf10-;5N}xWDAUGV))(DVY#JH*S?5xiLGn~ zhp|p$+A?BQ_dx5K&v`HVXKI%ot~{dslk)H%Z{@$zB)Vtd%$3xSO#M>R@-HqhlZT1N z>1Qxllq05j&GR4)liQmbXF zDaI!$KEHL6VE#xBvI1S|5U9a zB||YL)yA3U)%5$NKMZ|K#wO%t8KM7jO#jD;;6G+~@4rTizg39)_dSQ$!_rsKxrgFF zY8w-z5$DecWBBp)`-CR4m8D1xZdq8`6enO*39UG{-z^_qgW;-SU#IRCGrPWGnnhZ9{-pE?|p9^KT^ZkaJ z-UBE4d;hju)<&_17z-mYX9kc&!A$X z-q5?2roSqEyT|~JkM^Xv)(qB& zL4MMZtZB&am`SIB?Q0cbT; znnd~`N5Xv!=Z80>t#JMtDLGqx?E;_VEN)-`wQCu6Wf`0g~+qpYF$WX7}_=Mcu(JAHtW!DC|8cV6nMR zK*Kvt@V$WS-^i)Jus+T~iin@c)ZGzQx^82ekKh`2ASprlWo}>s=ZTaRmnPX?2Tu<9 zd>kU`SdBlnk9cDD&}Y0X(CW=^fa?QCUmXtQ*6SoTLcetJ6>1Mdtp+YABINzSDJL~S z8cmx@6N2!pojND1Gn>=21Ke9~kG&7w^E#9xPtbi<@^C{SJ1wcCqTNVLn-KCcL5lymP zA3_H&d?M;lRcdBC&vuvNQGXuvV1T}%<_2_ za}{MPa;?zk)A(cwIrz&)@p)QUmEfww>&KjiE02&E$6Tj}S&@49ZcF{`)5XQXxt*Wb zJ}xVjhVj~fQ~;uK+KpkY>;|l<^m(mEet=HK`8E)kf&bDx&#(Nc{YE2I8XXjUTY*PA zKg{KidtDQK-JbLp>Aw9_r+h#kOA2_>DiDUv7=Zq0*a5otr3crJ^vY}~?nt39;&9fi z6KVRkI1m+A9rXF#?ekZTf(9bwBH@|$8^fPpRWVbAs;YU0QIIVj4rMl%BER5*JK`W! zQ!~oY9_~!Nsgj^_a6U4#nYOalg^$q)r2WLl;TroG%WmJ_bH?~jjSM;IYp}6MXygnE zu`2RL-WW>`*1BDRtw6Q(`#R+i*qwq|kWa_Ysw(YNU0w(cjPPfHUZ z?h9rmb4r108^H^&2M$Gu<1r+pc%tg+TvZHXBiTcyS!dE@Eql1!?#ZNqb{5C|!<_9_ zU&l9j8AFF#=i(~J|?9i)W3nrb;I zzzs333{7x!NW7&bRO?SZK?WK~IG~hx*mUwZs~u>ovS`V3W2PLXFBSMWKmCZZ^z|fx zb)1E~wB}z@Ir-U+si$D>CJN-WW#&hI$aRm#2Pn)e{H5Xf@sBkH`P&HLzpbkwhEH2f zK2txe*H1aOIPwvTTI>if5=5|uw+auH6(Ud*Rljq#?TXo)t-o)-B{A96+^MT#(kZ{1 z(G#Xx6tTyJ{AU(F;F051ImxXUnYt3#g61d5^x9Q4_%Qxj^7Bm}Us{#7^9M;PYj3ox z&k5&RV;{nXY|{P~DaEO8iDr5BO@)m&*|S|5(L6` zf(hilIwwC^M(hexGxqGVbXn{FvZtc%P$OS;2rGMkrI=)DoWvjYTA!{Tg|o5Q4MG>t ziA3YcRlI%{E!ht(eGU0%1H~kO+9Vrvfy6-=LH_c-%^7}QBvg=Tp5dnGhBI9VrF7=% zv}MTC3R4|t@UzEzZZ|Cy4SH33y?rY`#d(Pq%!&1>!wdrv-zNk z{*5HC5}%T_G0W1Trx@$0y=Z-g9%VV4S_B!TKbnA6QXKvB+WGn?HeZngyk9Du-JVg$fQZC?O`6HU__PQCp_(AR_K6tczP~YFc8v7X{8e0>o|; z1xr&TiUpLz(kHSVWMeMWBp^0t7$|nt`i!(YSJVNd*1S+~yM$OB9xs^I-jY0Fbrly! 
z7wlhiIjWC+d~n*pZ=J$-Ks-m|sPCBYAyaeN@^50IY!Ls_L$YeH9!D>1E@g*<#IK?% zt=0w1Vs~sjah1YUw^=VN72QED3eWfmmr}PRH|wVNRTUssEEgU}aa(tQ^{JkYgkrji z_M1+xS4y+_Vh3tBkj>0brNCnnA89^g8-nAuHXSJ$r>@1C3tJwrgA**J1hdCo6{Qu= z-O-i!O(}yut>s^|zut0K#zm#tM1W(V0Nm&?$@V%Vs1TVHO(2R7d?`M__MgvL@|dwX zOSETZIZ~hy#_P}`dgunl3YalAxMDV{l2F~6at@Z|gay0_TixyRW*s=N&J|2}UoJ27 zBY-PoV!5n7hzB!&>z+oqLM`8tYd4hJf*GQwrY~^L8Lqad{4AS7W-qjy>q)f6y>j?L&L6JI~J7lCmhnCr7> zJ+jNOY!^mV&+I@LTAiHHq^+0ZeGXsY+DxY-h7@(ZH3(eLw7Rz2csw zi#W%_{kAJS8u#3y-Jp3+|LE6mM;Q@5yRxQb(_f}TOj%#wa`oxpbQI1AfXHL1OqvW4 z)f{X?MAxO+R2|7d;kMuQ1=yT=G6oG`ts+d$Z3#MMl_IgirtsTRu)Qv-LD`E(~@UHn2y4r;gpMf0KWpd&$+QHAWbq* zj%W3yN5l?_1lR>O?9g?9|TKPwGY{MDnWL;=_}8o&5-xPXq8AOeEFJ(UB;ut3+o znNdxjjYq-Q4g}MzEBJGCz8^@Y6DX?TykdVp21Q;p&defmW>Yz3Uyp_OPHXxV|8U{5`DQI2&i-Q-Iv=cu_x_Np zjSIcx9fjM!A!+l5B;^f%jlGFmbMs8n=Dfo*B?k~KxTXa>Kwj6BX+`l4TNhUDCYX``oMV)e zKj1Z9xh3q%R<@_NkJn&=fSY6~H#=m?6eS30bd6 zhq>Ym{5~m2J%#sCq7YKx%sbFoar@Ni(DwDA4+}r)nb;9XIs*^c^7WLTrN1-Ja-#i= zu$JJqxW$2?k=wgS!z!4H@=~&U)72A2(wr-sipz%vB8#T_ zQ6vRdt-E=f6Yu}0h{aCpO<7kNnw7mvwM8>jSrSCoS2dLk7rF>9=Um3j7W?~F^wox&rBI)jsy}=< zNS^l;TBasKfnzVd{Kil8dF*r(w>kfM$we-5lh9rCi!_~KjU<5{dp+iPXh1xIPQDNt zK|*ka?W_7FRs0$CiG>_S8WDOnk=e&S>#^mW;+K3VWR;2K z=cc#vJrW~C1x(Q~$}S4vERm^7j1kOZVt?nv)i3FAHV^d0tsAfI%O@!f{MutlL@SLg z*p}o)Dn8Nf92&N_5Z2q=xp_t}K%oyP`4{zjf78zEAM3pKw~~Z^>iPfD^EY)My9!01 zjRY`hKwjh6N{WGA0`D%Qkbkmzw5fD0o?_iu(${?w$>A~;$u(~MwJzMDwHq^rP2G-b z_;@3^t@{WW(8^Cpu&vfFN%J0z?kTf+Tq5RUkq7{gXe)^_V$MT-g#M+9eySgSj-~ZMmaOSiP=? z+6#)mD=nOowR@*Es(FaQk=2o6ERx#M%vq9;*uxXJ7>4l+=Br*)dPiIaL$fQ0vieEB z^wVL#Pj4>r#w_6ANFNvH-k76r@%+2+NkCu z*)4us;l8rx6lIvkN(pbu)HO)w7l-)vghtdI#A!`k|M zl8uf^9r_ zwuZd>x`2U?1C^dAtP-L*cmh%+=&&Pu3yKOpA^@et(Vrl#&DM{5);nw2w-PN)-H%jUc@7e&ojC4uJZLw@5mCq< zJ1%ERC`1V^N%kNQF;m@-BzE1~=(g?-*{oc-mx!zMH4xljpFSG$oaoz`YY8*bpCJcj zBZq3~H7@#r+@_1FD`(|8*d`(`#1P<_kAR#h#&Ky(6!Q#C*{Y!9T;V znS|1cVk6QG@BUPv_&v~FkvP}xeym3jiqk--!@}uMJIL&wY9Ko{C5K*;A@r=5Uzh=G z@j8(10H7R;uPB{j!h@oR(qtQeb`-Ux#X#4ic3I4`nv&*`M}1(cCXyNfw^m}$6Bu=o z^iKv{t`=~TB*YPYmj-PS=r%m*r!4F_&mQK_AMFlJ&T((jxf-SPbrZN42!}}$%MtW| z$4EyslY3*>4D5Lmq!o-s?+@cFCEt2G*+NCf@N=sG)wcvt)(g2gpz98dH#<(sBiHaw z{|JbL7c}qEf`_0R@j-G|K1U zk{29fY@DIe0WSh}hry~oiye1*6`rR{xMf;g?P@Mt1M={}v zcb86^&E(TRRG98)I#iV*nx2?;3c6WhW^}jqhWug&y;2 zS+RYEPrWI6b?_Z4k4()^@2ZWXY@iX0JaYaI7xO&{79DD2l5=;^siDt<-fTOPvgc{g z&NXm#k-1Yh=i?-|I}D(Y_xNej?HnLITU;jA;Qn_7GU;h8X|WKUGnFp1vIC@1K?Ig+q6Cek6p4c((cTX{a?4Q@+pLP{AIj zS1;tG122;%^h}&c!5Ir86CZNP%+aAB&FP6<>A_~Vde5c@W$+JwEv|0c(74WF<8Ke| zF6sU6l#l#tt?@r4Q2ASC+5cFJ(!YgiKr7{cn?@GVk}dbTD5iQ+-!8}iPqOsG3)rtp ze0*&9XV|Qm*Revf4^p=0;kC(!+Wg|&fWg3FTFX2en@p0HKDft?3l%EG(Kr9eW``$O zv_+M7Dt*-ht4y^M;{G{XH&*viT#XW{5QgqjT~W~!b^w}~H2mjS zr{}%rQ}^kQ<{R+zslY~x^t=#cWu^>nEE$mW!XO@TzyZ+Gzm=j#?fJPB$LI1Y@W&l_7{Q8z~1rEKAo(7yH|eB1M)772Pa#ZKsvQ0XJh z4g2t1aAEedoHP-n2EY^$F-enXWCM)CMFESictGa7;J}V=YHj)W6svCp-FVhaOm%C5 z#N@%m48IMd>;fK@mG$K|iskCwl&dJjH!&0NsNx_CydD`l%BsdE6UGN|#mc&kL0G(eU(Bj7KA zMK%rIm@-FVQkvvvB}Cs~@)Lx{EPXT|TP97*3tR{rl{it>KZ61y^QIQ?gTeeP znQA^*H@Co7O$@}t$52sd*kiF_yNS6%)|tBnxoFiO2w^dm!-`2w?90WE5Du66WL|gY zLo*rcz9~X#+95`Bo?P)N=pz();|LsNPyvV7NYpmG~UpD z0P9SLGxGv;cg8#*$IS!75g@#GMki3rc>~jiJ++7ArFP(>I$;f=hCrDTq9k(aWODD| zUoz{NY`w#fBDv_G^l`Dwf_%H->Jiv1pVzL#3!^nfkks1wD7F>%cpcI=`Wm^DRa<>5 z7ZVG;sHnWd)ke5&s)FoS6o{~=!S;fqF(|Vi}Gk}I{>7UXU8%y?) z0NmCQpnn5pY=gR<}G$MLL}wOMBu?!uXIb7K~-Ne|KlC;G155cR(t za3)bQzP$f3OxSB6vS zq*L+ZP{U^2m~au-UdJ`h$ijPoL+&it8|Yc`Vv}nlnm_+TZqEepL|wqc2T=<)cqLHv zMF|1HpEC!nbEP2fSCSfFV!-JYkJSB;d%qBcLa3P%%@|plP6OB%b`lU2Mk9fzF3v@! 
zvhRnSV=@=?BCMBTpl=Ld$okJ){^wi%=fe2^*?trVJ~@MITJ_RVQ}%Iqg__~>z_T#E z`N^3TQ>)e*W!I~$a#H#JIK9i_WckY{Mp3w96~06#p;5|XB)MrJCzbwSl6@|Y{)ni1 z?wwV2aPi~7j50i-I+u1OxI=L$W$r#aH@sN>!3N@g zvlsCHY!v^0UIO}W%YFYoBmWa*!t&pp0eCP5LBLMdj)Q^xMm@)^IU}A&Ypv6B-RY-} z%}#7>m;7D8TUu}M3p|34f;KW=_D`Ae z2bqZJOf+;FXtgQYP(^(wpfZDWSXnvqDYYKY2-Ty}6f3dKEyj%-lZ$cVBxNyf#Oz%)!{o*@VP}0a zw#$&4$PqTk@mGFV70y{n-RA9c0A;sv9<6aYQ64vZ(f95OCOYf%Q|`-m>gL;9`ht!$ z{`zxo+1+Qw!B2Vc>_l(23)K_%?%tu5$#*`lu_tYDb};F!!L&~A8UbN58oZs&OZEZl zA*YqU?E^M7@1pmtleoWktc%P7ZV|gT_Q%yyyL&AgY7cb#c6%_BYo%3bZ#we-r|IVhPg>jO| zLmIAcL%7rHuW!hhKAIu_IP-*H@aO?OZ%r^W^DQ^#Yd_9?S^y?45*@&AQU|H`A|S~j13;ph1^@|6 z)QFets`?8eID{=i1T!pv2n_%ttOGpOU#P|!pc)-C05#l2Z~y)Ts44d7W=N1q-*I(v_G{8_YyZ{^+@6_&B6H#6nJTA z(^OlsQlmSP_KOgcMkn60*;9lNTl?9mcY{C!RUPq=hOaL{H?)20K=E6>V;a9CbQMKG zhiPRv$vxT>xbXQ_SPRJUJYvC^-nakNDBvG!Uj3gUXaD0qLwQLiNN4&v^Wcb{|FBtM zfz^X4uBcHJ91`tQ%gyl*i45QJr+pAX^^@Cr{YHDeDxNR!UtgVd2Ht&sS$1*CyNh=( zEYnQ79Cp$3ZD$j2ykPcFSm1uENJ--TJmrIzF5fWR;w-vxd~p1wuWUqE$fq^qG$==f ztNV(c4Y?Vlult&UveT^kSdneoeCl|tPur~prx*SG&x=i2`B7n&F65sU^|)JriKoq0 z-aE77>8ApYw?FI5-!^ZXzPa{tQ0nKS#am7g+3xk-Puth2FSn@(IC9X{P0Pk%L+R+N zPeB)g2!|eLMUw`)nKg%OHN6Xs7+YqH*;_Y%)0G|=`3q8@9#G{er~0VtkkVhegGq(i z4O#88+F&}9#SKxP(bwa#Vc8qcZ|M8d<>(b$`=BTy;1&owTL+z$zV&x0H9XH|rE0zV zW@EN2*1+P;Z1Ne;lKPStE+V4 z+?Cj0Hj(!MX3u$ZPfYlYD>-$K-3eRl2i|hlxgTn`Z=GW>|FE0+KVjUzMM3|G|A!jI z$kgM0$lVD-B4qDFc#!NN@Pimr5SSG$}xq|_&3%u`EQp0Rb`NWw;gUH&zumv@Tcf~21|t3mWC+KJ@-`w8s zkHYO?26q6?tf}K;ZvWoS63Xr?LM?t~a_Dz@B&uPjbv`4RaOz=mXrBof;^TR1@bIhd z3mD9)I(pao@-Dhw%8AKedcddKAJ^xTFVSi#HTa5FddmIn_>P4E&=dKk!Z@knY|r+e z8;?GWPVv}xrhJ`G?A5zhR~PN4Z`(*Xb#e7}%ioX$>cpzi6WCnejN`4Zot=;7DYb1) zwP}Cb|M`0GZGy$;%dg$thM8Nw^9ljBxF9nlpZ2x#LSBpYCv}al(Gyos(c5tz!kzIZ zo^PE}BX^FlhF4i*nLllP`*gJ^U9I8$z{9QfR5iQt#OB?hPtNQrYP@h>byKsm^)B|B z=dDV{4#6LCRKDy!``g`dUyD0L-oqnf&-V9a7JSv!stq`kxAo?*>3(lfpw^ePPlD0V zjxpoCeWnuo2V=QERip+4g$)_H_L)9>rEIpLs%MH<7q^>5+ITu(B-{twBF? 
z1!u@}-O269HUsNB{>#bl%)q`|ZQ8Ns^p^bEeIL;XwP_t*MRGSuJmg zV<*@m9~T0~wCD4$)#M{3>)l^}Ej#EXKm1)ar~Q7XV)g$geBm%S0te_d4^+?wTSW)p znW;HmngI8)!zbEY>_%jMWXRRY-eilzJyQ2c3rjz>^2I3Rw$WV#?$J!6{e}&3+N?0A z{=Ws9-#dWD_$gJ3+Jcv-ToE6j5PHo34XS`>wy12Z=n{0zRfGDStwCxDZ7TP`{N0e1m4K!paN5w`~B8Ul7zUqyh~oDV2x4EK9}VA6{v z9z4aKM|JUHrFSmL%E7Z~Ysg?S!!4zZe*CtV-u3d|Fp^b<4LJA&_+mhE0dQXcY9L$m z32}jofS%?G+;$X+e@NNT&rm^(n}^NlF}%?xr#adhHcE@gQ4bq0DWN!*j%5wW?vm(< z+?Q$YbYLyuP+7I|ffA5NH>|=1qEeWZs3{FDl)|Y%CqERnvg-U>@tU)~jx6A|+hnjh zHPWhSc=PM-(1c}aJbm>34;yw?UVQ8j5IR(RJZ)?*rs&UE4Lm#eLaip#T)8jG zSbD>f#MbkbX2=$wl^%Cbnj*I6Aol)9S-v!RZ#Scv4+%`>0;tVAf)OlbE-d+2m6`vC zh_mz3Sxkk(_{9HOdEmJ82sUpo?w~K;7gN7Q=cGKuflxCd5u5w!i zM1W8uc&p0u>{^bgtX0H8oGM8M;R9u2(1i2zUjjMVC9osLNZ%be@xXQ)3lh%>v=AGG zJC8dJeK*Gqpk?i8Tw~d@(&dMhy6^m52F$f1Y0nO^A`2RSN=u~6xaj+_a*#P1_Qc2IRx|WK zXU1|IG19p4D^kQ0T`Q#6{0t6PmuE=$0U@ag$cH(TzazxvpIcJ&Q`1USE=&jI!mXqt z=kVuVjbDiV4nO3q8R-%n3-qk`uOJu3w)x-tbC>Q~lcQJ@ON;D&7?SfvKN)GqUJpSbK z>5m3&+?s%4y|^pOeOP8j9|dtT7jA9dZdZcrt2|0Vh^vGc2MS3cg)*1H~DtRu)nT{)C|C$Mqoq{S^_ot3R6 z!Rrs{g_q?O-BW^X+e~>CWo4Do@89Y!v}vZJaqrqmhS5(Zr<2%=X6G*iD64n@?rph=+ zyO;#jx{3K66Tc&wEm-yF;r3!Dr*oCBYws0&_gm^Vd{}AvuS&*lE0HQPC?xSn4Kx+9 zf+(*-h;Qyl0If-9c-eIk^=LIyi#_<2h?vFpj}7%nIX~StVw4|2%(r_Q*w-~-rJWTf z?b%7R$PjrPeYTg24h3i4afp-+3aXA%H~FO0%j`qxd{RPIOU+}7En>&B4VvzE?6-;J zE6)h!$)Za9v(b7(Du~qrh2@3Ucsjc(R?2leH+y1PIn+?g{|o8 z);a=XJ|5YsHrj|wG1v|_l1G6tXFIbgWhYuL047Jb(~A`vhqp7kvCR(xs&9F>7qA1h zXW+J$!ltipLj180pS1^FwCz+HM8ghDWb-JCIcV4LLvF21Rn~pgD4mza9u*xG{toLj zXWN>+vB-v24cLV}9n&B`^WC5zr4^8q`dKnvp;63;skxo%L!Urxc5q>V&-i7~M3_<- zxi_>VElP2B|Qja;@JgG3^CI^g6Ro8=WZ4cw(Z~<;A)c!JR1MnlQd*o@= zfKeLLT`|vD#n>=r9eMx6MbLR4AHo|C^{LSis>|BxMQRqvGhefAr|OhhnyUUzpGXp` z9{rs8@(!n>V=7W?B=PNMav#$SY_8b@67r_R(_+OSy8stnQVZr8!wG!xX-PjMo z3|rjG)iUODFfX+u-bRu8ib4=7w49Kw?KKM(xUh8C)qzyaeoU9XgKCPBXa5;tOIn_D z>9}=n&O@@9NrYm-JBwK|4Am$QQeVWwieRz)HSQ zgEjcPUzlnvNclxa_KgzK7k1w`4!ZWqawsUdQ zATn3d+eqq+I$orqWA6;69t#NE=ma43PzB+2X{MCMtSG?+eTP6dHCk(qq=umPA#Fuq zTE|R0b*n@TN%MbV&!?x7>2%=&lGiB1!nrYg&WgJJM9X&nsre$p#S4tJJ=%RJO*v>k zRt|YB)bR$t!5c9dX({AK6qP1lt(sEgF_W|6Z?u`IJ6b%b#yfxZa?EQE z!zAWMdQ80-|1kd46f#tj#E5KTnMz_zl58VmM%fce;Y^t9Nt*0sn;1hS+4mST zQ+Amt#*tb2eY@`a_1w$vy07QH?%(sA&vXA?*I$^|%wdk>_#U6{XL&Ddz$2&Ltux7n z7`*$wtEQFJfUSsU%Wx8p_}YEkS8{q~KmGuXmbDoQW5%~{_CeWnSJsO=)%6jM-*a)B zO}%06krN&U+O(^G!i0sSYanS9t@mZYq21NtEtt(fa@S)5n@JKZjO1xU9a^W zVh#vgaoXvTEU8PDO7_2`_IwIwSM*{%xKg47HcINGw&so5ldo!eb&TPlSsrnJsosIr z+KPbo_|DmGtb95QonULVIO!mjrqTo2WYbfYV+}pQiFn@D`Tb18NWEe!9U-0FGt|ZU zUKyf#`N}QbaFV^^+K61d!|^V{LFTCLz$pOviP6r~GwG~|Ul*yXZ=aAmL@S(5_sLNI zM#s5(_^v6bI}yu_lMQ$xlN?_h=jl%(R|Orx(?xcbqEgsi+Os<#n2seQX)tb_$-l_0b<< zlMG0mB#;emX1t_={3EJC3{qfj*JF9k`|vHHkH{FzZ-(MeRhLumjdu7IPCK}%_iEQV z#~%&6Qaup(6DEZZr47s0B&I1>QgAQZcK6lMWgVe`d zz%e2rAfyHDV;3Xty2+Js6y^>1`uG_(6V(a;trjon?T+;P~?JS%ZP_3lGr&1fX;gV$W)#o4BaTX%#)IC|)B@1}v%(r(p zQa?Jlb%pN)8ppdpk`AJKJb4=lps?;>wo!c&FuZ=@L(w6DX%$DUS%_>QZV|QiYAd8$ zDAfls&URNG!!_#}A^LW4I{U!&{LNdW%q8gijO8*%Q6Rp@lmra9$Jl|Y6_6x+BM}vP zEsV(2F)86A64oMV$bn_(rqxwhBvNz6%UD2hsv^bGvcB+3|29WklbGQ6EWmHDs%)iC zF|w$)vA@`=-a_;>#p~$d7h4R^Pd&HXZ<}cptn3~9Fn8~W!q=;lvRdmEV(Y=TQ1I5s zRQW19Tk*JNlDA#k?}45%ZTS{58+6v|?Cqx6wwNjCy|e3_i0QtZ!tL8_leN<>WlIyc z*#6Ljoovo)*)Pv23VV8P_2d*vgJEF3-9d>g&{oQp8 z`|?e{2vPSs-PBy|RV_z=c5NQmypS5Q8=If%8T1GwD0MRQ3H*Wt0ZI3iDRMwKM3*h= zS30Ml2k|C7Z~BXlhV~RWtlFSs-kYX9bH?S{!@N&lzQ4YEaa+)S?ZrDK$K>M-`0q5Y zKFmlKXqGNj5_9|z4RW9fHaR2y%>%?%$Hb$^@iGzzEv<^(f!HF_*|gm*9Led;(*Jx- zDq<0lMYM*`ZgUs-A|k);Oly=MXgJsfw2yava(2PjiWKBiysIVrB)*v|tcWMPJ@+~;%XrI=8}hJa z45Y6lI#nHF}jGG&mR}4thY94sZ+U_ip!12A%&?U1Zn|S8&83&EnZW-ph5}l~j 
zZ|J(!ZvSx$CScp_+m>0HTama`x-T3l4K{{UFfOhIhJU<`0H~VO1jfTQS|hbgT{0^Z zK|kn9I0%_>b0$q7kk7lH1fl~0$K1_21JsY&YwkHpS)5+N#XFd$TV$Sbel7NxtZNOg ztyq9|Afh~>a*i!q9LR=`@P?o^79yM~ZK%UlV+yHsvND!JaBRsxP(ZU)O;o2zT{7VMz)04M zs{~+wXw)i9qG31yTmh+@MG-K5iui$MU6CeG7L=c-SGIRVVpyJ$fo*_?z5B=4lSE+2 z5HYz{CAGYQZ<8J2zeBtq!MRaY{RE5QSWvfOSDO2|Gj6*fDktI;C(r-v{WiZBBeToO zyIv=D>;8D5hS}idwhNF$CY&8#s3BER#t1Y)158~s2uHwfE*H zY2Ci$)lJ)MwtKQwD?V7kDp$&I)+oZO>@6vdURhEH-q3NxItU160H7?2PlfblL=uZ# zqIojUsXw@k@SI<_lkd?ya%a41Dm<>*^I$^EXCQZ)Nos5z4xDU)51WZ3!uTHM1>TxW zcob#}0>vfAUiCo-c3%@Hu^BN4S%F$XRkxz$3CEs*q*#X-O zi^mi!`pWAZ26`slsJw7!rp7AY1sV>@J|$1$g3YzR3)HzT)^x>0r_cBMq|NLv7<7t% z2?o3#L(nzU5N&Ujja|TTQEXL(^@W-n+F3td*dG<=ysmzA`vP20o~kR_4-?Klfw!R` z%@{!rjR$hKG7m7SnDJ%m!>AxoTmVp`>ZDkvB{gr6Zl5*JwHf;Q~x zM$QW=SB1`yAjoi)J5E5PY=Id^_l4n6px1zJ_|BGensHO(bi{fD##{71cW%yFlRC^j zYvWWWeRj7!-W7&C4i{|dptn*4a)3y9ok&FlB4_uBPW-SI@56Ac0D3Lv7IHjObPzBw zDC-G+&^zvfW@LqSmSO{!m(BI&pX{Y9R5CVt6V>}#;|^3FGeA}L28M;6uj8gx>J}CO z6|ppj&7ay0&Rlu@Di|gJr*+$IG$t11W)dkFD4$KRNeb*Q0aDcBQ+uaq4JZ1CpA_z?#-L@?i62a z%PBWX%Bw9uto!)VhY#jgu2jaRUb0 zuw37!<4W&78h%p3g$H>mpM2f=a1o%+|Durpe>f@nbG4QJ?@5Y?VImN!EPaRAyjrfI z+cEBku_K)7Bw$xp;lM@R7MQH59h*+aW=!+Y(FrUmy=Fxv)2r7Cn}>(DPeneO5F7mTFsK0VeQTDkXCoAcEXw^RZ{`&aC{tt@)qu!*}Sv6c;& z5ul(<--~{elxUrLhf|#b9a3erseB0{Cct@n(998zWGj|3&H1#+p0)47m0MH;=1*95 zVYHLIexjc4hgT0`r#p*2md#yLmxyR99vYLTn`ymaaw}DoaEgvAo3i>;_UcIl_H}R{ z#NSmp74Q9tdpBQaQeodWN`}WZ@(1EOCnuon5R@K7#`;_=9CSAEaCi4yZC{ld6jZ7q z-=A95`Hbf4W+X;?X~%OXWt-({lDtf59ul#Vv7pBOx#TJ>OFF{t>`l?opWt)`$dCw>+&FB5ug6%+;N zC@9fJHAsn5ixQ@Y6hz&mGDMuw7B9tZV;eZ)Er7A}jzxkTfjgFI#-Zt&H8Gi?Ak(#> zR`+Vx?>W7*J2_0U^}CmZyJz7bKGoSm(tL~9;(|!xS2{Pq0mAMvkJ8{Kh17{g8 zUljS~OdD@o5Xjr|WT4j|&TkYZ2T;PPYwAonY?|`#r4W65A=cI#BO6GP@oG@=QJ@Wr z->s+}aa;iE)o>;6nDeRZW_vB1q}Q|;haS*9)-)~aA5Kgq=x|1tBO12j#hUapsvgX^ z$#EQ_fb$vZqH6bGdsz0Ay`smbDw|IxIKSO_4~F0JzM*`44>z65yRD$9816f}vN4`j zB8ld-#%ULC_op?ebO&mRW4d%KyeI*{cdK7xqG!of{akq_TB&yX@fKF7MJ+ZZCu;4&owJ_0D!a;DEz%{;|36_n%z>Zyvf_4|qZ{@_(spSrLE8EChO!F{7$-4=TJBVNA z-G-dF&b&?`u)e^<#|LOsX=H3Aa){3h?Flr+#HvnL^WNn^)nHraeEv2q;UA2>UbR9= zAb1pnq)Ms*1z#`q8{>#PWK~cc-y6qRAMjXf@I(tj3~a0khW>eKaSV zk~T)VwZze}l}VC>YOR%39=oMAeIKl=xojVSs_HwLcK=ML=@IeoY;KZBA_m;b_s}f= ztY96mTL|ScL=tR0`ohph)p%xQe@Za-u_Z?!pupE z8mS3KdLS=RBuI^p1SD6Xc^OmP+6)?vlC22I3633|8d<>@nxVDYXQ@|mrw6o;YJxGX zp)F^jk!Q*L!dWElxPAGyh&451JDq=ZSS}q8Gx7nKEE2es0XHunVIJs-1rupxWew^T z@`zd^+9xe$$p_ag@9&BiV=2cJ431%k{h<^MZOkT@f`W9*W}-s>u0BHp76T5YHQBFR zdqB!@u6iIx9~5VG2;?nzcEZ;h$V>IUylC)$9*FuIeuKy$ff2{YyfIoQ1U)GNyz_Ar zV}mwn8@{bMbu&b3(f{%VG|**2*hpDYQyqBn;^r3lII`5I?FS@B-4?8RH5O;F8j~x= z1c&M5+1GAJA9jNY)D}{2xO>ihh~t~ zB+Y@uY>iZ>0L7^p7_V+b++GH#SAqjG!2!*^jF8olx;A?f5*D-uGUw$x8Xd-DYP%!B z2$Ep9V3f~x-7{zE@w{v>t-8$ewAccC#DI4aa^T+}b|~u$Ga9sLVTkCui&FVxk110V zTLeIsKyGT_H$E-v0B3PPx^2BT+VmS-ckD9G(!l~q!&;gEAt-~vKg2jEm}zt1#*42N z(Hj)2APK&ns2d2TwRGHaQ%(#+FtbXzHhsBVz1sxCp0`&oh9WoN(R|0AT|GH=mPi^% zF{KR+vYqPmuicSvSvNob2I&eas|*GV=|j3Ll7os_sF7M;e3u9*xD1uErPP}5)0wtK z@HGUzF70|HzCJ<%Z!^X=;}kq_zqq@(9OE*C4{prb-n03~-0uZN#)NozRL4Tiv7S;~DcDlrn?dc3NtyQu@I z7)jzjv*0k_!olekow4%5F(^n&8jVd46BXy|790fbRIwrnERagCpnzWehKi0P{mCeS zCHDpGjYzQqoHJrIU0WI6F9=(k;)A#oh}uB&&xLNXG&e3gI2SDrPINBD$@j6O3elS` z7{ezAc*(*<>NuF5B#kdLCxw$XHyi^I@yH$s%hj_K(FW!#@J0u{Rz|P~y{m4PlsS9* z5jMQSS==`wV<%3>A2@&d(N3E68uCyz&lo&fpilS#BSG9E5Qj+IS&;-CL4!xFcvQz! 
zpgG|isvk)5g22`Xp~6}64i4s(02jkrP0=AJtT|Nvfj%2zzHApms(ut2Di4#i;dOE6 z3gqFo)rN<7XIC0N(z*1Vu&wTLPLw4JEb)p+GbR0DHEy zirR)JSW%1Q>6KE~iur-aM+B?U%mEO_Bx;bPX?E!}i7+SECSFpNo1w_B4 zNhb=7P)_H$d!Jm&P5oq!kfq2|>cGxdJx*=VrReb}Eodc{-}C@a@>PIjjYX(02k{ldm1R|Fq z0d2WaiPQ$G(mE4B^5d0zq<}GOr(wn_QW_hxs&}=N@7W~63soD<^kEQHw>0*@tgRnP z5_GJF69$_WEE6m1cd94ut23`UF<#Z`SzV2ggQzv?_#!JXY7&gg@|t`=8r3h|Ayb7> z(6k+V{h<@o+JavWoY&_)#YVU4D7C$nk4l3DPIx6?xwN)8_}mS3k#2^$fi_g~iy1x5 z|A)s~xUZ;Sr>(`75?~3D=5Jf(<`gvHCgue=Xr4!{xu) zta=Qy1!=U)!NWg~Wv%HkwQ(x+6=9s8a<-zpLRP?B|N5S@QEPMFZ4OBN6tW_fvqq*q zX56|iOqJx`L^@+1LFN8>I!(CGe_O-V(%|^;OjOM8g)OL0lC|q=>mz!f3=XsuJ?$~t z)}eJ|&+O-0=Hj(nQ=?aht`xia82je0r$7PGaeQTx|cbvhu_3L1!!6im{cd zBd7${@Yr-r#HZW^6mS{TmPNpK;Y z-X72R5kWnKeHPYQil%TFuX*(+%AEKEWJjRiu^QNpy&)646}TeU+Ox~X(SNxQ&ug4@uDS zN7+t#qR>1RE>ReEmu_x+y(Rjpoh`bIS_U*gj%2kwp%S+7mmZKU>6Dp?ztP`4?TB7E z*G_E4>=Qp37(W>oYEjllzEC{jZ)bIPd2ne8`{^iz3!3p-t5U%^JdDl``PbuBX+XAb zF{2*FO4rN49g^Rsi;!RVAEA&i7vh92D}8uP-Ion)VI{V`cjCFY?n>5PHF z?z_2<`H#D!f6a3}8Ml&COe+87&wd*kBaWpqCI0rS+m|IQ_4 zh1p5c^MUVypwb#HeK`bfCLuLCVmgkwRG#PzMt(oq>{9H8DqqU)2!8bqXcMB5q=&Yq zR!x1LjDW5WwNjP#eu>Xi%99&(_X-3|S1k7dfgZT}qH+NLE`r|dG!4?cRUq$pz(Q6` z6jUk~whHLl6WF!0KHISO4^#kdr_8aCl=|ghv`*?X@~%2@RIBcn&jIP~xlWfO(Y7FK z*+C7!!W$kXJ`OZcg?Uor0A6{iNGtFC)_b5LE-9^L!M(#K)2j!ZB&RP_i-}35o{c{5&O;nj z>)!r}_R#lzAWDKlrMqNa-CXqpf(()^6;nCW#t}SWDeO2r9naL2-&bs;bkFqe)~64Z?L0oKG63Yrn57w^(}pB%4zIWlz5`-9k|u( z3InQ6j#V39-_aL|X5CeiHOnYqs}_UeF@V-q3rg9HXf)(VT|0q6LjH6zZjmy@!NXU? z>=s>oA{qNIXx@uL7rel0k-W@Sz}S-&;x#u{5>{p+-RqxLCd^&xcdZXFm^y(3Wn;Q3i&VfL;nB7kjd-&s%_smaPd74$ldEgkCi+QLMjDX)b{uwDh0ssWZ`x zFCggXU^ev@Uz&4z@!T-AKag_?VUwmnM8xr|#)y6y(Oe`m1y zj<9*is^8*(zUkcQQt)%0RS6>}D>)xuJkRPIA~=Cof8R*V_Qjm_2TFr3t%;~D^XkI_ zL119I3BSsQP>LtzHz~G?agE@dfuD@HiFX$KFrS`P*O#@4Gk0J&$Fql+n{k5Gv*S{6 z2`Cy(S?JxrzEvB&>09r)9MtA7hJ{(cJizu(#R&w@A2h`{Nw1U#svu%%=~{&=o~ zo6eRn#3K#3HKYtS1=Ts%u6G1F0{STYLis+L$Q9giU6;p9Qz%n$U*i+)!a0>G#`bZ& zsRuPRP8G4EphhHal^q%YGc7uB1Bee;Kqdf=0%CQ4`FU@lsshMOphO7}sZFMgx!c5B z=tbp7MqK31t*oN<*)w-h`K4bG;(EZF`|D%=KVBQvw?JpcvEU8FXzDpSmu@F70M9)T zG%j%ST%GyP`r~-HttPG zdqE0z&K*W~IwL4l$^h_uL%uDQOme0&gslytO+>v{1W7PfFdtP{s!BfHea-#@w+w#a zTzyfjQh)1uOlp$Q8VuH6dw)OIXYFrxk5y+xpjOymh8->jU-DS7_en8NrnN^x>jB+= z_^501DdSwmq*PE@?(F4lXL!bk9R{(QP-P^&W>5MzUpy&W;VXr}3To#Qn0B-?`{Lmf z$ln=*ZQ$G&17-Pwm?}|$RmEu%QZLJ&cME8w%mZg&PeEx+DcY-wQ(5l^gncG5Al)XN z8e`*@e+sT{sNU=JfY5~p(VG?_#k=|O*yn>Kyzpu@gzCbE|B4gyCx@8Id&?7b-4=Qp zp16$3`GpIjk{my5nl{t^<6ZH)LFbPyckGPP%#SM0biL9N4Q|@4jpV1&;#iQQrmvZeqNPK1ZG2`i@{Id zzcWhhHh1{_-ETf8e^%}o{g1cjfBx6s;F0{35MX~-M);GL<6nPHwSxtUBp0v)ZNiaw z44T9d%r|r>!#vOeM)u~@^<-A!s=VAh)~z~20a}&N=WEn?vD?P;QDs`(M$U%5S*g-()Qs?yzK&Zh_JY&o0o`8D6pIt}qV&}gqfGm4worBwd6IbRWp@u2ZX#u(~dJ_8$d!rHZb7h0aRHu2;ofVG{)>k7&K3* zB_Y2eeqQte`RjZAwO)ViufN{Mzs`xj&egx}p}+3Oe<+?%#vOwgF(G=ix=z_1zFc?e xs2A6%*yISOVj|x3z2Ke*=#c>z0=$l%yZzwewhnEV@5(mXYDWDr6aRJK{{WP7a9aQX diff --git a/docs/architecture/assets/relayer-v0-link.jpeg b/docs/architecture/assets/relayer-v0-link.jpeg deleted file mode 100644 index 166f72307ef1ff6b1b369023cbdbfe495c4426fa..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 51722 zcmeFZ1z225)-K#gZ~~3HYtY7>;0^(TTX6T_1b26bpur(Hq;YqGyK8XwkW1z}XXeb@ znS0Ln-*cat|30_6ySln|)!tQWudZ71uKi=~#~J`tT3kvT00992Ks>(yKh^2prK#9ghPaR9s(jH;CU!7&|rY* z7_e}pWSBTCPKo3oRyKAHVI>vSgxZNIA;tI3zBP6A@L1Rs%5U`z;wPsmMPlQU#*OVA zXAF%T{6+n!#3fwv@(Zdvy1Jh)g7LSD|2l}DwZOj)>94g503@jA;R2z6071b0r;rf1 z5TF0;tRUua1#iZExZ02;o;JELDuxAJj9@|xGrm87^!4`C+ilpOKFqcCW`BB~FgY^N z)Ql-M`;*t&AYclwj=GSX>kMIlu=fkhu=^zdnHpI!+Fgnh=p3QHSupPWik4x7h$q5P 
z&lLYWkY}AyRK_c()sP76ry1@ct7JQ+hwIMrp4`vfFv?S$;cSx{!>k1%TGV)Dy9ckC z2r$$fdR6NV);lZjBpKG(Tf}Avo7u{dmU-JHCPS?XStHFqxn*z!b{oI21QV5L#qFrl@ zU6Kt|Hq^)6!u$odGnuHy=I?dF!eksJ zqVfo4=X)P>FWQ~D~_l5$rjG19NFdvzl+;nuQJHa`NxJzWQS@R)@2#&KPm z?8Pf#eOOM&K3Ee~%syJ$g|K3~bMy=u)HTs4#Z5DhD-BHhlublC)`vO9*wevmcpZw8 zFX%;jV|-!k5ur;%fJlynpQ>abZA8I?rIczvS)F(svQhUVazoI;`uI z{MDjXmP?~O3Fyl{g4+nK&0cw$ad%9jkfD~LFm_iyh-5eP1G6jVwoR=7p%n0VJ);(8 z`G9ajQgKjNl6=_UTrgt5jAUMnw#hqkRVDk-++m}#rv!O`~U4V z;ZHpQBBp;1zOe%f@+d{HG7hXnT@xu(KbOY&jUacKzp}W%)t&P2t$oJ$5tMnd*aGWs!znUhQ|8B!cE z`>V6O$ewn+5Cc5zmqJI;BG_BdeQmVRq3(iX))%7M5asw+QI(JU(X32o>iM zn;%b8WsV0;|7~8_LiZd`sPuK2%{A56rEiez^!gS3h7~pNttnl1lWG*`lk$BBRV#!V zEu(V1j+4;zW+mD>p_1Afwre0XgZ;RdGC|ej8oR?)wPj=E5lgf5dZU)dhvkP9tHG`Q zt^GOrv;KzMH@&yc(Hs+}@3|$vo;jU1TAOzzlFu$rOu8S0<;)PZTMM}0JIqjk)HBq# zv<)p4l1sXfd09tdr%M`Mk+Svj0XNKDyiX^a8Z&u>xU^t39l>$>i+fK>APdLNXzOd=fKYv9Wf!`!VhnnWGV8CN{}ZN0L5R3}1e*xtk2A1_r1R>)~-NX}`D)A}wg&BAUAt)K9LwM>UWA58{MpuGD8%vk)+( z#8D${#HbQ8=!$RD@Yxz5B210DatP5P6lD75J7pPA5^p$&{h zs?L|OS!BnRCG$`7yYx0x=1@K5{qME`~_{WOl@Yk-?^{1Z8BB$`#)Y?eyopOnm;1vx2O?IbSs!2_JS+v2{JLa zWFNXSZ!b*Pc7PLZQjc%`(6V>gx6EHz&tjb{Wdhv``&k($otG;mi5N>(_-Ia8_Ti?o zP_dnU>u`8fepG&VclO zPM*9Qn2W*+#7Ko zvHv@w(dQO~l>#cp;;#V*Q_jhONC7+cH3~FYx#Y1!ZzIIEe=>;8e;9)pe11=ERUt0a z)E%>L2-v%BpdfLBMHJB8-DQulFzZ|JsFNyu@1YXVp;L4hk)W>~w5HU98U9j!-2^J0 zX4(M4VFDH+FqeUhC&+F^Y^xW)_80ER_=c!YXH2uur-k-v_FY`_2FzKi7|k9P6w%iY zW7{luDuRmqsW!1trd%9+ShuEM&MuVf8y}(@BDcRC+U(z-ei`Ca^$b7SvsS<=^zTEv zWRTH)BR))s(;4O~39z-#;BUxF_aGO*?@>`z=hcUv(AGuR=XuU3}ewBHWfLwhvT zEN>4H>i>EG7dS|Gd>oh4_ejC5I?tD7Vx^<*Fc;D6n(m}4C7+raQzbn#Bh%(~YNs8p?8=feWZ+R2!V z?}C;3LQh@9%s7W`al-WiTq#>6W4P#4MrZHtxnY#q%@07c(BP%-BMGlqc%M=_sege4 zd1gyjr)r%SbLmU|+%jVqR@2B&Xt?yfD7cCIWTFyXYca{7ucoKIdTs0bSZ9~zJD;br zE`e*ukYi4pKxLX&}alXT)dD%K4~atG%)7hl8{yB4UEY;wlQQfE*q-3o=SnkWr= z5jL}nsHR`|Qr@-vd<4E-m0pybH2thao<`&%r)?c|cqFrXgJqi8hCA391=`3BtnP2; z0OwE@Q4g@$0GAydRhqF?SAzufqbl@>Cx-L%3j5c2$eSIn$&dZ0VN@ z^`XikhmztT$~rKg$;bxr?TRnIuFq{%Fg_W^XHX?T@+FwA%5!L*B?Sg?+suD{3e~R!(yxr_S2wy|JJPa;0~2#<)^Rk|yYWg-OZ$OHe*lgtKPWww6XMQyu7T7`C6JP6rr;=fwu~Wgz~E*| zdV5@0+s+8R2TY;=+SyIq1sXiRb!xzM(nVKB64)?8K8m1DtT~`qxf6dLn*EL_`+Lg+ z9EaqepnUy_f>sSzQ{}}C%o~{O>0*9v+`bT1$z%4f9EP*m0CO&j2#*P=#_(FhNqW_n z0oIBA{hVBJgtG4liDtF}P)OEIhn609s*&Yz9u%C5*^VPPivHE)5D~wY?<*PXXTr(1 zNSM36>6bUfXFCtwC*vlllkC>j_$g)CB$?(NxQu2{#zc@c-rtV*ZmcnNgOC|vNv`{H z9*};@sIq_J%>RT_A;SBC6)$jUI-8K&3&QBI2r@f|VW|{Lx*@moUqT^@YKl4`tv81c zL_7a6G9$D6a|!iXYVEz05jE&jg#fQ(VZ}R*96$En&E?;-TU92z4$}Y=aDM<3+KEY z7R5jZd|w$1ReEmTz?7G=c`6|_Hn!1u;Xzo&)sOUqofKzVKhElNmB5B zTzL?VtTExA0WUH_!ar*yEcUh;wc6|R)5<(<4_*b_KymMQew-@Q%#6P-w;fpjuAbd4 zsD&JR*AsAAz;uy(66rPIxzXvalzcZZ<(#?9h@bPHGPs+X+R6iq4G9Nyrm{zmBj>t2+6x*LfFj z_5`khdsWIheg)o@N9z{{YJc!F+qAdC-oOb;hKlhp{Q5oUnd>x-BY#Id6{`LZj>4krwu(|kP3snM2J^@Iv%!*oG}!(2He#wO zWu@`ARkvlJJslk(AXFz>3${iurE)|cfSF55RBE5@nOlt(1S<|Z5lVi4mC!0k-Me4D zD1K)Kp$y8Mn$ zuoMG_=r?z%K*#h$zaW>sp!H@l|HPQAdNZcH2c7w(hON zr&{+n*5g9gtQFUTMhrsI1qWyvHRNJ&MARTaYFk+kLkX~pxO`iRB>K9v^QYxv_?^30 zp2_y3CtA~DuS2;rBS!o5$`;?W^@I#gmmnI|^gfzY-V8tGvgUheG3IanWnL)IZ^U97 za(l|P>m*65oUh91kGGLE6-PLNCZ0-!LHnkJT3?mNC7n^qO-7B1SHY?e80wAr>11d^ zCWx+kIZ;x>hm%>rTJl_Sd;5UKG&m8(mx+1vUTIZoR%*qXLqi5LuL!0f+H3G-BC5!R zkTV@RLH?;=PC&mYv34%jkq}r^^abs=yu}B)yAGz~m9OPzM=C#YAm#n7EH7i3rpSUwF$l=`xB<7MbMf%W}_qE-9}yBa(89&6zDm3floea7Se`9oqn0F7Kr!dYRVu8lhQ%D^A$Z(E#HTR?E++AD3a z_(lEKbuMJqm`-I!>rV3woCt$iuvSQ{`D&A@`{+ZAmtCjVVjrW{C&r+CQ@Re-`MD^b zn-V?P3Ps=MsRHv2bhq5pEHbJkt4<1$1JwIHW?oP&*b!n#z@&bZ9ga>TO%Fz6V90fl 
zMqa%a*r3Uk8Pb!|kW|k2P{(tCJe>7aqH41tur`$mVK17x-+uE$#;bE}99aCdVb=@=5Njwa?{gBgiaX?kc=1u4oA|Ifg_voez%B3ZE-jbZ!(;cUSM65X-|;Q zfvhQR%3GEod4opt9U|R{<08i4_tSMQ_9C?^#*4TY%EQ<13XyTf45dFW*=_kTEJQJ? zWFV|m2x2yu--on@9exx^q}@8dQy(>`O5fsc+@cw*wT>!#n{AN|f)8pOSka}3w_wr| zae4cm$8TfKjR|i15n_bDZDSXNXu#e)X1#ViyVFFZfBRU-nisnA)Q!fNbl+|IYH?A$ zRErC_{Ln@a@1jQLC0q?#N4Y$Z*giBP#J%R4lG!0Eu-vbM(8zG)ImWlY5mqWJYwh#%7dE}*nXwpsZ*OdgGfS3t ztlp;z4`MyQr0Qa876C5Uw2`BIL}X>KS_D$PmGE z^KLDEIZl0Pdvk~s^#njjLU=s*9$A;0*&y<64}y*!OSj;$4hs42&f;qV=(HlvaV$A%Z?JET6Rd!_K(XTOpdwspFd$%w;qY zBUxMWz3!s7Qq{(fpO{exAn2(bnh>oO^`aDJM3KBoP#U1L;72n&|2#qMQl9=gPMyHdKBpBS=)dz-j)s4!=UwZ=mY89cQJK+w# zO?8y#uSb3KK$&Jm!5AO3HRqYhH~kXGS|*`ZGMst zS~dzSmzCl zzzimOG~>;64Ltd;aAFvShjR3esBK9~+51oJo5ll2NyEmUT4LaB58vXN>xZ=m>c@@| zR}_hZZp=naE)-r?zD>3Vp~QhjCM6oK)h_(>mRoYPRbG}{>eu+z>8^;wf5y43deiLC zJ`&s4%N)&=Xiz4{9N!M30L!#84OVbbp!-cnAyuXF!kh8H3##$&E+&{NzujGgR5Y@p z%U5~ql7yMVHV-x>@WXxDP`lY03H%p{f)81)egK3g>1{=8#p#{i;kN?y%{jGKPKIsM zgjZX(lKWRXuZ7PAg^}a_*`ogMtE2xz^3LVZAM&-JD3?(;;*POyJr(dw9e%8{^in|C z?=k+;>EWAkA)v6YTrNNGpxUr?`c(Gj-QRYHS0#x~nKvYl>jB>zqEz1gVmba*_xw8n zp`<$Da{TCf;^SKKjjh=mm4s6B@0a?X1GU~d^MNYAy?LlAs@OSNQ~5p9JgAxOQ4^;* zF&U#A=G*8~q7K{^u_4XXKX2Ib@4I$jQvc@L_&;C5Xt1~@OB|jSuCg$QtHY%VCPG{E zWAU5RL- zb2zR?u9c_1q&#(gWipifZbXYtr~R&n0gAPKolC>XhE(Zt>G*eMFx6{eU5htg zO|Iz^^JcMJxxn{j9)wuMq< z&*u(L?>(jH#FLORvD28bdSFRYW1+%~tM9mvW9{|S1(4~gd9=DWmhdBOk{7cBqnyjR zBjRdSS{-7cdN*@B=IKB$Z~K}Ea+y|E#>%c>j<8s6MCa~cVlo`LHg6otW*K2J$8g=j z0h^=O-9!tU7ALl=-SKK*wY+&eW z$Sm}fe~iQ};mhcTTX@3u!<*5|gEiWXcrJX#`^V_fyo;zFmXW0`ZQf{dLN#Kke}u&L zZ{2ZwC6p}4363-!aVbq(Jh#bUBm2bst5ppM&!Qc;MfF-*K5FbEFVGwR0TNd_S4v!V z9;QOTyA_KFrO#C>a((vU2t_qtOzk5E zCz>?bu^qb(O)pUsxzv+f&*H0jcs)_1`J05Z{t*&y6kW1LMCDJ3b1&5Hut<-&-wleQ z;75hS8kKePZW3)dgtU2h2-HmB*QyWw0TS!VN4=5=0bGJfH-Eq%?o-(_0LNv!5?Y1ds6;9Wg`c8r4=Z<}+rS>e zeq{w(3MYApJ92sV$HUJH?Qax+zmngxMgR9)RanHItW@UDlGbxauT3CA)Md%3BensV zKHH=?;XdczI7msozS{HCy5tO7C9G#Fj$XZs-HOn>T1${yQF7rWHL@Dy-4@u2N4<`A zncAGd56~C_!z#&C8NnIvnEcC>N`YxzC^K>xJBs9o=xFaJwBDyK=E1b$3|A2FM z1i|Vmp~l&|+%;PnUg}%nu+@^LU~&5XgmA&t{u{CbJ^xBDs9HJ5SAuuiPa+OvIqRp$ z=_a*w7#LeabCU27Co8hRzp5OqKaES6p1AsEp}9?2tiz(--p;<28^qz@Z z8}xU#AJOp=zDu!`*7_vy7z=cc_L$n7+*jOdjqtqoNo^)CO<%QHKbY;AZlC3L&>f+! zbLDY~I^ugCToV@2iAc&kZP)9(%>54X0i3#n4x04l{9|X~{`mx#!4Cseq(?+2&(uo< zKf6r8Sg(V0EpS$7M#?mBQV0u74a?Ax&1K&At;Uftj-R+^xQmE~lS|~@gDw^zO-`dK zY(Id5EA_C|ptQFKJlL|P*S@Xr)}B5OZh6lUpOVWA!?8u3!i^lO#g!7>B_VUPUjv^d z>Q14FqMJW&`6u<(u^f&&rs57YAaUJ_3|;d$s*DWYIVWaDeU@b}V3;KKk_-V$v^*2$ zisn*JT)^YC>R@Ak%`&5j$s~TEnUJ^7Ig7wuL^~YikFnL{;PM<#oK852 z2opP!Er!VqeT3}GiJGbz_@LT?FNP-rKy0&{7I}WsJh9k6z*bi_m)wzY8EgpLD`jYW zvEjPSXrbUi`CTLW$W#f>!lgI^qVePVXX@F7bN?~6x=(Ot$|L5_rm}_G-?>?AbiX=t zvST0BPOgzU(tVk^308Q?Pg)@+*kh$NSr;8v9vl{d$@2PrK^5D@LYgIpNsu$I1pOqk z&W!I>|2_9-tiy?}*S|_q{~%cXcYmtGSI(SX+Vi{YtOD8m&GQ*XbShrMYUiADCPWkD zF(odM9ycVmX^8Lz2lS+-cQpI%yR2JSju;5wbK2tDtGT({=n8b4>kT4n{!4MUrMS? 
zVs2)El&JGQJxH)q0$#D^bNyO1m6PIY>l2rXPvJ`zA8-Xu>q>NGfSXMX3~<+96MH zaUi(dczlT0!qy&tLpw$nLmt}>qfa!Tc~9fJZUm*E%=`KsxBwpniKyC7hB&D{OmX3Cav$J{; zPPgDAVeG;d;WrQmT*`8butn24CF1col%;wZQwvd&*98JMc4g(owQCB=JZcxQo zvjsMl*7XAp5(LE0V>c6w&%puwGw1$Z|I*1@O-S%IgS>Wf0nz9VaR(ZWFH#q(hP31D z%>!^Iv=$u#Wx`$GG_qfWr)Xt;fTH3&~3EPdwrO z)3SdLK>1fK+vr~LG@4w{pC8=Hi1PV<$?Udu;fh#)!_7OdrN9#7p)livo4smYjcI~U zRx6kQy{dEB0*zQHw0lhBbFkCHmpV^Op?|sI2_+6HXP&>!$A694`RO&~r|rh0VF$y( z<~2biivVXp8#V{x+t0tl=`nu}iWBS280k95#N^bL#69y?Gwnh>I%s6EcS&VM=lg8* zXV2vGI!h=9I8QFL3#~#c*{X9*#A#BGn2+q;(|J=PjhbxxS338B&DpEJ{9jOzCYftu zd6~m`!QdL^8jc$mm#fs&_ZEGpv0FA$?jN$|dFPu2I7Fg)Wv$swQa1=4S|qv6S5#3a zX+lz^6;pscD5z@oUnDz*ED618DL+|alnViEO(yBTN@3WxJmA~MGkp~7xR{wWW#(lP z;AAob%_GT`K2GyO3Rw*B71BA3AFmqN>eh4%6j5WliiJq}Z6-aTK6-3&xKN9yE(G%@ zg`?RI>6gfbrG)yarKaISaf_Z5`$&Ug2;lRvDCZ-SzQGc@ZCj|7XuC=UnUtE~GiiVH zT2J>e?PTmbPWq@ZJ3Ly?Yp!O&tVQ_F-ZZozVZn6`@1zSVtvO;Eq5l+;11q_;T~`qf z9%MtR-BKRcV^y)bEkUR$opO8q>goW~T++b7&cIC}$xZuGR!$v`09?-GJxjJiEC%{c zWAN$o+l~J5xH;a?>8NGLI{X?iF98JkAmixR+~zHzzB6ZGsOjL&-eU2KSFS-m{v>XA z=XF&H+7V@HSm?bG-cfG6Lz^I}H}B@d6E@lvX<6-uUbw3s>MXeCAx80OU!H$NfD>)! zw!_gUGc}yx%{N})Z@7{8B*-!ST{-*SIO-c6Hd~i2eITus6ut}raKTS%DDPiOjtYgn=DtI?4$$kbphBSQxi!oGQD2>765IF4MNq3A*=WJo^S-H-Dc{Xsiz|y?~&@{mE+H@ zSBPnSCq&*qy~fKzfT`eJMQE0xPjbsh!@lRv zw|{8ErK6sR4sgSL#64O*v-=P{vnkPYJzMmEf4=1>${4HuCKl7 zqoyp7=R#4H?$z&ei%|R|q9R&04UO6>TI6Bf(^k=R!F6nRbFRj!d3Kaoe%zAmLq++h zXi;*|=W<}Rm!kMl<@|P4^N+^(GYDC)rtjz_V0f$|Smu)b4W!GiZgW&+@Ro=_lN>Uh zct}@T>q(|8;Bg;JeY4k-)m~1avk&*+*t_#bHf~w`?2XS9u6X6u6)l!UY%#KKI#8dU zwq^br@5X*~uWaON?9sFsNL3(D6 z`F&}If`G`~N>Lp~55+d`H$u=pN^T3s<*op;y@Y%QH_f@)dXecm!lhp=%l)-Jt+@D? z&0CW2W+N>jRS&?KunxnSmPV}2(?x0T;CrZ`QR_m<=!7DUg?#4oLopINiY)McngB2= zHQ0v-WsqoX7baJqy--mPLu4j;NeSsxj>A^XLYt-eIJb}yW1xI+jpMk}o1KPgt>9aWoF*Le5U zYPzTB1oEet26pEr0HH&0|B4cyLqw~ZUGSF`)$hhg=S1jOCblr=u*Iu^#g*(Kw^qx| z);&7gtIMtrumj#S>}J0}2R}U+PLG+Kb8v%iwRONc%NO%`n&$Zv3u2$R$*EJsQd7j` zgav)ycil?fvkn^+8*#)%Oe zwlz31Hdq+RYcjde&yXIKp7}x`C9c6O1!+T}tr{M!6kYWL(#YE7~guOZG(qi)AGO=OU;_54Qgp1CryUucWZTFKlPZa&G z$o3B>`T}GWa_VGlJTRHTF@fCy?%c{fO8RA+xtG3E8|s$_1p{8@NoAb%`dSN zOs7GLm3ol3!^TU!B4(C#C0ujbq*Vm?nIYv^&rmdUT#>2X*H`cZTd2{B3iq+^ui{3m zgQi=O2c|0tFdEe(0w&@Os9_MU$-|oPyPy)XZHvewx3CRJv8gc#Z+^j+z3|%$EVip~ zuw@Mntj;h^mw&R&Oinke{Q+pQ#9QG1e#Dz}W%z}?j<(Nyy9i^snzJ21?%EXMH|&d9 zvwB!)_M(v13#%3%<0rKJ&jv2K{xct%s@6Z-K4D;gV=69I`wd=9sLNV)TBShO(uF## z&I~G)?Ex(u&nB4Aw*+>e147 ztj6O>8KhwW4XR1; zt|$46JA*i^n(%Dy*Xg*>%)L14F)5pCJ89utxF%$n#hWUJIT>#{jLX1aeY1 z?$6&0oQ?Y~9VgfcT4-2s?y}sIxT29NF{YOmxaFJf6N@%d*amPLyg<`n4`;qtc{= z$b&8^*oEV8f2m3qK|dfeusTIpBx>cixyaI<=;l1BMRJCmM{ z7if0lqz*2+q`O-`dr9HT@)3_0TJ|pv9$E!;fk1jwIuTu@8D+RPW4#_YrLdZAi$V5w zCN#s^xht|Bt<2TPNsE_mQ3G8=M&>Flddlb`HgIL#RKth8q+I9_;k@5CmPFcSb+fwO zONI2lcB;DFe_J|ueBPy5`=*b#anB_)4`MSu*u^sq+%kQUUUVa-F}z(?R$t#|K4<6F z`iAs0vzW9E(~aOp3)hs&IWh0+JN{*ggPA>%$}B@5(IjDniKUr-K~@80xUr2$d0_52 zyQ_Yb46^ZjK&S9_yM7q+l6zoCZSeRHz?LoifGcZ<`YO!+?AnufC26G|z8+oYn&pPe zmEFz3iTgp<$2}QX3s?I2?#(jwl6Lmpv_v>k{Miks>W6FBa z7&j7Vvp7KInAwND$Ph=JtS* z4F=~oFua;#OA=vXO_{S=A8&<=6#Q~hdJS1{xzDlrh_P52qd_k^OlZmLNX{T>dZPGQ zLb6)f_{XQT1$Xn>))8~~xMv49%h$^f48SG4!%x=QAkDX)sx({|1L#VSl{=A-xM+5{=2&%b*PVYWEBg` zHV8tEH@Ke?BGlc2YGdDiB5``3GQhXX9aS#URmjzNLPFZ~8z1lg0c$7Ez$58*lx(i@8 z_pit9ON@f0>Swy3XUJ8bw3G18K!S zL^n$)9Z!(l!9K4YZ>+watEX$Bhtuj;A9LtlwN34gs&Gfcm&euxP4zq(Bm_pkRDaXhb+FCpePjWX1K9LlZCLxiexXrbtajC zU@o&?O>!43QgS_SKx}aB7ev8m^x1WwikpD1fz~&|mhrW?nHBW;YeRaM?GK8wO5%@% zis<%1ys*jw!Sxlblh#y@g3&?IK_n%LY*+^xAxQ7r7?7CO=lRRE+sE|={ zoW(bGTftIp+TzFM=Op{LPA_^spX&mLZlTgVyn99su(~bXvKBj*D@Ppoy3X$8O(oD< 
zzK3UhV=Z2$yJ6I+j33&d7!<+uEORYoo(*OoAUPFRg>j#3Rz_0jcs460BQNOs%}y-PXlvUtzbg?F=@L@Epf|^Cai|T7weG)=q!cmH1{RWz^K>Q z{@Qx0TrNGUAmfvgc{Y8)Jgow9SWacqcCOz`Ko0?_maIK6+&QUEcHt?wWO-oyd?I7h z6z*mvTim`D?N+=->0=stNW^9DLf-no%lPxg$Yrgm*MAp^Qu=q;mH#!I|M$WA|Bc9d z7=;32xO3K9)IFJisLS<-n1MXj3%j$vFah>kQ>n3m>-PWz}(#;x1ws^ zv9M#A!TaU4AR~9KEDQXcc0((nL`IWX2i63o2*ZLhZq1X%n^L4sf!2bI1C6OMGA$Js zUZPQny0xPW*yKHE_AV9lu|l0%|5MZu8S)}h!I2+6Wa4aO~IN` zSyvV_;vpf+j>~Q_~fDiNyGQU{`we> z!EM^nyH`rc*$C^(BxsWkOU^soplw|{;vjZmUVvVB`il~E?M=ngQQnaAizk#Jhq9H< zE7#j7r=6!C0QA`P9{|Jt2FZVPZ03Kb+&xD(GTW${NlVSZs>ZfDKhvbKXg`~(gnJRS zmRZW$6lzXAF7JGNqPKc(TT9GMaj7bJm|c>a=uPz?bP=Z@X81Jtn<7F9t=iw(75=UG zZ|M?mZNzI$CG3uqafo*Av_E2_r9K$JK-pQ-<;RF0?8Q}D+t=pp+)cx)9TI{dR;4%Q zIIlIfh{Zu;m#BAYADTC~jYPZ--<-0)4IYRKZE))Yzh1O14GQFSDg5wtvXVVjr^Sl^ zn6M3ga6vqNnoiv#H`*x(E5Xuo8d#8N_yHgm^iuBEw%%S4#-irx@{Y#=9AjYt5%B&I z`O)!yyKA{XJ(0WRh+h2%&7<7{ z#UFsvN8W*`aFT}apv%?*i@_7HR{z((vGdt3I1k~jc@y*YnK`bo1$dh)G}b#Bi}@|V{hzqSp0)4 zS^wT_Q3EswuHP`BgTQ{7VSAw9J`l*m-1byAkR876<|XCc&-ww0lhctk*=xMYu_IyN z!8>I=Ow+dM2LNs$o-ZSJbD3t8`ov94Tf-~TnFiE&|CZc^v{(~|z0F+qH6%4H_&Rpb zy@17Qf{a@8SC7H3Iv5ddfxao3vk8`qi;IbgiS_aE$pI^^sp@T*1}0QVGdCNCJ;c|_D=d?{{>M|G=VJdOd>ke^U}3|BNc$;|K6lFH85GY|Q4q<~NgElVzTnRAahS24y~Wm&@)StzF~beS7= zO*UQ)#NllUunOqj_-ZbJ+xv$T#WFj#XEw4_Be&@0*R*9DbTc$N3&p_o*Uh!856?iuY2kI2&(930uF@t_8%y%#2s zgg8_H^>vf2Ac5}pczwe2-ttTx0VJXeOr*zR${Q}PwJ1$+px+ICO>4eR>$dv{ESBS9 zbMIxjVu773h?YFLxBPZME5n{qp!)`g=8&-Y%AO{V1u21}LKrO-b)R{?hqx%&vBv9o zitv-zIFhNHhXq#|aLk)XL+avuMbAi2o5PXr*|-LJ6yPaJFtkJc`td$y z&Nq>Q<%)SEU6UEr5$!F-Kv0(vLK)W`!gkZ0a%Eic>l+OAl%VzuVM-F;pd2^YAi`0x zahrfIlUsbeVZB3Hm9=%%U1ro=c9Z&B9rJ#$b=XK5&5}a2w0THO`ItSO%UF43 zzQ0`K?Cc0((2I}HcCl{nD94tt#%-D;{YHu+E*9p3mD7RD>p6l;XaQQO5|bg}4i^0q z?*~Bt8UCicMWFE2@_WEfOT+)~j~LWP=aeyWf@)Yc&uY!3sN@e#DJ{l@Nc+~1I`+sy zpflK6u`=&nKDTvX5QeEUhG-@u>{4BDxoXk&y49ZS+|9eyhbufJRh8$hu1q%Bs`BP( zDP=fkY5K}7l6wLmIm1&z==*_M{7-|y|=Q|An|OBZOO|i;c0dr{GCx=ix6O zfFly>97}Gx8!^I516^kwDVZ-5P5;p+Rt1OuGareEhOE(F(3}HEKhc~usLzN_-vB?I z0BFVy*{8-Y>9@sJh5cL~vu7XR2>%+fov0!eT@o|^w(*3aT&(#4XrFaHJhJs^&U&Rq zd;XGokNxR6e4bVo3GWOq&}GV0L|WLL(XDIW`-k4?UZaB^7}X2{wvwU|&=SuDid$ zB9RwfaJy^v)~2@^j5JA+p3KvQgI@(P zE&p%(KmXl)6!}Z`!POnA13`!wRG2%YRC#)?F}1*Bis6mJ!$aAlmdwl+XJ;Ljed4dj*rYWSH&rPQ#*7LqXqah< zyR0Vt^Dw5|hUCXl>qIv$jy4l{541BZE5mD*h87Aq0UnMq^UeH={tW#foX@U@1bsN% zavzGSBmu}plKm3RAiI8n)ZD#jvF}zje4|?Ux|doS6zYf?+SYSb$_A8tfu`%XEYte> zQO=3|lp)!Zq%TiTxZzR4OV!i~W|WpN^(^-0YHSi?+rnEykOT}(&Iw9~&^PJO5luR4 zzt0;agt)#|Izv=V>1qWIjxCIiu112js=O_W^*VBbo3KXrs~l4!&T++q1T=XbOZ(!R z2>oWD6$DvX`4+~CtWY_4(>>0%eCZMsRJNueZFNrKLEfltx z@;D`%R*f$Qg&UcqUGV@_J>;aKWa8`~Rvi4={Xxg)dCQC22k_$?h}jxzf(U&6BJ_Ga)n8a^9yk z3-gJ^NjpB@OOj}}0BtcdhYyPMN_FDbBSu}=2A{}UU#cG~pJr-ZT7WSPQpDYRu}Pg* z`_QfwD7)3eRj%q+qUmSdzBH#FFIz@qzk1kOxX$LOoXlebMqGz`V@jgzQAhJAvRzA- z$~R~fY=k6TL(Mi~vra7=6t&FX`1bckaW=h4w{S>W$(qN+u-G~X;IW^H(VUvEgw>EZ z;oHD}Ps?-IE?Q(6mafrx^vcX5V5QViFT$LRKC8ee!z{N?+V(y3s9>Rl*t_@Ngv4BH4Ifv)CtUagdqm~kFs2t5xMgIT#_GLispPP zk#~Vh+Zg&7jWOOFCz!``2FF4|$H&{pO&L+%8M|i~-vLxqg1mOi9_SRRXA28?-vI|z zM((;#kJjI)#-Tfq2@+lA4kO2;AU^@uU1hOWqwfZNF8jDnsB*ikdxmcU*FQou^tBjx z*?r*wMm!9;%!6bp8b4pGNQeEyal_Zdy2n?2OA{n>vB68~_uSz&zZU#T2R_Yy(psF6 zAWcE)#qvOSkqEYz=DenS$YbUtmhSHq`$9;ji-;r4J{q}wS68>HZFT*WDr1NGK>I}7 zq;)XvK5l@jFxaP(aU!Vu13gQ#Q_nCyX;zy&dK2`8hO`=w)N(+sOoiflq59A|4aEF` z3kW@Jxv&mS;jf)S+by3b056xj!yXcic$YYIjBukq^yWV9O7^C##!I)yYrGu99+>4@ zs~^1!LVVzDeqH^~c-0R9L%c)Xarv<8(#i3)AxM~+U*YJaJ~r>|Tth)s@~2E%YOP95 zggQFZG3i~K(|U!l3)A({i|PwYw9Fe?eRa`}wgS61`#(Q*WOKJ;d% zp(=6X{M*GyEg!JFjbh-5IN2uMWTaG;3MXQ{GBLr*rkEqp$H84a27%izAd}wyB4vPu zS^tPJZ^Wo 
z>EqtS5B}nP<{A63No_U!2xJUH!H~g{N_-IuwigEa%}#!K!{z}8vJ1fj_UaFbs#~#* z`Wf3JCPa5%1!#uo{5C^o|301GKg0Z(A%5g9JCMCUM@ey;U5kDt1E0n?X)XBWn6h6Q zHmz?N|35m`<5?NVPAv&uO27TQ7zgbUMA>{L-=4f_P8WV|M<^@!IaF~qrXNzx z7+TteelzQ8{+F6{!DdSEB5oU>8aFuie!j+6564Ge*2+EPmZiURt}WC1O9U4RQZCAq zOtxLm^!vF#ox~1`D3vm7cuK*wFQuj<`>w z7OrofZn$Iq4k%_x|AxFmL!V;1t`Go8`sb|GKO-!F&R}@8)vB=9f`m|=Vk4sqFBm|Y zJVv@sL?dTAuRbFMz8~Vb;hN`U(2`}wsdYn6xqHx|6z|GObHjvv_mZ!q&yRdd`{%0m z<0W3V^M^lIKHGIyMV`TYh#JtU$omJYA`eXmGutLV?O8c!iZuS5B30@1jx&q*D#MAu zyYS3`o5WSF!%!u#qGqU!&JXNt|pJ?X0`N z!*MCP{?3G6D|`k9#7I2$44Wv(NEjRlp_h4vc2;vJxYi3D8wefht``F;?bFVZ zDCK_C;DcCLy5q7TXT{+&F?nFmCWSBzf8VG9O=5{=HS=QeM96HiqXa+g1NWgV7_5NH z))ZocLyMkIaQ2gabLUEzWN?`%%A&bzsNIY&Qak&^i$KWgZ3RiS0p2s?LpqR;8DFXI zmk`oQ9_l0J*p5I}yLvnyVVWB4X9lHKmL&%JD>40oS6>c2<>P#Tq1KMdnWp=}{Ddk# z0+S-BVV}YTR$^fW5tPVjr_*xA-U9dS(3~y5aaL7o%3Nn+s+?Uq4JY&u@)R}N96o7T zku@+UkeXPjv@m_xkG>r4BFEQIac_2XEzhgPhKcCiI+B%`BfC^p;}A4zFg*7WvQr;` zh{RYkr!(#k7-Ar_MVlfZYO}`q4tQky4wz3|=&=xBPm^K$ zX-Ch2{%H-^iCYIL6wGIh3N|;kEY=(7Wi1bKxG8I7WQ4Kwa)0*aW{|!x{8=`Ol^+b8*Fqpst=x+V%=|Qo0-`? zt^*;dyEiYmRql+#mleeOX1h0zZ{2E(%E3#e)m?8-XW7xRVxh@&>{!bBxn<7QBeAZM z2w%LomLW$iI9x3V0x51jpjpFD(J}U%D^_@=U1zWNmAxq>t$Foz>d{1>F@|=0aSc&T z=q1$wzLRKQ8`Q@>kKSNgM1=M&u;ybnQpl@?1O33AY^M|Jpv&Uo7*#80Q+<3b99+nx zcIs`$F*)JyfYmaAx^V54LC8M-ciBQjfDoWg1P~$YWdjtT1`yovMc2qkYl`Saw1x7; z%C*wPb#HhG4yp1Zj=afv6BB$`pS=D-!iky$8P8X|~=>uMZNP@~M*#B@Goq~&L$ ztf%|wtM1C^?G4R3KQ=iFx1!2n{Xr~jqV=;js8P0Jd9t*LMji#OTnc!zI^FaV8WF)! z^j6Haf)y@Knk1^VMKB;dKVX6E!`Lz;5|R@krl40AIns(0&(wR~u=_^Y@J6&qQ`T4# z0y6GT5=W+cgQYkuVhB_I1AbmvSNu~_Yx2dGuZG{(gX1>O9*H3NccKwS*g$8~+Dd`5 zM}<}4yTH14si$cu)A{W%w-M`ad<)57KYBZ#*YJ|Hpcl@BhQGGV$z%-DivWQi5z`EJ zNdsms%F(oW&ig^-J78QFH0|S^o4qQy*cDVNRk+1RO|Mkrs%GINg?mh|REPrB8pyj)I5Dy*iG(D3Tj-D^42K3H%tt>HTZQgs_ zz_9G5CC=<>l67zFcIsSfTNL(*f?FI@618-(=LV7G@6AG zM|-{=kc*IG&j-X%zRiu{fRN$PXSHF7dXpvvrP<@3D$dlxc}0C#_qKG#w-#JVcPAyQ z8)Fvho?WA{leo)--E#~r9gC~kncyyd_PLER_uE*zZVrv)mVL~8$TavwCLJ0HM#{wa zKBEKzBB~RQaIe>w6ViQ$CB>4A)!tw$w9$)YYP|QhCpmG}Bw-wZ*+chfLbf(~jgzJ) zys@_>d#}{9HApV?`=mqq2O~b-^a>TtZ+~%eCZizG+336v&h*eDZ4KQ*y191FmC}@m zj4TmIcO&T|mBE_mecz|FN-Yim7bZt{KbRrySznv+%jY4mSR0*PwhViw>kSJzm|TLS zI4jf8i+R;8`{j0VmlQgwT#^)Rjo)k z@}d}N2DXSoG9+uCj`3t2y&B%SzGRBYJqhUz*RO@N{6Yo@9$(bC6Z?n|PBVQHDovyKg!FW50eR zFm(WI{0%}CF1z_ma1v=tbREUOta<1O!WGABeI&3WeoNu`h{*dvLu`6&=voZzm+EaO z=UbLajT|&i!UynZJ>2|(n3M|t?WqV0ke0wq`6qP~x zZrTEqGHK~XB7H4&tXnC~1<2v)wbi1oFQII;Pel4Aar#bIoQ6y_Lae2qRX%z5!pP{I zcj(`7wi)(WzFLz-6GWA#I+fKXV3RPH^=d_WZ#E&y4X>75Ga8Q#Cqtq*d{Yj^muiX%^pnuK zmsGDgwLG*X^DC+xhg(Y6F1eb$WU~lA1wgIQ=@HZr2wU7dP%eR)4rvFJSttqShQ#_i zrMUB3WDAY`PF0jb@8t*KLo|4f968BNJB5sP$V8&>_EtQg`2-2 zNXU_9F+D_H7`$yaON{7hc<}6xo%e4U4ZM3_3AJbxi)iL{zo1u?cVHZxIWPH(&g$tg zOjwbxT3x5}LCY!q`+O>h0z;mQ-N*5o4z(pxCHeHYoAJ~O-mxchlXz7y^p|xH=A!su zY8_T>RGo+YJVm)>fF|B(q!>E{J@P}u_14Te4O!82pl)@SVd$Xz0JF>}J*5%#OSta< z^)MhNidqA2x&UW`6TxE6Cab;~m8LJ(`$K-S+U5*u)clAZWQN!tS(M5qustAwC+R97 z*&|HiTj3th|&7^{rMP^tt$IOv;;3DVlplqfD?y{o(9l~Xlks<0Ya;; zg&!k2843W6nal+xLbMbd7FsE!n!b`*l^nNJpr#ocl|;P64V}!p0XDof@bNnUA$Vc! 
z*f%e<8#d<$`aCvrDD|+A{GhtHJa4%BR!D*dVPnOlPn%e8+fsV} zf)XB7oiS?CyT`^|IIfpSw5FrECx<`!X3lPD@g}wn4!Jg;%;sy!gz}8^J@d=%pJmPD z{3LDBut|fsM_lMmGc>&hL?%Uk$Ez#{WXy#^-vNs(AR@ycJ^dg(=_~te@2}2G9L`7B zY~0xIp1gP2TC3$yBdsVioLV54r4W13YH7mx!#oL&oS zDL-hW4j!^-6W{SQJ>X}qlgcZhsqCkz-6nJyk|-dER%V2>u@CP_(_Eh8pL>qs>`>`D zyq-Z1V!HFKs%dh0(JsG&rUy=?z@Bbfq0|K1bCl~I4Hx6JAAS@m!M==99+&exzgQICuq zT1@q_M`4>8dG?SljeG}ufE<5r#xSinYBwfUV&tZq)w7r!62#_!<6?I(3`r1!a3>DA zF*JXxM|p=>GX!+ng|2dz zD}H1yK8VWvm^lm1CcP;w`Gw0|NnAw?=-)a+VZ+%=Be-Y^}6)vpJOko5p_`j-;fbxk2LRHqsSRO64@qGMd z4-;Tx(=nmpJBuEhR~+kONyR{9Uy&jEl%Wn^ov(w zm0A?{<^%2K*J5pZUI_MhO-~nScg+++aWhGEjR_rg(HzShk93VJudc4o85z>-Zy-vF zXk^#aCb!rdx{n-6D;Z(kcG5nT@`qN4iFxt1f7{oRsk)Iqs}LZv&1BNU5h9^)?6hhL zALM_sxdRIzcw9~eLV`rMZu7$xMeS&+_5vl50l#I zpQl|vp-6RVyfOcuE^eZr#?ClXQkc6_ubAPMbu|u^P zeTn$(h3CX9(PmmjfM-ZaxpOW~Z< zE{k^Wmlu~6Q#RElU~q*}6UUddc3htAJc{U^9C)i}JyYgX)xyDVhH)q$MBggDl_tgl zbN$h3jqd|Q^ST;UjPcnmj)`zwOb=`6w#8iSOSl)cfE7%d)7<#{gqhg>yKKqo|XEHu_(EU9LYC`vv$~5dh|`;64yWN=BQm2}$m)i>@MX z+5*#5Mm#wIR=or>6{uD~CR4rS4KPwomE{Pq7^b*ID1@rS2q@>x0Z{-z^qOV#~){BuJVW#_Y8UOBH~9-VzNO>LCy= zm(ktvc7u3cbi!u{1GV+hKCae@c zVAFqf_~F?fZB_qwGAMG}sIICO4_OlO?H1F&hK5}h&DKRX;P7OWaI4%JxqWU-O8&aB zkG{s4coTVGSfijvHr5**b{4-KfsG8y`dT7ZoLNx-n?()J3{ygU(-IOYAsOQ_;{5gf z&(Q_l3_Dm(NyD~L*s<^3g*dUNVK&-z_iRX#s`AJTh*In&5aXS~m2svjAjIy=q5SHF z`F}$hx>+B~iE6%3LyY*+#V~DwjU%cTQ$h+>hPY@!Tjm^X{UDYyf&9~B+LJb88^#Bjlp7zz<4~JIMPV}%@s^l)3|g&uw{FH_EcH@e*3CxsTI#2nT$8R zQ07Q4Py@9qH#+b@kgHY6Tr{{wZ`2*B&Cfqf0*{;?+pPiC-+k~e2w11!dgaZnz2hs! zT8TA}PW@88fT8%oW6RVBOO0NdO)vSP&Q@44V&as`?22O2;mtH!X1G#}Qb?0X&0UAu za)=Op>03R#hEjHWdlkQzGAeQQqw1d zOSwd^Z_)Hy;P#s2kNdPxjBIK_?)P?Vhoi<&!j;AV&Wb^ml8h-#2V)B($$6jg_!_yi zK-Lv2*_;L3v}!>ms|+Hph(C-cd&J-n#gL{Qu>=MuX}@KSA@nV5EX;vE+Ic;aBi(ux zU(X4&$18ZKbMH`!MbF0qmZvm)i|+vMhtM02Sy2j1k3KiF8rfIvoR-KP^%{m>Saxm2`7^XuQ?iu!d4NEzvWK>20S-LvwDz zc%vHu3}IL=+@@t43%*Yp4UaW?EmgSqCedTX@k8OM%#KAZup7rdli3nwRzqy)U09%{ z6erTWxR3QSWQdnkP_78$qK(#j%#;IZUQ zRLh(oeaeGx*JJPeknYRdG*p!vM=_BX?2!|W!&i?ynj{w&m)c{GwBS=pjOxp9dR?>T zTa@8LF@;NH&gKi0o;ZWNPBfH6vonI15&d%mT<@X_q>E7_(7_`{=;%D?NLp#tA2UcY z@3Qv41C&%rMS`c8zia2MhESb!iPhL6a$Y`vN<}*e5i1Cw4IU$0tu1uj^lUs zPRi(nif$wHOJN2@nN^?Zp<9w|`P7DkKOw1X9}nKG+R4yW@?PoKo-75WZWP-;G) zDHx7D$PeHFBid3H$r!5CW|5koTdgsebB&+Yo?wMYcwsaJN~I-DOP0oQQ@Nyh$wqGA zaR65&eX_WT7+*pb{G*1mbAnk!hxhRv2I z#ELw2-%CE~3uo*?;A@mnhAD;KBPlL%5kAPuQ4EL%T1f%dz^}!vH5$&?8hi>EPyTiH zN#d-L1w2ih?&>LJ@!2nFlgABx6!X{4wXEPXVh$1jw`fNr0> zwo#vpxB>5xOj9nJwBtE;nk=G$htBkP@*#J4Q+d<+Twjg0Xn&uzTAd||T0O`(Yw)e& zT6_H!>1>HH2Tp;h`Tig13) zXC)?lmg)|fPVZ9sxjl2>g*xr!DhR1lReaA6yIj^Ws+wz8Y0<8jSEbYg*l{*a!pQgE zL?^eHD_b6HxKWhVIWvBVB{#5S5SZ_hXL)!?jza*kyjS+x2jN^Cf1HWk z!rrN0<q)Y{sq*=nq0L&QBt&$gg zPcoPxZ)1BLXV)iP@;9>KZ+Pubn1dd6x)Nl#2NlZI#8m?Y@=mb>8SbT?X2mW1TZakt z4?9eXy=40v(EnDo4gaj#ER~F2gQ_;o9R5(>s8|nA&q>8NP;&14bNDrrIj~NxG2*qp zYnf__T_ypfQSUJ0Y87Ru5-n$bQf)-e1T0vIO_?^Z9)AL~516v!a{vt=)h=uIJ#OZ` zjOg*l5zC-UF22jO$W>HO`dm|Z<#Uknb8PfOjy<;S6WsU{Iq{9Kq7YKKz6D?Q;&(va zw%50`z6EbV5WiV`;Av9v9YjgeGW<6u?>!FuX7c~+=OC`sSSI&Uh_FNDv5)A-7Yw9! 
z8qg7wC6>~-;Vv|o;i`jdNOGTklW}RTQ8{Ln!ab{o0mIbBw=nlW9Hwm$hw1pZ*5nrv z*RQ}mh>la{-R7BdL8h>;Ku`?AqdJEL@Zlg@=k}J7X*?2quf(*r$wzHjOs<%$(84*} zl)bX8sXZ!@waYfXZMvO|wiB7Ynif+}Y0>b3YZBbHro`hA(WY(`T(zcP+y9js1QM?P z0_%UH8u+m(=e6stOZs^6por&gVUOWllK5q!2ySwzSAfOUB>Wb2NgB%~LQ4<|gho$0 zu*^kx7!jK>R+t@6>e#w+qEBQN=~ZKooE6SC0KOIw8v+ zGe2ahSQ-z7M-yRKIuWB(V)(Ues24evAub5q79v$B7t;%ut7VA3!Bf5lNpTMxQy|Q- z*sx5`PKAeYCM3NCwMo5!5JN9jlQ^N*DD?fAASUwu9MMvgQ-dX^(QDN-GMp?*cxFsN z=!FH53O|CtNnl{2t4kRZ)G^_7%LZMG4xhig>sj4JehphrCu;D4`e6FLWru22!>>!P!p9(S@H0Z(x~C_7j5K-UZvq)YRTwas{vWJte#z6C4qH~ z?GTI{n@#LJCZN9rkE;Fte8#h6M`*a@Q%$$-_FYJZa*zkXcfc6?V@2JQGH7nL?A*xN zaS2X8J9lH-O025C+q6v*sKSY-SE#VY$9~sAv7F0n)Apez+9YXnMjjPB3@7LmolCjw zDrG02dyB!9W_3B0HgfdsxdD}b1#IT%de=N%t!}R+to-tnhUpHXNy9)Z57nh`8cqE| zn}w}fNv%7>OoCYFX6e+(gf0nm$5`NGRtMMl@u?9Zr~UqO`JHADfIS z;My#G`gF+>!!_1u(%t>IuN-I+)F0>%fZ;kX*Y(tyLQK0-9Ikaz0dR15iv|Rz}P%T0B z$OmZ%#Pt4WS39xZai147T>Z0b2PRwYinAp=WSw6bvL%&dcC}`UXPZLG`h2CcLP;R| zDDPaZl+Y8!he(!d8$aGCXHP{cVIEk**%OnEjMmQD%5YIstNsQ#=im==%!wj-lsUEu*r<(lkTg#F%A(u;P^GrR*wu?H-#yL;_h%cR#d7ifj1iaQ~4!LJo`p6(N6- zO3<0}T5)Q=xrO$tc}L4)JSf;Op>1~2noIL(Z6vF>G)NSc9O{Vq+INvx;O+xNB(bv3 z;ed@I&vTDsj;+7nm&P);KQj&=U@QDtH8X{#dl$Pn1l@qRnFPtYQxjvf1g!3y5|1fVxkwFJNyNx?g=)kr z)Xb+hYjX&leO=ZX*W|{*T@Uw^6+u0RhtU0pO}KA?$7g0lnqEuo>5R6D7OSvh zGuyisWwzWtv#;F|X;tm?<%*Tc9Ef9V5Liq4d^t!cdoqIwx1S)FGyIDT7}?X(eN)4O zEQ05QU$a)QZ|~Q^ys?oX;=s_eJUh(`vi)JQQ0VX-@Od_jlNW}jZvu@G$rX6Xv4GW1 zrA5zx)ZLsbZu%v9*dLvw3In>sCOnWI?q$6wea6ygd6v3ha^F@}xc`p%7R^gN7<8aM zS0>74R2KpRG1Xb4|G8x0Q<%ypF6^nm$*<-P_S)TS6yk}YrW?a*pUz-!z>&59skV9) zOP~EAKC{8UCkGDkY-lcgJIUr;I9TKkR;kmQQ?bppDMcZHQyKDt4-pfABAl>)b&=|d z&>Fdd5!6`CgLlQ(tB-HI27Q8lX5Wnyi6*hrf4Ydc^tDzgGBkVemYeQXW>6ph>}ccO zJVy-+>lW@Skz)xd>GaG{M0+AZuPc#pfpKz-HQ;VY6?EJiil}IIv)Mxby7bpaXl|d} zks;vgoq-J^D;&N{ZdpL3I33sh<-UP zPL|Xx%?fjoeeARCp`=Y?0Ib{xs~A1BX^b1yhJ8;V^`8G1GM@~Z|A_n7>iI_vG5#VU zA9OU$*{SD#%9qUF!uxT+r;_{A_z0Xx$`@qxHbyt9U#jX_k zhuuOrNlF>#LHQ_4fb#2PKtgK5Rs(P_lIp_e5m=14@ZwQpfmXijD^_Hn?TTn(=Bbx@ zywNp!Nq>)h1>4PSS|t1fo^bQuxD%l5X`dymLL73qKGb0=W1TArI(ts~=OyAe_1aE@ zscqUF7%qx&(Nyv}l_ivKf@l!AMOqXPV`IW&S_ll>)g6@U3ChMy^!^6Ir|~=g_qU~g zar;XLV_9!*&WK9$F8Z`CrwLIh!-c%viLipprWDnPb21Y6nb_pxfJ5p>4^%rLyfQ3) z1PI$u2-{#EK>Y>)O&1aE5te9Vc+e~`?Jn(#-LZ~hOzFfxi2e~&y>l>4U> z@P8XzR$2RK5~i}>$>u@SHD%S*ww|=k9MIuDNFUu1a2T-d6;Q3B6<{qNi~jzbfa5-w zWLKboJ^Nxooa{Tqft_LzqVwVTCnz?tU_a8Sn zB}#v(%Kjd!;1}?BJYwt;`WOv;l>MZrt5*FF)-5?ZOvzYQnK*qp8Ki2`XfDnq-tuO9J~uO?pF6h%RsHd%4m<0PF|u)}v_W#jtL_m29` z{#O3~Qmp^uH2w=uQ2yFE1O*HKP8Iu2fASY9MryA=^%sf5s-xV`JW)4#><)g))C6UB z6??Z?re44iUwb;B8p8L=H>GE6_eV8}Vk+&Fda5nliPX*;Fekbi6*}??;+NXG;Oud@oed*ziLb(@aa~v>@U;+S2nDd&!ghm7j!<-aOD1tYUtmqnb3u3B?IhZ zUDLQ}e$oG&~MTbG3ko`zNCCQnXGR~kxiC(DXtxMXSHemaV72Y_MsjtHR{K$U?I zI76cMBby*9)EyhZ4{+96r18nRuxKcW}cyrtS@B|yi!z+)w)o-LR9%P-0n2#gP zDwC+|@PRR5)KM$1RdT9y2Ulo1?F_L>-_ohQ{lZWyoF^(bhCR@l)ZP0>;nLNse-MmE z!NBv2_+FylEwg+060421T{}e!NeI+!H|4LNka|_VCZI*>*y_8CjU1CawHF>t z{(d497y2sxeztOJZQLoF@G#jQ6&Dl4|Hn?l9BAdx6+WsX$WK4}=rn-bxDXc|CJ|m5T7)Ah6$wFVkSUV0A z!P40l-WucfxVnA^K<+dtS?jvBdWdpfczb^w09gQ5ZL8=1B_I9Zf1+&u#~;EVPRsO4 z#&+)jC!!!4&Tc6C?95EMBw4n7Bs5`#v(u7?#bS>iVXT5ldGVIvW1PI7vj@Hdj{Hxr z7d&D?Ang?#$Uwz{s+u6~*4=_v90Zfy2N8b<)T1G(KfD7uSH*U+M81^7C*N4f*!nFd z>9!6OAd^Cem#$UPA$M}(B>Yz#`sHl#e*O3Wrv1^>rG&+{OcP*|IDrKnWj2R>;zo*C zN*E4P3dOaXO`!anp0`X=L^6l{Wa+|7q`zM0Ajh9F_O$cfF5~oSX82f7V;|q_4O)35 zya>p}*lEF0v~0 z4!_k^2-oi0jX`}bnyY#XDKCskWfOP3Ac>=;Bnx*On^Cmytl4$9qN#&8a&Pfk3@-80 zR}EzmT>4Jks%YI}S>H0c$(=%jGjO07!lG|k3z|8m^(r&>aBcT$m?IY`EzzNbPO!-0 z-C_ePIam7U?x+k&)^A&-%4UXo%CpRuU9+Y`{!<;MD5M3I4#Abd72f1CC9t=}7|m1b 
zIk(I0NJY_Qx~*jBgWIlUvOK{%FrxawmK1}S&@h$3eO*`rVDpfcM-oqj@2HV*)mk`$ zVyMQfPUliwKT(E(&w-oJUCEFVb;yl~!m!~qT(BB0Vb$$#m#Nl4%;lT)86gV@D+n5HOwNtkt@op@y;YA)zT&dR*sg!I1KyJJS_fspw*alCR>M?+BbRYA zEyX{OlMWdsX{iNU(ZMN_GQfjX?-?6eD?X|$vQ%YYi@Yf*GEmrynn}F0OCA>p40W=( zbwA&K6XCKH6u|flFqgKzZR;!hGHR)x`Ne#vX-qU}m7adYRkJyfdJ0`0$Mb*#Gk4Gk zIC}v>n$*8_B>V=ais3YD7utN71k#94zFJNkABX$X+O`m~6fSPou%$LJ>!hl0JPsTT zp@S_WgpDCRcg#N3CtY%SkxBm`iolo@Is~`Xjr7SqcoRE@!&Q|U=c2*rrO~`>qZwn& zu}iZ-(>}oN3$ZJI#71S1ZNb17J(4ff%`M0ibX@pRiG=U+gSY$N^}cZ;*rA^3x8>0F zVfWx?#l(RgGsPAQ+`zjoZ*=%PbE)Q;*gwf#q7ccIG^p00C5N5VWcitAB8^5E&<}Xo z4;K0nHcZpULt40y{E2WRRhYyqwG6B< zbntzmjhLsrILD*2*nGW*jUeuD1GUluO`pNRa{7+B5>>a-Avkp?yd4@*1-kRh<`ieB zOuqLbZz21w?nwA>GEQ0Pk$i4chnvaJ^0bRVIZM&`Y4;nkTvMn8{m^>R8MWF__)Pug z{H^^2?%jr{fdnpedw6^vQd5yp>_7-x4U^*DUYFc3g7DrgD`e+B(G!9LUaK`{rqGp5ey3Lt#3!OH9=R|60=ts6MKE6eo3|N_50V5l|N%FOW8V)TrX92yM#9 zM7c+c{Vcvv<@E;aQAFT!d@RLwf?Li#W-+uVzD4hjc%kV7+l(we_*9@qx!Eo?wgK%# z*K!SP(*9JJA+2q6SLITZRuz{GVMdP+5iDts)qaUZg~0p0!Q>c98sx7u@g)|9n6UDw zCVO1(C!tQegRs$(UB=7S+gh6a!uM+$66{hr0tVFRNK;kzrxs#uBGIg{VPj0g{a|BP*)cRzExjjQOzB-#{ zsfprXstq5WA6jfLt`!ERMV#jIl>uILm%^98{yShVO#Pt*I2*KL;!QK`HQ)YW;l1*I6@lRscK7@7HXX90?>1P6GogA+zBpL z8$jZj^ADi=<3CNoQ4=GdzHZ)hmN}SpBc9C(Kc9Mp%Mk74 zYt6yrpyhWp4YrQqp$*w)SaRL9J84I||c8tjy z+JIbvYmVLADNFO&lF7;qTpQNq{uF0}^$N8LNH){9e>e{cxV&~WKs6A0tD2@UfA`== z0WN1YlaUs-)k96iQ_Oe%f^(oA=pyhpvvj~<)Jsss{4786ypvf4eKI;rpKS3h~HeVdb4@&ShU4~nc zRef8^dQ$&8pgQj_Na??ZqYXZ8eRlhiqKTK6{+{)Hs{NO<^5w-F9u-irvu}Lvjm=a+ zAM$C~<74dsq8K3+cXYUuSqk$d5k z`d*>60p$5;kU)p{KYyS7^e~!;^pXYlg#T)C-TiW&~*1J{hUstx;TDb7-TfE|r zTz(lO?Z~-E(CF#>96fmS-+u>GE>01;TCInF1E@3`HY(UFeFY%6%fQ8csG|Ih=9a^ z&SuFOtiJL3P4&M@t`D2|M}@_FWd4brU4~W5m8o)U&bU3WBTC`E)r5s2*ry7G6QjEa zZ8IW32R887MTFJL+dkQw%UjE95nicHtud&?D@Wad-?wAJ2uZC{J{DVUZ;qJrDg@E( zYD|EbDT0qcDtM&MbatIDOfo((*TYs5Hu}9!MIRmFGJ#$d20Mxj;(%rr7Hq%Rwe$EE z(hL!-FkB$R*~`Yl3e z1u9_&a}>e{^BhoNpqB3kMqL(p+-zXFS38|Il^d)#o1m3A)L}z z=vEi>eRWX(`3mo6O$b9_9Ceyh!{ovi#Ut}bEA3h;+HG{SWkq#D_k7U5z$_3N4nSTltf~nw<5o5TT=zd`8jpRlW5Hd*tHP zgkl`Jk<$PJvUWjCh)dCER7+HLbVTcQu?!{`qruk-Gl{Cf0d?P%YcO9=LS`J~5CJb{ zo3&_&g1%T|?}~=Wa?e$6Qu2T!c8s~{4wgvZ|>P5=H9#s7*oc!6kOhws2x(CT7q z4yFV&^u^l*jdMud&h}PC{I3<)dEK8y&_<{7NA0 ziT`KK!n?3IU5&sm^1Y~`a55W4;IS05dSG@_kYE8hv_zbNHfzG${VjiOZxaObJCJ5_ zEg!|BHui<<7UAT${^=2@I`#)04;19`n)iO6Ay@Dn7&>0ItM?Zd5aRd1->FLd3a!Cw zM@4Qzf^5b7y%G3!3=fI^kNzRwlSv-V+pxuNIWzG~gSj`_b%63ebXI)R2Sm@npOxfS zb9_WW-ZufMUdSd-Mn>z8H%5F&WxwiRL2%xmrDoSl!c3Iy%j@l@L&>y>n!3mh$QWZY zpMTsZ;=rjB>(Nqm`wqx{wTFoCA8r5sFWsX5g-`rfsh236AYoX&B%Mk%+tl&j9L!KE z^si)M$-8jQG`6rFFJ;-RB&b@>X9ggQ;~;hdPa*^vnbA??E6@(TuST;#cD~o4Ptx4# zRUmH^R=?+hCHgDoAC|cPzdUWzRLZ;nZMsllca`QDIs<~_wyk7iC?tvXxE<9UXtg=Y zMJY5=pmqYqw|&J(84<&_kO&%z{+-U~pE`c6mHuf+uOA5e|Gfe-{IT$_T{)Rwc)xkz ztp4FsM)Th&{l6Tg|62&wKQ(^;wGVX+v*CkYXqh`4eN5n$U*vfW+w8MYAp2v~JI2?Yy$uUPJR8Aj(7h zZg{F9x_@*;p_a}@x;p`n;$xi5BNrh~_WBE}TbOI_?Iz8<>$=D%Om8I#ut%Ksa~u3v z>8oK1T6f?SE_IEjZ|yiqU!iMt&Y?elI>Nwg9R~1 ziT5H1C`o9??%%!~$+W3yvNH1OvpmRdBXl@Qjj&jKeFv=*^Sr9(WR+e$O}s(1mW%o^ z?-Sz<#*HX3IC^)Y+#W3n_pbTNxb7nqmxA#fyQF1xU~1j4!EQ)zaB1hH(78CbT$g>1 zg>)AtMx)TQsb_DVV52*H{Ve1A1MPfzto%j+xASi?bTlGM{*CtjcCS7?z9Q0IXuWMK zE@7Ud?$*OJ{`3}YBSMbD45g~}>`fGsoljo)LT{L;T>JH*HrqYx;>Z}bp_8nM-V$t4 zvRb!tEZVsMyK>)Z11@VZ%PV{%C@A$5j@JTgOvml%Y%(Rd_-&q*%O712{XyWy!mYxB zDPd|#zo50oRyVHQ+;yAX3^zMh`UxeZ{=MR!AF6-x4P*UQk+w=7iG?xARAMZui4HVb z9i^4?MmJ_cg57kZ_%iOKth#6V$TAhm(EwW?FSSCG{2J#-l~V^2)JZoy16T4uqgt?N zs{qZXRf@R?t7uj;E;k7%n7^Z|5x(zv|0i7r7h<;T&aihDjzHg*`8 zj9qw$U&m+=_i7dDnuxC6h3_%#`RAf-wngr)eyI48wGG78W3WF@qfWUyxUP!#PF2Ta 
z+1JoAf}91nY1Dw`k=MczEod|>bd*mVhx4l++R<5TG#@8%VM3n>LRGCI#YJmaDt3V_SU2a3 zjCJak7CK`y$N{coSX0}+)Z@)Why(p7;0OyRyg{+g=s0y$F%gT+UzzQWqF~x)JLT)(#ffW zY9>1ospH6cJB1txeYozGHsZ&FG*)(JJ5j;TK5$WRH^ful0W!(3TC4cY)=PF!Tqmob}Epw5wp9up*K6})AAAusv*^qw{QP7_=~@@I{+Ar&r@WKq=GCJRwCEf)>MO5nnNkN!WHxN4^M~6OIDJnm+g`8b zlhGAT_#ezTB?iFy@7mVLQe@;1M-RUb6W&RzxJQQ`JMzA=%)ECJed4icA&y`6tH4w% z@QHAq?SX7@Ow(3nxaL=Kc?_ptjcfDvedrNb3sLyDw^)fDLyuVo{dhO%E))yTAMrUA zMoarG9gFMDxiJ9dEW3Wn9MR;j5*S03js+WJ%Jz$@Rg}x*%bdyBDzu<*78`)A@4{rH z)<1;GT;g>7&Pn(`d5-oLUyWIEv+|PagesdLqOqsRub;V7Mn`MHRH-NJqL)12`M2Og2t?5X*(@Yh2 zk@goMP0!oK28EfBD)+&K#?K5v;FjGPgh6uQM8W3JC|Y((%XJt`4l+vx=8S)CF4&sG z7NH`%T83H<7+3iW)%TnV`%DVc&R4VM5o64*M^z5hDUY~jL}d(>ygXpuDSMhm26xo( zoF0!Y@Vq-k>lZwkcxOD}g~`+=y0+T?nsdWV9qlHIqf2(97R6M`9Y3kq_;%3Nt2F9U zA;zSiWbaTI6%$|LG&Obl)6r`)%h{OF(CAzc>_fNMVGVz;VrHNe#A(I45E^7-ttNM& znMpkx@PO81bp%lFyB%;|*!>or@!+W$fg`uw%wOJYV5uokO|Fm}L7{n*$*<7BA2-2&W1_h3b?Y>N zMe*es-oVvc`pL|0lY9!-h1~nnaCZQmy)rRQ6KZROEq|<6ADDB&RbHiA`Vs(|bdvk1 zuW+azYA9B<--hS8fUFg*h}#zgN&byD4qX+h3937TOk5N9*-}yefy@FlZMOH_1T#UZ zt`}PsfUNVv5PcVf5EYF&J?DHKUEXNGspToX54?UNB1GaoUERSPC$_U9 zB1gBM5R3W}AQ`Jf5D*5UF1#EjR)Uo4O6bgJds@Cq(?&~#GQS@b&0eyW>N`8;SLWRm zWYTM^v3c%dV??QknUXjSD;&YzzT+T6+XNp0f#iNQ>k(AA)T`~h?S$8>4P0V3nM z!n@TeQL}*3SNSJ#lYHml&D<&PXY+U>pEX;IR2%6$GB8A9vgcIOoJ70ykO_09l|#MQ zoKh%Var{C(UH*M2j?F^Sj&kk_B?703&z><=O+|MG&@HbBME34b6zrZX_$*w5?2XAG zu3F2|r$$1{Crtj<6^*{)jaC%8Y2@yY?+mL}rFL#(qwsv5%}29aTT)S^sD|qm{c%oG z9d}DD6Pmo8nr9Zw zOHF$=Eo)~8+a-ThUH*F&GkCbPWx>Y=Z#m(}KhZx`YL`%@WWX6*Z74>cM96(3Z+5Sz zBYU!0C?KZReWLzq%0h)rbIIw^mGqL75`}b7v69hXh`iybQU9y^YNCgxL}HJG$5PTD zPo>wZBeX?)bXgxRz+$(L!w%JbY2wR|h;J|J!+TC}uA5#AMiOkSIqtIFG+e=II z^r)1pS}h1*x)SnZqij*p?z%JjqRusAUB@m<%0?U)*B$>}JgIZzk@V)osnB>5&>1MQ zCY`+Z_7J#XxAM{7m|FUyq5iRSQ=YOwiQ=(VCXWsEMa7RD{H(eF&jYM&w2Wr@OhgI_ zD?Kv(e5G(oAlhK1r9apX^Xthsd7Zd5*zqNr{6&dhPRM3iSa+N1?J@ zGflUBvePo%s&**qJ$c`1Y%Qs!e%@0>u!`@{x2A%Fv90~_sCoeP&+Iyd2&%ldV~OE2 z{8AesIax#C!aPBD{GfRDz>?)Y$q#t~8~!f4u&c%4H-F4u(SLOb)~pTvfU%~<97mSL z^Lxu2AnCBadJkps3KlFUm&7xeR5Y^>@Kg-m^d{;>JsQ6jf|hG>X9)X}Y%c<17-i_* zqE#~dWKYEE7TpFqj>*t+(QIovW&Fpok$#~n)2_vTw$p!VZ` zD5YTlL)XYra-3LHwx0w_0ZJlV$ z05MEC z2P~5DhC{x3yi~On;6a8y4i6HHw9DfrMG(`f10H7CW+B<{9(>=+4<}4^Lx?siKDP@Z zxIS&k5}zn`SNf}}cXdI%Zd$~6k07HIQxCuH6_|eqPQxAU@7U%cS#-RaH)lGm;)_F; zC=?{+rxsHwa2SqhbUG2@-Z<`1B*NI4FjDaovA$5Ky3twg2n8r#8%(GT=Qi6SexLCo zdaZ4$%%i~I9ua>!k-mcI9UI{>s;wa?Z=BCfo+Q%1gKiUyjWCR3G0!zo@SE*|YnH#d zub*mD!q|UW1o8!d!~o6Ww!tLhw~Yg)GNuN%jSdi85_AH6W|Tvv`D@!>x5fo*-wQp@ zJ7e9DE#Nd=uXOTQ`w1@tOa_+u!yMA@>H{nxVV-=WjA6&77W|IF6SC?ET|>O)-LJ3~ z;Td#~yalG^hOdBeK8yzWr^E?X&Plpx}?P4eKak zT|umsIAt;*PdOivdDfW!;qpDcFgBq8ZlCsrt5c<@9SRXL8{NUgu-#;?Vwpt=SPpL~ zX_~8xo3WJFC;AF=e8VJMVEtr`xOc!6ML~k~LF+>W9T%@`>*sAVNW|7)N%00GF0iHE z(Phg~Ske=+Zc5Lt;VqJkLM>|SXFRmtqXLV)(A{rft>p5jb2*$>(>8dKM5+IE+`GG( zv3?ySu#Y?ewHlH%&8y9Kp$G7N^<5!h5@Yzu4Ydcq*=b`~{bdd^o=&PPnCXH!x9Ot6 z95|$@yt7rL{R$u}xJ8KH><)68f>;s}xc8c#>%(65omQdyU$ox)a1;COc-VIw{`$W* zCQ1FBjJ~^^@xK^N{yxGqE6x3If+M!C%MNddnG2@(Ump2V=3zKt1ht(q;yB5EA?D|~ zX{sqg$nma;q$+Y*J{yfn)JHP)A507ZpKBb6x$hOJ-kdw@9*ITSUwn35H-Hb(+O(Z^qW#rGCRADKb}3Dj8l!E4Igrq@F0@#F_KpvU0lObWk}jbCV+yJae&c~x+U5~2#S67PlZ!ba zEG%iWK{=O3t08{ZH;nJ^3vih=sJz(`aveBRkvKLS!p6~?7@bz7UgGrvZ>w|DA*VbO zMJEbew0^cX;4nWloqpdTn%HkYU0sO57!; z%pIgudA^KKj>17G)|K_>l=$>)Yz%}Ew9auIEE=^Hr@RlyFkxnx;*c?w-O#bS!+tis zH+$mZ(rRqy`RM38Y3WrrN1{=!+NaS3pv-;{mswqPo?1R{0j!6k>=(n!2kj;$V5l35 z>YDZ`b2kE923rT-&B}!A3$PtLhxvTFbp)Nhe18-Glu5|Md@y1$bxlKMImC AA^-pY diff --git a/docs/disclosure-log.md b/docs/disclosure-log.md deleted file mode 100644 index 4a5df43072..0000000000 --- 
+++ /dev/null
@@ -1,358 +0,0 @@
-# Disclosure Log for IBC Protocols
-
-This document is a record of all the bugs or issues we uncovered while specifying & formally verifying the IBC protocols.
-
-
-### 1. ICS3 liveness problem due to ICS018 relayer algorithm
-
-The algorithm for relaying connection handshake datagrams of type `ConnOpenTry` does not handle the situation when both chains are in state `INIT`.
-The current relayer algorithm in [ICS018](https://github.com/cosmos/ibc/tree/19f519b2d6829e3096d6b9f79bffb7836033e79c/spec/relayer/ics-018-relayer-algorithms) specifies that the `ConnOpenTry` datagram should be relayed only if one of the chains is in state `INIT` and the other chain is uninitialized (see the snippet below); this is not enough to guarantee liveness of the connection handshake protocol (ICS3).
-
-```
-    if (localEnd.state === INIT && remoteEnd === null)
-```
-
-The correct code should cover both the case when a single chain is in state `INIT` and the case when both chains are in state `INIT`, as specified here: [Relayer.tla](https://github.com/informalsystems/ibc-rs/blob/e1b78946529e39a5c709ccd6d11637993073164e/docs/spec/relayer/Relayer.tla#L174).
-This fix only concerns the relayer algorithm ICS018.
-
-##### Channel handshake (ICS4) liveness problem
-
-The same issue (and fix) seems to exist for the channel handshake datagrams.
-
-
-### 2. ICS3 liveness problem due to `UpdateClient` semantics
-
-This problem is not specific to the connection handshake protocol (ICS3) itself, but is a bug in the way the relayers use the `UpdateClient` action.
-We classify this under ICS3, however, since this is the context where we discovered the problem.
-The TLA+ spec we have for depicting this liveness problem is for the ICS3 protocol.
-
-##### Problem statement
-
-Related issues: [#71](https://github.com/informalsystems/ibc-rs/issues/71) and [#61](https://github.com/informalsystems/ibc-rs/issues/61).
-The problem is more thoroughly described in #61, but for the sake of completeness we restate it here in a compact form.
-
-The liveness property that a correct relayer should provide is eventual delivery.
-Assuming some source chain `A`, destination chain `B`, and an IBC item `X` (e.g., connection, channel, or packet) on chain `A`, we can define this property as follows:
-
-> For any IBC item `X` on chain `A` destined for chain `B`, eventually, a correct relayer will submit item `X` to chain `B`.
-
-This is difficult to guarantee in practice, however.
-Intuitively, the difficulty arises because of a combination of two factors:
-
-1. __Proof requirement:__ For chain `B` to accept item `X`, it must verify the authenticity of this item; this is done via the light client that chain `B` maintains.
-Given an item `X` and a commitment proof for `X` constructed at height `h-1`, the light client requires the consensus state at height `h` that includes the commitment root required for verification.
-
-2. __Concurrency:__ Different relayers may update the same light client.
-Suppose a relayer `r1` wants to submit a consensus state at height `h`.
-In the meantime, however, another relayer `r2` may update this same light client to height `h'`.
-Assume `h'` is bigger than `h`.
-If the light client disallows updates with heights smaller than the current height `h'`, then `r1`'s update fails.
-Consequently, the relayer will be unable to submit consensus state at height `h`.
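To make the concurrency factor concrete, here is a minimal, self-contained Rust sketch of a light client that rejects updates at or below its latest height. This is an illustration only, not the ibc-rs API: the `LightClient` type, its `update` method, and the `StaleHeight` error are hypothetical, but the check mirrors the "disallow updates with smaller heights" policy that makes `r1`'s late update at height `h` fail once `r2` has advanced the client to `h' > h`.

```rust
/// Hypothetical, simplified light client: it only tracks the latest
/// height for which it stores a consensus state.
#[derive(Debug, Default)]
struct LightClient {
    latest_height: u64,
}

#[derive(Debug, PartialEq)]
enum UpdateError {
    /// The submitted header is not newer than the latest stored height.
    StaleHeight { submitted: u64, latest: u64 },
}

impl LightClient {
    /// Accept an update only if it is strictly newer than what we have.
    fn update(&mut self, height: u64) -> Result<(), UpdateError> {
        if height <= self.latest_height {
            return Err(UpdateError::StaleHeight {
                submitted: height,
                latest: self.latest_height,
            });
        }
        self.latest_height = height;
        Ok(())
    }
}

fn main() {
    let mut client_on_b = LightClient::default();

    // Relayer r2 races ahead and installs the consensus state at h' = 5.
    client_on_b.update(5).expect("first update succeeds");

    // Relayer r1 now tries to install the consensus state at h = 3 so that
    // its proof (constructed at h - 1) can be verified; the update is
    // rejected, and r1 must retry at a height above 5.
    let err = client_on_b.update(3).unwrap_err();
    assert_eq!(err, UpdateError::StaleHeight { submitted: 3, latest: 5 });
}
```

This is exactly the situation that the retry mechanism described next is meant to recover from.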
-
-To ensure eventual delivery, relayer `r1` would need to retry submitting item `X`, that is: resubmit the consensus state at a larger height (e.g., at `h'`) followed by the message that includes the proof for `X` (e.g., at `h'-1`).
-This retry mechanism was adopted as a solution for the [current relayer implementation](https://github.com/informalsystems/ibc-rs/blob/master/docs/architecture/adr-002-ibc-relayer.md#ibc-client-consensus-state-vs-relayer-light-client-states-vs-chain-states).
-Note that it is also possible for relayer `r2` to have submitted the same item `X` successfully; in this case, the liveness problem does not actually surface.
-
-
-##### TLA+ trace
-
-> Note that the TLA+ spec below may change in time. Here we refer to the spec as [existing at this commit](https://github.com/informalsystems/ibc-rs/tree/788c36be9e14725c542bd586b4fe4593edb3ca80/docs/spec/connection-handshake/L2-tla) (unchanged up to [release 0.0.2](https://github.com/informalsystems/ibc-rs/releases/tag/v0.0.2)).
-
-To obtain an execution in TLA+ that depicts the above liveness problem, it is sufficient to enable the `Concurrency` flag in the L2 default TLA+ spec for ICS3.
-This spec is located in [spec/connection-handshake/L2-tla/](spec/connection-handshake/L2-tla/).
-In this spec we make a few simplifications compared to the real system, most importantly: to verify an item at height `h`, a light client can use the consensus state at the same height `h` (no need for the smaller height `h-1`).
-Below we summarize the parameters as well as the sequence of actions that lead to the liveness problem.
-
-###### Parameters:
-
-- `MaxBufLen <- 2`
-- `MaxHeight <- 8`
-- `Concurrency <- TRUE`
-- Behavior spec: Temporal formula `Spec`
-- Check for `Deadlock`, Invariants `TypeInvariant` and `ConsistencyProperty`, as well as Property `Termination`
-
-###### Trace:
-
-Both chains `A` and `B` start at height `1`, and the light client on each chain has consensus state for height `1`.
-
-1. The environment submits an `ICS3MsgInit` message to chain `A`.
-
-2. Chain `A` processes the `ICS3MsgInit`, advances to height `2`, and prepares an `ICS3MsgTry` message destined for chain `B`.
-The proof in this message is for height `2`.
-
-3. The environment triggers the `AdvanceChainHeight` action of chain `B`, so this chain transitions from height `1` to height `2`.
-
-4. The environment triggers the `AdvanceChainHeight` action of chain `A`, so this chain transitions from height `2` to height `3`.
-
-5. The environment triggers the `AdvanceChainHeight` action of chain `A`, so this chain transitions from height `3` to height `4`.
-
-6. __Concurrency:__ The environment triggers the `UpdateClient` action on chain `B`: the light client on this chain is updated with height `4` (that is, the latest height of chain `A`), and chain `B` also transitions from height `2` to height `3`.
-
-7. The environment passes (i.e., relays) the `ICS3MsgTry` message to chain `B`.
-Recall that this message has proofs for height `2`; consequently, the environment also attempts to trigger the `UpdateClient` action on chain `B` for consensus state at height `2`.
-This action is not enabled, because the light client on `B` has a more recent consensus state, for height `4`.
-
-8. Chain `B` attempts to process the `ICS3MsgTry` but is unable to verify its authenticity, since the light client on this chain does not have the required consensus state at height `2`.
-Chain `B` drops this message.
-
-From this point on, the model stutters, i.e., is unable to progress further in the connection handshake protocol.
-
-
-### 3. ICS3 problems due to version negotiation
-
-__Context__.
-The original issue triggering this discussion is here: [cosmos/ics/#459](https://github.com/cosmos/ibc/issues/459).
-Briefly, version negotiation in the ICS3 handshake can interfere in various ways, breaking either the safety or liveness of this protocol.
-Several solution candidates exist, which we classify by their "mode", i.e., a strategy for picking the version at some point or another in the protocol.
-For a full description of the modes, please consult [L2-tla/readme.md#version-negotiation-modes](spec/connection-handshake/L2-tla/README.md#version-negotiation-modes).
-
-__Overview__.
-Below we use TLA+ traces to explore and report on the exact problems that can occur. We also show how the solution candidates fare.
-The table below summarizes our results for the four cases we consider:
-
-| Case | Property violation |
-|---------|----------------------------|
-| (a) Empty version intersection | liveness |
-| (b) Mode `overwrite` | safety |
-| (c) Mode `onTryNonDet` | liveness |
-| (d) Mode `onAckNonDet` | safety |
-
-
-These are the main takeaways from this discussion:
-
-1. The set of compatible versions that chains start off with (return values of `getCompatibleVersions()` in ICS3) have to intersect; otherwise a liveness issue occurs. This assumption is independent of the version negotiation mode. We report this in __case (a)__ below.
-2. Modes "overwrite", "onTryNonDet", and "onAckNonDet" all result in breaking the handshake protocol. See __cases (b), (c), and (d)__ below for traces.
-3. The deterministic modes "onTryDet" and "onAckDet" pass model checking, so a solution should be chosen among these two candidates (see the [original issue](https://github.com/cosmos/ibc/issues/459) for follow-up on the solution).
-
-##### Case (a). Empty version intersection causes liveness issue
-
-Model checking details in TLA+:
-- Model parameters:
-```
-Concurrency <- FALSE
-MaxBufLen <- 2
-MaxHeight <- 7
-MaxVersionNr <- 2
-VersionPickMode <- "overwrite"
-```
-- Check for _Deadlock_ and property _Termination_.
-
-Outcome:
-- Model checking halts with exception "Temporal properties were violated."
-
-###### Trace
-
-The two chains start off with different versions (`1` for A, and `2` for B).
-So the __compatible version__ sets on these chains do not intersect.
-
-1. The environment submits an `ICS3MsgInit` message to chain `A`.
-
-2. The environment triggers the `AdvanceChainHeight` action of chain `B`, so this chain transitions from height `1` to height `2`.
-
-3. The environment triggers the `AdvanceChainHeight` action of chain `B`, so this chain transitions from height `2` to height `3`.
-
-4. The environment triggers the `AdvanceChainHeight` action of chain `A`, so this chain transitions from height `1` to height `2`.
-
-5. Chain `A` processes the `ICS3MsgInit`, advances to height `3`, and prepares an `ICS3MsgTry` for chain `B`.
-The version in this message is `<<1>>`, the same as the version field that chain `A` started with.
-
-7. The environment relays the `ICS3MsgTry` message to the input buffer of chain `B`.
-This message has proofs for height `3`, so chain `B` gets updated with consensus state for height `4`.
-With this update, chain `B` also advances to height `4`.
-
-8. Chain `B` drops the `ICS3MsgTry` message because the version field does not match any of the compatible versions of this chain.
-Therefore, the model cannot progress.
-
-###### Fix
-
-To fix this issue, the model requires an explicit assumption that the compatible versions on the two chains must have a non-empty intersection.
-We capture this assumption in the `Init` action, via the `ChainVersionsOverlap` predicate:
-
-```tla
-Init ==
-    /\ chmA!Init
-    /\ chmB!Init
-    /\ ChainVersionsOverlap(storeChainA, storeChainB)
-    /\ InitEnv
-```
-
-Once we add the `ChainVersionsOverlap` assumption, this model no longer has liveness issues.
-The "overwrite" mode can lead to safety problems, however, which we document below.
-
-##### Case (b). Mode `overwrite` causes safety issue
-
-Model checking details in TLA+:
-- Model parameters:
-```
-Concurrency <- FALSE
-MaxBufLen <- 2
-MaxHeight <- 7
-MaxVersionNr <- 2
-VersionPickMode <- "overwrite"
-```
-- Check for invariant _VersionInvariant_, as well as _Deadlock_ and property _Termination_.
-- Make sure the `Init` action includes the `ChainVersionsOverlap` predicate.
-
-Outcome:
-- Model checking halts with exception "Invariant VersionInvariant is violated."
-
-###### Trace
-
-Both chains `A` and `B` start with the compatible versions `<<1, 2>>`.
-
-1. The environment submits an `ICS3MsgInit` message to both chains.
-
-2. Chain `A` processes the `ICS3MsgInit`, advances to height `2`, and prepares an `ICS3MsgTry` for chain `B`.
-The versions in this message are `<<1, 2>>`, the same as the version field in chain `A`.
-The connection on this chain goes from state `UNINIT` to state `INIT`.
-
-3. Chain `B` processes the `ICS3MsgInit`, advances to height `2`, and prepares an `ICS3MsgTry` for chain `A`.
-The versions in this message are `<<1, 2>>`, the same as the version field in chain `B`.
-The connection on this chain goes from state `UNINIT` to state `INIT`.
-
-4. The environment relays the `ICS3MsgTry` message to the input buffer of chain `B`.
-This message has proofs for height `2`, so the environment triggers `UpdateClient` on chain `B` for consensus state at height `2`.
-With this update, chain `B` advances to height `3`.
-
-5. The environment relays the `ICS3MsgTry` message to the input buffer of chain `A`.
-This message has proofs for height `2`, so the environment triggers `UpdateClient` on chain `A` for consensus state at height `2`.
-With this update, chain `A` advances to height `3`.
-
-6. Chain `A` processes the `ICS3MsgTry` message, advances to height `4`, and prepares an `ICS3MsgAck` message for `B`.
-The version in this message is `1`.
-The connection in this chain goes into state `TRYOPEN`, with version chosen to be `1`.
-
-7. Chain `B` processes the `ICS3MsgTry` message, advances to height `4`, and prepares an `ICS3MsgAck` message for `A`.
-The version in this message is `2`.
-The connection in this chain goes into state `TRYOPEN`, with version chosen to be `2`.
-
-8. The environment relays the `ICS3MsgAck` message to the input buffer of chain `B`.
-This message has proofs for height `4`, so the environment triggers `UpdateClient` on chain `B` for consensus state at height `4`.
-With this update, chain `B` advances to height `5`.
-
-9. Chain `B` processes the `ICS3MsgAck` message (which had version `1` -- see step 6 above), advances to height `6`, and prepares an `ICS3MsgConfirm` message for `A`.
-Chain `B` overwrites its local version (namely, `2`) with the version in the `ICS3MsgAck` message (that is, `1`).
-The connection in this chain goes into state `OPEN`, with version chosen to be `1`.
-The `ICS3MsgConfirm` that chain `B` creates contains version `1`.
-10. The environment relays the `ICS3MsgAck` message to the input buffer of chain `A`.
-This message has proofs for height `4`, so the environment triggers `UpdateClient` on chain `A` for consensus state at height `4`.
-With this update, chain `A` also advances to height `5`.
-
-11. Chain `A` processes the `ICS3MsgAck` message; recall that the version in this message is `2` (see step 7 above).
-Upon processing this message, chain `A` overwrites its local version (which was `1`) with the version in the `ICS3MsgAck` message (concretely, `2`).
-The connection in this chain goes into state `OPEN`, with version chosen to be `2`.
-Chain `A` also advances to height `6` and prepares an `ICS3MsgConfirm` message for `B`; the `ICS3MsgConfirm` contains version `2`.
-
-At this point, the connection is `OPEN` at both chains, but the version numbers do not match.
-Hence, the invariant `VersionInvariant` is violated.
-
-
-##### Case (c). Mode `onTryNonDet` causes liveness issue
-
-Setup:
-- Model parameters:
-```
-Concurrency <- FALSE
-MaxBufLen <- 2
-MaxHeight <- 7
-MaxVersionNr <- 2
-VersionPickMode <- "onTryNonDet"
-```
-- Check for _Deadlock_ and property _Termination_.
-
-Outcome:
-- Model checking halts with exception "Temporal properties were violated."
-- The issue is that the versions on the two chains diverge and can never be reconciled.
-
-###### Trace
-
-Both chains `A` and `B` start with the version field `<<1, 2>>`, that is, with two compatible versions.
-
-1. The environment submits an `ICS3MsgInit` message to both chains.
-
-2. The environment triggers the `AdvanceChainHeight` action of chain `A`, so this chain transitions from height `1` to height `2`.
-
-3. The environment triggers the `AdvanceChainHeight` action of chain `A`, so this chain transitions from height `2` to height `3`.
-
-4. Chain `B` processes the `ICS3MsgInit`, advances to height `2`, and prepares an `ICS3MsgTry` message destined for chain `A`.
-The versions in this message are `<<1, 2>>`, the same as the version field in chain `B`.
-
-5. The environment triggers the `AdvanceChainHeight` action of chain `B`, so this chain transitions from height `2` to height `3`.
-
-6. Chain `A` processes the `ICS3MsgInit`, advances to height `4`, and prepares an `ICS3MsgTry` message destined for chain `B`.
-The versions in this message are `<<1, 2>>`, the same as the version field in chain `A`.
-
-7. The environment passes (i.e., relays) the `ICS3MsgTry` message to the input buffer of chain `B`.
-This message has proofs for height `4`; consequently, the environment also triggers `UpdateClient` on chain `B` for consensus state at height `4`, preparing this chain to process the message in the input buffer.
-With this update, chain `B` advances to height `4`.
-
-8. The environment passes (i.e., relays) the `ICS3MsgTry` message to the input buffer of chain `A`.
-This message has proofs for height `2`, so the environment also triggers `UpdateClient` on chain `A` for consensus state at height `2`.
-With this update, chain `A` advances to height `5`.
-
-9. Chain `A` processes the `ICS3MsgTry`, advances to height `6`, and prepares an `ICS3MsgAck` for chain `B`.
-The version in this message is `<<2>>`, which is the version that chain `A` chose non-deterministically for this connection.
-The connection on chain `A` is now in state `TRYOPEN`.
-
-10. Chain `B` processes the `ICS3MsgTry`, advances to height `5`, and prepares an `ICS3MsgAck` for chain `A`.
-The version in this message is `<<1>>`, which chain `B` chose non-deterministically for this connection.
-The connection on chain `B` is now in state `TRYOPEN`.
-
-From this point on, the two chains cannot make further progress in the handshake, since they chose different versions.
-Neither of the two chains can process the `ICS3MsgAck` message, because the version in this message does not match the version the chain stores locally.
-(A chain should not overwrite its local version either, otherwise the safety issue from case (b) can appear.)
-Therefore, the model stutters (cannot progress anymore).
-
-##### Case (d). Mode `onAckNonDet` causes safety issue
-
-Model checking details in TLA+:
-- Model parameters:
-```
-Concurrency <- FALSE
-MaxBufLen <- 2
-MaxHeight <- 7
-MaxVersionNr <- 2
-VersionPickMode <- "onAckNonDet"
-```
-- Check for invariant _VersionInvariant_, as well as _Deadlock_ and property _Termination_.
-
-Outcome:
-- Model checking halts with exception "Invariant VersionInvariant is violated."
-
-###### Trace
-
-Both chains `A` and `B` start with the compatible versions `<<1, 2>>`.
-
-1. The environment submits an `ICS3MsgInit` message to chain `A`.
-
-2. Chain `A` processes the `ICS3MsgInit`, advances to height `2`, and prepares an `ICS3MsgTry` for chain `B`.
-The versions in this message are `<<1, 2>>`, the same as the version field in chain `A`.
-The connection on this chain goes from state `UNINIT` to state `INIT`.
-
-3. The environment relays the `ICS3MsgTry` message to the input buffer of chain `B`.
-This message has proofs for height `2`, so the environment triggers `UpdateClient` on chain `B` for consensus state at height `2`.
-With this update, chain `B` advances to height `2`.
-
-4. Chain `B` processes the `ICS3MsgTry` message, advances to height `3`, and prepares an `ICS3MsgAck` message for `A`.
-The version in this message is `<<1, 2>>`.
-The connection in this chain goes into state `TRYOPEN`; chain `B` does not choose a specific version yet, so the connection on `B` still has versions `<<1, 2>>`.
-
-5. The environment relays the `ICS3MsgAck` message to the input buffer of chain `A`.
-This message has proofs for height `3`, so the environment triggers `UpdateClient` on chain `A` for consensus state at height `3`.
-With this update, chain `A` advances to height `3`.
-
-6. Chain `A` processes the `ICS3MsgAck` message (which has versions `<<1, 2>>`), advances to height `4`, and prepares an `ICS3MsgConfirm` message for `B`.
-Chain `A` locks on version `1` (non-deterministic choice between `<<1, 2>>`), which it also reports in the `ICS3MsgConfirm` message.
-The connection in this chain goes into state `OPEN`, with version chosen to be `1`.
-
-7. The environment relays the `ICS3MsgConfirm` message to the input buffer of chain `B`.
-This message has proofs for height `4`, so the environment triggers `UpdateClient` on chain `B` for consensus state at height `4`.
-With this update, chain `B` also advances to height `4`.
-
-8. Chain `B` processes the `ICS3MsgConfirm` message (which contains version `1`).
-Chain `B` locks on version `2` (non-deterministic choice between its local versions `<<1, 2>>`).
-The connection in this chain goes into state `OPEN`.
-
-At this point, the connection is `OPEN` at both chains, but the version numbers do not match.
-Hence, the invariant `VersionInvariant` is violated.
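Cases (b), (c), and (d) all arise because the two ends pick (or overwrite) versions independently. The deterministic modes named in the takeaways above ("onTryDet"/"onAckDet") avoid this because both ends compute the same choice from the same inputs. The following Rust sketch only illustrates that idea and is not the rule ultimately adopted in ICS3: the `pick_version` helper and its "highest common version" rule are assumptions made for the example.

```rust
/// Hypothetical helper (not part of ibc-rs): deterministically pick a
/// connection version from the two ends' compatible-version lists.
/// Because the rule is a pure function of the two lists, both chains
/// compute the same result, unlike the non-deterministic modes above.
fn pick_version(local: &[u32], counterparty: &[u32]) -> Option<u32> {
    // Intersect the two compatible-version sets; per case (a), an empty
    // intersection means the negotiation cannot succeed at all.
    let mut common: Vec<u32> = local
        .iter()
        .copied()
        .filter(|v| counterparty.contains(v))
        .collect();

    // Deterministic rule for this sketch: take the highest common version.
    common.sort_unstable();
    common.pop()
}

fn main() {
    // Both chains start with compatible versions <<1, 2>>: they agree on 2.
    assert_eq!(pick_version(&[1, 2], &[1, 2]), Some(2));

    // Empty intersection (case (a)): there is no version to negotiate.
    assert_eq!(pick_version(&[1], &[2]), None);
}
```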
diff --git a/flake.lock b/flake.lock deleted file mode 100644 index 3c8414ec3d..0000000000 --- a/flake.lock +++ /dev/null @@ -1,801 +0,0 @@ -{ - "nodes": { - "akash-src": { - "flake": false, - "locked": { - "lastModified": 1648485085, - "narHash": "sha256-33FPy0dn6QuqneEqZYkFoCRm9agG7PE+9C/pYH9Gwx4=", - "owner": "ovrclk", - "repo": "akash", - "rev": "5b8b6bbede6c9fbb2df1ca536b8edfcf5652adf8", - "type": "github" - }, - "original": { - "owner": "ovrclk", - "ref": "v0.15.0-rc17", - "repo": "akash", - "type": "github" - } - }, - "apalache-src": { - "flake": false, - "locked": { - "lastModified": 1650241137, - "narHash": "sha256-15jzwbBc7ByxHJbpHmIukSNvih9oxTXeinNamgXirCU=", - "owner": "informalsystems", - "repo": "apalache", - "rev": "40d9ec66b3defe8e72803ca9241a73366497eeee", - "type": "github" - }, - "original": { - "owner": "informalsystems", - "ref": "v0.24.0", - "repo": "apalache", - "type": "github" - } - }, - "cosmos-nix": { - "inputs": { - "akash-src": "akash-src", - "apalache-src": "apalache-src", - "cosmos-sdk-src": "cosmos-sdk-src", - "crescent-src": "crescent-src", - "evmos-src": "evmos-src", - "flake-utils": "flake-utils", - "gaia5-src": "gaia5-src", - "gaia6-ordered-src": "gaia6-ordered-src", - "gaia6_0_2-src": "gaia6_0_2-src", - "gaia6_0_3-src": "gaia6_0_3-src", - "gaia6_0_4-src": "gaia6_0_4-src", - "gaia7-src": "gaia7-src", - "ibc-go-ics29-src": "ibc-go-ics29-src", - "ibc-go-v2-src": "ibc-go-v2-src", - "ibc-go-v3-src": "ibc-go-v3-src", - "ibc-rs-src": "ibc-rs-src", - "ica-src": "ica-src", - "iris-src": "iris-src", - "ixo-src": "ixo-src", - "juno-src": "juno-src", - "nixpkgs": "nixpkgs", - "osmosis-src": "osmosis-src", - "pre-commit-hooks": "pre-commit-hooks", - "regen-src": "regen-src", - "relayer-src": "relayer-src", - "rust-overlay": "rust-overlay", - "sbt-derivation": "sbt-derivation", - "sconfig-src": "sconfig-src", - "sentinel-src": "sentinel-src", - "sifchain-src": "sifchain-src", - "stargaze-src": "stargaze-src", - "stoml-src": "stoml-src", - "terra-src": "terra-src", - "ts-relayer-src": "ts-relayer-src", - "umee-src": "umee-src", - "wasmd-src": "wasmd-src", - "wasmvm_0_16_3-src": "wasmvm_0_16_3-src", - "wasmvm_1_beta7-src": "wasmvm_1_beta7-src" - }, - "locked": { - "lastModified": 1650621073, - "narHash": "sha256-/x6oEpNpPbtNU4sC3fenY/4XWmDCBTd/EU1w9h4viIk=", - "owner": "informalsystems", - "repo": "cosmos.nix", - "rev": "7cd586a42f1468c077f5d0f5d347d26312bcd6fa", - "type": "github" - }, - "original": { - "owner": "informalsystems", - "repo": "cosmos.nix", - "type": "github" - } - }, - "cosmos-sdk-src": { - "flake": false, - "locked": { - "lastModified": 1642008757, - "narHash": "sha256-owsXBdYIf7yENDjumqyQ5AQ+jPHKxVbpQbApUpTzoxo=", - "owner": "cosmos", - "repo": "cosmos-sdk", - "rev": "c1c1ad7425292924b77dc632370815088b2d3c58", - "type": "github" - }, - "original": { - "owner": "cosmos", - "ref": "v0.45.0-rc1", - "repo": "cosmos-sdk", - "type": "github" - } - }, - "crescent-src": { - "flake": false, - "locked": { - "lastModified": 1647869429, - "narHash": "sha256-c1xiTB/HgtQJSwD3ccFQIoSHPbJK6rf1nSjnM3r0oCE=", - "owner": "crescent-network", - "repo": "crescent", - "rev": "01980cfd06b06786109eaba78c154e6db1adc3d6", - "type": "github" - }, - "original": { - "owner": "crescent-network", - "ref": "v1.0.0-rc3", - "repo": "crescent", - "type": "github" - } - }, - "evmos-src": { - "flake": false, - "locked": { - "lastModified": 1648233712, - "narHash": "sha256-LCNGZPt6SwzN+4DHU6WcOl3ROhMOdXlIIeFJiJGYidc=", - "owner": "tharsis", - "repo": "evmos", - "rev": 
"2e886b2882d61081c9b0a6f5aa10d96cd78aff7a", - "type": "github" - }, - "original": { - "owner": "tharsis", - "ref": "v3.0.0-beta", - "repo": "evmos", - "type": "github" - } - }, - "flake-utils": { - "locked": { - "lastModified": 1649676176, - "narHash": "sha256-OWKJratjt2RW151VUlJPRALb7OU2S5s+f0vLj4o1bHM=", - "owner": "numtide", - "repo": "flake-utils", - "rev": "a4b154ebbdc88c8498a5c7b01589addc9e9cb678", - "type": "github" - }, - "original": { - "owner": "numtide", - "repo": "flake-utils", - "type": "github" - } - }, - "flake-utils_2": { - "locked": { - "lastModified": 1649676176, - "narHash": "sha256-OWKJratjt2RW151VUlJPRALb7OU2S5s+f0vLj4o1bHM=", - "owner": "numtide", - "repo": "flake-utils", - "rev": "a4b154ebbdc88c8498a5c7b01589addc9e9cb678", - "type": "github" - }, - "original": { - "owner": "numtide", - "repo": "flake-utils", - "type": "github" - } - }, - "flake-utils_3": { - "locked": { - "lastModified": 1637014545, - "narHash": "sha256-26IZAc5yzlD9FlDT54io1oqG/bBoyka+FJk5guaX4x4=", - "owner": "numtide", - "repo": "flake-utils", - "rev": "bba5dcc8e0b20ab664967ad83d24d64cb64ec4f4", - "type": "github" - }, - "original": { - "owner": "numtide", - "repo": "flake-utils", - "type": "github" - } - }, - "flake-utils_4": { - "locked": { - "lastModified": 1649676176, - "narHash": "sha256-OWKJratjt2RW151VUlJPRALb7OU2S5s+f0vLj4o1bHM=", - "owner": "numtide", - "repo": "flake-utils", - "rev": "a4b154ebbdc88c8498a5c7b01589addc9e9cb678", - "type": "github" - }, - "original": { - "owner": "numtide", - "repo": "flake-utils", - "type": "github" - } - }, - "gaia5-src": { - "flake": false, - "locked": { - "lastModified": 1634231239, - "narHash": "sha256-NfR9GRBNBlm5hB3lFea+Vlf4dkapZIZg0sZuyOX2cn8=", - "owner": "cosmos", - "repo": "gaia", - "rev": "b72cc994f7156c8a8991e6beed2dde84ad274588", - "type": "github" - }, - "original": { - "owner": "cosmos", - "ref": "v5.0.8", - "repo": "gaia", - "type": "github" - } - }, - "gaia6-ordered-src": { - "flake": false, - "locked": { - "lastModified": 1648034337, - "narHash": "sha256-yw3WUCLRvn46xlWAnk6nBmvc3T91aryvBcOOfJ2ocPA=", - "owner": "informalsystems", - "repo": "gaia", - "rev": "d9e61fb98308dea2e02e8c6c6a9ab969dc240cc7", - "type": "github" - }, - "original": { - "owner": "informalsystems", - "ref": "v6.0.4-ordered", - "repo": "gaia", - "type": "github" - } - }, - "gaia6_0_2-src": { - "flake": false, - "locked": { - "lastModified": 1645118548, - "narHash": "sha256-an1JVPCMcJgQYi+inx4MrAcwYjHTVFvDzw865pJc6C8=", - "owner": "cosmos", - "repo": "gaia", - "rev": "05f3795f196dd32e9233db97ed8742f8559cb483", - "type": "github" - }, - "original": { - "owner": "cosmos", - "ref": "v6.0.2", - "repo": "gaia", - "type": "github" - } - }, - "gaia6_0_3-src": { - "flake": false, - "locked": { - "lastModified": 1645184577, - "narHash": "sha256-a24C1sooMj8mVGYYV2wL7P3kM7xj/MVzfeggj186PQo=", - "owner": "cosmos", - "repo": "gaia", - "rev": "8f5dd7549fd21b99099e100da043bd8919d37ac3", - "type": "github" - }, - "original": { - "owner": "cosmos", - "ref": "v6.0.3", - "repo": "gaia", - "type": "github" - } - }, - "gaia6_0_4-src": { - "flake": false, - "locked": { - "lastModified": 1646904235, - "narHash": "sha256-JdD0DTdMo05ggGvpHN5hugEEtGA0/WQ4bhbryDlfGXo=", - "owner": "cosmos", - "repo": "gaia", - "rev": "305668ab9d962431c79d718bb0ffdeec77a46439", - "type": "github" - }, - "original": { - "owner": "cosmos", - "ref": "v6.0.4", - "repo": "gaia", - "type": "github" - } - }, - "gaia7-src": { - "flake": false, - "locked": { - "lastModified": 1648134734, - "narHash": 
"sha256-A9EqVHR2GiyuemTrjeaJWyIm6e3XUQ3nSm9dBF9gwvk=", - "owner": "cosmos", - "repo": "gaia", - "rev": "79fcf71689358b6212ae91f41070de9669421cf5", - "type": "github" - }, - "original": { - "owner": "cosmos", - "ref": "v7.0.0", - "repo": "gaia", - "type": "github" - } - }, - "ibc-go-ics29-src": { - "flake": false, - "locked": { - "lastModified": 1647958967, - "narHash": "sha256-QZ/BQ+qnz+dmosx7/bptIoAyufeWRdT2i420p2ujqf8=", - "owner": "cosmos", - "repo": "ibc-go", - "rev": "ab90f07e9a776a8aafe333a25f91fa43a0e42560", - "type": "github" - }, - "original": { - "owner": "cosmos", - "ref": "ics29-fee-middleware", - "repo": "ibc-go", - "type": "github" - } - }, - "ibc-go-v2-src": { - "flake": false, - "locked": { - "lastModified": 1647351578, - "narHash": "sha256-n2xo3CGyO9wgIPvHgKqDfPjhhy3eHNGX6XDn707BTwk=", - "owner": "cosmos", - "repo": "ibc-go", - "rev": "bfb76858a34489d85c0404e4fdd597389229787d", - "type": "github" - }, - "original": { - "owner": "cosmos", - "ref": "v2.2.0", - "repo": "ibc-go", - "type": "github" - } - }, - "ibc-go-v3-src": { - "flake": false, - "locked": { - "lastModified": 1647356202, - "narHash": "sha256-wX3kUzK5dkPeNgmBGP0mE8QeNR4LRo1obVGasZSLSpE=", - "owner": "cosmos", - "repo": "ibc-go", - "rev": "46e020640e66f9043c14c53a4d215a5b457d6703", - "type": "github" - }, - "original": { - "owner": "cosmos", - "ref": "v3.0.0", - "repo": "ibc-go", - "type": "github" - } - }, - "ibc-rs-src": { - "flake": false, - "locked": { - "lastModified": 1646665771, - "narHash": "sha256-kOc+5MVzgdUvpJrGDkjcv0rmQRPImPDSrkfFW6K+q7M=", - "owner": "informalsystems", - "repo": "ibc-rs", - "rev": "aed240774df62b19099f796f2dd8459dabd72c88", - "type": "github" - }, - "original": { - "owner": "informalsystems", - "ref": "v0.13.0-rc.0", - "repo": "ibc-rs", - "type": "github" - } - }, - "ica-src": { - "flake": false, - "locked": { - "lastModified": 1647255020, - "narHash": "sha256-Ah5pivnAmk3W0fLWnrBbi84tqwJYQETSILSvNVH6fI8=", - "owner": "cosmos", - "repo": "interchain-accounts-demo", - "rev": "09b6a493a84a135f395d74d5ec82ea983617a714", - "type": "github" - }, - "original": { - "owner": "cosmos", - "repo": "interchain-accounts-demo", - "type": "github" - } - }, - "iris-src": { - "flake": false, - "locked": { - "lastModified": 1618986686, - "narHash": "sha256-1nPJOuYeGjzBYFCS0IiC5j9TJd5KVa9IL0kROks328E=", - "owner": "irisnet", - "repo": "irishub", - "rev": "53e156b2ee7eeb0b9d5b263066d0d3c88a1af736", - "type": "github" - }, - "original": { - "owner": "irisnet", - "ref": "v1.1.1", - "repo": "irishub", - "type": "github" - } - }, - "ixo-src": { - "flake": false, - "locked": { - "lastModified": 1645476442, - "narHash": "sha256-Ewp9UyoH6z7YGrcXVpYJveRvDq02c1mNZj2hzlOoW8s=", - "owner": "ixofoundation", - "repo": "ixo-blockchain", - "rev": "2bef5d79205057be71677837dc1174be848e13e9", - "type": "github" - }, - "original": { - "owner": "ixofoundation", - "ref": "v0.18.0-rc1", - "repo": "ixo-blockchain", - "type": "github" - } - }, - "juno-src": { - "flake": false, - "locked": { - "lastModified": 1647987514, - "narHash": "sha256-Mtiin+GOH/dyrr7cO+18er+uwYjCpQDY8xhA+kkzniM=", - "owner": "CosmosContracts", - "repo": "juno", - "rev": "9a1c32f508e6314fb73e57db35313cb329639424", - "type": "github" - }, - "original": { - "owner": "CosmosContracts", - "ref": "v2.3.0-beta.2", - "repo": "juno", - "type": "github" - } - }, - "nixpkgs": { - "locked": { - "lastModified": 1648219316, - "narHash": "sha256-Ctij+dOi0ZZIfX5eMhgwugfvB+WZSrvVNAyAuANOsnQ=", - "owner": "nixos", - "repo": "nixpkgs", - "rev": 
"30d3d79b7d3607d56546dd2a6b49e156ba0ec634", - "type": "github" - }, - "original": { - "owner": "nixos", - "ref": "nixpkgs-unstable", - "repo": "nixpkgs", - "type": "github" - } - }, - "nixpkgs_2": { - "locked": { - "lastModified": 1651114127, - "narHash": "sha256-/lLC0wkMZkAdA5e1W76SnJzbhfOGDvync3VRHJMtAKk=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "6766fb6503ae1ebebc2a9704c162b2aef351f921", - "type": "github" - }, - "original": { - "id": "nixpkgs", - "type": "indirect" - } - }, - "nixpkgs_3": { - "locked": { - "lastModified": 1637453606, - "narHash": "sha256-Gy6cwUswft9xqsjWxFYEnx/63/qzaFUwatcbV5GF/GQ=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "8afc4e543663ca0a6a4f496262cd05233737e732", - "type": "github" - }, - "original": { - "owner": "NixOS", - "ref": "nixpkgs-unstable", - "repo": "nixpkgs", - "type": "github" - } - }, - "nixpkgs_4": { - "locked": { - "lastModified": 1651114127, - "narHash": "sha256-/lLC0wkMZkAdA5e1W76SnJzbhfOGDvync3VRHJMtAKk=", - "owner": "nixos", - "repo": "nixpkgs", - "rev": "6766fb6503ae1ebebc2a9704c162b2aef351f921", - "type": "github" - }, - "original": { - "owner": "nixos", - "ref": "nixpkgs-unstable", - "repo": "nixpkgs", - "type": "github" - } - }, - "osmosis-src": { - "flake": false, - "locked": { - "lastModified": 1646876870, - "narHash": "sha256-cRrGlMvgZI4GK0IdSF7mtEo/+zq42gbcJEHpE74vJdc=", - "owner": "osmosis-labs", - "repo": "osmosis", - "rev": "e55ad888546b4c970c7474a26e7c60fe079ca686", - "type": "github" - }, - "original": { - "owner": "osmosis-labs", - "ref": "v7.0.4", - "repo": "osmosis", - "type": "github" - } - }, - "pre-commit-hooks": { - "inputs": { - "flake-utils": "flake-utils_2", - "nixpkgs": "nixpkgs_2" - }, - "locked": { - "lastModified": 1646153636, - "narHash": "sha256-AlWHMzK+xJ1mG267FdT8dCq/HvLCA6jwmx2ZUy5O8tY=", - "owner": "cachix", - "repo": "pre-commit-hooks.nix", - "rev": "b6bc0b21e1617e2b07d8205e7fae7224036dfa4b", - "type": "github" - }, - "original": { - "owner": "cachix", - "repo": "pre-commit-hooks.nix", - "type": "github" - } - }, - "regen-src": { - "flake": false, - "locked": { - "lastModified": 1645832054, - "narHash": "sha256-lDb0/Bw4hAX71jsCQJUju1mKYNacWEVezx6+KdIdu6Q=", - "owner": "regen-network", - "repo": "regen-ledger", - "rev": "5fb6268ed18a488ab88fb3bfa4b84e10892a7562", - "type": "github" - }, - "original": { - "owner": "regen-network", - "ref": "v3.0.0", - "repo": "regen-ledger", - "type": "github" - } - }, - "relayer-src": { - "flake": false, - "locked": { - "lastModified": 1635197290, - "narHash": "sha256-xD+xZG4Gb6557y/jkXTGdbt8qJ6izMgC4H3uo2/j5vU=", - "owner": "cosmos", - "repo": "relayer", - "rev": "7797aa103af68faa4269af586fe6df1d30e91d4a", - "type": "github" - }, - "original": { - "owner": "cosmos", - "ref": "v1.0.0", - "repo": "relayer", - "type": "github" - } - }, - "root": { - "inputs": { - "cosmos-nix": "cosmos-nix", - "flake-utils": "flake-utils_4", - "nixpkgs": "nixpkgs_4" - } - }, - "rust-overlay": { - "inputs": { - "flake-utils": "flake-utils_3", - "nixpkgs": "nixpkgs_3" - }, - "locked": { - "lastModified": 1645755566, - "narHash": "sha256-BwjpcywzB+4hHuStgYcOWRomI8I2PCtORUbNEL6qMBk=", - "owner": "oxalica", - "repo": "rust-overlay", - "rev": "46d8d20fce510c6a25fa66f36e31f207f6ea49e4", - "type": "github" - }, - "original": { - "owner": "oxalica", - "repo": "rust-overlay", - "type": "github" - } - }, - "sbt-derivation": { - "locked": { - "lastModified": 1617466857, - "narHash": "sha256-Z7eWMLreLtiSiJ3nWDWBy1w9WNEFexkYCgT/dWZF7yo=", - "owner": "zaninime", - "repo": "sbt-derivation", - 
"rev": "920b6f187937493371e2b1687261017e6e014cf1", - "type": "github" - }, - "original": { - "owner": "zaninime", - "repo": "sbt-derivation", - "type": "github" - } - }, - "sconfig-src": { - "flake": false, - "locked": { - "lastModified": 1594094862, - "narHash": "sha256-jR2hkR0YlPyW2nKWJl90kL80R+9psNKGPYxGg7Y/YGw=", - "owner": "freshautomations", - "repo": "sconfig", - "rev": "88043754c024aec433b3b059af170b6f555931c3", - "type": "github" - }, - "original": { - "owner": "freshautomations", - "repo": "sconfig", - "type": "github" - } - }, - "sentinel-src": { - "flake": false, - "locked": { - "lastModified": 1647195309, - "narHash": "sha256-+ZobsjLNxVL3+zi6OEFQhff6Gbd9kng8B0haqcOoiP0=", - "owner": "sentinel-official", - "repo": "hub", - "rev": "7001dc8bc4517efa33cfcc83e8b127528b5bdf2e", - "type": "github" - }, - "original": { - "owner": "sentinel-official", - "ref": "v0.9.0-rc0", - "repo": "hub", - "type": "github" - } - }, - "sifchain-src": { - "flake": false, - "locked": { - "lastModified": 1648486445, - "narHash": "sha256-n5fmWtdrc0Rhs6Uo+zjcSXmyEFVIsA5L9dlrbRXGDmU=", - "owner": "Sifchain", - "repo": "sifnode", - "rev": "269cfadf6a4c08879247c2b8373323ae7239a425", - "type": "github" - }, - "original": { - "owner": "Sifchain", - "ref": "v0.12.1", - "repo": "sifnode", - "type": "github" - } - }, - "stargaze-src": { - "flake": false, - "locked": { - "lastModified": 1645539964, - "narHash": "sha256-5I5pdnBJHwNaI2Soet+zH3aH+pUbYdC9TgHBjOd1TmA=", - "owner": "public-awesome", - "repo": "stargaze", - "rev": "6ee57f18714a6d94cc6205afcd1af2ab655f8f0f", - "type": "github" - }, - "original": { - "owner": "public-awesome", - "ref": "v3.0.0", - "repo": "stargaze", - "type": "github" - } - }, - "stoml-src": { - "flake": false, - "locked": { - "lastModified": 1622172633, - "narHash": "sha256-PvKkOjjWkmK90PzKcOBq0pUWLjHLjfYs9PRqqzAR7/8=", - "owner": "freshautomations", - "repo": "stoml", - "rev": "f5dab84dbf52345a1f36389aec38b02fda086a47", - "type": "github" - }, - "original": { - "owner": "freshautomations", - "repo": "stoml", - "type": "github" - } - }, - "terra-src": { - "flake": false, - "locked": { - "lastModified": 1645516218, - "narHash": "sha256-7cmVYWFLeOZJtbfw8qaVKLDMVafoeFDXOcrmrMS9buE=", - "owner": "terra-money", - "repo": "core", - "rev": "a6b93b72a7d4fabbbb85fb89e685426f5d07cac1", - "type": "github" - }, - "original": { - "owner": "terra-money", - "ref": "v0.5.17", - "repo": "core", - "type": "github" - } - }, - "ts-relayer-src": { - "flake": false, - "locked": { - "lastModified": 1640291594, - "narHash": "sha256-mSI+qgB+e9YcFrcUAgHQnbXOQ8wxO2GmD0wNe+3ya0g=", - "owner": "confio", - "repo": "ts-relayer", - "rev": "23930794ddb64afcc80ac73ffe31ca69072c6549", - "type": "github" - }, - "original": { - "owner": "confio", - "ref": "v0.4.0", - "repo": "ts-relayer", - "type": "github" - } - }, - "umee-src": { - "flake": false, - "locked": { - "lastModified": 1648176855, - "narHash": "sha256-s7MnAaM+O84JDO1uBNZm1qGN6ZfYmhXD5rCvns4u/rc=", - "owner": "umee-network", - "repo": "umee", - "rev": "3c9b8db04d6ab19d31e89df65232abc35d1a8a59", - "type": "github" - }, - "original": { - "owner": "umee-network", - "ref": "v2.0.0", - "repo": "umee", - "type": "github" - } - }, - "wasmd-src": { - "flake": false, - "locked": { - "lastModified": 1646852618, - "narHash": "sha256-3ifvKZhdv50E6yA8jDiVnartZZ34Ji09VJbtkkW7Lig=", - "owner": "CosmWasm", - "repo": "wasmd", - "rev": "3bc0bdeab3fa2b3f7de745622226ff36c2ec6d6a", - "type": "github" - }, - "original": { - "owner": "CosmWasm", - "ref": "v0.24.0", - "repo": "wasmd", - 
"type": "github" - } - }, - "wasmvm_0_16_3-src": { - "flake": false, - "locked": { - "lastModified": 1640251271, - "narHash": "sha256-XvgAMDvAgzWaH7Q+mNZUBoaVhqAVlZ4ucIL0QFyNvWw=", - "owner": "CosmWasm", - "repo": "wasmvm", - "rev": "458e983721624548e66c0dcdd35140383966515e", - "type": "github" - }, - "original": { - "owner": "CosmWasm", - "ref": "v0.16.3", - "repo": "wasmvm", - "type": "github" - } - }, - "wasmvm_1_beta7-src": { - "flake": false, - "locked": { - "lastModified": 1646675433, - "narHash": "sha256-tt9aAPLxtIRsG1VFM1YAIHSotuBl170EiBcHSWTtARI=", - "owner": "CosmWasm", - "repo": "wasmvm", - "rev": "f7015565a59255cd09ebfcbf9345f3c87666fedd", - "type": "github" - }, - "original": { - "owner": "CosmWasm", - "ref": "v1.0.0-beta7", - "repo": "wasmvm", - "type": "github" - } - } - }, - "root": "root", - "version": 7 -} diff --git a/flake.nix b/flake.nix deleted file mode 100644 index d8a412d92e..0000000000 --- a/flake.nix +++ /dev/null @@ -1,47 +0,0 @@ -{ - description = "Nix development dependencies for ibc-rs"; - - inputs = { - nixpkgs.url = github:nixos/nixpkgs/nixpkgs-unstable; - flake-utils.url = github:numtide/flake-utils; - cosmos-nix.url = github:informalsystems/cosmos.nix; - }; - - outputs = inputs: - let - utils = inputs.flake-utils.lib; - in - utils.eachSystem - [ - "aarch64-linux" - "aarch64-darwin" - "x86_64-darwin" - "x86_64-linux" - ] - (system: - let - nixpkgs = import inputs.nixpkgs { - inherit system; - }; - - cosmos-nix = inputs.cosmos-nix.packages.${system}; - in - { - packages = { - inherit (cosmos-nix) - gaia5 - gaia6 - gaia7 - ica - gaia6-ordered - ibc-go-v2-simapp - ibc-go-v3-simapp - apalache - ; - - python = nixpkgs.python3.withPackages (p: [ - p.toml - ]); - }; - }); -} diff --git a/help b/help deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/modules/Cargo.toml b/modules/Cargo.toml index ed18de365d..5776f71135 100644 --- a/modules/Cargo.toml +++ b/modules/Cargo.toml @@ -23,28 +23,22 @@ std = [ "ibc-proto/std", "ics23/std", "clock", - "beefy-client/std", - "sp-runtime/std", "sp-core/std", - "codec/std", - "pallet-mmr-primitives/std", - "beefy-client-primitives/std", - "sp-trie/std", - "sp-io/std", "sp-std/std", + "codec/std", "tendermint-rpc" ] clock = ["tendermint/clock", "time/std"] # This feature grants access to development-time mocking libraries, such as `MockContext` or `MockHeader`. # Depends on the `testgen` suite for generating Tendermint light blocks. -mocks = ["tendermint-testgen", "clock", "std", "sha3", "ripemd", "ics11_beefy"] -ics11_beefy = ["sp-io", "sp-core", "sp-std", "beefy-primitives", "sp-runtime", "sp-trie", "pallet-mmr-primitives", "beefy-client", "beefy-client-primitives"] +mocks = ["clock", "std"] [dependencies] # Proto definitions for all IBC-related interfaces, e.g., connections or channels. 
-borsh = { version = "0.9.3", default-features = false } ibc-proto = { version = "0.18.0", path = "../proto", default-features = false } +derive = { path = "../derive", package = "ibc-derive" } +borsh = { version = "0.9.3", default-features = false } ics23 = { git = "https://github.com/composablefi/ics23", rev = "b500a5c6068eb53c83c4c6c13bd9d8c25e0bf927", default-features = false } time = { version = "0.3", default-features = false } serde_derive = { version = "1.0.104", default-features = false } @@ -53,29 +47,14 @@ serde_json = { version = "1", default-features = false } tracing = { version = "0.1.34", default-features = false } prost = { version = "0.10", default-features = false } prost-types = { version = "0.10", default-features = false } -bytes = { version = "1.1.0", default-features = false } safe-regex = { version = "0.2.5", default-features = false } subtle-encoding = { version = "0.5", default-features = false } -sha2 = { version = "0.10.2", default-features = false } flex-error = { version = "0.4.4", default-features = false } num-traits = { version = "0.2.15", default-features = false } derive_more = { version = "0.99.17", default-features = false, features = ["from", "into", "display"] } uint = { version = "0.9", default-features = false } -beefy-client = { package = "beefy-light-client", git = "https://github.com/ComposableFi/beefy-rs", rev = "cb8cadc8d45bc444367002c77cbd395eff8a741c", default-features = false, optional = true } -beefy-client-primitives = { package = "primitives", git = "https://github.com/ComposableFi/beefy-rs", rev = "cb8cadc8d45bc444367002c77cbd395eff8a741c", default-features = false, optional = true } -pallet-mmr-primitives = { package = "sp-mmr-primitives", git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.24", default-features = false, optional = true } -beefy-primitives = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.24", default-features = false, optional = true } -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.24", default-features = false, optional = true } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.24", default-features = false, optional = true} -sp-std = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.24", default-features = false, optional = true } -sp-trie = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.24", default-features = false, optional = true } -sp-io = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.24", default-features = false, optional = true } -sha3 = { version = "0.10.1", optional = true } -ripemd = { version = "0.1.1", optional = true } - primitive-types = { version = "0.11.1", default-features = false, features = ["serde_no_std"] } - +codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } [dependencies.tendermint] git = "https://github.com/composableFi/tendermint-rs" @@ -87,50 +66,19 @@ git = "https://github.com/composableFi/tendermint-rs" rev = "5a74e0f8da4d3dab83cc04b5f1363b018cf3d9e8" default-features = false -[dependencies.tendermint-light-client-verifier] -git = "https://github.com/composableFi/tendermint-rs" -rev = "5a74e0f8da4d3dab83cc04b5f1363b018cf3d9e8" -default-features = false - [dependencies.tendermint-rpc] git = "https://github.com/composableFi/tendermint-rs" 
rev = "5a74e0f8da4d3dab83cc04b5f1363b018cf3d9e8" optional = true -[dependencies.tendermint-testgen] -git = "https://github.com/composableFi/tendermint-rs" -rev = "5a74e0f8da4d3dab83cc04b5f1363b018cf3d9e8" -optional = true -default-features = false - [dev-dependencies] env_logger = "0.9.0" tracing-subscriber = { version = "0.3.11", features = ["fmt", "env-filter", "json"]} test-log = { version = "0.2.10", features = ["trace"] } modelator = "0.4.2" sha2 = { version = "0.10.2" } -tendermint-rpc = { git = "https://github.com/composableFi/tendermint-rs", rev = "5a74e0f8da4d3dab83cc04b5f1363b018cf3d9e8", features = ["http-client", "websocket-client"] } -tendermint-testgen = { git = "https://github.com/composableFi/tendermint-rs", rev = "5a74e0f8da4d3dab83cc04b5f1363b018cf3d9e8" } # Needed for generating (synthetic) light blocks. -# Beefy Light Client testing dependencies -sp-io = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.24"} -subxt = { git = "https://github.com/paritytech/subxt", rev = "ec23283d75e4b3b894294e351fd7ffa2b4431201" } tokio = { version = "1.17.0", features = ["full"] } serde_json = "1.0.74" -beefy-client = { package = "beefy-light-client", git = "https://github.com/ComposableFi/beefy-rs", rev = "cb8cadc8d45bc444367002c77cbd395eff8a741c" } -beefy-queries = { package = "beefy-prover", git = "https://github.com/ComposableFi/beefy-rs", rev = "cb8cadc8d45bc444367002c77cbd395eff8a741c" } sha3 = { version = "0.10.1" } -ripemd = { version = "0.1.1" } -beefy-client-primitives = { package = "primitives", git = "https://github.com/ComposableFi/beefy-rs", rev = "cb8cadc8d45bc444367002c77cbd395eff8a741c" } -beefy-primitives = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.24" } -pallet-mmr-primitives = { package = "sp-mmr-primitives", git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.24" } -codec = { package = "parity-scale-codec", version = "3.0.0"} -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.24" } sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.24" } sp-std = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.24" } -sp-trie = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.24" } -frame-support = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.24" } - -[[test]] -name = "mbt" -path = "tests/mbt.rs" -required-features = ["mocks"] diff --git a/modules/src/applications/transfer/acknowledgement.rs b/modules/src/applications/transfer/acknowledgement.rs index cd7a6272dd..39072dff59 100644 --- a/modules/src/applications/transfer/acknowledgement.rs +++ b/modules/src/applications/transfer/acknowledgement.rs @@ -1,6 +1,5 @@ use super::error::Error; -use crate::core::ics26_routing::context::Acknowledgement as AckTrait; -use crate::prelude::*; +use crate::{core::ics26_routing::context::Acknowledgement as AckTrait, prelude::*}; use core::fmt::{Display, Formatter}; use serde::{Deserialize, Deserializer}; @@ -12,50 +11,50 @@ pub const ACK_SUCCESS_B64: &[u8] = b"AQ=="; #[derive(Clone, Debug)] pub enum Acknowledgement { - /// Equivalent to b"AQ==" (i.e. `base64::encode(0x01)`) - Success(Vec), - /// Error Acknowledgement - Error(String), + /// Equivalent to b"AQ==" (i.e. 
`base64::encode(0x01)`) + Success(Vec), + /// Error Acknowledgement + Error(String), } impl Acknowledgement { - pub fn success() -> Self { - Self::Success(ACK_SUCCESS_B64.to_vec()) - } + pub fn success() -> Self { + Self::Success(ACK_SUCCESS_B64.to_vec()) + } - pub fn from_error(err: Error) -> Self { - Self::Error(format!("{}: {}", ACK_ERR_STR, err)) - } + pub fn from_error(err: Error) -> Self { + Self::Error(format!("{}: {}", ACK_ERR_STR, err)) + } } impl AsRef<[u8]> for Acknowledgement { - fn as_ref(&self) -> &[u8] { - match self { - Acknowledgement::Success(b) => b.as_slice(), - Acknowledgement::Error(s) => s.as_bytes(), - } - } + fn as_ref(&self) -> &[u8] { + match self { + Acknowledgement::Success(b) => b.as_slice(), + Acknowledgement::Error(s) => s.as_bytes(), + } + } } impl<'de> Deserialize<'de> for Acknowledgement { - fn deserialize>(deserializer: D) -> Result { - let s = String::deserialize(deserializer)?; - let ack = if s.as_bytes() == ACK_SUCCESS_B64 { - Self::Success(ACK_SUCCESS_B64.to_vec()) - } else { - Self::Error(s) - }; - Ok(ack) - } + fn deserialize>(deserializer: D) -> Result { + let s = String::deserialize(deserializer)?; + let ack = if s.as_bytes() == ACK_SUCCESS_B64 { + Self::Success(ACK_SUCCESS_B64.to_vec()) + } else { + Self::Error(s) + }; + Ok(ack) + } } impl Display for Acknowledgement { - fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result { - match self { - Acknowledgement::Success(_) => write!(f, "AQ=="), - Acknowledgement::Error(err_str) => write!(f, "{}", err_str), - } - } + fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result { + match self { + Acknowledgement::Success(_) => write!(f, "AQ=="), + Acknowledgement::Error(err_str) => write!(f, "{}", err_str), + } + } } impl AckTrait for Acknowledgement {} diff --git a/modules/src/applications/transfer/context.rs b/modules/src/applications/transfer/context.rs index 36e7ad7a91..044954fbca 100644 --- a/modules/src/applications/transfer/context.rs +++ b/modules/src/applications/transfer/context.rs @@ -1,359 +1,366 @@ use subtle_encoding::hex; use super::error::Error as Ics20Error; -use crate::applications::transfer::acknowledgement::Acknowledgement; -use crate::applications::transfer::events::{AckEvent, AckStatusEvent, RecvEvent, TimeoutEvent}; -use crate::applications::transfer::packet::PacketData; -use crate::applications::transfer::relay::on_ack_packet::process_ack_packet; -use crate::applications::transfer::relay::on_recv_packet::process_recv_packet; -use crate::applications::transfer::relay::on_timeout_packet::process_timeout_packet; -use crate::applications::transfer::{PrefixedCoin, PrefixedDenom, VERSION}; -use crate::core::ics04_channel::channel::{Counterparty, Order}; -use crate::core::ics04_channel::context::{ChannelKeeper, ChannelReader}; -use crate::core::ics04_channel::msgs::acknowledgement::Acknowledgement as GenericAcknowledgement; -use crate::core::ics04_channel::packet::Packet; -use crate::core::ics04_channel::Version; -use crate::core::ics24_host::identifier::{ChannelId, ConnectionId, PortId}; -use crate::core::ics26_routing::context::{ModuleOutputBuilder, OnRecvPacketAck, ReaderContext}; -use crate::prelude::*; -use crate::signer::Signer; +use crate::{ + applications::transfer::{ + acknowledgement::Acknowledgement, + events::{AckEvent, AckStatusEvent, RecvEvent, TimeoutEvent}, + packet::PacketData, + relay::{ + on_ack_packet::process_ack_packet, on_recv_packet::process_recv_packet, + on_timeout_packet::process_timeout_packet, + }, + PrefixedCoin, PrefixedDenom, VERSION, + }, + core::{ + 
ics04_channel::{ + channel::{Counterparty, Order}, + context::{ChannelKeeper, ChannelReader}, + msgs::acknowledgement::Acknowledgement as GenericAcknowledgement, + packet::Packet, + Version, + }, + ics24_host::identifier::{ChannelId, ConnectionId, PortId}, + ics26_routing::context::{ModuleOutputBuilder, OnRecvPacketAck, ReaderContext}, + }, + prelude::*, + signer::Signer, +}; pub trait Ics20Keeper: - ChannelKeeper + BankKeeper::AccountId> + ChannelKeeper + BankKeeper::AccountId> { - type AccountId; + type AccountId; } pub trait Ics20Reader: ChannelReader where - Self: Sized, + Self: Sized, { - type AccountId: TryFrom; - - /// get_port returns the portID for the transfer module. - fn get_port(&self) -> Result; - - /// Returns the escrow account id for a port and channel combination - fn get_channel_escrow_address( - &self, - port_id: &PortId, - channel_id: ChannelId, - ) -> Result<::AccountId, Ics20Error> { - let hash = cosmos_adr028_escrow_address(self, port_id, channel_id); - - String::from_utf8(hex::encode_upper(hash)) - .expect("hex encoded bytes are not valid UTF8") - .parse::() - .map_err(Ics20Error::signer)? - .try_into() - .map_err(|_| Ics20Error::parse_account_failure()) - } - - /// Returns true iff send is enabled. - fn is_send_enabled(&self) -> bool; - - /// Returns true iff receive is enabled. - fn is_receive_enabled(&self) -> bool; - - /// Returns a hash of the prefixed denom. - /// Implement only if the host chain supports hashed denominations. - fn denom_hash_string(&self, _denom: &PrefixedDenom) -> Option { - None - } + type AccountId: TryFrom; + + /// get_port returns the portID for the transfer module. + fn get_port(&self) -> Result; + + /// Returns the escrow account id for a port and channel combination + fn get_channel_escrow_address( + &self, + port_id: &PortId, + channel_id: ChannelId, + ) -> Result<::AccountId, Ics20Error> { + let hash = cosmos_adr028_escrow_address(self, port_id, channel_id); + + String::from_utf8(hex::encode_upper(hash)) + .expect("hex encoded bytes are not valid UTF8") + .parse::() + .map_err(Ics20Error::signer)? + .try_into() + .map_err(|_| Ics20Error::parse_account_failure()) + } + + /// Returns true iff send is enabled. + fn is_send_enabled(&self) -> bool; + + /// Returns true iff receive is enabled. + fn is_receive_enabled(&self) -> bool; + + /// Returns a hash of the prefixed denom. + /// Implement only if the host chain supports hashed denominations. 
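// The `denom_hash_string` hook below lets a host expose hashed denominations.
// Hosts that support them generally follow the Cosmos SDK convention: the
// display denom is "ibc/" followed by the upper-case hex of SHA-256 over
// "{trace_path}/{base_denom}". A minimal sketch of that convention (sha2 is
// assumed here purely for illustration; the hook is free to use any
// host-specific scheme):
fn hashed_denom_sketch(trace_path: &str, base_denom: &str) -> String {
    use sha2::{Digest, Sha256};
    use subtle_encoding::hex;

    let full_trace = format!("{}/{}", trace_path, base_denom);
    let digest = Sha256::digest(full_trace.as_bytes());
    let hex_upper = String::from_utf8(hex::encode_upper(digest.as_slice()))
        .expect("hex encoding is valid UTF-8");
    // Result looks like "ibc/<64 upper-case hex characters>".
    format!("ibc/{}", hex_upper)
}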
+ fn denom_hash_string(&self, _denom: &PrefixedDenom) -> Option { + None + } } // https://github.com/cosmos/cosmos-sdk/blob/master/docs/architecture/adr-028-public-key-addresses.md fn cosmos_adr028_escrow_address( - ctx: &dyn ChannelReader, - port_id: &PortId, - channel_id: ChannelId, + ctx: &dyn ChannelReader, + port_id: &PortId, + channel_id: ChannelId, ) -> Vec { - let contents = format!("{}/{}", port_id, channel_id); - let mut data = VERSION.as_bytes().to_vec(); - data.extend_from_slice(&[0]); - data.extend_from_slice(contents.as_bytes()); - - let mut hash = ctx.hash(data); - hash.truncate(20); - hash + let contents = format!("{}/{}", port_id, channel_id); + let mut data = VERSION.as_bytes().to_vec(); + data.extend_from_slice(&[0]); + data.extend_from_slice(contents.as_bytes()); + + let mut hash = ctx.hash(data); + hash.truncate(20); + hash } pub trait BankKeeper { - type AccountId; - - /// This function should enable sending ibc fungible tokens from one account to another - fn send_coins( - &mut self, - from: &Self::AccountId, - to: &Self::AccountId, - amt: &PrefixedCoin, - ) -> Result<(), Ics20Error>; - - /// This function to enable minting ibc tokens to a user account - fn mint_coins( - &mut self, - account: &Self::AccountId, - amt: &PrefixedCoin, - ) -> Result<(), Ics20Error>; - - /// This function should enable burning of minted tokens in a user account - fn burn_coins( - &mut self, - account: &Self::AccountId, - amt: &PrefixedCoin, - ) -> Result<(), Ics20Error>; + type AccountId; + + /// This function should enable sending ibc fungible tokens from one account to another + fn send_coins( + &mut self, + from: &Self::AccountId, + to: &Self::AccountId, + amt: &PrefixedCoin, + ) -> Result<(), Ics20Error>; + + /// This function to enable minting ibc tokens to a user account + fn mint_coins( + &mut self, + account: &Self::AccountId, + amt: &PrefixedCoin, + ) -> Result<(), Ics20Error>; + + /// This function should enable burning of minted tokens in a user account + fn burn_coins( + &mut self, + account: &Self::AccountId, + amt: &PrefixedCoin, + ) -> Result<(), Ics20Error>; } /// Captures all the dependencies which the ICS20 module requires to be able to dispatch and /// process IBC messages. 
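// A self-contained sketch of the ADR-028 escrow address construction used by
// `cosmos_adr028_escrow_address` above, with SHA-256 standing in for the
// host-provided `ChannelReader::hash` and "ics20-1" for `VERSION`; the bech32
// step mirrors what the unit test at the bottom of this file does:
fn escrow_address_sketch(port_id: &str, channel_id: &str, bech32_prefix: &str) -> String {
    use sha2::{Digest, Sha256};
    use subtle_encoding::bech32;

    // Pre-image is VERSION || 0x00 || "{port_id}/{channel_id}".
    let mut preimage = b"ics20-1".to_vec();
    preimage.push(0);
    preimage.extend_from_slice(format!("{}/{}", port_id, channel_id).as_bytes());

    // Hash, keep the first 20 bytes, and bech32-encode with the chain's prefix.
    let mut address = Sha256::digest(&preimage).to_vec();
    address.truncate(20);
    bech32::encode(bech32_prefix, address)
}
// With a SHA-256 host hash, escrow_address_sketch("transfer", "channel-141", "cosmos")
// should reproduce the "cosmos1x54..." address asserted in the test below.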
pub trait Ics20Context: - Ics20Keeper::AccountId> - + Ics20Reader::AccountId> - + ReaderContext + Ics20Keeper::AccountId> + + Ics20Reader::AccountId> + + ReaderContext { - type AccountId: TryFrom; + type AccountId: TryFrom; } fn validate_transfer_channel_params( - ctx: &mut impl Ics20Context, - order: Order, - port_id: &PortId, - channel_id: &ChannelId, - version: &Version, + ctx: &mut impl Ics20Context, + order: Order, + port_id: &PortId, + channel_id: &ChannelId, + version: &Version, ) -> Result<(), Ics20Error> { - if channel_id.sequence() > (u32::MAX as u64) { - return Err(Ics20Error::chan_seq_exceeds_limit(channel_id.sequence())); - } + if channel_id.sequence() > (u32::MAX as u64) { + return Err(Ics20Error::chan_seq_exceeds_limit(channel_id.sequence())) + } - if order != Order::Unordered { - return Err(Ics20Error::channel_not_unordered(order)); - } + if order != Order::Unordered { + return Err(Ics20Error::channel_not_unordered(order)) + } - let bound_port = ctx.get_port()?; - if port_id != &bound_port { - return Err(Ics20Error::invalid_port(port_id.clone(), bound_port)); - } + let bound_port = ctx.get_port()?; + if port_id != &bound_port { + return Err(Ics20Error::invalid_port(port_id.clone(), bound_port)) + } - if version != &Version::ics20() { - return Err(Ics20Error::invalid_version(version.clone())); - } + if version != &Version::ics20() { + return Err(Ics20Error::invalid_version(version.clone())) + } - Ok(()) + Ok(()) } fn validate_counterparty_version(counterparty_version: &Version) -> Result<(), Ics20Error> { - if counterparty_version == &Version::ics20() { - Ok(()) - } else { - Err(Ics20Error::invalid_counterparty_version( - counterparty_version.clone(), - )) - } + if counterparty_version == &Version::ics20() { + Ok(()) + } else { + Err(Ics20Error::invalid_counterparty_version(counterparty_version.clone())) + } } #[allow(clippy::too_many_arguments)] pub fn on_chan_open_init( - ctx: &mut impl Ics20Context, - _output: &mut ModuleOutputBuilder, - order: Order, - _connection_hops: &[ConnectionId], - port_id: &PortId, - channel_id: &ChannelId, - _counterparty: &Counterparty, - version: &Version, + ctx: &mut impl Ics20Context, + _output: &mut ModuleOutputBuilder, + order: Order, + _connection_hops: &[ConnectionId], + port_id: &PortId, + channel_id: &ChannelId, + _counterparty: &Counterparty, + version: &Version, ) -> Result<(), Ics20Error> { - validate_transfer_channel_params(ctx, order, port_id, channel_id, version) + validate_transfer_channel_params(ctx, order, port_id, channel_id, version) } #[allow(clippy::too_many_arguments)] pub fn on_chan_open_try( - ctx: &mut impl Ics20Context, - _output: &mut ModuleOutputBuilder, - order: Order, - _connection_hops: &[ConnectionId], - port_id: &PortId, - channel_id: &ChannelId, - _counterparty: &Counterparty, - version: &Version, - counterparty_version: &Version, + ctx: &mut impl Ics20Context, + _output: &mut ModuleOutputBuilder, + order: Order, + _connection_hops: &[ConnectionId], + port_id: &PortId, + channel_id: &ChannelId, + _counterparty: &Counterparty, + version: &Version, + counterparty_version: &Version, ) -> Result { - validate_transfer_channel_params(ctx, order, port_id, channel_id, version)?; - validate_counterparty_version(counterparty_version)?; - Ok(Version::ics20()) + validate_transfer_channel_params(ctx, order, port_id, channel_id, version)?; + validate_counterparty_version(counterparty_version)?; + Ok(Version::ics20()) } pub fn on_chan_open_ack( - _ctx: &mut impl Ics20Context, - _output: &mut ModuleOutputBuilder, - _port_id: 
&PortId, - _channel_id: &ChannelId, - counterparty_version: &Version, + _ctx: &mut impl Ics20Context, + _output: &mut ModuleOutputBuilder, + _port_id: &PortId, + _channel_id: &ChannelId, + counterparty_version: &Version, ) -> Result<(), Ics20Error> { - validate_counterparty_version(counterparty_version)?; - Ok(()) + validate_counterparty_version(counterparty_version)?; + Ok(()) } pub fn on_chan_open_confirm( - _ctx: &mut impl Ics20Context, - _output: &mut ModuleOutputBuilder, - _port_id: &PortId, - _channel_id: &ChannelId, + _ctx: &mut impl Ics20Context, + _output: &mut ModuleOutputBuilder, + _port_id: &PortId, + _channel_id: &ChannelId, ) -> Result<(), Ics20Error> { - Ok(()) + Ok(()) } pub fn on_chan_close_init( - _ctx: &mut impl Ics20Context, - _output: &mut ModuleOutputBuilder, - _port_id: &PortId, - _channel_id: &ChannelId, + _ctx: &mut impl Ics20Context, + _output: &mut ModuleOutputBuilder, + _port_id: &PortId, + _channel_id: &ChannelId, ) -> Result<(), Ics20Error> { - Err(Ics20Error::cant_close_channel()) + Err(Ics20Error::cant_close_channel()) } pub fn on_chan_close_confirm( - _ctx: &mut impl Ics20Context, - _output: &mut ModuleOutputBuilder, - _port_id: &PortId, - _channel_id: &ChannelId, + _ctx: &mut impl Ics20Context, + _output: &mut ModuleOutputBuilder, + _port_id: &PortId, + _channel_id: &ChannelId, ) -> Result<(), Ics20Error> { - Ok(()) + Ok(()) } pub fn on_recv_packet( - ctx: &Ctx, - output: &mut ModuleOutputBuilder, - packet: &Packet, - _relayer: &Signer, + ctx: &Ctx, + output: &mut ModuleOutputBuilder, + packet: &Packet, + _relayer: &Signer, ) -> OnRecvPacketAck { - let data = match serde_json::from_slice::(&packet.data) { - Ok(data) => data, - Err(_) => { - return OnRecvPacketAck::Failed(Box::new(Acknowledgement::Error( - Ics20Error::packet_data_deserialization().to_string(), - ))) - } - }; - - let ack = match process_recv_packet(ctx, output, packet, data.clone()) { - Ok(write_fn) => OnRecvPacketAck::Successful(Box::new(Acknowledgement::success()), write_fn), - Err(e) => OnRecvPacketAck::Failed(Box::new(Acknowledgement::from_error(e))), - }; - - let recv_event = RecvEvent { - receiver: data.receiver, - denom: data.token.denom, - amount: data.token.amount, - success: ack.is_successful(), - }; - output.emit(recv_event.into()); - - ack + let data = match serde_json::from_slice::(&packet.data) { + Ok(data) => data, + Err(_) => + return OnRecvPacketAck::Failed(Box::new(Acknowledgement::Error( + Ics20Error::packet_data_deserialization().to_string(), + ))), + }; + + let ack = match process_recv_packet(ctx, output, packet, data.clone()) { + Ok(write_fn) => OnRecvPacketAck::Successful(Box::new(Acknowledgement::success()), write_fn), + Err(e) => OnRecvPacketAck::Failed(Box::new(Acknowledgement::from_error(e))), + }; + + let recv_event = RecvEvent { + receiver: data.receiver, + denom: data.token.denom, + amount: data.token.amount, + success: ack.is_successful(), + }; + output.emit(recv_event.into()); + + ack } pub fn on_acknowledgement_packet( - ctx: &mut impl Ics20Context, - output: &mut ModuleOutputBuilder, - packet: &Packet, - acknowledgement: &GenericAcknowledgement, - _relayer: &Signer, + ctx: &mut impl Ics20Context, + output: &mut ModuleOutputBuilder, + packet: &Packet, + acknowledgement: &GenericAcknowledgement, + _relayer: &Signer, ) -> Result<(), Ics20Error> { - let data = serde_json::from_slice::(&packet.data) - .map_err(|_| Ics20Error::packet_data_deserialization())?; + let data = serde_json::from_slice::(&packet.data) + .map_err(|_| 
Ics20Error::packet_data_deserialization())?; - let acknowledgement = serde_json::from_slice::(acknowledgement.as_ref()) - .map_err(|_| Ics20Error::ack_deserialization())?; + let acknowledgement = serde_json::from_slice::(acknowledgement.as_ref()) + .map_err(|_| Ics20Error::ack_deserialization())?; - process_ack_packet(ctx, packet, &data, &acknowledgement)?; + process_ack_packet(ctx, packet, &data, &acknowledgement)?; - let ack_event = AckEvent { - receiver: data.receiver, - denom: data.token.denom, - amount: data.token.amount, - acknowledgement: acknowledgement.clone(), - }; - output.emit(ack_event.into()); - output.emit(AckStatusEvent { acknowledgement }.into()); + let ack_event = AckEvent { + receiver: data.receiver, + denom: data.token.denom, + amount: data.token.amount, + acknowledgement: acknowledgement.clone(), + }; + output.emit(ack_event.into()); + output.emit(AckStatusEvent { acknowledgement }.into()); - Ok(()) + Ok(()) } pub fn on_timeout_packet( - ctx: &mut impl Ics20Context, - output: &mut ModuleOutputBuilder, - packet: &Packet, - _relayer: &Signer, + ctx: &mut impl Ics20Context, + output: &mut ModuleOutputBuilder, + packet: &Packet, + _relayer: &Signer, ) -> Result<(), Ics20Error> { - let data = serde_json::from_slice::(&packet.data) - .map_err(|_| Ics20Error::packet_data_deserialization())?; + let data = serde_json::from_slice::(&packet.data) + .map_err(|_| Ics20Error::packet_data_deserialization())?; - process_timeout_packet(ctx, packet, &data)?; + process_timeout_packet(ctx, packet, &data)?; - let timeout_event = TimeoutEvent { - refund_receiver: data.sender, - refund_denom: data.token.denom, - refund_amount: data.token.amount, - }; - output.emit(timeout_event.into()); + let timeout_event = TimeoutEvent { + refund_receiver: data.sender, + refund_denom: data.token.denom, + refund_amount: data.token.amount, + }; + output.emit(timeout_event.into()); - Ok(()) + Ok(()) } #[cfg(test)] pub(crate) mod test { - use std::sync::Mutex; - - use std::sync::Arc; - use subtle_encoding::bech32; - - use crate::applications::transfer::context::cosmos_adr028_escrow_address; - use crate::applications::transfer::error::Error as Ics20Error; - use crate::applications::transfer::msgs::transfer::MsgTransfer; - use crate::applications::transfer::relay::send_transfer::send_transfer; - use crate::applications::transfer::PrefixedCoin; - use crate::core::ics04_channel::error::Error; - use crate::handler::HandlerOutputBuilder; - use crate::mock::context::MockIbcStore; - use crate::prelude::*; - use crate::test_utils::DummyTransferModule; - - pub(crate) fn deliver( - ctx: &mut DummyTransferModule, - output: &mut HandlerOutputBuilder<()>, - msg: MsgTransfer, - ) -> Result<(), Error> { - send_transfer(ctx, output, msg).map_err(|e: Ics20Error| Error::app_module(e.to_string())) - } - - #[test] - fn test_cosmos_escrow_address() { - fn assert_eq_escrow_address(port_id: &str, channel_id: &str, address: &str) { - let port_id = port_id.parse().unwrap(); - let channel_id = channel_id.parse().unwrap(); - let gen_address = { - let ibc_store = MockIbcStore::default(); - let ctx = DummyTransferModule::new(Arc::new(Mutex::new(ibc_store))); - let addr = cosmos_adr028_escrow_address(&ctx, &port_id, channel_id); - bech32::encode("cosmos", addr) - }; - assert_eq!(gen_address, address.to_owned()) - } - - // addresses obtained using `gaiad query ibc-transfer escrow-address [port-id] [channel-id]` - assert_eq_escrow_address( - "transfer", - "channel-141", - "cosmos1x54ltnyg88k0ejmk8ytwrhd3ltm84xehrnlslf", - ); - 
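// `on_acknowledgement_packet` above parses the raw acknowledgement bytes as a
// JSON string and compares them against the ICS-20 success marker. A small
// round-trip sketch of that wire format, using this crate's `Acknowledgement`
// type and serde_json exactly as the handlers above do:
fn acknowledgement_roundtrip_sketch() {
    use crate::applications::transfer::acknowledgement::{Acknowledgement, ACK_SUCCESS_B64};

    // The success acknowledgement is the base64 marker b"AQ==" (i.e. 0x01).
    let success = Acknowledgement::success();
    assert_eq!(success.as_ref(), ACK_SUCCESS_B64);

    // On the wire it arrives as the JSON string "\"AQ==\"" and deserializes
    // back into the success variant; any other string becomes an error ack.
    let ok: Acknowledgement =
        serde_json::from_slice(br#""AQ==""#).expect("valid JSON string");
    assert!(matches!(ok, Acknowledgement::Success(_)));

    let failed: Acknowledgement =
        serde_json::from_slice(br#""ics20 transfer failed""#).expect("valid JSON string");
    assert!(matches!(failed, Acknowledgement::Error(_)));
}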
assert_eq_escrow_address( - "transfer", - "channel-207", - "cosmos1ju6tlfclulxumtt2kglvnxduj5d93a64r5czge", - ); - assert_eq_escrow_address( - "transfer", - "channel-187", - "cosmos177x69sver58mcfs74x6dg0tv6ls4s3xmmcaw53", - ); - } + use std::sync::Mutex; + + use std::sync::Arc; + use subtle_encoding::bech32; + + use crate::{ + applications::transfer::{ + context::cosmos_adr028_escrow_address, error::Error as Ics20Error, + msgs::transfer::MsgTransfer, relay::send_transfer::send_transfer, PrefixedCoin, + }, + core::ics04_channel::error::Error, + handler::HandlerOutputBuilder, + mock::context::{ClientTypes, MockClientTypes, MockIbcStore}, + prelude::*, + test_utils::DummyTransferModule, + }; + + pub(crate) fn deliver( + ctx: &mut DummyTransferModule, + output: &mut HandlerOutputBuilder<()>, + msg: MsgTransfer, + ) -> Result<(), Error> { + send_transfer(ctx, output, msg).map_err(|e: Ics20Error| Error::app_module(e.to_string())) + } + + #[test] + fn test_cosmos_escrow_address() { + fn assert_eq_escrow_address(port_id: &str, channel_id: &str, address: &str) { + let port_id = port_id.parse().unwrap(); + let channel_id = channel_id.parse().unwrap(); + let gen_address = { + let ibc_store = MockIbcStore::::default(); + let ctx = DummyTransferModule::new(Arc::new(Mutex::new(ibc_store))); + let addr = cosmos_adr028_escrow_address(&ctx, &port_id, channel_id); + bech32::encode("cosmos", addr) + }; + assert_eq!(gen_address, address.to_owned()) + } + + // addresses obtained using `gaiad query ibc-transfer escrow-address [port-id] [channel-id]` + assert_eq_escrow_address( + "transfer", + "channel-141", + "cosmos1x54ltnyg88k0ejmk8ytwrhd3ltm84xehrnlslf", + ); + assert_eq_escrow_address( + "transfer", + "channel-207", + "cosmos1ju6tlfclulxumtt2kglvnxduj5d93a64r5czge", + ); + assert_eq_escrow_address( + "transfer", + "channel-187", + "cosmos177x69sver58mcfs74x6dg0tv6ls4s3xmmcaw53", + ); + } } diff --git a/modules/src/applications/transfer/denom.rs b/modules/src/applications/transfer/denom.rs index c55b1fb506..ba9d35c7f0 100644 --- a/modules/src/applications/transfer/denom.rs +++ b/modules/src/applications/transfer/denom.rs @@ -1,16 +1,19 @@ -use core::fmt; -use core::str::FromStr; +use core::{fmt, str::FromStr}; use derive_more::{Display, From, Into}; -use ibc_proto::cosmos::base::v1beta1::Coin as RawCoin; -use ibc_proto::ibc::applications::transfer::v1::DenomTrace as RawDenomTrace; +use ibc_proto::{ + cosmos::base::v1beta1::Coin as RawCoin, + ibc::applications::transfer::v1::DenomTrace as RawDenomTrace, +}; use serde::{Deserialize, Serialize}; use super::error::Error; -use crate::bigint::U256; -use crate::core::ics24_host::identifier::{ChannelId, PortId}; -use crate::prelude::*; -use crate::serializers::serde_string; +use crate::{ + bigint::U256, + core::ics24_host::identifier::{ChannelId, PortId}, + prelude::*, + serializers::serde_string, +}; /// A `Coin` type with fully qualified `PrefixedDenom`. 
pub type PrefixedCoin = Coin; @@ -24,42 +27,39 @@ pub type BaseCoin = Coin; pub struct BaseDenom(String); impl BaseDenom { - pub fn as_str(&self) -> &str { - &self.0 - } + pub fn as_str(&self) -> &str { + &self.0 + } } impl FromStr for BaseDenom { - type Err = Error; - - fn from_str(s: &str) -> Result { - if s.trim().is_empty() { - Err(Error::empty_base_denom()) - } else { - Ok(BaseDenom(s.to_owned())) - } - } + type Err = Error; + + fn from_str(s: &str) -> Result { + if s.trim().is_empty() { + Err(Error::empty_base_denom()) + } else { + Ok(BaseDenom(s.to_owned())) + } + } } #[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)] pub struct TracePrefix { - port_id: PortId, - channel_id: ChannelId, + port_id: PortId, + channel_id: ChannelId, } impl TracePrefix { - pub fn new(port_id: PortId, channel_id: ChannelId) -> Self { - Self { - port_id, - channel_id, - } - } + pub fn new(port_id: PortId, channel_id: ChannelId) -> Self { + Self { port_id, channel_id } + } } impl fmt::Display for TracePrefix { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}/{}", self.port_id, self.channel_id) - } + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}/{}", self.port_id, self.channel_id) + } } /// A full trace path modelled as a collection of `TracePrefix`s. @@ -70,111 +70,109 @@ impl fmt::Display for TracePrefix { pub struct TracePath(Vec); impl TracePath { - /// Returns true iff this path starts with the specified prefix - pub fn starts_with(&self, prefix: &TracePrefix) -> bool { - self.0.last().map(|p| p == prefix).unwrap_or(false) - } - - /// Removes the specified prefix from the path if there is a match, otherwise does nothing. - pub fn remove_prefix(&mut self, prefix: &TracePrefix) { - if self.starts_with(prefix) { - self.0.pop(); - } - } - - /// Adds the specified prefix to the path. - pub fn add_prefix(&mut self, prefix: TracePrefix) { - self.0.push(prefix) - } - - /// Returns true if the path is empty and false otherwise. - pub fn is_empty(&self) -> bool { - self.0.is_empty() - } + /// Returns true iff this path starts with the specified prefix + pub fn starts_with(&self, prefix: &TracePrefix) -> bool { + self.0.last().map(|p| p == prefix).unwrap_or(false) + } + + /// Removes the specified prefix from the path if there is a match, otherwise does nothing. + pub fn remove_prefix(&mut self, prefix: &TracePrefix) { + if self.starts_with(prefix) { + self.0.pop(); + } + } + + /// Adds the specified prefix to the path. + pub fn add_prefix(&mut self, prefix: TracePrefix) { + self.0.push(prefix) + } + + /// Returns true if the path is empty and false otherwise. 
+ pub fn is_empty(&self) -> bool { + self.0.is_empty() + } } impl<'a> TryFrom> for TracePath { - type Error = Error; - - fn try_from(v: Vec<&'a str>) -> Result { - if v.len() % 2 != 0 { - return Err(Error::invalid_trace_length(v.len())); - } - - let mut trace = vec![]; - let id_pairs = v.chunks_exact(2).map(|paths| (paths[0], paths[1])); - for (pos, (port_id, channel_id)) in id_pairs.rev().enumerate() { - let port_id = - PortId::from_str(port_id).map_err(|e| Error::invalid_trace_port_id(pos, e))?; - let channel_id = ChannelId::from_str(channel_id) - .map_err(|e| Error::invalid_trace_channel_id(pos, e))?; - trace.push(TracePrefix { - port_id, - channel_id, - }); - } - - Ok(trace.into()) - } + type Error = Error; + + fn try_from(v: Vec<&'a str>) -> Result { + if v.len() % 2 != 0 { + return Err(Error::invalid_trace_length(v.len())) + } + + let mut trace = vec![]; + let id_pairs = v.chunks_exact(2).map(|paths| (paths[0], paths[1])); + for (pos, (port_id, channel_id)) in id_pairs.rev().enumerate() { + let port_id = + PortId::from_str(port_id).map_err(|e| Error::invalid_trace_port_id(pos, e))?; + let channel_id = ChannelId::from_str(channel_id) + .map_err(|e| Error::invalid_trace_channel_id(pos, e))?; + trace.push(TracePrefix { port_id, channel_id }); + } + + Ok(trace.into()) + } } impl FromStr for TracePath { - type Err = Error; - - fn from_str(s: &str) -> Result { - let parts = { - let parts: Vec<&str> = s.split('/').collect(); - if parts.len() == 1 && parts[0].trim().is_empty() { - vec![] - } else { - parts - } - }; - parts.try_into() - } + type Err = Error; + + fn from_str(s: &str) -> Result { + let parts = { + let parts: Vec<&str> = s.split('/').collect(); + if parts.len() == 1 && parts[0].trim().is_empty() { + vec![] + } else { + parts + } + }; + parts.try_into() + } } impl fmt::Display for TracePath { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let path = self - .0 - .iter() - .rev() - .map(|prefix| prefix.to_string()) - .collect::>() - .join("/"); - write!(f, "{}", path) - } + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let path = self + .0 + .iter() + .rev() + .map(|prefix| prefix.to_string()) + .collect::>() + .join("/"); + write!(f, "{}", path) + } } /// A type that contains the base denomination for ICS20 and the source tracing information path. #[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Serialize, Deserialize)] pub struct PrefixedDenom { - /// A series of `{port-id}/{channel-id}`s for tracing the source of the token. - #[serde(with = "serde_string")] - trace_path: TracePath, - /// Base denomination of the relayed fungible token. - base_denom: BaseDenom, + /// A series of `{port-id}/{channel-id}`s for tracing the source of the token. + #[serde(with = "serde_string")] + trace_path: TracePath, + /// Base denomination of the relayed fungible token. + base_denom: BaseDenom, } impl PrefixedDenom { - /// Removes the specified prefix from the trace path if there is a match, otherwise does nothing. - pub fn remove_trace_prefix(&mut self, prefix: &TracePrefix) { - self.trace_path.remove_prefix(prefix) - } - - /// Adds the specified prefix to the trace path. - pub fn add_trace_prefix(&mut self, prefix: TracePrefix) { - self.trace_path.add_prefix(prefix) - } - - pub fn trace_path(&self) -> &TracePath { - &self.trace_path - } - - pub fn base_denom(&self) -> &BaseDenom { - &self.base_denom - } + /// Removes the specified prefix from the trace path if there is a match, otherwise does + /// nothing. 
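// `TracePath` stores hops innermost-first, so the *outermost*
// "{port}/{channel}" pair of the printed form sits last in the Vec and is what
// `starts_with` / `remove_prefix` act on. A short worked sketch of that
// ordering, using the types defined in this file:
fn trace_path_ordering_sketch() {
    use core::str::FromStr;

    let mut path = TracePath::from_str("transfer/channel-0/transfer/channel-1")
        .expect("well-formed trace path");
    let outermost =
        TracePrefix::new("transfer".parse().unwrap(), "channel-0".parse().unwrap());

    // The leftmost pair of the string is the outermost hop...
    assert!(path.starts_with(&outermost));

    // ...and stripping it leaves only the older, inner hop.
    path.remove_prefix(&outermost);
    assert_eq!(path.to_string(), "transfer/channel-1");
}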
+ pub fn remove_trace_prefix(&mut self, prefix: &TracePrefix) { + self.trace_path.remove_prefix(prefix) + } + + /// Adds the specified prefix to the trace path. + pub fn add_trace_prefix(&mut self, prefix: TracePrefix) { + self.trace_path.add_prefix(prefix) + } + + pub fn trace_path(&self) -> &TracePath { + &self.trace_path + } + + pub fn base_denom(&self) -> &BaseDenom { + &self.base_denom + } } /// Returns true if the denomination originally came from the sender chain and @@ -202,292 +200,256 @@ impl PrefixedDenom { /// created by the chain at the other end of A's port "transfer" and channel /// "someOtherChannel". pub fn is_sender_chain_source( - source_port: PortId, - source_channel: ChannelId, - denom: &PrefixedDenom, + source_port: PortId, + source_channel: ChannelId, + denom: &PrefixedDenom, ) -> bool { - !is_receiver_chain_source(source_port, source_channel, denom) + !is_receiver_chain_source(source_port, source_channel, denom) } /// Returns true if the denomination originally came from the receiving chain and false otherwise. pub fn is_receiver_chain_source( - source_port: PortId, - source_channel: ChannelId, - denom: &PrefixedDenom, + source_port: PortId, + source_channel: ChannelId, + denom: &PrefixedDenom, ) -> bool { - // For example, let - // A: sender chain in this transfer, port "transfer" and channel "c2b" (to B) - // B: receiver chain in this transfer, port "transfer" and channel "c2a" (to A) - // - // If B had originally sent the token in a previous tranfer, then A would have stored the token as - // "transfer/c2b/{token_denom}". Now, A is sending to B, so to check if B is the source of the token, - // we need to check if the token starts with "transfer/c2b". - let prefix = TracePrefix::new(source_port, source_channel); - denom.trace_path.starts_with(&prefix) + // For example, let + // A: sender chain in this transfer, port "transfer" and channel "c2b" (to B) + // B: receiver chain in this transfer, port "transfer" and channel "c2a" (to A) + // + // If B had originally sent the token in a previous tranfer, then A would have stored the token + // as "transfer/c2b/{token_denom}". Now, A is sending to B, so to check if B is the source of + // the token, we need to check if the token starts with "transfer/c2b". 
+ let prefix = TracePrefix::new(source_port, source_channel); + denom.trace_path.starts_with(&prefix) } impl FromStr for PrefixedDenom { - type Err = Error; - - fn from_str(s: &str) -> Result { - let mut parts: Vec<&str> = s.split('/').collect(); - let last_part = parts.pop().expect("split() returned an empty iterator"); - - let (base_denom, trace_path) = { - if last_part == s { - (BaseDenom::from_str(s)?, TracePath::default()) - } else { - let base_denom = BaseDenom::from_str(last_part)?; - let trace_path = TracePath::try_from(parts)?; - (base_denom, trace_path) - } - }; - - Ok(Self { - trace_path, - base_denom, - }) - } + type Err = Error; + + fn from_str(s: &str) -> Result { + let mut parts: Vec<&str> = s.split('/').collect(); + let last_part = parts.pop().expect("split() returned an empty iterator"); + + let (base_denom, trace_path) = { + if last_part == s { + (BaseDenom::from_str(s)?, TracePath::default()) + } else { + let base_denom = BaseDenom::from_str(last_part)?; + let trace_path = TracePath::try_from(parts)?; + (base_denom, trace_path) + } + }; + + Ok(Self { trace_path, base_denom }) + } } impl TryFrom for PrefixedDenom { - type Error = Error; - - fn try_from(value: RawDenomTrace) -> Result { - let base_denom = BaseDenom::from_str(&value.base_denom)?; - let trace_path = TracePath::from_str(&value.path)?; - Ok(Self { - trace_path, - base_denom, - }) - } + type Error = Error; + + fn try_from(value: RawDenomTrace) -> Result { + let base_denom = BaseDenom::from_str(&value.base_denom)?; + let trace_path = TracePath::from_str(&value.path)?; + Ok(Self { trace_path, base_denom }) + } } impl From for RawDenomTrace { - fn from(value: PrefixedDenom) -> Self { - Self { - path: value.trace_path.to_string(), - base_denom: value.base_denom.to_string(), - } - } + fn from(value: PrefixedDenom) -> Self { + Self { path: value.trace_path.to_string(), base_denom: value.base_denom.to_string() } + } } impl From for PrefixedDenom { - fn from(denom: BaseDenom) -> Self { - Self { - trace_path: Default::default(), - base_denom: denom, - } - } + fn from(denom: BaseDenom) -> Self { + Self { trace_path: Default::default(), base_denom: denom } + } } impl fmt::Display for PrefixedDenom { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - if self.trace_path.0.is_empty() { - write!(f, "{}", self.base_denom) - } else { - write!(f, "{}/{}", self.trace_path, self.base_denom) - } - } + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + if self.trace_path.0.is_empty() { + write!(f, "{}", self.base_denom) + } else { + write!(f, "{}/{}", self.trace_path, self.base_denom) + } + } } /// A type for representing token transfer amounts. 
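// A simplified sketch of how a receive handler typically combines the helpers
// above (the crate's actual logic lives in `relay::on_recv_packet`, which is
// not part of this hunk): when the receiving chain is the token's source, the
// escrowed funds are released and the source prefix is stripped; otherwise a
// voucher is minted under the destination prefix. The port/channel arguments
// here stand in for packet fields and are purely illustrative.
fn classify_incoming_denom_sketch(
    source_port: PortId,
    source_channel: ChannelId,
    dest_port: PortId,
    dest_channel: ChannelId,
    mut denom: PrefixedDenom,
) -> PrefixedDenom {
    if is_receiver_chain_source(source_port.clone(), source_channel.clone(), &denom) {
        // The token is returning home: drop the hop it gained when it left.
        denom.remove_trace_prefix(&TracePrefix::new(source_port, source_channel));
    } else {
        // Foreign token: record the hop it takes onto this chain.
        denom.add_trace_prefix(TracePrefix::new(dest_port, dest_channel));
    }
    denom
}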
#[derive( - Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Serialize, Deserialize, Display, From, Into, + Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Serialize, Deserialize, Display, From, Into, )] pub struct Amount(U256); impl Amount { - pub fn checked_add(self, rhs: Self) -> Option { - self.0.checked_add(rhs.0).map(Self) - } + pub fn checked_add(self, rhs: Self) -> Option { + self.0.checked_add(rhs.0).map(Self) + } - pub fn checked_sub(self, rhs: Self) -> Option { - self.0.checked_sub(rhs.0).map(Self) - } + pub fn checked_sub(self, rhs: Self) -> Option { + self.0.checked_sub(rhs.0).map(Self) + } - pub fn as_u256(&self) -> U256 { - self.0 - } + pub fn as_u256(&self) -> U256 { + self.0 + } } impl FromStr for Amount { - type Err = Error; + type Err = Error; - fn from_str(s: &str) -> Result { - let amount = U256::from_str_radix(s, 10).map_err(Error::invalid_amount)?; - Ok(Self(amount)) - } + fn from_str(s: &str) -> Result { + let amount = U256::from_str_radix(s, 10).map_err(Error::invalid_amount)?; + Ok(Self(amount)) + } } impl From for Amount { - fn from(v: u64) -> Self { - Self(v.into()) - } + fn from(v: u64) -> Self { + Self(v.into()) + } } /// Coin defines a token with a denomination and an amount. #[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Serialize, Deserialize)] pub struct Coin { - /// Denomination - pub denom: D, - /// Amount - #[serde(with = "serde_string")] - pub amount: Amount, + /// Denomination + pub denom: D, + /// Amount + #[serde(with = "serde_string")] + pub amount: Amount, } impl TryFrom for Coin where - Error: From<::Err>, + Error: From<::Err>, { - type Error = Error; + type Error = Error; - fn try_from(proto: RawCoin) -> Result, Self::Error> { - let denom = D::from_str(&proto.denom)?; - let amount = Amount::from_str(&proto.amount)?; - Ok(Self { denom, amount }) - } + fn try_from(proto: RawCoin) -> Result, Self::Error> { + let denom = D::from_str(&proto.denom)?; + let amount = Amount::from_str(&proto.amount)?; + Ok(Self { denom, amount }) + } } impl From> for RawCoin { - fn from(coin: Coin) -> RawCoin { - RawCoin { - denom: coin.denom.to_string(), - amount: coin.amount.to_string(), - } - } + fn from(coin: Coin) -> RawCoin { + RawCoin { denom: coin.denom.to_string(), amount: coin.amount.to_string() } + } } impl From for PrefixedCoin { - fn from(coin: BaseCoin) -> PrefixedCoin { - PrefixedCoin { - denom: coin.denom.into(), - amount: coin.amount, - } - } + fn from(coin: BaseCoin) -> PrefixedCoin { + PrefixedCoin { denom: coin.denom.into(), amount: coin.amount } + } } impl fmt::Display for PrefixedCoin { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}-{}", self.amount, self.denom) - } + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}-{}", self.amount, self.denom) + } } #[cfg(test)] mod tests { - use super::*; - - #[test] - fn test_denom_validation() -> Result<(), Error> { - assert!(BaseDenom::from_str("").is_err(), "empty base denom"); - assert!(BaseDenom::from_str("uatom").is_ok(), "valid base denom"); - assert!(PrefixedDenom::from_str("").is_err(), "empty denom trace"); - assert!( - PrefixedDenom::from_str("transfer/channel-0/").is_err(), - "empty base denom with trace" - ); - assert!(PrefixedDenom::from_str("/uatom").is_err(), "empty prefix"); - assert!(PrefixedDenom::from_str("//uatom").is_err(), "empty ids"); - assert!( - PrefixedDenom::from_str("transfer/").is_err(), - "single trace" - ); - assert!( - PrefixedDenom::from_str("transfer/atom").is_err(), - "single trace with base denom" - ); - 
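// `Amount` wraps the crate's 256-bit unsigned integer (`bigint::U256`), so
// arithmetic is checked rather than wrapping: `checked_add` / `checked_sub`
// return `None` on overflow or underflow, and `FromStr` parses base-10
// strings. A quick sketch with illustrative values:
fn amount_arithmetic_sketch() {
    use core::str::FromStr;

    // Base-10 parsing into the 256-bit backing integer.
    let a = Amount::from_str("1000000").expect("valid base-10 amount");
    let b = Amount::from(400_000u64);
    assert_eq!(a.checked_sub(b), Some(Amount::from(600_000u64)));

    // Spending more than the balance yields None instead of wrapping around.
    assert_eq!(b.checked_sub(a), None);

    // Overflow at the top of the U256 range is also reported as None.
    let max = Amount::from(U256::max_value());
    assert_eq!(max.checked_add(Amount::from(1u64)), None);
}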
assert!( - PrefixedDenom::from_str("transfer/channel-0/uatom").is_ok(), - "valid single trace info" - ); - assert!( - PrefixedDenom::from_str("transfer/channel-0/transfer/channel-1/uatom").is_ok(), - "valid multiple trace info" - ); - assert!( - PrefixedDenom::from_str("(transfer)/channel-0/uatom").is_err(), - "invalid port" - ); - assert!( - PrefixedDenom::from_str("transfer/(channel-0)/uatom").is_err(), - "invalid channel" - ); - - Ok(()) - } - - #[test] - fn test_denom_trace() -> Result<(), Error> { - assert_eq!( - PrefixedDenom::from_str("transfer/channel-0/uatom")?, - PrefixedDenom { - trace_path: "transfer/channel-0".parse()?, - base_denom: "uatom".parse()? - }, - "valid single trace info" - ); - assert_eq!( - PrefixedDenom::from_str("transfer/channel-0/transfer/channel-1/uatom")?, - PrefixedDenom { - trace_path: "transfer/channel-0/transfer/channel-1".parse()?, - base_denom: "uatom".parse()? - }, - "valid multiple trace info" - ); - - Ok(()) - } - - #[test] - fn test_denom_serde() -> Result<(), Error> { - let dt_str = "transfer/channel-0/uatom"; - let dt = PrefixedDenom::from_str(dt_str)?; - assert_eq!(dt.to_string(), dt_str, "valid single trace info"); - - let dt_str = "transfer/channel-0/transfer/channel-1/uatom"; - let dt = PrefixedDenom::from_str(dt_str)?; - assert_eq!(dt.to_string(), dt_str, "valid multiple trace info"); - - Ok(()) - } - - #[test] - fn test_trace_path() -> Result<(), Error> { - assert!(TracePath::from_str("").is_ok(), "empty trace path"); - assert!( - TracePath::from_str("transfer/uatom").is_err(), - "invalid trace path: bad ChannelId" - ); - assert!( - TracePath::from_str("transfer//uatom").is_err(), - "malformed trace path: missing ChannelId" - ); - assert!( - TracePath::from_str("transfer/channel-0/").is_err(), - "malformed trace path: trailing delimiter" - ); - - let prefix_1 = TracePrefix::new("transfer".parse().unwrap(), "channel-1".parse().unwrap()); - let prefix_2 = TracePrefix::new("transfer".parse().unwrap(), "channel-0".parse().unwrap()); - let mut trace_path = TracePath(vec![prefix_1.clone()]); - - trace_path.add_prefix(prefix_2.clone()); - assert_eq!( - TracePath::from_str("transfer/channel-0/transfer/channel-1")?, - trace_path - ); - assert_eq!( - TracePath(vec![prefix_1.clone(), prefix_2.clone()]), - trace_path - ); - - trace_path.remove_prefix(&prefix_2); - assert_eq!(TracePath::from_str("transfer/channel-1")?, trace_path); - assert_eq!(TracePath(vec![prefix_1.clone()]), trace_path); - - trace_path.remove_prefix(&prefix_1); - assert!(trace_path.is_empty()); - - Ok(()) - } + use super::*; + + #[test] + fn test_denom_validation() -> Result<(), Error> { + assert!(BaseDenom::from_str("").is_err(), "empty base denom"); + assert!(BaseDenom::from_str("uatom").is_ok(), "valid base denom"); + assert!(PrefixedDenom::from_str("").is_err(), "empty denom trace"); + assert!( + PrefixedDenom::from_str("transfer/channel-0/").is_err(), + "empty base denom with trace" + ); + assert!(PrefixedDenom::from_str("/uatom").is_err(), "empty prefix"); + assert!(PrefixedDenom::from_str("//uatom").is_err(), "empty ids"); + assert!(PrefixedDenom::from_str("transfer/").is_err(), "single trace"); + assert!(PrefixedDenom::from_str("transfer/atom").is_err(), "single trace with base denom"); + assert!( + PrefixedDenom::from_str("transfer/channel-0/uatom").is_ok(), + "valid single trace info" + ); + assert!( + PrefixedDenom::from_str("transfer/channel-0/transfer/channel-1/uatom").is_ok(), + "valid multiple trace info" + ); + 
assert!(PrefixedDenom::from_str("(transfer)/channel-0/uatom").is_err(), "invalid port"); + assert!(PrefixedDenom::from_str("transfer/(channel-0)/uatom").is_err(), "invalid channel"); + + Ok(()) + } + + #[test] + fn test_denom_trace() -> Result<(), Error> { + assert_eq!( + PrefixedDenom::from_str("transfer/channel-0/uatom")?, + PrefixedDenom { + trace_path: "transfer/channel-0".parse()?, + base_denom: "uatom".parse()? + }, + "valid single trace info" + ); + assert_eq!( + PrefixedDenom::from_str("transfer/channel-0/transfer/channel-1/uatom")?, + PrefixedDenom { + trace_path: "transfer/channel-0/transfer/channel-1".parse()?, + base_denom: "uatom".parse()? + }, + "valid multiple trace info" + ); + + Ok(()) + } + + #[test] + fn test_denom_serde() -> Result<(), Error> { + let dt_str = "transfer/channel-0/uatom"; + let dt = PrefixedDenom::from_str(dt_str)?; + assert_eq!(dt.to_string(), dt_str, "valid single trace info"); + + let dt_str = "transfer/channel-0/transfer/channel-1/uatom"; + let dt = PrefixedDenom::from_str(dt_str)?; + assert_eq!(dt.to_string(), dt_str, "valid multiple trace info"); + + Ok(()) + } + + #[test] + fn test_trace_path() -> Result<(), Error> { + assert!(TracePath::from_str("").is_ok(), "empty trace path"); + assert!( + TracePath::from_str("transfer/uatom").is_err(), + "invalid trace path: bad ChannelId" + ); + assert!( + TracePath::from_str("transfer//uatom").is_err(), + "malformed trace path: missing ChannelId" + ); + assert!( + TracePath::from_str("transfer/channel-0/").is_err(), + "malformed trace path: trailing delimiter" + ); + + let prefix_1 = TracePrefix::new("transfer".parse().unwrap(), "channel-1".parse().unwrap()); + let prefix_2 = TracePrefix::new("transfer".parse().unwrap(), "channel-0".parse().unwrap()); + let mut trace_path = TracePath(vec![prefix_1.clone()]); + + trace_path.add_prefix(prefix_2.clone()); + assert_eq!(TracePath::from_str("transfer/channel-0/transfer/channel-1")?, trace_path); + assert_eq!(TracePath(vec![prefix_1.clone(), prefix_2.clone()]), trace_path); + + trace_path.remove_prefix(&prefix_2); + assert_eq!(TracePath::from_str("transfer/channel-1")?, trace_path); + assert_eq!(TracePath(vec![prefix_1.clone()]), trace_path); + + trace_path.remove_prefix(&prefix_1); + assert!(trace_path.is_empty()); + + Ok(()) + } } diff --git a/modules/src/applications/transfer/error.rs b/modules/src/applications/transfer/error.rs index c9f71fdc33..edea8e86fd 100644 --- a/modules/src/applications/transfer/error.rs +++ b/modules/src/applications/transfer/error.rs @@ -5,136 +5,140 @@ use subtle_encoding::Error as EncodingError; use tendermint_proto::Error as TendermintProtoError; use uint::FromStrRadixErr; -use crate::core::ics04_channel::channel::Order; -use crate::core::ics04_channel::error as channel_error; -use crate::core::ics04_channel::Version; -use crate::core::ics24_host::error::ValidationError; -use crate::core::ics24_host::identifier::{ChannelId, PortId}; -use crate::prelude::*; -use crate::signer::SignerError; +use crate::{ + core::{ + ics04_channel::{channel::Order, error as channel_error, Version}, + ics24_host::{ + error::ValidationError, + identifier::{ChannelId, PortId}, + }, + }, + prelude::*, + signer::SignerError, +}; define_error! 
{ - #[derive(Debug, PartialEq, Eq)] - Error { - UnknowMessageTypeUrl - { url: String } - | e | { format_args!("unrecognized ICS-20 transfer message type URL {0}", e.url) }, + #[derive(Debug, PartialEq, Eq)] + Error { + UnknowMessageTypeUrl + { url: String } + | e | { format_args!("unrecognized ICS-20 transfer message type URL {0}", e.url) }, - Ics04Channel - [ channel_error::Error ] - |_ | { "Ics04 channel error" }, + Ics04Channel + [ channel_error::Error ] + |_ | { "Ics04 channel error" }, - DestinationChannelNotFound - { port_id: PortId, channel_id: ChannelId } - | e | { format_args!("destination channel not found in the counterparty of port_id {0} and channel_id {1} ", e.port_id, e.channel_id) }, + DestinationChannelNotFound + { port_id: PortId, channel_id: ChannelId } + | e | { format_args!("destination channel not found in the counterparty of port_id {0} and channel_id {1} ", e.port_id, e.channel_id) }, - InvalidPortId - { context: String } - [ ValidationError ] - | _ | { "invalid port identifier" }, + InvalidPortId + { context: String } + [ ValidationError ] + | _ | { "invalid port identifier" }, - InvalidChannelId - { context: String } - [ ValidationError ] - | _ | { "invalid channel identifier" }, + InvalidChannelId + { context: String } + [ ValidationError ] + | _ | { "invalid channel identifier" }, - InvalidPacketTimeoutHeight - { context: String } - | _ | { "invalid packet timeout height value" }, + InvalidPacketTimeoutHeight + { context: String } + | _ | { "invalid packet timeout height value" }, - InvalidPacketTimeoutTimestamp - { timestamp: u64 } - | _ | { "invalid packet timeout timestamp value" }, + InvalidPacketTimeoutTimestamp + { timestamp: u64 } + | _ | { "invalid packet timeout timestamp value" }, - Utf8 - [ DisplayOnly ] - | _ | { "utf8 decoding error" }, + Utf8 + [ DisplayOnly ] + | _ | { "utf8 decoding error" }, - EmptyBaseDenom - |_| { "base denomination is empty" }, + EmptyBaseDenom + |_| { "base denomination is empty" }, - InvalidTracePortId - { pos: usize } - [ ValidationError ] - | e | { format_args!("invalid port id in trace at position: {0}", e.pos) }, + InvalidTracePortId + { pos: usize } + [ ValidationError ] + | e | { format_args!("invalid port id in trace at position: {0}", e.pos) }, - InvalidTraceChannelId - { pos: usize } - [ ValidationError ] - | e | { format_args!("invalid channel id in trace at position: {0}", e.pos) }, + InvalidTraceChannelId + { pos: usize } + [ ValidationError ] + | e | { format_args!("invalid channel id in trace at position: {0}", e.pos) }, - InvalidTraceLength - { len: usize } - | e | { format_args!("trace length must be even but got: {0}", e.len) }, + InvalidTraceLength + { len: usize } + | e | { format_args!("trace length must be even but got: {0}", e.len) }, - InvalidAmount - [ TraceError ] - | _ | { "invalid amount" }, + InvalidAmount + [ TraceError ] + | _ | { "invalid amount" }, - InvalidToken - | _ | { "invalid token" }, + InvalidToken + | _ | { "invalid token" }, - Signer - [ SignerError ] - | _ | { "failed to parse signer" }, + Signer + [ SignerError ] + | _ | { "failed to parse signer" }, - MissingDenomIbcPrefix - | _ | { "missing 'ibc/' prefix in denomination" }, + MissingDenomIbcPrefix + | _ | { "missing 'ibc/' prefix in denomination" }, - MalformedHashDenom - | _ | { "hashed denom must be of the form 'ibc/{Hash}'" }, + MalformedHashDenom + | _ | { "hashed denom must be of the form 'ibc/{Hash}'" }, - ParseHex - [ TraceError ] - | _ | { "invalid hex string" }, + ParseHex + [ TraceError ] + | _ | { "invalid hex string" 
}, - ChanSeqExceedsLimit - { sequence: u64 } - | e | { format_args!("channel sequence ({0}) exceeds limit of {1}", e.sequence, u32::MAX) }, + ChanSeqExceedsLimit + { sequence: u64 } + | e | { format_args!("channel sequence ({0}) exceeds limit of {1}", e.sequence, u32::MAX) }, - ChannelNotUnordered - { order: Order } - | e | { format_args!("expected '{0}' channel, got '{1}'", Order::Unordered, e.order) }, + ChannelNotUnordered + { order: Order } + | e | { format_args!("expected '{0}' channel, got '{1}'", Order::Unordered, e.order) }, - InvalidVersion - { version: Version } - | e | { format_args!("expected version '{0}', got '{1}'", Version::ics20(), e.version) }, + InvalidVersion + { version: Version } + | e | { format_args!("expected version '{0}', got '{1}'", Version::ics20(), e.version) }, - InvalidCounterpartyVersion - { version: Version } - | e | { format_args!("expected counterparty version '{0}', got '{1}'", Version::ics20(), e.version) }, + InvalidCounterpartyVersion + { version: Version } + | e | { format_args!("expected counterparty version '{0}', got '{1}'", Version::ics20(), e.version) }, - CantCloseChannel - | _ | { "channel cannot be closed" }, + CantCloseChannel + | _ | { "channel cannot be closed" }, - PacketDataDeserialization - | _ | { "failed to deserialize packet data" }, + PacketDataDeserialization + | _ | { "failed to deserialize packet data" }, - AckDeserialization - | _ | { "failed to deserialize acknowledgement" }, + AckDeserialization + | _ | { "failed to deserialize acknowledgement" }, - ReceiveDisabled - | _ | { "receive is not enabled" }, + ReceiveDisabled + | _ | { "receive is not enabled" }, - SendDisabled - | _ | { "send is not enabled" }, + SendDisabled + | _ | { "send is not enabled" }, - ParseAccountFailure - | _ | { "failed to parse as AccountId" }, + ParseAccountFailure + | _ | { "failed to parse as AccountId" }, - InvalidPort - { port_id: PortId, exp_port_id: PortId } - | e | { format_args!("invalid port: '{0}', expected '{1}'", e.port_id, e.exp_port_id) }, + InvalidPort + { port_id: PortId, exp_port_id: PortId } + | e | { format_args!("invalid port: '{0}', expected '{1}'", e.port_id, e.exp_port_id) }, - TraceNotFound - | _ | { "no trace associated with specified hash" }, + TraceNotFound + | _ | { "no trace associated with specified hash" }, - DecodeRawMsg - [ TraceError ] - | _ | { "error decoding raw msg" }, + DecodeRawMsg + [ TraceError ] + | _ | { "error decoding raw msg" }, - UnknownMsgType - { msg_type: String } - | e | { format_args!("unknown msg type: {0}", e.msg_type) }, - } + UnknownMsgType + { msg_type: String } + | e | { format_args!("unknown msg type: {0}", e.msg_type) }, + } } diff --git a/modules/src/applications/transfer/events.rs b/modules/src/applications/transfer/events.rs index 590638f448..546fc19b70 100644 --- a/modules/src/applications/transfer/events.rs +++ b/modules/src/applications/transfer/events.rs @@ -1,8 +1,11 @@ -use crate::applications::transfer::acknowledgement::Acknowledgement; -use crate::applications::transfer::{Amount, PrefixedDenom, MODULE_ID_STR}; -use crate::events::ModuleEvent; -use crate::prelude::*; -use crate::signer::Signer; +use crate::{ + applications::transfer::{ + acknowledgement::Acknowledgement, Amount, PrefixedDenom, MODULE_ID_STR, + }, + events::ModuleEvent, + prelude::*, + signer::Signer, +}; const EVENT_TYPE_PACKET: &str = "fungible_token_packet"; const EVENT_TYPE_TIMEOUT: &str = "timeout"; @@ -10,163 +13,147 @@ const EVENT_TYPE_DENOM_TRACE: &str = "denomination_trace"; const EVENT_TYPE_TRANSFER: 
&str = "ibc_transfer"; pub enum Event { - Recv(RecvEvent), - Ack(AckEvent), - AckStatus(AckStatusEvent), - Timeout(TimeoutEvent), - DenomTrace(DenomTraceEvent), - Transfer(TransferEvent), + Recv(RecvEvent), + Ack(AckEvent), + AckStatus(AckStatusEvent), + Timeout(TimeoutEvent), + DenomTrace(DenomTraceEvent), + Transfer(TransferEvent), } pub struct RecvEvent { - pub receiver: Signer, - pub denom: PrefixedDenom, - pub amount: Amount, - pub success: bool, + pub receiver: Signer, + pub denom: PrefixedDenom, + pub amount: Amount, + pub success: bool, } impl From for ModuleEvent { - fn from(ev: RecvEvent) -> Self { - let RecvEvent { - receiver, - denom, - amount, - success, - } = ev; - Self { - kind: EVENT_TYPE_PACKET.to_string(), - module_name: MODULE_ID_STR.parse().expect("invalid ModuleId"), - attributes: vec![ - ("receiver", receiver).into(), - ("denom", denom).into(), - ("amount", amount).into(), - ("success", success).into(), - ], - } - } + fn from(ev: RecvEvent) -> Self { + let RecvEvent { receiver, denom, amount, success } = ev; + Self { + kind: EVENT_TYPE_PACKET.to_string(), + module_name: MODULE_ID_STR.parse().expect("invalid ModuleId"), + attributes: vec![ + ("receiver", receiver).into(), + ("denom", denom).into(), + ("amount", amount).into(), + ("success", success).into(), + ], + } + } } pub struct AckEvent { - pub receiver: Signer, - pub denom: PrefixedDenom, - pub amount: Amount, - pub acknowledgement: Acknowledgement, + pub receiver: Signer, + pub denom: PrefixedDenom, + pub amount: Amount, + pub acknowledgement: Acknowledgement, } impl From for ModuleEvent { - fn from(ev: AckEvent) -> Self { - let AckEvent { - receiver, - denom, - amount, - acknowledgement, - } = ev; - Self { - kind: EVENT_TYPE_PACKET.to_string(), - module_name: MODULE_ID_STR.parse().expect("invalid ModuleId"), - attributes: vec![ - ("receiver", receiver).into(), - ("denom", denom).into(), - ("amount", amount).into(), - ("acknowledgement", acknowledgement).into(), - ], - } - } + fn from(ev: AckEvent) -> Self { + let AckEvent { receiver, denom, amount, acknowledgement } = ev; + Self { + kind: EVENT_TYPE_PACKET.to_string(), + module_name: MODULE_ID_STR.parse().expect("invalid ModuleId"), + attributes: vec![ + ("receiver", receiver).into(), + ("denom", denom).into(), + ("amount", amount).into(), + ("acknowledgement", acknowledgement).into(), + ], + } + } } pub struct AckStatusEvent { - pub acknowledgement: Acknowledgement, + pub acknowledgement: Acknowledgement, } impl From for ModuleEvent { - fn from(ev: AckStatusEvent) -> Self { - let AckStatusEvent { acknowledgement } = ev; - let mut event = Self { - kind: EVENT_TYPE_PACKET.to_string(), - module_name: MODULE_ID_STR.parse().expect("invalid ModuleId"), - attributes: vec![], - }; - let attr_label = match acknowledgement { - Acknowledgement::Success(_) => "success", - Acknowledgement::Error(_) => "error", - }; - event - .attributes - .push((attr_label, acknowledgement.to_string()).into()); - event - } + fn from(ev: AckStatusEvent) -> Self { + let AckStatusEvent { acknowledgement } = ev; + let mut event = Self { + kind: EVENT_TYPE_PACKET.to_string(), + module_name: MODULE_ID_STR.parse().expect("invalid ModuleId"), + attributes: vec![], + }; + let attr_label = match acknowledgement { + Acknowledgement::Success(_) => "success", + Acknowledgement::Error(_) => "error", + }; + event.attributes.push((attr_label, acknowledgement.to_string()).into()); + event + } } pub struct TimeoutEvent { - pub refund_receiver: Signer, - pub refund_denom: PrefixedDenom, - pub refund_amount: 
Amount, + pub refund_receiver: Signer, + pub refund_denom: PrefixedDenom, + pub refund_amount: Amount, } impl From for ModuleEvent { - fn from(ev: TimeoutEvent) -> Self { - let TimeoutEvent { - refund_receiver, - refund_denom, - refund_amount, - } = ev; - Self { - kind: EVENT_TYPE_TIMEOUT.to_string(), - module_name: MODULE_ID_STR.parse().expect("invalid ModuleId"), - attributes: vec![ - ("refund_receiver", refund_receiver).into(), - ("refund_denom", refund_denom).into(), - ("refund_amount", refund_amount).into(), - ], - } - } + fn from(ev: TimeoutEvent) -> Self { + let TimeoutEvent { refund_receiver, refund_denom, refund_amount } = ev; + Self { + kind: EVENT_TYPE_TIMEOUT.to_string(), + module_name: MODULE_ID_STR.parse().expect("invalid ModuleId"), + attributes: vec![ + ("refund_receiver", refund_receiver).into(), + ("refund_denom", refund_denom).into(), + ("refund_amount", refund_amount).into(), + ], + } + } } pub struct DenomTraceEvent { - pub trace_hash: Option, - pub denom: PrefixedDenom, + pub trace_hash: Option, + pub denom: PrefixedDenom, } impl From for ModuleEvent { - fn from(ev: DenomTraceEvent) -> Self { - let DenomTraceEvent { trace_hash, denom } = ev; - let mut ev = Self { - kind: EVENT_TYPE_DENOM_TRACE.to_string(), - module_name: MODULE_ID_STR.parse().expect("invalid ModuleId"), - attributes: vec![("denom", denom).into()], - }; - if let Some(hash) = trace_hash { - ev.attributes.push(("trace_hash", hash).into()); - } - ev - } + fn from(ev: DenomTraceEvent) -> Self { + let DenomTraceEvent { trace_hash, denom } = ev; + let mut ev = Self { + kind: EVENT_TYPE_DENOM_TRACE.to_string(), + module_name: MODULE_ID_STR.parse().expect("invalid ModuleId"), + attributes: vec![("denom", denom).into()], + }; + if let Some(hash) = trace_hash { + ev.attributes.push(("trace_hash", hash).into()); + } + ev + } } pub struct TransferEvent { - pub sender: Signer, - pub receiver: Signer, + pub sender: Signer, + pub receiver: Signer, } impl From for ModuleEvent { - fn from(ev: TransferEvent) -> Self { - let TransferEvent { sender, receiver } = ev; - Self { - kind: EVENT_TYPE_TRANSFER.to_string(), - module_name: MODULE_ID_STR.parse().expect("invalid ModuleId"), - attributes: vec![("sender", sender).into(), ("receiver", receiver).into()], - } - } + fn from(ev: TransferEvent) -> Self { + let TransferEvent { sender, receiver } = ev; + Self { + kind: EVENT_TYPE_TRANSFER.to_string(), + module_name: MODULE_ID_STR.parse().expect("invalid ModuleId"), + attributes: vec![("sender", sender).into(), ("receiver", receiver).into()], + } + } } impl From for ModuleEvent { - fn from(ev: Event) -> Self { - match ev { - Event::Recv(ev) => ev.into(), - Event::Ack(ev) => ev.into(), - Event::AckStatus(ev) => ev.into(), - Event::Timeout(ev) => ev.into(), - Event::DenomTrace(ev) => ev.into(), - Event::Transfer(ev) => ev.into(), - } - } + fn from(ev: Event) -> Self { + match ev { + Event::Recv(ev) => ev.into(), + Event::Ack(ev) => ev.into(), + Event::AckStatus(ev) => ev.into(), + Event::Timeout(ev) => ev.into(), + Event::DenomTrace(ev) => ev.into(), + Event::Transfer(ev) => ev.into(), + } + } } diff --git a/modules/src/applications/transfer/msgs/transfer.rs b/modules/src/applications/transfer/msgs/transfer.rs index 3ad22b6b39..8d0dd2e8af 100644 --- a/modules/src/applications/transfer/msgs/transfer.rs +++ b/modules/src/applications/transfer/msgs/transfer.rs @@ -2,17 +2,22 @@ use crate::prelude::*; -use ibc_proto::cosmos::base::v1beta1::Coin; -use ibc_proto::google::protobuf::Any; -use 
ibc_proto::ibc::applications::transfer::v1::MsgTransfer as RawMsgTransfer; +use ibc_proto::{ + cosmos::base::v1beta1::Coin, google::protobuf::Any, + ibc::applications::transfer::v1::MsgTransfer as RawMsgTransfer, +}; use tendermint_proto::Protobuf; -use crate::applications::transfer::error::Error; -use crate::core::ics02_client::height::Height; -use crate::core::ics24_host::identifier::{ChannelId, PortId}; -use crate::signer::Signer; -use crate::timestamp::Timestamp; -use crate::tx_msg::Msg; +use crate::{ + applications::transfer::error::Error, + core::{ + ics02_client::height::Height, + ics24_host::identifier::{ChannelId, PortId}, + }, + signer::Signer, + timestamp::Timestamp, + tx_msg::Msg, +}; pub const TYPE_URL: &str = "/ibc.applications.transfer.v1.MsgTransfer"; @@ -25,139 +30,129 @@ pub const TYPE_URL: &str = "/ibc.applications.transfer.v1.MsgTransfer"; /// let the library figure out how to build the packet properly. #[derive(Clone, Debug, PartialEq)] pub struct MsgTransfer { - /// the port on which the packet will be sent - pub source_port: PortId, - /// the channel by which the packet will be sent - pub source_channel: ChannelId, - /// the tokens to be transferred - pub token: C, - /// the sender address - pub sender: Signer, - /// the recipient address on the destination chain - pub receiver: Signer, - /// Timeout height relative to the current block height. - /// The timeout is disabled when set to 0. - pub timeout_height: Height, - /// Timeout timestamp relative to the current block timestamp. - /// The timeout is disabled when set to 0. - pub timeout_timestamp: Timestamp, + /// the port on which the packet will be sent + pub source_port: PortId, + /// the channel by which the packet will be sent + pub source_channel: ChannelId, + /// the tokens to be transferred + pub token: C, + /// the sender address + pub sender: Signer, + /// the recipient address on the destination chain + pub receiver: Signer, + /// Timeout height relative to the current block height. + /// The timeout is disabled when set to 0. + pub timeout_height: Height, + /// Timeout timestamp relative to the current block timestamp. + /// The timeout is disabled when set to 0. 
+ pub timeout_timestamp: Timestamp, } impl Msg for MsgTransfer { - type ValidationError = Error; - type Raw = RawMsgTransfer; + type ValidationError = Error; + type Raw = RawMsgTransfer; - fn route(&self) -> String { - crate::keys::ROUTER_KEY.to_string() - } + fn route(&self) -> String { + crate::keys::ROUTER_KEY.to_string() + } - fn type_url(&self) -> String { - TYPE_URL.to_string() - } + fn type_url(&self) -> String { + TYPE_URL.to_string() + } } impl TryFrom for MsgTransfer { - type Error = Error; - - fn try_from(raw_msg: RawMsgTransfer) -> Result { - let timeout_timestamp = Timestamp::from_nanoseconds(raw_msg.timeout_timestamp) - .map_err(|_| Error::invalid_packet_timeout_timestamp(raw_msg.timeout_timestamp))?; - - let timeout_height = match raw_msg.timeout_height.clone() { - None => Height::zero(), - Some(raw_height) => raw_height.try_into().map_err(|e| { - Error::invalid_packet_timeout_height(format!("invalid timeout height {}", e)) - })?, - }; - - Ok(MsgTransfer { - source_port: raw_msg - .source_port - .parse() - .map_err(|e| Error::invalid_port_id(raw_msg.source_port.clone(), e))?, - source_channel: raw_msg - .source_channel - .parse() - .map_err(|e| Error::invalid_channel_id(raw_msg.source_channel.clone(), e))?, - token: raw_msg.token.ok_or_else(Error::invalid_token)?, - sender: raw_msg.sender.parse().map_err(Error::signer)?, - receiver: raw_msg.receiver.parse().map_err(Error::signer)?, - timeout_height, - timeout_timestamp, - }) - } + type Error = Error; + + fn try_from(raw_msg: RawMsgTransfer) -> Result { + let timeout_timestamp = Timestamp::from_nanoseconds(raw_msg.timeout_timestamp) + .map_err(|_| Error::invalid_packet_timeout_timestamp(raw_msg.timeout_timestamp))?; + + let timeout_height = match raw_msg.timeout_height.clone() { + None => Height::zero(), + Some(raw_height) => raw_height.try_into().map_err(|e| { + Error::invalid_packet_timeout_height(format!("invalid timeout height {}", e)) + })?, + }; + + Ok(MsgTransfer { + source_port: raw_msg + .source_port + .parse() + .map_err(|e| Error::invalid_port_id(raw_msg.source_port.clone(), e))?, + source_channel: raw_msg + .source_channel + .parse() + .map_err(|e| Error::invalid_channel_id(raw_msg.source_channel.clone(), e))?, + token: raw_msg.token.ok_or_else(Error::invalid_token)?, + sender: raw_msg.sender.parse().map_err(Error::signer)?, + receiver: raw_msg.receiver.parse().map_err(Error::signer)?, + timeout_height, + timeout_timestamp, + }) + } } impl From for RawMsgTransfer { - fn from(domain_msg: MsgTransfer) -> Self { - RawMsgTransfer { - source_port: domain_msg.source_port.to_string(), - source_channel: domain_msg.source_channel.to_string(), - token: Some(domain_msg.token), - sender: domain_msg.sender.to_string(), - receiver: domain_msg.receiver.to_string(), - timeout_height: Some(domain_msg.timeout_height.into()), - timeout_timestamp: domain_msg.timeout_timestamp.nanoseconds(), - } - } + fn from(domain_msg: MsgTransfer) -> Self { + RawMsgTransfer { + source_port: domain_msg.source_port.to_string(), + source_channel: domain_msg.source_channel.to_string(), + token: Some(domain_msg.token), + sender: domain_msg.sender.to_string(), + receiver: domain_msg.receiver.to_string(), + timeout_height: Some(domain_msg.timeout_height.into()), + timeout_timestamp: domain_msg.timeout_timestamp.nanoseconds(), + } + } } impl Protobuf for MsgTransfer {} impl TryFrom for MsgTransfer { - type Error = Error; - - fn try_from(raw: Any) -> Result { - match raw.type_url.as_str() { - TYPE_URL => 
MsgTransfer::decode_vec(&raw.value).map_err(Error::decode_raw_msg), - _ => Err(Error::unknown_msg_type(raw.type_url)), - } - } + type Error = Error; + + fn try_from(raw: Any) -> Result { + match raw.type_url.as_str() { + TYPE_URL => MsgTransfer::decode_vec(&raw.value).map_err(Error::decode_raw_msg), + _ => Err(Error::unknown_msg_type(raw.type_url)), + } + } } impl From for Any { - fn from(msg: MsgTransfer) -> Self { - Self { - type_url: TYPE_URL.to_string(), - value: msg.encode_vec(), - } - } + fn from(msg: MsgTransfer) -> Self { + Self { type_url: TYPE_URL.to_string(), value: msg.encode_vec() } + } } #[cfg(test)] pub mod test_util { - use core::ops::Add; - use core::time::Duration; - - use super::MsgTransfer; - use crate::bigint::U256; - use crate::signer::Signer; - use crate::{ - applications::transfer::{BaseCoin, PrefixedCoin}, - core::ics24_host::identifier::{ChannelId, PortId}, - test_utils::get_dummy_bech32_account, - timestamp::Timestamp, - Height, - }; - - // Returns a dummy ICS20 `MsgTransfer`, for testing only! - pub fn get_dummy_msg_transfer(height: u64) -> MsgTransfer { - let address: Signer = get_dummy_bech32_account().as_str().parse().unwrap(); - MsgTransfer { - source_port: PortId::default(), - source_channel: ChannelId::default(), - token: BaseCoin { - denom: "uatom".parse().unwrap(), - amount: U256::from(10).into(), - } - .into(), - sender: address.clone(), - receiver: address, - timeout_timestamp: Timestamp::now().add(Duration::from_secs(10)).unwrap(), - timeout_height: Height { - revision_number: 0, - revision_height: height, - }, - } - } + use core::{ops::Add, time::Duration}; + + use super::MsgTransfer; + use crate::{ + applications::transfer::{BaseCoin, PrefixedCoin}, + bigint::U256, + core::ics24_host::identifier::{ChannelId, PortId}, + signer::Signer, + test_utils::get_dummy_bech32_account, + timestamp::Timestamp, + Height, + }; + + // Returns a dummy ICS20 `MsgTransfer`, for testing only! 
+ pub fn get_dummy_msg_transfer(height: u64) -> MsgTransfer { + let address: Signer = get_dummy_bech32_account().as_str().parse().unwrap(); + MsgTransfer { + source_port: PortId::default(), + source_channel: ChannelId::default(), + token: BaseCoin { denom: "uatom".parse().unwrap(), amount: U256::from(10).into() } + .into(), + sender: address.clone(), + receiver: address, + timeout_timestamp: Timestamp::now().add(Duration::from_secs(10)).unwrap(), + timeout_height: Height { revision_number: 0, revision_height: height }, + } + } } diff --git a/modules/src/applications/transfer/packet.rs b/modules/src/applications/transfer/packet.rs index 643ee5e465..d706334b3c 100644 --- a/modules/src/applications/transfer/packet.rs +++ b/modules/src/applications/transfer/packet.rs @@ -1,43 +1,41 @@ use alloc::string::ToString; -use core::convert::TryFrom; -use core::str::FromStr; +use core::{convert::TryFrom, str::FromStr}; use ibc_proto::ibc::applications::transfer::v2::FungibleTokenPacketData as RawPacketData; use serde::{Deserialize, Serialize}; -use super::error::Error; -use super::{Amount, PrefixedCoin, PrefixedDenom}; +use super::{error::Error, Amount, PrefixedCoin, PrefixedDenom}; use crate::signer::Signer; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct PacketData { - pub token: PrefixedCoin, - pub sender: Signer, - pub receiver: Signer, + pub token: PrefixedCoin, + pub sender: Signer, + pub receiver: Signer, } impl TryFrom for PacketData { - type Error = Error; + type Error = Error; - fn try_from(raw_pkt_data: RawPacketData) -> Result { - // This denom may be prefixed or unprefixed. - let denom = PrefixedDenom::from_str(&raw_pkt_data.denom)?; - let amount = Amount::from_str(&raw_pkt_data.amount)?; - Ok(Self { - token: PrefixedCoin { denom, amount }, - sender: raw_pkt_data.sender.parse().map_err(Error::signer)?, - receiver: raw_pkt_data.receiver.parse().map_err(Error::signer)?, - }) - } + fn try_from(raw_pkt_data: RawPacketData) -> Result { + // This denom may be prefixed or unprefixed. + let denom = PrefixedDenom::from_str(&raw_pkt_data.denom)?; + let amount = Amount::from_str(&raw_pkt_data.amount)?; + Ok(Self { + token: PrefixedCoin { denom, amount }, + sender: raw_pkt_data.sender.parse().map_err(Error::signer)?, + receiver: raw_pkt_data.receiver.parse().map_err(Error::signer)?, + }) + } } impl From for RawPacketData { - fn from(pkt_data: PacketData) -> Self { - Self { - denom: pkt_data.token.denom.to_string(), - amount: pkt_data.token.amount.to_string(), - sender: pkt_data.sender.to_string(), - receiver: pkt_data.receiver.to_string(), - } - } + fn from(pkt_data: PacketData) -> Self { + Self { + denom: pkt_data.token.denom.to_string(), + amount: pkt_data.token.amount.to_string(), + sender: pkt_data.sender.to_string(), + receiver: pkt_data.receiver.to_string(), + } + } } diff --git a/modules/src/applications/transfer/relay.rs b/modules/src/applications/transfer/relay.rs index 63fb15103b..934c48afbb 100644 --- a/modules/src/applications/transfer/relay.rs +++ b/modules/src/applications/transfer/relay.rs @@ -1,10 +1,12 @@ //! This module implements the processing logic for ICS20 (token transfer) message. 
-use crate::applications::transfer::context::Ics20Context; -use crate::applications::transfer::error::Error as Ics20Error; -use crate::applications::transfer::is_sender_chain_source; -use crate::applications::transfer::packet::PacketData; -use crate::core::ics04_channel::packet::Packet; -use crate::prelude::*; +use crate::{ + applications::transfer::{ + context::Ics20Context, error::Error as Ics20Error, is_sender_chain_source, + packet::PacketData, + }, + core::ics04_channel::packet::Packet, + prelude::*, +}; pub mod on_ack_packet; pub mod on_recv_packet; @@ -12,29 +14,26 @@ pub mod on_timeout_packet; pub mod send_transfer; fn refund_packet_token( - ctx: &mut impl Ics20Context, - packet: &Packet, - data: &PacketData, + ctx: &mut impl Ics20Context, + packet: &Packet, + data: &PacketData, ) -> Result<(), Ics20Error> { - let sender = data - .sender - .clone() - .try_into() - .map_err(|_| Ics20Error::parse_account_failure())?; + let sender = data + .sender + .clone() + .try_into() + .map_err(|_| Ics20Error::parse_account_failure())?; - if is_sender_chain_source( - packet.source_port.clone(), - packet.source_channel, - &data.token.denom, - ) { - // unescrow tokens back to sender - let escrow_address = - ctx.get_channel_escrow_address(&packet.source_port, packet.source_channel)?; + if is_sender_chain_source(packet.source_port.clone(), packet.source_channel, &data.token.denom) + { + // unescrow tokens back to sender + let escrow_address = + ctx.get_channel_escrow_address(&packet.source_port, packet.source_channel)?; - ctx.send_coins(&escrow_address, &sender, &data.token) - } - // mint vouchers back to sender - else { - ctx.mint_coins(&sender, &data.token) - } + ctx.send_coins(&escrow_address, &sender, &data.token) + } + // mint vouchers back to sender + else { + ctx.mint_coins(&sender, &data.token) + } } diff --git a/modules/src/applications/transfer/relay/on_ack_packet.rs b/modules/src/applications/transfer/relay/on_ack_packet.rs index a254de1fc9..5efaba8959 100644 --- a/modules/src/applications/transfer/relay/on_ack_packet.rs +++ b/modules/src/applications/transfer/relay/on_ack_packet.rs @@ -1,19 +1,20 @@ -use crate::applications::transfer::acknowledgement::Acknowledgement; -use crate::applications::transfer::context::Ics20Context; -use crate::applications::transfer::error::Error as Ics20Error; -use crate::applications::transfer::packet::PacketData; -use crate::applications::transfer::relay::refund_packet_token; -use crate::core::ics04_channel::packet::Packet; +use crate::{ + applications::transfer::{ + acknowledgement::Acknowledgement, context::Ics20Context, error::Error as Ics20Error, + packet::PacketData, relay::refund_packet_token, + }, + core::ics04_channel::packet::Packet, +}; pub fn process_ack_packet( - ctx: &mut impl Ics20Context, - packet: &Packet, - data: &PacketData, - ack: &Acknowledgement, + ctx: &mut impl Ics20Context, + packet: &Packet, + data: &PacketData, + ack: &Acknowledgement, ) -> Result<(), Ics20Error> { - if matches!(ack, Acknowledgement::Error(_)) { - refund_packet_token(ctx, packet, data)?; - } + if matches!(ack, Acknowledgement::Error(_)) { + refund_packet_token(ctx, packet, data)?; + } - Ok(()) + Ok(()) } diff --git a/modules/src/applications/transfer/relay/on_recv_packet.rs b/modules/src/applications/transfer/relay/on_recv_packet.rs index f6069ee10a..60a033da96 100644 --- a/modules/src/applications/transfer/relay/on_recv_packet.rs +++ b/modules/src/applications/transfer/relay/on_recv_packet.rs @@ -1,68 +1,70 @@ -use 
crate::applications::transfer::context::Ics20Context; -use crate::applications::transfer::error::Error as Ics20Error; -use crate::applications::transfer::events::DenomTraceEvent; -use crate::applications::transfer::packet::PacketData; -use crate::applications::transfer::{is_receiver_chain_source, TracePrefix}; -use crate::core::ics04_channel::packet::Packet; -use crate::core::ics26_routing::context::{ModuleOutputBuilder, WriteFn}; -use crate::prelude::*; +use crate::{ + applications::transfer::{ + context::Ics20Context, error::Error as Ics20Error, events::DenomTraceEvent, + is_receiver_chain_source, packet::PacketData, TracePrefix, + }, + core::{ + ics04_channel::packet::Packet, + ics26_routing::context::{ModuleOutputBuilder, WriteFn}, + }, + prelude::*, +}; pub fn process_recv_packet( - ctx: &Ctx, - output: &mut ModuleOutputBuilder, - packet: &Packet, - data: PacketData, + ctx: &Ctx, + output: &mut ModuleOutputBuilder, + packet: &Packet, + data: PacketData, ) -> Result, Ics20Error> { - if !ctx.is_receive_enabled() { - return Err(Ics20Error::receive_disabled()); - } + if !ctx.is_receive_enabled() { + return Err(Ics20Error::receive_disabled()) + } - let receiver_account = data - .receiver - .clone() - .try_into() - .map_err(|_| Ics20Error::parse_account_failure())?; + let receiver_account = data + .receiver + .clone() + .try_into() + .map_err(|_| Ics20Error::parse_account_failure())?; - if is_receiver_chain_source( - packet.source_port.clone(), - packet.source_channel, - &data.token.denom, - ) { - // sender chain is not the source, unescrow tokens - let prefix = TracePrefix::new(packet.source_port.clone(), packet.source_channel); - let coin = { - let mut c = data.token; - c.denom.remove_trace_prefix(&prefix); - c - }; + if is_receiver_chain_source( + packet.source_port.clone(), + packet.source_channel, + &data.token.denom, + ) { + // sender chain is not the source, unescrow tokens + let prefix = TracePrefix::new(packet.source_port.clone(), packet.source_channel); + let coin = { + let mut c = data.token; + c.denom.remove_trace_prefix(&prefix); + c + }; - let escrow_address = - ctx.get_channel_escrow_address(&packet.destination_port, packet.destination_channel)?; + let escrow_address = + ctx.get_channel_escrow_address(&packet.destination_port, packet.destination_channel)?; - Ok(Box::new(move |ctx| { - let ctx = ctx.downcast_mut::().unwrap(); - ctx.send_coins(&escrow_address, &receiver_account, &coin) - .map_err(|e| e.to_string()) - })) - } else { - // sender chain is the source, mint vouchers - let prefix = TracePrefix::new(packet.destination_port.clone(), packet.destination_channel); - let coin = { - let mut c = data.token; - c.denom.add_trace_prefix(prefix); - c - }; + Ok(Box::new(move |ctx| { + let ctx = ctx.downcast_mut::().unwrap(); + ctx.send_coins(&escrow_address, &receiver_account, &coin) + .map_err(|e| e.to_string()) + })) + } else { + // sender chain is the source, mint vouchers + let prefix = TracePrefix::new(packet.destination_port.clone(), packet.destination_channel); + let coin = { + let mut c = data.token; + c.denom.add_trace_prefix(prefix); + c + }; - let denom_trace_event = DenomTraceEvent { - trace_hash: ctx.denom_hash_string(&coin.denom), - denom: coin.denom.clone(), - }; - output.emit(denom_trace_event.into()); + let denom_trace_event = DenomTraceEvent { + trace_hash: ctx.denom_hash_string(&coin.denom), + denom: coin.denom.clone(), + }; + output.emit(denom_trace_event.into()); - Ok(Box::new(move |ctx| { - let ctx = ctx.downcast_mut::().unwrap(); - 
ctx.mint_coins(&receiver_account, &coin) - .map_err(|e| e.to_string()) - })) - } + Ok(Box::new(move |ctx| { + let ctx = ctx.downcast_mut::().unwrap(); + ctx.mint_coins(&receiver_account, &coin).map_err(|e| e.to_string()) + })) + } } diff --git a/modules/src/applications/transfer/relay/on_timeout_packet.rs b/modules/src/applications/transfer/relay/on_timeout_packet.rs index 192a3dd9b6..8b1f3f12c6 100644 --- a/modules/src/applications/transfer/relay/on_timeout_packet.rs +++ b/modules/src/applications/transfer/relay/on_timeout_packet.rs @@ -1,13 +1,15 @@ -use crate::applications::transfer::context::Ics20Context; -use crate::applications::transfer::error::Error as Ics20Error; -use crate::applications::transfer::packet::PacketData; -use crate::applications::transfer::relay::refund_packet_token; -use crate::core::ics04_channel::packet::Packet; +use crate::{ + applications::transfer::{ + context::Ics20Context, error::Error as Ics20Error, packet::PacketData, + relay::refund_packet_token, + }, + core::ics04_channel::packet::Packet, +}; pub fn process_timeout_packet( - ctx: &mut impl Ics20Context, - packet: &Packet, - data: &PacketData, + ctx: &mut impl Ics20Context, + packet: &Packet, + data: &PacketData, ) -> Result<(), Ics20Error> { - refund_packet_token(ctx, packet, data) + refund_packet_token(ctx, packet, data) } diff --git a/modules/src/applications/transfer/relay/send_transfer.rs b/modules/src/applications/transfer/relay/send_transfer.rs index 6925c6776c..37e2d5bd60 100644 --- a/modules/src/applications/transfer/relay/send_transfer.rs +++ b/modules/src/applications/transfer/relay/send_transfer.rs @@ -1,115 +1,89 @@ -use crate::applications::transfer::context::Ics20Context; -use crate::applications::transfer::error::Error; -use crate::applications::transfer::events::TransferEvent; -use crate::applications::transfer::msgs::transfer::MsgTransfer; -use crate::applications::transfer::packet::PacketData; -use crate::applications::transfer::{is_sender_chain_source, Coin, PrefixedCoin}; -use crate::core::ics04_channel::handler::send_packet::send_packet; -use crate::core::ics04_channel::packet::Packet; -use crate::events::ModuleEvent; -use crate::handler::{HandlerOutput, HandlerOutputBuilder}; -use crate::prelude::*; +use crate::{ + applications::transfer::{ + context::Ics20Context, error::Error, events::TransferEvent, is_sender_chain_source, + msgs::transfer::MsgTransfer, packet::PacketData, Coin, PrefixedCoin, + }, + core::ics04_channel::{handler::send_packet::send_packet, packet::Packet}, + events::ModuleEvent, + handler::{HandlerOutput, HandlerOutputBuilder}, + prelude::*, +}; /// This function handles the transfer sending logic. /// If this method returns an error, the runtime is expected to rollback all state modifications to /// the `Ctx` caused by all messages from the transaction that this `msg` is a part of. 
pub fn send_transfer( - ctx: &mut Ctx, - output: &mut HandlerOutputBuilder<()>, - msg: MsgTransfer, + ctx: &mut Ctx, + output: &mut HandlerOutputBuilder<()>, + msg: MsgTransfer, ) -> Result<(), Error> where - Ctx: Ics20Context, - C: TryInto, + Ctx: Ics20Context, + C: TryInto, { - if !ctx.is_send_enabled() { - return Err(Error::send_disabled()); - } + if !ctx.is_send_enabled() { + return Err(Error::send_disabled()) + } - let source_channel_end = ctx - .channel_end(&(msg.source_port.clone(), msg.source_channel)) - .map_err(Error::ics04_channel)?; + let source_channel_end = ctx + .channel_end(&(msg.source_port.clone(), msg.source_channel)) + .map_err(Error::ics04_channel)?; - let destination_port = source_channel_end.counterparty().port_id().clone(); - let destination_channel = *source_channel_end - .counterparty() - .channel_id() - .ok_or_else(|| { - Error::destination_channel_not_found(msg.source_port.clone(), msg.source_channel) - })?; + let destination_port = source_channel_end.counterparty().port_id().clone(); + let destination_channel = *source_channel_end.counterparty().channel_id().ok_or_else(|| { + Error::destination_channel_not_found(msg.source_port.clone(), msg.source_channel) + })?; - // get the next sequence - let sequence = ctx - .get_next_sequence_send(&(msg.source_port.clone(), msg.source_channel)) - .map_err(Error::ics04_channel)?; + // get the next sequence + let sequence = ctx + .get_next_sequence_send(&(msg.source_port.clone(), msg.source_channel)) + .map_err(Error::ics04_channel)?; - let token = msg.token.try_into().map_err(|_| Error::invalid_token())?; - let denom = token.denom.clone(); - let coin = Coin { - denom: denom.clone(), - amount: token.amount, - }; + let token = msg.token.try_into().map_err(|_| Error::invalid_token())?; + let denom = token.denom.clone(); + let coin = Coin { denom: denom.clone(), amount: token.amount }; - let sender = msg - .sender - .clone() - .try_into() - .map_err(|_| Error::parse_account_failure())?; + let sender = msg.sender.clone().try_into().map_err(|_| Error::parse_account_failure())?; - if is_sender_chain_source(msg.source_port.clone(), msg.source_channel, &denom) { - let escrow_address = - ctx.get_channel_escrow_address(&msg.source_port, msg.source_channel)?; - ctx.send_coins(&sender, &escrow_address, &coin)?; - } else { - ctx.burn_coins(&sender, &coin)?; - } + if is_sender_chain_source(msg.source_port.clone(), msg.source_channel, &denom) { + let escrow_address = + ctx.get_channel_escrow_address(&msg.source_port, msg.source_channel)?; + ctx.send_coins(&sender, &escrow_address, &coin)?; + } else { + ctx.burn_coins(&sender, &coin)?; + } - let data = { - let data = PacketData { - token: coin, - sender: msg.sender.clone(), - receiver: msg.receiver.clone(), - }; - serde_json::to_vec(&data).expect("PacketData's infallible Serialize impl failed") - }; + let data = { + let data = + PacketData { token: coin, sender: msg.sender.clone(), receiver: msg.receiver.clone() }; + serde_json::to_vec(&data).expect("PacketData's infallible Serialize impl failed") + }; - let packet = Packet { - sequence, - source_port: msg.source_port, - source_channel: msg.source_channel, - destination_port, - destination_channel, - data, - timeout_height: msg.timeout_height, - timeout_timestamp: msg.timeout_timestamp, - }; + let packet = Packet { + sequence, + source_port: msg.source_port, + source_channel: msg.source_channel, + destination_port, + destination_channel, + data, + timeout_height: msg.timeout_height, + timeout_timestamp: msg.timeout_timestamp, + }; - let 
HandlerOutput { - result, - log, - events, - } = send_packet(ctx, packet).map_err(Error::ics04_channel)?; + let HandlerOutput { result, log, events } = + send_packet(ctx, packet).map_err(Error::ics04_channel)?; - ctx.store_packet_result(result) - .map_err(Error::ics04_channel)?; + ctx.store_packet_result(result).map_err(Error::ics04_channel)?; - output.merge_output( - HandlerOutput::builder() - .with_log(log) - .with_events(events) - .with_result(()), - ); + output.merge_output(HandlerOutput::builder().with_log(log).with_events(events).with_result(())); - output.log(format!( - "IBC fungible token transfer: {} --({})--> {}", - msg.sender, token, msg.receiver - )); + output.log(format!( + "IBC fungible token transfer: {} --({})--> {}", + msg.sender, token, msg.receiver + )); - let transfer_event = TransferEvent { - sender: msg.sender, - receiver: msg.receiver, - }; - output.emit(ModuleEvent::from(transfer_event).into()); + let transfer_event = TransferEvent { sender: msg.sender, receiver: msg.receiver }; + output.emit(ModuleEvent::from(transfer_event).into()); - Ok(()) + Ok(()) } diff --git a/modules/src/clients/host_functions.rs b/modules/src/clients/host_functions.rs deleted file mode 100644 index e4499031fc..0000000000 --- a/modules/src/clients/host_functions.rs +++ /dev/null @@ -1,140 +0,0 @@ -use crate::core::ics02_client::error::Error; -use crate::prelude::*; -use core::marker::PhantomData; - -/// This trait captures all the functions that the host chain should provide for -/// crypto operations. -pub trait HostFunctionsProvider: Clone + Send + Sync + Default { - /// Keccak 256 hash function - fn keccak_256(input: &[u8]) -> [u8; 32]; - - /// Compressed Ecdsa public key recovery from a signature - fn secp256k1_ecdsa_recover_compressed( - signature: &[u8; 65], - value: &[u8; 32], - ) -> Option>; - - /// Recover the ED25519 pubkey that produced this signature, given a arbitrarily sized message - fn ed25519_verify(signature: &[u8; 64], msg: &[u8], pubkey: &[u8]) -> bool; - - /// This function should verify membership in a trie proof using sp_state_machine's read_child_proof_check - fn verify_membership_trie_proof( - root: &[u8; 32], - proof: &[Vec], - key: &[u8], - value: &[u8], - ) -> Result<(), Error>; - - /// This function should verify non membership in a trie proof using sp_state_machine's read_child_proof_check - fn verify_non_membership_trie_proof( - root: &[u8; 32], - proof: &[Vec], - key: &[u8], - ) -> Result<(), Error>; - - /// This function should verify membership in a trie proof using parity's sp-trie package - /// with a BlakeTwo256 Hasher - fn verify_timestamp_extrinsic( - root: &[u8; 32], - proof: &[Vec], - value: &[u8], - ) -> Result<(), Error>; - - /// Conduct a 256-bit Sha2 hash - fn sha256_digest(data: &[u8]) -> [u8; 32]; - - /// The SHA-256 hash algorithm - fn sha2_256(message: &[u8]) -> [u8; 32]; - - /// The SHA-512 hash algorithm - fn sha2_512(message: &[u8]) -> [u8; 64]; - - /// The SHA-512 hash algorithm with its output truncated to 256 bits. - fn sha2_512_truncated(message: &[u8]) -> [u8; 32]; - - /// SHA-3-512 hash function. - fn sha3_512(message: &[u8]) -> [u8; 64]; - - /// Ripemd160 hash function. - fn ripemd160(message: &[u8]) -> [u8; 20]; -} - -/// This is a work around that allows us to have one super trait [`HostFunctionsProvider`] -/// that encapsulates all the needed host functions by different subsytems, and then -/// implement the needed traits through this wrapper. 
-#[derive(Clone, Debug, Default)] -pub struct HostFunctionsManager(PhantomData); - -// implementation for beefy host functions -#[cfg(any(test, feature = "mocks", feature = "ics11_beefy"))] -impl beefy_client_primitives::HostFunctions for HostFunctionsManager -where - T: HostFunctionsProvider, -{ - fn keccak_256(input: &[u8]) -> [u8; 32] { - T::keccak_256(input) - } - - fn secp256k1_ecdsa_recover_compressed( - signature: &[u8; 65], - value: &[u8; 32], - ) -> Option> { - T::secp256k1_ecdsa_recover_compressed(signature, value) - } - - fn verify_timestamp_extrinsic( - root: sp_core::H256, - proof: &[Vec], - value: &[u8], - ) -> Result<(), beefy_client_primitives::error::BeefyClientError> { - T::verify_timestamp_extrinsic(root.as_fixed_bytes(), proof, value) - .map_err(|_| From::from("Timestamp verification failed".to_string())) - } -} - -// implementation for tendermint functions -impl tendermint_light_client_verifier::host_functions::HostFunctionsProvider - for HostFunctionsManager -where - T: HostFunctionsProvider, -{ - fn sha2_256(preimage: &[u8]) -> [u8; 32] { - T::sha256_digest(preimage) - } - - fn ed25519_verify(sig: &[u8], msg: &[u8], pub_key: &[u8]) -> bool { - let mut signature = [0u8; 64]; - signature.copy_from_slice(sig); - T::ed25519_verify(&signature, msg, pub_key) - } - - fn secp256k1_verify(_sig: &[u8], _message: &[u8], _public: &[u8]) -> bool { - unimplemented!() - } -} - -// implementation for ics23 -impl ics23::HostFunctionsProvider for HostFunctionsManager -where - H: HostFunctionsProvider, -{ - fn sha2_256(message: &[u8]) -> [u8; 32] { - H::sha2_256(message) - } - - fn sha2_512(message: &[u8]) -> [u8; 64] { - H::sha2_512(message) - } - - fn sha2_512_truncated(message: &[u8]) -> [u8; 32] { - H::sha2_512_truncated(message) - } - - fn sha3_512(message: &[u8]) -> [u8; 64] { - H::sha3_512(message) - } - - fn ripemd160(message: &[u8]) -> [u8; 20] { - H::ripemd160(message) - } -} diff --git a/modules/src/clients/ics07_tendermint/client_def.rs b/modules/src/clients/ics07_tendermint/client_def.rs deleted file mode 100644 index 7dc1af23d9..0000000000 --- a/modules/src/clients/ics07_tendermint/client_def.rs +++ /dev/null @@ -1,553 +0,0 @@ -use core::convert::TryInto; -use core::fmt::Debug; - -use crate::clients::host_functions::{HostFunctionsManager, HostFunctionsProvider}; -use ibc_proto::ibc::core::commitment::v1::MerkleProof as RawMerkleProof; -use prost::Message; -use tendermint_light_client_verifier::types::{TrustedBlockState, UntrustedBlockState}; -use tendermint_light_client_verifier::{ProdVerifier, Verdict, Verifier}; -use tendermint_proto::Protobuf; - -use crate::clients::ics07_tendermint::client_state::ClientState; -use crate::clients::ics07_tendermint::consensus_state::ConsensusState; -use crate::clients::ics07_tendermint::error::Error; -use crate::clients::ics07_tendermint::header::Header; -use crate::core::ics02_client::client_consensus::AnyConsensusState; -use crate::core::ics02_client::client_def::{ClientDef, ConsensusUpdateResult}; -use crate::core::ics02_client::client_state::AnyClientState; -use crate::core::ics02_client::client_type::ClientType; -use crate::core::ics02_client::error::Error as Ics02Error; -use crate::core::ics03_connection::connection::ConnectionEnd; -use crate::core::ics04_channel::channel::ChannelEnd; -use crate::core::ics04_channel::commitment::{AcknowledgementCommitment, PacketCommitment}; -use crate::core::ics04_channel::packet::Sequence; -use crate::core::ics23_commitment::commitment::{ - CommitmentPrefix, CommitmentProofBytes, CommitmentRoot, 
-}; -use crate::core::ics23_commitment::merkle::{apply_prefix, MerkleProof}; -use crate::core::ics24_host::identifier::ConnectionId; -use crate::core::ics24_host::identifier::{ChannelId, ClientId, PortId}; -use crate::core::ics24_host::path::{ - AcksPath, ChannelEndsPath, ClientConsensusStatePath, ClientStatePath, CommitmentsPath, - ConnectionsPath, ReceiptsPath, SeqRecvsPath, -}; -use crate::core::ics24_host::Path; -use crate::core::ics26_routing::context::ReaderContext; -use crate::downcast; -use crate::prelude::*; -use crate::Height; - -#[derive(Clone, Debug, Default)] -pub struct TendermintClient { - verifier: ProdVerifier>, -} - -impl ClientDef for TendermintClient -where - H: HostFunctionsProvider, -{ - type Header = Header; - type ClientState = ClientState; - type ConsensusState = ConsensusState; - - fn verify_header( - &self, - ctx: &dyn ReaderContext, - client_id: ClientId, - client_state: Self::ClientState, - header: Self::Header, - ) -> Result<(), Ics02Error> { - if header.height().revision_number != client_state.chain_id.version() { - return Err(Ics02Error::tendermint_handler_error( - Error::mismatched_revisions( - client_state.chain_id.version(), - header.height().revision_number, - ), - )); - } - - // Check if a consensus state is already installed; if so skip - let header_consensus_state = ConsensusState::from(header.clone()); - - let _ = match ctx.maybe_consensus_state(&client_id, header.height())? { - Some(cs) => { - let cs = downcast_consensus_state(cs)?; - // If this consensus state matches, skip verification - // (optimization) - if cs == header_consensus_state { - // Header is already installed and matches the incoming - // header (already verified) - return Ok(()); - } - Some(cs) - } - None => None, - }; - - let trusted_consensus_state = - downcast_consensus_state(ctx.consensus_state(&client_id, header.trusted_height)?)?; - - let trusted_state = TrustedBlockState { - header_time: trusted_consensus_state.timestamp, - height: header - .trusted_height - .revision_height - .try_into() - .map_err(|_| { - Ics02Error::tendermint_handler_error(Error::invalid_header_height( - header.trusted_height, - )) - })?, - next_validators: &header.trusted_validator_set, - next_validators_hash: trusted_consensus_state.next_validators_hash, - }; - - let untrusted_state = UntrustedBlockState { - signed_header: &header.signed_header, - validators: &header.validator_set, - // NB: This will skip the - // VerificationPredicates::next_validators_match check for the - // untrusted state. 
- next_validators: None, - }; - - let options = client_state.as_light_client_options()?; - - let verdict = self.verifier.verify( - untrusted_state, - trusted_state, - &options, - ctx.host_timestamp().into_tm_time().unwrap(), - ); - - match verdict { - Verdict::Success => {} - Verdict::NotEnoughTrust(voting_power_tally) => { - return Err(Error::not_enough_trusted_vals_signed(format!( - "voting power tally: {}", - voting_power_tally - )) - .into()) - } - Verdict::Invalid(detail) => { - return Err(Ics02Error::tendermint_handler_error( - Error::verification_error(detail), - )) - } - } - - Ok(()) - } - - fn update_state( - &self, - _ctx: &dyn ReaderContext, - _client_id: ClientId, - client_state: Self::ClientState, - header: Self::Header, - ) -> Result<(Self::ClientState, ConsensusUpdateResult), Ics02Error> { - let header_consensus_state = ConsensusState::from(header.clone()); - Ok(( - client_state.with_header(header), - ConsensusUpdateResult::Single(AnyConsensusState::Tendermint(header_consensus_state)), - )) - } - - fn update_state_on_misbehaviour( - &self, - client_state: Self::ClientState, - header: Self::Header, - ) -> Result { - client_state - .with_frozen_height(header.height()) - .map_err(|e| e.into()) - } - - fn check_for_misbehaviour( - &self, - ctx: &dyn ReaderContext, - client_id: ClientId, - client_state: Self::ClientState, - header: Self::Header, - ) -> Result { - // Check if a consensus state is already installed; if so it should - // match the untrusted header. - let header_consensus_state = ConsensusState::from(header.clone()); - - let existing_consensus_state = - match ctx.maybe_consensus_state(&client_id, header.height())? { - Some(cs) => { - let cs = downcast_consensus_state(cs)?; - // If this consensus state matches, skip verification - // (optimization) - if cs == header_consensus_state { - // Header is already installed and matches the incoming - // header (already verified) - return Ok(false); - } - Some(cs) - } - None => None, - }; - - // If the header has verified, but its corresponding consensus state - // differs from the existing consensus state for that height, freeze the - // client and return the installed consensus state. - if let Some(cs) = existing_consensus_state { - if cs != header_consensus_state { - return Ok(true); - } - } - - // Monotonicity checks for timestamps for in-the-middle updates - // (cs-new, cs-next, cs-latest) - if header.height() < client_state.latest_height() { - let maybe_next_cs = ctx - .next_consensus_state(&client_id, header.height())? - .map(downcast_consensus_state) - .transpose()?; - - if let Some(next_cs) = maybe_next_cs { - // New (untrusted) header timestamp cannot occur after next - // consensus state's height - if header.signed_header.header().time > next_cs.timestamp { - return Err(Ics02Error::tendermint_handler_error( - Error::header_timestamp_too_high( - header.signed_header.header().time.to_string(), - next_cs.timestamp.to_string(), - ), - )); - } - } - } - // (cs-trusted, cs-prev, cs-new) - if header.trusted_height < header.height() { - let maybe_prev_cs = ctx - .prev_consensus_state(&client_id, header.height())? 
- .map(downcast_consensus_state) - .transpose()?; - - if let Some(prev_cs) = maybe_prev_cs { - // New (untrusted) header timestamp cannot occur before the - // previous consensus state's height - if header.signed_header.header().time < prev_cs.timestamp { - return Err(Ics02Error::tendermint_handler_error( - Error::header_timestamp_too_low( - header.signed_header.header().time.to_string(), - prev_cs.timestamp.to_string(), - ), - )); - } - } - } - - Ok(false) - } - - fn verify_client_consensus_state( - &self, - _ctx: &dyn ReaderContext, - client_state: &Self::ClientState, - height: Height, - prefix: &CommitmentPrefix, - proof: &CommitmentProofBytes, - root: &CommitmentRoot, - client_id: &ClientId, - consensus_height: Height, - expected_consensus_state: &AnyConsensusState, - ) -> Result<(), Ics02Error> { - client_state.verify_height(height)?; - - let path = ClientConsensusStatePath { - client_id: client_id.clone(), - epoch: consensus_height.revision_number, - height: consensus_height.revision_height, - }; - let value = expected_consensus_state.encode_vec(); - verify_membership::(client_state, prefix, proof, root, path, value) - } - - fn verify_connection_state( - &self, - _ctx: &dyn ReaderContext, - _client_id: &ClientId, - client_state: &Self::ClientState, - height: Height, - prefix: &CommitmentPrefix, - proof: &CommitmentProofBytes, - root: &CommitmentRoot, - connection_id: &ConnectionId, - expected_connection_end: &ConnectionEnd, - ) -> Result<(), Ics02Error> { - client_state.verify_height(height)?; - - let path = ConnectionsPath(connection_id.clone()); - let value = expected_connection_end.encode_vec(); - verify_membership::(client_state, prefix, proof, root, path, value) - } - - fn verify_channel_state( - &self, - _ctx: &dyn ReaderContext, - _client_id: &ClientId, - client_state: &Self::ClientState, - height: Height, - prefix: &CommitmentPrefix, - proof: &CommitmentProofBytes, - root: &CommitmentRoot, - port_id: &PortId, - channel_id: &ChannelId, - expected_channel_end: &ChannelEnd, - ) -> Result<(), Ics02Error> { - client_state.verify_height(height)?; - - let path = ChannelEndsPath(port_id.clone(), *channel_id); - let value = expected_channel_end.encode_vec(); - verify_membership::(client_state, prefix, proof, root, path, value) - } - - fn verify_client_full_state( - &self, - _ctx: &dyn ReaderContext, - client_state: &Self::ClientState, - height: Height, - prefix: &CommitmentPrefix, - proof: &CommitmentProofBytes, - root: &CommitmentRoot, - client_id: &ClientId, - expected_client_state: &AnyClientState, - ) -> Result<(), Ics02Error> { - client_state.verify_height(height)?; - - let path = ClientStatePath(client_id.clone()); - let value = expected_client_state.encode_vec(); - verify_membership::(client_state, prefix, proof, root, path, value) - } - - fn verify_packet_data( - &self, - ctx: &dyn ReaderContext, - _client_id: &ClientId, - client_state: &Self::ClientState, - height: Height, - connection_end: &ConnectionEnd, - proof: &CommitmentProofBytes, - root: &CommitmentRoot, - port_id: &PortId, - channel_id: &ChannelId, - sequence: Sequence, - commitment: PacketCommitment, - ) -> Result<(), Ics02Error> { - client_state.verify_height(height)?; - verify_delay_passed(ctx, height, connection_end)?; - - let commitment_path = CommitmentsPath { - port_id: port_id.clone(), - channel_id: *channel_id, - sequence, - }; - - verify_membership::( - client_state, - connection_end.counterparty().prefix(), - proof, - root, - commitment_path, - commitment.into_vec(), - ) - } - - fn 
verify_packet_acknowledgement( - &self, - ctx: &dyn ReaderContext, - _client_id: &ClientId, - client_state: &Self::ClientState, - height: Height, - connection_end: &ConnectionEnd, - proof: &CommitmentProofBytes, - root: &CommitmentRoot, - port_id: &PortId, - channel_id: &ChannelId, - sequence: Sequence, - ack_commitment: AcknowledgementCommitment, - ) -> Result<(), Ics02Error> { - // client state height = consensus state height - client_state.verify_height(height)?; - verify_delay_passed(ctx, height, connection_end)?; - - let ack_path = AcksPath { - port_id: port_id.clone(), - channel_id: *channel_id, - sequence, - }; - verify_membership::( - client_state, - connection_end.counterparty().prefix(), - proof, - root, - ack_path, - ack_commitment.into_vec(), - ) - } - - fn verify_next_sequence_recv( - &self, - ctx: &dyn ReaderContext, - _client_id: &ClientId, - client_state: &Self::ClientState, - height: Height, - connection_end: &ConnectionEnd, - proof: &CommitmentProofBytes, - root: &CommitmentRoot, - port_id: &PortId, - channel_id: &ChannelId, - sequence: Sequence, - ) -> Result<(), Ics02Error> { - client_state.verify_height(height)?; - verify_delay_passed(ctx, height, connection_end)?; - - let mut seq_bytes = Vec::new(); - u64::from(sequence) - .encode(&mut seq_bytes) - .expect("buffer size too small"); - - let seq_path = SeqRecvsPath(port_id.clone(), *channel_id); - verify_membership::( - client_state, - connection_end.counterparty().prefix(), - proof, - root, - seq_path, - seq_bytes, - ) - } - - fn verify_packet_receipt_absence( - &self, - ctx: &dyn ReaderContext, - _client_id: &ClientId, - client_state: &Self::ClientState, - height: Height, - connection_end: &ConnectionEnd, - proof: &CommitmentProofBytes, - root: &CommitmentRoot, - port_id: &PortId, - channel_id: &ChannelId, - sequence: Sequence, - ) -> Result<(), Ics02Error> { - client_state.verify_height(height)?; - verify_delay_passed(ctx, height, connection_end)?; - - let receipt_path = ReceiptsPath { - port_id: port_id.clone(), - channel_id: *channel_id, - sequence, - }; - verify_non_membership::( - client_state, - connection_end.counterparty().prefix(), - proof, - root, - receipt_path, - ) - } - - fn verify_upgrade_and_update_state( - &self, - _client_state: &Self::ClientState, - _consensus_state: &Self::ConsensusState, - _proof_upgrade_client: Vec, - _proof_upgrade_consensus_state: Vec, - ) -> Result<(Self::ClientState, ConsensusUpdateResult), Ics02Error> { - // TODO: - Err(Ics02Error::implementation_specific( - "Not implemented".to_string(), - )) - } -} - -fn verify_membership( - client_state: &ClientState, - prefix: &CommitmentPrefix, - proof: &CommitmentProofBytes, - root: &CommitmentRoot, - path: P, - value: Vec, -) -> Result<(), Ics02Error> -where - H: HostFunctionsProvider, - P: Into, -{ - let merkle_path = apply_prefix(prefix, vec![path.into().to_string()]); - let merkle_proof: MerkleProof = RawMerkleProof::try_from(proof.clone()) - .map_err(Ics02Error::invalid_commitment_proof)? 
- .into(); - - merkle_proof - .verify_membership( - &client_state.proof_specs, - root.clone().into(), - merkle_path, - value, - 0, - ) - .map_err(|e| Ics02Error::tendermint(Error::ics23_error(e))) -} - -fn verify_non_membership( - client_state: &ClientState, - prefix: &CommitmentPrefix, - proof: &CommitmentProofBytes, - root: &CommitmentRoot, - path: P, -) -> Result<(), Ics02Error> -where - H: HostFunctionsProvider, - P: Into, -{ - let merkle_path = apply_prefix(prefix, vec![path.into().to_string()]); - let merkle_proof: MerkleProof = RawMerkleProof::try_from(proof.clone()) - .map_err(Ics02Error::invalid_commitment_proof)? - .into(); - - merkle_proof - .verify_non_membership(&client_state.proof_specs, root.clone().into(), merkle_path) - .map_err(|e| Ics02Error::tendermint(Error::ics23_error(e))) -} - -fn verify_delay_passed( - ctx: &dyn ReaderContext, - height: Height, - connection_end: &ConnectionEnd, -) -> Result<(), Ics02Error> { - let current_timestamp = ctx.host_timestamp(); - let current_height = ctx.host_height(); - - let client_id = connection_end.client_id(); - let processed_time = ctx - .client_update_time(client_id, height) - .map_err(|_| Error::processed_time_not_found(client_id.clone(), height))?; - let processed_height = ctx - .client_update_height(client_id, height) - .map_err(|_| Error::processed_height_not_found(client_id.clone(), height))?; - - let delay_period_time = connection_end.delay_period(); - let delay_period_height = ctx.block_delay(delay_period_time); - - ClientState::verify_delay_passed( - current_timestamp, - current_height, - processed_time, - processed_height, - delay_period_time, - delay_period_height, - ) - .map_err(|e| e.into()) -} - -fn downcast_consensus_state(cs: AnyConsensusState) -> Result { - downcast!( - cs => AnyConsensusState::Tendermint - ) - .ok_or_else(|| Ics02Error::client_args_type_mismatch(ClientType::Tendermint)) -} diff --git a/modules/src/clients/ics07_tendermint/client_state.rs b/modules/src/clients/ics07_tendermint/client_state.rs deleted file mode 100644 index 81502fffbd..0000000000 --- a/modules/src/clients/ics07_tendermint/client_state.rs +++ /dev/null @@ -1,643 +0,0 @@ -use crate::prelude::*; - -use core::convert::{TryFrom, TryInto}; -use core::time::Duration; - -use serde::{Deserialize, Serialize}; -use tendermint_light_client_verifier::options::Options; -use tendermint_proto::Protobuf; - -use ibc_proto::ibc::lightclients::tendermint::v1::ClientState as RawClientState; - -use crate::clients::ics07_tendermint::error::Error; -use crate::clients::ics07_tendermint::header::Header; -use crate::core::ics02_client::client_state::AnyClientState; -use crate::core::ics02_client::client_type::ClientType; -use crate::core::ics02_client::error::Error as Ics02Error; -use crate::core::ics02_client::trust_threshold::TrustThreshold; -use crate::core::ics23_commitment::specs::ProofSpecs; -use crate::core::ics24_host::identifier::ChainId; -use crate::timestamp::{Timestamp, ZERO_DURATION}; -use crate::Height; - -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct ClientState { - pub chain_id: ChainId, - pub trust_level: TrustThreshold, - pub trusting_period: Duration, - pub unbonding_period: Duration, - pub max_clock_drift: Duration, - pub latest_height: Height, - pub proof_specs: ProofSpecs, - pub upgrade_path: Vec, - pub frozen_height: Option, -} - -impl Protobuf for ClientState {} - -impl ClientState { - #[allow(clippy::too_many_arguments)] - pub fn new( - chain_id: ChainId, - trust_level: TrustThreshold, - trusting_period: 
Duration, - unbonding_period: Duration, - max_clock_drift: Duration, - latest_height: Height, - proof_specs: ProofSpecs, - upgrade_path: Vec, - ) -> Result { - // Basic validation of trusting period and unbonding period: each should be non-zero. - if trusting_period <= Duration::new(0, 0) { - return Err(Error::invalid_trusting_period(format!( - "ClientState trusting period ({:?}) must be greater than zero", - trusting_period - ))); - } - - if unbonding_period <= Duration::new(0, 0) { - return Err(Error::invalid_unbonding_period(format!( - "ClientState unbonding period ({:?}) must be greater than zero", - unbonding_period - ))); - } - - if trusting_period >= unbonding_period { - return Err(Error::invalid_trusting_period(format!( - "ClientState trusting period ({:?}) must be smaller than unbonding period ({:?})", - trusting_period, unbonding_period, - ))); - } - - // Basic validation for the latest_height parameter. - if latest_height <= Height::zero() { - return Err(Error::validation( - "ClientState latest height must be greater than zero".to_string(), - )); - } - - // `TrustThreshold` is guaranteed to be in the range `[0, 1)`, but a `TrustThreshold::ZERO` - // value is invalid in this context - if trust_level == TrustThreshold::ZERO { - return Err(Error::validation( - "ClientState trust-level cannot be zero".to_string(), - )); - } - - // Disallow empty proof-specs - if proof_specs.is_empty() { - return Err(Error::validation( - "ClientState proof-specs cannot be empty".to_string(), - )); - } - - Ok(Self { - chain_id, - trust_level, - trusting_period, - unbonding_period, - max_clock_drift, - latest_height, - proof_specs, - upgrade_path, - frozen_height: None, - }) - } - - pub fn latest_height(&self) -> Height { - self.latest_height - } - - pub fn with_header(self, h: Header) -> Self { - // TODO: Clarify which fields should update. - ClientState { - latest_height: self - .latest_height - .with_revision_height(u64::from(h.signed_header.header.height)), - ..self - } - } - - pub fn with_frozen_height(self, h: Height) -> Result { - if h == Height::zero() { - return Err(Error::validation( - "ClientState frozen height must be greater than zero".to_string(), - )); - } - Ok(Self { - frozen_height: Some(h), - ..self - }) - } - - /// Get the refresh time to ensure the state does not expire - pub fn refresh_time(&self) -> Option { - Some(2 * self.trusting_period / 3) - } - - /// Check if the state is expired when `elapsed` time has passed since the latest consensus - /// state timestamp - pub fn expired(&self, elapsed: Duration) -> bool { - elapsed > self.trusting_period - } - - /// Helper method to produce a [`Options`] struct for use in - /// Tendermint-specific light client verification. 
- pub fn as_light_client_options(&self) -> Result { - Ok(Options { - trust_threshold: self - .trust_level - .try_into() - .map_err(|e: Ics02Error| Error::invalid_trust_threshold(e.to_string()))?, - trusting_period: self.trusting_period, - clock_drift: self.max_clock_drift, - }) - } - - /// Verify the time and height delays - pub fn verify_delay_passed( - current_time: Timestamp, - current_height: Height, - processed_time: Timestamp, - processed_height: Height, - delay_period_time: Duration, - delay_period_blocks: u64, - ) -> Result<(), Error> { - let earliest_time = - (processed_time + delay_period_time).map_err(Error::timestamp_overflow)?; - if !(current_time == earliest_time || current_time.after(&earliest_time)) { - return Err(Error::not_enough_time_elapsed(current_time, earliest_time)); - } - - let earliest_height = processed_height.add(delay_period_blocks); - if current_height < earliest_height { - return Err(Error::not_enough_blocks_elapsed( - current_height, - earliest_height, - )); - } - - Ok(()) - } - - /// Verify that the client is at a sufficient height and unfrozen at the given height - pub fn verify_height(&self, height: Height) -> Result<(), Error> { - if self.latest_height < height { - return Err(Error::insufficient_height(self.latest_height(), height)); - } - - match self.frozen_height { - Some(frozen_height) if frozen_height <= height => { - Err(Error::client_frozen(frozen_height, height)) - } - _ => Ok(()), - } - } -} - -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct UpgradeOptions { - pub unbonding_period: Duration, -} - -impl crate::core::ics02_client::client_state::ClientState for ClientState { - type UpgradeOptions = UpgradeOptions; - - fn chain_id(&self) -> ChainId { - self.chain_id.clone() - } - - fn client_type(&self) -> ClientType { - ClientType::Tendermint - } - - fn latest_height(&self) -> Height { - self.latest_height - } - - fn frozen_height(&self) -> Option { - self.frozen_height - } - - fn upgrade( - mut self, - upgrade_height: Height, - upgrade_options: UpgradeOptions, - chain_id: ChainId, - ) -> Self { - // Reset custom fields to zero values - self.trusting_period = ZERO_DURATION; - self.trust_level = TrustThreshold::ZERO; - self.frozen_height = None; - self.max_clock_drift = ZERO_DURATION; - - // Upgrade the client state - self.latest_height = upgrade_height; - self.unbonding_period = upgrade_options.unbonding_period; - self.chain_id = chain_id; - - self - } - - fn wrap_any(self) -> AnyClientState { - AnyClientState::Tendermint(self) - } -} - -impl TryFrom for ClientState { - type Error = Error; - - fn try_from(raw: RawClientState) -> Result { - let trust_level = raw - .trust_level - .clone() - .ok_or_else(Error::missing_trusting_period)?; - - let frozen_height = raw.frozen_height.and_then(|raw_height| { - let height = raw_height.into(); - if height == Height::zero() { - None - } else { - Some(height) - } - }); - - Ok(Self { - chain_id: ChainId::from_string(raw.chain_id.as_str()), - trust_level: trust_level - .try_into() - .map_err(|e| Error::invalid_trust_threshold(format!("{}", e)))?, - trusting_period: raw - .trusting_period - .ok_or_else(Error::missing_trusting_period)? - .try_into() - .map_err(|_| Error::negative_trusting_period())?, - unbonding_period: raw - .unbonding_period - .ok_or_else(Error::missing_unbonding_period)? - .try_into() - .map_err(|_| Error::negative_unbonding_period())?, - max_clock_drift: raw - .max_clock_drift - .ok_or_else(Error::missing_max_clock_drift)? 
- .try_into() - .map_err(|_| Error::negative_max_clock_drift())?, - latest_height: raw - .latest_height - .ok_or_else(Error::missing_latest_height)? - .into(), - frozen_height, - upgrade_path: raw.upgrade_path, - proof_specs: raw.proof_specs.into(), - }) - } -} - -impl From for RawClientState { - fn from(value: ClientState) -> Self { - RawClientState { - chain_id: value.chain_id.to_string(), - trust_level: Some(value.trust_level.into()), - trusting_period: Some(value.trusting_period.into()), - unbonding_period: Some(value.unbonding_period.into()), - max_clock_drift: Some(value.max_clock_drift.into()), - frozen_height: Some(value.frozen_height.unwrap_or_else(Height::zero).into()), - latest_height: Some(value.latest_height.into()), - proof_specs: value.proof_specs.into(), - upgrade_path: value.upgrade_path, - ..Default::default() - } - } -} - -#[cfg(test)] -mod tests { - use crate::prelude::*; - use crate::Height; - use core::time::Duration; - use test_log::test; - - use ibc_proto::ics23::ProofSpec as Ics23ProofSpec; - use tendermint_rpc::endpoint::abci_query::AbciQuery; - - use crate::clients::ics07_tendermint::client_state::ClientState; - use crate::core::ics02_client::trust_threshold::TrustThreshold; - use crate::core::ics23_commitment::specs::ProofSpecs; - use crate::core::ics24_host::identifier::ChainId; - use crate::test::test_serialization_roundtrip; - use crate::timestamp::{Timestamp, ZERO_DURATION}; - - #[derive(Clone, Debug, PartialEq)] - struct ClientStateParams { - id: ChainId, - trust_level: TrustThreshold, - trusting_period: Duration, - unbonding_period: Duration, - max_clock_drift: Duration, - latest_height: Height, - proof_specs: ProofSpecs, - upgrade_path: Vec, - } - - #[test] - fn serialization_roundtrip_no_proof() { - let json_data = - include_str!("../../../tests/support/query/serialization/client_state.json"); - test_serialization_roundtrip::(json_data); - } - - #[test] - fn serialization_roundtrip_with_proof() { - let json_data = - include_str!("../../../tests/support/query/serialization/client_state_proof.json"); - test_serialization_roundtrip::(json_data); - } - - #[test] - fn client_state_new() { - // Define a "default" set of parameters to reuse throughout these tests. - let default_params: ClientStateParams = ClientStateParams { - id: ChainId::default(), - trust_level: TrustThreshold::ONE_THIRD, - trusting_period: Duration::new(64000, 0), - unbonding_period: Duration::new(128000, 0), - max_clock_drift: Duration::new(3, 0), - latest_height: Height::new(0, 10), - proof_specs: ProofSpecs::default(), - upgrade_path: vec!["".to_string()], - }; - - struct Test { - name: String, - params: ClientStateParams, - want_pass: bool, - } - - let tests: Vec = vec![ - Test { - name: "Valid parameters".to_string(), - params: default_params.clone(), - want_pass: true, - }, - Test { - name: "Invalid unbonding period".to_string(), - params: ClientStateParams { - unbonding_period: ZERO_DURATION, - ..default_params.clone() - }, - want_pass: false, - }, - Test { - name: "Invalid (too small) trusting period".to_string(), - params: ClientStateParams { - trusting_period: ZERO_DURATION, - ..default_params.clone() - }, - want_pass: false, - }, - Test { - name: "Invalid (too large) trusting period w.r.t. 
unbonding period".to_string(), - params: ClientStateParams { - trusting_period: Duration::new(11, 0), - unbonding_period: Duration::new(10, 0), - ..default_params.clone() - }, - want_pass: false, - }, - Test { - name: "Invalid (too small) trusting trust threshold".to_string(), - params: ClientStateParams { - trust_level: TrustThreshold::ZERO, - ..default_params.clone() - }, - want_pass: false, - }, - Test { - name: "Invalid (too small) latest height".to_string(), - params: ClientStateParams { - latest_height: Height::zero(), - ..default_params.clone() - }, - want_pass: false, - }, - Test { - name: "Invalid (empty) proof specs".to_string(), - params: ClientStateParams { - proof_specs: ProofSpecs::from(Vec::::new()), - ..default_params - }, - want_pass: false, - }, - ] - .into_iter() - .collect(); - - for test in tests { - let p = test.params.clone(); - - let cs_result = ClientState::new( - p.id, - p.trust_level, - p.trusting_period, - p.unbonding_period, - p.max_clock_drift, - p.latest_height, - p.proof_specs, - p.upgrade_path, - ); - - assert_eq!( - test.want_pass, - cs_result.is_ok(), - "ClientState::new() failed for test {}, \nmsg{:?} with error {:?}", - test.name, - test.params.clone(), - cs_result.err(), - ); - } - } - - #[test] - fn client_state_verify_delay_passed() { - #[derive(Debug, Clone)] - struct Params { - current_time: Timestamp, - current_height: Height, - processed_time: Timestamp, - processed_height: Height, - delay_period_time: Duration, - delay_period_blocks: u64, - } - struct Test { - name: String, - params: Params, - want_pass: bool, - } - let now = Timestamp::now(); - - let tests: Vec = vec![ - Test { - name: "Successful delay verification".to_string(), - params: Params { - current_time: (now + Duration::from_nanos(2000)).unwrap(), - current_height: Height::new(0, 5), - processed_time: (now + Duration::from_nanos(1000)).unwrap(), - processed_height: Height::new(0, 3), - delay_period_time: Duration::from_nanos(500), - delay_period_blocks: 2, - }, - want_pass: true, - }, - Test { - name: "Delay period(time) has not elapsed".to_string(), - params: Params { - current_time: (now + Duration::from_nanos(1200)).unwrap(), - current_height: Height::new(0, 5), - processed_time: (now + Duration::from_nanos(1000)).unwrap(), - processed_height: Height::new(0, 3), - delay_period_time: Duration::from_nanos(500), - delay_period_blocks: 2, - }, - want_pass: false, - }, - Test { - name: "Delay period(blocks) has not elapsed".to_string(), - params: Params { - current_time: (now + Duration::from_nanos(2000)).unwrap(), - current_height: Height::new(0, 5), - processed_time: (now + Duration::from_nanos(1000)).unwrap(), - processed_height: Height::new(0, 4), - delay_period_time: Duration::from_nanos(500), - delay_period_blocks: 2, - }, - want_pass: false, - }, - ]; - - for test in tests { - let res = ClientState::verify_delay_passed( - test.params.current_time, - test.params.current_height, - test.params.processed_time, - test.params.processed_height, - test.params.delay_period_time, - test.params.delay_period_blocks, - ); - - assert_eq!( - test.want_pass, - res.is_ok(), - "ClientState::verify_delay_passed() failed for test {}, \nmsg{:?} with error {:?}", - test.name, - test.params.clone(), - res.err(), - ); - } - } - - #[test] - fn client_state_verify_height() { - // Define a "default" set of parameters to reuse throughout these tests. 
- let default_params: ClientStateParams = ClientStateParams { - id: ChainId::default(), - trust_level: TrustThreshold::ONE_THIRD, - trusting_period: Duration::new(64000, 0), - unbonding_period: Duration::new(128000, 0), - max_clock_drift: Duration::new(3, 0), - latest_height: Height::new(1, 10), - proof_specs: ProofSpecs::default(), - upgrade_path: vec!["".to_string()], - }; - - struct Test { - name: String, - height: Height, - setup: Option ClientState>>, - want_pass: bool, - } - - let tests = vec![ - Test { - name: "Successful height verification".to_string(), - height: Height::new(1, 8), - setup: None, - want_pass: true, - }, - Test { - name: "Invalid (too large) client height".to_string(), - height: Height::new(1, 12), - setup: None, - want_pass: false, - }, - Test { - name: "Invalid, client is frozen below current height".to_string(), - height: Height::new(1, 6), - setup: Some(Box::new(|client_state| { - client_state.with_frozen_height(Height::new(1, 5)).unwrap() - })), - want_pass: false, - }, - ]; - - for test in tests { - let p = default_params.clone(); - let client_state = ClientState::new( - p.id, - p.trust_level, - p.trusting_period, - p.unbonding_period, - p.max_clock_drift, - p.latest_height, - p.proof_specs, - p.upgrade_path, - ) - .unwrap(); - let client_state = match test.setup { - Some(setup) => (setup)(client_state), - _ => client_state, - }; - let res = client_state.verify_height(test.height); - - assert_eq!( - test.want_pass, - res.is_ok(), - "ClientState::verify_delay_height() failed for test {}, \nmsg{:?} with error {:?}", - test.name, - test.height, - res.err(), - ); - } - } -} - -#[cfg(any(test, feature = "mocks"))] -pub mod test_util { - use crate::prelude::*; - use core::time::Duration; - - use tendermint::block::Header; - - use crate::clients::ics07_tendermint::client_state::ClientState; - use crate::core::ics02_client::client_state::AnyClientState; - use crate::core::ics02_client::height::Height; - use crate::core::ics24_host::identifier::ChainId; - - pub fn get_dummy_tendermint_client_state(tm_header: Header) -> AnyClientState { - AnyClientState::Tendermint( - ClientState::new( - ChainId::from(tm_header.chain_id.clone()), - Default::default(), - Duration::from_secs(64000), - Duration::from_secs(128000), - Duration::from_millis(3000), - Height::new( - ChainId::chain_version(tm_header.chain_id.as_str()), - u64::from(tm_header.height), - ), - Default::default(), - vec!["".to_string()], - ) - .unwrap(), - ) - } -} diff --git a/modules/src/clients/ics07_tendermint/consensus_state.rs b/modules/src/clients/ics07_tendermint/consensus_state.rs deleted file mode 100644 index 43b0ebaf49..0000000000 --- a/modules/src/clients/ics07_tendermint/consensus_state.rs +++ /dev/null @@ -1,136 +0,0 @@ -use crate::prelude::*; - -use core::convert::Infallible; -use core::fmt::Debug; - -use serde::Serialize; -use tendermint::{hash::Algorithm, time::Time, Hash}; -use tendermint_proto::google::protobuf as tpb; -use tendermint_proto::Protobuf; - -use ibc_proto::ibc::lightclients::tendermint::v1::ConsensusState as RawConsensusState; - -use crate::clients::ics07_tendermint::error::Error; -use crate::clients::ics07_tendermint::header::Header; -use crate::core::ics02_client::client_consensus::AnyConsensusState; -use crate::core::ics02_client::client_type::ClientType; -use crate::core::ics23_commitment::commitment::CommitmentRoot; - -#[derive(Clone, Debug, PartialEq, Eq, Serialize)] -pub struct ConsensusState { - pub timestamp: Time, - pub root: CommitmentRoot, - pub next_validators_hash: 
Hash, -} - -impl ConsensusState { - pub fn new(root: CommitmentRoot, timestamp: Time, next_validators_hash: Hash) -> Self { - Self { - timestamp, - root, - next_validators_hash, - } - } -} - -impl crate::core::ics02_client::client_consensus::ConsensusState for ConsensusState { - type Error = Infallible; - - fn client_type(&self) -> ClientType { - ClientType::Tendermint - } - - fn root(&self) -> &CommitmentRoot { - &self.root - } - - fn wrap_any(self) -> AnyConsensusState { - AnyConsensusState::Tendermint(self) - } -} - -impl Protobuf for ConsensusState {} - -impl TryFrom for ConsensusState { - type Error = Error; - - fn try_from(raw: RawConsensusState) -> Result { - let ibc_proto::google::protobuf::Timestamp { seconds, nanos } = raw - .timestamp - .ok_or_else(|| Error::invalid_raw_consensus_state("missing timestamp".into()))?; - // FIXME: shunts like this are necessary due to - // https://github.com/informalsystems/tendermint-rs/issues/1053 - let proto_timestamp = tpb::Timestamp { seconds, nanos }; - let timestamp = proto_timestamp - .try_into() - .map_err(|e| Error::invalid_raw_consensus_state(format!("invalid timestamp: {}", e)))?; - - Ok(Self { - root: raw - .root - .ok_or_else(|| { - Error::invalid_raw_consensus_state("missing commitment root".into()) - })? - .hash - .into(), - timestamp, - next_validators_hash: Hash::from_bytes(Algorithm::Sha256, &raw.next_validators_hash) - .map_err(|e| Error::invalid_raw_consensus_state(e.to_string()))?, - }) - } -} - -impl From for RawConsensusState { - fn from(value: ConsensusState) -> Self { - // FIXME: shunts like this are necessary due to - // https://github.com/informalsystems/tendermint-rs/issues/1053 - let tpb::Timestamp { seconds, nanos } = value.timestamp.into(); - let timestamp = ibc_proto::google::protobuf::Timestamp { seconds, nanos }; - - RawConsensusState { - timestamp: Some(timestamp), - root: Some(ibc_proto::ibc::core::commitment::v1::MerkleRoot { - hash: value.root.into_vec(), - }), - next_validators_hash: value.next_validators_hash.as_bytes().to_vec(), - } - } -} - -impl From for ConsensusState { - fn from(header: tendermint::block::Header) -> Self { - Self { - root: CommitmentRoot::from_bytes(header.app_hash.as_ref()), - timestamp: header.time, - next_validators_hash: header.next_validators_hash, - } - } -} - -impl From
<Header>
for ConsensusState { - fn from(header: Header) -> Self { - Self::from(header.signed_header.header) - } -} - -#[cfg(test)] -mod tests { - use tendermint_rpc::endpoint::abci_query::AbciQuery; - use test_log::test; - - use crate::test::test_serialization_roundtrip; - - #[test] - fn serialization_roundtrip_no_proof() { - let json_data = - include_str!("../../../tests/support/query/serialization/consensus_state.json"); - test_serialization_roundtrip::(json_data); - } - - #[test] - fn serialization_roundtrip_with_proof() { - let json_data = - include_str!("../../../tests/support/query/serialization/consensus_state_proof.json"); - test_serialization_roundtrip::(json_data); - } -} diff --git a/modules/src/clients/ics07_tendermint/error.rs b/modules/src/clients/ics07_tendermint/error.rs deleted file mode 100644 index 0ef3b9f29c..0000000000 --- a/modules/src/clients/ics07_tendermint/error.rs +++ /dev/null @@ -1,294 +0,0 @@ -use crate::prelude::*; - -use flex_error::{define_error, TraceError}; - -use crate::core::ics23_commitment::error::Error as Ics23Error; -use crate::core::ics24_host::error::ValidationError; -use crate::core::ics24_host::identifier::ClientId; -use crate::timestamp::{Timestamp, TimestampOverflowError}; - -use crate::Height; -use tendermint::account::Id; -use tendermint::hash::Hash; -use tendermint::Error as TendermintError; -use tendermint_light_client_verifier::errors::VerificationErrorDetail as LightClientErrorDetail; - -define_error! { - #[derive(Debug, PartialEq, Eq)] - Error { - InvalidTrustingPeriod - { reason: String } - |e| { format_args!("invalid trusting period: {}", e.reason) }, - - InvalidUnbondingPeriod - { reason: String } - |e| { format_args!("invalid unbonding period: {}", e.reason) }, - - InvalidAddress - |_| { "invalid address" }, - - InvalidHeader - { reason: String } - [ TendermintError ] - |e| { format_args!("invalid header, failed basic validation: {}", e.reason) }, - - InvalidTrustThreshold - { reason: String } - |e| { format_args!("invalid client state trust threshold: {}", e.reason) }, - - MissingSignedHeader - |_| { "missing signed header" }, - - Validation - { reason: String } - |e| { format_args!("invalid header, failed basic validation: {}", e.reason) }, - - InvalidRawClientState - { reason: String } - |e| { format_args!("invalid raw client state: {}", e.reason) }, - - MissingValidatorSet - |_| { "missing validator set" }, - - MissingTrustedValidatorSet - |_| { "missing trusted validator set" }, - - MissingTrustedHeight - |_| { "missing trusted height" }, - - MissingTrustingPeriod - |_| { "missing trusting period" }, - - MissingUnbondingPeriod - |_| { "missing unbonding period" }, - - InvalidChainIdentifier - [ ValidationError ] - |_| { "invalid chain identifier" }, - - NegativeTrustingPeriod - |_| { "negative trusting period" }, - - NegativeUnbondingPeriod - |_| { "negative unbonding period" }, - - MissingMaxClockDrift - |_| { "missing max clock drift" }, - - NegativeMaxClockDrift - |_| { "negative max clock drift" }, - - MissingLatestHeight - |_| { "missing latest height" }, - - MissingFrozenHeight - |_| { "missing frozen height" }, - - InvalidChainId - { raw_value: String } - [ ValidationError ] - |e| { format_args!("invalid chain identifier: {}", e.raw_value) }, - - InvalidRawHeight - { raw_height: u64 } - |e| { format_args!("invalid raw height: {}", e.raw_height) }, - - InvalidRawConsensusState - { reason: String } - | e | { format_args!("invalid raw client consensus state: {}", e.reason) }, - - InvalidRawHeader - [ TendermintError ] - | _ | { 
"invalid raw header" }, - - InvalidRawMisbehaviour - { reason: String } - | e | { format_args!("invalid raw misbehaviour: {}", e.reason) }, - - Decode - [ TraceError ] - | _ | { "decode error" }, - - InsufficientVotingPower - { reason: String } - | e | { - format_args!("insufficient overlap: {}", e.reason) - }, - - LowUpdateTimestamp - { - low: String, - high: String - } - | e | { - format_args!("header timestamp {0} must be greater than current client consensus state timestamp {1}", e.low, e.high) - }, - - HeaderTimestampOutsideTrustingTime - { - low: String, - high: String - } - | e | { - format_args!("header timestamp {0} is outside the trusting period w.r.t. consensus state timestamp {1}", e.low, e.high) - }, - - HeaderTimestampTooHigh - { - actual: String, - max: String, - } - | e | { - format_args!("given other previous updates, header timestamp should be at most {0}, but was {1}", e.max, e.actual) - }, - - HeaderTimestampTooLow - { - actual: String, - min: String, - } - | e | { - format_args!("given other previous updates, header timestamp should be at least {0}, but was {1}", e.min, e.actual) - }, - - TimestampOverflow - [ TimestampOverflowError ] - |_| { "timestamp overflowed" }, - - NotEnoughTimeElapsed - { - current_time: Timestamp, - earliest_time: Timestamp, - } - | e | { - format_args!("not enough time elapsed, current timestamp {0} is still less than earliest acceptable timestamp {1}", e.current_time, e.earliest_time) - }, - - NotEnoughBlocksElapsed - { - current_height: Height, - earliest_height: Height, - } - | e | { - format_args!("not enough blocks elapsed, current height {0} is still less than earliest acceptable height {1}", e.current_height, e.earliest_height) - }, - - InvalidHeaderHeight - { height: Height } - | e | { - format_args!("header height = {0} is invalid", e.height) - }, - - InvalidTrustedHeaderHeight - { - trusted_header_height: Height, - height_header: Height - } - | e | { - format_args!("header height is {0} and is lower than the trusted header height, which is {1} ", e.height_header, e.trusted_header_height) - }, - - LowUpdateHeight - { - low: Height, - high: Height - } - | e | { - format_args!("header height is {0} but it must be greater than the current client height which is {1}", e.low, e.high) - }, - - MismatchedRevisions - { - current_revision: u64, - update_revision: u64, - } - | e | { - format_args!("the header's current/trusted revision number ({0}) and the update's revision number ({1}) should be the same", e.current_revision, e.update_revision) - }, - - InvalidValidatorSet - { - hash1: Hash, - hash2: Hash, - } - | e | { - format_args!("invalid validator set: header_validators_hash={} and validators_hash={}", e.hash1, e.hash2) - }, - - NotEnoughTrustedValsSigned - { reason: String } - | e | { - format_args!("not enough trust because insufficient validators overlap: {}", e.reason) - }, - - VerificationError - { detail: LightClientErrorDetail } - | e | { - format_args!("verification failed: {}", e.detail) - }, - - ProcessedTimeNotFound - { - client_id: ClientId, - height: Height, - } - | e | { - format_args!( - "Processed time for the client {0} at height {1} not found", - e.client_id, e.height) - }, - - ProcessedHeightNotFound - { - client_id: ClientId, - height: Height, - } - | e | { - format_args!( - "Processed height for the client {0} at height {1} not found", - e.client_id, e.height) - }, - - Ics23Error - [ Ics23Error ] - | _ | { "ics23 commitment error" }, - - InsufficientHeight - { - latest_height: Height, - target_height: Height, - } - 
| e | { - format_args!("the height is insufficient: latest_height={0} target_height={1}", e.latest_height, e.target_height) - }, - - ClientFrozen - { - frozen_height: Height, - target_height: Height, - } - | e | { - format_args!("the client is frozen: frozen_height={0} target_height={1}", e.frozen_height, e.target_height) - }, - } -} - -define_error! { - #[derive(Debug, PartialEq, Eq)] - VerificationError { - InvalidSignature - | _ | { "couldn't verify validator signature" }, - - DuplicateValidator - { id: Id } - | e | { - format_args!("duplicate validator in commit signatures with address {}", e.id) - }, - - InsufficientOverlap - { q1: u64, q2: u64 } - | e | { - format_args!("insufficient signers overlap between {0} and {1}", e.q1, e.q2) - }, - } -} diff --git a/modules/src/clients/ics07_tendermint/header.rs b/modules/src/clients/ics07_tendermint/header.rs deleted file mode 100644 index 121fdc944e..0000000000 --- a/modules/src/clients/ics07_tendermint/header.rs +++ /dev/null @@ -1,202 +0,0 @@ -use core::cmp::Ordering; - -use bytes::Buf; -use prost::Message; -use serde_derive::{Deserialize, Serialize}; -use tendermint::block::signed_header::SignedHeader; -use tendermint::validator::Set as ValidatorSet; -use tendermint_proto::Protobuf; - -use crate::alloc::string::ToString; - -use ibc_proto::ibc::lightclients::tendermint::v1::Header as RawHeader; - -use crate::clients::ics07_tendermint::error::Error; -use crate::core::ics02_client::client_type::ClientType; -use crate::core::ics02_client::header::AnyHeader; -use crate::core::ics24_host::identifier::ChainId; -use crate::timestamp::Timestamp; -use crate::Height; - -/// Tendermint consensus header -#[derive(Clone, PartialEq, Eq, Deserialize, Serialize)] -pub struct Header { - pub signed_header: SignedHeader, // contains the commitment root - pub validator_set: ValidatorSet, // the validator set that signed Header - pub trusted_height: Height, // the height of a trusted header seen by client less than or equal to Header - // TODO(thane): Rename this to trusted_next_validator_set? 
- pub trusted_validator_set: ValidatorSet, // the last trusted validator set at trusted height -} - -impl core::fmt::Debug for Header { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { - write!(f, " Header {{...}}") - } -} - -impl Header { - pub fn height(&self) -> Height { - Height::new( - ChainId::chain_version(self.signed_header.header.chain_id.as_str()), - u64::from(self.signed_header.header.height), - ) - } - - pub fn timestamp(&self) -> Timestamp { - self.signed_header.header.time.into() - } - - pub fn compatible_with(&self, other_header: &Header) -> bool { - headers_compatible(&self.signed_header, &other_header.signed_header) - } -} - -pub fn headers_compatible(header: &SignedHeader, other: &SignedHeader) -> bool { - let ibc_client_height = other.header.height; - let self_header_height = header.header.height; - - match self_header_height.cmp(&ibc_client_height) { - Ordering::Equal => { - // 1 - fork - header.commit.block_id == other.commit.block_id - } - Ordering::Greater => { - // 2 - BFT time violation - header.header.time > other.header.time - } - Ordering::Less => { - // 3 - BFT time violation - header.header.time < other.header.time - } - } -} - -impl crate::core::ics02_client::header::Header for Header { - fn client_type(&self) -> ClientType { - ClientType::Tendermint - } - - fn wrap_any(self) -> AnyHeader { - AnyHeader::Tendermint(self) - } -} - -impl Protobuf for Header {} - -impl TryFrom for Header { - type Error = Error; - - fn try_from(raw: RawHeader) -> Result { - let header = Self { - signed_header: raw - .signed_header - .ok_or_else(Error::missing_signed_header)? - .try_into() - .map_err(|e| Error::invalid_header("signed header conversion".to_string(), e))?, - validator_set: raw - .validator_set - .ok_or_else(Error::missing_validator_set)? - .try_into() - .map_err(Error::invalid_raw_header)?, - trusted_height: raw - .trusted_height - .ok_or_else(Error::missing_trusted_height)? - .into(), - trusted_validator_set: raw - .trusted_validators - .ok_or_else(Error::missing_trusted_validator_set)? - .try_into() - .map_err(Error::invalid_raw_header)?, - }; - - if header.height().revision_number != header.trusted_height.revision_number { - return Err(Error::mismatched_revisions( - header.trusted_height.revision_number, - header.height().revision_number, - )); - } - - Ok(header) - } -} - -pub fn decode_header(buf: B) -> Result { - RawHeader::decode(buf).map_err(Error::decode)?.try_into() -} - -impl From
<Header> for RawHeader { - fn from(value: Header) -> Self { - RawHeader { - signed_header: Some(value.signed_header.into()), - validator_set: Some(value.validator_set.into()), - trusted_height: Some(value.trusted_height.into()), - trusted_validators: Some(value.trusted_validator_set.into()), - } - } -} - -#[cfg(test)] -pub mod test_util { - use alloc::vec; - - use subtle_encoding::hex; - use tendermint::block::signed_header::SignedHeader; - use tendermint::validator::Info as ValidatorInfo; - use tendermint::validator::Set as ValidatorSet; - use tendermint::PublicKey; - - use crate::clients::ics07_tendermint::header::Header; - use crate::Height; - - pub fn get_dummy_tendermint_header() -> tendermint::block::Header { - serde_json::from_str::<SignedHeader>(include_str!( - "../../../tests/support/signed_header.json" - )) - .unwrap() - .header - } - - // TODO: This should be replaced with a ::default() or ::produce(). - // The implementation of this function comprises duplicate code (code borrowed from - // `tendermint-rs` for assembling a Header). - // See https://github.com/informalsystems/tendermint-rs/issues/381. - // - // The normal flow is: - // - get the (trusted) signed header and the `trusted_validator_set` at a `trusted_height` - // - get the `signed_header` and the `validator_set` at latest height - // - build the ics07 Header - // For testing purposes this function does: - // - get the `signed_header` from a .json file - // - create the `validator_set` with a single validator that is also the proposer - // - assume a `trusted_height` of 1 and no change in the validator set since height 1, - // i.e. `trusted_validator_set` = `validator_set` - pub fn get_dummy_ics07_header() -> Header { - // Build a SignedHeader from a JSON file. - let shdr = serde_json::from_str::<SignedHeader>(include_str!( - "../../../tests/support/signed_header.json" - )) - .unwrap(); - - // Build a set of validators. - // Below are test values inspired form `test_validator_set()` in tendermint-rs. 
- let v1: ValidatorInfo = ValidatorInfo::new( - PublicKey::from_raw_ed25519( - &hex::decode_upper( - "F349539C7E5EF7C49549B09C4BFC2335318AB0FE51FBFAA2433B4F13E816F4A7", - ) - .unwrap(), - ) - .unwrap(), - 281_815_u64.try_into().unwrap(), - ); - - let vs = ValidatorSet::new(vec![v1.clone()], Some(v1)); - - Header { - signed_header: shdr, - validator_set: vs.clone(), - trusted_height: Height::new(0, 1), - trusted_validator_set: vs, - } - } -} diff --git a/modules/src/clients/ics07_tendermint/misbehaviour.rs b/modules/src/clients/ics07_tendermint/misbehaviour.rs deleted file mode 100644 index 016c3fde3e..0000000000 --- a/modules/src/clients/ics07_tendermint/misbehaviour.rs +++ /dev/null @@ -1,76 +0,0 @@ -use crate::prelude::*; - -use tendermint_proto::Protobuf; - -use ibc_proto::ibc::lightclients::tendermint::v1::Misbehaviour as RawMisbehaviour; - -use crate::clients::ics07_tendermint::error::Error; -use crate::clients::ics07_tendermint::header::Header; -use crate::core::ics02_client::misbehaviour::AnyMisbehaviour; -use crate::core::ics24_host::identifier::ClientId; -use crate::Height; - -#[derive(Clone, Debug, PartialEq)] -pub struct Misbehaviour { - pub client_id: ClientId, - pub header1: Header, - pub header2: Header, -} - -impl crate::core::ics02_client::misbehaviour::Misbehaviour for Misbehaviour { - fn client_id(&self) -> &ClientId { - &self.client_id - } - - fn height(&self) -> Height { - self.header1.height() - } - - fn wrap_any(self) -> AnyMisbehaviour { - AnyMisbehaviour::Tendermint(self) - } -} - -impl Protobuf for Misbehaviour {} - -impl TryFrom for Misbehaviour { - type Error = Error; - - fn try_from(raw: RawMisbehaviour) -> Result { - Ok(Self { - client_id: Default::default(), - header1: raw - .header_1 - .ok_or_else(|| Error::invalid_raw_misbehaviour("missing header1".into()))? - .try_into()?, - header2: raw - .header_2 - .ok_or_else(|| Error::invalid_raw_misbehaviour("missing header2".into()))? - .try_into()?, - }) - } -} - -impl From for RawMisbehaviour { - fn from(value: Misbehaviour) -> Self { - RawMisbehaviour { - client_id: value.client_id.to_string(), - header_1: Some(value.header1.into()), - header_2: Some(value.header2.into()), - } - } -} - -impl core::fmt::Display for Misbehaviour { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { - write!( - f, - "{:?} h1: {:?}-{:?} h2: {:?}-{:?}", - self.client_id, - self.header1.height(), - self.header1.trusted_height, - self.header2.height(), - self.header2.trusted_height, - ) - } -} diff --git a/modules/src/clients/ics07_tendermint/mod.rs b/modules/src/clients/ics07_tendermint/mod.rs deleted file mode 100644 index 3b32d8e62f..0000000000 --- a/modules/src/clients/ics07_tendermint/mod.rs +++ /dev/null @@ -1,9 +0,0 @@ -//! ICS 07: Tendermint Client implements a client verification algorithm for blockchains which use -//! the Tendermint consensus algorithm. 
- -pub mod client_def; -pub mod client_state; -pub mod consensus_state; -pub mod error; -pub mod header; -pub mod misbehaviour; diff --git a/modules/src/clients/ics11_beefy/client_def.rs b/modules/src/clients/ics11_beefy/client_def.rs deleted file mode 100644 index 9d3fe1f103..0000000000 --- a/modules/src/clients/ics11_beefy/client_def.rs +++ /dev/null @@ -1,511 +0,0 @@ -use beefy_client_primitives::ClientState as LightClientState; -use beefy_client_primitives::{ParachainHeader, ParachainsUpdateProof}; -use codec::{Decode, Encode}; -use core::fmt::Debug; -use pallet_mmr_primitives::BatchProof; -use sp_core::H256; -use tendermint_proto::Protobuf; - -use crate::clients::host_functions::{HostFunctionsManager, HostFunctionsProvider}; -use crate::clients::ics11_beefy::client_state::ClientState; -use crate::clients::ics11_beefy::consensus_state::ConsensusState; -use crate::clients::ics11_beefy::error::Error as BeefyError; -use crate::clients::ics11_beefy::header::BeefyHeader; -use crate::core::ics02_client::client_consensus::AnyConsensusState; -use crate::core::ics02_client::client_def::{ClientDef, ConsensusUpdateResult}; -use crate::core::ics02_client::client_state::AnyClientState; -use crate::core::ics02_client::client_type::ClientType; -use crate::core::ics02_client::error::Error; -use crate::core::ics03_connection::connection::ConnectionEnd; -use crate::core::ics04_channel::channel::ChannelEnd; -use crate::core::ics04_channel::commitment::{AcknowledgementCommitment, PacketCommitment}; -use crate::core::ics04_channel::packet::Sequence; - -use crate::core::ics23_commitment::commitment::{ - CommitmentPrefix, CommitmentProofBytes, CommitmentRoot, -}; - -use crate::core::ics24_host::identifier::ConnectionId; -use crate::core::ics24_host::identifier::{ChannelId, ClientId, PortId}; -use crate::core::ics24_host::Path; -use crate::core::ics26_routing::context::ReaderContext; -use crate::prelude::*; -use crate::Height; -use core::marker::PhantomData; - -use crate::core::ics24_host::path::{ - AcksPath, ChannelEndsPath, ClientConsensusStatePath, ClientStatePath, CommitmentsPath, - ConnectionsPath, ReceiptsPath, SeqRecvsPath, -}; -use crate::downcast; - -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct BeefyClient(PhantomData); - -impl Default for BeefyClient { - fn default() -> Self { - Self(PhantomData::default()) - } -} - -impl ClientDef for BeefyClient { - type Header = BeefyHeader; - type ClientState = ClientState; - type ConsensusState = ConsensusState; - - fn verify_header( - &self, - _ctx: &dyn ReaderContext, - _client_id: ClientId, - client_state: Self::ClientState, - header: Self::Header, - ) -> Result<(), Error> { - let light_client_state = LightClientState { - latest_beefy_height: client_state.latest_beefy_height, - mmr_root_hash: client_state.mmr_root_hash, - current_authorities: client_state.authority.clone(), - next_authorities: client_state.next_authority_set.clone(), - beefy_activation_block: client_state.beefy_activation_block, - }; - // If mmr update exists verify it and return the new light client state - // or else return existing light client state - let light_client_state = if let Some(mmr_update) = header.mmr_update_proof { - beefy_client::verify_mmr_root_with_proof::>( - light_client_state, - mmr_update, - ) - .map_err(|e| Error::beefy(BeefyError::invalid_mmr_update(format!("{:?}", e))))? 
- } else { - light_client_state - }; - - // Extract parachain headers from the beefy header if they exist - if let Some(headers_with_proof) = header.headers_with_proof { - let mut leaf_indices = vec![]; - let parachain_headers = headers_with_proof - .headers - .into_iter() - .map(|header| { - let leaf_index = client_state - .to_leaf_index(header.partial_mmr_leaf.parent_number_and_hash.0 + 1); - leaf_indices.push(leaf_index as u64); - ParachainHeader { - parachain_header: header.parachain_header.encode(), - partial_mmr_leaf: header.partial_mmr_leaf, - para_id: client_state.para_id, - parachain_heads_proof: header.parachain_heads_proof, - heads_leaf_index: header.heads_leaf_index, - heads_total_count: header.heads_total_count, - extrinsic_proof: header.extrinsic_proof, - timestamp_extrinsic: header.timestamp_extrinsic, - } - }) - .collect::>(); - - let leaf_count = - (client_state.to_leaf_index(light_client_state.latest_beefy_height) + 1) as u64; - - let parachain_update_proof = ParachainsUpdateProof { - parachain_headers, - mmr_proof: BatchProof { - leaf_indices, - leaf_count, - items: headers_with_proof - .mmr_proofs - .into_iter() - .map(|item| { - H256::decode(&mut &*item).map_err(|e| { - Error::beefy(BeefyError::invalid_mmr_update(format!("{:?}", e))) - }) - }) - .collect::, _>>()?, - }, - }; - - // Perform the parachain header verification - beefy_client::verify_parachain_headers::>( - light_client_state, - parachain_update_proof, - ) - .map_err(|e| Error::beefy(BeefyError::invalid_mmr_update(format!("{:?}", e))))? - } - - Ok(()) - } - - fn update_state( - &self, - ctx: &dyn ReaderContext, - client_id: ClientId, - client_state: Self::ClientState, - header: Self::Header, - ) -> Result<(Self::ClientState, ConsensusUpdateResult), Error> { - let mut parachain_cs_states = vec![]; - // Extract the new client state from the verified header - let mut client_state = client_state - .from_header(header.clone()) - .map_err(Error::beefy)?; - let mut latest_para_height = client_state.latest_para_height; - - if let Some(parachain_headers) = header.headers_with_proof { - for header in parachain_headers.headers { - // Skip genesis block of parachains since it has no timestamp or ibc root - if header.parachain_header.number == 0 { - continue; - } - if latest_para_height < header.parachain_header.number { - latest_para_height = header.parachain_header.number; - } - let height = Height::new( - client_state.para_id as u64, - header.parachain_header.number as u64, - ); - // Skip duplicate consensus states - if ctx.consensus_state(&client_id, height).is_ok() { - continue; - } - parachain_cs_states.push(( - height, - AnyConsensusState::Beefy(ConsensusState::from_header(header)?), - )) - } - } - - client_state.latest_para_height = latest_para_height; - - Ok(( - client_state, - ConsensusUpdateResult::Batch(parachain_cs_states), - )) - } - - fn update_state_on_misbehaviour( - &self, - client_state: Self::ClientState, - header: Self::Header, - ) -> Result { - let latest_para_height = header - .headers_with_proof - .map(|headers| { - headers - .headers - .into_iter() - .map(|header| header.parachain_header.number) - .max() - }) - .flatten(); - let frozen_height = latest_para_height - .map(|height| Height::new(client_state.para_id.into(), height.into())) - .unwrap_or(Height::new( - client_state.para_id.into(), - client_state.latest_para_height.into(), - )); - client_state - .with_frozen_height(frozen_height) - .map_err(|e| Error::beefy(BeefyError::implementation_specific(e.to_string()))) - } - - fn 
check_for_misbehaviour( - &self, - _ctx: &dyn ReaderContext, - _client_id: ClientId, - _client_state: Self::ClientState, - _header: Self::Header, - ) -> Result { - Ok(false) - } - - fn verify_upgrade_and_update_state( - &self, - _client_state: &Self::ClientState, - _consensus_state: &Self::ConsensusState, - _proof_upgrade_client: Vec, - _proof_upgrade_consensus_state: Vec, - ) -> Result<(Self::ClientState, ConsensusUpdateResult), Error> { - // TODO: - Err(Error::beefy(BeefyError::implementation_specific( - "Not implemented".to_string(), - ))) - } - - fn verify_client_consensus_state( - &self, - _ctx: &dyn ReaderContext, - client_state: &Self::ClientState, - height: Height, - prefix: &CommitmentPrefix, - proof: &CommitmentProofBytes, - root: &CommitmentRoot, - client_id: &ClientId, - consensus_height: Height, - expected_consensus_state: &AnyConsensusState, - ) -> Result<(), Error> { - client_state.verify_height(height)?; - let path = ClientConsensusStatePath { - client_id: client_id.clone(), - epoch: consensus_height.revision_number, - height: consensus_height.revision_height, - }; - let value = expected_consensus_state.encode_vec(); - verify_membership::(prefix, proof, root, path, value) - } - - // Consensus state will be verified in the verification functions before these are called - fn verify_connection_state( - &self, - _ctx: &dyn ReaderContext, - _client_id: &ClientId, - client_state: &Self::ClientState, - height: Height, - prefix: &CommitmentPrefix, - proof: &CommitmentProofBytes, - root: &CommitmentRoot, - connection_id: &ConnectionId, - expected_connection_end: &ConnectionEnd, - ) -> Result<(), Error> { - client_state.verify_height(height)?; - let path = ConnectionsPath(connection_id.clone()); - let value = expected_connection_end.encode_vec(); - verify_membership::(prefix, proof, root, path, value) - } - - fn verify_channel_state( - &self, - _ctx: &dyn ReaderContext, - _client_id: &ClientId, - client_state: &Self::ClientState, - height: Height, - prefix: &CommitmentPrefix, - proof: &CommitmentProofBytes, - root: &CommitmentRoot, - port_id: &PortId, - channel_id: &ChannelId, - expected_channel_end: &ChannelEnd, - ) -> Result<(), Error> { - client_state.verify_height(height)?; - let path = ChannelEndsPath(port_id.clone(), *channel_id); - let value = expected_channel_end.encode_vec(); - verify_membership::(prefix, proof, root, path, value) - } - - fn verify_client_full_state( - &self, - _ctx: &dyn ReaderContext, - client_state: &Self::ClientState, - height: Height, - prefix: &CommitmentPrefix, - proof: &CommitmentProofBytes, - root: &CommitmentRoot, - client_id: &ClientId, - expected_client_state: &AnyClientState, - ) -> Result<(), Error> { - client_state.verify_height(height)?; - let path = ClientStatePath(client_id.clone()); - let value = expected_client_state.encode_vec(); - verify_membership::(prefix, proof, root, path, value) - } - - fn verify_packet_data( - &self, - ctx: &dyn ReaderContext, - _client_id: &ClientId, - client_state: &Self::ClientState, - height: Height, - connection_end: &ConnectionEnd, - proof: &CommitmentProofBytes, - root: &CommitmentRoot, - port_id: &PortId, - channel_id: &ChannelId, - sequence: Sequence, - commitment: PacketCommitment, - ) -> Result<(), Error> { - client_state.verify_height(height)?; - verify_delay_passed(ctx, height, connection_end)?; - - let commitment_path = CommitmentsPath { - port_id: port_id.clone(), - channel_id: *channel_id, - sequence, - }; - - verify_membership::( - connection_end.counterparty().prefix(), - proof, - root, - 
commitment_path, - commitment.into_vec(), - ) - } - - fn verify_packet_acknowledgement( - &self, - ctx: &dyn ReaderContext, - _client_id: &ClientId, - client_state: &Self::ClientState, - height: Height, - connection_end: &ConnectionEnd, - proof: &CommitmentProofBytes, - root: &CommitmentRoot, - port_id: &PortId, - channel_id: &ChannelId, - sequence: Sequence, - ack: AcknowledgementCommitment, - ) -> Result<(), Error> { - client_state.verify_height(height)?; - verify_delay_passed(ctx, height, connection_end)?; - - let ack_path = AcksPath { - port_id: port_id.clone(), - channel_id: *channel_id, - sequence, - }; - verify_membership::( - connection_end.counterparty().prefix(), - proof, - root, - ack_path, - ack.into_vec(), - ) - } - - fn verify_next_sequence_recv( - &self, - ctx: &dyn ReaderContext, - _client_id: &ClientId, - client_state: &Self::ClientState, - height: Height, - connection_end: &ConnectionEnd, - proof: &CommitmentProofBytes, - root: &CommitmentRoot, - port_id: &PortId, - channel_id: &ChannelId, - sequence: Sequence, - ) -> Result<(), Error> { - client_state.verify_height(height)?; - verify_delay_passed(ctx, height, connection_end)?; - - let seq_bytes = codec::Encode::encode(&u64::from(sequence)); - - let seq_path = SeqRecvsPath(port_id.clone(), *channel_id); - verify_membership::( - connection_end.counterparty().prefix(), - proof, - root, - seq_path, - seq_bytes, - ) - } - - fn verify_packet_receipt_absence( - &self, - ctx: &dyn ReaderContext, - _client_id: &ClientId, - client_state: &Self::ClientState, - height: Height, - connection_end: &ConnectionEnd, - proof: &CommitmentProofBytes, - root: &CommitmentRoot, - port_id: &PortId, - channel_id: &ChannelId, - sequence: Sequence, - ) -> Result<(), Error> { - client_state.verify_height(height)?; - verify_delay_passed(ctx, height, connection_end)?; - - let receipt_path = ReceiptsPath { - port_id: port_id.clone(), - channel_id: *channel_id, - sequence, - }; - verify_non_membership::( - connection_end.counterparty().prefix(), - proof, - root, - receipt_path, - ) - } -} - -fn verify_membership>( - prefix: &CommitmentPrefix, - proof: &CommitmentProofBytes, - root: &CommitmentRoot, - path: P, - value: Vec, -) -> Result<(), Error> { - if root.as_bytes().len() != 32 { - return Err(Error::beefy(BeefyError::invalid_commitment_root())); - } - let path: Path = path.into(); - let path = path.to_string(); - let mut key = prefix.as_bytes().to_vec(); - key.extend(path.as_bytes()); - let trie_proof: Vec = proof.clone().into(); - let trie_proof: Vec> = codec::Decode::decode(&mut &*trie_proof) - .map_err(|e| Error::beefy(BeefyError::scale_decode(e)))?; - let root = H256::from_slice(root.as_bytes()); - HostFunctions::verify_membership_trie_proof(root.as_fixed_bytes(), &trie_proof, &key, &value) -} - -fn verify_non_membership>( - prefix: &CommitmentPrefix, - proof: &CommitmentProofBytes, - root: &CommitmentRoot, - path: P, -) -> Result<(), Error> { - if root.as_bytes().len() != 32 { - return Err(Error::beefy(BeefyError::invalid_commitment_root())); - } - let path: Path = path.into(); - let path = path.to_string(); - let mut key = prefix.as_bytes().to_vec(); - key.extend(path.as_bytes()); - let trie_proof: Vec = proof.clone().into(); - let trie_proof: Vec> = codec::Decode::decode(&mut &*trie_proof) - .map_err(|e| Error::beefy(BeefyError::scale_decode(e)))?; - let root = H256::from_slice(root.as_bytes()); - HostFunctions::verify_non_membership_trie_proof(root.as_fixed_bytes(), &trie_proof, &key) -} - -fn verify_delay_passed( - ctx: &dyn ReaderContext, - 
height: Height, - connection_end: &ConnectionEnd, -) -> Result<(), Error> { - let current_timestamp = ctx.host_timestamp(); - let current_height = ctx.host_height(); - - let client_id = connection_end.client_id(); - let processed_time = ctx.client_update_time(client_id, height).map_err(|_| { - Error::beefy(BeefyError::processed_time_not_found( - client_id.clone(), - height, - )) - })?; - let processed_height = ctx.client_update_height(client_id, height).map_err(|_| { - Error::beefy(BeefyError::processed_height_not_found( - client_id.clone(), - height, - )) - })?; - - let delay_period_time = connection_end.delay_period(); - let delay_period_height = ctx.block_delay(delay_period_time); - - ClientState::verify_delay_passed( - current_timestamp, - current_height, - processed_time, - processed_height, - delay_period_time, - delay_period_height, - ) - .map_err(|e| e.into()) -} - -pub fn downcast_consensus_state(cs: AnyConsensusState) -> Result { - downcast!( - cs => AnyConsensusState::Beefy - ) - .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Beefy)) -} diff --git a/modules/src/clients/ics11_beefy/client_state.rs b/modules/src/clients/ics11_beefy/client_state.rs deleted file mode 100644 index 1ec3366ef2..0000000000 --- a/modules/src/clients/ics11_beefy/client_state.rs +++ /dev/null @@ -1,415 +0,0 @@ -use crate::prelude::*; -use beefy_primitives::known_payload_ids::MMR_ROOT_ID; -use beefy_primitives::mmr::BeefyNextAuthoritySet; -use codec::{Decode, Encode}; -use core::convert::TryFrom; -use core::fmt; -use core::str::FromStr; -use core::time::Duration; -use serde::{Deserialize, Serialize}; -use sp_core::H256; -use sp_runtime::SaturatedConversion; -use tendermint_proto::Protobuf; - -use ibc_proto::ibc::lightclients::beefy::v1::{BeefyAuthoritySet, ClientState as RawClientState}; - -use crate::clients::ics11_beefy::error::Error; -use crate::clients::ics11_beefy::header::BeefyHeader; -use crate::core::ics02_client::client_state::AnyClientState; -use crate::core::ics02_client::client_type::ClientType; -use crate::core::ics24_host::identifier::ChainId; -use crate::timestamp::Timestamp; -use crate::Height; - -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct ClientState { - /// The chain id - pub chain_id: ChainId, - /// Relay chain - pub relay_chain: RelayChain, - /// Latest mmr root hash - pub mmr_root_hash: H256, - /// block number for the latest mmr_root_hash - pub latest_beefy_height: u32, - /// Block height when the client was frozen due to a misbehaviour - pub frozen_height: Option, - /// Block number that the beefy protocol was activated on the relay chain. - /// This should be the first block in the merkle-mountain-range tree. 
- pub beefy_activation_block: u32, - /// latest parachain height - pub latest_para_height: u32, - /// ParaId of associated parachain - pub para_id: u32, - /// authorities for the current round - pub authority: BeefyNextAuthoritySet, - /// authorities for the next round - pub next_authority_set: BeefyNextAuthoritySet, -} - -impl Protobuf for ClientState {} - -impl ClientState { - #[allow(clippy::too_many_arguments)] - pub fn new( - relay_chain: RelayChain, - para_id: u32, - latest_para_height: u32, - mmr_root_hash: H256, - beefy_activation_block: u32, - latest_beefy_height: u32, - authority_set: BeefyNextAuthoritySet, - next_authority_set: BeefyNextAuthoritySet, - ) -> Result { - if beefy_activation_block > latest_beefy_height { - return Err(Error::validation( - "ClientState beefy activation block cannot be greater than latest_beefy_height" - .to_string(), - )); - } - - if authority_set.id >= next_authority_set.id { - return Err(Error::validation( - "ClientState next authority set id must be greater than current authority set id" - .to_string(), - )); - } - let chain_id = ChainId::new(relay_chain.to_string(), para_id.into()); - - Ok(Self { - chain_id, - mmr_root_hash, - latest_beefy_height, - frozen_height: None, - beefy_activation_block, - authority: authority_set, - next_authority_set, - relay_chain, - latest_para_height, - para_id, - }) - } - - pub fn to_leaf_index(&self, block_number: u32) -> u32 { - if self.beefy_activation_block == 0 { - return block_number.saturating_sub(1); - } - self.beefy_activation_block.saturating_sub(block_number + 1) - } - - /// Should only be called if this header has been verified successfully - pub fn from_header(self, header: BeefyHeader) -> Result { - let mut clone = self.clone(); - let mut authority_changed = false; - let (mmr_root_hash, latest_beefy_height, next_authority_set) = - if let Some(mmr_update) = header.mmr_update_proof { - if mmr_update.signed_commitment.commitment.validator_set_id - == self.next_authority_set.id - { - authority_changed = true; - } - ( - H256::from_slice( - mmr_update - .signed_commitment - .commitment - .payload - .get_raw(&MMR_ROOT_ID) - .ok_or_else(Error::invalid_raw_header)?, - ), - mmr_update.signed_commitment.commitment.block_number, - mmr_update.latest_mmr_leaf.beefy_next_authority_set, - ) - } else { - ( - self.mmr_root_hash, - self.latest_beefy_height, - self.next_authority_set, - ) - }; - clone.mmr_root_hash = mmr_root_hash; - clone.latest_beefy_height = latest_beefy_height; - if authority_changed { - clone.authority = clone.next_authority_set; - clone.next_authority_set = next_authority_set; - } - Ok(clone) - } - - /// Verify the time and height delays - pub fn verify_delay_passed( - current_time: Timestamp, - current_height: Height, - processed_time: Timestamp, - processed_height: Height, - delay_period_time: Duration, - delay_period_blocks: u64, - ) -> Result<(), Error> { - let earliest_time = - (processed_time + delay_period_time).map_err(Error::timestamp_overflow)?; - if !(current_time == earliest_time || current_time.after(&earliest_time)) { - return Err(Error::not_enough_time_elapsed(current_time, earliest_time)); - } - - let earliest_height = processed_height.add(delay_period_blocks); - if current_height < earliest_height { - return Err(Error::not_enough_blocks_elapsed( - current_height, - earliest_height, - )); - } - - Ok(()) - } - - pub fn with_frozen_height(self, h: Height) -> Result { - if h == Height::zero() { - return Err(Error::validation( - "ClientState frozen height must be greater than 
zero".to_string(), - )); - } - Ok(Self { - frozen_height: Some(h), - ..self - }) - } - - /// Verify that the client is at a sufficient height and unfrozen at the given height - pub fn verify_height(&self, height: Height) -> Result<(), Error> { - let latest_para_height = Height::new(self.para_id.into(), self.latest_para_height.into()); - if latest_para_height < height { - return Err(Error::insufficient_height(latest_para_height, height)); - } - - match self.frozen_height { - Some(frozen_height) if frozen_height <= height => { - Err(Error::client_frozen(frozen_height, height)) - } - _ => Ok(()), - } - } - - /// Check if the state is expired when `elapsed` time has passed since the latest consensus - /// state timestamp - pub fn expired(&self, elapsed: Duration) -> bool { - elapsed > self.relay_chain.trusting_period() - } -} - -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct UpgradeOptions; - -impl crate::core::ics02_client::client_state::ClientState for ClientState { - type UpgradeOptions = UpgradeOptions; - - fn chain_id(&self) -> ChainId { - self.chain_id.clone() - } - - fn client_type(&self) -> ClientType { - ClientType::Beefy - } - - fn latest_height(&self) -> Height { - Height::new(self.para_id.into(), self.latest_para_height.into()) - } - - fn frozen_height(&self) -> Option { - self.frozen_height - } - - fn upgrade( - mut self, - upgrade_height: Height, - _upgrade_options: UpgradeOptions, - _chain_id: ChainId, - ) -> Self { - self.frozen_height = None; - // Upgrade the client state - self.latest_beefy_height = upgrade_height.revision_height.saturated_into::(); - - self - } - - fn wrap_any(self) -> AnyClientState { - AnyClientState::Beefy(self) - } -} - -impl TryFrom for ClientState { - type Error = Error; - - fn try_from(raw: RawClientState) -> Result { - let frozen_height = { - let height = Height::new(0, raw.frozen_height.into()); - if height == Height::zero() { - None - } else { - Some(height) - } - }; - - let authority_set = raw - .authority - .and_then(|set| { - Some(BeefyNextAuthoritySet { - id: set.id, - len: set.len, - root: H256::decode(&mut &*set.authority_root).ok()?, - }) - }) - .ok_or_else(Error::missing_beefy_authority_set)?; - - let next_authority_set = raw - .next_authority_set - .and_then(|set| { - Some(BeefyNextAuthoritySet { - id: set.id, - len: set.len, - root: H256::decode(&mut &*set.authority_root).ok()?, - }) - }) - .ok_or_else(Error::missing_beefy_authority_set)?; - - let mmr_root_hash = H256::decode(&mut &*raw.mmr_root_hash).map_err(Error::scale_decode)?; - let relay_chain = RelayChain::from_i32(raw.relay_chain)?; - let chain_id = ChainId::new(relay_chain.to_string(), raw.para_id.into()); - - Ok(Self { - chain_id, - mmr_root_hash, - latest_beefy_height: raw.latest_beefy_height, - frozen_height, - beefy_activation_block: raw.beefy_activation_block, - authority: authority_set, - next_authority_set, - relay_chain, - latest_para_height: raw.latest_para_height, - para_id: raw.para_id, - }) - } -} - -impl From for RawClientState { - fn from(client_state: ClientState) -> Self { - RawClientState { - mmr_root_hash: client_state.mmr_root_hash.encode(), - latest_beefy_height: client_state.latest_beefy_height, - frozen_height: client_state - .frozen_height - .unwrap_or_default() - .revision_height, - beefy_activation_block: client_state.beefy_activation_block, - authority: Some(BeefyAuthoritySet { - id: client_state.authority.id, - len: client_state.authority.len, - authority_root: client_state.authority.root.encode(), - }), - next_authority_set: 
Some(BeefyAuthoritySet { - id: client_state.next_authority_set.id, - len: client_state.next_authority_set.len, - authority_root: client_state.next_authority_set.root.encode(), - }), - relay_chain: client_state.relay_chain as i32, - para_id: client_state.para_id, - latest_para_height: client_state.latest_para_height, - } - } -} - -#[derive(Clone, Copy, Debug, PartialEq, Eq, Deserialize, Serialize)] -pub enum RelayChain { - Polkadot = 0, - Kusama = 1, - Rococo = 2, -} - -impl Default for RelayChain { - fn default() -> Self { - RelayChain::Rococo - } -} - -impl fmt::Display for RelayChain { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}", self.as_str()) - } -} - -// Unbonding period for relay chains in days -const POLKADOT_UNBONDING_PERIOD: u64 = 28; -const KUSAMA_UNBONDING_PERIOD: u64 = 7; - -impl RelayChain { - /// Yields the Order as a string - pub fn as_str(&self) -> &'static str { - match self { - Self::Polkadot => "Polkadot", - Self::Kusama => "Kusama", - Self::Rococo => "Rococo", - } - } - - // Parses the Order out from a i32. - pub fn from_i32(nr: i32) -> Result { - match nr { - 0 => Ok(Self::Polkadot), - 1 => Ok(Self::Kusama), - 2 => Ok(Self::Rococo), - id => Err(Error::unknown_relay_chain(id.to_string())), - } - } - - pub fn unbonding_period(&self) -> Duration { - match self { - Self::Polkadot => { - let secs = POLKADOT_UNBONDING_PERIOD * 24 * 60 * 60; - Duration::from_secs(secs) - } - Self::Kusama | Self::Rococo => { - let secs = KUSAMA_UNBONDING_PERIOD * 24 * 60 * 60; - Duration::from_secs(secs) - } - } - } - - pub fn trusting_period(&self) -> Duration { - let unbonding_period = self.unbonding_period(); - // Trusting period is 1/3 of unbonding period - unbonding_period.checked_div(3).unwrap() - } -} - -impl FromStr for RelayChain { - type Err = Error; - - fn from_str(s: &str) -> Result { - match s.to_lowercase().trim_start_matches("order_") { - "polkadot" => Ok(Self::Polkadot), - "kusama" => Ok(Self::Kusama), - "rococo" => Ok(Self::Rococo), - _ => Err(Error::unknown_relay_chain(s.to_string())), - } - } -} - -#[cfg(any(test, feature = "mocks"))] -pub mod test_util { - use super::*; - use crate::core::ics02_client::client_state::AnyClientState; - - pub fn get_dummy_beefy_state() -> AnyClientState { - AnyClientState::Beefy( - ClientState::new( - RelayChain::Rococo, - 2000, - 0, - Default::default(), - 0, - 0, - Default::default(), - Default::default(), - ) - .unwrap(), - ) - } -} diff --git a/modules/src/clients/ics11_beefy/consensus_state.rs b/modules/src/clients/ics11_beefy/consensus_state.rs deleted file mode 100644 index debaeb1130..0000000000 --- a/modules/src/clients/ics11_beefy/consensus_state.rs +++ /dev/null @@ -1,120 +0,0 @@ -use crate::prelude::*; - -use core::convert::Infallible; -use core::fmt::Debug; -use serde::Serialize; -use tendermint::time::Time; -use tendermint_proto::google::protobuf as tpb; -use tendermint_proto::Protobuf; - -use ibc_proto::ibc::lightclients::beefy::v1::ConsensusState as RawConsensusState; - -use crate::clients::ics11_beefy::error::Error; -use crate::clients::ics11_beefy::header::ParachainHeader; -use crate::core::ics02_client::client_consensus::AnyConsensusState; -use crate::core::ics02_client::client_type::ClientType; -use crate::core::ics23_commitment::commitment::CommitmentRoot; - -#[derive(Clone, Debug, PartialEq, Eq, Serialize)] -pub struct ConsensusState { - pub timestamp: Time, - pub root: CommitmentRoot, -} - -impl ConsensusState { - pub fn new(root: Vec, timestamp: Time) -> Self { - Self { - timestamp, - 
root: root.into(), - } - } - - pub fn from_header(header: ParachainHeader) -> Result { - use crate::clients::ics11_beefy::header::decode_timestamp_extrinsic; - use crate::timestamp::Timestamp; - use sp_runtime::SaturatedConversion; - let root = header.parachain_header.state_root.0.to_vec(); - - let timestamp = decode_timestamp_extrinsic(&header)?; - let duration = core::time::Duration::from_millis(timestamp); - let timestamp = Timestamp::from_nanoseconds(duration.as_nanos().saturated_into::()) - .map_err(|e| { - Error::invalid_header(format!( - "Failed to decode timestamp extrinsic, got {}", - e.to_string() - )) - })? - .into_tm_time() - .ok_or_else(|| { - Error::invalid_header( - "Error decoding Timestamp, timestamp cannot be zero".to_string(), - ) - })?; - - Ok(Self { - root: root.into(), - timestamp, - }) - } -} - -impl crate::core::ics02_client::client_consensus::ConsensusState for ConsensusState { - type Error = Infallible; - - fn client_type(&self) -> ClientType { - ClientType::Beefy - } - - fn root(&self) -> &CommitmentRoot { - &self.root - } - - fn wrap_any(self) -> AnyConsensusState { - AnyConsensusState::Beefy(self) - } -} - -impl Protobuf for ConsensusState {} - -impl TryFrom for ConsensusState { - type Error = Error; - - fn try_from(raw: RawConsensusState) -> Result { - let ibc_proto::google::protobuf::Timestamp { seconds, nanos } = raw - .timestamp - .ok_or_else(|| Error::invalid_raw_consensus_state("missing timestamp".into()))?; - let proto_timestamp = tpb::Timestamp { seconds, nanos }; - let timestamp = proto_timestamp - .try_into() - .map_err(|e| Error::invalid_raw_consensus_state(format!("invalid timestamp: {}", e)))?; - - Ok(Self { - root: raw.root.into(), - timestamp, - }) - } -} - -impl From for RawConsensusState { - fn from(value: ConsensusState) -> Self { - let tpb::Timestamp { seconds, nanos } = value.timestamp.into(); - let timestamp = ibc_proto::google::protobuf::Timestamp { seconds, nanos }; - - RawConsensusState { - timestamp: Some(timestamp), - root: value.root.into_vec(), - } - } -} - -#[cfg(any(test, feature = "mocks"))] -pub mod test_util { - use super::*; - - pub fn get_dummy_beefy_consensus_state() -> AnyConsensusState { - AnyConsensusState::Beefy(ConsensusState { - timestamp: Time::now(), - root: vec![0; 32].into(), - }) - } -} diff --git a/modules/src/clients/ics11_beefy/error.rs b/modules/src/clients/ics11_beefy/error.rs deleted file mode 100644 index ee8ef393c2..0000000000 --- a/modules/src/clients/ics11_beefy/error.rs +++ /dev/null @@ -1,207 +0,0 @@ -use crate::prelude::*; - -use flex_error::{define_error, TraceError}; - -use crate::core::ics23_commitment::error::Error as Ics23Error; -use crate::core::ics24_host::error::ValidationError; -use crate::core::ics24_host::identifier::ClientId; -use crate::timestamp::{Timestamp, TimestampOverflowError}; -use codec::Error as ScaleCodecError; - -use crate::Height; - -define_error! 
{ - #[derive(Debug, PartialEq, Eq)] - Error { - InvalidAddress - |_| { "invalid address" }, - - InvalidTrieProof - |_| { "invalid trie proof" }, - - InvalidMmrUpdate - { reason: String } - |e| { format_args!("invalid mmr update {}", e.reason) }, - - InvalidCommitmentRoot - |_| { "invalid commitment root" }, - - TimestampExtrinsic - { reason: String } - |e| { format_args!("error decoding timestamp extrinsic {}", e.reason) }, - - InvalidHeader - { reason: String } - |e| { format_args!("invalid header, failed basic validation: {}", e.reason) }, - - ImplementationSpecific - { reason: String } - |e| { format_args!("Implementation specific error: {}", e.reason) }, - - Validation - { reason: String } - |e| { format_args!("invalid header, failed basic validation: {}", e.reason) }, - - InvalidRawClientState - { reason: String } - |e| { format_args!("invalid raw client state: {}", e.reason) }, - - InvalidChainIdentifier - [ ValidationError ] - |_| { "invalid chain identifier" }, - - MissingLatestHeight - |_| { "missing latest height" }, - - MissingBeefyAuthoritySet - |_| { "missing beefy authority set" }, - - MissingFrozenHeight - |_| { "missing frozen height" }, - - InvalidChainId - { raw_value: String } - [ ValidationError ] - |e| { format_args!("invalid chain identifier: {}", e.raw_value) }, - - InvalidRawHeight - { raw_height: u64 } - |e| { format_args!("invalid raw height: {}", e.raw_height) }, - - InvalidRawConsensusState - { reason: String } - | e | { format_args!("invalid raw client consensus state: {}", e.reason) }, - - InvalidRawHeader - | _ | { "invalid raw header" }, - - InvalidRawMisbehaviour - { reason: String } - | e | { format_args!("invalid raw misbehaviour: {}", e.reason) }, - - ScaleDecode - [ TraceError ] - | _ | { "Scale decode error" }, - Decode - [ TraceError ] - | _ | { "decode error" }, - - LowUpdateTimestamp - { - low: String, - high: String - } - | e | { - format_args!("header timestamp {0} must be greater than current client consensus state timestamp {1}", e.low, e.high) - }, - - HeaderTimestampTooHigh - { - actual: String, - max: String, - } - | e | { - format_args!("given other previous updates, header timestamp should be at most {0}, but was {1}", e.max, e.actual) - }, - - HeaderTimestampTooLow - { - actual: String, - min: String, - } - | e | { - format_args!("given other previous updates, header timestamp should be at least {0}, but was {1}", e.min, e.actual) - }, - - TimestampOverflow - [ TimestampOverflowError ] - |_| { "timestamp overflowed" }, - - NotEnoughTimeElapsed - { - current_time: Timestamp, - earliest_time: Timestamp, - } - | e | { - format_args!("not enough time elapsed, current timestamp {0} is still less than earliest acceptable timestamp {1}", e.current_time, e.earliest_time) - }, - - NotEnoughBlocksElapsed - { - current_height: Height, - earliest_height: Height, - } - | e | { - format_args!("not enough blocks elapsed, current height {0} is still less than earliest acceptable height {1}", e.current_height, e.earliest_height) - }, - - InvalidHeaderHeight - { height: Height } - | e | { - format_args!("header height = {0} is invalid", e.height) - }, - - LowUpdateHeight - { - low: Height, - high: Height - } - | e | { - format_args!("header height is {0} but it must be greater than the current client height which is {1}", e.low, e.high) - }, - - ProcessedTimeNotFound - { - client_id: ClientId, - height: Height, - } - | e | { - format_args!( - "Processed time for the client {0} at height {1} not found", - e.client_id, e.height) - }, - - 
ProcessedHeightNotFound - { - client_id: ClientId, - height: Height, - } - | e | { - format_args!( - "Processed height for the client {0} at height {1} not found", - e.client_id, e.height) - }, - - - VerificationError - { reason: String } - | e | { - format_args!("verification failed: {:?}", e.reason) - }, - - Ics23Error - [ Ics23Error ] - | _ | { "ics23 commitment error" }, - - InsufficientHeight - { - latest_height: Height, - target_height: Height, - } - | e | { - format_args!("the height is insufficient: latest_height={0} target_height={1}", e.latest_height, e.target_height) - }, - - ClientFrozen - { - frozen_height: Height, - target_height: Height, - } - | e | { - format_args!("the client is frozen: frozen_height={0} target_height={1}", e.frozen_height, e.target_height) - }, - UnknownRelayChain - { type_id: String } - | e | { format_args!("Relaychain type not known: {}", e.type_id) }, - } -} diff --git a/modules/src/clients/ics11_beefy/header.rs b/modules/src/clients/ics11_beefy/header.rs deleted file mode 100644 index 9e8280fb60..0000000000 --- a/modules/src/clients/ics11_beefy/header.rs +++ /dev/null @@ -1,430 +0,0 @@ -use prost::Message; -use tendermint_proto::Protobuf; - -use crate::clients::ics11_beefy::error::Error; -use crate::core::ics02_client::client_type::ClientType; -use crate::core::ics02_client::header::AnyHeader; -use alloc::string::ToString; -use alloc::vec; -use alloc::vec::Vec; -use beefy_client_primitives::{ - BeefyNextAuthoritySet, Hash, MmrUpdateProof, PartialMmrLeaf, SignatureWithAuthorityIndex, - SignedCommitment, -}; -use beefy_primitives::known_payload_ids::MMR_ROOT_ID; -use beefy_primitives::mmr::{MmrLeaf, MmrLeafVersion}; -use beefy_primitives::{Commitment, Payload}; -use bytes::Buf; -use codec::{Compact, Decode, Encode}; -use ibc_proto::ibc::lightclients::beefy::v1::{ - BeefyAuthoritySet as RawBeefyAuthoritySet, BeefyMmrLeaf as RawBeefyMmrLeaf, - BeefyMmrLeafPartial as RawBeefyMmrLeafPartial, ClientStateUpdateProof as RawMmrUpdateProof, - Commitment as RawCommitment, CommitmentSignature, ConsensusStateUpdateProof, - Header as RawBeefyHeader, PayloadItem, SignedCommitment as RawSignedCommitment, -}; -use pallet_mmr_primitives::Proof; -use sp_core::H256; -use sp_runtime::generic::Header as SubstrateHeader; -use sp_runtime::traits::{BlakeTwo256, SaturatedConversion}; - -/// Beefy consensus header -#[derive(Clone, PartialEq, Eq, Debug)] -pub struct BeefyHeader { - pub headers_with_proof: Option, - pub mmr_update_proof: Option, // Proof for updating the latest mmr root hash -} - -#[derive(Clone, PartialEq, Eq, Debug)] -pub struct ParachainHeadersWithProof { - pub headers: Vec, // contains the parachain headers - pub mmr_proofs: Vec>, // mmr proofs for these headers - pub mmr_size: u64, // The latest mmr size -} - -impl crate::core::ics02_client::header::Header for BeefyHeader { - fn client_type(&self) -> ClientType { - ClientType::Beefy - } - - fn wrap_any(self) -> AnyHeader { - AnyHeader::Beefy(self) - } -} - -#[derive(Clone, PartialEq, Eq, Debug, codec::Encode, codec::Decode)] -pub struct ParachainHeader { - pub parachain_header: SubstrateHeader, - /// Reconstructed mmr leaf - pub partial_mmr_leaf: PartialMmrLeaf, - /// Proof for our parachain header inclusion in the parachain headers root - pub parachain_heads_proof: Vec, - /// leaf index for parachain heads proof - pub heads_leaf_index: u32, - /// Total number of parachain heads - pub heads_total_count: u32, - /// Trie merkle proof of inclusion of the set timestamp extrinsic in header.extrinsic_root - pub 
extrinsic_proof: Vec>, - /// this already encodes the actual extrinsic - pub timestamp_extrinsic: Vec, -} - -pub fn split_leaf_version(version: u8) -> (u8, u8) { - let major = version >> 5; - let minor = version & 0b11111; - (major, minor) -} - -pub fn merge_leaf_version(major: u8, minor: u8) -> u8 { - (major << 5) + minor -} - -impl TryFrom for BeefyHeader { - type Error = Error; - - fn try_from(raw_header: RawBeefyHeader) -> Result { - let headers_with_proof = raw_header - .consensus_state - .map(|consensus_update| { - let parachain_headers = consensus_update - .parachain_headers - .into_iter() - .map(|raw_para_header| { - let mmr_partial_leaf = raw_para_header - .mmr_leaf_partial - .ok_or_else(Error::invalid_raw_header)?; - let parent_hash = - H256::decode(&mut mmr_partial_leaf.parent_hash.as_slice()).unwrap(); - let beefy_next_authority_set = - if let Some(next_set) = mmr_partial_leaf.beefy_next_authority_set { - BeefyNextAuthoritySet { - id: next_set.id, - len: next_set.len, - root: H256::decode(&mut next_set.authority_root.as_slice()) - .map_err(|e| Error::invalid_mmr_update(e.to_string()))?, - } - } else { - Default::default() - }; - Ok(ParachainHeader { - parachain_header: decode_parachain_header( - raw_para_header.parachain_header, - ) - .map_err(|_| Error::invalid_raw_header())?, - partial_mmr_leaf: PartialMmrLeaf { - version: { - let (major, minor) = split_leaf_version( - mmr_partial_leaf.version.saturated_into::(), - ); - MmrLeafVersion::new(major, minor) - }, - parent_number_and_hash: ( - mmr_partial_leaf.parent_number, - parent_hash, - ), - beefy_next_authority_set, - }, - parachain_heads_proof: raw_para_header - .parachain_heads_proof - .into_iter() - .map(|item| { - let mut dest = [0u8; 32]; - if item.len() != 32 { - return Err(Error::invalid_raw_header()); - } - dest.copy_from_slice(&*item); - Ok(dest) - }) - .collect::, Error>>()?, - heads_leaf_index: raw_para_header.heads_leaf_index, - heads_total_count: raw_para_header.heads_total_count, - extrinsic_proof: raw_para_header.extrinsic_proof, - timestamp_extrinsic: raw_para_header.timestamp_extrinsic, - }) - }) - .collect::, Error>>() - .ok(); - parachain_headers.map(|parachain_headers| ParachainHeadersWithProof { - headers: parachain_headers, - mmr_proofs: consensus_update.mmr_proofs, - mmr_size: consensus_update.mmr_size, - }) - }) - .flatten(); - - let mmr_update_proof = if let Some(mmr_update) = raw_header.client_state { - let commitment = mmr_update - .signed_commitment - .as_ref() - .ok_or_else(|| { - Error::invalid_mmr_update("Signed commitment is missing".to_string()) - })? - .commitment - .as_ref() - .ok_or_else(|| Error::invalid_mmr_update("Commitment is missing".to_string()))?; - let payload = { - commitment - .payload - .iter() - .filter_map(|item| { - if item.payload_id.as_slice() != MMR_ROOT_ID { - return None; - } - let mut payload_id = [0u8; 2]; - payload_id.copy_from_slice(&item.payload_id); - Some(Payload::new(payload_id, item.payload_data.clone())) - }) - .collect::>() - .get(0) - .ok_or_else(|| Error::invalid_mmr_update("".to_string()))? - .clone() - }; - let block_number = commitment.block_numer; - let validator_set_id = commitment.validator_set_id; - let signatures = mmr_update - .signed_commitment - .ok_or_else(|| { - Error::invalid_mmr_update("Signed Commiment is missing".to_string()) - })? 
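// Illustrative aside, not part of this patch: `split_leaf_version` / `merge_leaf_version`
// above pack the MMR leaf version into a single byte, major in the 3 high bits and minor in
// the 5 low bits, so the two functions are exact inverses; e.g. 0b010_00011 (= 67) splits
// into (major = 2, minor = 3) and merges back to 67.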
- .signatures - .into_iter() - .map(|commitment_sig| { - if commitment_sig.signature.len() != 65 { - return Err(Error::invalid_mmr_update( - "Invalid signature length".to_string(), - )); - } - Ok(SignatureWithAuthorityIndex { - signature: { - let mut sig = [0u8; 65]; - sig.copy_from_slice(&commitment_sig.signature); - sig - }, - index: commitment_sig.authority_index, - }) - }) - .collect::, Error>>()?; - - let mmr_leaf = mmr_update - .mmr_leaf - .as_ref() - .ok_or_else(|| Error::invalid_mmr_update("Mmr Leaf is missing".to_string()))?; - let beefy_next_authority_set = - mmr_leaf.beefy_next_authority_set.as_ref().ok_or_else(|| { - Error::invalid_mmr_update("Beefy Next Authority set is missing".to_string()) - })?; - - Some(MmrUpdateProof { - signed_commitment: SignedCommitment { - commitment: Commitment { - payload, - block_number, - validator_set_id, - }, - signatures, - }, - latest_mmr_leaf: MmrLeaf { - version: { - let (major, minor) = - split_leaf_version(mmr_leaf.version.saturated_into::()); - MmrLeafVersion::new(major, minor) - }, - parent_number_and_hash: { - let parent_number = mmr_leaf.parent_number; - let parent_hash = H256::decode(&mut mmr_leaf.parent_hash.as_slice()) - .map_err(|e| Error::invalid_mmr_update(e.to_string()))?; - (parent_number, parent_hash) - }, - beefy_next_authority_set: BeefyNextAuthoritySet { - id: beefy_next_authority_set.id, - len: beefy_next_authority_set.len, - root: H256::decode(&mut beefy_next_authority_set.authority_root.as_slice()) - .map_err(|e| Error::invalid_mmr_update(e.to_string()))?, - }, - leaf_extra: H256::decode(&mut mmr_leaf.parachain_heads.as_slice()) - .map_err(|e| Error::invalid_mmr_update(e.to_string()))?, - }, - mmr_proof: Proof { - leaf_index: mmr_update.mmr_leaf_index, - leaf_count: mmr_update.mmr_leaf_index + 1, - items: mmr_update - .mmr_proof - .into_iter() - .map(|item| { - H256::decode(&mut &*item) - .map_err(|e| Error::invalid_mmr_update(e.to_string())) - }) - .collect::, Error>>()?, - }, - authority_proof: mmr_update - .authorities_proof - .into_iter() - .map(|item| { - if item.len() != 32 { - return Err(Error::invalid_mmr_update( - "Invalid authorities proof".to_string(), - )); - } - let mut dest = [0u8; 32]; - dest.copy_from_slice(&item); - Ok(dest) - }) - .collect::, Error>>()?, - }) - } else { - None - }; - - Ok(Self { - headers_with_proof, - mmr_update_proof, - }) - } -} - -impl From for RawBeefyHeader { - fn from(beefy_header: BeefyHeader) -> Self { - Self { - consensus_state: beefy_header.headers_with_proof.map(|headers| { - let parachain_headers = headers - .headers - .into_iter() - .map( - |para_header| ibc_proto::ibc::lightclients::beefy::v1::ParachainHeader { - parachain_header: para_header.parachain_header.encode(), - mmr_leaf_partial: Some(RawBeefyMmrLeafPartial { - version: { - let (major, minor) = - para_header.partial_mmr_leaf.version.split(); - merge_leaf_version(major, minor) as u32 - }, - parent_number: para_header - .partial_mmr_leaf - .parent_number_and_hash - .0, - parent_hash: para_header - .partial_mmr_leaf - .parent_number_and_hash - .1 - .encode(), - beefy_next_authority_set: Some(RawBeefyAuthoritySet { - id: para_header.partial_mmr_leaf.beefy_next_authority_set.id, - len: para_header.partial_mmr_leaf.beefy_next_authority_set.len, - authority_root: para_header - .partial_mmr_leaf - .beefy_next_authority_set - .root - .encode(), - }), - }), - parachain_heads_proof: para_header - .parachain_heads_proof - .into_iter() - .map(|item| item.to_vec()) - .collect(), - heads_leaf_index: 
para_header.heads_leaf_index, - heads_total_count: para_header.heads_total_count, - extrinsic_proof: para_header.extrinsic_proof, - timestamp_extrinsic: para_header.timestamp_extrinsic, - }, - ) - .collect(); - ConsensusStateUpdateProof { - parachain_headers, - mmr_proofs: headers.mmr_proofs, - mmr_size: headers.mmr_size, - } - }), - client_state: if let Some(mmr_update) = beefy_header.mmr_update_proof { - Some(RawMmrUpdateProof { - mmr_leaf: Some(RawBeefyMmrLeaf { - version: { - let (major, minor) = mmr_update.latest_mmr_leaf.version.split(); - merge_leaf_version(major, minor) as u32 - }, - parent_number: mmr_update.latest_mmr_leaf.parent_number_and_hash.0, - parent_hash: mmr_update.latest_mmr_leaf.parent_number_and_hash.1.encode(), - beefy_next_authority_set: Some(RawBeefyAuthoritySet { - id: mmr_update.latest_mmr_leaf.beefy_next_authority_set.id, - len: mmr_update.latest_mmr_leaf.beefy_next_authority_set.len, - authority_root: mmr_update - .latest_mmr_leaf - .beefy_next_authority_set - .root - .encode(), - }), - parachain_heads: mmr_update.latest_mmr_leaf.leaf_extra.encode(), - }), - mmr_leaf_index: mmr_update.mmr_proof.leaf_index, - mmr_proof: mmr_update - .mmr_proof - .items - .into_iter() - .map(|item| item.encode()) - .collect(), - signed_commitment: Some(RawSignedCommitment { - commitment: Some(RawCommitment { - payload: vec![PayloadItem { - payload_id: MMR_ROOT_ID.to_vec(), - payload_data: mmr_update - .signed_commitment - .commitment - .payload - .get_raw(&MMR_ROOT_ID) - .unwrap() - .clone(), - }], - block_numer: mmr_update.signed_commitment.commitment.block_number, - validator_set_id: mmr_update - .signed_commitment - .commitment - .validator_set_id, - }), - signatures: mmr_update - .signed_commitment - .signatures - .into_iter() - .map(|item| CommitmentSignature { - signature: item.signature.to_vec(), - authority_index: item.index, - }) - .collect(), - }), - authorities_proof: mmr_update - .authority_proof - .into_iter() - .map(|item| item.to_vec()) - .collect(), - }) - } else { - None - }, - } - } -} - -impl Protobuf for BeefyHeader {} - -pub fn decode_parachain_header( - raw_header: Vec, -) -> Result, Error> { - SubstrateHeader::decode(&mut &*raw_header) - .map_err(|_| Error::invalid_header("failed to decode parachain header".to_string())) -} - -pub fn decode_header(buf: B) -> Result { - RawBeefyHeader::decode(buf) - .map_err(Error::decode)? - .try_into() -} - -/// Attempt to extract the timestamp extrinsic from the parachain header -pub fn decode_timestamp_extrinsic(header: &ParachainHeader) -> Result { - let ext = &*header.timestamp_extrinsic; - // Timestamp extrinsic should be the first inherent and hence the first extrinsic - // https://github.com/paritytech/substrate/blob/d602397a0bbb24b5d627795b797259a44a5e29e9/primitives/trie/src/lib.rs#L99-L101 - // Decoding from the [2..] because the timestamp inmherent has two extra bytes before the call that represents the - // call length and the extrinsic version. 
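// Illustrative layout, assuming the usual encoding of an unsigned (inherent) extrinsic:
//     bytes[0]   compact-encoded extrinsic length (one byte for a short extrinsic)
//     bytes[1]   extrinsic format/version byte
//     bytes[2]   pallet index of the timestamp pallet
//     bytes[3]   call index of `set`
//     bytes[4..] Compact<u64> timestamp in milliseconds
// which is why the decode below reads a `(u8, u8, Compact<u64>)` triple from `&ext[2..]`.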
- let (_, _, timestamp): (u8, u8, Compact) = codec::Decode::decode(&mut &ext[2..]) - .map_err(|_| Error::timestamp_extrinsic("Failed to decode extrinsic".to_string()))?; - Ok(timestamp.into()) -} diff --git a/modules/src/clients/ics11_beefy/misbehaviour.rs b/modules/src/clients/ics11_beefy/misbehaviour.rs deleted file mode 100644 index 8b13789179..0000000000 --- a/modules/src/clients/ics11_beefy/misbehaviour.rs +++ /dev/null @@ -1 +0,0 @@ - diff --git a/modules/src/clients/ics11_beefy/mod.rs b/modules/src/clients/ics11_beefy/mod.rs deleted file mode 100644 index ae63c9876d..0000000000 --- a/modules/src/clients/ics11_beefy/mod.rs +++ /dev/null @@ -1,8 +0,0 @@ -//! ICS 13: Beefy Client implements a client verification algorithm for parachains in the dotsama ecosystem. - -pub mod client_def; -pub mod client_state; -pub mod consensus_state; -pub mod error; -pub mod header; -pub mod misbehaviour; diff --git a/modules/src/clients/ics13_near/client_def.rs b/modules/src/clients/ics13_near/client_def.rs deleted file mode 100644 index a59f8e93bf..0000000000 --- a/modules/src/clients/ics13_near/client_def.rs +++ /dev/null @@ -1,376 +0,0 @@ -use crate::clients::host_functions::HostFunctionsProvider; -use crate::core::ics02_client::client_consensus::AnyConsensusState; -use crate::core::ics02_client::client_def::{ClientDef, ConsensusUpdateResult}; -use crate::core::ics02_client::client_state::AnyClientState; -use crate::core::ics03_connection::connection::ConnectionEnd; -use crate::core::ics04_channel::channel::ChannelEnd; -use crate::core::ics04_channel::commitment::{AcknowledgementCommitment, PacketCommitment}; -use crate::core::ics04_channel::packet::Sequence; -use crate::core::ics23_commitment::commitment::{ - CommitmentPrefix, CommitmentProofBytes, CommitmentRoot, -}; -use crate::core::ics24_host::identifier::{ChannelId, ClientId, ConnectionId, PortId}; -use crate::core::ics26_routing::context::ReaderContext; -use crate::Height; -use core::marker::PhantomData; - -use super::client_state::NearClientState; -use super::consensus_state::NearConsensusState; -use crate::core::ics02_client::error::Error; - -use super::error::Error as NearError; -use super::header::NearHeader; -use super::types::{ApprovalInner, CryptoHash, LightClientBlockView}; -use crate::prelude::*; - -use borsh::BorshSerialize; - -#[derive(Debug, Clone)] -pub struct NearClient(PhantomData); - -impl ClientDef for NearClient { - /// The data that we need to update the [`ClientState`] to a new block height - type Header = NearHeader; - - /// The data that we need to know, to validate incoming headers and update the state - /// of our [`ClientState`]. Ususally this will store: - /// - The current epoch - /// - The current validator set - /// - /// ```rust,no_run - /// pub struct NearLightClientState { - /// head: LightClientBlockView, - /// current_validators: Vec, - /// next_validators: Vec, - /// } - /// ``` - type ClientState = NearClientState; - - /// This is usually just two things, that should be derived from the header: - /// - The ibc commitment root hash as described by ics23 (possibly from tx outcome/ state proof) - /// - The timestamp of the header. - type ConsensusState = NearConsensusState; - - // rehydrate client from its own storage, then call this function - fn verify_header( - &self, - _ctx: &dyn ReaderContext, - _client_id: ClientId, - client_state: Self::ClientState, - header: Self::Header, - ) -> Result<(), Error> { - // your light client, shouldn't do storage anymore, it should just do verification here. 
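// Illustrative note, not part of this patch: the actual checks are delegated to
// `validate_light_block` further down in this file, which enforces the six NEAR light
// client conditions (height, epoch, next_bps presence, signatures, >2/3 stake, next_bp_hash).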
- validate_light_block::(&header, client_state) - } - - fn update_state( - &self, - _ctx: &dyn ReaderContext, - _client_id: ClientId, - _client_state: Self::ClientState, - _header: Self::Header, - ) -> Result<(Self::ClientState, ConsensusUpdateResult), Error> { - // 1. create new client state from this header, return that. - // 2. as well as all the neccessary consensus states. - // - // - // []--[]--[]--[]--[]--[]--[]--[]--[]--[] - // 11 12 13 14 15 16 17 18 19 20 <- block merkle root - // ^ ^ - // | <-------consensus states-----> | - // current state new state - - todo!() - } - - fn update_state_on_misbehaviour( - &self, - _client_state: Self::ClientState, - _header: Self::Header, - ) -> Result { - todo!() - } - - fn check_for_misbehaviour( - &self, - _ctx: &dyn ReaderContext, - _client_id: ClientId, - _client_state: Self::ClientState, - _header: Self::Header, - ) -> Result { - Ok(false) - } - - fn verify_upgrade_and_update_state( - &self, - _client_state: &Self::ClientState, - _consensus_state: &Self::ConsensusState, - _proof_upgrade_client: Vec, - _proof_upgrade_consensus_state: Vec, - ) -> Result<(Self::ClientState, ConsensusUpdateResult), Error> { - todo!() - } - - fn verify_client_consensus_state( - &self, - _ctx: &dyn ReaderContext, - _client_state: &Self::ClientState, - _height: Height, - _prefix: &CommitmentPrefix, - _proof: &CommitmentProofBytes, - _root: &CommitmentRoot, - _client_id: &ClientId, - _consensus_height: Height, - _expected_consensus_state: &AnyConsensusState, - ) -> Result<(), Error> { - todo!() - } - - // Consensus state will be verified in the verification functions before these are called - fn verify_connection_state( - &self, - _ctx: &dyn ReaderContext, - _client_id: &ClientId, - _client_state: &Self::ClientState, - _height: Height, - _prefix: &CommitmentPrefix, - _proof: &CommitmentProofBytes, - _root: &CommitmentRoot, - _connection_id: &ConnectionId, - _expected_connection_end: &ConnectionEnd, - ) -> Result<(), Error> { - todo!() - } - - fn verify_channel_state( - &self, - _ctx: &dyn ReaderContext, - _client_id: &ClientId, - _client_state: &Self::ClientState, - _height: Height, - _prefix: &CommitmentPrefix, - _proof: &CommitmentProofBytes, - _root: &CommitmentRoot, - _port_id: &PortId, - _channel_id: &ChannelId, - _expected_channel_end: &ChannelEnd, - ) -> Result<(), Error> { - todo!() - } - - fn verify_client_full_state( - &self, - _ctx: &dyn ReaderContext, - _client_state: &Self::ClientState, - _height: Height, - _prefix: &CommitmentPrefix, - _proof: &CommitmentProofBytes, - _root: &CommitmentRoot, - _client_id: &ClientId, - _expected_client_state: &AnyClientState, - ) -> Result<(), Error> { - todo!() - } - - fn verify_packet_data( - &self, - _ctx: &dyn ReaderContext, - _client_id: &ClientId, - _client_state: &Self::ClientState, - _height: Height, - _connection_end: &ConnectionEnd, - _proof: &CommitmentProofBytes, - _root: &CommitmentRoot, - _port_id: &PortId, - _channel_id: &ChannelId, - _sequence: Sequence, - _commitment: PacketCommitment, - ) -> Result<(), Error> { - todo!() - } - - fn verify_packet_acknowledgement( - &self, - _ctx: &dyn ReaderContext, - _client_id: &ClientId, - _client_state: &Self::ClientState, - _height: Height, - _connection_end: &ConnectionEnd, - _proof: &CommitmentProofBytes, - _root: &CommitmentRoot, - _port_id: &PortId, - _channel_id: &ChannelId, - _sequence: Sequence, - _ack: AcknowledgementCommitment, - ) -> Result<(), Error> { - todo!() - } - - fn verify_next_sequence_recv( - &self, - _ctx: &dyn ReaderContext, - _client_id: 
&ClientId, - _client_state: &Self::ClientState, - _height: Height, - _connection_end: &ConnectionEnd, - _proof: &CommitmentProofBytes, - _root: &CommitmentRoot, - _port_id: &PortId, - _channel_id: &ChannelId, - _sequence: Sequence, - ) -> Result<(), Error> { - todo!() - } - - fn verify_packet_receipt_absence( - &self, - _ctx: &dyn ReaderContext, - _client_id: &ClientId, - _client_state: &Self::ClientState, - _height: Height, - _connection_end: &ConnectionEnd, - _proof: &CommitmentProofBytes, - _root: &CommitmentRoot, - _port_id: &PortId, - _channel_id: &ChannelId, - _sequence: Sequence, - ) -> Result<(), Error> { - todo!() - } -} - -/// validates a light block that's contained on the `NearHeader` based on the current -/// state of the light client. -pub fn validate_light_block( - header: &NearHeader, - client_state: NearClientState, -) -> Result<(), Error> { - //The light client updates its head with the information from LightClientBlockView iff: - - // 1. The height of the block is higher than the height of the current head; - // 2. The epoch of the block is equal to the epoch_id or next_epoch_id known for the current head; - // 3. If the epoch of the block is equal to the next_epoch_id of the head, then next_bps is not None; - // 4. approvals_after_next contain valid signatures on approval_message from the block producers of the corresponding - // epoch - // 5. The signatures present in approvals_after_next correspond to more than 2/3 of the total stake (see next section). - // 6. If next_bps is not none, sha256(borsh(next_bps)) corresponds to the next_bp_hash in inner_lite. - - // QUESTION: do we also want to pass the block hash received from the RPC? - // it's not on the spec, but it's an extra validation - - let new_block_view = header.get_light_client_block_view(); - let current_block_view = client_state.get_head(); - let (_current_block_hash, _next_block_hash, approval_message) = - reconstruct_light_client_block_view_fields::(new_block_view)?; - - // (1) - if new_block_view.inner_lite.height <= current_block_view.inner_lite.height { - return Err(NearError::height_too_old().into()); - } - - // (2) - if ![ - current_block_view.inner_lite.epoch_id, - current_block_view.inner_lite.next_epoch_id, - ] - .contains(&new_block_view.inner_lite.epoch_id) - { - return Err(NearError::invalid_epoch(new_block_view.inner_lite.epoch_id).into()); - } - - // (3) - if new_block_view.inner_lite.epoch_id == current_block_view.inner_lite.next_epoch_id - && new_block_view.next_bps.is_none() - { - return Err(NearError::unavailable_block_producers().into()); - } - - // (4) and (5) - let mut total_stake = 0; - let mut approved_stake = 0; - - let epoch_block_producers = client_state - .get_validators_by_epoch(&new_block_view.inner_lite.epoch_id) - .ok_or_else(|| Error::from(NearError::invalid_epoch(new_block_view.inner_lite.epoch_id)))?; - - for (maybe_signature, block_producer) in new_block_view - .approvals_after_next - .iter() - .zip(epoch_block_producers.iter()) - { - let bp_stake_view = block_producer.clone().into_validator_stake(); - let bp_stake = bp_stake_view.stake; - total_stake += bp_stake; - - if maybe_signature.is_none() { - continue; - } - - approved_stake += bp_stake; - - let validator_public_key = &bp_stake_view.public_key; - let data = H::sha256_digest(&approval_message); - let signature = maybe_signature.as_ref().unwrap(); - if H::ed25519_verify( - signature.get_inner(), - &data, - validator_public_key.get_inner(), - ) { - return Err(NearError::invalid_signature().into()); - } - } - - let 
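// Illustrative arithmetic, not part of this patch: condition (5) above is enforced with
// integer math as `approved_stake > total_stake * 2 / 3`; e.g. with total_stake = 90 the
// threshold is 60, so an approved_stake of 60 is rejected and 61 is accepted.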
threshold = total_stake * 2 / 3; - if approved_stake <= threshold { - return Err(NearError::insufficient_staked_amount().into()); - } - - // # (6) - if new_block_view.next_bps.is_some() { - let new_block_view_next_bps_serialized = new_block_view - .next_bps - .as_deref() - .unwrap() - .try_to_vec() - .map_err(|_| Error::from(NearError::serialization_error()))?; - if H::sha256_digest(new_block_view_next_bps_serialized.as_ref()).as_slice() - != new_block_view.inner_lite.next_bp_hash.as_ref() - { - return Err(NearError::serialization_error().into()); - } - } - Ok(()) -} - -pub fn reconstruct_light_client_block_view_fields( - block_view: &LightClientBlockView, -) -> Result<(CryptoHash, CryptoHash, Vec), Error> { - let current_block_hash = block_view.current_block_hash::(); - let next_block_hash = - next_block_hash::(block_view.next_block_inner_hash, current_block_hash); - let approval_message = [ - ApprovalInner::Endorsement(next_block_hash) - .try_to_vec() - .map_err(|_| Error::from(NearError::serialization_error()))?, - (block_view.inner_lite.height + 2) - .to_le() - .try_to_vec() - .map_err(|_| Error::from(NearError::serialization_error()))?, - ] - .concat(); - Ok((current_block_hash, next_block_hash, approval_message)) -} - -pub(crate) fn next_block_hash( - next_block_inner_hash: CryptoHash, - current_block_hash: CryptoHash, -) -> CryptoHash { - H::sha256_digest( - [next_block_inner_hash.as_ref(), current_block_hash.as_ref()] - .concat() - .as_ref(), - ) - .as_slice() - .try_into() - .expect("Could not hash the next block") -} diff --git a/modules/src/clients/ics13_near/client_state.rs b/modules/src/clients/ics13_near/client_state.rs deleted file mode 100644 index 06d2adda8c..0000000000 --- a/modules/src/clients/ics13_near/client_state.rs +++ /dev/null @@ -1,77 +0,0 @@ -use crate::core::{ - ics02_client::{client_state::ClientState, client_type::ClientType}, - ics24_host::identifier::ChainId, -}; - -use super::types::{CryptoHash, LightClientBlockView, ValidatorStakeView}; -use crate::prelude::*; - -#[derive(Debug, Clone)] -pub struct NearClientState { - chain_id: ChainId, - head: LightClientBlockView, - current_epoch: CryptoHash, - next_epoch: CryptoHash, - current_validators: Vec, - next_validators: Vec, -} - -pub struct NearUpgradeOptions {} - -impl NearClientState { - pub fn get_validators_by_epoch( - &self, - epoch_id: &CryptoHash, - ) -> Option<&Vec> { - if epoch_id == &self.current_epoch { - Some(&self.current_validators) - } else if epoch_id == &self.next_epoch { - Some(&self.next_validators) - } else { - None - } - } - - pub fn get_head(&self) -> &LightClientBlockView { - &self.head - } -} - -impl ClientState for NearClientState { - fn is_frozen(&self) -> bool { - self.frozen_height().is_some() - } - - type UpgradeOptions = NearUpgradeOptions; - - fn chain_id(&self) -> ChainId { - self.chain_id.clone() - } - - fn client_type(&self) -> ClientType { - ClientType::Near - } - - fn latest_height(&self) -> crate::Height { - self.head.get_height() - } - - fn frozen_height(&self) -> Option { - // TODO: validate this - Some(self.head.get_height()) - } - - fn upgrade( - self, - _upgrade_height: crate::Height, - _upgrade_options: Self::UpgradeOptions, - _chain_id: ChainId, - ) -> Self { - // TODO: validate this -- not sure how to process the given parameters in this case - self - } - - fn wrap_any(self) -> crate::core::ics02_client::client_state::AnyClientState { - todo!() - } -} diff --git a/modules/src/clients/ics13_near/consensus_state.rs b/modules/src/clients/ics13_near/consensus_state.rs 
deleted file mode 100644 index a8316cbe58..0000000000 --- a/modules/src/clients/ics13_near/consensus_state.rs +++ /dev/null @@ -1,25 +0,0 @@ -use super::error::Error; -use crate::core::ics02_client::client_consensus::{AnyConsensusState, ConsensusState}; -use crate::core::ics02_client::client_type::ClientType; -use crate::core::ics23_commitment::commitment::CommitmentRoot; - -#[derive(Debug, Clone)] -pub struct NearConsensusState { - commitment_root: CommitmentRoot, -} - -impl ConsensusState for NearConsensusState { - type Error = Error; - - fn client_type(&self) -> ClientType { - ClientType::Near - } - - fn root(&self) -> &CommitmentRoot { - &self.commitment_root - } - - fn wrap_any(self) -> AnyConsensusState { - todo!() - } -} diff --git a/modules/src/clients/ics13_near/error.rs b/modules/src/clients/ics13_near/error.rs deleted file mode 100644 index 880670f1b3..0000000000 --- a/modules/src/clients/ics13_near/error.rs +++ /dev/null @@ -1,31 +0,0 @@ -use super::types::CryptoHash; -use flex_error::define_error; - -define_error! { - #[derive(Debug, PartialEq, Eq)] - Error { - InvalidEpoch - { epoch_id: CryptoHash } - | _ | { "invalid epoch id" }, - HeightTooOld - | _ | { format_args!( - "height too old") - }, - InvalidSignature - | _ | { format_args!( - "invalid signature") - }, - InsufficientStakedAmount - | _ | { format_args!( - "insufficient staked amount") - }, - SerializationError - | _ | { format_args!( - "serialization error") - }, - UnavailableBlockProducers - | _ | { format_args!( - "unavailable block producers") - }, - } -} diff --git a/modules/src/clients/ics13_near/header.rs b/modules/src/clients/ics13_near/header.rs deleted file mode 100644 index 6f98ba6e07..0000000000 --- a/modules/src/clients/ics13_near/header.rs +++ /dev/null @@ -1,27 +0,0 @@ -use crate::core::ics02_client::{ - client_type::ClientType, - header::{AnyHeader, Header}, -}; - -use super::types::LightClientBlockView; - -#[derive(Clone, PartialEq, Eq, Debug)] -pub struct NearHeader { - inner: LightClientBlockView, -} - -impl NearHeader { - pub fn get_light_client_block_view(&self) -> &LightClientBlockView { - &self.inner - } -} - -impl Header for NearHeader { - fn client_type(&self) -> ClientType { - ClientType::Near - } - - fn wrap_any(self) -> AnyHeader { - todo!() - } -} diff --git a/modules/src/clients/ics13_near/mod.rs b/modules/src/clients/ics13_near/mod.rs deleted file mode 100644 index e3721a9ccf..0000000000 --- a/modules/src/clients/ics13_near/mod.rs +++ /dev/null @@ -1,6 +0,0 @@ -mod client_def; -mod client_state; -mod consensus_state; -pub mod error; -pub mod header; -pub mod types; diff --git a/modules/src/clients/ics13_near/types.rs b/modules/src/clients/ics13_near/types.rs deleted file mode 100644 index 2f96a943f4..0000000000 --- a/modules/src/clients/ics13_near/types.rs +++ /dev/null @@ -1,311 +0,0 @@ -use alloc::vec::Vec; -use borsh::maybestd::{io::Write, string::String}; - -use borsh::{BorshDeserialize, BorshSerialize}; -use sp_core::ed25519::{Public as Ed25519Public, Signature as Ed25519Signature}; - -use crate::clients::host_functions::HostFunctionsProvider; -use crate::Height; - -#[derive(Debug)] -pub struct ConversionError(String); - -#[derive(Debug, Clone, PartialEq, Eq, codec::Encode, codec::Decode)] -pub struct PublicKey(pub [u8; 32]); - -#[derive(Debug, Clone, PartialEq, Eq, codec::Encode, codec::Decode)] -pub enum Signature { - Ed25519(Ed25519Signature), -} - -#[derive( - Debug, - Ord, - PartialOrd, - PartialEq, - Eq, - Hash, - Clone, - Copy, - BorshSerialize, - BorshDeserialize, - 
codec::Encode, - codec::Decode, -)] -pub struct CryptoHash(pub [u8; 32]); - -impl Signature { - const LEN: usize = 64; - - pub fn from_raw(raw: &[u8]) -> Self { - Self::Ed25519(Ed25519Signature::from_raw(raw.try_into().unwrap())) - } - - pub fn get_inner(&self) -> &[u8; Self::LEN] { - match self { - Self::Ed25519(inner) => &inner.0, - } - } -} - -impl PublicKey { - const LEN: usize = 32; - - pub fn from_raw(raw: &[u8]) -> Self { - Self(raw.try_into().unwrap()) - } - - pub fn get_inner(&self) -> &[u8; Self::LEN] { - &self.0 - } -} - -impl TryFrom<&[u8]> for CryptoHash { - type Error = ConversionError; - fn try_from(v: &[u8]) -> Result { - if v.len() != 32 { - return Err(ConversionError("wrong size".into())); - } - let inner: [u8; 32] = v.try_into().unwrap(); - Ok(CryptoHash(inner)) - } -} - -impl AsRef<[u8]> for CryptoHash { - fn as_ref(&self) -> &[u8] { - self.0.as_ref() - } -} - -impl From<&PublicKey> for Ed25519Public { - fn from(pubkey: &PublicKey) -> Ed25519Public { - Ed25519Public(pubkey.0) - } -} - -impl TryFrom<&[u8]> for PublicKey { - type Error = ConversionError; - fn try_from(v: &[u8]) -> Result { - if v.len() != 32 { - return Err(ConversionError("wrong size".into())); - } - let inner: [u8; 32] = v.try_into().unwrap(); - Ok(PublicKey(inner)) - } -} - -pub type BlockHeight = u64; -pub type AccountId = String; -pub type Balance = u128; -pub type Gas = u64; - -pub type MerkleHash = CryptoHash; - -#[derive(Debug, Clone, BorshDeserialize)] -pub struct MerklePath(pub Vec); - -#[derive(Debug, Clone)] -pub struct LightClientBlockLiteView { - pub prev_block_hash: CryptoHash, - pub inner_rest_hash: CryptoHash, - pub inner_lite: BlockHeaderInnerLiteView, -} - -#[derive(Debug, Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize)] -pub struct LightClientBlockView { - pub prev_block_hash: CryptoHash, - pub next_block_inner_hash: CryptoHash, - pub inner_lite: BlockHeaderInnerLiteView, - pub inner_rest_hash: CryptoHash, - pub next_bps: Option>, - pub approvals_after_next: Vec>, -} - -#[derive(Debug, Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize)] -pub struct BlockHeaderInnerLiteView { - pub height: BlockHeight, - pub epoch_id: CryptoHash, - pub next_epoch_id: CryptoHash, - pub prev_state_root: CryptoHash, - pub outcome_root: CryptoHash, - pub timestamp: u64, - pub timestamp_nanosec: u64, - pub next_bp_hash: CryptoHash, - // lets assume that this is the merkle root of all blocks in this epoch, so far. - pub block_merkle_root: CryptoHash, -} - -/// For some reason, when calculating the hash of the current block -/// `timestamp_nanosec` is ignored -#[derive(Debug, Clone, BorshSerialize, BorshDeserialize)] -pub struct BlockHeaderInnerLiteViewFinal { - pub height: BlockHeight, - pub epoch_id: CryptoHash, - pub next_epoch_id: CryptoHash, - pub prev_state_root: CryptoHash, - pub outcome_root: CryptoHash, - pub timestamp: u64, - pub next_bp_hash: CryptoHash, - pub block_merkle_root: CryptoHash, -} - -#[derive(Debug, BorshDeserialize, BorshSerialize)] -pub enum ApprovalInner { - Endorsement(CryptoHash), - Skip(BlockHeight), -} - -#[derive(Debug, Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize)] -pub enum ValidatorStakeView { - V1(ValidatorStakeViewV1), -} - -#[derive(Debug, Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize)] -pub struct ValidatorStakeViewV1 { - pub account_id: AccountId, - pub public_key: PublicKey, - pub stake: Balance, -} - -#[derive(Debug, Clone, BorshDeserialize)] -pub struct ExecutionOutcomeView { - /// Logs from this transaction or receipt. 
- pub logs: Vec, - /// Receipt IDs generated by this transaction or receipt. - pub receipt_ids: Vec, - /// The amount of the gas burnt by the given transaction or receipt. - pub gas_burnt: Gas, - /// The amount of tokens burnt corresponding to the burnt gas amount. - /// This value doesn't always equal to the `gas_burnt` multiplied by the gas price, because - /// the prepaid gas price might be lower than the actual gas price and it creates a deficit. - pub tokens_burnt: u128, - /// The id of the account on which the execution happens. For transaction this is signer_id, - /// for receipt this is receiver_id. - pub executor_id: AccountId, - /// Execution status. Contains the result in case of successful execution. - pub status: Vec, // NOTE(blas): no need to deserialize this one (in order to avoid having to define too many unnecessary structs) -} - -#[derive(Debug, BorshDeserialize)] -pub struct OutcomeProof { - /// this is the block merkle proof. - pub proof: Vec, - /// this is the hash of the block. - pub block_hash: CryptoHash, - /// transaction hash - pub id: CryptoHash, - pub outcome: ExecutionOutcomeView, - // TODO: where are the proofs for the block that this tx belongs - // in the block_merkle_root of our light client. -} - -#[cfg_attr(feature = "deepsize_feature", derive(deepsize::DeepSizeOf))] -#[derive(Debug, Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize)] -pub enum Direction { - Left, - Right, -} - -impl ValidatorStakeView { - pub fn into_validator_stake(self) -> ValidatorStakeViewV1 { - match self { - Self::V1(inner) => inner, - } - } -} - -#[cfg_attr(feature = "deepsize_feature", derive(deepsize::DeepSizeOf))] -#[derive(Debug, Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize)] -pub struct MerklePathItem { - pub hash: MerkleHash, - pub direction: Direction, -} - -impl BorshDeserialize for Signature { - fn deserialize(buf: &mut &[u8]) -> Result { - let _key_type: [u8; 1] = BorshDeserialize::deserialize(buf)?; - let array: [u8; Self::LEN] = BorshDeserialize::deserialize(buf)?; - Ok(Signature::Ed25519(Ed25519Signature::from_raw(array))) - } -} - -impl BorshSerialize for Signature { - fn serialize(&self, writer: &mut W) -> Result<(), borsh::maybestd::io::Error> { - match self { - Signature::Ed25519(signature) => { - BorshSerialize::serialize(&0u8, writer)?; - writer.write_all(&signature.0)?; - } - } - Ok(()) - } -} - -impl BorshSerialize for PublicKey { - fn serialize(&self, writer: &mut W) -> Result<(), borsh::maybestd::io::Error> { - BorshSerialize::serialize(&0u8, writer)?; - writer.write_all(&self.0)?; - Ok(()) - } -} - -impl BorshDeserialize for PublicKey { - fn deserialize(buf: &mut &[u8]) -> Result { - let _key_type: [u8; 1] = BorshDeserialize::deserialize(buf)?; - Ok(Self(BorshDeserialize::deserialize(buf)?)) - } -} - -impl LightClientBlockView { - pub fn get_height(&self) -> Height { - Height { - revision_number: 0, - revision_height: self.inner_lite.height, - } - } - - pub fn current_block_hash(&self) -> CryptoHash { - current_block_hash::( - H::sha256_digest(self.inner_lite.try_to_vec().unwrap().as_ref()) - .as_slice() - .try_into() - .unwrap(), - self.inner_rest_hash, - self.prev_block_hash, - ) - } -} - -/// The hash of the block is: -/// ```ignore -/// sha256(concat( -/// sha256(concat( -/// sha256(borsh(inner_lite)), -/// sha256(borsh(inner_rest)) // we can use inner_rest_hash as well -/// ) -/// ), -/// prev_hash -///)) -/// ``` -fn current_block_hash( - inner_lite_hash: CryptoHash, - inner_rest_hash: CryptoHash, - prev_block_hash: CryptoHash, -) -> 
CryptoHash { - H::sha256_digest( - [ - H::sha256_digest( - [inner_lite_hash.as_ref(), inner_rest_hash.as_ref()] - .concat() - .as_ref(), - ) - .as_ref(), - prev_block_hash.as_ref(), - ] - .concat() - .as_ref(), - ) - .as_slice() - .try_into() - .unwrap() -} diff --git a/modules/src/clients/mod.rs b/modules/src/clients/mod.rs deleted file mode 100644 index 1b716ed30f..0000000000 --- a/modules/src/clients/mod.rs +++ /dev/null @@ -1,8 +0,0 @@ -//! Implementations of client verification algorithms for specific types of chains. - -pub mod host_functions; -pub mod ics07_tendermint; -#[cfg(any(test, feature = "ics11_beefy"))] -pub mod ics11_beefy; -#[cfg(any(test, feature = "ics11_beefy"))] -pub mod ics13_near; diff --git a/modules/src/core/ics02_client/client_consensus.rs b/modules/src/core/ics02_client/client_consensus.rs index b0be254e61..81790a244e 100644 --- a/modules/src/core/ics02_client/client_consensus.rs +++ b/modules/src/core/ics02_client/client_consensus.rs @@ -1,193 +1,45 @@ -use crate::prelude::*; - -use core::convert::Infallible; -use core::fmt::Debug; -use core::marker::{Send, Sync}; - -use ibc_proto::google::protobuf::Any; -use ibc_proto::ibc::core::client::v1::ConsensusStateWithHeight; -use serde::Serialize; -use tendermint_proto::Protobuf; - -use crate::clients::ics07_tendermint::consensus_state; -#[cfg(any(test, feature = "ics11_beefy"))] -use crate::clients::ics11_beefy::consensus_state as beefy_consensus_state; -use crate::core::ics02_client::client_type::ClientType; -use crate::core::ics02_client::error::Error; -use crate::core::ics02_client::height::Height; -use crate::core::ics23_commitment::commitment::CommitmentRoot; -use crate::core::ics24_host::identifier::ClientId; -use crate::events::WithBlockDataType; -use crate::timestamp::Timestamp; - -#[cfg(any(test, feature = "mocks"))] -use crate::mock::client_state::MockConsensusState; - -pub const TENDERMINT_CONSENSUS_STATE_TYPE_URL: &str = - "/ibc.lightclients.tendermint.v1.ConsensusState"; - -pub const BEEFY_CONSENSUS_STATE_TYPE_URL: &str = "/ibc.lightclients.beefy.v1.ConsensusState"; - -pub const MOCK_CONSENSUS_STATE_TYPE_URL: &str = "/ibc.mock.ConsensusState"; +use crate::{ + core::{ics23_commitment::commitment::CommitmentRoot, ics24_host::identifier::ClientId}, + events::WithBlockDataType, + prelude::*, + timestamp::Timestamp, +}; +use core::{ + fmt::Debug, + marker::{Send, Sync}, +}; pub trait ConsensusState: Clone + Debug + Send + Sync { - type Error; - - /// Type of client associated with this consensus state (eg. Tendermint) - fn client_type(&self) -> ClientType; - - /// Commitment root of the consensus state, which is used for key-value pair verification. 
- fn root(&self) -> &CommitmentRoot; - - /// Wrap into an `AnyConsensusState` - fn wrap_any(self) -> AnyConsensusState; -} - -#[derive(Clone, Debug, PartialEq, Eq, Serialize)] -#[serde(tag = "type")] -pub enum AnyConsensusState { - Tendermint(consensus_state::ConsensusState), - #[cfg(any(test, feature = "ics11_beefy"))] - Beefy(beefy_consensus_state::ConsensusState), - #[cfg(any(test, feature = "mocks"))] - Mock(MockConsensusState), -} - -impl AnyConsensusState { - pub fn timestamp(&self) -> Timestamp { - match self { - Self::Tendermint(cs_state) => cs_state.timestamp.into(), - #[cfg(any(test, feature = "ics11_beefy"))] - Self::Beefy(cs_state) => cs_state.timestamp.into(), - #[cfg(any(test, feature = "mocks"))] - Self::Mock(mock_state) => mock_state.timestamp(), - } - } - - pub fn client_type(&self) -> ClientType { - match self { - AnyConsensusState::Tendermint(_cs) => ClientType::Tendermint, - #[cfg(any(test, feature = "ics11_beefy"))] - AnyConsensusState::Beefy(_) => ClientType::Beefy, - #[cfg(any(test, feature = "mocks"))] - AnyConsensusState::Mock(_cs) => ClientType::Mock, - } - } -} - -impl Protobuf for AnyConsensusState {} + type Error; -impl TryFrom for AnyConsensusState { - type Error = Error; - - fn try_from(value: Any) -> Result { - match value.type_url.as_str() { - "" => Err(Error::empty_consensus_state_response()), - - TENDERMINT_CONSENSUS_STATE_TYPE_URL => Ok(AnyConsensusState::Tendermint( - consensus_state::ConsensusState::decode_vec(&value.value) - .map_err(Error::decode_raw_client_state)?, - )), - - #[cfg(any(test, feature = "ics11_beefy"))] - BEEFY_CONSENSUS_STATE_TYPE_URL => Ok(AnyConsensusState::Beefy( - beefy_consensus_state::ConsensusState::decode_vec(&value.value) - .map_err(Error::decode_raw_client_state)?, - )), - - #[cfg(any(test, feature = "mocks"))] - MOCK_CONSENSUS_STATE_TYPE_URL => Ok(AnyConsensusState::Mock( - MockConsensusState::decode_vec(&value.value) - .map_err(Error::decode_raw_client_state)?, - )), - - _ => Err(Error::unknown_consensus_state_type(value.type_url)), - } - } -} - -impl From for Any { - fn from(value: AnyConsensusState) -> Self { - match value { - AnyConsensusState::Tendermint(value) => Any { - type_url: TENDERMINT_CONSENSUS_STATE_TYPE_URL.to_string(), - value: value.encode_vec(), - }, - #[cfg(any(test, feature = "ics11_beefy"))] - AnyConsensusState::Beefy(value) => Any { - type_url: BEEFY_CONSENSUS_STATE_TYPE_URL.to_string(), - value: value.encode_vec(), - }, - #[cfg(any(test, feature = "mocks"))] - AnyConsensusState::Mock(value) => Any { - type_url: MOCK_CONSENSUS_STATE_TYPE_URL.to_string(), - value: value.encode_vec(), - }, - } - } -} - -#[derive(Clone, Debug, PartialEq, Eq, Serialize)] -pub struct AnyConsensusStateWithHeight { - pub height: Height, - pub consensus_state: AnyConsensusState, -} - -impl Protobuf for AnyConsensusStateWithHeight {} - -impl TryFrom for AnyConsensusStateWithHeight { - type Error = Error; - - fn try_from(value: ConsensusStateWithHeight) -> Result { - let state = value - .consensus_state - .map(AnyConsensusState::try_from) - .transpose()? 
- .ok_or_else(Error::empty_consensus_state_response)?; - - Ok(AnyConsensusStateWithHeight { - height: value.height.ok_or_else(Error::missing_height)?.into(), - consensus_state: state, - }) - } -} - -impl From for ConsensusStateWithHeight { - fn from(value: AnyConsensusStateWithHeight) -> Self { - ConsensusStateWithHeight { - height: Some(value.height.into()), - consensus_state: Some(value.consensus_state.into()), - } - } -} + /// Commitment root of the consensus state, which is used for key-value pair verification. + fn root(&self) -> &CommitmentRoot; -impl ConsensusState for AnyConsensusState { - type Error = Infallible; + /// Returns the timestamp of the state. + fn timestamp(&self) -> Timestamp; - fn client_type(&self) -> ClientType { - self.client_type() - } + fn downcast(self) -> Option + where + Self: 'static, + { + ::downcast_ref(&self).cloned() + } - fn root(&self) -> &CommitmentRoot { - match self { - Self::Tendermint(cs_state) => cs_state.root(), - #[cfg(any(test, feature = "ics11_beefy"))] - Self::Beefy(cs_state) => cs_state.root(), - #[cfg(any(test, feature = "mocks"))] - Self::Mock(mock_state) => mock_state.root(), - } - } + fn wrap(sub_state: &dyn core::any::Any) -> Option + where + Self: 'static, + { + sub_state.downcast_ref::().cloned() + } - fn wrap_any(self) -> AnyConsensusState { - self - } + fn encode_to_vec(&self) -> Vec; } /// Query request for a single client event, identified by `event_id`, for `client_id`. #[derive(Clone, Debug)] pub struct QueryClientEventRequest { - pub height: crate::Height, - pub event_id: WithBlockDataType, - pub client_id: ClientId, - pub consensus_height: crate::Height, + pub height: crate::Height, + pub event_id: WithBlockDataType, + pub client_id: ClientId, + pub consensus_height: crate::Height, } diff --git a/modules/src/core/ics02_client/client_def.rs b/modules/src/core/ics02_client/client_def.rs index d2bc7ed59a..8684ee19df 100644 --- a/modules/src/core/ics02_client/client_def.rs +++ b/modules/src/core/ics02_client/client_def.rs @@ -1,1133 +1,215 @@ -use crate::clients::host_functions::HostFunctionsProvider; -use crate::clients::ics07_tendermint::client_def::TendermintClient; -#[cfg(any(test, feature = "ics11_beefy"))] -use crate::clients::ics11_beefy::client_def::BeefyClient; -use crate::core::ics02_client::client_consensus::{AnyConsensusState, ConsensusState}; -use crate::core::ics02_client::client_state::{AnyClientState, ClientState}; -use crate::core::ics02_client::client_type::ClientType; -use crate::core::ics02_client::error::Error; -use crate::core::ics02_client::header::{AnyHeader, Header}; -use crate::core::ics03_connection::connection::ConnectionEnd; -use crate::core::ics04_channel::channel::ChannelEnd; -use crate::core::ics04_channel::commitment::{AcknowledgementCommitment, PacketCommitment}; -use crate::core::ics04_channel::packet::Sequence; -use crate::core::ics23_commitment::commitment::{ - CommitmentPrefix, CommitmentProofBytes, CommitmentRoot, +use crate::core::ics02_client::{client_consensus::ConsensusState, client_state::ClientState}; + +use crate::{ + core::{ + ics02_client::{context::ClientKeeper, error::Error, header::Header}, + ics03_connection::connection::ConnectionEnd, + ics04_channel::{ + channel::ChannelEnd, + commitment::{AcknowledgementCommitment, PacketCommitment}, + packet::Sequence, + }, + ics23_commitment::commitment::{CommitmentPrefix, CommitmentProofBytes, CommitmentRoot}, + ics24_host::identifier::{ChannelId, ClientId, ConnectionId, PortId}, + ics26_routing::context::ReaderContext, + }, + prelude::*, 
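// Illustrative aside, not part of this patch, on the reworked `ConsensusState` trait above:
// the old `wrap_any`/`AnyConsensusState` plumbing is replaced by type-erased conversions via
// `core::any::Any`. As written, the default `downcast` only succeeds when the requested type
// is `Self` itself, and the default `wrap` only when the given `&dyn Any` holds a `Self`, so
// aggregate host-defined consensus-state types presumably override both to dispatch on their
// own variants.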
+ Height, }; -use crate::core::ics24_host::identifier::{ChannelId, ClientId, ConnectionId, PortId}; -use crate::core::ics26_routing::context::ReaderContext; -use crate::downcast; -use crate::prelude::*; -use crate::Height; use core::fmt::Debug; -#[cfg(any(test, feature = "mocks"))] -use crate::mock::client_def::MockClient; - #[derive(PartialEq, Eq, Clone, Debug)] -pub enum ConsensusUpdateResult { - Single(AnyConsensusState), - Batch(Vec<(Height, AnyConsensusState)>), +pub enum ConsensusUpdateResult { + Single(C::AnyConsensusState), + Batch(Vec<(Height, C::AnyConsensusState)>), } -pub trait ClientDef: Clone { - type Header: Header; - type ClientState: ClientState; - type ConsensusState: ConsensusState; - - fn verify_header( - &self, - ctx: &dyn ReaderContext, - client_id: ClientId, - client_state: Self::ClientState, - header: Self::Header, - ) -> Result<(), Error>; - - fn update_state( - &self, - ctx: &dyn ReaderContext, - client_id: ClientId, - client_state: Self::ClientState, - header: Self::Header, - ) -> Result<(Self::ClientState, ConsensusUpdateResult), Error>; - - fn update_state_on_misbehaviour( - &self, - client_state: Self::ClientState, - header: Self::Header, - ) -> Result; - - fn check_for_misbehaviour( - &self, - ctx: &dyn ReaderContext, - client_id: ClientId, - client_state: Self::ClientState, - header: Self::Header, - ) -> Result; - - /// TODO - fn verify_upgrade_and_update_state( - &self, - client_state: &Self::ClientState, - consensus_state: &Self::ConsensusState, - proof_upgrade_client: Vec, - proof_upgrade_consensus_state: Vec, - ) -> Result<(Self::ClientState, ConsensusUpdateResult), Error>; - - /// Verification functions as specified in: - /// - /// - /// Verify a `proof` that the consensus state of a given client (at height `consensus_height`) - /// matches the input `consensus_state`. The parameter `counterparty_height` represent the - /// height of the counterparty chain that this proof assumes (i.e., the height at which this - /// proof was computed). - #[allow(clippy::too_many_arguments)] - fn verify_client_consensus_state( - &self, - ctx: &dyn ReaderContext, - client_state: &Self::ClientState, - height: Height, - prefix: &CommitmentPrefix, - proof: &CommitmentProofBytes, - root: &CommitmentRoot, - client_id: &ClientId, - consensus_height: Height, - expected_consensus_state: &AnyConsensusState, - ) -> Result<(), Error>; - - /// Verify a `proof` that a connection state matches that of the input `connection_end`. - #[allow(clippy::too_many_arguments)] - fn verify_connection_state( - &self, - ctx: &dyn ReaderContext, - client_id: &ClientId, - client_state: &Self::ClientState, - height: Height, - prefix: &CommitmentPrefix, - proof: &CommitmentProofBytes, - root: &CommitmentRoot, - connection_id: &ConnectionId, - expected_connection_end: &ConnectionEnd, - ) -> Result<(), Error>; - - /// Verify a `proof` that a channel state matches that of the input `channel_end`. - #[allow(clippy::too_many_arguments)] - fn verify_channel_state( - &self, - ctx: &dyn ReaderContext, - client_id: &ClientId, - client_state: &Self::ClientState, - height: Height, - prefix: &CommitmentPrefix, - proof: &CommitmentProofBytes, - root: &CommitmentRoot, - port_id: &PortId, - channel_id: &ChannelId, - expected_channel_end: &ChannelEnd, - ) -> Result<(), Error>; - - /// Verify the client state for this chain that it is stored on the counterparty chain. 
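// Illustrative usage sketch, not part of this patch, for the `map_state` helper introduced
// above: a caller holding a `ConsensusUpdateResult<HostA>` can move it into another client
// context by converting each contained consensus state, e.g.
//     let for_b: ConsensusUpdateResult<HostB> = result.map_state(convert_consensus_state);
// where `HostA`, `HostB` and `convert_consensus_state` are hypothetical stand-ins for the
// two host contexts and a state-conversion function. `Single` maps one state; `Batch` maps
// every `(Height, state)` pair while keeping the heights.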
- #[allow(clippy::too_many_arguments)] - fn verify_client_full_state( - &self, - ctx: &dyn ReaderContext, - client_state: &Self::ClientState, - height: Height, - prefix: &CommitmentPrefix, - proof: &CommitmentProofBytes, - root: &CommitmentRoot, - client_id: &ClientId, - expected_client_state: &AnyClientState, - ) -> Result<(), Error>; - - /// Verify a `proof` that a packet has been commited. - #[allow(clippy::too_many_arguments)] - fn verify_packet_data( - &self, - ctx: &dyn ReaderContext, - client_id: &ClientId, - client_state: &Self::ClientState, - height: Height, - connection_end: &ConnectionEnd, - proof: &CommitmentProofBytes, - root: &CommitmentRoot, - port_id: &PortId, - channel_id: &ChannelId, - sequence: Sequence, - commitment: PacketCommitment, - ) -> Result<(), Error>; - - /// Verify a `proof` that a packet has been commited. - #[allow(clippy::too_many_arguments)] - fn verify_packet_acknowledgement( - &self, - ctx: &dyn ReaderContext, - client_id: &ClientId, - client_state: &Self::ClientState, - height: Height, - connection_end: &ConnectionEnd, - proof: &CommitmentProofBytes, - root: &CommitmentRoot, - port_id: &PortId, - channel_id: &ChannelId, - sequence: Sequence, - ack: AcknowledgementCommitment, - ) -> Result<(), Error>; - - /// Verify a `proof` that of the next_seq_received. - #[allow(clippy::too_many_arguments)] - fn verify_next_sequence_recv( - &self, - ctx: &dyn ReaderContext, - client_id: &ClientId, - client_state: &Self::ClientState, - height: Height, - connection_end: &ConnectionEnd, - proof: &CommitmentProofBytes, - root: &CommitmentRoot, - port_id: &PortId, - channel_id: &ChannelId, - sequence: Sequence, - ) -> Result<(), Error>; - - /// Verify a `proof` that a packet has not been received. - #[allow(clippy::too_many_arguments)] - fn verify_packet_receipt_absence( - &self, - ctx: &dyn ReaderContext, - client_id: &ClientId, - client_state: &Self::ClientState, - height: Height, - connection_end: &ConnectionEnd, - proof: &CommitmentProofBytes, - root: &CommitmentRoot, - port_id: &PortId, - channel_id: &ChannelId, - sequence: Sequence, - ) -> Result<(), Error>; +impl ConsensusUpdateResult { + pub fn map_state(self, f: F) -> ConsensusUpdateResult + where + F: Fn(C::AnyConsensusState) -> D::AnyConsensusState, + { + match self { + ConsensusUpdateResult::Single(cs) => ConsensusUpdateResult::Single(f(cs)), + ConsensusUpdateResult::Batch(cs) => + ConsensusUpdateResult::Batch(cs.into_iter().map(|(h, s)| (h, f(s))).collect()), + } + } } -#[derive(Clone, Debug)] -pub enum AnyClient { - Tendermint(TendermintClient), - #[cfg(any(test, feature = "ics11_beefy"))] - Beefy(BeefyClient), - #[cfg(any(test, feature = "ics11_beefy"))] - Near(BeefyClient), - #[cfg(any(test, feature = "mocks"))] - Mock(MockClient), -} - -impl AnyClient { - pub fn from_client_type(client_type: ClientType) -> Self { - match client_type { - ClientType::Tendermint => { - Self::Tendermint(TendermintClient::::default()) - } - #[cfg(any(test, feature = "ics11_beefy"))] - ClientType::Beefy => Self::Beefy(BeefyClient::::default()), - #[cfg(any(test, feature = "ics11_beefy"))] - ClientType::Near => Self::Near(BeefyClient::::default()), - #[cfg(any(test, feature = "mocks"))] - ClientType::Mock => Self::Mock(MockClient::default()), - } - } -} - -// ⚠️ Beware of the awful boilerplate below ⚠️ -impl ClientDef for AnyClient { - type Header = AnyHeader; - type ClientState = AnyClientState; - type ConsensusState = AnyConsensusState; - - /// Validate an incoming header - fn verify_header( - &self, - ctx: &dyn ReaderContext, 
- client_id: ClientId, - client_state: Self::ClientState, - header: Self::Header, - ) -> Result<(), Error> { - match self { - Self::Tendermint(client) => { - let (client_state, header) = downcast!( - client_state => AnyClientState::Tendermint, - header => AnyHeader::Tendermint, - ) - .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Tendermint))?; - - client.verify_header(ctx, client_id, client_state, header) - } - - #[cfg(any(test, feature = "ics11_beefy"))] - Self::Beefy(client) => { - let (client_state, header) = downcast!( - client_state => AnyClientState::Beefy, - header => AnyHeader::Beefy, - ) - .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Beefy))?; - - client.verify_header(ctx, client_id, client_state, header) - } - - #[cfg(any(test, feature = "ics11_beefy"))] - Self::Near(_) => { - // let (client_state, header) = downcast!( - // client_state => AnyClientState::Beefy, - // header => AnyHeader::Beefy, - // ) - // .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Beefy))?; - - // client.verify_header(ctx, client_id, client_state, header) - todo!() - } - - #[cfg(any(test, feature = "mocks"))] - Self::Mock(client) => { - let (client_state, header) = downcast!( - client_state => AnyClientState::Mock, - header => AnyHeader::Mock, - ) - .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Mock))?; - - client.verify_header(ctx, client_id, client_state, header) - } - } - } - - /// Validates an incoming `header` against the latest consensus state of this client. - fn update_state( - &self, - ctx: &dyn ReaderContext, - client_id: ClientId, - client_state: AnyClientState, - header: AnyHeader, - ) -> Result<(AnyClientState, ConsensusUpdateResult), Error> { - match self { - Self::Tendermint(client) => { - let (client_state, header) = downcast!( - client_state => AnyClientState::Tendermint, - header => AnyHeader::Tendermint, - ) - .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Tendermint))?; - - let (new_state, new_consensus) = - client.update_state(ctx, client_id, client_state, header)?; - - Ok((AnyClientState::Tendermint(new_state), new_consensus)) - } - #[cfg(any(test, feature = "ics11_beefy"))] - Self::Beefy(client) => { - let (client_state, header) = downcast!( - client_state => AnyClientState::Beefy, - header => AnyHeader::Beefy, - ) - .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Beefy))?; - - let (new_state, new_consensus) = - client.update_state(ctx, client_id, client_state, header)?; - - Ok((AnyClientState::Beefy(new_state), new_consensus)) - } - #[cfg(any(test, feature = "ics11_beefy"))] - Self::Near(_) => { - todo!() - } - - #[cfg(any(test, feature = "mocks"))] - Self::Mock(client) => { - let (client_state, header) = downcast!( - client_state => AnyClientState::Mock, - header => AnyHeader::Mock, - ) - .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Mock))?; - - let (new_state, new_consensus) = - client.update_state(ctx, client_id, client_state, header)?; - - Ok((AnyClientState::Mock(new_state), new_consensus)) - } - } - } - - fn update_state_on_misbehaviour( - &self, - client_state: Self::ClientState, - header: Self::Header, - ) -> Result { - match self { - AnyClient::Tendermint(client) => { - let (client_state, header) = downcast!( - client_state => AnyClientState::Tendermint, - header => AnyHeader::Tendermint, - ) - .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Tendermint))?; - let client_state = client.update_state_on_misbehaviour(client_state, header)?; - 
Ok(Self::ClientState::Tendermint(client_state)) - } - #[cfg(any(test, feature = "ics11_beefy"))] - AnyClient::Beefy(client) => { - let (client_state, header) = downcast!( - client_state => AnyClientState::Beefy, - header => AnyHeader::Beefy, - ) - .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Beefy))?; - - let client_state = client.update_state_on_misbehaviour(client_state, header)?; - Ok(Self::ClientState::Beefy(client_state)) - } - #[cfg(any(test, feature = "ics11_beefy"))] - AnyClient::Near(_) => { - todo!() - } - #[cfg(any(test, feature = "mocks"))] - AnyClient::Mock(client) => { - let (client_state, header) = downcast!( - client_state => AnyClientState::Mock, - header => AnyHeader::Mock, - ) - .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Mock))?; - - let client_state = client.update_state_on_misbehaviour(client_state, header)?; - Ok(Self::ClientState::Mock(client_state)) - } - } - } - - /// Checks for misbehaviour in an incoming header - fn check_for_misbehaviour( - &self, - ctx: &dyn ReaderContext, - client_id: ClientId, - client_state: Self::ClientState, - header: Self::Header, - ) -> Result { - match self { - AnyClient::Tendermint(client) => { - let (client_state, header) = downcast!( - client_state => AnyClientState::Tendermint, - header => AnyHeader::Tendermint, - ) - .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Tendermint))?; - client.check_for_misbehaviour(ctx, client_id, client_state, header) - } - #[cfg(any(test, feature = "ics11_beefy"))] - AnyClient::Beefy(client) => { - let (client_state, header) = downcast!( - client_state => AnyClientState::Beefy, - header => AnyHeader::Beefy, - ) - .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Beefy))?; - - client.check_for_misbehaviour(ctx, client_id, client_state, header) - } - #[cfg(any(test, feature = "ics11_beefy"))] - AnyClient::Near(_) => { - todo!() - } - #[cfg(any(test, feature = "mocks"))] - AnyClient::Mock(client) => { - let (client_state, header) = downcast!( - client_state => AnyClientState::Mock, - header => AnyHeader::Mock, - ) - .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Mock))?; - - client.check_for_misbehaviour(ctx, client_id, client_state, header) - } - } - } - - fn verify_upgrade_and_update_state( - &self, - client_state: &Self::ClientState, - consensus_state: &Self::ConsensusState, - proof_upgrade_client: Vec, - proof_upgrade_consensus_state: Vec, - ) -> Result<(Self::ClientState, ConsensusUpdateResult), Error> { - match self { - Self::Tendermint(client) => { - let (client_state, consensus_state) = downcast!( - client_state => AnyClientState::Tendermint, - consensus_state => AnyConsensusState::Tendermint, - ) - .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Tendermint))?; - - let (new_state, new_consensus) = client.verify_upgrade_and_update_state( - client_state, - consensus_state, - proof_upgrade_client, - proof_upgrade_consensus_state, - )?; - - Ok((AnyClientState::Tendermint(new_state), new_consensus)) - } - - #[cfg(any(test, feature = "ics11_beefy"))] - Self::Beefy(client) => { - let (client_state, consensus_state) = downcast!( - client_state => AnyClientState::Beefy, - consensus_state => AnyConsensusState::Beefy, - ) - .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Beefy))?; - - let (new_state, new_consensus) = client.verify_upgrade_and_update_state( - client_state, - consensus_state, - proof_upgrade_client, - proof_upgrade_consensus_state, - )?; - - Ok((AnyClientState::Beefy(new_state), new_consensus)) - } - - 
#[cfg(any(test, feature = "ics11_beefy"))] - Self::Near(_) => { - todo!() - } - - #[cfg(any(test, feature = "mocks"))] - Self::Mock(client) => { - let (client_state, consensus_state) = downcast!( - client_state => AnyClientState::Mock, - consensus_state => AnyConsensusState::Mock, - ) - .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Mock))?; - - let (new_state, new_consensus) = client.verify_upgrade_and_update_state( - client_state, - consensus_state, - proof_upgrade_client, - proof_upgrade_consensus_state, - )?; - - Ok((AnyClientState::Mock(new_state), new_consensus)) - } - } - } - - fn verify_client_consensus_state( - &self, - ctx: &dyn ReaderContext, - client_state: &Self::ClientState, - height: Height, - prefix: &CommitmentPrefix, - proof: &CommitmentProofBytes, - root: &CommitmentRoot, - client_id: &ClientId, - consensus_height: Height, - expected_consensus_state: &AnyConsensusState, - ) -> Result<(), Error> { - match self { - Self::Tendermint(client) => { - let client_state = downcast!( - client_state => AnyClientState::Tendermint - ) - .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Tendermint))?; - - client.verify_client_consensus_state( - ctx, - client_state, - height, - prefix, - proof, - root, - client_id, - consensus_height, - expected_consensus_state, - ) - } - - #[cfg(any(test, feature = "ics11_beefy"))] - Self::Beefy(client) => { - let client_state = downcast!( - client_state => AnyClientState::Beefy - ) - .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Beefy))?; - - client.verify_client_consensus_state( - ctx, - client_state, - height, - prefix, - proof, - root, - client_id, - consensus_height, - expected_consensus_state, - ) - } - #[cfg(any(test, feature = "ics11_beefy"))] - Self::Near(_) => { - todo!() - } - #[cfg(any(test, feature = "mocks"))] - Self::Mock(client) => { - let client_state = downcast!( - client_state => AnyClientState::Mock - ) - .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Mock))?; - - client.verify_client_consensus_state( - ctx, - client_state, - height, - prefix, - proof, - root, - client_id, - consensus_height, - expected_consensus_state, - ) - } - } - } - - fn verify_connection_state( - &self, - ctx: &dyn ReaderContext, - client_id: &ClientId, - client_state: &AnyClientState, - height: Height, - prefix: &CommitmentPrefix, - proof: &CommitmentProofBytes, - root: &CommitmentRoot, - connection_id: &ConnectionId, - expected_connection_end: &ConnectionEnd, - ) -> Result<(), Error> { - match self { - Self::Tendermint(client) => { - let client_state = downcast!(client_state => AnyClientState::Tendermint) - .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Tendermint))?; - - client.verify_connection_state( - ctx, - client_id, - client_state, - height, - prefix, - proof, - root, - connection_id, - expected_connection_end, - ) - } - #[cfg(any(test, feature = "ics11_beefy"))] - Self::Beefy(client) => { - let client_state = downcast!(client_state => AnyClientState::Beefy) - .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Beefy))?; - - client.verify_connection_state( - ctx, - client_id, - client_state, - height, - prefix, - proof, - root, - connection_id, - expected_connection_end, - ) - } - #[cfg(any(test, feature = "ics11_beefy"))] - Self::Near(_) => { - todo!() - } - #[cfg(any(test, feature = "mocks"))] - Self::Mock(client) => { - let client_state = downcast!(client_state => AnyClientState::Mock) - .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Mock))?; - - 
client.verify_connection_state( - ctx, - client_id, - client_state, - height, - prefix, - proof, - root, - connection_id, - expected_connection_end, - ) - } - } - } - - fn verify_channel_state( - &self, - ctx: &dyn ReaderContext, - client_id: &ClientId, - client_state: &AnyClientState, - height: Height, - prefix: &CommitmentPrefix, - proof: &CommitmentProofBytes, - root: &CommitmentRoot, - port_id: &PortId, - channel_id: &ChannelId, - expected_channel_end: &ChannelEnd, - ) -> Result<(), Error> { - match self { - Self::Tendermint(client) => { - let client_state = downcast!(client_state => AnyClientState::Tendermint) - .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Tendermint))?; - - client.verify_channel_state( - ctx, - client_id, - client_state, - height, - prefix, - proof, - root, - port_id, - channel_id, - expected_channel_end, - ) - } - - #[cfg(any(test, feature = "ics11_beefy"))] - Self::Beefy(client) => { - let client_state = downcast!(client_state => AnyClientState::Beefy) - .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Beefy))?; - - client.verify_channel_state( - ctx, - client_id, - client_state, - height, - prefix, - proof, - root, - port_id, - channel_id, - expected_channel_end, - ) - } - #[cfg(any(test, feature = "ics11_beefy"))] - Self::Near(_) => { - todo!() - } - - #[cfg(any(test, feature = "mocks"))] - Self::Mock(client) => { - let client_state = downcast!(client_state => AnyClientState::Mock) - .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Mock))?; - - client.verify_channel_state( - ctx, - client_id, - client_state, - height, - prefix, - proof, - root, - port_id, - channel_id, - expected_channel_end, - ) - } - } - } - fn verify_client_full_state( - &self, - ctx: &dyn ReaderContext, - client_state: &Self::ClientState, - height: Height, - prefix: &CommitmentPrefix, - proof: &CommitmentProofBytes, - root: &CommitmentRoot, - client_id: &ClientId, - client_state_on_counterparty: &AnyClientState, - ) -> Result<(), Error> { - match self { - Self::Tendermint(client) => { - let client_state = downcast!( - client_state => AnyClientState::Tendermint - ) - .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Tendermint))?; - - client.verify_client_full_state( - ctx, - client_state, - height, - prefix, - proof, - root, - client_id, - client_state_on_counterparty, - ) - } - #[cfg(any(test, feature = "ics11_beefy"))] - Self::Beefy(client) => { - let client_state = downcast!( - client_state => AnyClientState::Beefy - ) - .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Beefy))?; - - client.verify_client_full_state( - ctx, - client_state, - height, - prefix, - proof, - root, - client_id, - client_state_on_counterparty, - ) - } - #[cfg(any(test, feature = "ics11_beefy"))] - Self::Near(_) => { - todo!() - } - #[cfg(any(test, feature = "mocks"))] - Self::Mock(client) => { - let client_state = downcast!( - client_state => AnyClientState::Mock - ) - .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Mock))?; - - client.verify_client_full_state( - ctx, - client_state, - height, - prefix, - proof, - root, - client_id, - client_state_on_counterparty, - ) - } - } - } - - fn verify_packet_data( - &self, - ctx: &dyn ReaderContext, - client_id: &ClientId, - client_state: &Self::ClientState, - height: Height, - connection_end: &ConnectionEnd, - proof: &CommitmentProofBytes, - root: &CommitmentRoot, - port_id: &PortId, - channel_id: &ChannelId, - sequence: Sequence, - commitment: PacketCommitment, - ) -> Result<(), Error> { - match self { 
- Self::Tendermint(client) => { - let client_state = downcast!( - client_state => AnyClientState::Tendermint - ) - .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Tendermint))?; - - client.verify_packet_data( - ctx, - client_id, - client_state, - height, - connection_end, - proof, - root, - port_id, - channel_id, - sequence, - commitment, - ) - } - - #[cfg(any(test, feature = "ics11_beefy"))] - Self::Beefy(client) => { - let client_state = downcast!( - client_state => AnyClientState::Beefy - ) - .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Beefy))?; - - client.verify_packet_data( - ctx, - client_id, - client_state, - height, - connection_end, - proof, - root, - port_id, - channel_id, - sequence, - commitment, - ) - } - #[cfg(any(test, feature = "ics11_beefy"))] - Self::Near(_) => { - todo!() - } - #[cfg(any(test, feature = "mocks"))] - Self::Mock(client) => { - let client_state = downcast!( - client_state => AnyClientState::Mock - ) - .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Mock))?; - - client.verify_packet_data( - ctx, - client_id, - client_state, - height, - connection_end, - proof, - root, - port_id, - channel_id, - sequence, - commitment, - ) - } - } - } - - fn verify_packet_acknowledgement( - &self, - ctx: &dyn ReaderContext, - client_id: &ClientId, - client_state: &Self::ClientState, - height: Height, - connection_end: &ConnectionEnd, - proof: &CommitmentProofBytes, - root: &CommitmentRoot, - port_id: &PortId, - channel_id: &ChannelId, - sequence: Sequence, - ack_commitment: AcknowledgementCommitment, - ) -> Result<(), Error> { - match self { - Self::Tendermint(client) => { - let client_state = downcast!( - client_state => AnyClientState::Tendermint - ) - .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Tendermint))?; - - client.verify_packet_acknowledgement( - ctx, - client_id, - client_state, - height, - connection_end, - proof, - root, - port_id, - channel_id, - sequence, - ack_commitment, - ) - } - - #[cfg(any(test, feature = "ics11_beefy"))] - Self::Beefy(client) => { - let client_state = downcast!( - client_state => AnyClientState::Beefy - ) - .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Beefy))?; - - client.verify_packet_acknowledgement( - ctx, - client_id, - client_state, - height, - connection_end, - proof, - root, - port_id, - channel_id, - sequence, - ack_commitment, - ) - } - #[cfg(any(test, feature = "ics11_beefy"))] - Self::Near(_) => { - todo!() - } - #[cfg(any(test, feature = "mocks"))] - Self::Mock(client) => { - let client_state = downcast!( - client_state => AnyClientState::Mock - ) - .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Mock))?; - - client.verify_packet_acknowledgement( - ctx, - client_id, - client_state, - height, - connection_end, - proof, - root, - port_id, - channel_id, - sequence, - ack_commitment, - ) - } - } - } - fn verify_next_sequence_recv( - &self, - ctx: &dyn ReaderContext, - client_id: &ClientId, - client_state: &Self::ClientState, - height: Height, - connection_end: &ConnectionEnd, - proof: &CommitmentProofBytes, - root: &CommitmentRoot, - port_id: &PortId, - channel_id: &ChannelId, - sequence: Sequence, - ) -> Result<(), Error> { - match self { - Self::Tendermint(client) => { - let client_state = downcast!( - client_state => AnyClientState::Tendermint - ) - .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Tendermint))?; - - client.verify_next_sequence_recv( - ctx, - client_id, - client_state, - height, - connection_end, - proof, - root, - 
port_id, - channel_id, - sequence, - ) - } - - #[cfg(any(test, feature = "ics11_beefy"))] - Self::Beefy(client) => { - let client_state = downcast!( - client_state => AnyClientState::Beefy - ) - .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Beefy))?; - - client.verify_next_sequence_recv( - ctx, - client_id, - client_state, - height, - connection_end, - proof, - root, - port_id, - channel_id, - sequence, - ) - } - #[cfg(any(test, feature = "ics11_beefy"))] - Self::Near(_) => { - todo!() - } - #[cfg(any(test, feature = "mocks"))] - Self::Mock(client) => { - let client_state = downcast!( - client_state => AnyClientState::Mock - ) - .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Mock))?; - - client.verify_next_sequence_recv( - ctx, - client_id, - client_state, - height, - connection_end, - proof, - root, - port_id, - channel_id, - sequence, - ) - } - } - } - - fn verify_packet_receipt_absence( - &self, - ctx: &dyn ReaderContext, - client_id: &ClientId, - client_state: &Self::ClientState, - height: Height, - connection_end: &ConnectionEnd, - proof: &CommitmentProofBytes, - root: &CommitmentRoot, - port_id: &PortId, - channel_id: &ChannelId, - sequence: Sequence, - ) -> Result<(), Error> { - match self { - Self::Tendermint(client) => { - let client_state = downcast!( - client_state => AnyClientState::Tendermint - ) - .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Tendermint))?; - - client.verify_packet_receipt_absence( - ctx, - client_id, - client_state, - height, - connection_end, - proof, - root, - port_id, - channel_id, - sequence, - ) - } - - #[cfg(any(test, feature = "ics11_beefy"))] - Self::Beefy(client) => { - let client_state = downcast!( - client_state => AnyClientState::Beefy - ) - .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Beefy))?; - - client.verify_packet_receipt_absence( - ctx, - client_id, - client_state, - height, - connection_end, - proof, - root, - port_id, - channel_id, - sequence, - ) - } - #[cfg(any(test, feature = "ics11_beefy"))] - Self::Near(_) => { - todo!() - } - #[cfg(any(test, feature = "mocks"))] - Self::Mock(client) => { - let client_state = downcast!( - client_state => AnyClientState::Mock - ) - .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Mock))?; - - client.verify_packet_receipt_absence( - ctx, - client_id, - client_state, - height, - connection_end, - proof, - root, - port_id, - channel_id, - sequence, - ) - } - } - } +pub trait ClientDef: Clone { + type Header: Header; + type ClientState: ClientState + Eq; + type ConsensusState: ConsensusState + Eq; + + fn verify_header( + &self, + ctx: &Ctx, + client_id: ClientId, + client_state: Self::ClientState, + header: Self::Header, + ) -> Result<(), Error>; + + fn update_state( + &self, + ctx: &Ctx, + client_id: ClientId, + client_state: Self::ClientState, + header: Self::Header, + ) -> Result<(Self::ClientState, ConsensusUpdateResult), Error>; + + fn update_state_on_misbehaviour( + &self, + client_state: Self::ClientState, + header: Self::Header, + ) -> Result; + + fn check_for_misbehaviour( + &self, + ctx: &Ctx, + client_id: ClientId, + client_state: Self::ClientState, + header: Self::Header, + ) -> Result; + + /// TODO + fn verify_upgrade_and_update_state( + &self, + client_state: &Self::ClientState, + consensus_state: &Self::ConsensusState, + proof_upgrade_client: Vec, + proof_upgrade_consensus_state: Vec, + ) -> Result<(Self::ClientState, ConsensusUpdateResult), Error>; + + /// Verification functions as specified in: + /// + /// + /// Verify a 
`proof` that the consensus state of a given client (at height `consensus_height`)
+    /// matches the input `consensus_state`. The parameter `counterparty_height` represents the
+    /// height of the counterparty chain that this proof assumes (i.e., the height at which this
+    /// proof was computed).
+    #[allow(clippy::too_many_arguments)]
+    fn verify_client_consensus_state(
+        &self,
+        ctx: &Ctx,
+        client_state: &Self::ClientState,
+        height: Height,
+        prefix: &CommitmentPrefix,
+        proof: &CommitmentProofBytes,
+        root: &CommitmentRoot,
+        client_id: &ClientId,
+        consensus_height: Height,
+        expected_consensus_state: &Ctx::AnyConsensusState,
+    ) -> Result<(), Error>;
+
+    /// Verify a `proof` that a connection state matches that of the input `connection_end`.
+    #[allow(clippy::too_many_arguments)]
+    fn verify_connection_state(
+        &self,
+        ctx: &Ctx,
+        client_id: &ClientId,
+        client_state: &Self::ClientState,
+        height: Height,
+        prefix: &CommitmentPrefix,
+        proof: &CommitmentProofBytes,
+        root: &CommitmentRoot,
+        connection_id: &ConnectionId,
+        expected_connection_end: &ConnectionEnd,
+    ) -> Result<(), Error>;
+
+    /// Verify a `proof` that a channel state matches that of the input `channel_end`.
+    #[allow(clippy::too_many_arguments)]
+    fn verify_channel_state(
+        &self,
+        ctx: &Ctx,
+        client_id: &ClientId,
+        client_state: &Self::ClientState,
+        height: Height,
+        prefix: &CommitmentPrefix,
+        proof: &CommitmentProofBytes,
+        root: &CommitmentRoot,
+        port_id: &PortId,
+        channel_id: &ChannelId,
+        expected_channel_end: &ChannelEnd,
+    ) -> Result<(), Error>;
+
+    /// Verify that the client state for this chain, as stored on the counterparty chain,
+    /// matches the input `expected_client_state`.
+    #[allow(clippy::too_many_arguments)]
+    fn verify_client_full_state(
+        &self,
+        ctx: &Ctx,
+        client_state: &Self::ClientState,
+        height: Height,
+        prefix: &CommitmentPrefix,
+        proof: &CommitmentProofBytes,
+        root: &CommitmentRoot,
+        client_id: &ClientId,
+        expected_client_state: &Ctx::AnyClientState,
+    ) -> Result<(), Error>;
+
+    /// Verify a `proof` that a packet has been committed.
+    #[allow(clippy::too_many_arguments)]
+    fn verify_packet_data(
+        &self,
+        ctx: &Ctx,
+        client_id: &ClientId,
+        client_state: &Self::ClientState,
+        height: Height,
+        connection_end: &ConnectionEnd,
+        proof: &CommitmentProofBytes,
+        root: &CommitmentRoot,
+        port_id: &PortId,
+        channel_id: &ChannelId,
+        sequence: Sequence,
+        commitment: PacketCommitment,
+    ) -> Result<(), Error>;
+
+    /// Verify a `proof` that a packet acknowledgement has been committed.
+    #[allow(clippy::too_many_arguments)]
+    fn verify_packet_acknowledgement(
+        &self,
+        ctx: &Ctx,
+        client_id: &ClientId,
+        client_state: &Self::ClientState,
+        height: Height,
+        connection_end: &ConnectionEnd,
+        proof: &CommitmentProofBytes,
+        root: &CommitmentRoot,
+        port_id: &PortId,
+        channel_id: &ChannelId,
+        sequence: Sequence,
+        ack: AcknowledgementCommitment,
+    ) -> Result<(), Error>;
+
+    /// Verify a `proof` of the next sequence to be received on the channel.
+    #[allow(clippy::too_many_arguments)]
+    fn verify_next_sequence_recv(
+        &self,
+        ctx: &Ctx,
+        client_id: &ClientId,
+        client_state: &Self::ClientState,
+        height: Height,
+        connection_end: &ConnectionEnd,
+        proof: &CommitmentProofBytes,
+        root: &CommitmentRoot,
+        port_id: &PortId,
+        channel_id: &ChannelId,
+        sequence: Sequence,
+    ) -> Result<(), Error>;
+
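+    // A minimal usage sketch (assuming these methods are generic over some
+    // `Ctx: ReaderContext`, as the `ctx: &Ctx` parameters suggest): a host-side
+    // packet handler obtains the client definition from the reworked `ClientState`
+    // trait (see `client_state.rs` in this patch) and dispatches through a concrete
+    // context instead of `&dyn ReaderContext`:
+    //
+    //     let client_def = client_state.client_def();
+    //     client_def.verify_packet_data(
+    //         ctx, &client_id, &client_state, proof_height, &connection_end,
+    //         &proof, &root, &port_id, &channel_id, sequence, commitment,
+    //     )?;
+    //
+    // The surrounding variable names (`proof_height`, `connection_end`, ...) are
+    // placeholders for illustration only.
+
+    /// Verify a `proof` that a packet has not been received.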
+ #[allow(clippy::too_many_arguments)] + fn verify_packet_receipt_absence( + &self, + ctx: &Ctx, + client_id: &ClientId, + client_state: &Self::ClientState, + height: Height, + connection_end: &ConnectionEnd, + proof: &CommitmentProofBytes, + root: &CommitmentRoot, + port_id: &PortId, + channel_id: &ChannelId, + sequence: Sequence, + ) -> Result<(), Error>; } diff --git a/modules/src/core/ics02_client/client_state.rs b/modules/src/core/ics02_client/client_state.rs index 25bb3bd8c0..01387e6d4f 100644 --- a/modules/src/core/ics02_client/client_state.rs +++ b/modules/src/core/ics02_client/client_state.rs @@ -1,378 +1,73 @@ -use core::marker::{Send, Sync}; -use core::time::Duration; - -use ibc_proto::google::protobuf::Any; -use serde::{Deserialize, Serialize}; -use tendermint_proto::Protobuf; - -use ibc_proto::ibc::core::client::v1::IdentifiedClientState; - -use crate::clients::ics07_tendermint::client_state; -#[cfg(any(test, feature = "ics11_beefy"))] -use crate::clients::ics11_beefy::client_state as beefy_client_state; -use crate::core::ics02_client::client_type::ClientType; -use crate::core::ics02_client::error::Error; -use crate::core::ics02_client::trust_threshold::TrustThreshold; -use crate::core::ics24_host::error::ValidationError; -use crate::core::ics24_host::identifier::{ChainId, ClientId}; -#[cfg(any(test, feature = "mocks"))] -use crate::mock::client_state::MockClientState; -use crate::prelude::*; -use crate::Height; - -pub const TENDERMINT_CLIENT_STATE_TYPE_URL: &str = "/ibc.lightclients.tendermint.v1.ClientState"; -pub const BEEFY_CLIENT_STATE_TYPE_URL: &str = "/ibc.lightclients.beefy.v1.ClientState"; -pub const MOCK_CLIENT_STATE_TYPE_URL: &str = "/ibc.mock.ClientState"; - -pub trait ClientState: Clone + core::fmt::Debug + Send + Sync { - /// Client-specific options for upgrading the client - type UpgradeOptions; - - /// Return the chain identifier which this client is serving (i.e., the client is verifying - /// consensus states from this chain). - fn chain_id(&self) -> ChainId; - - /// Type of client associated with this state (eg. Tendermint) - fn client_type(&self) -> ClientType; - - /// Latest height of consensus state - fn latest_height(&self) -> Height; - - /// Freeze status of the client - fn is_frozen(&self) -> bool { - self.frozen_height().is_some() - } - - /// Frozen height of the client - fn frozen_height(&self) -> Option; - - /// Helper function to verify the upgrade client procedure. - /// Resets all fields except the blockchain-specific ones, - /// and updates the given fields. 
- fn upgrade( - self, - upgrade_height: Height, - upgrade_options: Self::UpgradeOptions, - chain_id: ChainId, - ) -> Self; - - /// Wrap into an `AnyClientState` - fn wrap_any(self) -> AnyClientState; -} - -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -#[serde(tag = "type")] -pub enum AnyUpgradeOptions { - Tendermint(client_state::UpgradeOptions), - #[cfg(any(test, feature = "ics11_beefy"))] - Beefy(beefy_client_state::UpgradeOptions), - #[cfg(any(test, feature = "mocks"))] - Mock(()), -} - -impl AnyUpgradeOptions { - fn into_tendermint(self) -> client_state::UpgradeOptions { - match self { - Self::Tendermint(options) => options, - #[cfg(any(test, feature = "ics11_beefy"))] - Self::Beefy(_) => { - panic!("cannot downcast AnyUpgradeOptions::Beefy to Tendermint::UpgradeOptions") - } - #[cfg(any(test, feature = "mocks"))] - Self::Mock(_) => { - panic!("cannot downcast AnyUpgradeOptions::Mock to Tendermint::UpgradeOptions") - } - } - } - - #[cfg(any(test, feature = "ics11_beefy"))] - fn into_beefy(self) -> beefy_client_state::UpgradeOptions { - match self { - Self::Tendermint(_) => { - panic!("cannot downcast AnyUpgradeOptions::Tendermint to Beefy::UpgradeOptions") - } - #[cfg(any(test, feature = "ics11_beefy"))] - Self::Beefy(options) => options, - #[cfg(any(test, feature = "mocks"))] - Self::Mock(_) => { - panic!("cannot downcast AnyUpgradeOptions::Mock to Tendermint::UpgradeOptions") - } - } - } -} - -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -#[serde(tag = "type")] -pub enum AnyClientState { - Tendermint(client_state::ClientState), - #[cfg(any(test, feature = "ics11_beefy"))] - #[serde(skip)] - Beefy(beefy_client_state::ClientState), - #[cfg(any(test, feature = "ics11_beefy"))] - #[serde(skip)] - Near(beefy_client_state::ClientState), - #[cfg(any(test, feature = "mocks"))] - Mock(MockClientState), -} - -impl AnyClientState { - pub fn latest_height(&self) -> Height { - match self { - Self::Tendermint(tm_state) => tm_state.latest_height(), - #[cfg(any(test, feature = "ics11_beefy"))] - Self::Beefy(bf_state) => bf_state.latest_height(), - #[cfg(any(test, feature = "ics11_beefy"))] - Self::Near(_) => todo!(), - #[cfg(any(test, feature = "mocks"))] - Self::Mock(mock_state) => mock_state.latest_height(), - } - } - - pub fn frozen_height(&self) -> Option { - match self { - Self::Tendermint(tm_state) => tm_state.frozen_height(), - #[cfg(any(test, feature = "ics11_beefy"))] - Self::Beefy(bf_state) => bf_state.frozen_height(), - #[cfg(any(test, feature = "ics11_beefy"))] - Self::Near(_) => todo!(), - #[cfg(any(test, feature = "mocks"))] - Self::Mock(mock_state) => mock_state.frozen_height(), - } - } - - pub fn trust_threshold(&self) -> Option { - match self { - AnyClientState::Tendermint(state) => Some(state.trust_level), - #[cfg(any(test, feature = "ics11_beefy"))] - AnyClientState::Beefy(_) => None, - #[cfg(any(test, feature = "ics11_beefy"))] - Self::Near(_) => todo!(), - #[cfg(any(test, feature = "mocks"))] - AnyClientState::Mock(_) => None, - } - } - - pub fn max_clock_drift(&self) -> Duration { - match self { - AnyClientState::Tendermint(state) => state.max_clock_drift, - #[cfg(any(test, feature = "ics11_beefy"))] - AnyClientState::Beefy(_) => Duration::new(0, 0), - #[cfg(any(test, feature = "ics11_beefy"))] - Self::Near(_) => todo!(), - #[cfg(any(test, feature = "mocks"))] - AnyClientState::Mock(_) => Duration::new(0, 0), - } - } - - pub fn client_type(&self) -> ClientType { - match self { - Self::Tendermint(state) => state.client_type(), - 
#[cfg(any(test, feature = "ics11_beefy"))] - Self::Beefy(state) => state.client_type(), - #[cfg(any(test, feature = "ics11_beefy"))] - Self::Near(_) => todo!(), - #[cfg(any(test, feature = "mocks"))] - Self::Mock(state) => state.client_type(), - } - } - - pub fn refresh_period(&self) -> Option { - match self { - AnyClientState::Tendermint(tm_state) => tm_state.refresh_time(), - #[cfg(any(test, feature = "ics11_beefy"))] - AnyClientState::Beefy(_) => None, - #[cfg(any(test, feature = "ics11_beefy"))] - Self::Near(_) => None, - #[cfg(any(test, feature = "mocks"))] - AnyClientState::Mock(mock_state) => mock_state.refresh_time(), - } - } - - pub fn expired(&self, elapsed_since_latest: Duration) -> bool { - match self { - AnyClientState::Tendermint(tm_state) => tm_state.expired(elapsed_since_latest), - #[cfg(any(test, feature = "ics11_beefy"))] - AnyClientState::Beefy(bf_state) => bf_state.expired(elapsed_since_latest), - #[cfg(any(test, feature = "ics11_beefy"))] - Self::Near(_) => false, - #[cfg(any(test, feature = "mocks"))] - AnyClientState::Mock(mock_state) => mock_state.expired(elapsed_since_latest), - } - } -} - -impl Protobuf for AnyClientState {} - -impl TryFrom for AnyClientState { - type Error = Error; - - fn try_from(raw: Any) -> Result { - match raw.type_url.as_str() { - "" => Err(Error::empty_client_state_response()), - - TENDERMINT_CLIENT_STATE_TYPE_URL => Ok(AnyClientState::Tendermint( - client_state::ClientState::decode_vec(&raw.value) - .map_err(Error::decode_raw_client_state)?, - )), - - #[cfg(any(test, feature = "ics11_beefy"))] - BEEFY_CLIENT_STATE_TYPE_URL => Ok(AnyClientState::Beefy( - beefy_client_state::ClientState::decode_vec(&raw.value) - .map_err(Error::decode_raw_client_state)?, - )), - - #[cfg(any(test, feature = "mocks"))] - MOCK_CLIENT_STATE_TYPE_URL => Ok(AnyClientState::Mock( - MockClientState::decode_vec(&raw.value).map_err(Error::decode_raw_client_state)?, - )), - - _ => Err(Error::unknown_client_state_type(raw.type_url)), - } - } -} - -impl From for Any { - fn from(value: AnyClientState) -> Self { - match value { - AnyClientState::Tendermint(value) => Any { - type_url: TENDERMINT_CLIENT_STATE_TYPE_URL.to_string(), - value: value.encode_vec(), - }, - #[cfg(any(test, feature = "ics11_beefy"))] - AnyClientState::Beefy(value) => Any { - type_url: BEEFY_CLIENT_STATE_TYPE_URL.to_string(), - value: value.encode_vec(), - }, - #[cfg(any(test, feature = "ics11_beefy"))] - AnyClientState::Near(_) => Any { - type_url: BEEFY_CLIENT_STATE_TYPE_URL.to_string(), - value: value.encode_vec(), - }, - #[cfg(any(test, feature = "mocks"))] - AnyClientState::Mock(value) => Any { - type_url: MOCK_CLIENT_STATE_TYPE_URL.to_string(), - value: value.encode_vec(), - }, - } - } -} - -impl ClientState for AnyClientState { - type UpgradeOptions = AnyUpgradeOptions; - - fn chain_id(&self) -> ChainId { - match self { - AnyClientState::Tendermint(tm_state) => tm_state.chain_id(), - #[cfg(any(test, feature = "ics11_beefy"))] - AnyClientState::Beefy(bf_state) => bf_state.chain_id(), - #[cfg(any(test, feature = "ics11_beefy"))] - AnyClientState::Near(_) => todo!(), - #[cfg(any(test, feature = "mocks"))] - AnyClientState::Mock(mock_state) => mock_state.chain_id(), - } - } - - fn client_type(&self) -> ClientType { - self.client_type() - } - - fn latest_height(&self) -> Height { - self.latest_height() - } - - fn frozen_height(&self) -> Option { - self.frozen_height() - } - - fn upgrade( - self, - upgrade_height: Height, - upgrade_options: Self::UpgradeOptions, - chain_id: ChainId, - ) -> Self { - 
match self { - AnyClientState::Tendermint(tm_state) => tm_state - .upgrade(upgrade_height, upgrade_options.into_tendermint(), chain_id) - .wrap_any(), - #[cfg(any(test, feature = "ics11_beefy"))] - AnyClientState::Beefy(bf_state) => bf_state - .upgrade(upgrade_height, upgrade_options.into_beefy(), chain_id) - .wrap_any(), - #[cfg(any(test, feature = "ics11_beefy"))] - AnyClientState::Near(near_state) => near_state - .upgrade(upgrade_height, upgrade_options.into_beefy(), chain_id) - .wrap_any(), - #[cfg(any(test, feature = "mocks"))] - AnyClientState::Mock(mock_state) => { - mock_state.upgrade(upgrade_height, (), chain_id).wrap_any() - } - } - } - - fn wrap_any(self) -> AnyClientState { - self - } +use crate::{ + core::{ics02_client::client_def::ClientDef, ics24_host::identifier::ChainId}, + prelude::*, + Height, +}; +use core::{ + fmt::Debug, + marker::{Send, Sync}, + time::Duration, +}; + +pub trait ClientState: Clone + Debug + Send + Sync { + /// Client-specific options for upgrading the client + type UpgradeOptions; + type ClientDef: ClientDef; + + /// Return the chain identifier which this client is serving (i.e., the client is verifying + /// consensus states from this chain). + fn chain_id(&self) -> ChainId; + + /// Type of client associated with this state (eg. Tendermint) + fn client_def(&self) -> Self::ClientDef; + + /// Returns one of the prefixes that should be present in any client identifiers. + /// The prefix is deterministic for a given chain type, hence all clients for a Tendermint-type + /// chain, for example, will have the prefix '07-tendermint'. + fn client_type(&self) -> &'static str; + + /// Latest height of consensus state + fn latest_height(&self) -> Height; + + /// Freeze status of the client + fn is_frozen(&self) -> bool { + self.frozen_height().is_some() + } + + /// Frozen height of the client + fn frozen_height(&self) -> Option; + + /// Helper function to verify the upgrade client procedure. + /// Resets all fields except the blockchain-specific ones, + /// and updates the given fields. + fn upgrade( + self, + upgrade_height: Height, + upgrade_options: Self::UpgradeOptions, + chain_id: ChainId, + ) -> Self; + + /// Helper function to verify the upgrade client procedure. + fn expired(&self, elapsed: Duration) -> bool; + + /// Performs downcast of the client state from an "AnyClientState" type to T, otherwise + /// panics. Downcast from `T` to `T` is always successful. + fn downcast(self) -> Option + where + Self: 'static, + { + ::downcast_ref(&self).cloned() + } + + fn wrap(sub_state: &dyn core::any::Any) -> Option + where + Self: 'static, + { + sub_state.downcast_ref::().cloned() + } + + fn encode_to_vec(&self) -> Vec; } -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -#[serde(tag = "type")] -pub struct IdentifiedAnyClientState { - pub client_id: ClientId, - pub client_state: AnyClientState, -} - -impl IdentifiedAnyClientState { - pub fn new(client_id: ClientId, client_state: AnyClientState) -> Self { - IdentifiedAnyClientState { - client_id, - client_state, - } - } -} - -impl Protobuf for IdentifiedAnyClientState {} - -impl TryFrom for IdentifiedAnyClientState { - type Error = Error; - - fn try_from(raw: IdentifiedClientState) -> Result { - Ok(IdentifiedAnyClientState { - client_id: raw.client_id.parse().map_err(|e: ValidationError| { - Error::invalid_raw_client_id(raw.client_id.clone(), e) - })?, - client_state: raw - .client_state - .ok_or_else(Error::missing_raw_client_state)? 
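+// A minimal sketch (assumed, for illustration only): with the enum-based `AnyClientState`
+// removed and `ClientType` reduced to a string slice, concrete client states are recovered
+// through the new `downcast`/`wrap` helpers rather than `downcast!` match arms. Assuming
+// `downcast` is generic over the target type, usage would look roughly like:
+//
+//     // `host_state` implements the reworked `ClientState` trait; `TmClientState` and
+//     // `HostClientState` are hypothetical concrete types, not introduced by this patch.
+//     let maybe_tm: Option<TmClientState> = host_state.downcast();
+//     let rewrapped: Option<HostClientState> = HostClientState::wrap(&concrete_state);
+//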
- .try_into()?, - }) - } -} - -impl From for IdentifiedClientState { - fn from(value: IdentifiedAnyClientState) -> Self { - IdentifiedClientState { - client_id: value.client_id.to_string(), - client_state: Some(value.client_state.into()), - } - } -} - -#[cfg(test)] -mod tests { - - use ibc_proto::google::protobuf::Any; - use test_log::test; - - use crate::clients::ics07_tendermint::client_state::test_util::get_dummy_tendermint_client_state; - use crate::clients::ics07_tendermint::header::test_util::get_dummy_tendermint_header; - use crate::core::ics02_client::client_state::AnyClientState; - - #[test] - fn any_client_state_serialization() { - let tm_client_state = get_dummy_tendermint_client_state(get_dummy_tendermint_header()); - - let raw: Any = tm_client_state.clone().into(); - let tm_client_state_back = AnyClientState::try_from(raw).unwrap(); - assert_eq!(tm_client_state, tm_client_state_back); - } -} +/// Type of the client, depending on the specific consensus algorithm. +pub type ClientType = &'static str; diff --git a/modules/src/core/ics02_client/client_type.rs b/modules/src/core/ics02_client/client_type.rs index f2c84ac0e7..8b13789179 100644 --- a/modules/src/core/ics02_client/client_type.rs +++ b/modules/src/core/ics02_client/client_type.rs @@ -1,122 +1 @@ -use crate::prelude::*; -use core::fmt; -use serde_derive::{Deserialize, Serialize}; -use super::error::Error; - -/// Type of the client, depending on the specific consensus algorithm. -#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] -pub enum ClientType { - Tendermint = 1, - #[cfg(any(test, feature = "ics11_beefy"))] - Beefy = 2, - #[cfg(any(test, feature = "ics11_beefy"))] - Near = 3, - #[cfg(any(test, feature = "mocks"))] - Mock = 9999, -} - -impl ClientType { - const TENDERMINT_STR: &'static str = "07-tendermint"; - #[cfg(any(test, feature = "ics11_beefy"))] - const BEEFY_STR: &'static str = "11-beefy"; - #[cfg(any(test, feature = "ics11_beefy"))] - const NEAR_STR: &'static str = "11-beefy"; - - #[cfg_attr(not(test), allow(dead_code))] - const MOCK_STR: &'static str = "9999-mock"; - - /// Yields the identifier of this client type as a string - pub fn as_str(&self) -> &'static str { - match self { - Self::Tendermint => Self::TENDERMINT_STR, - #[cfg(any(test, feature = "ics11_beefy"))] - Self::Beefy => Self::BEEFY_STR, - #[cfg(any(test, feature = "ics11_beefy"))] - Self::Near => Self::NEAR_STR, - #[cfg(any(test, feature = "mocks"))] - Self::Mock => Self::MOCK_STR, - } - } -} - -impl fmt::Display for ClientType { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "ClientType({})", self.as_str()) - } -} - -impl core::str::FromStr for ClientType { - type Err = Error; - - fn from_str(s: &str) -> Result { - match s { - Self::TENDERMINT_STR => Ok(Self::Tendermint), - #[cfg(any(test, feature = "ics11_beefy"))] - Self::BEEFY_STR => Ok(Self::Beefy), - #[cfg(any(test, feature = "mocks"))] - Self::MOCK_STR => Ok(Self::Mock), - _ => Err(Error::unknown_client_type(s.to_string())), - } - } -} - -#[cfg(test)] -mod tests { - use core::str::FromStr; - use test_log::test; - - use super::ClientType; - use crate::core::ics02_client::error::{Error, ErrorDetail}; - - #[test] - fn parse_tendermint_client_type() { - let client_type = ClientType::from_str("07-tendermint"); - - match client_type { - Ok(ClientType::Tendermint) => (), - _ => panic!("parse failed"), - } - } - - #[test] - fn parse_mock_client_type() { - let client_type = ClientType::from_str("9999-mock"); - - match 
client_type { - Ok(ClientType::Mock) => (), - _ => panic!("parse failed"), - } - } - - #[test] - fn parse_unknown_client_type() { - let client_type_str = "some-random-client-type"; - let result = ClientType::from_str(client_type_str); - - match result { - Err(Error(ErrorDetail::UnknownClientType(e), _)) => { - assert_eq!(&e.client_type, client_type_str) - } - _ => { - panic!("Expected ClientType::from_str to fail with UnknownClientType, instead got",) - } - } - } - - #[test] - fn parse_mock_as_string_result() { - let client_type = ClientType::Mock; - let type_string = client_type.as_str(); - let client_type_from_str = ClientType::from_str(type_string).unwrap(); - assert_eq!(client_type_from_str, client_type); - } - - #[test] - fn parse_tendermint_as_string_result() { - let client_type = ClientType::Tendermint; - let type_string = client_type.as_str(); - let client_type_from_str = ClientType::from_str(type_string).unwrap(); - assert_eq!(client_type_from_str, client_type); - } -} diff --git a/modules/src/core/ics02_client/context.rs b/modules/src/core/ics02_client/context.rs index 92b753e95d..a5f287873a 100644 --- a/modules/src/core/ics02_client/context.rs +++ b/modules/src/core/ics02_client/context.rs @@ -2,233 +2,262 @@ //! that any host chain must implement to be able to process any `ClientMsg`. See //! "ADR 003: IBC protocol implementation" for more details. -use crate::core::ics02_client::client_consensus::AnyConsensusState; -use crate::core::ics02_client::client_def::ConsensusUpdateResult; -use crate::core::ics02_client::client_state::AnyClientState; -use crate::core::ics02_client::client_type::ClientType; -use crate::core::ics02_client::error::{Error, ErrorDetail}; -use crate::core::ics02_client::handler::ClientResult::{self, Create, Update, Upgrade}; -use crate::core::ics24_host::identifier::ClientId; -use crate::timestamp::Timestamp; -use crate::Height; -use alloc::vec::Vec; +use crate::{ + core::{ + ics02_client::{ + client_consensus::ConsensusState, + client_def::{ClientDef, ConsensusUpdateResult}, + client_state::{ClientState, ClientType}, + error::{Error, ErrorDetail}, + handler::ClientResult::{self, Create, Update, Upgrade}, + header::Header, + misbehaviour::Misbehaviour, + }, + ics24_host::identifier::ClientId, + }, + timestamp::Timestamp, + Height, +}; +use alloc::{string::String, vec::Vec}; +use core::fmt::Debug; /// Defines the read-only part of ICS2 (client functions) context. -pub trait ClientReader { - fn client_type(&self, client_id: &ClientId) -> Result; - fn client_state(&self, client_id: &ClientId) -> Result; - - /// Retrieve the consensus state for the given client ID at the specified - /// height. - /// - /// Returns an error if no such state exists. - fn consensus_state( - &self, - client_id: &ClientId, - height: Height, - ) -> Result; - - /// This should return the host type. - fn host_client_type(&self) -> ClientType; - - /// Similar to `consensus_state`, attempt to retrieve the consensus state, - /// but return `None` if no state exists at the given height. - fn maybe_consensus_state( - &self, - client_id: &ClientId, - height: Height, - ) -> Result, Error> { - match self.consensus_state(client_id, height) { - Ok(cs) => Ok(Some(cs)), - Err(e) => match e.detail() { - ErrorDetail::ConsensusStateNotFound(_) => Ok(None), - _ => Err(e), - }, - } - } - - /// Search for the lowest consensus state higher than `height`. 
- fn next_consensus_state( - &self, - client_id: &ClientId, - height: Height, - ) -> Result, Error>; - - /// Search for the highest consensus state lower than `height`. - fn prev_consensus_state( - &self, - client_id: &ClientId, - height: Height, - ) -> Result, Error>; - - /// Returns the current height of the local chain. - fn host_height(&self) -> Height; - - /// Returns the current timestamp of the local chain. - fn host_timestamp(&self) -> Timestamp; - - /// Returns the `ConsensusState` of the host (local) chain at a specific height. - /// If this is fetched from a proof whose origin is off-chain, it should ideally be verified first. - fn host_consensus_state( - &self, - height: Height, - proof: Option>, - ) -> Result; - - /// Returns a natural number, counting how many clients have been created thus far. - /// The value of this counter should increase only via method `ClientKeeper::increase_client_counter`. - fn client_counter(&self) -> Result; +pub trait ClientReader: ClientKeeper { + fn client_type(&self, client_id: &ClientId) -> Result; + fn client_state(&self, client_id: &ClientId) -> Result; + + /// Retrieve the consensus state for the given client ID at the specified + /// height. + /// + /// Returns an error if no such state exists. + fn consensus_state( + &self, + client_id: &ClientId, + height: Height, + ) -> Result; + + /// This should return the host type. + fn host_client_type(&self) -> String; + + /// Similar to `consensus_state`, attempt to retrieve the consensus state, + /// but return `None` if no state exists at the given height. + fn maybe_consensus_state( + &self, + client_id: &ClientId, + height: Height, + ) -> Result, Error> { + match self.consensus_state(client_id, height) { + Ok(cs) => Ok(Some(cs)), + Err(e) => match e.detail() { + ErrorDetail::ConsensusStateNotFound(_) => Ok(None), + _ => Err(e), + }, + } + } + + /// Search for the lowest consensus state higher than `height`. + fn next_consensus_state( + &self, + client_id: &ClientId, + height: Height, + ) -> Result, Error>; + + /// Search for the highest consensus state lower than `height`. + fn prev_consensus_state( + &self, + client_id: &ClientId, + height: Height, + ) -> Result, Error>; + + /// Returns the current height of the local chain. + fn host_height(&self) -> Height; + + /// Returns the current timestamp of the local chain. + fn host_timestamp(&self) -> Timestamp; + + /// Returns the `ConsensusState` of the host (local) chain at a specific height. + /// If this is fetched from a proof whose origin is off-chain, it should ideally be verified + /// first. + fn host_consensus_state( + &self, + height: Height, + proof: Option>, + ) -> Result; + + /// Returns a natural number, counting how many clients have been created thus far. + /// The value of this counter should increase only via method + /// `ClientKeeper::increase_client_counter`. + fn client_counter(&self) -> Result; } /// Defines the write-only part of ICS2 (client functions) context. 
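+// A minimal wiring sketch (assumed, for illustration only): a concrete host context
+// now supplies the `Any*` types and the client definition through `ClientKeeper`'s
+// associated types instead of the old hard-coded `AnyClientState`/`AnyConsensusState`
+// enums. All type names below (`HostContext`, `HostClientState`, ...) are hypothetical:
+//
+//     impl ClientKeeper for HostContext {
+//         type AnyHeader = HostHeader;
+//         type AnyClientState = HostClientState;
+//         type AnyConsensusState = HostConsensusState;
+//         type AnyMisbehaviour = HostMisbehaviour;
+//         type ClientDef = HostClient;
+//         // storage methods (store_client_state, store_consensus_state, ...) elided
+//     }
+//
+// `ClientReader` above is now bounded by `ClientKeeper`, so read access is only
+// available where these associated types have been wired up.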
-pub trait ClientKeeper { - fn store_client_result(&mut self, handler_res: ClientResult) -> Result<(), Error> { - match handler_res { - Create(res) => { - let client_id = res.client_id.clone(); - - self.store_client_type(client_id.clone(), res.client_type)?; - self.store_client_state(client_id.clone(), res.client_state.clone())?; - self.store_consensus_state( - client_id, - res.client_state.latest_height(), - res.consensus_state, - )?; - self.increase_client_counter(); - self.store_update_time( - res.client_id.clone(), - res.client_state.latest_height(), - res.processed_time, - )?; - self.store_update_height( - res.client_id, - res.client_state.latest_height(), - res.processed_height, - )?; - Ok(()) - } - Update(res) => { - self.store_client_state(res.client_id.clone(), res.client_state.clone())?; - match res.consensus_state { - None => {} - Some(cs_state_update) => match cs_state_update { - ConsensusUpdateResult::Single(cs_state) => { - self.store_consensus_state( - res.client_id.clone(), - res.client_state.latest_height(), - cs_state, - )?; - - self.store_update_time( - res.client_id.clone(), - res.client_state.latest_height(), - res.processed_time, - )?; - self.store_update_height( - res.client_id, - res.client_state.latest_height(), - res.processed_height, - )?; - } - ConsensusUpdateResult::Batch(cs_states) => { - for (height, cs_state) in cs_states { - self.store_consensus_state( - res.client_id.clone(), - height, - cs_state, - )?; - self.store_update_time( - res.client_id.clone(), - height, - res.processed_time, - )?; - self.store_update_height( - res.client_id.clone(), - height, - res.processed_height, - )?; - } - } - }, - } - Ok(()) - } - Upgrade(res) => { - self.store_client_state(res.client_id.clone(), res.client_state.clone())?; - match res.consensus_state { - None => {} - Some(cs_state_update) => match cs_state_update { - ConsensusUpdateResult::Single(cs_state) => { - self.store_consensus_state( - res.client_id.clone(), - res.client_state.latest_height(), - cs_state, - )?; - } - ConsensusUpdateResult::Batch(cs_states) => { - for (height, cs_state) in cs_states { - self.store_consensus_state( - res.client_id.clone(), - height, - cs_state, - )?; - } - } - }, - } - Ok(()) - } - } - } - - /// Called upon successful client creation - fn store_client_type( - &mut self, - client_id: ClientId, - client_type: ClientType, - ) -> Result<(), Error>; - - /// Called upon successful client creation and update - fn store_client_state( - &mut self, - client_id: ClientId, - client_state: AnyClientState, - ) -> Result<(), Error>; - - /// Called upon successful client creation and update - fn store_consensus_state( - &mut self, - client_id: ClientId, - height: Height, - consensus_state: AnyConsensusState, - ) -> Result<(), Error>; - - /// Called upon client creation. - /// Increases the counter which keeps track of how many clients have been created. - /// Should never fail. - fn increase_client_counter(&mut self); - - /// Called upon successful client update. - /// Implementations are expected to use this to record the specified time as the time at which - /// this update (or header) was processed. - fn store_update_time( - &mut self, - client_id: ClientId, - height: Height, - timestamp: Timestamp, - ) -> Result<(), Error>; - - /// Called upon successful client update. - /// Implementations are expected to use this to record the specified height as the height at - /// at which this update (or header) was processed. 
- fn store_update_height( - &mut self, - client_id: ClientId, - height: Height, - host_height: Height, - ) -> Result<(), Error>; - - /// validates the client parameters for a client of the running chain - /// This function is only used to validate the client state the counterparty stores for this chain - fn validate_self_client(&self, client_state: &AnyClientState) -> Result<(), Error>; +pub trait ClientKeeper +where + Self: Clone + Debug + Eq, +{ + type AnyHeader: Header; + type AnyClientState: ClientState + Eq; + type AnyConsensusState: ConsensusState + Eq + 'static; + type AnyMisbehaviour: Misbehaviour; + + /// Client definition type (used for verification) + type ClientDef: ClientDef< + Header = Self::AnyHeader, + ClientState = Self::AnyClientState, + ConsensusState = Self::AnyConsensusState, + >; + + fn store_client_result( + &mut self, + handler_res: ClientResult, + ) -> Result<(), Error> { + match handler_res { + Create(res) => { + let client_id = res.client_id.clone(); + + self.store_client_type(client_id.clone(), res.client_type)?; + self.store_client_state(client_id.clone(), res.client_state.clone())?; + self.store_consensus_state( + client_id, + res.client_state.latest_height(), + res.consensus_state, + )?; + self.increase_client_counter(); + self.store_update_time( + res.client_id.clone(), + res.client_state.latest_height(), + res.processed_time, + )?; + self.store_update_height( + res.client_id, + res.client_state.latest_height(), + res.processed_height, + )?; + Ok(()) + }, + Update(res) => { + self.store_client_state(res.client_id.clone(), res.client_state.clone())?; + match res.consensus_state { + None => {}, + Some(cs_state_update) => match cs_state_update { + ConsensusUpdateResult::Single(cs_state) => { + self.store_consensus_state( + res.client_id.clone(), + res.client_state.latest_height(), + cs_state, + )?; + + self.store_update_time( + res.client_id.clone(), + res.client_state.latest_height(), + res.processed_time, + )?; + self.store_update_height( + res.client_id, + res.client_state.latest_height(), + res.processed_height, + )?; + }, + ConsensusUpdateResult::Batch(cs_states) => { + for (height, cs_state) in cs_states { + self.store_consensus_state( + res.client_id.clone(), + height, + cs_state, + )?; + self.store_update_time( + res.client_id.clone(), + height, + res.processed_time, + )?; + self.store_update_height( + res.client_id.clone(), + height, + res.processed_height, + )?; + } + }, + }, + } + Ok(()) + }, + Upgrade(res) => { + self.store_client_state(res.client_id.clone(), res.client_state.clone())?; + match res.consensus_state { + None => {}, + Some(cs_state_update) => match cs_state_update { + ConsensusUpdateResult::Single(cs_state) => { + self.store_consensus_state( + res.client_id.clone(), + res.client_state.latest_height(), + cs_state, + )?; + }, + ConsensusUpdateResult::Batch(cs_states) => { + for (height, cs_state) in cs_states { + self.store_consensus_state( + res.client_id.clone(), + height, + cs_state, + )?; + } + }, + }, + } + Ok(()) + }, + } + } + + /// Called upon successful client creation + fn store_client_type( + &mut self, + client_id: ClientId, + client_type: ClientType, + ) -> Result<(), Error>; + + /// Called upon successful client creation and update + fn store_client_state( + &mut self, + client_id: ClientId, + client_state: Self::AnyClientState, + ) -> Result<(), Error>; + + /// Called upon successful client creation and update + fn store_consensus_state( + &mut self, + client_id: ClientId, + height: Height, + consensus_state: 
Self::AnyConsensusState,
+    ) -> Result<(), Error>;
+
+    /// Called upon client creation.
+    /// Increases the counter which keeps track of how many clients have been created.
+    /// Should never fail.
+    fn increase_client_counter(&mut self);
+
+    /// Called upon successful client update.
+    /// Implementations are expected to use this to record the specified time as the time at which
+    /// this update (or header) was processed.
+    fn store_update_time(
+        &mut self,
+        client_id: ClientId,
+        height: Height,
+        timestamp: Timestamp,
+    ) -> Result<(), Error>;
+
+    /// Called upon successful client update.
+    /// Implementations are expected to use this to record the specified height as the height
+    /// at which this update (or header) was processed.
+    fn store_update_height(
+        &mut self,
+        client_id: ClientId,
+        height: Height,
+        host_height: Height,
+    ) -> Result<(), Error>;
+
+    /// Validates the client parameters for a client of the running chain.
+    /// This function is only used to validate the client state the counterparty stores for this
+    /// chain.
+    fn validate_self_client(&self, client_state: &Self::AnyClientState) -> Result<(), Error>;
 }
diff --git a/modules/src/core/ics02_client/error.rs b/modules/src/core/ics02_client/error.rs
index f7a08e623c..6fa4170aa5 100644
--- a/modules/src/core/ics02_client/error.rs
+++ b/modules/src/core/ics02_client/error.rs
@@ -4,570 +4,279 @@ use flex_error::{define_error, TraceError};
 use tendermint::Error as TendermintError;
 use tendermint_proto::Error as TendermintProtoError;
-use crate::clients::ics07_tendermint::error::Error as Ics07Error;
-#[cfg(any(test, feature = "ics11_beefy"))]
-use crate::clients::ics11_beefy::error::Error as Ics11Error;
-#[cfg(any(test, feature = "ics11_beefy"))]
-use crate::clients::ics13_near::error::Error as Ics13Error;
-use crate::core::ics02_client::client_type::ClientType;
-use crate::core::ics02_client::height::HeightError;
-use crate::core::ics23_commitment::error::Error as Ics23Error;
-use crate::core::ics24_host::error::ValidationError;
-use crate::core::ics24_host::identifier::ClientId;
-use crate::signer::SignerError;
-use crate::timestamp::Timestamp;
-use crate::Height;
-
-#[cfg(not(any(test, feature = "ics11_beefy")))]
-define_error!
{ - #[derive(Debug, PartialEq, Eq)] - Error { - UnknownClientType - { client_type: String } - | e | { format_args!("unknown client type: {0}", e.client_type) }, - - ClientIdentifierConstructor - { client_type: ClientType, counter: u64 } - [ ValidationError ] - | e | { - format_args!("Client identifier constructor failed for type {0} with counter {1}", - e.client_type, e.counter) - }, - - ClientAlreadyExists - { client_id: ClientId } - | e | { format_args!("client already exists: {0}", e.client_id) }, - - ClientNotFound - { client_id: ClientId } - | e | { format_args!("client not found: {0}", e.client_id) }, - - ClientFrozen - { client_id: ClientId } - | e | { format_args!("client is frozen: {0}", e.client_id) }, - - ConsensusStateNotFound - { client_id: ClientId, height: Height } - | e | { - format_args!("consensus state not found at: {0} at height {1}", - e.client_id, e.height) - }, - - ImplementationSpecific - { reason: String } - | e | { format_args!("implementation specific error: {}", e.reason) }, - - HeaderVerificationFailure - { reason: String } - | e | { format_args!("header verification failed with reason: {}", e.reason) }, - - InvalidTrustThreshold - { numerator: u64, denominator: u64 } - | e | { format_args!("failed to build trust threshold from fraction: {}/{}", e.numerator, e.denominator) }, - - FailedTrustThresholdConversion - { numerator: u64, denominator: u64 } - [ TendermintError ] - | e | { format_args!("failed to build Tendermint domain type trust threshold from fraction: {}/{}", e.numerator, e.denominator) }, - - UnknownClientStateType - { client_state_type: String } - | e | { format_args!("unknown client state type: {0}", e.client_state_type) }, - - EmptyClientStateResponse - | _ | { "the client state was not found" }, - - EmptyPrefix - | _ | { "empty prefix" }, - - UnknownConsensusStateType - { consensus_state_type: String } - | e | { - format_args!("unknown client consensus state type: {0}", - e.consensus_state_type) - }, - - EmptyConsensusStateResponse - | _ | { "the client consensus state was not found" }, - - UnknownHeaderType - { header_type: String } - | e | { - format_args!("unknown header type: {0}", - e.header_type) - }, - - UnknownMisbehaviourType - { misbehavior_type: String } - | e | { - format_args!("unknown misbehaviour type: {0}", - e.misbehavior_type) - }, - - InvalidRawClientId - { client_id: String } - [ ValidationError ] - | e | { - format_args!("invalid raw client identifier {0}", - e.client_id) - }, - - DecodeRawClientState - [ TraceError ] - | _ | { "error decoding raw client state" }, - - MissingRawClientState - | _ | { "missing raw client state" }, - - InvalidRawConsensusState - [ TraceError ] - | _ | { "invalid raw client consensus state" }, - - MissingRawConsensusState - | _ | { "missing raw client consensus state" }, - - InvalidMsgUpdateClientId - [ ValidationError ] - | _ | { "invalid client id in the update client message" }, - - Decode - [ TraceError ] - | _ | { "decode error" }, - - MissingHeight - | _ | { "invalid raw client consensus state: the height field is missing" }, - - InvalidClientIdentifier - [ ValidationError ] - | _ | { "invalid client identifier" }, - - InvalidRawHeader - [ TraceError ] - | _ | { "invalid raw header" }, - - MissingRawHeader - | _ | { "missing raw header" }, - - DecodeRawMisbehaviour - [ TraceError ] - | _ | { "invalid raw misbehaviour" }, - - InvalidRawMisbehaviour - [ ValidationError ] - | _ | { "invalid raw misbehaviour" }, - - MissingRawMisbehaviour - | _ | { "missing raw misbehaviour" }, - - 
InvalidStringAsHeight - { value: String } - [ HeightError ] - | e | { format_args!("String {0} cannnot be converted to height", e.value) }, - - InvalidHeightResult - | _ | { "height cannot end up zero or negative" }, - - InvalidAddress - | _ | { "invalid address" }, - - InvalidUpgradeClientProof - [ Ics23Error ] - | _ | { "invalid proof for the upgraded client state" }, - - InvalidUpgradeConsensusStateProof - [ Ics23Error ] - | _ | { "invalid proof for the upgraded consensus state" }, - - InvalidCommitmentProof - [ Ics23Error ] - | _ | { "invalid commitment proof bytes" }, - - Tendermint - [ Ics07Error ] - | _ | { "tendermint error" }, - - InvalidPacketTimestamp - [ crate::timestamp::ParseTimestampError ] - | _ | { "invalid packet timeout timestamp value" }, - - ClientArgsTypeMismatch - { client_type: ClientType } - | e | { - format_args!("mismatch between client and arguments types, expected: {0:?}", - e.client_type) - }, - - InsufficientVotingPower - { reason: String } - | e | { - format_args!("Insufficient overlap {}", e.reason) - }, - - RawClientAndConsensusStateTypesMismatch - { - state_type: ClientType, - consensus_type: ClientType, - } - | e | { - format_args!("mismatch in raw client consensus state {} with expected state {}", - e.state_type, e.consensus_type) - }, - - LowHeaderHeight - { - header_height: Height, - latest_height: Height - } - | e | { - format!("received header height ({:?}) is lower than (or equal to) client latest height ({:?})", - e.header_height, e.latest_height) - }, - - LowUpgradeHeight - { - upgraded_height: Height, - client_height: Height, - } - | e | { - format_args!("upgraded client height {} must be at greater than current client height {}", - e.upgraded_height, e.client_height) - }, - - InvalidConsensusStateTimestamp - { - time1: Timestamp, - time2: Timestamp, - } - | e | { - format_args!("timestamp is invalid or missing, timestamp={0}, now={1}", e.time1, e.time2) - }, - - HeaderNotWithinTrustPeriod - { - latest_time:Timestamp, - update_time: Timestamp, - } - | e | { - format_args!("header not withing trusting period: expires_at={0} now={1}", e.latest_time, e.update_time) - }, - - TendermintHandlerError - [ Ics07Error ] - | _ | { format_args!("Tendermint-specific handler error") }, - - MissingLocalConsensusState - { height: Height } - | e | { format_args!("the local consensus state could not be retrieved for height {}", e.height) }, - - InvalidConnectionEnd - [ TraceError] - | _ | { "invalid connection end" }, - - InvalidChannelEnd - [ TraceError] - | _ | { "invalid channel end" }, - - InvalidAnyClientState - [ TraceError] - | _ | { "invalid any client state" }, - - InvalidAnyConsensusState - [ TraceError ] - | _ | { "invalid any client consensus state" }, - - Signer - [ SignerError ] - | _ | { "failed to parse signer" }, - } -} +use crate::{ + core::{ + ics02_client::{client_state::ClientType, height::HeightError}, + ics23_commitment::error::Error as Ics23Error, + ics24_host::{error::ValidationError, identifier::ClientId}, + }, + signer::SignerError, + timestamp::Timestamp, + Height, +}; -#[cfg(any(test, feature = "ics11_beefy"))] define_error! 
{ - #[derive(Debug, PartialEq, Eq)] - Error { - UnknownClientType - { client_type: String } - | e | { format_args!("unknown client type: {0}", e.client_type) }, - - ClientIdentifierConstructor - { client_type: ClientType, counter: u64 } - [ ValidationError ] - | e | { - format_args!("Client identifier constructor failed for type {0} with counter {1}", - e.client_type, e.counter) - }, - - ClientAlreadyExists - { client_id: ClientId } - | e | { format_args!("client already exists: {0}", e.client_id) }, - - ClientNotFound - { client_id: ClientId } - | e | { format_args!("client not found: {0}", e.client_id) }, - - ClientFrozen - { client_id: ClientId } - | e | { format_args!("client is frozen: {0}", e.client_id) }, - - ConsensusStateNotFound - { client_id: ClientId, height: Height } - | e | { - format_args!("consensus state not found at: {0} at height {1}", - e.client_id, e.height) - }, - - ImplementationSpecific - { reason: String } - | e | { format_args!("implementation specific error: {}", e.reason) }, - - HeaderVerificationFailure - { reason: String } - | e | { format_args!("header verification failed with reason: {}", e.reason) }, - - InvalidTrustThreshold - { numerator: u64, denominator: u64 } - | e | { format_args!("failed to build trust threshold from fraction: {}/{}", e.numerator, e.denominator) }, - - FailedTrustThresholdConversion - { numerator: u64, denominator: u64 } - [ TendermintError ] - | e | { format_args!("failed to build Tendermint domain type trust threshold from fraction: {}/{}", e.numerator, e.denominator) }, - - UnknownClientStateType - { client_state_type: String } - | e | { format_args!("unknown client state type: {0}", e.client_state_type) }, - - EmptyClientStateResponse - | _ | { "the client state was not found" }, - - EmptyPrefix - | _ | { "empty prefix" }, - - UnknownConsensusStateType - { consensus_state_type: String } - | e | { - format_args!("unknown client consensus state type: {0}", - e.consensus_state_type) - }, - - EmptyConsensusStateResponse - | _ | { "the client consensus state was not found" }, - - UnknownHeaderType - { header_type: String } - | e | { - format_args!("unknown header type: {0}", - e.header_type) - }, - - UnknownMisbehaviourType - { misbehavior_type: String } - | e | { - format_args!("unknown misbehaviour type: {0}", - e.misbehavior_type) - }, - - InvalidRawClientId - { client_id: String } - [ ValidationError ] - | e | { - format_args!("invalid raw client identifier {0}", - e.client_id) - }, - - DecodeRawClientState - [ TraceError ] - | _ | { "error decoding raw client state" }, - - MissingRawClientState - | _ | { "missing raw client state" }, - - InvalidRawConsensusState - [ TraceError ] - | _ | { "invalid raw client consensus state" }, - - MissingRawConsensusState - | _ | { "missing raw client consensus state" }, - - InvalidMsgUpdateClientId - [ ValidationError ] - | _ | { "invalid client id in the update client message" }, - - Decode - [ TraceError ] - | _ | { "decode error" }, - - MissingHeight - | _ | { "invalid raw client consensus state: the height field is missing" }, - - InvalidClientIdentifier - [ ValidationError ] - | _ | { "invalid client identifier" }, - - InvalidRawHeader - [ TraceError ] - | _ | { "invalid raw header" }, - - MissingRawHeader - | _ | { "missing raw header" }, - - DecodeRawMisbehaviour - [ TraceError ] - | _ | { "invalid raw misbehaviour" }, - - InvalidRawMisbehaviour - [ ValidationError ] - | _ | { "invalid raw misbehaviour" }, - - MissingRawMisbehaviour - | _ | { "missing raw misbehaviour" }, - - 
InvalidStringAsHeight - { value: String } - [ HeightError ] - | e | { format_args!("String {0} cannnot be converted to height", e.value) }, - - InvalidHeightResult - | _ | { "height cannot end up zero or negative" }, - - InvalidAddress - | _ | { "invalid address" }, - - InvalidUpgradeClientProof - [ Ics23Error ] - | _ | { "invalid proof for the upgraded client state" }, - - InvalidUpgradeConsensusStateProof - [ Ics23Error ] - | _ | { "invalid proof for the upgraded consensus state" }, - - InvalidCommitmentProof - [ Ics23Error ] - | _ | { "invalid commitment proof bytes" }, - - Tendermint - [ Ics07Error ] - | _ | { "tendermint error" }, - - Beefy - [ Ics11Error ] - | _ | { "Beefy error" }, - - Near - [ Ics13Error ] - | _ | { "Near error" }, - - InvalidPacketTimestamp - [ crate::timestamp::ParseTimestampError ] - | _ | { "invalid packet timeout timestamp value" }, - - ClientArgsTypeMismatch - { client_type: ClientType } - | e | { - format_args!("mismatch between client and arguments types, expected: {0:?}", - e.client_type) - }, - - InsufficientVotingPower - { reason: String } - | e | { - format_args!("Insufficient overlap {}", e.reason) - }, - - RawClientAndConsensusStateTypesMismatch - { - state_type: ClientType, - consensus_type: ClientType, - } - | e | { - format_args!("mismatch in raw client consensus state {} with expected state {}", - e.state_type, e.consensus_type) - }, - - LowHeaderHeight - { - header_height: Height, - latest_height: Height - } - | e | { - format!("received header height ({:?}) is lower than (or equal to) client latest height ({:?})", - e.header_height, e.latest_height) - }, - - LowUpgradeHeight - { - upgraded_height: Height, - client_height: Height, - } - | e | { - format_args!("upgraded client height {} must be at greater than current client height {}", - e.upgraded_height, e.client_height) - }, - - InvalidConsensusStateTimestamp - { - time1: Timestamp, - time2: Timestamp, - } - | e | { - format_args!("timestamp is invalid or missing, timestamp={0}, now={1}", e.time1, e.time2) - }, - - HeaderNotWithinTrustPeriod - { - latest_time:Timestamp, - update_time: Timestamp, - } - | e | { - format_args!("header not withing trusting period: expires_at={0} now={1}", e.latest_time, e.update_time) - }, - - TendermintHandlerError - [ Ics07Error ] - | _ | { format_args!("Tendermint-specific handler error") }, - - MissingLocalConsensusState - { height: Height } - | e | { format_args!("the local consensus state could not be retrieved for height {}", e.height) }, - - InvalidConnectionEnd - [ TraceError] - | _ | { "invalid connection end" }, - - InvalidChannelEnd - [ TraceError] - | _ | { "invalid channel end" }, - - InvalidAnyClientState - [ TraceError] - | _ | { "invalid any client state" }, - - InvalidAnyConsensusState - [ TraceError ] - | _ | { "invalid any client consensus state" }, - - Signer - [ SignerError ] - | _ | { "failed to parse signer" }, - } -} - -impl From for Error { - fn from(e: Ics07Error) -> Error { - Error::tendermint_handler_error(e) - } -} - -#[cfg(any(test, feature = "ics11_beefy"))] -impl From for Error { - fn from(e: Ics11Error) -> Error { - Error::beefy(e) - } -} - -#[cfg(any(test, feature = "ics11_beefy"))] -impl From for Error { - fn from(e: Ics13Error) -> Error { - Error::near(e) - } + #[derive(Debug, PartialEq, Eq)] + Error { + ClientError + { client_type: String, inner: String } + |e| { format_args!("client '{}' error: {}", e.client_type, e.inner) }, + + UnknownClientType + { client_type: String } + | e | { format_args!("unknown client type: {0}", 
e.client_type) }, + + ClientIdentifierConstructor + { client_type: String, counter: u64 } + [ ValidationError ] + | e | { + format_args!("Client identifier constructor failed for type {0} with counter {1}", + e.client_type, e.counter) + }, + + ClientAlreadyExists + { client_id: ClientId } + | e | { format_args!("client already exists: {0}", e.client_id) }, + + ClientNotFound + { client_id: ClientId } + | e | { format_args!("client not found: {0}", e.client_id) }, + + ClientFrozen + { client_id: ClientId } + | e | { format_args!("client is frozen: {0}", e.client_id) }, + + ConsensusStateNotFound + { client_id: ClientId, height: Height } + | e | { + format_args!("consensus state not found at: {0} at height {1}", + e.client_id, e.height) + }, + + ImplementationSpecific + { reason: String } + | e | { format_args!("implementation specific error: {}", e.reason) }, + + HeaderVerificationFailure + { reason: String } + | e | { format_args!("header verification failed with reason: {}", e.reason) }, + + InvalidTrustThreshold + { numerator: u64, denominator: u64 } + | e | { format_args!("failed to build trust threshold from fraction: {}/{}", e.numerator, e.denominator) }, + + FailedTrustThresholdConversion + { numerator: u64, denominator: u64 } + [ TendermintError ] + | e | { format_args!("failed to build Tendermint domain type trust threshold from fraction: {}/{}", e.numerator, e.denominator) }, + + UnknownClientStateType + { client_state_type: String } + | e | { format_args!("unknown client state type: {0}", e.client_state_type) }, + + EmptyClientStateResponse + | _ | { "the client state was not found" }, + + EmptyPrefix + | _ | { "empty prefix" }, + + UnknownConsensusStateType + { consensus_state_type: String } + | e | { + format_args!("unknown client consensus state type: {0}", + e.consensus_state_type) + }, + + EmptyConsensusStateResponse + | _ | { "the client consensus state was not found" }, + + UnknownHeaderType + { header_type: String } + | e | { + format_args!("unknown header type: {0}", + e.header_type) + }, + + UnknownMisbehaviourType + { misbehavior_type: String } + | e | { + format_args!("unknown misbehaviour type: {0}", + e.misbehavior_type) + }, + + InvalidRawClientId + { client_id: String } + [ ValidationError ] + | e | { + format_args!("invalid raw client identifier {0}", + e.client_id) + }, + + DecodeRawClientState + [ TraceError ] + | _ | { "error decoding raw client state" }, + + DecodeRawHeader + [ TraceError ] + | _ | { "error decoding raw header" }, + + DecodeRawConsensusState + [ TraceError ] + | _ | { "error decoding raw header" }, + + MissingRawClientState + | _ | { "missing raw client state" }, + + InvalidRawConsensusState + [ TraceError ] + | _ | { "invalid raw client consensus state" }, + + MissingRawConsensusState + | _ | { "missing raw client consensus state" }, + + InvalidMsgUpdateClientId + [ ValidationError ] + | _ | { "invalid client id in the update client message" }, + + Decode + [ TraceError ] + | _ | { "decode error" }, + + MissingHeight + | _ | { "invalid raw client consensus state: the height field is missing" }, + + InvalidClientIdentifier + [ ValidationError ] + | _ | { "invalid client identifier" }, + + InvalidRawHeader + [ TraceError ] + | _ | { "invalid raw header" }, + + MissingRawHeader + | _ | { "missing raw header" }, + + DecodeRawMisbehaviour + [ TraceError ] + | _ | { "invalid raw misbehaviour" }, + + InvalidRawMisbehaviour + [ ValidationError ] + | _ | { "invalid raw misbehaviour" }, + + MissingRawMisbehaviour + | _ | { "missing raw misbehaviour" 
}, + + InvalidStringAsHeight + { value: String } + [ HeightError ] + | e | { format_args!("String {0} cannnot be converted to height", e.value) }, + + InvalidHeightResult + | _ | { "height cannot end up zero or negative" }, + + InvalidAddress + | _ | { "invalid address" }, + + InvalidUpgradeClientProof + [ Ics23Error ] + | _ | { "invalid proof for the upgraded client state" }, + + InvalidUpgradeConsensusStateProof + [ Ics23Error ] + | _ | { "invalid proof for the upgraded consensus state" }, + + InvalidCommitmentProof + [ Ics23Error ] + | _ | { "invalid commitment proof bytes" }, + + InvalidPacketTimestamp + [ crate::timestamp::ParseTimestampError ] + | _ | { "invalid packet timeout timestamp value" }, + + ClientArgsTypeMismatch + { client_type: String } + | e | { + format_args!("mismatch between client and arguments types, expected: {0:?}", + e.client_type) + }, + + InsufficientVotingPower + { reason: String } + | e | { + format_args!("Insufficient overlap {}", e.reason) + }, + + RawClientAndConsensusStateTypesMismatch + { + state_type: ClientType, + consensus_type: ClientType, + } + | e | { + format_args!("mismatch in raw client consensus state {} with expected state {}", + e.state_type, e.consensus_type) + }, + + LowHeaderHeight + { + header_height: Height, + latest_height: Height + } + | e | { + format!("received header height ({:?}) is lower than (or equal to) client latest height ({:?})", + e.header_height, e.latest_height) + }, + + LowUpgradeHeight + { + upgraded_height: Height, + client_height: Height, + } + | e | { + format_args!("upgraded client height {} must be at greater than current client height {}", + e.upgraded_height, e.client_height) + }, + + InvalidConsensusStateTimestamp + { + time1: Timestamp, + time2: Timestamp, + } + | e | { + format_args!("timestamp is invalid or missing, timestamp={0}, now={1}", e.time1, e.time2) + }, + + HeaderNotWithinTrustPeriod + { + latest_time:Timestamp, + update_time: Timestamp, + } + | e | { + format_args!("header not withing trusting period: expires_at={0} now={1}", e.latest_time, e.update_time) + }, + + MissingLocalConsensusState + { height: Height } + | e | { format_args!("the local consensus state could not be retrieved for height {}", e.height) }, + + InvalidConnectionEnd + [ TraceError] + | _ | { "invalid connection end" }, + + InvalidChannelEnd + [ TraceError] + | _ | { "invalid channel end" }, + + InvalidAnyClientState + [ TraceError] + | _ | { "invalid any client state" }, + + InvalidAnyConsensusState + [ TraceError ] + | _ | { "invalid any client consensus state" }, + + Signer + [ SignerError ] + | _ | { "failed to parse signer" }, + } } diff --git a/modules/src/core/ics02_client/events.rs b/modules/src/core/ics02_client/events.rs index 6fec32e25a..6e5b41273f 100644 --- a/modules/src/core/ics02_client/events.rs +++ b/modules/src/core/ics02_client/events.rs @@ -1,17 +1,13 @@ //! Types for the IBC events emitted from Tendermint Websocket by the client module. 
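Before turning to the events changes, a note on the reworked error enum above: the client-specific `Tendermint`, `Beefy` and `Near` variants are gone, and individual light clients are instead expected to funnel their failures through the new generic `ClientError { client_type, inner }` variant. A minimal sketch of what that could look like from a light-client implementation, assuming the snake_case constructor that `define_error!` (flex-error) derives for the variant; the `"11-beefy"` client-type string is purely illustrative:

    use crate::{core::ics02_client::error::Error, prelude::*};

    // Wrap a client-specific failure into the generic ICS02 `ClientError` variant.
    fn wrap_light_client_error(err: impl core::fmt::Display) -> Error {
        // "11-beefy" is an example client-type string, not something defined by this patch.
        Error::client_error("11-beefy".to_string(), err.to_string())
    }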
+use crate::{ + core::{ics02_client::height::Height, ics24_host::identifier::ClientId}, + events::IbcEvent, + prelude::*, +}; use serde_derive::{Deserialize, Serialize}; -use tendermint::abci::Event as AbciEvent; use tendermint::abci::EventAttribute; -use crate::core::ics02_client::client_type::ClientType; -use crate::core::ics02_client::error::Error; -use crate::core::ics02_client::header::AnyHeader; -use crate::core::ics02_client::height::Height; -use crate::core::ics24_host::identifier::ClientId; -use crate::events::{IbcEvent, IbcEventType}; -use crate::prelude::*; - /// The content of the `key` field for the attribute containing the height. const HEIGHT_ATTRIBUTE_KEY: &str = "height"; @@ -24,119 +20,49 @@ const CLIENT_TYPE_ATTRIBUTE_KEY: &str = "client_type"; /// The content of the `key` field for the attribute containing the height. const CONSENSUS_HEIGHT_ATTRIBUTE_KEY: &str = "consensus_height"; -/// The content of the `key` field for the header in update client event. -const HEADER_ATTRIBUTE_KEY: &str = "header"; - -pub fn try_from_tx(event: &AbciEvent) -> Option { - match event.kind.parse() { - Ok(IbcEventType::CreateClient) => extract_attributes_from_tx(event) - .map(CreateClient) - .map(IbcEvent::CreateClient) - .ok(), - Ok(IbcEventType::UpdateClient) => match extract_attributes_from_tx(event) { - Ok(attributes) => Some(IbcEvent::UpdateClient(UpdateClient { - common: attributes, - header: extract_header_from_tx(event).ok(), - })), - Err(_) => None, - }, - Ok(IbcEventType::ClientMisbehaviour) => extract_attributes_from_tx(event) - .map(ClientMisbehaviour) - .map(IbcEvent::ClientMisbehaviour) - .ok(), - Ok(IbcEventType::UpgradeClient) => extract_attributes_from_tx(event) - .map(UpgradeClient) - .map(IbcEvent::UpgradeClient) - .ok(), - _ => None, - } -} - -fn extract_attributes_from_tx(event: &AbciEvent) -> Result { - let mut attr = Attributes::default(); - - for tag in &event.attributes { - let key = tag.key.as_ref(); - let value = tag.value.as_str(); - match key { - HEIGHT_ATTRIBUTE_KEY => { - attr.height = value - .parse() - .map_err(|e| Error::invalid_string_as_height(value.to_string(), e))? - } - CLIENT_ID_ATTRIBUTE_KEY => { - attr.client_id = value.parse().map_err(Error::invalid_client_identifier)? - } - CLIENT_TYPE_ATTRIBUTE_KEY => { - attr.client_type = value - .parse() - .map_err(|_| Error::unknown_client_type(value.to_string()))? - } - CONSENSUS_HEIGHT_ATTRIBUTE_KEY => { - attr.consensus_height = value - .parse() - .map_err(|e| Error::invalid_string_as_height(value.to_string(), e))? - } - _ => {} - } - } - - Ok(attr) -} - -pub fn extract_header_from_tx(event: &AbciEvent) -> Result { - for tag in &event.attributes { - let key = tag.key.as_str(); - let value = tag.value.as_str(); - if key == HEADER_ATTRIBUTE_KEY { - return AnyHeader::decode_from_string(value); - } - } - Err(Error::missing_raw_header()) -} - /// NewBlock event signals the committing & execution of a new block. 
// TODO - find a better place for NewBlock #[derive(Debug, Deserialize, Serialize, Clone, Copy, PartialEq, Eq)] pub struct NewBlock { - pub height: Height, + pub height: Height, } impl NewBlock { - pub fn new(h: Height) -> NewBlock { - NewBlock { height: h } - } - pub fn set_height(&mut self, height: Height) { - self.height = height; - } - pub fn height(&self) -> Height { - self.height - } + pub fn new(h: Height) -> NewBlock { + NewBlock { height: h } + } + pub fn set_height(&mut self, height: Height) { + self.height = height; + } + pub fn height(&self) -> Height { + self.height + } } impl From for IbcEvent { - fn from(v: NewBlock) -> Self { - IbcEvent::NewBlock(v) - } + fn from(v: NewBlock) -> Self { + IbcEvent::NewBlock(v) + } } #[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct Attributes { - pub height: Height, - pub client_id: ClientId, - pub client_type: ClientType, - pub consensus_height: Height, + pub height: Height, + pub client_id: ClientId, + pub client_type: String, + pub consensus_height: Height, } +#[cfg(not(test))] impl Default for Attributes { - fn default() -> Self { - Attributes { - height: Height::default(), - client_id: Default::default(), - client_type: ClientType::Tendermint, - consensus_height: Height::default(), - } - } + fn default() -> Self { + Attributes { + height: Height::default(), + client_id: Default::default(), + client_type: "00-uninitialized".to_owned(), + consensus_height: Height::default(), + } + } } /// Convert attributes to Tendermint ABCI tags @@ -148,39 +74,35 @@ impl Default for Attributes { /// Once tendermint-rs improves the API of the `Key` and `Value` types, /// we will be able to remove the `.parse().unwrap()` calls. impl From for Vec { - fn from(a: Attributes) -> Self { - let height = EventAttribute { - key: HEIGHT_ATTRIBUTE_KEY.parse().unwrap(), - value: a.height.to_string().parse().unwrap(), - index: false, - }; - let client_id = EventAttribute { - key: CLIENT_ID_ATTRIBUTE_KEY.parse().unwrap(), - value: a.client_id.to_string().parse().unwrap(), - index: false, - }; - let client_type = EventAttribute { - key: CLIENT_TYPE_ATTRIBUTE_KEY.parse().unwrap(), - value: a.client_type.as_str().parse().unwrap(), - index: false, - }; - let consensus_height = EventAttribute { - key: CONSENSUS_HEIGHT_ATTRIBUTE_KEY.parse().unwrap(), - value: a.height.to_string().parse().unwrap(), - index: false, - }; - vec![height, client_id, client_type, consensus_height] - } + fn from(a: Attributes) -> Self { + let height = EventAttribute { + key: HEIGHT_ATTRIBUTE_KEY.parse().unwrap(), + value: a.height.to_string().parse().unwrap(), + index: false, + }; + let client_id = EventAttribute { + key: CLIENT_ID_ATTRIBUTE_KEY.parse().unwrap(), + value: a.client_id.to_string().parse().unwrap(), + index: false, + }; + let client_type = EventAttribute { + key: CLIENT_TYPE_ATTRIBUTE_KEY.parse().unwrap(), + value: a.client_type.to_owned(), + index: false, + }; + let consensus_height = EventAttribute { + key: CONSENSUS_HEIGHT_ATTRIBUTE_KEY.parse().unwrap(), + value: a.height.to_string().parse().unwrap(), + index: false, + }; + vec![height, client_id, client_type, consensus_height] + } } impl core::fmt::Display for Attributes { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { - write!( - f, - "h: {}, cs_h: {}({})", - self.height, self.client_id, self.consensus_height - ) - } + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { + write!(f, "h: {}, cs_h: {}({})", self.height, 
self.client_id, self.consensus_height) + } } /// CreateClient event signals the creation of a new on-chain client (IBC client). @@ -188,116 +110,87 @@ impl core::fmt::Display for Attributes { pub struct CreateClient(pub Attributes); impl CreateClient { - pub fn client_id(&self) -> &ClientId { - &self.0.client_id - } - pub fn height(&self) -> Height { - self.0.height - } - pub fn set_height(&mut self, height: Height) { - self.0.height = height; - } + pub fn client_id(&self) -> &ClientId { + &self.0.client_id + } + pub fn height(&self) -> Height { + self.0.height + } + pub fn set_height(&mut self, height: Height) { + self.0.height = height; + } } impl From for CreateClient { - fn from(attrs: Attributes) -> Self { - CreateClient(attrs) - } + fn from(attrs: Attributes) -> Self { + CreateClient(attrs) + } } impl From for IbcEvent { - fn from(v: CreateClient) -> Self { - IbcEvent::CreateClient(v) - } -} - -impl From for AbciEvent { - fn from(v: CreateClient) -> Self { - let attributes = Vec::::from(v.0); - AbciEvent { - kind: IbcEventType::CreateClient.as_str().to_string(), - attributes, - } - } + fn from(v: CreateClient) -> Self { + IbcEvent::CreateClient(v) + } } impl core::fmt::Display for CreateClient { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { - write!(f, "{}", self.0) - } + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { + write!(f, "{}", self.0) + } } /// UpdateClient event signals a recent update of an on-chain client (IBC Client). +// TODO: use generic header type #[derive(Deserialize, Serialize, Clone, PartialEq, Eq)] pub struct UpdateClient { - pub common: Attributes, - pub header: Option, + pub common: Attributes, + pub header: Option>, } impl UpdateClient { - pub fn client_id(&self) -> &ClientId { - &self.common.client_id - } - pub fn client_type(&self) -> ClientType { - self.common.client_type - } - - pub fn height(&self) -> Height { - self.common.height - } - - pub fn set_height(&mut self, height: Height) { - self.common.height = height; - } - - pub fn consensus_height(&self) -> Height { - self.common.consensus_height - } + pub fn client_id(&self) -> &ClientId { + &self.common.client_id + } + + pub fn client_type(&self) -> &str { + &self.common.client_type + } + + pub fn height(&self) -> Height { + self.common.height + } + + pub fn set_height(&mut self, height: Height) { + self.common.height = height; + } + + pub fn consensus_height(&self) -> Height { + self.common.consensus_height + } } impl From for UpdateClient { - fn from(attrs: Attributes) -> Self { - UpdateClient { - common: attrs, - header: None, - } - } + fn from(attrs: Attributes) -> Self { + UpdateClient { common: attrs, header: None } + } } impl From for IbcEvent { - fn from(v: UpdateClient) -> Self { - IbcEvent::UpdateClient(v) - } -} - -impl From for AbciEvent { - fn from(v: UpdateClient) -> Self { - let mut attributes = Vec::::from(v.common); - if let Some(h) = v.header { - let header = EventAttribute { - key: HEADER_ATTRIBUTE_KEY.parse().unwrap(), - value: h.encode_to_string().parse().unwrap(), - index: false, - }; - attributes.push(header); - } - AbciEvent { - kind: IbcEventType::UpdateClient.as_str().to_string(), - attributes, - } - } + fn from(v: UpdateClient) -> Self { + IbcEvent::UpdateClient(v) + } } impl core::fmt::Display for UpdateClient { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { - write!(f, "{}", self.common) - } + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), 
core::fmt::Error> { + write!(f, "{}", self.common) + } } impl core::fmt::Debug for UpdateClient { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - write!(f, "{}", self.common) - } + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + write!(f, "{}", self.common) + } } /// ClientMisbehaviour event signals the update of an on-chain client (IBC Client) with evidence of @@ -306,37 +199,27 @@ impl core::fmt::Debug for UpdateClient { pub struct ClientMisbehaviour(pub Attributes); impl ClientMisbehaviour { - pub fn client_id(&self) -> &ClientId { - &self.0.client_id - } - pub fn height(&self) -> Height { - self.0.height - } - pub fn set_height(&mut self, height: Height) { - self.0.height = height; - } + pub fn client_id(&self) -> &ClientId { + &self.0.client_id + } + pub fn height(&self) -> Height { + self.0.height + } + pub fn set_height(&mut self, height: Height) { + self.0.height = height; + } } impl From for ClientMisbehaviour { - fn from(attrs: Attributes) -> Self { - ClientMisbehaviour(attrs) - } + fn from(attrs: Attributes) -> Self { + ClientMisbehaviour(attrs) + } } impl From for IbcEvent { - fn from(v: ClientMisbehaviour) -> Self { - IbcEvent::ClientMisbehaviour(v) - } -} - -impl From for AbciEvent { - fn from(v: ClientMisbehaviour) -> Self { - let attributes = Vec::::from(v.0); - AbciEvent { - kind: IbcEventType::ClientMisbehaviour.as_str().to_string(), - attributes, - } - } + fn from(v: ClientMisbehaviour) -> Self { + IbcEvent::ClientMisbehaviour(v) + } } /// Signals a recent upgrade of an on-chain client (IBC Client). @@ -344,74 +227,19 @@ impl From for AbciEvent { pub struct UpgradeClient(pub Attributes); impl UpgradeClient { - pub fn set_height(&mut self, height: Height) { - self.0.height = height; - } - pub fn height(&self) -> Height { - self.0.height - } - pub fn client_id(&self) -> &ClientId { - &self.0.client_id - } + pub fn set_height(&mut self, height: Height) { + self.0.height = height; + } + pub fn height(&self) -> Height { + self.0.height + } + pub fn client_id(&self) -> &ClientId { + &self.0.client_id + } } impl From for UpgradeClient { - fn from(attrs: Attributes) -> Self { - UpgradeClient(attrs) - } -} - -impl From for AbciEvent { - fn from(v: UpgradeClient) -> Self { - let attributes = Vec::::from(v.0); - AbciEvent { - kind: IbcEventType::UpgradeClient.as_str().to_string(), - attributes, - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::core::ics02_client::header::Header; - use crate::mock::header::MockHeader; - - #[test] - fn client_event_to_abci_event() { - let height = Height::new(1, 1); - let attributes = Attributes { - height, - client_id: "test_client".parse().unwrap(), - client_type: ClientType::Tendermint, - consensus_height: height, - }; - let mut abci_events = vec![]; - let create_client = CreateClient::from(attributes.clone()); - abci_events.push(AbciEvent::from(create_client.clone())); - let client_misbehaviour = ClientMisbehaviour::from(attributes.clone()); - abci_events.push(AbciEvent::from(client_misbehaviour.clone())); - let upgrade_client = UpgradeClient::from(attributes.clone()); - abci_events.push(AbciEvent::from(upgrade_client.clone())); - let mut update_client = UpdateClient::from(attributes); - let header = MockHeader::new(height).wrap_any(); - update_client.header = Some(header); - abci_events.push(AbciEvent::from(update_client.clone())); - - for event in abci_events { - match try_from_tx(&event) { - Some(e) => match e { - IbcEvent::CreateClient(e) => assert_eq!(e.0, 
create_client.0), - IbcEvent::ClientMisbehaviour(e) => assert_eq!(e.0, client_misbehaviour.0), - IbcEvent::UpgradeClient(e) => assert_eq!(e.0, upgrade_client.0), - IbcEvent::UpdateClient(e) => { - assert_eq!(e.common, update_client.common); - assert_eq!(e.header, update_client.header); - } - _ => panic!("unexpected event type"), - }, - None => panic!("converted event was wrong"), - } - } - } + fn from(attrs: Attributes) -> Self { + UpgradeClient(attrs) + } } diff --git a/modules/src/core/ics02_client/handler.rs b/modules/src/core/ics02_client/handler.rs index 455bd51d3d..d5d3c42693 100644 --- a/modules/src/core/ics02_client/handler.rs +++ b/modules/src/core/ics02_client/handler.rs @@ -1,9 +1,12 @@ //! This module implements the processing logic for ICS2 (client abstractions and functions) msgs. -use crate::clients::host_functions::HostFunctionsProvider; -use crate::core::ics02_client::error::Error; -use crate::core::ics02_client::msgs::ClientMsg; -use crate::core::ics26_routing::context::ReaderContext; -use crate::handler::HandlerOutput; + +use crate::{ + core::{ + ics02_client::{context::ClientKeeper, error::Error, msgs::ClientMsg}, + ics26_routing::context::ReaderContext, + }, + handler::HandlerOutput, +}; use core::fmt::Debug; pub mod create_client; @@ -11,27 +14,26 @@ pub mod update_client; pub mod upgrade_client; #[derive(Clone, Debug, PartialEq, Eq)] -pub enum ClientResult { - Create(create_client::Result), - Update(update_client::Result), - Upgrade(upgrade_client::Result), +pub enum ClientResult { + Create(create_client::Result), + Update(update_client::Result), + Upgrade(upgrade_client::Result), } /// General entry point for processing any message related to ICS2 (client functions) protocols. -pub fn dispatch( - ctx: &Ctx, - msg: ClientMsg, -) -> Result, Error> +pub fn dispatch( + ctx: &Ctx, + msg: ClientMsg, +) -> Result>, Error> where - Ctx: ReaderContext, - HostFunctions: HostFunctionsProvider, + Ctx: ReaderContext, { - match msg { - ClientMsg::CreateClient(msg) => create_client::process(ctx, msg), - ClientMsg::UpdateClient(msg) => update_client::process::(ctx, msg), - ClientMsg::UpgradeClient(msg) => upgrade_client::process::(ctx, msg), - _ => { - unimplemented!() - } - } + match msg { + ClientMsg::CreateClient(msg) => create_client::process::<_>(ctx, msg), + ClientMsg::UpdateClient(msg) => update_client::process::<_>(ctx, msg), + ClientMsg::UpgradeClient(msg) => upgrade_client::process::<_>(ctx, msg), + _ => { + unimplemented!() + }, + } } diff --git a/modules/src/core/ics02_client/handler/create_client.rs b/modules/src/core/ics02_client/handler/create_client.rs index 94e5d79b2c..457d7a4726 100644 --- a/modules/src/core/ics02_client/handler/create_client.rs +++ b/modules/src/core/ics02_client/handler/create_client.rs @@ -1,290 +1,209 @@ //! Protocol logic specific to processing ICS2 messages of type `MsgCreateAnyClient`. 
-use crate::core::ics26_routing::context::ReaderContext; -use crate::prelude::*; +use crate::{core::ics26_routing::context::ReaderContext, prelude::*}; use core::fmt::Debug; -use crate::core::ics02_client::client_consensus::AnyConsensusState; -use crate::core::ics02_client::client_state::AnyClientState; -use crate::core::ics02_client::client_type::ClientType; -use crate::core::ics02_client::error::Error; -use crate::core::ics02_client::events::Attributes; -use crate::core::ics02_client::handler::ClientResult; -use crate::core::ics02_client::height::Height; -use crate::core::ics02_client::msgs::create_client::MsgCreateAnyClient; -use crate::core::ics24_host::identifier::ClientId; -use crate::events::IbcEvent; -use crate::handler::{HandlerOutput, HandlerResult}; -use crate::timestamp::Timestamp; +use crate::core::ics02_client::client_state::ClientState; + +use crate::{ + core::{ + ics02_client::{ + context::ClientKeeper, error::Error, events::Attributes, handler::ClientResult, + height::Height, msgs::create_client::MsgCreateAnyClient, + }, + ics24_host::identifier::ClientId, + }, + events::IbcEvent, + handler::{HandlerOutput, HandlerResult}, + timestamp::Timestamp, +}; /// The result following the successful processing of a `MsgCreateAnyClient` message. Preferably /// this data type should be used with a qualified name `create_client::Result` to avoid ambiguity. #[derive(Clone, Debug, PartialEq, Eq)] -pub struct Result { - pub client_id: ClientId, - pub client_type: ClientType, - pub client_state: AnyClientState, - pub consensus_state: AnyConsensusState, - pub processed_time: Timestamp, - pub processed_height: Height, +pub struct Result { + pub client_id: ClientId, + pub client_type: &'static str, + pub client_state: C::AnyClientState, + pub consensus_state: C::AnyConsensusState, + pub processed_time: Timestamp, + pub processed_height: Height, } -pub fn process( - ctx: &dyn ReaderContext, - msg: MsgCreateAnyClient, -) -> HandlerResult { - let mut output = HandlerOutput::builder(); - - // Construct this client's identifier - let id_counter = ctx.client_counter()?; - let client_id = ClientId::new(msg.client_state.client_type(), id_counter).map_err(|e| { - Error::client_identifier_constructor(msg.client_state.client_type(), id_counter, e) - })?; - - output.log(format!( - "success: generated new client identifier: {}", - client_id - )); - - let result = ClientResult::Create(Result { - client_id: client_id.clone(), - client_type: msg.client_state.client_type(), - client_state: msg.client_state.clone(), - consensus_state: msg.consensus_state, - processed_time: ctx.host_timestamp(), - processed_height: ctx.host_height(), - }); - - let event_attributes = Attributes { - client_id, - height: ctx.host_height(), - client_type: msg.client_state.client_type(), - consensus_height: msg.client_state.latest_height(), - }; - output.emit(IbcEvent::CreateClient(event_attributes.into())); - - Ok(output.with_result(result)) +pub fn process( + ctx: &Ctx, + msg: MsgCreateAnyClient, +) -> HandlerResult, Error> +where + Ctx: ReaderContext + Eq + Debug + Clone, +{ + let mut output = HandlerOutput::builder(); + + // Construct this client's identifier + let id_counter = ctx.client_counter()?; + let client_type = msg.client_state.client_type(); + let client_id = ClientId::new(client_type, id_counter) + .map_err(|e| Error::client_identifier_constructor(client_type.to_owned(), id_counter, e))?; + + output.log(format!("success: generated new client identifier: {}", client_id)); + + let result = ClientResult::Create(Result { 
+ client_id: client_id.clone(), + client_type: msg.client_state.client_type(), + client_state: msg.client_state.clone(), + consensus_state: msg.consensus_state, + processed_time: ctx.host_timestamp(), + processed_height: ctx.host_height(), + }); + + let event_attributes = Attributes { + client_id, + height: ctx.host_height(), + client_type: msg.client_state.client_type().to_owned(), + consensus_height: msg.client_state.latest_height(), + }; + output.emit(IbcEvent::CreateClient(event_attributes.into())); + + Ok(output.with_result(result)) } #[cfg(test)] mod tests { - use crate::prelude::*; - - use core::time::Duration; - use test_log::test; - - use crate::clients::ics07_tendermint::client_state::ClientState as TendermintClientState; - use crate::clients::ics07_tendermint::header::test_util::get_dummy_tendermint_header; - use crate::core::ics02_client::client_consensus::AnyConsensusState; - use crate::core::ics02_client::client_state::ClientState; - use crate::core::ics02_client::client_type::ClientType; - use crate::core::ics02_client::context::ClientReader; - use crate::core::ics02_client::handler::{dispatch, ClientResult}; - use crate::core::ics02_client::msgs::create_client::MsgCreateAnyClient; - use crate::core::ics02_client::msgs::ClientMsg; - use crate::core::ics02_client::trust_threshold::TrustThreshold; - use crate::core::ics23_commitment::specs::ProofSpecs; - use crate::core::ics24_host::identifier::ClientId; - use crate::events::IbcEvent; - use crate::handler::HandlerOutput; - use crate::mock::client_state::{MockClientState, MockConsensusState}; - use crate::mock::context::MockContext; - use crate::mock::header::MockHeader; - use crate::test_utils::{get_dummy_account_id, Crypto}; - use crate::Height; - - #[test] - fn test_create_client_ok() { - let ctx = MockContext::default(); - let signer = get_dummy_account_id(); - let height = Height::new(0, 42); - - let msg = MsgCreateAnyClient::new( - MockClientState::new(MockHeader::new(height)).into(), - MockConsensusState::new(MockHeader::new(height)).into(), - signer, - ) - .unwrap(); - - let output = dispatch::<_, Crypto>(&ctx, ClientMsg::CreateClient(msg.clone())); - - match output { - Ok(HandlerOutput { - result, mut events, .. 
- }) => { - assert_eq!(events.len(), 1); - let event = events.pop().unwrap(); - let expected_client_id = ClientId::new(ClientType::Mock, 0).unwrap(); - assert!( - matches!(event, IbcEvent::CreateClient(ref e) if e.client_id() == &expected_client_id) - ); - assert_eq!(event.height(), ctx.host_height()); - match result { - ClientResult::Create(create_result) => { - assert_eq!(create_result.client_type, ClientType::Mock); - assert_eq!(create_result.client_id, expected_client_id); - assert_eq!(create_result.client_state, msg.client_state); - assert_eq!(create_result.consensus_state, msg.consensus_state); - } - _ => { - panic!("unexpected result type: expected ClientResult::CreateResult!"); - } - } - } - Err(err) => { - panic!("unexpected error: {}", err); - } - } - } - - #[test] - fn test_create_client_ok_multiple() { - let existing_client_id = ClientId::default(); - let signer = get_dummy_account_id(); - let height = Height::new(0, 80); - - let ctx = MockContext::default().with_client(&existing_client_id, height); - - let create_client_msgs: Vec = vec![ - MsgCreateAnyClient::new( - MockClientState::new(MockHeader::new(Height { - revision_height: 42, - ..height - })) - .into(), - MockConsensusState::new(MockHeader::new(Height { - revision_height: 42, - ..height - })) - .into(), - signer.clone(), - ) - .unwrap(), - MsgCreateAnyClient::new( - MockClientState::new(MockHeader::new(Height { - revision_height: 42, - ..height - })) - .into(), - MockConsensusState::new(MockHeader::new(Height { - revision_height: 42, - ..height - })) - .into(), - signer.clone(), - ) - .unwrap(), - MsgCreateAnyClient::new( - MockClientState::new(MockHeader::new(Height { - revision_height: 50, - ..height - })) - .into(), - MockConsensusState::new(MockHeader::new(Height { - revision_height: 50, - ..height - })) - .into(), - signer, - ) - .unwrap(), - ] - .into_iter() - .collect(); - - // The expected client id that will be generated will be identical to "9999-mock-0" for all - // tests. This is because we're not persisting any client results (which is done via the - // tests for `ics26_routing::dispatch`. - let expected_client_id = ClientId::new(ClientType::Mock, 0).unwrap(); - - for msg in create_client_msgs { - let output = dispatch::<_, Crypto>(&ctx, ClientMsg::CreateClient(msg.clone())); - - match output { - Ok(HandlerOutput { - result, mut events, .. 
- }) => { - assert_eq!(events.len(), 1); - let event = events.pop().unwrap(); - assert!( - matches!(event, IbcEvent::CreateClient(ref e) if e.client_id() == &expected_client_id) - ); - assert_eq!(event.height(), ctx.host_height()); - match result { - ClientResult::Create(create_res) => { - assert_eq!(create_res.client_type, msg.client_state.client_type()); - assert_eq!(create_res.client_id, expected_client_id); - assert_eq!(create_res.client_state, msg.client_state); - assert_eq!(create_res.consensus_state, msg.consensus_state); - } - _ => { - panic!("expected result of type ClientResult::CreateResult"); - } - } - } - Err(err) => { - panic!("unexpected error: {}", err); - } - } - } - } - - #[test] - fn test_tm_create_client_ok() { - let signer = get_dummy_account_id(); - - let ctx = MockContext::default(); - - let tm_header = get_dummy_tendermint_header(); - - let tm_client_state = TendermintClientState::new( - tm_header.chain_id.clone().into(), - TrustThreshold::ONE_THIRD, - Duration::from_secs(64000), - Duration::from_secs(128000), - Duration::from_millis(3000), - Height::new(0, u64::from(tm_header.height)), - ProofSpecs::default(), - vec!["".to_string()], - ) - .unwrap() - .wrap_any(); - - let msg = MsgCreateAnyClient::new( - tm_client_state, - AnyConsensusState::Tendermint(tm_header.try_into().unwrap()), - signer, - ) - .unwrap(); - - let output = dispatch::<_, Crypto>(&ctx, ClientMsg::CreateClient(msg.clone())); - - match output { - Ok(HandlerOutput { - result, mut events, .. - }) => { - assert_eq!(events.len(), 1); - let event = events.pop().unwrap(); - let expected_client_id = ClientId::new(ClientType::Tendermint, 0).unwrap(); - assert!( - matches!(event, IbcEvent::CreateClient(ref e) if e.client_id() == &expected_client_id) - ); - assert_eq!(event.height(), ctx.host_height()); - match result { - ClientResult::Create(create_res) => { - assert_eq!(create_res.client_type, ClientType::Tendermint); - assert_eq!(create_res.client_id, expected_client_id); - assert_eq!(create_res.client_state, msg.client_state); - assert_eq!(create_res.consensus_state, msg.consensus_state); - } - _ => { - panic!("expected result of type ClientResult::CreateResult"); - } - } - } - Err(err) => { - panic!("unexpected error: {}", err); - } - } - } + use crate::core::ics02_client::client_state::ClientState; + + use crate::{ + core::{ + ics02_client::{ + context::ClientReader, + handler::{dispatch, ClientResult}, + msgs::{create_client::MsgCreateAnyClient, ClientMsg}, + }, + ics24_host::identifier::ClientId, + }, + events::IbcEvent, + handler::HandlerOutput, + mock::{ + client_state::{MockClientState, MockConsensusState}, + context::{MockClientTypes, MockContext}, + header::MockHeader, + }, + prelude::*, + test_utils::get_dummy_account_id, + Height, + }; + use test_log::test; + + #[test] + fn test_create_client_ok() { + let ctx = MockContext::::default(); + let signer = get_dummy_account_id(); + let height = Height::new(0, 42); + + let msg = MsgCreateAnyClient::new( + MockClientState::new(MockHeader::new(height)).into(), + MockConsensusState::new(MockHeader::new(height)).into(), + signer, + ) + .unwrap(); + + let output = dispatch(&ctx, ClientMsg::CreateClient(msg.clone())); + + match output { + Ok(HandlerOutput { result, mut events, .. 
}) => { + assert_eq!(events.len(), 1); + let event = events.pop().unwrap(); + let expected_client_id = ClientId::new(MockClientState::client_type(), 0).unwrap(); + assert!( + matches!(event, IbcEvent::CreateClient(ref e) if e.client_id() == &expected_client_id) + ); + assert_eq!(event.height(), ctx.host_height()); + match result { + ClientResult::Create(create_result) => { + assert_eq!(create_result.client_type, MockClientState::client_type()); + assert_eq!(create_result.client_id, expected_client_id); + assert_eq!(create_result.client_state, msg.client_state); + assert_eq!(create_result.consensus_state, msg.consensus_state); + }, + _ => { + panic!("unexpected result type: expected ClientResult::CreateResult!"); + }, + } + }, + Err(err) => { + panic!("unexpected error: {}", err); + }, + } + } + + #[test] + fn test_create_client_ok_multiple() { + let existing_client_id = ClientId::default(); + let signer = get_dummy_account_id(); + let height = Height::new(0, 80); + + let ctx = MockContext::default().with_client(&existing_client_id, height); + + let create_client_msgs: Vec>> = vec![ + MsgCreateAnyClient::new( + MockClientState::new(MockHeader::new(Height { revision_height: 42, ..height })) + .into(), + MockConsensusState::new(MockHeader::new(Height { revision_height: 42, ..height })) + .into(), + signer.clone(), + ) + .unwrap(), + MsgCreateAnyClient::new( + MockClientState::new(MockHeader::new(Height { revision_height: 42, ..height })) + .into(), + MockConsensusState::new(MockHeader::new(Height { revision_height: 42, ..height })) + .into(), + signer.clone(), + ) + .unwrap(), + MsgCreateAnyClient::new( + MockClientState::new(MockHeader::new(Height { revision_height: 50, ..height })) + .into(), + MockConsensusState::new(MockHeader::new(Height { revision_height: 50, ..height })) + .into(), + signer, + ) + .unwrap(), + ] + .into_iter() + .collect(); + + // The expected client id that will be generated will be identical to "9999-mock-0" for all + // tests. This is because we're not persisting any client results (which is done via the + // tests for `ics26_routing::dispatch`. + let expected_client_id = ClientId::new(MockClientState::client_type(), 0).unwrap(); + + for msg in create_client_msgs { + let output = dispatch(&ctx, ClientMsg::CreateClient(msg.clone())); + + match output { + Ok(HandlerOutput { result, mut events, .. }) => { + assert_eq!(events.len(), 1); + let event = events.pop().unwrap(); + assert!( + matches!(event, IbcEvent::CreateClient(ref e) if e.client_id() == &expected_client_id) + ); + assert_eq!(event.height(), ctx.host_height()); + match result { + ClientResult::Create(create_res) => { + assert_eq!(create_res.client_type, msg.client_state.client_type()); + assert_eq!(create_res.client_id, expected_client_id); + assert_eq!(create_res.client_state, msg.client_state); + assert_eq!(create_res.consensus_state, msg.consensus_state); + }, + _ => { + panic!("expected result of type ClientResult::CreateResult"); + }, + } + }, + Err(err) => { + panic!("unexpected error: {}", err); + }, + } + } + } } diff --git a/modules/src/core/ics02_client/handler/update_client.rs b/modules/src/core/ics02_client/handler/update_client.rs index 9612148b16..77a39d460e 100644 --- a/modules/src/core/ics02_client/handler/update_client.rs +++ b/modules/src/core/ics02_client/handler/update_client.rs @@ -1,851 +1,267 @@ //! Protocol logic specific to processing ICS2 messages of type `MsgUpdateAnyClient`. 
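One detail that keeps the create-client tests above predictable: the handler derives the identifier purely from the client-type string and the client counter, so the generated id always has the form `<client-type>-<counter>` (hence the `"9999-mock-0"` comment and the `ClientId::new(MockClientState::client_type(), 0)` expectation). A small sketch of that relationship; the helper name is illustrative:

    use crate::core::ics24_host::identifier::ClientId;

    // The identifier the handler will allocate for the n-th client of a given type,
    // e.g. expected_client_id("07-tendermint", 0) formats as "07-tendermint-0".
    fn expected_client_id(client_type: &'static str, counter: u64) -> ClientId {
        ClientId::new(client_type, counter).expect("valid client identifier")
    }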
use core::fmt::Debug; -use crate::clients::host_functions::HostFunctionsProvider; -use crate::core::ics02_client::client_def::{AnyClient, ClientDef, ConsensusUpdateResult}; -use crate::core::ics02_client::client_state::{AnyClientState, ClientState}; -use crate::core::ics02_client::error::Error; -use crate::core::ics02_client::events::Attributes; -use crate::core::ics02_client::handler::ClientResult; -use crate::core::ics02_client::height::Height; -use crate::core::ics02_client::msgs::update_client::MsgUpdateAnyClient; -use crate::core::ics24_host::identifier::ClientId; -use crate::core::ics26_routing::context::ReaderContext; -use crate::events::IbcEvent; -use crate::handler::{HandlerOutput, HandlerResult}; -use crate::prelude::*; -use crate::timestamp::Timestamp; +use crate::{ + core::{ + ics02_client::{ + client_consensus::ConsensusState, + client_def::{ClientDef, ConsensusUpdateResult}, + client_state::ClientState, + context::ClientKeeper, + error::Error, + events::Attributes, + handler::ClientResult, + height::Height, + msgs::update_client::MsgUpdateAnyClient, + }, + ics24_host::identifier::ClientId, + ics26_routing::context::ReaderContext, + }, + events::IbcEvent, + handler::{HandlerOutput, HandlerResult}, + prelude::*, + timestamp::Timestamp, +}; /// The result following the successful processing of a `MsgUpdateAnyClient` message. Preferably /// this data type should be used with a qualified name `update_client::Result` to avoid ambiguity. #[derive(Clone, Debug, PartialEq, Eq)] -pub struct Result { - pub client_id: ClientId, - pub client_state: AnyClientState, - pub consensus_state: Option, - pub processed_time: Timestamp, - pub processed_height: Height, +pub struct Result { + pub client_id: ClientId, + pub client_state: C::AnyClientState, + pub consensus_state: Option>, + pub processed_time: Timestamp, + pub processed_height: Height, } -pub fn process( - ctx: &dyn ReaderContext, - msg: MsgUpdateAnyClient, -) -> HandlerResult { - let mut output = HandlerOutput::builder(); - - let MsgUpdateAnyClient { - client_id, - header, - signer: _, - } = msg; - - // Read client type from the host chain store. The client should already exist. - let client_type = ctx.client_type(&client_id)?; - - let client_def = AnyClient::::from_client_type(client_type); - - // Read client state from the host chain store. - let client_state = ctx.client_state(&client_id)?; - - if client_state.is_frozen() { - return Err(Error::client_frozen(client_id)); - } - - // Read consensus state from the host chain store. 
- let latest_consensus_state = ctx - .consensus_state(&client_id, client_state.latest_height()) - .map_err(|_| { - Error::consensus_state_not_found(client_id.clone(), client_state.latest_height()) - })?; - - tracing::debug!("latest consensus state: {:?}", latest_consensus_state); - - let now = ctx.host_timestamp(); - let duration = now - .duration_since(&latest_consensus_state.timestamp()) - .ok_or_else(|| { - Error::invalid_consensus_state_timestamp(latest_consensus_state.timestamp(), now) - })?; - - if client_state.expired(duration) { - return Err(Error::header_not_within_trust_period( - latest_consensus_state.timestamp(), - now, - )); - } - - client_def - .verify_header(ctx, client_id.clone(), client_state.clone(), header.clone()) - .map_err(|e| Error::header_verification_failure(e.to_string()))?; - - let found_misbehaviour = client_def - .check_for_misbehaviour(ctx, client_id.clone(), client_state.clone(), header.clone()) - .map_err(|e| Error::header_verification_failure(e.to_string()))?; - - let event_attributes = Attributes { - client_id: client_id.clone(), - height: ctx.host_height(), - client_type, - consensus_height: client_state.latest_height(), - }; - - if found_misbehaviour { - let client_state = client_def.update_state_on_misbehaviour(client_state, header)?; - let result = ClientResult::Update(Result { - client_id, - client_state, - consensus_state: None, - processed_time: ctx.host_timestamp(), - processed_height: ctx.host_height(), - }); - output.emit(IbcEvent::ClientMisbehaviour(event_attributes.into())); - return Ok(output.with_result(result)); - } - // Use client_state to validate the new header against the latest consensus_state. - // This function will return the new client_state (its latest_height changed) and a - // consensus_state obtained from header. These will be later persisted by the keeper. - let (new_client_state, new_consensus_state) = client_def - .update_state(ctx, client_id.clone(), client_state, header) - .map_err(|e| Error::header_verification_failure(e.to_string()))?; - - let result = ClientResult::Update(Result { - client_id, - client_state: new_client_state, - consensus_state: Some(new_consensus_state), - processed_time: ctx.host_timestamp(), - processed_height: ctx.host_height(), - }); - - output.emit(IbcEvent::UpdateClient(event_attributes.into())); - - Ok(output.with_result(result)) +pub fn process( + ctx: &Ctx, + msg: MsgUpdateAnyClient, +) -> HandlerResult, Error> +where + Ctx: ReaderContext, +{ + let mut output = HandlerOutput::builder(); + + let MsgUpdateAnyClient { client_id, header, signer: _ } = msg; + + // Read client type from the host chain store. The client should already exist. + let client_type = ctx.client_type(&client_id)?; + + // Read client state from the host chain store. + let client_state = ctx.client_state(&client_id)?; + + let client_def = client_state.client_def(); + + if client_state.is_frozen() { + return Err(Error::client_frozen(client_id)) + } + + // Read consensus state from the host chain store. 
+ let latest_consensus_state = + ctx.consensus_state(&client_id, client_state.latest_height()).map_err(|_| { + Error::consensus_state_not_found(client_id.clone(), client_state.latest_height()) + })?; + + tracing::debug!("latest consensus state: {:?}", latest_consensus_state); + + let now = ctx.host_timestamp(); + let duration = now.duration_since(&latest_consensus_state.timestamp()).ok_or_else(|| { + Error::invalid_consensus_state_timestamp(latest_consensus_state.timestamp(), now) + })?; + + if client_state.expired(duration) { + return Err(Error::header_not_within_trust_period(latest_consensus_state.timestamp(), now)) + } + + client_def + .verify_header::(ctx, client_id.clone(), client_state.clone(), header.clone()) + .map_err(|e| Error::header_verification_failure(e.to_string()))?; + + let found_misbehaviour = client_def + .check_for_misbehaviour(ctx, client_id.clone(), client_state.clone(), header.clone()) + .map_err(|e| Error::header_verification_failure(e.to_string()))?; + + let event_attributes = Attributes { + client_id: client_id.clone(), + height: ctx.host_height(), + client_type: client_type.to_owned(), + consensus_height: client_state.latest_height(), + }; + + if found_misbehaviour { + let client_state = client_def.update_state_on_misbehaviour(client_state, header)?; + let result = ClientResult::Update(Result { + client_id, + client_state, + consensus_state: None, + processed_time: ctx.host_timestamp(), + processed_height: ctx.host_height(), + }); + output.emit(IbcEvent::ClientMisbehaviour(event_attributes.into())); + return Ok(output.with_result(result)) + } + // Use client_state to validate the new header against the latest consensus_state. + // This function will return the new client_state (its latest_height changed) and a + // consensus_state obtained from header. These will be later persisted by the keeper. 
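// A sketch, not taken from this patch, of how a keeper might consume the result
// assembled below. Only `ClientResult::Update` and the `Result` fields come from
// this change set; the persistence steps are placeholders:
//
//     match handler_output.result {
//         ClientResult::Update(res) => {
//             // store `res.client_state` under `res.client_id`; when
//             // `res.consensus_state` is `Some`, store it keyed by the new header's
//             // height, together with `res.processed_time` / `res.processed_height`.
//         }
//         _ => { /* create / upgrade results are handled by their own branches */ }
//     }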
+ let (new_client_state, new_consensus_state) = client_def + .update_state(ctx, client_id.clone(), client_state, header) + .map_err(|e| Error::header_verification_failure(e.to_string()))?; + + let result = ClientResult::::Update(Result { + client_id, + client_state: new_client_state, + consensus_state: Some(new_consensus_state), + processed_time: ctx.host_timestamp(), + processed_height: ctx.host_height(), + }); + + output.emit(IbcEvent::UpdateClient(event_attributes.into())); + + Ok(output.with_result(result)) } #[cfg(test)] mod tests { - use crate::clients::ics11_beefy::header::ParachainHeadersWithProof; - use core::str::FromStr; - use subxt::sp_runtime::traits::Header; - use test_log::test; - - use crate::core::ics02_client::client_consensus::{AnyConsensusState, ConsensusState}; - use crate::core::ics02_client::client_state::{AnyClientState, ClientState}; - use crate::core::ics02_client::client_type::ClientType; - use crate::core::ics02_client::context::{ClientKeeper, ClientReader}; - use crate::core::ics02_client::error::{Error, ErrorDetail}; - use crate::core::ics02_client::handler::dispatch; - use crate::core::ics02_client::handler::ClientResult::Update; - use crate::core::ics02_client::header::AnyHeader; - use crate::core::ics02_client::msgs::create_client::MsgCreateAnyClient; - use crate::core::ics02_client::msgs::update_client::MsgUpdateAnyClient; - use crate::core::ics02_client::msgs::ClientMsg; - use crate::core::ics24_host::identifier::{ChainId, ClientId}; - use crate::events::IbcEvent; - use crate::handler::HandlerOutput; - use crate::mock::client_state::MockClientState; - use crate::mock::context::MockContext; - use crate::mock::header::MockHeader; - use crate::mock::host::HostType; - use crate::prelude::*; - use crate::test_utils::{get_dummy_account_id, Crypto}; - use crate::timestamp::Timestamp; - use crate::Height; - - #[test] - fn test_update_client_ok() { - let client_id = ClientId::default(); - let signer = get_dummy_account_id(); - - let timestamp = Timestamp::now(); - - let ctx = MockContext::default().with_client(&client_id, Height::new(0, 42)); - let msg = MsgUpdateAnyClient { - client_id: client_id.clone(), - header: MockHeader::new(Height::new(0, 46)) - .with_timestamp(timestamp) - .into(), - signer, - }; - - let output = dispatch::<_, Crypto>(&ctx, ClientMsg::UpdateClient(msg.clone())); - - match output { - Ok(HandlerOutput { - result, - mut events, - log, - }) => { - assert_eq!(events.len(), 1); - let event = events.pop().unwrap(); - assert!( - matches!(event, IbcEvent::UpdateClient(ref e) if e.client_id() == &msg.client_id) - ); - assert_eq!(event.height(), ctx.host_height()); - assert!(log.is_empty()); - // Check the result - match result { - Update(upd_res) => { - assert_eq!(upd_res.client_id, client_id); - assert_eq!( - upd_res.client_state, - AnyClientState::Mock(MockClientState::new( - MockHeader::new(msg.header.height()).with_timestamp(timestamp) - )) - ) - } - _ => panic!("update handler result has incorrect type"), - } - } - Err(err) => { - panic!("unexpected error: {}", err); - } - } - } - - #[test] - fn test_update_nonexisting_client() { - let client_id = ClientId::from_str("mockclient1").unwrap(); - let signer = get_dummy_account_id(); - - let ctx = MockContext::default().with_client(&client_id, Height::new(0, 42)); - - let msg = MsgUpdateAnyClient { - client_id: ClientId::from_str("nonexistingclient").unwrap(), - header: MockHeader::new(Height::new(0, 46)).into(), - signer, - }; - - let output = dispatch::<_, Crypto>(&ctx, 
ClientMsg::UpdateClient(msg.clone())); - - match output { - Err(Error(ErrorDetail::ClientNotFound(e), _)) => { - assert_eq!(e.client_id, msg.client_id); - } - _ => { - panic!("expected ClientNotFound error, instead got {:?}", output) - } - } - } - - #[test] - fn test_update_client_ok_multiple() { - let client_ids = vec![ - ClientId::from_str("mockclient1").unwrap(), - ClientId::from_str("mockclient2").unwrap(), - ClientId::from_str("mockclient3").unwrap(), - ]; - let signer = get_dummy_account_id(); - let initial_height = Height::new(0, 45); - let update_height = Height::new(0, 49); - - let mut ctx = MockContext::default(); - - for cid in &client_ids { - ctx = ctx.with_client(cid, initial_height); - } - - for cid in &client_ids { - let msg = MsgUpdateAnyClient { - client_id: cid.clone(), - header: MockHeader::new(update_height).into(), - signer: signer.clone(), - }; - - let output = dispatch::<_, Crypto>(&ctx, ClientMsg::UpdateClient(msg.clone())); - - match output { - Ok(HandlerOutput { - result: _, - mut events, - log, - }) => { - assert_eq!(events.len(), 1); - let event = events.pop().unwrap(); - assert!( - matches!(event, IbcEvent::UpdateClient(ref e) if e.client_id() == &msg.client_id) - ); - assert_eq!(event.height(), ctx.host_height()); - assert!(log.is_empty()); - } - Err(err) => { - panic!("unexpected error: {}", err); - } - } - } - } - - #[test] - fn test_update_synthetic_tendermint_client_adjacent_ok() { - let client_id = ClientId::new(ClientType::Tendermint, 0).unwrap(); - let client_height = Height::new(1, 20); - let update_height = Height::new(1, 21); - - let ctx = MockContext::new( - ChainId::new("mockgaiaA".to_string(), 1), - HostType::Mock, - 5, - Height::new(1, 1), - ) - .with_client_parametrized( - &client_id, - client_height, - Some(ClientType::Tendermint), // The target host chain (B) is synthetic TM. 
- Some(client_height), - ); - - let ctx_b = MockContext::new( - ChainId::new("mockgaiaB".to_string(), 1), - HostType::SyntheticTendermint, - 5, - update_height, - ); - - let signer = get_dummy_account_id(); - - let block_ref = ctx_b.host_block(update_height); - let mut latest_header: AnyHeader = block_ref.cloned().map(Into::into).unwrap(); - - latest_header = match latest_header { - AnyHeader::Tendermint(mut theader) => { - theader.trusted_height = client_height; - AnyHeader::Tendermint(theader) - } - AnyHeader::Beefy(h) => AnyHeader::Beefy(h), - AnyHeader::Mock(m) => AnyHeader::Mock(m), - }; - - let msg = MsgUpdateAnyClient { - client_id: client_id.clone(), - header: latest_header, - signer, - }; - - let output = dispatch::<_, Crypto>(&ctx, ClientMsg::UpdateClient(msg.clone())); - - match output { - Ok(HandlerOutput { - result, - mut events, - log, - }) => { - assert_eq!(events.len(), 1); - let event = events.pop().unwrap(); - assert!( - matches!(event, IbcEvent::UpdateClient(ref e) if e.client_id() == &msg.client_id) - ); - assert_eq!(event.height(), ctx.host_height()); - assert!(log.is_empty()); - // Check the result - match result { - Update(upd_res) => { - assert_eq!(upd_res.client_id, client_id); - assert!(!upd_res.client_state.is_frozen()); - assert_eq!(upd_res.client_state.latest_height(), msg.header.height(),) - } - _ => panic!("update handler result has incorrect type"), - } - } - Err(err) => { - panic!("unexpected error: {}", err); - } - } - } - - #[test] - fn test_update_synthetic_tendermint_client_non_adjacent_ok() { - let client_id = ClientId::new(ClientType::Tendermint, 0).unwrap(); - let client_height = Height::new(1, 20); - let update_height = Height::new(1, 21); - - let ctx = MockContext::new( - ChainId::new("mockgaiaA".to_string(), 1), - HostType::Mock, - 5, - Height::new(1, 1), - ) - .with_client_parametrized_history( - &client_id, - client_height, - Some(ClientType::Tendermint), // The target host chain (B) is synthetic TM. 
- Some(client_height), - ); - - let ctx_b = MockContext::new( - ChainId::new("mockgaiaB".to_string(), 1), - HostType::SyntheticTendermint, - 5, - update_height, - ); - - let signer = get_dummy_account_id(); - - let block_ref = ctx_b.host_block(update_height); - let mut latest_header: AnyHeader = block_ref.cloned().map(Into::into).unwrap(); - - let trusted_height = client_height.clone().sub(1).unwrap_or_default(); - - latest_header = match latest_header { - AnyHeader::Tendermint(mut theader) => { - theader.trusted_height = trusted_height; - AnyHeader::Tendermint(theader) - } - AnyHeader::Beefy(h) => AnyHeader::Beefy(h), - AnyHeader::Mock(m) => AnyHeader::Mock(m), - }; - - let msg = MsgUpdateAnyClient { - client_id: client_id.clone(), - header: latest_header, - signer, - }; - - let output = dispatch::<_, Crypto>(&ctx, ClientMsg::UpdateClient(msg.clone())); - - match output { - Ok(HandlerOutput { - result, - mut events, - log, - }) => { - assert_eq!(events.len(), 1); - let event = events.pop().unwrap(); - assert!( - matches!(event, IbcEvent::UpdateClient(ref e) if e.client_id() == &msg.client_id) - ); - assert_eq!(event.height(), ctx.host_height()); - assert!(log.is_empty()); - // Check the result - match result { - Update(upd_res) => { - assert_eq!(upd_res.client_id, client_id); - assert!(!upd_res.client_state.is_frozen()); - assert_eq!(upd_res.client_state.latest_height(), msg.header.height(),) - } - _ => panic!("update handler result has incorrect type"), - } - } - Err(err) => { - panic!("unexpected error: {}", err); - } - } - } - - #[test] - fn test_update_synthetic_tendermint_client_duplicate_ok() { - let client_id = ClientId::new(ClientType::Tendermint, 0).unwrap(); - let client_height = Height::new(1, 20); - - let chain_start_height = Height::new(1, 11); - - let ctx = MockContext::new( - ChainId::new("mockgaiaA".to_string(), 1), - HostType::Mock, - 5, - chain_start_height, - ) - .with_client_parametrized( - &client_id, - client_height, - Some(ClientType::Tendermint), // The target host chain (B) is synthetic TM. 
- Some(client_height), - ); - - let ctx_b = MockContext::new( - ChainId::new("mockgaiaB".to_string(), 1), - HostType::SyntheticTendermint, - 5, - client_height, - ); - - let signer = get_dummy_account_id(); - - let block_ref = ctx_b.host_block(client_height); - let latest_header: AnyHeader = match block_ref.cloned().map(Into::into).unwrap() { - AnyHeader::Tendermint(mut theader) => { - let cons_state = ctx.latest_consensus_states(&client_id, &client_height); - if let AnyConsensusState::Tendermint(tcs) = cons_state { - theader.signed_header.header.time = tcs.timestamp; - theader.trusted_height = Height::new(1, 11) - } - AnyHeader::Tendermint(theader) - } - AnyHeader::Beefy(h) => AnyHeader::Beefy(h), - AnyHeader::Mock(header) => AnyHeader::Mock(header), - }; - - let msg = MsgUpdateAnyClient { - client_id: client_id.clone(), - header: latest_header, - signer, - }; - - let output = dispatch::<_, Crypto>(&ctx, ClientMsg::UpdateClient(msg.clone())); - - match output { - Ok(HandlerOutput { - result, - mut events, - log, - }) => { - assert_eq!(events.len(), 1); - let event = events.pop().unwrap(); - assert!( - matches!(event, IbcEvent::UpdateClient(ref e) if e.client_id() == &msg.client_id) - ); - assert_eq!(event.height(), ctx.host_height()); - assert!(log.is_empty()); - // Check the result - match result { - Update(upd_res) => { - assert_eq!(upd_res.client_id, client_id); - assert!(!upd_res.client_state.is_frozen()); - assert_eq!(upd_res.client_state, ctx.latest_client_states(&client_id)); - assert_eq!(upd_res.client_state.latest_height(), msg.header.height(),) - } - _ => panic!("update handler result has incorrect type"), - } - } - Err(err) => { - panic!("unexpected error: {:?}", err); - } - } - } - - #[test] - fn test_update_synthetic_tendermint_client_lower_height() { - let client_id = ClientId::new(ClientType::Tendermint, 0).unwrap(); - let client_height = Height::new(1, 20); - - let client_update_height = Height::new(1, 19); - - let chain_start_height = Height::new(1, 11); - - let ctx = MockContext::new( - ChainId::new("mockgaiaA".to_string(), 1), - HostType::Mock, - 5, - chain_start_height, - ) - .with_client_parametrized( - &client_id, - client_height, - Some(ClientType::Tendermint), // The target host chain (B) is synthetic TM. 
- Some(client_height), - ); - - let ctx_b = MockContext::new( - ChainId::new("mockgaiaB".to_string(), 1), - HostType::SyntheticTendermint, - 5, - client_height, - ); - - let signer = get_dummy_account_id(); - - let block_ref = ctx_b.host_block(client_update_height); - let latest_header: AnyHeader = block_ref.cloned().map(Into::into).unwrap(); - - let msg = MsgUpdateAnyClient { - client_id, - header: latest_header, - signer, - }; - - let output = dispatch::<_, Crypto>(&ctx, ClientMsg::UpdateClient(msg)); - - match output { - Ok(_) => { - panic!("update handler result has incorrect type"); - } - Err(err) => match err.detail() { - ErrorDetail::HeaderVerificationFailure(_) => {} - _ => panic!("unexpected error: {:?}", err), - }, - } - } - - #[cfg(feature = "ics11_beefy")] - #[tokio::test] - async fn test_continuous_update_of_beefy_client() { - use crate::clients::ics11_beefy::client_state::ClientState as BeefyClientState; - use crate::clients::ics11_beefy::consensus_state::ConsensusState; - use crate::clients::ics11_beefy::header::BeefyHeader; - use crate::clients::ics11_beefy::header::ParachainHeader as BeefyParachainHeader; - use beefy_client_primitives::NodesUtils; - use beefy_client_primitives::PartialMmrLeaf; - use beefy_queries::runtime; - use beefy_queries::{ - helpers::{fetch_timestamp_extrinsic_with_proof, TimeStampExtWithProof}, - ClientWrapper, - }; - use codec::{Decode, Encode}; - use subxt::rpc::{rpc_params, JsonValue, Subscription, SubscriptionClientT}; - - let client_id = ClientId::new(ClientType::Beefy, 0).unwrap(); - - let chain_start_height = Height::new(1, 11); - - let mut ctx = MockContext::new( - ChainId::new("mockgaiaA".to_string(), 1), - HostType::Beefy, - 5, - chain_start_height, - ); - - let signer = get_dummy_account_id(); - - let url = std::env::var("NODE_ENDPOINT").unwrap_or("ws://127.0.0.1:9944".to_string()); - let client = subxt::ClientBuilder::new() - .set_url(url) - .build::() - .await - .unwrap(); - - let para_url = std::env::var("NODE_ENDPOINT").unwrap_or("ws://127.0.0.1:9988".to_string()); - let para_client = subxt::ClientBuilder::new() - .set_url(para_url) - .build::() - .await - .unwrap(); - let client_wrapper = ClientWrapper { - relay_client: client.clone(), - para_client: para_client.clone(), - beefy_activation_block: 0, - para_id: 2001, - }; - - let mut count = 0; - let (client_state, consensus_state) = loop { - let beefy_state = client_wrapper - .construct_beefy_client_state(0) - .await - .unwrap(); - - let api = - client_wrapper - .relay_client - .clone() - .to_runtime_api::, - >>(); - let subxt_block_number: subxt::BlockNumber = beefy_state.latest_beefy_height.into(); - let block_hash = client_wrapper - .relay_client - .rpc() - .block_hash(Some(subxt_block_number)) - .await - .unwrap(); - let head_data = api - .storage() - .paras() - .heads( - &runtime::api::runtime_types::polkadot_parachain::primitives::Id( - client_wrapper.para_id, - ), - block_hash, - ) - .await - .unwrap() - .unwrap(); - let decoded_para_head = frame_support::sp_runtime::generic::Header::< - u32, - frame_support::sp_runtime::traits::BlakeTwo256, - >::decode(&mut &*head_data.0) - .unwrap(); - let block_number = decoded_para_head.number; - let client_state = BeefyClientState { - chain_id: ChainId::new("relay-chain".to_string(), 0), - relay_chain: Default::default(), - mmr_root_hash: beefy_state.mmr_root_hash, - latest_beefy_height: beefy_state.latest_beefy_height, - frozen_height: None, - beefy_activation_block: beefy_state.beefy_activation_block, - latest_para_height: block_number, 
- para_id: client_wrapper.para_id, - authority: beefy_state.current_authorities, - next_authority_set: beefy_state.next_authorities, - }; - // we can't use the genesis block to construct the initial state. - if block_number == 0 { - continue; - } - let subxt_block_number: subxt::BlockNumber = block_number.into(); - let block_hash = client_wrapper - .para_client - .rpc() - .block_hash(Some(subxt_block_number)) - .await - .unwrap(); - - let TimeStampExtWithProof { - ext: timestamp_extrinsic, - proof: extrinsic_proof, - } = fetch_timestamp_extrinsic_with_proof(&client_wrapper.para_client, block_hash) - .await - .unwrap(); - let parachain_header = BeefyParachainHeader { - parachain_header: decoded_para_head, - partial_mmr_leaf: PartialMmrLeaf { - version: Default::default(), - parent_number_and_hash: Default::default(), - beefy_next_authority_set: Default::default(), - }, - parachain_heads_proof: vec![], - heads_leaf_index: 0, - heads_total_count: 0, - extrinsic_proof, - timestamp_extrinsic, - }; - - let consensus_state = ConsensusState::from_header(parachain_header) - .unwrap() - .wrap_any(); - - break (client_state.wrap_any(), consensus_state); - }; - - let create_client = MsgCreateAnyClient { - client_state, - consensus_state, - signer: signer.clone(), - }; - - // Create the client - let res = dispatch::<_, Crypto>(&ctx, ClientMsg::CreateClient(create_client)).unwrap(); - ctx.store_client_result(res.result).unwrap(); - let mut subscription: Subscription = client - .rpc() - .client - .subscribe( - "beefy_subscribeJustifications", - rpc_params![], - "beefy_unsubscribeJustifications", - ) - .await - .unwrap(); - - while let Some(Ok(commitment)) = subscription.next().await { - if count == 100 { - break; - } - let recv_commitment: sp_core::Bytes = - serde_json::from_value(JsonValue::String(commitment)).unwrap(); - let signed_commitment: beefy_primitives::SignedCommitment< - u32, - beefy_primitives::crypto::Signature, - > = codec::Decode::decode(&mut &*recv_commitment).unwrap(); - let client_state: BeefyClientState = match ctx.client_state(&client_id).unwrap() { - AnyClientState::Beefy(client_state) => client_state, - _ => panic!("unexpected client state"), - }; - match signed_commitment.commitment.validator_set_id { - id if id < client_state.authority.id => { - // If validator set id of signed commitment is less than current validator set id we have - // Then commitment is outdated and we skip it. 
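// Editorial aside, not part of the patch: the guard above treats a signed
// commitment as stale whenever its validator set id is older than the id of
// the authority set the client currently trusts. A minimal standalone sketch
// of that freshness check (names are illustrative, not the crate's API):
fn commitment_is_outdated(commitment_set_id: u64, current_authority_set_id: u64) -> bool {
    // Anything signed by an authority set older than the one we track is skipped.
    commitment_set_id < current_authority_set_id
}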
- println!( - "Skipping outdated commitment \n Received signed commitmment with validator_set_id: {:?}\n Current authority set id: {:?}\n Next authority set id: {:?}\n", - signed_commitment.commitment.validator_set_id, client_state.authority.id, client_state.next_authority_set.id - ); - continue; - } - _ => {} - } - - println!( - "Received signed commitmment for: {:?}", - signed_commitment.commitment.block_number - ); - - let block_number = signed_commitment.commitment.block_number; - let headers = client_wrapper - .query_finalized_parachain_headers_at( - block_number, - client_state.latest_beefy_height, - ) - .await - .unwrap(); - let (parachain_headers, batch_proof) = client_wrapper - .query_finalized_parachain_headers_with_proof( - block_number, - client_state.latest_beefy_height, - headers.iter().map(|h| *h.number()).collect(), - ) - .await - .unwrap(); - - let mmr_update = client_wrapper - .fetch_mmr_update_proof_for(signed_commitment.clone()) - .await - .unwrap(); - - let mmr_size = NodesUtils::new(batch_proof.leaf_count).size(); - - let header = BeefyHeader { - headers_with_proof: Some(ParachainHeadersWithProof { - headers: parachain_headers - .into_iter() - .map(|header| BeefyParachainHeader { - parachain_header: Decode::decode( - &mut &*header.parachain_header.as_slice(), - ) - .unwrap(), - partial_mmr_leaf: header.partial_mmr_leaf, - parachain_heads_proof: header.parachain_heads_proof, - heads_leaf_index: header.heads_leaf_index, - heads_total_count: header.heads_total_count, - extrinsic_proof: header.extrinsic_proof, - timestamp_extrinsic: header.timestamp_extrinsic, - }) - .collect(), - mmr_proofs: batch_proof - .items - .into_iter() - .map(|item| item.encode()) - .collect(), - mmr_size, - }), - mmr_update_proof: Some(mmr_update), - }; - - let msg = MsgUpdateAnyClient { - client_id: client_id.clone(), - header: AnyHeader::Beefy(header), - signer: signer.clone(), - }; - - let res = dispatch::<_, Crypto>(&ctx, ClientMsg::UpdateClient(msg.clone())); - - match res { - Ok(HandlerOutput { - result, - mut events, - log, - }) => { - assert_eq!(events.len(), 1); - let event = events.pop().unwrap(); - assert!( - matches!(event, IbcEvent::UpdateClient(ref e) if e.client_id() == &msg.client_id) - ); - assert_eq!(event.height(), ctx.host_height()); - assert!(log.is_empty()); - ctx.store_client_result(result.clone()).unwrap(); - match result { - Update(upd_res) => { - assert_eq!(upd_res.client_id, client_id); - assert!(!upd_res.client_state.is_frozen()); - assert_eq!( - upd_res.client_state, - ctx.latest_client_states(&client_id).clone() - ); - } - _ => panic!("update handler result has incorrect type"), - } - } - Err(e) => panic!("Unexpected error {:?}", e), - } - println!("Updated client successfully"); - count += 1; - } - } + use core::str::FromStr; + use test_log::test; + + use crate::{ + core::{ + ics02_client::{ + context::ClientReader, + error::{Error, ErrorDetail}, + handler::{dispatch, ClientResult::Update}, + header::Header, + msgs::{update_client::MsgUpdateAnyClient, ClientMsg}, + }, + ics24_host::identifier::ClientId, + }, + events::IbcEvent, + handler::HandlerOutput, + mock::{ + client_state::{AnyClientState, MockClientState}, + context::{MockClientTypes, MockContext}, + header::MockHeader, + }, + prelude::*, + test_utils::get_dummy_account_id, + timestamp::Timestamp, + Height, + }; + + #[test] + fn test_update_client_ok() { + let client_id = ClientId::default(); + let signer = get_dummy_account_id(); + + let timestamp = Timestamp::now(); + + let ctx = + 
MockContext::::default().with_client(&client_id, Height::new(0, 42)); + let msg = MsgUpdateAnyClient { + client_id: client_id.clone(), + header: MockHeader::new(Height::new(0, 46)).with_timestamp(timestamp).into(), + signer, + }; + + let output = dispatch(&ctx, ClientMsg::UpdateClient(msg.clone())); + + match output { + Ok(HandlerOutput { result, mut events, log }) => { + assert_eq!(events.len(), 1); + let event = events.pop().unwrap(); + assert!( + matches!(event, IbcEvent::UpdateClient(ref e) if e.client_id() == &msg.client_id) + ); + assert_eq!(event.height(), ctx.host_height()); + assert!(log.is_empty()); + // Check the result + match result { + Update(upd_res) => { + assert_eq!(upd_res.client_id, client_id); + assert_eq!( + upd_res.client_state, + AnyClientState::Mock(MockClientState::new( + MockHeader::new(msg.header.height()).with_timestamp(timestamp) + )) + ) + }, + _ => panic!("update handler result has incorrect type"), + } + }, + Err(err) => { + panic!("unexpected error: {}", err); + }, + } + } + + #[test] + fn test_update_nonexisting_client() { + let client_id = ClientId::from_str("mockclient1").unwrap(); + let signer = get_dummy_account_id(); + + let ctx = + MockContext::::default().with_client(&client_id, Height::new(0, 42)); + + let msg = MsgUpdateAnyClient { + client_id: ClientId::from_str("nonexistingclient").unwrap(), + header: MockHeader::new(Height::new(0, 46)).into(), + signer, + }; + + let output = dispatch(&ctx, ClientMsg::UpdateClient(msg.clone())); + + match output { + Err(Error(ErrorDetail::ClientNotFound(e), _)) => { + assert_eq!(e.client_id, msg.client_id); + }, + _ => { + panic!("expected ClientNotFound error, instead got {:?}", output) + }, + } + } + + #[test] + fn test_update_client_ok_multiple() { + let client_ids = vec![ + ClientId::from_str("mockclient1").unwrap(), + ClientId::from_str("mockclient2").unwrap(), + ClientId::from_str("mockclient3").unwrap(), + ]; + let signer = get_dummy_account_id(); + let initial_height = Height::new(0, 45); + let update_height = Height::new(0, 49); + + let mut ctx = MockContext::::default(); + + for cid in &client_ids { + ctx = ctx.with_client(cid, initial_height); + } + + for cid in &client_ids { + let msg = MsgUpdateAnyClient { + client_id: cid.clone(), + header: MockHeader::new(update_height).into(), + signer: signer.clone(), + }; + + let output = dispatch(&ctx, ClientMsg::UpdateClient(msg.clone())); + + match output { + Ok(HandlerOutput { result: _, mut events, log }) => { + assert_eq!(events.len(), 1); + let event = events.pop().unwrap(); + assert!( + matches!(event, IbcEvent::UpdateClient(ref e) if e.client_id() == &msg.client_id) + ); + assert_eq!(event.height(), ctx.host_height()); + assert!(log.is_empty()); + }, + Err(err) => { + panic!("unexpected error: {}", err); + }, + } + } + } } diff --git a/modules/src/core/ics02_client/handler/upgrade_client.rs b/modules/src/core/ics02_client/handler/upgrade_client.rs index 5f3ea9785d..0e6d63a636 100644 --- a/modules/src/core/ics02_client/handler/upgrade_client.rs +++ b/modules/src/core/ics02_client/handler/upgrade_client.rs @@ -1,202 +1,218 @@ //! Protocol logic specific to processing ICS2 messages of type `MsgUpgradeAnyClient`. -//! 
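// Editorial aside, not part of the patch: the upgrade handler below reads the
// stored client state, rejects frozen clients, and only accepts an upgraded
// client state whose latest height is strictly greater than the current one
// before verifying the upgrade proofs. A minimal sketch of that height guard,
// using a simplified stand-in for `Height` whose derived ordering matches the
// (revision_number, revision_height) comparison used elsewhere in this patch;
// all names here are hypothetical.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
struct SketchHeight {
    revision_number: u64,
    revision_height: u64,
}

fn check_upgrade_height(current: SketchHeight, upgraded: SketchHeight) -> Result<(), String> {
    if current >= upgraded {
        // Mirrors the `low_upgrade_height` error path in the handler below.
        return Err(format!("low upgrade height: {:?} >= {:?}", current, upgraded));
    }
    Ok(())
}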
-use crate::clients::host_functions::HostFunctionsProvider; -use crate::core::ics02_client::client_def::{AnyClient, ClientDef, ConsensusUpdateResult}; -use crate::core::ics02_client::client_state::{AnyClientState, ClientState}; -use crate::core::ics02_client::error::Error; -use crate::core::ics02_client::events::Attributes; -use crate::core::ics02_client::handler::ClientResult; -use crate::core::ics02_client::msgs::upgrade_client::MsgUpgradeAnyClient; -use crate::core::ics24_host::identifier::ClientId; -use crate::core::ics26_routing::context::ReaderContext; -use crate::events::IbcEvent; -use crate::handler::{HandlerOutput, HandlerResult}; -use crate::prelude::*; + +use crate::{ + core::{ + ics02_client::{ + client_def::{ClientDef, ConsensusUpdateResult}, + client_state::ClientState, + context::ClientKeeper, + error::Error, + events::Attributes, + handler::ClientResult, + msgs::upgrade_client::MsgUpgradeAnyClient, + }, + ics24_host::identifier::ClientId, + ics26_routing::context::ReaderContext, + }, + events::IbcEvent, + handler::{HandlerOutput, HandlerResult}, + prelude::*, +}; use core::fmt::Debug; /// The result following the successful processing of a `MsgUpgradeAnyClient` message. /// This data type should be used with a qualified name `upgrade_client::Result` to avoid ambiguity. #[derive(Clone, Debug, PartialEq, Eq)] -pub struct Result { - pub client_id: ClientId, - pub client_state: AnyClientState, - pub consensus_state: Option, +pub struct Result { + pub client_id: ClientId, + pub client_state: C::AnyClientState, + pub consensus_state: Option>, } -pub fn process( - ctx: &dyn ReaderContext, - msg: MsgUpgradeAnyClient, -) -> HandlerResult { - let mut output = HandlerOutput::builder(); - let MsgUpgradeAnyClient { client_id, .. } = msg; - - // Read client state from the host chain store. - let client_state = ctx.client_state(&client_id)?; - - if client_state.is_frozen() { - return Err(Error::client_frozen(client_id)); - } - - let upgrade_client_state = msg.client_state.clone(); - - if client_state.latest_height() >= upgrade_client_state.latest_height() { - return Err(Error::low_upgrade_height( - client_state.latest_height(), - upgrade_client_state.latest_height(), - )); - } - - let client_type = ctx.client_type(&client_id)?; - - let client_def = AnyClient::::from_client_type(client_type); - - let (new_client_state, new_consensus_state) = client_def.verify_upgrade_and_update_state( - &upgrade_client_state, - &msg.consensus_state, - msg.proof_upgrade_client.clone(), - msg.proof_upgrade_consensus_state, - )?; - - // Not implemented yet: https://github.com/informalsystems/ibc-rs/issues/722 - // todo!() - let event_attributes = Attributes { - client_id: client_id.clone(), - height: ctx.host_height(), - client_type, - consensus_height: new_client_state.latest_height(), - }; - - let result = ClientResult::Upgrade(Result { - client_id, - client_state: new_client_state, - consensus_state: Some(new_consensus_state), - }); - - output.emit(IbcEvent::UpgradeClient(event_attributes.into())); - Ok(output.with_result(result)) +pub fn process( + ctx: &Ctx, + msg: MsgUpgradeAnyClient, +) -> HandlerResult, Error> +where + Ctx: ReaderContext + Eq + Debug + Clone, +{ + let mut output = HandlerOutput::builder(); + let MsgUpgradeAnyClient { client_id, .. } = msg; + + // Read client state from the host chain store. 
+ let client_state = ctx.client_state(&client_id)?; + + if client_state.is_frozen() { + return Err(Error::client_frozen(client_id)) + } + + let upgrade_client_state = msg.client_state.clone(); + + if client_state.latest_height() >= upgrade_client_state.latest_height() { + return Err(Error::low_upgrade_height( + client_state.latest_height(), + upgrade_client_state.latest_height(), + )) + } + + let client_type = ctx.client_type(&client_id)?; + + let client_def = client_state.client_def(); + + let (new_client_state, new_consensus_state) = client_def + .verify_upgrade_and_update_state::( + &upgrade_client_state, + &msg.consensus_state, + msg.proof_upgrade_client.clone(), + msg.proof_upgrade_consensus_state, + )?; + + // Not implemented yet: https://github.com/informalsystems/ibc-rs/issues/722 + // todo!() + let event_attributes = Attributes { + client_id: client_id.clone(), + height: ctx.host_height(), + client_type: client_type.to_owned(), + consensus_height: new_client_state.latest_height(), + }; + + let result = ClientResult::Upgrade(Result { + client_id, + client_state: new_client_state, + consensus_state: Some(new_consensus_state), + }); + + output.emit(IbcEvent::UpgradeClient(event_attributes.into())); + Ok(output.with_result(result)) } #[cfg(test)] mod tests { - use crate::prelude::*; - - use core::str::FromStr; - - use crate::core::ics02_client::context::ClientReader; - use crate::core::ics02_client::error::{Error, ErrorDetail}; - use crate::core::ics02_client::handler::dispatch; - use crate::core::ics02_client::handler::ClientResult::Upgrade; - use crate::core::ics02_client::msgs::upgrade_client::MsgUpgradeAnyClient; - use crate::core::ics02_client::msgs::ClientMsg; - use crate::core::ics24_host::identifier::ClientId; - use crate::events::IbcEvent; - use crate::handler::HandlerOutput; - use crate::mock::client_state::{MockClientState, MockConsensusState}; - use crate::mock::context::MockContext; - use crate::mock::header::MockHeader; - use crate::test_utils::{get_dummy_account_id, Crypto}; - use crate::Height; - - #[test] - fn test_upgrade_client_ok() { - let client_id = ClientId::default(); - let signer = get_dummy_account_id(); - - let ctx = MockContext::default().with_client(&client_id, Height::new(0, 42)); - - let msg = MsgUpgradeAnyClient { - client_id: client_id.clone(), - client_state: MockClientState::new(MockHeader::new(Height::new(1, 26))).into(), - consensus_state: MockConsensusState::new(MockHeader::new(Height::new(1, 26))).into(), - proof_upgrade_client: Default::default(), - proof_upgrade_consensus_state: Default::default(), - signer, - }; - - let output = dispatch::<_, Crypto>(&ctx, ClientMsg::UpgradeClient(msg.clone())); - - match output { - Ok(HandlerOutput { - result, - mut events, - log, - }) => { - assert_eq!(events.len(), 1); - let event = events.pop().unwrap(); - assert!( - matches!(event, IbcEvent::UpgradeClient(ref e) if e.client_id() == &msg.client_id) - ); - assert_eq!(event.height(), ctx.host_height()); - assert!(log.is_empty()); - // Check the result - match result { - Upgrade(upg_res) => { - assert_eq!(upg_res.client_id, client_id); - assert_eq!(upg_res.client_state, msg.client_state) - } - _ => panic!("upgrade handler result has incorrect type"), - } - } - Err(err) => { - panic!("unexpected error: {}", err); - } - } - } - - #[test] - fn test_upgrade_nonexisting_client() { - let client_id = ClientId::from_str("mockclient1").unwrap(); - let signer = get_dummy_account_id(); - - let ctx = MockContext::default().with_client(&client_id, Height::new(0, 42)); - - 
let msg = MsgUpgradeAnyClient { - client_id: ClientId::from_str("nonexistingclient").unwrap(), - client_state: MockClientState::new(MockHeader::new(Height::new(1, 26))).into(), - consensus_state: MockConsensusState::new(MockHeader::new(Height::new(1, 26))).into(), - proof_upgrade_client: Default::default(), - proof_upgrade_consensus_state: Default::default(), - signer, - }; - - let output = dispatch::<_, Crypto>(&ctx, ClientMsg::UpgradeClient(msg.clone())); - - match output { - Err(Error(ErrorDetail::ClientNotFound(e), _)) => { - assert_eq!(e.client_id, msg.client_id); - } - _ => { - panic!("expected ClientNotFound error, instead got {:?}", output); - } - } - } - - #[test] - fn test_upgrade_client_low_height() { - let client_id = ClientId::default(); - let signer = get_dummy_account_id(); - - let ctx = MockContext::default().with_client(&client_id, Height::new(0, 42)); - - let msg = MsgUpgradeAnyClient { - client_id, - client_state: MockClientState::new(MockHeader::new(Height::new(0, 26))).into(), - consensus_state: MockConsensusState::new(MockHeader::new(Height::new(0, 26))).into(), - proof_upgrade_client: Default::default(), - proof_upgrade_consensus_state: Default::default(), - signer, - }; - - let output = dispatch::<_, Crypto>(&ctx, ClientMsg::UpgradeClient(msg.clone())); - - match output { - Err(Error(ErrorDetail::LowUpgradeHeight(e), _)) => { - assert_eq!(e.upgraded_height, Height::new(0, 42)); - assert_eq!(e.client_height, msg.client_state.latest_height()); - } - _ => { - panic!("expected LowUpgradeHeight error, instead got {:?}", output); - } - } - } + use crate::prelude::*; + + use core::str::FromStr; + + use crate::{ + core::{ + ics02_client::{ + client_state::ClientState, + context::ClientReader, + error::{Error, ErrorDetail}, + handler::{dispatch, ClientResult::Upgrade}, + msgs::{upgrade_client::MsgUpgradeAnyClient, ClientMsg}, + }, + ics24_host::identifier::ClientId, + }, + events::IbcEvent, + handler::HandlerOutput, + mock::{ + client_state::{MockClientState, MockConsensusState}, + context::{MockClientTypes, MockContext}, + header::MockHeader, + }, + test_utils::get_dummy_account_id, + Height, + }; + + #[test] + fn test_upgrade_client_ok() { + let client_id = ClientId::default(); + let signer = get_dummy_account_id(); + + let ctx = + MockContext::::default().with_client(&client_id, Height::new(0, 42)); + + let msg = MsgUpgradeAnyClient { + client_id: client_id.clone(), + client_state: MockClientState::new(MockHeader::new(Height::new(1, 26))).into(), + consensus_state: MockConsensusState::new(MockHeader::new(Height::new(1, 26))).into(), + proof_upgrade_client: Default::default(), + proof_upgrade_consensus_state: Default::default(), + signer, + }; + + let output = dispatch(&ctx, ClientMsg::UpgradeClient(msg.clone())); + + match output { + Ok(HandlerOutput { result, mut events, log }) => { + assert_eq!(events.len(), 1); + let event = events.pop().unwrap(); + assert!( + matches!(event, IbcEvent::UpgradeClient(ref e) if e.client_id() == &msg.client_id) + ); + assert_eq!(event.height(), ctx.host_height()); + assert!(log.is_empty()); + // Check the result + match result { + Upgrade(upg_res) => { + assert_eq!(upg_res.client_id, client_id); + assert_eq!(upg_res.client_state, msg.client_state) + }, + _ => panic!("upgrade handler result has incorrect type"), + } + }, + Err(err) => { + panic!("unexpected error: {}", err); + }, + } + } + + #[test] + fn test_upgrade_nonexisting_client() { + let client_id = ClientId::from_str("mockclient1").unwrap(); + let signer = get_dummy_account_id(); 
+ + let ctx = + MockContext::::default().with_client(&client_id, Height::new(0, 42)); + + let msg = MsgUpgradeAnyClient { + client_id: ClientId::from_str("nonexistingclient").unwrap(), + client_state: MockClientState::new(MockHeader::new(Height::new(1, 26))).into(), + consensus_state: MockConsensusState::new(MockHeader::new(Height::new(1, 26))).into(), + proof_upgrade_client: Default::default(), + proof_upgrade_consensus_state: Default::default(), + signer, + }; + + let output = dispatch(&ctx, ClientMsg::UpgradeClient(msg.clone())); + + match output { + Err(Error(ErrorDetail::ClientNotFound(e), _)) => { + assert_eq!(e.client_id, msg.client_id); + }, + _ => { + panic!("expected ClientNotFound error, instead got {:?}", output); + }, + } + } + + #[test] + fn test_upgrade_client_low_height() { + let client_id = ClientId::default(); + let signer = get_dummy_account_id(); + + let ctx = + MockContext::::default().with_client(&client_id, Height::new(0, 42)); + + let msg = MsgUpgradeAnyClient { + client_id, + client_state: MockClientState::new(MockHeader::new(Height::new(0, 26))).into(), + consensus_state: MockConsensusState::new(MockHeader::new(Height::new(0, 26))).into(), + proof_upgrade_client: Default::default(), + proof_upgrade_consensus_state: Default::default(), + signer, + }; + + let output = dispatch(&ctx, ClientMsg::UpgradeClient(msg.clone())); + + match output { + Err(Error(ErrorDetail::LowUpgradeHeight(e), _)) => { + assert_eq!(e.upgraded_height, Height::new(0, 42)); + assert_eq!(e.client_height, msg.client_state.latest_height()); + }, + _ => { + panic!("expected LowUpgradeHeight error, instead got {:?}", output); + }, + } + } } diff --git a/modules/src/core/ics02_client/header.rs b/modules/src/core/ics02_client/header.rs index 88cbddf7da..0809ddc783 100644 --- a/modules/src/core/ics02_client/header.rs +++ b/modules/src/core/ics02_client/header.rs @@ -1,156 +1,23 @@ -use core::ops::Deref; - -use ibc_proto::google::protobuf::Any; -use serde_derive::{Deserialize, Serialize}; -use subtle_encoding::hex; -use tendermint_proto::Protobuf; - -use crate::clients::ics07_tendermint::header::{decode_header, Header as TendermintHeader}; -#[cfg(any(test, feature = "ics11_beefy"))] -use crate::clients::ics11_beefy::header::{decode_header as decode_beefy_header, BeefyHeader}; -// use crate::clients::ics13_near::header::NearHeader; -use crate::core::ics02_client::client_type::ClientType; -use crate::core::ics02_client::error::Error; -#[cfg(any(test, feature = "mocks"))] -use crate::mock::header::MockHeader; -use crate::prelude::*; -use crate::timestamp::Timestamp; -use crate::Height; - -pub const TENDERMINT_HEADER_TYPE_URL: &str = "/ibc.lightclients.tendermint.v1.Header"; -pub const BEEFY_HEADER_TYPE_URL: &str = "/ibc.lightclients.beefy.v1.Header"; -pub const NEAR_HEADER_TYPE_URL: &str = "/ibc.lightclients.near.v1.Header"; -pub const MOCK_HEADER_TYPE_URL: &str = "/ibc.mock.Header"; +use crate::{prelude::*, Height}; /// Abstract of consensus state update information pub trait Header: Clone + core::fmt::Debug + Send + Sync { - /// The type of client (eg. 
Tendermint) - fn client_type(&self) -> ClientType; - - /// Wrap into an `AnyHeader` - fn wrap_any(self) -> AnyHeader; -} - -#[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize)] -#[allow(clippy::large_enum_variant)] -pub enum AnyHeader { - Tendermint(TendermintHeader), - #[serde(skip)] - #[cfg(any(test, feature = "ics11_beefy"))] - Beefy(BeefyHeader), - // #[serde(skip)] - // Near(NearHeader), - #[cfg(any(test, feature = "mocks"))] - Mock(MockHeader), -} - -impl AnyHeader { - pub fn height(&self) -> Height { - match self { - Self::Tendermint(header) => header.height(), - #[cfg(any(test, feature = "ics11_beefy"))] - Self::Beefy(_header) => Default::default(), - // Self::Near(_header) => Default::default(), - #[cfg(any(test, feature = "mocks"))] - Self::Mock(header) => header.height(), - } - } - - pub fn timestamp(&self) -> Timestamp { - match self { - Self::Tendermint(header) => header.timestamp(), - #[cfg(any(test, feature = "ics11_beefy"))] - Self::Beefy(_header) => Default::default(), - // Self::Near(_header) => Default::default(), - #[cfg(any(test, feature = "mocks"))] - Self::Mock(header) => header.timestamp(), - } - } -} - -impl Header for AnyHeader { - fn client_type(&self) -> ClientType { - match self { - Self::Tendermint(header) => header.client_type(), - #[cfg(any(test, feature = "ics11_beefy"))] - Self::Beefy(header) => header.client_type(), - // Self::Near(header) => header.client_type(), - #[cfg(any(test, feature = "mocks"))] - Self::Mock(header) => header.client_type(), - } - } - - fn wrap_any(self) -> AnyHeader { - self - } -} - -impl AnyHeader { - pub fn encode_to_string(&self) -> String { - let buf = Protobuf::encode_vec(self); - let encoded = hex::encode(buf); - String::from_utf8(encoded).expect("hex-encoded string should always be valid UTF-8") - } - - pub fn decode_from_string(s: &str) -> Result { - let header_bytes = hex::decode(s).unwrap(); - Protobuf::decode(header_bytes.as_ref()).map_err(Error::invalid_raw_header) - } -} - -impl Protobuf for AnyHeader {} - -impl TryFrom for AnyHeader { - type Error = Error; - - fn try_from(raw: Any) -> Result { - match raw.type_url.as_str() { - TENDERMINT_HEADER_TYPE_URL => { - let val = decode_header(raw.value.deref()).map_err(Error::tendermint)?; - - Ok(AnyHeader::Tendermint(val)) - } - - #[cfg(any(test, feature = "ics11_beefy"))] - BEEFY_HEADER_TYPE_URL => { - let val = decode_beefy_header(&*raw.value).map_err(Error::beefy)?; - Ok(AnyHeader::Beefy(val)) - } - - #[cfg(any(test, feature = "mocks"))] - MOCK_HEADER_TYPE_URL => Ok(AnyHeader::Mock( - MockHeader::decode_vec(&raw.value).map_err(Error::invalid_raw_header)?, - )), - - _ => Err(Error::unknown_header_type(raw.type_url)), - } - } -} - -impl From for Any { - fn from(value: AnyHeader) -> Self { - match value { - AnyHeader::Tendermint(header) => Any { - type_url: TENDERMINT_HEADER_TYPE_URL.to_string(), - value: header.encode_vec(), - }, - - #[cfg(any(test, feature = "ics11_beefy"))] - AnyHeader::Beefy(header) => Any { - type_url: BEEFY_HEADER_TYPE_URL.to_string(), - value: header.encode_vec(), - }, - // AnyHeader::Near(header) => Any { - // type_url: NEAR_HEADER_TYPE_URL.to_string(), - // value: header - // .encode_vec() - // .expect("encodign to `Any` from AnyHeader::Near"), - // }, - #[cfg(any(test, feature = "mocks"))] - AnyHeader::Mock(header) => Any { - type_url: MOCK_HEADER_TYPE_URL.to_string(), - value: header.encode_vec(), - }, - } - } + fn downcast(self) -> Option + where + Self: 'static, + { + ::downcast_ref(&self).cloned() + } + + fn wrap(sub_state: &dyn 
core::any::Any) -> Option + where + Self: 'static, + { + sub_state.downcast_ref::().cloned() + } + + fn encode_to_vec(&self) -> Vec; + + /// The height of the header + fn height(&self) -> Height; } diff --git a/modules/src/core/ics02_client/height.rs b/modules/src/core/ics02_client/height.rs index e7feae2028..6b6794db68 100644 --- a/modules/src/core/ics02_client/height.rs +++ b/modules/src/core/ics02_client/height.rs @@ -1,8 +1,7 @@ use crate::prelude::*; use core::cmp::Ordering; -use core::num::ParseIntError; -use core::str::FromStr; +use core::{num::ParseIntError, str::FromStr}; use flex_error::{define_error, TraceError}; use serde_derive::{Deserialize, Serialize}; @@ -14,169 +13,157 @@ use crate::core::ics02_client::error::Error; #[derive(Copy, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] pub struct Height { - /// Previously known as "epoch" - pub revision_number: u64, + /// Previously known as "epoch" + pub revision_number: u64, - /// The height of a block - pub revision_height: u64, + /// The height of a block + pub revision_height: u64, } impl Height { - pub fn new(revision_number: u64, revision_height: u64) -> Self { - Self { - revision_number, - revision_height, - } - } - - pub fn zero() -> Height { - Self { - revision_number: 0, - revision_height: 0, - } - } - - pub fn is_zero(&self) -> bool { - self.revision_height == 0 - } - - pub fn add(&self, delta: u64) -> Height { - Height { - revision_number: self.revision_number, - revision_height: self.revision_height + delta, - } - } - - pub fn increment(&self) -> Height { - self.add(1) - } - - pub fn sub(&self, delta: u64) -> Result { - if self.revision_height <= delta { - return Err(Error::invalid_height_result()); - } - - Ok(Height { - revision_number: self.revision_number, - revision_height: self.revision_height - delta, - }) - } - - pub fn decrement(&self) -> Result { - self.sub(1) - } - - pub fn with_revision_height(self, revision_height: u64) -> Height { - Height { - revision_height, - ..self - } - } + pub fn new(revision_number: u64, revision_height: u64) -> Self { + Self { revision_number, revision_height } + } + + pub fn zero() -> Height { + Self { revision_number: 0, revision_height: 0 } + } + + pub fn is_zero(&self) -> bool { + self.revision_height == 0 + } + + pub fn add(&self, delta: u64) -> Height { + Height { + revision_number: self.revision_number, + revision_height: self.revision_height + delta, + } + } + + pub fn increment(&self) -> Height { + self.add(1) + } + + pub fn sub(&self, delta: u64) -> Result { + if self.revision_height <= delta { + return Err(Error::invalid_height_result()) + } + + Ok(Height { + revision_number: self.revision_number, + revision_height: self.revision_height - delta, + }) + } + + pub fn decrement(&self) -> Result { + self.sub(1) + } + + pub fn with_revision_height(self, revision_height: u64) -> Height { + Height { revision_height, ..self } + } } impl Default for Height { - fn default() -> Self { - Self::zero() - } + fn default() -> Self { + Self::zero() + } } impl PartialOrd for Height { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } } impl Ord for Height { - fn cmp(&self, other: &Self) -> Ordering { - if self.revision_number < other.revision_number { - Ordering::Less - } else if self.revision_number > other.revision_number { - Ordering::Greater - } else if self.revision_height < other.revision_height { - Ordering::Less - } else if self.revision_height > other.revision_height { 
- Ordering::Greater - } else { - Ordering::Equal - } - } + fn cmp(&self, other: &Self) -> Ordering { + if self.revision_number < other.revision_number { + Ordering::Less + } else if self.revision_number > other.revision_number { + Ordering::Greater + } else if self.revision_height < other.revision_height { + Ordering::Less + } else if self.revision_height > other.revision_height { + Ordering::Greater + } else { + Ordering::Equal + } + } } impl Protobuf for Height {} impl From for Height { - fn from(raw: RawHeight) -> Self { - Height { - revision_number: raw.revision_number, - revision_height: raw.revision_height, - } - } + fn from(raw: RawHeight) -> Self { + Height { revision_number: raw.revision_number, revision_height: raw.revision_height } + } } impl From for RawHeight { - fn from(ics_height: Height) -> Self { - RawHeight { - revision_number: ics_height.revision_number, - revision_height: ics_height.revision_height, - } - } + fn from(ics_height: Height) -> Self { + RawHeight { + revision_number: ics_height.revision_number, + revision_height: ics_height.revision_height, + } + } } impl core::fmt::Debug for Height { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { - f.debug_struct("Height") - .field("revision", &self.revision_number) - .field("height", &self.revision_height) - .finish() - } + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { + f.debug_struct("Height") + .field("revision", &self.revision_number) + .field("height", &self.revision_height) + .finish() + } } /// Custom debug output to omit the packet data impl core::fmt::Display for Height { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { - write!(f, "{}-{}", self.revision_number, self.revision_height) - } + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { + write!(f, "{}-{}", self.revision_number, self.revision_height) + } } define_error! 
{ - #[derive(Debug, PartialEq, Eq)] - HeightError { - HeightConversion - { height: String } - [ TraceError ] - | e | { - format_args!("cannot convert into a `Height` type from string {0}", - e.height) - }, - } + #[derive(Debug, PartialEq, Eq)] + HeightError { + HeightConversion + { height: String } + [ TraceError ] + | e | { + format_args!("cannot convert into a `Height` type from string {0}", + e.height) + }, + } } impl TryFrom<&str> for Height { - type Error = HeightError; - - fn try_from(value: &str) -> Result { - let split: Vec<&str> = value.split('-').collect(); - Ok(Height { - revision_number: split[0] - .parse::() - .map_err(|e| HeightError::height_conversion(value.to_owned(), e))?, - revision_height: split[1] - .parse::() - .map_err(|e| HeightError::height_conversion(value.to_owned(), e))?, - }) - } + type Error = HeightError; + + fn try_from(value: &str) -> Result { + let split: Vec<&str> = value.split('-').collect(); + Ok(Height { + revision_number: split[0] + .parse::() + .map_err(|e| HeightError::height_conversion(value.to_owned(), e))?, + revision_height: split[1] + .parse::() + .map_err(|e| HeightError::height_conversion(value.to_owned(), e))?, + }) + } } impl From for String { - fn from(height: Height) -> Self { - format!("{}-{}", height.revision_number, height.revision_number) - } + fn from(height: Height) -> Self { + format!("{}-{}", height.revision_number, height.revision_number) + } } impl FromStr for Height { - type Err = HeightError; + type Err = HeightError; - fn from_str(s: &str) -> Result { - Height::try_from(s) - } + fn from_str(s: &str) -> Result { + Height::try_from(s) + } } diff --git a/modules/src/core/ics02_client/misbehaviour.rs b/modules/src/core/ics02_client/misbehaviour.rs index 98a8b55b10..c417ad439b 100644 --- a/modules/src/core/ics02_client/misbehaviour.rs +++ b/modules/src/core/ics02_client/misbehaviour.rs @@ -1,117 +1,25 @@ -use crate::prelude::*; - -use ibc_proto::google::protobuf::Any; -use tendermint_proto::Protobuf; - -use crate::clients::ics07_tendermint::misbehaviour::Misbehaviour as TmMisbehaviour; -use crate::core::ics02_client::error::Error; - -#[cfg(any(test, feature = "mocks"))] -use crate::mock::misbehaviour::Misbehaviour as MockMisbehaviour; - -use crate::core::ics24_host::identifier::ClientId; -use crate::Height; - -use super::header::AnyHeader; - -pub const TENDERMINT_MISBEHAVIOR_TYPE_URL: &str = "/ibc.lightclients.tendermint.v1.Misbehaviour"; - -#[cfg(any(test, feature = "mocks"))] -pub const MOCK_MISBEHAVIOUR_TYPE_URL: &str = "/ibc.mock.Misbehavior"; +use crate::{core::ics24_host::identifier::ClientId, prelude::*, Height}; pub trait Misbehaviour: Clone + core::fmt::Debug + Send + Sync { - /// The type of client (eg. 
Tendermint) - fn client_id(&self) -> &ClientId; - - /// The height of the consensus state - fn height(&self) -> Height; - - fn wrap_any(self) -> AnyMisbehaviour; -} - -#[derive(Clone, Debug, PartialEq)] // TODO: Add Eq bound once possible -#[allow(clippy::large_enum_variant)] -pub enum AnyMisbehaviour { - Tendermint(TmMisbehaviour), - - #[cfg(any(test, feature = "mocks"))] - Mock(MockMisbehaviour), -} - -impl Misbehaviour for AnyMisbehaviour { - fn client_id(&self) -> &ClientId { - match self { - Self::Tendermint(misbehaviour) => misbehaviour.client_id(), - - #[cfg(any(test, feature = "mocks"))] - Self::Mock(misbehaviour) => misbehaviour.client_id(), - } - } - - fn height(&self) -> Height { - match self { - Self::Tendermint(misbehaviour) => misbehaviour.height(), - - #[cfg(any(test, feature = "mocks"))] - Self::Mock(misbehaviour) => misbehaviour.height(), - } - } - - fn wrap_any(self) -> AnyMisbehaviour { - self - } -} - -impl Protobuf for AnyMisbehaviour {} - -impl TryFrom for AnyMisbehaviour { - type Error = Error; - - fn try_from(raw: Any) -> Result { - match raw.type_url.as_str() { - TENDERMINT_MISBEHAVIOR_TYPE_URL => Ok(AnyMisbehaviour::Tendermint( - TmMisbehaviour::decode_vec(&raw.value).map_err(Error::decode_raw_misbehaviour)?, - )), - - #[cfg(any(test, feature = "mocks"))] - MOCK_MISBEHAVIOUR_TYPE_URL => Ok(AnyMisbehaviour::Mock( - MockMisbehaviour::decode_vec(&raw.value).map_err(Error::decode_raw_misbehaviour)?, - )), - _ => Err(Error::unknown_misbehaviour_type(raw.type_url)), - } - } -} - -impl From for Any { - fn from(value: AnyMisbehaviour) -> Self { - match value { - AnyMisbehaviour::Tendermint(misbehaviour) => Any { - type_url: TENDERMINT_MISBEHAVIOR_TYPE_URL.to_string(), - value: misbehaviour.encode_vec(), - }, - - #[cfg(any(test, feature = "mocks"))] - AnyMisbehaviour::Mock(misbehaviour) => Any { - type_url: MOCK_MISBEHAVIOUR_TYPE_URL.to_string(), - value: misbehaviour.encode_vec(), - }, - } - } -} - -impl core::fmt::Display for AnyMisbehaviour { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { - match self { - AnyMisbehaviour::Tendermint(tm) => write!(f, "{}", tm), - - #[cfg(any(test, feature = "mocks"))] - AnyMisbehaviour::Mock(mock) => write!(f, "{:?}", mock), - } - } -} - -#[derive(Clone, Debug, PartialEq)] -pub struct MisbehaviourEvidence { - pub misbehaviour: AnyMisbehaviour, - pub supporting_headers: Vec, + /// The type of client (eg. Tendermint) + fn client_id(&self) -> &ClientId; + + /// The height of the consensus state + fn height(&self) -> Height; + + fn downcast(self) -> Option + where + Self: 'static, + { + ::downcast_ref(&self).cloned() + } + + fn wrap(sub_state: &dyn core::any::Any) -> Option + where + Self: 'static, + { + sub_state.downcast_ref::().cloned() + } + + fn encode_to_vec(&self) -> Vec; } diff --git a/modules/src/core/ics02_client/msgs.rs b/modules/src/core/ics02_client/msgs.rs index 3131be9beb..3ea8bfc188 100644 --- a/modules/src/core/ics02_client/msgs.rs +++ b/modules/src/core/ics02_client/msgs.rs @@ -4,10 +4,13 @@ //! subsequently calls into the chain-specific (e.g., ICS 07) client handler. See: //! . 
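// Editorial aside, not part of the patch: after this change `ClientMsg` (shown
// below) is generic over the `ClientKeeper` context, so each variant carries
// the context's own client-state, header and misbehaviour types instead of the
// old `Any*` enums. A self-contained sketch of that shape and of a match-based
// dispatcher; `SketchKeeper`, `SketchClientMsg` and `sketch_dispatch` are
// illustrative stand-ins, not the crate's API.
trait SketchKeeper {
    type Header: core::fmt::Debug;
}

enum SketchClientMsg<K: SketchKeeper> {
    // The real enum also has CreateClient, Misbehaviour and UpgradeClient variants.
    UpdateClient { client_id: String, header: K::Header },
}

fn sketch_dispatch<K: SketchKeeper>(msg: SketchClientMsg<K>) -> Result<String, String> {
    match msg {
        SketchClientMsg::UpdateClient { client_id, header } => {
            // A real handler would verify the header against the stored client state here.
            Ok(format!("update client {} with header {:?}", client_id, header))
        }
    }
}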
-use crate::core::ics02_client::msgs::create_client::MsgCreateAnyClient; -use crate::core::ics02_client::msgs::misbehavior::MsgSubmitAnyMisbehaviour; -use crate::core::ics02_client::msgs::update_client::MsgUpdateAnyClient; -use crate::core::ics02_client::msgs::upgrade_client::MsgUpgradeAnyClient; +use crate::core::ics02_client::{ + context::ClientKeeper, + msgs::{ + create_client::MsgCreateAnyClient, misbehavior::MsgSubmitAnyMisbehaviour, + update_client::MsgUpdateAnyClient, upgrade_client::MsgUpgradeAnyClient, + }, +}; pub mod create_client; pub mod misbehavior; @@ -16,9 +19,9 @@ pub mod upgrade_client; #[allow(clippy::large_enum_variant)] #[derive(Clone, Debug)] -pub enum ClientMsg { - CreateClient(MsgCreateAnyClient), - UpdateClient(MsgUpdateAnyClient), - Misbehaviour(MsgSubmitAnyMisbehaviour), - UpgradeClient(MsgUpgradeAnyClient), +pub enum ClientMsg { + CreateClient(MsgCreateAnyClient), + UpdateClient(MsgUpdateAnyClient), + Misbehaviour(MsgSubmitAnyMisbehaviour), + UpgradeClient(MsgUpgradeAnyClient), } diff --git a/modules/src/core/ics02_client/msgs/create_client.rs b/modules/src/core/ics02_client/msgs/create_client.rs index a006b1b8c8..7ebcaba66a 100644 --- a/modules/src/core/ics02_client/msgs/create_client.rs +++ b/modules/src/core/ics02_client/msgs/create_client.rs @@ -1,125 +1,110 @@ //! Definition of domain type message `MsgCreateAnyClient`. use crate::prelude::*; +use core::fmt::Display; +use ibc_proto::google::protobuf::Any; use tendermint_proto::Protobuf; -use ibc_proto::ibc::core::client::v1::MsgCreateClient as RawMsgCreateClient; +use ibc_proto::ibc::core::client::v1::{MsgCreateClient as RawMsgCreateClient, MsgCreateClient}; -use crate::core::ics02_client::client_consensus::AnyConsensusState; -use crate::core::ics02_client::client_state::AnyClientState; -use crate::core::ics02_client::error::Error; -use crate::signer::Signer; -use crate::tx_msg::Msg; +use crate::{ + core::ics02_client::{context::ClientKeeper, error::Error}, + signer::Signer, + tx_msg::Msg, +}; pub const TYPE_URL: &str = "/ibc.core.client.v1.MsgCreateClient"; /// A type of message that triggers the creation of a new on-chain (IBC) client. 
#[derive(Clone, Debug, PartialEq, Eq)] -pub struct MsgCreateAnyClient { - pub client_state: AnyClientState, - pub consensus_state: AnyConsensusState, - pub signer: Signer, +pub struct MsgCreateAnyClient { + pub client_state: C::AnyClientState, + pub consensus_state: C::AnyConsensusState, + pub signer: Signer, } -impl MsgCreateAnyClient { - pub fn new( - client_state: AnyClientState, - consensus_state: AnyConsensusState, - signer: Signer, - ) -> Result { - if client_state.client_type() != consensus_state.client_type() { - return Err(Error::raw_client_and_consensus_state_types_mismatch( - client_state.client_type(), - consensus_state.client_type(), - )); - } - - Ok(MsgCreateAnyClient { - client_state, - consensus_state, - signer, - }) - } +impl MsgCreateAnyClient { + pub fn new( + client_state: C::AnyClientState, + consensus_state: C::AnyConsensusState, + signer: Signer, + ) -> Result { + // if client_state.client_type() != consensus_state.client_type() { + // return Err(Error::raw_client_and_consensus_state_types_mismatch( + // client_state.client_type(), + // consensus_state.client_type(), + // )); + // } + + Ok(MsgCreateAnyClient { client_state, consensus_state, signer }) + } } -impl Msg for MsgCreateAnyClient { - type ValidationError = crate::core::ics24_host::error::ValidationError; - type Raw = RawMsgCreateClient; - - fn route(&self) -> String { - crate::keys::ROUTER_KEY.to_string() - } - - fn type_url(&self) -> String { - TYPE_URL.to_string() - } +impl Msg for MsgCreateAnyClient +where + C: ClientKeeper + Clone, + Any: From, + Any: From, +{ + type ValidationError = crate::core::ics24_host::error::ValidationError; + type Raw = RawMsgCreateClient; + + fn route(&self) -> String { + crate::keys::ROUTER_KEY.to_string() + } + + fn type_url(&self) -> String { + TYPE_URL.to_string() + } } -impl Protobuf for MsgCreateAnyClient {} - -impl TryFrom for MsgCreateAnyClient { - type Error = Error; - - fn try_from(raw: RawMsgCreateClient) -> Result { - let raw_client_state = raw - .client_state - .ok_or_else(Error::missing_raw_client_state)?; - - let consensus_state = raw - .consensus_state - .and_then(|cs| AnyConsensusState::try_from(cs).ok()) - .ok_or_else(Error::missing_raw_consensus_state)?; - - MsgCreateAnyClient::new( - AnyClientState::try_from(raw_client_state)?, - consensus_state, - raw.signer.parse().map_err(Error::signer)?, - ) - } +impl Protobuf for MsgCreateAnyClient +where + C: ClientKeeper + Clone, + Any: From, + Any: From, + MsgCreateAnyClient: TryFrom, + as TryFrom>::Error: Display, +{ } -impl From for RawMsgCreateClient { - fn from(ics_msg: MsgCreateAnyClient) -> Self { - RawMsgCreateClient { - client_state: Some(ics_msg.client_state.into()), - consensus_state: Some(ics_msg.consensus_state.into()), - signer: ics_msg.signer.to_string(), - } - } +impl TryFrom for MsgCreateAnyClient +where + C: ClientKeeper, + C::AnyClientState: TryFrom, + C::AnyConsensusState: TryFrom, + Error: From<>::Error>, +{ + type Error = Error; + + fn try_from(raw: RawMsgCreateClient) -> Result { + let raw_client_state = raw.client_state.ok_or_else(Error::missing_raw_client_state)?; + + let consensus_state = raw + .consensus_state + .and_then(|cs| C::AnyConsensusState::try_from(cs).ok()) + .ok_or_else(Error::missing_raw_consensus_state)?; + + MsgCreateAnyClient::new( + C::AnyClientState::try_from(raw_client_state)?, + consensus_state, + raw.signer.parse().map_err(Error::signer)?, + ) + } } -#[cfg(test)] -mod tests { - - use test_log::test; - - use ibc_proto::ibc::core::client::v1::MsgCreateClient; - - use 
crate::clients::ics07_tendermint::client_state::test_util::get_dummy_tendermint_client_state; - use crate::clients::ics07_tendermint::header::test_util::get_dummy_tendermint_header; - use crate::core::ics02_client::client_consensus::AnyConsensusState; - use crate::core::ics02_client::msgs::MsgCreateAnyClient; - use crate::test_utils::get_dummy_account_id; - - #[test] - fn msg_create_client_serialization() { - let signer = get_dummy_account_id(); - - let tm_header = get_dummy_tendermint_header(); - let tm_client_state = get_dummy_tendermint_client_state(tm_header.clone()); - - let msg = MsgCreateAnyClient::new( - tm_client_state, - AnyConsensusState::Tendermint(tm_header.try_into().unwrap()), - signer, - ) - .unwrap(); - - let raw = MsgCreateClient::from(msg.clone()); - let msg_back = MsgCreateAnyClient::try_from(raw.clone()).unwrap(); - let raw_back = MsgCreateClient::from(msg_back.clone()); - assert_eq!(msg, msg_back); - assert_eq!(raw, raw_back); - } +impl From> for RawMsgCreateClient +where + C: ClientKeeper, + Any: From, + Any: From, +{ + fn from(ics_msg: MsgCreateAnyClient) -> Self { + RawMsgCreateClient { + client_state: Some(ics_msg.client_state.into()), + consensus_state: Some(ics_msg.consensus_state.into()), + signer: ics_msg.signer.to_string(), + } + } } diff --git a/modules/src/core/ics02_client/msgs/misbehavior.rs b/modules/src/core/ics02_client/msgs/misbehavior.rs index 648aaf9d2f..432538ea86 100644 --- a/modules/src/core/ics02_client/msgs/misbehavior.rs +++ b/modules/src/core/ics02_client/msgs/misbehavior.rs @@ -1,67 +1,82 @@ use crate::prelude::*; +use core::fmt::Display; -use ibc_proto::ibc::core::client::v1::MsgSubmitMisbehaviour as RawMsgSubmitMisbehaviour; +use crate::core::ics02_client::context::ClientKeeper; +use ibc_proto::{ + google::protobuf::Any, ibc::core::client::v1::MsgSubmitMisbehaviour as RawMsgSubmitMisbehaviour, +}; use tendermint_proto::Protobuf; -use crate::core::ics02_client::error::Error; -use crate::core::ics02_client::misbehaviour::AnyMisbehaviour; -use crate::core::ics24_host::identifier::ClientId; -use crate::signer::Signer; -use crate::tx_msg::Msg; +use crate::{ + core::{ics02_client::error::Error, ics24_host::identifier::ClientId}, + signer::Signer, + tx_msg::Msg, +}; pub const TYPE_URL: &str = "/ibc.core.client.v1.MsgSubmitMisbehaviour"; /// A type of message that submits client misbehaviour proof. 
#[derive(Clone, Debug, PartialEq)] -pub struct MsgSubmitAnyMisbehaviour { - /// client unique identifier - pub client_id: ClientId, - /// misbehaviour used for freezing the light client - pub misbehaviour: AnyMisbehaviour, - /// signer address - pub signer: Signer, +pub struct MsgSubmitAnyMisbehaviour { + /// client unique identifier + pub client_id: ClientId, + /// misbehaviour used for freezing the light client + pub misbehaviour: C::AnyMisbehaviour, + /// signer address + pub signer: Signer, } -impl Msg for MsgSubmitAnyMisbehaviour { - type ValidationError = crate::core::ics24_host::error::ValidationError; - type Raw = RawMsgSubmitMisbehaviour; +impl Msg for MsgSubmitAnyMisbehaviour +where + Any: From, +{ + type ValidationError = crate::core::ics24_host::error::ValidationError; + type Raw = RawMsgSubmitMisbehaviour; - fn route(&self) -> String { - crate::keys::ROUTER_KEY.to_string() - } + fn route(&self) -> String { + crate::keys::ROUTER_KEY.to_string() + } - fn type_url(&self) -> String { - TYPE_URL.to_string() - } + fn type_url(&self) -> String { + TYPE_URL.to_string() + } } -impl Protobuf for MsgSubmitAnyMisbehaviour {} +impl Protobuf for MsgSubmitAnyMisbehaviour +where + Any: From, + MsgSubmitAnyMisbehaviour: TryFrom, + as TryFrom>::Error: Display, +{ +} -impl TryFrom for MsgSubmitAnyMisbehaviour { - type Error = Error; +impl TryFrom for MsgSubmitAnyMisbehaviour +where + C::AnyMisbehaviour: TryFrom, + Error: From<>::Error>, +{ + type Error = Error; - fn try_from(raw: RawMsgSubmitMisbehaviour) -> Result { - let raw_misbehaviour = raw - .misbehaviour - .ok_or_else(Error::missing_raw_misbehaviour)?; + fn try_from(raw: RawMsgSubmitMisbehaviour) -> Result { + let raw_misbehaviour = raw.misbehaviour.ok_or_else(Error::missing_raw_misbehaviour)?; - Ok(MsgSubmitAnyMisbehaviour { - client_id: raw - .client_id - .parse() - .map_err(Error::invalid_raw_misbehaviour)?, - misbehaviour: AnyMisbehaviour::try_from(raw_misbehaviour)?, - signer: raw.signer.parse().map_err(Error::signer)?, - }) - } + Ok(MsgSubmitAnyMisbehaviour { + client_id: raw.client_id.parse().map_err(Error::invalid_raw_misbehaviour)?, + misbehaviour: C::AnyMisbehaviour::try_from(raw_misbehaviour)?, + signer: raw.signer.parse().map_err(Error::signer)?, + }) + } } -impl From for RawMsgSubmitMisbehaviour { - fn from(ics_msg: MsgSubmitAnyMisbehaviour) -> Self { - RawMsgSubmitMisbehaviour { - client_id: ics_msg.client_id.to_string(), - misbehaviour: Some(ics_msg.misbehaviour.into()), - signer: ics_msg.signer.to_string(), - } - } +impl From> for RawMsgSubmitMisbehaviour +where + Any: From, +{ + fn from(ics_msg: MsgSubmitAnyMisbehaviour) -> Self { + RawMsgSubmitMisbehaviour { + client_id: ics_msg.client_id.to_string(), + misbehaviour: Some(ics_msg.misbehaviour.into()), + signer: ics_msg.signer.to_string(), + } + } } diff --git a/modules/src/core/ics02_client/msgs/update_client.rs b/modules/src/core/ics02_client/msgs/update_client.rs index b7cdc4c53d..6110c3f14d 100644 --- a/modules/src/core/ics02_client/msgs/update_client.rs +++ b/modules/src/core/ics02_client/msgs/update_client.rs @@ -1,105 +1,134 @@ //! Definition of domain type message `MsgUpdateAnyClient`. 
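// Editorial aside, not part of the patch: a minimal usage sketch of the generic
// update-client message, modelled directly on the mock-based test earlier in
// this patch; it assumes the crate's `mocks` test setup and the import paths
// used by that test.
#[cfg(test)]
#[allow(dead_code)]
fn sketch_update_client_usage() {
    use crate::{
        core::{
            ics02_client::{
                handler::dispatch,
                msgs::{update_client::MsgUpdateAnyClient, ClientMsg},
            },
            ics24_host::identifier::ClientId,
        },
        mock::{
            context::{MockClientTypes, MockContext},
            header::MockHeader,
        },
        test_utils::get_dummy_account_id,
        Height,
    };

    let client_id = ClientId::default();
    let signer = get_dummy_account_id();
    // Host context already tracks this client at height 0-42.
    let ctx =
        MockContext::<MockClientTypes>::default().with_client(&client_id, Height::new(0, 42));
    // Ask the handler to advance the client to a header at height 0-46.
    let msg = MsgUpdateAnyClient {
        client_id,
        header: MockHeader::new(Height::new(0, 46)).into(),
        signer,
    };
    let _output = dispatch(&ctx, ClientMsg::UpdateClient(msg));
}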
use crate::prelude::*; +use core::fmt::Display; +use ibc_proto::google::protobuf::Any; use tendermint_proto::Protobuf; -use ibc_proto::ibc::core::client::v1::MsgUpdateClient as RawMsgUpdateClient; +use crate::core::ics02_client::context::ClientKeeper; +use ibc_proto::ibc::core::client::v1::{MsgUpdateClient as RawMsgUpdateClient, MsgUpdateClient}; use crate::core::ics02_client::error::Error; -use crate::core::ics02_client::header::AnyHeader; -use crate::core::ics24_host::error::ValidationError; -use crate::core::ics24_host::identifier::ClientId; -use crate::signer::Signer; -use crate::tx_msg::Msg; + +use crate::{ + core::ics24_host::{error::ValidationError, identifier::ClientId}, + signer::Signer, + tx_msg::Msg, +}; pub const TYPE_URL: &str = "/ibc.core.client.v1.MsgUpdateClient"; /// A type of message that triggers the update of an on-chain (IBC) client with new headers. #[derive(Clone, Debug, PartialEq)] // TODO: Add Eq bound when possible -pub struct MsgUpdateAnyClient { - pub client_id: ClientId, - pub header: AnyHeader, - pub signer: Signer, +pub struct MsgUpdateAnyClient { + pub client_id: ClientId, + pub header: C::AnyHeader, + pub signer: Signer, } -impl MsgUpdateAnyClient { - pub fn new(client_id: ClientId, header: AnyHeader, signer: Signer) -> Self { - MsgUpdateAnyClient { - client_id, - header, - signer, - } - } +impl MsgUpdateAnyClient +where + C: ClientKeeper, +{ + pub fn new(client_id: ClientId, header: C::AnyHeader, signer: Signer) -> Self { + MsgUpdateAnyClient { client_id, header, signer } + } } -impl Msg for MsgUpdateAnyClient { - type ValidationError = ValidationError; - type Raw = RawMsgUpdateClient; - - fn route(&self) -> String { - crate::keys::ROUTER_KEY.to_string() - } - - fn type_url(&self) -> String { - TYPE_URL.to_string() - } +impl Msg for MsgUpdateAnyClient +where + C: ClientKeeper + Clone, + C::AnyHeader: Clone, + Any: From, +{ + type ValidationError = ValidationError; + type Raw = RawMsgUpdateClient; + + fn route(&self) -> String { + crate::keys::ROUTER_KEY.to_string() + } + + fn type_url(&self) -> String { + TYPE_URL.to_string() + } } -impl Protobuf for MsgUpdateAnyClient {} - -impl TryFrom for MsgUpdateAnyClient { - type Error = Error; - - fn try_from(raw: RawMsgUpdateClient) -> Result { - let raw_header = raw.header.ok_or_else(Error::missing_raw_header)?; +impl Protobuf for MsgUpdateAnyClient +where + C: ClientKeeper + Clone, + C::AnyHeader: Clone, + Any: From, + MsgUpdateAnyClient: TryFrom, + as TryFrom>::Error: Display, +{ +} - Ok(MsgUpdateAnyClient { - client_id: raw - .client_id - .parse() - .map_err(Error::invalid_msg_update_client_id)?, - header: AnyHeader::try_from(raw_header)?, - signer: raw.signer.parse().map_err(Error::signer)?, - }) - } +impl TryFrom for MsgUpdateAnyClient +where + C: ClientKeeper, + C::AnyHeader: TryFrom, + Error: From<>::Error>, +{ + type Error = Error; + + fn try_from(raw: RawMsgUpdateClient) -> Result { + let raw_header = raw.header.ok_or_else(Error::missing_raw_header)?; + + Ok(MsgUpdateAnyClient { + client_id: raw.client_id.parse().map_err(Error::invalid_msg_update_client_id)?, + header: C::AnyHeader::try_from(raw_header)?, + signer: raw.signer.parse().map_err(Error::signer)?, + }) + } } -impl From for RawMsgUpdateClient { - fn from(ics_msg: MsgUpdateAnyClient) -> Self { - RawMsgUpdateClient { - client_id: ics_msg.client_id.to_string(), - header: Some(ics_msg.header.into()), - signer: ics_msg.signer.to_string(), - } - } +impl From> for RawMsgUpdateClient +where + C: ClientKeeper, + Any: From, +{ + fn from(ics_msg: 
MsgUpdateAnyClient) -> Self { + RawMsgUpdateClient { + client_id: ics_msg.client_id.to_string(), + header: Some(ics_msg.header.into()), + signer: ics_msg.signer.to_string(), + } + } } +/* #[cfg(test)] mod tests { - use test_log::test; - - use ibc_proto::ibc::core::client::v1::MsgUpdateClient; - - use crate::clients::ics07_tendermint::header::test_util::get_dummy_ics07_header; - use crate::core::ics02_client::header::AnyHeader; - use crate::core::ics02_client::msgs::MsgUpdateAnyClient; - use crate::core::ics24_host::identifier::ClientId; - use crate::test_utils::get_dummy_account_id; - - #[test] - fn msg_update_client_serialization() { - let client_id: ClientId = "tendermint".parse().unwrap(); - let signer = get_dummy_account_id(); - - let header = get_dummy_ics07_header(); - - let msg = MsgUpdateAnyClient::new(client_id, AnyHeader::Tendermint(header), signer); - let raw = MsgUpdateClient::from(msg.clone()); - let msg_back = MsgUpdateAnyClient::try_from(raw.clone()).unwrap(); - let raw_back = MsgUpdateClient::from(msg_back.clone()); - assert_eq!(msg, msg_back); - assert_eq!(raw, raw_back); - } + use test_log::test; + + use ibc_proto::ibc::core::client::v1::MsgUpdateClient; + + use crate::clients::ics07_tendermint::header::test_util::get_dummy_ics07_header; + use crate::core::ics02_client::header::AnyHeader; + use crate::core::ics02_client::msgs::MsgUpdateAnyClient; + use crate::core::ics24_host::identifier::ClientId; + use crate::mock::context::MockContext; + use crate::test_utils::get_dummy_account_id; + + #[test] + fn msg_update_client_serialization() { + let client_id: ClientId = "tendermint".parse().unwrap(); + let signer = get_dummy_account_id(); + + let header = get_dummy_ics07_header(); + + let msg = MsgUpdateAnyClient::::new( + client_id, + AnyHeader::Tendermint(header), + signer, + ); + let raw = MsgUpdateClient::from(msg.clone()); + let msg_back = MsgUpdateAnyClient::try_from(raw.clone()).unwrap(); + let raw_back = MsgUpdateClient::from(msg_back.clone()); + assert_eq!(msg, msg_back); + assert_eq!(raw, raw_back); + } } + */ diff --git a/modules/src/core/ics02_client/msgs/upgrade_client.rs b/modules/src/core/ics02_client/msgs/upgrade_client.rs index fe5b94347c..ae24fe73e7 100644 --- a/modules/src/core/ics02_client/msgs/upgrade_client.rs +++ b/modules/src/core/ics02_client/msgs/upgrade_client.rs @@ -1,196 +1,221 @@ //! Definition of domain type msg `MsgUpgradeAnyClient`. use crate::prelude::*; +use core::fmt::Display; use core::str::FromStr; +use ibc_proto::google::protobuf::Any; use tendermint_proto::Protobuf; -use ibc_proto::ibc::core::client::v1::MsgUpgradeClient as RawMsgUpgradeClient; +use ibc_proto::ibc::core::client::v1::{MsgUpgradeClient as RawMsgUpgradeClient, MsgUpgradeClient}; -use crate::core::ics02_client::client_consensus::AnyConsensusState; -use crate::core::ics02_client::client_state::AnyClientState; -use crate::core::ics02_client::error::Error; -use crate::core::ics24_host::identifier::ClientId; -use crate::signer::Signer; -use crate::tx_msg::Msg; +use crate::{ + core::{ + ics02_client::{context::ClientKeeper, error::Error}, + ics24_host::identifier::ClientId, + }, + signer::Signer, + tx_msg::Msg, +}; pub(crate) const TYPE_URL: &str = "/ibc.core.client.v1.MsgUpgradeClient"; /// A type of message that triggers the upgrade of an on-chain (IBC) client. 
#[derive(Clone, Debug, PartialEq)] -pub struct MsgUpgradeAnyClient { - pub client_id: ClientId, - pub client_state: AnyClientState, - pub consensus_state: AnyConsensusState, - pub proof_upgrade_client: Vec, - pub proof_upgrade_consensus_state: Vec, - pub signer: Signer, +pub struct MsgUpgradeAnyClient { + pub client_id: ClientId, + pub client_state: C::AnyClientState, + pub consensus_state: C::AnyConsensusState, + pub proof_upgrade_client: Vec, + pub proof_upgrade_consensus_state: Vec, + pub signer: Signer, } -impl MsgUpgradeAnyClient { - pub fn new( - client_id: ClientId, - client_state: AnyClientState, - consensus_state: AnyConsensusState, - proof_upgrade_client: Vec, - proof_upgrade_consensus_state: Vec, - signer: Signer, - ) -> Self { - MsgUpgradeAnyClient { - client_id, - client_state, - consensus_state, - proof_upgrade_client, - proof_upgrade_consensus_state, - signer, - } - } +impl MsgUpgradeAnyClient { + pub fn new( + client_id: ClientId, + client_state: C::AnyClientState, + consensus_state: C::AnyConsensusState, + proof_upgrade_client: Vec, + proof_upgrade_consensus_state: Vec, + signer: Signer, + ) -> Self { + MsgUpgradeAnyClient { + client_id, + client_state, + consensus_state, + proof_upgrade_client, + proof_upgrade_consensus_state, + signer, + } + } } -impl Msg for MsgUpgradeAnyClient { - type ValidationError = crate::core::ics24_host::error::ValidationError; - type Raw = RawMsgUpgradeClient; - - fn route(&self) -> String { - crate::keys::ROUTER_KEY.to_string() - } +impl Msg for MsgUpgradeAnyClient +where + C: ClientKeeper + Clone, + Any: From, + Any: From, +{ + type ValidationError = crate::core::ics24_host::error::ValidationError; + type Raw = RawMsgUpgradeClient; + + fn route(&self) -> String { + crate::keys::ROUTER_KEY.to_string() + } + + fn type_url(&self) -> String { + TYPE_URL.to_string() + } +} - fn type_url(&self) -> String { - TYPE_URL.to_string() - } +impl Protobuf for MsgUpgradeAnyClient +where + C: ClientKeeper + Clone, + Any: From, + Any: From, + MsgUpgradeAnyClient: TryFrom, + as TryFrom>::Error: Display, +{ } -impl Protobuf for MsgUpgradeAnyClient {} - -impl From for RawMsgUpgradeClient { - fn from(dm_msg: MsgUpgradeAnyClient) -> RawMsgUpgradeClient { - RawMsgUpgradeClient { - client_id: dm_msg.client_id.to_string(), - client_state: Some(dm_msg.client_state.into()), - consensus_state: Some(dm_msg.consensus_state.into()), - proof_upgrade_client: dm_msg.proof_upgrade_client, - proof_upgrade_consensus_state: dm_msg.proof_upgrade_consensus_state, - signer: dm_msg.signer.to_string(), - } - } +impl From> for RawMsgUpgradeClient +where + C: ClientKeeper, + Any: From, + Any: From, +{ + fn from(dm_msg: MsgUpgradeAnyClient) -> RawMsgUpgradeClient { + RawMsgUpgradeClient { + client_id: dm_msg.client_id.to_string(), + client_state: Some(dm_msg.client_state.into()), + consensus_state: Some(dm_msg.consensus_state.into()), + proof_upgrade_client: dm_msg.proof_upgrade_client, + proof_upgrade_consensus_state: dm_msg.proof_upgrade_consensus_state, + signer: dm_msg.signer.to_string(), + } + } } -impl TryFrom for MsgUpgradeAnyClient { - type Error = Error; - - fn try_from(proto_msg: RawMsgUpgradeClient) -> Result { - let raw_client_state = proto_msg - .client_state - .ok_or_else(Error::missing_raw_client_state)?; - - let raw_consensus_state = proto_msg - .consensus_state - .ok_or_else(Error::missing_raw_client_state)?; - - Ok(MsgUpgradeAnyClient { - client_id: ClientId::from_str(&proto_msg.client_id) - .map_err(Error::invalid_client_identifier)?, - client_state: 
AnyClientState::try_from(raw_client_state)?, - consensus_state: AnyConsensusState::try_from(raw_consensus_state)?, - proof_upgrade_client: proto_msg.proof_upgrade_client, - proof_upgrade_consensus_state: proto_msg.proof_upgrade_consensus_state, - signer: Signer::from_str(proto_msg.signer.as_str()).map_err(Error::signer)?, - }) - } +impl TryFrom for MsgUpgradeAnyClient +where + C: ClientKeeper, + C::AnyClientState: TryFrom, + C::AnyConsensusState: TryFrom, + Error: From<>::Error>, + Error: From<>::Error>, +{ + type Error = Error; + + fn try_from(proto_msg: RawMsgUpgradeClient) -> Result { + let raw_client_state = + proto_msg.client_state.ok_or_else(Error::missing_raw_client_state)?; + + let raw_consensus_state = + proto_msg.consensus_state.ok_or_else(Error::missing_raw_client_state)?; + + Ok(MsgUpgradeAnyClient { + client_id: ClientId::from_str(&proto_msg.client_id) + .map_err(Error::invalid_client_identifier)?, + client_state: C::AnyClientState::try_from(raw_client_state)?, + consensus_state: C::AnyConsensusState::try_from(raw_consensus_state)?, + proof_upgrade_client: proto_msg.proof_upgrade_client, + proof_upgrade_consensus_state: proto_msg.proof_upgrade_consensus_state, + signer: Signer::from_str(proto_msg.signer.as_str()).map_err(Error::signer)?, + }) + } } #[cfg(test)] pub mod test_util { - use ibc_proto::ibc::core::client::v1::MsgUpgradeClient as RawMsgUpgradeClient; - - use crate::{ - core::{ - ics02_client::{ - client_consensus::AnyConsensusState, client_state::AnyClientState, height::Height, - }, - ics24_host::identifier::ClientId, - }, - mock::{ - client_state::{MockClientState, MockConsensusState}, - header::MockHeader, - }, - test_utils::{get_dummy_bech32_account, get_dummy_proof}, - }; - - use super::MsgUpgradeAnyClient; - - /// Extends the implementation with additional helper methods. - impl MsgUpgradeAnyClient { - /// Setter for `client_id`. Amenable to chaining, since it consumes the input message. - pub fn with_client_id(self, client_id: ClientId) -> Self { - MsgUpgradeAnyClient { client_id, ..self } - } - } - - /// Returns a dummy `RawMsgUpgradeClient`, for testing only! - pub fn get_dummy_raw_msg_upgrade_client(height: Height) -> RawMsgUpgradeClient { - RawMsgUpgradeClient { - client_id: "tendermint".parse().unwrap(), - client_state: Some( - AnyClientState::Mock(MockClientState::new(MockHeader::new(height))).into(), - ), - consensus_state: Some( - AnyConsensusState::Mock(MockConsensusState::new(MockHeader::new(height))).into(), - ), - proof_upgrade_client: get_dummy_proof(), - proof_upgrade_consensus_state: get_dummy_proof(), - signer: get_dummy_bech32_account(), - } - } + use ibc_proto::ibc::core::client::v1::MsgUpgradeClient as RawMsgUpgradeClient; + + use crate::{ + core::{ics02_client::height::Height, ics24_host::identifier::ClientId}, + mock::{ + client_state::{ + AnyClientState, AnyConsensusState, MockClientState, MockConsensusState, + }, + context::{MockClientTypes, MockContext}, + header::MockHeader, + }, + test_utils::{get_dummy_bech32_account, get_dummy_proof}, + }; + + use super::MsgUpgradeAnyClient; + + /// Extends the implementation with additional helper methods. + impl MsgUpgradeAnyClient> { + /// Setter for `client_id`. Amenable to chaining, since it consumes the input message. + pub fn with_client_id(self, client_id: ClientId) -> Self { + MsgUpgradeAnyClient { client_id, ..self } + } + } + + /// Returns a dummy `RawMsgUpgradeClient`, for testing only! 
+ pub fn get_dummy_raw_msg_upgrade_client(height: Height) -> RawMsgUpgradeClient { + RawMsgUpgradeClient { + client_id: "tendermint".parse().unwrap(), + client_state: Some( + AnyClientState::Mock(MockClientState::new(MockHeader::new(height))).into(), + ), + consensus_state: Some( + AnyConsensusState::Mock(MockConsensusState::new(MockHeader::new(height))).into(), + ), + proof_upgrade_client: get_dummy_proof(), + proof_upgrade_consensus_state: get_dummy_proof(), + signer: get_dummy_bech32_account(), + } + } } #[cfg(test)] mod tests { - use alloc::vec::Vec; - use ibc_proto::ibc::core::client::v1::MsgUpgradeClient as RawMsgUpgradeClient; - - use crate::{ - core::{ - ics02_client::{ - client_consensus::AnyConsensusState, client_state::AnyClientState, height::Height, - msgs::upgrade_client::MsgUpgradeAnyClient, - }, - ics23_commitment::commitment::test_util::get_dummy_merkle_proof, - ics24_host::identifier::ClientId, - }, - mock::{ - client_state::{MockClientState, MockConsensusState}, - header::MockHeader, - }, - test_utils::get_dummy_account_id, - }; - - #[test] - fn msg_upgrade_client_serialization() { - let client_id: ClientId = "tendermint".parse().unwrap(); - let signer = get_dummy_account_id(); - - let height = Height::new(1, 1); - - let client_state = AnyClientState::Mock(MockClientState::new(MockHeader::new(height))); - let consensus_state = - AnyConsensusState::Mock(MockConsensusState::new(MockHeader::new(height))); - - let proof = get_dummy_merkle_proof(); - let mut proof_buf = Vec::new(); - prost::Message::encode(&proof, &mut proof_buf).unwrap(); - let msg = MsgUpgradeAnyClient::new( - client_id, - client_state, - consensus_state, - proof_buf.clone(), - proof_buf, - signer, - ); - let raw: RawMsgUpgradeClient = RawMsgUpgradeClient::from(msg.clone()); - let msg_back = MsgUpgradeAnyClient::try_from(raw.clone()).unwrap(); - let raw_back: RawMsgUpgradeClient = RawMsgUpgradeClient::from(msg_back.clone()); - assert_eq!(msg, msg_back); - assert_eq!(raw, raw_back); - } + use alloc::vec::Vec; + use ibc_proto::ibc::core::client::v1::MsgUpgradeClient as RawMsgUpgradeClient; + + use crate::{ + core::{ + ics02_client::{height::Height, msgs::upgrade_client::MsgUpgradeAnyClient}, + ics23_commitment::commitment::test_util::get_dummy_merkle_proof, + ics24_host::identifier::ClientId, + }, + mock::{ + client_state::{ + AnyClientState, AnyConsensusState, MockClientState, MockConsensusState, + }, + context::{MockClientTypes, MockContext}, + header::MockHeader, + }, + test_utils::get_dummy_account_id, + }; + + #[test] + fn msg_upgrade_client_serialization() { + let client_id: ClientId = "tendermint".parse().unwrap(); + let signer = get_dummy_account_id(); + + let height = Height::new(1, 1); + + let client_state = AnyClientState::Mock(MockClientState::new(MockHeader::new(height))); + let consensus_state = + AnyConsensusState::Mock(MockConsensusState::new(MockHeader::new(height))); + + let proof = get_dummy_merkle_proof(); + let mut proof_buf = Vec::new(); + prost::Message::encode(&proof, &mut proof_buf).unwrap(); + let msg = MsgUpgradeAnyClient::>::new( + client_id, + client_state, + consensus_state, + proof_buf.clone(), + proof_buf, + signer, + ); + let raw: RawMsgUpgradeClient = RawMsgUpgradeClient::from(msg.clone()); + let msg_back = MsgUpgradeAnyClient::try_from(raw.clone()).unwrap(); + let raw_back: RawMsgUpgradeClient = RawMsgUpgradeClient::from(msg_back.clone()); + assert_eq!(msg, msg_back); + assert_eq!(raw, raw_back); + } } diff --git a/modules/src/core/ics02_client/trust_threshold.rs 
b/modules/src/core/ics02_client/trust_threshold.rs index ca5db5e2f8..5cd70ab8fe 100644 --- a/modules/src/core/ics02_client/trust_threshold.rs +++ b/modules/src/core/ics02_client/trust_threshold.rs @@ -22,109 +22,91 @@ use crate::core::ics02_client::error::Error; /// which is used in the client state of an upgrading client. #[derive(Copy, Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] pub struct TrustThreshold { - numerator: u64, - denominator: u64, + numerator: u64, + denominator: u64, } impl TrustThreshold { - /// Constant for a trust threshold of 1/3. - pub const ONE_THIRD: Self = Self { - numerator: 1, - denominator: 3, - }; - - /// Constant for a trust threshold of 2/3. - pub const TWO_THIRDS: Self = Self { - numerator: 2, - denominator: 3, - }; - - /// Constant for a trust threshold of 0/0. - pub const ZERO: Self = Self { - numerator: 0, - denominator: 0, - }; - - /// Instantiate a TrustThreshold with the given denominator and - /// numerator. - /// - /// The constructor succeeds if long as the resulting fraction - /// is in the range`[0, 1)`. - pub fn new(numerator: u64, denominator: u64) -> Result { - // The two parameters cannot yield a fraction that is bigger or equal to 1 - if (numerator > denominator) - || (denominator == 0 && numerator != 0) - || (numerator == denominator && numerator != 0) - { - return Err(Error::invalid_trust_threshold(numerator, denominator)); - } - - Ok(Self { - numerator, - denominator, - }) - } - - /// The numerator of the fraction underlying this trust threshold. - pub fn numerator(&self) -> u64 { - self.numerator - } - - /// The denominator of the fraction underlying this trust threshold. - pub fn denominator(&self) -> u64 { - self.denominator - } + /// Constant for a trust threshold of 1/3. + pub const ONE_THIRD: Self = Self { numerator: 1, denominator: 3 }; + + /// Constant for a trust threshold of 2/3. + pub const TWO_THIRDS: Self = Self { numerator: 2, denominator: 3 }; + + /// Constant for a trust threshold of 0/0. + pub const ZERO: Self = Self { numerator: 0, denominator: 0 }; + + /// Instantiate a TrustThreshold with the given denominator and + /// numerator. + /// + /// The constructor succeeds if long as the resulting fraction + /// is in the range`[0, 1)`. + pub fn new(numerator: u64, denominator: u64) -> Result { + // The two parameters cannot yield a fraction that is bigger or equal to 1 + if (numerator > denominator) || + (denominator == 0 && numerator != 0) || + (numerator == denominator && numerator != 0) + { + return Err(Error::invalid_trust_threshold(numerator, denominator)) + } + + Ok(Self { numerator, denominator }) + } + + /// The numerator of the fraction underlying this trust threshold. + pub fn numerator(&self) -> u64 { + self.numerator + } + + /// The denominator of the fraction underlying this trust threshold. + pub fn denominator(&self) -> u64 { + self.denominator + } } /// Conversion from Tendermint domain type into /// IBC domain type. impl From for TrustThreshold { - fn from(t: TrustThresholdFraction) -> Self { - Self { - numerator: t.numerator(), - denominator: t.denominator(), - } - } + fn from(t: TrustThresholdFraction) -> Self { + Self { numerator: t.numerator(), denominator: t.denominator() } + } } /// Conversion from IBC domain type into /// Tendermint domain type. 
impl TryFrom for TrustThresholdFraction { - type Error = Error; + type Error = Error; - fn try_from(t: TrustThreshold) -> Result { - Self::new(t.numerator, t.denominator) - .map_err(|e| Error::failed_trust_threshold_conversion(t.numerator, t.denominator, e)) - } + fn try_from(t: TrustThreshold) -> Result { + Self::new(t.numerator, t.denominator) + .map_err(|e| Error::failed_trust_threshold_conversion(t.numerator, t.denominator, e)) + } } impl Protobuf for TrustThreshold {} impl From for Fraction { - fn from(t: TrustThreshold) -> Self { - Self { - numerator: t.numerator, - denominator: t.denominator, - } - } + fn from(t: TrustThreshold) -> Self { + Self { numerator: t.numerator, denominator: t.denominator } + } } impl TryFrom for TrustThreshold { - type Error = Error; + type Error = Error; - fn try_from(value: Fraction) -> Result { - Self::new(value.numerator, value.denominator) - } + fn try_from(value: Fraction) -> Result { + Self::new(value.numerator, value.denominator) + } } impl Default for TrustThreshold { - fn default() -> Self { - Self::ONE_THIRD - } + fn default() -> Self { + Self::ONE_THIRD + } } impl fmt::Display for TrustThreshold { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}/{}", self.numerator, self.denominator) - } + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}/{}", self.numerator, self.denominator) + } } diff --git a/modules/src/core/ics03_connection/connection.rs b/modules/src/core/ics03_connection/connection.rs index 793501f35b..ad29b1e9b0 100644 --- a/modules/src/core/ics03_connection/connection.rs +++ b/modules/src/core/ics03_connection/connection.rs @@ -1,248 +1,230 @@ use crate::prelude::*; -use core::str::FromStr; -use core::time::Duration; -use core::{fmt, u64}; +use core::{fmt, str::FromStr, time::Duration, u64}; use serde::{Deserialize, Serialize}; use tendermint_proto::Protobuf; use ibc_proto::ibc::core::connection::v1::{ - ConnectionEnd as RawConnectionEnd, Counterparty as RawCounterparty, - IdentifiedConnection as RawIdentifiedConnection, + ConnectionEnd as RawConnectionEnd, Counterparty as RawCounterparty, + IdentifiedConnection as RawIdentifiedConnection, }; -use crate::core::ics02_client::error::Error as ClientError; -use crate::core::ics03_connection::error::Error; -use crate::core::ics03_connection::version::Version; -use crate::core::ics23_commitment::commitment::CommitmentPrefix; -use crate::core::ics24_host::error::ValidationError; -use crate::core::ics24_host::identifier::{ClientId, ConnectionId}; -use crate::timestamp::ZERO_DURATION; +use crate::{ + core::{ + ics02_client::error::Error as ClientError, + ics03_connection::{error::Error, version::Version}, + ics23_commitment::commitment::CommitmentPrefix, + ics24_host::{ + error::ValidationError, + identifier::{ClientId, ConnectionId}, + }, + }, + timestamp::ZERO_DURATION, +}; #[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)] pub struct IdentifiedConnectionEnd { - pub connection_id: ConnectionId, - pub connection_end: ConnectionEnd, + pub connection_id: ConnectionId, + pub connection_end: ConnectionEnd, } impl IdentifiedConnectionEnd { - pub fn new(connection_id: ConnectionId, connection_end: ConnectionEnd) -> Self { - IdentifiedConnectionEnd { - connection_id, - connection_end, - } - } - - pub fn id(&self) -> &ConnectionId { - &self.connection_id - } - - pub fn end(&self) -> &ConnectionEnd { - &self.connection_end - } + pub fn new(connection_id: ConnectionId, connection_end: ConnectionEnd) -> Self { + 
IdentifiedConnectionEnd { connection_id, connection_end } + } + + pub fn id(&self) -> &ConnectionId { + &self.connection_id + } + + pub fn end(&self) -> &ConnectionEnd { + &self.connection_end + } } impl Protobuf for IdentifiedConnectionEnd {} impl TryFrom for IdentifiedConnectionEnd { - type Error = Error; - - fn try_from(value: RawIdentifiedConnection) -> Result { - let raw_connection_end = RawConnectionEnd { - client_id: value.client_id.to_string(), - versions: value.versions, - state: value.state, - counterparty: value.counterparty, - delay_period: value.delay_period, - }; - - Ok(IdentifiedConnectionEnd { - connection_id: value.id.parse().map_err(Error::invalid_identifier)?, - connection_end: raw_connection_end.try_into()?, - }) - } + type Error = Error; + + fn try_from(value: RawIdentifiedConnection) -> Result { + let raw_connection_end = RawConnectionEnd { + client_id: value.client_id.to_string(), + versions: value.versions, + state: value.state, + counterparty: value.counterparty, + delay_period: value.delay_period, + }; + + Ok(IdentifiedConnectionEnd { + connection_id: value.id.parse().map_err(Error::invalid_identifier)?, + connection_end: raw_connection_end.try_into()?, + }) + } } impl From for RawIdentifiedConnection { - fn from(value: IdentifiedConnectionEnd) -> Self { - RawIdentifiedConnection { - id: value.connection_id.to_string(), - client_id: value.connection_end.client_id.to_string(), - versions: value - .connection_end - .versions - .iter() - .map(|v| From::from(v.clone())) - .collect(), - state: value.connection_end.state as i32, - delay_period: value.connection_end.delay_period.as_nanos() as u64, - counterparty: Some(value.connection_end.counterparty().clone().into()), - } - } + fn from(value: IdentifiedConnectionEnd) -> Self { + RawIdentifiedConnection { + id: value.connection_id.to_string(), + client_id: value.connection_end.client_id.to_string(), + versions: value.connection_end.versions.iter().map(|v| From::from(v.clone())).collect(), + state: value.connection_end.state as i32, + delay_period: value.connection_end.delay_period.as_nanos() as u64, + counterparty: Some(value.connection_end.counterparty().clone().into()), + } + } } #[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)] pub struct ConnectionEnd { - pub state: State, - client_id: ClientId, - counterparty: Counterparty, - versions: Vec, - delay_period: Duration, + pub state: State, + client_id: ClientId, + counterparty: Counterparty, + versions: Vec, + delay_period: Duration, } impl Default for ConnectionEnd { - fn default() -> Self { - Self { - state: State::Uninitialized, - client_id: Default::default(), - counterparty: Default::default(), - versions: Vec::new(), - delay_period: ZERO_DURATION, - } - } + fn default() -> Self { + Self { + state: State::Uninitialized, + client_id: Default::default(), + counterparty: Default::default(), + versions: Vec::new(), + delay_period: ZERO_DURATION, + } + } } impl Protobuf for ConnectionEnd {} impl TryFrom for ConnectionEnd { - type Error = Error; - fn try_from(value: RawConnectionEnd) -> Result { - let state = value.state.try_into()?; - if state == State::Uninitialized { - return Ok(ConnectionEnd::default()); - } - if value.client_id.is_empty() { - return Err(Error::empty_proto_connection_end()); - } - - Ok(Self::new( - state, - value.client_id.parse().map_err(Error::invalid_identifier)?, - value - .counterparty - .ok_or_else(Error::missing_counterparty)? 
- .try_into()?, - value - .versions - .into_iter() - .map(Version::try_from) - .collect::, _>>()?, - Duration::from_nanos(value.delay_period), - )) - } + type Error = Error; + fn try_from(value: RawConnectionEnd) -> Result { + let state = value.state.try_into()?; + if state == State::Uninitialized { + return Ok(ConnectionEnd::default()) + } + if value.client_id.is_empty() { + return Err(Error::empty_proto_connection_end()) + } + + Ok(Self::new( + state, + value.client_id.parse().map_err(Error::invalid_identifier)?, + value.counterparty.ok_or_else(Error::missing_counterparty)?.try_into()?, + value + .versions + .into_iter() + .map(Version::try_from) + .collect::, _>>()?, + Duration::from_nanos(value.delay_period), + )) + } } impl From for RawConnectionEnd { - fn from(value: ConnectionEnd) -> Self { - RawConnectionEnd { - client_id: value.client_id.to_string(), - versions: value - .versions - .iter() - .map(|v| From::from(v.clone())) - .collect(), - state: value.state as i32, - counterparty: Some(value.counterparty.into()), - delay_period: value.delay_period.as_nanos() as u64, - } - } + fn from(value: ConnectionEnd) -> Self { + RawConnectionEnd { + client_id: value.client_id.to_string(), + versions: value.versions.iter().map(|v| From::from(v.clone())).collect(), + state: value.state as i32, + counterparty: Some(value.counterparty.into()), + delay_period: value.delay_period.as_nanos() as u64, + } + } } impl ConnectionEnd { - pub fn new( - state: State, - client_id: ClientId, - counterparty: Counterparty, - versions: Vec, - delay_period: Duration, - ) -> Self { - Self { - state, - client_id, - counterparty, - versions, - delay_period, - } - } - - /// Getter for the state of this connection end. - pub fn state(&self) -> &State { - &self.state - } - - /// Setter for the `state` field. - pub fn set_state(&mut self, new_state: State) { - self.state = new_state; - } - - /// Setter for the `counterparty` field. - pub fn set_counterparty(&mut self, new_cparty: Counterparty) { - self.counterparty = new_cparty; - } - - /// Setter for the `version` field. - pub fn set_version(&mut self, new_version: Version) { - self.versions = vec![new_version]; - } - - /// Helper function to compare the counterparty of this end with another counterparty. - pub fn counterparty_matches(&self, other: &Counterparty) -> bool { - self.counterparty.eq(other) - } - - /// Helper function to compare the client id of this end with another client identifier. - pub fn client_id_matches(&self, other: &ClientId) -> bool { - self.client_id.eq(other) - } - - /// Helper function to determine whether the connection is open. - pub fn is_open(&self) -> bool { - self.state_matches(&State::Open) - } - - /// Helper function to determine whether the connection is uninitialized. - pub fn is_uninitialized(&self) -> bool { - self.state_matches(&State::Uninitialized) - } - - /// Helper function to compare the state of this end with another state. - pub fn state_matches(&self, other: &State) -> bool { - self.state.eq(other) - } - - /// Getter for the client id on the local party of this connection end. - pub fn client_id(&self) -> &ClientId { - &self.client_id - } - - /// Getter for the list of versions in this connection end. - pub fn versions(&self) -> &[Version] { - &self.versions - } - - /// Getter for the counterparty. - pub fn counterparty(&self) -> &Counterparty { - &self.counterparty - } - - /// Getter for the delay_period field. 
This represents the duration, at minimum, - /// to delay the sending of a packet after the client update for that packet has been submitted. - pub fn delay_period(&self) -> Duration { - self.delay_period - } - - /// TODO: Clean this up, probably not necessary. - pub fn validate_basic(&self) -> Result<(), ValidationError> { - self.counterparty.validate_basic() - } + pub fn new( + state: State, + client_id: ClientId, + counterparty: Counterparty, + versions: Vec, + delay_period: Duration, + ) -> Self { + Self { state, client_id, counterparty, versions, delay_period } + } + + /// Getter for the state of this connection end. + pub fn state(&self) -> &State { + &self.state + } + + /// Setter for the `state` field. + pub fn set_state(&mut self, new_state: State) { + self.state = new_state; + } + + /// Setter for the `counterparty` field. + pub fn set_counterparty(&mut self, new_cparty: Counterparty) { + self.counterparty = new_cparty; + } + + /// Setter for the `version` field. + pub fn set_version(&mut self, new_version: Version) { + self.versions = vec![new_version]; + } + + /// Helper function to compare the counterparty of this end with another counterparty. + pub fn counterparty_matches(&self, other: &Counterparty) -> bool { + self.counterparty.eq(other) + } + + /// Helper function to compare the client id of this end with another client identifier. + pub fn client_id_matches(&self, other: &ClientId) -> bool { + self.client_id.eq(other) + } + + /// Helper function to determine whether the connection is open. + pub fn is_open(&self) -> bool { + self.state_matches(&State::Open) + } + + /// Helper function to determine whether the connection is uninitialized. + pub fn is_uninitialized(&self) -> bool { + self.state_matches(&State::Uninitialized) + } + + /// Helper function to compare the state of this end with another state. + pub fn state_matches(&self, other: &State) -> bool { + self.state.eq(other) + } + + /// Getter for the client id on the local party of this connection end. + pub fn client_id(&self) -> &ClientId { + &self.client_id + } + + /// Getter for the list of versions in this connection end. + pub fn versions(&self) -> &[Version] { + &self.versions + } + + /// Getter for the counterparty. + pub fn counterparty(&self) -> &Counterparty { + &self.counterparty + } + + /// Getter for the delay_period field. This represents the duration, at minimum, + /// to delay the sending of a packet after the client update for that packet has been submitted. + pub fn delay_period(&self) -> Duration { + self.delay_period + } + + /// TODO: Clean this up, probably not necessary. + pub fn validate_basic(&self) -> Result<(), ValidationError> { + self.counterparty.validate_basic() + } } #[derive(Clone, Debug, Default, PartialEq, Eq, Hash, Serialize, Deserialize)] pub struct Counterparty { - client_id: ClientId, - pub connection_id: Option, - prefix: CommitmentPrefix, + client_id: ClientId, + pub connection_id: Option, + prefix: CommitmentPrefix, } impl Protobuf for Counterparty {} @@ -250,143 +232,139 @@ impl Protobuf for Counterparty {} // Converts from the wire format RawCounterparty. Typically used from the relayer side // during queries for response validation and to extract the Counterparty structure. 
impl TryFrom for Counterparty { - type Error = Error; - - fn try_from(value: RawCounterparty) -> Result { - let connection_id = Some(value.connection_id) - .filter(|x| !x.is_empty()) - .map(|v| FromStr::from_str(v.as_str())) - .transpose() - .map_err(Error::invalid_identifier)?; - Ok(Counterparty::new( - value.client_id.parse().map_err(Error::invalid_identifier)?, - connection_id, - value - .prefix - .ok_or_else(Error::missing_counterparty)? - .key_prefix - .try_into() - .map_err(|_| Error::ics02_client(ClientError::empty_prefix()))?, - )) - } + type Error = Error; + + fn try_from(value: RawCounterparty) -> Result { + let connection_id = Some(value.connection_id) + .filter(|x| !x.is_empty()) + .map(|v| FromStr::from_str(v.as_str())) + .transpose() + .map_err(Error::invalid_identifier)?; + Ok(Counterparty::new( + value.client_id.parse().map_err(Error::invalid_identifier)?, + connection_id, + value + .prefix + .ok_or_else(Error::missing_counterparty)? + .key_prefix + .try_into() + .map_err(|_| Error::ics02_client(ClientError::empty_prefix()))?, + )) + } } impl From for RawCounterparty { - fn from(value: Counterparty) -> Self { - RawCounterparty { - client_id: value.client_id.as_str().to_string(), - connection_id: value - .connection_id - .map_or_else(|| "".to_string(), |v| v.as_str().to_string()), - prefix: Some(ibc_proto::ibc::core::commitment::v1::MerklePrefix { - key_prefix: value.prefix.into_vec(), - }), - } - } + fn from(value: Counterparty) -> Self { + RawCounterparty { + client_id: value.client_id.as_str().to_string(), + connection_id: value + .connection_id + .map_or_else(|| "".to_string(), |v| v.as_str().to_string()), + prefix: Some(ibc_proto::ibc::core::commitment::v1::MerklePrefix { + key_prefix: value.prefix.into_vec(), + }), + } + } } impl Counterparty { - pub fn new( - client_id: ClientId, - connection_id: Option, - prefix: CommitmentPrefix, - ) -> Self { - Self { - client_id, - connection_id, - prefix, - } - } - - /// Getter for the client id. - pub fn client_id(&self) -> &ClientId { - &self.client_id - } - - /// Getter for connection id. - pub fn connection_id(&self) -> Option<&ConnectionId> { - self.connection_id.as_ref() - } - - pub fn prefix(&self) -> &CommitmentPrefix { - &self.prefix - } - - pub fn validate_basic(&self) -> Result<(), ValidationError> { - Ok(()) - } + pub fn new( + client_id: ClientId, + connection_id: Option, + prefix: CommitmentPrefix, + ) -> Self { + Self { client_id, connection_id, prefix } + } + + /// Getter for the client id. + pub fn client_id(&self) -> &ClientId { + &self.client_id + } + + /// Getter for connection id. + pub fn connection_id(&self) -> Option<&ConnectionId> { + self.connection_id.as_ref() + } + + pub fn prefix(&self) -> &CommitmentPrefix { + &self.prefix + } + + pub fn validate_basic(&self) -> Result<(), ValidationError> { + Ok(()) + } } #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)] pub enum State { - Uninitialized = 0, - Init = 1, - TryOpen = 2, - Open = 3, + Uninitialized = 0, + Init = 1, + TryOpen = 2, + Open = 3, } impl State { - /// Yields the State as a string. - pub fn as_str(&self) -> &'static str { - match self { - Self::Uninitialized => "UNINITIALIZED", - Self::Init => "INIT", - Self::TryOpen => "TRYOPEN", - Self::Open => "OPEN", - } - } - - /// Parses the State out from a i32. 
- pub fn from_i32(s: i32) -> Result { - match s { - 0 => Ok(Self::Uninitialized), - 1 => Ok(Self::Init), - 2 => Ok(Self::TryOpen), - 3 => Ok(Self::Open), - _ => Err(Error::invalid_state(s)), - } - } - - /// Returns whether or not this connection state is `Open`. - pub fn is_open(self) -> bool { - self == State::Open - } - - /// Returns whether or not this connection with this state - /// has progressed less or the same than the argument. - /// - /// # Example - /// ```rust,ignore - /// assert!(State::Init.less_or_equal_progress(State::Open)); - /// assert!(State::TryOpen.less_or_equal_progress(State::TryOpen)); - /// assert!(!State::Open.less_or_equal_progress(State::Uninitialized)); - /// ``` - pub fn less_or_equal_progress(self, other: Self) -> bool { - self as u32 <= other as u32 - } + /// Yields the State as a string. + pub fn as_str(&self) -> &'static str { + match self { + Self::Uninitialized => "UNINITIALIZED", + Self::Init => "INIT", + Self::TryOpen => "TRYOPEN", + Self::Open => "OPEN", + } + } + + /// Parses the State out from a i32. + pub fn from_i32(s: i32) -> Result { + match s { + 0 => Ok(Self::Uninitialized), + 1 => Ok(Self::Init), + 2 => Ok(Self::TryOpen), + 3 => Ok(Self::Open), + _ => Err(Error::invalid_state(s)), + } + } + + /// Returns whether or not this connection state is `Open`. + pub fn is_open(self) -> bool { + self == State::Open + } + + /// Returns whether or not this connection with this state + /// has progressed less or the same than the argument. + /// + /// # Example + /// ```rust,ignore + /// assert!(State::Init.less_or_equal_progress(State::Open)); + /// assert!(State::TryOpen.less_or_equal_progress(State::TryOpen)); + /// assert!(!State::Open.less_or_equal_progress(State::Uninitialized)); + /// ``` + pub fn less_or_equal_progress(self, other: Self) -> bool { + self as u32 <= other as u32 + } } impl fmt::Display for State { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}", self.as_str()) - } + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.as_str()) + } } impl TryFrom for State { - type Error = Error; - fn try_from(value: i32) -> Result { - match value { - 0 => Ok(Self::Uninitialized), - 1 => Ok(Self::Init), - 2 => Ok(Self::TryOpen), - 3 => Ok(Self::Open), - _ => Err(Error::invalid_state(value)), - } - } + type Error = Error; + fn try_from(value: i32) -> Result { + match value { + 0 => Ok(Self::Uninitialized), + 1 => Ok(Self::Init), + 2 => Ok(Self::TryOpen), + 3 => Ok(Self::Open), + _ => Err(Error::invalid_state(value)), + } + } } impl From for i32 { - fn from(value: State) -> Self { - value.into() - } + fn from(value: State) -> Self { + value.into() + } } diff --git a/modules/src/core/ics03_connection/context.rs b/modules/src/core/ics03_connection/context.rs index 4ec14c2983..bbb8f78dfc 100644 --- a/modules/src/core/ics03_connection/context.rs +++ b/modules/src/core/ics03_connection/context.rs @@ -2,85 +2,91 @@ //! the interface that any host chain must implement to be able to process any `ConnectionMsg`. //! See "ADR 003: IBC protocol implementation" for more details. 
-use crate::core::ics03_connection::connection::ConnectionEnd; -use crate::core::ics03_connection::error::Error; -use crate::core::ics03_connection::handler::{ConnectionIdState, ConnectionResult}; -use crate::core::ics03_connection::version::{get_compatible_versions, pick_version, Version}; -use crate::core::ics23_commitment::commitment::CommitmentPrefix; -use crate::core::ics24_host::identifier::{ClientId, ConnectionId}; -use crate::prelude::*; -use crate::Height; +use crate::{ + core::{ + ics03_connection::{ + connection::ConnectionEnd, + error::Error, + handler::{ConnectionIdState, ConnectionResult}, + version::{get_compatible_versions, pick_version, Version}, + }, + ics23_commitment::commitment::CommitmentPrefix, + ics24_host::identifier::{ClientId, ConnectionId}, + }, + prelude::*, + Height, +}; /// A context supplying all the necessary read-only dependencies for processing any `ConnectionMsg`. pub trait ConnectionReader { - /// Returns the ConnectionEnd for the given identifier `conn_id`. - fn connection_end(&self, conn_id: &ConnectionId) -> Result; + /// Returns the ConnectionEnd for the given identifier `conn_id`. + fn connection_end(&self, conn_id: &ConnectionId) -> Result; - /// Returns the oldest height available on the local chain. - fn host_oldest_height(&self) -> Height; + /// Returns the oldest height available on the local chain. + fn host_oldest_height(&self) -> Height; - /// Returns the prefix that the local chain uses in the KV store. - fn commitment_prefix(&self) -> CommitmentPrefix; + /// Returns the prefix that the local chain uses in the KV store. + fn commitment_prefix(&self) -> CommitmentPrefix; - /// Function required by ICS 03. Returns the list of all possible versions that the connection - /// handshake protocol supports. - fn get_compatible_versions(&self) -> Vec { - get_compatible_versions() - } + /// Function required by ICS 03. Returns the list of all possible versions that the connection + /// handshake protocol supports. + fn get_compatible_versions(&self) -> Vec { + get_compatible_versions() + } - /// Function required by ICS 03. Returns one version out of the supplied list of versions, which the - /// connection handshake protocol prefers. - fn pick_version( - &self, - supported_versions: Vec, - counterparty_candidate_versions: Vec, - ) -> Result { - pick_version(supported_versions, counterparty_candidate_versions) - } + /// Function required by ICS 03. Returns one version out of the supplied list of versions, which + /// the connection handshake protocol prefers. + fn pick_version( + &self, + supported_versions: Vec, + counterparty_candidate_versions: Vec, + ) -> Result { + pick_version(supported_versions, counterparty_candidate_versions) + } - /// Returns a counter on how many connections have been created thus far. - /// The value of this counter should increase only via method - /// `ConnectionKeeper::increase_connection_counter`. - fn connection_counter(&self) -> Result; + /// Returns a counter on how many connections have been created thus far. + /// The value of this counter should increase only via method + /// `ConnectionKeeper::increase_connection_counter`. + fn connection_counter(&self) -> Result; } /// A context supplying all the necessary write-only dependencies (i.e., storage writing facility) /// for processing any `ConnectionMsg`. 
pub trait ConnectionKeeper { - fn store_connection_result(&mut self, result: ConnectionResult) -> Result<(), Error> { - self.store_connection(result.connection_id.clone(), &result.connection_end)?; + fn store_connection_result(&mut self, result: ConnectionResult) -> Result<(), Error> { + self.store_connection(result.connection_id.clone(), &result.connection_end)?; - // If we generated an identifier, increase the counter & associate this new identifier - // with the client id. - if matches!(result.connection_id_state, ConnectionIdState::Generated) { - self.increase_connection_counter(); + // If we generated an identifier, increase the counter & associate this new identifier + // with the client id. + if matches!(result.connection_id_state, ConnectionIdState::Generated) { + self.increase_connection_counter(); - // Also associate the connection end to its client identifier. - self.store_connection_to_client( - result.connection_id.clone(), - result.connection_end.client_id(), - )?; - } + // Also associate the connection end to its client identifier. + self.store_connection_to_client( + result.connection_id.clone(), + result.connection_end.client_id(), + )?; + } - Ok(()) - } + Ok(()) + } - /// Stores the given connection_end at a path associated with the connection_id. - fn store_connection( - &mut self, - connection_id: ConnectionId, - connection_end: &ConnectionEnd, - ) -> Result<(), Error>; + /// Stores the given connection_end at a path associated with the connection_id. + fn store_connection( + &mut self, + connection_id: ConnectionId, + connection_end: &ConnectionEnd, + ) -> Result<(), Error>; - /// Stores the given connection_id at a path associated with the client_id. - fn store_connection_to_client( - &mut self, - connection_id: ConnectionId, - client_id: &ClientId, - ) -> Result<(), Error>; + /// Stores the given connection_id at a path associated with the client_id. + fn store_connection_to_client( + &mut self, + connection_id: ConnectionId, + client_id: &ClientId, + ) -> Result<(), Error>; - /// Called upon connection identifier creation (Init or Try process). - /// Increases the counter which keeps track of how many connections have been created. - /// Should never fail. - fn increase_connection_counter(&mut self); + /// Called upon connection identifier creation (Init or Try process). + /// Increases the counter which keeps track of how many connections have been created. + /// Should never fail. + fn increase_connection_counter(&mut self); } diff --git a/modules/src/core/ics03_connection/error.rs b/modules/src/core/ics03_connection/error.rs index 4c90f06c41..e4fce3d2dc 100644 --- a/modules/src/core/ics03_connection/error.rs +++ b/modules/src/core/ics03_connection/error.rs @@ -1,163 +1,169 @@ -use crate::core::ics02_client::error as client_error; -use crate::core::ics03_connection::version::Version; -use crate::core::ics24_host::error::ValidationError; -use crate::core::ics24_host::identifier::{ClientId, ConnectionId}; -use crate::prelude::*; -use crate::proofs::ProofError; -use crate::signer::SignerError; -use crate::Height; +use crate::{ + core::{ + ics02_client::error as client_error, + ics03_connection::version::Version, + ics24_host::{ + error::ValidationError, + identifier::{ClientId, ConnectionId}, + }, + }, + prelude::*, + proofs::ProofError, + signer::SignerError, + Height, +}; use flex_error::define_error; define_error! 
{ - #[derive(Debug, PartialEq, Eq)] - Error { - Ics02Client - [ client_error::Error ] - | _ | { "ics02 client error" }, - - InvalidState - { state: i32 } - | e | { format_args!("connection state is unknown: {}", e.state) }, - - ConnectionExistsAlready - { connection_id: ConnectionId } - | e | { - format_args!("connection exists (was initialized) already: {0}", - e.connection_id) - }, - - ConnectionMismatch - { connection_id: ConnectionId } - | e | { - format_args!("connection end for identifier {0} was never initialized", - e.connection_id) - }, - - InvalidConsensusHeight - { - target_height: Height, - currrent_height: Height - } - | e | { - format_args!("consensus height claimed by the client on the other party is too advanced: {0} (host chain current height: {1})", - e.target_height, e.currrent_height) - }, - - StaleConsensusHeight - { - target_height: Height, - oldest_height: Height - } - | e | { - format_args!("consensus height claimed by the client on the other party has been pruned: {0} (host chain oldest height: {1})", - e.target_height, e.oldest_height) - }, - - InvalidIdentifier - [ ValidationError ] - | _ | { "identifier error" }, - - EmptyProtoConnectionEnd - | _ | { "ConnectionEnd domain object could not be constructed out of empty proto object" }, - - EmptyVersions - | _ | { "empty supported versions" }, - - EmptyFeatures - | _ | { "empty supported features" }, - - NoCommonVersion - | _ | { "no common version" }, - - VersionNotSupported - { - version: Version, - } - | e | { format_args!("version \"{}\" not supported", e.version) }, - - InvalidAddress - | _ | { "invalid address" }, - - MissingProofHeight - | _ | { "missing proof height" }, - - MissingConsensusHeight - | _ | { "missing consensus height" }, - - InvalidProof - [ ProofError ] - | _ | { "invalid connection proof" }, - - VerifyConnectionState - [ client_error::Error ] - | _ | { "error verifying connnection state" }, - - Signer - [ SignerError ] - | _ | { "invalid signer" }, - - ConnectionNotFound - { connection_id: ConnectionId } - | e | { - format_args!("no connection was found for the previous connection id provided {0}", - e.connection_id) - }, - - InvalidCounterparty - | _ | { "invalid signer" }, - - ConnectionIdMismatch - { - connection_id: ConnectionId, - counterparty_connection_id: ConnectionId, - } - | e | { - format_args!("counterparty chosen connection id {0} is different than the connection id {1}", - e.connection_id, e.counterparty_connection_id) - }, - - MissingCounterparty - | _ | { "missing counterparty" }, - - - MissingCounterpartyPrefix - | _ | { "missing counterparty prefix" }, - - NullClientProof - | _ | { "client proof must be present" }, - - FrozenClient - { client_id: ClientId } - | e | { - format_args!("the client id does not match any client state: {0}", - e.client_id) - }, - - ConnectionVerificationFailure - | _ | { "the connection proof verification failed" }, - - ConsensusStateVerificationFailure - { height: Height } - [ client_error::Error ] - | e | { - format_args!("the consensus proof verification failed (height: {0})", - e.height) - }, - - // TODO: use more specific error source - ClientStateVerificationFailure - { - client_id: ClientId, - } - [ client_error::Error ] - | e | { - format_args!("the client state proof verification failed for client id {0}", - e.client_id) - }, - - ImplementationSpecific - { reason: String } - | e | { format_args!("implementation specific error: {}", e.reason) }, - } + #[derive(Debug, PartialEq, Eq)] + Error { + Ics02Client + [ client_error::Error ] + | _ | 
{ "ics02 client error" }, + + InvalidState + { state: i32 } + | e | { format_args!("connection state is unknown: {}", e.state) }, + + ConnectionExistsAlready + { connection_id: ConnectionId } + | e | { + format_args!("connection exists (was initialized) already: {0}", + e.connection_id) + }, + + ConnectionMismatch + { connection_id: ConnectionId } + | e | { + format_args!("connection end for identifier {0} was never initialized", + e.connection_id) + }, + + InvalidConsensusHeight + { + target_height: Height, + currrent_height: Height + } + | e | { + format_args!("consensus height claimed by the client on the other party is too advanced: {0} (host chain current height: {1})", + e.target_height, e.currrent_height) + }, + + StaleConsensusHeight + { + target_height: Height, + oldest_height: Height + } + | e | { + format_args!("consensus height claimed by the client on the other party has been pruned: {0} (host chain oldest height: {1})", + e.target_height, e.oldest_height) + }, + + InvalidIdentifier + [ ValidationError ] + | _ | { "identifier error" }, + + EmptyProtoConnectionEnd + | _ | { "ConnectionEnd domain object could not be constructed out of empty proto object" }, + + EmptyVersions + | _ | { "empty supported versions" }, + + EmptyFeatures + | _ | { "empty supported features" }, + + NoCommonVersion + | _ | { "no common version" }, + + VersionNotSupported + { + version: Version, + } + | e | { format_args!("version \"{}\" not supported", e.version) }, + + InvalidAddress + | _ | { "invalid address" }, + + MissingProofHeight + | _ | { "missing proof height" }, + + MissingConsensusHeight + | _ | { "missing consensus height" }, + + InvalidProof + [ ProofError ] + | _ | { "invalid connection proof" }, + + VerifyConnectionState + [ client_error::Error ] + | _ | { "error verifying connnection state" }, + + Signer + [ SignerError ] + | _ | { "invalid signer" }, + + ConnectionNotFound + { connection_id: ConnectionId } + | e | { + format_args!("no connection was found for the previous connection id provided {0}", + e.connection_id) + }, + + InvalidCounterparty + | _ | { "invalid signer" }, + + ConnectionIdMismatch + { + connection_id: ConnectionId, + counterparty_connection_id: ConnectionId, + } + | e | { + format_args!("counterparty chosen connection id {0} is different than the connection id {1}", + e.connection_id, e.counterparty_connection_id) + }, + + MissingCounterparty + | _ | { "missing counterparty" }, + + + MissingCounterpartyPrefix + | _ | { "missing counterparty prefix" }, + + NullClientProof + | _ | { "client proof must be present" }, + + FrozenClient + { client_id: ClientId } + | e | { + format_args!("the client id does not match any client state: {0}", + e.client_id) + }, + + ConnectionVerificationFailure + | _ | { "the connection proof verification failed" }, + + ConsensusStateVerificationFailure + { height: Height } + [ client_error::Error ] + | e | { + format_args!("the consensus proof verification failed (height: {0})", + e.height) + }, + + // TODO: use more specific error source + ClientStateVerificationFailure + { + client_id: ClientId, + } + [ client_error::Error ] + | e | { + format_args!("the client state proof verification failed for client id {0}", + e.client_id) + }, + + ImplementationSpecific + { reason: String } + | e | { format_args!("implementation specific error: {}", e.reason) }, + } } diff --git a/modules/src/core/ics03_connection/events.rs b/modules/src/core/ics03_connection/events.rs index 2a9e9abd99..df5f41f617 100644 --- 
a/modules/src/core/ics03_connection/events.rs +++ b/modules/src/core/ics03_connection/events.rs @@ -1,15 +1,17 @@ //! Types for the IBC events emitted from Tendermint Websocket by the connection module. use serde_derive::{Deserialize, Serialize}; -use tendermint::abci::Event as AbciEvent; -use tendermint::abci::EventAttribute; - -use crate::core::ics02_client::error::Error as Ics02Error; -use crate::core::ics02_client::height::Height; -use crate::core::ics03_connection::error::Error; -use crate::core::ics24_host::identifier::{ClientId, ConnectionId}; -use crate::events::{IbcEvent, IbcEventType}; -use crate::prelude::*; +use tendermint::abci::{Event as AbciEvent, EventAttribute}; + +use crate::{ + core::{ + ics02_client::{error::Error as Ics02Error, height::Height}, + ics03_connection::error::Error, + ics24_host::identifier::{ClientId, ConnectionId}, + }, + events::{IbcEvent, IbcEventType}, + prelude::*, +}; /// The content of the `key` field for the attribute containing the connection identifier. const HEIGHT_ATTRIBUTE_KEY: &str = "height"; @@ -19,65 +21,65 @@ const COUNTERPARTY_CONN_ID_ATTRIBUTE_KEY: &str = "counterparty_connection_id"; const COUNTERPARTY_CLIENT_ID_ATTRIBUTE_KEY: &str = "counterparty_client_id"; pub fn try_from_tx(event: &tendermint::abci::Event) -> Option { - match event.kind.parse() { - Ok(IbcEventType::OpenInitConnection) => extract_attributes_from_tx(event) - .map(OpenInit::from) - .map(IbcEvent::OpenInitConnection) - .ok(), - Ok(IbcEventType::OpenTryConnection) => extract_attributes_from_tx(event) - .map(OpenTry::from) - .map(IbcEvent::OpenTryConnection) - .ok(), - Ok(IbcEventType::OpenAckConnection) => extract_attributes_from_tx(event) - .map(OpenAck::from) - .map(IbcEvent::OpenAckConnection) - .ok(), - Ok(IbcEventType::OpenConfirmConnection) => extract_attributes_from_tx(event) - .map(OpenConfirm::from) - .map(IbcEvent::OpenConfirmConnection) - .ok(), - _ => None, - } + match event.kind.parse() { + Ok(IbcEventType::OpenInitConnection) => extract_attributes_from_tx(event) + .map(OpenInit::from) + .map(IbcEvent::OpenInitConnection) + .ok(), + Ok(IbcEventType::OpenTryConnection) => extract_attributes_from_tx(event) + .map(OpenTry::from) + .map(IbcEvent::OpenTryConnection) + .ok(), + Ok(IbcEventType::OpenAckConnection) => extract_attributes_from_tx(event) + .map(OpenAck::from) + .map(IbcEvent::OpenAckConnection) + .ok(), + Ok(IbcEventType::OpenConfirmConnection) => extract_attributes_from_tx(event) + .map(OpenConfirm::from) + .map(IbcEvent::OpenConfirmConnection) + .ok(), + _ => None, + } } fn extract_attributes_from_tx(event: &tendermint::abci::Event) -> Result { - let mut attr = Attributes::default(); - - for tag in &event.attributes { - let key = tag.key.as_str(); - let value = tag.value.as_str(); - match key { - HEIGHT_ATTRIBUTE_KEY => { - attr.height = value.parse().map_err(|e| { - Error::ics02_client(Ics02Error::invalid_string_as_height(value.to_string(), e)) - })?; - } - CONN_ID_ATTRIBUTE_KEY => { - attr.connection_id = value.parse().ok(); - } - CLIENT_ID_ATTRIBUTE_KEY => { - attr.client_id = value.parse().map_err(Error::invalid_identifier)?; - } - COUNTERPARTY_CONN_ID_ATTRIBUTE_KEY => { - attr.counterparty_connection_id = value.parse().ok(); - } - COUNTERPARTY_CLIENT_ID_ATTRIBUTE_KEY => { - attr.counterparty_client_id = value.parse().map_err(Error::invalid_identifier)?; - } - _ => {} - } - } - - Ok(attr) + let mut attr = Attributes::default(); + + for tag in &event.attributes { + let key = tag.key.as_str(); + let value = tag.value.as_str(); + match key { + 
HEIGHT_ATTRIBUTE_KEY => { + attr.height = value.parse().map_err(|e| { + Error::ics02_client(Ics02Error::invalid_string_as_height(value.to_string(), e)) + })?; + }, + CONN_ID_ATTRIBUTE_KEY => { + attr.connection_id = value.parse().ok(); + }, + CLIENT_ID_ATTRIBUTE_KEY => { + attr.client_id = value.parse().map_err(Error::invalid_identifier)?; + }, + COUNTERPARTY_CONN_ID_ATTRIBUTE_KEY => { + attr.counterparty_connection_id = value.parse().ok(); + }, + COUNTERPARTY_CLIENT_ID_ATTRIBUTE_KEY => { + attr.counterparty_client_id = value.parse().map_err(Error::invalid_identifier)?; + }, + _ => {}, + } + } + + Ok(attr) } #[derive(Debug, Default, Deserialize, Serialize, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct Attributes { - pub height: Height, - pub connection_id: Option, - pub client_id: ClientId, - pub counterparty_connection_id: Option, - pub counterparty_client_id: ClientId, + pub height: Height, + pub connection_id: Option, + pub client_id: ClientId, + pub counterparty_connection_id: Option, + pub counterparty_client_id: ClientId, } /// Convert attributes to Tendermint ABCI tags @@ -89,241 +91,229 @@ pub struct Attributes { /// Once tendermint-rs improves the API of the `Key` and `Value` types, /// we will be able to remove the `.parse().unwrap()` calls. impl From for Vec { - fn from(a: Attributes) -> Self { - let mut attributes = vec![]; - let height = EventAttribute { - key: HEIGHT_ATTRIBUTE_KEY.parse().unwrap(), - value: a.height.to_string().parse().unwrap(), - index: false, - }; - attributes.push(height); - if let Some(conn_id) = a.connection_id { - let conn_id = EventAttribute { - key: CONN_ID_ATTRIBUTE_KEY.parse().unwrap(), - value: conn_id.to_string().parse().unwrap(), - index: false, - }; - attributes.push(conn_id); - } - let client_id = EventAttribute { - key: CLIENT_ID_ATTRIBUTE_KEY.parse().unwrap(), - value: a.client_id.to_string().parse().unwrap(), - index: false, - }; - attributes.push(client_id); - if let Some(conn_id) = a.counterparty_connection_id { - let conn_id = EventAttribute { - key: COUNTERPARTY_CONN_ID_ATTRIBUTE_KEY.parse().unwrap(), - value: conn_id.to_string().parse().unwrap(), - index: false, - }; - attributes.push(conn_id); - } - let counterparty_client_id = EventAttribute { - key: COUNTERPARTY_CLIENT_ID_ATTRIBUTE_KEY.parse().unwrap(), - value: a.counterparty_client_id.to_string().parse().unwrap(), - index: false, - }; - attributes.push(counterparty_client_id); - attributes - } + fn from(a: Attributes) -> Self { + let mut attributes = vec![]; + let height = EventAttribute { + key: HEIGHT_ATTRIBUTE_KEY.parse().unwrap(), + value: a.height.to_string().parse().unwrap(), + index: false, + }; + attributes.push(height); + if let Some(conn_id) = a.connection_id { + let conn_id = EventAttribute { + key: CONN_ID_ATTRIBUTE_KEY.parse().unwrap(), + value: conn_id.to_string().parse().unwrap(), + index: false, + }; + attributes.push(conn_id); + } + let client_id = EventAttribute { + key: CLIENT_ID_ATTRIBUTE_KEY.parse().unwrap(), + value: a.client_id.to_string().parse().unwrap(), + index: false, + }; + attributes.push(client_id); + if let Some(conn_id) = a.counterparty_connection_id { + let conn_id = EventAttribute { + key: COUNTERPARTY_CONN_ID_ATTRIBUTE_KEY.parse().unwrap(), + value: conn_id.to_string().parse().unwrap(), + index: false, + }; + attributes.push(conn_id); + } + let counterparty_client_id = EventAttribute { + key: COUNTERPARTY_CLIENT_ID_ATTRIBUTE_KEY.parse().unwrap(), + value: a.counterparty_client_id.to_string().parse().unwrap(), + index: false, + }; + 
attributes.push(counterparty_client_id); + attributes + } } #[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Eq)] pub struct OpenInit(Attributes); impl OpenInit { - pub fn attributes(&self) -> &Attributes { - &self.0 - } - pub fn connection_id(&self) -> Option<&ConnectionId> { - self.0.connection_id.as_ref() - } - pub fn height(&self) -> Height { - self.0.height - } - pub fn set_height(&mut self, height: Height) { - self.0.height = height; - } + pub fn attributes(&self) -> &Attributes { + &self.0 + } + pub fn connection_id(&self) -> Option<&ConnectionId> { + self.0.connection_id.as_ref() + } + pub fn height(&self) -> Height { + self.0.height + } + pub fn set_height(&mut self, height: Height) { + self.0.height = height; + } } impl From for OpenInit { - fn from(attrs: Attributes) -> Self { - OpenInit(attrs) - } + fn from(attrs: Attributes) -> Self { + OpenInit(attrs) + } } impl From for IbcEvent { - fn from(v: OpenInit) -> Self { - IbcEvent::OpenInitConnection(v) - } + fn from(v: OpenInit) -> Self { + IbcEvent::OpenInitConnection(v) + } } impl From for AbciEvent { - fn from(v: OpenInit) -> Self { - let attributes = Vec::::from(v.0); - AbciEvent { - kind: IbcEventType::OpenInitConnection.as_str().to_string(), - attributes, - } - } + fn from(v: OpenInit) -> Self { + let attributes = Vec::::from(v.0); + AbciEvent { kind: IbcEventType::OpenInitConnection.as_str().to_string(), attributes } + } } #[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Eq)] pub struct OpenTry(Attributes); impl OpenTry { - pub fn attributes(&self) -> &Attributes { - &self.0 - } - pub fn connection_id(&self) -> Option<&ConnectionId> { - self.0.connection_id.as_ref() - } - pub fn height(&self) -> Height { - self.0.height - } - pub fn set_height(&mut self, height: Height) { - self.0.height = height; - } + pub fn attributes(&self) -> &Attributes { + &self.0 + } + pub fn connection_id(&self) -> Option<&ConnectionId> { + self.0.connection_id.as_ref() + } + pub fn height(&self) -> Height { + self.0.height + } + pub fn set_height(&mut self, height: Height) { + self.0.height = height; + } } impl From for OpenTry { - fn from(attrs: Attributes) -> Self { - OpenTry(attrs) - } + fn from(attrs: Attributes) -> Self { + OpenTry(attrs) + } } impl From for IbcEvent { - fn from(v: OpenTry) -> Self { - IbcEvent::OpenTryConnection(v) - } + fn from(v: OpenTry) -> Self { + IbcEvent::OpenTryConnection(v) + } } impl From for AbciEvent { - fn from(v: OpenTry) -> Self { - let attributes = Vec::::from(v.0); - AbciEvent { - kind: IbcEventType::OpenTryConnection.as_str().to_string(), - attributes, - } - } + fn from(v: OpenTry) -> Self { + let attributes = Vec::::from(v.0); + AbciEvent { kind: IbcEventType::OpenTryConnection.as_str().to_string(), attributes } + } } #[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Eq)] pub struct OpenAck(Attributes); impl OpenAck { - pub fn attributes(&self) -> &Attributes { - &self.0 - } - pub fn connection_id(&self) -> Option<&ConnectionId> { - self.0.connection_id.as_ref() - } - pub fn height(&self) -> Height { - self.0.height - } - pub fn set_height(&mut self, height: Height) { - self.0.height = height; - } + pub fn attributes(&self) -> &Attributes { + &self.0 + } + pub fn connection_id(&self) -> Option<&ConnectionId> { + self.0.connection_id.as_ref() + } + pub fn height(&self) -> Height { + self.0.height + } + pub fn set_height(&mut self, height: Height) { + self.0.height = height; + } } impl From for OpenAck { - fn from(attrs: Attributes) -> Self { - OpenAck(attrs) - } + fn from(attrs: 
Attributes) -> Self { + OpenAck(attrs) + } } impl From for IbcEvent { - fn from(v: OpenAck) -> Self { - IbcEvent::OpenAckConnection(v) - } + fn from(v: OpenAck) -> Self { + IbcEvent::OpenAckConnection(v) + } } impl From for AbciEvent { - fn from(v: OpenAck) -> Self { - let attributes = Vec::::from(v.0); - AbciEvent { - kind: IbcEventType::OpenAckConnection.as_str().to_string(), - attributes, - } - } + fn from(v: OpenAck) -> Self { + let attributes = Vec::::from(v.0); + AbciEvent { kind: IbcEventType::OpenAckConnection.as_str().to_string(), attributes } + } } #[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Eq)] pub struct OpenConfirm(Attributes); impl OpenConfirm { - pub fn attributes(&self) -> &Attributes { - &self.0 - } - pub fn connection_id(&self) -> Option<&ConnectionId> { - self.0.connection_id.as_ref() - } - pub fn height(&self) -> Height { - self.0.height - } - pub fn set_height(&mut self, height: Height) { - self.0.height = height; - } + pub fn attributes(&self) -> &Attributes { + &self.0 + } + pub fn connection_id(&self) -> Option<&ConnectionId> { + self.0.connection_id.as_ref() + } + pub fn height(&self) -> Height { + self.0.height + } + pub fn set_height(&mut self, height: Height) { + self.0.height = height; + } } impl From for OpenConfirm { - fn from(attrs: Attributes) -> Self { - OpenConfirm(attrs) - } + fn from(attrs: Attributes) -> Self { + OpenConfirm(attrs) + } } impl From for IbcEvent { - fn from(v: OpenConfirm) -> Self { - IbcEvent::OpenConfirmConnection(v) - } + fn from(v: OpenConfirm) -> Self { + IbcEvent::OpenConfirmConnection(v) + } } impl From for AbciEvent { - fn from(v: OpenConfirm) -> Self { - let attributes = Vec::::from(v.0); - AbciEvent { - kind: IbcEventType::OpenConfirmConnection.as_str().to_string(), - attributes, - } - } + fn from(v: OpenConfirm) -> Self { + let attributes = Vec::::from(v.0); + AbciEvent { kind: IbcEventType::OpenConfirmConnection.as_str().to_string(), attributes } + } } #[cfg(test)] mod test { - use super::*; - - #[test] - fn connection_event_to_abci_event() { - let height = Height::new(1, 1); - let attributes = Attributes { - height, - connection_id: Some("test_connection".parse().unwrap()), - client_id: "test_client".parse().unwrap(), - counterparty_connection_id: Some("counterparty_test_conn".parse().unwrap()), - counterparty_client_id: "counterparty_test_client".parse().unwrap(), - }; - let mut abci_events = vec![]; - let open_init = OpenInit::from(attributes.clone()); - abci_events.push(AbciEvent::from(open_init.clone())); - let open_try = OpenTry::from(attributes.clone()); - abci_events.push(AbciEvent::from(open_try.clone())); - let open_ack = OpenAck::from(attributes.clone()); - abci_events.push(AbciEvent::from(open_ack.clone())); - let open_confirm = OpenConfirm::from(attributes); - abci_events.push(AbciEvent::from(open_confirm.clone())); - - for event in abci_events { - match try_from_tx(&event) { - Some(e) => match e { - IbcEvent::OpenInitConnection(e) => assert_eq!(e.0, open_init.0), - IbcEvent::OpenTryConnection(e) => assert_eq!(e.0, open_try.0), - IbcEvent::OpenAckConnection(e) => assert_eq!(e.0, open_ack.0), - IbcEvent::OpenConfirmConnection(e) => assert_eq!(e.0, open_confirm.0), - _ => panic!("unexpected event type"), - }, - None => panic!("converted event was wrong"), - } - } - } + use super::*; + + #[test] + fn connection_event_to_abci_event() { + let height = Height::new(1, 1); + let attributes = Attributes { + height, + connection_id: Some("test_connection".parse().unwrap()), + client_id: 
"test_client".parse().unwrap(), + counterparty_connection_id: Some("counterparty_test_conn".parse().unwrap()), + counterparty_client_id: "counterparty_test_client".parse().unwrap(), + }; + let mut abci_events = vec![]; + let open_init = OpenInit::from(attributes.clone()); + abci_events.push(AbciEvent::from(open_init.clone())); + let open_try = OpenTry::from(attributes.clone()); + abci_events.push(AbciEvent::from(open_try.clone())); + let open_ack = OpenAck::from(attributes.clone()); + abci_events.push(AbciEvent::from(open_ack.clone())); + let open_confirm = OpenConfirm::from(attributes); + abci_events.push(AbciEvent::from(open_confirm.clone())); + + for event in abci_events { + match try_from_tx(&event) { + Some(e) => match e { + IbcEvent::OpenInitConnection(e) => assert_eq!(e.0, open_init.0), + IbcEvent::OpenTryConnection(e) => assert_eq!(e.0, open_try.0), + IbcEvent::OpenAckConnection(e) => assert_eq!(e.0, open_ack.0), + IbcEvent::OpenConfirmConnection(e) => assert_eq!(e.0, open_confirm.0), + _ => panic!("unexpected event type"), + }, + None => panic!("converted event was wrong"), + } + } + } } diff --git a/modules/src/core/ics03_connection/handler.rs b/modules/src/core/ics03_connection/handler.rs index 154c1a3e6f..85e19d0262 100644 --- a/modules/src/core/ics03_connection/handler.rs +++ b/modules/src/core/ics03_connection/handler.rs @@ -1,11 +1,13 @@ //! This module implements the processing logic for ICS3 (connection open handshake) messages. -use crate::clients::host_functions::HostFunctionsProvider; -use crate::core::ics03_connection::connection::ConnectionEnd; -use crate::core::ics03_connection::error::Error; -use crate::core::ics03_connection::msgs::ConnectionMsg; -use crate::core::ics24_host::identifier::ConnectionId; -use crate::core::ics26_routing::context::ReaderContext; -use crate::handler::HandlerOutput; + +use crate::{ + core::{ + ics03_connection::{connection::ConnectionEnd, error::Error, msgs::ConnectionMsg}, + ics24_host::identifier::ConnectionId, + ics26_routing::context::ReaderContext, + }, + handler::HandlerOutput, +}; use core::fmt::Debug; pub mod conn_open_ack; @@ -18,44 +20,38 @@ pub mod verify; /// Defines the possible states of a connection identifier in a `ConnectionResult`. #[derive(Clone, Debug)] pub enum ConnectionIdState { - /// Specifies that the handler allocated a new connection identifier. This happens during the - /// processing of either the `MsgConnectionOpenInit` or `MsgConnectionOpenTry` message. - Generated, + /// Specifies that the handler allocated a new connection identifier. This happens during the + /// processing of either the `MsgConnectionOpenInit` or `MsgConnectionOpenTry` message. + Generated, - /// Specifies that the handler reused a previously-allocated connection identifier. - Reused, + /// Specifies that the handler reused a previously-allocated connection identifier. + Reused, } #[derive(Clone, Debug)] pub struct ConnectionResult { - /// The identifier for the connection which the handler processed. Typically this represents the - /// newly-generated connection id (e.g., when processing `MsgConnectionOpenInit`) or - /// an existing connection id (e.g., for `MsgConnectionOpenAck`). - pub connection_id: ConnectionId, + /// The identifier for the connection which the handler processed. Typically this represents + /// the newly-generated connection id (e.g., when processing `MsgConnectionOpenInit`) or + /// an existing connection id (e.g., for `MsgConnectionOpenAck`). 
+ pub connection_id: ConnectionId, - /// The state of the connection identifier (whether it was newly-generated or not). - pub connection_id_state: ConnectionIdState, + /// The state of the connection identifier (whether it was newly-generated or not). + pub connection_id_state: ConnectionIdState, - /// The connection end, which the handler produced as a result of processing the message. - pub connection_end: ConnectionEnd, + /// The connection end, which the handler produced as a result of processing the message. + pub connection_end: ConnectionEnd, } /// General entry point for processing any type of message related to the ICS3 connection open /// handshake protocol. -pub fn dispatch( - ctx: &Ctx, - msg: ConnectionMsg, -) -> Result, Error> -where - Ctx: ReaderContext, - HostFunctions: HostFunctionsProvider, -{ - match msg { - ConnectionMsg::ConnectionOpenInit(msg) => conn_open_init::process(ctx, msg), - ConnectionMsg::ConnectionOpenTry(msg) => conn_open_try::process::(ctx, *msg), - ConnectionMsg::ConnectionOpenAck(msg) => conn_open_ack::process::(ctx, *msg), - ConnectionMsg::ConnectionOpenConfirm(msg) => { - conn_open_confirm::process::(ctx, msg) - } - } +pub fn dispatch( + ctx: &Ctx, + msg: ConnectionMsg, +) -> Result, Error> { + match msg { + ConnectionMsg::ConnectionOpenInit(msg) => conn_open_init::process(ctx, msg), + ConnectionMsg::ConnectionOpenTry(msg) => conn_open_try::process::<_>(ctx, *msg), + ConnectionMsg::ConnectionOpenAck(msg) => conn_open_ack::process::<_>(ctx, *msg), + ConnectionMsg::ConnectionOpenConfirm(msg) => conn_open_confirm::process::<_>(ctx, msg), + } } diff --git a/modules/src/core/ics03_connection/handler/conn_open_ack.rs b/modules/src/core/ics03_connection/handler/conn_open_ack.rs index 4e6218047c..2cf0fdc6bf 100644 --- a/modules/src/core/ics03_connection/handler/conn_open_ack.rs +++ b/modules/src/core/ics03_connection/handler/conn_open_ack.rs @@ -1,266 +1,283 @@ //! Protocol logic specific to processing ICS3 messages of type `MsgConnectionOpenAck`. 
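The handler.rs hunk above folds the former `HostFunctions` type parameter into the `ReaderContext` bound, so callers now invoke `dispatch` with the context alone. Below is a minimal sketch (not part of the patch) of how a caller might drive this entry point, assuming the mock types used by the tests in this patch (`MockContext<MockClientTypes>`) and that `ConnectionMsg` is parameterized by the context type, as the test structs further down suggest:

	use crate::{
		core::ics03_connection::{
			error::Error,
			handler::{dispatch, ConnectionResult},
			msgs::ConnectionMsg,
		},
		mock::context::{MockClientTypes, MockContext},
	};

	fn handle_connection_msg(
		ctx: &MockContext<MockClientTypes>,
		msg: ConnectionMsg<MockContext<MockClientTypes>>,
	) -> Result<ConnectionResult, Error> {
		// Host functions are supplied through the context type itself, so no extra
		// type parameter is threaded through the call any more.
		let output = dispatch(ctx, msg)?;
		// `output.events` carries the emitted connection handshake event;
		// `output.result` holds the updated ConnectionEnd and its identifier.
		Ok(output.result)
	}

Compared to the old `dispatch::<_, Crypto>(...)` call sites visible in the removed test code, the host-function implementation now travels with the context, so call sites shrink to `dispatch(ctx, msg)`.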
-use crate::clients::host_functions::HostFunctionsProvider; -use crate::core::ics03_connection::connection::{ConnectionEnd, Counterparty, State}; -use crate::core::ics03_connection::error::Error; -use crate::core::ics03_connection::events::Attributes; -use crate::core::ics03_connection::handler::verify::{ - check_client_consensus_height, verify_client_proof, verify_connection_proof, - verify_consensus_proof, +use crate::{ + core::{ + ics03_connection::{ + connection::{ConnectionEnd, Counterparty, State}, + error::Error, + events::Attributes, + handler::{ + verify::{ + check_client_consensus_height, verify_client_proof, verify_connection_proof, + verify_consensus_proof, + }, + ConnectionIdState, ConnectionResult, + }, + msgs::conn_open_ack::MsgConnectionOpenAck, + }, + ics26_routing::context::ReaderContext, + }, + events::IbcEvent, + handler::{HandlerOutput, HandlerResult}, + prelude::*, }; -use crate::core::ics03_connection::handler::{ConnectionIdState, ConnectionResult}; -use crate::core::ics03_connection::msgs::conn_open_ack::MsgConnectionOpenAck; -use crate::core::ics26_routing::context::ReaderContext; -use crate::events::IbcEvent; -use crate::handler::{HandlerOutput, HandlerResult}; -use crate::prelude::*; - -pub(crate) fn process( - ctx: &dyn ReaderContext, - msg: MsgConnectionOpenAck, + +pub(crate) fn process( + ctx: &Ctx, + msg: MsgConnectionOpenAck, ) -> HandlerResult { - let mut output = HandlerOutput::builder(); - - // Check the client's (consensus state) proof height if it consensus proof is provided - if msg.proofs.consensus_proof().is_some() { - check_client_consensus_height(ctx, msg.consensus_height())?; - } - - // Validate the connection end. - let mut conn_end = ctx.connection_end(&msg.connection_id)?; - // A connection end must be Init or TryOpen; otherwise we return an error. - let state_is_consistent = conn_end.state_matches(&State::Init) - && conn_end.versions().contains(&msg.version) - || conn_end.state_matches(&State::TryOpen) - && conn_end.versions().get(0).eq(&Some(&msg.version)); - - if !state_is_consistent { - // Old connection end is in incorrect state, propagate the error. - return Err(Error::connection_mismatch(msg.connection_id)); - } - - // Set the connection ID of the counterparty - let prev_counterparty = conn_end.counterparty(); - let counterparty = Counterparty::new( - prev_counterparty.client_id().clone(), - Some(msg.counterparty_connection_id.clone()), - prev_counterparty.prefix().clone(), - ); - conn_end.set_state(State::Open); - conn_end.set_version(msg.version.clone()); - conn_end.set_counterparty(counterparty); - - // Proof verification. - let expected_conn = { - // The counterparty is the local chain. - let counterparty = Counterparty::new( - conn_end.client_id().clone(), // The local client identifier. - Some(msg.connection_id.clone()), // This chain's connection id as known on counterparty. - ctx.commitment_prefix(), // Local commitment prefix. 
- ); - - ConnectionEnd::new( - State::TryOpen, - conn_end.counterparty().client_id().clone(), - counterparty, - vec![msg.version.clone()], - conn_end.delay_period(), - ) - }; - - let client_state = msg.client_state.ok_or_else(|| { - Error::implementation_specific("client state is required in connOpenTry".into()) - })?; - - let client_proof = msg.proofs.client_proof().as_ref().ok_or_else(|| { - Error::implementation_specific("client proof is required in connOpenTry".into()) - })?; - - let consensus_proof = msg.proofs.consensus_proof().ok_or_else(|| { - Error::implementation_specific("consensus proof is required in connOpenTry".into()) - })?; - - ctx.validate_self_client(&client_state) - .map_err(Error::ics02_client)?; - - verify_connection_proof::( - ctx, - msg.proofs.height(), - &conn_end, - &expected_conn, - msg.proofs.height(), - msg.proofs.object_proof(), - )?; - - verify_client_proof::( - ctx, - msg.proofs.height(), - &conn_end, - client_state, - msg.proofs.height(), - client_proof, - )?; - - verify_consensus_proof::(ctx, msg.proofs.height(), &conn_end, &consensus_proof)?; - - output.log("success: connection verification passed"); - - let event_attributes = Attributes { - connection_id: Some(msg.connection_id.clone()), - height: ctx.host_height(), - client_id: conn_end.client_id().clone(), - counterparty_connection_id: conn_end.counterparty().connection_id.clone(), - counterparty_client_id: conn_end.counterparty().client_id().clone(), - }; - - let result = ConnectionResult { - connection_id: msg.connection_id, - connection_id_state: ConnectionIdState::Reused, - connection_end: conn_end, - }; - - output.emit(IbcEvent::OpenAckConnection(event_attributes.into())); - - Ok(output.with_result(result)) + let mut output = HandlerOutput::builder(); + + // Check the client's (consensus state) proof height if it consensus proof is provided + if msg.proofs.consensus_proof().is_some() { + check_client_consensus_height(ctx, msg.consensus_height())?; + } + + // Validate the connection end. + let mut conn_end = ctx.connection_end(&msg.connection_id)?; + // A connection end must be Init or TryOpen; otherwise we return an error. + let state_is_consistent = conn_end.state_matches(&State::Init) && + conn_end.versions().contains(&msg.version) || + conn_end.state_matches(&State::TryOpen) && + conn_end.versions().get(0).eq(&Some(&msg.version)); + + if !state_is_consistent { + // Old connection end is in incorrect state, propagate the error. + return Err(Error::connection_mismatch(msg.connection_id)) + } + + // Set the connection ID of the counterparty + let prev_counterparty = conn_end.counterparty(); + let counterparty = Counterparty::new( + prev_counterparty.client_id().clone(), + Some(msg.counterparty_connection_id.clone()), + prev_counterparty.prefix().clone(), + ); + conn_end.set_state(State::Open); + conn_end.set_version(msg.version.clone()); + conn_end.set_counterparty(counterparty); + + // Proof verification. + let expected_conn = { + // The counterparty is the local chain. + let counterparty = Counterparty::new( + conn_end.client_id().clone(), // The local client identifier. + Some(msg.connection_id.clone()), /* This chain's connection id as known on + * counterparty. */ + ctx.commitment_prefix(), // Local commitment prefix. 
+ ); + + ConnectionEnd::new( + State::TryOpen, + conn_end.counterparty().client_id().clone(), + counterparty, + vec![msg.version.clone()], + conn_end.delay_period(), + ) + }; + + let client_state = msg.client_state.ok_or_else(|| { + Error::implementation_specific("client state is required in connOpenTry".into()) + })?; + + let client_proof = msg.proofs.client_proof().as_ref().ok_or_else(|| { + Error::implementation_specific("client proof is required in connOpenTry".into()) + })?; + + let consensus_proof = msg.proofs.consensus_proof().ok_or_else(|| { + Error::implementation_specific("consensus proof is required in connOpenTry".into()) + })?; + + ctx.validate_self_client(&client_state).map_err(Error::ics02_client)?; + + verify_connection_proof::( + ctx, + msg.proofs.height(), + &conn_end, + &expected_conn, + msg.proofs.height(), + msg.proofs.object_proof(), + )?; + + verify_client_proof::( + ctx, + msg.proofs.height(), + &conn_end, + client_state, + msg.proofs.height(), + client_proof, + )?; + + verify_consensus_proof::(ctx, msg.proofs.height(), &conn_end, &consensus_proof)?; + + output.log("success: connection verification passed"); + + let event_attributes = Attributes { + connection_id: Some(msg.connection_id.clone()), + height: ctx.host_height(), + client_id: conn_end.client_id().clone(), + counterparty_connection_id: conn_end.counterparty().connection_id.clone(), + counterparty_client_id: conn_end.counterparty().client_id().clone(), + }; + + let result = ConnectionResult { + connection_id: msg.connection_id, + connection_id_state: ConnectionIdState::Reused, + connection_end: conn_end, + }; + + output.emit(IbcEvent::OpenAckConnection(event_attributes.into())); + + Ok(output.with_result(result)) } #[cfg(test)] mod tests { - use crate::prelude::*; - - use core::str::FromStr; - use test_log::test; - - use crate::core::ics02_client::context::ClientReader; - use crate::core::ics03_connection::connection::{ConnectionEnd, Counterparty, State}; - use crate::core::ics03_connection::error; - use crate::core::ics03_connection::handler::{dispatch, ConnectionResult}; - use crate::core::ics03_connection::msgs::conn_open_ack::test_util::get_dummy_raw_msg_conn_open_ack; - use crate::core::ics03_connection::msgs::conn_open_ack::MsgConnectionOpenAck; - use crate::core::ics03_connection::msgs::ConnectionMsg; - use crate::core::ics23_commitment::commitment::CommitmentPrefix; - use crate::core::ics24_host::identifier::{ChainId, ClientId}; - use crate::events::IbcEvent; - use crate::mock::context::MockContext; - use crate::mock::host::HostType; - use crate::test_utils::Crypto; - use crate::timestamp::ZERO_DURATION; - - #[test] - fn conn_open_ack_msg_processing() { - struct Test { - name: String, - ctx: MockContext, - msg: ConnectionMsg, - want_pass: bool, - match_error: Box, - } - - let msg_ack = - MsgConnectionOpenAck::try_from(get_dummy_raw_msg_conn_open_ack(10, 10)).unwrap(); - let conn_id = msg_ack.connection_id.clone(); - let counterparty_conn_id = msg_ack.counterparty_connection_id.clone(); - - // Client parameters -- identifier and correct height (matching the proof height) - let client_id = ClientId::from_str("mock_clientid").unwrap(); - let proof_height = msg_ack.proofs.height(); - - // Parametrize the host chain to have a height at least as recent as the - // the height of the proofs in the Ack msg. 
- let latest_height = proof_height.increment(); - let max_history_size = 5; - let default_context = MockContext::new( - ChainId::new("mockgaia".to_string(), latest_height.revision_number), - HostType::Mock, - max_history_size, - latest_height, - ); - - // A connection end that will exercise the successful path. - let default_conn_end = ConnectionEnd::new( - State::Init, - client_id.clone(), - Counterparty::new( - client_id.clone(), - Some(msg_ack.counterparty_connection_id.clone()), - CommitmentPrefix::try_from(b"ibc".to_vec()).unwrap(), - ), - vec![msg_ack.version.clone()], - ZERO_DURATION, - ); - - // A connection end with incorrect state `Open`; will be part of the context. - let mut conn_end_open = default_conn_end.clone(); - conn_end_open.set_state(State::Open); // incorrect field - - let tests: Vec = vec![ - Test { - name: "Successful processing of an Ack message".to_string(), - ctx: default_context - .clone() - .with_client(&client_id, proof_height) - .with_connection(conn_id.clone(), default_conn_end), - msg: ConnectionMsg::ConnectionOpenAck(Box::new(msg_ack.clone())), - want_pass: true, - match_error: Box::new(|_| panic!("should not have error")), - }, - Test { - name: "Processing fails because the connection does not exist in the context" - .to_string(), - ctx: default_context.clone(), - msg: ConnectionMsg::ConnectionOpenAck(Box::new(msg_ack.clone())), - want_pass: false, - match_error: { - let connection_id = conn_id.clone(); - Box::new(move |e| match e.detail() { - error::ErrorDetail::ConnectionNotFound(e) => { - assert_eq!(e.connection_id, connection_id) - } - _ => { - panic!("Expected ConnectionNotFound error"); - } - }) - }, - }, - Test { - name: "Processing fails due to connections mismatch (incorrect 'open' state)" - .to_string(), - ctx: default_context - .with_client(&client_id, proof_height) - .with_connection(conn_id.clone(), conn_end_open), - msg: ConnectionMsg::ConnectionOpenAck(Box::new(msg_ack)), - want_pass: false, - match_error: { - let connection_id = conn_id; - Box::new(move |e| match e.detail() { - error::ErrorDetail::ConnectionMismatch(e) => { - assert_eq!(e.connection_id, connection_id); - } - _ => { - panic!("Expected ConnectionMismatch error"); - } - }) - }, - }, - /* - Test { - name: "Processing fails due to MissingLocalConsensusState".to_string(), - ctx: MockContext::default() - .with_client(&client_id, proof_height) - .with_connection(conn_id, default_conn_end), - msg: ConnectionMsg::ConnectionOpenAck(Box::new(msg_ack)), - want_pass: false, - error_kind: Some(Kind::MissingLocalConsensusState) - }, - */ - ]; - - for test in tests { - let res = dispatch::<_, Crypto>(&test.ctx, test.msg.clone()); - // Additionally check the events and the output objects in the result. 
- match res { - Ok(proto_output) => { - assert!( + use crate::prelude::*; + + use core::str::FromStr; + use test_log::test; + + use crate::{ + core::{ + ics02_client::context::ClientReader, + ics03_connection::{ + connection::{ConnectionEnd, Counterparty, State}, + error, + handler::{dispatch, ConnectionResult}, + msgs::{ + conn_open_ack::{ + test_util::get_dummy_raw_msg_conn_open_ack, MsgConnectionOpenAck, + }, + ConnectionMsg, + }, + }, + ics23_commitment::commitment::CommitmentPrefix, + ics24_host::identifier::{ChainId, ClientId}, + }, + events::IbcEvent, + mock::{ + context::{MockClientTypes, MockContext}, + host::MockHostType, + }, + timestamp::ZERO_DURATION, + }; + + #[test] + fn conn_open_ack_msg_processing() { + struct Test { + name: String, + ctx: MockContext, + msg: ConnectionMsg>, + want_pass: bool, + match_error: Box, + } + + let msg_ack = + MsgConnectionOpenAck::try_from(get_dummy_raw_msg_conn_open_ack(10, 10)).unwrap(); + let conn_id = msg_ack.connection_id.clone(); + let counterparty_conn_id = msg_ack.counterparty_connection_id.clone(); + + // Client parameters -- identifier and correct height (matching the proof height) + let client_id = ClientId::from_str("mock_clientid").unwrap(); + let proof_height = msg_ack.proofs.height(); + + // Parametrize the host chain to have a height at least as recent as the + // the height of the proofs in the Ack msg. + let latest_height = proof_height.increment(); + let max_history_size = 5; + let default_context = MockContext::new( + ChainId::new("mockgaia".to_string(), latest_height.revision_number), + MockHostType::Mock, + max_history_size, + latest_height, + ); + + // A connection end that will exercise the successful path. + let default_conn_end = ConnectionEnd::new( + State::Init, + client_id.clone(), + Counterparty::new( + client_id.clone(), + Some(msg_ack.counterparty_connection_id.clone()), + CommitmentPrefix::try_from(b"ibc".to_vec()).unwrap(), + ), + vec![msg_ack.version.clone()], + ZERO_DURATION, + ); + + // A connection end with incorrect state `Open`; will be part of the context. 
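+		// (the Ack handler only accepts connections in `Init` or `TryOpen`, so this one
+		// must be rejected with a `ConnectionMismatch` error)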
+ let mut conn_end_open = default_conn_end.clone(); + conn_end_open.set_state(State::Open); // incorrect field + + let tests: Vec = vec![ + Test { + name: "Successful processing of an Ack message".to_string(), + ctx: default_context + .clone() + .with_client(&client_id, proof_height) + .with_connection(conn_id.clone(), default_conn_end), + msg: ConnectionMsg::ConnectionOpenAck(Box::new(msg_ack.clone())), + want_pass: true, + match_error: Box::new(|_| panic!("should not have error")), + }, + Test { + name: "Processing fails because the connection does not exist in the context" + .to_string(), + ctx: default_context.clone(), + msg: ConnectionMsg::ConnectionOpenAck(Box::new(msg_ack.clone())), + want_pass: false, + match_error: { + let connection_id = conn_id.clone(); + Box::new(move |e| match e.detail() { + error::ErrorDetail::ConnectionNotFound(e) => { + assert_eq!(e.connection_id, connection_id) + }, + _ => { + panic!("Expected ConnectionNotFound error"); + }, + }) + }, + }, + Test { + name: "Processing fails due to connections mismatch (incorrect 'open' state)" + .to_string(), + ctx: default_context + .with_client(&client_id, proof_height) + .with_connection(conn_id.clone(), conn_end_open), + msg: ConnectionMsg::ConnectionOpenAck(Box::new(msg_ack)), + want_pass: false, + match_error: { + let connection_id = conn_id; + Box::new(move |e| match e.detail() { + error::ErrorDetail::ConnectionMismatch(e) => { + assert_eq!(e.connection_id, connection_id); + }, + _ => { + panic!("Expected ConnectionMismatch error"); + }, + }) + }, + }, + /* + Test { + name: "Processing fails due to MissingLocalConsensusState".to_string(), + ctx: MockContext::default() + .with_client(&client_id, proof_height) + .with_connection(conn_id, default_conn_end), + msg: ConnectionMsg::ConnectionOpenAck(Box::new(msg_ack)), + want_pass: false, + error_kind: Some(Kind::MissingLocalConsensusState) + }, + */ + ]; + + for test in tests { + let res = dispatch(&test.ctx, test.msg.clone()); + // Additionally check the events and the output objects in the result. + match res { + Ok(proto_output) => { + assert!( test.want_pass, "conn_open_ack: test passed but was supposed to fail for test: {}, \nparams {:?} {:?}", test.name, @@ -268,37 +285,37 @@ mod tests { test.ctx.clone() ); - assert!(!proto_output.events.is_empty()); // Some events must exist. - - // The object in the output is a ConnectionEnd, should have OPEN state. - let res: ConnectionResult = proto_output.result; - assert_eq!(res.connection_end.state().clone(), State::Open); - - // assert that counterparty connection id is correct - assert_eq!( - res.connection_end.counterparty().connection_id, - Some(counterparty_conn_id.clone()) - ); - - for e in proto_output.events.iter() { - assert!(matches!(e, &IbcEvent::OpenAckConnection(_))); - assert_eq!(e.height(), test.ctx.host_height()); - } - } - Err(e) => { - assert!( - !test.want_pass, - "conn_open_ack: failed for test: {}, \nparams {:?} {:?} error: {:?}", - test.name, - test.msg, - test.ctx.clone(), - e, - ); - - // Verify that the error kind matches - (test.match_error)(e); - } - } - } - } + assert!(!proto_output.events.is_empty()); // Some events must exist. + + // The object in the output is a ConnectionEnd, should have OPEN state. 
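+					// The Ack handler reuses the connection id carried in the message, so the
+					// result is tagged `ConnectionIdState::Reused` rather than `Generated`.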
+ let res: ConnectionResult = proto_output.result; + assert_eq!(res.connection_end.state().clone(), State::Open); + + // assert that counterparty connection id is correct + assert_eq!( + res.connection_end.counterparty().connection_id, + Some(counterparty_conn_id.clone()) + ); + + for e in proto_output.events.iter() { + assert!(matches!(e, &IbcEvent::OpenAckConnection(_))); + assert_eq!(e.height(), test.ctx.host_height()); + } + }, + Err(e) => { + assert!( + !test.want_pass, + "conn_open_ack: failed for test: {}, \nparams {:?} {:?} error: {:?}", + test.name, + test.msg, + test.ctx.clone(), + e, + ); + + // Verify that the error kind matches + (test.match_error)(e); + }, + } + } + } } diff --git a/modules/src/core/ics03_connection/handler/conn_open_confirm.rs b/modules/src/core/ics03_connection/handler/conn_open_confirm.rs index 3c098334e8..9988bb6b50 100644 --- a/modules/src/core/ics03_connection/handler/conn_open_confirm.rs +++ b/modules/src/core/ics03_connection/handler/conn_open_confirm.rs @@ -1,166 +1,178 @@ //! Protocol logic specific to processing ICS3 messages of type `MsgConnectionOpenConfirm`. -use crate::clients::host_functions::HostFunctionsProvider; -use crate::core::ics03_connection::connection::{ConnectionEnd, Counterparty, State}; -use crate::core::ics03_connection::error::Error; -use crate::core::ics03_connection::events::Attributes; -use crate::core::ics03_connection::handler::verify::verify_connection_proof; -use crate::core::ics03_connection::handler::{ConnectionIdState, ConnectionResult}; -use crate::core::ics03_connection::msgs::conn_open_confirm::MsgConnectionOpenConfirm; -use crate::core::ics26_routing::context::ReaderContext; -use crate::events::IbcEvent; -use crate::handler::{HandlerOutput, HandlerResult}; -use crate::prelude::*; - -pub(crate) fn process( - ctx: &dyn ReaderContext, - msg: MsgConnectionOpenConfirm, +use crate::{ + core::{ + ics03_connection::{ + connection::{ConnectionEnd, Counterparty, State}, + error::Error, + events::Attributes, + handler::{verify::verify_connection_proof, ConnectionIdState, ConnectionResult}, + msgs::conn_open_confirm::MsgConnectionOpenConfirm, + }, + ics26_routing::context::ReaderContext, + }, + events::IbcEvent, + handler::{HandlerOutput, HandlerResult}, + prelude::*, +}; + +pub(crate) fn process( + ctx: &Ctx, + msg: MsgConnectionOpenConfirm, ) -> HandlerResult { - let mut output = HandlerOutput::builder(); - - // Validate the connection end. - let mut conn_end = ctx.connection_end(&msg.connection_id)?; - // A connection end must be in TryOpen state; otherwise return error. - if !conn_end.state_matches(&State::TryOpen) { - // Old connection end is in incorrect state, propagate the error. - return Err(Error::connection_mismatch(msg.connection_id)); - } - - // Verify proofs. Assemble the connection end as we expect to find it on the counterparty. - let expected_conn = ConnectionEnd::new( - State::Open, - conn_end.counterparty().client_id().clone(), - Counterparty::new( - // The counterparty is the local chain. - conn_end.client_id().clone(), // The local client identifier. - Some(msg.connection_id.clone()), // Local connection id. - ctx.commitment_prefix(), // Local commitment prefix. - ), - conn_end.versions().to_vec(), - conn_end.delay_period(), - ); - - // 2. Pass the details to the verification function. 
- verify_connection_proof::( - ctx, - msg.proofs.height(), - &conn_end, - &expected_conn, - msg.proofs.height(), - msg.proofs.object_proof(), - )?; - - output.log("success: connection verification passed"); - - // Transition our own end of the connection to state OPEN. - conn_end.set_state(State::Open); - - let event_attributes = Attributes { - connection_id: Some(msg.connection_id.clone()), - height: ctx.host_height(), - client_id: conn_end.client_id().clone(), - counterparty_connection_id: conn_end.counterparty().connection_id.clone(), - counterparty_client_id: conn_end.counterparty().client_id().clone(), - }; - - let result = ConnectionResult { - connection_id: msg.connection_id, - connection_id_state: ConnectionIdState::Reused, - connection_end: conn_end, - }; - - output.emit(IbcEvent::OpenConfirmConnection(event_attributes.into())); - - Ok(output.with_result(result)) + let mut output = HandlerOutput::builder(); + + // Validate the connection end. + let mut conn_end = ctx.connection_end(&msg.connection_id)?; + // A connection end must be in TryOpen state; otherwise return error. + if !conn_end.state_matches(&State::TryOpen) { + // Old connection end is in incorrect state, propagate the error. + return Err(Error::connection_mismatch(msg.connection_id)) + } + + // Verify proofs. Assemble the connection end as we expect to find it on the counterparty. + let expected_conn = ConnectionEnd::new( + State::Open, + conn_end.counterparty().client_id().clone(), + Counterparty::new( + // The counterparty is the local chain. + conn_end.client_id().clone(), // The local client identifier. + Some(msg.connection_id.clone()), // Local connection id. + ctx.commitment_prefix(), // Local commitment prefix. + ), + conn_end.versions().to_vec(), + conn_end.delay_period(), + ); + + // 2. Pass the details to the verification function. + verify_connection_proof::( + ctx, + msg.proofs.height(), + &conn_end, + &expected_conn, + msg.proofs.height(), + msg.proofs.object_proof(), + )?; + + output.log("success: connection verification passed"); + + // Transition our own end of the connection to state OPEN. 
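+	// Only the state changes here; the counterparty, versions and delay period recorded
+	// at the TryOpen step are left untouched.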
+ conn_end.set_state(State::Open); + + let event_attributes = Attributes { + connection_id: Some(msg.connection_id.clone()), + height: ctx.host_height(), + client_id: conn_end.client_id().clone(), + counterparty_connection_id: conn_end.counterparty().connection_id.clone(), + counterparty_client_id: conn_end.counterparty().client_id().clone(), + }; + + let result = ConnectionResult { + connection_id: msg.connection_id, + connection_id_state: ConnectionIdState::Reused, + connection_end: conn_end, + }; + + output.emit(IbcEvent::OpenConfirmConnection(event_attributes.into())); + + Ok(output.with_result(result)) } #[cfg(test)] mod tests { - use crate::prelude::*; - - use core::str::FromStr; - use test_log::test; - - use crate::core::ics02_client::context::ClientReader; - use crate::core::ics03_connection::connection::{ConnectionEnd, Counterparty, State}; - use crate::core::ics03_connection::context::ConnectionReader; - use crate::core::ics03_connection::handler::{dispatch, ConnectionResult}; - use crate::core::ics03_connection::msgs::conn_open_confirm::test_util::get_dummy_raw_msg_conn_open_confirm; - use crate::core::ics03_connection::msgs::conn_open_confirm::MsgConnectionOpenConfirm; - use crate::core::ics03_connection::msgs::ConnectionMsg; - use crate::core::ics23_commitment::commitment::CommitmentPrefix; - use crate::core::ics24_host::identifier::ClientId; - use crate::events::IbcEvent; - use crate::mock::context::MockContext; - use crate::test_utils::Crypto; - use crate::timestamp::ZERO_DURATION; - use crate::Height; - - #[test] - fn conn_open_confirm_msg_processing() { - struct Test { - name: String, - ctx: MockContext, - msg: ConnectionMsg, - want_pass: bool, - } - - let client_id = ClientId::from_str("mock_clientid").unwrap(); - let msg_confirm = - MsgConnectionOpenConfirm::try_from(get_dummy_raw_msg_conn_open_confirm()).unwrap(); - let counterparty = Counterparty::new( - client_id.clone(), - Some(msg_confirm.connection_id.clone()), - CommitmentPrefix::try_from(b"ibc".to_vec()).unwrap(), - ); - - let context = MockContext::default(); - - let incorrect_conn_end_state = ConnectionEnd::new( - State::Init, - client_id.clone(), - counterparty, - context.get_compatible_versions(), - ZERO_DURATION, - ); - - let mut correct_conn_end = incorrect_conn_end_state.clone(); - correct_conn_end.set_state(State::TryOpen); - - let tests: Vec = vec![ - Test { - name: "Processing fails due to missing connection in context".to_string(), - ctx: context.clone(), - msg: ConnectionMsg::ConnectionOpenConfirm(msg_confirm.clone()), - want_pass: false, - }, - Test { - name: "Processing fails due to connections mismatch (incorrect state)".to_string(), - ctx: context - .clone() - .with_client(&client_id, Height::new(0, 10)) - .with_connection(msg_confirm.connection_id.clone(), incorrect_conn_end_state), - msg: ConnectionMsg::ConnectionOpenConfirm(msg_confirm.clone()), - want_pass: false, - }, - Test { - name: "Processing successful".to_string(), - ctx: context - .with_client(&client_id, Height::new(0, 10)) - .with_connection(msg_confirm.connection_id.clone(), correct_conn_end), - msg: ConnectionMsg::ConnectionOpenConfirm(msg_confirm), - want_pass: true, - }, - ] - .into_iter() - .collect(); - - for test in tests { - let res = dispatch::<_, Crypto>(&test.ctx, test.msg.clone()); - // Additionally check the events and the output objects in the result. 
- match res { - Ok(proto_output) => { - assert!( + use crate::prelude::*; + + use core::str::FromStr; + use test_log::test; + + use crate::{ + core::{ + ics02_client::context::ClientReader, + ics03_connection::{ + connection::{ConnectionEnd, Counterparty, State}, + context::ConnectionReader, + handler::{dispatch, ConnectionResult}, + msgs::{ + conn_open_confirm::{ + test_util::get_dummy_raw_msg_conn_open_confirm, MsgConnectionOpenConfirm, + }, + ConnectionMsg, + }, + }, + ics23_commitment::commitment::CommitmentPrefix, + ics24_host::identifier::ClientId, + }, + events::IbcEvent, + mock::context::{MockClientTypes, MockContext}, + timestamp::ZERO_DURATION, + Height, + }; + + #[test] + fn conn_open_confirm_msg_processing() { + struct Test { + name: String, + ctx: MockContext, + msg: ConnectionMsg>, + want_pass: bool, + } + + let client_id = ClientId::from_str("mock_clientid").unwrap(); + let msg_confirm = + MsgConnectionOpenConfirm::try_from(get_dummy_raw_msg_conn_open_confirm()).unwrap(); + let counterparty = Counterparty::new( + client_id.clone(), + Some(msg_confirm.connection_id.clone()), + CommitmentPrefix::try_from(b"ibc".to_vec()).unwrap(), + ); + + let context = MockContext::default(); + + let incorrect_conn_end_state = ConnectionEnd::new( + State::Init, + client_id.clone(), + counterparty, + context.get_compatible_versions(), + ZERO_DURATION, + ); + + let mut correct_conn_end = incorrect_conn_end_state.clone(); + correct_conn_end.set_state(State::TryOpen); + + let tests: Vec = vec![ + Test { + name: "Processing fails due to missing connection in context".to_string(), + ctx: context.clone(), + msg: ConnectionMsg::ConnectionOpenConfirm(msg_confirm.clone()), + want_pass: false, + }, + Test { + name: "Processing fails due to connections mismatch (incorrect state)".to_string(), + ctx: context + .clone() + .with_client(&client_id, Height::new(0, 10)) + .with_connection(msg_confirm.connection_id.clone(), incorrect_conn_end_state), + msg: ConnectionMsg::ConnectionOpenConfirm(msg_confirm.clone()), + want_pass: false, + }, + Test { + name: "Processing successful".to_string(), + ctx: context + .with_client(&client_id, Height::new(0, 10)) + .with_connection(msg_confirm.connection_id.clone(), correct_conn_end), + msg: ConnectionMsg::ConnectionOpenConfirm(msg_confirm), + want_pass: true, + }, + ] + .into_iter() + .collect(); + + for test in tests { + let res = dispatch(&test.ctx, test.msg.clone()); + // Additionally check the events and the output objects in the result. + match res { + Ok(proto_output) => { + assert!( test.want_pass, "conn_open_confirm: test passed but was supposed to fail for: {}, \nparams {:?} {:?}", test.name, @@ -168,28 +180,28 @@ mod tests { test.ctx.clone() ); - assert!(!proto_output.events.is_empty()); // Some events must exist. - - // The object in the output is a ConnectionEnd, should have OPEN state. - let res: ConnectionResult = proto_output.result; - assert_eq!(res.connection_end.state().clone(), State::Open); - - for e in proto_output.events.iter() { - assert!(matches!(e, &IbcEvent::OpenConfirmConnection(_))); - assert_eq!(e.height(), test.ctx.host_height()); - } - } - Err(e) => { - assert!( - !test.want_pass, - "conn_open_confirm: failed for test: {}, \nparams {:?} {:?} error: {:?}", - test.name, - test.msg, - test.ctx.clone(), - e, - ); - } - } - } - } + assert!(!proto_output.events.is_empty()); // Some events must exist. + + // The object in the output is a ConnectionEnd, should have OPEN state. 
+ let res: ConnectionResult = proto_output.result; + assert_eq!(res.connection_end.state().clone(), State::Open); + + for e in proto_output.events.iter() { + assert!(matches!(e, &IbcEvent::OpenConfirmConnection(_))); + assert_eq!(e.height(), test.ctx.host_height()); + } + }, + Err(e) => { + assert!( + !test.want_pass, + "conn_open_confirm: failed for test: {}, \nparams {:?} {:?} error: {:?}", + test.name, + test.msg, + test.ctx.clone(), + e, + ); + }, + } + } + } } diff --git a/modules/src/core/ics03_connection/handler/conn_open_init.rs b/modules/src/core/ics03_connection/handler/conn_open_init.rs index da647a6ad0..01de05a2c8 100644 --- a/modules/src/core/ics03_connection/handler/conn_open_init.rs +++ b/modules/src/core/ics03_connection/handler/conn_open_init.rs @@ -1,194 +1,201 @@ //! Protocol logic specific to ICS3 messages of type `MsgConnectionOpenInit`. -use crate::core::ics03_connection::connection::{ConnectionEnd, State}; -use crate::core::ics03_connection::error::Error; -use crate::core::ics03_connection::events::Attributes; -use crate::core::ics03_connection::handler::{ConnectionIdState, ConnectionResult}; -use crate::core::ics03_connection::msgs::conn_open_init::MsgConnectionOpenInit; -use crate::core::ics24_host::identifier::ConnectionId; -use crate::core::ics26_routing::context::ReaderContext; -use crate::events::IbcEvent; -use crate::handler::{HandlerOutput, HandlerResult}; -use crate::prelude::*; - -pub(crate) fn process( - ctx: &dyn ReaderContext, - msg: MsgConnectionOpenInit, +use crate::{ + core::{ + ics03_connection::{ + connection::{ConnectionEnd, State}, + error::Error, + events::Attributes, + handler::{ConnectionIdState, ConnectionResult}, + msgs::conn_open_init::MsgConnectionOpenInit, + }, + ics24_host::identifier::ConnectionId, + ics26_routing::context::ReaderContext, + }, + events::IbcEvent, + handler::{HandlerOutput, HandlerResult}, + prelude::*, +}; + +pub(crate) fn process( + ctx: &Ctx, + msg: MsgConnectionOpenInit, ) -> HandlerResult { - let mut output = HandlerOutput::builder(); - - // An IBC client running on the local (host) chain should exist. - ctx.client_state(&msg.client_id) - .map_err(Error::ics02_client)?; - - let versions = match msg.version { - Some(version) => { - if ctx.get_compatible_versions().contains(&version) { - Ok(vec![version]) - } else { - Err(Error::version_not_supported(version)) - } - } - None => Ok(ctx.get_compatible_versions()), - }?; - - let new_connection_end = ConnectionEnd::new( - State::Init, - msg.client_id.clone(), - msg.counterparty.clone(), - versions, - msg.delay_period, - ); - - // Construct the identifier for the new connection. - let id_counter = ctx.connection_counter()?; - let conn_id = ConnectionId::new(id_counter); - - output.log(format!( - "success: generated new connection identifier: {}", - conn_id - )); - - let event_attributes = Attributes { - connection_id: Some(conn_id.clone()), - height: ctx.host_height(), - client_id: new_connection_end.client_id().clone(), - counterparty_connection_id: new_connection_end.counterparty().connection_id.clone(), - counterparty_client_id: new_connection_end.counterparty().client_id().clone(), - }; - - let result = ConnectionResult { - connection_id: conn_id, - connection_id_state: ConnectionIdState::Generated, - connection_end: new_connection_end, - }; - - output.emit(IbcEvent::OpenInitConnection(event_attributes.into())); - - Ok(output.with_result(result)) + let mut output = HandlerOutput::builder(); + + // An IBC client running on the local (host) chain should exist. 
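+	// `client_state` returns an error when the client is missing, which aborts the
+	// handshake before a connection identifier is ever allocated.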
+ ctx.client_state(&msg.client_id).map_err(Error::ics02_client)?; + + let versions = match msg.version { + Some(version) => + if ctx.get_compatible_versions().contains(&version) { + Ok(vec![version]) + } else { + Err(Error::version_not_supported(version)) + }, + None => Ok(ctx.get_compatible_versions()), + }?; + + let new_connection_end = ConnectionEnd::new( + State::Init, + msg.client_id.clone(), + msg.counterparty.clone(), + versions, + msg.delay_period, + ); + + // Construct the identifier for the new connection. + let id_counter = ctx.connection_counter()?; + let conn_id = ConnectionId::new(id_counter); + + output.log(format!("success: generated new connection identifier: {}", conn_id)); + + let event_attributes = Attributes { + connection_id: Some(conn_id.clone()), + height: ctx.host_height(), + client_id: new_connection_end.client_id().clone(), + counterparty_connection_id: new_connection_end.counterparty().connection_id.clone(), + counterparty_client_id: new_connection_end.counterparty().client_id().clone(), + }; + + let result = ConnectionResult { + connection_id: conn_id, + connection_id_state: ConnectionIdState::Generated, + connection_end: new_connection_end, + }; + + output.emit(IbcEvent::OpenInitConnection(event_attributes.into())); + + Ok(output.with_result(result)) } #[cfg(test)] mod tests { - use test_log::test; - - use crate::core::ics02_client::context::ClientReader; - use crate::core::ics03_connection::connection::State; - use crate::core::ics03_connection::context::ConnectionReader; - use crate::core::ics03_connection::handler::{dispatch, ConnectionResult}; - use crate::core::ics03_connection::msgs::conn_open_init::test_util::get_dummy_raw_msg_conn_open_init; - use crate::core::ics03_connection::msgs::conn_open_init::MsgConnectionOpenInit; - use crate::core::ics03_connection::msgs::ConnectionMsg; - use crate::core::ics03_connection::version::Version; - use crate::events::IbcEvent; - use crate::mock::context::MockContext; - use crate::prelude::*; - use crate::test_utils::Crypto; - use crate::Height; - - use ibc_proto::ibc::core::connection::v1::Version as RawVersion; - - #[test] - fn conn_open_init_msg_processing() { - struct Test { - name: String, - ctx: MockContext, - msg: ConnectionMsg, - expected_versions: Vec, - want_pass: bool, - } - - let msg_conn_init_default = - MsgConnectionOpenInit::try_from(get_dummy_raw_msg_conn_open_init()).unwrap(); - let msg_conn_init_no_version = MsgConnectionOpenInit { - version: None, - ..msg_conn_init_default.clone() - }; - let msg_conn_init_bad_version = MsgConnectionOpenInit { - version: Version::try_from(RawVersion { - identifier: "random identifier 424242".to_string(), - features: vec![], - }) - .unwrap() - .into(), - ..msg_conn_init_default.clone() - }; - let default_context = MockContext::default(); - let good_context = default_context - .clone() - .with_client(&msg_conn_init_default.client_id, Height::new(0, 10)); - - let tests: Vec = vec![ - Test { - name: "Processing fails because no client exists in the context".to_string(), - ctx: default_context, - msg: ConnectionMsg::ConnectionOpenInit(msg_conn_init_default.clone()), - expected_versions: vec![msg_conn_init_default.version.clone().unwrap()], - want_pass: false, - }, - Test { - name: "Incompatible version in MsgConnectionOpenInit msg".to_string(), - ctx: good_context.clone(), - msg: ConnectionMsg::ConnectionOpenInit(msg_conn_init_bad_version), - expected_versions: vec![], - want_pass: false, - }, - Test { - name: "No version in MsgConnectionOpenInit msg".to_string(), - ctx: 
good_context.clone(), - msg: ConnectionMsg::ConnectionOpenInit(msg_conn_init_no_version), - expected_versions: good_context.get_compatible_versions(), - want_pass: true, - }, - Test { - name: "Good parameters".to_string(), - ctx: good_context, - msg: ConnectionMsg::ConnectionOpenInit(msg_conn_init_default.clone()), - expected_versions: vec![msg_conn_init_default.version.unwrap()], - want_pass: true, - }, - ] - .into_iter() - .collect(); - - for test in tests { - let res = dispatch::<_, Crypto>(&test.ctx, test.msg.clone()); - // Additionally check the events and the output objects in the result. - match res { - Ok(proto_output) => { - assert!(!proto_output.events.is_empty()); // Some events must exist. - - // The object in the output is a ConnectionEnd, should have init state. - let res: ConnectionResult = proto_output.result; - assert_eq!(res.connection_end.state().clone(), State::Init); - - for e in proto_output.events.iter() { - assert!(matches!(e, &IbcEvent::OpenInitConnection(_))); - assert_eq!(e.height(), test.ctx.host_height()); - } - - assert_eq!(res.connection_end.versions(), test.expected_versions); - - // This needs to be last - assert!( + use test_log::test; + + use crate::{ + core::{ + ics02_client::context::ClientReader, + ics03_connection::{ + connection::State, + context::ConnectionReader, + handler::{dispatch, ConnectionResult}, + msgs::{ + conn_open_init::{ + test_util::get_dummy_raw_msg_conn_open_init, MsgConnectionOpenInit, + }, + ConnectionMsg, + }, + version::Version, + }, + }, + events::IbcEvent, + mock::context::{MockClientTypes, MockContext}, + prelude::*, + Height, + }; + + use ibc_proto::ibc::core::connection::v1::Version as RawVersion; + + #[test] + fn conn_open_init_msg_processing() { + struct Test { + name: String, + ctx: MockContext, + msg: ConnectionMsg>, + expected_versions: Vec, + want_pass: bool, + } + + let msg_conn_init_default = + MsgConnectionOpenInit::try_from(get_dummy_raw_msg_conn_open_init()).unwrap(); + let msg_conn_init_no_version = + MsgConnectionOpenInit { version: None, ..msg_conn_init_default.clone() }; + let msg_conn_init_bad_version = MsgConnectionOpenInit { + version: Version::try_from(RawVersion { + identifier: "random identifier 424242".to_string(), + features: vec![], + }) + .unwrap() + .into(), + ..msg_conn_init_default.clone() + }; + let default_context = MockContext::default(); + let good_context = default_context + .clone() + .with_client(&msg_conn_init_default.client_id, Height::new(0, 10)); + + let tests: Vec = vec![ + Test { + name: "Processing fails because no client exists in the context".to_string(), + ctx: default_context, + msg: ConnectionMsg::ConnectionOpenInit(msg_conn_init_default.clone()), + expected_versions: vec![msg_conn_init_default.version.clone().unwrap()], + want_pass: false, + }, + Test { + name: "Incompatible version in MsgConnectionOpenInit msg".to_string(), + ctx: good_context.clone(), + msg: ConnectionMsg::ConnectionOpenInit(msg_conn_init_bad_version), + expected_versions: vec![], + want_pass: false, + }, + Test { + name: "No version in MsgConnectionOpenInit msg".to_string(), + ctx: good_context.clone(), + msg: ConnectionMsg::ConnectionOpenInit(msg_conn_init_no_version), + expected_versions: good_context.get_compatible_versions(), + want_pass: true, + }, + Test { + name: "Good parameters".to_string(), + ctx: good_context, + msg: ConnectionMsg::ConnectionOpenInit(msg_conn_init_default.clone()), + expected_versions: vec![msg_conn_init_default.version.unwrap()], + want_pass: true, + }, + ] + .into_iter() + 
.collect(); + + for test in tests { + let res = dispatch(&test.ctx, test.msg.clone()); + // Additionally check the events and the output objects in the result. + match res { + Ok(proto_output) => { + assert!(!proto_output.events.is_empty()); // Some events must exist. + + // The object in the output is a ConnectionEnd, should have init state. + let res: ConnectionResult = proto_output.result; + assert_eq!(res.connection_end.state().clone(), State::Init); + + for e in proto_output.events.iter() { + assert!(matches!(e, &IbcEvent::OpenInitConnection(_))); + assert_eq!(e.height(), test.ctx.host_height()); + } + + assert_eq!(res.connection_end.versions(), test.expected_versions); + + // This needs to be last + assert!( test.want_pass, "conn_open_init: test passed but was supposed to fail for test: {}, \nparams {:?} {:?}", test.name, test.msg.clone(), test.ctx.clone() ); - } - Err(e) => { - assert!( - !test.want_pass, - "conn_open_init: did not pass test: {}, \nparams {:?} {:?} error: {:?}", - test.name, - test.msg, - test.ctx.clone(), - e, - ); - } - } - } - } + }, + Err(e) => { + assert!( + !test.want_pass, + "conn_open_init: did not pass test: {}, \nparams {:?} {:?} error: {:?}", + test.name, + test.msg, + test.ctx.clone(), + e, + ); + }, + } + } + } } diff --git a/modules/src/core/ics03_connection/handler/conn_open_try.rs b/modules/src/core/ics03_connection/handler/conn_open_try.rs index 422a23b4a1..172d1f80a9 100644 --- a/modules/src/core/ics03_connection/handler/conn_open_try.rs +++ b/modules/src/core/ics03_connection/handler/conn_open_try.rs @@ -1,204 +1,211 @@ //! Protocol logic specific to processing ICS3 messages of type `MsgConnectionOpenTry`. -use crate::clients::host_functions::HostFunctionsProvider; -use crate::core::ics03_connection::connection::{ConnectionEnd, Counterparty, State}; -use crate::core::ics03_connection::error::Error; -use crate::core::ics03_connection::events::Attributes; -use crate::core::ics03_connection::handler::verify::{ - check_client_consensus_height, verify_client_proof, verify_connection_proof, - verify_consensus_proof, +use crate::{ + core::{ + ics03_connection::{ + connection::{ConnectionEnd, Counterparty, State}, + error::Error, + events::Attributes, + handler::{ + verify::{ + check_client_consensus_height, verify_client_proof, verify_connection_proof, + verify_consensus_proof, + }, + ConnectionIdState, ConnectionResult, + }, + msgs::conn_open_try::MsgConnectionOpenTry, + }, + ics24_host::identifier::ConnectionId, + ics26_routing::context::ReaderContext, + }, + events::IbcEvent, + handler::{HandlerOutput, HandlerResult}, + prelude::*, }; -use crate::core::ics03_connection::handler::{ConnectionIdState, ConnectionResult}; -use crate::core::ics03_connection::msgs::conn_open_try::MsgConnectionOpenTry; -use crate::core::ics24_host::identifier::ConnectionId; -use crate::core::ics26_routing::context::ReaderContext; -use crate::events::IbcEvent; -use crate::handler::{HandlerOutput, HandlerResult}; -use crate::prelude::*; - -pub(crate) fn process( - ctx: &dyn ReaderContext, - msg: MsgConnectionOpenTry, + +pub(crate) fn process( + ctx: &Ctx, + msg: MsgConnectionOpenTry, ) -> HandlerResult { - let mut output = HandlerOutput::builder(); - - // Check that consensus height if provided (for client proof) in message is not too advanced nor too old. - if msg.proofs.consensus_proof().is_some() { - check_client_consensus_height(ctx, msg.consensus_height())?; - } - - // Unwrap the old connection end (if any) and its identifier. 
- let (mut new_connection_end, conn_id) = { - // Build a new connection end as well as an identifier. - let conn_end = ConnectionEnd::new( - State::Init, - msg.client_id.clone(), - msg.counterparty.clone(), - msg.counterparty_versions.clone(), - msg.delay_period, - ); - let id_counter = ctx.connection_counter()?; - let conn_id = ConnectionId::new(id_counter); - - output.log(format!( - "success: new connection end and identifier {} generated", - conn_id - )); - (conn_end, conn_id) - }; - - // Proof verification in two steps: - // 1. Setup: build the ConnectionEnd as we expect to find it on the other party. - let expected_conn = ConnectionEnd::new( - State::Init, - msg.counterparty.client_id().clone(), - Counterparty::new(msg.client_id.clone(), None, ctx.commitment_prefix()), - msg.counterparty_versions.clone(), - msg.delay_period, - ); - - let client_state = msg.client_state.ok_or_else(|| { - Error::implementation_specific("client state is required in connOpenTry".into()) - })?; - - let client_proof = msg.proofs.client_proof().as_ref().ok_or_else(|| { - Error::implementation_specific("client proof is required in connOpenTry".into()) - })?; - - let consensus_proof = msg.proofs.consensus_proof().ok_or_else(|| { - Error::implementation_specific("consensus proof is required in connOpenTry".into()) - })?; - - ctx.validate_self_client(&client_state) - .map_err(Error::ics02_client)?; - - verify_connection_proof::( - ctx, - msg.proofs.height(), - &new_connection_end, - &expected_conn, - msg.proofs.height(), - msg.proofs.object_proof(), - )?; - - verify_client_proof::( - ctx, - msg.proofs.height(), - &new_connection_end, - client_state, - msg.proofs.height(), - client_proof, - )?; - - verify_consensus_proof::( - ctx, - msg.proofs.height(), - &new_connection_end, - &consensus_proof, - )?; - - // Transition the connection end to the new state & pick a version. - new_connection_end.set_state(State::TryOpen); - - // Pick the version. - new_connection_end.set_version(ctx.pick_version( - ctx.get_compatible_versions(), - msg.counterparty_versions.clone(), - )?); - - assert_eq!(new_connection_end.versions().len(), 1); - - output.log("success: connection verification passed"); - - let event_attributes = Attributes { - connection_id: Some(conn_id.clone()), - height: ctx.host_height(), - client_id: new_connection_end.client_id().clone(), - counterparty_connection_id: new_connection_end.counterparty().connection_id.clone(), - counterparty_client_id: new_connection_end.counterparty().client_id().clone(), - }; - - let result = ConnectionResult { - connection_id: conn_id, - connection_id_state: ConnectionIdState::Generated, - connection_end: new_connection_end, - }; - - output.emit(IbcEvent::OpenTryConnection(event_attributes.into())); - - Ok(output.with_result(result)) + let mut output = HandlerOutput::builder(); + + // Check that consensus height if provided (for client proof) in message is not too advanced nor + // too old. + if msg.proofs.consensus_proof().is_some() { + check_client_consensus_height(ctx, msg.consensus_height())?; + } + + // Unwrap the old connection end (if any) and its identifier. + let (mut new_connection_end, conn_id) = { + // Build a new connection end as well as an identifier. 
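+		// The identifier is allocated from the host's connection counter, so every
+		// TryOpen produces a fresh connection id (`ConnectionIdState::Generated`).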
+ let conn_end = ConnectionEnd::new( + State::Init, + msg.client_id.clone(), + msg.counterparty.clone(), + msg.counterparty_versions.clone(), + msg.delay_period, + ); + let id_counter = ctx.connection_counter()?; + let conn_id = ConnectionId::new(id_counter); + + output.log(format!("success: new connection end and identifier {} generated", conn_id)); + (conn_end, conn_id) + }; + + // Proof verification in two steps: + // 1. Setup: build the ConnectionEnd as we expect to find it on the other party. + let expected_conn = ConnectionEnd::new( + State::Init, + msg.counterparty.client_id().clone(), + Counterparty::new(msg.client_id.clone(), None, ctx.commitment_prefix()), + msg.counterparty_versions.clone(), + msg.delay_period, + ); + + let client_state = msg.client_state.ok_or_else(|| { + Error::implementation_specific("client state is required in connOpenTry".into()) + })?; + + let client_proof = msg.proofs.client_proof().as_ref().ok_or_else(|| { + Error::implementation_specific("client proof is required in connOpenTry".into()) + })?; + + let consensus_proof = msg.proofs.consensus_proof().ok_or_else(|| { + Error::implementation_specific("consensus proof is required in connOpenTry".into()) + })?; + + ctx.validate_self_client(&client_state).map_err(Error::ics02_client)?; + + verify_connection_proof::<_>( + ctx, + msg.proofs.height(), + &new_connection_end, + &expected_conn, + msg.proofs.height(), + msg.proofs.object_proof(), + )?; + + verify_client_proof::<_>( + ctx, + msg.proofs.height(), + &new_connection_end, + client_state, + msg.proofs.height(), + client_proof, + )?; + + verify_consensus_proof::<_>(ctx, msg.proofs.height(), &new_connection_end, &consensus_proof)?; + + // Transition the connection end to the new state & pick a version. + new_connection_end.set_state(State::TryOpen); + + // Pick the version. 
+ new_connection_end.set_version( + ctx.pick_version(ctx.get_compatible_versions(), msg.counterparty_versions.clone())?, + ); + + assert_eq!(new_connection_end.versions().len(), 1); + + output.log("success: connection verification passed"); + + let event_attributes = Attributes { + connection_id: Some(conn_id.clone()), + height: ctx.host_height(), + client_id: new_connection_end.client_id().clone(), + counterparty_connection_id: new_connection_end.counterparty().connection_id.clone(), + counterparty_client_id: new_connection_end.counterparty().client_id().clone(), + }; + + let result = ConnectionResult { + connection_id: conn_id, + connection_id_state: ConnectionIdState::Generated, + connection_end: new_connection_end, + }; + + output.emit(IbcEvent::OpenTryConnection(event_attributes.into())); + + Ok(output.with_result(result)) } #[cfg(test)] mod tests { - use crate::prelude::*; - - use test_log::test; - - use crate::core::ics02_client::context::ClientReader; - use crate::core::ics03_connection::connection::State; - use crate::core::ics03_connection::handler::{dispatch, ConnectionResult}; - use crate::core::ics03_connection::msgs::conn_open_try::test_util::get_dummy_raw_msg_conn_open_try; - use crate::core::ics03_connection::msgs::conn_open_try::MsgConnectionOpenTry; - use crate::core::ics03_connection::msgs::ConnectionMsg; - use crate::core::ics24_host::identifier::ChainId; - use crate::events::IbcEvent; - use crate::mock::context::MockContext; - use crate::mock::host::HostType; - use crate::test_utils::Crypto; - use crate::Height; - - #[test] - fn conn_open_try_msg_processing() { - struct Test { - name: String, - ctx: MockContext, - msg: ConnectionMsg, - want_pass: bool, - } - - let host_chain_height = Height::new(0, 35); - let max_history_size = 5; - let context = MockContext::new( - ChainId::new("mockgaia".to_string(), 0), - HostType::Mock, - max_history_size, - host_chain_height, - ); - let client_consensus_state_height = 10; - - let msg_conn_try = MsgConnectionOpenTry::try_from(get_dummy_raw_msg_conn_open_try( - client_consensus_state_height, - host_chain_height.revision_height, - )) - .unwrap(); - - // The proof targets a height that does not exist (i.e., too advanced) on destination chain. - let msg_height_advanced = MsgConnectionOpenTry::try_from(get_dummy_raw_msg_conn_open_try( - client_consensus_state_height, - host_chain_height.increment().revision_height, - )) - .unwrap(); - let pruned_height = host_chain_height - .sub(max_history_size as u64 + 1) - .unwrap() - .revision_height; - // The consensus proof targets a missing height (pruned) on destination chain. - let msg_height_old = MsgConnectionOpenTry::try_from(get_dummy_raw_msg_conn_open_try( - client_consensus_state_height, - pruned_height, - )) - .unwrap(); - - // The proofs in this message are created at a height which the client on destination chain does not have. 
- let msg_proof_height_missing = - MsgConnectionOpenTry::try_from(get_dummy_raw_msg_conn_open_try( - client_consensus_state_height - 1, - host_chain_height.revision_height, - )) - .unwrap(); - - let tests: Vec = vec![ + use crate::prelude::*; + + use test_log::test; + + use crate::{ + core::{ + ics02_client::context::ClientReader, + ics03_connection::{ + connection::State, + handler::{dispatch, ConnectionResult}, + msgs::{ + conn_open_try::{ + test_util::get_dummy_raw_msg_conn_open_try, MsgConnectionOpenTry, + }, + ConnectionMsg, + }, + }, + ics24_host::identifier::ChainId, + }, + events::IbcEvent, + mock::{ + context::{MockClientTypes, MockContext}, + host::MockHostType, + }, + Height, + }; + + #[test] + fn conn_open_try_msg_processing() { + struct Test { + name: String, + ctx: MockContext, + msg: ConnectionMsg>, + want_pass: bool, + } + + let host_chain_height = Height::new(0, 35); + let max_history_size = 5; + let context = MockContext::new( + ChainId::new("mockgaia".to_string(), 0), + MockHostType::Mock, + max_history_size, + host_chain_height, + ); + let client_consensus_state_height = 10; + + let msg_conn_try = MsgConnectionOpenTry::try_from(get_dummy_raw_msg_conn_open_try( + client_consensus_state_height, + host_chain_height.revision_height, + )) + .unwrap(); + + // The proof targets a height that does not exist (i.e., too advanced) on destination chain. + let msg_height_advanced = MsgConnectionOpenTry::try_from(get_dummy_raw_msg_conn_open_try( + client_consensus_state_height, + host_chain_height.increment().revision_height, + )) + .unwrap(); + let pruned_height = + host_chain_height.sub(max_history_size as u64 + 1).unwrap().revision_height; + // The consensus proof targets a missing height (pruned) on destination chain. + let msg_height_old = MsgConnectionOpenTry::try_from(get_dummy_raw_msg_conn_open_try( + client_consensus_state_height, + pruned_height, + )) + .unwrap(); + + // The proofs in this message are created at a height which the client on destination chain + // does not have. + let msg_proof_height_missing = + MsgConnectionOpenTry::try_from(get_dummy_raw_msg_conn_open_try( + client_consensus_state_height - 1, + host_chain_height.revision_height, + )) + .unwrap(); + + let tests: Vec = vec![ Test { name: "Processing fails because the height is too advanced".to_string(), ctx: context.clone(), @@ -227,12 +234,12 @@ mod tests { .into_iter() .collect(); - for test in tests { - let res = dispatch::<_, Crypto>(&test.ctx, test.msg.clone()); - // Additionally check the events and the output objects in the result. - match res { - Ok(proto_output) => { - assert!( + for test in tests { + let res = dispatch(&test.ctx, test.msg.clone()); + // Additionally check the events and the output objects in the result. + match res { + Ok(proto_output) => { + assert!( test.want_pass, "conn_open_try: test passed but was supposed to fail for test: {}, \nparams {:?} {:?}", test.name, @@ -240,28 +247,28 @@ mod tests { test.ctx.clone() ); - assert!(!proto_output.events.is_empty()); // Some events must exist. - - // The object in the output is a ConnectionEnd, should have TryOpen state. 
- let res: ConnectionResult = proto_output.result; - assert_eq!(res.connection_end.state().clone(), State::TryOpen); - - for e in proto_output.events.iter() { - assert!(matches!(e, &IbcEvent::OpenTryConnection(_))); - assert_eq!(e.height(), test.ctx.host_height()); - } - } - Err(e) => { - assert!( - !test.want_pass, - "conn_open_try: failed for test: {}, \nparams {:?} {:?} error: {:?}", - test.name, - test.msg, - test.ctx.clone(), - e, - ); - } - } - } - } + assert!(!proto_output.events.is_empty()); // Some events must exist. + + // The object in the output is a ConnectionEnd, should have TryOpen state. + let res: ConnectionResult = proto_output.result; + assert_eq!(res.connection_end.state().clone(), State::TryOpen); + + for e in proto_output.events.iter() { + assert!(matches!(e, &IbcEvent::OpenTryConnection(_))); + assert_eq!(e.height(), test.ctx.host_height()); + } + }, + Err(e) => { + assert!( + !test.want_pass, + "conn_open_try: failed for test: {}, \nparams {:?} {:?} error: {:?}", + test.name, + test.msg, + test.ctx.clone(), + e, + ); + }, + } + } + } } diff --git a/modules/src/core/ics03_connection/handler/verify.rs b/modules/src/core/ics03_connection/handler/verify.rs index 4a83860e2f..2b96fa5de3 100644 --- a/modules/src/core/ics03_connection/handler/verify.rs +++ b/modules/src/core/ics03_connection/handler/verify.rs @@ -1,80 +1,67 @@ //! ICS3 verification functions, common across all four handlers of ICS3. -use crate::clients::host_functions::HostFunctionsProvider; -use crate::core::ics02_client::client_consensus::ConsensusState; -use crate::core::ics02_client::client_state::{AnyClientState, ClientState}; -#[cfg(feature = "ics11_beefy")] -use crate::core::ics02_client::client_type::ClientType; -use crate::core::ics02_client::{client_def::AnyClient, client_def::ClientDef}; -use crate::core::ics03_connection::connection::ConnectionEnd; -use crate::core::ics03_connection::error::Error; -use crate::core::ics23_commitment::commitment::CommitmentProofBytes; -use crate::core::ics26_routing::context::ReaderContext; -use crate::proofs::ConsensusProof; -use crate::Height; -#[cfg(feature = "ics11_beefy")] -use alloc::format; -#[cfg(feature = "ics11_beefy")] -use codec::{Decode, Encode}; -#[cfg(feature = "ics11_beefy")] -use sp_std::vec::Vec; - -#[cfg(feature = "ics11_beefy")] -/// Connection proof type, used in relayer -#[derive(Encode, Decode)] -pub struct ConnectionProof { - pub host_proof: Vec, - pub connection_proof: Vec, -} + +use crate::core::ics02_client::{ + client_consensus::ConsensusState, client_def::ClientDef, client_state::ClientState, +}; + +use crate::{ + core::{ + ics03_connection::{connection::ConnectionEnd, error::Error}, + ics23_commitment::commitment::CommitmentProofBytes, + ics26_routing::context::ReaderContext, + }, + proofs::ConsensusProof, + Height, +}; +use alloc::{format, vec::Vec}; /// Verifies the authenticity and semantic correctness of a commitment `proof`. The commitment /// claims to prove that an object of type connection exists on the source chain (i.e., the chain /// which created this proof). This object must match the state of `expected_conn`. 
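+///
+/// Illustrative call shape (a sketch, not prescriptive): with the generic
+/// context bound, `conn_open_try` invokes this roughly as
+///
+/// ```ignore
+/// verify_connection_proof(
+///     ctx,
+///     msg.proofs.height(),
+///     &new_connection_end,
+///     &expected_conn,
+///     msg.proofs.height(),
+///     msg.proofs.object_proof(),
+/// )?;
+/// ```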
-pub fn verify_connection_proof( - ctx: &dyn ReaderContext, - height: Height, - connection_end: &ConnectionEnd, - expected_conn: &ConnectionEnd, - proof_height: Height, - proof: &CommitmentProofBytes, +pub fn verify_connection_proof( + ctx: &Ctx, + height: Height, + connection_end: &ConnectionEnd, + expected_conn: &ConnectionEnd, + proof_height: Height, + proof: &CommitmentProofBytes, ) -> Result<(), Error> { - // Fetch the client state (IBC client on the local/host chain). - let client_state = ctx - .client_state(connection_end.client_id()) - .map_err(Error::ics02_client)?; - - // The client must not be frozen. - if client_state.is_frozen() { - return Err(Error::frozen_client(connection_end.client_id().clone())); - } - - // The client must have the consensus state for the height where this proof was created. - let consensus_state = ctx - .consensus_state(connection_end.client_id(), proof_height) - .map_err(|e| Error::consensus_state_verification_failure(proof_height, e))?; - - // A counterparty connection id of None causes `unwrap()` below and indicates an internal - // error as this is the connection id on the counterparty chain that must always be present. - let connection_id = connection_end - .counterparty() - .connection_id() - .ok_or_else(Error::invalid_counterparty)?; - - let client_def = AnyClient::::from_client_type(client_state.client_type()); - - // Verify the proof for the connection state against the expected connection end. - client_def - .verify_connection_state( - ctx, - connection_end.client_id(), - &client_state, - height, - connection_end.counterparty().prefix(), - proof, - consensus_state.root(), - connection_id, - expected_conn, - ) - .map_err(Error::verify_connection_state) + // Fetch the client state (IBC client on the local/host chain). + let client_state = ctx.client_state(connection_end.client_id()).map_err(Error::ics02_client)?; + + // The client must not be frozen. + if client_state.is_frozen() { + return Err(Error::frozen_client(connection_end.client_id().clone())) + } + + // The client must have the consensus state for the height where this proof was created. + let consensus_state = ctx + .consensus_state(connection_end.client_id(), proof_height) + .map_err(|e| Error::consensus_state_verification_failure(proof_height, e))?; + + // A counterparty connection id of None causes `unwrap()` below and indicates an internal + // error as this is the connection id on the counterparty chain that must always be present. + let connection_id = connection_end + .counterparty() + .connection_id() + .ok_or_else(Error::invalid_counterparty)?; + + let client_def = client_state.client_def(); + + // Verify the proof for the connection state against the expected connection end. + client_def + .verify_connection_state( + ctx, + connection_end.client_id(), + &client_state, + height, + connection_end.counterparty().prefix(), + proof, + consensus_state.root(), + connection_id, + expected_conn, + ) + .map_err(Error::verify_connection_state) } /// Verifies the client `proof` from a connection handshake message, typically from a @@ -84,131 +71,129 @@ pub fn verify_connection_proof( /// complete verification: that the client state the counterparty stores is valid (i.e., not frozen, /// at the same revision as the current chain, with matching chain identifiers, etc) and that the /// `proof` is correct. 
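+///
+/// In `conn_open_try` this runs only after `ctx.validate_self_client(&client_state)`
+/// has succeeded, so the submitted client state has already been sanity-checked
+/// against the host chain before its inclusion proof is verified here.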
-pub fn verify_client_proof( - ctx: &dyn ReaderContext, - height: Height, - connection_end: &ConnectionEnd, - expected_client_state: AnyClientState, - proof_height: Height, - proof: &CommitmentProofBytes, +pub fn verify_client_proof( + ctx: &Ctx, + height: Height, + connection_end: &ConnectionEnd, + expected_client_state: Ctx::AnyClientState, + proof_height: Height, + proof: &CommitmentProofBytes, ) -> Result<(), Error> { - // Fetch the local client state (IBC client running on the host chain). - let client_state = ctx - .client_state(connection_end.client_id()) - .map_err(Error::ics02_client)?; - - if client_state.is_frozen() { - return Err(Error::frozen_client(connection_end.client_id().clone())); - } - - let consensus_state = ctx - .consensus_state(connection_end.client_id(), proof_height) - .map_err(|e| Error::consensus_state_verification_failure(proof_height, e))?; - - let client_def = AnyClient::::from_client_type(client_state.client_type()); - - client_def - .verify_client_full_state( - ctx, - &client_state, - height, - connection_end.counterparty().prefix(), - proof, - consensus_state.root(), - connection_end.counterparty().client_id(), - &expected_client_state, - ) - .map_err(|e| { - Error::client_state_verification_failure(connection_end.client_id().clone(), e) - }) + // Fetch the local client state (IBC client running on the host chain). + let client_state = ctx.client_state(connection_end.client_id()).map_err(Error::ics02_client)?; + + if client_state.is_frozen() { + return Err(Error::frozen_client(connection_end.client_id().clone())) + } + + let consensus_state = ctx + .consensus_state(connection_end.client_id(), proof_height) + .map_err(|e| Error::consensus_state_verification_failure(proof_height, e))?; + + let client_def = client_state.client_def(); + + client_def + .verify_client_full_state( + ctx, + &client_state, + height, + connection_end.counterparty().prefix(), + proof, + consensus_state.root(), + connection_end.counterparty().client_id(), + &expected_client_state, + ) + .map_err(|e| { + Error::client_state_verification_failure(connection_end.client_id().clone(), e) + }) } -pub fn verify_consensus_proof( - ctx: &dyn ReaderContext, - height: Height, - connection_end: &ConnectionEnd, - proof: &ConsensusProof, +pub fn verify_consensus_proof( + ctx: &Ctx, + height: Height, + connection_end: &ConnectionEnd, + proof: &ConsensusProof, ) -> Result<(), Error> { - // Fetch the client state (IBC client on the local chain). - let client_state = ctx - .client_state(connection_end.client_id()) - .map_err(Error::ics02_client)?; - - if client_state.is_frozen() { - return Err(Error::frozen_client(connection_end.client_id().clone())); - } - - let consensus_state = ctx - .consensus_state(connection_end.client_id(), height) - .map_err(|e| Error::consensus_state_verification_failure(height, e))?; - - let client = AnyClient::::from_client_type(client_state.client_type()); - - let (consensus_proof, expected_consensus) = match ctx.host_client_type() { - #[cfg(feature = "ics11_beefy")] - ClientType::Beefy => { - // if the host is beefy or near, we need to decode the proof before passing it on. - let connection_proof: ConnectionProof = - codec::Decode::decode(&mut proof.proof().as_bytes()).map_err(|e| { - Error::implementation_specific(format!("failed to decode: {:?}", e)) - })?; - // Fetch the expected consensus state from the historical (local) header data. 
- let expected_consensus = ctx - .host_consensus_state(proof.height(), Some(connection_proof.host_proof)) - .map_err(|e| Error::consensus_state_verification_failure(proof.height(), e))?; - ( - CommitmentProofBytes::try_from(connection_proof.connection_proof).map_err(|e| { - Error::implementation_specific(format!("empty proof bytes: {:?}", e)) - })?, - expected_consensus, - ) - } - _ => ( - proof.proof().clone(), - ctx.host_consensus_state(proof.height(), None) - .map_err(|e| Error::consensus_state_verification_failure(proof.height(), e))?, - ), - }; - - client - .verify_client_consensus_state( - ctx, - &client_state, - height, - connection_end.counterparty().prefix(), - &consensus_proof, - consensus_state.root(), - connection_end.counterparty().client_id(), - proof.height(), - &expected_consensus, - ) - .map_err(|e| Error::consensus_state_verification_failure(proof.height(), e))?; - - Ok(()) + // Fetch the client state (IBC client on the local chain). + let client_state = ctx.client_state(connection_end.client_id()).map_err(Error::ics02_client)?; + + if client_state.is_frozen() { + return Err(Error::frozen_client(connection_end.client_id().clone())) + } + + let consensus_state = ctx + .consensus_state(connection_end.client_id(), height) + .map_err(|e| Error::consensus_state_verification_failure(height, e))?; + + let client = client_state.client_def(); + + // todo: we can remove this hack, once this is merged https://github.com/cosmos/ibc/pull/839 + let (consensus_proof, expected_consensus) = match ctx.host_client_type() { + client_type if client_type.contains("beefy") || client_type.contains("near") => { + #[derive(codec::Decode)] + struct ConsensusProofwithHostConsensusStateProof { + host_consensus_state_proof: Vec, + consensus_proof: Vec, + } + // if the host is beefy or near, we need to decode the proof before passing it on. + let connection_proof: ConsensusProofwithHostConsensusStateProof = + codec::Decode::decode(&mut proof.proof().as_bytes()).map_err(|e| { + Error::implementation_specific(format!("failed to decode: {:?}", e)) + })?; + // Fetch the expected consensus state from the historical (local) header data. + let expected_consensus = ctx + .host_consensus_state( + proof.height(), + Some(connection_proof.host_consensus_state_proof), + ) + .map_err(|e| Error::consensus_state_verification_failure(proof.height(), e))?; + ( + CommitmentProofBytes::try_from(connection_proof.consensus_proof).map_err(|e| { + Error::implementation_specific(format!("empty proof bytes: {:?}", e)) + })?, + expected_consensus, + ) + }, + _ => ( + proof.proof().clone(), + ctx.host_consensus_state(proof.height(), None) + .map_err(|e| Error::consensus_state_verification_failure(proof.height(), e))?, + ), + }; + + client + .verify_client_consensus_state( + ctx, + &client_state, + height, + connection_end.counterparty().prefix(), + &consensus_proof, + consensus_state.root(), + connection_end.counterparty().client_id(), + proof.height(), + &expected_consensus, + ) + .map_err(|e| Error::consensus_state_verification_failure(proof.height(), e))?; + + Ok(()) } /// Checks that `claimed_height` is within normal bounds, i.e., fresh enough so that the chain has /// not pruned it yet, but not newer than the current (actual) height of the local chain. 
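+///
+/// Worked example, using the mock values from the handler tests above (host at
+/// height `0-35`, history size 5): a claimed height of `0-36` is rejected as too
+/// advanced, `0-29` has already been pruned and is rejected as stale, and any
+/// height still inside the retained window passes.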
-pub fn check_client_consensus_height( - ctx: &dyn ReaderContext, - claimed_height: Height, +pub fn check_client_consensus_height( + ctx: &Ctx, + claimed_height: Height, ) -> Result<(), Error> { - if claimed_height > ctx.host_height() { - // Fail if the consensus height is too advanced. - return Err(Error::invalid_consensus_height( - claimed_height, - ctx.host_height(), - )); - } - - if claimed_height < ctx.host_oldest_height() { - // Fail if the consensus height is too old (has been pruned). - return Err(Error::stale_consensus_height( - claimed_height, - ctx.host_oldest_height(), - )); - } - - // Height check is within normal bounds, check passes. - Ok(()) + if claimed_height > ctx.host_height() { + // Fail if the consensus height is too advanced. + return Err(Error::invalid_consensus_height(claimed_height, ctx.host_height())) + } + + if claimed_height < ctx.host_oldest_height() { + // Fail if the consensus height is too old (has been pruned). + return Err(Error::stale_consensus_height(claimed_height, ctx.host_oldest_height())) + } + + // Height check is within normal bounds, check passes. + Ok(()) } diff --git a/modules/src/core/ics03_connection/msgs.rs b/modules/src/core/ics03_connection/msgs.rs index ab2cbec661..5d0fd280a2 100644 --- a/modules/src/core/ics03_connection/msgs.rs +++ b/modules/src/core/ics03_connection/msgs.rs @@ -12,11 +12,15 @@ //! Another difference to ICS3 specs is that each message comprises an additional field called //! `signer` which is specific to Cosmos-SDK. -use crate::core::ics03_connection::msgs::conn_open_ack::MsgConnectionOpenAck; -use crate::core::ics03_connection::msgs::conn_open_confirm::MsgConnectionOpenConfirm; -use crate::core::ics03_connection::msgs::conn_open_init::MsgConnectionOpenInit; -use crate::core::ics03_connection::msgs::conn_open_try::MsgConnectionOpenTry; +use crate::core::{ + ics02_client::context::ClientKeeper, + ics03_connection::msgs::{ + conn_open_ack::MsgConnectionOpenAck, conn_open_confirm::MsgConnectionOpenConfirm, + conn_open_init::MsgConnectionOpenInit, conn_open_try::MsgConnectionOpenTry, + }, +}; use alloc::boxed::Box; +use core::fmt::Debug; pub mod conn_open_ack; pub mod conn_open_confirm; @@ -25,28 +29,32 @@ pub mod conn_open_try; /// Enumeration of all possible messages that the ICS3 protocol processes. 
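+///
+/// The enum is now parameterised by a `ClientKeeper`, so call sites name their
+/// context type explicitly. A sketch (roughly how the handler tests wrap a
+/// try-open message, using the mock types those tests import):
+///
+/// ```ignore
+/// let msg = ConnectionMsg::<MockContext<MockClientTypes>>::ConnectionOpenTry(
+///     Box::new(msg_conn_try),
+/// );
+/// ```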
#[derive(Clone, Debug, PartialEq, Eq)] -pub enum ConnectionMsg { - ConnectionOpenInit(MsgConnectionOpenInit), - ConnectionOpenTry(Box), - ConnectionOpenAck(Box), - ConnectionOpenConfirm(MsgConnectionOpenConfirm), +pub enum ConnectionMsg +where + C: ClientKeeper + Clone + Debug + PartialEq + Eq, +{ + ConnectionOpenInit(MsgConnectionOpenInit), + ConnectionOpenTry(Box>), + ConnectionOpenAck(Box>), + ConnectionOpenConfirm(MsgConnectionOpenConfirm), } #[cfg(test)] pub mod test_util { - use crate::core::ics24_host::identifier::{ClientId, ConnectionId}; - use crate::prelude::*; - use ibc_proto::ibc::core::commitment::v1::MerklePrefix; - use ibc_proto::ibc::core::connection::v1::Counterparty as RawCounterparty; + use crate::{ + core::ics24_host::identifier::{ClientId, ConnectionId}, + prelude::*, + }; + use ibc_proto::ibc::core::{ + commitment::v1::MerklePrefix, connection::v1::Counterparty as RawCounterparty, + }; - pub fn get_dummy_raw_counterparty() -> RawCounterparty { - RawCounterparty { - client_id: ClientId::default().to_string(), - connection_id: ConnectionId::default().to_string(), - prefix: Some(MerklePrefix { - key_prefix: b"ibc".to_vec(), - }), - } - } + pub fn get_dummy_raw_counterparty() -> RawCounterparty { + RawCounterparty { + client_id: ClientId::default().to_string(), + connection_id: ConnectionId::default().to_string(), + prefix: Some(MerklePrefix { key_prefix: b"ibc".to_vec() }), + } + } } diff --git a/modules/src/core/ics03_connection/msgs/conn_open_ack.rs b/modules/src/core/ics03_connection/msgs/conn_open_ack.rs index 64d4bcbe5d..da43b36839 100644 --- a/modules/src/core/ics03_connection/msgs/conn_open_ack.rs +++ b/modules/src/core/ics03_connection/msgs/conn_open_ack.rs @@ -1,273 +1,283 @@ use crate::prelude::*; +use core::fmt::Display; -use ibc_proto::ibc::core::connection::v1::MsgConnectionOpenAck as RawMsgConnectionOpenAck; +use crate::core::ics02_client; +use ibc_proto::{ + google::protobuf::Any, + ibc::core::connection::{v1, v1::MsgConnectionOpenAck as RawMsgConnectionOpenAck}, +}; use tendermint_proto::Protobuf; -use crate::core::ics02_client::client_state::AnyClientState; -use crate::core::ics03_connection::error::Error; -use crate::core::ics03_connection::version::Version; -use crate::core::ics23_commitment::commitment::CommitmentProofBytes; -use crate::core::ics24_host::identifier::ConnectionId; -use crate::proofs::{ConsensusProof, Proofs}; -use crate::signer::Signer; -use crate::tx_msg::Msg; -use crate::Height; +use crate::{ + core::{ + ics02_client::context::ClientKeeper, + ics03_connection::{error::Error, version::Version}, + ics23_commitment::commitment::CommitmentProofBytes, + ics24_host::identifier::ConnectionId, + }, + proofs::{ConsensusProof, Proofs}, + signer::Signer, + tx_msg::Msg, + Height, +}; pub const TYPE_URL: &str = "/ibc.core.connection.v1.MsgConnectionOpenAck"; /// Message definition `MsgConnectionOpenAck` (i.e., `ConnOpenAck` datagram). #[derive(Clone, Debug, PartialEq, Eq)] -pub struct MsgConnectionOpenAck { - pub connection_id: ConnectionId, - pub counterparty_connection_id: ConnectionId, - pub client_state: Option, - pub proofs: Proofs, - pub version: Version, - pub signer: Signer, +pub struct MsgConnectionOpenAck { + pub connection_id: ConnectionId, + pub counterparty_connection_id: ConnectionId, + pub client_state: Option, + pub proofs: Proofs, + pub version: Version, + pub signer: Signer, } -impl MsgConnectionOpenAck { - /// Getter for accessing the `consensus_height` field from this message. 
Returns the special - /// value `Height(0)` if this field is not set. - pub fn consensus_height(&self) -> Height { - match self.proofs.consensus_proof() { - None => Height::zero(), - Some(p) => p.height(), - } - } +impl MsgConnectionOpenAck { + /// Getter for accessing the `consensus_height` field from this message. Returns the special + /// value `Height(0)` if this field is not set. + pub fn consensus_height(&self) -> Height { + match self.proofs.consensus_proof() { + None => Height::zero(), + Some(p) => p.height(), + } + } } -impl Msg for MsgConnectionOpenAck { - type ValidationError = Error; - type Raw = RawMsgConnectionOpenAck; +impl Msg for MsgConnectionOpenAck +where + C: ClientKeeper + Clone, + Any: From, +{ + type ValidationError = Error; + type Raw = RawMsgConnectionOpenAck; - fn route(&self) -> String { - crate::keys::ROUTER_KEY.to_string() - } + fn route(&self) -> String { + crate::keys::ROUTER_KEY.to_string() + } - fn type_url(&self) -> String { - TYPE_URL.to_string() - } + fn type_url(&self) -> String { + TYPE_URL.to_string() + } } -impl Protobuf for MsgConnectionOpenAck {} +impl Protobuf for MsgConnectionOpenAck +where + C: ClientKeeper + Clone, + Any: From, + MsgConnectionOpenAck: TryFrom, + as TryFrom>::Error: Display, +{ +} -impl TryFrom for MsgConnectionOpenAck { - type Error = Error; +impl TryFrom for MsgConnectionOpenAck +where + C: ClientKeeper, + C::AnyClientState: TryFrom, +{ + type Error = Error; - fn try_from(msg: RawMsgConnectionOpenAck) -> Result { - let consensus_proof_obj = { - let proof_bytes: Option = msg.proof_consensus.try_into().ok(); - let consensus_height = msg - .consensus_height - .map(|height| Height::new(height.revision_number, height.revision_height)); - if proof_bytes.is_some() && consensus_height.is_some() { - Some( - ConsensusProof::new(proof_bytes.unwrap(), consensus_height.unwrap()) - .map_err(Error::invalid_proof)?, - ) - } else { - None - } - }; + fn try_from(msg: RawMsgConnectionOpenAck) -> Result { + let consensus_proof_obj = { + let proof_bytes: Option = msg.proof_consensus.try_into().ok(); + let consensus_height = msg + .consensus_height + .map(|height| Height::new(height.revision_number, height.revision_height)); + if proof_bytes.is_some() && consensus_height.is_some() { + Some( + ConsensusProof::new(proof_bytes.unwrap(), consensus_height.unwrap()) + .map_err(Error::invalid_proof)?, + ) + } else { + None + } + }; - let proof_height = msg - .proof_height - .ok_or_else(Error::missing_proof_height)? 
- .into(); + let proof_height = msg.proof_height.ok_or_else(Error::missing_proof_height)?.into(); - let client_proof = - CommitmentProofBytes::try_from(msg.proof_client).map_err(Error::invalid_proof)?; + let client_proof = + CommitmentProofBytes::try_from(msg.proof_client).map_err(Error::invalid_proof)?; - Ok(Self { - connection_id: msg - .connection_id - .parse() - .map_err(Error::invalid_identifier)?, - counterparty_connection_id: msg - .counterparty_connection_id - .parse() - .map_err(Error::invalid_identifier)?, - client_state: msg - .client_state - .map(AnyClientState::try_from) - .transpose() - .map_err(Error::ics02_client)?, - version: msg.version.ok_or_else(Error::empty_versions)?.try_into()?, - proofs: Proofs::new( - msg.proof_try.try_into().map_err(Error::invalid_proof)?, - Some(client_proof), - consensus_proof_obj, - None, - proof_height, - ) - .map_err(Error::invalid_proof)?, - signer: msg.signer.parse().map_err(Error::signer)?, - }) - } + Ok(Self { + connection_id: msg.connection_id.parse().map_err(Error::invalid_identifier)?, + counterparty_connection_id: msg + .counterparty_connection_id + .parse() + .map_err(Error::invalid_identifier)?, + client_state: msg + .client_state + .map(C::AnyClientState::try_from) + .transpose() + .map_err(Error::ics02_client)?, + version: msg.version.ok_or_else(Error::empty_versions)?.try_into()?, + proofs: Proofs::new( + msg.proof_try.try_into().map_err(Error::invalid_proof)?, + Some(client_proof), + consensus_proof_obj, + None, + proof_height, + ) + .map_err(Error::invalid_proof)?, + signer: msg.signer.parse().map_err(Error::signer)?, + }) + } } -impl From for RawMsgConnectionOpenAck { - fn from(ics_msg: MsgConnectionOpenAck) -> Self { - RawMsgConnectionOpenAck { - connection_id: ics_msg.connection_id.as_str().to_string(), - counterparty_connection_id: ics_msg.counterparty_connection_id.as_str().to_string(), - client_state: ics_msg - .client_state - .map_or_else(|| None, |v| Some(v.into())), - proof_height: Some(ics_msg.proofs.height().into()), - proof_try: ics_msg.proofs.object_proof().clone().into(), - proof_client: ics_msg - .proofs - .client_proof() - .clone() - .map_or_else(Vec::new, |v| v.into()), - proof_consensus: ics_msg - .proofs - .consensus_proof() - .map_or_else(Vec::new, |v| v.proof().clone().into()), - consensus_height: ics_msg - .proofs - .consensus_proof() - .map_or_else(|| None, |h| Some(h.height().into())), - version: Some(ics_msg.version.into()), - signer: ics_msg.signer.to_string(), - } - } +impl From> for RawMsgConnectionOpenAck +where + C: ClientKeeper, + Any: From, +{ + fn from(ics_msg: MsgConnectionOpenAck) -> Self { + RawMsgConnectionOpenAck { + connection_id: ics_msg.connection_id.as_str().to_string(), + counterparty_connection_id: ics_msg.counterparty_connection_id.as_str().to_string(), + client_state: ics_msg.client_state.map_or_else(|| None, |v| Some(v.into())), + proof_height: Some(ics_msg.proofs.height().into()), + proof_try: ics_msg.proofs.object_proof().clone().into(), + proof_client: ics_msg.proofs.client_proof().clone().map_or_else(Vec::new, |v| v.into()), + proof_consensus: ics_msg + .proofs + .consensus_proof() + .map_or_else(Vec::new, |v| v.proof().clone().into()), + consensus_height: ics_msg + .proofs + .consensus_proof() + .map_or_else(|| None, |h| Some(h.height().into())), + version: Some(ics_msg.version.into()), + signer: ics_msg.signer.to_string(), + } + } } #[cfg(test)] pub mod test_util { - use crate::core::ics02_client::client_state::AnyClientState; - use crate::prelude::*; - use 
ibc_proto::ibc::core::client::v1::Height; - use ibc_proto::ibc::core::connection::v1::MsgConnectionOpenAck as RawMsgConnectionOpenAck; + use crate::{mock::client_state::AnyClientState, prelude::*}; + use ibc_proto::ibc::core::{ + client::v1::Height, connection::v1::MsgConnectionOpenAck as RawMsgConnectionOpenAck, + }; - use crate::core::ics03_connection::version::Version; - use crate::core::ics24_host::identifier::ConnectionId; - use crate::mock::client_state::MockClientState; - use crate::mock::header::MockHeader; - use crate::test_utils::{get_dummy_bech32_account, get_dummy_proof}; + use crate::{ + core::{ics03_connection::version::Version, ics24_host::identifier::ConnectionId}, + mock::{client_state::MockClientState, header::MockHeader}, + test_utils::{get_dummy_bech32_account, get_dummy_proof}, + }; - pub fn get_dummy_raw_msg_conn_open_ack( - proof_height: u64, - consensus_height: u64, - ) -> RawMsgConnectionOpenAck { - RawMsgConnectionOpenAck { - connection_id: ConnectionId::new(0).to_string(), - counterparty_connection_id: ConnectionId::new(1).to_string(), - proof_try: get_dummy_proof(), - proof_height: Some(Height { - revision_number: 0, - revision_height: proof_height, - }), - proof_consensus: get_dummy_proof(), - consensus_height: Some(Height { - revision_number: 0, - revision_height: consensus_height, - }), - client_state: Some( - AnyClientState::Mock(MockClientState::new(MockHeader::default())).into(), - ), - proof_client: get_dummy_proof(), - version: Some(Version::default().into()), - signer: get_dummy_bech32_account(), - } - } + pub fn get_dummy_raw_msg_conn_open_ack( + proof_height: u64, + consensus_height: u64, + ) -> RawMsgConnectionOpenAck { + RawMsgConnectionOpenAck { + connection_id: ConnectionId::new(0).to_string(), + counterparty_connection_id: ConnectionId::new(1).to_string(), + proof_try: get_dummy_proof(), + proof_height: Some(Height { revision_number: 0, revision_height: proof_height }), + proof_consensus: get_dummy_proof(), + consensus_height: Some(Height { + revision_number: 0, + revision_height: consensus_height, + }), + client_state: Some( + AnyClientState::Mock(MockClientState::new(MockHeader::default())).into(), + ), + proof_client: get_dummy_proof(), + version: Some(Version::default().into()), + signer: get_dummy_bech32_account(), + } + } } #[cfg(test)] mod tests { - use crate::prelude::*; + use crate::prelude::*; - use test_log::test; + use test_log::test; - use ibc_proto::ibc::core::client::v1::Height; - use ibc_proto::ibc::core::connection::v1::MsgConnectionOpenAck as RawMsgConnectionOpenAck; + use ibc_proto::ibc::core::{ + client::v1::Height, connection::v1::MsgConnectionOpenAck as RawMsgConnectionOpenAck, + }; - use crate::core::ics03_connection::msgs::conn_open_ack::test_util::get_dummy_raw_msg_conn_open_ack; - use crate::core::ics03_connection::msgs::conn_open_ack::MsgConnectionOpenAck; + use crate::{ + core::ics03_connection::msgs::conn_open_ack::{ + test_util::get_dummy_raw_msg_conn_open_ack, MsgConnectionOpenAck, + }, + mock::context::{MockClientTypes, MockContext}, + }; - #[test] - fn parse_connection_open_ack_msg() { - #[derive(Clone, Debug, PartialEq)] - struct Test { - name: String, - raw: RawMsgConnectionOpenAck, - want_pass: bool, - } + #[test] + fn parse_connection_open_ack_msg() { + #[derive(Clone, Debug, PartialEq)] + struct Test { + name: String, + raw: RawMsgConnectionOpenAck, + want_pass: bool, + } - let default_ack_msg = get_dummy_raw_msg_conn_open_ack(5, 5); + let default_ack_msg = get_dummy_raw_msg_conn_open_ack(5, 5); - let tests: 
Vec = vec![ - Test { - name: "Good parameters".to_string(), - raw: default_ack_msg.clone(), - want_pass: true, - }, - Test { - name: "Bad connection id, non-alpha".to_string(), - raw: RawMsgConnectionOpenAck { - connection_id: "con007".to_string(), - ..default_ack_msg.clone() - }, - want_pass: false, - }, - Test { - name: "Bad version, missing version".to_string(), - raw: RawMsgConnectionOpenAck { - version: None, - ..default_ack_msg.clone() - }, - want_pass: false, - }, - Test { - name: "Bad proof height, height is 0".to_string(), - raw: RawMsgConnectionOpenAck { - proof_height: Some(Height { - revision_number: 1, - revision_height: 0, - }), - ..default_ack_msg.clone() - }, - want_pass: false, - }, - Test { - name: "Bad consensus height, height is 0".to_string(), - raw: RawMsgConnectionOpenAck { - consensus_height: Some(Height { - revision_number: 1, - revision_height: 0, - }), - ..default_ack_msg - }, - want_pass: false, - }, - ] - .into_iter() - .collect(); + let tests: Vec = vec![ + Test { + name: "Good parameters".to_string(), + raw: default_ack_msg.clone(), + want_pass: true, + }, + Test { + name: "Bad connection id, non-alpha".to_string(), + raw: RawMsgConnectionOpenAck { + connection_id: "con007".to_string(), + ..default_ack_msg.clone() + }, + want_pass: false, + }, + Test { + name: "Bad version, missing version".to_string(), + raw: RawMsgConnectionOpenAck { version: None, ..default_ack_msg.clone() }, + want_pass: false, + }, + Test { + name: "Bad proof height, height is 0".to_string(), + raw: RawMsgConnectionOpenAck { + proof_height: Some(Height { revision_number: 1, revision_height: 0 }), + ..default_ack_msg.clone() + }, + want_pass: false, + }, + Test { + name: "Bad consensus height, height is 0".to_string(), + raw: RawMsgConnectionOpenAck { + consensus_height: Some(Height { revision_number: 1, revision_height: 0 }), + ..default_ack_msg + }, + want_pass: false, + }, + ] + .into_iter() + .collect(); - for test in tests { - let msg = MsgConnectionOpenAck::try_from(test.raw.clone()); + for test in tests { + let msg = + MsgConnectionOpenAck::>::try_from(test.raw.clone()); - assert_eq!( - test.want_pass, - msg.is_ok(), - "MsgConnOpenAck::new failed for test {}, \nmsg {:?} with error {:?}", - test.name, - test.raw, - msg.err(), - ); - } - } + assert_eq!( + test.want_pass, + msg.is_ok(), + "MsgConnOpenAck::new failed for test {}, \nmsg {:?} with error {:?}", + test.name, + test.raw, + msg.err(), + ); + } + } - #[test] - fn to_and_from() { - let raw = get_dummy_raw_msg_conn_open_ack(5, 6); - let msg = MsgConnectionOpenAck::try_from(raw.clone()).unwrap(); - let raw_back = RawMsgConnectionOpenAck::from(msg.clone()); - let msg_back = MsgConnectionOpenAck::try_from(raw_back.clone()).unwrap(); - assert_eq!(raw, raw_back); - assert_eq!(msg, msg_back); - } + #[test] + fn to_and_from() { + let raw = get_dummy_raw_msg_conn_open_ack(5, 6); + let msg = + MsgConnectionOpenAck::>::try_from(raw.clone()).unwrap(); + let raw_back = RawMsgConnectionOpenAck::from(msg.clone()); + let msg_back = MsgConnectionOpenAck::try_from(raw_back.clone()).unwrap(); + assert_eq!(raw, raw_back); + assert_eq!(msg, msg_back); + } } diff --git a/modules/src/core/ics03_connection/msgs/conn_open_confirm.rs b/modules/src/core/ics03_connection/msgs/conn_open_confirm.rs index 8a15be9860..f5c31396f1 100644 --- a/modules/src/core/ics03_connection/msgs/conn_open_confirm.rs +++ b/modules/src/core/ics03_connection/msgs/conn_open_confirm.rs @@ -4,170 +4,161 @@ use tendermint_proto::Protobuf; use 
ibc_proto::ibc::core::connection::v1::MsgConnectionOpenConfirm as RawMsgConnectionOpenConfirm; -use crate::core::ics03_connection::error::Error; -use crate::core::ics24_host::identifier::ConnectionId; -use crate::proofs::Proofs; -use crate::signer::Signer; -use crate::tx_msg::Msg; +use crate::{ + core::{ics03_connection::error::Error, ics24_host::identifier::ConnectionId}, + proofs::Proofs, + signer::Signer, + tx_msg::Msg, +}; pub const TYPE_URL: &str = "/ibc.core.connection.v1.MsgConnectionOpenConfirm"; /// /// Message definition for `MsgConnectionOpenConfirm` (i.e., `ConnOpenConfirm` datagram). -/// #[derive(Clone, Debug, PartialEq, Eq)] pub struct MsgConnectionOpenConfirm { - pub connection_id: ConnectionId, - pub proofs: Proofs, - pub signer: Signer, + pub connection_id: ConnectionId, + pub proofs: Proofs, + pub signer: Signer, } impl Msg for MsgConnectionOpenConfirm { - type ValidationError = Error; - type Raw = RawMsgConnectionOpenConfirm; + type ValidationError = Error; + type Raw = RawMsgConnectionOpenConfirm; - fn route(&self) -> String { - crate::keys::ROUTER_KEY.to_string() - } + fn route(&self) -> String { + crate::keys::ROUTER_KEY.to_string() + } - fn type_url(&self) -> String { - TYPE_URL.to_string() - } + fn type_url(&self) -> String { + TYPE_URL.to_string() + } } impl Protobuf for MsgConnectionOpenConfirm {} impl TryFrom for MsgConnectionOpenConfirm { - type Error = Error; - - fn try_from(msg: RawMsgConnectionOpenConfirm) -> Result { - let proof_height = msg - .proof_height - .ok_or_else(Error::missing_proof_height)? - .into(); - - Ok(Self { - connection_id: msg - .connection_id - .parse() - .map_err(Error::invalid_identifier)?, - proofs: Proofs::new( - msg.proof_ack.try_into().map_err(Error::invalid_proof)?, - None, - None, - None, - proof_height, - ) - .map_err(Error::invalid_proof)?, - signer: msg.signer.parse().map_err(Error::signer)?, - }) - } + type Error = Error; + + fn try_from(msg: RawMsgConnectionOpenConfirm) -> Result { + let proof_height = msg.proof_height.ok_or_else(Error::missing_proof_height)?.into(); + + Ok(Self { + connection_id: msg.connection_id.parse().map_err(Error::invalid_identifier)?, + proofs: Proofs::new( + msg.proof_ack.try_into().map_err(Error::invalid_proof)?, + None, + None, + None, + proof_height, + ) + .map_err(Error::invalid_proof)?, + signer: msg.signer.parse().map_err(Error::signer)?, + }) + } } impl From for RawMsgConnectionOpenConfirm { - fn from(ics_msg: MsgConnectionOpenConfirm) -> Self { - RawMsgConnectionOpenConfirm { - connection_id: ics_msg.connection_id.as_str().to_string(), - proof_ack: ics_msg.proofs.object_proof().clone().into(), - proof_height: Some(ics_msg.proofs.height().into()), - signer: ics_msg.signer.to_string(), - } - } + fn from(ics_msg: MsgConnectionOpenConfirm) -> Self { + RawMsgConnectionOpenConfirm { + connection_id: ics_msg.connection_id.as_str().to_string(), + proof_ack: ics_msg.proofs.object_proof().clone().into(), + proof_height: Some(ics_msg.proofs.height().into()), + signer: ics_msg.signer.to_string(), + } + } } #[cfg(test)] pub mod test_util { - use crate::prelude::*; - use ibc_proto::ibc::core::client::v1::Height; - use ibc_proto::ibc::core::connection::v1::MsgConnectionOpenConfirm as RawMsgConnectionOpenConfirm; - - use crate::test_utils::{get_dummy_bech32_account, get_dummy_proof}; - - pub fn get_dummy_raw_msg_conn_open_confirm() -> RawMsgConnectionOpenConfirm { - RawMsgConnectionOpenConfirm { - connection_id: "srcconnection".to_string(), - proof_ack: get_dummy_proof(), - proof_height: Some(Height { - 
revision_number: 0, - revision_height: 10, - }), - signer: get_dummy_bech32_account(), - } - } + use crate::prelude::*; + use ibc_proto::ibc::core::{ + client::v1::Height, connection::v1::MsgConnectionOpenConfirm as RawMsgConnectionOpenConfirm, + }; + + use crate::test_utils::{get_dummy_bech32_account, get_dummy_proof}; + + pub fn get_dummy_raw_msg_conn_open_confirm() -> RawMsgConnectionOpenConfirm { + RawMsgConnectionOpenConfirm { + connection_id: "srcconnection".to_string(), + proof_ack: get_dummy_proof(), + proof_height: Some(Height { revision_number: 0, revision_height: 10 }), + signer: get_dummy_bech32_account(), + } + } } #[cfg(test)] mod tests { - use crate::prelude::*; - - use test_log::test; - - use ibc_proto::ibc::core::client::v1::Height; - use ibc_proto::ibc::core::connection::v1::MsgConnectionOpenConfirm as RawMsgConnectionOpenConfirm; - - use crate::core::ics03_connection::msgs::conn_open_confirm::test_util::get_dummy_raw_msg_conn_open_confirm; - use crate::core::ics03_connection::msgs::conn_open_confirm::MsgConnectionOpenConfirm; - - #[test] - fn parse_connection_open_confirm_msg() { - #[derive(Clone, Debug, PartialEq)] - struct Test { - name: String, - raw: RawMsgConnectionOpenConfirm, - want_pass: bool, - } - - let default_ack_msg = get_dummy_raw_msg_conn_open_confirm(); - let tests: Vec = vec![ - Test { - name: "Good parameters".to_string(), - raw: default_ack_msg.clone(), - want_pass: true, - }, - Test { - name: "Bad connection id, non-alpha".to_string(), - raw: RawMsgConnectionOpenConfirm { - connection_id: "con007".to_string(), - ..default_ack_msg.clone() - }, - want_pass: false, - }, - Test { - name: "Bad proof height, height is 0".to_string(), - raw: RawMsgConnectionOpenConfirm { - proof_height: Some(Height { - revision_number: 1, - revision_height: 0, - }), - ..default_ack_msg - }, - want_pass: false, - }, - ] - .into_iter() - .collect(); - - for test in tests { - let msg = MsgConnectionOpenConfirm::try_from(test.raw.clone()); - - assert_eq!( - test.want_pass, - msg.is_ok(), - "MsgConnOpenTry::new failed for test {}, \nmsg {:?} with error {:?}", - test.name, - test.raw, - msg.err(), - ); - } - } - - #[test] - fn to_and_from() { - let raw = get_dummy_raw_msg_conn_open_confirm(); - let msg = MsgConnectionOpenConfirm::try_from(raw.clone()).unwrap(); - let raw_back = RawMsgConnectionOpenConfirm::from(msg.clone()); - let msg_back = MsgConnectionOpenConfirm::try_from(raw_back.clone()).unwrap(); - assert_eq!(raw, raw_back); - assert_eq!(msg, msg_back); - } + use crate::prelude::*; + + use test_log::test; + + use ibc_proto::ibc::core::{ + client::v1::Height, connection::v1::MsgConnectionOpenConfirm as RawMsgConnectionOpenConfirm, + }; + + use crate::core::ics03_connection::msgs::conn_open_confirm::{ + test_util::get_dummy_raw_msg_conn_open_confirm, MsgConnectionOpenConfirm, + }; + + #[test] + fn parse_connection_open_confirm_msg() { + #[derive(Clone, Debug, PartialEq)] + struct Test { + name: String, + raw: RawMsgConnectionOpenConfirm, + want_pass: bool, + } + + let default_ack_msg = get_dummy_raw_msg_conn_open_confirm(); + let tests: Vec = vec![ + Test { + name: "Good parameters".to_string(), + raw: default_ack_msg.clone(), + want_pass: true, + }, + Test { + name: "Bad connection id, non-alpha".to_string(), + raw: RawMsgConnectionOpenConfirm { + connection_id: "con007".to_string(), + ..default_ack_msg.clone() + }, + want_pass: false, + }, + Test { + name: "Bad proof height, height is 0".to_string(), + raw: RawMsgConnectionOpenConfirm { + proof_height: Some(Height { 
revision_number: 1, revision_height: 0 }), + ..default_ack_msg + }, + want_pass: false, + }, + ] + .into_iter() + .collect(); + + for test in tests { + let msg = MsgConnectionOpenConfirm::try_from(test.raw.clone()); + + assert_eq!( + test.want_pass, + msg.is_ok(), + "MsgConnOpenTry::new failed for test {}, \nmsg {:?} with error {:?}", + test.name, + test.raw, + msg.err(), + ); + } + } + + #[test] + fn to_and_from() { + let raw = get_dummy_raw_msg_conn_open_confirm(); + let msg = MsgConnectionOpenConfirm::try_from(raw.clone()).unwrap(); + let raw_back = RawMsgConnectionOpenConfirm::from(msg.clone()); + let msg_back = MsgConnectionOpenConfirm::try_from(raw_back.clone()).unwrap(); + assert_eq!(raw, raw_back); + assert_eq!(msg, msg_back); + } } diff --git a/modules/src/core/ics03_connection/msgs/conn_open_init.rs b/modules/src/core/ics03_connection/msgs/conn_open_init.rs index 196fbcf119..ba8f588b49 100644 --- a/modules/src/core/ics03_connection/msgs/conn_open_init.rs +++ b/modules/src/core/ics03_connection/msgs/conn_open_init.rs @@ -5,179 +5,187 @@ use core::time::Duration; use ibc_proto::ibc::core::connection::v1::MsgConnectionOpenInit as RawMsgConnectionOpenInit; use tendermint_proto::Protobuf; -use crate::core::ics03_connection::connection::Counterparty; -use crate::core::ics03_connection::error::Error; -use crate::core::ics03_connection::version::Version; -use crate::core::ics24_host::identifier::ClientId; -use crate::signer::Signer; -use crate::tx_msg::Msg; +use crate::{ + core::{ + ics03_connection::{connection::Counterparty, error::Error, version::Version}, + ics24_host::identifier::ClientId, + }, + signer::Signer, + tx_msg::Msg, +}; pub const TYPE_URL: &str = "/ibc.core.connection.v1.MsgConnectionOpenInit"; /// /// Message definition `MsgConnectionOpenInit` (i.e., the `ConnOpenInit` datagram). -/// #[derive(Clone, Debug, PartialEq, Eq)] pub struct MsgConnectionOpenInit { - pub client_id: ClientId, - pub counterparty: Counterparty, - pub version: Option, - pub delay_period: Duration, - pub signer: Signer, + pub client_id: ClientId, + pub counterparty: Counterparty, + pub version: Option, + pub delay_period: Duration, + pub signer: Signer, } impl Msg for MsgConnectionOpenInit { - type ValidationError = Error; - type Raw = RawMsgConnectionOpenInit; + type ValidationError = Error; + type Raw = RawMsgConnectionOpenInit; - fn route(&self) -> String { - crate::keys::ROUTER_KEY.to_string() - } + fn route(&self) -> String { + crate::keys::ROUTER_KEY.to_string() + } - fn type_url(&self) -> String { - TYPE_URL.to_string() - } + fn type_url(&self) -> String { + TYPE_URL.to_string() + } } impl Protobuf for MsgConnectionOpenInit {} impl TryFrom for MsgConnectionOpenInit { - type Error = Error; - - fn try_from(msg: RawMsgConnectionOpenInit) -> Result { - Ok(Self { - client_id: msg.client_id.parse().map_err(Error::invalid_identifier)?, - counterparty: msg - .counterparty - .ok_or_else(Error::missing_counterparty)? 
- .try_into()?, - version: msg.version.map(|version| version.try_into()).transpose()?, - delay_period: Duration::from_nanos(msg.delay_period), - signer: msg.signer.parse().map_err(Error::signer)?, - }) - } + type Error = Error; + + fn try_from(msg: RawMsgConnectionOpenInit) -> Result { + Ok(Self { + client_id: msg.client_id.parse().map_err(Error::invalid_identifier)?, + counterparty: msg.counterparty.ok_or_else(Error::missing_counterparty)?.try_into()?, + version: msg.version.map(|version| version.try_into()).transpose()?, + delay_period: Duration::from_nanos(msg.delay_period), + signer: msg.signer.parse().map_err(Error::signer)?, + }) + } } impl From for RawMsgConnectionOpenInit { - fn from(ics_msg: MsgConnectionOpenInit) -> Self { - RawMsgConnectionOpenInit { - client_id: ics_msg.client_id.as_str().to_string(), - counterparty: Some(ics_msg.counterparty.into()), - version: ics_msg.version.map(|version| version.into()), - delay_period: ics_msg.delay_period.as_nanos() as u64, - signer: ics_msg.signer.to_string(), - } - } + fn from(ics_msg: MsgConnectionOpenInit) -> Self { + RawMsgConnectionOpenInit { + client_id: ics_msg.client_id.as_str().to_string(), + counterparty: Some(ics_msg.counterparty.into()), + version: ics_msg.version.map(|version| version.into()), + delay_period: ics_msg.delay_period.as_nanos() as u64, + signer: ics_msg.signer.to_string(), + } + } } #[cfg(test)] pub mod test_util { - use crate::prelude::*; - use ibc_proto::ibc::core::connection::v1::MsgConnectionOpenInit as RawMsgConnectionOpenInit; - - use crate::core::ics03_connection::msgs::conn_open_init::MsgConnectionOpenInit; - use crate::core::ics03_connection::msgs::test_util::get_dummy_raw_counterparty; - use crate::core::ics03_connection::version::Version; - use crate::core::ics24_host::identifier::ClientId; - use crate::test_utils::get_dummy_bech32_account; - - /// Extends the implementation with additional helper methods. - impl MsgConnectionOpenInit { - /// Setter for `client_id`. Amenable to chaining, since it consumes the input message. - pub fn with_client_id(self, client_id: ClientId) -> Self { - MsgConnectionOpenInit { client_id, ..self } - } - } - - /// Returns a dummy message, for testing only. - /// Other unit tests may import this if they depend on a MsgConnectionOpenInit. - pub fn get_dummy_raw_msg_conn_open_init() -> RawMsgConnectionOpenInit { - RawMsgConnectionOpenInit { - client_id: ClientId::default().to_string(), - counterparty: Some(get_dummy_raw_counterparty()), - version: Some(Version::default().into()), - delay_period: 0, - signer: get_dummy_bech32_account(), - } - } + use crate::prelude::*; + use ibc_proto::ibc::core::connection::v1::MsgConnectionOpenInit as RawMsgConnectionOpenInit; + + use crate::{ + core::{ + ics03_connection::{ + msgs::{ + conn_open_init::MsgConnectionOpenInit, test_util::get_dummy_raw_counterparty, + }, + version::Version, + }, + ics24_host::identifier::ClientId, + }, + test_utils::get_dummy_bech32_account, + }; + + /// Extends the implementation with additional helper methods. + impl MsgConnectionOpenInit { + /// Setter for `client_id`. Amenable to chaining, since it consumes the input message. + pub fn with_client_id(self, client_id: ClientId) -> Self { + MsgConnectionOpenInit { client_id, ..self } + } + } + + /// Returns a dummy message, for testing only. + /// Other unit tests may import this if they depend on a MsgConnectionOpenInit. 
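+	///
+	/// A minimal sketch of the round-trip the tests below perform with this dummy:
+	///
+	/// ```ignore
+	/// let raw = get_dummy_raw_msg_conn_open_init();
+	/// let msg = MsgConnectionOpenInit::try_from(raw.clone()).unwrap();
+	/// assert_eq!(RawMsgConnectionOpenInit::from(msg), raw);
+	/// ```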
+ pub fn get_dummy_raw_msg_conn_open_init() -> RawMsgConnectionOpenInit { + RawMsgConnectionOpenInit { + client_id: ClientId::default().to_string(), + counterparty: Some(get_dummy_raw_counterparty()), + version: Some(Version::default().into()), + delay_period: 0, + signer: get_dummy_bech32_account(), + } + } } #[cfg(test)] mod tests { - use crate::prelude::*; - - use test_log::test; - - use ibc_proto::ibc::core::connection::v1::Counterparty as RawCounterparty; - use ibc_proto::ibc::core::connection::v1::MsgConnectionOpenInit as RawMsgConnectionOpenInit; - - use super::MsgConnectionOpenInit; - use crate::core::ics03_connection::msgs::conn_open_init::test_util::get_dummy_raw_msg_conn_open_init; - use crate::core::ics03_connection::msgs::test_util::get_dummy_raw_counterparty; - - #[test] - fn parse_connection_open_init_msg() { - #[derive(Clone, Debug, PartialEq)] - struct Test { - name: String, - raw: RawMsgConnectionOpenInit, - want_pass: bool, - } - - let default_init_msg = get_dummy_raw_msg_conn_open_init(); - - let tests: Vec = vec![ - Test { - name: "Good parameters".to_string(), - raw: default_init_msg.clone(), - want_pass: true, - }, - Test { - name: "Bad client id, name too short".to_string(), - raw: RawMsgConnectionOpenInit { - client_id: "client".to_string(), - ..default_init_msg.clone() - }, - want_pass: false, - }, - Test { - name: "Bad destination connection id, name too long".to_string(), - raw: RawMsgConnectionOpenInit { - counterparty: Some(RawCounterparty { - connection_id: - "abcdefghijksdffjssdkflweldflsfladfsfwjkrekcmmsdfsdfjflddmnopqrstu" - .to_string(), - ..get_dummy_raw_counterparty() - }), - ..default_init_msg - }, - want_pass: false, - }, - ] - .into_iter() - .collect(); - - for test in tests { - let msg = MsgConnectionOpenInit::try_from(test.raw.clone()); - - assert_eq!( - test.want_pass, - msg.is_ok(), - "MsgConnOpenInit::new failed for test {}, \nmsg {:?} with error {:?}", - test.name, - test.raw, - msg.err(), - ); - } - } - - #[test] - fn to_and_from() { - let raw = get_dummy_raw_msg_conn_open_init(); - let msg = MsgConnectionOpenInit::try_from(raw.clone()).unwrap(); - let raw_back = RawMsgConnectionOpenInit::from(msg.clone()); - let msg_back = MsgConnectionOpenInit::try_from(raw_back.clone()).unwrap(); - assert_eq!(raw, raw_back); - assert_eq!(msg, msg_back); - } + use crate::prelude::*; + + use test_log::test; + + use ibc_proto::ibc::core::connection::v1::{ + Counterparty as RawCounterparty, MsgConnectionOpenInit as RawMsgConnectionOpenInit, + }; + + use super::MsgConnectionOpenInit; + use crate::core::ics03_connection::msgs::{ + conn_open_init::test_util::get_dummy_raw_msg_conn_open_init, + test_util::get_dummy_raw_counterparty, + }; + + #[test] + fn parse_connection_open_init_msg() { + #[derive(Clone, Debug, PartialEq)] + struct Test { + name: String, + raw: RawMsgConnectionOpenInit, + want_pass: bool, + } + + let default_init_msg = get_dummy_raw_msg_conn_open_init(); + + let tests: Vec = vec![ + Test { + name: "Good parameters".to_string(), + raw: default_init_msg.clone(), + want_pass: true, + }, + Test { + name: "Bad client id, name too short".to_string(), + raw: RawMsgConnectionOpenInit { + client_id: "client".to_string(), + ..default_init_msg.clone() + }, + want_pass: false, + }, + Test { + name: "Bad destination connection id, name too long".to_string(), + raw: RawMsgConnectionOpenInit { + counterparty: Some(RawCounterparty { + connection_id: + "abcdefghijksdffjssdkflweldflsfladfsfwjkrekcmmsdfsdfjflddmnopqrstu" + .to_string(), + 
..get_dummy_raw_counterparty() + }), + ..default_init_msg + }, + want_pass: false, + }, + ] + .into_iter() + .collect(); + + for test in tests { + let msg = MsgConnectionOpenInit::try_from(test.raw.clone()); + + assert_eq!( + test.want_pass, + msg.is_ok(), + "MsgConnOpenInit::new failed for test {}, \nmsg {:?} with error {:?}", + test.name, + test.raw, + msg.err(), + ); + } + } + + #[test] + fn to_and_from() { + let raw = get_dummy_raw_msg_conn_open_init(); + let msg = MsgConnectionOpenInit::try_from(raw.clone()).unwrap(); + let raw_back = RawMsgConnectionOpenInit::from(msg.clone()); + let msg_back = MsgConnectionOpenInit::try_from(raw_back.clone()).unwrap(); + assert_eq!(raw, raw_back); + assert_eq!(msg, msg_back); + } } diff --git a/modules/src/core/ics03_connection/msgs/conn_open_try.rs b/modules/src/core/ics03_connection/msgs/conn_open_try.rs index 6c8c5b49f4..07fcb85868 100644 --- a/modules/src/core/ics03_connection/msgs/conn_open_try.rs +++ b/modules/src/core/ics03_connection/msgs/conn_open_try.rs @@ -1,353 +1,382 @@ use crate::prelude::*; use core::{ - convert::{TryFrom, TryInto}, - time::Duration, + convert::{TryFrom, TryInto}, + fmt::{Debug, Display}, + time::Duration, }; +use ibc_proto::{google::protobuf::Any, ibc::core::connection::v1}; use tendermint_proto::Protobuf; +use crate::core::ics02_client; use ibc_proto::ibc::core::connection::v1::MsgConnectionOpenTry as RawMsgConnectionOpenTry; -use crate::core::ics02_client::client_state::AnyClientState; -use crate::core::ics03_connection::connection::Counterparty; -use crate::core::ics03_connection::error::Error; -use crate::core::ics03_connection::version::Version; -use crate::core::ics23_commitment::commitment::CommitmentProofBytes; -use crate::core::ics24_host::identifier::ClientId; -use crate::proofs::{ConsensusProof, Proofs}; -use crate::signer::Signer; -use crate::tx_msg::Msg; -use crate::Height; +use crate::{ + core::{ + ics02_client::context::ClientKeeper, + ics03_connection::{connection::Counterparty, error::Error, version::Version}, + ics23_commitment::commitment::CommitmentProofBytes, + ics24_host::identifier::ClientId, + }, + proofs::{ConsensusProof, Proofs}, + signer::Signer, + tx_msg::Msg, + Height, +}; pub const TYPE_URL: &str = "/ibc.core.connection.v1.MsgConnectionOpenTry"; /// /// Message definition `MsgConnectionOpenTry` (i.e., `ConnOpenTry` datagram). -/// #[derive(Clone, Debug, PartialEq, Eq)] -pub struct MsgConnectionOpenTry { - pub client_id: ClientId, - pub client_state: Option, - pub counterparty: Counterparty, - pub counterparty_versions: Vec, - pub proofs: Proofs, - pub delay_period: Duration, - pub signer: Signer, +pub struct MsgConnectionOpenTry { + pub client_id: ClientId, + pub client_state: Option, + pub counterparty: Counterparty, + pub counterparty_versions: Vec, + pub proofs: Proofs, + pub delay_period: Duration, + pub signer: Signer, } -impl MsgConnectionOpenTry { - /// Getter for accessing the `consensus_height` field from this message. Returns the special - /// value `0` if this field is not set. - pub fn consensus_height(&self) -> Height { - match self.proofs.consensus_proof() { - None => Height::zero(), - Some(p) => p.height(), - } - } +impl MsgConnectionOpenTry +where + C: ClientKeeper + Clone + Debug + PartialEq + Eq, +{ + /// Getter for accessing the `consensus_height` field from this message. Returns the special + /// value `0` if this field is not set. 
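With this change `MsgConnectionOpenTry` is generic over a `ClientKeeper` implementation, so the concrete `C::AnyClientState` type is chosen at the call site instead of being fixed to a crate-wide enum. A minimal sketch of decoding the raw protobuf message under the new signature, assuming the crate is consumed as `ibc` with its `mocks` feature enabled so that the `MockContext<MockClientTypes>` pairing used by the updated tests further down is available:

```rust
use core::convert::TryFrom;

use ibc::core::ics03_connection::{error::Error, msgs::conn_open_try::MsgConnectionOpenTry};
use ibc::mock::context::{MockClientTypes, MockContext};
use ibc_proto::ibc::core::connection::v1::MsgConnectionOpenTry as RawMsgConnectionOpenTry;

/// Decodes a raw ConnOpenTry datagram into the domain type, fixing the
/// client-keeper context to the mock implementation. A real host would
/// substitute its own `ClientKeeper` type for `MockContext<MockClientTypes>`,
/// subject to the bounds in the impls above (`Clone + Debug + PartialEq + Eq`,
/// plus `TryFrom<Any>` on the associated client state).
fn decode_conn_open_try(
    raw: RawMsgConnectionOpenTry,
) -> Result<MsgConnectionOpenTry<MockContext<MockClientTypes>>, Error> {
    MsgConnectionOpenTry::try_from(raw)
}

fn main() {
    // An all-default raw message is rejected (missing proof height, empty proofs).
    assert!(decode_conn_open_try(RawMsgConnectionOpenTry::default()).is_err());
}
```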
+ pub fn consensus_height(&self) -> Height { + match self.proofs.consensus_proof() { + None => Height::zero(), + Some(p) => p.height(), + } + } } -impl Msg for MsgConnectionOpenTry { - type ValidationError = Error; - type Raw = RawMsgConnectionOpenTry; - - fn route(&self) -> String { - crate::keys::ROUTER_KEY.to_string() - } - - fn type_url(&self) -> String { - TYPE_URL.to_string() - } +impl Msg for MsgConnectionOpenTry +where + C: ClientKeeper + Clone + Debug + PartialEq + Eq, + Any: From, +{ + type ValidationError = Error; + type Raw = RawMsgConnectionOpenTry; + + fn route(&self) -> String { + crate::keys::ROUTER_KEY.to_string() + } + + fn type_url(&self) -> String { + TYPE_URL.to_string() + } } -impl Protobuf for MsgConnectionOpenTry {} - -impl TryFrom for MsgConnectionOpenTry { - type Error = Error; - - fn try_from(msg: RawMsgConnectionOpenTry) -> Result { - let consensus_proof_obj = { - let proof_bytes: Option = msg.proof_consensus.try_into().ok(); - let consensus_height = msg - .consensus_height - .map(|height| Height::new(height.revision_number, height.revision_height)); - if proof_bytes.is_some() && consensus_height.is_some() { - Some( - ConsensusProof::new(proof_bytes.unwrap(), consensus_height.unwrap()) - .map_err(Error::invalid_proof)?, - ) - } else { - None - } - }; - - let proof_height = msg - .proof_height - .ok_or_else(Error::missing_proof_height)? - .into(); - - let client_proof = - CommitmentProofBytes::try_from(msg.proof_client).map_err(Error::invalid_proof)?; - - let counterparty_versions = msg - .counterparty_versions - .into_iter() - .map(Version::try_from) - .collect::, _>>()?; - - if counterparty_versions.is_empty() { - return Err(Error::empty_versions()); - } +impl Protobuf for MsgConnectionOpenTry +where + C: ClientKeeper + Clone + Debug + PartialEq + Eq, + Any: From, + MsgConnectionOpenTry: TryFrom, + as TryFrom>::Error: Display, +{ +} - Ok(Self { - client_id: msg.client_id.parse().map_err(Error::invalid_identifier)?, - client_state: msg - .client_state - .map(AnyClientState::try_from) - .transpose() - .map_err(Error::ics02_client)?, - counterparty: msg - .counterparty - .ok_or_else(Error::missing_counterparty)? 
- .try_into()?, - counterparty_versions, - proofs: Proofs::new( - msg.proof_init.try_into().map_err(Error::invalid_proof)?, - Some(client_proof), - consensus_proof_obj, - None, - proof_height, - ) - .map_err(Error::invalid_proof)?, - delay_period: Duration::from_nanos(msg.delay_period), - signer: msg.signer.parse().map_err(Error::signer)?, - }) - } +impl TryFrom for MsgConnectionOpenTry +where + C: ClientKeeper + Clone + Debug + PartialEq + Eq, + C::AnyClientState: TryFrom, +{ + type Error = Error; + + fn try_from(msg: RawMsgConnectionOpenTry) -> Result { + let consensus_proof_obj = { + let proof_bytes: Option = msg.proof_consensus.try_into().ok(); + let consensus_height = msg + .consensus_height + .map(|height| Height::new(height.revision_number, height.revision_height)); + if proof_bytes.is_some() && consensus_height.is_some() { + Some( + ConsensusProof::new(proof_bytes.unwrap(), consensus_height.unwrap()) + .map_err(Error::invalid_proof)?, + ) + } else { + None + } + }; + + let proof_height = msg.proof_height.ok_or_else(Error::missing_proof_height)?.into(); + + let client_proof = + CommitmentProofBytes::try_from(msg.proof_client).map_err(Error::invalid_proof)?; + + let counterparty_versions = msg + .counterparty_versions + .into_iter() + .map(Version::try_from) + .collect::, _>>()?; + + if counterparty_versions.is_empty() { + return Err(Error::empty_versions()) + } + + Ok(Self { + client_id: msg.client_id.parse().map_err(Error::invalid_identifier)?, + client_state: msg + .client_state + .map(C::AnyClientState::try_from) + .transpose() + .map_err(Error::ics02_client)?, + counterparty: msg.counterparty.ok_or_else(Error::missing_counterparty)?.try_into()?, + counterparty_versions, + proofs: Proofs::new( + msg.proof_init.try_into().map_err(Error::invalid_proof)?, + Some(client_proof), + consensus_proof_obj, + None, + proof_height, + ) + .map_err(Error::invalid_proof)?, + delay_period: Duration::from_nanos(msg.delay_period), + signer: msg.signer.parse().map_err(Error::signer)?, + }) + } } -impl From for RawMsgConnectionOpenTry { - fn from(ics_msg: MsgConnectionOpenTry) -> Self { - RawMsgConnectionOpenTry { - client_id: ics_msg.client_id.as_str().to_string(), - client_state: ics_msg - .client_state - .map_or_else(|| None, |v| Some(v.into())), - counterparty: Some(ics_msg.counterparty.into()), - delay_period: ics_msg.delay_period.as_nanos() as u64, - counterparty_versions: ics_msg - .counterparty_versions - .iter() - .map(|v| v.clone().into()) - .collect(), - proof_height: Some(ics_msg.proofs.height().into()), - proof_init: ics_msg.proofs.object_proof().clone().into(), - proof_client: ics_msg - .proofs - .client_proof() - .clone() - .map_or_else(Vec::new, |v| v.into()), - proof_consensus: ics_msg - .proofs - .consensus_proof() - .map_or_else(Vec::new, |v| v.proof().clone().into()), - consensus_height: ics_msg - .proofs - .consensus_proof() - .map_or_else(|| None, |h| Some(h.height().into())), - signer: ics_msg.signer.to_string(), - ..Default::default() - } - } +impl From> for RawMsgConnectionOpenTry +where + C: ClientKeeper + Clone + Debug + PartialEq + Eq, + Any: From, +{ + fn from(ics_msg: MsgConnectionOpenTry) -> Self { + RawMsgConnectionOpenTry { + client_id: ics_msg.client_id.as_str().to_string(), + client_state: ics_msg.client_state.map_or_else(|| None, |v| Some(v.into())), + counterparty: Some(ics_msg.counterparty.into()), + delay_period: ics_msg.delay_period.as_nanos() as u64, + counterparty_versions: ics_msg + .counterparty_versions + .iter() + .map(|v| v.clone().into()) + .collect(), 
+ proof_height: Some(ics_msg.proofs.height().into()), + proof_init: ics_msg.proofs.object_proof().clone().into(), + proof_client: ics_msg.proofs.client_proof().clone().map_or_else(Vec::new, |v| v.into()), + proof_consensus: ics_msg + .proofs + .consensus_proof() + .map_or_else(Vec::new, |v| v.proof().clone().into()), + consensus_height: ics_msg + .proofs + .consensus_proof() + .map_or_else(|| None, |h| Some(h.height().into())), + signer: ics_msg.signer.to_string(), + ..Default::default() + } + } } #[cfg(test)] pub mod test_util { - use crate::core::ics02_client::client_state::AnyClientState; - use crate::prelude::*; - use ibc_proto::ibc::core::client::v1::Height; - use ibc_proto::ibc::core::connection::v1::MsgConnectionOpenTry as RawMsgConnectionOpenTry; - - use crate::core::ics03_connection::msgs::conn_open_try::MsgConnectionOpenTry; - use crate::core::ics03_connection::msgs::test_util::get_dummy_raw_counterparty; - use crate::core::ics03_connection::version::get_compatible_versions; - use crate::core::ics24_host::identifier::ClientId; - use crate::mock::client_state::MockClientState; - use crate::mock::header::MockHeader; - use crate::test_utils::{get_dummy_bech32_account, get_dummy_proof}; - - /// Testing-specific helper methods. - impl MsgConnectionOpenTry { - /// Setter for `client_id`. - pub fn with_client_id(self, client_id: ClientId) -> MsgConnectionOpenTry { - MsgConnectionOpenTry { client_id, ..self } - } - } - - /// Returns a dummy `RawMsgConnectionOpenTry` with parametrized heights. The parameter - /// `proof_height` represents the height, on the source chain, at which this chain produced the - /// proof. Parameter `consensus_height` represents the height of destination chain which a - /// client on the source chain stores. - pub fn get_dummy_raw_msg_conn_open_try( - proof_height: u64, - consensus_height: u64, - ) -> RawMsgConnectionOpenTry { - RawMsgConnectionOpenTry { - client_id: ClientId::default().to_string(), - client_state: Some( - AnyClientState::Mock(MockClientState::new(MockHeader::default())).into(), - ), - counterparty: Some(get_dummy_raw_counterparty()), - delay_period: 0, - counterparty_versions: get_compatible_versions() - .iter() - .map(|v| v.clone().into()) - .collect(), - proof_init: get_dummy_proof(), - proof_height: Some(Height { - revision_number: 0, - revision_height: proof_height, - }), - proof_consensus: get_dummy_proof(), - consensus_height: Some(Height { - revision_number: 0, - revision_height: consensus_height, - }), - proof_client: get_dummy_proof(), - signer: get_dummy_bech32_account(), - ..Default::default() - } - } + use crate::{ + core::{ + ics02_client::context::ClientKeeper, + ics03_connection::{ + msgs::{ + conn_open_try::MsgConnectionOpenTry, test_util::get_dummy_raw_counterparty, + }, + version::get_compatible_versions, + }, + ics24_host::identifier::ClientId, + }, + mock::{ + client_state::{AnyClientState, MockClientState}, + header::MockHeader, + }, + prelude::*, + test_utils::{get_dummy_bech32_account, get_dummy_proof}, + }; + use core::fmt::Debug; + use ibc_proto::ibc::core::{ + client::v1::Height, connection::v1::MsgConnectionOpenTry as RawMsgConnectionOpenTry, + }; + + /// Testing-specific helper methods. + impl MsgConnectionOpenTry + where + C: ClientKeeper + Clone + Debug + Eq, + { + /// Setter for `client_id`. + pub fn with_client_id(self, client_id: ClientId) -> MsgConnectionOpenTry { + MsgConnectionOpenTry { client_id, ..self } + } + } + + /// Returns a dummy `RawMsgConnectionOpenTry` with parametrized heights. 
The parameter + /// `proof_height` represents the height, on the source chain, at which this chain produced the + /// proof. Parameter `consensus_height` represents the height of destination chain which a + /// client on the source chain stores. + pub fn get_dummy_raw_msg_conn_open_try( + proof_height: u64, + consensus_height: u64, + ) -> RawMsgConnectionOpenTry { + RawMsgConnectionOpenTry { + client_id: ClientId::default().to_string(), + client_state: Some( + AnyClientState::Mock(MockClientState::new(MockHeader::default())).into(), + ), + counterparty: Some(get_dummy_raw_counterparty()), + delay_period: 0, + counterparty_versions: get_compatible_versions() + .iter() + .map(|v| v.clone().into()) + .collect(), + proof_init: get_dummy_proof(), + proof_height: Some(Height { revision_number: 0, revision_height: proof_height }), + proof_consensus: get_dummy_proof(), + consensus_height: Some(Height { + revision_number: 0, + revision_height: consensus_height, + }), + proof_client: get_dummy_proof(), + signer: get_dummy_bech32_account(), + ..Default::default() + } + } } #[cfg(test)] mod tests { - use crate::prelude::*; - - use test_log::test; - - use ibc_proto::ibc::core::client::v1::Height; - use ibc_proto::ibc::core::connection::v1::Counterparty as RawCounterparty; - use ibc_proto::ibc::core::connection::v1::MsgConnectionOpenTry as RawMsgConnectionOpenTry; - - use crate::core::ics03_connection::msgs::conn_open_try::test_util::get_dummy_raw_msg_conn_open_try; - use crate::core::ics03_connection::msgs::conn_open_try::MsgConnectionOpenTry; - use crate::core::ics03_connection::msgs::test_util::get_dummy_raw_counterparty; - - #[test] - fn parse_connection_open_try_msg() { - #[derive(Clone, Debug, PartialEq)] - struct Test { - name: String, - raw: RawMsgConnectionOpenTry, - want_pass: bool, - } - - let default_try_msg = get_dummy_raw_msg_conn_open_try(10, 34); - - let tests: Vec = - vec![ - Test { - name: "Good parameters".to_string(), - raw: default_try_msg.clone(), - want_pass: true, - }, - Test { - name: "Bad client id, name too short".to_string(), - raw: RawMsgConnectionOpenTry { - client_id: "client".to_string(), - ..default_try_msg.clone() - }, - want_pass: false, - }, - Test { - name: "Bad destination connection id, name too long".to_string(), - raw: RawMsgConnectionOpenTry { - counterparty: Some(RawCounterparty { - connection_id: - "abcdasdfasdfsdfasfdwefwfsdfsfsfasfwewvxcvdvwgadvaadsefghijklmnopqrstu" - .to_string(), - ..get_dummy_raw_counterparty() - }), - ..default_try_msg.clone() - }, - want_pass: false, - }, - Test { - name: "Correct destination client id with lower/upper case and special chars" - .to_string(), - raw: RawMsgConnectionOpenTry { - counterparty: Some(RawCounterparty { - client_id: "ClientId_".to_string(), - ..get_dummy_raw_counterparty() - }), - ..default_try_msg.clone() - }, - want_pass: true, - }, - Test { - name: "Bad counterparty versions, empty versions vec".to_string(), - raw: RawMsgConnectionOpenTry { - counterparty_versions: Vec::new(), - ..default_try_msg.clone() - }, - want_pass: false, - }, - Test { - name: "Bad counterparty versions, empty version string".to_string(), - raw: RawMsgConnectionOpenTry { - counterparty_versions: Vec::new(), - ..default_try_msg.clone() - }, - want_pass: false, - }, - Test { - name: "Bad proof height, height is 0".to_string(), - raw: RawMsgConnectionOpenTry { - proof_height: Some(Height { revision_number: 1, revision_height: 0 }), - ..default_try_msg.clone() - }, - want_pass: false, - }, - Test { - name: "Bad consensus height, height 
is 0".to_string(), - raw: RawMsgConnectionOpenTry { - proof_height: Some(Height { revision_number: 1, revision_height: 0 }), - ..default_try_msg.clone() - }, - want_pass: false, - }, - Test { - name: "Empty proof".to_string(), - raw: RawMsgConnectionOpenTry { - proof_init: b"".to_vec(), - ..default_try_msg - }, - want_pass: false, - } - ] - .into_iter() - .collect(); - - for test in tests { - let msg = MsgConnectionOpenTry::try_from(test.raw.clone()); - - assert_eq!( - test.want_pass, - msg.is_ok(), - "MsgConnOpenTry::new failed for test {}, \nmsg {:?} with error {:?}", - test.name, - test.raw, - msg.err(), - ); - } - } - - #[test] - fn to_and_from() { - let raw = get_dummy_raw_msg_conn_open_try(10, 34); - let msg = MsgConnectionOpenTry::try_from(raw.clone()).unwrap(); - let raw_back = RawMsgConnectionOpenTry::from(msg.clone()); - let msg_back = MsgConnectionOpenTry::try_from(raw_back.clone()).unwrap(); - assert_eq!(raw, raw_back); - assert_eq!(msg, msg_back); - } + use crate::prelude::*; + + use test_log::test; + + use ibc_proto::ibc::core::{ + client::v1::Height, + connection::v1::{ + Counterparty as RawCounterparty, MsgConnectionOpenTry as RawMsgConnectionOpenTry, + }, + }; + + use crate::{ + core::ics03_connection::msgs::{ + conn_open_try::{test_util::get_dummy_raw_msg_conn_open_try, MsgConnectionOpenTry}, + test_util::get_dummy_raw_counterparty, + }, + mock::context::{MockClientTypes, MockContext}, + }; + + #[test] + fn parse_connection_open_try_msg() { + #[derive(Clone, Debug, PartialEq)] + struct Test { + name: String, + raw: RawMsgConnectionOpenTry, + want_pass: bool, + } + + let default_try_msg = get_dummy_raw_msg_conn_open_try(10, 34); + + let tests: Vec = vec![ + Test { + name: "Good parameters".to_string(), + raw: default_try_msg.clone(), + want_pass: true, + }, + Test { + name: "Bad client id, name too short".to_string(), + raw: RawMsgConnectionOpenTry { + client_id: "client".to_string(), + ..default_try_msg.clone() + }, + want_pass: false, + }, + Test { + name: "Bad destination connection id, name too long".to_string(), + raw: RawMsgConnectionOpenTry { + counterparty: Some(RawCounterparty { + connection_id: + "abcdasdfasdfsdfasfdwefwfsdfsfsfasfwewvxcvdvwgadvaadsefghijklmnopqrstu" + .to_string(), + ..get_dummy_raw_counterparty() + }), + ..default_try_msg.clone() + }, + want_pass: false, + }, + Test { + name: "Correct destination client id with lower/upper case and special chars" + .to_string(), + raw: RawMsgConnectionOpenTry { + counterparty: Some(RawCounterparty { + client_id: "ClientId_".to_string(), + ..get_dummy_raw_counterparty() + }), + ..default_try_msg.clone() + }, + want_pass: true, + }, + Test { + name: "Bad counterparty versions, empty versions vec".to_string(), + raw: RawMsgConnectionOpenTry { + counterparty_versions: Vec::new(), + ..default_try_msg.clone() + }, + want_pass: false, + }, + Test { + name: "Bad counterparty versions, empty version string".to_string(), + raw: RawMsgConnectionOpenTry { + counterparty_versions: Vec::new(), + ..default_try_msg.clone() + }, + want_pass: false, + }, + Test { + name: "Bad proof height, height is 0".to_string(), + raw: RawMsgConnectionOpenTry { + proof_height: Some(Height { revision_number: 1, revision_height: 0 }), + ..default_try_msg.clone() + }, + want_pass: false, + }, + Test { + name: "Bad consensus height, height is 0".to_string(), + raw: RawMsgConnectionOpenTry { + proof_height: Some(Height { revision_number: 1, revision_height: 0 }), + ..default_try_msg.clone() + }, + want_pass: false, + }, + Test { + name: "Empty 
proof".to_string(), + raw: RawMsgConnectionOpenTry { proof_init: b"".to_vec(), ..default_try_msg }, + want_pass: false, + }, + ] + .into_iter() + .collect(); + + for test in tests { + let msg = + MsgConnectionOpenTry::>::try_from(test.raw.clone()); + + assert_eq!( + test.want_pass, + msg.is_ok(), + "MsgConnOpenTry::new failed for test {}, \nmsg {:?} with error {:?}", + test.name, + test.raw, + msg.err(), + ); + } + } + + #[test] + fn to_and_from() { + let raw = get_dummy_raw_msg_conn_open_try(10, 34); + let msg = + MsgConnectionOpenTry::>::try_from(raw.clone()).unwrap(); + let raw_back = RawMsgConnectionOpenTry::from(msg.clone()); + let msg_back = MsgConnectionOpenTry::try_from(raw_back.clone()).unwrap(); + assert_eq!(raw, raw_back); + assert_eq!(msg, msg_back); + } } diff --git a/modules/src/core/ics03_connection/version.rs b/modules/src/core/ics03_connection/version.rs index 652262d6ec..2ef8ae5090 100644 --- a/modules/src/core/ics03_connection/version.rs +++ b/modules/src/core/ics03_connection/version.rs @@ -6,304 +6,265 @@ use ibc_proto::ibc::core::connection::v1::Version as RawVersion; use serde::{Deserialize, Serialize}; use tendermint_proto::Protobuf; -use crate::core::ics03_connection::error::Error; -use crate::core::ics04_channel::channel::Order; +use crate::core::{ics03_connection::error::Error, ics04_channel::channel::Order}; /// Stores the identifier and the features supported by a version #[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)] pub struct Version { - /// unique version identifier - identifier: String, - /// list of features compatible with the specified identifier - features: Vec, + /// unique version identifier + identifier: String, + /// list of features compatible with the specified identifier + features: Vec, } impl Version { - /// Checks whether or not the given feature is supported in this versin - pub fn is_supported_feature(&self, feature: String) -> bool { - self.features.contains(&feature) - } + /// Checks whether or not the given feature is supported in this versin + pub fn is_supported_feature(&self, feature: String) -> bool { + self.features.contains(&feature) + } } impl Protobuf for Version {} impl TryFrom for Version { - type Error = Error; - fn try_from(value: RawVersion) -> Result { - if value.identifier.trim().is_empty() { - return Err(Error::empty_versions()); - } - for feature in value.features.iter() { - if feature.trim().is_empty() { - return Err(Error::empty_features()); - } - } - Ok(Version { - identifier: value.identifier, - features: value.features, - }) - } + type Error = Error; + fn try_from(value: RawVersion) -> Result { + if value.identifier.trim().is_empty() { + return Err(Error::empty_versions()) + } + for feature in value.features.iter() { + if feature.trim().is_empty() { + return Err(Error::empty_features()) + } + } + Ok(Version { identifier: value.identifier, features: value.features }) + } } impl From for RawVersion { - fn from(value: Version) -> Self { - Self { - identifier: value.identifier, - features: value.features, - } - } + fn from(value: Version) -> Self { + Self { identifier: value.identifier, features: value.features } + } } impl Default for Version { - fn default() -> Self { - Version { - identifier: "1".to_string(), - features: vec![ - Order::Ordered.as_str().to_owned(), - Order::Unordered.as_str().to_owned(), - ], - } - } + fn default() -> Self { + Version { + identifier: "1".to_string(), + features: vec![ + Order::Ordered.as_str().to_owned(), + Order::Unordered.as_str().to_owned(), + ], + } + } } 
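The reformatted `Version` code keeps its original behaviour: the default version advertises identifier `"1"` with the `ORDER_ORDERED` and `ORDER_UNORDERED` features, conversion from the raw protobuf type rejects blank identifiers or features, and the negotiation helpers that follow pick the lowest identifier common to both sides. A short usage sketch, assuming the crate is consumed as `ibc` alongside `ibc-proto`:

```rust
use core::convert::TryFrom;

use ibc::core::ics03_connection::version::{get_compatible_versions, pick_version, Version};
use ibc_proto::ibc::core::connection::v1::Version as RawVersion;

fn main() {
    // Default version: identifier "1", features ORDER_ORDERED and ORDER_UNORDERED.
    let default = Version::default();
    assert!(default.is_supported_feature("ORDER_UNORDERED".to_string()));

    // Raw <-> domain round-trip; TryFrom rejects blank identifiers or features.
    let raw = RawVersion {
        identifier: "2".to_string(),
        features: vec!["ORDER_ORDERED".to_string()],
    };
    let v2 = Version::try_from(raw.clone()).expect("identifier and features are non-empty");
    assert_eq!(RawVersion::from(v2.clone()), raw);
    assert!(
        Version::try_from(RawVersion { identifier: " ".to_string(), features: vec![] }).is_err()
    );

    // Negotiation keeps the lowest identifier in the intersection, here "1".
    let picked = pick_version(get_compatible_versions(), vec![v2, Version::default()]).unwrap();
    assert_eq!(picked, Version::default());
}
```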
impl Display for Version { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - write!(f, "{}", self.identifier) - } + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + write!(f, "{}", self.identifier) + } } /// Returns the lists of supported versions pub fn get_compatible_versions() -> Vec { - vec![Version::default()] + vec![Version::default()] } /// Selects a version from the intersection of locally supported and counterparty versions. pub fn pick_version( - supported_versions: Vec, - counterparty_versions: Vec, + supported_versions: Vec, + counterparty_versions: Vec, ) -> Result { - let mut intersection: Vec = Vec::new(); - for s in supported_versions.iter() { - for c in counterparty_versions.iter() { - if c.identifier != s.identifier { - continue; - } - for feature in c.features.iter() { - if feature.trim().is_empty() { - return Err(Error::empty_features()); - } - } - intersection.append(&mut vec![s.clone()]); - } - } - intersection.sort_by(|a, b| a.identifier.cmp(&b.identifier)); - if intersection.is_empty() { - return Err(Error::no_common_version()); - } - Ok(intersection[0].clone()) + let mut intersection: Vec = Vec::new(); + for s in supported_versions.iter() { + for c in counterparty_versions.iter() { + if c.identifier != s.identifier { + continue + } + for feature in c.features.iter() { + if feature.trim().is_empty() { + return Err(Error::empty_features()) + } + } + intersection.append(&mut vec![s.clone()]); + } + } + intersection.sort_by(|a, b| a.identifier.cmp(&b.identifier)); + if intersection.is_empty() { + return Err(Error::no_common_version()) + } + Ok(intersection[0].clone()) } #[cfg(test)] mod tests { - use crate::prelude::*; + use crate::prelude::*; - use test_log::test; + use test_log::test; - use ibc_proto::ibc::core::connection::v1::Version as RawVersion; + use ibc_proto::ibc::core::connection::v1::Version as RawVersion; - use crate::core::ics03_connection::error::Error; - use crate::core::ics03_connection::version::{get_compatible_versions, pick_version, Version}; + use crate::core::ics03_connection::{ + error::Error, + version::{get_compatible_versions, pick_version, Version}, + }; - fn good_versions() -> Vec { - vec![ - Version::default().into(), - RawVersion { - identifier: "2".to_string(), - features: vec!["ORDER_RANDOM".to_string(), "ORDER_UNORDERED".to_string()], - }, - ] - .into_iter() - .collect() - } + fn good_versions() -> Vec { + vec![ + Version::default().into(), + RawVersion { + identifier: "2".to_string(), + features: vec!["ORDER_RANDOM".to_string(), "ORDER_UNORDERED".to_string()], + }, + ] + .into_iter() + .collect() + } - fn bad_versions_identifier() -> Vec { - vec![RawVersion { - identifier: "".to_string(), - features: vec!["ORDER_RANDOM".to_string(), "ORDER_UNORDERED".to_string()], - }] - .into_iter() - .collect() - } + fn bad_versions_identifier() -> Vec { + vec![RawVersion { + identifier: "".to_string(), + features: vec!["ORDER_RANDOM".to_string(), "ORDER_UNORDERED".to_string()], + }] + .into_iter() + .collect() + } - fn bad_versions_features() -> Vec { - vec![RawVersion { - identifier: "2".to_string(), - features: vec!["".to_string()], - }] - .into_iter() - .collect() - } + fn bad_versions_features() -> Vec { + vec![RawVersion { identifier: "2".to_string(), features: vec!["".to_string()] }] + .into_iter() + .collect() + } - fn overlapping() -> (Vec, Vec, Version) { - ( - vec![ - Version::default(), - Version { - identifier: "3".to_string(), - features: Vec::new(), - }, - Version { - identifier: 
"4".to_string(), - features: Vec::new(), - }, - ] - .into_iter() - .collect(), - vec![ - Version { - identifier: "2".to_string(), - features: Vec::new(), - }, - Version { - identifier: "4".to_string(), - features: Vec::new(), - }, - Version { - identifier: "3".to_string(), - features: Vec::new(), - }, - ] - .into_iter() - .collect(), - // Should pick version 3 as it's the lowest of the intersection {3, 4} - Version { - identifier: "3".to_string(), - features: Vec::new(), - }, - ) - } + fn overlapping() -> (Vec, Vec, Version) { + ( + vec![ + Version::default(), + Version { identifier: "3".to_string(), features: Vec::new() }, + Version { identifier: "4".to_string(), features: Vec::new() }, + ] + .into_iter() + .collect(), + vec![ + Version { identifier: "2".to_string(), features: Vec::new() }, + Version { identifier: "4".to_string(), features: Vec::new() }, + Version { identifier: "3".to_string(), features: Vec::new() }, + ] + .into_iter() + .collect(), + // Should pick version 3 as it's the lowest of the intersection {3, 4} + Version { identifier: "3".to_string(), features: Vec::new() }, + ) + } - fn disjoint() -> (Vec, Vec) { - ( - vec![Version { - identifier: "1".to_string(), - features: Vec::new(), - }] - .into_iter() - .collect(), - vec![Version { - identifier: "2".to_string(), - features: Vec::new(), - }] - .into_iter() - .collect(), - ) - } + fn disjoint() -> (Vec, Vec) { + ( + vec![Version { identifier: "1".to_string(), features: Vec::new() }] + .into_iter() + .collect(), + vec![Version { identifier: "2".to_string(), features: Vec::new() }] + .into_iter() + .collect(), + ) + } - #[test] - fn verify() { - struct Test { - name: String, - versions: Vec, - want_pass: bool, - } - let tests: Vec = vec![ - Test { - name: "Compatible versions".to_string(), - versions: vec![Version::default().into()], - want_pass: true, - }, - Test { - name: "Multiple versions".to_string(), - versions: good_versions(), - want_pass: true, - }, - Test { - name: "Bad version identifier".to_string(), - versions: bad_versions_identifier(), - want_pass: false, - }, - Test { - name: "Bad version feature".to_string(), - versions: bad_versions_features(), - want_pass: false, - }, - Test { - name: "Bad versions empty".to_string(), - versions: Vec::new(), - want_pass: true, - }, - ]; + #[test] + fn verify() { + struct Test { + name: String, + versions: Vec, + want_pass: bool, + } + let tests: Vec = vec![ + Test { + name: "Compatible versions".to_string(), + versions: vec![Version::default().into()], + want_pass: true, + }, + Test { + name: "Multiple versions".to_string(), + versions: good_versions(), + want_pass: true, + }, + Test { + name: "Bad version identifier".to_string(), + versions: bad_versions_identifier(), + want_pass: false, + }, + Test { + name: "Bad version feature".to_string(), + versions: bad_versions_features(), + want_pass: false, + }, + Test { name: "Bad versions empty".to_string(), versions: Vec::new(), want_pass: true }, + ]; - for test in tests { - let versions = test - .versions - .into_iter() - .map(Version::try_from) - .collect::, _>>(); + for test in tests { + let versions = + test.versions.into_iter().map(Version::try_from).collect::, _>>(); - assert_eq!( - test.want_pass, - versions.is_ok(), - "Validate versions failed for test {} with error {:?}", - test.name, - versions.err(), - ); - } - } - #[test] - fn pick() { - struct Test { - name: String, - supported: Vec, - counterparty: Vec, - picked: Result, - want_pass: bool, - } - let tests: Vec = vec![ - Test { - name: "Compatible 
versions".to_string(), - supported: get_compatible_versions(), - counterparty: get_compatible_versions(), - picked: Ok(Version::default()), - want_pass: true, - }, - Test { - name: "Overlapping versions".to_string(), - supported: overlapping().0, - counterparty: overlapping().1, - picked: Ok(overlapping().2), - want_pass: true, - }, - Test { - name: "Disjoint versions".to_string(), - supported: disjoint().0, - counterparty: disjoint().1, - picked: Err(Error::no_common_version()), - want_pass: false, - }, - ]; + assert_eq!( + test.want_pass, + versions.is_ok(), + "Validate versions failed for test {} with error {:?}", + test.name, + versions.err(), + ); + } + } + #[test] + fn pick() { + struct Test { + name: String, + supported: Vec, + counterparty: Vec, + picked: Result, + want_pass: bool, + } + let tests: Vec = vec![ + Test { + name: "Compatible versions".to_string(), + supported: get_compatible_versions(), + counterparty: get_compatible_versions(), + picked: Ok(Version::default()), + want_pass: true, + }, + Test { + name: "Overlapping versions".to_string(), + supported: overlapping().0, + counterparty: overlapping().1, + picked: Ok(overlapping().2), + want_pass: true, + }, + Test { + name: "Disjoint versions".to_string(), + supported: disjoint().0, + counterparty: disjoint().1, + picked: Err(Error::no_common_version()), + want_pass: false, + }, + ]; - for test in tests { - let version = pick_version(test.supported, test.counterparty); + for test in tests { + let version = pick_version(test.supported, test.counterparty); - assert_eq!( - test.want_pass, - version.is_ok(), - "Validate versions failed for test {}", - test.name, - ); + assert_eq!( + test.want_pass, + version.is_ok(), + "Validate versions failed for test {}", + test.name, + ); - if test.want_pass { - assert_eq!(version.unwrap(), test.picked.unwrap()); - } - } - } - #[test] - fn serialize() { - let def = Version::default(); - let def_raw: RawVersion = def.clone().into(); - let def_back = def_raw.try_into().unwrap(); - assert_eq!(def, def_back); - } + if test.want_pass { + assert_eq!(version.unwrap(), test.picked.unwrap()); + } + } + } + #[test] + fn serialize() { + let def = Version::default(); + let def_raw: RawVersion = def.clone().into(); + let def_back = def_raw.try_into().unwrap(); + assert_eq!(def, def_back); + } } diff --git a/modules/src/core/ics04_channel/channel.rs b/modules/src/core/ics04_channel/channel.rs index 0e8087e291..08e025fda4 100644 --- a/modules/src/core/ics04_channel/channel.rs +++ b/modules/src/core/ics04_channel/channel.rs @@ -1,598 +1,544 @@ use crate::prelude::*; -use core::fmt; -use core::str::FromStr; +use core::{fmt, str::FromStr}; use serde::{Deserialize, Serialize}; use tendermint_proto::Protobuf; use ibc_proto::ibc::core::channel::v1::{ - Channel as RawChannel, Counterparty as RawCounterparty, - IdentifiedChannel as RawIdentifiedChannel, + Channel as RawChannel, Counterparty as RawCounterparty, + IdentifiedChannel as RawIdentifiedChannel, }; -use crate::core::ics02_client::height::Height; -use crate::core::ics04_channel::{error::Error, packet::Sequence, Version}; -use crate::core::ics24_host::identifier::{ChannelId, ConnectionId, PortId}; -use crate::events::WithBlockDataType; +use crate::{ + core::{ + ics02_client::height::Height, + ics04_channel::{error::Error, packet::Sequence, Version}, + ics24_host::identifier::{ChannelId, ConnectionId, PortId}, + }, + events::WithBlockDataType, +}; #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] pub struct IdentifiedChannelEnd { - pub 
port_id: PortId, - pub channel_id: ChannelId, - pub channel_end: ChannelEnd, + pub port_id: PortId, + pub channel_id: ChannelId, + pub channel_end: ChannelEnd, } impl IdentifiedChannelEnd { - pub fn new(port_id: PortId, channel_id: ChannelId, channel_end: ChannelEnd) -> Self { - IdentifiedChannelEnd { - port_id, - channel_id, - channel_end, - } - } + pub fn new(port_id: PortId, channel_id: ChannelId, channel_end: ChannelEnd) -> Self { + IdentifiedChannelEnd { port_id, channel_id, channel_end } + } } impl Protobuf for IdentifiedChannelEnd {} impl TryFrom for IdentifiedChannelEnd { - type Error = Error; - - fn try_from(value: RawIdentifiedChannel) -> Result { - let raw_channel_end = RawChannel { - state: value.state, - ordering: value.ordering, - counterparty: value.counterparty, - connection_hops: value.connection_hops, - version: value.version, - }; - - Ok(IdentifiedChannelEnd { - port_id: value.port_id.parse().map_err(Error::identifier)?, - channel_id: value.channel_id.parse().map_err(Error::identifier)?, - channel_end: raw_channel_end.try_into()?, - }) - } + type Error = Error; + + fn try_from(value: RawIdentifiedChannel) -> Result { + let raw_channel_end = RawChannel { + state: value.state, + ordering: value.ordering, + counterparty: value.counterparty, + connection_hops: value.connection_hops, + version: value.version, + }; + + Ok(IdentifiedChannelEnd { + port_id: value.port_id.parse().map_err(Error::identifier)?, + channel_id: value.channel_id.parse().map_err(Error::identifier)?, + channel_end: raw_channel_end.try_into()?, + }) + } } impl From for RawIdentifiedChannel { - fn from(value: IdentifiedChannelEnd) -> Self { - RawIdentifiedChannel { - state: value.channel_end.state as i32, - ordering: value.channel_end.ordering as i32, - counterparty: Some(value.channel_end.counterparty().clone().into()), - connection_hops: value - .channel_end - .connection_hops - .iter() - .map(|v| v.as_str().to_string()) - .collect(), - version: value.channel_end.version.to_string(), - port_id: value.port_id.to_string(), - channel_id: value.channel_id.to_string(), - } - } + fn from(value: IdentifiedChannelEnd) -> Self { + RawIdentifiedChannel { + state: value.channel_end.state as i32, + ordering: value.channel_end.ordering as i32, + counterparty: Some(value.channel_end.counterparty().clone().into()), + connection_hops: value + .channel_end + .connection_hops + .iter() + .map(|v| v.as_str().to_string()) + .collect(), + version: value.channel_end.version.to_string(), + port_id: value.port_id.to_string(), + channel_id: value.channel_id.to_string(), + } + } } #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] pub struct ChannelEnd { - pub state: State, - pub ordering: Order, - pub remote: Counterparty, - pub connection_hops: Vec, - pub version: Version, + pub state: State, + pub ordering: Order, + pub remote: Counterparty, + pub connection_hops: Vec, + pub version: Version, } impl Default for ChannelEnd { - fn default() -> Self { - ChannelEnd { - state: State::Uninitialized, - ordering: Default::default(), - remote: Counterparty::default(), - connection_hops: Vec::new(), - version: Version::default(), - } - } + fn default() -> Self { + ChannelEnd { + state: State::Uninitialized, + ordering: Default::default(), + remote: Counterparty::default(), + connection_hops: Vec::new(), + version: Version::default(), + } + } } impl Protobuf for ChannelEnd {} impl TryFrom for ChannelEnd { - type Error = Error; - - fn try_from(value: RawChannel) -> Result { - let chan_state: State = 
State::from_i32(value.state)?; - - if chan_state == State::Uninitialized { - return Ok(ChannelEnd::default()); - } - - let chan_ordering = Order::from_i32(value.ordering)?; - - // Assemble the 'remote' attribute of the Channel, which represents the Counterparty. - let remote = value - .counterparty - .ok_or_else(Error::missing_counterparty)? - .try_into()?; - - // Parse each item in connection_hops into a ConnectionId. - let connection_hops = value - .connection_hops - .into_iter() - .map(|conn_id| ConnectionId::from_str(conn_id.as_str())) - .collect::, _>>() - .map_err(Error::identifier)?; - - let version = value.version.into(); - - Ok(ChannelEnd::new( - chan_state, - chan_ordering, - remote, - connection_hops, - version, - )) - } + type Error = Error; + + fn try_from(value: RawChannel) -> Result { + let chan_state: State = State::from_i32(value.state)?; + + if chan_state == State::Uninitialized { + return Ok(ChannelEnd::default()) + } + + let chan_ordering = Order::from_i32(value.ordering)?; + + // Assemble the 'remote' attribute of the Channel, which represents the Counterparty. + let remote = value.counterparty.ok_or_else(Error::missing_counterparty)?.try_into()?; + + // Parse each item in connection_hops into a ConnectionId. + let connection_hops = value + .connection_hops + .into_iter() + .map(|conn_id| ConnectionId::from_str(conn_id.as_str())) + .collect::, _>>() + .map_err(Error::identifier)?; + + let version = value.version.into(); + + Ok(ChannelEnd::new(chan_state, chan_ordering, remote, connection_hops, version)) + } } impl From for RawChannel { - fn from(value: ChannelEnd) -> Self { - RawChannel { - state: value.state as i32, - ordering: value.ordering as i32, - counterparty: Some(value.counterparty().clone().into()), - connection_hops: value - .connection_hops - .iter() - .map(|v| v.as_str().to_string()) - .collect(), - version: value.version.to_string(), - } - } + fn from(value: ChannelEnd) -> Self { + RawChannel { + state: value.state as i32, + ordering: value.ordering as i32, + counterparty: Some(value.counterparty().clone().into()), + connection_hops: value.connection_hops.iter().map(|v| v.as_str().to_string()).collect(), + version: value.version.to_string(), + } + } } impl ChannelEnd { - /// Creates a new ChannelEnd in state Uninitialized and other fields parametrized. - pub fn new( - state: State, - ordering: Order, - remote: Counterparty, - connection_hops: Vec, - version: Version, - ) -> Self { - Self { - state, - ordering, - remote, - connection_hops, - version, - } - } - - /// Updates the ChannelEnd to assume a new State 's'. - pub fn set_state(&mut self, s: State) { - self.state = s; - } - - pub fn set_version(&mut self, v: Version) { - self.version = v; - } - - pub fn set_counterparty_channel_id(&mut self, c: ChannelId) { - self.remote.channel_id = Some(c); - } - - /// Returns `true` if this `ChannelEnd` is in state [`State::Open`]. 
- pub fn is_open(&self) -> bool { - self.state_matches(&State::Open) - } - - pub fn state(&self) -> &State { - &self.state - } - - pub fn ordering(&self) -> &Order { - &self.ordering - } - - pub fn counterparty(&self) -> &Counterparty { - &self.remote - } - - pub fn connection_hops(&self) -> &Vec { - &self.connection_hops - } - - pub fn version(&self) -> &Version { - &self.version - } - - pub fn validate_basic(&self) -> Result<(), Error> { - if self.connection_hops.len() != 1 { - return Err(Error::invalid_connection_hops_length( - 1, - self.connection_hops.len(), - )); - } - self.counterparty().validate_basic() - } - - /// Helper function to compare the state of this end with another state. - pub fn state_matches(&self, other: &State) -> bool { - self.state.eq(other) - } - - /// Helper function to compare the order of this end with another order. - pub fn order_matches(&self, other: &Order) -> bool { - self.ordering.eq(other) - } - - #[allow(clippy::ptr_arg)] - pub fn connection_hops_matches(&self, other: &Vec) -> bool { - self.connection_hops.eq(other) - } - - pub fn counterparty_matches(&self, other: &Counterparty) -> bool { - self.counterparty().eq(other) - } - - pub fn version_matches(&self, other: &Version) -> bool { - self.version().eq(other) - } + /// Creates a new ChannelEnd in state Uninitialized and other fields parametrized. + pub fn new( + state: State, + ordering: Order, + remote: Counterparty, + connection_hops: Vec, + version: Version, + ) -> Self { + Self { state, ordering, remote, connection_hops, version } + } + + /// Updates the ChannelEnd to assume a new State 's'. + pub fn set_state(&mut self, s: State) { + self.state = s; + } + + pub fn set_version(&mut self, v: Version) { + self.version = v; + } + + pub fn set_counterparty_channel_id(&mut self, c: ChannelId) { + self.remote.channel_id = Some(c); + } + + /// Returns `true` if this `ChannelEnd` is in state [`State::Open`]. + pub fn is_open(&self) -> bool { + self.state_matches(&State::Open) + } + + pub fn state(&self) -> &State { + &self.state + } + + pub fn ordering(&self) -> &Order { + &self.ordering + } + + pub fn counterparty(&self) -> &Counterparty { + &self.remote + } + + pub fn connection_hops(&self) -> &Vec { + &self.connection_hops + } + + pub fn version(&self) -> &Version { + &self.version + } + + pub fn validate_basic(&self) -> Result<(), Error> { + if self.connection_hops.len() != 1 { + return Err(Error::invalid_connection_hops_length(1, self.connection_hops.len())) + } + self.counterparty().validate_basic() + } + + /// Helper function to compare the state of this end with another state. + pub fn state_matches(&self, other: &State) -> bool { + self.state.eq(other) + } + + /// Helper function to compare the order of this end with another order. 
+ pub fn order_matches(&self, other: &Order) -> bool { + self.ordering.eq(other) + } + + #[allow(clippy::ptr_arg)] + pub fn connection_hops_matches(&self, other: &Vec) -> bool { + self.connection_hops.eq(other) + } + + pub fn counterparty_matches(&self, other: &Counterparty) -> bool { + self.counterparty().eq(other) + } + + pub fn version_matches(&self, other: &Version) -> bool { + self.version().eq(other) + } } #[derive(Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize)] pub struct Counterparty { - pub port_id: PortId, - pub channel_id: Option, + pub port_id: PortId, + pub channel_id: Option, } impl Counterparty { - pub fn new(port_id: PortId, channel_id: Option) -> Self { - Self { - port_id, - channel_id, - } - } - - pub fn port_id(&self) -> &PortId { - &self.port_id - } - - pub fn channel_id(&self) -> Option<&ChannelId> { - self.channel_id.as_ref() - } - - pub fn validate_basic(&self) -> Result<(), Error> { - Ok(()) - } + pub fn new(port_id: PortId, channel_id: Option) -> Self { + Self { port_id, channel_id } + } + + pub fn port_id(&self) -> &PortId { + &self.port_id + } + + pub fn channel_id(&self) -> Option<&ChannelId> { + self.channel_id.as_ref() + } + + pub fn validate_basic(&self) -> Result<(), Error> { + Ok(()) + } } impl Protobuf for Counterparty {} impl TryFrom for Counterparty { - type Error = Error; - - fn try_from(value: RawCounterparty) -> Result { - let channel_id = Some(value.channel_id) - .filter(|x| !x.is_empty()) - .map(|v| FromStr::from_str(v.as_str())) - .transpose() - .map_err(Error::identifier)?; - Ok(Counterparty::new( - value.port_id.parse().map_err(Error::identifier)?, - channel_id, - )) - } + type Error = Error; + + fn try_from(value: RawCounterparty) -> Result { + let channel_id = Some(value.channel_id) + .filter(|x| !x.is_empty()) + .map(|v| FromStr::from_str(v.as_str())) + .transpose() + .map_err(Error::identifier)?; + Ok(Counterparty::new(value.port_id.parse().map_err(Error::identifier)?, channel_id)) + } } impl From for RawCounterparty { - fn from(value: Counterparty) -> Self { - RawCounterparty { - port_id: value.port_id.as_str().to_string(), - channel_id: value - .channel_id - .map_or_else(|| "".to_string(), |v| v.to_string()), - } - } + fn from(value: Counterparty) -> Self { + RawCounterparty { + port_id: value.port_id.as_str().to_string(), + channel_id: value.channel_id.map_or_else(|| "".to_string(), |v| v.to_string()), + } + } } #[derive(Clone, Copy, Debug, PartialEq, Eq, Deserialize, Serialize)] pub enum Order { - None = 0, - Unordered = 1, - Ordered = 2, + None = 0, + Unordered = 1, + Ordered = 2, } impl Default for Order { - fn default() -> Self { - Order::Unordered - } + fn default() -> Self { + Order::Unordered + } } impl fmt::Display for Order { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}", self.as_str()) - } + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.as_str()) + } } impl Order { - /// Yields the Order as a string - pub fn as_str(&self) -> &'static str { - match self { - Self::None => "UNINITIALIZED", - Self::Unordered => "ORDER_UNORDERED", - Self::Ordered => "ORDER_ORDERED", - } - } - - // Parses the Order out from a i32. 
- pub fn from_i32(nr: i32) -> Result { - match nr { - 0 => Ok(Self::None), - 1 => Ok(Self::Unordered), - 2 => Ok(Self::Ordered), - _ => Err(Error::unknown_order_type(nr.to_string())), - } - } + /// Yields the Order as a string + pub fn as_str(&self) -> &'static str { + match self { + Self::None => "UNINITIALIZED", + Self::Unordered => "ORDER_UNORDERED", + Self::Ordered => "ORDER_ORDERED", + } + } + + // Parses the Order out from a i32. + pub fn from_i32(nr: i32) -> Result { + match nr { + 0 => Ok(Self::None), + 1 => Ok(Self::Unordered), + 2 => Ok(Self::Ordered), + _ => Err(Error::unknown_order_type(nr.to_string())), + } + } } impl FromStr for Order { - type Err = Error; - - fn from_str(s: &str) -> Result { - match s.to_lowercase().trim_start_matches("order_") { - "uninitialized" => Ok(Self::None), - "unordered" => Ok(Self::Unordered), - "ordered" => Ok(Self::Ordered), - _ => Err(Error::unknown_order_type(s.to_string())), - } - } + type Err = Error; + + fn from_str(s: &str) -> Result { + match s.to_lowercase().trim_start_matches("order_") { + "uninitialized" => Ok(Self::None), + "unordered" => Ok(Self::Unordered), + "ordered" => Ok(Self::Ordered), + _ => Err(Error::unknown_order_type(s.to_string())), + } + } } #[derive(Copy, Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] pub enum State { - Uninitialized = 0, - Init = 1, - TryOpen = 2, - Open = 3, - Closed = 4, + Uninitialized = 0, + Init = 1, + TryOpen = 2, + Open = 3, + Closed = 4, } impl State { - /// Yields the state as a string - pub fn as_string(&self) -> &'static str { - match self { - Self::Uninitialized => "UNINITIALIZED", - Self::Init => "INIT", - Self::TryOpen => "TRYOPEN", - Self::Open => "OPEN", - Self::Closed => "CLOSED", - } - } - - // Parses the State out from a i32. - pub fn from_i32(s: i32) -> Result { - match s { - 0 => Ok(Self::Uninitialized), - 1 => Ok(Self::Init), - 2 => Ok(Self::TryOpen), - 3 => Ok(Self::Open), - 4 => Ok(Self::Closed), - _ => Err(Error::unknown_state(s)), - } - } - - /// Returns whether or not this channel state is `Open`. - pub fn is_open(self) -> bool { - self == State::Open - } - - /// Returns whether or not the channel with this state - /// has progressed less or the same than the argument. - /// - /// # Example - /// ```rust,ignore - /// assert!(State::Init.less_or_equal_progress(State::Open)); - /// assert!(State::TryOpen.less_or_equal_progress(State::TryOpen)); - /// assert!(!State::Closed.less_or_equal_progress(State::Open)); - /// ``` - pub fn less_or_equal_progress(self, other: Self) -> bool { - self as u32 <= other as u32 - } + /// Yields the state as a string + pub fn as_string(&self) -> &'static str { + match self { + Self::Uninitialized => "UNINITIALIZED", + Self::Init => "INIT", + Self::TryOpen => "TRYOPEN", + Self::Open => "OPEN", + Self::Closed => "CLOSED", + } + } + + // Parses the State out from a i32. + pub fn from_i32(s: i32) -> Result { + match s { + 0 => Ok(Self::Uninitialized), + 1 => Ok(Self::Init), + 2 => Ok(Self::TryOpen), + 3 => Ok(Self::Open), + 4 => Ok(Self::Closed), + _ => Err(Error::unknown_state(s)), + } + } + + /// Returns whether or not this channel state is `Open`. + pub fn is_open(self) -> bool { + self == State::Open + } + + /// Returns whether or not the channel with this state + /// has progressed less or the same than the argument. 
+ /// + /// # Example + /// ```rust,ignore + /// assert!(State::Init.less_or_equal_progress(State::Open)); + /// assert!(State::TryOpen.less_or_equal_progress(State::TryOpen)); + /// assert!(!State::Closed.less_or_equal_progress(State::Open)); + /// ``` + pub fn less_or_equal_progress(self, other: Self) -> bool { + self as u32 <= other as u32 + } } /// Provides a `to_string` method. impl core::fmt::Display for State { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { - write!(f, "{}", self.as_string()) - } + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { + write!(f, "{}", self.as_string()) + } } /// Used to query a packet event, identified by `event_id`, for specific channel and sequences. /// The query is preformed for the chain context at `height`. #[derive(Clone, Debug)] pub struct QueryPacketEventDataRequest { - pub event_id: WithBlockDataType, - pub source_channel_id: ChannelId, - pub source_port_id: PortId, - pub destination_channel_id: ChannelId, - pub destination_port_id: PortId, - pub sequences: Vec, - pub height: Height, + pub event_id: WithBlockDataType, + pub source_channel_id: ChannelId, + pub source_port_id: PortId, + pub destination_channel_id: ChannelId, + pub destination_port_id: PortId, + pub sequences: Vec, + pub height: Height, } #[cfg(test)] pub mod test_util { - use crate::core::ics24_host::identifier::{ChannelId, ConnectionId, PortId}; - use crate::prelude::*; - use ibc_proto::ibc::core::channel::v1::Channel as RawChannel; - use ibc_proto::ibc::core::channel::v1::Counterparty as RawCounterparty; - - /// Returns a dummy `RawCounterparty`, for testing only! - /// Can be optionally parametrized with a specific channel identifier. - pub fn get_dummy_raw_counterparty() -> RawCounterparty { - RawCounterparty { - port_id: PortId::default().to_string(), - channel_id: ChannelId::default().to_string(), - } - } - - /// Returns a dummy `RawChannel`, for testing only! - pub fn get_dummy_raw_channel_end() -> RawChannel { - RawChannel { - state: 1, - ordering: 1, - counterparty: Some(get_dummy_raw_counterparty()), - connection_hops: vec![ConnectionId::default().to_string()], - version: "ics20".to_string(), // The version is not validated. - } - } + use crate::{ + core::ics24_host::identifier::{ChannelId, ConnectionId, PortId}, + prelude::*, + }; + use ibc_proto::ibc::core::channel::v1::{ + Channel as RawChannel, Counterparty as RawCounterparty, + }; + + /// Returns a dummy `RawCounterparty`, for testing only! + /// Can be optionally parametrized with a specific channel identifier. + pub fn get_dummy_raw_counterparty() -> RawCounterparty { + RawCounterparty { + port_id: PortId::default().to_string(), + channel_id: ChannelId::default().to_string(), + } + } + + /// Returns a dummy `RawChannel`, for testing only! + pub fn get_dummy_raw_channel_end() -> RawChannel { + RawChannel { + state: 1, + ordering: 1, + counterparty: Some(get_dummy_raw_counterparty()), + connection_hops: vec![ConnectionId::default().to_string()], + version: "ics20".to_string(), // The version is not validated. 
+ } + } } #[cfg(test)] mod tests { - use crate::prelude::*; - - use core::str::FromStr; - use test_log::test; - - use ibc_proto::ibc::core::channel::v1::Channel as RawChannel; - - use crate::core::ics04_channel::channel::test_util::get_dummy_raw_channel_end; - use crate::core::ics04_channel::channel::ChannelEnd; - - #[test] - fn channel_end_try_from_raw() { - let raw_channel_end = get_dummy_raw_channel_end(); - - let empty_raw_channel_end = RawChannel { - counterparty: None, - ..raw_channel_end.clone() - }; - - struct Test { - name: String, - params: RawChannel, - want_pass: bool, - } - - let tests: Vec = vec![ - Test { - name: "Raw channel end with missing counterparty".to_string(), - params: empty_raw_channel_end, - want_pass: false, - }, - Test { - name: "Raw channel end with incorrect state".to_string(), - params: RawChannel { - state: -1, - ..raw_channel_end.clone() - }, - want_pass: false, - }, - Test { - name: "Raw channel end with incorrect ordering".to_string(), - params: RawChannel { - ordering: -1, - ..raw_channel_end.clone() - }, - want_pass: false, - }, - Test { - name: "Raw channel end with incorrect connection id in connection hops".to_string(), - params: RawChannel { - connection_hops: vec!["connection*".to_string()].into_iter().collect(), - ..raw_channel_end.clone() - }, - want_pass: false, - }, - Test { - name: "Raw channel end with incorrect connection id (has blank space)".to_string(), - params: RawChannel { - connection_hops: vec!["con nection".to_string()].into_iter().collect(), - ..raw_channel_end.clone() - }, - want_pass: false, - }, - Test { - name: "Raw channel end with two correct connection ids in connection hops" - .to_string(), - params: RawChannel { - connection_hops: vec!["connection1".to_string(), "connection2".to_string()] - .into_iter() - .collect(), - ..raw_channel_end.clone() - }, - want_pass: true, - }, - Test { - name: "Raw channel end with correct params".to_string(), - params: raw_channel_end, - want_pass: true, - }, - ] - .into_iter() - .collect(); - - for test in tests { - let p = test.params.clone(); - - let ce_result = ChannelEnd::try_from(p); - - assert_eq!( - test.want_pass, - ce_result.is_ok(), - "ChannelEnd::try_from() failed for test {}, \nmsg{:?} with error {:?}", - test.name, - test.params.clone(), - ce_result.err(), - ); - } - } - - #[test] - fn parse_channel_ordering_type() { - use super::Order; - - struct Test { - ordering: &'static str, - want_res: Order, - want_err: bool, - } - let tests: Vec = vec![ - Test { - ordering: "UNINITIALIZED", - want_res: Order::None, - want_err: false, - }, - Test { - ordering: "UNORDERED", - want_res: Order::Unordered, - want_err: false, - }, - Test { - ordering: "ORDERED", - want_res: Order::Ordered, - want_err: false, - }, - Test { - ordering: "UNKNOWN_ORDER", - want_res: Order::None, - want_err: true, - }, - ] - .into_iter() - .collect(); - - for test in tests { - match Order::from_str(test.ordering) { - Ok(res) => { - assert!(!test.want_err); - assert_eq!(test.want_res, res); - } - Err(_) => assert!(test.want_err, "parse failed"), - } - } - } + use crate::prelude::*; + + use core::str::FromStr; + use test_log::test; + + use ibc_proto::ibc::core::channel::v1::Channel as RawChannel; + + use crate::core::ics04_channel::channel::{test_util::get_dummy_raw_channel_end, ChannelEnd}; + + #[test] + fn channel_end_try_from_raw() { + let raw_channel_end = get_dummy_raw_channel_end(); + + let empty_raw_channel_end = RawChannel { counterparty: None, ..raw_channel_end.clone() }; + + struct Test { + name: String, + 
params: RawChannel, + want_pass: bool, + } + + let tests: Vec = vec![ + Test { + name: "Raw channel end with missing counterparty".to_string(), + params: empty_raw_channel_end, + want_pass: false, + }, + Test { + name: "Raw channel end with incorrect state".to_string(), + params: RawChannel { state: -1, ..raw_channel_end.clone() }, + want_pass: false, + }, + Test { + name: "Raw channel end with incorrect ordering".to_string(), + params: RawChannel { ordering: -1, ..raw_channel_end.clone() }, + want_pass: false, + }, + Test { + name: "Raw channel end with incorrect connection id in connection hops".to_string(), + params: RawChannel { + connection_hops: vec!["connection*".to_string()].into_iter().collect(), + ..raw_channel_end.clone() + }, + want_pass: false, + }, + Test { + name: "Raw channel end with incorrect connection id (has blank space)".to_string(), + params: RawChannel { + connection_hops: vec!["con nection".to_string()].into_iter().collect(), + ..raw_channel_end.clone() + }, + want_pass: false, + }, + Test { + name: "Raw channel end with two correct connection ids in connection hops" + .to_string(), + params: RawChannel { + connection_hops: vec!["connection1".to_string(), "connection2".to_string()] + .into_iter() + .collect(), + ..raw_channel_end.clone() + }, + want_pass: true, + }, + Test { + name: "Raw channel end with correct params".to_string(), + params: raw_channel_end, + want_pass: true, + }, + ] + .into_iter() + .collect(); + + for test in tests { + let p = test.params.clone(); + + let ce_result = ChannelEnd::try_from(p); + + assert_eq!( + test.want_pass, + ce_result.is_ok(), + "ChannelEnd::try_from() failed for test {}, \nmsg{:?} with error {:?}", + test.name, + test.params.clone(), + ce_result.err(), + ); + } + } + + #[test] + fn parse_channel_ordering_type() { + use super::Order; + + struct Test { + ordering: &'static str, + want_res: Order, + want_err: bool, + } + let tests: Vec = vec![ + Test { ordering: "UNINITIALIZED", want_res: Order::None, want_err: false }, + Test { ordering: "UNORDERED", want_res: Order::Unordered, want_err: false }, + Test { ordering: "ORDERED", want_res: Order::Ordered, want_err: false }, + Test { ordering: "UNKNOWN_ORDER", want_res: Order::None, want_err: true }, + ] + .into_iter() + .collect(); + + for test in tests { + match Order::from_str(test.ordering) { + Ok(res) => { + assert!(!test.want_err); + assert_eq!(test.want_res, res); + }, + Err(_) => assert!(test.want_err, "parse failed"), + } + } + } } diff --git a/modules/src/core/ics04_channel/commitment.rs b/modules/src/core/ics04_channel/commitment.rs index 100a989627..7798ca9419 100644 --- a/modules/src/core/ics04_channel/commitment.rs +++ b/modules/src/core/ics04_channel/commitment.rs @@ -7,15 +7,15 @@ use serde_derive::{Deserialize, Serialize}; pub struct PacketCommitment(Vec); impl PacketCommitment { - pub fn into_vec(self) -> Vec { - self.0 - } + pub fn into_vec(self) -> Vec { + self.0 + } } impl From> for PacketCommitment { - fn from(bytes: Vec) -> Self { - Self(bytes) - } + fn from(bytes: Vec) -> Self { + Self(bytes) + } } /// Acknowledgement commitment to be stored @@ -23,13 +23,13 @@ impl From> for PacketCommitment { pub struct AcknowledgementCommitment(Vec); impl AcknowledgementCommitment { - pub fn into_vec(self) -> Vec { - self.0 - } + pub fn into_vec(self) -> Vec { + self.0 + } } impl From> for AcknowledgementCommitment { - fn from(bytes: Vec) -> Self { - Self(bytes) - } + fn from(bytes: Vec) -> Self { + Self(bytes) + } } diff --git a/modules/src/core/ics04_channel/context.rs 
b/modules/src/core/ics04_channel/context.rs index 8767aca0e5..3a28b88320 100644 --- a/modules/src/core/ics04_channel/context.rs +++ b/modules/src/core/ics04_channel/context.rs @@ -1,279 +1,272 @@ //! ICS4 (channel) context. The two traits `ChannelReader ` and `ChannelKeeper` define //! the interface that any host chain must implement to be able to process any `ChannelMsg`. -//! use core::time::Duration; use num_traits::float::FloatCore; -use crate::core::ics04_channel::channel::ChannelEnd; -use crate::core::ics04_channel::commitment::{AcknowledgementCommitment, PacketCommitment}; -use crate::core::ics04_channel::handler::recv_packet::RecvPacketResult; -use crate::core::ics04_channel::handler::{ChannelIdState, ChannelResult}; -use crate::core::ics04_channel::msgs::acknowledgement::Acknowledgement; -use crate::core::ics04_channel::{error::Error, packet::Receipt}; -use crate::core::ics24_host::identifier::{ChannelId, ClientId, ConnectionId, PortId}; -use crate::prelude::*; -use crate::timestamp::Timestamp; -use crate::Height; +use crate::{ + core::{ + ics04_channel::{ + channel::ChannelEnd, + commitment::{AcknowledgementCommitment, PacketCommitment}, + error::Error, + handler::{recv_packet::RecvPacketResult, ChannelIdState, ChannelResult}, + msgs::acknowledgement::Acknowledgement, + packet::Receipt, + }, + ics24_host::identifier::{ChannelId, ClientId, ConnectionId, PortId}, + }, + prelude::*, + timestamp::Timestamp, + Height, +}; use super::packet::{Packet, PacketResult, Sequence}; /// A context supplying all the necessary read-only dependencies for processing any `ChannelMsg`. pub trait ChannelReader { - /// Returns the ChannelEnd for the given `port_id` and `chan_id`. - fn channel_end(&self, port_channel_id: &(PortId, ChannelId)) -> Result; - - fn connection_channels(&self, cid: &ConnectionId) -> Result, Error>; - - fn get_next_sequence_send( - &self, - port_channel_id: &(PortId, ChannelId), - ) -> Result; - - fn get_next_sequence_recv( - &self, - port_channel_id: &(PortId, ChannelId), - ) -> Result; - - fn get_next_sequence_ack( - &self, - port_channel_id: &(PortId, ChannelId), - ) -> Result; - - fn get_packet_commitment( - &self, - key: &(PortId, ChannelId, Sequence), - ) -> Result; - - fn get_packet_receipt(&self, key: &(PortId, ChannelId, Sequence)) -> Result; - - fn get_packet_acknowledgement( - &self, - key: &(PortId, ChannelId, Sequence), - ) -> Result; - - fn packet_commitment( - &self, - packet_data: Vec, - timeout_height: Height, - timeout_timestamp: Timestamp, - ) -> PacketCommitment { - let mut input = timeout_timestamp.nanoseconds().to_be_bytes().to_vec(); - let revision_number = timeout_height.revision_number.to_be_bytes(); - input.append(&mut revision_number.to_vec()); - let revision_height = timeout_height.revision_height.to_be_bytes(); - input.append(&mut revision_height.to_vec()); - let data = self.hash(packet_data); - input.append(&mut data.to_vec()); - self.hash(input).into() - } - - fn ack_commitment(&self, ack: Acknowledgement) -> AcknowledgementCommitment { - self.hash(ack.into_bytes()).into() - } - - /// A Sha2_256 hashing function - fn hash(&self, value: Vec) -> Vec; - - /// Returns the time when the client state for the given [`ClientId`] was updated with a header for the given [`Height`] - fn client_update_time(&self, client_id: &ClientId, height: Height) -> Result; - - /// Returns the height when the client state for the given [`ClientId`] was updated with a header for the given [`Height`] - fn client_update_height(&self, client_id: &ClientId, height: Height) -> 
Result; - - /// Returns a counter on the number of channel ids have been created thus far. - /// The value of this counter should increase only via method - /// `ChannelKeeper::increase_channel_counter`. - fn channel_counter(&self) -> Result; - - /// Returns the maximum expected time per block - fn max_expected_time_per_block(&self) -> Duration; - - /// Calculates the block delay period using the connection's delay period and the maximum - /// expected time per block. - fn block_delay(&self, delay_period_time: Duration) -> u64 { - calculate_block_delay(delay_period_time, self.max_expected_time_per_block()) - } + /// Returns the ChannelEnd for the given `port_id` and `chan_id`. + fn channel_end(&self, port_channel_id: &(PortId, ChannelId)) -> Result; + + fn connection_channels(&self, cid: &ConnectionId) -> Result, Error>; + + fn get_next_sequence_send( + &self, + port_channel_id: &(PortId, ChannelId), + ) -> Result; + + fn get_next_sequence_recv( + &self, + port_channel_id: &(PortId, ChannelId), + ) -> Result; + + fn get_next_sequence_ack( + &self, + port_channel_id: &(PortId, ChannelId), + ) -> Result; + + fn get_packet_commitment( + &self, + key: &(PortId, ChannelId, Sequence), + ) -> Result; + + fn get_packet_receipt(&self, key: &(PortId, ChannelId, Sequence)) -> Result; + + fn get_packet_acknowledgement( + &self, + key: &(PortId, ChannelId, Sequence), + ) -> Result; + + fn packet_commitment( + &self, + packet_data: Vec, + timeout_height: Height, + timeout_timestamp: Timestamp, + ) -> PacketCommitment { + let mut input = timeout_timestamp.nanoseconds().to_be_bytes().to_vec(); + let revision_number = timeout_height.revision_number.to_be_bytes(); + input.append(&mut revision_number.to_vec()); + let revision_height = timeout_height.revision_height.to_be_bytes(); + input.append(&mut revision_height.to_vec()); + let data = self.hash(packet_data); + input.append(&mut data.to_vec()); + self.hash(input).into() + } + + fn ack_commitment(&self, ack: Acknowledgement) -> AcknowledgementCommitment { + self.hash(ack.into_bytes()).into() + } + + /// A Sha2_256 hashing function + fn hash(&self, value: Vec) -> Vec; + + /// Returns the time when the client state for the given [`ClientId`] was updated with a header + /// for the given [`Height`] + fn client_update_time(&self, client_id: &ClientId, height: Height) -> Result; + + /// Returns the height when the client state for the given [`ClientId`] was updated with a + /// header for the given [`Height`] + fn client_update_height(&self, client_id: &ClientId, height: Height) -> Result; + + /// Returns a counter on the number of channel ids have been created thus far. + /// The value of this counter should increase only via method + /// `ChannelKeeper::increase_channel_counter`. + fn channel_counter(&self) -> Result; + + /// Returns the maximum expected time per block + fn max_expected_time_per_block(&self) -> Duration; + + /// Calculates the block delay period using the connection's delay period and the maximum + /// expected time per block. + fn block_delay(&self, delay_period_time: Duration) -> u64 { + calculate_block_delay(delay_period_time, self.max_expected_time_per_block()) + } } /// A context supplying all the necessary write-only dependencies (i.e., storage writing facility) /// for processing any `ChannelMsg`. pub trait ChannelKeeper { - fn store_channel_result(&mut self, result: ChannelResult) -> Result<(), Error> { - // The handler processed this channel & some modifications occurred, store the new end. 
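// NOTE (editorial sketch, not part of the patch): `ChannelReader::packet_commitment` above
// commits to a packet as
//     hash(timeout_timestamp_be || revision_number_be || revision_height_be || hash(packet_data)),
// where `hash` is the host-supplied SHA2-256 function declared on the trait. A standalone
// equivalent, assuming the `sha2` crate stands in for the host's `hash` implementation:

use sha2::{Digest, Sha256};

fn packet_commitment_bytes(
    packet_data: &[u8],
    timeout_revision_number: u64,
    timeout_revision_height: u64,
    timeout_timestamp_ns: u64,
) -> Vec<u8> {
    // Big-endian timeout timestamp, then the two timeout-height components,
    // then the hash of the packet data, all hashed once more.
    let mut input = timeout_timestamp_ns.to_be_bytes().to_vec();
    input.extend_from_slice(&timeout_revision_number.to_be_bytes());
    input.extend_from_slice(&timeout_revision_height.to_be_bytes());
    input.extend_from_slice(Sha256::digest(packet_data).as_slice());
    Sha256::digest(&input).as_slice().to_vec()
}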
- self.store_channel( - (result.port_id.clone(), result.channel_id), - &result.channel_end, - )?; - - // The channel identifier was freshly brewed. - // Increase counter & initialize seq. nrs. - if matches!(result.channel_id_state, ChannelIdState::Generated) { - self.increase_channel_counter(); - - // Associate also the channel end to its connection. - self.store_connection_channels( - result.channel_end.connection_hops()[0].clone(), - &(result.port_id.clone(), result.channel_id), - )?; - - // Initialize send, recv, and ack sequence numbers. - self.store_next_sequence_send((result.port_id.clone(), result.channel_id), 1.into())?; - self.store_next_sequence_recv((result.port_id.clone(), result.channel_id), 1.into())?; - self.store_next_sequence_ack((result.port_id, result.channel_id), 1.into())?; - } - - Ok(()) - } - - fn store_packet_result(&mut self, general_result: PacketResult) -> Result<(), Error> { - match general_result { - PacketResult::Send(res) => { - self.store_next_sequence_send( - (res.port_id.clone(), res.channel_id), - res.seq_number, - )?; - - self.store_packet_commitment( - (res.port_id.clone(), res.channel_id, res.seq), - res.commitment, - )?; - - self.store_send_packet((res.port_id.clone(), res.channel_id, res.seq), res.packet)?; - } - PacketResult::Recv(res) => match res { - RecvPacketResult::Ordered { - port_id, - channel_id, - next_seq_recv, - packet, - } => { - self.store_next_sequence_recv((port_id.clone(), channel_id), next_seq_recv)?; - self.store_recv_packet((port_id, channel_id, packet.sequence), packet)? - } - RecvPacketResult::Unordered { - port_id, - channel_id, - sequence, - receipt, - packet, - } => { - self.store_packet_receipt((port_id.clone(), channel_id, sequence), receipt)?; - self.store_recv_packet((port_id, channel_id, packet.sequence), packet)? 
- } - - RecvPacketResult::NoOp => unreachable!(), - }, - PacketResult::WriteAck(res) => { - self.store_packet_acknowledgement( - (res.port_id.clone(), res.channel_id, res.seq), - res.ack_commitment, - )?; - } - PacketResult::Ack(res) => { - if let Some(s) = res.seq_number { - //Ordered Channel - self.store_next_sequence_ack((res.port_id.clone(), res.channel_id), s)?; - } - - // Delete packet commitment since packet has been aknowledged - self.delete_packet_commitment((res.port_id.clone(), res.channel_id, res.seq))?; - } - PacketResult::Timeout(res) => { - if let Some(c) = res.channel { - //Ordered Channel - self.store_channel((res.port_id.clone(), res.channel_id), &c)?; - } - self.delete_packet_commitment((res.port_id.clone(), res.channel_id, res.seq))?; - } - } - Ok(()) - } - - fn store_packet_commitment( - &mut self, - key: (PortId, ChannelId, Sequence), - commitment: PacketCommitment, - ) -> Result<(), Error>; - - /// Allow implementers to optionally store send packets in storage - fn store_send_packet( - &mut self, - key: (PortId, ChannelId, Sequence), - packet: Packet, - ) -> Result<(), Error>; - - /// Allow implementers to optionally store received packets in storage - fn store_recv_packet( - &mut self, - key: (PortId, ChannelId, Sequence), - packet: Packet, - ) -> Result<(), Error>; - - fn delete_packet_commitment(&mut self, key: (PortId, ChannelId, Sequence)) - -> Result<(), Error>; - - fn store_packet_receipt( - &mut self, - key: (PortId, ChannelId, Sequence), - receipt: Receipt, - ) -> Result<(), Error>; - - fn store_packet_acknowledgement( - &mut self, - key: (PortId, ChannelId, Sequence), - ack_commitment: AcknowledgementCommitment, - ) -> Result<(), Error>; - - fn delete_packet_acknowledgement( - &mut self, - key: (PortId, ChannelId, Sequence), - ) -> Result<(), Error>; - - fn store_connection_channels( - &mut self, - conn_id: ConnectionId, - port_channel_id: &(PortId, ChannelId), - ) -> Result<(), Error>; - - /// Stores the given channel_end at a path associated with the port_id and channel_id. - fn store_channel( - &mut self, - port_channel_id: (PortId, ChannelId), - channel_end: &ChannelEnd, - ) -> Result<(), Error>; - - fn store_next_sequence_send( - &mut self, - port_channel_id: (PortId, ChannelId), - seq: Sequence, - ) -> Result<(), Error>; - - fn store_next_sequence_recv( - &mut self, - port_channel_id: (PortId, ChannelId), - seq: Sequence, - ) -> Result<(), Error>; - - fn store_next_sequence_ack( - &mut self, - port_channel_id: (PortId, ChannelId), - seq: Sequence, - ) -> Result<(), Error>; - - /// Called upon channel identifier creation (Init or Try message processing). - /// Increases the counter which keeps track of how many channels have been created. - /// Should never fail. - fn increase_channel_counter(&mut self); + fn store_channel_result(&mut self, result: ChannelResult) -> Result<(), Error> { + // The handler processed this channel & some modifications occurred, store the new end. + self.store_channel((result.port_id.clone(), result.channel_id), &result.channel_end)?; + + // The channel identifier was freshly brewed. + // Increase counter & initialize seq. nrs. + if matches!(result.channel_id_state, ChannelIdState::Generated) { + self.increase_channel_counter(); + + // Associate also the channel end to its connection. + self.store_connection_channels( + result.channel_end.connection_hops()[0].clone(), + &(result.port_id.clone(), result.channel_id), + )?; + + // Initialize send, recv, and ack sequence numbers. 
+ self.store_next_sequence_send((result.port_id.clone(), result.channel_id), 1.into())?; + self.store_next_sequence_recv((result.port_id.clone(), result.channel_id), 1.into())?; + self.store_next_sequence_ack((result.port_id, result.channel_id), 1.into())?; + } + + Ok(()) + } + + fn store_packet_result(&mut self, general_result: PacketResult) -> Result<(), Error> { + match general_result { + PacketResult::Send(res) => { + self.store_next_sequence_send( + (res.port_id.clone(), res.channel_id), + res.seq_number, + )?; + + self.store_packet_commitment( + (res.port_id.clone(), res.channel_id, res.seq), + res.commitment, + )?; + + self.store_send_packet((res.port_id.clone(), res.channel_id, res.seq), res.packet)?; + }, + PacketResult::Recv(res) => match res { + RecvPacketResult::Ordered { port_id, channel_id, next_seq_recv, packet } => { + self.store_next_sequence_recv((port_id.clone(), channel_id), next_seq_recv)?; + self.store_recv_packet((port_id, channel_id, packet.sequence), packet)? + }, + RecvPacketResult::Unordered { port_id, channel_id, sequence, receipt, packet } => { + self.store_packet_receipt((port_id.clone(), channel_id, sequence), receipt)?; + self.store_recv_packet((port_id, channel_id, packet.sequence), packet)? + }, + + RecvPacketResult::NoOp => unreachable!(), + }, + PacketResult::WriteAck(res) => { + self.store_packet_acknowledgement( + (res.port_id.clone(), res.channel_id, res.seq), + res.ack_commitment, + )?; + }, + PacketResult::Ack(res) => { + if let Some(s) = res.seq_number { + //Ordered Channel + self.store_next_sequence_ack((res.port_id.clone(), res.channel_id), s)?; + } + + // Delete packet commitment since packet has been acknowledged + self.delete_packet_commitment((res.port_id.clone(), res.channel_id, res.seq))?; + }, + PacketResult::Timeout(res) => { + if let Some(c) = res.channel { + //Ordered Channel + self.store_channel((res.port_id.clone(), res.channel_id), &c)?; + } + self.delete_packet_commitment((res.port_id.clone(), res.channel_id, res.seq))?; + }, + } + Ok(()) + } + + fn store_packet_commitment( + &mut self, + key: (PortId, ChannelId, Sequence), + commitment: PacketCommitment, + ) -> Result<(), Error>; + + /// Allow implementers to optionally store send packets in storage + fn store_send_packet( + &mut self, + key: (PortId, ChannelId, Sequence), + packet: Packet, + ) -> Result<(), Error>; + + /// Allow implementers to optionally store received packets in storage + fn store_recv_packet( + &mut self, + key: (PortId, ChannelId, Sequence), + packet: Packet, + ) -> Result<(), Error>; + + fn delete_packet_commitment(&mut self, key: (PortId, ChannelId, Sequence)) + -> Result<(), Error>; + + fn store_packet_receipt( + &mut self, + key: (PortId, ChannelId, Sequence), + receipt: Receipt, + ) -> Result<(), Error>; + + fn store_packet_acknowledgement( + &mut self, + key: (PortId, ChannelId, Sequence), + ack_commitment: AcknowledgementCommitment, + ) -> Result<(), Error>; + + fn delete_packet_acknowledgement( + &mut self, + key: (PortId, ChannelId, Sequence), + ) -> Result<(), Error>; + + fn store_connection_channels( + &mut self, + conn_id: ConnectionId, + port_channel_id: &(PortId, ChannelId), + ) -> Result<(), Error>; + + /// Stores the given channel_end at a path associated with the port_id and channel_id.
+ fn store_channel( + &mut self, + port_channel_id: (PortId, ChannelId), + channel_end: &ChannelEnd, + ) -> Result<(), Error>; + + fn store_next_sequence_send( + &mut self, + port_channel_id: (PortId, ChannelId), + seq: Sequence, + ) -> Result<(), Error>; + + fn store_next_sequence_recv( + &mut self, + port_channel_id: (PortId, ChannelId), + seq: Sequence, + ) -> Result<(), Error>; + + fn store_next_sequence_ack( + &mut self, + port_channel_id: (PortId, ChannelId), + seq: Sequence, + ) -> Result<(), Error>; + + /// Called upon channel identifier creation (Init or Try message processing). + /// Increases the counter which keeps track of how many channels have been created. + /// Should never fail. + fn increase_channel_counter(&mut self); } pub fn calculate_block_delay( - delay_period_time: Duration, - max_expected_time_per_block: Duration, + delay_period_time: Duration, + max_expected_time_per_block: Duration, ) -> u64 { - if max_expected_time_per_block.is_zero() { - return 0; - } + if max_expected_time_per_block.is_zero() { + return 0 + } - FloatCore::ceil(delay_period_time.as_secs_f64() / max_expected_time_per_block.as_secs_f64()) - as u64 + FloatCore::ceil(delay_period_time.as_secs_f64() / max_expected_time_per_block.as_secs_f64()) + as u64 } diff --git a/modules/src/core/ics04_channel/error.rs b/modules/src/core/ics04_channel/error.rs index 9a73862d1b..7b1c1bc3ee 100644 --- a/modules/src/core/ics04_channel/error.rs +++ b/modules/src/core/ics04_channel/error.rs @@ -1,375 +1,381 @@ use super::packet::Sequence; -use crate::core::ics02_client::error as client_error; -use crate::core::ics03_connection::error as connection_error; -use crate::core::ics04_channel::channel::State; -use crate::core::ics05_port::error as port_error; -use crate::core::ics24_host::error::ValidationError; -use crate::core::ics24_host::identifier::{ChannelId, ClientId, ConnectionId, PortId}; -use crate::prelude::*; -use crate::proofs::ProofError; -use crate::signer::SignerError; -use crate::timestamp::Timestamp; -use crate::Height; +use crate::{ + core::{ + ics02_client::error as client_error, + ics03_connection::error as connection_error, + ics04_channel::channel::State, + ics05_port::error as port_error, + ics24_host::{ + error::ValidationError, + identifier::{ChannelId, ClientId, ConnectionId, PortId}, + }, + }, + prelude::*, + proofs::ProofError, + signer::SignerError, + timestamp::Timestamp, + Height, +}; use flex_error::{define_error, TraceError}; use tendermint_proto::Error as TendermintError; define_error! 
{ - #[derive(Debug, PartialEq, Eq)] - Error { - Ics03Connection - [ connection_error::Error ] - | _ | { "ics03 connection error" }, + #[derive(Debug, PartialEq, Eq)] + Error { + Ics03Connection + [ connection_error::Error ] + | _ | { "ics03 connection error" }, - Ics02Client - [ client_error::Error ] - | _ | { "ics02 client error" }, - - Ics05Port - [ port_error::Error ] - | _ | { "ics05 port error" }, - - UnknownState - { state: i32 } - | e | { format_args!("channel state unknown: {}", e.state) }, - - Identifier - [ ValidationError ] - | _ | { "identifier error" }, - - UnknownOrderType - { type_id: String } - | e | { format_args!("channel order type unknown: {}", e.type_id) }, - - InvalidConnectionHopsLength - { expected: usize, actual: usize } - | e | { - format_args!( - "invalid connection hops length: expected {0}; actual {1}", - e.expected, e.actual) - }, - - InvalidPacketCounterparty - { port_id: PortId, channel_id: ChannelId } - | e | { - format_args!( - "packet destination port {} and channel {} doesn't match the counterparty's port/channel", - e.port_id, e.channel_id) - }, - - InvalidVersion - [ TraceError ] - | _ | { "invalid version" }, - - Signer - [ SignerError ] - | _ | { "invalid signer address" }, - - InvalidProof - [ ProofError ] - | _ | { "invalid proof" }, - - MissingHeight - | _ | { "invalid proof: missing height" }, - - MissingChannelProof - | _ | { "invalid proof: missing channel proof" }, - - MissingNextRecvSeq - { port_channel_id: (PortId, ChannelId) } - | e | { - format_args!("Missing sequence number for receiving packets on port {0} and channel {1}", - e.port_channel_id.0, - e.port_channel_id.1) - }, - - ZeroPacketSequence - | _ | { "packet sequence cannot be 0" }, - - ZeroPacketData - | _ | { "packet data bytes cannot be empty" }, - - ZeroPacketTimeout - | _ | { "packet timeout height and packet timeout timestamp cannot both be 0" }, - - InvalidTimeoutHeight - | _ | { "invalid timeout height for the packet" }, - - InvalidPacket - | _ | { "invalid packet" }, - - MissingPacket - | _ | { "there is no packet in this message" }, - - MissingChannelId - | _ | { "missing channel id" }, - - MissingCounterparty - | _ | { "missing counterparty" }, - - NoCommonVersion - | _ | { "no commong version" }, - - MissingChannel - | _ | { "missing channel end" }, - - InvalidVersionLengthConnection - | _ | { "single version must be negociated on connection before opening channel" }, - - ChannelFeatureNotSuportedByConnection - | _ | { "the channel ordering is not supported by connection" }, - - ChannelNotFound - { port_id: PortId, channel_id: ChannelId } - | e | { - format_args!( - "the channel end ({0}, {1}) does not exist", - e.port_id, e.channel_id) - }, - - ChannelMismatch - { channel_id: ChannelId } - | e | { - format_args!( - "a different channel exists (was initialized) already for the same channel identifier {0}", - e.channel_id) - }, - - ConnectionNotOpen - { connection_id: ConnectionId } - | e | { - format_args!( - "the associated connection {0} is not OPEN", - e.connection_id) - }, - - UndefinedConnectionCounterparty - { connection_id: ConnectionId } - | e | { - format_args!( - "Undefined counterparty connection for {0}", - e.connection_id) - }, - - PacketVerificationFailed - { sequence: Sequence } - [ client_error::Error ] - | e | { - format_args!( - "Verification fails for the packet with the sequence number {0}", - e.sequence) - }, - - VerifyChannelFailed - [ client_error::Error ] - | _ | { - "Error verifying channel state" - }, - - InvalidAcknowledgement - | _ | { 
"Acknowledgment cannot be empty" }, - - AcknowledgementExists - { sequence: Sequence } - | e | { - format_args!( - "Packet acknowledgement exists for the packet with the sequence {0}", - e.sequence) - }, - - MissingNextSendSeq - { port_channel_id: (PortId, ChannelId) } - | e | { - format_args!("Missing sequence number for sending packets on port {0} and channel {1}", - e.port_channel_id.0, - e.port_channel_id.1) - }, - - InvalidStringAsSequence - { value: String } - [ TraceError ] - | e | { - format_args!( - "String {0} cannot be converted to packet sequence", - e.value) - }, - - InvalidPacketSequence - { - given_sequence: Sequence, - next_sequence: Sequence - } - | e | { - format_args!( - "Invalid packet sequence {0} ≠ next send sequence {1}", - e.given_sequence, e.next_sequence) - }, - - LowPacketHeight - { - chain_height: Height, - timeout_height: Height - } - | e | { - format_args!( - "Receiving chain block height {0} >= packet timeout height {1}", - e.chain_height, e.timeout_height) - }, - PacketTimeoutNotReached - { - timeout_height: Height, - chain_height: Height, - timeout_timestamp: Timestamp, - chain_timestamp: Timestamp, - } - | e | { format_args!( - "Packet timeout not satisified for either packet height or timestamp, Packet timeout height {0}, chain height {1}, Packet timeout timestamp {2}, chain timestamp {3}", - e.timeout_height, e.chain_height, e.timeout_timestamp, e.chain_timestamp) }, - - PacketTimeoutHeightNotReached - { - timeout_height: Height, - chain_height: Height, - } - | e | { - format_args!( - "Packet timeout height {0} > chain height {1}", - e.timeout_height, e.chain_height) - }, - - PacketTimeoutTimestampNotReached - { - timeout_timestamp: Timestamp, - chain_timestamp: Timestamp, - } - | e | { - format_args!( - "Packet timeout timestamp {0} > chain timestamp {1}", - e.timeout_timestamp, e.chain_timestamp) - }, - - LowPacketTimestamp - | _ | { "Receiving chain block timestamp >= packet timeout timestamp" }, - - InvalidPacketTimestamp - [ crate::timestamp::ParseTimestampError ] - | _ | { "Invalid packet timeout timestamp value" }, - - ErrorInvalidConsensusState - | _ | { "Invalid timestamp in consensus state; timestamp must be a positive value" }, - - FrozenClient - { client_id: ClientId } - | e | { - format_args!( - "Client with id {0} is frozen", - e.client_id) - }, - - InvalidCounterpartyChannelId - [ ValidationError ] - | _ | { "Invalid channel id in counterparty" }, - - InvalidChannelState - { channel_id: ChannelId, state: State } - | e | { - format_args!( - "Channel {0} should not be state {1}", - e.channel_id, e.state) - }, - - ChannelClosed - { channel_id: ChannelId } - | e | { - format_args!( - "Channel {0} is Closed", - e.channel_id) - }, - - ChanOpenAckProofVerification - | _ | { "Handshake proof verification fails at ChannelOpenAck" }, - - PacketCommitmentNotFound - { sequence: Sequence } - | e | { - format_args!( - "Commitment for the packet {0} not found", - e.sequence) - }, - - IncorrectPacketCommitment - { sequence: Sequence } - | e | { - format_args!( - "The stored commitment of the packet {0} is incorrect", - e.sequence) - }, - - PacketReceiptNotFound - { sequence: Sequence } - | e | { - format_args!( - "Receipt for the packet {0} not found", - e.sequence) - }, - - PacketAcknowledgementNotFound - { sequence: Sequence } - | e | { - format_args!( - "Acknowledgment for the packet {0} not found", - e.sequence) - }, - - MissingNextAckSeq - { port_channel_id: (PortId, ChannelId) } - | e | { - format_args!("Missing sequence number for ack packets on 
port {0} and channel {1}", - e.port_channel_id.0, - e.port_channel_id.1) - }, - - ProcessedTimeNotFound - { - client_id: ClientId, - height: Height, - } - | e | { - format_args!( - "Processed time for the client {0} at height {1} not found", - e.client_id, e.height) - }, - - ProcessedHeightNotFound - { - client_id: ClientId, - height: Height, - } - | e | { - format_args!( - "Processed height for the client {0} at height {1} not found", - e.client_id, e.height) - }, - - RouteNotFound - | _ | { "route not found" }, - - ImplementationSpecific - { reason: String } - | e | { format_args!("implementation specific error: {}", e.reason) }, - - AppModule - { description: String } - | e | { - format_args!( - "application module error: {0}", - e.description) - }, - } + Ics02Client + [ client_error::Error ] + | _ | { "ics02 client error" }, + + Ics05Port + [ port_error::Error ] + | _ | { "ics05 port error" }, + + UnknownState + { state: i32 } + | e | { format_args!("channel state unknown: {}", e.state) }, + + Identifier + [ ValidationError ] + | _ | { "identifier error" }, + + UnknownOrderType + { type_id: String } + | e | { format_args!("channel order type unknown: {}", e.type_id) }, + + InvalidConnectionHopsLength + { expected: usize, actual: usize } + | e | { + format_args!( + "invalid connection hops length: expected {0}; actual {1}", + e.expected, e.actual) + }, + + InvalidPacketCounterparty + { port_id: PortId, channel_id: ChannelId } + | e | { + format_args!( + "packet destination port {} and channel {} doesn't match the counterparty's port/channel", + e.port_id, e.channel_id) + }, + + InvalidVersion + [ TraceError ] + | _ | { "invalid version" }, + + Signer + [ SignerError ] + | _ | { "invalid signer address" }, + + InvalidProof + [ ProofError ] + | _ | { "invalid proof" }, + + MissingHeight + | _ | { "invalid proof: missing height" }, + + MissingChannelProof + | _ | { "invalid proof: missing channel proof" }, + + MissingNextRecvSeq + { port_channel_id: (PortId, ChannelId) } + | e | { + format_args!("Missing sequence number for receiving packets on port {0} and channel {1}", + e.port_channel_id.0, + e.port_channel_id.1) + }, + + ZeroPacketSequence + | _ | { "packet sequence cannot be 0" }, + + ZeroPacketData + | _ | { "packet data bytes cannot be empty" }, + + ZeroPacketTimeout + | _ | { "packet timeout height and packet timeout timestamp cannot both be 0" }, + + InvalidTimeoutHeight + | _ | { "invalid timeout height for the packet" }, + + InvalidPacket + | _ | { "invalid packet" }, + + MissingPacket + | _ | { "there is no packet in this message" }, + + MissingChannelId + | _ | { "missing channel id" }, + + MissingCounterparty + | _ | { "missing counterparty" }, + + NoCommonVersion + | _ | { "no common version" }, + + MissingChannel + | _ | { "missing channel end" }, + + InvalidVersionLengthConnection + | _ | { "single version must be negotiated on connection before opening channel" }, + + ChannelFeatureNotSuportedByConnection + | _ | { "the channel ordering is not supported by connection" }, + + ChannelNotFound + { port_id: PortId, channel_id: ChannelId } + | e | { + format_args!( + "the channel end ({0}, {1}) does not exist", + e.port_id, e.channel_id) + }, + + ChannelMismatch + { channel_id: ChannelId } + | e | { + format_args!( + "a different channel exists (was initialized) already for the same channel identifier {0}", + e.channel_id) + }, + + ConnectionNotOpen + { connection_id: ConnectionId } + | e | { + format_args!( + "the associated connection {0} is not OPEN", + e.connection_id) +
}, + + UndefinedConnectionCounterparty + { connection_id: ConnectionId } + | e | { + format_args!( + "Undefined counterparty connection for {0}", + e.connection_id) + }, + + PacketVerificationFailed + { sequence: Sequence } + [ client_error::Error ] + | e | { + format_args!( + "Verification fails for the packet with the sequence number {0}", + e.sequence) + }, + + VerifyChannelFailed + [ client_error::Error ] + | _ | { + "Error verifying channel state" + }, + + InvalidAcknowledgement + | _ | { "Acknowledgment cannot be empty" }, + + AcknowledgementExists + { sequence: Sequence } + | e | { + format_args!( + "Packet acknowledgement exists for the packet with the sequence {0}", + e.sequence) + }, + + MissingNextSendSeq + { port_channel_id: (PortId, ChannelId) } + | e | { + format_args!("Missing sequence number for sending packets on port {0} and channel {1}", + e.port_channel_id.0, + e.port_channel_id.1) + }, + + InvalidStringAsSequence + { value: String } + [ TraceError ] + | e | { + format_args!( + "String {0} cannot be converted to packet sequence", + e.value) + }, + + InvalidPacketSequence + { + given_sequence: Sequence, + next_sequence: Sequence + } + | e | { + format_args!( + "Invalid packet sequence {0} ≠ next send sequence {1}", + e.given_sequence, e.next_sequence) + }, + + LowPacketHeight + { + chain_height: Height, + timeout_height: Height + } + | e | { + format_args!( + "Receiving chain block height {0} >= packet timeout height {1}", + e.chain_height, e.timeout_height) + }, + PacketTimeoutNotReached + { + timeout_height: Height, + chain_height: Height, + timeout_timestamp: Timestamp, + chain_timestamp: Timestamp, + } + | e | { format_args!( + "Packet timeout not satisfied for either packet height or timestamp, Packet timeout height {0}, chain height {1}, Packet timeout timestamp {2}, chain timestamp {3}", + e.timeout_height, e.chain_height, e.timeout_timestamp, e.chain_timestamp) }, + + PacketTimeoutHeightNotReached + { + timeout_height: Height, + chain_height: Height, + } + | e | { + format_args!( + "Packet timeout height {0} > chain height {1}", + e.timeout_height, e.chain_height) + }, + + PacketTimeoutTimestampNotReached + { + timeout_timestamp: Timestamp, + chain_timestamp: Timestamp, + } + | e | { + format_args!( + "Packet timeout timestamp {0} > chain timestamp {1}", + e.timeout_timestamp, e.chain_timestamp) + }, + + LowPacketTimestamp + | _ | { "Receiving chain block timestamp >= packet timeout timestamp" }, + + InvalidPacketTimestamp + [ crate::timestamp::ParseTimestampError ] + | _ | { "Invalid packet timeout timestamp value" }, + + ErrorInvalidConsensusState + | _ | { "Invalid timestamp in consensus state; timestamp must be a positive value" }, + + FrozenClient + { client_id: ClientId } + | e | { + format_args!( + "Client with id {0} is frozen", + e.client_id) + }, + + InvalidCounterpartyChannelId + [ ValidationError ] + | _ | { "Invalid channel id in counterparty" }, + + InvalidChannelState + { channel_id: ChannelId, state: State } + | e | { + format_args!( + "Channel {0} should not be state {1}", + e.channel_id, e.state) + }, + + ChannelClosed + { channel_id: ChannelId } + | e | { + format_args!( + "Channel {0} is Closed", + e.channel_id) + }, + + ChanOpenAckProofVerification + | _ | { "Handshake proof verification fails at ChannelOpenAck" }, + + PacketCommitmentNotFound + { sequence: Sequence } + | e | { + format_args!( + "Commitment for the packet {0} not found", + e.sequence) + }, + + IncorrectPacketCommitment + { sequence: Sequence } + | e | { + format_args!( +
"The stored commitment of the packet {0} is incorrect", + e.sequence) + }, + + PacketReceiptNotFound + { sequence: Sequence } + | e | { + format_args!( + "Receipt for the packet {0} not found", + e.sequence) + }, + + PacketAcknowledgementNotFound + { sequence: Sequence } + | e | { + format_args!( + "Acknowledgment for the packet {0} not found", + e.sequence) + }, + + MissingNextAckSeq + { port_channel_id: (PortId, ChannelId) } + | e | { + format_args!("Missing sequence number for ack packets on port {0} and channel {1}", + e.port_channel_id.0, + e.port_channel_id.1) + }, + + ProcessedTimeNotFound + { + client_id: ClientId, + height: Height, + } + | e | { + format_args!( + "Processed time for the client {0} at height {1} not found", + e.client_id, e.height) + }, + + ProcessedHeightNotFound + { + client_id: ClientId, + height: Height, + } + | e | { + format_args!( + "Processed height for the client {0} at height {1} not found", + e.client_id, e.height) + }, + + RouteNotFound + | _ | { "route not found" }, + + ImplementationSpecific + { reason: String } + | e | { format_args!("implementation specific error: {}", e.reason) }, + + AppModule + { description: String } + | e | { + format_args!( + "application module error: {0}", + e.description) + }, + } } impl Error { - pub fn chan_open_confirm_proof_verification(e: Error) -> Error { - e.add_trace(&"Handshake proof verification fails at ChannelOpenConfirm") - } + pub fn chan_open_confirm_proof_verification(e: Error) -> Error { + e.add_trace(&"Handshake proof verification fails at ChannelOpenConfirm") + } } diff --git a/modules/src/core/ics04_channel/events.rs b/modules/src/core/ics04_channel/events.rs index c932787ccb..07a7aa7166 100644 --- a/modules/src/core/ics04_channel/events.rs +++ b/modules/src/core/ics04_channel/events.rs @@ -1,18 +1,20 @@ //! Types for the IBC events emitted from Tendermint Websocket by the channels module. 
use serde_derive::{Deserialize, Serialize}; -use tendermint::abci::Event as AbciEvent; -use tendermint::abci::EventAttribute; - -use crate::core::ics02_client::height::Height; -use crate::core::ics04_channel::error::Error; -use crate::core::ics04_channel::packet::Packet; -use crate::core::ics24_host::identifier::{ChannelId, ConnectionId, PortId}; -use crate::events::{ - extract_attribute, maybe_extract_attribute, Error as EventError, IbcEvent, IbcEventType, - RawObject, +use tendermint::abci::{Event as AbciEvent, EventAttribute}; + +use crate::{ + core::{ + ics02_client::height::Height, + ics04_channel::{error::Error, packet::Packet}, + ics24_host::identifier::{ChannelId, ConnectionId, PortId}, + }, + events::{ + extract_attribute, maybe_extract_attribute, Error as EventError, IbcEvent, IbcEventType, + RawObject, + }, + prelude::*, }; -use crate::prelude::*; /// Channel event attribute keys const HEIGHT_ATTRIBUTE_KEY: &str = "height"; @@ -34,202 +36,195 @@ const PKT_TIMEOUT_TIMESTAMP_ATTRIBUTE_KEY: &str = "packet_timeout_timestamp"; const PKT_ACK_ATTRIBUTE_KEY: &str = "packet_ack"; pub fn try_from_tx(event: &tendermint::abci::Event) -> Option { - match event.kind.parse() { - Ok(IbcEventType::OpenInitChannel) => extract_attributes_from_tx(event) - .map(OpenInit::try_from) - .map(|res| res.ok().map(IbcEvent::OpenInitChannel)) - .ok() - .flatten(), - Ok(IbcEventType::OpenTryChannel) => extract_attributes_from_tx(event) - .map(OpenTry::try_from) - .map(|res| res.ok().map(IbcEvent::OpenTryChannel)) - .ok() - .flatten(), - Ok(IbcEventType::OpenAckChannel) => extract_attributes_from_tx(event) - .map(OpenAck::try_from) - .map(|res| res.ok().map(IbcEvent::OpenAckChannel)) - .ok() - .flatten(), - Ok(IbcEventType::OpenConfirmChannel) => extract_attributes_from_tx(event) - .map(OpenConfirm::try_from) - .map(|res| res.ok().map(IbcEvent::OpenConfirmChannel)) - .ok() - .flatten(), - Ok(IbcEventType::CloseInitChannel) => extract_attributes_from_tx(event) - .map(CloseInit::try_from) - .map(|res| res.ok().map(IbcEvent::CloseInitChannel)) - .ok() - .flatten(), - Ok(IbcEventType::CloseConfirmChannel) => extract_attributes_from_tx(event) - .map(CloseConfirm::try_from) - .map(|res| res.ok().map(IbcEvent::CloseConfirmChannel)) - .ok() - .flatten(), - Ok(IbcEventType::SendPacket) => { - extract_packet_and_write_ack_from_tx(event) - .map(|(packet, write_ack)| { - // This event should not have a write ack. - debug_assert_eq!(write_ack.len(), 0); - IbcEvent::SendPacket(SendPacket { - height: Default::default(), - packet, - }) - }) - .ok() - } - Ok(IbcEventType::WriteAck) => extract_packet_and_write_ack_from_tx(event) - .map(|(packet, write_ack)| { - IbcEvent::WriteAcknowledgement(WriteAcknowledgement { - height: Default::default(), - packet, - ack: write_ack, - }) - }) - .ok(), - Ok(IbcEventType::AckPacket) => { - extract_packet_and_write_ack_from_tx(event) - .map(|(packet, write_ack)| { - // This event should not have a write ack. - debug_assert_eq!(write_ack.len(), 0); - IbcEvent::AcknowledgePacket(AcknowledgePacket { - height: Default::default(), - packet, - }) - }) - .ok() - } - Ok(IbcEventType::Timeout) => { - extract_packet_and_write_ack_from_tx(event) - .map(|(packet, write_ack)| { - // This event should not have a write ack. 
- debug_assert_eq!(write_ack.len(), 0); - IbcEvent::TimeoutPacket(TimeoutPacket { - height: Default::default(), - packet, - }) - }) - .ok() - } - _ => None, - } + match event.kind.parse() { + Ok(IbcEventType::OpenInitChannel) => extract_attributes_from_tx(event) + .map(OpenInit::try_from) + .map(|res| res.ok().map(IbcEvent::OpenInitChannel)) + .ok() + .flatten(), + Ok(IbcEventType::OpenTryChannel) => extract_attributes_from_tx(event) + .map(OpenTry::try_from) + .map(|res| res.ok().map(IbcEvent::OpenTryChannel)) + .ok() + .flatten(), + Ok(IbcEventType::OpenAckChannel) => extract_attributes_from_tx(event) + .map(OpenAck::try_from) + .map(|res| res.ok().map(IbcEvent::OpenAckChannel)) + .ok() + .flatten(), + Ok(IbcEventType::OpenConfirmChannel) => extract_attributes_from_tx(event) + .map(OpenConfirm::try_from) + .map(|res| res.ok().map(IbcEvent::OpenConfirmChannel)) + .ok() + .flatten(), + Ok(IbcEventType::CloseInitChannel) => extract_attributes_from_tx(event) + .map(CloseInit::try_from) + .map(|res| res.ok().map(IbcEvent::CloseInitChannel)) + .ok() + .flatten(), + Ok(IbcEventType::CloseConfirmChannel) => extract_attributes_from_tx(event) + .map(CloseConfirm::try_from) + .map(|res| res.ok().map(IbcEvent::CloseConfirmChannel)) + .ok() + .flatten(), + Ok(IbcEventType::SendPacket) => { + extract_packet_and_write_ack_from_tx(event) + .map(|(packet, write_ack)| { + // This event should not have a write ack. + debug_assert_eq!(write_ack.len(), 0); + IbcEvent::SendPacket(SendPacket { height: Default::default(), packet }) + }) + .ok() + }, + Ok(IbcEventType::WriteAck) => extract_packet_and_write_ack_from_tx(event) + .map(|(packet, write_ack)| { + IbcEvent::WriteAcknowledgement(WriteAcknowledgement { + height: Default::default(), + packet, + ack: write_ack, + }) + }) + .ok(), + Ok(IbcEventType::AckPacket) => { + extract_packet_and_write_ack_from_tx(event) + .map(|(packet, write_ack)| { + // This event should not have a write ack. + debug_assert_eq!(write_ack.len(), 0); + IbcEvent::AcknowledgePacket(AcknowledgePacket { + height: Default::default(), + packet, + }) + }) + .ok() + }, + Ok(IbcEventType::Timeout) => { + extract_packet_and_write_ack_from_tx(event) + .map(|(packet, write_ack)| { + // This event should not have a write ack. 
+ debug_assert_eq!(write_ack.len(), 0); + IbcEvent::TimeoutPacket(TimeoutPacket { height: Default::default(), packet }) + }) + .ok() + }, + _ => None, + } } fn extract_attributes_from_tx(event: &tendermint::abci::Event) -> Result { - let mut attr = Attributes::default(); - - for tag in &event.attributes { - let key = tag.key.as_str(); - let value = tag.value.as_str(); - match key { - PORT_ID_ATTRIBUTE_KEY => attr.port_id = value.parse().map_err(Error::identifier)?, - CHANNEL_ID_ATTRIBUTE_KEY => { - attr.channel_id = value.parse().ok(); - } - CONNECTION_ID_ATTRIBUTE_KEY => { - attr.connection_id = value.parse().map_err(Error::identifier)?; - } - COUNTERPARTY_PORT_ID_ATTRIBUTE_KEY => { - attr.counterparty_port_id = value.parse().map_err(Error::identifier)?; - } - COUNTERPARTY_CHANNEL_ID_ATTRIBUTE_KEY => { - attr.counterparty_channel_id = value.parse().ok(); - } - _ => {} - } - } - - Ok(attr) + let mut attr = Attributes::default(); + + for tag in &event.attributes { + let key = tag.key.as_str(); + let value = tag.value.as_str(); + match key { + PORT_ID_ATTRIBUTE_KEY => attr.port_id = value.parse().map_err(Error::identifier)?, + CHANNEL_ID_ATTRIBUTE_KEY => { + attr.channel_id = value.parse().ok(); + }, + CONNECTION_ID_ATTRIBUTE_KEY => { + attr.connection_id = value.parse().map_err(Error::identifier)?; + }, + COUNTERPARTY_PORT_ID_ATTRIBUTE_KEY => { + attr.counterparty_port_id = value.parse().map_err(Error::identifier)?; + }, + COUNTERPARTY_CHANNEL_ID_ATTRIBUTE_KEY => { + attr.counterparty_channel_id = value.parse().ok(); + }, + _ => {}, + } + } + + Ok(attr) } fn extract_packet_and_write_ack_from_tx( - event: &tendermint::abci::Event, + event: &tendermint::abci::Event, ) -> Result<(Packet, Vec), Error> { - let mut packet = Packet::default(); - let mut write_ack: Vec = Vec::new(); - for tag in &event.attributes { - let key = tag.key.as_str(); - let value = tag.value.as_str(); - match key { - PKT_SRC_PORT_ATTRIBUTE_KEY => { - packet.source_port = value.parse().map_err(Error::identifier)?; - } - PKT_SRC_CHANNEL_ATTRIBUTE_KEY => { - packet.source_channel = value.parse().map_err(Error::identifier)?; - } - PKT_DST_PORT_ATTRIBUTE_KEY => { - packet.destination_port = value.parse().map_err(Error::identifier)?; - } - PKT_DST_CHANNEL_ATTRIBUTE_KEY => { - packet.destination_channel = value.parse().map_err(Error::identifier)?; - } - PKT_SEQ_ATTRIBUTE_KEY => { - packet.sequence = value - .parse::() - .map_err(|e| Error::invalid_string_as_sequence(value.to_string(), e))? 
- .into() - } - PKT_TIMEOUT_HEIGHT_ATTRIBUTE_KEY => { - packet.timeout_height = - value.parse().map_err(|_| Error::invalid_timeout_height())?; - } - PKT_TIMEOUT_TIMESTAMP_ATTRIBUTE_KEY => { - packet.timeout_timestamp = value.parse().unwrap(); - } - PKT_DATA_ATTRIBUTE_KEY => { - packet.data = Vec::from(value.as_bytes()); - } - PKT_ACK_ATTRIBUTE_KEY => { - write_ack = Vec::from(value.as_bytes()); - } - _ => {} - } - } - - Ok((packet, write_ack)) + let mut packet = Packet::default(); + let mut write_ack: Vec = Vec::new(); + for tag in &event.attributes { + let key = tag.key.as_str(); + let value = tag.value.as_str(); + match key { + PKT_SRC_PORT_ATTRIBUTE_KEY => { + packet.source_port = value.parse().map_err(Error::identifier)?; + }, + PKT_SRC_CHANNEL_ATTRIBUTE_KEY => { + packet.source_channel = value.parse().map_err(Error::identifier)?; + }, + PKT_DST_PORT_ATTRIBUTE_KEY => { + packet.destination_port = value.parse().map_err(Error::identifier)?; + }, + PKT_DST_CHANNEL_ATTRIBUTE_KEY => { + packet.destination_channel = value.parse().map_err(Error::identifier)?; + }, + PKT_SEQ_ATTRIBUTE_KEY => + packet.sequence = value + .parse::() + .map_err(|e| Error::invalid_string_as_sequence(value.to_string(), e))? + .into(), + PKT_TIMEOUT_HEIGHT_ATTRIBUTE_KEY => { + packet.timeout_height = + value.parse().map_err(|_| Error::invalid_timeout_height())?; + }, + PKT_TIMEOUT_TIMESTAMP_ATTRIBUTE_KEY => { + packet.timeout_timestamp = value.parse().unwrap(); + }, + PKT_DATA_ATTRIBUTE_KEY => { + packet.data = Vec::from(value.as_bytes()); + }, + PKT_ACK_ATTRIBUTE_KEY => { + write_ack = Vec::from(value.as_bytes()); + }, + _ => {}, + } + } + + Ok((packet, write_ack)) } fn extract_attributes(object: &RawObject<'_>, namespace: &str) -> Result { - Ok(Attributes { - height: object.height, - port_id: extract_attribute(object, &format!("{}.port_id", namespace))? - .parse() - .map_err(EventError::parse)?, - channel_id: maybe_extract_attribute(object, &format!("{}.channel_id", namespace)) - .and_then(|v| v.parse().ok()), - connection_id: extract_attribute(object, &format!("{}.connection_id", namespace))? - .parse() - .map_err(EventError::parse)?, - counterparty_port_id: extract_attribute( - object, - &format!("{}.counterparty_port_id", namespace), - )? - .parse() - .map_err(EventError::parse)?, - counterparty_channel_id: maybe_extract_attribute( - object, - &format!("{}.counterparty_channel_id", namespace), - ) - .and_then(|v| v.parse().ok()), - }) + Ok(Attributes { + height: object.height, + port_id: extract_attribute(object, &format!("{}.port_id", namespace))? + .parse() + .map_err(EventError::parse)?, + channel_id: maybe_extract_attribute(object, &format!("{}.channel_id", namespace)) + .and_then(|v| v.parse().ok()), + connection_id: extract_attribute(object, &format!("{}.connection_id", namespace))? + .parse() + .map_err(EventError::parse)?, + counterparty_port_id: extract_attribute( + object, + &format!("{}.counterparty_port_id", namespace), + )? 
+ .parse() + .map_err(EventError::parse)?, + counterparty_channel_id: maybe_extract_attribute( + object, + &format!("{}.counterparty_channel_id", namespace), + ) + .and_then(|v| v.parse().ok()), + }) } #[derive(Debug, Default, Deserialize, Serialize, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct Attributes { - pub height: Height, - pub port_id: PortId, - pub channel_id: Option, - pub connection_id: ConnectionId, - pub counterparty_port_id: PortId, - pub counterparty_channel_id: Option, + pub height: Height, + pub port_id: PortId, + pub channel_id: Option, + pub connection_id: ConnectionId, + pub counterparty_port_id: PortId, + pub counterparty_channel_id: Option, } impl Attributes { - pub fn port_id(&self) -> &PortId { - &self.port_id - } - pub fn channel_id(&self) -> Option<&ChannelId> { - self.channel_id.as_ref() - } + pub fn port_id(&self) -> &PortId { + &self.port_id + } + pub fn channel_id(&self) -> Option<&ChannelId> { + self.channel_id.as_ref() + } } /// Convert attributes to Tendermint ABCI tags @@ -241,50 +236,50 @@ impl Attributes { /// Once tendermint-rs improves the API of the `Key` and `Value` types, /// we will be able to remove the `.parse().unwrap()` calls. impl From for Vec { - fn from(a: Attributes) -> Self { - let mut attributes = vec![]; - let height = EventAttribute { - key: HEIGHT_ATTRIBUTE_KEY.parse().unwrap(), - value: a.height.to_string().parse().unwrap(), - index: false, - }; - attributes.push(height); - let port_id = EventAttribute { - key: PORT_ID_ATTRIBUTE_KEY.parse().unwrap(), - value: a.port_id.to_string().parse().unwrap(), - index: false, - }; - attributes.push(port_id); - if let Some(channel_id) = a.channel_id { - let channel_id = EventAttribute { - key: CHANNEL_ID_ATTRIBUTE_KEY.parse().unwrap(), - value: channel_id.to_string().parse().unwrap(), - index: false, - }; - attributes.push(channel_id); - } - let connection_id = EventAttribute { - key: CONNECTION_ID_ATTRIBUTE_KEY.parse().unwrap(), - value: a.connection_id.to_string().parse().unwrap(), - index: false, - }; - attributes.push(connection_id); - let counterparty_port_id = EventAttribute { - key: COUNTERPARTY_PORT_ID_ATTRIBUTE_KEY.parse().unwrap(), - value: a.counterparty_port_id.to_string().parse().unwrap(), - index: false, - }; - attributes.push(counterparty_port_id); - if let Some(channel_id) = a.counterparty_channel_id { - let channel_id = EventAttribute { - key: COUNTERPARTY_CHANNEL_ID_ATTRIBUTE_KEY.parse().unwrap(), - value: channel_id.to_string().parse().unwrap(), - index: false, - }; - attributes.push(channel_id); - } - attributes - } + fn from(a: Attributes) -> Self { + let mut attributes = vec![]; + let height = EventAttribute { + key: HEIGHT_ATTRIBUTE_KEY.parse().unwrap(), + value: a.height.to_string().parse().unwrap(), + index: false, + }; + attributes.push(height); + let port_id = EventAttribute { + key: PORT_ID_ATTRIBUTE_KEY.parse().unwrap(), + value: a.port_id.to_string().parse().unwrap(), + index: false, + }; + attributes.push(port_id); + if let Some(channel_id) = a.channel_id { + let channel_id = EventAttribute { + key: CHANNEL_ID_ATTRIBUTE_KEY.parse().unwrap(), + value: channel_id.to_string().parse().unwrap(), + index: false, + }; + attributes.push(channel_id); + } + let connection_id = EventAttribute { + key: CONNECTION_ID_ATTRIBUTE_KEY.parse().unwrap(), + value: a.connection_id.to_string().parse().unwrap(), + index: false, + }; + attributes.push(connection_id); + let counterparty_port_id = EventAttribute { + key: COUNTERPARTY_PORT_ID_ATTRIBUTE_KEY.parse().unwrap(), + 
value: a.counterparty_port_id.to_string().parse().unwrap(), + index: false, + }; + attributes.push(counterparty_port_id); + if let Some(channel_id) = a.counterparty_channel_id { + let channel_id = EventAttribute { + key: COUNTERPARTY_CHANNEL_ID_ATTRIBUTE_KEY.parse().unwrap(), + value: channel_id.to_string().parse().unwrap(), + index: false, + }; + attributes.push(channel_id); + } + attributes + } } /// Convert attributes to Tendermint ABCI tags @@ -296,417 +291,412 @@ impl From for Vec { /// Once tendermint-rs improves the API of the `Key` and `Value` types, /// we will be able to remove the `.parse().unwrap()` calls. impl TryFrom for Vec { - type Error = Error; - fn try_from(p: Packet) -> Result { - let mut attributes = vec![]; - let src_port = EventAttribute { - key: PKT_SRC_PORT_ATTRIBUTE_KEY.parse().unwrap(), - value: p.source_port.to_string().parse().unwrap(), - index: false, - }; - attributes.push(src_port); - let src_channel = EventAttribute { - key: PKT_SRC_CHANNEL_ATTRIBUTE_KEY.parse().unwrap(), - value: p.source_channel.to_string().parse().unwrap(), - index: false, - }; - attributes.push(src_channel); - let dst_port = EventAttribute { - key: PKT_DST_PORT_ATTRIBUTE_KEY.parse().unwrap(), - value: p.destination_port.to_string().parse().unwrap(), - index: false, - }; - attributes.push(dst_port); - let dst_channel = EventAttribute { - key: PKT_DST_CHANNEL_ATTRIBUTE_KEY.parse().unwrap(), - value: p.destination_channel.to_string().parse().unwrap(), - index: false, - }; - attributes.push(dst_channel); - let sequence = EventAttribute { - key: PKT_SEQ_ATTRIBUTE_KEY.parse().unwrap(), - value: p.sequence.to_string().parse().unwrap(), - index: false, - }; - attributes.push(sequence); - let timeout_height = EventAttribute { - key: PKT_TIMEOUT_HEIGHT_ATTRIBUTE_KEY.parse().unwrap(), - value: p.timeout_height.to_string().parse().unwrap(), - index: false, - }; - attributes.push(timeout_height); - let timeout_timestamp = EventAttribute { - key: PKT_TIMEOUT_TIMESTAMP_ATTRIBUTE_KEY.parse().unwrap(), - value: p - .timeout_timestamp - .nanoseconds() - .to_string() - .parse() - .unwrap(), - index: false, - }; - attributes.push(timeout_timestamp); - let val = - String::from_utf8(p.data).expect("hex-encoded string should always be valid UTF-8"); - let packet_data = EventAttribute { - key: PKT_DATA_ATTRIBUTE_KEY.parse().unwrap(), - value: val.parse().unwrap(), - index: false, - }; - attributes.push(packet_data); - let ack = EventAttribute { - key: PKT_ACK_ATTRIBUTE_KEY.parse().unwrap(), - value: "".parse().unwrap(), - index: false, - }; - attributes.push(ack); - Ok(attributes) - } + type Error = Error; + fn try_from(p: Packet) -> Result { + let mut attributes = vec![]; + let src_port = EventAttribute { + key: PKT_SRC_PORT_ATTRIBUTE_KEY.parse().unwrap(), + value: p.source_port.to_string().parse().unwrap(), + index: false, + }; + attributes.push(src_port); + let src_channel = EventAttribute { + key: PKT_SRC_CHANNEL_ATTRIBUTE_KEY.parse().unwrap(), + value: p.source_channel.to_string().parse().unwrap(), + index: false, + }; + attributes.push(src_channel); + let dst_port = EventAttribute { + key: PKT_DST_PORT_ATTRIBUTE_KEY.parse().unwrap(), + value: p.destination_port.to_string().parse().unwrap(), + index: false, + }; + attributes.push(dst_port); + let dst_channel = EventAttribute { + key: PKT_DST_CHANNEL_ATTRIBUTE_KEY.parse().unwrap(), + value: p.destination_channel.to_string().parse().unwrap(), + index: false, + }; + attributes.push(dst_channel); + let sequence = EventAttribute { + key: 
PKT_SEQ_ATTRIBUTE_KEY.parse().unwrap(), + value: p.sequence.to_string().parse().unwrap(), + index: false, + }; + attributes.push(sequence); + let timeout_height = EventAttribute { + key: PKT_TIMEOUT_HEIGHT_ATTRIBUTE_KEY.parse().unwrap(), + value: p.timeout_height.to_string().parse().unwrap(), + index: false, + }; + attributes.push(timeout_height); + let timeout_timestamp = EventAttribute { + key: PKT_TIMEOUT_TIMESTAMP_ATTRIBUTE_KEY.parse().unwrap(), + value: p.timeout_timestamp.nanoseconds().to_string().parse().unwrap(), + index: false, + }; + attributes.push(timeout_timestamp); + let val = + String::from_utf8(p.data).expect("hex-encoded string should always be valid UTF-8"); + let packet_data = EventAttribute { + key: PKT_DATA_ATTRIBUTE_KEY.parse().unwrap(), + value: val.parse().unwrap(), + index: false, + }; + attributes.push(packet_data); + let ack = EventAttribute { + key: PKT_ACK_ATTRIBUTE_KEY.parse().unwrap(), + value: "".parse().unwrap(), + index: false, + }; + attributes.push(ack); + Ok(attributes) + } } trait EventType { - fn event_type() -> IbcEventType; + fn event_type() -> IbcEventType; } #[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Eq)] pub struct OpenInit { - pub height: Height, - pub port_id: PortId, - pub channel_id: Option, - pub connection_id: ConnectionId, - pub counterparty_port_id: PortId, - pub counterparty_channel_id: Option, + pub height: Height, + pub port_id: PortId, + pub channel_id: Option, + pub connection_id: ConnectionId, + pub counterparty_port_id: PortId, + pub counterparty_channel_id: Option, } impl OpenInit { - pub fn channel_id(&self) -> Option<&ChannelId> { - self.channel_id.as_ref() - } - pub fn port_id(&self) -> &PortId { - &self.port_id - } - pub fn height(&self) -> Height { - self.height - } - pub fn set_height(&mut self, height: Height) { - self.height = height; - } + pub fn channel_id(&self) -> Option<&ChannelId> { + self.channel_id.as_ref() + } + pub fn port_id(&self) -> &PortId { + &self.port_id + } + pub fn height(&self) -> Height { + self.height + } + pub fn set_height(&mut self, height: Height) { + self.height = height; + } } impl From for Attributes { - fn from(ev: OpenInit) -> Self { - Self { - height: ev.height, - port_id: ev.port_id, - channel_id: ev.channel_id, - connection_id: ev.connection_id, - counterparty_port_id: ev.counterparty_port_id, - counterparty_channel_id: ev.counterparty_channel_id, - } - } + fn from(ev: OpenInit) -> Self { + Self { + height: ev.height, + port_id: ev.port_id, + channel_id: ev.channel_id, + connection_id: ev.connection_id, + counterparty_port_id: ev.counterparty_port_id, + counterparty_channel_id: ev.counterparty_channel_id, + } + } } impl From for IbcEvent { - fn from(v: OpenInit) -> Self { - IbcEvent::OpenInitChannel(v) - } + fn from(v: OpenInit) -> Self { + IbcEvent::OpenInitChannel(v) + } } impl EventType for OpenInit { - fn event_type() -> IbcEventType { - IbcEventType::OpenInitChannel - } + fn event_type() -> IbcEventType { + IbcEventType::OpenInitChannel + } } #[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Eq)] pub struct OpenTry { - pub height: Height, - pub port_id: PortId, - pub channel_id: Option, - pub connection_id: ConnectionId, - pub counterparty_port_id: PortId, - pub counterparty_channel_id: Option, + pub height: Height, + pub port_id: PortId, + pub channel_id: Option, + pub connection_id: ConnectionId, + pub counterparty_port_id: PortId, + pub counterparty_channel_id: Option, } impl From for Attributes { - fn from(ev: OpenTry) -> Self { - Self { - height: ev.height, 
- port_id: ev.port_id, - channel_id: ev.channel_id, - connection_id: ev.connection_id, - counterparty_port_id: ev.counterparty_port_id, - counterparty_channel_id: ev.counterparty_channel_id, - } - } + fn from(ev: OpenTry) -> Self { + Self { + height: ev.height, + port_id: ev.port_id, + channel_id: ev.channel_id, + connection_id: ev.connection_id, + counterparty_port_id: ev.counterparty_port_id, + counterparty_channel_id: ev.counterparty_channel_id, + } + } } impl OpenTry { - pub fn channel_id(&self) -> Option<&ChannelId> { - self.channel_id.as_ref() - } - pub fn port_id(&self) -> &PortId { - &self.port_id - } - pub fn height(&self) -> Height { - self.height - } - pub fn set_height(&mut self, height: Height) { - self.height = height; - } + pub fn channel_id(&self) -> Option<&ChannelId> { + self.channel_id.as_ref() + } + pub fn port_id(&self) -> &PortId { + &self.port_id + } + pub fn height(&self) -> Height { + self.height + } + pub fn set_height(&mut self, height: Height) { + self.height = height; + } } impl From for IbcEvent { - fn from(v: OpenTry) -> Self { - IbcEvent::OpenTryChannel(v) - } + fn from(v: OpenTry) -> Self { + IbcEvent::OpenTryChannel(v) + } } impl EventType for OpenTry { - fn event_type() -> IbcEventType { - IbcEventType::OpenTryChannel - } + fn event_type() -> IbcEventType { + IbcEventType::OpenTryChannel + } } #[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Eq)] pub struct OpenAck { - pub height: Height, - pub port_id: PortId, - pub channel_id: Option, - pub counterparty_channel_id: Option, - pub connection_id: ConnectionId, - pub counterparty_port_id: PortId, + pub height: Height, + pub port_id: PortId, + pub channel_id: Option, + pub counterparty_channel_id: Option, + pub connection_id: ConnectionId, + pub counterparty_port_id: PortId, } impl From for Attributes { - fn from(ev: OpenAck) -> Self { - Self { - height: ev.height, - port_id: ev.port_id, - channel_id: ev.channel_id, - connection_id: ev.connection_id, - counterparty_port_id: ev.counterparty_port_id, - counterparty_channel_id: ev.counterparty_channel_id, - } - } + fn from(ev: OpenAck) -> Self { + Self { + height: ev.height, + port_id: ev.port_id, + channel_id: ev.channel_id, + connection_id: ev.connection_id, + counterparty_port_id: ev.counterparty_port_id, + counterparty_channel_id: ev.counterparty_channel_id, + } + } } impl OpenAck { - pub fn channel_id(&self) -> Option<&ChannelId> { - self.channel_id.as_ref() - } - pub fn port_id(&self) -> &PortId { - &self.port_id - } - pub fn height(&self) -> Height { - self.height - } - pub fn set_height(&mut self, height: Height) { - self.height = height; - } - - pub fn counterparty_channel_id(&self) -> Option<&ChannelId> { - self.counterparty_channel_id.as_ref() - } + pub fn channel_id(&self) -> Option<&ChannelId> { + self.channel_id.as_ref() + } + pub fn port_id(&self) -> &PortId { + &self.port_id + } + pub fn height(&self) -> Height { + self.height + } + pub fn set_height(&mut self, height: Height) { + self.height = height; + } + + pub fn counterparty_channel_id(&self) -> Option<&ChannelId> { + self.counterparty_channel_id.as_ref() + } } impl From for IbcEvent { - fn from(v: OpenAck) -> Self { - IbcEvent::OpenAckChannel(v) - } + fn from(v: OpenAck) -> Self { + IbcEvent::OpenAckChannel(v) + } } impl EventType for OpenAck { - fn event_type() -> IbcEventType { - IbcEventType::OpenAckChannel - } + fn event_type() -> IbcEventType { + IbcEventType::OpenAckChannel + } } #[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Eq)] pub struct OpenConfirm { - pub 
height: Height, - pub port_id: PortId, - pub channel_id: Option, - pub connection_id: ConnectionId, - pub counterparty_port_id: PortId, - pub counterparty_channel_id: Option, + pub height: Height, + pub port_id: PortId, + pub channel_id: Option, + pub connection_id: ConnectionId, + pub counterparty_port_id: PortId, + pub counterparty_channel_id: Option, } impl From for Attributes { - fn from(ev: OpenConfirm) -> Self { - Self { - height: ev.height, - port_id: ev.port_id, - channel_id: ev.channel_id, - connection_id: ev.connection_id, - counterparty_port_id: ev.counterparty_port_id, - counterparty_channel_id: ev.counterparty_channel_id, - } - } + fn from(ev: OpenConfirm) -> Self { + Self { + height: ev.height, + port_id: ev.port_id, + channel_id: ev.channel_id, + connection_id: ev.connection_id, + counterparty_port_id: ev.counterparty_port_id, + counterparty_channel_id: ev.counterparty_channel_id, + } + } } impl OpenConfirm { - pub fn channel_id(&self) -> Option<&ChannelId> { - self.channel_id.as_ref() - } - pub fn port_id(&self) -> &PortId { - &self.port_id - } - pub fn height(&self) -> Height { - self.height - } - pub fn set_height(&mut self, height: Height) { - self.height = height; - } + pub fn channel_id(&self) -> Option<&ChannelId> { + self.channel_id.as_ref() + } + pub fn port_id(&self) -> &PortId { + &self.port_id + } + pub fn height(&self) -> Height { + self.height + } + pub fn set_height(&mut self, height: Height) { + self.height = height; + } } impl From for IbcEvent { - fn from(v: OpenConfirm) -> Self { - IbcEvent::OpenConfirmChannel(v) - } + fn from(v: OpenConfirm) -> Self { + IbcEvent::OpenConfirmChannel(v) + } } impl EventType for OpenConfirm { - fn event_type() -> IbcEventType { - IbcEventType::OpenConfirmChannel - } + fn event_type() -> IbcEventType { + IbcEventType::OpenConfirmChannel + } } #[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Eq)] pub struct CloseInit { - pub height: Height, - pub port_id: PortId, - pub channel_id: ChannelId, - pub connection_id: ConnectionId, - pub counterparty_port_id: PortId, - pub counterparty_channel_id: Option, + pub height: Height, + pub port_id: PortId, + pub channel_id: ChannelId, + pub connection_id: ConnectionId, + pub counterparty_port_id: PortId, + pub counterparty_channel_id: Option, } impl From for Attributes { - fn from(ev: CloseInit) -> Self { - Self { - height: ev.height, - port_id: ev.port_id, - channel_id: Some(ev.channel_id), - connection_id: ev.connection_id, - counterparty_port_id: ev.counterparty_port_id, - counterparty_channel_id: ev.counterparty_channel_id, - } - } + fn from(ev: CloseInit) -> Self { + Self { + height: ev.height, + port_id: ev.port_id, + channel_id: Some(ev.channel_id), + connection_id: ev.connection_id, + counterparty_port_id: ev.counterparty_port_id, + counterparty_channel_id: ev.counterparty_channel_id, + } + } } impl CloseInit { - pub fn port_id(&self) -> &PortId { - &self.port_id - } + pub fn port_id(&self) -> &PortId { + &self.port_id + } - pub fn channel_id(&self) -> &ChannelId { - &self.channel_id - } + pub fn channel_id(&self) -> &ChannelId { + &self.channel_id + } - pub fn counterparty_port_id(&self) -> &PortId { - &self.counterparty_port_id - } + pub fn counterparty_port_id(&self) -> &PortId { + &self.counterparty_port_id + } - pub fn counterparty_channel_id(&self) -> Option<&ChannelId> { - self.counterparty_channel_id.as_ref() - } + pub fn counterparty_channel_id(&self) -> Option<&ChannelId> { + self.counterparty_channel_id.as_ref() + } - pub fn height(&self) -> Height { - 
self.height - } + pub fn height(&self) -> Height { + self.height + } - pub fn set_height(&mut self, height: Height) { - self.height = height; - } + pub fn set_height(&mut self, height: Height) { + self.height = height; + } } impl TryFrom for CloseInit { - type Error = EventError; - fn try_from(attrs: Attributes) -> Result { - if let Some(channel_id) = attrs.channel_id() { - Ok(CloseInit { - height: attrs.height, - port_id: attrs.port_id.clone(), - channel_id: *channel_id, - connection_id: attrs.connection_id.clone(), - counterparty_port_id: attrs.counterparty_port_id.clone(), - counterparty_channel_id: attrs.counterparty_channel_id, - }) - } else { - Err(EventError::channel(Error::missing_channel_id())) - } - } + type Error = EventError; + fn try_from(attrs: Attributes) -> Result { + if let Some(channel_id) = attrs.channel_id() { + Ok(CloseInit { + height: attrs.height, + port_id: attrs.port_id.clone(), + channel_id: *channel_id, + connection_id: attrs.connection_id.clone(), + counterparty_port_id: attrs.counterparty_port_id.clone(), + counterparty_channel_id: attrs.counterparty_channel_id, + }) + } else { + Err(EventError::channel(Error::missing_channel_id())) + } + } } impl From for IbcEvent { - fn from(v: CloseInit) -> Self { - IbcEvent::CloseInitChannel(v) - } + fn from(v: CloseInit) -> Self { + IbcEvent::CloseInitChannel(v) + } } impl core::fmt::Display for CloseInit { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { - write!( - f, - "{} {} {:?}", - self.height(), - IbcEventType::CloseInitChannel.as_str(), - Attributes::from(self.clone()) - ) - } + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { + write!( + f, + "{} {} {:?}", + self.height(), + IbcEventType::CloseInitChannel.as_str(), + Attributes::from(self.clone()) + ) + } } impl EventType for CloseInit { - fn event_type() -> IbcEventType { - IbcEventType::CloseInitChannel - } + fn event_type() -> IbcEventType { + IbcEventType::CloseInitChannel + } } #[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Eq)] pub struct CloseConfirm { - pub height: Height, - pub channel_id: Option, - pub port_id: PortId, - pub connection_id: ConnectionId, - pub counterparty_port_id: PortId, - pub counterparty_channel_id: Option, + pub height: Height, + pub channel_id: Option, + pub port_id: PortId, + pub connection_id: ConnectionId, + pub counterparty_port_id: PortId, + pub counterparty_channel_id: Option, } impl From for Attributes { - fn from(ev: CloseConfirm) -> Self { - Self { - height: ev.height, - port_id: ev.port_id, - channel_id: ev.channel_id, - connection_id: ev.connection_id, - counterparty_port_id: ev.counterparty_port_id, - counterparty_channel_id: ev.counterparty_channel_id, - } - } + fn from(ev: CloseConfirm) -> Self { + Self { + height: ev.height, + port_id: ev.port_id, + channel_id: ev.channel_id, + connection_id: ev.connection_id, + counterparty_port_id: ev.counterparty_port_id, + counterparty_channel_id: ev.counterparty_channel_id, + } + } } impl CloseConfirm { - pub fn channel_id(&self) -> Option<&ChannelId> { - self.channel_id.as_ref() - } - pub fn height(&self) -> Height { - self.height - } - pub fn set_height(&mut self, height: Height) { - self.height = height; - } + pub fn channel_id(&self) -> Option<&ChannelId> { + self.channel_id.as_ref() + } + pub fn height(&self) -> Height { + self.height + } + pub fn set_height(&mut self, height: Height) { + self.height = height; + } } impl From for IbcEvent { - fn from(v: CloseConfirm) -> Self { - 
IbcEvent::CloseConfirmChannel(v) - } + fn from(v: CloseConfirm) -> Self { + IbcEvent::CloseConfirmChannel(v) + } } impl EventType for CloseConfirm { - fn event_type() -> IbcEventType { - IbcEventType::CloseConfirmChannel - } + fn event_type() -> IbcEventType { + IbcEventType::CloseConfirmChannel + } } macro_rules! impl_try_from_attribute_for_event { @@ -745,14 +735,7 @@ macro_rules! impl_from_ibc_to_abci_event { }; } -impl_from_ibc_to_abci_event!( - OpenInit, - OpenTry, - OpenAck, - OpenConfirm, - CloseInit, - CloseConfirm -); +impl_from_ibc_to_abci_event!(OpenInit, OpenTry, OpenAck, OpenConfirm, CloseInit, CloseConfirm); macro_rules! impl_try_from_raw_obj_for_event { ($($event:ty),+) => { @@ -766,357 +749,320 @@ macro_rules! impl_try_from_raw_obj_for_event { }; } -impl_try_from_raw_obj_for_event!( - OpenInit, - OpenTry, - OpenAck, - OpenConfirm, - CloseInit, - CloseConfirm -); +impl_try_from_raw_obj_for_event!(OpenInit, OpenTry, OpenAck, OpenConfirm, CloseInit, CloseConfirm); #[derive(Deserialize, Serialize, Clone, PartialEq, Eq)] pub struct SendPacket { - pub height: Height, - pub packet: Packet, + pub height: Height, + pub packet: Packet, } impl SendPacket { - pub fn height(&self) -> Height { - self.height - } - pub fn set_height(&mut self, height: Height) { - self.height = height; - } - pub fn src_port_id(&self) -> &PortId { - &self.packet.source_port - } - pub fn src_channel_id(&self) -> &ChannelId { - &self.packet.source_channel - } - pub fn dst_port_id(&self) -> &PortId { - &self.packet.destination_port - } - pub fn dst_channel_id(&self) -> &ChannelId { - &self.packet.destination_channel - } + pub fn height(&self) -> Height { + self.height + } + pub fn set_height(&mut self, height: Height) { + self.height = height; + } + pub fn src_port_id(&self) -> &PortId { + &self.packet.source_port + } + pub fn src_channel_id(&self) -> &ChannelId { + &self.packet.source_channel + } + pub fn dst_port_id(&self) -> &PortId { + &self.packet.destination_port + } + pub fn dst_channel_id(&self) -> &ChannelId { + &self.packet.destination_channel + } } impl From for IbcEvent { - fn from(v: SendPacket) -> Self { - IbcEvent::SendPacket(v) - } + fn from(v: SendPacket) -> Self { + IbcEvent::SendPacket(v) + } } impl TryFrom for AbciEvent { - type Error = Error; + type Error = Error; - fn try_from(v: SendPacket) -> Result { - let attributes = Vec::::try_from(v.packet)?; - Ok(AbciEvent { - kind: IbcEventType::SendPacket.as_str().to_string(), - attributes, - }) - } + fn try_from(v: SendPacket) -> Result { + let attributes = Vec::::try_from(v.packet)?; + Ok(AbciEvent { kind: IbcEventType::SendPacket.as_str().to_string(), attributes }) + } } impl core::fmt::Display for SendPacket { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { - write!(f, "SendPacket - h:{}, {}", self.height, self.packet) - } + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { + write!(f, "SendPacket - h:{}, {}", self.height, self.packet) + } } impl core::fmt::Debug for SendPacket { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - write!(f, "SendPacket - h:{}, {}", self.height, self.packet) - } + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + write!(f, "SendPacket - h:{}, {}", self.height, self.packet) + } } #[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Eq)] pub struct ReceivePacket { - pub height: Height, - pub packet: Packet, + pub height: Height, + pub packet: Packet, } impl ReceivePacket { - pub fn height(&self) 
-> Height { - self.height - } - pub fn set_height(&mut self, height: Height) { - self.height = height; - } - pub fn src_port_id(&self) -> &PortId { - &self.packet.source_port - } - pub fn src_channel_id(&self) -> &ChannelId { - &self.packet.source_channel - } - pub fn dst_port_id(&self) -> &PortId { - &self.packet.destination_port - } - pub fn dst_channel_id(&self) -> &ChannelId { - &self.packet.destination_channel - } + pub fn height(&self) -> Height { + self.height + } + pub fn set_height(&mut self, height: Height) { + self.height = height; + } + pub fn src_port_id(&self) -> &PortId { + &self.packet.source_port + } + pub fn src_channel_id(&self) -> &ChannelId { + &self.packet.source_channel + } + pub fn dst_port_id(&self) -> &PortId { + &self.packet.destination_port + } + pub fn dst_channel_id(&self) -> &ChannelId { + &self.packet.destination_channel + } } impl From for IbcEvent { - fn from(v: ReceivePacket) -> Self { - IbcEvent::ReceivePacket(v) - } + fn from(v: ReceivePacket) -> Self { + IbcEvent::ReceivePacket(v) + } } impl TryFrom for AbciEvent { - type Error = Error; + type Error = Error; - fn try_from(v: ReceivePacket) -> Result { - let attributes = Vec::::try_from(v.packet)?; - Ok(AbciEvent { - kind: IbcEventType::ReceivePacket.as_str().to_string(), - attributes, - }) - } + fn try_from(v: ReceivePacket) -> Result { + let attributes = Vec::::try_from(v.packet)?; + Ok(AbciEvent { kind: IbcEventType::ReceivePacket.as_str().to_string(), attributes }) + } } impl core::fmt::Display for ReceivePacket { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { - write!(f, "ReceivePacket - h:{}, {}", self.height, self.packet) - } + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { + write!(f, "ReceivePacket - h:{}, {}", self.height, self.packet) + } } #[derive(Deserialize, Serialize, Clone, PartialEq, Eq)] pub struct WriteAcknowledgement { - pub height: Height, - pub packet: Packet, - #[serde( - serialize_with = "crate::serializers::ser_hex_upper", - deserialize_with = "crate::serializers::deser_hex_upper" - )] - pub ack: Vec, + pub height: Height, + pub packet: Packet, + #[serde( + serialize_with = "crate::serializers::ser_hex_upper", + deserialize_with = "crate::serializers::deser_hex_upper" + )] + pub ack: Vec, } impl WriteAcknowledgement { - pub fn height(&self) -> Height { - self.height - } - pub fn set_height(&mut self, height: Height) { - self.height = height; - } - pub fn src_port_id(&self) -> &PortId { - &self.packet.source_port - } - pub fn src_channel_id(&self) -> &ChannelId { - &self.packet.source_channel - } - pub fn dst_port_id(&self) -> &PortId { - &self.packet.destination_port - } - pub fn dst_channel_id(&self) -> &ChannelId { - &self.packet.destination_channel - } + pub fn height(&self) -> Height { + self.height + } + pub fn set_height(&mut self, height: Height) { + self.height = height; + } + pub fn src_port_id(&self) -> &PortId { + &self.packet.source_port + } + pub fn src_channel_id(&self) -> &ChannelId { + &self.packet.source_channel + } + pub fn dst_port_id(&self) -> &PortId { + &self.packet.destination_port + } + pub fn dst_channel_id(&self) -> &ChannelId { + &self.packet.destination_channel + } } impl From for IbcEvent { - fn from(v: WriteAcknowledgement) -> Self { - IbcEvent::WriteAcknowledgement(v) - } + fn from(v: WriteAcknowledgement) -> Self { + IbcEvent::WriteAcknowledgement(v) + } } impl TryFrom for AbciEvent { - type Error = Error; - - fn try_from(v: WriteAcknowledgement) -> Result { - let mut 
attributes = Vec::::try_from(v.packet)?; - let val = - String::from_utf8(v.ack).expect("hex-encoded string should always be valid UTF-8"); - // No actual conversion from string to `EventAttribute::Key` or `EventAttribute::Value` - let ack = EventAttribute { - key: PKT_ACK_ATTRIBUTE_KEY.parse().unwrap(), - value: val.parse().unwrap(), - index: false, - }; - attributes.push(ack); - Ok(AbciEvent { - kind: IbcEventType::WriteAck.as_str().to_string(), - attributes, - }) - } + type Error = Error; + + fn try_from(v: WriteAcknowledgement) -> Result { + let mut attributes = Vec::::try_from(v.packet)?; + let val = + String::from_utf8(v.ack).expect("hex-encoded string should always be valid UTF-8"); + // No actual conversion from string to `EventAttribute::Key` or `EventAttribute::Value` + let ack = EventAttribute { + key: PKT_ACK_ATTRIBUTE_KEY.parse().unwrap(), + value: val.parse().unwrap(), + index: false, + }; + attributes.push(ack); + Ok(AbciEvent { kind: IbcEventType::WriteAck.as_str().to_string(), attributes }) + } } impl core::fmt::Display for WriteAcknowledgement { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { - write!( - f, - "WriteAcknowledgement - h:{}, {}", - self.height, self.packet - ) - } + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { + write!(f, "WriteAcknowledgement - h:{}, {}", self.height, self.packet) + } } impl core::fmt::Debug for WriteAcknowledgement { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - write!( - f, - "WriteAcknowledgement - h:{}, {}", - self.height, self.packet - ) - } + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + write!(f, "WriteAcknowledgement - h:{}, {}", self.height, self.packet) + } } #[derive(Deserialize, Serialize, Clone, PartialEq, Eq)] pub struct AcknowledgePacket { - pub height: Height, - pub packet: Packet, + pub height: Height, + pub packet: Packet, } impl AcknowledgePacket { - pub fn height(&self) -> Height { - self.height - } - pub fn set_height(&mut self, height: Height) { - self.height = height; - } - pub fn src_port_id(&self) -> &PortId { - &self.packet.source_port - } - pub fn src_channel_id(&self) -> &ChannelId { - &self.packet.source_channel - } + pub fn height(&self) -> Height { + self.height + } + pub fn set_height(&mut self, height: Height) { + self.height = height; + } + pub fn src_port_id(&self) -> &PortId { + &self.packet.source_port + } + pub fn src_channel_id(&self) -> &ChannelId { + &self.packet.source_channel + } } impl From for IbcEvent { - fn from(v: AcknowledgePacket) -> Self { - IbcEvent::AcknowledgePacket(v) - } + fn from(v: AcknowledgePacket) -> Self { + IbcEvent::AcknowledgePacket(v) + } } impl TryFrom for AbciEvent { - type Error = Error; + type Error = Error; - fn try_from(v: AcknowledgePacket) -> Result { - let attributes = Vec::::try_from(v.packet)?; - Ok(AbciEvent { - kind: IbcEventType::AckPacket.as_str().to_string(), - attributes, - }) - } + fn try_from(v: AcknowledgePacket) -> Result { + let attributes = Vec::::try_from(v.packet)?; + Ok(AbciEvent { kind: IbcEventType::AckPacket.as_str().to_string(), attributes }) + } } impl core::fmt::Display for AcknowledgePacket { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { - write!(f, "h:{}, {}", self.height, self.packet) - } + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { + write!(f, "h:{}, {}", self.height, self.packet) + } } impl core::fmt::Debug for AcknowledgePacket { - fn 
fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - write!(f, "AcknowledgePacket - h:{}, {}", self.height, self.packet) - } + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + write!(f, "AcknowledgePacket - h:{}, {}", self.height, self.packet) + } } #[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Eq)] pub struct TimeoutPacket { - pub height: Height, - pub packet: Packet, + pub height: Height, + pub packet: Packet, } impl TimeoutPacket { - pub fn height(&self) -> Height { - self.height - } - pub fn set_height(&mut self, height: Height) { - self.height = height; - } - pub fn src_port_id(&self) -> &PortId { - &self.packet.source_port - } - pub fn src_channel_id(&self) -> &ChannelId { - &self.packet.source_channel - } - pub fn dst_port_id(&self) -> &PortId { - &self.packet.destination_port - } - pub fn dst_channel_id(&self) -> &ChannelId { - &self.packet.destination_channel - } + pub fn height(&self) -> Height { + self.height + } + pub fn set_height(&mut self, height: Height) { + self.height = height; + } + pub fn src_port_id(&self) -> &PortId { + &self.packet.source_port + } + pub fn src_channel_id(&self) -> &ChannelId { + &self.packet.source_channel + } + pub fn dst_port_id(&self) -> &PortId { + &self.packet.destination_port + } + pub fn dst_channel_id(&self) -> &ChannelId { + &self.packet.destination_channel + } } impl From for IbcEvent { - fn from(v: TimeoutPacket) -> Self { - IbcEvent::TimeoutPacket(v) - } + fn from(v: TimeoutPacket) -> Self { + IbcEvent::TimeoutPacket(v) + } } impl TryFrom for AbciEvent { - type Error = Error; + type Error = Error; - fn try_from(v: TimeoutPacket) -> Result { - let attributes = Vec::::try_from(v.packet)?; - Ok(AbciEvent { - kind: IbcEventType::Timeout.as_str().to_string(), - attributes, - }) - } + fn try_from(v: TimeoutPacket) -> Result { + let attributes = Vec::::try_from(v.packet)?; + Ok(AbciEvent { kind: IbcEventType::Timeout.as_str().to_string(), attributes }) + } } impl core::fmt::Display for TimeoutPacket { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { - write!(f, "TimeoutPacket - h:{}, {}", self.height, self.packet) - } + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { + write!(f, "TimeoutPacket - h:{}, {}", self.height, self.packet) + } } #[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Eq)] pub struct TimeoutOnClosePacket { - pub height: Height, - pub packet: Packet, + pub height: Height, + pub packet: Packet, } impl TimeoutOnClosePacket { - pub fn height(&self) -> Height { - self.height - } - pub fn set_height(&mut self, height: Height) { - self.height = height; - } - pub fn src_port_id(&self) -> &PortId { - &self.packet.source_port - } - pub fn src_channel_id(&self) -> &ChannelId { - &self.packet.source_channel - } - pub fn dst_port_id(&self) -> &PortId { - &self.packet.destination_port - } - pub fn dst_channel_id(&self) -> &ChannelId { - &self.packet.destination_channel - } + pub fn height(&self) -> Height { + self.height + } + pub fn set_height(&mut self, height: Height) { + self.height = height; + } + pub fn src_port_id(&self) -> &PortId { + &self.packet.source_port + } + pub fn src_channel_id(&self) -> &ChannelId { + &self.packet.source_channel + } + pub fn dst_port_id(&self) -> &PortId { + &self.packet.destination_port + } + pub fn dst_channel_id(&self) -> &ChannelId { + &self.packet.destination_channel + } } impl From for IbcEvent { - fn from(v: TimeoutOnClosePacket) -> Self { - 
IbcEvent::TimeoutOnClosePacket(v) - } + fn from(v: TimeoutOnClosePacket) -> Self { + IbcEvent::TimeoutOnClosePacket(v) + } } impl TryFrom for AbciEvent { - type Error = Error; + type Error = Error; - fn try_from(v: TimeoutOnClosePacket) -> Result { - let attributes = Vec::::try_from(v.packet)?; - Ok(AbciEvent { - kind: IbcEventType::TimeoutOnClose.as_str().to_string(), - attributes, - }) - } + fn try_from(v: TimeoutOnClosePacket) -> Result { + let attributes = Vec::::try_from(v.packet)?; + Ok(AbciEvent { kind: IbcEventType::TimeoutOnClose.as_str().to_string(), attributes }) + } } impl core::fmt::Display for TimeoutOnClosePacket { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { - write!( - f, - "TimeoutOnClosePacket - h:{}, {}", - self.height, self.packet - ) - } + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { + write!(f, "TimeoutOnClosePacket - h:{}, {}", self.height, self.packet) + } } macro_rules! impl_try_from_raw_obj_for_packet { @@ -1138,141 +1084,127 @@ macro_rules! impl_try_from_raw_obj_for_packet { } impl_try_from_raw_obj_for_packet!( - SendPacket, - ReceivePacket, - AcknowledgePacket, - TimeoutPacket, - TimeoutOnClosePacket + SendPacket, + ReceivePacket, + AcknowledgePacket, + TimeoutPacket, + TimeoutOnClosePacket ); impl TryFrom> for WriteAcknowledgement { - type Error = EventError; + type Error = EventError; - fn try_from(obj: RawObject<'_>) -> Result { - let height = obj.height; - let data_str: String = - extract_attribute(&obj, &format!("{}.{}", obj.action, PKT_DATA_ATTRIBUTE_KEY))?; - let ack = extract_attribute(&obj, &format!("{}.{}", obj.action, PKT_ACK_ATTRIBUTE_KEY))? - .into_bytes(); + fn try_from(obj: RawObject<'_>) -> Result { + let height = obj.height; + let data_str: String = + extract_attribute(&obj, &format!("{}.{}", obj.action, PKT_DATA_ATTRIBUTE_KEY))?; + let ack = extract_attribute(&obj, &format!("{}.{}", obj.action, PKT_ACK_ATTRIBUTE_KEY))? 
+ .into_bytes(); - let mut packet = Packet::try_from(obj)?; - packet.data = Vec::from(data_str.as_str().as_bytes()); + let mut packet = Packet::try_from(obj)?; + packet.data = Vec::from(data_str.as_str().as_bytes()); - Ok(Self { - height, - packet, - ack, - }) - } + Ok(Self { height, packet, ack }) + } } #[cfg(test)] mod tests { - use super::*; - use crate::core::ics04_channel::packet::Sequence; - use crate::timestamp::Timestamp; - - #[test] - fn channel_event_to_abci_event() { - let attributes = Attributes { - height: Height::default(), - port_id: "test_port".parse().unwrap(), - channel_id: Some("channel-0".parse().unwrap()), - connection_id: "test_connection".parse().unwrap(), - counterparty_port_id: "counterparty_test_port".parse().unwrap(), - counterparty_channel_id: Some("channel-1".parse().unwrap()), - }; - let mut abci_events = vec![]; - let open_init = OpenInit::try_from(attributes.clone()).unwrap(); - abci_events.push(AbciEvent::from(open_init.clone())); - let open_try = OpenTry::try_from(attributes.clone()).unwrap(); - abci_events.push(AbciEvent::from(open_try.clone())); - let open_ack = OpenAck::try_from(attributes.clone()).unwrap(); - abci_events.push(AbciEvent::from(open_ack.clone())); - let open_confirm = OpenConfirm::try_from(attributes.clone()).unwrap(); - abci_events.push(AbciEvent::from(open_confirm.clone())); - let close_init = CloseInit::try_from(attributes.clone()).unwrap(); - abci_events.push(AbciEvent::from(close_init.clone())); - let close_confirm = CloseConfirm::try_from(attributes).unwrap(); - abci_events.push(AbciEvent::from(close_confirm.clone())); - - for event in abci_events { - match try_from_tx(&event) { - Some(e) => match e { - IbcEvent::OpenInitChannel(e) => { - assert_eq!(Attributes::from(e), open_init.clone().into()) - } - IbcEvent::OpenTryChannel(e) => { - assert_eq!(Attributes::from(e), open_try.clone().into()) - } - IbcEvent::OpenAckChannel(e) => { - assert_eq!(Attributes::from(e), open_ack.clone().into()) - } - IbcEvent::OpenConfirmChannel(e) => { - assert_eq!(Attributes::from(e), open_confirm.clone().into()) - } - IbcEvent::CloseInitChannel(e) => { - assert_eq!(Attributes::from(e), close_init.clone().into()) - } - IbcEvent::CloseConfirmChannel(e) => { - assert_eq!(Attributes::from(e), close_confirm.clone().into()) - } - _ => panic!("unexpected event type"), - }, - None => panic!("converted event was wrong"), - } - } - } - - #[test] - fn packet_event_to_abci_event() { - let packet = Packet { - sequence: Sequence::from(10), - source_port: "a_test_port".parse().unwrap(), - source_channel: "channel-0".parse().unwrap(), - destination_port: "b_test_port".parse().unwrap(), - destination_channel: "channel-1".parse().unwrap(), - data: "test_data".as_bytes().to_vec(), - timeout_height: Height::new(1, 10), - timeout_timestamp: Timestamp::now(), - }; - let mut abci_events = vec![]; - let send_packet = SendPacket { - height: Height::default(), - packet: packet.clone(), - }; - abci_events.push(AbciEvent::try_from(send_packet.clone()).unwrap()); - let write_ack = WriteAcknowledgement { - height: Height::default(), - packet: packet.clone(), - ack: "test_ack".as_bytes().to_vec(), - }; - abci_events.push(AbciEvent::try_from(write_ack.clone()).unwrap()); - let ack_packet = AcknowledgePacket { - height: Height::default(), - packet: packet.clone(), - }; - abci_events.push(AbciEvent::try_from(ack_packet.clone()).unwrap()); - let timeout_packet = TimeoutPacket { - height: Height::default(), - packet, - }; - 
abci_events.push(AbciEvent::try_from(timeout_packet.clone()).unwrap()); - - for event in abci_events { - match try_from_tx(&event) { - Some(e) => match e { - IbcEvent::SendPacket(e) => assert_eq!(e.packet, send_packet.packet), - IbcEvent::WriteAcknowledgement(e) => { - assert_eq!(e.packet, write_ack.packet); - assert_eq!(e.ack, write_ack.ack); - } - IbcEvent::AcknowledgePacket(e) => assert_eq!(e.packet, ack_packet.packet), - IbcEvent::TimeoutPacket(e) => assert_eq!(e.packet, timeout_packet.packet), - _ => panic!("unexpected event type"), - }, - None => panic!("converted event was wrong"), - } - } - } + use super::*; + use crate::{core::ics04_channel::packet::Sequence, timestamp::Timestamp}; + + #[test] + fn channel_event_to_abci_event() { + let attributes = Attributes { + height: Height::default(), + port_id: "test_port".parse().unwrap(), + channel_id: Some("channel-0".parse().unwrap()), + connection_id: "test_connection".parse().unwrap(), + counterparty_port_id: "counterparty_test_port".parse().unwrap(), + counterparty_channel_id: Some("channel-1".parse().unwrap()), + }; + let mut abci_events = vec![]; + let open_init = OpenInit::try_from(attributes.clone()).unwrap(); + abci_events.push(AbciEvent::from(open_init.clone())); + let open_try = OpenTry::try_from(attributes.clone()).unwrap(); + abci_events.push(AbciEvent::from(open_try.clone())); + let open_ack = OpenAck::try_from(attributes.clone()).unwrap(); + abci_events.push(AbciEvent::from(open_ack.clone())); + let open_confirm = OpenConfirm::try_from(attributes.clone()).unwrap(); + abci_events.push(AbciEvent::from(open_confirm.clone())); + let close_init = CloseInit::try_from(attributes.clone()).unwrap(); + abci_events.push(AbciEvent::from(close_init.clone())); + let close_confirm = CloseConfirm::try_from(attributes).unwrap(); + abci_events.push(AbciEvent::from(close_confirm.clone())); + + for event in abci_events { + match try_from_tx(&event) { + Some(e) => match e { + IbcEvent::OpenInitChannel(e) => { + assert_eq!(Attributes::from(e), open_init.clone().into()) + }, + IbcEvent::OpenTryChannel(e) => { + assert_eq!(Attributes::from(e), open_try.clone().into()) + }, + IbcEvent::OpenAckChannel(e) => { + assert_eq!(Attributes::from(e), open_ack.clone().into()) + }, + IbcEvent::OpenConfirmChannel(e) => { + assert_eq!(Attributes::from(e), open_confirm.clone().into()) + }, + IbcEvent::CloseInitChannel(e) => { + assert_eq!(Attributes::from(e), close_init.clone().into()) + }, + IbcEvent::CloseConfirmChannel(e) => { + assert_eq!(Attributes::from(e), close_confirm.clone().into()) + }, + _ => panic!("unexpected event type"), + }, + None => panic!("converted event was wrong"), + } + } + } + + #[test] + fn packet_event_to_abci_event() { + let packet = Packet { + sequence: Sequence::from(10), + source_port: "a_test_port".parse().unwrap(), + source_channel: "channel-0".parse().unwrap(), + destination_port: "b_test_port".parse().unwrap(), + destination_channel: "channel-1".parse().unwrap(), + data: "test_data".as_bytes().to_vec(), + timeout_height: Height::new(1, 10), + timeout_timestamp: Timestamp::now(), + }; + let mut abci_events = vec![]; + let send_packet = SendPacket { height: Height::default(), packet: packet.clone() }; + abci_events.push(AbciEvent::try_from(send_packet.clone()).unwrap()); + let write_ack = WriteAcknowledgement { + height: Height::default(), + packet: packet.clone(), + ack: "test_ack".as_bytes().to_vec(), + }; + abci_events.push(AbciEvent::try_from(write_ack.clone()).unwrap()); + let ack_packet = AcknowledgePacket { height: 
Height::default(), packet: packet.clone() }; + abci_events.push(AbciEvent::try_from(ack_packet.clone()).unwrap()); + let timeout_packet = TimeoutPacket { height: Height::default(), packet }; + abci_events.push(AbciEvent::try_from(timeout_packet.clone()).unwrap()); + + for event in abci_events { + match try_from_tx(&event) { + Some(e) => match e { + IbcEvent::SendPacket(e) => assert_eq!(e.packet, send_packet.packet), + IbcEvent::WriteAcknowledgement(e) => { + assert_eq!(e.packet, write_ack.packet); + assert_eq!(e.ack, write_ack.ack); + }, + IbcEvent::AcknowledgePacket(e) => assert_eq!(e.packet, ack_packet.packet), + IbcEvent::TimeoutPacket(e) => assert_eq!(e.packet, timeout_packet.packet), + _ => panic!("unexpected event type"), + }, + None => panic!("converted event was wrong"), + } + } + } } diff --git a/modules/src/core/ics04_channel/handler.rs b/modules/src/core/ics04_channel/handler.rs index bb0a789a23..03834eeffc 100644 --- a/modules/src/core/ics04_channel/handler.rs +++ b/modules/src/core/ics04_channel/handler.rs @@ -1,15 +1,20 @@ //! This module implements the processing logic for ICS4 (channel) messages. -use crate::clients::host_functions::HostFunctionsProvider; -use crate::core::ics04_channel::channel::ChannelEnd; -use crate::core::ics04_channel::error::Error; -use crate::core::ics04_channel::msgs::ChannelMsg; -use crate::core::ics04_channel::{msgs::PacketMsg, packet::PacketResult}; -use crate::core::ics24_host::identifier::{ChannelId, PortId}; -use crate::core::ics26_routing::context::{ - Ics26Context, ModuleId, ModuleOutputBuilder, OnRecvPacketAck, ReaderContext, Router, +use crate::{ + core::{ + ics04_channel::{ + channel::ChannelEnd, + error::Error, + msgs::{ChannelMsg, PacketMsg}, + packet::PacketResult, + }, + ics24_host::identifier::{ChannelId, PortId}, + ics26_routing::context::{ + Ics26Context, ModuleId, ModuleOutputBuilder, OnRecvPacketAck, ReaderContext, Router, + }, + }, + handler::{HandlerOutput, HandlerOutputBuilder}, }; -use crate::handler::{HandlerOutput, HandlerOutputBuilder}; use core::fmt::Debug; pub mod acknowledgement; @@ -29,208 +34,180 @@ pub mod write_acknowledgement; /// Defines the possible states of a channel identifier in a `ChannelResult`. #[derive(Clone, Debug)] pub enum ChannelIdState { - /// Specifies that the channel handshake handler allocated a new channel identifier. This - /// happens during the processing of either the `MsgChannelOpenInit` or `MsgChannelOpenTry`. - Generated, + /// Specifies that the channel handshake handler allocated a new channel identifier. This + /// happens during the processing of either the `MsgChannelOpenInit` or `MsgChannelOpenTry`. + Generated, - /// Specifies that the handler reused a previously-allocated channel identifier. - Reused, + /// Specifies that the handler reused a previously-allocated channel identifier. 
+ Reused, } #[derive(Clone, Debug)] pub struct ChannelResult { - pub port_id: PortId, - pub channel_id: ChannelId, - pub channel_id_state: ChannelIdState, - pub channel_end: ChannelEnd, + pub port_id: PortId, + pub channel_id: ChannelId, + pub channel_id_state: ChannelIdState, + pub channel_end: ChannelEnd, } pub fn channel_validate(ctx: &Ctx, msg: &ChannelMsg) -> Result where - Ctx: Ics26Context, + Ctx: Ics26Context, { - let module_id = msg.lookup_module(ctx)?; - if ctx.router().has_route(&module_id) { - Ok(module_id) - } else { - Err(Error::route_not_found()) - } + let module_id = msg.lookup_module(ctx)?; + if ctx.router().has_route(&module_id) { + Ok(module_id) + } else { + Err(Error::route_not_found()) + } } /// General entry point for processing any type of message related to the ICS4 channel open and /// channel close handshake protocols. -pub fn channel_dispatch( - ctx: &Ctx, - msg: &ChannelMsg, +pub fn channel_dispatch( + ctx: &Ctx, + msg: &ChannelMsg, ) -> Result<(HandlerOutputBuilder<()>, ChannelResult), Error> where - Ctx: ReaderContext, - HostFunctions: HostFunctionsProvider, + Ctx: ReaderContext, { - let output = match msg { - ChannelMsg::ChannelOpenInit(msg) => chan_open_init::process(ctx, msg), - ChannelMsg::ChannelOpenTry(msg) => chan_open_try::process::(ctx, msg), - ChannelMsg::ChannelOpenAck(msg) => chan_open_ack::process::(ctx, msg), - ChannelMsg::ChannelOpenConfirm(msg) => { - chan_open_confirm::process::(ctx, msg) - } - ChannelMsg::ChannelCloseInit(msg) => chan_close_init::process(ctx, msg), - ChannelMsg::ChannelCloseConfirm(msg) => { - chan_close_confirm::process::(ctx, msg) - } - }?; - let HandlerOutput { - result, - log, - events, - } = output; - let builder = HandlerOutput::builder().with_log(log).with_events(events); - Ok((builder, result)) + let output = match msg { + ChannelMsg::ChannelOpenInit(msg) => chan_open_init::process(ctx, msg), + ChannelMsg::ChannelOpenTry(msg) => chan_open_try::process::<_>(ctx, msg), + ChannelMsg::ChannelOpenAck(msg) => chan_open_ack::process::<_>(ctx, msg), + ChannelMsg::ChannelOpenConfirm(msg) => chan_open_confirm::process::<_>(ctx, msg), + ChannelMsg::ChannelCloseInit(msg) => chan_close_init::process(ctx, msg), + ChannelMsg::ChannelCloseConfirm(msg) => chan_close_confirm::process::<_>(ctx, msg), + }?; + let HandlerOutput { result, log, events } = output; + let builder = HandlerOutput::builder().with_log(log).with_events(events); + Ok((builder, result)) } pub fn channel_callback( - ctx: &mut Ctx, - module_id: &ModuleId, - msg: &ChannelMsg, - mut result: ChannelResult, - module_output: &mut ModuleOutputBuilder, + ctx: &mut Ctx, + module_id: &ModuleId, + msg: &ChannelMsg, + mut result: ChannelResult, + module_output: &mut ModuleOutputBuilder, ) -> Result where - Ctx: Ics26Context, + Ctx: Ics26Context, { - let cb = ctx - .router_mut() - .get_route_mut(module_id) - .ok_or_else(Error::route_not_found)?; + let cb = ctx.router_mut().get_route_mut(module_id).ok_or_else(Error::route_not_found)?; - match msg { - ChannelMsg::ChannelOpenInit(msg) => cb.on_chan_open_init( - module_output, - msg.channel.ordering, - &msg.channel.connection_hops, - &msg.port_id, - &result.channel_id, - msg.channel.counterparty(), - &msg.channel.version, - )?, - ChannelMsg::ChannelOpenTry(msg) => { - let version = cb.on_chan_open_try( - module_output, - msg.channel.ordering, - &msg.channel.connection_hops, - &msg.port_id, - &result.channel_id, - msg.channel.counterparty(), - msg.channel.version(), - &msg.counterparty_version, - )?; - result.channel_end.version = 
version; - } - ChannelMsg::ChannelOpenAck(msg) => cb.on_chan_open_ack( - module_output, - &msg.port_id, - &result.channel_id, - &msg.counterparty_version, - )?, - ChannelMsg::ChannelOpenConfirm(msg) => { - cb.on_chan_open_confirm(module_output, &msg.port_id, &result.channel_id)? - } - ChannelMsg::ChannelCloseInit(msg) => { - cb.on_chan_close_init(module_output, &msg.port_id, &result.channel_id)? - } - ChannelMsg::ChannelCloseConfirm(msg) => { - cb.on_chan_close_confirm(module_output, &msg.port_id, &result.channel_id)? - } - } - Ok(result) + match msg { + ChannelMsg::ChannelOpenInit(msg) => cb.on_chan_open_init( + module_output, + msg.channel.ordering, + &msg.channel.connection_hops, + &msg.port_id, + &result.channel_id, + msg.channel.counterparty(), + &msg.channel.version, + )?, + ChannelMsg::ChannelOpenTry(msg) => { + let version = cb.on_chan_open_try( + module_output, + msg.channel.ordering, + &msg.channel.connection_hops, + &msg.port_id, + &result.channel_id, + msg.channel.counterparty(), + msg.channel.version(), + &msg.counterparty_version, + )?; + result.channel_end.version = version; + }, + ChannelMsg::ChannelOpenAck(msg) => cb.on_chan_open_ack( + module_output, + &msg.port_id, + &result.channel_id, + &msg.counterparty_version, + )?, + ChannelMsg::ChannelOpenConfirm(msg) => + cb.on_chan_open_confirm(module_output, &msg.port_id, &result.channel_id)?, + ChannelMsg::ChannelCloseInit(msg) => + cb.on_chan_close_init(module_output, &msg.port_id, &result.channel_id)?, + ChannelMsg::ChannelCloseConfirm(msg) => + cb.on_chan_close_confirm(module_output, &msg.port_id, &result.channel_id)?, + } + Ok(result) } pub fn get_module_for_packet_msg(ctx: &Ctx, msg: &PacketMsg) -> Result where - Ctx: Ics26Context, + Ctx: Ics26Context, { - let module_id = match msg { - PacketMsg::RecvPacket(msg) => ctx - .lookup_module_by_port(&msg.packet.destination_port) - .map_err(Error::ics05_port)?, - PacketMsg::AckPacket(msg) => ctx - .lookup_module_by_port(&msg.packet.source_port) - .map_err(Error::ics05_port)?, - PacketMsg::ToPacket(msg) => ctx - .lookup_module_by_port(&msg.packet.source_port) - .map_err(Error::ics05_port)?, - PacketMsg::ToClosePacket(msg) => ctx - .lookup_module_by_port(&msg.packet.source_port) - .map_err(Error::ics05_port)?, - }; + let module_id = match msg { + PacketMsg::RecvPacket(msg) => ctx + .lookup_module_by_port(&msg.packet.destination_port) + .map_err(Error::ics05_port)?, + PacketMsg::AckPacket(msg) => + ctx.lookup_module_by_port(&msg.packet.source_port).map_err(Error::ics05_port)?, + PacketMsg::ToPacket(msg) => + ctx.lookup_module_by_port(&msg.packet.source_port).map_err(Error::ics05_port)?, + PacketMsg::ToClosePacket(msg) => + ctx.lookup_module_by_port(&msg.packet.source_port).map_err(Error::ics05_port)?, + }; - if ctx.router().has_route(&module_id) { - Ok(module_id) - } else { - Err(Error::route_not_found()) - } + if ctx.router().has_route(&module_id) { + Ok(module_id) + } else { + Err(Error::route_not_found()) + } } /// Dispatcher for processing any type of message related to the ICS4 packet protocols. 
-pub fn packet_dispatch( - ctx: &Ctx, - msg: &PacketMsg, +pub fn packet_dispatch( + ctx: &Ctx, + msg: &PacketMsg, ) -> Result<(HandlerOutputBuilder<()>, PacketResult), Error> where - Ctx: ReaderContext, - HostFunctions: HostFunctionsProvider, + Ctx: ReaderContext, { - let output = match msg { - PacketMsg::RecvPacket(msg) => recv_packet::process::(ctx, msg), - PacketMsg::AckPacket(msg) => acknowledgement::process::(ctx, msg), - PacketMsg::ToPacket(msg) => timeout::process::(ctx, msg), - PacketMsg::ToClosePacket(msg) => timeout_on_close::process::(ctx, msg), - }?; - let HandlerOutput { - result, - log, - events, - } = output; - let builder = HandlerOutput::builder().with_log(log).with_events(events); - Ok((builder, result)) + let output = match msg { + PacketMsg::RecvPacket(msg) => recv_packet::process::<_>(ctx, msg), + PacketMsg::AckPacket(msg) => acknowledgement::process::<_>(ctx, msg), + PacketMsg::ToPacket(msg) => timeout::process::<_>(ctx, msg), + PacketMsg::ToClosePacket(msg) => timeout_on_close::process::<_>(ctx, msg), + }?; + let HandlerOutput { result, log, events } = output; + let builder = HandlerOutput::builder().with_log(log).with_events(events); + Ok((builder, result)) } pub fn packet_callback( - ctx: &mut Ctx, - module_id: &ModuleId, - msg: &PacketMsg, - module_output: &mut ModuleOutputBuilder, + ctx: &mut Ctx, + module_id: &ModuleId, + msg: &PacketMsg, + module_output: &mut ModuleOutputBuilder, ) -> Result<(), Error> where - Ctx: Ics26Context, + Ctx: Ics26Context, { - let cb = ctx - .router_mut() - .get_route_mut(module_id) - .ok_or_else(Error::route_not_found)?; + let cb = ctx.router_mut().get_route_mut(module_id).ok_or_else(Error::route_not_found)?; - match msg { - PacketMsg::RecvPacket(msg) => { - let result = cb.on_recv_packet(module_output, &msg.packet, &msg.signer); - match result { - OnRecvPacketAck::Nil(write_fn) | OnRecvPacketAck::Successful(_, write_fn) => { - write_fn(cb.as_any_mut()).map_err(Error::app_module)?; - } - OnRecvPacketAck::Failed(_) => {} - } - } - PacketMsg::AckPacket(msg) => cb.on_acknowledgement_packet( - module_output, - &msg.packet, - &msg.acknowledgement, - &msg.signer, - )?, - PacketMsg::ToPacket(msg) => { - cb.on_timeout_packet(module_output, &msg.packet, &msg.signer)? - } - PacketMsg::ToClosePacket(msg) => { - cb.on_timeout_packet(module_output, &msg.packet, &msg.signer)? 
- } - }; - Ok(()) + match msg { + PacketMsg::RecvPacket(msg) => { + let result = cb.on_recv_packet(module_output, &msg.packet, &msg.signer); + match result { + OnRecvPacketAck::Nil(write_fn) | OnRecvPacketAck::Successful(_, write_fn) => { + write_fn(cb.as_any_mut()).map_err(Error::app_module)?; + }, + OnRecvPacketAck::Failed(_) => {}, + } + }, + PacketMsg::AckPacket(msg) => cb.on_acknowledgement_packet( + module_output, + &msg.packet, + &msg.acknowledgement, + &msg.signer, + )?, + PacketMsg::ToPacket(msg) => + cb.on_timeout_packet(module_output, &msg.packet, &msg.signer)?, + PacketMsg::ToClosePacket(msg) => + cb.on_timeout_packet(module_output, &msg.packet, &msg.signer)?, + }; + Ok(()) } diff --git a/modules/src/core/ics04_channel/handler/acknowledgement.rs b/modules/src/core/ics04_channel/handler/acknowledgement.rs index ccdc305a55..d10c49937f 100644 --- a/modules/src/core/ics04_channel/handler/acknowledgement.rs +++ b/modules/src/core/ics04_channel/handler/acknowledgement.rs @@ -1,239 +1,239 @@ -use crate::clients::host_functions::HostFunctionsProvider; -use crate::core::ics03_connection::connection::State as ConnectionState; -use crate::core::ics04_channel::channel::State; -use crate::core::ics04_channel::channel::{Counterparty, Order}; -use crate::core::ics04_channel::error::Error; -use crate::core::ics04_channel::events::AcknowledgePacket; -use crate::core::ics04_channel::handler::verify::verify_packet_acknowledgement_proofs; -use crate::core::ics04_channel::msgs::acknowledgement::MsgAcknowledgement; -use crate::core::ics04_channel::packet::{PacketResult, Sequence}; -use crate::core::ics24_host::identifier::{ChannelId, PortId}; -use crate::core::ics26_routing::context::ReaderContext; -use crate::events::IbcEvent; -use crate::handler::{HandlerOutput, HandlerResult}; -use crate::prelude::*; +use crate::{ + core::{ + ics03_connection::connection::State as ConnectionState, + ics04_channel::{ + channel::{Counterparty, Order, State}, + error::Error, + events::AcknowledgePacket, + handler::verify::verify_packet_acknowledgement_proofs, + msgs::acknowledgement::MsgAcknowledgement, + packet::{PacketResult, Sequence}, + }, + ics24_host::identifier::{ChannelId, PortId}, + ics26_routing::context::ReaderContext, + }, + events::IbcEvent, + handler::{HandlerOutput, HandlerResult}, + prelude::*, +}; use core::fmt::Debug; #[derive(Clone, Debug)] pub struct AckPacketResult { - pub port_id: PortId, - pub channel_id: ChannelId, - pub seq: Sequence, - pub seq_number: Option, + pub port_id: PortId, + pub channel_id: ChannelId, + pub seq: Sequence, + pub seq_number: Option, } -pub fn process( - ctx: &dyn ReaderContext, - msg: &MsgAcknowledgement, +pub fn process( + ctx: &Ctx, + msg: &MsgAcknowledgement, ) -> HandlerResult { - let mut output = HandlerOutput::builder(); + let mut output = HandlerOutput::builder(); - let packet = &msg.packet; + let packet = &msg.packet; - let source_channel_end = - ctx.channel_end(&(packet.source_port.clone(), packet.source_channel))?; + let source_channel_end = + ctx.channel_end(&(packet.source_port.clone(), packet.source_channel))?; - if !source_channel_end.state_matches(&State::Open) { - return Err(Error::channel_closed(packet.source_channel)); - } + if !source_channel_end.state_matches(&State::Open) { + return Err(Error::channel_closed(packet.source_channel)) + } - let counterparty = Counterparty::new( - packet.destination_port.clone(), - Some(packet.destination_channel), - ); + let counterparty = + Counterparty::new(packet.destination_port.clone(), 
Some(packet.destination_channel)); - if !source_channel_end.counterparty_matches(&counterparty) { - return Err(Error::invalid_packet_counterparty( - packet.destination_port.clone(), - packet.destination_channel, - )); - } + if !source_channel_end.counterparty_matches(&counterparty) { + return Err(Error::invalid_packet_counterparty( + packet.destination_port.clone(), + packet.destination_channel, + )) + } - let connection_end = ctx - .connection_end(&source_channel_end.connection_hops()[0]) - .map_err(Error::ics03_connection)?; + let connection_end = ctx + .connection_end(&source_channel_end.connection_hops()[0]) + .map_err(Error::ics03_connection)?; - if !connection_end.state_matches(&ConnectionState::Open) { - return Err(Error::connection_not_open( - source_channel_end.connection_hops()[0].clone(), - )); - } + if !connection_end.state_matches(&ConnectionState::Open) { + return Err(Error::connection_not_open(source_channel_end.connection_hops()[0].clone())) + } - // Verify packet commitment - let packet_commitment = ctx.get_packet_commitment(&( - packet.source_port.clone(), - packet.source_channel, - packet.sequence, - ))?; + // Verify packet commitment + let packet_commitment = ctx.get_packet_commitment(&( + packet.source_port.clone(), + packet.source_channel, + packet.sequence, + ))?; - if packet_commitment - != ctx.packet_commitment( - packet.data.clone(), - packet.timeout_height, - packet.timeout_timestamp, - ) - { - return Err(Error::incorrect_packet_commitment(packet.sequence)); - } + if packet_commitment != + ctx.packet_commitment( + packet.data.clone(), + packet.timeout_height, + packet.timeout_timestamp, + ) { + return Err(Error::incorrect_packet_commitment(packet.sequence)) + } - // Verify the acknowledgement proof - verify_packet_acknowledgement_proofs::( - ctx, - msg.proofs.height(), - packet, - msg.acknowledgement.clone(), - &connection_end, - &msg.proofs, - )?; + // Verify the acknowledgement proof + verify_packet_acknowledgement_proofs::( + ctx, + msg.proofs.height(), + packet, + msg.acknowledgement.clone(), + &connection_end, + &msg.proofs, + )?; - let result = if source_channel_end.order_matches(&Order::Ordered) { - let next_seq_ack = - ctx.get_next_sequence_ack(&(packet.source_port.clone(), packet.source_channel))?; + let result = if source_channel_end.order_matches(&Order::Ordered) { + let next_seq_ack = + ctx.get_next_sequence_ack(&(packet.source_port.clone(), packet.source_channel))?; - if packet.sequence != next_seq_ack { - return Err(Error::invalid_packet_sequence( - packet.sequence, - next_seq_ack, - )); - } + if packet.sequence != next_seq_ack { + return Err(Error::invalid_packet_sequence(packet.sequence, next_seq_ack)) + } - PacketResult::Ack(AckPacketResult { - port_id: packet.source_port.clone(), - channel_id: packet.source_channel, - seq: packet.sequence, - seq_number: Some(next_seq_ack.increment()), - }) - } else { - PacketResult::Ack(AckPacketResult { - port_id: packet.source_port.clone(), - channel_id: packet.source_channel, - seq: packet.sequence, - seq_number: None, - }) - }; + PacketResult::Ack(AckPacketResult { + port_id: packet.source_port.clone(), + channel_id: packet.source_channel, + seq: packet.sequence, + seq_number: Some(next_seq_ack.increment()), + }) + } else { + PacketResult::Ack(AckPacketResult { + port_id: packet.source_port.clone(), + channel_id: packet.source_channel, + seq: packet.sequence, + seq_number: None, + }) + }; - output.log("success: packet ack"); + output.log("success: packet ack"); - 
output.emit(IbcEvent::AcknowledgePacket(AcknowledgePacket { - height: ctx.host_height(), - packet: packet.clone(), - })); + output.emit(IbcEvent::AcknowledgePacket(AcknowledgePacket { + height: ctx.host_height(), + packet: packet.clone(), + })); - Ok(output.with_result(result)) + Ok(output.with_result(result)) } #[cfg(test)] mod tests { - use test_log::test; + use test_log::test; - use crate::core::ics02_client::context::ClientReader; - use crate::core::ics02_client::height::Height; - use crate::core::ics03_connection::connection::ConnectionEnd; - use crate::core::ics03_connection::connection::Counterparty as ConnectionCounterparty; - use crate::core::ics03_connection::connection::State as ConnectionState; - use crate::core::ics03_connection::version::get_compatible_versions; - use crate::core::ics04_channel::channel::{ChannelEnd, Counterparty, Order, State}; - use crate::core::ics04_channel::context::ChannelReader; - use crate::core::ics04_channel::handler::acknowledgement::process; - use crate::core::ics04_channel::msgs::acknowledgement::test_util::get_dummy_raw_msg_acknowledgement; - use crate::core::ics04_channel::msgs::acknowledgement::MsgAcknowledgement; - use crate::core::ics04_channel::Version; - use crate::core::ics24_host::identifier::{ClientId, ConnectionId}; - use crate::events::IbcEvent; - use crate::mock::context::MockContext; - use crate::prelude::*; - use crate::test_utils::Crypto; - use crate::timestamp::ZERO_DURATION; + use crate::{ + core::{ + ics02_client::{context::ClientReader, height::Height}, + ics03_connection::{ + connection::{ + ConnectionEnd, Counterparty as ConnectionCounterparty, State as ConnectionState, + }, + version::get_compatible_versions, + }, + ics04_channel::{ + channel::{ChannelEnd, Counterparty, Order, State}, + context::ChannelReader, + handler::acknowledgement::process, + msgs::acknowledgement::{ + test_util::get_dummy_raw_msg_acknowledgement, MsgAcknowledgement, + }, + Version, + }, + ics24_host::identifier::{ClientId, ConnectionId}, + }, + events::IbcEvent, + mock::context::{MockClientTypes, MockContext}, + prelude::*, + timestamp::ZERO_DURATION, + }; - #[test] - fn ack_packet_processing() { - struct Test { - name: String, - ctx: MockContext, - msg: MsgAcknowledgement, - want_pass: bool, - } + #[test] + fn ack_packet_processing() { + struct Test { + name: String, + ctx: MockContext, + msg: MsgAcknowledgement, + want_pass: bool, + } - let context = MockContext::default(); + let context = MockContext::default(); - let client_height = Height::new(0, Height::default().revision_height + 2); + let client_height = Height::new(0, Height::default().revision_height + 2); - let msg = MsgAcknowledgement::try_from(get_dummy_raw_msg_acknowledgement( - client_height.revision_height, - )) - .unwrap(); - let packet = msg.packet.clone(); + let msg = MsgAcknowledgement::try_from(get_dummy_raw_msg_acknowledgement( + client_height.revision_height, + )) + .unwrap(); + let packet = msg.packet.clone(); - let data = context.packet_commitment( - packet.data.clone(), - packet.timeout_height, - packet.timeout_timestamp, - ); + let data = context.packet_commitment( + packet.data.clone(), + packet.timeout_height, + packet.timeout_timestamp, + ); - let source_channel_end = ChannelEnd::new( - State::Open, - Order::default(), - Counterparty::new( - packet.destination_port.clone(), - Some(packet.destination_channel), - ), - vec![ConnectionId::default()], - Version::ics20(), - ); + let source_channel_end = ChannelEnd::new( + State::Open, + Order::default(), + 
Counterparty::new(packet.destination_port.clone(), Some(packet.destination_channel)), + vec![ConnectionId::default()], + Version::ics20(), + ); - let connection_end = ConnectionEnd::new( - ConnectionState::Open, - ClientId::default(), - ConnectionCounterparty::new( - ClientId::default(), - Some(ConnectionId::default()), - Default::default(), - ), - get_compatible_versions(), - ZERO_DURATION, - ); + let connection_end = ConnectionEnd::new( + ConnectionState::Open, + ClientId::default(), + ConnectionCounterparty::new( + ClientId::default(), + Some(ConnectionId::default()), + Default::default(), + ), + get_compatible_versions(), + ZERO_DURATION, + ); - let tests: Vec = vec![ - Test { - name: "Processing fails because no channel exists in the context".to_string(), - ctx: context.clone(), - msg: msg.clone(), - want_pass: false, - }, - Test { - name: "Good parameters".to_string(), - ctx: context - .with_client(&ClientId::default(), client_height) - .with_connection(ConnectionId::default(), connection_end) - .with_channel( - packet.source_port.clone(), - packet.source_channel, - source_channel_end, - ) - .with_packet_commitment( - packet.source_port, - packet.source_channel, - packet.sequence, - data, - ) //with_ack_sequence required for ordered channels - .with_ack_sequence( - packet.destination_port, - packet.destination_channel, - 1.into(), - ), - msg, - want_pass: true, - }, - ] - .into_iter() - .collect(); + let tests: Vec = vec![ + Test { + name: "Processing fails because no channel exists in the context".to_string(), + ctx: context.clone(), + msg: msg.clone(), + want_pass: false, + }, + Test { + name: "Good parameters".to_string(), + ctx: context + .with_client(&ClientId::default(), client_height) + .with_connection(ConnectionId::default(), connection_end) + .with_channel( + packet.source_port.clone(), + packet.source_channel, + source_channel_end, + ) + .with_packet_commitment( + packet.source_port, + packet.source_channel, + packet.sequence, + data, + ) //with_ack_sequence required for ordered channels + .with_ack_sequence( + packet.destination_port, + packet.destination_channel, + 1.into(), + ), + msg, + want_pass: true, + }, + ] + .into_iter() + .collect(); - for test in tests { - let res = process::(&test.ctx, &test.msg); - // Additionally check the events and the output objects in the result. - match res { - Ok(proto_output) => { - assert!( + for test in tests { + let res = process(&test.ctx, &test.msg); + // Additionally check the events and the output objects in the result. + match res { + Ok(proto_output) => { + assert!( test.want_pass, "ack_packet: test passed but was supposed to fail for test: {}, \nparams {:?} {:?}", test.name, @@ -241,24 +241,24 @@ mod tests { test.ctx.clone() ); - assert!(!proto_output.events.is_empty()); // Some events must exist. + assert!(!proto_output.events.is_empty()); // Some events must exist. 
- for e in proto_output.events.iter() { - assert!(matches!(e, &IbcEvent::AcknowledgePacket(_))); - assert_eq!(e.height(), test.ctx.host_height()); - } - } - Err(e) => { - assert!( - !test.want_pass, - "ack_packet: did not pass test: {}, \nparams {:?} {:?} error: {:?}", - test.name, - test.msg.clone(), - test.ctx.clone(), - e, - ); - } - } - } - } + for e in proto_output.events.iter() { + assert!(matches!(e, &IbcEvent::AcknowledgePacket(_))); + assert_eq!(e.height(), test.ctx.host_height()); + } + }, + Err(e) => { + assert!( + !test.want_pass, + "ack_packet: did not pass test: {}, \nparams {:?} {:?} error: {:?}", + test.name, + test.msg.clone(), + test.ctx.clone(), + e, + ); + }, + } + } + } } diff --git a/modules/src/core/ics04_channel/handler/chan_close_confirm.rs b/modules/src/core/ics04_channel/handler/chan_close_confirm.rs index 0a54903a29..af3fde50b3 100644 --- a/modules/src/core/ics04_channel/handler/chan_close_confirm.rs +++ b/modules/src/core/ics04_channel/handler/chan_close_confirm.rs @@ -1,188 +1,195 @@ //! Protocol logic specific to ICS4 messages of type `MsgChannelCloseConfirm`. -use crate::clients::host_functions::HostFunctionsProvider; -use crate::core::ics03_connection::connection::State as ConnectionState; -use crate::core::ics04_channel::channel::{ChannelEnd, Counterparty, State}; -use crate::core::ics04_channel::error::Error; -use crate::core::ics04_channel::events::Attributes; -use crate::core::ics04_channel::handler::verify::verify_channel_proofs; -use crate::core::ics04_channel::handler::{ChannelIdState, ChannelResult}; -use crate::core::ics04_channel::msgs::chan_close_confirm::MsgChannelCloseConfirm; -use crate::core::ics26_routing::context::ReaderContext; -use crate::events::IbcEvent; -use crate::handler::{HandlerOutput, HandlerResult}; -use crate::prelude::*; - -pub(crate) fn process( - ctx: &dyn ReaderContext, - msg: &MsgChannelCloseConfirm, +use crate::{ + core::{ + ics03_connection::connection::State as ConnectionState, + ics04_channel::{ + channel::{ChannelEnd, Counterparty, State}, + error::Error, + events::Attributes, + handler::{verify::verify_channel_proofs, ChannelIdState, ChannelResult}, + msgs::chan_close_confirm::MsgChannelCloseConfirm, + }, + ics26_routing::context::ReaderContext, + }, + events::IbcEvent, + handler::{HandlerOutput, HandlerResult}, + prelude::*, +}; + +pub(crate) fn process( + ctx: &Ctx, + msg: &MsgChannelCloseConfirm, ) -> HandlerResult { - let mut output = HandlerOutput::builder(); - - // Retrieve the old channel end and validate it against the message. - let mut channel_end = ctx.channel_end(&(msg.port_id.clone(), msg.channel_id))?; - - // Validate that the channel end is in a state where it can be closed. - if channel_end.state_matches(&State::Closed) { - return Err(Error::channel_closed(msg.channel_id)); - } - - // An OPEN IBC connection running on the local (host) chain should exist. - if channel_end.connection_hops().len() != 1 { - return Err(Error::invalid_connection_hops_length( - 1, - channel_end.connection_hops().len(), - )); - } - - let conn = ctx - .connection_end(&channel_end.connection_hops()[0]) - .map_err(Error::ics03_connection)?; - - if !conn.state_matches(&ConnectionState::Open) { - return Err(Error::connection_not_open( - channel_end.connection_hops()[0].clone(), - )); - } - - // Proof verification in two steps: - // 1. Setup: build the Channel as we expect to find it on the other party. 
- - let expected_counterparty = Counterparty::new(msg.port_id.clone(), Some(msg.channel_id)); - - let counterparty = conn.counterparty(); - let ccid = counterparty.connection_id().ok_or_else(|| { - Error::undefined_connection_counterparty(channel_end.connection_hops()[0].clone()) - })?; - - let expected_connection_hops = vec![ccid.clone()]; - - let expected_channel_end = ChannelEnd::new( - State::Closed, - *channel_end.ordering(), - expected_counterparty, - expected_connection_hops, - channel_end.version().clone(), - ); - - verify_channel_proofs::( - ctx, - msg.proofs.height(), - &channel_end, - &conn, - &expected_channel_end, - &msg.proofs.object_proof(), - )?; - - output.log("success: channel close confirm "); - - // Transition the channel end to the new state & pick a version. - channel_end.set_state(State::Closed); - - let event_attributes = Attributes { - channel_id: Some(msg.channel_id), - height: ctx.host_height(), - port_id: msg.port_id.clone(), - connection_id: channel_end.connection_hops[0].clone(), - counterparty_port_id: channel_end.counterparty().port_id.clone(), - counterparty_channel_id: channel_end.counterparty().channel_id.clone(), - }; - - let result = ChannelResult { - port_id: msg.port_id.clone(), - channel_id: msg.channel_id, - channel_id_state: ChannelIdState::Reused, - channel_end, - }; - - output.emit(IbcEvent::CloseConfirmChannel( - event_attributes - .try_into() - .map_err(|_| Error::missing_channel_id())?, - )); - - Ok(output.with_result(result)) + let mut output = HandlerOutput::builder(); + + // Retrieve the old channel end and validate it against the message. + let mut channel_end = ctx.channel_end(&(msg.port_id.clone(), msg.channel_id))?; + + // Validate that the channel end is in a state where it can be closed. + if channel_end.state_matches(&State::Closed) { + return Err(Error::channel_closed(msg.channel_id)) + } + + // An OPEN IBC connection running on the local (host) chain should exist. + if channel_end.connection_hops().len() != 1 { + return Err(Error::invalid_connection_hops_length(1, channel_end.connection_hops().len())) + } + + let conn = ctx + .connection_end(&channel_end.connection_hops()[0]) + .map_err(Error::ics03_connection)?; + + if !conn.state_matches(&ConnectionState::Open) { + return Err(Error::connection_not_open(channel_end.connection_hops()[0].clone())) + } + + // Proof verification in two steps: + // 1. Setup: build the Channel as we expect to find it on the other party. + + let expected_counterparty = Counterparty::new(msg.port_id.clone(), Some(msg.channel_id)); + + let counterparty = conn.counterparty(); + let ccid = counterparty.connection_id().ok_or_else(|| { + Error::undefined_connection_counterparty(channel_end.connection_hops()[0].clone()) + })?; + + let expected_connection_hops = vec![ccid.clone()]; + + let expected_channel_end = ChannelEnd::new( + State::Closed, + *channel_end.ordering(), + expected_counterparty, + expected_connection_hops, + channel_end.version().clone(), + ); + + verify_channel_proofs::( + ctx, + msg.proofs.height(), + &channel_end, + &conn, + &expected_channel_end, + &msg.proofs.object_proof(), + )?; + + output.log("success: channel close confirm "); + + // Transition the channel end to the new state & pick a version. 
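// Editorial note: `verify_channel_proofs` above is handed the proof height, the
// locally stored channel end, its connection, the reconstructed counterparty
// `ChannelEnd` (Closed, with this chain's port/channel as its counterparty), and
// the object proof from the message. Conceptually it checks that the expected
// value is what the counterparty chain committed at that height; the stand-in
// below is a toy byte comparison, not the crate's Merkle-proof verification:

struct ToyProof {
    committed_bytes: Vec<u8>,
}

fn toy_verify_channel_proof(expected_encoded: &[u8], proof: &ToyProof) -> Result<(), &'static str> {
    // The real check is a membership proof against the counterparty consensus
    // state's commitment root; equality of bytes is only an illustration.
    if proof.committed_bytes == expected_encoded {
        Ok(())
    } else {
        Err("channel proof verification failed")
    }
}

fn main() {
    let expected = b"Closed/transfer/channel-0".to_vec();
    let proof = ToyProof { committed_bytes: expected.clone() };
    assert!(toy_verify_channel_proof(&expected, &proof).is_ok());
}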
+ channel_end.set_state(State::Closed); + + let event_attributes = Attributes { + channel_id: Some(msg.channel_id), + height: ctx.host_height(), + port_id: msg.port_id.clone(), + connection_id: channel_end.connection_hops[0].clone(), + counterparty_port_id: channel_end.counterparty().port_id.clone(), + counterparty_channel_id: channel_end.counterparty().channel_id.clone(), + }; + + let result = ChannelResult { + port_id: msg.port_id.clone(), + channel_id: msg.channel_id, + channel_id_state: ChannelIdState::Reused, + channel_end, + }; + + output.emit(IbcEvent::CloseConfirmChannel( + event_attributes.try_into().map_err(|_| Error::missing_channel_id())?, + )); + + Ok(output.with_result(result)) } #[cfg(test)] mod tests { - use crate::core::ics04_channel::msgs::chan_close_confirm::test_util::get_dummy_raw_msg_chan_close_confirm; - use crate::core::ics04_channel::msgs::chan_close_confirm::MsgChannelCloseConfirm; - use crate::core::ics04_channel::msgs::ChannelMsg; - use crate::events::IbcEvent; - use crate::prelude::*; - - use crate::core::ics02_client::client_type::ClientType; - use crate::core::ics02_client::context::ClientReader; - use crate::core::ics03_connection::connection::ConnectionEnd; - use crate::core::ics03_connection::connection::Counterparty as ConnectionCounterparty; - use crate::core::ics03_connection::connection::State as ConnectionState; - use crate::core::ics03_connection::msgs::test_util::get_dummy_raw_counterparty; - use crate::core::ics03_connection::version::get_compatible_versions; - use crate::core::ics04_channel::channel::{ - ChannelEnd, Counterparty, Order, State as ChannelState, - }; - use crate::core::ics04_channel::handler::channel_dispatch; - use crate::core::ics04_channel::Version; - use crate::core::ics24_host::identifier::{ClientId, ConnectionId}; - use crate::test_utils::Crypto; - - use crate::mock::context::MockContext; - use crate::timestamp::ZERO_DURATION; - - #[test] - fn chan_close_confirm_event_height() { - let client_id = ClientId::new(ClientType::Mock, 24).unwrap(); - let conn_id = ConnectionId::new(2); - let default_context = MockContext::default(); - let client_consensus_state_height = default_context.host_height(); - - let conn_end = ConnectionEnd::new( - ConnectionState::Open, - client_id.clone(), - ConnectionCounterparty::try_from(get_dummy_raw_counterparty()).unwrap(), - get_compatible_versions(), - ZERO_DURATION, - ); - - let msg_chan_close_confirm = MsgChannelCloseConfirm::try_from( - get_dummy_raw_msg_chan_close_confirm(client_consensus_state_height.revision_height), - ) - .unwrap(); - - let chan_end = ChannelEnd::new( - ChannelState::Open, - Order::default(), - Counterparty::new( - msg_chan_close_confirm.port_id.clone(), - Some(msg_chan_close_confirm.channel_id), - ), - vec![conn_id.clone()], - Version::default(), - ); - - let context = default_context - .with_client(&client_id, client_consensus_state_height) - .with_connection(conn_id, conn_end) - .with_channel( - msg_chan_close_confirm.port_id.clone(), - msg_chan_close_confirm.channel_id, - chan_end, - ); - - let (handler_output_builder, _) = channel_dispatch::<_, Crypto>( - &context, - &ChannelMsg::ChannelCloseConfirm(msg_chan_close_confirm), - ) - .unwrap(); - - let handler_output = handler_output_builder.with_result(()); - - assert!(!handler_output.events.is_empty()); // Some events must exist. 
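// Editorial note: the event assertions above (and in the other handler tests in
// this patch) always check three things: at least one event was emitted, every
// event is the expected variant, and every event carries the host height. A
// self-contained sketch of that assertion shape with a toy event enum:

#[derive(Debug)]
enum ToyEvent {
    CloseConfirmChannel { height: u64 },
}

impl ToyEvent {
    fn height(&self) -> u64 {
        match self {
            ToyEvent::CloseConfirmChannel { height } => *height,
        }
    }
}

#[test]
fn event_height_sketch() {
    let host_height = 7;
    let events = vec![ToyEvent::CloseConfirmChannel { height: host_height }];

    assert!(!events.is_empty()); // Some events must exist.
    for event in &events {
        assert!(matches!(event, ToyEvent::CloseConfirmChannel { .. }));
        assert_eq!(event.height(), host_height);
    }
}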
- - for event in handler_output.events.iter() { - assert!(matches!(event, &IbcEvent::CloseConfirmChannel(_))); - assert_eq!(event.height(), context.host_height()); - } - } + use crate::{ + core::ics04_channel::msgs::{ + chan_close_confirm::{ + test_util::get_dummy_raw_msg_chan_close_confirm, MsgChannelCloseConfirm, + }, + ChannelMsg, + }, + events::IbcEvent, + prelude::*, + }; + + use crate::{ + core::{ + ics02_client::context::ClientReader, + ics03_connection::{ + connection::{ + ConnectionEnd, Counterparty as ConnectionCounterparty, State as ConnectionState, + }, + msgs::test_util::get_dummy_raw_counterparty, + version::get_compatible_versions, + }, + ics04_channel::{ + channel::{ChannelEnd, Counterparty, Order, State as ChannelState}, + handler::channel_dispatch, + Version, + }, + ics24_host::identifier::{ClientId, ConnectionId}, + }, + mock::client_state::MockClientState, + }; + + use crate::{ + mock::context::{MockClientTypes, MockContext}, + timestamp::ZERO_DURATION, + }; + + #[test] + fn chan_close_confirm_event_height() { + let client_id = ClientId::new(MockClientState::client_type(), 24).unwrap(); + let conn_id = ConnectionId::new(2); + let default_context = MockContext::::default(); + let client_consensus_state_height = default_context.host_height(); + + let conn_end = ConnectionEnd::new( + ConnectionState::Open, + client_id.clone(), + ConnectionCounterparty::try_from(get_dummy_raw_counterparty()).unwrap(), + get_compatible_versions(), + ZERO_DURATION, + ); + + let msg_chan_close_confirm = MsgChannelCloseConfirm::try_from( + get_dummy_raw_msg_chan_close_confirm(client_consensus_state_height.revision_height), + ) + .unwrap(); + + let chan_end = ChannelEnd::new( + ChannelState::Open, + Order::default(), + Counterparty::new( + msg_chan_close_confirm.port_id.clone(), + Some(msg_chan_close_confirm.channel_id), + ), + vec![conn_id.clone()], + Version::default(), + ); + + let context = default_context + .with_client(&client_id, client_consensus_state_height) + .with_connection(conn_id, conn_end) + .with_channel( + msg_chan_close_confirm.port_id.clone(), + msg_chan_close_confirm.channel_id, + chan_end, + ); + + let (handler_output_builder, _) = + channel_dispatch(&context, &ChannelMsg::ChannelCloseConfirm(msg_chan_close_confirm)) + .unwrap(); + + let handler_output = handler_output_builder.with_result(()); + + assert!(!handler_output.events.is_empty()); // Some events must exist. + + for event in handler_output.events.iter() { + assert!(matches!(event, &IbcEvent::CloseConfirmChannel(_))); + assert_eq!(event.height(), context.host_height()); + } + } } diff --git a/modules/src/core/ics04_channel/handler/chan_close_init.rs b/modules/src/core/ics04_channel/handler/chan_close_init.rs index 0cd4b3c0df..592827d6c2 100644 --- a/modules/src/core/ics04_channel/handler/chan_close_init.rs +++ b/modules/src/core/ics04_channel/handler/chan_close_init.rs @@ -1,159 +1,162 @@ //! Protocol logic specific to ICS4 messages of type `MsgChannelCloseInit`. 
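// Editorial note: unlike close-confirm above, the close-init handler that follows
// verifies no proofs; it only runs three local guards (channel not already
// Closed, exactly one connection hop, underlying connection Open) before setting
// the channel state to Closed. A toy sketch of that guard sequence (hypothetical
// helper, toy error strings in place of the crate's `Error` variants):

#[derive(PartialEq)]
enum ToyChannelState {
    Open,
    Closed,
}

#[derive(PartialEq)]
enum ToyConnectionState {
    Init,
    Open,
}

fn validate_close_init(
    channel_state: &ToyChannelState,
    connection_hops: &[u32],
    connection_state: &ToyConnectionState,
) -> Result<(), &'static str> {
    if *channel_state == ToyChannelState::Closed {
        return Err("channel is already closed");
    }
    if connection_hops.len() != 1 {
        return Err("expected exactly one connection hop");
    }
    if *connection_state != ToyConnectionState::Open {
        return Err("underlying connection is not open");
    }
    Ok(())
}

fn main() {
    assert!(validate_close_init(&ToyChannelState::Open, &[0], &ToyConnectionState::Open).is_ok());
    assert!(validate_close_init(&ToyChannelState::Closed, &[0], &ToyConnectionState::Init).is_err());
}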
-use crate::core::ics03_connection::connection::State as ConnectionState; -use crate::core::ics04_channel::channel::State; -use crate::core::ics04_channel::error::Error; -use crate::core::ics04_channel::events::Attributes; -use crate::core::ics04_channel::handler::{ChannelIdState, ChannelResult}; -use crate::core::ics04_channel::msgs::chan_close_init::MsgChannelCloseInit; -use crate::core::ics26_routing::context::ReaderContext; -use crate::events::IbcEvent; -use crate::handler::{HandlerOutput, HandlerResult}; - -pub(crate) fn process( - ctx: &dyn ReaderContext, - msg: &MsgChannelCloseInit, +use crate::{ + core::{ + ics03_connection::connection::State as ConnectionState, + ics04_channel::{ + channel::State, + error::Error, + events::Attributes, + handler::{ChannelIdState, ChannelResult}, + msgs::chan_close_init::MsgChannelCloseInit, + }, + ics26_routing::context::ReaderContext, + }, + events::IbcEvent, + handler::{HandlerOutput, HandlerResult}, +}; + +pub(crate) fn process( + ctx: &Ctx, + msg: &MsgChannelCloseInit, ) -> HandlerResult { - let mut output = HandlerOutput::builder(); - - // Unwrap the old channel end and validate it against the message. - let mut channel_end = ctx.channel_end(&(msg.port_id.clone(), msg.channel_id))?; - - // Validate that the channel end is in a state where it can be closed. - if channel_end.state_matches(&State::Closed) { - return Err(Error::invalid_channel_state( - msg.channel_id, - channel_end.state, - )); - } - - // An OPEN IBC connection running on the local (host) chain should exist. - if channel_end.connection_hops().len() != 1 { - return Err(Error::invalid_connection_hops_length( - 1, - channel_end.connection_hops().len(), - )); - } - - let conn = ctx - .connection_end(&channel_end.connection_hops()[0]) - .map_err(Error::ics03_connection)?; - - if !conn.state_matches(&ConnectionState::Open) { - return Err(Error::connection_not_open( - channel_end.connection_hops()[0].clone(), - )); - } - - output.log("success: channel close init "); - - // Transition the channel end to the new state & pick a version. - channel_end.set_state(State::Closed); - - let event_attributes = Attributes { - channel_id: Some(msg.channel_id), - height: ctx.host_height(), - port_id: msg.port_id.clone(), - connection_id: channel_end.connection_hops[0].clone(), - counterparty_port_id: channel_end.counterparty().port_id.clone(), - counterparty_channel_id: channel_end.counterparty().channel_id.clone(), - }; - - let result = ChannelResult { - port_id: msg.port_id.clone(), - channel_id: msg.channel_id, - channel_id_state: ChannelIdState::Reused, - channel_end, - }; - - output.emit(IbcEvent::CloseInitChannel( - event_attributes - .try_into() - .map_err(|_| Error::missing_channel_id())?, - )); - - Ok(output.with_result(result)) + let mut output = HandlerOutput::builder(); + + // Unwrap the old channel end and validate it against the message. + let mut channel_end = ctx.channel_end(&(msg.port_id.clone(), msg.channel_id))?; + + // Validate that the channel end is in a state where it can be closed. + if channel_end.state_matches(&State::Closed) { + return Err(Error::invalid_channel_state(msg.channel_id, channel_end.state)) + } + + // An OPEN IBC connection running on the local (host) chain should exist. 
+ if channel_end.connection_hops().len() != 1 { + return Err(Error::invalid_connection_hops_length(1, channel_end.connection_hops().len())) + } + + let conn = ctx + .connection_end(&channel_end.connection_hops()[0]) + .map_err(Error::ics03_connection)?; + + if !conn.state_matches(&ConnectionState::Open) { + return Err(Error::connection_not_open(channel_end.connection_hops()[0].clone())) + } + + output.log("success: channel close init "); + + // Transition the channel end to the new state & pick a version. + channel_end.set_state(State::Closed); + + let event_attributes = Attributes { + channel_id: Some(msg.channel_id), + height: ctx.host_height(), + port_id: msg.port_id.clone(), + connection_id: channel_end.connection_hops[0].clone(), + counterparty_port_id: channel_end.counterparty().port_id.clone(), + counterparty_channel_id: channel_end.counterparty().channel_id.clone(), + }; + + let result = ChannelResult { + port_id: msg.port_id.clone(), + channel_id: msg.channel_id, + channel_id_state: ChannelIdState::Reused, + channel_end, + }; + + output.emit(IbcEvent::CloseInitChannel( + event_attributes.try_into().map_err(|_| Error::missing_channel_id())?, + )); + + Ok(output.with_result(result)) } #[cfg(test)] mod tests { - use crate::core::ics04_channel::msgs::chan_close_init::test_util::get_dummy_raw_msg_chan_close_init; - use crate::core::ics04_channel::msgs::chan_close_init::MsgChannelCloseInit; - use crate::core::ics04_channel::msgs::ChannelMsg; - use crate::events::IbcEvent; - use crate::prelude::*; - - use crate::core::ics02_client::client_type::ClientType; - use crate::core::ics03_connection::connection::ConnectionEnd; - use crate::core::ics03_connection::connection::Counterparty as ConnectionCounterparty; - use crate::core::ics03_connection::connection::State as ConnectionState; - use crate::core::ics03_connection::msgs::test_util::get_dummy_raw_counterparty; - use crate::core::ics03_connection::version::get_compatible_versions; - use crate::core::ics04_channel::channel::{ - ChannelEnd, Counterparty, Order, State as ChannelState, - }; - use crate::core::ics04_channel::handler::channel_dispatch; - use crate::core::ics04_channel::Version; - use crate::core::ics24_host::identifier::{ClientId, ConnectionId}; - - use crate::core::ics02_client::context::ClientReader; - use crate::mock::context::MockContext; - use crate::test_utils::Crypto; - use crate::timestamp::ZERO_DURATION; - - #[test] - fn chan_close_init_event_height() { - let client_id = ClientId::new(ClientType::Mock, 24).unwrap(); - let conn_id = ConnectionId::new(2); - - let conn_end = ConnectionEnd::new( - ConnectionState::Open, - client_id.clone(), - ConnectionCounterparty::try_from(get_dummy_raw_counterparty()).unwrap(), - get_compatible_versions(), - ZERO_DURATION, - ); - - let msg_chan_close_init = - MsgChannelCloseInit::try_from(get_dummy_raw_msg_chan_close_init()).unwrap(); - - let chan_end = ChannelEnd::new( - ChannelState::Open, - Order::default(), - Counterparty::new( - msg_chan_close_init.port_id.clone(), - Some(msg_chan_close_init.channel_id), - ), - vec![conn_id.clone()], - Version::default(), - ); - - let context = { - let default_context = MockContext::default(); - let client_consensus_state_height = default_context.host_height(); - - default_context - .with_client(&client_id, client_consensus_state_height) - .with_connection(conn_id, conn_end) - .with_channel( - msg_chan_close_init.port_id.clone(), - msg_chan_close_init.channel_id, - chan_end, - ) - }; - - let (handler_output_builder, _) = channel_dispatch::<_, 
Crypto>( - &context, - &ChannelMsg::ChannelCloseInit(msg_chan_close_init), - ) - .unwrap(); - let handler_output = handler_output_builder.with_result(()); - - assert!(!handler_output.events.is_empty()); // Some events must exist. - - for event in handler_output.events.iter() { - assert!(matches!(event, &IbcEvent::CloseInitChannel(_))); - assert_eq!(event.height(), context.host_height()); - } - } + use crate::{ + core::ics04_channel::msgs::{ + chan_close_init::{test_util::get_dummy_raw_msg_chan_close_init, MsgChannelCloseInit}, + ChannelMsg, + }, + events::IbcEvent, + prelude::*, + }; + + use crate::core::{ + ics03_connection::{ + connection::{ + ConnectionEnd, Counterparty as ConnectionCounterparty, State as ConnectionState, + }, + msgs::test_util::get_dummy_raw_counterparty, + version::get_compatible_versions, + }, + ics04_channel::{ + channel::{ChannelEnd, Counterparty, Order, State as ChannelState}, + handler::channel_dispatch, + Version, + }, + ics24_host::identifier::{ClientId, ConnectionId}, + }; + + use crate::{ + core::ics02_client::context::ClientReader, + mock::{ + client_state::MockClientState, + context::{MockClientTypes, MockContext}, + }, + timestamp::ZERO_DURATION, + }; + + #[test] + fn chan_close_init_event_height() { + let client_id = ClientId::new(MockClientState::client_type(), 24).unwrap(); + let conn_id = ConnectionId::new(2); + + let conn_end = ConnectionEnd::new( + ConnectionState::Open, + client_id.clone(), + ConnectionCounterparty::try_from(get_dummy_raw_counterparty()).unwrap(), + get_compatible_versions(), + ZERO_DURATION, + ); + + let msg_chan_close_init = + MsgChannelCloseInit::try_from(get_dummy_raw_msg_chan_close_init()).unwrap(); + + let chan_end = ChannelEnd::new( + ChannelState::Open, + Order::default(), + Counterparty::new( + msg_chan_close_init.port_id.clone(), + Some(msg_chan_close_init.channel_id), + ), + vec![conn_id.clone()], + Version::default(), + ); + + let context = { + let default_context = MockContext::::default(); + let client_consensus_state_height = default_context.host_height(); + + default_context + .with_client(&client_id, client_consensus_state_height) + .with_connection(conn_id, conn_end) + .with_channel( + msg_chan_close_init.port_id.clone(), + msg_chan_close_init.channel_id, + chan_end, + ) + }; + + let (handler_output_builder, _) = + channel_dispatch(&context, &ChannelMsg::ChannelCloseInit(msg_chan_close_init)).unwrap(); + let handler_output = handler_output_builder.with_result(()); + + assert!(!handler_output.events.is_empty()); // Some events must exist. + + for event in handler_output.events.iter() { + assert!(matches!(event, &IbcEvent::CloseInitChannel(_))); + assert_eq!(event.height(), context.host_height()); + } + } } diff --git a/modules/src/core/ics04_channel/handler/chan_open_ack.rs b/modules/src/core/ics04_channel/handler/chan_open_ack.rs index 041d1f00c3..cfbe7bb5b4 100644 --- a/modules/src/core/ics04_channel/handler/chan_open_ack.rs +++ b/modules/src/core/ics04_channel/handler/chan_open_ack.rs @@ -1,298 +1,303 @@ //! Protocol logic specific to ICS4 messages of type `MsgChannelOpenAck`. 
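// Editorial note: the open-ack handler below accepts a channel in Init or
// TryOpen, records the counterparty channel id before proof verification, and
// only after the proofs pass does it move to Open and adopt the counterparty
// version from the message. A compact sketch of that progression with toy types
// (not the crate's ChannelEnd; the proof step is elided):

#[derive(Clone, Debug, PartialEq)]
enum ToyState {
    Init,
    TryOpen,
    Open,
}

#[derive(Debug)]
struct ToyChannel {
    state: ToyState,
    version: String,
    counterparty_channel_id: Option<u64>,
}

fn toy_open_ack(
    mut chan: ToyChannel,
    counterparty_channel_id: u64,
    counterparty_version: &str,
) -> Result<ToyChannel, &'static str> {
    if chan.state != ToyState::Init && chan.state != ToyState::TryOpen {
        return Err("invalid channel state for open-ack");
    }
    // Record the counterparty id so the proof would be verified against it.
    chan.counterparty_channel_id = Some(counterparty_channel_id);
    // ... proof verification against the expected TryOpen counterparty end ...
    chan.state = ToyState::Open;
    chan.version = counterparty_version.to_string();
    Ok(chan)
}

fn main() {
    let chan = ToyChannel { state: ToyState::Init, version: "ics20-1".into(), counterparty_channel_id: None };
    let chan = toy_open_ack(chan, 0, "ics20-1").unwrap();
    assert_eq!(chan.state, ToyState::Open);
}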
-use crate::clients::host_functions::HostFunctionsProvider; -use crate::core::ics03_connection::connection::State as ConnectionState; -use crate::core::ics04_channel::channel::{ChannelEnd, Counterparty, State}; -use crate::core::ics04_channel::error::Error; -use crate::core::ics04_channel::events::Attributes; -use crate::core::ics04_channel::handler::verify::verify_channel_proofs; -use crate::core::ics04_channel::handler::{ChannelIdState, ChannelResult}; -use crate::core::ics04_channel::msgs::chan_open_ack::MsgChannelOpenAck; -use crate::core::ics26_routing::context::ReaderContext; -use crate::events::IbcEvent; -use crate::handler::{HandlerOutput, HandlerResult}; -use crate::prelude::*; - -pub(crate) fn process( - ctx: &dyn ReaderContext, - msg: &MsgChannelOpenAck, + +use crate::{ + core::{ + ics03_connection::connection::State as ConnectionState, + ics04_channel::{ + channel::{ChannelEnd, Counterparty, State}, + error::Error, + events::Attributes, + handler::{verify::verify_channel_proofs, ChannelIdState, ChannelResult}, + msgs::chan_open_ack::MsgChannelOpenAck, + }, + ics26_routing::context::ReaderContext, + }, + events::IbcEvent, + handler::{HandlerOutput, HandlerResult}, + prelude::*, +}; + +pub(crate) fn process( + ctx: &Ctx, + msg: &MsgChannelOpenAck, ) -> HandlerResult { - let mut output = HandlerOutput::builder(); - - // Unwrap the old channel end and validate it against the message. - let mut channel_end = ctx.channel_end(&(msg.port_id.clone(), msg.channel_id))?; - - // Validate that the channel end is in a state where it can be ack. - if !channel_end.state_matches(&State::Init) && !channel_end.state_matches(&State::TryOpen) { - return Err(Error::invalid_channel_state( - msg.channel_id, - channel_end.state, - )); - } - - // An OPEN IBC connection running on the local (host) chain should exist. - - if channel_end.connection_hops().len() != 1 { - return Err(Error::invalid_connection_hops_length( - 1, - channel_end.connection_hops().len(), - )); - } - - let conn = ctx - .connection_end(&channel_end.connection_hops()[0]) - .map_err(Error::ics03_connection)?; - - if !conn.state_matches(&ConnectionState::Open) { - return Err(Error::connection_not_open( - channel_end.connection_hops()[0].clone(), - )); - } - - // Proof verification in two steps: - // 1. Setup: build the Channel as we expect to find it on the other party. - - let expected_counterparty = Counterparty::new(msg.port_id.clone(), Some(msg.channel_id)); - - let counterparty = conn.counterparty(); - let ccid = counterparty.connection_id().ok_or_else(|| { - Error::undefined_connection_counterparty(channel_end.connection_hops()[0].clone()) - })?; - - let expected_connection_hops = vec![ccid.clone()]; - - let expected_channel_end = ChannelEnd::new( - State::TryOpen, - *channel_end.ordering(), - expected_counterparty, - expected_connection_hops, - msg.counterparty_version.clone(), - ); - - // set the counterparty channel id to verify against it - channel_end.set_counterparty_channel_id(msg.counterparty_channel_id); - - //2. Verify proofs - verify_channel_proofs::( - ctx, - msg.proofs.height(), - &channel_end, - &conn, - &expected_channel_end, - &msg.proofs.object_proof(), - )?; - - output.log("success: channel open ack "); - - // Transition the channel end to the new state & pick a version. 
- channel_end.set_state(State::Open); - channel_end.set_version(msg.counterparty_version.clone()); - - let event_attributes = Attributes { - channel_id: Some(msg.channel_id), - height: ctx.host_height(), - port_id: msg.port_id.clone(), - connection_id: channel_end.connection_hops[0].clone(), - counterparty_port_id: channel_end.counterparty().port_id.clone(), - counterparty_channel_id: channel_end.counterparty().channel_id.clone(), - }; - - let result = ChannelResult { - port_id: msg.port_id.clone(), - channel_id: msg.channel_id, - channel_id_state: ChannelIdState::Reused, - channel_end, - }; - - output.emit(IbcEvent::OpenAckChannel( - event_attributes - .try_into() - .map_err(|_| Error::missing_channel_id())?, - )); - - Ok(output.with_result(result)) + let mut output = HandlerOutput::builder(); + + // Unwrap the old channel end and validate it against the message. + let mut channel_end = ctx.channel_end(&(msg.port_id.clone(), msg.channel_id))?; + + // Validate that the channel end is in a state where it can be ack. + if !channel_end.state_matches(&State::Init) && !channel_end.state_matches(&State::TryOpen) { + return Err(Error::invalid_channel_state(msg.channel_id, channel_end.state)) + } + + // An OPEN IBC connection running on the local (host) chain should exist. + + if channel_end.connection_hops().len() != 1 { + return Err(Error::invalid_connection_hops_length(1, channel_end.connection_hops().len())) + } + + let conn = ctx + .connection_end(&channel_end.connection_hops()[0]) + .map_err(Error::ics03_connection)?; + + if !conn.state_matches(&ConnectionState::Open) { + return Err(Error::connection_not_open(channel_end.connection_hops()[0].clone())) + } + + // Proof verification in two steps: + // 1. Setup: build the Channel as we expect to find it on the other party. + + let expected_counterparty = Counterparty::new(msg.port_id.clone(), Some(msg.channel_id)); + + let counterparty = conn.counterparty(); + let ccid = counterparty.connection_id().ok_or_else(|| { + Error::undefined_connection_counterparty(channel_end.connection_hops()[0].clone()) + })?; + + let expected_connection_hops = vec![ccid.clone()]; + + let expected_channel_end = ChannelEnd::new( + State::TryOpen, + *channel_end.ordering(), + expected_counterparty, + expected_connection_hops, + msg.counterparty_version.clone(), + ); + + // set the counterparty channel id to verify against it + channel_end.set_counterparty_channel_id(msg.counterparty_channel_id); + + //2. Verify proofs + verify_channel_proofs::( + ctx, + msg.proofs.height(), + &channel_end, + &conn, + &expected_channel_end, + &msg.proofs.object_proof(), + )?; + + output.log("success: channel open ack "); + + // Transition the channel end to the new state & pick a version. 
+ channel_end.set_state(State::Open); + channel_end.set_version(msg.counterparty_version.clone()); + + let event_attributes = Attributes { + channel_id: Some(msg.channel_id), + height: ctx.host_height(), + port_id: msg.port_id.clone(), + connection_id: channel_end.connection_hops[0].clone(), + counterparty_port_id: channel_end.counterparty().port_id.clone(), + counterparty_channel_id: channel_end.counterparty().channel_id.clone(), + }; + + let result = ChannelResult { + port_id: msg.port_id.clone(), + channel_id: msg.channel_id, + channel_id_state: ChannelIdState::Reused, + channel_end, + }; + + output.emit(IbcEvent::OpenAckChannel( + event_attributes.try_into().map_err(|_| Error::missing_channel_id())?, + )); + + Ok(output.with_result(result)) } #[cfg(test)] mod tests { - use core::str::FromStr; - - use test_log::test; - - use crate::core::ics02_client::context::ClientReader; - use crate::core::ics03_connection::connection::ConnectionEnd; - use crate::core::ics03_connection::connection::Counterparty as ConnectionCounterparty; - use crate::core::ics03_connection::connection::State as ConnectionState; - use crate::core::ics03_connection::msgs::conn_open_init::test_util::get_dummy_raw_msg_conn_open_init; - use crate::core::ics03_connection::msgs::conn_open_init::MsgConnectionOpenInit; - use crate::core::ics03_connection::msgs::conn_open_try::test_util::get_dummy_raw_msg_conn_open_try; - use crate::core::ics03_connection::msgs::conn_open_try::MsgConnectionOpenTry; - use crate::core::ics03_connection::version::get_compatible_versions; - use crate::core::ics04_channel::channel::{ChannelEnd, Counterparty, State}; - use crate::core::ics04_channel::handler::channel_dispatch; - use crate::core::ics04_channel::msgs::chan_open_ack::test_util::get_dummy_raw_msg_chan_open_ack; - use crate::core::ics04_channel::msgs::chan_open_ack::MsgChannelOpenAck; - use crate::core::ics04_channel::msgs::chan_open_try::test_util::get_dummy_raw_msg_chan_open_try; - use crate::core::ics04_channel::msgs::chan_open_try::MsgChannelOpenTry; - use crate::core::ics04_channel::msgs::ChannelMsg; - use crate::core::ics24_host::identifier::ConnectionId; - use crate::events::IbcEvent; - use crate::mock::context::MockContext; - use crate::prelude::*; - use crate::test_utils::Crypto; - use crate::Height; - - // TODO: The tests here are very fragile and complex. - // Should be adapted to use the same structure as `handler::chan_open_try::tests`. 
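// Editorial note: in the rewritten test modules of this patch, client identifiers
// come from `MockClientState::client_type()` instead of a `ClientType::Mock`
// variant, and the mock context is constructed with an explicit client-types
// parameter (the `MockClientTypes` marker imported above). The underlying pattern
// is a context generic over a family of client types; a toy, self-contained
// sketch of that pattern (names are stand-ins, not the crate's):

use core::marker::PhantomData;

trait ToyClientTypes {
    fn client_type() -> &'static str;
}

#[derive(Default)]
struct ToyMockClientTypes;

impl ToyClientTypes for ToyMockClientTypes {
    fn client_type() -> &'static str {
        "mock"
    }
}

#[derive(Default)]
struct ToyMockContext<C: ToyClientTypes> {
    _marker: PhantomData<C>,
}

impl<C: ToyClientTypes> ToyMockContext<C> {
    fn client_type(&self) -> &'static str {
        C::client_type()
    }
}

fn main() {
    let ctx = ToyMockContext::<ToyMockClientTypes>::default();
    assert_eq!(ctx.client_type(), "mock");
}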
- #[test] - fn chan_open_ack_msg_processing() { - struct Test { - name: String, - ctx: MockContext, - msg: ChannelMsg, - want_pass: bool, - } - let proof_height = 10; - let client_consensus_state_height = 10; - let host_chain_height = Height::new(0, 35); - - let context = MockContext::default(); - - let msg_conn_init = - MsgConnectionOpenInit::try_from(get_dummy_raw_msg_conn_open_init()).unwrap(); - - let conn_end = ConnectionEnd::new( - ConnectionState::Open, - msg_conn_init.client_id.clone(), - ConnectionCounterparty::new( - msg_conn_init.counterparty.client_id().clone(), - Some(ConnectionId::from_str("defaultConnection-1").unwrap()), - msg_conn_init.counterparty.prefix().clone(), - ), - get_compatible_versions(), - msg_conn_init.delay_period, - ); - - let ccid = ::from_str("defaultConnection-0"); - let cid = match ccid { - Ok(v) => v, - Err(_e) => ConnectionId::default(), - }; - - let mut connection_vec0 = Vec::new(); - connection_vec0.insert( - 0, - match ::from_str("defaultConnection-0") { - Ok(a) => a, - _ => unreachable!(), - }, - ); - - let msg_conn_try = MsgConnectionOpenTry::try_from(get_dummy_raw_msg_conn_open_try( - client_consensus_state_height, - host_chain_height.revision_height, - )) - .unwrap(); - - let msg_chan_ack = - MsgChannelOpenAck::try_from(get_dummy_raw_msg_chan_open_ack(proof_height)).unwrap(); - - let msg_chan_try = - MsgChannelOpenTry::try_from(get_dummy_raw_msg_chan_open_try(proof_height)).unwrap(); - - let chan_end = ChannelEnd::new( - State::Init, - *msg_chan_try.channel.ordering(), - Counterparty::new(msg_chan_ack.port_id.clone(), Some(msg_chan_ack.channel_id)), - connection_vec0.clone(), - msg_chan_try.channel.version().clone(), - ); - - let failed_chan_end = ChannelEnd::new( - State::Open, - *msg_chan_try.channel.ordering(), - Counterparty::new(msg_chan_ack.port_id.clone(), Some(msg_chan_ack.channel_id)), - connection_vec0, - msg_chan_try.channel.version().clone(), - ); - - let tests: Vec = vec![ - Test { - name: "Processing fails because no channel exists in the context".to_string(), - ctx: context.clone(), - msg: ChannelMsg::ChannelOpenAck(msg_chan_ack.clone()), - want_pass: false, - }, - Test { - name: "Processing fails because the channel is in the wrong state".to_string(), - ctx: context - .clone() - .with_client( - &msg_conn_try.client_id, - Height::new(0, client_consensus_state_height), - ) - .with_channel( - msg_chan_ack.port_id.clone(), - msg_chan_ack.channel_id, - failed_chan_end, - ), - msg: ChannelMsg::ChannelOpenAck(msg_chan_ack.clone()), - want_pass: false, - }, - Test { - name: "Processing fails because a connection does exist".to_string(), - ctx: context - .clone() - .with_client( - &msg_conn_try.client_id, - Height::new(0, client_consensus_state_height), - ) - .with_channel( - msg_chan_ack.port_id.clone(), - msg_chan_ack.channel_id, - chan_end.clone(), - ), - msg: ChannelMsg::ChannelOpenAck(msg_chan_ack.clone()), - want_pass: false, - }, - Test { - name: "Processing fails due to missing client state ".to_string(), - ctx: context - .clone() - .with_connection(cid.clone(), conn_end.clone()) - .with_channel( - msg_chan_ack.port_id.clone(), - msg_chan_ack.channel_id, - chan_end.clone(), - ), - msg: ChannelMsg::ChannelOpenAck(msg_chan_ack.clone()), - want_pass: false, - }, - Test { - name: "Good parameters".to_string(), - ctx: context // .clone() - .with_client( - &msg_conn_try.client_id, - Height::new(0, client_consensus_state_height), - ) - .with_connection(cid, conn_end) - .with_channel( - msg_chan_ack.port_id.clone(), - 
msg_chan_ack.channel_id, - chan_end, - ), - msg: ChannelMsg::ChannelOpenAck(msg_chan_ack), - want_pass: true, - }, - ] - .into_iter() - .collect(); - - for test in tests { - let res = channel_dispatch::<_, Crypto>(&test.ctx, &test.msg); - // Additionally check the events and the output objects in the result. - match res { - Ok((proto_output, res)) => { - assert!( + use core::str::FromStr; + + use test_log::test; + + use crate::{ + core::{ + ics02_client::context::ClientReader, + ics03_connection::{ + connection::{ + ConnectionEnd, Counterparty as ConnectionCounterparty, State as ConnectionState, + }, + msgs::{ + conn_open_init::{ + test_util::get_dummy_raw_msg_conn_open_init, MsgConnectionOpenInit, + }, + conn_open_try::{ + test_util::get_dummy_raw_msg_conn_open_try, MsgConnectionOpenTry, + }, + }, + version::get_compatible_versions, + }, + ics04_channel::{ + channel::{ChannelEnd, Counterparty, State}, + handler::channel_dispatch, + msgs::{ + chan_open_ack::{ + test_util::get_dummy_raw_msg_chan_open_ack, MsgChannelOpenAck, + }, + chan_open_try::{ + test_util::get_dummy_raw_msg_chan_open_try, MsgChannelOpenTry, + }, + ChannelMsg, + }, + }, + ics24_host::identifier::ConnectionId, + }, + events::IbcEvent, + mock::context::{MockClientTypes, MockContext}, + prelude::*, + Height, + }; + + // TODO: The tests here are very fragile and complex. + // Should be adapted to use the same structure as `handler::chan_open_try::tests`. + #[test] + fn chan_open_ack_msg_processing() { + struct Test { + name: String, + ctx: MockContext, + msg: ChannelMsg, + want_pass: bool, + } + let proof_height = 10; + let client_consensus_state_height = 10; + let host_chain_height = Height::new(0, 35); + + let context = MockContext::default(); + + let msg_conn_init = + MsgConnectionOpenInit::try_from(get_dummy_raw_msg_conn_open_init()).unwrap(); + + let conn_end = ConnectionEnd::new( + ConnectionState::Open, + msg_conn_init.client_id.clone(), + ConnectionCounterparty::new( + msg_conn_init.counterparty.client_id().clone(), + Some(ConnectionId::from_str("defaultConnection-1").unwrap()), + msg_conn_init.counterparty.prefix().clone(), + ), + get_compatible_versions(), + msg_conn_init.delay_period, + ); + + let ccid = ::from_str("defaultConnection-0"); + let cid = match ccid { + Ok(v) => v, + Err(_e) => ConnectionId::default(), + }; + + let mut connection_vec0 = Vec::new(); + connection_vec0.insert( + 0, + match ::from_str("defaultConnection-0") { + Ok(a) => a, + _ => unreachable!(), + }, + ); + + let msg_conn_try = MsgConnectionOpenTry::>::try_from( + get_dummy_raw_msg_conn_open_try( + client_consensus_state_height, + host_chain_height.revision_height, + ), + ) + .unwrap(); + + let msg_chan_ack = + MsgChannelOpenAck::try_from(get_dummy_raw_msg_chan_open_ack(proof_height)).unwrap(); + + let msg_chan_try = + MsgChannelOpenTry::try_from(get_dummy_raw_msg_chan_open_try(proof_height)).unwrap(); + + let chan_end = ChannelEnd::new( + State::Init, + *msg_chan_try.channel.ordering(), + Counterparty::new(msg_chan_ack.port_id.clone(), Some(msg_chan_ack.channel_id)), + connection_vec0.clone(), + msg_chan_try.channel.version().clone(), + ); + + let failed_chan_end = ChannelEnd::new( + State::Open, + *msg_chan_try.channel.ordering(), + Counterparty::new(msg_chan_ack.port_id.clone(), Some(msg_chan_ack.channel_id)), + connection_vec0, + msg_chan_try.channel.version().clone(), + ); + + let tests: Vec = vec![ + Test { + name: "Processing fails because no channel exists in the context".to_string(), + ctx: context.clone(), + msg: 
ChannelMsg::ChannelOpenAck(msg_chan_ack.clone()), + want_pass: false, + }, + Test { + name: "Processing fails because the channel is in the wrong state".to_string(), + ctx: context + .clone() + .with_client( + &msg_conn_try.client_id, + Height::new(0, client_consensus_state_height), + ) + .with_channel( + msg_chan_ack.port_id.clone(), + msg_chan_ack.channel_id, + failed_chan_end, + ), + msg: ChannelMsg::ChannelOpenAck(msg_chan_ack.clone()), + want_pass: false, + }, + Test { + name: "Processing fails because a connection does exist".to_string(), + ctx: context + .clone() + .with_client( + &msg_conn_try.client_id, + Height::new(0, client_consensus_state_height), + ) + .with_channel( + msg_chan_ack.port_id.clone(), + msg_chan_ack.channel_id, + chan_end.clone(), + ), + msg: ChannelMsg::ChannelOpenAck(msg_chan_ack.clone()), + want_pass: false, + }, + Test { + name: "Processing fails due to missing client state ".to_string(), + ctx: context.clone().with_connection(cid.clone(), conn_end.clone()).with_channel( + msg_chan_ack.port_id.clone(), + msg_chan_ack.channel_id, + chan_end.clone(), + ), + msg: ChannelMsg::ChannelOpenAck(msg_chan_ack.clone()), + want_pass: false, + }, + Test { + name: "Good parameters".to_string(), + ctx: context // .clone() + .with_client( + &msg_conn_try.client_id, + Height::new(0, client_consensus_state_height), + ) + .with_connection(cid, conn_end) + .with_channel(msg_chan_ack.port_id.clone(), msg_chan_ack.channel_id, chan_end), + msg: ChannelMsg::ChannelOpenAck(msg_chan_ack), + want_pass: true, + }, + ] + .into_iter() + .collect(); + + for test in tests { + let res = channel_dispatch(&test.ctx, &test.msg); + // Additionally check the events and the output objects in the result. + match res { + Ok((proto_output, res)) => { + assert!( test.want_pass, "chan_open_ack: test passed but was supposed to fail for test: {}, \nparams {:?} {:?}", test.name, @@ -300,29 +305,29 @@ mod tests { test.ctx.clone() ); - let proto_output = proto_output.with_result(()); - assert!(!proto_output.events.is_empty()); // Some events must exist. - - // The object in the output is a ConnectionEnd, should have init state. - //assert_eq!(res.channel_id, msg_chan_init.channel_id().clone()); - assert_eq!(res.channel_end.state().clone(), State::Open); - - for e in proto_output.events.iter() { - assert!(matches!(e, &IbcEvent::OpenAckChannel(_))); - assert_eq!(e.height(), test.ctx.host_height()); - } - } - Err(e) => { - assert!( - !test.want_pass, - "chan_open_ack: did not pass test: {}, \nparams {:?} {:?} error: {:?}", - test.name, - test.msg, - test.ctx.clone(), - e, - ); - } - } - } - } + let proto_output = proto_output.with_result(()); + assert!(!proto_output.events.is_empty()); // Some events must exist. + + // The object in the output is a ConnectionEnd, should have init state. 
+ //assert_eq!(res.channel_id, msg_chan_init.channel_id().clone()); + assert_eq!(res.channel_end.state().clone(), State::Open); + + for e in proto_output.events.iter() { + assert!(matches!(e, &IbcEvent::OpenAckChannel(_))); + assert_eq!(e.height(), test.ctx.host_height()); + } + }, + Err(e) => { + assert!( + !test.want_pass, + "chan_open_ack: did not pass test: {}, \nparams {:?} {:?} error: {:?}", + test.name, + test.msg, + test.ctx.clone(), + e, + ); + }, + } + } + } } diff --git a/modules/src/core/ics04_channel/handler/chan_open_confirm.rs b/modules/src/core/ics04_channel/handler/chan_open_confirm.rs index 1e362caef5..0e5d8e11ca 100644 --- a/modules/src/core/ics04_channel/handler/chan_open_confirm.rs +++ b/modules/src/core/ics04_channel/handler/chan_open_confirm.rs @@ -1,199 +1,206 @@ //! Protocol logic specific to ICS4 messages of type `MsgChannelOpenConfirm`. -use crate::clients::host_functions::HostFunctionsProvider; -use crate::core::ics03_connection::connection::State as ConnectionState; -use crate::core::ics04_channel::channel::{ChannelEnd, Counterparty, State}; -use crate::core::ics04_channel::error::Error; -use crate::core::ics04_channel::events::Attributes; -use crate::core::ics04_channel::handler::verify::verify_channel_proofs; -use crate::core::ics04_channel::handler::{ChannelIdState, ChannelResult}; -use crate::core::ics04_channel::msgs::chan_open_confirm::MsgChannelOpenConfirm; -use crate::core::ics26_routing::context::ReaderContext; -use crate::events::IbcEvent; -use crate::handler::{HandlerOutput, HandlerResult}; -use crate::prelude::*; - -pub(crate) fn process( - ctx: &dyn ReaderContext, - msg: &MsgChannelOpenConfirm, -) -> HandlerResult { - let mut output = HandlerOutput::builder(); - - // Unwrap the old channel end and validate it against the message. - let mut channel_end = ctx.channel_end(&(msg.port_id.clone(), msg.channel_id))?; - - // Validate that the channel end is in a state where it can be confirmed. - if !channel_end.state_matches(&State::TryOpen) { - return Err(Error::invalid_channel_state( - msg.channel_id, - channel_end.state, - )); - } - - // An OPEN IBC connection running on the local (host) chain should exist. - if channel_end.connection_hops().len() != 1 { - return Err(Error::invalid_connection_hops_length( - 1, - channel_end.connection_hops().len(), - )); - } - - let conn = ctx - .connection_end(&channel_end.connection_hops()[0]) - .map_err(Error::ics03_connection)?; - - if !conn.state_matches(&ConnectionState::Open) { - return Err(Error::connection_not_open( - channel_end.connection_hops()[0].clone(), - )); - } - - // Proof verification in two steps: - // 1. Setup: build the Channel as we expect to find it on the other party. - - let expected_counterparty = Counterparty::new(msg.port_id.clone(), Some(msg.channel_id)); - - let connection_counterparty = conn.counterparty(); - let ccid = connection_counterparty.connection_id().ok_or_else(|| { - Error::undefined_connection_counterparty(channel_end.connection_hops()[0].clone()) - })?; - - let expected_connection_hops = vec![ccid.clone()]; - - let expected_channel_end = ChannelEnd::new( - State::Open, - *channel_end.ordering(), - expected_counterparty, - expected_connection_hops, - channel_end.version().clone(), - ); - //2. 
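// Editorial note: the confirm handler that follows mirrors open-ack, but the
// counterparty `ChannelEnd` it reconstructs before proof verification is in the
// Open state rather than TryOpen, since the counterparty must already have run
// its own ack step. Across the handlers in this patch the expected counterparty
// state simply tracks the handshake; a toy summary:

#[derive(Debug, PartialEq)]
enum ToyState {
    TryOpen,
    Open,
    Closed,
}

enum HandshakeStep {
    OpenAck,
    OpenConfirm,
    CloseConfirm,
}

// The state this chain expects the counterparty to have committed when it
// receives each message (as reconstructed by the corresponding handler).
fn expected_counterparty_state(step: HandshakeStep) -> ToyState {
    match step {
        HandshakeStep::OpenAck => ToyState::TryOpen,
        HandshakeStep::OpenConfirm => ToyState::Open,
        HandshakeStep::CloseConfirm => ToyState::Closed,
    }
}

fn main() {
    assert_eq!(expected_counterparty_state(HandshakeStep::OpenAck), ToyState::TryOpen);
    assert_eq!(expected_counterparty_state(HandshakeStep::OpenConfirm), ToyState::Open);
    assert_eq!(expected_counterparty_state(HandshakeStep::CloseConfirm), ToyState::Closed);
}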
Verify proofs - verify_channel_proofs::( - ctx, - msg.proofs.height(), - &channel_end, - &conn, - &expected_channel_end, - &msg.proofs.object_proof(), - ) - .map_err(Error::chan_open_confirm_proof_verification)?; - - output.log("success: channel open confirm "); - - // Transition the channel end to the new state. - channel_end.set_state(State::Open); - - let event_attributes = Attributes { - channel_id: Some(msg.channel_id), - height: ctx.host_height(), - port_id: msg.port_id.clone(), - connection_id: channel_end.connection_hops[0].clone(), - counterparty_port_id: channel_end.counterparty().port_id.clone(), - counterparty_channel_id: channel_end.counterparty().channel_id.clone(), - }; - - let result = ChannelResult { - port_id: msg.port_id.clone(), - channel_id: msg.channel_id, - channel_id_state: ChannelIdState::Reused, - channel_end, - }; - - output.emit(IbcEvent::OpenConfirmChannel( - event_attributes - .try_into() - .map_err(|_| Error::missing_channel_id())?, - )); - - Ok(output.with_result(result)) + +use crate::{ + core::{ + ics03_connection::connection::State as ConnectionState, + ics04_channel::{ + channel::{ChannelEnd, Counterparty, State}, + error::Error, + events::Attributes, + handler::{verify::verify_channel_proofs, ChannelIdState, ChannelResult}, + msgs::chan_open_confirm::MsgChannelOpenConfirm, + }, + ics26_routing::context::ReaderContext, + }, + events::IbcEvent, + handler::{HandlerOutput, HandlerResult}, + prelude::*, +}; + +pub(crate) fn process( + ctx: &Ctx, + msg: &MsgChannelOpenConfirm, +) -> HandlerResult +where + Ctx: ReaderContext, +{ + let mut output = HandlerOutput::builder(); + + // Unwrap the old channel end and validate it against the message. + let mut channel_end = ctx.channel_end(&(msg.port_id.clone(), msg.channel_id))?; + + // Validate that the channel end is in a state where it can be confirmed. + if !channel_end.state_matches(&State::TryOpen) { + return Err(Error::invalid_channel_state(msg.channel_id, channel_end.state)) + } + + // An OPEN IBC connection running on the local (host) chain should exist. + if channel_end.connection_hops().len() != 1 { + return Err(Error::invalid_connection_hops_length(1, channel_end.connection_hops().len())) + } + + let conn = ctx + .connection_end(&channel_end.connection_hops()[0]) + .map_err(Error::ics03_connection)?; + + if !conn.state_matches(&ConnectionState::Open) { + return Err(Error::connection_not_open(channel_end.connection_hops()[0].clone())) + } + + // Proof verification in two steps: + // 1. Setup: build the Channel as we expect to find it on the other party. + + let expected_counterparty = Counterparty::new(msg.port_id.clone(), Some(msg.channel_id)); + + let connection_counterparty = conn.counterparty(); + let ccid = connection_counterparty.connection_id().ok_or_else(|| { + Error::undefined_connection_counterparty(channel_end.connection_hops()[0].clone()) + })?; + + let expected_connection_hops = vec![ccid.clone()]; + + let expected_channel_end = ChannelEnd::new( + State::Open, + *channel_end.ordering(), + expected_counterparty, + expected_connection_hops, + channel_end.version().clone(), + ); + //2. Verify proofs + verify_channel_proofs::( + ctx, + msg.proofs.height(), + &channel_end, + &conn, + &expected_channel_end, + &msg.proofs.object_proof(), + ) + .map_err(Error::chan_open_confirm_proof_verification)?; + + output.log("success: channel open confirm "); + + // Transition the channel end to the new state. 
+ channel_end.set_state(State::Open); + + let event_attributes = Attributes { + channel_id: Some(msg.channel_id), + height: ctx.host_height(), + port_id: msg.port_id.clone(), + connection_id: channel_end.connection_hops[0].clone(), + counterparty_port_id: channel_end.counterparty().port_id.clone(), + counterparty_channel_id: channel_end.counterparty().channel_id.clone(), + }; + + let result = ChannelResult { + port_id: msg.port_id.clone(), + channel_id: msg.channel_id, + channel_id_state: ChannelIdState::Reused, + channel_end, + }; + + output.emit(IbcEvent::OpenConfirmChannel( + event_attributes.try_into().map_err(|_| Error::missing_channel_id())?, + )); + + Ok(output.with_result(result)) } #[cfg(test)] mod tests { - use crate::prelude::*; - - use test_log::test; - - use crate::core::ics02_client::client_type::ClientType; - use crate::core::ics02_client::context::ClientReader; - use crate::core::ics03_connection::connection::ConnectionEnd; - use crate::core::ics03_connection::connection::Counterparty as ConnectionCounterparty; - use crate::core::ics03_connection::connection::State as ConnectionState; - use crate::core::ics03_connection::msgs::test_util::get_dummy_raw_counterparty; - use crate::core::ics03_connection::version::get_compatible_versions; - use crate::core::ics04_channel::channel::{ChannelEnd, Counterparty, Order, State}; - use crate::core::ics04_channel::handler::channel_dispatch; - use crate::core::ics04_channel::msgs::chan_open_confirm::test_util::get_dummy_raw_msg_chan_open_confirm; - use crate::core::ics04_channel::msgs::chan_open_confirm::MsgChannelOpenConfirm; - use crate::core::ics04_channel::msgs::ChannelMsg; - use crate::core::ics04_channel::Version; - use crate::core::ics24_host::identifier::{ClientId, ConnectionId}; - use crate::events::IbcEvent; - use crate::mock::context::MockContext; - use crate::test_utils::Crypto; - use crate::timestamp::ZERO_DURATION; - use crate::Height; - - // TODO: The tests here should use the same structure as `handler::chan_open_try::tests`. - #[test] - fn chan_open_confirm_msg_processing() { - struct Test { - name: String, - ctx: MockContext, - msg: ChannelMsg, - want_pass: bool, - } - let client_id = ClientId::new(ClientType::Mock, 24).unwrap(); - let conn_id = ConnectionId::new(2); - let context = MockContext::default(); - let client_consensus_state_height = context.host_height().revision_height; - - // The connection underlying the channel we're trying to open. 
- let conn_end = ConnectionEnd::new( - ConnectionState::Open, - client_id.clone(), - ConnectionCounterparty::try_from(get_dummy_raw_counterparty()).unwrap(), - get_compatible_versions(), - ZERO_DURATION, - ); - - let msg_chan_confirm = MsgChannelOpenConfirm::try_from( - get_dummy_raw_msg_chan_open_confirm(client_consensus_state_height), - ) - .unwrap(); - - let chan_end = ChannelEnd::new( - State::TryOpen, - Order::default(), - Counterparty::new( - msg_chan_confirm.port_id.clone(), - Some(msg_chan_confirm.channel_id), - ), - vec![conn_id.clone()], - Version::default(), - ); - - let tests: Vec = vec![Test { - name: "Good parameters".to_string(), - ctx: context - .with_client(&client_id, Height::new(0, client_consensus_state_height)) - .with_connection(conn_id, conn_end) - .with_channel( - msg_chan_confirm.port_id.clone(), - msg_chan_confirm.channel_id, - chan_end, - ), - msg: ChannelMsg::ChannelOpenConfirm(msg_chan_confirm), - want_pass: true, - }] - .into_iter() - .collect(); - - for test in tests { - let res = channel_dispatch::<_, Crypto>(&test.ctx, &test.msg); - // Additionally check the events and the output objects in the result. - match res { - Ok((proto_output, res)) => { - assert!( + use crate::prelude::*; + + use test_log::test; + + use crate::{ + core::{ + ics02_client::context::ClientReader, + ics03_connection::{ + connection::{ + ConnectionEnd, Counterparty as ConnectionCounterparty, State as ConnectionState, + }, + msgs::test_util::get_dummy_raw_counterparty, + version::get_compatible_versions, + }, + ics04_channel::{ + channel::{ChannelEnd, Counterparty, Order, State}, + handler::channel_dispatch, + msgs::{ + chan_open_confirm::{ + test_util::get_dummy_raw_msg_chan_open_confirm, MsgChannelOpenConfirm, + }, + ChannelMsg, + }, + Version, + }, + ics24_host::identifier::{ClientId, ConnectionId}, + }, + events::IbcEvent, + mock::{ + client_state::MockClientState, + context::{MockClientTypes, MockContext}, + }, + timestamp::ZERO_DURATION, + Height, + }; + + // TODO: The tests here should use the same structure as `handler::chan_open_try::tests`. + #[test] + fn chan_open_confirm_msg_processing() { + struct Test { + name: String, + ctx: MockContext, + msg: ChannelMsg, + want_pass: bool, + } + let client_id = ClientId::new(MockClientState::client_type(), 24).unwrap(); + let conn_id = ConnectionId::new(2); + let context = MockContext::default(); + let client_consensus_state_height = context.host_height().revision_height; + + // The connection underlying the channel we're trying to open. 
+ let conn_end = ConnectionEnd::new( + ConnectionState::Open, + client_id.clone(), + ConnectionCounterparty::try_from(get_dummy_raw_counterparty()).unwrap(), + get_compatible_versions(), + ZERO_DURATION, + ); + + let msg_chan_confirm = MsgChannelOpenConfirm::try_from( + get_dummy_raw_msg_chan_open_confirm(client_consensus_state_height), + ) + .unwrap(); + + let chan_end = ChannelEnd::new( + State::TryOpen, + Order::default(), + Counterparty::new(msg_chan_confirm.port_id.clone(), Some(msg_chan_confirm.channel_id)), + vec![conn_id.clone()], + Version::default(), + ); + + let tests: Vec = vec![Test { + name: "Good parameters".to_string(), + ctx: context + .with_client(&client_id, Height::new(0, client_consensus_state_height)) + .with_connection(conn_id, conn_end) + .with_channel( + msg_chan_confirm.port_id.clone(), + msg_chan_confirm.channel_id, + chan_end, + ), + msg: ChannelMsg::ChannelOpenConfirm(msg_chan_confirm), + want_pass: true, + }] + .into_iter() + .collect(); + + for test in tests { + let res = channel_dispatch(&test.ctx, &test.msg); + // Additionally check the events and the output objects in the result. + match res { + Ok((proto_output, res)) => { + assert!( test.want_pass, "chan_open_confirm: test passed but was supposed to fail for test: {}, \nparams {:?} {:?}", test.name, @@ -201,29 +208,29 @@ mod tests { test.ctx.clone() ); - let proto_output = proto_output.with_result(()); - assert!(!proto_output.events.is_empty()); // Some events must exist. - - // The object in the output is a ConnectionEnd, should have init state. - //assert_eq!(res.channel_id, msg_chan_init.channel_id().clone()); - assert_eq!(res.channel_end.state().clone(), State::Open); - - for e in proto_output.events.iter() { - assert!(matches!(e, &IbcEvent::OpenConfirmChannel(_))); - assert_eq!(e.height(), test.ctx.host_height()); - } - } - Err(e) => { - assert!( - !test.want_pass, - "chan_open_ack: did not pass test: {}, \nparams {:?} {:?}\nerror: {:?}", - test.name, - test.msg, - test.ctx.clone(), - e, - ); - } - } - } - } + let proto_output = proto_output.with_result(()); + assert!(!proto_output.events.is_empty()); // Some events must exist. + + // The object in the output is a ConnectionEnd, should have init state. + //assert_eq!(res.channel_id, msg_chan_init.channel_id().clone()); + assert_eq!(res.channel_end.state().clone(), State::Open); + + for e in proto_output.events.iter() { + assert!(matches!(e, &IbcEvent::OpenConfirmChannel(_))); + assert_eq!(e.height(), test.ctx.host_height()); + } + }, + Err(e) => { + assert!( + !test.want_pass, + "chan_open_ack: did not pass test: {}, \nparams {:?} {:?}\nerror: {:?}", + test.name, + test.msg, + test.ctx.clone(), + e, + ); + }, + } + } + } } diff --git a/modules/src/core/ics04_channel/handler/chan_open_init.rs b/modules/src/core/ics04_channel/handler/chan_open_init.rs index 43e3b01b41..8788459581 100644 --- a/modules/src/core/ics04_channel/handler/chan_open_init.rs +++ b/modules/src/core/ics04_channel/handler/chan_open_init.rs @@ -1,160 +1,168 @@ //! Protocol logic specific to ICS4 messages of type `MsgChannelOpenInit`. 
-use crate::core::ics04_channel::channel::{ChannelEnd, State}; -use crate::core::ics04_channel::error::Error; -use crate::core::ics04_channel::events::Attributes; -use crate::core::ics04_channel::handler::{ChannelIdState, ChannelResult}; -use crate::core::ics04_channel::msgs::chan_open_init::MsgChannelOpenInit; -use crate::core::ics24_host::identifier::ChannelId; -use crate::core::ics26_routing::context::ReaderContext; -use crate::events::IbcEvent; -use crate::handler::{HandlerOutput, HandlerResult}; -use crate::prelude::*; - -pub(crate) fn process( - ctx: &dyn ReaderContext, - msg: &MsgChannelOpenInit, +use crate::{ + core::{ + ics04_channel::{ + channel::{ChannelEnd, State}, + error::Error, + events::Attributes, + handler::{ChannelIdState, ChannelResult}, + msgs::chan_open_init::MsgChannelOpenInit, + }, + ics24_host::identifier::ChannelId, + ics26_routing::context::ReaderContext, + }, + events::IbcEvent, + handler::{HandlerOutput, HandlerResult}, + prelude::*, +}; + +pub(crate) fn process( + ctx: &Ctx, + msg: &MsgChannelOpenInit, ) -> HandlerResult { - let mut output = HandlerOutput::builder(); - - if msg.channel.connection_hops().len() != 1 { - return Err(Error::invalid_connection_hops_length( - 1, - msg.channel.connection_hops().len(), - )); - } - - // An IBC connection running on the local (host) chain should exist. - let conn = ctx - .connection_end(&msg.channel.connection_hops()[0]) - .map_err(Error::ics03_connection)?; - let get_versions = conn.versions(); - let version = match get_versions { - [version] => version, - _ => return Err(Error::invalid_version_length_connection()), - }; - - let channel_feature = msg.channel.ordering().to_string(); - if !version.is_supported_feature(channel_feature) { - return Err(Error::channel_feature_not_suported_by_connection()); - } - - // Channel identifier construction. - let id_counter = ctx.channel_counter()?; - let chan_id = ChannelId::new(id_counter); - - output.log(format!( - "success: generated new channel identifier: {}", - chan_id - )); - - let new_channel_end = ChannelEnd::new( - State::Init, - *msg.channel.ordering(), - msg.channel.counterparty().clone(), - msg.channel.connection_hops().clone(), - msg.channel.version().clone(), - ); - - output.log("success: no channel found"); - - let event_attributes = Attributes { - channel_id: Some(chan_id), - height: ctx.host_height(), - port_id: msg.port_id.clone(), - connection_id: new_channel_end.connection_hops[0].clone(), - counterparty_port_id: new_channel_end.counterparty().port_id.clone(), - counterparty_channel_id: new_channel_end.counterparty().channel_id.clone(), - }; - - let result = ChannelResult { - port_id: msg.port_id.clone(), - channel_id: chan_id, - channel_end: new_channel_end, - channel_id_state: ChannelIdState::Generated, - }; - - output.emit(IbcEvent::OpenInitChannel( - event_attributes - .try_into() - .map_err(|_| Error::missing_channel_id())?, - )); - - Ok(output.with_result(result)) + let mut output = HandlerOutput::builder(); + + if msg.channel.connection_hops().len() != 1 { + return Err(Error::invalid_connection_hops_length(1, msg.channel.connection_hops().len())) + } + + // An IBC connection running on the local (host) chain should exist. 
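// Editorial note: chan_open_init below needs no proofs; it checks that the
// channel names exactly one connection hop, that the single connection version
// supports the requested ordering as a feature, and then derives the new channel
// identifier from the context's channel counter via `ChannelId::new(id_counter)`.
// A toy sketch of the hop-count and feature checks plus the counter-based
// identifier (hypothetical helper; strings stand in for the crate's Version and
// Order types):

fn toy_channel_open_init(
    connection_hops: &[u32],
    supported_features: &[&str],
    ordering: &str,
    channel_counter: u64,
) -> Result<String, &'static str> {
    if connection_hops.len() != 1 {
        return Err("expected exactly one connection hop");
    }
    if !supported_features.contains(&ordering) {
        return Err("channel ordering not supported by the connection version");
    }
    // Identifier derived from a monotonically increasing counter, mirroring
    // `ChannelId::new(ctx.channel_counter()?)` in the handler.
    Ok(format!("channel-{}", channel_counter))
}

fn main() {
    let id = toy_channel_open_init(&[0], &["ORDER_ORDERED", "ORDER_UNORDERED"], "ORDER_UNORDERED", 5)
        .unwrap();
    assert_eq!(id, "channel-5");
}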
+ let conn = ctx + .connection_end(&msg.channel.connection_hops()[0]) + .map_err(Error::ics03_connection)?; + let get_versions = conn.versions(); + let version = match get_versions { + [version] => version, + _ => return Err(Error::invalid_version_length_connection()), + }; + + let channel_feature = msg.channel.ordering().to_string(); + if !version.is_supported_feature(channel_feature) { + return Err(Error::channel_feature_not_suported_by_connection()) + } + + // Channel identifier construction. + let id_counter = ctx.channel_counter()?; + let chan_id = ChannelId::new(id_counter); + + output.log(format!("success: generated new channel identifier: {}", chan_id)); + + let new_channel_end = ChannelEnd::new( + State::Init, + *msg.channel.ordering(), + msg.channel.counterparty().clone(), + msg.channel.connection_hops().clone(), + msg.channel.version().clone(), + ); + + output.log("success: no channel found"); + + let event_attributes = Attributes { + channel_id: Some(chan_id), + height: ctx.host_height(), + port_id: msg.port_id.clone(), + connection_id: new_channel_end.connection_hops[0].clone(), + counterparty_port_id: new_channel_end.counterparty().port_id.clone(), + counterparty_channel_id: new_channel_end.counterparty().channel_id.clone(), + }; + + let result = ChannelResult { + port_id: msg.port_id.clone(), + channel_id: chan_id, + channel_end: new_channel_end, + channel_id_state: ChannelIdState::Generated, + }; + + output.emit(IbcEvent::OpenInitChannel( + event_attributes.try_into().map_err(|_| Error::missing_channel_id())?, + )); + + Ok(output.with_result(result)) } #[cfg(test)] mod tests { - use crate::prelude::*; - - use test_log::test; - - use crate::core::ics02_client::context::ClientReader; - use crate::core::ics03_connection::connection::ConnectionEnd; - use crate::core::ics03_connection::connection::State as ConnectionState; - use crate::core::ics03_connection::msgs::conn_open_init::test_util::get_dummy_raw_msg_conn_open_init; - use crate::core::ics03_connection::msgs::conn_open_init::MsgConnectionOpenInit; - use crate::core::ics03_connection::version::get_compatible_versions; - use crate::core::ics04_channel::channel::State; - use crate::core::ics04_channel::handler::channel_dispatch; - use crate::core::ics04_channel::msgs::chan_open_init::test_util::get_dummy_raw_msg_chan_open_init; - use crate::core::ics04_channel::msgs::chan_open_init::MsgChannelOpenInit; - use crate::core::ics04_channel::msgs::ChannelMsg; - use crate::core::ics24_host::identifier::ConnectionId; - use crate::events::IbcEvent; - use crate::mock::context::MockContext; - use crate::test_utils::Crypto; - - #[test] - fn chan_open_init_msg_processing() { - struct Test { - name: String, - ctx: MockContext, - msg: ChannelMsg, - want_pass: bool, - } - - let msg_chan_init = - MsgChannelOpenInit::try_from(get_dummy_raw_msg_chan_open_init()).unwrap(); - - let context = MockContext::default(); - - let msg_conn_init = - MsgConnectionOpenInit::try_from(get_dummy_raw_msg_conn_open_init()).unwrap(); - - let init_conn_end = ConnectionEnd::new( - ConnectionState::Init, - msg_conn_init.client_id.clone(), - msg_conn_init.counterparty.clone(), - get_compatible_versions(), - msg_conn_init.delay_period, - ); - - let cid = ConnectionId::default(); - - let tests: Vec = vec![ - Test { - name: "Processing fails because no connection exists in the context".to_string(), - ctx: context.clone(), - msg: ChannelMsg::ChannelOpenInit(msg_chan_init.clone()), - want_pass: false, - }, - Test { - name: "Good parameters".to_string(), - ctx: 
context.with_connection(cid, init_conn_end), - msg: ChannelMsg::ChannelOpenInit(msg_chan_init), - want_pass: true, - }, - ] - .into_iter() - .collect(); - - for test in tests { - let res = channel_dispatch::<_, Crypto>(&test.ctx, &test.msg); - // Additionally check the events and the output objects in the result. - match res { - Ok((proto_output, res)) => { - assert!( + use crate::prelude::*; + + use test_log::test; + + use crate::{ + core::{ + ics02_client::context::ClientReader, + ics03_connection::{ + connection::{ConnectionEnd, State as ConnectionState}, + msgs::conn_open_init::{ + test_util::get_dummy_raw_msg_conn_open_init, MsgConnectionOpenInit, + }, + version::get_compatible_versions, + }, + ics04_channel::{ + channel::State, + handler::channel_dispatch, + msgs::{ + chan_open_init::{ + test_util::get_dummy_raw_msg_chan_open_init, MsgChannelOpenInit, + }, + ChannelMsg, + }, + }, + ics24_host::identifier::ConnectionId, + }, + events::IbcEvent, + mock::context::{MockClientTypes, MockContext}, + }; + + #[test] + fn chan_open_init_msg_processing() { + struct Test { + name: String, + ctx: MockContext, + msg: ChannelMsg, + want_pass: bool, + } + + let msg_chan_init = + MsgChannelOpenInit::try_from(get_dummy_raw_msg_chan_open_init()).unwrap(); + + let context = MockContext::default(); + + let msg_conn_init = + MsgConnectionOpenInit::try_from(get_dummy_raw_msg_conn_open_init()).unwrap(); + + let init_conn_end = ConnectionEnd::new( + ConnectionState::Init, + msg_conn_init.client_id.clone(), + msg_conn_init.counterparty.clone(), + get_compatible_versions(), + msg_conn_init.delay_period, + ); + + let cid = ConnectionId::default(); + + let tests: Vec = vec![ + Test { + name: "Processing fails because no connection exists in the context".to_string(), + ctx: context.clone(), + msg: ChannelMsg::ChannelOpenInit(msg_chan_init.clone()), + want_pass: false, + }, + Test { + name: "Good parameters".to_string(), + ctx: context.with_connection(cid, init_conn_end), + msg: ChannelMsg::ChannelOpenInit(msg_chan_init), + want_pass: true, + }, + ] + .into_iter() + .collect(); + + for test in tests { + let res = channel_dispatch(&test.ctx, &test.msg); + // Additionally check the events and the output objects in the result. + match res { + Ok((proto_output, res)) => { + assert!( test.want_pass, "chan_open_init: test passed but was supposed to fail for test: {}, \nparams {:?} {:?}", test.name, @@ -162,33 +170,33 @@ mod tests { test.ctx.clone() ); - let proto_output = proto_output.with_result(()); - assert!(!proto_output.events.is_empty()); // Some events must exist. - - // The object in the output is a ChannelEnd, should have init state. - assert_eq!(res.channel_end.state().clone(), State::Init); - let msg_init = test.msg; - - if let ChannelMsg::ChannelOpenInit(msg_init) = msg_init { - assert_eq!(res.port_id.clone(), msg_init.port_id.clone()); - } - - for e in proto_output.events.iter() { - assert!(matches!(e, &IbcEvent::OpenInitChannel(_))); - assert_eq!(e.height(), test.ctx.host_height()); - } - } - Err(e) => { - assert!( - !test.want_pass, - "chan_open_init: did not pass test: {}, \nparams {:?} {:?} error: {:?}", - test.name, - test.msg, - test.ctx.clone(), - e, - ); - } - } - } - } + let proto_output = proto_output.with_result(()); + assert!(!proto_output.events.is_empty()); // Some events must exist. + + // The object in the output is a ChannelEnd, should have init state. 
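// [Editorial aside -- illustrative sketch, not part of this patch.] The assertions here
// inspect a `HandlerOutput`, which the handlers accumulate through a builder (`log`,
// `emit`, `with_result`). A toy model of that shape; `ToyOutput` and `ToyOutputBuilder`
// are illustrative names, not the actual ibc-rs definitions.
struct ToyOutput<R> {
    log: Vec<String>,
    events: Vec<String>,
    result: R,
}

struct ToyOutputBuilder {
    log: Vec<String>,
    events: Vec<String>,
}

impl ToyOutputBuilder {
    fn new() -> Self {
        ToyOutputBuilder { log: Vec::new(), events: Vec::new() }
    }
    fn log(&mut self, entry: impl Into<String>) {
        self.log.push(entry.into());
    }
    fn emit(&mut self, event: impl Into<String>) {
        self.events.push(event.into());
    }
    fn with_result<R>(self, result: R) -> ToyOutput<R> {
        ToyOutput { log: self.log, events: self.events, result }
    }
}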
+ assert_eq!(res.channel_end.state().clone(), State::Init); + let msg_init = test.msg; + + if let ChannelMsg::ChannelOpenInit(msg_init) = msg_init { + assert_eq!(res.port_id.clone(), msg_init.port_id.clone()); + } + + for e in proto_output.events.iter() { + assert!(matches!(e, &IbcEvent::OpenInitChannel(_))); + assert_eq!(e.height(), test.ctx.host_height()); + } + }, + Err(e) => { + assert!( + !test.want_pass, + "chan_open_init: did not pass test: {}, \nparams {:?} {:?} error: {:?}", + test.name, + test.msg, + test.ctx.clone(), + e, + ); + }, + } + } + } } diff --git a/modules/src/core/ics04_channel/handler/chan_open_try.rs b/modules/src/core/ics04_channel/handler/chan_open_try.rs index 081b1ec0f9..60cfeed3df 100644 --- a/modules/src/core/ics04_channel/handler/chan_open_try.rs +++ b/modules/src/core/ics04_channel/handler/chan_open_try.rs @@ -1,291 +1,300 @@ //! Protocol logic specific to ICS4 messages of type `MsgChannelOpenTry`. -use crate::clients::host_functions::HostFunctionsProvider; -use crate::core::ics03_connection::connection::State as ConnectionState; -use crate::core::ics04_channel::channel::{ChannelEnd, Counterparty, State}; -use crate::core::ics04_channel::error::Error; -use crate::core::ics04_channel::events::Attributes; -use crate::core::ics04_channel::handler::verify::verify_channel_proofs; -use crate::core::ics04_channel::handler::{ChannelIdState, ChannelResult}; -use crate::core::ics04_channel::msgs::chan_open_try::MsgChannelOpenTry; -use crate::core::ics24_host::identifier::ChannelId; -use crate::core::ics26_routing::context::ReaderContext; -use crate::events::IbcEvent; -use crate::handler::{HandlerOutput, HandlerResult}; -use crate::prelude::*; - -pub(crate) fn process( - ctx: &dyn ReaderContext, - msg: &MsgChannelOpenTry, -) -> HandlerResult { - let mut output = HandlerOutput::builder(); - - // Unwrap the old channel end (if any) and validate it against the message. - let (mut new_channel_end, channel_id) = { - let channel_end = ChannelEnd::new( - State::Init, - *msg.channel.ordering(), - msg.channel.counterparty().clone(), - msg.channel.connection_hops().clone(), - msg.counterparty_version.clone(), - ); - - // Channel identifier construction. - let id_counter = ctx.channel_counter()?; - let chan_id = ChannelId::new(id_counter); - - output.log(format!( - "success: generated new channel identifier: {}", - chan_id - )); - - (channel_end, chan_id) - }; - - // An IBC connection running on the local (host) chain should exist. - if msg.channel.connection_hops().len() != 1 { - return Err(Error::invalid_connection_hops_length( - 1, - msg.channel.connection_hops().len(), - )); - } - - let conn = ctx - .connection_end(&msg.channel.connection_hops()[0]) - .map_err(Error::ics03_connection)?; - if !conn.state_matches(&ConnectionState::Open) { - return Err(Error::connection_not_open( - msg.channel.connection_hops()[0].clone(), - )); - } - - let get_versions = conn.versions(); - let version = match get_versions { - [version] => version, - _ => return Err(Error::invalid_version_length_connection()), - }; - - let channel_feature = msg.channel.ordering().to_string(); - if !version.is_supported_feature(channel_feature) { - return Err(Error::channel_feature_not_suported_by_connection()); - } - - // Proof verification in two steps: - // 1. Setup: build the Channel as we expect to find it on the other party. - // the port should be identical with the port we're using; the channel id should not be set - // since the counterparty cannot know yet which ID did we choose. 
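// [Editorial aside -- illustrative sketch, not part of this patch.] The two-step check
// described in the comment above -- reconstruct the exact value the counterparty chain
// is expected to have stored, then verify the membership proof against that value -- in
// toy form. `ExpectedEntry`, `ToyProof` and `toy_verify` are hypothetical, not ibc-rs items.
#[derive(PartialEq, Debug)]
struct ExpectedEntry {
    port_id: String,
    connection_id: String,
}

struct ToyProof {
    committed: ExpectedEntry,
}

fn toy_verify(proof: &ToyProof, expected: &ExpectedEntry) -> Result<(), String> {
    if proof.committed == *expected {
        Ok(())
    } else {
        Err("stored channel end does not match the expected one".to_string())
    }
}
// In the real handler this role is played by `verify_channel_proofs`, with the expected
// `ChannelEnd` built from the local message and connection data.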
- let expected_counterparty = Counterparty::new(msg.port_id.clone(), None); - let counterparty = conn.counterparty(); - let ccid = counterparty.connection_id().ok_or_else(|| { - Error::undefined_connection_counterparty(msg.channel.connection_hops()[0].clone()) - })?; - let expected_connection_hops = vec![ccid.clone()]; - - // The other party should be storing a channel end in this configuration. - let expected_channel_end = ChannelEnd::new( - State::Init, - *msg.channel.ordering(), - expected_counterparty, - expected_connection_hops, - msg.counterparty_version.clone(), - ); - - // 2. Actual proofs are verified now. - verify_channel_proofs::( - ctx, - msg.proofs.height(), - &new_channel_end, - &conn, - &expected_channel_end, - &msg.proofs.object_proof(), - )?; - - output.log("success: channel open try "); - - // Transition the channel end to the new state & pick a version. - new_channel_end.set_state(State::TryOpen); - - let event_attributes = Attributes { - channel_id: Some(channel_id), - height: ctx.host_height(), - port_id: msg.port_id.clone(), - connection_id: new_channel_end.connection_hops[0].clone(), - counterparty_port_id: new_channel_end.counterparty().port_id.clone(), - counterparty_channel_id: new_channel_end.counterparty().channel_id.clone(), - }; - - let result = ChannelResult { - port_id: msg.port_id.clone(), - channel_id_state: ChannelIdState::Generated, - channel_id, - channel_end: new_channel_end, - }; - - output.emit(IbcEvent::OpenTryChannel( - event_attributes - .try_into() - .map_err(|_| Error::missing_channel_id())?, - )); - - Ok(output.with_result(result)) +use crate::{ + core::{ + ics03_connection::connection::State as ConnectionState, + ics04_channel::{ + channel::{ChannelEnd, Counterparty, State}, + error::Error, + events::Attributes, + handler::{verify::verify_channel_proofs, ChannelIdState, ChannelResult}, + msgs::chan_open_try::MsgChannelOpenTry, + }, + ics24_host::identifier::ChannelId, + ics26_routing::context::ReaderContext, + }, + events::IbcEvent, + handler::{HandlerOutput, HandlerResult}, + prelude::*, +}; + +pub(crate) fn process( + ctx: &Ctx, + msg: &MsgChannelOpenTry, +) -> HandlerResult +where + Ctx: ReaderContext, +{ + let mut output = HandlerOutput::builder(); + + // Unwrap the old channel end (if any) and validate it against the message. + let (mut new_channel_end, channel_id) = { + let channel_end = ChannelEnd::new( + State::Init, + *msg.channel.ordering(), + msg.channel.counterparty().clone(), + msg.channel.connection_hops().clone(), + msg.counterparty_version.clone(), + ); + + // Channel identifier construction. + let id_counter = ctx.channel_counter()?; + let chan_id = ChannelId::new(id_counter); + + output.log(format!("success: generated new channel identifier: {}", chan_id)); + + (channel_end, chan_id) + }; + + // An IBC connection running on the local (host) chain should exist. 
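// [Editorial aside -- illustrative sketch, not part of this patch.] Channel identifiers
// here are derived from a monotonically increasing counter kept by the host chain
// (`ctx.channel_counter()` feeding `ChannelId::new`). A toy version of that scheme,
// assuming the conventional `channel-<n>` textual form:
fn toy_channel_id(counter: u64) -> String {
    format!("channel-{}", counter)
}
// e.g. toy_channel_id(0) == "channel-0", toy_channel_id(7) == "channel-7"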
+ if msg.channel.connection_hops().len() != 1 { + return Err(Error::invalid_connection_hops_length(1, msg.channel.connection_hops().len())) + } + + let conn = ctx + .connection_end(&msg.channel.connection_hops()[0]) + .map_err(Error::ics03_connection)?; + if !conn.state_matches(&ConnectionState::Open) { + return Err(Error::connection_not_open(msg.channel.connection_hops()[0].clone())) + } + + let get_versions = conn.versions(); + let version = match get_versions { + [version] => version, + _ => return Err(Error::invalid_version_length_connection()), + }; + + let channel_feature = msg.channel.ordering().to_string(); + if !version.is_supported_feature(channel_feature) { + return Err(Error::channel_feature_not_suported_by_connection()) + } + + // Proof verification in two steps: + // 1. Setup: build the Channel as we expect to find it on the other party. + // the port should be identical with the port we're using; the channel id should not be set + // since the counterparty cannot know yet which ID did we choose. + let expected_counterparty = Counterparty::new(msg.port_id.clone(), None); + let counterparty = conn.counterparty(); + let ccid = counterparty.connection_id().ok_or_else(|| { + Error::undefined_connection_counterparty(msg.channel.connection_hops()[0].clone()) + })?; + let expected_connection_hops = vec![ccid.clone()]; + + // The other party should be storing a channel end in this configuration. + let expected_channel_end = ChannelEnd::new( + State::Init, + *msg.channel.ordering(), + expected_counterparty, + expected_connection_hops, + msg.counterparty_version.clone(), + ); + + // 2. Actual proofs are verified now. + verify_channel_proofs::( + ctx, + msg.proofs.height(), + &new_channel_end, + &conn, + &expected_channel_end, + &msg.proofs.object_proof(), + )?; + + output.log("success: channel open try "); + + // Transition the channel end to the new state & pick a version. 
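// [Editorial aside -- illustrative sketch, not part of this patch.] The handshake walks
// each channel end through a small state machine; this handler moves the end it creates
// into `TryOpen` just below. A rough model of the happy-path transitions (crossing-hello
// variants omitted); `ToyChanState` and `toy_next_state` are illustrative, not ibc-rs items.
#[derive(Clone, Copy, PartialEq, Debug)]
enum ToyChanState {
    Uninitialized,
    Init,
    TryOpen,
    Open,
}

fn toy_next_state(current: ToyChanState, msg: &str) -> Option<ToyChanState> {
    match (current, msg) {
        (ToyChanState::Uninitialized, "ChanOpenInit") => Some(ToyChanState::Init),
        (ToyChanState::Uninitialized, "ChanOpenTry") => Some(ToyChanState::TryOpen),
        (ToyChanState::Init, "ChanOpenAck") => Some(ToyChanState::Open),
        (ToyChanState::TryOpen, "ChanOpenConfirm") => Some(ToyChanState::Open),
        _ => None,
    }
}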
+ new_channel_end.set_state(State::TryOpen); + + let event_attributes = Attributes { + channel_id: Some(channel_id), + height: ctx.host_height(), + port_id: msg.port_id.clone(), + connection_id: new_channel_end.connection_hops[0].clone(), + counterparty_port_id: new_channel_end.counterparty().port_id.clone(), + counterparty_channel_id: new_channel_end.counterparty().channel_id.clone(), + }; + + let result = ChannelResult { + port_id: msg.port_id.clone(), + channel_id_state: ChannelIdState::Generated, + channel_id, + channel_end: new_channel_end, + }; + + output.emit(IbcEvent::OpenTryChannel( + event_attributes.try_into().map_err(|_| Error::missing_channel_id())?, + )); + + Ok(output.with_result(result)) } #[cfg(test)] mod tests { - use crate::prelude::*; - - use test_log::test; - - use crate::core::ics02_client::client_type::ClientType; - use crate::core::ics02_client::context::ClientReader; - use crate::core::ics02_client::error as ics02_error; - use crate::core::ics03_connection::connection::ConnectionEnd; - use crate::core::ics03_connection::connection::Counterparty as ConnectionCounterparty; - use crate::core::ics03_connection::connection::State as ConnectionState; - use crate::core::ics03_connection::error as ics03_error; - use crate::core::ics03_connection::msgs::test_util::get_dummy_raw_counterparty; - use crate::core::ics03_connection::version::get_compatible_versions; - use crate::core::ics04_channel::channel::{ChannelEnd, State}; - use crate::core::ics04_channel::error; - use crate::core::ics04_channel::handler::channel_dispatch; - use crate::core::ics04_channel::msgs::chan_open_try::test_util::get_dummy_raw_msg_chan_open_try; - use crate::core::ics04_channel::msgs::chan_open_try::MsgChannelOpenTry; - use crate::core::ics04_channel::msgs::ChannelMsg; - use crate::core::ics24_host::identifier::{ChannelId, ClientId, ConnectionId}; - use crate::events::IbcEvent; - use crate::mock::context::MockContext; - use crate::test_utils::Crypto; - use crate::timestamp::ZERO_DURATION; - use crate::Height; - - #[test] - fn chan_open_try_msg_processing() { - struct Test { - name: String, - ctx: MockContext, - msg: ChannelMsg, - want_pass: bool, - match_error: Box, - } - - // Some general-purpose variable to parametrize the messages and the context. - let proof_height = 10; - let conn_id = ConnectionId::new(2); - let client_id = ClientId::new(ClientType::Mock, 45).unwrap(); - - // The context. We'll reuse this same one across all tests. - let context = MockContext::default(); - - // This is the connection underlying the channel we're trying to open. - let conn_end = ConnectionEnd::new( - ConnectionState::Open, - client_id.clone(), - ConnectionCounterparty::try_from(get_dummy_raw_counterparty()).unwrap(), - get_compatible_versions(), - ZERO_DURATION, - ); - - // We're going to test message processing against this message. - let mut msg = - MsgChannelOpenTry::try_from(get_dummy_raw_msg_chan_open_try(proof_height)).unwrap(); - - // Assumption: an already existing `Init` channel should exist in the context for `msg`, and - // this channel should depend on connection `conn_id`. - let chan_id = ChannelId::new(24); - let hops = vec![conn_id.clone()]; - msg.channel.connection_hops = hops; - - // This message does not assume a channel should already be initialized. - let msg_vanilla = msg.clone(); - - // A preloaded channel end that resides in the context. This is constructed so as to be - // consistent with the incoming ChanOpenTry message `msg`. 
- let correct_chan_end = ChannelEnd::new( - State::Init, - *msg.channel.ordering(), - msg.channel.counterparty().clone(), - msg.channel.connection_hops().clone(), - msg.channel.version().clone(), - ); - - let tests: Vec = vec![ - Test { - name: "Processing fails because no connection exists in the context".to_string(), - ctx: context.clone(), - msg: ChannelMsg::ChannelOpenTry(msg_vanilla.clone()), - want_pass: false, - match_error: { - let connection_id = msg.channel.connection_hops()[0].clone(); - Box::new(move |e| match e { - error::ErrorDetail::Ics03Connection(e) => { - assert_eq!( - e.source, - ics03_error::ErrorDetail::ConnectionNotFound( - ics03_error::ConnectionNotFoundSubdetail { connection_id } - ) - ); - } - _ => { - panic!("Expected MissingConnection, instead got {}", e) - } - }) - }, - }, - Test { - name: "Processing fails b/c the context has no client state".to_string(), - ctx: context - .clone() - .with_connection(conn_id.clone(), conn_end.clone()) - .with_channel(msg.port_id.clone(), chan_id, correct_chan_end.clone()), - msg: ChannelMsg::ChannelOpenTry(msg.clone()), - want_pass: false, - match_error: Box::new(|e| match e { - error::ErrorDetail::Ics02Client(e) => { - assert_eq!( - e.source, - ics02_error::ErrorDetail::ClientNotFound( - ics02_error::ClientNotFoundSubdetail { - client_id: ClientId::new(ClientType::Mock, 45).unwrap() - } - ) - ); - } - _ => { - panic!("Expected MissingClientState, instead got {}", e) - } - }), - }, - Test { - name: "Processing is successful".to_string(), - ctx: context - .clone() - .with_client(&client_id, Height::new(0, proof_height)) - .with_connection(conn_id.clone(), conn_end.clone()) - .with_channel(msg.port_id.clone(), chan_id, correct_chan_end), - msg: ChannelMsg::ChannelOpenTry(msg), - want_pass: true, - match_error: Box::new(|_| {}), - }, - Test { - name: "Processing is successful against an empty context (no preexisting channel)" - .to_string(), - ctx: context - .with_client(&client_id, Height::new(0, proof_height)) - .with_connection(conn_id, conn_end), - msg: ChannelMsg::ChannelOpenTry(msg_vanilla), - want_pass: true, - match_error: Box::new(|_| {}), - }, - ] - .into_iter() - .collect(); - - for test in tests { - let res = channel_dispatch::<_, Crypto>(&test.ctx, &test.msg); - // Additionally check the events and the output objects in the result. - match res { - Ok((proto_output, res)) => { - assert!( + use crate::prelude::*; + + use test_log::test; + + use crate::{ + core::{ + ics02_client::{context::ClientReader, error as ics02_error}, + ics03_connection::{ + connection::{ + ConnectionEnd, Counterparty as ConnectionCounterparty, State as ConnectionState, + }, + error as ics03_error, + msgs::test_util::get_dummy_raw_counterparty, + version::get_compatible_versions, + }, + ics04_channel::{ + channel::{ChannelEnd, State}, + error, + handler::channel_dispatch, + msgs::{ + chan_open_try::{ + test_util::get_dummy_raw_msg_chan_open_try, MsgChannelOpenTry, + }, + ChannelMsg, + }, + }, + ics24_host::identifier::{ChannelId, ClientId, ConnectionId}, + }, + events::IbcEvent, + mock::{ + client_state::MockClientState, + context::{MockClientTypes, MockContext}, + }, + timestamp::ZERO_DURATION, + Height, + }; + + #[test] + fn chan_open_try_msg_processing() { + struct Test { + name: String, + ctx: MockContext, + msg: ChannelMsg, + want_pass: bool, + match_error: Box, + } + + // Some general-purpose variable to parametrize the messages and the context. 
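// [Editorial aside -- illustrative sketch, not part of this patch.] The `match_error`
// field declared in the `Test` struct above holds a closure, so each failing case can
// assert on the exact error it expects. A self-contained toy version of that pattern;
// `FailCase` and `toy_handler` are illustrative names, not items from this crate.
struct FailCase {
    name: &'static str,
    input: u32,
    check: Box<dyn FnOnce(String)>,
}

fn toy_handler(input: u32) -> Result<u32, String> {
    if input == 0 {
        Err("no connection found".to_string())
    } else {
        Ok(input)
    }
}

#[test]
fn per_case_error_assertions() {
    let cases = vec![FailCase {
        name: "missing connection",
        input: 0,
        check: Box::new(|e| assert!(e.contains("no connection"))),
    }];
    for case in cases {
        match toy_handler(case.input) {
            Ok(_) => panic!("{}: expected an error", case.name),
            Err(e) => (case.check)(e),
        }
    }
}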
+ let proof_height = 10; + let conn_id = ConnectionId::new(2); + let client_id = ClientId::new(MockClientState::client_type(), 45).unwrap(); + + // The context. We'll reuse this same one across all tests. + let context = MockContext::default(); + + // This is the connection underlying the channel we're trying to open. + let conn_end = ConnectionEnd::new( + ConnectionState::Open, + client_id.clone(), + ConnectionCounterparty::try_from(get_dummy_raw_counterparty()).unwrap(), + get_compatible_versions(), + ZERO_DURATION, + ); + + // We're going to test message processing against this message. + let mut msg = + MsgChannelOpenTry::try_from(get_dummy_raw_msg_chan_open_try(proof_height)).unwrap(); + + // Assumption: an already existing `Init` channel should exist in the context for `msg`, and + // this channel should depend on connection `conn_id`. + let chan_id = ChannelId::new(24); + let hops = vec![conn_id.clone()]; + msg.channel.connection_hops = hops; + + // This message does not assume a channel should already be initialized. + let msg_vanilla = msg.clone(); + + // A preloaded channel end that resides in the context. This is constructed so as to be + // consistent with the incoming ChanOpenTry message `msg`. + let correct_chan_end = ChannelEnd::new( + State::Init, + *msg.channel.ordering(), + msg.channel.counterparty().clone(), + msg.channel.connection_hops().clone(), + msg.channel.version().clone(), + ); + + let tests: Vec = vec![ + Test { + name: "Processing fails because no connection exists in the context".to_string(), + ctx: context.clone(), + msg: ChannelMsg::ChannelOpenTry(msg_vanilla.clone()), + want_pass: false, + match_error: { + let connection_id = msg.channel.connection_hops()[0].clone(); + Box::new(move |e| match e { + error::ErrorDetail::Ics03Connection(e) => { + assert_eq!( + e.source, + ics03_error::ErrorDetail::ConnectionNotFound( + ics03_error::ConnectionNotFoundSubdetail { connection_id } + ) + ); + }, + _ => { + panic!("Expected MissingConnection, instead got {}", e) + }, + }) + }, + }, + Test { + name: "Processing fails b/c the context has no client state".to_string(), + ctx: context + .clone() + .with_connection(conn_id.clone(), conn_end.clone()) + .with_channel(msg.port_id.clone(), chan_id, correct_chan_end.clone()), + msg: ChannelMsg::ChannelOpenTry(msg.clone()), + want_pass: false, + match_error: Box::new(|e| match e { + error::ErrorDetail::Ics02Client(e) => { + assert_eq!( + e.source, + ics02_error::ErrorDetail::ClientNotFound( + ics02_error::ClientNotFoundSubdetail { + client_id: ClientId::new(MockClientState::client_type(), 45) + .unwrap() + } + ) + ); + }, + _ => { + panic!("Expected MissingClientState, instead got {}", e) + }, + }), + }, + Test { + name: "Processing is successful".to_string(), + ctx: context + .clone() + .with_client(&client_id, Height::new(0, proof_height)) + .with_connection(conn_id.clone(), conn_end.clone()) + .with_channel(msg.port_id.clone(), chan_id, correct_chan_end), + msg: ChannelMsg::ChannelOpenTry(msg), + want_pass: true, + match_error: Box::new(|_| {}), + }, + Test { + name: "Processing is successful against an empty context (no preexisting channel)" + .to_string(), + ctx: context + .with_client(&client_id, Height::new(0, proof_height)) + .with_connection(conn_id, conn_end), + msg: ChannelMsg::ChannelOpenTry(msg_vanilla), + want_pass: true, + match_error: Box::new(|_| {}), + }, + ] + .into_iter() + .collect(); + + for test in tests { + let res = channel_dispatch(&test.ctx, &test.msg); + // Additionally check the events and the 
output objects in the result. + match res { + Ok((proto_output, res)) => { + assert!( test.want_pass, "chan_open_ack: test passed but was supposed to fail for test: {}, \nparams {:?} {:?}", test.name, @@ -293,19 +302,19 @@ mod tests { test.ctx.clone() ); - let proto_output = proto_output.with_result(()); - assert!(!proto_output.events.is_empty()); // Some events must exist. + let proto_output = proto_output.with_result(()); + assert!(!proto_output.events.is_empty()); // Some events must exist. - // The object in the output is a channel end, should have TryOpen state. - assert_eq!(res.channel_end.state().clone(), State::TryOpen); + // The object in the output is a channel end, should have TryOpen state. + assert_eq!(res.channel_end.state().clone(), State::TryOpen); - for e in proto_output.events.iter() { - assert!(matches!(e, &IbcEvent::OpenTryChannel(_))); - assert_eq!(e.height(), test.ctx.host_height()); - } - } - Err(e) => { - assert!( + for e in proto_output.events.iter() { + assert!(matches!(e, &IbcEvent::OpenTryChannel(_))); + assert_eq!(e.height(), test.ctx.host_height()); + } + }, + Err(e) => { + assert!( !test.want_pass, "chan_open_try: did not pass test: {}, \nparams:\n\tmsg={:?}\n\tcontext={:?}\nerror: {:?}", test.name, @@ -314,9 +323,9 @@ mod tests { e, ); - (test.match_error)(e.0); - } - } - } - } + (test.match_error)(e.0); + }, + } + } + } } diff --git a/modules/src/core/ics04_channel/handler/recv_packet.rs b/modules/src/core/ics04_channel/handler/recv_packet.rs index b486fad1df..48685e777c 100644 --- a/modules/src/core/ics04_channel/handler/recv_packet.rs +++ b/modules/src/core/ics04_channel/handler/recv_packet.rs @@ -1,294 +1,293 @@ -use crate::clients::host_functions::HostFunctionsProvider; -use crate::core::ics03_connection::connection::State as ConnectionState; -use crate::core::ics04_channel::channel::{Counterparty, Order, State}; -use crate::core::ics04_channel::error::Error; -use crate::core::ics04_channel::events::ReceivePacket; -use crate::core::ics04_channel::handler::verify::verify_packet_recv_proofs; -use crate::core::ics04_channel::msgs::recv_packet::MsgRecvPacket; -use crate::core::ics04_channel::packet::{Packet, PacketResult, Receipt, Sequence}; -use crate::core::ics24_host::identifier::{ChannelId, PortId}; -use crate::core::ics26_routing::context::ReaderContext; -use crate::events::IbcEvent; -use crate::handler::{HandlerOutput, HandlerResult}; -use crate::prelude::*; -use crate::timestamp::Expiry; -use crate::Height; +use crate::{ + core::{ + ics03_connection::connection::State as ConnectionState, + ics04_channel::{ + channel::{Counterparty, Order, State}, + error::Error, + events::ReceivePacket, + handler::verify::verify_packet_recv_proofs, + msgs::recv_packet::MsgRecvPacket, + packet::{Packet, PacketResult, Receipt, Sequence}, + }, + ics24_host::identifier::{ChannelId, PortId}, + ics26_routing::context::ReaderContext, + }, + events::IbcEvent, + handler::{HandlerOutput, HandlerResult}, + prelude::*, + timestamp::Expiry, + Height, +}; use core::fmt::Debug; #[derive(Clone, Debug)] pub enum RecvPacketResult { - NoOp, - Unordered { - port_id: PortId, - channel_id: ChannelId, - sequence: Sequence, - receipt: Receipt, - packet: Packet, - }, - Ordered { - port_id: PortId, - channel_id: ChannelId, - next_seq_recv: Sequence, - packet: Packet, - }, + NoOp, + Unordered { + port_id: PortId, + channel_id: ChannelId, + sequence: Sequence, + receipt: Receipt, + packet: Packet, + }, + Ordered { + port_id: PortId, + channel_id: ChannelId, + next_seq_recv: Sequence, + packet: 
Packet, + }, } -pub fn process( - ctx: &dyn ReaderContext, - msg: &MsgRecvPacket, +pub fn process( + ctx: &Ctx, + msg: &MsgRecvPacket, ) -> HandlerResult { - let mut output = HandlerOutput::builder(); - - let packet = &msg.packet; - - let dest_channel_end = - ctx.channel_end(&(packet.destination_port.clone(), packet.destination_channel))?; - - if !dest_channel_end.state_matches(&State::Open) { - return Err(Error::invalid_channel_state( - packet.source_channel, - dest_channel_end.state, - )); - } - - let counterparty = Counterparty::new(packet.source_port.clone(), Some(packet.source_channel)); - - if !dest_channel_end.counterparty_matches(&counterparty) { - return Err(Error::invalid_packet_counterparty( - packet.source_port.clone(), - packet.source_channel, - )); - } - - let connection_end = ctx - .connection_end(&dest_channel_end.connection_hops()[0]) - .map_err(Error::ics03_connection)?; - - if !connection_end.state_matches(&ConnectionState::Open) { - return Err(Error::connection_not_open( - dest_channel_end.connection_hops()[0].clone(), - )); - } - - let latest_height = ctx.host_height(); - if (!packet.timeout_height.is_zero()) && (packet.timeout_height <= latest_height) { - return Err(Error::low_packet_height( - latest_height, - packet.timeout_height, - )); - } - - let latest_timestamp = ctx.host_timestamp(); - if let Expiry::Expired = latest_timestamp.check_expiry(&packet.timeout_timestamp) { - return Err(Error::low_packet_timestamp()); - } - - verify_packet_recv_proofs::( - ctx, - msg.proofs.height(), - packet, - &connection_end, - &msg.proofs, - )?; - - let result = if dest_channel_end.order_matches(&Order::Ordered) { - let next_seq_recv = ctx.get_next_sequence_recv(&( - packet.destination_port.clone(), - packet.destination_channel, - ))?; - - if packet.sequence < next_seq_recv { - output.emit(IbcEvent::ReceivePacket(ReceivePacket { - height: Height::zero(), - packet: msg.packet.clone(), - })); - return Ok(output.with_result(PacketResult::Recv(RecvPacketResult::NoOp))); - } else if packet.sequence != next_seq_recv { - return Err(Error::invalid_packet_sequence( - packet.sequence, - next_seq_recv, - )); - } - - PacketResult::Recv(RecvPacketResult::Ordered { - port_id: packet.destination_port.clone(), - channel_id: packet.destination_channel, - next_seq_recv: next_seq_recv.increment(), - packet: packet.clone(), - }) - } else { - let packet_rec = ctx.get_packet_receipt(&( - packet.destination_port.clone(), - packet.destination_channel, - packet.sequence, - )); - - match packet_rec { - Ok(_receipt) => { - output.emit(IbcEvent::ReceivePacket(ReceivePacket { - height: Height::zero(), - packet: msg.packet.clone(), - })); - return Ok(output.with_result(PacketResult::Recv(RecvPacketResult::NoOp))); - } - Err(e) if e.detail() == Error::packet_receipt_not_found(packet.sequence).detail() => { - // store a receipt that does not contain any data - PacketResult::Recv(RecvPacketResult::Unordered { - port_id: packet.destination_port.clone(), - channel_id: packet.destination_channel, - sequence: packet.sequence, - receipt: Receipt::Ok, - packet: packet.clone(), - }) - } - Err(e) => return Err(Error::implementation_specific(e.to_string())), - } - }; - - output.log("success: packet receive"); - - output.emit(IbcEvent::ReceivePacket(ReceivePacket { - height: ctx.host_height(), - packet: msg.packet.clone(), - })); - - Ok(output.with_result(result)) + let mut output = HandlerOutput::builder(); + + let packet = &msg.packet; + + let dest_channel_end = + ctx.channel_end(&(packet.destination_port.clone(), 
packet.destination_channel))?; + + if !dest_channel_end.state_matches(&State::Open) { + return Err(Error::invalid_channel_state(packet.source_channel, dest_channel_end.state)) + } + + let counterparty = Counterparty::new(packet.source_port.clone(), Some(packet.source_channel)); + + if !dest_channel_end.counterparty_matches(&counterparty) { + return Err(Error::invalid_packet_counterparty( + packet.source_port.clone(), + packet.source_channel, + )) + } + + let connection_end = ctx + .connection_end(&dest_channel_end.connection_hops()[0]) + .map_err(Error::ics03_connection)?; + + if !connection_end.state_matches(&ConnectionState::Open) { + return Err(Error::connection_not_open(dest_channel_end.connection_hops()[0].clone())) + } + + let latest_height = ctx.host_height(); + if (!packet.timeout_height.is_zero()) && (packet.timeout_height <= latest_height) { + return Err(Error::low_packet_height(latest_height, packet.timeout_height)) + } + + let latest_timestamp = ctx.host_timestamp(); + if let Expiry::Expired = latest_timestamp.check_expiry(&packet.timeout_timestamp) { + return Err(Error::low_packet_timestamp()) + } + + verify_packet_recv_proofs::( + ctx, + msg.proofs.height(), + packet, + &connection_end, + &msg.proofs, + )?; + + let result = if dest_channel_end.order_matches(&Order::Ordered) { + let next_seq_recv = ctx.get_next_sequence_recv(&( + packet.destination_port.clone(), + packet.destination_channel, + ))?; + + if packet.sequence < next_seq_recv { + output.emit(IbcEvent::ReceivePacket(ReceivePacket { + height: Height::zero(), + packet: msg.packet.clone(), + })); + return Ok(output.with_result(PacketResult::Recv(RecvPacketResult::NoOp))) + } else if packet.sequence != next_seq_recv { + return Err(Error::invalid_packet_sequence(packet.sequence, next_seq_recv)) + } + + PacketResult::Recv(RecvPacketResult::Ordered { + port_id: packet.destination_port.clone(), + channel_id: packet.destination_channel, + next_seq_recv: next_seq_recv.increment(), + packet: packet.clone(), + }) + } else { + let packet_rec = ctx.get_packet_receipt(&( + packet.destination_port.clone(), + packet.destination_channel, + packet.sequence, + )); + + match packet_rec { + Ok(_receipt) => { + output.emit(IbcEvent::ReceivePacket(ReceivePacket { + height: Height::zero(), + packet: msg.packet.clone(), + })); + return Ok(output.with_result(PacketResult::Recv(RecvPacketResult::NoOp))) + }, + Err(e) if e.detail() == Error::packet_receipt_not_found(packet.sequence).detail() => { + // store a receipt that does not contain any data + PacketResult::Recv(RecvPacketResult::Unordered { + port_id: packet.destination_port.clone(), + channel_id: packet.destination_channel, + sequence: packet.sequence, + receipt: Receipt::Ok, + packet: packet.clone(), + }) + }, + Err(e) => return Err(Error::implementation_specific(e.to_string())), + } + }; + + output.log("success: packet receive"); + + output.emit(IbcEvent::ReceivePacket(ReceivePacket { + height: ctx.host_height(), + packet: msg.packet.clone(), + })); + + Ok(output.with_result(result)) } #[cfg(test)] mod tests { - use crate::prelude::*; - - use test_log::test; - - use crate::core::ics02_client::context::ClientReader; - use crate::core::ics03_connection::connection::ConnectionEnd; - use crate::core::ics03_connection::connection::Counterparty as ConnectionCounterparty; - use crate::core::ics03_connection::connection::State as ConnectionState; - use crate::core::ics03_connection::version::get_compatible_versions; - use crate::core::ics04_channel::channel::{ChannelEnd, Counterparty, Order, 
State}; - use crate::core::ics04_channel::handler::recv_packet::process; - use crate::core::ics04_channel::msgs::recv_packet::test_util::get_dummy_raw_msg_recv_packet; - use crate::core::ics04_channel::msgs::recv_packet::MsgRecvPacket; - use crate::core::ics04_channel::Version; - use crate::core::ics24_host::identifier::{ChannelId, ClientId, ConnectionId, PortId}; - use crate::mock::context::MockContext; - use crate::relayer::ics18_relayer::context::Ics18Context; - use crate::test_utils::get_dummy_account_id; - use crate::test_utils::Crypto; - use crate::timestamp::Timestamp; - use crate::timestamp::ZERO_DURATION; - use crate::{core::ics04_channel::packet::Packet, events::IbcEvent}; - - #[test] - fn recv_packet_processing() { - struct Test { - name: String, - ctx: MockContext, - msg: MsgRecvPacket, - want_pass: bool, - } - - let context = MockContext::default(); - - let host_height = context.query_latest_height().increment(); - - let client_height = host_height.increment(); - - let msg = - MsgRecvPacket::try_from(get_dummy_raw_msg_recv_packet(client_height.revision_height)) - .unwrap(); - - let packet = msg.packet.clone(); - - let packet_old = Packet { - sequence: 1.into(), - source_port: PortId::default(), - source_channel: ChannelId::default(), - destination_port: PortId::default(), - destination_channel: ChannelId::default(), - data: Vec::new(), - timeout_height: client_height, - timeout_timestamp: Timestamp::from_nanoseconds(1).unwrap(), - }; - - let msg_packet_old = - MsgRecvPacket::new(packet_old, msg.proofs.clone(), get_dummy_account_id()); - - let dest_channel_end = ChannelEnd::new( - State::Open, - Order::default(), - Counterparty::new(packet.source_port.clone(), Some(packet.source_channel)), - vec![ConnectionId::default()], - Version::ics20(), - ); - - let connection_end = ConnectionEnd::new( - ConnectionState::Open, - ClientId::default(), - ConnectionCounterparty::new( - ClientId::default(), - Some(ConnectionId::default()), - Default::default(), - ), - get_compatible_versions(), - ZERO_DURATION, - ); - - let tests: Vec = vec![ - Test { - name: "Processing fails because no channel exists in the context".to_string(), - ctx: context.clone(), - msg: msg.clone(), - want_pass: false, - }, - Test { - name: "Good parameters".to_string(), - ctx: context - .clone() - .with_client(&ClientId::default(), client_height) - .with_connection(ConnectionId::default(), connection_end.clone()) - .with_channel( - packet.destination_port.clone(), - packet.destination_channel, - dest_channel_end.clone(), - ) - .with_send_sequence( - packet.destination_port.clone(), - packet.destination_channel, - 1.into(), - ) - .with_height(host_height) - // This `with_recv_sequence` is required for ordered channels - .with_recv_sequence( - packet.destination_port.clone(), - packet.destination_channel, - packet.sequence, - ), - msg, - want_pass: true, - }, - Test { - name: "Packet timeout expired".to_string(), - ctx: context - .with_client(&ClientId::default(), client_height) - .with_connection(ConnectionId::default(), connection_end) - .with_channel(PortId::default(), ChannelId::default(), dest_channel_end) - .with_send_sequence(PortId::default(), ChannelId::default(), 1.into()) - .with_height(host_height), - msg: msg_packet_old, - want_pass: false, - }, - ] - .into_iter() - .collect(); - - for test in tests { - let res = process::(&test.ctx, &test.msg); - // Additionally check the events and the output objects in the result. 
- match res { - Ok(proto_output) => { - assert!( + use crate::prelude::*; + + use test_log::test; + + use crate::{ + core::{ + ics02_client::context::ClientReader, + ics03_connection::{ + connection::{ + ConnectionEnd, Counterparty as ConnectionCounterparty, State as ConnectionState, + }, + version::get_compatible_versions, + }, + ics04_channel::{ + channel::{ChannelEnd, Counterparty, Order, State}, + handler::recv_packet::process, + msgs::recv_packet::{test_util::get_dummy_raw_msg_recv_packet, MsgRecvPacket}, + packet::Packet, + Version, + }, + ics24_host::identifier::{ChannelId, ClientId, ConnectionId, PortId}, + }, + events::IbcEvent, + mock::context::{MockClientTypes, MockContext}, + test_utils::get_dummy_account_id, + timestamp::{Timestamp, ZERO_DURATION}, + }; + + #[test] + fn recv_packet_processing() { + struct Test { + name: String, + ctx: MockContext, + msg: MsgRecvPacket, + want_pass: bool, + } + + let context = MockContext::default(); + + let host_height = context.latest_height().increment(); + + let client_height = host_height.increment(); + + let msg = + MsgRecvPacket::try_from(get_dummy_raw_msg_recv_packet(client_height.revision_height)) + .unwrap(); + + let packet = msg.packet.clone(); + + let packet_old = Packet { + sequence: 1.into(), + source_port: PortId::default(), + source_channel: ChannelId::default(), + destination_port: PortId::default(), + destination_channel: ChannelId::default(), + data: Vec::new(), + timeout_height: client_height, + timeout_timestamp: Timestamp::from_nanoseconds(1).unwrap(), + }; + + let msg_packet_old = + MsgRecvPacket::new(packet_old, msg.proofs.clone(), get_dummy_account_id()); + + let dest_channel_end = ChannelEnd::new( + State::Open, + Order::default(), + Counterparty::new(packet.source_port.clone(), Some(packet.source_channel)), + vec![ConnectionId::default()], + Version::ics20(), + ); + + let connection_end = ConnectionEnd::new( + ConnectionState::Open, + ClientId::default(), + ConnectionCounterparty::new( + ClientId::default(), + Some(ConnectionId::default()), + Default::default(), + ), + get_compatible_versions(), + ZERO_DURATION, + ); + + let tests: Vec = vec![ + Test { + name: "Processing fails because no channel exists in the context".to_string(), + ctx: context.clone(), + msg: msg.clone(), + want_pass: false, + }, + Test { + name: "Good parameters".to_string(), + ctx: context + .clone() + .with_client(&ClientId::default(), client_height) + .with_connection(ConnectionId::default(), connection_end.clone()) + .with_channel( + packet.destination_port.clone(), + packet.destination_channel, + dest_channel_end.clone(), + ) + .with_send_sequence( + packet.destination_port.clone(), + packet.destination_channel, + 1.into(), + ) + .with_height(host_height) + // This `with_recv_sequence` is required for ordered channels + .with_recv_sequence( + packet.destination_port.clone(), + packet.destination_channel, + packet.sequence, + ), + msg, + want_pass: true, + }, + Test { + name: "Packet timeout expired".to_string(), + ctx: context + .with_client(&ClientId::default(), client_height) + .with_connection(ConnectionId::default(), connection_end) + .with_channel(PortId::default(), ChannelId::default(), dest_channel_end) + .with_send_sequence(PortId::default(), ChannelId::default(), 1.into()) + .with_height(host_height), + msg: msg_packet_old, + want_pass: false, + }, + ] + .into_iter() + .collect(); + + for test in tests { + let res = process(&test.ctx, &test.msg); + // Additionally check the events and the output objects in the result. 
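// [Editorial aside -- illustrative sketch, not part of this patch.] The `recv_packet`
// handler exercised by these tests distinguishes ordered channels (which track the next
// expected sequence) from unordered ones (which record a per-sequence receipt), and it
// turns duplicate deliveries into a no-op. A toy model of that bookkeeping; the types
// and function below are illustrative, not ibc-rs items.
use std::collections::HashSet;

enum RecvOutcome {
    NoOp,
    Stored,
}

struct ToyChannelState {
    ordered: bool,
    next_seq_recv: u64,
    receipts: HashSet<u64>,
}

fn toy_recv(chan: &mut ToyChannelState, seq: u64) -> Result<RecvOutcome, String> {
    if chan.ordered {
        if seq < chan.next_seq_recv {
            // Already processed: acknowledge the duplicate without changing state.
            return Ok(RecvOutcome::NoOp);
        }
        if seq != chan.next_seq_recv {
            return Err(format!("expected sequence {}, got {}", chan.next_seq_recv, seq));
        }
        chan.next_seq_recv += 1;
        Ok(RecvOutcome::Stored)
    } else if chan.receipts.contains(&seq) {
        // Duplicate delivery on an unordered channel is a no-op.
        Ok(RecvOutcome::NoOp)
    } else {
        chan.receipts.insert(seq);
        Ok(RecvOutcome::Stored)
    }
}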
+ match res { + Ok(proto_output) => { + assert!( test.want_pass, "recv_packet: test passed but was supposed to fail for test: {}, \nparams \n msg={:?}\nctx:{:?}", test.name, @@ -296,15 +295,15 @@ mod tests { test.ctx.clone() ); - assert!(!proto_output.events.is_empty()); // Some events must exist. + assert!(!proto_output.events.is_empty()); // Some events must exist. - for e in proto_output.events.iter() { - assert!(matches!(e, &IbcEvent::ReceivePacket(_))); - assert_eq!(e.height(), test.ctx.host_height()); - } - } - Err(e) => { - assert!( + for e in proto_output.events.iter() { + assert!(matches!(e, &IbcEvent::ReceivePacket(_))); + assert_eq!(e.height(), test.ctx.host_height()); + } + }, + Err(e) => { + assert!( !test.want_pass, "recv_packet: did not pass test: {}, \nparams \nmsg={:?}\nctx={:?}\nerror={:?}", test.name, @@ -312,8 +311,8 @@ mod tests { test.ctx.clone(), e, ); - } - } - } - } + }, + } + } + } } diff --git a/modules/src/core/ics04_channel/handler/send_packet.rs b/modules/src/core/ics04_channel/handler/send_packet.rs index ca484371f8..63854e9820 100644 --- a/modules/src/core/ics04_channel/handler/send_packet.rs +++ b/modules/src/core/ics04_channel/handler/send_packet.rs @@ -1,223 +1,223 @@ -use crate::core::ics02_client::client_state::ClientState; -use crate::core::ics04_channel::channel::Counterparty; -use crate::core::ics04_channel::channel::State; -use crate::core::ics04_channel::commitment::PacketCommitment; -use crate::core::ics04_channel::events::SendPacket; -use crate::core::ics04_channel::packet::{PacketResult, Sequence}; -use crate::core::ics04_channel::{error::Error, packet::Packet}; -use crate::core::ics24_host::identifier::{ChannelId, PortId}; -use crate::core::ics26_routing::context::ReaderContext; -use crate::events::IbcEvent; -use crate::handler::{HandlerOutput, HandlerResult}; -use crate::prelude::*; -use crate::timestamp::Expiry; +use crate::{ + core::{ + ics02_client::{client_consensus::ConsensusState, client_state::ClientState}, + ics04_channel::{ + channel::{Counterparty, State}, + commitment::PacketCommitment, + error::Error, + events::SendPacket, + packet::{Packet, PacketResult, Sequence}, + }, + ics24_host::identifier::{ChannelId, PortId}, + ics26_routing::context::ReaderContext, + }, + events::IbcEvent, + handler::{HandlerOutput, HandlerResult}, + prelude::*, + timestamp::Expiry, +}; #[derive(Clone, Debug)] pub struct SendPacketResult { - pub port_id: PortId, - pub channel_id: ChannelId, - pub seq: Sequence, - pub seq_number: Sequence, - pub commitment: PacketCommitment, - pub packet: Packet, + pub port_id: PortId, + pub channel_id: ChannelId, + pub seq: Sequence, + pub seq_number: Sequence, + pub commitment: PacketCommitment, + pub packet: Packet, } -pub fn send_packet(ctx: &dyn ReaderContext, packet: Packet) -> HandlerResult { - let mut output = HandlerOutput::builder(); - - let source_channel_end = - ctx.channel_end(&(packet.source_port.clone(), packet.source_channel))?; - - if source_channel_end.state_matches(&State::Closed) { - return Err(Error::channel_closed(packet.source_channel)); - } - - let counterparty = Counterparty::new( - packet.destination_port.clone(), - Some(packet.destination_channel), - ); - - if !source_channel_end.counterparty_matches(&counterparty) { - return Err(Error::invalid_packet_counterparty( - packet.destination_port.clone(), - packet.destination_channel, - )); - } - - let connection_end = ctx - .connection_end(&source_channel_end.connection_hops()[0]) - .map_err(Error::ics03_connection)?; - - let client_id = 
connection_end.client_id().clone(); - - let client_state = ctx - .client_state(&client_id) - .map_err(|e| Error::implementation_specific(e.to_string()))?; - - // prevent accidental sends with clients that cannot be updated - if client_state.is_frozen() { - return Err(Error::frozen_client(connection_end.client_id().clone())); - } - - let latest_height = client_state.latest_height(); - - if !packet.timeout_height.is_zero() && packet.timeout_height <= latest_height { - return Err(Error::low_packet_height( - latest_height, - packet.timeout_height, - )); - } - - let consensus_state = ctx - .consensus_state(&client_id, latest_height) - .map_err(|_| Error::error_invalid_consensus_state())?; - let latest_timestamp = consensus_state.timestamp(); - let packet_timestamp = packet.timeout_timestamp; - if let Expiry::Expired = latest_timestamp.check_expiry(&packet_timestamp) { - return Err(Error::low_packet_timestamp()); - } - - let next_seq_send = - ctx.get_next_sequence_send(&(packet.source_port.clone(), packet.source_channel))?; - - if packet.sequence != next_seq_send { - return Err(Error::invalid_packet_sequence( - packet.sequence, - next_seq_send, - )); - } - - output.log("success: packet send "); - - let result = PacketResult::Send(SendPacketResult { - port_id: packet.source_port.clone(), - channel_id: packet.source_channel, - seq: packet.sequence, - seq_number: next_seq_send.increment(), - packet: packet.clone(), - commitment: ctx.packet_commitment( - packet.data.clone(), - packet.timeout_height, - packet.timeout_timestamp, - ), - }); - - output.emit(IbcEvent::SendPacket(SendPacket { - height: ctx.host_height(), - packet, - })); - - Ok(output.with_result(result)) +pub fn send_packet( + ctx: &Ctx, + packet: Packet, +) -> HandlerResult { + let mut output = HandlerOutput::builder(); + + let source_channel_end = + ctx.channel_end(&(packet.source_port.clone(), packet.source_channel))?; + + if source_channel_end.state_matches(&State::Closed) { + return Err(Error::channel_closed(packet.source_channel)) + } + + let counterparty = + Counterparty::new(packet.destination_port.clone(), Some(packet.destination_channel)); + + if !source_channel_end.counterparty_matches(&counterparty) { + return Err(Error::invalid_packet_counterparty( + packet.destination_port.clone(), + packet.destination_channel, + )) + } + + let connection_end = ctx + .connection_end(&source_channel_end.connection_hops()[0]) + .map_err(Error::ics03_connection)?; + + let client_id = connection_end.client_id().clone(); + + let client_state = ctx + .client_state(&client_id) + .map_err(|e| Error::implementation_specific(e.to_string()))?; + + // prevent accidental sends with clients that cannot be updated + if client_state.is_frozen() { + return Err(Error::frozen_client(connection_end.client_id().clone())) + } + + let latest_height = client_state.latest_height(); + + if !packet.timeout_height.is_zero() && packet.timeout_height <= latest_height { + return Err(Error::low_packet_height(latest_height, packet.timeout_height)) + } + + let consensus_state = ctx + .consensus_state(&client_id, latest_height) + .map_err(|_| Error::error_invalid_consensus_state())?; + let latest_timestamp = consensus_state.timestamp(); + let packet_timestamp = packet.timeout_timestamp; + if let Expiry::Expired = latest_timestamp.check_expiry(&packet_timestamp) { + return Err(Error::low_packet_timestamp()) + } + + let next_seq_send = + ctx.get_next_sequence_send(&(packet.source_port.clone(), packet.source_channel))?; + + if packet.sequence != next_seq_send { + return 
Err(Error::invalid_packet_sequence(packet.sequence, next_seq_send)) + } + + output.log("success: packet send "); + + let result = PacketResult::Send(SendPacketResult { + port_id: packet.source_port.clone(), + channel_id: packet.source_channel, + seq: packet.sequence, + seq_number: next_seq_send.increment(), + packet: packet.clone(), + commitment: ctx.packet_commitment( + packet.data.clone(), + packet.timeout_height, + packet.timeout_timestamp, + ), + }); + + output.emit(IbcEvent::SendPacket(SendPacket { height: ctx.host_height(), packet })); + + Ok(output.with_result(result)) } #[cfg(test)] mod tests { - use core::ops::Add; - use core::time::Duration; - - use test_log::test; - - use crate::core::ics02_client::context::ClientReader; - use crate::core::ics02_client::height::Height; - use crate::core::ics03_connection::connection::ConnectionEnd; - use crate::core::ics03_connection::connection::Counterparty as ConnectionCounterparty; - use crate::core::ics03_connection::connection::State as ConnectionState; - use crate::core::ics03_connection::version::get_compatible_versions; - use crate::core::ics04_channel::channel::{ChannelEnd, Counterparty, Order, State}; - use crate::core::ics04_channel::handler::send_packet::send_packet; - use crate::core::ics04_channel::packet::test_utils::get_dummy_raw_packet; - use crate::core::ics04_channel::packet::Packet; - use crate::core::ics04_channel::Version; - use crate::core::ics24_host::identifier::{ChannelId, ClientId, ConnectionId, PortId}; - use crate::events::IbcEvent; - use crate::mock::context::MockContext; - use crate::prelude::*; - use crate::timestamp::Timestamp; - use crate::timestamp::ZERO_DURATION; - - #[test] - fn send_packet_processing() { - struct Test { - name: String, - ctx: MockContext, - packet: Packet, - want_pass: bool, - } - - let context = MockContext::default(); - - let timestamp = Timestamp::now().add(Duration::from_secs(10)); - //CD:TODO remove unwrap - let mut packet: Packet = get_dummy_raw_packet(1, timestamp.unwrap().nanoseconds()) - .try_into() - .unwrap(); - packet.sequence = 1.into(); - packet.data = vec![0]; - - let channel_end = ChannelEnd::new( - State::TryOpen, - Order::default(), - Counterparty::new(PortId::default(), Some(ChannelId::default())), - vec![ConnectionId::default()], - Version::ics20(), - ); - - let connection_end = ConnectionEnd::new( - ConnectionState::Open, - ClientId::default(), - ConnectionCounterparty::new( - ClientId::default(), - Some(ConnectionId::default()), - Default::default(), - ), - get_compatible_versions(), - ZERO_DURATION, - ); - - let mut packet_old: Packet = get_dummy_raw_packet(1, 1).try_into().unwrap(); - packet_old.sequence = 1.into(); - packet_old.data = vec![0]; - - let client_height = Height::new(0, Height::default().revision_height + 1); - - let tests: Vec = vec![ - Test { - name: "Processing fails because no channel exists in the context".to_string(), - ctx: context.clone(), - packet: packet.clone(), - want_pass: false, - }, - Test { - name: "Good parameters".to_string(), - ctx: context - .clone() - .with_client(&ClientId::default(), Height::default()) - .with_connection(ConnectionId::default(), connection_end.clone()) - .with_channel(PortId::default(), ChannelId::default(), channel_end.clone()) - .with_send_sequence(PortId::default(), ChannelId::default(), 1.into()), - packet, - want_pass: true, - }, - Test { - name: "Packet timeout".to_string(), - ctx: context - .with_client(&ClientId::default(), client_height) - .with_connection(ConnectionId::default(), connection_end) - 
.with_channel(PortId::default(), ChannelId::default(), channel_end) - .with_send_sequence(PortId::default(), ChannelId::default(), 1.into()), - packet: packet_old, - want_pass: false, - }, - ] - .into_iter() - .collect(); - - for test in tests { - let res = send_packet(&test.ctx, test.packet.clone()); - // Additionally check the events and the output objects in the result. - match res { - Ok(proto_output) => { - assert!( + use core::{ops::Add, time::Duration}; + + use test_log::test; + + use crate::{ + core::{ + ics02_client::{context::ClientReader, height::Height}, + ics03_connection::{ + connection::{ + ConnectionEnd, Counterparty as ConnectionCounterparty, State as ConnectionState, + }, + version::get_compatible_versions, + }, + ics04_channel::{ + channel::{ChannelEnd, Counterparty, Order, State}, + handler::send_packet::send_packet, + packet::{test_utils::get_dummy_raw_packet, Packet}, + Version, + }, + ics24_host::identifier::{ChannelId, ClientId, ConnectionId, PortId}, + }, + events::IbcEvent, + mock::context::{MockClientTypes, MockContext}, + prelude::*, + timestamp::{Timestamp, ZERO_DURATION}, + }; + + #[test] + fn send_packet_processing() { + struct Test { + name: String, + ctx: MockContext, + packet: Packet, + want_pass: bool, + } + + let context = MockContext::default(); + + let timestamp = Timestamp::now().add(Duration::from_secs(10)); + //CD:TODO remove unwrap + let mut packet: Packet = + get_dummy_raw_packet(1, timestamp.unwrap().nanoseconds()).try_into().unwrap(); + packet.sequence = 1.into(); + packet.data = vec![0]; + + let channel_end = ChannelEnd::new( + State::TryOpen, + Order::default(), + Counterparty::new(PortId::default(), Some(ChannelId::default())), + vec![ConnectionId::default()], + Version::ics20(), + ); + + let connection_end = ConnectionEnd::new( + ConnectionState::Open, + ClientId::default(), + ConnectionCounterparty::new( + ClientId::default(), + Some(ConnectionId::default()), + Default::default(), + ), + get_compatible_versions(), + ZERO_DURATION, + ); + + let mut packet_old: Packet = get_dummy_raw_packet(1, 1).try_into().unwrap(); + packet_old.sequence = 1.into(); + packet_old.data = vec![0]; + + let client_height = Height::new(0, Height::default().revision_height + 1); + + let tests: Vec = vec![ + Test { + name: "Processing fails because no channel exists in the context".to_string(), + ctx: context.clone(), + packet: packet.clone(), + want_pass: false, + }, + Test { + name: "Good parameters".to_string(), + ctx: context + .clone() + .with_client(&ClientId::default(), Height::default()) + .with_connection(ConnectionId::default(), connection_end.clone()) + .with_channel(PortId::default(), ChannelId::default(), channel_end.clone()) + .with_send_sequence(PortId::default(), ChannelId::default(), 1.into()), + packet, + want_pass: true, + }, + Test { + name: "Packet timeout".to_string(), + ctx: context + .with_client(&ClientId::default(), client_height) + .with_connection(ConnectionId::default(), connection_end) + .with_channel(PortId::default(), ChannelId::default(), channel_end) + .with_send_sequence(PortId::default(), ChannelId::default(), 1.into()), + packet: packet_old, + want_pass: false, + }, + ] + .into_iter() + .collect(); + + for test in tests { + let res = send_packet(&test.ctx, test.packet.clone()); + // Additionally check the events and the output objects in the result. 
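// [Editorial aside -- illustrative sketch, not part of this patch.] Before committing a
// packet, `send_packet` rejects packets whose timeout height is not beyond the client's
// latest height, or whose timeout timestamp has already passed relative to the latest
// consensus state. A toy version of those two checks (zero meaning "no timeout"; the
// exact comparison operators in the crate may differ slightly); names are illustrative.
struct ToyPacket {
    timeout_height: u64,
    timeout_timestamp: u64,
}

fn toy_send_checks(
    packet: &ToyPacket,
    client_latest_height: u64,
    consensus_timestamp: u64,
) -> Result<(), String> {
    // Height-based timeout: must still be in the future from the light client's view.
    if packet.timeout_height != 0 && packet.timeout_height <= client_latest_height {
        return Err("packet timeout height already reached".to_string());
    }
    // Time-based timeout: must not already have expired at the latest consensus timestamp.
    if packet.timeout_timestamp != 0 && packet.timeout_timestamp <= consensus_timestamp {
        return Err("packet timeout timestamp already passed".to_string());
    }
    Ok(())
}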
+ match res { + Ok(proto_output) => { + assert!( test.want_pass, "send_packet: test passed but was supposed to fail for test: {}, \nparams {:?} {:?}", test.name, @@ -225,25 +225,25 @@ mod tests { test.ctx.clone() ); - assert!(!proto_output.events.is_empty()); // Some events must exist. - - // TODO: The object in the output is a PacketResult what can we check on it? - for e in proto_output.events.iter() { - assert!(matches!(e, &IbcEvent::SendPacket(_))); - assert_eq!(e.height(), test.ctx.host_height()); - } - } - Err(e) => { - assert!( - !test.want_pass, - "send_packet: did not pass test: {}, \nparams {:?} {:?} error: {:?}", - test.name, - test.packet.clone(), - test.ctx.clone(), - e, - ); - } - } - } - } + assert!(!proto_output.events.is_empty()); // Some events must exist. + + // TODO: The object in the output is a PacketResult what can we check on it? + for e in proto_output.events.iter() { + assert!(matches!(e, &IbcEvent::SendPacket(_))); + assert_eq!(e.height(), test.ctx.host_height()); + } + }, + Err(e) => { + assert!( + !test.want_pass, + "send_packet: did not pass test: {}, \nparams {:?} {:?} error: {:?}", + test.name, + test.packet.clone(), + test.ctx.clone(), + e, + ); + }, + } + } + } } diff --git a/modules/src/core/ics04_channel/handler/timeout.rs b/modules/src/core/ics04_channel/handler/timeout.rs index ea771b0902..bd91824a90 100644 --- a/modules/src/core/ics04_channel/handler/timeout.rs +++ b/modules/src/core/ics04_channel/handler/timeout.rs @@ -1,224 +1,221 @@ -use crate::clients::host_functions::HostFunctionsProvider; -use crate::core::ics04_channel::channel::State; -use crate::core::ics04_channel::channel::{ChannelEnd, Counterparty, Order}; -use crate::core::ics04_channel::error::Error; -use crate::core::ics04_channel::events::TimeoutPacket; -use crate::core::ics04_channel::handler::verify::{ - verify_next_sequence_recv, verify_packet_receipt_absence, +use crate::{ + core::{ + ics02_client::client_consensus::ConsensusState, + ics04_channel::{ + channel::{ChannelEnd, Counterparty, Order, State}, + error::Error, + events::TimeoutPacket, + handler::verify::{verify_next_sequence_recv, verify_packet_receipt_absence}, + msgs::timeout::MsgTimeout, + packet::{PacketResult, Sequence}, + }, + ics24_host::identifier::{ChannelId, PortId}, + ics26_routing::context::ReaderContext, + }, + events::IbcEvent, + handler::{HandlerOutput, HandlerResult}, + prelude::*, }; -use crate::core::ics04_channel::msgs::timeout::MsgTimeout; -use crate::core::ics04_channel::packet::{PacketResult, Sequence}; -use crate::core::ics24_host::identifier::{ChannelId, PortId}; -use crate::core::ics26_routing::context::ReaderContext; -use crate::events::IbcEvent; -use crate::handler::{HandlerOutput, HandlerResult}; -use crate::prelude::*; use core::fmt::Debug; #[derive(Clone, Debug)] pub struct TimeoutPacketResult { - pub port_id: PortId, - pub channel_id: ChannelId, - pub seq: Sequence, - pub channel: Option, + pub port_id: PortId, + pub channel_id: ChannelId, + pub seq: Sequence, + pub channel: Option, } -pub fn process( - ctx: &dyn ReaderContext, - msg: &MsgTimeout, -) -> HandlerResult { - let mut output = HandlerOutput::builder(); - - let packet = &msg.packet; - - let mut source_channel_end = - ctx.channel_end(&(packet.source_port.clone(), packet.source_channel))?; - - if !source_channel_end.state_matches(&State::Open) { - return Err(Error::channel_closed(packet.source_channel)); - } - - let counterparty = Counterparty::new( - packet.destination_port.clone(), - Some(packet.destination_channel), - ); - - if 
!source_channel_end.counterparty_matches(&counterparty) { - return Err(Error::invalid_packet_counterparty( - packet.destination_port.clone(), - packet.destination_channel, - )); - } - - let connection_end = ctx - .connection_end(&source_channel_end.connection_hops()[0]) - .map_err(Error::ics03_connection)?; - - let client_id = connection_end.client_id().clone(); - - // check that timeout height or timeout timestamp has passed on the other end - let proof_height = msg.proofs.height(); - - let consensus_state = ctx - .consensus_state(&client_id, proof_height) - .map_err(|_| Error::error_invalid_consensus_state())?; - - let proof_timestamp = consensus_state.timestamp(); - - if !packet.timed_out(&proof_timestamp, proof_height) { - return Err(Error::packet_timeout_not_reached( - packet.timeout_height, - proof_height, - packet.timeout_timestamp, - proof_timestamp, - )); - } - - //verify packet commitment - let packet_commitment = ctx.get_packet_commitment(&( - packet.source_port.clone(), - packet.source_channel, - packet.sequence, - ))?; - - let expected_commitment = ctx.packet_commitment( - packet.data.clone(), - packet.timeout_height, - packet.timeout_timestamp, - ); - if packet_commitment != expected_commitment { - return Err(Error::incorrect_packet_commitment(packet.sequence)); - } - - let result = if source_channel_end.order_matches(&Order::Ordered) { - if packet.sequence < msg.next_sequence_recv { - return Err(Error::invalid_packet_sequence( - packet.sequence, - msg.next_sequence_recv, - )); - } - verify_next_sequence_recv::( - ctx, - msg.proofs.height(), - &connection_end, - packet.clone(), - msg.next_sequence_recv, - &msg.proofs, - )?; - - source_channel_end.state = State::Closed; - PacketResult::Timeout(TimeoutPacketResult { - port_id: packet.source_port.clone(), - channel_id: packet.source_channel, - seq: packet.sequence, - channel: Some(source_channel_end), - }) - } else { - verify_packet_receipt_absence::( - ctx, - msg.proofs.height(), - &connection_end, - packet.clone(), - &msg.proofs, - )?; - - PacketResult::Timeout(TimeoutPacketResult { - port_id: packet.source_port.clone(), - channel_id: packet.source_channel, - seq: packet.sequence, - channel: None, - }) - }; - - output.log("success: packet timeout "); - - output.emit(IbcEvent::TimeoutPacket(TimeoutPacket { - height: ctx.host_height(), - packet: packet.clone(), - })); - - Ok(output.with_result(result)) +pub fn process(ctx: &Ctx, msg: &MsgTimeout) -> HandlerResult +where + Ctx: ReaderContext, +{ + let mut output = HandlerOutput::builder(); + + let packet = &msg.packet; + + let mut source_channel_end = + ctx.channel_end(&(packet.source_port.clone(), packet.source_channel))?; + + if !source_channel_end.state_matches(&State::Open) { + return Err(Error::channel_closed(packet.source_channel)) + } + + let counterparty = + Counterparty::new(packet.destination_port.clone(), Some(packet.destination_channel)); + + if !source_channel_end.counterparty_matches(&counterparty) { + return Err(Error::invalid_packet_counterparty( + packet.destination_port.clone(), + packet.destination_channel, + )) + } + + let connection_end = ctx + .connection_end(&source_channel_end.connection_hops()[0]) + .map_err(Error::ics03_connection)?; + + let client_id = connection_end.client_id().clone(); + + // check that timeout height or timeout timestamp has passed on the other end + let proof_height = msg.proofs.height(); + + let consensus_state = ctx + .consensus_state(&client_id, proof_height) + .map_err(|_| Error::error_invalid_consensus_state())?; + + let 
proof_timestamp = consensus_state.timestamp(); + + if !packet.timed_out(&proof_timestamp, proof_height) { + return Err(Error::packet_timeout_not_reached( + packet.timeout_height, + proof_height, + packet.timeout_timestamp, + proof_timestamp, + )) + } + + //verify packet commitment + let packet_commitment = ctx.get_packet_commitment(&( + packet.source_port.clone(), + packet.source_channel, + packet.sequence, + ))?; + + let expected_commitment = + ctx.packet_commitment(packet.data.clone(), packet.timeout_height, packet.timeout_timestamp); + if packet_commitment != expected_commitment { + return Err(Error::incorrect_packet_commitment(packet.sequence)) + } + + let result = if source_channel_end.order_matches(&Order::Ordered) { + if packet.sequence < msg.next_sequence_recv { + return Err(Error::invalid_packet_sequence(packet.sequence, msg.next_sequence_recv)) + } + verify_next_sequence_recv::( + ctx, + msg.proofs.height(), + &connection_end, + packet.clone(), + msg.next_sequence_recv, + &msg.proofs, + )?; + + source_channel_end.state = State::Closed; + PacketResult::Timeout(TimeoutPacketResult { + port_id: packet.source_port.clone(), + channel_id: packet.source_channel, + seq: packet.sequence, + channel: Some(source_channel_end), + }) + } else { + verify_packet_receipt_absence::( + ctx, + msg.proofs.height(), + &connection_end, + packet.clone(), + &msg.proofs, + )?; + + PacketResult::Timeout(TimeoutPacketResult { + port_id: packet.source_port.clone(), + channel_id: packet.source_channel, + seq: packet.sequence, + channel: None, + }) + }; + + output.log("success: packet timeout "); + + output.emit(IbcEvent::TimeoutPacket(TimeoutPacket { + height: ctx.host_height(), + packet: packet.clone(), + })); + + Ok(output.with_result(result)) } #[cfg(test)] mod tests { - use test_log::test; - - use crate::core::ics02_client::context::ClientReader; - use crate::core::ics02_client::height::Height; - use crate::core::ics03_connection::connection::ConnectionEnd; - use crate::core::ics03_connection::connection::Counterparty as ConnectionCounterparty; - use crate::core::ics03_connection::connection::State as ConnectionState; - use crate::core::ics03_connection::version::get_compatible_versions; - use crate::core::ics04_channel::channel::{ChannelEnd, Counterparty, Order, State}; - use crate::core::ics04_channel::context::ChannelReader; - use crate::core::ics04_channel::handler::timeout::process; - use crate::core::ics04_channel::msgs::timeout::test_util::get_dummy_raw_msg_timeout; - use crate::core::ics04_channel::msgs::timeout::MsgTimeout; - use crate::core::ics04_channel::Version; - use crate::core::ics24_host::identifier::{ChannelId, ClientId, ConnectionId, PortId}; - use crate::events::IbcEvent; - use crate::mock::context::MockContext; - use crate::prelude::*; - use crate::test_utils::Crypto; - use crate::timestamp::ZERO_DURATION; - - #[test] - fn timeout_packet_processing() { - struct Test { - name: String, - ctx: MockContext, - msg: MsgTimeout, - want_pass: bool, - } - - let context = MockContext::default(); - - let height = Height::default().revision_height + 2; - let timeout_timestamp = 5; - - let client_height = Height::new(0, Height::default().revision_height + 2); - - let msg = - MsgTimeout::try_from(get_dummy_raw_msg_timeout(height, timeout_timestamp)).unwrap(); - let packet = msg.packet.clone(); - - let mut msg_ok = msg.clone(); - msg_ok.packet.timeout_timestamp = Default::default(); - - let data = context.packet_commitment( - msg_ok.packet.data.clone(), - msg_ok.packet.timeout_height, - 
msg_ok.packet.timeout_timestamp, - ); - - let source_channel_end = ChannelEnd::new( - State::Open, - Order::default(), - Counterparty::new( - packet.destination_port.clone(), - Some(packet.destination_channel), - ), - vec![ConnectionId::default()], - Version::ics20(), - ); - - let mut source_ordered_channel_end = source_channel_end.clone(); - source_ordered_channel_end.ordering = Order::Ordered; - - let connection_end = ConnectionEnd::new( - ConnectionState::Open, - ClientId::default(), - ConnectionCounterparty::new( - ClientId::default(), - Some(ConnectionId::default()), - Default::default(), - ), - get_compatible_versions(), - ZERO_DURATION, - ); - - let tests: Vec = vec![ + use test_log::test; + + use crate::{ + core::{ + ics02_client::{context::ClientReader, height::Height}, + ics03_connection::{ + connection::{ + ConnectionEnd, Counterparty as ConnectionCounterparty, State as ConnectionState, + }, + version::get_compatible_versions, + }, + ics04_channel::{ + channel::{ChannelEnd, Counterparty, Order, State}, + context::ChannelReader, + handler::timeout::process, + msgs::timeout::{test_util::get_dummy_raw_msg_timeout, MsgTimeout}, + Version, + }, + ics24_host::identifier::{ChannelId, ClientId, ConnectionId, PortId}, + }, + events::IbcEvent, + mock::context::{MockClientTypes, MockContext}, + prelude::*, + timestamp::ZERO_DURATION, + }; + + #[test] + fn timeout_packet_processing() { + struct Test { + name: String, + ctx: MockContext, + msg: MsgTimeout, + want_pass: bool, + } + + let context = MockContext::default(); + + let height = Height::default().revision_height + 2; + let timeout_timestamp = 5; + + let client_height = Height::new(0, Height::default().revision_height + 2); + + let msg = + MsgTimeout::try_from(get_dummy_raw_msg_timeout(height, timeout_timestamp)).unwrap(); + let packet = msg.packet.clone(); + + let mut msg_ok = msg.clone(); + msg_ok.packet.timeout_timestamp = Default::default(); + + let data = context.packet_commitment( + msg_ok.packet.data.clone(), + msg_ok.packet.timeout_height, + msg_ok.packet.timeout_timestamp, + ); + + let source_channel_end = ChannelEnd::new( + State::Open, + Order::default(), + Counterparty::new(packet.destination_port.clone(), Some(packet.destination_channel)), + vec![ConnectionId::default()], + Version::ics20(), + ); + + let mut source_ordered_channel_end = source_channel_end.clone(); + source_ordered_channel_end.ordering = Order::Ordered; + + let connection_end = ConnectionEnd::new( + ConnectionState::Open, + ClientId::default(), + ConnectionCounterparty::new( + ClientId::default(), + Some(ConnectionId::default()), + Default::default(), + ), + get_compatible_versions(), + ZERO_DURATION, + ); + + let tests: Vec = vec![ Test { name: "Processing fails because no channel exists in the context".to_string(), ctx: context.clone(), @@ -297,12 +294,12 @@ mod tests { .into_iter() .collect(); - for test in tests { - let res = process::(&test.ctx, &test.msg); - // Additionally check the events and the output objects in the result. - match res { - Ok(proto_output) => { - assert!( + for test in tests { + let res = process(&test.ctx, &test.msg); + // Additionally check the events and the output objects in the result. + match res { + Ok(proto_output) => { + assert!( test.want_pass, "TO_packet: test passed but was supposed to fail for test: {}, \nparams {:?} {:?}", test.name, @@ -310,24 +307,24 @@ mod tests { test.ctx.clone() ); - assert!(!proto_output.events.is_empty()); // Some events must exist. 
- - for e in proto_output.events.iter() { - assert!(matches!(e, &IbcEvent::TimeoutPacket(_))); - assert_eq!(e.height(), test.ctx.host_height()); - } - } - Err(e) => { - assert!( - !test.want_pass, - "timeout_packet: did not pass test: {}, \nparams {:?} {:?} error: {:?}", - test.name, - test.msg.clone(), - test.ctx.clone(), - e, - ); - } - } - } - } + assert!(!proto_output.events.is_empty()); // Some events must exist. + + for e in proto_output.events.iter() { + assert!(matches!(e, &IbcEvent::TimeoutPacket(_))); + assert_eq!(e.height(), test.ctx.host_height()); + } + }, + Err(e) => { + assert!( + !test.want_pass, + "timeout_packet: did not pass test: {}, \nparams {:?} {:?} error: {:?}", + test.name, + test.msg.clone(), + test.ctx.clone(), + e, + ); + }, + } + } + } } diff --git a/modules/src/core/ics04_channel/handler/timeout_on_close.rs b/modules/src/core/ics04_channel/handler/timeout_on_close.rs index 6f46deb180..828114d2de 100644 --- a/modules/src/core/ics04_channel/handler/timeout_on_close.rs +++ b/modules/src/core/ics04_channel/handler/timeout_on_close.rs @@ -1,265 +1,267 @@ -use crate::clients::host_functions::HostFunctionsProvider; -use crate::core::ics04_channel::channel::State; -use crate::core::ics04_channel::channel::{ChannelEnd, Counterparty, Order}; -use crate::core::ics04_channel::events::TimeoutOnClosePacket; -use crate::core::ics04_channel::handler::verify::verify_channel_proofs; -use crate::core::ics04_channel::handler::verify::{ - verify_next_sequence_recv, verify_packet_receipt_absence, +use crate::{ + core::{ + ics04_channel::{ + channel::{ChannelEnd, Counterparty, Order, State}, + error::Error, + events::TimeoutOnClosePacket, + handler::{ + timeout::TimeoutPacketResult, + verify::{ + verify_channel_proofs, verify_next_sequence_recv, verify_packet_receipt_absence, + }, + }, + msgs::timeout_on_close::MsgTimeoutOnClose, + packet::PacketResult, + }, + ics26_routing::context::ReaderContext, + }, + events::IbcEvent, + handler::{HandlerOutput, HandlerResult}, + prelude::*, }; -use crate::core::ics04_channel::msgs::timeout_on_close::MsgTimeoutOnClose; -use crate::core::ics04_channel::packet::PacketResult; -use crate::core::ics04_channel::{error::Error, handler::timeout::TimeoutPacketResult}; -use crate::core::ics26_routing::context::ReaderContext; -use crate::events::IbcEvent; -use crate::handler::{HandlerOutput, HandlerResult}; -use crate::prelude::*; - -pub fn process( - ctx: &dyn ReaderContext, - msg: &MsgTimeoutOnClose, + +pub fn process( + ctx: &Ctx, + msg: &MsgTimeoutOnClose, ) -> HandlerResult { - let mut output = HandlerOutput::builder(); - - let packet = &msg.packet; - - let source_channel_end = - ctx.channel_end(&(packet.source_port.clone(), packet.source_channel))?; - - let counterparty = Counterparty::new( - packet.destination_port.clone(), - Some(packet.destination_channel), - ); - - if !source_channel_end.counterparty_matches(&counterparty) { - return Err(Error::invalid_packet_counterparty( - packet.destination_port.clone(), - packet.destination_channel, - )); - } - - let connection_end = ctx - .connection_end(&source_channel_end.connection_hops()[0]) - .map_err(Error::ics03_connection)?; - - //verify the packet was sent, check the store - let packet_commitment = ctx.get_packet_commitment(&( - packet.source_port.clone(), - packet.source_channel, - packet.sequence, - ))?; - - let expected_commitment = ctx.packet_commitment( - packet.data.clone(), - packet.timeout_height, - packet.timeout_timestamp, - ); - if packet_commitment != expected_commitment { - return 
Err(Error::incorrect_packet_commitment(packet.sequence)); - } - - let expected_counterparty = - Counterparty::new(packet.source_port.clone(), Some(packet.source_channel)); - - let counterparty = connection_end.counterparty(); - let ccid = counterparty.connection_id().ok_or_else(|| { - Error::undefined_connection_counterparty(source_channel_end.connection_hops()[0].clone()) - })?; - - let expected_connection_hops = vec![ccid.clone()]; - - let expected_channel_end = ChannelEnd::new( - State::Closed, - *source_channel_end.ordering(), - expected_counterparty, - expected_connection_hops, - source_channel_end.version().clone(), - ); - - verify_channel_proofs::( - ctx, - msg.proofs.height(), - &source_channel_end, - &connection_end, - &expected_channel_end, - msg.proofs - .other_proof() - .as_ref() - .ok_or_else(|| Error::missing_channel_proof())?, - )?; - - let result = if source_channel_end.order_matches(&Order::Ordered) { - if packet.sequence < msg.next_sequence_recv { - return Err(Error::invalid_packet_sequence( - packet.sequence, - msg.next_sequence_recv, - )); - } - verify_next_sequence_recv::( - ctx, - msg.proofs.height(), - &connection_end, - packet.clone(), - msg.next_sequence_recv, - &msg.proofs, - )?; - - PacketResult::Timeout(TimeoutPacketResult { - port_id: packet.source_port.clone(), - channel_id: packet.source_channel, - seq: packet.sequence, - channel: Some(source_channel_end), - }) - } else { - verify_packet_receipt_absence::( - ctx, - msg.proofs.height(), - &connection_end, - packet.clone(), - &msg.proofs, - )?; - - PacketResult::Timeout(TimeoutPacketResult { - port_id: packet.source_port.clone(), - channel_id: packet.source_channel, - seq: packet.sequence, - channel: None, - }) - }; - - output.log("success: packet timeout "); - - output.emit(IbcEvent::TimeoutOnClosePacket(TimeoutOnClosePacket { - height: ctx.host_height(), - packet: packet.clone(), - })); - - Ok(output.with_result(result)) + let mut output = HandlerOutput::builder(); + + let packet = &msg.packet; + + let source_channel_end = + ctx.channel_end(&(packet.source_port.clone(), packet.source_channel))?; + + let counterparty = + Counterparty::new(packet.destination_port.clone(), Some(packet.destination_channel)); + + if !source_channel_end.counterparty_matches(&counterparty) { + return Err(Error::invalid_packet_counterparty( + packet.destination_port.clone(), + packet.destination_channel, + )) + } + + let connection_end = ctx + .connection_end(&source_channel_end.connection_hops()[0]) + .map_err(Error::ics03_connection)?; + + //verify the packet was sent, check the store + let packet_commitment = ctx.get_packet_commitment(&( + packet.source_port.clone(), + packet.source_channel, + packet.sequence, + ))?; + + let expected_commitment = + ctx.packet_commitment(packet.data.clone(), packet.timeout_height, packet.timeout_timestamp); + if packet_commitment != expected_commitment { + return Err(Error::incorrect_packet_commitment(packet.sequence)) + } + + let expected_counterparty = + Counterparty::new(packet.source_port.clone(), Some(packet.source_channel)); + + let counterparty = connection_end.counterparty(); + let ccid = counterparty.connection_id().ok_or_else(|| { + Error::undefined_connection_counterparty(source_channel_end.connection_hops()[0].clone()) + })?; + + let expected_connection_hops = vec![ccid.clone()]; + + let expected_channel_end = ChannelEnd::new( + State::Closed, + *source_channel_end.ordering(), + expected_counterparty, + expected_connection_hops, + source_channel_end.version().clone(), + ); + + 
verify_channel_proofs::( + ctx, + msg.proofs.height(), + &source_channel_end, + &connection_end, + &expected_channel_end, + msg.proofs + .other_proof() + .as_ref() + .ok_or_else(|| Error::missing_channel_proof())?, + )?; + + let result = if source_channel_end.order_matches(&Order::Ordered) { + if packet.sequence < msg.next_sequence_recv { + return Err(Error::invalid_packet_sequence(packet.sequence, msg.next_sequence_recv)) + } + verify_next_sequence_recv::( + ctx, + msg.proofs.height(), + &connection_end, + packet.clone(), + msg.next_sequence_recv, + &msg.proofs, + )?; + + PacketResult::Timeout(TimeoutPacketResult { + port_id: packet.source_port.clone(), + channel_id: packet.source_channel, + seq: packet.sequence, + channel: Some(source_channel_end), + }) + } else { + verify_packet_receipt_absence::( + ctx, + msg.proofs.height(), + &connection_end, + packet.clone(), + &msg.proofs, + )?; + + PacketResult::Timeout(TimeoutPacketResult { + port_id: packet.source_port.clone(), + channel_id: packet.source_channel, + seq: packet.sequence, + channel: None, + }) + }; + + output.log("success: packet timeout "); + + output.emit(IbcEvent::TimeoutOnClosePacket(TimeoutOnClosePacket { + height: ctx.host_height(), + packet: packet.clone(), + })); + + Ok(output.with_result(result)) } #[cfg(test)] mod tests { - use test_log::test; - - use crate::core::ics02_client::context::ClientReader; - use crate::core::ics02_client::height::Height; - use crate::core::ics03_connection::connection::ConnectionEnd; - use crate::core::ics03_connection::connection::Counterparty as ConnectionCounterparty; - use crate::core::ics03_connection::connection::State as ConnectionState; - use crate::core::ics03_connection::version::get_compatible_versions; - use crate::core::ics04_channel::channel::{ChannelEnd, Counterparty, Order, State}; - use crate::core::ics04_channel::context::ChannelReader; - use crate::core::ics04_channel::handler::timeout_on_close::process; - use crate::core::ics04_channel::msgs::timeout_on_close::test_util::get_dummy_raw_msg_timeout_on_close; - use crate::core::ics04_channel::msgs::timeout_on_close::MsgTimeoutOnClose; - use crate::core::ics04_channel::Version; - use crate::core::ics24_host::identifier::{ChannelId, ClientId, ConnectionId, PortId}; - use crate::events::IbcEvent; - use crate::mock::context::MockContext; - use crate::prelude::*; - use crate::test_utils::Crypto; - use crate::timestamp::ZERO_DURATION; - - #[test] - fn timeout_on_close_packet_processing() { - struct Test { - name: String, - ctx: MockContext, - msg: MsgTimeoutOnClose, - want_pass: bool, - } - - let context = MockContext::default(); - - let height = Height::default().revision_height + 2; - let timeout_timestamp = 5; - - let client_height = Height::new(0, Height::default().revision_height + 2); - - let msg = MsgTimeoutOnClose::try_from(get_dummy_raw_msg_timeout_on_close( - height, - timeout_timestamp, - )) - .unwrap(); - let packet = msg.packet.clone(); - - let data = context.packet_commitment( - msg.packet.data.clone(), - msg.packet.timeout_height, - msg.packet.timeout_timestamp, - ); - - let source_channel_end = ChannelEnd::new( - State::Open, - Order::Ordered, - Counterparty::new( - packet.destination_port.clone(), - Some(packet.destination_channel), - ), - vec![ConnectionId::default()], - Version::ics20(), - ); - - let connection_end = ConnectionEnd::new( - ConnectionState::Open, - ClientId::default(), - ConnectionCounterparty::new( - ClientId::default(), - Some(ConnectionId::default()), - Default::default(), - ), - 
get_compatible_versions(), - ZERO_DURATION, - ); - - let tests: Vec = vec![ - Test { - name: "Processing fails because no channel exists in the context".to_string(), - ctx: context.clone(), - msg: msg.clone(), - want_pass: false, - }, - Test { - name: "Processing fails no packet commitment is found".to_string(), - ctx: context - .clone() - .with_channel( - PortId::default(), - ChannelId::default(), - source_channel_end.clone(), - ) - .with_connection(ConnectionId::default(), connection_end.clone()), - msg: msg.clone(), - want_pass: false, - }, - Test { - name: "Good parameters".to_string(), - ctx: context - .with_client(&ClientId::default(), client_height) - .with_connection(ConnectionId::default(), connection_end) - .with_channel( - packet.source_port.clone(), - packet.source_channel, - source_channel_end, - ) - .with_packet_commitment( - msg.packet.source_port.clone(), - msg.packet.source_channel, - msg.packet.sequence, - data, - ), - msg, - want_pass: true, - }, - ] - .into_iter() - .collect(); - - for test in tests { - let res = process::(&test.ctx, &test.msg); - // Additionally check the events and the output objects in the result. - match res { - Ok(proto_output) => { - assert!( + use test_log::test; + + use crate::{ + core::{ + ics02_client::{context::ClientReader, height::Height}, + ics03_connection::{ + connection::{ + ConnectionEnd, Counterparty as ConnectionCounterparty, State as ConnectionState, + }, + version::get_compatible_versions, + }, + ics04_channel::{ + channel::{ChannelEnd, Counterparty, Order, State}, + context::ChannelReader, + handler::timeout_on_close::process, + msgs::timeout_on_close::{ + test_util::get_dummy_raw_msg_timeout_on_close, MsgTimeoutOnClose, + }, + Version, + }, + ics24_host::identifier::{ChannelId, ClientId, ConnectionId, PortId}, + }, + events::IbcEvent, + mock::context::{MockClientTypes, MockContext}, + prelude::*, + timestamp::ZERO_DURATION, + }; + + #[test] + fn timeout_on_close_packet_processing() { + struct Test { + name: String, + ctx: MockContext, + msg: MsgTimeoutOnClose, + want_pass: bool, + } + + let context = MockContext::default(); + + let height = Height::default().revision_height + 2; + let timeout_timestamp = 5; + + let client_height = Height::new(0, Height::default().revision_height + 2); + + let msg = MsgTimeoutOnClose::try_from(get_dummy_raw_msg_timeout_on_close( + height, + timeout_timestamp, + )) + .unwrap(); + let packet = msg.packet.clone(); + + let data = context.packet_commitment( + msg.packet.data.clone(), + msg.packet.timeout_height, + msg.packet.timeout_timestamp, + ); + + let source_channel_end = ChannelEnd::new( + State::Open, + Order::Ordered, + Counterparty::new(packet.destination_port.clone(), Some(packet.destination_channel)), + vec![ConnectionId::default()], + Version::ics20(), + ); + + let connection_end = ConnectionEnd::new( + ConnectionState::Open, + ClientId::default(), + ConnectionCounterparty::new( + ClientId::default(), + Some(ConnectionId::default()), + Default::default(), + ), + get_compatible_versions(), + ZERO_DURATION, + ); + + let tests: Vec = vec![ + Test { + name: "Processing fails because no channel exists in the context".to_string(), + ctx: context.clone(), + msg: msg.clone(), + want_pass: false, + }, + Test { + name: "Processing fails no packet commitment is found".to_string(), + ctx: context + .clone() + .with_channel( + PortId::default(), + ChannelId::default(), + source_channel_end.clone(), + ) + .with_connection(ConnectionId::default(), connection_end.clone()), + msg: msg.clone(), + want_pass: 
false, + }, + Test { + name: "Good parameters".to_string(), + ctx: context + .with_client(&ClientId::default(), client_height) + .with_connection(ConnectionId::default(), connection_end) + .with_channel( + packet.source_port.clone(), + packet.source_channel, + source_channel_end, + ) + .with_packet_commitment( + msg.packet.source_port.clone(), + msg.packet.source_channel, + msg.packet.sequence, + data, + ), + msg, + want_pass: true, + }, + ] + .into_iter() + .collect(); + + for test in tests { + let res = process(&test.ctx, &test.msg); + // Additionally check the events and the output objects in the result. + match res { + Ok(proto_output) => { + assert!( test.want_pass, "TO_on_close_packet: test passed but was supposed to fail for test: {}, \nparams {:?} {:?}", test.name, @@ -267,23 +269,23 @@ mod tests { test.ctx.clone() ); - assert!(!proto_output.events.is_empty()); // Some events must exist. - for e in proto_output.events.iter() { - assert!(matches!(e, &IbcEvent::TimeoutOnClosePacket(_))); - assert_eq!(e.height(), test.ctx.host_height()); - } - } - Err(e) => { - assert!( - !test.want_pass, - "timeout_packet: did not pass test: {}, \nparams {:?} {:?} error: {:?}", - test.name, - test.msg.clone(), - test.ctx.clone(), - e, - ); - } - } - } - } + assert!(!proto_output.events.is_empty()); // Some events must exist. + for e in proto_output.events.iter() { + assert!(matches!(e, &IbcEvent::TimeoutOnClosePacket(_))); + assert_eq!(e.height(), test.ctx.host_height()); + } + }, + Err(e) => { + assert!( + !test.want_pass, + "timeout_packet: did not pass test: {}, \nparams {:?} {:?} error: {:?}", + test.name, + test.msg.clone(), + test.ctx.clone(), + e, + ); + }, + } + } + } } diff --git a/modules/src/core/ics04_channel/handler/verify.rs b/modules/src/core/ics04_channel/handler/verify.rs index c9be117dce..0284f609c6 100644 --- a/modules/src/core/ics04_channel/handler/verify.rs +++ b/modules/src/core/ics04_channel/handler/verify.rs @@ -1,232 +1,243 @@ -use crate::clients::host_functions::HostFunctionsProvider; -use crate::core::ics02_client::client_consensus::ConsensusState; -use crate::core::ics02_client::client_state::ClientState; -use crate::core::ics02_client::{client_def::AnyClient, client_def::ClientDef}; -use crate::core::ics03_connection::connection::ConnectionEnd; -use crate::core::ics04_channel::channel::ChannelEnd; -use crate::core::ics04_channel::error::Error; -use crate::core::ics04_channel::msgs::acknowledgement::Acknowledgement; -use crate::core::ics04_channel::packet::{Packet, Sequence}; -use crate::core::ics23_commitment::commitment::CommitmentProofBytes; -use crate::core::ics26_routing::context::ReaderContext; -use crate::prelude::*; -use crate::proofs::Proofs; -use crate::Height; +use crate::{ + core::{ + ics02_client::{ + client_consensus::ConsensusState, client_def::ClientDef, client_state::ClientState, + }, + ics03_connection::connection::ConnectionEnd, + ics04_channel::{ + channel::ChannelEnd, + error::Error, + msgs::acknowledgement::Acknowledgement, + packet::{Packet, Sequence}, + }, + ics23_commitment::commitment::CommitmentProofBytes, + ics26_routing::context::ReaderContext, + }, + prelude::*, + proofs::Proofs, + Height, +}; /// Entry point for verifying all proofs bundled in any ICS4 message for channel protocols. 
-pub fn verify_channel_proofs<HostFunctions: HostFunctionsProvider>(
-    ctx: &dyn ReaderContext,
-    height: Height,
-    channel_end: &ChannelEnd,
-    connection_end: &ConnectionEnd,
-    expected_chan: &ChannelEnd,
-    proof: &CommitmentProofBytes,
-) -> Result<(), Error> {
-    // This is the client which will perform proof verification.
-    let client_id = connection_end.client_id().clone();
-
-    let client_state = ctx.client_state(&client_id).map_err(Error::ics02_client)?;
-
-    // The client must not be frozen.
-    if client_state.is_frozen() {
-        return Err(Error::frozen_client(client_id));
-    }
-
-    let consensus_state = ctx
-        .consensus_state(&client_id, height)
-        .map_err(|_| Error::error_invalid_consensus_state())?;
-
-    let client_def = AnyClient::<HostFunctions>::from_client_type(client_state.client_type());
-
-    // Verify the proof for the channel state against the expected channel end.
-    // A counterparty channel id of None in not possible, and is checked by validate_basic in msg.
-    client_def
-        .verify_channel_state(
-            ctx,
-            &client_id,
-            &client_state,
-            height,
-            connection_end.counterparty().prefix(),
-            &proof,
-            consensus_state.root(),
-            channel_end.counterparty().port_id(),
-            channel_end.counterparty().channel_id().unwrap(),
-            expected_chan,
-        )
-        .map_err(Error::verify_channel_failed)
+pub fn verify_channel_proofs<Ctx>(
+	ctx: &Ctx,
+	height: Height,
+	channel_end: &ChannelEnd,
+	connection_end: &ConnectionEnd,
+	expected_chan: &ChannelEnd,
+	proof: &CommitmentProofBytes,
+) -> Result<(), Error>
+where
+	Ctx: ReaderContext,
+{
+	// This is the client which will perform proof verification.
+	let client_id = connection_end.client_id().clone();
+
+	let client_state = ctx.client_state(&client_id).map_err(Error::ics02_client)?;
+
+	// The client must not be frozen.
+	if client_state.is_frozen() {
+		return Err(Error::frozen_client(client_id))
+	}
+
+	let consensus_state = ctx
+		.consensus_state(&client_id, height)
+		.map_err(|_| Error::error_invalid_consensus_state())?;
+
+	let client_def = client_state.client_def();
+
+	// Verify the proof for the channel state against the expected channel end.
+	// A counterparty channel id of None is not possible, and is checked by validate_basic in msg.
+	client_def
+		.verify_channel_state(
+			ctx,
+			&client_id,
+			&client_state,
+			height,
+			connection_end.counterparty().prefix(),
+			&proof,
+			consensus_state.root(),
+			channel_end.counterparty().port_id(),
+			channel_end.counterparty().channel_id().unwrap(),
+			expected_chan,
+		)
+		.map_err(Error::verify_channel_failed)
 }
 
 /// Entry point for verifying all proofs bundled in a ICS4 packet recv. message.
-pub fn verify_packet_recv_proofs<HostFunctions: HostFunctionsProvider>(
-    ctx: &dyn ReaderContext,
-    height: Height,
-    packet: &Packet,
-    connection_end: &ConnectionEnd,
-    proofs: &Proofs,
+pub fn verify_packet_recv_proofs<Ctx: ReaderContext>(
+	ctx: &Ctx,
+	height: Height,
+	packet: &Packet,
+	connection_end: &ConnectionEnd,
+	proofs: &Proofs,
 ) -> Result<(), Error> {
-    let client_id = connection_end.client_id();
-    let client_state = ctx.client_state(client_id).map_err(Error::ics02_client)?;
-
-    // The client must not be frozen.
-    if client_state.is_frozen() {
-        return Err(Error::frozen_client(client_id.clone()));
-    }
-
-    let consensus_state = ctx
-        .consensus_state(client_id, proofs.height())
-        .map_err(|_| Error::error_invalid_consensus_state())?;
-
-    let client_def = AnyClient::<HostFunctions>::from_client_type(client_state.client_type());
-
-    let commitment = ctx.packet_commitment(
-        packet.data.clone(),
-        packet.timeout_height,
-        packet.timeout_timestamp,
-    );
-
-    // Verify the proof for the packet against the chain store.
-    client_def
-        .verify_packet_data(
-            ctx,
-            client_id,
-            &client_state,
-            height,
-            connection_end,
-            proofs.object_proof(),
-            consensus_state.root(),
-            &packet.source_port,
-            &packet.source_channel,
-            packet.sequence,
-            commitment,
-        )
-        .map_err(|e| Error::packet_verification_failed(packet.sequence, e))?;
-
-    Ok(())
+	let client_id = connection_end.client_id();
+	let client_state = ctx.client_state(client_id).map_err(Error::ics02_client)?;
+
+	// The client must not be frozen.
+	if client_state.is_frozen() {
+		return Err(Error::frozen_client(client_id.clone()))
+	}
+
+	let consensus_state = ctx
+		.consensus_state(client_id, proofs.height())
+		.map_err(|_| Error::error_invalid_consensus_state())?;
+
+	let client_def = client_state.client_def();
+
+	let commitment =
+		ctx.packet_commitment(packet.data.clone(), packet.timeout_height, packet.timeout_timestamp);
+
+	// Verify the proof for the packet against the chain store.
+	client_def
+		.verify_packet_data(
+			ctx,
+			client_id,
+			&client_state,
+			height,
+			connection_end,
+			proofs.object_proof(),
+			consensus_state.root(),
+			&packet.source_port,
+			&packet.source_channel,
+			packet.sequence,
+			commitment,
+		)
+		.map_err(|e| Error::packet_verification_failed(packet.sequence, e))?;
+
+	Ok(())
 }
 
 /// Entry point for verifying all proofs bundled in an ICS4 packet ack message.
-pub fn verify_packet_acknowledgement_proofs<HostFunctions: HostFunctionsProvider>(
-    ctx: &dyn ReaderContext,
-    height: Height,
-    packet: &Packet,
-    acknowledgement: Acknowledgement,
-    connection_end: &ConnectionEnd,
-    proofs: &Proofs,
+pub fn verify_packet_acknowledgement_proofs<Ctx: ReaderContext>(
+	ctx: &Ctx,
+	height: Height,
+	packet: &Packet,
+	acknowledgement: Acknowledgement,
+	connection_end: &ConnectionEnd,
+	proofs: &Proofs,
 ) -> Result<(), Error> {
-    let client_id = connection_end.client_id();
-    let client_state = ctx.client_state(client_id).map_err(Error::ics02_client)?;
-
-    // The client must not be frozen.
-    if client_state.is_frozen() {
-        return Err(Error::frozen_client(client_id.clone()));
-    }
-
-    let consensus_state = ctx
-        .consensus_state(client_id, proofs.height())
-        .map_err(|_| Error::error_invalid_consensus_state())?;
-
-    let ack_commitment = ctx.ack_commitment(acknowledgement);
-
-    let client_def = AnyClient::<HostFunctions>::from_client_type(client_state.client_type());
-
-    // Verify the proof for the packet against the chain store.
-    client_def
-        .verify_packet_acknowledgement(
-            ctx,
-            client_id,
-            &client_state,
-            height,
-            connection_end,
-            proofs.object_proof(),
-            consensus_state.root(),
-            &packet.destination_port,
-            &packet.destination_channel,
-            packet.sequence,
-            ack_commitment,
-        )
-        .map_err(|e| Error::packet_verification_failed(packet.sequence, e))?;
-
-    Ok(())
+	let client_id = connection_end.client_id();
+	let client_state = ctx.client_state(client_id).map_err(Error::ics02_client)?;
+
+	// The client must not be frozen.
+	if client_state.is_frozen() {
+		return Err(Error::frozen_client(client_id.clone()))
+	}
+
+	let consensus_state = ctx
+		.consensus_state(client_id, proofs.height())
+		.map_err(|_| Error::error_invalid_consensus_state())?;
+
+	let ack_commitment = ctx.ack_commitment(acknowledgement);
+
+	let client_def = client_state.client_def();
+
+	// Verify the proof for the packet against the chain store.
+ client_def + .verify_packet_acknowledgement( + ctx, + client_id, + &client_state, + height, + connection_end, + proofs.object_proof(), + consensus_state.root(), + &packet.destination_port, + &packet.destination_channel, + packet.sequence, + ack_commitment, + ) + .map_err(|e| Error::packet_verification_failed(packet.sequence, e))?; + + Ok(()) } /// Entry point for verifying all timeout proofs. -pub fn verify_next_sequence_recv( - ctx: &dyn ReaderContext, - height: Height, - connection_end: &ConnectionEnd, - packet: Packet, - seq: Sequence, - proofs: &Proofs, -) -> Result<(), Error> { - let client_id = connection_end.client_id(); - let client_state = ctx.client_state(client_id).map_err(Error::ics02_client)?; - - // The client must not be frozen. - if client_state.is_frozen() { - return Err(Error::frozen_client(client_id.clone())); - } - - let consensus_state = ctx - .consensus_state(client_id, proofs.height()) - .map_err(|_| Error::error_invalid_consensus_state())?; - - let client_def = AnyClient::::from_client_type(client_state.client_type()); - - // Verify the proof for the packet against the chain store. - client_def - .verify_next_sequence_recv( - ctx, - client_id, - &client_state, - height, - connection_end, - proofs.object_proof(), - consensus_state.root(), - &packet.destination_port, - &packet.destination_channel, - packet.sequence, - ) - .map_err(|e| Error::packet_verification_failed(seq, e))?; - - Ok(()) +pub fn verify_next_sequence_recv( + ctx: &Ctx, + height: Height, + connection_end: &ConnectionEnd, + packet: Packet, + seq: Sequence, + proofs: &Proofs, +) -> Result<(), Error> +where + Ctx: ReaderContext, +{ + let client_id = connection_end.client_id(); + let client_state = ctx.client_state(client_id).map_err(Error::ics02_client)?; + + // The client must not be frozen. + if client_state.is_frozen() { + return Err(Error::frozen_client(client_id.clone())) + } + + let consensus_state = ctx + .consensus_state(client_id, proofs.height()) + .map_err(|_| Error::error_invalid_consensus_state())?; + + let client_def = client_state.client_def(); + + // Verify the proof for the packet against the chain store. + client_def + .verify_next_sequence_recv( + ctx, + client_id, + &client_state, + height, + connection_end, + proofs.object_proof(), + consensus_state.root(), + &packet.destination_port, + &packet.destination_channel, + packet.sequence, + ) + .map_err(|e| Error::packet_verification_failed(seq, e))?; + + Ok(()) } -pub fn verify_packet_receipt_absence( - ctx: &dyn ReaderContext, - height: Height, - connection_end: &ConnectionEnd, - packet: Packet, - proofs: &Proofs, -) -> Result<(), Error> { - let client_id = connection_end.client_id(); - let client_state = ctx.client_state(client_id).map_err(Error::ics02_client)?; - - // The client must not be frozen. - if client_state.is_frozen() { - return Err(Error::frozen_client(client_id.clone())); - } - - let consensus_state = ctx - .consensus_state(client_id, proofs.height()) - .map_err(|_| Error::error_invalid_consensus_state())?; - - let client_def = AnyClient::::from_client_type(client_state.client_type()); - - // Verify the proof for the packet against the chain store. 
- client_def - .verify_packet_receipt_absence( - ctx, - client_id, - &client_state, - height, - connection_end, - proofs.object_proof(), - consensus_state.root(), - &packet.destination_port, - &packet.destination_channel, - packet.sequence, - ) - .map_err(|e| Error::packet_verification_failed(packet.sequence, e))?; - - Ok(()) +pub fn verify_packet_receipt_absence( + ctx: &Ctx, + height: Height, + connection_end: &ConnectionEnd, + packet: Packet, + proofs: &Proofs, +) -> Result<(), Error> +where + Ctx: ReaderContext, +{ + let client_id = connection_end.client_id(); + let client_state = ctx.client_state(client_id).map_err(Error::ics02_client)?; + + // The client must not be frozen. + if client_state.is_frozen() { + return Err(Error::frozen_client(client_id.clone())) + } + + let consensus_state = ctx + .consensus_state(client_id, proofs.height()) + .map_err(|_| Error::error_invalid_consensus_state())?; + + let client_def = client_state.client_def(); + + // Verify the proof for the packet against the chain store. + client_def + .verify_packet_receipt_absence( + ctx, + client_id, + &client_state, + height, + connection_end, + proofs.object_proof(), + consensus_state.root(), + &packet.destination_port, + &packet.destination_channel, + packet.sequence, + ) + .map_err(|e| Error::packet_verification_failed(packet.sequence, e))?; + + Ok(()) } diff --git a/modules/src/core/ics04_channel/handler/write_acknowledgement.rs b/modules/src/core/ics04_channel/handler/write_acknowledgement.rs index 299bd68f9d..1d5d838c11 100644 --- a/modules/src/core/ics04_channel/handler/write_acknowledgement.rs +++ b/modules/src/core/ics04_channel/handler/write_acknowledgement.rs @@ -1,182 +1,190 @@ -use crate::core::ics04_channel::channel::State; -use crate::core::ics04_channel::commitment::AcknowledgementCommitment; -use crate::core::ics04_channel::error::Error; -use crate::core::ics04_channel::events::WriteAcknowledgement; -use crate::core::ics04_channel::packet::{Packet, PacketResult, Sequence}; -use crate::core::ics24_host::identifier::{ChannelId, PortId}; -use crate::core::ics26_routing::context::ReaderContext; -use crate::prelude::*; use crate::{ - events::IbcEvent, - handler::{HandlerOutput, HandlerResult}, + core::{ + ics04_channel::{ + channel::State, + commitment::AcknowledgementCommitment, + error::Error, + events::WriteAcknowledgement, + packet::{Packet, PacketResult, Sequence}, + }, + ics24_host::identifier::{ChannelId, PortId}, + ics26_routing::context::ReaderContext, + }, + events::IbcEvent, + handler::{HandlerOutput, HandlerResult}, + prelude::*, }; #[derive(Clone, Debug)] pub struct WriteAckPacketResult { - pub port_id: PortId, - pub channel_id: ChannelId, - pub seq: Sequence, - pub ack_commitment: AcknowledgementCommitment, + pub port_id: PortId, + pub channel_id: ChannelId, + pub seq: Sequence, + pub ack_commitment: AcknowledgementCommitment, } -pub fn process( - ctx: &dyn ReaderContext, - packet: Packet, - ack: Vec, +pub fn process( + ctx: &Ctx, + packet: Packet, + ack: Vec, ) -> HandlerResult { - let mut output = HandlerOutput::builder(); - - let dest_channel_end = - ctx.channel_end(&(packet.destination_port.clone(), packet.destination_channel))?; - - if !dest_channel_end.state_matches(&State::Open) { - return Err(Error::invalid_channel_state( - packet.source_channel, - dest_channel_end.state, - )); - } - - // NOTE: IBC app modules might have written the acknowledgement synchronously on - // the OnRecvPacket callback so we need to check if the acknowledgement is already - // set on the store and return 
an error if so. - match ctx.get_packet_acknowledgement(&( - packet.destination_port.clone(), - packet.destination_channel, - packet.sequence, - )) { - Ok(_) => return Err(Error::acknowledgement_exists(packet.sequence)), - Err(e) - if e.detail() == Error::packet_acknowledgement_not_found(packet.sequence).detail() => {} - Err(e) => return Err(e), - } - - if ack.is_empty() { - return Err(Error::invalid_acknowledgement()); - } - - let result = PacketResult::WriteAck(WriteAckPacketResult { - port_id: packet.source_port.clone(), - channel_id: packet.source_channel, - seq: packet.sequence, - ack_commitment: ctx.ack_commitment(ack.clone().into()), - }); - - output.log("success: packet write acknowledgement"); - - output.emit(IbcEvent::WriteAcknowledgement(WriteAcknowledgement { - height: ctx.host_height(), - packet, - ack, - })); - - Ok(output.with_result(result)) + let mut output = HandlerOutput::builder(); + + let dest_channel_end = + ctx.channel_end(&(packet.destination_port.clone(), packet.destination_channel))?; + + if !dest_channel_end.state_matches(&State::Open) { + return Err(Error::invalid_channel_state(packet.source_channel, dest_channel_end.state)) + } + + // NOTE: IBC app modules might have written the acknowledgement synchronously on + // the OnRecvPacket callback so we need to check if the acknowledgement is already + // set on the store and return an error if so. + match ctx.get_packet_acknowledgement(&( + packet.destination_port.clone(), + packet.destination_channel, + packet.sequence, + )) { + Ok(_) => return Err(Error::acknowledgement_exists(packet.sequence)), + Err(e) + if e.detail() == Error::packet_acknowledgement_not_found(packet.sequence).detail() => {}, + Err(e) => return Err(e), + } + + if ack.is_empty() { + return Err(Error::invalid_acknowledgement()) + } + + let result = PacketResult::WriteAck(WriteAckPacketResult { + port_id: packet.source_port.clone(), + channel_id: packet.source_channel, + seq: packet.sequence, + ack_commitment: ctx.ack_commitment(ack.clone().into()), + }); + + output.log("success: packet write acknowledgement"); + + output.emit(IbcEvent::WriteAcknowledgement(WriteAcknowledgement { + height: ctx.host_height(), + packet, + ack, + })); + + Ok(output.with_result(result)) } #[cfg(test)] mod tests { - use crate::prelude::*; - - use test_log::test; - - use crate::core::ics02_client::context::ClientReader; - use crate::core::ics02_client::height::Height; - use crate::core::ics03_connection::connection::ConnectionEnd; - use crate::core::ics03_connection::connection::Counterparty as ConnectionCounterparty; - use crate::core::ics03_connection::connection::State as ConnectionState; - use crate::core::ics03_connection::version::get_compatible_versions; - use crate::core::ics04_channel::channel::{ChannelEnd, Counterparty, Order, State}; - use crate::core::ics04_channel::handler::write_acknowledgement::process; - use crate::core::ics04_channel::packet::test_utils::get_dummy_raw_packet; - use crate::core::ics04_channel::Version; - use crate::core::ics24_host::identifier::{ChannelId, ClientId, ConnectionId, PortId}; - use crate::mock::context::MockContext; - use crate::timestamp::ZERO_DURATION; - use crate::{core::ics04_channel::packet::Packet, events::IbcEvent}; - - #[test] - fn write_ack_packet_processing() { - struct Test { - name: String, - ctx: MockContext, - packet: Packet, - ack: Vec, - want_pass: bool, - } - - let context = MockContext::default(); - - let client_height = Height::new(0, 1); - - let mut packet: Packet = get_dummy_raw_packet(1, 
6).try_into().unwrap(); - packet.sequence = 1.into(); - packet.data = vec![0]; - - let ack = vec![0]; - let ack_null = Vec::new(); - - let dest_channel_end = ChannelEnd::new( - State::Open, - Order::default(), - Counterparty::new(packet.source_port.clone(), Some(packet.source_channel)), - vec![ConnectionId::default()], - Version::ics20(), - ); - - let connection_end = ConnectionEnd::new( - ConnectionState::Open, - ClientId::default(), - ConnectionCounterparty::new( - ClientId::default(), - Some(ConnectionId::default()), - Default::default(), - ), - get_compatible_versions(), - ZERO_DURATION, - ); - - let tests: Vec = vec![ - Test { - name: "Processing fails because no channel exists in the context".to_string(), - ctx: context.clone(), - packet: packet.clone(), - ack: ack.clone(), - want_pass: false, - }, - Test { - name: "Good parameters".to_string(), - ctx: context - .clone() - .with_client(&ClientId::default(), client_height) - .with_connection(ConnectionId::default(), connection_end.clone()) - .with_channel( - packet.destination_port.clone(), - packet.destination_channel, - dest_channel_end.clone(), - ), - packet: packet.clone(), - ack, - want_pass: true, - }, - Test { - name: "Zero ack".to_string(), - ctx: context - .with_client(&ClientId::default(), Height::default()) - .with_connection(ConnectionId::default(), connection_end) - .with_channel(PortId::default(), ChannelId::default(), dest_channel_end), - packet, - ack: ack_null, - want_pass: false, - }, - ] - .into_iter() - .collect(); - - for test in tests { - let res = process(&test.ctx, test.packet.clone(), test.ack); - // Additionally check the events and the output objects in the result. - match res { - Ok(proto_output) => { - assert!( + use crate::prelude::*; + + use test_log::test; + + use crate::{ + core::{ + ics02_client::{context::ClientReader, height::Height}, + ics03_connection::{ + connection::{ + ConnectionEnd, Counterparty as ConnectionCounterparty, State as ConnectionState, + }, + version::get_compatible_versions, + }, + ics04_channel::{ + channel::{ChannelEnd, Counterparty, Order, State}, + handler::write_acknowledgement::process, + packet::{test_utils::get_dummy_raw_packet, Packet}, + Version, + }, + ics24_host::identifier::{ChannelId, ClientId, ConnectionId, PortId}, + }, + events::IbcEvent, + mock::context::{MockClientTypes, MockContext}, + timestamp::ZERO_DURATION, + }; + + #[test] + fn write_ack_packet_processing() { + struct Test { + name: String, + ctx: MockContext, + packet: Packet, + ack: Vec, + want_pass: bool, + } + + let context = MockContext::default(); + + let client_height = Height::new(0, 1); + + let mut packet: Packet = get_dummy_raw_packet(1, 6).try_into().unwrap(); + packet.sequence = 1.into(); + packet.data = vec![0]; + + let ack = vec![0]; + let ack_null = Vec::new(); + + let dest_channel_end = ChannelEnd::new( + State::Open, + Order::default(), + Counterparty::new(packet.source_port.clone(), Some(packet.source_channel)), + vec![ConnectionId::default()], + Version::ics20(), + ); + + let connection_end = ConnectionEnd::new( + ConnectionState::Open, + ClientId::default(), + ConnectionCounterparty::new( + ClientId::default(), + Some(ConnectionId::default()), + Default::default(), + ), + get_compatible_versions(), + ZERO_DURATION, + ); + + let tests: Vec = vec![ + Test { + name: "Processing fails because no channel exists in the context".to_string(), + ctx: context.clone(), + packet: packet.clone(), + ack: ack.clone(), + want_pass: false, + }, + Test { + name: "Good parameters".to_string(), + ctx: 
context + .clone() + .with_client(&ClientId::default(), client_height) + .with_connection(ConnectionId::default(), connection_end.clone()) + .with_channel( + packet.destination_port.clone(), + packet.destination_channel, + dest_channel_end.clone(), + ), + packet: packet.clone(), + ack, + want_pass: true, + }, + Test { + name: "Zero ack".to_string(), + ctx: context + .with_client(&ClientId::default(), Height::default()) + .with_connection(ConnectionId::default(), connection_end) + .with_channel(PortId::default(), ChannelId::default(), dest_channel_end), + packet, + ack: ack_null, + want_pass: false, + }, + ] + .into_iter() + .collect(); + + for test in tests { + let res = process(&test.ctx, test.packet.clone(), test.ack); + // Additionally check the events and the output objects in the result. + match res { + Ok(proto_output) => { + assert!( test.want_pass, "write_ack: test passed but was supposed to fail for test: {}, \nparams {:?} {:?}", test.name, @@ -184,24 +192,24 @@ mod tests { test.ctx.clone() ); - assert!(!proto_output.events.is_empty()); // Some events must exist. - - for e in proto_output.events.iter() { - assert!(matches!(e, &IbcEvent::WriteAcknowledgement(_))); - assert_eq!(e.height(), test.ctx.host_height()); - } - } - Err(e) => { - assert!( - !test.want_pass, - "write_ack: did not pass test: {}, \nparams {:?} {:?} error: {:?}", - test.name, - test.packet.clone(), - test.ctx.clone(), - e, - ); - } - } - } - } + assert!(!proto_output.events.is_empty()); // Some events must exist. + + for e in proto_output.events.iter() { + assert!(matches!(e, &IbcEvent::WriteAcknowledgement(_))); + assert_eq!(e.height(), test.ctx.host_height()); + } + }, + Err(e) => { + assert!( + !test.want_pass, + "write_ack: did not pass test: {}, \nparams {:?} {:?} error: {:?}", + test.name, + test.packet.clone(), + test.ctx.clone(), + e, + ); + }, + } + } + } } diff --git a/modules/src/core/ics04_channel/msgs.rs b/modules/src/core/ics04_channel/msgs.rs index 0437b9cd16..e9b57ac0b5 100644 --- a/modules/src/core/ics04_channel/msgs.rs +++ b/modules/src/core/ics04_channel/msgs.rs @@ -1,18 +1,19 @@ //! Message definitions for all ICS4 domain types: channel open & close handshake datagrams, as well //! as packets. 
-use crate::core::ics04_channel::error::Error; -use crate::core::ics04_channel::msgs::acknowledgement::MsgAcknowledgement; -use crate::core::ics04_channel::msgs::chan_close_confirm::MsgChannelCloseConfirm; -use crate::core::ics04_channel::msgs::chan_close_init::MsgChannelCloseInit; -use crate::core::ics04_channel::msgs::chan_open_ack::MsgChannelOpenAck; -use crate::core::ics04_channel::msgs::chan_open_confirm::MsgChannelOpenConfirm; -use crate::core::ics04_channel::msgs::chan_open_init::MsgChannelOpenInit; -use crate::core::ics04_channel::msgs::chan_open_try::MsgChannelOpenTry; -use crate::core::ics04_channel::msgs::recv_packet::MsgRecvPacket; -use crate::core::ics04_channel::msgs::timeout::MsgTimeout; -use crate::core::ics04_channel::msgs::timeout_on_close::MsgTimeoutOnClose; -use crate::core::ics26_routing::context::{Ics26Context, ModuleId}; +use crate::core::{ + ics04_channel::{ + error::Error, + msgs::{ + acknowledgement::MsgAcknowledgement, chan_close_confirm::MsgChannelCloseConfirm, + chan_close_init::MsgChannelCloseInit, chan_open_ack::MsgChannelOpenAck, + chan_open_confirm::MsgChannelOpenConfirm, chan_open_init::MsgChannelOpenInit, + chan_open_try::MsgChannelOpenTry, recv_packet::MsgRecvPacket, timeout::MsgTimeout, + timeout_on_close::MsgTimeoutOnClose, + }, + }, + ics26_routing::context::{Ics26Context, ModuleId}, +}; // Opening handshake messages. pub mod chan_open_ack; @@ -33,44 +34,38 @@ pub mod timeout_on_close; /// Enumeration of all possible messages that the ICS4 protocol processes. #[derive(Clone, Debug, PartialEq)] pub enum ChannelMsg { - ChannelOpenInit(MsgChannelOpenInit), - ChannelOpenTry(MsgChannelOpenTry), - ChannelOpenAck(MsgChannelOpenAck), - ChannelOpenConfirm(MsgChannelOpenConfirm), - ChannelCloseInit(MsgChannelCloseInit), - ChannelCloseConfirm(MsgChannelCloseConfirm), + ChannelOpenInit(MsgChannelOpenInit), + ChannelOpenTry(MsgChannelOpenTry), + ChannelOpenAck(MsgChannelOpenAck), + ChannelOpenConfirm(MsgChannelOpenConfirm), + ChannelCloseInit(MsgChannelCloseInit), + ChannelCloseConfirm(MsgChannelCloseConfirm), } impl ChannelMsg { - pub(super) fn lookup_module(&self, ctx: &impl Ics26Context) -> Result { - let module_id = match self { - ChannelMsg::ChannelOpenInit(msg) => ctx - .lookup_module_by_port(&msg.port_id) - .map_err(Error::ics05_port)?, - ChannelMsg::ChannelOpenTry(msg) => ctx - .lookup_module_by_port(&msg.port_id) - .map_err(Error::ics05_port)?, - ChannelMsg::ChannelOpenAck(msg) => ctx - .lookup_module_by_port(&msg.port_id) - .map_err(Error::ics05_port)?, - ChannelMsg::ChannelOpenConfirm(msg) => ctx - .lookup_module_by_port(&msg.port_id) - .map_err(Error::ics05_port)?, - ChannelMsg::ChannelCloseInit(msg) => ctx - .lookup_module_by_port(&msg.port_id) - .map_err(Error::ics05_port)?, - ChannelMsg::ChannelCloseConfirm(msg) => ctx - .lookup_module_by_port(&msg.port_id) - .map_err(Error::ics05_port)?, - }; - Ok(module_id) - } + pub(super) fn lookup_module(&self, ctx: &impl Ics26Context) -> Result { + let module_id = match self { + ChannelMsg::ChannelOpenInit(msg) => + ctx.lookup_module_by_port(&msg.port_id).map_err(Error::ics05_port)?, + ChannelMsg::ChannelOpenTry(msg) => + ctx.lookup_module_by_port(&msg.port_id).map_err(Error::ics05_port)?, + ChannelMsg::ChannelOpenAck(msg) => + ctx.lookup_module_by_port(&msg.port_id).map_err(Error::ics05_port)?, + ChannelMsg::ChannelOpenConfirm(msg) => + ctx.lookup_module_by_port(&msg.port_id).map_err(Error::ics05_port)?, + ChannelMsg::ChannelCloseInit(msg) => + 
ctx.lookup_module_by_port(&msg.port_id).map_err(Error::ics05_port)?, + ChannelMsg::ChannelCloseConfirm(msg) => + ctx.lookup_module_by_port(&msg.port_id).map_err(Error::ics05_port)?, + }; + Ok(module_id) + } } #[derive(Clone, Debug, PartialEq)] pub enum PacketMsg { - RecvPacket(MsgRecvPacket), - AckPacket(MsgAcknowledgement), - ToPacket(MsgTimeout), - ToClosePacket(MsgTimeoutOnClose), + RecvPacket(MsgRecvPacket), + AckPacket(MsgAcknowledgement), + ToPacket(MsgTimeout), + ToClosePacket(MsgTimeoutOnClose), } diff --git a/modules/src/core/ics04_channel/msgs/acknowledgement.rs b/modules/src/core/ics04_channel/msgs/acknowledgement.rs index 95bf93279f..4d3ece1fc5 100644 --- a/modules/src/core/ics04_channel/msgs/acknowledgement.rs +++ b/modules/src/core/ics04_channel/msgs/acknowledgement.rs @@ -4,11 +4,12 @@ use tendermint_proto::Protobuf; use ibc_proto::ibc::core::channel::v1::MsgAcknowledgement as RawMsgAcknowledgement; -use crate::core::ics04_channel::error::Error; -use crate::core::ics04_channel::packet::Packet; -use crate::proofs::Proofs; -use crate::signer::Signer; -use crate::tx_msg::Msg; +use crate::{ + core::ics04_channel::{error::Error, packet::Packet}, + proofs::Proofs, + signer::Signer, + tx_msg::Msg, +}; pub const TYPE_URL: &str = "/ibc.core.channel.v1.MsgAcknowledgement"; @@ -17,212 +18,193 @@ pub const TYPE_URL: &str = "/ibc.core.channel.v1.MsgAcknowledgement"; pub struct Acknowledgement(Vec); impl Acknowledgement { - pub fn into_bytes(self) -> Vec { - self.0 - } + pub fn into_bytes(self) -> Vec { + self.0 + } - pub fn from_bytes(bytes: Vec) -> Self { - bytes.into() - } + pub fn from_bytes(bytes: Vec) -> Self { + bytes.into() + } } impl From> for Acknowledgement { - fn from(bytes: Vec) -> Self { - Self(bytes) - } + fn from(bytes: Vec) -> Self { + Self(bytes) + } } impl AsRef<[u8]> for Acknowledgement { - fn as_ref(&self) -> &[u8] { - self.0.as_slice() - } + fn as_ref(&self) -> &[u8] { + self.0.as_slice() + } } /// /// Message definition for packet acknowledgements. 
-/// #[derive(Clone, Debug, PartialEq)] pub struct MsgAcknowledgement { - pub packet: Packet, - pub acknowledgement: Acknowledgement, - pub proofs: Proofs, - pub signer: Signer, + pub packet: Packet, + pub acknowledgement: Acknowledgement, + pub proofs: Proofs, + pub signer: Signer, } impl MsgAcknowledgement { - pub fn new( - packet: Packet, - acknowledgement: Acknowledgement, - proofs: Proofs, - signer: Signer, - ) -> MsgAcknowledgement { - Self { - packet, - acknowledgement, - proofs, - signer, - } - } - - pub fn acknowledgement(&self) -> &Acknowledgement { - &self.acknowledgement - } - - pub fn proofs(&self) -> &Proofs { - &self.proofs - } + pub fn new( + packet: Packet, + acknowledgement: Acknowledgement, + proofs: Proofs, + signer: Signer, + ) -> MsgAcknowledgement { + Self { packet, acknowledgement, proofs, signer } + } + + pub fn acknowledgement(&self) -> &Acknowledgement { + &self.acknowledgement + } + + pub fn proofs(&self) -> &Proofs { + &self.proofs + } } impl Msg for MsgAcknowledgement { - type ValidationError = Error; - type Raw = RawMsgAcknowledgement; + type ValidationError = Error; + type Raw = RawMsgAcknowledgement; - fn route(&self) -> String { - crate::keys::ROUTER_KEY.to_string() - } + fn route(&self) -> String { + crate::keys::ROUTER_KEY.to_string() + } - fn type_url(&self) -> String { - TYPE_URL.to_string() - } + fn type_url(&self) -> String { + TYPE_URL.to_string() + } } impl Protobuf for MsgAcknowledgement {} impl TryFrom for MsgAcknowledgement { - type Error = Error; - - fn try_from(raw_msg: RawMsgAcknowledgement) -> Result { - let proofs = Proofs::new( - raw_msg - .proof_acked - .try_into() - .map_err(Error::invalid_proof)?, - None, - None, - None, - raw_msg - .proof_height - .ok_or_else(Error::missing_height)? - .into(), - ) - .map_err(Error::invalid_proof)?; - - Ok(MsgAcknowledgement { - packet: raw_msg - .packet - .ok_or_else(Error::missing_packet)? 
- .try_into()?, - acknowledgement: raw_msg.acknowledgement.into(), - signer: raw_msg.signer.parse().map_err(Error::signer)?, - proofs, - }) - } + type Error = Error; + + fn try_from(raw_msg: RawMsgAcknowledgement) -> Result { + let proofs = Proofs::new( + raw_msg.proof_acked.try_into().map_err(Error::invalid_proof)?, + None, + None, + None, + raw_msg.proof_height.ok_or_else(Error::missing_height)?.into(), + ) + .map_err(Error::invalid_proof)?; + + Ok(MsgAcknowledgement { + packet: raw_msg.packet.ok_or_else(Error::missing_packet)?.try_into()?, + acknowledgement: raw_msg.acknowledgement.into(), + signer: raw_msg.signer.parse().map_err(Error::signer)?, + proofs, + }) + } } impl From for RawMsgAcknowledgement { - fn from(domain_msg: MsgAcknowledgement) -> Self { - RawMsgAcknowledgement { - packet: Some(domain_msg.packet.into()), - acknowledgement: domain_msg.acknowledgement.into_bytes(), - signer: domain_msg.signer.to_string(), - proof_height: Some(domain_msg.proofs.height().into()), - proof_acked: domain_msg.proofs.object_proof().clone().into(), - } - } + fn from(domain_msg: MsgAcknowledgement) -> Self { + RawMsgAcknowledgement { + packet: Some(domain_msg.packet.into()), + acknowledgement: domain_msg.acknowledgement.into_bytes(), + signer: domain_msg.signer.to_string(), + proof_height: Some(domain_msg.proofs.height().into()), + proof_acked: domain_msg.proofs.object_proof().clone().into(), + } + } } #[cfg(test)] pub mod test_util { - use ibc_proto::ibc::core::channel::v1::MsgAcknowledgement as RawMsgAcknowledgement; - use ibc_proto::ibc::core::client::v1::Height as RawHeight; - - use crate::core::ics04_channel::packet::test_utils::get_dummy_raw_packet; - use crate::test_utils::{get_dummy_bech32_account, get_dummy_proof}; - - /// Returns a dummy `RawMsgAcknowledgement`, for testing only! - /// The `height` parametrizes both the proof height as well as the timeout height. - pub fn get_dummy_raw_msg_acknowledgement(height: u64) -> RawMsgAcknowledgement { - RawMsgAcknowledgement { - packet: Some(get_dummy_raw_packet(height, 1)), - acknowledgement: get_dummy_proof(), - proof_acked: get_dummy_proof(), - proof_height: Some(RawHeight { - revision_number: 0, - revision_height: height, - }), - signer: get_dummy_bech32_account(), - } - } + use ibc_proto::ibc::core::{ + channel::v1::MsgAcknowledgement as RawMsgAcknowledgement, client::v1::Height as RawHeight, + }; + + use crate::{ + core::ics04_channel::packet::test_utils::get_dummy_raw_packet, + test_utils::{get_dummy_bech32_account, get_dummy_proof}, + }; + + /// Returns a dummy `RawMsgAcknowledgement`, for testing only! + /// The `height` parametrizes both the proof height as well as the timeout height. 
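// Editorial sketch, not part of the patch: the TryFrom/From pair above is lossless, so the dummy
// helper documented here round-trips cleanly; this mirrors the `to_and_from` tests used for the
// other channel messages in this patch. Assumes the test_util items above are in scope.
#[cfg(test)]
fn acknowledgement_raw_round_trip() {
    let raw = get_dummy_raw_msg_acknowledgement(10);
    let msg = MsgAcknowledgement::try_from(raw.clone()).expect("dummy message is well-formed");
    let raw_back = RawMsgAcknowledgement::from(msg);
    assert_eq!(raw, raw_back); // every raw field survives the domain-type round trip
}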
+ pub fn get_dummy_raw_msg_acknowledgement(height: u64) -> RawMsgAcknowledgement { + RawMsgAcknowledgement { + packet: Some(get_dummy_raw_packet(height, 1)), + acknowledgement: get_dummy_proof(), + proof_acked: get_dummy_proof(), + proof_height: Some(RawHeight { revision_number: 0, revision_height: height }), + signer: get_dummy_bech32_account(), + } + } } #[cfg(test)] mod test { - use crate::prelude::*; - - use test_log::test; - - use ibc_proto::ibc::core::channel::v1::MsgAcknowledgement as RawMsgAcknowledgement; - - use crate::core::ics04_channel::error::Error; - use crate::core::ics04_channel::msgs::acknowledgement::test_util::get_dummy_raw_msg_acknowledgement; - use crate::core::ics04_channel::msgs::acknowledgement::MsgAcknowledgement; - use crate::test_utils::get_dummy_bech32_account; - - #[test] - fn msg_acknowledgment_try_from_raw() { - struct Test { - name: String, - raw: RawMsgAcknowledgement, - want_pass: bool, - } - - let height = 50; - let default_raw_msg = get_dummy_raw_msg_acknowledgement(height); - - let tests: Vec = vec![ - Test { - name: "Good parameters".to_string(), - raw: default_raw_msg.clone(), - want_pass: true, - }, - Test { - name: "Missing packet".to_string(), - raw: RawMsgAcknowledgement { - packet: None, - ..default_raw_msg.clone() - }, - want_pass: false, - }, - Test { - name: "Missing proof height".to_string(), - raw: RawMsgAcknowledgement { - proof_height: None, - ..default_raw_msg.clone() - }, - want_pass: false, - }, - Test { - name: "Empty signer".to_string(), - raw: RawMsgAcknowledgement { - signer: get_dummy_bech32_account(), - ..default_raw_msg.clone() - }, - want_pass: true, - }, - Test { - name: "Empty proof acked".to_string(), - raw: RawMsgAcknowledgement { - proof_acked: Vec::new(), - ..default_raw_msg - }, - want_pass: false, - }, - ]; - - for test in tests { - let res_msg: Result = test.raw.clone().try_into(); - - assert_eq!( + use crate::prelude::*; + + use test_log::test; + + use ibc_proto::ibc::core::channel::v1::MsgAcknowledgement as RawMsgAcknowledgement; + + use crate::{ + core::ics04_channel::{ + error::Error, + msgs::acknowledgement::{ + test_util::get_dummy_raw_msg_acknowledgement, MsgAcknowledgement, + }, + }, + test_utils::get_dummy_bech32_account, + }; + + #[test] + fn msg_acknowledgment_try_from_raw() { + struct Test { + name: String, + raw: RawMsgAcknowledgement, + want_pass: bool, + } + + let height = 50; + let default_raw_msg = get_dummy_raw_msg_acknowledgement(height); + + let tests: Vec = vec![ + Test { + name: "Good parameters".to_string(), + raw: default_raw_msg.clone(), + want_pass: true, + }, + Test { + name: "Missing packet".to_string(), + raw: RawMsgAcknowledgement { packet: None, ..default_raw_msg.clone() }, + want_pass: false, + }, + Test { + name: "Missing proof height".to_string(), + raw: RawMsgAcknowledgement { proof_height: None, ..default_raw_msg.clone() }, + want_pass: false, + }, + Test { + name: "Empty signer".to_string(), + raw: RawMsgAcknowledgement { + signer: get_dummy_bech32_account(), + ..default_raw_msg.clone() + }, + want_pass: true, + }, + Test { + name: "Empty proof acked".to_string(), + raw: RawMsgAcknowledgement { proof_acked: Vec::new(), ..default_raw_msg }, + want_pass: false, + }, + ]; + + for test in tests { + let res_msg: Result = test.raw.clone().try_into(); + + assert_eq!( res_msg.is_ok(), test.want_pass, "MsgAcknowledgement::try_from failed for test {} \nraw message: {:?} with error: {:?}", @@ -230,6 +212,6 @@ mod test { test.raw, res_msg.err() ); - } - } + } + } } diff --git 
a/modules/src/core/ics04_channel/msgs/chan_close_confirm.rs b/modules/src/core/ics04_channel/msgs/chan_close_confirm.rs index 2dc6bd27dc..70eab6b1d3 100644 --- a/modules/src/core/ics04_channel/msgs/chan_close_confirm.rs +++ b/modules/src/core/ics04_channel/msgs/chan_close_confirm.rs @@ -4,138 +4,132 @@ use tendermint_proto::Protobuf; use ibc_proto::ibc::core::channel::v1::MsgChannelCloseConfirm as RawMsgChannelCloseConfirm; -use crate::core::ics04_channel::error::Error; -use crate::core::ics24_host::identifier::{ChannelId, PortId}; -use crate::proofs::Proofs; -use crate::signer::Signer; -use crate::tx_msg::Msg; +use crate::{ + core::{ + ics04_channel::error::Error, + ics24_host::identifier::{ChannelId, PortId}, + }, + proofs::Proofs, + signer::Signer, + tx_msg::Msg, +}; pub const TYPE_URL: &str = "/ibc.core.channel.v1.MsgChannelCloseConfirm"; /// /// Message definition for the second step in the channel close handshake (the `ChanCloseConfirm` /// datagram). -/// #[derive(Clone, Debug, PartialEq)] pub struct MsgChannelCloseConfirm { - pub port_id: PortId, - pub channel_id: ChannelId, - pub proofs: Proofs, - pub signer: Signer, + pub port_id: PortId, + pub channel_id: ChannelId, + pub proofs: Proofs, + pub signer: Signer, } impl MsgChannelCloseConfirm { - pub fn new(port_id: PortId, channel_id: ChannelId, proofs: Proofs, signer: Signer) -> Self { - Self { - port_id, - channel_id, - proofs, - signer, - } - } + pub fn new(port_id: PortId, channel_id: ChannelId, proofs: Proofs, signer: Signer) -> Self { + Self { port_id, channel_id, proofs, signer } + } } impl Msg for MsgChannelCloseConfirm { - type ValidationError = Error; - type Raw = RawMsgChannelCloseConfirm; + type ValidationError = Error; + type Raw = RawMsgChannelCloseConfirm; - fn route(&self) -> String { - crate::keys::ROUTER_KEY.to_string() - } + fn route(&self) -> String { + crate::keys::ROUTER_KEY.to_string() + } - fn type_url(&self) -> String { - TYPE_URL.to_string() - } + fn type_url(&self) -> String { + TYPE_URL.to_string() + } } impl Protobuf for MsgChannelCloseConfirm {} impl TryFrom for MsgChannelCloseConfirm { - type Error = Error; - - fn try_from(raw_msg: RawMsgChannelCloseConfirm) -> Result { - let proofs = Proofs::new( - raw_msg - .proof_init - .try_into() - .map_err(Error::invalid_proof)?, - None, - None, - None, - raw_msg - .proof_height - .ok_or_else(Error::missing_height)? 
- .into(), - ) - .map_err(Error::invalid_proof)?; - - Ok(MsgChannelCloseConfirm { - port_id: raw_msg.port_id.parse().map_err(Error::identifier)?, - channel_id: raw_msg.channel_id.parse().map_err(Error::identifier)?, - proofs, - signer: raw_msg.signer.parse().map_err(Error::signer)?, - }) - } + type Error = Error; + + fn try_from(raw_msg: RawMsgChannelCloseConfirm) -> Result { + let proofs = Proofs::new( + raw_msg.proof_init.try_into().map_err(Error::invalid_proof)?, + None, + None, + None, + raw_msg.proof_height.ok_or_else(Error::missing_height)?.into(), + ) + .map_err(Error::invalid_proof)?; + + Ok(MsgChannelCloseConfirm { + port_id: raw_msg.port_id.parse().map_err(Error::identifier)?, + channel_id: raw_msg.channel_id.parse().map_err(Error::identifier)?, + proofs, + signer: raw_msg.signer.parse().map_err(Error::signer)?, + }) + } } impl From for RawMsgChannelCloseConfirm { - fn from(domain_msg: MsgChannelCloseConfirm) -> Self { - RawMsgChannelCloseConfirm { - port_id: domain_msg.port_id.to_string(), - channel_id: domain_msg.channel_id.to_string(), - proof_init: domain_msg.proofs.object_proof().clone().into(), - proof_height: Some(domain_msg.proofs.height().into()), - signer: domain_msg.signer.to_string(), - } - } + fn from(domain_msg: MsgChannelCloseConfirm) -> Self { + RawMsgChannelCloseConfirm { + port_id: domain_msg.port_id.to_string(), + channel_id: domain_msg.channel_id.to_string(), + proof_init: domain_msg.proofs.object_proof().clone().into(), + proof_height: Some(domain_msg.proofs.height().into()), + signer: domain_msg.signer.to_string(), + } + } } #[cfg(test)] pub mod test_util { - use crate::prelude::*; - use ibc_proto::ibc::core::channel::v1::MsgChannelCloseConfirm as RawMsgChannelCloseConfirm; - use ibc_proto::ibc::core::client::v1::Height; - - use crate::core::ics24_host::identifier::{ChannelId, PortId}; - use crate::test_utils::{get_dummy_bech32_account, get_dummy_proof}; - - /// Returns a dummy `RawMsgChannelCloseConfirm`, for testing only! - pub fn get_dummy_raw_msg_chan_close_confirm(proof_height: u64) -> RawMsgChannelCloseConfirm { - RawMsgChannelCloseConfirm { - port_id: PortId::default().to_string(), - channel_id: ChannelId::default().to_string(), - proof_init: get_dummy_proof(), - proof_height: Some(Height { - revision_number: 0, - revision_height: proof_height, - }), - signer: get_dummy_bech32_account(), - } - } + use crate::prelude::*; + use ibc_proto::ibc::core::{ + channel::v1::MsgChannelCloseConfirm as RawMsgChannelCloseConfirm, client::v1::Height, + }; + + use crate::{ + core::ics24_host::identifier::{ChannelId, PortId}, + test_utils::{get_dummy_bech32_account, get_dummy_proof}, + }; + + /// Returns a dummy `RawMsgChannelCloseConfirm`, for testing only! 
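// Editorial sketch, not part of the patch: `try_from` above runs `port_id` and `channel_id`
// through the ICS-24 identifier parsers, so malformed identifiers are rejected at decode time.
// Assumes the dummy helper documented here is in scope.
#[cfg(test)]
fn chan_close_confirm_rejects_bad_port() {
    let raw = RawMsgChannelCloseConfirm {
        port_id: "p34/".to_string(), // '/' is the path separator and is not a valid identifier character
        ..get_dummy_raw_msg_chan_close_confirm(10)
    };
    assert!(MsgChannelCloseConfirm::try_from(raw).is_err()); // fails with Error::identifier
}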
+ pub fn get_dummy_raw_msg_chan_close_confirm(proof_height: u64) -> RawMsgChannelCloseConfirm { + RawMsgChannelCloseConfirm { + port_id: PortId::default().to_string(), + channel_id: ChannelId::default().to_string(), + proof_init: get_dummy_proof(), + proof_height: Some(Height { revision_number: 0, revision_height: proof_height }), + signer: get_dummy_bech32_account(), + } + } } #[cfg(test)] mod tests { - use crate::prelude::*; + use crate::prelude::*; - use ibc_proto::ibc::core::channel::v1::MsgChannelCloseConfirm as RawMsgChannelCloseConfirm; - use ibc_proto::ibc::core::client::v1::Height; + use ibc_proto::ibc::core::{ + channel::v1::MsgChannelCloseConfirm as RawMsgChannelCloseConfirm, client::v1::Height, + }; - use crate::core::ics04_channel::msgs::chan_close_confirm::test_util::get_dummy_raw_msg_chan_close_confirm; - use crate::core::ics04_channel::msgs::chan_close_confirm::MsgChannelCloseConfirm; + use crate::core::ics04_channel::msgs::chan_close_confirm::{ + test_util::get_dummy_raw_msg_chan_close_confirm, MsgChannelCloseConfirm, + }; - #[test] - fn parse_channel_close_confirm_msg() { - struct Test { - name: String, - raw: RawMsgChannelCloseConfirm, - want_pass: bool, - } + #[test] + fn parse_channel_close_confirm_msg() { + struct Test { + name: String, + raw: RawMsgChannelCloseConfirm, + want_pass: bool, + } - let proof_height = 10; - let default_raw_msg = get_dummy_raw_msg_chan_close_confirm(proof_height); + let proof_height = 10; + let default_raw_msg = get_dummy_raw_msg_chan_close_confirm(proof_height); - let tests: Vec = vec![ + let tests: Vec = vec![ Test { name: "Good parameters".to_string(), raw: default_raw_msg.clone(), @@ -208,10 +202,10 @@ mod tests { .into_iter() .collect(); - for test in tests { - let msg = MsgChannelCloseConfirm::try_from(test.raw.clone()); + for test in tests { + let msg = MsgChannelCloseConfirm::try_from(test.raw.clone()); - assert_eq!( + assert_eq!( test.want_pass, msg.is_ok(), "MsgChanCloseConfirm::try_from raw failed for test {}, \nraw msg {:?} with error {:?}", @@ -219,16 +213,16 @@ mod tests { test.raw, msg.err(), ); - } - } - - #[test] - fn to_and_from() { - let raw = get_dummy_raw_msg_chan_close_confirm(19); - let msg = MsgChannelCloseConfirm::try_from(raw.clone()).unwrap(); - let raw_back = RawMsgChannelCloseConfirm::from(msg.clone()); - let msg_back = MsgChannelCloseConfirm::try_from(raw_back.clone()).unwrap(); - assert_eq!(raw, raw_back); - assert_eq!(msg, msg_back); - } + } + } + + #[test] + fn to_and_from() { + let raw = get_dummy_raw_msg_chan_close_confirm(19); + let msg = MsgChannelCloseConfirm::try_from(raw.clone()).unwrap(); + let raw_back = RawMsgChannelCloseConfirm::from(msg.clone()); + let msg_back = MsgChannelCloseConfirm::try_from(raw_back.clone()).unwrap(); + assert_eq!(raw, raw_back); + assert_eq!(msg, msg_back); + } } diff --git a/modules/src/core/ics04_channel/msgs/chan_close_init.rs b/modules/src/core/ics04_channel/msgs/chan_close_init.rs index ceb85b5421..192e3d04ae 100644 --- a/modules/src/core/ics04_channel/msgs/chan_close_init.rs +++ b/modules/src/core/ics04_channel/msgs/chan_close_init.rs @@ -4,110 +4,112 @@ use tendermint_proto::Protobuf; use ibc_proto::ibc::core::channel::v1::MsgChannelCloseInit as RawMsgChannelCloseInit; -use crate::core::ics04_channel::error::Error; -use crate::core::ics24_host::identifier::{ChannelId, PortId}; -use crate::signer::Signer; -use crate::tx_msg::Msg; +use crate::{ + core::{ + ics04_channel::error::Error, + ics24_host::identifier::{ChannelId, PortId}, + }, + signer::Signer, + tx_msg::Msg, 
+}; pub const TYPE_URL: &str = "/ibc.core.channel.v1.MsgChannelCloseInit"; /// /// Message definition for the first step in the channel close handshake (`ChanCloseInit` datagram). -/// #[derive(Clone, Debug, PartialEq)] pub struct MsgChannelCloseInit { - pub port_id: PortId, - pub channel_id: ChannelId, - pub signer: Signer, + pub port_id: PortId, + pub channel_id: ChannelId, + pub signer: Signer, } impl MsgChannelCloseInit { - pub fn new(port_id: PortId, channel_id: ChannelId, signer: Signer) -> Self { - Self { - port_id, - channel_id, - signer, - } - } + pub fn new(port_id: PortId, channel_id: ChannelId, signer: Signer) -> Self { + Self { port_id, channel_id, signer } + } } impl Msg for MsgChannelCloseInit { - type ValidationError = Error; - type Raw = RawMsgChannelCloseInit; + type ValidationError = Error; + type Raw = RawMsgChannelCloseInit; - fn route(&self) -> String { - crate::keys::ROUTER_KEY.to_string() - } + fn route(&self) -> String { + crate::keys::ROUTER_KEY.to_string() + } - fn type_url(&self) -> String { - TYPE_URL.to_string() - } + fn type_url(&self) -> String { + TYPE_URL.to_string() + } } impl Protobuf for MsgChannelCloseInit {} impl TryFrom for MsgChannelCloseInit { - type Error = Error; - - fn try_from(raw_msg: RawMsgChannelCloseInit) -> Result { - Ok(MsgChannelCloseInit { - port_id: raw_msg.port_id.parse().map_err(Error::identifier)?, - channel_id: raw_msg.channel_id.parse().map_err(Error::identifier)?, - signer: raw_msg.signer.parse().map_err(Error::signer)?, - }) - } + type Error = Error; + + fn try_from(raw_msg: RawMsgChannelCloseInit) -> Result { + Ok(MsgChannelCloseInit { + port_id: raw_msg.port_id.parse().map_err(Error::identifier)?, + channel_id: raw_msg.channel_id.parse().map_err(Error::identifier)?, + signer: raw_msg.signer.parse().map_err(Error::signer)?, + }) + } } impl From for RawMsgChannelCloseInit { - fn from(domain_msg: MsgChannelCloseInit) -> Self { - RawMsgChannelCloseInit { - port_id: domain_msg.port_id.to_string(), - channel_id: domain_msg.channel_id.to_string(), - signer: domain_msg.signer.to_string(), - } - } + fn from(domain_msg: MsgChannelCloseInit) -> Self { + RawMsgChannelCloseInit { + port_id: domain_msg.port_id.to_string(), + channel_id: domain_msg.channel_id.to_string(), + signer: domain_msg.signer.to_string(), + } + } } #[cfg(test)] pub mod test_util { - use crate::prelude::*; - use ibc_proto::ibc::core::channel::v1::MsgChannelCloseInit as RawMsgChannelCloseInit; - - use crate::core::ics24_host::identifier::{ChannelId, PortId}; - use crate::test_utils::get_dummy_bech32_account; - - /// Returns a dummy `RawMsgChannelCloseInit`, for testing only! - pub fn get_dummy_raw_msg_chan_close_init() -> RawMsgChannelCloseInit { - RawMsgChannelCloseInit { - port_id: PortId::default().to_string(), - channel_id: ChannelId::default().to_string(), - signer: get_dummy_bech32_account(), - } - } + use crate::prelude::*; + use ibc_proto::ibc::core::channel::v1::MsgChannelCloseInit as RawMsgChannelCloseInit; + + use crate::{ + core::ics24_host::identifier::{ChannelId, PortId}, + test_utils::get_dummy_bech32_account, + }; + + /// Returns a dummy `RawMsgChannelCloseInit`, for testing only! 
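// Editorial sketch, not part of the patch: ChanCloseInit carries no proofs, so the domain message
// is just the two identifiers plus a signer, and `MsgChannelCloseInit::new` above is all that is
// needed to build one. The `signer` argument is left to the caller.
fn build_chan_close_init(signer: Signer) -> MsgChannelCloseInit {
    MsgChannelCloseInit::new(PortId::default(), ChannelId::default(), signer)
}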
+ pub fn get_dummy_raw_msg_chan_close_init() -> RawMsgChannelCloseInit { + RawMsgChannelCloseInit { + port_id: PortId::default().to_string(), + channel_id: ChannelId::default().to_string(), + signer: get_dummy_bech32_account(), + } + } } #[cfg(test)] mod tests { - use crate::prelude::*; + use crate::prelude::*; - use test_log::test; + use test_log::test; - use ibc_proto::ibc::core::channel::v1::MsgChannelCloseInit as RawMsgChannelCloseInit; + use ibc_proto::ibc::core::channel::v1::MsgChannelCloseInit as RawMsgChannelCloseInit; - use crate::core::ics04_channel::msgs::chan_close_init::test_util::get_dummy_raw_msg_chan_close_init; - use crate::core::ics04_channel::msgs::chan_close_init::MsgChannelCloseInit; + use crate::core::ics04_channel::msgs::chan_close_init::{ + test_util::get_dummy_raw_msg_chan_close_init, MsgChannelCloseInit, + }; - #[test] - fn parse_channel_close_init_msg() { - struct Test { - name: String, - raw: RawMsgChannelCloseInit, - want_pass: bool, - } + #[test] + fn parse_channel_close_init_msg() { + struct Test { + name: String, + raw: RawMsgChannelCloseInit, + want_pass: bool, + } - let default_raw_msg = get_dummy_raw_msg_chan_close_init(); + let default_raw_msg = get_dummy_raw_msg_chan_close_init(); - let tests: Vec = vec![ + let tests: Vec = vec![ Test { name: "Good parameters".to_string(), raw: default_raw_msg.clone(), @@ -165,27 +167,27 @@ mod tests { .into_iter() .collect(); - for test in tests { - let msg = MsgChannelCloseInit::try_from(test.raw.clone()); - - assert_eq!( - test.want_pass, - msg.is_ok(), - "MsgChanCloseInit::try_from failed for test {}, \nmsg {:?} with error {:?}", - test.name, - test.raw, - msg.err(), - ); - } - } - - #[test] - fn to_and_from() { - let raw = get_dummy_raw_msg_chan_close_init(); - let msg = MsgChannelCloseInit::try_from(raw.clone()).unwrap(); - let raw_back = RawMsgChannelCloseInit::from(msg.clone()); - let msg_back = MsgChannelCloseInit::try_from(raw_back.clone()).unwrap(); - assert_eq!(raw, raw_back); - assert_eq!(msg, msg_back); - } + for test in tests { + let msg = MsgChannelCloseInit::try_from(test.raw.clone()); + + assert_eq!( + test.want_pass, + msg.is_ok(), + "MsgChanCloseInit::try_from failed for test {}, \nmsg {:?} with error {:?}", + test.name, + test.raw, + msg.err(), + ); + } + } + + #[test] + fn to_and_from() { + let raw = get_dummy_raw_msg_chan_close_init(); + let msg = MsgChannelCloseInit::try_from(raw.clone()).unwrap(); + let raw_back = RawMsgChannelCloseInit::from(msg.clone()); + let msg_back = MsgChannelCloseInit::try_from(raw_back.clone()).unwrap(); + assert_eq!(raw, raw_back); + assert_eq!(msg, msg_back); + } } diff --git a/modules/src/core/ics04_channel/msgs/chan_open_ack.rs b/modules/src/core/ics04_channel/msgs/chan_open_ack.rs index 14cd6d85c6..a591e26e2f 100644 --- a/modules/src/core/ics04_channel/msgs/chan_open_ack.rs +++ b/modules/src/core/ics04_channel/msgs/chan_open_ack.rs @@ -1,10 +1,13 @@ -use crate::core::ics04_channel::error::Error; -use crate::core::ics04_channel::Version; -use crate::core::ics24_host::identifier::{ChannelId, PortId}; -use crate::prelude::*; -use crate::proofs::Proofs; -use crate::signer::Signer; -use crate::tx_msg::Msg; +use crate::{ + core::{ + ics04_channel::{error::Error, Version}, + ics24_host::identifier::{ChannelId, PortId}, + }, + prelude::*, + proofs::Proofs, + signer::Signer, + tx_msg::Msg, +}; use ibc_proto::ibc::core::channel::v1::MsgChannelOpenAck as RawMsgChannelOpenAck; use tendermint_proto::Protobuf; @@ -13,145 +16,134 @@ pub const TYPE_URL: &str = 
"/ibc.core.channel.v1.MsgChannelOpenAck"; /// /// Message definition for the third step in the channel open handshake (`ChanOpenAck` datagram). -/// #[derive(Clone, Debug, PartialEq)] pub struct MsgChannelOpenAck { - pub port_id: PortId, - pub channel_id: ChannelId, - pub counterparty_channel_id: ChannelId, - pub counterparty_version: Version, - pub proofs: Proofs, - pub signer: Signer, + pub port_id: PortId, + pub channel_id: ChannelId, + pub counterparty_channel_id: ChannelId, + pub counterparty_version: Version, + pub proofs: Proofs, + pub signer: Signer, } impl MsgChannelOpenAck { - pub fn new( - port_id: PortId, - channel_id: ChannelId, - counterparty_channel_id: ChannelId, - counterparty_version: Version, - proofs: Proofs, - signer: Signer, - ) -> Self { - Self { - port_id, - channel_id, - counterparty_channel_id, - counterparty_version, - proofs, - signer, - } - } + pub fn new( + port_id: PortId, + channel_id: ChannelId, + counterparty_channel_id: ChannelId, + counterparty_version: Version, + proofs: Proofs, + signer: Signer, + ) -> Self { + Self { port_id, channel_id, counterparty_channel_id, counterparty_version, proofs, signer } + } } impl Msg for MsgChannelOpenAck { - type ValidationError = Error; - type Raw = RawMsgChannelOpenAck; + type ValidationError = Error; + type Raw = RawMsgChannelOpenAck; - fn route(&self) -> String { - crate::keys::ROUTER_KEY.to_string() - } + fn route(&self) -> String { + crate::keys::ROUTER_KEY.to_string() + } - fn type_url(&self) -> String { - TYPE_URL.to_string() - } + fn type_url(&self) -> String { + TYPE_URL.to_string() + } } impl Protobuf for MsgChannelOpenAck {} impl TryFrom for MsgChannelOpenAck { - type Error = Error; + type Error = Error; - fn try_from(raw_msg: RawMsgChannelOpenAck) -> Result { - let proofs = Proofs::new( - raw_msg.proof_try.try_into().map_err(Error::invalid_proof)?, - None, - None, - None, - raw_msg - .proof_height - .ok_or_else(Error::missing_height)? 
- .into(), - ) - .map_err(Error::invalid_proof)?; + fn try_from(raw_msg: RawMsgChannelOpenAck) -> Result { + let proofs = Proofs::new( + raw_msg.proof_try.try_into().map_err(Error::invalid_proof)?, + None, + None, + None, + raw_msg.proof_height.ok_or_else(Error::missing_height)?.into(), + ) + .map_err(Error::invalid_proof)?; - Ok(MsgChannelOpenAck { - port_id: raw_msg.port_id.parse().map_err(Error::identifier)?, - channel_id: raw_msg.channel_id.parse().map_err(Error::identifier)?, - counterparty_channel_id: raw_msg - .counterparty_channel_id - .parse() - .map_err(Error::identifier)?, - counterparty_version: raw_msg.counterparty_version.into(), - proofs, - signer: raw_msg.signer.parse().map_err(Error::signer)?, - }) - } + Ok(MsgChannelOpenAck { + port_id: raw_msg.port_id.parse().map_err(Error::identifier)?, + channel_id: raw_msg.channel_id.parse().map_err(Error::identifier)?, + counterparty_channel_id: raw_msg + .counterparty_channel_id + .parse() + .map_err(Error::identifier)?, + counterparty_version: raw_msg.counterparty_version.into(), + proofs, + signer: raw_msg.signer.parse().map_err(Error::signer)?, + }) + } } impl From for RawMsgChannelOpenAck { - fn from(domain_msg: MsgChannelOpenAck) -> Self { - RawMsgChannelOpenAck { - port_id: domain_msg.port_id.to_string(), - channel_id: domain_msg.channel_id.to_string(), - counterparty_channel_id: domain_msg.counterparty_channel_id.to_string(), - counterparty_version: domain_msg.counterparty_version.to_string(), - proof_try: domain_msg.proofs.object_proof().clone().into(), - proof_height: Some(domain_msg.proofs.height().into()), - signer: domain_msg.signer.to_string(), - } - } + fn from(domain_msg: MsgChannelOpenAck) -> Self { + RawMsgChannelOpenAck { + port_id: domain_msg.port_id.to_string(), + channel_id: domain_msg.channel_id.to_string(), + counterparty_channel_id: domain_msg.counterparty_channel_id.to_string(), + counterparty_version: domain_msg.counterparty_version.to_string(), + proof_try: domain_msg.proofs.object_proof().clone().into(), + proof_height: Some(domain_msg.proofs.height().into()), + signer: domain_msg.signer.to_string(), + } + } } #[cfg(test)] pub mod test_util { - use crate::prelude::*; - use ibc_proto::ibc::core::channel::v1::MsgChannelOpenAck as RawMsgChannelOpenAck; + use crate::prelude::*; + use ibc_proto::ibc::core::channel::v1::MsgChannelOpenAck as RawMsgChannelOpenAck; - use crate::core::ics24_host::identifier::{ChannelId, PortId}; - use crate::test_utils::{get_dummy_bech32_account, get_dummy_proof}; - use ibc_proto::ibc::core::client::v1::Height; + use crate::{ + core::ics24_host::identifier::{ChannelId, PortId}, + test_utils::{get_dummy_bech32_account, get_dummy_proof}, + }; + use ibc_proto::ibc::core::client::v1::Height; - /// Returns a dummy `RawMsgChannelOpenAck`, for testing only! - pub fn get_dummy_raw_msg_chan_open_ack(proof_height: u64) -> RawMsgChannelOpenAck { - RawMsgChannelOpenAck { - port_id: PortId::default().to_string(), - channel_id: ChannelId::default().to_string(), - counterparty_channel_id: ChannelId::default().to_string(), - counterparty_version: "".to_string(), - proof_try: get_dummy_proof(), - proof_height: Some(Height { - revision_number: 0, - revision_height: proof_height, - }), - signer: get_dummy_bech32_account(), - } - } + /// Returns a dummy `RawMsgChannelOpenAck`, for testing only! 
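// Editorial sketch, not part of the patch: every handshake message in this patch packs its single
// object proof the same way, shown here for ChanOpenAck. The reading of the three `None` slots
// (roughly client / consensus / other-end proofs) is my interpretation of `Proofs::new` and should
// be checked against `crate::proofs`.
fn pack_chan_open_ack_proofs(raw: &RawMsgChannelOpenAck) -> Result<Proofs, Error> {
    Proofs::new(
        raw.proof_try.clone().try_into().map_err(Error::invalid_proof)?, // the object proof bytes
        None,
        None,
        None,
        raw.proof_height.clone().ok_or_else(Error::missing_height)?.into(), // proof height is mandatory
    )
    .map_err(Error::invalid_proof)
}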
+ pub fn get_dummy_raw_msg_chan_open_ack(proof_height: u64) -> RawMsgChannelOpenAck { + RawMsgChannelOpenAck { + port_id: PortId::default().to_string(), + channel_id: ChannelId::default().to_string(), + counterparty_channel_id: ChannelId::default().to_string(), + counterparty_version: "".to_string(), + proof_try: get_dummy_proof(), + proof_height: Some(Height { revision_number: 0, revision_height: proof_height }), + signer: get_dummy_bech32_account(), + } + } } #[cfg(test)] mod tests { - use crate::prelude::*; - use ibc_proto::ibc::core::channel::v1::MsgChannelOpenAck as RawMsgChannelOpenAck; - use test_log::test; + use crate::prelude::*; + use ibc_proto::ibc::core::channel::v1::MsgChannelOpenAck as RawMsgChannelOpenAck; + use test_log::test; - use crate::core::ics04_channel::msgs::chan_open_ack::test_util::get_dummy_raw_msg_chan_open_ack; - use crate::core::ics04_channel::msgs::chan_open_ack::MsgChannelOpenAck; + use crate::core::ics04_channel::msgs::chan_open_ack::{ + test_util::get_dummy_raw_msg_chan_open_ack, MsgChannelOpenAck, + }; - use ibc_proto::ibc::core::client::v1::Height; + use ibc_proto::ibc::core::client::v1::Height; - #[test] - fn parse_channel_open_ack_msg() { - struct Test { - name: String, - raw: RawMsgChannelOpenAck, - want_pass: bool, - } + #[test] + fn parse_channel_open_ack_msg() { + struct Test { + name: String, + raw: RawMsgChannelOpenAck, + want_pass: bool, + } - let proof_height = 20; - let default_raw_msg = get_dummy_raw_msg_chan_open_ack(proof_height); + let proof_height = 20; + let default_raw_msg = get_dummy_raw_msg_chan_open_ack(proof_height); - let tests: Vec = vec![ + let tests: Vec = vec![ Test { name: "Good parameters".to_string(), raw: default_raw_msg.clone(), @@ -276,27 +268,27 @@ mod tests { .into_iter() .collect(); - for test in tests { - let res_msg = MsgChannelOpenAck::try_from(test.raw.clone()); + for test in tests { + let res_msg = MsgChannelOpenAck::try_from(test.raw.clone()); - assert_eq!( - test.want_pass, - res_msg.is_ok(), - "MsgChanOpenAck::try_from raw failed for test {}, \nraw msg {:?} with error {:?}", - test.name, - test.raw, - res_msg.err(), - ); - } - } + assert_eq!( + test.want_pass, + res_msg.is_ok(), + "MsgChanOpenAck::try_from raw failed for test {}, \nraw msg {:?} with error {:?}", + test.name, + test.raw, + res_msg.err(), + ); + } + } - #[test] - fn to_and_from() { - let raw = get_dummy_raw_msg_chan_open_ack(100); - let msg = MsgChannelOpenAck::try_from(raw.clone()).unwrap(); - let raw_back = RawMsgChannelOpenAck::from(msg.clone()); - let msg_back = MsgChannelOpenAck::try_from(raw_back.clone()).unwrap(); - assert_eq!(raw, raw_back); - assert_eq!(msg, msg_back); - } + #[test] + fn to_and_from() { + let raw = get_dummy_raw_msg_chan_open_ack(100); + let msg = MsgChannelOpenAck::try_from(raw.clone()).unwrap(); + let raw_back = RawMsgChannelOpenAck::from(msg.clone()); + let msg_back = MsgChannelOpenAck::try_from(raw_back.clone()).unwrap(); + assert_eq!(raw, raw_back); + assert_eq!(msg, msg_back); + } } diff --git a/modules/src/core/ics04_channel/msgs/chan_open_confirm.rs b/modules/src/core/ics04_channel/msgs/chan_open_confirm.rs index 7ad004adb3..6cc73b1cc9 100644 --- a/modules/src/core/ics04_channel/msgs/chan_open_confirm.rs +++ b/modules/src/core/ics04_channel/msgs/chan_open_confirm.rs @@ -1,9 +1,13 @@ -use crate::core::ics04_channel::error::Error; -use crate::core::ics24_host::identifier::{ChannelId, PortId}; -use crate::prelude::*; -use crate::proofs::Proofs; -use crate::signer::Signer; -use crate::tx_msg::Msg; +use crate::{ + 
core::{ + ics04_channel::error::Error, + ics24_host::identifier::{ChannelId, PortId}, + }, + prelude::*, + proofs::Proofs, + signer::Signer, + tx_msg::Msg, +}; use ibc_proto::ibc::core::channel::v1::MsgChannelOpenConfirm as RawMsgChannelOpenConfirm; use tendermint_proto::Protobuf; @@ -13,125 +17,116 @@ pub const TYPE_URL: &str = "/ibc.core.channel.v1.MsgChannelOpenConfirm"; /// /// Message definition for the fourth step in the channel open handshake (`ChanOpenConfirm` /// datagram). -/// #[derive(Clone, Debug, PartialEq)] pub struct MsgChannelOpenConfirm { - pub port_id: PortId, - pub channel_id: ChannelId, - pub proofs: Proofs, - pub signer: Signer, + pub port_id: PortId, + pub channel_id: ChannelId, + pub proofs: Proofs, + pub signer: Signer, } impl MsgChannelOpenConfirm { - pub fn new(port_id: PortId, channel_id: ChannelId, proofs: Proofs, signer: Signer) -> Self { - Self { - port_id, - channel_id, - proofs, - signer, - } - } + pub fn new(port_id: PortId, channel_id: ChannelId, proofs: Proofs, signer: Signer) -> Self { + Self { port_id, channel_id, proofs, signer } + } } impl Msg for MsgChannelOpenConfirm { - type ValidationError = Error; - type Raw = RawMsgChannelOpenConfirm; + type ValidationError = Error; + type Raw = RawMsgChannelOpenConfirm; - fn route(&self) -> String { - crate::keys::ROUTER_KEY.to_string() - } + fn route(&self) -> String { + crate::keys::ROUTER_KEY.to_string() + } - fn type_url(&self) -> String { - TYPE_URL.to_string() - } + fn type_url(&self) -> String { + TYPE_URL.to_string() + } } impl Protobuf for MsgChannelOpenConfirm {} impl TryFrom for MsgChannelOpenConfirm { - type Error = Error; - - fn try_from(raw_msg: RawMsgChannelOpenConfirm) -> Result { - let proofs = Proofs::new( - raw_msg.proof_ack.try_into().map_err(Error::invalid_proof)?, - None, - None, - None, - raw_msg - .proof_height - .ok_or_else(Error::missing_height)? 
- .into(), - ) - .map_err(Error::invalid_proof)?; - - Ok(MsgChannelOpenConfirm { - port_id: raw_msg.port_id.parse().map_err(Error::identifier)?, - channel_id: raw_msg.channel_id.parse().map_err(Error::identifier)?, - proofs, - signer: raw_msg.signer.parse().map_err(Error::signer)?, - }) - } + type Error = Error; + + fn try_from(raw_msg: RawMsgChannelOpenConfirm) -> Result { + let proofs = Proofs::new( + raw_msg.proof_ack.try_into().map_err(Error::invalid_proof)?, + None, + None, + None, + raw_msg.proof_height.ok_or_else(Error::missing_height)?.into(), + ) + .map_err(Error::invalid_proof)?; + + Ok(MsgChannelOpenConfirm { + port_id: raw_msg.port_id.parse().map_err(Error::identifier)?, + channel_id: raw_msg.channel_id.parse().map_err(Error::identifier)?, + proofs, + signer: raw_msg.signer.parse().map_err(Error::signer)?, + }) + } } impl From for RawMsgChannelOpenConfirm { - fn from(domain_msg: MsgChannelOpenConfirm) -> Self { - RawMsgChannelOpenConfirm { - port_id: domain_msg.port_id.to_string(), - channel_id: domain_msg.channel_id.to_string(), - proof_ack: domain_msg.proofs.object_proof().clone().into(), - proof_height: Some(domain_msg.proofs.height().into()), - signer: domain_msg.signer.to_string(), - } - } + fn from(domain_msg: MsgChannelOpenConfirm) -> Self { + RawMsgChannelOpenConfirm { + port_id: domain_msg.port_id.to_string(), + channel_id: domain_msg.channel_id.to_string(), + proof_ack: domain_msg.proofs.object_proof().clone().into(), + proof_height: Some(domain_msg.proofs.height().into()), + signer: domain_msg.signer.to_string(), + } + } } #[cfg(test)] pub mod test_util { - use crate::prelude::*; - use ibc_proto::ibc::core::channel::v1::MsgChannelOpenConfirm as RawMsgChannelOpenConfirm; - - use crate::core::ics24_host::identifier::{ChannelId, PortId}; - use crate::test_utils::{get_dummy_bech32_account, get_dummy_proof}; - use ibc_proto::ibc::core::client::v1::Height; - - /// Returns a dummy `RawMsgChannelOpenConfirm`, for testing only! - pub fn get_dummy_raw_msg_chan_open_confirm(proof_height: u64) -> RawMsgChannelOpenConfirm { - RawMsgChannelOpenConfirm { - port_id: PortId::default().to_string(), - channel_id: ChannelId::default().to_string(), - proof_ack: get_dummy_proof(), - proof_height: Some(Height { - revision_number: 0, - revision_height: proof_height, - }), - signer: get_dummy_bech32_account(), - } - } + use crate::prelude::*; + use ibc_proto::ibc::core::channel::v1::MsgChannelOpenConfirm as RawMsgChannelOpenConfirm; + + use crate::{ + core::ics24_host::identifier::{ChannelId, PortId}, + test_utils::{get_dummy_bech32_account, get_dummy_proof}, + }; + use ibc_proto::ibc::core::client::v1::Height; + + /// Returns a dummy `RawMsgChannelOpenConfirm`, for testing only! 
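// Editorial sketch, not part of the patch: `try_from` above requires `proof_height`, so a raw
// message without it is rejected, matching the "Missing proof height" cases in the test tables of
// this patch. Assumes the dummy helper documented here is in scope.
#[cfg(test)]
fn chan_open_confirm_requires_proof_height() {
    let raw = RawMsgChannelOpenConfirm {
        proof_height: None,
        ..get_dummy_raw_msg_chan_open_confirm(10)
    };
    assert!(MsgChannelOpenConfirm::try_from(raw).is_err()); // fails with Error::missing_height
}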
+ pub fn get_dummy_raw_msg_chan_open_confirm(proof_height: u64) -> RawMsgChannelOpenConfirm { + RawMsgChannelOpenConfirm { + port_id: PortId::default().to_string(), + channel_id: ChannelId::default().to_string(), + proof_ack: get_dummy_proof(), + proof_height: Some(Height { revision_number: 0, revision_height: proof_height }), + signer: get_dummy_bech32_account(), + } + } } #[cfg(test)] mod tests { - use crate::prelude::*; - use ibc_proto::ibc::core::channel::v1::MsgChannelOpenConfirm as RawMsgChannelOpenConfirm; - use test_log::test; + use crate::prelude::*; + use ibc_proto::ibc::core::channel::v1::MsgChannelOpenConfirm as RawMsgChannelOpenConfirm; + use test_log::test; - use crate::core::ics04_channel::msgs::chan_open_confirm::test_util::get_dummy_raw_msg_chan_open_confirm; - use crate::core::ics04_channel::msgs::chan_open_confirm::MsgChannelOpenConfirm; + use crate::core::ics04_channel::msgs::chan_open_confirm::{ + test_util::get_dummy_raw_msg_chan_open_confirm, MsgChannelOpenConfirm, + }; - use ibc_proto::ibc::core::client::v1::Height; + use ibc_proto::ibc::core::client::v1::Height; - #[test] - fn parse_channel_open_confirm_msg() { - struct Test { - name: String, - raw: RawMsgChannelOpenConfirm, - want_pass: bool, - } + #[test] + fn parse_channel_open_confirm_msg() { + struct Test { + name: String, + raw: RawMsgChannelOpenConfirm, + want_pass: bool, + } - let proof_height = 78; - let default_raw_msg = get_dummy_raw_msg_chan_open_confirm(proof_height); + let proof_height = 78; + let default_raw_msg = get_dummy_raw_msg_chan_open_confirm(proof_height); - let tests: Vec = vec![ + let tests: Vec = vec![ Test { name: "Good parameters".to_string(), raw: default_raw_msg.clone(), @@ -208,27 +203,27 @@ mod tests { .into_iter() .collect(); - for test in tests { - let res_msg = MsgChannelOpenConfirm::try_from(test.raw.clone()); - - assert_eq!( - test.want_pass, - res_msg.is_ok(), - "MsgChanOpenConfirm::try_from failed for test {}, \nraw msg {:?} with error {:?}", - test.name, - test.raw, - res_msg.err(), - ); - } - } - - #[test] - fn to_and_from() { - let raw = get_dummy_raw_msg_chan_open_confirm(19); - let msg = MsgChannelOpenConfirm::try_from(raw.clone()).unwrap(); - let raw_back = RawMsgChannelOpenConfirm::from(msg.clone()); - let msg_back = MsgChannelOpenConfirm::try_from(raw_back.clone()).unwrap(); - assert_eq!(raw, raw_back); - assert_eq!(msg, msg_back); - } + for test in tests { + let res_msg = MsgChannelOpenConfirm::try_from(test.raw.clone()); + + assert_eq!( + test.want_pass, + res_msg.is_ok(), + "MsgChanOpenConfirm::try_from failed for test {}, \nraw msg {:?} with error {:?}", + test.name, + test.raw, + res_msg.err(), + ); + } + } + + #[test] + fn to_and_from() { + let raw = get_dummy_raw_msg_chan_open_confirm(19); + let msg = MsgChannelOpenConfirm::try_from(raw.clone()).unwrap(); + let raw_back = RawMsgChannelOpenConfirm::from(msg.clone()); + let msg_back = MsgChannelOpenConfirm::try_from(raw_back.clone()).unwrap(); + assert_eq!(raw, raw_back); + assert_eq!(msg, msg_back); + } } diff --git a/modules/src/core/ics04_channel/msgs/chan_open_init.rs b/modules/src/core/ics04_channel/msgs/chan_open_init.rs index b5b4130a90..c12d3b82a6 100644 --- a/modules/src/core/ics04_channel/msgs/chan_open_init.rs +++ b/modules/src/core/ics04_channel/msgs/chan_open_init.rs @@ -1,9 +1,12 @@ -use crate::core::ics04_channel::channel::ChannelEnd; -use crate::core::ics04_channel::error::Error; -use crate::core::ics24_host::identifier::PortId; -use crate::prelude::*; -use crate::signer::Signer; -use 
crate::tx_msg::Msg; +use crate::{ + core::{ + ics04_channel::{channel::ChannelEnd, error::Error}, + ics24_host::identifier::PortId, + }, + prelude::*, + signer::Signer, + tx_msg::Msg, +}; use ibc_proto::ibc::core::channel::v1::MsgChannelOpenInit as RawMsgChannelOpenInit; use tendermint_proto::Protobuf; @@ -12,149 +15,145 @@ pub const TYPE_URL: &str = "/ibc.core.channel.v1.MsgChannelOpenInit"; /// /// Message definition for the first step in the channel open handshake (`ChanOpenInit` datagram). -/// #[derive(Clone, Debug, PartialEq)] pub struct MsgChannelOpenInit { - pub port_id: PortId, - pub channel: ChannelEnd, - pub signer: Signer, + pub port_id: PortId, + pub channel: ChannelEnd, + pub signer: Signer, } impl MsgChannelOpenInit { - pub fn new(port_id: PortId, channel: ChannelEnd, signer: Signer) -> Self { - Self { - port_id, - channel, - signer, - } - } + pub fn new(port_id: PortId, channel: ChannelEnd, signer: Signer) -> Self { + Self { port_id, channel, signer } + } } impl Msg for MsgChannelOpenInit { - type ValidationError = Error; - type Raw = RawMsgChannelOpenInit; + type ValidationError = Error; + type Raw = RawMsgChannelOpenInit; - fn route(&self) -> String { - crate::keys::ROUTER_KEY.to_string() - } + fn route(&self) -> String { + crate::keys::ROUTER_KEY.to_string() + } - fn type_url(&self) -> String { - TYPE_URL.to_string() - } + fn type_url(&self) -> String { + TYPE_URL.to_string() + } } impl Protobuf for MsgChannelOpenInit {} impl TryFrom for MsgChannelOpenInit { - type Error = Error; - - fn try_from(raw_msg: RawMsgChannelOpenInit) -> Result { - Ok(MsgChannelOpenInit { - port_id: raw_msg.port_id.parse().map_err(Error::identifier)?, - channel: raw_msg - .channel - .ok_or_else(Error::missing_channel)? - .try_into()?, - signer: raw_msg.signer.parse().map_err(Error::signer)?, - }) - } + type Error = Error; + + fn try_from(raw_msg: RawMsgChannelOpenInit) -> Result { + Ok(MsgChannelOpenInit { + port_id: raw_msg.port_id.parse().map_err(Error::identifier)?, + channel: raw_msg.channel.ok_or_else(Error::missing_channel)?.try_into()?, + signer: raw_msg.signer.parse().map_err(Error::signer)?, + }) + } } impl From for RawMsgChannelOpenInit { - fn from(domain_msg: MsgChannelOpenInit) -> Self { - RawMsgChannelOpenInit { - port_id: domain_msg.port_id.to_string(), - channel: Some(domain_msg.channel.into()), - signer: domain_msg.signer.to_string(), - } - } + fn from(domain_msg: MsgChannelOpenInit) -> Self { + RawMsgChannelOpenInit { + port_id: domain_msg.port_id.to_string(), + channel: Some(domain_msg.channel.into()), + signer: domain_msg.signer.to_string(), + } + } } #[cfg(test)] pub mod test_util { - use crate::prelude::*; - use ibc_proto::ibc::core::channel::v1::MsgChannelOpenInit as RawMsgChannelOpenInit; - - use crate::core::ics04_channel::channel::test_util::get_dummy_raw_channel_end; - use crate::core::ics24_host::identifier::PortId; - use crate::test_utils::get_dummy_bech32_account; - - /// Returns a dummy `RawMsgChannelOpenInit`, for testing only! 
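// Editorial sketch, not part of the patch: ChanOpenInit embeds the full `ChannelEnd` instead of
// proofs, so a missing `channel` field is the main decode failure, as the "Missing channel" test
// case below also checks. Assumes the dummy helper below is in scope.
#[cfg(test)]
fn chan_open_init_requires_channel() {
    let raw = RawMsgChannelOpenInit { channel: None, ..get_dummy_raw_msg_chan_open_init() };
    assert!(MsgChannelOpenInit::try_from(raw).is_err()); // fails with Error::missing_channel
}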
- pub fn get_dummy_raw_msg_chan_open_init() -> RawMsgChannelOpenInit { - RawMsgChannelOpenInit { - port_id: PortId::default().to_string(), - channel: Some(get_dummy_raw_channel_end()), - signer: get_dummy_bech32_account(), - } - } + use crate::prelude::*; + use ibc_proto::ibc::core::channel::v1::MsgChannelOpenInit as RawMsgChannelOpenInit; + + use crate::{ + core::{ + ics04_channel::channel::test_util::get_dummy_raw_channel_end, + ics24_host::identifier::PortId, + }, + test_utils::get_dummy_bech32_account, + }; + + /// Returns a dummy `RawMsgChannelOpenInit`, for testing only! + pub fn get_dummy_raw_msg_chan_open_init() -> RawMsgChannelOpenInit { + RawMsgChannelOpenInit { + port_id: PortId::default().to_string(), + channel: Some(get_dummy_raw_channel_end()), + signer: get_dummy_bech32_account(), + } + } } #[cfg(test)] mod tests { - use crate::core::ics04_channel::msgs::chan_open_init::test_util::get_dummy_raw_msg_chan_open_init; - use crate::core::ics04_channel::msgs::chan_open_init::MsgChannelOpenInit; - use crate::prelude::*; - - use ibc_proto::ibc::core::channel::v1::MsgChannelOpenInit as RawMsgChannelOpenInit; - use test_log::test; - - #[test] - fn channel_open_init_from_raw() { - struct Test { - name: String, - raw: RawMsgChannelOpenInit, - want_pass: bool, - } - - let default_raw_msg = get_dummy_raw_msg_chan_open_init(); - - let tests: Vec = vec![ - Test { - name: "Good parameters".to_string(), - raw: default_raw_msg.clone(), - want_pass: true, - }, - Test { - name: "Incorrect port identifier, slash (separator) prohibited".to_string(), - raw: RawMsgChannelOpenInit { - port_id: "p34/".to_string(), - ..default_raw_msg.clone() - }, - want_pass: false, - }, - Test { - name: "Missing channel".to_string(), - raw: RawMsgChannelOpenInit { - channel: None, - ..default_raw_msg - }, - want_pass: false, - }, - ] - .into_iter() - .collect(); - - for test in tests { - let res_msg = MsgChannelOpenInit::try_from(test.raw.clone()); - - assert_eq!( - test.want_pass, - res_msg.is_ok(), - "MsgChanOpenInit::try_from failed for test {}, \nraw msg {:?} with error {:?}", - test.name, - test.raw, - res_msg.err(), - ); - } - } - - #[test] - fn to_and_from() { - let raw = get_dummy_raw_msg_chan_open_init(); - let msg = MsgChannelOpenInit::try_from(raw.clone()).unwrap(); - let raw_back = RawMsgChannelOpenInit::from(msg.clone()); - let msg_back = MsgChannelOpenInit::try_from(raw_back.clone()).unwrap(); - assert_eq!(raw, raw_back); - assert_eq!(msg, msg_back); - } + use crate::{ + core::ics04_channel::msgs::chan_open_init::{ + test_util::get_dummy_raw_msg_chan_open_init, MsgChannelOpenInit, + }, + prelude::*, + }; + + use ibc_proto::ibc::core::channel::v1::MsgChannelOpenInit as RawMsgChannelOpenInit; + use test_log::test; + + #[test] + fn channel_open_init_from_raw() { + struct Test { + name: String, + raw: RawMsgChannelOpenInit, + want_pass: bool, + } + + let default_raw_msg = get_dummy_raw_msg_chan_open_init(); + + let tests: Vec = vec![ + Test { + name: "Good parameters".to_string(), + raw: default_raw_msg.clone(), + want_pass: true, + }, + Test { + name: "Incorrect port identifier, slash (separator) prohibited".to_string(), + raw: RawMsgChannelOpenInit { + port_id: "p34/".to_string(), + ..default_raw_msg.clone() + }, + want_pass: false, + }, + Test { + name: "Missing channel".to_string(), + raw: RawMsgChannelOpenInit { channel: None, ..default_raw_msg }, + want_pass: false, + }, + ] + .into_iter() + .collect(); + + for test in tests { + let res_msg = MsgChannelOpenInit::try_from(test.raw.clone()); + + 
assert_eq!( + test.want_pass, + res_msg.is_ok(), + "MsgChanOpenInit::try_from failed for test {}, \nraw msg {:?} with error {:?}", + test.name, + test.raw, + res_msg.err(), + ); + } + } + + #[test] + fn to_and_from() { + let raw = get_dummy_raw_msg_chan_open_init(); + let msg = MsgChannelOpenInit::try_from(raw.clone()).unwrap(); + let raw_back = RawMsgChannelOpenInit::from(msg.clone()); + let msg_back = MsgChannelOpenInit::try_from(raw_back.clone()).unwrap(); + assert_eq!(raw, raw_back); + assert_eq!(msg, msg_back); + } } diff --git a/modules/src/core/ics04_channel/msgs/chan_open_try.rs b/modules/src/core/ics04_channel/msgs/chan_open_try.rs index bdd1e5c46b..cbfd154ee9 100644 --- a/modules/src/core/ics04_channel/msgs/chan_open_try.rs +++ b/modules/src/core/ics04_channel/msgs/chan_open_try.rs @@ -1,12 +1,13 @@ -use crate::core::ics04_channel::channel::ChannelEnd; -use crate::core::ics04_channel::error::Error as ChannelError; -use crate::core::ics04_channel::Version; -use crate::core::ics24_host::error::ValidationError; -use crate::core::ics24_host::identifier::PortId; -use crate::prelude::*; -use crate::proofs::Proofs; -use crate::signer::Signer; -use crate::tx_msg::Msg; +use crate::{ + core::{ + ics04_channel::{channel::ChannelEnd, error::Error as ChannelError, Version}, + ics24_host::{error::ValidationError, identifier::PortId}, + }, + prelude::*, + proofs::Proofs, + signer::Signer, + tx_msg::Msg, +}; use ibc_proto::ibc::core::channel::v1::MsgChannelOpenTry as RawMsgChannelOpenTry; use tendermint_proto::Protobuf; @@ -15,156 +16,144 @@ pub const TYPE_URL: &str = "/ibc.core.channel.v1.MsgChannelOpenTry"; /// /// Message definition for the second step in the channel open handshake (`ChanOpenTry` datagram). -/// #[derive(Clone, Debug, PartialEq)] pub struct MsgChannelOpenTry { - pub port_id: PortId, - pub channel: ChannelEnd, - pub counterparty_version: Version, - pub proofs: Proofs, - pub signer: Signer, + pub port_id: PortId, + pub channel: ChannelEnd, + pub counterparty_version: Version, + pub proofs: Proofs, + pub signer: Signer, } impl MsgChannelOpenTry { - pub fn new( - port_id: PortId, - channel: ChannelEnd, - counterparty_version: Version, - proofs: Proofs, - signer: Signer, - ) -> Self { - Self { - port_id, - channel, - counterparty_version, - proofs, - signer, - } - } + pub fn new( + port_id: PortId, + channel: ChannelEnd, + counterparty_version: Version, + proofs: Proofs, + signer: Signer, + ) -> Self { + Self { port_id, channel, counterparty_version, proofs, signer } + } } impl Msg for MsgChannelOpenTry { - type ValidationError = ChannelError; - type Raw = RawMsgChannelOpenTry; + type ValidationError = ChannelError; + type Raw = RawMsgChannelOpenTry; - fn route(&self) -> String { - crate::keys::ROUTER_KEY.to_string() - } + fn route(&self) -> String { + crate::keys::ROUTER_KEY.to_string() + } - fn type_url(&self) -> String { - TYPE_URL.to_string() - } + fn type_url(&self) -> String { + TYPE_URL.to_string() + } - fn validate_basic(&self) -> Result<(), ValidationError> { - match self.channel.counterparty().channel_id() { - None => Err(ValidationError::invalid_counterparty_channel_id()), - Some(_c) => Ok(()), - } - } + fn validate_basic(&self) -> Result<(), ValidationError> { + match self.channel.counterparty().channel_id() { + None => Err(ValidationError::invalid_counterparty_channel_id()), + Some(_c) => Ok(()), + } + } } impl Protobuf for MsgChannelOpenTry {} impl TryFrom for MsgChannelOpenTry { - type Error = ChannelError; + type Error = ChannelError; - fn try_from(raw_msg: 
RawMsgChannelOpenTry) -> Result { - let proofs = Proofs::new( - raw_msg - .proof_init - .try_into() - .map_err(ChannelError::invalid_proof)?, - None, - None, - None, - raw_msg - .proof_height - .ok_or_else(ChannelError::missing_height)? - .into(), - ) - .map_err(ChannelError::invalid_proof)?; + fn try_from(raw_msg: RawMsgChannelOpenTry) -> Result { + let proofs = Proofs::new( + raw_msg.proof_init.try_into().map_err(ChannelError::invalid_proof)?, + None, + None, + None, + raw_msg.proof_height.ok_or_else(ChannelError::missing_height)?.into(), + ) + .map_err(ChannelError::invalid_proof)?; - let msg = MsgChannelOpenTry { - port_id: raw_msg.port_id.parse().map_err(ChannelError::identifier)?, - channel: raw_msg - .channel - .ok_or_else(ChannelError::missing_channel)? - .try_into()?, - counterparty_version: raw_msg.counterparty_version.into(), - proofs, - signer: raw_msg.signer.parse().map_err(ChannelError::signer)?, - }; + let msg = MsgChannelOpenTry { + port_id: raw_msg.port_id.parse().map_err(ChannelError::identifier)?, + channel: raw_msg.channel.ok_or_else(ChannelError::missing_channel)?.try_into()?, + counterparty_version: raw_msg.counterparty_version.into(), + proofs, + signer: raw_msg.signer.parse().map_err(ChannelError::signer)?, + }; - msg.validate_basic() - .map_err(ChannelError::invalid_counterparty_channel_id)?; + msg.validate_basic().map_err(ChannelError::invalid_counterparty_channel_id)?; - Ok(msg) - } + Ok(msg) + } } impl From for RawMsgChannelOpenTry { - fn from(domain_msg: MsgChannelOpenTry) -> Self { - RawMsgChannelOpenTry { - port_id: domain_msg.port_id.to_string(), - channel: Some(domain_msg.channel.into()), - counterparty_version: domain_msg.counterparty_version.to_string(), - proof_init: domain_msg.proofs.object_proof().clone().into(), - proof_height: Some(domain_msg.proofs.height().into()), - signer: domain_msg.signer.to_string(), - ..Default::default() - } - } + fn from(domain_msg: MsgChannelOpenTry) -> Self { + RawMsgChannelOpenTry { + port_id: domain_msg.port_id.to_string(), + channel: Some(domain_msg.channel.into()), + counterparty_version: domain_msg.counterparty_version.to_string(), + proof_init: domain_msg.proofs.object_proof().clone().into(), + proof_height: Some(domain_msg.proofs.height().into()), + signer: domain_msg.signer.to_string(), + ..Default::default() + } + } } #[cfg(test)] pub mod test_util { - use crate::prelude::*; - use ibc_proto::ibc::core::channel::v1::MsgChannelOpenTry as RawMsgChannelOpenTry; + use crate::prelude::*; + use ibc_proto::ibc::core::channel::v1::MsgChannelOpenTry as RawMsgChannelOpenTry; - use crate::core::ics04_channel::channel::test_util::get_dummy_raw_channel_end; - use crate::core::ics24_host::identifier::PortId; - use crate::test_utils::{get_dummy_bech32_account, get_dummy_proof}; - use ibc_proto::ibc::core::client::v1::Height; + use crate::{ + core::{ + ics04_channel::channel::test_util::get_dummy_raw_channel_end, + ics24_host::identifier::PortId, + }, + test_utils::{get_dummy_bech32_account, get_dummy_proof}, + }; + use ibc_proto::ibc::core::client::v1::Height; - /// Returns a dummy `RawMsgChannelOpenTry`, for testing only! 
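// Editorial sketch, not part of the patch: ChanOpenTry is the message in this set that overrides
// `validate_basic`, so `try_from` above only succeeds when the embedded channel end names a
// counterparty channel id. Assumes the dummy helper documented here is in scope.
#[cfg(test)]
fn chan_open_try_checks_counterparty_channel_id() {
    let raw = get_dummy_raw_msg_chan_open_try(10);
    let msg = MsgChannelOpenTry::try_from(raw).expect("dummy passes validate_basic");
    // validate_basic already rejected the None case, so this must hold
    assert!(msg.channel.counterparty().channel_id().is_some());
}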
- pub fn get_dummy_raw_msg_chan_open_try(proof_height: u64) -> RawMsgChannelOpenTry { - RawMsgChannelOpenTry { - port_id: PortId::default().to_string(), - channel: Some(get_dummy_raw_channel_end()), - counterparty_version: "".to_string(), - proof_init: get_dummy_proof(), - proof_height: Some(Height { - revision_number: 0, - revision_height: proof_height, - }), - signer: get_dummy_bech32_account(), - ..Default::default() - } - } + /// Returns a dummy `RawMsgChannelOpenTry`, for testing only! + pub fn get_dummy_raw_msg_chan_open_try(proof_height: u64) -> RawMsgChannelOpenTry { + RawMsgChannelOpenTry { + port_id: PortId::default().to_string(), + channel: Some(get_dummy_raw_channel_end()), + counterparty_version: "".to_string(), + proof_init: get_dummy_proof(), + proof_height: Some(Height { revision_number: 0, revision_height: proof_height }), + signer: get_dummy_bech32_account(), + ..Default::default() + } + } } #[cfg(test)] mod tests { - use crate::core::ics04_channel::msgs::chan_open_try::test_util::get_dummy_raw_msg_chan_open_try; - use crate::core::ics04_channel::msgs::chan_open_try::MsgChannelOpenTry; - use crate::prelude::*; + use crate::{ + core::ics04_channel::msgs::chan_open_try::{ + test_util::get_dummy_raw_msg_chan_open_try, MsgChannelOpenTry, + }, + prelude::*, + }; - use ibc_proto::ibc::core::channel::v1::MsgChannelOpenTry as RawMsgChannelOpenTry; - use ibc_proto::ibc::core::client::v1::Height; - use test_log::test; + use ibc_proto::ibc::core::{ + channel::v1::MsgChannelOpenTry as RawMsgChannelOpenTry, client::v1::Height, + }; + use test_log::test; - #[test] - fn channel_open_try_from_raw() { - struct Test { - name: String, - raw: RawMsgChannelOpenTry, - want_pass: bool, - } + #[test] + fn channel_open_try_from_raw() { + struct Test { + name: String, + raw: RawMsgChannelOpenTry, + want_pass: bool, + } - let proof_height = 10; - let default_raw_msg = get_dummy_raw_msg_chan_open_try(proof_height); + let proof_height = 10; + let default_raw_msg = get_dummy_raw_msg_chan_open_try(proof_height); - let tests: Vec = vec![ + let tests: Vec = vec![ Test { name: "Good parameters".to_string(), raw: default_raw_msg.clone(), @@ -248,27 +237,27 @@ mod tests { .into_iter() .collect(); - for test in tests { - let res_msg = MsgChannelOpenTry::try_from(test.raw.clone()); + for test in tests { + let res_msg = MsgChannelOpenTry::try_from(test.raw.clone()); - assert_eq!( - test.want_pass, - res_msg.is_ok(), - "MsgChanOpenTry::try_from failed for test {}, \nraw msg {:?} with error {:?}", - test.name, - test.raw, - res_msg.err(), - ); - } - } + assert_eq!( + test.want_pass, + res_msg.is_ok(), + "MsgChanOpenTry::try_from failed for test {}, \nraw msg {:?} with error {:?}", + test.name, + test.raw, + res_msg.err(), + ); + } + } - #[test] - fn to_and_from() { - let raw = get_dummy_raw_msg_chan_open_try(10); - let msg = MsgChannelOpenTry::try_from(raw.clone()).unwrap(); - let raw_back = RawMsgChannelOpenTry::from(msg.clone()); - let msg_back = MsgChannelOpenTry::try_from(raw_back.clone()).unwrap(); - assert_eq!(raw, raw_back); - assert_eq!(msg, msg_back); - } + #[test] + fn to_and_from() { + let raw = get_dummy_raw_msg_chan_open_try(10); + let msg = MsgChannelOpenTry::try_from(raw.clone()).unwrap(); + let raw_back = RawMsgChannelOpenTry::from(msg.clone()); + let msg_back = MsgChannelOpenTry::try_from(raw_back.clone()).unwrap(); + assert_eq!(raw, raw_back); + assert_eq!(msg, msg_back); + } } diff --git a/modules/src/core/ics04_channel/msgs/recv_packet.rs b/modules/src/core/ics04_channel/msgs/recv_packet.rs 
index 7aad7c6eec..6a71da410b 100644 --- a/modules/src/core/ics04_channel/msgs/recv_packet.rs +++ b/modules/src/core/ics04_channel/msgs/recv_packet.rs @@ -4,196 +4,173 @@ use tendermint_proto::Protobuf; use ibc_proto::ibc::core::channel::v1::MsgRecvPacket as RawMsgRecvPacket; -use crate::core::ics04_channel::error::Error; -use crate::core::ics04_channel::packet::Packet; -use crate::proofs::Proofs; -use crate::signer::Signer; -use crate::tx_msg::Msg; +use crate::{ + core::ics04_channel::{error::Error, packet::Packet}, + proofs::Proofs, + signer::Signer, + tx_msg::Msg, +}; pub const TYPE_URL: &str = "/ibc.core.channel.v1.MsgRecvPacket"; /// /// Message definition for the "packet receiving" datagram. -/// #[derive(Clone, Debug, PartialEq)] pub struct MsgRecvPacket { - pub packet: Packet, - pub proofs: Proofs, - pub signer: Signer, + pub packet: Packet, + pub proofs: Proofs, + pub signer: Signer, } impl MsgRecvPacket { - pub fn new(packet: Packet, proofs: Proofs, signer: Signer) -> MsgRecvPacket { - Self { - packet, - proofs, - signer, - } - } + pub fn new(packet: Packet, proofs: Proofs, signer: Signer) -> MsgRecvPacket { + Self { packet, proofs, signer } + } } impl Msg for MsgRecvPacket { - type ValidationError = Error; - type Raw = RawMsgRecvPacket; + type ValidationError = Error; + type Raw = RawMsgRecvPacket; - fn route(&self) -> String { - crate::keys::ROUTER_KEY.to_string() - } + fn route(&self) -> String { + crate::keys::ROUTER_KEY.to_string() + } - fn type_url(&self) -> String { - TYPE_URL.to_string() - } + fn type_url(&self) -> String { + TYPE_URL.to_string() + } } impl Protobuf for MsgRecvPacket {} impl TryFrom for MsgRecvPacket { - type Error = Error; - - fn try_from(raw_msg: RawMsgRecvPacket) -> Result { - let proofs = Proofs::new( - raw_msg - .proof_commitment - .try_into() - .map_err(Error::invalid_proof)?, - None, - None, - None, - raw_msg - .proof_height - .ok_or_else(Error::missing_height)? - .into(), - ) - .map_err(Error::invalid_proof)?; - - Ok(MsgRecvPacket { - packet: raw_msg - .packet - .ok_or_else(Error::missing_packet)? 
- .try_into()?, - proofs, - signer: raw_msg.signer.parse().map_err(Error::signer)?, - }) - } + type Error = Error; + + fn try_from(raw_msg: RawMsgRecvPacket) -> Result { + let proofs = Proofs::new( + raw_msg.proof_commitment.try_into().map_err(Error::invalid_proof)?, + None, + None, + None, + raw_msg.proof_height.ok_or_else(Error::missing_height)?.into(), + ) + .map_err(Error::invalid_proof)?; + + Ok(MsgRecvPacket { + packet: raw_msg.packet.ok_or_else(Error::missing_packet)?.try_into()?, + proofs, + signer: raw_msg.signer.parse().map_err(Error::signer)?, + }) + } } impl From for RawMsgRecvPacket { - fn from(domain_msg: MsgRecvPacket) -> Self { - RawMsgRecvPacket { - packet: Some(domain_msg.packet.into()), - proof_commitment: domain_msg.proofs.object_proof().clone().into(), - proof_height: Some(domain_msg.proofs.height().into()), - signer: domain_msg.signer.to_string(), - } - } + fn from(domain_msg: MsgRecvPacket) -> Self { + RawMsgRecvPacket { + packet: Some(domain_msg.packet.into()), + proof_commitment: domain_msg.proofs.object_proof().clone().into(), + proof_height: Some(domain_msg.proofs.height().into()), + signer: domain_msg.signer.to_string(), + } + } } #[cfg(test)] pub mod test_util { - use ibc_proto::ibc::core::channel::v1::MsgRecvPacket as RawMsgRecvPacket; - use ibc_proto::ibc::core::client::v1::Height as RawHeight; - - use crate::core::ics04_channel::packet::test_utils::get_dummy_raw_packet; - use crate::test_utils::{get_dummy_bech32_account, get_dummy_proof}; - use crate::timestamp::Timestamp; - use core::ops::Add; - use core::time::Duration; - - /// Returns a dummy `RawMsgRecvPacket`, for testing only! The `height` parametrizes both the - /// proof height as well as the timeout height. - pub fn get_dummy_raw_msg_recv_packet(height: u64) -> RawMsgRecvPacket { - let timestamp = Timestamp::now().add(Duration::from_secs(9)); - RawMsgRecvPacket { - packet: Some(get_dummy_raw_packet( - height, - timestamp.unwrap().nanoseconds(), - )), - proof_commitment: get_dummy_proof(), - proof_height: Some(RawHeight { - revision_number: 0, - revision_height: height, - }), - signer: get_dummy_bech32_account(), - } - } + use ibc_proto::ibc::core::{ + channel::v1::MsgRecvPacket as RawMsgRecvPacket, client::v1::Height as RawHeight, + }; + + use crate::{ + core::ics04_channel::packet::test_utils::get_dummy_raw_packet, + test_utils::{get_dummy_bech32_account, get_dummy_proof}, + timestamp::Timestamp, + }; + use core::{ops::Add, time::Duration}; + + /// Returns a dummy `RawMsgRecvPacket`, for testing only! The `height` parametrizes both the + /// proof height as well as the timeout height. 
+ pub fn get_dummy_raw_msg_recv_packet(height: u64) -> RawMsgRecvPacket { + let timestamp = Timestamp::now().add(Duration::from_secs(9)); + RawMsgRecvPacket { + packet: Some(get_dummy_raw_packet(height, timestamp.unwrap().nanoseconds())), + proof_commitment: get_dummy_proof(), + proof_height: Some(RawHeight { revision_number: 0, revision_height: height }), + signer: get_dummy_bech32_account(), + } + } } #[cfg(test)] mod test { - use crate::prelude::*; - - use test_log::test; - - use ibc_proto::ibc::core::channel::v1::MsgRecvPacket as RawMsgRecvPacket; - - use crate::core::ics04_channel::error::Error; - use crate::core::ics04_channel::msgs::recv_packet::test_util::get_dummy_raw_msg_recv_packet; - use crate::core::ics04_channel::msgs::recv_packet::MsgRecvPacket; - use crate::test_utils::get_dummy_bech32_account; - - #[test] - fn msg_recv_packet_try_from_raw() { - struct Test { - name: String, - raw: RawMsgRecvPacket, - want_pass: bool, - } - - let height = 20; - let default_raw_msg = get_dummy_raw_msg_recv_packet(height); - let tests: Vec = vec![ - Test { - name: "Good parameters".to_string(), - raw: default_raw_msg.clone(), - want_pass: true, - }, - Test { - name: "Missing proof".to_string(), - raw: RawMsgRecvPacket { - proof_commitment: Vec::new(), - ..default_raw_msg.clone() - }, - want_pass: false, - }, - Test { - name: "Missing proof height".to_string(), - raw: RawMsgRecvPacket { - proof_height: None, - ..default_raw_msg.clone() - }, - want_pass: false, - }, - Test { - name: "Empty signer".to_string(), - raw: RawMsgRecvPacket { - signer: get_dummy_bech32_account(), - ..default_raw_msg - }, - want_pass: true, - }, - ]; - - for test in tests { - let res_msg: Result = test.raw.clone().try_into(); - - assert_eq!( - res_msg.is_ok(), - test.want_pass, - "MsgRecvPacket::try_from failed for test {} \nraw message: {:?} with error: {:?}", - test.name, - test.raw, - res_msg.err() - ); - } - } - - #[test] - fn to_and_from() { - let raw = get_dummy_raw_msg_recv_packet(15); - let msg = MsgRecvPacket::try_from(raw.clone()).unwrap(); - let raw_back = RawMsgRecvPacket::from(msg.clone()); - let msg_back = MsgRecvPacket::try_from(raw_back.clone()).unwrap(); - assert_eq!(raw, raw_back); - assert_eq!(msg, msg_back); - } + use crate::prelude::*; + + use test_log::test; + + use ibc_proto::ibc::core::channel::v1::MsgRecvPacket as RawMsgRecvPacket; + + use crate::{ + core::ics04_channel::{ + error::Error, + msgs::recv_packet::{test_util::get_dummy_raw_msg_recv_packet, MsgRecvPacket}, + }, + test_utils::get_dummy_bech32_account, + }; + + #[test] + fn msg_recv_packet_try_from_raw() { + struct Test { + name: String, + raw: RawMsgRecvPacket, + want_pass: bool, + } + + let height = 20; + let default_raw_msg = get_dummy_raw_msg_recv_packet(height); + let tests: Vec = vec![ + Test { + name: "Good parameters".to_string(), + raw: default_raw_msg.clone(), + want_pass: true, + }, + Test { + name: "Missing proof".to_string(), + raw: RawMsgRecvPacket { proof_commitment: Vec::new(), ..default_raw_msg.clone() }, + want_pass: false, + }, + Test { + name: "Missing proof height".to_string(), + raw: RawMsgRecvPacket { proof_height: None, ..default_raw_msg.clone() }, + want_pass: false, + }, + Test { + name: "Empty signer".to_string(), + raw: RawMsgRecvPacket { signer: get_dummy_bech32_account(), ..default_raw_msg }, + want_pass: true, + }, + ]; + + for test in tests { + let res_msg: Result = test.raw.clone().try_into(); + + assert_eq!( + res_msg.is_ok(), + test.want_pass, + "MsgRecvPacket::try_from failed for test {} \nraw 
message: {:?} with error: {:?}", + test.name, + test.raw, + res_msg.err() + ); + } + } + + #[test] + fn to_and_from() { + let raw = get_dummy_raw_msg_recv_packet(15); + let msg = MsgRecvPacket::try_from(raw.clone()).unwrap(); + let raw_back = RawMsgRecvPacket::from(msg.clone()); + let msg_back = MsgRecvPacket::try_from(raw_back.clone()).unwrap(); + assert_eq!(raw, raw_back); + assert_eq!(msg, msg_back); + } } diff --git a/modules/src/core/ics04_channel/msgs/timeout.rs b/modules/src/core/ics04_channel/msgs/timeout.rs index a2887e1013..d36dca14da 100644 --- a/modules/src/core/ics04_channel/msgs/timeout.rs +++ b/modules/src/core/ics04_channel/msgs/timeout.rs @@ -4,211 +4,191 @@ use tendermint_proto::Protobuf; use ibc_proto::ibc::core::channel::v1::MsgTimeout as RawMsgTimeout; -use crate::core::ics04_channel::error::Error; -use crate::core::ics04_channel::packet::{Packet, Sequence}; -use crate::proofs::Proofs; -use crate::signer::Signer; -use crate::tx_msg::Msg; +use crate::{ + core::ics04_channel::{ + error::Error, + packet::{Packet, Sequence}, + }, + proofs::Proofs, + signer::Signer, + tx_msg::Msg, +}; pub const TYPE_URL: &str = "/ibc.core.channel.v1.MsgTimeout"; /// /// Message definition for packet timeout domain type. -/// #[derive(Clone, Debug, PartialEq)] pub struct MsgTimeout { - pub packet: Packet, - pub next_sequence_recv: Sequence, - pub proofs: Proofs, - pub signer: Signer, + pub packet: Packet, + pub next_sequence_recv: Sequence, + pub proofs: Proofs, + pub signer: Signer, } impl MsgTimeout { - pub fn new( - packet: Packet, - next_sequence_recv: Sequence, - proofs: Proofs, - signer: Signer, - ) -> MsgTimeout { - Self { - packet, - next_sequence_recv, - proofs, - signer, - } - } + pub fn new( + packet: Packet, + next_sequence_recv: Sequence, + proofs: Proofs, + signer: Signer, + ) -> MsgTimeout { + Self { packet, next_sequence_recv, proofs, signer } + } } impl Msg for MsgTimeout { - type ValidationError = Error; - type Raw = RawMsgTimeout; + type ValidationError = Error; + type Raw = RawMsgTimeout; - fn route(&self) -> String { - crate::keys::ROUTER_KEY.to_string() - } + fn route(&self) -> String { + crate::keys::ROUTER_KEY.to_string() + } - fn type_url(&self) -> String { - TYPE_URL.to_string() - } + fn type_url(&self) -> String { + TYPE_URL.to_string() + } } impl Protobuf for MsgTimeout {} impl TryFrom for MsgTimeout { - type Error = Error; - - fn try_from(raw_msg: RawMsgTimeout) -> Result { - let proofs = Proofs::new( - raw_msg - .proof_unreceived - .try_into() - .map_err(Error::invalid_proof)?, - None, - None, - None, - raw_msg - .proof_height - .ok_or_else(Error::missing_height)? - .into(), - ) - .map_err(Error::invalid_proof)?; - - // TODO: Domain type verification for the next sequence: this should probably be > 0. - - Ok(MsgTimeout { - packet: raw_msg - .packet - .ok_or_else(Error::missing_packet)? - .try_into()?, - next_sequence_recv: Sequence::from(raw_msg.next_sequence_recv), - signer: raw_msg.signer.parse().map_err(Error::signer)?, - proofs, - }) - } + type Error = Error; + + fn try_from(raw_msg: RawMsgTimeout) -> Result { + let proofs = Proofs::new( + raw_msg.proof_unreceived.try_into().map_err(Error::invalid_proof)?, + None, + None, + None, + raw_msg.proof_height.ok_or_else(Error::missing_height)?.into(), + ) + .map_err(Error::invalid_proof)?; + + // TODO: Domain type verification for the next sequence: this should probably be > 0. 
+ + Ok(MsgTimeout { + packet: raw_msg.packet.ok_or_else(Error::missing_packet)?.try_into()?, + next_sequence_recv: Sequence::from(raw_msg.next_sequence_recv), + signer: raw_msg.signer.parse().map_err(Error::signer)?, + proofs, + }) + } } impl From for RawMsgTimeout { - fn from(domain_msg: MsgTimeout) -> Self { - RawMsgTimeout { - packet: Some(domain_msg.packet.into()), - proof_unreceived: domain_msg.proofs.object_proof().clone().into(), - proof_height: Some(domain_msg.proofs.height().into()), - next_sequence_recv: domain_msg.next_sequence_recv.into(), - signer: domain_msg.signer.to_string(), - } - } + fn from(domain_msg: MsgTimeout) -> Self { + RawMsgTimeout { + packet: Some(domain_msg.packet.into()), + proof_unreceived: domain_msg.proofs.object_proof().clone().into(), + proof_height: Some(domain_msg.proofs.height().into()), + next_sequence_recv: domain_msg.next_sequence_recv.into(), + signer: domain_msg.signer.to_string(), + } + } } #[cfg(test)] pub mod test_util { - use ibc_proto::ibc::core::channel::v1::MsgTimeout as RawMsgTimeout; - use ibc_proto::ibc::core::client::v1::Height as RawHeight; - - use crate::core::ics04_channel::packet::test_utils::get_dummy_raw_packet; - use crate::test_utils::{get_dummy_bech32_account, get_dummy_proof}; - - /// Returns a dummy `RawMsgTimeout`, for testing only! - /// The `height` parametrizes both the proof height as well as the timeout height. - pub fn get_dummy_raw_msg_timeout(height: u64, timeout_timestamp: u64) -> RawMsgTimeout { - RawMsgTimeout { - packet: Some(get_dummy_raw_packet(height, timeout_timestamp)), - proof_unreceived: get_dummy_proof(), - proof_height: Some(RawHeight { - revision_number: 0, - revision_height: height, - }), - next_sequence_recv: 1, - signer: get_dummy_bech32_account(), - } - } + use ibc_proto::ibc::core::{ + channel::v1::MsgTimeout as RawMsgTimeout, client::v1::Height as RawHeight, + }; + + use crate::{ + core::ics04_channel::packet::test_utils::get_dummy_raw_packet, + test_utils::{get_dummy_bech32_account, get_dummy_proof}, + }; + + /// Returns a dummy `RawMsgTimeout`, for testing only! + /// The `height` parametrizes both the proof height as well as the timeout height. 
+ pub fn get_dummy_raw_msg_timeout(height: u64, timeout_timestamp: u64) -> RawMsgTimeout { + RawMsgTimeout { + packet: Some(get_dummy_raw_packet(height, timeout_timestamp)), + proof_unreceived: get_dummy_proof(), + proof_height: Some(RawHeight { revision_number: 0, revision_height: height }), + next_sequence_recv: 1, + signer: get_dummy_bech32_account(), + } + } } #[cfg(test)] mod test { - use crate::prelude::*; - - use test_log::test; - - use ibc_proto::ibc::core::channel::v1::MsgTimeout as RawMsgTimeout; - - use crate::core::ics04_channel::error::Error; - use crate::core::ics04_channel::msgs::timeout::test_util::get_dummy_raw_msg_timeout; - use crate::core::ics04_channel::msgs::timeout::MsgTimeout; - use crate::test_utils::get_dummy_bech32_account; - - #[test] - fn msg_timeout_try_from_raw() { - struct Test { - name: String, - raw: RawMsgTimeout, - want_pass: bool, - } - - let height = 50; - let timeout_timestamp = 0; - let default_raw_msg = get_dummy_raw_msg_timeout(height, timeout_timestamp); - - let tests: Vec = vec![ - Test { - name: "Good parameters".to_string(), - raw: default_raw_msg.clone(), - want_pass: true, - }, - Test { - name: "Missing packet".to_string(), - raw: RawMsgTimeout { - packet: None, - ..default_raw_msg.clone() - }, - want_pass: false, - }, - Test { - name: "Missing proof".to_string(), - raw: RawMsgTimeout { - proof_unreceived: Vec::new(), - ..default_raw_msg.clone() - }, - want_pass: false, - }, - Test { - name: "Missing proof height".to_string(), - raw: RawMsgTimeout { - proof_height: None, - ..default_raw_msg.clone() - }, - want_pass: false, - }, - Test { - name: "Empty signer".to_string(), - raw: RawMsgTimeout { - signer: get_dummy_bech32_account(), - ..default_raw_msg - }, - want_pass: true, - }, - ]; - - for test in tests { - let res_msg: Result = test.raw.clone().try_into(); - - assert_eq!( - res_msg.is_ok(), - test.want_pass, - "MsgTimeout::try_from failed for test {} \nraw message: {:?} with error: {:?}", - test.name, - test.raw, - res_msg.err() - ); - } - } - - #[test] - fn to_and_from() { - let raw = get_dummy_raw_msg_timeout(15, 0); - let msg = MsgTimeout::try_from(raw.clone()).unwrap(); - let raw_back = RawMsgTimeout::from(msg.clone()); - let msg_back = MsgTimeout::try_from(raw_back.clone()).unwrap(); - assert_eq!(raw, raw_back); - assert_eq!(msg, msg_back); - } + use crate::prelude::*; + + use test_log::test; + + use ibc_proto::ibc::core::channel::v1::MsgTimeout as RawMsgTimeout; + + use crate::{ + core::ics04_channel::{ + error::Error, + msgs::timeout::{test_util::get_dummy_raw_msg_timeout, MsgTimeout}, + }, + test_utils::get_dummy_bech32_account, + }; + + #[test] + fn msg_timeout_try_from_raw() { + struct Test { + name: String, + raw: RawMsgTimeout, + want_pass: bool, + } + + let height = 50; + let timeout_timestamp = 0; + let default_raw_msg = get_dummy_raw_msg_timeout(height, timeout_timestamp); + + let tests: Vec = vec![ + Test { + name: "Good parameters".to_string(), + raw: default_raw_msg.clone(), + want_pass: true, + }, + Test { + name: "Missing packet".to_string(), + raw: RawMsgTimeout { packet: None, ..default_raw_msg.clone() }, + want_pass: false, + }, + Test { + name: "Missing proof".to_string(), + raw: RawMsgTimeout { proof_unreceived: Vec::new(), ..default_raw_msg.clone() }, + want_pass: false, + }, + Test { + name: "Missing proof height".to_string(), + raw: RawMsgTimeout { proof_height: None, ..default_raw_msg.clone() }, + want_pass: false, + }, + Test { + name: "Empty signer".to_string(), + raw: RawMsgTimeout { signer: 
get_dummy_bech32_account(), ..default_raw_msg }, + want_pass: true, + }, + ]; + + for test in tests { + let res_msg: Result = test.raw.clone().try_into(); + + assert_eq!( + res_msg.is_ok(), + test.want_pass, + "MsgTimeout::try_from failed for test {} \nraw message: {:?} with error: {:?}", + test.name, + test.raw, + res_msg.err() + ); + } + } + + #[test] + fn to_and_from() { + let raw = get_dummy_raw_msg_timeout(15, 0); + let msg = MsgTimeout::try_from(raw.clone()).unwrap(); + let raw_back = RawMsgTimeout::from(msg.clone()); + let msg_back = MsgTimeout::try_from(raw_back.clone()).unwrap(); + assert_eq!(raw, raw_back); + assert_eq!(msg, msg_back); + } } diff --git a/modules/src/core/ics04_channel/msgs/timeout_on_close.rs b/modules/src/core/ics04_channel/msgs/timeout_on_close.rs index f13a0104db..84548335f5 100644 --- a/modules/src/core/ics04_channel/msgs/timeout_on_close.rs +++ b/modules/src/core/ics04_channel/msgs/timeout_on_close.rs @@ -3,135 +3,119 @@ use crate::prelude::*; use ibc_proto::ibc::core::channel::v1::MsgTimeoutOnClose as RawMsgTimeoutOnClose; use tendermint_proto::Protobuf; -use crate::core::ics04_channel::error::Error; -use crate::core::ics04_channel::packet::{Packet, Sequence}; -use crate::proofs::Proofs; -use crate::signer::Signer; -use crate::tx_msg::Msg; +use crate::{ + core::ics04_channel::{ + error::Error, + packet::{Packet, Sequence}, + }, + proofs::Proofs, + signer::Signer, + tx_msg::Msg, +}; pub const TYPE_URL: &str = "/ibc.core.channel.v1.MsgTimeoutOnClose"; /// /// Message definition for packet timeout domain type. -/// #[derive(Clone, Debug, PartialEq)] pub struct MsgTimeoutOnClose { - pub packet: Packet, - pub next_sequence_recv: Sequence, - pub proofs: Proofs, - pub signer: Signer, + pub packet: Packet, + pub next_sequence_recv: Sequence, + pub proofs: Proofs, + pub signer: Signer, } impl MsgTimeoutOnClose { - pub fn new( - packet: Packet, - next_sequence_recv: Sequence, - proofs: Proofs, - signer: Signer, - ) -> MsgTimeoutOnClose { - Self { - packet, - next_sequence_recv, - proofs, - signer, - } - } + pub fn new( + packet: Packet, + next_sequence_recv: Sequence, + proofs: Proofs, + signer: Signer, + ) -> MsgTimeoutOnClose { + Self { packet, next_sequence_recv, proofs, signer } + } } impl Msg for MsgTimeoutOnClose { - type ValidationError = Error; - type Raw = RawMsgTimeoutOnClose; + type ValidationError = Error; + type Raw = RawMsgTimeoutOnClose; - fn route(&self) -> String { - crate::keys::ROUTER_KEY.to_string() - } + fn route(&self) -> String { + crate::keys::ROUTER_KEY.to_string() + } - fn type_url(&self) -> String { - TYPE_URL.to_string() - } + fn type_url(&self) -> String { + TYPE_URL.to_string() + } } impl Protobuf for MsgTimeoutOnClose {} impl TryFrom for MsgTimeoutOnClose { - type Error = Error; - - fn try_from(raw_msg: RawMsgTimeoutOnClose) -> Result { - let proofs = Proofs::new( - raw_msg - .proof_unreceived - .try_into() - .map_err(Error::invalid_proof)?, - None, - None, - Some( - raw_msg - .proof_close - .try_into() - .map_err(Error::invalid_proof)?, - ), - raw_msg - .proof_height - .ok_or_else(Error::missing_height)? - .into(), - ) - .map_err(Error::invalid_proof)?; - - // TODO: Domain type verification for the next sequence: this should probably be > 0. - - Ok(MsgTimeoutOnClose { - packet: raw_msg - .packet - .ok_or_else(Error::missing_packet)? 
- .try_into()?, - next_sequence_recv: Sequence::from(raw_msg.next_sequence_recv), - signer: raw_msg.signer.parse().map_err(Error::signer)?, - proofs, - }) - } + type Error = Error; + + fn try_from(raw_msg: RawMsgTimeoutOnClose) -> Result { + let proofs = Proofs::new( + raw_msg.proof_unreceived.try_into().map_err(Error::invalid_proof)?, + None, + None, + Some(raw_msg.proof_close.try_into().map_err(Error::invalid_proof)?), + raw_msg.proof_height.ok_or_else(Error::missing_height)?.into(), + ) + .map_err(Error::invalid_proof)?; + + // TODO: Domain type verification for the next sequence: this should probably be > 0. + + Ok(MsgTimeoutOnClose { + packet: raw_msg.packet.ok_or_else(Error::missing_packet)?.try_into()?, + next_sequence_recv: Sequence::from(raw_msg.next_sequence_recv), + signer: raw_msg.signer.parse().map_err(Error::signer)?, + proofs, + }) + } } impl From for RawMsgTimeoutOnClose { - fn from(domain_msg: MsgTimeoutOnClose) -> Self { - RawMsgTimeoutOnClose { - packet: Some(domain_msg.packet.into()), - proof_unreceived: domain_msg.proofs.object_proof().clone().into(), - proof_close: domain_msg - .proofs - .other_proof() - .clone() - .map_or_else(Vec::new, |v| v.into()), - proof_height: Some(domain_msg.proofs.height().into()), - next_sequence_recv: domain_msg.next_sequence_recv.into(), - signer: domain_msg.signer.to_string(), - } - } + fn from(domain_msg: MsgTimeoutOnClose) -> Self { + RawMsgTimeoutOnClose { + packet: Some(domain_msg.packet.into()), + proof_unreceived: domain_msg.proofs.object_proof().clone().into(), + proof_close: domain_msg + .proofs + .other_proof() + .clone() + .map_or_else(Vec::new, |v| v.into()), + proof_height: Some(domain_msg.proofs.height().into()), + next_sequence_recv: domain_msg.next_sequence_recv.into(), + signer: domain_msg.signer.to_string(), + } + } } #[cfg(test)] pub mod test_util { - use ibc_proto::ibc::core::channel::v1::MsgTimeoutOnClose as RawMsgTimeoutOnClose; - use ibc_proto::ibc::core::client::v1::Height as RawHeight; - - use crate::core::ics04_channel::packet::test_utils::get_dummy_raw_packet; - use crate::test_utils::{get_dummy_bech32_account, get_dummy_proof}; - - /// Returns a dummy `RawMsgTimeoutOnClose`, for testing only! - /// The `height` parametrizes both the proof height as well as the timeout height. - pub fn get_dummy_raw_msg_timeout_on_close( - height: u64, - timeout_timestamp: u64, - ) -> RawMsgTimeoutOnClose { - RawMsgTimeoutOnClose { - packet: Some(get_dummy_raw_packet(height, timeout_timestamp)), - proof_unreceived: get_dummy_proof(), - proof_close: get_dummy_proof(), - proof_height: Some(RawHeight { - revision_number: 0, - revision_height: height, - }), - next_sequence_recv: 1, - signer: get_dummy_bech32_account(), - } - } + use ibc_proto::ibc::core::{ + channel::v1::MsgTimeoutOnClose as RawMsgTimeoutOnClose, client::v1::Height as RawHeight, + }; + + use crate::{ + core::ics04_channel::packet::test_utils::get_dummy_raw_packet, + test_utils::{get_dummy_bech32_account, get_dummy_proof}, + }; + + /// Returns a dummy `RawMsgTimeoutOnClose`, for testing only! + /// The `height` parametrizes both the proof height as well as the timeout height. 
+ pub fn get_dummy_raw_msg_timeout_on_close( + height: u64, + timeout_timestamp: u64, + ) -> RawMsgTimeoutOnClose { + RawMsgTimeoutOnClose { + packet: Some(get_dummy_raw_packet(height, timeout_timestamp)), + proof_unreceived: get_dummy_proof(), + proof_close: get_dummy_proof(), + proof_height: Some(RawHeight { revision_number: 0, revision_height: height }), + next_sequence_recv: 1, + signer: get_dummy_bech32_account(), + } + } } diff --git a/modules/src/core/ics04_channel/packet.rs b/modules/src/core/ics04_channel/packet.rs index 6e9b985235..38a008f35a 100644 --- a/modules/src/core/ics04_channel/packet.rs +++ b/modules/src/core/ics04_channel/packet.rs @@ -6,361 +6,353 @@ use serde_derive::{Deserialize, Serialize}; use ibc_proto::ibc::core::channel::v1::Packet as RawPacket; -use crate::core::ics04_channel::error::Error; -use crate::core::ics24_host::identifier::{ChannelId, PortId}; -use crate::events::{extract_attribute, Error as EventError, RawObject}; -use crate::timestamp::{Expiry::Expired, Timestamp}; -use crate::Height; +use crate::{ + core::{ + ics04_channel::error::Error, + ics24_host::identifier::{ChannelId, PortId}, + }, + events::{extract_attribute, Error as EventError, RawObject}, + timestamp::{Expiry::Expired, Timestamp}, + Height, +}; use super::handler::{ - acknowledgement::AckPacketResult, recv_packet::RecvPacketResult, send_packet::SendPacketResult, - timeout::TimeoutPacketResult, write_acknowledgement::WriteAckPacketResult, + acknowledgement::AckPacketResult, recv_packet::RecvPacketResult, send_packet::SendPacketResult, + timeout::TimeoutPacketResult, write_acknowledgement::WriteAckPacketResult, }; /// Enumeration of proof carrying ICS4 message, helper for relayer. #[derive(Clone, Debug, PartialEq, Eq)] pub enum PacketMsgType { - Recv, - Ack, - TimeoutUnordered, - TimeoutOrdered, - TimeoutOnClose, + Recv, + Ack, + TimeoutUnordered, + TimeoutOrdered, + TimeoutOnClose, } #[derive(Clone, Debug)] pub enum PacketResult { - Send(SendPacketResult), - Recv(RecvPacketResult), - WriteAck(WriteAckPacketResult), - Ack(AckPacketResult), - Timeout(TimeoutPacketResult), + Send(SendPacketResult), + Recv(RecvPacketResult), + WriteAck(WriteAckPacketResult), + Ack(AckPacketResult), + Timeout(TimeoutPacketResult), } #[derive(Clone, Debug)] pub enum Receipt { - Ok, + Ok, } impl core::fmt::Display for PacketMsgType { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - match self { - PacketMsgType::Recv => write!(f, "(PacketMsgType::Recv)"), - PacketMsgType::Ack => write!(f, "(PacketMsgType::Ack)"), - PacketMsgType::TimeoutUnordered => write!(f, "(PacketMsgType::TimeoutUnordered)"), - PacketMsgType::TimeoutOrdered => write!(f, "(PacketMsgType::TimeoutOrdered)"), - PacketMsgType::TimeoutOnClose => write!(f, "(PacketMsgType::TimeoutOnClose)"), - } - } + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + match self { + PacketMsgType::Recv => write!(f, "(PacketMsgType::Recv)"), + PacketMsgType::Ack => write!(f, "(PacketMsgType::Ack)"), + PacketMsgType::TimeoutUnordered => write!(f, "(PacketMsgType::TimeoutUnordered)"), + PacketMsgType::TimeoutOrdered => write!(f, "(PacketMsgType::TimeoutOrdered)"), + PacketMsgType::TimeoutOnClose => write!(f, "(PacketMsgType::TimeoutOnClose)"), + } + } } /// The sequence number of a packet enforces ordering among packets from the same source. 
#[derive( - Copy, Clone, Debug, Default, PartialEq, Eq, Hash, PartialOrd, Ord, Deserialize, Serialize, + Copy, Clone, Debug, Default, PartialEq, Eq, Hash, PartialOrd, Ord, Deserialize, Serialize, )] pub struct Sequence(u64); impl FromStr for Sequence { - type Err = Error; - - fn from_str(s: &str) -> Result { - Ok(Self::from(s.parse::().map_err(|e| { - Error::invalid_string_as_sequence(s.to_string(), e) - })?)) - } + type Err = Error; + + fn from_str(s: &str) -> Result { + Ok(Self::from( + s.parse::() + .map_err(|e| Error::invalid_string_as_sequence(s.to_string(), e))?, + )) + } } impl Sequence { - pub fn is_zero(&self) -> bool { - self.0 == 0 - } + pub fn is_zero(&self) -> bool { + self.0 == 0 + } - pub fn increment(&self) -> Sequence { - Sequence(self.0 + 1) - } + pub fn increment(&self) -> Sequence { + Sequence(self.0 + 1) + } } impl From for Sequence { - fn from(seq: u64) -> Self { - Sequence(seq) - } + fn from(seq: u64) -> Self { + Sequence(seq) + } } impl From for u64 { - fn from(s: Sequence) -> u64 { - s.0 - } + fn from(s: Sequence) -> u64 { + s.0 + } } impl core::fmt::Display for Sequence { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { - write!(f, "{}", self.0) - } + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { + write!(f, "{}", self.0) + } } #[derive(Clone, Default, Hash, PartialEq, Eq, Deserialize, Serialize)] pub struct Packet { - pub sequence: Sequence, - pub source_port: PortId, - pub source_channel: ChannelId, - pub destination_port: PortId, - pub destination_channel: ChannelId, - #[serde( - serialize_with = "crate::serializers::ser_hex_upper", - deserialize_with = "crate::serializers::deser_hex_upper" - )] - pub data: Vec, - pub timeout_height: Height, - pub timeout_timestamp: Timestamp, + pub sequence: Sequence, + pub source_port: PortId, + pub source_channel: ChannelId, + pub destination_port: PortId, + pub destination_channel: ChannelId, + #[serde( + serialize_with = "crate::serializers::ser_hex_upper", + deserialize_with = "crate::serializers::deser_hex_upper" + )] + pub data: Vec, + pub timeout_height: Height, + pub timeout_timestamp: Timestamp, } struct PacketData<'a>(&'a [u8]); impl<'a> core::fmt::Debug for PacketData<'a> { - fn fmt(&self, formatter: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { - write!(formatter, "{:?}", self.0) - } + fn fmt(&self, formatter: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { + write!(formatter, "{:?}", self.0) + } } impl core::fmt::Debug for Packet { - fn fmt(&self, formatter: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { - // Remember: if you alter the definition of `Packet`, - // 1. update the formatter debug struct builder calls (return object of - // this function) - // 2. 
update this destructuring assignment accordingly - let Packet { - sequence: _, - source_port: _, - source_channel: _, - destination_port: _, - destination_channel: _, - data, - timeout_height: _, - timeout_timestamp: _, - } = self; - let data_wrapper = PacketData(data); - - formatter - .debug_struct("Packet") - .field("sequence", &self.sequence) - .field("source_port", &self.source_port) - .field("source_channel", &self.source_channel) - .field("destination_port", &self.destination_port) - .field("destination_channel", &self.destination_channel) - .field("data", &data_wrapper) - .field("timeout_height", &self.timeout_height) - .field("timeout_timestamp", &self.timeout_timestamp) - .finish() - } + fn fmt(&self, formatter: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { + // Remember: if you alter the definition of `Packet`, + // 1. update the formatter debug struct builder calls (return object of + // this function) + // 2. update this destructuring assignment accordingly + let Packet { + sequence: _, + source_port: _, + source_channel: _, + destination_port: _, + destination_channel: _, + data, + timeout_height: _, + timeout_timestamp: _, + } = self; + let data_wrapper = PacketData(data); + + formatter + .debug_struct("Packet") + .field("sequence", &self.sequence) + .field("source_port", &self.source_port) + .field("source_channel", &self.source_channel) + .field("destination_port", &self.destination_port) + .field("destination_channel", &self.destination_channel) + .field("data", &data_wrapper) + .field("timeout_height", &self.timeout_height) + .field("timeout_timestamp", &self.timeout_timestamp) + .finish() + } } pub enum TimeoutVariant { - Height, - Timestamp, - Both, + Height, + Timestamp, + Both, } impl Packet { - /// Checks whether a packet from a - /// [`SendPacket`](crate::core::ics04_channel::events::SendPacket) - /// event is timed-out relative to the current state of the - /// destination chain. - /// - /// Checks both for time-out relative to the destination chain's - /// current timestamp `dst_chain_ts` as well as relative to - /// the height `dst_chain_height`. - /// - /// Note: a timed-out packet should result in a - /// [`MsgTimeout`](crate::core::ics04_channel::msgs::timeout::MsgTimeout), - /// instead of the common-case where it results in - /// [`MsgRecvPacket`](crate::core::ics04_channel::msgs::recv_packet::MsgRecvPacket). - pub fn timed_out(&self, dst_chain_ts: &Timestamp, dst_chain_height: Height) -> bool { - (self.timeout_height != Height::zero() && self.timeout_height <= dst_chain_height) - || (self.timeout_timestamp != Timestamp::none() - && dst_chain_ts.check_expiry(&self.timeout_timestamp) == Expired) - } - - pub fn timeout_variant( - &self, - dst_chain_ts: &Timestamp, - dst_chain_height: Height, - ) -> Option { - let height_timeout = - self.timeout_height != Height::zero() && self.timeout_height < dst_chain_height; - let timestamp_timeout = self.timeout_timestamp != Timestamp::none() - && (dst_chain_ts.check_expiry(&self.timeout_timestamp) == Expired); - if height_timeout && !timestamp_timeout { - Some(TimeoutVariant::Height) - } else if timestamp_timeout && !height_timeout { - Some(TimeoutVariant::Timestamp) - } else if timestamp_timeout && height_timeout { - Some(TimeoutVariant::Both) - } else { - None - } - } + /// Checks whether a packet from a + /// [`SendPacket`](crate::core::ics04_channel::events::SendPacket) + /// event is timed-out relative to the current state of the + /// destination chain. 
+ /// + /// Checks both for time-out relative to the destination chain's + /// current timestamp `dst_chain_ts` as well as relative to + /// the height `dst_chain_height`. + /// + /// Note: a timed-out packet should result in a + /// [`MsgTimeout`](crate::core::ics04_channel::msgs::timeout::MsgTimeout), + /// instead of the common-case where it results in + /// [`MsgRecvPacket`](crate::core::ics04_channel::msgs::recv_packet::MsgRecvPacket). + pub fn timed_out(&self, dst_chain_ts: &Timestamp, dst_chain_height: Height) -> bool { + (self.timeout_height != Height::zero() && self.timeout_height <= dst_chain_height) || + (self.timeout_timestamp != Timestamp::none() && + dst_chain_ts.check_expiry(&self.timeout_timestamp) == Expired) + } + + pub fn timeout_variant( + packet: &Packet, + dst_chain_ts: &Timestamp, + dst_chain_height: Height, + ) -> Option { + let height_timeout = + packet.timeout_height != Height::zero() && packet.timeout_height <= dst_chain_height; + let timestamp_timeout = packet.timeout_timestamp != Timestamp::none() && + (dst_chain_ts.check_expiry(&packet.timeout_timestamp) == Expired); + if height_timeout && !timestamp_timeout { + Some(TimeoutVariant::Height) + } else if timestamp_timeout && !height_timeout { + Some(TimeoutVariant::Timestamp) + } else if timestamp_timeout && height_timeout { + Some(TimeoutVariant::Both) + } else { + None + } + } } /// Custom debug output to omit the packet data impl core::fmt::Display for Packet { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { - write!( - f, - "seq:{}, path:{}/{}->{}/{}, toh:{}, tos:{})", - self.sequence, - self.source_channel, - self.source_port, - self.destination_channel, - self.destination_port, - self.timeout_height, - self.timeout_timestamp - ) - } + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { + write!( + f, + "seq:{}, path:{}/{}->{}/{}, toh:{}, tos:{})", + self.sequence, + self.source_channel, + self.source_port, + self.destination_channel, + self.destination_port, + self.timeout_height, + self.timeout_timestamp + ) + } } impl TryFrom for Packet { - type Error = Error; - - fn try_from(raw_pkt: RawPacket) -> Result { - if Sequence::from(raw_pkt.sequence).is_zero() { - return Err(Error::zero_packet_sequence()); - } - let packet_timeout_height: Height = raw_pkt - .timeout_height - .ok_or_else(Error::missing_height)? 
- .into(); - - if packet_timeout_height.is_zero() && raw_pkt.timeout_timestamp == 0 { - return Err(Error::zero_packet_timeout()); - } - if raw_pkt.data.is_empty() { - return Err(Error::zero_packet_data()); - } - - let timeout_timestamp = Timestamp::from_nanoseconds(raw_pkt.timeout_timestamp) - .map_err(Error::invalid_packet_timestamp)?; - - Ok(Packet { - sequence: Sequence::from(raw_pkt.sequence), - source_port: raw_pkt.source_port.parse().map_err(Error::identifier)?, - source_channel: raw_pkt.source_channel.parse().map_err(Error::identifier)?, - destination_port: raw_pkt - .destination_port - .parse() - .map_err(Error::identifier)?, - destination_channel: raw_pkt - .destination_channel - .parse() - .map_err(Error::identifier)?, - data: raw_pkt.data, - timeout_height: packet_timeout_height, - timeout_timestamp, - }) - } + type Error = Error; + + fn try_from(raw_pkt: RawPacket) -> Result { + if Sequence::from(raw_pkt.sequence).is_zero() { + return Err(Error::zero_packet_sequence()) + } + let packet_timeout_height: Height = + raw_pkt.timeout_height.ok_or_else(Error::missing_height)?.into(); + + if packet_timeout_height.is_zero() && raw_pkt.timeout_timestamp == 0 { + return Err(Error::zero_packet_timeout()) + } + if raw_pkt.data.is_empty() { + return Err(Error::zero_packet_data()) + } + + let timeout_timestamp = Timestamp::from_nanoseconds(raw_pkt.timeout_timestamp) + .map_err(Error::invalid_packet_timestamp)?; + + Ok(Packet { + sequence: Sequence::from(raw_pkt.sequence), + source_port: raw_pkt.source_port.parse().map_err(Error::identifier)?, + source_channel: raw_pkt.source_channel.parse().map_err(Error::identifier)?, + destination_port: raw_pkt.destination_port.parse().map_err(Error::identifier)?, + destination_channel: raw_pkt.destination_channel.parse().map_err(Error::identifier)?, + data: raw_pkt.data, + timeout_height: packet_timeout_height, + timeout_timestamp, + }) + } } impl TryFrom> for Packet { - type Error = EventError; - fn try_from(obj: RawObject<'_>) -> Result { - Ok(Packet { - sequence: extract_attribute(&obj, &format!("{}.packet_sequence", obj.action))? - .parse() - .map_err(EventError::channel)?, - source_port: extract_attribute(&obj, &format!("{}.packet_src_port", obj.action))? - .parse() - .map_err(EventError::parse)?, - source_channel: extract_attribute(&obj, &format!("{}.packet_src_channel", obj.action))? - .parse() - .map_err(EventError::parse)?, - destination_port: extract_attribute(&obj, &format!("{}.packet_dst_port", obj.action))? - .parse() - .map_err(EventError::parse)?, - destination_channel: extract_attribute( - &obj, - &format!("{}.packet_dst_channel", obj.action), - )? - .parse() - .map_err(EventError::parse)?, - data: vec![], - timeout_height: extract_attribute( - &obj, - &format!("{}.packet_timeout_height", obj.action), - )? - .parse() - .map_err(EventError::height)?, - timeout_timestamp: extract_attribute( - &obj, - &format!("{}.packet_timeout_timestamp", obj.action), - )? - .parse() - .map_err(EventError::timestamp)?, - }) - } + type Error = EventError; + fn try_from(obj: RawObject<'_>) -> Result { + Ok(Packet { + sequence: extract_attribute(&obj, &format!("{}.packet_sequence", obj.action))? + .parse() + .map_err(EventError::channel)?, + source_port: extract_attribute(&obj, &format!("{}.packet_src_port", obj.action))? + .parse() + .map_err(EventError::parse)?, + source_channel: extract_attribute(&obj, &format!("{}.packet_src_channel", obj.action))? 
+ .parse() + .map_err(EventError::parse)?, + destination_port: extract_attribute(&obj, &format!("{}.packet_dst_port", obj.action))? + .parse() + .map_err(EventError::parse)?, + destination_channel: extract_attribute( + &obj, + &format!("{}.packet_dst_channel", obj.action), + )? + .parse() + .map_err(EventError::parse)?, + data: vec![], + timeout_height: extract_attribute( + &obj, + &format!("{}.packet_timeout_height", obj.action), + )? + .parse() + .map_err(EventError::height)?, + timeout_timestamp: extract_attribute( + &obj, + &format!("{}.packet_timeout_timestamp", obj.action), + )? + .parse() + .map_err(EventError::timestamp)?, + }) + } } impl From for RawPacket { - fn from(packet: Packet) -> Self { - RawPacket { - sequence: packet.sequence.0, - source_port: packet.source_port.to_string(), - source_channel: packet.source_channel.to_string(), - destination_port: packet.destination_port.to_string(), - destination_channel: packet.destination_channel.to_string(), - data: packet.data, - timeout_height: Some(packet.timeout_height.into()), - timeout_timestamp: packet.timeout_timestamp.nanoseconds(), - } - } + fn from(packet: Packet) -> Self { + RawPacket { + sequence: packet.sequence.0, + source_port: packet.source_port.to_string(), + source_channel: packet.source_channel.to_string(), + destination_port: packet.destination_port.to_string(), + destination_channel: packet.destination_channel.to_string(), + data: packet.data, + timeout_height: Some(packet.timeout_height.into()), + timeout_timestamp: packet.timeout_timestamp.nanoseconds(), + } + } } #[cfg(test)] pub mod test_utils { - use crate::prelude::*; - use ibc_proto::ibc::core::channel::v1::Packet as RawPacket; - use ibc_proto::ibc::core::client::v1::Height as RawHeight; - - use crate::core::ics24_host::identifier::{ChannelId, PortId}; - - /// Returns a dummy `RawPacket`, for testing only! - pub fn get_dummy_raw_packet(timeout_height: u64, timeout_timestamp: u64) -> RawPacket { - RawPacket { - sequence: 1, - source_port: PortId::default().to_string(), - source_channel: ChannelId::default().to_string(), - destination_port: PortId::default().to_string(), - destination_channel: ChannelId::default().to_string(), - data: vec![0], - timeout_height: Some(RawHeight { - revision_number: 0, - revision_height: timeout_height, - }), - timeout_timestamp, - } - } + use crate::prelude::*; + use ibc_proto::ibc::core::{channel::v1::Packet as RawPacket, client::v1::Height as RawHeight}; + + use crate::core::ics24_host::identifier::{ChannelId, PortId}; + + /// Returns a dummy `RawPacket`, for testing only! 
+ pub fn get_dummy_raw_packet(timeout_height: u64, timeout_timestamp: u64) -> RawPacket { + RawPacket { + sequence: 1, + source_port: PortId::default().to_string(), + source_channel: ChannelId::default().to_string(), + destination_port: PortId::default().to_string(), + destination_channel: ChannelId::default().to_string(), + data: vec![0], + timeout_height: Some(RawHeight { revision_number: 0, revision_height: timeout_height }), + timeout_timestamp, + } + } } #[cfg(test)] mod tests { - use crate::prelude::*; + use crate::prelude::*; - use test_log::test; + use test_log::test; - use ibc_proto::ibc::core::channel::v1::Packet as RawPacket; + use ibc_proto::ibc::core::channel::v1::Packet as RawPacket; - use crate::core::ics04_channel::packet::test_utils::get_dummy_raw_packet; - use crate::core::ics04_channel::packet::Packet; + use crate::core::ics04_channel::packet::{test_utils::get_dummy_raw_packet, Packet}; - #[test] - fn packet_try_from_raw() { - struct Test { - name: String, - raw: RawPacket, - want_pass: bool, - } + #[test] + fn packet_try_from_raw() { + struct Test { + name: String, + raw: RawPacket, + want_pass: bool, + } - let proof_height = 10; - let default_raw_msg = get_dummy_raw_packet(proof_height, 0); + let proof_height = 10; + let default_raw_msg = get_dummy_raw_packet(proof_height, 0); - let tests: Vec = vec![ + let tests: Vec = vec![ Test { name: "Good parameters".to_string(), raw: default_raw_msg.clone(), @@ -472,38 +464,35 @@ mod tests { }, ]; - for test in tests { - let res_msg = Packet::try_from(test.raw.clone()); - - assert_eq!( - test.want_pass, - res_msg.is_ok(), - "Packet::try_from failed for test {}, \nraw packet {:?} with error {:?}", - test.name, - test.raw, - res_msg.err(), - ); - } - } - - #[test] - fn to_and_from() { - let raw = get_dummy_raw_packet(15, 0); - let msg = Packet::try_from(raw.clone()).unwrap(); - let raw_back = RawPacket::from(msg.clone()); - let msg_back = Packet::try_from(raw_back.clone()).unwrap(); - assert_eq!(raw, raw_back); - assert_eq!(msg, msg_back); - } - - #[test] - fn serialize_and_deserialize_packet() { - let packet = Packet { - data: vec![5; 32], - ..Default::default() - }; - let json = serde_json::to_string(&packet).unwrap(); - let deserialized_packet: Packet = serde_json::from_str(&json).unwrap(); - assert_eq!(packet, deserialized_packet); - } + for test in tests { + let res_msg = Packet::try_from(test.raw.clone()); + + assert_eq!( + test.want_pass, + res_msg.is_ok(), + "Packet::try_from failed for test {}, \nraw packet {:?} with error {:?}", + test.name, + test.raw, + res_msg.err(), + ); + } + } + + #[test] + fn to_and_from() { + let raw = get_dummy_raw_packet(15, 0); + let msg = Packet::try_from(raw.clone()).unwrap(); + let raw_back = RawPacket::from(msg.clone()); + let msg_back = Packet::try_from(raw_back.clone()).unwrap(); + assert_eq!(raw, raw_back); + assert_eq!(msg, msg_back); + } + + #[test] + fn serialize_and_deserialize_packet() { + let packet = Packet { data: vec![5; 32], ..Default::default() }; + let json = serde_json::to_string(&packet).unwrap(); + let deserialized_packet: Packet = serde_json::from_str(&json).unwrap(); + assert_eq!(packet, deserialized_packet); + } } diff --git a/modules/src/core/ics04_channel/version.rs b/modules/src/core/ics04_channel/version.rs index b0f7918dd0..e5e102b260 100644 --- a/modules/src/core/ics04_channel/version.rs +++ b/modules/src/core/ics04_channel/version.rs @@ -1,14 +1,10 @@ //! Data type definition and utilities for the //! version field of a channel end. -//! 
-use core::convert::Infallible; -use core::fmt; -use core::str::FromStr; +use core::{convert::Infallible, fmt, str::FromStr}; use serde_derive::{Deserialize, Serialize}; -use crate::applications::transfer; -use crate::prelude::*; +use crate::{applications::transfer, prelude::*}; /// The version field for a `ChannelEnd`. /// @@ -19,42 +15,42 @@ use crate::prelude::*; pub struct Version(String); impl Version { - pub fn new(v: String) -> Self { - Self(v) - } + pub fn new(v: String) -> Self { + Self(v) + } - pub fn ics20() -> Self { - Self::new(transfer::VERSION.to_string()) - } + pub fn ics20() -> Self { + Self::new(transfer::VERSION.to_string()) + } - pub fn empty() -> Self { - Self::new("".to_string()) - } + pub fn empty() -> Self { + Self::new("".to_string()) + } } impl From for Version { - fn from(s: String) -> Self { - Self::new(s) - } + fn from(s: String) -> Self { + Self::new(s) + } } impl FromStr for Version { - type Err = Infallible; + type Err = Infallible; - fn from_str(s: &str) -> Result { - Ok(Self::new(s.to_string())) - } + fn from_str(s: &str) -> Result { + Ok(Self::new(s.to_string())) + } } /// The default version is empty (unspecified). impl Default for Version { - fn default() -> Self { - Version::empty() - } + fn default() -> Self { + Version::empty() + } } impl fmt::Display for Version { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}", self.0) - } + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.0) + } } diff --git a/modules/src/core/ics05_port/context.rs b/modules/src/core/ics05_port/context.rs index 2c59d4a592..ce07d1911a 100644 --- a/modules/src/core/ics05_port/context.rs +++ b/modules/src/core/ics05_port/context.rs @@ -1,10 +1,13 @@ -use crate::core::ics05_port::error::Error; -use crate::core::ics24_host::identifier::PortId; -use crate::core::ics26_routing::context::ModuleId; -use crate::prelude::*; +use crate::{ + core::{ + ics05_port::error::Error, ics24_host::identifier::PortId, ics26_routing::context::ModuleId, + }, + prelude::*, +}; -/// A context supplying all the necessary read-only dependencies for processing any information regarding a port. +/// A context supplying all the necessary read-only dependencies for processing any information +/// regarding a port. pub trait PortReader { - /// Return the module_id associated with a given port_id - fn lookup_module_by_port(&self, port_id: &PortId) -> Result; + /// Return the module_id associated with a given port_id + fn lookup_module_by_port(&self, port_id: &PortId) -> Result; } diff --git a/modules/src/core/ics05_port/error.rs b/modules/src/core/ics05_port/error.rs index 6713f9b44d..9f2a299501 100644 --- a/modules/src/core/ics05_port/error.rs +++ b/modules/src/core/ics05_port/error.rs @@ -1,24 +1,23 @@ -use crate::core::ics24_host::identifier::PortId; -use crate::prelude::*; +use crate::{core::ics24_host::identifier::PortId, prelude::*}; use flex_error::define_error; define_error! 
{ - #[derive(Debug, PartialEq, Eq, derive_more::From)] - Error { - UnknownPort - { port_id: PortId } - | e | { format_args!("port '{0}' is unknown", e.port_id) }, + #[derive(Debug, PartialEq, Eq, derive_more::From)] + Error { + UnknownPort + { port_id: PortId } + | e | { format_args!("port '{0}' is unknown", e.port_id) }, - PortAlreadyBound - { port_id: PortId } - | e | { format_args!("port '{0}' is already bound", e.port_id) }, + PortAlreadyBound + { port_id: PortId } + | e | { format_args!("port '{0}' is already bound", e.port_id) }, - ModuleNotFound - { port_id: PortId } - | e | { format_args!("could not retrieve module from port '{0}'", e.port_id) }, + ModuleNotFound + { port_id: PortId } + | e | { format_args!("could not retrieve module from port '{0}'", e.port_id) }, - ImplementationSpecific - { reason: String } - | e | { format_args!("implementation specific error: {}", e.reason) }, - } + ImplementationSpecific + { reason: String } + | e | { format_args!("implementation specific error: {}", e.reason) }, + } } diff --git a/modules/src/core/ics23_commitment/commitment.rs b/modules/src/core/ics23_commitment/commitment.rs index 01f898eeb3..2b8fd4c519 100644 --- a/modules/src/core/ics23_commitment/commitment.rs +++ b/modules/src/core/ics23_commitment/commitment.rs @@ -1,6 +1,4 @@ -use crate::core::ics23_commitment::error::Error; -use crate::prelude::*; -use crate::proofs::ProofError; +use crate::{core::ics23_commitment::error::Error, prelude::*, proofs::ProofError}; use core::{convert::TryFrom, fmt}; use ibc_proto::ibc::core::commitment::v1::MerkleProof as RawMerkleProof; @@ -12,37 +10,35 @@ use super::merkle::MerkleProof; #[derive(Clone, PartialEq, Eq, Serialize)] #[serde(transparent)] pub struct CommitmentRoot { - #[serde(serialize_with = "crate::serializers::ser_hex_upper")] - bytes: Vec, + #[serde(serialize_with = "crate::serializers::ser_hex_upper")] + bytes: Vec, } impl fmt::Debug for CommitmentRoot { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let hex = Hex::upper_case().encode_to_string(&self.bytes).unwrap(); - f.debug_tuple("CommitmentRoot").field(&hex).finish() - } + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let hex = Hex::upper_case().encode_to_string(&self.bytes).unwrap(); + f.debug_tuple("CommitmentRoot").field(&hex).finish() + } } impl CommitmentRoot { - pub fn from_bytes(bytes: &[u8]) -> Self { - Self { - bytes: Vec::from(bytes), - } - } + pub fn from_bytes(bytes: &[u8]) -> Self { + Self { bytes: Vec::from(bytes) } + } - pub fn as_bytes(&self) -> &[u8] { - &self.bytes - } + pub fn as_bytes(&self) -> &[u8] { + &self.bytes + } - pub fn into_vec(self) -> Vec { - self.bytes - } + pub fn into_vec(self) -> Vec { + self.bytes + } } impl From> for CommitmentRoot { - fn from(bytes: Vec) -> Self { - Self { bytes } - } + fn from(bytes: Vec) -> Self { + Self { bytes } + } } #[derive(Clone, Debug, PartialEq)] @@ -51,127 +47,128 @@ pub struct CommitmentPath; #[derive(Clone, PartialEq, Eq, Serialize)] #[serde(transparent)] pub struct CommitmentProofBytes { - #[serde(serialize_with = "crate::serializers::ser_hex_upper")] - bytes: Vec, + #[serde(serialize_with = "crate::serializers::ser_hex_upper")] + bytes: Vec, } impl CommitmentProofBytes { - /// Get proof bytes - pub fn as_bytes(&self) -> &[u8] { - &self.bytes - } + /// Get proof bytes + pub fn as_bytes(&self) -> &[u8] { + &self.bytes + } } impl fmt::Debug for CommitmentProofBytes { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let hex = 
Hex::upper_case().encode_to_string(&self.bytes).unwrap(); - f.debug_tuple("CommitmentProof").field(&hex).finish() - } + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let hex = Hex::upper_case().encode_to_string(&self.bytes).unwrap(); + f.debug_tuple("CommitmentProof").field(&hex).finish() + } } impl TryFrom> for CommitmentProofBytes { - type Error = ProofError; + type Error = ProofError; - fn try_from(bytes: Vec) -> Result { - if bytes.is_empty() { - Err(Self::Error::empty_proof()) - } else { - Ok(Self { bytes }) - } - } + fn try_from(bytes: Vec) -> Result { + if bytes.is_empty() { + Err(Self::Error::empty_proof()) + } else { + Ok(Self { bytes }) + } + } } impl From for Vec { - fn from(p: CommitmentProofBytes) -> Vec { - p.bytes - } + fn from(p: CommitmentProofBytes) -> Vec { + p.bytes + } } impl TryFrom for CommitmentProofBytes { - type Error = ProofError; + type Error = ProofError; - fn try_from(proof: RawMerkleProof) -> Result { - let mut buf = Vec::new(); - prost::Message::encode(&proof, &mut buf).unwrap(); - buf.try_into() - } + fn try_from(proof: RawMerkleProof) -> Result { + let mut buf = Vec::new(); + prost::Message::encode(&proof, &mut buf).unwrap(); + buf.try_into() + } } impl TryFrom> for CommitmentProofBytes { - type Error = ProofError; + type Error = ProofError; - fn try_from(value: MerkleProof) -> Result { - Self::try_from(RawMerkleProof::from(value)) - } + fn try_from(value: MerkleProof) -> Result { + Self::try_from(RawMerkleProof::from(value)) + } } impl TryFrom for RawMerkleProof { - type Error = Error; + type Error = Error; - fn try_from(value: CommitmentProofBytes) -> Result { - let value: Vec = value.into(); - let res: RawMerkleProof = - prost::Message::decode(value.as_ref()).map_err(Error::invalid_raw_merkle_proof)?; - Ok(res) - } + fn try_from(value: CommitmentProofBytes) -> Result { + let value: Vec = value.into(); + let res: RawMerkleProof = + prost::Message::decode(value.as_ref()).map_err(Error::invalid_raw_merkle_proof)?; + Ok(res) + } } #[derive(Clone, PartialEq, Eq, Hash, Deserialize, Default)] pub struct CommitmentPrefix { - bytes: Vec, + bytes: Vec, } impl CommitmentPrefix { - pub fn as_bytes(&self) -> &[u8] { - &self.bytes - } + pub fn as_bytes(&self) -> &[u8] { + &self.bytes + } - pub fn into_vec(self) -> Vec { - self.bytes - } + pub fn into_vec(self) -> Vec { + self.bytes + } } impl TryFrom> for CommitmentPrefix { - type Error = Error; + type Error = Error; - fn try_from(bytes: Vec) -> Result { - if bytes.is_empty() { - Err(Self::Error::empty_commitment_prefix()) - } else { - Ok(Self { bytes }) - } - } + fn try_from(bytes: Vec) -> Result { + if bytes.is_empty() { + Err(Self::Error::empty_commitment_prefix()) + } else { + Ok(Self { bytes }) + } + } } impl fmt::Debug for CommitmentPrefix { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let converted = core::str::from_utf8(self.as_bytes()); - match converted { - Ok(s) => write!(f, "{}", s), - Err(_e) => write!(f, "", self.as_bytes()), - } - } + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let converted = core::str::from_utf8(self.as_bytes()); + match converted { + Ok(s) => write!(f, "{}", s), + Err(_e) => write!(f, "", self.as_bytes()), + } + } } impl Serialize for CommitmentPrefix { - fn serialize(&self, serializer: S) -> Result - where - S: serde::Serializer, - { - format!("{:?}", self).serialize(serializer) - } + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + format!("{:?}", self).serialize(serializer) + } } #[cfg(test)] pub 
mod test_util { - use crate::prelude::*; - use ibc_proto::ibc::core::commitment::v1::MerkleProof as RawMerkleProof; - use ibc_proto::ics23::CommitmentProof; - - /// Returns a dummy `RawMerkleProof`, for testing only! - pub fn get_dummy_merkle_proof() -> RawMerkleProof { - let parsed = CommitmentProof { proof: None }; - let mproofs: Vec = vec![parsed]; - RawMerkleProof { proofs: mproofs } - } + use crate::prelude::*; + use ibc_proto::{ + ibc::core::commitment::v1::MerkleProof as RawMerkleProof, ics23::CommitmentProof, + }; + + /// Returns a dummy `RawMerkleProof`, for testing only! + pub fn get_dummy_merkle_proof() -> RawMerkleProof { + let parsed = CommitmentProof { proof: None }; + let mproofs: Vec = vec![parsed]; + RawMerkleProof { proofs: mproofs } + } } diff --git a/modules/src/core/ics23_commitment/error.rs b/modules/src/core/ics23_commitment/error.rs index 260c9557b4..3bc64d17b7 100644 --- a/modules/src/core/ics23_commitment/error.rs +++ b/modules/src/core/ics23_commitment/error.rs @@ -2,38 +2,38 @@ use flex_error::{define_error, TraceError}; use prost::DecodeError; define_error! { - #[derive(Debug, PartialEq, Eq)] - Error { - InvalidRawMerkleProof - [ TraceError ] - |_| { "invalid raw merkle proof" }, + #[derive(Debug, PartialEq, Eq)] + Error { + InvalidRawMerkleProof + [ TraceError ] + |_| { "invalid raw merkle proof" }, - CommitmentProofDecodingFailed - [ TraceError ] - |_| { "failed to decode commitment proof" }, + CommitmentProofDecodingFailed + [ TraceError ] + |_| { "failed to decode commitment proof" }, - EmptyCommitmentPrefix - |_| { "empty commitment prefix" }, + EmptyCommitmentPrefix + |_| { "empty commitment prefix" }, - EmptyMerkleProof - |_| { "empty merkle proof" }, + EmptyMerkleProof + |_| { "empty merkle proof" }, - EmptyMerkleRoot - |_| { "empty merkle root" }, + EmptyMerkleRoot + |_| { "empty merkle root" }, - EmptyVerifiedValue - |_| { "empty verified value" }, + EmptyVerifiedValue + |_| { "empty verified value" }, - NumberOfSpecsMismatch - |_| { "mismatch between the number of proofs with that of specs" }, + NumberOfSpecsMismatch + |_| { "mismatch between the number of proofs with that of specs" }, - NumberOfKeysMismatch - |_| { "mismatch between the number of proofs with that of keys" }, + NumberOfKeysMismatch + |_| { "mismatch between the number of proofs with that of keys" }, - InvalidMerkleProof - |_| { "invalid merkle proof" }, + InvalidMerkleProof + |_| { "invalid merkle proof" }, - VerificationFailure - |_| { "proof verification failed" } - } + VerificationFailure + |_| { "proof verification failed" } + } } diff --git a/modules/src/core/ics23_commitment/merkle.rs b/modules/src/core/ics23_commitment/merkle.rs index e5083cfa73..7a74281823 100644 --- a/modules/src/core/ics23_commitment/merkle.rs +++ b/modules/src/core/ics23_commitment/merkle.rs @@ -1,207 +1,180 @@ -use crate::prelude::*; +use crate::{ + core::ics23_commitment::{ + commitment::{CommitmentPrefix, CommitmentRoot}, + error::Error, + specs::ProofSpecs, + }, + prelude::*, +}; use core::marker::PhantomData; -use tendermint::merkle::proof::Proof as TendermintProof; - -use crate::clients::host_functions::{HostFunctionsManager, HostFunctionsProvider}; -use ibc_proto::ibc::core::commitment::v1::MerklePath; -use ibc_proto::ibc::core::commitment::v1::MerkleProof as RawMerkleProof; -use ibc_proto::ibc::core::commitment::v1::MerkleRoot; -use ics23::commitment_proof::Proof; +use ibc_proto::ibc::core::commitment::v1::{MerklePath, MerkleProof as RawMerkleProof, MerkleRoot}; use ics23::{ - 
calculate_existence_root, verify_membership, verify_non_membership, CommitmentProof, - NonExistenceProof, + calculate_existence_root, commitment_proof::Proof, verify_membership, verify_non_membership, + CommitmentProof, HostFunctionsProvider, NonExistenceProof, }; -use crate::core::ics23_commitment::commitment::{CommitmentPrefix, CommitmentRoot}; -use crate::core::ics23_commitment::error::Error; -use crate::core::ics23_commitment::specs::ProofSpecs; - pub fn apply_prefix(prefix: &CommitmentPrefix, mut path: Vec) -> MerklePath { - let mut key_path: Vec = vec![format!("{:?}", prefix)]; - key_path.append(&mut path); - MerklePath { key_path } + let mut key_path: Vec = vec![format!("{:?}", prefix)]; + key_path.append(&mut path); + MerklePath { key_path } } impl From for MerkleRoot { - fn from(root: CommitmentRoot) -> Self { - Self { - hash: root.into_vec(), - } - } + fn from(root: CommitmentRoot) -> Self { + Self { hash: root.into_vec() } + } } #[derive(Clone, Debug, PartialEq)] pub struct MerkleProof { - pub proofs: Vec, - _phantom: PhantomData, + pub proofs: Vec, + _phantom: PhantomData, } /// Convert to ics23::CommitmentProof -/// The encoding and decoding shouldn't fail since ics23::CommitmentProof and ibc_proto::ics23::CommitmentProof should be the same -/// Ref. +/// The encoding and decoding shouldn't fail since ics23::CommitmentProof and +/// ibc_proto::ics23::CommitmentProof should be the same Ref. impl From for MerkleProof { - fn from(proof: RawMerkleProof) -> Self { - let proofs: Vec = proof - .proofs - .into_iter() - .map(|p| { - let mut encoded = Vec::new(); - prost::Message::encode(&p, &mut encoded).unwrap(); - prost::Message::decode(&*encoded).unwrap() - }) - .collect(); - Self { - proofs, - _phantom: PhantomData, - } - } + fn from(proof: RawMerkleProof) -> Self { + let proofs: Vec = proof + .proofs + .into_iter() + .map(|p| { + let mut encoded = Vec::new(); + prost::Message::encode(&p, &mut encoded).unwrap(); + prost::Message::decode(&*encoded).unwrap() + }) + .collect(); + Self { proofs, _phantom: PhantomData } + } } impl From> for RawMerkleProof { - fn from(proof: MerkleProof) -> Self { - Self { - proofs: proof - .proofs - .into_iter() - .map(|p| { - let mut encoded = Vec::new(); - prost::Message::encode(&p, &mut encoded).unwrap(); - prost::Message::decode(&*encoded).unwrap() - }) - .collect(), - } - } + fn from(proof: MerkleProof) -> Self { + Self { + proofs: proof + .proofs + .into_iter() + .map(|p| { + let mut encoded = Vec::new(); + prost::Message::encode(&p, &mut encoded).unwrap(); + prost::Message::decode(&*encoded).unwrap() + }) + .collect(), + } + } } -impl MerkleProof -where - H: HostFunctionsProvider, -{ - pub fn verify_membership( - &self, - specs: &ProofSpecs, - root: MerkleRoot, - keys: MerklePath, - value: Vec, - start_index: usize, - ) -> Result<(), Error> { - // validate arguments - if self.proofs.is_empty() { - return Err(Error::empty_merkle_proof()); - } - if root.hash.is_empty() { - return Err(Error::empty_merkle_root()); - } - let num = self.proofs.len(); - let ics23_specs = Vec::::from(specs.clone()); - if ics23_specs.len() != num { - return Err(Error::number_of_specs_mismatch()); - } - if keys.key_path.len() != num { - return Err(Error::number_of_keys_mismatch()); - } - if value.is_empty() { - return Err(Error::empty_verified_value()); - } - - let mut subroot = value.clone(); - let mut value = value; - // keys are represented from root-to-leaf - for ((proof, spec), key) in self - .proofs - .iter() - .zip(ics23_specs.iter()) - 
.zip(keys.key_path.iter().rev()) - .skip(start_index) - { - match &proof.proof { - Some(Proof::Exist(existence_proof)) => { - subroot = calculate_existence_root::>(existence_proof) - .map_err(|_| Error::invalid_merkle_proof())?; - if !verify_membership::>( - proof, - spec, - &subroot, - key.as_bytes(), - &value, - ) { - return Err(Error::verification_failure()); - } - value = subroot.clone(); - } - _ => return Err(Error::invalid_merkle_proof()), - } - } - - if root.hash != subroot { - return Err(Error::verification_failure()); - } - - Ok(()) - } - - pub fn verify_non_membership( - &self, - specs: &ProofSpecs, - root: MerkleRoot, - keys: MerklePath, - ) -> Result<(), Error> { - // validate arguments - if self.proofs.is_empty() { - return Err(Error::empty_merkle_proof()); - } - if root.hash.is_empty() { - return Err(Error::empty_merkle_root()); - } - let num = self.proofs.len(); - let ics23_specs = Vec::::from(specs.clone()); - if ics23_specs.len() != num { - return Err(Error::number_of_specs_mismatch()); - } - if keys.key_path.len() != num { - return Err(Error::number_of_keys_mismatch()); - } - - // verify the absence of key in lowest subtree - let proof = self.proofs.get(0).ok_or_else(Error::invalid_merkle_proof)?; - let spec = ics23_specs.get(0).ok_or_else(Error::invalid_merkle_proof)?; - // keys are represented from root-to-leaf - let key = keys - .key_path - .get(num - 1) - .ok_or_else(Error::invalid_merkle_proof)?; - match &proof.proof { - Some(Proof::Nonexist(non_existence_proof)) => { - let subroot = calculate_non_existence_root::(non_existence_proof)?; - if !verify_non_membership::>( - proof, - spec, - &subroot, - key.as_bytes(), - ) { - return Err(Error::verification_failure()); - } - // verify membership proofs starting from index 1 with value = subroot - self.verify_membership(specs, root, keys, subroot, 1) - } - _ => Err(Error::invalid_merkle_proof()), - } - } +impl MerkleProof { + pub fn verify_membership( + &self, + specs: &ProofSpecs, + root: MerkleRoot, + keys: MerklePath, + value: Vec, + start_index: usize, + ) -> Result<(), Error> { + // validate arguments + if self.proofs.is_empty() { + return Err(Error::empty_merkle_proof()) + } + if root.hash.is_empty() { + return Err(Error::empty_merkle_root()) + } + let num = self.proofs.len(); + let ics23_specs = Vec::::from(specs.clone()); + if ics23_specs.len() != num { + return Err(Error::number_of_specs_mismatch()) + } + if keys.key_path.len() != num { + return Err(Error::number_of_keys_mismatch()) + } + if value.is_empty() { + return Err(Error::empty_verified_value()) + } + + let mut subroot = value.clone(); + let mut value = value; + // keys are represented from root-to-leaf + for ((proof, spec), key) in self + .proofs + .iter() + .zip(ics23_specs.iter()) + .zip(keys.key_path.iter().rev()) + .skip(start_index) + { + match &proof.proof { + Some(Proof::Exist(existence_proof)) => { + subroot = calculate_existence_root::(existence_proof) + .map_err(|_| Error::invalid_merkle_proof())?; + if !verify_membership::(proof, spec, &subroot, key.as_bytes(), &value) { + return Err(Error::verification_failure()) + } + value = subroot.clone(); + }, + _ => return Err(Error::invalid_merkle_proof()), + } + } + + if root.hash != subroot { + return Err(Error::verification_failure()) + } + + Ok(()) + } + + pub fn verify_non_membership( + &self, + specs: &ProofSpecs, + root: MerkleRoot, + keys: MerklePath, + ) -> Result<(), Error> { + // validate arguments + if self.proofs.is_empty() { + return Err(Error::empty_merkle_proof()) + } + if 
root.hash.is_empty() { + return Err(Error::empty_merkle_root()) + } + let num = self.proofs.len(); + let ics23_specs = Vec::::from(specs.clone()); + if ics23_specs.len() != num { + return Err(Error::number_of_specs_mismatch()) + } + if keys.key_path.len() != num { + return Err(Error::number_of_keys_mismatch()) + } + + // verify the absence of key in lowest subtree + let proof = self.proofs.get(0).ok_or_else(Error::invalid_merkle_proof)?; + let spec = ics23_specs.get(0).ok_or_else(Error::invalid_merkle_proof)?; + // keys are represented from root-to-leaf + let key = keys.key_path.get(num - 1).ok_or_else(Error::invalid_merkle_proof)?; + match &proof.proof { + Some(Proof::Nonexist(non_existence_proof)) => { + let subroot = calculate_non_existence_root::(non_existence_proof)?; + if !verify_non_membership::(proof, spec, &subroot, key.as_bytes()) { + return Err(Error::verification_failure()) + } + // verify membership proofs starting from index 1 with value = subroot + self.verify_membership(specs, root, keys, subroot, 1) + }, + _ => Err(Error::invalid_merkle_proof()), + } + } } // TODO move to ics23 fn calculate_non_existence_root( - proof: &NonExistenceProof, + proof: &NonExistenceProof, ) -> Result, Error> { - if let Some(left) = &proof.left { - calculate_existence_root::>(left) - .map_err(|_| Error::invalid_merkle_proof()) - } else if let Some(right) = &proof.right { - calculate_existence_root::>(right) - .map_err(|_| Error::invalid_merkle_proof()) - } else { - Err(Error::invalid_merkle_proof()) - } + if let Some(left) = &proof.left { + calculate_existence_root::(left).map_err(|_| Error::invalid_merkle_proof()) + } else if let Some(right) = &proof.right { + calculate_existence_root::(right).map_err(|_| Error::invalid_merkle_proof()) + } else { + Err(Error::invalid_merkle_proof()) + } } // Merkle Proof serialization notes: @@ -209,17 +182,14 @@ fn calculate_non_existence_root( // - TmProof: in tendermint-rs/src/merkle/proof.rs:Proof // - RawProofOps: in tendermint-proto/tendermint.cyrpto.rs:ProofOps // - RawMerkleProof: in ibc-proto/ibc.core.commitment.v1.rs:MerkleProof -// - structure that includes a RawProofOps in its only `proof` field. -// #[derive(Clone, PartialEq, ::prost::Message)] -// pub struct MerkleProof { -// #[prost(message, optional, tag="1")] -// pub proof: ::core::option::Option<::tendermint_proto::crypto::ProofOps>, -// } -// - Vec: RawMerkleProof is not explicitly used but, serialized as Vec, it is -// included in all handshake messages that require proofs (i.e. all except the two `OpenInit`), -// and also in all queries that require proofs -// - MerkleProof: Domain type for RawMerkleProof, currently not used and identical to RawMerkleProof. -// This will change with verification implementation. +// - structure that includes a RawProofOps in its only `proof` field. #[derive(Clone, PartialEq, +// ::prost::Message)] pub struct MerkleProof { #[prost(message, optional, tag="1")] pub proof: +// ::core::option::Option<::tendermint_proto::crypto::ProofOps>, } +// - Vec: RawMerkleProof is not explicitly used but, serialized as Vec, it is included in +// all handshake messages that require proofs (i.e. all except the two `OpenInit`), and also in +// all queries that require proofs +// - MerkleProof: Domain type for RawMerkleProof, currently not used and identical to +// RawMerkleProof. This will change with verification implementation. 
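Because `MerkleProof` is now generic over `ics23::HostFunctionsProvider`, a caller has to supply a host-function implementation when verifying. A hedged sketch of that wiring follows; the helper `check_membership`, the generic parameter `H`, and the `ibc::...` paths are illustrative assumptions based on the in-crate imports above.

    use ibc::core::ics23_commitment::{
        commitment::{CommitmentPrefix, CommitmentRoot},
        merkle::{apply_prefix, MerkleProof},
        specs::ProofSpecs,
    };
    use ibc_proto::ibc::core::commitment::v1::{MerkleProof as RawMerkleProof, MerkleRoot};

    /// Checks that `value` is stored under `path` in the tree committed to by `root`.
    fn check_membership<H: ics23::HostFunctionsProvider>(
        raw_proof: RawMerkleProof,
        prefix: &CommitmentPrefix,
        root: CommitmentRoot,
        path: Vec<String>,
        value: Vec<u8>,
    ) -> bool {
        // Convert the raw protobuf proof into the domain type, carrying the host functions as `H`.
        let proof = MerkleProof::<H>::from(raw_proof);
        // Prepend the commitment prefix to the key path before verification.
        let keys = apply_prefix(prefix, path);
        proof
            .verify_membership(&ProofSpecs::cosmos(), MerkleRoot::from(root), keys, value, 0)
            .is_ok()
    }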
// - CommitmentProof: Defined in ibc-rs as Vec and currently used in all its messages // // Here are a couple of flows that illustrate the different conversions: @@ -233,15 +203,11 @@ fn calculate_non_existence_root( // TmProof -> RawProofOps => RawMerkleProof -> CommitmentProof -> Vec // // Implementations of (de)serializers and conversions: -// - commitment.rs: -// Vec <-> CommitmentProof -// CommitmentProof <-> RawMerkleProof -// - merkle.rs: -// RawMerkleProof <-> MerkleProof -// - tendermint-rs/src/merkle/proof.rs: -// TmProof <-> RawProofOps -// - cosmos.rs:abci_query() converts from query proof to Merkle proof: -// RawProofOps => RawMerkleProof +// - commitment.rs: Vec <-> CommitmentProof CommitmentProof <-> RawMerkleProof +// - merkle.rs: RawMerkleProof <-> MerkleProof +// - tendermint-rs/src/merkle/proof.rs: TmProof <-> RawProofOps +// - cosmos.rs:abci_query() converts from query proof to Merkle proof: RawProofOps => +// RawMerkleProof // // impl TryFrom for MerkleProof { // type Error = Error; @@ -255,19 +221,3 @@ fn calculate_non_existence_root( // RawMerkleProof { proof: value.proof } // } // } - -pub fn convert_tm_to_ics_merkle_proof( - tm_proof: &TendermintProof, -) -> Result, Error> { - let mut proofs = Vec::new(); - - for op in &tm_proof.ops { - let mut parsed = ibc_proto::ics23::CommitmentProof { proof: None }; - prost::Message::merge(&mut parsed, op.data.as_slice()) - .map_err(Error::commitment_proof_decoding_failed)?; - - proofs.push(parsed); - } - - Ok(MerkleProof::from(RawMerkleProof { proofs })) -} diff --git a/modules/src/core/ics23_commitment/specs.rs b/modules/src/core/ics23_commitment/specs.rs index 53e45a5912..72977501a4 100644 --- a/modules/src/core/ics23_commitment/specs.rs +++ b/modules/src/core/ics23_commitment/specs.rs @@ -14,135 +14,130 @@ use serde::{Deserialize, Serialize}; pub struct ProofSpecs(Vec); impl ProofSpecs { - /// Returns the specification for Cosmos-SDK proofs - pub fn cosmos() -> Self { - vec![ - ics23::iavl_spec(), // Format of proofs-iavl (iavl merkle proofs) - ics23::tendermint_spec(), // Format of proofs-tendermint (crypto/ merkle SimpleProof) - ] - .into() - } - - pub fn is_empty(&self) -> bool { - self.0.is_empty() - } + /// Returns the specification for Cosmos-SDK proofs + pub fn cosmos() -> Self { + vec![ + ics23::iavl_spec(), // Format of proofs-iavl (iavl merkle proofs) + ics23::tendermint_spec(), // Format of proofs-tendermint (crypto/ merkle SimpleProof) + ] + .into() + } + + pub fn is_empty(&self) -> bool { + self.0.is_empty() + } } impl Default for ProofSpecs { - fn default() -> Self { - Self::cosmos() - } + fn default() -> Self { + Self::cosmos() + } } impl From> for ProofSpecs { - fn from(ibc_specs: Vec) -> Self { - Self(ibc_specs.into_iter().map(ProofSpec).collect()) - } + fn from(ibc_specs: Vec) -> Self { + Self(ibc_specs.into_iter().map(ProofSpec).collect()) + } } impl From> for ProofSpecs { - fn from(ics23_specs: Vec) -> Self { - Self( - ics23_specs - .into_iter() - .map(|ics23_spec| ics23_spec.into()) - .collect(), - ) - } + fn from(ics23_specs: Vec) -> Self { + Self(ics23_specs.into_iter().map(|ics23_spec| ics23_spec.into()).collect()) + } } impl From for Vec { - fn from(specs: ProofSpecs) -> Self { - specs.0.into_iter().map(|spec| spec.into()).collect() - } + fn from(specs: ProofSpecs) -> Self { + specs.0.into_iter().map(|spec| spec.into()).collect() + } } impl From for Vec { - fn from(specs: ProofSpecs) -> Self { - specs.0.into_iter().map(|spec| spec.0).collect() - } + fn from(specs: ProofSpecs) -> Self { + 
specs.0.into_iter().map(|spec| spec.0).collect() + } } #[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize)] struct ProofSpec(IbcProofSpec); impl From for ProofSpec { - fn from(spec: Ics23ProofSpec) -> Self { - Self(IbcProofSpec { - leaf_spec: spec.leaf_spec.map(|lop| LeafOp::from(lop).0), - inner_spec: spec.inner_spec.map(|ispec| InnerSpec::from(ispec).0), - max_depth: spec.max_depth, - min_depth: spec.min_depth, - }) - } + fn from(spec: Ics23ProofSpec) -> Self { + Self(IbcProofSpec { + leaf_spec: spec.leaf_spec.map(|lop| LeafOp::from(lop).0), + inner_spec: spec.inner_spec.map(|ispec| InnerSpec::from(ispec).0), + max_depth: spec.max_depth, + min_depth: spec.min_depth, + }) + } } impl From for Ics23ProofSpec { - fn from(spec: ProofSpec) -> Self { - let spec = spec.0; - Ics23ProofSpec { - leaf_spec: spec.leaf_spec.map(|lop| LeafOp(lop).into()), - inner_spec: spec.inner_spec.map(|ispec| InnerSpec(ispec).into()), - max_depth: spec.max_depth, - min_depth: spec.min_depth, - } - } + fn from(spec: ProofSpec) -> Self { + let spec = spec.0; + Ics23ProofSpec { + leaf_spec: spec.leaf_spec.map(|lop| LeafOp(lop).into()), + inner_spec: spec.inner_spec.map(|ispec| InnerSpec(ispec).into()), + max_depth: spec.max_depth, + min_depth: spec.min_depth, + } + } } #[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize)] struct LeafOp(IbcLeafOp); impl From for LeafOp { - fn from(leaf_op: Ics23LeafOp) -> Self { - Self(IbcLeafOp { - hash: leaf_op.hash, - prehash_key: leaf_op.prehash_key, - prehash_value: leaf_op.prehash_value, - length: leaf_op.length, - prefix: leaf_op.prefix, - }) - } + fn from(leaf_op: Ics23LeafOp) -> Self { + Self(IbcLeafOp { + hash: leaf_op.hash, + prehash_key: leaf_op.prehash_key, + prehash_value: leaf_op.prehash_value, + length: leaf_op.length, + prefix: leaf_op.prefix, + }) + } } impl From for Ics23LeafOp { - fn from(leaf_op: LeafOp) -> Self { - let leaf_op = leaf_op.0; - Ics23LeafOp { - hash: leaf_op.hash, - prehash_key: leaf_op.prehash_key, - prehash_value: leaf_op.prehash_value, - length: leaf_op.length, - prefix: leaf_op.prefix, - } - } + fn from(leaf_op: LeafOp) -> Self { + let leaf_op = leaf_op.0; + Ics23LeafOp { + hash: leaf_op.hash, + prehash_key: leaf_op.prehash_key, + prehash_value: leaf_op.prehash_value, + length: leaf_op.length, + prefix: leaf_op.prefix, + } + } } #[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize)] struct InnerSpec(IbcInnerSpec); impl From for InnerSpec { - fn from(inner_spec: Ics23InnerSpec) -> Self { - Self(IbcInnerSpec { - child_order: inner_spec.child_order, - child_size: inner_spec.child_size, - min_prefix_length: inner_spec.min_prefix_length, - max_prefix_length: inner_spec.max_prefix_length, - empty_child: inner_spec.empty_child, - hash: inner_spec.hash, - }) - } + fn from(inner_spec: Ics23InnerSpec) -> Self { + Self(IbcInnerSpec { + child_order: inner_spec.child_order, + child_size: inner_spec.child_size, + min_prefix_length: inner_spec.min_prefix_length, + max_prefix_length: inner_spec.max_prefix_length, + empty_child: inner_spec.empty_child, + hash: inner_spec.hash, + }) + } } impl From for Ics23InnerSpec { - fn from(inner_spec: InnerSpec) -> Self { - let inner_spec = inner_spec.0; - Ics23InnerSpec { - child_order: inner_spec.child_order, - child_size: inner_spec.child_size, - min_prefix_length: inner_spec.min_prefix_length, - max_prefix_length: inner_spec.max_prefix_length, - empty_child: inner_spec.empty_child, - hash: inner_spec.hash, - } - } + fn from(inner_spec: InnerSpec) -> Self { + let inner_spec = inner_spec.0; + 
Ics23InnerSpec { + child_order: inner_spec.child_order, + child_size: inner_spec.child_size, + min_prefix_length: inner_spec.min_prefix_length, + max_prefix_length: inner_spec.max_prefix_length, + empty_child: inner_spec.empty_child, + hash: inner_spec.hash, + } + } } diff --git a/modules/src/core/ics24_host/error.rs b/modules/src/core/ics24_host/error.rs index de71ad7974..5ef4b12aab 100644 --- a/modules/src/core/ics24_host/error.rs +++ b/modules/src/core/ics24_host/error.rs @@ -6,40 +6,40 @@ use serde::Serialize; use crate::prelude::*; define_error! { - #[derive(Debug, PartialEq, Eq, Serialize)] - ValidationError { - ContainSeparator - { id : String } - | e | { format_args!("identifier {0} cannot contain separator '/'", e.id) }, - - InvalidLength - { - id: String, - length: usize, - min: usize, - max: usize, - } - | e | { format_args!("identifier {0} has invalid length {1} must be between {2}-{3} characters", e.id, e.length, e.min, e.max) }, - - InvalidCharacter - { id: String } - | e | { format_args!("identifier {0} must only contain alphanumeric characters or `.`, `_`, `+`, `-`, `#`, - `[`, `]`, `<`, `>`", e.id) }, - - Empty - | _ | { "identifier cannot be empty" }, - - ChainIdInvalidFormat - { id: String } - | e | { format_args!("chain identifiers are expected to be in epoch format {0}", e.id) }, - - ChannelIdInvalidFormat - | _ | { "channel identifiers are expected to be in `channel-{N}` format" }, - - ChannelIdParseFailure - [ TraceError ] - | _ | { "failed to parse channel identifier" }, - - InvalidCounterpartyChannelId - |_| { "Invalid channel id in counterparty" } - } + #[derive(Debug, PartialEq, Eq, Serialize)] + ValidationError { + ContainSeparator + { id : String } + | e | { format_args!("identifier {0} cannot contain separator '/'", e.id) }, + + InvalidLength + { + id: String, + length: usize, + min: usize, + max: usize, + } + | e | { format_args!("identifier {0} has invalid length {1} must be between {2}-{3} characters", e.id, e.length, e.min, e.max) }, + + InvalidCharacter + { id: String } + | e | { format_args!("identifier {0} must only contain alphanumeric characters or `.`, `_`, `+`, `-`, `#`, - `[`, `]`, `<`, `>`", e.id) }, + + Empty + | _ | { "identifier cannot be empty" }, + + ChainIdInvalidFormat + { id: String } + | e | { format_args!("chain identifiers are expected to be in epoch format {0}", e.id) }, + + ChannelIdInvalidFormat + | _ | { "channel identifiers are expected to be in `channel-{N}` format" }, + + ChannelIdParseFailure + [ TraceError ] + | _ | { "failed to parse channel identifier" }, + + InvalidCounterpartyChannelId + |_| { "Invalid channel id in counterparty" } + } } diff --git a/modules/src/core/ics24_host/identifier.rs b/modules/src/core/ics24_host/identifier.rs index 50066f81ac..7b01c3a90a 100644 --- a/modules/src/core/ics24_host/identifier.rs +++ b/modules/src/core/ics24_host/identifier.rs @@ -1,13 +1,14 @@ -use core::convert::{From, Infallible}; -use core::fmt::{self, Debug, Display, Formatter}; -use core::str::FromStr; +use core::{ + convert::{From, Infallible}, + fmt::{self, Debug, Display, Formatter}, + str::FromStr, +}; use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; use super::validate::*; -use crate::core::ics02_client::client_type::ClientType; -use crate::core::ics24_host::error::ValidationError; -use crate::prelude::*; + +use crate::{core::ics24_host::error::ValidationError, prelude::*}; /// This type is subject to future changes. 
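The `ProofSpec`/`LeafOp`/`InnerSpec` wrappers above exist purely to translate field-by-field between the `ics23` and `ibc_proto` spec types. A small sketch of the default spec set, assuming the `ibc::...` path and that the field-wise conversions shown above are lossless:

    use ibc::core::ics23_commitment::specs::ProofSpecs;

    fn default_specs_are_cosmos() {
        // ProofSpecs::default() delegates to ProofSpecs::cosmos(): an IAVL spec plus a Tendermint spec.
        let specs = ProofSpecs::default();
        assert!(!specs.is_empty());

        // Converting into the raw ics23 representation yields the two well-known specs.
        let raw: Vec<ics23::ProofSpec> = specs.into();
        assert_eq!(raw.len(), 2);
        assert_eq!(raw[0], ics23::iavl_spec());
        assert_eq!(raw[1], ics23::tendermint_spec());
    }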
/// @@ -19,192 +20,163 @@ use crate::prelude::*; #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] #[serde(from = "tendermint::chain::Id", into = "tendermint::chain::Id")] pub struct ChainId { - id: String, - version: u64, + id: String, + version: u64, } impl ChainId { - /// Creates a new `ChainId` given a chain name and an epoch number. - /// - /// The returned `ChainId` will have the format: `{chain name}-{epoch number}`. - /// ``` - /// use ibc::core::ics24_host::identifier::ChainId; - /// - /// let epoch_number = 10; - /// let id = ChainId::new("chainA".to_string(), epoch_number); - /// assert_eq!(id.version(), epoch_number); - /// ``` - pub fn new(name: String, version: u64) -> Self { - Self { - id: format!("{}-{}", name, version), - version, - } - } - - pub fn from_string(id: &str) -> Self { - let version = if Self::is_epoch_format(id) { - Self::chain_version(id) - } else { - 0 - }; - - Self { - id: id.to_string(), - version, - } - } - - /// Get a reference to the underlying string. - pub fn as_str(&self) -> &str { - &self.id - } - - // TODO: this should probably be named epoch_number. - /// Extract the version from this chain identifier. - pub fn version(&self) -> u64 { - self.version - } - - /// Extract the version from the given chain identifier. - /// ``` - /// use ibc::core::ics24_host::identifier::ChainId; - /// - /// assert_eq!(ChainId::chain_version("chain--a-0"), 0); - /// assert_eq!(ChainId::chain_version("ibc-10"), 10); - /// assert_eq!(ChainId::chain_version("cosmos-hub-97"), 97); - /// assert_eq!(ChainId::chain_version("testnet-helloworld-2"), 2); - /// ``` - pub fn chain_version(chain_id: &str) -> u64 { - if !ChainId::is_epoch_format(chain_id) { - return 0; - } - - let split: Vec<_> = chain_id.split('-').collect(); - split - .last() - .expect("get revision number from chain_id") - .parse() - .unwrap_or(0) - } - - /// is_epoch_format() checks if a chain_id is in the format required for parsing epochs - /// The chainID must be in the form: `{chainID}-{version}` - /// ``` - /// use ibc::core::ics24_host::identifier::ChainId; - /// assert_eq!(ChainId::is_epoch_format("chainA-0"), false); - /// assert_eq!(ChainId::is_epoch_format("chainA"), false); - /// assert_eq!(ChainId::is_epoch_format("chainA-1"), true); - /// ``` - pub fn is_epoch_format(chain_id: &str) -> bool { - let re = safe_regex::regex!(br".+[^-]-{1}[1-9][0-9]*"); - re.is_match(chain_id.as_bytes()) - } + /// Creates a new `ChainId` given a chain name and an epoch number. + /// + /// The returned `ChainId` will have the format: `{chain name}-{epoch number}`. + /// ``` + /// use ibc::core::ics24_host::identifier::ChainId; + /// + /// let epoch_number = 10; + /// let id = ChainId::new("chainA".to_string(), epoch_number); + /// assert_eq!(id.version(), epoch_number); + /// ``` + pub fn new(name: String, version: u64) -> Self { + Self { id: format!("{}-{}", name, version), version } + } + + pub fn from_string(id: &str) -> Self { + let version = if Self::is_epoch_format(id) { Self::chain_version(id) } else { 0 }; + + Self { id: id.to_string(), version } + } + + /// Get a reference to the underlying string. + pub fn as_str(&self) -> &str { + &self.id + } + + // TODO: this should probably be named epoch_number. + /// Extract the version from this chain identifier. + pub fn version(&self) -> u64 { + self.version + } + + /// Extract the version from the given chain identifier. 
+ /// ``` + /// use ibc::core::ics24_host::identifier::ChainId; + /// + /// assert_eq!(ChainId::chain_version("chain--a-0"), 0); + /// assert_eq!(ChainId::chain_version("ibc-10"), 10); + /// assert_eq!(ChainId::chain_version("cosmos-hub-97"), 97); + /// assert_eq!(ChainId::chain_version("testnet-helloworld-2"), 2); + /// ``` + pub fn chain_version(chain_id: &str) -> u64 { + if !ChainId::is_epoch_format(chain_id) { + return 0 + } + + let split: Vec<_> = chain_id.split('-').collect(); + split.last().expect("get revision number from chain_id").parse().unwrap_or(0) + } + + /// is_epoch_format() checks if a chain_id is in the format required for parsing epochs + /// The chainID must be in the form: `{chainID}-{version}` + /// ``` + /// use ibc::core::ics24_host::identifier::ChainId; + /// assert_eq!(ChainId::is_epoch_format("chainA-0"), false); + /// assert_eq!(ChainId::is_epoch_format("chainA"), false); + /// assert_eq!(ChainId::is_epoch_format("chainA-1"), true); + /// ``` + pub fn is_epoch_format(chain_id: &str) -> bool { + let re = safe_regex::regex!(br".+[^-]-{1}[1-9][0-9]*"); + re.is_match(chain_id.as_bytes()) + } } impl FromStr for ChainId { - type Err = Infallible; + type Err = Infallible; - fn from_str(id: &str) -> Result { - Ok(Self::from_string(id)) - } + fn from_str(id: &str) -> Result { + Ok(Self::from_string(id)) + } } impl Display for ChainId { - fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), fmt::Error> { - write!(f, "{}", self.id) - } + fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), fmt::Error> { + write!(f, "{}", self.id) + } } impl From for tendermint::chain::Id { - fn from(id: ChainId) -> Self { - tendermint::chain::Id::from_str(id.as_str()).unwrap() - } + fn from(id: ChainId) -> Self { + tendermint::chain::Id::from_str(id.as_str()).unwrap() + } } impl From for ChainId { - fn from(id: tendermint::chain::Id) -> Self { - ChainId::from_str(id.as_str()).unwrap() - } + fn from(id: tendermint::chain::Id) -> Self { + ChainId::from_str(id.as_str()).unwrap() + } } impl Default for ChainId { - fn default() -> Self { - "defaultChainId".to_string().parse().unwrap() - } + fn default() -> Self { + "defaultChainId".to_string().parse().unwrap() + } } impl From for ChainId { - fn from(value: String) -> Self { - Self::from_string(&value) - } + fn from(value: String) -> Self { + Self::from_string(&value) + } } #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] pub struct ClientId(String); impl ClientId { - /// Builds a new client identifier. Client identifiers are deterministically formed from two - /// elements: a prefix derived from the client type `ctype`, and a monotonically increasing - /// `counter`; these are separated by a dash "-". - /// - /// ``` - /// # use ibc::core::ics24_host::identifier::ClientId; - /// # use ibc::core::ics02_client::client_type::ClientType; - /// let tm_client_id = ClientId::new(ClientType::Tendermint, 0); - /// assert!(tm_client_id.is_ok()); - /// tm_client_id.map(|id| { assert_eq!(&id, "07-tendermint-0") }); - /// ``` - pub fn new(ctype: ClientType, counter: u64) -> Result { - let prefix = Self::prefix(ctype); - let id = format!("{}-{}", prefix, counter); - Self::from_str(id.as_str()) - } - - /// Get this identifier as a borrowed `&str` - pub fn as_str(&self) -> &str { - &self.0 - } - - /// Returns one of the prefixes that should be present in any client identifiers. 
- /// The prefix is deterministic for a given chain type, hence all clients for a Tendermint-type - /// chain, for example, will have the prefix '07-tendermint'. - pub fn prefix(client_type: ClientType) -> &'static str { - match client_type { - ClientType::Tendermint => ClientType::Tendermint.as_str(), - #[cfg(any(test, feature = "ics11_beefy"))] - ClientType::Beefy => ClientType::Beefy.as_str(), - #[cfg(any(test, feature = "ics11_beefy"))] - ClientType::Near => ClientType::Near.as_str(), - #[cfg(any(test, feature = "mocks"))] - ClientType::Mock => ClientType::Mock.as_str(), - } - } - - /// Get this identifier as a borrowed byte slice - pub fn as_bytes(&self) -> &[u8] { - self.0.as_bytes() - } + /// Builds a new client identifier. Client identifiers are deterministically formed from two + /// elements: a prefix derived from the client type `ctype`, and a monotonically increasing + /// `counter`; these are separated by a dash "-". + /// + /// ``` + /// # use ibc::core::ics24_host::identifier::ClientId; + /// # use ibc::core::ics02_client::client_state::ClientType; + /// let tm_client_id = ClientId::new("07-tendermint", 0); + /// assert!(tm_client_id.is_ok()); + /// tm_client_id.map(|id| { assert_eq!(&id, "07-tendermint-0") }); + /// ``` + pub fn new(prefix: &str, counter: u64) -> Result { + let id = format!("{}-{}", prefix, counter); + Self::from_str(id.as_str()) + } + + /// Get this identifier as a borrowed `&str` + pub fn as_str(&self) -> &str { + &self.0 + } + + /// Get this identifier as a borrowed byte slice + pub fn as_bytes(&self) -> &[u8] { + self.0.as_bytes() + } } /// This implementation provides a `to_string` method. impl Display for ClientId { - fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), fmt::Error> { - write!(f, "{}", self.0) - } + fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), fmt::Error> { + write!(f, "{}", self.0) + } } impl FromStr for ClientId { - type Err = ValidationError; + type Err = ValidationError; - fn from_str(s: &str) -> Result { - validate_client_identifier(s).map(|_| Self(s.to_string())) - } + fn from_str(s: &str) -> Result { + validate_client_identifier(s).map(|_| Self(s.to_string())) + } } +#[cfg(not(test))] impl Default for ClientId { - fn default() -> Self { - Self::new(ClientType::Tendermint, 0).unwrap() - } + fn default() -> Self { + Self::new("00-uninitialized", 0).unwrap() + } } /// Equality check against string literal (satisfies &ClientId == &str). @@ -216,65 +188,65 @@ impl Default for ClientId { /// client_id.map(|id| {assert_eq!(&id, "clientidtwo")}); /// ``` impl PartialEq for ClientId { - fn eq(&self, other: &str) -> bool { - self.as_str().eq(other) - } + fn eq(&self, other: &str) -> bool { + self.as_str().eq(other) + } } #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] pub struct ConnectionId(String); impl ConnectionId { - /// Builds a new connection identifier. Connection identifiers are deterministically formed from - /// two elements: a prefix `prefix`, and a monotonically increasing `counter`; these are - /// separated by a dash "-". The prefix is currently determined statically (see - /// `ConnectionId::prefix()`) so this method accepts a single argument, the `counter`. 
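With `ClientId::new` now taking a plain string prefix instead of a `ClientType`, callers pass the prefix directly and rely on the ICS-024 validation in `FromStr`. A short sketch, reusing the "07-tendermint" prefix from the doctest above (the function name is illustrative):

    use core::str::FromStr;
    use ibc::core::ics24_host::identifier::ClientId;

    fn build_and_parse_client_id() {
        // The prefix is supplied by the caller and joined with the counter by a dash.
        let id = ClientId::new("07-tendermint", 3).expect("valid client identifier");
        assert_eq!(id.as_str(), "07-tendermint-3");

        // Round-trip through FromStr, which runs validate_client_identifier.
        let parsed = ClientId::from_str("07-tendermint-3").expect("valid client identifier");
        assert_eq!(parsed, id);
    }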
- /// - /// ``` - /// # use ibc::core::ics24_host::identifier::ConnectionId; - /// let conn_id = ConnectionId::new(11); - /// assert_eq!(&conn_id, "connection-11"); - /// ``` - pub fn new(counter: u64) -> Self { - let id = format!("{}-{}", Self::prefix(), counter); - Self::from_str(id.as_str()).unwrap() - } - - /// Returns the static prefix to be used across all connection identifiers. - pub fn prefix() -> &'static str { - "connection" - } - - /// Get this identifier as a borrowed `&str` - pub fn as_str(&self) -> &str { - &self.0 - } - - /// Get this identifier as a borrowed byte slice - pub fn as_bytes(&self) -> &[u8] { - self.0.as_bytes() - } + /// Builds a new connection identifier. Connection identifiers are deterministically formed from + /// two elements: a prefix `prefix`, and a monotonically increasing `counter`; these are + /// separated by a dash "-". The prefix is currently determined statically (see + /// `ConnectionId::prefix()`) so this method accepts a single argument, the `counter`. + /// + /// ``` + /// # use ibc::core::ics24_host::identifier::ConnectionId; + /// let conn_id = ConnectionId::new(11); + /// assert_eq!(&conn_id, "connection-11"); + /// ``` + pub fn new(counter: u64) -> Self { + let id = format!("{}-{}", Self::prefix(), counter); + Self::from_str(id.as_str()).unwrap() + } + + /// Returns the static prefix to be used across all connection identifiers. + pub fn prefix() -> &'static str { + "connection" + } + + /// Get this identifier as a borrowed `&str` + pub fn as_str(&self) -> &str { + &self.0 + } + + /// Get this identifier as a borrowed byte slice + pub fn as_bytes(&self) -> &[u8] { + self.0.as_bytes() + } } /// This implementation provides a `to_string` method. impl Display for ConnectionId { - fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), fmt::Error> { - write!(f, "{}", self.0) - } + fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), fmt::Error> { + write!(f, "{}", self.0) + } } impl FromStr for ConnectionId { - type Err = ValidationError; + type Err = ValidationError; - fn from_str(s: &str) -> Result { - validate_connection_identifier(s).map(|_| Self(s.to_string())) - } + fn from_str(s: &str) -> Result { + validate_connection_identifier(s).map(|_| Self(s.to_string())) + } } impl Default for ConnectionId { - fn default() -> Self { - Self::new(0) - } + fn default() -> Self { + Self::new(0) + } } /// Equality check against string literal (satisfies &ConnectionId == &str). 
@@ -286,155 +258,150 @@ impl Default for ConnectionId { /// conn_id.map(|id| {assert_eq!(&id, "connectionId-0")}); /// ``` impl PartialEq for ConnectionId { - fn eq(&self, other: &str) -> bool { - self.as_str().eq(other) - } + fn eq(&self, other: &str) -> bool { + self.as_str().eq(other) + } } #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] pub struct PortId(String); impl PortId { - /// Infallible creation of the well-known transfer port - pub fn transfer() -> Self { - Self("transfer".to_string()) - } + /// Infallible creation of the well-known transfer port + pub fn transfer() -> Self { + Self("transfer".to_string()) + } - /// Get this identifier as a borrowed `&str` - pub fn as_str(&self) -> &str { - &self.0 - } + /// Get this identifier as a borrowed `&str` + pub fn as_str(&self) -> &str { + &self.0 + } - /// Get this identifier as a borrowed byte slice - pub fn as_bytes(&self) -> &[u8] { - self.0.as_bytes() - } + /// Get this identifier as a borrowed byte slice + pub fn as_bytes(&self) -> &[u8] { + self.0.as_bytes() + } } /// This implementation provides a `to_string` method. impl Display for PortId { - fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), fmt::Error> { - write!(f, "{}", self.0) - } + fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), fmt::Error> { + write!(f, "{}", self.0) + } } impl FromStr for PortId { - type Err = ValidationError; + type Err = ValidationError; - fn from_str(s: &str) -> Result { - validate_port_identifier(s).map(|_| Self(s.to_string())) - } + fn from_str(s: &str) -> Result { + validate_port_identifier(s).map(|_| Self(s.to_string())) + } } impl AsRef for PortId { - fn as_ref(&self) -> &str { - self.0.as_str() - } + fn as_ref(&self) -> &str { + self.0.as_str() + } } impl Default for PortId { - fn default() -> Self { - "defaultPort".to_string().parse().unwrap() - } + fn default() -> Self { + "defaultPort".to_string().parse().unwrap() + } } #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct ChannelId(u64); impl ChannelId { - /// Builds a new channel identifier. Like client and connection identifiers, channel ids are - /// deterministically formed from two elements: a prefix `prefix`, and a monotonically - /// increasing `counter`, separated by a dash "-". - /// The prefix is currently determined statically (see `ChannelId::prefix()`) so this method - /// accepts a single argument, the `counter`. - /// - /// ``` - /// # use ibc::core::ics24_host::identifier::ChannelId; - /// let chan_id = ChannelId::new(27); - /// assert_eq!(chan_id.to_string(), "channel-27"); - /// ``` - pub fn new(counter: u64) -> Self { - Self(counter) - } - - pub fn sequence(&self) -> u64 { - self.0 - } - - const fn prefix() -> &'static str { - "channel-" - } + /// Builds a new channel identifier. Like client and connection identifiers, channel ids are + /// deterministically formed from two elements: a prefix `prefix`, and a monotonically + /// increasing `counter`, separated by a dash "-". + /// The prefix is currently determined statically (see `ChannelId::prefix()`) so this method + /// accepts a single argument, the `counter`. + /// + /// ``` + /// # use ibc::core::ics24_host::identifier::ChannelId; + /// let chan_id = ChannelId::new(27); + /// assert_eq!(chan_id.to_string(), "channel-27"); + /// ``` + pub fn new(counter: u64) -> Self { + Self(counter) + } + + pub fn sequence(&self) -> u64 { + self.0 + } + + const fn prefix() -> &'static str { + "channel-" + } } /// This implementation provides a `to_string` method. 
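Since `ChannelId` is now a thin `u64` wrapper, its textual form is produced by `Display` and recovered by the `FromStr` implementation shown just below. A round-trip sketch (the function name is illustrative):

    use core::str::FromStr;
    use ibc::core::ics24_host::identifier::ChannelId;

    fn channel_id_round_trip() {
        let id = ChannelId::new(27);
        // Display prepends the static "channel-" prefix to the counter.
        assert_eq!(id.to_string(), "channel-27");
        assert_eq!(id.sequence(), 27);

        // FromStr strips the prefix and parses the counter back out.
        let parsed = ChannelId::from_str("channel-27").expect("valid channel identifier");
        assert_eq!(parsed, id);
    }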
impl Display for ChannelId { - fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), fmt::Error> { - write!(f, "{}{}", Self::prefix(), self.0) - } + fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), fmt::Error> { + write!(f, "{}{}", Self::prefix(), self.0) + } } impl Debug for ChannelId { - fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), fmt::Error> { - f.debug_tuple("ChannelId").field(&self.to_string()).finish() - } + fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), fmt::Error> { + f.debug_tuple("ChannelId").field(&self.to_string()).finish() + } } impl FromStr for ChannelId { - type Err = ValidationError; + type Err = ValidationError; - fn from_str(s: &str) -> Result { - let s = s - .strip_prefix(Self::prefix()) - .ok_or_else(ValidationError::channel_id_invalid_format)?; - let counter = u64::from_str(s).map_err(ValidationError::channel_id_parse_failure)?; - Ok(Self(counter)) - } + fn from_str(s: &str) -> Result { + let s = s + .strip_prefix(Self::prefix()) + .ok_or_else(ValidationError::channel_id_invalid_format)?; + let counter = u64::from_str(s).map_err(ValidationError::channel_id_parse_failure)?; + Ok(Self(counter)) + } } impl Default for ChannelId { - fn default() -> Self { - Self::new(0) - } + fn default() -> Self { + Self::new(0) + } } impl Serialize for ChannelId { - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - serializer.collect_str(self) - } + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + serializer.collect_str(self) + } } impl<'de> Deserialize<'de> for ChannelId { - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - String::deserialize(deserializer)? - .parse() - .map_err(de::Error::custom) - } + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + String::deserialize(deserializer)?.parse().map_err(de::Error::custom) + } } /// A pair of [`PortId`] and [`ChannelId`] are used together for sending IBC packets. #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] pub struct PortChannelId { - pub channel_id: ChannelId, - pub port_id: PortId, + pub channel_id: ChannelId, + pub port_id: PortId, } impl PortChannelId { - pub fn new(channel_id: ChannelId, port_id: PortId) -> Self { - Self { - channel_id, - port_id, - } - } + pub fn new(channel_id: ChannelId, port_id: PortId) -> Self { + Self { channel_id, port_id } + } } impl Display for PortChannelId { - fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), fmt::Error> { - write!(f, "{}/{}", self.port_id, self.channel_id) - } + fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), fmt::Error> { + write!(f, "{}/{}", self.port_id, self.channel_id) + } } diff --git a/modules/src/core/ics24_host/path.rs b/modules/src/core/ics24_host/path.rs index 56542e1d26..b21a104fdc 100644 --- a/modules/src/core/ics24_host/path.rs +++ b/modules/src/core/ics24_host/path.rs @@ -3,11 +3,12 @@ use crate::prelude::*; /// Path-space as listed in ICS-024 /// https://github.com/cosmos/ibc/tree/master/spec/core/ics-024-host-requirements#path-space /// Some of these are implemented in other ICSs, but ICS-024 has a nice summary table. 
-/// use core::str::FromStr; -use crate::core::ics04_channel::packet::Sequence; -use crate::core::ics24_host::identifier::{ChannelId, ClientId, ConnectionId, PortId}; +use crate::core::{ + ics04_channel::packet::Sequence, + ics24_host::identifier::{ChannelId, ClientId, ConnectionId, PortId}, +}; use derive_more::{Display, From}; use flex_error::define_error; @@ -22,7 +23,7 @@ pub const SDK_UPGRADE_QUERY_PATH: &str = "store/upgrade/key"; /// ABCI client upgrade keys /// - The key identifying the upgraded IBC state within the upgrade sub-store const UPGRADED_IBC_STATE: &str = "upgradedIBCState"; -///- The key identifying the upgraded client state +/// - The key identifying the upgraded client state const UPGRADED_CLIENT_STATE: &str = "upgradedClient"; /// - The key identifying the upgraded consensus state const UPGRADED_CLIENT_CONSENSUS_STATE: &str = "upgradedConsState"; @@ -30,20 +31,20 @@ const UPGRADED_CLIENT_CONSENSUS_STATE: &str = "upgradedConsState"; /// The Path enum abstracts out the different sub-paths. #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, From, Display)] pub enum Path { - ClientType(ClientTypePath), - ClientState(ClientStatePath), - ClientConsensusState(ClientConsensusStatePath), - ClientConnections(ClientConnectionsPath), - Connections(ConnectionsPath), - Ports(PortsPath), - ChannelEnds(ChannelEndsPath), - SeqSends(SeqSendsPath), - SeqRecvs(SeqRecvsPath), - SeqAcks(SeqAcksPath), - Commitments(CommitmentsPath), - Acks(AcksPath), - Receipts(ReceiptsPath), - Upgrade(ClientUpgradePath), + ClientType(ClientTypePath), + ClientState(ClientStatePath), + ClientConsensusState(ClientConsensusStatePath), + ClientConnections(ClientConnectionsPath), + Connections(ConnectionsPath), + Ports(PortsPath), + ChannelEnds(ChannelEndsPath), + SeqSends(SeqSendsPath), + SeqRecvs(SeqRecvsPath), + SeqAcks(SeqAcksPath), + Commitments(CommitmentsPath), + Acks(AcksPath), + Receipts(ReceiptsPath), + Upgrade(ClientUpgradePath), } #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Display)] @@ -55,16 +56,11 @@ pub struct ClientTypePath(pub ClientId); pub struct ClientStatePath(pub ClientId); #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Display)] -#[display( - fmt = "clients/{}/consensusStates/{}-{}", - "client_id", - "epoch", - "height" -)] +#[display(fmt = "clients/{}/consensusStates/{}-{}", "client_id", "epoch", "height")] pub struct ClientConsensusStatePath { - pub client_id: ClientId, - pub epoch: u64, - pub height: u64, + pub client_id: ClientId, + pub epoch: u64, + pub height: u64, } #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Display)] @@ -97,921 +93,800 @@ pub struct SeqAcksPath(pub PortId, pub ChannelId); #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Display)] #[display( - fmt = "commitments/ports/{}/channels/{}/sequences/{}", - "port_id", - "channel_id", - "sequence" + fmt = "commitments/ports/{}/channels/{}/sequences/{}", + "port_id", + "channel_id", + "sequence" )] pub struct CommitmentsPath { - pub port_id: PortId, - pub channel_id: ChannelId, - pub sequence: Sequence, + pub port_id: PortId, + pub channel_id: ChannelId, + pub sequence: Sequence, } #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Display)] -#[display( - fmt = "acks/ports/{}/channels/{}/sequences/{}", - "port_id", - "channel_id", - "sequence" -)] +#[display(fmt = "acks/ports/{}/channels/{}/sequences/{}", "port_id", "channel_id", "sequence")] pub struct AcksPath { - pub port_id: PortId, - pub channel_id: ChannelId, - pub sequence: Sequence, + 
pub port_id: PortId, + pub channel_id: ChannelId, + pub sequence: Sequence, } #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Display)] -#[display( - fmt = "receipts/ports/{}/channels/{}/sequences/{}", - "port_id", - "channel_id", - "sequence" -)] +#[display(fmt = "receipts/ports/{}/channels/{}/sequences/{}", "port_id", "channel_id", "sequence")] pub struct ReceiptsPath { - pub port_id: PortId, - pub channel_id: ChannelId, - pub sequence: Sequence, + pub port_id: PortId, + pub channel_id: ChannelId, + pub sequence: Sequence, } /// Paths that are specific for client upgrades. #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Display)] pub enum ClientUpgradePath { - #[display(fmt = "{}/{}/{}", UPGRADED_IBC_STATE, _0, UPGRADED_CLIENT_STATE)] - UpgradedClientState(u64), - #[display( - fmt = "{}/{}/{}", - UPGRADED_IBC_STATE, - _0, - UPGRADED_CLIENT_CONSENSUS_STATE - )] - UpgradedClientConsensusState(u64), + #[display(fmt = "{}/{}/{}", UPGRADED_IBC_STATE, _0, UPGRADED_CLIENT_STATE)] + UpgradedClientState(u64), + #[display(fmt = "{}/{}/{}", UPGRADED_IBC_STATE, _0, UPGRADED_CLIENT_CONSENSUS_STATE)] + UpgradedClientConsensusState(u64), } /// Sub-paths which are not part of the specification, but are still /// useful to represent for parsing purposes. #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] enum SubPath { - Channels(ChannelId), - Sequences(Sequence), + Channels(ChannelId), + Sequences(Sequence), } impl Path { - /// Indication if the path is provable. - pub fn is_provable(&self) -> bool { - !matches!(&self, Path::ClientConnections(_) | Path::Ports(_)) - } - - /// into_bytes implementation - pub fn into_bytes(self) -> Vec { - self.to_string().into_bytes() - } + /// Indication if the path is provable. + pub fn is_provable(&self) -> bool { + !matches!(&self, Path::ClientConnections(_) | Path::Ports(_)) + } + + /// into_bytes implementation + pub fn into_bytes(self) -> Vec { + self.to_string().into_bytes() + } } define_error! { - #[derive(Eq, PartialEq)] - PathError { - ParseFailure - { path: String } - | e | { format!("'{}' could not be parsed into a Path", e.path) }, - } + #[derive(Eq, PartialEq)] + PathError { + ParseFailure + { path: String } + | e | { format!("'{}' could not be parsed into a Path", e.path) }, + } } /// The FromStr trait allows paths encoded as strings to be parsed into Paths. 
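Each `Path` variant renders to its ICS-024 key format through the `Display` attributes declared above, and `into_bytes` produces the raw store key; the `FromStr` implementation that follows parses those strings back. A sketch, assuming the `ibc::...` paths used by the doctests in this patch:

    use core::str::FromStr;
    use ibc::core::{
        ics04_channel::packet::Sequence,
        ics24_host::{
            identifier::{ChannelId, PortId},
            path::{CommitmentsPath, Path},
        },
    };

    fn commitment_storage_key() {
        let path: Path = CommitmentsPath {
            port_id: PortId::transfer(),
            channel_id: ChannelId::new(0),
            sequence: Sequence::from_str("1").expect("valid sequence"),
        }
        .into();

        // Matches the "commitments/ports/{}/channels/{}/sequences/{}" display format above.
        assert_eq!(path.to_string(), "commitments/ports/transfer/channels/channel-0/sequences/1");
        assert!(path.is_provable());
        let _key: Vec<u8> = path.into_bytes();
    }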
impl FromStr for Path { - type Err = PathError; - - fn from_str(s: &str) -> Result { - let components: Vec<&str> = s.split('/').collect(); - - parse_client_paths(&components) - .or_else(|| parse_connections(&components)) - .or_else(|| parse_ports(&components)) - .or_else(|| parse_channel_ends(&components)) - .or_else(|| parse_seqs(&components)) - .or_else(|| parse_commitments(&components)) - .or_else(|| parse_acks(&components)) - .or_else(|| parse_receipts(&components)) - .or_else(|| parse_upgrades(&components)) - .ok_or_else(|| PathError::parse_failure(s.to_string())) - } + type Err = PathError; + + fn from_str(s: &str) -> Result { + let components: Vec<&str> = s.split('/').collect(); + + parse_client_paths(&components) + .or_else(|| parse_connections(&components)) + .or_else(|| parse_ports(&components)) + .or_else(|| parse_channel_ends(&components)) + .or_else(|| parse_seqs(&components)) + .or_else(|| parse_commitments(&components)) + .or_else(|| parse_acks(&components)) + .or_else(|| parse_receipts(&components)) + .or_else(|| parse_upgrades(&components)) + .ok_or_else(|| PathError::parse_failure(s.to_string())) + } } fn parse_client_paths(components: &[&str]) -> Option { - let first = match components.first() { - Some(f) => *f, - None => return None, - }; - - if first != "clients" { - return None; - } - - let client_id = match ClientId::from_str(components[1]) { - Ok(s) => s, - Err(_) => return None, - }; - - if components.len() == 3 { - match components[2] { - "clientType" => Some(ClientTypePath(client_id).into()), - "clientState" => Some(ClientStatePath(client_id).into()), - "connections" => Some(ClientConnectionsPath(client_id).into()), - _ => None, - } - } else if components.len() == 4 { - if "consensusStates" != components[2] { - return None; - } - - let epoch_height = match components.last() { - Some(eh) => *eh, - None => return None, - }; - - let epoch_height: Vec<&str> = epoch_height.split('-').collect(); - - if epoch_height.len() != 2 { - return None; - } - - let epoch = epoch_height[0]; - let height = epoch_height[1]; - - let epoch = match epoch.parse::() { - Ok(ep) => ep, - Err(_) => return None, - }; - - let height = match height.parse::() { - Ok(h) => h, - Err(_) => return None, - }; - - Some( - ClientConsensusStatePath { - client_id, - epoch, - height, - } - .into(), - ) - } else { - None - } + let first = match components.first() { + Some(f) => *f, + None => return None, + }; + + if first != "clients" { + return None + } + + let client_id = match ClientId::from_str(components[1]) { + Ok(s) => s, + Err(_) => return None, + }; + + if components.len() == 3 { + match components[2] { + "clientType" => Some(ClientTypePath(client_id).into()), + "clientState" => Some(ClientStatePath(client_id).into()), + "connections" => Some(ClientConnectionsPath(client_id).into()), + _ => None, + } + } else if components.len() == 4 { + if "consensusStates" != components[2] { + return None + } + + let epoch_height = match components.last() { + Some(eh) => *eh, + None => return None, + }; + + let epoch_height: Vec<&str> = epoch_height.split('-').collect(); + + if epoch_height.len() != 2 { + return None + } + + let epoch = epoch_height[0]; + let height = epoch_height[1]; + + let epoch = match epoch.parse::() { + Ok(ep) => ep, + Err(_) => return None, + }; + + let height = match height.parse::() { + Ok(h) => h, + Err(_) => return None, + }; + + Some(ClientConsensusStatePath { client_id, epoch, height }.into()) + } else { + None + } } fn parse_connections(components: &[&str]) -> Option { - if 
components.len() != 2 { - return None; - } - - let first = match components.first() { - Some(f) => *f, - None => return None, - }; - - if first != "connections" { - return None; - } - - let connection_id = match components.last() { - Some(c) => *c, - None => return None, - }; - - let connection_id = match ConnectionId::from_str(connection_id) { - Ok(c) => c, - Err(_) => return None, - }; - - Some(ConnectionsPath(connection_id).into()) + if components.len() != 2 { + return None + } + + let first = match components.first() { + Some(f) => *f, + None => return None, + }; + + if first != "connections" { + return None + } + + let connection_id = match components.last() { + Some(c) => *c, + None => return None, + }; + + let connection_id = match ConnectionId::from_str(connection_id) { + Ok(c) => c, + Err(_) => return None, + }; + + Some(ConnectionsPath(connection_id).into()) } fn parse_ports(components: &[&str]) -> Option { - if components.len() != 2 { - return None; - } - - let first = match components.first() { - Some(f) => *f, - None => return None, - }; - - if first != "ports" { - return None; - } - - let port_id = match components.last() { - Some(p) => *p, - None => return None, - }; - - let port_id = match PortId::from_str(port_id) { - Ok(p) => p, - Err(_) => return None, - }; - - Some(PortsPath(port_id).into()) + if components.len() != 2 { + return None + } + + let first = match components.first() { + Some(f) => *f, + None => return None, + }; + + if first != "ports" { + return None + } + + let port_id = match components.last() { + Some(p) => *p, + None => return None, + }; + + let port_id = match PortId::from_str(port_id) { + Ok(p) => p, + Err(_) => return None, + }; + + Some(PortsPath(port_id).into()) } fn parse_channels(components: &[&str]) -> Option { - if components.len() != 2 { - return None; - } - - let first = match components.first() { - Some(f) => *f, - None => return None, - }; - - if first != "channels" { - return None; - } - - let channel_id = match components.last() { - Some(c) => *c, - None => return None, - }; - - let channel_id = match ChannelId::from_str(channel_id) { - Ok(c) => c, - Err(_) => return None, - }; - - Some(SubPath::Channels(channel_id)) + if components.len() != 2 { + return None + } + + let first = match components.first() { + Some(f) => *f, + None => return None, + }; + + if first != "channels" { + return None + } + + let channel_id = match components.last() { + Some(c) => *c, + None => return None, + }; + + let channel_id = match ChannelId::from_str(channel_id) { + Ok(c) => c, + Err(_) => return None, + }; + + Some(SubPath::Channels(channel_id)) } fn parse_sequences(components: &[&str]) -> Option { - if components.len() != 2 { - return None; - } - - let first = match components.first() { - Some(f) => *f, - None => return None, - }; - - if first != "sequences" { - return None; - } - - let sequence_number = match components.last() { - Some(s) => *s, - None => return None, - }; - - match Sequence::from_str(sequence_number) { - Ok(seq) => Some(SubPath::Sequences(seq)), - Err(_) => None, - } + if components.len() != 2 { + return None + } + + let first = match components.first() { + Some(f) => *f, + None => return None, + }; + + if first != "sequences" { + return None + } + + let sequence_number = match components.last() { + Some(s) => *s, + None => return None, + }; + + match Sequence::from_str(sequence_number) { + Ok(seq) => Some(SubPath::Sequences(seq)), + Err(_) => None, + } } fn parse_channel_ends(components: &[&str]) -> Option { - if components.len() != 5 
{ - return None; - } - - let first = match components.first() { - Some(f) => *f, - None => return None, - }; - - if first != "channelEnds" { - return None; - } - - let port = parse_ports(&components[1..=2]); - let channel = parse_channels(&components[3..=4]); - - let port_id = if let Some(Path::Ports(PortsPath(port_id))) = port { - port_id - } else { - return None; - }; - - let channel_id = if let Some(SubPath::Channels(channel_id)) = channel { - channel_id - } else { - return None; - }; - - Some(ChannelEndsPath(port_id, channel_id).into()) + if components.len() != 5 { + return None + } + + let first = match components.first() { + Some(f) => *f, + None => return None, + }; + + if first != "channelEnds" { + return None + } + + let port = parse_ports(&components[1..=2]); + let channel = parse_channels(&components[3..=4]); + + let port_id = + if let Some(Path::Ports(PortsPath(port_id))) = port { port_id } else { return None }; + + let channel_id = + if let Some(SubPath::Channels(channel_id)) = channel { channel_id } else { return None }; + + Some(ChannelEndsPath(port_id, channel_id).into()) } fn parse_seqs(components: &[&str]) -> Option { - if components.len() != 5 { - return None; - } - - let first = match components.first() { - Some(f) => *f, - None => return None, - }; - - let port = parse_ports(&components[1..=2]); - let channel = parse_channels(&components[3..=4]); - - let port_id = if let Some(Path::Ports(PortsPath(port_id))) = port { - port_id - } else { - return None; - }; - - let channel_id = if let Some(SubPath::Channels(channel_id)) = channel { - channel_id - } else { - return None; - }; - - match first { - "nextSequenceSend" => Some(SeqSendsPath(port_id, channel_id).into()), - "nextSequenceRecv" => Some(SeqRecvsPath(port_id, channel_id).into()), - "nextSequenceAck" => Some(SeqAcksPath(port_id, channel_id).into()), - _ => None, - } + if components.len() != 5 { + return None + } + + let first = match components.first() { + Some(f) => *f, + None => return None, + }; + + let port = parse_ports(&components[1..=2]); + let channel = parse_channels(&components[3..=4]); + + let port_id = + if let Some(Path::Ports(PortsPath(port_id))) = port { port_id } else { return None }; + + let channel_id = + if let Some(SubPath::Channels(channel_id)) = channel { channel_id } else { return None }; + + match first { + "nextSequenceSend" => Some(SeqSendsPath(port_id, channel_id).into()), + "nextSequenceRecv" => Some(SeqRecvsPath(port_id, channel_id).into()), + "nextSequenceAck" => Some(SeqAcksPath(port_id, channel_id).into()), + _ => None, + } } fn parse_commitments(components: &[&str]) -> Option { - if components.len() != 7 { - return None; - } - - let first = match components.first() { - Some(f) => *f, - None => return None, - }; - - if first != "commitments" { - return None; - } - - let port = parse_ports(&components[1..=2]); - let channel = parse_channels(&components[3..=4]); - let sequence = parse_sequences(&components[5..]); - - let port_id = if let Some(Path::Ports(PortsPath(port_id))) = port { - port_id - } else { - return None; - }; - - let channel_id = if let Some(SubPath::Channels(channel_id)) = channel { - channel_id - } else { - return None; - }; - - let sequence = if let Some(SubPath::Sequences(seq)) = sequence { - seq - } else { - return None; - }; - - Some( - CommitmentsPath { - port_id, - channel_id, - sequence, - } - .into(), - ) + if components.len() != 7 { + return None + } + + let first = match components.first() { + Some(f) => *f, + None => return None, + }; + + if first != 
"commitments" { + return None + } + + let port = parse_ports(&components[1..=2]); + let channel = parse_channels(&components[3..=4]); + let sequence = parse_sequences(&components[5..]); + + let port_id = + if let Some(Path::Ports(PortsPath(port_id))) = port { port_id } else { return None }; + + let channel_id = + if let Some(SubPath::Channels(channel_id)) = channel { channel_id } else { return None }; + + let sequence = if let Some(SubPath::Sequences(seq)) = sequence { seq } else { return None }; + + Some(CommitmentsPath { port_id, channel_id, sequence }.into()) } fn parse_acks(components: &[&str]) -> Option { - if components.len() != 7 { - return None; - } - - let first = match components.first() { - Some(f) => *f, - None => return None, - }; - - if first != "acks" { - return None; - } - - let port = parse_ports(&components[1..=2]); - let channel = parse_channels(&components[3..=4]); - let sequence = parse_sequences(&components[5..]); - - let port_id = if let Some(Path::Ports(PortsPath(port_id))) = port { - port_id - } else { - return None; - }; - - let channel_id = if let Some(SubPath::Channels(channel_id)) = channel { - channel_id - } else { - return None; - }; - - let sequence = if let Some(SubPath::Sequences(seq)) = sequence { - seq - } else { - return None; - }; - - Some( - AcksPath { - port_id, - channel_id, - sequence, - } - .into(), - ) + if components.len() != 7 { + return None + } + + let first = match components.first() { + Some(f) => *f, + None => return None, + }; + + if first != "acks" { + return None + } + + let port = parse_ports(&components[1..=2]); + let channel = parse_channels(&components[3..=4]); + let sequence = parse_sequences(&components[5..]); + + let port_id = + if let Some(Path::Ports(PortsPath(port_id))) = port { port_id } else { return None }; + + let channel_id = + if let Some(SubPath::Channels(channel_id)) = channel { channel_id } else { return None }; + + let sequence = if let Some(SubPath::Sequences(seq)) = sequence { seq } else { return None }; + + Some(AcksPath { port_id, channel_id, sequence }.into()) } fn parse_receipts(components: &[&str]) -> Option { - if components.len() != 7 { - return None; - } - - let first = match components.first() { - Some(f) => *f, - None => return None, - }; - - if first != "receipts" { - return None; - } - - let port = parse_ports(&components[1..=2]); - let channel = parse_channels(&components[3..=4]); - let sequence = parse_sequences(&components[5..]); - - let port_id = if let Some(Path::Ports(PortsPath(port_id))) = port { - port_id - } else { - return None; - }; - - let channel_id = if let Some(SubPath::Channels(channel_id)) = channel { - channel_id - } else { - return None; - }; - - let sequence = if let Some(SubPath::Sequences(seq)) = sequence { - seq - } else { - return None; - }; - - Some( - ReceiptsPath { - port_id, - channel_id, - sequence, - } - .into(), - ) + if components.len() != 7 { + return None + } + + let first = match components.first() { + Some(f) => *f, + None => return None, + }; + + if first != "receipts" { + return None + } + + let port = parse_ports(&components[1..=2]); + let channel = parse_channels(&components[3..=4]); + let sequence = parse_sequences(&components[5..]); + + let port_id = + if let Some(Path::Ports(PortsPath(port_id))) = port { port_id } else { return None }; + + let channel_id = + if let Some(SubPath::Channels(channel_id)) = channel { channel_id } else { return None }; + + let sequence = if let Some(SubPath::Sequences(seq)) = sequence { seq } else { return None }; + + 
Some(ReceiptsPath { port_id, channel_id, sequence }.into()) } fn parse_upgrades(components: &[&str]) -> Option { - if components.len() != 3 { - return None; - } - - let first = match components.first() { - Some(f) => *f, - None => return None, - }; - - if first != UPGRADED_IBC_STATE { - return None; - } - - let last = match components.last() { - Some(l) => *l, - None => return None, - }; - - let height = match components[1].parse::() { - Ok(h) => h, - Err(_) => return None, - }; - - match last { - UPGRADED_CLIENT_STATE => Some(ClientUpgradePath::UpgradedClientState(height).into()), - UPGRADED_CLIENT_CONSENSUS_STATE => { - Some(ClientUpgradePath::UpgradedClientConsensusState(height).into()) - } - _ => None, - } + if components.len() != 3 { + return None + } + + let first = match components.first() { + Some(f) => *f, + None => return None, + }; + + if first != UPGRADED_IBC_STATE { + return None + } + + let last = match components.last() { + Some(l) => *l, + None => return None, + }; + + let height = match components[1].parse::() { + Ok(h) => h, + Err(_) => return None, + }; + + match last { + UPGRADED_CLIENT_STATE => Some(ClientUpgradePath::UpgradedClientState(height).into()), + UPGRADED_CLIENT_CONSENSUS_STATE => + Some(ClientUpgradePath::UpgradedClientConsensusState(height).into()), + _ => None, + } } #[cfg(test)] mod tests { - use super::*; - use core::str::FromStr; - - #[test] - fn invalid_path_doesnt_parse() { - let invalid_path = Path::from_str("clients/clientType"); - - assert!(invalid_path.is_err()); - } - - #[test] - fn test_parse_client_paths_fn() { - let path = "clients/07-tendermint-0/clientType"; - let components: Vec<&str> = path.split('/').collect(); - - assert_eq!( - parse_client_paths(&components), - Some(Path::ClientType(ClientTypePath(ClientId::default()))) - ); - - let path = "clients/07-tendermint-0/clientState"; - let components: Vec<&str> = path.split('/').collect(); - - assert_eq!( - parse_client_paths(&components), - Some(Path::ClientState(ClientStatePath(ClientId::default()))) - ); - - let path = "clients/07-tendermint-0/consensusStates/15-31"; - let components: Vec<&str> = path.split('/').collect(); - - assert_eq!( - parse_client_paths(&components), - Some(Path::ClientConsensusState(ClientConsensusStatePath { - client_id: ClientId::default(), - epoch: 15, - height: 31, - })) - ); - } - - #[test] - fn client_type_path_parses() { - let path = "clients/07-tendermint-0/clientType"; - let path = Path::from_str(path); - - assert!(path.is_ok()); - assert_eq!( - path.unwrap(), - Path::ClientType(ClientTypePath(ClientId::default())) - ); - } - - #[test] - fn client_state_path_parses() { - let path = "clients/07-tendermint-0/clientState"; - let path = Path::from_str(path); - - assert!(path.is_ok()); - assert_eq!( - path.unwrap(), - Path::ClientState(ClientStatePath(ClientId::default())) - ); - } - - #[test] - fn client_consensus_state_path_parses() { - let path = "clients/07-tendermint-0/consensusStates/15-31"; - let path = Path::from_str(path); - - assert!(path.is_ok()); - assert_eq!( - path.unwrap(), - Path::ClientConsensusState(ClientConsensusStatePath { - client_id: ClientId::default(), - epoch: 15, - height: 31, - }) - ); - } - - #[test] - fn client_connections_path_parses() { - let path = "clients/07-tendermint-0/connections"; - let path = Path::from_str(path); - - assert!(path.is_ok()); - assert_eq!( - path.unwrap(), - Path::ClientConnections(ClientConnectionsPath(ClientId::default())) - ); - } - - #[test] - fn test_parse_connections_fn() { - let path = 
"connections/connection-0"; - let components: Vec<&str> = path.split('/').collect(); - - assert_eq!( - parse_connections(&components), - Some(Path::Connections(ConnectionsPath(ConnectionId::new(0)))), - ); - } - - #[test] - fn connections_path_parses() { - let path = "connections/connection-0"; - let path = Path::from_str(path); - - assert!(path.is_ok()); - assert_eq!( - path.unwrap(), - Path::Connections(ConnectionsPath(ConnectionId::new(0))) - ); - } - - #[test] - fn test_parse_ports_fn() { - let path = "ports/defaultPort"; - let components: Vec<&str> = path.split('/').collect(); - - assert_eq!( - parse_ports(&components), - Some(Path::Ports(PortsPath(PortId::default()))), - ); - } - - #[test] - fn ports_path_parses() { - let path = "ports/defaultPort"; - let path = Path::from_str(path); - - assert!(path.is_ok()); - assert_eq!(path.unwrap(), Path::Ports(PortsPath(PortId::default()))); - } - - #[test] - fn test_parse_channels_fn() { - let path = "channels/channel-0"; - let components: Vec<&str> = path.split('/').collect(); - - assert_eq!( - parse_channels(&components), - Some(SubPath::Channels(ChannelId::default())), - ); - } - - #[test] - fn channels_path_doesnt_parse() { - let path = "channels/channel-0"; - let path = Path::from_str(path); - - assert!(path.is_err()); - } - - #[test] - fn test_parse_sequences_fn() { - let path = "sequences/0"; - let components: Vec<&str> = path.split('/').collect(); - - assert_eq!( - parse_sequences(&components), - Some(SubPath::Sequences(Sequence::default())) - ); - } - - #[test] - fn sequences_path_doesnt_parse() { - let path = "sequences/0"; - let path = Path::from_str(path); - - assert!(path.is_err()); - } - - #[test] - fn test_parse_channel_ends_fn() { - let path = "channelEnds/ports/defaultPort/channels/channel-0"; - let components: Vec<&str> = path.split('/').collect(); - - assert_eq!( - parse_channel_ends(&components), - Some(Path::ChannelEnds(ChannelEndsPath( - PortId::default(), - ChannelId::default() - ))), - ); - } - - #[test] - fn channel_ends_path_parses() { - let path = "channelEnds/ports/defaultPort/channels/channel-0"; - let path = Path::from_str(path); - - assert!(path.is_ok()); - assert_eq!( - path.unwrap(), - Path::ChannelEnds(ChannelEndsPath(PortId::default(), ChannelId::default())), - ); - } - - #[test] - fn test_parse_seqs_fn() { - let path = "nextSequenceSend/ports/defaultPort/channels/channel-0"; - let components: Vec<&str> = path.split('/').collect(); - - assert_eq!( - parse_seqs(&components), - Some(Path::SeqSends(SeqSendsPath( - PortId::default(), - ChannelId::default() - ))), - ); - - let path = "nextSequenceRecv/ports/defaultPort/channels/channel-0"; - let components: Vec<&str> = path.split('/').collect(); - - assert_eq!( - parse_seqs(&components), - Some(Path::SeqRecvs(SeqRecvsPath( - PortId::default(), - ChannelId::default() - ))), - ); - - let path = "nextSequenceAck/ports/defaultPort/channels/channel-0"; - let components: Vec<&str> = path.split('/').collect(); - - assert_eq!( - parse_seqs(&components), - Some(Path::SeqAcks(SeqAcksPath( - PortId::default(), - ChannelId::default() - ))), - ); - } - - #[test] - fn sequence_send_path_parses() { - let path = "nextSequenceSend/ports/defaultPort/channels/channel-0"; - let path = Path::from_str(path); - - assert!(path.is_ok()); - assert_eq!( - path.unwrap(), - Path::SeqSends(SeqSendsPath(PortId::default(), ChannelId::default())), - ); - } - - #[test] - fn sequence_recv_path_parses() { - let path = "nextSequenceRecv/ports/defaultPort/channels/channel-0"; - let path = 
Path::from_str(path); - - assert!(path.is_ok()); - assert_eq!( - path.unwrap(), - Path::SeqRecvs(SeqRecvsPath(PortId::default(), ChannelId::default())), - ); - } - - #[test] - fn sequence_ack_path_parses() { - let path = "nextSequenceAck/ports/defaultPort/channels/channel-0"; - let path = Path::from_str(path); - - assert!(path.is_ok()); - assert_eq!( - path.unwrap(), - Path::SeqAcks(SeqAcksPath(PortId::default(), ChannelId::default())), - ); - } - - #[test] - fn test_parse_commitments_fn() { - let path = "commitments/ports/defaultPort/channels/channel-0/sequences/0"; - let components: Vec<&str> = path.split('/').collect(); - - assert_eq!( - parse_commitments(&components), - Some(Path::Commitments(CommitmentsPath { - port_id: PortId::default(), - channel_id: ChannelId::default(), - sequence: Sequence::default(), - })), - ); - } - - #[test] - fn commitments_path_parses() { - let path = "commitments/ports/defaultPort/channels/channel-0/sequences/0"; - let path = Path::from_str(path); - - assert!(path.is_ok()); - assert_eq!( - path.unwrap(), - Path::Commitments(CommitmentsPath { - port_id: PortId::default(), - channel_id: ChannelId::default(), - sequence: Sequence::default(), - }), - ); - } - - #[test] - fn test_parse_acks_fn() { - let path = "acks/ports/defaultPort/channels/channel-0/sequences/0"; - let components: Vec<&str> = path.split('/').collect(); - - assert_eq!( - parse_acks(&components), - Some(Path::Acks(AcksPath { - port_id: PortId::default(), - channel_id: ChannelId::default(), - sequence: Sequence::default(), - })), - ); - } - - #[test] - fn acks_path_parses() { - let path = "acks/ports/defaultPort/channels/channel-0/sequences/0"; - let path = Path::from_str(path); - - assert!(path.is_ok()); - assert_eq!( - path.unwrap(), - Path::Acks(AcksPath { - port_id: PortId::default(), - channel_id: ChannelId::default(), - sequence: Sequence::default(), - }), - ); - } - - #[test] - fn test_parse_receipts_fn() { - let path = "receipts/ports/defaultPort/channels/channel-0/sequences/0"; - let components: Vec<&str> = path.split('/').collect(); - - assert_eq!( - parse_receipts(&components), - Some(Path::Receipts(ReceiptsPath { - port_id: PortId::default(), - channel_id: ChannelId::default(), - sequence: Sequence::default(), - })), - ); - } - - #[test] - fn receipts_path_parses() { - let path = "receipts/ports/defaultPort/channels/channel-0/sequences/0"; - let path = Path::from_str(path); - - assert!(path.is_ok()); - assert_eq!( - path.unwrap(), - Path::Receipts(ReceiptsPath { - port_id: PortId::default(), - channel_id: ChannelId::default(), - sequence: Sequence::default(), - }), - ); - } - - #[test] - fn test_parse_upgrades_fn() { - let path = "upgradedIBCState/0/upgradedClient"; - let components: Vec<&str> = path.split('/').collect(); - - assert_eq!( - parse_upgrades(&components), - Some(Path::Upgrade(ClientUpgradePath::UpgradedClientState(0))), - ); - - let path = "upgradedIBCState/0/upgradedConsState"; - let components: Vec<&str> = path.split('/').collect(); - - assert_eq!( - parse_upgrades(&components), - Some(Path::Upgrade( - ClientUpgradePath::UpgradedClientConsensusState(0) - )), - ) - } - - #[test] - fn upgrade_client_state_path_parses() { - let path = "upgradedIBCState/0/upgradedClient"; - let path = Path::from_str(path); - - assert!(path.is_ok()); - assert_eq!( - path.unwrap(), - Path::Upgrade(ClientUpgradePath::UpgradedClientState(0)), - ); - } - - #[test] - fn upgrade_client_consensus_state_path_parses() { - let path = "upgradedIBCState/0/upgradedConsState"; - let path = 
Path::from_str(path); - - assert!(path.is_ok()); - assert_eq!( - path.unwrap(), - Path::Upgrade(ClientUpgradePath::UpgradedClientConsensusState(0)), - ); - } + use super::*; + use core::str::FromStr; + + #[test] + fn invalid_path_doesnt_parse() { + let invalid_path = Path::from_str("clients/clientType"); + + assert!(invalid_path.is_err()); + } + + #[test] + fn test_parse_client_paths_fn() { + let path = "clients/07-tendermint-0/clientType"; + let components: Vec<&str> = path.split('/').collect(); + + assert_eq!( + parse_client_paths(&components), + Some(Path::ClientType(ClientTypePath(ClientId::default()))) + ); + + let path = "clients/07-tendermint-0/clientState"; + let components: Vec<&str> = path.split('/').collect(); + + assert_eq!( + parse_client_paths(&components), + Some(Path::ClientState(ClientStatePath(ClientId::default()))) + ); + + let path = "clients/07-tendermint-0/consensusStates/15-31"; + let components: Vec<&str> = path.split('/').collect(); + + assert_eq!( + parse_client_paths(&components), + Some(Path::ClientConsensusState(ClientConsensusStatePath { + client_id: ClientId::default(), + epoch: 15, + height: 31, + })) + ); + } + + #[test] + fn client_type_path_parses() { + let path = "clients/07-tendermint-0/clientType"; + let path = Path::from_str(path); + + assert!(path.is_ok()); + assert_eq!(path.unwrap(), Path::ClientType(ClientTypePath(ClientId::default()))); + } + + #[test] + fn client_state_path_parses() { + let path = "clients/07-tendermint-0/clientState"; + let path = Path::from_str(path); + + assert!(path.is_ok()); + assert_eq!(path.unwrap(), Path::ClientState(ClientStatePath(ClientId::default()))); + } + + #[test] + fn client_consensus_state_path_parses() { + let path = "clients/07-tendermint-0/consensusStates/15-31"; + let path = Path::from_str(path); + + assert!(path.is_ok()); + assert_eq!( + path.unwrap(), + Path::ClientConsensusState(ClientConsensusStatePath { + client_id: ClientId::default(), + epoch: 15, + height: 31, + }) + ); + } + + #[test] + fn client_connections_path_parses() { + let path = "clients/07-tendermint-0/connections"; + let path = Path::from_str(path); + + assert!(path.is_ok()); + assert_eq!( + path.unwrap(), + Path::ClientConnections(ClientConnectionsPath(ClientId::default())) + ); + } + + #[test] + fn test_parse_connections_fn() { + let path = "connections/connection-0"; + let components: Vec<&str> = path.split('/').collect(); + + assert_eq!( + parse_connections(&components), + Some(Path::Connections(ConnectionsPath(ConnectionId::new(0)))), + ); + } + + #[test] + fn connections_path_parses() { + let path = "connections/connection-0"; + let path = Path::from_str(path); + + assert!(path.is_ok()); + assert_eq!(path.unwrap(), Path::Connections(ConnectionsPath(ConnectionId::new(0)))); + } + + #[test] + fn test_parse_ports_fn() { + let path = "ports/defaultPort"; + let components: Vec<&str> = path.split('/').collect(); + + assert_eq!(parse_ports(&components), Some(Path::Ports(PortsPath(PortId::default()))),); + } + + #[test] + fn ports_path_parses() { + let path = "ports/defaultPort"; + let path = Path::from_str(path); + + assert!(path.is_ok()); + assert_eq!(path.unwrap(), Path::Ports(PortsPath(PortId::default()))); + } + + #[test] + fn test_parse_channels_fn() { + let path = "channels/channel-0"; + let components: Vec<&str> = path.split('/').collect(); + + assert_eq!(parse_channels(&components), Some(SubPath::Channels(ChannelId::default())),); + } + + #[test] + fn channels_path_doesnt_parse() { + let path = "channels/channel-0"; + let path = 
Path::from_str(path); + + assert!(path.is_err()); + } + + #[test] + fn test_parse_sequences_fn() { + let path = "sequences/0"; + let components: Vec<&str> = path.split('/').collect(); + + assert_eq!(parse_sequences(&components), Some(SubPath::Sequences(Sequence::default()))); + } + + #[test] + fn sequences_path_doesnt_parse() { + let path = "sequences/0"; + let path = Path::from_str(path); + + assert!(path.is_err()); + } + + #[test] + fn test_parse_channel_ends_fn() { + let path = "channelEnds/ports/defaultPort/channels/channel-0"; + let components: Vec<&str> = path.split('/').collect(); + + assert_eq!( + parse_channel_ends(&components), + Some(Path::ChannelEnds(ChannelEndsPath(PortId::default(), ChannelId::default()))), + ); + } + + #[test] + fn channel_ends_path_parses() { + let path = "channelEnds/ports/defaultPort/channels/channel-0"; + let path = Path::from_str(path); + + assert!(path.is_ok()); + assert_eq!( + path.unwrap(), + Path::ChannelEnds(ChannelEndsPath(PortId::default(), ChannelId::default())), + ); + } + + #[test] + fn test_parse_seqs_fn() { + let path = "nextSequenceSend/ports/defaultPort/channels/channel-0"; + let components: Vec<&str> = path.split('/').collect(); + + assert_eq!( + parse_seqs(&components), + Some(Path::SeqSends(SeqSendsPath(PortId::default(), ChannelId::default()))), + ); + + let path = "nextSequenceRecv/ports/defaultPort/channels/channel-0"; + let components: Vec<&str> = path.split('/').collect(); + + assert_eq!( + parse_seqs(&components), + Some(Path::SeqRecvs(SeqRecvsPath(PortId::default(), ChannelId::default()))), + ); + + let path = "nextSequenceAck/ports/defaultPort/channels/channel-0"; + let components: Vec<&str> = path.split('/').collect(); + + assert_eq!( + parse_seqs(&components), + Some(Path::SeqAcks(SeqAcksPath(PortId::default(), ChannelId::default()))), + ); + } + + #[test] + fn sequence_send_path_parses() { + let path = "nextSequenceSend/ports/defaultPort/channels/channel-0"; + let path = Path::from_str(path); + + assert!(path.is_ok()); + assert_eq!( + path.unwrap(), + Path::SeqSends(SeqSendsPath(PortId::default(), ChannelId::default())), + ); + } + + #[test] + fn sequence_recv_path_parses() { + let path = "nextSequenceRecv/ports/defaultPort/channels/channel-0"; + let path = Path::from_str(path); + + assert!(path.is_ok()); + assert_eq!( + path.unwrap(), + Path::SeqRecvs(SeqRecvsPath(PortId::default(), ChannelId::default())), + ); + } + + #[test] + fn sequence_ack_path_parses() { + let path = "nextSequenceAck/ports/defaultPort/channels/channel-0"; + let path = Path::from_str(path); + + assert!(path.is_ok()); + assert_eq!( + path.unwrap(), + Path::SeqAcks(SeqAcksPath(PortId::default(), ChannelId::default())), + ); + } + + #[test] + fn test_parse_commitments_fn() { + let path = "commitments/ports/defaultPort/channels/channel-0/sequences/0"; + let components: Vec<&str> = path.split('/').collect(); + + assert_eq!( + parse_commitments(&components), + Some(Path::Commitments(CommitmentsPath { + port_id: PortId::default(), + channel_id: ChannelId::default(), + sequence: Sequence::default(), + })), + ); + } + + #[test] + fn commitments_path_parses() { + let path = "commitments/ports/defaultPort/channels/channel-0/sequences/0"; + let path = Path::from_str(path); + + assert!(path.is_ok()); + assert_eq!( + path.unwrap(), + Path::Commitments(CommitmentsPath { + port_id: PortId::default(), + channel_id: ChannelId::default(), + sequence: Sequence::default(), + }), + ); + } + + #[test] + fn test_parse_acks_fn() { + let path = 
"acks/ports/defaultPort/channels/channel-0/sequences/0"; + let components: Vec<&str> = path.split('/').collect(); + + assert_eq!( + parse_acks(&components), + Some(Path::Acks(AcksPath { + port_id: PortId::default(), + channel_id: ChannelId::default(), + sequence: Sequence::default(), + })), + ); + } + + #[test] + fn acks_path_parses() { + let path = "acks/ports/defaultPort/channels/channel-0/sequences/0"; + let path = Path::from_str(path); + + assert!(path.is_ok()); + assert_eq!( + path.unwrap(), + Path::Acks(AcksPath { + port_id: PortId::default(), + channel_id: ChannelId::default(), + sequence: Sequence::default(), + }), + ); + } + + #[test] + fn test_parse_receipts_fn() { + let path = "receipts/ports/defaultPort/channels/channel-0/sequences/0"; + let components: Vec<&str> = path.split('/').collect(); + + assert_eq!( + parse_receipts(&components), + Some(Path::Receipts(ReceiptsPath { + port_id: PortId::default(), + channel_id: ChannelId::default(), + sequence: Sequence::default(), + })), + ); + } + + #[test] + fn receipts_path_parses() { + let path = "receipts/ports/defaultPort/channels/channel-0/sequences/0"; + let path = Path::from_str(path); + + assert!(path.is_ok()); + assert_eq!( + path.unwrap(), + Path::Receipts(ReceiptsPath { + port_id: PortId::default(), + channel_id: ChannelId::default(), + sequence: Sequence::default(), + }), + ); + } + + #[test] + fn test_parse_upgrades_fn() { + let path = "upgradedIBCState/0/upgradedClient"; + let components: Vec<&str> = path.split('/').collect(); + + assert_eq!( + parse_upgrades(&components), + Some(Path::Upgrade(ClientUpgradePath::UpgradedClientState(0))), + ); + + let path = "upgradedIBCState/0/upgradedConsState"; + let components: Vec<&str> = path.split('/').collect(); + + assert_eq!( + parse_upgrades(&components), + Some(Path::Upgrade(ClientUpgradePath::UpgradedClientConsensusState(0))), + ) + } + + #[test] + fn upgrade_client_state_path_parses() { + let path = "upgradedIBCState/0/upgradedClient"; + let path = Path::from_str(path); + + assert!(path.is_ok()); + assert_eq!(path.unwrap(), Path::Upgrade(ClientUpgradePath::UpgradedClientState(0)),); + } + + #[test] + fn upgrade_client_consensus_state_path_parses() { + let path = "upgradedIBCState/0/upgradedConsState"; + let path = Path::from_str(path); + + assert!(path.is_ok()); + assert_eq!( + path.unwrap(), + Path::Upgrade(ClientUpgradePath::UpgradedClientConsensusState(0)), + ); + } } diff --git a/modules/src/core/ics24_host/validate.rs b/modules/src/core/ics24_host/validate.rs index 722d35b124..8069f24705 100644 --- a/modules/src/core/ics24_host/validate.rs +++ b/modules/src/core/ics24_host/validate.rs @@ -11,36 +11,33 @@ const VALID_SPECIAL_CHARS: &str = "._+-#[]<>"; /// A valid identifier only contain lowercase alphabetic characters, and be of a given min and max /// length. 
pub fn validate_identifier(id: &str, min: usize, max: usize) -> Result<(), Error> { - assert!(max >= min); - - // Check identifier is not empty - if id.is_empty() { - return Err(Error::empty()); - } - - // Check identifier does not contain path separators - if id.contains(PATH_SEPARATOR) { - return Err(Error::contain_separator(id.to_string())); - } - - // Check identifier length is between given min/max - if id.len() < min || id.len() > max { - return Err(Error::invalid_length(id.to_string(), id.len(), min, max)); - } - - // Check that the identifier comprises only valid characters: - // - Alphanumeric - // - `.`, `_`, `+`, `-`, `#` - // - `[`, `]`, `<`, `>` - if !id - .chars() - .all(|c| c.is_alphanumeric() || VALID_SPECIAL_CHARS.contains(c)) - { - return Err(Error::invalid_character(id.to_string())); - } - - // All good! - Ok(()) + assert!(max >= min); + + // Check identifier is not empty + if id.is_empty() { + return Err(Error::empty()) + } + + // Check identifier does not contain path separators + if id.contains(PATH_SEPARATOR) { + return Err(Error::contain_separator(id.to_string())) + } + + // Check identifier length is between given min/max + if id.len() < min || id.len() > max { + return Err(Error::invalid_length(id.to_string(), id.len(), min, max)) + } + + // Check that the identifier comprises only valid characters: + // - Alphanumeric + // - `.`, `_`, `+`, `-`, `#` + // - `[`, `]`, `<`, `>` + if !id.chars().all(|c| c.is_alphanumeric() || VALID_SPECIAL_CHARS.contains(c)) { + return Err(Error::invalid_character(id.to_string())) + } + + // All good! + Ok(()) } /// Default validator function for Client identifiers. @@ -48,7 +45,7 @@ pub fn validate_identifier(id: &str, min: usize, max: usize) -> Result<(), Error /// A valid identifier must be between 9-64 characters and only contain lowercase /// alphabetic characters, pub fn validate_client_identifier(id: &str) -> Result<(), Error> { - validate_identifier(id, 9, 64) + validate_identifier(id, 9, 64) } /// Default validator function for Connection identifiers. @@ -56,7 +53,7 @@ pub fn validate_client_identifier(id: &str) -> Result<(), Error> { /// A valid Identifier must be between 10-64 characters and only contain lowercase /// alphabetic characters, pub fn validate_connection_identifier(id: &str) -> Result<(), Error> { - validate_identifier(id, 10, 64) + validate_identifier(id, 10, 64) } /// Default validator function for Port identifiers. 
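The checks in validate_identifier above reduce to four rules: the identifier must be non-empty, must not contain the `/` path separator, must have a length within [min, max], and may only use alphanumeric characters plus the set `._+-#[]<>`. A self-contained sketch of the same rules (the names below are illustrative and return a plain String error instead of the crate's Error type):

const VALID_SPECIAL_CHARS: &str = "._+-#[]<>";

/// Illustrative restatement of the identifier rules enforced above.
fn check_identifier(id: &str, min: usize, max: usize) -> Result<(), String> {
    if id.is_empty() {
        return Err("identifier is empty".into());
    }
    if id.contains('/') {
        return Err(format!("identifier `{id}` contains a path separator"));
    }
    if id.len() < min || id.len() > max {
        return Err(format!(
            "identifier `{id}` has length {}, expected {min}..={max}",
            id.len()
        ));
    }
    if !id.chars().all(|c| c.is_alphanumeric() || VALID_SPECIAL_CHARS.contains(c)) {
        return Err(format!("identifier `{id}` contains an invalid character"));
    }
    Ok(())
}

fn main() {
    assert!(check_identifier("07-tendermint-0", 9, 64).is_ok()); // valid client-style identifier
    assert!(check_identifier("id/1", 1, 10).is_err()); // rejected: path separator
    assert!(check_identifier("channel@01", 1, 10).is_err()); // rejected: invalid character
    assert!(check_identifier("", 1, 10).is_err()); // rejected: empty
}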
@@ -64,83 +61,83 @@ pub fn validate_connection_identifier(id: &str) -> Result<(), Error> { /// A valid Identifier must be between 2-128 characters and only contain lowercase /// alphabetic characters, pub fn validate_port_identifier(id: &str) -> Result<(), Error> { - validate_identifier(id, 2, 128) + validate_identifier(id, 2, 128) } #[cfg(test)] mod tests { - use crate::core::ics24_host::validate::{ - validate_client_identifier, validate_connection_identifier, validate_identifier, - validate_port_identifier, - }; - use test_log::test; - - #[test] - fn parse_invalid_port_id_min() { - // invalid min port id - let id = validate_port_identifier("p"); - assert!(id.is_err()) - } - - #[test] - fn parse_invalid_port_id_max() { - // invalid max port id (test string length is 130 chars) - let id = validate_port_identifier( + use crate::core::ics24_host::validate::{ + validate_client_identifier, validate_connection_identifier, validate_identifier, + validate_port_identifier, + }; + use test_log::test; + + #[test] + fn parse_invalid_port_id_min() { + // invalid min port id + let id = validate_port_identifier("p"); + assert!(id.is_err()) + } + + #[test] + fn parse_invalid_port_id_max() { + // invalid max port id (test string length is 130 chars) + let id = validate_port_identifier( "9anxkcme6je544d5lnj46zqiiiygfqzf8w4bjecbnyj4lj6s7zlpst67yln64tixp9anxkcme6je544d5lnj46zqiiiygfqzf8w4bjecbnyj4lj6s7zlpst67yln64tixp", ); - assert!(id.is_err()) - } - - #[test] - fn parse_invalid_connection_id_min() { - // invalid min connection id - let id = validate_connection_identifier("connect01"); - assert!(id.is_err()) - } - - #[test] - fn parse_connection_id_max() { - // invalid max connection id (test string length is 65) - let id = validate_connection_identifier( - "ihhankr30iy4nna65hjl2wjod7182io1t2s7u3ip3wqtbbn1sl0rgcntqc540r36r", - ); - assert!(id.is_err()) - } - - #[test] - fn parse_invalid_client_id_min() { - // invalid min client id - let id = validate_client_identifier("client"); - assert!(id.is_err()) - } - - #[test] - fn parse_client_id_max() { - // invalid max client id (test string length is 65) - let id = validate_client_identifier( - "f0isrs5enif9e4td3r2jcbxoevhz6u1fthn4aforq7ams52jn5m48eiesfht9ckpn", - ); - assert!(id.is_err()) - } - - #[test] - fn parse_invalid_id_chars() { - // invalid id chars - let id = validate_identifier("channel@01", 1, 10); - assert!(id.is_err()) - } - - #[test] - fn parse_invalid_id_empty() { - // invalid id empty - let id = validate_identifier("", 1, 10); - assert!(id.is_err()) - } - - #[test] - fn parse_invalid_id_path_separator() { - // invalid id with path separator - let id = validate_identifier("id/1", 1, 10); - assert!(id.is_err()) - } + assert!(id.is_err()) + } + + #[test] + fn parse_invalid_connection_id_min() { + // invalid min connection id + let id = validate_connection_identifier("connect01"); + assert!(id.is_err()) + } + + #[test] + fn parse_connection_id_max() { + // invalid max connection id (test string length is 65) + let id = validate_connection_identifier( + "ihhankr30iy4nna65hjl2wjod7182io1t2s7u3ip3wqtbbn1sl0rgcntqc540r36r", + ); + assert!(id.is_err()) + } + + #[test] + fn parse_invalid_client_id_min() { + // invalid min client id + let id = validate_client_identifier("client"); + assert!(id.is_err()) + } + + #[test] + fn parse_client_id_max() { + // invalid max client id (test string length is 65) + let id = validate_client_identifier( + "f0isrs5enif9e4td3r2jcbxoevhz6u1fthn4aforq7ams52jn5m48eiesfht9ckpn", + ); + assert!(id.is_err()) + } + + #[test] + fn 
parse_invalid_id_chars() { + // invalid id chars + let id = validate_identifier("channel@01", 1, 10); + assert!(id.is_err()) + } + + #[test] + fn parse_invalid_id_empty() { + // invalid id empty + let id = validate_identifier("", 1, 10); + assert!(id.is_err()) + } + + #[test] + fn parse_invalid_id_path_separator() { + // invalid id with path separator + let id = validate_identifier("id/1", 1, 10); + assert!(id.is_err()) + } } diff --git a/modules/src/core/ics26_routing/context.rs b/modules/src/core/ics26_routing/context.rs index 6b03ba248d..9a43a311a9 100644 --- a/modules/src/core/ics26_routing/context.rs +++ b/modules/src/core/ics26_routing/context.rs @@ -1,47 +1,40 @@ use crate::prelude::*; +use crate::{ + core::{ + ics02_client::context::{ClientKeeper, ClientReader}, + ics03_connection::context::{ConnectionKeeper, ConnectionReader}, + ics04_channel::{ + channel::{Counterparty, Order}, + context::{ChannelKeeper, ChannelReader}, + error::Error, + msgs::acknowledgement::Acknowledgement as GenericAcknowledgement, + packet::Packet, + Version, + }, + ics05_port::context::PortReader, + ics24_host::identifier::{ChannelId, ConnectionId, PortId}, + }, + events::ModuleEvent, + handler::HandlerOutputBuilder, + signer::Signer, +}; use alloc::borrow::{Borrow, Cow}; -use core::any::Any; -use core::fmt::Debug; -use core::{fmt, str::FromStr}; - +use core::{any::Any, fmt, fmt::Debug, str::FromStr}; use serde::{Deserialize, Serialize}; -use crate::core::ics02_client::context::{ClientKeeper, ClientReader}; -use crate::core::ics03_connection::context::{ConnectionKeeper, ConnectionReader}; -use crate::core::ics04_channel::channel::{Counterparty, Order}; -use crate::core::ics04_channel::context::{ChannelKeeper, ChannelReader}; -use crate::core::ics04_channel::error::Error; -use crate::core::ics04_channel::msgs::acknowledgement::Acknowledgement as GenericAcknowledgement; -use crate::core::ics04_channel::packet::Packet; -use crate::core::ics04_channel::Version; -use crate::core::ics05_port::context::PortReader; -use crate::core::ics24_host::identifier::{ChannelId, ConnectionId, PortId}; -use crate::events::ModuleEvent; -use crate::handler::HandlerOutputBuilder; -use crate::signer::Signer; - /// This trait captures all the functional dependencies of needed in light client implementations pub trait ReaderContext: ClientKeeper + ClientReader + ConnectionReader + ChannelReader {} /// This trait captures all the functional dependencies (i.e., context) which the ICS26 module /// requires to be able to dispatch and process IBC messages. In other words, this is the /// representation of a chain from the perspective of the IBC module of that chain. 
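The new ReaderContext super-trait bundles ClientKeeper with the client, connection and channel readers that light-client and handler code keeps asking for, which is why the hunk just below can shrink Ics26Context's bound list to ConnectionKeeper + ChannelKeeper + PortReader + ReaderContext. The bundling itself is ordinary Rust super-trait composition; a self-contained toy version of the pattern (every name below is invented for illustration, only the shape mirrors the crate):

// Toy stand-ins for two of the traits that ReaderContext bundles.
trait ClientReader {
    fn client_count(&self) -> usize;
}
trait ClientKeeper {
    fn store_client(&mut self);
}

// Marker super-trait in the same spirit as ReaderContext: it adds no methods,
// it only lets downstream code ask for the whole bundle with a single bound.
trait ReaderContext: ClientReader + ClientKeeper {}

struct HostChain {
    clients: usize,
}

impl ClientReader for HostChain {
    fn client_count(&self) -> usize {
        self.clients
    }
}

impl ClientKeeper for HostChain {
    fn store_client(&mut self) {
        self.clients += 1;
    }
}

impl ReaderContext for HostChain {}

// A handler can now name one bound instead of repeating the full list.
fn handle<C: ReaderContext>(ctx: &mut C) -> usize {
    ctx.store_client();
    ctx.client_count()
}

fn main() {
    let mut chain = HostChain { clients: 0 };
    assert_eq!(handle(&mut chain), 1);
}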
-pub trait Ics26Context: - ClientReader - + ClientKeeper - + ConnectionReader - + ConnectionKeeper - + ChannelKeeper - + ChannelReader - + PortReader - + ReaderContext -{ - type Router: Router; - - fn router(&self) -> &Self::Router; - - fn router_mut(&mut self) -> &mut Self::Router; +pub trait Ics26Context: ConnectionKeeper + ChannelKeeper + PortReader + ReaderContext { + type Router: Router; + + fn router(&self) -> &Self::Router; + + fn router_mut(&mut self) -> &mut Self::Router; } #[derive(Debug, PartialEq)] @@ -51,33 +44,33 @@ pub struct InvalidModuleId; pub struct ModuleId(String); impl ModuleId { - pub fn new(s: Cow<'_, str>) -> Result { - if !s.trim().is_empty() && s.chars().all(char::is_alphanumeric) { - Ok(Self(s.into_owned())) - } else { - Err(InvalidModuleId) - } - } + pub fn new(s: Cow<'_, str>) -> Result { + if !s.trim().is_empty() && s.chars().all(char::is_alphanumeric) { + Ok(Self(s.into_owned())) + } else { + Err(InvalidModuleId) + } + } } impl fmt::Display for ModuleId { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}", self.0) - } + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.0) + } } impl FromStr for ModuleId { - type Err = InvalidModuleId; + type Err = InvalidModuleId; - fn from_str(s: &str) -> Result { - Self::new(Cow::Borrowed(s)) - } + fn from_str(s: &str) -> Result { + Self::new(Cow::Borrowed(s)) + } } impl Borrow for ModuleId { - fn borrow(&self) -> &str { - self.0.as_str() - } + fn borrow(&self) -> &str { + self.0.as_str() + } } /// Types implementing this trait are expected to implement `From` @@ -86,143 +79,143 @@ pub trait Acknowledgement: AsRef<[u8]> {} pub type WriteFn = dyn FnOnce(&mut dyn Any) -> Result<(), String>; pub enum OnRecvPacketAck { - Nil(Box), - Successful(Box, Box), - Failed(Box), + Nil(Box), + Successful(Box, Box), + Failed(Box), } impl OnRecvPacketAck { - pub fn is_successful(&self) -> bool { - matches!(self, OnRecvPacketAck::Successful(_, _)) - } + pub fn is_successful(&self) -> bool { + matches!(self, OnRecvPacketAck::Successful(_, _)) + } } pub type ModuleOutputBuilder = HandlerOutputBuilder<(), ModuleEvent>; pub trait Module: Send + Sync + AsAnyMut { - #[allow(clippy::too_many_arguments)] - fn on_chan_open_init( - &mut self, - _output: &mut ModuleOutputBuilder, - _order: Order, - _connection_hops: &[ConnectionId], - _port_id: &PortId, - _channel_id: &ChannelId, - _counterparty: &Counterparty, - _version: &Version, - ) -> Result<(), Error> { - Ok(()) - } - - #[allow(clippy::too_many_arguments)] - fn on_chan_open_try( - &mut self, - _output: &mut ModuleOutputBuilder, - _order: Order, - _connection_hops: &[ConnectionId], - _port_id: &PortId, - _channel_id: &ChannelId, - _counterparty: &Counterparty, - _version: &Version, - _counterparty_version: &Version, - ) -> Result; - - fn on_chan_open_ack( - &mut self, - _output: &mut ModuleOutputBuilder, - _port_id: &PortId, - _channel_id: &ChannelId, - _counterparty_version: &Version, - ) -> Result<(), Error> { - Ok(()) - } - - fn on_chan_open_confirm( - &mut self, - _output: &mut ModuleOutputBuilder, - _port_id: &PortId, - _channel_id: &ChannelId, - ) -> Result<(), Error> { - Ok(()) - } - - fn on_chan_close_init( - &mut self, - _output: &mut ModuleOutputBuilder, - _port_id: &PortId, - _channel_id: &ChannelId, - ) -> Result<(), Error> { - Ok(()) - } - - fn on_chan_close_confirm( - &mut self, - _output: &mut ModuleOutputBuilder, - _port_id: &PortId, - _channel_id: &ChannelId, - ) -> Result<(), Error> { - Ok(()) - } - - fn on_recv_packet( 
- &self, - _output: &mut ModuleOutputBuilder, - _packet: &Packet, - _relayer: &Signer, - ) -> OnRecvPacketAck { - OnRecvPacketAck::Nil(Box::new(|_| Ok(()))) - } - - fn on_acknowledgement_packet( - &mut self, - _output: &mut ModuleOutputBuilder, - _packet: &Packet, - _acknowledgement: &GenericAcknowledgement, - _relayer: &Signer, - ) -> Result<(), Error> { - Ok(()) - } - - fn on_timeout_packet( - &mut self, - _output: &mut ModuleOutputBuilder, - _packet: &Packet, - _relayer: &Signer, - ) -> Result<(), Error> { - Ok(()) - } + #[allow(clippy::too_many_arguments)] + fn on_chan_open_init( + &mut self, + _output: &mut ModuleOutputBuilder, + _order: Order, + _connection_hops: &[ConnectionId], + _port_id: &PortId, + _channel_id: &ChannelId, + _counterparty: &Counterparty, + _version: &Version, + ) -> Result<(), Error> { + Ok(()) + } + + #[allow(clippy::too_many_arguments)] + fn on_chan_open_try( + &mut self, + _output: &mut ModuleOutputBuilder, + _order: Order, + _connection_hops: &[ConnectionId], + _port_id: &PortId, + _channel_id: &ChannelId, + _counterparty: &Counterparty, + _version: &Version, + _counterparty_version: &Version, + ) -> Result; + + fn on_chan_open_ack( + &mut self, + _output: &mut ModuleOutputBuilder, + _port_id: &PortId, + _channel_id: &ChannelId, + _counterparty_version: &Version, + ) -> Result<(), Error> { + Ok(()) + } + + fn on_chan_open_confirm( + &mut self, + _output: &mut ModuleOutputBuilder, + _port_id: &PortId, + _channel_id: &ChannelId, + ) -> Result<(), Error> { + Ok(()) + } + + fn on_chan_close_init( + &mut self, + _output: &mut ModuleOutputBuilder, + _port_id: &PortId, + _channel_id: &ChannelId, + ) -> Result<(), Error> { + Ok(()) + } + + fn on_chan_close_confirm( + &mut self, + _output: &mut ModuleOutputBuilder, + _port_id: &PortId, + _channel_id: &ChannelId, + ) -> Result<(), Error> { + Ok(()) + } + + fn on_recv_packet( + &self, + _output: &mut ModuleOutputBuilder, + _packet: &Packet, + _relayer: &Signer, + ) -> OnRecvPacketAck { + OnRecvPacketAck::Nil(Box::new(|_| Ok(()))) + } + + fn on_acknowledgement_packet( + &mut self, + _output: &mut ModuleOutputBuilder, + _packet: &Packet, + _acknowledgement: &GenericAcknowledgement, + _relayer: &Signer, + ) -> Result<(), Error> { + Ok(()) + } + + fn on_timeout_packet( + &mut self, + _output: &mut ModuleOutputBuilder, + _packet: &Packet, + _relayer: &Signer, + ) -> Result<(), Error> { + Ok(()) + } } pub trait RouterBuilder: Sized { - /// The `Router` type that the builder must build - type Router: Router; + /// The `Router` type that the builder must build + type Router: Router; - /// Registers `Module` against the specified `ModuleId` in the `Router`'s internal map - /// - /// Returns an error if a `Module` has already been registered against the specified `ModuleId` - fn add_route(self, module_id: ModuleId, module: impl Module) -> Result; + /// Registers `Module` against the specified `ModuleId` in the `Router`'s internal map + /// + /// Returns an error if a `Module` has already been registered against the specified `ModuleId` + fn add_route(self, module_id: ModuleId, module: impl Module) -> Result; - /// Consumes the `RouterBuilder` and returns a `Router` as configured - fn build(self) -> Self::Router; + /// Consumes the `RouterBuilder` and returns a `Router` as configured + fn build(self) -> Self::Router; } pub trait AsAnyMut: Any { - fn as_any_mut(&mut self) -> &mut dyn Any; + fn as_any_mut(&mut self) -> &mut dyn Any; } impl AsAnyMut for M { - fn as_any_mut(&mut self) -> &mut dyn Any { - self - } + fn as_any_mut(&mut 
self) -> &mut dyn Any { + self + } } /// A router maintains a mapping of `ModuleId`s against `Modules`. Implementations must not publicly /// expose APIs to add new routes once constructed. Routes may only be added at the time of /// instantiation using the `RouterBuilder`. pub trait Router { - /// Returns a mutable reference to a `Module` registered against the specified `ModuleId` - fn get_route_mut(&mut self, module_id: &impl Borrow) -> Option<&mut dyn Module>; + /// Returns a mutable reference to a `Module` registered against the specified `ModuleId` + fn get_route_mut(&mut self, module_id: &impl Borrow) -> Option<&mut dyn Module>; - /// Returns true if the `Router` has a `Module` registered against the specified `ModuleId` - fn has_route(&self, module_id: &impl Borrow) -> bool; + /// Returns true if the `Router` has a `Module` registered against the specified `ModuleId` + fn has_route(&self, module_id: &impl Borrow) -> bool; } diff --git a/modules/src/core/ics26_routing/error.rs b/modules/src/core/ics26_routing/error.rs index 260a688cdd..e2aa0e2f88 100644 --- a/modules/src/core/ics26_routing/error.rs +++ b/modules/src/core/ics26_routing/error.rs @@ -1,36 +1,36 @@ use crate::prelude::*; use flex_error::{define_error, TraceError}; -use crate::applications::transfer; -use crate::core::ics02_client; -use crate::core::ics03_connection; -use crate::core::ics04_channel; +use crate::{ + applications::transfer, + core::{ics02_client, ics03_connection, ics04_channel}, +}; define_error! { - #[derive(Debug, PartialEq, Eq)] - Error { - Ics02Client - [ ics02_client::error::Error ] - | _ | { "ICS02 client error" }, + #[derive(Debug, PartialEq, Eq)] + Error { + Ics02Client + [ ics02_client::error::Error ] + | _ | { "ICS02 client error" }, - Ics03Connection - [ ics03_connection::error::Error ] - | _ | { "ICS03 connection error" }, + Ics03Connection + [ ics03_connection::error::Error ] + | _ | { "ICS03 connection error" }, - Ics04Channel - [ ics04_channel::error::Error ] - | _ | { "ICS04 channel error" }, + Ics04Channel + [ ics04_channel::error::Error ] + | _ | { "ICS04 channel error" }, - Ics20FungibleTokenTransfer - [ transfer::error::Error ] - | _ | { "ICS20 fungible token transfer error" }, + Ics20FungibleTokenTransfer + [ transfer::error::Error ] + | _ | { "ICS20 fungible token transfer error" }, - UnknownMessageTypeUrl - { url: String } - | e | { format_args!("unknown type URL {0}", e.url) }, + UnknownMessageTypeUrl + { url: String } + | e | { format_args!("unknown type URL {0}", e.url) }, - MalformedMessageBytes - [ TraceError ] - | _ | { "the message is malformed and cannot be decoded" }, - } + MalformedMessageBytes + [ TraceError ] + | _ | { "the message is malformed and cannot be decoded" }, + } } diff --git a/modules/src/core/ics26_routing/handler.rs b/modules/src/core/ics26_routing/handler.rs index 9be3bfe358..b179f8f0f1 100644 --- a/modules/src/core/ics26_routing/handler.rs +++ b/modules/src/core/ics26_routing/handler.rs @@ -1,53 +1,62 @@ -use crate::clients::host_functions::HostFunctionsProvider; -use crate::prelude::*; - -use ibc_proto::google::protobuf::Any; - -use crate::core::ics02_client::handler::dispatch as ics2_msg_dispatcher; -use crate::core::ics03_connection::handler::dispatch as ics3_msg_dispatcher; -use crate::core::ics04_channel::handler::{ - channel_callback as ics4_callback, channel_dispatch as ics4_msg_dispatcher, - channel_validate as ics4_validate, recv_packet::RecvPacketResult, +use crate::{ + core::{ + ics02_client::{context::ClientKeeper, handler::dispatch as 
ics2_msg_dispatcher}, + ics03_connection::handler::dispatch as ics3_msg_dispatcher, + ics04_channel::{ + handler::{ + channel_callback as ics4_callback, channel_dispatch as ics4_msg_dispatcher, + channel_validate as ics4_validate, get_module_for_packet_msg, + packet_callback as ics4_packet_callback, + packet_dispatch as ics4_packet_msg_dispatcher, recv_packet::RecvPacketResult, + }, + packet::PacketResult, + }, + ics26_routing::{ + context::{Ics26Context, ModuleOutputBuilder, ReaderContext}, + error::Error, + msgs::Ics26Envelope::{self, Ics2Msg, Ics3Msg, Ics4ChannelMsg, Ics4PacketMsg}, + }, + }, + events::IbcEvent, + handler::HandlerOutput, + prelude::*, }; -use crate::core::ics04_channel::handler::{ - get_module_for_packet_msg, packet_callback as ics4_packet_callback, - packet_dispatch as ics4_packet_msg_dispatcher, -}; -use crate::core::ics04_channel::packet::PacketResult; -use crate::core::ics26_routing::context::{Ics26Context, ModuleOutputBuilder}; -use crate::core::ics26_routing::error::Error; -use crate::core::ics26_routing::msgs::Ics26Envelope::{ - self, Ics2Msg, Ics3Msg, Ics4ChannelMsg, Ics4PacketMsg, -}; -use crate::{events::IbcEvent, handler::HandlerOutput}; +use core::fmt::Debug; +use ibc_proto::google::protobuf::Any; /// Result of message execution - comprises of events emitted and logs entries created during the /// execution of a transaction message. pub struct MsgReceipt { - pub events: Vec, - pub log: Vec, + pub events: Vec, + pub log: Vec, } /// Mimics the DeliverTx ABCI interface, but for a single message and at a slightly lower level. /// No need for authentication info or signature checks here. /// Returns a vector of all events that got generated as a byproduct of processing `message`. -pub fn deliver(ctx: &mut Ctx, message: Any) -> Result +pub fn deliver(ctx: &mut Ctx, message: Any) -> Result where - Ctx: Ics26Context, - HostFunctions: HostFunctionsProvider, + Ctx: Ics26Context + ReaderContext, + Ics26Envelope: TryFrom, + Error: From< as TryFrom>::Error>, { - // Decode the proto message into a domain message, creating an ICS26 envelope. - let envelope = decode(message)?; + // Decode the proto message into a domain message, creating an ICS26 envelope. + let envelope = decode::(message)?; - // Process the envelope, and accumulate any events that were generated. - let HandlerOutput { log, events, .. } = dispatch::<_, HostFunctions>(ctx, envelope)?; + // Process the envelope, and accumulate any events that were generated. + let HandlerOutput { log, events, .. } = dispatch::<_>(ctx, envelope)?; - Ok(MsgReceipt { events, log }) + Ok(MsgReceipt { events, log }) } /// Attempts to convert a message into a [Ics26Envelope] message -pub fn decode(message: Any) -> Result { - message.try_into() +pub fn decode(message: Any) -> Result, Error> +where + C: ClientKeeper + Clone + Debug + PartialEq + Eq, + Ics26Envelope: TryFrom, + Error: From< as TryFrom>::Error>, +{ + message.try_into().map_err(Into::into) } /// Top-level ICS dispatch function. Routes incoming IBC messages to their corresponding module. @@ -55,518 +64,511 @@ pub fn decode(message: Any) -> Result { /// and events produced after processing the input `msg`. /// If this method returns an error, the runtime is expected to rollback all state modifications to /// the `Ctx` caused by all messages from the transaction that this `msg` is a part of. 
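Taken together, deliver above and the reworked dispatch below implement one flow: decode the protobuf Any into an Ics26Envelope, route the envelope to the matching ICS handler, commit the handler's result to the context, and hand back the emitted events and log lines as a MsgReceipt. A rough caller-side sketch of deliver (the extra TryFrom/From conversion bounds from the real signature are elided, so treat this as an outline rather than a drop-in function):

use ibc_proto::google::protobuf::Any;

use crate::core::ics26_routing::{
    context::{Ics26Context, ReaderContext},
    error::Error,
    handler::{deliver, MsgReceipt},
};

// `ctx` is any context satisfying deliver()'s bounds (for instance a mock
// context like the one exercised in the tests further down); `raw` is an
// already protobuf-encoded IBC message.
fn apply_message<Ctx>(ctx: &mut Ctx, raw: Any) -> Result<(), Error>
where
    Ctx: Ics26Context + ReaderContext,
{
    let MsgReceipt { events, log } = deliver(ctx, raw)?;
    for line in log {
        println!("ibc log: {line}");
    }
    for event in events {
        println!("ibc event: {event:?}");
    }
    Ok(())
}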
-pub fn dispatch( - ctx: &mut Ctx, - msg: Ics26Envelope, -) -> Result, Error> +pub fn dispatch(ctx: &mut Ctx, msg: Ics26Envelope) -> Result, Error> where - Ctx: Ics26Context, - HostFunctions: HostFunctionsProvider, + Ctx: Ics26Context + ClientKeeper, { - let output = match msg { - Ics2Msg(msg) => { - let handler_output = - ics2_msg_dispatcher::<_, HostFunctions>(ctx, msg).map_err(Error::ics02_client)?; - - // Apply the result to the context (host chain store). - ctx.store_client_result(handler_output.result) - .map_err(Error::ics02_client)?; - - HandlerOutput::builder() - .with_log(handler_output.log) - .with_events(handler_output.events) - .with_result(()) - } - - Ics3Msg(msg) => { - let handler_output = ics3_msg_dispatcher::<_, HostFunctions>(ctx, msg) - .map_err(Error::ics03_connection)?; - - // Apply any results to the host chain store. - ctx.store_connection_result(handler_output.result) - .map_err(Error::ics03_connection)?; - - HandlerOutput::builder() - .with_log(handler_output.log) - .with_events(handler_output.events) - .with_result(()) - } - - Ics4ChannelMsg(msg) => { - let module_id = ics4_validate(ctx, &msg).map_err(Error::ics04_channel)?; - let (mut handler_builder, channel_result) = - ics4_msg_dispatcher::<_, HostFunctions>(ctx, &msg).map_err(Error::ics04_channel)?; - - let mut module_output = ModuleOutputBuilder::new(); - let cb_result = - ics4_callback(ctx, &module_id, &msg, channel_result, &mut module_output); - handler_builder.merge(module_output); - let channel_result = cb_result.map_err(Error::ics04_channel)?; - - // Apply any results to the host chain store. - ctx.store_channel_result(channel_result) - .map_err(Error::ics04_channel)?; - - handler_builder.with_result(()) - } - - Ics4PacketMsg(msg) => { - let module_id = get_module_for_packet_msg(ctx, &msg).map_err(Error::ics04_channel)?; - let (mut handler_builder, packet_result) = - ics4_packet_msg_dispatcher::<_, HostFunctions>(ctx, &msg) - .map_err(Error::ics04_channel)?; - - if matches!(packet_result, PacketResult::Recv(RecvPacketResult::NoOp)) { - return Ok(handler_builder.with_result(())); - } - - let mut module_output = ModuleOutputBuilder::new(); - let cb_result = ics4_packet_callback(ctx, &module_id, &msg, &mut module_output); - handler_builder.merge(module_output); - cb_result.map_err(Error::ics04_channel)?; - - // Apply any results to the host chain store. - ctx.store_packet_result(packet_result) - .map_err(Error::ics04_channel)?; - - handler_builder.with_result(()) - } - }; - - Ok(output) + let output = match msg { + Ics2Msg(msg) => { + let handler_output = + ics2_msg_dispatcher::(ctx, msg).map_err(Error::ics02_client)?; + + // Apply the result to the context (host chain store). + ctx.store_client_result::(handler_output.result) + .map_err(Error::ics02_client)?; + + HandlerOutput::builder() + .with_log(handler_output.log) + .with_events(handler_output.events) + .with_result(()) + }, + + Ics3Msg(msg) => { + let handler_output = + ics3_msg_dispatcher::<_>(ctx, msg).map_err(Error::ics03_connection)?; + + // Apply any results to the host chain store. 
+ ctx.store_connection_result(handler_output.result) + .map_err(Error::ics03_connection)?; + + HandlerOutput::builder() + .with_log(handler_output.log) + .with_events(handler_output.events) + .with_result(()) + }, + + Ics4ChannelMsg(msg) => { + let module_id = ics4_validate(ctx, &msg).map_err(Error::ics04_channel)?; + let (mut handler_builder, channel_result) = + ics4_msg_dispatcher::<_>(ctx, &msg).map_err(Error::ics04_channel)?; + + let mut module_output = ModuleOutputBuilder::new(); + let cb_result = + ics4_callback(ctx, &module_id, &msg, channel_result, &mut module_output); + handler_builder.merge(module_output); + let channel_result = cb_result.map_err(Error::ics04_channel)?; + + // Apply any results to the host chain store. + ctx.store_channel_result(channel_result).map_err(Error::ics04_channel)?; + + handler_builder.with_result(()) + }, + + Ics4PacketMsg(msg) => { + let module_id = get_module_for_packet_msg(ctx, &msg).map_err(Error::ics04_channel)?; + let (mut handler_builder, packet_result) = + ics4_packet_msg_dispatcher::<_>(ctx, &msg).map_err(Error::ics04_channel)?; + + if matches!(packet_result, PacketResult::Recv(RecvPacketResult::NoOp)) { + return Ok(handler_builder.with_result(())) + } + + let mut module_output = ModuleOutputBuilder::new(); + let cb_result = ics4_packet_callback(ctx, &module_id, &msg, &mut module_output); + handler_builder.merge(module_output); + cb_result.map_err(Error::ics04_channel)?; + + // Apply any results to the host chain store. + ctx.store_packet_result(packet_result).map_err(Error::ics04_channel)?; + + handler_builder.with_result(()) + }, + }; + + Ok(output) } #[cfg(test)] mod tests { - use crate::prelude::*; - - use test_log::test; - - use crate::applications::transfer::context::test::deliver as ics20_deliver; - use crate::applications::transfer::PrefixedCoin; - use crate::core::ics02_client::client_consensus::AnyConsensusState; - use crate::core::ics02_client::client_state::AnyClientState; - use crate::core::ics02_client::msgs::{ - create_client::MsgCreateAnyClient, update_client::MsgUpdateAnyClient, - upgrade_client::MsgUpgradeAnyClient, ClientMsg, - }; - use crate::core::ics03_connection::msgs::{ - conn_open_ack::{test_util::get_dummy_raw_msg_conn_open_ack, MsgConnectionOpenAck}, - conn_open_init::{test_util::get_dummy_raw_msg_conn_open_init, MsgConnectionOpenInit}, - conn_open_try::{test_util::get_dummy_raw_msg_conn_open_try, MsgConnectionOpenTry}, - ConnectionMsg, - }; - use crate::core::ics04_channel::msgs::{ - chan_close_confirm::{ - test_util::get_dummy_raw_msg_chan_close_confirm, MsgChannelCloseConfirm, - }, - chan_close_init::{test_util::get_dummy_raw_msg_chan_close_init, MsgChannelCloseInit}, - chan_open_ack::{test_util::get_dummy_raw_msg_chan_open_ack, MsgChannelOpenAck}, - chan_open_init::{test_util::get_dummy_raw_msg_chan_open_init, MsgChannelOpenInit}, - chan_open_try::{test_util::get_dummy_raw_msg_chan_open_try, MsgChannelOpenTry}, - recv_packet::{test_util::get_dummy_raw_msg_recv_packet, MsgRecvPacket}, - timeout_on_close::{test_util::get_dummy_raw_msg_timeout_on_close, MsgTimeoutOnClose}, - ChannelMsg, PacketMsg, - }; - use crate::events::IbcEvent; - use crate::test_utils::Crypto; - use crate::{ - applications::transfer::msgs::transfer::test_util::get_dummy_msg_transfer, - applications::transfer::msgs::transfer::MsgTransfer, - applications::transfer::packet::PacketData, applications::transfer::MODULE_ID_STR, - }; - - use crate::core::ics24_host::identifier::ConnectionId; - use crate::core::ics26_routing::context::{Ics26Context, 
ModuleId, Router, RouterBuilder}; - use crate::core::ics26_routing::error::Error; - use crate::core::ics26_routing::handler::dispatch; - use crate::core::ics26_routing::msgs::Ics26Envelope; - use crate::handler::HandlerOutputBuilder; - use crate::mock::client_state::{MockClientState, MockConsensusState}; - use crate::mock::context::{MockContext, MockRouterBuilder}; - use crate::mock::header::MockHeader; - use crate::test_utils::{get_dummy_account_id, DummyTransferModule}; - use crate::timestamp::Timestamp; - use crate::Height; - - #[test] - /// These tests exercise two main paths: (1) the ability of the ICS26 routing module to dispatch - /// messages to the correct module handler, and more importantly: (2) the ability of ICS handlers - /// to work with the context and correctly store results (i.e., the `ClientKeeper`, - /// `ConnectionKeeper`, and `ChannelKeeper` traits). - fn routing_module_and_keepers() { - #[derive(Clone, Debug)] - enum TestMsg { - Ics26(Ics26Envelope), - Ics20(MsgTransfer), - } - - impl From for TestMsg { - fn from(msg: Ics26Envelope) -> Self { - Self::Ics26(msg) - } - } - - impl From> for TestMsg { - fn from(msg: MsgTransfer) -> Self { - Self::Ics20(msg) - } - } - - // Test parameters - struct Test { - name: String, - msg: TestMsg, - want_pass: bool, - } - let default_signer = get_dummy_account_id(); - let client_height = 5; - let start_client_height = Height::new(0, client_height); - let update_client_height = Height::new(0, 34); - let update_client_height_after_send = Height::new(0, 35); - - let update_client_height_after_second_send = Height::new(0, 36); - - let upgrade_client_height = Height::new(1, 2); - - let upgrade_client_height_second = Height::new(1, 1); - - let transfer_module_id: ModuleId = MODULE_ID_STR.parse().unwrap(); - - // We reuse this same context across all tests. Nothing in particular needs parametrizing. - let mut ctx = { - let ctx = MockContext::default(); - let module = DummyTransferModule::new(ctx.ibc_store_share()); - let router = MockRouterBuilder::default() - .add_route(transfer_module_id.clone(), module) - .unwrap() - .build(); - ctx.with_router(router) - }; - - let create_client_msg = MsgCreateAnyClient::new( - AnyClientState::from(MockClientState::new(MockHeader::new(start_client_height))), - AnyConsensusState::Mock(MockConsensusState::new(MockHeader::new( - start_client_height, - ))), - default_signer.clone(), - ) - .unwrap(); - - // - // Connection handshake messages. - // - let msg_conn_init = - MsgConnectionOpenInit::try_from(get_dummy_raw_msg_conn_open_init()).unwrap(); - - let correct_msg_conn_try = MsgConnectionOpenTry::try_from(get_dummy_raw_msg_conn_open_try( - client_height, - client_height, - )) - .unwrap(); - - // The handler will fail to process this msg because the client height is too advanced. - let incorrect_msg_conn_try = MsgConnectionOpenTry::try_from( - get_dummy_raw_msg_conn_open_try(client_height + 1, client_height + 1), - ) - .unwrap(); - - let msg_conn_ack = MsgConnectionOpenAck::try_from(get_dummy_raw_msg_conn_open_ack( - client_height, - client_height, - )) - .unwrap(); - - // - // Channel handshake messages. 
- // - let msg_chan_init = - MsgChannelOpenInit::try_from(get_dummy_raw_msg_chan_open_init()).unwrap(); - - // The handler will fail to process this b/c the associated connection does not exist - let mut incorrect_msg_chan_init = msg_chan_init.clone(); - incorrect_msg_chan_init.channel.connection_hops = vec![ConnectionId::new(590)]; - - let msg_chan_try = - MsgChannelOpenTry::try_from(get_dummy_raw_msg_chan_open_try(client_height)).unwrap(); - - let msg_chan_ack = - MsgChannelOpenAck::try_from(get_dummy_raw_msg_chan_open_ack(client_height)).unwrap(); - - let msg_chan_close_init = - MsgChannelCloseInit::try_from(get_dummy_raw_msg_chan_close_init()).unwrap(); - - let msg_chan_close_confirm = - MsgChannelCloseConfirm::try_from(get_dummy_raw_msg_chan_close_confirm(client_height)) - .unwrap(); - - let msg_transfer = get_dummy_msg_transfer(35); - let msg_transfer_two = get_dummy_msg_transfer(36); - - let mut msg_to_on_close = - MsgTimeoutOnClose::try_from(get_dummy_raw_msg_timeout_on_close(36, 5)).unwrap(); - msg_to_on_close.packet.sequence = 2.into(); - msg_to_on_close.packet.timeout_height = msg_transfer_two.timeout_height; - msg_to_on_close.packet.timeout_timestamp = msg_transfer_two.timeout_timestamp; - - let denom = msg_transfer_two.token.denom.clone(); - let packet_data = { - let data = PacketData { - token: PrefixedCoin { - denom, - amount: msg_transfer_two.token.amount, - }, - sender: msg_transfer_two.sender.clone(), - receiver: msg_transfer_two.receiver.clone(), - }; - serde_json::to_vec(&data).expect("PacketData's infallible Serialize impl failed") - }; - msg_to_on_close.packet.data = packet_data; - - let msg_recv_packet = MsgRecvPacket::try_from(get_dummy_raw_msg_recv_packet(35)).unwrap(); - - // First, create a client.. - let res = dispatch::<_, Crypto>( - &mut ctx, - Ics26Envelope::Ics2Msg(ClientMsg::CreateClient(create_client_msg.clone())), - ); - - assert!( + use crate::prelude::*; + + use test_log::test; + + use crate::{ + applications::transfer::{ + context::test::deliver as ics20_deliver, + msgs::transfer::{test_util::get_dummy_msg_transfer, MsgTransfer}, + packet::PacketData, + PrefixedCoin, MODULE_ID_STR, + }, + core::{ + ics02_client::msgs::{ + create_client::MsgCreateAnyClient, update_client::MsgUpdateAnyClient, + upgrade_client::MsgUpgradeAnyClient, ClientMsg, + }, + ics03_connection::msgs::{ + conn_open_ack::{test_util::get_dummy_raw_msg_conn_open_ack, MsgConnectionOpenAck}, + conn_open_init::{ + test_util::get_dummy_raw_msg_conn_open_init, MsgConnectionOpenInit, + }, + conn_open_try::{test_util::get_dummy_raw_msg_conn_open_try, MsgConnectionOpenTry}, + ConnectionMsg, + }, + ics04_channel::msgs::{ + chan_close_confirm::{ + test_util::get_dummy_raw_msg_chan_close_confirm, MsgChannelCloseConfirm, + }, + chan_close_init::{ + test_util::get_dummy_raw_msg_chan_close_init, MsgChannelCloseInit, + }, + chan_open_ack::{test_util::get_dummy_raw_msg_chan_open_ack, MsgChannelOpenAck}, + chan_open_init::{test_util::get_dummy_raw_msg_chan_open_init, MsgChannelOpenInit}, + chan_open_try::{test_util::get_dummy_raw_msg_chan_open_try, MsgChannelOpenTry}, + recv_packet::{test_util::get_dummy_raw_msg_recv_packet, MsgRecvPacket}, + timeout_on_close::{ + test_util::get_dummy_raw_msg_timeout_on_close, MsgTimeoutOnClose, + }, + ChannelMsg, PacketMsg, + }, + }, + events::IbcEvent, + mock::client_state::{AnyClientState, AnyConsensusState}, + }; + + use crate::{ + core::{ + ics24_host::identifier::ConnectionId, + ics26_routing::{ + context::{Ics26Context, ModuleId, Router, RouterBuilder}, + 
error::Error, + handler::dispatch, + msgs::Ics26Envelope, + }, + }, + handler::HandlerOutputBuilder, + mock::{ + client_state::{MockClientState, MockConsensusState}, + context::{MockClientTypes, MockContext, MockRouterBuilder}, + header::MockHeader, + }, + test_utils::{get_dummy_account_id, DummyTransferModule}, + timestamp::Timestamp, + Height, + }; + + #[test] + /// These tests exercise two main paths: (1) the ability of the ICS26 routing module to dispatch + /// messages to the correct module handler, and more importantly: (2) the ability of ICS + /// handlers to work with the context and correctly store results (i.e., the `ClientKeeper`, + /// `ConnectionKeeper`, and `ChannelKeeper` traits). + fn routing_module_and_keepers() { + #[derive(Clone, Debug)] + enum TestMsg { + Ics26(Ics26Envelope>), + Ics20(MsgTransfer), + } + + impl From>> for TestMsg { + fn from(msg: Ics26Envelope>) -> Self { + Self::Ics26(msg) + } + } + + impl From> for TestMsg { + fn from(msg: MsgTransfer) -> Self { + Self::Ics20(msg) + } + } + + // Test parameters + struct Test { + name: String, + msg: TestMsg, + want_pass: bool, + } + let default_signer = get_dummy_account_id(); + let client_height = 5; + let start_client_height = Height::new(0, client_height); + let update_client_height = Height::new(0, 34); + let update_client_height_after_send = Height::new(0, 35); + + let update_client_height_after_second_send = Height::new(0, 36); + + let upgrade_client_height = Height::new(1, 2); + + let upgrade_client_height_second = Height::new(1, 1); + + let transfer_module_id: ModuleId = MODULE_ID_STR.parse().unwrap(); + + // We reuse this same context across all tests. Nothing in particular needs parametrizing. + let mut ctx = { + let ctx = MockContext::default(); + let module = DummyTransferModule::new(ctx.ibc_store_share()); + let router = MockRouterBuilder::default() + .add_route(transfer_module_id.clone(), module) + .unwrap() + .build(); + ctx.with_router(router) + }; + + let create_client_msg = MsgCreateAnyClient::new( + AnyClientState::from(MockClientState::new(MockHeader::new(start_client_height))), + AnyConsensusState::Mock(MockConsensusState::new(MockHeader::new(start_client_height))), + default_signer.clone(), + ) + .unwrap(); + + // + // Connection handshake messages. + // + let msg_conn_init = + MsgConnectionOpenInit::try_from(get_dummy_raw_msg_conn_open_init()).unwrap(); + + let correct_msg_conn_try = MsgConnectionOpenTry::try_from(get_dummy_raw_msg_conn_open_try( + client_height, + client_height, + )) + .unwrap(); + + // The handler will fail to process this msg because the client height is too advanced. + let incorrect_msg_conn_try = MsgConnectionOpenTry::try_from( + get_dummy_raw_msg_conn_open_try(client_height + 1, client_height + 1), + ) + .unwrap(); + + let msg_conn_ack = MsgConnectionOpenAck::try_from(get_dummy_raw_msg_conn_open_ack( + client_height, + client_height, + )) + .unwrap(); + + // + // Channel handshake messages. 
+ // + let msg_chan_init = + MsgChannelOpenInit::try_from(get_dummy_raw_msg_chan_open_init()).unwrap(); + + // The handler will fail to process this b/c the associated connection does not exist + let mut incorrect_msg_chan_init = msg_chan_init.clone(); + incorrect_msg_chan_init.channel.connection_hops = vec![ConnectionId::new(590)]; + + let msg_chan_try = + MsgChannelOpenTry::try_from(get_dummy_raw_msg_chan_open_try(client_height)).unwrap(); + + let msg_chan_ack = + MsgChannelOpenAck::try_from(get_dummy_raw_msg_chan_open_ack(client_height)).unwrap(); + + let msg_chan_close_init = + MsgChannelCloseInit::try_from(get_dummy_raw_msg_chan_close_init()).unwrap(); + + let msg_chan_close_confirm = + MsgChannelCloseConfirm::try_from(get_dummy_raw_msg_chan_close_confirm(client_height)) + .unwrap(); + + let msg_transfer = get_dummy_msg_transfer(35); + let msg_transfer_two = get_dummy_msg_transfer(36); + + let mut msg_to_on_close = + MsgTimeoutOnClose::try_from(get_dummy_raw_msg_timeout_on_close(36, 5)).unwrap(); + msg_to_on_close.packet.sequence = 2.into(); + msg_to_on_close.packet.timeout_height = msg_transfer_two.timeout_height; + msg_to_on_close.packet.timeout_timestamp = msg_transfer_two.timeout_timestamp; + + let denom = msg_transfer_two.token.denom.clone(); + let packet_data = { + let data = PacketData { + token: PrefixedCoin { denom, amount: msg_transfer_two.token.amount }, + sender: msg_transfer_two.sender.clone(), + receiver: msg_transfer_two.receiver.clone(), + }; + serde_json::to_vec(&data).expect("PacketData's infallible Serialize impl failed") + }; + msg_to_on_close.packet.data = packet_data; + + let msg_recv_packet = MsgRecvPacket::try_from(get_dummy_raw_msg_recv_packet(35)).unwrap(); + + // First, create a client.. + let res = dispatch( + &mut ctx, + Ics26Envelope::Ics2Msg(ClientMsg::CreateClient(create_client_msg.clone())), + ); + + assert!( res.is_ok(), "ICS26 routing dispatch test 'client creation' failed for message {:?} with result: {:?}", create_client_msg, res ); - ctx.scope_port_to_module(msg_chan_init.port_id.clone(), transfer_module_id.clone()); - - // Figure out the ID of the client that was just created. - let mut events = res.unwrap().events; - let client_id_event = events.pop(); - assert!( - client_id_event.is_some(), - "There was no event generated for client creation!" - ); - let client_id = match client_id_event.unwrap() { - IbcEvent::CreateClient(create_client) => create_client.client_id().clone(), - event => panic!("unexpected IBC event: {:?}", event), - }; - - let tests: Vec = vec![ - // Test some ICS2 client functionality. 
- Test { - name: "Client update successful".to_string(), - msg: Ics26Envelope::Ics2Msg(ClientMsg::UpdateClient(MsgUpdateAnyClient { - client_id: client_id.clone(), - header: MockHeader::new(update_client_height) - .with_timestamp(Timestamp::now()) - .into(), - signer: default_signer.clone(), - })) - .into(), - want_pass: true, - }, - Test { - name: "Client update fails due to stale header".to_string(), - msg: Ics26Envelope::Ics2Msg(ClientMsg::UpdateClient(MsgUpdateAnyClient { - client_id: client_id.clone(), - header: MockHeader::new(update_client_height).into(), - signer: default_signer.clone(), - })) - .into(), - want_pass: false, - }, - Test { - name: "Connection open init succeeds".to_string(), - msg: Ics26Envelope::Ics3Msg(ConnectionMsg::ConnectionOpenInit( - msg_conn_init.with_client_id(client_id.clone()), - )) - .into(), - want_pass: true, - }, - Test { - name: "Connection open try fails due to InvalidConsensusHeight (too high)" - .to_string(), - msg: Ics26Envelope::Ics3Msg(ConnectionMsg::ConnectionOpenTry(Box::new( - incorrect_msg_conn_try, - ))) - .into(), - want_pass: false, - }, - Test { - name: "Connection open try succeeds".to_string(), - msg: Ics26Envelope::Ics3Msg(ConnectionMsg::ConnectionOpenTry(Box::new( - correct_msg_conn_try.with_client_id(client_id.clone()), - ))) - .into(), - want_pass: true, - }, - Test { - name: "Connection open ack succeeds".to_string(), - msg: Ics26Envelope::Ics3Msg(ConnectionMsg::ConnectionOpenAck(Box::new( - msg_conn_ack, - ))) - .into(), - want_pass: true, - }, - // ICS04 - Test { - name: "Channel open init succeeds".to_string(), - msg: Ics26Envelope::Ics4ChannelMsg(ChannelMsg::ChannelOpenInit(msg_chan_init)) - .into(), - want_pass: true, - }, - Test { - name: "Channel open init fail due to missing connection".to_string(), - msg: Ics26Envelope::Ics4ChannelMsg(ChannelMsg::ChannelOpenInit( - incorrect_msg_chan_init, - )) - .into(), - want_pass: false, - }, - Test { - name: "Channel open try succeeds".to_string(), - msg: Ics26Envelope::Ics4ChannelMsg(ChannelMsg::ChannelOpenTry(msg_chan_try)).into(), - want_pass: true, - }, - Test { - name: "Channel open ack succeeds".to_string(), - msg: Ics26Envelope::Ics4ChannelMsg(ChannelMsg::ChannelOpenAck(msg_chan_ack)).into(), - want_pass: true, - }, - Test { - name: "Packet send".to_string(), - msg: msg_transfer.into(), - want_pass: true, - }, - // The client update is required in this test, because the proof associated with - // msg_recv_packet has the same height as the packet TO height (see get_dummy_raw_msg_recv_packet) - Test { - name: "Client update successful #2".to_string(), - msg: Ics26Envelope::Ics2Msg(ClientMsg::UpdateClient(MsgUpdateAnyClient { - client_id: client_id.clone(), - header: MockHeader::new(update_client_height_after_send) - .with_timestamp(Timestamp::now()) - .into(), - signer: default_signer.clone(), - })) - .into(), - want_pass: true, - }, - Test { - name: "Receive packet".to_string(), - msg: Ics26Envelope::Ics4PacketMsg(PacketMsg::RecvPacket(msg_recv_packet.clone())) - .into(), - want_pass: true, - }, - Test { - name: "Re-Receive packet".to_string(), - msg: Ics26Envelope::Ics4PacketMsg(PacketMsg::RecvPacket(msg_recv_packet)).into(), - want_pass: true, - }, - Test { - name: "Packet send".to_string(), - msg: msg_transfer_two.into(), - want_pass: true, - }, - Test { - name: "Client update successful".to_string(), - msg: Ics26Envelope::Ics2Msg(ClientMsg::UpdateClient(MsgUpdateAnyClient { - client_id: client_id.clone(), - header: 
MockHeader::new(update_client_height_after_second_send).into(), - signer: default_signer.clone(), - })) - .into(), - want_pass: true, - }, - //ICS04-close channel - Test { - name: "Channel close init succeeds".to_string(), - msg: Ics26Envelope::Ics4ChannelMsg(ChannelMsg::ChannelCloseInit( - msg_chan_close_init, - )) - .into(), - want_pass: true, - }, - Test { - name: "Channel close confirm fails cause channel is already closed".to_string(), - msg: Ics26Envelope::Ics4ChannelMsg(ChannelMsg::ChannelCloseConfirm( - msg_chan_close_confirm, - )) - .into(), - want_pass: false, - }, - //ICS04-to_on_close - Test { - name: "Timeout on close".to_string(), - msg: Ics26Envelope::Ics4PacketMsg(PacketMsg::ToClosePacket(msg_to_on_close)).into(), - want_pass: true, - }, - Test { - name: "Client upgrade successful".to_string(), - msg: Ics26Envelope::Ics2Msg(ClientMsg::UpgradeClient(MsgUpgradeAnyClient::new( - client_id.clone(), - AnyClientState::Mock(MockClientState::new(MockHeader::new( - upgrade_client_height, - ))), - AnyConsensusState::Mock(MockConsensusState::new(MockHeader::new( - upgrade_client_height, - ))), - Vec::new(), - Vec::new(), - default_signer.clone(), - ))) - .into(), - want_pass: true, - }, - Test { - name: "Client upgrade un-successful".to_string(), - msg: Ics26Envelope::Ics2Msg(ClientMsg::UpgradeClient(MsgUpgradeAnyClient::new( - client_id, - AnyClientState::Mock(MockClientState::new(MockHeader::new( - upgrade_client_height_second, - ))), - AnyConsensusState::Mock(MockConsensusState::new(MockHeader::new( - upgrade_client_height_second, - ))), - Vec::new(), - Vec::new(), - default_signer, - ))) - .into(), - want_pass: false, - }, - ] - .into_iter() - .collect(); - - for test in tests { - let res = match test.msg.clone() { - TestMsg::Ics26(msg) => dispatch::<_, Crypto>(&mut ctx, msg).map(|_| ()), - TestMsg::Ics20(msg) => { - let transfer_module = - ctx.router_mut().get_route_mut(&transfer_module_id).unwrap(); - ics20_deliver( - transfer_module - .as_any_mut() - .downcast_mut::() - .unwrap(), - &mut HandlerOutputBuilder::new(), - msg, - ) - .map(|_| ()) - .map_err(Error::ics04_channel) - } - }; - - assert_eq!( - test.want_pass, - res.is_ok(), - "ICS26 routing dispatch test '{}' failed for message {:?}\nwith result: {:?}", - test.name, - test.msg, - res - ); - } - } + ctx.scope_port_to_module(msg_chan_init.port_id.clone(), transfer_module_id.clone()); + + // Figure out the ID of the client that was just created. + let mut events = res.unwrap().events; + let client_id_event = events.pop(); + assert!(client_id_event.is_some(), "There was no event generated for client creation!"); + let client_id = match client_id_event.unwrap() { + IbcEvent::CreateClient(create_client) => create_client.client_id().clone(), + event => panic!("unexpected IBC event: {:?}", event), + }; + + let tests: Vec = vec![ + // Test some ICS2 client functionality. 
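+        // NOTE (annotation, not part of the diff): the entries below are executed in
+        // order by the dispatch loop at the end of the test, and each result is compared
+        // against `want_pass`. Ordering matters: later cases rely on state written by
+        // earlier ones (e.g. the channel-open cases assume the connection handshake cases
+        // passed, and the receive-packet case assumes the preceding client update landed).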
+ Test { + name: "Client update successful".to_string(), + msg: Ics26Envelope::Ics2Msg(ClientMsg::UpdateClient(MsgUpdateAnyClient { + client_id: client_id.clone(), + header: MockHeader::new(update_client_height) + .with_timestamp(Timestamp::now()) + .into(), + signer: default_signer.clone(), + })) + .into(), + want_pass: true, + }, + Test { + name: "Client update fails due to stale header".to_string(), + msg: Ics26Envelope::Ics2Msg(ClientMsg::UpdateClient(MsgUpdateAnyClient { + client_id: client_id.clone(), + header: MockHeader::new(update_client_height).into(), + signer: default_signer.clone(), + })) + .into(), + want_pass: false, + }, + Test { + name: "Connection open init succeeds".to_string(), + msg: Ics26Envelope::Ics3Msg(ConnectionMsg::ConnectionOpenInit( + msg_conn_init.with_client_id(client_id.clone()), + )) + .into(), + want_pass: true, + }, + Test { + name: "Connection open try fails due to InvalidConsensusHeight (too high)" + .to_string(), + msg: Ics26Envelope::Ics3Msg(ConnectionMsg::ConnectionOpenTry(Box::new( + incorrect_msg_conn_try, + ))) + .into(), + want_pass: false, + }, + Test { + name: "Connection open try succeeds".to_string(), + msg: Ics26Envelope::Ics3Msg(ConnectionMsg::ConnectionOpenTry(Box::new( + correct_msg_conn_try.with_client_id(client_id.clone()), + ))) + .into(), + want_pass: true, + }, + Test { + name: "Connection open ack succeeds".to_string(), + msg: Ics26Envelope::Ics3Msg(ConnectionMsg::ConnectionOpenAck(Box::new( + msg_conn_ack, + ))) + .into(), + want_pass: true, + }, + // ICS04 + Test { + name: "Channel open init succeeds".to_string(), + msg: Ics26Envelope::Ics4ChannelMsg(ChannelMsg::ChannelOpenInit(msg_chan_init)) + .into(), + want_pass: true, + }, + Test { + name: "Channel open init fail due to missing connection".to_string(), + msg: Ics26Envelope::Ics4ChannelMsg(ChannelMsg::ChannelOpenInit( + incorrect_msg_chan_init, + )) + .into(), + want_pass: false, + }, + Test { + name: "Channel open try succeeds".to_string(), + msg: Ics26Envelope::Ics4ChannelMsg(ChannelMsg::ChannelOpenTry(msg_chan_try)).into(), + want_pass: true, + }, + Test { + name: "Channel open ack succeeds".to_string(), + msg: Ics26Envelope::Ics4ChannelMsg(ChannelMsg::ChannelOpenAck(msg_chan_ack)).into(), + want_pass: true, + }, + Test { name: "Packet send".to_string(), msg: msg_transfer.into(), want_pass: true }, + // The client update is required in this test, because the proof associated with + // msg_recv_packet has the same height as the packet TO height (see + // get_dummy_raw_msg_recv_packet) + Test { + name: "Client update successful #2".to_string(), + msg: Ics26Envelope::Ics2Msg(ClientMsg::UpdateClient(MsgUpdateAnyClient { + client_id: client_id.clone(), + header: MockHeader::new(update_client_height_after_send) + .with_timestamp(Timestamp::now()) + .into(), + signer: default_signer.clone(), + })) + .into(), + want_pass: true, + }, + Test { + name: "Receive packet".to_string(), + msg: Ics26Envelope::Ics4PacketMsg(PacketMsg::RecvPacket(msg_recv_packet.clone())) + .into(), + want_pass: true, + }, + Test { + name: "Re-Receive packet".to_string(), + msg: Ics26Envelope::Ics4PacketMsg(PacketMsg::RecvPacket(msg_recv_packet)).into(), + want_pass: true, + }, + Test { name: "Packet send".to_string(), msg: msg_transfer_two.into(), want_pass: true }, + Test { + name: "Client update successful".to_string(), + msg: Ics26Envelope::Ics2Msg(ClientMsg::UpdateClient(MsgUpdateAnyClient { + client_id: client_id.clone(), + header: MockHeader::new(update_client_height_after_second_send).into(), + 
signer: default_signer.clone(), + })) + .into(), + want_pass: true, + }, + //ICS04-close channel + Test { + name: "Channel close init succeeds".to_string(), + msg: Ics26Envelope::Ics4ChannelMsg(ChannelMsg::ChannelCloseInit( + msg_chan_close_init, + )) + .into(), + want_pass: true, + }, + Test { + name: "Channel close confirm fails cause channel is already closed".to_string(), + msg: Ics26Envelope::Ics4ChannelMsg(ChannelMsg::ChannelCloseConfirm( + msg_chan_close_confirm, + )) + .into(), + want_pass: false, + }, + //ICS04-to_on_close + Test { + name: "Timeout on close".to_string(), + msg: Ics26Envelope::Ics4PacketMsg(PacketMsg::ToClosePacket(msg_to_on_close)).into(), + want_pass: true, + }, + Test { + name: "Client upgrade successful".to_string(), + msg: Ics26Envelope::Ics2Msg(ClientMsg::UpgradeClient(MsgUpgradeAnyClient::new( + client_id.clone(), + AnyClientState::Mock(MockClientState::new(MockHeader::new( + upgrade_client_height, + ))), + AnyConsensusState::Mock(MockConsensusState::new(MockHeader::new( + upgrade_client_height, + ))), + Vec::new(), + Vec::new(), + default_signer.clone(), + ))) + .into(), + want_pass: true, + }, + Test { + name: "Client upgrade un-successful".to_string(), + msg: Ics26Envelope::Ics2Msg(ClientMsg::UpgradeClient(MsgUpgradeAnyClient::new( + client_id, + AnyClientState::Mock(MockClientState::new(MockHeader::new( + upgrade_client_height_second, + ))), + AnyConsensusState::Mock(MockConsensusState::new(MockHeader::new( + upgrade_client_height_second, + ))), + Vec::new(), + Vec::new(), + default_signer, + ))) + .into(), + want_pass: false, + }, + ] + .into_iter() + .collect(); + + for test in tests { + let res = match test.msg.clone() { + TestMsg::Ics26(msg) => dispatch(&mut ctx, msg).map(|_| ()), + TestMsg::Ics20(msg) => { + let transfer_module = + ctx.router_mut().get_route_mut(&transfer_module_id).unwrap(); + ics20_deliver( + transfer_module + .as_any_mut() + .downcast_mut::>() + .unwrap(), + &mut HandlerOutputBuilder::new(), + msg, + ) + .map(|_| ()) + .map_err(Error::ics04_channel) + }, + }; + + assert_eq!( + test.want_pass, + res.is_ok(), + "ICS26 routing dispatch test '{}' failed for message {:?}\nwith result: {:?}", + test.name, + test.msg, + res + ); + } + } } diff --git a/modules/src/core/ics26_routing/msgs.rs b/modules/src/core/ics26_routing/msgs.rs index 3f2306e6e9..1857aef394 100644 --- a/modules/src/core/ics26_routing/msgs.rs +++ b/modules/src/core/ics26_routing/msgs.rs @@ -1,156 +1,169 @@ use crate::prelude::*; +use core::fmt::{Debug, Display}; use ibc_proto::google::protobuf::Any; -use crate::core::ics02_client::msgs::{create_client, update_client, upgrade_client, ClientMsg}; -use crate::core::ics03_connection::msgs::{ - conn_open_ack, conn_open_confirm, conn_open_init, conn_open_try, ConnectionMsg, +use crate::core::{ + ics02_client::msgs::{ + create_client, create_client::MsgCreateAnyClient, update_client, + update_client::MsgUpdateAnyClient, upgrade_client, upgrade_client::MsgUpgradeAnyClient, + ClientMsg, + }, + ics03_connection::msgs::{ + conn_open_ack, conn_open_ack::MsgConnectionOpenAck, conn_open_confirm, conn_open_init, + conn_open_try, conn_open_try::MsgConnectionOpenTry, ConnectionMsg, + }, + ics04_channel::msgs::{ + acknowledgement, chan_close_confirm, chan_close_init, chan_open_ack, chan_open_confirm, + chan_open_init, chan_open_try, recv_packet, timeout, timeout_on_close, ChannelMsg, + PacketMsg, + }, + ics26_routing::error::Error, }; -use crate::core::ics04_channel::msgs::{ - acknowledgement, chan_close_confirm, chan_close_init, 
chan_open_ack, chan_open_confirm, - chan_open_init, chan_open_try, recv_packet, timeout, timeout_on_close, ChannelMsg, PacketMsg, + +use crate::core::ics02_client::context::ClientKeeper; +use ibc_proto::ibc::core::{ + client::v1::{MsgCreateClient, MsgUpdateClient, MsgUpgradeClient}, + connection, }; -use crate::core::ics26_routing::error::Error; use tendermint_proto::Protobuf; /// Enumeration of all messages that the local ICS26 module is capable of routing. #[derive(Clone, Debug)] -pub enum Ics26Envelope { - Ics2Msg(ClientMsg), - Ics3Msg(ConnectionMsg), - Ics4ChannelMsg(ChannelMsg), - Ics4PacketMsg(PacketMsg), +pub enum Ics26Envelope +where + C: ClientKeeper + Eq + Clone + Debug, +{ + Ics2Msg(ClientMsg), + Ics3Msg(ConnectionMsg), + Ics4ChannelMsg(ChannelMsg), + Ics4PacketMsg(PacketMsg), } -impl TryFrom for Ics26Envelope { - type Error = Error; +impl TryFrom for Ics26Envelope +where + C: ClientKeeper + Clone + Debug + PartialEq + Eq, + Any: From, + Any: From, + Any: From, + MsgCreateAnyClient: TryFrom, + as TryFrom>::Error: Display, + MsgCreateAnyClient: Protobuf, + MsgUpdateAnyClient: TryFrom, + as TryFrom>::Error: Display, + MsgUpdateAnyClient: Protobuf, + MsgUpgradeAnyClient: TryFrom, + as TryFrom>::Error: Display, + MsgUpgradeAnyClient: Protobuf, + MsgConnectionOpenTry: TryFrom, + as TryFrom>::Error: Display, + MsgConnectionOpenTry: Protobuf, + connection::v1::MsgConnectionOpenAck: From>, + MsgConnectionOpenAck: TryFrom, + as TryFrom>::Error: Display, + MsgConnectionOpenAck: Protobuf, +{ + type Error = Error; - fn try_from(any_msg: Any) -> Result { - match any_msg.type_url.as_str() { - // ICS2 messages - create_client::TYPE_URL => { - // Pop out the message and then wrap it in the corresponding type. - let domain_msg = create_client::MsgCreateAnyClient::decode_vec(&any_msg.value) - .map_err(Error::malformed_message_bytes)?; - Ok(Ics26Envelope::Ics2Msg(ClientMsg::CreateClient(domain_msg))) - } - update_client::TYPE_URL => { - let domain_msg = update_client::MsgUpdateAnyClient::decode_vec(&any_msg.value) - .map_err(Error::malformed_message_bytes)?; - Ok(Ics26Envelope::Ics2Msg(ClientMsg::UpdateClient(domain_msg))) - } - upgrade_client::TYPE_URL => { - let domain_msg = upgrade_client::MsgUpgradeAnyClient::decode_vec(&any_msg.value) - .map_err(Error::malformed_message_bytes)?; - Ok(Ics26Envelope::Ics2Msg(ClientMsg::UpgradeClient(domain_msg))) - } + fn try_from(any_msg: Any) -> Result { + match any_msg.type_url.as_str() { + // ICS2 messages + create_client::TYPE_URL => { + // Pop out the message and then wrap it in the corresponding type. 
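+                // NOTE (annotation, not part of the diff): every arm in this match follows the
+                // same decode-and-wrap pattern — `any_msg.type_url` selects the domain type, the
+                // raw protobuf bytes in `any_msg.value` are deserialised with `Protobuf::decode_vec`,
+                // and the result is wrapped in the matching `Ics26Envelope` variant. What changes in
+                // this hunk is that the client- and connection-level messages are now parameterised
+                // over the `ClientKeeper` context `C`, which is why the decode calls name the context
+                // type explicitly and why the impl carries the long `where` clause above.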
+ let domain_msg = MsgCreateAnyClient::::decode_vec(&any_msg.value) + .map_err(Error::malformed_message_bytes)?; + Ok(Ics26Envelope::Ics2Msg(ClientMsg::CreateClient(domain_msg))) + }, + update_client::TYPE_URL => { + let domain_msg = MsgUpdateAnyClient::::decode_vec(&any_msg.value) + .map_err(Error::malformed_message_bytes)?; + Ok(Ics26Envelope::Ics2Msg(ClientMsg::UpdateClient(domain_msg))) + }, + upgrade_client::TYPE_URL => { + let domain_msg = + upgrade_client::MsgUpgradeAnyClient::::decode_vec(&any_msg.value) + .map_err(Error::malformed_message_bytes)?; + Ok(Ics26Envelope::Ics2Msg(ClientMsg::UpgradeClient(domain_msg))) + }, - // ICS03 - conn_open_init::TYPE_URL => { - let domain_msg = conn_open_init::MsgConnectionOpenInit::decode_vec(&any_msg.value) - .map_err(Error::malformed_message_bytes)?; - Ok(Ics26Envelope::Ics3Msg(ConnectionMsg::ConnectionOpenInit( - domain_msg, - ))) - } - conn_open_try::TYPE_URL => { - let domain_msg = conn_open_try::MsgConnectionOpenTry::decode_vec(&any_msg.value) - .map_err(Error::malformed_message_bytes)?; - Ok(Ics26Envelope::Ics3Msg(ConnectionMsg::ConnectionOpenTry( - Box::new(domain_msg), - ))) - } - conn_open_ack::TYPE_URL => { - let domain_msg = conn_open_ack::MsgConnectionOpenAck::decode_vec(&any_msg.value) - .map_err(Error::malformed_message_bytes)?; - Ok(Ics26Envelope::Ics3Msg(ConnectionMsg::ConnectionOpenAck( - Box::new(domain_msg), - ))) - } - conn_open_confirm::TYPE_URL => { - let domain_msg = - conn_open_confirm::MsgConnectionOpenConfirm::decode_vec(&any_msg.value) - .map_err(Error::malformed_message_bytes)?; - Ok(Ics26Envelope::Ics3Msg( - ConnectionMsg::ConnectionOpenConfirm(domain_msg), - )) - } + // ICS03 + conn_open_init::TYPE_URL => { + let domain_msg = conn_open_init::MsgConnectionOpenInit::decode_vec(&any_msg.value) + .map_err(Error::malformed_message_bytes)?; + Ok(Ics26Envelope::Ics3Msg(ConnectionMsg::ConnectionOpenInit(domain_msg))) + }, + conn_open_try::TYPE_URL => { + let domain_msg = MsgConnectionOpenTry::::decode_vec(&any_msg.value) + .map_err(Error::malformed_message_bytes)?; + Ok(Ics26Envelope::Ics3Msg(ConnectionMsg::ConnectionOpenTry(Box::new(domain_msg)))) + }, + conn_open_ack::TYPE_URL => { + let domain_msg = MsgConnectionOpenAck::::decode_vec(&any_msg.value) + .map_err(Error::malformed_message_bytes)?; + Ok(Ics26Envelope::Ics3Msg(ConnectionMsg::ConnectionOpenAck(Box::new(domain_msg)))) + }, + conn_open_confirm::TYPE_URL => { + let domain_msg = + conn_open_confirm::MsgConnectionOpenConfirm::decode_vec(&any_msg.value) + .map_err(Error::malformed_message_bytes)?; + Ok(Ics26Envelope::Ics3Msg(ConnectionMsg::ConnectionOpenConfirm(domain_msg))) + }, - // ICS04 channel messages - chan_open_init::TYPE_URL => { - let domain_msg = chan_open_init::MsgChannelOpenInit::decode_vec(&any_msg.value) - .map_err(Error::malformed_message_bytes)?; - Ok(Ics26Envelope::Ics4ChannelMsg(ChannelMsg::ChannelOpenInit( - domain_msg, - ))) - } - chan_open_try::TYPE_URL => { - let domain_msg = chan_open_try::MsgChannelOpenTry::decode_vec(&any_msg.value) - .map_err(Error::malformed_message_bytes)?; - Ok(Ics26Envelope::Ics4ChannelMsg(ChannelMsg::ChannelOpenTry( - domain_msg, - ))) - } - chan_open_ack::TYPE_URL => { - let domain_msg = chan_open_ack::MsgChannelOpenAck::decode_vec(&any_msg.value) - .map_err(Error::malformed_message_bytes)?; - Ok(Ics26Envelope::Ics4ChannelMsg(ChannelMsg::ChannelOpenAck( - domain_msg, - ))) - } - chan_open_confirm::TYPE_URL => { - let domain_msg = - chan_open_confirm::MsgChannelOpenConfirm::decode_vec(&any_msg.value) - 
.map_err(Error::malformed_message_bytes)?; - Ok(Ics26Envelope::Ics4ChannelMsg( - ChannelMsg::ChannelOpenConfirm(domain_msg), - )) - } - chan_close_init::TYPE_URL => { - let domain_msg = chan_close_init::MsgChannelCloseInit::decode_vec(&any_msg.value) - .map_err(Error::malformed_message_bytes)?; - Ok(Ics26Envelope::Ics4ChannelMsg(ChannelMsg::ChannelCloseInit( - domain_msg, - ))) - } - chan_close_confirm::TYPE_URL => { - let domain_msg = - chan_close_confirm::MsgChannelCloseConfirm::decode_vec(&any_msg.value) - .map_err(Error::malformed_message_bytes)?; - Ok(Ics26Envelope::Ics4ChannelMsg( - ChannelMsg::ChannelCloseConfirm(domain_msg), - )) - } - // ICS04 packet messages - recv_packet::TYPE_URL => { - let domain_msg = recv_packet::MsgRecvPacket::decode_vec(&any_msg.value) - .map_err(Error::malformed_message_bytes)?; - Ok(Ics26Envelope::Ics4PacketMsg(PacketMsg::RecvPacket( - domain_msg, - ))) - } - acknowledgement::TYPE_URL => { - let domain_msg = acknowledgement::MsgAcknowledgement::decode_vec(&any_msg.value) - .map_err(Error::malformed_message_bytes)?; - Ok(Ics26Envelope::Ics4PacketMsg(PacketMsg::AckPacket( - domain_msg, - ))) - } - timeout::TYPE_URL => { - let domain_msg = timeout::MsgTimeout::decode_vec(&any_msg.value) - .map_err(Error::malformed_message_bytes)?; - Ok(Ics26Envelope::Ics4PacketMsg(PacketMsg::ToPacket( - domain_msg, - ))) - } - timeout_on_close::TYPE_URL => { - let domain_msg = timeout_on_close::MsgTimeoutOnClose::decode_vec(&any_msg.value) - .map_err(Error::malformed_message_bytes)?; - Ok(Ics26Envelope::Ics4PacketMsg(PacketMsg::ToClosePacket( - domain_msg, - ))) - } - _ => Err(Error::unknown_message_type_url(any_msg.type_url)), - } - } + // ICS04 channel messages + chan_open_init::TYPE_URL => { + let domain_msg = chan_open_init::MsgChannelOpenInit::decode_vec(&any_msg.value) + .map_err(Error::malformed_message_bytes)?; + Ok(Ics26Envelope::Ics4ChannelMsg(ChannelMsg::ChannelOpenInit(domain_msg))) + }, + chan_open_try::TYPE_URL => { + let domain_msg = chan_open_try::MsgChannelOpenTry::decode_vec(&any_msg.value) + .map_err(Error::malformed_message_bytes)?; + Ok(Ics26Envelope::Ics4ChannelMsg(ChannelMsg::ChannelOpenTry(domain_msg))) + }, + chan_open_ack::TYPE_URL => { + let domain_msg = chan_open_ack::MsgChannelOpenAck::decode_vec(&any_msg.value) + .map_err(Error::malformed_message_bytes)?; + Ok(Ics26Envelope::Ics4ChannelMsg(ChannelMsg::ChannelOpenAck(domain_msg))) + }, + chan_open_confirm::TYPE_URL => { + let domain_msg = + chan_open_confirm::MsgChannelOpenConfirm::decode_vec(&any_msg.value) + .map_err(Error::malformed_message_bytes)?; + Ok(Ics26Envelope::Ics4ChannelMsg(ChannelMsg::ChannelOpenConfirm(domain_msg))) + }, + chan_close_init::TYPE_URL => { + let domain_msg = chan_close_init::MsgChannelCloseInit::decode_vec(&any_msg.value) + .map_err(Error::malformed_message_bytes)?; + Ok(Ics26Envelope::Ics4ChannelMsg(ChannelMsg::ChannelCloseInit(domain_msg))) + }, + chan_close_confirm::TYPE_URL => { + let domain_msg = + chan_close_confirm::MsgChannelCloseConfirm::decode_vec(&any_msg.value) + .map_err(Error::malformed_message_bytes)?; + Ok(Ics26Envelope::Ics4ChannelMsg(ChannelMsg::ChannelCloseConfirm(domain_msg))) + }, + // ICS04 packet messages + recv_packet::TYPE_URL => { + let domain_msg = recv_packet::MsgRecvPacket::decode_vec(&any_msg.value) + .map_err(Error::malformed_message_bytes)?; + Ok(Ics26Envelope::Ics4PacketMsg(PacketMsg::RecvPacket(domain_msg))) + }, + acknowledgement::TYPE_URL => { + let domain_msg = acknowledgement::MsgAcknowledgement::decode_vec(&any_msg.value) + 
.map_err(Error::malformed_message_bytes)?; + Ok(Ics26Envelope::Ics4PacketMsg(PacketMsg::AckPacket(domain_msg))) + }, + timeout::TYPE_URL => { + let domain_msg = timeout::MsgTimeout::decode_vec(&any_msg.value) + .map_err(Error::malformed_message_bytes)?; + Ok(Ics26Envelope::Ics4PacketMsg(PacketMsg::ToPacket(domain_msg))) + }, + timeout_on_close::TYPE_URL => { + let domain_msg = timeout_on_close::MsgTimeoutOnClose::decode_vec(&any_msg.value) + .map_err(Error::malformed_message_bytes)?; + Ok(Ics26Envelope::Ics4PacketMsg(PacketMsg::ToClosePacket(domain_msg))) + }, + _ => Err(Error::unknown_message_type_url(any_msg.type_url)), + } + } } diff --git a/modules/src/events.rs b/modules/src/events.rs index 721b9cc41b..f3c31f9309 100644 --- a/modules/src/events.rs +++ b/modules/src/events.rs @@ -1,95 +1,96 @@ use crate::prelude::*; use alloc::collections::btree_map::BTreeMap as HashMap; -use core::convert::{TryFrom, TryInto}; -use core::fmt; -use core::str::FromStr; +use core::{convert::TryFrom, fmt, str::FromStr}; use flex_error::{define_error, TraceError}; use prost::alloc::fmt::Formatter; use serde_derive::{Deserialize, Serialize}; -use tendermint::abci::Event as AbciEvent; -use tendermint::abci::EventAttribute; - -use crate::core::ics02_client::error as client_error; -use crate::core::ics02_client::events as ClientEvents; -use crate::core::ics02_client::events::NewBlock; -use crate::core::ics02_client::height::HeightError; -use crate::core::ics03_connection::events as ConnectionEvents; -use crate::core::ics03_connection::events::Attributes as ConnectionAttributes; -use crate::core::ics04_channel::error as channel_error; -use crate::core::ics04_channel::events as ChannelEvents; -use crate::core::ics04_channel::events::Attributes as ChannelAttributes; -use crate::core::ics04_channel::packet::Packet; -use crate::core::ics24_host::error::ValidationError; -use crate::core::ics26_routing::context::ModuleId; -use crate::timestamp::ParseTimestampError; -use crate::Height; +use tendermint::abci::{Event as AbciEvent, EventAttribute}; + +use crate::{ + core::{ + ics02_client::{ + error as client_error, events as ClientEvents, events::NewBlock, height::HeightError, + }, + ics03_connection::{ + events as ConnectionEvents, events::Attributes as ConnectionAttributes, + }, + ics04_channel::{ + error as channel_error, events as ChannelEvents, + events::Attributes as ChannelAttributes, packet::Packet, + }, + ics24_host::error::ValidationError, + ics26_routing::context::ModuleId, + }, + timestamp::ParseTimestampError, + Height, +}; define_error! 
{ - Error { - Height - [ HeightError ] - | _ | { "error parsing height" }, - - Parse - [ ValidationError ] - | _ | { "parse error" }, - - Client - [ client_error::Error ] - | _ | { "ICS02 client error" }, - - Channel - [ channel_error::Error ] - | _ | { "channel error" }, - - Timestamp - [ ParseTimestampError ] - | _ | { "error parsing timestamp" }, - - MissingKey - { key: String } - | e | { format_args!("missing event key {}", e.key) }, - - Decode - [ TraceError ] - | _ | { "error decoding protobuf" }, - - SubtleEncoding - [ TraceError ] - | _ | { "error decoding hex" }, - - MissingActionString - | _ | { "missing action string" }, - - IncorrectEventType - { event: String } - | e | { format_args!("incorrect event type: {}", e.event) }, - - MalformedModuleEvent - { event: ModuleEvent } - | e | { format_args!("module event cannot use core event types: {:?}", e.event) }, - } + Error { + Height + [ HeightError ] + | _ | { "error parsing height" }, + + Parse + [ ValidationError ] + | _ | { "parse error" }, + + Client + [ client_error::Error ] + | _ | { "ICS02 client error" }, + + Channel + [ channel_error::Error ] + | _ | { "channel error" }, + + Timestamp + [ ParseTimestampError ] + | _ | { "error parsing timestamp" }, + + MissingKey + { key: String } + | e | { format_args!("missing event key {}", e.key) }, + + Decode + [ TraceError ] + | _ | { "error decoding protobuf" }, + + SubtleEncoding + [ TraceError ] + | _ | { "error decoding hex" }, + + MissingActionString + | _ | { "missing action string" }, + + IncorrectEventType + { event: String } + | e | { format_args!("incorrect event type: {}", e.event) }, + + MalformedModuleEvent + { event: ModuleEvent } + | e | { format_args!("module event cannot use core event types: {:?}", e.event) }, + } } /// Events whose data is not included in the app state and must be extracted using tendermint RPCs /// (i.e. 
/tx_search or /block_search) #[derive(Debug, Clone, Deserialize, Serialize)] pub enum WithBlockDataType { - CreateClient, - UpdateClient, - SendPacket, - WriteAck, + CreateClient, + UpdateClient, + SendPacket, + WriteAck, } impl WithBlockDataType { - pub fn as_str(&self) -> &'static str { - match *self { - WithBlockDataType::CreateClient => "create_client", - WithBlockDataType::UpdateClient => "update_client", - WithBlockDataType::SendPacket => "send_packet", - WithBlockDataType::WriteAck => "write_acknowledgement", - } - } + pub fn as_str(&self) -> &'static str { + match *self { + WithBlockDataType::CreateClient => "create_client", + WithBlockDataType::UpdateClient => "update_client", + WithBlockDataType::SendPacket => "send_packet", + WithBlockDataType::WriteAck => "write_acknowledgement", + } + } } const NEW_BLOCK_EVENT: &str = "new_block"; @@ -124,474 +125,406 @@ const TIMEOUT_ON_CLOSE_EVENT: &str = "timeout_packet_on_close"; /// Events types #[derive(Debug, Clone, Deserialize, Serialize)] pub enum IbcEventType { - NewBlock, - CreateClient, - UpdateClient, - UpgradeClient, - ClientMisbehaviour, - OpenInitConnection, - OpenTryConnection, - OpenAckConnection, - OpenConfirmConnection, - OpenInitChannel, - OpenTryChannel, - OpenAckChannel, - OpenConfirmChannel, - CloseInitChannel, - CloseConfirmChannel, - SendPacket, - ReceivePacket, - WriteAck, - AckPacket, - Timeout, - TimeoutOnClose, - AppModule, - Empty, - ChainError, + NewBlock, + CreateClient, + UpdateClient, + UpgradeClient, + ClientMisbehaviour, + OpenInitConnection, + OpenTryConnection, + OpenAckConnection, + OpenConfirmConnection, + OpenInitChannel, + OpenTryChannel, + OpenAckChannel, + OpenConfirmChannel, + CloseInitChannel, + CloseConfirmChannel, + SendPacket, + ReceivePacket, + WriteAck, + AckPacket, + Timeout, + TimeoutOnClose, + AppModule, + Empty, + ChainError, } impl IbcEventType { - pub fn as_str(&self) -> &'static str { - match *self { - IbcEventType::NewBlock => NEW_BLOCK_EVENT, - IbcEventType::CreateClient => CREATE_CLIENT_EVENT, - IbcEventType::UpdateClient => UPDATE_CLIENT_EVENT, - IbcEventType::UpgradeClient => UPGRADE_CLIENT_EVENT, - IbcEventType::ClientMisbehaviour => CLIENT_MISBEHAVIOUR_EVENT, - IbcEventType::OpenInitConnection => CONNECTION_INIT_EVENT, - IbcEventType::OpenTryConnection => CONNECTION_TRY_EVENT, - IbcEventType::OpenAckConnection => CONNECTION_ACK_EVENT, - IbcEventType::OpenConfirmConnection => CONNECTION_CONFIRM_EVENT, - IbcEventType::OpenInitChannel => CHANNEL_OPEN_INIT_EVENT, - IbcEventType::OpenTryChannel => CHANNEL_OPEN_TRY_EVENT, - IbcEventType::OpenAckChannel => CHANNEL_OPEN_ACK_EVENT, - IbcEventType::OpenConfirmChannel => CHANNEL_OPEN_CONFIRM_EVENT, - IbcEventType::CloseInitChannel => CHANNEL_CLOSE_INIT_EVENT, - IbcEventType::CloseConfirmChannel => CHANNEL_CLOSE_CONFIRM_EVENT, - IbcEventType::SendPacket => SEND_PACKET_EVENT, - IbcEventType::ReceivePacket => RECEIVE_PACKET_EVENT, - IbcEventType::WriteAck => WRITE_ACK_EVENT, - IbcEventType::AckPacket => ACK_PACKET_EVENT, - IbcEventType::Timeout => TIMEOUT_EVENT, - IbcEventType::TimeoutOnClose => TIMEOUT_ON_CLOSE_EVENT, - IbcEventType::AppModule => APP_MODULE_EVENT, - IbcEventType::Empty => EMPTY_EVENT, - IbcEventType::ChainError => CHAIN_ERROR_EVENT, - } - } + pub fn as_str(&self) -> &'static str { + match *self { + IbcEventType::NewBlock => NEW_BLOCK_EVENT, + IbcEventType::CreateClient => CREATE_CLIENT_EVENT, + IbcEventType::UpdateClient => UPDATE_CLIENT_EVENT, + IbcEventType::UpgradeClient => UPGRADE_CLIENT_EVENT, + 
IbcEventType::ClientMisbehaviour => CLIENT_MISBEHAVIOUR_EVENT, + IbcEventType::OpenInitConnection => CONNECTION_INIT_EVENT, + IbcEventType::OpenTryConnection => CONNECTION_TRY_EVENT, + IbcEventType::OpenAckConnection => CONNECTION_ACK_EVENT, + IbcEventType::OpenConfirmConnection => CONNECTION_CONFIRM_EVENT, + IbcEventType::OpenInitChannel => CHANNEL_OPEN_INIT_EVENT, + IbcEventType::OpenTryChannel => CHANNEL_OPEN_TRY_EVENT, + IbcEventType::OpenAckChannel => CHANNEL_OPEN_ACK_EVENT, + IbcEventType::OpenConfirmChannel => CHANNEL_OPEN_CONFIRM_EVENT, + IbcEventType::CloseInitChannel => CHANNEL_CLOSE_INIT_EVENT, + IbcEventType::CloseConfirmChannel => CHANNEL_CLOSE_CONFIRM_EVENT, + IbcEventType::SendPacket => SEND_PACKET_EVENT, + IbcEventType::ReceivePacket => RECEIVE_PACKET_EVENT, + IbcEventType::WriteAck => WRITE_ACK_EVENT, + IbcEventType::AckPacket => ACK_PACKET_EVENT, + IbcEventType::Timeout => TIMEOUT_EVENT, + IbcEventType::TimeoutOnClose => TIMEOUT_ON_CLOSE_EVENT, + IbcEventType::AppModule => APP_MODULE_EVENT, + IbcEventType::Empty => EMPTY_EVENT, + IbcEventType::ChainError => CHAIN_ERROR_EVENT, + } + } } impl FromStr for IbcEventType { - type Err = Error; - - fn from_str(s: &str) -> Result { - match s { - NEW_BLOCK_EVENT => Ok(IbcEventType::NewBlock), - CREATE_CLIENT_EVENT => Ok(IbcEventType::CreateClient), - UPDATE_CLIENT_EVENT => Ok(IbcEventType::UpdateClient), - UPGRADE_CLIENT_EVENT => Ok(IbcEventType::UpgradeClient), - CLIENT_MISBEHAVIOUR_EVENT => Ok(IbcEventType::ClientMisbehaviour), - CONNECTION_INIT_EVENT => Ok(IbcEventType::OpenInitConnection), - CONNECTION_TRY_EVENT => Ok(IbcEventType::OpenTryConnection), - CONNECTION_ACK_EVENT => Ok(IbcEventType::OpenAckConnection), - CONNECTION_CONFIRM_EVENT => Ok(IbcEventType::OpenConfirmConnection), - CHANNEL_OPEN_INIT_EVENT => Ok(IbcEventType::OpenInitChannel), - CHANNEL_OPEN_TRY_EVENT => Ok(IbcEventType::OpenTryChannel), - CHANNEL_OPEN_ACK_EVENT => Ok(IbcEventType::OpenAckChannel), - CHANNEL_OPEN_CONFIRM_EVENT => Ok(IbcEventType::OpenConfirmChannel), - CHANNEL_CLOSE_INIT_EVENT => Ok(IbcEventType::CloseInitChannel), - CHANNEL_CLOSE_CONFIRM_EVENT => Ok(IbcEventType::CloseConfirmChannel), - SEND_PACKET_EVENT => Ok(IbcEventType::SendPacket), - RECEIVE_PACKET_EVENT => Ok(IbcEventType::ReceivePacket), - WRITE_ACK_EVENT => Ok(IbcEventType::WriteAck), - ACK_PACKET_EVENT => Ok(IbcEventType::AckPacket), - TIMEOUT_EVENT => Ok(IbcEventType::Timeout), - TIMEOUT_ON_CLOSE_EVENT => Ok(IbcEventType::TimeoutOnClose), - EMPTY_EVENT => Ok(IbcEventType::Empty), - CHAIN_ERROR_EVENT => Ok(IbcEventType::ChainError), - // from_str() for `APP_MODULE_EVENT` MUST fail because a `ModuleEvent`'s type isn't constant - _ => Err(Error::incorrect_event_type(s.to_string())), - } - } + type Err = Error; + + fn from_str(s: &str) -> Result { + match s { + NEW_BLOCK_EVENT => Ok(IbcEventType::NewBlock), + CREATE_CLIENT_EVENT => Ok(IbcEventType::CreateClient), + UPDATE_CLIENT_EVENT => Ok(IbcEventType::UpdateClient), + UPGRADE_CLIENT_EVENT => Ok(IbcEventType::UpgradeClient), + CLIENT_MISBEHAVIOUR_EVENT => Ok(IbcEventType::ClientMisbehaviour), + CONNECTION_INIT_EVENT => Ok(IbcEventType::OpenInitConnection), + CONNECTION_TRY_EVENT => Ok(IbcEventType::OpenTryConnection), + CONNECTION_ACK_EVENT => Ok(IbcEventType::OpenAckConnection), + CONNECTION_CONFIRM_EVENT => Ok(IbcEventType::OpenConfirmConnection), + CHANNEL_OPEN_INIT_EVENT => Ok(IbcEventType::OpenInitChannel), + CHANNEL_OPEN_TRY_EVENT => Ok(IbcEventType::OpenTryChannel), + CHANNEL_OPEN_ACK_EVENT => Ok(IbcEventType::OpenAckChannel), 
+ CHANNEL_OPEN_CONFIRM_EVENT => Ok(IbcEventType::OpenConfirmChannel), + CHANNEL_CLOSE_INIT_EVENT => Ok(IbcEventType::CloseInitChannel), + CHANNEL_CLOSE_CONFIRM_EVENT => Ok(IbcEventType::CloseConfirmChannel), + SEND_PACKET_EVENT => Ok(IbcEventType::SendPacket), + RECEIVE_PACKET_EVENT => Ok(IbcEventType::ReceivePacket), + WRITE_ACK_EVENT => Ok(IbcEventType::WriteAck), + ACK_PACKET_EVENT => Ok(IbcEventType::AckPacket), + TIMEOUT_EVENT => Ok(IbcEventType::Timeout), + TIMEOUT_ON_CLOSE_EVENT => Ok(IbcEventType::TimeoutOnClose), + EMPTY_EVENT => Ok(IbcEventType::Empty), + CHAIN_ERROR_EVENT => Ok(IbcEventType::ChainError), + // from_str() for `APP_MODULE_EVENT` MUST fail because a `ModuleEvent`'s type isn't + // constant + _ => Err(Error::incorrect_event_type(s.to_string())), + } + } } /// Events created by the IBC component of a chain, destined for a relayer. #[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)] pub enum IbcEvent { - NewBlock(NewBlock), - - CreateClient(ClientEvents::CreateClient), - UpdateClient(ClientEvents::UpdateClient), - UpgradeClient(ClientEvents::UpgradeClient), - ClientMisbehaviour(ClientEvents::ClientMisbehaviour), - - OpenInitConnection(ConnectionEvents::OpenInit), - OpenTryConnection(ConnectionEvents::OpenTry), - OpenAckConnection(ConnectionEvents::OpenAck), - OpenConfirmConnection(ConnectionEvents::OpenConfirm), - - OpenInitChannel(ChannelEvents::OpenInit), - OpenTryChannel(ChannelEvents::OpenTry), - OpenAckChannel(ChannelEvents::OpenAck), - OpenConfirmChannel(ChannelEvents::OpenConfirm), - CloseInitChannel(ChannelEvents::CloseInit), - CloseConfirmChannel(ChannelEvents::CloseConfirm), - - SendPacket(ChannelEvents::SendPacket), - ReceivePacket(ChannelEvents::ReceivePacket), - WriteAcknowledgement(ChannelEvents::WriteAcknowledgement), - AcknowledgePacket(ChannelEvents::AcknowledgePacket), - TimeoutPacket(ChannelEvents::TimeoutPacket), - TimeoutOnClosePacket(ChannelEvents::TimeoutOnClosePacket), - - AppModule(ModuleEvent), - - Empty(String), // Special event, signifying empty response - ChainError(String), // Special event, signifying an error on CheckTx or DeliverTx + NewBlock(NewBlock), + + CreateClient(ClientEvents::CreateClient), + UpdateClient(ClientEvents::UpdateClient), + UpgradeClient(ClientEvents::UpgradeClient), + ClientMisbehaviour(ClientEvents::ClientMisbehaviour), + + OpenInitConnection(ConnectionEvents::OpenInit), + OpenTryConnection(ConnectionEvents::OpenTry), + OpenAckConnection(ConnectionEvents::OpenAck), + OpenConfirmConnection(ConnectionEvents::OpenConfirm), + + OpenInitChannel(ChannelEvents::OpenInit), + OpenTryChannel(ChannelEvents::OpenTry), + OpenAckChannel(ChannelEvents::OpenAck), + OpenConfirmChannel(ChannelEvents::OpenConfirm), + CloseInitChannel(ChannelEvents::CloseInit), + CloseConfirmChannel(ChannelEvents::CloseConfirm), + + SendPacket(ChannelEvents::SendPacket), + ReceivePacket(ChannelEvents::ReceivePacket), + WriteAcknowledgement(ChannelEvents::WriteAcknowledgement), + AcknowledgePacket(ChannelEvents::AcknowledgePacket), + TimeoutPacket(ChannelEvents::TimeoutPacket), + TimeoutOnClosePacket(ChannelEvents::TimeoutOnClosePacket), + + AppModule(ModuleEvent), + + Empty(String), // Special event, signifying empty response + ChainError(String), // Special event, signifying an error on CheckTx or DeliverTx } impl Default for IbcEvent { - fn default() -> Self { - Self::Empty("".to_string()) - } + fn default() -> Self { + Self::Empty("".to_string()) + } } /// For use in debug messages pub struct PrettyEvents<'a>(pub &'a [IbcEvent]); 
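// NOTE (annotation, not part of the diff): `PrettyEvents` is just a formatting shim over a
// slice of events; the `Display` impl that follows prints a header line and then one
// tab-indented event per line. A minimal usage sketch for a caller outside this module —
// the function name and the `ibc::events` import path are illustrative assumptions, not
// code from this patch:

use ibc::events::{IbcEvent, PrettyEvents};

/// Logs a batch of IBC events in one call, relying only on the Display impl below.
fn log_events(events: &[IbcEvent]) {
    println!("{}", PrettyEvents(events));
}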
impl<'a> fmt::Display for PrettyEvents<'a> { - fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - writeln!(f, "events:")?; - for v in self.0 { - writeln!(f, "\t{}", v)?; - } - Ok(()) - } + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + writeln!(f, "events:")?; + for v in self.0 { + writeln!(f, "\t{}", v)?; + } + Ok(()) + } } impl fmt::Display for IbcEvent { - fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - match self { - IbcEvent::NewBlock(ev) => write!(f, "NewBlock({})", ev.height), - - IbcEvent::CreateClient(ev) => write!(f, "CreateClientEv({})", ev), - IbcEvent::UpdateClient(ev) => write!(f, "UpdateClientEv({})", ev), - IbcEvent::UpgradeClient(ev) => write!(f, "UpgradeClientEv({:?})", ev), - IbcEvent::ClientMisbehaviour(ev) => write!(f, "ClientMisbehaviourEv({:?})", ev), - - IbcEvent::OpenInitConnection(ev) => write!(f, "OpenInitConnectionEv({:?})", ev), - IbcEvent::OpenTryConnection(ev) => write!(f, "OpenTryConnectionEv({:?})", ev), - IbcEvent::OpenAckConnection(ev) => write!(f, "OpenAckConnectionEv({:?})", ev), - IbcEvent::OpenConfirmConnection(ev) => write!(f, "OpenConfirmConnectionEv({:?})", ev), - - IbcEvent::OpenInitChannel(ev) => write!(f, "OpenInitChannelEv({:?})", ev), - IbcEvent::OpenTryChannel(ev) => write!(f, "OpenTryChannelEv({:?})", ev), - IbcEvent::OpenAckChannel(ev) => write!(f, "OpenAckChannelEv({:?})", ev), - IbcEvent::OpenConfirmChannel(ev) => write!(f, "OpenConfirmChannelEv({:?})", ev), - IbcEvent::CloseInitChannel(ev) => write!(f, "CloseInitChannelEv({})", ev), - IbcEvent::CloseConfirmChannel(ev) => write!(f, "CloseConfirmChannelEv({:?})", ev), - - IbcEvent::SendPacket(ev) => write!(f, "SendPacketEv({})", ev), - IbcEvent::ReceivePacket(ev) => write!(f, "ReceivePacketEv({})", ev), - IbcEvent::WriteAcknowledgement(ev) => write!(f, "WriteAcknowledgementEv({})", ev), - IbcEvent::AcknowledgePacket(ev) => write!(f, "AcknowledgePacketEv({})", ev), - IbcEvent::TimeoutPacket(ev) => write!(f, "TimeoutPacketEv({})", ev), - IbcEvent::TimeoutOnClosePacket(ev) => write!(f, "TimeoutOnClosePacketEv({})", ev), - - IbcEvent::AppModule(ev) => write!(f, "AppModuleEv({:?})", ev), - - IbcEvent::Empty(ev) => write!(f, "EmptyEv({})", ev), - IbcEvent::ChainError(ev) => write!(f, "ChainErrorEv({})", ev), - } - } -} - -impl TryFrom for AbciEvent { - type Error = Error; - - fn try_from(event: IbcEvent) -> Result { - Ok(match event { - IbcEvent::CreateClient(event) => event.into(), - IbcEvent::UpdateClient(event) => event.into(), - IbcEvent::UpgradeClient(event) => event.into(), - IbcEvent::ClientMisbehaviour(event) => event.into(), - IbcEvent::OpenInitConnection(event) => event.into(), - IbcEvent::OpenTryConnection(event) => event.into(), - IbcEvent::OpenAckConnection(event) => event.into(), - IbcEvent::OpenConfirmConnection(event) => event.into(), - IbcEvent::OpenInitChannel(event) => event.into(), - IbcEvent::OpenTryChannel(event) => event.into(), - IbcEvent::OpenAckChannel(event) => event.into(), - IbcEvent::OpenConfirmChannel(event) => event.into(), - IbcEvent::CloseInitChannel(event) => event.into(), - IbcEvent::CloseConfirmChannel(event) => event.into(), - IbcEvent::SendPacket(event) => event.try_into().map_err(Error::channel)?, - IbcEvent::ReceivePacket(event) => event.try_into().map_err(Error::channel)?, - IbcEvent::WriteAcknowledgement(event) => event.try_into().map_err(Error::channel)?, - IbcEvent::AcknowledgePacket(event) => event.try_into().map_err(Error::channel)?, - IbcEvent::TimeoutPacket(event) => event.try_into().map_err(Error::channel)?, - 
IbcEvent::TimeoutOnClosePacket(event) => event.try_into().map_err(Error::channel)?, - IbcEvent::AppModule(event) => event.try_into()?, - IbcEvent::NewBlock(_) | IbcEvent::Empty(_) | IbcEvent::ChainError(_) => { - return Err(Error::incorrect_event_type(event.to_string())) - } - }) - } -} - -// This is tendermint specific -pub fn from_tx_response_event(height: Height, event: &tendermint::abci::Event) -> Option { - // Return the first hit we find - if let Some(mut client_res) = ClientEvents::try_from_tx(event) { - client_res.set_height(height); - Some(client_res) - } else if let Some(mut conn_res) = ConnectionEvents::try_from_tx(event) { - conn_res.set_height(height); - Some(conn_res) - } else if let Some(mut chan_res) = ChannelEvents::try_from_tx(event) { - chan_res.set_height(height); - Some(chan_res) - } else { - None - } + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + match self { + IbcEvent::NewBlock(ev) => write!(f, "NewBlock({})", ev.height), + + IbcEvent::CreateClient(ev) => write!(f, "CreateClientEv({})", ev), + IbcEvent::UpdateClient(ev) => write!(f, "UpdateClientEv({})", ev), + IbcEvent::UpgradeClient(ev) => write!(f, "UpgradeClientEv({:?})", ev), + IbcEvent::ClientMisbehaviour(ev) => write!(f, "ClientMisbehaviourEv({:?})", ev), + + IbcEvent::OpenInitConnection(ev) => write!(f, "OpenInitConnectionEv({:?})", ev), + IbcEvent::OpenTryConnection(ev) => write!(f, "OpenTryConnectionEv({:?})", ev), + IbcEvent::OpenAckConnection(ev) => write!(f, "OpenAckConnectionEv({:?})", ev), + IbcEvent::OpenConfirmConnection(ev) => write!(f, "OpenConfirmConnectionEv({:?})", ev), + + IbcEvent::OpenInitChannel(ev) => write!(f, "OpenInitChannelEv({:?})", ev), + IbcEvent::OpenTryChannel(ev) => write!(f, "OpenTryChannelEv({:?})", ev), + IbcEvent::OpenAckChannel(ev) => write!(f, "OpenAckChannelEv({:?})", ev), + IbcEvent::OpenConfirmChannel(ev) => write!(f, "OpenConfirmChannelEv({:?})", ev), + IbcEvent::CloseInitChannel(ev) => write!(f, "CloseInitChannelEv({})", ev), + IbcEvent::CloseConfirmChannel(ev) => write!(f, "CloseConfirmChannelEv({:?})", ev), + + IbcEvent::SendPacket(ev) => write!(f, "SendPacketEv({})", ev), + IbcEvent::ReceivePacket(ev) => write!(f, "ReceivePacketEv({})", ev), + IbcEvent::WriteAcknowledgement(ev) => write!(f, "WriteAcknowledgementEv({})", ev), + IbcEvent::AcknowledgePacket(ev) => write!(f, "AcknowledgePacketEv({})", ev), + IbcEvent::TimeoutPacket(ev) => write!(f, "TimeoutPacketEv({})", ev), + IbcEvent::TimeoutOnClosePacket(ev) => write!(f, "TimeoutOnClosePacketEv({})", ev), + + IbcEvent::AppModule(ev) => write!(f, "AppModuleEv({:?})", ev), + + IbcEvent::Empty(ev) => write!(f, "EmptyEv({})", ev), + IbcEvent::ChainError(ev) => write!(f, "ChainErrorEv({})", ev), + } + } } impl IbcEvent { - pub fn to_json(&self) -> String { - match serde_json::to_string(self) { - Ok(value) => value, - Err(_) => format!("{:?}", self), // Fallback to debug printing - } - } - - pub fn height(&self) -> Height { - match self { - IbcEvent::NewBlock(bl) => bl.height(), - IbcEvent::CreateClient(ev) => ev.height(), - IbcEvent::UpdateClient(ev) => ev.height(), - IbcEvent::UpgradeClient(ev) => ev.height(), - IbcEvent::ClientMisbehaviour(ev) => ev.height(), - IbcEvent::OpenInitConnection(ev) => ev.height(), - IbcEvent::OpenTryConnection(ev) => ev.height(), - IbcEvent::OpenAckConnection(ev) => ev.height(), - IbcEvent::OpenConfirmConnection(ev) => ev.height(), - IbcEvent::OpenInitChannel(ev) => ev.height(), - IbcEvent::OpenTryChannel(ev) => ev.height(), - IbcEvent::OpenAckChannel(ev) => ev.height(), - 
IbcEvent::OpenConfirmChannel(ev) => ev.height(), - IbcEvent::CloseInitChannel(ev) => ev.height(), - IbcEvent::CloseConfirmChannel(ev) => ev.height(), - IbcEvent::SendPacket(ev) => ev.height(), - IbcEvent::ReceivePacket(ev) => ev.height(), - IbcEvent::WriteAcknowledgement(ev) => ev.height(), - IbcEvent::AcknowledgePacket(ev) => ev.height(), - IbcEvent::TimeoutPacket(ev) => ev.height(), - IbcEvent::TimeoutOnClosePacket(ev) => ev.height(), - _ => unimplemented!(), - } - } - - pub fn set_height(&mut self, height: Height) { - match self { - IbcEvent::NewBlock(ev) => ev.set_height(height), - IbcEvent::CreateClient(ev) => ev.set_height(height), - IbcEvent::UpdateClient(ev) => ev.set_height(height), - IbcEvent::UpgradeClient(ev) => ev.set_height(height), - IbcEvent::ClientMisbehaviour(ev) => ev.set_height(height), - IbcEvent::OpenInitConnection(ev) => ev.set_height(height), - IbcEvent::OpenTryConnection(ev) => ev.set_height(height), - IbcEvent::OpenAckConnection(ev) => ev.set_height(height), - IbcEvent::OpenConfirmConnection(ev) => ev.set_height(height), - IbcEvent::OpenInitChannel(ev) => ev.set_height(height), - IbcEvent::OpenTryChannel(ev) => ev.set_height(height), - IbcEvent::OpenAckChannel(ev) => ev.set_height(height), - IbcEvent::OpenConfirmChannel(ev) => ev.set_height(height), - IbcEvent::CloseInitChannel(ev) => ev.set_height(height), - IbcEvent::CloseConfirmChannel(ev) => ev.set_height(height), - IbcEvent::SendPacket(ev) => ev.set_height(height), - IbcEvent::ReceivePacket(ev) => ev.set_height(height), - IbcEvent::WriteAcknowledgement(ev) => ev.set_height(height), - IbcEvent::AcknowledgePacket(ev) => ev.set_height(height), - IbcEvent::TimeoutPacket(ev) => ev.set_height(height), - _ => unimplemented!(), - } - } - - pub fn event_type(&self) -> IbcEventType { - match self { - IbcEvent::NewBlock(_) => IbcEventType::NewBlock, - IbcEvent::CreateClient(_) => IbcEventType::CreateClient, - IbcEvent::UpdateClient(_) => IbcEventType::UpdateClient, - IbcEvent::ClientMisbehaviour(_) => IbcEventType::ClientMisbehaviour, - IbcEvent::UpgradeClient(_) => IbcEventType::UpgradeClient, - IbcEvent::OpenInitConnection(_) => IbcEventType::OpenInitConnection, - IbcEvent::OpenTryConnection(_) => IbcEventType::OpenTryConnection, - IbcEvent::OpenAckConnection(_) => IbcEventType::OpenAckConnection, - IbcEvent::OpenConfirmConnection(_) => IbcEventType::OpenConfirmConnection, - IbcEvent::OpenInitChannel(_) => IbcEventType::OpenInitChannel, - IbcEvent::OpenTryChannel(_) => IbcEventType::OpenTryChannel, - IbcEvent::OpenAckChannel(_) => IbcEventType::OpenAckChannel, - IbcEvent::OpenConfirmChannel(_) => IbcEventType::OpenConfirmChannel, - IbcEvent::CloseInitChannel(_) => IbcEventType::CloseInitChannel, - IbcEvent::CloseConfirmChannel(_) => IbcEventType::CloseConfirmChannel, - IbcEvent::SendPacket(_) => IbcEventType::SendPacket, - IbcEvent::ReceivePacket(_) => IbcEventType::ReceivePacket, - IbcEvent::WriteAcknowledgement(_) => IbcEventType::WriteAck, - IbcEvent::AcknowledgePacket(_) => IbcEventType::AckPacket, - IbcEvent::TimeoutPacket(_) => IbcEventType::Timeout, - IbcEvent::TimeoutOnClosePacket(_) => IbcEventType::TimeoutOnClose, - IbcEvent::AppModule(_) => IbcEventType::AppModule, - IbcEvent::Empty(_) => IbcEventType::Empty, - IbcEvent::ChainError(_) => IbcEventType::ChainError, - } - } - - pub fn channel_attributes(self) -> Option { - match self { - IbcEvent::OpenInitChannel(ev) => Some(ev.into()), - IbcEvent::OpenTryChannel(ev) => Some(ev.into()), - IbcEvent::OpenAckChannel(ev) => Some(ev.into()), - 
IbcEvent::OpenConfirmChannel(ev) => Some(ev.into()), - _ => None, - } - } - - pub fn connection_attributes(&self) -> Option<&ConnectionAttributes> { - match self { - IbcEvent::OpenInitConnection(ev) => Some(ev.attributes()), - IbcEvent::OpenTryConnection(ev) => Some(ev.attributes()), - IbcEvent::OpenAckConnection(ev) => Some(ev.attributes()), - IbcEvent::OpenConfirmConnection(ev) => Some(ev.attributes()), - _ => None, - } - } - - pub fn packet(&self) -> Option<&Packet> { - match self { - IbcEvent::SendPacket(ev) => Some(&ev.packet), - IbcEvent::ReceivePacket(ev) => Some(&ev.packet), - IbcEvent::WriteAcknowledgement(ev) => Some(&ev.packet), - IbcEvent::AcknowledgePacket(ev) => Some(&ev.packet), - IbcEvent::TimeoutPacket(ev) => Some(&ev.packet), - IbcEvent::TimeoutOnClosePacket(ev) => Some(&ev.packet), - _ => None, - } - } - - pub fn ack(&self) -> Option<&[u8]> { - match self { - IbcEvent::WriteAcknowledgement(ev) => Some(&ev.ack), - _ => None, - } - } + pub fn to_json(&self) -> String { + match serde_json::to_string(self) { + Ok(value) => value, + Err(_) => format!("{:?}", self), // Fallback to debug printing + } + } + + pub fn height(&self) -> Height { + match self { + IbcEvent::NewBlock(bl) => bl.height(), + IbcEvent::CreateClient(ev) => ev.height(), + IbcEvent::UpdateClient(ev) => ev.height(), + IbcEvent::UpgradeClient(ev) => ev.height(), + IbcEvent::ClientMisbehaviour(ev) => ev.height(), + IbcEvent::OpenInitConnection(ev) => ev.height(), + IbcEvent::OpenTryConnection(ev) => ev.height(), + IbcEvent::OpenAckConnection(ev) => ev.height(), + IbcEvent::OpenConfirmConnection(ev) => ev.height(), + IbcEvent::OpenInitChannel(ev) => ev.height(), + IbcEvent::OpenTryChannel(ev) => ev.height(), + IbcEvent::OpenAckChannel(ev) => ev.height(), + IbcEvent::OpenConfirmChannel(ev) => ev.height(), + IbcEvent::CloseInitChannel(ev) => ev.height(), + IbcEvent::CloseConfirmChannel(ev) => ev.height(), + IbcEvent::SendPacket(ev) => ev.height(), + IbcEvent::ReceivePacket(ev) => ev.height(), + IbcEvent::WriteAcknowledgement(ev) => ev.height(), + IbcEvent::AcknowledgePacket(ev) => ev.height(), + IbcEvent::TimeoutPacket(ev) => ev.height(), + IbcEvent::TimeoutOnClosePacket(ev) => ev.height(), + _ => unimplemented!(), + } + } + + pub fn set_height(&mut self, height: Height) { + match self { + IbcEvent::NewBlock(ev) => ev.set_height(height), + IbcEvent::CreateClient(ev) => ev.set_height(height), + IbcEvent::UpdateClient(ev) => ev.set_height(height), + IbcEvent::UpgradeClient(ev) => ev.set_height(height), + IbcEvent::ClientMisbehaviour(ev) => ev.set_height(height), + IbcEvent::OpenInitConnection(ev) => ev.set_height(height), + IbcEvent::OpenTryConnection(ev) => ev.set_height(height), + IbcEvent::OpenAckConnection(ev) => ev.set_height(height), + IbcEvent::OpenConfirmConnection(ev) => ev.set_height(height), + IbcEvent::OpenInitChannel(ev) => ev.set_height(height), + IbcEvent::OpenTryChannel(ev) => ev.set_height(height), + IbcEvent::OpenAckChannel(ev) => ev.set_height(height), + IbcEvent::OpenConfirmChannel(ev) => ev.set_height(height), + IbcEvent::CloseInitChannel(ev) => ev.set_height(height), + IbcEvent::CloseConfirmChannel(ev) => ev.set_height(height), + IbcEvent::SendPacket(ev) => ev.set_height(height), + IbcEvent::ReceivePacket(ev) => ev.set_height(height), + IbcEvent::WriteAcknowledgement(ev) => ev.set_height(height), + IbcEvent::AcknowledgePacket(ev) => ev.set_height(height), + IbcEvent::TimeoutPacket(ev) => ev.set_height(height), + _ => unimplemented!(), + } + } + + pub fn event_type(&self) -> IbcEventType { + 
match self { + IbcEvent::NewBlock(_) => IbcEventType::NewBlock, + IbcEvent::CreateClient(_) => IbcEventType::CreateClient, + IbcEvent::UpdateClient(_) => IbcEventType::UpdateClient, + IbcEvent::ClientMisbehaviour(_) => IbcEventType::ClientMisbehaviour, + IbcEvent::UpgradeClient(_) => IbcEventType::UpgradeClient, + IbcEvent::OpenInitConnection(_) => IbcEventType::OpenInitConnection, + IbcEvent::OpenTryConnection(_) => IbcEventType::OpenTryConnection, + IbcEvent::OpenAckConnection(_) => IbcEventType::OpenAckConnection, + IbcEvent::OpenConfirmConnection(_) => IbcEventType::OpenConfirmConnection, + IbcEvent::OpenInitChannel(_) => IbcEventType::OpenInitChannel, + IbcEvent::OpenTryChannel(_) => IbcEventType::OpenTryChannel, + IbcEvent::OpenAckChannel(_) => IbcEventType::OpenAckChannel, + IbcEvent::OpenConfirmChannel(_) => IbcEventType::OpenConfirmChannel, + IbcEvent::CloseInitChannel(_) => IbcEventType::CloseInitChannel, + IbcEvent::CloseConfirmChannel(_) => IbcEventType::CloseConfirmChannel, + IbcEvent::SendPacket(_) => IbcEventType::SendPacket, + IbcEvent::ReceivePacket(_) => IbcEventType::ReceivePacket, + IbcEvent::WriteAcknowledgement(_) => IbcEventType::WriteAck, + IbcEvent::AcknowledgePacket(_) => IbcEventType::AckPacket, + IbcEvent::TimeoutPacket(_) => IbcEventType::Timeout, + IbcEvent::TimeoutOnClosePacket(_) => IbcEventType::TimeoutOnClose, + IbcEvent::AppModule(_) => IbcEventType::AppModule, + IbcEvent::Empty(_) => IbcEventType::Empty, + IbcEvent::ChainError(_) => IbcEventType::ChainError, + } + } + + pub fn channel_attributes(self) -> Option { + match self { + IbcEvent::OpenInitChannel(ev) => Some(ev.into()), + IbcEvent::OpenTryChannel(ev) => Some(ev.into()), + IbcEvent::OpenAckChannel(ev) => Some(ev.into()), + IbcEvent::OpenConfirmChannel(ev) => Some(ev.into()), + _ => None, + } + } + + pub fn connection_attributes(&self) -> Option<&ConnectionAttributes> { + match self { + IbcEvent::OpenInitConnection(ev) => Some(ev.attributes()), + IbcEvent::OpenTryConnection(ev) => Some(ev.attributes()), + IbcEvent::OpenAckConnection(ev) => Some(ev.attributes()), + IbcEvent::OpenConfirmConnection(ev) => Some(ev.attributes()), + _ => None, + } + } + + pub fn packet(&self) -> Option<&Packet> { + match self { + IbcEvent::SendPacket(ev) => Some(&ev.packet), + IbcEvent::ReceivePacket(ev) => Some(&ev.packet), + IbcEvent::WriteAcknowledgement(ev) => Some(&ev.packet), + IbcEvent::AcknowledgePacket(ev) => Some(&ev.packet), + IbcEvent::TimeoutPacket(ev) => Some(&ev.packet), + IbcEvent::TimeoutOnClosePacket(ev) => Some(&ev.packet), + _ => None, + } + } + + pub fn ack(&self) -> Option<&[u8]> { + match self { + IbcEvent::WriteAcknowledgement(ev) => Some(&ev.ack), + _ => None, + } + } } #[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)] pub struct ModuleEvent { - pub kind: String, - pub module_name: ModuleId, - pub attributes: Vec, + pub kind: String, + pub module_name: ModuleId, + pub attributes: Vec, } impl TryFrom for AbciEvent { - type Error = Error; - - fn try_from(event: ModuleEvent) -> Result { - if IbcEventType::from_str(event.kind.as_str()).is_ok() { - return Err(Error::malformed_module_event(event)); - } - - let attributes = event.attributes.into_iter().map(Into::into).collect(); - Ok(AbciEvent { - kind: event.kind, - attributes, - }) - } + type Error = Error; + + fn try_from(event: ModuleEvent) -> Result { + if IbcEventType::from_str(event.kind.as_str()).is_ok() { + return Err(Error::malformed_module_event(event)) + } + + let attributes = 
event.attributes.into_iter().map(Into::into).collect(); + Ok(AbciEvent { kind: event.kind, attributes }) + } } impl From for IbcEvent { - fn from(e: ModuleEvent) -> Self { - IbcEvent::AppModule(e) - } + fn from(e: ModuleEvent) -> Self { + IbcEvent::AppModule(e) + } } #[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)] pub struct ModuleEventAttribute { - pub key: String, - pub value: String, + pub key: String, + pub value: String, } impl From<(K, V)> for ModuleEventAttribute { - fn from((k, v): (K, V)) -> Self { - Self { - key: k.to_string(), - value: v.to_string(), - } - } + fn from((k, v): (K, V)) -> Self { + Self { key: k.to_string(), value: v.to_string() } + } } impl From for EventAttribute { - fn from(attr: ModuleEventAttribute) -> Self { - Self { - key: attr - .key - .parse() - .expect("Key::from_str() impl is infallible"), - value: attr - .key - .parse() - .expect("Value::from_str() impl is infallible"), - index: false, - } - } + fn from(attr: ModuleEventAttribute) -> Self { + Self { + key: attr.key.parse().expect("Key::from_str() impl is infallible"), + value: attr.key.parse().expect("Value::from_str() impl is infallible"), + index: false, + } + } } #[derive(Debug, Clone, Serialize)] pub struct RawObject<'a> { - pub height: Height, - pub action: String, - pub idx: usize, - pub events: &'a HashMap>, + pub height: Height, + pub action: String, + pub idx: usize, + pub events: &'a HashMap>, } impl<'a> RawObject<'a> { - pub fn new( - height: Height, - action: String, - idx: usize, - events: &'a HashMap>, - ) -> RawObject<'a> { - RawObject { - height, - action, - idx, - events, - } - } + pub fn new( + height: Height, + action: String, + idx: usize, + events: &'a HashMap>, + ) -> RawObject<'a> { + RawObject { height, action, idx, events } + } } pub fn extract_events( - events: &HashMap>, - action_string: &str, + events: &HashMap>, + action_string: &str, ) -> Result<(), Error> { - if let Some(message_action) = events.get("message.action") { - if message_action.contains(&action_string.to_owned()) { - return Ok(()); - } - return Err(Error::missing_action_string()); - } - Err(Error::incorrect_event_type(action_string.to_string())) + if let Some(message_action) = events.get("message.action") { + if message_action.contains(&action_string.to_owned()) { + return Ok(()) + } + return Err(Error::missing_action_string()) + } + Err(Error::incorrect_event_type(action_string.to_string())) } pub fn extract_attribute(object: &RawObject<'_>, key: &str) -> Result { - let value = object - .events - .get(key) - .ok_or_else(|| Error::missing_key(key.to_string()))?[object.idx] - .clone(); + let value = object.events.get(key).ok_or_else(|| Error::missing_key(key.to_string()))? 
+ [object.idx] + .clone(); - Ok(value) + Ok(value) } pub fn maybe_extract_attribute(object: &RawObject<'_>, key: &str) -> Option { - object.events.get(key).map(|tags| tags[object.idx].clone()) + object.events.get(key).map(|tags| tags[object.idx].clone()) } diff --git a/modules/src/handler.rs b/modules/src/handler.rs index f8bf203ebb..0325101934 100644 --- a/modules/src/handler.rs +++ b/modules/src/handler.rs @@ -1,79 +1,64 @@ -use crate::events::IbcEvent; -use crate::prelude::*; +use crate::{events::IbcEvent, prelude::*}; use core::marker::PhantomData; pub type HandlerResult = Result, E>; #[derive(Clone, Debug)] pub struct HandlerOutput { - pub result: T, - pub log: Vec, - pub events: Vec, + pub result: T, + pub log: Vec, + pub events: Vec, } impl HandlerOutput { - pub fn builder() -> HandlerOutputBuilder { - HandlerOutputBuilder::new() - } + pub fn builder() -> HandlerOutputBuilder { + HandlerOutputBuilder::new() + } } #[derive(Clone, Debug, Default)] pub struct HandlerOutputBuilder { - log: Vec, - events: Vec, - marker: PhantomData, + log: Vec, + events: Vec, + marker: PhantomData, } impl HandlerOutputBuilder { - pub fn new() -> Self { - Self { - log: Vec::new(), - events: Vec::new(), - marker: PhantomData, - } - } + pub fn new() -> Self { + Self { log: Vec::new(), events: Vec::new(), marker: PhantomData } + } - pub fn with_log(mut self, log: impl Into>) -> Self { - self.log.append(&mut log.into()); - self - } + pub fn with_log(mut self, log: impl Into>) -> Self { + self.log.append(&mut log.into()); + self + } - pub fn log(&mut self, log: impl Into) { - self.log.push(log.into()); - } + pub fn log(&mut self, log: impl Into) { + self.log.push(log.into()); + } - pub fn with_events(mut self, mut events: Vec) -> Self { - self.events.append(&mut events); - self - } + pub fn with_events(mut self, mut events: Vec) -> Self { + self.events.append(&mut events); + self + } - pub fn emit(&mut self, event: E) { - self.events.push(event); - } + pub fn emit(&mut self, event: E) { + self.events.push(event); + } - pub fn with_result(self, result: T) -> HandlerOutput { - HandlerOutput { - result, - log: self.log, - events: self.events, - } - } + pub fn with_result(self, result: T) -> HandlerOutput { + HandlerOutput { result, log: self.log, events: self.events } + } - pub fn merge>(&mut self, other: HandlerOutputBuilder<(), Event>) { - let HandlerOutputBuilder { - mut log, events, .. - } = other; - self.log.append(&mut log); - self.events - .append(&mut events.into_iter().map(Into::into).collect()); - } + pub fn merge>(&mut self, other: HandlerOutputBuilder<(), Event>) { + let HandlerOutputBuilder { mut log, events, .. } = other; + self.log.append(&mut log); + self.events.append(&mut events.into_iter().map(Into::into).collect()); + } - pub fn merge_output>(&mut self, other: HandlerOutput<(), Event>) { - let HandlerOutput { - mut log, events, .. - } = other; - self.log.append(&mut log); - self.events - .append(&mut events.into_iter().map(Into::into).collect()); - } + pub fn merge_output>(&mut self, other: HandlerOutput<(), Event>) { + let HandlerOutput { mut log, events, .. 
} = other; + self.log.append(&mut log); + self.events.append(&mut events.into_iter().map(Into::into).collect()); + } } diff --git a/modules/src/lib.rs b/modules/src/lib.rs index ae1121e581..6e0df79289 100644 --- a/modules/src/lib.rs +++ b/modules/src/lib.rs @@ -1,7 +1,6 @@ // TODO: disable unwraps: // https://github.com/informalsystems/ibc-rs/issues/987 // #![cfg_attr(not(test), deny(clippy::unwrap_used))] - #![cfg_attr(not(feature = "std"), no_std)] #![allow(clippy::large_enum_variant)] #![deny( @@ -34,8 +33,9 @@ //! `Applications` consists of various packet encoding and processing semantics which underpin the //! various types of transactions that users can perform on any IBC-compliant chain. //! -//! `Relayer` contains utilities for testing the `ibc` crate against the [Hermes IBC relayer][relayer-repo]. It acts -//! as scaffolding for gluing the `ibc` crate with Hermes for testing purposes. +//! `Relayer` contains utilities for testing the `ibc` crate against the [Hermes IBC +//! relayer][relayer-repo]. It acts as scaffolding for gluing the `ibc` crate with Hermes for +//! testing purposes. //! //! [core]: https://github.com/informalsystems/ibc-rs/tree/master/modules/src/core //! [clients]: https://github.com/informalsystems/ibc-rs/tree/master/modules/src/clients @@ -45,38 +45,36 @@ //! [relayer-repo]: https://github.com/informalsystems/ibc-rs/tree/master/relayer extern crate alloc; - +#[allow(unused_imports)] +#[macro_use] +extern crate derive; #[cfg(feature = "std")] extern crate std; -mod prelude; +pub mod prelude; pub mod applications; pub mod bigint; -pub mod clients; pub mod core; pub mod events; pub mod handler; pub mod keys; pub mod macros; pub mod proofs; -#[cfg(feature = "std")] -pub mod query; -pub mod relayer; pub mod signer; pub mod timestamp; -pub mod tx_msg; +pub mod tx_msg; // Context mock, the underlying host chain, and client types: for testing all handlers. mod serializers; /// Re-export of ICS 002 Height domain type -pub type Height = crate::core::ics02_client::height::Height; +pub type Height = core::ics02_client::height::Height; -#[cfg(test)] -mod test; +#[cfg(any(test, feature = "mocks"))] +pub mod test; #[cfg(any(test, feature = "mocks"))] pub mod test_utils; #[cfg(any(test, feature = "mocks"))] -pub mod mock; // Context mock, the underlying host chain, and client types: for testing all handlers. 
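// Illustrative sketch (editor's aside, not part of the diff above): the handler.rs hunk
// keeps `HandlerOutputBuilder`'s accumulate-then-seal flow intact. Assuming the usual
// `<T, E>` type parameters that this rendering of the patch dropped, a minimal use
// looks like this:
fn handler_output_sketch() -> HandlerOutput<u64, String> {
    let mut builder = HandlerOutput::<u64, String>::builder();
    builder.log("processed message");      // appended to `log`
    builder.emit("transfer".to_string());  // appended to `events`
    builder.with_result(42)                // consumes the builder, sealing the output
}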
+pub mod mock; diff --git a/modules/src/mock/client_def.rs b/modules/src/mock/client_def.rs index 728d5be17a..70b59f518c 100644 --- a/modules/src/mock/client_def.rs +++ b/modules/src/mock/client_def.rs @@ -1,226 +1,250 @@ -use crate::core::ics02_client::client_consensus::AnyConsensusState; -use crate::core::ics02_client::client_def::{ClientDef, ConsensusUpdateResult}; -use crate::core::ics02_client::client_state::AnyClientState; -use crate::core::ics02_client::error::Error; -use crate::core::ics03_connection::connection::ConnectionEnd; -use crate::core::ics04_channel::channel::ChannelEnd; -use crate::core::ics04_channel::commitment::{AcknowledgementCommitment, PacketCommitment}; -use crate::core::ics04_channel::packet::Sequence; -use crate::core::ics23_commitment::commitment::{ - CommitmentPrefix, CommitmentProofBytes, CommitmentRoot, +use crate::core::ics02_client::{ + client_consensus::ConsensusState, + client_def::{ClientDef, ConsensusUpdateResult}, + client_state::ClientState, +}; + +use crate::{ + core::{ + ics02_client::error::Error, + ics03_connection::connection::ConnectionEnd, + ics04_channel::{ + channel::ChannelEnd, + commitment::{AcknowledgementCommitment, PacketCommitment}, + packet::Sequence, + }, + ics23_commitment::{ + commitment::{CommitmentPrefix, CommitmentProofBytes, CommitmentRoot}, + merkle::apply_prefix, + }, + ics24_host::{ + identifier::{ChannelId, ClientId, ConnectionId, PortId}, + path::ClientConsensusStatePath, + Path, + }, + ics26_routing::context::ReaderContext, + }, + downcast, + mock::{ + client_state::{AnyClientState, AnyConsensusState, MockClientState, MockConsensusState}, + header::{AnyHeader, MockHeader}, + }, + prelude::*, + Height, }; -use crate::core::ics23_commitment::merkle::apply_prefix; -use crate::core::ics24_host::identifier::{ChannelId, ClientId, ConnectionId, PortId}; -use crate::core::ics24_host::path::ClientConsensusStatePath; -use crate::core::ics24_host::Path; -use crate::core::ics26_routing::context::ReaderContext; -use crate::mock::client_state::{MockClientState, MockConsensusState}; -use crate::mock::header::MockHeader; -use crate::prelude::*; -use crate::Height; use core::fmt::Debug; -#[derive(Clone, Debug, Default, PartialEq, Eq)] +#[derive(Clone, Debug, PartialEq, Eq, ClientDef)] +pub enum AnyClient { + Mock(MockClient), +} + +#[derive(Clone, Debug, PartialEq, Eq)] pub struct MockClient; +impl Default for MockClient { + fn default() -> Self { + Self + } +} + impl ClientDef for MockClient { - type Header = MockHeader; - type ClientState = MockClientState; - type ConsensusState = MockConsensusState; - - fn update_state( - &self, - _ctx: &dyn ReaderContext, - _client_id: ClientId, - client_state: Self::ClientState, - header: Self::Header, - ) -> Result<(Self::ClientState, ConsensusUpdateResult), Error> { - if client_state.latest_height() >= header.height() { - return Err(Error::low_header_height( - header.height(), - client_state.latest_height(), - )); - } - - Ok(( - MockClientState::new(header), - ConsensusUpdateResult::Single(AnyConsensusState::Mock(MockConsensusState::new(header))), - )) - } - - fn verify_client_consensus_state( - &self, - _ctx: &dyn ReaderContext, - _client_state: &Self::ClientState, - _height: Height, - prefix: &CommitmentPrefix, - _proof: &CommitmentProofBytes, - _root: &CommitmentRoot, - client_id: &ClientId, - consensus_height: Height, - _expected_consensus_state: &AnyConsensusState, - ) -> Result<(), Error> { - let client_prefixed_path = Path::ClientConsensusState(ClientConsensusStatePath { - client_id: 
client_id.clone(), - epoch: consensus_height.revision_number, - height: consensus_height.revision_height, - }) - .to_string(); - - let _path = apply_prefix(prefix, vec![client_prefixed_path]); - - Ok(()) - } - - fn verify_connection_state( - &self, - _ctx: &dyn ReaderContext, - _client_id: &ClientId, - _client_state: &Self::ClientState, - _height: Height, - _prefix: &CommitmentPrefix, - _proof: &CommitmentProofBytes, - _root: &CommitmentRoot, - _connection_id: &ConnectionId, - _expected_connection_end: &ConnectionEnd, - ) -> Result<(), Error> { - Ok(()) - } - - fn verify_channel_state( - &self, - _ctx: &dyn ReaderContext, - _client_id: &ClientId, - _client_state: &Self::ClientState, - _height: Height, - _prefix: &CommitmentPrefix, - _proof: &CommitmentProofBytes, - _root: &CommitmentRoot, - _port_id: &PortId, - _channel_id: &ChannelId, - _expected_channel_end: &ChannelEnd, - ) -> Result<(), Error> { - Ok(()) - } - - fn verify_client_full_state( - &self, - _ctx: &dyn ReaderContext, - _client_state: &Self::ClientState, - _height: Height, - _prefix: &CommitmentPrefix, - _proof: &CommitmentProofBytes, - _root: &CommitmentRoot, - _client_id: &ClientId, - _expected_client_state: &AnyClientState, - ) -> Result<(), Error> { - Ok(()) - } - - fn verify_packet_data( - &self, - _ctx: &dyn ReaderContext, - _client_id: &ClientId, - _client_state: &Self::ClientState, - _height: Height, - _connection_end: &ConnectionEnd, - _proof: &CommitmentProofBytes, - _root: &CommitmentRoot, - _port_id: &PortId, - _channel_id: &ChannelId, - _sequence: Sequence, - _commitment: PacketCommitment, - ) -> Result<(), Error> { - Ok(()) - } - - fn verify_packet_acknowledgement( - &self, - _ctx: &dyn ReaderContext, - _client_id: &ClientId, - _client_state: &Self::ClientState, - _height: Height, - _connection_end: &ConnectionEnd, - _proof: &CommitmentProofBytes, - _root: &CommitmentRoot, - _port_id: &PortId, - _channel_id: &ChannelId, - _sequence: Sequence, - _ack: AcknowledgementCommitment, - ) -> Result<(), Error> { - Ok(()) - } - - fn verify_next_sequence_recv( - &self, - _ctx: &dyn ReaderContext, - _client_id: &ClientId, - _client_state: &Self::ClientState, - _height: Height, - _connection_end: &ConnectionEnd, - _proof: &CommitmentProofBytes, - _root: &CommitmentRoot, - _port_id: &PortId, - _channel_id: &ChannelId, - _sequence: Sequence, - ) -> Result<(), Error> { - Ok(()) - } - - fn verify_packet_receipt_absence( - &self, - _ctx: &dyn ReaderContext, - _client_id: &ClientId, - _client_state: &Self::ClientState, - _height: Height, - _connection_end: &ConnectionEnd, - _proof: &CommitmentProofBytes, - _root: &CommitmentRoot, - _port_id: &PortId, - _channel_id: &ChannelId, - _sequence: Sequence, - ) -> Result<(), Error> { - Ok(()) - } - - fn verify_upgrade_and_update_state( - &self, - client_state: &Self::ClientState, - consensus_state: &Self::ConsensusState, - _proof_upgrade_client: Vec, - _proof_upgrade_consensus_state: Vec, - ) -> Result<(Self::ClientState, ConsensusUpdateResult), Error> { - Ok(( - *client_state, - ConsensusUpdateResult::Single(AnyConsensusState::Mock(consensus_state.clone())), - )) - } - - fn verify_header( - &self, - _ctx: &dyn ReaderContext, - _client_id: ClientId, - _client_state: Self::ClientState, - _header: Self::Header, - ) -> Result<(), Error> { - Ok(()) - } - - fn update_state_on_misbehaviour( - &self, - client_state: Self::ClientState, - _header: Self::Header, - ) -> Result { - Ok(client_state) - } - - fn check_for_misbehaviour( - &self, - _ctx: &dyn ReaderContext, - _client_id: ClientId, - 
_client_state: Self::ClientState, - _header: Self::Header, - ) -> Result { - Ok(false) - } + type Header = MockHeader; + type ClientState = MockClientState; + type ConsensusState = MockConsensusState; + + fn update_state( + &self, + _ctx: &Ctx, + _client_id: ClientId, + client_state: Self::ClientState, + header: Self::Header, + ) -> Result<(Self::ClientState, ConsensusUpdateResult), Error> { + if client_state.latest_height() >= header.height() { + return Err(Error::low_header_height(header.height(), client_state.latest_height())) + } + + Ok(( + MockClientState::new(header), + ConsensusUpdateResult::Single( + Ctx::AnyConsensusState::wrap(&MockConsensusState::new(header)).unwrap(), + ), + )) + } + + fn verify_client_consensus_state( + &self, + _ctx: &Ctx, + _client_state: &Self::ClientState, + _height: Height, + prefix: &CommitmentPrefix, + _proof: &CommitmentProofBytes, + _root: &CommitmentRoot, + client_id: &ClientId, + consensus_height: Height, + _expected_consensus_state: &Ctx::AnyConsensusState, + ) -> Result<(), Error> { + let client_prefixed_path = Path::ClientConsensusState(ClientConsensusStatePath { + client_id: client_id.clone(), + epoch: consensus_height.revision_number, + height: consensus_height.revision_height, + }) + .to_string(); + + let _path = apply_prefix(prefix, vec![client_prefixed_path]); + + Ok(()) + } + + fn verify_connection_state( + &self, + _ctx: &Ctx, + _client_id: &ClientId, + _client_state: &Self::ClientState, + _height: Height, + _prefix: &CommitmentPrefix, + _proof: &CommitmentProofBytes, + _root: &CommitmentRoot, + _connection_id: &ConnectionId, + _expected_connection_end: &ConnectionEnd, + ) -> Result<(), Error> { + Ok(()) + } + + fn verify_channel_state( + &self, + _ctx: &Ctx, + _client_id: &ClientId, + _client_state: &Self::ClientState, + _height: Height, + _prefix: &CommitmentPrefix, + _proof: &CommitmentProofBytes, + _root: &CommitmentRoot, + _port_id: &PortId, + _channel_id: &ChannelId, + _expected_channel_end: &ChannelEnd, + ) -> Result<(), Error> { + Ok(()) + } + + fn verify_client_full_state( + &self, + _ctx: &Ctx, + _client_state: &Self::ClientState, + _height: Height, + _prefix: &CommitmentPrefix, + _proof: &CommitmentProofBytes, + _root: &CommitmentRoot, + _client_id: &ClientId, + _expected_client_state: &Ctx::AnyClientState, + ) -> Result<(), Error> { + Ok(()) + } + + fn verify_packet_data( + &self, + _ctx: &Ctx, + _client_id: &ClientId, + _client_state: &Self::ClientState, + _height: Height, + _connection_end: &ConnectionEnd, + _proof: &CommitmentProofBytes, + _root: &CommitmentRoot, + _port_id: &PortId, + _channel_id: &ChannelId, + _sequence: Sequence, + _commitment: PacketCommitment, + ) -> Result<(), Error> { + Ok(()) + } + + fn verify_packet_acknowledgement( + &self, + _ctx: &Ctx, + _client_id: &ClientId, + _client_state: &Self::ClientState, + _height: Height, + _connection_end: &ConnectionEnd, + _proof: &CommitmentProofBytes, + _root: &CommitmentRoot, + _port_id: &PortId, + _channel_id: &ChannelId, + _sequence: Sequence, + _ack: AcknowledgementCommitment, + ) -> Result<(), Error> { + Ok(()) + } + + fn verify_next_sequence_recv( + &self, + _ctx: &Ctx, + _client_id: &ClientId, + _client_state: &Self::ClientState, + _height: Height, + _connection_end: &ConnectionEnd, + _proof: &CommitmentProofBytes, + _root: &CommitmentRoot, + _port_id: &PortId, + _channel_id: &ChannelId, + _sequence: Sequence, + ) -> Result<(), Error> { + Ok(()) + } + + fn verify_packet_receipt_absence( + &self, + _ctx: &Ctx, + _client_id: &ClientId, + _client_state: 
&Self::ClientState, + _height: Height, + _connection_end: &ConnectionEnd, + _proof: &CommitmentProofBytes, + _root: &CommitmentRoot, + _port_id: &PortId, + _channel_id: &ChannelId, + _sequence: Sequence, + ) -> Result<(), Error> { + Ok(()) + } + + fn verify_upgrade_and_update_state( + &self, + client_state: &Self::ClientState, + consensus_state: &Self::ConsensusState, + _proof_upgrade_client: Vec, + _proof_upgrade_consensus_state: Vec, + ) -> Result<(Self::ClientState, ConsensusUpdateResult), Error> { + Ok(( + *client_state, + ConsensusUpdateResult::Single(Ctx::AnyConsensusState::wrap(consensus_state).unwrap()), + )) + } + + fn verify_header( + &self, + _ctx: &Ctx, + _client_id: ClientId, + _client_state: Self::ClientState, + _header: Self::Header, + ) -> Result<(), Error> { + Ok(()) + } + + fn update_state_on_misbehaviour( + &self, + client_state: Self::ClientState, + _header: Self::Header, + ) -> Result { + Ok(client_state) + } + + fn check_for_misbehaviour( + &self, + _ctx: &Ctx, + _client_id: ClientId, + _client_state: Self::ClientState, + _header: Self::Header, + ) -> Result { + Ok(false) + } } diff --git a/modules/src/mock/client_state.rs b/modules/src/mock/client_state.rs index 720d307866..c9480c5045 100644 --- a/modules/src/mock/client_state.rs +++ b/modules/src/mock/client_state.rs @@ -2,193 +2,295 @@ use crate::prelude::*; use alloc::collections::btree_map::BTreeMap as HashMap; -use core::convert::Infallible; -use core::fmt::Debug; -use core::time::Duration; +use core::{convert::Infallible, fmt::Debug, time::Duration}; +use ibc_proto::ibc::core::client::v1::ConsensusStateWithHeight; use serde::{Deserialize, Serialize}; use tendermint_proto::Protobuf; -use ibc_proto::ibc::mock::ClientState as RawMockClientState; -use ibc_proto::ibc::mock::ConsensusState as RawMockConsensusState; - -use crate::core::ics02_client::client_consensus::{AnyConsensusState, ConsensusState}; -use crate::core::ics02_client::client_state::{AnyClientState, ClientState}; -use crate::core::ics02_client::client_type::ClientType; -use crate::core::ics02_client::error::Error; -use crate::core::ics23_commitment::commitment::CommitmentRoot; -use crate::core::ics24_host::identifier::ChainId; -use crate::mock::header::MockHeader; -use crate::timestamp::Timestamp; -use crate::Height; +use crate::{ + core::{ + ics02_client::{ + client_consensus::ConsensusState, + client_state::{ClientState, ClientType}, + error::Error, + }, + ics23_commitment::commitment::CommitmentRoot, + ics24_host::identifier::ChainId, + }, + downcast, + mock::{ + client_def::{AnyClient, MockClient}, + context::ClientTypes, + header::MockHeader, + }, + timestamp::Timestamp, + Height, +}; +use ibc_proto::{ + google::protobuf::Any, + ibc::mock::{ClientState as RawMockClientState, ConsensusState as RawMockConsensusState}, +}; + +pub const MOCK_CLIENT_STATE_TYPE_URL: &str = "/ibc.mock.ClientState"; /// A mock of an IBC client record as it is stored in a mock context. /// For testing ICS02 handlers mostly, cf. `MockClientContext`. #[derive(Clone, Debug)] -pub struct MockClientRecord { - /// The type of this client. - pub client_type: ClientType, +pub struct MockClientRecord { + /// The type of this client. + pub client_type: ClientType, + + /// The client state (representing only the latest height at the moment). + pub client_state: Option, - /// The client state (representing only the latest height at the moment). - pub client_state: Option, + /// Mapping of heights to consensus states for this client. 
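// Illustrative sketch (editor's aside, not part of the diff above): in the reworked mock
// `ClientDef`, methods are generic over a context `Ctx`, and concrete consensus states are
// lifted into the context's `AnyConsensusState` through `wrap` before being returned.
// The angle-bracketed parameters were dropped by this rendering, so the `ReaderContext`
// bound and the `<Ctx>` on `ConsensusUpdateResult` below are inferred, not authoritative:
fn wrap_consensus_sketch<Ctx: ReaderContext>(header: MockHeader) -> ConsensusUpdateResult<Ctx> {
    let cs = MockConsensusState::new(header);
    // Mirrors the hunk above: wrapping a mock consensus state is treated as infallible.
    ConsensusUpdateResult::Single(Ctx::AnyConsensusState::wrap(&cs).unwrap())
}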
+ pub consensus_states: HashMap, +} - /// Mapping of heights to consensus states for this client. - pub consensus_states: HashMap, +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[serde(tag = "type")] +pub enum AnyUpgradeOptions { + Mock(()), +} + +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, ClientState, Protobuf)] +#[serde(tag = "type")] +pub enum AnyClientState { + #[ibc(proto_url = "MOCK_CLIENT_STATE_TYPE_URL")] + Mock(MockClientState), } /// A mock of a client state. For an example of a real structure that this mocks, you can see /// `ClientState` of ics07_tendermint/client_state.rs. -#[derive(Copy, Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, Copy)] pub struct MockClientState { - pub header: MockHeader, - pub frozen_height: Option, + pub header: MockHeader, + pub frozen_height: Option, } impl Protobuf for MockClientState {} impl MockClientState { - pub fn new(header: MockHeader) -> Self { - Self { - header, - frozen_height: None, - } - } - - pub fn latest_height(&self) -> Height { - self.header.height() - } - - pub fn refresh_time(&self) -> Option { - None - } + pub fn new(header: MockHeader) -> Self { + Self { header, frozen_height: None } + } - pub fn expired(&self, _elapsed: Duration) -> bool { - false - } + pub fn refresh_time(&self) -> Option { + None + } } impl From for AnyClientState { - fn from(mcs: MockClientState) -> Self { - Self::Mock(mcs) - } + fn from(mcs: MockClientState) -> Self { + Self::Mock(mcs) + } } impl TryFrom for MockClientState { - type Error = Error; + type Error = Error; - fn try_from(raw: RawMockClientState) -> Result { - Ok(Self::new(raw.header.unwrap().try_into()?)) - } + fn try_from(raw: RawMockClientState) -> Result { + Ok(Self::new(raw.header.unwrap().try_into()?)) + } } impl From for RawMockClientState { - fn from(value: MockClientState) -> Self { - RawMockClientState { - header: Some(ibc_proto::ibc::mock::Header { - height: Some(value.header.height().into()), - timestamp: value.header.timestamp.nanoseconds(), - }), - } - } + fn from(value: MockClientState) -> Self { + RawMockClientState { + header: Some(ibc_proto::ibc::mock::Header { + height: Some(value.header.height().into()), + timestamp: value.header.timestamp.nanoseconds(), + }), + } + } } impl ClientState for MockClientState { - type UpgradeOptions = (); + type UpgradeOptions = (); + type ClientDef = MockClient; - fn chain_id(&self) -> ChainId { - ChainId::default() - } + fn chain_id(&self) -> ChainId { + self.chain_id() + } - fn client_type(&self) -> ClientType { - ClientType::Mock - } + fn client_def(&self) -> Self::ClientDef { + MockClient::default() + } - fn latest_height(&self) -> Height { - self.header.height() - } + fn client_type(&self) -> ClientType { + Self::client_type() + } - fn frozen_height(&self) -> Option { - self.frozen_height - } + fn latest_height(&self) -> Height { + self.latest_height() + } - fn upgrade(self, _upgrade_height: Height, _upgrade_options: (), _chain_id: ChainId) -> Self { - todo!() - } + fn frozen_height(&self) -> Option { + self.frozen_height() + } - fn wrap_any(self) -> AnyClientState { - AnyClientState::Mock(self) - } + fn upgrade(self, _upgrade_height: Height, _upgrade_options: (), _chain_id: ChainId) -> Self { + self.upgrade(_upgrade_height, _upgrade_options, _chain_id) + } + + fn expired(&self, elapsed: Duration) -> bool { + self.expired(elapsed) + } + + fn encode_to_vec(&self) -> Vec { + self.encode_vec() + } +} + +impl MockClientState { + pub fn 
chain_id(&self) -> ChainId { + ChainId::default() + } + + pub fn client_type() -> ClientType { + "9999-mock" + } + + pub fn latest_height(&self) -> Height { + self.header.height() + } + + pub fn frozen_height(&self) -> Option { + self.frozen_height + } + + pub fn upgrade( + self, + _upgrade_height: Height, + _upgrade_options: (), + _chain_id: ChainId, + ) -> Self { + todo!() + } + + pub fn expired(&self, _elapsed: Duration) -> bool { + false + } } impl From for MockClientState { - fn from(cs: MockConsensusState) -> Self { - Self::new(cs.header) - } + fn from(cs: MockConsensusState) -> Self { + Self::new(cs.header) + } +} + +pub const MOCK_CONSENSUS_STATE_TYPE_URL: &str = "/ibc.mock.ConsensusState"; + +#[derive(Clone, Debug, PartialEq, Eq, Serialize, ConsensusState, Protobuf)] +#[serde(tag = "type")] +pub enum AnyConsensusState { + #[ibc(proto_url = "MOCK_CONSENSUS_STATE_TYPE_URL")] + Mock(MockConsensusState), +} + +#[derive(Clone, Debug, PartialEq, Eq, Serialize)] +pub struct AnyConsensusStateWithHeight { + pub height: Height, + pub consensus_state: C::AnyConsensusState, +} + +impl Protobuf for AnyConsensusStateWithHeight {} + +impl TryFrom for AnyConsensusStateWithHeight { + type Error = Error; + + fn try_from(value: ConsensusStateWithHeight) -> Result { + let state = value + .consensus_state + .map(C::AnyConsensusState::try_from) + .transpose()? + .ok_or_else(Error::empty_consensus_state_response)?; + + Ok(AnyConsensusStateWithHeight { + height: value.height.ok_or_else(Error::missing_height)?.into(), + consensus_state: state, + }) + } +} + +impl From> for ConsensusStateWithHeight { + fn from(value: AnyConsensusStateWithHeight) -> Self { + ConsensusStateWithHeight { + height: Some(value.height.into()), + consensus_state: Some(value.consensus_state.into()), + } + } } #[derive(Clone, Debug, PartialEq, Eq, Serialize)] pub struct MockConsensusState { - pub header: MockHeader, - pub root: CommitmentRoot, + pub header: MockHeader, + pub root: CommitmentRoot, } impl MockConsensusState { - pub fn new(header: MockHeader) -> Self { - MockConsensusState { - header, - root: CommitmentRoot::from(vec![0]), - } - } + pub fn new(header: MockHeader) -> Self { + MockConsensusState { header, root: CommitmentRoot::from(vec![0]) } + } - pub fn timestamp(&self) -> Timestamp { - self.header.timestamp - } + pub fn timestamp(&self) -> Timestamp { + self.header.timestamp + } } impl Protobuf for MockConsensusState {} impl TryFrom for MockConsensusState { - type Error = Error; + type Error = Error; - fn try_from(raw: RawMockConsensusState) -> Result { - let raw_header = raw.header.ok_or_else(Error::missing_raw_consensus_state)?; + fn try_from(raw: RawMockConsensusState) -> Result { + let raw_header = raw.header.ok_or_else(Error::missing_raw_consensus_state)?; - Ok(Self { - header: MockHeader::try_from(raw_header)?, - root: CommitmentRoot::from(vec![0]), - }) - } + Ok(Self { header: MockHeader::try_from(raw_header)?, root: CommitmentRoot::from(vec![0]) }) + } } impl From for RawMockConsensusState { - fn from(value: MockConsensusState) -> Self { - RawMockConsensusState { - header: Some(ibc_proto::ibc::mock::Header { - height: Some(value.header.height().into()), - timestamp: value.header.timestamp.nanoseconds(), - }), - } - } + fn from(value: MockConsensusState) -> Self { + RawMockConsensusState { + header: Some(ibc_proto::ibc::mock::Header { + height: Some(value.header.height().into()), + timestamp: value.header.timestamp.nanoseconds(), + }), + } + } } impl From for AnyConsensusState { - fn from(mcs: 
MockConsensusState) -> Self { - Self::Mock(mcs) - } + fn from(mcs: MockConsensusState) -> Self { + Self::Mock(mcs) + } +} + +impl TryFrom for MockConsensusState { + type Error = Error; + + fn try_from(value: AnyConsensusState) -> Result { + downcast!( + value => AnyConsensusState::Mock + ) + .ok_or_else(|| Error::client_args_type_mismatch(MockClientState::client_type().to_owned())) + } } impl ConsensusState for MockConsensusState { - type Error = Infallible; + type Error = Infallible; - fn client_type(&self) -> ClientType { - ClientType::Mock - } + fn root(&self) -> &CommitmentRoot { + &self.root + } - fn root(&self) -> &CommitmentRoot { - &self.root - } + fn timestamp(&self) -> Timestamp { + self.timestamp() + } - fn wrap_any(self) -> AnyConsensusState { - AnyConsensusState::Mock(self) - } + fn encode_to_vec(&self) -> Vec { + self.encode_vec() + } } diff --git a/modules/src/mock/context.rs b/modules/src/mock/context.rs index 68038b1fc6..a4bff3d7a4 100644 --- a/modules/src/mock/context.rs +++ b/modules/src/mock/context.rs @@ -1,1573 +1,1383 @@ //! Implementation of a global context mock. Used in testing handlers of all IBC modules. use crate::prelude::*; -use alloc::collections::btree_map::BTreeMap; -use alloc::sync::Arc; -use core::borrow::Borrow; -use core::cmp::min; -use core::fmt::{Debug, Formatter}; -use core::ops::{Add, Sub}; -use core::time::Duration; -use std::sync::Mutex; +use alloc::{collections::btree_map::BTreeMap, sync::Arc}; +use core::{ + borrow::Borrow, + cmp::min, + fmt::{Debug, Formatter}, + ops::{Add, Sub}, + time::Duration, +}; +use std::{marker::PhantomData, sync::Mutex}; +use crate::core::ics02_client::{client_consensus::ConsensusState, client_def::ClientDef}; use ibc_proto::google::protobuf::Any; use sha2::Digest; use tracing::debug; -use crate::clients::ics07_tendermint::client_state::test_util::get_dummy_tendermint_client_state; -use crate::clients::ics11_beefy::client_state::test_util::get_dummy_beefy_state; -use crate::clients::ics11_beefy::consensus_state::test_util::get_dummy_beefy_consensus_state; -use crate::core::ics02_client::client_consensus::{AnyConsensusState, AnyConsensusStateWithHeight}; -use crate::core::ics02_client::client_state::AnyClientState; -use crate::core::ics02_client::client_type::ClientType; -use crate::core::ics02_client::context::{ClientKeeper, ClientReader}; -use crate::core::ics02_client::error::Error as Ics02Error; -use crate::core::ics02_client::header::AnyHeader; -use crate::core::ics03_connection::connection::ConnectionEnd; -use crate::core::ics03_connection::context::{ConnectionKeeper, ConnectionReader}; -use crate::core::ics03_connection::error::Error as Ics03Error; -use crate::core::ics04_channel::channel::ChannelEnd; -use crate::core::ics04_channel::commitment::{AcknowledgementCommitment, PacketCommitment}; -use crate::core::ics04_channel::context::{ChannelKeeper, ChannelReader}; -use crate::core::ics04_channel::error::Error as Ics04Error; -use crate::core::ics04_channel::packet::{Receipt, Sequence}; -use crate::core::ics05_port::context::PortReader; -use crate::core::ics05_port::error::Error as Ics05Error; -use crate::core::ics05_port::error::Error; -use crate::core::ics23_commitment::commitment::CommitmentPrefix; -use crate::core::ics24_host::identifier::{ChainId, ChannelId, ClientId, ConnectionId, PortId}; -use crate::core::ics26_routing::context::{ - Ics26Context, Module, ModuleId, ReaderContext, Router, RouterBuilder, +#[cfg(test)] +use crate::core::ics02_client::events::Attributes; +use crate::{ + core::{ + 
ics02_client::{ + client_state::{ClientState, ClientType}, + context::{ClientKeeper, ClientReader}, + error::Error as Ics02Error, + header::Header, + misbehaviour::Misbehaviour, + }, + ics03_connection::{ + connection::ConnectionEnd, + context::{ConnectionKeeper, ConnectionReader}, + error::Error as Ics03Error, + }, + ics04_channel::{ + channel::ChannelEnd, + commitment::{AcknowledgementCommitment, PacketCommitment}, + context::{ChannelKeeper, ChannelReader}, + error::Error as Ics04Error, + packet::{Receipt, Sequence}, + }, + ics05_port::{ + context::PortReader, + error::{Error as Ics05Error, Error}, + }, + ics23_commitment::commitment::CommitmentPrefix, + ics24_host::identifier::{ChainId, ChannelId, ClientId, ConnectionId, PortId}, + ics26_routing::{ + context::{Ics26Context, Module, ModuleId, ReaderContext, Router, RouterBuilder}, + error::Error as Ics26Error, + handler::dispatch, + msgs::Ics26Envelope, + }, + }, + mock::{ + client_def::AnyClient, + client_state::{ + AnyClientState, AnyConsensusState, AnyConsensusStateWithHeight, MockClientRecord, + MockClientState, MockConsensusState, + }, + header::{AnyHeader, MockHeader}, + host::{HostBlock, MockHostBlock}, + misbehaviour::AnyMisbehaviour, + }, + test_utils::Crypto, + timestamp::Timestamp, + Height, }; -use crate::core::ics26_routing::handler::{deliver, dispatch, MsgReceipt}; -use crate::core::ics26_routing::msgs::Ics26Envelope; -use crate::events::IbcEvent; -use crate::mock::client_state::{MockClientRecord, MockClientState, MockConsensusState}; -use crate::mock::header::MockHeader; -use crate::mock::host::{HostBlock, HostType}; -use crate::relayer::ics18_relayer::context::Ics18Context; -use crate::relayer::ics18_relayer::error::Error as Ics18Error; -use crate::signer::Signer; -use crate::test_utils::Crypto; -use crate::timestamp::Timestamp; -use crate::Height; pub const DEFAULT_BLOCK_TIME_SECS: u64 = 3; /// A context implementing the dependencies necessary for testing any IBC module. #[derive(Debug)] -pub struct MockContext { - /// The type of host chain underlying this mock context. - host_chain_type: HostType, +pub struct MockContext { + /// The type of host chain underlying this mock context. + pub host_chain_type: ::HostType, + + /// Host chain identifier. + pub host_chain_id: ChainId, + + /// Maximum size for the history of the host chain. Any block older than this is pruned. + pub max_history_size: usize, - /// Host chain identifier. - host_chain_id: ChainId, + /// The chain of blocks underlying this context. A vector of size up to `max_history_size` + /// blocks, ascending order by their height (latest block is on the last position). + pub history: Vec, - /// Maximum size for the history of the host chain. Any block older than this is pruned. - max_history_size: usize, + /// Average time duration between blocks + pub block_time: Duration, - /// The chain of blocks underlying this context. A vector of size up to `max_history_size` - /// blocks, ascending order by their height (latest block is on the last position). - history: Vec, + /// An object that stores all IBC related data. + pub ibc_store: Arc>>, - /// Average time duration between blocks - block_time: Duration, + /// ICS26 router impl + pub router: MockRouter, - /// An object that stores all IBC related data. 
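// Illustrative sketch (editor's aside, not part of the diff above): the reworked
// `MockContext` is generic over a type bundle `C` and keeps all IBC state behind
// `Arc<Mutex<MockIbcStore<C>>>`, so helpers read and write through a lock. The bound on
// `C` was dropped by this rendering; `ClientTypes` (imported elsewhere in this patch) is
// assumed here purely for illustration:
fn client_count_sketch<C: ClientTypes>(ctx: &MockContext<C>) -> usize {
    // `clients` is the map of per-client records held by the shared store.
    ctx.ibc_store.lock().unwrap().clients.len()
}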
- pub ibc_store: Arc>, + pub _phantom: PhantomData, +} - /// ICS26 router impl - router: MockRouter, +impl PartialEq for MockContext { + fn eq(&self, _other: &Self) -> bool { + unimplemented!() + } } -/// Returns a MockContext with bare minimum initialization: no clients, no connections and no channels are -/// present, and the chain has Height(5). This should be used sparingly, mostly for testing the -/// creation of new domain objects. -impl Default for MockContext { - fn default() -> Self { - Self::new( - ChainId::new("mockgaia".to_string(), 0), - HostType::Mock, - 5, - Height::new(0, 5), - ) - } +impl Eq for MockContext {} + +/// Returns a MockContext with bare minimum initialization: no clients, no connections and no +/// channels are present, and the chain has Height(5). This should be used sparingly, mostly for +/// testing the creation of new domain objects. +impl Default for MockContext { + fn default() -> Self { + Self::new( + ChainId::new("mockgaia".to_string(), 0), + ::HostType::default(), + 5, + Height::new(0, 5), + ) + } } /// A manual clone impl is provided because the tests are oblivious to the fact that the `ibc_store` /// is a shared ptr. -impl Clone for MockContext { - fn clone(&self) -> Self { - let ibc_store = { - let ibc_store = self.ibc_store.lock().unwrap().clone(); - Arc::new(Mutex::new(ibc_store)) - }; - Self { - host_chain_type: self.host_chain_type, - host_chain_id: self.host_chain_id.clone(), - max_history_size: self.max_history_size, - history: self.history.clone(), - block_time: self.block_time, - ibc_store, - router: self.router.clone(), - } - } +impl Clone for MockContext { + fn clone(&self) -> Self { + let ibc_store = { + let ibc_store = self.ibc_store.lock().unwrap().clone(); + Arc::new(Mutex::new(ibc_store)) + }; + Self { + host_chain_type: self.host_chain_type, + host_chain_id: self.host_chain_id.clone(), + max_history_size: self.max_history_size, + history: self.history.clone(), + block_time: self.block_time, + ibc_store, + router: self.router.clone(), + _phantom: Default::default(), + } + } } /// Implementation of internal interface for use in testing. The methods in this interface should /// _not_ be accessible to any Ics handler. -impl MockContext { - /// Creates a mock context. Parameter `max_history_size` determines how many blocks will - /// the chain maintain in its history, which also determines the pruning window. Parameter - /// `latest_height` determines the current height of the chain. This context - /// has support to emulate two type of underlying chains: Mock or SyntheticTendermint. - pub fn new( - host_id: ChainId, - host_type: HostType, - max_history_size: usize, - latest_height: Height, - ) -> Self { - assert_ne!( - max_history_size, 0, - "The chain must have a non-zero max_history_size" - ); - - assert_ne!( - latest_height.revision_height, 0, - "The chain must have a non-zero revision_height" - ); - - // Compute the number of blocks to store. - let n = min(max_history_size as u64, latest_height.revision_height); - - assert_eq!( - host_id.version(), - latest_height.revision_number, - "The version in the chain identifier must match the version in the latest height" - ); - - let block_time = Duration::from_secs(DEFAULT_BLOCK_TIME_SECS); - let next_block_timestamp = Timestamp::now().add(block_time).unwrap(); - MockContext { - host_chain_type: host_type, - host_chain_id: host_id.clone(), - max_history_size, - history: (0..n) - .rev() - .map(|i| { - // generate blocks with timestamps -> N, N - BT, N - 2BT, ... 
- // where N = now(), BT = block_time - HostBlock::generate_block( - host_id.clone(), - host_type, - latest_height.sub(i).unwrap().revision_height, - next_block_timestamp - .sub(Duration::from_secs(DEFAULT_BLOCK_TIME_SECS * (i + 1))) - .unwrap(), - ) - }) - .collect(), - block_time, - ibc_store: Arc::new(Mutex::new(MockIbcStore::default())), - router: Default::default(), - } - } - - /// Associates a client record to this context. - /// Given a client id and a height, registers a new client in the context and also associates - /// to this client a mock client state and a mock consensus state for height `height`. The type - /// of this client is implicitly assumed to be Mock. - pub fn with_client(self, client_id: &ClientId, height: Height) -> Self { - self.with_client_parametrized(client_id, height, Some(ClientType::Mock), Some(height)) - } - - /// Similar to `with_client`, this function associates a client record to this context, but - /// additionally permits to parametrize two details of the client. If `client_type` is None, - /// then the client will have type Mock, otherwise the specified type. If - /// `consensus_state_height` is None, then the client will be initialized with a consensus - /// state matching the same height as the client state (`client_state_height`). - pub fn with_client_parametrized( - self, - client_id: &ClientId, - client_state_height: Height, - client_type: Option, - consensus_state_height: Option, - ) -> Self { - let cs_height = consensus_state_height.unwrap_or(client_state_height); - - let client_type = client_type.unwrap_or(ClientType::Mock); - let (client_state, consensus_state) = match client_type { - // If it's a mock client, create the corresponding mock states. - ClientType::Mock => ( - Some(MockClientState::new(MockHeader::new(client_state_height)).into()), - MockConsensusState::new(MockHeader::new(cs_height)).into(), - ), - ClientType::Beefy => ( - Some(get_dummy_beefy_state()), - get_dummy_beefy_consensus_state(), - ), - // If it's a Tendermint client, we need TM states. - ClientType::Tendermint => { - let light_block = HostBlock::generate_tm_block( - self.host_chain_id.clone(), - cs_height.revision_height, - Timestamp::now(), - ); - - let consensus_state = AnyConsensusState::from(light_block.clone()); - let client_state = - get_dummy_tendermint_client_state(light_block.signed_header.header); - - // Return the tuple. - (Some(client_state), consensus_state) - } - ClientType::Near => todo!(), - }; - let consensus_states = vec![(cs_height, consensus_state)].into_iter().collect(); - - debug!("consensus states: {:?}", consensus_states); - - let client_record = MockClientRecord { - client_type, - client_state, - consensus_states, - }; - self.ibc_store - .lock() - .unwrap() - .clients - .insert(client_id.clone(), client_record); - self - } - - pub fn with_client_parametrized_history( - self, - client_id: &ClientId, - client_state_height: Height, - client_type: Option, - consensus_state_height: Option, - ) -> Self { - let cs_height = consensus_state_height.unwrap_or(client_state_height); - let prev_cs_height = cs_height.clone().sub(1).unwrap_or(client_state_height); - - let client_type = client_type.unwrap_or(ClientType::Mock); - let now = Timestamp::now(); - - let (client_state, consensus_state) = match client_type { - // If it's a mock client, create the corresponding mock states. 
- ClientType::Mock => ( - Some(MockClientState::new(MockHeader::new(client_state_height)).into()), - MockConsensusState::new(MockHeader::new(cs_height)).into(), - ), - - ClientType::Beefy => ( - Some(get_dummy_beefy_state()), - get_dummy_beefy_consensus_state(), - ), - // If it's a Tendermint client, we need TM states. - ClientType::Tendermint => { - let light_block = HostBlock::generate_tm_block( - self.host_chain_id.clone(), - cs_height.revision_height, - now, - ); - - let consensus_state = AnyConsensusState::from(light_block.clone()); - let client_state = - get_dummy_tendermint_client_state(light_block.signed_header.header); - - // Return the tuple. - (Some(client_state), consensus_state) - } - ClientType::Near => todo!(), - }; - - let prev_consensus_state = match client_type { - // If it's a mock client, create the corresponding mock states. - ClientType::Mock => MockConsensusState::new(MockHeader::new(prev_cs_height)).into(), - // If it's a Tendermint client, we need TM states. - ClientType::Beefy => get_dummy_beefy_consensus_state(), - ClientType::Tendermint => { - let light_block = HostBlock::generate_tm_block( - self.host_chain_id.clone(), - prev_cs_height.revision_height, - now.sub(self.block_time).unwrap(), - ); - AnyConsensusState::from(light_block) - } - ClientType::Near => todo!(), - }; - - let consensus_states = vec![ - (prev_cs_height, prev_consensus_state), - (cs_height, consensus_state), - ] - .into_iter() - .collect(); - - debug!("consensus states: {:?}", consensus_states); - - let client_record = MockClientRecord { - client_type, - client_state, - consensus_states, - }; - - self.ibc_store - .lock() - .unwrap() - .clients - .insert(client_id.clone(), client_record); - self - } - - /// Associates a connection to this context. - pub fn with_connection( - self, - connection_id: ConnectionId, - connection_end: ConnectionEnd, - ) -> Self { - self.ibc_store - .lock() - .unwrap() - .connections - .insert(connection_id, connection_end); - self - } - - /// Associates a channel (in an arbitrary state) to this context. 
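// Illustrative sketch (editor's aside, not part of the diff above): the removed
// `with_client_parametrized_history` seeds a client with two consecutive consensus
// states, at `cs_height - 1` and `cs_height`, so update and misbehaviour tests have a
// non-trivial history to check against. For the mock client, the pairs it collects look
// like this (container types inferred, since the rendering dropped the generics):
fn seeded_history_sketch(cs_height: Height) -> Vec<(Height, MockConsensusState)> {
    let prev = cs_height.sub(1).unwrap_or(cs_height);
    vec![
        (prev, MockConsensusState::new(MockHeader::new(prev))),
        (cs_height, MockConsensusState::new(MockHeader::new(cs_height))),
    ]
}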
- pub fn with_channel( - self, - port_id: PortId, - chan_id: ChannelId, - channel_end: ChannelEnd, - ) -> Self { - let mut channels = self.ibc_store.lock().unwrap().channels.clone(); - channels.insert((port_id, chan_id), channel_end); - self.ibc_store.lock().unwrap().channels = channels; - self - } - - pub fn with_send_sequence( - self, - port_id: PortId, - chan_id: ChannelId, - seq_number: Sequence, - ) -> Self { - let mut next_sequence_send = self.ibc_store.lock().unwrap().next_sequence_send.clone(); - next_sequence_send.insert((port_id, chan_id), seq_number); - self.ibc_store.lock().unwrap().next_sequence_send = next_sequence_send; - self - } - - pub fn with_recv_sequence( - self, - port_id: PortId, - chan_id: ChannelId, - seq_number: Sequence, - ) -> Self { - let mut next_sequence_recv = self.ibc_store.lock().unwrap().next_sequence_recv.clone(); - next_sequence_recv.insert((port_id, chan_id), seq_number); - self.ibc_store.lock().unwrap().next_sequence_recv = next_sequence_recv; - self - } - - pub fn with_ack_sequence( - self, - port_id: PortId, - chan_id: ChannelId, - seq_number: Sequence, - ) -> Self { - let mut next_sequence_ack = self.ibc_store.lock().unwrap().next_sequence_send.clone(); - next_sequence_ack.insert((port_id, chan_id), seq_number); - self.ibc_store.lock().unwrap().next_sequence_ack = next_sequence_ack; - self - } - - pub fn with_height(self, target_height: Height) -> Self { - let latest_height = self.latest_height(); - if target_height.revision_number > latest_height.revision_number { - unimplemented!() - } else if target_height.revision_number < latest_height.revision_number { - panic!("Cannot rewind history of the chain to a smaller revision number!") - } else if target_height.revision_height < latest_height.revision_height { - panic!("Cannot rewind history of the chain to a smaller revision height!") - } else if target_height.revision_height > latest_height.revision_height { - // Repeatedly advance the host chain height till we hit the desired height - let mut ctx = MockContext { ..self }; - while ctx.latest_height().revision_height < target_height.revision_height { - ctx.advance_host_chain_height() - } - ctx - } else { - // Both the revision number and height match - self - } - } - - pub fn with_packet_commitment( - self, - port_id: PortId, - chan_id: ChannelId, - seq: Sequence, - data: PacketCommitment, - ) -> Self { - let mut packet_commitment = self.ibc_store.lock().unwrap().packet_commitment.clone(); - packet_commitment.insert((port_id, chan_id, seq), data); - self.ibc_store.lock().unwrap().packet_commitment = packet_commitment; - self - } - - pub fn with_router(self, router: MockRouter) -> Self { - Self { router, ..self } - } - - /// Accessor for a block of the local (host) chain from this context. - /// Returns `None` if the block at the requested height does not exist. - pub fn host_block(&self, target_height: Height) -> Option<&HostBlock> { - let target = target_height.revision_height as usize; - let latest = self.latest_height().revision_height as usize; - - // Check that the block is not too advanced, nor has it been pruned. - if (target > latest) || (target <= latest - self.history.len()) { - None // Block for requested height does not exist in history. - } else { - Some(&self.history[self.history.len() + target - latest - 1]) - } - } - - /// Triggers the advancing of the host chain, by extending the history of blocks (or headers). 
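// Worked example (editor's aside, not part of the diff above) for the `host_block`
// lookup: the history vector stores the most recent blocks in ascending height order,
// so a block at `target` lives at index `history.len() + target - latest - 1`. With
// latest = 10 and a 5-block history (heights 6..=10), target = 8 maps to index
// 5 + 8 - 10 - 1 = 2, whose block indeed has height 8; target = 5 trips the
// `target <= latest - history.len()` pruning check and yields `None`.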
- pub fn advance_host_chain_height(&mut self) { - let latest_block = self.history.last().expect("history cannot be empty"); - let new_block = HostBlock::generate_block( - self.host_chain_id.clone(), - self.host_chain_type, - latest_block.height().increment().revision_height, - latest_block.timestamp().add(self.block_time).unwrap(), - ); - - // Append the new header at the tip of the history. - if self.history.len() >= self.max_history_size { - // History is full, we rotate and replace the tip with the new header. - self.history.rotate_left(1); - self.history[self.max_history_size - 1] = new_block; - } else { - // History is not full yet. - self.history.push(new_block); - } - } - - /// A datagram passes from the relayer to the IBC module (on host chain). - /// Alternative method to `Ics18Context::send` that does not exercise any serialization. - /// Used in testing the Ics18 algorithms, hence this may return a Ics18Error. - pub fn deliver(&mut self, msg: Ics26Envelope) -> Result<(), Ics18Error> { - dispatch::<_, Crypto>(self, msg).map_err(Ics18Error::transaction_failed)?; - // Create a new block. - self.advance_host_chain_height(); - Ok(()) - } - - /// Validates this context. Should be called after the context is mutated by a test. - pub fn validate(&self) -> Result<(), String> { - // Check that the number of entries is not higher than window size. - if self.history.len() > self.max_history_size { - return Err("too many entries".to_string()); - } - - // Check the content of the history. - if !self.history.is_empty() { - // Get the highest block. - let lh = &self.history[self.history.len() - 1]; - // Check latest is properly updated with highest header height. - if lh.height() != self.latest_height() { - return Err("latest height is not updated".to_string()); - } - } - - // Check that headers in the history are in sequential order. - for i in 1..self.history.len() { - let ph = &self.history[i - 1]; - let h = &self.history[i]; - if ph.height().increment() != h.height() { - return Err("headers in history not sequential".to_string()); - } - } - Ok(()) - } - - pub fn add_port(&mut self, port_id: PortId) { - let module_id = ModuleId::new(format!("module{}", port_id).into()).unwrap(); - self.ibc_store - .lock() - .unwrap() - .port_to_module - .insert(port_id, module_id); - } - - pub fn scope_port_to_module(&mut self, port_id: PortId, module_id: ModuleId) { - self.ibc_store - .lock() - .unwrap() - .port_to_module - .insert(port_id, module_id); - } - - pub fn consensus_states(&self, client_id: &ClientId) -> Vec { - self.ibc_store.lock().unwrap().clients[client_id] - .consensus_states - .iter() - .map(|(k, v)| AnyConsensusStateWithHeight { - height: *k, - consensus_state: v.clone(), - }) - .collect() - } - - pub fn latest_client_states(&self, client_id: &ClientId) -> AnyClientState { - self.ibc_store.lock().unwrap().clients[client_id] - .client_state - .as_ref() - .unwrap() - .clone() - } - - pub fn latest_consensus_states( - &self, - client_id: &ClientId, - height: &Height, - ) -> AnyConsensusState { - self.ibc_store.lock().unwrap().clients[client_id] - .consensus_states - .get(height) - .unwrap() - .clone() - } - - #[inline] - fn latest_height(&self) -> Height { - self.history - .last() - .expect("history cannot be empty") - .height() - } - - pub fn ibc_store_share(&self) -> Arc> { - self.ibc_store.clone() - } +impl MockContext { + /// Creates a mock context. 
Parameter `max_history_size` determines how many blocks will + /// the chain maintain in its history, which also determines the pruning window. Parameter + /// `latest_height` determines the current height of the chain. This context + /// has support to emulate two type of underlying chains: Mock or SyntheticTendermint. + pub fn new( + host_id: ChainId, + host_type: ::HostType, + max_history_size: usize, + latest_height: Height, + ) -> Self { + assert_ne!(max_history_size, 0, "The chain must have a non-zero max_history_size"); + + assert_ne!( + latest_height.revision_height, 0, + "The chain must have a non-zero revision_height" + ); + + // Compute the number of blocks to store. + let n = min(max_history_size as u64, latest_height.revision_height); + + assert_eq!( + host_id.version(), + latest_height.revision_number, + "The version in the chain identifier must match the version in the latest height" + ); + + let block_time = Duration::from_secs(DEFAULT_BLOCK_TIME_SECS); + let next_block_timestamp = Timestamp::now().add(block_time).unwrap(); + MockContext { + host_chain_type: host_type, + host_chain_id: host_id.clone(), + max_history_size, + history: (0..n) + .rev() + .map(|i| { + // generate blocks with timestamps -> N, N - BT, N - 2BT, ... + // where N = now(), BT = block_time + ::generate_block( + host_id.clone(), + host_type, + latest_height.sub(i).unwrap().revision_height, + next_block_timestamp + .sub(Duration::from_secs(DEFAULT_BLOCK_TIME_SECS * (i + 1))) + .unwrap(), + ) + }) + .collect(), + block_time, + ibc_store: Arc::new(Mutex::new(MockIbcStore::::default())), + router: Default::default(), + _phantom: Default::default(), + } + } + + /// Associates a connection to this context. + pub fn with_connection( + self, + connection_id: ConnectionId, + connection_end: ConnectionEnd, + ) -> Self { + self.ibc_store.lock().unwrap().connections.insert(connection_id, connection_end); + self + } + + /// Associates a channel (in an arbitrary state) to this context. 
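// Worked example (editor's aside, not part of the diff above) for `MockContext::new`:
// `max_history_size` and `latest_height.revision_height` must be non-zero, and the chain
// id's version must equal `latest_height.revision_number`. The context then generates
// n = min(max_history_size, revision_height) blocks. With max_history_size = 5 and a
// latest height of (0, 3), only n = 3 blocks are produced, at heights 1, 2 and 3, with
// timestamps now() - 2*block_time, now() - block_time and now() respectively.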
+ pub fn with_channel( + self, + port_id: PortId, + chan_id: ChannelId, + channel_end: ChannelEnd, + ) -> Self { + let mut channels = self.ibc_store.lock().unwrap().channels.clone(); + channels.insert((port_id, chan_id), channel_end); + self.ibc_store.lock().unwrap().channels = channels; + self + } + + pub fn with_send_sequence( + self, + port_id: PortId, + chan_id: ChannelId, + seq_number: Sequence, + ) -> Self { + let mut next_sequence_send = self.ibc_store.lock().unwrap().next_sequence_send.clone(); + next_sequence_send.insert((port_id, chan_id), seq_number); + self.ibc_store.lock().unwrap().next_sequence_send = next_sequence_send; + self + } + + pub fn with_recv_sequence( + self, + port_id: PortId, + chan_id: ChannelId, + seq_number: Sequence, + ) -> Self { + let mut next_sequence_recv = self.ibc_store.lock().unwrap().next_sequence_recv.clone(); + next_sequence_recv.insert((port_id, chan_id), seq_number); + self.ibc_store.lock().unwrap().next_sequence_recv = next_sequence_recv; + self + } + + pub fn with_ack_sequence( + self, + port_id: PortId, + chan_id: ChannelId, + seq_number: Sequence, + ) -> Self { + let mut next_sequence_ack = self.ibc_store.lock().unwrap().next_sequence_send.clone(); + next_sequence_ack.insert((port_id, chan_id), seq_number); + self.ibc_store.lock().unwrap().next_sequence_ack = next_sequence_ack; + self + } + + pub fn with_height(self, target_height: Height) -> Self { + let latest_height = self.latest_height(); + if target_height.revision_number > latest_height.revision_number { + unimplemented!() + } else if target_height.revision_number < latest_height.revision_number { + panic!("Cannot rewind history of the chain to a smaller revision number!") + } else if target_height.revision_height < latest_height.revision_height { + panic!("Cannot rewind history of the chain to a smaller revision height!") + } else if target_height.revision_height > latest_height.revision_height { + // Repeatedly advance the host chain height till we hit the desired height + let mut ctx = MockContext { ..self }; + while ctx.latest_height().revision_height < target_height.revision_height { + ctx.advance_host_chain_height() + } + ctx + } else { + // Both the revision number and height match + self + } + } + + pub fn with_packet_commitment( + self, + port_id: PortId, + chan_id: ChannelId, + seq: Sequence, + data: PacketCommitment, + ) -> Self { + let mut packet_commitment = self.ibc_store.lock().unwrap().packet_commitment.clone(); + packet_commitment.insert((port_id, chan_id, seq), data); + self.ibc_store.lock().unwrap().packet_commitment = packet_commitment; + self + } + + pub fn with_router(self, router: MockRouter) -> Self { + Self { router, ..self } + } + + /// Accessor for a block of the local (host) chain from this context. + /// Returns `None` if the block at the requested height does not exist. + pub fn host_block(&self, target_height: Height) -> Option<&C::HostBlock> { + let target = target_height.revision_height as usize; + let latest = self.latest_height().revision_height as usize; + + // Check that the block is not too advanced, nor has it been pruned. + if (target > latest) || (target <= latest - self.history.len()) { + None // Block for requested height does not exist in history. + } else { + Some(&self.history[self.history.len() + target - latest - 1]) + } + } + + /// Triggers the advancing of the host chain, by extending the history of blocks (or headers). 
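// Illustrative sketch (editor's aside, not part of the diff above): the `with_*` methods
// are intended to be chained when preparing a fixture; each locks the shared store,
// inserts into one map and hands the context back. Identifiers below are arbitrary test
// values, and the `ClientTypes` bound on `C` is an assumption (the real bound was
// dropped by this rendering):
fn channel_fixture_sketch<C: ClientTypes>(
    ctx: MockContext<C>,
    connection_end: ConnectionEnd,
    channel_end: ChannelEnd,
) -> MockContext<C> {
    ctx.with_connection(ConnectionId::new(0), connection_end)
        .with_channel(PortId::default(), ChannelId::new(0), channel_end)
        .with_send_sequence(PortId::default(), ChannelId::new(0), Sequence::from(1u64))
        .with_height(Height::new(0, 10))
}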
+ pub fn advance_host_chain_height(&mut self) { + let latest_block = self.history.last().expect("history cannot be empty"); + let new_block = ::HostBlock::generate_block( + self.host_chain_id.clone(), + self.host_chain_type, + latest_block.height().increment().revision_height, + latest_block.timestamp().add(self.block_time).unwrap(), + ); + + // Append the new header at the tip of the history. + if self.history.len() >= self.max_history_size { + // History is full, we rotate and replace the tip with the new header. + self.history.rotate_left(1); + self.history[self.max_history_size - 1] = new_block; + } else { + // History is not full yet. + self.history.push(new_block); + } + } + + /// A datagram passes from the relayer to the IBC module (on host chain). + /// Alternative method to `Ics18Context::send` that does not exercise any serialization. + /// Used in testing the Ics18 algorithms, hence this may return a Ics18Error. + pub fn deliver(&mut self, msg: Ics26Envelope>) -> Result<(), Ics26Error> { + dispatch(self, msg)?; + // Create a new block. + self.advance_host_chain_height(); + Ok(()) + } + + /// Validates this context. Should be called after the context is mutated by a test. + pub fn validate(&self) -> Result<(), String> { + // Check that the number of entries is not higher than window size. + if self.history.len() > self.max_history_size { + return Err("too many entries".to_string()) + } + + // Check the content of the history. + if !self.history.is_empty() { + // Get the highest block. + let lh = &self.history[self.history.len() - 1]; + // Check latest is properly updated with highest header height. + if lh.height() != self.latest_height() { + return Err("latest height is not updated".to_string()) + } + } + + // Check that headers in the history are in sequential order. + for i in 1..self.history.len() { + let ph = &self.history[i - 1]; + let h = &self.history[i]; + if ph.height().increment() != h.height() { + return Err("headers in history not sequential".to_string()) + } + } + Ok(()) + } + + pub fn add_port(&mut self, port_id: PortId) { + let module_id = ModuleId::new(format!("module{}", port_id).into()).unwrap(); + self.ibc_store.lock().unwrap().port_to_module.insert(port_id, module_id); + } + + pub fn scope_port_to_module(&mut self, port_id: PortId, module_id: ModuleId) { + self.ibc_store.lock().unwrap().port_to_module.insert(port_id, module_id); + } + + pub fn consensus_states(&self, client_id: &ClientId) -> Vec> { + self.ibc_store.lock().unwrap().clients[client_id] + .consensus_states + .iter() + .map(|(k, v)| AnyConsensusStateWithHeight { height: *k, consensus_state: v.clone() }) + .collect() + } + + pub fn latest_client_states(&self, client_id: &ClientId) -> C::AnyClientState { + self.ibc_store.lock().unwrap().clients[client_id] + .client_state + .as_ref() + .unwrap() + .clone() + } + + pub fn latest_consensus_states( + &self, + client_id: &ClientId, + height: &Height, + ) -> C::AnyConsensusState { + self.ibc_store.lock().unwrap().clients[client_id] + .consensus_states + .get(height) + .unwrap() + .clone() + } + + #[inline] + pub fn latest_height(&self) -> Height { + self.history.last().expect("history cannot be empty").height() + } + + pub fn ibc_store_share(&self) -> Arc>> { + self.ibc_store.clone() + } +} + +impl MockContext +where + C::AnyClientState: From, + C::AnyConsensusState: From, +{ + /// Associates a client record to this context. 
+ /// Given a client id and a height, registers a new client in the context and also associates + /// to this client a mock client state and a mock consensus state for height `height`. The type + /// of this client is implicitly assumed to be Mock. + pub fn with_client(self, client_id: &ClientId, height: Height) -> Self { + self.with_client_parametrized( + client_id, + height, + Some(MockClientState::client_type()), + Some(height), + ) + } + + /// Similar to `with_client`, this function associates a client record to this context, but + /// additionally permits to parametrize two details of the client. If `client_type` is None, + /// then the client will have type Mock, otherwise the specified type. If + /// `consensus_state_height` is None, then the client will be initialized with a consensus + /// state matching the same height as the client state (`client_state_height`). + pub fn with_client_parametrized( + self, + client_id: &ClientId, + client_state_height: Height, + client_type: Option, + consensus_state_height: Option, + ) -> Self { + let cs_height = consensus_state_height.unwrap_or(client_state_height); + + let client_type = client_type.unwrap_or(MockClientState::client_type()); + let (client_state, consensus_state) = match client_type { + // If it's a mock client, create the corresponding mock states. + client_type if client_type == MockClientState::client_type() => ( + Some(MockClientState::new(MockHeader::new(client_state_height)).into()), + MockConsensusState::new(MockHeader::new(cs_height)).into(), + ), + _ => unimplemented!(), + }; + let consensus_states = vec![(cs_height, consensus_state)].into_iter().collect(); + + debug!("consensus states: {:?}", consensus_states); + + let client_record = MockClientRecord { client_type, client_state, consensus_states }; + self.ibc_store.lock().unwrap().clients.insert(client_id.clone(), client_record); + self + } } /// An object that stores all IBC related data. #[derive(Clone, Debug, Default)] -pub struct MockIbcStore { - /// The set of all clients, indexed by their id. - pub clients: BTreeMap, +pub struct MockIbcStore { + /// The set of all clients, indexed by their id. + pub clients: BTreeMap>, - /// Tracks the processed time for clients header updates - pub client_processed_times: BTreeMap<(ClientId, Height), Timestamp>, + /// Tracks the processed time for clients header updates + pub client_processed_times: BTreeMap<(ClientId, Height), Timestamp>, - /// Tracks the processed height for the clients - pub client_processed_heights: BTreeMap<(ClientId, Height), Height>, + /// Tracks the processed height for the clients + pub client_processed_heights: BTreeMap<(ClientId, Height), Height>, - /// Counter for the client identifiers, necessary for `increase_client_counter` and the - /// `client_counter` methods. - pub client_ids_counter: u64, + /// Counter for the client identifiers, necessary for `increase_client_counter` and the + /// `client_counter` methods. + pub client_ids_counter: u64, - /// Association between client ids and connection ids. - pub client_connections: BTreeMap, + /// Association between client ids and connection ids. + pub client_connections: BTreeMap, - /// All the connections in the store. - pub connections: BTreeMap, + /// All the connections in the store. + pub connections: BTreeMap, - /// Counter for connection identifiers (see `increase_connection_counter`). - pub connection_ids_counter: u64, + /// Counter for connection identifiers (see `increase_connection_counter`). 
+ pub connection_ids_counter: u64, - /// Association between connection ids and channel ids. - pub connection_channels: BTreeMap>, + /// Association between connection ids and channel ids. + pub connection_channels: BTreeMap>, - /// Counter for channel identifiers (see `increase_channel_counter`). - pub channel_ids_counter: u64, + /// Counter for channel identifiers (see `increase_channel_counter`). + pub channel_ids_counter: u64, - /// All the channels in the store. TODO Make new key PortId X ChanneId - pub channels: BTreeMap<(PortId, ChannelId), ChannelEnd>, + /// All the channels in the store. TODO Make new key PortId X ChanneId + pub channels: BTreeMap<(PortId, ChannelId), ChannelEnd>, - /// Tracks the sequence number for the next packet to be sent. - pub next_sequence_send: BTreeMap<(PortId, ChannelId), Sequence>, + /// Tracks the sequence number for the next packet to be sent. + pub next_sequence_send: BTreeMap<(PortId, ChannelId), Sequence>, - /// Tracks the sequence number for the next packet to be received. - pub next_sequence_recv: BTreeMap<(PortId, ChannelId), Sequence>, + /// Tracks the sequence number for the next packet to be received. + pub next_sequence_recv: BTreeMap<(PortId, ChannelId), Sequence>, - /// Tracks the sequence number for the next packet to be acknowledged. - pub next_sequence_ack: BTreeMap<(PortId, ChannelId), Sequence>, + /// Tracks the sequence number for the next packet to be acknowledged. + pub next_sequence_ack: BTreeMap<(PortId, ChannelId), Sequence>, - pub packet_acknowledgement: BTreeMap<(PortId, ChannelId, Sequence), AcknowledgementCommitment>, + pub packet_acknowledgement: BTreeMap<(PortId, ChannelId, Sequence), AcknowledgementCommitment>, - /// Maps ports to the the module that owns it - pub port_to_module: BTreeMap, + /// Maps ports to the the module that owns it + pub port_to_module: BTreeMap, - /// Constant-size commitments to packets data fields - pub packet_commitment: BTreeMap<(PortId, ChannelId, Sequence), PacketCommitment>, + /// Constant-size commitments to packets data fields + pub packet_commitment: BTreeMap<(PortId, ChannelId, Sequence), PacketCommitment>, - // Used by unordered channel - pub packet_receipt: BTreeMap<(PortId, ChannelId, Sequence), Receipt>, + // Used by unordered channel + pub packet_receipt: BTreeMap<(PortId, ChannelId, Sequence), Receipt>, } #[derive(Default)] pub struct MockRouterBuilder(MockRouter); impl RouterBuilder for MockRouterBuilder { - type Router = MockRouter; - - fn add_route(mut self, module_id: ModuleId, module: impl Module) -> Result { - match self.0 .0.insert(module_id, Arc::new(module)) { - None => Ok(self), - Some(_) => Err("Duplicate module_id".to_owned()), - } - } - - fn build(self) -> Self::Router { - self.0 - } + type Router = MockRouter; + + fn add_route(mut self, module_id: ModuleId, module: impl Module) -> Result { + match self.0 .0.insert(module_id, Arc::new(module)) { + None => Ok(self), + Some(_) => Err("Duplicate module_id".to_owned()), + } + } + + fn build(self) -> Self::Router { + self.0 + } } #[derive(Clone, Default)] pub struct MockRouter(BTreeMap>); impl Debug for MockRouter { - fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result { - write!(f, "{:?}", self.0.keys().collect::>()) - } + fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result { + write!(f, "{:?}", self.0.keys().collect::>()) + } } impl Router for MockRouter { - fn get_route_mut(&mut self, module_id: &impl Borrow) -> Option<&mut dyn Module> { - self.0.get_mut(module_id.borrow()).and_then(Arc::get_mut) - } + fn 
get_route_mut(&mut self, module_id: &impl Borrow) -> Option<&mut dyn Module> { + self.0.get_mut(module_id.borrow()).and_then(Arc::get_mut) + } - fn has_route(&self, module_id: &impl Borrow) -> bool { - self.0.get(module_id.borrow()).is_some() - } + fn has_route(&self, module_id: &impl Borrow) -> bool { + self.0.get(module_id.borrow()).is_some() + } } -impl ReaderContext for MockContext {} +impl ReaderContext for MockContext {} -impl Ics26Context for MockContext { - type Router = MockRouter; +impl Ics26Context for MockContext { + type Router = MockRouter; - fn router(&self) -> &Self::Router { - &self.router - } + fn router(&self) -> &Self::Router { + &self.router + } - fn router_mut(&mut self) -> &mut Self::Router { - &mut self.router - } + fn router_mut(&mut self) -> &mut Self::Router { + &mut self.router + } } -impl PortReader for MockContext { - fn lookup_module_by_port(&self, port_id: &PortId) -> Result { - match self.ibc_store.lock().unwrap().port_to_module.get(port_id) { - Some(mod_id) => Ok(mod_id.clone()), - None => Err(Ics05Error::unknown_port(port_id.clone())), - } - } +impl PortReader for MockContext { + fn lookup_module_by_port(&self, port_id: &PortId) -> Result { + match self.ibc_store.lock().unwrap().port_to_module.get(port_id) { + Some(mod_id) => Ok(mod_id.clone()), + None => Err(Ics05Error::unknown_port(port_id.clone())), + } + } } -impl ChannelReader for MockContext { - fn channel_end(&self, pcid: &(PortId, ChannelId)) -> Result { - match self.ibc_store.lock().unwrap().channels.get(pcid) { - Some(channel_end) => Ok(channel_end.clone()), - None => Err(Ics04Error::channel_not_found(pcid.0.clone(), pcid.1)), - } - } - - fn connection_channels( - &self, - cid: &ConnectionId, - ) -> Result, Ics04Error> { - match self.ibc_store.lock().unwrap().connection_channels.get(cid) { - Some(pcid) => Ok(pcid.clone()), - None => Err(Ics04Error::missing_channel()), - } - } - - fn get_next_sequence_send( - &self, - port_channel_id: &(PortId, ChannelId), - ) -> Result { - match self - .ibc_store - .lock() - .unwrap() - .next_sequence_send - .get(port_channel_id) - { - Some(sequence) => Ok(*sequence), - None => Err(Ics04Error::missing_next_send_seq(port_channel_id.clone())), - } - } - - fn get_next_sequence_recv( - &self, - port_channel_id: &(PortId, ChannelId), - ) -> Result { - match self - .ibc_store - .lock() - .unwrap() - .next_sequence_recv - .get(port_channel_id) - { - Some(sequence) => Ok(*sequence), - None => Err(Ics04Error::missing_next_recv_seq(port_channel_id.clone())), - } - } - - fn get_next_sequence_ack( - &self, - port_channel_id: &(PortId, ChannelId), - ) -> Result { - match self - .ibc_store - .lock() - .unwrap() - .next_sequence_ack - .get(port_channel_id) - { - Some(sequence) => Ok(*sequence), - None => Err(Ics04Error::missing_next_ack_seq(port_channel_id.clone())), - } - } - - fn get_packet_commitment( - &self, - key: &(PortId, ChannelId, Sequence), - ) -> Result { - match self.ibc_store.lock().unwrap().packet_commitment.get(key) { - Some(commitment) => Ok(commitment.clone()), - None => Err(Ics04Error::packet_commitment_not_found(key.2)), - } - } - - fn get_packet_receipt( - &self, - key: &(PortId, ChannelId, Sequence), - ) -> Result { - match self.ibc_store.lock().unwrap().packet_receipt.get(key) { - Some(receipt) => Ok(receipt.clone()), - None => Err(Ics04Error::packet_receipt_not_found(key.2)), - } - } - - fn get_packet_acknowledgement( - &self, - key: &(PortId, ChannelId, Sequence), - ) -> Result { - match self - .ibc_store - .lock() - .unwrap() - .packet_acknowledgement 
- .get(key) - { - Some(ack) => Ok(ack.clone()), - None => Err(Ics04Error::packet_acknowledgement_not_found(key.2)), - } - } - - fn hash(&self, value: Vec) -> Vec { - sha2::Sha256::digest(value).to_vec() - } - - fn client_update_time( - &self, - client_id: &ClientId, - height: Height, - ) -> Result { - match self - .ibc_store - .lock() - .unwrap() - .client_processed_times - .get(&(client_id.clone(), height)) - { - Some(time) => Ok(*time), - None => Err(Ics04Error::processed_time_not_found( - client_id.clone(), - height, - )), - } - } - - fn client_update_height( - &self, - client_id: &ClientId, - height: Height, - ) -> Result { - match self - .ibc_store - .lock() - .unwrap() - .client_processed_heights - .get(&(client_id.clone(), height)) - { - Some(height) => Ok(*height), - None => Err(Ics04Error::processed_height_not_found( - client_id.clone(), - height, - )), - } - } - - fn channel_counter(&self) -> Result { - Ok(self.ibc_store.lock().unwrap().channel_ids_counter) - } - - fn max_expected_time_per_block(&self) -> Duration { - self.block_time - } +impl ChannelReader for MockContext { + fn channel_end(&self, pcid: &(PortId, ChannelId)) -> Result { + match self.ibc_store.lock().unwrap().channels.get(pcid) { + Some(channel_end) => Ok(channel_end.clone()), + None => Err(Ics04Error::channel_not_found(pcid.0.clone(), pcid.1)), + } + } + + fn connection_channels( + &self, + cid: &ConnectionId, + ) -> Result, Ics04Error> { + match self.ibc_store.lock().unwrap().connection_channels.get(cid) { + Some(pcid) => Ok(pcid.clone()), + None => Err(Ics04Error::missing_channel()), + } + } + + fn get_next_sequence_send( + &self, + port_channel_id: &(PortId, ChannelId), + ) -> Result { + match self.ibc_store.lock().unwrap().next_sequence_send.get(port_channel_id) { + Some(sequence) => Ok(*sequence), + None => Err(Ics04Error::missing_next_send_seq(port_channel_id.clone())), + } + } + + fn get_next_sequence_recv( + &self, + port_channel_id: &(PortId, ChannelId), + ) -> Result { + match self.ibc_store.lock().unwrap().next_sequence_recv.get(port_channel_id) { + Some(sequence) => Ok(*sequence), + None => Err(Ics04Error::missing_next_recv_seq(port_channel_id.clone())), + } + } + + fn get_next_sequence_ack( + &self, + port_channel_id: &(PortId, ChannelId), + ) -> Result { + match self.ibc_store.lock().unwrap().next_sequence_ack.get(port_channel_id) { + Some(sequence) => Ok(*sequence), + None => Err(Ics04Error::missing_next_ack_seq(port_channel_id.clone())), + } + } + + fn get_packet_commitment( + &self, + key: &(PortId, ChannelId, Sequence), + ) -> Result { + match self.ibc_store.lock().unwrap().packet_commitment.get(key) { + Some(commitment) => Ok(commitment.clone()), + None => Err(Ics04Error::packet_commitment_not_found(key.2)), + } + } + + fn get_packet_receipt( + &self, + key: &(PortId, ChannelId, Sequence), + ) -> Result { + match self.ibc_store.lock().unwrap().packet_receipt.get(key) { + Some(receipt) => Ok(receipt.clone()), + None => Err(Ics04Error::packet_receipt_not_found(key.2)), + } + } + + fn get_packet_acknowledgement( + &self, + key: &(PortId, ChannelId, Sequence), + ) -> Result { + match self.ibc_store.lock().unwrap().packet_acknowledgement.get(key) { + Some(ack) => Ok(ack.clone()), + None => Err(Ics04Error::packet_acknowledgement_not_found(key.2)), + } + } + + fn hash(&self, value: Vec) -> Vec { + sha2::Sha256::digest(value).to_vec() + } + + fn client_update_time( + &self, + client_id: &ClientId, + height: Height, + ) -> Result { + match self + .ibc_store + .lock() + .unwrap() + 
.client_processed_times + .get(&(client_id.clone(), height)) + { + Some(time) => Ok(*time), + None => Err(Ics04Error::processed_time_not_found(client_id.clone(), height)), + } + } + + fn client_update_height( + &self, + client_id: &ClientId, + height: Height, + ) -> Result { + match self + .ibc_store + .lock() + .unwrap() + .client_processed_heights + .get(&(client_id.clone(), height)) + { + Some(height) => Ok(*height), + None => Err(Ics04Error::processed_height_not_found(client_id.clone(), height)), + } + } + + fn channel_counter(&self) -> Result { + Ok(self.ibc_store.lock().unwrap().channel_ids_counter) + } + + fn max_expected_time_per_block(&self) -> Duration { + self.block_time + } } -impl ChannelKeeper for MockContext { - fn store_packet_commitment( - &mut self, - key: (PortId, ChannelId, Sequence), - commitment: PacketCommitment, - ) -> Result<(), Ics04Error> { - self.ibc_store - .lock() - .unwrap() - .packet_commitment - .insert(key, commitment); - Ok(()) - } - - fn store_packet_acknowledgement( - &mut self, - key: (PortId, ChannelId, Sequence), - ack_commitment: AcknowledgementCommitment, - ) -> Result<(), Ics04Error> { - self.ibc_store - .lock() - .unwrap() - .packet_acknowledgement - .insert(key, ack_commitment); - Ok(()) - } - - fn delete_packet_acknowledgement( - &mut self, - key: (PortId, ChannelId, Sequence), - ) -> Result<(), Ics04Error> { - self.ibc_store - .lock() - .unwrap() - .packet_acknowledgement - .remove(&key); - Ok(()) - } - - fn store_connection_channels( - &mut self, - cid: ConnectionId, - port_channel_id: &(PortId, ChannelId), - ) -> Result<(), Ics04Error> { - self.ibc_store - .lock() - .unwrap() - .connection_channels - .entry(cid) - .or_insert_with(Vec::new) - .push(port_channel_id.clone()); - Ok(()) - } - - fn store_channel( - &mut self, - port_channel_id: (PortId, ChannelId), - channel_end: &ChannelEnd, - ) -> Result<(), Ics04Error> { - self.ibc_store - .lock() - .unwrap() - .channels - .insert(port_channel_id, channel_end.clone()); - Ok(()) - } - - fn store_next_sequence_send( - &mut self, - port_channel_id: (PortId, ChannelId), - seq: Sequence, - ) -> Result<(), Ics04Error> { - self.ibc_store - .lock() - .unwrap() - .next_sequence_send - .insert(port_channel_id, seq); - Ok(()) - } - - fn store_next_sequence_recv( - &mut self, - port_channel_id: (PortId, ChannelId), - seq: Sequence, - ) -> Result<(), Ics04Error> { - self.ibc_store - .lock() - .unwrap() - .next_sequence_recv - .insert(port_channel_id, seq); - Ok(()) - } - - fn store_next_sequence_ack( - &mut self, - port_channel_id: (PortId, ChannelId), - seq: Sequence, - ) -> Result<(), Ics04Error> { - self.ibc_store - .lock() - .unwrap() - .next_sequence_ack - .insert(port_channel_id, seq); - Ok(()) - } - - fn increase_channel_counter(&mut self) { - self.ibc_store.lock().unwrap().channel_ids_counter += 1; - } - - fn delete_packet_commitment( - &mut self, - key: (PortId, ChannelId, Sequence), - ) -> Result<(), Ics04Error> { - self.ibc_store - .lock() - .unwrap() - .packet_commitment - .remove(&key); - Ok(()) - } - - fn store_packet_receipt( - &mut self, - key: (PortId, ChannelId, Sequence), - receipt: Receipt, - ) -> Result<(), Ics04Error> { - self.ibc_store - .lock() - .unwrap() - .packet_receipt - .insert(key, receipt); - Ok(()) - } - - fn store_send_packet( - &mut self, - _key: (PortId, ChannelId, Sequence), - _packet: crate::core::ics04_channel::packet::Packet, - ) -> Result<(), Ics04Error> { - Ok(()) - } - - fn store_recv_packet( - &mut self, - _key: (PortId, ChannelId, Sequence), - _packet: 
crate::core::ics04_channel::packet::Packet, - ) -> Result<(), Ics04Error> { - Ok(()) - } +impl ChannelKeeper for MockContext { + fn store_packet_commitment( + &mut self, + key: (PortId, ChannelId, Sequence), + commitment: PacketCommitment, + ) -> Result<(), Ics04Error> { + self.ibc_store.lock().unwrap().packet_commitment.insert(key, commitment); + Ok(()) + } + + fn store_packet_acknowledgement( + &mut self, + key: (PortId, ChannelId, Sequence), + ack_commitment: AcknowledgementCommitment, + ) -> Result<(), Ics04Error> { + self.ibc_store + .lock() + .unwrap() + .packet_acknowledgement + .insert(key, ack_commitment); + Ok(()) + } + + fn delete_packet_acknowledgement( + &mut self, + key: (PortId, ChannelId, Sequence), + ) -> Result<(), Ics04Error> { + self.ibc_store.lock().unwrap().packet_acknowledgement.remove(&key); + Ok(()) + } + + fn store_connection_channels( + &mut self, + cid: ConnectionId, + port_channel_id: &(PortId, ChannelId), + ) -> Result<(), Ics04Error> { + self.ibc_store + .lock() + .unwrap() + .connection_channels + .entry(cid) + .or_insert_with(Vec::new) + .push(port_channel_id.clone()); + Ok(()) + } + + fn store_channel( + &mut self, + port_channel_id: (PortId, ChannelId), + channel_end: &ChannelEnd, + ) -> Result<(), Ics04Error> { + self.ibc_store + .lock() + .unwrap() + .channels + .insert(port_channel_id, channel_end.clone()); + Ok(()) + } + + fn store_next_sequence_send( + &mut self, + port_channel_id: (PortId, ChannelId), + seq: Sequence, + ) -> Result<(), Ics04Error> { + self.ibc_store.lock().unwrap().next_sequence_send.insert(port_channel_id, seq); + Ok(()) + } + + fn store_next_sequence_recv( + &mut self, + port_channel_id: (PortId, ChannelId), + seq: Sequence, + ) -> Result<(), Ics04Error> { + self.ibc_store.lock().unwrap().next_sequence_recv.insert(port_channel_id, seq); + Ok(()) + } + + fn store_next_sequence_ack( + &mut self, + port_channel_id: (PortId, ChannelId), + seq: Sequence, + ) -> Result<(), Ics04Error> { + self.ibc_store.lock().unwrap().next_sequence_ack.insert(port_channel_id, seq); + Ok(()) + } + + fn increase_channel_counter(&mut self) { + self.ibc_store.lock().unwrap().channel_ids_counter += 1; + } + + fn delete_packet_commitment( + &mut self, + key: (PortId, ChannelId, Sequence), + ) -> Result<(), Ics04Error> { + self.ibc_store.lock().unwrap().packet_commitment.remove(&key); + Ok(()) + } + + fn store_packet_receipt( + &mut self, + key: (PortId, ChannelId, Sequence), + receipt: Receipt, + ) -> Result<(), Ics04Error> { + self.ibc_store.lock().unwrap().packet_receipt.insert(key, receipt); + Ok(()) + } + + fn store_send_packet( + &mut self, + _key: (PortId, ChannelId, Sequence), + _packet: crate::core::ics04_channel::packet::Packet, + ) -> Result<(), Ics04Error> { + Ok(()) + } + + fn store_recv_packet( + &mut self, + _key: (PortId, ChannelId, Sequence), + _packet: crate::core::ics04_channel::packet::Packet, + ) -> Result<(), Ics04Error> { + Ok(()) + } } -impl ConnectionReader for MockContext { - fn connection_end(&self, cid: &ConnectionId) -> Result { - match self.ibc_store.lock().unwrap().connections.get(cid) { - Some(connection_end) => Ok(connection_end.clone()), - None => Err(Ics03Error::connection_not_found(cid.clone())), - } - } - - fn host_oldest_height(&self) -> Height { - // history must be non-empty, so `self.history[0]` is valid - self.history[0].height() - } - - fn commitment_prefix(&self) -> CommitmentPrefix { - CommitmentPrefix::try_from(b"mock".to_vec()).unwrap() - } - - fn connection_counter(&self) -> Result { - 
Ok(self.ibc_store.lock().unwrap().connection_ids_counter) - } +impl ConnectionReader for MockContext { + fn connection_end(&self, cid: &ConnectionId) -> Result { + match self.ibc_store.lock().unwrap().connections.get(cid) { + Some(connection_end) => Ok(connection_end.clone()), + None => Err(Ics03Error::connection_not_found(cid.clone())), + } + } + + fn host_oldest_height(&self) -> Height { + // history must be non-empty, so `self.history[0]` is valid + self.history[0].height() + } + + fn commitment_prefix(&self) -> CommitmentPrefix { + CommitmentPrefix::try_from(b"mock".to_vec()).unwrap() + } + + fn connection_counter(&self) -> Result { + Ok(self.ibc_store.lock().unwrap().connection_ids_counter) + } } -impl ConnectionKeeper for MockContext { - fn store_connection( - &mut self, - connection_id: ConnectionId, - connection_end: &ConnectionEnd, - ) -> Result<(), Ics03Error> { - self.ibc_store - .lock() - .unwrap() - .connections - .insert(connection_id, connection_end.clone()); - Ok(()) - } - - fn store_connection_to_client( - &mut self, - connection_id: ConnectionId, - client_id: &ClientId, - ) -> Result<(), Ics03Error> { - self.ibc_store - .lock() - .unwrap() - .client_connections - .insert(client_id.clone(), connection_id); - Ok(()) - } - - fn increase_connection_counter(&mut self) { - self.ibc_store.lock().unwrap().connection_ids_counter += 1; - } +impl ConnectionKeeper for MockContext { + fn store_connection( + &mut self, + connection_id: ConnectionId, + connection_end: &ConnectionEnd, + ) -> Result<(), Ics03Error> { + self.ibc_store + .lock() + .unwrap() + .connections + .insert(connection_id, connection_end.clone()); + Ok(()) + } + + fn store_connection_to_client( + &mut self, + connection_id: ConnectionId, + client_id: &ClientId, + ) -> Result<(), Ics03Error> { + self.ibc_store + .lock() + .unwrap() + .client_connections + .insert(client_id.clone(), connection_id); + Ok(()) + } + + fn increase_connection_counter(&mut self) { + self.ibc_store.lock().unwrap().connection_ids_counter += 1; + } } -impl ClientReader for MockContext { - fn client_type(&self, client_id: &ClientId) -> Result { - match self.ibc_store.lock().unwrap().clients.get(client_id) { - Some(client_record) => Ok(client_record.client_type), - None => Err(Ics02Error::client_not_found(client_id.clone())), - } - } - - fn client_state(&self, client_id: &ClientId) -> Result { - match self.ibc_store.lock().unwrap().clients.get(client_id) { - Some(client_record) => client_record - .client_state - .clone() - .ok_or_else(|| Ics02Error::client_not_found(client_id.clone())), - None => Err(Ics02Error::client_not_found(client_id.clone())), - } - } - - fn consensus_state( - &self, - client_id: &ClientId, - height: Height, - ) -> Result { - match self.ibc_store.lock().unwrap().clients.get(client_id) { - Some(client_record) => match client_record.consensus_states.get(&height) { - Some(consensus_state) => Ok(consensus_state.clone()), - None => Err(Ics02Error::consensus_state_not_found( - client_id.clone(), - height, - )), - }, - None => Err(Ics02Error::consensus_state_not_found( - client_id.clone(), - height, - )), - } - } - - fn host_client_type(&self) -> ClientType { - ClientType::Tendermint - } - - /// Search for the lowest consensus state higher than `height`. 
- fn next_consensus_state( - &self, - client_id: &ClientId, - height: Height, - ) -> Result, Ics02Error> { - let ibc_store = self.ibc_store.lock().unwrap(); - let client_record = ibc_store - .clients - .get(client_id) - .ok_or_else(|| Ics02Error::client_not_found(client_id.clone()))?; - - // Get the consensus state heights and sort them in ascending order. - let mut heights: Vec = client_record.consensus_states.keys().cloned().collect(); - heights.sort(); - - // Search for next state. - for h in heights { - if h > height { - // unwrap should never happen, as the consensus state for h must exist - return Ok(Some( - client_record.consensus_states.get(&h).unwrap().clone(), - )); - } - } - Ok(None) - } - - /// Search for the highest consensus state lower than `height`. - fn prev_consensus_state( - &self, - client_id: &ClientId, - height: Height, - ) -> Result, Ics02Error> { - let ibc_store = self.ibc_store.lock().unwrap(); - let client_record = ibc_store - .clients - .get(client_id) - .ok_or_else(|| Ics02Error::client_not_found(client_id.clone()))?; - - // Get the consensus state heights and sort them in descending order. - let mut heights: Vec = client_record.consensus_states.keys().cloned().collect(); - heights.sort_by(|a, b| b.cmp(a)); - - // Search for previous state. - for h in heights { - if h < height { - // unwrap should never happen, as the consensus state for h must exist - return Ok(Some( - client_record.consensus_states.get(&h).unwrap().clone(), - )); - } - } - Ok(None) - } - - fn host_height(&self) -> Height { - self.latest_height() - } - - fn host_timestamp(&self) -> Timestamp { - if self.host_chain_type == HostType::Beefy { - (Timestamp::now() + Duration::from_secs(86400)).unwrap() - } else { - self.history - .last() - .expect("history cannot be empty") - .timestamp() - .add(self.block_time) - .unwrap() - } - } - - fn host_consensus_state( - &self, - height: Height, - _proof: Option>, - ) -> Result { - match self.host_block(height) { - Some(block_ref) => Ok(block_ref.clone().into()), - None => Err(Ics02Error::missing_local_consensus_state(height)), - } - } - - fn client_counter(&self) -> Result { - Ok(self.ibc_store.lock().unwrap().client_ids_counter) - } +impl ClientReader for MockContext { + fn client_type(&self, client_id: &ClientId) -> Result { + match self.ibc_store.lock().unwrap().clients.get(client_id) { + Some(client_record) => Ok(client_record.client_type), + None => Err(Ics02Error::client_not_found(client_id.clone())), + } + } + + fn client_state(&self, client_id: &ClientId) -> Result { + match self.ibc_store.lock().unwrap().clients.get(client_id) { + Some(client_record) => client_record + .client_state + .clone() + .ok_or_else(|| Ics02Error::client_not_found(client_id.clone())), + None => Err(Ics02Error::client_not_found(client_id.clone())), + } + } + + fn consensus_state( + &self, + client_id: &ClientId, + height: Height, + ) -> Result { + match self.ibc_store.lock().unwrap().clients.get(client_id) { + Some(client_record) => match client_record.consensus_states.get(&height) { + Some(consensus_state) => Ok(consensus_state.clone()), + None => Err(Ics02Error::consensus_state_not_found(client_id.clone(), height)), + }, + None => Err(Ics02Error::consensus_state_not_found(client_id.clone(), height)), + } + } + + /// Search for the lowest consensus state higher than `height`. 
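The two search helpers that follow scan the consensus-state heights linearly after sorting them; in miniature, and with made-up heights, the expected behaviour matches a `BTreeMap` range query (a sketch of the same semantics, not the patch's code):

    use std::collections::BTreeMap;

    fn main() {
        // Hypothetical consensus states recorded at heights 3, 5 and 9.
        let states: BTreeMap<u64, &str> =
            [(3, "cs3"), (5, "cs5"), (9, "cs9")].into_iter().collect();
        // `next_consensus_state` at height 5 finds the lowest height strictly above it: 9.
        assert_eq!(states.range(6..).next().map(|(h, _)| *h), Some(9));
        // `prev_consensus_state` at height 5 finds the highest height strictly below it: 3.
        assert_eq!(states.range(..5).next_back().map(|(h, _)| *h), Some(3));
    }
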
+ fn next_consensus_state( + &self, + client_id: &ClientId, + height: Height, + ) -> Result, Ics02Error> { + let ibc_store = self.ibc_store.lock().unwrap(); + let client_record = ibc_store + .clients + .get(client_id) + .ok_or_else(|| Ics02Error::client_not_found(client_id.clone()))?; + + // Get the consensus state heights and sort them in ascending order. + let mut heights: Vec = client_record.consensus_states.keys().cloned().collect(); + heights.sort(); + + // Search for next state. + for h in heights { + if h > height { + // unwrap should never happen, as the consensus state for h must exist + return Ok(Some(client_record.consensus_states.get(&h).unwrap().clone())) + } + } + Ok(None) + } + + /// Search for the highest consensus state lower than `height`. + fn prev_consensus_state( + &self, + client_id: &ClientId, + height: Height, + ) -> Result, Ics02Error> { + let ibc_store = self.ibc_store.lock().unwrap(); + let client_record = ibc_store + .clients + .get(client_id) + .ok_or_else(|| Ics02Error::client_not_found(client_id.clone()))?; + + // Get the consensus state heights and sort them in descending order. + let mut heights: Vec = client_record.consensus_states.keys().cloned().collect(); + heights.sort_by(|a, b| b.cmp(a)); + + // Search for previous state. + for h in heights { + if h < height { + // unwrap should never happen, as the consensus state for h must exist + return Ok(Some(client_record.consensus_states.get(&h).unwrap().clone())) + } + } + Ok(None) + } + + fn host_height(&self) -> Height { + self.latest_height() + } + + fn host_timestamp(&self) -> Timestamp { + self.history + .last() + .expect("history cannot be empty") + .timestamp() + .add(self.block_time) + .unwrap() + } + + fn host_consensus_state( + &self, + height: Height, + _proof: Option>, + ) -> Result { + match self.host_block(height) { + Some(block_ref) => Ok(block_ref.clone().into()), + None => Err(Ics02Error::missing_local_consensus_state(height)), + } + } + + fn client_counter(&self) -> Result { + Ok(self.ibc_store.lock().unwrap().client_ids_counter) + } } -impl ClientKeeper for MockContext { - fn store_client_type( - &mut self, - client_id: ClientId, - client_type: ClientType, - ) -> Result<(), Ics02Error> { - let mut ibc_store = self.ibc_store.lock().unwrap(); - let client_record = ibc_store - .clients - .entry(client_id) - .or_insert(MockClientRecord { - client_type, - consensus_states: Default::default(), - client_state: Default::default(), - }); - - client_record.client_type = client_type; - Ok(()) - } - - fn store_client_state( - &mut self, - client_id: ClientId, - client_state: AnyClientState, - ) -> Result<(), Ics02Error> { - let mut ibc_store = self.ibc_store.lock().unwrap(); - let client_record = ibc_store - .clients - .entry(client_id) - .or_insert(MockClientRecord { - client_type: client_state.client_type(), - consensus_states: Default::default(), - client_state: Default::default(), - }); - - client_record.client_state = Some(client_state); - Ok(()) - } - - fn store_consensus_state( - &mut self, - client_id: ClientId, - height: Height, - consensus_state: AnyConsensusState, - ) -> Result<(), Ics02Error> { - let mut ibc_store = self.ibc_store.lock().unwrap(); - let client_record = ibc_store - .clients - .entry(client_id) - .or_insert(MockClientRecord { - client_type: ClientType::Mock, - consensus_states: Default::default(), - client_state: Default::default(), - }); - - client_record - .consensus_states - .insert(height, consensus_state); - Ok(()) - } - - fn increase_client_counter(&mut self) { - 
self.ibc_store.lock().unwrap().client_ids_counter += 1 - } - - fn store_update_time( - &mut self, - client_id: ClientId, - height: Height, - timestamp: Timestamp, - ) -> Result<(), Ics02Error> { - let _ = self - .ibc_store - .lock() - .unwrap() - .client_processed_times - .insert((client_id, height), timestamp); - Ok(()) - } - - fn store_update_height( - &mut self, - client_id: ClientId, - height: Height, - host_height: Height, - ) -> Result<(), Ics02Error> { - let _ = self - .ibc_store - .lock() - .unwrap() - .client_processed_heights - .insert((client_id, height), host_height); - Ok(()) - } - - fn validate_self_client(&self, _client_state: &AnyClientState) -> Result<(), Ics02Error> { - Ok(()) - } +#[derive(Debug, Eq, PartialEq, Clone, Default)] +pub struct MockClientTypes; + +pub trait ClientTypes +where + Self: Clone + Debug + Eq, +{ + type AnyHeader: Header + TryFrom + Into + From; + type AnyClientState: ClientState + + Eq + + TryFrom + + Into; + type AnyConsensusState: ConsensusState + + Eq + + TryFrom + + Into + + From + + 'static; + type AnyMisbehaviour: Misbehaviour; + type HostFunctions: ics23::HostFunctionsProvider; + type ClientDef: ClientDef< + Header = Self::AnyHeader, + ClientState = Self::AnyClientState, + ConsensusState = Self::AnyConsensusState, + >; + type HostBlock: HostBlock + Debug + Clone; } -impl Ics18Context for MockContext { - fn query_latest_height(&self) -> Height { - self.host_height() - } - - fn query_client_full_state(&self, client_id: &ClientId) -> Option { - // Forward call to Ics2. - ClientReader::client_state(self, client_id).ok() - } - - fn query_latest_header(&self) -> Option { - let block_ref = self.host_block(self.host_height()); - block_ref.cloned().map(Into::into) - } - - fn send(&mut self, msgs: Vec) -> Result, Ics18Error> { - // Forward call to Ics26 delivery method. - let mut all_events = vec![]; - for msg in msgs { - let MsgReceipt { mut events, .. 
} = - deliver::<_, Crypto>(self, msg).map_err(Ics18Error::transaction_failed)?; - all_events.append(&mut events); - } - self.advance_host_chain_height(); // Advance chain height - Ok(all_events) - } - - fn signer(&self) -> Signer { - "0CDA3F47EF3C4906693B170EF650EB968C5F4B2C".parse().unwrap() - } +impl ClientTypes for MockClientTypes { + type AnyHeader = AnyHeader; + type AnyClientState = AnyClientState; + type AnyConsensusState = AnyConsensusState; + type AnyMisbehaviour = AnyMisbehaviour; + type HostFunctions = Crypto; + type ClientDef = AnyClient; + type HostBlock = MockHostBlock; +} + +impl ClientKeeper for MockContext { + type AnyHeader = C::AnyHeader; + type AnyClientState = C::AnyClientState; + type AnyConsensusState = C::AnyConsensusState; + type AnyMisbehaviour = C::AnyMisbehaviour; + type ClientDef = C::ClientDef; + + fn store_client_type( + &mut self, + client_id: ClientId, + client_type: ClientType, + ) -> Result<(), Ics02Error> { + let mut ibc_store = self.ibc_store.lock().unwrap(); + let client_record = ibc_store.clients.entry(client_id).or_insert(MockClientRecord { + client_type, + consensus_states: Default::default(), + client_state: Default::default(), + }); + + client_record.client_type = client_type; + Ok(()) + } + + fn store_client_state( + &mut self, + client_id: ClientId, + client_state: C::AnyClientState, + ) -> Result<(), Ics02Error> { + let mut ibc_store = self.ibc_store.lock().unwrap(); + let client_record = ibc_store.clients.entry(client_id).or_insert(MockClientRecord { + client_type: client_state.client_type(), + consensus_states: Default::default(), + client_state: Default::default(), + }); + + client_record.client_state = Some(client_state); + Ok(()) + } + + fn store_consensus_state( + &mut self, + client_id: ClientId, + height: Height, + consensus_state: C::AnyConsensusState, + ) -> Result<(), Ics02Error> { + let mut ibc_store = self.ibc_store.lock().unwrap(); + let client_record = ibc_store.clients.entry(client_id).or_insert(MockClientRecord { + client_type: MockClientState::client_type(), + consensus_states: Default::default(), + client_state: Default::default(), + }); + + client_record.consensus_states.insert(height, consensus_state); + Ok(()) + } + + fn increase_client_counter(&mut self) { + self.ibc_store.lock().unwrap().client_ids_counter += 1 + } + + fn store_update_time( + &mut self, + client_id: ClientId, + height: Height, + timestamp: Timestamp, + ) -> Result<(), Ics02Error> { + let _ = self + .ibc_store + .lock() + .unwrap() + .client_processed_times + .insert((client_id, height), timestamp); + Ok(()) + } + + fn store_update_height( + &mut self, + client_id: ClientId, + height: Height, + host_height: Height, + ) -> Result<(), Ics02Error> { + let _ = self + .ibc_store + .lock() + .unwrap() + .client_processed_heights + .insert((client_id, height), host_height); + Ok(()) + } + + fn validate_self_client(&self, _client_state: &C::AnyClientState) -> Result<(), Ics02Error> { + Ok(()) + } } #[cfg(test)] mod tests { - use test_log::test; - - use alloc::str::FromStr; - - use crate::core::ics04_channel::channel::{Counterparty, Order}; - use crate::core::ics04_channel::error::Error; - use crate::core::ics04_channel::packet::Packet; - use crate::core::ics04_channel::Version; - use crate::core::ics24_host::identifier::ChainId; - use crate::core::ics24_host::identifier::{ChannelId, ConnectionId, PortId}; - use crate::core::ics26_routing::context::{ - Acknowledgement, Module, ModuleId, ModuleOutputBuilder, OnRecvPacketAck, Router, - RouterBuilder, - }; - use 
crate::mock::context::MockContext; - use crate::mock::context::MockRouterBuilder; - use crate::mock::host::HostType; - use crate::prelude::*; - use crate::signer::Signer; - use crate::test_utils::get_dummy_bech32_account; - use crate::Height; - - #[test] - fn test_history_manipulation() { - pub struct Test { - name: String, - ctx: MockContext, - } - let cv = 1; // The version to use for all chains. - - let tests: Vec = vec![ - Test { - name: "Empty history, small pruning window".to_string(), - ctx: MockContext::new( - ChainId::new("mockgaia".to_string(), cv), - HostType::Mock, - 2, - Height::new(cv, 1), - ), - }, - Test { - name: "[Synthetic TM host] Empty history, small pruning window".to_string(), - ctx: MockContext::new( - ChainId::new("mocksgaia".to_string(), cv), - HostType::SyntheticTendermint, - 2, - Height::new(cv, 1), - ), - }, - Test { - name: "Large pruning window".to_string(), - ctx: MockContext::new( - ChainId::new("mockgaia".to_string(), cv), - HostType::Mock, - 30, - Height::new(cv, 2), - ), - }, - Test { - name: "[Synthetic TM host] Large pruning window".to_string(), - ctx: MockContext::new( - ChainId::new("mocksgaia".to_string(), cv), - HostType::SyntheticTendermint, - 30, - Height::new(cv, 2), - ), - }, - Test { - name: "Small pruning window".to_string(), - ctx: MockContext::new( - ChainId::new("mockgaia".to_string(), cv), - HostType::Mock, - 3, - Height::new(cv, 30), - ), - }, - Test { - name: "[Synthetic TM host] Small pruning window".to_string(), - ctx: MockContext::new( - ChainId::new("mockgaia".to_string(), cv), - HostType::SyntheticTendermint, - 3, - Height::new(cv, 30), - ), - }, - Test { - name: "Small pruning window, small starting height".to_string(), - ctx: MockContext::new( - ChainId::new("mockgaia".to_string(), cv), - HostType::Mock, - 3, - Height::new(cv, 2), - ), - }, - Test { - name: "[Synthetic TM host] Small pruning window, small starting height".to_string(), - ctx: MockContext::new( - ChainId::new("mockgaia".to_string(), cv), - HostType::SyntheticTendermint, - 3, - Height::new(cv, 2), - ), - }, - Test { - name: "Large pruning window, large starting height".to_string(), - ctx: MockContext::new( - ChainId::new("mockgaia".to_string(), cv), - HostType::Mock, - 50, - Height::new(cv, 2000), - ), - }, - Test { - name: "[Synthetic TM host] Large pruning window, large starting height".to_string(), - ctx: MockContext::new( - ChainId::new("mockgaia".to_string(), cv), - HostType::SyntheticTendermint, - 50, - Height::new(cv, 2000), - ), - }, - ]; - - for mut test in tests { - // All tests should yield a valid context after initialization. - assert!( - test.ctx.validate().is_ok(), - "failed in test {} while validating context {:?}", - test.name, - test.ctx - ); - - let current_height = test.ctx.latest_height(); - - // After advancing the chain's height, the context should still be valid. 
- test.ctx.advance_host_chain_height(); - assert!( - test.ctx.validate().is_ok(), - "failed in test {} while validating context {:?}", - test.name, - test.ctx - ); - - let next_height = current_height.increment(); - assert_eq!( - test.ctx.latest_height(), - next_height, - "failed while increasing height for context {:?}", - test.ctx - ); - if current_height > Height::new(cv, 0) { - assert_eq!( - test.ctx.host_block(current_height).unwrap().height(), - current_height, - "failed while fetching height {:?} of context {:?}", - current_height, - test.ctx - ); - } - } - } - - #[test] - fn test_router() { - #[derive(Default)] - struct MockAck(Vec); - - impl AsRef<[u8]> for MockAck { - fn as_ref(&self) -> &[u8] { - self.0.as_slice() - } - } - - impl Acknowledgement for MockAck {} - - #[derive(Debug, Default)] - struct FooModule { - counter: usize, - } - - impl Module for FooModule { - fn on_chan_open_try( - &mut self, - _output: &mut ModuleOutputBuilder, - _order: Order, - _connection_hops: &[ConnectionId], - _port_id: &PortId, - _channel_id: &ChannelId, - _counterparty: &Counterparty, - _version: &Version, - counterparty_version: &Version, - ) -> Result { - Ok(counterparty_version.clone()) - } - - fn on_recv_packet( - &self, - _output: &mut ModuleOutputBuilder, - _packet: &Packet, - _relayer: &Signer, - ) -> OnRecvPacketAck { - OnRecvPacketAck::Successful( - Box::new(MockAck::default()), - Box::new(|module| { - let module = module.downcast_mut::().unwrap(); - module.counter += 1; - Ok(()) - }), - ) - } - } - - #[derive(Debug, Default)] - struct BarModule; - - impl Module for BarModule { - fn on_chan_open_try( - &mut self, - _output: &mut ModuleOutputBuilder, - _order: Order, - _connection_hops: &[ConnectionId], - _port_id: &PortId, - _channel_id: &ChannelId, - _counterparty: &Counterparty, - _version: &Version, - counterparty_version: &Version, - ) -> Result { - Ok(counterparty_version.clone()) - } - } - - let r = MockRouterBuilder::default() - .add_route("foomodule".parse().unwrap(), FooModule::default()) - .unwrap() - .add_route("barmodule".parse().unwrap(), BarModule::default()) - .unwrap() - .build(); - - let mut ctx = MockContext::new( - ChainId::new("mockgaia".to_string(), 1), - HostType::Mock, - 1, - Height::new(1, 1), - ) - .with_router(r); - - let mut on_recv_packet_result = |module_id: &'static str| { - let module_id = ModuleId::from_str(module_id).unwrap(); - let m = ctx.router.get_route_mut(&module_id).unwrap(); - let result = m.on_recv_packet( - &mut ModuleOutputBuilder::new(), - &Packet::default(), - &get_dummy_bech32_account().parse().unwrap(), - ); - (module_id, result) - }; - - let results = vec![ - on_recv_packet_result("foomodule"), - on_recv_packet_result("barmodule"), - ]; - results - .into_iter() - .filter_map(|(mid, result)| match result { - OnRecvPacketAck::Nil(write_fn) | OnRecvPacketAck::Successful(_, write_fn) => { - Some((mid, write_fn)) - } - _ => None, - }) - .for_each(|(mid, write_fn)| { - write_fn(ctx.router.get_route_mut(&mid).unwrap().as_any_mut()).unwrap() - }); - } + use test_log::test; + + use alloc::str::FromStr; + + use crate::{ + core::{ + ics04_channel::{ + channel::{Counterparty, Order}, + error::Error, + packet::Packet, + Version, + }, + ics24_host::identifier::{ChainId, ChannelId, ConnectionId, PortId}, + ics26_routing::context::{ + Acknowledgement, Module, ModuleId, ModuleOutputBuilder, OnRecvPacketAck, Router, + RouterBuilder, + }, + }, + mock::{ + context::{MockClientTypes, MockContext, MockRouterBuilder}, + host::{HostBlock, MockHostType}, + }, + 
prelude::*, + signer::Signer, + test_utils::get_dummy_bech32_account, + Height, + }; + + #[test] + fn test_history_manipulation() { + pub struct Test { + name: String, + ctx: MockContext, + } + let cv = 1; // The version to use for all chains. + + let tests: Vec = vec![ + Test { + name: "Empty history, small pruning window".to_string(), + ctx: MockContext::new( + ChainId::new("mockgaia".to_string(), cv), + MockHostType::Mock, + 2, + Height::new(cv, 1), + ), + }, + Test { + name: "Large pruning window".to_string(), + ctx: MockContext::new( + ChainId::new("mockgaia".to_string(), cv), + MockHostType::Mock, + 30, + Height::new(cv, 2), + ), + }, + Test { + name: "Small pruning window".to_string(), + ctx: MockContext::new( + ChainId::new("mockgaia".to_string(), cv), + MockHostType::Mock, + 3, + Height::new(cv, 30), + ), + }, + Test { + name: "Small pruning window, small starting height".to_string(), + ctx: MockContext::new( + ChainId::new("mockgaia".to_string(), cv), + MockHostType::Mock, + 3, + Height::new(cv, 2), + ), + }, + Test { + name: "Large pruning window, large starting height".to_string(), + ctx: MockContext::new( + ChainId::new("mockgaia".to_string(), cv), + MockHostType::Mock, + 50, + Height::new(cv, 2000), + ), + }, + ]; + + for mut test in tests { + // All tests should yield a valid context after initialization. + assert!( + test.ctx.validate().is_ok(), + "failed in test {} while validating context {:?}", + test.name, + test.ctx + ); + + let current_height = test.ctx.latest_height(); + + // After advancing the chain's height, the context should still be valid. + test.ctx.advance_host_chain_height(); + assert!( + test.ctx.validate().is_ok(), + "failed in test {} while validating context {:?}", + test.name, + test.ctx + ); + + let next_height = current_height.increment(); + assert_eq!( + test.ctx.latest_height(), + next_height, + "failed while increasing height for context {:?}", + test.ctx + ); + if current_height > Height::new(cv, 0) { + assert_eq!( + test.ctx.host_block(current_height).unwrap().height(), + current_height, + "failed while fetching height {:?} of context {:?}", + current_height, + test.ctx + ); + } + } + } + + #[test] + fn test_router() { + #[derive(Default)] + struct MockAck(Vec); + + impl AsRef<[u8]> for MockAck { + fn as_ref(&self) -> &[u8] { + self.0.as_slice() + } + } + + impl Acknowledgement for MockAck {} + + #[derive(Debug, Default)] + struct FooModule { + counter: usize, + } + + impl Module for FooModule { + fn on_chan_open_try( + &mut self, + _output: &mut ModuleOutputBuilder, + _order: Order, + _connection_hops: &[ConnectionId], + _port_id: &PortId, + _channel_id: &ChannelId, + _counterparty: &Counterparty, + _version: &Version, + counterparty_version: &Version, + ) -> Result { + Ok(counterparty_version.clone()) + } + + fn on_recv_packet( + &self, + _output: &mut ModuleOutputBuilder, + _packet: &Packet, + _relayer: &Signer, + ) -> OnRecvPacketAck { + OnRecvPacketAck::Successful( + Box::new(MockAck::default()), + Box::new(|module| { + let module = module.downcast_mut::().unwrap(); + module.counter += 1; + Ok(()) + }), + ) + } + } + + #[derive(Debug, Default)] + struct BarModule; + + impl Module for BarModule { + fn on_chan_open_try( + &mut self, + _output: &mut ModuleOutputBuilder, + _order: Order, + _connection_hops: &[ConnectionId], + _port_id: &PortId, + _channel_id: &ChannelId, + _counterparty: &Counterparty, + _version: &Version, + counterparty_version: &Version, + ) -> Result { + Ok(counterparty_version.clone()) + } + } + + let r = 
MockRouterBuilder::default() + .add_route("foomodule".parse().unwrap(), FooModule::default()) + .unwrap() + .add_route("barmodule".parse().unwrap(), BarModule::default()) + .unwrap() + .build(); + + let mut ctx = MockContext::::new( + ChainId::new("mockgaia".to_string(), 1), + MockHostType::Mock, + 1, + Height::new(1, 1), + ) + .with_router(r); + + let mut on_recv_packet_result = |module_id: &'static str| { + let module_id = ModuleId::from_str(module_id).unwrap(); + let m = ctx.router.get_route_mut(&module_id).unwrap(); + let result = m.on_recv_packet( + &mut ModuleOutputBuilder::new(), + &Packet::default(), + &get_dummy_bech32_account().parse().unwrap(), + ); + (module_id, result) + }; + + let results = vec![on_recv_packet_result("foomodule"), on_recv_packet_result("barmodule")]; + results + .into_iter() + .filter_map(|(mid, result)| match result { + OnRecvPacketAck::Nil(write_fn) | OnRecvPacketAck::Successful(_, write_fn) => + Some((mid, write_fn)), + _ => None, + }) + .for_each(|(mid, write_fn)| { + write_fn(ctx.router.get_route_mut(&mid).unwrap().as_any_mut()).unwrap() + }); + } +} + +#[cfg(test)] +impl Default for ClientId { + fn default() -> Self { + Self::new("07-tendermint", 0).unwrap() + } +} + +#[cfg(test)] +impl Default for Attributes { + fn default() -> Self { + Attributes { + height: Height::default(), + client_id: Default::default(), + client_type: "07-tendermint".to_owned(), + consensus_height: Height::default(), + } + } } diff --git a/modules/src/mock/header.rs b/modules/src/mock/header.rs index 4a0df64298..033bd0da97 100644 --- a/modules/src/mock/header.rs +++ b/modules/src/mock/header.rs @@ -3,103 +3,108 @@ use tendermint_proto::Protobuf; use ibc_proto::ibc::mock::Header as RawMockHeader; -use crate::core::ics02_client::client_consensus::AnyConsensusState; -use crate::core::ics02_client::client_type::ClientType; -use crate::core::ics02_client::error::Error; -use crate::core::ics02_client::header::AnyHeader; -use crate::core::ics02_client::header::Header; -use crate::mock::client_state::MockConsensusState; -use crate::timestamp::Timestamp; -use crate::Height; +use crate::{ + core::ics02_client::{error::Error, header::Header}, + mock::client_state::{AnyConsensusState, MockConsensusState}, + timestamp::Timestamp, + Height, +}; +use ibc_proto::google::protobuf::Any; + +pub const MOCK_HEADER_TYPE_URL: &str = "/ibc.mock.Header"; + +#[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize, Header, Protobuf)] +#[allow(clippy::large_enum_variant)] +pub enum AnyHeader { + #[ibc(proto_url = "MOCK_HEADER_TYPE_URL")] + Mock(MockHeader), +} #[derive(Copy, Clone, Default, Debug, Deserialize, PartialEq, Eq, Serialize)] pub struct MockHeader { - pub height: Height, - pub timestamp: Timestamp, + pub height: Height, + pub timestamp: Timestamp, } impl Protobuf for MockHeader {} impl TryFrom for MockHeader { - type Error = Error; + type Error = Error; - fn try_from(raw: RawMockHeader) -> Result { - Ok(MockHeader { - height: raw.height.ok_or_else(Error::missing_raw_header)?.into(), + fn try_from(raw: RawMockHeader) -> Result { + Ok(MockHeader { + height: raw.height.ok_or_else(Error::missing_raw_header)?.into(), - timestamp: Timestamp::from_nanoseconds(raw.timestamp) - .map_err(Error::invalid_packet_timestamp)?, - }) - } + timestamp: Timestamp::from_nanoseconds(raw.timestamp) + .map_err(Error::invalid_packet_timestamp)?, + }) + } } impl From for RawMockHeader { - fn from(value: MockHeader) -> Self { - RawMockHeader { - height: Some(value.height.into()), - timestamp: 
value.timestamp.nanoseconds(), - } - } + fn from(value: MockHeader) -> Self { + RawMockHeader { + height: Some(value.height.into()), + timestamp: value.timestamp.nanoseconds(), + } + } } impl MockHeader { - pub fn height(&self) -> Height { - self.height - } - - pub fn new(height: Height) -> Self { - Self { - height, - timestamp: Timestamp::now(), - } - } - - pub fn timestamp(&self) -> Timestamp { - self.timestamp - } - - pub fn with_timestamp(self, timestamp: Timestamp) -> Self { - Self { timestamp, ..self } - } + pub fn height(&self) -> Height { + self.height + } + + pub fn new(height: Height) -> Self { + Self { height, timestamp: Timestamp::now() } + } + + pub fn timestamp(&self) -> Timestamp { + self.timestamp + } + + pub fn with_timestamp(self, timestamp: Timestamp) -> Self { + Self { timestamp, ..self } + } } impl From for AnyHeader { - fn from(mh: MockHeader) -> Self { - Self::Mock(mh) - } + fn from(mh: MockHeader) -> Self { + Self::Mock(mh) + } } impl Header for MockHeader { - fn client_type(&self) -> ClientType { - ClientType::Mock - } + fn height(&self) -> Height { + self.height() + } - fn wrap_any(self) -> AnyHeader { - AnyHeader::Mock(self) - } + fn encode_to_vec(&self) -> Vec { + self.encode_vec() + } } impl From for AnyConsensusState { - fn from(h: MockHeader) -> Self { - AnyConsensusState::Mock(MockConsensusState::new(h)) - } + fn from(h: MockHeader) -> Self { + AnyConsensusState::Mock(MockConsensusState::new(h)) + } } #[cfg(test)] mod tests { - use super::*; - - #[test] - fn encode_any() { - let header = MockHeader::new(Height::new(1, 10)).with_timestamp(Timestamp::none()); - let bytes = header.wrap_any().encode_vec(); - - assert_eq!( - &bytes, - &[ - 10, 16, 47, 105, 98, 99, 46, 109, 111, 99, 107, 46, 72, 101, 97, 100, 101, 114, 18, - 6, 10, 4, 8, 1, 16, 10 - ] - ); - } + use super::*; + + #[test] + fn encode_any() { + let header = MockHeader::new(Height::new(1, 10)).with_timestamp(Timestamp::none()); + let bytes = AnyHeader::Mock(header).encode_vec(); + + assert_eq!( + &bytes, + &[ + 10, 16, 47, 105, 98, 99, 46, 109, 111, 99, 107, 46, 72, 101, 97, 100, 101, 114, 18, + 6, 10, 4, 8, 1, 16, 10 + ] + ); + } } diff --git a/modules/src/mock/host.rs b/modules/src/mock/host.rs index 7a3e974af4..b49efe1b6e 100644 --- a/modules/src/mock/host.rs +++ b/modules/src/mock/host.rs @@ -1,17 +1,16 @@ //! Host chain types and methods, used by context mock. -use tendermint_testgen::light_block::TmLightBlock; -use tendermint_testgen::{Generator, LightBlock as TestgenLightBlock}; - -use crate::clients::ics07_tendermint::consensus_state::ConsensusState as TMConsensusState; -use crate::clients::ics07_tendermint::header::Header as TMHeader; -use crate::core::ics02_client::client_consensus::AnyConsensusState; -use crate::core::ics02_client::header::AnyHeader; -use crate::core::ics24_host::identifier::ChainId; -use crate::mock::header::MockHeader; -use crate::prelude::*; -use crate::timestamp::Timestamp; -use crate::Height; +use crate::{ + core::ics24_host::identifier::ChainId, + mock::{ + client_state::AnyConsensusState, + header::{AnyHeader, MockHeader}, + }, + prelude::*, + timestamp::Timestamp, + Height, +}; +use core::fmt::Debug; /// Defines the different types of host chains that a mock context can emulate. /// The variants are as follows: @@ -19,107 +18,81 @@ use crate::Height; /// - `SyntheticTendermint`: the context has synthetically-generated Tendermint (light) blocks. /// See also the `HostBlock` enum to get more insights into the underlying block type. 
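The hunk below renames the enum to `MockHostType`, drops the synthetic Tendermint and Beefy variants from this file, and turns the host block into a `HostBlock` trait, so a mock context can presumably be instantiated over other block types. A hypothetical out-of-tree implementation, where the `Dummy*` names are not part of this patch, would look roughly like:

    use crate::{
        core::ics24_host::identifier::ChainId,
        mock::host::HostBlock,
        timestamp::Timestamp,
        Height,
    };

    #[derive(Clone, Copy, Debug, Default)]
    pub struct DummyHostType;

    #[derive(Clone, Debug)]
    pub struct DummyBlock {
        height: Height,
        timestamp: Timestamp,
    }

    impl HostBlock for DummyBlock {
        type HostType = DummyHostType;

        fn height(&self) -> Height {
            self.height
        }

        fn timestamp(&self) -> Timestamp {
            self.timestamp
        }

        fn generate_block(
            chain_id: ChainId,
            _chain_type: Self::HostType,
            height: u64,
            timestamp: Timestamp,
        ) -> Self {
            DummyBlock { height: Height::new(chain_id.version(), height), timestamp }
        }
    }
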
#[derive(Clone, Debug, PartialEq, Eq, Copy)] -pub enum HostType { - Mock, - SyntheticTendermint, - Beefy, +pub enum MockHostType { + Mock, +} + +impl Default for MockHostType { + fn default() -> Self { + MockHostType::Mock + } } /// Depending on `HostType` (the type of host chain underlying a context mock), this enum defines /// the type of blocks composing the history of the host chain. #[derive(Clone, Debug)] -pub enum HostBlock { - Mock(MockHeader), - SyntheticTendermint(Box), +pub enum MockHostBlock { + Mock(MockHeader), } -impl HostBlock { - /// Returns the height of a block. - pub fn height(&self) -> Height { - match self { - HostBlock::Mock(header) => header.height(), - HostBlock::SyntheticTendermint(light_block) => Height::new( - ChainId::chain_version(light_block.signed_header.header.chain_id.as_str()), - light_block.signed_header.header.height.value(), - ), - } - } +pub trait HostBlock { + type HostType: Debug + Default + Copy; - /// Returns the timestamp of a block. - pub fn timestamp(&self) -> Timestamp { - match self { - HostBlock::Mock(header) => header.timestamp, - HostBlock::SyntheticTendermint(light_block) => { - light_block.signed_header.header.time.into() - } - } - } + fn height(&self) -> Height; + fn timestamp(&self) -> Timestamp; + fn generate_block( + chain_id: ChainId, + chain_type: Self::HostType, + height: u64, + timestamp: Timestamp, + ) -> Self; +} - /// Generates a new block at `height` for the given chain identifier and chain type. - pub fn generate_block( - chain_id: ChainId, - chain_type: HostType, - height: u64, - timestamp: Timestamp, - ) -> HostBlock { - match chain_type { - HostType::Mock | HostType::Beefy => HostBlock::Mock(MockHeader { - height: Height::new(chain_id.version(), height), - timestamp, - }), - HostType::SyntheticTendermint => HostBlock::SyntheticTendermint(Box::new( - Self::generate_tm_block(chain_id, height, timestamp), - )), - } - } +impl HostBlock for MockHostBlock { + type HostType = MockHostType; - pub fn generate_tm_block(chain_id: ChainId, height: u64, timestamp: Timestamp) -> TmLightBlock { - TestgenLightBlock::new_default_with_time_and_chain_id( - chain_id.to_string(), - timestamp.into_tm_time().unwrap(), - height, - ) - .generate() - .unwrap() - } -} + /// Returns the height of a block. + fn height(&self) -> Height { + match self { + MockHostBlock::Mock(header) => header.height(), + } + } -impl From for AnyConsensusState { - fn from(light_block: TmLightBlock) -> Self { - let cs = TMConsensusState::from(light_block.signed_header.header); - AnyConsensusState::Tendermint(cs) - } -} + /// Returns the timestamp of a block. + fn timestamp(&self) -> Timestamp { + match self { + MockHostBlock::Mock(header) => header.timestamp, + } + } -impl From for AnyConsensusState { - fn from(any_block: HostBlock) -> Self { - match any_block { - HostBlock::Mock(mock_header) => mock_header.into(), - HostBlock::SyntheticTendermint(light_block) => (*light_block).into(), - } - } + /// Generates a new block at `height` for the given chain identifier and chain type. 
+ fn generate_block( + chain_id: ChainId, + chain_type: Self::HostType, + height: u64, + timestamp: Timestamp, + ) -> MockHostBlock { + match chain_type { + MockHostType::Mock => MockHostBlock::Mock(MockHeader { + height: Height::new(chain_id.version(), height), + timestamp, + }), + } + } } -impl From for AnyHeader { - fn from(any_block: HostBlock) -> Self { - match any_block { - HostBlock::Mock(mock_header) => mock_header.into(), - HostBlock::SyntheticTendermint(light_block_box) => { - // Conversion from TMLightBlock to AnyHeader - AnyHeader::Tendermint((*light_block_box).into()) - } - } - } +impl From for AnyConsensusState { + fn from(any_block: MockHostBlock) -> Self { + match any_block { + MockHostBlock::Mock(mock_header) => mock_header.into(), + } + } } -impl From for TMHeader { - fn from(light_block: TmLightBlock) -> Self { - // TODO: This conversion is incorrect for `trusted_height` and `trusted_validator_set`. - TMHeader { - signed_header: light_block.signed_header, - validator_set: light_block.validators, - trusted_height: Default::default(), - trusted_validator_set: light_block.next_validators, - } - } +impl From for AnyHeader { + fn from(any_block: MockHostBlock) -> Self { + match any_block { + MockHostBlock::Mock(mock_header) => mock_header.into(), + } + } } diff --git a/modules/src/mock/misbehaviour.rs b/modules/src/mock/misbehaviour.rs index 1b4d015178..8c177793a2 100644 --- a/modules/src/mock/misbehaviour.rs +++ b/modules/src/mock/misbehaviour.rs @@ -4,59 +4,66 @@ use tendermint_proto::Protobuf; use ibc_proto::ibc::mock::Misbehaviour as RawMisbehaviour; -use crate::core::ics02_client::error::Error; -use crate::core::ics02_client::misbehaviour::AnyMisbehaviour; -use crate::core::ics24_host::identifier::ClientId; -use crate::mock::header::MockHeader; -use crate::Height; +use crate::{ + core::{ + ics02_client::{error::Error, misbehaviour::Misbehaviour}, + ics24_host::identifier::ClientId, + }, + mock::header::MockHeader, + Height, +}; +use ibc_proto::google::protobuf::Any; + +pub const MOCK_MISBEHAVIOUR_TYPE_URL: &str = "/ibc.mock.Misbehavior"; + +#[derive(Clone, Debug, PartialEq, Misbehaviour, Protobuf)] +#[allow(clippy::large_enum_variant)] +pub enum AnyMisbehaviour { + #[ibc(proto_url = "MOCK_MISBEHAVIOUR_TYPE_URL")] + Mock(MockMisbehaviour), +} #[derive(Clone, Debug, PartialEq)] -pub struct Misbehaviour { - pub client_id: ClientId, - pub header1: MockHeader, - pub header2: MockHeader, +pub struct MockMisbehaviour { + pub client_id: ClientId, + pub header1: MockHeader, + pub header2: MockHeader, } -impl crate::core::ics02_client::misbehaviour::Misbehaviour for Misbehaviour { - fn client_id(&self) -> &ClientId { - &self.client_id - } +impl Misbehaviour for MockMisbehaviour { + fn client_id(&self) -> &ClientId { + &self.client_id + } - fn height(&self) -> Height { - self.header1.height() - } + fn height(&self) -> Height { + self.header1.height() + } - fn wrap_any(self) -> AnyMisbehaviour { - AnyMisbehaviour::Mock(self) - } + fn encode_to_vec(&self) -> Vec { + self.encode_vec() + } } -impl Protobuf for Misbehaviour {} - -impl TryFrom for Misbehaviour { - type Error = Error; - - fn try_from(raw: RawMisbehaviour) -> Result { - Ok(Self { - client_id: Default::default(), - header1: raw - .header1 - .ok_or_else(Error::missing_raw_misbehaviour)? - .try_into()?, - header2: raw - .header2 - .ok_or_else(Error::missing_raw_misbehaviour)? 
- .try_into()?, - }) - } +impl Protobuf for MockMisbehaviour {} + +impl TryFrom for MockMisbehaviour { + type Error = Error; + + fn try_from(raw: RawMisbehaviour) -> Result { + Ok(Self { + client_id: Default::default(), + header1: raw.header1.ok_or_else(Error::missing_raw_misbehaviour)?.try_into()?, + header2: raw.header2.ok_or_else(Error::missing_raw_misbehaviour)?.try_into()?, + }) + } } -impl From for RawMisbehaviour { - fn from(value: Misbehaviour) -> Self { - RawMisbehaviour { - client_id: value.client_id.to_string(), - header1: Some(value.header1.into()), - header2: Some(value.header2.into()), - } - } +impl From for RawMisbehaviour { + fn from(value: MockMisbehaviour) -> Self { + RawMisbehaviour { + client_id: value.client_id.to_string(), + header1: Some(value.header1.into()), + header2: Some(value.header2.into()), + } + } } diff --git a/modules/src/prelude.rs b/modules/src/prelude.rs index bdce26208e..a973303b0d 100644 --- a/modules/src/prelude.rs +++ b/modules/src/prelude.rs @@ -2,14 +2,17 @@ pub use core::prelude::v1::*; // Re-export according to alloc::prelude::v1 because it is not yet stabilized // https://doc.rust-lang.org/src/alloc/prelude/v1.rs.html -pub use alloc::borrow::ToOwned; -pub use alloc::boxed::Box; -pub use alloc::string::{String, ToString}; -pub use alloc::vec::Vec; +pub use alloc::{ + borrow::ToOwned, + boxed::Box, + string::{String, ToString}, + vec::Vec, +}; -pub use alloc::format; -pub use alloc::vec; +pub use alloc::{format, vec}; // Those are exported by default in the std prelude in Rust 2021 -pub use core::convert::{TryFrom, TryInto}; -pub use core::iter::FromIterator; +pub use core::{ + convert::{TryFrom, TryInto}, + iter::FromIterator, +}; diff --git a/modules/src/proofs.rs b/modules/src/proofs.rs index 30e84aa726..b8d285b9a1 100644 --- a/modules/src/proofs.rs +++ b/modules/src/proofs.rs @@ -1,17 +1,16 @@ use serde::Serialize; -use crate::core::ics23_commitment::commitment::CommitmentProofBytes; -use crate::Height; +use crate::{core::ics23_commitment::commitment::CommitmentProofBytes, Height}; use flex_error::define_error; define_error! { - #[derive(Debug, PartialEq, Eq)] - ProofError { - ZeroHeight - | _ | { format_args!("proof height cannot be zero") }, - EmptyProof - | _ | { format_args!("proof cannot be empty") }, - } + #[derive(Debug, PartialEq, Eq)] + ProofError { + ZeroHeight + | _ | { format_args!("proof height cannot be zero") }, + EmptyProof + | _ | { format_args!("proof cannot be empty") }, + } } /// Structure comprising proofs in a message. Proofs are typically present in messages for @@ -19,93 +18,84 @@ define_error! { /// handshake, as well as for ICS4 packets, timeouts, and acknowledgements. #[derive(Clone, Debug, PartialEq, Eq, Serialize)] pub struct Proofs { - object_proof: CommitmentProofBytes, - client_proof: Option, - consensus_proof: Option, - /// Currently used for proof_close for MsgTimeoutOnCLose where object_proof is proof_unreceived - other_proof: Option, - /// Height for the commitment root for proving the proofs above. - /// When creating these proofs, the chain is queried at `height-1`. - height: Height, + object_proof: CommitmentProofBytes, + client_proof: Option, + consensus_proof: Option, + /// Currently used for proof_close for MsgTimeoutOnCLose where object_proof is proof_unreceived + other_proof: Option, + /// Height for the commitment root for proving the proofs above. + /// When creating these proofs, the chain is queried at `height-1`. 
+ height: Height, } impl Proofs { - pub fn new( - object_proof: CommitmentProofBytes, - client_proof: Option, - consensus_proof: Option, - other_proof: Option, - height: Height, - ) -> Result { - if height.is_zero() { - return Err(ProofError::zero_height()); - } + pub fn new( + object_proof: CommitmentProofBytes, + client_proof: Option, + consensus_proof: Option, + other_proof: Option, + height: Height, + ) -> Result { + if height.is_zero() { + return Err(ProofError::zero_height()) + } - Ok(Self { - object_proof, - client_proof, - consensus_proof, - other_proof, - height, - }) - } + Ok(Self { object_proof, client_proof, consensus_proof, other_proof, height }) + } - /// Getter for the consensus_proof field of this proof. Intuitively, this is a proof that a - /// client on the source chain stores a consensus state for the destination chain. - pub fn consensus_proof(&self) -> Option { - self.consensus_proof.clone() - } + /// Getter for the consensus_proof field of this proof. Intuitively, this is a proof that a + /// client on the source chain stores a consensus state for the destination chain. + pub fn consensus_proof(&self) -> Option { + self.consensus_proof.clone() + } - /// Getter for the height field of this proof (i.e., the consensus height where this proof was - /// created). - pub fn height(&self) -> Height { - self.height - } + /// Getter for the height field of this proof (i.e., the consensus height where this proof was + /// created). + pub fn height(&self) -> Height { + self.height + } - /// Getter for the object-specific proof (e.g., proof for connection state or channel state). - pub fn object_proof(&self) -> &CommitmentProofBytes { - &self.object_proof - } + /// Getter for the object-specific proof (e.g., proof for connection state or channel state). + pub fn object_proof(&self) -> &CommitmentProofBytes { + &self.object_proof + } - /// Getter for the client_proof. - pub fn client_proof(&self) -> &Option { - &self.client_proof - } + /// Getter for the client_proof. + pub fn client_proof(&self) -> &Option { + &self.client_proof + } - /// Getter for the other_proof. - pub fn other_proof(&self) -> &Option { - &self.other_proof - } + /// Getter for the other_proof. + pub fn other_proof(&self) -> &Option { + &self.other_proof + } } #[derive(Clone, Debug, PartialEq, Eq, Serialize)] pub struct ConsensusProof { - proof: CommitmentProofBytes, - height: Height, + proof: CommitmentProofBytes, + height: Height, } impl ConsensusProof { - pub fn new( - consensus_proof: CommitmentProofBytes, - consensus_height: Height, - ) -> Result { - if consensus_height.is_zero() { - return Err(ProofError::zero_height()); - } + pub fn new( + consensus_proof: CommitmentProofBytes, + consensus_height: Height, + ) -> Result { + if consensus_height.is_zero() { + return Err(ProofError::zero_height()) + } - Ok(Self { - proof: consensus_proof, - height: consensus_height, - }) - } + Ok(Self { proof: consensus_proof, height: consensus_height }) + } - /// Getter for the height field of this consensus proof. - pub fn height(&self) -> Height { - self.height - } + /// Getter for the height field of this consensus proof. + pub fn height(&self) -> Height { + self.height + } - /// Getter for the proof (CommitmentProof) field of this consensus proof. - pub fn proof(&self) -> &CommitmentProofBytes { - &self.proof - } + /// Getter for the proof (CommitmentProof) field of this consensus proof. 
+ pub fn proof(&self) -> &CommitmentProofBytes { + &self.proof + } } diff --git a/modules/src/query.rs b/modules/src/query.rs deleted file mode 100644 index afccaba8b6..0000000000 --- a/modules/src/query.rs +++ /dev/null @@ -1,20 +0,0 @@ -use tendermint_rpc::abci::transaction::Hash; - -use crate::core::ics02_client::client_consensus::QueryClientEventRequest; -use crate::core::ics04_channel::channel::QueryPacketEventDataRequest; - -/// Used for queries and not yet standardized in channel's query.proto -#[derive(Clone, Debug)] -pub enum QueryTxRequest { - Packet(QueryPacketEventDataRequest), - Client(QueryClientEventRequest), - Transaction(QueryTxHash), -} - -#[derive(Clone, Debug)] -pub enum QueryBlockRequest { - Packet(QueryPacketEventDataRequest), -} - -#[derive(Clone, Debug)] -pub struct QueryTxHash(pub Hash); diff --git a/modules/src/relayer/ics18_relayer/context.rs b/modules/src/relayer/ics18_relayer/context.rs deleted file mode 100644 index 30665b193a..0000000000 --- a/modules/src/relayer/ics18_relayer/context.rs +++ /dev/null @@ -1,34 +0,0 @@ -use crate::prelude::*; -use ibc_proto::google::protobuf::Any; - -use crate::core::ics02_client::client_state::AnyClientState; -use crate::core::ics02_client::header::AnyHeader; -use crate::events::IbcEvent; - -use crate::core::ics24_host::identifier::ClientId; -use crate::relayer::ics18_relayer::error::Error; -use crate::signer::Signer; -use crate::Height; -/// Trait capturing all dependencies (i.e., the context) which algorithms in ICS18 require to -/// relay packets between chains. This trait comprises the dependencies towards a single chain. -/// Most of the functions in this represent wrappers over the ABCI interface. -/// This trait mimics the `Chain` trait, but at a lower level of abstraction (no networking, header -/// types, light client, RPC client, etc.) -pub trait Ics18Context { - /// Returns the latest height of the chain. - fn query_latest_height(&self) -> Height; - - /// Returns this client state for the given `client_id` on this chain. - /// Wrapper over the `/abci_query?path=..` endpoint. - fn query_client_full_state(&self, client_id: &ClientId) -> Option; - - /// Returns the most advanced header of this chain. - fn query_latest_header(&self) -> Option; - - /// Interface that the relayer uses to submit a datagram to this chain. - /// One can think of this as wrapping around the `/broadcast_tx_commit` ABCI endpoint. - fn send(&mut self, msgs: Vec) -> Result, Error>; - - /// Temporary solution. Similar to `CosmosSDKChain::key_and_signer()` but simpler. - fn signer(&self) -> Signer; -} diff --git a/modules/src/relayer/ics18_relayer/error.rs b/modules/src/relayer/ics18_relayer/error.rs deleted file mode 100644 index e975de5a3a..0000000000 --- a/modules/src/relayer/ics18_relayer/error.rs +++ /dev/null @@ -1,38 +0,0 @@ -use crate::core::ics24_host::identifier::ClientId; -use crate::core::ics26_routing::error::Error as RoutingError; -use crate::Height; -use flex_error::define_error; - -define_error! 
{ - Error { - ClientStateNotFound - { client_id: ClientId } - | e | { format_args!("client state on destination chain not found, (client id: {0})", e.client_id) }, - - ClientAlreadyUpToDate - { - client_id: ClientId, - source_height: Height, - destination_height: Height, - } - | e | { - format_args!("the client on destination chain is already up-to-date (client id: {0}, source height: {1}, dest height: {2})", - e.client_id, e.source_height, e.destination_height) - }, - - ClientAtHigherHeight - { - client_id: ClientId, - source_height: Height, - destination_height: Height, - } - | e | { - format_args!("the client on destination chain is at a higher height (client id: {0}, source height: {1}, dest height: {2})", - e.client_id, e.source_height, e.destination_height) - }, - - TransactionFailed - [ RoutingError ] - | _ | { "transaction processing by modules failed" }, - } -} diff --git a/modules/src/relayer/ics18_relayer/mod.rs b/modules/src/relayer/ics18_relayer/mod.rs deleted file mode 100644 index 254c69aa4e..0000000000 --- a/modules/src/relayer/ics18_relayer/mod.rs +++ /dev/null @@ -1,5 +0,0 @@ -//! ICS 18: Relayer contains utilities for testing `ibc` against the Hermes relayer. - -pub mod context; -pub mod error; -pub mod utils; diff --git a/modules/src/relayer/ics18_relayer/utils.rs b/modules/src/relayer/ics18_relayer/utils.rs deleted file mode 100644 index 060cef55b5..0000000000 --- a/modules/src/relayer/ics18_relayer/utils.rs +++ /dev/null @@ -1,211 +0,0 @@ -use crate::core::ics02_client::header::AnyHeader; -use crate::core::ics02_client::msgs::update_client::MsgUpdateAnyClient; -use crate::core::ics02_client::msgs::ClientMsg; -use crate::core::ics24_host::identifier::ClientId; -use crate::relayer::ics18_relayer::context::Ics18Context; -use crate::relayer::ics18_relayer::error::Error; - -/// Builds a `ClientMsg::UpdateClient` for a client with id `client_id` running on the `dest` -/// context, assuming that the latest header on the source context is `src_header`. -pub fn build_client_update_datagram( - dest: &Ctx, - client_id: &ClientId, - src_header: AnyHeader, -) -> Result -where - Ctx: Ics18Context, -{ - // Check if client for ibc0 on ibc1 has been updated to latest height: - // - query client state on destination chain - let dest_client_state = dest - .query_client_full_state(client_id) - .ok_or_else(|| Error::client_state_not_found(client_id.clone()))?; - - let dest_client_latest_height = dest_client_state.latest_height(); - - if src_header.height() == dest_client_latest_height { - return Err(Error::client_already_up_to_date( - client_id.clone(), - src_header.height(), - dest_client_latest_height, - )); - }; - - if dest_client_latest_height > src_header.height() { - return Err(Error::client_at_higher_height( - client_id.clone(), - src_header.height(), - dest_client_latest_height, - )); - }; - - // Client on destination chain can be updated. 
- Ok(ClientMsg::UpdateClient(MsgUpdateAnyClient { - client_id: client_id.clone(), - header: src_header, - signer: dest.signer(), - })) -} - -#[cfg(test)] -mod tests { - use crate::core::ics02_client::client_type::ClientType; - use crate::core::ics02_client::header::{AnyHeader, Header}; - use crate::core::ics24_host::identifier::{ChainId, ClientId}; - use crate::core::ics26_routing::msgs::Ics26Envelope; - use crate::mock::context::MockContext; - use crate::mock::host::HostType; - use crate::prelude::*; - use crate::relayer::ics18_relayer::context::Ics18Context; - use crate::relayer::ics18_relayer::utils::build_client_update_datagram; - use crate::Height; - use test_log::test; - use tracing::debug; - - #[test] - /// Serves to test both ICS 26 `dispatch` & `build_client_update_datagram` functions. - /// Implements a "ping pong" of client update messages, so that two chains repeatedly - /// process a client update message and update their height in succession. - fn client_update_ping_pong() { - let chain_a_start_height = Height::new(1, 11); - let chain_b_start_height = Height::new(1, 20); - let client_on_b_for_a_height = Height::new(1, 10); // Should be smaller than `chain_a_start_height` - let client_on_a_for_b_height = Height::new(1, 20); // Should be smaller than `chain_b_start_height` - let num_iterations = 4; - - let client_on_a_for_b = ClientId::new(ClientType::Tendermint, 0).unwrap(); - let client_on_b_for_a = ClientId::new(ClientType::Mock, 0).unwrap(); - - // Create two mock contexts, one for each chain. - let mut ctx_a = MockContext::new( - ChainId::new("mockgaiaA".to_string(), 1), - HostType::Mock, - 5, - chain_a_start_height, - ) - .with_client_parametrized( - &client_on_a_for_b, - client_on_a_for_b_height, - Some(ClientType::Tendermint), // The target host chain (B) is synthetic TM. - Some(client_on_a_for_b_height), - ); - let mut ctx_b = MockContext::new( - ChainId::new("mockgaiaB".to_string(), 1), - HostType::SyntheticTendermint, - 5, - chain_b_start_height, - ) - .with_client_parametrized( - &client_on_b_for_a, - client_on_b_for_a_height, - Some(ClientType::Mock), // The target host chain is mock. - Some(client_on_b_for_a_height), - ); - - for _i in 0..num_iterations { - // Update client on chain B to latest height of A. - // - create the client update message with the latest header from A - let a_latest_header = ctx_a.query_latest_header().unwrap(); - assert_eq!( - a_latest_header.client_type(), - ClientType::Mock, - "Client type verification in header failed for context A (Mock); got {:?} but expected {:?}", - a_latest_header.client_type(), - ClientType::Mock - ); - - let client_msg_b_res = - build_client_update_datagram(&ctx_b, &client_on_b_for_a, a_latest_header); - - assert!( - client_msg_b_res.is_ok(), - "create_client_update failed for context destination {:?}, error: {:?}", - ctx_b, - client_msg_b_res - ); - - let client_msg_b = client_msg_b_res.unwrap(); - - // - send the message to B. We bypass ICS18 interface and call directly into - // MockContext `recv` method (to avoid additional serialization steps). - let dispatch_res_b = ctx_b.deliver(Ics26Envelope::Ics2Msg(client_msg_b)); - let validation_res = ctx_b.validate(); - assert!( - validation_res.is_ok(), - "context validation failed with error {:?} for context {:?}", - validation_res, - ctx_b - ); - - // Check if the update succeeded. 
- assert!( - dispatch_res_b.is_ok(), - "Dispatch failed for host chain b with error: {:?}", - dispatch_res_b - ); - let client_height_b = ctx_b - .query_client_full_state(&client_on_b_for_a) - .unwrap() - .latest_height(); - assert_eq!(client_height_b, ctx_a.query_latest_height()); - - // Update client on chain B to latest height of B. - // - create the client update message with the latest header from B - // The test uses LightClientBlock that does not store the trusted height - let b_latest_header = match ctx_b.query_latest_header().unwrap() { - AnyHeader::Tendermint(header) => { - let th = header.height(); - let mut hheader = header.clone(); - hheader.trusted_height = th.decrement().unwrap(); - hheader.wrap_any() - } - AnyHeader::Beefy(h) => h.wrap_any(), - AnyHeader::Mock(header) => header.wrap_any(), - }; - - assert_eq!( - b_latest_header.client_type(), - ClientType::Tendermint, - "Client type verification in header failed for context B (TM); got {:?} but expected {:?}", - b_latest_header.client_type(), - ClientType::Tendermint - ); - - let client_msg_a_res = - build_client_update_datagram(&ctx_a, &client_on_a_for_b, b_latest_header); - - assert!( - client_msg_a_res.is_ok(), - "create_client_update failed for context destination {:?}, error: {:?}", - ctx_a, - client_msg_a_res - ); - - let client_msg_a = client_msg_a_res.unwrap(); - - debug!("client_msg_a = {:?}", client_msg_a); - - // - send the message to A - let dispatch_res_a = ctx_a.deliver(Ics26Envelope::Ics2Msg(client_msg_a)); - let validation_res = ctx_a.validate(); - assert!( - validation_res.is_ok(), - "context validation failed with error {:?} for context {:?}", - validation_res, - ctx_a - ); - - // Check if the update succeeded. - assert!( - dispatch_res_a.is_ok(), - "Dispatch failed for host chain a with error: {:?}", - dispatch_res_a - ); - let client_height_a = ctx_a - .query_client_full_state(&client_on_a_for_b) - .unwrap() - .latest_height(); - assert_eq!(client_height_a, ctx_b.query_latest_height()); - } - } -} diff --git a/modules/src/relayer/mod.rs b/modules/src/relayer/mod.rs deleted file mode 100644 index e88996bcd5..0000000000 --- a/modules/src/relayer/mod.rs +++ /dev/null @@ -1,5 +0,0 @@ -//! Utilities for testing the `ibc` crate against the [Hermes IBC relayer][relayer-repo]. -//! -//! 
[relayer-repo]: https://github.com/informalsystems/ibc-rs/tree/master/relayer - -pub mod ics18_relayer; diff --git a/modules/src/serializers.rs b/modules/src/serializers.rs index 20b1a65d3b..7cadbe3e43 100644 --- a/modules/src/serializers.rs +++ b/modules/src/serializers.rs @@ -1,53 +1,50 @@ use crate::prelude::*; use serde::{ - ser::{Serialize, Serializer}, - Deserialize, Deserializer, + ser::{Serialize, Serializer}, + Deserialize, Deserializer, }; use subtle_encoding::{Encoding, Hex}; pub fn ser_hex_upper(data: T, serializer: S) -> Result where - S: Serializer, - T: AsRef<[u8]>, + S: Serializer, + T: AsRef<[u8]>, { - let hex = Hex::upper_case().encode_to_string(data).unwrap(); - hex.serialize(serializer) + let hex = Hex::upper_case().encode_to_string(data).unwrap(); + hex.serialize(serializer) } pub fn deser_hex_upper<'de, T, D>(deserializer: D) -> Result where - D: Deserializer<'de>, - T: AsRef<[u8]>, - T: From>, + D: Deserializer<'de>, + T: AsRef<[u8]>, + T: From>, { - let hex = String::deserialize(deserializer)?; - let bytes = Hex::upper_case().decode(hex.as_bytes()).unwrap(); - Ok(bytes.into()) + let hex = String::deserialize(deserializer)?; + let bytes = Hex::upper_case().decode(hex.as_bytes()).unwrap(); + Ok(bytes.into()) } pub mod serde_string { - use alloc::string::String; - use core::fmt::Display; - use core::str::FromStr; + use alloc::string::String; + use core::{fmt::Display, str::FromStr}; - use serde::{de, Deserialize, Deserializer, Serializer}; + use serde::{de, Deserialize, Deserializer, Serializer}; - pub fn serialize(value: &T, serializer: S) -> Result - where - T: Display, - S: Serializer, - { - serializer.collect_str(value) - } + pub fn serialize(value: &T, serializer: S) -> Result + where + T: Display, + S: Serializer, + { + serializer.collect_str(value) + } - pub fn deserialize<'de, T, D>(deserializer: D) -> Result - where - T: FromStr, - T::Err: Display, - D: Deserializer<'de>, - { - String::deserialize(deserializer)? - .parse() - .map_err(de::Error::custom) - } + pub fn deserialize<'de, T, D>(deserializer: D) -> Result + where + T: FromStr, + T::Err: Display, + D: Deserializer<'de>, + { + String::deserialize(deserializer)?.parse().map_err(de::Error::custom) + } } diff --git a/modules/src/signer.rs b/modules/src/signer.rs index 21c62bf116..febf31da3c 100644 --- a/modules/src/signer.rs +++ b/modules/src/signer.rs @@ -7,30 +7,30 @@ use flex_error::define_error; use serde::{Deserialize, Serialize}; define_error! 
{ - #[derive(Debug, PartialEq, Eq)] - SignerError { - EmptySigner - | _ | { "signer cannot be empty" }, - } + #[derive(Debug, PartialEq, Eq)] + SignerError { + EmptySigner + | _ | { "signer cannot be empty" }, + } } #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, Display)] pub struct Signer(String); impl FromStr for Signer { - type Err = SignerError; + type Err = SignerError; - fn from_str(s: &str) -> Result { - let s = s.to_string(); - if s.trim().is_empty() { - return Err(SignerError::empty_signer()); - } - Ok(Self(s)) - } + fn from_str(s: &str) -> Result { + let s = s.to_string(); + if s.trim().is_empty() { + return Err(SignerError::empty_signer()) + } + Ok(Self(s)) + } } impl AsRef for Signer { - fn as_ref(&self) -> &str { - self.0.as_str() - } + fn as_ref(&self) -> &str { + self.0.as_str() + } } diff --git a/modules/src/test.rs b/modules/src/test.rs index b6a9cb1f34..c9dc728b75 100644 --- a/modules/src/test.rs +++ b/modules/src/test.rs @@ -9,19 +9,19 @@ use serde::{de::DeserializeOwned, Serialize}; /// - that the two parsed structs are equal according to their `PartialEq` impl pub fn test_serialization_roundtrip(json_data: &str) where - T: Debug + Serialize + DeserializeOwned, + T: Debug + Serialize + DeserializeOwned, { - let parsed0 = serde_json::from_str::(json_data); - assert!(parsed0.is_ok()); - let parsed0 = parsed0.unwrap(); + let parsed0 = serde_json::from_str::(json_data); + assert!(parsed0.is_ok()); + let parsed0 = parsed0.unwrap(); - let serialized = serde_json::to_string(&parsed0); - assert!(serialized.is_ok()); - let serialized = serialized.unwrap(); + let serialized = serde_json::to_string(&parsed0); + assert!(serialized.is_ok()); + let serialized = serialized.unwrap(); - let parsed1 = serde_json::from_str::(&serialized); - assert!(parsed1.is_ok()); + let parsed1 = serde_json::from_str::(&serialized); + assert!(parsed1.is_ok()); - // TODO - fix PartialEq bound issue in AbciQuery - //assert_eq!(parsed0, parsed1); + // TODO - fix PartialEq bound issue in AbciQuery + //assert_eq!(parsed0, parsed1); } diff --git a/modules/src/test_utils.rs b/modules/src/test_utils.rs index 1ab996cc3d..6f8ccdd168 100644 --- a/modules/src/test_utils.rs +++ b/modules/src/test_utils.rs @@ -1,628 +1,638 @@ -use codec::Encode; -use std::sync::{Arc, Mutex}; -use std::time::Duration; - -use crate::clients::host_functions::HostFunctionsProvider; -use crate::core::ics02_client::context::{ClientKeeper, ClientReader}; -use crate::core::ics03_connection::context::ConnectionReader; -use crate::prelude::*; -use sp_core::keccak_256; -use sp_trie::LayoutV0; -use tendermint::{block, consensus, evidence, public_key::Algorithm}; +use std::{ + sync::{Arc, Mutex}, + time::Duration, +}; + +use crate::{ + applications::transfer::{ + context::{BankKeeper, Ics20Context, Ics20Keeper, Ics20Reader}, + error::Error as Ics20Error, + PrefixedCoin, + }, + core::{ + ics02_client::{ + client_state::ClientType, + context::{ClientKeeper, ClientReader}, + error::Error as Ics02Error, + }, + ics03_connection::{ + connection::ConnectionEnd, context::ConnectionReader, error::Error as Ics03Error, + }, + ics04_channel::{ + channel::{ChannelEnd, Counterparty, Order}, + commitment::{AcknowledgementCommitment, PacketCommitment}, + context::{ChannelKeeper, ChannelReader}, + error::Error, + packet::{Receipt, Sequence}, + Version, + }, + ics05_port::{context::PortReader, error::Error as PortError}, + ics24_host::identifier::{ChannelId, ClientId, ConnectionId, PortId}, + ics26_routing::context::{Module, 
ModuleId, ModuleOutputBuilder, ReaderContext}, + }, + mock::context::{ClientTypes, MockIbcStore}, + prelude::*, + signer::Signer, + timestamp::Timestamp, + Height, +}; -use crate::clients::ics11_beefy::error::Error as BeefyError; -use crate::core::ics02_client::error::Error as Ics02Error; - -use crate::applications::transfer::context::{BankKeeper, Ics20Context, Ics20Keeper, Ics20Reader}; -use crate::applications::transfer::{error::Error as Ics20Error, PrefixedCoin}; -use crate::core::ics02_client::client_consensus::AnyConsensusState; -use crate::core::ics02_client::client_state::AnyClientState; -use crate::core::ics02_client::client_type::ClientType; -use crate::core::ics03_connection::connection::ConnectionEnd; -use crate::core::ics03_connection::error::Error as Ics03Error; -use crate::core::ics04_channel::channel::{ChannelEnd, Counterparty, Order}; -use crate::core::ics04_channel::commitment::{AcknowledgementCommitment, PacketCommitment}; -use crate::core::ics04_channel::context::{ChannelKeeper, ChannelReader}; -use crate::core::ics04_channel::error::Error; -use crate::core::ics04_channel::packet::{Receipt, Sequence}; -use crate::core::ics04_channel::Version; -use crate::core::ics05_port::context::PortReader; -use crate::core::ics05_port::error::Error as PortError; -use crate::core::ics24_host::identifier::{ChannelId, ClientId, ConnectionId, PortId}; -use crate::core::ics26_routing::context::{Module, ModuleId, ModuleOutputBuilder, ReaderContext}; -use crate::mock::context::MockIbcStore; -use crate::signer::Signer; -use crate::timestamp::Timestamp; -use crate::Height; +use tendermint::{block, consensus, evidence, public_key::Algorithm}; // Needed in mocks. pub fn default_consensus_params() -> consensus::Params { - consensus::Params { - block: block::Size { - max_bytes: 22020096, - max_gas: -1, - time_iota_ms: 1000, - }, - evidence: evidence::Params { - max_age_num_blocks: 100000, - max_age_duration: evidence::Duration(core::time::Duration::new(48 * 3600, 0)), - max_bytes: 0, - }, - validator: consensus::params::ValidatorParams { - pub_key_types: vec![Algorithm::Ed25519], - }, - version: Some(consensus::params::VersionParams::default()), - } + consensus::Params { + block: block::Size { max_bytes: 22020096, max_gas: -1, time_iota_ms: 1000 }, + evidence: evidence::Params { + max_age_num_blocks: 100000, + max_age_duration: evidence::Duration(core::time::Duration::new(48 * 3600, 0)), + max_bytes: 0, + }, + validator: consensus::params::ValidatorParams { pub_key_types: vec![Algorithm::Ed25519] }, + version: Some(consensus::params::VersionParams::default()), + } } pub fn get_dummy_proof() -> Vec { - "Y29uc2Vuc3VzU3RhdGUvaWJjb25lY2xpZW50LzIy" - .as_bytes() - .to_vec() + "Y29uc2Vuc3VzU3RhdGUvaWJjb25lY2xpZW50LzIy".as_bytes().to_vec() } pub fn get_dummy_account_id() -> Signer { - "0CDA3F47EF3C4906693B170EF650EB968C5F4B2C".parse().unwrap() + "0CDA3F47EF3C4906693B170EF650EB968C5F4B2C".parse().unwrap() } pub fn get_dummy_bech32_account() -> String { - "cosmos1wxeyh7zgn4tctjzs0vtqpc6p5cxq5t2muzl7ng".to_string() + "cosmos1wxeyh7zgn4tctjzs0vtqpc6p5cxq5t2muzl7ng".to_string() +} + +#[derive(Debug, Clone)] +pub struct DummyTransferModule { + ibc_store: Arc>>, } -#[derive(Debug)] -pub struct DummyTransferModule { - ibc_store: Arc>, +impl PartialEq for DummyTransferModule { + fn eq(&self, _other: &Self) -> bool { + false + } } -impl DummyTransferModule { - pub fn new(ibc_store: Arc>) -> Self { - Self { ibc_store } - } +impl Eq for DummyTransferModule {} + +impl DummyTransferModule { + pub fn new(ibc_store: 
Arc>>) -> Self { + Self { ibc_store } + } } -impl Module for DummyTransferModule { - fn on_chan_open_try( - &mut self, - _output: &mut ModuleOutputBuilder, - _order: Order, - _connection_hops: &[ConnectionId], - _port_id: &PortId, - _channel_id: &ChannelId, - _counterparty: &Counterparty, - _version: &Version, - counterparty_version: &Version, - ) -> Result { - Ok(counterparty_version.clone()) - } +impl Module for DummyTransferModule { + fn on_chan_open_try( + &mut self, + _output: &mut ModuleOutputBuilder, + _order: Order, + _connection_hops: &[ConnectionId], + _port_id: &PortId, + _channel_id: &ChannelId, + _counterparty: &Counterparty, + _version: &Version, + counterparty_version: &Version, + ) -> Result { + Ok(counterparty_version.clone()) + } } #[derive(Clone, Debug, Default, PartialEq, Eq)] pub struct Crypto; - +/* impl HostFunctionsProvider for Crypto { - fn keccak_256(input: &[u8]) -> [u8; 32] { - keccak_256(input) - } - - fn secp256k1_ecdsa_recover_compressed( - signature: &[u8; 65], - value: &[u8; 32], - ) -> Option> { - sp_io::crypto::secp256k1_ecdsa_recover_compressed(signature, value) - .ok() - .map(|val| val.to_vec()) - } - - fn ed25519_verify(_signature: &[u8; 64], _msg: &[u8], _pubkey: &[u8]) -> bool { - true - } - - fn verify_membership_trie_proof( - root: &[u8; 32], - proof: &[Vec], - key: &[u8], - value: &[u8], - ) -> Result<(), Ics02Error> { - let item = vec![(key, Some(value))]; - sp_trie::verify_trie_proof::, _, _, _>( - &sp_core::H256::from_slice(root), - proof, - &item, - ) - .map_err(|_| Ics02Error::beefy(BeefyError::invalid_trie_proof())) - } - - fn verify_non_membership_trie_proof( - root: &[u8; 32], - proof: &[Vec], - key: &[u8], - ) -> Result<(), Ics02Error> { - let item: Vec<(&[u8], Option<&[u8]>)> = vec![(key, None)]; - sp_trie::verify_trie_proof::, _, _, _>( - &sp_core::H256::from_slice(root), - proof, - &item, - ) - .map_err(|_| Ics02Error::beefy(BeefyError::invalid_trie_proof())) - } - - fn sha256_digest(data: &[u8]) -> [u8; 32] { - sp_io::hashing::sha2_256(data) - } - - fn sha2_256(message: &[u8]) -> [u8; 32] { - sp_io::hashing::sha2_256(message) - } - - fn sha2_512(message: &[u8]) -> [u8; 64] { - use sha2::Digest; - let mut hasher = sha2::Sha512::new(); - hasher.update(message); - let hash = hasher.finalize(); - let mut res = [0u8; 64]; - res.copy_from_slice(&hash); - res - } - - fn sha2_512_truncated(message: &[u8]) -> [u8; 32] { - use sha2::Digest; - let mut hasher = sha2::Sha512::new(); - hasher.update(message); - let hash = hasher.finalize(); - let mut res = [0u8; 32]; - res.copy_from_slice(&hash[..32]); - res - } - - fn sha3_512(message: &[u8]) -> [u8; 64] { - use sha3::Digest; - let mut hasher = sha3::Sha3_512::new(); - hasher.update(message); - let hash = hasher.finalize(); - let mut res = [0u8; 64]; - res.copy_from_slice(&hash); - res - } - - fn ripemd160(message: &[u8]) -> [u8; 20] { - use ripemd::Digest; - let mut hasher = ripemd::Ripemd160::new(); - hasher.update(message); - let hash = hasher.finalize(); - let mut res = [0u8; 20]; - res.copy_from_slice(&hash); - res - } - - fn verify_timestamp_extrinsic( - root: &[u8; 32], - proof: &[Vec], - value: &[u8], - ) -> Result<(), Ics02Error> { - let root = sp_core::H256::from_slice(root); - let key = codec::Compact(0u32).encode(); - sp_io::trie::blake2_256_verify_proof( - root, - proof, - &key, - value, - sp_core::storage::StateVersion::V0, - ) - .then(|| ()) - .ok_or_else(|| { - Ics02Error::implementation_specific("timestamp verification failed".to_string()) - }) - } + fn keccak_256(input: &[u8]) -> 
[u8; 32] { + keccak_256(input) + } + + fn secp256k1_ecdsa_recover_compressed( + signature: &[u8; 65], + value: &[u8; 32], + ) -> Option> { + sp_io::crypto::secp256k1_ecdsa_recover_compressed(signature, value) + .ok() + .map(|val| val.to_vec()) + } + + fn ed25519_verify(_signature: &[u8; 64], _msg: &[u8], _pubkey: &[u8]) -> bool { + true + } + + fn verify_membership_trie_proof( + root: &[u8; 32], + proof: &[Vec], + key: &[u8], + value: &[u8], + ) -> Result<(), Ics02Error> { + let item = vec![(key, Some(value))]; + sp_trie::verify_trie_proof::, _, _, _>( + &sp_core::H256::from_slice(root), + proof, + &item, + ) + .map_err(|e| panic!("{}", e)) + } + + fn verify_non_membership_trie_proof( + root: &[u8; 32], + proof: &[Vec], + key: &[u8], + ) -> Result<(), Ics02Error> { + let item: Vec<(&[u8], Option<&[u8]>)> = vec![(key, None)]; + sp_trie::verify_trie_proof::, _, _, _>( + &sp_core::H256::from_slice(root), + proof, + &item, + ) + .map_err(|e| panic!("{}", e)) + } + + fn sha256_digest(data: &[u8]) -> [u8; 32] { + sp_io::hashing::sha2_256(data) + } + + fn sha2_256(message: &[u8]) -> [u8; 32] { + sp_io::hashing::sha2_256(message) + } + + fn sha2_512(message: &[u8]) -> [u8; 64] { + use sha2::Digest; + let mut hasher = sha2::Sha512::new(); + hasher.update(message); + let hash = hasher.finalize(); + let mut res = [0u8; 64]; + res.copy_from_slice(&hash); + res + } + + fn sha2_512_truncated(message: &[u8]) -> [u8; 32] { + use sha2::Digest; + let mut hasher = sha2::Sha512::new(); + hasher.update(message); + let hash = hasher.finalize(); + let mut res = [0u8; 32]; + res.copy_from_slice(&hash[..32]); + res + } + + fn sha3_512(message: &[u8]) -> [u8; 64] { + use sha3::Digest; + let mut hasher = sha3::Sha3_512::new(); + hasher.update(message); + let hash = hasher.finalize(); + let mut res = [0u8; 64]; + res.copy_from_slice(&hash); + res + } + + fn ripemd160(message: &[u8]) -> [u8; 20] { + use ripemd::Digest; + let mut hasher = ripemd::Ripemd160::new(); + hasher.update(message); + let hash = hasher.finalize(); + let mut res = [0u8; 20]; + res.copy_from_slice(&hash); + res + } + + fn verify_timestamp_extrinsic( + root: &[u8; 32], + proof: &[Vec], + value: &[u8], + ) -> Result<(), Ics02Error> { + let root = sp_core::H256::from_slice(root); + let key = codec::Compact(0u32).encode(); + sp_io::trie::blake2_256_verify_proof( + root, + proof, + &key, + value, + sp_core::storage::StateVersion::V0, + ) + .then(|| ()) + .ok_or_else(|| { + Ics02Error::implementation_specific("timestamp verification failed".to_string()) + }) + } +} + */ + +// implementation for ics23 +impl ics23::HostFunctionsProvider for Crypto { + fn sha2_256(_message: &[u8]) -> [u8; 32] { + unimplemented!() + } + + fn sha2_512(_message: &[u8]) -> [u8; 64] { + unimplemented!() + } + + fn sha2_512_truncated(_message: &[u8]) -> [u8; 32] { + unimplemented!() + } + + fn sha3_512(_message: &[u8]) -> [u8; 64] { + unimplemented!() + } + + fn ripemd160(_message: &[u8]) -> [u8; 20] { + unimplemented!() + } } -impl Ics20Keeper for DummyTransferModule { - type AccountId = Signer; +impl Ics20Keeper for DummyTransferModule { + type AccountId = Signer; } -impl ChannelKeeper for DummyTransferModule { - fn store_packet_commitment( - &mut self, - key: (PortId, ChannelId, Sequence), - commitment: PacketCommitment, - ) -> Result<(), Error> { - self.ibc_store - .lock() - .unwrap() - .packet_commitment - .insert(key, commitment); - Ok(()) - } - - fn delete_packet_commitment( - &mut self, - _key: (PortId, ChannelId, Sequence), - ) -> Result<(), Error> { - unimplemented!() 
- } - - fn store_packet_receipt( - &mut self, - _key: (PortId, ChannelId, Sequence), - _receipt: Receipt, - ) -> Result<(), Error> { - unimplemented!() - } - - fn store_packet_acknowledgement( - &mut self, - _key: (PortId, ChannelId, Sequence), - _ack: AcknowledgementCommitment, - ) -> Result<(), Error> { - unimplemented!() - } - - fn delete_packet_acknowledgement( - &mut self, - _key: (PortId, ChannelId, Sequence), - ) -> Result<(), Error> { - unimplemented!() - } - - fn store_connection_channels( - &mut self, - _conn_id: ConnectionId, - _port_channel_id: &(PortId, ChannelId), - ) -> Result<(), Error> { - unimplemented!() - } - - fn store_channel( - &mut self, - _port_channel_id: (PortId, ChannelId), - _channel_end: &ChannelEnd, - ) -> Result<(), Error> { - unimplemented!() - } - - fn store_next_sequence_send( - &mut self, - port_channel_id: (PortId, ChannelId), - seq: Sequence, - ) -> Result<(), Error> { - self.ibc_store - .lock() - .unwrap() - .next_sequence_send - .insert(port_channel_id, seq); - Ok(()) - } - - fn store_next_sequence_recv( - &mut self, - _port_channel_id: (PortId, ChannelId), - _seq: Sequence, - ) -> Result<(), Error> { - unimplemented!() - } - - fn store_next_sequence_ack( - &mut self, - _port_channel_id: (PortId, ChannelId), - _seq: Sequence, - ) -> Result<(), Error> { - unimplemented!() - } - - fn increase_channel_counter(&mut self) { - unimplemented!() - } - - fn store_send_packet( - &mut self, - _key: (PortId, ChannelId, Sequence), - _packet: crate::core::ics04_channel::packet::Packet, - ) -> Result<(), Error> { - Ok(()) - } - - fn store_recv_packet( - &mut self, - _key: (PortId, ChannelId, Sequence), - _packet: crate::core::ics04_channel::packet::Packet, - ) -> Result<(), Error> { - Ok(()) - } +impl ChannelKeeper for DummyTransferModule { + fn store_packet_commitment( + &mut self, + key: (PortId, ChannelId, Sequence), + commitment: PacketCommitment, + ) -> Result<(), Error> { + self.ibc_store.lock().unwrap().packet_commitment.insert(key, commitment); + Ok(()) + } + + fn delete_packet_commitment( + &mut self, + _key: (PortId, ChannelId, Sequence), + ) -> Result<(), Error> { + unimplemented!() + } + + fn store_packet_receipt( + &mut self, + _key: (PortId, ChannelId, Sequence), + _receipt: Receipt, + ) -> Result<(), Error> { + unimplemented!() + } + + fn store_packet_acknowledgement( + &mut self, + _key: (PortId, ChannelId, Sequence), + _ack: AcknowledgementCommitment, + ) -> Result<(), Error> { + unimplemented!() + } + + fn delete_packet_acknowledgement( + &mut self, + _key: (PortId, ChannelId, Sequence), + ) -> Result<(), Error> { + unimplemented!() + } + + fn store_connection_channels( + &mut self, + _conn_id: ConnectionId, + _port_channel_id: &(PortId, ChannelId), + ) -> Result<(), Error> { + unimplemented!() + } + + fn store_channel( + &mut self, + _port_channel_id: (PortId, ChannelId), + _channel_end: &ChannelEnd, + ) -> Result<(), Error> { + unimplemented!() + } + + fn store_next_sequence_send( + &mut self, + port_channel_id: (PortId, ChannelId), + seq: Sequence, + ) -> Result<(), Error> { + self.ibc_store.lock().unwrap().next_sequence_send.insert(port_channel_id, seq); + Ok(()) + } + + fn store_next_sequence_recv( + &mut self, + _port_channel_id: (PortId, ChannelId), + _seq: Sequence, + ) -> Result<(), Error> { + unimplemented!() + } + + fn store_next_sequence_ack( + &mut self, + _port_channel_id: (PortId, ChannelId), + _seq: Sequence, + ) -> Result<(), Error> { + unimplemented!() + } + + fn increase_channel_counter(&mut self) { + unimplemented!() + } + + fn 
store_send_packet( + &mut self, + _key: (PortId, ChannelId, Sequence), + _packet: crate::core::ics04_channel::packet::Packet, + ) -> Result<(), Error> { + Ok(()) + } + + fn store_recv_packet( + &mut self, + _key: (PortId, ChannelId, Sequence), + _packet: crate::core::ics04_channel::packet::Packet, + ) -> Result<(), Error> { + Ok(()) + } } -impl PortReader for DummyTransferModule { - fn lookup_module_by_port(&self, _port_id: &PortId) -> Result { - unimplemented!() - } +impl PortReader for DummyTransferModule { + fn lookup_module_by_port(&self, _port_id: &PortId) -> Result { + unimplemented!() + } } -impl BankKeeper for DummyTransferModule { - type AccountId = Signer; - - fn send_coins( - &mut self, - _from: &Self::AccountId, - _to: &Self::AccountId, - _amt: &PrefixedCoin, - ) -> Result<(), Ics20Error> { - Ok(()) - } - - fn mint_coins( - &mut self, - _account: &Self::AccountId, - _amt: &PrefixedCoin, - ) -> Result<(), Ics20Error> { - Ok(()) - } - - fn burn_coins( - &mut self, - _account: &Self::AccountId, - _amt: &PrefixedCoin, - ) -> Result<(), Ics20Error> { - Ok(()) - } +impl BankKeeper for DummyTransferModule { + type AccountId = Signer; + + fn send_coins( + &mut self, + _from: &Self::AccountId, + _to: &Self::AccountId, + _amt: &PrefixedCoin, + ) -> Result<(), Ics20Error> { + Ok(()) + } + + fn mint_coins( + &mut self, + _account: &Self::AccountId, + _amt: &PrefixedCoin, + ) -> Result<(), Ics20Error> { + Ok(()) + } + + fn burn_coins( + &mut self, + _account: &Self::AccountId, + _amt: &PrefixedCoin, + ) -> Result<(), Ics20Error> { + Ok(()) + } } -impl Ics20Reader for DummyTransferModule { - type AccountId = Signer; +impl Ics20Reader for DummyTransferModule { + type AccountId = Signer; - fn get_port(&self) -> Result { - Ok(PortId::transfer()) - } + fn get_port(&self) -> Result { + Ok(PortId::transfer()) + } - fn is_send_enabled(&self) -> bool { - true - } + fn is_send_enabled(&self) -> bool { + true + } - fn is_receive_enabled(&self) -> bool { - true - } + fn is_receive_enabled(&self) -> bool { + true + } } -impl ConnectionReader for DummyTransferModule { - fn connection_end(&self, cid: &ConnectionId) -> Result { - match self.ibc_store.lock().unwrap().connections.get(cid) { - Some(connection_end) => Ok(connection_end.clone()), - None => Err(Ics03Error::connection_not_found(cid.clone())), - } - } - - fn host_oldest_height(&self) -> Height { - todo!() - } - - fn commitment_prefix(&self) -> crate::core::ics23_commitment::commitment::CommitmentPrefix { - todo!() - } - - fn connection_counter(&self) -> Result { - todo!() - } +impl ConnectionReader for DummyTransferModule { + fn connection_end(&self, cid: &ConnectionId) -> Result { + match self.ibc_store.lock().unwrap().connections.get(cid) { + Some(connection_end) => Ok(connection_end.clone()), + None => Err(Ics03Error::connection_not_found(cid.clone())), + } + } + + fn host_oldest_height(&self) -> Height { + todo!() + } + + fn commitment_prefix(&self) -> crate::core::ics23_commitment::commitment::CommitmentPrefix { + todo!() + } + + fn connection_counter(&self) -> Result { + todo!() + } } -impl ClientReader for DummyTransferModule { - fn client_state(&self, client_id: &ClientId) -> Result { - match self.ibc_store.lock().unwrap().clients.get(client_id) { - Some(client_record) => client_record - .client_state - .clone() - .ok_or_else(|| Ics02Error::client_not_found(client_id.clone())), - None => Err(Ics02Error::client_not_found(client_id.clone())), - } - } - - fn host_height(&self) -> Height { - Height::zero() - } - - fn host_consensus_state( - 
&self, - _height: Height, - _proof: Option>, - ) -> Result { - unimplemented!() - } - - fn consensus_state( - &self, - client_id: &ClientId, - height: Height, - ) -> Result { - match self.ibc_store.lock().unwrap().clients.get(client_id) { - Some(client_record) => match client_record.consensus_states.get(&height) { - Some(consensus_state) => Ok(consensus_state.clone()), - None => Err(Ics02Error::consensus_state_not_found( - client_id.clone(), - height, - )), - }, - None => Err(Ics02Error::consensus_state_not_found( - client_id.clone(), - height, - )), - } - } - - fn client_type(&self, _client_id: &ClientId) -> Result { - todo!() - } - - fn next_consensus_state( - &self, - _client_id: &ClientId, - _height: Height, - ) -> Result, Ics02Error> { - todo!() - } - - fn prev_consensus_state( - &self, - _client_id: &ClientId, - _height: Height, - ) -> Result, Ics02Error> { - todo!() - } - - fn host_timestamp(&self) -> Timestamp { - todo!() - } - - fn client_counter(&self) -> Result { - todo!() - } - - fn host_client_type(&self) -> ClientType { - ClientType::Tendermint - } +impl ClientReader for DummyTransferModule { + fn client_state(&self, client_id: &ClientId) -> Result { + match self.ibc_store.lock().unwrap().clients.get(client_id) { + Some(client_record) => client_record + .client_state + .clone() + .ok_or_else(|| Ics02Error::client_not_found(client_id.clone())), + None => Err(Ics02Error::client_not_found(client_id.clone())), + } + } + + fn host_height(&self) -> Height { + Height::zero() + } + + fn host_consensus_state( + &self, + _height: Height, + _proof: Option>, + ) -> Result { + unimplemented!() + } + + fn consensus_state( + &self, + client_id: &ClientId, + height: Height, + ) -> Result { + match self.ibc_store.lock().unwrap().clients.get(client_id) { + Some(client_record) => match client_record.consensus_states.get(&height) { + Some(consensus_state) => Ok(consensus_state.clone()), + None => Err(Ics02Error::consensus_state_not_found(client_id.clone(), height)), + }, + None => Err(Ics02Error::consensus_state_not_found(client_id.clone(), height)), + } + } + + fn client_type(&self, _client_id: &ClientId) -> Result { + todo!() + } + + fn next_consensus_state( + &self, + _client_id: &ClientId, + _height: Height, + ) -> Result, Ics02Error> { + todo!() + } + + fn prev_consensus_state( + &self, + _client_id: &ClientId, + _height: Height, + ) -> Result, Ics02Error> { + todo!() + } + + fn host_timestamp(&self) -> Timestamp { + todo!() + } + + fn client_counter(&self) -> Result { + todo!() + } } -impl ChannelReader for DummyTransferModule { - fn channel_end(&self, pcid: &(PortId, ChannelId)) -> Result { - match self.ibc_store.lock().unwrap().channels.get(pcid) { - Some(channel_end) => Ok(channel_end.clone()), - None => Err(Error::channel_not_found(pcid.0.clone(), pcid.1)), - } - } - - fn connection_channels(&self, _cid: &ConnectionId) -> Result, Error> { - unimplemented!() - } - - fn get_next_sequence_send( - &self, - port_channel_id: &(PortId, ChannelId), - ) -> Result { - match self - .ibc_store - .lock() - .unwrap() - .next_sequence_send - .get(port_channel_id) - { - Some(sequence) => Ok(*sequence), - None => Err(Error::missing_next_send_seq(port_channel_id.clone())), - } - } - - fn get_next_sequence_recv( - &self, - _port_channel_id: &(PortId, ChannelId), - ) -> Result { - unimplemented!() - } - - fn get_next_sequence_ack( - &self, - _port_channel_id: &(PortId, ChannelId), - ) -> Result { - unimplemented!() - } - - fn get_packet_commitment( - &self, - _key: &(PortId, ChannelId, Sequence), - ) -> 
Result { - unimplemented!() - } - - fn get_packet_receipt(&self, _key: &(PortId, ChannelId, Sequence)) -> Result { - unimplemented!() - } - - fn get_packet_acknowledgement( - &self, - _key: &(PortId, ChannelId, Sequence), - ) -> Result { - unimplemented!() - } - - fn hash(&self, value: Vec) -> Vec { - use sha2::Digest; - - sha2::Sha256::digest(value).to_vec() - } - - fn client_update_time( - &self, - _client_id: &ClientId, - _height: Height, - ) -> Result { - unimplemented!() - } - - fn client_update_height( - &self, - _client_id: &ClientId, - _height: Height, - ) -> Result { - unimplemented!() - } - - fn channel_counter(&self) -> Result { - unimplemented!() - } - - fn max_expected_time_per_block(&self) -> Duration { - unimplemented!() - } +impl ChannelReader for DummyTransferModule { + fn channel_end(&self, pcid: &(PortId, ChannelId)) -> Result { + match self.ibc_store.lock().unwrap().channels.get(pcid) { + Some(channel_end) => Ok(channel_end.clone()), + None => Err(Error::channel_not_found(pcid.0.clone(), pcid.1)), + } + } + + fn connection_channels(&self, _cid: &ConnectionId) -> Result, Error> { + unimplemented!() + } + + fn get_next_sequence_send( + &self, + port_channel_id: &(PortId, ChannelId), + ) -> Result { + match self.ibc_store.lock().unwrap().next_sequence_send.get(port_channel_id) { + Some(sequence) => Ok(*sequence), + None => Err(Error::missing_next_send_seq(port_channel_id.clone())), + } + } + + fn get_next_sequence_recv( + &self, + _port_channel_id: &(PortId, ChannelId), + ) -> Result { + unimplemented!() + } + + fn get_next_sequence_ack( + &self, + _port_channel_id: &(PortId, ChannelId), + ) -> Result { + unimplemented!() + } + + fn get_packet_commitment( + &self, + _key: &(PortId, ChannelId, Sequence), + ) -> Result { + unimplemented!() + } + + fn get_packet_receipt(&self, _key: &(PortId, ChannelId, Sequence)) -> Result { + unimplemented!() + } + + fn get_packet_acknowledgement( + &self, + _key: &(PortId, ChannelId, Sequence), + ) -> Result { + unimplemented!() + } + + fn hash(&self, value: Vec) -> Vec { + use sha2::Digest; + + sha2::Sha256::digest(value).to_vec() + } + + fn client_update_time( + &self, + _client_id: &ClientId, + _height: Height, + ) -> Result { + unimplemented!() + } + + fn client_update_height( + &self, + _client_id: &ClientId, + _height: Height, + ) -> Result { + unimplemented!() + } + + fn channel_counter(&self) -> Result { + unimplemented!() + } + + fn max_expected_time_per_block(&self) -> Duration { + unimplemented!() + } } -impl ClientKeeper for DummyTransferModule { - fn store_client_type( - &mut self, - _client_id: ClientId, - _client_type: ClientType, - ) -> Result<(), Ics02Error> { - todo!() - } - - fn store_client_state( - &mut self, - _client_id: ClientId, - _client_state: AnyClientState, - ) -> Result<(), Ics02Error> { - todo!() - } - - fn store_consensus_state( - &mut self, - _client_id: ClientId, - _height: Height, - _consensus_state: AnyConsensusState, - ) -> Result<(), Ics02Error> { - todo!() - } - - fn increase_client_counter(&mut self) { - todo!() - } - - fn store_update_time( - &mut self, - _client_id: ClientId, - _height: Height, - _timestamp: Timestamp, - ) -> Result<(), Ics02Error> { - todo!() - } - - fn store_update_height( - &mut self, - _client_id: ClientId, - _height: Height, - _host_height: Height, - ) -> Result<(), Ics02Error> { - Ok(()) - } - - fn validate_self_client(&self, _client_state: &AnyClientState) -> Result<(), Ics02Error> { - Ok(()) - } +impl ClientKeeper for DummyTransferModule { + type AnyHeader = C::AnyHeader; + 
type AnyClientState = C::AnyClientState; + type AnyConsensusState = C::AnyConsensusState; + type AnyMisbehaviour = C::AnyMisbehaviour; + type ClientDef = C::ClientDef; + + fn store_client_type( + &mut self, + _client_id: ClientId, + _client_type: ClientType, + ) -> Result<(), Ics02Error> { + todo!() + } + + fn store_client_state( + &mut self, + _client_id: ClientId, + _client_state: Self::AnyClientState, + ) -> Result<(), Ics02Error> { + todo!() + } + + fn store_consensus_state( + &mut self, + _client_id: ClientId, + _height: Height, + _consensus_state: Self::AnyConsensusState, + ) -> Result<(), Ics02Error> { + todo!() + } + + fn increase_client_counter(&mut self) { + todo!() + } + + fn store_update_time( + &mut self, + _client_id: ClientId, + _height: Height, + _timestamp: Timestamp, + ) -> Result<(), Ics02Error> { + todo!() + } + + fn store_update_height( + &mut self, + _client_id: ClientId, + _height: Height, + _host_height: Height, + ) -> Result<(), Ics02Error> { + Ok(()) + } + + fn validate_self_client(&self, _client_state: &Self::AnyClientState) -> Result<(), Ics02Error> { + Ok(()) + } } -impl Ics20Context for DummyTransferModule { - type AccountId = Signer; +impl Ics20Context for DummyTransferModule { + type AccountId = Signer; } -impl ReaderContext for DummyTransferModule {} +impl ReaderContext for DummyTransferModule {} diff --git a/modules/src/timestamp.rs b/modules/src/timestamp.rs index 99a1dd511a..f6fd217f99 100644 --- a/modules/src/timestamp.rs +++ b/modules/src/timestamp.rs @@ -1,11 +1,13 @@ use crate::prelude::*; -use core::fmt::Display; -use core::hash::{Hash, Hasher}; -use core::num::ParseIntError; -use core::ops::{Add, Sub}; -use core::str::FromStr; -use core::time::Duration; +use core::{ + fmt::Display, + hash::{Hash, Hasher}, + num::ParseIntError, + ops::{Add, Sub}, + str::FromStr, + time::Duration, +}; use flex_error::{define_error, TraceError}; use serde_derive::{Deserialize, Serialize}; @@ -23,17 +25,17 @@ pub const ZERO_DURATION: Duration = Duration::from_secs(0); /// of timestamp. #[derive(PartialEq, Eq, Copy, Clone, Debug, Default, Deserialize, Serialize)] pub struct Timestamp { - time: Option

diff --git a/docs/architecture/assets/IBC_relayer.jpeg b/docs/architecture/assets/IBC_relayer.jpeg
deleted file mode
index 9c470d3555645e972f260c18294261c73340a665..0000000000000000000000000000000000000000
Binary files a/docs/architecture/assets/IBC_relayer.jpeg and /dev/null differ